-rw-r--r--.editorconfig3
-rw-r--r--.mailmap16
-rw-r--r--CREDITS20
-rw-r--r--Documentation/ABI/stable/sysfs-block53
-rw-r--r--Documentation/ABI/stable/sysfs-class-backlight7
-rw-r--r--Documentation/ABI/testing/configfs-tsm63
-rw-r--r--Documentation/ABI/testing/debugfs-tpmi9
-rw-r--r--Documentation/ABI/testing/sysfs-bus-auxiliary9
-rw-r--r--Documentation/ABI/testing/sysfs-bus-i2c-devices-turris-omnia-mcu113
-rw-r--r--Documentation/ABI/testing/sysfs-bus-wmi81
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu18
-rw-r--r--Documentation/ABI/testing/sysfs-fs-xfs26
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst6
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.rst16
-rw-r--r--Documentation/RCU/whatisRCU.rst30
-rw-r--r--Documentation/admin-guide/LSM/tomoyo.rst35
-rw-r--r--Documentation/admin-guide/cgroup-v1/pids.rst3
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst47
-rw-r--r--Documentation/admin-guide/cifs/usage.rst36
-rw-r--r--Documentation/admin-guide/gpio/gpio-virtuser.rst177
-rw-r--r--Documentation/admin-guide/gpio/index.rst1
-rw-r--r--Documentation/admin-guide/hw-vuln/spectre.rst86
-rw-r--r--Documentation/admin-guide/index.rst1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt117
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst4
-rw-r--r--Documentation/admin-guide/pm/amd-pstate.rst18
-rw-r--r--Documentation/admin-guide/pm/cpufreq.rst4
-rw-r--r--Documentation/admin-guide/pmf.rst24
-rw-r--r--Documentation/arch/arm64/cpu-hotplug.rst79
-rw-r--r--Documentation/arch/arm64/index.rst1
-rw-r--r--Documentation/arch/arm64/memory.rst42
-rw-r--r--Documentation/arch/arm64/silicon-errata.rst16
-rw-r--r--Documentation/arch/riscv/cmodx.rst4
-rw-r--r--Documentation/arch/riscv/uabi.rst4
-rw-r--r--Documentation/arch/x86/amd-memory-encryption.rst29
-rw-r--r--Documentation/arch/x86/resctrl.rst27
-rw-r--r--Documentation/block/data-integrity.rst49
-rw-r--r--Documentation/block/writeback_cache_control.rst67
-rw-r--r--Documentation/bpf/libbpf/libbpf_overview.rst8
-rw-r--r--Documentation/bpf/standardization/abi.rst3
-rw-r--r--Documentation/bpf/standardization/instruction-set.rst333
-rw-r--r--Documentation/cdrom/cdrom-standard.rst4
-rw-r--r--Documentation/core-api/swiotlb.rst2
-rw-r--r--Documentation/dev-tools/gpio-sloppy-logic-analyzer.rst93
-rw-r--r--Documentation/dev-tools/index.rst1
-rw-r--r--Documentation/dev-tools/kselftest.rst7
-rw-r--r--Documentation/devicetree/bindings/arm/airoha.yaml4
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.yaml10
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic/analog-top.txt20
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic/assist.txt17
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic/bootrom.txt17
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic/pmu.txt18
-rw-r--r--Documentation/devicetree/bindings/arm/arm,juno-fpga-apb-regs.yaml61
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt29
-rw-r--r--Documentation/devicetree/bindings/arm/axis.txt16
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp10
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-cpucfg.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml32
-rw-r--r--Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml18
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/marvell,dove.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.yaml24
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml27
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml42
-rw-r--r--Documentation/devicetree/bindings/arm/rtsm-dcscb.txt19
-rw-r--r--Documentation/devicetree/bindings/arm/spear-misc.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/stm32.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml22
-rw-r--r--Documentation/devicetree/bindings/arm/ti/k3.yaml6
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt21
-rw-r--r--Documentation/devicetree/bindings/ata/fsl,ahci.yaml64
-rw-r--r--Documentation/devicetree/bindings/cache/qcom,llcc.yaml57
-rw-r--r--Documentation/devicetree/bindings/cache/starfive,jh8100-starlink-cache.yaml66
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml3
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,qcm2290-gpucc.yaml77
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml7
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml6
-rw-r--r--Documentation/devicetree/bindings/clock/ti,sci-clk.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt20
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,edma.yaml4
-rw-r--r--Documentation/devicetree/bindings/firmware/arm,scmi.yaml12
-rw-r--r--Documentation/devicetree/bindings/firmware/cznic,turris-omnia-mcu.yaml86
-rw-r--r--Documentation/devicetree/bindings/firmware/qcom,scm.yaml15
-rw-r--r--Documentation/devicetree/bindings/fuse/renesas,rcar-efuse.yaml55
-rw-r--r--Documentation/devicetree/bindings/fuse/renesas,rcar-otp.yaml38
-rw-r--r--Documentation/devicetree/bindings/gpio/aspeed,sgpio.yaml10
-rw-r--r--Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml81
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl,qoriq-gpio.yaml87
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt53
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-vf610.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-zevio.txt16
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio_atmel.txt31
-rw-r--r--Documentation/devicetree/bindings/gpio/lsi,zevio-gpio.yaml43
-rw-r--r--Documentation/devicetree/bindings/hwmon/g762.txt47
-rw-r--r--Documentation/devicetree/bindings/hwmon/gmt,g762.yaml95
-rw-r--r--Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml92
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml9
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml12
-rw-r--r--Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml4
-rw-r--r--Documentation/devicetree/bindings/input/cirrus,cs40l50.yaml68
-rw-r--r--Documentation/devicetree/bindings/input/elan,ekth6915.yaml19
-rw-r--r--Documentation/devicetree/bindings/input/ilitek,ili2901.yaml66
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,armada-370-xp-mpic.txt38
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,mpic.yaml63
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/ti,lm3509.yaml136
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lp55xx.yaml11
-rw-r--r--Documentation/devicetree/bindings/leds/silergy,sy7802.yaml100
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/fsl/fsl,ifc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml76
-rw-r--r--Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/mfd.txt15
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml144
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml288
-rw-r--r--Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml70
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml173
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon-common.yaml71
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.yaml278
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,twl.yaml167
-rw-r--r--Documentation/devicetree/bindings/mips/mscc.txt17
-rw-r--r--Documentation/devicetree/bindings/mmc/amlogic,meson-gx-mmc.yaml3
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml105
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-esdhc.txt52
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml16
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-sprd.txt67
-rw-r--r--Documentation/devicetree/bindings/mmc/sprd,sdhci-r11.yaml112
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt9
-rw-r--r--Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml143
-rw-r--r--Documentation/devicetree/bindings/net/arc_emac.txt46
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml51
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml35
-rw-r--r--Documentation/devicetree/bindings/net/can/xilinx,can.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/cdns,macb.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml202
-rw-r--r--Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt146
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt129
-rw-r--r--Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml162
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-controller.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-phy.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/fsl,enetc-ierb.yaml38
-rw-r--r--Documentation/devicetree/bindings/net/fsl,enetc-mdio.yaml57
-rw-r--r--Documentation/devicetree/bindings/net/fsl,enetc.yaml66
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fman-mdio.yaml123
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fman-muram.yaml40
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fman-port.yaml75
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fman.yaml210
-rw-r--r--Documentation/devicetree/bindings/net/fsl-enetc.txt119
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fman.txt548
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt2
-rw-r--r--Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt10
-rw-r--r--Documentation/devicetree/bindings/net/mediatek,net.yaml28
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-bluetooth.txt36
-rw-r--r--Documentation/devicetree/bindings/net/mscc,miim.yaml10
-rw-r--r--Documentation/devicetree/bindings/net/pcs/snps,dw-xpcs.yaml136
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml11
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml18
-rw-r--r--Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml40
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml148
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.yaml49
-rw-r--r--Documentation/devicetree/bindings/net/ti,icss-iep.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml46
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml99
-rw-r--r--Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml5
-rw-r--r--Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/platform/lenovo,yoga-c630-ec.yaml83
-rw-r--r--Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml2
-rw-r--r--Documentation/devicetree/bindings/ptp/fsl,ptp.yaml144
-rw-r--r--Documentation/devicetree/bindings/ptp/ptp-qoriq.txt87
-rw-r--r--Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml48
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml4
-rw-r--r--Documentation/devicetree/bindings/pwm/fsl,vf610-ftm-pwm.yaml92
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-pwm.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt55
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-gpio.yaml46
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.yaml6
-rw-r--r--Documentation/devicetree/bindings/regulator/mediatek,mt6873-dvfsrc-regulator.yaml43
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml6
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml185
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml11
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd96801-regulator.yaml63
-rw-r--r--Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt43
-rw-r--r--Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.yaml67
-rw-r--r--Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml7
-rw-r--r--Documentation/devicetree/bindings/regulator/ti,tps65132.yaml3
-rw-r--r--Documentation/devicetree/bindings/regulator/twl-regulator.txt80
-rw-r--r--Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml10
-rw-r--r--Documentation/devicetree/bindings/reset/ti,sci-reset.yaml2
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml1
-rw-r--r--Documentation/devicetree/bindings/riscv/microchip.yaml1
-rw-r--r--Documentation/devicetree/bindings/riscv/starfive.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt64
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/hisilicon/hisilicon,hi3660-usb3-otg-bc.yaml46
-rw-r--r--Documentation/devicetree/bindings/soc/intel/intel,lgm-syscon.yaml57
-rw-r--r--Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/microchip/microchip,sparx5-cpu-syscon.yaml49
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.yaml3
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smsm.yaml30
-rw-r--r--Documentation/devicetree/bindings/soc/sprd/sprd,sc9863a-glbregs.yaml55
-rw-r--r--Documentation/devicetree/bindings/soc/sti/st,sti-syscon.yaml9
-rw-r--r--Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml42
-rw-r--r--Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml (renamed from Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml)4
-rw-r--r--Documentation/devicetree/bindings/spi/amlogic,a1-spifc.yaml3
-rw-r--r--Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml8
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt23
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.yaml50
-rw-r--r--Documentation/devicetree/bindings/spi/fsl,dspi-peripheral-props.yaml30
-rw-r--r--Documentation/devicetree/bindings/spi/fsl,dspi.yaml116
-rw-r--r--Documentation/devicetree/bindings/spi/ibm,spi-fsi.yaml55
-rw-r--r--Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml35
-rw-r--r--Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml29
-rw-r--r--Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml4
-rw-r--r--Documentation/devicetree/bindings/spi/spi-cadence.yaml7
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt65
-rw-r--r--Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml1
-rw-r--r--Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml28
-rw-r--r--Documentation/devicetree/bindings/sram/qcom,imem.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml6
-rw-r--r--Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml22
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml24
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml17
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml5
-rw-r--r--Documentation/devicetree/bindings/thermal/hisilicon,tsensor.yaml57
-rw-r--r--Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt32
-rw-r--r--Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml5
-rw-r--r--Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/nvidia,tegra186-bpmp-thermal.yaml12
-rw-r--r--Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml9
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom,spmi-temp-alarm.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml8
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml8
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.yaml97
-rw-r--r--Documentation/devicetree/bindings/thermal/qoriq-thermal.yaml5
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml71
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-thermal.yaml64
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml5
-rw-r--r--Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml43
-rw-r--r--Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml3
-rw-r--r--Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml5
-rw-r--r--Documentation/devicetree/bindings/thermal/sprd-thermal.yaml49
-rw-r--r--Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml5
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal-zones.yaml6
-rw-r--r--Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml15
-rw-r--r--Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml5
-rw-r--r--Documentation/devicetree/bindings/timer/realtek,otto-timer.yaml63
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tmu.yaml12
-rw-r--r--Documentation/devicetree/bindings/timer/sifive,clint.yaml1
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml10
-rw-r--r--Documentation/devicetree/bindings/usb/realtek,rts5411.yaml1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml10
-rw-r--r--Documentation/driver-api/cxl/memory-devices.rst15
-rw-r--r--Documentation/driver-api/driver-model/devres.rst3
-rw-r--r--Documentation/driver-api/gpio/board.rst6
-rw-r--r--Documentation/driver-api/gpio/consumer.rst4
-rw-r--r--Documentation/driver-api/gpio/driver.rst5
-rw-r--r--Documentation/driver-api/gpio/drivers-on-gpio.rst7
-rw-r--r--Documentation/driver-api/gpio/index.rst1
-rw-r--r--Documentation/driver-api/gpio/intro.rst12
-rw-r--r--Documentation/driver-api/gpio/legacy.rst679
-rw-r--r--Documentation/filesystems/gfs2-glocks.rst55
-rw-r--r--Documentation/filesystems/index.rst1
-rw-r--r--Documentation/filesystems/iomap/design.rst441
-rw-r--r--Documentation/filesystems/iomap/index.rst13
-rw-r--r--Documentation/filesystems/iomap/operations.rst713
-rw-r--r--Documentation/filesystems/iomap/porting.rst120
-rw-r--r--Documentation/filesystems/mount_api.rst9
-rw-r--r--Documentation/filesystems/proc.rst1
-rw-r--r--Documentation/hid/hid-bpf.rst173
-rw-r--r--Documentation/hwmon/adm1021.rst153
-rw-r--r--Documentation/hwmon/amc6821.rst7
-rw-r--r--Documentation/hwmon/asus_ec_sensors.rst1
-rw-r--r--Documentation/hwmon/corsair-cpro.rst8
-rw-r--r--Documentation/hwmon/corsair-psu.rst6
-rw-r--r--Documentation/hwmon/cros_ec_hwmon.rst26
-rw-r--r--Documentation/hwmon/dell-smm-hwmon.rst2
-rw-r--r--Documentation/hwmon/index.rst8
-rw-r--r--Documentation/hwmon/max31827.rst13
-rw-r--r--Documentation/hwmon/max6642.rst27
-rw-r--r--Documentation/hwmon/mp2891.rst179
-rw-r--r--Documentation/hwmon/mp2993.rst150
-rw-r--r--Documentation/hwmon/mp5920.rst91
-rw-r--r--Documentation/hwmon/mp9941.rst92
-rw-r--r--Documentation/hwmon/spd5118.rst63
-rw-r--r--Documentation/i2c/i2c_bus.svg15
-rw-r--r--Documentation/i2c/summary.rst79
-rw-r--r--Documentation/kbuild/kconfig-language.rst12
-rw-r--r--Documentation/kbuild/modules.rst8
-rw-r--r--Documentation/leds/leds-blinkm.rst2
-rw-r--r--Documentation/netlink/specs/dpll.yaml1
-rw-r--r--Documentation/netlink/specs/ethtool.yaml151
-rw-r--r--Documentation/netlink/specs/netdev.yaml4
-rw-r--r--Documentation/netlink/specs/nfsd.yaml29
-rw-r--r--Documentation/netlink/specs/ovs_flow.yaml17
-rw-r--r--Documentation/netlink/specs/tc.yaml26
-rw-r--r--Documentation/netlink/specs/tcp_metrics.yaml169
-rw-r--r--Documentation/networking/af_xdp.rst33
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst24
-rw-r--r--Documentation/networking/devlink/devlink-region.rst2
-rw-r--r--Documentation/networking/devlink/ice.rst25
-rw-r--r--Documentation/networking/devlink/octeontx2.rst16
-rw-r--r--Documentation/networking/ethtool-netlink.rst165
-rw-r--r--Documentation/networking/index.rst3
-rw-r--r--Documentation/networking/ip-sysctl.rst27
-rw-r--r--Documentation/networking/iso15765-2.rst386
-rw-r--r--Documentation/networking/mptcp-sysctl.rst70
-rw-r--r--Documentation/networking/mptcp.rst156
-rw-r--r--Documentation/networking/net_dim.rst42
-rw-r--r--Documentation/networking/phy.rst6
-rw-r--r--Documentation/networking/sriov.rst25
-rw-r--r--Documentation/networking/tcp_ao.rst9
-rw-r--r--Documentation/power/regulator/consumer.rst6
-rw-r--r--Documentation/process/maintainer-netdev.rst2
-rw-r--r--Documentation/translations/zh_CN/driver-api/gpio/index.rst2
-rw-r--r--Documentation/translations/zh_CN/driver-api/gpio/legacy.rst618
-rw-r--r--Documentation/translations/zh_TW/gpio.txt574
-rw-r--r--Documentation/userspace-api/gpio/gpio-handle-get-line-values-ioctl.rst7
-rw-r--r--Documentation/userspace-api/gpio/gpio-handle-set-config-ioctl.rst5
-rw-r--r--Documentation/userspace-api/gpio/gpio-handle-set-line-values-ioctl.rst7
-rw-r--r--Documentation/userspace-api/gpio/gpio-lineevent-data-read.rst5
-rw-r--r--Documentation/userspace-api/gpio/gpio-v2-line-event-read.rst5
-rw-r--r--Documentation/userspace-api/gpio/gpio-v2-line-get-values-ioctl.rst7
-rw-r--r--Documentation/userspace-api/gpio/gpio-v2-line-set-config-ioctl.rst7
-rw-r--r--Documentation/userspace-api/gpio/gpio-v2-line-set-values-ioctl.rst7
-rw-r--r--Documentation/userspace-api/gpio/sysfs.rst7
-rw-r--r--Documentation/userspace-api/index.rst1
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst1
-rw-r--r--Documentation/userspace-api/media/v4l/dev-subdev.rst2
-rw-r--r--Documentation/userspace-api/mfd_noexec.rst86
-rw-r--r--Documentation/virt/coco/sev-guest.rst11
-rw-r--r--Documentation/virt/hyperv/clocks.rst21
-rw-r--r--Documentation/virt/hyperv/overview.rst22
-rw-r--r--Documentation/virt/hyperv/vmbus.rst143
-rw-r--r--MAINTAINERS378
-rw-r--r--Makefile4
-rw-r--r--arch/arc/include/asm/Kbuild2
-rw-r--r--arch/arc/include/asm/unistd.h14
-rw-r--r--arch/arc/include/uapi/asm/Kbuild2
-rw-r--r--arch/arc/include/uapi/asm/unistd.h44
-rw-r--r--arch/arc/kernel/Makefile.syscalls3
-rw-r--r--arch/arc/kernel/sys.c5
-rw-r--r--arch/arc/net/bpf_jit.h2
-rw-r--r--arch/arc/net/bpf_jit_arcv2.c10
-rw-r--r--arch/arc/net/bpf_jit_core.c22
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/allwinner/Makefile62
-rw-r--r--arch/arm/boot/dts/arm/arm-realview-eb-bbrevd.dtsi2
-rw-r--r--arch/arm/boot/dts/arm/arm-realview-eb.dtsi48
-rw-r--r--arch/arm/boot/dts/arm/arm-realview-pb1176.dts38
-rw-r--r--arch/arm/boot/dts/arm/arm-realview-pb11mp.dts48
-rw-r--r--arch/arm/boot/dts/arm/arm-realview-pbx.dtsi48
-rw-r--r--arch/arm/boot/dts/arm/integratorap-im-pd1.dts4
-rw-r--r--arch/arm/boot/dts/arm/integratorap.dts14
-rw-r--r--arch/arm/boot/dts/arm/integratorcp.dts14
-rw-r--r--arch/arm/boot/dts/arm/mps2.dtsi48
-rw-r--r--arch/arm/boot/dts/arm/versatile-ab.dts8
-rw-r--r--arch/arm/boot/dts/arm/vexpress-v2m-rs1.dtsi8
-rw-r--r--arch/arm/boot/dts/arm/vexpress-v2m.dtsi16
-rw-r--r--arch/arm/boot/dts/arm/vexpress-v2p-ca15-tc1.dts14
-rw-r--r--arch/arm/boot/dts/arm/vexpress-v2p-ca15_a7.dts22
-rw-r--r--arch/arm/boot/dts/arm/vexpress-v2p-ca5s.dts12
-rw-r--r--arch/arm/boot/dts/arm/vexpress-v2p-ca9.dts18
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-g4.dtsi28
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-g5.dtsi28
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-g6.dtsi32
-rw-r--r--arch/arm/boot/dts/cirrus/ep7211-edb7211.dts2
-rw-r--r--arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-nslu2.dts11
-rw-r--r--arch/arm/boot/dts/marvell/armada-370-xp.dtsi1
-rw-r--r--arch/arm/boot/dts/marvell/armada-375.dtsi1
-rw-r--r--arch/arm/boot/dts/marvell/armada-385-atl-x530.dts13
-rw-r--r--arch/arm/boot/dts/marvell/armada-385-turris-omnia.dts35
-rw-r--r--arch/arm/boot/dts/marvell/armada-38x.dtsi1
-rw-r--r--arch/arm/boot/dts/marvell/armada-39x.dtsi1
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-blackarmor-nas220.dts6
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-c200-v1.dts8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-cloudbox.dts8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-d2net.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-dir665.dts22
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-dns320.dts10
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-dns325.dts10
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-dnskw.dtsi8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-dockstar.dts4
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-dreamplug.dts6
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-goflexnet.dts20
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-guruplug-server-plus.dts8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-ib62x0.dts12
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-iconnect.dts20
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-iomega_ix2_200.dts16
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-l-50.dts20
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-laplug.dts6
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-linkstation.dtsi2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-linksys-viper.dts10
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-lsxl.dtsi18
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-mplcec4.dts12
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts12
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-netxbig.dtsi8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-ns2-common.dtsi6
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-ns2lite.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-nsa310.dts20
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-nsa310a.dts18
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-nsa310s.dts8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-nsa320.dts18
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-nsa325.dts18
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-nsa3x0-common.dtsi8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-openblocks_a6.dts4
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-openblocks_a7.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-pogo_e02.dts4
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-pogoplug-series-4.dts8
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-sheevaplug-esata.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-sheevaplug.dts4
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-synology.dtsi58
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-t5325.dts4
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-ts219-6281.dts6
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-ts219-6282.dts6
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-ts419.dtsi6
-rw-r--r--arch/arm/boot/dts/marvell/mvebu-linkstation-gpio-simple.dtsi2
-rw-r--r--arch/arm/boot/dts/marvell/orion5x-lacie-d2-network.dts9
-rw-r--r--arch/arm/boot/dts/marvell/orion5x-lacie-ethernet-disk-mini-v2.dts7
-rw-r--r--arch/arm/boot/dts/marvell/orion5x-linkstation-lschl.dts4
-rw-r--r--arch/arm/boot/dts/marvell/orion5x-lswsgl.dts25
-rw-r--r--arch/arm/boot/dts/marvell/orion5x-maxtor-shared-storage-2.dts7
-rw-r--r--arch/arm/boot/dts/marvell/orion5x-netgear-wnr854t.dts2
-rw-r--r--arch/arm/boot/dts/marvell/orion5x-rd88f5182-nas.dts2
-rw-r--r--arch/arm/boot/dts/mediatek/mt2701-evb.dts2
-rw-r--r--arch/arm/boot/dts/mediatek/mt7623.dtsi18
-rw-r--r--arch/arm/boot/dts/nspire/nspire-classic.dtsi2
-rw-r--r--arch/arm/boot/dts/nspire/nspire-cx.dts2
-rw-r--r--arch/arm/boot/dts/nspire/nspire.dtsi5
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts22
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts22
-rw-r--r--arch/arm/boot/dts/nxp/imx/Makefile2
-rw-r--r--arch/arm/boot/dts/nxp/imx/e60k02.dtsi4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-apf51dev.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-babbage.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-m53menlo.dts1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-tx53-x03x.dts14
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-tx53-x13x.dts6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos2_4.dts5
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i-ads2.dts12
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i-ads2.dts12
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi25
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i-ads2.dtsi148
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi58
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi12
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi12
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lcd.dtsi21
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lvds.dtsi20
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi14
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts6
-rw-r--r--arch/arm/boot/dts/qcom/Makefile5
-rw-r--r--arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts53
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts573
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8064.dtsi7
-rw-r--r--arch/arm/boot/dts/qcom/qcom-apq8084.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226-samsung-ms013g.dts386
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8226.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8660.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts121
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8960.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts6
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974-samsung-hlte.dts401
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974.dtsi28
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-htc-m8.dts353
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-aries.dts44
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi2
-rw-r--r--arch/arm/boot/dts/renesas/r8a73a4.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7742.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7743.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7744.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7745.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a77470.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7790.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7791.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7792.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7793.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7794.dtsi1
-rw-r--r--arch/arm/boot/dts/renesas/r9a06g032.dtsi19
-rw-r--r--arch/arm/boot/dts/rockchip/rk3036.dtsi1
-rw-r--r--arch/arm/boot/dts/rockchip/rk3066a-mk808.dts8
-rw-r--r--arch/arm/boot/dts/rockchip/rk3066a.dtsi21
-rw-r--r--arch/arm/boot/dts/rockchip/rk3128.dtsi128
-rw-r--r--arch/arm/boot/dts/rockchip/rk3xxx.dtsi7
-rw-r--r--arch/arm/boot/dts/rockchip/rv1126-edgeble-neu2-io.dts3
-rw-r--r--arch/arm/boot/dts/st/Makefile1
-rw-r--r--arch/arm/boot/dts/st/stih407-family.dtsi6
-rw-r--r--arch/arm/boot/dts/st/stih410.dtsi1
-rw-r--r--arch/arm/boot/dts/st/stih418.dtsi42
-rw-r--r--arch/arm/boot/dts/st/stm32f429.dtsi1
-rw-r--r--arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi697
-rw-r--r--arch/arm/boot/dts/st/stm32mp131.dtsi38
-rw-r--r--arch/arm/boot/dts/st/stm32mp133.dtsi31
-rw-r--r--arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts377
-rw-r--r--arch/arm/boot/dts/st/stm32mp135f-dk.dts128
-rw-r--r--arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi308
-rw-r--r--arch/arm/boot/dts/st/stm32mp151.dtsi1
-rw-r--r--arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts5
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts5
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts5
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts5
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-osd32mp1-red.dts13
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi13
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xx-osd32.dtsi13
-rw-r--r--arch/arm/boot/dts/ti/davinci/da850-evm.dts2
-rw-r--r--arch/arm/boot/dts/ti/omap/am335x-guardian.dts2
-rw-r--r--arch/arm/boot/dts/ti/omap/am335x-pdu001.dts2
-rw-r--r--arch/arm/boot/dts/ti/omap/am335x-pepper.dts2
-rw-r--r--arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts1
-rw-r--r--arch/arm/boot/dts/vt8500/vt8500-bv07.dts2
-rw-r--r--arch/arm/boot/dts/vt8500/vt8500.dtsi2
-rw-r--r--arch/arm/boot/dts/vt8500/wm8505-ref.dts2
-rw-r--r--arch/arm/boot/dts/vt8500/wm8505.dtsi2
-rw-r--r--arch/arm/boot/dts/vt8500/wm8650-mid.dts2
-rw-r--r--arch/arm/boot/dts/vt8500/wm8650.dtsi2
-rw-r--r--arch/arm/boot/dts/vt8500/wm8750.dtsi4
-rw-r--r--arch/arm/boot/dts/vt8500/wm8850-w70v2.dts2
-rw-r--r--arch/arm/boot/dts/vt8500/wm8850.dtsi4
-rw-r--r--arch/arm/configs/at91_dt_defconfig1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig7
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/vexpress_defconfig1
-rw-r--r--arch/arm/include/asm/cmpxchg.h7
-rw-r--r--arch/arm/include/asm/efi.h13
-rw-r--r--arch/arm/include/asm/unistd.h1
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/ftrace.c17
-rw-r--r--arch/arm/mach-davinci/pm.c2
-rw-r--r--arch/arm/mach-pxa/devices.c55
-rw-r--r--arch/arm/mach-pxa/devices.h5
-rw-r--r--arch/arm/mach-pxa/gumstix.c24
-rw-r--r--arch/arm/mach-pxa/pxa25x.c8
-rw-r--r--arch/arm/mach-pxa/pxa27x.c9
-rw-r--r--arch/arm/mach-pxa/spitz.c284
-rw-r--r--arch/arm/mach-tegra/board-paz00.c50
-rw-r--r--arch/arm/mach-versatile/Kconfig9
-rw-r--r--arch/arm/mach-versatile/Makefile3
-rw-r--r--arch/arm/mach-versatile/dcscb.c173
-rw-r--r--arch/arm/mach-versatile/dcscb_setup.S33
-rw-r--r--arch/arm/mm/fault.c4
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/arm64/Kconfig40
-rw-r--r--arch/arm64/Kconfig.platforms3
-rw-r--r--arch/arm64/boot/dts/Makefile1
-rw-r--r--arch/arm64/boot/dts/airoha/Makefile2
-rw-r--r--arch/arm64/boot/dts/airoha/en7581-evb.dts26
-rw-r--r--arch/arm64/boot/dts/airoha/en7581.dtsi154
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi37
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi37
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi25
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi77
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts79
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi10
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts2
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile4
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-a1-ad402.dts45
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-a1.dtsi16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi24
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi438
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi14
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-one.dts17
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-two.dts20
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi154
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts24
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts199
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts51
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-s4.dtsi199
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1.dtsi44
-rw-r--r--arch/arm64/boot/dts/apm/apm-merlin.dts2
-rw-r--r--arch/arm64/boot/dts/apm/apm-mustang.dts2
-rw-r--r--arch/arm64/boot/dts/arm/corstone1000-fvp.dts2
-rw-r--r--arch/arm64/boot/dts/arm/corstone1000.dtsi6
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dtsi6
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi1
-rw-r--r--arch/arm64/boot/dts/arm/juno-clocks.dtsi10
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi13
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi10
-rw-r--r--arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts6
-rw-r--r--arch/arm64/boot/dts/exynos/exynos850.dtsi8
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101-oriole.dts9
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101.dtsi22
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile15
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi79
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts31
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi55
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts20
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi138
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi49
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi72
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi186
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi56
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-cm41.dtsi68
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi69
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-evk.dts277
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-ss-adma.dtsi78
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-ss-conn.dtsi11
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-iot-gateway.dts218
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso72
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso76
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso41
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-ucm-som.dtsi679
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi20
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi23
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts47
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk2.dts39
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts39
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi15
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-mx8-dlvds-lcd1.dtso77
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts91
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s-ep1.dts27
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts906
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts5
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw702x.dtsi20
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts24
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi37
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi37
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi37
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-nonwifi.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi37
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi18
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi120
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-mek.dts346
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi473
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm.dtsi103
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts310
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts492
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts73
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts61
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts289
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-clock.h187
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-pinfunc.h865
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-power.h47
-rw-r--r--arch/arm64/boot/dts/freescale/imx95.dtsi1192
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/tqma8xx.dtsi8
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi2
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts2
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/Makefile4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts8
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-cf-base.dts178
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-cf-pro.dts375
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-cf.dtsi197
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi160
-rw-r--r--arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts637
-rw-r--r--arch/arm64/boot/dts/marvell/cn9132-clearfog.dts673
-rw-r--r--arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi712
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712-evb.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7981b-cudy-wr3000-v1.dts74
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7981b-openwrt-one.dts15
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7981b.dtsi78
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-emmc.dtso28
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-mini.dts493
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso74
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nor.dtso90
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sd.dtso16
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a.dtsi90
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi9
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-evb.dts12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-evb.dts6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14-sku2.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku0.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku1.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts14
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi25
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi18
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts10
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi136
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589824.dts13
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589825.dts25
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi103
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi42
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188.dtsi480
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r1.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts114
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi50
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-demo.dts26
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-evb.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365-evk.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts880
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts34
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts1127
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts88
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi372
-rw-r--r--arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi95
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts77
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0005.dts31
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi (renamed from arch/arm64/boot/dts/nvidia/tegra234-p3768-0000.dtsi)28
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile19
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts491
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5018-tplink-archer-ax55-v1.dts128
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5018.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5332.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018.dtsi28
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574.dtsi39
-rw-r--r--arch/arm64/boot/dts/qcom/msm8216-samsung-fortuna3g.dts14
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts47
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts140
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts251
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi161
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-motorola-harpia.dts147
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-motorola-osprey.dts105
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-motorola-surnia.dts83
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi53
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-e5.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-e7.dts7
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi197
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-gprimeltecan.dts70
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-grandprimelte.dts14
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi18
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi18
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts56
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953-motorola-potter.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953.dtsi30
-rw-r--r--arch/arm64/boot/dts/qcom/msm8956.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/msm8976.dtsi579
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994.dtsi14
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi104
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi70
-rw-r--r--arch/arm64/boot/dts/qcom/pm6125.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm6150.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/pm6150l.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/pm6350.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm660.dtsi26
-rw-r--r--arch/arm64/boot/dts/qcom/pm660l.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm7250b.dtsi47
-rw-r--r--arch/arm64/boot/dts/qcom/pm7325.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pm7550ba.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8010.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150b.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150l.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8350.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pm8350b.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pm8350c.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pm8450.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8550.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8550b.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8550ve.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8550vs.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/pm8916.dtsi31
-rw-r--r--arch/arm64/boot/dts/qcom/pm8953.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/pm8994.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8998.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pmi632.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8950.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pmr735a.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pmr735b.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pmr735d_a.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pmr735d_b.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pms405.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pmx75.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/qcm2290.dtsi184
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts157
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-idp.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts961
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts111
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8550-aim300-aiot.dts315
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi405
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8550.dtsi162
-rw-r--r--arch/arm64/boot/dts/qcom/qdu1000-idp.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/qdu1000.dtsi153
-rw-r--r--arch/arm64/boot/dts/qcom/qrb2210-rb1.dts21
-rw-r--r--arch/arm64/boot/dts/qcom/qrb4210-rb2.dts17
-rw-r--r--arch/arm64/boot/dts/qcom/qrb5165-rb5.dts122
-rw-r--r--arch/arm64/boot/dts/qcom/qru1000-idp.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/sa8155p.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-ride-r3.dts47
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-ride.dts834
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi814
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p.dtsi1146
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-clamshell.dtsi9
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-detachable.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi15
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi29
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280-idp.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi226
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts16
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-primus.dts20
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x.dtsi263
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-crd.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts160
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp.dtsi84
-rw-r--r--arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts15
-rw-r--r--arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts276
-rw-r--r--arch/arm64/boot/dts/qcom/sdm450-motorola-ali.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630.dtsi30
-rw-r--r--arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm632-motorola-ocean.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670.dtsi18
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi179
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts77
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75-idp.dts45
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75.dtsi583
-rw-r--r--arch/arm64/boot/dts/qcom/sm4250-oneplus-billie2.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sm4450.dtsi48
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115.dtsi46
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350.dtsi148
-rw-r--r--arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm6375.dtsi78
-rw-r--r--arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts417
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-hdk.dts17
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi74
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-mtp.dts14
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi83
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350-hdk.dts27
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi82
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450-hdk.dts43
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450.dtsi225
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-hdk.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-mtp.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-qrd.dts134
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts593
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts14
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550.dtsi335
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-hdk-display-card.dtso141
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-hdk.dts1355
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-mtp.dts34
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-qrd.dts128
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650.dtsi327
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts616
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-crd.dts213
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts929
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi482
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-qcp.dts365
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100.dtsi1844
-rw-r--r--arch/arm64/boot/dts/renesas/condor-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980-condor.dts8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi5
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi6
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f0.dtsi5
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts6
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0.dtsi28
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0.dtsi737
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043u.dtsi5
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044.dtsi9
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g054.dtsi9
-rw-r--r--arch/arm64/boot/dts/renesas/r9a08g045.dtsi11
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g011.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi11
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi29
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi103
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile11
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts99
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts293
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi31
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-lba3368.dts659
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts74
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399pro.dtsi22
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v1.1.dts29
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v2.1.dts70
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b.dtsi678
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi531
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3e.dts52
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3w.dts92
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi48
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-rock-3b.dts781
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi13
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi (renamed from arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi)0
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base.dtsi2799
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-extra-pinctrl.dtsi (renamed from arch/arm64/boot/dts/rockchip/rk3588-pinctrl.dtsi)0
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi448
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts778
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi653
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi190
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts13
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts1177
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso25
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-srns.dtso16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts58
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588.dtsi413
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588j.dtsi143
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts19
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s.dtsi2670
-rw-r--r--arch/arm64/boot/dts/sprd/ums512.dtsi14
-rw-r--r--arch/arm64/boot/dts/sprd/ums9620.dtsi14
-rw-r--r--arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi100
-rw-r--r--arch/arm64/boot/dts/st/stm32mp251.dtsi246
-rw-r--r--arch/arm64/boot/dts/st/stm32mp253.dtsi64
-rw-r--r--arch/arm64/boot/dts/st/stm32mp257f-ev1.dts77
-rw-r--r--arch/arm64/boot/dts/ti/Makefile56
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-lp-sk-nand.dtso116
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-main.dtsi39
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-1-4-ghz-opp.dtso20
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts467
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-main.dtsi32
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi330
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a7-phyboard-lyra-rdk.dts18
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a7-sk.dts11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi1062
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi (renamed from arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi)11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-thermal.dtsi (renamed from arch/arm64/boot/dts/ti/k3-am62p-thermal.dtsi)0
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi (renamed from arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi)8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-main.dtsi1083
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p.dtsi9
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-sk.dts20
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi475
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi32
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi24
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi46
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-sdcard.dtso4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-wlan.dtso4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm-icssg1-dualemac-mii.dtso101
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso148
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm.dts15
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-pcie-usb2.dtso87
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-sk.dts12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi36
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts76
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi86
-rw-r--r--arch/arm64/boot/dts/ti/k3-am69-sk.dts87
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-eth-phy.dtso19
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-rtc.dtso15
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-spi-nor.dtso15
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6xx-phycore-qspi-nor.dtso15
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi14
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso164
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-sk.dts117
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm.dts182
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-main.dtsi217
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s.dtsi165
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm-pcie0-pcie1-ep.dtso79
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso147
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso81
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm.dts383
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi527
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi14
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-pinctrl.h3
-rw-r--r--arch/arm64/boot/dts/ti/k3-serdes.h8
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi16
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso19
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso41
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts19
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-smk-k26-revA.dts8
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts8
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi186
-rw-r--r--arch/arm64/configs/defconfig23
-rw-r--r--arch/arm64/include/asm/Kbuild8
-rw-r--r--arch/arm64/include/asm/acpi.h12
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h15
-rw-r--r--arch/arm64/include/asm/arch_timer.h2
-rw-r--r--arch/arm64/include/asm/arm_pmuv3.h2
-rw-r--r--arch/arm64/include/asm/asm-extable.h3
-rw-r--r--arch/arm64/include/asm/cpucaps.h2
-rw-r--r--arch/arm64/include/asm/cpufeature.h4
-rw-r--r--arch/arm64/include/asm/cputype.h6
-rw-r--r--arch/arm64/include/asm/el2_setup.h6
-rw-r--r--arch/arm64/include/asm/esr.h33
-rw-r--r--arch/arm64/include/asm/io.h36
-rw-r--r--arch/arm64/include/asm/kvm_arm.h6
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h71
-rw-r--r--arch/arm64/include/asm/kvm_host.h25
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h4
-rw-r--r--arch/arm64/include/asm/kvm_pkvm.h9
-rw-r--r--arch/arm64/include/asm/mmu_context.h4
-rw-r--r--arch/arm64/include/asm/mte.h4
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/arm64/include/asm/ptrace.h35
-rw-r--r--arch/arm64/include/asm/runtime-const.h88
-rw-r--r--arch/arm64/include/asm/seccomp.h13
-rw-r--r--arch/arm64/include/asm/smp.h13
-rw-r--r--arch/arm64/include/asm/sysreg.h4
-rw-r--r--arch/arm64/include/asm/uaccess.h169
-rw-r--r--arch/arm64/include/asm/unistd.h22
-rw-r--r--arch/arm64/include/asm/unistd32.h939
-rw-r--r--arch/arm64/include/asm/vdso/compat_gettimeofday.h12
-rw-r--r--arch/arm64/include/asm/word-at-a-time.h11
-rw-r--r--arch/arm64/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm64/include/uapi/asm/unistd.h25
-rw-r--r--arch/arm64/kernel/Makefile1
-rw-r--r--arch/arm64/kernel/Makefile.syscalls6
-rw-r--r--arch/arm64/kernel/acpi.c129
-rw-r--r--arch/arm64/kernel/acpi_numa.c11
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c3
-rw-r--r--arch/arm64/kernel/cpu_errata.c17
-rw-r--r--arch/arm64/kernel/cpufeature.c5
-rw-r--r--arch/arm64/kernel/efi.c2
-rw-r--r--arch/arm64/kernel/image-vars.h5
-rw-r--r--arch/arm64/kernel/mte.c12
-rw-r--r--arch/arm64/kernel/pi/map_kernel.c2
-rw-r--r--arch/arm64/kernel/proton-pack.c2
-rw-r--r--arch/arm64/kernel/psci.c2
-rw-r--r--arch/arm64/kernel/reloc_test_core.c1
-rw-r--r--arch/arm64/kernel/signal32.c4
-rw-r--r--arch/arm64/kernel/sigreturn32.S18
-rw-r--r--arch/arm64/kernel/smp.c74
-rw-r--r--arch/arm64/kernel/sys.c6
-rw-r--r--arch/arm64/kernel/sys32.c17
-rw-r--r--arch/arm64/kernel/syscall.c19
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S3
-rw-r--r--arch/arm64/kvm/arm.c76
-rw-r--r--arch/arm64/kvm/emulate-nested.c21
-rw-r--r--arch/arm64/kvm/fpsimd.c11
-rw-r--r--arch/arm64/kvm/guest.c3
-rw-r--r--arch/arm64/kvm/hyp/aarch32.c18
-rw-r--r--arch/arm64/kvm/hyp/fpsimd.S6
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h36
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/fixed_config.h10
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/pkvm.h1
-rw-r--r--arch/arm64/kvm/hyp/nvhe/ffa.c12
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c84
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c21
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c25
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c24
-rw-r--r--arch/arm64/kvm/hyp/nvhe/sys_regs.c2
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c12
-rw-r--r--arch/arm64/kvm/nested.c6
-rw-r--r--arch/arm64/kvm/pmu-emul.c1
-rw-r--r--arch/arm64/kvm/reset.c3
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c15
-rw-r--r--arch/arm64/kvm/vgic/vgic.h2
-rw-r--r--arch/arm64/mm/contpte.c4
-rw-r--r--arch/arm64/mm/mmu.c3
-rw-r--r--arch/arm64/net/bpf_jit_comp.c16
-rw-r--r--arch/arm64/tools/Makefile6
-rw-r--r--arch/arm64/tools/syscall_32.tbl476
l---------arch/arm64/tools/syscall_64.tbl1
-rw-r--r--arch/csky/include/asm/Kbuild3
-rw-r--r--arch/csky/include/asm/unistd.h3
-rw-r--r--arch/csky/include/uapi/asm/Kbuild2
-rw-r--r--arch/csky/include/uapi/asm/unistd.h14
-rw-r--r--arch/csky/kernel/Makefile.syscalls4
-rw-r--r--arch/csky/kernel/syscall.c2
-rw-r--r--arch/csky/kernel/syscall_table.c4
-rw-r--r--arch/hexagon/include/asm/Kbuild2
-rw-r--r--arch/hexagon/include/asm/syscalls.h6
-rw-r--r--arch/hexagon/include/asm/unistd.h10
-rw-r--r--arch/hexagon/include/uapi/asm/Kbuild2
-rw-r--r--arch/hexagon/include/uapi/asm/unistd.h13
-rw-r--r--arch/hexagon/kernel/Makefile.syscalls3
-rw-r--r--arch/hexagon/kernel/syscalltab.c15
-rw-r--r--arch/loongarch/Kconfig5
-rw-r--r--arch/loongarch/Kconfig.debug1
-rw-r--r--arch/loongarch/boot/dts/loongson-2k0500-ref.dts4
-rw-r--r--arch/loongarch/boot/dts/loongson-2k1000-ref.dts4
-rw-r--r--arch/loongarch/boot/dts/loongson-2k2000-ref.dts2
-rw-r--r--arch/loongarch/include/asm/Kbuild17
-rw-r--r--arch/loongarch/include/asm/hw_breakpoint.h4
-rw-r--r--arch/loongarch/include/asm/numa.h1
-rw-r--r--arch/loongarch/include/asm/stackframe.h2
-rw-r--r--arch/loongarch/include/asm/unistd.h2
-rw-r--r--arch/loongarch/include/uapi/asm/Kbuild2
-rw-r--r--arch/loongarch/include/uapi/asm/unistd.h4
-rw-r--r--arch/loongarch/kernel/Makefile.syscalls4
-rw-r--r--arch/loongarch/kernel/head.S2
-rw-r--r--arch/loongarch/kernel/hw_breakpoint.c96
-rw-r--r--arch/loongarch/kernel/ptrace.c47
-rw-r--r--arch/loongarch/kernel/setup.c6
-rw-r--r--arch/loongarch/kernel/smp.c5
-rw-r--r--arch/loongarch/kernel/syscall.c5
-rw-r--r--arch/loongarch/kernel/vmlinux.lds.S10
-rw-r--r--arch/loongarch/kvm/exit.c2
-rw-r--r--arch/m68k/amiga/config.c9
-rw-r--r--arch/m68k/atari/ataints.c6
-rw-r--r--arch/m68k/configs/amiga_defconfig1
-rw-r--r--arch/m68k/configs/apollo_defconfig1
-rw-r--r--arch/m68k/configs/atari_defconfig1
-rw-r--r--arch/m68k/configs/bvme6000_defconfig1
-rw-r--r--arch/m68k/configs/hp300_defconfig1
-rw-r--r--arch/m68k/configs/mac_defconfig1
-rw-r--r--arch/m68k/configs/multi_defconfig1
-rw-r--r--arch/m68k/configs/mvme147_defconfig1
-rw-r--r--arch/m68k/configs/mvme16x_defconfig1
-rw-r--r--arch/m68k/configs/q40_defconfig1
-rw-r--r--arch/m68k/configs/sun3_defconfig1
-rw-r--r--arch/m68k/configs/sun3x_defconfig1
-rw-r--r--arch/m68k/emu/nfblock.c4
-rw-r--r--arch/m68k/emu/nfcon.c1
-rw-r--r--arch/m68k/include/asm/cmpxchg.h2
-rw-r--r--arch/m68k/include/asm/unistd.h1
-rw-r--r--arch/microblaze/kernel/sys_microblaze.c2
-rw-r--r--arch/mips/bmips/setup.c3
-rw-r--r--arch/mips/include/asm/mipsmtregs.h2
-rw-r--r--arch/mips/include/asm/unistd.h1
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl2
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl4
-rw-r--r--arch/mips/pci/ops-rc32434.c4
-rw-r--r--arch/nios2/include/asm/Kbuild2
-rw-r--r--arch/nios2/include/asm/unistd.h12
-rw-r--r--arch/nios2/include/uapi/asm/Kbuild2
-rw-r--r--arch/nios2/include/uapi/asm/unistd.h14
-rw-r--r--arch/nios2/kernel/Makefile.syscalls3
-rw-r--r--arch/nios2/kernel/syscall_table.c6
-rw-r--r--arch/openrisc/include/asm/Kbuild2
-rw-r--r--arch/openrisc/include/asm/syscalls.h4
-rw-r--r--arch/openrisc/include/asm/unistd.h8
-rw-r--r--arch/openrisc/include/uapi/asm/Kbuild2
-rw-r--r--arch/openrisc/include/uapi/asm/unistd.h15
-rw-r--r--arch/openrisc/kernel/Makefile.syscalls3
-rw-r--r--arch/openrisc/kernel/sys_call_table.c9
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/cacheflush.h15
-rw-r--r--arch/parisc/include/asm/pgtable.h27
-rw-r--r--arch/parisc/include/asm/unistd.h1
-rw-r--r--arch/parisc/kernel/cache.c413
-rw-r--r--arch/parisc/kernel/sys_parisc32.c9
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/crypto/.gitignore2
-rw-r--r--arch/powerpc/include/asm/uaccess.h27
-rw-r--r--arch/powerpc/include/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/eeh_pe.c7
-rw-r--r--arch/powerpc/kernel/head_64.S5
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/powerpc/kexec/core_64.c11
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c18
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c4
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c12
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c12
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c8
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c4
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h1
-rw-r--r--arch/powerpc/platforms/pseries/setup.c5
-rw-r--r--arch/riscv/Kconfig14
-rw-r--r--arch/riscv/boot/dts/allwinner/Makefile2
-rw-r--r--arch/riscv/boot/dts/allwinner/sun20i-d1-clockworkpi-v3.14.dts252
-rw-r--r--arch/riscv/boot/dts/allwinner/sun20i-d1-devterm-v3.14.dts36
-rw-r--r--arch/riscv/boot/dts/allwinner/sunxi-d1s-t113.dtsi11
-rw-r--r--arch/riscv/boot/dts/canaan/canaan_kd233.dts7
-rw-r--r--arch/riscv/boot/dts/canaan/k210.dtsi23
-rw-r--r--arch/riscv/boot/dts/canaan/k210_generic.dts5
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts9
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts7
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_go.dts9
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maixduino.dts10
-rw-r--r--arch/riscv/boot/dts/microchip/Makefile1
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-beaglev-fire-fabric.dtsi82
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-beaglev-fire.dts223
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts1
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts12
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042.dtsi55
-rw-r--r--arch/riscv/boot/dts/starfive/Makefile1
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-common.dtsi71
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts7
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts65
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi8
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110.dtsi86
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi81
-rw-r--r--arch/riscv/configs/defconfig26
-rw-r--r--arch/riscv/include/asm/Kbuild3
-rw-r--r--arch/riscv/include/asm/cmpxchg.h22
-rw-r--r--arch/riscv/include/asm/insn.h2
-rw-r--r--arch/riscv/include/asm/syscall_table.h7
-rw-r--r--arch/riscv/include/asm/unistd.h13
-rw-r--r--arch/riscv/include/uapi/asm/Kbuild2
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h41
-rw-r--r--arch/riscv/kernel/Makefile.syscalls4
-rw-r--r--arch/riscv/kernel/compat_syscall_table.c6
-rw-r--r--arch/riscv/kernel/cpu_ops_sbi.c2
-rw-r--r--arch/riscv/kernel/cpu_ops_spinwait.c3
-rw-r--r--arch/riscv/kernel/ftrace.c7
-rw-r--r--arch/riscv/kernel/machine_kexec.c10
-rw-r--r--arch/riscv/kernel/patch.c26
-rw-r--r--arch/riscv/kernel/stacktrace.c5
-rw-r--r--arch/riscv/kernel/sys_riscv.c4
-rw-r--r--arch/riscv/kernel/syscall_table.c6
-rw-r--r--arch/riscv/kvm/aia_device.c7
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c4
-rw-r--r--arch/riscv/kvm/vcpu_pmu.c2
-rw-r--r--arch/riscv/mm/fault.c4
-rw-r--r--arch/riscv/mm/init.c21
-rw-r--r--arch/riscv/net/bpf_jit.h51
-rw-r--r--arch/riscv/net/bpf_jit_comp32.c3
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c144
-rw-r--r--arch/riscv/net/bpf_jit_core.c5
-rw-r--r--arch/s390/boot/startup.c38
-rw-r--r--arch/s390/boot/vmem.c12
-rw-r--r--arch/s390/boot/vmlinux.lds.S1
-rw-r--r--arch/s390/configs/debug_defconfig48
-rw-r--r--arch/s390/configs/defconfig45
-rw-r--r--arch/s390/configs/zfcpdump_defconfig5
-rw-r--r--arch/s390/include/asm/entry-common.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/unistd.h1
-rw-r--r--arch/s390/kernel/crash_dump.c54
-rw-r--r--arch/s390/kernel/syscall.c27
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/kvm/kvm-s390.c1
-rw-r--r--arch/s390/kvm/kvm-s390.h15
-rw-r--r--arch/s390/kvm/priv.c32
-rw-r--r--arch/s390/mm/pgalloc.c4
-rw-r--r--arch/s390/net/bpf_jit_comp.c489
-rw-r--r--arch/s390/pci/pci_irq.c2
-rw-r--r--arch/sh/boards/board-sh7757lcr.c2
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c2
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c2
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c2
-rw-r--r--arch/sh/boards/mach-migor/setup.c2
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c2
-rw-r--r--arch/sh/include/asm/unistd.h2
-rw-r--r--arch/sh/kernel/sys_sh32.c11
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl3
-rw-r--r--arch/sparc/include/asm/unistd.h2
-rw-r--r--arch/sparc/kernel/sys32.S221
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl8
-rw-r--r--arch/um/drivers/ubd_kern.c53
-rw-r--r--arch/um/include/asm/Kbuild2
-rw-r--r--arch/um/include/asm/bpf_perf_event.h9
-rw-r--r--arch/x86/Kconfig36
-rw-r--r--arch/x86/Kconfig.assembler2
-rw-r--r--arch/x86/boot/compressed/Makefile4
-rw-r--r--arch/x86/boot/compressed/kaslr.c43
-rw-r--r--arch/x86/boot/compressed/misc.c5
-rw-r--r--arch/x86/boot/compressed/sev.c86
-rw-r--r--arch/x86/boot/cpucheck.c2
-rw-r--r--arch/x86/boot/main.c42
-rw-r--r--arch/x86/coco/Makefile1
-rw-r--r--arch/x86/coco/core.c1
-rw-r--r--arch/x86/coco/sev/Makefile15
-rw-r--r--arch/x86/coco/sev/core.c (renamed from arch/x86/kernel/sev.c)449
-rw-r--r--arch/x86/coco/sev/shared.c (renamed from arch/x86/kernel/sev-shared.c)460
-rw-r--r--arch/x86/coco/tdx/tdx.c121
-rw-r--r--arch/x86/entry/entry_64_compat.S14
-rw-r--r--arch/x86/entry/syscall_32.c10
-rw-r--r--arch/x86/entry/syscall_64.c9
-rw-r--r--arch/x86/entry/syscall_x32.c7
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl9
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl7
-rw-r--r--arch/x86/events/amd/core.c28
-rw-r--r--arch/x86/events/amd/uncore.c36
-rw-r--r--arch/x86/events/core.c113
-rw-r--r--arch/x86/events/intel/core.c638
-rw-r--r--arch/x86/events/intel/cstate.c40
-rw-r--r--arch/x86/events/intel/ds.c180
-rw-r--r--arch/x86/events/intel/knc.c2
-rw-r--r--arch/x86/events/intel/p4.c10
-rw-r--r--arch/x86/events/intel/p6.c2
-rw-r--r--arch/x86/events/intel/pt.c4
-rw-r--r--arch/x86/events/intel/pt.h4
-rw-r--r--arch/x86/events/intel/uncore.c98
-rw-r--r--arch/x86/events/intel/uncore.h8
-rw-r--r--arch/x86/events/intel/uncore_discovery.c306
-rw-r--r--arch/x86/events/intel/uncore_discovery.h22
-rw-r--r--arch/x86/events/intel/uncore_snbep.c134
-rw-r--r--arch/x86/events/perf_event.h98
-rw-r--r--arch/x86/events/rapl.c91
-rw-r--r--arch/x86/events/zhaoxin/core.c12
-rw-r--r--arch/x86/hyperv/ivm.c22
-rw-r--r--arch/x86/include/asm/acpi.h7
-rw-r--r--arch/x86/include/asm/alternative.h241
-rw-r--r--arch/x86/include/asm/amd_nb.h4
-rw-r--r--arch/x86/include/asm/cfi.h2
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h12
-rw-r--r--arch/x86/include/asm/cpu_device_id.h8
-rw-r--r--arch/x86/include/asm/cpufeatures.h803
-rw-r--r--arch/x86/include/asm/efi.h23
-rw-r--r--arch/x86/include/asm/entry-common.h15
-rw-r--r--arch/x86/include/asm/init.h3
-rw-r--r--arch/x86/include/asm/intel_ds.h1
-rw-r--r--arch/x86/include/asm/intel_pconfig.h65
-rw-r--r--arch/x86/include/asm/irqflags.h20
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/mce.h3
-rw-r--r--arch/x86/include/asm/msr-index.h11
-rw-r--r--arch/x86/include/asm/page_64.h2
-rw-r--r--arch/x86/include/asm/perf_event.h8
-rw-r--r--arch/x86/include/asm/pgtable.h5
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/processor.h12
-rw-r--r--arch/x86/include/asm/runtime-const.h61
-rw-r--r--arch/x86/include/asm/set_memory.h3
-rw-r--r--arch/x86/include/asm/setup.h8
-rw-r--r--arch/x86/include/asm/sev-common.h18
-rw-r--r--arch/x86/include/asm/sev.h135
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/tsc.h3
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/unistd.h1
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h5
-rw-r--r--arch/x86/include/asm/vdso/vsyscall.h1
-rw-r--r--arch/x86/include/asm/vgtod.h5
-rw-r--r--arch/x86/include/asm/vmware.h336
-rw-r--r--arch/x86/include/asm/vmxfeatures.h110
-rw-r--r--arch/x86/include/asm/word-at-a-time.h57
-rw-r--r--arch/x86/include/asm/x86_init.h14
-rw-r--r--arch/x86/include/uapi/asm/svm.h1
-rw-r--r--arch/x86/kernel/Makefile6
-rw-r--r--arch/x86/kernel/acpi/Makefile1
-rw-r--r--arch/x86/kernel/acpi/boot.c86
-rw-r--r--arch/x86/kernel/acpi/madt_playdead.S28
-rw-r--r--arch/x86/kernel/acpi/madt_wakeup.c292
-rw-r--r--arch/x86/kernel/alternative.c30
-rw-r--r--arch/x86/kernel/amd_nb.c53
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c11
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c1
-rw-r--r--arch/x86/kernel/cpu/bugs.c16
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/cpu.h2
-rw-r--r--arch/x86/kernel/cpu/intel.c211
-rw-r--r--arch/x86/kernel/cpu/intel_pconfig.c84
-rw-r--r--arch/x86/kernel/cpu/mce/core.c7
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c9
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh3
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c312
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c89
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h108
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c253
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c42
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c288
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/cpu/topology_amd.c4
-rw-r--r--arch/x86/kernel/cpu/vmware.c225
-rw-r--r--arch/x86/kernel/crash.c12
-rw-r--r--arch/x86/kernel/devicetree.c2
-rw-r--r--arch/x86/kernel/e820.c9
-rw-r--r--arch/x86/kernel/fpu/xstate.h14
-rw-r--r--arch/x86/kernel/machine_kexec_64.c11
-rw-r--r--arch/x86/kernel/process.c7
-rw-r--r--arch/x86/kernel/reboot.c18
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S27
-rw-r--r--arch/x86/kernel/setup.c3
-rw-r--r--arch/x86/kernel/time.c20
-rw-r--r--arch/x86/kernel/tsc.c92
-rw-r--r--arch/x86/kernel/vmlinux.lds.S3
-rw-r--r--arch/x86/kernel/x86_init.c8
-rw-r--r--arch/x86/kvm/Kconfig11
-rw-r--r--arch/x86/kvm/emulate.c2
-rw-r--r--arch/x86/kvm/lapic.c39
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c46
-rw-r--r--arch/x86/kvm/mmu/spte.h9
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h2
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c2
-rw-r--r--arch/x86/kvm/svm/sev.c19
-rw-r--r--arch/x86/kvm/svm/svm.c69
-rw-r--r--arch/x86/kvm/svm/svm.h4
-rw-r--r--arch/x86/kvm/vmx/nested.c5
-rw-r--r--arch/x86/kvm/vmx/vmx.c11
-rw-r--r--arch/x86/kvm/x86.c20
-rw-r--r--arch/x86/lib/cmdline.c8
-rw-r--r--arch/x86/lib/getuser.S65
-rw-r--r--arch/x86/lib/iomem.c5
-rw-r--r--arch/x86/mm/ident_map.c73
-rw-r--r--arch/x86/mm/init_64.c16
-rw-r--r--arch/x86/mm/mem_encrypt_amd.c16
-rw-r--r--arch/x86/mm/numa.c6
-rw-r--r--arch/x86/mm/pat/set_memory.c75
-rw-r--r--arch/x86/net/bpf_jit_comp.c15
-rw-r--r--arch/x86/pci/intel_mid_pci.c8
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/platform/atom/punit_atom_debug.c11
-rw-r--r--arch/x86/platform/efi/Makefile1
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/fake_mem.c197
-rw-r--r--arch/x86/platform/efi/memmap.c13
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c6
-rw-r--r--arch/x86/platform/intel/iosf_mbi.c4
-rw-r--r--arch/x86/um/sys_call_table_32.c10
-rw-r--r--arch/x86/um/sys_call_table_64.c11
-rw-r--r--arch/x86/virt/svm/sev.c44
-rw-r--r--arch/x86/virt/vmx/tdx/tdx.c8
-rw-r--r--arch/x86/xen/apic.c2
-rw-r--r--arch/x86/xen/debugfs.c2
-rw-r--r--arch/x86/xen/debugfs.h7
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/enlighten_hvm.c2
-rw-r--r--arch/x86/xen/enlighten_pv.c4
-rw-r--r--arch/x86/xen/mmu.c3
-rw-r--r--arch/x86/xen/mmu.h28
-rw-r--r--arch/x86/xen/mmu_hvm.c2
-rw-r--r--arch/x86/xen/mmu_pv.c15
-rw-r--r--arch/x86/xen/multicalls.c128
-rw-r--r--arch/x86/xen/multicalls.h69
-rw-r--r--arch/x86/xen/p2m.c6
-rw-r--r--arch/x86/xen/pmu.c1
-rw-r--r--arch/x86/xen/pmu.h22
-rw-r--r--arch/x86/xen/setup.c1
-rw-r--r--arch/x86/xen/smp.c1
-rw-r--r--arch/x86/xen/smp.h51
-rw-r--r--arch/x86/xen/smp_hvm.c2
-rw-r--r--arch/x86/xen/smp_pv.c3
-rw-r--r--arch/x86/xen/spinlock.c20
-rw-r--r--arch/x86/xen/suspend.c2
-rw-r--r--arch/x86/xen/time.c2
-rw-r--r--arch/x86/xen/xen-ops.h148
-rw-r--r--arch/xtensa/include/asm/current.h2
-rw-r--r--arch/xtensa/include/asm/thread_info.h2
-rw-r--r--arch/xtensa/include/asm/unistd.h1
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c5
-rw-r--r--block/Kconfig8
-rw-r--r--block/Makefile3
-rw-r--r--block/bdev.c41
-rw-r--r--block/bfq-cgroup.c51
-rw-r--r--block/bfq-iosched.c38
-rw-r--r--block/bfq-iosched.h3
-rw-r--r--block/bio-integrity.c161
-rw-r--r--block/bio.c2
-rw-r--r--block/blk-cgroup.h13
-rw-r--r--block/blk-core.c45
-rw-r--r--block/blk-flush.c39
-rw-r--r--block/blk-integrity.c228
-rw-r--r--block/blk-lib.c210
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-merge.c92
-rw-r--r--block/blk-mq-debugfs.c13
-rw-r--r--block/blk-mq.c89
-rw-r--r--block/blk-settings.c559
-rw-r--r--block/blk-stat.h1
-rw-r--r--block/blk-sysfs.c434
-rw-r--r--block/blk-throttle.c27
-rw-r--r--block/blk-throttle.h8
-rw-r--r--block/blk-wbt.c22
-rw-r--r--block/blk-zoned.c176
-rw-r--r--block/blk.h22
-rw-r--r--block/elevator.c9
-rw-r--r--block/elevator.h4
-rw-r--r--block/fops.c25
-rw-r--r--block/genhd.c2
-rw-r--r--block/ioctl.c2
-rw-r--r--block/mq-deadline.c20
-rw-r--r--block/sed-opal.c2
-rw-r--r--block/t10-pi.c302
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/ac.c6
-rw-r--r--drivers/acpi/acpi_pad.c19
-rw-r--r--drivers/acpi/acpi_processor.c145
-rw-r--r--drivers/acpi/acpi_tad.c1
-rw-r--r--drivers/acpi/acpi_video.c8
-rw-r--r--drivers/acpi/acpica/acevents.h4
-rw-r--r--drivers/acpi/acpica/evregion.c6
-rw-r--r--drivers/acpi/acpica/evxfregn.c54
-rw-r--r--drivers/acpi/acpica/exregion.c23
-rw-r--r--drivers/acpi/apei/einj-core.c2
-rw-r--r--drivers/acpi/arm64/Makefile6
-rw-r--r--drivers/acpi/arm64/amba.c6
-rw-r--r--drivers/acpi/arm64/cpuidle.c (renamed from arch/arm64/kernel/cpuidle.c)4
-rw-r--r--drivers/acpi/arm64/ffh.c107
-rw-r--r--drivers/acpi/battery.c52
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/cppc_acpi.c4
-rw-r--r--drivers/acpi/ec.c12
-rw-r--r--drivers/acpi/fan.h9
-rw-r--r--drivers/acpi/fan_core.c4
-rw-r--r--drivers/acpi/fan_hwmon.c170
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/mipi-disco-img.c28
-rw-r--r--drivers/acpi/numa/hmat.c6
-rw-r--r--drivers/acpi/platform_profile.c1
-rw-r--r--drivers/acpi/pmic/intel_pmic.c2
-rw-r--r--drivers/acpi/pmic/intel_pmic.h4
-rw-r--r--drivers/acpi/pmic/intel_pmic_bxtwc.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic_bytcrc.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c17
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtwc.c7
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c11
-rw-r--r--drivers/acpi/processor_core.c3
-rw-r--r--drivers/acpi/processor_driver.c43
-rw-r--r--drivers/acpi/processor_idle.c37
-rw-r--r--drivers/acpi/resource.c14
-rw-r--r--drivers/acpi/sbs.c33
-rw-r--r--drivers/acpi/scan.c47
-rw-r--r--drivers/acpi/tables.c14
-rw-r--r--drivers/acpi/thermal.c8
-rw-r--r--drivers/acpi/video_detect.c16
-rw-r--r--drivers/acpi/x86/lpss.c4
-rw-r--r--drivers/acpi/x86/utils.c68
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c30
-rw-r--r--drivers/ata/libahci.c12
-rw-r--r--drivers/ata/libata-core.c86
-rw-r--r--drivers/ata/libata-sata.c49
-rw-r--r--drivers/ata/libata-scsi.c220
-rw-r--r--drivers/ata/libata-transport.c5
-rw-r--r--drivers/ata/libata-transport.h3
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_macio.c13
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/auxiliary.c1
-rw-r--r--drivers/base/auxiliary_sysfs.c113
-rw-r--r--drivers/base/core.c48
-rw-r--r--drivers/base/cpu.c12
-rw-r--r--drivers/base/regmap/regcache-maple.c13
-rw-r--r--drivers/base/regmap/regcache.c6
-rw-r--r--drivers/base/regmap/regmap-ac97.c1
-rw-r--r--drivers/base/regmap/regmap-i2c.c4
-rw-r--r--drivers/base/regmap/regmap-irq.c2
-rw-r--r--drivers/base/regmap/regmap-kunit.c158
-rw-r--r--drivers/base/regmap/regmap-ram.c1
-rw-r--r--drivers/base/regmap/regmap-raw-ram.c1
-rw-r--r--drivers/base/regmap/regmap-sccb.c1
-rw-r--r--drivers/base/regmap/regmap-slimbus.c1
-rw-r--r--drivers/base/regmap/regmap-spi-avmm.c1
-rw-r--r--drivers/base/regmap/regmap-spi.c3
-rw-r--r--drivers/base/regmap/regmap-spmi.c1
-rw-r--r--drivers/base/regmap/regmap-w1.c1
-rw-r--r--drivers/base/regmap/regmap.c105
-rw-r--r--drivers/block/Kconfig9
-rw-r--r--drivers/block/Makefile3
-rw-r--r--drivers/block/amiflop.c6
-rw-r--r--drivers/block/aoe/aoeblk.c1
-rw-r--r--drivers/block/ataflop.c6
-rw-r--r--drivers/block/brd.c7
-rw-r--r--drivers/block/drbd/drbd_main.c6
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/loop.c207
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/n64cart.c2
-rw-r--r--drivers/block/nbd.c77
-rw-r--r--drivers/block/null_blk/main.c30
-rw-r--r--drivers/block/null_blk/null_blk.h1
-rw-r--r--drivers/block/null_blk/zoned.c28
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/ps3disk.c8
-rw-r--r--drivers/block/rbd.c15
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c2
-rw-r--r--drivers/block/rnbd/rnbd-clt.c16
-rw-r--r--drivers/block/rnbd/rnbd-srv-sysfs.c4
-rw-r--r--drivers/block/rnull.rs73
-rw-r--r--drivers/block/sunvdc.c1
-rw-r--r--drivers/block/swim.c5
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/ublk_drv.c22
-rw-r--r--drivers/block/virtio_blk.c68
-rw-r--r--drivers/block/xen-blkback/blkback.c1
-rw-r--r--drivers/block/xen-blkfront.c73
-rw-r--r--drivers/block/z2ram.c1
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/Kconfig7
-rw-r--r--drivers/bluetooth/btintel.c240
-rw-r--r--drivers/bluetooth/btintel.h11
-rw-r--r--drivers/bluetooth/btintel_pcie.c12
-rw-r--r--drivers/bluetooth/btmtk.c1085
-rw-r--r--drivers/bluetooth/btmtk.h118
-rw-r--r--drivers/bluetooth/btmtksdio.c4
-rw-r--r--drivers/bluetooth/btmtkuart.c1
-rw-r--r--drivers/bluetooth/btnxpuart.c244
-rw-r--r--drivers/bluetooth/btrtl.c2
-rw-r--r--drivers/bluetooth/btusb.c735
-rw-r--r--drivers/bluetooth/hci_bcm4377.c76
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_nokia.c5
-rw-r--r--drivers/bluetooth/hci_qca.c151
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/bus/sunxi-rsb.c2
-rw-r--r--drivers/bus/ts-nbus.c2
-rw-r--r--drivers/bus/vexpress-config.c1
-rw-r--r--drivers/cache/Kconfig9
-rw-r--r--drivers/cache/Makefile5
-rw-r--r--drivers/cache/starfive_starlink_cache.c130
-rw-r--r--drivers/cdrom/cdrom.c1
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/char/hpet.c34
-rw-r--r--drivers/char/hw_random/core.c47
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c4
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c4
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c2
-rw-r--r--drivers/char/ipmi/ssif_bmc.c10
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/Makefile2
-rw-r--r--drivers/char/tpm/eventlog/common.c2
-rw-r--r--drivers/char/tpm/tpm-buf.c26
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm2-cmd.c10
-rw-r--r--drivers/char/tpm/tpm2-sessions.c438
-rw-r--r--drivers/char/tpm/tpm_tis_core.c3
-rw-r--r--drivers/char/tpm/tpm_tis_core.h2
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c4
-rw-r--r--drivers/clk/clkdev.c11
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mfgcfg.c1
-rw-r--r--drivers/clk/mediatek/clk-mtk.c24
-rw-r--r--drivers/clk/mediatek/clk-mtk.h2
-rw-r--r--drivers/clk/qcom/Kconfig8
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c2
-rw-r--r--drivers/clk/qcom/camcc-sm8650.c3591
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c3
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c10
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c10
-rw-r--r--drivers/clk/qcom/gdsc.c41
-rw-r--r--drivers/clk/qcom/gdsc.h1
-rw-r--r--drivers/clk/qcom/videocc-sc7280.c2
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c4
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c156
-rw-r--r--drivers/clk/sifive/sifive-prci.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c18
-rw-r--r--drivers/clocksource/Kconfig10
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/arm_global_timer.c2
-rw-r--r--drivers/clocksource/mips-gic-timer.c20
-rw-r--r--drivers/clocksource/sh_cmt.c13
-rw-r--r--drivers/clocksource/timer-rtl-otto.c291
-rw-r--r--drivers/counter/stm32-timer-cnt.c4
-rw-r--r--drivers/counter/ti-eqep.c6
-rw-r--r--drivers/cpufreq/Kconfig12
-rw-r--r--drivers/cpufreq/Kconfig.x861
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c10
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c15
-rw-r--r--drivers/cpufreq/amd-pstate.c387
-rw-r--r--drivers/cpufreq/amd-pstate.h (renamed from include/linux/amd-pstate.h)35
-rw-r--r--drivers/cpufreq/apple-soc-cpufreq.c4
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c4
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c12
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c6
-rw-r--r--drivers/cpufreq/cpufreq.c53
-rw-r--r--drivers/cpufreq/e_powersaver.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c166
-rw-r--r--drivers/cpufreq/longhaul.c5
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c6
-rw-r--r--drivers/cpufreq/loongson3_cpufreq.c395
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c4
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c76
-rw-r--r--drivers/cpufreq/omap-cpufreq.c3
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c6
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c6
-rw-r--r--drivers/cpufreq/powernow-k6.c5
-rw-r--r--drivers/cpufreq/powernow-k7.c3
-rw-r--r--drivers/cpufreq/powernow-k8.c6
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c4
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c3
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c4
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c13
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c4
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c8
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/sh-cpufreq.c4
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c3
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c3
-rw-r--r--drivers/cpufreq/speedstep-centrino.c18
-rw-r--r--drivers/cpufreq/sti-cpufreq.c3
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c30
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c4
-rw-r--r--drivers/cpufreq/ti-cpufreq.c96
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c5
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c1
-rw-r--r--drivers/cpuidle/governors/menu.c17
-rw-r--r--drivers/cpuidle/governors/teo.c194
-rw-r--r--drivers/crypto/caam/Kconfig2
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c28
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h2
-rw-r--r--drivers/crypto/caam/ctrl.c2
-rw-r--r--drivers/crypto/caam/qi.c43
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile5
-rw-r--r--drivers/cxl/core/hdm.c13
-rw-r--r--drivers/cxl/core/pmem.c16
-rw-r--r--drivers/cxl/core/region.c121
-rw-r--r--drivers/cxl/cxl.h6
-rw-r--r--drivers/cxl/cxlmem.h21
-rw-r--r--drivers/cxl/mem.c17
-rw-r--r--drivers/dma-buf/st-dma-fence.c6
-rw-r--r--drivers/dma-buf/sync_debug.c4
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/ioat/init.c55
-rw-r--r--drivers/dma/ti/k3-udma-glue.c8
-rw-r--r--drivers/dma/xilinx/xdma.c4
-rw-r--r--drivers/edac/Makefile10
-rw-r--r--drivers/edac/amd64_edac.c77
-rw-r--r--drivers/edac/amd64_edac.h4
-rw-r--r--drivers/edac/dmc520_edac.c4
-rw-r--r--drivers/edac/ghes_edac.c2
-rw-r--r--drivers/edac/i10nm_base.c20
-rw-r--r--drivers/edac/igen6_edac.c12
-rw-r--r--drivers/edac/layerscape_edac.c1
-rw-r--r--drivers/edac/mpc85xx_edac.c1
-rw-r--r--drivers/edac/octeon_edac-l2c.c1
-rw-r--r--drivers/edac/octeon_edac-lmc.c1
-rw-r--r--drivers/edac/octeon_edac-pc.c1
-rw-r--r--drivers/edac/octeon_edac-pci.c1
-rw-r--r--drivers/edac/pnd2_edac.c4
-rw-r--r--drivers/edac/sb_edac.c14
-rw-r--r--drivers/edac/skx_base.c2
-rw-r--r--drivers/edac/skx_common.c21
-rw-r--r--drivers/edac/skx_common.h4
-rw-r--r--drivers/edac/thunderx_edac.c6
-rw-r--r--drivers/firewire/Kconfig2
-rw-r--r--drivers/firewire/core-card.c6
-rw-r--r--drivers/firewire/core-cdev.c6
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core-transaction.c30
-rw-r--r--drivers/firewire/packet-serdes-test.c1
-rw-r--r--drivers/firewire/uapi-test.c1
-rw-r--r--drivers/firmware/arm_ffa/Makefile6
-rw-r--r--drivers/firmware/arm_ffa/bus.c22
-rw-r--r--drivers/firmware/arm_ffa/common.h2
-rw-r--r--drivers/firmware/arm_ffa/driver.c57
-rw-r--r--drivers/firmware/arm_scmi/common.h1
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c57
-rw-r--r--drivers/firmware/arm_scmi/scmi_power_control.c21
-rw-r--r--drivers/firmware/arm_scmi/shmem.c5
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c505
-rw-r--r--drivers/firmware/efi/efi-pstore.c8
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c13
-rw-r--r--drivers/firmware/efi/libstub/arm64.c3
-rw-r--r--drivers/firmware/efi/libstub/efistub.h9
-rw-r--r--drivers/firmware/efi/libstub/kaslr.c20
-rw-r--r--drivers/firmware/efi/libstub/loongarch.c2
-rw-r--r--drivers/firmware/efi/libstub/relocate.c2
-rw-r--r--drivers/firmware/efi/libstub/smbios.c43
-rw-r--r--drivers/firmware/efi/libstub/unaccepted_memory.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c83
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds1
-rw-r--r--drivers/firmware/efi/memattr.c2
-rw-r--r--drivers/firmware/efi/memmap.c9
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c13
-rw-r--r--drivers/firmware/google/cbmem.c1
-rw-r--r--drivers/firmware/google/coreboot_table.c1
-rw-r--r--drivers/firmware/google/framebuffer-coreboot.c1
-rw-r--r--drivers/firmware/google/gsmi.c1
-rw-r--r--drivers/firmware/google/memconsole-coreboot.c1
-rw-r--r--drivers/firmware/google/memconsole-x86-legacy.c1
-rw-r--r--drivers/firmware/google/memconsole.c1
-rw-r--r--drivers/firmware/google/vpd.c1
-rw-r--r--drivers/firmware/meson/meson_sm.c1
-rw-r--r--drivers/firmware/microchip/mpfs-auto-update.c136
-rw-r--r--drivers/firmware/psci/psci.c4
-rw-r--r--drivers/firmware/qcom/Kconfig32
-rw-r--r--drivers/firmware/qcom/Makefile1
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c256
-rw-r--r--drivers/firmware/qcom/qcom_scm-smc.c30
-rw-r--r--drivers/firmware/qcom/qcom_scm.c197
-rw-r--r--drivers/firmware/qcom/qcom_scm.h9
-rw-r--r--drivers/firmware/qcom/qcom_tzmem.c469
-rw-r--r--drivers/firmware/qcom/qcom_tzmem.h13
-rw-r--r--drivers/firmware/sysfb.c12
-rw-r--r--drivers/firmware/ti_sci.h2
-rw-r--r--drivers/firmware/turris-mox-rwtm.c23
-rw-r--r--drivers/firmware/xilinx/zynqmp.c3
-rw-r--r--drivers/gpio/Kconfig35
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-amd8111.c4
-rw-r--r--drivers/gpio/gpio-ath79.c2
-rw-r--r--drivers/gpio/gpio-davinci.c5
-rw-r--r--drivers/gpio/gpio-graniterapids.c2
-rw-r--r--drivers/gpio/gpio-gw-pld.c1
-rw-r--r--drivers/gpio/gpio-mc33880.c3
-rw-r--r--drivers/gpio/gpio-mmio.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c3
-rw-r--r--drivers/gpio/gpio-pcf857x.c1
-rw-r--r--drivers/gpio/gpio-pl061.c1
-rw-r--r--drivers/gpio/gpio-rdc321x.c6
-rw-r--r--drivers/gpio/gpio-sim.c60
-rw-r--r--drivers/gpio/gpio-sloppy-logic-analyzer.c344
-rw-r--r--drivers/gpio/gpio-syscon.c27
-rw-r--r--drivers/gpio/gpio-tqmx86.c110
-rw-r--r--drivers/gpio/gpio-virtuser.c1807
-rw-r--r--drivers/gpio/gpiolib-acpi.c4
-rw-r--r--drivers/gpio/gpiolib-cdev.c108
-rw-r--r--drivers/gpio/gpiolib-of.c26
-rw-r--r--drivers/gpio/gpiolib.c36
-rw-r--r--drivers/gpio/gpiolib.h4
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c65
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c10
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h5
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/include/pptable.h91
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c73
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c13
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c22
-rw-r--r--drivers/gpu/drm/bridge/panel.c7
-rw-r--r--drivers/gpu/drm/drm_buddy.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c5
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c3
-rw-r--r--drivers/gpu/drm/drm_file.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c5
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h6
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c8
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c37
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c24
-rw-r--r--drivers/gpu/drm/panel/Kconfig2
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c1
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c20
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c6
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c8
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c42
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c173
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h196
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h185
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c60
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c12
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c47
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c14
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c1
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c20
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c18
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/bpf/Makefile2
-rw-r--r--drivers/hid/bpf/entrypoints/Makefile93
-rw-r--r--drivers/hid/bpf/entrypoints/README4
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.bpf.c25
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.lskel.h248
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c419
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.h13
-rw-r--r--drivers/hid/bpf/hid_bpf_jmp_table.c565
-rw-r--r--drivers/hid/bpf/hid_bpf_struct_ops.c307
-rw-r--r--drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c9
-rw-r--r--drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/Huion__Dial-2.bpf.c614
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c534
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c9
-rw-r--r--drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/Makefile2
-rw-r--r--drivers/hid/bpf/progs/Microsoft__Xbox-Elite-2.bpf.c (renamed from drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c)21
-rw-r--r--drivers/hid/bpf/progs/Thrustmaster__TCA-Yoke-Boeing.bpf.c144
-rw-r--r--drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/XPPen__Artist24.bpf.c12
-rw-r--r--drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c24
-rw-r--r--drivers/hid/bpf/progs/XPPen__DecoMini4.bpf.c231
-rw-r--r--drivers/hid/bpf/progs/hid_bpf.h6
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_helpers.h1
-rw-r--r--drivers/hid/bpf/progs/hid_report_helpers.h2960
-rw-r--r--drivers/hid/hid-a4tech.c1
-rw-r--r--drivers/hid/hid-apple.c88
-rw-r--r--drivers/hid/hid-asus.c4
-rw-r--r--drivers/hid/hid-aureal.c1
-rw-r--r--drivers/hid/hid-belkin.c1
-rw-r--r--drivers/hid/hid-betopff.c1
-rw-r--r--drivers/hid/hid-bigbenff.c1
-rw-r--r--drivers/hid/hid-cherry.c1
-rw-r--r--drivers/hid/hid-chicony.c1
-rw-r--r--drivers/hid/hid-core.c134
-rw-r--r--drivers/hid/hid-cypress.c1
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-dr.c1
-rw-r--r--drivers/hid/hid-elecom.c1
-rw-r--r--drivers/hid/hid-elo.c1
-rw-r--r--drivers/hid/hid-emsff.c1
-rw-r--r--drivers/hid/hid-evision.c1
-rw-r--r--drivers/hid/hid-ezkey.c1
-rw-r--r--drivers/hid/hid-gaff.c1
-rw-r--r--drivers/hid/hid-google-hammer.c1
-rw-r--r--drivers/hid/hid-google-stadiaff.c1
-rw-r--r--drivers/hid/hid-gyration.c1
-rw-r--r--drivers/hid/hid-holtek-kbd.c1
-rw-r--r--drivers/hid/hid-holtek-mouse.c1
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-input.c13
-rw-r--r--drivers/hid/hid-ite.c1
-rw-r--r--drivers/hid/hid-kensington.c3
-rw-r--r--drivers/hid/hid-keytouch.c1
-rw-r--r--drivers/hid/hid-kye.c1
-rw-r--r--drivers/hid/hid-lcpower.c1
-rw-r--r--drivers/hid/hid-lenovo.c1
-rw-r--r--drivers/hid/hid-letsketch.c1
-rw-r--r--drivers/hid/hid-lg-g15.c1
-rw-r--r--drivers/hid/hid-lg.c1
-rw-r--r--drivers/hid/hid-logitech-dj.c5
-rw-r--r--drivers/hid/hid-logitech-hidpp.c1
-rw-r--r--drivers/hid/hid-magicmouse.c1
-rw-r--r--drivers/hid/hid-maltron.c1
-rw-r--r--drivers/hid/hid-mcp2221.c2
-rw-r--r--drivers/hid/hid-megaworld.c1
-rw-r--r--drivers/hid/hid-mf.c1
-rw-r--r--drivers/hid/hid-microsoft.c1
-rw-r--r--drivers/hid/hid-monterey.c1
-rw-r--r--drivers/hid/hid-nintendo.c27
-rw-r--r--drivers/hid/hid-ntrig.c1
-rw-r--r--drivers/hid/hid-nvidia-shield.c4
-rw-r--r--drivers/hid/hid-ortek.c1
-rw-r--r--drivers/hid/hid-petalynx.c1
-rw-r--r--drivers/hid/hid-pl.c1
-rw-r--r--drivers/hid/hid-primax.c1
-rw-r--r--drivers/hid/hid-prodikeys.c1
-rw-r--r--drivers/hid/hid-razer.c1
-rw-r--r--drivers/hid/hid-redragon.c1
-rw-r--r--drivers/hid/hid-retrode.c1
-rw-r--r--drivers/hid/hid-saitek.c1
-rw-r--r--drivers/hid/hid-samsung.c1
-rw-r--r--drivers/hid/hid-semitek.c1
-rw-r--r--drivers/hid/hid-sjoy.c1
-rw-r--r--drivers/hid/hid-sony.c1
-rw-r--r--drivers/hid/hid-speedlink.c1
-rw-r--r--drivers/hid/hid-steam.c5
-rw-r--r--drivers/hid/hid-steelseries.c1
-rw-r--r--drivers/hid/hid-sunplus.c1
-rw-r--r--drivers/hid/hid-tivo.c1
-rw-r--r--drivers/hid/hid-tmff.c1
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/hid-twinhan.c1
-rw-r--r--drivers/hid/hid-uclogic-core.c2
-rw-r--r--drivers/hid/hid-uclogic-rdesc-test.c2
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c11
-rw-r--r--drivers/hid/hid-viewsonic.c1
-rw-r--r--drivers/hid/hid-vivaldi-common.c1
-rw-r--r--drivers/hid/hid-waltop.c1
-rw-r--r--drivers/hid/hid-winwing.c1
-rw-r--r--drivers/hid/hid-xinmo.c1
-rw-r--r--drivers/hid/hid-zpff.c1
-rw-r--r--drivers/hid/hid-zydacron.c1
-rw-r--r--drivers/hid/hidraw.c10
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c59
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.c79
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/loader.h33
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hv/hv.c37
-rw-r--r--drivers/hv/hv_balloon.c190
-rw-r--r--drivers/hwmon/Kconfig67
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/ad7418.c7
-rw-r--r--drivers/hwmon/adc128d818.c4
-rw-r--r--drivers/hwmon/adm1021.c505
-rw-r--r--drivers/hwmon/adm1031.c4
-rw-r--r--drivers/hwmon/ads7828.c7
-rw-r--r--drivers/hwmon/adt7475.c18
-rw-r--r--drivers/hwmon/aht10.c3
-rw-r--r--drivers/hwmon/amc6821.c1356
-rw-r--r--drivers/hwmon/asus-ec-sensors.c10
-rw-r--r--drivers/hwmon/asus_atk0110.c1
-rw-r--r--drivers/hwmon/corsair-cpro.c89
-rw-r--r--drivers/hwmon/corsair-psu.c7
-rw-r--r--drivers/hwmon/cros_ec_hwmon.c283
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c15
-rw-r--r--drivers/hwmon/dme1737.c4
-rw-r--r--drivers/hwmon/ds1621.c4
-rw-r--r--drivers/hwmon/f75375s.c46
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/hwmon/g762.c43
-rw-r--r--drivers/hwmon/gsc-hwmon.c9
-rw-r--r--drivers/hwmon/hwmon.c149
-rw-r--r--drivers/hwmon/iio_hwmon.c11
-rw-r--r--drivers/hwmon/ina238.c2
-rw-r--r--drivers/hwmon/ina2xx.c39
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c2
-rw-r--r--drivers/hwmon/jc42.c49
-rw-r--r--drivers/hwmon/k10temp.c62
-rw-r--r--drivers/hwmon/lm63.c5
-rw-r--r--drivers/hwmon/lm70.c6
-rw-r--r--drivers/hwmon/lm75.c10
-rw-r--r--drivers/hwmon/lm78.c4
-rw-r--r--drivers/hwmon/lm83.c16
-rw-r--r--drivers/hwmon/lm85.c7
-rw-r--r--drivers/hwmon/lm90.c61
-rw-r--r--drivers/hwmon/lm95234.c14
-rw-r--r--drivers/hwmon/ltc2991.c15
-rw-r--r--drivers/hwmon/ltc2992.c4
-rw-r--r--drivers/hwmon/max16065.c10
-rw-r--r--drivers/hwmon/max1668.c4
-rw-r--r--drivers/hwmon/max31827.c23
-rw-r--r--drivers/hwmon/max6639.c534
-rw-r--r--drivers/hwmon/max6642.c314
-rw-r--r--drivers/hwmon/max6697.c12
-rw-r--r--drivers/hwmon/mcp3021.c6
-rw-r--r--drivers/hwmon/mr75203.c1
-rw-r--r--drivers/hwmon/nct6683.c2
-rw-r--r--drivers/hwmon/nct6775-core.c2
-rw-r--r--drivers/hwmon/nct6775.h2
-rw-r--r--drivers/hwmon/nzxt-smart2.c1
-rw-r--r--drivers/hwmon/pmbus/Kconfig36
-rw-r--r--drivers/hwmon/pmbus/Makefile4
-rw-r--r--drivers/hwmon/pmbus/lm25066.c2
-rw-r--r--drivers/hwmon/pmbus/ltc4286.c4
-rw-r--r--drivers/hwmon/pmbus/mp2856.c8
-rw-r--r--drivers/hwmon/pmbus/mp2891.c600
-rw-r--r--drivers/hwmon/pmbus/mp2993.c261
-rw-r--r--drivers/hwmon/pmbus/mp5920.c90
-rw-r--r--drivers/hwmon/pmbus/mp9941.c319
-rw-r--r--drivers/hwmon/powr1220.c6
-rw-r--r--drivers/hwmon/sht3x.c20
-rw-r--r--drivers/hwmon/shtc1.c6
-rw-r--r--drivers/hwmon/spd5118.c703
-rw-r--r--drivers/hwmon/thmc50.c4
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/hwmon/tmp421.c6
-rw-r--r--drivers/hwmon/tmp464.c5
-rw-r--r--drivers/hwmon/tmp513.c2
-rw-r--r--drivers/hwmon/tps23861.c2
-rw-r--r--drivers/hwmon/w83627ehf.c4
-rw-r--r--drivers/hwmon/w83781d.c4
-rw-r--r--drivers/hwmon/w83795.c4
-rw-r--r--drivers/i2c/Kconfig2
-rw-r--r--drivers/i2c/busses/Makefile6
-rw-r--r--drivers/i2c/busses/i2c-at91-slave.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-pnx.c48
-rw-r--r--drivers/i2c/busses/i2c-rcar.c27
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c11
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.c71
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.h2
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c36
-rw-r--r--drivers/i2c/busses/i2c-viai2c-zhaoxin.c113
-rw-r--r--drivers/i2c/i2c-core-base.c1
-rw-r--r--drivers/i2c/i2c-slave-testunit.c12
-rw-r--r--drivers/i2c/i2c-smbus.c6
-rw-r--r--drivers/idle/intel_idle.c116
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/adc/ad7173.c37
-rw-r--r--drivers/iio/adc/ad7266.c2
-rw-r--r--drivers/iio/adc/ad9467.c4
-rw-r--r--drivers/iio/adc/xilinx-ams.c8
-rw-r--r--drivers/iio/chemical/bme680.h2
-rw-r--r--drivers/iio/chemical/bme680_core.c62
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c6
-rw-r--r--drivers/iio/dac/Kconfig2
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/humidity/hdc3020.c325
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c19
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/industrialio-trigger.c2
-rw-r--r--drivers/iio/inkern.c2
-rw-r--r--drivers/iio/light/apds9306.c4
-rw-r--r--drivers/iio/pressure/bmp280-core.c10
-rw-r--r--drivers/iio/temperature/mlx90635.c6
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h4
-rw-r--r--drivers/infiniband/hw/mana/mr.c1
-rw-r--r--drivers/infiniband/hw/mana/qp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c23
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c198
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c16
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/misc/88pm886-onkey.c98
-rw-r--r--drivers/input/misc/Kconfig17
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/cs40l50-vibra.c555
-rw-r--r--drivers/input/mouse/elantech.c31
-rw-r--r--drivers/input/mouse/vmmouse.c76
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h18
-rw-r--r--drivers/input/tests/input_test.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c12
-rw-r--r--drivers/input/touchscreen/ili210x.c4
-rw-r--r--drivers/input/touchscreen/silead.c19
-rw-r--r--drivers/iommu/amd/amd_iommu.h3
-rw-r--r--drivers/iommu/amd/init.c12
-rw-r--r--drivers/iommu/amd/iommu.c60
-rw-r--r--drivers/iommu/amd/ppr.c25
-rw-r--r--drivers/iommu/dma-iommu.c8
-rw-r--r--drivers/iommu/intel/iommu.c20
-rw-r--r--drivers/irqchip/irq-gic-common.c22
-rw-r--r--drivers/irqchip/irq-gic-common.h7
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c55
-rw-r--r--drivers/irqchip/irq-gic-v3.c282
-rw-r--r--drivers/irqchip/irq-gic.c10
-rw-r--r--drivers/irqchip/irq-hip04.c6
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c5
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/irqchip/irq-riscv-intc.c9
-rw-r--r--drivers/irqchip/irq-sifive-plic.c34
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c1
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c1
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c1
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c1
-rw-r--r--drivers/isdn/mISDN/core.c1
-rw-r--r--drivers/isdn/mISDN/dsp_blowfish.c5
-rw-r--r--drivers/isdn/mISDN/dsp_core.c1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c1
-rw-r--r--drivers/leds/Kconfig32
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/blink/leds-bcm63138.c1
-rw-r--r--drivers/leds/flash/Kconfig11
-rw-r--r--drivers/leds/flash/Makefile1
-rw-r--r--drivers/leds/flash/leds-as3645a.c4
-rw-r--r--drivers/leds/flash/leds-mt6360.c5
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c10
-rw-r--r--drivers/leds/flash/leds-rt4505.c1
-rw-r--r--drivers/leds/flash/leds-sy7802.c539
-rw-r--r--drivers/leds/led-class-multicolor.c3
-rw-r--r--drivers/leds/led-class.c16
-rw-r--r--drivers/leds/led-core.c62
-rw-r--r--drivers/leds/led-triggers.c35
-rw-r--r--drivers/leds/leds-an30259a.c4
-rw-r--r--drivers/leds/leds-bd2802.c2
-rw-r--r--drivers/leds/leds-blinkm.c2
-rw-r--r--drivers/leds/leds-cros_ec.c277
-rw-r--r--drivers/leds/leds-is31fl319x.c4
-rw-r--r--drivers/leds/leds-lm3530.c2
-rw-r--r--drivers/leds/leds-lm3532.c2
-rw-r--r--drivers/leds/leds-lm3642.c2
-rw-r--r--drivers/leds/leds-lm3697.c2
-rw-r--r--drivers/leds/leds-lp3944.c2
-rw-r--r--drivers/leds/leds-lp3952.c2
-rw-r--r--drivers/leds/leds-lp5521.c410
-rw-r--r--drivers/leds/leds-lp5523.c763
-rw-r--r--drivers/leds/leds-lp5562.c274
-rw-r--r--drivers/leds/leds-lp5569.c544
-rw-r--r--drivers/leds/leds-lp55xx-common.c760
-rw-r--r--drivers/leds/leds-lp55xx-common.h163
-rw-r--r--drivers/leds/leds-lp8501.c313
-rw-r--r--drivers/leds/leds-lp8860.c2
-rw-r--r--drivers/leds/leds-pca9532.c81
-rw-r--r--drivers/leds/leds-powernv.c28
-rw-r--r--drivers/leds/leds-spi-byte.c63
-rw-r--r--drivers/leds/leds-ss4200.c7
-rw-r--r--drivers/leds/leds-tlc591xx.c18
-rw-r--r--drivers/leds/leds-turris-omnia.c2
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/rgb/Kconfig1
-rw-r--r--drivers/leds/rgb/leds-ktd202x.c80
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c16
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c8
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-core.c1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds.c1
-rw-r--r--drivers/leds/trigger/Kconfig16
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-input-events.c165
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c5
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c1
-rw-r--r--drivers/md/bcache/alloc.c21
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/btree.c7
-rw-r--r--drivers/md/bcache/request.c16
-rw-r--r--drivers/md/bcache/super.c13
-rw-r--r--drivers/md/dm-cache-target.c1
-rw-r--r--drivers/md/dm-clone-target.c1
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-integrity.c47
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-table.c362
-rw-r--r--drivers/md/dm-vdo/dm-vdo-target.c2
-rw-r--r--drivers/md/dm-zone.c288
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c174
-rw-r--r--drivers/md/dm.h7
-rw-r--r--drivers/md/md-bitmap.c6
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c630
-rw-r--r--drivers/md/md.h136
-rw-r--r--drivers/md/raid0.c30
-rw-r--r--drivers/md/raid1.c34
-rw-r--r--drivers/md/raid10.c23
-rw-r--r--drivers/md/raid5.c114
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c6
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c73
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c5
-rw-r--r--drivers/media/pci/intel/ivsc/Kconfig1
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c5
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c7
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c39
-rw-r--r--drivers/memory/Kconfig2
-rw-r--r--drivers/memstick/host/Kconfig10
-rw-r--r--drivers/memstick/host/Makefile1
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c638
-rw-r--r--drivers/mfd/88pm800.c2
-rw-r--r--drivers/mfd/88pm805.c2
-rw-r--r--drivers/mfd/88pm860x-core.c2
-rw-r--r--drivers/mfd/88pm886.c148
-rw-r--r--drivers/mfd/Kconfig56
-rw-r--r--drivers/mfd/Makefile12
-rw-r--r--drivers/mfd/aat2870-core.c2
-rw-r--r--drivers/mfd/act8945a.c2
-rw-r--r--drivers/mfd/arizona-core.c1
-rw-r--r--drivers/mfd/arizona-spi.c9
-rw-r--r--drivers/mfd/as3722.c4
-rw-r--r--drivers/mfd/axp20x-i2c.c24
-rw-r--r--drivers/mfd/axp20x.c1
-rw-r--r--drivers/mfd/bd9571mwv.c2
-rw-r--r--drivers/mfd/cros_ec_dev.c20
-rw-r--r--drivers/mfd/cs40l50-core.c570
-rw-r--r--drivers/mfd/cs40l50-i2c.c68
-rw-r--r--drivers/mfd/cs40l50-spi.c68
-rw-r--r--drivers/mfd/da9055-i2c.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c162
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c4
-rw-r--r--drivers/mfd/lm3533-core.c28
-rw-r--r--drivers/mfd/lp3943.c2
-rw-r--r--drivers/mfd/lp873x.c4
-rw-r--r--drivers/mfd/lp87565.c4
-rw-r--r--drivers/mfd/lp8788.c2
-rw-r--r--drivers/mfd/madera-spi.c9
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max8907.c2
-rw-r--r--drivers/mfd/max8925-i2c.c4
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/mfd-core.c6
-rw-r--r--drivers/mfd/mt6397-core.c10
-rw-r--r--drivers/mfd/mxs-lradc.c2
-rw-r--r--drivers/mfd/omap-usb-host.c1
-rw-r--r--drivers/mfd/omap-usb-tll.c7
-rw-r--r--drivers/mfd/pcf50633-gpio.c1
-rw-r--r--drivers/mfd/qcom-pm8008.c170
-rw-r--r--drivers/mfd/retu-mfd.c4
-rw-r--r--drivers/mfd/rohm-bd96801.c273
-rw-r--r--drivers/mfd/rsmu_core.c2
-rw-r--r--drivers/mfd/rt4831.c1
-rw-r--r--drivers/mfd/ssbi.c1
-rw-r--r--drivers/mfd/stw481x.c4
-rw-r--r--drivers/mfd/syscon.c48
-rw-r--r--drivers/mfd/timberdale.c18
-rw-r--r--drivers/mfd/tps6105x.c4
-rw-r--r--drivers/mfd/tps6507x.c2
-rw-r--r--drivers/mfd/tps65086.c2
-rw-r--r--drivers/mfd/tps65090.c4
-rw-r--r--drivers/mfd/tps6586x.c4
-rw-r--r--drivers/mfd/tps65912-core.c21
-rw-r--r--drivers/mfd/tps65912-i2c.c10
-rw-r--r--drivers/mfd/tps65912-spi.c8
-rw-r--r--drivers/mfd/tps6594-core.c2
-rw-r--r--drivers/mfd/twl6040.c6
-rw-r--r--drivers/mfd/vexpress-sysreg.c1
-rw-r--r--drivers/mfd/wl1273-core.c2
-rw-r--r--drivers/mfd/wm8350-i2c.c6
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/misc/fastrpc.c41
-rw-r--r--drivers/misc/lkdtm/bugs.c30
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c9
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c4
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mei/platform-vsc.c43
-rw-r--r--drivers/misc/mei/vsc-fw-loader.c4
-rw-r--r--drivers/misc/mei/vsc-tp.c18
-rw-r--r--drivers/mmc/core/block.c42
-rw-r--r--drivers/mmc/core/core.c1
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c1
-rw-r--r--drivers/mmc/core/pwrseq_sd8787.c1
-rw-r--r--drivers/mmc/core/pwrseq_simple.c1
-rw-r--r--drivers/mmc/core/queue.c20
-rw-r--r--drivers/mmc/core/queue.h3
-rw-r--r--drivers/mmc/core/sdio_uart.c1
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/atmel-mci.c35
-rw-r--r--drivers/mmc/host/au1xmmc.c37
-rw-r--r--drivers/mmc/host/cb710-mmc.c14
-rw-r--r--drivers/mmc/host/cb710-mmc.h3
-rw-r--r--drivers/mmc/host/davinci_mmc.c14
-rw-r--r--drivers/mmc/host/dw_mmc-bluefield.c18
-rw-r--r--drivers/mmc/host/dw_mmc.c30
-rw-r--r--drivers/mmc/host/dw_mmc.h11
-rw-r--r--drivers/mmc/host/mmc_spi.c5
-rw-r--r--drivers/mmc/host/moxart-mmc.c78
-rw-r--r--drivers/mmc/host/of_mmc_spi.c1
-rw-r--r--drivers/mmc/host/omap.c17
-rw-r--r--drivers/mmc/host/renesas_sdhi.h4
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c5
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c31
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c14
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c2
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c64
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c38
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c41
-rw-r--r--drivers/mmc/host/sdhci.c67
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sdhci_am654.c2
-rw-r--r--drivers/mmc/host/tifm_sd.c15
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c16
-rw-r--r--drivers/mmc/host/uniphier-sd.c14
-rw-r--r--drivers/mmc/host/via-sdmmc.c25
-rw-r--r--drivers/mmc/host/wbsd.c74
-rw-r--r--drivers/mmc/host/wbsd.h10
-rw-r--r--drivers/mtd/mtd_blkdevs.c9
-rw-r--r--drivers/mtd/nand/raw/Kconfig3
-rw-r--r--drivers/mtd/nand/raw/nand_base.c66
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c6
-rw-r--r--drivers/net/arcnet/com20020-isa.c1
-rw-r--r--drivers/net/bonding/bond_main.c7
-rw-r--r--drivers/net/bonding/bond_options.c6
-rw-r--r--drivers/net/can/Kconfig5
-rw-r--r--drivers/net/can/dev/dev.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c135
-rw-r--r--drivers/net/can/m_can/m_can.c169
-rw-r--r--drivers/net/can/m_can/m_can.h2
-rw-r--r--drivers/net/can/m_can/m_can_pci.c2
-rw-r--r--drivers/net/can/m_can/m_can_platform.c2
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c15
-rw-r--r--drivers/net/can/mscan/mscan.c6
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c41
-rw-r--r--drivers/net/can/sja1000/plx_pci.c3
-rw-r--r--drivers/net/can/spi/hi311x.c7
-rw-r--r--drivers/net/can/spi/mcp251x.c11
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c103
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c5
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c165
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c129
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c29
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c55
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h61
-rw-r--r--drivers/net/can/usb/Kconfig3
-rw-r--r--drivers/net/can/usb/gs_usb.c7
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c12
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/xilinx_can.c2
-rw-r--r--drivers/net/dsa/Kconfig3
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h8
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h2
-rw-r--r--drivers/net/dsa/lan9303-core.c23
-rw-r--r--drivers/net/dsa/lan9303_i2c.c2
-rw-r--r--drivers/net/dsa/lan9303_mdio.c8
-rw-r--r--drivers/net/dsa/lantiq_gswip.c123
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c61
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c4
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h11
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c25
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h8
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c2
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h2
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c32
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h5
-rw-r--r--drivers/net/dsa/mt7530.c121
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h4
-rw-r--r--drivers/net/dsa/ocelot/felix.c114
-rw-r--r--drivers/net/dsa/ocelot/felix.h9
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c112
-rw-r--r--drivers/net/dsa/ocelot/ocelot_ext.c54
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c60
-rw-r--r--drivers/net/dsa/qca/ar9331.c2
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c2
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c118
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c12
-rw-r--r--drivers/net/dsa/qca/qca8k.h1
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c8
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c735
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h37
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c4
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c11
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c11
-rw-r--r--drivers/net/ethernet/amd/7990.c1
-rw-r--r--drivers/net/ethernet/amd/a2065.c1
-rw-r--r--drivers/net/ethernet/amd/ariadne.c1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c1
-rw-r--r--drivers/net/ethernet/amd/hplance.c1
-rw-r--r--drivers/net/ethernet/amd/lance.c1
-rw-r--r--drivers/net/ethernet/amd/mvme147.c1
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c2
-rw-r--r--drivers/net/ethernet/arc/Kconfig10
-rw-r--r--drivers/net/ethernet/arc/Makefile1
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c88
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c796
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h110
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c175
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h311
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c159
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h44
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c11
-rw-r--r--drivers/net/ethernet/cadence/macb.h10
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c125
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c25
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c12
-rw-r--r--drivers/net/ethernet/cortina/gemini.c56
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c76
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h20
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c12
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c18
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c16
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c2
-rw-r--r--drivers/net/ethernet/google/gve/Makefile2
-rw-r--r--drivers/net/ethernet/google/gve/gve.h54
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c228
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h103
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c72
-rw-r--r--drivers/net/ethernet/google/gve/gve_flow_rule.c298
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c83
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c18
-rw-r--r--drivers/net/ethernet/intel/Kconfig13
-rw-r--r--drivers/net/ethernet/intel/e100.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/Makefile2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile7
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c55
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c151
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c268
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile5
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c1
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c159
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.c61
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h77
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c198
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c101
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c444
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hwmon.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c164
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c345
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h402
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c3242
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h295
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c680
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c13
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig26
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c152
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c89
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c306
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c1424
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h735
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c178
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c5
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c15
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c7
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile2
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c133
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile2
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c23
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c365
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h55
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c7
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig10
-rw-r--r--drivers/net/ethernet/mediatek/Makefile1
-rw-r--r--drivers/net/ethernet/mediatek/airoha_eth.c2730
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c234
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h17
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c17
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c234
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c211
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c434
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c344
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c6
-rw-r--r--drivers/net/ethernet/meta/Kconfig31
-rw-r--r--drivers/net/ethernet/meta/Makefile6
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile19
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h144
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h838
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c88
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h5
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c791
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h124
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c208
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c666
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h86
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c488
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h63
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c564
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c161
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c651
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h189
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c529
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h175
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c1913
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h127
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c10
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c46
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c48
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c2
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c10
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c14
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c101
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c27
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c13
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c129
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h237
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c157
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c110
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c12
-rw-r--r--drivers/net/ethernet/renesas/Kconfig11
-rw-r--r--drivers/net/ethernet/renesas/Makefile2
-rw-r--r--drivers/net/ethernet/renesas/ravb.h15
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c524
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c6
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c1391
-rw-r--r--drivers/net/ethernet/renesas/rtsn.h464
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c10
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c6
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c168
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h12
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c135
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h8
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h28
-rw-r--r--drivers/net/ethernet/sfc/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/ptp.h5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c64
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h8
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h4
-rw-r--r--drivers/net/ethernet/sfc/tc.c5
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c259
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c55
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c5
-rw-r--r--drivers/net/ethernet/tehuti/Kconfig15
-rw-r--r--drivers/net/ethernet/tehuti/Makefile3
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c1850
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h233
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c142
-rw-r--r--drivers/net/ethernet/tehuti/tn40_phy.c76
-rw-r--r--drivers/net/ethernet/tehuti/tn40_regs.h245
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/ti/Makefile31
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c11
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c92
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c56
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c337
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h26
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c316
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c65
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_queues.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.c477
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.h13
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c39
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c33
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c72
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h57
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c427
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c643
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h20
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c124
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c27
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h147
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c7
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fjes/fjes_trace.h2
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mctp/mctp-i2c.c45
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c8
-rw-r--r--drivers/net/netconsole.c9
-rw-r--r--drivers/net/netdevsim/ethtool.c2
-rw-r--r--drivers/net/netdevsim/netdev.c3
-rw-r--r--drivers/net/netkit.c30
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/pcs/Kconfig6
-rw-r--r--drivers/net/pcs/Makefile3
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c460
-rw-r--r--drivers/net/pcs/pcs-xpcs.c361
-rw-r--r--drivers/net/pcs/pcs-xpcs.h7
-rw-r--r--drivers/net/phy/aquantia/Makefile2
-rw-r--r--drivers/net/phy/aquantia/aquantia.h84
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c4
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c150
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c140
-rw-r--r--drivers/net/phy/bcm-phy-lib.c115
-rw-r--r--drivers/net/phy/bcm-phy-lib.h4
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c5
-rw-r--r--drivers/net/phy/broadcom.c403
-rw-r--r--drivers/net/phy/dp83640.c4
-rw-r--r--drivers/net/phy/dp83td510.c264
-rw-r--r--drivers/net/phy/dp83tg720.c38
-rw-r--r--drivers/net/phy/micrel.c126
-rw-r--r--drivers/net/phy/microchip.c126
-rw-r--r--drivers/net/phy/microchip_t1.c2
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c5
-rw-r--r--drivers/net/phy/mxl-gpy.c58
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c5
-rw-r--r--drivers/net/phy/phy-core.c4
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c9
-rw-r--r--drivers/net/phy/phylink.c22
-rw-r--r--drivers/net/phy/realtek.c114
-rw-r--r--drivers/net/phy/sfp.c3
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c7
-rw-r--r--drivers/net/ppp/ppp_generic.c15
-rw-r--r--drivers/net/pse-pd/Kconfig1
-rw-r--r--drivers/net/pse-pd/pd692x0.c321
-rw-r--r--drivers/net/pse-pd/pse_core.c176
-rw-r--r--drivers/net/pse-pd/tps23881.c4
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/ax88179_178a.c24
-rw-r--r--drivers/net/usb/cdc_ncm.c47
-rw-r--r--drivers/net/usb/lan78xx.c12
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c21
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/usb/smsc75xx.c5
-rw-r--r--drivers/net/usb/smsc95xx.c11
-rw-r--r--drivers/net/virtio_net.c982
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h61
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c219
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h33
-rw-r--r--drivers/net/vrf.c56
-rw-r--r--drivers/net/vxlan/vxlan_core.c17
-rw-r--r--drivers/net/wireguard/allowedips.c4
-rw-r--r--drivers/net/wireguard/queueing.h4
-rw-r--r--drivers/net/wireguard/send.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c90
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c54
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c57
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c51
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c22
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c236
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c22
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c209
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h74
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c19
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c1540
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h567
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c83
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c40
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c169
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c169
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h21
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h73
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h9
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c23
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c852
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c11
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c39
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c8
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c19
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c783
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h632
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c1026
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.h62
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c21
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c58
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c)448
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h)17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c448
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h657
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c259
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c475
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c149
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c203
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c64
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h292
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c295
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c1185
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c1222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c1900
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h191
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c6
-rw-r--r--drivers/net/wireless/intersil/p54/main.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c8
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c10
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c58
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c147
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c141
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c1077
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c981
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h109
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c111
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c23
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c70
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h13
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c24
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c115
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h15
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c145
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c17
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c62
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h12
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188f.c15
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Kconfig12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c92
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c120
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c63
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c1212
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c3123
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c240
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c395
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c1675
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c372
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c17
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c31
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig4
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile6
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c80
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c29
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c128
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h58
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c45
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c145
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c124
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c45
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c20
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c165
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c109
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h56
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c190
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c1873
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.h122
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c2053
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h388
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c4019
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h22
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c490
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c106
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c33
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h30
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c71
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c13
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c120
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h10
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c79
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h8
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c20
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c10
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c2
-rw-r--r--drivers/nfc/microread/i2c.c2
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c2
-rw-r--r--drivers/nfc/nxp-nci/i2c.c2
-rw-r--r--drivers/nfc/pn533/i2c.c2
-rw-r--r--drivers/nfc/pn544/i2c.c2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c2
-rw-r--r--drivers/nfc/st-nci/i2c.c2
-rw-r--r--drivers/nfc/st21nfca/i2c.c2
-rw-r--r--drivers/nfc/virtual_ncidev.c4
-rw-r--r--drivers/nvdimm/btt.c17
-rw-r--r--drivers/nvdimm/pmem.c14
-rw-r--r--drivers/nvme/host/Kconfig1
-rw-r--r--drivers/nvme/host/apple.c33
-rw-r--r--drivers/nvme/host/constants.c2
-rw-r--r--drivers/nvme/host/core.c404
-rw-r--r--drivers/nvme/host/fabrics.c31
-rw-r--r--drivers/nvme/host/fabrics.h1
-rw-r--r--drivers/nvme/host/fault_inject.c2
-rw-r--r--drivers/nvme/host/fc.c55
-rw-r--r--drivers/nvme/host/ioctl.c30
-rw-r--r--drivers/nvme/host/multipath.c170
-rw-r--r--drivers/nvme/host/nvme.h37
-rw-r--r--drivers/nvme/host/pci.c50
-rw-r--r--drivers/nvme/host/pr.c10
-rw-r--r--drivers/nvme/host/rdma.c34
-rw-r--r--drivers/nvme/host/tcp.c31
-rw-r--r--drivers/nvme/host/zns.c3
-rw-r--r--drivers/nvme/target/Kconfig10
-rw-r--r--drivers/nvme/target/Makefile1
-rw-r--r--drivers/nvme/target/admin-cmd.c24
-rw-r--r--drivers/nvme/target/auth.c14
-rw-r--r--drivers/nvme/target/configfs.c49
-rw-r--r--drivers/nvme/target/core.c86
-rw-r--r--drivers/nvme/target/debugfs.c202
-rw-r--r--drivers/nvme/target/debugfs.h42
-rw-r--r--drivers/nvme/target/discovery.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c19
-rw-r--r--drivers/nvme/target/fabrics-cmd.c42
-rw-r--r--drivers/nvme/target/fc.c35
-rw-r--r--drivers/nvme/target/fcloop.c11
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c28
-rw-r--r--drivers/nvme/target/loop.c5
-rw-r--r--drivers/nvme/target/nvmet.h12
-rw-r--r--drivers/nvme/target/passthru.c16
-rw-r--r--drivers/nvme/target/rdma.c22
-rw-r--r--drivers/nvme/target/tcp.c18
-rw-r--r--drivers/nvme/target/zns.c30
-rw-r--r--drivers/nvmem/core.c7
-rw-r--r--drivers/nvmem/meson-efuse.c14
-rw-r--r--drivers/nvmem/rmem.c5
-rw-r--r--drivers/of/irq.c143
-rw-r--r--drivers/of/of_private.h3
-rw-r--r--drivers/of/of_test.c1
-rw-r--r--drivers/of/property.c30
-rw-r--r--drivers/opp/core.c15
-rw-r--r--drivers/opp/of.c32
-rw-r--r--drivers/opp/ti-opp-supply.c6
-rw-r--r--drivers/parport/parport_amiga.c8
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/access.c4
-rw-r--r--drivers/pci/bus.c9
-rw-r--r--drivers/pci/msi/msi.c10
-rw-r--r--drivers/pci/of.c14
-rw-r--r--drivers/pci/pci.c1
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/pwrctl/Kconfig17
-rw-r--r--drivers/pci/pwrctl/Makefile6
-rw-r--r--drivers/pci/pwrctl/core.c137
-rw-r--r--drivers/pci/pwrctl/pci-pwrctl-pwrseq.c89
-rw-r--r--drivers/pci/remove.c3
-rw-r--r--drivers/perf/Kconfig12
-rw-r--r--drivers/perf/Makefile3
-rw-r--r--drivers/perf/arm-ccn.c1
-rw-r--r--drivers/perf/arm-cmn.c116
-rw-r--r--drivers/perf/arm_cspmu/ampere_cspmu.c1
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c1
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c1
-rw-r--r--drivers/perf/arm_pmuv3.c26
-rw-r--r--drivers/perf/arm_v6_pmu.c (renamed from arch/arm/kernel/perf_event_v6.c)20
-rw-r--r--drivers/perf/arm_v7_pmu.c (renamed from arch/arm/kernel/perf_event_v7.c)13
-rw-r--r--drivers/perf/arm_xscale_pmu.c (renamed from arch/arm/kernel/perf_event_xscale.c)3
-rw-r--r--drivers/perf/cxl_pmu.c1
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c1
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c352
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c1
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c1
-rw-r--r--drivers/perf/riscv_pmu.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c44
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c189
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c68
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h1
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c4
-rw-r--r--drivers/platform/Kconfig2
-rw-r--r--drivers/platform/Makefile3
-rw-r--r--drivers/platform/arm64/Kconfig17
-rw-r--r--drivers/platform/arm64/Makefile1
-rw-r--r--drivers/platform/arm64/lenovo-yoga-c630.c291
-rw-r--r--drivers/platform/chrome/Kconfig2
-rw-r--r--drivers/platform/chrome/cros_ec.c4
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c9
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c210
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.c91
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.h18
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c95
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c9
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c40
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c22
-rw-r--r--drivers/platform/cznic/Kconfig50
-rw-r--r--drivers/platform/cznic/Makefile8
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c408
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c1095
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c260
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-trng.c103
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-watchdog.c130
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h194
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c5
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp.c50
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h2
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c73
-rw-r--r--drivers/platform/x86/amilo-rfkill.c1
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c10
-rw-r--r--drivers/platform/x86/asus-wmi.c41
-rw-r--r--drivers/platform/x86/dell/Kconfig13
-rw-r--r--drivers/platform/x86/dell/Makefile1
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c23
-rw-r--r--drivers/platform/x86/dell/dell-pc.c309
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c138
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h7
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c1
-rw-r--r--drivers/platform/x86/hp/Kconfig1
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c18
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/int-attributes.c7
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c18
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c19
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c3
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/string-attributes.c12
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c188
-rw-r--r--drivers/platform/x86/ibm_rtl.c1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c71
-rw-r--r--drivers/platform/x86/intel/Kconfig11
-rw-r--r--drivers/platform/x86/intel/Makefile4
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c6
-rw-r--r--drivers/platform/x86/intel/hid.c1
-rw-r--r--drivers/platform/x86/intel/ifs/core.c15
-rw-r--r--drivers/platform/x86/intel/intel_plr_tpmi.c354
-rw-r--r--drivers/platform/x86/intel/pmc/core.c262
-rw-r--r--drivers/platform/x86/intel/pmc/pltdrv.c17
-rw-r--r--drivers/platform/x86/intel/rst.c1
-rw-r--r--drivers/platform/x86/intel/smartconnect.c1
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c75
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.h3
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c4
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c2
-rw-r--r--drivers/platform/x86/intel/telemetry/debugfs.c4
-rw-r--r--drivers/platform/x86/intel/telemetry/pltdrv.c4
-rw-r--r--drivers/platform/x86/intel/tpmi.c11
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c235
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.h18
-rw-r--r--drivers/platform/x86/intel/turbo_max_3.c4
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c83
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h13
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c101
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c108
-rw-r--r--drivers/platform/x86/intel/vbtn.c1
-rw-r--r--drivers/platform/x86/intel_ips.c3
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c2
-rw-r--r--drivers/platform/x86/lg-laptop.c89
-rw-r--r--drivers/platform/x86/p2sb.c2
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c4
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc.c1
-rw-r--r--drivers/platform/x86/think-lmi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c6
-rw-r--r--drivers/platform/x86/toshiba_acpi.c32
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c170
-rw-r--r--drivers/platform/x86/uv_sysfs.c1
-rw-r--r--drivers/platform/x86/wireless-hotkey.c3
-rw-r--r--drivers/platform/x86/wmi.c33
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig2
-rw-r--r--drivers/platform/x86/xo1-rfkill.c1
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c1
-rw-r--r--drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c1
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c65
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c1
-rw-r--r--drivers/pmdomain/core.c80
-rw-r--r--drivers/pmdomain/imx/gpcv2.c11
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c7
-rw-r--r--drivers/pmdomain/renesas/rmobile-sysc.c8
-rw-r--r--drivers/pnp/base.h1
-rw-r--r--drivers/pnp/driver.c6
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/sequencing/Kconfig29
-rw-r--r--drivers/power/sequencing/Makefile6
-rw-r--r--drivers/power/sequencing/core.c1105
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c336
-rw-r--r--drivers/power/supply/Kconfig12
-rw-r--r--drivers/power/supply/Makefile1
-rw-r--r--drivers/power/supply/cros_charge-control.c352
-rw-r--r--drivers/power/supply/power_supply_leds.c23
-rw-r--r--drivers/powercap/idle_inject.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c120
-rw-r--r--drivers/powercap/intel_rapl_msr.c16
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/ptp/ptp_ines.c2
-rw-r--r--drivers/ptp/ptp_sysfs.c3
-rw-r--r--drivers/ptp/ptp_vmw.c12
-rw-r--r--drivers/pwm/Kconfig24
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/core.c181
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c113
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c242
-rw-r--r--drivers/pwm/pwm-cros-ec.c64
-rw-r--r--drivers/pwm/pwm-gpio.c241
-rw-r--r--drivers/pwm/pwm-imx-tpm.c16
-rw-r--r--drivers/pwm/pwm-imx1.c1
-rw-r--r--drivers/pwm/pwm-imx27.c1
-rw-r--r--drivers/pwm/pwm-intel-lgm.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c9
-rw-r--r--drivers/pwm/pwm-lpss-pci.c22
-rw-r--r--drivers/pwm/pwm-lpss-platform.c10
-rw-r--r--drivers/pwm/pwm-mediatek.c1
-rw-r--r--drivers/pwm/pwm-meson.c39
-rw-r--r--drivers/pwm/pwm-pxa.c1
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-spear.c1
-rw-r--r--drivers/pwm/pwm-stm32.c50
-rw-r--r--drivers/pwm/pwm-visconti.c1
-rw-r--r--drivers/pwm/pwm-xilinx.c27
-rw-r--r--drivers/ras/amd/atl/core.c52
-rw-r--r--drivers/ras/amd/atl/dehash.c43
-rw-r--r--drivers/ras/amd/atl/denormalize.c561
-rw-r--r--drivers/ras/amd/atl/internal.h50
-rw-r--r--drivers/ras/amd/atl/map.c97
-rw-r--r--drivers/ras/amd/atl/system.c23
-rw-r--r--drivers/ras/amd/atl/umc.c160
-rw-r--r--drivers/ras/amd/fmpm.c4
-rw-r--r--drivers/regulator/88pm886-regulator.c392
-rw-r--r--drivers/regulator/Kconfig34
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/axp20x-regulator.c33
-rw-r--r--drivers/regulator/bd71815-regulator.c2
-rw-r--r--drivers/regulator/bd96801-regulator.c908
-rw-r--r--drivers/regulator/core.c29
-rw-r--r--drivers/regulator/da9121-regulator.c7
-rw-r--r--drivers/regulator/da9210-regulator.c4
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/lp8755.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max20411-regulator.c5
-rw-r--r--drivers/regulator/max77503-regulator.c8
-rw-r--r--drivers/regulator/max77857-regulator.c2
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8893.c4
-rw-r--r--drivers/regulator/max8952.c4
-rw-r--r--drivers/regulator/mcp16502.c2
-rw-r--r--drivers/regulator/mt6311-regulator.c4
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c248
-rw-r--r--drivers/regulator/pca9450-regulator.c41
-rw-r--r--drivers/regulator/pf8x00-regulator.c8
-rw-r--r--drivers/regulator/pv88060-regulator.c4
-rw-r--r--drivers/regulator/pv88090-regulator.c4
-rw-r--r--drivers/regulator/qcom-pm8008-regulator.c198
-rw-r--r--drivers/regulator/renesas-usb-vbus-regulator.c74
-rw-r--r--drivers/regulator/rt4831-regulator.c1
-rw-r--r--drivers/regulator/rtq2208-regulator.c66
-rw-r--r--drivers/regulator/slg51000-regulator.c4
-rw-r--r--drivers/regulator/stm32-pwr.c1
-rw-r--r--drivers/regulator/sy8106a-regulator.c4
-rw-r--r--drivers/regulator/tps6286x-regulator.c11
-rw-r--r--drivers/regulator/tps6287x-regulator.c10
-rw-r--r--drivers/regulator/tps6594-regulator.c12
-rw-r--r--drivers/regulator/userspace-consumer.c6
-rw-r--r--drivers/reset/Kconfig15
-rw-r--r--drivers/reset/Makefile7
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c1
-rw-r--r--drivers/reset/reset-imx8mp-audiomix.c128
-rw-r--r--drivers/reset/reset-meson-audio-arb.c9
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c63
-rw-r--r--drivers/reset/sti/Kconfig4
-rw-r--r--drivers/reset/tegra/Kconfig3
-rw-r--r--drivers/s390/block/dasd_eckd.c4
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/scm_blk.c5
-rw-r--r--drivers/s390/char/sclp.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c9
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/qeth_ethtool.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c31
-rw-r--r--drivers/scsi/iscsi_tcp.c8
-rw-r--r--drivers/scsi/libsas/sas_ata.c28
-rw-r--r--drivers/scsi/libsas/sas_discover.c4
-rw-r--r--drivers/scsi/libsas/sas_internal.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c11
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c62
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c14
-rw-r--r--drivers/scsi/qedf/qedf.h1
-rw-r--r--drivers/scsi/qedf/qedf_main.c47
-rw-r--r--drivers/scsi/scsi.c14
-rw-r--r--drivers/scsi/scsi_debug.c594
-rw-r--r--drivers/scsi/scsi_lib.c9
-rw-r--r--drivers/scsi/scsi_trace.c22
-rw-r--r--drivers/scsi/scsi_transport_sas.c23
-rw-r--r--drivers/scsi/sd.c399
-rw-r--r--drivers/scsi/sd.h31
-rw-r--r--drivers/scsi/sd_dif.c45
-rw-r--r--drivers/scsi/sd_zbc.c52
-rw-r--r--drivers/scsi/sr.c42
-rw-r--r--drivers/scsi/sr.h2
-rw-r--r--drivers/scsi/sr_ioctl.c5
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c1
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c2
-rw-r--r--drivers/soc/fsl/Kconfig2
-rw-r--r--drivers/soc/fsl/qbman/Kconfig2
-rw-r--r--drivers/soc/imx/soc-imx8m.c1
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c1
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c1
-rw-r--r--drivers/soc/litex/Kconfig2
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c4
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c35
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c1
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c1
-rw-r--r--drivers/soc/qcom/Kconfig18
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/icc-bwmon.c16
-rw-r--r--drivers/soc/qcom/llcc-qcom.c57
-rw-r--r--drivers/soc/qcom/mdt_loader.c6
-rw-r--r--drivers/soc/qcom/ocmem.c10
-rw-r--r--drivers/soc/qcom/pdr_interface.c46
-rw-r--r--drivers/soc/qcom/pdr_internal.h318
-rw-r--r--drivers/soc/qcom/pmic_glink.c17
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c2
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c677
-rw-r--r--drivers/soc/qcom/qcom_pdr_msg.c353
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c7
-rw-r--r--drivers/soc/qcom/rpmh.c1
-rw-r--r--drivers/soc/qcom/smem.c33
-rw-r--r--drivers/soc/qcom/smp2p.c11
-rw-r--r--drivers/soc/qcom/smsm.c51
-rw-r--r--drivers/soc/qcom/socinfo.c14
-rw-r--r--drivers/soc/qcom/spm.c1
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c11
-rw-r--r--drivers/soc/samsung/exynos-pmu.c60
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c4
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c4
-rw-r--r--drivers/soc/tegra/pmc.c8
-rw-r--r--drivers/soc/ti/k3-socinfo.c2
-rw-r--r--drivers/soc/ti/knav_qmss.h2
-rw-r--r--drivers/soc/ti/knav_qmss_acc.c2
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c2
-rw-r--r--drivers/soc/ti/pm33xx.c4
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c16
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c155
-rw-r--r--drivers/soundwire/amd_manager.c3
-rw-r--r--drivers/soundwire/intel_auxdevice.c6
-rw-r--r--drivers/soundwire/mipi_disco.c30
-rw-r--r--drivers/spi/Kconfig6
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/atmel-quadspi.c11
-rw-r--r--drivers/spi/internals.h8
-rw-r--r--drivers/spi/spi-altera-core.c1
-rw-r--r--drivers/spi/spi-axi-spi-engine.c68
-rw-r--r--drivers/spi/spi-bitbang.c73
-rw-r--r--drivers/spi/spi-cadence-xspi.c20
-rw-r--r--drivers/spi/spi-cadence.c27
-rw-r--r--drivers/spi/spi-ch341.c241
-rw-r--r--drivers/spi/spi-cs42l43.c99
-rw-r--r--drivers/spi/spi-davinci.c6
-rw-r--r--drivers/spi/spi-dw-bt1.c10
-rw-r--r--drivers/spi/spi-dw-core.c4
-rw-r--r--drivers/spi/spi-fsl-cpm.c1
-rw-r--r--drivers/spi/spi-fsl-dspi.c19
-rw-r--r--drivers/spi/spi-fsl-lib.c1
-rw-r--r--drivers/spi/spi-fsl-lpspi.c8
-rw-r--r--drivers/spi/spi-gpio.c66
-rw-r--r--drivers/spi/spi-imx.c36
-rw-r--r--drivers/spi/spi-ingenic.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c22
-rw-r--r--drivers/spi/spi-microchip-core.c6
-rw-r--r--drivers/spi/spi-mux.c2
-rw-r--r--drivers/spi/spi-mxic.c2
-rw-r--r--drivers/spi/spi-omap-uwire.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c24
-rw-r--r--drivers/spi/spi-pci1xxxx.c5
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c39
-rw-r--r--drivers/spi/spi-pxa2xx-platform.c214
-rw-r--r--drivers/spi/spi-pxa2xx.c259
-rw-r--r--drivers/spi/spi-pxa2xx.h6
-rw-r--r--drivers/spi/spi-qup.c10
-rw-r--r--drivers/spi/spi-rpc-if.c12
-rw-r--r--drivers/spi/spi-stm32-qspi.c12
-rw-r--r--drivers/spi/spi-stm32.c16
-rw-r--r--drivers/spi/spi-wpcm-fiu.c6
-rw-r--r--drivers/spi/spi-xcomm.c75
-rw-r--r--drivers/spi/spi.c147
-rw-r--r--drivers/staging/greybus/gpio.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c10
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c7
-rw-r--r--drivers/staging/vt6655/device_main.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/target/target_core_iblock.c49
-rw-r--r--drivers/tee/optee/ffa_abi.c12
-rw-r--r--drivers/tee/optee/notif.c9
-rw-r--r--drivers/tee/optee/optee_private.h5
-rw-r--r--drivers/tee/optee/optee_rpc_cmd.h1
-rw-r--r--drivers/tee/optee/rpc.c10
-rw-r--r--drivers/thermal/Kconfig28
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c49
-rw-r--r--drivers/thermal/gov_bang_bang.c14
-rw-r--r--drivers/thermal/gov_power_allocator.c3
-rw-r--r--drivers/thermal/gov_step_wise.c6
-rw-r--r--drivers/thermal/hisi_thermal.c9
-rw-r--r--drivers/thermal/imx_thermal.c55
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c11
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c22
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c119
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c113
-rw-r--r--drivers/thermal/intel/intel_hfi.c30
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c5
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c28
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c15
-rw-r--r--drivers/thermal/intel/intel_soc_dts_thermal.c2
-rw-r--r--drivers/thermal/intel/intel_tcc.c177
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c32
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c9
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c111
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c48
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c9
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c10
-rw-r--r--drivers/thermal/qcom/tsens.c8
-rw-r--r--drivers/thermal/renesas/Kconfig28
-rw-r--r--drivers/thermal/renesas/Makefile5
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c (renamed from drivers/thermal/rcar_gen3_thermal.c)2
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c (renamed from drivers/thermal/rcar_thermal.c)2
-rw-r--r--drivers/thermal/renesas/rzg2l_thermal.c (renamed from drivers/thermal/rzg2l_thermal.c)2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c54
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c10
-rw-r--r--drivers/thermal/tegra/soctherm.c15
-rw-r--r--drivers/thermal/thermal-generic-adc.c27
-rw-r--r--drivers/thermal/thermal_core.c109
-rw-r--r--drivers/thermal/thermal_core.h16
-rw-r--r--drivers/thermal/thermal_debugfs.c128
-rw-r--r--drivers/thermal/thermal_debugfs.h2
-rw-r--r--drivers/thermal/thermal_helpers.c47
-rw-r--r--drivers/thermal/thermal_sysfs.c21
-rw-r--r--drivers/thermal/thermal_trip.c71
-rw-r--r--drivers/thermal/uniphier_thermal.c39
-rw-r--r--drivers/thunderbolt/debugfs.c5
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_tty.c22
-rw-r--r--drivers/tty/serial/8250/8250_core.c5
-rw-r--r--drivers/tty/serial/8250/8250_dw.c36
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c3
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h33
-rw-r--r--drivers/tty/serial/8250/8250_omap.c22
-rw-r--r--drivers/tty/serial/8250/8250_pci.c13
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c1
-rw-r--r--drivers/tty/serial/Kconfig3
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c7
-rw-r--r--drivers/tty/serial/imx.c61
-rw-r--r--drivers/tty/serial/ma35d1_serial.c13
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c51
-rw-r--r--drivers/tty/serial/serial_base.h30
-rw-r--r--drivers/tty/serial/serial_base_bus.c129
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/serial/serial_port.c7
-rw-r--r--drivers/ufs/core/ufs-mcq.c28
-rw-r--r--drivers/ufs/core/ufshcd.c19
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c14
-rw-r--r--drivers/usb/chipidea/core.c8
-rw-r--r--drivers/usb/chipidea/ulpi.c5
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/core/config.c18
-rw-r--r--drivers/usb/core/hcd.c12
-rw-r--r--drivers/usb/core/of.c7
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc3/core.c26
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c8
-rw-r--r--drivers/usb/gadget/configfs.c3
-rw-r--r--drivers/usb/gadget/function/f_printer.c40
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c4
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-ring.c59
-rw-r--r--drivers/usb/host/xhci.c16
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/da8xx.c8
-rw-r--r--drivers/usb/serial/mos7840.c45
-rw-r--r--drivers/usb/serial/option.c38
-rw-r--r--drivers/usb/storage/alauda.c9
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c5
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c61
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c19
-rw-r--r--drivers/vfio/device_cdev.c7
-rw-r--r--drivers/vfio/group.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c273
-rw-r--r--drivers/vfio/vfio_main.c44
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/aat2870_bl.c4
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/ams369fg06.c23
-rw-r--r--drivers/video/backlight/bd6107.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c4
-rw-r--r--drivers/video/backlight/gpio_backlight.c9
-rw-r--r--drivers/video/backlight/ipaq_micro_bl.c3
-rw-r--r--drivers/video/backlight/jornada720_bl.c3
-rw-r--r--drivers/video/backlight/kb3886_bl.c4
-rw-r--r--drivers/video/backlight/ktd253-backlight.c5
-rw-r--r--drivers/video/backlight/ktz8866.c4
-rw-r--r--drivers/video/backlight/led_bl.c4
-rw-r--r--drivers/video/backlight/lm3509_bl.c343
-rw-r--r--drivers/video/backlight/lm3533_bl.c3
-rw-r--r--drivers/video/backlight/lm3630a_bl.c2
-rw-r--r--drivers/video/backlight/lm3639_bl.c2
-rw-r--r--drivers/video/backlight/lv5207lp.c2
-rw-r--r--drivers/video/backlight/mp3309c.c6
-rw-r--r--drivers/video/backlight/pandora_bl.c3
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c5
-rw-r--r--drivers/video/backlight/platform_lcd.c1
-rw-r--r--drivers/video/backlight/pwm_bl.c4
-rw-r--r--drivers/video/backlight/rave-sp-backlight.c2
-rw-r--r--drivers/video/backlight/rt4831-backlight.c1
-rw-r--r--drivers/video/backlight/sky81452-backlight.c2
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c211
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c26
-rw-r--r--drivers/virt/coco/tsm.c177
-rw-r--r--drivers/watchdog/Kconfig14
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bd96801_wdt.c417
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c1
-rw-r--r--drivers/watchdog/ts4800_wdt.c1
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
-rw-r--r--drivers/xen/evtchn.c1
-rw-r--r--drivers/xen/manage.c2
-rw-r--r--drivers/xen/privcmd-buf.c1
-rw-r--r--drivers/xen/privcmd.c36
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c1
-rw-r--r--drivers/zorro/zorro.c3
-rw-r--r--fs/9p/vfs_dentry.c9
-rw-r--r--fs/9p/vfs_inode.c1
-rw-r--r--fs/Kconfig.binfmt8
-rw-r--r--fs/affs/amigaffs.h6
-rw-r--r--fs/afs/inode.c5
-rw-r--r--fs/afs/mntpt.c5
-rw-r--r--fs/aio.c8
-rw-r--r--fs/attr.c2
-rw-r--r--fs/autofs/init.c1
-rw-r--r--fs/autofs/inode.c16
-rw-r--r--fs/bcachefs/alloc_background.c359
-rw-r--r--fs/bcachefs/alloc_background.h14
-rw-r--r--fs/bcachefs/alloc_foreground.c6
-rw-r--r--fs/bcachefs/backpointers.c72
-rw-r--r--fs/bcachefs/bcachefs.h68
-rw-r--r--fs/bcachefs/bcachefs_format.h208
-rw-r--r--fs/bcachefs/bkey.c7
-rw-r--r--fs/bcachefs/bkey.h7
-rw-r--r--fs/bcachefs/bkey_methods.c6
-rw-r--r--fs/bcachefs/bkey_methods.h3
-rw-r--r--fs/bcachefs/btree_cache.c9
-rw-r--r--fs/bcachefs/btree_gc.c89
-rw-r--r--fs/bcachefs/btree_gc.h44
-rw-r--r--fs/bcachefs/btree_gc_types.h29
-rw-r--r--fs/bcachefs/btree_io.c93
-rw-r--r--fs/bcachefs/btree_iter.c42
-rw-r--r--fs/bcachefs/btree_key_cache.c43
-rw-r--r--fs/bcachefs/btree_locking.c11
-rw-r--r--fs/bcachefs/btree_locking.h22
-rw-r--r--fs/bcachefs/btree_node_scan.c9
-rw-r--r--fs/bcachefs/btree_types.h17
-rw-r--r--fs/bcachefs/btree_write_buffer.c37
-rw-r--r--fs/bcachefs/btree_write_buffer.h3
-rw-r--r--fs/bcachefs/buckets.c297
-rw-r--r--fs/bcachefs/buckets.h25
-rw-r--r--fs/bcachefs/buckets_types.h2
-rw-r--r--fs/bcachefs/chardev.c9
-rw-r--r--fs/bcachefs/clock.c7
-rw-r--r--fs/bcachefs/data_update.c47
-rw-r--r--fs/bcachefs/data_update.h5
-rw-r--r--fs/bcachefs/debug.c113
-rw-r--r--fs/bcachefs/disk_groups_format.h21
-rw-r--r--fs/bcachefs/ec.c28
-rw-r--r--fs/bcachefs/errcode.h3
-rw-r--r--fs/bcachefs/error.c19
-rw-r--r--fs/bcachefs/error.h7
-rw-r--r--fs/bcachefs/extents.c9
-rw-r--r--fs/bcachefs/eytzinger.h6
-rw-r--r--fs/bcachefs/fs-io-buffered.c6
-rw-r--r--fs/bcachefs/fs-io-direct.c4
-rw-r--r--fs/bcachefs/fs-ioctl.c19
-rw-r--r--fs/bcachefs/fs.c45
-rw-r--r--fs/bcachefs/fsck.c54
-rw-r--r--fs/bcachefs/io_misc.c2
-rw-r--r--fs/bcachefs/io_read.c41
-rw-r--r--fs/bcachefs/io_write.c19
-rw-r--r--fs/bcachefs/journal.c26
-rw-r--r--fs/bcachefs/journal.h2
-rw-r--r--fs/bcachefs/journal_io.c32
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c2
-rw-r--r--fs/bcachefs/journal_seq_blacklist_format.h15
-rw-r--r--fs/bcachefs/lru.c39
-rw-r--r--fs/bcachefs/lru.h6
-rw-r--r--fs/bcachefs/mean_and_variance_test.c1
-rw-r--r--fs/bcachefs/move.c41
-rw-r--r--fs/bcachefs/movinggc.c7
-rw-r--r--fs/bcachefs/opts.h2
-rw-r--r--fs/bcachefs/recovery.c12
-rw-r--r--fs/bcachefs/replicas_format.h31
-rw-r--r--fs/bcachefs/sb-downgrade.c15
-rw-r--r--fs/bcachefs/sb-downgrade_format.h17
-rw-r--r--fs/bcachefs/sb-errors.c14
-rw-r--r--fs/bcachefs/sb-errors_format.h310
-rw-r--r--fs/bcachefs/sb-errors_types.h281
-rw-r--r--fs/bcachefs/sb-members_format.h110
-rw-r--r--fs/bcachefs/seqmutex.h11
-rw-r--r--fs/bcachefs/snapshot.c100
-rw-r--r--fs/bcachefs/snapshot.h1
-rw-r--r--fs/bcachefs/str_hash.h2
-rw-r--r--fs/bcachefs/super-io.c25
-rw-r--r--fs/bcachefs/super.c40
-rw-r--r--fs/bcachefs/util.c25
-rw-r--r--fs/bcachefs/util.h1
-rw-r--r--fs/befs/linuxvfs.c10
-rw-r--r--fs/binfmt_elf.c101
-rw-r--r--fs/binfmt_elf_fdpic.c5
-rw-r--r--fs/binfmt_misc.c8
-rw-r--r--fs/binfmt_script.c1
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/accessors.h15
-rw-r--r--fs/btrfs/bio.c8
-rw-r--r--fs/btrfs/block-group.c71
-rw-r--r--fs/btrfs/block-group.h3
-rw-r--r--fs/btrfs/btrfs_inode.h166
-rw-r--r--fs/btrfs/compression.c25
-rw-r--r--fs/btrfs/compression.h2
-rw-r--r--fs/btrfs/ctree.c108
-rw-r--r--fs/btrfs/ctree.h18
-rw-r--r--fs/btrfs/defrag.c18
-rw-r--r--fs/btrfs/delalloc-space.c2
-rw-r--r--fs/btrfs/delalloc-space.h2
-rw-r--r--fs/btrfs/delayed-inode.c47
-rw-r--r--fs/btrfs/delayed-inode.h10
-rw-r--r--fs/btrfs/delayed-ref.c51
-rw-r--r--fs/btrfs/delayed-ref.h8
-rw-r--r--fs/btrfs/dev-replace.c4
-rw-r--r--fs/btrfs/dir-item.c8
-rw-r--r--fs/btrfs/dir-item.h6
-rw-r--r--fs/btrfs/direct-io.c1052
-rw-r--r--fs/btrfs/direct-io.h14
-rw-r--r--fs/btrfs/disk-io.c140
-rw-r--r--fs/btrfs/disk-io.h18
-rw-r--r--fs/btrfs/export.c6
-rw-r--r--fs/btrfs/extent-io-tree.c4
-rw-r--r--fs/btrfs/extent-tree.c685
-rw-r--r--fs/btrfs/extent-tree.h8
-rw-r--r--fs/btrfs/extent_io.c1154
-rw-r--r--fs/btrfs/extent_io.h19
-rw-r--r--fs/btrfs/extent_map.c366
-rw-r--r--fs/btrfs/extent_map.h54
-rw-r--r--fs/btrfs/fiemap.c930
-rw-r--r--fs/btrfs/fiemap.h11
-rw-r--r--fs/btrfs/file-item.c52
-rw-r--r--fs/btrfs/file.c365
-rw-r--r--fs/btrfs/file.h4
-rw-r--r--fs/btrfs/free-space-cache.c14
-rw-r--r--fs/btrfs/free-space-tree.c10
-rw-r--r--fs/btrfs/fs.h18
-rw-r--r--fs/btrfs/inode-item.c4
-rw-r--r--fs/btrfs/inode.c1461
-rw-r--r--fs/btrfs/ioctl.c96
-rw-r--r--fs/btrfs/ioctl.h2
-rw-r--r--fs/btrfs/locking.h1
-rw-r--r--fs/btrfs/lru_cache.h1
-rw-r--r--fs/btrfs/lzo.c43
-rw-r--r--fs/btrfs/messages.c3
-rw-r--r--fs/btrfs/misc.h4
-rw-r--r--fs/btrfs/ordered-data.c177
-rw-r--r--fs/btrfs/ordered-data.h27
-rw-r--r--fs/btrfs/print-tree.c10
-rw-r--r--fs/btrfs/props.c20
-rw-r--r--fs/btrfs/props.h4
-rw-r--r--fs/btrfs/qgroup.c235
-rw-r--r--fs/btrfs/qgroup.h25
-rw-r--r--fs/btrfs/raid-stripe-tree.c13
-rw-r--r--fs/btrfs/raid-stripe-tree.h3
-rw-r--r--fs/btrfs/raid56.c118
-rw-r--r--fs/btrfs/ref-verify.c9
-rw-r--r--fs/btrfs/reflink.c8
-rw-r--r--fs/btrfs/relocation.c157
-rw-r--r--fs/btrfs/scrub.c37
-rw-r--r--fs/btrfs/send.c49
-rw-r--r--fs/btrfs/send.h4
-rw-r--r--fs/btrfs/space-info.c269
-rw-r--r--fs/btrfs/space-info.h48
-rw-r--r--fs/btrfs/subpage.c162
-rw-r--r--fs/btrfs/subpage.h9
-rw-r--r--fs/btrfs/super.c51
-rw-r--r--fs/btrfs/super.h2
-rw-r--r--fs/btrfs/sysfs.c85
-rw-r--r--fs/btrfs/tests/btrfs-tests.c5
-rw-r--r--fs/btrfs/tests/extent-map-tests.c120
-rw-r--r--fs/btrfs/tests/inode-tests.c176
-rw-r--r--fs/btrfs/transaction.c31
-rw-r--r--fs/btrfs/transaction.h9
-rw-r--r--fs/btrfs/tree-checker.c37
-rw-r--r--fs/btrfs/tree-log.c132
-rw-r--r--fs/btrfs/tree-log.h6
-rw-r--r--fs/btrfs/ulist.c21
-rw-r--r--fs/btrfs/ulist.h2
-rw-r--r--fs/btrfs/uuid-tree.c10
-rw-r--r--fs/btrfs/uuid-tree.h4
-rw-r--r--fs/btrfs/volumes.c62
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/btrfs/xattr.c4
-rw-r--r--fs/btrfs/xattr.h2
-rw-r--r--fs/btrfs/zlib.c56
-rw-r--r--fs/btrfs/zoned.c30
-rw-r--r--fs/btrfs/zoned.h11
-rw-r--r--fs/btrfs/zstd.c70
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/cachefiles/cache.c45
-rw-r--r--fs/cachefiles/daemon.c7
-rw-r--r--fs/cachefiles/internal.h8
-rw-r--r--fs/cachefiles/ondemand.c270
-rw-r--r--fs/cachefiles/volume.c1
-rw-r--r--fs/cachefiles/xattr.c5
-rw-r--r--fs/coda/symlink.c10
-rw-r--r--fs/configfs/dir.c10
-rw-r--r--fs/cramfs/inode.c26
-rw-r--r--fs/dcache.c105
-rw-r--r--fs/debugfs/inode.c26
-rw-r--r--fs/dlm/ast.c172
-rw-r--r--fs/dlm/ast.h11
-rw-r--r--fs/dlm/config.c2
-rw-r--r--fs/dlm/debug_fs.c10
-rw-r--r--fs/dlm/dlm_internal.h60
-rw-r--r--fs/dlm/lock.c568
-rw-r--r--fs/dlm/lock.h7
-rw-r--r--fs/dlm/lockspace.c131
-rw-r--r--fs/dlm/lowcomms.c8
-rw-r--r--fs/dlm/lowcomms.h2
-rw-r--r--fs/dlm/member.c2
-rw-r--r--fs/dlm/memory.c10
-rw-r--r--fs/dlm/midcomms.c4
-rw-r--r--fs/dlm/midcomms.h2
-rw-r--r--fs/dlm/recover.c78
-rw-r--r--fs/dlm/recover.h2
-rw-r--r--fs/dlm/recoverd.c14
-rw-r--r--fs/dlm/user.c42
-rw-r--r--fs/efivarfs/super.c12
-rw-r--r--fs/efs/inode.c1
-rw-r--r--fs/efs/symlink.c14
-rw-r--r--fs/erofs/compress.h61
-rw-r--r--fs/erofs/decompressor.c148
-rw-r--r--fs/erofs/decompressor_deflate.c149
-rw-r--r--fs/erofs/decompressor_lzma.c166
-rw-r--r--fs/erofs/decompressor_zstd.c154
-rw-r--r--fs/erofs/internal.h48
-rw-r--r--fs/erofs/super.c36
-rw-r--r--fs/erofs/zdata.c346
-rw-r--r--fs/erofs/zmap.c6
-rw-r--r--fs/erofs/zutil.c8
-rw-r--r--fs/exec.c63
-rw-r--r--fs/exec_test.c141
-rw-r--r--fs/exfat/dir.c2
-rw-r--r--fs/exfat/file.c22
-rw-r--r--fs/exfat/super.c10
-rw-r--r--fs/exportfs/expfs.c9
-rw-r--r--fs/ext2/balloc.c11
-rw-r--r--fs/ext4/crypto.c10
-rw-r--r--fs/ext4/ext4.h35
-rw-r--r--fs/ext4/namei.c122
-rw-r--r--fs/ext4/super.c26
-rw-r--r--fs/f2fs/acl.c3
-rw-r--r--fs/f2fs/dir.c105
-rw-r--r--fs/f2fs/f2fs.h16
-rw-r--r--fs/f2fs/file.c6
-rw-r--r--fs/f2fs/namei.c10
-rw-r--r--fs/f2fs/recovery.c9
-rw-r--r--fs/f2fs/super.c8
-rw-r--r--fs/fat/fat.h18
-rw-r--r--fs/fat/fat_test.c1
-rw-r--r--fs/fat/inode.c675
-rw-r--r--fs/fat/namei_msdos.c38
-rw-r--r--fs/fat/namei_vfat.c38
-rw-r--r--fs/fhandle.c178
-rw-r--r--fs/file.c4
-rw-r--r--fs/fs_parser.c34
-rw-r--r--fs/fsopen.c7
-rw-r--r--fs/fuse/acl.c4
-rw-r--r--fs/fuse/inode.c24
-rw-r--r--fs/gfs2/glock.c227
-rw-r--r--fs/gfs2/glock.h1
-rw-r--r--fs/gfs2/glops.c42
-rw-r--r--fs/gfs2/incore.h12
-rw-r--r--fs/gfs2/lock_dlm.c28
-rw-r--r--fs/gfs2/ops_fstype.c13
-rw-r--r--fs/gfs2/quota.c388
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/gfs2/trace_gfs2.h6
-rw-r--r--fs/gfs2/util.c12
-rw-r--r--fs/hfs/inode.c3
-rw-r--r--fs/hfs/super.c1
-rw-r--r--fs/hfsplus/bfind.c15
-rw-r--r--fs/hfsplus/extents.c9
-rw-r--r--fs/hfsplus/hfsplus_fs.h21
-rw-r--r--fs/hfsplus/ioctl.c4
-rw-r--r--fs/hfsplus/xattr.c2
-rw-r--r--fs/hostfs/hostfs_kern.c106
-rw-r--r--fs/hpfs/namei.c15
-rw-r--r--fs/hpfs/super.c1
-rw-r--r--fs/hugetlbfs/inode.c12
-rw-r--r--fs/inode.c109
-rw-r--r--fs/internal.h16
-rw-r--r--fs/iomap/buffered-io.c33
-rw-r--r--fs/isofs/inode.c17
-rw-r--r--fs/isofs/rock.c17
-rw-r--r--fs/jbd2/journal.c1
-rw-r--r--fs/jffs2/file.c14
-rw-r--r--fs/jfs/xattr.c4
-rw-r--r--fs/libfs.c74
-rw-r--r--fs/lockd/Makefile9
-rw-r--r--fs/locks.c11
-rw-r--r--fs/minix/inode.c1
-rw-r--r--fs/minix/namei.c3
-rw-r--r--fs/mount.h3
-rw-r--r--fs/mpage.c13
-rw-r--r--fs/namei.c249
-rw-r--r--fs/namespace.c524
-rw-r--r--fs/netfs/buffered_read.c14
-rw-r--r--fs/netfs/buffered_write.c26
-rw-r--r--fs/netfs/direct_read.c2
-rw-r--r--fs/netfs/direct_write.c13
-rw-r--r--fs/netfs/fscache_cache.c4
-rw-r--r--fs/netfs/fscache_cookie.c28
-rw-r--r--fs/netfs/fscache_io.c12
-rw-r--r--fs/netfs/fscache_main.c2
-rw-r--r--fs/netfs/fscache_volume.c18
-rw-r--r--fs/netfs/internal.h44
-rw-r--r--fs/netfs/io.c12
-rw-r--r--fs/netfs/main.c4
-rw-r--r--fs/netfs/misc.c85
-rw-r--r--fs/netfs/objects.c5
-rw-r--r--fs/netfs/write_collect.c23
-rw-r--r--fs/netfs/write_issue.c47
-rw-r--r--fs/nfs/dir.c77
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/nfs4proc.c24
-rw-r--r--fs/nfs/pagelist.c5
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/symlink.c12
-rw-r--r--fs/nfs/write.c1
-rw-r--r--fs/nfsd/Kconfig2
-rw-r--r--fs/nfsd/filecache.c2
-rw-r--r--fs/nfsd/netlink.c19
-rw-r--r--fs/nfsd/netlink.h5
-rw-r--r--fs/nfsd/nfs2acl.c2
-rw-r--r--fs/nfsd/nfs3acl.c2
-rw-r--r--fs/nfsd/nfs4proc.c5
-rw-r--r--fs/nfsd/nfs4recover.c4
-rw-r--r--fs/nfsd/nfs4xdr.c12
-rw-r--r--fs/nfsd/nfsctl.c149
-rw-r--r--fs/nfsd/nfsd.h3
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/nfssvc.c67
-rw-r--r--fs/nilfs2/alloc.c19
-rw-r--r--fs/nilfs2/alloc.h4
-rw-r--r--fs/nilfs2/dat.c2
-rw-r--r--fs/nilfs2/dir.c40
-rw-r--r--fs/nilfs2/ifile.c7
-rw-r--r--fs/nilfs2/nilfs.h10
-rw-r--r--fs/nilfs2/segment.c3
-rw-r--r--fs/nilfs2/the_nilfs.c6
-rw-r--r--fs/nilfs2/the_nilfs.h2
-rw-r--r--fs/nls/mac-celtic.c1
-rw-r--r--fs/nls/mac-centeuro.c1
-rw-r--r--fs/nls/mac-croatian.c1
-rw-r--r--fs/nls/mac-cyrillic.c1
-rw-r--r--fs/nls/mac-gaelic.c1
-rw-r--r--fs/nls/mac-greek.c1
-rw-r--r--fs/nls/mac-iceland.c1
-rw-r--r--fs/nls/mac-inuit.c1
-rw-r--r--fs/nls/mac-roman.c1
-rw-r--r--fs/nls/mac-romanian.c1
-rw-r--r--fs/nls/mac-turkish.c1
-rw-r--r--fs/nls/nls_ascii.c1
-rw-r--r--fs/nls/nls_base.c1
-rw-r--r--fs/nls/nls_cp1250.c1
-rw-r--r--fs/nls/nls_cp1251.c1
-rw-r--r--fs/nls/nls_cp1255.c1
-rw-r--r--fs/nls/nls_cp437.c1
-rw-r--r--fs/nls/nls_cp737.c1
-rw-r--r--fs/nls/nls_cp775.c1
-rw-r--r--fs/nls/nls_cp850.c1
-rw-r--r--fs/nls/nls_cp852.c1
-rw-r--r--fs/nls/nls_cp855.c1
-rw-r--r--fs/nls/nls_cp857.c1
-rw-r--r--fs/nls/nls_cp860.c1
-rw-r--r--fs/nls/nls_cp861.c1
-rw-r--r--fs/nls/nls_cp862.c1
-rw-r--r--fs/nls/nls_cp863.c1
-rw-r--r--fs/nls/nls_cp864.c1
-rw-r--r--fs/nls/nls_cp865.c1
-rw-r--r--fs/nls/nls_cp866.c1
-rw-r--r--fs/nls/nls_cp869.c1
-rw-r--r--fs/nls/nls_cp874.c1
-rw-r--r--fs/nls/nls_cp932.c1
-rw-r--r--fs/nls/nls_cp936.c1
-rw-r--r--fs/nls/nls_cp949.c1
-rw-r--r--fs/nls/nls_cp950.c1
-rw-r--r--fs/nls/nls_euc-jp.c1
-rw-r--r--fs/nls/nls_iso8859-1.c1
-rw-r--r--fs/nls/nls_iso8859-13.c1
-rw-r--r--fs/nls/nls_iso8859-14.c1
-rw-r--r--fs/nls/nls_iso8859-15.c1
-rw-r--r--fs/nls/nls_iso8859-2.c1
-rw-r--r--fs/nls/nls_iso8859-3.c1
-rw-r--r--fs/nls/nls_iso8859-4.c1
-rw-r--r--fs/nls/nls_iso8859-5.c1
-rw-r--r--fs/nls/nls_iso8859-6.c1
-rw-r--r--fs/nls/nls_iso8859-7.c1
-rw-r--r--fs/nls/nls_iso8859-9.c1
-rw-r--r--fs/nls/nls_koi8-r.c1
-rw-r--r--fs/nls/nls_koi8-ru.c1
-rw-r--r--fs/nls/nls_koi8-u.c1
-rw-r--r--fs/nls/nls_ucs2_utils.c1
-rw-r--r--fs/nls/nls_utf8.c1
-rw-r--r--fs/notify/fsnotify.c31
-rw-r--r--fs/notify/fsnotify.h2
-rw-r--r--fs/notify/mark.c32
-rw-r--r--fs/nsfs.c122
-rw-r--r--fs/ntfs3/super.c12
-rw-r--r--fs/ocfs2/aops.c5
-rw-r--r--fs/ocfs2/journal.c209
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/ocfs2/ocfs2.h27
-rw-r--r--fs/ocfs2/ocfs2_trace.h2
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/open.c43
-rw-r--r--fs/openpromfs/inode.c1
-rw-r--r--fs/orangefs/inode.c13
-rw-r--r--fs/orangefs/orangefs-bufmap.c4
-rw-r--r--fs/overlayfs/dir.c8
-rw-r--r--fs/overlayfs/export.c6
-rw-r--r--fs/pidfs.c90
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/generic.c6
-rw-r--r--fs/proc/proc_sysctl.c70
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/proc_namespace.c6
-rw-r--r--fs/pstore/blk.c2
-rw-r--r--fs/pstore/platform.c1
-rw-r--r--fs/qnx4/inode.c1
-rw-r--r--fs/qnx6/inode.c1
-rw-r--r--fs/quota/dquot.c8
-rw-r--r--fs/read_write.c18
-rw-r--r--fs/readdir.c4
-rw-r--r--fs/reiserfs/inode.c1
-rw-r--r--fs/romfs/super.c22
-rw-r--r--fs/signalfd.c6
-rw-r--r--fs/smb/client/cifsfs.c3
-rw-r--r--fs/smb/client/cifsglob.h7
-rw-r--r--fs/smb/client/cifspdu.h2
-rw-r--r--fs/smb/client/cifssmb.c8
-rw-r--r--fs/smb/client/file.c57
-rw-r--r--fs/smb/client/fs_context.c39
-rw-r--r--fs/smb/client/inode.c4
-rw-r--r--fs/smb/client/smb2ops.c3
-rw-r--r--fs/smb/client/smb2pdu.c22
-rw-r--r--fs/smb/client/smb2transport.c2
-rw-r--r--fs/smb/common/cifs_arc4.c1
-rw-r--r--fs/smb/common/cifs_md4.c1
-rw-r--r--fs/smb/common/smb2pdu.h34
-rw-r--r--fs/smb/server/smb2pdu.c44
-rw-r--r--fs/smb/server/vfs.c17
-rw-r--r--fs/smb/server/vfs.h3
-rw-r--r--fs/smb/server/vfs_cache.c3
-rw-r--r--fs/stat.c206
-rw-r--r--fs/super.c11
-rw-r--r--fs/sysv/super.c1
-rw-r--r--fs/tracefs/inode.c16
-rw-r--r--fs/udf/balloc.c74
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c13
-rw-r--r--fs/udf/namei.c2
-rw-r--r--fs/udf/super.c18
-rw-r--r--fs/ufs/dir.c1
-rw-r--r--fs/userfaultfd.c7
-rw-r--r--fs/vboxsf/file.c18
-rw-r--r--fs/vboxsf/super.c16
-rw-r--r--fs/verity/measure.c5
-rw-r--r--fs/xfs/Kconfig12
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/libxfs/xfs_ag.c2
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.h19
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c241
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h18
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c64
-rw-r--r--fs/xfs/libxfs/xfs_attr.c40
-rw-r--r--fs/xfs/libxfs/xfs_attr.h3
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c87
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h3
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_btree.c51
-rw-r--r--fs/xfs/libxfs/xfs_btree.h16
-rw-r--r--fs/xfs/libxfs/xfs_defer.c4
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c661
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h49
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c31
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h7
-rw-r--r--fs/xfs/libxfs/xfs_format.h9
-rw-r--r--fs/xfs/libxfs/xfs_fs.h2
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c20
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c47
-rw-r--r--fs/xfs/libxfs/xfs_inode_util.c749
-rw-r--r--fs/xfs/libxfs/xfs_inode_util.h62
-rw-r--r--fs/xfs/libxfs/xfs_ondisk.h1
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c156
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h11
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c266
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h15
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c7
-rw-r--r--fs/xfs/libxfs/xfs_sb.c7
-rw-r--r--fs/xfs/libxfs/xfs_shared.h7
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c1
-rw-r--r--fs/xfs/scrub/common.c1
-rw-r--r--fs/xfs/scrub/newbt.c5
-rw-r--r--fs/xfs/scrub/quota_repair.c1
-rw-r--r--fs/xfs/scrub/reap.c7
-rw-r--r--fs/xfs/scrub/scrub.c2
-rw-r--r--fs/xfs/scrub/tempfile.c21
-rw-r--r--fs/xfs/scrub/xfarray.c9
-rw-r--r--fs/xfs/xfs.h4
-rw-r--r--fs/xfs/xfs_attr_item.c17
-rw-r--r--fs/xfs/xfs_bmap_item.c6
-rw-r--r--fs/xfs/xfs_bmap_util.c52
-rw-r--r--fs/xfs/xfs_bmap_util.h2
-rw-r--r--fs/xfs/xfs_buf_item.c32
-rw-r--r--fs/xfs/xfs_discard.c303
-rw-r--r--fs/xfs/xfs_dquot_item.c31
-rw-r--r--fs/xfs/xfs_drain.c8
-rw-r--r--fs/xfs/xfs_drain.h5
-rw-r--r--fs/xfs/xfs_extfree_item.c119
-rw-r--r--fs/xfs/xfs_extfree_item.h6
-rw-r--r--fs/xfs/xfs_file.c141
-rw-r--r--fs/xfs/xfs_handle.c8
-rw-r--r--fs/xfs/xfs_icache.c7
-rw-r--r--fs/xfs/xfs_inode.c1517
-rw-r--r--fs/xfs/xfs_inode.h70
-rw-r--r--fs/xfs/xfs_inode_item.c38
-rw-r--r--fs/xfs/xfs_ioctl.c60
-rw-r--r--fs/xfs/xfs_iomap.c105
-rw-r--r--fs/xfs/xfs_iops.c66
-rw-r--r--fs/xfs/xfs_iwalk.c5
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_log.c511
-rw-r--r--fs/xfs/xfs_log.h1
-rw-r--r--fs/xfs/xfs_log_cil.c177
-rw-r--r--fs/xfs/xfs_log_priv.h61
-rw-r--r--fs/xfs/xfs_log_recover.c28
-rw-r--r--fs/xfs/xfs_qm.c7
-rw-r--r--fs/xfs/xfs_qm_bhv.c1
-rw-r--r--fs/xfs/xfs_refcount_item.c110
-rw-r--r--fs/xfs/xfs_refcount_item.h5
-rw-r--r--fs/xfs/xfs_reflink.c3
-rw-r--r--fs/xfs/xfs_reflink.h10
-rw-r--r--fs/xfs/xfs_rmap_item.c151
-rw-r--r--fs/xfs/xfs_rmap_item.h4
-rw-r--r--fs/xfs/xfs_rtalloc.c3
-rw-r--r--fs/xfs/xfs_symlink.c70
-rw-r--r--fs/xfs/xfs_sysfs.c29
-rw-r--r--fs/xfs/xfs_trace.c4
-rw-r--r--fs/xfs/xfs_trace.h533
-rw-r--r--fs/xfs/xfs_trans.c129
-rw-r--r--fs/xfs/xfs_trans.h5
-rw-r--r--fs/xfs/xfs_trans_ail.c244
-rw-r--r--fs/xfs/xfs_trans_priv.h44
-rw-r--r--fs/zonefs/super.c1
-rw-r--r--include/acpi/acpi_bus.h1
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/actbl2.h19
-rw-r--r--include/acpi/battery.h2
-rw-r--r--include/acpi/processor.h2
-rw-r--r--include/asm-generic/Kbuild2
-rw-r--r--include/asm-generic/fixmap.h3
-rw-r--r--include/asm-generic/runtime-const.h15
-rw-r--r--include/asm-generic/syscalls.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h10
-rw-r--r--include/clocksource/timer-xilinx.h2
-rw-r--r--include/drm/drm_buddy.h6
-rw-r--r--include/dt-bindings/arm/qcom,ids.h2
-rw-r--r--include/dt-bindings/clock/qcom,qcm2290-gpucc.h32
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-camcc.h195
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-videocc.h23
-rw-r--r--include/dt-bindings/clock/rk3128-cru.h1
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h1
-rw-r--r--include/dt-bindings/input/cros-ec-keyboard.h104
-rw-r--r--include/dt-bindings/interconnect/qcom,ipq9574.h59
-rw-r--r--include/dt-bindings/mfd/qcom-pm8008.h19
-rw-r--r--include/dt-bindings/mfd/st,stpmic1.h2
-rw-r--r--include/dt-bindings/net/ti-dp83867.h4
-rw-r--r--include/dt-bindings/net/ti-dp83869.h4
-rw-r--r--include/dt-bindings/power/amlogic,a4-pwrc.h21
-rw-r--r--include/dt-bindings/power/amlogic,a5-pwrc.h21
-rw-r--r--include/dt-bindings/regulator/st,stm32mp25-regulator.h48
-rw-r--r--include/dt-bindings/reset/sun50i-h616-ccu.h1
-rw-r--r--include/dt-bindings/thermal/mediatek,lvts-thermal.h12
-rw-r--r--include/kunit/assert.h13
-rw-r--r--include/kunit/test.h88
-rw-r--r--include/linux/acpi.h17
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h6
-rw-r--r--include/linux/atomic/atomic-instrumented.h8
-rw-r--r--include/linux/atomic/atomic-long.h4
-rw-r--r--include/linux/auxiliary_bus.h24
-rw-r--r--include/linux/backlight.h20
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/blk-integrity.h97
-rw-r--r--include/linux/blk_types.h8
-rw-r--r--include/linux/blkdev.h349
-rw-r--r--include/linux/bpf.h34
-rw-r--r--include/linux/bpf_verifier.h16
-rw-r--r--include/linux/brcmphy.h88
-rw-r--r--include/linux/btf.h67
-rw-r--r--include/linux/bvec.h14
-rw-r--r--include/linux/cache.h59
-rw-r--r--include/linux/cacheinfo.h25
-rw-r--r--include/linux/can/dev.h2
-rw-r--r--include/linux/cc_platform.h10
-rw-r--r--include/linux/cdrom.h2
-rw-r--r--include/linux/cgroup-defs.h7
-rw-r--r--include/linux/cleanup.h19
-rw-r--r--include/linux/clocksource.h27
-rw-r--r--include/linux/clocksource_ids.h1
-rw-r--r--include/linux/closure.h30
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compiler.h10
-rw-r--r--include/linux/compiler_types.h23
-rw-r--r--include/linux/configfs.h3
-rw-r--r--include/linux/cpu.h33
-rw-r--r--include/linux/cpufreq.h8
-rw-r--r--include/linux/cpuhotplug.h3
-rw-r--r--include/linux/cpuhplock.h49
-rw-r--r--include/linux/cpumask.h25
-rw-r--r--include/linux/dcache.h11
-rw-r--r--include/linux/device-mapper.h7
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/dim.h113
-rw-r--r--include/linux/dlm.h17
-rw-r--r--include/linux/dsa/8021q.h8
-rw-r--r--include/linux/dsa/lan9303.h4
-rw-r--r--include/linux/efi.h15
-rw-r--r--include/linux/etherdevice.h8
-rw-r--r--include/linux/ethtool.h179
-rw-r--r--include/linux/exportfs.h2
-rw-r--r--include/linux/file.h20
-rw-r--r--include/linux/filter.h144
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h27
-rw-r--r--include/linux/firmware/qcom/qcom_qseecom.h8
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h37
-rw-r--r--include/linux/firmware/qcom/qcom_tzmem.h56
-rw-r--r--include/linux/firmware/xlnx-event-manager.h10
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h3
-rw-r--r--include/linux/fortify-string.h8
-rw-r--r--include/linux/fs.h116
-rw-r--r--include/linux/fs_parser.h6
-rw-r--r--include/linux/fscache-cache.h6
-rw-r--r--include/linux/fsnotify.h8
-rw-r--r--include/linux/fsnotify_backend.h8
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/gpio.h6
-rw-r--r--include/linux/gpio/driver.h5
-rw-r--r--include/linux/hid.h7
-rw-r--r--include/linux/hid_bpf.h202
-rw-r--r--include/linux/huge_mm.h10
-rw-r--r--include/linux/hwmon.h2
-rw-r--r--include/linux/i2c.h25
-rw-r--r--include/linux/ieee80211.h290
-rw-r--r--include/linux/intel_tcc.h1
-rw-r--r--include/linux/intel_tpmi.h2
-rw-r--r--include/linux/io_uring_types.h18
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/irq_sim.h17
-rw-r--r--include/linux/irqchip/arm-gic-common.h4
-rw-r--r--include/linux/irqchip/arm-gic-v3-prio.h52
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/kcov.h49
-rw-r--r--include/linux/ksm.h17
-rw-r--r--include/linux/leds.h37
-rw-r--r--include/linux/libata.h12
-rw-r--r--include/linux/local_lock.h21
-rw-r--r--include/linux/local_lock_internal.h31
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/lsm_hook_defs.h3
-rw-r--r--include/linux/math64.h28
-rw-r--r--include/linux/mfd/88pm886.h69
-rw-r--r--include/linux/mfd/cs40l50.h137
-rw-r--r--include/linux/mfd/idt8a340_reg.h8
-rw-r--r--include/linux/mfd/lm3533.h5
-rw-r--r--include/linux/mfd/rohm-bd96801.h215
-rw-r--r--include/linux/mfd/rohm-generic.h1
-rw-r--r--include/linux/mfd/stm32-timers.h179
-rw-r--r--include/linux/mfd/syscon.h8
-rw-r--r--include/linux/mfd/tmio.h133
-rw-r--r--include/linux/mfd/tps65912.h1
-rw-r--r--include/linux/mii_timestamper.h2
-rw-r--r--include/linux/misc_cgroup.h12
-rw-r--r--include/linux/mlx5/device.h1
-rw-r--r--include/linux/mlx5/driver.h11
-rw-r--r--include/linux/mlx5/mlx5_ifc.h39
-rw-r--r--include/linux/mm.h14
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/module.h16
-rw-r--r--include/linux/namei.h8
-rw-r--r--include/linux/net_tstamp.h9
-rw-r--r--include/linux/netdevice.h86
-rw-r--r--include/linux/netdevice_xmit.h13
-rw-r--r--include/linux/netfs.h18
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/nsproxy.h13
-rw-r--r--include/linux/numa.h5
-rw-r--r--include/linux/nvme-fc-driver.h4
-rw-r--r--include/linux/nvme.h25
-rw-r--r--include/linux/objagg.h1
-rw-r--r--include/linux/page-flags.h21
-rw-r--r--include/linux/page_ref.h57
-rw-r--r--include/linux/pagemap.h45
-rw-r--r--include/linux/path.h9
-rw-r--r--include/linux/pci-pwrctl.h51
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/pcs/pcs-xpcs.h49
-rw-r--r--include/linux/perf/arm_pmuv3.h2
-rw-r--r--include/linux/perf_event.h11
-rw-r--r--include/linux/pgalloc_tag.h11
-rw-r--r--include/linux/phy.h27
-rw-r--r--include/linux/phylink.h6
-rw-r--r--include/linux/platform_data/cros_ec_commands.h83
-rw-r--r--include/linux/platform_data/cros_ec_proto.h6
-rw-r--r--include/linux/platform_data/lenovo-yoga-c630.h44
-rw-r--r--include/linux/platform_data/mmc-pxamci.h4
-rw-r--r--include/linux/platform_data/tmio.h62
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h4
-rw-r--r--include/linux/platform_data/x86/soc.h12
-rw-r--r--include/linux/pm_domain.h17
-rw-r--r--include/linux/pm_opp.h6
-rw-r--r--include/linux/pnp.h6
-rw-r--r--include/linux/power_supply.h2
-rw-r--r--include/linux/preempt.h41
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/pse-pd/pse.h55
-rw-r--r--include/linux/pwm.h26
-rw-r--r--include/linux/pwrseq/consumer.h56
-rw-r--r--include/linux/pwrseq/provider.h75
-rw-r--r--include/linux/randomize_kstack.h18
-rw-r--r--include/linux/rcu_segcblist.h88
-rw-r--r--include/linux/rcupdate.h62
-rw-r--r--include/linux/regmap.h4
-rw-r--r--include/linux/regulator/consumer.h13
-rw-r--r--include/linux/resctrl.h88
-rw-r--r--include/linux/sched.h60
-rw-r--r--include/linux/security.h5
-rw-r--r--include/linux/serial_core.h21
-rw-r--r--include/linux/sfp.h6
-rw-r--r--include/linux/skbuff.h101
-rw-r--r--include/linux/skbuff_ref.h4
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h42
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h4
-rw-r--r--include/linux/soc/qcom/smem.h1
-rw-r--r--include/linux/soc/qcom/socinfo.h34
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h4
-rw-r--r--include/linux/socket.h5
-rw-r--r--include/linux/spi/spi.h22
-rw-r--r--include/linux/spi/spi_bitbang.h7
-rw-r--r--include/linux/spinlock.h14
-rw-r--r--include/linux/srcu.h35
-rw-r--r--include/linux/stat.h3
-rw-r--r--include/linux/stmmac.h8
-rw-r--r--include/linux/string.h2
-rw-r--r--include/linux/sunrpc/svc.h3
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/syscalls.h26
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/sysfs.h9
-rw-r--r--include/linux/t10-pi.h22
-rw-r--r--include/linux/task_work.h4
-rw-r--r--include/linux/thermal.h18
-rw-r--r--include/linux/tick.h1
-rw-r--r--include/linux/timekeeping.h6
-rw-r--r--include/linux/tpm.h98
-rw-r--r--include/linux/tsm.h59
-rw-r--r--include/linux/turris-omnia-mcu-interface.h249
-rw-r--r--include/linux/vfio.h1
-rw-r--r--include/linux/vfio_pci_core.h2
-rw-r--r--include/linux/wmi.h4
-rw-r--r--include/linux/wordpart.h8
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/net/af_unix.h14
-rw-r--r--include/net/bluetooth/bluetooth.h4
-rw-r--r--include/net/bluetooth/hci.h11
-rw-r--r--include/net/bluetooth/hci_core.h43
-rw-r--r--include/net/bluetooth/hci_sock.h2
-rw-r--r--include/net/bluetooth/hci_sync.h28
-rw-r--r--include/net/bluetooth/rfcomm.h2
-rw-r--r--include/net/caif/caif_layer.h2
-rw-r--r--include/net/cfg80211.h245
-rw-r--r--include/net/devlink.h4
-rw-r--r--include/net/dsa.h10
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/flow_dissector.h23
-rw-r--r--include/net/flow_offload.h35
-rw-r--r--include/net/ieee80211_radiotap.h1
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/inet_frag.h4
-rw-r--r--include/net/inet_timewait_sock.h11
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_route.h20
-rw-r--r--include/net/ip_fib.h28
-rw-r--r--include/net/ip_tunnels.h5
-rw-r--r--include/net/ipv6_stubs.h3
-rw-r--r--include/net/libeth/cache.h66
-rw-r--r--include/net/libeth/rx.h19
-rw-r--r--include/net/llc_c_st.h4
-rw-r--r--include/net/llc_s_st.h4
-rw-r--r--include/net/mac80211.h75
-rw-r--r--include/net/mana/gdma.h14
-rw-r--r--include/net/mana/mana.h12
-rw-r--r--include/net/netdev_queues.h2
-rw-r--r--include/net/netfilter/nf_flow_table.h15
-rw-r--r--include/net/netfilter/nf_tables.h227
-rw-r--r--include/net/netmem.h15
-rw-r--r--include/net/netns/ipv4.h9
-rw-r--r--include/net/netns/netfilter.h3
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/page_pool/helpers.h91
-rw-r--r--include/net/page_pool/types.h42
-rw-r--r--include/net/psample.h13
-rw-r--r--include/net/regulatory.h2
-rw-r--r--include/net/request_sock.h49
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/sctp/stream_sched.h8
-rw-r--r--include/net/seg6.h7
-rw-r--r--include/net/seg6_hmac.h7
-rw-r--r--include/net/seg6_local.h1
-rw-r--r--include/net/sock.h20
-rw-r--r--include/net/tcp.h111
-rw-r--r--include/net/tcp_ao.h49
-rw-r--r--include/net/tcx.h13
-rw-r--r--include/net/xdp_sock.h14
-rw-r--r--include/net/xfrm.h46
-rw-r--r--include/scsi/scsi_devinfo.h4
-rw-r--r--include/scsi/scsi_proto.h1
-rw-r--r--include/scsi/scsi_transport_sas.h2
-rw-r--r--include/soc/mscc/ocelot.h2
-rw-r--r--include/sound/dmaengine_pcm.h1
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/trace/events/block.h41
-rw-r--r--include/trace/events/btrfs.h37
-rw-r--r--include/trace/events/cachefiles.h8
-rw-r--r--include/trace/events/erofs.h32
-rw-r--r--include/trace/events/firewire.h113
-rw-r--r--include/trace/events/fscache.h4
-rw-r--r--include/trace/events/page_pool.h30
-rw-r--r--include/trace/events/qdisc.h2
-rw-r--r--include/trace/events/scsi.h1
-rw-r--r--include/trace/events/skb.h11
-rw-r--r--include/trace/events/tcp.h317
-rw-r--r--include/uapi/asm-generic/unistd.h6
-rw-r--r--include/uapi/drm/panthor_drm.h5
-rw-r--r--include/uapi/drm/xe_drm.h8
-rw-r--r--include/uapi/linux/bpf.h17
-rw-r--r--include/uapi/linux/btrfs_tree.h22
-rw-r--r--include/uapi/linux/can/isotp.h2
-rw-r--r--include/uapi/linux/cn_proc.h3
-rw-r--r--include/uapi/linux/dlm.h2
-rw-r--r--include/uapi/linux/ethtool.h210
-rw-r--r--include/uapi/linux/ethtool_netlink.h53
-rw-r--r--include/uapi/linux/fs.h5
-rw-r--r--include/uapi/linux/in.h2
-rw-r--r--include/uapi/linux/input-event-codes.h2
-rw-r--r--include/uapi/linux/io_uring.h2
-rw-r--r--include/uapi/linux/kd.h96
-rw-r--r--include/uapi/linux/mount.h10
-rw-r--r--include/uapi/linux/netdev.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/nfsd_netlink.h10
-rw-r--r--include/uapi/linux/nl80211.h71
-rw-r--r--include/uapi/linux/nsfs.h10
-rw-r--r--include/uapi/linux/openvswitch.h31
-rw-r--r--include/uapi/linux/perf_event.h6
-rw-r--r--include/uapi/linux/pidfd.h14
-rw-r--r--include/uapi/linux/pkt_cls.h10
-rw-r--r--include/uapi/linux/psample.h11
-rw-r--r--include/uapi/linux/stat.h12
-rw-r--r--include/uapi/linux/tcp_metrics.h22
-rw-r--r--include/uapi/linux/trace_mmap.h2
-rw-r--r--include/uapi/linux/xfrm.h1
-rw-r--r--include/uapi/linux/zorro_ids.h3
-rw-r--r--include/uapi/misc/fastrpc.h3
-rw-r--r--include/vdso/datapage.h4
-rw-r--r--include/xen/events.h2
-rw-r--r--init/Kconfig2
-rw-r--r--io_uring/Makefile6
-rw-r--r--io_uring/advise.c16
-rw-r--r--io_uring/cancel.h4
-rw-r--r--io_uring/eventfd.c160
-rw-r--r--io_uring/eventfd.h8
-rw-r--r--io_uring/io-wq.c39
-rw-r--r--io_uring/io-wq.h2
-rw-r--r--io_uring/io_uring.c155
-rw-r--r--io_uring/io_uring.h11
-rw-r--r--io_uring/memmap.c5
-rw-r--r--io_uring/msg_ring.c122
-rw-r--r--io_uring/msg_ring.h1
-rw-r--r--io_uring/napi.c24
-rw-r--r--io_uring/net.c116
-rw-r--r--io_uring/net.h6
-rw-r--r--io_uring/opdef.c39
-rw-r--r--io_uring/opdef.h4
-rw-r--r--io_uring/register.c69
-rw-r--r--io_uring/rsrc.c63
-rw-r--r--io_uring/rw.c9
-rw-r--r--io_uring/statx.c3
-rw-r--r--io_uring/xattr.c4
-rw-r--r--ipc/mqueue.c3
-rw-r--r--kernel/auditfilter.c5
-rw-r--r--kernel/bpf/Makefile8
-rw-r--r--kernel/bpf/arena.c16
-rw-r--r--kernel/bpf/bpf_local_storage.c4
-rw-r--r--kernel/bpf/bpf_lsm.c1
-rw-r--r--kernel/bpf/bpf_struct_ops.c77
-rw-r--r--kernel/bpf/btf.c509
-rw-r--r--kernel/bpf/core.c15
-rw-r--r--kernel/bpf/cpumap.c35
-rw-r--r--kernel/bpf/crypto.c42
-rw-r--r--kernel/bpf/devmap.c60
-rw-r--r--kernel/bpf/helpers.c263
-rw-r--r--kernel/bpf/log.c6
-rw-r--r--kernel/bpf/ringbuf.c31
-rw-r--r--kernel/bpf/syscall.c45
-rw-r--r--kernel/bpf/task_iter.c9
-rw-r--r--kernel/bpf/verifier.c396
-rw-r--r--kernel/cgroup/cgroup.c36
-rw-r--r--kernel/cgroup/cpuset.c197
-rw-r--r--kernel/cgroup/misc.c80
-rw-r--r--kernel/cgroup/pids.c129
-rw-r--r--kernel/cgroup/rstat.c37
-rw-r--r--kernel/cpu.c30
-rw-r--r--kernel/dma/map_benchmark.c25
-rw-r--r--kernel/events/callchain.c2
-rw-r--r--kernel/events/core.c138
-rw-r--r--kernel/events/internal.h6
-rw-r--r--kernel/events/ring_buffer.c7
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c37
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rwxr-xr-xkernel/gen_kheaders.sh9
-rw-r--r--kernel/irq/irq_sim.c60
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/jump_label.c74
-rw-r--r--kernel/kallsyms.c23
-rw-r--r--kernel/kcov.c1
-rw-r--r--kernel/kcsan/kcsan_test.c1
-rw-r--r--kernel/locking/lockdep.c1
-rw-r--r--kernel/locking/locktorture.c1
-rw-r--r--kernel/locking/qspinlock.c2
-rw-r--r--kernel/locking/rwsem.c6
-rw-r--r--kernel/locking/spinlock.c8
-rw-r--r--kernel/module/kallsyms.c25
-rw-r--r--kernel/module/main.c5
-rw-r--r--kernel/pid_namespace.c18
-rw-r--r--kernel/power/swap.c2
-rw-r--r--kernel/printk/Makefile2
-rw-r--r--kernel/printk/conopt.c146
-rw-r--r--kernel/printk/console_cmdline.h6
-rw-r--r--kernel/printk/printk.c23
-rw-r--r--kernel/rcu/rcuscale.c1
-rw-r--r--kernel/rcu/rcutorture.c49
-rw-r--r--kernel/rcu/refscale.c1
-rw-r--r--kernel/rcu/srcutiny.c3
-rw-r--r--kernel/rcu/srcutree.c13
-rw-r--r--kernel/rcu/sync.c12
-rw-r--r--kernel/rcu/tasks.h26
-rw-r--r--kernel/rcu/tree.c92
-rw-r--r--kernel/rcu/tree.h2
-rw-r--r--kernel/rcu/tree_exp.h24
-rw-r--r--kernel/rcu/tree_nocb.h147
-rw-r--r--kernel/rcu/tree_plugin.h14
-rw-r--r--kernel/rcu/tree_stall.h4
-rw-r--r--kernel/scftorture.c3
-rw-r--r--kernel/sched/build_policy.c1
-rw-r--r--kernel/sched/clock.c4
-rw-r--r--kernel/sched/core.c1895
-rw-r--r--kernel/sched/core_sched.c2
-rw-r--r--kernel/sched/cputime.c14
-rw-r--r--kernel/sched/deadline.c15
-rw-r--r--kernel/sched/fair.c30
-rw-r--r--kernel/sched/idle.c12
-rw-r--r--kernel/sched/loadavg.c4
-rw-r--r--kernel/sched/pelt.c4
-rw-r--r--kernel/sched/psi.c81
-rw-r--r--kernel/sched/rt.c22
-rw-r--r--kernel/sched/sched.h435
-rw-r--r--kernel/sched/stats.h13
-rw-r--r--kernel/sched/syscalls.c1699
-rw-r--r--kernel/sched/topology.c12
-rw-r--r--kernel/sched/wait_bit.c4
-rw-r--r--kernel/seccomp.c30
-rw-r--r--kernel/signal.c8
-rw-r--r--kernel/smp.c5
-rw-r--r--kernel/sys_ni.c4
-rw-r--r--kernel/sysctl-test.c50
-rw-r--r--kernel/sysctl.c31
-rw-r--r--kernel/task_work.c58
-rw-r--r--kernel/time/clocksource-wdtest.c1
-rw-r--r--kernel/time/hrtimer.c2
-rw-r--r--kernel/time/test_udelay.c1
-rw-r--r--kernel/time/tick-broadcast.c23
-rw-r--r--kernel/time/tick-common.c42
-rw-r--r--kernel/time/tick-sched.c22
-rw-r--r--kernel/time/time_test.c1
-rw-r--r--kernel/time/timekeeping.c131
-rw-r--r--kernel/torture.c1
-rw-r--r--kernel/trace/Kconfig4
-rw-r--r--kernel/trace/bpf_trace.c27
-rw-r--r--kernel/trace/ftrace.c13
-rw-r--r--kernel/trace/trace_probe.c4
-rw-r--r--kernel/trace/trace_uprobe.c14
-rw-r--r--kernel/utsname_sysctl.c2
-rw-r--r--kernel/workqueue.c401
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug21
-rw-r--r--lib/Makefile6
-rw-r--r--lib/alloc_tag.c17
-rwxr-xr-xlib/build_OID_registry4
-rw-r--r--lib/closure.c57
-rw-r--r--lib/debugobjects.c21
-rw-r--r--lib/dim/net_dim.c144
-rw-r--r--lib/fortify_kunit.c8
-rw-r--r--lib/kunit/Makefile2
-rw-r--r--lib/kunit/assert.c19
-rw-r--r--lib/kunit/assert_test.c388
-rw-r--r--lib/kunit/executor.c12
-rw-r--r--lib/kunit/executor_test.c2
-rw-r--r--lib/kunit/kunit-example-test.c1
-rw-r--r--lib/kunit/kunit-test.c1
-rw-r--r--lib/kunit/test.c1
-rw-r--r--lib/kunit/user_alloc.c117
-rw-r--r--lib/list-test.c7
-rw-r--r--lib/objagg.c20
-rw-r--r--lib/overflow_kunit.c20
-rw-r--r--lib/string_helpers_kunit.c1
-rw-r--r--lib/string_kunit.c1
-rw-r--r--lib/test_bpf.c11
-rw-r--r--lib/test_objagg.c2
-rw-r--r--lib/test_rhashtable.c1
-rw-r--r--lib/test_user_copy.c331
-rw-r--r--lib/usercopy_kunit.c335
-rw-r--r--lib/vdso/gettimeofday.c20
-rw-r--r--mm/compaction.c11
-rw-r--r--mm/damon/core.c23
-rw-r--r--mm/debug_vm_pgtable.c31
-rw-r--r--mm/filemap.c22
-rw-r--r--mm/gup.c291
-rw-r--r--mm/huge_memory.c38
-rw-r--r--mm/hugetlb.c86
-rw-r--r--mm/hugetlb_vmemmap.c16
-rw-r--r--mm/internal.h10
-rw-r--r--mm/kasan/common.c2
-rw-r--r--mm/khugepaged.c10
-rw-r--r--mm/kmsan/core.c15
-rw-r--r--mm/ksm.c17
-rw-r--r--mm/memblock.c24
-rw-r--r--mm/memcontrol.c16
-rw-r--r--mm/memory.c23
-rw-r--r--mm/mempool.c2
-rw-r--r--mm/migrate.c26
-rw-r--r--mm/mm_init.c43
-rw-r--r--mm/page-writeback.c32
-rw-r--r--mm/page_alloc.c61
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/page_table_check.c11
-rw-r--r--mm/readahead.c8
-rw-r--r--mm/shmem.c38
-rw-r--r--mm/slub.c12
-rw-r--r--mm/util.c19
-rw-r--r--mm/vmalloc.c33
-rw-r--r--mm/vmscan.c2
-rw-r--r--mm/workingset.c14
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/9p/client.c2
-rw-r--r--net/Kconfig13
-rw-r--r--net/atm/ioctl.c4
-rw-r--r--net/ax25/af_ax25.c6
-rw-r--r--net/ax25/ax25_dev.c2
-rw-r--r--net/batman-adv/originator.c27
-rw-r--r--net/batman-adv/translation-table.c47
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/hci_conn.c16
-rw-r--r--net/bluetooth/hci_core.c141
-rw-r--r--net/bluetooth/hci_debugfs.c1
-rw-r--r--net/bluetooth/hci_event.c36
-rw-r--r--net/bluetooth/hci_request.c903
-rw-r--r--net/bluetooth/hci_request.h71
-rw-r--r--net/bluetooth/hci_sync.c118
-rw-r--r--net/bluetooth/iso.c8
-rw-r--r--net/bluetooth/l2cap_core.c15
-rw-r--r--net/bluetooth/l2cap_sock.c14
-rw-r--r--net/bluetooth/mgmt.c51
-rw-r--r--net/bluetooth/msft.c1
-rw-r--r--net/bluetooth/rfcomm/tty.c23
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c4
-rw-r--r--net/bpf/test_run.c50
-rw-r--r--net/bridge/br_forward.c4
-rw-r--r--net/bridge/br_mst.c13
-rw-r--r--net/bridge/br_netfilter_hooks.c20
-rw-r--r--net/bridge/br_netlink_tunnel.c4
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c6
-rw-r--r--net/caif/cfpkt_skbuff.c7
-rw-r--r--net/can/Kconfig11
-rw-r--r--net/can/isotp.c11
-rw-r--r--net/can/j1939/main.c6
-rw-r--r--net/can/j1939/transport.c21
-rw-r--r--net/ceph/crush/mapper.c7
-rw-r--r--net/ceph/mon_client.c14
-rw-r--r--net/core/datagram.c67
-rw-r--r--net/core/dev.c207
-rw-r--r--net/core/dev.h22
-rw-r--r--net/core/dev_ioctl.c9
-rw-r--r--net/core/drop_monitor.c9
-rw-r--r--net/core/dst_cache.c2
-rw-r--r--net/core/filter.c215
-rw-r--r--net/core/flow_dissector.c62
-rw-r--r--net/core/gen_estimator.c2
-rw-r--r--net/core/lwt_bpf.c9
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/net-sysfs.c2
-rw-r--r--net/core/net_namespace.c9
-rw-r--r--net/core/netdev-genl.c16
-rw-r--r--net/core/page_pool.c316
-rw-r--r--net/core/rtnetlink.c69
-rw-r--r--net/core/skbuff.c76
-rw-r--r--net/core/skmsg.c3
-rw-r--r--net/core/sock.c38
-rw-r--r--net/core/sock_diag.c8
-rw-r--r--net/core/sock_map.c22
-rw-r--r--net/core/sysctl_net_core.c75
-rw-r--r--net/core/timestamping.c5
-rw-r--r--net/core/xdp.c8
-rw-r--r--net/dccp/ipv4.c7
-rw-r--r--net/dccp/ipv6.c7
-rw-r--r--net/dccp/minisocks.c9
-rw-r--r--net/devlink/dpipe.c2
-rw-r--r--net/dsa/Kconfig8
-rw-r--r--net/dsa/Makefile1
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/dsa/port.c72
-rw-r--r--net/dsa/tag_8021q.c84
-rw-r--r--net/dsa/tag_8021q.h7
-rw-r--r--net/dsa/tag_ocelot_8021q.c2
-rw-r--r--net/dsa/tag_sja1105.c72
-rw-r--r--net/dsa/tag_vsc73xx_8021q.c68
-rw-r--r--net/dsa/user.c99
-rw-r--r--net/dsa/user.h2
-rw-r--r--net/ethernet/eth.c4
-rw-r--r--net/ethtool/Makefile2
-rw-r--r--net/ethtool/cabletest.c4
-rw-r--r--net/ethtool/channels.c6
-rw-r--r--net/ethtool/cmis.h124
-rw-r--r--net/ethtool/cmis_cdb.c602
-rw-r--r--net/ethtool/cmis_fw_update.c399
-rw-r--r--net/ethtool/coalesce.c274
-rw-r--r--net/ethtool/common.c76
-rw-r--r--net/ethtool/common.h4
-rw-r--r--net/ethtool/eeprom.c6
-rw-r--r--net/ethtool/ioctl.c197
-rw-r--r--net/ethtool/linkstate.c41
-rw-r--r--net/ethtool/module.c394
-rw-r--r--net/ethtool/module_fw.h75
-rw-r--r--net/ethtool/netlink.c56
-rw-r--r--net/ethtool/netlink.h16
-rw-r--r--net/ethtool/pse-pd.c123
-rw-r--r--net/ethtool/tsinfo.c12
-rw-r--r--net/ethtool/wol.c2
-rw-r--r--net/hsr/hsr_device.c63
-rw-r--r--net/hsr/hsr_forward.c41
-rw-r--r--net/hsr/hsr_framereg.c12
-rw-r--r--net/hsr/hsr_framereg.h2
-rw-r--r--net/hsr/hsr_main.h4
-rw-r--r--net/hsr/hsr_netlink.c1
-rw-r--r--net/ieee802154/6lowpan/reassembly.c2
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/bpf_tcp_ca.c6
-rw-r--r--net/ipv4/cipso_ipv4.c77
-rw-r--r--net/ipv4/devinet.c9
-rw-r--r--net/ipv4/esp4.c11
-rw-r--r--net/ipv4/esp4_offload.c24
-rw-r--r--net/ipv4/fib_frontend.c7
-rw-r--r--net/ipv4/fib_semantics.c18
-rw-r--r--net/ipv4/fou_core.c2
-rw-r--r--net/ipv4/inet_connection_sock.c75
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c63
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ip_output.c14
-rw-r--r--net/ipv4/ip_tunnel.c10
-rw-r--r--net/ipv4/metrics.c8
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c2
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c38
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c80
-rw-r--r--net/ipv4/tcp.c121
-rw-r--r--net/ipv4/tcp_ao.c43
-rw-r--r--net/ipv4/tcp_cong.c20
-rw-r--r--net/ipv4/tcp_fastopen.c7
-rw-r--r--net/ipv4/tcp_input.c151
-rw-r--r--net/ipv4/tcp_ipv4.c67
-rw-r--r--net/ipv4/tcp_metrics.c1
-rw-r--r--net/ipv4/tcp_minisocks.c53
-rw-r--r--net/ipv4/tcp_output.c27
-rw-r--r--net/ipv4/tcp_sigpool.c17
-rw-r--r--net/ipv4/tcp_timer.c29
-rw-r--r--net/ipv4/udp.c19
-rw-r--r--net/ipv4/udp_offload.c8
-rw-r--r--net/ipv6/addrconf.c11
-rw-r--r--net/ipv6/af_inet6.c3
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/esp6_offload.c7
-rw-r--r--net/ipv6/ila/ila_lwt.c7
-rw-r--r--net/ipv6/ioam6_iptunnel.c8
-rw-r--r--net/ipv6/ip6_fib.c11
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c12
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter.c7
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/raw.c10
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c65
-rw-r--r--net/ipv6/rpl_iptunnel.c14
-rw-r--r--net/ipv6/seg6.c33
-rw-r--r--net/ipv6/seg6_iptunnel.c14
-rw-r--r--net/ipv6/seg6_local.c30
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c44
-rw-r--r--net/ipv6/udp.c14
-rw-r--r--net/ipv6/xfrm6_policy.c15
-rw-r--r--net/l2tp/l2tp_core.c507
-rw-r--r--net/l2tp/l2tp_core.h43
-rw-r--r--net/l2tp/l2tp_debugfs.c13
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/l2tp/l2tp_netlink.c6
-rw-r--r--net/l2tp/l2tp_ppp.c6
-rw-r--r--net/llc/llc_c_st.c500
-rw-r--r--net/llc/llc_conn.c20
-rw-r--r--net/llc/llc_s_st.c26
-rw-r--r--net/llc/llc_sap.c12
-rw-r--r--net/mac80211/agg-tx.c4
-rw-r--r--net/mac80211/cfg.c175
-rw-r--r--net/mac80211/chan.c323
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/driver-ops.c23
-rw-r--r--net/mac80211/driver-ops.h14
-rw-r--r--net/mac80211/he.c10
-rw-r--r--net/mac80211/ht.c2
-rw-r--r--net/mac80211/ibss.c11
-rw-r--r--net/mac80211/ieee80211_i.h72
-rw-r--r--net/mac80211/iface.c95
-rw-r--r--net/mac80211/link.c20
-rw-r--r--net/mac80211/main.c69
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/mesh_pathtbl.c13
-rw-r--r--net/mac80211/mlme.c1081
-rw-r--r--net/mac80211/offchannel.c35
-rw-r--r--net/mac80211/parse.c102
-rw-r--r--net/mac80211/pm.c4
-rw-r--r--net/mac80211/rx.c7
-rw-r--r--net/mac80211/scan.c31
-rw-r--r--net/mac80211/spectmgmt.c23
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/sta_info.h6
-rw-r--r--net/mac80211/tests/Makefile2
-rw-r--r--net/mac80211/tests/tpe.c284
-rw-r--r--net/mac80211/trace.h15
-rw-r--r--net/mac80211/tx.c6
-rw-r--r--net/mac80211/util.c193
-rw-r--r--net/mac80211/vht.c73
-rw-r--r--net/mac802154/main.c14
-rw-r--r--net/mac802154/tx.c8
-rw-r--r--net/mptcp/pm_netlink.c21
-rw-r--r--net/mptcp/protocol.c18
-rw-r--r--net/mptcp/protocol.h5
-rw-r--r--net/mptcp/sockopt.c2
-rw-r--r--net/mptcp/subflow.c2
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-manage.c73
-rw-r--r--net/ncsi/ncsi-rsp.c4
-rw-r--r--net/netfilter/Makefile7
-rw-r--r--net/netfilter/core.c13
-rw-r--r--net/netfilter/ipset/ip_set_core.c92
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c33
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c4
-rw-r--r--net/netfilter/nf_conncount.c8
-rw-r--r--net/netfilter/nf_conntrack_bpf.c68
-rw-r--r--net/netfilter/nf_conntrack_standalone.c15
-rw-r--r--net/netfilter/nf_flow_table_bpf.c121
-rw-r--r--net/netfilter/nf_flow_table_inet.c2
-rw-r--r--net/netfilter/nf_flow_table_offload.c2
-rw-r--r--net/netfilter/nf_flow_table_xdp.c147
-rw-r--r--net/netfilter/nf_hooks_lwtunnel.c70
-rw-r--r--net/netfilter/nf_internals.h6
-rw-r--r--net/netfilter/nf_tables_api.c580
-rw-r--r--net/netfilter/nf_tables_offload.c40
-rw-r--r--net/netfilter/nf_tables_trace.c2
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c4
-rw-r--r--net/netfilter/nft_fib.c8
-rw-r--r--net/netfilter/nft_hash.c3
-rw-r--r--net/netfilter/nft_immediate.c2
-rw-r--r--net/netfilter/nft_lookup.c3
-rw-r--r--net/netfilter/nft_meta.c3
-rw-r--r--net/netfilter/nft_payload.c99
-rw-r--r--net/netfilter/xt_recent.c8
-rw-r--r--net/netlink/af_netlink.c20
-rw-r--r--net/netrom/nr_timer.c3
-rw-r--r--net/openvswitch/Kconfig1
-rw-r--r--net/openvswitch/actions.c66
-rw-r--r--net/openvswitch/conntrack.c54
-rw-r--r--net/openvswitch/datapath.h3
-rw-r--r--net/openvswitch/flow_netlink.c32
-rw-r--r--net/openvswitch/vport-internal_dev.c10
-rw-r--r--net/openvswitch/vport.c1
-rw-r--r--net/packet/af_packet.c103
-rw-r--r--net/psample/psample.c21
-rw-r--r--net/qrtr/ns.c17
-rw-r--r--net/rds/tcp.c4
-rw-r--r--net/rds/tcp_recv.c4
-rw-r--r--net/rfkill/core.c8
-rw-r--r--net/sched/act_api.c5
-rw-r--r--net/sched/act_bpf.c4
-rw-r--r--net/sched/act_ct.c55
-rw-r--r--net/sched/act_sample.c12
-rw-r--r--net/sched/act_skbmod.c2
-rw-r--r--net/sched/cls_bpf.c4
-rw-r--r--net/sched/cls_flower.c132
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_ingress.c12
-rw-r--r--net/sched/sch_multiq.c2
-rw-r--r--net/sched/sch_taprio.c31
-rw-r--r--net/sctp/socket.c14
-rw-r--r--net/smc/Makefile2
-rw-r--r--net/smc/af_smc.c184
-rw-r--r--net/smc/smc.h38
-rw-r--r--net/smc/smc_core.c7
-rw-r--r--net/smc/smc_inet.c159
-rw-r--r--net/smc/smc_inet.h22
-rw-r--r--net/socket.c48
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/svc.c116
-rw-r--r--net/sunrpc/svc_xprt.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c83
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/sysctl_net.c11
-rw-r--r--net/tipc/core.h1
-rw-r--r--net/tipc/link.c27
-rw-r--r--net/tipc/node.c1
-rw-r--r--net/tls/tls_device.c11
-rw-r--r--net/tls/tls_main.c9
-rw-r--r--net/unix/af_unix.c332
-rw-r--r--net/unix/diag.c59
-rw-r--r--net/unix/garbage.c17
-rw-r--r--net/wireless/chan.c120
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/wireless/core.h7
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/mesh.c5
-rw-r--r--net/wireless/nl80211.c348
-rw-r--r--net/wireless/nl80211.h4
-rw-r--r--net/wireless/pmsr.c18
-rw-r--r--net/wireless/rdev-ops.h74
-rw-r--r--net/wireless/reg.c2
-rw-r--r--net/wireless/scan.c173
-rw-r--r--net/wireless/sme.c4
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/tests/chan.c22
-rw-r--r--net/wireless/trace.h227
-rw-r--r--net/wireless/util.c83
-rw-r--r--net/xdp/xsk.c30
-rw-r--r--net/xfrm/Makefile3
-rw-r--r--net/xfrm/xfrm_compat.c6
-rw-r--r--net/xfrm/xfrm_device.c6
-rw-r--r--net/xfrm/xfrm_input.c11
-rw-r--r--net/xfrm/xfrm_nat_keepalive.c292
-rw-r--r--net/xfrm/xfrm_policy.c29
-rw-r--r--net/xfrm/xfrm_state.c82
-rw-r--r--net/xfrm/xfrm_user.c16
-rw-r--r--rust/bindings/bindings_helper.h5
-rw-r--r--rust/helpers.c16
-rw-r--r--rust/kernel/alloc/vec_ext.rs7
-rw-r--r--rust/kernel/block.rs5
-rw-r--r--rust/kernel/block/mq.rs98
-rw-r--r--rust/kernel/block/mq/gen_disk.rs198
-rw-r--r--rust/kernel/block/mq/operations.rs245
-rw-r--r--rust/kernel/block/mq/raw_writer.rs55
-rw-r--r--rust/kernel/block/mq/request.rs253
-rw-r--r--rust/kernel/block/mq/tag_set.rs86
-rw-r--r--rust/kernel/error.rs6
-rw-r--r--rust/kernel/lib.rs2
-rw-r--r--samples/bpf/cpustat_kern.c3
-rw-r--r--samples/hid/Makefile5
-rw-r--r--samples/hid/hid_bpf_attach.bpf.c18
-rw-r--r--samples/hid/hid_bpf_attach.h14
-rw-r--r--samples/hid/hid_mouse.bpf.c26
-rw-r--r--samples/hid/hid_mouse.c39
-rw-r--r--samples/hid/hid_surface_dial.bpf.c10
-rw-r--r--samples/hid/hid_surface_dial.c53
-rw-r--r--scripts/Kconfig.include3
-rw-r--r--scripts/Makefile.asm-generic58
-rw-r--r--scripts/Makefile.asm-headers98
-rw-r--r--scripts/Makefile.btf11
-rw-r--r--scripts/Makefile.dtbinst2
-rw-r--r--scripts/Makefile.host2
-rw-r--r--scripts/Makefile.modfinal2
-rw-r--r--scripts/Makefile.package2
-rw-r--r--scripts/atomic/kerneldoc/sub_and_test2
-rw-r--r--scripts/const_structs.checkpatch10
-rw-r--r--scripts/dtc/Makefile2
-rwxr-xr-xscripts/faddr2line110
-rw-r--r--scripts/gcc-plugins/gcc-common.h5
-rw-r--r--scripts/gdb/linux/Makefile2
-rw-r--r--scripts/kconfig/confdata.c13
-rw-r--r--scripts/kconfig/expr.c31
-rw-r--r--scripts/kconfig/expr.h6
-rw-r--r--scripts/kconfig/gconf.c3
-rw-r--r--scripts/kconfig/menu.c2
-rw-r--r--scripts/kconfig/symbol.c6
-rwxr-xr-xscripts/ld-version.sh8
-rwxr-xr-xscripts/link-vmlinux.sh9
-rwxr-xr-xscripts/make_fit.py3
-rwxr-xr-xscripts/mksysmap28
-rw-r--r--scripts/mod/modpost.c5
-rw-r--r--scripts/package/kernel.spec9
-rw-r--r--scripts/syscall.tbl404
-rwxr-xr-xscripts/syscalltbl.sh18
-rw-r--r--security/Kconfig.hardening15
-rw-r--r--security/apparmor/audit.c6
-rw-r--r--security/apparmor/include/audit.h2
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_fs.c3
-rw-r--r--security/integrity/ima/ima_policy.c15
-rw-r--r--security/keys/encrypted-keys/encrypted.c1
-rw-r--r--security/keys/keyctl.c2
-rw-r--r--security/keys/trusted-keys/trusted_core.c1
-rw-r--r--security/landlock/fs.c13
-rw-r--r--security/security.c76
-rw-r--r--security/selinux/hooks.c38
-rw-r--r--security/selinux/include/audit.h4
-rw-r--r--security/selinux/ss/ebitmap.h2
-rw-r--r--security/selinux/ss/services.c5
-rw-r--r--security/smack/smack_lsm.c52
-rw-r--r--security/tomoyo/Kconfig2
-rw-r--r--security/tomoyo/common.c2
-rw-r--r--security/yama/yama_lsm.c1
-rw-r--r--sound/core/init.c9
-rw-r--r--sound/core/jack.c21
-rw-r--r--sound/core/pcm_dmaengine.c22
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/seq/seq_ump_convert.c60
-rw-r--r--sound/core/ump.c15
-rw-r--r--sound/core/ump_convert.c1
-rw-r--r--sound/hda/intel-dsp-config.c10
-rw-r--r--sound/oss/dmasound/dmasound_core.c1
-rw-r--r--sound/pci/hda/Kconfig2
-rw-r--r--sound/pci/hda/cs35l41_hda.c6
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c8
-rw-r--r--sound/pci/hda/cs35l56_hda.c9
-rw-r--r--sound/pci/hda/hda_controller.c3
-rw-r--r--sound/pci/hda/patch_realtek.c68
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c4
-rw-r--r--sound/soc/amd/acp/acp-i2s.c8
-rw-r--r--sound/soc/amd/acp/acp-pci.c12
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c7
-rw-r--r--sound/soc/atmel/atmel-classd.c7
-rw-r--r--sound/soc/codecs/Kconfig11
-rw-r--r--sound/soc/codecs/Makefile2
-rw-r--r--sound/soc/codecs/cs35l56-shared.c4
-rw-r--r--sound/soc/codecs/cs40l50-codec.c307
-rw-r--r--sound/soc/codecs/cs42l43-jack.c4
-rw-r--r--sound/soc/codecs/cs42l43.c5
-rw-r--r--sound/soc/codecs/es8326.c8
-rw-r--r--sound/soc/codecs/rt5645.c24
-rw-r--r--sound/soc/codecs/rt5645.h6
-rw-r--r--sound/soc/codecs/rt711-sdw.c2
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.c4
-rw-r--r--sound/soc/codecs/wm_adsp.c1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c3
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c1
-rw-r--r--sound/soc/intel/avs/topology.c19
-rw-r--r--sound/soc/intel/boards/Kconfig2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c11
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c10
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359.c1
-rw-r--r--sound/soc/mxs/mxs-pcm.c1
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-lpass-dais.c32
-rw-r--r--sound/soc/qcom/sdw.c1
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c13
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c8
-rw-r--r--sound/soc/soc-topology.c35
-rw-r--r--sound/soc/sof/amd/acp-common.c4
-rw-r--r--sound/soc/sof/amd/acp.c2
-rw-r--r--sound/soc/sof/amd/acp63.c4
-rw-r--r--sound/soc/sof/amd/pci-acp63.c1
-rw-r--r--sound/soc/sof/amd/pci-rmb.c1
-rw-r--r--sound/soc/sof/amd/pci-rn.c1
-rw-r--r--sound/soc/sof/amd/pci-vangogh.c1
-rw-r--r--sound/soc/sof/amd/rembrandt.c4
-rw-r--r--sound/soc/sof/amd/renoir.c4
-rw-r--r--sound/soc/sof/amd/vangogh.c4
-rw-r--r--sound/soc/sof/core.c2
-rw-r--r--sound/soc/sof/imx/imx-common.c1
-rw-r--r--sound/soc/sof/imx/imx8.c3
-rw-r--r--sound/soc/sof/imx/imx8m.c3
-rw-r--r--sound/soc/sof/imx/imx8ulp.c3
-rw-r--r--sound/soc/sof/intel/atom.c1
-rw-r--r--sound/soc/sof/intel/bdw.c1
-rw-r--r--sound/soc/sof/intel/byt.c1
-rw-r--r--sound/soc/sof/intel/hda-codec.c1
-rw-r--r--sound/soc/sof/intel/hda-ctrl.c1
-rw-r--r--sound/soc/sof/intel/hda-dai.c18
-rw-r--r--sound/soc/sof/intel/hda-mlink.c1
-rw-r--r--sound/soc/sof/intel/hda-pcm.c6
-rw-r--r--sound/soc/sof/intel/hda.c1
-rw-r--r--sound/soc/sof/intel/pci-apl.c1
-rw-r--r--sound/soc/sof/intel/pci-cnl.c1
-rw-r--r--sound/soc/sof/intel/pci-icl.c1
-rw-r--r--sound/soc/sof/intel/pci-lnl.c1
-rw-r--r--sound/soc/sof/intel/pci-mtl.c1
-rw-r--r--sound/soc/sof/intel/pci-skl.c1
-rw-r--r--sound/soc/sof/intel/pci-tgl.c1
-rw-r--r--sound/soc/sof/intel/pci-tng.c1
-rw-r--r--sound/soc/sof/ipc4-pcm.c12
-rw-r--r--sound/soc/sof/ipc4-topology.c163
-rw-r--r--sound/soc/sof/ipc4-topology.h6
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186.c3
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c3
-rw-r--r--sound/soc/sof/mediatek/mtk-adsp-common.c1
-rw-r--r--sound/soc/sof/nocodec.c2
-rw-r--r--sound/soc/sof/sof-acpi-dev.c1
-rw-r--r--sound/soc/sof/sof-audio.c2
-rw-r--r--sound/soc/sof/sof-client-ipc-flood-test.c2
-rw-r--r--sound/soc/sof/sof-client-ipc-kernel-injector.c2
-rw-r--r--sound/soc/sof/sof-client-ipc-msg-injector.c2
-rw-r--r--sound/soc/sof/sof-client-probes.c2
-rw-r--r--sound/soc/sof/sof-of-dev.c1
-rw-r--r--sound/soc/sof/sof-pci-dev.c1
-rw-r--r--sound/soc/sof/sof-utils.c1
-rw-r--r--sound/soc/sof/stream-ipc.c2
-rw-r--r--sound/soc/sof/xtensa/core.c2
-rw-r--r--sound/soc/ti/davinci-mcasp.c9
-rw-r--r--sound/soc/ti/omap-hdmi.c6
-rw-r--r--tools/arch/arm64/include/asm/cputype.h6
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/loongarch/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/x86/include/asm/msr-index.h9
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h22
-rw-r--r--tools/arch/x86/kcpuid/Makefile4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst6
-rw-r--r--tools/bpf/bpftool/Makefile3
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool3
-rw-r--r--tools/bpf/bpftool/btf.c193
-rw-r--r--tools/bpf/bpftool/cgroup.c40
-rw-r--r--tools/bpf/bpftool/common.c2
-rw-r--r--tools/bpf/bpftool/gen.c94
-rw-r--r--tools/bpf/bpftool/prog.c4
-rw-r--r--tools/bpf/bpftool/skeleton/pid_iter.bpf.c7
-rw-r--r--tools/bpf/bpftool/skeleton/profiler.bpf.c14
-rw-r--r--tools/bpf/resolve_btfids/main.c10
-rwxr-xr-xtools/gpio/gpio-sloppy-logic-analyzer.sh246
-rw-r--r--tools/hv/Makefile1
-rw-r--r--tools/include/nolibc/stdint.h19
-rw-r--r--tools/include/nolibc/stdio.h10
-rw-r--r--tools/include/nolibc/stdlib.h109
-rw-r--r--tools/include/uapi/asm-generic/unistd.h9
-rw-r--r--tools/include/uapi/drm/i915_drm.h31
-rw-r--r--tools/include/uapi/linux/bpf.h17
-rw-r--r--tools/include/uapi/linux/kvm.h4
-rw-r--r--tools/include/uapi/linux/netdev.h1
-rw-r--r--tools/include/uapi/linux/stat.h4
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/btf.c696
-rw-r--r--tools/lib/bpf/btf.h36
-rw-r--r--tools/lib/bpf/btf_iter.c177
-rw-r--r--tools/lib/bpf/btf_relocate.c519
-rw-r--r--tools/lib/bpf/features.c32
-rw-r--r--tools/lib/bpf/libbpf.c136
-rw-r--r--tools/lib/bpf/libbpf.h23
-rw-r--r--tools/lib/bpf/libbpf.map4
-rw-r--r--tools/lib/bpf/libbpf_internal.h39
-rw-r--r--tools/lib/bpf/linker.c69
-rw-r--r--tools/memory-model/Documentation/README4
-rw-r--r--tools/memory-model/Documentation/access-marking.txt34
-rw-r--r--tools/memory-model/lock.cat62
-rw-r--r--tools/net/ynl/Makefile6
-rw-r--r--tools/net/ynl/Makefile.deps4
-rw-r--r--tools/net/ynl/lib/Makefile4
-rw-r--r--tools/net/ynl/lib/ynl-priv.h30
-rw-r--r--tools/net/ynl/lib/ynl.c10
-rw-r--r--tools/net/ynl/lib/ynl.h2
-rw-r--r--tools/net/ynl/lib/ynl.py2
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py58
-rwxr-xr-xtools/net/ynl/ynl-gen-rst.py13
-rw-r--r--tools/objtool/Documentation/objtool.txt19
-rw-r--r--tools/objtool/arch/x86/decode.c8
-rw-r--r--tools/objtool/arch/x86/special.c23
-rw-r--r--tools/objtool/builtin-check.c4
-rw-r--r--tools/objtool/noreturns.h4
-rw-r--r--tools/objtool/special.c16
-rw-r--r--tools/perf/Makefile.perf1
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl1
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl3
-rw-r--r--tools/perf/builtin-record.c6
-rw-r--r--tools/perf/builtin-trace.c2
-rw-r--r--tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h8
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h3
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fcntl.h14
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/prctl.h22
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/stat.h4
-rw-r--r--tools/perf/util/comm.c29
-rw-r--r--tools/perf/util/dsos.c26
-rw-r--r--tools/power/cpupower/Makefile47
-rw-r--r--tools/power/cpupower/README160
-rw-r--r--tools/power/cpupower/bench/Makefile5
-rw-r--r--tools/power/cpupower/man/cpupower-monitor.113
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c26
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c2
-rwxr-xr-xtools/power/pm-graph/bootgraph.py16
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py1098
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c6
-rw-r--r--tools/power/x86/turbostat/Makefile6
-rw-r--r--tools/power/x86/turbostat/turbostat.c20
-rwxr-xr-xtools/rcu/rcu-updaters.sh52
-rw-r--r--tools/testing/cxl/test/cxl.c4
-rw-r--r--tools/testing/cxl/test/mem.c1
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/alsa/Makefile2
-rw-r--r--tools/testing/selftests/arm64/abi/ptrace.c2
-rw-r--r--tools/testing/selftests/arm64/fp/.gitignore1
-rw-r--r--tools/testing/selftests/arm64/fp/Makefile1
-rw-r--r--tools/testing/selftests/arm64/fp/fp-stress.c26
-rw-r--r--tools/testing/selftests/arm64/fp/kernel-test.c324
-rw-r--r--tools/testing/selftests/arm64/tags/Makefile1
-rwxr-xr-xtools/testing/selftests/arm64/tags/run_tags_test.sh12
-rw-r--r--tools/testing/selftests/arm64/tags/tags_test.c10
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch641
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x4
-rw-r--r--tools/testing/selftests/bpf/Makefile2
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_common.h2
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h32
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h2
-rw-r--r--tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c4
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c77
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h10
-rw-r--r--tools/testing/selftests/bpf/config17
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c130
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_atomics.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_nf.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c247
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_distill.c552
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_field_iter.c161
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cpumask.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/find_vma.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_list.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rbtree.c47
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c56
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_links.c61
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_netkit.c94
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_lockup.c91
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tracing_struct.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c134
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c168
-rw-r--r--tools/testing/selftests/bpf/progs/arena_atomics.c143
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab.c17
-rw-r--r--tools/testing/selftests/bpf/progs/arena_list.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c36
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c6
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c6
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h15
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_success.c171
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_bench.c10
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_sanity.c16
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c30
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_ip_test.c7
-rw-r--r--tools/testing/selftests/bpf/progs/ip_check_defrag.c10
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c37
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session.c3
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.c47
-rw-r--r--tools/testing/selftests/bpf/progs/map_percpu_stats.c2
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_common.h2
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_failure.c8
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_success.c8
-rw-r--r--tools/testing/selftests/bpf/progs/netif_receive_skb.c5
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h5
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree.c77
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c4
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c5
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c11
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_detach.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_ma.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf.c109
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_write.c46
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.h20
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_dtime.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_link.c35
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h2
-rw-r--r--tools/testing/selftests/bpf/progs/timer_lockup.c87
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct.c54
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct_many_args.c95
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi.c50
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_fail.c22
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena.c1
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena_large.c1
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bits_iter.c153
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c382
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_movsx.c63
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c6
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c41
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c187
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c2
-rw-r--r--tools/testing/selftests/bpf/progs/wq.c19
-rw-r--r--tools/testing/selftests/bpf/progs/wq_failures.c4
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_flowtable.c148
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c1
-rw-r--r--tools/testing/selftests/bpf/progs/xfrm_info.c1
-rw-r--r--tools/testing/selftests/bpf/test_loader.c115
-rw-r--r--tools/testing/selftests/bpf/test_progs.h9
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c137
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c33
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c5
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c13
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c15
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c22
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c40
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h2
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c1
-rw-r--r--tools/testing/selftests/cachestat/test_cachestat.c1
-rw-r--r--tools/testing/selftests/cgroup/.gitignore11
-rw-r--r--tools/testing/selftests/cgroup/Makefile25
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh75
-rw-r--r--tools/testing/selftests/cgroup/test_pids.c178
-rw-r--r--tools/testing/selftests/dma/dma_map_benchmark.c1
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile1
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_ctx.py522
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py19
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/load.py37
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh71
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh18
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh55
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/config8
-rw-r--r--tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile6
-rwxr-xr-xtools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh494
-rw-r--r--tools/testing/selftests/exec/Makefile19
-rw-r--r--tools/testing/selftests/exec/load_address.c67
-rw-r--r--tools/testing/selftests/fchmodat2/Makefile11
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c1
-rw-r--r--tools/testing/selftests/filesystems/statmount/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount.h46
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c156
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test_ns.c364
-rw-r--r--tools/testing/selftests/ftrace/config26
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc20
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc3
-rw-r--r--tools/testing/selftests/futex/Makefile2
-rw-r--r--tools/testing/selftests/futex/functional/Makefile2
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c2
-rw-r--r--tools/testing/selftests/hid/hid_bpf.c426
-rw-r--r--tools/testing/selftests/hid/progs/hid.c392
-rw-r--r--tools/testing/selftests/hid/progs/hid_bpf_helpers.h46
-rw-r--r--tools/testing/selftests/kselftest.h8
-rw-r--r--tools/testing/selftests/kselftest_harness.h43
-rw-r--r--tools/testing/selftests/kvm/Makefile1
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h1
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/ucall.c1
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c15
-rw-r--r--tools/testing/selftests/kvm/riscv/ebreak_test.c1
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c1
-rw-r--r--tools/testing/selftests/kvm/s390x/shared_zeropage_test.c111
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_init2_tests.c4
-rw-r--r--tools/testing/selftests/landlock/fs_test.c45
-rw-r--r--tools/testing/selftests/lib.mk8
-rw-r--r--tools/testing/selftests/lkdtm/tests.txt1
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c38
-rw-r--r--tools/testing/selftests/mm/map_fixed_noreplace.c24
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile3
-rw-r--r--tools/testing/selftests/net/af_unix/Makefile2
-rw-r--r--tools/testing/selftests/net/af_unix/config3
-rw-r--r--tools/testing/selftests/net/af_unix/msg_oob.c734
-rw-r--r--tools/testing/selftests/net/af_unix/scm_rights.c25
-rw-r--r--tools/testing/selftests/net/af_unix/test_unix_oob.c436
-rwxr-xr-xtools/testing/selftests/net/amt.sh2
-rw-r--r--tools/testing/selftests/net/config8
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile2
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh92
-rwxr-xr-xtools/testing/selftests/net/forwarding/min_max_mtu.sh283
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre.sh45
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bound.sh23
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh29
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh73
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_flower.sh43
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh65
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_lib.sh90
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_neigh.sh39
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_nh.sh35
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh69
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh79
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_vlan.sh43
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_seed.sh333
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh8
-rw-r--r--tools/testing/selftests/net/hsr/config1
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_ping.sh11
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_redbox.sh15
-rw-r--r--tools/testing/selftests/net/lib.sh71
-rw-r--r--tools/testing/selftests/net/lib/py/ksft.py65
-rw-r--r--tools/testing/selftests/net/lib/py/utils.py61
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh15
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh63
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh6
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh46
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c14
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh37
-rwxr-xr-xtools/testing/selftests/net/netns-sysctl.sh40
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh171
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py643
-rw-r--r--tools/testing/selftests/net/openvswitch/settings1
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh145
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh335
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh340
-rw-r--r--tools/testing/selftests/net/tcp_ao/self-connect.c18
-rw-r--r--tools/testing/selftests/net/udpgso.c15
-rwxr-xr-xtools/testing/selftests/net/udpgso.sh43
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh93
-rw-r--r--tools/testing/selftests/net/ynl.mk21
-rw-r--r--tools/testing/selftests/nolibc/Makefile2
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c109
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh9
-rw-r--r--tools/testing/selftests/openat2/Makefile14
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c1
-rw-r--r--tools/testing/selftests/powerpc/flags.mk5
-rw-r--r--tools/testing/selftests/resctrl/cache.c10
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c37
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c22
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c26
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c26
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h49
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c371
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c67
-rw-r--r--tools/testing/selftests/riscv/sigreturn/sigreturn.c2
-rw-r--r--tools/testing/selftests/sched/cs_prctl_test.c10
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c6
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c131
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json44
-rw-r--r--tools/testing/selftests/timens/exec.c6
-rw-r--r--tools/testing/selftests/timens/timer.c2
-rw-r--r--tools/testing/selftests/timens/timerfd.c2
-rw-r--r--tools/testing/selftests/timens/vfork_exec.c4
-rw-r--r--tools/testing/selftests/timers/rtcpie.c3
-rw-r--r--tools/testing/selftests/vDSO/Makefile29
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c16
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c18
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile8
-rw-r--r--tools/testing/selftests/x86/Makefile31
-rw-r--r--tools/testing/selftests/x86/amx.c16
-rw-r--r--tools/testing/selftests/x86/clang_helpers_32.S11
-rw-r--r--tools/testing/selftests/x86/clang_helpers_64.S28
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c6
-rw-r--r--tools/testing/selftests/x86/fsgsbase_restore.c11
-rw-r--r--tools/testing/selftests/x86/sigreturn.c2
-rw-r--r--tools/testing/selftests/x86/syscall_arg_fault.c1
-rw-r--r--tools/testing/selftests/x86/sysret_rip.c20
-rw-r--r--tools/testing/selftests/x86/test_FISTTP.c8
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c15
-rw-r--r--tools/testing/selftests/x86/vdso_restorer.c2
-rw-r--r--tools/testing/vsock/Makefile13
-rw-r--r--virt/kvm/dirty_ring.c3
-rw-r--r--virt/kvm/guest_memfd.c5
-rw-r--r--virt/kvm/kvm_main.c19
6030 files changed, 258835 insertions, 87168 deletions
diff --git a/.editorconfig b/.editorconfig
index 854773350cc5..29a30ccfc07b 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -5,7 +5,6 @@ root = true
[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
@@ -13,7 +12,6 @@ indent_size = 8
[*.{json,py,rs}]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
@@ -26,7 +24,6 @@ indent_size = 8
[*.yaml]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = unset
insert_final_newline = true
indent_style = space
indent_size = 2
diff --git a/.mailmap b/.mailmap
index 43cd2995dbc2..38f8bed507a2 100644
--- a/.mailmap
+++ b/.mailmap
@@ -72,6 +72,8 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
+Andy Shevchenko <andy@kernel.org> <andy@smile.org.ua>
+Andy Shevchenko <andy@kernel.org> <ext-andriy.shevchenko@nokia.com>
Anilkumar Kolli <quic_akolli@quicinc.com> <akolli@codeaurora.org>
Anirudh Ghayal <quic_aghayal@quicinc.com> <aghayal@codeaurora.org>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
@@ -217,6 +219,7 @@ Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
+Geliang Tang <geliang@kernel.org> <tanggeliang@kylinos.cn>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@@ -337,10 +340,11 @@ Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org>
Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
-Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
-Kees Cook <keescook@chromium.org> <keescook@google.com>
-Kees Cook <keescook@chromium.org> <kees@outflux.net>
-Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
+Kees Cook <kees@kernel.org> <kees.cook@canonical.com>
+Kees Cook <kees@kernel.org> <keescook@chromium.org>
+Kees Cook <kees@kernel.org> <keescook@google.com>
+Kees Cook <kees@kernel.org> <kees@outflux.net>
+Kees Cook <kees@kernel.org> <kees@ubuntu.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
@@ -380,7 +384,9 @@ Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
+Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
+Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
@@ -604,6 +610,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
+Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
@@ -684,6 +691,7 @@ Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Weiwen Hu <huweiwen@linux.alibaba.com> <sehuww@mail.scut.edu.cn>
WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
Wen Gong <quic_wgong@quicinc.com> <wgong@codeaurora.org>
Wesley Cheng <quic_wcheng@quicinc.com> <wcheng@codeaurora.org>
diff --git a/CREDITS b/CREDITS
index 0107047f807b..d6cbd4c792a1 100644
--- a/CREDITS
+++ b/CREDITS
@@ -271,6 +271,9 @@ D: Driver for WaveFront soundcards (Turtle Beach Maui, Tropez, Tropez+)
D: Various bugfixes and changes to sound drivers
S: USA
+N: Daniel Bristot de Oliveira
+D: Scheduler contributions, notably: SCHED_DEADLINE
+
N: Carlos Henrique Bauer
E: chbauer@acm.org
E: bauer@atlas.unisinos.br
@@ -796,6 +799,11 @@ E: luisfcorreia@gmail.com
D: Ralink rt2x00 WLAN driver
S: Belas, Portugal
+N: Benoît Cousson
+E: bcousson@baylibre.com
+D: TI OMAP Devicetree platforms
+D: TI OMAP HWMOD boards
+
N: Alan Cox
W: http://www.linux.org.uk/diary/
D: Linux Networking (0.99.10->2.0.29)
@@ -1214,6 +1222,10 @@ D: UDF filesystem
S: (ask for current address)
S: USA
+N: Larry Finger
+E: Larry.Finger@lwfinger.net
+D: Maintainer of wireless drivers, too many to list here
+
N: Jürgen Fischer
E: fischer@norbit.de
D: Author of Adaptec AHA-152x SCSI driver
@@ -3146,9 +3158,11 @@ S: Triftstra=DFe 55
S: 13353 Berlin
S: Germany
-N: Gustavo Pimental
+N: Gustavo Pimentel
E: gustavo.pimentel@synopsys.com
D: PCI driver for Synopsys DesignWare
+D: Synopsys DesignWare eDMA driver
+D: Synopsys DesignWare xData traffic generator
N: Emanuel Pirker
E: epirker@edu.uni-klu.ac.at
@@ -4362,6 +4376,10 @@ N: Haojian Zhuang
E: haojian.zhuang@gmail.com
D: MMP support
+N: Tsahee Zidenberg
+E: tsahee@annapurnalabs.com
+D: Annapurna Labs Alpine Architecture
+
N: Richard Zidlicky
E: rz@linux-m68k.org, rdzidlic@geocities.com
W: http://www.geocities.com/rdzidlic
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 831f19a32e08..cea8856f798d 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -21,6 +21,59 @@ Description:
device is offset from the internal allocation unit's
natural alignment.
+What: /sys/block/<disk>/atomic_write_max_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] This parameter specifies the maximum atomic write
+ size reported by the device. This parameter is relevant
+ for merging of writes, where a merged atomic write
+ operation must not exceed this number of bytes.
+ This parameter may be greater than the value in
+ atomic_write_unit_max_bytes as
+ atomic_write_unit_max_bytes will be rounded down to a
+ power-of-two and atomic_write_unit_max_bytes may also be
+ limited by some other queue limits, such as max_segments.
+ This parameter - along with atomic_write_unit_min_bytes
+ and atomic_write_unit_max_bytes - will not be larger than
+ max_hw_sectors_kb, but may be larger than max_sectors_kb.
+
+
+What: /sys/block/<disk>/atomic_write_unit_min_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] This parameter specifies the smallest block which can
+ be written atomically with an atomic write operation. All
+		atomic write operations must begin at an
+ atomic_write_unit_min boundary and must be multiples of
+ atomic_write_unit_min. This value must be a power-of-two.
+
+
+What: /sys/block/<disk>/atomic_write_unit_max_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] This parameter defines the largest block which can be
+ written atomically with an atomic write operation. This
+ value must be a multiple of atomic_write_unit_min and must
+ be a power-of-two. This value will not be larger than
+ atomic_write_max_bytes.
+
+
+What: /sys/block/<disk>/atomic_write_boundary_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] A device may need to internally split an atomic write I/O
+ which straddles a given logical block address boundary. This
+ parameter specifies the size in bytes of the atomic boundary if
+ one is reported by the device. This value must be a
+		power-of-two and at least as large as
+ atomic_write_unit_max_bytes.
+ Any attempt to merge atomic write I/Os must not result in a
+ merged I/O which crosses this boundary (if any).
+
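As a rough usage sketch of the new atomic-write attributes above (the disk name
"sda" is only a placeholder, and the files exist only on kernels carrying this
change), the limits could be dumped from a shell like this:

    for f in atomic_write_max_bytes atomic_write_unit_min_bytes \
             atomic_write_unit_max_bytes atomic_write_boundary_bytes; do
            printf '%s: ' "$f"; cat "/sys/block/sda/$f"
    done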
What: /sys/block/<disk>/diskseq
Date: February 2021
diff --git a/Documentation/ABI/stable/sysfs-class-backlight b/Documentation/ABI/stable/sysfs-class-backlight
index 023fb52645f8..6102d6bebdf9 100644
--- a/Documentation/ABI/stable/sysfs-class-backlight
+++ b/Documentation/ABI/stable/sysfs-class-backlight
@@ -3,10 +3,11 @@ Date: April 2005
KernelVersion: 2.6.12
Contact: Richard Purdie <rpurdie@rpsys.net>
Description:
- Control BACKLIGHT power, values are FB_BLANK_* from fb.h
+ Control BACKLIGHT power, values are compatible with
+ FB_BLANK_* from fb.h
- - FB_BLANK_UNBLANK (0) : power on.
- - FB_BLANK_POWERDOWN (4) : power off
+ - 0 (FB_BLANK_UNBLANK) : power on.
+ - 4 (FB_BLANK_POWERDOWN) : power off
Users: HAL
What: /sys/class/backlight/<backlight>/brightness
diff --git a/Documentation/ABI/testing/configfs-tsm b/Documentation/ABI/testing/configfs-tsm
index dd24202b5ba5..534408bc1408 100644
--- a/Documentation/ABI/testing/configfs-tsm
+++ b/Documentation/ABI/testing/configfs-tsm
@@ -31,6 +31,18 @@ Description:
Standardization v2.03 Section 4.1.8.1 MSG_REPORT_REQ.
https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/specifications/56421.pdf
+What: /sys/kernel/config/tsm/report/$name/manifestblob
+Date: January, 2024
+KernelVersion: v6.10
+Contact: linux-coco@lists.linux.dev
+Description:
+		(RO) Optional supplemental data that a TSM may emit. Visibility
+		of this attribute depends on the TSM, and the file may be empty
+		if no manifest data is available.
+
+ See 'service_provider' for information on the format of the
+ manifest blob.
+
What: /sys/kernel/config/tsm/report/$name/provider
Date: September, 2023
KernelVersion: v6.7
@@ -80,3 +92,54 @@ Contact: linux-coco@lists.linux.dev
Description:
(RO) Indicates the minimum permissible value that can be written
to @privlevel.
+
+What: /sys/kernel/config/tsm/report/$name/service_provider
+Date: January, 2024
+KernelVersion: v6.10
+Contact: linux-coco@lists.linux.dev
+Description:
+ (WO) Attribute is visible if a TSM implementation provider
+ supports the concept of attestation reports from a service
+ provider for TVMs, like SEV-SNP running under an SVSM.
+ Specifying the service provider via this attribute will create
+ an attestation report as specified by the service provider.
+ The only currently supported service provider is "svsm".
+
+ For the "svsm" service provider, see the Secure VM Service Module
+ for SEV-SNP Guests v1.00 Section 7. For the doc, search for
+ "site:amd.com "Secure VM Service Module for SEV-SNP
+ Guests", docID: 58019"
+
+What: /sys/kernel/config/tsm/report/$name/service_guid
+Date: January, 2024
+KernelVersion: v6.10
+Contact: linux-coco@lists.linux.dev
+Description:
+ (WO) Attribute is visible if a TSM implementation provider
+ supports the concept of attestation reports from a service
+ provider for TVMs, like SEV-SNP running under an SVSM.
+		Specifying an empty/null GUID (00000000-0000-0000-0000-000000000000)
+		requests that all active services within the service provider be
+		part of the attestation report. Specifying a GUID requests
+		an attestation report of just the specified service, using the
+ manifest form specified by the service_manifest_version
+ attribute.
+
+ See 'service_provider' for information on the format of the
+ service guid.
+
+What: /sys/kernel/config/tsm/report/$name/service_manifest_version
+Date: January, 2024
+KernelVersion: v6.10
+Contact: linux-coco@lists.linux.dev
+Description:
+ (WO) Attribute is visible if a TSM implementation provider
+ supports the concept of attestation reports from a service
+ provider for TVMs, like SEV-SNP running under an SVSM.
+ Indicates the service manifest version requested for the
+ attestation report (default 0). If this field is not set by
+ the user, the default manifest version of the service (the
+ service's initial/first manifest version) is returned.
+
+ See 'service_provider' for information on the format of the
+ service manifest version.
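To make the interplay of the new service_* attributes concrete, here is a hedged
configfs sketch; the report name "report0" and the use of the all-zero GUID are
illustrative only, and whether manifestblob is populated depends on the TSM:

    report=/sys/kernel/config/tsm/report/report0
    mkdir "$report"
    echo svsm > "$report/service_provider"          # only "svsm" is supported today
    echo 00000000-0000-0000-0000-000000000000 > "$report/service_guid"   # all services
    echo 0 > "$report/service_manifest_version"     # default manifest version
    cat "$report/provider"
    hexdump -C "$report/manifestblob" | head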
diff --git a/Documentation/ABI/testing/debugfs-tpmi b/Documentation/ABI/testing/debugfs-tpmi
index 597f0475fe6e..c493a1403d2f 100644
--- a/Documentation/ABI/testing/debugfs-tpmi
+++ b/Documentation/ABI/testing/debugfs-tpmi
@@ -29,3 +29,12 @@ Example:
echo 0,0x20,0xff > mem_write
echo 1,64,64 > mem_write
Users: Debugging, any user space test suite
+
+What: /sys/kernel/debug/tpmi-<n>/plr/domain<n>/status
+Date: Aug 2024
+KernelVersion: 6.11
+Contact: Tero Kristo <tero.kristo@linux.intel.com>
+Description:
+Shows the currently active Performance Limit Reasons for die level and the
+individual CPUs under the die. The contents of this file are sticky, and
+clearing all the statuses can be done by writing "0\n" to this file.
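A short, hypothetical example of reading and then clearing the sticky status
file described above (the tpmi instance and domain numbers are placeholders):

    cat /sys/kernel/debug/tpmi-0/plr/domain0/status
    echo 0 > /sys/kernel/debug/tpmi-0/plr/domain0/status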
diff --git a/Documentation/ABI/testing/sysfs-bus-auxiliary b/Documentation/ABI/testing/sysfs-bus-auxiliary
new file mode 100644
index 000000000000..cc856079690f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-auxiliary
@@ -0,0 +1,9 @@
+What: /sys/bus/auxiliary/devices/.../irqs/
+Date: April, 2024
+Contact: Shay Drory <shayd@nvidia.com>
+Description:
+		The /sys/devices/.../irqs directory contains a variable set of
+		files, each named after an IRQ number, similar to the IRQ
+		numbers of a PCI PF or VF found in the msi_irqs directory.
+		These irq files are added and removed dynamically when an IRQ
+		is requested and freed respectively for the PCI SF.
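As an illustration (the auxiliary device name below is made up), the per-IRQ
files can be listed like any other sysfs directory:

    ls /sys/bus/auxiliary/devices/mlx5_core.sf.1/irqs/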
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-turris-omnia-mcu b/Documentation/ABI/testing/sysfs-bus-i2c-devices-turris-omnia-mcu
new file mode 100644
index 000000000000..307a55f599cb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-turris-omnia-mcu
@@ -0,0 +1,113 @@
+What: /sys/bus/i2c/devices/<mcu_device>/board_revision
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Contains board revision number.
+
+ Only available if board information is burned in the MCU (older
+ revisions have board information burned in the ATSHA204-A chip).
+
+ Format: %u.
+
+What: /sys/bus/i2c/devices/<mcu_device>/first_mac_address
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Contains device first MAC address. Each Turris Omnia is
+ allocated 3 MAC addresses. The two additional addresses are
+ computed from the first one by incrementing it.
+
+ Only available if board information is burned in the MCU (older
+ revisions have board information burned in the ATSHA204-A chip).
+
+ Format: %pM.
+
+What: /sys/bus/i2c/devices/<mcu_device>/front_button_mode
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RW) The front button on the Turris Omnia router can be
+ configured either to change the intensity of all the LEDs on the
+ front panel, or to send the press event to the CPU as an
+ interrupt.
+
+ This file switches between these two modes:
+ - "mcu" makes the button press event be handled by the MCU to
+		    change the intensity of the front panel LEDs.
+ - "cpu" makes the button press event be handled by the CPU.
+
+ Format: %s.
+
+What: /sys/bus/i2c/devices/<mcu_device>/front_button_poweron
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RW) Newer versions of the microcontroller firmware of the
+ Turris Omnia router support powering off the router into true
+ low power mode. The router can be powered on by pressing the
+ front button.
+
+ This file configures whether front button power on is enabled.
+
+ This file is present only if the power off feature is supported
+ by the firmware.
+
+ Format: %i.
+
+What: /sys/bus/i2c/devices/<mcu_device>/fw_features
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Newer versions of the microcontroller firmware report the
+ features they support. These can be read from this file. If the
+ MCU firmware is too old, this file reads 0x0.
+
+ Format: 0x%x.
+
+What: /sys/bus/i2c/devices/<mcu_device>/fw_version_hash_application
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Contains the version hash (commit hash) of the application
+ part of the microcontroller firmware.
+
+ Format: %s.
+
+What: /sys/bus/i2c/devices/<mcu_device>/fw_version_hash_bootloader
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Contains the version hash (commit hash) of the bootloader
+ part of the microcontroller firmware.
+
+ Format: %s.
+
+What: /sys/bus/i2c/devices/<mcu_device>/mcu_type
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Contains the microcontroller type (STM32, GD32, MKL).
+
+ Format: %s.
+
+What: /sys/bus/i2c/devices/<mcu_device>/reset_selector
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Contains the selected factory reset level, determined by
+ how long the rear reset button was held by the user during board
+ reset.
+
+ Format: %i.
+
+What: /sys/bus/i2c/devices/<mcu_device>/serial_number
+Date: September 2024
+KernelVersion: 6.11
+Contact: Marek Behún <kabel@kernel.org>
+Description: (RO) Contains the 64-bit board serial number in hexadecimal
+ format.
+
+ Only available if board information is burned in the MCU (older
+ revisions have board information burned in the ATSHA204-A chip).
+
+ Format: %016X.
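A hedged sketch of exercising a few of the attributes above; the exact
<mcu_device> name depends on the I2C bus and address, so "1-002a" is purely
illustrative:

    mcu=/sys/bus/i2c/devices/1-002a
    cat "$mcu/mcu_type" "$mcu/serial_number" "$mcu/fw_features"
    echo cpu > "$mcu/front_button_mode"     # deliver button presses to the CPU
    echo 1 > "$mcu/front_button_poweron"    # only if the power off feature exists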
diff --git a/Documentation/ABI/testing/sysfs-bus-wmi b/Documentation/ABI/testing/sysfs-bus-wmi
new file mode 100644
index 000000000000..aadb35b82198
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-wmi
@@ -0,0 +1,81 @@
+What: /sys/bus/wmi/devices/.../driver_override
+Date: February 2024
+Contact: Armin Wolf <W_Armin@gmx.de>
+Description:
+ This file allows the driver for a device to be specified which
+ will override standard ID table matching.
+ When specified, only a driver with a name matching the value
+ written to driver_override will have an opportunity to bind
+ to the device.
+ The override is specified by writing a string to the
+ driver_override file (echo wmi-event-dummy > driver_override).
+ The override may be cleared with an empty string (echo > \
+ driver_override) which returns the device to standard matching
+ rules binding.
+ Writing to driver_override does not automatically unbind the
+ device from its current driver or make any attempt to automatically
+ load the specified driver. If no driver with a matching name is
+ currently loaded in the kernel, the device will not bind to any
+ driver.
+ This also allows devices to opt-out of driver binding using a
+ driver_override name such as "none". Only a single driver may be
+		specified in the override; there is no support for parsing delimiters.
+
+What: /sys/bus/wmi/devices/.../modalias
+Date:		November 2015
+Contact: Andy Lutomirski <luto@kernel.org>
+Description:
+ This file contains the MODALIAS value emitted by uevent for a
+ given WMI device.
+
+ Format: wmi:XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX.
+
+What: /sys/bus/wmi/devices/.../guid
+Date: November 2015
+Contact: Andy Lutomirski <luto@kernel.org>
+Description:
+ This file contains the GUID used to match WMI devices to
+ compatible WMI drivers. This GUID is not necessarily unique
+ inside a given machine, it is solely used to identify the
+ interface exposed by a given WMI device.
+
+What: /sys/bus/wmi/devices/.../object_id
+Date: November 2015
+Contact: Andy Lutomirski <luto@kernel.org>
+Description:
+ This file contains the WMI object ID used internally to construct
+ the ACPI method names used by non-event WMI devices. It contains
+ two ASCII letters.
+
+What: /sys/bus/wmi/devices/.../notify_id
+Date: November 2015
+Contact: Andy Lutomirski <luto@kernel.org>
+Description:
+ This file contains the WMI notify ID used internally to map ACPI
+ events to WMI event devices. It contains two ASCII letters.
+
+What: /sys/bus/wmi/devices/.../instance_count
+Date: November 2015
+Contact: Andy Lutomirski <luto@kernel.org>
+Description:
+		This file contains the number of WMI object instances present
+		on a given WMI device. It contains a non-negative
+ number.
+
+What: /sys/bus/wmi/devices/.../expensive
+Date: November 2015
+Contact: Andy Lutomirski <luto@kernel.org>
+Description:
+ This file contains a boolean flag signaling if interacting with
+ the given WMI device will consume significant CPU resources.
+ The WMI driver core will take care of enabling/disabling such
+ WMI devices.
+
+What: /sys/bus/wmi/devices/.../setable
+Date: May 2017
+Contact: Darren Hart (VMware) <dvhart@infradead.org>
+Description:
+		This file contains a boolean flag signaling whether the data block
+		associated with the given WMI device is writable. If the
+ given WMI device is not associated with a data block, then
+ this file will not exist.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index e7e160954e79..325873385b71 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -605,6 +605,18 @@ Description: Umwait control
Note that a value of zero means there is no limit.
Low order two bits must be zero.
+What: /sys/devices/system/cpu/sev
+ /sys/devices/system/cpu/sev/vmpl
+Date: May 2024
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description: Secure Encrypted Virtualization (SEV) information
+
+ This directory is only present when running as an SEV-SNP guest.
+
+ vmpl: Reports the Virtual Machine Privilege Level (VMPL) at which
+ the SEV-SNP guest is running.
+
+
What: /sys/devices/system/cpu/svm
Date: August 2019
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
@@ -694,3 +706,9 @@ Description:
(RO) indicates whether or not the kernel directly supports
modifying the crash elfcorehdr for CPU hot un/plug and/or
on/offline changes.
+
+What: /sys/devices/system/cpu/enabled
+Date: Nov 2022
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ (RO) the list of CPUs that can be brought online.
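Assuming an SEV-SNP guest (otherwise the sev directory is absent), the new
entries might be read as follows; this is only a usage sketch:

    cat /sys/devices/system/cpu/enabled     # CPUs that can be brought online
    cat /sys/devices/system/cpu/sev/vmpl    # VMPL of the SEV-SNP guest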
diff --git a/Documentation/ABI/testing/sysfs-fs-xfs b/Documentation/ABI/testing/sysfs-fs-xfs
index f704925f6fe9..7da4de948b46 100644
--- a/Documentation/ABI/testing/sysfs-fs-xfs
+++ b/Documentation/ABI/testing/sysfs-fs-xfs
@@ -1,7 +1,7 @@
What: /sys/fs/xfs/<disk>/log/log_head_lsn
Date: July 2014
KernelVersion: 3.17
-Contact: xfs@oss.sgi.com
+Contact: linux-xfs@vger.kernel.org
Description:
The log sequence number (LSN) of the current head of the
log. The LSN is exported in "cycle:basic block" format.
@@ -10,30 +10,28 @@ Users: xfstests
What: /sys/fs/xfs/<disk>/log/log_tail_lsn
Date: July 2014
KernelVersion: 3.17
-Contact: xfs@oss.sgi.com
+Contact: linux-xfs@vger.kernel.org
Description:
The log sequence number (LSN) of the current tail of the
log. The LSN is exported in "cycle:basic block" format.
-What: /sys/fs/xfs/<disk>/log/reserve_grant_head
-Date: July 2014
-KernelVersion: 3.17
-Contact: xfs@oss.sgi.com
+What: /sys/fs/xfs/<disk>/log/reserve_grant_head_bytes
+Date: June 2024
+KernelVersion: 6.11
+Contact: linux-xfs@vger.kernel.org
Description:
The current state of the log reserve grant head. It
represents the total log reservation of all currently
- outstanding transactions. The grant head is exported in
- "cycle:bytes" format.
+ outstanding transactions in bytes.
Users: xfstests
-What: /sys/fs/xfs/<disk>/log/write_grant_head
-Date: July 2014
-KernelVersion: 3.17
-Contact: xfs@oss.sgi.com
+What: /sys/fs/xfs/<disk>/log/write_grant_head_bytes
+Date: June 2024
+KernelVersion: 6.11
+Contact: linux-xfs@vger.kernel.org
Description:
The current state of the log write grant head. It
represents the total log reservation of all currently
outstanding transactions, including regrants due to
- rolling transactions. The grant head is exported in
- "cycle:bytes" format.
+ rolling transactions in bytes.
Users: xfstests
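For completeness, a hypothetical check of the renamed grant-head counters
(substitute a real XFS-mounted block device for "sda1"):

    cat /sys/fs/xfs/sda1/log/reserve_grant_head_bytes
    cat /sys/fs/xfs/sda1/log/write_grant_head_bytes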
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index 5750f125361b..728b1e690c64 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -149,9 +149,9 @@ This case is handled by calls to the strongly ordered
``atomic_add_return()`` read-modify-write atomic operation that
is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry
time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time.
-The grace-period kthread invokes ``rcu_dynticks_snap()`` and
-``rcu_dynticks_in_eqs_since()`` (both of which invoke
-an ``atomic_add_return()`` of zero) to detect idle CPUs.
+The grace-period kthread first invokes ``ct_dynticks_cpu_acquire()``
+(preceded by a full memory barrier) and ``rcu_dynticks_in_eqs_since()``
+(both of which rely on acquire semantics) to detect idle CPUs.
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
index cccafdaa1f84..f511476b4550 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.rst
+++ b/Documentation/RCU/Design/Requirements/Requirements.rst
@@ -2357,6 +2357,7 @@ section.
#. `Sched Flavor (Historical)`_
#. `Sleepable RCU`_
#. `Tasks RCU`_
+#. `Tasks Trace RCU`_
Bottom-Half Flavor (Historical)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -2610,6 +2611,16 @@ critical sections that are delimited by voluntary context switches, that
is, calls to schedule(), cond_resched(), and
synchronize_rcu_tasks(). In addition, transitions to and from
userspace execution also delimit tasks-RCU read-side critical sections.
+Idle tasks are ignored by Tasks RCU, and Tasks Rude RCU may be used to
+interact with them.
+
+Note well that involuntary context switches are *not* Tasks-RCU quiescent
+states. After all, in preemptible kernels, a task executing code in a
+trampoline might be preempted. In this case, the Tasks-RCU grace period
+clearly cannot end until that task resumes and its execution leaves that
+trampoline. This means, among other things, that cond_resched() does
+not provide a Tasks RCU quiescent state. (Instead, use rcu_softirq_qs()
+from softirq or rcu_tasks_classic_qs() otherwise.)
The tasks-RCU API is quite compact, consisting only of
call_rcu_tasks(), synchronize_rcu_tasks(), and
@@ -2632,6 +2643,11 @@ moniker. And this operation is considered to be quite rude by real-time
workloads that don't want their ``nohz_full`` CPUs receiving IPIs and
by battery-powered systems that don't want their idle CPUs to be awakened.
+Once kernel entry/exit and deep-idle functions have been properly tagged
+``noinstr``, Tasks RCU can start paying attention to idle tasks (except
+those that are idle from RCU's perspective) and then Tasks Rude RCU can
+be removed from the kernel.
+
The tasks-rude-RCU API is also reader-marking-free and thus quite compact,
consisting of call_rcu_tasks_rude(), synchronize_rcu_tasks_rude(),
and rcu_barrier_tasks_rude().
diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst
index 94838c65c7d9..d585a5490aee 100644
--- a/Documentation/RCU/whatisRCU.rst
+++ b/Documentation/RCU/whatisRCU.rst
@@ -250,21 +250,25 @@ rcu_assign_pointer()
^^^^^^^^^^^^^^^^^^^^
void rcu_assign_pointer(p, typeof(p) v);
- Yes, rcu_assign_pointer() **is** implemented as a macro, though it
- would be cool to be able to declare a function in this manner.
- (Compiler experts will no doubt disagree.)
+ Yes, rcu_assign_pointer() **is** implemented as a macro, though
+ it would be cool to be able to declare a function in this manner.
+ (And there has been some discussion of adding overloaded functions
+ to the C language, so who knows?)
The updater uses this spatial macro to assign a new value to an
RCU-protected pointer, in order to safely communicate the change
in value from the updater to the reader. This is a spatial (as
opposed to temporal) macro. It does not evaluate to an rvalue,
- but it does execute any memory-barrier instructions required
- for a given CPU architecture. Its ordering properties are that
- of a store-release operation.
-
- Perhaps just as important, it serves to document (1) which
- pointers are protected by RCU and (2) the point at which a
- given structure becomes accessible to other CPUs. That said,
+ but it does provide any compiler directives and memory-barrier
+ instructions required for a given compile or CPU architecture.
+ Its ordering properties are that of a store-release operation,
+ that is, any prior loads and stores required to initialize the
+ structure are ordered before the store that publishes the pointer
+ to that structure.
+
+ Perhaps just as important, rcu_assign_pointer() serves to document
+ (1) which pointers are protected by RCU and (2) the point at which
+ a given structure becomes accessible to other CPUs. That said,
rcu_assign_pointer() is most frequently used indirectly, via
the _rcu list-manipulation primitives such as list_add_rcu().
@@ -283,7 +287,11 @@ rcu_dereference()
executes any needed memory-barrier instructions for a given
CPU architecture. Currently, only Alpha needs memory barriers
within rcu_dereference() -- on other CPUs, it compiles to a
- volatile load.
+ volatile load. However, no mainstream C compilers respect
+ address dependencies, so rcu_dereference() uses volatile casts,
+ which, in combination with the coding guidelines listed in
+ rcu_dereference.rst, prevent current compilers from breaking
+ these dependencies.
Common coding practice uses rcu_dereference() to copy an
RCU-protected pointer to a local variable, then dereferences
diff --git a/Documentation/admin-guide/LSM/tomoyo.rst b/Documentation/admin-guide/LSM/tomoyo.rst
index 4bc9c2b4da6f..bdb2c2e2a1b2 100644
--- a/Documentation/admin-guide/LSM/tomoyo.rst
+++ b/Documentation/admin-guide/LSM/tomoyo.rst
@@ -9,8 +9,8 @@ TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel.
LiveCD-based tutorials are available at
-http://tomoyo.sourceforge.jp/1.8/ubuntu12.04-live.html
-http://tomoyo.sourceforge.jp/1.8/centos6-live.html
+https://tomoyo.sourceforge.net/1.8/ubuntu12.04-live.html
+https://tomoyo.sourceforge.net/1.8/centos6-live.html
Though these tutorials use non-LSM version of TOMOYO, they are useful for you
to know what TOMOYO is.
@@ -21,45 +21,32 @@ How to enable TOMOYO?
Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on
kernel's command line.
-Please see http://tomoyo.osdn.jp/2.5/ for details.
+Please see https://tomoyo.sourceforge.net/2.6/ for details.
Where is documentation?
=======================
User <-> Kernel interface documentation is available at
-https://tomoyo.osdn.jp/2.5/policy-specification/index.html .
+https://tomoyo.sourceforge.net/2.6/policy-specification/index.html .
Materials we prepared for seminars and symposiums are available at
-https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
+https://sourceforge.net/projects/tomoyo/files/docs/ .
Below lists are chosen from three aspects.
What is TOMOYO?
TOMOYO Linux Overview
- https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/lca2009-takeda.pdf
TOMOYO Linux: pragmatic and manageable security for Linux
- https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/freedomhectaipei-tomoyo.pdf
TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
- https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/PacSec2007-en-no-demo.pdf
What can TOMOYO do?
Deep inside TOMOYO Linux
- https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/lca2009-kumaneko.pdf
The role of "pathname based access control" in security.
- https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
+ https://sourceforge.net/projects/tomoyo/files/docs/lfj2008-bof.pdf
History of TOMOYO?
Realities of Mainlining
- https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
-
-What is future plan?
-====================
-
-We believe that inode based security and name based security are complementary
-and both should be used together. But unfortunately, so far, we cannot enable
-multiple LSM modules at the same time. We feel sorry that you have to give up
-SELinux/SMACK/AppArmor etc. when you want to use TOMOYO.
-
-We hope that LSM becomes stackable in future. Meanwhile, you can use non-LSM
-version of TOMOYO, available at http://tomoyo.osdn.jp/1.8/ .
-LSM version of TOMOYO is a subset of non-LSM version of TOMOYO. We are planning
-to port non-LSM version's functionalities to LSM versions.
+ https://sourceforge.net/projects/tomoyo/files/docs/lfj2008.pdf
diff --git a/Documentation/admin-guide/cgroup-v1/pids.rst b/Documentation/admin-guide/cgroup-v1/pids.rst
index 6acebd9e72c8..0f9f9a7b1f6c 100644
--- a/Documentation/admin-guide/cgroup-v1/pids.rst
+++ b/Documentation/admin-guide/cgroup-v1/pids.rst
@@ -36,7 +36,8 @@ superset of parent/child/pids.current.
The pids.events file contains event counters:
- - max: Number of times fork failed because limit was hit.
+ - max: Number of times fork failed in the cgroup because limit was hit in
+ self or ancestors.
Example
-------
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 8fbb0519d556..05862f06ed26 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -239,6 +239,13 @@ cgroup v2 currently supports the following mount options.
will not be tracked by the memory controller (even if cgroup
v2 is remounted later on).
+ pids_localevents
+	The option restores the v1-like behavior of pids.events:max, that is,
+	only local (inside the cgroup proper) fork failures are counted. Without
+	this option pids.events:max represents any pids.max enforcement across
+	the cgroup's subtree.
+
+
Organizing Processes and Threads
--------------------------------
@@ -2205,12 +2212,18 @@ PID Interface Files
descendants has ever reached.
pids.events
- A read-only flat-keyed file which exists on non-root cgroups. The
- following entries are defined. Unless specified otherwise, a value
- change in this file generates a file modified event.
+ A read-only flat-keyed file which exists on non-root cgroups. Unless
+ specified otherwise, a value change in this file generates a file
+ modified event. The following entries are defined.
max
- Number of times fork failed because limit was hit.
+ The number of times the cgroup's total number of processes hit the pids.max
+ limit (see also pids_localevents).
+
+ pids.events.local
+ Similar to pids.events but the fields in the file are local
+	  to the cgroup, i.e. not hierarchical. The file modified event
+ generated on this file reflects only the local events.
Organisational operations are not blocked by cgroup policies, so it is
possible to have pids.current > pids.max. This can be done by either
@@ -2346,8 +2359,12 @@ Cpuset Interface Files
is always a subset of it.
Users can manually set it to a value that is different from
- "cpuset.cpus". The only constraint in setting it is that the
- list of CPUs must be exclusive with respect to its sibling.
+ "cpuset.cpus". One constraint in setting it is that the list of
+ CPUs must be exclusive with respect to "cpuset.cpus.exclusive"
+ of its sibling. If "cpuset.cpus.exclusive" of a sibling cgroup
+ isn't set, its "cpuset.cpus" value, if set, cannot be a subset
+ of it to leave at least one CPU available when the exclusive
+ CPUs are taken away.
For a parent cgroup, any one of its exclusive CPUs can only
be distributed to at most one of its child cgroups. Having an
@@ -2363,8 +2380,8 @@ Cpuset Interface Files
cpuset-enabled cgroups.
This file shows the effective set of exclusive CPUs that
- can be used to create a partition root. The content of this
- file will always be a subset of "cpuset.cpus" and its parent's
+ can be used to create a partition root. The content
+ of this file will always be a subset of its parent's
"cpuset.cpus.exclusive.effective" if its parent is not the root
cgroup. It will also be a subset of "cpuset.cpus.exclusive"
if it is set. If "cpuset.cpus.exclusive" is not set, it is
@@ -2625,6 +2642,15 @@ Miscellaneous controller provides 3 interface files. If two misc resources (res_
res_a 3
res_b 0
+ misc.peak
+ A read-only flat-keyed file shown in all cgroups. It shows the
+ historical maximum usage of the resources in the cgroup and its
+ children.::
+
+ $ cat misc.peak
+ res_a 10
+ res_b 8
+
misc.max
A read-write flat-keyed file shown in the non root cgroups. Allowed
maximum usage of the resources in the cgroup and its children.::
@@ -2654,6 +2680,11 @@ Miscellaneous controller provides 3 interface files. If two misc resources (res_
The number of times the cgroup's resource usage was
about to go over the max boundary.
+ misc.events.local
+ Similar to misc.events but the fields in the file are local to the
+	  cgroup, i.e. not hierarchical. The file modified event generated on
+ this file reflects only the local events.
+
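A small sketch of the new event files in action, assuming cgroup2 is mounted at
/sys/fs/cgroup with the pids and misc controllers enabled; the mount option and
cgroup name are illustrative:

    mount -t cgroup2 -o pids_localevents none /sys/fs/cgroup
    mkdir /sys/fs/cgroup/demo
    echo 8 > /sys/fs/cgroup/demo/pids.max
    cat /sys/fs/cgroup/demo/pids.events        # hierarchical "max" count
    cat /sys/fs/cgroup/demo/pids.events.local  # local-only count
    cat /sys/fs/cgroup/demo/misc.peak          # historical peak misc usage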
Migration and Ownership
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
index aa8290a29dc8..fd4b56c0996f 100644
--- a/Documentation/admin-guide/cifs/usage.rst
+++ b/Documentation/admin-guide/cifs/usage.rst
@@ -723,40 +723,26 @@ Configuration pseudo-files:
======================= =======================================================
SecurityFlags Flags which control security negotiation and
also packet signing. Authentication (may/must)
- flags (e.g. for NTLM and/or NTLMv2) may be combined with
+ flags (e.g. for NTLMv2) may be combined with
the signing flags. Specifying two different password
hashing mechanisms (as "must use") on the other hand
does not make much sense. Default flags are::
- 0x07007
-
- (NTLM, NTLMv2 and packet signing allowed). The maximum
- allowable flags if you want to allow mounts to servers
- using weaker password hashes is 0x37037 (lanman,
- plaintext, ntlm, ntlmv2, signing allowed). Some
- SecurityFlags require the corresponding menuconfig
- options to be enabled. Enabling plaintext
- authentication currently requires also enabling
- lanman authentication in the security flags
- because the cifs module only supports sending
- laintext passwords using the older lanman dialect
- form of the session setup SMB. (e.g. for authentication
- using plain text passwords, set the SecurityFlags
- to 0x30030)::
+ 0x00C5
+
+ (NTLMv2 and packet signing allowed). Some SecurityFlags
+ may require enabling a corresponding menuconfig option.
may use packet signing 0x00001
must use packet signing 0x01001
- may use NTLM (most common password hash) 0x00002
- must use NTLM 0x02002
may use NTLMv2 0x00004
must use NTLMv2 0x04004
- may use Kerberos security 0x00008
- must use Kerberos 0x08008
- may use lanman (weak) password hash 0x00010
- must use lanman password hash 0x10010
- may use plaintext passwords 0x00020
- must use plaintext passwords 0x20020
- (reserved for future packet encryption) 0x00040
+ may use Kerberos security (krb5) 0x00008
+ must use Kerberos 0x08008
+ may use NTLMSSP 0x00080
+ must use NTLMSSP 0x80080
+ seal (packet encryption) 0x00040
+ must seal (not implemented yet) 0x40040
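If it helps to make the flag arithmetic concrete: requiring both NTLMv2
(0x04004) and packet signing (0x01001) combines to 0x05005. A hedged example
using the SecurityFlags pseudo-file described above:

    cat /proc/fs/cifs/SecurityFlags             # current flags
    echo 0x05005 > /proc/fs/cifs/SecurityFlags  # must use NTLMv2 and signing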
cifsFYI If set to non-zero value, additional debug information
will be logged to the system error log. This field
diff --git a/Documentation/admin-guide/gpio/gpio-virtuser.rst b/Documentation/admin-guide/gpio/gpio-virtuser.rst
new file mode 100644
index 000000000000..2aca70db9f3b
--- /dev/null
+++ b/Documentation/admin-guide/gpio/gpio-virtuser.rst
@@ -0,0 +1,177 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+Virtual GPIO Consumer
+=====================
+
+The virtual GPIO Consumer module allows users to instantiate virtual devices
+that request GPIOs and then control their behavior over debugfs. Virtual
+consumer devices can be instantiated from device-tree or over configfs.
+
+A virtual consumer uses the driver-facing GPIO API and lets it be exercised by
+automated tests driven from user-space. The GPIOs are requested using
+``gpiod_get_array()`` and so we support multiple GPIOs per connector ID.
+
+Creating GPIO consumers
+-----------------------
+
+The gpio-virtuser module registers a configfs subsystem called
+``'gpio-virtuser'``. For details of the configfs filesystem, please refer to
+the configfs documentation.
+
+The user can create a hierarchy of configfs groups and items as well as modify
+values of exposed attributes. Once the consumer is instantiated, this hierarchy
+will be translated to appropriate device properties. The general structure is:
+
+**Group:** ``/config/gpio-virtuser``
+
+This is the top directory of the gpio-virtuser configfs tree.
+
+**Group:** ``/config/gpio-virtuser/example-name``
+
+**Attribute:** ``/config/gpio-virtuser/example-name/live``
+
+**Attribute:** ``/config/gpio-virtuser/example-name/dev_name``
+
+This is a directory representing a GPIO consumer device.
+
+The read-only ``dev_name`` attribute exposes the name of the device as it will
+appear in the system on the platform bus. This is useful for locating the
+associated debugfs directory under
+``/sys/kernel/debug/gpio-virtuser/$dev_name``.
+
+The ``'live'`` attribute allows triggering the actual creation of the device
+once it's fully configured. The accepted values are: ``'1'`` to enable the
+virtual device and ``'0'`` to disable and tear it down.
+
+Creating GPIO lookup tables
+---------------------------
+
+Users can create a number of configfs groups under the device group:
+
+**Group:** ``/config/gpio-virtuser/example-name/con_id``
+
+The ``'con_id'`` directory represents a single GPIO lookup and its value maps
+to the ``'con_id'`` argument of the ``gpiod_get()`` function. For example:
+``con_id`` == ``'reset'`` maps to the ``reset-gpios`` device property.
+
+Users can assign a number of GPIOs to each lookup. Each GPIO is a sub-directory
+with a user-defined name under the ``'con_id'`` group.
+
+**Attribute:** ``/config/gpio-virtuser/example-name/con_id/0/key``
+
+**Attribute:** ``/config/gpio-virtuser/example-name/con_id/0/offset``
+
+**Attribute:** ``/config/gpio-virtuser/example-name/con_id/0/drive``
+
+**Attribute:** ``/config/gpio-virtuser/example-name/con_id/0/pull``
+
+**Attribute:** ``/config/gpio-virtuser/example-name/con_id/0/active_low``
+
+**Attribute:** ``/config/gpio-virtuser/example-name/con_id/0/transitory``
+
+This is a group describing a single GPIO in the ``con_id-gpios`` property.
+
+For virtual consumers created using configfs we use machine lookup tables so
+this group can be considered as a mapping between the filesystem and the fields
+of a single entry in ``'struct gpiod_lookup'``.
+
+The ``'key'`` attribute represents either the name of the chip this GPIO
+belongs to or the GPIO line name. This depends on the value of the ``'offset'``
+attribute: if its value is >= 0, then ``'key'`` represents the label of the
+chip to lookup while ``'offset'`` represents the offset of the line in that
+chip. If ``'offset'`` is < 0, then ``'key'`` represents the name of the line.
+
+The remaining attributes map to the ``'flags'`` field of the GPIO lookup
+struct. The first two take string values as arguments:
+
+**``'drive'``:** ``'push-pull'``, ``'open-drain'``, ``'open-source'``
+**``'pull'``:** ``'pull-up'``, ``'pull-down'``, ``'pull-disabled'``, ``'as-is'``
+
+``'active_low'`` and ``'transitory'`` are boolean attributes.
+
+Activating GPIO consumers
+-------------------------
+
+Once the configuration is complete, the ``'live'`` attribute must be set to 1 in
+order to instantiate the consumer. It can be set back to 0 to destroy the
+virtual device. The module will synchronously wait for the new simulated device
+to be successfully probed and if this doesn't happen, writing to ``'live'`` will
+result in an error.
+
+Device-tree
+-----------
+
+Virtual GPIO consumers can also be defined in device-tree. The compatible string
+must be: ``"gpio-virtuser"`` with at least one property following the
+standardized GPIO pattern.
+
+An example device-tree code defining a virtual GPIO consumer:
+
+.. code-block :: none
+
+ gpio-virt-consumer {
+ compatible = "gpio-virtuser";
+
+ foo-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>, <&gpio1 2 0>;
+ bar-gpios = <&gpio0 6 0>;
+ };
+
+Controlling virtual GPIO consumers
+----------------------------------
+
+Once active, the device will export debugfs attributes for controlling GPIO
+arrays as well as each requested GPIO line separately. Let's consider the
+following device property: ``foo-gpios = <&gpio0 0 0>, <&gpio0 4 0>;``.
+
+The following debugfs attribute groups will be created:
+
+**Group:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo/``
+
+This is the group that will contain the attributes for the entire GPIO array.
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo/values``
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo/values_atomic``
+
+Both attributes allow reading and setting arrays of GPIO values. The user must
+pass exactly the number of values that the array contains, as a string of
+zeroes and ones representing inactive and active GPIO states respectively.
+In this example: ``echo 11 > values``.
+
+The ``values_atomic`` attribute works the same as ``values`` but the kernel
+will execute the GPIO driver callbacks in interrupt context.
+
+**Group:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/``
+
+This is a group that represents a single GPIO with ``$index`` being its offset
+in the array.
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/consumer``
+
+Allows setting and reading the consumer label of the GPIO line.
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/debounce``
+
+Allows setting and reading the debounce period of the GPIO line.
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/direction``
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/direction_atomic``
+
+These two attributes allow setting the direction of the GPIO line. They accept
+"input" and "output" as values. The atomic variant executes the driver callback
+in interrupt context.
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/interrupts``
+
+If the line is requested in input mode, writing ``1`` to this attribute will
+make the module listen for edge interrupts on the GPIO. Writing ``0`` disables
+the monitoring. Reading this attribute returns the current number of registered
+interrupts (both edges).
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/value``
+
+**Attribute:** ``/sys/kernel/debug/gpio-virtuser/$dev_name/gpiod:foo:$index/value_atomic``
+
+Both attributes allow reading and setting values of individual requested GPIO lines.
+They accept the following values: ``1`` and ``0``.
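To tie the configfs and debugfs pieces above together, a hedged end-to-end
sketch might look like the following; the device name "dut", chip label
"gpiochip0", offset 5 and con_id "reset" are all placeholders:

    mkdir -p /sys/kernel/config/gpio-virtuser/dut/reset/0
    echo gpiochip0 > /sys/kernel/config/gpio-virtuser/dut/reset/0/key
    echo 5 > /sys/kernel/config/gpio-virtuser/dut/reset/0/offset
    echo 1 > /sys/kernel/config/gpio-virtuser/dut/live

    dev=$(cat /sys/kernel/config/gpio-virtuser/dut/dev_name)
    echo output > /sys/kernel/debug/gpio-virtuser/$dev/gpiod:reset:0/direction
    echo 1 > /sys/kernel/debug/gpio-virtuser/$dev/gpiod:reset:0/value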
diff --git a/Documentation/admin-guide/gpio/index.rst b/Documentation/admin-guide/gpio/index.rst
index 460afd29617e..712f379731cb 100644
--- a/Documentation/admin-guide/gpio/index.rst
+++ b/Documentation/admin-guide/gpio/index.rst
@@ -10,6 +10,7 @@ GPIO
Character Device Userspace API <../../userspace-api/gpio/chardev>
gpio-aggregator
gpio-sim
+ gpio-virtuser
Obsolete APIs <obsolete>
.. only:: subproject and html
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 25a04cda4c2c..132e0bc6007e 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -592,85 +592,19 @@ Spectre variant 2
Mitigation control on the kernel command line
---------------------------------------------
-Spectre variant 2 mitigation can be disabled or force enabled at the
-kernel command line.
+In general the kernel selects reasonable default mitigations for the
+current CPU.
- nospectre_v1
+Spectre default mitigations can be disabled or changed at the kernel
+command line with the following options:
- [X86,PPC] Disable mitigations for Spectre Variant 1
- (bounds check bypass). With this option data leaks are
- possible in the system.
+ - nospectre_v1
+ - nospectre_v2
+ - spectre_v2={option}
+ - spectre_v2_user={option}
+ - spectre_bhi={option}
- nospectre_v2
-
- [X86] Disable all mitigations for the Spectre variant 2
- (indirect branch prediction) vulnerability. System may
- allow data leaks with this option, which is equivalent
- to spectre_v2=off.
-
-
- spectre_v2=
-
- [X86] Control mitigation of Spectre variant 2
- (indirect branch speculation) vulnerability.
- The default operation protects the kernel from
- user space attacks.
-
- on
- unconditionally enable, implies
- spectre_v2_user=on
- off
- unconditionally disable, implies
- spectre_v2_user=off
- auto
- kernel detects whether your CPU model is
- vulnerable
-
- Selecting 'on' will, and 'auto' may, choose a
- mitigation method at run time according to the
- CPU, the available microcode, the setting of the
- CONFIG_MITIGATION_RETPOLINE configuration option,
- and the compiler with which the kernel was built.
-
- Selecting 'on' will also enable the mitigation
- against user space to user space task attacks.
-
- Selecting 'off' will disable both the kernel and
- the user space protections.
-
- Specific mitigations can also be selected manually:
-
- retpoline auto pick between generic,lfence
- retpoline,generic Retpolines
- retpoline,lfence LFENCE; indirect branch
- retpoline,amd alias for retpoline,lfence
- eibrs Enhanced/Auto IBRS
- eibrs,retpoline Enhanced/Auto IBRS + Retpolines
- eibrs,lfence Enhanced/Auto IBRS + LFENCE
- ibrs use IBRS to protect kernel
-
- Not specifying this option is equivalent to
- spectre_v2=auto.
-
- In general the kernel by default selects
- reasonable mitigations for the current CPU. To
- disable Spectre variant 2 mitigations, boot with
- spectre_v2=off. Spectre variant 1 mitigations
- cannot be disabled.
-
- spectre_bhi=
-
- [X86] Control mitigation of Branch History Injection
- (BHI) vulnerability. This setting affects the deployment
- of the HW BHI control and the SW BHB clearing sequence.
-
- on
- (default) Enable the HW or SW mitigation as
- needed.
- off
- Disable the mitigation.
-
-For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
+For more details on the available options, refer to Documentation/admin-guide/kernel-parameters.txt
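For instance, the mitigation actually selected by these defaults can be checked
at run time through the vulnerabilities files under sysfs; a quick, purely
illustrative check:

    cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
    cat /sys/devices/system/cpu/vulnerabilities/spectre_v2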
Mitigation selection guide
--------------------------
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 32ea52f1d150..e85b1adf5908 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -121,7 +121,6 @@ configure specific aspects of kernel behavior to your liking.
parport
perf-security
pm/index
- pmf
pnp
rapidio
RAS/index
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 500cfa776225..740ca2bc2822 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -12,7 +12,7 @@
acpi= [HW,ACPI,X86,ARM64,RISCV64,EARLY]
Advanced Configuration and Power Interface
Format: { force | on | off | strict | noirq | rsdt |
- copy_dsdt }
+ copy_dsdt | nospcr }
force -- enable ACPI if default was off
on -- enable ACPI but allow fallback to DT [arm64,riscv64]
off -- disable ACPI if default was on
@@ -21,8 +21,12 @@
strictly ACPI specification compliant.
rsdt -- prefer RSDT over (default) XSDT
copy_dsdt -- copy DSDT to memory
- For ARM64 and RISCV64, ONLY "acpi=off", "acpi=on" or
- "acpi=force" are available
+ nospcr -- disable console in ACPI SPCR table as
+ default _serial_ console on ARM64
+ For ARM64, ONLY "acpi=off", "acpi=on", "acpi=force" or
+ "acpi=nospcr" are available
+ For RISCV64, ONLY "acpi=off", "acpi=on" or "acpi=force"
+ are available
See also Documentation/power/runtime_pm.rst, pci=noacpi
@@ -788,25 +792,6 @@
Documentation/networking/netconsole.rst for an
alternative.
- <DEVNAME>:<n>.<n>[,options]
- Use the specified serial port on the serial core bus.
- The addressing uses DEVNAME of the physical serial port
- device, followed by the serial core controller instance,
- and the serial port instance. The options are the same
- as documented for the ttyS addressing above.
-
- The mapping of the serial ports to the tty instances
- can be viewed with:
-
- $ ls -d /sys/bus/serial-base/devices/*:*.*/tty/*
- /sys/bus/serial-base/devices/00:04:0.0/tty/ttyS0
-
- In the above example, the console can be addressed with
- console=00:04:0.0. Note that a console addressed this
- way will only get added when the related device driver
- is ready. The use of an earlycon parameter in addition to
- the console may be desired for console output early on.
-
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
uart[8250],mmio16,<addr>[,options]
@@ -1450,27 +1435,6 @@
you are really sure that your UEFI does sane gc and
fulfills the spec otherwise your board may brick.
- efi_fake_mem= nn[KMG]@ss[KMG]:aa[,nn[KMG]@ss[KMG]:aa,..] [EFI,X86,EARLY]
- Add arbitrary attribute to specific memory range by
- updating original EFI memory map.
- Region of memory which aa attribute is added to is
- from ss to ss+nn.
-
- If efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000
- is specified, EFI_MEMORY_MORE_RELIABLE(0x10000)
- attribute is added to range 0x100000000-0x180000000 and
- 0x10a0000000-0x1120000000.
-
- If efi_fake_mem=8G@9G:0x40000 is specified, the
- EFI_MEMORY_SP(0x40000) attribute is added to
- range 0x240000000-0x43fffffff.
-
- Using this parameter you can do debugging of EFI memmap
- related features. For example, you can do debugging of
- Address Range Mirroring feature even if your box
- doesn't support it, or mark specific memory as
- "soft reserved".
-
efivar_ssdt= [EFI; X86] Name of an EFI variable that contains an SSDT
that is to be dynamically loaded by Linux. If there are
multiple variables with the same name but with different
@@ -1921,6 +1885,28 @@
Format:
<bus_id>,<clkrate>
+ i2c_touchscreen_props= [HW,ACPI,X86]
+ Set device-properties for ACPI-enumerated I2C-attached
+ touchscreen, to e.g. fix coordinates of upside-down
+ mounted touchscreens. If you need this option please
+ submit a drivers/platform/x86/touchscreen_dmi.c patch
+ adding a DMI quirk for this.
+
+ Format:
+ <ACPI_HW_ID>:<prop_name>=<val>[:prop_name=val][:...]
+ Where <val> is one of:
+ Omit "=<val>" entirely Set a boolean device-property
+ Unsigned number Set a u32 device-property
+ Anything else Set a string device-property
+
+ Examples (split over multiple lines):
+ i2c_touchscreen_props=GDIX1001:touchscreen-inverted-x:
+ touchscreen-inverted-y
+
+ i2c_touchscreen_props=MSSL1680:touchscreen-size-x=1920:
+ touchscreen-size-y=1080:touchscreen-inverted-y:
+ firmware-name=gsl1680-vendor-model.fw:silead,home-button
+
i8042.debug [HW] Toggle i8042 debug mode
i8042.unmask_kbd_data
[HW] Enable printing of interrupt data from the KBD port
@@ -2170,12 +2156,6 @@
Format: 0 | 1
Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
- init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if
- it was mlock'ed and not explicitly munlock'ed
- afterwards.
- Format: 0 | 1
- Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON
-
init_pkru= [X86] Specify the default memory protection keys rights
register contents for all processes. 0x55555554 by
default (disallow access to all but pkey 0). Can
@@ -3407,10 +3387,6 @@
deep - Suspend-To-RAM or equivalent (if supported)
See Documentation/admin-guide/pm/sleep-states.rst.
- mfgpt_irq= [IA-32] Specify the IRQ to use for the
- Multi-Function General Purpose Timers on AMD Geode
- platforms.
-
mfgptfix [X86-32] Fix MFGPT timers on AMD Geode platforms when
the BIOS has incorrectly applied a workaround. TinyBIOS
version 0.98 is known to be affected, 0.99 fixes the
@@ -4752,7 +4728,9 @@
none - Limited to cond_resched() calls
voluntary - Limited to cond_resched() and might_sleep() calls
full - Any section that isn't explicitly preempt disabled
- can be preempted anytime.
+ can be preempted anytime. Tasks will also yield
+ contended spinlocks (if the critical section isn't
+ explicitly preempt disabled beyond the lock itself).
print-fatal-signals=
[KNL] debug: print fatal signals
@@ -5018,6 +4996,14 @@
the ->nocb_bypass queue. The definition of "too
many" is supplied by this kernel boot parameter.
+ rcutree.nohz_full_patience_delay= [KNL]
+ On callback-offloaded (rcu_nocbs) CPUs, avoid
+ disturbing RCU unless the grace period has
+ reached the specified age in milliseconds.
+ Defaults to zero. Large values will be capped
+ at five seconds. All values will be rounded down
+ to the nearest value representable by jiffies.
+
rcutree.qhimark= [KNL]
Set threshold of queued RCU callbacks beyond which
batch limiting is disabled.
@@ -6120,9 +6106,15 @@
deployment of the HW BHI control and the SW BHB
clearing sequence.
- on - (default) Enable the HW or SW mitigation
- as needed.
- off - Disable the mitigation.
+ on - (default) Enable the HW or SW mitigation as
+ needed. This protects the kernel from
+ both syscalls and VMs.
+ vmexit - On systems which don't have the HW mitigation
+ available, enable the SW mitigation on vmexit
+ ONLY. On such systems, the host kernel is
+ protected from VM-originated BHI attacks, but
+ may still be vulnerable to syscall attacks.
+ off - Disable the mitigation.
spectre_v2= [X86,EARLY] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
@@ -7430,17 +7422,18 @@
Crash from Xen panic notifier, without executing late
panic() code such as dumping handler.
+ xen_mc_debug [X86,XEN,EARLY]
+ Enable multicall debugging when running as a Xen PV guest.
+ Enabling this feature will reduce performance a little
+ bit, so it should only be enabled for obtaining extended
+ debug data in case of multicall errors.
+
xen_msr_safe= [X86,XEN,EARLY]
Format: <bool>
Select whether to always use non-faulting (safe) MSR
access functions when running as Xen PV guest. The
default value is controlled by CONFIG_XEN_PV_MSR_SAFE.
- xen_nopvspin [X86,XEN,EARLY]
- Disables the qspinlock slowpath using Xen PV optimizations.
- This parameter is obsoleted by "nopvspin" parameter, which
- has equivalent effect for XEN platform.
-
xen_nopv [X86]
Disables the PV optimizations forcing the HVM guest to
run as generic HVM guest with no PV drivers.
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 076443cc10a6..d414d3f5592a 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -467,11 +467,11 @@ anon_fault_fallback_charge
instead falls back to using huge pages with lower orders or
small pages even though the allocation was successful.
-anon_swpout
+swpout
is incremented every time a huge page is swapped out in one
piece without splitting.
-anon_swpout_fallback
+swpout_fallback
is incremented if a huge page has to be split before swapout.
Usually because failed to allocate some continuous swap space
for the huge page.
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index 1e0d101b020a..d0324d44f548 100644
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -281,6 +281,22 @@ integer values defined between 0 to 255 when EPP feature is enabled by platform
firmware, if EPP feature is disabled, driver will ignore the written value
This attribute is read-write.
+``boost``
+The `boost` sysfs attribute provides control over the CPU core
+performance boost, allowing users to manage the maximum frequency limitation
+of the CPU. This attribute can be used to enable or disable the boost feature
+on individual CPUs.
+
+When the boost feature is enabled, the CPU can dynamically increase its frequency
+beyond the base frequency, providing enhanced performance for demanding workloads.
+On the other hand, disabling the boost feature restricts the CPU to operate at the
+base frequency, which may be desirable in certain scenarios to prioritize power
+efficiency or manage temperature.
+
+To manipulate the `boost` attribute, users can write a value of `0` to disable
+the boost or `1` to enable it for the respective CPU via the sysfs path
+`/sys/devices/system/cpu/cpuX/cpufreq/boost`, where `X` represents the CPU number.
+
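As a quick illustration of the write interface described above, a user-space
sketch for toggling boost on one CPU could look like this (illustrative only;
it assumes CPU 0 and sufficient privileges, and keeps error handling minimal)::

  /* Sketch: enable core performance boost for CPU 0 via the documented
   * sysfs file; write "0" instead of "1" to disable boost.
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          const char *path = "/sys/devices/system/cpu/cpu0/cpufreq/boost";
          int fd = open(path, O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          if (write(fd, "1", 1) != 1) {
                  perror("write");
                  close(fd);
                  return 1;
          }
          close(fd);
          return 0;
  }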
Other performance and frequency values can be read back from
``/sys/devices/system/cpu/cpuX/acpi_cppc/``, see :ref:`cppc_sysfs`.
@@ -406,7 +422,7 @@ control its functionality at the system level. They are located in the
``/sys/devices/system/cpu/amd_pstate/`` directory and affect all CPUs.
``status``
- Operation mode of the driver: "active", "passive" or "disable".
+ Operation mode of the driver: "active", "passive", "guided" or "disable".
"active"
The driver is functional and in the ``active mode``
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 6adb7988e0eb..fe1be4ad88cb 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -267,6 +267,10 @@ are the following:
``related_cpus``
List of all (online and offline) CPUs belonging to this policy.
+``scaling_available_frequencies``
+ List of available frequencies of the CPUs belonging to this policy
+ (in kHz).
+
``scaling_available_governors``
List of ``CPUFreq`` scaling governors present in the kernel that can
be attached to this policy or (if the |intel_pstate| scaling driver is
diff --git a/Documentation/admin-guide/pmf.rst b/Documentation/admin-guide/pmf.rst
deleted file mode 100644
index 9ee729ffc19b..000000000000
--- a/Documentation/admin-guide/pmf.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-Set udev rules for PMF Smart PC Builder
----------------------------------------
-
-AMD PMF(Platform Management Framework) Smart PC Solution builder has to set the system states
-like S0i3, Screen lock, hibernate etc, based on the output actions provided by the PMF
-TA (Trusted Application).
-
-In order for this to work the PMF driver generates a uevent for userspace to react to. Below are
-sample udev rules that can facilitate this experience when a machine has PMF Smart PC solution builder
-enabled.
-
-Please add the following line(s) to
-``/etc/udev/rules.d/99-local.rules``::
-
- DRIVERS=="amd-pmf", ACTION=="change", ENV{EVENT_ID}=="0", RUN+="/usr/bin/systemctl suspend"
- DRIVERS=="amd-pmf", ACTION=="change", ENV{EVENT_ID}=="1", RUN+="/usr/bin/systemctl hibernate"
- DRIVERS=="amd-pmf", ACTION=="change", ENV{EVENT_ID}=="2", RUN+="/bin/loginctl lock-sessions"
-
-EVENT_ID values:
-0= Put the system to S0i3/S2Idle
-1= Put the system to hibernate
-2= Lock the screen
diff --git a/Documentation/arch/arm64/cpu-hotplug.rst b/Documentation/arch/arm64/cpu-hotplug.rst
new file mode 100644
index 000000000000..76ba8d932c72
--- /dev/null
+++ b/Documentation/arch/arm64/cpu-hotplug.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _cpuhp_index:
+
+====================
+CPU Hotplug and ACPI
+====================
+
+CPU hotplug in the arm64 world is commonly used to describe the kernel taking
+CPUs online/offline using PSCI. This document is about ACPI firmware allowing
+CPUs that were not available during boot to be added to the system later.
+
+``possible`` and ``present`` refer to the state of the CPU as seen by Linux.
+
+
+CPU Hotplug on physical systems - CPUs not present at boot
+----------------------------------------------------------
+
+Physical systems need to mark a CPU that is ``possible`` but not ``present`` as
+being ``present``. An example would be a dual socket machine, where the package
+in one of the sockets can be replaced while the system is running.
+
+This is not supported.
+
+In the arm64 world CPUs are not a single device but a slice of the system.
+There are no systems that support the physical addition (or removal) of CPUs
+while the system is running, and ACPI is not able to sufficiently describe
+them.
+
+e.g. New CPUs come with new caches, but the platform's cache topology is
+described in a static table, the PPTT. How caches are shared between CPUs is
+not discoverable, and must be described by firmware.
+
+e.g. The GIC redistributor for each CPU must be accessed by the driver during
+boot to discover the system wide supported features. ACPI's MADT GICC
+structures can describe a redistributor associated with a disabled CPU, but
+can't describe whether the redistributor is accessible, only that it is not
+'always on'.
+
+arm64's ACPI tables assume that everything described is ``present``.
+
+
+CPU Hotplug on virtual systems - CPUs not enabled at boot
+---------------------------------------------------------
+
+Virtual systems have the advantage that all the properties the system will
+ever have can be described at boot. There are no power-domain considerations
+as such devices are emulated.
+
+CPU Hotplug on virtual systems is supported. It is distinct from physical
+CPU Hotplug as all resources are described as ``present``, but CPUs may be
+marked as disabled by firmware. Only the CPU's online/offline behaviour is
+influenced by firmware. An example is where a virtual machine boots with a
+single CPU, and additional CPUs are added once a cloud orchestrator deploys
+the workload.
+
+For a virtual machine, the VMM (e.g. Qemu) plays the part of firmware.
+
+Virtual hotplug is implemented as a firmware policy affecting which CPUs can be
+brought online. Firmware can enforce its policy via PSCI's return codes. e.g.
+``DENIED``.
+
+The ACPI tables must describe all the resources of the virtual machine. CPUs
+that firmware wishes to disable either from boot (or later) should not be
+``enabled`` in the MADT GICC structures, but should have the ``online capable``
+bit set, to indicate they can be enabled later. The boot CPU must be marked as
+``enabled``. The 'always on' GICR structure must be used to describe the
+redistributors.
+
+CPUs described as ``online capable`` but not ``enabled`` can be set to enabled
+by the DSDT's Processor object's _STA method. On virtual systems the _STA method
+must always report the CPU as ``present``. Changes to the firmware policy can
+be notified to the OS via device-check or eject-request.
+
+CPUs described as ``enabled`` in the static table should not have their _STA
+modified dynamically by firmware. Soft-restart features such as kexec will
+re-read the static properties of the system from these static tables, and
+may malfunction if these no longer describe the running system. Linux will
+re-discover the dynamic properties of the system from the _STA method later
+during boot.
diff --git a/Documentation/arch/arm64/index.rst b/Documentation/arch/arm64/index.rst
index d08e924204bf..78544de0a8a9 100644
--- a/Documentation/arch/arm64/index.rst
+++ b/Documentation/arch/arm64/index.rst
@@ -13,6 +13,7 @@ ARM64 Architecture
asymmetric-32bit
booting
cpu-feature-registers
+ cpu-hotplug
elf_hwcaps
hugetlbpage
kdump
diff --git a/Documentation/arch/arm64/memory.rst b/Documentation/arch/arm64/memory.rst
index 55a55f30eed8..8a658984b8bb 100644
--- a/Documentation/arch/arm64/memory.rst
+++ b/Documentation/arch/arm64/memory.rst
@@ -18,12 +18,10 @@ ARMv8.2 adds optional support for Large Virtual Address space. This is
only available when running with a 64KB page size and expands the
number of descriptors in the first level of translation.
-User addresses have bits 63:48 set to 0 while the kernel addresses have
-the same bits set to 1. TTBRx selection is given by bit 63 of the
-virtual address. The swapper_pg_dir contains only kernel (global)
-mappings while the user pgd contains only user (non-global) mappings.
-The swapper_pg_dir address is written to TTBR1 and never written to
-TTBR0.
+TTBRx selection is given by bit 55 of the virtual address. The
+swapper_pg_dir contains only kernel (global) mappings while the user pgd
+contains only user (non-global) mappings. The swapper_pg_dir address is
+written to TTBR1 and never written to TTBR0.
AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
@@ -65,14 +63,14 @@ Translation table lookup with 4KB pages::
+--------+--------+--------+--------+--------+--------+--------+--------+
|63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0|
+--------+--------+--------+--------+--------+--------+--------+--------+
- | | | | | |
- | | | | | v
- | | | | | [11:0] in-page offset
- | | | | +-> [20:12] L3 index
- | | | +-----------> [29:21] L2 index
- | | +---------------------> [38:30] L1 index
- | +-------------------------------> [47:39] L0 index
- +-------------------------------------------------> [63] TTBR0/1
+ | | | | | |
+ | | | | | v
+ | | | | | [11:0] in-page offset
+ | | | | +-> [20:12] L3 index
+ | | | +-----------> [29:21] L2 index
+ | | +---------------------> [38:30] L1 index
+ | +-------------------------------> [47:39] L0 index
+ +----------------------------------------> [55] TTBR0/1
Translation table lookup with 64KB pages::
@@ -80,14 +78,14 @@ Translation table lookup with 64KB pages::
+--------+--------+--------+--------+--------+--------+--------+--------+
|63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0|
+--------+--------+--------+--------+--------+--------+--------+--------+
- | | | | |
- | | | | v
- | | | | [15:0] in-page offset
- | | | +----------> [28:16] L3 index
- | | +--------------------------> [41:29] L2 index
- | +-------------------------------> [47:42] L1 index (48-bit)
- | [51:42] L1 index (52-bit)
- +-------------------------------------------------> [63] TTBR0/1
+ | | | | |
+ | | | | v
+ | | | | [15:0] in-page offset
+ | | | +----------> [28:16] L3 index
+ | | +--------------------------> [41:29] L2 index
+ | +-------------------------------> [47:42] L1 index (48-bit)
+ | [51:42] L1 index (52-bit)
+ +----------------------------------------> [55] TTBR0/1
When using KVM without the Virtualization Host Extensions, the
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index eb8af8032c31..bb83c5d8c675 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -132,16 +132,26 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1 | #1502854 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-X2 | #3324338 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-X3 | #3324335 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X4 | #3194386 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-X925 | #3324334 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1349291 | N/A |
@@ -156,9 +166,13 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #1619801 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
-| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3312417 |
+| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-500 | #841119,826419 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/arch/riscv/cmodx.rst b/Documentation/arch/riscv/cmodx.rst
index 1c0ca06b6c97..8c48bcff3df9 100644
--- a/Documentation/arch/riscv/cmodx.rst
+++ b/Documentation/arch/riscv/cmodx.rst
@@ -62,10 +62,10 @@ cmodx.c::
printf("Value before cmodx: %d\n", value);
// Call prctl before first fence.i is called inside modify_instruction
- prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_ON, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
+ prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
modify_instruction();
// Call prctl after final fence.i is called in process
- prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_OFF, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
+ prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_OFF, PR_RISCV_SCOPE_PER_PROCESS);
value = get_value();
printf("Value after cmodx: %d\n", value);
diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst
index 54d199dce78b..2b420bab0527 100644
--- a/Documentation/arch/riscv/uabi.rst
+++ b/Documentation/arch/riscv/uabi.rst
@@ -65,4 +65,6 @@ the extension, or may have deliberately removed it from the listing.
Misaligned accesses
-------------------
-Misaligned accesses are supported in userspace, but they may perform poorly.
+Misaligned scalar accesses are supported in userspace, but they may perform
+poorly. Misaligned vector accesses are only supported if the Zicclsm extension
+is supported.
diff --git a/Documentation/arch/x86/amd-memory-encryption.rst b/Documentation/arch/x86/amd-memory-encryption.rst
index 414bc7402ae7..6df3264f23b9 100644
--- a/Documentation/arch/x86/amd-memory-encryption.rst
+++ b/Documentation/arch/x86/amd-memory-encryption.rst
@@ -130,4 +130,31 @@ SNP feature support.
More details in AMD64 APM[1] Vol 2: 15.34.10 SEV_STATUS MSR
-[1] https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf
+Secure VM Service Module (SVSM)
+===============================
+SNP provides a feature called Virtual Machine Privilege Levels (VMPL) which
+defines four privilege levels at which guest software can run. The most
+privileged level is 0 and numerically higher numbers have lesser privileges.
+More details in the AMD64 APM Vol 2, section "15.35.7 Virtual Machine
+Privilege Levels", docID: 24593.
+
+When using that feature, different services can run at different protection
+levels, apart from the guest OS but still within the secure SNP environment.
+They can provide services to the guest, like a vTPM, for example.
+
+When a guest is not running at VMPL0, it needs to communicate with the software
+running at VMPL0 to perform privileged operations or to interact with secure
+services. An example fur such a privileged operation is PVALIDATE which is
+*required* to be executed at VMPL0.
+
+In this scenario, the software running at VMPL0 is usually called a Secure VM
+Service Module (SVSM). Discovery of an SVSM and the API used to communicate
+with it is documented in "Secure VM Service Module for SEV-SNP Guests", docID:
+58019.
+
+(Latest versions of the above-mentioned documents can be found by using
+a search engine like duckduckgo.com and typing in:
+
+ site:amd.com "Secure VM Service Module for SEV-SNP Guests", docID: 58019
+
+for example.)
diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
index 627e23869bca..a824affd741d 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/arch/x86/resctrl.rst
@@ -375,6 +375,10 @@ When monitoring is enabled all MON groups will also contain:
all tasks in the group. In CTRL_MON groups these files provide
the sum for all tasks in the CTRL_MON group and all tasks in
MON groups. Please see example section for more details on usage.
+ On systems with Sub-NUMA Cluster (SNC) enabled there are extra
+ directories for each node (located within the "mon_L3_XX" directory
+ for the L3 cache they occupy). These are named "mon_sub_L3_YY"
+ where "YY" is the node number.
"mon_hw_id":
Available only with debug option. The identifier used by hardware
@@ -484,6 +488,29 @@ if non-contiguous 1s value is supported. On a system with a 20-bit mask
each bit represents 5% of the capacity of the cache. You could partition
the cache into four equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
+Notes on Sub-NUMA Cluster mode
+==============================
+When SNC mode is enabled, Linux may load balance tasks between Sub-NUMA
+nodes much more readily than between regular NUMA nodes since the CPUs
+on Sub-NUMA nodes share the same L3 cache and the system may report
+the NUMA distance between Sub-NUMA nodes with a lower value than used
+for regular NUMA nodes.
+
+The top-level monitoring files in each "mon_L3_XX" directory provide
+the sum of data across all SNC nodes sharing an L3 cache instance.
+Users who bind tasks to the CPUs of a specific Sub-NUMA node can read
+the "llc_occupancy", "mbm_total_bytes", and "mbm_local_bytes" in the
+"mon_sub_L3_YY" directories to get node local data.
+
+Memory bandwidth allocation is still performed at the L3 cache
+level. I.e. throttling controls are applied to all SNC nodes.
+
+L3 cache allocation bitmaps also apply to all SNC nodes. But note that
+the amount of L3 cache represented by each bit is divided by the number
+of SNC nodes per L3 cache. E.g. with a 100MB cache on a system with 10-bit
+allocation masks each bit normally represents 10MB. With SNC mode enabled
+with two SNC nodes per L3 cache, each bit only represents 5MB.
+
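To make the per-node layout above concrete, a small user-space sketch that
reads node-local occupancy might look like this (illustrative only; it assumes
resctrl is mounted at /sys/fs/resctrl and uses "mon_L3_00"/"mon_sub_L3_01" as
example directory names)::

  /* Sketch: read node-local LLC occupancy for SNC node 01 in L3 domain 00. */
  #include <stdio.h>

  int main(void)
  {
          const char *path =
                  "/sys/fs/resctrl/mon_data/mon_L3_00/mon_sub_L3_01/llc_occupancy";
          FILE *f = fopen(path, "r");
          unsigned long long bytes;

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          if (fscanf(f, "%llu", &bytes) != 1) {
                  fprintf(stderr, "unexpected file contents\n");
                  fclose(f);
                  return 1;
          }
          printf("node-local LLC occupancy: %llu bytes\n", bytes);
          fclose(f);
          return 0;
  }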
Memory bandwidth Allocation and monitoring
==========================================
diff --git a/Documentation/block/data-integrity.rst b/Documentation/block/data-integrity.rst
index 6a760c0eb192..99905e880a0e 100644
--- a/Documentation/block/data-integrity.rst
+++ b/Documentation/block/data-integrity.rst
@@ -153,18 +153,11 @@ bio_free() will automatically free the bip.
4.2 Block Device
----------------
-Because the format of the protection data is tied to the physical
-disk, each block device has been extended with a block integrity
-profile (struct blk_integrity). This optional profile is registered
-with the block layer using blk_integrity_register().
-
-The profile contains callback functions for generating and verifying
-the protection data, as well as getting and setting application tags.
-The profile also contains a few constants to aid in completing,
-merging and splitting the integrity metadata.
+Block devices can set up the integrity information in the integrity
+sub-structure of the queue_limits structure.
Layered block devices will need to pick a profile that's appropriate
-for all subdevices. blk_integrity_compare() can help with that. DM
+for all subdevices. queue_limits_stack_integrity() can help with that. DM
and MD linear, RAID0 and RAID1 are currently supported. RAID4/5/6
will require extra work due to the application tag.
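For orientation, a driver-side sketch of the new scheme might look roughly like
the fragment below. This is not taken from the patch: the field names inside
the ``integrity`` sub-structure are assumptions and should be checked against
the current ``include/linux/blk-integrity.h``::

  /* Sketch: describe protection information in the queue_limits passed to
   * the disk allocation helpers instead of registering a blk_integrity
   * profile. Field names below are assumed for illustration.
   */
  struct queue_limits lim = { };

  lim.integrity.csum_type  = BLK_INTEGRITY_CSUM_CRC; /* assumed enum value */
  lim.integrity.tuple_size = 8;  /* bytes of PI metadata per interval */
  lim.integrity.tag_size   = 2;  /* application tag bytes */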
@@ -250,42 +243,6 @@ will require extra work due to the application tag.
integrity upon completion.
-5.4 Registering A Block Device As Capable Of Exchanging Integrity Metadata
---------------------------------------------------------------------------
-
- To enable integrity exchange on a block device the gendisk must be
- registered as capable:
-
- `int blk_integrity_register(gendisk, blk_integrity);`
-
- The blk_integrity struct is a template and should contain the
- following::
-
- static struct blk_integrity my_profile = {
- .name = "STANDARDSBODY-TYPE-VARIANT-CSUM",
- .generate_fn = my_generate_fn,
- .verify_fn = my_verify_fn,
- .tuple_size = sizeof(struct my_tuple_size),
- .tag_size = <tag bytes per hw sector>,
- };
-
- 'name' is a text string which will be visible in sysfs. This is
- part of the userland API so chose it carefully and never change
- it. The format is standards body-type-variant.
- E.g. T10-DIF-TYPE1-IP or T13-EPP-0-CRC.
-
- 'generate_fn' generates appropriate integrity metadata (for WRITE).
-
- 'verify_fn' verifies that the data buffer matches the integrity
- metadata.
-
- 'tuple_size' must be set to match the size of the integrity
- metadata per sector. I.e. 8 for DIF and EPP.
-
- 'tag_size' must be set to identify how many bytes of tag space
- are available per hardware sector. For DIF this is either 2 or
- 0 depending on the value of the Control Mode Page ATO bit.
-
----------------------------------------------------------------------
2007-12-24 Martin K. Petersen <martin.petersen@oracle.com>
diff --git a/Documentation/block/writeback_cache_control.rst b/Documentation/block/writeback_cache_control.rst
index b208488d0aae..c3707d071780 100644
--- a/Documentation/block/writeback_cache_control.rst
+++ b/Documentation/block/writeback_cache_control.rst
@@ -46,41 +46,50 @@ worry if the underlying devices need any explicit cache flushing and how
the Forced Unit Access is implemented. The REQ_PREFLUSH and REQ_FUA flags
may both be set on a single bio.
+Feature settings for block drivers
+----------------------------------
-Implementation details for bio based block drivers
---------------------------------------------------------------
+For devices that do not support volatile write caches there is no driver
+support required; the block layer completes empty REQ_PREFLUSH requests before
+entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from
+requests that have a payload.
-These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit
-directly below the submit_bio interface. For remapping drivers the REQ_FUA
-bits need to be propagated to underlying devices, and a global flush needs
-to be implemented for bios with the REQ_PREFLUSH bit set. For real device
-drivers that do not have a volatile cache the REQ_PREFLUSH and REQ_FUA bits
-on non-empty bios can simply be ignored, and REQ_PREFLUSH requests without
-data can be completed successfully without doing any work. Drivers for
-devices with volatile caches need to implement the support for these
-flags themselves without any help from the block layer.
+For devices with volatile write caches the driver needs to tell the block layer
+that it supports flushing caches by setting the
+ BLK_FEAT_WRITE_CACHE
-Implementation details for request_fn based block drivers
----------------------------------------------------------
+flag in the queue_limits feature field. For devices that also support the FUA
+bit the block layer needs to be told to pass on the REQ_FUA bit by also setting
+the
-For devices that do not support volatile write caches there is no driver
-support required, the block layer completes empty REQ_PREFLUSH requests before
-entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from
-requests that have a payload. For devices with volatile write caches the
-driver needs to tell the block layer that it supports flushing caches by
-doing::
+ BLK_FEAT_FUA
+
+flag in the features field of the queue_limits structure.
+
+Implementation details for bio based block drivers
+--------------------------------------------------
+
+For bio based drivers the REQ_PREFLUSH and REQ_FUA bit are simply passed on to
+the driver if the driver sets the BLK_FEAT_WRITE_CACHE flag and the driver
+needs to handle them.
+
+*NOTE*: The REQ_FUA bit also gets passed on when the BLK_FEAT_FUA flag is
+_not_ set. Any bio based driver that sets BLK_FEAT_WRITE_CACHE also needs to
+handle REQ_FUA.
- blk_queue_write_cache(sdkp->disk->queue, true, false);
+For remapping drivers the REQ_FUA bits need to be propagated to underlying
+devices, and a global flush needs to be implemented for bios with the
+REQ_PREFLUSH bit set.
-and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn. Note that
-REQ_PREFLUSH requests with a payload are automatically turned into a sequence
-of an empty REQ_OP_FLUSH request followed by the actual write by the block
-layer. For devices that also support the FUA bit the block layer needs
-to be told to pass through the REQ_FUA bit using::
+Implementation details for blk-mq drivers
+-----------------------------------------
- blk_queue_write_cache(sdkp->disk->queue, true, true);
+When the BLK_FEAT_WRITE_CACHE flag is set, REQ_OP_WRITE | REQ_PREFLUSH requests
+with a payload are automatically turned into a sequence of a REQ_OP_FLUSH
+request followed by the actual write by the block layer.
-and the driver must handle write requests that have the REQ_FUA bit set
-in prep_fn/request_fn. If the FUA bit is not natively supported the block
-layer turns it into an empty REQ_OP_FLUSH request after the actual write.
+When the BLK_FEAT_FUA flag is set, the REQ_FUA bit is simply passed on for the
+REQ_OP_WRITE request, else a REQ_OP_FLUSH request is sent by the block layer
+after the completion of the write request for bio submissions with the REQ_FUA
+bit set.
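Putting the above together, a blk-mq driver with a volatile write cache and
native FUA support would now advertise both in its queue_limits before
allocating the disk. A minimal sketch, based only on the flag and field names
mentioned above (not a complete driver)::

  struct queue_limits lim = { };

  /* The device has a volatile write cache: flushes will be sent. */
  lim.features |= BLK_FEAT_WRITE_CACHE;
  /* The device honours FUA natively: REQ_FUA is passed through. */
  lim.features |= BLK_FEAT_FUA;

  /* lim is then handed to whatever disk allocation helper the driver uses. */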
diff --git a/Documentation/bpf/libbpf/libbpf_overview.rst b/Documentation/bpf/libbpf/libbpf_overview.rst
index f36a2d4ffea2..f4d22f0c62b0 100644
--- a/Documentation/bpf/libbpf/libbpf_overview.rst
+++ b/Documentation/bpf/libbpf/libbpf_overview.rst
@@ -219,6 +219,14 @@ compilation and skeleton generation. Using Libbpf-rs will make building user
space part of the BPF application easier. Note that the BPF program themselves
must still be written in plain C.
+libbpf logging
+==============
+
+By default, libbpf logs informational and warning messages to stderr. The
+verbosity of these messages can be controlled by setting the environment
+variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log
+callback can be set using ``libbpf_set_print()``.
+
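A short sketch of the custom-callback route mentioned above, using the
standard libbpf print hook from ``<bpf/libbpf.h>``::

  #include <stdarg.h>
  #include <stdio.h>
  #include <bpf/libbpf.h>

  /* Forward libbpf warnings and info to stderr, drop debug chatter. */
  static int print_fn(enum libbpf_print_level level, const char *fmt,
                      va_list args)
  {
          if (level == LIBBPF_DEBUG)
                  return 0;
          return vfprintf(stderr, fmt, args);
  }

  int main(void)
  {
          libbpf_set_print(print_fn);     /* returns the previous callback */
          /* ... open and load BPF objects or skeletons as usual ... */
          return 0;
  }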
Additional Documentation
========================
diff --git a/Documentation/bpf/standardization/abi.rst b/Documentation/bpf/standardization/abi.rst
index 0c2e10eeb89a..41514137cb7b 100644
--- a/Documentation/bpf/standardization/abi.rst
+++ b/Documentation/bpf/standardization/abi.rst
@@ -23,3 +23,6 @@ The BPF calling convention is defined as:
R0 - R5 are scratch registers and BPF programs needs to spill/fill them if
necessary across calls.
+
+The BPF program needs to store the return value into register R0 before doing an
+``EXIT``.
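As an illustration, a BPF program written in C simply returns a value and the
compiler places that value in R0 before emitting ``EXIT`` (sketch only,
assuming the usual clang BPF target and libbpf ``SEC()`` conventions)::

  /* Compiled with clang -target bpf, the return below becomes
   * "r0 = 1; exit", i.e. the return value is stored in R0 before EXIT.
   */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("socket")
  int example_prog(struct __sk_buff *skb)
  {
          return 1;       /* handed back to the caller in R0 */
  }

  char LICENSE[] SEC("license") = "GPL";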
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index 00c93eb42613..ab820d565052 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -5,15 +5,29 @@
BPF Instruction Set Architecture (ISA)
======================================
-eBPF (which is no longer an acronym for anything), also commonly
+eBPF, also commonly
referred to as BPF, is a technology with origins in the Linux kernel
that can run untrusted programs in a privileged context such as an
operating system kernel. This document specifies the BPF instruction
set architecture (ISA).
+As a historical note, BPF originally stood for Berkeley Packet Filter,
+but now that it can do so much more than packet filtering, the acronym
+no longer makes sense. BPF is now considered a standalone term that
+does not stand for anything. The original BPF is sometimes referred to
+as cBPF (classic BPF) to distinguish it from the now widely deployed
+eBPF (extended BPF).
+
Documentation conventions
=========================
+The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+"SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and
+"OPTIONAL" in this document are to be interpreted as described in
+BCP 14 `<https://www.rfc-editor.org/info/rfc2119>`_
+`<https://www.rfc-editor.org/info/rfc8174>`_
+when, and only when, they appear in all capitals, as shown here.
+
For brevity and consistency, this document refers to families
of types using a shorthand syntax and refers to several expository,
mnemonic functions when describing the semantics of instructions.
@@ -25,7 +39,7 @@ Types
This document refers to integer types with the notation `SN` to specify
a type's signedness (`S`) and bit width (`N`), respectively.
-.. table:: Meaning of signedness notation.
+.. table:: Meaning of signedness notation
==== =========
S Meaning
@@ -34,7 +48,7 @@ a type's signedness (`S`) and bit width (`N`), respectively.
s signed
==== =========
-.. table:: Meaning of bit-width notation.
+.. table:: Meaning of bit-width notation
===== =========
N Bit width
@@ -52,24 +66,18 @@ numbers.
Functions
---------
-* htobe16: Takes an unsigned 16-bit number in host-endian format and
- returns the equivalent number as an unsigned 16-bit number in big-endian
- format.
-* htobe32: Takes an unsigned 32-bit number in host-endian format and
- returns the equivalent number as an unsigned 32-bit number in big-endian
- format.
-* htobe64: Takes an unsigned 64-bit number in host-endian format and
- returns the equivalent number as an unsigned 64-bit number in big-endian
- format.
-* htole16: Takes an unsigned 16-bit number in host-endian format and
- returns the equivalent number as an unsigned 16-bit number in little-endian
- format.
-* htole32: Takes an unsigned 32-bit number in host-endian format and
- returns the equivalent number as an unsigned 32-bit number in little-endian
- format.
-* htole64: Takes an unsigned 64-bit number in host-endian format and
- returns the equivalent number as an unsigned 64-bit number in little-endian
- format.
+
+The following byteswap functions are direction-agnostic. That is,
+the same function is used for conversion in either direction discussed
+below.
+
+* be16: Takes an unsigned 16-bit number and converts it between
+ host byte order and big-endian
+ (`IEN137 <https://www.rfc-editor.org/ien/ien137.txt>`_) byte order.
+* be32: Takes an unsigned 32-bit number and converts it between
+ host byte order and big-endian byte order.
+* be64: Takes an unsigned 64-bit number and converts it between
+ host byte order and big-endian byte order.
* bswap16: Takes an unsigned 16-bit number in either big- or little-endian
format and returns the equivalent number with the same bit width but
opposite endianness.
@@ -79,7 +87,12 @@ Functions
* bswap64: Takes an unsigned 64-bit number in either big- or little-endian
format and returns the equivalent number with the same bit width but
opposite endianness.
-
+* le16: Takes an unsigned 16-bit number and converts it between
+ host byte order and little-endian byte order.
+* le32: Takes an unsigned 32-bit number and converts it between
+ host byte order and little-endian byte order.
+* le64: Takes an unsigned 64-bit number and converts it between
+ host byte order and little-endian byte order.
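Purely as a reading aid, the expository functions above can be modelled in C
as follows (not part of the specification; the host-endianness probe is just
one conventional way to write it, shown for 16 bits only)::

  #include <stdint.h>

  /* bswap16: unconditional byte swap, independent of host byte order. */
  static uint16_t bswap16(uint16_t x)
  {
          return (uint16_t)((x >> 8) | (x << 8));
  }

  static int host_is_little_endian(void)
  {
          const uint16_t probe = 1;

          return *(const uint8_t *)&probe == 1;
  }

  /* le16/be16: identity when the host already uses the named byte order,
   * otherwise a byte swap.
   */
  static uint16_t le16(uint16_t x)
  {
          return host_is_little_endian() ? x : bswap16(x);
  }

  static uint16_t be16(uint16_t x)
  {
          return host_is_little_endian() ? bswap16(x) : x;
  }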
Definitions
-----------
@@ -106,9 +119,9 @@ Conformance groups
An implementation does not need to support all instructions specified in this
document (e.g., deprecated instructions). Instead, a number of conformance
-groups are specified. An implementation must support the base32 conformance
-group and may support additional conformance groups, where supporting a
-conformance group means it must support all instructions in that conformance
+groups are specified. An implementation MUST support the base32 conformance
+group and MAY support additional conformance groups, where supporting a
+conformance group means it MUST support all instructions in that conformance
group.
The use of named conformance groups enables interoperability between a runtime
@@ -209,7 +222,7 @@ For example::
07 1 0 00 00 11 22 33 44 r1 += 0x11223344 // big
Note that most instructions do not use all of the fields.
-Unused fields shall be cleared to zero.
+Unused fields SHALL be cleared to zero.
Wide instruction encoding
--------------------------
@@ -256,18 +269,20 @@ Instruction classes
The three least significant bits of the 'opcode' field store the instruction class:
-===== ===== =============================== ===================================
-class value description reference
-===== ===== =============================== ===================================
-LD 0x0 non-standard load operations `Load and store instructions`_
-LDX 0x1 load into register operations `Load and store instructions`_
-ST 0x2 store from immediate operations `Load and store instructions`_
-STX 0x3 store from register operations `Load and store instructions`_
-ALU 0x4 32-bit arithmetic operations `Arithmetic and jump instructions`_
-JMP 0x5 64-bit jump operations `Arithmetic and jump instructions`_
-JMP32 0x6 32-bit jump operations `Arithmetic and jump instructions`_
-ALU64 0x7 64-bit arithmetic operations `Arithmetic and jump instructions`_
-===== ===== =============================== ===================================
+.. table:: Instruction class
+
+ ===== ===== =============================== ===================================
+ class value description reference
+ ===== ===== =============================== ===================================
+ LD 0x0 non-standard load operations `Load and store instructions`_
+ LDX 0x1 load into register operations `Load and store instructions`_
+ ST 0x2 store from immediate operations `Load and store instructions`_
+ STX 0x3 store from register operations `Load and store instructions`_
+ ALU 0x4 32-bit arithmetic operations `Arithmetic and jump instructions`_
+ JMP 0x5 64-bit jump operations `Arithmetic and jump instructions`_
+ JMP32 0x6 32-bit jump operations `Arithmetic and jump instructions`_
+ ALU64 0x7 64-bit arithmetic operations `Arithmetic and jump instructions`_
+ ===== ===== =============================== ===================================
Arithmetic and jump instructions
================================
@@ -285,12 +300,14 @@ For arithmetic and jump instructions (``ALU``, ``ALU64``, ``JMP`` and
**s (source)**
the source operand location, which unless otherwise specified is one of:
- ====== ===== ==============================================
- source value description
- ====== ===== ==============================================
- K 0 use 32-bit 'imm' value as source operand
- X 1 use 'src_reg' register value as source operand
- ====== ===== ==============================================
+ .. table:: Source operand location
+
+ ====== ===== ==============================================
+ source value description
+ ====== ===== ==============================================
+ K 0 use 32-bit 'imm' value as source operand
+ X 1 use 'src_reg' register value as source operand
+ ====== ===== ==============================================
**instruction class**
the instruction class (see `Instruction classes`_)
@@ -305,27 +322,29 @@ The 'code' field encodes the operation as below, where 'src' refers to the
the source operand and 'dst' refers to the value of the destination
register.
-===== ===== ======= ==========================================================
-name code offset description
-===== ===== ======= ==========================================================
-ADD 0x0 0 dst += src
-SUB 0x1 0 dst -= src
-MUL 0x2 0 dst \*= src
-DIV 0x3 0 dst = (src != 0) ? (dst / src) : 0
-SDIV 0x3 1 dst = (src != 0) ? (dst s/ src) : 0
-OR 0x4 0 dst \|= src
-AND 0x5 0 dst &= src
-LSH 0x6 0 dst <<= (src & mask)
-RSH 0x7 0 dst >>= (src & mask)
-NEG 0x8 0 dst = -dst
-MOD 0x9 0 dst = (src != 0) ? (dst % src) : dst
-SMOD 0x9 1 dst = (src != 0) ? (dst s% src) : dst
-XOR 0xa 0 dst ^= src
-MOV 0xb 0 dst = src
-MOVSX 0xb 8/16/32 dst = (s8,s16,s32)src
-ARSH 0xc 0 :term:`sign extending<Sign Extend>` dst >>= (src & mask)
-END 0xd 0 byte swap operations (see `Byte swap instructions`_ below)
-===== ===== ======= ==========================================================
+.. table:: Arithmetic instructions
+
+ ===== ===== ======= ==========================================================
+ name code offset description
+ ===== ===== ======= ==========================================================
+ ADD 0x0 0 dst += src
+ SUB 0x1 0 dst -= src
+ MUL 0x2 0 dst \*= src
+ DIV 0x3 0 dst = (src != 0) ? (dst / src) : 0
+ SDIV 0x3 1 dst = (src != 0) ? (dst s/ src) : 0
+ OR 0x4 0 dst \|= src
+ AND 0x5 0 dst &= src
+ LSH 0x6 0 dst <<= (src & mask)
+ RSH 0x7 0 dst >>= (src & mask)
+ NEG 0x8 0 dst = -dst
+ MOD 0x9 0 dst = (src != 0) ? (dst % src) : dst
+ SMOD 0x9 1 dst = (src != 0) ? (dst s% src) : dst
+ XOR 0xa 0 dst ^= src
+ MOV 0xb 0 dst = src
+ MOVSX 0xb 8/16/32 dst = (s8,s16,s32)src
+ ARSH 0xc 0 :term:`sign extending<Sign Extend>` dst >>= (src & mask)
+ END 0xd 0 byte swap operations (see `Byte swap instructions`_ below)
+ ===== ===== ======= ==========================================================
Underflow and overflow are allowed during arithmetic operations, meaning
the 64-bit or 32-bit value will wrap. If BPF program execution would
@@ -374,7 +393,7 @@ interpreted as a 64-bit signed value.
Note that there are varying definitions of the signed modulo operation
when the dividend or divisor are negative, where implementations often
vary by language such that Python, Ruby, etc. differ from C, Go, Java,
-etc. This specification requires that signed modulo use truncated division
+etc. This specification requires that signed modulo MUST use truncated division
(where -13 % 3 == -1) as implemented in C, Go, etc.::
a % n = a - n * trunc(a / n)
@@ -386,6 +405,19 @@ The ``MOVSX`` instruction does a move operation with sign extension.
operands into 64-bit operands. Unlike other arithmetic instructions,
``MOVSX`` is only defined for register source operands (``X``).
+``{MOV, K, ALU64}`` means::
+
+ dst = (s64)imm
+
+``{MOV, X, ALU}`` means::
+
+ dst = (u32)src
+
+``{MOVSX, X, ALU}`` with 'offset' 8 means::
+
+ dst = (u32)(s32)(s8)src
+
+
The ``NEG`` instruction is only defined when the source bit is clear
(``K``).
@@ -404,15 +436,17 @@ only and do not use a separate source register or immediate value.
For ``ALU``, the 1-bit source operand field in the opcode is used to
select what byte order the operation converts from or to. For
``ALU64``, the 1-bit source operand field in the opcode is reserved
-and must be set to 0.
+and MUST be set to 0.
+
+.. table:: Byte swap instructions
-===== ======== ===== =================================================
-class source value description
-===== ======== ===== =================================================
-ALU TO_LE 0 convert between host byte order and little endian
-ALU TO_BE 1 convert between host byte order and big endian
-ALU64 Reserved 0 do byte swap unconditionally
-===== ======== ===== =================================================
+ ===== ======== ===== =================================================
+ class source value description
+ ===== ======== ===== =================================================
+ ALU LE 0 convert between host byte order and little endian
+ ALU BE 1 convert between host byte order and big endian
+ ALU64 Reserved 0 do byte swap unconditionally
+ ===== ======== ===== =================================================
The 'imm' field encodes the width of the swap operations. The following widths
are supported: 16, 32 and 64. Width 64 operations belong to the base64
@@ -421,19 +455,19 @@ conformance group.
Examples:
-``{END, TO_LE, ALU}`` with 'imm' = 16/32/64 means::
+``{END, LE, ALU}`` with 'imm' = 16/32/64 means::
- dst = htole16(dst)
- dst = htole32(dst)
- dst = htole64(dst)
+ dst = le16(dst)
+ dst = le32(dst)
+ dst = le64(dst)
-``{END, TO_BE, ALU}`` with 'imm' = 16/32/64 means::
+``{END, BE, ALU}`` with 'imm' = 16/32/64 means::
- dst = htobe16(dst)
- dst = htobe32(dst)
- dst = htobe64(dst)
+ dst = be16(dst)
+ dst = be32(dst)
+ dst = be64(dst)
-``{END, TO_LE, ALU64}`` with 'imm' = 16/32/64 means::
+``{END, TO, ALU64}`` with 'imm' = 16/32/64 means::
dst = bswap16(dst)
dst = bswap32(dst)
@@ -448,27 +482,29 @@ otherwise identical operations, and indicates the base64 conformance
group unless otherwise specified.
The 'code' field encodes the operation as below:
-======== ===== ======= ================================= ===================================================
-code value src_reg description notes
-======== ===== ======= ================================= ===================================================
-JA 0x0 0x0 PC += offset {JA, K, JMP} only
-JA 0x0 0x0 PC += imm {JA, K, JMP32} only
-JEQ 0x1 any PC += offset if dst == src
-JGT 0x2 any PC += offset if dst > src unsigned
-JGE 0x3 any PC += offset if dst >= src unsigned
-JSET 0x4 any PC += offset if dst & src
-JNE 0x5 any PC += offset if dst != src
-JSGT 0x6 any PC += offset if dst > src signed
-JSGE 0x7 any PC += offset if dst >= src signed
-CALL 0x8 0x0 call helper function by static ID {CALL, K, JMP} only, see `Helper functions`_
-CALL 0x8 0x1 call PC += imm {CALL, K, JMP} only, see `Program-local functions`_
-CALL 0x8 0x2 call helper function by BTF ID {CALL, K, JMP} only, see `Helper functions`_
-EXIT 0x9 0x0 return {CALL, K, JMP} only
-JLT 0xa any PC += offset if dst < src unsigned
-JLE 0xb any PC += offset if dst <= src unsigned
-JSLT 0xc any PC += offset if dst < src signed
-JSLE 0xd any PC += offset if dst <= src signed
-======== ===== ======= ================================= ===================================================
+.. table:: Jump instructions
+
+ ======== ===== ======= ================================= ===================================================
+ code value src_reg description notes
+ ======== ===== ======= ================================= ===================================================
+ JA 0x0 0x0 PC += offset {JA, K, JMP} only
+ JA 0x0 0x0 PC += imm {JA, K, JMP32} only
+ JEQ 0x1 any PC += offset if dst == src
+ JGT 0x2 any PC += offset if dst > src unsigned
+ JGE 0x3 any PC += offset if dst >= src unsigned
+ JSET 0x4 any PC += offset if dst & src
+ JNE 0x5 any PC += offset if dst != src
+ JSGT 0x6 any PC += offset if dst > src signed
+ JSGE 0x7 any PC += offset if dst >= src signed
+ CALL 0x8 0x0 call helper function by static ID {CALL, K, JMP} only, see `Helper functions`_
+ CALL 0x8 0x1 call PC += imm {CALL, K, JMP} only, see `Program-local functions`_
+ CALL 0x8 0x2 call helper function by BTF ID {CALL, K, JMP} only, see `Helper functions`_
+ EXIT 0x9 0x0 return {CALL, K, JMP} only
+ JLT 0xa any PC += offset if dst < src unsigned
+ JLE 0xb any PC += offset if dst <= src unsigned
+ JSLT 0xc any PC += offset if dst < src signed
+ JSLE 0xd any PC += offset if dst <= src signed
+ ======== ===== ======= ================================= ===================================================
where 'PC' denotes the program counter, and the offset to increment by
is in units of 64-bit instructions relative to the instruction following
@@ -476,9 +512,6 @@ the jump instruction. Thus 'PC += 1' skips execution of the next
instruction if it's a basic instruction or results in undefined behavior
if the next instruction is a 128-bit wide instruction.
-The BPF program needs to store the return value into register R0 before doing an
-``EXIT``.
-
Example:
``{JSGE, X, JMP32}`` means::
@@ -487,6 +520,10 @@ Example:
where 's>=' indicates a signed '>=' comparison.
+``{JLE, K, JMP}`` means::
+
+ if dst <= (u64)(s64)imm goto +offset
+
``{JA, K, JMP32}`` means::
gotol +imm
@@ -510,19 +547,25 @@ Helper functions are a concept whereby BPF programs can call into a
set of function calls exposed by the underlying platform.
Historically, each helper function was identified by a static ID
-encoded in the 'imm' field. The available helper functions may differ
-for each program type, but static IDs are unique across all program types.
+encoded in the 'imm' field. Further documentation of helper functions
+is outside the scope of this document and standardization is left for
+future work, but use is widely deployed and more information can be
+found in platform-specific documentation (e.g., Linux kernel documentation).
Platforms that support the BPF Type Format (BTF) support identifying
a helper function by a BTF ID encoded in the 'imm' field, where the BTF ID
-identifies the helper name and type.
+identifies the helper name and type. Further documentation of BTF
+is outside the scope of this document and standardization is left for
+future work, but use is widely deployed and more information can be
+found in platform-specific documentation (e.g., Linux kernel documentation).
Program-local functions
~~~~~~~~~~~~~~~~~~~~~~~
Program-local functions are functions exposed by the same BPF program as the
-caller, and are referenced by offset from the call instruction, similar to
-``JA``. The offset is encoded in the 'imm' field of the call instruction.
-An ``EXIT`` within the program-local function will return to the caller.
+caller, and are referenced by offset from the instruction following the call
+instruction, similar to ``JA``. The offset is encoded in the 'imm' field of
+the call instruction. An ``EXIT`` within the program-local function will
+return to the caller.
Load and store instructions
===========================
@@ -537,6 +580,8 @@ For load and store instructions (``LD``, ``LDX``, ``ST``, and ``STX``), the
**mode**
The mode modifier is one of:
+ .. table:: Mode modifier
+
============= ===== ==================================== =============
mode modifier value description reference
============= ===== ==================================== =============
@@ -551,6 +596,8 @@ For load and store instructions (``LD``, ``LDX``, ``ST``, and ``STX``), the
**sz (size)**
The size modifier is one of:
+ .. table:: Size modifier
+
==== ===== =====================
size value description
==== ===== =====================
@@ -619,14 +666,16 @@ The 'imm' field is used to encode the actual atomic operation.
Simple atomic operation use a subset of the values defined to encode
arithmetic operations in the 'imm' field to encode the atomic operation:
-======== ===== ===========
-imm value description
-======== ===== ===========
-ADD 0x00 atomic add
-OR 0x40 atomic or
-AND 0x50 atomic and
-XOR 0xa0 atomic xor
-======== ===== ===========
+.. table:: Simple atomic operations
+
+ ======== ===== ===========
+ imm value description
+ ======== ===== ===========
+ ADD 0x00 atomic add
+ OR 0x40 atomic or
+ AND 0x50 atomic and
+ XOR 0xa0 atomic xor
+ ======== ===== ===========
``{ATOMIC, W, STX}`` with 'imm' = ADD means::
@@ -640,13 +689,15 @@ XOR 0xa0 atomic xor
In addition to the simple atomic operations, there also is a modifier and
two complex atomic operations:
-=========== ================ ===========================
-imm value description
-=========== ================ ===========================
-FETCH 0x01 modifier: return old value
-XCHG 0xe0 | FETCH atomic exchange
-CMPXCHG 0xf0 | FETCH atomic compare and exchange
-=========== ================ ===========================
+.. table:: Complex atomic operations
+
+ =========== ================ ===========================
+ imm value description
+ =========== ================ ===========================
+ FETCH 0x01 modifier: return old value
+ XCHG 0xe0 | FETCH atomic exchange
+ CMPXCHG 0xf0 | FETCH atomic compare and exchange
+ =========== ================ ===========================
The ``FETCH`` modifier is optional for simple atomic operations, and
always set for the complex atomic operations. If the ``FETCH`` flag
@@ -673,17 +724,19 @@ The following table defines a set of ``{IMM, DW, LD}`` instructions
with opcode subtypes in the 'src_reg' field, using new terms such as "map"
defined further below:
-======= ========================================= =========== ==============
-src_reg pseudocode imm type dst type
-======= ========================================= =========== ==============
-0x0 dst = (next_imm << 32) | imm integer integer
-0x1 dst = map_by_fd(imm) map fd map
-0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data address
-0x3 dst = var_addr(imm) variable id data address
-0x4 dst = code_addr(imm) integer code address
-0x5 dst = map_by_idx(imm) map index map
-0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data address
-======= ========================================= =========== ==============
+.. table:: 64-bit immediate instructions
+
+ ======= ========================================= =========== ==============
+ src_reg pseudocode imm type dst type
+ ======= ========================================= =========== ==============
+ 0x0 dst = (next_imm << 32) | imm integer integer
+ 0x1 dst = map_by_fd(imm) map fd map
+ 0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data address
+ 0x3 dst = var_addr(imm) variable id data address
+ 0x4 dst = code_addr(imm) integer code address
+ 0x5 dst = map_by_idx(imm) map index map
+ 0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data address
+ ======= ========================================= =========== ==============
where
@@ -725,5 +778,5 @@ carried over from classic BPF. These instructions used an instruction
class of ``LD``, a size modifier of ``W``, ``H``, or ``B``, and a
mode modifier of ``ABS`` or ``IND``. The 'dst_reg' and 'offset' fields were
set to zero, and 'src_reg' was set to zero for ``ABS``. However, these
-instructions are deprecated and should no longer be used. All legacy packet
+instructions are deprecated and SHOULD no longer be used. All legacy packet
access instructions belong to the "packet" conformance group.
diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst
index 7964fe134277..6c1303cff159 100644
--- a/Documentation/cdrom/cdrom-standard.rst
+++ b/Documentation/cdrom/cdrom-standard.rst
@@ -217,7 +217,7 @@ current *struct* is::
int (*media_changed)(struct cdrom_device_info *, int);
int (*tray_move)(struct cdrom_device_info *, int);
int (*lock_door)(struct cdrom_device_info *, int);
- int (*select_speed)(struct cdrom_device_info *, int);
+ int (*select_speed)(struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.
::
- int select_speed(struct cdrom_device_info *cdi, int speed)
+ int select_speed(struct cdrom_device_info *cdi, unsigned long speed)
Some CD-ROM drives are capable of changing their head-speed. There
are several reasons for changing the speed of a CD-ROM drive. Badly
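As a hedged sketch of a driver callback matching the updated ``select_speed`` prototype, the fragment below builds against ``include/linux/cdrom.h``; the driver name, the speed limit and the hardware programming are placeholders, not taken from this patch::

  #include <linux/cdrom.h>

  #define MYDRV_MAX_SPEED 52              /* placeholder drive limit */

  static int mydrv_select_speed(struct cdrom_device_info *cdi,
                                unsigned long speed)
  {
          if (speed == 0)                 /* 0 is commonly "restore maximum speed" */
                  speed = MYDRV_MAX_SPEED;

          /* ... program the drive hardware here ... */
          return 0;
  }

  static struct cdrom_device_ops mydrv_dops = {
          .select_speed = mydrv_select_speed,
          /* ... other operations ... */
  };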
diff --git a/Documentation/core-api/swiotlb.rst b/Documentation/core-api/swiotlb.rst
index 5ad2c9ca85bc..cf06bae44ff8 100644
--- a/Documentation/core-api/swiotlb.rst
+++ b/Documentation/core-api/swiotlb.rst
@@ -192,7 +192,7 @@ alignment larger than PAGE_SIZE.
Dynamic swiotlb
---------------
-When CONFIG_DYNAMIC_SWIOTLB is enabled, swiotlb can do on-demand expansion of
+When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
the amount of memory available for allocation as bounce buffers. If a bounce
buffer request fails due to lack of available space, an asynchronous background
task is kicked off to allocate memory from general system memory and turn it
diff --git a/Documentation/dev-tools/gpio-sloppy-logic-analyzer.rst b/Documentation/dev-tools/gpio-sloppy-logic-analyzer.rst
new file mode 100644
index 000000000000..d69f24c0d9e1
--- /dev/null
+++ b/Documentation/dev-tools/gpio-sloppy-logic-analyzer.rst
@@ -0,0 +1,93 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================================
+Linux Kernel GPIO based sloppy logic analyzer
+=============================================
+
+:Author: Wolfram Sang
+
+Introduction
+============
+
+This document briefly describes how to run the GPIO based in-kernel sloppy
+logic analyzer on an isolated CPU.
+
+The sloppy logic analyzer will utilize a few GPIO lines in input mode on a
+system to rapidly sample these digital lines, which will, if the Nyquist
+criterion is met, result in a time series log with approximate waveforms as they
+appeared on these lines. One way to use it is to analyze external traffic
+connected to these GPIO lines with wires (i.e. digital probes), acting as a
+common logic analyzer.
+
+Another feature is to snoop on on-chip peripherals if the I/O cells of these
+peripherals can be used in GPIO input mode at the same time as they are being
+used as inputs or outputs for the peripheral. That means you could e.g. snoop
+I2C traffic without any wiring (if your hardware supports it). In the pin
+control subsystem such pin controllers are called "non-strict": a certain pin
+can be used with a certain peripheral and as a GPIO input line at the same
+time.
+
+Note that this is a last resort analyzer which can be affected by latencies,
+non-deterministic code paths and non-maskable interrupts. It is called 'sloppy'
+for a reason. However, for e.g. remote development, it may be useful to get a
+first view and aid further debugging.
+
+Setup
+=====
+
+Your kernel must have CONFIG_DEBUG_FS and CONFIG_CPUSETS enabled. Ideally, your
+runtime environment does not otherwise utilize cpusets; isolation of a CPU core
+is then easiest. If you do need cpusets, check that the helper script for the
+sloppy logic analyzer does not interfere with your other settings.
+
+Tell the kernel which GPIOs are used as probes. For a Device Tree based system,
+you need to use the following bindings. Because these bindings are only for
+debugging, there is no official schema::
+
+ i2c-analyzer {
+ compatible = "gpio-sloppy-logic-analyzer";
+ probe-gpios = <&gpio6 21 GPIO_OPEN_DRAIN>, <&gpio6 4 GPIO_OPEN_DRAIN>;
+ probe-names = "SCL", "SDA";
+ };
+
+Note that you must provide a name for every GPIO specified. Currently, a
+maximum of 8 probes is supported. Up to 32 are likely possible but not
+implemented yet.
+
+Usage
+=====
+
+The logic analyzer is configurable via files in debugfs. However, it is
+strongly recommended to not use them directly, but to use the script
+``tools/gpio/gpio-sloppy-logic-analyzer``. Besides checking parameters more
+extensively, it will isolate the CPU core so you will have the least
+disturbance while measuring.
+
+The script has a help option explaining the parameters. For the above DT
+snippet which analyzes an I2C bus at 400kHz on a Renesas Salvator-XS board, the
+following settings are used: The isolated CPU shall be CPU1 because it is a big
+core in a big.LITTLE setup. Because CPU1 is the default, we don't need a
+parameter. The bus speed is 400kHz. So, the sampling theorem says we need to
+sample at least at 800kHz. However, falling edges of both signals in an I2C
+start condition happen faster, so we need a higher sampling frequency, e.g.
+``-s 1500000`` for 1.5MHz. Also, we don't want to sample right away but wait
+for a start condition on an idle bus. So, we need to set a trigger to a falling
+edge on SDA while SCL stays high, i.e. ``-t 1H+2F``. Last is the duration, let
+us assume 15ms here which results in the parameter ``-d 15000``. So,
+altogether::
+
+ gpio-sloppy-logic-analyzer -s 1500000 -t 1H+2F -d 15000
+
+Note that the process will return you to the prompt, but a sub-process is
+still sampling in the background. Until it has finished, you will not find a
+result file in the current or specified directory. For the above example, we
+will then need to trigger I2C communication::
+
+ i2cdetect -y -r <your bus number>
+
+The result is a .sr file to be consumed with PulseView or sigrok-cli from the
+free `sigrok`_ project. It is a zip file that also contains the binary sample
+data, which may be consumed by other software. The filename is the logic analyzer
+instance name plus a since-epoch timestamp.
+
+.. _sigrok: https://sigrok.org/
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index efa49cdc8e2e..6971ed581c08 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -32,6 +32,7 @@ Documentation/dev-tools/testing-overview.rst
kunit/index
ktap
checkuapi
+ gpio-sloppy-logic-analyzer
.. only:: subproject and html
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index dcf634e411bd..f3766e326d1e 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -228,6 +228,13 @@ In general, the rules for selftests are
* Don't cause the top-level "make run_tests" to fail if your feature is
unconfigured.
+ * The output of tests must conform to the TAP standard to ensure high
+ testing quality and to capture failures/errors with specific details.
+ The kselftest.h and kselftest_harness.h headers provide wrappers for
+ outputting test results. These wrappers should be used for pass,
+ fail, exit, and skip messages. CI systems can easily parse TAP output
+ messages to detect test results.
+
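As a hedged sketch, a minimal TAP-emitting selftest can be structured as follows; the include path and the test body are illustrative, while the ksft_* wrappers come from kselftest.h::

  #include <stdbool.h>

  #include "../kselftest.h"      /* relative path is illustrative */

  static bool feature_works(void)
  {
          return true;           /* placeholder for a real kernel-feature check */
  }

  int main(void)
  {
          ksft_print_header();   /* TAP version line */
          ksft_set_plan(1);      /* announce one test point */

          if (feature_works()) {
                  ksft_test_result_pass("feature check\n");
                  ksft_exit_pass();
          } else {
                  ksft_test_result_fail("feature check\n");
                  ksft_exit_fail();
          }
  }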
Contributing new tests (details)
================================
diff --git a/Documentation/devicetree/bindings/arm/airoha.yaml b/Documentation/devicetree/bindings/arm/airoha.yaml
index 3292c669ee11..7c38c08dbf3f 100644
--- a/Documentation/devicetree/bindings/arm/airoha.yaml
+++ b/Documentation/devicetree/bindings/arm/airoha.yaml
@@ -22,6 +22,10 @@ properties:
- enum:
- airoha,en7523-evb
- const: airoha,en7523
+ - items:
+ - enum:
+ - airoha,en7581-evb
+ - const: airoha,en7581
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/arm/amlogic.yaml b/Documentation/devicetree/bindings/arm/amlogic.yaml
index a374b98080fe..0647851ae1f5 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic.yaml
@@ -91,6 +91,7 @@ properties:
- libretech,aml-s905x-cc
- libretech,aml-s905x-cc-v2
- nexbox,a95x
+ - osmc,vero4k
- const: amlogic,s905x
- const: amlogic,meson-gxl
@@ -107,6 +108,13 @@ properties:
- const: amlogic,s905d
- const: amlogic,meson-gxl
+ - description: Boards with the Amlogic Meson GXLX S905L SoC
+ items:
+ - enum:
+ - amlogic,p271
+ - const: amlogic,s905l
+ - const: amlogic,meson-gxlx
+
- description: Boards with the Amlogic Meson GXM S912 SoC
items:
- enum:
@@ -169,6 +177,8 @@ properties:
- azw,gtking
- azw,gtking-pro
- bananapi,bpi-m2s
+ - dream,dreambox-one
+ - dream,dreambox-two
- hardkernel,odroid-go-ultra
- hardkernel,odroid-n2
- hardkernel,odroid-n2l
diff --git a/Documentation/devicetree/bindings/arm/amlogic/analog-top.txt b/Documentation/devicetree/bindings/arm/amlogic/analog-top.txt
deleted file mode 100644
index 101dc21014ec..000000000000
--- a/Documentation/devicetree/bindings/arm/amlogic/analog-top.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Amlogic Meson8 and Meson8b "analog top" registers:
---------------------------------------------------
-
-The analog top registers contain information about the so-called
-"metal revision" (which encodes the "minor version") of the SoC.
-
-Required properties:
-- reg: the register range of the analog top registers
-- compatible: depending on the SoC this should be one of:
- - "amlogic,meson8-analog-top"
- - "amlogic,meson8b-analog-top"
- along with "syscon"
-
-
-Example:
-
- analog_top: analog-top@81a8 {
- compatible = "amlogic,meson8-analog-top", "syscon";
- reg = <0x81a8 0x14>;
- };
diff --git a/Documentation/devicetree/bindings/arm/amlogic/assist.txt b/Documentation/devicetree/bindings/arm/amlogic/assist.txt
deleted file mode 100644
index 7656812b67b9..000000000000
--- a/Documentation/devicetree/bindings/arm/amlogic/assist.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Amlogic Meson6/Meson8/Meson8b assist registers:
------------------------------------------------
-
-The assist registers contain basic information about the SoC,
-for example the encoded SoC part number.
-
-Required properties:
-- reg: the register range of the assist registers
-- compatible: should be "amlogic,meson-mx-assist" along with "syscon"
-
-
-Example:
-
- assist: assist@7c00 {
- compatible = "amlogic,meson-mx-assist", "syscon";
- reg = <0x7c00 0x200>;
- };
diff --git a/Documentation/devicetree/bindings/arm/amlogic/bootrom.txt b/Documentation/devicetree/bindings/arm/amlogic/bootrom.txt
deleted file mode 100644
index 407e27f230ab..000000000000
--- a/Documentation/devicetree/bindings/arm/amlogic/bootrom.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Amlogic Meson6/Meson8/Meson8b bootrom:
---------------------------------------
-
-The bootrom register area can be used to access SoC specific
-information, such as the "misc version".
-
-Required properties:
-- reg: the register range of the bootrom registers
-- compatible: should be "amlogic,meson-mx-bootrom" along with "syscon"
-
-
-Example:
-
- bootrom: bootrom@d9040000 {
- compatible = "amlogic,meson-mx-bootrom", "syscon";
- reg = <0xd9040000 0x10000>;
- };
diff --git a/Documentation/devicetree/bindings/arm/amlogic/pmu.txt b/Documentation/devicetree/bindings/arm/amlogic/pmu.txt
deleted file mode 100644
index 72f8d08198b6..000000000000
--- a/Documentation/devicetree/bindings/arm/amlogic/pmu.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Amlogic Meson8 and Meson8b power-management-unit:
--------------------------------------------------
-
-The pmu is used to turn off and on different power domains of the SoCs
-This includes the power to the CPU cores.
-
-Required node properties:
-- compatible value : depending on the SoC this should be one of:
- "amlogic,meson8-pmu"
- "amlogic,meson8b-pmu"
-- reg : physical base address and the size of the registers window
-
-Example:
-
- pmu@c81000e4 {
- compatible = "amlogic,meson8b-pmu", "syscon";
- reg = <0xc81000e0 0x18>;
- };
diff --git a/Documentation/devicetree/bindings/arm/arm,juno-fpga-apb-regs.yaml b/Documentation/devicetree/bindings/arm/arm,juno-fpga-apb-regs.yaml
new file mode 100644
index 000000000000..ce5f2e1ec1ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,juno-fpga-apb-regs.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,juno-fpga-apb-regs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Juno FPGA APB Registers
+
+maintainers:
+ - Sudeep Holla <sudeep.holla@arm.com>
+
+properties:
+ compatible:
+ items:
+ - const: arm,juno-fpga-apb-regs
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ ranges: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+patternProperties:
+ "^led@[0-9a-f]+,[0-9a-f]$":
+ $ref: /schemas/leds/register-bit-led.yaml#
+
+required:
+ - compatible
+ - reg
+ - ranges
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@10000 {
+ compatible = "arm,juno-fpga-apb-regs", "syscon", "simple-mfd";
+ reg = <0x010000 0x1000>;
+ ranges = <0x0 0x10000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ led@8,0 {
+ compatible = "register-bit-led";
+ reg = <0x08 0x04>;
+ offset = <0x08>;
+ mask = <0x01>;
+ label = "vexpress:0";
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index 67a66bf74895..7374beb5a613 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -41,35 +41,6 @@ Examples:
reg = <0xffffe800 0x200>;
};
-RAMC PHY Controller required properties:
-- compatible: Should be "microchip,sama7g5-ddr3phy", "syscon"
-- reg: Should contain registers location and length
-
-Example:
-
- ddr3phy: ddr3phy@e3804000 {
- compatible = "microchip,sama7g5-ddr3phy", "syscon";
- reg = <0xe3804000 0x1000>;
-};
-
-Special Function Registers (SFR)
-
-Special Function Registers (SFR) manage specific aspects of the integrated
-memory, bridge implementations, processor and other functionality not controlled
-elsewhere.
-
-required properties:
-- compatible: Should be "atmel,<chip>-sfr", "syscon" or
- "atmel,<chip>-sfrbu", "syscon"
- <chip> can be "sama5d3", "sama5d4" or "sama5d2".
- It also can be "microchip,sam9x60-sfr", "syscon".
-- reg: Should contain registers location and length
-
- sfr@f0038000 {
- compatible = "atmel,sama5d3-sfr", "syscon";
- reg = <0xf0038000 0x60>;
- };
-
Security Module (SECUMOD)
The Security Module macrocell provides all necessary secure functions to avoid
diff --git a/Documentation/devicetree/bindings/arm/axis.txt b/Documentation/devicetree/bindings/arm/axis.txt
index ae345e1c8d2b..ebd33a88776f 100644
--- a/Documentation/devicetree/bindings/arm/axis.txt
+++ b/Documentation/devicetree/bindings/arm/axis.txt
@@ -7,22 +7,6 @@ ARTPEC-6 ARM SoC
Required root node properties:
- compatible = "axis,artpec6";
-ARTPEC-6 System Controller
---------------------------
-
-The ARTPEC-6 has a system controller with mixed functions controlling DMA, PCIe
-and resets.
-
-Required properties:
-- compatible: "axis,artpec6-syscon", "syscon"
-- reg: Address and length of the register bank.
-
-Example:
- syscon {
- compatible = "axis,artpec6-syscon", "syscon";
- reg = <0xf8000000 0x48>;
- };
-
ARTPEC-6 Development board:
---------------------------
Required root node properties:
diff --git a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
index 162a39dab218..e4ff71f006b8 100644
--- a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
+++ b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
@@ -23,6 +23,12 @@ properties:
- raspberrypi,4-model-b
- const: brcm,bcm2711
+ - description: BCM2712 based Boards
+ items:
+ - enum:
+ - raspberrypi,5-model-b
+ - const: brcm,bcm2712
+
- description: BCM2835 based Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp b/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
index 35e5afb6d9ad..cc7b1402a31f 100644
--- a/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
+++ b/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
@@ -27,16 +27,6 @@ Properties:
- reg : Offset and length of the register set for the device
-* Alpine System-Fabric Service Registers
-
-The System-Fabric Service Registers allow various operation on CPU and
-system fabric, like powering CPUs off.
-
-Properties:
-- compatible : Should contain "al,alpine-sysfabric-service" and "syscon".
-- reg : Offset and length of the register set for the device
-
-
Example:
cpus {
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-cpucfg.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-cpucfg.txt
deleted file mode 100644
index 44aa3c451ccf..000000000000
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-cpucfg.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Freescale Vybrid Miscellaneous System Control - CPU Configuration
-
-The MSCM IP contains multiple sub modules, this binding describes the first
-block of registers which contains CPU configuration information.
-
-Required properties:
-- compatible: "fsl,vf610-mscm-cpucfg", "syscon"
-- reg: the register range of the MSCM CPU configuration registers
-
-Example:
- mscm_cpucfg: cpucfg@40001000 {
- compatible = "fsl,vf610-mscm-cpucfg", "syscon";
- reg = <0x40001000 0x800>;
- }
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 6d185d09cb6a..80747d79418a 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -8,7 +8,6 @@ title: Freescale i.MX Platforms
maintainers:
- Shawn Guo <shawnguo@kernel.org>
- - Li Yang <leoyang.li@nxp.com>
properties:
$nodename:
@@ -363,6 +362,12 @@ properties:
- const: gw,ventana
- const: fsl,imx6q
+ - description: i.MX6Q Kontron SMARC-sAMX6i on SMARC Eval Carrier 2.0
+ items:
+ - const: kontron,imx6q-samx6i-ads2
+ - const: kontron,imx6q-samx6i
+ - const: fsl,imx6q
+
- description: i.MX6Q PHYTEC phyBOARD-Mira
items:
- enum:
@@ -544,6 +549,12 @@ properties:
- const: gw,ventana
- const: fsl,imx6dl
+ - description: i.MX6DL Kontron SMARC-sAMX6i on SMARC Eval Carrier 2.0
+ items:
+ - const: kontron,imx6dl-samx6i-ads2
+ - const: kontron,imx6dl-samx6i
+ - const: fsl,imx6dl
+
- description: i.MX6DL PHYTEC phyBOARD-Mira
items:
- enum:
@@ -946,6 +957,13 @@ properties:
- prt,prt8mm # i.MX8MM Protonic PRT8MM Board
- const: fsl,imx8mm
+ - description: Compulab i.MX8MM UCM SoM based boards
+ items:
+ - enum:
+ - compulab,imx8mm-iot-gateway # i.MX8MM Compulab IoT-Gateway
+ - const: compulab,imx8mm-ucm-som # i.MX8MM Compulab UCM SoM
+ - const: fsl,imx8mm
+
- description: Emtop i.MX8MM based Boards
items:
- const: ees,imx8mm-emtop-baseboard # i.MX8MM Emtop SoM on i.MX8M Mini Baseboard V1
@@ -1145,8 +1163,9 @@ properties:
version as an industrial computing device.
items:
- enum:
- - tq,imx8mp-tqma8mpql-mba8mpxl # TQ-Systems GmbH i.MX8MP TQMa8MPQL SOM on MBa8MPxL
- - const: tq,imx8mp-tqma8mpql # TQ-Systems GmbH i.MX8MP TQMa8MPQL SOM
+ - tq,imx8mp-tqma8mpql-mba8mpxl # TQ-Systems GmbH i.MX8MP TQMa8MPQL SOM on MBa8MPxL
+ - tq,imx8mp-tqma8mpql-mba8mp-ras314 # TQ-Systems GmbH i.MX8MP TQMa8MPQL SOM on MBa8MP-RAS314
+ - const: tq,imx8mp-tqma8mpql # TQ-Systems GmbH i.MX8MP TQMa8MPQL SOM
- const: fsl,imx8mp
- description: i.MX8MQ based Boards
@@ -1272,9 +1291,16 @@ properties:
- description: i.MX93 based Boards
items:
- enum:
+ - fsl,imx93-9x9-qsb # i.MX93 9x9 QSB Board
- fsl,imx93-11x11-evk # i.MX93 11x11 EVK Board
- const: fsl,imx93
+ - description: i.MX95 based Boards
+ items:
+ - enum:
+ - fsl,imx95-19x19-evk # i.MX95 19x19 EVK Board
+ - const: fsl,imx95
+
- description: i.MXRT1050 based Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
index 7f06b1080244..25a2b42105e5 100644
--- a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
+++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
@@ -20,7 +20,7 @@ description: |
initialized early into boot process and provides services to Operating Systems
on multiple processors including ones running Linux.
- See http://processors.wiki.ti.com/index.php/TISCI for protocol definition.
+ See https://software-dl.ti.com/tisci/esd/latest/index.html for protocol definition.
The TI-SCI node describes the Texas Instrument's System Controller entity node.
This parent node may optionally have additional children nodes which describe
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml b/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml
index 16d2e132d3d1..538d91be8857 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml
@@ -82,4 +82,22 @@ properties:
- const: marvell,armada-ap807-quad
- const: marvell,armada-ap807
+ - description:
+ SolidRun CN9130 SoM based single-board computers
+ items:
+ - enum:
+ - solidrun,cn9130-clearfog-base
+ - solidrun,cn9130-clearfog-pro
+ - solidrun,cn9131-solidwan
+ - const: solidrun,cn9130-sr-som
+ - const: marvell,cn9130
+
+ - description:
+ SolidRun CN9132 COM-Express Type 7 based single-board computers
+ items:
+ - enum:
+ - solidrun,cn9132-clearfog
+ - const: solidrun,cn9132-sr-cex7
+ - const: marvell,cn9130
+
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/arm/marvell/marvell,dove.txt b/Documentation/devicetree/bindings/arm/marvell/marvell,dove.txt
index aaaf64c56e44..e10e8525eabd 100644
--- a/Documentation/devicetree/bindings/arm/marvell/marvell,dove.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/marvell,dove.txt
@@ -5,18 +5,3 @@ Boards with a Marvell Dove SoC shall have the following properties:
Required root node property:
- compatible: must contain "marvell,dove";
-
-* Global Configuration registers
-
-Global Configuration registers of Dove SoC are shared by a syscon node.
-
-Required properties:
-- compatible: must contain "marvell,dove-global-config" and "syscon".
-- reg: base address and size of the Global Configuration registers.
-
-Example:
-
-gconf: global-config@e802c {
- compatible = "marvell,dove-global-config", "syscon";
- reg = <0xe802c 0x14>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek.yaml b/Documentation/devicetree/bindings/arm/mediatek.yaml
index 09f9ffd3ff7b..1d4bb50fcd8d 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek.yaml
@@ -85,12 +85,15 @@ properties:
- const: mediatek,mt7629
- items:
- enum:
+ - cudy,wr3000-v1
+ - openwrt,one
- xiaomi,ax3000t
- const: mediatek,mt7981b
- items:
- enum:
- acelink,ew-7886cax
- bananapi,bpi-r3
+ - bananapi,bpi-r3mini
- mediatek,mt7986a-rfb
- const: mediatek,mt7986a
- items:
@@ -293,6 +296,13 @@ properties:
- const: google,tentacruel-sku327683
- const: google,tentacruel
- const: mediatek,mt8186
+ - description: Google Voltorb (Acer Chromebook 311 C723/C732T)
+ items:
+ - enum:
+ - google,voltorb-sku589824
+ - google,voltorb-sku589825
+ - const: google,voltorb
+ - const: mediatek,mt8186
- items:
- enum:
- mediatek,mt8186-evb
@@ -342,6 +352,14 @@ properties:
- const: google,tomato-rev3
- const: google,tomato
- const: mediatek,mt8195
+ - description: HP Dojo sku1, 3, 5, 7 (HP Chromebook x360 13b-ca0002sa)
+ items:
+ - const: google,dojo-sku7
+ - const: google,dojo-sku5
+ - const: google,dojo-sku3
+ - const: google,dojo-sku1
+ - const: google,dojo
+ - const: mediatek,mt8195
- items:
- enum:
- mediatek,mt8195-demo
@@ -353,6 +371,12 @@ properties:
- const: mediatek,mt8365
- items:
- enum:
+ - mediatek,mt8390-evk
+ - const: mediatek,mt8390
+ - const: mediatek,mt8188
+ - items:
+ - enum:
+ - kontron,3-5-sbc-i1200
- mediatek,mt8395-evk
- radxa,nio-12l
- const: mediatek,mt8395
diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml
index 99b5e9530707..528544d0a161 100644
--- a/Documentation/devicetree/bindings/arm/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/pmu.yaml
@@ -53,14 +53,20 @@ properties:
- arm,cortex-a710-pmu
- arm,cortex-a715-pmu
- arm,cortex-a720-pmu
+ - arm,cortex-a725-pmu
- arm,cortex-x1-pmu
- arm,cortex-x2-pmu
- arm,cortex-x3-pmu
- arm,cortex-x4-pmu
+ - arm,cortex-x925-pmu
- arm,neoverse-e1-pmu
- arm,neoverse-n1-pmu
- arm,neoverse-n2-pmu
+ - arm,neoverse-n3-pmu
- arm,neoverse-v1-pmu
+ - arm,neoverse-v2-pmu
+ - arm,neoverse-v3-pmu
+ - arm,neoverse-v3ae-pmu
- brcm,vulcan-pmu
- cavium,thunder-pmu
- nvidia,denver-pmu
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index ae885414b181..f08e13b61172 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -42,6 +42,7 @@ description: |
msm8996
msm8998
qcs404
+ qcs8550
qcm2290
qcm6490
qdu1000
@@ -96,6 +97,7 @@ properties:
- items:
- enum:
- qcom,apq8016-sbc
+ - schneider,apq8016-hmibsc
- const: qcom,apq8016
- items:
@@ -104,6 +106,7 @@ properties:
- huawei,sturgeon
- lg,lenok
- samsung,matisse-wifi
+ - samsung,milletwifi
- const: qcom,apq8026
- items:
@@ -138,6 +141,7 @@ properties:
- microsoft,makepeace
- microsoft,moneypenny
- motorola,falcon
+ - samsung,ms013g
- samsung,s3ve3g
- const: qcom,msm8226
@@ -175,6 +179,7 @@ properties:
- items:
- enum:
- lge,hammerhead
+ - samsung,hlte
- sony,xperia-amami
- sony,xperia-honami
- const: qcom,msm8974
@@ -182,8 +187,10 @@ properties:
- items:
- enum:
- fairphone,fp2
+ - htc,m8
- oneplus,bacon
- samsung,klte
+ - sony,xperia-aries
- sony,xperia-castor
- sony,xperia-leo
- const: qcom,msm8974pro
@@ -203,8 +210,13 @@ properties:
- asus,z00l
- gplus,fl8005a
- huawei,g7
+ - lg,c50
+ - lg,m216
- longcheer,l8910
- longcheer,l8150
+ - motorola,harpia
+ - motorola,osprey
+ - motorola,surnia
- qcom,msm8916-mtp
- samsung,a3u-eur
- samsung,a5u-eur
@@ -316,6 +328,7 @@ properties:
- items:
- enum:
- qcom,ipq5018-rdp432-c2
+ - tplink,archer-ax55-v1
- const: qcom,ipq5018
- items:
@@ -366,6 +379,7 @@ properties:
- fairphone,fp5
- qcom,qcm6490-idp
- qcom,qcs6490-rb3gen2
+ - shift,otter
- const: qcom,qcm6490
- description: Qualcomm Technologies, Inc. Distributed Unit 1000 platform
@@ -802,6 +816,7 @@ properties:
- items:
- enum:
+ - lenovo,tbx605f
- motorola,ali
- const: qcom,sdm450
@@ -883,6 +898,7 @@ properties:
- items:
- enum:
- qcom,sa8775p-ride
+ - qcom,sa8775p-ride-r3
- const: qcom,sa8775p
- items:
@@ -1004,17 +1020,28 @@ properties:
- qcom,sm8550-hdk
- qcom,sm8550-mtp
- qcom,sm8550-qrd
+ - samsung,q5q
- sony,pdx234
- const: qcom,sm8550
- items:
- enum:
+ - qcom,qcs8550-aim300-aiot
+ - const: qcom,qcs8550-aim300
+ - const: qcom,qcs8550
+ - const: qcom,sm8550
+
+ - items:
+ - enum:
+ - qcom,sm8650-hdk
- qcom,sm8650-mtp
- qcom,sm8650-qrd
- const: qcom,sm8650
- items:
- enum:
+ - asus,vivobook-s15
+ - lenovo,yoga-slim7x
- qcom,x1e80100-crd
- qcom,x1e80100-qcp
- const: qcom,x1e80100
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index e04c213a0dee..1ef09fbfdfaf 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -248,6 +248,13 @@ properties:
- const: friendlyarm,nanopc-t6
- const: rockchip,rk3588
+ - description: FriendlyElec CM3588-based boards
+ items:
+ - enum:
+ - friendlyarm,cm3588-nas
+ - const: friendlyarm,cm3588
+ - const: rockchip,rk3588
+
- description: GameForce Chi
items:
- const: gameforce,chi
@@ -627,6 +634,11 @@ properties:
- const: mqmaker,miqi
- const: rockchip,rk3288
+ - description: Neardi LBA3368
+ items:
+ - const: neardi,lba3368
+ - const: rockchip,rk3368
+
- description: Netxeon R89 board
items:
- const: netxeon,r89
@@ -799,11 +811,21 @@ properties:
- const: radxa,rock3a
- const: rockchip,rk3568
+ - description: Radxa ROCK 3B
+ items:
+ - const: radxa,rock-3b
+ - const: rockchip,rk3568
+
- description: Radxa ROCK 3C
items:
- const: radxa,rock-3c
- const: rockchip,rk3566
+ - description: Radxa ROCK 5 ITX
+ items:
+ - const: radxa,rock-5-itx
+ - const: rockchip,rk3588
+
- description: Radxa ROCK 5A
items:
- const: radxa,rock-5a
@@ -814,6 +836,18 @@ properties:
- const: radxa,rock-5b
- const: rockchip,rk3588
+ - description: Radxa ROCK S0
+ items:
+ - const: radxa,rock-s0
+ - const: rockchip,rk3308
+
+ - description: Radxa ZERO 3W/3E
+ items:
+ - enum:
+ - radxa,zero-3e
+ - radxa,zero-3w
+ - const: rockchip,rk3566
+
- description: Rikomagic MK808 v1
items:
- const: rikomagic,mk808
@@ -959,6 +993,14 @@ properties:
- const: wolfvision,rk3568-pf5
- const: rockchip,rk3568
+ - description: Xunlong Orange Pi 3B
+ items:
+ - enum:
+ - xunlong,orangepi-3b-v1.1
+ - xunlong,orangepi-3b-v2.1
+ - const: xunlong,orangepi-3b
+ - const: rockchip,rk3566
+
- description: Xunlong Orange Pi 5 Plus
items:
- const: xunlong,orangepi-5-plus
diff --git a/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt b/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt
deleted file mode 100644
index 3b8fbf3c00c5..000000000000
--- a/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-ARM Dual Cluster System Configuration Block
--------------------------------------------
-
-The Dual Cluster System Configuration Block (DCSCB) provides basic
-functionality for controlling clocks, resets and configuration pins in
-the Dual Cluster System implemented by the Real-Time System Model (RTSM).
-
-Required properties:
-
-- compatible : should be "arm,rtsm,dcscb"
-
-- reg : physical base address and the size of the registers window
-
-Example:
-
- dcscb@60000000 {
- compatible = "arm,rtsm,dcscb";
- reg = <0x60000000 0x1000>;
- };
diff --git a/Documentation/devicetree/bindings/arm/spear-misc.txt b/Documentation/devicetree/bindings/arm/spear-misc.txt
deleted file mode 100644
index e404e2556b4a..000000000000
--- a/Documentation/devicetree/bindings/arm/spear-misc.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SPEAr Misc configuration
-===========================
-SPEAr SOCs have some miscellaneous registers which are used to configure
-few properties of different peripheral controllers.
-
-misc node required properties:
-
-- compatible Should be "st,spear1340-misc", "syscon".
-- reg: Address range of misc space up to 8K
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
index d2dce238ff5d..3e996346b264 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
@@ -54,11 +54,10 @@ unevaluatedProperties: false
examples:
- |
- mlahb: ahb@38000000 {
+ ahb {
compatible = "st,mlahb", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x10000000 0x40000>;
ranges;
dma-ranges = <0x00000000 0x38000000 0x10000>,
<0x10000000 0x10000000 0x60000>,
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
index bc2f43330ae4..58099949e8f3 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
@@ -59,6 +59,12 @@ properties:
- prt,prtt1s # Protonic PRTT1S
- const: st,stm32mp151
+ - description: DH STM32MP135 DHCOR SoM based Boards
+ items:
+ - const: dh,stm32mp135f-dhcor-dhsbc
+ - const: dh,stm32mp135f-dhcor-som
+ - const: st,stm32mp135
+
- description: DH STM32MP151 DHCOR SoM based Boards
items:
- const: dh,stm32mp151a-dhcor-testbench
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index c6d0d8d81ed4..09dc6f424986 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -57,17 +57,17 @@ properties:
- const: allwinner,sun8i-v3s
- description: Anbernic RG35XX (2024)
- - items:
+ items:
- const: anbernic,rg35xx-2024
- const: allwinner,sun50i-h700
- description: Anbernic RG35XX Plus
- - items:
+ items:
- const: anbernic,rg35xx-plus
- const: allwinner,sun50i-h700
- description: Anbernic RG35XX H
- - items:
+ items:
- const: anbernic,rg35xx-h
- const: allwinner,sun50i-h700
@@ -708,12 +708,12 @@ properties:
- const: olimex,a64-teres-i
- const: allwinner,sun50i-a64
- - description: Pine64
+ - description: Pine64 PINE A64
items:
- const: pine64,pine64
- const: allwinner,sun50i-a64
- - description: Pine64+
+ - description: Pine64 PINE A64+
items:
- const: pine64,pine64-plus
- const: allwinner,sun50i-a64
@@ -724,17 +724,17 @@ properties:
- const: sochip,s3
- const: allwinner,sun8i-v3
- - description: Pine64 PineH64 model A
+ - description: Pine64 PINE H64 Model A
items:
- const: pine64,pine-h64
- const: allwinner,sun50i-h6
- - description: Pine64 PineH64 model B
+ - description: Pine64 PINE H64 Model B
items:
- const: pine64,pine-h64-model-b
- const: allwinner,sun50i-h6
- - description: Pine64 LTS
+ - description: Pine64 PINE A64 LTS
items:
- const: pine64,pine64-lts
- const: allwinner,sun50i-r18
@@ -763,17 +763,17 @@ properties:
- const: pine64,pinephone
- const: allwinner,sun50i-a64
- - description: Pine64 PineTab, Development Sample
+ - description: Pine64 PineTab Developer Sample
items:
- const: pine64,pinetab
- const: allwinner,sun50i-a64
- - description: Pine64 PineTab, Early Adopter's batch (and maybe later ones)
+ - description: Pine64 PineTab Early Adopter
items:
- const: pine64,pinetab-early-adopter
- const: allwinner,sun50i-a64
- - description: Pine64 SoPine Baseboard
+ - description: Pine64 SOPINE
items:
- const: pine64,sopine-baseboard
- const: pine64,sopine
diff --git a/Documentation/devicetree/bindings/arm/ti/k3.yaml b/Documentation/devicetree/bindings/arm/ti/k3.yaml
index 52b51fd7044e..4d9c5fbb4c26 100644
--- a/Documentation/devicetree/bindings/arm/ti/k3.yaml
+++ b/Documentation/devicetree/bindings/arm/ti/k3.yaml
@@ -25,6 +25,12 @@ properties:
- ti,am62a7-sk
- const: ti,am62a7
+ - description: K3 AM62A7 SoC PHYTEC phyBOARD-Lyra
+ items:
+ - const: phytec,am62a7-phyboard-lyra-rdk
+ - const: phytec,am62a-phycore-som
+ - const: ti,am62a7
+
- description: K3 AM62P5 SoC and Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt b/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
deleted file mode 100644
index 7c3ca0e13de0..000000000000
--- a/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Binding for Freescale QorIQ AHCI SATA Controller
-
-Required properties:
- - reg: Physical base address and size of the controller's register area.
- - compatible: Compatibility string. Must be 'fsl,<chip>-ahci', where
- chip could be ls1021a, ls1043a, ls1046a, ls1088a, ls2080a etc.
- - clocks: Input clock specifier. Refer to common clock bindings.
- - interrupts: Interrupt specifier. Refer to interrupt binding.
-
-Optional properties:
- - dma-coherent: Enable AHCI coherent DMA operation.
- - reg-names: register area names when there are more than 1 register area.
-
-Examples:
- sata@3200000 {
- compatible = "fsl,ls1021a-ahci";
- reg = <0x0 0x3200000 0x0 0x10000>;
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&platform_clk 1>;
- dma-coherent;
- };
diff --git a/Documentation/devicetree/bindings/ata/fsl,ahci.yaml b/Documentation/devicetree/bindings/ata/fsl,ahci.yaml
new file mode 100644
index 000000000000..ea4428bc1742
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/fsl,ahci.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/fsl,ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale QorIQ AHCI SATA Controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - description: SATA controller for ls1012a
+ items:
+ - const: fsl,ls1012a-ahci
+ - const: fsl,ls1043a-ahci
+ - enum:
+ - fsl,ls1021a-ahci
+ - fsl,ls1028a-ahci
+ - fsl,ls1043a-ahci
+ - fsl,ls1046a-ahci
+ - fsl,ls1088a-ahci
+ - fsl,ls2080a-ahci
+ - fsl,lx2160a-ahci
+
+ reg:
+ minItems: 1
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: ahci
+ - const: sata-ecc
+ minItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ sata@3200000 {
+ compatible = "fsl,ls1021a-ahci";
+ reg = <0x3200000 0x10000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&platform_clk 1>;
+ dma-coherent;
+ };
diff --git a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
index 07ccbda4a0ab..68ea5f70b75f 100644
--- a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+++ b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
@@ -21,6 +21,7 @@ properties:
compatible:
enum:
- qcom,qdu1000-llcc
+ - qcom,sa8775p-llcc
- qcom,sc7180-llcc
- qcom,sc7280-llcc
- qcom,sc8180x-llcc
@@ -66,7 +67,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,qdu1000-llcc
- qcom,sc7180-llcc
- qcom,sm6350-llcc
then:
@@ -85,6 +85,33 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sa8775p-llcc
+ then:
+ properties:
+ reg:
+ items:
+ - description: LLCC0 base register region
+ - description: LLCC1 base register region
+ - description: LLCC2 base register region
+ - description: LLCC3 base register region
+ - description: LLCC4 base register region
+ - description: LLCC5 base register region
+ - description: LLCC broadcast base register region
+ reg-names:
+ items:
+ - const: llcc0_base
+ - const: llcc1_base
+ - const: llcc2_base
+ - const: llcc3_base
+ - const: llcc4_base
+ - const: llcc5_base
+ - const: llcc_broadcast_base
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sc7280-llcc
then:
properties:
@@ -104,6 +131,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qdu1000-llcc
- qcom,sc8180x-llcc
- qcom,sc8280xp-llcc
- qcom,x1e80100-llcc
@@ -141,8 +169,31 @@ allOf:
- qcom,sm8150-llcc
- qcom,sm8250-llcc
- qcom,sm8350-llcc
+ then:
+ properties:
+ reg:
+ items:
+ - description: LLCC0 base register region
+ - description: LLCC1 base register region
+ - description: LLCC2 base register region
+ - description: LLCC3 base register region
+ - description: LLCC broadcast base register region
+ reg-names:
+ items:
+ - const: llcc0_base
+ - const: llcc1_base
+ - const: llcc2_base
+ - const: llcc3_base
+ - const: llcc_broadcast_base
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sm8450-llcc
- qcom,sm8550-llcc
+ - qcom,sm8650-llcc
then:
properties:
reg:
@@ -151,7 +202,8 @@ allOf:
- description: LLCC1 base register region
- description: LLCC2 base register region
- description: LLCC3 base register region
- - description: LLCC broadcast base register region
+ - description: LLCC broadcast OR register region
+ - description: LLCC broadcast AND register region
reg-names:
items:
- const: llcc0_base
@@ -159,6 +211,7 @@ allOf:
- const: llcc2_base
- const: llcc3_base
- const: llcc_broadcast_base
+ - const: llcc_broadcast_and_base
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/cache/starfive,jh8100-starlink-cache.yaml b/Documentation/devicetree/bindings/cache/starfive,jh8100-starlink-cache.yaml
new file mode 100644
index 000000000000..6d61098e388b
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/starfive,jh8100-starlink-cache.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cache/starfive,jh8100-starlink-cache.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive StarLink Cache Controller
+
+maintainers:
+ - Joshua Yeong <joshua.yeong@starfivetech.com>
+
+description:
+ StarFive's StarLink Cache Controller manages the L3 cache shared between
+ clusters of CPU cores. The cache driver enables RISC-V non-standard cache
+ management as an alternative to instructions in the RISC-V Zicbom extension.
+
+allOf:
+ - $ref: /schemas/cache-controller.yaml#
+
+# We need a select here so we don't match all nodes with 'cache'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - starfive,jh8100-starlink-cache
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: starfive,jh8100-starlink-cache
+ - const: cache
+
+ reg:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - cache-block-size
+ - cache-level
+ - cache-sets
+ - cache-size
+ - cache-unified
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cache-controller@15000000 {
+ compatible = "starfive,jh8100-starlink-cache", "cache";
+ reg = <0x0 0x15000000 0x0 0x278>;
+ cache-block-size = <64>;
+ cache-level = <3>;
+ cache-sets = <8192>;
+ cache-size = <0x400000>;
+ cache-unified;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
index 944a0ea79cd6..824781cbdf34 100644
--- a/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
@@ -33,6 +33,9 @@ properties:
- description: PCIE30 PHY3 pipe clock source
- description: USB3 PHY pipe clock source
+ '#interconnect-cells':
+ const: 1
+
required:
- compatible
- clocks
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcm2290-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcm2290-gpucc.yaml
new file mode 100644
index 000000000000..734880805c1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,qcm2290-gpucc.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,qcm2290-gpucc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Graphics Clock & Reset Controller on QCM2290
+
+maintainers:
+ - Konrad Dybcio <konradybcio@kernel.org>
+
+description: |
+ Qualcomm graphics clock control module provides the clocks, resets and power
+ domains on Qualcomm SoCs.
+
+ See also:
+ include/dt-bindings/clock/qcom,qcm2290-gpucc.h
+
+properties:
+ compatible:
+ const: qcom,qcm2290-gpucc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: AHB interface clock
+ - description: SoC CXO clock
+ - description: GPLL0 main branch source
+ - description: GPLL0 div branch source
+
+ power-domains:
+ description:
+ A phandle and PM domain specifier for the CX power domain.
+ maxItems: 1
+
+ required-opps:
+ description:
+ A phandle to an OPP node describing required CX performance point.
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - power-domains
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-qcm2290.h>
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ clock-controller@5990000 {
+ compatible = "qcom,qcm2290-gpucc";
+ reg = <0x0 0x05990000 0x0 0x9000>;
+ clocks = <&gcc GCC_GPU_CFG_AHB_CLK>,
+ <&rpmcc RPM_SMD_XO_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ power-domains = <&rpmpd QCM2290_VDDCX>;
+ required-opps = <&rpmpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
index fa0e5b6b02b8..f58edfc10f4c 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
@@ -8,15 +8,17 @@ title: Qualcomm Camera Clock & Reset Controller on SM8450
maintainers:
- Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>
+ - Jagadeesh Kona <quic_jkona@quicinc.com>
description: |
Qualcomm camera clock control module provides the clocks, resets and power
domains on SM8450.
- See also::
+ See also:
+ include/dt-bindings/clock/qcom,sc8280xp-camcc.h
include/dt-bindings/clock/qcom,sm8450-camcc.h
include/dt-bindings/clock/qcom,sm8550-camcc.h
- include/dt-bindings/clock/qcom,sc8280xp-camcc.h
+ include/dt-bindings/clock/qcom,sm8650-camcc.h
include/dt-bindings/clock/qcom,x1e80100-camcc.h
allOf:
@@ -28,6 +30,7 @@ properties:
- qcom,sc8280xp-camcc
- qcom,sm8450-camcc
- qcom,sm8550-camcc
+ - qcom,sm8650-camcc
- qcom,x1e80100-camcc
clocks:
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
index bad8f019a8d3..8ce5972a65d5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
@@ -8,18 +8,22 @@ title: Qualcomm Video Clock & Reset Controller on SM8450
maintainers:
- Taniya Das <quic_tdas@quicinc.com>
+ - Jagadeesh Kona <quic_jkona@quicinc.com>
description: |
Qualcomm video clock control module provides the clocks, resets and power
domains on SM8450.
- See also:: include/dt-bindings/clock/qcom,videocc-sm8450.h
+ See also:
+ include/dt-bindings/clock/qcom,sm8450-videocc.h
+ include/dt-bindings/clock/qcom,sm8650-videocc.h
properties:
compatible:
enum:
- qcom,sm8450-videocc
- qcom,sm8550-videocc
+ - qcom,sm8650-videocc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml b/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
index 0a9d6a4c4b66..66e8e66ca175 100644
--- a/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
@@ -36,7 +36,7 @@ properties:
The second cell should contain the clock ID.
- Please see http://processors.wiki.ti.com/index.php/TISCI for
+ Please see https://software-dl.ti.com/tisci/esd/latest/index.html for
protocol documentation for the values to be used for different devices.
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt b/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt
deleted file mode 100644
index c35cb6c4af4d..000000000000
--- a/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-* Device tree bindings for Texas Instruments keystone pll controller
-
-The main pll controller used to drive theC66x CorePacs, the switch fabric,
-and a majority of the peripheral clocks (all but the ARM CorePacs, DDR3 and
-the NETCP modules) requires a PLL Controller to manage the various clock
-divisions, gating, and synchronization.
-
-Required properties:
-
-- compatible: "ti,keystone-pllctrl", "syscon"
-
-- reg: contains offset/length value for pll controller
- registers space.
-
-Example:
-
-pllctrl: pll-controller@02310000 {
- compatible = "ti,keystone-pllctrl", "syscon";
- reg = <0x02310000 0x200>;
-};
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index acfb4b2ee7a9..d54140f18d34 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -59,8 +59,8 @@ properties:
- 3
dma-channels:
- minItems: 1
- maxItems: 64
+ minimum: 1
+ maximum: 64
clocks:
minItems: 1
diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
index 7de2c29606e5..308af58180d1 100644
--- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
+++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
@@ -72,14 +72,17 @@ properties:
- const: tx
- const: tx_reply
- const: rx
+ - const: rx_reply
minItems: 2
mboxes:
description:
List of phandle and mailbox channel specifiers. It should contain
- exactly one, two or three mailboxes; the first one or two for transmitting
- messages ("tx") and another optional ("rx") for receiving notifications
- and delayed responses, if supported by the platform.
+ exactly one, two, three or four mailboxes; the first one or two for
+ transmitting messages ("tx") and another optional ("rx") for receiving
+ notifications and delayed responses, if supported by the platform.
+ The optional ("rx_reply") is for notifications completion interrupt,
+ if supported by the platform.
The number of mailboxes needed for transmitting messages depends on the
type of channels exposed by the specific underlying mailbox controller;
one single channel descriptor is enough if such channel is bidirectional,
@@ -92,9 +95,10 @@ properties:
2 mbox / 2 shmem => SCMI TX and RX over 2 mailbox bidirectional channels
2 mbox / 1 shmem => SCMI TX over 2 mailbox unidirectional channels
3 mbox / 2 shmem => SCMI TX and RX over 3 mailbox unidirectional channels
+ 4 mbox / 2 shmem => SCMI TX and RX over 4 mailbox unidirectional channels
Any other combination of mboxes and shmem is invalid.
minItems: 1
- maxItems: 3
+ maxItems: 4
shmem:
description:
diff --git a/Documentation/devicetree/bindings/firmware/cznic,turris-omnia-mcu.yaml b/Documentation/devicetree/bindings/firmware/cznic,turris-omnia-mcu.yaml
new file mode 100644
index 000000000000..af9249695ef5
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/cznic,turris-omnia-mcu.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/cznic,turris-omnia-mcu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: CZ.NIC's Turris Omnia MCU
+
+maintainers:
+ - Marek Behún <kabel@kernel.org>
+
+description:
+ The MCU on Turris Omnia acts as a system controller providing additional
+ GPIOs, interrupts, watchdog, system power off and wakeup configuration.
+
+properties:
+ compatible:
+ const: cznic,turris-omnia-mcu
+
+ reg:
+ description: MCU I2C slave address
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+ description: |
+ The first cell specifies the interrupt number (0 to 63), the second cell
+ specifies interrupt type (which can be one of IRQ_TYPE_EDGE_RISING,
+ IRQ_TYPE_EDGE_FALLING or IRQ_TYPE_EDGE_BOTH).
+ The interrupt numbers correspond sequentially to GPIO numbers, taking the
+ GPIO banks into account:
+ IRQ number GPIO bank GPIO pin within bank
+ 0 - 15 0 0 - 15
+ 16 - 47 1 0 - 31
+ 48 - 63 2 0 - 15
+ There are several exceptions:
+ IRQ number meaning
+ 11 LED panel brightness changed by button press
+ 13 TRNG entropy ready
+ 14 ECDSA message signature computation done
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 3
+ description:
+ The first cell is bank number (0, 1 or 2), the second cell is pin number
+ within the bank (0 to 15 for banks 0 and 2, 0 to 31 for bank 1), and the
+ third cell specifies consumer flags.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - gpio-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ system-controller@2a {
+ compatible = "cznic,turris-omnia-mcu";
+ reg = <0x2a>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <11 IRQ_TYPE_NONE>;
+
+ gpio-controller;
+ #gpio-cells = <3>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
index 47d3d2d52acd..2cc83771d8e7 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
@@ -93,6 +93,11 @@ properties:
protocol to handle sleeping SCM calls.
maxItems: 1
+ memory-region:
+ description:
+ Phandle to the memory region reserved for the shared memory bridge to TZ.
+ maxItems: 1
+
qcom,sdi-enabled:
description:
Indicates that the SDI (Secure Debug Image) has been enabled by TZ
@@ -193,6 +198,16 @@ allOf:
then:
properties:
interrupts: false
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,scm-sa8775p
+ then:
+ properties:
+ memory-region: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/fuse/renesas,rcar-efuse.yaml b/Documentation/devicetree/bindings/fuse/renesas,rcar-efuse.yaml
new file mode 100644
index 000000000000..d7e289244e72
--- /dev/null
+++ b/Documentation/devicetree/bindings/fuse/renesas,rcar-efuse.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fuse/renesas,rcar-efuse.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: R-Car E-FUSE connected to PFC
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description:
+ The E-FUSE is a type of non-volatile memory, which is accessible through the
+ Pin Function Controller (PFC) on some R-Car Gen4 SoCs.
+
+properties:
+ compatible:
+ enum:
+ - renesas,r8a779a0-efuse # R-Car V3U
+ - renesas,r8a779f0-efuse # R-Car S4-8
+
+ reg:
+ maxItems: 1
+ description: PFC System Group Fuse Control and Monitor register block
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - power-domains
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
+ #include <dt-bindings/power/r8a779a0-sysc.h>
+
+ fuse: fuse@e6078800 {
+ compatible = "renesas,r8a779a0-efuse";
+ reg = <0xe6078800 0x100>;
+ clocks = <&cpg CPG_MOD 916>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
+ };
diff --git a/Documentation/devicetree/bindings/fuse/renesas,rcar-otp.yaml b/Documentation/devicetree/bindings/fuse/renesas,rcar-otp.yaml
new file mode 100644
index 000000000000..d74872ae9ff3
--- /dev/null
+++ b/Documentation/devicetree/bindings/fuse/renesas,rcar-otp.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fuse/renesas,rcar-otp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: R-Car E-FUSE connected to OTP_MEM
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description:
+ The E-FUSE is a type of non-volatile memory, which is accessible through the
+ One-Time Programmable Memory (OTP_MEM) module on some R-Car Gen4 SoCs.
+
+properties:
+ compatible:
+ enum:
+ - renesas,r8a779g0-otp # R-Car V4H
+ - renesas,r8a779h0-otp # R-Car V4M
+
+ reg:
+ items:
+ - description: OTP_MEM_0
+ - description: OTP_MEM_1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ otp: otp@e61be000 {
+ compatible = "renesas,r8a779g0-otp";
+ reg = <0xe61be000 0x1000>, <0xe61bf000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/aspeed,sgpio.yaml b/Documentation/devicetree/bindings/gpio/aspeed,sgpio.yaml
index 46bb121360dc..1046f0331c09 100644
--- a/Documentation/devicetree/bindings/gpio/aspeed,sgpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/aspeed,sgpio.yaml
@@ -33,6 +33,11 @@ properties:
gpio-controller: true
+ # Each SGPIO is represented as a pair of input and output GPIOs
+ gpio-line-names:
+ minItems: 160
+ maxItems: 256
+
'#gpio-cells':
const: 2
@@ -41,6 +46,9 @@ properties:
interrupt-controller: true
+ '#interrupt-cells':
+ const: 2
+
clocks:
maxItems: 1
@@ -55,6 +63,7 @@ required:
- '#gpio-cells'
- interrupts
- interrupt-controller
+ - '#interrupt-cells'
- ngpios
- clocks
- bus-frequency
@@ -72,6 +81,7 @@ examples:
reg = <0x1e780200 0x0100>;
clocks = <&syscon ASPEED_CLK_APB>;
interrupt-controller;
+ #interrupt-cells = <2>;
ngpios = <80>;
bus-frequency = <12000000>;
};
diff --git a/Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml b/Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml
new file mode 100644
index 000000000000..3dd70933ed8e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/atmel,at91rm9200-gpio.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/atmel,at91rm9200-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip GPIO controller (PIO)
+
+maintainers:
+ - Manikandan Muralidharan <manikandan.m@microchip.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - atmel,at91sam9x5-gpio
+ - microchip,sam9x60-gpio
+ - const: atmel,at91rm9200-gpio
+ - items:
+ - enum:
+ - microchip,sam9x7-gpio
+ - const: microchip,sam9x60-gpio
+ - const: atmel,at91rm9200-gpio
+ - items:
+ - const: atmel,at91rm9200-gpio
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ gpio-controller: true
+ gpio-line-names: true
+
+ "#gpio-cells":
+ const: 2
+
+ clocks:
+ maxItems: 1
+
+ "#gpio-lines":
+ description:
+ Number of gpio, 32 by default if absent
+ maxItems: 1
+ default: 32
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+ - gpio-controller
+ - "#gpio-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ gpio@fffff400 {
+ compatible = "atmel,at91rm9200-gpio";
+ reg = <0xfffff400 0x200>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
+ };
+...
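The optional "#gpio-lines" property above is not exercised by the new example; the removed gpio_atmel.txt binding used it on a bank exposing 19 lines. A minimal sketch of that case under the new schema (register range, interrupt and clock specifiers are illustrative placeholders):

    gpio@fffff200 {
        compatible = "atmel,at91rm9200-gpio";
        reg = <0xfffff200 0x200>;
        interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
        gpio-controller;
        #gpio-cells = <2>;
        #gpio-lines = <19>;        /* only 19 of the 32 lines are bonded out */
        interrupt-controller;
        #interrupt-cells = <2>;
        clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
    };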
diff --git a/Documentation/devicetree/bindings/gpio/fsl,qoriq-gpio.yaml b/Documentation/devicetree/bindings/gpio/fsl,qoriq-gpio.yaml
new file mode 100644
index 000000000000..84fd82291ee4
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/fsl,qoriq-gpio.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/fsl,qoriq-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MPC512x/MPC8xxx/QorIQ/Layerscape GPIO controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,mpc5121-gpio
+ - fsl,mpc5125-gpio
+ - fsl,mpc8349-gpio
+ - fsl,mpc8572-gpio
+ - fsl,mpc8610-gpio
+ - fsl,pq3-gpio
+ - items:
+ - enum:
+ - fsl,ls1021a-gpio
+ - fsl,ls1028a-gpio
+ - fsl,ls1043a-gpio
+ - fsl,ls1046a-gpio
+ - fsl,ls1088a-gpio
+ - fsl,ls2080a-gpio
+ - const: fsl,qoriq-gpio
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-controller: true
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ gpio-line-names:
+ minItems: 1
+ maxItems: 32
+
+ little-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ GPIO registers are used as little endian. If not
+ present, registers are used as big endian by default.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#gpio-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio@1100 {
+ compatible = "fsl,mpc5125-gpio";
+ reg = <0x1100 0x080>;
+ interrupts = <78 0x8>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ gpio@2300000 {
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+ reg = <0x2300000 0x10000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
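The removed gpio-mpc8xxx.txt also carried an ls1028a/ls1088a example that listed three compatibles; the new schema instead pairs each SoC compatible with the single "fsl,qoriq-gpio" fallback. A sketch of an LS1088A node under that rule (address and interrupt values are illustrative, mirroring the example above):

    gpio@2310000 {
        compatible = "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
        reg = <0x2310000 0x10000>;
        interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
        little-endian;
        gpio-controller;
        #gpio-cells = <2>;
        interrupt-controller;
        #interrupt-cells = <2>;
    };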
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
deleted file mode 100644
index cd28e932bf50..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* Freescale MPC512x/MPC8xxx/QorIQ/Layerscape GPIO controller
-
-Required properties:
-- compatible : Should be "fsl,<soc>-gpio"
- The following <soc>s are known to be supported:
- mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq,
- ls1021a, ls1043a, ls2080a, ls1028a, ls1088a.
-- reg : Address and length of the register set for the device
-- interrupts : Should be the port interrupt shared by all 32 pins.
-- #gpio-cells : Should be two. The first cell is the pin number and
- the second cell is used to specify the gpio polarity:
- 0 = active high
- 1 = active low
-
-Optional properties:
-- little-endian : GPIO registers are used as little endian. If not
- present registers are used as big endian by default.
-
-Example of gpio-controller node for a mpc5125 SoC:
-
-gpio0: gpio@1100 {
- compatible = "fsl,mpc5125-gpio";
- #gpio-cells = <2>;
- reg = <0x1100 0x080>;
- interrupts = <78 0x8>;
-};
-
-Example of gpio-controller node for a ls2080a SoC:
-
-gpio0: gpio@2300000 {
- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
- reg = <0x0 0x2300000 0x0 0x10000>;
- interrupts = <0 36 0x4>; /* Level high type */
- gpio-controller;
- little-endian;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
-
-
-Example of gpio-controller node for a ls1028a/ls1088a SoC:
-
-gpio1: gpio@2300000 {
- compatible = "fsl,ls1028a-gpio", "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
- reg = <0x0 0x2300000 0x0 0x10000>;
- interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- little-endian;
-};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
index 99febb8ea1b6..51e8390d6b32 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
@@ -66,6 +66,7 @@ properties:
- ti,tca6408
- ti,tca6416
- ti,tca6424
+ - ti,tca9535
- ti,tca9538
- ti,tca9539
- ti,tca9554
diff --git a/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml b/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
index a27f92950257..cabda2eab4a2 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
@@ -51,6 +51,10 @@ properties:
gpio-controller: true
+ gpio-line-names:
+ minItems: 1
+ maxItems: 32
+
clocks:
items:
- description: SoC GPIO clock
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zevio.txt b/Documentation/devicetree/bindings/gpio/gpio-zevio.txt
deleted file mode 100644
index a37bd9ae2730..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-zevio.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Zevio GPIO controller
-
-Required properties:
-- compatible: Should be "lsi,zevio-gpio"
-- reg: Address and length of the register set for the device
-- #gpio-cells: Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
-- gpio-controller: Marks the device node as a GPIO controller.
-
-Example:
- gpio: gpio@90000000 {
- compatible = "lsi,zevio-gpio";
- reg = <0x90000000 0x1000>;
- gpio-controller;
- #gpio-cells = <2>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/gpio_atmel.txt b/Documentation/devicetree/bindings/gpio/gpio_atmel.txt
deleted file mode 100644
index 29416f9c3220..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio_atmel.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* Atmel GPIO controller (PIO)
-
-Required properties:
-- compatible: "atmel,<chip>-gpio", where <chip> is at91rm9200 or at91sam9x5.
-- reg: Should contain GPIO controller registers location and length
-- interrupts: Should be the port interrupt shared by all the pins.
-- #gpio-cells: Should be two. The first cell is the pin number and
- the second cell is used to specify optional parameters to declare if the GPIO
- is active high or low. See gpio.txt.
-- gpio-controller: Marks the device node as a GPIO controller.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be two. The first cell is the pin number and the
- second cell is used to specify irq type flags, see the two cell description
- in interrupt-controller/interrupts.txt for details.
-
-optional properties:
-- #gpio-lines: Number of gpio if absent 32.
-
-
-Example:
- pioA: gpio@fffff200 {
- compatible = "atmel,at91rm9200-gpio";
- reg = <0xfffff200 0x100>;
- interrupts = <2 4>;
- #gpio-cells = <2>;
- gpio-controller;
- #gpio-lines = <19>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
diff --git a/Documentation/devicetree/bindings/gpio/lsi,zevio-gpio.yaml b/Documentation/devicetree/bindings/gpio/lsi,zevio-gpio.yaml
new file mode 100644
index 000000000000..e9e201a489e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/lsi,zevio-gpio.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/lsi,zevio-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Zevio GPIO controller
+
+maintainers:
+ - Pratik Farkase <pratikfarkase94@gmail.com>
+
+properties:
+ compatible:
+ items:
+ - const: lsi,zevio-gpio
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-controller: true
+
+required:
+ - compatible
+ - reg
+ - "#gpio-cells"
+ - gpio-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ gpio@90000000 {
+ compatible = "lsi,zevio-gpio";
+ reg = <0x90000000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/g762.txt b/Documentation/devicetree/bindings/hwmon/g762.txt
deleted file mode 100644
index 6d154c4923de..000000000000
--- a/Documentation/devicetree/bindings/hwmon/g762.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-GMT G762/G763 PWM Fan controller
-
-Required node properties:
-
- - "compatible": must be either "gmt,g762" or "gmt,g763"
- - "reg": I2C bus address of the device
- - "clocks": a fixed clock providing input clock frequency
- on CLK pin of the chip.
-
-Optional properties:
-
- - "fan_startv": fan startup voltage. Accepted values are 0, 1, 2 and 3.
- The higher the more.
-
- - "pwm_polarity": pwm polarity. Accepted values are 0 (positive duty)
- and 1 (negative duty).
-
- - "fan_gear_mode": fan gear mode. Supported values are 0, 1 and 2.
-
-If an optional property is not set in .dts file, then current value is kept
-unmodified (e.g. u-boot installed value).
-
-Additional information on operational parameters for the device is available
-in Documentation/hwmon/g762.rst. A detailed datasheet for the device is available
-at http://natisbad.org/NAS/refs/GMT_EDS-762_763-080710-0.2.pdf.
-
-Example g762 node:
-
- clocks {
- #address-cells = <1>;
- #size-cells = <0>;
-
- g762_clk: fixedclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <8192>;
- }
- }
-
- g762: g762@3e {
- compatible = "gmt,g762";
- reg = <0x3e>;
- clocks = <&g762_clk>
- fan_gear_mode = <0>; /* chip default */
- fan_startv = <1>; /* chip default */
- pwm_polarity = <0>; /* chip default */
- };
diff --git a/Documentation/devicetree/bindings/hwmon/gmt,g762.yaml b/Documentation/devicetree/bindings/hwmon/gmt,g762.yaml
new file mode 100644
index 000000000000..8e1bffd252e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/gmt,g762.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/gmt,g762.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GMT G761/G762/G763 PWM Fan controller
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description: |
+ GMT G761/G762/G763 PWM Fan controller.
+
+ The G761 has an internal clock, hence the clocks property is optional.
+ If not defined, the internal clock is used (the internal crystal
+ oscillator runs at 31 kHz).
+
+ If an optional property is not set in DT, the current value is kept
+ unmodified (e.g. a bootloader-installed value).
+
+ Additional information on operational parameters for the device is available
+ in Documentation/hwmon/g762.rst. A detailed datasheet for the device is available
+ at http://natisbad.org/NAS/refs/GMT_EDS-762_763-080710-0.2.pdf.
+
+properties:
+ compatible:
+ enum:
+ - gmt,g761
+ - gmt,g762
+ - gmt,g763
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: A fixed clock providing the input clock frequency on the CLK
+ pin of the chip.
+ maxItems: 1
+
+ fan_startv:
+ description: Fan startup voltage step
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ pwm_polarity:
+ description: PWM polarity (positive or negative duty)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+
+ fan_gear_mode:
+ description: Fan gear mode. Configures the high-speed fan setting factor.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
+
+required:
+ - compatible
+ - reg
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - gmt,g762
+ - gmt,g763
+then:
+ required:
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ g762@3e {
+ compatible = "gmt,g762";
+ reg = <0x3e>;
+ clocks = <&g762_clk>;
+ fan_gear_mode = <0>;
+ fan_startv = <1>;
+ pwm_polarity = <0>;
+ };
+
+ g761@1e {
+ compatible = "gmt,g761";
+ reg = <0x1e>;
+ fan_gear_mode = <0>;
+ fan_startv = <1>;
+ pwm_polarity = <0>;
+ };
+ };
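The g762 node above consumes &g762_clk without defining the provider; the removed g762.txt modelled it as a fixed clock. A sketch of such a provider node (the 8192 Hz rate is carried over from the removed example):

    g762_clk: clock-8192 {
        compatible = "fixed-clock";
        #clock-cells = <0>;
        clock-frequency = <8192>;
    };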
diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml
new file mode 100644
index 000000000000..4f5837a30773
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/maxim,max6639.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim max6639
+
+maintainers:
+ - Naresh Solanki <naresh.solanki@9elements.com>
+
+description: |
+ The MAX6639 is a 2-channel temperature monitor with a dual automatic PWM
+ fan-speed controller. It monitors its own temperature and one external
+ diode-connected transistor, or the temperatures of two external
+ diode-connected transistors, typically available in CPUs, FPGAs, or GPUs.
+
+ Datasheets:
+ https://datasheets.maximintegrated.com/en/ds/MAX6639-MAX6639F.pdf
+
+properties:
+ compatible:
+ enum:
+ - maxim,max6639
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ '#pwm-cells':
+ const: 3
+
+required:
+ - compatible
+ - reg
+
+patternProperties:
+ "^fan@[0-1]$":
+ type: object
+ description:
+ Represents the two fans and their specific configuration.
+
+ $ref: fan-common.yaml#
+
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ description:
+ The fan number.
+
+ required:
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan1: fan-controller@10 {
+ compatible = "maxim,max6639";
+ reg = <0x10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #pwm-cells = <3>;
+
+ fan@0 {
+ reg = <0x0>;
+ pulses-per-revolution = <2>;
+ max-rpm = <4000>;
+ target-rpm = <1000>;
+ pwms = <&fan1 0 25000 0>;
+ };
+
+ fan@1 {
+ reg = <0x1>;
+ pulses-per-revolution = <2>;
+ max-rpm = <8000>;
+ pwms = <&fan1 1 25000 0>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
index df86c2c92037..6ae961732e6b 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
@@ -66,6 +66,14 @@ properties:
description: phandle to the regulator that provides the VS supply typically
in range from 2.7 V to 5.5 V.
+ ti,alert-polarity-active-high:
+ description: The alert pin is asserted according to the Alert Polarity bit of
+ the Mask/Enable register. The default is Normal (0, which maps to an
+ active-low open collector); the other value is Inverted (1, which maps to an
+ active-high open collector). Specify this property to set the alert polarity
+ to active-high.
+ $ref: /schemas/types.yaml#/definitions/flag
+
required:
- compatible
- reg
@@ -88,5 +96,6 @@ examples:
label = "vdd_3v0";
shunt-resistor = <1000>;
vs-supply = <&vdd_3v0>;
+ ti,alert-polarity-active-high;
};
};
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
index 8b5307c875ff..0ad10d43fac0 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
@@ -9,6 +9,14 @@ title: TMP108 temperature sensor
maintainers:
- Krzysztof Kozlowski <krzk@kernel.org>
+description: |
+ The TMP108 is a digital-output temperature sensor with a
+ dynamically-programmable limit window, and under- and overtemperature
+ alert functions.
+
+ Datasheets:
+ https://www.ti.com/product/TMP108
+
properties:
compatible:
enum:
@@ -24,6 +32,9 @@ properties:
"#thermal-sensor-cells":
const: 0
+ vcc-supply:
+ description: phandle to the regulator that provides the V+ supply
+
required:
- compatible
- reg
@@ -45,6 +56,7 @@ examples:
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&tmp_alrt>;
+ vcc-supply = <&supply>;
#thermal-sensor-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
index b1c13bab2472..b2d19cfb87ad 100644
--- a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
@@ -77,7 +77,7 @@ required:
- clocks
allOf:
- - $ref: i2c-controller.yaml
+ - $ref: /schemas/i2c/i2c-controller.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
index ab151c9db219..580003cdfff5 100644
--- a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+++ b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
@@ -21,7 +21,7 @@ description: |
google,cros-ec-spi or google,cros-ec-i2c.
allOf:
- - $ref: i2c-controller.yaml#
+ - $ref: /schemas/i2c/i2c-controller.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
index 96340a05754c..8265d709094d 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
@@ -139,7 +139,7 @@ allOf:
Voltage output range of the channel as <minimum, maximum>
Required connections:
Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V;
- Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V;
+ Rfb2x for: 0 to 10 V; -2.5 to 7.5V; -5 to 5 V;
oneOf:
- items:
- const: 0
diff --git a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
index c384bf0bb25d..6bdb8040be65 100644
--- a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
+++ b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
@@ -22,7 +22,9 @@ properties:
- const: allwinner,sun8i-a83t-r-lradc
- const: allwinner,sun50i-r329-lradc
- items:
- - const: allwinner,sun20i-d1-lradc
+ - enum:
+ - allwinner,sun50i-h616-lradc
+ - allwinner,sun20i-d1-lradc
- const: allwinner,sun50i-r329-lradc
reg:
diff --git a/Documentation/devicetree/bindings/input/cirrus,cs40l50.yaml b/Documentation/devicetree/bindings/input/cirrus,cs40l50.yaml
new file mode 100644
index 000000000000..89bd06864bd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/cirrus,cs40l50.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/cirrus,cs40l50.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CS40L50 Advanced Haptic Driver
+
+maintainers:
+ - James Ogletree <jogletre@opensource.cirrus.com>
+
+description:
+ CS40L50 is a haptic driver with waveform memory,
+ integrated DSP, and closed-loop algorithms.
+
+properties:
+ compatible:
+ enum:
+ - cirrus,cs40l50
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+
+ vdd-a-supply:
+ description: Power supply for internal analog circuits.
+
+ vdd-p-supply:
+ description: Power supply for always-on circuits.
+
+ vdd-io-supply:
+ description: Power supply for digital input/output.
+
+ vdd-b-supply:
+ description: Power supply for the boost converter.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - reset-gpios
+ - vdd-io-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ haptic-driver@34 {
+ compatible = "cirrus,cs40l50";
+ reg = <0x34>;
+ interrupt-parent = <&gpio>;
+ interrupts = <113 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio 112 GPIO_ACTIVE_LOW>;
+ vdd-io-supply = <&vreg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
index dc4ac41f2441..a62916d07a08 100644
--- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
+++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
@@ -18,9 +18,12 @@ allOf:
properties:
compatible:
- enum:
- - elan,ekth6915
- - ilitek,ili2901
+ oneOf:
+ - items:
+ - enum:
+ - elan,ekth5015m
+ - const: elan,ekth6915
+ - const: elan,ekth6915
reg:
const: 0x10
@@ -33,6 +36,12 @@ properties:
reset-gpios:
description: Reset GPIO; not all touchscreens using eKTH6915 hook this up.
+ no-reset-on-power-off:
+ type: boolean
+ description:
+ Reset line is wired so that it can (and should) be left deasserted when
+ the power supply is off.
+
vcc33-supply:
description: The 3.3V supply to the touchscreen.
@@ -58,8 +67,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- ap_ts: touchscreen@10 {
- compatible = "elan,ekth6915";
+ touchscreen@10 {
+ compatible = "elan,ekth5015m", "elan,ekth6915";
reg = <0x10>;
interrupt-parent = <&tlmm>;
diff --git a/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml b/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml
new file mode 100644
index 000000000000..1abeec768d79
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/ilitek,ili2901.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI2901 touchscreen controller
+
+maintainers:
+ - Jiri Kosina <jkosina@suse.com>
+
+description:
+ Supports the Ilitek ILI2901 touchscreen controller.
+ This touchscreen controller uses the i2c-hid protocol with a reset GPIO.
+
+allOf:
+ - $ref: /schemas/input/touchscreen/touchscreen.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ilitek,ili2901
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ panel: true
+
+ reset-gpios:
+ maxItems: 1
+
+ vcc33-supply: true
+
+ vccio-supply: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - vcc33-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@41 {
+ compatible = "ilitek,ili2901";
+ reg = <0x41>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>;
+ vcc33-supply = <&pp3300_ts>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
index 05067e197abe..2cd1f5590fd9 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -35,6 +35,7 @@ properties:
- qcom,sm8250-cpu-bwmon
- qcom,sm8550-cpu-bwmon
- qcom,sm8650-cpu-bwmon
+ - qcom,x1e80100-cpu-bwmon
- const: qcom,sdm845-bwmon # BWMON v4, unified register space
- items:
- enum:
@@ -44,6 +45,7 @@ properties:
- qcom,sm8250-llcc-bwmon
- qcom,sm8550-llcc-bwmon
- qcom,sm8650-llcc-bwmon
+ - qcom,x1e80100-llcc-bwmon
- const: qcom,sc7280-llcc-bwmon
- const: qcom,sc7280-llcc-bwmon # BWMON v5
- const: qcom,sdm845-llcc-bwmon # BWMON v5
@@ -72,7 +74,6 @@ required:
- interconnects
- interrupts
- operating-points-v2
- - opp-table
- reg
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml
index 887e565b9573..199b34fdbefc 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml
@@ -8,7 +8,6 @@ title: Freescale Layerscape External Interrupt Controller
maintainers:
- Shawn Guo <shawnguo@kernel.org>
- - Li Yang <leoyang.li@nxp.com>
description: |
Some Layerscape SOCs (LS1021A, LS1043A, LS1046A LS1088A, LS208xA,
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-370-xp-mpic.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-370-xp-mpic.txt
deleted file mode 100644
index 5fc03134a999..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-370-xp-mpic.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-Marvell Armada 370, 375, 38x, XP Interrupt Controller
------------------------------------------------------
-
-Required properties:
-- compatible: Should be "marvell,mpic"
-- interrupt-controller: Identifies the node as an interrupt controller.
-- msi-controller: Identifies the node as an PCI Message Signaled
- Interrupt controller.
-- #interrupt-cells: The number of cells to define the interrupts. Should be 1.
- The cell is the IRQ number
-
-- reg: Should contain PMIC registers location and length. First pair
- for the main interrupt registers, second pair for the per-CPU
- interrupt registers. For this last pair, to be compliant with SMP
- support, the "virtual" must be use (For the record, these registers
- automatically map to the interrupt controller registers of the
- current CPU)
-
-Optional properties:
-
-- interrupts: If defined, then it indicates that this MPIC is
- connected as a slave to another interrupt controller. This is
- typically the case on Armada 375 and Armada 38x, where the MPIC is
- connected as a slave to the Cortex-A9 GIC. The provided interrupt
- indicate to which GIC interrupt the MPIC output is connected.
-
-Example:
-
- mpic: interrupt-controller@d0020000 {
- compatible = "marvell,mpic";
- #interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-controller;
- msi-controller;
- reg = <0xd0020a00 0x1d0>,
- <0xd0021070 0x58>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,mpic.yaml b/Documentation/devicetree/bindings/interrupt-controller/marvell,mpic.yaml
new file mode 100644
index 000000000000..616a41c87352
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,mpic.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/marvell,mpic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Armada 370, 375, 38x, 39x, XP Interrupt Controller
+
+maintainers:
+ - Marek Behún <kabel@kernel.org>
+
+description: |
+ The top-level interrupt controller on Marvell Armada 370 and XP. On these
+ platforms it also provides inter-processor interrupts.
+
+ On Marvell Armada 375, 38x and 39x this controller is wired under ARM GIC.
+
+ Provides MSI handling for the PCIe controllers.
+
+properties:
+ compatible:
+ const: marvell,mpic
+
+ reg:
+ items:
+ - description: main registers
+ - description: per-cpu registers
+
+ interrupts:
+ items:
+ - description: |
+ Parent interrupt on platforms where MPIC is not the top-level
+ interrupt controller.
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ msi-controller: true
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - msi-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ interrupt-controller@20a00 {
+ compatible = "marvell,mpic";
+ reg = <0x20a00 0x2d0>, <0x21070 0x58>;
+ interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ msi-controller;
+ };
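The example above shows the chained configuration used on Armada 375/38x/39x, where the MPIC sits below the GIC. On Armada 370/XP the MPIC is the top-level controller and the parent interrupt is simply omitted; a sketch of that case (register offsets reuse those of the example above and are illustrative):

    interrupt-controller@20a00 {
        compatible = "marvell,mpic";
        reg = <0x20a00 0x2d0>, <0x21070 0x58>;
        interrupt-controller;
        #interrupt-cells = <1>;
        msi-controller;
    };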
diff --git a/Documentation/devicetree/bindings/leds/backlight/ti,lm3509.yaml b/Documentation/devicetree/bindings/leds/backlight/ti,lm3509.yaml
new file mode 100644
index 000000000000..482fae71dd53
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/ti,lm3509.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/backlight/ti,lm3509.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI LM3509 High Efficiency Boost for White LEDs and/or OLED Displays
+
+maintainers:
+ - Patrick Gansterer <paroga@paroga.com>
+
+description:
+ The LM3509 current mode boost converter offers two separate outputs.
+ https://www.ti.com/product/LM3509
+
+properties:
+ compatible:
+ const: ti,lm3509
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ reset-gpios:
+ maxItems: 1
+
+ ti,brightness-rate-of-change-us:
+ description: Brightness Rate of Change in microseconds.
+ enum: [51, 13000, 26000, 52000]
+
+ ti,oled-mode:
+ description: Enable OLED mode.
+ type: boolean
+
+patternProperties:
+ "^led@[01]$":
+ type: object
+ description: Properties for a string of connected LEDs.
+ $ref: common.yaml#
+
+ properties:
+ reg:
+ description:
+ The control register that is used to program the two current sinks.
+ The LM3509 has two registers (BMAIN and BSUB), which are represented
+ as 0 or 1 in this property. The two current sinks can be controlled
+ independently with both registers, or register BMAIN can be
+ configured to control both sinks with the led-sources property.
+ minimum: 0
+ maximum: 1
+
+ label: true
+
+ led-sources:
+ minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
+
+ default-brightness:
+ minimum: 0
+ maximum: 31
+ default: 18
+
+ max-brightness:
+ minimum: 0
+ maximum: 31
+ default: 31
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ backlight@36 {
+ compatible = "ti,lm3509";
+ reg = <0x36>;
+ reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
+
+ ti,oled-mode;
+ ti,brightness-rate-of-change-us = <52000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ led-sources = <0 1>;
+ label = "lcd-backlight";
+ default-brightness = <12>;
+ max-brightness = <31>;
+ };
+ };
+ };
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ backlight@36 {
+ compatible = "ti,lm3509";
+ reg = <0x36>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ default-brightness = <12>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-brightness = <15>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml b/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
index e9d4514d0166..fe8aaecf3010 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
@@ -28,6 +28,7 @@ properties:
- national,lp5523
- ti,lp55231
- ti,lp5562
+ - ti,lp5569
- ti,lp8501
reg:
@@ -151,6 +152,16 @@ patternProperties:
$ref: /schemas/types.yaml#/definitions/string
description: name of channel
+if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: ti,lp8501
+then:
+ properties:
+ pwr-sel: false
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/leds/silergy,sy7802.yaml b/Documentation/devicetree/bindings/leds/silergy,sy7802.yaml
new file mode 100644
index 000000000000..46b8e5452b62
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/silergy,sy7802.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/silergy,sy7802.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Silergy SY7802 1800mA Boost Charge Pump LED Driver
+
+maintainers:
+ - André Apitzsch <git@apitzsch.eu>
+
+description: |
+ The SY7802 is a current-regulated charge pump which can regulate two current
+ levels for Flash and Torch modes.
+
+ The SY7802 is a high-current synchronous boost converter with 2-channel
+ high-side current sources. Each channel is able to deliver 900 mA of current.
+
+properties:
+ compatible:
+ enum:
+ - silergy,sy7802
+
+ reg:
+ maxItems: 1
+
+ enable-gpios:
+ maxItems: 1
+ description: A connection to the 'EN' pin.
+
+ flash-gpios:
+ maxItems: 1
+ description: A connection to the 'FLEN' pin.
+
+ vin-supply:
+ description: Regulator providing power to the 'VIN' pin.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^led@[0-1]$":
+ type: object
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ description: Index of the LED.
+ minimum: 0
+ maximum: 1
+
+ led-sources:
+ minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
+
+ required:
+ - reg
+ - led-sources
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - enable-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash-led-controller@53 {
+ compatible = "silergy,sy7802";
+ reg = <0x53>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
+
+ led@0 {
+ reg = <0>;
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <0>, <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,ifc.yaml b/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,ifc.yaml
index 3be1db30bf41..d1c3421bee10 100644
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,ifc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,ifc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: FSL/NXP Integrated Flash Controller
maintainers:
- - Li Yang <leoyang.li@nxp.com>
+ - Shawn Guo <shawnguo@kernel.org>
description: |
NXP's integrated flash controller (IFC) is an advanced version of the
diff --git a/Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml b/Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml
new file mode 100644
index 000000000000..d6a71c912b76
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/marvell,88pm886-a1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell 88PM886 PMIC core
+
+maintainers:
+ - Karel Balej <balejk@matfyz.cz>
+
+description:
+ Marvell 88PM886 is a PMIC providing several functions, such as an onkey,
+ regulators, and battery and charger management.
+
+properties:
+ compatible:
+ const: marvell,88pm886-a1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ wakeup-source: true
+
+ regulators:
+ type: object
+ additionalProperties: false
+ patternProperties:
+ "^(ldo(1[0-6]|[1-9])|buck[1-5])$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ description: LDO or buck regulator.
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pmic@30 {
+ compatible = "marvell,88pm886-a1";
+ reg = <0x30>;
+ interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ wakeup-source;
+
+ regulators {
+ ldo2: ldo2 {
+ regulator-min-microvolt = <3100000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo15: ldo15 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ buck2: buck2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml b/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml
index c8c4812fffe2..768390b92682 100644
--- a/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml
+++ b/Documentation/devicetree/bindings/mfd/mediatek,mt8195-scpsys.yaml
@@ -22,8 +22,10 @@ properties:
- mediatek,mt8173-scpsys
- mediatek,mt8183-scpsys
- mediatek,mt8186-scpsys
+ - mediatek,mt8188-scpsys
- mediatek,mt8192-scpsys
- mediatek,mt8195-scpsys
+ - mediatek,mt8365-scpsys
- const: syscon
- const: simple-mfd
diff --git a/Documentation/devicetree/bindings/mfd/mfd.txt b/Documentation/devicetree/bindings/mfd/mfd.txt
index 336c0495c8a3..b938fa26d2ce 100644
--- a/Documentation/devicetree/bindings/mfd/mfd.txt
+++ b/Documentation/devicetree/bindings/mfd/mfd.txt
@@ -17,13 +17,14 @@ A typical MFD can be:
Optional properties:
-- compatible : "simple-mfd" - this signifies that the operating system should
- consider all subnodes of the MFD device as separate devices akin to how
- "simple-bus" indicates when to see subnodes as children for a simple
- memory-mapped bus. For more complex devices, when the nexus driver has to
- probe registers to figure out what child devices exist etc, this should not
- be used. In the latter case the child devices will be determined by the
- operating system.
+- compatible : "simple-mfd" - this signifies that the operating system
+ should consider all subnodes of the MFD device as separate and independent
+ devices that do not need any resources to be provided by the parent device,
+ similar to how "simple-bus" indicates when to see subnodes as children of
+ a simple memory-mapped bus.
+ For more complex devices, when the nexus driver has to probe registers to
+ figure out what child devices exist etc., this should not be used. In the
+ latter case the child devices will be determined by the operating system.
- ranges: Describes the address mapping relationship to the parent. Should set
the child's base address to 0, the physical address within parent's address
diff --git a/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml b/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml
index 0c75d8bde568..0c6e1870db1d 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml
@@ -19,110 +19,136 @@ properties:
const: qcom,pm8008
reg:
- description:
- I2C slave address.
-
maxItems: 1
interrupts:
maxItems: 1
- description: Parent interrupt.
+ reset-gpios:
+ maxItems: 1
+
+ vdd-l1-l2-supply: true
+ vdd-l3-l4-supply: true
+ vdd-l5-supply: true
+ vdd-l6-supply: true
+ vdd-l7-supply: true
- "#interrupt-cells":
+ gpio-controller: true
+
+ "#gpio-cells":
const: 2
- description: |
- The first cell is the IRQ number, the second cell is the IRQ trigger
- flag. All interrupts are listed in include/dt-bindings/mfd/qcom-pm8008.h.
+ gpio-ranges:
+ maxItems: 1
interrupt-controller: true
- "#address-cells":
- const: 1
+ "#interrupt-cells":
+ const: 2
- "#size-cells":
+ "#thermal-sensor-cells":
const: 0
-patternProperties:
- "^gpio@[0-9a-f]+$":
+ pinctrl:
type: object
+ additionalProperties: false
+ patternProperties:
+ "-state$":
+ type: object
- description: |
- The GPIO peripheral. This node may be specified twice, one for each GPIO.
-
- properties:
- compatible:
- items:
- - const: qcom,pm8008-gpio
- - const: qcom,spmi-gpio
-
- reg:
- description: Peripheral address of one of the two GPIO peripherals.
- maxItems: 1
-
- gpio-controller: true
-
- gpio-ranges:
- maxItems: 1
+ allOf:
+ - $ref: /schemas/pinctrl/pinmux-node.yaml
+ - $ref: /schemas/pinctrl/pincfg-node.yaml
- interrupt-controller: true
+ properties:
+ pins:
+ items:
+ pattern: "^gpio[12]$"
- "#interrupt-cells":
- const: 2
+ function:
+ items:
+ - enum:
+ - normal
- "#gpio-cells":
- const: 2
+ required:
+ - pins
+ - function
- required:
- - compatible
- - reg
- - gpio-controller
- - interrupt-controller
- - "#gpio-cells"
- - gpio-ranges
- - "#interrupt-cells"
+ additionalProperties: false
+ regulators:
+ type: object
additionalProperties: false
+ patternProperties:
+ "^ldo[1-7]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
required:
- compatible
- reg
- interrupts
- - "#address-cells"
- - "#size-cells"
+ - vdd-l1-l2-supply
+ - vdd-l3-l4-supply
+ - vdd-l5-supply
+ - vdd-l6-supply
+ - vdd-l7-supply
+ - gpio-controller
+ - "#gpio-cells"
+ - gpio-ranges
+ - interrupt-controller
- "#interrupt-cells"
+ - "#thermal-sensor-cells"
additionalProperties: false
examples:
- |
- #include <dt-bindings/mfd/qcom-pm8008.h>
+ #include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
- pmic@8 {
+ pm8008: pmic@8 {
compatible = "qcom,pm8008";
reg = <0x8>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-controller;
- #interrupt-cells = <2>;
interrupt-parent = <&tlmm>;
interrupts = <32 IRQ_TYPE_EDGE_RISING>;
- pm8008_gpios: gpio@c000 {
- compatible = "qcom,pm8008-gpio", "qcom,spmi-gpio";
- reg = <0xc000>;
- gpio-controller;
- gpio-ranges = <&pm8008_gpios 0 0 2>;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ reset-gpios = <&tlmm 42 GPIO_ACTIVE_LOW>;
+
+ vdd-l1-l2-supply = <&vreg_s8b_1p2>;
+ vdd-l3-l4-supply = <&vreg_s1b_1p8>;
+ vdd-l5-supply = <&vreg_bob>;
+ vdd-l6-supply = <&vreg_bob>;
+ vdd-l7-supply = <&vreg_bob>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pm8008 0 0 2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ #thermal-sensor-cells = <0>;
+
+ pinctrl {
+ gpio-keys-state {
+ pins = "gpio1";
+ function = "normal";
+ };
+ };
+
+ regulators {
+ ldo1 {
+ regulator-name = "vreg_l1";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1300000>;
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
index b7f01cbb8fff..a2b2fbf77d5c 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
@@ -75,6 +75,7 @@ properties:
- qcom,pma8084
- qcom,pmc8180
- qcom,pmc8180c
+ - qcom,pmc8380
- qcom,pmd9635
- qcom,pmi632
- qcom,pmi8950
@@ -95,6 +96,7 @@ properties:
- qcom,pmx65
- qcom,pmx75
- qcom,smb2351
+ - qcom,smb2360
- const: qcom,spmi-pmic
reg:
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml
deleted file mode 100644
index 839c0521f1e5..000000000000
--- a/Documentation/devicetree/bindings/mfd/rockchip,rk809.yaml
+++ /dev/null
@@ -1,288 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/mfd/rockchip,rk809.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: RK809 Power Management Integrated Circuit
-
-maintainers:
- - Chris Zhong <zyw@rock-chips.com>
- - Zhang Qing <zhangqing@rock-chips.com>
-
-description: |
- Rockchip RK809 series PMIC. This device consists of an i2c controlled MFD
- that includes regulators, an RTC, and power button.
-
-properties:
- compatible:
- enum:
- - rockchip,rk809
-
- reg:
- maxItems: 1
-
- interrupts:
- maxItems: 1
-
- '#clock-cells':
- description: |
- See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
- minimum: 0
- maximum: 1
-
- clock-output-names:
- description:
- From common clock binding to override the default output clock name.
-
- rockchip,system-power-controller:
- type: boolean
- deprecated: true
- description:
- Telling whether or not this PMIC is controlling the system power.
-
- system-power-controller: true
-
- wakeup-source:
- type: boolean
- description:
- Device can be used as a wakeup source.
-
- vcc1-supply:
- description:
- The input supply for DCDC_REG1.
-
- vcc2-supply:
- description:
- The input supply for DCDC_REG2.
-
- vcc3-supply:
- description:
- The input supply for DCDC_REG3.
-
- vcc4-supply:
- description:
- The input supply for DCDC_REG4.
-
- vcc5-supply:
- description:
- The input supply for LDO_REG1, LDO_REG2, and LDO_REG3.
-
- vcc6-supply:
- description:
- The input supply for LDO_REG4, LDO_REG5, and LDO_REG6.
-
- vcc7-supply:
- description:
- The input supply for LDO_REG7, LDO_REG8, and LDO_REG9.
-
- vcc8-supply:
- description:
- The input supply for SWITCH_REG1.
-
- vcc9-supply:
- description:
- The input supply for DCDC_REG5 and SWITCH_REG2.
-
- regulators:
- type: object
- patternProperties:
- "^(LDO_REG[1-9]|DCDC_REG[1-5]|SWITCH_REG[1-2])$":
- type: object
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
- unevaluatedProperties: false
-
-allOf:
- - if:
- properties:
- '#clock-cells':
- const: 0
-
- then:
- properties:
- clock-output-names:
- maxItems: 1
-
- else:
- properties:
- clock-output-names:
- maxItems: 2
-
-required:
- - compatible
- - reg
- - interrupts
- - "#clock-cells"
-
-additionalProperties: false
-
-examples:
- - |
- #include <dt-bindings/pinctrl/rockchip.h>
- #include <dt-bindings/interrupt-controller/irq.h>
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- rk808: pmic@1b {
- compatible = "rockchip,rk808";
- reg = <0x1b>;
- #clock-cells = <1>;
- clock-output-names = "xin32k", "rk808-clkout2";
- interrupt-parent = <&gpio3>;
- interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-names = "default";
- pinctrl-0 = <&pmic_int_l_pin>;
- rockchip,system-power-controller;
- wakeup-source;
-
- vcc1-supply = <&vcc_sysin>;
- vcc2-supply = <&vcc_sysin>;
- vcc3-supply = <&vcc_sysin>;
- vcc4-supply = <&vcc_sysin>;
- vcc6-supply = <&vcc_sysin>;
- vcc7-supply = <&vcc_sysin>;
- vcc8-supply = <&vcc3v3_sys>;
- vcc9-supply = <&vcc_sysin>;
- vcc10-supply = <&vcc_sysin>;
- vcc11-supply = <&vcc_sysin>;
- vcc12-supply = <&vcc3v3_sys>;
-
- regulators {
- vdd_center: DCDC_REG1 {
- regulator-name = "vdd_center";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <6001>;
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_cpu_l: DCDC_REG2 {
- regulator-name = "vdd_cpu_l";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <6001>;
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_ddr: DCDC_REG3 {
- regulator-name = "vcc_ddr";
- regulator-always-on;
- regulator-boot-on;
- regulator-state-mem {
- regulator-on-in-suspend;
- };
- };
-
- vcc_1v8: vcc_wl: DCDC_REG4 {
- regulator-name = "vcc_1v8";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- vcc1v8_pmupll: LDO_REG3 {
- regulator-name = "vcc1v8_pmupll";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- vcc_sdio: LDO_REG4 {
- regulator-name = "vcc_sdio";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3000000>;
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3000000>;
- };
- };
-
- vcca3v0_codec: LDO_REG5 {
- regulator-name = "vcca3v0_codec";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_1v5: LDO_REG6 {
- regulator-name = "vcc_1v5";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1500000>;
- };
- };
-
- vcca1v8_codec: LDO_REG7 {
- regulator-name = "vcca1v8_codec";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_3v0: LDO_REG8 {
- regulator-name = "vcc_3v0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3000000>;
- };
- };
-
- vcc3v3_s3: SWITCH_REG1 {
- regulator-name = "vcc3v3_s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc3v3_s0: SWITCH_REG2 {
- regulator-name = "vcc3v3_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
index 8c2fd0fabb92..2cb6d176a84c 100644
--- a/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
+++ b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml
@@ -4,20 +4,21 @@
$id: http://devicetree.org/schemas/mfd/rockchip,rk817.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: RK817 Power Management Integrated Circuit
+title: RK809/RK817 Power Management Integrated Circuit
maintainers:
- Chris Zhong <zyw@rock-chips.com>
- Zhang Qing <zhangqing@rock-chips.com>
description: |
- Rockchip RK817 series PMIC. This device consists of an i2c controlled MFD
- that includes regulators, an RTC, a power button, an audio codec, and a
- battery charger manager.
+ Rockchip RK809/RK817 series PMIC. This device consists of an i2c controlled
+ MFD that includes regulators, an RTC, a power button and an audio codec.
+ The RK817 variant also provides a battery charger manager.
properties:
compatible:
enum:
+ - rockchip,rk809
- rockchip,rk817
reg:
@@ -32,6 +33,13 @@ properties:
minimum: 0
maximum: 1
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: mclk
+
clock-output-names:
description:
From common clock binding to override the default output clock name.
@@ -42,6 +50,9 @@ properties:
description:
Telling whether or not this PMIC is controlling the system power.
+ '#sound-dai-cells':
+ const: 0
+
system-power-controller: true
wakeup-source:
@@ -79,41 +90,22 @@ properties:
vcc8-supply:
description:
- The input supply for BOOST.
+ The input supply for BOOST on RK817, or for SWITCH_REG2 on RK809.
vcc9-supply:
description:
- The input supply for OTG_SWITCH.
+ The input supply for OTG_SWITCH on RK817,
+ or for DCDC_REG5 and SWITCH_REG1 on RK809.
regulators:
type: object
patternProperties:
- "^(LDO_REG[1-9]|DCDC_REG[1-4]|BOOST|OTG_SWITCH)$":
- type: object
+ "^(LDO_REG[1-9]|DCDC_REG[1-5]|BOOST|OTG_SWITCH|SWITCH_REG[1-2])$":
+ $ref: /schemas/regulator/regulator.yaml
unevaluatedProperties: false
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
-
- clocks:
- description:
- The input clock for the audio codec.
-
- clock-names:
- description:
- The clock name for the codec clock.
- items:
- - const: mclk
-
- '#sound-dai-cells':
- description:
- Needed for the interpretation of sound dais.
- const: 0
+ additionalProperties: false
codec:
- description: |
- The child node for the codec to hold additional properties. If no
- additional properties are required for the codec, this node can be
- omitted.
type: object
additionalProperties: false
properties:
@@ -123,9 +115,6 @@ properties:
Describes if the microphone uses differential mode.
charger:
- description: |
- The child node for the charger to hold additional properties. If a
- battery is not in use, this node can be omitted.
type: object
$ref: /schemas/power/supply/power-supply.yaml
@@ -168,6 +157,7 @@ properties:
additionalProperties: false
allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
- if:
properties:
'#clock-cells':
@@ -183,6 +173,22 @@ allOf:
clock-output-names:
maxItems: 2
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk817
+ then:
+ properties:
+ regulators:
+ patternProperties:
+ "^(DCDC_REG5|SWITCH_REG[1-2])$": false
+ else:
+ properties:
+ regulators:
+ patternProperties:
+ "^(BOOST|OTG_SWITCH)$": false
+
required:
- compatible
- reg
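With "rockchip,rk809" now folded into this schema, the if/then block above selects which regulator node names each variant accepts: RK817 keeps BOOST and OTG_SWITCH, while RK809 instead provides DCDC_REG5 and SWITCH_REG1/2. A minimal RK809 sketch under those rules (address, interrupt and regulator details are illustrative, loosely modelled on the removed rockchip,rk809.yaml example):

    pmic@20 {
        compatible = "rockchip,rk809";
        reg = <0x20>;
        interrupt-parent = <&gpio3>;
        interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
        #clock-cells = <1>;
        clock-output-names = "xin32k", "rk809-clkout2";

        regulators {
            vcc3v3_s3: SWITCH_REG1 {
                regulator-name = "vcc3v3_s3";
                regulator-always-on;
                regulator-boot-on;
            };
        };
    };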
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
new file mode 100644
index 000000000000..d381125a0a15
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
@@ -0,0 +1,173 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rohm,bd96801-pmic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD96801 Scalable Power Management Integrated Circuit
+
+maintainers:
+ - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description:
+ BD96801 is an automotive grade single-chip power management IC.
+ It integrates 4 buck converters and 3 LDOs with safety features such as
+ over-/undervoltage and overcurrent detection, and a watchdog.
+
+properties:
+ compatible:
+ const: rohm,bd96801
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ The PMIC provides intb and errb IRQ lines. The errb IRQ line is used
+ for fatal IRQs which will cause the PMIC to shut down power outputs.
+ In many systems this will shut down the SoC controlling the PMIC, and
+ connecting/handling the errb can be omitted. However, there are cases
+ where the SoC is not powered by the PMIC or has short-time backup
+ energy to handle shutdown of critical hardware. In that case it may be
+ useful to connect the errb and handle errb events.
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - enum: [intb, errb]
+ - const: errb
+
+ rohm,hw-timeout-ms:
+ description:
+ Watchdog timeout value(s). The first value is the timeout limit. The
+ second value is an optional limit for a 'too early' watchdog ping when
+ window timeout mode is to be used.
+ minItems: 1
+ maxItems: 2
+
+ rohm,wdg-action:
+ description:
+ Whether the watchdog failure must turn off the regulator power outputs or
+ just toggle the INTB line.
+ enum:
+ - prstb
+ - intb-only
+
+ timeout-sec:
+ maxItems: 2
+
+ regulators:
+ $ref: /schemas/regulator/rohm,bd96801-regulator.yaml
+ description:
+ List of child nodes that specify the regulators.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - regulators
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/leds/common.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pmic: pmic@60 {
+ reg = <0x60>;
+ compatible = "rohm,bd96801";
+ interrupt-parent = <&gpio1>;
+ interrupts = <29 IRQ_TYPE_LEVEL_LOW>, <6 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "intb", "errb";
+
+ regulators {
+ buck1 {
+ regulator-name = "buck1";
+ regulator-ramp-delay = <1250>;
+ /* 0.5V min INITIAL - 150 mV tune */
+ regulator-min-microvolt = <350000>;
+ /* 3.3V + 150mV tune */
+ regulator-max-microvolt = <3450000>;
+
+ /* These can be set only when PMIC is in STBY */
+ rohm,initial-voltage-microvolt = <500000>;
+ regulator-ov-error-microvolt = <230000>;
+ regulator-uv-error-microvolt = <230000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-warn-kelvin = <0>;
+ };
+ buck2 {
+ regulator-name = "buck2";
+ regulator-min-microvolt = <350000>;
+ regulator-max-microvolt = <3450000>;
+
+ rohm,initial-voltage-microvolt = <3000000>;
+ regulator-ov-error-microvolt = <18000>;
+ regulator-uv-error-microvolt = <18000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-warn-kelvin = <1>;
+ };
+ buck3 {
+ regulator-name = "buck3";
+ regulator-min-microvolt = <350000>;
+ regulator-max-microvolt = <3450000>;
+
+ rohm,initial-voltage-microvolt = <600000>;
+ regulator-ov-warn-microvolt = <18000>;
+ regulator-uv-warn-microvolt = <18000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-error-kelvin = <0>;
+ };
+ buck4 {
+ regulator-name = "buck4";
+ regulator-min-microvolt = <350000>;
+ regulator-max-microvolt = <3450000>;
+
+ rohm,initial-voltage-microvolt = <600000>;
+ regulator-ov-warn-microvolt = <18000>;
+ regulator-uv-warn-microvolt = <18000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-error-kelvin = <0>;
+ };
+ ldo5 {
+ regulator-name = "ldo5";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <3300000>;
+
+ rohm,initial-voltage-microvolt = <500000>;
+ regulator-ov-error-microvolt = <36000>;
+ regulator-uv-error-microvolt = <34000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-warn-kelvin = <0>;
+ };
+ ldo6 {
+ regulator-name = "ldo6";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <3300000>;
+
+ rohm,initial-voltage-microvolt = <300000>;
+ regulator-ov-error-microvolt = <36000>;
+ regulator-uv-error-microvolt = <34000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-warn-kelvin = <0>;
+ };
+ ldo7 {
+ regulator-name = "ldo7";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <3300000>;
+
+ rohm,initial-voltage-microvolt = <500000>;
+ regulator-ov-error-microvolt = <36000>;
+ regulator-uv-error-microvolt = <34000>;
+ regulator-temp-protection-kelvin = <1>;
+ regulator-temp-warn-kelvin = <0>;
+ };
+ };
+ };
+ };
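The rohm,hw-timeout-ms and rohm,wdg-action properties above are not shown in the example; a trimmed sketch of how they could be added to the same pmic node (the timeout values are illustrative only — the first cell is the limit, the optional second cell the 'too early' boundary for window mode):

    pmic@60 {
        compatible = "rohm,bd96801";
        reg = <0x60>;
        interrupt-parent = <&gpio1>;
        interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
        interrupt-names = "intb";

        /* illustrative window: 150 ms limit, pings before 50 ms are 'too early' */
        rohm,hw-timeout-ms = <150 50>;
        rohm,wdg-action = "intb-only";

        regulators {
            buck1 {
                regulator-name = "buck1";
            };
        };
    };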
diff --git a/Documentation/devicetree/bindings/mfd/syscon-common.yaml b/Documentation/devicetree/bindings/mfd/syscon-common.yaml
new file mode 100644
index 000000000000..451cbad467a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/syscon-common.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/syscon-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: System Controller Registers R/W Common Properties
+
+description:
+ System controller node represents a register region containing a set
+ of miscellaneous registers. The registers are not cohesive enough to
+ represent as any specific type of device. The typical use-case is
+ for some other node's driver, or platform-specific code, to acquire
+ a reference to the syscon node (e.g. by phandle, node path, or
+ search using a specific compatible value), interrogate the node (or
+ associated OS driver) to determine the location of the registers,
+ and access the registers directly.
+
+maintainers:
+ - Lee Jones <lee@kernel.org>
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: syscon
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ contains:
+ const: syscon
+ minItems: 2
+ maxItems: 5 # Should be enough
+
+ reg:
+ maxItems: 1
+
+ reg-io-width:
+ description:
+ The size (in bytes) of the IO accesses that should be performed
+ on the device.
+ enum: [1, 2, 4, 8]
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: simple-mfd
+ then:
+ properties:
+ compatible:
+ minItems: 3
+ maxItems: 5
+
+additionalProperties: true
+
+examples:
+ - |
+ syscon: syscon@1c00000 {
+ compatible = "allwinner,sun8i-h3-system-controller", "syscon";
+ reg = <0x01c00000 0x1000>;
+ };
+...
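To make the use-case above concrete, a minimal sketch of a consumer node referencing the syscon example by phandle; the consumer compatible and the vendor phandle property are hypothetical and not defined by this binding:

    ethernet@1c30000 {
        /* hypothetical consumer device, not part of this binding */
        compatible = "vendor,example-emac";
        reg = <0x01c30000 0x10000>;
        /* acquire a reference to the syscon node above by phandle;
         * the OS driver then locates and accesses its registers
         */
        vendor,syscon = <&syscon>;
    };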
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 7ed12a938baa..9dc594ea3654 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/mfd/syscon.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: System Controller Registers R/W
+title: System Controller Devices
description: |
System controller node represents a register region containing a set
@@ -19,121 +19,213 @@ description: |
maintainers:
- Lee Jones <lee@kernel.org>
+# Need a select with all compatibles listed for compatibility with older
+# dtschema (<2024.02), so this will not be selected for other schemas having
+# syscon fallback.
select:
properties:
compatible:
contains:
enum:
- - syscon
-
+ - al,alpine-sysfabric-service
+ - allwinner,sun8i-a83t-system-controller
+ - allwinner,sun8i-h3-system-controller
+ - allwinner,sun8i-v3s-system-controller
+ - allwinner,sun50i-a64-system-controller
+ - altr,l3regs
+ - altr,sdr-ctl
+ - amd,pensando-elba-syscon
+ - amlogic,meson-mx-assist
+ - amlogic,meson-mx-bootrom
+ - amlogic,meson8-analog-top
+ - amlogic,meson8b-analog-top
+ - amlogic,meson8-pmu
+ - amlogic,meson8b-pmu
+ - apm,merlin-poweroff-mailbox
+ - apm,mustang-poweroff-mailbox
+ - apm,xgene-csw
+ - apm,xgene-efuse
+ - apm,xgene-mcb
+ - apm,xgene-rb
+ - apm,xgene-scu
+ - atmel,sama5d2-sfrbu
+ - atmel,sama5d3-nfc-io
+ - atmel,sama5d3-sfrbu
+ - atmel,sama5d4-sfrbu
+ - axis,artpec6-syscon
+ - brcm,cru-clkset
+ - brcm,sr-cdru
+ - brcm,sr-mhb
+ - cirrus,ep7209-syscon1
+ - cirrus,ep7209-syscon2
+ - cirrus,ep7209-syscon3
+ - cnxt,cx92755-uc
+ - freecom,fsg-cs2-system-controller
+ - fsl,imx93-aonmix-ns-syscfg
+ - fsl,imx93-wakeupmix-syscfg
+ - fsl,ls1088a-reset
+ - fsl,vf610-anatop
+ - fsl,vf610-mscm-cpucfg
+ - hisilicon,dsa-subctrl
+ - hisilicon,hi6220-sramctrl
+ - hisilicon,hip04-ppe
+ - hisilicon,pcie-sas-subctrl
+ - hisilicon,peri-subctrl
+ - hpe,gxp-sysreg
+ - loongson,ls1b-syscon
+ - loongson,ls1c-syscon
+ - lsi,axxia-syscon
+ - marvell,armada-3700-cpu-misc
+ - marvell,armada-3700-nb-pm
+ - marvell,armada-3700-avs
+ - marvell,armada-3700-usb2-host-misc
+ - marvell,dove-global-config
+ - mediatek,mt2701-pctl-a-syscfg
+ - mediatek,mt2712-pctl-a-syscfg
+ - mediatek,mt6397-pctl-pmic-syscfg
+ - mediatek,mt8135-pctl-a-syscfg
+ - mediatek,mt8135-pctl-b-syscfg
+ - mediatek,mt8173-pctl-a-syscfg
+ - mediatek,mt8365-syscfg
+ - microchip,lan966x-cpu-syscon
+ - microchip,sam9x60-sfr
+ - microchip,sama7g5-ddr3phy
+ - mscc,ocelot-cpu-syscon
+ - mstar,msc313-pmsleep
+ - nuvoton,ma35d1-sys
+ - nuvoton,wpcm450-shm
+ - rockchip,px30-qos
+ - rockchip,rk3036-qos
+ - rockchip,rk3066-qos
+ - rockchip,rk3128-qos
+ - rockchip,rk3228-qos
+ - rockchip,rk3288-qos
+ - rockchip,rk3368-qos
+ - rockchip,rk3399-qos
+ - rockchip,rk3568-qos
+ - rockchip,rk3588-qos
+ - rockchip,rv1126-qos
+ - st,spear1340-misc
+ - stericsson,nomadik-pmu
+ - starfive,jh7100-sysmain
+ - ti,am62-opp-efuse-table
+ - ti,am62-usb-phy-ctrl
+ - ti,am625-dss-oldi-io-ctrl
+ - ti,am62p-cpsw-mac-efuse
+ - ti,am654-dss-oldi-io-ctrl
+ - ti,j784s4-pcie-ctrl
+ - ti,keystone-pllctrl
required:
- compatible
properties:
compatible:
- anyOf:
- - items:
- - enum:
- - allwinner,sun8i-a83t-system-controller
- - allwinner,sun8i-h3-system-controller
- - allwinner,sun8i-v3s-system-controller
- - allwinner,sun50i-a64-system-controller
- - altr,sdr-ctl
- - amd,pensando-elba-syscon
- - apm,xgene-csw
- - apm,xgene-efuse
- - apm,xgene-mcb
- - apm,xgene-rb
- - apm,xgene-scu
- - brcm,cru-clkset
- - brcm,sr-cdru
- - brcm,sr-mhb
- - freecom,fsg-cs2-system-controller
- - fsl,imx93-aonmix-ns-syscfg
- - fsl,imx93-wakeupmix-syscfg
- - fsl,ls1088a-reset
- - hisilicon,dsa-subctrl
- - hisilicon,hi6220-sramctrl
- - hisilicon,pcie-sas-subctrl
- - hisilicon,peri-subctrl
- - hpe,gxp-sysreg
- - intel,lgm-syscon
- - loongson,ls1b-syscon
- - loongson,ls1c-syscon
- - marvell,armada-3700-cpu-misc
- - marvell,armada-3700-nb-pm
- - marvell,armada-3700-avs
- - marvell,armada-3700-usb2-host-misc
- - mediatek,mt2712-pctl-a-syscfg
- - mediatek,mt6397-pctl-pmic-syscfg
- - mediatek,mt8135-pctl-a-syscfg
- - mediatek,mt8135-pctl-b-syscfg
- - mediatek,mt8173-pctl-a-syscfg
- - mediatek,mt8365-syscfg
- - microchip,lan966x-cpu-syscon
- - microchip,sparx5-cpu-syscon
- - mstar,msc313-pmsleep
- - nuvoton,ma35d1-sys
- - nuvoton,wpcm450-shm
- - rockchip,px30-qos
- - rockchip,rk3036-qos
- - rockchip,rk3066-qos
- - rockchip,rk3128-qos
- - rockchip,rk3228-qos
- - rockchip,rk3288-qos
- - rockchip,rk3368-qos
- - rockchip,rk3399-qos
- - rockchip,rk3568-qos
- - rockchip,rk3588-qos
- - rockchip,rv1126-qos
- - starfive,jh7100-sysmain
- - ti,am62-usb-phy-ctrl
- - ti,am62p-cpsw-mac-efuse
- - ti,am654-dss-oldi-io-ctrl
- - ti,am654-serdes-ctrl
- - ti,j784s4-pcie-ctrl
-
- - const: syscon
-
- - contains:
- const: syscon
- minItems: 2
- maxItems: 5 # Should be enough
+ items:
+ - enum:
+ - al,alpine-sysfabric-service
+ - allwinner,sun8i-a83t-system-controller
+ - allwinner,sun8i-h3-system-controller
+ - allwinner,sun8i-v3s-system-controller
+ - allwinner,sun50i-a64-system-controller
+ - altr,l3regs
+ - altr,sdr-ctl
+ - amd,pensando-elba-syscon
+ - amlogic,meson-mx-assist
+ - amlogic,meson-mx-bootrom
+ - amlogic,meson8-analog-top
+ - amlogic,meson8b-analog-top
+ - amlogic,meson8-pmu
+ - amlogic,meson8b-pmu
+ - apm,merlin-poweroff-mailbox
+ - apm,mustang-poweroff-mailbox
+ - apm,xgene-csw
+ - apm,xgene-efuse
+ - apm,xgene-mcb
+ - apm,xgene-rb
+ - apm,xgene-scu
+ - atmel,sama5d2-sfrbu
+ - atmel,sama5d3-nfc-io
+ - atmel,sama5d3-sfrbu
+ - atmel,sama5d4-sfrbu
+ - axis,artpec6-syscon
+ - brcm,cru-clkset
+ - brcm,sr-cdru
+ - brcm,sr-mhb
+ - cirrus,ep7209-syscon1
+ - cirrus,ep7209-syscon2
+ - cirrus,ep7209-syscon3
+ - cnxt,cx92755-uc
+ - freecom,fsg-cs2-system-controller
+ - fsl,imx93-aonmix-ns-syscfg
+ - fsl,imx93-wakeupmix-syscfg
+ - fsl,ls1088a-reset
+ - fsl,vf610-anatop
+ - fsl,vf610-mscm-cpucfg
+ - hisilicon,dsa-subctrl
+ - hisilicon,hi6220-sramctrl
+ - hisilicon,hip04-ppe
+ - hisilicon,pcie-sas-subctrl
+ - hisilicon,peri-subctrl
+ - hpe,gxp-sysreg
+ - loongson,ls1b-syscon
+ - loongson,ls1c-syscon
+ - lsi,axxia-syscon
+ - marvell,armada-3700-cpu-misc
+ - marvell,armada-3700-nb-pm
+ - marvell,armada-3700-avs
+ - marvell,armada-3700-usb2-host-misc
+ - marvell,dove-global-config
+ - mediatek,mt2701-pctl-a-syscfg
+ - mediatek,mt2712-pctl-a-syscfg
+ - mediatek,mt6397-pctl-pmic-syscfg
+ - mediatek,mt8135-pctl-a-syscfg
+ - mediatek,mt8135-pctl-b-syscfg
+ - mediatek,mt8173-pctl-a-syscfg
+ - mediatek,mt8365-syscfg
+ - microchip,lan966x-cpu-syscon
+ - microchip,sam9x60-sfr
+ - microchip,sama7g5-ddr3phy
+ - mscc,ocelot-cpu-syscon
+ - mstar,msc313-pmsleep
+ - nuvoton,ma35d1-sys
+ - nuvoton,wpcm450-shm
+ - rockchip,px30-qos
+ - rockchip,rk3036-qos
+ - rockchip,rk3066-qos
+ - rockchip,rk3128-qos
+ - rockchip,rk3228-qos
+ - rockchip,rk3288-qos
+ - rockchip,rk3368-qos
+ - rockchip,rk3399-qos
+ - rockchip,rk3568-qos
+ - rockchip,rk3588-qos
+ - rockchip,rv1126-qos
+ - st,spear1340-misc
+ - stericsson,nomadik-pmu
+ - starfive,jh7100-sysmain
+ - ti,am62-opp-efuse-table
+ - ti,am62-usb-phy-ctrl
+ - ti,am625-dss-oldi-io-ctrl
+ - ti,am62p-cpsw-mac-efuse
+ - ti,am654-dss-oldi-io-ctrl
+ - ti,j784s4-pcie-ctrl
+ - ti,keystone-pllctrl
+ - const: syscon
reg:
maxItems: 1
- reg-io-width:
- description: |
- The size (in bytes) of the IO accesses that should be performed
- on the device.
- enum: [1, 2, 4, 8]
-
resets:
maxItems: 1
- hwlocks:
- maxItems: 1
- description:
- Reference to a phandle of a hardware spinlock provider node.
-
required:
- compatible
- reg
allOf:
- - if:
- properties:
- compatible:
- contains:
- const: simple-mfd
- then:
- properties:
- compatible:
- minItems: 3
- maxItems: 5
+ - $ref: syscon-common.yaml#
-additionalProperties: true
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/mfd/ti,twl.yaml b/Documentation/devicetree/bindings/mfd/ti,twl.yaml
index c2357fecb56c..e94b0fd7af0f 100644
--- a/Documentation/devicetree/bindings/mfd/ti,twl.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,twl.yaml
@@ -22,6 +22,32 @@ allOf:
contains:
const: ti,twl4030
then:
+ patternProperties:
+ "^regulator-":
+ properties:
+ compatible:
+ enum:
+ - ti,twl4030-vaux1
+ - ti,twl4030-vaux2
+ - ti,twl4030-vaux3
+ - ti,twl4030-vaux4
+ - ti,twl4030-vmmc1
+ - ti,twl4030-vmmc2
+ - ti,twl4030-vpll1
+ - ti,twl4030-vpll2
+ - ti,twl4030-vsim
+ - ti,twl4030-vdac
+ - ti,twl4030-vintana2
+ - ti,twl4030-vio
+ - ti,twl4030-vdd1
+ - ti,twl4030-vdd2
+ - ti,twl4030-vintana1
+ - ti,twl4030-vintdig
+ - ti,twl4030-vusb1v5
+ - ti,twl4030-vusb1v8
+ - ti,twl4030-vusb3v1
+ ti,retain-on-reset: false
+
properties:
madc:
type: object
@@ -50,13 +76,34 @@ allOf:
properties:
compatible:
const: ti,twl4030-wdt
-
- if:
properties:
compatible:
contains:
const: ti,twl6030
then:
+ patternProperties:
+ "^regulator-":
+ properties:
+ compatible:
+ enum:
+ - ti,twl6030-vaux1
+ - ti,twl6030-vaux2
+ - ti,twl6030-vaux3
+ - ti,twl6030-vmmc
+ - ti,twl6030-vpp
+ - ti,twl6030-vusim
+ - ti,twl6030-vana
+ - ti,twl6030-vcxio
+ - ti,twl6030-vdac
+ - ti,twl6030-vusb
+ - ti,twl6030-v1v8
+ - ti,twl6030-v2v1
+ - ti,twl6030-vdd1
+ - ti,twl6030-vdd2
+ - ti,twl6030-vdd3
+ regulator-initial-mode: false
+
properties:
gpadc:
type: object
@@ -69,6 +116,25 @@ allOf:
contains:
const: ti,twl6032
then:
+ patternProperties:
+ "^regulator-":
+ properties:
+ compatible:
+ enum:
+ - ti,twl6032-ldo1
+ - ti,twl6032-ldo2
+ - ti,twl6032-ldo3
+ - ti,twl6032-ldo4
+ - ti,twl6032-ldo5
+ - ti,twl6032-ldo6
+ - ti,twl6032-ldo7
+ - ti,twl6032-ldoln
+ - ti,twl6032-ldousb
+ - ti,twl6032-smps3
+ - ti,twl6032-smps4
+ - ti,twl6032-vio
+ regulator-initial-mode: false
+
properties:
gpadc:
type: object
@@ -112,6 +178,27 @@ properties:
interrupts:
maxItems: 1
+patternProperties:
+ "^regulator-":
+ type: object
+ unevaluatedProperties: false
+ $ref: /schemas/regulator/regulator.yaml
+ properties:
+ compatible: true
+ regulator-initial-mode:
+ enum:
+ - 0x08 # Sleep mode, the nominal output voltage is maintained
+ # with low power consumption with low load current capability
+ - 0x0e # Active mode, the regulator can deliver its nominal output
+ # voltage with full-load current capability
+ ti,retain-on-reset:
+ description:
+ The supplies are not turned off during a warm
+ reset. This could be needed for VMMC, as the TWL6030
+ reset sequence for this signal does not comply
+ with the SD specification.
+ type: boolean
+
unevaluatedProperties: false
required:
@@ -131,9 +218,85 @@ examples:
compatible = "ti,twl6030";
reg = <0x48>;
interrupts = <39>; /* IRQ_SYS_1N cascaded to gic */
+ interrupt-parent = <&gic>;
interrupt-controller;
#interrupt-cells = <1>;
- interrupt-parent = <&gic>;
+
+ gpadc {
+ compatible = "ti,twl6030-gpadc";
+ interrupts = <6>;
+ #io-channel-cells = <1>;
+ };
+
+ rtc {
+ compatible = "ti,twl4030-rtc";
+ interrupts = <8>;
+ };
+
+ regulator-vaux1 {
+ compatible = "ti,twl6030-vaux1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ regulator-vmmc1 {
+ compatible = "ti,twl6030-vmmc";
+ ti,retain-on-reset;
+ };
};
};
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@48 {
+ compatible = "ti,twl4030";
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ bci {
+ compatible = "ti,twl4030-bci";
+ interrupts = <9>, <2>;
+ bci3v1-supply = <&vusb3v1>;
+ io-channels = <&twl_madc 11>;
+ io-channel-names = "vac";
+ };
+
+ twl_madc: madc {
+ compatible = "ti,twl4030-madc";
+ interrupts = <3>;
+ #io-channel-cells = <1>;
+ };
+
+ pwrbutton {
+ compatible = "ti,twl4030-pwrbutton";
+ interrupts = <8>;
+ };
+
+ rtc {
+ compatible = "ti,twl4030-rtc";
+ interrupts = <11>;
+ };
+
+ regulator-vaux1 {
+ compatible = "ti,twl4030-vaux1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <0xe>;
+ };
+
+ vusb3v1: regulator-vusb3v1 {
+ compatible = "ti,twl4030-vusb3v1";
+ };
+
+ watchdog {
+ compatible = "ti,twl4030-wdt";
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mips/mscc.txt b/Documentation/devicetree/bindings/mips/mscc.txt
index cc916eaeed0a..e74165696b76 100644
--- a/Documentation/devicetree/bindings/mips/mscc.txt
+++ b/Documentation/devicetree/bindings/mips/mscc.txt
@@ -25,23 +25,6 @@ Example:
reg = <0x71070000 0x1c>;
};
-
-o CPU system control:
-
-The SoC has a few registers (ICPU_CFG:CPU_SYSTEM_CTRL) handling configuration of
-the CPU: 8 general purpose registers, reset control, CPU en/disabling, CPU
-endianness, CPU bus control, CPU status.
-
-Required properties:
-- compatible: Should be "mscc,ocelot-cpu-syscon", "syscon"
-- reg : Should contain registers location and length
-
-Example:
- syscon@70000000 {
- compatible = "mscc,ocelot-cpu-syscon", "syscon";
- reg = <0x70000000 0x2c>;
- };
-
o HSIO regs:
The SoC has a few registers (HSIO) handling miscellaneous functionalities:
diff --git a/Documentation/devicetree/bindings/mmc/amlogic,meson-gx-mmc.yaml b/Documentation/devicetree/bindings/mmc/amlogic,meson-gx-mmc.yaml
index bc403ae9e5d9..57646575a13f 100644
--- a/Documentation/devicetree/bindings/mmc/amlogic,meson-gx-mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/amlogic,meson-gx-mmc.yaml
@@ -51,6 +51,9 @@ properties:
set when controller's internal DMA engine cannot access the DRAM memory,
like on the G12A dedicated SDIO controller.
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
index cbd3d6c6c77f..eee6be7a7867 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
+++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
@@ -20,6 +20,7 @@ properties:
- const: brcm,sdhci-brcmstb
- items:
- enum:
+ - brcm,bcm2712-sdhci
- brcm,bcm74165b0-sdhci
- brcm,bcm7445-sdhci
- brcm,bcm7425-sdhci
diff --git a/Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml b/Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml
new file mode 100644
index 000000000000..b86ffb53b18b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/fsl,esdhc.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/fsl,esdhc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Enhanced Secure Digital Host Controller (eSDHC)
+
+description:
+ The Enhanced Secure Digital Host Controller provides an interface
+ for MMC, SD, and SDIO types of memory cards.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,mpc8536-esdhc
+ - fsl,mpc8378-esdhc
+ - fsl,p2020-esdhc
+ - fsl,p4080-esdhc
+ - fsl,t1040-esdhc
+ - fsl,t4240-esdhc
+ - fsl,ls1012a-esdhc
+ - fsl,ls1028a-esdhc
+ - fsl,ls1088a-esdhc
+ - fsl,ls1043a-esdhc
+ - fsl,ls1046a-esdhc
+ - fsl,ls2080a-esdhc
+ - const: fsl,esdhc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: specifies eSDHC base clock frequency.
+
+ sdhci,wp-inverted:
+ $ref: /schemas/types.yaml#/definitions/flag
+ deprecated: true
+ description:
+ specifies that the eSDHC controller reports an
+ inverted write-protect state; new devices should use the generic
+ "wp-inverted" property.
+
+ sdhci,1-bit-only:
+ $ref: /schemas/types.yaml#/definitions/flag
+ deprecated: true
+ description:
+ specifies that a controller can only handle
+ 1-bit data transfers. New devices should use the generic
+ "bus-width = <1>" property.
+
+ sdhci,auto-cmd12:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ specifies that a controller can only handle auto CMD12.
+
+ voltage-ranges:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: specifies minimum slot voltage (mV).
+ - description: specifies maximum slot voltage (mV).
+ minItems: 1
+ maxItems: 8
+
+ dma-coherent: true
+
+ little-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If the host controller is in little-endian mode, specify
+ this property. The default endian mode is big-endian.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: sdhci-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mmc@2e000 {
+ compatible = "fsl,mpc8378-esdhc", "fsl,esdhc";
+ reg = <0x2e000 0x1000>;
+ interrupts = <42 0x8>;
+ interrupt-parent = <&ipic>;
+ /* Filled in by U-Boot */
+ clock-frequency = <100000000>;
+ voltage-ranges = <3300 3300>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
deleted file mode 100644
index edb8cadb9541..000000000000
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-* Freescale Enhanced Secure Digital Host Controller (eSDHC)
-
-The Enhanced Secure Digital Host Controller provides an interface
-for MMC, SD, and SDIO types of memory cards.
-
-This file documents differences between the core properties described
-by mmc.txt and the properties used by the sdhci-esdhc driver.
-
-Required properties:
- - compatible : should be "fsl,esdhc", or "fsl,<chip>-esdhc".
- Possible compatibles for PowerPC:
- "fsl,mpc8536-esdhc"
- "fsl,mpc8378-esdhc"
- "fsl,p2020-esdhc"
- "fsl,p4080-esdhc"
- "fsl,t1040-esdhc"
- "fsl,t4240-esdhc"
- Possible compatibles for ARM:
- "fsl,ls1012a-esdhc"
- "fsl,ls1028a-esdhc"
- "fsl,ls1088a-esdhc"
- "fsl,ls1043a-esdhc"
- "fsl,ls1046a-esdhc"
- "fsl,ls2080a-esdhc"
- - clock-frequency : specifies eSDHC base clock frequency.
-
-Optional properties:
- - sdhci,wp-inverted : specifies that eSDHC controller reports
- inverted write-protect state; New devices should use the generic
- "wp-inverted" property.
- - sdhci,1-bit-only : specifies that a controller can only handle
- 1-bit data transfers. New devices should use the generic
- "bus-width = <1>" property.
- - sdhci,auto-cmd12: specifies that a controller can only handle auto
- CMD12.
- - voltage-ranges : two cells are required, first cell specifies minimum
- slot voltage (mV), second cell specifies maximum slot voltage (mV).
- Several ranges could be specified.
- - little-endian : If the host controller is little-endian mode, specify
- this property. The default endian mode is big-endian.
-
-Example:
-
-sdhci@2e000 {
- compatible = "fsl,mpc8378-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <42 0x8>;
- interrupt-parent = <&ipic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- voltage-ranges = <3300 3300>;
-};
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml
index 36acc40c7d18..6e2cdac6a85d 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml
@@ -27,17 +27,19 @@ properties:
maxItems: 1
voltage-ranges:
- $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
description: |
Two cells are required, first cell specifies minimum slot voltage (mV),
second cell specifies maximum slot voltage (mV).
items:
- - description: |
- value for minimum slot voltage in mV
- default: 3200
- - description: |
- value for maximum slot voltage in mV
- default: 3400
+ items:
+ - description: |
+ value for minimum slot voltage in mV
+ default: 3200
+ - description: |
+ value for maximum slot voltage in mV
+ default: 3400
+ maxItems: 1
gpios:
description: |
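As a sketch of the matrix form introduced above, a slot node with a single <min max> voltage pair (the values in mV are illustrative):

    mmc-slot@0 {
        compatible = "mmc-spi-slot";
        reg = <0>;
        spi-max-frequency = <25000000>;
        /* one voltage range entry: 3.3 V minimum, 3.4 V maximum */
        voltage-ranges = <3300 3400>;
    };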
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
index c24c537f62b1..11979b026d21 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
@@ -51,6 +51,7 @@ properties:
- qcom,sdm845-sdhci
- qcom,sdx55-sdhci
- qcom,sdx65-sdhci
+ - qcom,sdx75-sdhci
- qcom,sm6115-sdhci
- qcom,sm6125-sdhci
- qcom,sm6350-sdhci
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-sprd.txt b/Documentation/devicetree/bindings/mmc/sdhci-sprd.txt
deleted file mode 100644
index eb7eb1b529f0..000000000000
--- a/Documentation/devicetree/bindings/mmc/sdhci-sprd.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-* Spreadtrum SDHCI controller (sdhci-sprd)
-
-The Secure Digital (SD) Host controller on Spreadtrum SoCs provides an interface
-for MMC, SD and SDIO types of cards.
-
-This file documents differences between the core properties in mmc.txt
-and the properties used by the sdhci-sprd driver.
-
-Required properties:
-- compatible: Should contain "sprd,sdhci-r11".
-- reg: physical base address of the controller and length.
-- interrupts: Interrupts used by the SDHCI controller.
-- clocks: Should contain phandle for the clock feeding the SDHCI controller
-- clock-names: Should contain the following:
- "sdio" - SDIO source clock (required)
- "enable" - gate clock which used for enabling/disabling the device (required)
- "2x_enable" - gate clock controlling the device for some special platforms (optional)
-
-Optional properties:
-- assigned-clocks: the same with "sdio" clock
-- assigned-clock-parents: the default parent of "sdio" clock
-- pinctrl-names: should be "default", "state_uhs"
-- pinctrl-0: should contain default/high speed pin control
-- pinctrl-1: should contain uhs mode pin control
-
-PHY DLL delays are used to delay the data valid window, and align the window
-to sampling clock. PHY DLL delays can be configured by following properties,
-and each property contains 4 cells which are used to configure the clock data
-write line delay value, clock read command line delay value, clock read data
-positive edge delay value and clock read data negative edge delay value.
-Each cell's delay value unit is cycle of the PHY clock.
-
-- sprd,phy-delay-legacy: Delay value for legacy timing.
-- sprd,phy-delay-sd-highspeed: Delay value for SD high-speed timing.
-- sprd,phy-delay-sd-uhs-sdr50: Delay value for SD UHS SDR50 timing.
-- sprd,phy-delay-sd-uhs-sdr104: Delay value for SD UHS SDR50 timing.
-- sprd,phy-delay-mmc-highspeed: Delay value for MMC high-speed timing.
-- sprd,phy-delay-mmc-ddr52: Delay value for MMC DDR52 timing.
-- sprd,phy-delay-mmc-hs200: Delay value for MMC HS200 timing.
-- sprd,phy-delay-mmc-hs400: Delay value for MMC HS400 timing.
-- sprd,phy-delay-mmc-hs400es: Delay value for MMC HS400 enhanced strobe timing.
-
-Examples:
-
-sdio0: sdio@20600000 {
- compatible = "sprd,sdhci-r11";
- reg = <0 0x20600000 0 0x1000>;
- interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
-
- clock-names = "sdio", "enable";
- clocks = <&ap_clk CLK_EMMC_2X>,
- <&apahb_gate CLK_EMMC_EB>;
- assigned-clocks = <&ap_clk CLK_EMMC_2X>;
- assigned-clock-parents = <&rpll CLK_RPLL_390M>;
-
- pinctrl-names = "default", "state_uhs";
- pinctrl-0 = <&sd0_pins_default>;
- pinctrl-1 = <&sd0_pins_uhs>;
-
- sprd,phy-delay-sd-uhs-sdr104 = <0x3f 0x7f 0x2e 0x2e>;
- bus-width = <8>;
- non-removable;
- no-sdio;
- no-sd;
- cap-mmc-hw-reset;
- status = "okay";
-};
diff --git a/Documentation/devicetree/bindings/mmc/sprd,sdhci-r11.yaml b/Documentation/devicetree/bindings/mmc/sprd,sdhci-r11.yaml
new file mode 100644
index 000000000000..b08081bc018b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sprd,sdhci-r11.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/sprd,sdhci-r11.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SDHCI controller
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ const: sprd,sdhci-r11
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ items:
+ - description: SDIO source clock
+ - description: gate clock for enabling/disabling the device
+ - description: gate clock controlling the device for some special platforms (optional)
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: sdio
+ - const: enable
+ - const: 2x_enable
+
+ pinctrl-0:
+ description: default/high speed pin control
+ maxItems: 1
+
+ pinctrl-1:
+ description: UHS mode pin control
+ maxItems: 1
+
+ pinctrl-names:
+ minItems: 1
+ items:
+ - const: default
+ - const: state_uhs
+
+patternProperties:
+ "^sprd,phy-delay-(legacy|mmc-(ddr52|highspeed|hs[24]00|hs400es)|sd-(highspeed|uhs-sdr(50|104)))$":
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: clock data write line delay value
+ - description: clock read command line delay value
+ - description: clock read data positive edge delay value
+ - description: clock read data negative edge delay value
+ description:
+ PHY DLL delays are used to delay the data valid window and align
+ the window to the sampling clock. Each cell's delay value is in
+ units of PHY clock cycles.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: sdhci-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sprd,sc9860-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mmc@50430000 {
+ compatible = "sprd,sdhci-r11";
+ reg = <0x50430000 0x1000>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&aon_prediv CLK_EMMC_2X>,
+ <&apahb_gate CLK_EMMC_EB>,
+ <&aon_gate CLK_EMMC_2X_EN>;
+ clock-names = "sdio", "enable", "2x_enable";
+
+ pinctrl-0 = <&sd0_pins_default>;
+ pinctrl-1 = <&sd0_pins_uhs>;
+ pinctrl-names = "default", "state_uhs";
+
+ bus-width = <8>;
+ cap-mmc-hw-reset;
+ mmc-hs400-enhanced-strobe;
+ mmc-hs400-1_8v;
+ mmc-hs200-1_8v;
+ mmc-ddr-1_8v;
+ non-removable;
+ no-sdio;
+ no-sd;
+
+ sprd,phy-delay-mmc-ddr52 = <0x3f 0x75 0x14 0x14>;
+ sprd,phy-delay-mmc-hs200 = <0x0 0x8c 0x8c 0x8c>;
+ sprd,phy-delay-mmc-hs400 = <0x44 0x7f 0x2e 0x2e>;
+ sprd,phy-delay-mmc-hs400es = <0x3f 0x3f 0x2e 0x2e>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 4598930851d9..e36c35b17873 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -60,15 +60,6 @@ Required properties:
- reg: should contain 2 register ranges. The first one is pointing to the PMECC
block, and the second one to the PMECC_ERRLOC block.
-* SAMA5 NFC I/O bindings:
-
-SAMA5 SoCs embed an advanced NAND controller logic to automate READ/WRITE page
-operations. This interface to this logic is placed in a separate I/O range and
-should thus have its own DT node.
-
-- compatible: should be "atmel,sama5d3-nfc-io", "syscon".
-- reg: should contain the I/O range used to interact with the NFC logic.
-
Example:
nfc_io: nfc-io@70000000 {
diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
new file mode 100644
index 000000000000..c578637c5826
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
@@ -0,0 +1,143 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/airoha,en7581-eth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha EN7581 Frame Engine Ethernet controller
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+
+description:
+ The frame engine ethernet controller can be found on Airoha SoCs.
+ These SoCs have multi-GMAC ports.
+
+properties:
+ compatible:
+ enum:
+ - airoha,en7581-eth
+
+ reg:
+ items:
+ - description: Frame engine base address
+ - description: QDMA0 base address
+ - description: QDMA1 base address
+
+ reg-names:
+ items:
+ - const: fe
+ - const: qdma0
+ - const: qdma1
+
+ interrupts:
+ items:
+ - description: QDMA lan irq0
+ - description: QDMA lan irq1
+ - description: QDMA lan irq2
+ - description: QDMA lan irq3
+ - description: QDMA wan irq0
+ - description: QDMA wan irq1
+ - description: QDMA wan irq2
+ - description: QDMA wan irq3
+ - description: FE error irq
+ - description: PDMA irq
+
+ resets:
+ maxItems: 8
+
+ reset-names:
+ items:
+ - const: fe
+ - const: pdma
+ - const: qdma
+ - const: xsi-mac
+ - const: hsi0-mac
+ - const: hsi1-mac
+ - const: hsi-mac
+ - const: xfp-mac
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^ethernet@[1-4]$":
+ type: object
+ unevaluatedProperties: false
+ $ref: ethernet-controller.yaml#
+ description:
+ Ethernet GMAC port associated with the MAC controller
+ properties:
+ compatible:
+ const: airoha,eth-mac
+
+ reg:
+ minimum: 1
+ maximum: 4
+ description: GMAC port identifier
+
+ required:
+ - reg
+ - compatible
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - resets
+ - reset-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/en7523-clk.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ eth: ethernet@1fb50000 {
+ compatible = "airoha,en7581-eth";
+ reg = <0 0x1fb50000 0 0x2600>,
+ <0 0x1fb54000 0 0x2000>,
+ <0 0x1fb56000 0 0x2000>;
+ reg-names = "fe", "qdma0", "qdma1";
+
+ resets = <&scuclk 44>,
+ <&scuclk 30>,
+ <&scuclk 31>,
+ <&scuclk 6>,
+ <&scuclk 15>,
+ <&scuclk 16>,
+ <&scuclk 17>,
+ <&scuclk 26>;
+ reset-names = "fe", "pdma", "qdma", "xsi-mac",
+ "hsi0-mac", "hsi1-mac", "hsi-mac",
+ "xfp-mac";
+
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mac: ethernet@1 {
+ compatible = "airoha,eth-mac";
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/arc_emac.txt b/Documentation/devicetree/bindings/net/arc_emac.txt
deleted file mode 100644
index c73a0e9c625e..000000000000
--- a/Documentation/devicetree/bindings/net/arc_emac.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-* Synopsys ARC EMAC 10/100 Ethernet driver (EMAC)
-
-Required properties:
-- compatible: Should be "snps,arc-emac"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the EMAC interrupts
-- max-speed: see ethernet.txt file in the same directory.
-- phy: see ethernet.txt file in the same directory.
-
-Optional properties:
-- phy-reset-gpios : Should specify the gpio for phy reset
-- phy-reset-duration : Reset duration in milliseconds. Should present
- only if property "phy-reset-gpios" is available. Missing the property
- will have the duration be 1 millisecond. Numbers greater than 1000 are
- invalid and 1 millisecond will be used instead.
-
-Clock handling:
-The clock frequency is needed to calculate and set polling period of EMAC.
-It must be provided by one of:
-- clock-frequency: CPU frequency.
-- clocks: reference to the clock supplying the EMAC.
-
-Child nodes of the driver are the individual PHY devices connected to the
-MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus.
-
-Examples:
-
- ethernet@c0fc2000 {
- compatible = "snps,arc-emac";
- reg = <0xc0fc2000 0x3c>;
- interrupts = <6>;
- mac-address = [ 00 11 22 33 44 55 ];
-
- clock-frequency = <80000000>;
- /* or */
- clocks = <&emac_clock>;
-
- max-speed = <100>;
- phy = <&phy0>;
-
- #address-cells = <1>;
- #size-cells = <0>;
- phy0: ethernet-phy@0 {
- reg = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml
new file mode 100644
index 000000000000..3f9e69208127
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/bluetooth/mediatek,mt7622-bluetooth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek SoC built-in Bluetooth
+
+description:
+ This device is attached to the BTIF serial interface and thus must be a
+ child node of the BTIF serial node. The dt-binding details for the BTIF
+ device are described in Documentation/devicetree/bindings/serial/8250.yaml.
+
+maintainers:
+ - Sean Wang <sean.wang@mediatek.com>
+
+allOf:
+ - $ref: bluetooth-controller.yaml#
+
+properties:
+ compatible:
+ const: mediatek,mt7622-bluetooth
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: ref
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - clocks
+ - clock-names
+ - power-domains
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/power/mt7622-power.h>
+
+ serial {
+ bluetooth {
+ compatible = "mediatek,mt7622-bluetooth";
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+ clocks = <&clk25m>;
+ clock-names = "ref";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
index f01a3988538c..37a65badb448 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
@@ -31,6 +31,9 @@ properties:
This property depends on the module vendor's
configuration.
+ firmware-name:
+ maxItems: 1
+
required:
- compatible
@@ -42,5 +45,6 @@ examples:
bluetooth {
compatible = "nxp,88w8987-bt";
fw-init-baudrate = <3000000>;
+ firmware-name = "uartuart8987_bt_v0.bin";
};
};
diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
index 055a3351880b..68c5ed111417 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
@@ -62,6 +62,9 @@ properties:
vdddig-supply:
description: VDD_DIG supply regulator handle
+ vddbtcmx-supply:
+ description: VDD_BT_CMX supply regulator handle
+
vddbtcxmx-supply:
description: VDD_BT_CXMX supply regulator handle
@@ -74,6 +77,9 @@ properties:
vddrfa1p7-supply:
description: VDD_RFA_1P7 supply regulator handle
+ vddrfa1p8-supply:
+ description: VDD_RFA_1P8 supply regulator handle
+
vddrfa1p2-supply:
description: VDD_RFA_1P2 supply regulator handle
@@ -86,6 +92,12 @@ properties:
vddasd-supply:
description: VDD_ASD supply regulator handle
+ vddwlcx-supply:
+ description: VDD_WLCX supply regulator handle
+
+ vddwlmx-supply:
+ description: VDD_WLMX supply regulator handle
+
max-speed:
description: see Documentation/devicetree/bindings/serial/serial.yaml
@@ -176,14 +188,27 @@ allOf:
- qcom,wcn7850-bt
then:
required:
- - enable-gpios
- - swctrl-gpios
- - vddio-supply
+ - vddrfacmn-supply
+ - vddaon-supply
+ - vddwlcx-supply
+ - vddwlmx-supply
+ - vddrfa0p8-supply
+ - vddrfa1p2-supply
+ - vddrfa1p8-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,qca6390-bt
+ then:
+ required:
+ - vddrfacmn-supply
- vddaon-supply
- - vdddig-supply
+ - vddbtcmx-supply
- vddrfa0p8-supply
- vddrfa1p2-supply
- - vddrfa1p9-supply
+ - vddrfa1p7-supply
examples:
- |
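    /*
     * A minimal sketch of a QCA6390 node satisfying the updated list of
     * required supplies; the regulator phandle labels are placeholders.
     */
    serial {
        bluetooth {
            compatible = "qcom,qca6390-bt";
            vddrfacmn-supply = <&vreg_rfacmn>;
            vddaon-supply = <&vreg_aon>;
            vddbtcmx-supply = <&vreg_btcmx>;
            vddrfa0p8-supply = <&vreg_rfa0p8>;
            vddrfa1p2-supply = <&vreg_rfa1p2>;
            vddrfa1p7-supply = <&vreg_rfa1p7>;
        };
    };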
diff --git a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
index 8d4e5af6fd6c..40835497050a 100644
--- a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
+++ b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
@@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/net/can/xilinx,can.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title:
- Xilinx Axi CAN/Zynq CANPS controller
+ Xilinx CAN and CANFD controller
maintainers:
- Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index 2c71e2cf3a2f..3c30dd23cd4e 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -146,6 +146,7 @@ patternProperties:
magic-packet:
type: boolean
+ deprecated: true
description:
Indicates that the hardware supports waking up via magic packet.
diff --git a/Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml b/Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml
new file mode 100644
index 000000000000..f3154b19af78
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml
@@ -0,0 +1,202 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/lantiq,gswip.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq GSWIP Ethernet switches
+
+allOf:
+ - $ref: dsa.yaml#/$defs/ethernet-ports
+
+maintainers:
+ - Hauke Mehrtens <hauke@hauke-m.de>
+
+properties:
+ compatible:
+ enum:
+ - lantiq,xrx200-gswip
+ - lantiq,xrx300-gswip
+ - lantiq,xrx330-gswip
+
+ reg:
+ minItems: 3
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: switch
+ - const: mdio
+ - const: mii
+
+ mdio:
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ const: lantiq,xrx200-mdio
+
+ required:
+ - compatible
+
+ gphy-fw:
+ type: object
+ properties:
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ compatible:
+ items:
+ - enum:
+ - lantiq,xrx200-gphy-fw
+ - lantiq,xrx300-gphy-fw
+ - lantiq,xrx330-gphy-fw
+ - const: lantiq,gphy-fw
+
+ lantiq,rcu:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to the RCU syscon
+
+ patternProperties:
+ "^gphy@[0-9a-f]{1,2}$":
+ type: object
+
+ additionalProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 255
+ description:
+ Offset of the GPHY firmware register in the RCU register range
+
+ resets:
+ items:
+ - description: GPHY reset line
+
+ reset-names:
+ items:
+ - const: gphy
+
+ required:
+ - reg
+
+ required:
+ - compatible
+ - lantiq,rcu
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ switch@e108000 {
+ compatible = "lantiq,xrx200-gswip";
+ reg = <0xe108000 0x3100>, /* switch */
+ <0xe10b100 0xd8>, /* mdio */
+ <0xe10b1d8 0x130>; /* mii */
+ dsa,member = <0 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan3";
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan4";
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-mode = "internal";
+ phy-handle = <&phy11>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan1";
+ phy-mode = "internal";
+ phy-handle = <&phy13>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "wan";
+ phy-mode = "rgmii";
+ phy-handle = <&phy5>;
+ };
+
+ port@6 {
+ reg = <0x6>;
+ phy-mode = "internal";
+ ethernet = <&eth0>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "lantiq,xrx200-mdio";
+
+ phy0: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <0x1>;
+ };
+ phy5: ethernet-phy@5 {
+ reg = <0x5>;
+ };
+ phy11: ethernet-phy@11 {
+ reg = <0x11>;
+ };
+ phy13: ethernet-phy@13 {
+ reg = <0x13>;
+ };
+ };
+
+ gphy-fw {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw";
+ lantiq,rcu = <&rcu0>;
+
+ gphy@20 {
+ reg = <0x20>;
+
+ resets = <&reset0 31 30>;
+ reset-names = "gphy";
+ };
+
+ gphy@68 {
+ reg = <0x68>;
+
+ resets = <&reset0 29 28>;
+ reset-names = "gphy";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt
deleted file mode 100644
index 8bb1eff21cb1..000000000000
--- a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt
+++ /dev/null
@@ -1,146 +0,0 @@
-Lantiq GSWIP Ethernet switches
-==================================
-
-Required properties for GSWIP core:
-
-- compatible : "lantiq,xrx200-gswip" for the embedded GSWIP in the
- xRX200 SoC
- "lantiq,xrx300-gswip" for the embedded GSWIP in the
- xRX300 SoC
- "lantiq,xrx330-gswip" for the embedded GSWIP in the
- xRX330 SoC
-- reg : memory range of the GSWIP core registers
- : memory range of the GSWIP MDIO registers
- : memory range of the GSWIP MII registers
-
-See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of
-additional required and optional properties.
-
-
-Required properties for MDIO bus:
-- compatible : "lantiq,xrx200-mdio" for the MDIO bus inside the GSWIP
- core of the xRX200 SoC and the PHYs connected to it.
-
-See Documentation/devicetree/bindings/net/mdio.txt for a list of additional
-required and optional properties.
-
-
-Required properties for GPHY firmware loading:
-- compatible : "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw"
- "lantiq,xrx300-gphy-fw", "lantiq,gphy-fw"
- "lantiq,xrx330-gphy-fw", "lantiq,gphy-fw"
- for the loading of the firmware into the embedded
- GPHY core of the SoC.
-- lantiq,rcu : reference to the rcu syscon
-
-The GPHY firmware loader has a list of GPHY entries, one for each
-embedded GPHY
-
-- reg : Offset of the GPHY firmware register in the RCU
- register range
-- resets : list of resets of the embedded GPHY
-- reset-names : list of names of the resets
-
-Example:
-
-Ethernet switch on the VRX200 SoC:
-
-switch@e108000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "lantiq,xrx200-gswip";
- reg = < 0xe108000 0x3100 /* switch */
- 0xe10b100 0xd8 /* mdio */
- 0xe10b1d8 0x130 /* mii */
- >;
- dsa,member = <0 0>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- label = "lan3";
- phy-mode = "rgmii";
- phy-handle = <&phy0>;
- };
-
- port@1 {
- reg = <1>;
- label = "lan4";
- phy-mode = "rgmii";
- phy-handle = <&phy1>;
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- phy-mode = "internal";
- phy-handle = <&phy11>;
- };
-
- port@4 {
- reg = <4>;
- label = "lan1";
- phy-mode = "internal";
- phy-handle = <&phy13>;
- };
-
- port@5 {
- reg = <5>;
- label = "wan";
- phy-mode = "rgmii";
- phy-handle = <&phy5>;
- };
-
- port@6 {
- reg = <0x6>;
- ethernet = <&eth0>;
- };
- };
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "lantiq,xrx200-mdio";
- reg = <0>;
-
- phy0: ethernet-phy@0 {
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- reg = <0x1>;
- };
- phy5: ethernet-phy@5 {
- reg = <0x5>;
- };
- phy11: ethernet-phy@11 {
- reg = <0x11>;
- };
- phy13: ethernet-phy@13 {
- reg = <0x13>;
- };
- };
-
- gphy-fw {
- compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw";
- lantiq,rcu = <&rcu0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- gphy@20 {
- reg = <0x20>;
-
- resets = <&reset0 31 30>;
- reset-names = "gphy";
- };
-
- gphy@68 {
- reg = <0x68>;
-
- resets = <&reset0 29 28>;
- reset-names = "gphy";
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
index 1c2444121e60..7e405ad96eb2 100644
--- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
@@ -22,16 +22,16 @@ description: |
The MT7988 SoC comes with a built-in switch similar to MT7531 as well as four
Gigabit Ethernet PHYs. The switch registers are directly mapped into the SoC's
- memory map rather than using MDIO. The switch got an internally connected 10G
+ memory map rather than using MDIO. The switch has an internally connected 10G
CPU port and 4 user ports connected to the built-in Gigabit Ethernet PHYs.
- MT7530 in MT7620AN, MT7620DA, MT7620DAN and MT7620NN SoCs has got 10/100 PHYs
+ The MT7530 in MT7620AN, MT7620DA, MT7620DAN and MT7620NN SoCs has 10/100 PHYs
and the switch registers are directly mapped into SoC's memory map rather than
using MDIO. The DSA driver currently doesn't support MT7620 variants.
There is only the standalone version of MT7531.
- Port 5 on MT7530 has got various ways of configuration:
+ Port 5 on MT7530 supports various configurations:
- Port 5 can be used as a CPU port.
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
deleted file mode 100644
index 258bef483673..000000000000
--- a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
+++ /dev/null
@@ -1,129 +0,0 @@
-Vitesse VSC73xx Switches
-========================
-
-This defines device tree bindings for the Vitesse VSC73xx switch chips.
-The Vitesse company has been acquired by Microsemi and Microsemi has
-been acquired Microchip but retains this vendor branding.
-
-The currently supported switch chips are:
-Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
-Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
-Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
-Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
-
-This switch could have two different management interface.
-
-If SPI interface is used, the device tree node is an SPI device so it must
-reside inside a SPI bus device tree node, see spi/spi-bus.txt
-
-When the chip is connected to a parallel memory bus and work in memory-mapped
-I/O mode, a platform device is used to represent the vsc73xx. In this case it
-must reside inside a platform bus device tree node.
-
-Required properties:
-
-- compatible: must be exactly one of:
- "vitesse,vsc7385"
- "vitesse,vsc7388"
- "vitesse,vsc7395"
- "vitesse,vsc7398"
-- gpio-controller: indicates that this switch is also a GPIO controller,
- see gpio/gpio.txt
-- #gpio-cells: this must be set to <2> and indicates that we are a twocell
- GPIO controller, see gpio/gpio.txt
-
-Optional properties:
-
-- reset-gpios: a handle to a GPIO line that can issue reset of the chip.
- It should be tagged as active low.
-
-Required subnodes:
-
-See net/dsa/dsa.txt for a list of additional required and optional properties
-and subnodes of DSA switches.
-
-Examples:
-
-SPI:
-switch@0 {
- compatible = "vitesse,vsc7395";
- reg = <0>;
- /* Specified for 2.5 MHz or below */
- spi-max-frequency = <2500000>;
- gpio-controller;
- #gpio-cells = <2>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- label = "lan1";
- };
- port@1 {
- reg = <1>;
- label = "lan2";
- };
- port@2 {
- reg = <2>;
- label = "lan3";
- };
- port@3 {
- reg = <3>;
- label = "lan4";
- };
- vsc: port@6 {
- reg = <6>;
- ethernet = <&gmac1>;
- phy-mode = "rgmii";
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
- };
- };
-};
-
-Platform:
-switch@2,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "vitesse,vsc7385";
- reg = <0x2 0x0 0x20000>;
- reset-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- label = "lan1";
- };
- port@1 {
- reg = <1>;
- label = "lan2";
- };
- port@2 {
- reg = <2>;
- label = "lan3";
- };
- port@3 {
- reg = <3>;
- label = "lan4";
- };
- vsc: port@6 {
- reg = <6>;
- ethernet = <&enet0>;
- phy-mode = "rgmii";
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
- };
- };
-
-};
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml
new file mode 100644
index 000000000000..b99d7a694b70
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml
@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/vitesse,vsc73xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Vitesse VSC73xx DSA Switches
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ The Vitesse DSA Switches were produced in the early-to-mid 2000s.
+
+ The Vitesse company has been acquired by Microsemi, and Microsemi has
+ been acquired by Microchip, but the new owner retains this vendor branding.
+
+ The currently supported switch chips are
+ Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+
+ This switch can use one of two different management interfaces.
+
+ If the SPI interface is used, the device tree node is an SPI device, so it
+ must reside inside an SPI bus device tree node; see spi/spi-bus.txt.
+
+ When the chip is connected to a parallel memory bus and works in memory-mapped
+ I/O mode, a platform device is used to represent the vsc73xx. In this case it
+ must reside inside a platform bus device tree node.
+
+properties:
+ compatible:
+ enum:
+ - vitesse,vsc7385
+ - vitesse,vsc7388
+ - vitesse,vsc7395
+ - vitesse,vsc7398
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+ "#gpio-cells":
+ const: 2
+
+ reset-gpios:
+ description: GPIO to be used to reset the whole device
+ maxItems: 1
+
+allOf:
+ - $ref: dsa.yaml#/$defs/ethernet-ports
+
+# This checks whether reg is a chipselect, meaning the device is on an SPI
+# bus; the if-clause will fail if reg is a tuple, as it is for a
+# platform device.
+if:
+ properties:
+ reg:
+ minimum: 0
+ maximum: 256
+then:
+ $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-switch@0 {
+ compatible = "vitesse,vsc7395";
+ reg = <0>;
+ spi-max-frequency = <2500000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ ethernet-port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ ethernet-port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ ethernet-port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ ethernet-port@6 {
+ reg = <6>;
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+
+ bus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ethernet-switch@10000000 {
+ compatible = "vitesse,vsc7385";
+ reg = <0x10000000 0x20000>;
+ reset-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ ethernet-port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ ethernet-port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ ethernet-port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ ethernet-port@6 {
+ reg = <6>;
+ ethernet = <&enet0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index b2785b03139f..45819b235800 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -103,6 +103,7 @@ properties:
- usxgmii
- 10gbase-r
- 25gbase-r
+ - 10g-qxgmii
phy-mode:
$ref: "#/properties/phy-connection-type"
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index 8fb2a6ee7e5b..d9b62741a225 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -93,6 +93,14 @@ properties:
the turn around line low at end of the control phase of the
MDIO transaction.
+ brr-mode:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If set, indicates the network cable interface is an alternative one as
+ defined in the BroadR-Reach link mode specification under 1BR-100 and
+ 1BR-10 names. The PHY must be configured to operate in BroadR-Reach mode
+ by software.
+
clocks:
maxItems: 1
description:
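A minimal sketch of how the new flag might appear on a PHY node; the MDIO bus layout and PHY address are illustrative:

    mdio {
        #address-cells = <1>;
        #size-cells = <0>;

        ethernet-phy@1 {
            reg = <1>;
            /* single-pair BroadR-Reach (1BR-100 / 1BR-10) cable interface */
            brr-mode;
        };
    };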
diff --git a/Documentation/devicetree/bindings/net/fsl,enetc-ierb.yaml b/Documentation/devicetree/bindings/net/fsl,enetc-ierb.yaml
new file mode 100644
index 000000000000..c8a654310b90
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,enetc-ierb.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,enetc-ierb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Integrated Endpoint Register Block
+
+description:
+ The fsl_enetc driver can probe on the Integrated Endpoint Register Block,
+ which preconfigures the FIFO limits for the ENETC ports.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+ - Vladimir Oltean <vladimir.oltean@nxp.com>
+ - Wei Fang <wei.fang@nxp.com>
+ - Claudiu Manoil <claudiu.manoil@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,ls1028a-enetc-ierb
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ endpoint-config@f0800000 {
+ compatible = "fsl,ls1028a-enetc-ierb";
+ reg = <0xf0800000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/net/fsl,enetc-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,enetc-mdio.yaml
new file mode 100644
index 000000000000..c1dd6aa04321
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,enetc-mdio.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,enetc-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ENETC external MDIO PCIe endpoint device
+
+description:
+ NETC provides an external master MDIO interface (EMDIO) for managing external
+ devices (PHYs). EMDIO supports both Clause 22 and Clause 45 protocols, and
+ provides a means for different software modules to share a single set of
+ MDIO signals to access their PHYs.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+ - Vladimir Oltean <vladimir.oltean@nxp.com>
+ - Wei Fang <wei.fang@nxp.com>
+ - Claudiu Manoil <claudiu.manoil@nxp.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - pci1957,ee01
+ - const: fsl,enetc-mdio
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: mdio.yaml
+ - $ref: /schemas/pci/pci-device.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pcie{
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ mdio@0,3 {
+ compatible = "pci1957,ee01", "fsl,enetc-mdio";
+ reg = <0x000300 0 0 0 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@2 {
+ reg = <0x2>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/fsl,enetc.yaml b/Documentation/devicetree/bindings/net/fsl,enetc.yaml
new file mode 100644
index 000000000000..e152c93998fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,enetc.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,enetc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: The NIC functionality of NXP NETC
+
+description:
+ The NIC functionality in NETC is known as the EtherNET Controller (ENETC).
+ ENETC supports virtualization/isolation based on PCIe Single Root IO
+ Virtualization (SR-IOV), advanced QoS with 8 traffic classes and 4 drop
+ resilience levels, and a full range of TSN standards and NIC offload
+ capabilities.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+ - Vladimir Oltean <vladimir.oltean@nxp.com>
+ - Wei Fang <wei.fang@nxp.com>
+ - Claudiu Manoil <claudiu.manoil@nxp.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - pci1957,e100
+ - const: fsl,enetc
+
+ reg:
+ maxItems: 1
+
+ mdio:
+ $ref: mdio.yaml
+ unevaluatedProperties: false
+ description: Optional child MDIO node for the ENETC instance; otherwise the NETC EMDIO is used.
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/pci/pci-device.yaml
+ - $ref: ethernet-controller.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pcie {
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ethernet@0,0 {
+ compatible = "pci1957,e100", "fsl,enetc";
+ reg = <0x000000 0 0 0 0>;
+ phy-handle = <&sgmii_phy0>;
+ phy-connection-type = "sgmii";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy@2 {
+ reg = <0x2>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
index c80c880a9dab..60aaf30d68ed 100644
--- a/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
@@ -128,7 +128,6 @@ required:
- cell-index
- reg
- fsl,fman-ports
- - ptp-timer
dependencies:
pcs-handle-names:
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,fman-mdio.yaml
new file mode 100644
index 000000000000..6b2c0aa407a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman-mdio.yaml
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Frame Manager MDIO Device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description: FMan MDIO Node.
+ The MDIO is a bus to which the PHY devices are connected.
+
+properties:
+ compatible:
+ enum:
+ - fsl,fman-mdio
+ - fsl,fman-xmdio
+ - fsl,fman-memac-mdio
+ description:
+ Must include "fsl,fman-mdio" for 1 Gb/s MDIO from FMan v2.
+ Must include "fsl,fman-xmdio" for 10 Gb/s MDIO from FMan v2.
+ Must include "fsl,fman-memac-mdio" for 1/10 Gb/s MDIO from
+ FMan v3.
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: A reference to the input clock of the controller
+ from which the MDC frequency is derived.
+
+ interrupts:
+ maxItems: 1
+
+ fsl,fman-internal-mdio:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ FMan has an internal MDIO for internal PCS (Physical
+ Coding Sublayer) PHYs and an external MDIO for external PHYs.
+ The settings and programming routines for internal/external
+ MDIO are different. Must be included for internal MDIO.
+
+ fsl,erratum-a009885:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Indicates the presence of the A009885
+ erratum describing that the contents of MDIO_DATA may
+ become corrupt unless it is read within 16 MDC cycles
+ of MDIO_CFG[BSY] being cleared, when performing an
+ MDIO read operation.
+
+ fsl,erratum-a011043:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Indicates the presence of the A011043 erratum
+ describing that the MDIO_CFG[MDIO_RD_ER] bit may be falsely
+ set when reading internal PCS registers. MDIO reads to
+ internal PCS registers may result in having the
+ MDIO_CFG[MDIO_RD_ER] bit set, even when there is no error and
+ read data (MDIO_DATA[MDIO_DATA]) is correct.
+ Software may get a false read error when reading internal
+ PCS registers through MDIO. As a workaround, all internal
+ MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit.
+
+ For an internal PHY device on an internal MDIO bus, a PHY node should be
+ created. See the definition of the PHY node in booting-without-of.txt for an
+ example of how to define a PHY (an internal PHY has no interrupt line).
+ - For an "fsl,fman-mdio" compatible internal MDIO bus, the PHY is a TBI PHY.
+ - For an "fsl,fman-memac-mdio" compatible internal MDIO bus, the PHY is a
+ PCS PHY. The PCS PHY address should correspond to the value of the
+ appropriate MDEV_PORT.
+
+ little-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The IP block is in little-endian mode. The default endian mode is big-endian.
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: mdio.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio@f1000 {
+ compatible = "fsl,fman-xmdio";
+ reg = <0xf1000 0x1000>;
+ interrupts = <101 2 0 0>;
+ };
+
+ - |
+ mdio@e3120 {
+ compatible = "fsl,fman-mdio";
+ reg = <0xe3120 0xee0>;
+ fsl,fman-internal-mdio;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tbi-phy@8 {
+ reg = <0x8>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ - |
+ mdio@f1000 {
+ compatible = "fsl,fman-memac-mdio";
+ reg = <0xf1000 0x1000>;
+ fsl,fman-internal-mdio;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcsphy6: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-muram.yaml b/Documentation/devicetree/bindings/net/fsl,fman-muram.yaml
new file mode 100644
index 000000000000..aa71acc7fa5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman-muram.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman-muram.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Frame Manager MURAM Device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description: |
+ FMan Internal memory - shared between all the FMan modules.
+ It contains data structures that are common and written to or read by
+ the modules.
+
+ FMan internal memory is split into the following parts:
+ Packet buffering (Tx/Rx FIFOs)
+ Frames internal context
+
+properties:
+ compatible:
+ enum:
+ - fsl,fman-muram
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ muram@0 {
+ compatible = "fsl,fman-muram";
+ reg = <0x0 0x28000>;
+ };
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-port.yaml b/Documentation/devicetree/bindings/net/fsl,fman-port.yaml
new file mode 100644
index 000000000000..9de445307830
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman-port.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman-port.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Frame Manager Port Device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description: |
+ The Frame Manager (FMan) supports several types of hardware ports:
+ Ethernet receiver (RX)
+ Ethernet transmitter (TX)
+ Offline/Host command (O/H)
+
+properties:
+ compatible:
+ enum:
+ - fsl,fman-v2-port-oh
+ - fsl,fman-v2-port-rx
+ - fsl,fman-v2-port-tx
+ - fsl,fman-v3-port-oh
+ - fsl,fman-v3-port-rx
+ - fsl,fman-v3-port-tx
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Specifies the hardware port id.
+ Each hardware port on the FMan has its own hardware PortID.
+ The superset of all hardware Port IDs is available in the FMan
+ Reference Manual, in the "FMan Hardware Ports in Freescale Devices" table.
+
+ Each hardware port is assigned a 4KB, port-specific page in
+ the FMan hardware port memory region (which is part of the
+ FMan memory map). The first 4 KB in the FMan hardware ports
+ memory region is used for what are called common registers.
+ The subsequent 63 4KB pages are allocated to the hardware
+ ports.
+ The page of a specific port is determined by the cell-index.
+
+ reg:
+ items:
+ - description: There is one reg region describing the port
+ configuration registers.
+
+ fsl,fman-10g-port:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: The default port rate is 1G.
+ If this property exists, the port is a 10G port.
+
+ fsl,fman-best-effort-port:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: The default port rate is 1G.
+ Can be defined only if fsl,fman-10g-port is set.
+ This property marks a best-effort 10G port (a 10G port that
+ may not be capable of line rate).
+
+required:
+ - compatible
+ - reg
+ - cell-index
+
+additionalProperties: false
+
+examples:
+ - |
+ port@a8000 {
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xa8000 0x1000>;
+ cell-index = <0x28>;
+ };
+
diff --git a/Documentation/devicetree/bindings/net/fsl,fman.yaml b/Documentation/devicetree/bindings/net/fsl,fman.yaml
new file mode 100644
index 000000000000..9bbf39ef31a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman.yaml
@@ -0,0 +1,210 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Frame Manager Device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ Because the FMan is an aggregation of sub-engines (ports, MACs, etc.),
+ the FMan node has child nodes for each of them.
+
+properties:
+ compatible:
+ enum:
+ - fsl,fman
+ description:
+ FMan version can be determined via FM_IP_REV_1 register in the
+ FMan block. The offset is 0xc4 from the beginning of the
+ Frame Processing Manager memory map (0xc3000 from the
+ beginning of the FMan node).
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Specifies the index of the FMan unit.
+
+ The cell-index value may be used by the SoC to identify the
+ FMan unit in the SoC memory map. The table below describes
+ the cell-index use in each SoC:
+
+ - P1023:
+ register[bit] FMan unit cell-index
+ ============================================================
+ DEVDISR[1] 1 0
+
+ - P2041, P3041, P4080 P5020, P5040:
+ register[bit] FMan unit cell-index
+ ============================================================
+ DCFG_DEVDISR2[6] 1 0
+ DCFG_DEVDISR2[14] 2 1
+ (Second FM available only in P4080 and P5040)
+
+ - B4860, T1040, T2080, T4240:
+ register[bit] FMan unit cell-index
+ ============================================================
+ DCFG_CCSR_DEVDISR2[24] 1 0
+ DCFG_CCSR_DEVDISR2[25] 2 1
+ (Second FM available only in T4240)
+
+ DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
+ the specific SoC "Device Configuration/Pin Control" Memory
+ Map.
+
+ reg:
+ items:
+ - description: BMI configuration registers.
+ - description: QMI configuration registers.
+ - description: DMA configuration registers.
+ - description: FPM configuration registers.
+ - description: FMan controller configuration registers.
+ minItems: 1
+
+ ranges: true
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: fmanclk
+
+ interrupts:
+ items:
+ - description: The first element is associated with the event interrupts.
+ - description: The second element is associated with the error interrupts.
+
+ dma-coherent: true
+
+ ptimer-handle:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: see ptp/fsl,ptp.yaml
+
+ fsl,qman-channel-range:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ Specifies the range of the available dedicated
+ channels in the FMan. The first cell specifies the beginning
+ of the range and the second cell specifies the number of
+ channels.
+ items:
+ - description: The first cell specifies the beginning of the range.
+ - description: |
+ The second cell specifies the number of channels.
+ Further information available at:
+ "Work Queue (WQ) Channel Assignments in the QMan" section
+ in DPAA Reference Manual.
+
+ fsl,qman:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: See soc/fsl/qman.txt
+
+ fsl,bman:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: See soc/fsl/bman.txt
+
+ fsl,erratum-a050385:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: A boolean property. Indicates the presence of
+ erratum A050385, in which DMA transactions that are split
+ can result in a FMan lock.
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+patternProperties:
+ '^muram@[a-f0-9]+$':
+ $ref: fsl,fman-muram.yaml
+
+ '^port@[a-f0-9]+$':
+ $ref: fsl,fman-port.yaml
+
+ '^ethernet@[a-f0-9]+$':
+ $ref: fsl,fman-dtsec.yaml
+
+ '^mdio@[a-f0-9]+$':
+ $ref: fsl,fman-mdio.yaml
+
+ '^phc@[a-f0-9]+$':
+ $ref: /schemas/ptp/fsl,ptp.yaml
+
+required:
+ - compatible
+ - cell-index
+ - reg
+ - ranges
+ - clocks
+ - clock-names
+ - interrupts
+ - fsl,qman-channel-range
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ fman@400000 {
+ compatible = "fsl,fman";
+ reg = <0x400000 0x100000>;
+ ranges = <0 0x400000 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <1>;
+ clocks = <&fman_clk>;
+ clock-names = "fmanclk";
+ interrupts = <96 IRQ_TYPE_EDGE_FALLING>,
+ <16 IRQ_TYPE_EDGE_FALLING>;
+ fsl,qman-channel-range = <0x40 0xc>;
+
+ muram@0 {
+ compatible = "fsl,fman-muram";
+ reg = <0x0 0x28000>;
+ };
+
+ port@81000 {
+ cell-index = <1>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x81000 0x1000>;
+ };
+
+ fman1_rx_0x8: port@88000 {
+ cell-index = <0x8>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x88000 0x1000>;
+ };
+
+ fman1_tx_0x28: port@a8000 {
+ cell-index = <0x28>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xa8000 0x1000>;
+ };
+
+ ethernet@e0000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <0>;
+ reg = <0xe0000 0x1000>;
+ ptp-timer = <&ptp_timer>;
+ fsl,fman-ports = <&fman1_rx_0x8 &fman1_tx_0x28>;
+ tbi-handle = <&tbi5>;
+ };
+
+ ptp_timer: phc@fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0xfe000 0x1000>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ mdio@f1000 {
+ compatible = "fsl,fman-xmdio";
+ reg = <0xf1000 0x1000>;
+ interrupts = <101 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/fsl-enetc.txt b/Documentation/devicetree/bindings/net/fsl-enetc.txt
deleted file mode 100644
index 9b9a3f197e2d..000000000000
--- a/Documentation/devicetree/bindings/net/fsl-enetc.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-* ENETC ethernet device tree bindings
-
-Depending on board design and ENETC port type (internal or
-external) there are two supported link modes specified by
-below device tree bindings.
-
-Required properties:
-
-- reg : Specifies PCIe Device Number and Function
- Number of the ENETC endpoint device, according
- to parent node bindings.
-- compatible : Should be "fsl,enetc".
-
-1. The ENETC external port is connected to a MDIO configurable phy
-
-1.1. Using the local ENETC Port MDIO interface
-
-In this case, the ENETC node should include a "mdio" sub-node
-that in turn should contain the "ethernet-phy" node describing the
-external phy. Below properties are required, their bindings
-already defined in Documentation/devicetree/bindings/net/ethernet.txt or
-Documentation/devicetree/bindings/net/phy.txt.
-
-Required:
-
-- phy-handle : Phandle to a PHY on the MDIO bus.
- Defined in ethernet.txt.
-
-- phy-connection-type : Defined in ethernet.txt.
-
-- mdio : "mdio" node, defined in mdio.txt.
-
-- ethernet-phy : "ethernet-phy" node, defined in phy.txt.
-
-Example:
-
- ethernet@0,0 {
- compatible = "fsl,enetc";
- reg = <0x000000 0 0 0 0>;
- phy-handle = <&sgmii_phy0>;
- phy-connection-type = "sgmii";
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- sgmii_phy0: ethernet-phy@2 {
- reg = <0x2>;
- };
- };
- };
-
-1.2. Using the central MDIO PCIe endpoint device
-
-In this case, the mdio node should be defined as another PCIe
-endpoint node, at the same level with the ENETC port nodes.
-
-Required properties:
-
-- reg : Specifies PCIe Device Number and Function
- Number of the ENETC endpoint device, according
- to parent node bindings.
-- compatible : Should be "fsl,enetc-mdio".
-
-The remaining required mdio bus properties are standard, their bindings
-already defined in Documentation/devicetree/bindings/net/mdio.txt.
-
-Example:
-
- ethernet@0,0 {
- compatible = "fsl,enetc";
- reg = <0x000000 0 0 0 0>;
- phy-handle = <&sgmii_phy0>;
- phy-connection-type = "sgmii";
- };
-
- mdio@0,3 {
- compatible = "fsl,enetc-mdio";
- reg = <0x000300 0 0 0 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- sgmii_phy0: ethernet-phy@2 {
- reg = <0x2>;
- };
- };
-
-2. The ENETC port is an internal port or has a fixed-link external
-connection
-
-In this case, the ENETC port node defines a fixed link connection,
-as specified by Documentation/devicetree/bindings/net/fixed-link.txt.
-
-Required:
-
-- fixed-link : "fixed-link" node, defined in "fixed-link.txt".
-
-Example:
- ethernet@0,2 {
- compatible = "fsl,enetc";
- reg = <0x000200 0 0 0 0>;
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
-
-* Integrated Endpoint Register Block bindings
-
-Optionally, the fsl_enetc driver can probe on the Integrated Endpoint Register
-Block, which preconfigures the FIFO limits for the ENETC ports. This is a node
-with the following properties:
-
-- reg : Specifies the address in the SoC memory space.
-- compatible : Must be "fsl,ls1028a-enetc-ierb".
-
-Example:
- ierb@1f0800000 {
- compatible = "fsl,ls1028a-enetc-ierb";
- reg = <0x01 0xf0800000 0x0 0x10000>;
- };
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
deleted file mode 100644
index bda4b41af074..000000000000
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ /dev/null
@@ -1,548 +0,0 @@
-=============================================================================
-Freescale Frame Manager Device Bindings
-
-CONTENTS
- - FMan Node
- - FMan Port Node
- - FMan MURAM Node
- - FMan dTSEC/XGEC/mEMAC Node
- - FMan IEEE 1588 Node
- - FMan MDIO Node
- - Example
-
-=============================================================================
-FMan Node
-
-DESCRIPTION
-
-Due to the fact that the FMan is an aggregation of sub-engines (ports, MACs,
-etc.) the FMan node will have child nodes for each of them.
-
-PROPERTIES
-
-- compatible
- Usage: required
- Value type: <stringlist>
- Definition: Must include "fsl,fman"
- FMan version can be determined via FM_IP_REV_1 register in the
- FMan block. The offset is 0xc4 from the beginning of the
- Frame Processing Manager memory map (0xc3000 from the
- beginning of the FMan node).
-
-- cell-index
- Usage: required
- Value type: <u32>
- Definition: Specifies the index of the FMan unit.
-
- The cell-index value may be used by the SoC, to identify the
- FMan unit in the SoC memory map. In the table below,
- there's a description of the cell-index use in each SoC:
-
- - P1023:
- register[bit] FMan unit cell-index
- ============================================================
- DEVDISR[1] 1 0
-
- - P2041, P3041, P4080 P5020, P5040:
- register[bit] FMan unit cell-index
- ============================================================
- DCFG_DEVDISR2[6] 1 0
- DCFG_DEVDISR2[14] 2 1
- (Second FM available only in P4080 and P5040)
-
- - B4860, T1040, T2080, T4240:
- register[bit] FMan unit cell-index
- ============================================================
- DCFG_CCSR_DEVDISR2[24] 1 0
- DCFG_CCSR_DEVDISR2[25] 2 1
- (Second FM available only in T4240)
-
- DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
- the specific SoC "Device Configuration/Pin Control" Memory
- Map.
-
-- reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property. Specifies the offset of the
- following configuration registers:
- - BMI configuration registers.
- - QMI configuration registers.
- - DMA configuration registers.
- - FPM configuration registers.
- - FMan controller configuration registers.
-
-- ranges
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property.
-
-- clocks
- Usage: required
- Value type: <prop-encoded-array>
- Definition: phandle for the fman input clock.
-
-- clock-names
- usage: required
- Value type: <stringlist>
- Definition: "fmanclk" for the fman input clock.
-
-- interrupts
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A pair of IRQs are specified in this property.
- The first element is associated with the event interrupts and
- the second element is associated with the error interrupts.
-
-- fsl,qman-channel-range
- Usage: required
- Value type: <prop-encoded-array>
- Definition: Specifies the range of the available dedicated
- channels in the FMan. The first cell specifies the beginning
- of the range and the second cell specifies the number of
- channels.
- Further information available at:
- "Work Queue (WQ) Channel Assignments in the QMan" section
- in DPAA Reference Manual.
-
-- fsl,qman
-- fsl,bman
- Usage: required
- Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
-
-- fsl,erratum-a050385
- Usage: optional
- Value type: boolean
- Definition: A boolean property. Indicates the presence of the
- erratum A050385 which indicates that DMA transactions that are
- split can result in a FMan lock.
-
-=============================================================================
-FMan MURAM Node
-
-DESCRIPTION
-
-FMan Internal memory - shared between all the FMan modules.
-It contains data structures that are common and written to or read by
-the modules.
-FMan internal memory is split into the following parts:
- Packet buffering (Tx/Rx FIFOs)
- Frames internal context
-
-PROPERTIES
-
-- compatible
- Usage: required
- Value type: <stringlist>
- Definition: Must include "fsl,fman-muram"
-
-- ranges
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property.
- Specifies the multi-user memory offset and the size within
- the FMan.
-
-EXAMPLE
-
-muram@0 {
- compatible = "fsl,fman-muram";
- ranges = <0 0x000000 0x28000>;
-};
-
-=============================================================================
-FMan Port Node
-
-DESCRIPTION
-
-The Frame Manager (FMan) supports several types of hardware ports:
- Ethernet receiver (RX)
- Ethernet transmitter (TX)
- Offline/Host command (O/H)
-
-PROPERTIES
-
-- compatible
- Usage: required
- Value type: <stringlist>
- Definition: A standard property.
- Must include one of the following:
- - "fsl,fman-v2-port-oh" for FManV2 OH ports
- - "fsl,fman-v2-port-rx" for FManV2 RX ports
- - "fsl,fman-v2-port-tx" for FManV2 TX ports
- - "fsl,fman-v3-port-oh" for FManV3 OH ports
- - "fsl,fman-v3-port-rx" for FManV3 RX ports
- - "fsl,fman-v3-port-tx" for FManV3 TX ports
-
-- cell-index
- Usage: required
- Value type: <u32>
- Definition: Specifies the hardware port id.
- Each hardware port on the FMan has its own hardware PortID.
- Super set of all hardware Port IDs available at FMan Reference
- Manual under "FMan Hardware Ports in Freescale Devices" table.
-
- Each hardware port is assigned a 4KB, port-specific page in
- the FMan hardware port memory region (which is part of the
- FMan memory map). The first 4 KB in the FMan hardware ports
- memory region is used for what are called common registers.
- The subsequent 63 4KB pages are allocated to the hardware
- ports.
- The page of a specific port is determined by the cell-index.
-
-- reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: There is one reg region describing the port
- configuration registers.
-
-- fsl,fman-10g-port
- Usage: optional
- Value type: boolean
- Definition: The default port rate is 1G.
- If this property exists, the port is s 10G port.
-
-- fsl,fman-best-effort-port
- Usage: optional
- Value type: boolean
- Definition: Can be defined only if 10G-support is set.
- This property marks a best-effort 10G port (10G port that
- may not be capable of line rate).
-
-EXAMPLE
-
-port@a8000 {
- cell-index = <0x28>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xa8000 0x1000>;
-};
-
-port@88000 {
- cell-index = <0x8>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x88000 0x1000>;
-};
-
-port@81000 {
- cell-index = <0x1>;
- compatible = "fsl,fman-v2-port-oh";
- reg = <0x81000 0x1000>;
-};
-
-=============================================================================
-FMan dTSEC/XGEC/mEMAC Node
-
-Refer to Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
-
-============================================================================
-FMan IEEE 1588 Node
-
-Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
-
-=============================================================================
-FMan MDIO Node
-
-DESCRIPTION
-
-The MDIO is a bus to which the PHY devices are connected.
-
-PROPERTIES
-
-- compatible
- Usage: required
- Value type: <stringlist>
- Definition: A standard property.
- Must include "fsl,fman-mdio" for 1 Gb/s MDIO from FMan v2.
- Must include "fsl,fman-xmdio" for 10 Gb/s MDIO from FMan v2.
- Must include "fsl,fman-memac-mdio" for 1/10 Gb/s MDIO from
- FMan v3.
-
-- reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property.
-
-- clocks
- Usage: optional
- Value type: <phandle>
- Definition: A reference to the input clock of the controller
- from which the MDC frequency is derived.
-
-- clock-frequency
- Usage: optional
- Value type: <u32>
- Definition: Specifies the external MDC frequency, in Hertz, to
- be used. Requires that the input clock is specified in the
- "clocks" property. See also: mdio.yaml.
-
-- suppress-preamble
- Usage: optional
- Value type: <boolean>
- Definition: Disable generation of preamble bits. See also:
- mdio.yaml.
-
-- interrupts
- Usage: required for external MDIO
- Value type: <prop-encoded-array>
- Definition: Event interrupt of external MDIO controller.
-
-- fsl,fman-internal-mdio
- Usage: required for internal MDIO
- Value type: boolean
- Definition: Fman has internal MDIO for internal PCS(Physical
- Coding Sublayer) PHYs and external MDIO for external PHYs.
- The settings and programming routines for internal/external
- MDIO are different. Must be included for internal MDIO.
-
-- fsl,erratum-a009885
- Usage: optional
- Value type: <boolean>
- Definition: Indicates the presence of the A009885
- erratum describing that the contents of MDIO_DATA may
- become corrupt unless it is read within 16 MDC cycles
- of MDIO_CFG[BSY] being cleared, when performing an
- MDIO read operation.
-
-- fsl,erratum-a011043
- Usage: optional
- Value type: <boolean>
- Definition: Indicates the presence of the A011043 erratum
- describing that the MDIO_CFG[MDIO_RD_ER] bit may be falsely
- set when reading internal PCS registers. MDIO reads to
- internal PCS registers may result in having the
- MDIO_CFG[MDIO_RD_ER] bit set, even when there is no error and
- read data (MDIO_DATA[MDIO_DATA]) is correct.
- Software may get false read error when reading internal
- PCS registers through MDIO. As a workaround, all internal
- MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit.
-
-For internal PHY device on internal mdio bus, a PHY node should be created.
-See the definition of the PHY node in booting-without-of.txt for an
-example of how to define a PHY (Internal PHY has no interrupt line).
-- For "fsl,fman-mdio" compatible internal mdio bus, the PHY is TBI PHY.
-- For "fsl,fman-memac-mdio" compatible internal mdio bus, the PHY is PCS PHY.
- The PCS PHY address should correspond to the value of the appropriate
- MDEV_PORT.
-
-EXAMPLE
-
-Example for FMan v2 external MDIO:
-
-mdio@f1000 {
- compatible = "fsl,fman-xmdio";
- reg = <0xf1000 0x1000>;
- interrupts = <101 2 0 0>;
-};
-
-Example for FMan v2 internal MDIO:
-
-mdio@e3120 {
- compatible = "fsl,fman-mdio";
- reg = <0xe3120 0xee0>;
- fsl,fman-internal-mdio;
-
- tbi1: tbi-phy@8 {
- reg = <0x8>;
- device_type = "tbi-phy";
- };
-};
-
-Example for FMan v3 internal MDIO:
-
-mdio@f1000 {
- compatible = "fsl,fman-memac-mdio";
- reg = <0xf1000 0x1000>;
- fsl,fman-internal-mdio;
-
- pcsphy6: ethernet-phy@0 {
- reg = <0x0>;
- };
-};
-
-=============================================================================
-Example
-
-fman@400000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- compatible = "fsl,fman"
- ranges = <0 0x400000 0x100000>;
- reg = <0x400000 0x100000>;
- clocks = <&fman_clk>;
- clock-names = "fmanclk";
- interrupts = <
- 96 2 0 0
- 16 2 1 1>;
- fsl,qman-channel-range = <0x40 0xc>;
-
- muram@0 {
- compatible = "fsl,fman-muram";
- reg = <0x0 0x28000>;
- };
-
- port@81000 {
- cell-index = <1>;
- compatible = "fsl,fman-v2-port-oh";
- reg = <0x81000 0x1000>;
- };
-
- port@82000 {
- cell-index = <2>;
- compatible = "fsl,fman-v2-port-oh";
- reg = <0x82000 0x1000>;
- };
-
- port@83000 {
- cell-index = <3>;
- compatible = "fsl,fman-v2-port-oh";
- reg = <0x83000 0x1000>;
- };
-
- port@84000 {
- cell-index = <4>;
- compatible = "fsl,fman-v2-port-oh";
- reg = <0x84000 0x1000>;
- };
-
- port@85000 {
- cell-index = <5>;
- compatible = "fsl,fman-v2-port-oh";
- reg = <0x85000 0x1000>;
- };
-
- port@86000 {
- cell-index = <6>;
- compatible = "fsl,fman-v2-port-oh";
- reg = <0x86000 0x1000>;
- };
-
- fman1_rx_0x8: port@88000 {
- cell-index = <0x8>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x88000 0x1000>;
- };
-
- fman1_rx_0x9: port@89000 {
- cell-index = <0x9>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x89000 0x1000>;
- };
-
- fman1_rx_0xa: port@8a000 {
- cell-index = <0xa>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x8a000 0x1000>;
- };
-
- fman1_rx_0xb: port@8b000 {
- cell-index = <0xb>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x8b000 0x1000>;
- };
-
- fman1_rx_0xc: port@8c000 {
- cell-index = <0xc>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x8c000 0x1000>;
- };
-
- fman1_rx_0x10: port@90000 {
- cell-index = <0x10>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x90000 0x1000>;
- };
-
- fman1_tx_0x28: port@a8000 {
- cell-index = <0x28>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xa8000 0x1000>;
- };
-
- fman1_tx_0x29: port@a9000 {
- cell-index = <0x29>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xa9000 0x1000>;
- };
-
- fman1_tx_0x2a: port@aa000 {
- cell-index = <0x2a>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xaa000 0x1000>;
- };
-
- fman1_tx_0x2b: port@ab000 {
- cell-index = <0x2b>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xab000 0x1000>;
- };
-
- fman1_tx_0x2c: port@ac0000 {
- cell-index = <0x2c>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xac000 0x1000>;
- };
-
- fman1_tx_0x30: port@b0000 {
- cell-index = <0x30>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xb0000 0x1000>;
- };
-
- ethernet@e0000 {
- compatible = "fsl,fman-dtsec";
- cell-index = <0>;
- reg = <0xe0000 0x1000>;
- fsl,fman-ports = <&fman1_rx_0x8 &fman1_tx_0x28>;
- tbi-handle = <&tbi5>;
- };
-
- ethernet@e2000 {
- compatible = "fsl,fman-dtsec";
- cell-index = <1>;
- reg = <0xe2000 0x1000>;
- fsl,fman-ports = <&fman1_rx_0x9 &fman1_tx_0x29>;
- tbi-handle = <&tbi6>;
- };
-
- ethernet@e4000 {
- compatible = "fsl,fman-dtsec";
- cell-index = <2>;
- reg = <0xe4000 0x1000>;
- fsl,fman-ports = <&fman1_rx_0xa &fman1_tx_0x2a>;
- tbi-handle = <&tbi7>;
- };
-
- ethernet@e6000 {
- compatible = "fsl,fman-dtsec";
- cell-index = <3>;
- reg = <0xe6000 0x1000>;
- fsl,fman-ports = <&fman1_rx_0xb &fman1_tx_0x2b>;
- tbi-handle = <&tbi8>;
- };
-
- ethernet@e8000 {
- compatible = "fsl,fman-dtsec";
- cell-index = <4>;
- reg = <0xf0000 0x1000>;
- fsl,fman-ports = <&fman1_rx_0xc &fman1_tx_0x2c>;
- tbi-handle = <&tbi9>;
-
- ethernet@f0000 {
- cell-index = <8>;
- compatible = "fsl,fman-xgec";
- reg = <0xf0000 0x1000>;
- fsl,fman-ports = <&fman1_rx_0x10 &fman1_tx_0x30>;
- };
-
- ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
-
- mdio@f1000 {
- compatible = "fsl,fman-xmdio";
- reg = <0xf1000 0x1000>;
- interrupts = <101 2 0 0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 047bdf7bdd2f..9c9668c1b6a2 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -86,4 +86,4 @@ Example:
* Gianfar PTP clock nodes
-Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+Refer to Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
index 464c0dafc617..c09eec6422ac 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
@@ -19,16 +19,6 @@ Optional properties:
[1] Documentation/devicetree/bindings/net/ethernet.txt
-* Ethernet ppe node:
-Control rx & tx fifos of all ethernet controllers.
-Have 2048 recv channels shared by all ethernet controllers, only if no overlap.
-Each controller's recv channel start from channel * number (RX_DESC_NUM).
-
-Required properties:
-- compatible: "hisilicon,hip04-ppe", "syscon".
-- reg: address and length of the register set for the device.
-
-
* MDIO bus node:
Required properties:
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index 3202dc7967c5..686b5c2fae40 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -68,6 +68,17 @@ properties:
Phandle to the syscon node that handles the path from GMAC to
PHY variants.
+ mediatek,pcie-mirror:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the mediatek pcie-mirror controller.
+
+ mediatek,pctl:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon node that handles the ports slew rate and
+ driver current.
+
mediatek,sgmiisys:
$ref: /schemas/types.yaml#/definitions/phandle-array
minItems: 1
@@ -131,15 +142,12 @@ allOf:
mediatek,infracfg: false
- mediatek,pctl:
- $ref: /schemas/types.yaml#/definitions/phandle
- description:
- Phandle to the syscon node that handles the ports slew rate and
- driver current.
-
mediatek,wed: false
mediatek,wed-pcie: false
+ else:
+ properties:
+ mediatek,pctl: false
- if:
properties:
@@ -201,12 +209,10 @@ allOf:
minItems: 1
maxItems: 1
- mediatek,pcie-mirror:
- $ref: /schemas/types.yaml#/definitions/phandle
- description:
- Phandle to the mediatek pcie-mirror controller.
-
mediatek,wed-pcie: false
+ else:
+ properties:
+ mediatek,pcie-mirror: false
- if:
properties:
diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
index 9ef5bacda8c1..988c72685cbf 100644
--- a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
@@ -1,39 +1,3 @@
-MediaTek SoC built-in Bluetooth Devices
-==================================
-
-This device is a serial attached device to BTIF device and thus it must be a
-child node of the serial node with BTIF. The dt-bindings details for BTIF
-device can be known via Documentation/devicetree/bindings/serial/8250.yaml.
-
-Required properties:
-
-- compatible: Must be
- "mediatek,mt7622-bluetooth": for MT7622 SoC
-- clocks: Should be the clock specifiers corresponding to the entry in
- clock-names property.
-- clock-names: Should contain "ref" entries.
-- power-domains: Phandle to the power domain that the device is part of
-
-Example:
-
- btif: serial@1100c000 {
- compatible = "mediatek,mt7622-btif",
- "mediatek,mtk-btif";
- reg = <0 0x1100c000 0 0x1000>;
- interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&pericfg CLK_PERI_BTIF_PD>;
- clock-names = "main";
- reg-shift = <2>;
- reg-io-width = <4>;
-
- bluetooth {
- compatible = "mediatek,mt7622-bluetooth";
- power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
- clocks = <&clk25m>;
- clock-names = "ref";
- };
- };
-
MediaTek UART based Bluetooth Devices
==================================
diff --git a/Documentation/devicetree/bindings/net/mscc,miim.yaml b/Documentation/devicetree/bindings/net/mscc,miim.yaml
index 5b292e7c9e46..792f26b06b06 100644
--- a/Documentation/devicetree/bindings/net/mscc,miim.yaml
+++ b/Documentation/devicetree/bindings/net/mscc,miim.yaml
@@ -38,6 +38,16 @@ properties:
clock-frequency: true
+ resets:
+ items:
+ - description:
+ Reset shared with all blocks attached to the Switch Core Register
+ Bus (CSR) including VRAP slave.
+
+ reset-names:
+ items:
+ - const: switch
+
required:
- compatible
- reg
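To show how the new resets/reset-names pair is meant to be consumed, a sketch of a LAN966x-style MDIO controller node follows (the register address and the reset controller label are assumptions):

    mdio@e200413c {
        compatible = "microchip,lan966x-miim";
        reg = <0xe200413c 0x24>;
        #address-cells = <1>;
        #size-cells = <0>;

        /* reset shared with the Switch Core Register Bus (CSR) blocks */
        resets = <&switch_reset 0>;
        reset-names = "switch";
    };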
diff --git a/Documentation/devicetree/bindings/net/pcs/snps,dw-xpcs.yaml b/Documentation/devicetree/bindings/net/pcs/snps,dw-xpcs.yaml
new file mode 100644
index 000000000000..e77eec9ac9ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/pcs/snps,dw-xpcs.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/pcs/snps,dw-xpcs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare Ethernet PCS
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description:
+ The Synopsys DesignWare Ethernet Physical Coding Sublayer provides an interface
+ between the Media Access Control and Physical Medium Attachment sublayers
+ through the Media Independent Interface (XGMII, USXGMII, XLGMII, GMII, etc.)
+ controlled by means of the IEEE Clause 45 register set. The PCS can optionally
+ be synthesized with a vendor-specific interface connected to a Synopsys PMA
+ (also called DesignWare Consumer/Enterprise PHY), although in general it can
+ be used to communicate with any compatible PHY.
+
+ The PCS CSRs are accessible either over the Ethernet MDIO bus or directly by
+ means of the APB3/MCI interfaces. In the latter case the XPCS can be mapped
+ right into the system IO memory space.
+
+properties:
+ compatible:
+ oneOf:
+ - description: Synopsys DesignWare XPCS with none or unknown PMA
+ const: snps,dw-xpcs
+ - description: Synopsys DesignWare XPCS with Consumer Gen1 3G PMA
+ const: snps,dw-xpcs-gen1-3g
+ - description: Synopsys DesignWare XPCS with Consumer Gen2 3G PMA
+ const: snps,dw-xpcs-gen2-3g
+ - description: Synopsys DesignWare XPCS with Consumer Gen2 6G PMA
+ const: snps,dw-xpcs-gen2-6g
+ - description: Synopsys DesignWare XPCS with Consumer Gen4 3G PMA
+ const: snps,dw-xpcs-gen4-3g
+ - description: Synopsys DesignWare XPCS with Consumer Gen4 6G PMA
+ const: snps,dw-xpcs-gen4-6g
+ - description: Synopsys DesignWare XPCS with Consumer Gen5 10G PMA
+ const: snps,dw-xpcs-gen5-10g
+ - description: Synopsys DesignWare XPCS with Consumer Gen5 12G PMA
+ const: snps,dw-xpcs-gen5-12g
+
+ reg:
+ items:
+ - description:
+ In case of the MDIO management interface this is just the 5-bit ID
+ of the MDIO bus device. If the DW XPCS CSR space is accessed over the
+ MCI or APB3 management interfaces, then the space mapping can be
+ either 'direct' or 'indirect'. In the former case all Clause 45
+ registers are contiguously mapped within the address space
+ MMD '[20:16]', Reg '[15:0]'. In the latter case the space is divided
+ into multiple 256-register sets. There is a special viewport CSR
+ which is responsible for the set selection. The upper part of
+ the CSR address, MMD+REG[20:8], is written there so that the
+ corresponding subset is mapped to the lowest 255 CSRs.
+
+ reg-names:
+ items:
+ - enum: [ direct, indirect ]
+
+ reg-io-width:
+ description:
+ The way the CSRs are mapped to memory is platform dependent. Since
+ each Clause 45 CSR is 16 bits wide, the access instructions must be
+ at least two-byte aligned.
+ default: 2
+ enum: [ 2, 4 ]
+
+ interrupts:
+ description:
+ System interface interrupt output (sbd_intr_o) indicating Clause 73/37
+ auto-negotiation events':' Page received, AN is completed or incompatible
+ link partner.
+ maxItems: 1
+
+ clocks:
+ description:
+ The MCI and APB3 interfaces are supposed to be equipped with a clock
+ source connected to the clk_csr_i line.
+
+ The PCS/PMA layer can be clocked by an internal reference clock source
+ (phyN_core_refclk) or by an externally connected (phyN_pad_refclk) clock
+ generator. Both clocks can be supplied at the same time.
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ oneOf:
+ - minItems: 1
+ items: # MDIO
+ - enum: [core, pad]
+ - const: pad
+ - minItems: 1
+ items: # MCI or APB
+ - const: csr
+ - enum: [core, pad]
+ - const: pad
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ethernet-pcs@1f05d000 {
+ compatible = "snps,dw-xpcs";
+ reg = <0x1f05d000 0x1000>;
+ reg-names = "indirect";
+
+ reg-io-width = <4>;
+
+ interrupts = <79 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_pclk>, <&ccu_core>, <&ccu_pad>;
+ clock-names = "csr", "core", "pad";
+ };
+ - |
+ mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-pcs@0 {
+ compatible = "snps,dw-xpcs";
+ reg = <0>;
+
+ clocks = <&ccu_core>, <&ccu_pad>;
+ clock-names = "core", "pad";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
index 828439398fdf..fd4244fceced 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
@@ -24,6 +24,7 @@ properties:
managers:
type: object
+ additionalProperties: false
description:
List of the PD69208T4/PD69204T4/PD69208M PSE managers. Each manager
have 4 or 8 physical ports according to the chip version. No need to
@@ -47,8 +48,9 @@ properties:
- "#size-cells"
patternProperties:
- "^manager@0[0-9a-b]$":
+ "^manager@[0-9a-b]$":
type: object
+ additionalProperties: false
description:
PD69208T4/PD69204T4/PD69208M PSE manager exposing 4 or 8 physical
ports.
@@ -69,9 +71,14 @@ properties:
patternProperties:
'^port@[0-7]$':
type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maxItems: 1
+
required:
- reg
- additionalProperties: false
required:
- reg
diff --git a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
index 4147adb11e10..6992d56832bf 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
@@ -29,13 +29,31 @@ properties:
of the ports conversion matrix that establishes relationship between
the logical ports and the physical channels.
type: object
+ additionalProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
patternProperties:
'^channel@[0-7]$':
type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maxItems: 1
+
required:
- reg
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
unevaluatedProperties: false
required:
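A fragment illustrating the channels-node constraints spelled out above (the cell sizes plus one addressable channel child); the values are purely illustrative:

    channels {
        #address-cells = <1>;
        #size-cells = <0>;

        channel@0 {
            reg = <0>;
        };
    };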
diff --git a/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml b/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml
index bb94a2388520..d248a08a2136 100644
--- a/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml
+++ b/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml
@@ -14,10 +14,32 @@ maintainers:
description:
Bindings for Realtek RTL82xx PHYs
-allOf:
- - $ref: ethernet-phy.yaml#
-
properties:
+ compatible:
+ enum:
+ - ethernet-phy-id001c.c800
+ - ethernet-phy-id001c.c816
+ - ethernet-phy-id001c.c838
+ - ethernet-phy-id001c.c840
+ - ethernet-phy-id001c.c848
+ - ethernet-phy-id001c.c849
+ - ethernet-phy-id001c.c84a
+ - ethernet-phy-id001c.c862
+ - ethernet-phy-id001c.c878
+ - ethernet-phy-id001c.c880
+ - ethernet-phy-id001c.c910
+ - ethernet-phy-id001c.c912
+ - ethernet-phy-id001c.c913
+ - ethernet-phy-id001c.c914
+ - ethernet-phy-id001c.c915
+ - ethernet-phy-id001c.c916
+ - ethernet-phy-id001c.c942
+ - ethernet-phy-id001c.c961
+ - ethernet-phy-id001c.cad0
+ - ethernet-phy-id001c.cb00
+
+ leds: true
+
realtek,clkout-disable:
type: boolean
description:
@@ -31,6 +53,18 @@ properties:
unevaluatedProperties: false
+allOf:
+ - $ref: ethernet-phy.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: ethernet-phy-id001c.c916
+ then:
+ properties:
+ leds: false
+
examples:
- |
mdio {
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 21cc27e75f50..3eb65e63fdae 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -76,6 +76,7 @@ properties:
- rockchip,rk3128-gmac
- rockchip,rk3228-gmac
- rockchip,rk3288-gmac
+ - rockchip,rk3308-gmac
- rockchip,rk3328-gmac
- rockchip,rk3366-gmac
- rockchip,rk3368-gmac
@@ -435,6 +436,32 @@ properties:
description:
Use Address-Aligned Beats
+ snps,pbl:
+ description:
+ Programmable Burst Length (tx and rx)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8, 16, 32]
+
+ snps,txpbl:
+ description:
+ Tx Programmable Burst Length. If set, DMA tx will use this
+ value rather than snps,pbl.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8, 16, 32]
+
+ snps,rxpbl:
+ description:
+ Rx Programmable Burst Length. If set, DMA rx will use this
+ value rather than snps,pbl.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8, 16, 32]
+
+ snps,no-pbl-x8:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Don\'t multiply the pbl/txpbl/rxpbl values by 8. For core
+ rev < 3.50, don\'t multiply the values by 4.
+
snps,fixed-burst:
$ref: /schemas/types.yaml#/definitions/flag
description:
@@ -485,6 +512,12 @@ properties:
description:
Frequency division factor for MDC clock.
+ snps,tso:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enables the TSO feature; otherwise it is managed by the MAC HW
+ capability register.
+
mdio:
$ref: mdio.yaml#
unevaluatedProperties: false
@@ -568,95 +601,38 @@ allOf:
- if:
properties:
compatible:
- contains:
- enum:
- - allwinner,sun7i-a20-gmac
- - allwinner,sun8i-a83t-emac
- - allwinner,sun8i-h3-emac
- - allwinner,sun8i-r40-gmac
- - allwinner,sun8i-v3s-emac
- - allwinner,sun50i-a64-emac
- - ingenic,jz4775-mac
- - ingenic,x1000-mac
- - ingenic,x1600-mac
- - ingenic,x1830-mac
- - ingenic,x2000-mac
- - qcom,sa8775p-ethqos
- - qcom,sc8280xp-ethqos
- - snps,dwmac-3.50a
- - snps,dwmac-4.10a
- - snps,dwmac-4.20a
- - snps,dwmac-5.20
- - snps,dwxgmac
- - snps,dwxgmac-2.10
- - st,spear600-gmac
-
- then:
- properties:
- snps,pbl:
- description:
- Programmable Burst Length (tx and rx)
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [1, 2, 4, 8, 16, 32]
-
- snps,txpbl:
- description:
- Tx Programmable Burst Length. If set, DMA tx will use this
- value rather than snps,pbl.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [1, 2, 4, 8, 16, 32]
-
- snps,rxpbl:
- description:
- Rx Programmable Burst Length. If set, DMA rx will use this
- value rather than snps,pbl.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [1, 2, 4, 8, 16, 32]
-
- snps,no-pbl-x8:
- $ref: /schemas/types.yaml#/definitions/flag
- description:
- Don\'t multiply the pbl/txpbl/rxpbl values by 8. For core
- rev < 3.50, don\'t multiply the values by 4.
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - allwinner,sun7i-a20-gmac
- - allwinner,sun8i-a83t-emac
- - allwinner,sun8i-h3-emac
- - allwinner,sun8i-r40-gmac
- - allwinner,sun8i-v3s-emac
- - allwinner,sun50i-a64-emac
- - loongson,ls2k-dwmac
- - loongson,ls7a-dwmac
- - ingenic,jz4775-mac
- - ingenic,x1000-mac
- - ingenic,x1600-mac
- - ingenic,x1830-mac
- - ingenic,x2000-mac
- - qcom,qcs404-ethqos
- - qcom,sa8775p-ethqos
- - qcom,sc8280xp-ethqos
- - qcom,sm8150-ethqos
- - snps,dwmac-4.00
- - snps,dwmac-4.10a
- - snps,dwmac-4.20a
- - snps,dwmac-5.10a
- - snps,dwmac-5.20
- - snps,dwxgmac
- - snps,dwxgmac-2.10
- - st,spear600-gmac
+ not:
+ contains:
+ enum:
+ - allwinner,sun7i-a20-gmac
+ - allwinner,sun8i-a83t-emac
+ - allwinner,sun8i-h3-emac
+ - allwinner,sun8i-r40-gmac
+ - allwinner,sun8i-v3s-emac
+ - allwinner,sun50i-a64-emac
+ - loongson,ls2k-dwmac
+ - loongson,ls7a-dwmac
+ - ingenic,jz4775-mac
+ - ingenic,x1000-mac
+ - ingenic,x1600-mac
+ - ingenic,x1830-mac
+ - ingenic,x2000-mac
+ - qcom,qcs404-ethqos
+ - qcom,sa8775p-ethqos
+ - qcom,sc8280xp-ethqos
+ - qcom,sm8150-ethqos
+ - snps,dwmac-4.00
+ - snps,dwmac-4.10a
+ - snps,dwmac-4.20a
+ - snps,dwmac-5.10a
+ - snps,dwmac-5.20
+ - snps,dwxgmac
+ - snps,dwxgmac-2.10
+ - st,spear600-gmac
then:
properties:
- snps,tso:
- $ref: /schemas/types.yaml#/definitions/flag
- description:
- Enables the TSO feature otherwise it will be managed by
- MAC HW capability register.
+ snps,tso: false
additionalProperties: true
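Since the burst-length and TSO properties above are now documented unconditionally (with snps,tso merely disallowed for compatibles outside the not/contains list), a sketch of how they might appear on a DWXGMAC node is given below; the address, clock and interrupt values are assumptions:

    ethernet@1f060000 {
        compatible = "snps,dwxgmac-2.10", "snps,dwxgmac";
        reg = <0x1f060000 0x10000>;
        interrupts = <14>;
        interrupt-names = "macirq";
        clocks = <&sys_clk>;
        clock-names = "stmmaceth";
        phy-mode = "rgmii-id";

        /* DMA burst tuning */
        snps,pbl = <8>;
        snps,txpbl = <16>;
        snps,rxpbl = <8>;
        snps,no-pbl-x8;

        /* force TSO instead of relying on the HW capability register */
        snps,tso;
    };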
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
index 7ccf75676b6d..bf23838fe6e8 100644
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
@@ -22,18 +22,22 @@ select:
enum:
- st,stm32-dwmac
- st,stm32mp1-dwmac
+ - st,stm32mp13-dwmac
+ - st,stm32mp25-dwmac
required:
- compatible
-allOf:
- - $ref: snps,dwmac.yaml#
-
properties:
compatible:
oneOf:
- items:
- enum:
+ - st,stm32mp25-dwmac
+ - const: snps,dwmac-5.20
+ - items:
+ - enum:
- st,stm32mp1-dwmac
+ - st,stm32mp13-dwmac
- const: snps,dwmac-4.20a
- items:
- enum:
@@ -75,12 +79,15 @@ properties:
st,syscon:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- - items:
+ - minItems: 2
+ items:
- description: phandle to the syscon node which encompases the glue register
- description: offset of the control register
+ - description: field to set mask in register
description:
Should be phandle/offset pair. The phandle to the syscon node which
- encompases the glue register, and the offset of the control register
+ encompasses the glue register, the offset of the control register and
+ the mask used to set the bitfield in the control register
st,ext-phyclk:
description:
@@ -112,12 +119,40 @@ required:
unevaluatedProperties: false
+allOf:
+ - $ref: snps,dwmac.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32-dwmac
+ - st,stm32mp1-dwmac
+ - st,stm32mp25-dwmac
+ then:
+ properties:
+ st,syscon:
+ items:
+ minItems: 2
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32mp13-dwmac
+ then:
+ properties:
+ st,syscon:
+ items:
+ minItems: 3
+ maxItems: 3
+
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
- #include <dt-bindings/reset/stm32mp1-resets.h>
- #include <dt-bindings/mfd/stm32h7-rcc.h>
//Example 1
ethernet0: ethernet@5800a000 {
compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
diff --git a/Documentation/devicetree/bindings/net/ti,icss-iep.yaml b/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
index f5c22d6dcaee..e36e3a622904 100644
--- a/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
+++ b/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
@@ -28,6 +28,15 @@ properties:
maxItems: 1
description: phandle to the IEP source clock
+ interrupts:
+ maxItems: 1
+ description:
+ Interrupt specifier for capture/compare IRQ.
+
+ interrupt-names:
+ items:
+ - const: iep_cap_cmp
+
required:
- compatible
- reg
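The new capture/compare interrupt would be wired up on an IEP node roughly as below; the interrupt specifier cells and the parent controller label are assumptions:

    iep@2e000 {
        compatible = "ti,am654-icss-iep";
        reg = <0x2e000 0x1000>;
        clocks = <&icssg0_iepclk_mux>;

        interrupt-parent = <&icssg0_intc>;
        interrupts = <7 7 8>;
        interrupt-names = "iep_cap_cmp";
    };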
diff --git a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
index e253fa786092..c296e5711848 100644
--- a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
+++ b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
@@ -55,6 +55,14 @@ properties:
description:
phandle to MII_RT module's syscon regmap
+ ti,pa-stats:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to the PA_STATS module's syscon regmap. PA_STATS is a set of
+ registers where different ICSSG-related statistics are dumped by the
+ ICSSG firmware. The PA_STATS syscon regmap lets the device
+ access/read/write those statistics.
+
ti,iep:
$ref: /schemas/types.yaml#/definitions/phandle-array
maxItems: 2
@@ -194,6 +202,7 @@ examples:
"tx1-0", "tx1-1", "tx1-2", "tx1-3",
"rx0", "rx1";
ti,mii-g-rt = <&icssg2_mii_g_rt>;
+ ti,pa-stats = <&icssg2_pa_stats>;
ti,iep = <&icssg2_iep0>, <&icssg2_iep1>;
interrupt-parent = <&icssg2_intc>;
interrupts = <24 0 2>, <25 1 3>;
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
index 5c4498b762c8..070c4c9b8643 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
@@ -128,6 +128,11 @@ properties:
Whether to skip executing an SCM call that reassigns the memory
region ownership.
+ qcom,no-msa-ready-indicator:
+ type: boolean
+ description:
+ Don't wait for MSA_READY indicator to complete init.
+
qcom,smem-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: State bits used by the AP to signal the WLAN Q6.
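A usage sketch for the new qcom,no-msa-ready-indicator flag on a WCN3990 node (the address and the omitted properties are assumptions):

    wifi@18800000 {
        compatible = "qcom,wcn3990-wifi";
        reg = <0x18800000 0x800000>;
        reg-names = "membase";

        /* do not wait for the MSA_READY indication during init */
        qcom,no-msa-ready-indicator;

        /* interrupts, iommus, memory-region, etc. omitted for brevity */
    };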
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
index 41d023797d7d..8675d7d0215c 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
@@ -17,6 +17,7 @@ description: |
properties:
compatible:
enum:
+ - pci17cb,1101 # QCA6390
- pci17cb,1103 # WCN6855
reg:
@@ -28,10 +29,55 @@ properties:
string to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
+ vddrfacmn-supply:
+ description: VDD_RFA_CMN supply regulator handle
+
+ vddaon-supply:
+ description: VDD_AON supply regulator handle
+
+ vddwlcx-supply:
+ description: VDD_WL_CX supply regulator handle
+
+ vddwlmx-supply:
+ description: VDD_WL_MX supply regulator handle
+
+ vddrfa0p8-supply:
+ description: VDD_RFA_0P8 supply regulator handle
+
+ vddrfa1p2-supply:
+ description: VDD_RFA_1P2 supply regulator handle
+
+ vddrfa1p7-supply:
+ description: VDD_RFA_1P7 supply regulator handle
+
+ vddpcie0p9-supply:
+ description: VDD_PCIE_0P9 supply regulator handle
+
+ vddpcie1p8-supply:
+ description: VDD_PCIE_1P8 supply regulator handle
+
required:
- compatible
- reg
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: pci17cb,1101
+ then:
+ required:
+ - vddrfacmn-supply
+ - vddaon-supply
+ - vddwlcx-supply
+ - vddwlmx-supply
+ - vddrfa0p8-supply
+ - vddrfa1p2-supply
+ - vddrfa1p7-supply
+ - vddpcie0p9-supply
+ - vddpcie1p8-supply
+
additionalProperties: false
examples:
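Mirroring the supplies now required for the pci17cb,1101 (QCA6390) entry, such a node would look roughly like the sketch below (regulator labels are assumptions):

    pcie {
        #address-cells = <3>;
        #size-cells = <2>;

        wifi@0 {
            compatible = "pci17cb,1101";
            reg = <0x10000 0x0 0x0 0x0 0x0>;

            vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
            vddaon-supply = <&vreg_pmu_aon_0p59>;
            vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
            vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
            vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
            vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
            vddrfa1p7-supply = <&vreg_pmu_rfa_1p7>;
            vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
            vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
        };
    };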
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index a2d55bf4c7a5..ff5763dc66a8 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -265,15 +265,6 @@ allOf:
examples:
- |
-
- q6v5_wcss: remoteproc@cd00000 {
- compatible = "qcom,ipq8074-wcss-pil";
- reg = <0xcd00000 0x4040>,
- <0x4ab000 0x20>;
- reg-names = "qdsp6",
- "rmb";
- };
-
wifi0: wifi@c000000 {
compatible = "qcom,ipq8074-wifi";
reg = <0xc000000 0x2000000>;
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml
new file mode 100644
index 000000000000..1b5884015b15
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2024 Linaro Limited
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/qcom,ath12k.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies ath12k wireless devices (PCIe)
+
+maintainers:
+ - Jeff Johnson <quic_jjohnson@quicinc.com>
+ - Kalle Valo <kvalo@kernel.org>
+
+description:
+ Qualcomm Technologies IEEE 802.11be PCIe devices.
+
+properties:
+ compatible:
+ enum:
+ - pci17cb,1107 # WCN7850
+
+ reg:
+ maxItems: 1
+
+ vddaon-supply:
+ description: VDD_AON supply regulator handle
+
+ vddwlcx-supply:
+ description: VDD_WLCX supply regulator handle
+
+ vddwlmx-supply:
+ description: VDD_WLMX supply regulator handle
+
+ vddrfacmn-supply:
+ description: VDD_RFA_CMN supply regulator handle
+
+ vddrfa0p8-supply:
+ description: VDD_RFA_0P8 supply regulator handle
+
+ vddrfa1p2-supply:
+ description: VDD_RFA_1P2 supply regulator handle
+
+ vddrfa1p8-supply:
+ description: VDD_RFA_1P8 supply regulator handle
+
+ vddpcie0p9-supply:
+ description: VDD_PCIE_0P9 supply regulator handle
+
+ vddpcie1p8-supply:
+ description: VDD_PCIE_1P8 supply regulator handle
+
+required:
+ - compatible
+ - reg
+ - vddaon-supply
+ - vddwlcx-supply
+ - vddwlmx-supply
+ - vddrfacmn-supply
+ - vddrfa0p8-supply
+ - vddrfa1p2-supply
+ - vddrfa1p8-supply
+ - vddpcie0p9-supply
+ - vddpcie1p8-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/gpio/gpio.h>
+ pcie {
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ bus-range = <0x01 0xff>;
+
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml b/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml
index 0f781dac6717..eb803ddd13e0 100644
--- a/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml
+++ b/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml
@@ -31,6 +31,10 @@ properties:
phy-handle:
$ref: ethernet-controller.yaml#/properties/phy-handle
+ clocks:
+ items:
+      - description: 200/375 MHz free-running clock used as the input clock.
+
required:
- compatible
- reg
@@ -51,5 +55,6 @@ examples:
compatible = "xlnx,gmii-to-rgmii-1.0";
reg = <8>;
phy-handle = <&phy>;
+ clocks = <&dummy>;
};
};
diff --git a/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml b/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
index 6c96a4204e5d..37e8b98f2cdc 100644
--- a/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
+++ b/Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
@@ -30,6 +30,9 @@ properties:
- items:
- const: fsl,imx8dxl-ddr-pmu
- const: fsl,imx8-ddr-pmu
+ - items:
+ - const: fsl,imx95-ddr-pmu
+ - const: fsl,imx93-ddr-pmu
reg:
maxItems: 1
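A board using the new i.MX95 entry would pair it with the i.MX93 fallback compatible; a minimal hedged sketch, where the unit address and interrupt number are placeholders rather than values from a reference manual:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    ddr-pmu@4e090dc0 {
        compatible = "fsl,imx95-ddr-pmu", "fsl,imx93-ddr-pmu";
        reg = <0x4e090dc0 0x200>;
        interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
    };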
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 50846a2d09c8..0bf2d9f093b5 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -29,7 +29,6 @@ properties:
- qcom,pm7325-gpio
- qcom,pm7550ba-gpio
- qcom,pm8005-gpio
- - qcom,pm8008-gpio
- qcom,pm8018-gpio
- qcom,pm8019-gpio
- qcom,pm8038-gpio
@@ -126,7 +125,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,pm8008-gpio
- qcom,pmi8950-gpio
- qcom,pmr735d-gpio
then:
@@ -448,7 +446,6 @@ $defs:
- gpio1-gpio10 for pm7325
- gpio1-gpio8 for pm7550ba
- gpio1-gpio4 for pm8005
- - gpio1-gpio2 for pm8008
- gpio1-gpio6 for pm8018
- gpio1-gpio12 for pm8038
- gpio1-gpio40 for pm8058
diff --git a/Documentation/devicetree/bindings/platform/lenovo,yoga-c630-ec.yaml b/Documentation/devicetree/bindings/platform/lenovo,yoga-c630-ec.yaml
new file mode 100644
index 000000000000..3180ce1a22d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/lenovo,yoga-c630-ec.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/platform/lenovo,yoga-c630-ec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lenovo Yoga C630 Embedded Controller
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+
+description:
+ The Qualcomm Snapdragon-based Lenovo Yoga C630 has an Embedded Controller
+ (EC) which handles things such as battery and USB Type-C. This binding
+ describes the interface, on an I2C bus, to this EC.
+
+properties:
+ compatible:
+ const: lenovo,yoga-c630-ec
+
+ reg:
+ const: 0x70
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ interrupts:
+ maxItems: 1
+
+patternProperties:
+ '^connector@[01]$':
+ $ref: /schemas/connector/usb-connector.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |+
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c1 {
+ clock-frequency = <400000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ embedded-controller@70 {
+ compatible = "lenovo,yoga-c630-ec";
+ reg = <0x70>;
+
+ interrupts-extended = <&tlmm 20 IRQ_TYPE_LEVEL_HIGH>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "source";
+ data-role = "host";
+ };
+
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "source";
+ data-role = "host";
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
index dab3d92bc273..15d74138baa3 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
@@ -20,6 +20,8 @@ properties:
enum:
- amlogic,meson-a1-pwrc
- amlogic,meson-s4-pwrc
+ - amlogic,a4-pwrc
+ - amlogic,a5-pwrc
- amlogic,c3-pwrc
- amlogic,t7-pwrc
diff --git a/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
new file mode 100644
index 000000000000..3bb8615e3e91
--- /dev/null
+++ b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ptp/fsl,ptp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale QorIQ 1588 timer based PTP clock
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,etsec-ptp
+ - fsl,fman-ptp-timer
+ - fsl,dpaa2-ptp
+ - fsl,enetc-ptp
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ fsl,cksel:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Timer reference clock source.
+
+      The reference clock source is determined by the value held in the CKSEL
+      bits of the TMR_CTRL register. The "fsl,cksel" property holds the value
+      that is written directly into those bits, so, according to the reference
+      manual, the following clock sources can be used:
+
+ For eTSEC,
+ <0> - external high precision timer reference clock (TSEC_TMR_CLK
+ input is used for this purpose);
+ <1> - eTSEC system clock;
+ <2> - eTSEC1 transmit clock;
+ <3> - RTC clock input.
+
+ For DPAA FMan,
+ <0> - external high precision timer reference clock (TMR_1588_CLK)
+ <1> - MAC system clock (1/2 FMan clock)
+ <2> - reserved
+ <3> - RTC clock oscillator
+
+ fsl,tclk-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Timer reference clock period in nanoseconds.
+
+ fsl,tmr-prsc:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Prescaler, divides the output clock.
+
+ fsl,tmr-add:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Frequency compensation value.
+
+ fsl,tmr-fiper1:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Fixed interval period pulse generator.
+
+ fsl,tmr-fiper2:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Fixed interval period pulse generator.
+
+ fsl,tmr-fiper3:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Fixed interval period pulse generator.
+ Supported only on DPAA2 and ENETC hardware.
+
+ fsl,max-adj:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Maximum frequency adjustment in parts per billion.
+
+ These properties set the operational parameters for the PTP
+      clock. You must choose these carefully for the clock to work correctly.
+      Here is how to derive suitable values:
+
+ TimerOsc = selected reference clock MHz
+ tclk_period = desired clock period nanoseconds
+ NominalFreq = 1000 / tclk_period MHz
+      FreqDivRatio = TimerOsc / NominalFreq (must be greater than 1.0)
+ tmr_add = ceil(2^32 / FreqDivRatio)
+ OutputClock = NominalFreq / tmr_prsc MHz
+ PulseWidth = 1 / OutputClock microseconds
+ FiperFreq1 = desired frequency in Hz
+ FiperDiv1 = 1000000 * OutputClock / FiperFreq1
+ tmr_fiper1 = tmr_prsc * tclk_period * FiperDiv1 - tclk_period
+ max_adj = 1000000000 * (FreqDivRatio - 1.0) - 1
+
+ The calculation for tmr_fiper2 is the same as for tmr_fiper1. The
+ driver expects that tmr_fiper1 will be correctly set to produce a 1
+ Pulse Per Second (PPS) signal, since this will be offered to the PPS
+ subsystem to synchronize the Linux clock.
+
+      When the "fsl,cksel" property is not used, the IEEE 1588 timer reference clock
+ will use the eTSEC system clock (for Gianfar) or the MAC system
+ clock (for DPAA).
+
+ fsl,extts-fifo:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The presence of this property indicates hardware
+ support for the external trigger stamp FIFO
+
+ little-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+      The presence of this property indicates that the 1588 timer IP block
+      is in little-endian mode. The default endian mode is big-endian.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ phc@24e00 {
+ compatible = "fsl,etsec-ptp";
+ reg = <0x24e00 0xb0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&ipic>;
+ fsl,cksel = <1>;
+ fsl,tclk-period = <10>;
+ fsl,tmr-prsc = <100>;
+ fsl,tmr-add = <0x999999a4>;
+ fsl,tmr-fiper1 = <0x3b9ac9f6>;
+ fsl,tmr-fiper2 = <0x00018696>;
+ fsl,max-adj = <659999998>;
+ };
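As a worked illustration of the formulas above, assume a 250 MHz reference clock and a desired 5 ns timer period (both values chosen purely for clarity, not tied to any particular SoC); the derived properties then follow directly:

    #include <dt-bindings/interrupt-controller/irq.h>

    /* Assumed: TimerOsc = 250 MHz, tclk_period = 5 ns
     * NominalFreq  = 1000 / 5 = 200 MHz
     * FreqDivRatio = 250 / 200 = 1.25
     * tmr_add      = ceil(2^32 / 1.25) = 0xcccccccd
     * tmr_prsc     = 2, so OutputClock = 100 MHz
     * FiperFreq1   = 1 Hz, so FiperDiv1 = 1000000 * 100 / 1 = 100000000
     * tmr_fiper1   = 2 * 5 * 100000000 - 5 = 999999995 (a 1 PPS signal)
     * max_adj      = 1000000000 * (1.25 - 1.0) - 1 = 249999999
     */
    phc@24e00 {
        compatible = "fsl,etsec-ptp";
        reg = <0x24e00 0xb0>;
        interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
        interrupt-parent = <&ipic>;
        fsl,cksel = <1>;
        fsl,tclk-period = <5>;
        fsl,tmr-prsc = <2>;
        fsl,tmr-add = <0xcccccccd>;
        fsl,tmr-fiper1 = <999999995>;
        fsl,max-adj = <249999999>;
    };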
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
deleted file mode 100644
index 743eda754e65..000000000000
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-* Freescale QorIQ 1588 timer based PTP clock
-
-General Properties:
-
- - compatible Should be "fsl,etsec-ptp" for eTSEC
- Should be "fsl,fman-ptp-timer" for DPAA FMan
- Should be "fsl,dpaa2-ptp" for DPAA2
- Should be "fsl,enetc-ptp" for ENETC
- - reg Offset and length of the register set for the device
- - interrupts There should be at least two interrupts. Some devices
- have as many as four PTP related interrupts.
-
-Clock Properties:
-
- - fsl,cksel Timer reference clock source.
- - fsl,tclk-period Timer reference clock period in nanoseconds.
- - fsl,tmr-prsc Prescaler, divides the output clock.
- - fsl,tmr-add Frequency compensation value.
- - fsl,tmr-fiper1 Fixed interval period pulse generator.
- - fsl,tmr-fiper2 Fixed interval period pulse generator.
- - fsl,tmr-fiper3 Fixed interval period pulse generator.
- Supported only on DPAA2 and ENETC hardware.
- - fsl,max-adj Maximum frequency adjustment in parts per billion.
- - fsl,extts-fifo The presence of this property indicates hardware
- support for the external trigger stamp FIFO.
- - little-endian The presence of this property indicates the 1588 timer
- IP block is little-endian mode. The default endian mode
- is big-endian.
-
- These properties set the operational parameters for the PTP
- clock. You must choose these carefully for the clock to work right.
- Here is how to figure good values:
-
- TimerOsc = selected reference clock MHz
- tclk_period = desired clock period nanoseconds
- NominalFreq = 1000 / tclk_period MHz
- FreqDivRatio = TimerOsc / NominalFreq (must be greater that 1.0)
- tmr_add = ceil(2^32 / FreqDivRatio)
- OutputClock = NominalFreq / tmr_prsc MHz
- PulseWidth = 1 / OutputClock microseconds
- FiperFreq1 = desired frequency in Hz
- FiperDiv1 = 1000000 * OutputClock / FiperFreq1
- tmr_fiper1 = tmr_prsc * tclk_period * FiperDiv1 - tclk_period
- max_adj = 1000000000 * (FreqDivRatio - 1.0) - 1
-
- The calculation for tmr_fiper2 is the same as for tmr_fiper1. The
- driver expects that tmr_fiper1 will be correctly set to produce a 1
- Pulse Per Second (PPS) signal, since this will be offered to the PPS
- subsystem to synchronize the Linux clock.
-
- Reference clock source is determined by the value, which is holded
- in CKSEL bits in TMR_CTRL register. "fsl,cksel" property keeps the
- value, which will be directly written in those bits, that is why,
- according to reference manual, the next clock sources can be used:
-
- For eTSEC,
- <0> - external high precision timer reference clock (TSEC_TMR_CLK
- input is used for this purpose);
- <1> - eTSEC system clock;
- <2> - eTSEC1 transmit clock;
- <3> - RTC clock input.
-
- For DPAA FMan,
- <0> - external high precision timer reference clock (TMR_1588_CLK)
- <1> - MAC system clock (1/2 FMan clock)
- <2> - reserved
- <3> - RTC clock oscillator
-
- When this attribute is not used, the IEEE 1588 timer reference clock
- will use the eTSEC system clock (for Gianfar) or the MAC system
- clock (for DPAA).
-
-Example:
-
- ptp_clock@24e00 {
- compatible = "fsl,etsec-ptp";
- reg = <0x24E00 0xB0>;
- interrupts = <12 0x8 13 0x8>;
- interrupt-parent = < &ipic >;
- fsl,cksel = <1>;
- fsl,tclk-period = <10>;
- fsl,tmr-prsc = <100>;
- fsl,tmr-add = <0x999999A4>;
- fsl,tmr-fiper1 = <0x3B9AC9F6>;
- fsl,tmr-fiper2 = <0x00018696>;
- fsl,max-adj = <659999998>;
- };
diff --git a/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml b/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
new file mode 100644
index 000000000000..ec6115d3796b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/adi,axi-pwmgen.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AXI PWM generator
+
+maintainers:
+ - Michael Hennerich <Michael.Hennerich@analog.com>
+ - Nuno Sá <nuno.sa@analog.com>
+
+description:
+ The Analog Devices AXI PWM generator can generate PWM signals
+ with variable pulse width and period.
+
+ https://wiki.analog.com/resources/fpga/docs/axi_pwm_gen
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ const: adi,axi-pwmgen-2.00.a
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 2
+
+ clocks:
+ maxItems: 1
+
+required:
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pwm@44b00000 {
+ compatible = "adi,axi-pwmgen-2.00.a";
+ reg = <0x44b00000 0x1000>;
+ clocks = <&spi_clk>;
+ #pwm-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml b/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
index 96cd6f3c3546..d20ad27657aa 100644
--- a/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
@@ -23,7 +23,9 @@ properties:
- atmel,sama5d2-pwm
- microchip,sam9x60-pwm
- items:
- - const: microchip,sama7g5-pwm
+ - enum:
+ - microchip,sama7d65-pwm
+ - microchip,sama7g5-pwm
- const: atmel,sama5d2-pwm
- items:
- const: microchip,sam9x7-pwm
diff --git a/Documentation/devicetree/bindings/pwm/fsl,vf610-ftm-pwm.yaml b/Documentation/devicetree/bindings/pwm/fsl,vf610-ftm-pwm.yaml
new file mode 100644
index 000000000000..7f9f72d95e7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/fsl,vf610-ftm-pwm.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/fsl,vf610-ftm-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale FlexTimer Module (FTM) PWM controller
+
+description: |
+ The same FTM PWM device can have a different endianness on different SoCs. The
+ device tree provides a property to describing this so that an operating system
+ device driver can handle all variants of the device. Refer to the table below
+ for the endianness of the FTM PWM block as integrated into the existing SoCs:
+
+ SoC | FTM-PWM endianness
+ --------+-------------------
+ Vybrid | LE
+ LS1 | BE
+ LS2 | LE
+
+ Please see ../regmap/regmap.txt for more detail about how to specify endian
+ modes in device tree.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,vf610-ftm-pwm
+ - fsl,imx8qm-ftm-pwm
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+
+ clocks:
+ minItems: 4
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: ftm_sys
+ - const: ftm_ext
+ - const: ftm_fix
+ - const: ftm_cnt_clk_en
+
+ pinctrl-0: true
+ pinctrl-1: true
+
+ pinctrl-names:
+ minItems: 1
+ items:
+ - const: default
+ - const: sleep
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Boolean property, required if the FTM PWM registers use a big-
+ endian rather than little-endian layout.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: pwm.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/vf610-clock.h>
+
+ pwm@40038000 {
+ compatible = "fsl,vf610-ftm-pwm";
+ reg = <0x40038000 0x1000>;
+ #pwm-cells = <3>;
+ clocks = <&clks VF610_CLK_FTM0>,
+ <&clks VF610_CLK_FTM0_EXT_SEL>,
+ <&clks VF610_CLK_FTM0_FIX_SEL>,
+ <&clks VF610_CLK_FTM0_EXT_FIX_EN>;
+ clock-names = "ftm_sys", "ftm_ext", "ftm_fix", "ftm_cnt_clk_en";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_1>;
+ big-endian;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.yaml b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
index a84a240a61dc..04148198e34d 100644
--- a/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
@@ -68,7 +68,6 @@ required:
- reg
- clocks
- clock-names
- - interrupts
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt b/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
deleted file mode 100644
index 36532cd5ab25..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Freescale FlexTimer Module (FTM) PWM controller
-
-The same FTM PWM device can have a different endianness on different SoCs. The
-device tree provides a property to describing this so that an operating system
-device driver can handle all variants of the device. Refer to the table below
-for the endianness of the FTM PWM block as integrated into the existing SoCs:
-
- SoC | FTM-PWM endianness
- --------+-------------------
- Vybrid | LE
- LS1 | BE
- LS2 | LE
-
-Please see ../regmap/regmap.txt for more detail about how to specify endian
-modes in device tree.
-
-
-Required properties:
-- compatible : should be "fsl,<soc>-ftm-pwm" and one of the following
- compatible strings:
- - "fsl,vf610-ftm-pwm" for PWM compatible with the one integrated on VF610
- - "fsl,imx8qm-ftm-pwm" for PWM compatible with the one integrated on i.MX8QM
-- reg: Physical base address and length of the controller's registers
-- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
- the cells format.
-- clock-names: Should include the following module clock source entries:
- "ftm_sys" (module clock, also can be used as counter clock),
- "ftm_ext" (external counter clock),
- "ftm_fix" (fixed counter clock),
- "ftm_cnt_clk_en" (external and fixed counter clock enable/disable).
-- clocks: Must contain a phandle and clock specifier for each entry in
- clock-names, please see clock/clock-bindings.txt for details of the property
- values.
-- pinctrl-names: Must contain a "default" entry.
-- pinctrl-NNN: One property must exist for each entry in pinctrl-names.
- See pinctrl/pinctrl-bindings.txt for details of the property values.
-- big-endian: Boolean property, required if the FTM PWM registers use a big-
- endian rather than little-endian layout.
-
-Example:
-
-pwm0: pwm@40038000 {
- compatible = "fsl,vf610-ftm-pwm";
- reg = <0x40038000 0x1000>;
- #pwm-cells = <3>;
- clock-names = "ftm_sys", "ftm_ext",
- "ftm_fix", "ftm_cnt_clk_en";
- clocks = <&clks VF610_CLK_FTM0>,
- <&clks VF610_CLK_FTM0_EXT_SEL>,
- <&clks VF610_CLK_FTM0_FIX_SEL>,
- <&clks VF610_CLK_FTM0_EXT_FIX_EN>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pwm0_1>;
- big-endian;
-};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-gpio.yaml b/Documentation/devicetree/bindings/pwm/pwm-gpio.yaml
new file mode 100644
index 000000000000..1576c193f2ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-gpio.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/pwm-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic software PWM for modulating GPIOs
+
+maintainers:
+ - Stefan Wahren <wahrenst@gmx.net>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ const: pwm-gpio
+
+ "#pwm-cells":
+ const: 3
+ description:
+ See pwm.yaml in this directory for a description of the cells format.
+      The first cell, which represents the PWM instance number, must always
+      be zero.
+
+ gpios:
+ description:
+ GPIO to be modulated
+ maxItems: 1
+
+required:
+ - compatible
+ - "#pwm-cells"
+ - gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ pwm {
+ #pwm-cells = <3>;
+ compatible = "pwm-gpio";
+ gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
+ };
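A hedged consumer sketch, assuming a pwm-leds user, a &pwm label on the pwm-gpio node above and a 40 ms period, showing that the first specifier cell (the instance number) must stay zero:

    led-controller {
        compatible = "pwm-leds";

        led-1 {
            /* instance 0 (must be zero), 40 ms period, no flags */
            pwms = <&pwm 0 40000000 0>;
            max-brightness = <255>;
        };
    };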
diff --git a/Documentation/devicetree/bindings/pwm/pwm.yaml b/Documentation/devicetree/bindings/pwm/pwm.yaml
index abd9fa873354..f2206ec3c7c4 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm.yaml
@@ -16,8 +16,10 @@ properties:
pattern: "^pwm(@.*|-([0-9]|[1-9][0-9]+))?$"
"#pwm-cells":
- description:
- Number of cells in a PWM specifier.
+ description: |
+ Number of cells in a PWM specifier. Typically the cells represent, in
+ order: the chip-relative PWM number, the PWM period in nanoseconds and
+ optionally a number of flags (defined in <dt-bindings/pwm/pwm.h>).
required:
- "#pwm-cells"
diff --git a/Documentation/devicetree/bindings/regulator/mediatek,mt6873-dvfsrc-regulator.yaml b/Documentation/devicetree/bindings/regulator/mediatek,mt6873-dvfsrc-regulator.yaml
new file mode 100644
index 000000000000..704828687970
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mediatek,mt6873-dvfsrc-regulator.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/mediatek,mt6873-dvfsrc-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek DVFSRC-controlled Regulators
+
+description:
+ The Dynamic Voltage and Frequency Scaling Resource Collector Regulators
+ are controlled with votes to the DVFSRC hardware.
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt6873-dvfsrc-regulator
+ - mediatek,mt8183-dvfsrc-regulator
+ - mediatek,mt8192-dvfsrc-regulator
+ - mediatek,mt8195-dvfsrc-regulator
+
+ dvfsrc-vcore:
+ description: DVFSRC-controlled SoC Vcore regulator
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+
+ dvfsrc-vscp:
+ description: DVFSRC-controlled System Control Processor regulator
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+anyOf:
+ - required:
+ - dvfsrc-vcore
+ - required:
+ - dvfsrc-vscp
+
+additionalProperties: false
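The schema ships without an example, so here is a minimal hedged sketch of the sub-regulators as they might appear under a DVFSRC node; the node name, compatible variant and voltage ranges are assumptions:

    regulators {
        compatible = "mediatek,mt8195-dvfsrc-regulator";

        dvfsrc_vcore: dvfsrc-vcore {
            regulator-name = "dvfsrc-vcore";
            regulator-min-microvolt = <550000>;
            regulator-max-microvolt = <750000>;
            regulator-always-on;
        };

        dvfsrc_vscp: dvfsrc-vscp {
            regulator-name = "dvfsrc-vscp";
            regulator-min-microvolt = <550000>;
            regulator-max-microvolt = <750000>;
            regulator-always-on;
        };
    };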
diff --git a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
index 6317daf76d1f..cd4aa27218a1 100644
--- a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
@@ -16,7 +16,11 @@ description: |
properties:
compatible:
- const: mediatek,mt6315-regulator
+ oneOf:
+ - items:
+ - const: mediatek,mt6319-regulator
+ - const: mediatek,mt6315-regulator
+ - const: mediatek,mt6315-regulator
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
index 849bfa50bdba..f8057bba747a 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
@@ -96,7 +96,6 @@ properties:
required:
- compatible
- reg
- - interrupts
- regulators
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml b/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml
new file mode 100644
index 000000000000..3aaa9653419a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml
@@ -0,0 +1,185 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/qcom,qca6390-pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. QCA6390 PMU Regulators
+
+maintainers:
+ - Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+description:
+ The QCA6390 package contains discrete modules for WLAN and Bluetooth. They
+ are powered by the Power Management Unit (PMU) that takes inputs from the
+  host and provides LDO outputs. This binding describes the PMU.
+
+properties:
+ compatible:
+ enum:
+ - qcom,qca6390-pmu
+ - qcom,wcn7850-pmu
+
+ vdd-supply:
+ description: VDD supply regulator handle
+
+ vddaon-supply:
+ description: VDD_AON supply regulator handle
+
+ vdddig-supply:
+ description: VDD_DIG supply regulator handle
+
+ vddpmu-supply:
+ description: VDD_PMU supply regulator handle
+
+ vddio1p2-supply:
+ description: VDD_IO_1P2 supply regulator handle
+
+ vddrfa0p95-supply:
+ description: VDD_RFA_0P95 supply regulator handle
+
+ vddrfa1p2-supply:
+ description: VDD_RFA_1P2 supply regulator handle
+
+ vddrfa1p3-supply:
+ description: VDD_RFA_1P3 supply regulator handle
+
+ vddrfa1p8-supply:
+ description: VDD_RFA_1P8 supply regulator handle
+
+ vddrfa1p9-supply:
+ description: VDD_RFA_1P9 supply regulator handle
+
+ vddpcie1p3-supply:
+ description: VDD_PCIE_1P3 supply regulator handle
+
+ vddpcie1p9-supply:
+ description: VDD_PCIE_1P9 supply regulator handle
+
+ vddio-supply:
+ description: VDD_IO supply regulator handle
+
+ wlan-enable-gpios:
+ maxItems: 1
+ description: GPIO line enabling the ATH11K WLAN module supplied by the PMU
+
+ bt-enable-gpios:
+ maxItems: 1
+ description: GPIO line enabling the ATH11K Bluetooth module supplied by the PMU
+
+ clocks:
+ maxItems: 1
+ description: Reference clock handle
+
+ regulators:
+ type: object
+ description:
+ LDO outputs of the PMU
+
+ patternProperties:
+ "^ldo[0-9]$":
+ $ref: regulator.yaml#
+ type: object
+ unevaluatedProperties: false
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - regulators
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,qca6390-pmu
+ then:
+ required:
+ - vddaon-supply
+ - vddpmu-supply
+ - vddrfa0p95-supply
+ - vddrfa1p3-supply
+ - vddrfa1p9-supply
+ - vddpcie1p3-supply
+ - vddpcie1p9-supply
+ - vddio-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,wcn7850-pmu
+ then:
+ required:
+ - vdd-supply
+ - vddio-supply
+ - vddaon-supply
+ - vdddig-supply
+ - vddrfa1p2-supply
+ - vddrfa1p8-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ pmu {
+ compatible = "qcom,qca6390-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_state>, <&wlan_en_state>;
+
+ vddaon-supply = <&vreg_s6a_0p95>;
+ vddpmu-supply = <&vreg_s2f_0p95>;
+ vddrfa0p95-supply = <&vreg_s2f_0p95>;
+ vddrfa1p3-supply = <&vreg_s8c_1p3>;
+ vddrfa1p9-supply = <&vreg_s5a_1p9>;
+ vddpcie1p3-supply = <&vreg_s8c_1p3>;
+ vddpcie1p9-supply = <&vreg_s5a_1p9>;
+ vddio-supply = <&vreg_s4a_1p8>;
+
+ wlan-enable-gpios = <&tlmm 20 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p7: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p7";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
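The schema also covers qcom,wcn7850-pmu, whose required inputs differ; a hedged sketch, in which the host-side supply names, GPIO numbers and the subset of LDO nodes shown are assumptions:

    pmu {
        compatible = "qcom,wcn7850-pmu";

        vdd-supply = <&vreg_s5g_0p85>;
        vddio-supply = <&vreg_l15b_1p8>;
        vddaon-supply = <&vreg_s2g_0p85>;
        vdddig-supply = <&vreg_s3e_0p95>;
        vddrfa1p2-supply = <&vreg_s1f_1p2>;
        vddrfa1p8-supply = <&vreg_s6g_1p86>;

        wlan-enable-gpios = <&tlmm 80 GPIO_ACTIVE_HIGH>;
        bt-enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;

        regulators {
            vreg_pmu_rfa_cmn: ldo0 {
                regulator-name = "vreg_pmu_rfa_cmn";
            };

            vreg_pmu_aon_0p59: ldo1 {
                regulator-name = "vreg_pmu_aon_0p59";
            };

            vreg_pmu_wlcx_0p8: ldo2 {
                regulator-name = "vreg_pmu_wlcx_0p8";
            };

            vreg_pmu_wlmx_0p85: ldo3 {
                regulator-name = "vreg_pmu_wlmx_0p85";
            };
        };
    };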
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
index 609c06615bdc..87accc6f13b8 100644
--- a/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
+++ b/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
@@ -75,6 +75,12 @@ properties:
description:
regulator description for ldo[1-2].
+ properties:
+ richtek,fixed-microvolt:
+ description: |
+ This property can be used to set a fixed operating voltage that lies outside
+ the range of the regulator's adjustable mode.
+
required:
- compatible
- reg
@@ -177,6 +183,8 @@ examples:
};
};
ldo1 {
+ /* Fixed LDO VOUT */
+ richtek,fixed-microvolt = <1200000>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
@@ -185,7 +193,8 @@ examples:
};
};
ldo2 {
- regulator-min-microvolt = <3300000>;
+ /* Adjustable LDO VOUT */
+ regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-state-mem {
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd96801-regulator.yaml b/Documentation/devicetree/bindings/regulator/rohm,bd96801-regulator.yaml
new file mode 100644
index 000000000000..b3d2d7d583ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd96801-regulator.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/rohm,bd96801-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD96801 Power Management Integrated Circuit regulators
+
+maintainers:
+ - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description:
+ This module is part of the ROHM BD96801 MFD device. For more details
+ see Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml.
+
+ The regulator controller is represented as a sub-node of the PMIC node
+ on the device tree.
+
+  Regulator nodes should be named buck<number> and ldo<number>.
+  The valid names for BD96801 regulator nodes are
+  buck1, buck2, buck3, buck4, ldo5, ldo6 and ldo7.
+
+patternProperties:
+ "^ldo[5-7]$":
+ type: object
+ description:
+ Properties for single LDO regulator.
+ $ref: regulator.yaml#
+
+ properties:
+ rohm,initial-voltage-microvolt:
+ description:
+        Initial voltage for the regulator. The voltage can be tuned +/-150 mV
+        from this value. Note that this can be modified via I2C only when the
+        PMIC is in STBY state.
+ minimum: 300000
+ maximum: 3300000
+
+ unevaluatedProperties: false
+
+ "^buck[1-4]$":
+ type: object
+ description:
+ Properties for single BUCK regulator.
+ $ref: regulator.yaml#
+
+ properties:
+ rohm,initial-voltage-microvolt:
+ description:
+        Initial voltage for the regulator. The voltage can be tuned +/-150 mV
+        from this value. Note that this can be modified via I2C only when the
+        PMIC is in STBY state.
+ minimum: 500000
+ maximum: 3300000
+
+ rohm,keep-on-stby:
+ description:
+ Keep the regulator powered when PMIC transitions to STBY state.
+ type: boolean
+
+ unevaluatedProperties: false
+
+additionalProperties: false
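Since this sub-schema carries no example of its own, here is a minimal hedged sketch of the regulators node; the voltages are assumptions and the parent PMIC node from the MFD binding is omitted:

    regulators {
        buck1 {
            regulator-name = "buck1";
            rohm,initial-voltage-microvolt = <1000000>;
            rohm,keep-on-stby;
        };

        ldo5 {
            regulator-name = "ldo5";
            rohm,initial-voltage-microvolt = <1800000>;
        };
    };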
diff --git a/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt b/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt
deleted file mode 100644
index 63dc07877cd6..000000000000
--- a/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Spreadtrum SC2731 Voltage regulators
-
-The SC2731 integrates low-voltage and low quiescent current DCDC/LDO.
-14 LDO and 3 DCDCs are designed for external use. All DCDCs/LDOs have
-their own bypass (power-down) control signals. External tantalum or MLCC
-ceramic capacitors are recommended to use with these LDOs.
-
-Required properties:
- - compatible: should be "sprd,sc27xx-regulator".
-
-List of regulators provided by this controller. It is named according to
-its regulator type, BUCK_<name> and LDO_<name>. The definition for each
-of these nodes is defined using the standard binding for regulators at
-Documentation/devicetree/bindings/regulator/regulator.txt.
-
-The valid names for regulators are:
-BUCK:
- BUCK_CPU0, BUCK_CPU1, BUCK_RF
-LDO:
- LDO_CAMA0, LDO_CAMA1, LDO_CAMMOT, LDO_VLDO, LDO_EMMCCORE, LDO_SDCORE,
- LDO_SDIO, LDO_WIFIPA, LDO_USB33, LDO_CAMD0, LDO_CAMD1, LDO_CON,
- LDO_CAMIO, LDO_SRAM
-
-Example:
- regulators {
- compatible = "sprd,sc27xx-regulator";
-
- vddarm0: BUCK_CPU0 {
- regulator-name = "vddarm0";
- regulator-min-microvolt = <400000>;
- regulator-max-microvolt = <1996875>;
- regulator-ramp-delay = <25000>;
- regulator-always-on;
- };
-
- vddcama0: LDO_CAMA0 {
- regulator-name = "vddcama0";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3750000>;
- regulator-enable-ramp-delay = <100>;
- };
- ...
- };
diff --git a/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.yaml b/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.yaml
new file mode 100644
index 000000000000..ffb2924dde36
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/sprd,sc2731-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC2731 Power Management IC regulators
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+description: |
+  The SC2731 integrates low-voltage, low-quiescent-current DCDCs and LDOs.
+  14 LDOs and 3 DCDCs are designed for external use. All DCDCs/LDOs have
+ their own bypass (power-down) control signals. It is recommended to use
+ external tantalum or MLCC ceramic capacitors with these LDOs.
+ Valid names for the regulators are:
+ BUCK:
+ BUCK_CPU0, BUCK_CPU1, BUCK_RF
+ LDO:
+ LDO_CAMA0, LDO_CAMA1, LDO_CAMD0, LDO_CAMD1, LDO_CAMIO, LDO_CAMMOT,
+ LDO_CON, LDO_EMMCCORE, LDO_SDCORE, LDO_SDIO, LDO_SRAM, LDO_USB33,
+ LDO_VLDO, LDO_WIFIPA
+
+properties:
+ compatible:
+ const: sprd,sc2731-regulator
+
+patternProperties:
+ "^BUCK_(CPU[0-1]|RF)$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+
+ "^LDO_(CAM(A0|A1|D0|D1|IO|MOT)|CON|EMMCCORE|SD(CORE|IO)|SRAM|USB33|VLDO|WIFIPA)$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ regulators {
+ compatible = "sprd,sc2731-regulator";
+
+ BUCK_CPU0 {
+ regulator-name = "vddarm0";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1996875>;
+ regulator-ramp-delay = <25000>;
+ regulator-always-on;
+ };
+
+ LDO_CAMA0 {
+ regulator-name = "vddcama0";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ regulator-enable-ramp-delay = <100>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
index c9586d277f41..3cb2dad18781 100644
--- a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
+++ b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
@@ -11,7 +11,12 @@ maintainers:
properties:
compatible:
- const: st,stm32mp1,pwr-reg
+ oneOf:
+ - items:
+ - const: st,stm32mp1,pwr-reg
+ - items:
+ - const: st,stm32mp13-pwr-reg
+ - const: st,stm32mp1,pwr-reg
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/regulator/ti,tps65132.yaml b/Documentation/devicetree/bindings/regulator/ti,tps65132.yaml
index 6a6d1a3d6fa7..873d92738eb0 100644
--- a/Documentation/devicetree/bindings/regulator/ti,tps65132.yaml
+++ b/Documentation/devicetree/bindings/regulator/ti,tps65132.yaml
@@ -23,6 +23,8 @@ properties:
reg:
maxItems: 1
+ vin-supply: true
+
patternProperties:
"^out[pn]$":
type: object
@@ -65,6 +67,7 @@ examples:
regulator@3e {
compatible = "ti,tps65132";
reg = <0x3e>;
+ vin-supply = <&supply>;
outp {
regulator-name = "outp";
diff --git a/Documentation/devicetree/bindings/regulator/twl-regulator.txt b/Documentation/devicetree/bindings/regulator/twl-regulator.txt
deleted file mode 100644
index 549f80436deb..000000000000
--- a/Documentation/devicetree/bindings/regulator/twl-regulator.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-TWL family of regulators
-
-Required properties:
-For twl6030 regulators/LDOs
-- compatible:
- - "ti,twl6030-vaux1" for VAUX1 LDO
- - "ti,twl6030-vaux2" for VAUX2 LDO
- - "ti,twl6030-vaux3" for VAUX3 LDO
- - "ti,twl6030-vmmc" for VMMC LDO
- - "ti,twl6030-vpp" for VPP LDO
- - "ti,twl6030-vusim" for VUSIM LDO
- - "ti,twl6030-vana" for VANA LDO
- - "ti,twl6030-vcxio" for VCXIO LDO
- - "ti,twl6030-vdac" for VDAC LDO
- - "ti,twl6030-vusb" for VUSB LDO
- - "ti,twl6030-v1v8" for V1V8 LDO
- - "ti,twl6030-v2v1" for V2V1 LDO
- - "ti,twl6030-vdd1" for VDD1 SMPS
- - "ti,twl6030-vdd2" for VDD2 SMPS
- - "ti,twl6030-vdd3" for VDD3 SMPS
-For twl6032 regulators/LDOs
-- compatible:
- - "ti,twl6032-ldo1" for LDO1 LDO
- - "ti,twl6032-ldo2" for LDO2 LDO
- - "ti,twl6032-ldo3" for LDO3 LDO
- - "ti,twl6032-ldo4" for LDO4 LDO
- - "ti,twl6032-ldo5" for LDO5 LDO
- - "ti,twl6032-ldo6" for LDO6 LDO
- - "ti,twl6032-ldo7" for LDO7 LDO
- - "ti,twl6032-ldoln" for LDOLN LDO
- - "ti,twl6032-ldousb" for LDOUSB LDO
- - "ti,twl6032-smps3" for SMPS3 SMPS
- - "ti,twl6032-smps4" for SMPS4 SMPS
- - "ti,twl6032-vio" for VIO SMPS
-For twl4030 regulators/LDOs
-- compatible:
- - "ti,twl4030-vaux1" for VAUX1 LDO
- - "ti,twl4030-vaux2" for VAUX2 LDO
- - "ti,twl5030-vaux2" for VAUX2 LDO
- - "ti,twl4030-vaux3" for VAUX3 LDO
- - "ti,twl4030-vaux4" for VAUX4 LDO
- - "ti,twl4030-vmmc1" for VMMC1 LDO
- - "ti,twl4030-vmmc2" for VMMC2 LDO
- - "ti,twl4030-vpll1" for VPLL1 LDO
- - "ti,twl4030-vpll2" for VPLL2 LDO
- - "ti,twl4030-vsim" for VSIM LDO
- - "ti,twl4030-vdac" for VDAC LDO
- - "ti,twl4030-vintana2" for VINTANA2 LDO
- - "ti,twl4030-vio" for VIO LDO
- - "ti,twl4030-vdd1" for VDD1 SMPS
- - "ti,twl4030-vdd2" for VDD2 SMPS
- - "ti,twl4030-vintana1" for VINTANA1 LDO
- - "ti,twl4030-vintdig" for VINTDIG LDO
- - "ti,twl4030-vusb1v5" for VUSB1V5 LDO
- - "ti,twl4030-vusb1v8" for VUSB1V8 LDO
- - "ti,twl4030-vusb3v1" for VUSB3V1 LDO
-
-Optional properties:
-- Any optional property defined in bindings/regulator/regulator.txt
-For twl4030 regulators/LDOs:
- - regulator-initial-mode:
- - 0x08 - Sleep mode, the nominal output voltage is maintained with low power
- consumption with low load current capability.
- - 0x0e - Active mode, the regulator can deliver its nominal output voltage
- with full-load current capability.
-
-Example:
-
- xyz: regulator@0 {
- compatible = "ti,twl6030-vaux1";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3000000>;
- };
-
-For twl6030 regulators/LDOs:
-
- - ti,retain-on-reset: Does not turn off the supplies during warm
- reset. Could be needed for VMMC, as TWL6030
- reset sequence for this signal does not comply
- with the SD specification.
diff --git a/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml b/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml
index 03c18611e42d..b0b20af15313 100644
--- a/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml
+++ b/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml
@@ -42,6 +42,12 @@ properties:
0 = Port 1 Phy reset
1 = Port 2 Phy reset
+ regulator-vbus:
+ type: object
+ description: USB VBUS regulator
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
@@ -49,6 +55,7 @@ required:
- resets
- power-domains
- '#reset-cells'
+ - regulator-vbus
additionalProperties: false
@@ -64,4 +71,7 @@ examples:
resets = <&cpg R9A07G044_USB_PRESETN>;
power-domains = <&cpg>;
#reset-cells = <1>;
+ regulator-vbus {
+ regulator-name = "vbus";
+ };
};
diff --git a/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml b/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml
index e10eb98eddad..1db08ce9ae27 100644
--- a/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml
@@ -37,7 +37,7 @@ properties:
The second cell should contain the reset mask corresponding to the device
used by system controller.
- Please see http://processors.wiki.ti.com/index.php/TISCI for
+ Please see https://software-dl.ti.com/tisci/esd/latest/index.html for
protocol documentation for the values to be used for different devices.
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index d87dd50f1a4b..d067f2a468ee 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -47,6 +47,7 @@ properties:
- sifive,u74
- sifive,u74-mc
- thead,c906
+ - thead,c908
- thead,c910
- thead,c920
- const: riscv
diff --git a/Documentation/devicetree/bindings/riscv/microchip.yaml b/Documentation/devicetree/bindings/riscv/microchip.yaml
index 4a29c890619a..78ce76ae1b6d 100644
--- a/Documentation/devicetree/bindings/riscv/microchip.yaml
+++ b/Documentation/devicetree/bindings/riscv/microchip.yaml
@@ -29,6 +29,7 @@ properties:
- enum:
- aldec,tysom-m-mpfs250t-rev2
- aries,m100pfsevp
+ - beagle,beaglev-fire
- microchip,mpfs-sev-kit
- sundance,polarberry
- const: microchip,mpfs
diff --git a/Documentation/devicetree/bindings/riscv/starfive.yaml b/Documentation/devicetree/bindings/riscv/starfive.yaml
index b672f8521949..4d5c857b3cac 100644
--- a/Documentation/devicetree/bindings/riscv/starfive.yaml
+++ b/Documentation/devicetree/bindings/riscv/starfive.yaml
@@ -27,6 +27,7 @@ properties:
- items:
- enum:
- milkv,mars
+ - pine64,star64
- starfive,visionfive-2-v1.2a
- starfive,visionfive-2-v1.3b
- const: starfive,jh7110
diff --git a/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt b/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt
deleted file mode 100644
index d10cc06c0c37..000000000000
--- a/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-Device tree bindings for Marvell PXA SSP ports
-
-Required properties:
-
- - compatible: Must be one of
- mrvl,pxa25x-ssp
- mvrl,pxa25x-nssp
- mrvl,pxa27x-ssp
- mrvl,pxa3xx-ssp
- mvrl,pxa168-ssp
- mrvl,pxa910-ssp
- mrvl,ce4100-ssp
-
- - reg: The memory base
- - dmas: Two dma phandles, one for rx, one for tx
- - dma-names: Must be "rx", "tx"
-
-
-Example for PXA3xx:
-
- ssp0: ssp@41000000 {
- compatible = "mrvl,pxa3xx-ssp";
- reg = <0x41000000 0x40>;
- ssp-id = <1>;
- interrupts = <24>;
- clock-names = "pxa27x-ssp.0";
- dmas = <&dma 13
- &dma 14>;
- dma-names = "rx", "tx";
- };
-
- ssp1: ssp@41700000 {
- compatible = "mrvl,pxa3xx-ssp";
- reg = <0x41700000 0x40>;
- ssp-id = <2>;
- interrupts = <16>;
- clock-names = "pxa27x-ssp.1";
- dmas = <&dma 15
- &dma 16>;
- dma-names = "rx", "tx";
- };
-
- ssp2: ssp@41900000 {
- compatibl3 = "mrvl,pxa3xx-ssp";
- reg = <0x41900000 0x40>;
- ssp-id = <3>;
- interrupts = <0>;
- clock-names = "pxa27x-ssp.2";
- dmas = <&dma 66
- &dma 67>;
- dma-names = "rx", "tx";
- };
-
- ssp3: ssp@41a00000 {
- compatible = "mrvl,pxa3xx-ssp";
- reg = <0x41a00000 0x40>;
- ssp-id = <4>;
- interrupts = <13>;
- clock-names = "pxa27x-ssp.3";
- dmas = <&dma 2
- &dma 3>;
- dma-names = "rx", "tx";
- };
-
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
index ce1a6505eb51..3fb0534ea597 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-dcfg.yaml
@@ -8,7 +8,6 @@ title: Freescale Layerscape Device Configuration Unit
maintainers:
- Shawn Guo <shawnguo@kernel.org>
- - Li Yang <leoyang.li@nxp.com>
description: |
DCFG is the device configuration unit, that provides general purpose
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
index a6a511b00a12..2a456c8af992 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
@@ -8,7 +8,6 @@ title: Freescale Layerscape Supplemental Configuration Unit
maintainers:
- Shawn Guo <shawnguo@kernel.org>
- - Li Yang <leoyang.li@nxp.com>
description: |
SCFG is the supplemental configuration unit, that provides SoC specific
diff --git a/Documentation/devicetree/bindings/soc/hisilicon/hisilicon,hi3660-usb3-otg-bc.yaml b/Documentation/devicetree/bindings/soc/hisilicon/hisilicon,hi3660-usb3-otg-bc.yaml
new file mode 100644
index 000000000000..5c77c4925d19
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/hisilicon/hisilicon,hi3660-usb3-otg-bc.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/hisilicon/hisilicon,hi3660-usb3-otg-bc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hisilicon Kirin 960 USB OTG Battery Charging Syscon
+
+maintainers:
+ - Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+properties:
+ compatible:
+ items:
+ - const: hisilicon,hi3660-usb3-otg-bc
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ usb-phy:
+ $ref: /schemas/phy/hisilicon,hi3660-usb3.yaml
+ description: USB Phy node
+
+required:
+ - compatible
+ - reg
+ - usb-phy
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@ff200000 {
+ compatible = "hisilicon,hi3660-usb3-otg-bc", "syscon", "simple-mfd";
+ reg = <0xff200000 0x1000>;
+
+ usb-phy {
+ compatible = "hisilicon,hi3660-usb-phy";
+ #phy-cells = <0>;
+ hisilicon,pericrg-syscon = <&crg_ctrl>;
+ hisilicon,pctrl-syscon = <&pctrl>;
+ hisilicon,eye-diagram-param = <0x22466e4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/intel/intel,lgm-syscon.yaml b/Documentation/devicetree/bindings/soc/intel/intel,lgm-syscon.yaml
new file mode 100644
index 000000000000..6951d55356d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/intel/intel,lgm-syscon.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/intel/intel,lgm-syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Lightning Mountain (LGM) Syscon
+
+maintainers:
+ - Chuanhua Lei <lchuanhua@maxlinear.com>
+ - Rahul Tanwar <rtanwar@maxlinear.com>
+
+properties:
+ compatible:
+ items:
+ - const: intel,lgm-syscon
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ ranges: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+patternProperties:
+ "^emmc-phy@[0-9a-f]+$":
+ $ref: /schemas/phy/intel,lgm-emmc-phy.yaml#
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ chiptop@e0200000 {
+ compatible = "intel,lgm-syscon", "syscon";
+ reg = <0xe0200000 0x100>;
+ ranges = <0x0 0xe0200000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ emmc-phy@a8 {
+ compatible = "intel,lgm-emmc-phy";
+ reg = <0x00a8 0x10>;
+ clocks = <&emmc>;
+ #phy-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
index ba2014a8725c..a10326a9683d 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
+++ b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
@@ -33,6 +33,7 @@ properties:
- mediatek,mt8186-disp-mutex
- mediatek,mt8186-mdp3-mutex
- mediatek,mt8188-disp-mutex
+ - mediatek,mt8188-vpp-mutex
- mediatek,mt8192-disp-mutex
- mediatek,mt8195-disp-mutex
- mediatek,mt8195-vpp-mutex
diff --git a/Documentation/devicetree/bindings/soc/microchip/microchip,sparx5-cpu-syscon.yaml b/Documentation/devicetree/bindings/soc/microchip/microchip,sparx5-cpu-syscon.yaml
new file mode 100644
index 000000000000..1f0b542d2296
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/microchip/microchip,sparx5-cpu-syscon.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/microchip/microchip,sparx5-cpu-syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Sparx5 CPU Syscon
+
+maintainers:
+ - Lars Povlsen <lars.povlsen@microchip.com>
+
+properties:
+ compatible:
+ items:
+ - const: microchip,sparx5-cpu-syscon
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ mux-controller:
+ $ref: /schemas/mux/reg-mux.yaml#
+
+required:
+ - compatible
+ - reg
+ - mux-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ syscon@600000000 {
+ compatible = "microchip,sparx5-cpu-syscon", "syscon",
+ "simple-mfd";
+ reg = <0x6 0x00000000 0xd0>;
+
+ mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x88 0xf0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
index b4478f417edc..7afdb60edb22 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
@@ -31,6 +31,7 @@ properties:
- qcom,sc7280-aoss-qmp
- qcom,sc8180x-aoss-qmp
- qcom,sc8280xp-aoss-qmp
+ - qcom,sdx75-aoss-qmp
- qcom,sdm845-aoss-qmp
- qcom,sm6350-aoss-qmp
- qcom,sm8150-aoss-qmp
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.yaml
index 58500529b90f..141d666dc3f7 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.yaml
@@ -41,6 +41,7 @@ properties:
description:
Three entries specifying the outgoing ipc bit used for signaling the
remote end of the smp2p edge.
+ deprecated: true
qcom,local-pid:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -128,7 +129,7 @@ examples:
compatible = "qcom,smp2p";
qcom,smem = <431>, <451>;
interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 18>;
+ mboxes = <&apcs 18>;
qcom,local-pid = <0>;
qcom,remote-pid = <4>;
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.yaml
index db67cf043256..4900215f26af 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.yaml
@@ -33,6 +33,14 @@ properties:
specifier of the column in the subscription matrix representing the local
processor.
+ mboxes:
+ minItems: 1
+ maxItems: 5
+ description:
+ Reference to the mailbox representing the outgoing doorbell in APCS for
+ this client. Each entry represents the N:th remote processor by index
+ (0-indexed).
+
'#size-cells':
const: 0
@@ -47,6 +55,7 @@ patternProperties:
description:
Three entries specifying the outgoing ipc bit used for signaling the N:th
remote processor.
+ deprecated: true
"@[0-9a-f]$":
type: object
@@ -98,15 +107,18 @@ required:
- '#address-cells'
- '#size-cells'
-anyOf:
- - required:
- - qcom,ipc-1
- - required:
- - qcom,ipc-2
- - required:
- - qcom,ipc-3
+oneOf:
- required:
- - qcom,ipc-4
+ - mboxes
+ - anyOf:
+ - required:
+ - qcom,ipc-1
+ - required:
+ - qcom,ipc-2
+ - required:
+ - qcom,ipc-3
+ - required:
+ - qcom,ipc-4
additionalProperties: false
@@ -122,7 +134,7 @@ examples:
compatible = "qcom,smsm";
#address-cells = <1>;
#size-cells = <0>;
- qcom,ipc-3 = <&apcs 8 19>;
+ mboxes = <0>, <0>, <0>, <&apcs 19>;
apps_smsm: apps@0 {
reg = <0>;
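Because each mboxes entry is matched to a remote processor by its index, unused slots are padded with empty entries; a short hedged fragment restating the example above:

    /* Remote processor 3 signals through APCS bit 19; slots 0-2 are
     * left empty so the entry index lines up with the processor id.
     */
    mboxes = <0>, <0>, <0>, <&apcs 19>;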
diff --git a/Documentation/devicetree/bindings/soc/sprd/sprd,sc9863a-glbregs.yaml b/Documentation/devicetree/bindings/soc/sprd/sprd,sc9863a-glbregs.yaml
new file mode 100644
index 000000000000..49add564e5e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/sprd/sprd,sc9863a-glbregs.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/sprd/sprd,sc9863a-glbregs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SC9863A Syscon
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ items:
+ - const: sprd,sc9863a-glbregs
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ ranges: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+patternProperties:
+ "@[0-9a-f]+$":
+ $ref: /schemas/clock/sprd,sc9863a-clk.yaml
+ description: Clock controllers
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@20e00000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon", "simple-mfd";
+ reg = <0x20e00000 0x4000>;
+ ranges = <0 0x20e00000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ apahb_gate: apahb-gate@0 {
+ compatible = "sprd,sc9863a-apahb-gate";
+ reg = <0x0 0x1020>;
+ #clock-cells = <1>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/soc/sti/st,sti-syscon.yaml b/Documentation/devicetree/bindings/soc/sti/st,sti-syscon.yaml
index 5f97d9ff17fb..fc933d70d138 100644
--- a/Documentation/devicetree/bindings/soc/sti/st,sti-syscon.yaml
+++ b/Documentation/devicetree/bindings/soc/sti/st,sti-syscon.yaml
@@ -30,6 +30,15 @@ properties:
reg:
maxItems: 1
+ sti-sasg-codec:
+ description: STi internal audio codec
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: st,stih407-sas-codec
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
index a750035d6234..b6da72032151 100644
--- a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
@@ -40,7 +40,7 @@ properties:
TI_SCI_PD_SHARED - Allows the device to be shared by multiple hosts.
Please refer to dt-bindings/soc/ti,sci_pm_domain.h for the definitions.
- Please see http://processors.wiki.ti.com/index.php/TISCI for
+ Please see https://software-dl.ti.com/tisci/esd/latest/index.html for
protocol documentation for the values to be used for different devices.
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml b/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml
new file mode 100644
index 000000000000..a10a3b89ae05
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/ti/ti,am654-serdes-ctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments AM654 Serdes Control Syscon
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+properties:
+ compatible:
+ items:
+ - const: ti,am654-serdes-ctrl
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ mux-controller:
+ $ref: /schemas/mux/reg-mux.yaml#
+
+required:
+ - compatible
+ - reg
+ - mux-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ clock@4080 {
+ compatible = "ti,am654-serdes-ctrl", "syscon";
+ reg = <0x4080 0x4>;
+
+ mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x0 0x3>; /* lane select */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml b/Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml
index e6289fbe6907..378e9cc5fac2 100644
--- a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,j721e-system-controller.yaml
@@ -2,7 +2,7 @@
# Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
%YAML 1.2
---
-$id: http://devicetree.org/schemas/mfd/ti,j721e-system-controller.yaml#
+$id: http://devicetree.org/schemas/soc/ti/ti,j721e-system-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI J721e System Controller Registers R/W
@@ -19,7 +19,7 @@ description: |
and access the registers directly.
maintainers:
- - Kishon Vijay Abraham I <kishon@ti.com>
+ - Kishon Vijay Abraham I <kishon@kernel.org>
- Roger Quadros <rogerq@kernel.org>
properties:
diff --git a/Documentation/devicetree/bindings/spi/amlogic,a1-spifc.yaml b/Documentation/devicetree/bindings/spi/amlogic,a1-spifc.yaml
index ea47d30eef43..043879b434ac 100644
--- a/Documentation/devicetree/bindings/spi/amlogic,a1-spifc.yaml
+++ b/Documentation/devicetree/bindings/spi/amlogic,a1-spifc.yaml
@@ -23,6 +23,9 @@ properties:
clocks:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml b/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
index 32e7c14033c2..d29772994cf5 100644
--- a/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
@@ -18,10 +18,10 @@ properties:
oneOf:
- const: atmel,at91rm9200-spi
- items:
- - const: microchip,sam9x60-spi
- - const: atmel,at91rm9200-spi
- - items:
- - const: microchip,sam9x7-spi
+ - enum:
+ - microchip,sam9x60-spi
+ - microchip,sam9x7-spi
+ - microchip,sama7d65-spi
- const: atmel,at91rm9200-spi
reg:
diff --git a/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt b/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt
deleted file mode 100644
index 3d55dd64b1be..000000000000
--- a/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Broadcom BCM2835 SPI0 controller
-
-The BCM2835 contains two forms of SPI master controller, one known simply as
-SPI0, and the other known as the "Universal SPI Master"; part of the
-auxiliary block. This binding applies to the SPI0 controller.
-
-Required properties:
-- compatible: Should be one of "brcm,bcm2835-spi" for BCM2835/2836/2837 or
- "brcm,bcm2711-spi" for BCM2711 or "brcm,bcm7211-spi" for BCM7211.
-- reg: Should contain register location and length.
-- interrupts: Should contain interrupt.
-- clocks: The clock feeding the SPI controller.
-
-Example:
-
-spi@20204000 {
- compatible = "brcm,bcm2835-spi";
- reg = <0x7e204000 0x1000>;
- interrupts = <2 22>;
- clocks = <&clk_spi>;
- #address-cells = <1>;
- #size-cells = <0>;
-};
diff --git a/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.yaml b/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.yaml
new file mode 100644
index 000000000000..94da68792194
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/brcm,bcm2835-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM2835 SPI0 controller
+
+maintainers:
+ - Florian Fainelli <florian.fainelli@broadcom.com>
+ - Kanak Shilledar <kanakshilledar111@protonmail.com>
+ - Stefan Wahren <wahrenst@gmx.net>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm2835-spi
+ - brcm,bcm2711-spi
+ - brcm,bcm7211-spi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi@20204000 {
+ compatible = "brcm,bcm2835-spi";
+ reg = <0x7e204000 0x1000>;
+ interrupts = <2 22>;
+ clocks = <&clk_spi>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/fsl,dspi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/fsl,dspi-peripheral-props.yaml
new file mode 100644
index 000000000000..9b62b75e17a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/fsl,dspi-peripheral-props.yaml
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/fsl,dspi-peripheral-props.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Peripheral-specific properties for Freescale DSPI controller
+
+maintainers:
+ - Vladimir Oltean <olteanv@gmail.com>
+
+description:
+ See spi-peripheral-props.yaml for more info.
+
+properties:
+ fsl,spi-cs-sck-delay:
+ deprecated: true
+ description:
+ Delay in nanoseconds between activating chip select and the start of
+ clock signal, at the start of a transfer.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ fsl,spi-sck-cs-delay:
+ deprecated: true
+ description:
+ Delay in nanoseconds between stopping the clock signal and
+ deactivating chip select, at the end of a transfer.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+additionalProperties: true
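
For reference, a minimal sketch of a DSPI peripheral node using the two deprecated properties documented above; the flash device, chip-select and delay values are purely illustrative. The generic spi-cs-setup-delay-ns and spi-cs-hold-delay-ns properties, used in the fsl,dspi.yaml example further down, are the preferred replacements.

    flash@0 {
            compatible = "jedec,spi-nor";
            reg = <0>;
            spi-max-frequency = <16000000>;
            /* deprecated DSPI-specific delays, in nanoseconds */
            fsl,spi-cs-sck-delay = <100>;
            fsl,spi-sck-cs-delay = <50>;
    };
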
diff --git a/Documentation/devicetree/bindings/spi/fsl,dspi.yaml b/Documentation/devicetree/bindings/spi/fsl,dspi.yaml
new file mode 100644
index 000000000000..7ca8fceda717
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/fsl,dspi.yaml
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/fsl,dspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Freescale DSPI controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,vf610-dspi
+ - fsl,ls1021a-v1.0-dspi
+ - fsl,ls1012a-dspi
+ - fsl,ls1028a-dspi
+ - fsl,ls1043a-dspi
+ - fsl,ls1046a-dspi
+ - fsl,ls1088a-dspi
+ - fsl,ls2080a-dspi
+ - fsl,ls2085a-dspi
+ - fsl,lx2160a-dspi
+ - items:
+ - enum:
+ - fsl,ls1012a-dspi
+ - fsl,ls1028a-dspi
+ - fsl,ls1043a-dspi
+ - fsl,ls1046a-dspi
+ - fsl,ls1088a-dspi
+ - const: fsl,ls1021a-v1.0-dspi
+ - items:
+ - const: fsl,ls2080a-dspi
+ - const: fsl,ls2085a-dspi
+ - items:
+ - const: fsl,lx2160a-dspi
+ - const: fsl,ls2085a-dspi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: dspi
+
+ dmas:
+ items:
+ - description: DMA controller phandle and request line for TX
+ - description: DMA controller phandle and request line for RX
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ spi-num-chipselects:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The number of the chip's native chipselect signals;
+ cs-gpios do not count against this number.
+
+ big-endian: true
+
+ bus-num:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: SoC-specific identifier for the SPI controller.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - spi-num-chipselects
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/vf610-clock.h>
+
+ spi@4002c000 {
+ compatible = "fsl,vf610-dspi";
+ reg = <0x4002c000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks VF610_CLK_DSPI0>;
+ clock-names = "dspi";
+ spi-num-chipselects = <5>;
+ bus-num = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dspi0_1>;
+ big-endian;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <16000000>;
+ spi-cpol;
+ spi-cpha;
+ spi-cs-setup-delay-ns = <100>;
+ spi-cs-hold-delay-ns = <50>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/ibm,spi-fsi.yaml b/Documentation/devicetree/bindings/spi/ibm,spi-fsi.yaml
new file mode 100644
index 000000000000..d7fec4c3a801
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/ibm,spi-fsi.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/ibm,spi-fsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IBM FSI-attached SPI Controller
+
+maintainers:
+ - Eddie James <eajames@linux.ibm.com>
+
+description:
+ A SPI controller found on IBM Power processors, accessed over FSI from a
+ service processor. This node will always be a child node of an ibm,fsi2spi
+ node.
+
+properties:
+ compatible:
+ enum:
+ - ibm,spi-fsi
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ fsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ size = <0x80000>;
+ address-width = <24>;
+ pagesize = <256>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml b/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml
index 5f4f6b5615d0..0a1bada8f800 100644
--- a/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml
+++ b/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml
@@ -10,12 +10,17 @@ title: PXA2xx SSP SPI Controller
maintainers:
- Lubomir Rintel <lkundrak@v3.sk>
-allOf:
- - $ref: spi-controller.yaml#
-
properties:
compatible:
- const: marvell,mmp2-ssp
+ enum:
+ - marvell,mmp2-ssp
+ - mrvl,ce4100-ssp
+ - mvrl,pxa168-ssp
+ - mrvl,pxa25x-ssp
+ - mvrl,pxa25x-nssp
+ - mrvl,pxa27x-ssp
+ - mrvl,pxa3xx-ssp
+ - mrvl,pxa910-ssp
interrupts:
maxItems: 1
@@ -26,6 +31,16 @@ properties:
clocks:
maxItems: 1
+ dmas:
+ items:
+ - description: Receive DMA
+ - description: Transmit DMA
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
ready-gpios:
description: |
GPIO used to signal a SPI master that the FIFO is filled and we're
@@ -41,6 +56,18 @@ required:
dependencies:
ready-gpios: [ spi-slave ]
+allOf:
+ - $ref: spi-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: marvell,mmp2-ssp
+ then:
+ properties:
+ dmas: false
+ dma-names: false
+
unevaluatedProperties: false
examples:
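
A minimal sketch of the DMA properties added above, on an SSP variant other than marvell,mmp2-ssp (for which the if/then block disallows them); the unit address, interrupt, clock and DMA specifiers are hypothetical:

    spi@d4037000 {
            compatible = "mrvl,pxa910-ssp";
            reg = <0xd4037000 0x1000>;
            interrupts = <56>;
            clocks = <&clks 25>;
            dmas = <&dma 52>, <&dma 53>; /* receive, then transmit */
            dma-names = "rx", "tx";
            #address-cells = <1>;
            #size-cells = <0>;
    };
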
diff --git a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
index 74a817cc7d94..ffa8d1b48f8b 100644
--- a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
@@ -13,9 +13,6 @@ description:
maintainers:
- Conor Dooley <conor.dooley@microchip.com>
-allOf:
- - $ref: spi-controller.yaml#
-
properties:
compatible:
oneOf:
@@ -43,6 +40,32 @@ required:
- interrupts
- clocks
+allOf:
+ - $ref: spi-controller.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: microchip,mpfs-spi
+ then:
+ properties:
+ num-cs:
+ default: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: microchip,mpfs-spi
+ not:
+ required:
+ - cs-gpios
+ then:
+ properties:
+ num-cs:
+ maximum: 1
+
unevaluatedProperties: false
examples:
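
A minimal sketch of how the new num-cs constraints read in practice: with cs-gpios present, more than one chip select is allowed; without it, num-cs is capped at 1 and defaults to 1. All specifier values below are hypothetical:

    spi@20108000 {
            compatible = "microchip,mpfs-spi";
            reg = <0x20108000 0x1000>;
            interrupts = <54>;
            clocks = <&clkcfg 28>;
            cs-gpios = <&gpio0 12 0>, <&gpio0 13 0>;
            num-cs = <2>; /* permitted because cs-gpios is present */
            #address-cells = <1>;
            #size-cells = <0>;
    };
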
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
index fde3776a558b..bccd00a1ddd0 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
@@ -88,6 +88,10 @@ properties:
- renesas,r9a06g032-spi # RZ/N1D
- renesas,r9a06g033-spi # RZ/N1S
- const: renesas,rzn1-spi # RZ/N1
+ - description: T-HEAD TH1520 SoC SPI Controller
+ items:
+ - const: thead,th1520-spi
+ - const: snps,dw-apb-ssi
reg:
minItems: 1
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
index d4b61b0e8301..8de96abe9da1 100644
--- a/Documentation/devicetree/bindings/spi/spi-cadence.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
@@ -55,6 +55,13 @@ properties:
label:
description: Descriptive name of the SPI controller.
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: spi
+
required:
- compatible
- reg
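
A sketch of the new optional reset properties on a Cadence SPI node; the reset controller phandle and its specifier are hypothetical, while the remaining properties follow the existing example in this binding:

    spi@e0007000 {
            compatible = "xlnx,zynq-spi-r1p6";
            reg = <0xe0007000 0x1000>;
            interrupts = <0 49 4>;
            clocks = <&clkc 26>, <&clkc 35>;
            clock-names = "ref_clk", "pclk";
            num-cs = <4>;
            resets = <&rstc 26>;
            reset-names = "spi";
    };
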
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
deleted file mode 100644
index 30a79da9c039..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-ARM Freescale DSPI controller
-
-Required properties:
-- compatible : must be one of:
- "fsl,vf610-dspi",
- "fsl,ls1021a-v1.0-dspi",
- "fsl,ls1012a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"),
- "fsl,ls1028a-dspi",
- "fsl,ls1043a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"),
- "fsl,ls1046a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"),
- "fsl,ls1088a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"),
- "fsl,ls2080a-dspi" (optionally followed by "fsl,ls2085a-dspi"),
- "fsl,ls2085a-dspi",
- "fsl,lx2160a-dspi",
-- reg : Offset and length of the register set for the device
-- interrupts : Should contain SPI controller interrupt
-- clocks: from common clock binding: handle to dspi clock.
-- clock-names: from common clock binding: Shall be "dspi".
-- pinctrl-0: pin control group to be used for this controller.
-- pinctrl-names: must contain a "default" entry.
-- spi-num-chipselects : the number of the chipselect signals.
-
-Optional property:
-- big-endian: If present the dspi device's registers are implemented
- in big endian mode.
-- bus-num : the slave chip chipselect signal number.
-
-Optional SPI slave node properties:
-- fsl,spi-cs-sck-delay: a delay in nanoseconds between activating chip
- select and the start of clock signal, at the start of a transfer.
-- fsl,spi-sck-cs-delay: a delay in nanoseconds between stopping the clock
- signal and deactivating chip select, at the end of a transfer.
-
-Example:
-
-dspi0@4002c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,vf610-dspi";
- reg = <0x4002c000 0x1000>;
- interrupts = <0 67 0x04>;
- clocks = <&clks VF610_CLK_DSPI0>;
- clock-names = "dspi";
- spi-num-chipselects = <5>;
- bus-num = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_dspi0_1>;
- big-endian;
-
- sflash: at26df081a@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "atmel,at26df081a";
- spi-max-frequency = <16000000>;
- spi-cpol;
- spi-cpha;
- reg = <0>;
- linux,modalias = "m25p80";
- modal = "at26df081a";
- fsl,spi-cs-sck-delay = <100>;
- fsl,spi-sck-cs-delay = <50>;
- };
-};
-
-
diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
index 15938f81fdce..0bb443b8decd 100644
--- a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
@@ -122,6 +122,7 @@ properties:
allOf:
- $ref: arm,pl022-peripheral-props.yaml#
- $ref: cdns,qspi-nor-peripheral-props.yaml#
+ - $ref: fsl,dspi-peripheral-props.yaml#
- $ref: samsung,spi-peripheral-props.yaml#
- $ref: nvidia,tegra210-quad-peripheral-props.yaml#
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
index cf07b8f787a6..d9322704f358 100644
--- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -56,6 +56,9 @@ properties:
ranges: true
patternProperties:
+ "^regulators@[0-9a-f]+$":
+ $ref: /schemas/regulator/allwinner,sun20i-d1-system-ldos.yaml#
+
"^sram@[a-f0-9]+":
$ref: /schemas/sram/sram.yaml#
unevaluatedProperties: false
@@ -130,3 +133,28 @@ examples:
};
};
};
+
+ - |
+ syscon@3000000 {
+ compatible = "allwinner,sun20i-d1-system-control";
+ reg = <0x3000000 0x1000>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ regulators@3000150 {
+ compatible = "allwinner,sun20i-d1-system-ldos";
+ reg = <0x3000150 0x4>;
+
+ reg_ldoa: ldoa {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_ldob: ldob {
+ regulator-name = "vcc-dram";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sram/qcom,imem.yaml b/Documentation/devicetree/bindings/sram/qcom,imem.yaml
index 8025a852bc9c..faef3d6e0a94 100644
--- a/Documentation/devicetree/bindings/sram/qcom,imem.yaml
+++ b/Documentation/devicetree/bindings/sram/qcom,imem.yaml
@@ -22,6 +22,7 @@ properties:
- qcom,msm8974-imem
- qcom,qcs404-imem
- qcom,qdu1000-imem
+ - qcom,sa8775p-imem
- qcom,sc7180-imem
- qcom,sc7280-imem
- qcom,sdm630-imem
diff --git a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
index 6b3aea6d73b0..dad8de900495 100644
--- a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
+++ b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
@@ -10,6 +10,8 @@ maintainers:
- Vasily Khoruzhick <anarsoul@gmail.com>
- Yangtao Li <tiny.windzz@gmail.com>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
enum:
@@ -55,7 +57,6 @@ properties:
maxItems: 1
description: phandle to device controlling temperature offset SYS_CFG register
- # See Documentation/devicetree/bindings/thermal/thermal-sensor.yaml for details
"#thermal-sensor-cells":
enum:
- 0
@@ -135,9 +136,8 @@ required:
- compatible
- reg
- interrupts
- - '#thermal-sensor-cells'
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
index 01fccdfc4178..725303e1a364 100644
--- a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
@@ -11,6 +11,8 @@ maintainers:
description: Binding for Amlogic Thermal
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
oneOf:
@@ -44,17 +46,17 @@ required:
- clocks
- amlogic,ao-secure
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
- cpu_temp: temperature-sensor@ff634800 {
- compatible = "amlogic,g12a-cpu-thermal",
- "amlogic,g12a-thermal";
- reg = <0xff634800 0x50>;
- interrupts = <0x0 0x24 0x0>;
- clocks = <&clk 164>;
- #thermal-sensor-cells = <0>;
- amlogic,ao-secure = <&sec_AO>;
- };
+ temperature-sensor@ff634800 {
+ compatible = "amlogic,g12a-cpu-thermal",
+ "amlogic,g12a-thermal";
+ reg = <0xff634800 0x50>;
+ interrupts = <0x0 0x24 0x0>;
+ clocks = <&clk 164>;
+ #thermal-sensor-cells = <0>;
+ amlogic,ao-secure = <&sec_AO>;
+ };
...
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
index 89a2c32c0ab2..29a9844e8b48 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
@@ -19,30 +19,30 @@ description: |+
Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.yaml
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
const: brcm,bcm2711-thermal
- # See Documentation/devicetree/bindings/thermal/thermal-sensor.yaml for details
"#thermal-sensor-cells":
const: 0
required:
- compatible
- - '#thermal-sensor-cells'
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
- avs-monitor@7d5d2000 {
- compatible = "brcm,bcm2711-avs-monitor",
- "syscon", "simple-mfd";
- reg = <0x7d5d2000 0xf00>;
-
- thermal: thermal {
- compatible = "brcm,bcm2711-thermal";
- #thermal-sensor-cells = <0>;
- };
+ avs-monitor@7d5d2000 {
+ compatible = "brcm,bcm2711-avs-monitor",
+ "syscon", "simple-mfd";
+ reg = <0x7d5d2000 0xf00>;
+
+ thermal: thermal {
+ compatible = "brcm,bcm2711-thermal";
+ #thermal-sensor-cells = <0>;
};
+ };
...
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
index 267a0f423504..081486b44382 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
@@ -42,15 +42,14 @@ additionalProperties: false
required:
- compatible
- reg
- - "#thermal-sensor-cells"
examples:
- |
- thermal@f04d1500 {
- compatible = "brcm,avs-tmon-bcm7445", "brcm,avs-tmon";
- reg = <0xf04d1500 0x28>;
- interrupts = <0x6>;
- interrupt-names = "tmon";
- interrupt-parent = <&avs_host_l2_intc>;
- #thermal-sensor-cells = <0>;
- };
+ thermal@f04d1500 {
+ compatible = "brcm,avs-tmon-bcm7445", "brcm,avs-tmon";
+ reg = <0xf04d1500 0x28>;
+ interrupts = <0x6>;
+ interrupt-names = "tmon";
+ interrupt-parent = <&avs_host_l2_intc>;
+ #thermal-sensor-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.yaml b/Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.yaml
index 2b6026d9fbcf..ddf0f20e5285 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.yaml
@@ -34,7 +34,6 @@ required:
- compatible
- reg
- clocks
- - '#thermal-sensor-cells'
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml b/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml
index e02d04d4f71e..ceef318668bf 100644
--- a/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml
@@ -28,7 +28,6 @@ properties:
required:
- compatible
- - '#thermal-sensor-cells'
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml b/Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml
index f1fc3b0d8608..12e6418dc24d 100644
--- a/Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml
@@ -15,6 +15,8 @@ description:
sensor resistor. The voltage read across the sensor is mapped to
temperature using voltage-temperature lookup table.
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
const: generic-adc-thermal
@@ -44,11 +46,10 @@ properties:
required:
- compatible
- - '#thermal-sensor-cells'
- io-channels
- io-channel-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/hisilicon,tsensor.yaml b/Documentation/devicetree/bindings/thermal/hisilicon,tsensor.yaml
new file mode 100644
index 000000000000..11aca2b749d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/hisilicon,tsensor.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/hisilicon,tsensor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Temperature Sensor on HiSilicon SoCs
+
+maintainers:
+ - Abdulrasaq Lawani <abdulrasaqolawani@gmail.com>
+
+allOf:
+ - $ref: thermal-sensor.yaml
+
+properties:
+ compatible:
+ enum:
+ - hisilicon,tsensor
+ - hisilicon,hi3660-tsensor
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: thermal_clk
+
+ interrupts:
+ maxItems: 1
+
+ '#thermal-sensor-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#thermal-sensor-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/hi6220-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ temperature-sensor@f7030700 {
+ compatible = "hisilicon,tsensor";
+ reg = <0xf7030700 0x1000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sys_ctrl HI6220_TSENSOR_CLK>;
+ clock-names = "thermal_clk";
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt b/Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt
deleted file mode 100644
index 4b19d80e6558..000000000000
--- a/Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-* Temperature Sensor on hisilicon SoCs
-
-** Required properties :
-
-- compatible: "hisilicon,tsensor".
-- reg: physical base address of thermal sensor and length of memory mapped
- region.
-- interrupt: The interrupt number to the cpu. Defines the interrupt used
- by /SOCTHERM/tsensor.
-- clock-names: Input clock name, should be 'thermal_clk'.
-- clocks: phandles for clock specified in "clock-names" property.
-- #thermal-sensor-cells: Should be 1. See Documentation/devicetree/bindings/thermal/thermal-sensor.yaml for a description.
-
-Example :
-
-for Hi6220:
- tsensor: tsensor@0,f7030700 {
- compatible = "hisilicon,tsensor";
- reg = <0x0 0xf7030700 0x0 0x1000>;
- interrupts = <0 7 0x4>;
- clocks = <&sys_ctrl HI6220_TSENSOR_CLK>;
- clock-names = "thermal_clk";
- #thermal-sensor-cells = <1>;
- }
-
-for Hi3660:
- tsensor: tsensor@fff30000 {
- compatible = "hisilicon,hi3660-tsensor";
- reg = <0x0 0xfff30000 0x0 0x1000>;
- interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
- #thermal-sensor-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
index d2c1e4573c32..e7ddaa6c966e 100644
--- a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
@@ -16,6 +16,8 @@ description: |
for i.MX8MM which has ONLY 1 sensor, v2 is for i.MX8MP which has
2 sensors.
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
oneOf:
@@ -51,9 +53,8 @@ required:
- compatible
- reg
- clocks
- - '#thermal-sensor-cells'
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
index ca81c8afba79..79e691b08341 100644
--- a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
@@ -38,7 +38,6 @@ required:
- compatible
- reg
- interrupts
- - '#thermal-sensor-cells'
if:
properties:
diff --git a/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml b/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml
index 331cf4e662e3..0259cd3ce9c5 100644
--- a/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/mediatek,lvts-thermal.yaml
@@ -99,7 +99,6 @@ required:
- resets
- nvmem-cells
- nvmem-cell-names
- - "#thermal-sensor-cells"
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.yaml b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.yaml
index b0237d236021..19bb1f324183 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.yaml
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.yaml
@@ -197,7 +197,6 @@ required:
- clock-names
- resets
- reset-names
- - "#thermal-sensor-cells"
allOf:
- $ref: thermal-sensor.yaml
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra186-bpmp-thermal.yaml b/Documentation/devicetree/bindings/thermal/nvidia,tegra186-bpmp-thermal.yaml
index c91fd07e4061..978b9e6ab8a3 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra186-bpmp-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra186-bpmp-thermal.yaml
@@ -20,11 +20,7 @@ description: |
node. See ../firmware/nvidia,tegra186-bpmp.yaml for details of the
BPMP binding.
- This node represents a thermal sensor. See
-
- Documentation/devicetree/bindings/thermal/thermal-sensor.yaml
-
- for details of the core thermal binding.
+$ref: thermal-sensor.yaml#
properties:
compatible:
@@ -33,10 +29,6 @@ properties:
- nvidia,tegra194-bpmp-thermal
'#thermal-sensor-cells':
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Number of cells needed in the phandle specifier to
- identify a given sensor. Must be 1 and the single cell specifies
- the sensor index.
const: 1
-additionalProperties: false
+unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml b/Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml
index a35da257b070..63a29a1f7fe6 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml
@@ -27,6 +27,8 @@ description: |
TSENSOR has two channels which monitor two different spots of the SoC.
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
const: nvidia,tegra30-tsensor
@@ -46,19 +48,14 @@ properties:
"#thermal-sensor-cells":
const: 1
- assigned-clock-parents: true
- assigned-clock-rates: true
- assigned-clocks: true
-
required:
- compatible
- reg
- clocks
- resets
- interrupts
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/qcom,spmi-temp-alarm.yaml b/Documentation/devicetree/bindings/thermal/qcom,spmi-temp-alarm.yaml
index 5f08b6e59b8a..30b22151aa82 100644
--- a/Documentation/devicetree/bindings/thermal/qcom,spmi-temp-alarm.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom,spmi-temp-alarm.yaml
@@ -42,7 +42,6 @@ required:
- compatible
- reg
- interrupts
- - '#thermal-sensor-cells'
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml
index 7541e27704ca..bfad8130a042 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm-hc.yaml
@@ -8,6 +8,8 @@ title: Qualcomm's SPMI PMIC ADC HC Thermal Monitoring
maintainers:
- Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
const: qcom,spmi-adc-tm-hc
@@ -20,9 +22,6 @@ properties:
"#thermal-sensor-cells":
const: 1
- description:
- Number of cells required to uniquely identify the thermal sensors. Since
- we have multiple sensors this is set to 1
"#address-cells":
const: 1
@@ -106,9 +105,8 @@ required:
- interrupts
- "#address-cells"
- "#size-cells"
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
index d9d2657287cb..4470a5942fb2 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
@@ -8,6 +8,8 @@ title: Qualcomm's SPMI PMIC ADC Thermal Monitoring
maintainers:
- Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
enum:
@@ -23,9 +25,6 @@ properties:
"#thermal-sensor-cells":
const: 1
- description:
- Number of cells required to uniquely identify the thermal sensors. Since
- we have multiple sensors this is set to 1
"#address-cells":
const: 1
@@ -159,9 +158,8 @@ required:
- interrupts
- "#address-cells"
- "#size-cells"
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index 99d9c526c0b6..72048c5a0412 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -67,6 +67,7 @@ properties:
- qcom,sm8450-tsens
- qcom,sm8550-tsens
- qcom,sm8650-tsens
+ - qcom,x1e80100-tsens
- const: qcom,tsens-v2
- description: v2 of TSENS with combined interrupt
@@ -217,18 +218,16 @@ properties:
"#thermal-sensor-cells":
const: 1
- description:
- Number of cells required to uniquely identify the thermal sensors. Since
- we have multiple sensors this is set to 1
required:
- compatible
- interrupts
- interrupt-names
- - "#thermal-sensor-cells"
- "#qcom,sensors"
allOf:
+ - $ref: thermal-sensor.yaml#
+
- if:
properties:
compatible:
@@ -292,27 +291,21 @@ allOf:
required:
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
- // Example msm9860 based SoC (ipq8064):
- gcc: clock-controller {
-
- /* ... */
+ thermal-sensor {
+ compatible = "qcom,ipq8064-tsens";
- tsens: thermal-sensor {
- compatible = "qcom,ipq8064-tsens";
-
- nvmem-cells = <&tsens_calib>, <&tsens_calib_backup>;
- nvmem-cell-names = "calib", "calib_backup";
- interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "uplow";
+ nvmem-cells = <&tsens_calib>, <&tsens_calib_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow";
- #qcom,sensors = <11>;
- #thermal-sensor-cells = <1>;
- };
+ #qcom,sensors = <11>;
+ #thermal-sensor-cells = <1>;
};
- |
@@ -349,66 +342,66 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
// Example 1 (legacy: for pre v1 IP):
tsens1: thermal-sensor@4a9000 {
- compatible = "qcom,msm8916-tsens", "qcom,tsens-v0_1";
- reg = <0x4a9000 0x1000>, /* TM */
- <0x4a8000 0x1000>; /* SROT */
+ compatible = "qcom,msm8916-tsens", "qcom,tsens-v0_1";
+ reg = <0x4a9000 0x1000>, /* TM */
+ <0x4a8000 0x1000>; /* SROT */
- nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
- nvmem-cell-names = "calib", "calib_sel";
+ nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
+ nvmem-cell-names = "calib", "calib_sel";
- interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "uplow";
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow";
- #qcom,sensors = <5>;
- #thermal-sensor-cells = <1>;
+ #qcom,sensors = <5>;
+ #thermal-sensor-cells = <1>;
};
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
// Example 2 (for any platform containing v1 of the TSENS IP):
tsens2: thermal-sensor@4a9000 {
- compatible = "qcom,qcs404-tsens", "qcom,tsens-v1";
- reg = <0x004a9000 0x1000>, /* TM */
- <0x004a8000 0x1000>; /* SROT */
+ compatible = "qcom,qcs404-tsens", "qcom,tsens-v1";
+ reg = <0x004a9000 0x1000>, /* TM */
+ <0x004a8000 0x1000>; /* SROT */
- nvmem-cells = <&tsens_caldata>;
- nvmem-cell-names = "calib";
+ nvmem-cells = <&tsens_caldata>;
+ nvmem-cell-names = "calib";
- interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "uplow";
+ interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow";
- #qcom,sensors = <10>;
- #thermal-sensor-cells = <1>;
+ #qcom,sensors = <10>;
+ #thermal-sensor-cells = <1>;
};
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
// Example 3 (for any platform containing v2 of the TSENS IP):
tsens3: thermal-sensor@c263000 {
- compatible = "qcom,sdm845-tsens", "qcom,tsens-v2";
- reg = <0xc263000 0x1ff>,
- <0xc222000 0x1ff>;
+ compatible = "qcom,sdm845-tsens", "qcom,tsens-v2";
+ reg = <0xc263000 0x1ff>,
+ <0xc222000 0x1ff>;
- interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "uplow", "critical";
+ interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow", "critical";
- #qcom,sensors = <13>;
- #thermal-sensor-cells = <1>;
+ #qcom,sensors = <13>;
+ #thermal-sensor-cells = <1>;
};
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
// Example 4 (for any IPQ8074 based SoC-s):
tsens4: thermal-sensor@4a9000 {
- compatible = "qcom,ipq8074-tsens";
- reg = <0x4a9000 0x1000>,
- <0x4a8000 0x1000>;
+ compatible = "qcom,ipq8074-tsens";
+ reg = <0x4a9000 0x1000>,
+ <0x4a8000 0x1000>;
- interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "combined";
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "combined";
- #qcom,sensors = <16>;
- #thermal-sensor-cells = <1>;
+ #qcom,sensors = <16>;
+ #thermal-sensor-cells = <1>;
};
...
diff --git a/Documentation/devicetree/bindings/thermal/qoriq-thermal.yaml b/Documentation/devicetree/bindings/thermal/qoriq-thermal.yaml
index d155d6799da6..1876fe9555d6 100644
--- a/Documentation/devicetree/bindings/thermal/qoriq-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/qoriq-thermal.yaml
@@ -9,6 +9,8 @@ title: Thermal Monitoring Unit (TMU) on Freescale QorIQ SoCs
maintainers:
- Anson Huang <Anson.Huang@nxp.com>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
description: |
@@ -68,9 +70,8 @@ required:
- interrupts
- fsl,tmu-range
- fsl,tmu-calibration
- - '#thermal-sensor-cells'
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
index 6a81cb6e11bc..b6657d64cf3d 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
@@ -15,6 +15,8 @@ description:
maintainers:
- Niklas Söderlund <niklas.soderlund@ragnatech.se>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
enum:
@@ -57,7 +59,6 @@ required:
- clocks
- power-domains
- resets
- - "#thermal-sensor-cells"
if:
properties:
@@ -96,7 +97,7 @@ else:
required:
- interrupts
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -105,33 +106,33 @@ examples:
#include <dt-bindings/power/r8a7795-sysc.h>
tsc: thermal@e6198000 {
- compatible = "renesas,r8a7795-thermal";
- reg = <0xe6198000 0x100>,
- <0xe61a0000 0x100>,
- <0xe61a8000 0x100>;
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 522>;
- power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
- resets = <&cpg 522>;
- #thermal-sensor-cells = <1>;
+ compatible = "renesas,r8a7795-thermal";
+ reg = <0xe6198000 0x100>,
+ <0xe61a0000 0x100>,
+ <0xe61a8000 0x100>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ #thermal-sensor-cells = <1>;
};
thermal-zones {
- sensor_thermal: sensor-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
- thermal-sensors = <&tsc 0>;
-
- trips {
- sensor1_crit: sensor1-crit {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
+ sensor_thermal: sensor-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 0>;
+
+ trips {
+ sensor1_crit: sensor1-crit {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
+ };
};
- |
#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
@@ -139,14 +140,14 @@ examples:
#include <dt-bindings/power/r8a779a0-sysc.h>
tsc_r8a779a0: thermal@e6190000 {
- compatible = "renesas,r8a779a0-thermal";
- reg = <0xe6190000 0x200>,
- <0xe6198000 0x200>,
- <0xe61a0000 0x200>,
- <0xe61a8000 0x200>,
- <0xe61b0000 0x200>;
- clocks = <&cpg CPG_MOD 919>;
- power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
- resets = <&cpg 919>;
- #thermal-sensor-cells = <1>;
+ compatible = "renesas,r8a779a0-thermal";
+ reg = <0xe6190000 0x200>,
+ <0xe6198000 0x200>,
+ <0xe61a0000 0x200>,
+ <0xe61a8000 0x200>,
+ <0xe61b0000 0x200>;
+ clocks = <&cpg CPG_MOD 919>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 919>;
+ #thermal-sensor-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
index 119998d10ff4..221a58d18cad 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
@@ -98,8 +98,8 @@ examples:
# Example (non interrupt support)
- |
thermal@ffc48000 {
- compatible = "renesas,thermal-r8a7779", "renesas,rcar-thermal";
- reg = <0xffc48000 0x38>;
+ compatible = "renesas,thermal-r8a7779", "renesas,rcar-thermal";
+ reg = <0xffc48000 0x38>;
};
# Example (interrupt support)
@@ -109,12 +109,12 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
thermal@e61f0000 {
- compatible = "renesas,thermal-r8a73a4", "renesas,rcar-thermal";
- reg = <0xe61f0000 0x14>, <0xe61f0100 0x38>,
- <0xe61f0200 0x38>, <0xe61f0300 0x38>;
- interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp5_clks R8A73A4_CLK_THERMAL>;
- power-domains = <&pd_c5>;
+ compatible = "renesas,thermal-r8a73a4", "renesas,rcar-thermal";
+ reg = <0xe61f0000 0x14>, <0xe61f0100 0x38>,
+ <0xe61f0200 0x38>, <0xe61f0300 0x38>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks R8A73A4_CLK_THERMAL>;
+ power-domains = <&pd_c5>;
};
# Example (with thermal-zone)
@@ -124,32 +124,32 @@ examples:
#include <dt-bindings/power/r8a7790-sysc.h>
thermal: thermal@e61f0000 {
- compatible = "renesas,thermal-r8a7790",
- "renesas,rcar-gen2-thermal",
- "renesas,rcar-thermal";
- reg = <0xe61f0000 0x10>, <0xe61f0100 0x38>;
- interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 522>;
- power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
- resets = <&cpg 522>;
- #thermal-sensor-cells = <0>;
+ compatible = "renesas,thermal-r8a7790",
+ "renesas,rcar-gen2-thermal",
+ "renesas,rcar-thermal";
+ reg = <0xe61f0000 0x10>, <0xe61f0100 0x38>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ #thermal-sensor-cells = <0>;
};
thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay-passive = <1000>;
- polling-delay = <5000>;
-
- thermal-sensors = <&thermal>;
-
- trips {
- cpu-crit {
- temperature = <115000>;
- hysteresis = <0>;
- type = "critical";
- };
- };
- cooling-maps {
- };
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+
+ thermal-sensors = <&thermal>;
+
+ trips {
+ cpu-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
};
+ cooling-maps {
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml b/Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml
index 55f8ec0bec01..b717ea8261ca 100644
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml
@@ -9,6 +9,8 @@ title: Temperature Sensor ADC (TSADC) on Rockchip SoCs
maintainers:
- Heiko Stuebner <heiko@sntech.de>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
enum:
@@ -76,9 +78,8 @@ required:
- clocks
- clock-names
- resets
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml b/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml
index 03f4b926e53c..136589f5adee 100644
--- a/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml
@@ -13,6 +13,8 @@ description:
maintainers:
- Biju Das <biju.das.jz@bp.renesas.com>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
items:
@@ -43,36 +45,35 @@ required:
- clocks
- power-domains
- resets
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/r9a07g044-cpg.h>
tsu: thermal@10059400 {
- compatible = "renesas,r9a07g044-tsu",
- "renesas,rzg2l-tsu";
- reg = <0x10059400 0x400>;
- clocks = <&cpg CPG_MOD R9A07G044_TSU_PCLK>;
- resets = <&cpg R9A07G044_TSU_PRESETN>;
- power-domains = <&cpg>;
- #thermal-sensor-cells = <1>;
+ compatible = "renesas,r9a07g044-tsu",
+ "renesas,rzg2l-tsu";
+ reg = <0x10059400 0x400>;
+ clocks = <&cpg CPG_MOD R9A07G044_TSU_PCLK>;
+ resets = <&cpg R9A07G044_TSU_PRESETN>;
+ power-domains = <&cpg>;
+ #thermal-sensor-cells = <1>;
};
thermal-zones {
- cpu-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
- thermal-sensors = <&tsu 0>;
-
- trips {
- sensor_crit: sensor-crit {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "critical";
- };
- };
+ cpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsu 0>;
+
+ trips {
+ sensor_crit: sensor-crit {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
+ };
};
diff --git a/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml b/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
index 1344df708e2d..29a08b0729ee 100644
--- a/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
@@ -61,7 +61,8 @@ properties:
TRIMINFO at 0x10068000 contains data for TMU channel 2
minItems: 1
- '#thermal-sensor-cells': true
+ '#thermal-sensor-cells':
+ const: 0
vtmu-supply:
description: The regulator node supplying voltage to TMU.
diff --git a/Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml b/Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml
index 6f975821fa5e..8210b7079721 100644
--- a/Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml
@@ -14,6 +14,8 @@ description: |
maintainers:
- Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
enum:
@@ -38,9 +40,8 @@ properties:
required:
- compatible
- interrupts
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml b/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml
index 76aaa004c8ac..afa551f6185f 100644
--- a/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml
@@ -11,6 +11,8 @@ maintainers:
- Baolin Wang <baolin.wang7@gmail.com>
- Chunyan Zhang <zhang.lyra@gmail.com>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
const: sprd,ums512-thermal
@@ -77,35 +79,34 @@ required:
- clock-names
- nvmem-cells
- nvmem-cell-names
- - "#thermal-sensor-cells"
- "#address-cells"
- "#size-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
- ap_thm0: thermal@32200000 {
- compatible = "sprd,ums512-thermal";
- reg = <0x32200000 0x10000>;
- clock-names = "enable";
- clocks = <&aonapb_gate 32>;
- #thermal-sensor-cells = <1>;
- nvmem-cells = <&thm0_sign>, <&thm0_ratio>;
- nvmem-cell-names = "thm_sign_cal", "thm_ratio_cal";
- #address-cells = <1>;
- #size-cells = <0>;
-
- prometheus-sensor@0 {
- reg = <0>;
- nvmem-cells = <&thm0_sen0>;
- nvmem-cell-names = "sen_delta_cal";
- };
-
- ank-sensor@1 {
- reg = <1>;
- nvmem-cells = <&thm0_sen1>;
- nvmem-cell-names = "sen_delta_cal";
- };
+ thermal@32200000 {
+ compatible = "sprd,ums512-thermal";
+ reg = <0x32200000 0x10000>;
+ clock-names = "enable";
+ clocks = <&aonapb_gate 32>;
+ #thermal-sensor-cells = <1>;
+ nvmem-cells = <&thm0_sign>, <&thm0_ratio>;
+ nvmem-cell-names = "thm_sign_cal", "thm_ratio_cal";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ prometheus-sensor@0 {
+ reg = <0>;
+ nvmem-cells = <&thm0_sen0>;
+ nvmem-cell-names = "sen_delta_cal";
+ };
+
+ ank-sensor@1 {
+ reg = <1>;
+ nvmem-cells = <&thm0_sen1>;
+ nvmem-cell-names = "sen_delta_cal";
};
+ };
...
diff --git a/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml b/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml
index ab043084f667..1c01a80a0cdd 100644
--- a/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml
@@ -9,6 +9,8 @@ title: STMicroelectronics STM32 digital thermal sensor (DTS)
maintainers:
- Pascal Paillet <p.paillet@foss.st.com>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
const: st,stm32-thermal
@@ -30,14 +32,13 @@ properties:
const: 0
required:
- - "#thermal-sensor-cells"
- compatible
- reg
- interrupts
- clocks
- clock-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
index 68398e7e8655..0f435be1dbd8 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -49,7 +49,10 @@ properties:
to take when the temperature crosses those thresholds.
patternProperties:
- "^[a-zA-Z][a-zA-Z0-9\\-]{1,12}-thermal$":
+ # Node name is limited in size due to Linux kernel requirements - 19
+ # characters in total (see THERMAL_NAME_LENGTH, including terminating NUL
+ # byte):
+ "^[a-zA-Z][a-zA-Z0-9\\-]{1,10}-thermal$":
type: object
description:
Each thermal zone node contains information about how frequently it
@@ -229,7 +232,6 @@ patternProperties:
required:
- thermal-sensors
- - trips
additionalProperties: false
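
To make the tightened pattern concrete: a zone node name may now be at most 19 characters including the "-thermal" suffix, and (per the removed requirement above) a trips node is no longer mandatory. A minimal sketch with a hypothetical sensor phandle:

    thermal-zones {
            cpu0-thermal { /* 12 characters, within the 19-character limit */
                    polling-delay-passive = <250>;
                    polling-delay = <1000>;
                    thermal-sensors = <&sensor 0>;
            };
    };
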
diff --git a/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml b/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml
index 7ed0abe9290f..c123d9070525 100644
--- a/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml
@@ -9,6 +9,8 @@ title: Texas Instruments AM654 VTM (DTS)
maintainers:
- Keerthy <j-keerthy@ti.com>
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
const: ti,am654-vtm
@@ -26,9 +28,8 @@ required:
- compatible
- reg
- power-domains
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -46,11 +47,11 @@ examples:
thermal-sensors = <&vtm0 0>;
trips {
- mpu0_crit: mpu0_crit {
- temperature = <125000>; /* milliCelsius */
- hysteresis = <2000>; /* milliCelsius */
- type = "critical";
- };
+ mpu0_crit: mpu0_crit {
+ temperature = <125000>; /* milliCelsius */
+ hysteresis = <2000>; /* milliCelsius */
+ type = "critical";
+ };
};
};
...
diff --git a/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml b/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml
index 171b3622ed84..82b77b9795a3 100644
--- a/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml
@@ -22,6 +22,8 @@ description: |
Temp(C) = (-9.2627e-12) * x^4 + (6.0373e-08) * x^3 + \
(-1.7058e-04) * x^2 + (3.2512e-01) * x + (-4.9003e+01)
+$ref: thermal-sensor.yaml#
+
properties:
compatible:
enum:
@@ -64,9 +66,8 @@ required:
- compatible
- reg
- power-domains
- - "#thermal-sensor-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
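
As a worked instance of the calibration polynomial quoted in this binding's description, for a hypothetical raw sensor code x = 500:

    Temp(C) = (-9.2627e-12)*500^4 + (6.0373e-08)*500^3 + (-1.7058e-04)*500^2
              + (3.2512e-01)*500 + (-4.9003e+01)
            = -0.579 + 7.547 - 42.645 + 162.560 - 49.003
            = 77.88 (approximately), i.e. about 77.9 degrees Celsius
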
diff --git a/Documentation/devicetree/bindings/timer/realtek,otto-timer.yaml b/Documentation/devicetree/bindings/timer/realtek,otto-timer.yaml
new file mode 100644
index 000000000000..7b6ec2c69484
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/realtek,otto-timer.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/realtek,otto-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek Otto SoCs Timer/Counter
+
+description:
+ Realtek SoCs support a number of timers/counters. These are used
+ as a per CPU clock event generator and an overall CPU clocksource.
+
+maintainers:
+ - Chris Packham <chris.packham@alliedtelesis.co.nz>
+
+properties:
+ $nodename:
+ pattern: "^timer@[0-9a-f]+$"
+
+ compatible:
+ items:
+ - enum:
+ - realtek,rtl9302-timer
+ - const: realtek,otto-timer
+
+ reg:
+ items:
+ - description: timer0 registers
+ - description: timer1 registers
+ - description: timer2 registers
+ - description: timer3 registers
+ - description: timer4 registers
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: timer0 interrupt
+ - description: timer1 interrupt
+ - description: timer2 interrupt
+ - description: timer3 interrupt
+ - description: timer4 interrupt
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@3200 {
+ compatible = "realtek,rtl9302-timer", "realtek,otto-timer";
+ reg = <0x3200 0x10>, <0x3210 0x10>, <0x3220 0x10>,
+ <0x3230 0x10>, <0x3240 0x10>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <7>, <8>, <9>, <10>, <11>;
+ clocks = <&lx_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
index 360a5cf1ae9c..b6dd98d956f3 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
@@ -21,13 +21,24 @@ properties:
compatible:
items:
- enum:
+ - renesas,tmu-r8a73a4 # R-Mobile APE6
- renesas,tmu-r8a7740 # R-Mobile A1
+ - renesas,tmu-r8a7742 # RZ/G1H
+ - renesas,tmu-r8a7743 # RZ/G1M
+ - renesas,tmu-r8a7744 # RZ/G1N
+ - renesas,tmu-r8a7745 # RZ/G1E
+ - renesas,tmu-r8a77470 # RZ/G1C
- renesas,tmu-r8a774a1 # RZ/G2M
- renesas,tmu-r8a774b1 # RZ/G2N
- renesas,tmu-r8a774c0 # RZ/G2E
- renesas,tmu-r8a774e1 # RZ/G2H
- renesas,tmu-r8a7778 # R-Car M1A
- renesas,tmu-r8a7779 # R-Car H1
+ - renesas,tmu-r8a7790 # R-Car H2
+ - renesas,tmu-r8a7791 # R-Car M2-W
+ - renesas,tmu-r8a7792 # R-Car V2H
+ - renesas,tmu-r8a7793 # R-Car M2-N
+ - renesas,tmu-r8a7794 # R-Car E2
- renesas,tmu-r8a7795 # R-Car H3
- renesas,tmu-r8a7796 # R-Car M3-W
- renesas,tmu-r8a77961 # R-Car M3-W+
@@ -94,6 +105,7 @@ if:
compatible:
contains:
enum:
+ - renesas,tmu-r8a73a4
- renesas,tmu-r8a7740
- renesas,tmu-r8a7778
- renesas,tmu-r8a7779
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
index fced6f2d8ecb..b42d43d2de48 100644
--- a/Documentation/devicetree/bindings/timer/sifive,clint.yaml
+++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
@@ -40,6 +40,7 @@ properties:
- allwinner,sun20i-d1-clint
- sophgo,cv1800b-clint
- sophgo,cv1812h-clint
+ - sophgo,sg2002-clint
- thead,th1520-clint
- const: thead,c900-clint
- items:
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 0a419453d183..03e290cb65c3 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -168,6 +168,8 @@ properties:
- isil,isl69269
# Intersil ISL76682 Ambient Light Sensor
- isil,isl76682
+ # JEDEC JESD300 (SPD5118) Hub and Serial Presence Detect
+ - jedec,spd5118
# Linear Technology LTC2488
- lineartechnology,ltc2488
# 5 Bit Programmable, Pulse-Width Modulator
@@ -286,14 +288,22 @@ properties:
- mps,mp2857
# Monolithic Power Systems Inc. multi-phase controller mp2888
- mps,mp2888
+ # Monolithic Power Systems Inc. multi-phase controller mp2891
+ - mps,mp2891
# Monolithic Power Systems Inc. multi-phase controller mp2971
- mps,mp2971
# Monolithic Power Systems Inc. multi-phase controller mp2973
- mps,mp2973
# Monolithic Power Systems Inc. multi-phase controller mp2975
- mps,mp2975
+ # Monolithic Power Systems Inc. multi-phase controller mp2993
+ - mps,mp2993
+ # Monolithic Power Systems Inc. multi-phase hot-swap controller mp5920
+ - mps,mp5920
# Monolithic Power Systems Inc. multi-phase hot-swap controller mp5990
- mps,mp5990
+ # Monolithic Power Systems Inc. digital step-down converter mp9941
+ - mps,mp9941
# Monolithic Power Systems Inc. synchronous step-down converter mpq8785
- mps,mpq8785
# Temperature sensor with integrated fan control
diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
index 0874fc21f66f..6577a61cc075 100644
--- a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
+++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
@@ -65,6 +65,7 @@ patternProperties:
description: The hard wired USB devices
type: object
$ref: /schemas/usb/usb-device.yaml
+ additionalProperties: true
required:
- peer-hub
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index fbf47f0bacf1..87ec6772b2d4 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -338,6 +338,8 @@ patternProperties:
description: Czech Technical University in Prague
"^cubietech,.*":
description: Cubietech, Ltd.
+ "^cudy,.*":
+ description: Shenzhen Cudy Technology Co., Ltd.
"^cui,.*":
description: CUI Devices
"^cypress,.*":
@@ -394,6 +396,8 @@ patternProperties:
description: DPTechnics
"^dragino,.*":
description: Dragino Technology Co., Limited
+ "^dream,.*":
+ description: Dream Property GmbH
"^ds,.*":
description: DaSheng, Inc.
"^dserve,.*":
@@ -995,6 +999,8 @@ patternProperties:
description: MYIR Tech Limited
"^national,.*":
description: National Semiconductor
+ "^neardi,.*":
+ description: Shanghai Neardi Technology Co., Ltd.
"^nec,.*":
description: NEC LCD Technologies, Ltd.
"^neonode,.*":
@@ -1082,6 +1088,8 @@ patternProperties:
description: OpenPandora GmbH
"^openrisc,.*":
description: OpenRISC.io
+ "^openwrt,.*":
+ description: OpenWrt
"^option,.*":
description: Option NV
"^oranth,.*":
@@ -1254,6 +1262,8 @@ patternProperties:
description: Smart Battery System
"^schindler,.*":
description: Schindler
+ "^schneider,.*":
+ description: Schneider Electric
"^seagate,.*":
description: Seagate Technology PLC
"^seeed,.*":
diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
index 5149ecdc53c7..d732c42526df 100644
--- a/Documentation/driver-api/cxl/memory-devices.rst
+++ b/Documentation/driver-api/cxl/memory-devices.rst
@@ -328,6 +328,12 @@ CXL Memory Device
.. kernel-doc:: drivers/cxl/mem.c
:doc: cxl mem
+.. kernel-doc:: drivers/cxl/cxlmem.h
+ :internal:
+
+.. kernel-doc:: drivers/cxl/core/memdev.c
+ :identifiers:
+
CXL Port
--------
.. kernel-doc:: drivers/cxl/port.c
@@ -341,6 +347,15 @@ CXL Core
.. kernel-doc:: drivers/cxl/cxl.h
:internal:
+.. kernel-doc:: drivers/cxl/core/hdm.c
+ :doc: cxl core hdm
+
+.. kernel-doc:: drivers/cxl/core/hdm.c
+ :identifiers:
+
+.. kernel-doc:: drivers/cxl/core/cdat.c
+ :identifiers:
+
.. kernel-doc:: drivers/cxl/core/port.c
:doc: cxl core
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 18caebad7376..ac9ee7441887 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -464,7 +464,10 @@ SLAVE DMA ENGINE
SPI
devm_spi_alloc_master()
devm_spi_alloc_slave()
+ devm_spi_optimize_message()
devm_spi_register_controller()
+ devm_spi_register_host()
+ devm_spi_register_target()
WATCHDOG
devm_watchdog_register_device()
diff --git a/Documentation/driver-api/gpio/board.rst b/Documentation/driver-api/gpio/board.rst
index b33aa04f213f..4fd1cbd8296e 100644
--- a/Documentation/driver-api/gpio/board.rst
+++ b/Documentation/driver-api/gpio/board.rst
@@ -4,12 +4,6 @@ GPIO Mappings
This document explains how GPIOs can be assigned to given devices and functions.
-Note that it only applies to the new descriptor-based interface. For a
-description of the deprecated integer-based GPIO interface please refer to
-legacy.rst (actually, there is no real mapping possible with the old
-interface; you just fetch an integer from somewhere and request the
-corresponding GPIO).
-
All platforms can enable the GPIO library, but if the platform strictly
requires GPIO functionality to be present, it needs to select GPIOLIB from its
Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
diff --git a/Documentation/driver-api/gpio/consumer.rst b/Documentation/driver-api/gpio/consumer.rst
index ab56ab0dd7a6..bb3366047fad 100644
--- a/Documentation/driver-api/gpio/consumer.rst
+++ b/Documentation/driver-api/gpio/consumer.rst
@@ -2,9 +2,7 @@
GPIO Descriptor Consumer Interface
==================================
-This document describes the consumer interface of the GPIO framework. Note that
-it describes the new descriptor-based interface. For a description of the
-deprecated integer-based GPIO interface please refer to legacy.rst.
+This document describes the consumer interface of the GPIO framework.
Guidelines for GPIOs consumers
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index e541bd2e898b..ae433261e11a 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -69,9 +69,8 @@ driver code:
The code implementing a gpio_chip should support multiple instances of the
controller, preferably using the driver model. That code will configure each
-gpio_chip and issue gpiochip_add(), gpiochip_add_data(), or
-devm_gpiochip_add_data(). Removing a GPIO controller should be rare; use
-gpiochip_remove() when it is unavoidable.
+gpio_chip and issue gpiochip_add_data() or devm_gpiochip_add_data(). Removing
+a GPIO controller should be rare; use gpiochip_remove() when it is unavoidable.
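+
+For instance (a minimal sketch only; the ``foo_gpio`` driver, its line count
+and its register layout are hypothetical, not an existing driver), a platform
+driver might register its chip with the managed helper like this:
+
+.. code-block:: c
+
+	struct foo_gpio {
+		struct gpio_chip chip;
+		void __iomem *regs;	/* hypothetical register base */
+	};
+
+	static int foo_gpio_probe(struct platform_device *pdev)
+	{
+		struct foo_gpio *priv;
+
+		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+		if (!priv)
+			return -ENOMEM;
+
+		priv->chip.label = dev_name(&pdev->dev);
+		priv->chip.parent = &pdev->dev;
+		priv->chip.base = -1;		/* let gpiolib pick the numbers */
+		priv->chip.ngpio = 32;		/* hypothetical line count */
+		/* ... also set .get, .set, .direction_input, .direction_output ... */
+
+		/* Managed registration: the chip is removed automatically on unbind. */
+		return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
+	}
+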
Often a gpio_chip is part of an instance-specific structure with states not
exposed by the GPIO interfaces, such as addressing, power management, and more.
diff --git a/Documentation/driver-api/gpio/drivers-on-gpio.rst b/Documentation/driver-api/gpio/drivers-on-gpio.rst
index af632d764ac6..95572d2a94ce 100644
--- a/Documentation/driver-api/gpio/drivers-on-gpio.rst
+++ b/Documentation/driver-api/gpio/drivers-on-gpio.rst
@@ -27,7 +27,12 @@ hardware descriptions such as device tree or ACPI:
to the lines for a more permanent solution of this type.
- gpio-beeper: drivers/input/misc/gpio-beeper.c is used to provide a beep from
- an external speaker connected to a GPIO line.
+ an external speaker connected to a GPIO line. (This only switches the beep
+ off/on; if an actual PWM waveform is needed, see pwm-gpio below.)
+
+- pwm-gpio: drivers/pwm/pwm-gpio.c is used to toggle a GPIO with a high
+ resolution timer, producing a PWM waveform on the GPIO line with as much
+ precision as the Linux high resolution timers can provide.
- extcon-gpio: drivers/extcon/extcon-gpio.c is used when you need to read an
external connector status, such as a headset line for an audio driver or an
diff --git a/Documentation/driver-api/gpio/index.rst b/Documentation/driver-api/gpio/index.rst
index 1d48fe248f05..34b57cee3391 100644
--- a/Documentation/driver-api/gpio/index.rst
+++ b/Documentation/driver-api/gpio/index.rst
@@ -13,7 +13,6 @@ Contents:
consumer
board
drivers-on-gpio
- legacy
bt8xxgpio
Core
diff --git a/Documentation/driver-api/gpio/intro.rst b/Documentation/driver-api/gpio/intro.rst
index c9c19243b97f..5936a9c57df3 100644
--- a/Documentation/driver-api/gpio/intro.rst
+++ b/Documentation/driver-api/gpio/intro.rst
@@ -10,18 +10,6 @@ The documents in this directory give detailed instructions on how to access
GPIOs in drivers, and how to write a driver for a device that provides GPIOs
itself.
-Due to the history of GPIO interfaces in the kernel, there are two different
-ways to obtain and use GPIOs:
-
- - The descriptor-based interface is the preferred way to manipulate GPIOs,
- and is described by all the files in this directory excepted legacy.rst.
- - The legacy integer-based interface which is considered deprecated (but still
- usable for compatibility reasons) is documented in legacy.rst.
-
-The remainder of this document applies to the new descriptor-based interface.
-legacy.rst contains the same information applied to the legacy
-integer-based interface.
-
What is a GPIO?
===============
diff --git a/Documentation/driver-api/gpio/legacy.rst b/Documentation/driver-api/gpio/legacy.rst
deleted file mode 100644
index 534dfe95d128..000000000000
--- a/Documentation/driver-api/gpio/legacy.rst
+++ /dev/null
@@ -1,679 +0,0 @@
-======================
-Legacy GPIO Interfaces
-======================
-
-This provides an overview of GPIO access conventions on Linux.
-
-These calls use the gpio_* naming prefix. No other calls should use that
-prefix, or the related __gpio_* prefix.
-
-
-What is a GPIO?
-===============
-A "General Purpose Input/Output" (GPIO) is a flexible software-controlled
-digital signal. They are provided from many kinds of chip, and are familiar
-to Linux developers working with embedded and custom hardware. Each GPIO
-represents a bit connected to a particular pin, or "ball" on Ball Grid Array
-(BGA) packages. Board schematics show which external hardware connects to
-which GPIOs. Drivers can be written generically, so that board setup code
-passes such pin configuration data to drivers.
-
-System-on-Chip (SOC) processors heavily rely on GPIOs. In some cases, every
-non-dedicated pin can be configured as a GPIO; and most chips have at least
-several dozen of them. Programmable logic devices (like FPGAs) can easily
-provide GPIOs; multifunction chips like power managers, and audio codecs
-often have a few such pins to help with pin scarcity on SOCs; and there are
-also "GPIO Expander" chips that connect using the I2C or SPI serial busses.
-Most PC southbridges have a few dozen GPIO-capable pins (with only the BIOS
-firmware knowing how they're used).
-
-The exact capabilities of GPIOs vary between systems. Common options:
-
- - Output values are writable (high=1, low=0). Some chips also have
- options about how that value is driven, so that for example only one
- value might be driven ... supporting "wire-OR" and similar schemes
- for the other value (notably, "open drain" signaling).
-
- - Input values are likewise readable (1, 0). Some chips support readback
- of pins configured as "output", which is very useful in such "wire-OR"
- cases (to support bidirectional signaling). GPIO controllers may have
- input de-glitch/debounce logic, sometimes with software controls.
-
- - Inputs can often be used as IRQ signals, often edge triggered but
- sometimes level triggered. Such IRQs may be configurable as system
- wakeup events, to wake the system from a low power state.
-
- - Usually a GPIO will be configurable as either input or output, as needed
- by different product boards; single direction ones exist too.
-
- - Most GPIOs can be accessed while holding spinlocks, but those accessed
- through a serial bus normally can't. Some systems support both types.
-
-On a given board each GPIO is used for one specific purpose like monitoring
-MMC/SD card insertion/removal, detecting card writeprotect status, driving
-a LED, configuring a transceiver, bitbanging a serial bus, poking a hardware
-watchdog, sensing a switch, and so on.
-
-
-GPIO conventions
-================
-Note that this is called a "convention" because you don't need to do it this
-way, and it's no crime if you don't. There **are** cases where portability
-is not the main issue; GPIOs are often used for the kind of board-specific
-glue logic that may even change between board revisions, and can't ever be
-used on a board that's wired differently. Only least-common-denominator
-functionality can be very portable. Other features are platform-specific,
-and that can be critical for glue logic.
-
-Plus, this doesn't require any implementation framework, just an interface.
-One platform might implement it as simple inline functions accessing chip
-registers; another might implement it by delegating through abstractions
-used for several very different kinds of GPIO controller. (There is some
-optional code supporting such an implementation strategy, described later
-in this document, but drivers acting as clients to the GPIO interface must
-not care how it's implemented.)
-
-That said, if the convention is supported on their platform, drivers should
-use it when possible. Platforms must select GPIOLIB if GPIO functionality
-is strictly required. Drivers that can't work without
-standard GPIO calls should have Kconfig entries which depend on GPIOLIB. The
-GPIO calls are available, either as "real code" or as optimized-away stubs,
-when drivers use the include file:
-
- #include <linux/gpio.h>
-
-If you stick to this convention then it'll be easier for other developers to
-see what your code is doing, and help maintain it.
-
-Note that these operations include I/O barriers on platforms which need to
-use them; drivers don't need to add them explicitly.
-
-
-Identifying GPIOs
------------------
-GPIOs are identified by unsigned integers in the range 0..MAX_INT. That
-reserves "negative" numbers for other purposes like marking signals as
-"not available on this board", or indicating faults. Code that doesn't
-touch the underlying hardware treats these integers as opaque cookies.
-
-Platforms define how they use those integers, and usually #define symbols
-for the GPIO lines so that board-specific setup code directly corresponds
-to the relevant schematics. In contrast, drivers should only use GPIO
-numbers passed to them from that setup code, using platform_data to hold
-board-specific pin configuration data (along with other board specific
-data they need). That avoids portability problems.
-
-So for example one platform uses numbers 32-159 for GPIOs; while another
-uses numbers 0..63 with one set of GPIO controllers, 64-79 with another
-type of GPIO controller, and on one particular board 80-95 with an FPGA.
-The numbers need not be contiguous; either of those platforms could also
-use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
-
-If you want to initialize a structure with an invalid GPIO number, use
-some negative number (perhaps "-EINVAL"); that will never be valid. To
-test if such number from such a structure could reference a GPIO, you
-may use this predicate:
-
- int gpio_is_valid(int number);
-
-A number that's not valid will be rejected by calls which may request
-or free GPIOs (see below). Other numbers may also be rejected; for
-example, a number might be valid but temporarily unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is a platform-specific
-implementation issue, as are whether that support can leave "holes" in the space
-of GPIO numbers, and whether new controllers can be added at runtime. Such issues
-can affect things including whether adjacent GPIO numbers are both valid.
-
-Using GPIOs
------------
-The first thing a system should do with a GPIO is allocate it, using
-the gpio_request() call; see later.
-
-One of the next things to do with a GPIO, often in board setup code when
-setting up a platform_device using the GPIO, is mark its direction::
-
- /* set as input or output, returning 0 or negative errno */
- int gpio_direction_input(unsigned gpio);
- int gpio_direction_output(unsigned gpio, int value);
-
-The return value is zero for success, else a negative errno. It should
-be checked, since the get/set calls don't have error returns and since
-misconfiguration is possible. You should normally issue these calls from
-a task context. However, for spinlock-safe GPIOs it's OK to use them
-before tasking is enabled, as part of early board setup.
-
-For output GPIOs, the value provided becomes the initial output value.
-This helps avoid signal glitching during system startup.
-
-For compatibility with legacy interfaces to GPIOs, setting the direction
-of a GPIO implicitly requests that GPIO (see below) if it has not been
-requested already. That compatibility is being removed from the optional
-gpiolib framework.
-
-Setting the direction can fail if the GPIO number is invalid, or when
-that particular GPIO can't be used in that mode. It's generally a bad
-idea to rely on boot firmware to have set the direction correctly, since
-it probably wasn't validated to do more than boot Linux. (Similarly,
-that board setup code probably needs to multiplex that pin as a GPIO,
-and configure pullups/pulldowns appropriately.)
-
-
-Spinlock-Safe GPIO access
--------------------------
-Most GPIO controllers can be accessed with memory read/write instructions.
-Those don't need to sleep, and can safely be done from inside hard
-(nonthreaded) IRQ handlers and similar contexts.
-
-Use the following calls to access such GPIOs::
-
- /* GPIO INPUT: return zero or nonzero */
- int gpio_get_value(unsigned gpio);
-
- /* GPIO OUTPUT */
- void gpio_set_value(unsigned gpio, int value);
-
-The values are boolean, zero for low, nonzero for high. When reading the
-value of an output pin, the value returned should be what's seen on the
-pin ... that won't always match the specified output value, because of
-issues including open-drain signaling and output latencies.
-
-The get/set calls have no error returns because "invalid GPIO" should have
-been reported earlier from gpio_direction_*(). However, note that not all
-platforms can read the value of output pins; those that can't should always
-return zero. Also, using these calls for GPIOs that can't safely be accessed
-without sleeping (see below) is an error.
-
-Platform-specific implementations are encouraged to optimize the two
-calls to access the GPIO value in cases where the GPIO number (and for
-output, value) are constant. It's normal for them to need only a couple
-of instructions in such cases (reading or writing a hardware register),
-and not to need spinlocks. Such optimized calls can make bitbanging
-applications a lot more efficient (in both space and time) than spending
-dozens of instructions on subroutine calls.
-
-
-GPIO access that may sleep
---------------------------
-Some GPIO controllers must be accessed using message based busses like I2C
-or SPI. Commands to read or write those GPIO values require waiting to
-get to the head of a queue to transmit a command and get its response.
-This requires sleeping, which can't be done from inside IRQ handlers.
-To access such GPIOs, a different set of accessors is defined::
-
- /* GPIO INPUT: return zero or nonzero, might sleep */
- int gpio_get_value_cansleep(unsigned gpio);
-
- /* GPIO OUTPUT, might sleep */
- void gpio_set_value_cansleep(unsigned gpio, int value);
-
-Accessing such GPIOs requires a context which may sleep, for example
-a threaded IRQ handler, and those accessors must be used instead of
-spinlock-safe accessors without the cansleep() name suffix.
-
-Other than the fact that these accessors might sleep, and will work
-on GPIOs that can't be accessed from hardIRQ handlers, these calls act
-the same as the spinlock-safe calls.
-
-**IN ADDITION** calls to setup and configure such GPIOs must be made
-from contexts which may sleep, since they may need to access the GPIO
-controller chip too (These setup calls are usually made from board
-setup or driver probe/teardown code, so this is an easy constraint.)::
-
- gpio_direction_input()
- gpio_direction_output()
- gpio_request()
-
- ## gpio_request_one()
-
- gpio_free()
-
-
-Claiming and Releasing GPIOs
-----------------------------
-To help catch system configuration errors, two calls are defined::
-
- /* request GPIO, returning 0 or negative errno.
- * non-null labels may be useful for diagnostics.
- */
- int gpio_request(unsigned gpio, const char *label);
-
- /* release previously-claimed GPIO */
- void gpio_free(unsigned gpio);
-
-Passing invalid GPIO numbers to gpio_request() will fail, as will requesting
-GPIOs that have already been claimed with that call. The return value of
-gpio_request() must be checked. You should normally issue these calls from
-a task context. However, for spinlock-safe GPIOs it's OK to request GPIOs
-before tasking is enabled, as part of early board setup.
-
-These calls serve two basic purposes. One is marking the signals which
-are actually in use as GPIOs, for better diagnostics; systems may have
-several hundred potential GPIOs, but often only a dozen are used on any
-given board. Another is to catch conflicts, identifying errors when
-(a) two or more drivers wrongly think they have exclusive use of that
-signal, or (b) something wrongly believes it's safe to remove drivers
-needed to manage a signal that's in active use. That is, requesting a
-GPIO can serve as a kind of lock.
-
-Some platforms may also use knowledge about what GPIOs are active for
-power management, such as by powering down unused chip sectors and, more
-easily, gating off unused clocks.
-
-For GPIOs that use pins known to the pinctrl subsystem, that subsystem should
-be informed of their use; a gpiolib driver's .request() operation may call
-pinctrl_gpio_request(), and a gpiolib driver's .free() operation may call
-pinctrl_gpio_free(). The pinctrl subsystem allows a pinctrl_gpio_request()
-to succeed concurrently with a pin or pingroup being "owned" by a device for
-pin multiplexing.
-
-Any programming of pin multiplexing hardware that is needed to route the
-GPIO signal to the appropriate pin should occur within a GPIO driver's
-.direction_input() or .direction_output() operations, and occur after any
-setup of an output GPIO's value. This allows a glitch-free migration from a
-pin's special function to GPIO. This is sometimes required when using a GPIO
-to implement a workaround on signals typically driven by a non-GPIO HW block.
-
-Some platforms allow some or all GPIO signals to be routed to different pins.
-Similarly, other aspects of the GPIO or pin may need to be configured, such as
-pullup/pulldown. Platform software should arrange that any such details are
-configured prior to gpio_request() being called for those GPIOs, e.g. using
-the pinctrl subsystem's mapping table, so that GPIO users need not be aware
-of these details.
-
-Also note that it's your responsibility to have stopped using a GPIO
-before you free it.
-
-Considering in most cases GPIOs are actually configured right after they
-are claimed, three additional calls are defined::
-
- /* request a single GPIO, with initial configuration specified by
- * 'flags', identical to gpio_request() wrt other arguments and
- * return value
- */
- int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-
-where 'flags' is currently defined to specify the following properties:
-
- * GPIOF_DIR_IN - to configure direction as input
- * GPIOF_DIR_OUT - to configure direction as output
-
- * GPIOF_INIT_LOW - as output, set initial level to LOW
- * GPIOF_INIT_HIGH - as output, set initial level to HIGH
-
-since GPIOF_INIT_* are only valid when configured as output, so group valid
-combinations as:
-
- * GPIOF_IN - configure as input
- * GPIOF_OUT_INIT_LOW - configured as output, initial level LOW
- * GPIOF_OUT_INIT_HIGH - configured as output, initial level HIGH
-
-Further more, to ease the claim/release of multiple GPIOs, 'struct gpio' is
-introduced to encapsulate all three fields as::
-
- struct gpio {
- unsigned gpio;
- unsigned long flags;
- const char *label;
- };
-
-A typical example of usage::
-
- static struct gpio leds_gpios[] = {
- { 32, GPIOF_OUT_INIT_HIGH, "Power LED" }, /* default to ON */
- { 33, GPIOF_OUT_INIT_LOW, "Green LED" }, /* default to OFF */
- { 34, GPIOF_OUT_INIT_LOW, "Red LED" }, /* default to OFF */
- { 35, GPIOF_OUT_INIT_LOW, "Blue LED" }, /* default to OFF */
- { ... },
- };
-
- err = gpio_request_one(31, GPIOF_IN, "Reset Button");
- if (err)
- ...
-
-
-GPIOs mapped to IRQs
---------------------
-GPIO numbers are unsigned integers; so are IRQ numbers. These make up
-two logically distinct namespaces (GPIO 0 need not use IRQ 0). You can
-map between them using calls like::
-
- /* map GPIO numbers to IRQ numbers */
- int gpio_to_irq(unsigned gpio);
-
-Those return either the corresponding number in the other namespace, or
-else a negative errno code if the mapping can't be done. (For example,
-some GPIOs can't be used as IRQs.) It is an unchecked error to use a GPIO
-number that wasn't set up as an input using gpio_direction_input(), or
-to use an IRQ number that didn't originally come from gpio_to_irq().
-
-These two mapping calls are expected to cost on the order of a single
-addition or subtraction. They're not allowed to sleep.
-
-Non-error values returned from gpio_to_irq() can be passed to request_irq()
-or free_irq(). They will often be stored into IRQ resources for platform
-devices, by the board-specific initialization code. Note that IRQ trigger
-options are part of the IRQ interface, e.g. IRQF_TRIGGER_FALLING, as are
-system wakeup capabilities.
-
-
-Emulating Open Drain Signals
-----------------------------
-Sometimes shared signals need to use "open drain" signaling, where only the
-low signal level is actually driven. (That term applies to CMOS transistors;
-"open collector" is used for TTL.) A pullup resistor causes the high signal
-level. This is sometimes called a "wire-AND"; or more practically, from the
-negative logic (low=true) perspective this is a "wire-OR".
-
-One common example of an open drain signal is a shared active-low IRQ line.
-Also, bidirectional data bus signals sometimes use open drain signals.
-
-Some GPIO controllers directly support open drain outputs; many don't. When
-you need open drain signaling but your hardware doesn't directly support it,
-there's a common idiom you can use to emulate it with any GPIO pin that can
-be used as either an input or an output:
-
- LOW: gpio_direction_output(gpio, 0) ... this drives the signal
- and overrides the pullup.
-
- HIGH: gpio_direction_input(gpio) ... this turns off the output,
- so the pullup (or some other device) controls the signal.
-
-If you are "driving" the signal high but gpio_get_value(gpio) reports a low
-value (after the appropriate rise time passes), you know some other component
-is driving the shared signal low. That's not necessarily an error. As one
-common example, that's how I2C clocks are stretched: a slave that needs a
-slower clock delays the rising edge of SCK, and the I2C master adjusts its
-signaling rate accordingly.
-
-
-GPIO controllers and the pinctrl subsystem
-------------------------------------------
-
-A GPIO controller on a SOC might be tightly coupled with the pinctrl
-subsystem, in the sense that the pins can be used by other functions
-together with an optional gpio feature. We have already covered the
-case where e.g. a GPIO controller need to reserve a pin or set the
-direction of a pin by calling any of::
-
- pinctrl_gpio_request()
- pinctrl_gpio_free()
- pinctrl_gpio_direction_input()
- pinctrl_gpio_direction_output()
-
-But how does the pin control subsystem cross-correlate the GPIO
-numbers (which are a global business) to a certain pin on a certain
-pin controller?
-
-This is done by registering "ranges" of pins, which are essentially
-cross-reference tables. These are described in
-Documentation/driver-api/pin-control.rst
-
-While the pin allocation is totally managed by the pinctrl subsystem,
-gpio (under gpiolib) is still maintained by gpio drivers. It may happen
-that different pin ranges in a SoC is managed by different gpio drivers.
-
-This makes it logical to let gpio drivers announce their pin ranges to
-the pin ctrl subsystem before it will call 'pinctrl_gpio_request' in order
-to request the corresponding pin to be prepared by the pinctrl subsystem
-before any gpio usage.
-
-For this, the gpio controller can register its pin range with pinctrl
-subsystem. There are two ways of doing it currently: with or without DT.
-
-For with DT support refer to Documentation/devicetree/bindings/gpio/gpio.txt.
-
-For non-DT support, user can call gpiochip_add_pin_range() with appropriate
-parameters to register a range of gpio pins with a pinctrl driver. For this
-exact name string of pinctrl device has to be passed as one of the
-argument to this routine.
-
-
-What do these conventions omit?
-===============================
-One of the biggest things these conventions omit is pin multiplexing, since
-this is highly chip-specific and nonportable. One platform might not need
-explicit multiplexing; another might have just two options for use of any
-given pin; another might have eight options per pin; another might be able
-to route a given GPIO to any one of several pins. (Yes, those examples all
-come from systems that run Linux today.)
-
-Related to multiplexing is configuration and enabling of the pullups or
-pulldowns integrated on some platforms. Not all platforms support them,
-or support them in the same way; and any given board might use external
-pullups (or pulldowns) so that the on-chip ones should not be used.
-(When a circuit needs 5 kOhm, on-chip 100 kOhm resistors won't do.)
-Likewise drive strength (2 mA vs 20 mA) and voltage (1.8V vs 3.3V) is a
-platform-specific issue, as are models like (not) having a one-to-one
-correspondence between configurable pins and GPIOs.
-
-There are other system-specific mechanisms that are not specified here,
-like the aforementioned options for input de-glitching and wire-OR output.
-Hardware may support reading or writing GPIOs in gangs, but that's usually
-configuration dependent: for GPIOs sharing the same bank. (GPIOs are
-commonly grouped in banks of 16 or 32, with a given SOC having several such
-banks.) Some systems can trigger IRQs from output GPIOs, or read values
-from pins not managed as GPIOs. Code relying on such mechanisms will
-necessarily be nonportable.
-
-Dynamic definition of GPIOs is not currently standard; for example, as
-a side effect of configuring an add-on board with some GPIO expanders.
-
-
-GPIO implementor's framework (OPTIONAL)
-=======================================
-As noted earlier, there is an optional implementation framework making it
-easier for platforms to support different kinds of GPIO controller using
-the same programming interface. This framework is called "gpiolib".
-
-As a debugging aid, if debugfs is available a /sys/kernel/debug/gpio file
-will be found there. That will list all the controllers registered through
-this framework, and the state of the GPIOs currently in use.
-
-
-Controller Drivers: gpio_chip
------------------------------
-In this framework each GPIO controller is packaged as a "struct gpio_chip"
-with information common to each controller of that type:
-
- - methods to establish GPIO direction
- - methods used to access GPIO values
- - flag saying whether calls to its methods may sleep
- - optional debugfs dump method (showing extra state like pullup config)
- - label for diagnostics
-
-There is also per-instance data, which may come from device.platform_data:
-the number of its first GPIO, and how many GPIOs it exposes.
-
-The code implementing a gpio_chip should support multiple instances of the
-controller, possibly using the driver model. That code will configure each
-gpio_chip and issue gpiochip_add(). Removing a GPIO controller should be
-rare; use gpiochip_remove() when it is unavoidable.
-
-Most often a gpio_chip is part of an instance-specific structure with state
-not exposed by the GPIO interfaces, such as addressing, power management,
-and more. Chips such as codecs will have complex non-GPIO state.
-
-Any debugfs dump method should normally ignore signals which haven't been
-requested as GPIOs. They can use gpiochip_is_requested(), which returns
-either NULL or the label associated with that GPIO when it was requested.
-
-
-Platform Support
-----------------
-To force-enable this framework, a platform's Kconfig will "select" GPIOLIB,
-else it is up to the user to configure support for GPIO.
-
-If neither of these options are selected, the platform does not support
-GPIOs through GPIO-lib and the code cannot be enabled by the user.
-
-Trivial implementations of those functions can directly use framework
-code, which always dispatches through the gpio_chip::
-
- #define gpio_get_value __gpio_get_value
- #define gpio_set_value __gpio_set_value
-
-Fancier implementations could instead define those as inline functions with
-logic optimizing access to specific SOC-based GPIOs. For example, if the
-referenced GPIO is the constant "12", getting or setting its value could
-cost as little as two or three instructions, never sleeping. When such an
-optimization is not possible those calls must delegate to the framework
-code, costing at least a few dozen instructions. For bitbanged I/O, such
-instruction savings can be significant.
-
-For SOCs, platform-specific code defines and registers gpio_chip instances
-for each bank of on-chip GPIOs. Those GPIOs should be numbered/labeled to
-match chip vendor documentation, and directly match board schematics. They
-may well start at zero and go up to a platform-specific limit. Such GPIOs
-are normally integrated into platform initialization to make them always be
-available, from arch_initcall() or earlier; they can often serve as IRQs.
-
-
-Board Support
--------------
-For external GPIO controllers -- such as I2C or SPI expanders, ASICs, multi
-function devices, FPGAs or CPLDs -- most often board-specific code handles
-registering controller devices and ensures that their drivers know what GPIO
-numbers to use with gpiochip_add(). Their numbers often start right after
-platform-specific GPIOs.
-
-For example, board setup code could create structures identifying the range
-of GPIOs that chip will expose, and passes them to each GPIO expander chip
-using platform_data. Then the chip driver's probe() routine could pass that
-data to gpiochip_add().
-
-Initialization order can be important. For example, when a device relies on
-an I2C-based GPIO, its probe() routine should only be called after that GPIO
-becomes available. That may mean the device should not be registered until
-calls for that GPIO can work. One way to address such dependencies is for
-such gpio_chip controllers to provide setup() and teardown() callbacks to
-board specific code; those board specific callbacks would register devices
-once all the necessary resources are available, and remove them later when
-the GPIO controller device becomes unavailable.
-
-
-Sysfs Interface for Userspace (OPTIONAL)
-========================================
-Platforms which use the "gpiolib" implementors framework may choose to
-configure a sysfs user interface to GPIOs. This is different from the
-debugfs interface, since it provides control over GPIO direction and
-value instead of just showing a gpio state summary. Plus, it could be
-present on production systems without debugging support.
-
-Given appropriate hardware documentation for the system, userspace could
-know for example that GPIO #23 controls the write protect line used to
-protect boot loader segments in flash memory. System upgrade procedures
-may need to temporarily remove that protection, first importing a GPIO,
-then changing its output state, then updating the code before re-enabling
-the write protection. In normal use, GPIO #23 would never be touched,
-and the kernel would have no need to know about it.
-
-Again depending on appropriate hardware documentation, on some systems
-userspace GPIO can be used to determine system configuration data that
-standard kernels won't know about. And for some tasks, simple userspace
-GPIO drivers could be all that the system really needs.
-
-Note that standard kernel drivers exist for common "LEDs and Buttons"
-GPIO tasks: "leds-gpio" and "gpio_keys", respectively. Use those
-instead of talking directly to the GPIOs; they integrate with kernel
-frameworks better than your userspace code could.
-
-
-Paths in Sysfs
---------------
-There are three kinds of entry in /sys/class/gpio:
-
- - Control interfaces used to get userspace control over GPIOs;
-
- - GPIOs themselves; and
-
- - GPIO controllers ("gpio_chip" instances).
-
-That's in addition to standard files including the "device" symlink.
-
-The control interfaces are write-only:
-
- /sys/class/gpio/
-
- "export" ... Userspace may ask the kernel to export control of
- a GPIO to userspace by writing its number to this file.
-
- Example: "echo 19 > export" will create a "gpio19" node
- for GPIO #19, if that's not requested by kernel code.
-
- "unexport" ... Reverses the effect of exporting to userspace.
-
- Example: "echo 19 > unexport" will remove a "gpio19"
- node exported using the "export" file.
-
-GPIO signals have paths like /sys/class/gpio/gpio42/ (for GPIO #42)
-and have the following read/write attributes:
-
- /sys/class/gpio/gpioN/
-
- "direction" ... reads as either "in" or "out". This value may
- normally be written. Writing as "out" defaults to
- initializing the value as low. To ensure glitch free
- operation, values "low" and "high" may be written to
- configure the GPIO as an output with that initial value.
-
- Note that this attribute *will not exist* if the kernel
- doesn't support changing the direction of a GPIO, or
- it was exported by kernel code that didn't explicitly
- allow userspace to reconfigure this GPIO's direction.
-
- "value" ... reads as either 0 (low) or 1 (high). If the GPIO
- is configured as an output, this value may be written;
- any nonzero value is treated as high.
-
- If the pin can be configured as interrupt-generating interrupt
- and if it has been configured to generate interrupts (see the
- description of "edge"), you can poll(2) on that file and
- poll(2) will return whenever the interrupt was triggered. If
- you use poll(2), set the events POLLPRI. If you use select(2),
- set the file descriptor in exceptfds. After poll(2) returns,
- either lseek(2) to the beginning of the sysfs file and read the
- new value or close the file and re-open it to read the value.
-
- "edge" ... reads as either "none", "rising", "falling", or
- "both". Write these strings to select the signal edge(s)
- that will make poll(2) on the "value" file return.
-
- This file exists only if the pin can be configured as an
- interrupt generating input pin.
-
- "active_low" ... reads as either 0 (false) or 1 (true). Write
- any nonzero value to invert the value attribute both
- for reading and writing. Existing and subsequent
- poll(2) support configuration via the edge attribute
- for "rising" and "falling" edges will follow this
- setting.
-
-GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
-controller implementing GPIOs starting at #42) and have the following
-read-only attributes:
-
- /sys/class/gpio/gpiochipN/
-
- "base" ... same as N, the first GPIO managed by this chip
-
- "label" ... provided for diagnostics (not always unique)
-
- "ngpio" ... how many GPIOs this manges (N to N + ngpio - 1)
-
-Board documentation should in most cases cover what GPIOs are used for
-what purposes. However, those numbers are not always stable; GPIOs on
-a daughtercard might be different depending on the base board being used,
-or other cards in the stack. In such cases, you may need to use the
-gpiochip nodes (possibly in conjunction with schematics) to determine
-the correct GPIO number to use for a given signal.
-
-
-API Reference
-=============
-
-The functions listed in this section are deprecated. The GPIO descriptor based
-API should be used in new code.
-
-.. kernel-doc:: drivers/gpio/gpiolib-legacy.c
- :export:
diff --git a/Documentation/filesystems/gfs2-glocks.rst b/Documentation/filesystems/gfs2-glocks.rst
index 8a5842929b60..adc0d4c4d979 100644
--- a/Documentation/filesystems/gfs2-glocks.rst
+++ b/Documentation/filesystems/gfs2-glocks.rst
@@ -40,14 +40,14 @@ shared lock mode, SH. In GFS2 the DF mode is used exclusively for direct I/O
operations. The glocks are basically a lock plus some routines which deal
with cache management. The following rules apply for the cache:
-========== ========== ============== ========== ==============
-Glock mode Cache data Cache Metadata Dirty Data Dirty Metadata
-========== ========== ============== ========== ==============
- UN No No No No
- SH Yes Yes No No
- DF No Yes No No
- EX Yes Yes Yes Yes
-========== ========== ============== ========== ==============
+========== ============== ========== ========== ==============
+Glock mode Cache Metadata Cache data Dirty Data Dirty Metadata
+========== ============== ========== ========== ==============
+ UN No No No No
+ DF Yes No No No
+ SH Yes Yes No No
+ EX Yes Yes Yes Yes
+========== ============== ========== ========== ==============
These rules are implemented using the various glock operations which
are defined for each type of glock. Not all types of glocks use
@@ -55,23 +55,22 @@ all the modes. Only inode glocks use the DF mode for example.
Table of glock operations and per type constants:
-============= =============================================================
+============== =============================================================
Field Purpose
-============= =============================================================
-go_xmote_th Called before remote state change (e.g. to sync dirty data)
+============== =============================================================
+go_sync Called before remote state change (e.g. to sync dirty data)
go_xmote_bh Called after remote state change (e.g. to refill cache)
go_inval Called if remote state change requires invalidating the cache
-go_demote_ok Returns boolean value of whether its ok to demote a glock
- (e.g. checks timeout, and that there is no cached data)
-go_lock Called for the first local holder of a lock
-go_unlock Called on the final local unlock of a lock
+go_instantiate Called when a glock has been acquired
+go_held Called every time a glock holder is acquired
go_dump Called to print content of object for debugfs file, or on
error to dump glock to the log.
-go_type The type of the glock, ``LM_TYPE_*``
go_callback Called if the DLM sends a callback to drop this lock
+go_unlocked Called when a glock is unlocked (dlm_unlock())
+go_type The type of the glock, ``LM_TYPE_*``
go_flags GLOF_ASPACE is set, if the glock has an address space
associated with it
-============= =============================================================
+============== =============================================================
The minimum hold time for each lock is the time after a remote lock
grant for which we ignore remote demote requests. This is in order to
@@ -82,26 +81,24 @@ to by multiple nodes. By delaying the demotion in response to a
remote callback, that gives the userspace program time to make
some progress before the pages are unmapped.
-There is a plan to try and remove the go_lock and go_unlock callbacks
-if possible, in order to try and speed up the fast path though the locking.
-Also, eventually we hope to make the glock "EX" mode locally shared
-such that any local locking will be done with the i_mutex as required
-rather than via the glock.
+Eventually, we hope to make the glock "EX" mode locally shared such that any
+local locking will be done with the i_mutex as required rather than via the
+glock.
Locking rules for glock operations:
-============= ====================== =============================
+============== ====================== =============================
Operation GLF_LOCK bit lock held gl_lockref.lock spinlock held
-============= ====================== =============================
-go_xmote_th Yes No
+============== ====================== =============================
+go_sync Yes No
go_xmote_bh Yes No
go_inval Yes No
-go_demote_ok Sometimes Yes
-go_lock Yes No
-go_unlock Yes No
+go_instantiate No No
+go_held No No
go_dump Sometimes Yes
go_callback Sometimes (N/A) Yes
-============= ====================== =============================
+go_unlocked Yes No
+============== ====================== =============================
.. Note::
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 8f5c1ee02e2f..e8e496d23e1d 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -34,6 +34,7 @@ algorithms work.
seq_file
sharedsubtree
idmappings
+ iomap/index
automount-support
diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst
new file mode 100644
index 000000000000..f8ee3427bc1a
--- /dev/null
+++ b/Documentation/filesystems/iomap/design.rst
@@ -0,0 +1,441 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _iomap_design:
+
+..
+ Dumb style notes to maintain the author's sanity:
+ Please try to start sentences on separate lines so that
+ sentence changes don't bleed colors in diff.
+ Heading decorations are documented in sphinx.rst.
+
+==============
+Library Design
+==============
+
+.. contents:: Table of Contents
+ :local:
+
+Introduction
+============
+
+iomap is a filesystem library for handling common file operations.
+The library has two layers:
+
+ 1. A lower layer that provides an iterator over ranges of file offsets.
+ This layer tries to obtain mappings of each file range to storage
+ from the filesystem, but the storage information is not necessarily
+ required.
+
+ 2. An upper layer that acts upon the space mappings provided by the
+ lower layer iterator.
+
+The iteration can involve mappings of a file's logical offset ranges to
+physical extents, but the storage layer information is not necessarily
+required, e.g. for walking cached file information.
+The library exports various APIs for implementing file operations such
+as:
+
+ * Pagecache reads and writes
+ * Folio write faults to the pagecache
+ * Writeback of dirty folios
+ * Direct I/O reads and writes
+ * fsdax I/O reads, writes, loads, and stores
+ * FIEMAP
+ * lseek ``SEEK_DATA`` and ``SEEK_HOLE``
+ * swapfile activation
+
+The origins of this library lie in the file I/O path that XFS once used; it
+has now been extended to cover several other operations.
+
+Who Should Read This?
+=====================
+
+The target audience for this document is filesystem, storage, and
+pagecache programmers and code reviewers.
+
+If you are working on PCI, machine architectures, or device drivers, you
+are most likely in the wrong place.
+
+How Is This Better?
+===================
+
+Unlike the classic Linux I/O model which breaks file I/O into small
+units (generally memory pages or blocks) and looks up space mappings on
+the basis of that unit, the iomap model asks the filesystem for the
+largest space mappings that it can create for a given file operation and
+initiates operations on that basis.
+This strategy improves the filesystem's visibility into the size of the
+operation being performed, which enables it to combat fragmentation with
+larger space allocations when possible.
+Larger space mappings improve runtime performance by amortizing the cost
+of mapping function calls into the filesystem across a larger amount of
+data.
+
+At a high level, an iomap operation `looks like this
+<https://lore.kernel.org/all/ZGbVaewzcCysclPt@dread.disaster.area/>`_:
+
+1. For each byte in the operation range...
+
+ 1. Obtain a space mapping via ``->iomap_begin``
+
+ 2. For each sub-unit of work...
+
+ 1. Revalidate the mapping and go back to (1) above, if necessary.
+ So far only the pagecache operations need to do this.
+
+ 2. Do the work
+
+ 3. Increment operation cursor
+
+ 4. Release the mapping via ``->iomap_end``, if necessary
+
+Each iomap operation will be covered in more detail below.
+This library was covered previously by an `LWN article
+<https://lwn.net/Articles/935934/>`_ and a `KernelNewbies page
+<https://kernelnewbies.org/KernelProjects/iomap>`_.
+
+The goal of this document is to provide a brief discussion of the
+design and capabilities of iomap, followed by a more detailed catalog
+of the interfaces presented by iomap.
+If you change iomap, please update this design document.
+
+File Range Iterator
+===================
+
+Definitions
+-----------
+
+ * **buffer head**: Shattered remnants of the old buffer cache.
+
+ * ``fsblock``: The block size of a file, also known as ``i_blocksize``.
+
+ * ``i_rwsem``: The VFS ``struct inode`` rwsemaphore.
+ Processes hold this in shared mode to read file state and contents.
+ Some filesystems may allow shared mode for writes.
+ Processes often hold this in exclusive mode to change file state and
+ contents.
+
+ * ``invalidate_lock``: The pagecache ``struct address_space``
+ rwsemaphore that protects against folio insertion and removal for
+ filesystems that support punching out folios below EOF.
+ Processes wishing to insert folios must hold this lock in shared
+ mode to prevent removal, though concurrent insertion is allowed.
+ Processes wishing to remove folios must hold this lock in exclusive
+ mode to prevent insertions.
+ Concurrent removals are not allowed.
+
+ * ``dax_read_lock``: The RCU read lock that dax takes to prevent a
+ device pre-shutdown hook from returning before other threads have
+ released resources.
+
+ * **filesystem mapping lock**: This synchronization primitive is
+ internal to the filesystem and must protect the file mapping data
+ from updates while a mapping is being sampled.
+ The filesystem author must determine how this coordination should
+ happen; it does not need to be an actual lock.
+
+ * **iomap internal operation lock**: This is a general term for
+ synchronization primitives that iomap functions take while holding a
+ mapping.
+ A specific example would be taking the folio lock while reading or
+ writing the pagecache.
+
+ * **pure overwrite**: A write operation that does not require any
+ metadata or zeroing operations to perform during either submission
+ or completion.
+ This implies that the filesystem must have already allocated space
+ on disk as ``IOMAP_MAPPED`` and the filesystem must not place any
+ constraints on I/O alignment or size.
+ The only constraints on I/O alignment are device level (minimum I/O
+ size and alignment, typically sector size).
+
+``struct iomap``
+----------------
+
+The filesystem communicates to the iomap iterator the mapping of
+byte ranges of a file to byte ranges of a storage device with the
+structure below:
+
+.. code-block:: c
+
+ struct iomap {
+ u64 addr;
+ loff_t offset;
+ u64 length;
+ u16 type;
+ u16 flags;
+ struct block_device *bdev;
+ struct dax_device *dax_dev;
+ void *inline_data;
+ void *private;
+ const struct iomap_folio_ops *folio_ops;
+ u64 validity_cookie;
+ };
+
+The fields are as follows:
+
+ * ``offset`` and ``length`` describe the range of file offsets, in
+ bytes, covered by this mapping.
+ These fields must always be set by the filesystem.
+
+ * ``type`` describes the type of the space mapping:
+
+ * **IOMAP_HOLE**: No storage has been allocated.
+ This type must never be returned in response to an ``IOMAP_WRITE``
+ operation because writes must allocate and map space, and return
+ the mapping.
+ The ``addr`` field must be set to ``IOMAP_NULL_ADDR``.
+ iomap does not support writing (whether via pagecache or direct
+ I/O) to a hole.
+
+ * **IOMAP_DELALLOC**: A promise to allocate space at a later time
+ ("delayed allocation").
+ If the filesystem returns IOMAP_F_NEW here and the write fails, the
+ ``->iomap_end`` function must delete the reservation.
+ The ``addr`` field must be set to ``IOMAP_NULL_ADDR``.
+
+ * **IOMAP_MAPPED**: The file range maps to specific space on the
+ storage device.
+ The device is returned in ``bdev`` or ``dax_dev``.
+ The device address, in bytes, is returned via ``addr``.
+
+ * **IOMAP_UNWRITTEN**: The file range maps to specific space on the
+ storage device, but the space has not yet been initialized.
+ The device is returned in ``bdev`` or ``dax_dev``.
+ The device address, in bytes, is returned via ``addr``.
+ Reads from this type of mapping will return zeroes to the caller.
+ For a write or writeback operation, the ioend should update the
+ mapping to MAPPED.
+ Refer to the sections about ioends for more details.
+
+ * **IOMAP_INLINE**: The file range maps to the memory buffer
+ specified by ``inline_data``.
+ For a write operation, the ``->iomap_end`` function presumably
+ handles persisting the data.
+ The ``addr`` field must be set to ``IOMAP_NULL_ADDR``.
+
+ * ``flags`` describe the status of the space mapping.
+ These flags should be set by the filesystem in ``->iomap_begin``:
+
+ * **IOMAP_F_NEW**: The space under the mapping is newly allocated.
+ Areas that will not be written to must be zeroed.
+ If a write fails and the mapping is a space reservation, the
+ reservation must be deleted.
+
+ * **IOMAP_F_DIRTY**: The inode will have uncommitted metadata needed
+ to access any data written.
+ fdatasync is required to commit these changes to persistent
+ storage.
+ This needs to take into account metadata changes that *may* be made
+ at I/O completion, such as file size updates from direct I/O.
+
+ * **IOMAP_F_SHARED**: The space under the mapping is shared.
+ Copy on write is necessary to avoid corrupting other file data.
+
+ * **IOMAP_F_BUFFER_HEAD**: This mapping requires the use of buffer
+ heads for pagecache operations.
+ Do not add more uses of this.
+
+ * **IOMAP_F_MERGED**: Multiple contiguous block mappings were
+ coalesced into this single mapping.
+ This is only useful for FIEMAP.
+
+ * **IOMAP_F_XATTR**: The mapping is for extended attribute data, not
+ regular file data.
+ This is only useful for FIEMAP.
+
+ * **IOMAP_F_PRIVATE**: Starting with this value, the upper bits can
+ be set by the filesystem for its own purposes.
+
+ These flags can be set by iomap itself during file operations.
+ The filesystem should supply an ``->iomap_end`` function if it needs
+ to observe these flags:
+
+ * **IOMAP_F_SIZE_CHANGED**: The file size has changed as a result of
+ using this mapping.
+
+ * **IOMAP_F_STALE**: The mapping was found to be stale.
+ iomap will call ``->iomap_end`` on this mapping and then
+ ``->iomap_begin`` to obtain a new mapping.
+
+ Currently, these flags are only set by pagecache operations.
+
+ * ``addr`` describes the device address, in bytes.
+
+ * ``bdev`` describes the block device for this mapping.
+ This only needs to be set for mapped or unwritten operations.
+
+ * ``dax_dev`` describes the DAX device for this mapping.
+ This only needs to be set for mapped or unwritten operations, and
+ only for a fsdax operation.
+
+ * ``inline_data`` points to a memory buffer for I/O involving
+ ``IOMAP_INLINE`` mappings.
+ This value is ignored for all other mapping types.
+
+ * ``private`` is a pointer to `filesystem-private information
+ <https://lore.kernel.org/all/20180619164137.13720-7-hch@lst.de/>`_.
+ This value will be passed unchanged to ``->iomap_end``.
+
+ * ``folio_ops`` will be covered in the section on pagecache operations.
+
+ * ``validity_cookie`` is a magic freshness value set by the filesystem
+ that should be used to detect stale mappings.
+ For pagecache operations this is critical for correct operation
+ because page faults can occur, which implies that filesystem locks
+ should not be held between ``->iomap_begin`` and ``->iomap_end``.
+ Filesystems with completely static mappings need not set this value.
+ Only pagecache operations revalidate mappings; see the section about
+ ``iomap_valid`` for details.
+
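+As a purely illustrative example (the numbers are made up, and a
+block-device backed filesystem is assumed), ``->iomap_begin`` might fill
+out a mapping for an allocated, written 1 MiB extent at file offset
+4 MiB like this:
+
+.. code-block:: c
+
+	/* Hypothetical values for illustration only. */
+	iomap->offset = 4 * SZ_1M;		/* file offset, in bytes */
+	iomap->length = SZ_1M;			/* mapping length, in bytes */
+	iomap->type   = IOMAP_MAPPED;		/* allocated and written */
+	iomap->addr   = 512 * SZ_1M;		/* device address, in bytes */
+	iomap->bdev   = inode->i_sb->s_bdev;	/* block device for the I/O */
+	iomap->flags  = 0;			/* no IOMAP_F_* flags apply */
+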
+``struct iomap_ops``
+--------------------
+
+Every iomap function requires the filesystem to pass an operations
+structure to obtain a mapping and (optionally) to release the mapping:
+
+.. code-block:: c
+
+ struct iomap_ops {
+ int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap);
+
+ int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned flags,
+ struct iomap *iomap);
+ };
+
+``->iomap_begin``
+~~~~~~~~~~~~~~~~~
+
+iomap operations call ``->iomap_begin`` to obtain one file mapping for
+the range of bytes specified by ``pos`` and ``length`` for the file
+``inode``.
+This mapping should be returned through the ``iomap`` pointer.
+The mapping must cover at least the first byte of the supplied file
+range, but it does not need to cover the entire requested range.
+
+Each iomap operation describes the requested operation through the
+``flags`` argument.
+The exact value of ``flags`` will be documented in the
+operation-specific sections below.
+These flags can, at least in principle, apply generally to iomap
+operations:
+
+ * ``IOMAP_DIRECT`` is set when the caller wishes to issue file I/O to
+ block storage.
+
+ * ``IOMAP_DAX`` is set when the caller wishes to issue file I/O to
+ memory-like storage.
+
+ * ``IOMAP_NOWAIT`` is set when the caller wishes to perform a best
+ effort attempt to avoid any operation that would result in blocking
+ the submitting task.
+ This is similar in intent to ``O_NONBLOCK`` for network APIs - it is
+ intended for asynchronous applications to keep doing other work
+ instead of waiting for the specific unavailable filesystem resource
+ to become available.
+ Filesystems implementing ``IOMAP_NOWAIT`` semantics need to use
+ trylock algorithms.
+ They need to be able to satisfy the entire I/O request range with a
+ single iomap mapping.
+ They need to avoid reading or writing metadata synchronously.
+ They need to avoid blocking memory allocations.
+ They need to avoid waiting on transaction reservations to allow
+ modifications to take place.
+ They probably should not be allocating new space.
+ And so on.
+ If there is any doubt in the filesystem developer's mind as to
+ whether any specific ``IOMAP_NOWAIT`` operation may end up blocking,
+ then they should return ``-EAGAIN`` as early as possible rather than
+ start the operation and force the submitting task to block.
+ ``IOMAP_NOWAIT`` is often set on behalf of ``IOCB_NOWAIT`` or
+ ``RWF_NOWAIT``.
+
+If it is necessary to read existing file contents from a `different
+<https://lore.kernel.org/all/20191008071527.29304-9-hch@lst.de/>`_
+device or address range on a device, the filesystem should return that
+information via ``srcmap``.
+Only pagecache and fsdax operations support reading from one mapping and
+writing to another.
+
+``->iomap_end``
+~~~~~~~~~~~~~~~
+
+After the operation completes, the ``->iomap_end`` function, if present,
+is called to signal that iomap is finished with a mapping.
+Typically, implementations will use this function to tear down any
+contexts that were set up in ``->iomap_begin``.
+For example, a write might wish to commit the reservations for the bytes
+that were operated upon and unreserve any space that was not operated
+upon.
+``written`` might be zero if no bytes were touched.
+``flags`` will contain the same value passed to ``->iomap_begin``.
+iomap ops for reads are not likely to need to supply this function.
+
+Both functions should return a negative errno code on error, or zero on
+success.
+
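+As a concrete illustration, here is a hedged sketch of what a trivial
+read-only mapping operation might look like for a hypothetical "myfs"
+filesystem.
+The ``myfs_get_mapping`` helper and the ``MYFS_HOLE`` sentinel are
+assumptions made up for this example, not part of any real filesystem:
+
+.. code-block:: c
+
+ static int myfs_read_iomap_begin(struct inode *inode, loff_t pos,
+ 		loff_t length, unsigned flags, struct iomap *iomap,
+ 		struct iomap *srcmap)
+ {
+ 	u64 daddr, count;
+ 	int error;
+
+ 	/* Hypothetical helper: find the mapping covering the byte at pos. */
+ 	error = myfs_get_mapping(inode, pos, length, &daddr, &count);
+ 	if (error)
+ 		return error;
+
+ 	iomap->offset = pos;
+ 	iomap->length = count;
+ 	iomap->bdev = inode->i_sb->s_bdev;
+ 	if (daddr == MYFS_HOLE) {
+ 		iomap->type = IOMAP_HOLE;
+ 		iomap->addr = IOMAP_NULL_ADDR;
+ 	} else {
+ 		iomap->type = IOMAP_MAPPED;
+ 		iomap->addr = daddr;
+ 	}
+ 	return 0;
+ }
+
+ static const struct iomap_ops myfs_read_iomap_ops = {
+ 	.iomap_begin	= myfs_read_iomap_begin,
+ };
+
+A read-only ops structure like this does not need an ``->iomap_end``
+function; a write-capable one would typically also reserve or allocate
+space and set ``IOMAP_F_NEW`` on freshly allocated mappings.
+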
+Preparing for File Operations
+=============================
+
+iomap only handles mapping and I/O.
+Filesystems must still call out to the VFS to check input parameters
+and file state before initiating an I/O operation.
+It does not handle obtaining filesystem freeze protection, updating of
+timestamps, stripping privileges, or access control.
+
+Locking Hierarchy
+=================
+
+iomap requires that filesystems supply their own locking model.
+There are three categories of synchronization primitives, as far as
+iomap is concerned:
+
+ * The **upper** level primitive is provided by the filesystem to
+ coordinate access to different iomap operations.
+ The exact primitive is specific to the filesystem and operation,
+ but is often a VFS inode, pagecache invalidation, or folio lock.
+ For example, a filesystem might take ``i_rwsem`` before calling
+ ``iomap_file_buffered_write`` and ``iomap_file_unshare`` to prevent
+ these two file operations from clobbering each other.
+ Pagecache writeback may lock a folio to prevent other threads from
+ accessing the folio until writeback is underway.
+
+ * The **lower** level primitive is taken by the filesystem in the
+ ``->iomap_begin`` and ``->iomap_end`` functions to coordinate
+ access to the file space mapping information.
+ The fields of the iomap object should be filled out while holding
+ this primitive.
+ The upper level synchronization primitive, if any, remains held
+ while acquiring the lower level synchronization primitive.
+ For example, XFS takes ``ILOCK_EXCL`` and ext4 takes ``i_data_sem``
+ while sampling mappings.
+ Filesystems with immutable mapping information may not require
+ synchronization here.
+
+ * The **operation** primitive is taken by an iomap operation to
+ coordinate access to its own internal data structures.
+ The upper level synchronization primitive, if any, remains held
+ while acquiring this primitive.
+ The lower level primitive is not held while acquiring this
+ primitive.
+ For example, pagecache write operations will obtain a file mapping,
+ then grab and lock a folio to copy new contents.
+ It may also lock an internal folio state object to update metadata.
+
+The exact locking requirements are specific to the filesystem; for
+certain operations, some of these locks can be elided.
+All further mentions of locking are *recommendations*, not mandates.
+Each filesystem author must figure out the locking for themselves.
+
+Bugs and Limitations
+====================
+
+ * No support for fscrypt.
+ * No support for compression.
+ * No support for fsverity yet.
+ * Strong assumptions that IO should work the way it does on XFS.
+ * Does iomap *actually* work for non-regular file data?
+
+Patches welcome!
diff --git a/Documentation/filesystems/iomap/index.rst b/Documentation/filesystems/iomap/index.rst
new file mode 100644
index 000000000000..3c6a52440250
--- /dev/null
+++ b/Documentation/filesystems/iomap/index.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+VFS iomap Documentation
+=======================
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ design
+ operations
+ porting
diff --git a/Documentation/filesystems/iomap/operations.rst b/Documentation/filesystems/iomap/operations.rst
new file mode 100644
index 000000000000..8e6c721d2330
--- /dev/null
+++ b/Documentation/filesystems/iomap/operations.rst
@@ -0,0 +1,713 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _iomap_operations:
+
+..
+ Dumb style notes to maintain the author's sanity:
+ Please try to start sentences on separate lines so that
+ sentence changes don't bleed colors in diff.
+ Heading decorations are documented in sphinx.rst.
+
+=========================
+Supported File Operations
+=========================
+
+.. contents:: Table of Contents
+ :local:
+
+Below is a discussion of the high level file operations that iomap
+implements.
+
+Buffered I/O
+============
+
+Buffered I/O is the default file I/O path in Linux.
+File contents are cached in memory ("pagecache") to satisfy reads and
+writes.
+Dirty cache will be written back to disk at some point that can be
+forced via ``fsync`` and variants.
+
+iomap implements nearly all the folio and pagecache management that
+filesystems have to implement themselves under the legacy I/O model.
+This means that the filesystem need not know the details of allocating,
+mapping, managing uptodate and dirty state, or writeback of pagecache
+folios.
+Under the legacy I/O model, this was managed very inefficiently with
+linked lists of buffer heads instead of the per-folio bitmaps that iomap
+uses.
+Unless the filesystem explicitly opts in to buffer heads, they will not
+be used, which makes buffered I/O much more efficient, and the pagecache
+maintainer much happier.
+
+``struct address_space_operations``
+-----------------------------------
+
+The following iomap functions can be referenced directly from the
+address space operations structure:
+
+ * ``iomap_dirty_folio``
+ * ``iomap_release_folio``
+ * ``iomap_invalidate_folio``
+ * ``iomap_is_partially_uptodate``
+
+The following address space operations can be wrapped easily, as shown
+in the sketch after this list:
+
+ * ``read_folio``
+ * ``readahead``
+ * ``writepages``
+ * ``bmap``
+ * ``swap_activate``
+
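+A hedged sketch of how a hypothetical "myfs" filesystem might wire these
+up is shown below; ``myfs_read_iomap_ops`` is the read-only ops
+structure assumed throughout these examples, and the remaining wrappers
+(``writepages``, ``bmap``, and ``swap_activate``) follow the same
+pattern:
+
+.. code-block:: c
+
+ static int myfs_read_folio(struct file *file, struct folio *folio)
+ {
+ 	return iomap_read_folio(folio, &myfs_read_iomap_ops);
+ }
+
+ static void myfs_readahead(struct readahead_control *rac)
+ {
+ 	iomap_readahead(rac, &myfs_read_iomap_ops);
+ }
+
+ static const struct address_space_operations myfs_aops = {
+ 	.read_folio		= myfs_read_folio,
+ 	.readahead		= myfs_readahead,
+ 	.dirty_folio		= iomap_dirty_folio,
+ 	.release_folio		= iomap_release_folio,
+ 	.invalidate_folio	= iomap_invalidate_folio,
+ 	.is_partially_uptodate	= iomap_is_partially_uptodate,
+ };
+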
+``struct iomap_folio_ops``
+--------------------------
+
+The ``->iomap_begin`` function for pagecache operations may set the
+``struct iomap::folio_ops`` field to an ops structure to override
+default behaviors of iomap:
+
+.. code-block:: c
+
+ struct iomap_folio_ops {
+ struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
+ unsigned len);
+ void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+ struct folio *folio);
+ bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
+ };
+
+iomap calls these functions:
+
+ - ``get_folio``: Called to allocate and return an active reference to
+ a locked folio prior to starting a write.
+ If this function is not provided, iomap will call
+ ``iomap_get_folio``.
+ This could be used to `set up per-folio filesystem state
+ <https://lore.kernel.org/all/20190429220934.10415-5-agruenba@redhat.com/>`_
+ for a write.
+
+ - ``put_folio``: Called to unlock and put a folio after a pagecache
+ operation completes.
+ If this function is not provided, iomap will ``folio_unlock`` and
+ ``folio_put`` on its own.
+ This could be used to `commit per-folio filesystem state
+ <https://lore.kernel.org/all/20180619164137.13720-6-hch@lst.de/>`_
+ that was set up by ``->get_folio``.
+
+ - ``iomap_valid``: The filesystem may not hold locks between
+ ``->iomap_begin`` and ``->iomap_end`` because pagecache operations
+ can take folio locks, fault on userspace pages, initiate writeback
+ for memory reclamation, or engage in other time-consuming actions.
+ If a file's space mapping data are mutable, it is possible that the
+ mapping for a particular pagecache folio can `change in the time it
+ takes
+ <https://lore.kernel.org/all/20221123055812.747923-8-david@fromorbit.com/>`_
+ to allocate, install, and lock that folio.
+
+ For the pagecache, races can happen if writeback doesn't take
+ ``i_rwsem`` or ``invalidate_lock`` and updates mapping information.
+ Races can also happen if the filesystem allows concurrent writes.
+ For such files, the mapping *must* be revalidated after the folio
+ lock has been taken so that iomap can manage the folio correctly.
+
+ fsdax does not need this revalidation because there's no writeback
+ and no support for unwritten extents.
+
+ Filesystems subject to this kind of race must provide a
+ ``->iomap_valid`` function to decide if the mapping is still valid.
+ If the mapping is not valid, the mapping will be sampled again.
+
+ To support making the validity decision, the filesystem's
+ ``->iomap_begin`` function may set ``struct iomap::validity_cookie``
+ at the same time that it populates the other iomap fields.
+ A simple validation cookie implementation is a sequence counter.
+ If the filesystem bumps the sequence counter every time it modifies
+ the inode's extent map, it can be placed in the ``struct
+ iomap::validity_cookie`` during ``->iomap_begin``.
+ If the value in the cookie is found to be different from the value
+ the filesystem holds when the mapping is passed back to
+ ``->iomap_valid``, then the iomap should be considered stale and the
+ validation has failed.
+ A minimal sketch of this scheme is shown after this list.
+
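+The following is a minimal sketch of such a sequence-counter scheme for
+a hypothetical "myfs" filesystem; the ``MYFS_I()`` accessor and the
+``i_mapping_seq`` counter are assumptions, and the counter must be
+bumped (under the mapping lock) every time the extent map changes:
+
+.. code-block:: c
+
+ static bool myfs_iomap_valid(struct inode *inode, const struct iomap *iomap)
+ {
+ 	return iomap->validity_cookie ==
+ 	       READ_ONCE(MYFS_I(inode)->i_mapping_seq);
+ }
+
+ static const struct iomap_folio_ops myfs_iomap_folio_ops = {
+ 	.iomap_valid	= myfs_iomap_valid,
+ };
+
+ static int myfs_write_iomap_begin(struct inode *inode, loff_t pos,
+ 		loff_t length, unsigned flags, struct iomap *iomap,
+ 		struct iomap *srcmap)
+ {
+ 	/* ...look up or allocate the mapping under the mapping lock... */
+
+ 	iomap->folio_ops = &myfs_iomap_folio_ops;
+ 	iomap->validity_cookie = READ_ONCE(MYFS_I(inode)->i_mapping_seq);
+ 	return 0;
+ }
+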
+These ``struct kiocb`` flags are significant for buffered I/O with iomap:
+
+ * ``IOCB_NOWAIT``: Turns on ``IOMAP_NOWAIT``.
+
+Internal per-Folio State
+------------------------
+
+If the fsblock size matches the size of a pagecache folio, it is assumed
+that all disk I/O operations will operate on the entire folio.
+The uptodate (memory contents are at least as new as what's on disk) and
+dirty (memory contents are newer than what's on disk) status of the
+folio are all that's needed for this case.
+
+If the fsblock size is less than the size of a pagecache folio, iomap
+tracks the per-fsblock uptodate and dirty state itself.
+This enables iomap to handle both "bs < ps" `filesystems
+<https://lore.kernel.org/all/20230725122932.144426-1-ritesh.list@gmail.com/>`_
+and large folios in the pagecache.
+
+iomap internally tracks two state bits per fsblock:
+
+ * ``uptodate``: iomap will try to keep folios fully up to date.
+ If there are read(ahead) errors, those fsblocks will not be marked
+ uptodate.
+ The folio itself will be marked uptodate when all fsblocks within the
+ folio are uptodate.
+
+ * ``dirty``: iomap will set the per-block dirty state when programs
+ write to the file.
+ The folio itself will be marked dirty when any fsblock within the
+ folio is dirty.
+
+iomap also tracks the number of read and write disk I/Os that are in
+flight.
+This structure is much lighter weight than ``struct buffer_head``
+because there is only one per folio, and the per-fsblock overhead is two
+bits vs. 104 bytes.
+
+Filesystems wishing to turn on large folios in the pagecache should call
+``mapping_set_large_folios`` when initializing the incore inode.
+
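+A minimal sketch, assuming a hypothetical ``myfs_setup_inode`` helper
+that runs when the incore inode is instantiated:
+
+.. code-block:: c
+
+ static void myfs_setup_inode(struct inode *inode)
+ {
+ 	/* ...set i_op, i_fop, and i_mapping->a_ops first... */
+ 	mapping_set_large_folios(inode->i_mapping);
+ }
+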
+Buffered Readahead and Reads
+----------------------------
+
+The ``iomap_readahead`` function initiates readahead to the pagecache.
+The ``iomap_read_folio`` function reads one folio's worth of data into
+the pagecache.
+The ``flags`` argument to ``->iomap_begin`` will be set to zero.
+The pagecache takes whatever locks it needs before calling the
+filesystem.
+
+Buffered Writes
+---------------
+
+The ``iomap_file_buffered_write`` function writes an ``iocb`` to the
+pagecache.
+``IOMAP_WRITE`` or ``IOMAP_WRITE | IOMAP_NOWAIT`` will be passed as
+the ``flags`` argument to ``->iomap_begin``.
+Callers commonly take ``i_rwsem`` in either shared or exclusive mode
+before calling this function.
+
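+A hedged sketch of a ``->write_iter`` implementation built on this
+function, assuming the three-argument prototype of
+``iomap_file_buffered_write`` current at the time of writing and a
+hypothetical ``myfs_buffered_write_iomap_ops`` structure
+(``IOCB_NOWAIT`` handling is omitted for brevity):
+
+.. code-block:: c
+
+ static ssize_t myfs_buffered_write_iter(struct kiocb *iocb,
+ 		struct iov_iter *from)
+ {
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 	ssize_t ret;
+
+ 	inode_lock(inode);
+ 	ret = generic_write_checks(iocb, from);
+ 	if (ret <= 0)
+ 		goto out_unlock;
+ 	/* Strip privileges and update timestamps; iomap does not. */
+ 	ret = file_modified(iocb->ki_filp);
+ 	if (ret)
+ 		goto out_unlock;
+
+ 	ret = iomap_file_buffered_write(iocb, from,
+ 			&myfs_buffered_write_iomap_ops);
+ out_unlock:
+ 	inode_unlock(inode);
+ 	if (ret > 0)
+ 		ret = generic_write_sync(iocb, ret);
+ 	return ret;
+ }
+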
+mmap Write Faults
+~~~~~~~~~~~~~~~~~
+
+The ``iomap_page_mkwrite`` function handles a write fault to a folio in
+the pagecache.
+``IOMAP_WRITE | IOMAP_FAULT`` will be passed as the ``flags`` argument
+to ``->iomap_begin``.
+Callers commonly take the mmap ``invalidate_lock`` in shared or
+exclusive mode before calling this function.
+
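+A sketch of such a fault handler, assuming the filesystem uses the
+generic ``invalidate_lock`` helpers and a hypothetical
+``myfs_buffered_write_iomap_ops`` structure:
+
+.. code-block:: c
+
+ static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
+ {
+ 	struct inode *inode = file_inode(vmf->vma->vm_file);
+ 	vm_fault_t ret;
+
+ 	sb_start_pagefault(inode->i_sb);
+ 	file_update_time(vmf->vma->vm_file);
+ 	filemap_invalidate_lock_shared(inode->i_mapping);
+ 	ret = iomap_page_mkwrite(vmf, &myfs_buffered_write_iomap_ops);
+ 	filemap_invalidate_unlock_shared(inode->i_mapping);
+ 	sb_end_pagefault(inode->i_sb);
+ 	return ret;
+ }
+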
+Buffered Write Failures
+~~~~~~~~~~~~~~~~~~~~~~~
+
+After a short write to the pagecache, the areas not written will not
+become marked dirty.
+The filesystem must arrange to `cancel
+<https://lore.kernel.org/all/20221123055812.747923-6-david@fromorbit.com/>`_
+such `reservations
+<https://lore.kernel.org/linux-xfs/20220817093627.GZ3600936@dread.disaster.area/>`_
+because writeback will not consume the reservation.
+The ``iomap_file_buffered_write_punch_delalloc`` function can be called
+from a ``->iomap_end`` function to find all the clean areas of the
+folios caching a fresh (``IOMAP_F_NEW``) delalloc mapping.
+It takes the ``invalidate_lock``.
+
+The filesystem must supply a function ``punch`` to be called for
+each file range in this state.
+This function must *only* remove delayed allocation reservations, in
+case another thread racing with the current thread writes successfully
+to the same region and triggers writeback to flush the dirty data out to
+disk.
+
+Zeroing for File Operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filesystems can call ``iomap_zero_range`` to perform zeroing of the
+pagecache for non-truncation file operations that are not aligned to
+the fsblock size.
+``IOMAP_ZERO`` will be passed as the ``flags`` argument to
+``->iomap_begin``.
+Callers typically hold ``i_rwsem`` and ``invalidate_lock`` in exclusive
+mode before calling this function.
+
+Unsharing Reflinked File Data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filesystems can call ``iomap_file_unshare`` to force a file that shares
+storage with another file to preemptively copy the shared data to newly
+allocated storage.
+``IOMAP_WRITE | IOMAP_UNSHARE`` will be passed as the ``flags`` argument
+to ``->iomap_begin``.
+Callers typically hold ``i_rwsem`` and ``invalidate_lock`` in exclusive
+mode before calling this function.
+
+Truncation
+----------
+
+Filesystems can call ``iomap_truncate_page`` to zero the bytes in the
+pagecache from EOF to the end of the fsblock during a file truncation
+operation.
+``truncate_setsize`` or ``truncate_pagecache`` will take care of
+everything after the EOF block.
+``IOMAP_ZERO`` will be passed as the ``flags`` argument to
+``->iomap_begin``.
+Callers typically hold ``i_rwsem`` and ``invalidate_lock`` in exclusive
+mode before calling this function.
+
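+A hedged sketch of how a truncate path might use it, assuming the
+four-argument prototype current at the time of writing; the removal of
+the blocks past the new EOF is filesystem-specific and elided here:
+
+.. code-block:: c
+
+ static int myfs_truncate(struct inode *inode, loff_t newsize)
+ {
+ 	bool did_zero = false;
+ 	int error;
+
+ 	/* The caller holds i_rwsem and invalidate_lock exclusively. */
+ 	error = iomap_truncate_page(inode, newsize, &did_zero,
+ 			&myfs_buffered_write_iomap_ops);
+ 	if (error)
+ 		return error;
+
+ 	truncate_setsize(inode, newsize);
+ 	/* ...free the now-unused blocks past newsize... */
+ 	return 0;
+ }
+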
+Pagecache Writeback
+-------------------
+
+Filesystems can call ``iomap_writepages`` to respond to a request to
+write dirty pagecache folios to disk.
+The ``mapping`` and ``wbc`` parameters should be passed unchanged.
+The ``wpc`` pointer should be allocated by the filesystem and must
+be initialized to zero.
+
+The pagecache will lock each folio before trying to schedule it for
+writeback.
+It does not lock ``i_rwsem`` or ``invalidate_lock``.
+
+The dirty bit will be cleared for all folios run through the
+``->map_blocks`` machinery described below even if the writeback fails.
+This is to prevent dirty folio clots when storage devices fail; an
+``-EIO`` is recorded for userspace to collect via ``fsync``.
+
+The ``ops`` structure must be specified and is as follows:
+
+``struct iomap_writeback_ops``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: c
+
+ struct iomap_writeback_ops {
+ int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
+ loff_t offset, unsigned len);
+ int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+ void (*discard_folio)(struct folio *folio, loff_t pos);
+ };
+
+The fields are as follows; a sketch tying them together follows the
+field descriptions:
+
+ - ``map_blocks``: Sets ``wpc->iomap`` to the space mapping of the file
+ range (in bytes) given by ``offset`` and ``len``.
+ iomap calls this function for each dirty fs block in each dirty folio,
+ though it will `reuse mappings
+ <https://lore.kernel.org/all/20231207072710.176093-15-hch@lst.de/>`_
+ for runs of contiguous dirty fsblocks within a folio.
+ Do not return ``IOMAP_INLINE`` mappings here; the ``->iomap_end``
+ function must deal with persisting written data.
+ Do not return ``IOMAP_DELALLOC`` mappings here; iomap currently
+ requires the mapping to point to allocated space.
+ Filesystems can skip a potentially expensive mapping lookup if the
+ mappings have not changed.
+ This revalidation must be open-coded by the filesystem; it is
+ unclear if ``iomap::validity_cookie`` can be reused for this
+ purpose.
+ This function must be supplied by the filesystem.
+
+ - ``prepare_ioend``: Enables filesystems to transform the writeback
+ ioend or perform any other preparatory work before the writeback I/O
+ is submitted.
+ This might include pre-write space accounting updates, or installing
+ a custom ``->bi_end_io`` function for internal purposes, such as
+ deferring the ioend completion to a workqueue to run metadata update
+ transactions from process context.
+ This function is optional.
+
+ - ``discard_folio``: iomap calls this function after ``->map_blocks``
+ fails to schedule I/O for any part of a dirty folio.
+ The function should throw away any reservations that may have been
+ made for the write.
+ The folio will be marked clean and an ``-EIO`` recorded in the
+ pagecache.
+ Filesystems can use this callback to `remove
+ <https://lore.kernel.org/all/20201029163313.1766967-1-bfoster@redhat.com/>`_
+ delalloc reservations to avoid having delalloc reservations for
+ clean pagecache.
+ This function is optional.
+
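+Putting these pieces together, a hedged sketch of a writeback hookup for
+a hypothetical "myfs" filesystem might look like the following; the
+embedded context structure mirrors what XFS does, and the
+``myfs_imap_valid`` and ``myfs_allocate_and_map`` helpers are
+assumptions:
+
+.. code-block:: c
+
+ struct myfs_writepage_ctx {
+ 	struct iomap_writepage_ctx ctx;
+ 	/* ...filesystem-private state for this writeback pass... */
+ };
+
+ static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
+ 		struct inode *inode, loff_t offset, unsigned len)
+ {
+ 	/* Reuse the previous mapping if it is still valid. */
+ 	if (myfs_imap_valid(wpc, inode, offset))
+ 		return 0;
+ 	/* Otherwise look up (and, for delalloc, allocate) a new one. */
+ 	return myfs_allocate_and_map(wpc, inode, offset, len);
+ }
+
+ static const struct iomap_writeback_ops myfs_writeback_ops = {
+ 	.map_blocks	= myfs_map_blocks,
+ };
+
+ static int myfs_writepages(struct address_space *mapping,
+ 		struct writeback_control *wbc)
+ {
+ 	struct myfs_writepage_ctx wpc = { };
+
+ 	return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
+ }
+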
+Pagecache Writeback Completion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To handle the bookkeeping that must happen after disk I/O for writeback
+completes, iomap creates chains of ``struct iomap_ioend`` objects that
+wrap the ``bio`` that is used to write pagecache data to disk.
+By default, iomap finishes writeback ioends by clearing the writeback
+bit on the folios attached to the ``ioend``.
+If the write failed, it will also set the error bits on the folios and
+the address space.
+This can happen in interrupt or process context, depending on the
+storage device.
+
+Filesystems that need to update internal bookkeeping (e.g. unwritten
+extent conversions) should provide a ``->prepare_ioend`` function to
+set ``struct iomap_ioend::bio::bi_end_io`` to its own function.
+This function should call ``iomap_finish_ioends`` after finishing its
+own work (e.g. unwritten extent conversion).
+
+Some filesystems may wish to `amortize the cost of running metadata
+transactions
+<https://lore.kernel.org/all/20220120034733.221737-1-david@fromorbit.com/>`_
+for post-writeback updates by batching them.
+They may also require transactions to run from process context, which
+implies punting batches to a workqueue.
+iomap ioends contain a ``list_head`` to enable batching.
+
+Given a batch of ioends, iomap has a few helpers to assist with
+amortization; a sketch of their use follows the list:
+
+ * ``iomap_sort_ioends``: Sort all the ioends in the list by file
+ offset.
+
+ * ``iomap_ioend_try_merge``: Given an ioend that is not in any list and
+ a separate list of sorted ioends, merge as many of the ioends from
+ the head of the list into the given ioend as possible.
+ ioends can only be merged if the file range and storage addresses are
+ contiguous; the unwritten and shared status are the same; and the
+ write I/O outcome is the same.
+ The merged ioends become their own list.
+
+ * ``iomap_finish_ioends``: Finish an ioend that possibly has other
+ ioends linked to it.
+
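+A hedged sketch of such a batched completion worker, modeled loosely on
+what XFS does; the per-inode ioend list, lock, and work item are
+assumptions, and the post-write metadata updates and error handling are
+elided:
+
+.. code-block:: c
+
+ static void myfs_end_ioend_work(struct work_struct *work)
+ {
+ 	struct myfs_inode *mip = container_of(work, struct myfs_inode,
+ 			i_ioend_work);
+ 	struct iomap_ioend *ioend;
+ 	LIST_HEAD(completion_list);
+ 	unsigned long flags;
+
+ 	spin_lock_irqsave(&mip->i_ioend_lock, flags);
+ 	list_replace_init(&mip->i_ioend_list, &completion_list);
+ 	spin_unlock_irqrestore(&mip->i_ioend_lock, flags);
+
+ 	iomap_sort_ioends(&completion_list);
+ 	while ((ioend = list_first_entry_or_null(&completion_list,
+ 			struct iomap_ioend, io_list))) {
+ 		list_del_init(&ioend->io_list);
+ 		iomap_ioend_try_merge(ioend, &completion_list);
+ 		/* ...run the post-write metadata updates for ioend here... */
+ 		iomap_finish_ioends(ioend, 0);
+ 	}
+ }
+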
+Direct I/O
+==========
+
+In Linux, direct I/O is defined as file I/O that is issued directly to
+storage, bypassing the pagecache.
+The ``iomap_dio_rw`` function implements O_DIRECT (direct I/O) reads and
+writes for files.
+
+.. code-block:: c
+
+ ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops,
+ const struct iomap_dio_ops *dops,
+ unsigned int dio_flags, void *private,
+ size_t done_before);
+
+The filesystem can provide the ``dops`` parameter if it needs to perform
+extra work before or after the I/O is issued to storage.
+The ``done_before`` parameter tells iomap how much of the request has
+already been transferred.
+It is used to continue a request asynchronously when `part of the
+request
+<https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=c03098d4b9ad76bca2966a8769dcfe59f7f85103>`_
+has already been completed synchronously.
+
+The ``done_before`` parameter should be set if writes for the ``iocb``
+have been initiated prior to the call.
+The direction of the I/O is determined from the ``iocb`` passed in.
+
+The ``dio_flags`` argument can be set to any combination of the
+following values:
+
+ * ``IOMAP_DIO_FORCE_WAIT``: Wait for the I/O to complete even if the
+ kiocb is not synchronous.
+
+ * ``IOMAP_DIO_OVERWRITE_ONLY``: Perform a pure overwrite for this range
+ or fail with ``-EAGAIN``.
+ This can be used by filesystems with complex unaligned I/O
+ write paths to provide an optimised fast path for unaligned writes.
+ If a pure overwrite can be performed, then serialisation against
+ other I/Os to the same filesystem block(s) is unnecessary as there is
+ no risk of stale data exposure or data loss.
+ If a pure overwrite cannot be performed, then the filesystem can
+ perform the serialisation steps needed to provide exclusive access
+ to the unaligned I/O range so that it can perform allocation and
+ sub-block zeroing safely.
+ Filesystems can use this flag to try to reduce locking contention,
+ but a lot of `detailed checking
+ <https://lore.kernel.org/linux-ext4/20230314130759.642710-1-bfoster@redhat.com/>`_
+ is required to do it `correctly
+ <https://lore.kernel.org/linux-ext4/20230810165559.946222-1-bfoster@redhat.com/>`_.
+
+ * ``IOMAP_DIO_PARTIAL``: If a page fault occurs, return whatever
+ progress has already been made.
+ The caller may deal with the page fault and retry the operation.
+ If the caller decides to retry the operation, it should pass the
+ accumulated return values of all previous calls as the
+ ``done_before`` parameter to the next call.
+
+These ``struct kiocb`` flags are significant for direct I/O with iomap:
+
+ * ``IOCB_NOWAIT``: Turns on ``IOMAP_NOWAIT``.
+
+ * ``IOCB_SYNC``: Ensure that the device has persisted data to disk
+ before completing the call.
+ In the case of pure overwrites, the I/O may be issued with FUA
+ enabled.
+
+ * ``IOCB_HIPRI``: Poll for I/O completion instead of waiting for an
+ interrupt.
+ Only meaningful for asynchronous I/O, and only if the entire I/O can
+ be issued as a single ``struct bio``.
+
+ * ``IOCB_DIO_CALLER_COMP``: Try to run I/O completion from the caller's
+ process context.
+ See ``linux/fs.h`` for more details.
+
+Filesystems should call ``iomap_dio_rw`` from ``->read_iter`` and
+``->write_iter``, and set ``FMODE_CAN_ODIRECT`` in the ``->open``
+function for the file.
+They should not set ``->direct_IO``, which is deprecated.
+
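+For example, a direct read path might look like the following sketch,
+with ``IOCB_NOWAIT`` mapped onto a trylock as recommended earlier and a
+hypothetical read-only ops structure:
+
+.. code-block:: c
+
+ static ssize_t myfs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 	ssize_t ret;
+
+ 	if (iocb->ki_flags & IOCB_NOWAIT) {
+ 		if (!inode_trylock_shared(inode))
+ 			return -EAGAIN;
+ 	} else {
+ 		inode_lock_shared(inode);
+ 	}
+
+ 	ret = iomap_dio_rw(iocb, to, &myfs_read_iomap_ops, NULL, 0, NULL, 0);
+ 	inode_unlock_shared(inode);
+ 	return ret;
+ }
+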
+If a filesystem wishes to perform its own work before direct I/O
+completion, it should call ``__iomap_dio_rw``.
+If its return value is not an error pointer or a NULL pointer, the
+filesystem should pass the return value to ``iomap_dio_complete`` after
+finishing its internal work.
+
+Return Values
+-------------
+
+``iomap_dio_rw`` can return one of the following:
+
+ * A non-negative number of bytes transferred.
+
+ * ``-ENOTBLK``: Fall back to buffered I/O.
+ iomap itself will return this value if it cannot invalidate the page
+ cache before issuing the I/O to storage.
+ The ``->iomap_begin`` or ``->iomap_end`` functions may also return
+ this value.
+
+ * ``-EIOCBQUEUED``: The asynchronous direct I/O request has been
+ queued and will be completed separately.
+
+ * Any of the other negative error codes.
+
+Direct Reads
+------------
+
+A direct I/O read initiates a read I/O from the storage device to the
+caller's buffer.
+Dirty parts of the pagecache are flushed to storage before initiating
+the read I/O.
+The ``flags`` value for ``->iomap_begin`` will be ``IOMAP_DIRECT`` with
+any combination of the following enhancements:
+
+ * ``IOMAP_NOWAIT``, as defined previously.
+
+Callers commonly hold ``i_rwsem`` in shared mode before calling this
+function.
+
+Direct Writes
+-------------
+
+A direct I/O write initiates a write I/O to the storage device from the
+caller's buffer.
+Dirty parts of the pagecache are flushed to storage before initiating
+the write I/O.
+The pagecache is invalidated both before and after the write I/O.
+The ``flags`` value for ``->iomap_begin`` will be ``IOMAP_DIRECT |
+IOMAP_WRITE`` with any combination of the following enhancements:
+
+ * ``IOMAP_NOWAIT``, as defined previously.
+
+ * ``IOMAP_OVERWRITE_ONLY``: Allocating blocks and zeroing partial
+ blocks is not allowed.
+ The entire file range must map to a single written or unwritten
+ extent.
+ The file I/O range must be aligned to the filesystem block size
+ if the mapping is unwritten and the filesystem cannot handle zeroing
+ the unaligned regions without exposing stale contents.
+
+Callers commonly hold ``i_rwsem`` in shared or exclusive mode before
+calling this function.
+
+``struct iomap_dio_ops``
+------------------------
+
+.. code-block:: c
+
+ struct iomap_dio_ops {
+ void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset);
+ int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
+ unsigned flags);
+ struct bio_set *bio_set;
+ };
+
+The fields of this structure are as follows; an ``->end_io`` sketch
+follows the field descriptions:
+
+ - ``submit_io``: iomap calls this function when it has constructed a
+ ``struct bio`` object for the I/O requested, and wishes to submit it
+ to the block device.
+ If no function is provided, ``submit_bio`` will be called directly.
+ Filesystems that would like to perform additional work before
+ submission (e.g. data replication for btrfs) should implement this
+ function.
+
+ - ``end_io``: This is called after the ``struct bio`` completes.
+ This function should perform post-write conversions of unwritten
+ extent mappings, handle write failures, etc.
+ The ``flags`` argument may be set to a combination of the following:
+
+ * ``IOMAP_DIO_UNWRITTEN``: The mapping was unwritten, so the ioend
+ should mark the extent as written.
+
+ * ``IOMAP_DIO_COW``: Writing to the space in the mapping required a
+ copy on write operation, so the ioend should switch mappings.
+
+ - ``bio_set``: This allows the filesystem to provide a custom bio_set
+ for allocating direct I/O bios.
+ This enables filesystems to `stash additional per-bio information
+ <https://lore.kernel.org/all/20220505201115.937837-3-hch@lst.de/>`_
+ for private use.
+ If this field is NULL, generic ``struct bio`` objects will be used.
+
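+A sketch of an ``->end_io`` handler for a hypothetical filesystem that
+converts unwritten extents after a successful write;
+``myfs_convert_unwritten`` is an assumption:
+
+.. code-block:: c
+
+ static int myfs_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ 		int error, unsigned flags)
+ {
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+
+ 	if (error)
+ 		return error;
+ 	if (!size)
+ 		return 0;
+
+ 	/* iocb->ki_pos is still the start of this write at this point. */
+ 	if (flags & IOMAP_DIO_UNWRITTEN)
+ 		return myfs_convert_unwritten(inode, iocb->ki_pos, size);
+ 	return 0;
+ }
+
+ static const struct iomap_dio_ops myfs_dio_write_ops = {
+ 	.end_io		= myfs_dio_write_end_io,
+ };
+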
+Filesystems that want to perform extra work after an I/O completion
+should set a custom ``->bi_end_io`` function via ``->submit_io``.
+Afterwards, the custom endio function must call
+``iomap_dio_bio_end_io`` to finish the direct I/O.
+
+DAX I/O
+=======
+
+Some storage devices can be directly mapped as memory.
+These devices support a new access mode known as "fsdax" that allows
+loads and stores through the CPU and memory controller.
+
+fsdax Reads
+-----------
+
+A fsdax read performs a memcpy from storage device to the caller's
+buffer.
+The ``flags`` value for ``->iomap_begin`` will be ``IOMAP_DAX`` with any
+combination of the following enhancements:
+
+ * ``IOMAP_NOWAIT``, as defined previously.
+
+Callers commonly hold ``i_rwsem`` in shared mode before calling this
+function.
+
+fsdax Writes
+------------
+
+A fsdax write initiates a memcpy to the storage device from the caller's
+buffer.
+The ``flags`` value for ``->iomap_begin`` will be ``IOMAP_DAX |
+IOMAP_WRITE`` with any combination of the following enhancements:
+
+ * ``IOMAP_NOWAIT``, as defined previously.
+
+ * ``IOMAP_OVERWRITE_ONLY``: The caller requires a pure overwrite to be
+ performed from this mapping.
+ This requires the filesystem extent mapping to already exist as an
+ ``IOMAP_MAPPED`` type and span the entire range of the write I/O
+ request.
+ If the filesystem cannot map this request in a way that allows the
+ iomap infrastructure to perform a pure overwrite, it must fail the
+ mapping operation with ``-EAGAIN``.
+
+Callers commonly hold ``i_rwsem`` in exclusive mode before calling this
+function.
+
+fsdax mmap Faults
+~~~~~~~~~~~~~~~~~
+
+The ``dax_iomap_fault`` function handles read and write faults to fsdax
+storage.
+For a read fault, ``IOMAP_DAX | IOMAP_FAULT`` will be passed as the
+``flags`` argument to ``->iomap_begin``.
+For a write fault, ``IOMAP_DAX | IOMAP_FAULT | IOMAP_WRITE`` will be
+passed as the ``flags`` argument to ``->iomap_begin``.
+
+Callers commonly hold the same locks as they do to call their iomap
+pagecache counterparts.
+
+fsdax Truncation, fallocate, and Unsharing
+------------------------------------------
+
+For fsdax files, the following functions are provided to replace their
+iomap pagecache I/O counterparts.
+The ``flags`` argument to ``->iomap_begin`` is the same as for the
+pagecache counterparts, with ``IOMAP_DAX`` added.
+
+ * ``dax_file_unshare``
+ * ``dax_zero_range``
+ * ``dax_truncate_page``
+
+Callers commonly hold the same locks as they do to call their iomap
+pagecache counterparts.
+
+fsdax Deduplication
+-------------------
+
+Filesystems implementing the ``FIDEDUPERANGE`` ioctl must call the
+``dax_remap_file_range_prep`` function with their own iomap read ops.
+
+Seeking Files
+=============
+
+iomap implements the two iterating whence modes of the ``llseek`` system
+call.
+
+SEEK_DATA
+---------
+
+The ``iomap_seek_data`` function implements the SEEK_DATA "whence" value
+for llseek.
+``IOMAP_REPORT`` will be passed as the ``flags`` argument to
+``->iomap_begin``.
+
+For unwritten mappings, the pagecache will be searched.
+Regions of the pagecache with a folio mapped and uptodate fsblocks
+within those folios will be reported as data areas.
+
+Callers commonly hold ``i_rwsem`` in shared mode before calling this
+function.
+
+SEEK_HOLE
+---------
+
+The ``iomap_seek_hole`` function implements the SEEK_HOLE "whence" value
+for llseek.
+``IOMAP_REPORT`` will be passed as the ``flags`` argument to
+``->iomap_begin``.
+
+For unwritten mappings, the pagecache will be searched.
+Regions of the pagecache with no folio mapped, or a !uptodate fsblock
+within a folio will be reported as sparse hole areas.
+
+Callers commonly hold ``i_rwsem`` in shared mode before calling this
+function.
+
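+A combined sketch covering both whence values, assuming the read-only
+ops structure used in the earlier examples:
+
+.. code-block:: c
+
+ static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
+ {
+ 	struct inode *inode = file_inode(file);
+
+ 	switch (whence) {
+ 	case SEEK_HOLE:
+ 		inode_lock_shared(inode);
+ 		offset = iomap_seek_hole(inode, offset, &myfs_read_iomap_ops);
+ 		inode_unlock_shared(inode);
+ 		break;
+ 	case SEEK_DATA:
+ 		inode_lock_shared(inode);
+ 		offset = iomap_seek_data(inode, offset, &myfs_read_iomap_ops);
+ 		inode_unlock_shared(inode);
+ 		break;
+ 	default:
+ 		return generic_file_llseek(file, offset, whence);
+ 	}
+
+ 	if (offset < 0)
+ 		return offset;
+ 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+ }
+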
+Swap File Activation
+====================
+
+The ``iomap_swapfile_activate`` function finds all the base-page aligned
+regions in a file and sets them up as swap space.
+The file will be ``fsync()``'d before activation.
+``IOMAP_REPORT`` will be passed as the ``flags`` argument to
+``->iomap_begin``.
+All mappings must be mapped or unwritten; they cannot be dirty or
+shared, and cannot span multiple block devices.
+Callers must hold ``i_rwsem`` in exclusive mode; this is already
+provided by ``swapon``.
+
+File Space Mapping Reporting
+============================
+
+iomap implements two of the file space mapping system calls.
+
+FS_IOC_FIEMAP
+-------------
+
+The ``iomap_fiemap`` function exports file extent mappings to userspace
+in the format specified by the ``FS_IOC_FIEMAP`` ioctl.
+``IOMAP_REPORT`` will be passed as the ``flags`` argument to
+``->iomap_begin``.
+Callers commonly hold ``i_rwsem`` in shared mode before calling this
+function.
+
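+A minimal sketch of a ``->fiemap`` inode operation using it, with the
+read-only ops structure from the earlier examples:
+
+.. code-block:: c
+
+ static int myfs_fiemap(struct inode *inode,
+ 		struct fiemap_extent_info *fieinfo, u64 start, u64 len)
+ {
+ 	int error;
+
+ 	inode_lock_shared(inode);
+ 	error = iomap_fiemap(inode, fieinfo, start, len,
+ 			&myfs_read_iomap_ops);
+ 	inode_unlock_shared(inode);
+ 	return error;
+ }
+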
+FIBMAP (deprecated)
+-------------------
+
+``iomap_bmap`` implements FIBMAP.
+The calling conventions are the same as for FIEMAP.
+This function is only provided to maintain compatibility for filesystems
+that implemented FIBMAP prior to conversion.
+This ioctl is deprecated; do **not** add a FIBMAP implementation to
+filesystems that do not have it.
+Callers should probably hold ``i_rwsem`` in shared mode before calling
+this function, but this is unclear.
diff --git a/Documentation/filesystems/iomap/porting.rst b/Documentation/filesystems/iomap/porting.rst
new file mode 100644
index 000000000000..3d49a32c0fff
--- /dev/null
+++ b/Documentation/filesystems/iomap/porting.rst
@@ -0,0 +1,120 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _iomap_porting:
+
+..
+ Dumb style notes to maintain the author's sanity:
+ Please try to start sentences on separate lines so that
+ sentence changes don't bleed colors in diff.
+ Heading decorations are documented in sphinx.rst.
+
+=======================
+Porting Your Filesystem
+=======================
+
+.. contents:: Table of Contents
+ :local:
+
+Why Convert?
+============
+
+There are several reasons to convert a filesystem to iomap:
+
+ 1. The classic Linux I/O path is not terribly efficient.
+ Pagecache operations lock a single base page at a time and then call
+ into the filesystem to return a mapping for only that page.
+ Direct I/O operations build I/O requests a single file block at a
+ time.
+ This worked well enough for direct/indirect-mapped filesystems such
+ as ext2, but is very inefficient for extent-based filesystems such
+ as XFS.
+
+ 2. Large folios are only supported via iomap; there are no plans to
+ convert the old buffer_head path to use them.
+
+ 3. Direct access to storage on memory-like devices (fsdax) is only
+ supported via iomap.
+
+ 4. Lower maintenance overhead for individual filesystem maintainers.
+ iomap handles common pagecache related operations itself, such as
+ allocating, instantiating, locking, and unlocking of folios.
+ No ``->write_begin()``, ``->write_end()`` or ``->direct_IO``
+ address_space_operations are required to be implemented by
+ filesystems using iomap.
+
+How Do I Convert a Filesystem?
+==============================
+
+First, add ``#include <linux/iomap.h>`` to your source code and add
+``select FS_IOMAP`` to your filesystem's Kconfig option.
+Build the kernel, run fstests with the ``-g all`` option across a wide
+variety of your filesystem's supported configurations to build a
+baseline of which tests pass and which ones fail.
+
+The recommended approach is first to implement ``->iomap_begin`` (and
+``->iomap_end`` if necessary) to allow iomap to obtain a read-only
+mapping of a file range.
+In most cases, this is a relatively trivial conversion of the existing
+``get_block()`` function for read-only mappings.
+``FS_IOC_FIEMAP`` is a good first target because it is trivial to
+implement support for it and then to determine that the extent map
+iteration is correct from userspace.
+If FIEMAP is returning the correct information, it's a good sign that
+other read-only mapping operations will do the right thing.
+
+Next, modify the filesystem's ``get_block(create = false)``
+implementation to use the new ``->iomap_begin`` implementation to map
+file space for selected read operations.
+Hide behind a debugging knob the ability to switch on the iomap mapping
+functions for selected call paths.
+It is necessary to write some code to fill out the bufferhead-based
+mapping information from the ``iomap`` structure, but the new functions
+can be tested without needing to implement any iomap APIs.
+
+Once the read-only functions are working like this, convert each high
+level file operation one by one to use iomap native APIs instead of
+going through ``get_block()``.
+Done one at a time, regressions should be self-evident.
+You *do* have a regression test baseline for fstests, right?
+It is suggested to convert swap file activation, ``SEEK_DATA``, and
+``SEEK_HOLE`` before tackling the I/O paths.
+A likely complexity at this point will be converting the buffered read
+I/O path because of bufferheads.
+The buffered read I/O path doesn't need to be converted yet, though the
+direct I/O read path should be converted in this phase.
+
+At this point, you should look over your ``->iomap_begin`` function.
+If it switches between large blocks of code based on dispatching of the
+``flags`` argument, you should consider breaking it up into
+per-operation iomap ops with smaller, more cohesive functions.
+XFS is a good example of this.
+
+The next thing to do is implement ``get_block(create == true)``
+functionality in the ``->iomap_begin``/``->iomap_end`` methods.
+It is strongly recommended to create separate mapping functions and
+iomap ops for write operations.
+Then convert the direct I/O write path to iomap, and start running fsx
+with DIO enabled in earnest on the filesystem.
+This will flush out lots of data integrity corner case bugs that the new
+write mapping implementation introduces.
+
+Now, convert any remaining file operations to call the iomap functions.
+This will get the entire filesystem using the new mapping functions, and
+they should largely be debugged and working correctly after this step.
+
+Most likely at this point, the buffered read and write paths will still
+need to be converted.
+The mapping functions should all work correctly, so all that needs to be
+done is rewriting all the code that interfaces with bufferheads to
+interface with iomap and folios.
+It is much easier first to get regular file I/O (without any fancy
+features like fscrypt, fsverity, compression, or data=journaling)
+converted to use iomap.
+Some of those fancy features (fscrypt and compression) aren't
+implemented yet in iomap.
+For unjournalled filesystems that use the pagecache for symbolic links
+and directories, you might also try converting their handling to iomap.
+
+The rest is left as an exercise for the reader, as it will be different
+for every filesystem.
+If you encounter problems, email the people and lists in
+``get_maintainers.pl`` for help.
diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst
index 9aaf6ef75eb5..317934c9e8fc 100644
--- a/Documentation/filesystems/mount_api.rst
+++ b/Documentation/filesystems/mount_api.rst
@@ -645,6 +645,8 @@ The members are as follows:
fs_param_is_blockdev Blockdev path * Needs lookup
fs_param_is_path Path * Needs lookup
fs_param_is_fd File descriptor result->int_32
+ fs_param_is_uid User ID (u32) result->uid
+ fs_param_is_gid Group ID (u32) result->gid
======================= ======================= =====================
Note that if the value is of fs_param_is_bool type, fs_parse() will try
@@ -678,6 +680,8 @@ The members are as follows:
fsparam_bdev() fs_param_is_blockdev
fsparam_path() fs_param_is_path
fsparam_fd() fs_param_is_fd
+ fsparam_uid() fs_param_is_uid
+ fsparam_gid() fs_param_is_gid
======================= ===============================================
all of which take two arguments, name string and option number - for
@@ -784,8 +788,9 @@ process the parameters it is given.
option number (which it returns).
If successful, and if the parameter type indicates the result is a
- boolean, integer or enum type, the value is converted by this function and
- the result stored in result->{boolean,int_32,uint_32,uint_64}.
+ boolean, integer, enum, uid, or gid type, the value is converted by this
+ function and the result stored in
+ result->{boolean,int_32,uint_32,uint_64,uid,gid}.
If a match isn't initially made, the key is prefixed with "no" and no
value is present then an attempt will be made to look up the key with the
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 7c3a565ffbef..82d142de3461 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -571,6 +571,7 @@ encoded manner. The codes are the following:
um userfaultfd missing tracking
uw userfaultfd wr-protect tracking
ss shadow stack page
+ sl sealed
== =======================================
Note that there is no guarantee that every flag and associated mnemonic will
diff --git a/Documentation/hid/hid-bpf.rst b/Documentation/hid/hid-bpf.rst
index 0765b3298ecf..5939eeafb361 100644
--- a/Documentation/hid/hid-bpf.rst
+++ b/Documentation/hid/hid-bpf.rst
@@ -129,19 +129,37 @@ When a BPF program needs to emit input events, it needs to talk with the HID
protocol, and rely on the HID kernel processing to translate the HID data into
input events.
+In-tree HID-BPF programs and ``udev-hid-bpf``
+=============================================
+
+Official device fixes are shipped in the kernel tree as source in the
+``drivers/hid/bpf/progs`` directory. This allows selftests to be added to them in
+``tools/testing/selftests/hid``.
+
+However, the compilation of these objects is not part of a regular kernel compilation
+given that an external tool is needed to load them. This tool is currently
+`udev-hid-bpf <https://libevdev.pages.freedesktop.org/udev-hid-bpf/index.html>`_.
+
+For convenience, that external repository duplicates the files from here in
+``drivers/hid/bpf/progs`` into its own ``src/bpf/stable`` directory. This allows
+distributions to not have to pull the entire kernel source tree to ship and package
+those HID-BPF fixes. ``udev-hid-bpf`` is also capable of handling multiple
+object files depending on the kernel the user is running.
+
Available types of programs
===========================
-HID-BPF is built "on top" of BPF, meaning that we use tracing method to
+HID-BPF is built "on top" of BPF, meaning that we use bpf struct_ops method to
declare our programs.
HID-BPF has the following attachment types available:
-1. event processing/filtering with ``SEC("fmod_ret/hid_bpf_device_event")`` in libbpf
+1. event processing/filtering with ``SEC("struct_ops/hid_device_event")`` in libbpf
2. actions coming from userspace with ``SEC("syscall")`` in libbpf
-3. change of the report descriptor with ``SEC("fmod_ret/hid_bpf_rdesc_fixup")`` in libbpf
+3. change of the report descriptor with ``SEC("struct_ops/hid_rdesc_fixup")`` or
+ ``SEC("struct_ops.s/hid_rdesc_fixup")`` in libbpf
-A ``hid_bpf_device_event`` is calling a BPF program when an event is received from
+A ``hid_device_event`` is calling a BPF program when an event is received from
the device. Thus we are in IRQ context and can act on the data or notify userspace.
And given that we are in IRQ context, we can not talk back to the device.
@@ -149,37 +167,42 @@ A ``syscall`` means that userspace called the syscall ``BPF_PROG_RUN`` facility.
This time, we can do any operations allowed by HID-BPF, and talking to the device is
allowed.
-Last, ``hid_bpf_rdesc_fixup`` is different from the others as there can be only one
+Last, ``hid_rdesc_fixup`` is different from the others as there can be only one
BPF program of this type. This is called on ``probe`` from the driver and allows to
-change the report descriptor from the BPF program. Once a ``hid_bpf_rdesc_fixup``
+change the report descriptor from the BPF program. Once a ``hid_rdesc_fixup``
program has been loaded, it is not possible to overwrite it unless the program which
inserted it allows us by pinning the program and closing all of its fds pointing to it.
+Note that ``hid_rdesc_fixup`` can be declared as sleepable (``SEC("struct_ops.s/hid_rdesc_fixup")``).
+
+
Developer API:
==============
-User API data structures available in programs:
------------------------------------------------
+Available ``struct_ops`` for HID-BPF:
+-------------------------------------
.. kernel-doc:: include/linux/hid_bpf.h
+ :identifiers: hid_bpf_ops
-Available tracing functions to attach a HID-BPF program:
---------------------------------------------------------
-.. kernel-doc:: drivers/hid/bpf/hid_bpf_dispatch.c
- :functions: hid_bpf_device_event hid_bpf_rdesc_fixup
+User API data structures available in programs:
+-----------------------------------------------
-Available API that can be used in all HID-BPF programs:
--------------------------------------------------------
+.. kernel-doc:: include/linux/hid_bpf.h
+ :identifiers: hid_bpf_ctx
+
+Available API that can be used in all HID-BPF struct_ops programs:
+------------------------------------------------------------------
.. kernel-doc:: drivers/hid/bpf/hid_bpf_dispatch.c
- :functions: hid_bpf_get_data
+ :identifiers: hid_bpf_get_data
-Available API that can be used in syscall HID-BPF programs:
------------------------------------------------------------
+Available API that can be used in syscall HID-BPF programs or in sleepable HID-BPF struct_ops programs:
+-------------------------------------------------------------------------------------------------------
.. kernel-doc:: drivers/hid/bpf/hid_bpf_dispatch.c
- :functions: hid_bpf_attach_prog hid_bpf_hw_request hid_bpf_hw_output_report hid_bpf_input_report hid_bpf_allocate_context hid_bpf_release_context
+ :identifiers: hid_bpf_hw_request hid_bpf_hw_output_report hid_bpf_input_report hid_bpf_try_input_report hid_bpf_allocate_context hid_bpf_release_context
General overview of a HID-BPF program
=====================================
@@ -222,20 +245,21 @@ This allows the following:
Effect of a HID-BPF program
---------------------------
-For all HID-BPF attachment types except for :c:func:`hid_bpf_rdesc_fixup`, several eBPF
-programs can be attached to the same device.
+For all HID-BPF attachment types except for :c:func:`hid_rdesc_fixup`, several eBPF
+programs can be attached to the same device. If a HID-BPF struct_ops has a
+:c:func:`hid_rdesc_fixup` while another is already attached to the device, the
+kernel will return `-EINVAL` when attaching the struct_ops.
-Unless ``HID_BPF_FLAG_INSERT_HEAD`` is added to the flags while attaching the
-program, the new program is appended at the end of the list.
-``HID_BPF_FLAG_INSERT_HEAD`` will insert the new program at the beginning of the
-list which is useful for e.g. tracing where we need to get the unprocessed events
-from the device.
+Unless ``BPF_F_BEFORE`` is added to the flags while attaching the program, the new
+program is appended at the end of the list.
+``BPF_F_BEFORE`` will insert the new program at the beginning of the list which is
+useful for e.g. tracing where we need to get the unprocessed events from the device.
-Note that if there are multiple programs using the ``HID_BPF_FLAG_INSERT_HEAD`` flag,
+Note that if there are multiple programs using the ``BPF_F_BEFORE`` flag,
only the most recently loaded one is actually the first in the list.
-``SEC("fmod_ret/hid_bpf_device_event")``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``SEC("struct_ops/hid_device_event")``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Whenever a matching event is raised, the eBPF programs are called one after the other
and are working on the same data buffer.
@@ -258,17 +282,17 @@ with, userspace needs to refer to the device by its unique system id (the last 4
in the sysfs path: ``/sys/bus/hid/devices/xxxx:yyyy:zzzz:0000``).
To retrieve a context associated with the device, the program must call
-:c:func:`hid_bpf_allocate_context` and must release it with :c:func:`hid_bpf_release_context`
+hid_bpf_allocate_context() and must release it with hid_bpf_release_context()
before returning.
Once the context is retrieved, one can also request a pointer to kernel memory with
-:c:func:`hid_bpf_get_data`. This memory is big enough to support all input/output/feature
+hid_bpf_get_data(). This memory is big enough to support all input/output/feature
reports of the given device.
-``SEC("fmod_ret/hid_bpf_rdesc_fixup")``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``SEC("struct_ops/hid_rdesc_fixup")``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The ``hid_bpf_rdesc_fixup`` program works in a similar manner to
-``.report_fixup`` of ``struct hid_driver``.
+The ``hid_rdesc_fixup`` program works in a similar manner to ``.report_fixup``
+of ``struct hid_driver``.
When the device is probed, the kernel sets the data buffer of the context with the
content of the report descriptor. The memory associated with that buffer is
@@ -277,33 +301,31 @@ content of the report descriptor. The memory associated with that buffer is
The eBPF program can modify the data buffer at-will and the kernel uses the
modified content and size as the report descriptor.
-Whenever a ``SEC("fmod_ret/hid_bpf_rdesc_fixup")`` program is attached (if no
-program was attached before), the kernel immediately disconnects the HID device
-and does a reprobe.
+Whenever a struct_ops containing a ``SEC("struct_ops/hid_rdesc_fixup")`` program
+is attached (if no program was attached before), the kernel immediately disconnects
+the HID device and does a reprobe.
-In the same way, when the ``SEC("fmod_ret/hid_bpf_rdesc_fixup")`` program is
-detached, the kernel issues a disconnect on the device.
+In the same way, when this struct_ops is detached, the kernel issues a disconnect
+on the device.
There is no ``detach`` facility in HID-BPF. Detaching a program happens when
-all the user space file descriptors pointing at a program are closed.
+all the user space file descriptors pointing at a HID-BPF struct_ops link are closed.
Thus, if we need to replace a report descriptor fixup, some cooperation is
required from the owner of the original report descriptor fixup.
-The previous owner will likely pin the program in the bpffs, and we can then
+The previous owner will likely pin the struct_ops link in the bpffs, and we can then
replace it through normal bpf operations.
Attaching a bpf program to a device
===================================
-``libbpf`` does not export any helper to attach a HID-BPF program.
-Users need to use a dedicated ``syscall`` program which will call
-``hid_bpf_attach_prog(hid_id, program_fd, flags)``.
+We now use standard struct_ops attachment through ``bpf_map__attach_struct_ops()``.
+But given that we need to attach a struct_ops to a dedicated HID device, the caller
+must set ``hid_id`` in the struct_ops map before loading the program in the kernel.
``hid_id`` is the unique system ID of the HID device (the last 4 numbers in the
sysfs path: ``/sys/bus/hid/devices/xxxx:yyyy:zzzz:0000``)
-``progam_fd`` is the opened file descriptor of the program to attach.
-
-``flags`` is of type ``enum hid_bpf_attach_flags``.
+One can also set ``flags``, which is of type ``enum hid_bpf_attach_flags``.
We can not rely on hidraw to bind a BPF program to a HID device. hidraw is an
artefact of the processing of the HID device, and is not stable. Some drivers
@@ -358,32 +380,15 @@ For that, we can create a basic skeleton for our BPF program::
extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
unsigned int offset,
const size_t __sz) __ksym;
- extern int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, u32 flags) __ksym;
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096 * 64);
} ringbuf SEC(".maps");
- struct attach_prog_args {
- int prog_fd;
- unsigned int hid;
- unsigned int flags;
- int retval;
- };
-
- SEC("syscall")
- int attach_prog(struct attach_prog_args *ctx)
- {
- ctx->retval = hid_bpf_attach_prog(ctx->hid,
- ctx->prog_fd,
- ctx->flags);
- return 0;
- }
-
__u8 current_value = 0;
- SEC("?fmod_ret/hid_bpf_device_event")
+ SEC("struct_ops/hid_device_event")
int BPF_PROG(filter_switch, struct hid_bpf_ctx *hid_ctx)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 192 /* size */);
@@ -407,37 +412,37 @@ For that, we can create a basic skeleton for our BPF program::
return 0;
}
-To attach ``filter_switch``, userspace needs to call the ``attach_prog`` syscall
-program first::
+ SEC(".struct_ops.link")
+ struct hid_bpf_ops haptic_tablet = {
+ .hid_device_event = (void *)filter_switch,
+ };
+
+
+To attach ``haptic_tablet``, userspace needs to set ``hid_id`` first::
static int attach_filter(struct hid *hid_skel, int hid_id)
{
- int err, prog_fd;
- int ret = -1;
- struct attach_prog_args args = {
- .hid = hid_id,
- };
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattrs,
- .ctx_in = &args,
- .ctx_size_in = sizeof(args),
- );
+ int err, link_fd;
- args.prog_fd = bpf_program__fd(hid_skel->progs.filter_switch);
+ hid_skel->struct_ops.haptic_tablet->hid_id = hid_id;
+ err = hid__load(skel);
+ if (err)
+ return err;
- prog_fd = bpf_program__fd(hid_skel->progs.attach_prog);
-
- err = bpf_prog_test_run_opts(prog_fd, &tattrs);
- if (err)
- return err;
+ link_fd = bpf_map__attach_struct_ops(hid_skel->maps.haptic_tablet);
+ if (!link_fd) {
+ fprintf(stderr, "can not attach HID-BPF program: %m\n");
+ return -1;
+ }
- return args.retval; /* the fd of the created bpf_link */
+ return link_fd; /* the fd of the created bpf_link */
}
Our userspace program can now listen to notifications on the ring buffer, and
is awaken only when the value changes.
When the userspace program doesn't need to listen to events anymore, it can just
-close the returned fd from :c:func:`attach_filter`, which will tell the kernel to
+close the returned bpf link from :c:func:`attach_filter`, which will tell the kernel to
detach the program from the HID device.
Of course, in other use cases, the userspace program can also pin the fd to the
diff --git a/Documentation/hwmon/adm1021.rst b/Documentation/hwmon/adm1021.rst
deleted file mode 100644
index 116fb2019956..000000000000
--- a/Documentation/hwmon/adm1021.rst
+++ /dev/null
@@ -1,153 +0,0 @@
-Kernel driver adm1021
-=====================
-
-Supported chips:
-
- * Analog Devices ADM1021
-
- Prefix: 'adm1021'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the Analog Devices website
-
- * Analog Devices ADM1021A/ADM1023
-
- Prefix: 'adm1023'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the Analog Devices website
-
- * Genesys Logic GL523SM
-
- Prefix: 'gl523sm'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet:
-
- * Maxim MAX1617
-
- Prefix: 'max1617'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the Maxim website
-
- * Maxim MAX1617A
-
- Prefix: 'max1617a'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the Maxim website
-
- * National Semiconductor LM84
-
- Prefix: 'lm84'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the National Semiconductor website
-
- * Philips NE1617
-
- Prefix: 'max1617' (probably detected as a max1617)
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the Philips website
-
- * Philips NE1617A
-
- Prefix: 'max1617' (probably detected as a max1617)
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the Philips website
-
- * TI THMC10
-
- Prefix: 'thmc10'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the TI website
-
- * Onsemi MC1066
-
- Prefix: 'mc1066'
-
- Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
-
- Datasheet: Publicly available at the Onsemi website
-
-
-Authors:
- - Frodo Looijaard <frodol@dds.nl>,
- - Philip Edelbrock <phil@netroedge.com>
-
-Module Parameters
------------------
-
-* read_only: int
- Don't set any values, read only mode
-
-
-Description
------------
-
-The chips supported by this driver are very similar. The Maxim MAX1617 is
-the oldest; it has the problem that it is not very well detectable. The
-MAX1617A solves that. The ADM1021 is a straight clone of the MAX1617A.
-Ditto for the THMC10. From here on, we will refer to all these chips as
-ADM1021-clones.
-
-The ADM1021 and MAX1617A reports a die code, which is a sort of revision
-code. This can help us pinpoint problems; it is not very useful
-otherwise.
-
-ADM1021-clones implement two temperature sensors. One of them is internal,
-and measures the temperature of the chip itself; the other is external and
-is realised in the form of a transistor-like device. A special alarm
-indicates whether the remote sensor is connected.
-
-Each sensor has its own low and high limits. When they are crossed, the
-corresponding alarm is set and remains on as long as the temperature stays
-out of range. Temperatures are measured in degrees Celsius. Measurements
-are possible between -65 and +127 degrees, with a resolution of one degree.
-
-If an alarm triggers, it will remain triggered until the hardware register
-is read at least once. This means that the cause for the alarm may already
-have disappeared!
-
-This driver only updates its values each 1.5 seconds; reading it more often
-will do no harm, but will return 'old' values. It is possible to make
-ADM1021-clones do faster measurements, but there is really no good reason
-for that.
-
-
-Netburst-based Xeon support
----------------------------
-
-Some Xeon processors based on the Netburst (early Pentium 4, from 2001 to
-2003) microarchitecture had real MAX1617, ADM1021, or compatible chips
-within them, with two temperature sensors. Other Xeon processors of this
-era (with 400 MHz FSB) had chips with only one temperature sensor.
-
-If you have such an old Xeon, and you get two valid temperatures when
-loading the adm1021 module, then things are good.
-
-If nothing happens when loading the adm1021 module, and you are certain
-that your specific Xeon processor model includes compatible sensors, you
-will have to explicitly instantiate the sensor chips from user-space. See
-method 4 in Documentation/i2c/instantiating-devices.rst. Possible slave
-addresses are 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. It is likely that
-only temp2 will be correct and temp1 will have to be ignored.
-
-Previous generations of the Xeon processor (based on Pentium II/III)
-didn't have these sensors. Next generations of Xeon processors (533 MHz
-FSB and faster) lost them, until the Core-based generation which
-introduced integrated digital thermal sensors. These are supported by
-the coretemp driver.
diff --git a/Documentation/hwmon/amc6821.rst b/Documentation/hwmon/amc6821.rst
index 5ddb2849da90..dbd544cd1160 100644
--- a/Documentation/hwmon/amc6821.rst
+++ b/Documentation/hwmon/amc6821.rst
@@ -47,13 +47,18 @@ fan1_input ro tachometer speed
fan1_min rw "
fan1_max rw "
fan1_fault ro "
-fan1_div rw Fan divisor can be either 2 or 4.
+fan1_pulses rw Pulses per revolution can be either 2 or 4.
+fan1_target rw Target fan speed, to be used with pwm1_enable
+ mode 4.
pwm1 rw pwm1
pwm1_enable rw regulator mode, 1=open loop, 2=fan controlled
by remote temperature, 3=fan controlled by
combination of the on-chip temperature and
remote-sensor temperature,
+ 4=fan controlled by target rpm set with
+ fan1_target attribute.
+pwm1_mode rw Fan duty control mode (0=DC, 1=PWM)
pwm1_auto_channels_temp ro 1 if pwm_enable==2, 3 if pwm_enable==3
pwm1_auto_point1_pwm ro Hardwired to 0, shared for both
temperature channels.
diff --git a/Documentation/hwmon/asus_ec_sensors.rst b/Documentation/hwmon/asus_ec_sensors.rst
index 0bf99ba406dd..ca38922f4ec5 100644
--- a/Documentation/hwmon/asus_ec_sensors.rst
+++ b/Documentation/hwmon/asus_ec_sensors.rst
@@ -8,6 +8,7 @@ Supported boards:
* PRIME X570-PRO
* Pro WS X570-ACE
* ProArt X570-CREATOR WIFI
+ * ProArt X670E-CREATOR WIFI
* ProArt B550-CREATOR
* ROG CROSSHAIR VIII DARK HERO
* ROG CROSSHAIR VIII HERO (WI-FI)
diff --git a/Documentation/hwmon/corsair-cpro.rst b/Documentation/hwmon/corsair-cpro.rst
index 751f95476b57..15077203a2f8 100644
--- a/Documentation/hwmon/corsair-cpro.rst
+++ b/Documentation/hwmon/corsair-cpro.rst
@@ -39,3 +39,11 @@ fan[1-6]_target Sets fan speed target rpm.
pwm[1-6] Sets the fan speed. Values from 0-255. Can only be read if pwm
was set directly.
======================= =====================================================================
+
+Debugfs entries
+---------------
+
+======================= ===================
+firmware_version Firmware version
+bootloader_version Bootloader version
+======================= ===================
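+
+A minimal user-space sketch for reading these entries (the debugfs directory
+name below is an assumption and may differ on your system; debugfs is normally
+mounted at /sys/kernel/debug and usually requires root)::
+
+    from pathlib import Path
+
+    # Assumed location of the driver's debugfs directory.
+    DEBUGFS_DIR = Path("/sys/kernel/debug/corsair-cpro")
+
+    def read_entry(name: str) -> str:
+        """Return the contents of a corsair-cpro debugfs entry."""
+        return (DEBUGFS_DIR / name).read_text().strip()
+
+    for entry in ("firmware_version", "bootloader_version"):
+        print(f"{entry}: {read_entry(entry)}")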
diff --git a/Documentation/hwmon/corsair-psu.rst b/Documentation/hwmon/corsair-psu.rst
index 16db34d464dd..7ed794087f84 100644
--- a/Documentation/hwmon/corsair-psu.rst
+++ b/Documentation/hwmon/corsair-psu.rst
@@ -15,11 +15,11 @@ Supported devices:
Corsair HX850i
- Corsair HX1000i (Series 2022 and 2023)
+ Corsair HX1000i (Legacy and Series 2023)
- Corsair HX1200i
+ Corsair HX1200i (Legacy and Series 2023)
- Corsair HX1500i (Series 2022 and 2023)
+ Corsair HX1500i (Legacy and Series 2023)
Corsair RM550i
diff --git a/Documentation/hwmon/cros_ec_hwmon.rst b/Documentation/hwmon/cros_ec_hwmon.rst
new file mode 100644
index 000000000000..47ecae983bdb
--- /dev/null
+++ b/Documentation/hwmon/cros_ec_hwmon.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver cros_ec_hwmon
+===========================
+
+Supported chips:
+
+ * ChromeOS embedded controllers.
+
+ Prefix: 'cros_ec'
+
+ Addresses scanned: -
+
+Author:
+
+ - Thomas Weißschuh <linux@weissschuh.net>
+
+Description
+-----------
+
+This driver implements support for hardware monitoring commands exposed by the
+ChromeOS embedded controller used in Chromebooks and other devices.
+
+The channel labels exposed via hwmon are retrieved from the EC itself.
+
+Fan and temperature readings are supported.
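+
+A minimal sketch of reading the exported values from user space, assuming the
+standard hwmon sysfs layout (fan speeds in RPM, temperatures in millidegrees
+Celsius)::
+
+    from pathlib import Path
+
+    def find_hwmon(name: str = "cros_ec") -> Path:
+        """Return the first /sys/class/hwmon directory with a matching name."""
+        for hwmon in sorted(Path("/sys/class/hwmon").glob("hwmon*")):
+            if (hwmon / "name").read_text().strip() == name:
+                return hwmon
+        raise FileNotFoundError(f"no hwmon device named {name!r}")
+
+    hwmon = find_hwmon()
+    for attr in sorted(hwmon.glob("fan*_input")) + sorted(hwmon.glob("temp*_input")):
+        label_file = hwmon / attr.name.replace("_input", "_label")
+        label = label_file.read_text().strip() if label_file.exists() else attr.name
+        print(label, attr.read_text().strip())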
diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst
index 977263cb57a8..74905675d71f 100644
--- a/Documentation/hwmon/dell-smm-hwmon.rst
+++ b/Documentation/hwmon/dell-smm-hwmon.rst
@@ -360,6 +360,8 @@ Firmware Bug Affected Machines
======================================================= =================
Reading of fan states return spurious errors. Precision 490
+ OptiPlex 7060
+
Reading of fan types causes erratic fan behaviour. Studio XPS 8000
Studio XPS 8100
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 03d313af469a..913c11390a45 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -25,7 +25,6 @@ Hardware Monitoring Kernel Drivers
acpi_power_meter
ad7314
adc128d818
- adm1021
adm1025
adm1026
adm1031
@@ -58,6 +57,7 @@ Hardware Monitoring Kernel Drivers
coretemp
corsair-cpro
corsair-psu
+ cros_ec_hwmon
da9052
da9055
dell-smm-hwmon
@@ -154,7 +154,6 @@ Hardware Monitoring Kernel Drivers
max34440
max6620
max6639
- max6642
max6650
max6697
max8688
@@ -165,9 +164,13 @@ Hardware Monitoring Kernel Drivers
mlxreg-fan
mp2856
mp2888
+ mp2891
mp2975
+ mp2993
mp5023
+ mp5920
mp5990
+ mp9941
mpq8785
nct6683
nct6775
@@ -215,6 +218,7 @@ Hardware Monitoring Kernel Drivers
smsc47m192
smsc47m1
sparx5-temp
+ spd5118
stpddc60
surface_fan
sy7636a-hwmon
diff --git a/Documentation/hwmon/max31827.rst b/Documentation/hwmon/max31827.rst
index 44ab9dc064cb..9c11a9518c67 100644
--- a/Documentation/hwmon/max31827.rst
+++ b/Documentation/hwmon/max31827.rst
@@ -131,7 +131,14 @@ The Fault Queue bits select how many consecutive temperature faults must occur
before overtemperature or undertemperature faults are indicated in the
corresponding status bits.
-Notes
------
+PEC Support
+-----------
+
+When reading a register value, the PEC byte is computed and sent by the chip.
+
+PEC on word data transactions represents a significant increase in bandwidth
+usage (+33% for both writes and reads) under normal conditions.
-PEC is not implemented.
+Since this operation adds an extra delay to each transaction, PEC can be
+enabled or disabled through sysfs: write 1 to the "pec" attribute to enable
+PEC and 0 to disable it.
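+
+A minimal sketch of toggling PEC from user space (the I2C bus number and
+device address below are only an example; adjust them to where the chip is
+instantiated on your system, and note that writing the attribute requires
+root)::
+
+    from pathlib import Path
+
+    # Example location only; the real bus/address depend on the board.
+    PEC_ATTR = Path("/sys/bus/i2c/devices/1-0042/pec")
+
+    def set_pec(enable: bool) -> None:
+        """Enable or disable Packet Error Checking for this device."""
+        PEC_ATTR.write_text("1\n" if enable else "0\n")
+
+    set_pec(True)                        # enable PEC
+    print(PEC_ATTR.read_text().strip())  # read back the current setting
+    set_pec(False)                       # disable PEC again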
diff --git a/Documentation/hwmon/max6642.rst b/Documentation/hwmon/max6642.rst
deleted file mode 100644
index 7e5b7d4f9492..000000000000
--- a/Documentation/hwmon/max6642.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-Kernel driver max6642
-=====================
-
-Supported chips:
-
- * Maxim MAX6642
-
- Prefix: 'max6642'
-
- Addresses scanned: I2C 0x48-0x4f
-
- Datasheet: Publicly available at the Maxim website
-
- http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf
-
-Authors:
-
- Per Dalen <per.dalen@appeartv.com>
-
-Description
------------
-
-The MAX6642 is a digital temperature sensor. It senses its own temperature as
-well as the temperature on one external diode.
-
-All temperature values are given in degrees Celsius. Resolution
-is 0.25 degree for the local temperature and for the remote temperature.
diff --git a/Documentation/hwmon/mp2891.rst b/Documentation/hwmon/mp2891.rst
new file mode 100644
index 000000000000..55944d1b5457
--- /dev/null
+++ b/Documentation/hwmon/mp2891.rst
@@ -0,0 +1,179 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver mp2891
+====================
+
+Supported chips:
+
+ * MPS mp2891
+
+ Prefix: 'mp2891'
+
+ * Datasheet
+
+ Publicly available at the MPS website: https://www.monolithicpower.com/en/mp2891.html
+
+Author:
+
+ Noah Wang <noahwang.wang@outlook.com>
+
+Description
+-----------
+
+This driver implements support for Monolithic Power Systems, Inc. (MPS)
+MP2891 Multi-phase Digital VR Controller.
+
+Device compliant with:
+
+- PMBus rev 1.3 interface.
+
+Device supports direct and linear format for reading input voltage,
+output voltage, input current, output current, input power, output
+power, and temperature.
+
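+As an illustration of the "linear" data format mentioned above, a minimal
+sketch of decoding a PMBus LINEAR11 word is shown below. The driver performs
+this conversion internally; the example word is made up for the demo::
+
+    def decode_linear11(word: int) -> float:
+        """Decode a 16-bit PMBus LINEAR11 value: bits 15:11 hold a signed
+        5-bit exponent, bits 10:0 a signed 11-bit mantissa, and the result
+        is mantissa * 2**exponent."""
+        exponent = (word >> 11) & 0x1F
+        if exponent > 0x0F:              # sign-extend the 5-bit exponent
+            exponent -= 0x20
+        mantissa = word & 0x7FF
+        if mantissa > 0x3FF:             # sign-extend the 11-bit mantissa
+            mantissa -= 0x800
+        return mantissa * 2.0 ** exponent
+
+    print(decode_linear11(0xE808))       # example word: 8 * 2**-3 == 1.0
+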
+The driver exports the following attributes via the 'sysfs' files
+for input voltage:
+
+**in1_input**
+
+**in1_label**
+
+**in1_crit**
+
+**in1_crit_alarm**
+
+**in1_lcrit**
+
+**in1_lcrit_alarm**
+
+**in1_min**
+
+**in1_min_alarm**
+
+The driver provides the following attributes for output voltage:
+
+**in2_input**
+
+**in2_label**
+
+**in2_crit**
+
+**in2_crit_alarm**
+
+**in2_lcrit**
+
+**in2_lcrit_alarm**
+
+**in2_min**
+
+**in2_min_alarm**
+
+**in3_input**
+
+**in3_label**
+
+**in3_crit**
+
+**in3_crit_alarm**
+
+**in3_lcrit**
+
+**in3_lcrit_alarm**
+
+**in3_min**
+
+**in3_min_alarm**
+
+The driver provides the following attributes for input current:
+
+**curr1_input**
+
+**curr1_label**
+
+**curr1_max**
+
+**curr1_max_alarm**
+
+**curr2_input**
+
+**curr2_label**
+
+**curr2_max**
+
+**curr2_max_alarm**
+
+The driver provides the following attributes for output current:
+
+**curr3_input**
+
+**curr3_label**
+
+**curr3_crit**
+
+**curr3_crit_alarm**
+
+**curr3_max**
+
+**curr3_max_alarm**
+
+**curr4_input**
+
+**curr4_label**
+
+**curr4_crit**
+
+**curr4_crit_alarm**
+
+**curr4_max**
+
+**curr4_max_alarm**
+
+The driver provides the following attributes for input power:
+
+**power1_input**
+
+**power1_label**
+
+**power1_max**
+
+**power1_alarm**
+
+**power2_input**
+
+**power2_label**
+
+**power2_max**
+
+**power2_alarm**
+
+The driver provides the following attributes for output power:
+
+**power3_input**
+
+**power3_label**
+
+**power4_input**
+
+**power4_label**
+
+The driver provides the following attributes for temperature:
+
+**temp1_input**
+
+**temp1_crit**
+
+**temp1_crit_alarm**
+
+**temp1_max**
+
+**temp1_max_alarm**
+
+**temp2_input**
+
+**temp2_crit**
+
+**temp2_crit_alarm**
+
+**temp2_max**
+
+**temp2_max_alarm**
diff --git a/Documentation/hwmon/mp2993.rst b/Documentation/hwmon/mp2993.rst
new file mode 100644
index 000000000000..7a4fe0d946e0
--- /dev/null
+++ b/Documentation/hwmon/mp2993.rst
@@ -0,0 +1,150 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver mp2993
+====================
+
+Supported chips:
+
+ * MPS mp2993
+
+ Prefix: 'mp2993'
+
+ * Datasheet
+ https://scnbwymvp-my.sharepoint.com/:f:/g/personal/admin_scnbwy_com/Eth4kX1_J1hMsaASHiOYL4QBHU5a75r-tRfLKbHnJFdKLQ?e=vxj3DF
+
+Author:
+
+ Noah Wang <noahwang.wang@outlook.com>
+
+Description
+-----------
+
+This driver implements support for Monolithic Power Systems, Inc. (MPS)
+MP2993 Dual Loop Digital Multi-phase Controller.
+
+Device compliant with:
+
+- PMBus rev 1.3 interface.
+
+The driver exports the following attributes via the 'sysfs' files
+for input voltage:
+
+**in1_input**
+
+**in1_label**
+
+**in1_crit**
+
+**in1_crit_alarm**
+
+**in1_lcrit**
+
+**in1_lcrit_alarm**
+
+**in1_max**
+
+**in1_max_alarm**
+
+**in1_min**
+
+**in1_min_alarm**
+
+The driver provides the following attributes for output voltage:
+
+**in2_input**
+
+**in2_label**
+
+**in2_crit**
+
+**in2_crit_alarm**
+
+**in2_lcrit**
+
+**in2_lcrit_alarm**
+
+**in3_input**
+
+**in3_label**
+
+**in3_crit**
+
+**in3_crit_alarm**
+
+**in3_lcrit**
+
+**in3_lcrit_alarm**
+
+The driver provides the following attributes for input current:
+
+**curr1_input**
+
+**curr1_label**
+
+**curr1_max**
+
+**curr1_max_alarm**
+
+The driver provides the following attributes for output current:
+
+**curr2_input**
+
+**curr2_label**
+
+**curr2_crit**
+
+**curr2_crit_alarm**
+
+**curr2_max**
+
+**curr2_max_alarm**
+
+**curr3_input**
+
+**curr3_label**
+
+**curr3_crit**
+
+**curr3_crit_alarm**
+
+**curr3_max**
+
+**curr3_max_alarm**
+
+The driver provides the following attributes for input power:
+
+**power1_input**
+
+**power1_label**
+
+The driver provides the following attributes for output power:
+
+**power2_input**
+
+**power2_label**
+
+**power3_input**
+
+**power3_label**
+
+The driver provides the following attributes for temperature:
+
+**temp1_input**
+
+**temp1_crit**
+
+**temp1_crit_alarm**
+
+**temp1_max**
+
+**temp1_max_alarm**
+
+**temp2_input**
+
+**temp2_crit**
+
+**temp2_crit_alarm**
+
+**temp2_max**
+
+**temp2_max_alarm**
diff --git a/Documentation/hwmon/mp5920.rst b/Documentation/hwmon/mp5920.rst
new file mode 100644
index 000000000000..98946e7cf54e
--- /dev/null
+++ b/Documentation/hwmon/mp5920.rst
@@ -0,0 +1,91 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver mp5920
+====================
+
+Supported chips:
+
+ * MPS MP5920
+
+ Prefix: 'mp5920'
+
+ * Datasheet
+
+ Publicly available at the MPS website: https://www.monolithicpower.com/en/mp5920.html
+
+Authors:
+
+ Tony Ao <tony_ao@wiwynn.com>
+ Alex Vdovydchenko <xzeol@yahoo.com>
+
+Description
+-----------
+
+This driver implements support for Monolithic Power Systems, Inc. (MPS)
+MP5920 Hot-Swap Controller.
+
+Device compliant with:
+
+- PMBus rev 1.3 interface.
+
+Device supports direct and linear format for reading input voltage,
+output voltage, output current, input power and temperature.
+
+The driver exports the following attributes via the 'sysfs' files
+for input voltage:
+
+**in1_input**
+
+**in1_label**
+
+**in1_rated_max**
+
+**in1_rated_min**
+
+**in1_crit**
+
+**in1_alarm**
+
+The driver provides the following attributes for output voltage:
+
+**in2_input**
+
+**in2_label**
+
+**in2_rated_max**
+
+**in2_rated_min**
+
+**in2_alarm**
+
+The driver provides the following attributes for output current:
+
+**curr1_input**
+
+**curr1_label**
+
+**curr1_crit**
+
+**curr1_alarm**
+
+**curr1_rated_max**
+
+The driver provides the following attributes for input power:
+
+**power1_input**
+
+**power1_label**
+
+**power1_max**
+
+**power1_rated_max**
+
+The driver provides the following attributes for temperature:
+
+**temp1_input**
+
+**temp1_max**
+
+**temp1_crit**
+
+**temp1_alarm**
diff --git a/Documentation/hwmon/mp9941.rst b/Documentation/hwmon/mp9941.rst
new file mode 100644
index 000000000000..1274fa20e256
--- /dev/null
+++ b/Documentation/hwmon/mp9941.rst
@@ -0,0 +1,92 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver mp9941
+====================
+
+Supported chips:
+
+ * MPS mp9941
+
+ Prefix: 'mp9941'
+
+ * Datasheet
+ https://scnbwymvp-my.sharepoint.com/:f:/g/personal/admin_scnbwy_com/Eth4kX1_J1hMsaASHiOYL4QBHU5a75r-tRfLKbHnJFdKLQ?e=vxj3DF
+
+Author:
+
+ Noah Wang <noahwang.wang@outlook.com>
+
+Description
+-----------
+
+This driver implements support for Monolithic Power Systems, Inc. (MPS)
+MP9941 digital step-down converter.
+
+Device compliant with:
+
+- PMBus rev 1.3 interface.
+
+The driver exports the following attributes via the 'sysfs' files
+for input voltage:
+
+**in1_input**
+
+**in1_label**
+
+**in1_crit**
+
+**in1_crit_alarm**
+
+The driver provides the following attributes for output voltage:
+
+**in2_input**
+
+**in2_label**
+
+**in2_lcrit**
+
+**in2_lcrit_alarm**
+
+**in2_rated_max**
+
+**in2_rated_min**
+
+The driver provides the following attributes for input current:
+
+**curr1_input**
+
+**curr1_label**
+
+**curr1_max**
+
+**curr1_max_alarm**
+
+The driver provides the following attributes for output current:
+
+**curr2_input**
+
+**curr2_label**
+
+The driver provides the following attributes for input power:
+
+**power1_input**
+
+**power1_label**
+
+The driver provides the following attributes for output power:
+
+**power2_input**
+
+**power2_label**
+
+The driver provides the following attributes for temperature:
+
+**temp1_input**
+
+**temp1_crit**
+
+**temp1_crit_alarm**
+
+**temp1_max**
+
+**temp1_max_alarm**
diff --git a/Documentation/hwmon/spd5118.rst b/Documentation/hwmon/spd5118.rst
new file mode 100644
index 000000000000..ef7338f46575
--- /dev/null
+++ b/Documentation/hwmon/spd5118.rst
@@ -0,0 +1,63 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver spd5118
+=====================
+
+Supported chips:
+
+ * SPD5118 (JEDEC JESD300) compliant temperature sensor chips
+
+ JEDEC standard download:
+ https://www.jedec.org/standards-documents/docs/jesd300-5b01
+ (account required)
+
+
+ Prefix: 'spd5118'
+
+ Addresses scanned: I2C 0x50 - 0x57
+
+Author:
+ Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver implements support for SPD5118 (JEDEC JESD300) compliant temperature
+sensors, which are used on many DDR5 memory modules. Some systems use the sensor
+to prevent memory overheating by automatically throttling the memory controller.
+
+The driver auto-detects SPD5118 compliant chips, but can also be instantiated
+using devicetree/firmware nodes.
+
+An SPD5118 compliant chip supports a single temperature sensor. Critical minimum,
+minimum, maximum, and critical temperature can be configured. There are alarms
+for low critical, low, high, and critical thresholds.
+
+
+Hardware monitoring sysfs entries
+---------------------------------
+
+======================= ==================================
+temp1_input Temperature (RO)
+temp1_lcrit Low critical temperature (RW)
+temp1_min Minimum temperature (RW)
+temp1_max Maximum temperature (RW)
+temp1_crit Critical high temperature (RW)
+
+temp1_lcrit_alarm Temperature low critical alarm
+temp1_min_alarm Temperature low alarm
+temp1_max_alarm Temperature high alarm
+temp1_crit_alarm Temperature critical alarm
+======================= ==================================
+
+Alarm attributes are sticky until read and will be cleared afterwards
+unless the alarm condition still applies.
+
+
+SPD (Serial Presence Detect) support
+------------------------------------
+
+The driver also supports reading the SPD NVRAM on SPD5118 compatible chips.
+SPD data is available from the 'eeprom' binary attribute file attached to the
+chip's I2C device.
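+
+A minimal sketch of reading the SPD contents from user space (the I2C bus
+number and address below are an example only; SPD5118 devices respond at
+addresses 0x50-0x57)::
+
+    from pathlib import Path
+
+    # Example location; substitute the bus number and address of your module.
+    eeprom = Path("/sys/bus/i2c/devices/0-0050/eeprom")
+
+    data = eeprom.read_bytes()
+    print(f"read {len(data)} bytes of SPD data")
+    # Hex dump of the first 16 bytes.
+    print(" ".join(f"{b:02x}" for b in data[:16]))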
diff --git a/Documentation/i2c/i2c_bus.svg b/Documentation/i2c/i2c_bus.svg
index 3170de976373..45801de4af7d 100644
--- a/Documentation/i2c/i2c_bus.svg
+++ b/Documentation/i2c/i2c_bus.svg
@@ -1,5 +1,6 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!-- Updated to inclusive terminology by Wolfram Sang -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
@@ -1120,7 +1121,7 @@
<rect
style="opacity:1;fill:#ffb9b9;fill-opacity:1;stroke:#f00000;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect4424-3-2-9-7"
- width="112.5"
+ width="134.5"
height="113.75008"
x="112.5"
y="471.11221"
@@ -1133,15 +1134,15 @@
y="521.46259"
id="text4349"><tspan
sodipodi:role="line"
- x="167.5354"
+ x="178.5354"
y="521.46259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
id="tspan1273">I2C</tspan><tspan
sodipodi:role="line"
- x="167.5354"
+ x="178.5354"
y="552.71259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
- id="tspan1285">Master</tspan></text>
+ id="tspan1285">Controller</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3"
@@ -1171,7 +1172,7 @@
x="318.59131"
y="552.08752"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
- id="tspan1287">Slave</tspan></text>
+ id="tspan1287">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="m 112.49995,677.36223 c 712.50005,0 712.50005,0 712.50005,0"
@@ -1233,7 +1234,7 @@
x="468.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
- id="tspan1287-6">Slave</tspan></text>
+ id="tspan1287-6">Target</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3-1"
@@ -1258,7 +1259,7 @@
x="618.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
- id="tspan1287-9">Slave</tspan></text>
+ id="tspan1287-9">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM)"
d="m 150,583.61221 v 93.75"
diff --git a/Documentation/i2c/summary.rst b/Documentation/i2c/summary.rst
index 786c618ba3be..579a1c7df200 100644
--- a/Documentation/i2c/summary.rst
+++ b/Documentation/i2c/summary.rst
@@ -3,29 +3,27 @@ Introduction to I2C and SMBus
=============================
I²C (pronounce: I squared C and written I2C in the kernel documentation) is
-a protocol developed by Philips. It is a slow two-wire protocol (variable
-speed, up to 400 kHz), with a high speed extension (3.4 MHz). It provides
+a protocol developed by Philips. It is a two-wire protocol with variable
+speed (typically up to 400 kHz, high speed modes up to 5 MHz). It provides
an inexpensive bus for connecting many types of devices with infrequent or
-low bandwidth communications needs. I2C is widely used with embedded
-systems. Some systems use variants that don't meet branding requirements,
+low bandwidth communications needs. I2C is widely used with embedded
+systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.
-The latest official I2C specification is the `"I2C-bus specification and user
-manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
-published by NXP Semiconductors. However, you need to log-in to the site to
-access the PDF. An older version of the specification (revision 6) is archived
-`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
+The latest official I2C specification is the `"I²C-bus specification and user
+manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
+published by NXP Semiconductors, version 7 as of this writing.
SMBus (System Management Bus) is based on the I2C protocol, and is mostly
-a subset of I2C protocols and signaling. Many I2C devices will work on an
+a subset of I2C protocols and signaling. Many I2C devices will work on an
SMBus, but some SMBus protocols add semantics beyond what is required to
-achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
+achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
devices connected through SMBus are RAM modules configured using I2C EEPROMs,
and hardware monitoring chips.
Because the SMBus is mostly a subset of the generalized I2C bus, we can
-use its protocols on many I2C systems. However, there are systems that don't
+use its protocols on many I2C systems. However, there are systems that don't
meet both SMBus and I2C electrical constraints; and others which can't
implement all the common SMBus protocol semantics or messages.
@@ -33,29 +31,52 @@ implement all the common SMBus protocol semantics or messages.
Terminology
===========
-Using the terminology from the official documentation, the I2C bus connects
-one or more *master* chips and one or more *slave* chips.
+The I2C bus connects one or more controller chips and one or more target chips.
.. kernel-figure:: i2c_bus.svg
- :alt: Simple I2C bus with one master and 3 slaves
+ :alt: Simple I2C bus with one controller and 3 targets
Simple I2C bus
-A **master** chip is a node that starts communications with slaves. In the
-Linux kernel implementation it is called an **adapter** or bus. Adapter
-drivers are in the ``drivers/i2c/busses/`` subdirectory.
+A **controller** chip is a node that starts communications with targets. In the
+Linux kernel implementation it is also called an "adapter" or "bus". Controller
+drivers are usually in the ``drivers/i2c/busses/`` subdirectory.
-An **algorithm** contains general code that can be used to implement a
-whole class of I2C adapters. Each specific adapter driver either depends on
-an algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes
-its own implementation.
+An **algorithm** contains general code that can be used to implement a whole
+class of I2C controllers. Each specific controller driver either depends on an
+algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes its
+own implementation.
-A **slave** chip is a node that responds to communications when addressed
-by the master. In Linux it is called a **client**. Client drivers are kept
-in a directory specific to the feature they provide, for example
-``drivers/media/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
+A **target** chip is a node that responds to communications when addressed by a
+controller. In the Linux kernel implementation it is also called a "client".
+While targets are usually separate external chips, Linux can also act as a
+target (needs hardware support) and respond to another controller on the bus.
+This is then called a **local target**. In contrast, an external chip is called
+a **remote target**.
+
+Target drivers are kept in a directory specific to the feature they provide,
+for example ``drivers/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
video-related chips.
-For the example configuration in figure, you will need a driver for your
-I2C adapter, and drivers for your I2C devices (usually one driver for each
-device).
+For the example configuration in the figure above, you will need one driver for
+the I2C controller, and drivers for your I2C targets, usually one driver for
+each target.
+
+Synonyms
+--------
+
+As mentioned above, the Linux I2C implementation historically uses the terms
+"adapter" for controller and "client" for target. A number of data structures
+have these synonyms in their name. So, when discussing implementation details,
+you should be aware of these terms as well. The official wording is preferred,
+though.
+
+Outdated terminology
+--------------------
+
+In earlier I2C specifications, controller was named "master" and target was
+named "slave". These terms have been obsoleted with v7 of the specification and
+their use is also discouraged by the Linux Kernel Code of Conduct. You may
+still find them in references to documentation which has not been updated. The
+general attitude, however, is to use the inclusive terms: controller and
+target. Work to replace the old terminology in the Linux Kernel is ongoing.
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 555c2f839969..1fb3f5e6193c 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -150,6 +150,12 @@ applicable everywhere (see syntax).
That will limit the usefulness but on the other hand avoid
the illegal configurations all over.
+ If "select" <symbol> is followed by "if" <expr>, <symbol> will be
+ selected by the logical AND of the value of the current menu symbol
+ and <expr>. This means the lower limit can be downgraded due to the
+ presence of "if" <expr>. This behavior may seem weird, but we rely on
+ it. (The future of this behavior is undecided.)
+
- weak reverse dependencies: "imply" <symbol> ["if" <expr>]
This is similar to "select" as it enforces a lower limit on another
@@ -184,7 +190,7 @@ applicable everywhere (see syntax).
ability to hook into a secondary subsystem while allowing the user to
configure that subsystem out without also having to unset these drivers.
- Note: If the combination of FOO=y and BAR=m causes a link error,
+ Note: If the combination of FOO=y and BAZ=m causes a link error,
you can guard the function call with IS_REACHABLE()::
foo_init()
@@ -202,6 +208,10 @@ applicable everywhere (see syntax).
imply BAR
imply BAZ
+ Note: If "imply" <symbol> is followed by "if" <expr>, the default of <symbol>
+ will be the logical AND of the value of the current menu symbol and <expr>.
+ (The future of this behavior is undecided.)
+
- limiting menu display: "visible if" <expr>
This attribute is only applicable to menu blocks, if the condition is
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index a1f3eb7a43e2..131863142cbb 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -128,7 +128,7 @@ executed to make module versioning work.
modules_install
Install the external module(s). The default location is
- /lib/modules/<kernel_release>/extra/, but a prefix may
+ /lib/modules/<kernel_release>/updates/, but a prefix may
be added with INSTALL_MOD_PATH (discussed in section 5).
clean
@@ -417,7 +417,7 @@ directory:
And external modules are installed in:
- /lib/modules/$(KERNELRELEASE)/extra/
+ /lib/modules/$(KERNELRELEASE)/updates/
5.1 INSTALL_MOD_PATH
--------------------
@@ -438,10 +438,10 @@ And external modules are installed in:
-------------------
External modules are by default installed to a directory under
- /lib/modules/$(KERNELRELEASE)/extra/, but you may wish to
+ /lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
locate modules for a specific functionality in a separate
directory. For this purpose, use INSTALL_MOD_DIR to specify an
- alternative name to "extra."::
+ alternative name to "updates."::
$ make INSTALL_MOD_DIR=gandalf -C $KDIR \
M=$PWD modules_install
diff --git a/Documentation/leds/leds-blinkm.rst b/Documentation/leds/leds-blinkm.rst
index c74b5bc877b1..2d3c226a371a 100644
--- a/Documentation/leds/leds-blinkm.rst
+++ b/Documentation/leds/leds-blinkm.rst
@@ -7,7 +7,7 @@ The leds-blinkm driver supports the devices of the BlinkM family.
They are RGB-LED modules driven by a (AT)tiny microcontroller and
communicate through I2C. The default address of these modules is
0x09 but this can be changed through a command. By this you could
-dasy-chain up to 127 BlinkMs on an I2C bus.
+daisy-chain up to 127 BlinkMs on an I2C bus.
The device accepts RGB and HSB color values through separate commands.
Also you can store blinking sequences as "scripts" in
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index 95b0eb1486bf..94132d30e0e0 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -479,6 +479,7 @@ operations:
name: pin-get
doc: |
Get list of pins and its attributes.
+
- dump request without any attributes given - list all the pins in the
system
- dump request with target dpll - list all the pins registered with
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 00dc61358be8..495e35fcfb21 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -20,6 +20,25 @@ definitions:
name: header-flags
type: flags
entries: [ compact-bitsets, omit-reply, stats ]
+ -
+ name: module-fw-flash-status
+ type: enum
+ entries: [ started, in_progress, completed, error ]
+ -
+ name: c33-pse-ext-state
+ enum-name:
+ type: enum
+ name-prefix: ethtool-c33-pse-ext-state-
+ entries:
+ - none
+ - error-condition
+ - mr-mps-valid
+ - mr-pse-enable
+ - option-detect-ted
+ - option-vport-lim
+ - ovld-detected
+ - power-not-available
+ - short-detected
attribute-sets:
-
@@ -415,6 +434,26 @@ attribute-sets:
type: u32
-
+ name: irq-moderation
+ attributes:
+ -
+ name: usec
+ type: u32
+ -
+ name: pkts
+ type: u32
+ -
+ name: comps
+ type: u32
+ -
+ name: profile
+ attributes:
+ -
+ name: irq-moderation
+ type: nest
+ multi-attr: true
+ nested-attributes: irq-moderation
+ -
name: coalesce
attributes:
-
@@ -502,6 +541,15 @@ attribute-sets:
-
name: tx-aggr-time-usecs
type: u32
+ -
+ name: rx-profile
+ type: nest
+ nested-attributes: profile
+ -
+ name: tx-profile
+ type: nest
+ nested-attributes: profile
+
-
name: pause-stat
attributes:
@@ -892,6 +940,15 @@ attribute-sets:
name: power-mode
type: u8
-
+ name: c33-pse-pw-limit
+ attributes:
+ -
+ name: min
+ type: u32
+ -
+ name: max
+ type: u32
+ -
name: pse
attributes:
-
@@ -922,6 +979,33 @@ attribute-sets:
name: c33-pse-pw-d-status
type: u32
name-prefix: ethtool-a-
+ -
+ name: c33-pse-pw-class
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: c33-pse-actual-pw
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: c33-pse-ext-state
+ type: u32
+ name-prefix: ethtool-a-
+ enum: c33-pse-ext-state
+ -
+ name: c33-pse-ext-substate
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: c33-pse-avail-pw-limit
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: c33-pse-pw-limit-ranges
+ name-prefix: ethtool-a-
+ type: nest
+ multi-attr: true
+ nested-attributes: c33-pse-pw-limit
-
name: rss
attributes:
@@ -975,6 +1059,32 @@ attribute-sets:
-
name: burst-tmr
type: u32
+ -
+ name: module-fw-flash
+ attributes:
+ -
+ name: header
+ type: nest
+ nested-attributes: header
+ -
+ name: file-name
+ type: string
+ -
+ name: password
+ type: u32
+ -
+ name: status
+ type: u32
+ enum: module-fw-flash-status
+ -
+ name: status-msg
+ type: string
+ -
+ name: done
+ type: uint
+ -
+ name: total
+ type: uint
operations:
enum-model: directional
@@ -1325,6 +1435,8 @@ operations:
- tx-aggr-max-bytes
- tx-aggr-max-frames
- tx-aggr-time-usecs
+ - rx-profile
+ - tx-profile
dump: *coalesce-get-op
-
name: coalesce-set
@@ -1603,7 +1715,7 @@ operations:
attributes:
- header
reply:
- attributes: &pse
+ attributes:
- header
- podl-pse-admin-state
- podl-pse-admin-control
@@ -1611,6 +1723,12 @@ operations:
- c33-pse-admin-state
- c33-pse-admin-control
- c33-pse-pw-d-status
+ - c33-pse-pw-class
+ - c33-pse-actual-pw
+ - c33-pse-ext-state
+ - c33-pse-ext-substate
+ - c33-pse-avail-pw-limit
+ - c33-pse-pw-limit-ranges
dump: *pse-get-op
-
name: pse-set
@@ -1620,7 +1738,11 @@ operations:
do:
request:
- attributes: *pse
+ attributes:
+ - header
+ - podl-pse-admin-control
+ - c33-pse-admin-control
+ - c33-pse-avail-pw-limit
-
name: rss-get
doc: Get RSS params.
@@ -1730,3 +1852,28 @@ operations:
name: mm-ntf
doc: Notification for change in MAC Merge configuration.
notify: mm-get
+ -
+ name: module-fw-flash-act
+ doc: Flash transceiver module firmware.
+
+ attribute-set: module-fw-flash
+
+ do:
+ request:
+ attributes:
+ - header
+ - file-name
+ - password
+ -
+ name: module-fw-flash-ntf
+ doc: Notification for firmware flashing progress and status.
+
+ attribute-set: module-fw-flash
+
+ event:
+ attributes:
+ - header
+ - status
+ - status-msg
+ - done
+ - total
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 11a32373365a..959755be4d7f 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -350,6 +350,10 @@ attribute-sets:
buffer space, host descriptors etc.
type: uint
-
+ name: rx-csum-complete
+ doc: Number of packets that were marked as CHECKSUM_COMPLETE.
+ type: uint
+ -
name: rx-csum-unnecessary
doc: Number of packets that were marked as CHECKSUM_UNNECESSARY.
type: uint
diff --git a/Documentation/netlink/specs/nfsd.yaml b/Documentation/netlink/specs/nfsd.yaml
index d21234097167..c87658114852 100644
--- a/Documentation/netlink/specs/nfsd.yaml
+++ b/Documentation/netlink/specs/nfsd.yaml
@@ -115,6 +115,15 @@ attribute-sets:
type: nest
nested-attributes: sock
multi-attr: true
+ -
+ name: pool-mode
+ attributes:
+ -
+ name: mode
+ type: string
+ -
+ name: npools
+ type: u32
operations:
list:
@@ -123,8 +132,6 @@ operations:
doc: dump pending nfsd rpc
attribute-set: rpc-status
dump:
- pre: nfsd-nl-rpc-status-get-start
- post: nfsd-nl-rpc-status-get-done
reply:
attributes:
- xid
@@ -197,3 +204,21 @@ operations:
reply:
attributes:
- addr
+ -
+ name: pool-mode-set
+ doc: set the current server pool-mode
+ attribute-set: pool-mode
+ flags: [ admin-perm ]
+ do:
+ request:
+ attributes:
+ - mode
+ -
+ name: pool-mode-get
+ doc: get info about server pool-mode
+ attribute-set: pool-mode
+ do:
+ reply:
+ attributes:
+ - mode
+ - npools
diff --git a/Documentation/netlink/specs/ovs_flow.yaml b/Documentation/netlink/specs/ovs_flow.yaml
index 4fdfc6b5cae9..46f5d1cd8a5f 100644
--- a/Documentation/netlink/specs/ovs_flow.yaml
+++ b/Documentation/netlink/specs/ovs_flow.yaml
@@ -727,6 +727,12 @@ attribute-sets:
name: dec-ttl
type: nest
nested-attributes: dec-ttl-attrs
+ -
+ name: psample
+ type: nest
+ nested-attributes: psample-attrs
+ doc: |
+ Sends a packet sample to psample for external observation.
-
name: tunnel-key-attrs
enum-name: ovs-tunnel-key-attr
@@ -938,6 +944,17 @@ attribute-sets:
-
name: gbp
type: u32
+ -
+ name: psample-attrs
+ enum-name: ovs-psample-attr
+ name-prefix: ovs-psample-attr-
+ attributes:
+ -
+ name: group
+ type: u32
+ -
+ name: cookie
+ type: binary
operations:
name-prefix: ovs-flow-cmd-
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index 8c01e4e13195..b02d59a0349c 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -42,6 +42,16 @@ definitions:
- not-in-nw
- verbose
-
+ name: tc-flower-key-ctrl-flags
+ type: flags
+ entries:
+ - frag
+ - firstfrag
+ - tuncsum
+ - tundf
+ - tunoam
+ - tuncrit
+ -
name: tc-stats
type: struct
members:
@@ -2536,10 +2546,14 @@ attribute-sets:
name: key-flags
type: u32
byte-order: big-endian
+ enum: tc-flower-key-ctrl-flags
+ enum-as-flags: true
-
name: key-flags-mask
type: u32
byte-order: big-endian
+ enum: tc-flower-key-ctrl-flags
+ enum-as-flags: true
-
name: key-icmpv4-code
type: u8
@@ -2749,6 +2763,18 @@ attribute-sets:
name: key-spi-mask
type: u32
byte-order: big-endian
+ -
+ name: key-enc-flags
+ type: u32
+ byte-order: big-endian
+ enum: tc-flower-key-ctrl-flags
+ enum-as-flags: true
+ -
+ name: key-enc-flags-mask
+ type: u32
+ byte-order: big-endian
+ enum: tc-flower-key-ctrl-flags
+ enum-as-flags: true
-
name: tc-flower-key-enc-opts-attrs
attributes:
diff --git a/Documentation/netlink/specs/tcp_metrics.yaml b/Documentation/netlink/specs/tcp_metrics.yaml
new file mode 100644
index 000000000000..1bd94f43e526
--- /dev/null
+++ b/Documentation/netlink/specs/tcp_metrics.yaml
@@ -0,0 +1,169 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: tcp_metrics
+
+protocol: genetlink-legacy
+
+doc: |
+ Management interface for TCP metrics.
+
+c-family-name: tcp-metrics-genl-name
+c-version-name: tcp-metrics-genl-version
+max-by-define: true
+kernel-policy: global
+
+definitions:
+ -
+ name: tcp-fastopen-cookie-max
+ type: const
+ value: 16
+
+attribute-sets:
+ -
+ name: tcp-metrics
+ name-prefix: tcp-metrics-attr-
+ attributes:
+ -
+ name: addr-ipv4
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: addr-ipv6
+ type: binary
+ checks:
+ min-len: 16
+ byte-order: big-endian
+ display-hint: ipv6
+ -
+ name: age
+ type: u64
+ -
+ name: tw-tsval
+ type: u32
+ doc: unused
+ -
+ name: tw-ts-stamp
+ type: s32
+ doc: unused
+ -
+ name: vals
+ type: nest
+ nested-attributes: metrics
+ -
+ name: fopen-mss
+ type: u16
+ -
+ name: fopen-syn-drops
+ type: u16
+ -
+ name: fopen-syn-drop-ts
+ type: u64
+ -
+ name: fopen-cookie
+ type: binary
+ checks:
+ min-len: tcp-fastopen-cookie-max
+ -
+ name: saddr-ipv4
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: saddr-ipv6
+ type: binary
+ checks:
+ min-len: 16
+ byte-order: big-endian
+ display-hint: ipv6
+ -
+ name: pad
+ type: pad
+
+ -
+ name: metrics
+ # Intentionally don't define the name-prefix, see below.
+ doc: |
+ Attributes with metrics. Note that the values here do not match
+ the TCP_METRIC_* defines in the kernel, because kernel defines
+ are off-by one (e.g. rtt is defined as enum 0, while netlink carries
+ attribute type 1).
+ attributes:
+ -
+ name: rtt
+ type: u32
+ doc: |
+ Round Trip Time (RTT), in msecs with 3 bits fractional
+ (right-shift by 3 to get the msec value).
+ -
+ name: rttvar
+ type: u32
+ doc: |
+ Round Trip Time VARiance (RTTVAR), in msecs with 2 bits fractional
+ (right-shift by 2 to get the msec value).
+ -
+ name: ssthresh
+ type: u32
+ doc: Slow Start THRESHold.
+ -
+ name: cwnd
+ type: u32
+ doc: Congestion Window.
+ -
+ name: reodering
+ type: u32
+ doc: Reordering metric.
+ -
+ name: rtt-us
+ type: u32
+ doc: |
+ Round Trip Time (RTT), in usecs, with 3 bits fractional
+ (right-shift by 3 to get the usec value).
+ -
+ name: rttvar-us
+ type: u32
+ doc: |
+ Round Trip Time VARiance (RTTVAR), in usecs, with 2 bits fractional
+ (right-shift by 2 to get the usec value).
+
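+# As a quick illustration of the fractional encodings documented above for
+# rtt, rttvar, rtt-us and rttvar-us, a minimal Python sketch (plain
+# arithmetic only; netlink handling is out of scope here):
+#
+#   def rtt_ms(attr):
+#       """Attribute value with 3 fractional bits -> milliseconds."""
+#       return attr / 8.0          # attr >> 3 would drop the fraction
+#
+#   def rttvar_ms(attr):
+#       """Attribute value with 2 fractional bits -> milliseconds."""
+#       return attr / 4.0          # attr >> 2 would drop the fraction
+#
+#   print(rtt_ms(200), rttvar_ms(60))    # example values only
+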
+operations:
+ list:
+ -
+ name: get
+ doc: Retrieve metrics.
+ attribute-set: tcp-metrics
+
+ dont-validate: [ strict, dump ]
+
+ do:
+ request: &sel_attrs
+ attributes:
+ - addr-ipv4
+ - addr-ipv6
+ - saddr-ipv4
+ - saddr-ipv6
+ reply: &all_attrs
+ attributes:
+ - addr-ipv4
+ - addr-ipv6
+ - saddr-ipv4
+ - saddr-ipv6
+ - age
+ - vals
+ - fopen-mss
+ - fopen-syn-drops
+ - fopen-syn-drop-ts
+ - fopen-cookie
+ dump:
+ reply: *all_attrs
+
+ -
+ name: del
+ doc: Delete metrics.
+ attribute-set: tcp-metrics
+
+ dont-validate: [ strict, dump ]
+ flags: [ admin-perm ]
+
+ do:
+ request: *sel_attrs
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 72da7057e4cf..dceeb0d763aa 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field as you registered the UMEM on that
socket. These two sockets will now share one and the same UMEM.
-In this case, it is possible to use the NIC's packet steering
-capabilities to steer the packets to the right queue. This is not
-possible in the previous example as there is only one queue shared
-among sockets, so the NIC cannot do this steering as it can only steer
-between queues.
-
-In libxdp (or libbpf prior to version 1.0), you need to use the
-xsk_socket__create_shared() API as it takes a reference to a FILL ring
-and a COMPLETION ring that will be created for you and bound to the
-shared UMEM. You can use this function for all the sockets you create,
-or you can use it for the second and following ones and use
-xsk_socket__create() for the first one. Both methods yield the same
-result.
+There is no need to supply an XDP program like the one in the previous
+case where sockets were bound to the same queue id and
+device. Instead, use the NIC's packet steering capabilities to steer
+the packets to the right queue. In the previous example, there is only
+one queue shared among sockets, so the NIC cannot do this steering. It
+can only steer between queues.
+
+In libbpf, you need to use the xsk_socket__create_shared() API as it
+takes a reference to a FILL ring and a COMPLETION ring that will be
+created for you and bound to the shared UMEM. You can use this
+function for all the sockets you create, or you can use it for the
+second and following ones and use xsk_socket__create() for the first
+one. Both methods yield the same result.
Note that a UMEM can be shared between sockets on the same queue id
and device, as well as between queues on the same device and between
-devices at the same time. It is also possible to redirect to any
-socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
+devices at the same time.
XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.
- Note that if you are using the XDP_SHARED_UMEM option, it is
- possible to switch traffic between any socket bound to the same
- umem.
-
Q: My packets are sometimes corrupted. What is wrong?
A: Care has to be taken not to feed the same buffer in the UMEM into
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index fed821ef9b09..3bd72577af9a 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -189,22 +189,19 @@ the software port.
* - `rx[i]_gro_packets`
- Number of received packets processed using hardware-accelerated GRO. The
- number of hardware GRO offloaded packets received on ring i.
+ number of hardware GRO offloaded packets received on ring i. Only true GRO
+ packets are counted: only packets that are in an SKB with a GRO count > 1.
- Acceleration
* - `rx[i]_gro_bytes`
- Number of received bytes processed using hardware-accelerated GRO. The
- number of hardware GRO offloaded bytes received on ring i.
+ number of hardware GRO offloaded bytes received on ring i. Only true GRO
+ packets are counted: only packets that are in an SKB with a GRO count > 1.
- Acceleration
* - `rx[i]_gro_skbs`
- - The number of receive SKBs constructed while performing
- hardware-accelerated GRO.
- - Informative
-
- * - `rx[i]_gro_match_packets`
- - Number of received packets processed using hardware-accelerated GRO that
- met the flow table match criteria.
+ - The number of GRO SKBs constructed from hardware-accelerated GRO. Only SKBs
+ with a GRO count > 1 are counted.
- Informative
* - `rx[i]_gro_large_hds`
@@ -212,6 +209,15 @@ the software port.
headers that require additional memory to be allocated.
- Informative
+ * - `rx[i]_hds_nodata_packets`
+ - Number of header only packets in header/data split mode [#accel]_.
+ - Informative
+
+ * - `rx[i]_hds_nodata_bytes`
+ - Number of bytes for header only packets in header/data split mode
+ [#accel]_.
+ - Informative
+
* - `rx[i]_lro_packets`
- The number of LRO packets received on ring i [#accel]_.
- Acceleration
diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst
index 9232cd7da301..5d0b68f752c0 100644
--- a/Documentation/networking/devlink/devlink-region.rst
+++ b/Documentation/networking/devlink/devlink-region.rst
@@ -49,7 +49,7 @@ example usage
$ devlink region show [ DEV/REGION ]
$ devlink region del DEV/REGION snapshot SNAPSHOT_ID
$ devlink region dump DEV/REGION [ snapshot SNAPSHOT_ID ]
- $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ] address ADDRESS length length
+ $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ] address ADDRESS length LENGTH
# Show all of the exposed regions with region sizes:
$ devlink region show
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index 830c04354222..e3972d03cea0 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -11,6 +11,7 @@ Parameters
==========
.. list-table:: Generic parameters implemented
+ :widths: 5 5 90
* - Name
- Mode
@@ -68,6 +69,30 @@ Parameters
To verify that value has been set:
$ devlink dev param show pci/0000:16:00.0 name tx_scheduling_layers
+.. list-table:: Driver specific parameters implemented
+ :widths: 5 5 90
+
+ * - Name
+ - Mode
+ - Description
+ * - ``local_forwarding``
+ - runtime
+ - Controls loopback behavior by tuning scheduler bandwidth.
+ It impacts all kinds of functions: physical, virtual and
+ subfunctions.
+ Supported values are:
+
+ ``enabled`` - loopback traffic is allowed on port
+
+ ``disabled`` - loopback traffic is not allowed on this port
+
+ ``prioritized`` - loopback traffic is prioritized on this port
+
+ The default value of the ``local_forwarding`` parameter is ``enabled``.
+ ``prioritized`` provides the ability to adjust the loopback traffic rate to
+ increase one port's capacity at the cost of another. The user needs to
+ disable local forwarding on one of the ports in order to gain increased
+ capacity on the ``prioritized`` port, as sketched below.
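+
+A minimal sketch of switching the parameter at runtime from user space (the
+PCI device address is an example only; it assumes the ``devlink`` tool from
+iproute2 and root privileges)::
+
+    import subprocess
+
+    DEV = "pci/0000:16:00.0"   # example device address
+
+    def set_local_forwarding(value: str) -> None:
+        """value is one of 'enabled', 'disabled' or 'prioritized'."""
+        subprocess.run(
+            ["devlink", "dev", "param", "set", DEV,
+             "name", "local_forwarding", "value", value, "cmode", "runtime"],
+            check=True)
+
+    set_local_forwarding("prioritized")
+    subprocess.run(["devlink", "dev", "param", "show", DEV,
+                    "name", "local_forwarding"], check=True)
+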
Info versions
=============
diff --git a/Documentation/networking/devlink/octeontx2.rst b/Documentation/networking/devlink/octeontx2.rst
index 610de99b728a..d33a90dd44bf 100644
--- a/Documentation/networking/devlink/octeontx2.rst
+++ b/Documentation/networking/devlink/octeontx2.rst
@@ -40,3 +40,19 @@ The ``octeontx2 AF`` driver implements the following driver-specific parameters.
- runtime
- Use to set the quantum which hardware uses for scheduling among transmit queues.
Hardware uses weighted DWRR algorithm to schedule among all transmit queues.
+
+The ``octeontx2 PF`` driver implements the following driver-specific parameters.
+
+.. list-table:: Driver-specific parameters implemented
+ :widths: 5 5 5 85
+
+ * - Name
+ - Type
+ - Mode
+ - Description
+ * - ``unicast_filter_count``
+ - u8
+ - runtime
+ - Set the maximum number of unicast filters that can be programmed for
+ the device. This can be used to achieve better device resource
+ utilization, avoiding over consumption of unused MCAM table entries.
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 160bfb0ae8ba..3ab423b80e91 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -228,6 +228,7 @@ Userspace to kernel:
``ETHTOOL_MSG_PLCA_GET_STATUS`` get PLCA RS status
``ETHTOOL_MSG_MM_GET`` get MAC merge layer state
``ETHTOOL_MSG_MM_SET`` set MAC merge layer parameters
+ ``ETHTOOL_MSG_MODULE_FW_FLASH_ACT`` flash transceiver module firmware
===================================== =================================
Kernel to userspace:
@@ -274,6 +275,7 @@ Kernel to userspace:
``ETHTOOL_MSG_PLCA_GET_STATUS_REPLY`` PLCA RS status
``ETHTOOL_MSG_PLCA_NTF`` PLCA RS parameters
``ETHTOOL_MSG_MM_GET_REPLY`` MAC merge layer status
+ ``ETHTOOL_MSG_MODULE_FW_FLASH_NTF`` transceiver module flash updates
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -1033,6 +1035,8 @@ Kernel response contents:
``ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES`` u32 max aggr size, Tx
``ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES`` u32 max aggr packets, Tx
``ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS`` u32 time (us), aggr, Tx
+ ``ETHTOOL_A_COALESCE_RX_PROFILE`` nested profile of DIM, Rx
+ ``ETHTOOL_A_COALESCE_TX_PROFILE`` nested profile of DIM, Tx
=========================================== ====== =======================
Attributes are only included in reply if their value is not zero or the
@@ -1062,6 +1066,10 @@ block should be sent.
This feature is mainly of interest for specific USB devices which does not cope
well with frequent small-sized URBs transmissions.
+``ETHTOOL_A_COALESCE_RX_PROFILE`` and ``ETHTOOL_A_COALESCE_TX_PROFILE`` refer
+to DIM parameters, see `Generic Network Dynamic Interrupt Moderation (Net DIM)
+<https://www.kernel.org/doc/Documentation/networking/net_dim.rst>`_.
+
COALESCE_SET
============
@@ -1098,6 +1106,8 @@ Request contents:
``ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES`` u32 max aggr size, Tx
``ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES`` u32 max aggr packets, Tx
``ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS`` u32 time (us), aggr, Tx
+ ``ETHTOOL_A_COALESCE_RX_PROFILE`` nested profile of DIM, Rx
+ ``ETHTOOL_A_COALESCE_TX_PROFILE`` nested profile of DIM, Tx
=========================================== ====== =======================
Request is rejected if it attributes declared as unsupported by driver (i.e.
@@ -1720,17 +1730,28 @@ Request contents:
Kernel response contents:
- ====================================== ====== =============================
- ``ETHTOOL_A_PSE_HEADER`` nested reply header
- ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` u32 Operational state of the PoDL
- PSE functions
- ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` u32 power detection status of the
- PoDL PSE.
- ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` u32 Operational state of the PoE
- PSE functions.
- ``ETHTOOL_A_C33_PSE_PW_D_STATUS`` u32 power detection status of the
- PoE PSE.
- ====================================== ====== =============================
+ ========================================== ====== =============================
+ ``ETHTOOL_A_PSE_HEADER`` nested reply header
+ ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` u32 Operational state of the PoDL
+ PSE functions
+ ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` u32 power detection status of the
+ PoDL PSE.
+ ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` u32 Operational state of the PoE
+ PSE functions.
+ ``ETHTOOL_A_C33_PSE_PW_D_STATUS`` u32 power detection status of the
+ PoE PSE.
+ ``ETHTOOL_A_C33_PSE_PW_CLASS`` u32 power class of the PoE PSE.
+ ``ETHTOOL_A_C33_PSE_ACTUAL_PW`` u32 actual power drawn on the
+ PoE PSE.
+ ``ETHTOOL_A_C33_PSE_EXT_STATE`` u32 power extended state of the
+ PoE PSE.
+ ``ETHTOOL_A_C33_PSE_EXT_SUBSTATE`` u32 power extended substatus of
+ the PoE PSE.
+ ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` u32 currently configured power
+ limit of the PoE PSE.
+ ``ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES`` nested Supported power limit
+ configuration ranges.
+ ========================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies
the operational state of the PoDL PSE functions. The operational state of the
@@ -1762,6 +1783,46 @@ The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_PW_D_STATUS`` implementing
.. kernel-doc:: include/uapi/linux/ethtool.h
:identifiers: ethtool_c33_pse_pw_d_status
+When set, the optional ``ETHTOOL_A_C33_PSE_PW_CLASS`` attribute identifies
+the power class of the C33 PSE. It depends on the class negotiated between
+the PSE and the PD. This option corresponds to ``IEEE 802.3-2022``
+30.9.1.1.8 aPSEPowerClassification.
+
+When set, the optional ``ETHTOOL_A_C33_PSE_ACTUAL_PW`` attribute reports the
+actual power drawn by the PoE PSE, in mW. This option corresponds to
+``IEEE 802.3-2022`` 30.9.1.1.23 aPSEActualPower.
+
+When set, the optional ``ETHTOOL_A_C33_PSE_EXT_STATE`` attribute identifies
+the extended error state of the C33 PSE. Possible values are:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_c33_pse_ext_state
+
+When set, the optional ``ETHTOOL_A_C33_PSE_EXT_SUBSTATE`` attribute identifies
+the extended error substate of the C33 PSE. Possible values are:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_c33_pse_ext_substate_class_num_events
+ ethtool_c33_pse_ext_substate_error_condition
+ ethtool_c33_pse_ext_substate_mr_pse_enable
+ ethtool_c33_pse_ext_substate_option_detect_ted
+ ethtool_c33_pse_ext_substate_option_vport_lim
+ ethtool_c33_pse_ext_substate_ovld_detected
+ ethtool_c33_pse_ext_substate_pd_dll_power_type
+ ethtool_c33_pse_ext_substate_power_not_available
+ ethtool_c33_pse_ext_substate_short_detected
+
+When set, the optional ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` attribute
+identifies the C33 PSE power limit in mW.
+
+When set, the optional ``ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES`` nested attribute
+identifies the C33 PSE power limit ranges through
+``ETHTOOL_A_C33_PSE_PWR_VAL_LIMIT_RANGE_MIN`` and
+``ETHTOOL_A_C33_PSE_PWR_VAL_LIMIT_RANGE_MAX``.
+If the controller works with fixed classes, the min and max values will be
+equal.
+
PSE_SET
=======
@@ -1773,6 +1834,8 @@ Request contents:
``ETHTOOL_A_PSE_HEADER`` nested request header
``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` u32 Control PoDL PSE Admin state
``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` u32 Control PSE Admin state
+ ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` u32 Control PoE PSE available
+ power limit
====================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used
@@ -1783,6 +1846,18 @@ to control PoDL PSE Admin functions. This option is implementing
The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` implementing
``IEEE 802.3-2022`` 30.9.1.2.1 acPSEAdminControl.
+When set, the optional ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` attribute is
+used to control the available power value limit for C33 PSE in milliwatts.
+This attribute corresponds to the `pse_available_power` variable described in
+``IEEE 802.3-2022`` 33.2.4.4 Variables and `pse_avail_pwr` in 145.2.5.4
+Variables, which are described in power classes.
+
+It was decided to use milliwatts for this interface to unify it with other
+power monitoring interfaces, which also use milliwatts, and to align with
+various existing products that document power consumption in watts rather than
+classes. If power limit configuration based on classes is needed, the
+conversion can be done in user space, for example by ethtool.
+
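+As an illustration of such a user-space conversion, a minimal sketch mapping an
+802.3 power class to a milliwatt limit is shown below. The values are taken
+from common 802.3af/at/bt summaries (minimum PSE output per class) and should
+be checked against the standard before being relied upon::
+
+    # Approximate minimum PSE output power per class, in milliwatts.
+    CLASS_TO_MW = {
+        1: 4000, 2: 7000, 3: 15400, 4: 30000,
+        5: 45000, 6: 60000, 7: 75000, 8: 90000,
+    }
+
+    def class_to_pw_limit(pse_class: int) -> int:
+        """Return a power limit in mW for the requested class."""
+        try:
+            return CLASS_TO_MW[pse_class]
+        except KeyError:
+            raise ValueError(f"unsupported PSE class: {pse_class}") from None
+
+    print(class_to_pw_limit(4))          # 30000 mW for a class 4 request
+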
RSS_GET
=======
@@ -2033,6 +2108,73 @@ The attributes are propagated to the driver through the following structure:
.. kernel-doc:: include/linux/ethtool.h
:identifiers: ethtool_mm_cfg
+MODULE_FW_FLASH_ACT
+===================
+
+Flashes transceiver module firmware.
+
+Request contents:
+
+ ======================================= ====== ===========================
+ ``ETHTOOL_A_MODULE_FW_FLASH_HEADER`` nested request header
+ ``ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME`` string firmware image file name
+ ``ETHTOOL_A_MODULE_FW_FLASH_PASSWORD`` u32 transceiver module password
+ ======================================= ====== ===========================
+
+The firmware update process consists of three logical steps:
+
+1. Downloading a firmware image to the transceiver module and validating it.
+2. Running the firmware image.
+3. Committing the firmware image so that it is run upon reset.
+
+When the flash command is issued, these three steps are performed in that order.
+
+This message merely schedules the update process and returns immediately
+without blocking; the process then runs asynchronously. Since it can take
+several minutes to complete, the kernel emits notifications to user space
+during the update process to report its status and progress.
+
+The ``ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME`` attribute encodes the firmware
+image file name. The firmware image is downloaded to the transceiver module,
+validated, run and committed.
+
+The optional ``ETHTOOL_A_MODULE_FW_FLASH_PASSWORD`` attribute encodes a password
+that might be required as part of the transceiver module firmware update
+process.
+
+Notification contents:
+
+ +---------------------------------------------------+--------+----------------+
+ | ``ETHTOOL_A_MODULE_FW_FLASH_HEADER`` | nested | reply header |
+ +---------------------------------------------------+--------+----------------+
+ | ``ETHTOOL_A_MODULE_FW_FLASH_STATUS`` | u32 | status |
+ +---------------------------------------------------+--------+----------------+
+ | ``ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG`` | string | status message |
+ +---------------------------------------------------+--------+----------------+
+ | ``ETHTOOL_A_MODULE_FW_FLASH_DONE`` | uint | progress |
+ +---------------------------------------------------+--------+----------------+
+ | ``ETHTOOL_A_MODULE_FW_FLASH_TOTAL`` | uint | total |
+ +---------------------------------------------------+--------+----------------+
+
+The ``ETHTOOL_A_MODULE_FW_FLASH_STATUS`` attribute encodes the current status
+of the firmware update process. Possible values are:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_module_fw_flash_status
+
+The ``ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG`` attribute encodes a status message
+string.
+
+The ``ETHTOOL_A_MODULE_FW_FLASH_DONE`` and ``ETHTOOL_A_MODULE_FW_FLASH_TOTAL``
+attributes encode the completed and total amount of work, respectively.
+
Request translation
===================
@@ -2139,4 +2281,5 @@ are netlink only.
n/a ``ETHTOOL_MSG_PLCA_GET_STATUS``
n/a ``ETHTOOL_MSG_MM_GET``
n/a ``ETHTOOL_MSG_MM_SET``
+ n/a ``ETHTOOL_MSG_MODULE_FW_FLASH_ACT``
=================================== =====================================
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 7664c0bfe461..d1af04b952f8 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -19,6 +19,7 @@ Contents:
caif/index
ethtool-netlink
ieee802154
+ iso15765-2
j1939
kapi
msg_zerocopy
@@ -72,6 +73,7 @@ Contents:
mac80211-injection
mctp
mpls-sysctl
+ mptcp
mptcp-sysctl
multiqueue
multi-pf-netdev
@@ -104,6 +106,7 @@ Contents:
seg6-sysctl
skbuff
smc-sysctl
+ sriov
statistics
strparser
switchdev
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index bd50df6a5a42..3616389c8c2d 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -131,6 +131,20 @@ fib_multipath_hash_fields - UNSIGNED INTEGER
Default: 0x0007 (source IP, destination IP and IP protocol)
+fib_multipath_hash_seed - UNSIGNED INTEGER
+ The seed value used when calculating the hash for multipath routes. Applies
+ to both the IPv4 and IPv6 datapaths. Only present for kernels built with
+ CONFIG_IP_ROUTE_MULTIPATH enabled.
+
+ When set to 0, the seed value used for multipath routing defaults to an
+ internally generated random one.
+
+ The actual hashing algorithm is not specified -- there is no guarantee
+ that the next hop distribution produced by a given seed will remain stable
+ across kernel versions.
+
+ Default: 0 (random)
+
fib_sync_mem - UNSIGNED INTEGER
Amount of dirty memory from fib entries that can be backlogged before
synchronize_rcu is forced.
@@ -1196,6 +1210,19 @@ tcp_pingpong_thresh - INTEGER
Default: 1
+tcp_rto_min_us - INTEGER
+ Minimal TCP retransmission timeout (in microseconds). Note that the
+ rto_min route option has the highest precedence for configuring this
+ setting, followed by the TCP_BPF_RTO_MIN socket option, followed by
+ this tcp_rto_min_us sysctl.
+
+ The recommended practice is to use a value less than or equal to 200000
+ microseconds.
+
+ Possible Values: 1 - INT_MAX
+
+ Default: 200000
+
UDP variables
=============
diff --git a/Documentation/networking/iso15765-2.rst b/Documentation/networking/iso15765-2.rst
new file mode 100644
index 000000000000..0e9d96074178
--- /dev/null
+++ b/Documentation/networking/iso15765-2.rst
@@ -0,0 +1,386 @@
+.. SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+====================
+ISO 15765-2 (ISO-TP)
+====================
+
+Overview
+========
+
+ISO 15765-2, also known as ISO-TP, is a transport protocol specifically defined
+for diagnostic communication on CAN. It is widely used in the automotive
+industry, for example as the transport protocol for UDSonCAN (ISO 14229-3) or
+emission-related diagnostic services (ISO 15031-5).
+
+ISO-TP can be used both on CAN CC (aka Classical CAN) and CAN FD (CAN with
+Flexible Datarate) based networks. It is also designed to be compatible with a
+CAN network using SAE J1939 as the data link layer (however, this is not a
+requirement).
+
+Specifications used
+-------------------
+
+* ISO 15765-2:2024 : Road vehicles - Diagnostic communication over Controller
+ Area Network (DoCAN). Part 2: Transport protocol and network layer services.
+
+Addressing
+----------
+
+In its simplest form, ISO-TP is based on two kinds of addressing modes for the
+nodes connected to the same network:
+
+* physical addressing is implemented by two node-specific addresses and is used
+ in 1-to-1 communication.
+
+* functional addressing is implemented by one node-specific address and is used
+ in 1-to-N communication.
+
+Three different addressing formats can be employed:
+
+* "normal" : each address is represented simply by a CAN ID.
+
+* "extended": each address is represented by a CAN ID plus the first byte of
+ the CAN payload; both the CAN ID and the byte inside the payload shall be
+ different between two addresses.
+
+* "mixed": each address is represented by a CAN ID plus the first byte of
+ the CAN payload; the CAN ID is different between two addresses, but the
+ additional byte is the same.
+
+Transport protocol and associated frame types
+---------------------------------------------
+
+When transmitting data using the ISO-TP protocol, the payload either fits
+inside a single CAN message or it does not, taking into account the overhead
+generated by the protocol and the optional extended addressing. In the first
+case, the data is transmitted at once using a so-called Single Frame (SF). In
+the second case, ISO-TP defines a multi-frame protocol, in which the sender
+provides (through a First Frame - FF) the length of the PDU to be transmitted
+and also asks for a Flow Control (FC) frame, which provides the maximum
+supported size of a macro data block (``blocksize``) and the minimum time
+between the single CAN messages composing such a block (``stmin``). Once this
+information has been received, the sender starts to send frames containing
+fragments of the data payload (called Consecutive Frames - CF), stopping after
+every ``blocksize``-sized block to wait for confirmation from the receiver,
+which should then send another Flow Control frame to inform the sender that it
+is ready to receive more data.
+
+How to Use ISO-TP
+=================
+
+As with other CAN protocols, the ISO-TP stack support is built into the
+Linux network subsystem for the CAN bus, aka Linux-CAN or SocketCAN, and
+thus follows the same socket API.
+
+Creation and basic usage of an ISO-TP socket
+--------------------------------------------
+
+To use the ISO-TP stack, ``#include <linux/can/isotp.h>`` shall be used. A
+socket can then be created using the ``PF_CAN`` protocol family, the
+``SOCK_DGRAM`` type (as the underlying protocol is datagram-based by design)
+and the ``CAN_ISOTP`` protocol:
+
+.. code-block:: C
+
+ s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
+
+After the socket has been successfully created, ``bind(2)`` shall be called to
+bind the socket to the desired CAN interface; to do so:
+
+* a TX CAN ID shall be specified as part of the sockaddr supplied to the call
+ itself.
+
+* an RX CAN ID shall also be specified, unless broadcast flags have been set
+ through socket options (explained below).
+
+Once bound to an interface, the socket can be read from and written to using
+the usual ``read(2)`` and ``write(2)`` system calls, as well as ``send(2)``,
+``sendmsg(2)``, ``recv(2)`` and ``recvmsg(2)``.
+Unlike the CAN_RAW socket API, only the ISO-TP data field (the actual payload)
+is sent and received by the userspace application using these calls. The address
+information and the protocol information are automatically filled by the ISO-TP
+stack using the configuration supplied during socket creation. In the same way,
+the stack will use the transport mechanism when required (i.e., when the size
+of the data payload exceeds the MTU of the underlying CAN bus).
+
+The sockaddr structure used for SocketCAN has extensions for use with ISO-TP,
+as specified below:
+
+.. code-block:: C
+
+   struct sockaddr_can {
+           sa_family_t can_family;
+           int         can_ifindex;
+           union {
+                   struct { canid_t rx_id, tx_id; } tp;
+                   ...
+           } can_addr;
+   };
+
+* ``can_family`` and ``can_ifindex`` serve the same purpose as for other
+ SocketCAN sockets.
+
+* ``can_addr.tp.rx_id`` specifies the receive (RX) CAN ID and will be used as
+ a RX filter.
+
+* ``can_addr.tp.tx_id`` specifies the transmit (TX) CAN ID
+
+ISO-TP socket options
+---------------------
+
+When creating an ISO-TP socket, reasonable defaults are set. Some options can
+be modified with ``setsockopt(2)`` and/or read back with ``getsockopt(2)``.
+
+General options
+~~~~~~~~~~~~~~~
+
+General socket options can be passed using the ``CAN_ISOTP_OPTS`` optname:
+
+.. code-block:: C
+
+   struct can_isotp_options opts;
+   ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));
+
+where the ``can_isotp_options`` structure has the following contents:
+
+.. code-block:: C
+
+   struct can_isotp_options {
+           u32 flags;
+           u32 frame_txtime;
+           u8  ext_address;
+           u8  txpad_content;
+           u8  rxpad_content;
+           u8  rx_ext_address;
+   };
+
+* ``flags``: modifiers to be applied to the default behaviour of the ISO-TP
+ stack. The following flags are available (a usage sketch follows this list):
+
+ * ``CAN_ISOTP_LISTEN_MODE``: listen only (do not send FC frames); normally
+ used as a testing feature.
+
+ * ``CAN_ISOTP_EXTEND_ADDR``: use the byte specified in ``ext_address`` as an
+ additional address component. This enables the "mixed" addressing format if
+ used alone, or the "extended" addressing format if used in conjunction with
+ ``CAN_ISOTP_RX_EXT_ADDR``.
+
+ * ``CAN_ISOTP_TX_PADDING``: enable padding for transmitted frames, using
+ ``txpad_content`` as value for the padding bytes.
+
+ * ``CAN_ISOTP_RX_PADDING``: enable padding for the received frames, using
+ ``rxpad_content`` as value for the padding bytes.
+
+ * ``CAN_ISOTP_CHK_PAD_LEN``: check for correct padding length on the received
+ frames.
+
+ * ``CAN_ISOTP_CHK_PAD_DATA``: check padding bytes on the received frames
+ against ``rxpad_content``; if ``CAN_ISOTP_RX_PADDING`` is not specified,
+ this flag is ignored.
+
+ * ``CAN_ISOTP_HALF_DUPLEX``: force the ISO-TP socket into half duplex mode
+ (that is, the transport mechanism can only be active in one direction at a
+ time, not in both).
+
+ * ``CAN_ISOTP_FORCE_TXSTMIN``: ignore stmin from received FC; normally
+ used as a testing feature.
+
+ * ``CAN_ISOTP_FORCE_RXSTMIN``: ignore CFs depending on rx stmin; normally
+ used as a testing feature.
+
+ * ``CAN_ISOTP_RX_EXT_ADDR``: use ``rx_ext_address`` instead of ``ext_address``
+ as extended addressing byte on the reception path. If used in conjunction
+ with ``CAN_ISOTP_EXTEND_ADDR``, this flag effectively enables the "extended"
+ addressing format.
+
+ * ``CAN_ISOTP_WAIT_TX_DONE``: wait until the frame is sent before returning
+ from ``write(2)`` and ``send(2)`` calls (i.e., blocking write operations).
+
+ * ``CAN_ISOTP_SF_BROADCAST``: use 1-to-N functional addressing (cannot be
+ specified alongside ``CAN_ISOTP_CF_BROADCAST``).
+
+ * ``CAN_ISOTP_CF_BROADCAST``: use 1-to-N transmission without flow control
+ (cannot be specified alongside ``CAN_ISOTP_SF_BROADCAST``).
+ NOTE: this is not covered by the ISO 15765-2 standard.
+
+ * ``CAN_ISOTP_DYN_FC_PARMS``: enable dynamic update of flow control
+ parameters.
+
+* ``frame_txtime``: frame transmission time (defined as N_As/N_Ar inside the
+ ISO standard); if ``0``, the default (or the last set value) is used.
+ To set the transmission time to ``0``, the ``CAN_ISOTP_FRAME_TXTIME_ZERO``
+ macro (equal to 0xFFFFFFFF) shall be used.
+
+* ``ext_address``: extended addressing byte, used if the
+ ``CAN_ISOTP_EXTEND_ADDR`` flag is specified.
+
+* ``txpad_content``: byte used as padding value for transmitted frames.
+
+* ``rxpad_content``: byte used as padding value for received frames.
+
+* ``rx_ext_address``: extended addressing byte for the reception path, used if
+ the ``CAN_ISOTP_RX_EXT_ADDR`` flag is specified.
+
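+As an illustration, the options above can be combined and applied to the
+socket ``s`` created earlier, typically before calling ``bind(2)``. The flag
+selection and the padding byte below are arbitrary choices made only for this
+sketch:
+
+.. code-block:: C
+
+   struct can_isotp_options opts = {0};
+
+   /* pad transmitted frames with 0xCC and check the padding length of
+    * received frames
+    */
+   opts.flags = CAN_ISOTP_TX_PADDING | CAN_ISOTP_CHK_PAD_LEN;
+   opts.txpad_content = 0xCC;
+
+   if (setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts)) < 0)
+           perror("setsockopt CAN_ISOTP_OPTS");
+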
+Flow Control options
+~~~~~~~~~~~~~~~~~~~~
+
+Flow Control (FC) options can be passed using the ``CAN_ISOTP_RECV_FC`` optname
+to provide the communication parameters for receiving ISO-TP PDUs.
+
+.. code-block:: C
+
+ struct can_isotp_fc_options fc_opts;
+ ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_RECV_FC, &fc_opts, sizeof(fc_opts));
+
+where the ``can_isotp_fc_options`` structure has the following contents:
+
+.. code-block:: C
+
+   struct can_isotp_fc_options {
+           u8 bs;
+           u8 stmin;
+           u8 wftmax;
+   };
+
+* ``bs``: blocksize provided in flow control frames.
+
+* ``stmin``: minimum separation time provided in flow control frames; can
+ have the following values (others are reserved):
+
+ * 0x00 - 0x7F : 0 - 127 ms
+
+ * 0xF1 - 0xF9 : 100 us - 900 us
+
+* ``wftmax``: maximum number of wait frames provided in flow control frames.
+
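+For instance, the parameters could be set as in the sketch below (the values
+are arbitrary and only meant to illustrate the encoding; like the general
+options, they are normally configured before ``bind(2)`` unless
+``CAN_ISOTP_DYN_FC_PARMS`` is used):
+
+.. code-block:: C
+
+   struct can_isotp_fc_options fc_opts = {0};
+
+   fc_opts.bs = 8;        /* ask the sender to pause after blocks of 8 CFs */
+   fc_opts.stmin = 0x05;  /* request at least 5 ms between two CFs */
+   fc_opts.wftmax = 0;    /* do not announce support for wait frames */
+
+   if (setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_RECV_FC,
+                  &fc_opts, sizeof(fc_opts)) < 0)
+           perror("setsockopt CAN_ISOTP_RECV_FC");
+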
+Link Layer options
+~~~~~~~~~~~~~~~~~~
+
+Link Layer (LL) options can be passed using the ``CAN_ISOTP_LL_OPTS`` optname:
+
+.. code-block:: C
+
+ struct can_isotp_ll_options ll_opts;
+ ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_LL_OPTS, &ll_opts, sizeof(ll_opts));
+
+where the ``can_isotp_ll_options`` structure has the following contents:
+
+.. code-block:: C
+
+   struct can_isotp_ll_options {
+           u8 mtu;
+           u8 tx_dl;
+           u8 tx_flags;
+   };
+
+* ``mtu``: generated and accepted CAN frame type, can be equal to ``CAN_MTU``
+ for classical CAN frames or ``CANFD_MTU`` for CAN FD frames.
+
+* ``tx_dl``: maximum payload length for transmitted frames, can have one value
+ among: 8, 12, 16, 20, 24, 32, 48, 64. Values above 8 only apply to CAN FD
+ traffic (i.e.: ``mtu = CANFD_MTU``).
+
+* ``tx_flags``: flags set into ``struct canfd_frame.flags`` at frame creation.
+ Only applies to CAN FD traffic (i.e.: ``mtu = CANFD_MTU``).
+
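+As a sketch, a socket can be switched to CAN FD operation as shown below; the
+choice of a 64 byte ``tx_dl`` and of bit rate switching (``CANFD_BRS``) are
+assumptions of this example and depend on the underlying network:
+
+.. code-block:: C
+
+   struct can_isotp_ll_options ll_opts = {0};
+
+   ll_opts.mtu = CANFD_MTU;       /* generate and accept CAN FD frames */
+   ll_opts.tx_dl = 64;            /* use the full 64 byte CAN FD payload */
+   ll_opts.tx_flags = CANFD_BRS;  /* request bit rate switching */
+
+   if (setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_LL_OPTS,
+                  &ll_opts, sizeof(ll_opts)) < 0)
+           perror("setsockopt CAN_ISOTP_LL_OPTS");
+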
+Transmission stmin
+~~~~~~~~~~~~~~~~~~
+
+The transmission minimum separation time (stmin) can be forced using the
+``CAN_ISOTP_TX_STMIN`` optname and providing an stmin value in microseconds as
+a 32-bit unsigned integer; this will override the value sent by the receiver in
+flow control frames:
+
+.. code-block:: C
+
+ uint32_t stmin;
+ ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_TX_STMIN, &stmin, sizeof(stmin));
+
+Reception stmin
+~~~~~~~~~~~~~~~
+
+The reception minimum separation time (stmin) can be forced using the
+``CAN_ISOTP_RX_STMIN`` optname and providing an stmin value in microseconds as
+a 32-bit unsigned integer; received Consecutive Frames (CF) whose timestamps
+differ by less than this value will be ignored:
+
+.. code-block:: C
+
+ uint32_t stmin;
+ ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_RX_STMIN, &stmin, sizeof(stmin));
+
+Multi-frame transport support
+-----------------------------
+
+The ISO-TP stack contained inside the Linux kernel supports the multi-frame
+transport mechanism defined by the standard, with the following constraints:
+
+* the maximum size of a PDU is defined by a module parameter, with a hard
+ limit imposed at build time.
+
+* when a transmission is in progress, subsequent calls to ``write(2)`` will
+ block, while calls to ``send(2)`` will either block or fail depending on the
+ presence of the ``MSG_DONTWAIT`` flag.
+
+* no support is present for sending "wait frames": whether a PDU can be fully
+ received or not is decided when the First Frame is received.
+
+Errors
+------
+
+The following errors are reported to userspace:
+
+RX path errors
+~~~~~~~~~~~~~~
+
+============ ===============================================================
+-ETIMEDOUT timeout of data reception
+-EILSEQ sequence number mismatch during a multi-frame reception
+-EBADMSG data reception with wrong padding
+============ ===============================================================
+
+TX path errors
+~~~~~~~~~~~~~~
+
+========== =================================================================
+-ECOMM flow control reception timeout
+-EMSGSIZE flow control reception overflow
+-EBADMSG flow control reception with wrong layout/padding
+========== =================================================================
+
+Examples
+========
+
+Basic node example
+------------------
+
+The following example implements a node using "normal" physical addressing,
+with an RX ID equal to 0x18DAF142 and a TX ID equal to 0x18DA42F1. All options
+are left at their defaults.
+
+.. code-block:: C
+
+   int s;
+   struct sockaddr_can addr;
+   int ret;
+
+   s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
+   if (s < 0)
+           exit(1);
+
+   addr.can_family = AF_CAN;
+   addr.can_ifindex = if_nametoindex("can0");
+   addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
+   addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
+
+   ret = bind(s, (struct sockaddr *)&addr, sizeof(addr));
+   if (ret < 0)
+           exit(1);
+
+   /* Data can now be received using read(s, ...) and sent using write(s, ...) */
+
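+Once bound, a request/response exchange could look like the following sketch.
+The payload bytes are arbitrary example data (they happen to resemble a UDS
+request) and are not mandated by ISO-TP; error handling is kept minimal:
+
+.. code-block:: C
+
+   uint8_t request[] = { 0x22, 0xF1, 0x90 };  /* arbitrary example payload */
+   uint8_t response[4096];
+   ssize_t n;
+
+   /* the stack picks SF or FF/CF transmission depending on the payload size */
+   if (write(s, request, sizeof(request)) < 0)
+           exit(1);
+
+   /* read() returns one complete, reassembled ISO-TP payload */
+   n = read(s, response, sizeof(response));
+   if (n < 0)
+           exit(1);
+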
+Additional examples
+-------------------
+
+More complete (and complex) examples can be found inside the ``isotp*`` userland
+tools, distributed as part of the ``can-utils`` utilities at:
+https://github.com/linux-can/can-utils
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index 69975ce25a02..fd514bba8c43 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -7,14 +7,6 @@ MPTCP Sysfs variables
/proc/sys/net/mptcp/* Variables
===============================
-enabled - BOOLEAN
- Control whether MPTCP sockets can be created.
-
- MPTCP sockets can be created if the value is 1. This is a
- per-namespace sysctl.
-
- Default: 1 (enabled)
-
add_addr_timeout - INTEGER (seconds)
Set the timeout after which an ADD_ADDR control message will be
resent to an MPTCP peer that has not acknowledged a previous
@@ -25,16 +17,22 @@ add_addr_timeout - INTEGER (seconds)
Default: 120
-close_timeout - INTEGER (seconds)
- Set the make-after-break timeout: in absence of any close or
- shutdown syscall, MPTCP sockets will maintain the status
- unchanged for such time, after the last subflow removal, before
- moving to TCP_CLOSE.
+allow_join_initial_addr_port - BOOLEAN
+ Allow peers to send join requests to the IP address and port number used
+ by the initial subflow if the value is 1. This controls a flag that is
+ sent to the peer at connection time, and whether such join requests are
+ accepted or denied.
- The default value matches TCP_TIMEWAIT_LEN. This is a per-namespace
- sysctl.
+ Joins to addresses advertised with ADD_ADDR are not affected by this
+ value.
- Default: 60
+ This is a per-namespace sysctl.
+
+ Default: 1
+
+available_schedulers - STRING
+ Shows the available scheduler choices that are registered. More packet
+ schedulers may be available, but not loaded.
checksum_enabled - BOOLEAN
Control whether DSS checksum can be enabled.
@@ -44,18 +42,24 @@ checksum_enabled - BOOLEAN
Default: 0
-allow_join_initial_addr_port - BOOLEAN
- Allow peers to send join requests to the IP address and port number used
- by the initial subflow if the value is 1. This controls a flag that is
- sent to the peer at connection time, and whether such join requests are
- accepted or denied.
+close_timeout - INTEGER (seconds)
+ Set the make-after-break timeout: in absence of any close or
+ shutdown syscall, MPTCP sockets will maintain the status
+ unchanged for such time, after the last subflow removal, before
+ moving to TCP_CLOSE.
- Joins to addresses advertised with ADD_ADDR are not affected by this
- value.
+ The default value matches TCP_TIMEWAIT_LEN. This is a per-namespace
+ sysctl.
- This is a per-namespace sysctl.
+ Default: 60
- Default: 1
+enabled - BOOLEAN
+ Control whether MPTCP sockets can be created.
+
+ MPTCP sockets can be created if the value is 1. This is a
+ per-namespace sysctl.
+
+ Default: 1 (enabled)
pm_type - INTEGER
Set the default path manager type to use for each new MPTCP
@@ -74,6 +78,14 @@ pm_type - INTEGER
Default: 0
+scheduler - STRING
+ Select the scheduler of your choice.
+
+ Support for selection of different schedulers. This is a per-namespace
+ sysctl.
+
+ Default: "default"
+
stale_loss_cnt - INTEGER
The number of MPTCP-level retransmission intervals with no traffic and
pending outstanding data on a given subflow required to declare it stale.
@@ -85,11 +97,3 @@ stale_loss_cnt - INTEGER
This is a per-namespace sysctl.
Default: 4
-
-scheduler - STRING
- Select the scheduler of your choice.
-
- Support for selection of different schedulers. This is a per-namespace
- sysctl.
-
- Default: "default"
diff --git a/Documentation/networking/mptcp.rst b/Documentation/networking/mptcp.rst
new file mode 100644
index 000000000000..17f2bab61164
--- /dev/null
+++ b/Documentation/networking/mptcp.rst
@@ -0,0 +1,156 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+Multipath TCP (MPTCP)
+=====================
+
+Introduction
+============
+
+Multipath TCP or MPTCP is an extension to the standard TCP and is described in
+`RFC 8684 (MPTCPv1) <https://www.rfc-editor.org/rfc/rfc8684.html>`_. It allows a
+device to make use of multiple interfaces at once to send and receive TCP
+packets over a single MPTCP connection. MPTCP can aggregate the bandwidth of
+multiple interfaces or prefer the one with the lowest latency. It also allows a
+fail-over if one path is down, and the traffic is seamlessly reinjected on other
+paths.
+
+For more details about Multipath TCP in the Linux kernel, please see the
+official website: `mptcp.dev <https://www.mptcp.dev>`_.
+
+
+Use cases
+=========
+
+Thanks to MPTCP, being able to use multiple paths in parallel brings new
+use-cases compared to plain TCP:
+
+- Seamless handovers: switching from one path to another while preserving
+ established connections, e.g. to be used in mobility use-cases, like on
+ smartphones.
+- Best network selection: using the "best" available path depending on some
+ conditions, e.g. latency, losses, cost, bandwidth, etc.
+- Network aggregation: using multiple paths at the same time to have a higher
+ throughput, e.g. to combine fixed and mobile networks to send files faster.
+
+
+Concepts
+========
+
+Technically, when a new socket is created with the ``IPPROTO_MPTCP`` protocol
+(Linux-specific), a *subflow* (or *path*) is created. This *subflow* consists of
+a regular TCP connection that is used to transmit data through one interface.
+Additional *subflows* can be negotiated later between the hosts. For the remote
+host to be able to detect the use of MPTCP, a new field is added to the TCP
+*option* field of the underlying TCP *subflow*. This field contains, amongst
+other things, an ``MP_CAPABLE`` option that tells the other host to use MPTCP if
+it is supported. If the remote host or any middlebox in between does not support
+it, the returned ``SYN+ACK`` packet will not contain MPTCP options in the TCP
+*option* field. In that case, the connection will be "downgraded" to plain TCP,
+and it will continue with a single path.
+
+This behavior is made possible by two internal components: the path manager, and
+the packet scheduler.
+
+Path Manager
+------------
+
+The Path Manager is in charge of *subflows*, from creation to deletion, and also
+address announcements. Typically, it is the client side that initiates subflows,
+and the server side that announces additional addresses via the ``ADD_ADDR`` and
+``REMOVE_ADDR`` options.
+
+Path managers are controlled by the ``net.mptcp.pm_type`` sysctl knob -- see
+mptcp-sysctl.rst. There are two types: the in-kernel one (type ``0``), where the
+same rules are applied to all the connections (see: ``ip mptcp``); and the
+userspace one (type ``1``), controlled by a userspace daemon (e.g. `mptcpd
+<https://mptcpd.mptcp.dev/>`_), where different rules can be applied for each
+connection. The path managers can be controlled via a Netlink API; see
+netlink_spec/mptcp_pm.rst.
+
+To be able to use multiple IP addresses on a host to create multiple *subflows*
+(paths), the default in-kernel MPTCP path-manager needs to know which IP
+addresses can be used. This can be configured with ``ip mptcp endpoint`` for
+example.
+
+Packet Scheduler
+----------------
+
+The Packet Scheduler is in charge of selecting which available *subflow(s)* to
+use to send the next data packet. It can decide to maximize the use of the
+available bandwidth, to pick only the path with the lowest latency, or to apply
+any other policy depending on the configuration.
+
+Packet schedulers are controlled by the ``net.mptcp.scheduler`` sysctl knob --
+see mptcp-sysctl.rst.
+
+
+Sockets API
+===========
+
+Creating MPTCP sockets
+----------------------
+
+On Linux, MPTCP can be used by selecting MPTCP instead of TCP when creating the
+``socket``:
+
+.. code-block:: C
+
+ int sd = socket(AF_INET(6), SOCK_STREAM, IPPROTO_MPTCP);
+
+Note that ``IPPROTO_MPTCP`` is defined as ``262``.
+
+If MPTCP is not supported, ``errno`` will be set to:
+
+- ``EINVAL`` (*Invalid argument*): MPTCP is not available, on kernels < v5.6.
+- ``EPROTONOSUPPORT`` (*Protocol not supported*): MPTCP has not been compiled
+ in, on kernels >= v5.6.
+- ``ENOPROTOOPT`` (*Protocol not available*): MPTCP has been disabled using
+ ``net.mptcp.enabled`` sysctl knob; see mptcp-sysctl.rst.
+
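+Applications that want to use MPTCP opportunistically can therefore fall back
+to plain TCP themselves. A minimal sketch (IPv4 only, minimal error handling;
+not the only possible approach):
+
+.. code-block:: C
+
+   int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
+   if (sd < 0 && (errno == EINVAL || errno == EPROTONOSUPPORT ||
+                  errno == ENOPROTOOPT))
+           sd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); /* fall back to TCP */
+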
+MPTCP is then opt-in: applications need to explicitly request it. Note that
+applications can be forced to use MPTCP with different techniques, e.g.
+``LD_PRELOAD`` (see ``mptcpize``), eBPF (see ``mptcpify``), SystemTap,
+``GODEBUG`` (``GODEBUG=multipathtcp=1``), etc.
+
+Switching to ``IPPROTO_MPTCP`` instead of ``IPPROTO_TCP`` should be as
+transparent as possible for the userspace applications.
+
+Socket options
+--------------
+
+MPTCP supports most socket options handled by TCP. It is possible some less
+common options are not supported, but contributions are welcome.
+
+Generally, the same value is propagated to all subflows, including the ones
+created after the calls to ``setsockopt()``. eBPF can be used to set different
+values per subflow.
+
+There are some MPTCP specific socket options at the ``SOL_MPTCP`` (284) level to
+retrieve info. They fill the ``optval`` buffer of the ``getsockopt()`` system
+call:
+
+- ``MPTCP_INFO``: Uses ``struct mptcp_info``.
+- ``MPTCP_TCPINFO``: Uses ``struct mptcp_subflow_data``, followed by an array of
+ ``struct tcp_info``.
+- ``MPTCP_SUBFLOW_ADDRS``: Uses ``struct mptcp_subflow_data``, followed by an
+ array of ``mptcp_subflow_addrs``.
+- ``MPTCP_FULL_INFO``: Uses ``struct mptcp_full_info``, with one pointer to an
+ array of ``struct mptcp_subflow_info`` (including the
+ ``struct mptcp_subflow_addrs``), and one pointer to an array of
+ ``struct tcp_info``, followed by the content of ``struct mptcp_info``.
+
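+For example, a minimal sketch of reading ``MPTCP_INFO`` on an established MPTCP
+socket ``sd`` (``mptcpi_subflows`` is one of the ``struct mptcp_info`` fields
+from ``<linux/mptcp.h>``; error handling is omitted for brevity):
+
+.. code-block:: C
+
+   struct mptcp_info info = { 0 };
+   socklen_t len = sizeof(info);
+
+   if (getsockopt(sd, SOL_MPTCP, MPTCP_INFO, &info, &len) == 0)
+           printf("subflows currently in use: %u\n", info.mptcpi_subflows);
+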
+Note that at the TCP level, the ``TCP_IS_MPTCP`` socket option can be used to
+know if MPTCP is currently being used: the value will be set to 1 if it is.
+
+
+Design choices
+==============
+
+A new socket type has been added for MPTCP for the userspace-facing socket. The
+kernel is in charge of creating subflow sockets: they are TCP sockets where the
+behavior is modified using TCP-ULP.
+
+MPTCP listen sockets will create "plain" *accepted* TCP sockets if the
+connection request from the client didn't ask for MPTCP, making the performance
+impact minimal when MPTCP is enabled by default.
diff --git a/Documentation/networking/net_dim.rst b/Documentation/networking/net_dim.rst
index 3bed9fd95336..8908fd7b0a8d 100644
--- a/Documentation/networking/net_dim.rst
+++ b/Documentation/networking/net_dim.rst
@@ -169,6 +169,48 @@ usage is not complete but it should make the outline of the usage clear.
...
}
+
+Tuning DIM
+==========
+
+Net DIM serves a range of network devices and can deliver significant
+interrupt moderation benefits. However, the preset DIM profiles do not suit
+every device: a mismatch between a device's characteristics and the selected
+profiles can lead to suboptimal performance.
+
+To address this issue, Net DIM introduces a per-device control to modify and
+access a device's ``rx-profile`` and ``tx-profile`` parameters.
+Assume that the target network device is named ethx, that ethx declares
+support only for RX profile setting, and that it supports modification of the
+``usec`` and ``pkts`` fields (see the data structure
+:c:type:`struct dim_cq_moder <dim_cq_moder>`).
+
+You can use ethtool to modify the current RX DIM profile, where all
+values are initially 64::
+
+ $ ethtool -C ethx rx-profile 1,1,n_2,2,n_3,n,n_n,4,n_n,n,n
+
+``n`` means do not modify this field, and ``_`` separates structure
+elements of the profile array.
+
+The current profiles can be queried using::
+
+ $ ethtool -c ethx
+ ...
+ rx-profile:
+ {.usec = 1, .pkts = 1, .comps = n/a,},
+ {.usec = 2, .pkts = 2, .comps = n/a,},
+ {.usec = 3, .pkts = 64, .comps = n/a,},
+ {.usec = 64, .pkts = 4, .comps = n/a,},
+ {.usec = 64, .pkts = 64, .comps = n/a,}
+ tx-profile: n/a
+
+If the network device does not support specific fields of the DIM profiles,
+the corresponding fields are displayed as ``n/a``. If an ``n/a`` field is
+modified, an error message will be reported.
+
+
Dynamic Interrupt Moderation (DIM) library API
==============================================
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index 1283240d7620..f64641417c54 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -327,6 +327,12 @@ Some of the interface modes are described below:
This is the Penta SGMII mode, it is similar to QSGMII but it combines 5
SGMII lines into a single link compared to 4 on QSGMII.
+``PHY_INTERFACE_MODE_10G_QXGMII``
+ Represents the 10G-QXGMII PHY-MAC interface as defined by the Cisco USXGMII
+ Multiport Copper Interface document. It supports 4 ports over a 10.3125 GHz
+ SerDes lane, each port having speeds of 2.5G / 1G / 100M / 10M achieved
+ through symbol replication. The PCS expects the standard USXGMII code word.
+
Pause frames / flow control
===========================
diff --git a/Documentation/networking/sriov.rst b/Documentation/networking/sriov.rst
new file mode 100644
index 000000000000..5deb4ff3154f
--- /dev/null
+++ b/Documentation/networking/sriov.rst
@@ -0,0 +1,25 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+NIC SR-IOV APIs
+===============
+
+Modern NICs are strongly encouraged to focus on implementing the ``switchdev``
+model (see :ref:`switchdev`) to configure forwarding and security of SR-IOV
+functionality.
+
+Legacy API
+==========
+
+The old SR-IOV API is implemented in ``rtnetlink`` Netlink family as part of
+the ``RTM_GETLINK`` and ``RTM_SETLINK`` commands. On the driver side
+it consists of a number of ``ndo_set_vf_*`` and ``ndo_get_vf_*`` callbacks.
+
+Since the legacy APIs do not integrate well with the rest of the stack,
+the API is considered frozen; no new functionality or extensions
+will be accepted. New drivers should not implement the uncommon callbacks;
+namely the following callbacks are off limits:
+
+ - ``ndo_get_vf_port``
+ - ``ndo_set_vf_port``
+ - ``ndo_set_vf_rss_query_en``
diff --git a/Documentation/networking/tcp_ao.rst b/Documentation/networking/tcp_ao.rst
index 8a58321acce7..e96e62d1dab3 100644
--- a/Documentation/networking/tcp_ao.rst
+++ b/Documentation/networking/tcp_ao.rst
@@ -337,6 +337,15 @@ TCP-AO per-socket counters are also duplicated with per-netns counters,
exposed with SNMP. Those are ``TCPAOGood``, ``TCPAOBad``, ``TCPAOKeyNotFound``,
``TCPAORequired`` and ``TCPAODroppedIcmps``.
+For monitoring purposes, the following TCP-AO trace events are available:
+``tcp_hash_bad_header``, ``tcp_hash_ao_required``, ``tcp_ao_handshake_failure``,
+``tcp_ao_wrong_maclen``, ``tcp_ao_key_not_found``,
+``tcp_ao_rnext_request``, ``tcp_ao_synack_no_key``, ``tcp_ao_snd_sne_update``,
+``tcp_ao_rcv_sne_update``. Each of them can be enabled separately, and they can
+be filtered by net-namespace, 4-tuple, family, L3 index, and TCP header flags.
+If a segment has a TCP-AO header, the filters may also include keyid, rnext,
+and maclen. SNE updates include the rolled-over numbers.
+
RFC 5925 very permissively specifies how TCP port matching can be done for
MKTs::
diff --git a/Documentation/power/regulator/consumer.rst b/Documentation/power/regulator/consumer.rst
index 85c2bf5ac07e..9d2416f63f6e 100644
--- a/Documentation/power/regulator/consumer.rst
+++ b/Documentation/power/regulator/consumer.rst
@@ -227,3 +227,9 @@ directly written to the voltage selector register, use::
int regulator_list_hardware_vsel(struct regulator *regulator,
unsigned selector);
+
+To access the hardware for enabling/disabling the regulator, consumers must
+use regulator_get_exclusive(), as this cannot work if there is more than one
+consumer. To enable/disable the regulator, use::
+
+ int regulator_hardware_enable(struct regulator *regulator, bool enable);
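+
+A minimal consumer sketch (the "vcc" supply name and the surrounding driver
+variables are assumptions of this example) could look like::
+
+        reg = regulator_get_exclusive(dev, "vcc");
+        if (IS_ERR(reg))
+                return PTR_ERR(reg);
+
+        /* enable the regulator via direct hardware access */
+        ret = regulator_hardware_enable(reg, true);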
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index fd96e4a3cef9..5e1fcfad1c4c 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -227,7 +227,7 @@ preferably including links to previous postings, for example::
The amount of mooing will depend on packet rate so should match
the diurnal cycle quite well.
- Signed-of-by: Joe Defarmer <joe@barn.org>
+ Signed-off-by: Joe Defarmer <joe@barn.org>
---
v3:
- add a note about time-of-day mooing fluctuation to the commit message
diff --git a/Documentation/translations/zh_CN/driver-api/gpio/index.rst b/Documentation/translations/zh_CN/driver-api/gpio/index.rst
index 9a6a14162a6c..e4d54724a1b5 100644
--- a/Documentation/translations/zh_CN/driver-api/gpio/index.rst
+++ b/Documentation/translations/zh_CN/driver-api/gpio/index.rst
@@ -18,8 +18,6 @@
:caption: 目录
:maxdepth: 2
- legacy
-
Todolist:
* intro
diff --git a/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst b/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst
deleted file mode 100644
index 0faf042001d2..000000000000
--- a/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst
+++ /dev/null
@@ -1,618 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-.. include:: ../../disclaimer-zh_CN.rst
-
-:Original: Documentation/driver-api/gpio/legacy.rst
-
-:翻译:
-
- 傅炜 Fu Wei <tekkamanninja@gmail.com>
- 司延腾 Yanteng Si <siyanteng@loongson.cn>
-
-:校译:
-
-
-传统GPIO接口
-============
-
-本文档概述了Linux下的GPIO访问公约。
-
-这些函数以 gpio_* 作为前缀。其他的函数不允许使用这样的前缀或相关的
-__gpio_* 前缀。
-
-
-什么是GPIO?
-============
-"通用输入/输出口"(GPIO)是一个灵活的由软件控制的数字信号。他们可
-由多种芯片提供,且对于从事嵌入式和定制硬件的 Linux 开发者来说是
-比较熟悉。每个GPIO 都代表一个连接到特定引脚或球栅阵列(BGA)封装中
-“球珠”的一个位。电路板原理图显示了 GPIO 与外部硬件的连接关系。
-驱动可以编写成通用代码,以使板级启动代码可传递引脚配置数据给驱动。
-
-片上系统 (SOC) 处理器对 GPIO 有很大的依赖。在某些情况下,每个
-非专用引脚都可配置为 GPIO,且大多数芯片都最少有一些 GPIO。
-可编程逻辑器件(类似 FPGA) 可以方便地提供 GPIO。像电源管理和
-音频编解码器这样的多功能芯片经常留有一些这样的引脚来帮助那些引脚
-匮乏的 SOC。同时还有通过 I2C 或 SPI 串行总线连接的“GPIO扩展器”
-芯片。大多数 PC 的南桥有一些拥有 GPIO 能力的引脚 (只有BIOS
-固件才知道如何使用他们)。
-
-GPIO 的实际功能因系统而异。通常用法有:
-
- - 输出值可写 (高电平=1,低电平=0)。一些芯片也有如何驱动这些值的选项,
- 例如只允许输出一个值、支持“线与”及其他取值类似的模式(值得注意的是
- “开漏”信号)
-
- - 输入值可读(1、0)。一些芯片支持引脚在配置为“输出”时回读,这对于类似
- “线与”的情况(以支持双向信号)是非常有用的。GPIO 控制器可能有输入
- 去毛刺/消抖逻辑,这有时需要软件控制。
-
- - 输入通常可作为 IRQ 信号,一般是沿触发,但有时是电平触发。这样的 IRQ
- 可能配置为系统唤醒事件,以将系统从低功耗状态下唤醒。
-
- - 通常一个 GPIO 根据不同产品电路板的需求,可以配置为输入或输出,也有仅
- 支持单向的。
-
- - 大部分 GPIO 可以在持有自旋锁时访问,但是通常由串行总线扩展的 GPIO
- 不允许持有自旋锁。但某些系统也支持这种类型。
-
-对于给定的电路板,每个 GPIO 都用于某个特定的目的,如监控 MMC/SD 卡的
-插入/移除、检测卡的写保护状态、驱动 LED、配置收发器、模拟串行总线、
-复位硬件看门狗、感知开关状态等等。
-
-
-GPIO 公约
-=========
-注意,这个叫做“公约”,因为这不是强制性的,不遵循这个公约是无伤大雅的,
-因为此时可移植性并不重要。GPIO 常用于板级特定的电路逻辑,甚至可能
-随着电路板的版本而改变,且不可能在不同走线的电路板上使用。仅有在少数
-功能上才具有可移植性,其他功能是平台特定。这也是由于“胶合”的逻辑造成的。
-
-此外,这不需要任何的执行框架,只是一个接口。某个平台可能通过一个简单地
-访问芯片寄存器的内联函数来实现它,其他平台可能通过委托一系列不同的GPIO
-控制器的抽象函数来实现它。(有一些可选的代码能支持这种策略的实现,本文档
-后面会介绍,但作为 GPIO 接口的客户端驱动程序必须与它的实现无关。)
-
-也就是说,如果在他们的平台上支持这个公约,驱动应尽可能的使用它。同时,平台
-必须在 Kconfig 中选择 ARCH_REQUIRE_GPIOLIB 或者 ARCH_WANT_OPTIONAL_GPIOLIB
-选项。那些调用标准 GPIO 函数的驱动应该在 Kconfig 入口中声明依赖GENERIC_GPIO。
-当驱动包含文件:
-
- #include <linux/gpio.h>
-
-则 GPIO 函数是可用,无论是“真实代码”还是经优化过的语句。如果你遵守
-这个公约,当你的代码完成后,对其他的开发者来说会更容易看懂和维护。
-
-注意,这些操作包含所用平台的 I/O 屏障代码,驱动无须显式地调用他们。
-
-
-标识 GPIO
----------
-
-GPIO 是通过无符号整型来标识的,范围是 0 到 MAX_INT。保留“负”数
-用于其他目的,例如标识信号“在这个板子上不可用”或指示错误。未接触底层
-硬件的代码会忽略这些整数。
-
-平台会定义这些整数的用法,且通常使用 #define 来定义 GPIO,这样
-板级特定的启动代码可以直接关联相应的原理图。相对来说,驱动应该仅使用
-启动代码传递过来的 GPIO 编号,使用 platform_data 保存板级特定
-引脚配置数据 (同时还有其他须要的板级特定数据),避免可能出现的问题。
-
-例如一个平台使用编号 32-159 来标识 GPIO,而在另一个平台使用编号0-63
-标识一组 GPIO 控制器,64-79标识另一类 GPIO 控制器,且在一个含有
-FPGA 的特定板子上使用 80-95。编号不一定要连续,那些平台中,也可以
-使用编号2000-2063来标识一个 I2C 接口的 GPIO 扩展器中的 GPIO。
-
-如果你要初始化一个带有无效 GPIO 编号的结构体,可以使用一些负编码
-(如"-EINVAL"),那将使其永远不会是有效。来测试这样一个结构体中的编号
-是否关联一个 GPIO,你可使用以下断言::
-
- int gpio_is_valid(int number);
-
-如果编号不存在,则请求和释放 GPIO 的函数将拒绝执行相关操作(见下文)。
-其他编号也可能被拒绝,比如一个编号可能存在,但暂时在给定的电路上不可用。
-
-一个平台是否支持多个 GPIO 控制器为平台特定的实现问题,就像是否可以
-在 GPIO 编号空间中有“空洞”和是否可以在运行时添加新的控制器一样。
-这些问题会影响其他事情,包括相邻的 GPIO 编号是否存在等。
-
-使用 GPIO
----------
-
-对于一个 GPIO,系统应该做的第一件事情就是通过 gpio_request()
-函数分配它,见下文。
-
-接下来是设置I/O方向,这通常是在板级启动代码中为所使用的 GPIO 设置
-platform_device 时完成::
-
- /* 设置为输入或输出, 返回 0 或负的错误代码 */
- int gpio_direction_input(unsigned gpio);
- int gpio_direction_output(unsigned gpio, int value);
-
-返回值为零代表成功,否则返回一个负的错误代码。这个返回值需要检查,因为
-get/set(获取/设置)函数调用没法返回错误,且有可能是配置错误。通常,
-你应该在进程上下文中调用这些函数。然而,对于自旋锁安全的 GPIO,在板子
-启动的早期、进程启动前使用他们也是可以的。
-
-对于作为输出的 GPIO,为其提供初始输出值,对于避免在系统启动期间出现
-信号毛刺是很有帮助的。
-
-为了与传统的 GPIO 接口兼容, 在设置一个 GPIO 方向时,如果它还未被申请,
-则隐含了申请那个 GPIO 的操作(见下文)。这种兼容性正在从可选的 gpiolib
-框架中移除。
-
-如果这个 GPIO 编码不存在,或者特定的 GPIO 不能用于那种模式,则方向
-设置可能失败。依赖启动固件来正确地设置方向通常是一个坏主意,因为它可能
-除了启动Linux,并没有做更多的验证工作。(同理, 板子的启动代码可能需要
-将这个复用的引脚设置为 GPIO,并正确地配置上拉/下拉电阻。)
-
-
-访问自旋锁安全的 GPIO
----------------------
-
-大多数 GPIO 控制器可以通过内存读/写指令来访问。这些指令不会休眠,可以
-安全地在硬(非线程)中断例程和类似的上下文中完成。
-
-对于那些 GPIO,使用以下的函数访问::
-
- /* GPIO 输入:返回零或非零 */
- int gpio_get_value(unsigned gpio);
-
- /* GPIO 输出 */
- void gpio_set_value(unsigned gpio, int value);
-
-GPIO值是布尔值,零表示低电平,非零表示高电平。当读取一个输出引脚的值时,
-返回值应该是引脚上的值。这个值不总是和输出值相符,因为存在开漏输出信号和
-输出延迟问题。
-
-以上的 get/set 函数无错误返回值,因为之前 gpio_direction_*()应已检查过
-其是否为“无效GPIO”。此外,还需要注意的是并不是所有平台都可以从输出引脚
-中读取数据,对于不能读取的引脚应总返回零。另外,对那些在原子上下文中无法
-安全访问的 GPIO (译者注:因为访问可能导致休眠)使用这些函数是不合适的
-(见下文)。
-
-在 GPIO 编号(还有输出、值)为常数的情况下,鼓励通过平台特定的实现来优化
-这两个函数来访问 GPIO 值。这种情况(读写一个硬件寄存器)下只需要几条指令
-是很正常的,且无须自旋锁。这种优化函数比起那些在子程序上花费许多指令的
-函数可以使得模拟接口(译者注:例如 GPIO 模拟 I2C、1-wire 或 SPI)的
-应用(在空间和时间上都)更具效率。
-
-
-访问可能休眠的 GPIO
--------------------
-
-某些 GPIO 控制器必须通过基于总线(如 I2C 或 SPI)的消息访问。读或写这些
-GPIO 值的命令需要等待其信息排到队首才发送命令,再获得其反馈。期间需要
-休眠,这不能在 IRQ 例程(中断上下文)中执行。
-
-为了访问这种 GPIO,内核定义了一套不同的函数::
-
- /* GPIO 输入:返回零或非零 ,可能会休眠 */
- int gpio_get_value_cansleep(unsigned gpio);
-
- /* GPIO 输出,可能会休眠 */
- void gpio_set_value_cansleep(unsigned gpio, int value);
-
-访问这样的 GPIO 需要一个允许休眠的上下文,例如线程 IRQ 处理例程,并用以上的
-访问函数替换那些没有 cansleep()后缀的自旋锁安全访问函数。
-
-除了这些访问函数可能休眠,且它们操作的 GPIO 不能在硬件 IRQ 处理例程中访问的
-事实,这些处理例程实际上和自旋锁安全的函数是一样的。
-
-** 除此之外 ** 调用设置和配置此类 GPIO 的函数也必须在允许休眠的上下文中,
-因为它们可能也需要访问 GPIO 控制器芯片 (这些设置函数通常在板级启动代码或者
-驱动探测/断开代码中,所以这是一个容易满足的约束条件。) ::
-
- gpio_direction_input()
- gpio_direction_output()
- gpio_request()
-
- ## gpio_request_one()
-
- gpio_free()
-
-
-
-声明和释放 GPIO
-----------------
-
-为了有助于捕获系统配置错误,定义了两个函数::
-
- /* 申请 GPIO, 返回 0 或负的错误代码.
- * 非空标签可能有助于诊断.
- */
- int gpio_request(unsigned gpio, const char *label);
-
- /* 释放之前声明的 GPIO */
- void gpio_free(unsigned gpio);
-
-将无效的 GPIO 编码传递给 gpio_request()会导致失败,申请一个已使用这个
-函数声明过的 GPIO 也会失败。gpio_request()的返回值必须检查。你应该在
-进程上下文中调用这些函数。然而,对于自旋锁安全的 GPIO,在板子启动的早期、
-进入进程之前是可以申请的。
-
-这个函数完成两个基本的目标。一是标识那些实际上已作为 GPIO 使用的信号线,
-这样便于更好地诊断;系统可能需要服务几百个可用的 GPIO,但是对于任何一个
-给定的电路板通常只有一些被使用。另一个目的是捕获冲突,查明错误:如两个或
-更多驱动错误地认为他们已经独占了某个信号线,或是错误地认为移除一个管理着
-某个已激活信号的驱动是安全的。也就是说,申请 GPIO 的作用类似一种锁机制。
-
-某些平台可能也使用 GPIO 作为电源管理激活信号(例如通过关闭未使用芯片区和
-简单地关闭未使用时钟)。
-
-对于 GPIO 使用引脚控制子系统已知的引脚,子系统应该被告知其使用情况;
-一个 gpiolib 驱动的 .request()操作应调用 pinctrl_gpio_request(),
-而 gpiolib 驱动的 .free()操作应调用 pinctrl_gpio_free()。引脚控制
-子系统允许 pinctrl_gpio_request()在某个引脚或引脚组以复用形式“属于”
-一个设备时都成功返回。
-
-任何须将 GPIO 信号导向适当引脚的引脚复用硬件的编程应该发生在 GPIO
-驱动的 .direction_input()或 .direction_output()函数中,以及
-任何输出 GPIO 值的设置之后。这样可使从引脚特殊功能到 GPIO 的转换
-不会在引脚产生毛刺波形。有时当用一个 GPIO 实现其信号驱动一个非 GPIO
-硬件模块的解决方案时,就需要这种机制。
-
-某些平台允许部分或所有 GPIO 信号使用不同的引脚。类似的,GPIO 或引脚的
-其他方面也需要配置,如上拉/下拉。平台软件应该在对这些 GPIO 调用
-gpio_request()前将这类细节配置好,例如使用引脚控制子系统的映射表,
-使得 GPIO 的用户无须关注这些细节。
-
-还有一个值得注意的是在释放 GPIO 前,你必须停止使用它。
-
-
-注意:申请一个 GPIO 并没有以任何方式配置它,只不过标识那个 GPIO 处于使用
-状态。必须有另外的代码来处理引脚配置(如控制 GPIO 使用的引脚、上拉/下拉)。
-考虑到大多数情况下声明 GPIO 之后就会立即配置它们,所以定义了以下三个辅助函数::
-
- /* 申请一个 GPIO 信号, 同时通过特定的'flags'初始化配置,
- * 其他和 gpio_request()的参数和返回值相同
- *
- */
- int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-
-这里 'flags' 当前定义可指定以下属性:
-
- * GPIOF_DIR_IN - 配置方向为输入
- * GPIOF_DIR_OUT - 配置方向为输出
-
- * GPIOF_INIT_LOW - 在作为输出时,初始值为低电平
- * GPIOF_INIT_HIGH - 在作为输出时,初始值为高电平
-
-因为 GPIOF_INIT_* 仅有在配置为输出的时候才存在,所以有效的组合为:
-
- * GPIOF_IN - 配置为输入
- * GPIOF_OUT_INIT_LOW - 配置为输出,并初始化为低电平
- * GPIOF_OUT_INIT_HIGH - 配置为输出,并初始化为高电平
-
-更进一步,为了更简单地声明/释放多个 GPIO,'struct gpio'被引进来封装所有
-这三个领域::
-
- struct gpio {
- unsigned gpio;
- unsigned long flags;
- const char *label;
- };
-
-一个典型的用例::
-
- static struct gpio leds_gpios[] = {
- { 32, GPIOF_OUT_INIT_HIGH, "Power LED" }, /* 默认开启 */
- { 33, GPIOF_OUT_INIT_LOW, "Green LED" }, /* 默认关闭 */
- { 34, GPIOF_OUT_INIT_LOW, "Red LED" }, /* 默认关闭 */
- { 35, GPIOF_OUT_INIT_LOW, "Blue LED" }, /* 默认关闭 */
- { ... },
- };
-
- err = gpio_request_one(31, GPIOF_IN, "Reset Button");
- if (err)
- ...
-
-
-GPIO 映射到 IRQ
-----------------
-
-GPIO 编号是无符号整数;IRQ 编号也是。这些构成了两个逻辑上不同的命名空间
-(GPIO 0 不一定使用 IRQ 0)。你可以通过以下函数在它们之间实现映射::
-
- /* 映射 GPIO 编号到 IRQ 编号 */
- int gpio_to_irq(unsigned gpio);
-
-它们的返回值为对应命名空间的相关编号,或是负的错误代码(如果无法映射)。
-(例如,某些 GPIO 无法做为 IRQ 使用。)以下的编号错误是未经检测的:使用一个
-未通过 gpio_direction_input()配置为输入的 GPIO 编号,或者使用一个
-并非来源于gpio_to_irq()的 IRQ 编号。
-
-这两个映射函数可能会在信号编号的加减计算过程上花些时间。它们不可休眠。
-
-gpio_to_irq()返回的非错误值可以传递给 request_irq()或者 free_irq()。
-它们通常通过板级特定的初始化代码存放到平台设备的 IRQ 资源中。注意:IRQ
-触发选项是 IRQ 接口的一部分,如 IRQF_TRIGGER_FALLING,系统唤醒能力
-也是如此。
-
-
-模拟开漏信号
-------------
-
-有时在只有低电平信号作为实际驱动结果(译者注:多个输出连接于一点,逻辑电平
-结果为所有输出的逻辑与)的时候,共享的信号线需要使用“开漏”信号。(该术语
-适用于 CMOS 管;而 TTL 用“集电极开路”。)一个上拉电阻使信号为高电平。这
-有时被称为“线与”。实际上,从负逻辑(低电平为真)的角度来看,这是一个“线或”。
-
-一个开漏信号的常见例子是共享的低电平使能 IRQ 信号线。此外,有时双向数据总线
-信号也使用漏极开路信号。
-
-某些 GPIO 控制器直接支持开漏输出,还有许多不支持。当你需要开漏信号,但
-硬件又不直接支持的时候,一个常用的方法是用任何即可作输入也可作输出的 GPIO
-引脚来模拟:
-
- LOW: gpio_direction_output(gpio, 0) ... 这代码驱动信号并覆盖
- 上拉配置。
-
- HIGH: gpio_direction_input(gpio) ... 这代码关闭输出,所以上拉电阻
- (或其他的一些器件)控制了信号。
-
-如果你将信号线“驱动”为高电平,但是 gpio_get_value(gpio)报告了一个
-低电平(在适当的上升时间后),你就可以知道是其他的一些组件将共享信号线拉低了。
-这不一定是错误的。一个常见的例子就是 I2C 时钟的延长:一个需要较慢时钟的
-从设备延迟 SCK 的上升沿,而 I2C 主设备相应地调整其信号传输速率。
-
-GPIO控制器和引脚控制子系统
---------------------------
-
-SOC上的GPIO控制器可能与引脚控制子系统紧密结合,即引脚可以与可选的gpio功
-能一起被其他功能使用。我们已经涵盖了这样的情况,例如一个GPIO控制器需要保
-留一个引脚或通过调用以下任何一个引脚来设置其方向::
-
- pinctrl_gpio_request()
- pinctrl_gpio_free()
- pinctrl_gpio_direction_input()
- pinctrl_gpio_direction_output()
-
-但是,引脚控制子系统是如何将GPIO号码(这是一个全局事项)与某个引脚控制器
-上的某个引脚交叉关联的?
-
-这是通过注册引脚的“范围”来实现的,这基本上是交叉参考表。这些描述是在
-Documentation/driver-api/pin-control.rst
-
-虽然引脚分配完全由引脚控制子系统管理,但gpio(在gpiolib下)仍由gpio驱动
-维护。可能发生的情况是,SoC中的不同引脚范围由不同的gpio驱动器管理。
-
-这使得在调用 "pinctrl_gpio_request" 之前,让gpio驱动向pin ctrl子系
-统宣布它们的引脚范围是合理的,以便在使用任何gpio之前要求引脚控制子系统准
-备相应的引脚。
-
-为此,gpio控制器可以用引脚控制子系统注册其引脚范围。目前有两种方法:有或
-无DT。
-
-关于对DT的支持,请参考 Documentation/devicetree/bindings/gpio/gpio.txt.
-
-对于非DT支持,用户可以用适当的参数调用gpiochip_add_pin_range(),将一
-系列的gpio引脚注册到引脚控制驱动上。为此,必须将引脚控制设备的名称字符串
-作为参数之一传给这个程序。
-
-
-这些公约忽略了什么?
-====================
-
-这些公约忽略的最大一件事就是引脚复用,因为这属于高度芯片特定的属性且
-没有可移植性。某个平台可能不需要明确的复用信息;有的对于任意给定的引脚
-可能只有两个功能选项;有的可能每个引脚有八个功能选项;有的可能可以将
-几个引脚中的任何一个作为给定的 GPIO。(是的,这些例子都来自于当前运行
-Linux 的系统。)
-
-在某些系统中,与引脚复用相关的是配置和使能集成的上、下拉模式。并不是所有
-平台都支持这种模式,或者不会以相同的方式来支持这种模式;且任何给定的电路板
-可能使用外置的上拉(或下拉)电阻,这时芯片上的就不应该使用。(当一个电路需要
-5kOhm 的拉动电阻,芯片上的 100 kOhm 电阻就不能做到。)同样的,驱动能力
-(2 mA vs 20 mA)和电压(1.8V vs 3.3V)是平台特定问题,就像模型一样在
-可配置引脚和 GPIO 之间(没)有一一对应的关系。
-
-还有其他一些系统特定的机制没有在这里指出,例如上述的输入去毛刺和线与输出
-选项。硬件可能支持批量读或写 GPIO,但是那一般是配置相关的:对于处于同一
-块区(bank)的GPIO。(GPIO 通常以 16 或 32 个组成一个区块,一个给定的
-片上系统一般有几个这样的区块。)某些系统可以通过输出 GPIO 触发 IRQ,
-或者从并非以 GPIO 管理的引脚取值。这些机制的相关代码没有必要具有可移植性。
-
-当前,动态定义 GPIO 并不是标准的,例如作为配置一个带有某些 GPIO 扩展器的
-附加电路板的副作用。
-
-GPIO 实现者的框架(可选)
-=========================
-
-前面提到了,有一个可选的实现框架,让平台使用相同的编程接口,更加简单地支持
-不同种类的 GPIO 控制器。这个框架称为"gpiolib"。
-
-作为一个辅助调试功能,如果 debugfs 可用,就会有一个 /sys/kernel/debug/gpio
-文件。通过这个框架,它可以列出所有注册的控制器,以及当前正在使用中的 GPIO
-的状态。
-
-
-控制器驱动: gpio_chip
----------------------
-
-在框架中每个 GPIO 控制器都包装为一个 "struct gpio_chip",他包含了
-该类型的每个控制器的常用信息:
-
- - 设置 GPIO 方向的方法
- - 用于访问 GPIO 值的方法
- - 告知调用其方法是否可能休眠的标志
- - 可选的 debugfs 信息导出方法 (显示类似上拉配置一样的额外状态)
- - 诊断标签
-
-也包含了来自 device.platform_data 的每个实例的数据:它第一个 GPIO 的
-编号和它可用的 GPIO 的数量。
-
-实现 gpio_chip 的代码应支持多控制器实例,这可能使用驱动模型。那些代码要
-配置每个 gpio_chip,并发起gpiochip_add()。卸载一个 GPIO 控制器很少见,
-但在必要的时候可以使用 gpiochip_remove()。
-
-大部分 gpio_chip 是一个实例特定结构体的一部分,而并不将 GPIO 接口单独
-暴露出来,比如编址、电源管理等。类似编解码器这样的芯片会有复杂的非 GPIO
-状态。
-
-任何一个 debugfs 信息导出方法通常应该忽略还未申请作为 GPIO 的信号线。
-他们可以使用 gpiochip_is_requested()测试,当这个 GPIO 已经申请过了
-就返回相关的标签,否则返回 NULL。
-
-
-平台支持
---------
-
-为了支持这个框架,一个平台的 Kconfig 文件将会 "select"(选择)
-ARCH_REQUIRE_GPIOLIB 或 ARCH_WANT_OPTIONAL_GPIOLIB,并让它的
-<asm/gpio.h> 包含 <asm-generic/gpio.h>,同时定义两个方法:
-gpio_get_value()、gpio_set_value()。
-
-它也应提供一个 ARCH_NR_GPIOS 的定义值,这样可以更好地反映该平台 GPIO
-的实际数量,节省静态表的空间。(这个定义值应该包含片上系统内建 GPIO 和
-GPIO 扩展器中的数据。)
-
-ARCH_REQUIRE_GPIOLIB 意味着 gpiolib 核心在这个构架中将总是编译进内核。
-
-ARCH_WANT_OPTIONAL_GPIOLIB 意味着 gpiolib 核心默认关闭,且用户可以
-使能它,并将其编译进内核(可选)。
-
-如果这些选项都没被选择,该平台就不通过 GPIO-lib 支持 GPIO,且代码不可以
-被用户使能。
-
-以下这些方法的实现可以直接使用框架代码,并总是通过 gpio_chip 调度::
-
- #define gpio_get_value __gpio_get_value
- #define gpio_set_value __gpio_set_value
-
-这些定义可以用更理想的实现方法替代,那就是使用经过逻辑优化的内联函数来访问
-基于特定片上系统的 GPIO。例如,若引用的 GPIO (寄存器位偏移)是常量“12”,
-读取或设置它可能只需少则两或三个指令,且不会休眠。当这样的优化无法实现时,
-那些函数必须使用框架提供的代码,那就至少要几十条指令才可以实现。对于用 GPIO
-模拟的 I/O 接口, 如此精简指令是很有意义的。
-
-对于片上系统,平台特定代码为片上 GPIO 每个区(bank)定义并注册 gpio_chip
-实例。那些 GPIO 应该根据芯片厂商的文档进行编码/标签,并直接和电路板原理图
-对应。他们应该开始于零并终止于平台特定的限制。这些 GPIO(代码)通常从
-arch_initcall()或者更早的地方集成进平台初始化代码,使这些 GPIO 总是可用,
-且他们通常可以作为 IRQ 使用。
-
-板级支持
---------
-
-对于外部 GPIO 控制器(例如 I2C 或 SPI 扩展器、专用芯片、多功能器件、FPGA
-或 CPLD),大多数常用板级特定代码都可以注册控制器设备,并保证他们的驱动知道
-gpiochip_add()所使用的 GPIO 编号。他们的起始编号通常跟在平台特定的 GPIO
-编号之后。
-
-例如板级启动代码应该创建结构体指明芯片公开的 GPIO 范围,并使用 platform_data
-将其传递给每个 GPIO 扩展器芯片。然后芯片驱动中的 probe()例程可以将这个
-数据传递给 gpiochip_add()。
-
-初始化顺序很重要。例如,如果一个设备依赖基于 I2C 的(扩展)GPIO,那么它的
-probe()例程就应该在那个 GPIO 有效以后才可以被调用。这意味着设备应该在
-GPIO 可以工作之后才可被注册。解决这类依赖的的一种方法是让这种 gpio_chip
-控制器向板级特定代码提供 setup()和 teardown()回调函数。一旦所有必须的
-资源可用之后,这些板级特定的回调函数将会注册设备,并可以在这些 GPIO 控制器
-设备变成无效时移除它们。
-
-
-用户空间的 Sysfs 接口(可选)
-=============================
-
-使用“gpiolib”实现框架的平台可以选择配置一个 GPIO 的 sysfs 用户接口。
-这不同于 debugfs 接口,因为它提供的是对 GPIO方向和值的控制,而不只显示
-一个GPIO 的状态摘要。此外,它可以出现在没有调试支持的产品级系统中。
-
-例如,通过适当的系统硬件文档,用户空间可以知道 GIOP #23 控制 Flash
-存储器的写保护(用于保护其中 Bootloader 分区)。产品的系统升级可能需要
-临时解除这个保护:首先导入一个 GPIO,改变其输出状态,然后在重新使能写保护
-前升级代码。通常情况下,GPIO #23 是不会被触及的,并且内核也不需要知道他。
-
-根据适当的硬件文档,某些系统的用户空间 GPIO 可以用于确定系统配置数据,
-这些数据是标准内核不知道的。在某些任务中,简单的用户空间 GPIO 驱动可能是
-系统真正需要的。
-
-注意:标准内核驱动中已经存在通用的“LED 和按键”GPIO 任务,分别是:
-"leds-gpio" 和 "gpio_keys"。请使用这些来替代直接访问 GPIO,因为集成在
-内核框架中的这类驱动比你在用户空间的代码更好。
-
-
-Sysfs 中的路径
---------------
-
-在/sys/class/gpio 中有 3 类入口:
-
- - 用于在用户空间控制 GPIO 的控制接口;
-
- - GPIOs 本身;以及
-
- - GPIO 控制器 ("gpio_chip" 实例)。
-
-除了这些标准的文件,还包含“device”符号链接。
-
-控制接口是只写的:
-
- /sys/class/gpio/
-
- "export" ... 用户空间可以通过写其编号到这个文件,要求内核导出
- 一个 GPIO 的控制到用户空间。
-
- 例如: 如果内核代码没有申请 GPIO #19,"echo 19 > export"
- 将会为 GPIO #19 创建一个 "gpio19" 节点。
-
- "unexport" ... 导出到用户空间的逆操作。
-
- 例如: "echo 19 > unexport" 将会移除使用"export"文件导出的
- "gpio19" 节点。
-
-GPIO 信号的路径类似 /sys/class/gpio/gpio42/ (对于 GPIO #42 来说),
-并有如下的读/写属性:
-
- /sys/class/gpio/gpioN/
-
- "direction" ... 读取得到 "in" 或 "out"。这个值通常运行写入。
- 写入"out" 时,其引脚的默认输出为低电平。为了确保无故障运行,
- "low" 或 "high" 的电平值应该写入 GPIO 的配置,作为初始输出值。
-
- 注意:如果内核不支持改变 GPIO 的方向,或者在导出时内核代码没有
- 明确允许用户空间可以重新配置 GPIO 方向,那么这个属性将不存在。
-
- "value" ... 读取得到 0 (低电平) 或 1 (高电平)。如果 GPIO 配置为
- 输出,这个值允许写操作。任何非零值都以高电平看待。
-
- 如果引脚可以配置为中断信号,且如果已经配置了产生中断的模式
- (见"edge"的描述),你可以对这个文件使用轮询操作(poll(2)),
- 且轮询操作会在任何中断触发时返回。如果你使用轮询操作(poll(2)),
- 请在 events 中设置 POLLPRI 和 POLLERR。如果你使用轮询操作
- (select(2)),请在 exceptfds 设置你期望的文件描述符。在
- 轮询操作(poll(2))返回之后,既可以通过 lseek(2)操作读取
- sysfs 文件的开始部分,也可以关闭这个文件并重新打开它来读取数据。
-
- "edge" ... 读取得到“none”、“rising”、“falling”或者“both”。
- 将这些字符串写入这个文件可以选择沿触发模式,会使得轮询操作
- (select(2))在"value"文件中返回。
-
- 这个文件仅有在这个引脚可以配置为可产生中断输入引脚时,才存在。
-
- "active_low" ... 读取得到 0 (假) 或 1 (真)。写入任何非零值可以
- 翻转这个属性的(读写)值。已存在或之后通过"edge"属性设置了"rising"
- 和 "falling" 沿触发模式的轮询操作(poll(2))将会遵循这个设置。
-
-GPIO 控制器的路径类似 /sys/class/gpio/gpiochip42/ (对于从#42 GPIO
-开始实现控制的控制器),并有着以下只读属性:
-
- /sys/class/gpio/gpiochipN/
-
- "base" ... 与以上的 N 相同,代表此芯片管理的第一个 GPIO 的编号
-
- "label" ... 用于诊断 (并不总是只有唯一值)
-
- "ngpio" ... 此控制器所管理的 GPIO 数量(而 GPIO 编号从 N 到
- N + ngpio - 1)
-
-大多数情况下,电路板的文档应当标明每个 GPIO 的使用目的。但是那些编号并不总是
-固定的,例如在扩展卡上的 GPIO会根据所使用的主板或所在堆叠架构中其他的板子而
-有所不同。在这种情况下,你可能需要使用 gpiochip 节点(尽可能地结合电路图)来
-确定给定信号所用的 GPIO 编号。
-
-
-API参考
-=======
-
-本节中列出的函数已被废弃。在新的代码中应该使用基于GPIO描述符的API。
diff --git a/Documentation/translations/zh_TW/gpio.txt b/Documentation/translations/zh_TW/gpio.txt
deleted file mode 100644
index 77d69d381316..000000000000
--- a/Documentation/translations/zh_TW/gpio.txt
+++ /dev/null
@@ -1,574 +0,0 @@
-Chinese translated version of Documentation/admin-guide/gpio
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Maintainer: Grant Likely <grant.likely@secretlab.ca>
- Linus Walleij <linus.walleij@linaro.org>
-Traditional Chinese maintainer: Hu Haowen <2023002089@link.tyut.edu.cn>
----------------------------------------------------------------------
-Documentation/admin-guide/gpio 的繁體中文翻譯
-
-如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
-交流有困難的話,也可以向繁體中文版維護者求助。如果本翻譯更新不及時或
-者翻譯存在問題,請聯繫繁體中文版維護者。
-
-英文版維護者: Grant Likely <grant.likely@secretlab.ca>
- Linus Walleij <linus.walleij@linaro.org>
-繁體中文版維護者: 胡皓文 Hu Haowen <2023002089@link.tyut.edu.cn>
-繁體中文版翻譯者: 胡皓文 Hu Haowen <2023002089@link.tyut.edu.cn>
-繁體中文版校譯者: 胡皓文 Hu Haowen <2023002089@link.tyut.edu.cn>
-
-以下爲正文
----------------------------------------------------------------------
-GPIO 接口
-
-本文檔提供了一個在Linux下訪問GPIO的公約概述。
-
-這些函數以 gpio_* 作爲前綴。其他的函數不允許使用這樣的前綴或相關的
-__gpio_* 前綴。
-
-
-什麼是GPIO?
-==========
-"通用輸入/輸出口"(GPIO)是一個靈活的由軟體控制的數位訊號。他們可
-由多種晶片提供,且對於從事嵌入式和定製硬體的 Linux 開發者來說是
-比較熟悉。每個GPIO 都代表一個連接到特定引腳或球柵陣列(BGA)封裝中
-「球珠」的一個位。電路板原理圖顯示了 GPIO 與外部硬體的連接關係。
-驅動可以編寫成通用代碼,以使板級啓動代碼可傳遞引腳配置數據給驅動。
-
-片上系統 (SOC) 處理器對 GPIO 有很大的依賴。在某些情況下,每個
-非專用引腳都可配置爲 GPIO,且大多數晶片都最少有一些 GPIO。
-可編程邏輯器件(類似 FPGA) 可以方便地提供 GPIO。像電源管理和
-音頻編解碼器這樣的多功能晶片經常留有一些這樣的引腳來幫助那些引腳
-匱乏的 SOC。同時還有通過 I2C 或 SPI 串行總線連接的「GPIO擴展器」
-晶片。大多數 PC 的南橋有一些擁有 GPIO 能力的引腳 (只有BIOS
-固件才知道如何使用他們)。
-
-GPIO 的實際功能因系統而異。通常用法有:
-
- - 輸出值可寫 (高電平=1,低電平=0)。一些晶片也有如何驅動這些值的選項,
- 例如只允許輸出一個值、支持「線與」及其他取值類似的模式(值得注意的是
- 「開漏」信號)
-
- - 輸入值可讀(1、0)。一些晶片支持引腳在配置爲「輸出」時回讀,這對於類似
- 「線與」的情況(以支持雙向信號)是非常有用的。GPIO 控制器可能有輸入
- 去毛刺/消抖邏輯,這有時需要軟體控制。
-
- - 輸入通常可作爲 IRQ 信號,一般是沿觸發,但有時是電平觸發。這樣的 IRQ
- 可能配置爲系統喚醒事件,以將系統從低功耗狀態下喚醒。
-
- - 通常一個 GPIO 根據不同產品電路板的需求,可以配置爲輸入或輸出,也有僅
- 支持單向的。
-
- - 大部分 GPIO 可以在持有自旋鎖時訪問,但是通常由串行總線擴展的 GPIO
- 不允許持有自旋鎖。但某些系統也支持這種類型。
-
-對於給定的電路板,每個 GPIO 都用於某個特定的目的,如監控 MMC/SD 卡的
-插入/移除、檢測卡的防寫狀態、驅動 LED、配置收發器、模擬串行總線、
-復位硬體看門狗、感知開關狀態等等。
-
-
-GPIO 公約
-=========
-注意,這個叫做「公約」,因爲這不是強制性的,不遵循這個公約是無傷大雅的,
-因爲此時可移植性並不重要。GPIO 常用於板級特定的電路邏輯,甚至可能
-隨著電路板的版本而改變,且不可能在不同走線的電路板上使用。僅有在少數
-功能上才具有可移植性,其他功能是平台特定。這也是由於「膠合」的邏輯造成的。
-
-此外,這不需要任何的執行框架,只是一個接口。某個平台可能通過一個簡單地
-訪問晶片寄存器的內聯函數來實現它,其他平台可能通過委託一系列不同的GPIO
-控制器的抽象函數來實現它。(有一些可選的代碼能支持這種策略的實現,本文檔
-後面會介紹,但作爲 GPIO 接口的客戶端驅動程序必須與它的實現無關。)
-
-也就是說,如果在他們的平台上支持這個公約,驅動應儘可能的使用它。同時,平台
-必須在 Kconfig 中選擇 ARCH_REQUIRE_GPIOLIB 或者 ARCH_WANT_OPTIONAL_GPIOLIB
-選項。那些調用標準 GPIO 函數的驅動應該在 Kconfig 入口中聲明依賴GENERIC_GPIO。
-當驅動包含文件:
-
- #include <linux/gpio.h>
-
-則 GPIO 函數是可用,無論是「真實代碼」還是經優化過的語句。如果你遵守
-這個公約,當你的代碼完成後,對其他的開發者來說會更容易看懂和維護。
-
-注意,這些操作包含所用平台的 I/O 屏障代碼,驅動無須顯式地調用他們。
-
-
-標識 GPIO
----------
-GPIO 是通過無符號整型來標識的,範圍是 0 到 MAX_INT。保留「負」數
-用於其他目的,例如標識信號「在這個板子上不可用」或指示錯誤。未接觸底層
-硬體的代碼會忽略這些整數。
-
-平台會定義這些整數的用法,且通常使用 #define 來定義 GPIO,這樣
-板級特定的啓動代碼可以直接關聯相應的原理圖。相對來說,驅動應該僅使用
-啓動代碼傳遞過來的 GPIO 編號,使用 platform_data 保存板級特定
-引腳配置數據 (同時還有其他須要的板級特定數據),避免可能出現的問題。
-
-例如一個平台使用編號 32-159 來標識 GPIO,而在另一個平台使用編號0-63
-標識一組 GPIO 控制器,64-79標識另一類 GPIO 控制器,且在一個含有
-FPGA 的特定板子上使用 80-95。編號不一定要連續,那些平台中,也可以
-使用編號2000-2063來標識一個 I2C 接口的 GPIO 擴展器中的 GPIO。
-
-如果你要初始化一個帶有無效 GPIO 編號的結構體,可以使用一些負編碼
-(如"-EINVAL"),那將使其永遠不會是有效。來測試這樣一個結構體中的編號
-是否關聯一個 GPIO,你可使用以下斷言:
-
- int gpio_is_valid(int number);
-
-如果編號不存在,則請求和釋放 GPIO 的函數將拒絕執行相關操作(見下文)。
-其他編號也可能被拒絕,比如一個編號可能存在,但暫時在給定的電路上不可用。
-
-一個平台是否支持多個 GPIO 控制器爲平台特定的實現問題,就像是否可以
-在 GPIO 編號空間中有「空洞」和是否可以在運行時添加新的控制器一樣。
-這些問題會影響其他事情,包括相鄰的 GPIO 編號是否存在等。
-
-使用 GPIO
----------
-對於一個 GPIO,系統應該做的第一件事情就是通過 gpio_request()
-函數分配它,見下文。
-
-接下來是設置I/O方向,這通常是在板級啓動代碼中爲所使用的 GPIO 設置
-platform_device 時完成。
-
- /* 設置爲輸入或輸出, 返回 0 或負的錯誤代碼 */
- int gpio_direction_input(unsigned gpio);
- int gpio_direction_output(unsigned gpio, int value);
-
-返回值爲零代表成功,否則返回一個負的錯誤代碼。這個返回值需要檢查,因爲
-get/set(獲取/設置)函數調用沒法返回錯誤,且有可能是配置錯誤。通常,
-你應該在進程上下文中調用這些函數。然而,對於自旋鎖安全的 GPIO,在板子
-啓動的早期、進程啓動前使用他們也是可以的。
-
-對於作爲輸出的 GPIO,爲其提供初始輸出值,對於避免在系統啓動期間出現
-信號毛刺是很有幫助的。
-
-爲了與傳統的 GPIO 接口兼容, 在設置一個 GPIO 方向時,如果它還未被申請,
-則隱含了申請那個 GPIO 的操作(見下文)。這種兼容性正在從可選的 gpiolib
-框架中移除。
-
-如果這個 GPIO 編碼不存在,或者特定的 GPIO 不能用於那種模式,則方向
-設置可能失敗。依賴啓動固件來正確地設置方向通常是一個壞主意,因爲它可能
-除了啓動Linux,並沒有做更多的驗證工作。(同理, 板子的啓動代碼可能需要
-將這個復用的引腳設置爲 GPIO,並正確地配置上拉/下拉電阻。)
-
-
-訪問自旋鎖安全的 GPIO
--------------------
-大多數 GPIO 控制器可以通過內存讀/寫指令來訪問。這些指令不會休眠,可以
-安全地在硬(非線程)中斷例程和類似的上下文中完成。
-
-對於那些 GPIO,使用以下的函數訪問:
-
- /* GPIO 輸入:返回零或非零 */
- int gpio_get_value(unsigned gpio);
-
- /* GPIO 輸出 */
- void gpio_set_value(unsigned gpio, int value);
-
-GPIO值是布爾值,零表示低電平,非零表示高電平。當讀取一個輸出引腳的值時,
-返回值應該是引腳上的值。這個值不總是和輸出值相符,因爲存在開漏輸出信號和
-輸出延遲問題。
-
-以上的 get/set 函數無錯誤返回值,因爲之前 gpio_direction_*()應已檢查過
-其是否爲「無效GPIO」。此外,還需要注意的是並不是所有平台都可以從輸出引腳
-中讀取數據,對於不能讀取的引腳應總返回零。另外,對那些在原子上下文中無法
-安全訪問的 GPIO (譯者註:因爲訪問可能導致休眠)使用這些函數是不合適的
-(見下文)。
-
-在 GPIO 編號(還有輸出、值)爲常數的情況下,鼓勵通過平台特定的實現來優化
-這兩個函數來訪問 GPIO 值。這種情況(讀寫一個硬體寄存器)下只需要幾條指令
-是很正常的,且無須自旋鎖。這種優化函數比起那些在子程序上花費許多指令的
-函數可以使得模擬接口(譯者注:例如 GPIO 模擬 I2C、1-wire 或 SPI)的
-應用(在空間和時間上都)更具效率。
-
-
-訪問可能休眠的 GPIO
------------------
-某些 GPIO 控制器必須通過基於總線(如 I2C 或 SPI)的消息訪問。讀或寫這些
-GPIO 值的命令需要等待其信息排到隊首才發送命令,再獲得其反饋。期間需要
-休眠,這不能在 IRQ 例程(中斷上下文)中執行。
-
-爲了訪問這種 GPIO,內核定義了一套不同的函數:
-
- /* GPIO 輸入:返回零或非零 ,可能會休眠 */
- int gpio_get_value_cansleep(unsigned gpio);
-
- /* GPIO 輸出,可能會休眠 */
- void gpio_set_value_cansleep(unsigned gpio, int value);
-
-訪問這樣的 GPIO 需要一個允許休眠的上下文,例如線程 IRQ 處理例程,並用以上的
-訪問函數替換那些沒有 cansleep()後綴的自旋鎖安全訪問函數。
-
-除了這些訪問函數可能休眠,且它們操作的 GPIO 不能在硬體 IRQ 處理例程中訪問的
-事實,這些處理例程實際上和自旋鎖安全的函數是一樣的。
-
-** 除此之外 ** 調用設置和配置此類 GPIO 的函數也必須在允許休眠的上下文中,
-因爲它們可能也需要訪問 GPIO 控制器晶片: (這些設置函數通常在板級啓動代碼或者
-驅動探測/斷開代碼中,所以這是一個容易滿足的約束條件。)
-
- gpio_direction_input()
- gpio_direction_output()
- gpio_request()
-
-## gpio_request_one()
-
- gpio_free()
-
-
-聲明和釋放 GPIO
-----------------------------
-爲了有助於捕獲系統配置錯誤,定義了兩個函數。
-
- /* 申請 GPIO, 返回 0 或負的錯誤代碼.
- * 非空標籤可能有助於診斷.
- */
- int gpio_request(unsigned gpio, const char *label);
-
- /* 釋放之前聲明的 GPIO */
- void gpio_free(unsigned gpio);
-
-將無效的 GPIO 編碼傳遞給 gpio_request()會導致失敗,申請一個已使用這個
-函數聲明過的 GPIO 也會失敗。gpio_request()的返回值必須檢查。你應該在
-進程上下文中調用這些函數。然而,對於自旋鎖安全的 GPIO,在板子啓動的早期、
-進入進程之前是可以申請的。
-
-這個函數完成兩個基本的目標。一是標識那些實際上已作爲 GPIO 使用的信號線,
-這樣便於更好地診斷;系統可能需要服務幾百個可用的 GPIO,但是對於任何一個
-給定的電路板通常只有一些被使用。另一個目的是捕獲衝突,查明錯誤:如兩個或
-更多驅動錯誤地認爲他們已經獨占了某個信號線,或是錯誤地認爲移除一個管理著
-某個已激活信號的驅動是安全的。也就是說,申請 GPIO 的作用類似一種鎖機制。
-
-Some platforms may also use knowledge about what GPIOs are active for power
-management, such as by powering down unused chip sectors and, more simply,
-gating off unused clocks.
-
-For GPIOs that use pins known to the pinctrl subsystem, that subsystem should
-be informed of their use; a gpiolib driver's .request() operation should call
-pinctrl_gpio_request(), and its .free() operation should call
-pinctrl_gpio_free(). The pinctrl subsystem allows pinctrl_gpio_request() to
-succeed even while the pin or pin group is "owned" by a device for pin
-multiplexing.
-
-Any programming of pin multiplexing hardware that is needed to route the
-GPIO signal to the appropriate pin should occur within a GPIO driver's
-.direction_input() or .direction_output() operations, and occur after any
-setting of the output GPIO value. This allows a glitch-free migration from a
-pin's special function to GPIO. This is sometimes required when using a GPIO
-to implement a workaround for signals normally driven by a non-GPIO hardware
-block.
-
-Some platforms allow some or all GPIO signals to be routed to different pins.
-Similarly, other aspects of the GPIO or pin may need to be configured, such
-as pull-up/pull-down. Platform software should arrange that any such details
-are configured before gpio_request() is called on those GPIOs, e.g. using the
-pinctrl subsystem's mapping table, so that GPIO users need not be aware of
-them.
-
-Also note that it is your responsibility to have stopped using a GPIO before
-you free it.
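-
-Putting the above together (a sketch only; foo_platform_data and its
-enable_gpio field are invented, and a real driver would report errors), a
-platform driver might claim a GPIO in its probe() and release it in its
-remove():
-
-        static int foo_probe(struct platform_device *pdev)
-        {
-                struct foo_platform_data *pdata = pdev->dev.platform_data;
-                int err;
-
-                err = gpio_request(pdata->enable_gpio, "foo-enable");
-                if (err)
-                        return err;
-
-                err = gpio_direction_output(pdata->enable_gpio, 0);
-                if (err)
-                        gpio_free(pdata->enable_gpio);
-                return err;
-        }
-
-        static int foo_remove(struct platform_device *pdev)
-        {
-                struct foo_platform_data *pdata = pdev->dev.platform_data;
-
-                /* stop driving the line before handing it back */
-                gpio_direction_input(pdata->enable_gpio);
-                gpio_free(pdata->enable_gpio);
-                return 0;
-        }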
-
-
-NOTE: requesting a GPIO does NOT cause it to be configured in any way; it
-just marks that GPIO as in use. Separate code must handle any pin setup
-(e.g. controlling which pin the GPIO uses, pull-up/pull-down). Considering
-that in most cases GPIOs are configured right after they are claimed, the
-following helper is also defined:
-
-        /* request a single GPIO, with initial configuration specified by
-         * 'flags'; otherwise identical to gpio_request() with respect to
-         * arguments and return value
-         */
-        int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-
-where 'flags' is currently defined to specify the following properties:
-
-        * GPIOF_DIR_IN          - configure direction as input
-        * GPIOF_DIR_OUT         - configure direction as output
-
-        * GPIOF_INIT_LOW        - as output, set initial level to LOW
-        * GPIOF_INIT_HIGH       - as output, set initial level to HIGH
-
-Since GPIOF_INIT_* make sense only when configured as output, the valid
-combinations are:
-
-        * GPIOF_IN              - configure as input
-        * GPIOF_OUT_INIT_LOW    - configure as output, initial level LOW
-        * GPIOF_OUT_INIT_HIGH   - configure as output, initial level HIGH
-
-Furthermore, to make it easier to claim and release multiple GPIOs, 'struct
-gpio' is introduced to encapsulate all three of these fields:
-
- struct gpio {
- unsigned gpio;
- unsigned long flags;
- const char *label;
- };
-
-A typical example of usage:
-
-        static struct gpio leds_gpios[] = {
-                { 32, GPIOF_OUT_INIT_HIGH, "Power LED" }, /* default to ON */
-                { 33, GPIOF_OUT_INIT_LOW,  "Green LED" }, /* default to OFF */
-                { 34, GPIOF_OUT_INIT_LOW,  "Red LED"   }, /* default to OFF */
-                { 35, GPIOF_OUT_INIT_LOW,  "Blue LED"  }, /* default to OFF */
- { ... },
- };
-
- err = gpio_request_one(31, GPIOF_IN, "Reset Button");
- if (err)
- ...
-
-
-GPIOs mapped to IRQs
---------------------
-GPIO numbers are unsigned integers; so are IRQ numbers. These make up two
-logically distinct namespaces (GPIO 0 need not use IRQ 0). You can map
-between them using the following call:
-
-        /* map GPIO numbers to IRQ numbers */
-        int gpio_to_irq(unsigned gpio);
-
-The return value is the corresponding number in the other namespace, or a
-negative errno if the mapping cannot be done. (For example, some GPIOs cannot
-be used as IRQs.) It is an unchecked error to use a GPIO number that has not
-been set up as an input using gpio_direction_input(), or to use an IRQ number
-that did not originally come from gpio_to_irq().
-
-These mapping calls are expected to cost on the order of a single addition or
-subtraction of the signal number. They are not allowed to sleep.
-
-Non-error values returned from gpio_to_irq() can be passed to request_irq()
-or free_irq(). They will often be stored into IRQ resources for platform
-devices by the board-specific initialization code. Note that IRQ trigger
-options, such as IRQF_TRIGGER_FALLING, are part of the IRQ interface, as are
-system wakeup capabilities.
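-
-For example (a sketch; the GPIO number source, names and handler body are
-invented), a driver can turn a button GPIO into an interrupt source:
-
-        static irqreturn_t foo_button_isr(int irq, void *dev_id)
-        {
-                /* acknowledge and handle the button event here */
-                return IRQ_HANDLED;
-        }
-
-        static int foo_setup_button_irq(unsigned button_gpio, void *dev_id)
-        {
-                int irq, err;
-
-                err = gpio_request_one(button_gpio, GPIOF_IN, "foo-button");
-                if (err)
-                        return err;
-
-                irq = gpio_to_irq(button_gpio);
-                if (irq < 0) {
-                        gpio_free(button_gpio);
-                        return irq;
-                }
-
-                err = request_irq(irq, foo_button_isr, IRQF_TRIGGER_FALLING,
-                                  "foo-button", dev_id);
-                if (err)
-                        gpio_free(button_gpio);
-                return err;
-        }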
-
-
-Emulating Open Drain Signals
-----------------------------
-Sometimes shared signals need to use "open drain" signaling, where only the
-low signal level is actually driven (when several outputs are wired together,
-the resulting level is the logical AND of all of them). (That term applies to
-CMOS transistors; "open collector" is used for TTL.) A pull-up resistor
-causes the high signal level. This is sometimes called a "wire-AND"; or, more
-practically, from the negative-logic (low=true) perspective, a "wire-OR".
-
-One common example of an open-drain signal is a shared active-low IRQ line.
-Bidirectional data bus signals sometimes also use open-drain signaling.
-
-Some GPIO controllers directly support open-drain outputs; many do not. When
-you need open-drain signaling but the hardware does not directly support it,
-there is a common idiom you can use to emulate it with any GPIO pin that can
-be used as either an input or an output:
-
- LOW:  gpio_direction_output(gpio, 0) ... this drives the signal and
-       overrides the pull-up.
-
- HIGH: gpio_direction_input(gpio) ... this turns the output off, so the
-       pull-up (or some other device) controls the signal.
-
-If you are "driving" the signal high but gpio_get_value(gpio) reports a low
-value (after the appropriate rise time passes), you know some other component
-is driving the shared signal low. That is not necessarily an error. One
-common example is I2C clock stretching: a slave device that needs a slower
-clock delays the rising edge of SCK, and the I2C master adjusts its signaling
-rate accordingly.
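-
-A minimal helper capturing that idiom might look like this (a sketch; it
-assumes the line has an external pull-up and was requested beforehand):
-
-        /* emulate an open-drain output on 'gpio' */
-        static void foo_open_drain_set(unsigned gpio, int value)
-        {
-                if (value)
-                        gpio_direction_input(gpio);     /* release; pull-up makes it high */
-                else
-                        gpio_direction_output(gpio, 0); /* actively drive it low */
-        }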
-
-
-What do these conventions omit?
-===============================
-The biggest thing these conventions omit is pin multiplexing, since that is
-highly chip-specific and nonportable. One platform might not need explicit
-multiplexing; another might have just two options for the use of any given
-pin; another might have eight options per pin; yet another might be able to
-route a given GPIO to any one of several pins. (Yes, those examples all come
-from systems that run Linux today.)
-
-Related to multiplexing is the configuration and enabling of the pull-up or
-pull-down resistors integrated on some platforms. Not all platforms support
-them, or support them in the same way; and any given board might use external
-pull-ups (or pull-downs), in which case the on-chip ones should not be used.
-(When a circuit needs 5 kOhm pull resistors, on-chip 100 kOhm resistors
-cannot do the job.) Likewise, drive strength (2 mA vs 20 mA) and voltage
-(1.8V vs 3.3V) are platform-specific issues, as are models where configurable
-pins and GPIOs do (or do not) map one-to-one.
-
-There are other system-specific mechanisms that are not addressed here, such
-as the aforementioned options for input de-glitching and wire-OR output.
-Hardware may support reading or writing GPIOs in gangs, but that is usually
-configuration-specific: for GPIOs sharing the same bank. (GPIOs are commonly
-grouped in banks of 16 or 32, and a given SoC usually has several such
-banks.) Some systems can trigger IRQs from output GPIOs, or read values from
-pins not managed as GPIOs. Code relying on such mechanisms will necessarily
-be nonportable.
-
-Dynamic definition of GPIOs is not currently standard; for example, as a side
-effect of configuring an add-on board with some GPIO expanders.
-
-GPIO implementer's framework (OPTIONAL)
-=======================================
-As noted earlier, there is an optional implementation framework that makes it
-easier for platforms to support different kinds of GPIO controllers using the
-same programming interface. That framework is called "gpiolib".
-
-As a debugging aid, if debugfs is available there will be a
-/sys/kernel/debug/gpio file, listing all the controllers registered through
-this framework and the state of the GPIOs currently in use.
-
-
-Controller Drivers: gpio_chip
------------------------------
-In this framework each GPIO controller is packaged as a "struct gpio_chip"
-with information common to each controller of that type:
-
- - methods to establish GPIO direction
- - methods used to access GPIO values
- - a flag saying whether calls to its methods may sleep
- - an optional debugfs dump method (showing extra state such as pull-up
-   configuration)
- - a label for diagnostics
-
-There is also per-instance data, which may come from device.platform_data:
-the number of its first GPIO and how many GPIOs it exposes.
-
-The code implementing a gpio_chip should support multiple instances of the
-controller, preferably using the driver model. That code will configure each
-gpio_chip and issue gpiochip_add(). Removing a GPIO controller should be
-rare; use gpiochip_remove() when it is unavoidable.
-
-In most cases the gpio_chip is part of an instance-specific structure with
-state not exposed by the GPIO interfaces, such as addressing, power
-management, and more. Chips such as audio codecs will have complex non-GPIO
-state.
-
-Any debugfs dump method should normally ignore signal lines that have not
-been requested as GPIOs. They can use gpiochip_is_requested(), which returns
-either NULL or the label associated with that GPIO when it was requested.
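-
-To sketch what this looks like (illustration only: the register layout, the
-names, and the externally supplied GPIO base are all invented), a driver for
-a simple memory-mapped controller might register its gpio_chip roughly like
-this:
-
-        struct foo_gpio {
-                struct gpio_chip        chip;
-                void __iomem            *regs;  /* invented register block */
-        };
-
-        static int foo_gpio_get(struct gpio_chip *chip, unsigned offset)
-        {
-                struct foo_gpio *foo = container_of(chip, struct foo_gpio, chip);
-
-                /* invented layout: 0x0 is the data-in register */
-                return (readl(foo->regs + 0x0) >> offset) & 1;
-        }
-
-        static void foo_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-        {
-                struct foo_gpio *foo = container_of(chip, struct foo_gpio, chip);
-
-                /* invented layout: 0x4 sets output bits, 0x8 clears them */
-                writel(1 << offset, foo->regs + (value ? 0x4 : 0x8));
-        }
-
-        static int foo_gpio_register(struct foo_gpio *foo, int gpio_base)
-        {
-                foo->chip.label = "foo-gpio";
-                foo->chip.owner = THIS_MODULE;
-                foo->chip.base = gpio_base;     /* first GPIO number, from board code */
-                foo->chip.ngpio = 32;
-                foo->chip.get = foo_gpio_get;
-                foo->chip.set = foo_gpio_set;
-                /* .direction_input()/.direction_output() are wired up similarly */
-
-                return gpiochip_add(&foo->chip);
-        }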
-
-
-Platform Support
-----------------
-To support this framework, a platform's Kconfig will "select" either
-ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB, and arrange that its
-<asm/gpio.h> includes <asm-generic/gpio.h> and defines two functions:
-gpio_get_value() and gpio_set_value().
-
-It may also provide an ARCH_NR_GPIOS definition that better reflects the
-actual number of GPIOs on the platform, saving static table space. (It should
-count both the built-in SoC GPIOs and those on GPIO expanders.)
-
-ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always be compiled into
-the kernel on that architecture.
-
-ARCH_WANT_OPTIONAL_GPIOLIB means that the gpiolib code defaults to off, and
-the user can enable it and have it compiled into the kernel (optionally).
-
-If neither of these options is selected, the platform does not support GPIOs
-through gpiolib, and the code cannot be enabled by the user.
-
-Trivial implementations of those functions can directly use framework code,
-which always dispatches through the gpio_chip:
-
- #define gpio_get_value __gpio_get_value
- #define gpio_set_value __gpio_set_value
-
-Fancier implementations could instead define those as inline functions with
-logic optimizing access to specific SoC-based GPIOs. For example, if the
-referenced GPIO (a register bit offset) is the constant "12", getting or
-setting it could cost as little as two or three instructions, never sleeping.
-When such an optimization is not possible, those calls must use the framework
-code, which costs at least a few dozen instructions. For bit-banged I/O
-interfaces, such instruction savings can be significant.
-
-For SoCs, platform-specific code defines and registers gpio_chip instances
-for each bank of on-chip GPIOs. Those GPIOs should be numbered/labeled to
-match chip vendor documentation, and directly match board schematics. They
-may well start at zero and go up to a platform-specific limit. Such GPIOs are
-normally integrated into platform initialization from an arch_initcall() or
-earlier, making them always available, and they can usually be used as IRQs.
-
-Board Support
--------------
-For external GPIO controllers (such as I2C or SPI expanders, ASICs,
-multi-function devices, FPGAs or CPLDs), most often board-specific code
-registers the controller devices and makes sure their drivers know which GPIO
-numbers to use with gpiochip_add(). Those numbers often start right after the
-platform-specific GPIOs.
-
-For example, board setup code could create structures identifying the range
-of GPIOs that each chip exposes, and pass them to each GPIO expander chip
-using platform_data. The chip driver's probe() routine could then pass that
-data to gpiochip_add().
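-
-As an illustrative sketch (the "foo-expander" driver, its platform_data
-layout, and the I2C bus number and address are all invented), that might look
-like:
-
-        /* board code: place the expander's GPIOs after the SoC's own */
-        #define FOO_EXPANDER_GPIO_BASE  160
-
-        static struct foo_expander_platform_data foo_expander_pdata = {
-                .gpio_base = FOO_EXPANDER_GPIO_BASE,
-        };
-
-        static struct i2c_board_info foo_i2c_devices[] __initdata = {
-                {
-                        I2C_BOARD_INFO("foo-expander", 0x20),
-                        .platform_data = &foo_expander_pdata,
-                },
-        };
-
-        /* later, in the expander driver's probe() routine */
-        chip->gpio_chip.base = pdata->gpio_base;
-        err = gpiochip_add(&chip->gpio_chip);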
-
-Initialization order can be important. For example, if a device relies on an
-I2C-based (expander) GPIO, its probe() routine should only be called after
-that GPIO becomes available; that means the device should only be registered
-after the GPIO can work. One way to address such dependencies is for such
-gpio_chip controllers to provide setup() and teardown() callbacks to
-board-specific code. Once all the necessary resources are available, those
-board-specific callbacks register the devices, and they may remove them later
-when the GPIO controller device becomes unavailable.
-
-
-Sysfs Interface for Userspace (OPTIONAL)
-========================================
-Platforms which use the "gpiolib" implementation framework may choose to
-configure a sysfs user interface to GPIOs. This is different from the debugfs
-interface, since it provides control over GPIO direction and value instead of
-just showing a GPIO state summary. Plus, it can be present on production
-systems without debugging support.
-
-For example, with appropriate system hardware documentation, userspace could
-know that GPIO #23 controls the write protect line used to protect the
-bootloader segment in flash memory. System upgrade procedures may need to
-temporarily remove that protection: first exporting the GPIO and changing its
-output state, then updating the code before re-enabling the write protection.
-In normal use, GPIO #23 would never be touched, and the kernel would never
-need to know about it.
-
-Again depending on appropriate hardware documentation, on some systems
-userspace GPIO can be used to determine system configuration data that
-standard kernels will not know about. And for some tasks, simple userspace
-GPIO drivers could be all the system really needs.
-
-Note that standard kernel drivers already exist for common "LEDs and Buttons"
-GPIO tasks: "leds-gpio" and "gpio_keys", respectively. Use those instead of
-talking directly to the GPIOs; they integrate with kernel frameworks better
-than your userspace code could.
-
-
-Paths in Sysfs
---------------
-There are three kinds of entries in /sys/class/gpio:
-
- - Control interfaces used to get userspace control over GPIOs;
-
- - GPIOs themselves; and
-
- - GPIO controllers ("gpio_chip" instances).
-
-In addition to the standard files, these entries contain "device" symlinks.
-
-The control interfaces are write-only:
-
-    /sys/class/gpio/
-
-        "export" ... Userspace may ask the kernel to export control of a GPIO
-                to userspace by writing its number to this file.
-
-                Example: if kernel code has not requested GPIO #19,
-                "echo 19 > export" will create a "gpio19" node for it.
-
-        "unexport" ... Reverses the effect of exporting to userspace.
-
-                Example: "echo 19 > unexport" will remove a "gpio19" node
-                that was exported using the "export" file.
-
-GPIO signals have paths like /sys/class/gpio/gpio42/ (for GPIO #42) and have
-the following read/write attributes:
-
-    /sys/class/gpio/gpioN/
-
-        "direction" ... reads as either "in" or "out". This value may
-                normally be written. Writing "out" defaults to initializing
-                the value as low. To ensure glitch-free operation, the values
-                "low" and "high" may be written instead, configuring the GPIO
-                as an output with that initial value.
-
-                Note that this attribute does not exist if the kernel does
-                not support changing the direction of a GPIO, or if it was
-                exported by kernel code that did not explicitly allow
-                userspace to reconfigure this GPIO's direction.
-
-        "value" ... reads as either 0 (low) or 1 (high). If the GPIO is
-                configured as an output, this value may be written; any
-                nonzero value is treated as high.
-
-                If the pin can be configured as an interrupt-generating line
-                and it has been configured to generate interrupts (see the
-                description of "edge"), you can poll(2) on that file, and
-                poll(2) will return whenever the interrupt is triggered. If
-                you use poll(2), set the events POLLPRI and POLLERR; if you
-                use select(2), set the file descriptor in exceptfds. After
-                poll(2) returns, either lseek(2) to the beginning of the
-                sysfs file and read the new value, or close the file and
-                re-open it to read the value. (A small userspace sketch
-                follows this list.)
-
-        "edge" ... reads as either "none", "rising", "falling", or "both".
-                Write these strings to select the signal edge(s) that will
-                make poll(2) on the "value" file return.
-
-                This file exists only if the pin can be configured as an
-                interrupt-generating input pin.
-
-        "active_low" ... reads as either 0 (false) or 1 (true). Write any
-                nonzero value to invert the "value" attribute, for both
-                reading and writing. Existing and subsequent poll(2) support
-                configured via the "edge" attribute for "rising" and
-                "falling" edges will follow this setting.
-
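-A small userspace sketch of the poll(2) pattern described above (GPIO #42 and
-the minimal error handling are purely illustrative; the GPIO must already be
-exported and have "edge" configured):
-
-        #include <fcntl.h>
-        #include <poll.h>
-        #include <stdio.h>
-        #include <unistd.h>
-
-        int main(void)
-        {
-                char buf[4];
-                struct pollfd pfd;
-
-                pfd.fd = open("/sys/class/gpio/gpio42/value", O_RDONLY);
-                if (pfd.fd < 0)
-                        return 1;
-                pfd.events = POLLPRI | POLLERR;
-
-                /* read once so the first poll() waits for a fresh edge */
-                read(pfd.fd, buf, sizeof(buf));
-
-                for (;;) {
-                        if (poll(&pfd, 1, -1) < 0)
-                                break;
-                        lseek(pfd.fd, 0, SEEK_SET);
-                        if (read(pfd.fd, buf, sizeof(buf)) > 0)
-                                printf("value: %c\n", buf[0]);
-                }
-                close(pfd.fd);
-                return 0;
-        }
-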
-GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
-controller implementing GPIOs starting at #42) and have the following
-read-only attributes:
-
-    /sys/class/gpio/gpiochipN/
-
-        "base" ... same as N, the first GPIO managed by this chip
-
-        "label" ... provided for diagnostics (not always unique)
-
-        "ngpio" ... how many GPIOs this chip manages (N to N + ngpio - 1)
-
-Board documentation should in most cases cover what GPIOs are used for what
-purposes. However, those numbers are not always stable; GPIOs on a
-daughtercard might be different depending on the base board being used, or
-on other cards in the stack. In such cases, you may need to use the gpiochip
-nodes (possibly in conjunction with schematics) to determine the correct
-GPIO number to use for a given signal.
diff --git a/Documentation/userspace-api/gpio/gpio-handle-get-line-values-ioctl.rst b/Documentation/userspace-api/gpio/gpio-handle-get-line-values-ioctl.rst
index 25263b8f0588..2e3a52c113d5 100644
--- a/Documentation/userspace-api/gpio/gpio-handle-get-line-values-ioctl.rst
+++ b/Documentation/userspace-api/gpio/gpio-handle-get-line-values-ioctl.rst
@@ -36,6 +36,13 @@ Description
Get the values of all requested lines.
+The values returned are logical, indicating if the line is active or inactive.
+The ``GPIOHANDLE_REQUEST_ACTIVE_LOW`` flag controls the mapping between physical
+values (high/low) and logical values (active/inactive).
+If ``GPIOHANDLE_REQUEST_ACTIVE_LOW`` is not set then high is active and
+low is inactive. If ``GPIOHANDLE_REQUEST_ACTIVE_LOW`` is set then low is active
+and high is inactive.
+
The values of both input and output lines may be read.
For output lines, the value returned is driver and configuration dependent and
diff --git a/Documentation/userspace-api/gpio/gpio-handle-set-config-ioctl.rst b/Documentation/userspace-api/gpio/gpio-handle-set-config-ioctl.rst
index d002a84681ac..a03f30db63ab 100644
--- a/Documentation/userspace-api/gpio/gpio-handle-set-config-ioctl.rst
+++ b/Documentation/userspace-api/gpio/gpio-handle-set-config-ioctl.rst
@@ -43,7 +43,10 @@ The configuration applies to all requested lines.
The same :ref:`gpio-get-linehandle-config-rules` and
:ref:`gpio-get-linehandle-config-support` that apply when requesting the
-lines also apply when updating the line configuration.
+lines also apply when updating the line configuration, with the additional
+restriction that a direction flag must be set. Requesting an invalid
+configuration, including without a direction flag set, is an error
+(**EINVAL**).
The motivating use case for this command is changing direction of
bi-directional lines between input and output, but it may be used more
diff --git a/Documentation/userspace-api/gpio/gpio-handle-set-line-values-ioctl.rst b/Documentation/userspace-api/gpio/gpio-handle-set-line-values-ioctl.rst
index 0aa05e623a6c..12862132b420 100644
--- a/Documentation/userspace-api/gpio/gpio-handle-set-line-values-ioctl.rst
+++ b/Documentation/userspace-api/gpio/gpio-handle-set-line-values-ioctl.rst
@@ -36,6 +36,13 @@ Description
Set the values of all requested output lines.
+The values set are logical, indicating if the line is to be active or inactive.
+The ``GPIOHANDLE_REQUEST_ACTIVE_LOW`` flag controls the mapping between logical
+values (active/inactive) and physical values (high/low).
+If ``GPIOHANDLE_REQUEST_ACTIVE_LOW`` is not set then active is high and
+inactive is low. If ``GPIOHANDLE_REQUEST_ACTIVE_LOW`` is set then active is low
+and inactive is high.
+
Only the values of output lines may be set.
Attempting to set the value of input lines is an error (**EPERM**).
diff --git a/Documentation/userspace-api/gpio/gpio-lineevent-data-read.rst b/Documentation/userspace-api/gpio/gpio-lineevent-data-read.rst
index 68b8d4f9f604..d1e7e2383b0d 100644
--- a/Documentation/userspace-api/gpio/gpio-lineevent-data-read.rst
+++ b/Documentation/userspace-api/gpio/gpio-lineevent-data-read.rst
@@ -44,6 +44,11 @@ Edge detection must be enabled for the input line using either
both. Edge events are then generated whenever edge interrupts are detected on
the input line.
+Edges are defined in terms of changes to the logical line value, so an inactive
+to active transition is a rising edge. If ``GPIOHANDLE_REQUEST_ACTIVE_LOW`` is
+set then logical polarity is the opposite of physical polarity, and
+``GPIOEVENT_REQUEST_RISING_EDGE`` then corresponds to a falling physical edge.
+
The kernel captures and timestamps edge events as close as possible to their
occurrence and stores them in a buffer from where they can be read by
userspace at its convenience using `read()`.
diff --git a/Documentation/userspace-api/gpio/gpio-v2-line-event-read.rst b/Documentation/userspace-api/gpio/gpio-v2-line-event-read.rst
index 6513c23fb7ca..1312668e0f6a 100644
--- a/Documentation/userspace-api/gpio/gpio-v2-line-event-read.rst
+++ b/Documentation/userspace-api/gpio/gpio-v2-line-event-read.rst
@@ -40,6 +40,11 @@ Edge detection must be enabled for the input line using either
both. Edge events are then generated whenever edge interrupts are detected on
the input line.
+Edges are defined in terms of changes to the logical line value, so an inactive
+to active transition is a rising edge. If ``GPIO_V2_LINE_FLAG_ACTIVE_LOW`` is
+set then logical polarity is the opposite of physical polarity, and
+``GPIO_V2_LINE_FLAG_EDGE_RISING`` then corresponds to a falling physical edge.
+
The kernel captures and timestamps edge events as close as possible to their
occurrence and stores them in a buffer from where they can be read by
userspace at its convenience using `read()`.
diff --git a/Documentation/userspace-api/gpio/gpio-v2-line-get-values-ioctl.rst b/Documentation/userspace-api/gpio/gpio-v2-line-get-values-ioctl.rst
index e4e74a1926d8..d7defd4ca397 100644
--- a/Documentation/userspace-api/gpio/gpio-v2-line-get-values-ioctl.rst
+++ b/Documentation/userspace-api/gpio/gpio-v2-line-get-values-ioctl.rst
@@ -34,6 +34,13 @@ Description
Get the values of requested lines.
+The values returned are logical, indicating if the line is active or inactive.
+The ``GPIO_V2_LINE_FLAG_ACTIVE_LOW`` flag controls the mapping between physical
+values (high/low) and logical values (active/inactive).
+If ``GPIO_V2_LINE_FLAG_ACTIVE_LOW`` is not set then high is active and low is
+inactive. If ``GPIO_V2_LINE_FLAG_ACTIVE_LOW`` is set then low is active and
+high is inactive.
+
The values of both input and output lines may be read.
For output lines, the value returned is driver and configuration dependent and
diff --git a/Documentation/userspace-api/gpio/gpio-v2-line-set-config-ioctl.rst b/Documentation/userspace-api/gpio/gpio-v2-line-set-config-ioctl.rst
index 9b942a8a53ca..cfaab801556c 100644
--- a/Documentation/userspace-api/gpio/gpio-v2-line-set-config-ioctl.rst
+++ b/Documentation/userspace-api/gpio/gpio-v2-line-set-config-ioctl.rst
@@ -35,11 +35,14 @@ Description
Update the configuration of previously requested lines, without releasing the
line or introducing potential glitches.
-The new configuration must specify the configuration of all requested lines.
+The new configuration must specify a configuration for all requested lines.
The same :ref:`gpio-v2-get-line-config-rules` and
:ref:`gpio-v2-get-line-config-support` that apply when requesting the lines
-also apply when updating the line configuration.
+also apply when updating the line configuration, with the additional
+restriction that a direction flag must be set to enable reconfiguration.
+If no direction flag is set in the configuration for a given line then the
+configuration for that line is left unchanged.
The motivating use case for this command is changing direction of
bi-directional lines between input and output, but it may also be used to
diff --git a/Documentation/userspace-api/gpio/gpio-v2-line-set-values-ioctl.rst b/Documentation/userspace-api/gpio/gpio-v2-line-set-values-ioctl.rst
index 6d2d1886950b..16dd50fc60ca 100644
--- a/Documentation/userspace-api/gpio/gpio-v2-line-set-values-ioctl.rst
+++ b/Documentation/userspace-api/gpio/gpio-v2-line-set-values-ioctl.rst
@@ -35,6 +35,13 @@ Description
Set the values of requested output lines.
+The values set are logical, indicating if the line is to be active or inactive.
+The ``GPIO_V2_LINE_FLAG_ACTIVE_LOW`` flag controls the mapping between logical
+values (active/inactive) and physical values (high/low).
+If ``GPIO_V2_LINE_FLAG_ACTIVE_LOW`` is not set then active is high and inactive
+is low. If ``GPIO_V2_LINE_FLAG_ACTIVE_LOW`` is set then active is low and
+inactive is high.
+
Only the values of output lines may be set.
Attempting to set the value of an input line is an error (**EPERM**).
diff --git a/Documentation/userspace-api/gpio/sysfs.rst b/Documentation/userspace-api/gpio/sysfs.rst
index 116921048b18..bd64896de91a 100644
--- a/Documentation/userspace-api/gpio/sysfs.rst
+++ b/Documentation/userspace-api/gpio/sysfs.rst
@@ -97,9 +97,10 @@ and have the following read/write attributes:
poll(2) will return whenever the interrupt was triggered. If
you use poll(2), set the events POLLPRI and POLLERR. If you
use select(2), set the file descriptor in exceptfds. After
- poll(2) returns, either lseek(2) to the beginning of the sysfs
- file and read the new value or close the file and re-open it
- to read the value.
+ poll(2) returns, use pread(2) to read the value at offset
+ zero. Alternatively, either lseek(2) to the beginning of the
+ sysfs file and read the new value or close the file and
+ re-open it to read the value.
"edge" ...
reads as either "none", "rising", "falling", or
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 5926115ec0ed..8a251d71fa6e 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -32,6 +32,7 @@ Security-related interfaces
seccomp_filter
landlock
lsm
+ mfd_noexec
spec_ctrl
tee
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index a141e8e65c5d..9a97030c6c8d 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -186,6 +186,7 @@ Code Seq# Include File Comments
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
+'R' 20-2F linux/trace_mmap.h
'R' C0-DF net/bluetooth/rfcomm.h
'R' E0 uapi/linux/fsl_mc.h
'S' all linux/cdrom.h conflict!
diff --git a/Documentation/userspace-api/media/v4l/dev-subdev.rst b/Documentation/userspace-api/media/v4l/dev-subdev.rst
index 0f9eda3187f3..161b43f1ce66 100644
--- a/Documentation/userspace-api/media/v4l/dev-subdev.rst
+++ b/Documentation/userspace-api/media/v4l/dev-subdev.rst
@@ -582,7 +582,7 @@ depending on the hardware. In all cases, however, only routes that have the
Devices generating the streams may allow enabling and disabling some of the
routes or have a fixed routing configuration. If the routes can be disabled, not
declaring the routes (or declaring them without
-``VIDIOC_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
+``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
disable the routes. ``VIDIOC_SUBDEV_S_ROUTING`` will still return such routes
back to the user in the routes array, with the ``V4L2_SUBDEV_STREAM_FL_ACTIVE``
flag unset.
diff --git a/Documentation/userspace-api/mfd_noexec.rst b/Documentation/userspace-api/mfd_noexec.rst
new file mode 100644
index 000000000000..7afcc480e38f
--- /dev/null
+++ b/Documentation/userspace-api/mfd_noexec.rst
@@ -0,0 +1,86 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Introduction of non-executable mfd
+==================================
+:Author:
+ Daniel Verkamp <dverkamp@chromium.org>
+ Jeff Xu <jeffxu@chromium.org>
+
+:Contributor:
+ Aleksa Sarai <cyphar@cyphar.com>
+
+Since Linux introduced the memfd feature, memfds have always had their
+execute bit set, and the memfd_create() syscall doesn't allow setting
+it differently.
+
+However, in a secure-by-default system such as ChromeOS (where all
+executables should come from the rootfs, which is protected by verified
+boot), the executable nature of memfd opens a door for bypassing NoExec
+and enables "confused deputy" attacks. E.g., in VRP bug [1] the cros_vm
+process created a memfd to share content with an external process, but
+the memfd was overwritten and used to execute arbitrary code and escalate
+to root. [2] lists more VRPs of this kind.
+
+On the other hand, executable memfds have legitimate uses: runc uses the
+memfd seal and executable features to copy the contents of a binary and
+then execute it. For such a system, we need a way to differentiate runc's
+use of executable memfds from an attacker's [3].
+
+To address those above:
+ - Let memfd_create() set X bit at creation time.
+ - Let memfd be sealed for modifying X bit when NX is set.
+ - Add a new pid namespace sysctl: vm.memfd_noexec to help applications in
+ migrating and enforcing non-executable MFD.
+
+User API
+========
+``int memfd_create(const char *name, unsigned int flags)``
+
+``MFD_NOEXEC_SEAL``
+ When MFD_NOEXEC_SEAL bit is set in the ``flags``, memfd is created
+ with NX. F_SEAL_EXEC is set and the memfd can't be modified to
+ add X later. MFD_ALLOW_SEALING is also implied.
+ This is the most common case for the application to use memfd.
+
+``MFD_EXEC``
+ When MFD_EXEC bit is set in the ``flags``, memfd is created with X.
+
+Note:
+ ``MFD_NOEXEC_SEAL`` implies ``MFD_ALLOW_SEALING``. In case that
+ an app doesn't want sealing, it can add F_SEAL_SEAL after creation.
+
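+For example, a sketch of creating a sealed, non-executable memfd (assuming a
+libc that does not yet define ``MFD_NOEXEC_SEAL``; the value below comes from
+``<linux/memfd.h>``)::
+
+    #define _GNU_SOURCE
+    #include <sys/mman.h>
+    #include <unistd.h>
+
+    #ifndef MFD_NOEXEC_SEAL
+    #define MFD_NOEXEC_SEAL 0x0008U   /* from <linux/memfd.h> */
+    #endif
+
+    int main(void)
+    {
+        /* created with NX; F_SEAL_EXEC is applied and sealing is allowed */
+        int fd = memfd_create("payload", MFD_CLOEXEC | MFD_NOEXEC_SEAL);
+
+        if (fd < 0)
+            return 1;
+        /* share or map the fd as usual; it cannot be made executable later */
+        close(fd);
+        return 0;
+    }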
+
+Sysctl:
+========
+``pid namespaced sysctl vm.memfd_noexec``
+
+The new pid namespaced sysctl vm.memfd_noexec has 3 values:
+
+ - 0: MEMFD_NOEXEC_SCOPE_EXEC
+ memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
+ MFD_EXEC was set.
+
+ - 1: MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL
+ memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
+ MFD_NOEXEC_SEAL was set.
+
+ - 2: MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED
+ memfd_create() without MFD_NOEXEC_SEAL will be rejected.
+
+The sysctl allows finer control of memfd_create for old software that
+doesn't set the executable bit; for example, a container with
+vm.memfd_noexec=1 means the old software will create non-executable memfd
+by default while new software can create executable memfd by setting
+MFD_EXEC.
+
+The value of vm.memfd_noexec is passed to the child namespace at creation
+time. In addition, the setting is hierarchical, i.e. during memfd_create()
+we search from the current namespace up to the root namespace and use the
+most restrictive setting.
+
+[1] https://crbug.com/1305267
+
+[2] https://bugs.chromium.org/p/chromium/issues/list?q=type%3Dbug-security%20memfd%20escalation&can=1
+
+[3] https://lwn.net/Articles/781013/
diff --git a/Documentation/virt/coco/sev-guest.rst b/Documentation/virt/coco/sev-guest.rst
index e1eaf6a830ce..9d00967a5b2b 100644
--- a/Documentation/virt/coco/sev-guest.rst
+++ b/Documentation/virt/coco/sev-guest.rst
@@ -204,6 +204,17 @@ has taken care to make use of the SEV-SNP CPUID throughout all stages of boot.
Otherwise, guest owner attestation provides no assurance that the kernel wasn't
fed incorrect values at some point during boot.
+4. SEV Guest Driver Communication Key
+=====================================
+
+Communication between an SEV guest and the SEV firmware in the AMD Secure
+Processor (ASP, aka PSP) is protected by a VM Platform Communication Key
+(VMPCK). By default, the sev-guest driver uses the VMPCK associated with the
+VM Privilege Level (VMPL) at which the guest is running. Should this key be
+wiped by the sev-guest driver (see the driver for reasons why a VMPCK can be
+wiped), a different key can be used by reloading the sev-guest driver and
+specifying the desired key using the vmpck_id module parameter.
+
Reference
---------
diff --git a/Documentation/virt/hyperv/clocks.rst b/Documentation/virt/hyperv/clocks.rst
index a56f4837d443..176043265803 100644
--- a/Documentation/virt/hyperv/clocks.rst
+++ b/Documentation/virt/hyperv/clocks.rst
@@ -62,12 +62,21 @@ shared page with scale and offset values into user space. User
space code performs the same algorithm of reading the TSC and
applying the scale and offset to get the constant 10 MHz clock.
-Linux clockevents are based on Hyper-V synthetic timer 0. While
-Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
-timer 0. Interrupts from stimer0 are recorded on the "HVS" line in
-/proc/interrupts. Clockevents based on the virtualized PIT and
-local APIC timer also work, but the Hyper-V synthetic timer is
-preferred.
+Linux clockevents are based on Hyper-V synthetic timer 0 (stimer0).
+While Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
+timer 0. In older versions of Hyper-V, an interrupt from stimer0
+results in a VMBus control message that is demultiplexed by
+vmbus_isr() as described in the Documentation/virt/hyperv/vmbus.rst
+documentation. In newer versions of Hyper-V, stimer0 interrupts can
+be mapped to an architectural interrupt, which is referred to as
+"Direct Mode". Linux prefers to use Direct Mode when available. Since
+x86/x64 doesn't support per-CPU interrupts, Direct Mode statically
+allocates an x86 interrupt vector (HYPERV_STIMER0_VECTOR) across all CPUs
+and explicitly codes it to call the stimer0 interrupt handler. Hence
+interrupts from stimer0 are recorded on the "HVS" line in /proc/interrupts
+rather than being associated with a Linux IRQ. Clockevents based on the
+virtualized PIT and local APIC timer also work, but Hyper-V stimer0
+is preferred.
The driver for the Hyper-V synthetic system clock and timers is
drivers/clocksource/hyperv_timer.c.
diff --git a/Documentation/virt/hyperv/overview.rst b/Documentation/virt/hyperv/overview.rst
index cd493332c88a..77408a89d1a4 100644
--- a/Documentation/virt/hyperv/overview.rst
+++ b/Documentation/virt/hyperv/overview.rst
@@ -40,7 +40,7 @@ Linux guests communicate with Hyper-V in four different ways:
arm64, these synthetic registers must be accessed using explicit
hypercalls.
-* VMbus: VMbus is a higher-level software construct that is built on
+* VMBus: VMBus is a higher-level software construct that is built on
the other 3 mechanisms. It is a message passing interface between
the Hyper-V host and the Linux guest. It uses memory that is shared
between Hyper-V and the guest, along with various signaling
@@ -54,8 +54,8 @@ x86/x64 architecture only.
.. _Hyper-V Top Level Functional Spec (TLFS): https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs
-VMbus is not documented. This documentation provides a high-level
-overview of VMbus and how it works, but the details can be discerned
+VMBus is not documented. This documentation provides a high-level
+overview of VMBus and how it works, but the details can be discerned
only from the code.
Sharing Memory
@@ -74,7 +74,7 @@ follows:
physical address space. How Hyper-V is told about the GPA or list
of GPAs varies. In some cases, a single GPA is written to a
synthetic register. In other cases, a GPA or list of GPAs is sent
- in a VMbus message.
+ in a VMBus message.
* Hyper-V translates the GPAs into "real" physical memory addresses,
and creates a virtual mapping that it can use to access the memory.
@@ -133,9 +133,9 @@ only the CPUs actually present in the VM, so Linux does not report
any hot-add CPUs.
A Linux guest CPU may be taken offline using the normal Linux
-mechanisms, provided no VMbus channel interrupts are assigned to
-the CPU. See the section on VMbus Interrupts for more details
-on how VMbus channel interrupts can be re-assigned to permit
+mechanisms, provided no VMBus channel interrupts are assigned to
+the CPU. See the section on VMBus Interrupts for more details
+on how VMBus channel interrupts can be re-assigned to permit
taking a CPU offline.
32-bit and 64-bit
@@ -169,14 +169,14 @@ and functionality. Hyper-V indicates feature/function availability
via flags in synthetic MSRs that Hyper-V provides to the guest,
and the guest code tests these flags.
-VMbus has its own protocol version that is negotiated during the
-initial VMbus connection from the guest to Hyper-V. This version
+VMBus has its own protocol version that is negotiated during the
+initial VMBus connection from the guest to Hyper-V. This version
number is also output to dmesg during boot. This version number
is checked in a few places in the code to determine if specific
functionality is present.
-Furthermore, each synthetic device on VMbus also has a protocol
-version that is separate from the VMbus protocol version. Device
+Furthermore, each synthetic device on VMBus also has a protocol
+version that is separate from the VMBus protocol version. Device
drivers for these synthetic devices typically negotiate the device
protocol version, and may test that protocol version to determine
if specific device functionality is present.
diff --git a/Documentation/virt/hyperv/vmbus.rst b/Documentation/virt/hyperv/vmbus.rst
index d2012d9022c5..1dcef6a7fda3 100644
--- a/Documentation/virt/hyperv/vmbus.rst
+++ b/Documentation/virt/hyperv/vmbus.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-VMbus
+VMBus
=====
-VMbus is a software construct provided by Hyper-V to guest VMs. It
+VMBus is a software construct provided by Hyper-V to guest VMs. It
consists of a control path and common facilities used by synthetic
devices that Hyper-V presents to guest VMs. The control path is
used to offer synthetic devices to the guest VM and, in some cases,
@@ -12,9 +12,9 @@ and the synthetic device implementation that is part of Hyper-V, and
signaling primitives to allow Hyper-V and the guest to interrupt
each other.
-VMbus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
-entry in a running Linux guest. The VMbus driver (drivers/hv/vmbus_drv.c)
-establishes the VMbus control path with the Hyper-V host, then
+VMBus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
+entry in a running Linux guest. The VMBus driver (drivers/hv/vmbus_drv.c)
+establishes the VMBus control path with the Hyper-V host, then
registers itself as a Linux bus driver. It implements the standard
bus functions for adding and removing devices to/from the bus.
@@ -49,9 +49,9 @@ synthetic NIC is referred to as "netvsc" and the Linux driver for
the synthetic SCSI controller is "storvsc". These drivers contain
functions with names like "storvsc_connect_to_vsp".
-VMbus channels
+VMBus channels
--------------
-An instance of a synthetic device uses VMbus channels to communicate
+An instance of a synthetic device uses VMBus channels to communicate
between the VSP and the VSC. Channels are bi-directional and used
for passing messages. Most synthetic devices use a single channel,
but the synthetic SCSI controller and synthetic NIC may use multiple
@@ -73,7 +73,7 @@ write indices and some control flags, followed by the memory for the
actual ring. The size of the ring is determined by the VSC in the
guest and is specific to each synthetic device. The list of GPAs
making up the ring is communicated to the Hyper-V host over the
-VMbus control path as a GPA Descriptor List (GPADL). See function
+VMBus control path as a GPA Descriptor List (GPADL). See function
vmbus_establish_gpadl().
Each ring buffer is mapped into contiguous Linux kernel virtual
@@ -102,10 +102,10 @@ resources. For Windows Server 2019 and later, this limit is
approximately 1280 Mbytes. For versions prior to Windows Server
2019, the limit is approximately 384 Mbytes.
-VMbus messages
---------------
-All VMbus messages have a standard header that includes the message
-length, the offset of the message payload, some flags, and a
+VMBus channel messages
+----------------------
+All messages sent in a VMBus channel have a standard header that includes
+the message length, the offset of the message payload, some flags, and a
transactionID. The portion of the message after the header is
unique to each VSP/VSC pair.
@@ -137,7 +137,7 @@ control message contains a list of GPAs that describe the data
buffer. For example, the storvsc driver uses this approach to
specify the data buffers to/from which disk I/O is done.
-Three functions exist to send VMbus messages:
+Three functions exist to send VMBus channel messages:
1. vmbus_sendpacket(): Control-only messages and messages with
embedded data -- no GPAs
@@ -154,20 +154,51 @@ Historically, Linux guests have trusted Hyper-V to send well-formed
and valid messages, and Linux drivers for synthetic devices did not
fully validate messages. With the introduction of processor
technologies that fully encrypt guest memory and that allow the
-guest to not trust the hypervisor (AMD SNP-SEV, Intel TDX), trusting
+guest to not trust the hypervisor (AMD SEV-SNP, Intel TDX), trusting
the Hyper-V host is no longer a valid assumption. The drivers for
-VMbus synthetic devices are being updated to fully validate any
+VMBus synthetic devices are being updated to fully validate any
values read from memory that is shared with Hyper-V, which includes
-messages from VMbus devices. To facilitate such validation,
+messages from VMBus devices. To facilitate such validation,
messages read by the guest from the "in" ring buffer are copied to a
temporary buffer that is not shared with Hyper-V. Validation is
performed in this temporary buffer without the risk of Hyper-V
maliciously modifying the message after it is validated but before
it is used.
-VMbus interrupts
+Synthetic Interrupt Controller (synic)
+--------------------------------------
+Hyper-V provides each guest CPU with a synthetic interrupt controller
+that is used by VMBus for host-guest communication. While each synic
+defines 16 synthetic interrupts (SINT), Linux uses only one of the 16
+(VMBUS_MESSAGE_SINT). All interrupts related to communication between
+the Hyper-V host and a guest CPU use that SINT.
+
+The SINT is mapped to a single per-CPU architectural interrupt (i.e.,
+an 8-bit x86/x64 interrupt vector, or an arm64 PPI INTID). Because
+each CPU in the guest has a synic and may receive VMBus interrupts,
+they are best modeled in Linux as per-CPU interrupts. This model works
+well on arm64 where a single per-CPU Linux IRQ is allocated for
+VMBUS_MESSAGE_SINT. This IRQ appears in /proc/interrupts as an IRQ labelled
+"Hyper-V VMbus". Since x86/x64 lacks support for per-CPU IRQs, an x86
+interrupt vector is statically allocated (HYPERVISOR_CALLBACK_VECTOR)
+across all CPUs and explicitly coded to call vmbus_isr(). In this case,
+there's no Linux IRQ, and the interrupts are visible in aggregate in
+/proc/interrupts on the "HYP" line.
+
+The synic provides the means to demultiplex the architectural interrupt into
+one or more logical interrupts and route the logical interrupt to the proper
+VMBus handler in Linux. This demultiplexing is done by vmbus_isr() and
+related functions that access synic data structures.
+
+The synic is not modeled in Linux as an irq chip or irq domain,
+and the demultiplexed logical interrupts are not Linux IRQs. As such,
+they don't appear in /proc/interrupts or /proc/irq. The CPU
+affinity for one of these logical interrupts is controlled via an
+entry under /sys/bus/vmbus as described below.
+
+VMBus interrupts
----------------
-VMbus provides a mechanism for the guest to interrupt the host when
+VMBus provides a mechanism for the guest to interrupt the host when
the guest has queued new messages in a ring buffer. The host
expects that the guest will send an interrupt only when an "out"
ring buffer transitions from empty to non-empty. If the guest sends
@@ -176,63 +207,55 @@ unnecessary. If a guest sends an excessive number of unnecessary
interrupts, the host may throttle that guest by suspending its
execution for a few seconds to prevent a denial-of-service attack.
-Similarly, the host will interrupt the guest when it sends a new
-message on the VMbus control path, or when a VMbus channel "in" ring
-buffer transitions from empty to non-empty. Each CPU in the guest
-may receive VMbus interrupts, so they are best modeled as per-CPU
-interrupts in Linux. This model works well on arm64 where a single
-per-CPU IRQ is allocated for VMbus. Since x86/x64 lacks support for
-per-CPU IRQs, an x86 interrupt vector is statically allocated (see
-HYPERVISOR_CALLBACK_VECTOR) across all CPUs and explicitly coded to
-call the VMbus interrupt service routine. These interrupts are
-visible in /proc/interrupts on the "HYP" line.
-
-The guest CPU that a VMbus channel will interrupt is selected by the
+Similarly, the host will interrupt the guest via the synic when
+it sends a new message on the VMBus control path, or when a VMBus
+channel "in" ring buffer transitions from empty to non-empty due to
+the host inserting a new VMBus channel message. The control message stream
+and each VMBus channel "in" ring buffer are separate logical interrupts
+that are demultiplexed by vmbus_isr(). It demultiplexes by first checking
+for channel interrupts by calling vmbus_chan_sched(), which looks at a synic
+bitmap to determine which channels have pending interrupts on this CPU.
+If multiple channels have pending interrupts for this CPU, they are
+processed sequentially. When all channel interrupts have been processed,
+vmbus_isr() checks for and processes any messages received on the VMBus
+control path.
+
+The guest CPU that a VMBus channel will interrupt is selected by the
guest when the channel is created, and the host is informed of that
-selection. VMbus devices are broadly grouped into two categories:
+selection. VMBus devices are broadly grouped into two categories:
-1. "Slow" devices that need only one VMbus channel. The devices
+1. "Slow" devices that need only one VMBus channel. The devices
(such as keyboard, mouse, heartbeat, and timesync) generate
- relatively few interrupts. Their VMbus channels are all
+ relatively few interrupts. Their VMBus channels are all
assigned to interrupt the VMBUS_CONNECT_CPU, which is always
CPU 0.
-2. "High speed" devices that may use multiple VMbus channels for
+2. "High speed" devices that may use multiple VMBus channels for
higher parallelism and performance. These devices include the
- synthetic SCSI controller and synthetic NIC. Their VMbus
+ synthetic SCSI controller and synthetic NIC. Their VMBus
channels interrupts are assigned to CPUs that are spread out
among the available CPUs in the VM so that interrupts on
multiple channels can be processed in parallel.
-The assignment of VMbus channel interrupts to CPUs is done in the
+The assignment of VMBus channel interrupts to CPUs is done in the
function init_vp_index(). This assignment is done outside of the
normal Linux interrupt affinity mechanism, so the interrupts are
neither "unmanaged" nor "managed" interrupts.
-The CPU that a VMbus channel will interrupt can be seen in
+The CPU that a VMBus channel will interrupt can be seen in
/sys/bus/vmbus/devices/<deviceGUID>/ channels/<channelRelID>/cpu.
When running on later versions of Hyper-V, the CPU can be changed
-by writing a new value to this sysfs entry. Because the interrupt
-assignment is done outside of the normal Linux affinity mechanism,
-there are no entries in /proc/irq corresponding to individual
-VMbus channel interrupts.
+by writing a new value to this sysfs entry. Because VMBus channel
+interrupts are not Linux IRQs, there are no entries in /proc/interrupts
+or /proc/irq corresponding to individual VMBus channel interrupts.
An online CPU in a Linux guest may not be taken offline if it has
-VMbus channel interrupts assigned to it. Any such channel
+VMBus channel interrupts assigned to it. Any such channel
interrupts must first be manually reassigned to another CPU as
described above. When no channel interrupts are assigned to the
CPU, it can be taken offline.
-When a guest CPU receives a VMbus interrupt from the host, the
-function vmbus_isr() handles the interrupt. It first checks for
-channel interrupts by calling vmbus_chan_sched(), which looks at a
-bitmap setup by the host to determine which channels have pending
-interrupts on this CPU. If multiple channels have pending
-interrupts for this CPU, they are processed sequentially. When all
-channel interrupts have been processed, vmbus_isr() checks for and
-processes any message received on the VMbus control path.
-
-The VMbus channel interrupt handling code is designed to work
+The VMBus channel interrupt handling code is designed to work
correctly even if an interrupt is received on a CPU other than the
CPU assigned to the channel. Specifically, the code does not use
CPU-based exclusion for correctness. In normal operation, Hyper-V
@@ -242,23 +265,23 @@ when Hyper-V will make the transition. The code must work correctly
even if there is a time lag before Hyper-V starts interrupting the
new CPU. See comments in target_cpu_store().
-VMbus device creation/deletion
+VMBus device creation/deletion
------------------------------
Hyper-V and the Linux guest have a separate message-passing path
that is used for synthetic device creation and deletion. This
-path does not use a VMbus channel. See vmbus_post_msg() and
+path does not use a VMBus channel. See vmbus_post_msg() and
vmbus_on_msg_dpc().
The first step is for the guest to connect to the generic
-Hyper-V VMbus mechanism. As part of establishing this connection,
-the guest and Hyper-V agree on a VMbus protocol version they will
+Hyper-V VMBus mechanism. As part of establishing this connection,
+the guest and Hyper-V agree on a VMBus protocol version they will
use. This negotiation allows newer Linux kernels to run on older
Hyper-V versions, and vice versa.
The guest then tells Hyper-V to "send offers". Hyper-V sends an
offer message to the guest for each synthetic device that the VM
-is configured to have. Each VMbus device type has a fixed GUID
-known as the "class ID", and each VMbus device instance is also
+is configured to have. Each VMBus device type has a fixed GUID
+known as the "class ID", and each VMBus device instance is also
identified by a GUID. The offer message from Hyper-V contains
both GUIDs to uniquely (within the VM) identify the device.
There is one offer message for each device instance, so a VM with
@@ -275,7 +298,7 @@ type based on the class ID, and invokes the correct driver to set up
the device. Driver/device matching is performed using the standard
Linux mechanism.
-The device driver probe function opens the primary VMbus channel to
+The device driver probe function opens the primary VMBus channel to
the corresponding VSP. It allocates guest memory for the channel
ring buffers and shares the ring buffer with the Hyper-V host by
giving the host a list of GPAs for the ring buffer memory. See
@@ -285,7 +308,7 @@ Once the ring buffer is set up, the device driver and VSP exchange
setup messages via the primary channel. These messages may include
negotiating the device protocol version to be used between the Linux
VSC and the VSP on the Hyper-V host. The setup messages may also
-include creating additional VMbus channels, which are somewhat
+include creating additional VMBus channels, which are somewhat
mis-named as "sub-channels" since they are functionally
equivalent to the primary channel once they are created.
diff --git a/MAINTAINERS b/MAINTAINERS
index d6c90161c7bf..08d5e7e8e408 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -392,6 +392,7 @@ ACPI WMI DRIVER
M: Armin Wolf <W_Armin@gmx.de>
L: platform-driver-x86@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-wmi
F: Documentation/driver-api/wmi.rst
F: Documentation/wmi/
F: drivers/platform/x86/wmi.c
@@ -682,6 +683,15 @@ S: Supported
F: fs/aio.c
F: include/linux/*aio*.h
+AIROHA ETHERNET DRIVER
+M: Lorenzo Bianconi <lorenzo@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
+F: drivers/net/ethernet/mediatek/airoha_eth.c
+
AIROHA SPI SNFI DRIVER
M: Lorenzo Bianconi <lorenzo@kernel.org>
M: Ray Liu <ray.liu@airoha.com>
@@ -846,12 +856,6 @@ ALPS PS/2 TOUCHPAD DRIVER
R: Pali Rohár <pali@kernel.org>
F: drivers/input/mouse/alps.*
-ALTERA I2C CONTROLLER DRIVER
-M: Thor Thayer <thor.thayer@linux.intel.com>
-S: Maintained
-F: Documentation/devicetree/bindings/i2c/i2c-altera.txt
-F: drivers/i2c/busses/i2c-altera.c
-
ALTERA MAILBOX DRIVER
M: Mun Yew Tham <mun.yew.tham@intel.com>
S: Maintained
@@ -871,21 +875,6 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-altera.c
-ALTERA SYSTEM MANAGER DRIVER
-M: Thor Thayer <thor.thayer@linux.intel.com>
-S: Maintained
-F: drivers/mfd/altera-sysmgr.c
-F: include/linux/mfd/altera-sysmgr.h
-
-ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT
-M: Thor Thayer <thor.thayer@linux.intel.com>
-S: Maintained
-F: drivers/gpio/gpio-altera-a10sr.c
-F: drivers/mfd/altera-a10sr.c
-F: drivers/reset/reset-a10sr.c
-F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
-F: include/linux/mfd/altera-a10sr.h
-
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Joyce Ooi <joyce.ooi@intel.com>
L: netdev@vger.kernel.org
@@ -1044,7 +1033,7 @@ M: Joerg Roedel <joro@8bytes.org>
R: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
L: iommu@lists.linux.dev
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/amd/
F: include/linux/amd-iommu.h
@@ -1107,7 +1096,6 @@ L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/admin-guide/pm/amd-pstate.rst
F: drivers/cpufreq/amd-pstate*
-F: include/linux/amd-pstate.h
F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
AMD PTDMA DRIVER
@@ -1909,6 +1897,15 @@ F: include/dt-bindings/reset/actions,*
F: include/linux/soc/actions/
N: owl
+ARM/AIROHA SOC SUPPORT
+M: Matthias Brugger <matthias.bgg@gmail.com>
+M: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S: Odd Fixes
+F: arch/arm/boot/dts/airoha/
+F: arch/arm64/boot/dts/airoha/
+
ARM/Allwinner SoC Clock Support
M: Emilio López <emilio@elopez.com.ar>
S: Maintained
@@ -1931,6 +1928,16 @@ N: allwinner
N: sun[x456789]i
N: sun[25]0i
+ARM/ALPHASCALE ARCHITECTURE
+M: Krzysztof Kozlowski <krzk@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Odd Fixes
+F: arch/arm/boot/dts/alphascale/
+F: drivers/clk/clk-asm9260.c
+F: drivers/clocksource/asm9260_timer.c
+F: drivers/rtc/rtc-asm9260.c
+F: drivers/watchdog/asm9260_wdt.c
+
ARM/AMD PENSANDO ARM64 ARCHITECTURE
M: Brad Larson <blarson@amd.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1986,10 +1993,9 @@ F: drivers/soc/amlogic/
N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
-M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
M: Antoine Tenart <atenart@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
F: arch/arm/boot/dts/amazon/
F: arch/arm/mach-alpine/
F: arch/arm64/boot/dts/amazon/
@@ -2094,6 +2100,14 @@ F: arch/arm/boot/dts/aspeed/
F: arch/arm/mach-aspeed/
N: aspeed
+ARM/AXM LSI SOC
+M: Krzysztof Kozlowski <krzk@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Odd Fixes
+F: Documentation/devicetree/bindings/arm/axxia.yaml
+F: arch/arm/boot/dts/intel/axm/
+F: arch/arm/mach-axxia/
+
ARM/BITMAIN ARCHITECTURE
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2207,11 +2221,14 @@ M: Marek Behún <kabel@kernel.org>
S: Maintained
W: https://www.turris.cz/
F: Documentation/ABI/testing/debugfs-moxtet
+F: Documentation/ABI/testing/sysfs-bus-i2c-devices-turris-omnia-mcu
F: Documentation/ABI/testing/sysfs-bus-moxtet-devices
F: Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
F: Documentation/devicetree/bindings/bus/moxtet.txt
F: Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
+F: Documentation/devicetree/bindings/firmware/cznic,turris-omnia-mcu.yaml
F: Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
+F: Documentation/devicetree/bindings/interrupt-controller/marvell,mpic.yaml
F: Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
F: Documentation/devicetree/bindings/watchdog/armada-37xx-wdt.txt
F: drivers/bus/moxtet.c
@@ -2219,10 +2236,12 @@ F: drivers/firmware/turris-mox-rwtm.c
F: drivers/gpio/gpio-moxtet.c
F: drivers/leds/leds-turris-omnia.c
F: drivers/mailbox/armada-37xx-rwtm-mailbox.c
+F: drivers/platform/cznic/
F: drivers/watchdog/armada_37xx_wdt.c
F: include/dt-bindings/bus/moxtet.h
F: include/linux/armada-37xx-rwtm-mailbox.h
F: include/linux/moxtet.h
+F: include/linux/turris-omnia-mcu-interface.h
ARM/FARADAY FA526 PORT
M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
@@ -2524,6 +2543,15 @@ F: arch/arm/boot/dts/socionext/milbeaut*
F: arch/arm/mach-milbeaut/
N: milbeaut
+ARM/MOXA ART SOC
+M: Krzysztof Kozlowski <krzk@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Odd Fixes
+F: Documentation/devicetree/bindings/arm/moxart.yaml
+F: Documentation/devicetree/bindings/clock/moxa,moxart-clock.txt
+F: arch/arm/boot/dts/moxa/
+F: drivers/clk/clk-moxart.c
+
ARM/MStar/Sigmastar Armv7 SoC support
M: Daniel Palmer <daniel@thingy.jp>
M: Romain Perier <romain.perier@gmail.com>
@@ -2893,7 +2921,7 @@ F: drivers/edac/altera_edac.[ch]
ARM/SPREADTRUM SoC SUPPORT
M: Orson Zhai <orsonzhai@gmail.com>
M: Baolin Wang <baolin.wang7@gmail.com>
-M: Chunyan Zhang <zhang.lyra@gmail.com>
+R: Chunyan Zhang <zhang.lyra@gmail.com>
S: Maintained
F: arch/arm64/boot/dts/sprd
N: sprd
@@ -3034,6 +3062,15 @@ F: Documentation/devicetree/bindings/hwinfo/ti,k3-socinfo.yaml
F: arch/arm64/boot/dts/ti/Makefile
F: arch/arm64/boot/dts/ti/k3-*
+ARM/TEXAS INSTRUMENTS NSPIRE ARCHITECTURE
+M: Krzysztof Kozlowski <krzk@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Odd Fixes
+F: Documentation/devicetree/bindings/*/*/ti,nspire*
+F: Documentation/devicetree/bindings/*/ti,nspire*
+F: Documentation/devicetree/bindings/arm/ti/nspire.yaml
+F: arch/arm/boot/dts/nspire/
+
ARM/TOSHIBA VISCONTI ARCHITECTURE
M: Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3103,9 +3140,12 @@ W: http://www.armlinux.org.uk/
F: arch/arm/vfp/
ARM/VT8500 ARM ARCHITECTURE
+M: Alexey Charkov <alchark@gmail.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Orphan
+S: Odd Fixes
F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
+F: arch/arm/boot/dts/vt8500/
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
F: drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -3571,6 +3611,15 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml
F: drivers/spi/spi-axi-spi-engine.c
+AXI PWM GENERATOR
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Nuno Sá <nuno.sa@analog.com>
+L: linux-pwm@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
+F: drivers/pwm/pwm-axi-pwmgen.c
+
AXXIA I2C CONTROLLER
M: Krzysztof Adamski <krzysztof.adamski@nokia.com>
L: linux-i2c@vger.kernel.org
@@ -3602,10 +3651,9 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/b43
F: drivers/net/wireless/broadcom/b43/
B43LEGACY WIRELESS DRIVER
-M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
L: b43-dev@lists.infradead.org
-S: Maintained
+S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/b43
F: drivers/net/wireless/broadcom/b43legacy/
@@ -3782,6 +3830,20 @@ F: include/linux/blk*
F: kernel/trace/blktrace.c
F: lib/sbitmap.c
+BLOCK LAYER DEVICE DRIVER API [RUST]
+M: Andreas Hindborg <a.hindborg@samsung.com>
+R: Boqun Feng <boqun.feng@gmail.com>
+L: linux-block@vger.kernel.org
+L: rust-for-linux@vger.kernel.org
+S: Supported
+W: https://rust-for-linux.com
+B: https://github.com/Rust-for-Linux/linux/issues
+C: https://rust-for-linux.zulipchat.com/#narrow/stream/Block
+T: git https://github.com/Rust-for-Linux/linux.git rust-block-next
+F: drivers/block/rnull.rs
+F: rust/kernel/block.rs
+F: rust/kernel/block/
+
BLOCK2MTD DRIVER
M: Joern Engel <joern@lazybastard.org>
L: linux-mtd@lists.infradead.org
@@ -3854,6 +3916,7 @@ BPF JIT for ARM64
M: Daniel Borkmann <daniel@iogearbox.net>
M: Alexei Starovoitov <ast@kernel.org>
M: Puranjay Mohan <puranjay@kernel.org>
+R: Xu Kuohai <xukuohai@huaweicloud.com>
L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
@@ -3980,7 +4043,7 @@ R: Song Liu <song@kernel.org>
R: Yonghong Song <yonghong.song@linux.dev>
R: John Fastabend <john.fastabend@gmail.com>
R: KP Singh <kpsingh@kernel.org>
-R: Stanislav Fomichev <sdf@google.com>
+R: Stanislav Fomichev <sdf@fomichev.me>
R: Hao Luo <haoluo@google.com>
R: Jiri Olsa <jolsa@kernel.org>
L: bpf@vger.kernel.org
@@ -4083,12 +4146,13 @@ F: kernel/bpf/ringbuf.c
BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
M: KP Singh <kpsingh@kernel.org>
-R: Matt Bobrowski <mattbobrowski@google.com>
+M: Matt Bobrowski <mattbobrowski@google.com>
L: bpf@vger.kernel.org
S: Maintained
F: Documentation/bpf/prog_lsm.rst
F: include/linux/bpf_lsm.h
F: kernel/bpf/bpf_lsm.c
+F: kernel/trace/bpf_trace.c
F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
@@ -4842,6 +4906,7 @@ W: https://github.com/linux-can
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
F: Documentation/networking/can.rst
+F: Documentation/networking/iso15765-2.rst
F: include/linux/can/can-ml.h
F: include/linux/can/core.h
F: include/linux/can/skb.h
@@ -5135,11 +5200,30 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
F: sound/soc/codecs/cros_ec_codec.*
+CHROMEOS EC CHARGE CONTROL
+M: Thomas Weißschuh <thomas@weissschuh.net>
+S: Maintained
+F: drivers/power/supply/cros_charge-control.c
+
+CHROMEOS EC HARDWARE MONITORING
+M: Thomas Weißschuh <thomas@weissschuh.net>
+L: chrome-platform@lists.linux.dev
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/cros_ec_hwmon.rst
+F: drivers/hwmon/cros_ec_hwmon.c
+
+CHROMEOS EC LED DRIVER
+M: Thomas Weißschuh <thomas@weissschuh.net>
+S: Maintained
+F: drivers/leds/leds-cros_ec.c
+
CHROMEOS EC SUBDRIVERS
M: Benson Leung <bleung@chromium.org>
R: Guenter Roeck <groeck@chromium.org>
L: chrome-platform@lists.linux.dev
S: Maintained
+F: drivers/power/supply/cros_charge-control.c
F: drivers/power/supply/cros_usbpd-charger.c
N: cros_ec
N: cros-ec
@@ -5187,7 +5271,6 @@ F: Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
F: drivers/media/cec/i2c/ch7322.c
CIRRUS LOGIC AUDIO CODEC DRIVERS
-M: James Schulman <james.schulman@cirrus.com>
M: David Rhodes <david.rhodes@cirrus.com>
M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -5206,6 +5289,18 @@ F: sound/pci/hda/hda_component*
F: sound/pci/hda/hda_cs_dsp_ctl.*
F: sound/soc/codecs/cs*
+CIRRUS LOGIC HAPTIC DRIVERS
+M: James Ogletree <jogletre@opensource.cirrus.com>
+M: Fred Treven <fred.treven@cirrus.com>
+M: Ben Bright <ben.bright@cirrus.com>
+L: patches@opensource.cirrus.com
+S: Supported
+F: Documentation/devicetree/bindings/input/cirrus,cs40l50.yaml
+F: drivers/input/misc/cs40l*
+F: drivers/mfd/cs40l*
+F: include/linux/mfd/cs40l*
+F: sound/soc/codecs/cs40l*
+
CIRRUS LOGIC DSP FIRMWARE DRIVER
M: Simon Trimmer <simont@opensource.cirrus.com>
M: Charles Keepax <ckeepax@opensource.cirrus.com>
@@ -5296,7 +5391,7 @@ F: drivers/infiniband/hw/usnic/
CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <samitolvanen@google.com>
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Nathan Chancellor <nathan@kernel.org>
L: llvm@lists.linux.dev
S: Supported
@@ -5536,6 +5631,7 @@ CONTROL GROUP (CGROUP)
M: Tejun Heo <tj@kernel.org>
M: Zefan Li <lizefan.x@bytedance.com>
M: Johannes Weiner <hannes@cmpxchg.org>
+M: Michal Koutný <mkoutny@suse.com>
L: cgroups@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
@@ -6116,6 +6212,12 @@ F: Documentation/ABI/obsolete/procfs-i8k
F: drivers/hwmon/dell-smm-hwmon.c
F: include/uapi/linux/i8k.h
+DELL PC DRIVER
+M: Lyndon Sanche <lsanche@lyndeno.ca>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/dell/dell-pc.c
+
DELL REMOTE BIOS UPDATE DRIVER
M: Stuart Hayes <stuart.w.hayes@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -6239,9 +6341,8 @@ S: Maintained
F: drivers/usb/dwc3/
DESIGNWARE XDATA IP DRIVER
-M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
L: linux-pci@vger.kernel.org
-S: Maintained
+S: Orphan
F: Documentation/misc-devices/dw-xdata-pcie.rst
F: drivers/misc/dw-xdata-pcie.c
@@ -8212,13 +8313,15 @@ F: rust/kernel/net/phy.rs
EXEC & BINFMT API, ELF
R: Eric Biederman <ebiederm@xmission.com>
-R: Kees Cook <keescook@chromium.org>
+R: Kees Cook <kees@kernel.org>
L: linux-mm@kvack.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
F: Documentation/userspace-api/ELF.rst
F: fs/*binfmt_*.c
+F: fs/Kconfig.binfmt
F: fs/exec.c
+F: fs/exec_test.c
F: include/linux/binfmts.h
F: include/linux/elf.h
F: include/uapi/linux/binfmts.h
@@ -8483,6 +8586,7 @@ R: Darrick J. Wong <djwong@kernel.org>
L: linux-xfs@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Supported
+F: Documentation/filesystems/iomap/*
F: fs/iomap/
F: include/linux/iomap.h
@@ -8613,7 +8717,7 @@ S: Maintained
F: drivers/net/ethernet/nvidia/*
FORTIFY_SOURCE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -8709,7 +8813,7 @@ FREESCALE DSPI DRIVER
M: Vladimir Oltean <olteanv@gmail.com>
L: linux-spi@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+F: Documentation/devicetree/bindings/spi/fsl,dspi*.yaml
F: drivers/spi/spi-fsl-dspi.c
F: include/linux/spi/spi-fsl-dspi.h
@@ -8810,14 +8914,14 @@ M: Madalin Bucur <madalin.bucur@nxp.com>
R: Sean Anderson <sean.anderson@seco.com>
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/fsl-fman.txt
+F: Documentation/devicetree/bindings/net/fsl,fman*.yaml
F: drivers/net/ethernet/freescale/fman
FREESCALE QORIQ PTP CLOCK DRIVER
M: Yangbo Lu <yangbo.lu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+F: Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp*
F: drivers/net/ethernet/freescale/dpaa2/dprtc*
F: drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -8834,6 +8938,7 @@ F: drivers/spi/spi-fsl-qspi.c
FREESCALE QUICC ENGINE LIBRARY
M: Qiang Zhao <qiang.zhao@nxp.com>
+M: Christophe Leroy <christophe.leroy@csgroup.eu>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/soc/fsl/qe/
@@ -8883,9 +8988,10 @@ S: Maintained
F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC DRIVERS
+M: Christophe Leroy <christophe.leroy@csgroup.eu>
L: linuxppc-dev@lists.ozlabs.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Orphan
+S: Maintained
F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/
@@ -9103,7 +9209,7 @@ F: include/linux/mfd/gsc.h
F: include/linux/platform_data/gsc_hwmon.h
GCC PLUGINS
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -9237,7 +9343,7 @@ S: Maintained
F: drivers/input/touchscreen/resistive-adc-touch.c
GENERIC STRING LIBRARY
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Andy Shevchenko <andy@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@@ -11035,8 +11141,8 @@ F: include/uapi/drm/i915_drm.h
INTEL DRM XE DRIVER (Lunar Lake and newer)
M: Lucas De Marchi <lucas.demarchi@intel.com>
-M: Oded Gabbay <ogabbay@kernel.org>
M: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+M: Rodrigo Vivi <rodrigo.vivi@intel.com>
L: intel-xe@lists.freedesktop.org
S: Supported
W: https://drm.pages.freedesktop.org/intel-docs/
@@ -11051,8 +11157,8 @@ F: include/drm/xe*
F: include/uapi/drm/xe_drm.h
INTEL ETHERNET DRIVERS
-M: Jesse Brandeburg <jesse.brandeburg@intel.com>
M: Tony Nguyen <anthony.l.nguyen@intel.com>
+M: Przemek Kitszel <przemyslaw.kitszel@intel.com>
L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
S: Supported
W: https://www.intel.com/content/www/us/en/support.html
@@ -11144,6 +11250,7 @@ R: Tony Luck <tony.luck@intel.com>
S: Maintained
F: drivers/platform/x86/intel/ifs
F: include/trace/events/intel_ifs.h
+F: tools/testing/selftests/drivers/platform/x86/intel/ifs/
INTEL INTEGRATED SENSOR HUB DRIVER
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
@@ -11157,7 +11264,7 @@ M: David Woodhouse <dwmw2@infradead.org>
M: Lu Baolu <baolu.lu@linux.intel.com>
L: iommu@lists.linux.dev
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/intel/
INTEL IPU3 CSI-2 CIO2 DRIVER
@@ -11530,7 +11637,7 @@ IOMMU DMA-API LAYER
M: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/dma-iommu.c
F: drivers/iommu/dma-iommu.h
F: drivers/iommu/iova.c
@@ -11542,7 +11649,7 @@ M: Will Deacon <will@kernel.org>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: Documentation/devicetree/bindings/iommu/
F: Documentation/userspace-api/iommu.rst
F: drivers/iommu/
@@ -11571,7 +11678,7 @@ F: include/linux/iosys-map.h
IO_URING
M: Jens Axboe <axboe@kernel.dk>
-R: Pavel Begunkov <asml.silence@gmail.com>
+M: Pavel Begunkov <asml.silence@gmail.com>
L: io-uring@vger.kernel.org
S: Maintained
T: git git://git.kernel.dk/linux-block
@@ -11951,7 +12058,7 @@ F: scripts/package/
F: usr/
KERNEL HARDENING (not covered by other areas)
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Gustavo A. R. Silva <gustavoars@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@@ -11962,6 +12069,7 @@ F: arch/*/configs/hardening.config
F: include/linux/overflow.h
F: include/linux/randomize_kstack.h
F: kernel/configs/hardening.config
+F: lib/usercopy_kunit.c
F: mm/usercopy.c
K: \b(add|choose)_random_kstack_offset\b
K: \b__check_(object_size|heap_object)\b
@@ -11981,7 +12089,7 @@ R: Dai Ngo <Dai.Ngo@oracle.com>
R: Tom Talpey <tom@talpey.com>
L: linux-nfs@vger.kernel.org
S: Supported
-W: http://nfs.sourceforge.net/
+B: https://bugzilla.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux.git
F: Documentation/filesystems/nfs/
F: fs/lockd/
@@ -12383,7 +12491,6 @@ F: drivers/video/backlight/ktz8866.c
KVM PARAVIRT (KVM/paravirt)
M: Paolo Bonzini <pbonzini@redhat.com>
-R: Wanpeng Li <wanpengli@tencent.com>
R: Vitaly Kuznetsov <vkuznets@redhat.com>
L: kvm@vger.kernel.org
S: Supported
@@ -12450,6 +12557,7 @@ LANTIQ / INTEL Ethernet drivers
M: Hauke Mehrtens <hauke@hauke-m.de>
L: netdev@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml
F: drivers/net/dsa/lantiq_gswip.c
F: drivers/net/dsa/lantiq_pce.h
F: drivers/net/ethernet/lantiq_xrx200.c
@@ -12479,7 +12587,7 @@ F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tycho Andersen <tycho@tycho.pizza>
-R: Kees Cook <keescook@chromium.org>
+R: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -12490,7 +12598,7 @@ M: Pavel Machek <pavel@ucw.cz>
M: Lee Jones <lee@kernel.org>
L: linux-leds@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/leds.git
F: Documentation/devicetree/bindings/leds/
F: Documentation/leds/
F: drivers/leds/
@@ -12775,7 +12883,7 @@ F: arch/powerpc/platforms/8xx/
F: arch/powerpc/platforms/83xx/
LINUX KERNEL DUMP TEST MODULE (LKDTM)
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Maintained
F: drivers/misc/lkdtm/*
F: tools/testing/selftests/lkdtm/*
@@ -12905,7 +13013,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/usb/dvb-usb-v2/lmedm04*
LOADPIN SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/LoadPin.rst
@@ -12969,6 +13077,7 @@ F: Documentation/arch/loongarch/
F: Documentation/translations/zh_CN/arch/loongarch/
F: arch/loongarch/
F: drivers/*/*loongarch*
+F: drivers/cpufreq/loongson3_cpufreq.c
LOONGSON GPIO DRIVER
M: Yinbo Zhu <zhuyinbo@loongson.cn>
@@ -13295,6 +13404,15 @@ F: drivers/net/dsa/mv88e6xxx/
F: include/linux/dsa/mv88e6xxx.h
F: include/linux/platform_data/mv88e6xxx.h
+MARVELL 88PM886 PMIC DRIVER
+M: Karel Balej <balejk@matfyz.cz>
+S: Maintained
+F: Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml
+F: drivers/input/misc/88pm886-onkey.c
+F: drivers/mfd/88pm886.c
+F: drivers/regulator/88pm886-regulator.c
+F: include/linux/mfd/88pm886.h
+
MARVELL ARMADA 3700 PHY DRIVERS
M: Miquel Raynal <miquel.raynal@bootlin.com>
S: Maintained
@@ -14475,7 +14593,7 @@ MEMORY MAPPING
M: Andrew Morton <akpm@linux-foundation.org>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
-R: Lorenzo Stoakes <lstoakes@gmail.com>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
@@ -14569,6 +14687,13 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
F: drivers/staging/media/meson/vdec/
+META ETHERNET DRIVERS
+M: Alexander Duyck <alexanderduyck@fb.com>
+M: Jakub Kicinski <kuba@kernel.org>
+R: kernel-team@meta.com
+S: Supported
+F: drivers/net/ethernet/meta/
+
METHODE UDPU SUPPORT
M: Robert Marko <robert.marko@sartura.hr>
S: Maintained
@@ -14850,6 +14975,7 @@ MICROCHIP SOC DRIVERS
M: Conor Dooley <conor@kernel.org>
S: Supported
T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: Documentation/devicetree/bindings/soc/microchip/
F: drivers/soc/microchip/
MICROCHIP SPI DRIVER
@@ -15238,7 +15364,6 @@ F: drivers/staging/most/
F: include/linux/most.h
MOTORCOMM PHY DRIVER
-M: Peter Geis <pgwipeout@gmail.com>
M: Frank <Frank.Sae@motor-comm.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -15258,6 +15383,27 @@ S: Maintained
F: Documentation/devicetree/bindings/leds/backlight/mps,mp3309c.yaml
F: drivers/video/backlight/mp3309c.c
+MPS MP2891 DRIVER
+M: Noah Wang <noahwang.wang@outlook.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/mp2891.rst
+F: drivers/hwmon/pmbus/mp2891.c
+
+MPS MP2993 DRIVER
+M: Noah Wang <noahwang.wang@outlook.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/mp2993.rst
+F: drivers/hwmon/pmbus/mp2993.c
+
+MPS MP9941 DRIVER
+M: Noah Wang <noahwang.wang@outlook.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/mp9941.rst
+F: drivers/hwmon/pmbus/mp9941.c
+
MR800 AVERMEDIA USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -15752,7 +15898,7 @@ B: https://github.com/multipath-tcp/mptcp_net-next/issues
T: git https://github.com/multipath-tcp/mptcp_net-next.git export-net
T: git https://github.com/multipath-tcp/mptcp_net-next.git export
F: Documentation/netlink/specs/mptcp_pm.yaml
-F: Documentation/networking/mptcp-sysctl.rst
+F: Documentation/networking/mptcp*.rst
F: include/net/mptcp.h
F: include/trace/events/mptcp.h
F: include/uapi/linux/mptcp*.h
@@ -15769,8 +15915,13 @@ F: include/linux/tcp.h
F: include/net/tcp.h
F: include/trace/events/tcp.h
F: include/uapi/linux/tcp.h
+F: net/ipv4/inet_connection_sock.c
+F: net/ipv4/inet_hashtables.c
+F: net/ipv4/inet_timewait_sock.c
F: net/ipv4/syncookies.c
F: net/ipv4/tcp*.c
+F: net/ipv6/inet6_connection_sock.c
+F: net/ipv6/inet6_hashtables.c
F: net/ipv6/syncookies.c
F: net/ipv6/tcp*.c
@@ -15827,7 +15978,7 @@ F: drivers/nfc/virtual_ncidev.c
F: tools/testing/selftests/nci/
NFS, SUNRPC, AND LOCKD CLIENTS
-M: Trond Myklebust <trond.myklebust@hammerspace.com>
+M: Trond Myklebust <trondmy@kernel.org>
M: Anna Schumaker <anna@kernel.org>
L: linux-nfs@vger.kernel.org
S: Maintained
@@ -16332,7 +16483,6 @@ S: Maintained
F: arch/arm/*omap*/*clock*
OMAP DEVICE TREE SUPPORT
-M: Benoît Cousson <bcousson@baylibre.com>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
L: devicetree@vger.kernel.org
@@ -16387,7 +16537,6 @@ S: Maintained
F: arch/arm/mach-omap2/omap_hwmod*data*
OMAP HWMOD SUPPORT
-M: Benoît Cousson <bcousson@baylibre.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
@@ -16449,7 +16598,7 @@ F: arch/arm/boot/dts/ti/omap/am335x-nano.dts
OMAP1 SUPPORT
M: Aaro Koskinen <aaro.koskinen@iki.fi>
M: Janusz Krzysztofik <jmkrzyszt@gmail.com>
-M: Tony Lindgren <tony@atomide.com>
+R: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
S: Maintained
Q: http://patchwork.kernel.org/project/linux-omap/list/
@@ -16461,10 +16610,13 @@ F: include/linux/platform_data/ams-delta-fiq.h
F: include/linux/platform_data/i2c-omap.h
OMAP2+ SUPPORT
+M: Aaro Koskinen <aaro.koskinen@iki.fi>
+M: Andreas Kemnade <andreas@kemnade.info>
+M: Kevin Hilman <khilman@baylibre.com>
+M: Roger Quadros <rogerq@kernel.org>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
S: Maintained
-W: http://www.muru.com/linux/omap/
W: http://linux.omap.com/
Q: http://patchwork.kernel.org/project/linux-omap/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
@@ -17351,6 +17503,14 @@ F: Documentation/driver-api/pci/p2pdma.rst
F: drivers/pci/p2pdma.c
F: include/linux/pci-p2pdma.h
+PCI POWER CONTROL
+M: Bartosz Golaszewski <brgl@bgdev.pl>
+L: linux-pci@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
+F: drivers/pci/pwrctl/*
+F: include/linux/pci-pwrctl.h
+
PCI SUBSYSTEM
M: Bjorn Helgaas <bhelgaas@google.com>
L: linux-pci@vger.kernel.org
@@ -17534,7 +17694,6 @@ F: include/linux/peci.h
PENSANDO ETHERNET DRIVERS
M: Shannon Nelson <shannon.nelson@amd.com>
M: Brett Creeley <brett.creeley@amd.com>
-M: drivers@pensando.io
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
@@ -17892,6 +18051,14 @@ F: include/linux/pm_*
F: include/linux/powercap.h
F: kernel/configs/nopm.config
+POWER SEQUENCING
+M: Bartosz Golaszewski <brgl@bgdev.pl>
+L: linux-pm@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
+F: drivers/power/sequencing/
+F: include/linux/pwrseq/
+
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
@@ -17998,7 +18165,7 @@ F: tools/testing/selftests/proc/
PROC SYSCTL
M: Luis Chamberlain <mcgrof@kernel.org>
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
M: Joel Granados <j.granados@samsung.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
@@ -18054,7 +18221,7 @@ F: Documentation/devicetree/bindings/net/pse-pd/
F: drivers/net/pse-pd/
PSTORE FILESYSTEM
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Tony Luck <tony.luck@intel.com>
R: Guilherme G. Piccoli <gpiccoli@igalia.com>
L: linux-hardening@vger.kernel.org
@@ -18212,6 +18379,7 @@ QCOM AUDIO (ASoC) DRIVERS
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Banajit Goswami <bgoswami@quicinc.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+L: linux-arm-msm@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/soc/qcom/qcom,apr*
F: Documentation/devicetree/bindings/sound/qcom,*
@@ -18376,7 +18544,7 @@ M: Jeff Johnson <jjohnson@kernel.org>
L: ath12k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath.git
F: drivers/net/wireless/ath/ath12k/
N: ath12k
@@ -18386,7 +18554,7 @@ M: Jeff Johnson <jjohnson@kernel.org>
L: ath10k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath.git
F: drivers/net/wireless/ath/ath10k/
N: ath10k
@@ -18397,7 +18565,7 @@ L: ath11k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
B: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k/bugreport
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath.git
F: drivers/net/wireless/ath/ath11k/
N: ath11k
@@ -18406,7 +18574,7 @@ M: Toke Høiland-Jørgensen <toke@toke.dk>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath.git
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
@@ -18605,6 +18773,14 @@ F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
F: drivers/net/ethernet/qualcomm/rmnet/
F: include/linux/if_rmnet.h
+QUALCOMM TRUST ZONE MEMORY ALLOCATOR
+M: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: drivers/firmware/qcom/qcom_tzmem.c
+F: drivers/firmware/qcom/qcom_tzmem.h
+F: include/linux/firmware/qcom/qcom_tzmem.h
+
QUALCOMM TSENS THERMAL DRIVER
M: Amit Kucheria <amitk@kernel.org>
M: Thara Gopinath <thara.gopinath@gmail.com>
@@ -18868,6 +19044,7 @@ M: Neeraj Upadhyay <neeraj.upadhyay@kernel.org> (kernel/rcu/tasks.h)
M: Joel Fernandes <joel@joelfernandes.org>
M: Josh Triplett <josh@joshtriplett.org>
M: Boqun Feng <boqun.feng@gmail.com>
+M: Uladzislau Rezki <urezki@gmail.com>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
@@ -19033,6 +19210,14 @@ F: drivers/net/ethernet/renesas/Makefile
F: drivers/net/ethernet/renesas/rcar_gen4*
F: drivers/net/ethernet/renesas/rswitch*
+RENESAS ETHERNET TSN DRIVER
+M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
+L: netdev@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
+F: drivers/net/ethernet/renesas/rtsn.*
+
RENESAS IDT821034 ASoC CODEC
M: Herve Codina <herve.codina@bootlin.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -19078,8 +19263,8 @@ L: linux-renesas-soc@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
F: Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
-F: drivers/thermal/rcar_gen3_thermal.c
-F: drivers/thermal/rcar_thermal.c
+F: drivers/thermal/renesas/rcar_gen3_thermal.c
+F: drivers/thermal/renesas/rcar_thermal.c
RENESAS RIIC DRIVER
M: Chris Brandt <chris.brandt@renesas.com>
@@ -19306,6 +19491,7 @@ F: arch/riscv/boot/dts/
X: arch/riscv/boot/dts/allwinner/
X: arch/riscv/boot/dts/renesas/
X: arch/riscv/boot/dts/sophgo/
+X: arch/riscv/boot/dts/thead/
RISC-V PMU DRIVERS
M: Atish Patra <atishp@atishpatra.org>
@@ -19317,11 +19503,12 @@ F: drivers/perf/riscv_pmu_legacy.c
F: drivers/perf/riscv_pmu_sbi.c
RISC-V THEAD SoC SUPPORT
-M: Jisheng Zhang <jszhang@kernel.org>
+M: Drew Fustini <drew@pdp7.com>
M: Guo Ren <guoren@kernel.org>
M: Fu Wei <wefu@redhat.com>
L: linux-riscv@lists.infradead.org
S: Maintained
+T: git https://github.com/pdp7/linux.git
F: arch/riscv/boot/dts/thead/
RNBD BLOCK DRIVERS
@@ -19446,17 +19633,21 @@ F: drivers/gpio/gpio-bd71828.c
F: drivers/mfd/rohm-bd71828.c
F: drivers/mfd/rohm-bd718x7.c
F: drivers/mfd/rohm-bd9576.c
+F: drivers/mfd/rohm-bd96801.c
F: drivers/regulator/bd71815-regulator.c
F: drivers/regulator/bd71828-regulator.c
F: drivers/regulator/bd718x7-regulator.c
F: drivers/regulator/bd9576-regulator.c
+F: drivers/regulator/bd96801-regulator.c
F: drivers/regulator/rohm-regulator.c
F: drivers/rtc/rtc-bd70528.c
F: drivers/watchdog/bd9576_wdt.c
+F: drivers/watchdog/bd96801_wdt.c
F: include/linux/mfd/rohm-bd71815.h
F: include/linux/mfd/rohm-bd71828.h
F: include/linux/mfd/rohm-bd718x7.h
F: include/linux/mfd/rohm-bd957x.h
+F: include/linux/mfd/rohm-bd96801.h
F: include/linux/mfd/rohm-generic.h
F: include/linux/mfd/rohm-shared.h
@@ -19511,7 +19702,6 @@ F: drivers/net/wireless/realtek/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
M: Hin-Tak Leung <hintak.leung@gmail.com>
-M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
S: Maintained
T: git https://github.com/pkshih/rtw.git
@@ -19927,7 +20117,6 @@ R: Dietmar Eggemann <dietmar.eggemann@arm.com> (SCHED_NORMAL)
R: Steven Rostedt <rostedt@goodmis.org> (SCHED_FIFO/SCHED_RR)
R: Ben Segall <bsegall@google.com> (CONFIG_CFS_BANDWIDTH)
R: Mel Gorman <mgorman@suse.de> (CONFIG_NUMA_BALANCING)
-R: Daniel Bristot de Oliveira <bristot@redhat.com> (SCHED_DEADLINE)
R: Valentin Schneider <vschneid@redhat.com> (TOPOLOGY)
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -20060,7 +20249,7 @@ F: drivers/media/cec/platform/seco/seco-cec.c
F: drivers/media/cec/platform/seco/seco-cec.h
SECURE COMPUTING
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Andy Lutomirski <luto@amacapital.net>
R: Will Drewry <wad@chromium.org>
S: Supported
@@ -20100,6 +20289,7 @@ SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) NXP i.MX DRIVER
M: Haibo Chen <haibo.chen@nxp.com>
L: imx@lists.linux.dev
L: linux-mmc@vger.kernel.org
+L: s32@nxp.com
S: Maintained
F: drivers/mmc/host/sdhci-esdhc-imx.c
@@ -21249,7 +21439,6 @@ W: http://wiki.laptop.org/go/DCON
F: drivers/staging/olpc_dcon/
STAGING - REALTEK RTL8712U DRIVERS
-M: Larry Finger <Larry.Finger@lwfinger.net>
M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>
S: Odd Fixes
F: drivers/staging/rtl8712/
@@ -21283,9 +21472,9 @@ F: drivers/staging/
STANDALONE CACHE CONTROLLER DRIVERS
M: Conor Dooley <conor@kernel.org>
-L: linux-riscv@lists.infradead.org
S: Maintained
T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: Documentation/devicetree/bindings/cache/
F: drivers/cache
STARFIRE/DURALAN NETWORK DRIVER
@@ -21316,7 +21505,7 @@ F: arch/riscv/boot/dts/starfive/
STARFIVE DWMAC GLUE LAYER
M: Emil Renner Berthing <kernel@esmil.dk>
-M: Samin Guo <samin.guo@starfivetech.com>
+M: Minda Chen <minda.chen@starfivetech.com>
S: Maintained
F: Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -21798,6 +21987,7 @@ F: drivers/mfd/syscon.c
SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers
M: Sudeep Holla <sudeep.holla@arm.com>
R: Cristian Marussi <cristian.marussi@arm.com>
+L: arm-scmi@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/firmware/arm,sc[mp]i.yaml
@@ -22140,7 +22330,13 @@ TEHUTI ETHERNET DRIVER
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/ethernet/tehuti/*
+F: drivers/net/ethernet/tehuti/tehuti.*
+
+TEHUTI TN40XX ETHERNET DRIVER
+M: FUJITA Tomonori <fujita.tomonori@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/tehuti/tn40*
TELECOM CLOCK DRIVER FOR MCPL0010
M: Mark Gross <markgross@kernel.org>
@@ -22527,6 +22723,7 @@ L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
+F: Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
F: drivers/pmdomain/ti/omap_prm.c
F: drivers/soc/ti/*
@@ -22655,7 +22852,7 @@ L: linux-renesas-soc@vger.kernel.org
S: Supported
F: drivers/mmc/host/renesas_sdhi*
F: drivers/mmc/host/tmio_mmc*
-F: include/linux/mfd/tmio.h
+F: include/linux/platform_data/tmio.h
TMP513 HARDWARE MONITOR DRIVER
M: Eric Tremblay <etremblay@distech-controls.com>
@@ -22679,7 +22876,7 @@ L: tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English)
L: tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese)
L: tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese)
S: Maintained
-W: https://tomoyo.osdn.jp/
+W: https://tomoyo.sourceforge.net/
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
@@ -22748,7 +22945,7 @@ M: Jarkko Sakkinen <jarkko@kernel.org>
R: Jason Gunthorpe <jgg@ziepe.ca>
L: linux-integrity@vger.kernel.org
S: Maintained
-W: https://gitlab.com/jarkkojs/linux-tpmdd-test
+W: https://codeberg.org/jarkko/linux-tpmdd-test
Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
F: Documentation/devicetree/bindings/tpm/
@@ -22974,7 +23171,7 @@ F: drivers/block/ublk_drv.c
F: include/uapi/linux/ublk_cmd.h
UBSAN
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
R: Marco Elver <elver@google.com>
R: Andrey Konovalov <andreyknvl@gmail.com>
R: Andrey Ryabinin <ryabinin.a.a@gmail.com>
@@ -23647,12 +23844,6 @@ M: Kevin Brace <kevinbrace@bracecomputerlab.com>
S: Maintained
F: drivers/net/ethernet/via/via-rhine.c
-VIA SD/MMC CARD CONTROLLER DRIVER
-M: Bruce Chang <brucechang@via.com.tw>
-M: Harald Welte <HaraldWelte@viatech.com>
-S: Maintained
-F: drivers/mmc/host/via-sdmmc.c
-
VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
L: linux-fbdev@vger.kernel.org
@@ -23865,8 +24056,8 @@ S: Maintained
F: drivers/vhost/scsi.c
VIRTIO I2C DRIVER
-M: Conghui Chen <conghui.chen@intel.com>
M: Viresh Kumar <viresh.kumar@linaro.org>
+R: "Chen, Jian Jun" <jian.jun.chen@intel.com>
L: linux-i2c@vger.kernel.org
L: virtualization@lists.linux.dev
S: Maintained
@@ -23976,7 +24167,6 @@ VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
R: Christoph Hellwig <hch@infradead.org>
-R: Lorenzo Stoakes <lstoakes@gmail.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
@@ -24812,7 +25002,7 @@ F: drivers/net/hamradio/yam*
F: include/linux/yam.h
YAMA SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/Yama.rst
diff --git a/Makefile b/Makefile
index f975b6396328..67ce3b7d558e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1219,7 +1219,7 @@ remove-stale-files:
$(Q)$(srctree)/scripts/remove-stale-files
# Support for using generic headers in asm-generic
-asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj
+asm-generic := -f $(srctree)/scripts/Makefile.asm-headers obj
PHONY += asm-generic uapi-asm-generic
asm-generic: uapi-asm-generic
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 3c1afa524b9c..49285a3ce239 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/arc/include/asm/unistd.h b/arch/arc/include/asm/unistd.h
new file mode 100644
index 000000000000..211c230d88d6
--- /dev/null
+++ b/arch/arc/include/asm/unistd.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_ARC_UNISTD_H
+#define _ASM_ARC_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define NR_syscalls __NR_syscalls
+
+#endif
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index fa2713ae6bea..cb2905c7c5da 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -7,46 +7,4 @@
* published by the Free Software Foundation.
*/
-/******** no-legacy-syscalls-ABI *******/
-
-/*
- * Non-typical guard macro to enable inclusion twice in ARCH sys.c
- * That is how the Generic syscall wrapper generator works
- */
-#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
-#define _UAPI_ASM_ARC_UNISTD_H
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_TIME32_SYSCALLS
-
-#define sys_mmap2 sys_mmap_pgoff
-
-#include <asm-generic/unistd.h>
-
-#define NR_syscalls __NR_syscalls
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs (__NR_arch_specific_syscall + 3)
-
-/* ARC specific syscall */
-#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
-#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
-#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
-#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
-
-__SYSCALL(__NR_cacheflush, sys_cacheflush)
-__SYSCALL(__NR_arc_settls, sys_arc_settls)
-__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
-__SYSCALL(__NR_sysfs, sys_sysfs)
-
-#undef __SYSCALL
-
-#endif
+#include <asm/unistd_32.h>
diff --git a/arch/arc/kernel/Makefile.syscalls b/arch/arc/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..391d30ab7a83
--- /dev/null
+++ b/arch/arc/kernel/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += arc time32 renameat stat64 rlimit
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
index 1069446bdc58..36a2a95c083b 100644
--- a/arch/arc/kernel/sys.c
+++ b/arch/arc/kernel/sys.c
@@ -8,11 +8,12 @@
#define sys_clone sys_clone_wrapper
#define sys_clone3 sys_clone3_wrapper
+#define sys_mmap2 sys_mmap_pgoff
-#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
void *sys_call_table[NR_syscalls] = {
[0 ... NR_syscalls-1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
};
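The sys.c hunk above switches the ARC syscall table over to the generated <asm/syscall_table_32.h>, using the usual X-macro trick: every slot is pre-filled with sys_ni_syscall, then each __SYSCALL(nr, call) line from the generated header overrides its slot with a designated initializer. Below is a minimal standalone sketch of that pattern with hypothetical demo_* names (it uses GCC's range-designator extension, and it is not the generated kernel header itself).

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_ni_syscall(void) { return -38; /* roughly -ENOSYS */ }
static long sys_demo_read(void)  { return 0; }
static long sys_demo_write(void) { return 1; }

#define NR_DEMO_SYSCALLS 8

/* Stand-in for the generated header: just a list of __SYSCALL() lines. */
#define DEMO_SYSCALL_LIST \
        __SYSCALL(0, sys_demo_read) \
        __SYSCALL(1, sys_demo_write)

/* Each entry expands to a designated initializer for its slot... */
#define __SYSCALL(nr, call) [nr] = (call),

static syscall_fn demo_call_table[NR_DEMO_SYSCALLS] = {
        /* ...after every slot is pre-filled with the "not implemented"
         * stub via GCC's [first ... last] range designator; the later
         * designated initializers override the pre-fill. */
        [0 ... NR_DEMO_SYSCALLS - 1] = sys_ni_syscall,
        DEMO_SYSCALL_LIST
};
#undef __SYSCALL

int main(void)
{
        printf("slot 1 handled: %s\n",
               demo_call_table[1] != sys_ni_syscall ? "yes" : "no");
        printf("slot 5 handled: %s\n",
               demo_call_table[5] != sys_ni_syscall ? "yes" : "no");
        return 0;
}

The benefit of the generated-header form is that the per-arch sys.c no longer needs the "include the uapi header twice" trick that the old arch/arc/include/uapi/asm/unistd.h relied on.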
diff --git a/arch/arc/net/bpf_jit.h b/arch/arc/net/bpf_jit.h
index ec44873c42d1..495f3023e4c1 100644
--- a/arch/arc/net/bpf_jit.h
+++ b/arch/arc/net/bpf_jit.h
@@ -39,7 +39,7 @@
/************** Functions that the back-end must provide **************/
/* Extension for 32-bit operations. */
-inline u8 zext(u8 *buf, u8 rd);
+u8 zext(u8 *buf, u8 rd);
/***** Moves *****/
u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
diff --git a/arch/arc/net/bpf_jit_arcv2.c b/arch/arc/net/bpf_jit_arcv2.c
index 31bfb6e9ce00..4458e409ca0a 100644
--- a/arch/arc/net/bpf_jit_arcv2.c
+++ b/arch/arc/net/bpf_jit_arcv2.c
@@ -62,7 +62,7 @@ enum {
* If/when we decide to add ARCv2 instructions that do use register pairs,
* the mapping, hopefully, doesn't need to be revisited.
*/
-const u8 bpf2arc[][2] = {
+static const u8 bpf2arc[][2] = {
/* Return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = {ARC_R_8, ARC_R_9},
/* Arguments from eBPF program to in-kernel function */
@@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
/************* Packers (Deal with BPF_REGs) **************/
-inline u8 zext(u8 *buf, u8 rd)
+u8 zext(u8 *buf, u8 rd)
{
if (rd != BPF_REG_FP)
return arc_movi_r(buf, REG_HI(rd), 0);
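The zext() hunks only make sense against the mapping documented a few hunks earlier: each 64-bit BPF register is backed by a pair of 32-bit ARC registers (bpf2arc maps BPF_REG_0 to ARC_R_8/ARC_R_9, and so on), so zero-extending a 32-bit result is just a move of 0 into the high register of the pair via the REG_HI() accessor seen above. A small illustrative sketch of that pair-lookup idea follows; the names and register numbers are hypothetical, not the driver's actual tables.

#include <stdint.h>
#include <stdio.h>

enum { VREG_0, VREG_1, VREG_FP, NR_VREGS };

/* pair[0] holds bits 31..0, pair[1] holds bits 63..32 (made-up numbers) */
static const uint8_t vreg2host[NR_VREGS][2] = {
        [VREG_0]  = { 8, 9 },
        [VREG_1]  = { 10, 11 },
        [VREG_FP] = { 26, 27 },
};

#define HOST_LO(r) (vreg2host[(r)][0])
#define HOST_HI(r) (vreg2host[(r)][1])

/* "mov rHI, 0" models what a 32->64 zero-extension costs with pairs. */
static void emit_zext(int vreg)
{
        printf("mov r%d, 0\t; clear high half of VREG_%d\n",
               HOST_HI(vreg), vreg);
}

int main(void)
{
        emit_zext(VREG_0);
        emit_zext(VREG_1);
        return 0;
}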
@@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
+ break;
}
} else {
/*
@@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
+ break;
}
}
@@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
#define JCC64_NR_OF_JMPS 3 /* Number of jumps in jcc64 template. */
#define JCC64_INSNS_TO_END 3 /* Number of insn. inclusive the 2nd jmp to end. */
#define JCC64_SKIP_JMP 1 /* Index of the "skip" jump to "end". */
-const struct {
+static const struct {
/*
* "jit_off" is common between all "jmp[]" and is coupled with
* "cond" of each "jmp[]" instance. e.g.:
@@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
* The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
* instruction that precedes the conditional branch.
*/
-const u8 arcv2_32_jmps[ARC_CC_LAST] = {
+static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
[ARC_CC_UGT] = CC_great_u,
[ARC_CC_UGE] = CC_great_eq_u,
[ARC_CC_ULT] = CC_less_u,
diff --git a/arch/arc/net/bpf_jit_core.c b/arch/arc/net/bpf_jit_core.c
index 6f6b4ffccf2c..e3628922c24a 100644
--- a/arch/arc/net/bpf_jit_core.c
+++ b/arch/arc/net/bpf_jit_core.c
@@ -159,7 +159,7 @@ static void jit_dump(const struct jit_context *ctx)
/* Initialise the context so there's no garbage. */
static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
{
- memset(ctx, 0, sizeof(ctx));
+ memset(ctx, 0, sizeof(*ctx));
ctx->orig_prog = prog;
@@ -167,7 +167,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
ctx->prog = bpf_jit_blind_constants(prog);
if (IS_ERR(ctx->prog))
return PTR_ERR(ctx->prog);
- ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
+ ctx->blinded = (ctx->prog != ctx->orig_prog);
/* If the verifier doesn't zero-extend, then we have to do it. */
ctx->do_zext = !ctx->prog->aux->verifier_zext;
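The memset() fix above is the classic sizeof-on-a-pointer bug: sizeof(ctx) is the size of the pointer itself (4 or 8 bytes), not of the structure it points to, so most of the JIT context was left uninitialized. A tiny standalone illustration follows; jit_demo_ctx is a made-up stand-in, not the driver's struct jit_context.

#include <stdio.h>
#include <string.h>

struct jit_demo_ctx {
        char scratch[64];     /* occupies the first bytes of the struct */
        int  len;
};

int main(void)
{
        struct jit_demo_ctx ctx = { .scratch = "stale", .len = 123 };
        struct jit_demo_ctx *p = &ctx;

        printf("sizeof(p) = %zu, sizeof(*p) = %zu\n", sizeof(p), sizeof(*p));

        memset(p, 0, sizeof(p));   /* BUG: clears only 4 or 8 bytes of scratch */
        printf("after buggy memset: len = %d\n", p->len);  /* still 123 */

        memset(p, 0, sizeof(*p));  /* correct: clears the whole structure */
        printf("after fixed memset: len = %d\n", p->len);  /* now 0 */
        return 0;
}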
@@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
}
/*
- * All the "handle_*()" functions have been called before by the
- * "jit_prepare()". If there was an error, we would know by now.
- * Therefore, no extra error checking at this point, other than
- * a sanity check at the end that expects the calculated length
- * (jit.len) to be equal to the length of generated instructions
- * (jit.index).
+ * jit_compile() is the real compilation phase. jit_prepare() is
+ * invoked before jit_compile() as a dry-run to make sure everything
+ * will go OK and allocate the necessary memory.
+ *
+ * In the end, jit_compile() checks if it has produced the same number
+ * of instructions as jit_prepare() would.
*/
static int jit_compile(struct jit_context *ctx)
{
@@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
/*
* This function may be invoked twice for the same stream of BPF
- * instructions. The "extra pass" happens, when there are "call"s
- * involved that their addresses are not known during the first
- * invocation.
+ * instructions. The "extra pass" happens when there are
+ * (re)locations involved whose addresses are not known
+ * during the first run.
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
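The reworded comments in jit_prepare()/jit_compile() describe a two-pass generator: a dry run that only measures how much output each instruction would produce (so the image can be sized and allocated), followed by the real emission pass, which must land on exactly the measured length. Here is a minimal sketch of that dry-run/commit pattern, simplified to byte counts, with made-up encodings and helper names.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns the size of the instruction; only writes when buf != NULL. */
static size_t emit_insn(uint8_t *buf, size_t off, const void *enc, size_t n)
{
        if (buf)                        /* dry run when buf == NULL */
                memcpy(buf + off, enc, n);
        return n;
}

static size_t gen_body(uint8_t *buf)
{
        static const uint8_t mov[4]  = { 0x0a, 0x24, 0x80, 0x70 }; /* arbitrary */
        static const uint8_t jump[2] = { 0xe0, 0x7e };             /* arbitrary */
        size_t len = 0;

        len += emit_insn(buf, len, mov, sizeof(mov));
        len += emit_insn(buf, len, jump, sizeof(jump));
        return len;
}

int main(void)
{
        size_t want = gen_body(NULL);          /* "prepare": size only   */
        uint8_t *image = malloc(want);

        if (!image)
                return 1;

        size_t got = gen_body(image);          /* "compile": really emit */
        assert(got == want);                   /* the final sanity check */

        printf("emitted %zu bytes\n", got);
        free(image);
        return 0;
}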
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ee5115252aac..a867a7d967aa 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -34,6 +34,7 @@ config ARM
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
+ select ARCH_NEED_CMPXCHG_1_EMU if CPU_V6
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
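The new ARCH_NEED_CMPXCHG_1_EMU selection covers CPU_V6 because plain ARMv6 only has word-sized LDREX/STREX (the byte and halfword variants arrived with ARMv6K), so a 1-byte cmpxchg has to be emulated on top of the 32-bit primitive by operating on the containing aligned word. The following is a hedged userspace sketch of that masking approach, assuming a little-endian byte-lane layout and GCC __atomic builtins; it is not the kernel's helper verbatim.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 32-bit compare-and-exchange returning the value actually observed. */
static uint32_t cmpxchg32(uint32_t *p, uint32_t old, uint32_t new32)
{
        __atomic_compare_exchange_n(p, &old, new32, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old;
}

/* Emulated 1-byte cmpxchg: CAS the aligned word containing *p while
 * touching only the byte lane of interest (little-endian layout assumed). */
static uint8_t cmpxchg8_emulated(uint8_t *p, uint8_t old, uint8_t new8)
{
        uintptr_t addr = (uintptr_t)p;
        uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)3);
        unsigned int shift = (addr & 3) * 8;
        uint32_t mask = 0xffu << shift;

        for (;;) {
                uint32_t cur = __atomic_load_n(word, __ATOMIC_RELAXED);
                uint8_t seen = (uint8_t)((cur & mask) >> shift);

                if (seen != old)
                        return seen;            /* comparison failed */

                uint32_t next = (cur & ~mask) | ((uint32_t)new8 << shift);

                if (cmpxchg32(word, cur, next) == cur)
                        return old;             /* byte swapped in */
                /* a neighbouring byte changed under us: retry */
        }
}

int main(void)
{
        static uint32_t storage = 0x44332211;
        uint8_t *bytes = (uint8_t *)&storage;

        /* On little-endian, bytes[1] is 0x22; swap it for 0xaa. */
        printf("old byte: 0x%02x\n", cmpxchg8_emulated(&bytes[1], 0x22, 0xaa));
        printf("word now: 0x%08x\n", storage);  /* 0x4433aa11 on little-endian */
        return 0;
}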
diff --git a/arch/arm/boot/dts/allwinner/Makefile b/arch/arm/boot/dts/allwinner/Makefile
index 4247f19b1adc..cd0d044882cf 100644
--- a/arch/arm/boot/dts/allwinner/Makefile
+++ b/arch/arm/boot/dts/allwinner/Makefile
@@ -261,68 +261,6 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-v3s-licheepi-zero.dtb \
sun8i-v3s-licheepi-zero-dock.dtb \
sun8i-v40-bananapi-m2-berry.dtb
-dtb-$(CONFIG_MACH_SUN8I) += \
- sun8i-a23-evb.dtb \
- sun8i-a23-gt90h-v4.dtb \
- sun8i-a23-inet86dz.dtb \
- sun8i-a23-ippo-q8h-v5.dtb \
- sun8i-a23-ippo-q8h-v1.2.dtb \
- sun8i-a23-polaroid-mid2407pxe03.dtb \
- sun8i-a23-polaroid-mid2809pxe04.dtb \
- sun8i-a23-q8-tablet.dtb \
- sun8i-a33-et-q8-v1.6.dtb \
- sun8i-a33-ga10h-v1.1.dtb \
- sun8i-a33-inet-d978-rev2.dtb \
- sun8i-a33-ippo-q8h-v1.2.dtb \
- sun8i-a33-olinuxino.dtb \
- sun8i-a33-q8-tablet.dtb \
- sun8i-a33-sinlinx-sina33.dtb \
- sun8i-a83t-allwinner-h8homlet-v2.dtb \
- sun8i-a83t-bananapi-m3.dtb \
- sun8i-a83t-cubietruck-plus.dtb \
- sun8i-a83t-tbs-a711.dtb \
- sun8i-h2-plus-bananapi-m2-zero.dtb \
- sun8i-h2-plus-libretech-all-h3-cc.dtb \
- sun8i-h2-plus-orangepi-r1.dtb \
- sun8i-h2-plus-orangepi-zero.dtb \
- sun8i-h3-bananapi-m2-plus.dtb \
- sun8i-h3-bananapi-m2-plus-v1.2.dtb \
- sun8i-h3-beelink-x2.dtb \
- sun8i-h3-libretech-all-h3-cc.dtb \
- sun8i-h3-mapleboard-mp130.dtb \
- sun8i-h3-nanopi-duo2.dtb \
- sun8i-h3-nanopi-m1.dtb\
- \
- sun8i-h3-nanopi-m1-plus.dtb \
- sun8i-h3-nanopi-neo.dtb \
- sun8i-h3-nanopi-neo-air.dtb \
- sun8i-h3-nanopi-r1.dtb \
- sun8i-h3-orangepi-2.dtb \
- sun8i-h3-orangepi-lite.dtb \
- sun8i-h3-orangepi-one.dtb \
- sun8i-h3-orangepi-pc.dtb \
- sun8i-h3-orangepi-pc-plus.dtb \
- sun8i-h3-orangepi-plus.dtb \
- sun8i-h3-orangepi-plus2e.dtb \
- sun8i-h3-orangepi-zero-plus2.dtb \
- sun8i-h3-rervision-dvk.dtb \
- sun8i-h3-zeropi.dtb \
- sun8i-h3-emlid-neutis-n5h3-devboard.dtb \
- sun8i-r16-bananapi-m2m.dtb \
- sun8i-r16-nintendo-nes-classic.dtb \
- sun8i-r16-nintendo-super-nes-classic.dtb \
- sun8i-r16-parrot.dtb \
- sun8i-r40-bananapi-m2-ultra.dtb \
- sun8i-r40-oka40i-c.dtb \
- sun8i-s3-elimo-initium.dtb \
- sun8i-s3-lichee-zero-plus.dtb \
- sun8i-s3-pinecube.dtb \
- sun8i-t113s-mangopi-mq-r-t113.dtb \
- sun8i-t3-cqa3t-bv3.dtb \
- sun8i-v3-sl631-imx179.dtb \
- sun8i-v3s-licheepi-zero.dtb \
- sun8i-v3s-licheepi-zero-dock.dtb \
- sun8i-v40-bananapi-m2-berry.dtb
dtb-$(CONFIG_MACH_SUN9I) += \
sun9i-a80-optimus.dtb \
sun9i-a80-cubieboard4.dtb
diff --git a/arch/arm/boot/dts/arm/arm-realview-eb-bbrevd.dtsi b/arch/arm/boot/dts/arm/arm-realview-eb-bbrevd.dtsi
index a79e1d1d30a7..7f62aef9ca8a 100644
--- a/arch/arm/boot/dts/arm/arm-realview-eb-bbrevd.dtsi
+++ b/arch/arm/boot/dts/arm/arm-realview-eb-bbrevd.dtsi
@@ -22,7 +22,7 @@
/ {
/* Introduce a fixed regulator for the new ethernet controller */
- veth: fixedregulator@0 {
+ veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/arm/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm/arm-realview-eb.dtsi
index fbb2258b451f..16f784da5a55 100644
--- a/arch/arm/boot/dts/arm/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm/arm-realview-eb.dtsi
@@ -45,7 +45,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
- vmmc: fixedregulator@0 {
+ vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@@ -53,13 +53,13 @@
regulator-boot-on;
};
- xtal24mhz: xtal24mhz@24M {
+ xtal24mhz: mclk: kmiclk: sspclk: uartclk: wdogclk: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
- timclk: timclk@1M {
+ timclk: clock-1000000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <24>;
@@ -67,48 +67,8 @@
clocks = <&xtal24mhz>;
};
- mclk: mclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- kmiclk: kmiclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- sspclk: sspclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- uartclk: uartclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- wdogclk: wdogclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
/* FIXME: this actually hangs off the PLL clocks */
- pclk: pclk@0 {
+ pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <0>;
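Most of the clock churn in these RealView/Versatile/Integrator dts files collapses fixed-factor-clock nodes with clock-div = <1> and clock-mult = <1> into extra labels on the 24 MHz crystal node: a 1:1 fixed factor reports exactly its parent's rate, so a separate node adds nothing, while real dividers such as timclk (divide by 24) keep nodes of their own. A small plain-C model of that rate propagation follows; the helper is hypothetical and is not the kernel clock framework.

#include <stdio.h>

struct fixed_clk {
        const char *name;
        unsigned long rate;              /* Hz, used only for root clocks */
        const struct fixed_clk *parent;
        unsigned int mult, div;          /* used when parent is non-NULL  */
};

static unsigned long clk_rate(const struct fixed_clk *c)
{
        if (!c->parent)
                return c->rate;
        return clk_rate(c->parent) * c->mult / c->div;
}

int main(void)
{
        const struct fixed_clk xtal24mhz = { "xtal24mhz", 24000000, NULL, 1, 1 };
        const struct fixed_clk uartclk   = { "uartclk", 0, &xtal24mhz, 1, 1 };
        const struct fixed_clk timclk    = { "timclk",  0, &xtal24mhz, 1, 24 };

        printf("%s: %lu Hz\n", xtal24mhz.name, clk_rate(&xtal24mhz));
        printf("%s: %lu Hz (same as parent, hence just an extra label)\n",
               uartclk.name, clk_rate(&uartclk));
        printf("%s: %lu Hz\n", timclk.name, clk_rate(&timclk));
        return 0;
}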
diff --git a/arch/arm/boot/dts/arm/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
index d99bac02232b..b9b10cbd65aa 100644
--- a/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
@@ -63,13 +63,13 @@
regulator-boot-on;
};
- xtal24mhz: xtal24mhz@24M {
+ xtal24mhz: mclk: kmiclk: sspclk: uartclk: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
- timclk: timclk@1M {
+ timclk: clock-1000000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <24>;
@@ -77,40 +77,8 @@
clocks = <&xtal24mhz>;
};
- mclk: mclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- kmiclk: kmiclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- sspclk: sspclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- uartclk: uartclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
/* FIXME: this actually hangs off the PLL clocks */
- pclk: pclk@0 {
+ pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <0>;
diff --git a/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts
index 89103d54ecc1..ce35748f3d25 100644
--- a/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts
@@ -163,19 +163,19 @@
regulator-boot-on;
};
- xtal24mhz: xtal24mhz@24M {
+ xtal24mhz: mclk: kmiclk: sspclk: uartclk: wdogclk: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
- refclk32khz: refclk32khz {
+ refclk32khz: clock-32768 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
};
- timclk: timclk@1M {
+ timclk: clock-1000000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <24>;
@@ -183,48 +183,8 @@
clocks = <&xtal24mhz>;
};
- mclk: mclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- kmiclk: kmiclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- sspclk: sspclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- uartclk: uartclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- wdogclk: wdogclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
/* FIXME: this actually hangs off the PLL clocks */
- pclk: pclk@0 {
+ pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <0>;
diff --git a/arch/arm/boot/dts/arm/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm/arm-realview-pbx.dtsi
index ec1507c5147c..e625403a9456 100644
--- a/arch/arm/boot/dts/arm/arm-realview-pbx.dtsi
+++ b/arch/arm/boot/dts/arm/arm-realview-pbx.dtsi
@@ -62,19 +62,19 @@
regulator-boot-on;
};
- xtal24mhz: xtal24mhz@24M {
+ xtal24mhz: mclk: kmiclk: sspclk: uartclk: wdogclk: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
- refclk32khz: refclk32khz {
+ refclk32khz: clock-32768 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
};
- timclk: timclk@1M {
+ timclk: clock-1000000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <24>;
@@ -82,48 +82,8 @@
clocks = <&xtal24mhz>;
};
- mclk: mclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- kmiclk: kmiclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- sspclk: sspclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- uartclk: uartclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
- wdogclk: wdogclk@24M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
/* FIXME: this actually hangs off the PLL clocks */
- pclk: pclk@0 {
+ pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <0>;
diff --git a/arch/arm/boot/dts/arm/integratorap-im-pd1.dts b/arch/arm/boot/dts/arm/integratorap-im-pd1.dts
index 367850ea0912..db13e09f2fab 100644
--- a/arch/arm/boot/dts/arm/integratorap-im-pd1.dts
+++ b/arch/arm/boot/dts/arm/integratorap-im-pd1.dts
@@ -54,7 +54,7 @@
};
/* Also used for the Smart Card Interface SCI */
- impd1_uartclk: clock@1_4 {
+ impd1_uartclk: clock-uart {
compatible = "fixed-factor-clock";
#clock-cells = <0>;
clock-div = <4>;
@@ -64,7 +64,7 @@
};
/* For the SSP the clock is divided by 64 */
- impd1_sspclk: clock@1_64 {
+ impd1_sspclk: clock-ssp {
compatible = "fixed-factor-clock";
#clock-cells = <0>;
clock-div = <64>;
diff --git a/arch/arm/boot/dts/arm/integratorap.dts b/arch/arm/boot/dts/arm/integratorap.dts
index d9927d3181dc..9b6a1dbaf265 100644
--- a/arch/arm/boot/dts/arm/integratorap.dts
+++ b/arch/arm/boot/dts/arm/integratorap.dts
@@ -57,22 +57,14 @@
};
/* 24 MHz chrystal on the Integrator/AP development board */
- xtal24mhz: xtal24mhz@24M {
+ xtal24mhz: pclk: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
- pclk: pclk@0 {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&xtal24mhz>;
- };
-
/* The UART clock is 14.74 MHz divided by an ICS525 */
- uartclk: uartclk@14.74M {
+ uartclk: clock-14745600 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <14745600>;
@@ -81,7 +73,7 @@
core-module@10000000 {
/* 24 MHz chrystal on the core module */
- cm24mhz: cm24mhz@24M {
+ cm24mhz: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
diff --git a/arch/arm/boot/dts/arm/integratorcp.dts b/arch/arm/boot/dts/arm/integratorcp.dts
index c011333eb165..8ad1a8957ace 100644
--- a/arch/arm/boot/dts/arm/integratorcp.dts
+++ b/arch/arm/boot/dts/arm/integratorcp.dts
@@ -47,14 +47,14 @@
*/
/* The codec chrystal operates at 24.576 MHz */
- xtal_codec: xtal24.576@24.576M {
+ xtal_codec: clock-24576000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24576000>;
};
/* The chrystal is divided by 2 by the codec for the AACI bit clock */
- aaci_bitclk: aaci_bitclk@12.288M {
+ aaci_bitclk: clock-12288000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <2>;
@@ -63,21 +63,21 @@
};
/* This is a 25MHz chrystal on the base board */
- xtal25mhz: xtal25mhz@25M {
+ xtal25mhz: clock-25000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <25000000>;
};
/* The UART clock is 14.74 MHz divided from 25MHz by an ICS525 */
- uartclk: uartclk@14.74M {
+ uartclk: clock-14745600 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <14745600>;
};
/* Actually sysclk I think */
- pclk: pclk@0 {
+ pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <0>;
@@ -85,7 +85,7 @@
core-module@10000000 {
/* 24 MHz chrystal on the core module */
- cm24mhz: cm24mhz@24M {
+ cm24mhz: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
@@ -131,7 +131,7 @@
};
/* The timer clock is the 24 MHz oscillator divided to 1MHz */
- timclk: timclk@1M {
+ timclk: clock-1000000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <24>;
diff --git a/arch/arm/boot/dts/arm/mps2.dtsi b/arch/arm/boot/dts/arm/mps2.dtsi
index ce308820765b..e240bc8aa605 100644
--- a/arch/arm/boot/dts/arm/mps2.dtsi
+++ b/arch/arm/boot/dts/arm/mps2.dtsi
@@ -48,37 +48,37 @@
#address-cells = <1>;
#size-cells = <1>;
- oscclk0: clk-osc0 {
+ oscclk0: clock-50000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
};
- oscclk1: clk-osc1 {
+ oscclk1: clock-24576000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24576000>;
};
- oscclk2: clk-osc2 {
+ oscclk2: clock-25000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <25000000>;
};
- cfgclk: clk-cfg {
+ cfgclk: clock-5000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <5000000>;
};
- spicfgclk: clk-spicfg {
+ spicfgclk: clock-75000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <75000000>;
};
- sysclk: clk-sys {
+ sysclk: spiclcd: spicon: i2cclcd: i2caud: clock-sys {
compatible = "fixed-factor-clock";
clocks = <&oscclk0>;
#clock-cells = <0>;
@@ -86,7 +86,7 @@
clock-mult = <1>;
};
- audmclk: clk-audm {
+ audmclk: clk-12388000 {
compatible = "fixed-factor-clock";
clocks = <&oscclk1>;
#clock-cells = <0>;
@@ -94,7 +94,7 @@
clock-mult = <1>;
};
- audsclk: clk-auds {
+ audsclk: clk-3072000 {
compatible = "fixed-factor-clock";
clocks = <&oscclk1>;
#clock-cells = <0>;
@@ -102,38 +102,6 @@
clock-mult = <1>;
};
- spiclcd: clk-cpiclcd {
- compatible = "fixed-factor-clock";
- clocks = <&oscclk0>;
- #clock-cells = <0>;
- clock-div = <2>;
- clock-mult = <1>;
- };
-
- spicon: clk-spicon {
- compatible = "fixed-factor-clock";
- clocks = <&oscclk0>;
- #clock-cells = <0>;
- clock-div = <2>;
- clock-mult = <1>;
- };
-
- i2cclcd: clk-i2cclcd {
- compatible = "fixed-factor-clock";
- clocks = <&oscclk0>;
- #clock-cells = <0>;
- clock-div = <2>;
- clock-mult = <1>;
- };
-
- i2caud: clk-i2caud {
- compatible = "fixed-factor-clock";
- clocks = <&oscclk0>;
- #clock-cells = <0>;
- clock-div = <2>;
- clock-mult = <1>;
- };
-
soc {
compatible = "simple-bus";
ranges;
diff --git a/arch/arm/boot/dts/arm/versatile-ab.dts b/arch/arm/boot/dts/arm/versatile-ab.dts
index de45aa99e260..6fe6b49f5d8e 100644
--- a/arch/arm/boot/dts/arm/versatile-ab.dts
+++ b/arch/arm/boot/dts/arm/versatile-ab.dts
@@ -24,7 +24,7 @@
reg = <0x0 0x08000000>;
};
- xtal24mhz: xtal24mhz@24M {
+ xtal24mhz: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
@@ -142,14 +142,14 @@
};
/* OSC1 on AB, OSC4 on PB */
- osc1: cm_aux_osc@24M {
+ osc1: clock-osc {
#clock-cells = <0>;
compatible = "arm,versatile-cm-auxosc";
clocks = <&xtal24mhz>;
};
/* The timer clock is the 24 MHz oscillator divided to 1MHz */
- timclk: timclk@1M {
+ timclk: clock-1000000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <24>;
@@ -157,7 +157,7 @@
clocks = <&xtal24mhz>;
};
- pclk: pclk@24M {
+ pclk: clock-24000000 {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <1>;
diff --git a/arch/arm/boot/dts/arm/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/arm/vexpress-v2m-rs1.dtsi
index 8af4b77fe655..158b3923eae3 100644
--- a/arch/arm/boot/dts/arm/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/arm/vexpress-v2m-rs1.dtsi
@@ -20,7 +20,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
- v2m_fixed_3v3: fixed-regulator-0 {
+ v2m_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "3V3";
regulator-min-microvolt = <3300000>;
@@ -28,21 +28,21 @@
regulator-always-on;
};
- v2m_clk24mhz: clk24mhz {
+ v2m_clk24mhz: clock-24000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24000000>;
clock-output-names = "v2m:clk24mhz";
};
- v2m_refclk1mhz: refclk1mhz {
+ v2m_refclk1mhz: clock-1000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1000000>;
clock-output-names = "v2m:refclk1mhz";
};
- v2m_refclk32khz: refclk32khz {
+ v2m_refclk32khz: clock-32768 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
diff --git a/arch/arm/boot/dts/arm/vexpress-v2m.dtsi b/arch/arm/boot/dts/arm/vexpress-v2m.dtsi
index c5e92f6d2fcd..be03f2a8a57a 100644
--- a/arch/arm/boot/dts/arm/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/arm/vexpress-v2m.dtsi
@@ -351,7 +351,7 @@
};
};
- v2m_fixed_3v3: fixed-regulator-0 {
+ v2m_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "3V3";
regulator-min-microvolt = <3300000>;
@@ -359,21 +359,21 @@
regulator-always-on;
};
- v2m_clk24mhz: clk24mhz {
+ v2m_clk24mhz: clock-24000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24000000>;
clock-output-names = "v2m:clk24mhz";
};
- v2m_refclk1mhz: refclk1mhz {
+ v2m_refclk1mhz: clock-1000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1000000>;
clock-output-names = "v2m:refclk1mhz";
};
- v2m_refclk32khz: refclk32khz {
+ v2m_refclk32khz: clock-32768 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
@@ -436,7 +436,7 @@
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
- oscclk0 {
+ clock-controller-0 {
/* MCC static memory clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 0>;
@@ -445,7 +445,7 @@
clock-output-names = "v2m:oscclk0";
};
- v2m_oscclk1: oscclk1 {
+ v2m_oscclk1: clock-controller-1 {
/* CLCD clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 1>;
@@ -454,7 +454,7 @@
clock-output-names = "v2m:oscclk1";
};
- v2m_oscclk2: oscclk2 {
+ v2m_oscclk2: clock-controller-2 {
/* IO FPGA peripheral clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 2>;
@@ -463,7 +463,7 @@
clock-output-names = "v2m:oscclk2";
};
- volt-vio {
+ regulator-vio {
/* Logic level voltage */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 0>;
diff --git a/arch/arm/boot/dts/arm/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/arm/vexpress-v2p-ca15-tc1.dts
index 679537e17ff5..5a91e936edef 100644
--- a/arch/arm/boot/dts/arm/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/arm/vexpress-v2p-ca15-tc1.dts
@@ -142,7 +142,7 @@
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
- oscclk0 {
+ clock-controller-0 {
/* CPU PLL reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 0>;
@@ -151,7 +151,7 @@
clock-output-names = "oscclk0";
};
- oscclk4 {
+ clock-controller-4 {
/* Multiplexed AXI master clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 4>;
@@ -160,7 +160,7 @@
clock-output-names = "oscclk4";
};
- hdlcd_clk: oscclk5 {
+ hdlcd_clk: clock-controller-5 {
/* HDLCD PLL reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 5>;
@@ -169,7 +169,7 @@
clock-output-names = "oscclk5";
};
- smbclk: oscclk6 {
+ smbclk: clock-controller-6 {
/* SMB clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 6>;
@@ -178,7 +178,7 @@
clock-output-names = "oscclk6";
};
- sys_pll: oscclk7 {
+ sys_pll: clock-controller-7 {
/* SYS PLL reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 7>;
@@ -187,7 +187,7 @@
clock-output-names = "oscclk7";
};
- oscclk8 {
+ clock-controller-8 {
/* DDR2 PLL reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 8>;
@@ -196,7 +196,7 @@
clock-output-names = "oscclk8";
};
- volt-cores {
+ regulator-cores {
/* CPU core voltage */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 0>;
diff --git a/arch/arm/boot/dts/arm/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/arm/vexpress-v2p-ca15_a7.dts
index 511e87cc2bc5..6ef23c53d2d8 100644
--- a/arch/arm/boot/dts/arm/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/arm/vexpress-v2p-ca15_a7.dts
@@ -253,7 +253,7 @@
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
- oscclk0 {
+ clock-controller-0 {
/* A15 PLL 0 reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 0>;
@@ -262,7 +262,7 @@
clock-output-names = "oscclk0";
};
- oscclk1 {
+ clock-controller-1 {
/* A15 PLL 1 reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 1>;
@@ -271,7 +271,7 @@
clock-output-names = "oscclk1";
};
- oscclk2 {
+ clock-controller-2 {
/* A7 PLL 0 reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 2>;
@@ -280,7 +280,7 @@
clock-output-names = "oscclk2";
};
- oscclk3 {
+ clock-controller-3 {
/* A7 PLL 1 reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 3>;
@@ -289,7 +289,7 @@
clock-output-names = "oscclk3";
};
- oscclk4 {
+ clock-controller-4 {
/* External AXI master clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 4>;
@@ -298,7 +298,7 @@
clock-output-names = "oscclk4";
};
- hdlcd_clk: oscclk5 {
+ hdlcd_clk: clock-controller-5 {
/* HDLCD PLL reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 5>;
@@ -307,7 +307,7 @@
clock-output-names = "oscclk5";
};
- smbclk: oscclk6 {
+ smbclk: clock-controller-6 {
/* Static memory controller clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 6>;
@@ -316,7 +316,7 @@
clock-output-names = "oscclk6";
};
- oscclk7 {
+ clock-controller-7 {
/* SYS PLL reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 7>;
@@ -325,7 +325,7 @@
clock-output-names = "oscclk7";
};
- oscclk8 {
+ clock-controller-8 {
/* DDR2 PLL reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 8>;
@@ -334,7 +334,7 @@
clock-output-names = "oscclk8";
};
- volt-a15 {
+ regulator-a15 {
/* A15 CPU core voltage */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 0>;
@@ -345,7 +345,7 @@
label = "A15 Vcore";
};
- volt-a7 {
+ regulator-a7 {
/* A7 CPU core voltage */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 1>;
diff --git a/arch/arm/boot/dts/arm/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/arm/vexpress-v2p-ca5s.dts
index ff1f9a1bcfcf..e3896253f33e 100644
--- a/arch/arm/boot/dts/arm/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/arm/vexpress-v2p-ca5s.dts
@@ -145,7 +145,7 @@
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
- cpu_clk: oscclk0 {
+ cpu_clk: clock-controller-0 {
/* CPU and internal AXI reference clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 0>;
@@ -154,7 +154,7 @@
clock-output-names = "oscclk0";
};
- axi_clk: oscclk1 {
+ axi_clk: clock-controller-1 {
/* Multiplexed AXI master clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 1>;
@@ -163,7 +163,7 @@
clock-output-names = "oscclk1";
};
- oscclk2 {
+ clock-controller-2 {
/* DDR2 */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 2>;
@@ -172,7 +172,7 @@
clock-output-names = "oscclk2";
};
- hdlcd_clk: oscclk3 {
+ hdlcd_clk: clock-controller-3 {
/* HDLCD */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 3>;
@@ -181,7 +181,7 @@
clock-output-names = "oscclk3";
};
- oscclk4 {
+ clock-controller-4 {
/* Test chip gate configuration */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 4>;
@@ -190,7 +190,7 @@
clock-output-names = "oscclk4";
};
- smbclk: oscclk5 {
+ smbclk: clock-controller-5 {
/* SMB clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 5>;
diff --git a/arch/arm/boot/dts/arm/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/arm/vexpress-v2p-ca9.dts
index 8bf35666412b..43a5a4ab6ff0 100644
--- a/arch/arm/boot/dts/arm/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/arm/vexpress-v2p-ca9.dts
@@ -187,7 +187,7 @@
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
- oscclk0: extsaxiclk {
+ oscclk0: clock-controller-0 {
/* ACLK clock to the AXI master port on the test chip */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 0>;
@@ -196,7 +196,7 @@
clock-output-names = "extsaxiclk";
};
- oscclk1: clcdclk {
+ oscclk1: clock-controller-1 {
/* Reference clock for the CLCD */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 1>;
@@ -205,7 +205,7 @@
clock-output-names = "clcdclk";
};
- smbclk: oscclk2: tcrefclk {
+ smbclk: oscclk2: clock-controller-2 {
/* Reference clock for the test chip internal PLLs */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 2>;
@@ -214,7 +214,7 @@
clock-output-names = "tcrefclk";
};
- volt-vd10 {
+ regulator-vd10 {
/* Test Chip internal logic voltage */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 0>;
@@ -223,7 +223,7 @@
label = "VD10";
};
- volt-vd10-s2 {
+ regulator-vd10-s2 {
/* PL310, L2 cache, RAM cell supply (not PL310 logic) */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 1>;
@@ -232,7 +232,7 @@
label = "VD10_S2";
};
- volt-vd10-s3 {
+ regulator-vd10-s3 {
/* Cortex-A9 system supply, Cores, MPEs, SCU and PL310 logic */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 2>;
@@ -241,7 +241,7 @@
label = "VD10_S3";
};
- volt-vcc1v8 {
+ regulator-vcc1v8 {
/* DDR2 SDRAM and Test Chip DDR2 I/O supply */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 3>;
@@ -250,7 +250,7 @@
label = "VCC1V8";
};
- volt-ddr2vtt {
+ regulator-ddr2vtt {
/* DDR2 SDRAM VTT termination voltage */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 4>;
@@ -259,7 +259,7 @@
label = "DDR2VTT";
};
- volt-vcc3v3 {
+ regulator-vcc3v3 {
/* Local board supply for miscellaneous logic external to the Test Chip */
arm,vexpress-sysreg,func = <2 5>;
compatible = "arm,vexpress-volt";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
index 857cb26ed6d7..c669ec202085 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
@@ -463,7 +463,7 @@
interrupt-controller;
};
- i2c0: i2c-bus@40 {
+ i2c0: i2c@40 {
#address-cells = <1>;
#size-cells = <0>;
@@ -478,7 +478,7 @@
/* Does not need pinctrl properties */
};
- i2c1: i2c-bus@80 {
+ i2c1: i2c@80 {
#address-cells = <1>;
#size-cells = <0>;
@@ -493,7 +493,7 @@
/* Does not need pinctrl properties */
};
- i2c2: i2c-bus@c0 {
+ i2c2: i2c@c0 {
#address-cells = <1>;
#size-cells = <0>;
@@ -509,7 +509,7 @@
status = "disabled";
};
- i2c3: i2c-bus@100 {
+ i2c3: i2c@100 {
#address-cells = <1>;
#size-cells = <0>;
@@ -525,7 +525,7 @@
status = "disabled";
};
- i2c4: i2c-bus@140 {
+ i2c4: i2c@140 {
#address-cells = <1>;
#size-cells = <0>;
@@ -541,7 +541,7 @@
status = "disabled";
};
- i2c5: i2c-bus@180 {
+ i2c5: i2c@180 {
#address-cells = <1>;
#size-cells = <0>;
@@ -557,7 +557,7 @@
status = "disabled";
};
- i2c6: i2c-bus@1c0 {
+ i2c6: i2c@1c0 {
#address-cells = <1>;
#size-cells = <0>;
@@ -573,7 +573,7 @@
status = "disabled";
};
- i2c7: i2c-bus@300 {
+ i2c7: i2c@300 {
#address-cells = <1>;
#size-cells = <0>;
@@ -589,7 +589,7 @@
status = "disabled";
};
- i2c8: i2c-bus@340 {
+ i2c8: i2c@340 {
#address-cells = <1>;
#size-cells = <0>;
@@ -605,7 +605,7 @@
status = "disabled";
};
- i2c9: i2c-bus@380 {
+ i2c9: i2c@380 {
#address-cells = <1>;
#size-cells = <0>;
@@ -621,7 +621,7 @@
status = "disabled";
};
- i2c10: i2c-bus@3c0 {
+ i2c10: i2c@3c0 {
#address-cells = <1>;
#size-cells = <0>;
@@ -637,7 +637,7 @@
status = "disabled";
};
- i2c11: i2c-bus@400 {
+ i2c11: i2c@400 {
#address-cells = <1>;
#size-cells = <0>;
@@ -653,7 +653,7 @@
status = "disabled";
};
- i2c12: i2c-bus@440 {
+ i2c12: i2c@440 {
#address-cells = <1>;
#size-cells = <0>;
@@ -669,7 +669,7 @@
status = "disabled";
};
- i2c13: i2c-bus@480 {
+ i2c13: i2c@480 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
index e6f3cf3c721e..6e05cbcce49c 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
@@ -592,7 +592,7 @@
interrupt-controller;
};
- i2c0: i2c-bus@40 {
+ i2c0: i2c@40 {
#address-cells = <1>;
#size-cells = <0>;
@@ -607,7 +607,7 @@
/* Does not need pinctrl properties */
};
- i2c1: i2c-bus@80 {
+ i2c1: i2c@80 {
#address-cells = <1>;
#size-cells = <0>;
@@ -622,7 +622,7 @@
/* Does not need pinctrl properties */
};
- i2c2: i2c-bus@c0 {
+ i2c2: i2c@c0 {
#address-cells = <1>;
#size-cells = <0>;
@@ -638,7 +638,7 @@
status = "disabled";
};
- i2c3: i2c-bus@100 {
+ i2c3: i2c@100 {
#address-cells = <1>;
#size-cells = <0>;
@@ -654,7 +654,7 @@
status = "disabled";
};
- i2c4: i2c-bus@140 {
+ i2c4: i2c@140 {
#address-cells = <1>;
#size-cells = <0>;
@@ -670,7 +670,7 @@
status = "disabled";
};
- i2c5: i2c-bus@180 {
+ i2c5: i2c@180 {
#address-cells = <1>;
#size-cells = <0>;
@@ -686,7 +686,7 @@
status = "disabled";
};
- i2c6: i2c-bus@1c0 {
+ i2c6: i2c@1c0 {
#address-cells = <1>;
#size-cells = <0>;
@@ -702,7 +702,7 @@
status = "disabled";
};
- i2c7: i2c-bus@300 {
+ i2c7: i2c@300 {
#address-cells = <1>;
#size-cells = <0>;
@@ -718,7 +718,7 @@
status = "disabled";
};
- i2c8: i2c-bus@340 {
+ i2c8: i2c@340 {
#address-cells = <1>;
#size-cells = <0>;
@@ -734,7 +734,7 @@
status = "disabled";
};
- i2c9: i2c-bus@380 {
+ i2c9: i2c@380 {
#address-cells = <1>;
#size-cells = <0>;
@@ -750,7 +750,7 @@
status = "disabled";
};
- i2c10: i2c-bus@3c0 {
+ i2c10: i2c@3c0 {
#address-cells = <1>;
#size-cells = <0>;
@@ -766,7 +766,7 @@
status = "disabled";
};
- i2c11: i2c-bus@400 {
+ i2c11: i2c@400 {
#address-cells = <1>;
#size-cells = <0>;
@@ -782,7 +782,7 @@
status = "disabled";
};
- i2c12: i2c-bus@440 {
+ i2c12: i2c@440 {
#address-cells = <1>;
#size-cells = <0>;
@@ -798,7 +798,7 @@
status = "disabled";
};
- i2c13: i2c-bus@480 {
+ i2c13: i2c@480 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
index 7fb421153596..0c00882f111a 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
@@ -905,7 +905,7 @@
#include "aspeed-g6-pinctrl.dtsi"
&i2c {
- i2c0: i2c-bus@80 {
+ i2c0: i2c@80 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x80 0x80>;
@@ -919,7 +919,7 @@
status = "disabled";
};
- i2c1: i2c-bus@100 {
+ i2c1: i2c@100 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x100 0x80>;
@@ -933,7 +933,7 @@
status = "disabled";
};
- i2c2: i2c-bus@180 {
+ i2c2: i2c@180 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x180 0x80>;
@@ -947,7 +947,7 @@
status = "disabled";
};
- i2c3: i2c-bus@200 {
+ i2c3: i2c@200 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x200 0x80>;
@@ -961,7 +961,7 @@
status = "disabled";
};
- i2c4: i2c-bus@280 {
+ i2c4: i2c@280 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x280 0x80>;
@@ -975,7 +975,7 @@
status = "disabled";
};
- i2c5: i2c-bus@300 {
+ i2c5: i2c@300 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x300 0x80>;
@@ -989,7 +989,7 @@
status = "disabled";
};
- i2c6: i2c-bus@380 {
+ i2c6: i2c@380 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x380 0x80>;
@@ -1003,7 +1003,7 @@
status = "disabled";
};
- i2c7: i2c-bus@400 {
+ i2c7: i2c@400 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x400 0x80>;
@@ -1017,7 +1017,7 @@
status = "disabled";
};
- i2c8: i2c-bus@480 {
+ i2c8: i2c@480 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x480 0x80>;
@@ -1031,7 +1031,7 @@
status = "disabled";
};
- i2c9: i2c-bus@500 {
+ i2c9: i2c@500 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x500 0x80>;
@@ -1045,7 +1045,7 @@
status = "disabled";
};
- i2c10: i2c-bus@580 {
+ i2c10: i2c@580 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x580 0x80>;
@@ -1059,7 +1059,7 @@
status = "disabled";
};
- i2c11: i2c-bus@600 {
+ i2c11: i2c@600 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x600 0x80>;
@@ -1073,7 +1073,7 @@
status = "disabled";
};
- i2c12: i2c-bus@680 {
+ i2c12: i2c@680 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x680 0x80>;
@@ -1087,7 +1087,7 @@
status = "disabled";
};
- i2c13: i2c-bus@700 {
+ i2c13: i2c@700 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x700 0x80>;
@@ -1101,7 +1101,7 @@
status = "disabled";
};
- i2c14: i2c-bus@780 {
+ i2c14: i2c@780 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x780 0x80>;
@@ -1115,7 +1115,7 @@
status = "disabled";
};
- i2c15: i2c-bus@800 {
+ i2c15: i2c@800 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x800 0x80>;
diff --git a/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts b/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts
index 7fb532f227af..808cd5778e27 100644
--- a/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts
+++ b/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts
@@ -30,7 +30,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: 320x240 {
+ timing0: timing-320x240 {
hactive = <320>;
hback-porch = <0>;
hfront-porch = <0>;
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-nslu2.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-nslu2.dts
index 2eec5f63d399..2f7c34c649ea 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-nslu2.dts
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-linksys-nslu2.dts
@@ -90,11 +90,18 @@
timeout-ms = <5000>;
};
- gpio-beeper {
- compatible = "gpio-beeper";
+ gpio_pwm: pwm {
+ #pwm-cells = <3>;
+ compatible = "pwm-gpio";
gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
};
+ beeper {
+ compatible = "pwm-beeper";
+ pwms = <&gpio_pwm 0 1 0>;
+ beeper-hz = <1000>;
+ };
+
soc {
bus@c4000000 {
/* The first 16MB region at CS0 on the expansion bus */
diff --git a/arch/arm/boot/dts/marvell/armada-370-xp.dtsi b/arch/arm/boot/dts/marvell/armada-370-xp.dtsi
index 0b8c2a64b36f..954c891e5aee 100644
--- a/arch/arm/boot/dts/marvell/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/marvell/armada-370-xp.dtsi
@@ -168,7 +168,6 @@
mpic: interrupt-controller@20a00 {
compatible = "marvell,mpic";
#interrupt-cells = <1>;
- #size-cells = <1>;
interrupt-controller;
msi-controller;
};
diff --git a/arch/arm/boot/dts/marvell/armada-375.dtsi b/arch/arm/boot/dts/marvell/armada-375.dtsi
index ddc49547d786..99778b4b7e7b 100644
--- a/arch/arm/boot/dts/marvell/armada-375.dtsi
+++ b/arch/arm/boot/dts/marvell/armada-375.dtsi
@@ -376,7 +376,6 @@
compatible = "marvell,mpic";
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
#interrupt-cells = <1>;
- #size-cells = <1>;
interrupt-controller;
msi-controller;
interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/marvell/armada-385-atl-x530.dts b/arch/arm/boot/dts/marvell/armada-385-atl-x530.dts
index 5a9ab8410b7b..2fb7304039be 100644
--- a/arch/arm/boot/dts/marvell/armada-385-atl-x530.dts
+++ b/arch/arm/boot/dts/marvell/armada-385-atl-x530.dts
@@ -43,6 +43,17 @@
};
};
};
+
+ led-7seg {
+ compatible = "gpio-7-segment";
+ segment-gpios = <&led_7seg_gpio 0 GPIO_ACTIVE_LOW>,
+ <&led_7seg_gpio 1 GPIO_ACTIVE_LOW>,
+ <&led_7seg_gpio 2 GPIO_ACTIVE_LOW>,
+ <&led_7seg_gpio 3 GPIO_ACTIVE_LOW>,
+ <&led_7seg_gpio 4 GPIO_ACTIVE_LOW>,
+ <&led_7seg_gpio 5 GPIO_ACTIVE_LOW>,
+ <&led_7seg_gpio 6 GPIO_ACTIVE_LOW>;
+ };
};
&pciec {
@@ -149,7 +160,7 @@
#size-cells = <0>;
reg = <3>;
- gpio@20 {
+ led_7seg_gpio: gpio@20 {
compatible = "nxp,pca9554";
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/marvell/armada-385-turris-omnia.dts b/arch/arm/boot/dts/marvell/armada-385-turris-omnia.dts
index 7b755bb4e4e7..43202890c959 100644
--- a/arch/arm/boot/dts/marvell/armada-385-turris-omnia.dts
+++ b/arch/arm/boot/dts/marvell/armada-385-turris-omnia.dts
@@ -112,6 +112,19 @@
status = "disabled";
};
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ front-button {
+ label = "Front Button";
+ linux,code = <KEY_VENDOR>;
+ linux,can-disable;
+ gpios = <&mcu 0 12 GPIO_ACTIVE_HIGH>;
+ /* debouncing is done by the microcontroller */
+ debounce-interval = <0>;
+ };
+ };
+
sound {
compatible = "simple-audio-card";
simple-audio-card,name = "SPDIF";
@@ -218,7 +231,22 @@
#size-cells = <0>;
reg = <0>;
- /* STM32F0 command interface at address 0x2a */
+ mcu: system-controller@2a {
+ compatible = "cznic,turris-omnia-mcu";
+ reg = <0x2a>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_pins>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <11 IRQ_TYPE_NONE>;
+
+ gpio-controller;
+ #gpio-cells = <3>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
led-controller@2b {
compatible = "cznic,turris-omnia-leds";
@@ -501,6 +529,11 @@
};
&pinctrl {
+ mcu_pins: mcu-pins {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+
pcawan_pins: pcawan-pins {
marvell,pins = "mpp46";
marvell,function = "gpio";
diff --git a/arch/arm/boot/dts/marvell/armada-38x.dtsi b/arch/arm/boot/dts/marvell/armada-38x.dtsi
index 446861b6b17b..1181b13deabc 100644
--- a/arch/arm/boot/dts/marvell/armada-38x.dtsi
+++ b/arch/arm/boot/dts/marvell/armada-38x.dtsi
@@ -408,7 +408,6 @@
compatible = "marvell,mpic";
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
#interrupt-cells = <1>;
- #size-cells = <1>;
interrupt-controller;
msi-controller;
interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/marvell/armada-39x.dtsi b/arch/arm/boot/dts/marvell/armada-39x.dtsi
index 9d1cac49c022..6d05835efb42 100644
--- a/arch/arm/boot/dts/marvell/armada-39x.dtsi
+++ b/arch/arm/boot/dts/marvell/armada-39x.dtsi
@@ -268,7 +268,6 @@
compatible = "marvell,mpic";
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
#interrupt-cells = <1>;
- #size-cells = <1>;
interrupt-controller;
msi-controller;
interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-blackarmor-nas220.dts b/arch/arm/boot/dts/marvell/kirkwood-blackarmor-nas220.dts
index 07fbfca444d5..36b90c632fd6 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-blackarmor-nas220.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-blackarmor-nas220.dts
@@ -35,13 +35,13 @@
gpio_keys {
compatible = "gpio-keys";
- reset {
+ button-reset {
label = "Reset";
linux,code = <KEY_POWER>;
gpios = <&gpio0 29 GPIO_ACTIVE_HIGH>;
};
- button {
+ button-power {
label = "Power";
linux,code = <KEY_SLEEP>;
gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
@@ -51,7 +51,7 @@
gpio-leds {
compatible = "gpio-leds";
- blue-power {
+ led-blue-power {
label = "nas220:blue:power";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "default-on";
diff --git a/arch/arm/boot/dts/marvell/kirkwood-c200-v1.dts b/arch/arm/boot/dts/marvell/kirkwood-c200-v1.dts
index f59ff7578dfc..7e3ee64d4bdf 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-c200-v1.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-c200-v1.dts
@@ -29,25 +29,25 @@
pinctrl-0 = <&pmx_buttons>;
pinctrl-names = "default";
- power {
+ button-power {
label = "Power Button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
};
- reset {
+ button-reset {
label = "Reset Button";
linux,code = <KEY_RESTART>;
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};
- usb1 {
+ button-usb1 {
label = "USB1 Button";
linux,code = <BTN_0>;
gpios = <&gpio0 28 GPIO_ACTIVE_LOW>;
};
- usb2 {
+ button-usb2 {
label = "USB2 Button";
linux,code = <BTN_1>;
gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-cloudbox.dts b/arch/arm/boot/dts/marvell/kirkwood-cloudbox.dts
index 448b0cd23b5f..151edcd140a0 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-cloudbox.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-cloudbox.dts
@@ -58,10 +58,8 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- power {
+ key-power {
label = "Power push button";
linux,code = <KEY_POWER>;
gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
@@ -71,11 +69,11 @@
gpio-leds {
compatible = "gpio-leds";
- red-fail {
+ led-red-fail {
label = "cloudbox:red:fail";
gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
};
- blue-sata {
+ led-blue-sata {
label = "cloudbox:blue:sata";
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-d2net.dts b/arch/arm/boot/dts/marvell/kirkwood-d2net.dts
index bd3b266dd766..fcce8730d3e3 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-d2net.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-d2net.dts
@@ -37,7 +37,7 @@
gpio-leds {
compatible = "gpio-leds";
- red-fail {
+ led-red-fail {
label = "d2net_v2:red:fail";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-dir665.dts b/arch/arm/boot/dts/marvell/kirkwood-dir665.dts
index 0c0851cd9bec..2f6793f794cd 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-dir665.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-dir665.dts
@@ -137,38 +137,38 @@
gpio-leds {
compatible = "gpio-leds";
- blue-usb {
+ led-blue-usb {
label = "dir665:blue:usb";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
- blue-internet {
+ led-blue-internet {
/* Can only be turned on if the Internet
* Ethernet port has Link
*/
label = "dir665:blue:internet";
gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
};
- amber-internet {
+ led-amber-internet {
label = "dir665:amber:internet";
gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
};
- blue-wifi5g {
+ led-blue-wifi5g {
label = "dir665:blue:5g";
gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
};
- blue-status {
+ led-blue-status {
label = "dir665:blue:status";
gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
};
- blue-wps {
+ led-blue-wps {
label = "dir665:blue:wps";
gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
};
- amber-status {
+ led-amber-status {
label = "dir665:amber:status";
gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
};
- blue-24g {
+ led-blue-24g {
label = "dir665:blue:24g";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};
@@ -176,15 +176,13 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- reset {
+ button-reset {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 28 GPIO_ACTIVE_LOW>;
};
- wps {
+ button-wps {
label = "wps";
linux,code = <KEY_WPS_BUTTON>;
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-dns320.dts b/arch/arm/boot/dts/marvell/kirkwood-dns320.dts
index d6b0f418fd01..d8279e0c4c4f 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-dns320.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-dns320.dts
@@ -24,24 +24,24 @@
&pmx_led_white_usb>;
pinctrl-names = "default";
- blue-power {
+ led-blue-power {
label = "dns320:blue:power";
gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
- blue-usb {
+ led-blue-usb {
label = "dns320:blue:usb";
gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
- orange-l_hdd {
+ led-orange-l_hdd {
label = "dns320:orange:l_hdd";
gpios = <&gpio0 28 GPIO_ACTIVE_LOW>;
};
- orange-r_hdd {
+ led-orange-r_hdd {
label = "dns320:orange:r_hdd";
gpios = <&gpio0 27 GPIO_ACTIVE_LOW>;
};
- orange-usb {
+ led-orange-usb {
label = "dns320:orange:usb";
gpios = <&gpio1 3 GPIO_ACTIVE_LOW>; /* GPIO 35 */
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-dns325.dts b/arch/arm/boot/dts/marvell/kirkwood-dns325.dts
index 94d9c06cbbf5..7f396195e977 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-dns325.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-dns325.dts
@@ -24,24 +24,24 @@
&pmx_led_white_usb>;
pinctrl-names = "default";
- white-power {
+ led-white-power {
label = "dns325:white:power";
gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
- white-usb {
+ led-white-usb {
label = "dns325:white:usb";
gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; /* GPIO 43 */
};
- red-l_hdd {
+ led-red-l_hdd {
label = "dns325:red:l_hdd";
gpios = <&gpio0 28 GPIO_ACTIVE_LOW>;
};
- red-r_hdd {
+ led-red-r_hdd {
label = "dns325:red:r_hdd";
gpios = <&gpio0 27 GPIO_ACTIVE_LOW>;
};
- red-usb {
+ led-red-usb {
label = "dns325:red:usb";
gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/marvell/kirkwood-dnskw.dtsi
index 0738eb679fcd..20bcd031f3f5 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-dnskw.dtsi
@@ -8,23 +8,21 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_button_power &pmx_button_unmount
&pmx_button_reset>;
pinctrl-names = "default";
- power {
+ button-power {
label = "Power button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
};
- eject {
+ button-eject {
label = "USB unmount button";
linux,code = <KEY_EJECTCD>;
gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset button";
linux,code = <KEY_RESTART>;
gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-dockstar.dts b/arch/arm/boot/dts/marvell/kirkwood-dockstar.dts
index 264938dfa4d9..090f1e2e5bb6 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-dockstar.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-dockstar.dts
@@ -42,12 +42,12 @@
pinctrl-0 = <&pmx_led_green &pmx_led_orange>;
pinctrl-names = "default";
- health {
+ led-health {
label = "status:green:health";
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
- fault {
+ led-fault {
label = "status:orange:fault";
gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-dreamplug.dts b/arch/arm/boot/dts/marvell/kirkwood-dreamplug.dts
index 328516351e84..590bee3c561c 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-dreamplug.dts
@@ -85,15 +85,15 @@
&pmx_led_wifi_ap >;
pinctrl-names = "default";
- bluetooth {
+ led-bluetooth {
label = "dreamplug:blue:bluetooth";
gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
};
- wifi {
+ led-wifi {
label = "dreamplug:green:wifi";
gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
};
- wifi-ap {
+ led-wifi-ap {
label = "dreamplug:green:wifi_ap";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-goflexnet.dts b/arch/arm/boot/dts/marvell/kirkwood-goflexnet.dts
index d4cb3cd3e2a2..d5ac4e3974da 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-goflexnet.dts
@@ -85,44 +85,44 @@
>;
pinctrl-names = "default";
- health {
+ led-health {
label = "status:green:health";
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
- fault {
+ led-fault {
label = "status:orange:fault";
gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
};
- left0 {
+ led-left0 {
label = "status:white:left0";
gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
};
- left1 {
+ led-left1 {
label = "status:white:left1";
gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
};
- left2 {
+ led-left2 {
label = "status:white:left2";
gpios = <&gpio1 12 GPIO_ACTIVE_HIGH>;
};
- left3 {
+ led-left3 {
label = "status:white:left3";
gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
};
- right0 {
+ led-right0 {
label = "status:white:right0";
gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
};
- right1 {
+ led-right1 {
label = "status:white:right1";
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
};
- right2 {
+ led-right2 {
label = "status:white:right2";
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
};
- right3 {
+ led-right3 {
label = "status:white:right3";
gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-guruplug-server-plus.dts b/arch/arm/boot/dts/marvell/kirkwood-guruplug-server-plus.dts
index dfb41393941d..d5aa8b505cc0 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-guruplug-server-plus.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-guruplug-server-plus.dts
@@ -59,19 +59,19 @@
&pmx_led_wmode_r &pmx_led_wmode_g >;
pinctrl-names = "default";
- health-r {
+ led-health-r {
label = "guruplug:red:health";
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
};
- health-g {
+ led-health-g {
label = "guruplug:green:health";
gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
};
- wmode-r {
+ led-wmode-r {
label = "guruplug:red:wmode";
gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
};
- wmode-g {
+ led-wmode-g {
label = "guruplug:green:wmode";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-ib62x0.dts b/arch/arm/boot/dts/marvell/kirkwood-ib62x0.dts
index 962a910a6f5c..018c6b8f3e8a 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-ib62x0.dts
@@ -58,17 +58,15 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_button_reset &pmx_button_usb_copy>;
pinctrl-names = "default";
- copy {
+ button-copy {
label = "USB Copy";
linux,code = <KEY_COPY>;
gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 28 GPIO_ACTIVE_LOW>;
@@ -81,16 +79,16 @@
&pmx_led_usb_transfer>;
pinctrl-names = "default";
- green-os {
+ led-green-os {
label = "ib62x0:green:os";
gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
default-state = "keep";
};
- red-os {
+ led-red-os {
label = "ib62x0:red:os";
gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
};
- usb-copy {
+ led-usb-copy {
label = "ib62x0:red:usb_copy";
gpios = <&gpio0 27 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-iconnect.dts b/arch/arm/boot/dts/marvell/kirkwood-iconnect.dts
index aed20185fd7a..91b46e77e0b6 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-iconnect.dts
@@ -89,32 +89,32 @@
gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- power-blue {
+ led-power-blue {
label = "power:blue";
gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
default-state = "keep";
};
- power-red {
+ led-power-red {
label = "power:red";
gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
};
- usb1 {
+ led-usb1 {
label = "usb1:blue";
gpios = <&gpio1 12 GPIO_ACTIVE_HIGH>;
};
- usb2 {
+ led-usb2 {
label = "usb2:blue";
gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
};
- usb3 {
+ led-usb3 {
label = "usb3:blue";
gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
};
- usb4 {
+ led-usb4 {
label = "usb4:blue";
gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
};
- otb {
+ led-otb {
label = "otb:blue";
gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
};
@@ -122,18 +122,16 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = < &pmx_button_reset &pmx_button_otb >;
pinctrl-names = "default";
- otb {
+ button-otb {
label = "OTB Button";
linux,code = <KEY_COPY>;
gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
debounce-interval = <100>;
};
- reset {
+ button-reset {
label = "Reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/marvell/kirkwood-iomega_ix2_200.dts
index 2338f495d517..039362152650 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-iomega_ix2_200.dts
@@ -127,44 +127,42 @@
&pmx_led_rebuild &pmx_led_health >;
pinctrl-names = "default";
- power_led {
+ led-power-led {
label = "status:white:power_led";
gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
default-state = "keep";
};
- rebuild_led {
+ led-rebuild-led {
label = "status:white:rebuild_led";
gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
};
- health_led {
+ led-health-led {
label = "status:red:health_led";
gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
};
- backup_led {
+ led-backup-led {
label = "status:blue:backup_led";
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
};
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_button_reset &pmx_button_power
&pmx_button_otb>;
pinctrl-names = "default";
- Power {
+ button-power {
label = "Power Button";
linux,code = <KEY_POWER>;
gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
};
- Reset {
+ button-reset {
label = "Reset Button";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
};
- OTB {
+ button-otb {
label = "OTB Button";
linux,code = <KEY_COPY>;
gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
index c841eb8e7fb1..974bc9de4702 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
@@ -97,52 +97,52 @@
leds {
compatible = "gpio-leds";
- status_green {
+ led-status-green {
label = "l-50:green:status";
gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
};
- status_red {
+ led-status-red {
label = "l-50:red:status";
gpios = <&gpio3 2 GPIO_ACTIVE_LOW>;
};
- wifi {
+ led-wifi {
label = "l-50:green:wifi";
gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
linux,default-trigger = "phy0tpt";
};
- internet_green {
+ led-internet-green {
label = "l-50:green:internet";
gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
};
- internet_red {
+ led-internet-red {
label = "l-50:red:internet";
gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
};
- usb1_green {
+ led-usb1-green {
label = "l-50:green:usb1";
gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
linux,default-trigger = "usbport";
trigger-sources = <&hub_port3>;
};
- usb1_red {
+ led-usb1-red {
label = "l-50:red:usb1";
gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
};
- usb2_green {
+ led-usb2-green {
label = "l-50:green:usb2";
gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
linux,default-trigger = "usbport";
trigger-sources = <&hub_port1>;
};
- usb2_red {
+ led-usb2-red {
label = "l-50:red:usb2";
gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
};
@@ -193,7 +193,7 @@
keys {
compatible = "gpio-keys";
- factory_defaults {
+ button-factory-defaults {
label = "factory_defaults";
gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
linux,code = <KEY_RESTART>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-laplug.dts b/arch/arm/boot/dts/marvell/kirkwood-laplug.dts
index 8c2b540eaf4f..90ea6cdee8e0 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-laplug.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-laplug.dts
@@ -51,7 +51,7 @@
gpio_keys {
compatible = "gpio-keys";
- power {
+ button-power {
label = "Power push button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
@@ -61,11 +61,11 @@
gpio-leds {
compatible = "gpio-leds";
- red-fail {
+ led-red-fail {
label = "laplug_v2:red:power";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
- blue-power {
+ led-blue-power {
label = "laplug_v2:blue:power";
gpios = <&gpio0 29 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "default-on";
diff --git a/arch/arm/boot/dts/marvell/kirkwood-linkstation.dtsi b/arch/arm/boot/dts/marvell/kirkwood-linkstation.dtsi
index b54c9980f636..8a11d2b9d449 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-linkstation.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-linkstation.dtsi
@@ -88,8 +88,6 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_button_function &pmx_power_switch
&pmx_power_auto_switch>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/marvell/kirkwood-linksys-viper.dts b/arch/arm/boot/dts/marvell/kirkwood-linksys-viper.dts
index 27fd6e2337d5..8a1c38ab6111 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-linksys-viper.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-linksys-viper.dts
@@ -33,18 +33,16 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = < &pmx_btn_wps &pmx_btn_reset >;
pinctrl-names = "default";
- wps {
+ button-wps {
label = "WPS Button";
linux,code = <KEY_WPS_BUTTON>;
gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset Button";
linux,code = <KEY_RESTART>;
gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
@@ -56,12 +54,12 @@
pinctrl-0 = < &pmx_led_white_health &pmx_led_white_pulse >;
pinctrl-names = "default";
- white-health {
+ led-white-health {
label = "viper:white:health";
gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
};
- white-pulse {
+ led-white-pulse {
label = "viper:white:pulse";
gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/marvell/kirkwood-lsxl.dtsi
index f80af24b9e90..5e0b139dd4fb 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-lsxl.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-lsxl.dtsi
@@ -107,24 +107,22 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_button_function &pmx_power_switch
&pmx_power_auto_switch>;
pinctrl-names = "default";
- option {
+ button-option {
label = "Function Button";
linux,code = <KEY_OPTION>;
gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
};
- reserved {
+ button-reserved {
label = "Power-on Switch";
linux,code = <KEY_RESERVED>;
linux,input-type = <5>;
gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
};
- power {
+ button-power {
label = "Power-auto Switch";
linux,code = <KEY_ESC>;
linux,input-type = <5>;
@@ -139,28 +137,28 @@
&pmx_led_function_blue>;
pinctrl-names = "default";
- func_blue {
+ led-func-blue {
label = "lsxl:blue:func";
gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
};
- alarm {
+ led-alarm {
label = "lsxl:red:alarm";
gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
};
- info {
+ led-info {
label = "lsxl:amber:info";
gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
};
- power {
+ led-power {
label = "lsxl:blue:power";
gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
- func_red {
+ led-func-red {
label = "lsxl:red:func";
gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-mplcec4.dts b/arch/arm/boot/dts/marvell/kirkwood-mplcec4.dts
index e87ea7146546..6533b49a15b2 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-mplcec4.dts
@@ -114,36 +114,36 @@
>;
pinctrl-names = "default";
- health {
+ led-health {
label = "status:green:health";
gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
};
- user1o {
+ led-user1o {
label = "user1:orange";
gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
default-state = "on";
};
- user1g {
+ led-user1g {
label = "user1:green";
gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
default-state = "on";
};
- user0o {
+ led-user0o {
label = "user0:orange";
gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
default-state = "on";
};
- user0g {
+ led-user0g {
label = "user0:green";
gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
default-state = "on";
};
- misc {
+ led-misc {
label = "status:orange:misc";
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
default-state = "on";
diff --git a/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts
index 5a77286136c7..e3b41784c876 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-mv88f6281gtw-ge.dts
@@ -73,17 +73,17 @@
pinctrl-0 = <&pmx_leds &pmx_usb_led>;
pinctrl-names = "default";
- green-status {
+ led-green-status {
label = "gtw:green:Status";
gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
};
- red-status {
+ led-red-status {
label = "gtw:red:Status";
gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
};
- green-usb {
+ led-green-usb {
label = "gtw:green:USB";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
@@ -91,17 +91,15 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_keys>;
pinctrl-names = "default";
- restart {
+ button-restart {
label = "SWR Button";
linux,code = <KEY_RESTART>;
gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
};
- wps {
+ button-wps {
label = "WPS Button";
linux,code = <KEY_WPS_BUTTON>;
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-netxbig.dtsi b/arch/arm/boot/dts/marvell/kirkwood-netxbig.dtsi
index b5737026e244..d4edf2727388 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-netxbig.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-netxbig.dtsi
@@ -53,26 +53,24 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
/*
* esc and power represent a three position rocker
* switch. Thus the conventional KEY_POWER does not fit
*/
- exc {
+ button-exc {
label = "Back power switch (on|auto)";
linux,code = <KEY_ESC>;
linux,input-type = <5>;
gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
};
- power {
+ button-power {
label = "Back power switch (auto|off)";
linux,code = <KEY_1>;
linux,input-type = <5>;
gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
};
- option {
+ button-option {
label = "Function button";
linux,code = <KEY_OPTION>;
gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/marvell/kirkwood-ns2-common.dtsi
index 51530ea86622..d6b615cf6390 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-ns2-common.dtsi
@@ -55,10 +55,8 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- power {
+ button-power {
label = "Power push button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
@@ -68,7 +66,7 @@
gpio-leds {
compatible = "gpio-leds";
- red-fail {
+ led-red-fail {
label = "ns2:red:fail";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-ns2lite.dts b/arch/arm/boot/dts/marvell/kirkwood-ns2lite.dts
index b0cb5907ed63..686bcd6f0f3c 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-ns2lite.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-ns2lite.dts
@@ -24,7 +24,7 @@
gpio-leds {
compatible = "gpio-leds";
- blue-sata {
+ led-blue-sata {
label = "ns2:blue:sata";
gpios = <&gpio0 30 GPIO_ACTIVE_LOW>;
linux,default-trigger = "disk-activity";
diff --git a/arch/arm/boot/dts/marvell/kirkwood-nsa310.dts b/arch/arm/boot/dts/marvell/kirkwood-nsa310.dts
index c1799a07816e..3555ac1c3b15 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-nsa310.dts
@@ -87,43 +87,43 @@
&pmx_led_hdd_green &pmx_led_hdd_red>;
pinctrl-names = "default";
- green-sys {
+ led-green-sys {
label = "nsa310:green:sys";
gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
};
- red-sys {
+ led-red-sys {
label = "nsa310:red:sys";
gpios = <&gpio0 29 GPIO_ACTIVE_HIGH>;
};
- green-hdd {
+ led-green-hdd {
label = "nsa310:green:hdd";
gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
};
- red-hdd {
+ led-red-hdd {
label = "nsa310:red:hdd";
gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
};
- green-esata {
+ led-green-esata {
label = "nsa310:green:esata";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
- red-esata {
+ led-red-esata {
label = "nsa310:red:esata";
gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
};
- green-usb {
+ led-green-usb {
label = "nsa310:green:usb";
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
- red-usb {
+ led-red-usb {
label = "nsa310:red:usb";
gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
};
- green-copy {
+ led-green-copy {
label = "nsa310:green:copy";
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
};
- red-copy {
+ led-red-copy {
label = "nsa310:red:copy";
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-nsa310a.dts b/arch/arm/boot/dts/marvell/kirkwood-nsa310a.dts
index b85e314f045a..ddf84092aade 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-nsa310a.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-nsa310a.dts
@@ -75,39 +75,39 @@
gpio-leds {
compatible = "gpio-leds";
- green-sys {
+ led-green-sys {
label = "nsa310:green:sys";
gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
};
- red-sys {
+ led-red-sys {
label = "nsa310:red:sys";
gpios = <&gpio0 29 GPIO_ACTIVE_HIGH>;
};
- green-hdd {
+ led-green-hdd {
label = "nsa310:green:hdd";
gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
};
- red-hdd {
+ led-red-hdd {
label = "nsa310:red:hdd";
gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
};
- green-esata {
+ led-green-esata {
label = "nsa310:green:esata";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
- red-esata {
+ led-red-esata {
label = "nsa310:red:esata";
gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
};
- green-usb {
+ led-green-usb {
label = "nsa310:green:usb";
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
- green-copy {
+ led-green-copy {
label = "nsa310:green:copy";
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
};
- red-copy {
+ led-red-copy {
label = "nsa310:red:copy";
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-nsa310s.dts b/arch/arm/boot/dts/marvell/kirkwood-nsa310s.dts
index 49da633a1bc0..47deb93c90a5 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-nsa310s.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-nsa310s.dts
@@ -35,24 +35,22 @@
keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_buttons>;
pinctrl-names = "default";
- power {
+ button-power {
label = "Power Button";
linux,code = <KEY_POWER>;
gpios = <&gpio0 26 GPIO_ACTIVE_HIGH>;
};
- copy {
+ button-copy {
label = "Copy Button";
linux,code = <KEY_COPY>;
gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset Button";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-nsa320.dts b/arch/arm/boot/dts/marvell/kirkwood-nsa320.dts
index 652405e65006..dd5c8ffc8781 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-nsa320.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-nsa320.dts
@@ -142,39 +142,39 @@
&pmx_led_hdd1_green &pmx_led_hdd1_red>;
pinctrl-names = "default";
- green-sys {
+ led-green-sys {
label = "nsa320:green:sys";
gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
};
- orange-sys {
+ led-orange-sys {
label = "nsa320:orange:sys";
gpios = <&gpio0 29 GPIO_ACTIVE_HIGH>;
};
- green-hdd1 {
+ led-green-hdd1 {
label = "nsa320:green:hdd1";
gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
};
- red-hdd1 {
+ led-red-hdd1 {
label = "nsa320:red:hdd1";
gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
};
- green-hdd2 {
+ led-green-hdd2 {
label = "nsa320:green:hdd2";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
- red-hdd2 {
+ led-red-hdd2 {
label = "nsa320:red:hdd2";
gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
};
- green-usb {
+ led-green-usb {
label = "nsa320:green:usb";
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
- green-copy {
+ led-green-copy {
label = "nsa320:green:copy";
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
};
- red-copy {
+ led-red-copy {
label = "nsa320:red:copy";
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-nsa325.dts b/arch/arm/boot/dts/marvell/kirkwood-nsa325.dts
index 371456de34b2..f0786a5f2ce6 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-nsa325.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-nsa325.dts
@@ -162,39 +162,39 @@
&pmx_led_hdd1_green &pmx_led_hdd1_red>;
pinctrl-names = "default";
- green-sys {
+ led-green-sys {
label = "nsa325:green:sys";
gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
};
- orange-sys {
+ led-orange-sys {
label = "nsa325:orange:sys";
gpios = <&gpio0 29 GPIO_ACTIVE_HIGH>;
};
- green-hdd1 {
+ led-green-hdd1 {
label = "nsa325:green:hdd1";
gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
};
- red-hdd1 {
+ led-red-hdd1 {
label = "nsa325:red:hdd1";
gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
};
- green-hdd2 {
+ led-green-hdd2 {
label = "nsa325:green:hdd2";
gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
};
- red-hdd2 {
+ led-red-hdd2 {
label = "nsa325:red:hdd2";
gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
};
- green-usb {
+ led-green-usb {
label = "nsa325:green:usb";
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
- green-copy {
+ led-green-copy {
label = "nsa325:green:copy";
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
};
- red-copy {
+ led-red-copy {
label = "nsa325:red:copy";
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-nsa3x0-common.dtsi b/arch/arm/boot/dts/marvell/kirkwood-nsa3x0-common.dtsi
index ea3d36512e9f..e9bd9c551af5 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-nsa3x0-common.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-nsa3x0-common.dtsi
@@ -63,22 +63,20 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_btn_reset &pmx_btn_copy &pmx_btn_power>;
pinctrl-names = "default";
- power {
+ button-power {
label = "Power Button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
};
- copy {
+ button-copy {
label = "Copy Button";
linux,code = <KEY_COPY>;
gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset Button";
linux,code = <KEY_RESTART>;
gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/marvell/kirkwood-openblocks_a6.dts
index 8ea430168ea5..20c6290d2037 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-openblocks_a6.dts
@@ -115,10 +115,8 @@
compatible = "gpio-keys";
pinctrl-0 = <&pmx_gpio_init>;
pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- power {
+ button-power {
label = "Init Button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/marvell/kirkwood-openblocks_a7.dts
index 946f0f453dd1..9c438f10f737 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-openblocks_a7.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-openblocks_a7.dts
@@ -136,8 +136,6 @@
compatible = "gpio-keys";
pinctrl-0 = <&pmx_gpio_init>;
pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
button {
label = "Init Button";
diff --git a/arch/arm/boot/dts/marvell/kirkwood-pogo_e02.dts b/arch/arm/boot/dts/marvell/kirkwood-pogo_e02.dts
index f9e95e55f36d..39a5345332da 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-pogo_e02.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-pogo_e02.dts
@@ -33,12 +33,12 @@
gpio-leds {
compatible = "gpio-leds";
- health {
+ led-health {
label = "pogo_e02:green:health";
gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
- fault {
+ led-fault {
label = "pogo_e02:orange:fault";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-pogoplug-series-4.dts b/arch/arm/boot/dts/marvell/kirkwood-pogoplug-series-4.dts
index 5aa4669ae254..0e9c4cf79822 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-pogoplug-series-4.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-pogoplug-series-4.dts
@@ -29,12 +29,10 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_button_eject>;
pinctrl-names = "default";
- eject {
+ button-eject {
debounce-interval = <50>;
wakeup-source;
linux,code = <KEY_EJECTCD>;
@@ -48,12 +46,12 @@
pinctrl-0 = <&pmx_led_green &pmx_led_red>;
pinctrl-names = "default";
- health {
+ led-health {
label = "pogoplugv4:green:health";
gpios = <&gpio0 22 GPIO_ACTIVE_LOW>;
default-state = "on";
};
- fault {
+ led-fault {
label = "pogoplugv4:red:fault";
gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-sheevaplug-esata.dts b/arch/arm/boot/dts/marvell/kirkwood-sheevaplug-esata.dts
index ae8f493c9a0f..eb185273376e 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-sheevaplug-esata.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-sheevaplug-esata.dts
@@ -33,7 +33,7 @@
pinctrl-0 = <&pmx_led_blue>;
pinctrl-names = "default";
- health {
+ led-health {
label = "sheevaplug:blue:health";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
default-state = "keep";
diff --git a/arch/arm/boot/dts/marvell/kirkwood-sheevaplug.dts b/arch/arm/boot/dts/marvell/kirkwood-sheevaplug.dts
index c73cc904e5c4..ce73fcf2255f 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-sheevaplug.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-sheevaplug.dts
@@ -28,13 +28,13 @@
pinctrl-0 = <&pmx_led_blue &pmx_led_red>;
pinctrl-names = "default";
- health {
+ led-health {
label = "sheevaplug:blue:health";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
default-state = "keep";
};
- misc {
+ led-misc {
label = "sheevaplug:red:misc";
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-synology.dtsi b/arch/arm/boot/dts/marvell/kirkwood-synology.dtsi
index 20964eb48fd7..6b7c5218b1fb 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-synology.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-synology.dtsi
@@ -410,7 +410,7 @@
pinctrl-0 = <&pmx_alarmled_12>;
pinctrl-names = "default";
- hdd1-green {
+ led-hdd1-green {
label = "synology:alarm";
gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
};
@@ -424,42 +424,42 @@
&pmx_hddled_26 &pmx_hddled_27>;
pinctrl-names = "default";
- hdd1-green {
+ led-hdd1-green {
label = "synology:green:hdd1";
gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
};
- hdd1-amber {
+ led-hdd1-amber {
label = "synology:amber:hdd1";
gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
};
- hdd2-green {
+ led-hdd2-green {
label = "synology:green:hdd2";
gpios = <&gpio0 22 GPIO_ACTIVE_LOW>;
};
- hdd2-amber {
+ led-hdd2-amber {
label = "synology:amber:hdd2";
gpios = <&gpio0 23 GPIO_ACTIVE_LOW>;
};
- hdd3-green {
+ led-hdd3-green {
label = "synology:green:hdd3";
gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
};
- hdd3-amber {
+ led-hdd3-amber {
label = "synology:amber:hdd3";
gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;
};
- hdd4-green {
+ led-hdd4-green {
label = "synology:green:hdd4";
gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
};
- hdd4-amber {
+ led-hdd4-amber {
label = "synology:amber:hdd4";
gpios = <&gpio0 27 GPIO_ACTIVE_LOW>;
};
@@ -471,12 +471,12 @@
pinctrl-0 = <&pmx_hddled_21 &pmx_hddled_23>;
pinctrl-names = "default";
- hdd1-green {
+ led-hdd1-green {
label = "synology:green:hdd1";
gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
};
- hdd1-amber {
+ led-hdd1-amber {
label = "synology:amber:hdd1";
gpios = <&gpio0 23 GPIO_ACTIVE_LOW>;
};
@@ -488,22 +488,22 @@
pinctrl-0 = <&pmx_hddled_21 &pmx_hddled_23 &pmx_hddled_20 &pmx_hddled_22>;
pinctrl-names = "default";
- hdd1-green {
+ led-hdd1-green {
label = "synology:green:hdd1";
gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
};
- hdd1-amber {
+ led-hdd1-amber {
label = "synology:amber:hdd1";
gpios = <&gpio0 23 GPIO_ACTIVE_LOW>;
};
- hdd2-green {
+ led-hdd2-green {
label = "synology:green:hdd2";
gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
};
- hdd2-amber {
+ led-hdd2-amber {
label = "synology:amber:hdd2";
gpios = <&gpio0 22 GPIO_ACTIVE_LOW>;
};
@@ -518,52 +518,52 @@
&pmx_hddled_45>;
pinctrl-names = "default";
- hdd1-green {
+ led-hdd1-green {
label = "synology:green:hdd1";
gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
};
- hdd1-amber {
+ led-hdd1-amber {
label = "synology:amber:hdd1";
gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
};
- hdd2-green {
+ led-hdd2-green {
label = "synology:green:hdd2";
gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
};
- hdd2-amber {
+ led-hdd2-amber {
label = "synology:amber:hdd2";
gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
};
- hdd3-green {
+ led-hdd3-green {
label = "synology:green:hdd3";
gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
};
- hdd3-amber {
+ led-hdd3-amber {
label = "synology:amber:hdd3";
gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
};
- hdd4-green {
+ led-hdd4-green {
label = "synology:green:hdd4";
gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
};
- hdd4-amber {
+ led-hdd4-amber {
label = "synology:amber:hdd4";
gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
- hdd5-green {
+ led-hdd5-green {
label = "synology:green:hdd5";
gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
};
- hdd5-amber {
+ led-hdd5-amber {
label = "synology:amber:hdd5";
gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
};
@@ -575,22 +575,22 @@
pinctrl-0 = <&pmx_hddled_38 &pmx_hddled_39 &pmx_hddled_36 &pmx_hddled_37>;
pinctrl-names = "default";
- hdd1-green {
+ led-hdd1-green {
label = "synology:green:hdd1";
gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
};
- hdd1-amber {
+ led-hdd1-amber {
label = "synology:amber:hdd1";
gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
};
- hdd2-green {
+ led-hdd2-green {
label = "synology:green:hdd2";
gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
};
- hdd2-amber {
+ led-hdd2-amber {
label = "synology:amber:hdd2";
gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/kirkwood-t5325.dts b/arch/arm/boot/dts/marvell/kirkwood-t5325.dts
index ad093324e075..a6e77a487d00 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-t5325.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-t5325.dts
@@ -156,12 +156,10 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_button_power>;
pinctrl-names = "default";
- power {
+ button-power {
label = "Power Button";
linux,code = <KEY_POWER>;
gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-ts219-6281.dts b/arch/arm/boot/dts/marvell/kirkwood-ts219-6281.dts
index 30892c19aceb..a2e0ad4b84d8 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-ts219-6281.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-ts219-6281.dts
@@ -35,17 +35,15 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
pinctrl-names = "default";
- copy {
+ button-copy {
label = "USB Copy";
linux,code = <KEY_COPY>;
gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-ts219-6282.dts b/arch/arm/boot/dts/marvell/kirkwood-ts219-6282.dts
index aba1205981f1..35be6bce1dba 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-ts219-6282.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-ts219-6282.dts
@@ -35,17 +35,15 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
pinctrl-names = "default";
- copy {
+ button-copy {
label = "USB Copy";
linux,code = <KEY_COPY>;
gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-ts419.dtsi b/arch/arm/boot/dts/marvell/kirkwood-ts419.dtsi
index 717236853e45..f136059607b7 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-ts419.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-ts419.dtsi
@@ -36,17 +36,15 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
pinctrl-names = "default";
- copy {
+ button-copy {
label = "USB Copy";
linux,code = <KEY_COPY>;
gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
- reset {
+ button-reset {
label = "Reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/mvebu-linkstation-gpio-simple.dtsi b/arch/arm/boot/dts/marvell/mvebu-linkstation-gpio-simple.dtsi
index c2d87ba6190a..055ac754c5fd 100644
--- a/arch/arm/boot/dts/marvell/mvebu-linkstation-gpio-simple.dtsi
+++ b/arch/arm/boot/dts/marvell/mvebu-linkstation-gpio-simple.dtsi
@@ -48,8 +48,6 @@
/ {
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-0 = <&pmx_power_switch>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/marvell/orion5x-lacie-d2-network.dts b/arch/arm/boot/dts/marvell/orion5x-lacie-d2-network.dts
index 03471d30bfd9..12a4aac2633e 100644
--- a/arch/arm/boot/dts/marvell/orion5x-lacie-d2-network.dts
+++ b/arch/arm/boot/dts/marvell/orion5x-lacie-d2-network.dts
@@ -35,22 +35,21 @@
compatible = "gpio-keys";
pinctrl-0 = <&pmx_buttons>;
pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- front_button {
+
+ button-front {
label = "Front Push Button";
linux,code = <KEY_POWER>;
gpios = <&gpio0 18 GPIO_ACTIVE_HIGH>;
};
- power_rocker_sw_on {
+ switch-power-rocker-sw-on {
label = "Power rocker switch (on|auto)";
linux,input-type = <5>; /* EV_SW */
linux,code = <1>; /* D2NET_SWITCH_POWER_ON */
gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
};
- power_rocker_sw_off {
+ switch-power-rocker-sw-off {
label = "Power rocker switch (auto|off)";
linux,input-type = <5>; /* EV_SW */
linux,code = <2>; /* D2NET_SWITCH_POWER_OFF */
diff --git a/arch/arm/boot/dts/marvell/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/marvell/orion5x-lacie-ethernet-disk-mini-v2.dts
index f17e25ac98dd..f81acb9b7223 100644
--- a/arch/arm/boot/dts/marvell/orion5x-lacie-ethernet-disk-mini-v2.dts
+++ b/arch/arm/boot/dts/marvell/orion5x-lacie-ethernet-disk-mini-v2.dts
@@ -39,9 +39,8 @@
compatible = "gpio-keys";
pinctrl-0 = <&pmx_power_button>;
pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- button@1 {
+
+ button-1 {
label = "Power-on Switch";
linux,code = <KEY_POWER>;
gpios = <&gpio0 18 GPIO_ACTIVE_HIGH>;
@@ -53,7 +52,7 @@
pinctrl-0 = <&pmx_power_led>;
pinctrl-names = "default";
- led@1 {
+ led-1 {
label = "power:blue";
gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/orion5x-linkstation-lschl.dts b/arch/arm/boot/dts/marvell/orion5x-linkstation-lschl.dts
index ee751995c8d0..79fee048c900 100644
--- a/arch/arm/boot/dts/marvell/orion5x-linkstation-lschl.dts
+++ b/arch/arm/boot/dts/marvell/orion5x-linkstation-lschl.dts
@@ -61,7 +61,7 @@
};
gpio_keys {
- func {
+ func-button {
label = "Function Button";
linux,code = <KEY_OPTION>;
gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
@@ -90,7 +90,7 @@
gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
};
- func {
+ func-led {
label = "lschl:func:blue:top";
gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/marvell/orion5x-lswsgl.dts b/arch/arm/boot/dts/marvell/orion5x-lswsgl.dts
index 2fbc17d6dfa4..e0da406c430f 100644
--- a/arch/arm/boot/dts/marvell/orion5x-lswsgl.dts
+++ b/arch/arm/boot/dts/marvell/orion5x-lswsgl.dts
@@ -74,22 +74,21 @@
compatible = "gpio-keys";
pinctrl-0 = <&pmx_buttons>;
pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- func {
+
+ key-func {
label = "Function Button";
linux,code = <KEY_OPTION>;
gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
};
- power {
+ key-power {
label = "Power-on Switch";
linux,input-type = <5>; /* EV_SW */
linux,code = <KEY_RESERVED>; /* LSMINI_SW_POWER */
gpios = <&gpio0 18 GPIO_ACTIVE_LOW>;
};
- autopower {
+ key-autopower {
label = "Power-auto Switch";
linux,input-type = <5>; /* EV_SW */
linux,code = <KEY_ESC>; /* LSMINI_SW_AUTOPOWER */
@@ -103,24 +102,24 @@
&pmx_led_power>;
pinctrl-names = "default";
- alarm {
+ led-alarm {
label = "lswsgl:alarm:red";
- gpio = <&gpio0 2 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
};
- info {
+ led-info {
label = "lswsgl:info:amber";
- gpio = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
};
- func {
+ led-func {
label = "lswsgl:func:blue:top";
- gpio = <&gpio0 9 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio0 9 GPIO_ACTIVE_LOW>;
};
- power {
+ led-power {
label = "lswsgl:power:blue:bottom";
- gpio = <&gpio0 14 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
default-state = "on";
};
};
diff --git a/arch/arm/boot/dts/marvell/orion5x-maxtor-shared-storage-2.dts b/arch/arm/boot/dts/marvell/orion5x-maxtor-shared-storage-2.dts
index d57859998350..cb1bd24b7ae3 100644
--- a/arch/arm/boot/dts/marvell/orion5x-maxtor-shared-storage-2.dts
+++ b/arch/arm/boot/dts/marvell/orion5x-maxtor-shared-storage-2.dts
@@ -35,15 +35,14 @@
compatible = "gpio-keys";
pinctrl-0 = <&pmx_buttons>;
pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- power {
+
+ key-power {
label = "Power";
linux,code = <KEY_POWER>;
gpios = <&gpio0 11 GPIO_ACTIVE_LOW>;
};
- reset {
+ key-reset {
label = "Reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/orion5x-netgear-wnr854t.dts b/arch/arm/boot/dts/marvell/orion5x-netgear-wnr854t.dts
index fb203e7d37f5..d63ea15539aa 100644
--- a/arch/arm/boot/dts/marvell/orion5x-netgear-wnr854t.dts
+++ b/arch/arm/boot/dts/marvell/orion5x-netgear-wnr854t.dts
@@ -35,7 +35,7 @@
pinctrl-0 = <&pmx_reset_button>;
pinctrl-names = "default";
- reset {
+ key-reset {
label = "Reset Button";
linux,code = <KEY_RESTART>;
gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/marvell/orion5x-rd88f5182-nas.dts b/arch/arm/boot/dts/marvell/orion5x-rd88f5182-nas.dts
index fd78aa02a3c5..75ab913b21e5 100644
--- a/arch/arm/boot/dts/marvell/orion5x-rd88f5182-nas.dts
+++ b/arch/arm/boot/dts/marvell/orion5x-rd88f5182-nas.dts
@@ -32,7 +32,7 @@
pinctrl-0 = <&pmx_debug_led>;
pinctrl-names = "default";
- led@0 {
+ led-0 {
label = "rd88f5182:cpu";
linux,default-trigger = "heartbeat";
gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/mediatek/mt2701-evb.dts b/arch/arm/boot/dts/mediatek/mt2701-evb.dts
index 9c7325f18933..4c76366aa938 100644
--- a/arch/arm/boot/dts/mediatek/mt2701-evb.dts
+++ b/arch/arm/boot/dts/mediatek/mt2701-evb.dts
@@ -231,7 +231,7 @@
<MT2701_PIN_238_EXT_SDIO1__FUNC_EXT_SDIO1>,
<MT2701_PIN_237_EXT_SDIO2__FUNC_EXT_SDIO2>,
<MT2701_PIN_236_EXT_SDIO3__FUNC_EXT_SDIO3>;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
bias-pull-up;
};
};
diff --git a/arch/arm/boot/dts/mediatek/mt7623.dtsi b/arch/arm/boot/dts/mediatek/mt7623.dtsi
index f0b4a09004b3..814586abc297 100644
--- a/arch/arm/boot/dts/mediatek/mt7623.dtsi
+++ b/arch/arm/boot/dts/mediatek/mt7623.dtsi
@@ -1143,13 +1143,13 @@
<MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0>,
<MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_2mA>;
+ drive-strength = <2>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
pins-clk {
pinmux = <MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK>;
- drive-strength = <MTK_DRIVE_2mA>;
+ drive-strength = <2>;
bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
};
@@ -1167,14 +1167,14 @@
<MT7623_PIN_110_MSDC1_DAT3_FUNC_MSDC1_DAT3>,
<MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
};
pins-clk {
pinmux = <MT7623_PIN_106_MSDC1_CLK_FUNC_MSDC1_CLK>;
bias-pull-down;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
};
pins-wp {
@@ -1197,13 +1197,13 @@
<MT7623_PIN_110_MSDC1_DAT3_FUNC_MSDC1_DAT3>,
<MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
};
pins-clk {
pinmux = <MT7623_PIN_106_MSDC1_CLK_FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
};
@@ -1211,7 +1211,7 @@
nand_pins_default: nanddefault {
pins-ale {
pinmux = <MT7623_PIN_116_MSDC0_CMD_FUNC_NALE>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -1226,13 +1226,13 @@
<MT7623_PIN_115_MSDC0_RSTB_FUNC_NLD8>,
<MT7623_PIN_119_MSDC0_DAT2_FUNC_NLD2>;
input-enable;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up;
};
pins-we {
pinmux = <MT7623_PIN_117_MSDC0_CLK_FUNC_NWEB>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
};
};
diff --git a/arch/arm/boot/dts/nspire/nspire-classic.dtsi b/arch/arm/boot/dts/nspire/nspire-classic.dtsi
index a6e9cbf51524..0ee53d3ecd54 100644
--- a/arch/arm/boot/dts/nspire/nspire-classic.dtsi
+++ b/arch/arm/boot/dts/nspire/nspire-classic.dtsi
@@ -55,7 +55,7 @@
};
/ {
- memory {
+ memory@10000000 {
device_type = "memory";
reg = <0x10000000 0x2000000>; /* 32 MB */
};
diff --git a/arch/arm/boot/dts/nspire/nspire-cx.dts b/arch/arm/boot/dts/nspire/nspire-cx.dts
index 29f0181e5b38..debeff0ec010 100644
--- a/arch/arm/boot/dts/nspire/nspire-cx.dts
+++ b/arch/arm/boot/dts/nspire/nspire-cx.dts
@@ -122,7 +122,7 @@
model = "TI-NSPIRE CX";
compatible = "ti,nspire-cx";
- memory {
+ memory@10000000 {
device_type = "memory";
reg = <0x10000000 0x4000000>; /* 64 MB */
};
diff --git a/arch/arm/boot/dts/nspire/nspire.dtsi b/arch/arm/boot/dts/nspire/nspire.dtsi
index d56fef7250db..95588b716c6f 100644
--- a/arch/arm/boot/dts/nspire/nspire.dtsi
+++ b/arch/arm/boot/dts/nspire/nspire.dtsi
@@ -170,9 +170,12 @@
};
watchdog: watchdog@90060000 {
- compatible = "arm,primecell";
+ compatible = "arm,sp805", "arm,primecell";
reg = <0x90060000 0x1000>;
interrupts = <3>;
+ clocks = <&apb_pclk>, <&apb_pclk>;
+ clock-names = "wdog_clk", "apb_pclk";
+ status = "disabled";
};
rtc: rtc@90090000 {
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts
index 5787ae95d3b4..1f07ba382910 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts
@@ -525,7 +525,7 @@
};
};
- i2c-bus@4 {
+ i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
@@ -537,7 +537,7 @@
};
};
- i2c-bus@5 {
+ i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
@@ -549,7 +549,7 @@
};
};
- i2c-bus@6 {
+ i2c@6 {
#address-cells = <1>;
#size-cells = <0>;
reg = <6>;
@@ -561,7 +561,7 @@
};
};
- i2c-bus@7 {
+ i2c@7 {
#address-cells = <1>;
#size-cells = <0>;
reg = <7>;
@@ -580,7 +580,7 @@
reg = <0x77>;
i2c-mux-idle-disconnect;
- i2c-bus@2 {
+ i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
@@ -620,7 +620,7 @@
reg = <0x77>;
i2c-mux-idle-disconnect;
- i2c-bus@0 {
+ i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
@@ -632,7 +632,7 @@
};
};
- i2c-bus@1 {
+ i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
@@ -691,7 +691,7 @@
reg = <0x77>;
i2c-mux-idle-disconnect;
- i2c-bus@3 {
+ i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
@@ -703,7 +703,7 @@
};
};
- i2c-bus@4 {
+ i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
@@ -715,7 +715,7 @@
};
};
- i2c-bus@5 {
+ i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
@@ -726,7 +726,7 @@
reg = <0x28>;
};
};
- i2c-bus@6 {
+ i2c@6 {
#address-cells = <1>;
#size-cells = <0>;
reg = <6>;
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
index baa39d0c1032..087f4ac43187 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
@@ -215,43 +215,43 @@
reg = <0x70>;
i2c-mux-idle-disconnect;
- i2c_slot1a: i2c-bus@0 {
+ i2c_slot1a: i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
};
- i2c_slot1b: i2c-bus@1 {
+ i2c_slot1b: i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
};
- i2c_slot2a: i2c-bus@2 {
+ i2c_slot2a: i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
};
- i2c_slot2b: i2c-bus@3 {
+ i2c_slot2b: i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
};
- i2c_slot3: i2c-bus@4 {
+ i2c_slot3: i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
};
- i2c_slot4: i2c-bus@5 {
+ i2c_slot4: i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
};
- i2c_slot5: i2c-bus@6 {
+ i2c_slot5: i2c@6 {
#address-cells = <1>;
#size-cells = <0>;
reg = <6>;
@@ -265,24 +265,24 @@
#size-cells = <0>;
i2c-mux-idle-disconnect;
- i2c_m2_s1: i2c-bus@0 {
+ i2c_m2_s1: i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
};
- i2c_m2_s2: i2c-bus@1 {
+ i2c_m2_s2: i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
};
- i2c_m2_s3: i2c-bus@2 {
+ i2c_m2_s3: i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
};
- i2c_m2_s4: i2c-bus@3 {
+ i2c_m2_s4: i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
diff --git a/arch/arm/boot/dts/nxp/imx/Makefile b/arch/arm/boot/dts/nxp/imx/Makefile
index 231c0d73a53e..92e291603ea1 100644
--- a/arch/arm/boot/dts/nxp/imx/Makefile
+++ b/arch/arm/boot/dts/nxp/imx/Makefile
@@ -99,6 +99,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-icore.dtb \
imx6dl-icore-mipi.dtb \
imx6dl-icore-rqs.dtb \
+ imx6dl-kontron-samx6i-ads2.dtb \
imx6dl-lanmcu.dtb \
imx6dl-mamoj.dtb \
imx6dl-mba6a.dtb \
@@ -207,6 +208,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-icore-ofcap10.dtb \
imx6q-icore-ofcap12.dtb \
imx6q-icore-rqs.dtb \
+ imx6q-kontron-samx6i-ads2.dtb \
imx6q-kp-tpc.dtb \
imx6q-logicpd.dtb \
imx6q-marsboard.dtb \
diff --git a/arch/arm/boot/dts/nxp/imx/e60k02.dtsi b/arch/arm/boot/dts/nxp/imx/e60k02.dtsi
index 13756d39fb7b..0029c12f16c8 100644
--- a/arch/arm/boot/dts/nxp/imx/e60k02.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/e60k02.dtsi
@@ -14,6 +14,10 @@
#include <dt-bindings/input/input.h>
/ {
+ aliases {
+ mmc0 = &usdhc2;
+ mmc1 = &usdhc3;
+ };
chosen {
stdout-path = &uart1;
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-apf51dev.dts b/arch/arm/boot/dts/nxp/imx/imx51-apf51dev.dts
index b61d55ca1467..de6b7607510a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx51-apf51dev.dts
@@ -25,8 +25,8 @@
pinctrl-0 = <&pinctrl_ipu_disp1>;
display-timings {
- lw700 {
- native-mode;
+ native-mode = <&timing0>;
+ timing0: timing-lw700 {
clock-frequency = <33000033>;
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-babbage.dts b/arch/arm/boot/dts/nxp/imx/imx51-babbage.dts
index 16ff543f3fbf..f4a47e8348b2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-babbage.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx51-babbage.dts
@@ -89,7 +89,7 @@
status = "disabled";
display-timings {
native-mode = <&timing1>;
- timing1: claawvga {
+ timing1: timing-claawvga {
clock-frequency = <27000000>;
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts b/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts
index 2bd0761c7e90..079bd3d14999 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx51-ts4800.dts
@@ -58,8 +58,8 @@
pinctrl-0 = <&pinctrl_lcd>;
display-timings {
- 800x480p60 {
- native-mode;
+ native-mode = <&timing0>;
+ timing0: timing-800x480p60 {
clock-frequency = <30066000>;
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts b/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts
index 1353d985969c..ba0c62994f75 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-m53evk.dts
@@ -17,8 +17,8 @@
pinctrl-0 = <&pinctrl_ipu_disp1>;
display-timings {
- 800x480p60 {
- native-mode;
+ native-mode = <&timing0>;
+ timing0: timing-800x480p60 {
clock-frequency = <31500000>;
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-m53menlo.dts b/arch/arm/boot/dts/nxp/imx/imx53-m53menlo.dts
index 4d77b6077fc1..558751e730f3 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-m53menlo.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-m53menlo.dts
@@ -64,6 +64,7 @@
reg = <0>;
lvds_decoder_in: endpoint {
+ data-mapping = "jeida-18";
remote-endpoint = <&lvds0_out>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
index d80440446473..05d7a462ea25 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
@@ -85,7 +85,7 @@
};
};
- panel {
+ panel_dpi: panel {
compatible = "sii,43wvf1g";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_display_power>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
index c84e9b052527..151e9cee3c87 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
+++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
@@ -10,8 +10,6 @@
/plugin/;
&{/} {
- /delete-node/ panel;
-
hdmi: connector-hdmi {
compatible = "hdmi-connector";
label = "hdmi";
@@ -82,6 +80,10 @@
};
};
+&panel_dpi {
+ status = "disabled";
+};
+
&tve {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-tx53-x03x.dts b/arch/arm/boot/dts/nxp/imx/imx53-tx53-x03x.dts
index a7f77527269d..a02d77bb5672 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-tx53-x03x.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-tx53-x03x.dts
@@ -67,7 +67,7 @@
};
display-timings {
- VGA {
+ timing-vga {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -83,7 +83,7 @@
pixelclk-active = <0>;
};
- ETV570 {
+ timing-etc570 {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -99,7 +99,7 @@
pixelclk-active = <0>;
};
- ET0350 {
+ timing-et0350 {
clock-frequency = <6413760>;
hactive = <320>;
vactive = <240>;
@@ -115,7 +115,7 @@
pixelclk-active = <0>;
};
- ET0430 {
+ timing-et0430 {
clock-frequency = <9009000>;
hactive = <480>;
vactive = <272>;
@@ -131,7 +131,7 @@
pixelclk-active = <1>;
};
- ET0500 {
+ timing-et0500 {
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
@@ -147,7 +147,7 @@
pixelclk-active = <0>;
};
- ET0700 { /* same as ET0500 */
+ timing-et0700 { /* same as ET0500 */
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
@@ -163,7 +163,7 @@
pixelclk-active = <0>;
};
- ETQ570 {
+ timing-etq570 {
clock-frequency = <6596040>;
hactive = <320>;
vactive = <240>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-tx53-x13x.dts b/arch/arm/boot/dts/nxp/imx/imx53-tx53-x13x.dts
index 6cdf2082c742..e10c179dbdb3 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-tx53-x13x.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-tx53-x13x.dts
@@ -191,7 +191,7 @@
display-timings {
native-mode = <&lvds0_timing0>;
- lvds0_timing0: hsd100pxn1 {
+ lvds0_timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
@@ -207,7 +207,7 @@
pixelclk-active = <1>;
};
- lvds0_timing1: nl12880bc20 {
+ lvds0_timing1: timing-nl12880bc20 {
clock-frequency = <71000000>;
hactive = <1280>;
vactive = <800>;
@@ -233,7 +233,7 @@
display-timings {
native-mode = <&lvds1_timing0>;
- lvds1_timing0: hsd100pxn1 {
+ lvds1_timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos2_4.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos2_4.dts
index dfa6f64d43cc..c9b2ea2b24b2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos2_4.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos2_4.dts
@@ -82,11 +82,10 @@
compatible = "lg,lg4573";
spi-max-frequency = <10000000>;
reg = <0>;
- power-on-delay = <10>;
display-timings {
- 480x800p57 {
- native-mode;
+ native-mode = <&timing0>;
+ timing0: timing-480x800p57 {
clock-frequency = <27000027>;
hactive = <480>;
vactive = <800>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts
index a5ac79346854..9ec038f1d0ff 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_4.dts
@@ -36,8 +36,8 @@
status = "okay";
display-timings {
- 480x800p60 {
- native-mode;
+ native-mode = <&timing0>;
+ timing0: timing-480x800p60 {
clock-frequency = <30000000>;
hactive = <480>;
vactive = <800>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts
index 5a25bdbbeb68..b3129832f471 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-aristainetos_7.dts
@@ -25,8 +25,8 @@
status = "okay";
display-timings {
- 800x480p60 {
- native-mode;
+ native-mode = <&timing0>;
+ timing0: timing-800x480p60 {
clock-frequency = <33246000>;
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i-ads2.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i-ads2.dts
new file mode 100644
index 000000000000..6a0c53f23a15
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i-ads2.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-kontron-samx6i.dtsi"
+#include "imx6qdl-kontron-samx6i-ads2.dtsi"
+
+/ {
+ model = "Kontron SMARC-sAMX6i Dual-Lite/Solo on SMARC Eval 2.0 carrier";
+ compatible = "kontron,imx6dl-samx6i-ads2", "kontron,imx6dl-samx6i", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i.dtsi
index a864fdbd5f16..5a9b819d7ee8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-kontron-samx6i.dtsi
@@ -7,6 +7,6 @@
#include "imx6qdl-kontron-samx6i.dtsi"
/ {
- model = "Kontron SMARC sAMX6i Dual-Lite/Solo";
+ model = "Kontron SMARC-sAMX6i Dual-Lite/Solo";
compatible = "kontron,imx6dl-samx6i", "fsl,imx6dl";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i-ads2.dts b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i-ads2.dts
new file mode 100644
index 000000000000..94c395cc020e
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i-ads2.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-kontron-samx6i.dtsi"
+#include "imx6qdl-kontron-samx6i-ads2.dtsi"
+
+/ {
+ model = "Kontron SMARC-sAMX6i Quad/Dual on SMARC Eval 2.0 carrier";
+ compatible = "kontron,imx6q-samx6i-ads2", "kontron,imx6q-samx6i", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi
index 4d6a0c3e8455..e76963436079 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi
@@ -5,31 +5,8 @@
#include "imx6q.dtsi"
#include "imx6qdl-kontron-samx6i.dtsi"
-#include <dt-bindings/gpio/gpio.h>
/ {
- model = "Kontron SMARC sAMX6i Quad/Dual";
+ model = "Kontron SMARC-sAMX6i Quad/Dual";
compatible = "kontron,imx6q-samx6i", "fsl,imx6q";
};
-
-/* Quad/Dual SoMs have 3 chip-select signals */
-&ecspi4 {
- cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>,
- <&gpio3 29 GPIO_ACTIVE_LOW>,
- <&gpio3 25 GPIO_ACTIVE_LOW>;
-};
-
-&pinctrl_ecspi4 {
- fsl,pins = <
- MX6QDL_PAD_EIM_D21__ECSPI4_SCLK 0x100b1
- MX6QDL_PAD_EIM_D28__ECSPI4_MOSI 0x100b1
- MX6QDL_PAD_EIM_D22__ECSPI4_MISO 0x100b1
-
- /* SPI4_IMX_CS2# - connected to internal flash */
- MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0
- /* SPI4_IMX_CS0# - connected to SMARC SPI0_CS0# */
- MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0
- /* SPI4_CS3# - connected to SMARC SPI0_CS1# */
- MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0
- >;
-};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
index 48ffb3ee01bd..082a2e3a391f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
@@ -485,7 +485,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: hsd100pxn1 {
+ timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
index 1eae438fbdae..8ec442038ea0 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
@@ -482,7 +482,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: hsd100pxn1 {
+ timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
index c2ec8572c8a5..9df9f79affae 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
@@ -529,7 +529,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: hsd100pxn1 {
+ timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
index 7cee983da669..7693f92195d5 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
@@ -584,7 +584,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: hsd100pxn1 {
+ timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
index fbc704c064b6..9d0836df0fed 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
@@ -486,7 +486,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: g101evn010 {
+ timing0: timing-g101evn010 {
clock-frequency = <68930000>;
hactive = <1280>;
vactive = <800>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
index 070506279186..f4cb9e1d34a9 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
@@ -551,7 +551,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: hsd100pxn1 {
+ timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i-ads2.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i-ads2.dtsi
new file mode 100644
index 000000000000..b4a79245b7b6
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i-ads2.dtsi
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree include for the Kontron SMARC-sAMX6i board on a SMARC Eval
+ * 2.0 carrier (ADS2).
+ *
+ */
+
+/ {
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ sound {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack",
+ "Line", "Line Out Jack",
+ "Microphone", "Microphone Jack",
+ "Line", "Line In Jack";
+ simple-audio-card,routing =
+ "Line Out Jack", "LINEOUTR",
+ "Line Out Jack", "LINEOUTL",
+ "Headphone Jack", "HPOUTR",
+ "Headphone Jack", "HPOUTL",
+ "IN1L", "Line In Jack",
+ "IN1R", "Line In Jack",
+ "Microphone Jack", "MICBIAS",
+ "IN2L", "Microphone Jack",
+ "IN2R", "Microphone Jack";
+
+ simple-audio-card,cpu {
+ sound-dai = <&ssi1>;
+ };
+
+ dailink_master: simple-audio-card,codec {
+ sound-dai = <&wm8904>;
+ };
+ };
+
+ reg_codec_mic: regulator-codec-mic {
+ compatible = "regulator-fixed";
+ regulator-name = "V_3V3_MIC";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_codec_1p8v: regulator-codec-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "V_1V8_S0_CODEC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&audmux {
+ status = "okay";
+};
+
+&can1 {
+ status = "okay";
+};
+
+&can2 {
+ status = "okay";
+};
+
+&ecspi4 {
+ flash@1 {
+ compatible = "jedec,spi-nor";
+ reg = <1>;
+ spi-max-frequency = <100000000>;
+ m25p,fast-read;
+ };
+};
+
+&fec {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ wm8904: audio-codec@1a {
+ compatible = "wlf,wm8904";
+ reg = <0x1a>;
+ #sound-dai-cells = <0>;
+ clocks = <&clks IMX6QDL_CLK_CKO2>;
+ clock-names = "mclk";
+ AVDD-supply = <&reg_codec_1p8v>;
+ CPVDD-supply = <&reg_codec_1p8v>;
+ DBVDD-supply = <&reg_codec_1p8v>;
+ DCVDD-supply = <&reg_codec_1p8v>;
+ MICVDD-supply = <&reg_codec_mic>;
+ };
+};
+
+&i2c3 {
+ eeprom@57 {
+ compatible = "atmel,24c64";
+ reg = <0x57>;
+ pagesize = <32>;
+ };
+};
+
+&pcie {
+ status = "okay";
+};
+
+&ssi1 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&usbotg {
+ status = "okay";
+};
+
+&usdhc3 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi
index 85aeebc9485d..99b5e78458aa 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi
@@ -61,6 +61,18 @@
vin-supply = <&reg_smarc_suppy>;
};
+ reg_sdio: regulator-sdio {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_sdio>;
+ regulator-name = "V_3V3_SD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ off-on-delay-us = <20000>;
+ };
+
reg_smarc_lcdbklt: regulator-smarc-lcdbklt {
compatible = "regulator-fixed";
pinctrl-names = "default";
@@ -137,7 +149,7 @@
status = "disabled";
};
- i2c_intern: i2c-gpio-intern {
+ i2c_intern: i2c-0 {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c_gpio_intern>;
@@ -148,7 +160,7 @@
#size-cells = <0>;
};
- i2c_lcd: i2c-gpio-lcd {
+ i2c_lcd: i2c-1 {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c_gpio_lcd>;
@@ -160,7 +172,7 @@
status = "disabled";
};
- i2c_cam: i2c-gpio-cam {
+ i2c_cam: i2c-2 {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c_gpio_cam>;
@@ -178,7 +190,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_audmux>;
- audmux_ssi1 {
+ mux-ssi1 {
fsl,audmux-port = <MX51_AUDMUX_PORT1_SSI0>;
fsl,port-config = <
(IMX_AUDMUX_V2_PTCR_TFSEL(MX51_AUDMUX_PORT3) |
@@ -190,7 +202,7 @@
>;
};
- audmux_adu3 {
+ mux-aud3 {
fsl,audmux-port = <MX51_AUDMUX_PORT3>;
fsl,port-config = <
IMX_AUDMUX_V2_PTCR_SYN
@@ -198,7 +210,7 @@
>;
};
- audmux_ssi2 {
+ mux-ssi2 {
fsl,audmux-port = <MX51_AUDMUX_PORT2_SSI1>;
fsl,port-config = <
(IMX_AUDMUX_V2_PTCR_TFSEL(MX51_AUDMUX_PORT4) |
@@ -210,7 +222,7 @@
>;
};
- audmux_adu4 {
+ mux-aud4 {
fsl,audmux-port = <MX51_AUDMUX_PORT4>;
fsl,port-config = <
IMX_AUDMUX_V2_PTCR_SYN
@@ -244,7 +256,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi4>;
cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>,
- <&gpio3 29 GPIO_ACTIVE_LOW>;
+ <&gpio3 29 GPIO_ACTIVE_LOW>,
+ <&gpio3 25 GPIO_ACTIVE_LOW>;
status = "okay";
/* default boot source: workaround #1 for errata ERR006282 */
@@ -259,7 +272,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-connection-type = "rgmii-id";
phy-handle = <&ethphy>;
mdio {
@@ -269,7 +282,7 @@
ethphy: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <1>;
- reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
};
};
@@ -356,10 +369,6 @@
regulator-always-on;
};
- /*
- * Per schematics, of all VGEN's, only VGEN5 has some
- * usage ... but even that - over DNI resistor
- */
vgen1 {
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1550000>;
@@ -380,8 +389,7 @@
regulator-max-microvolt = <3300000>;
};
- reg_2p5v_s0: vgen5 {
- regulator-name = "V_2V5_S0";
+ vgen5 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
};
@@ -464,6 +472,8 @@
MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0
/* SPI_IMX_CS0# - connected to SMARC SPI0_CS0# */
MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0
+ /* SPI4_CS3# - connected to SMARC SPI0_CS1# */
+ MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0
>;
};
@@ -516,7 +526,7 @@
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0 /* RST_GBE0_PHY# */
+ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0 /* RST_GBE0_PHY# */
>;
};
@@ -642,6 +652,12 @@
>;
};
+ pinctrl_reg_sdio: reg-sdiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x1b0b0 /* SDIO_PWR_EN */
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
@@ -694,7 +710,6 @@
MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x1b0b0 /* CD */
MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b0b0 /* WP */
- MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x1b0b0 /* PWR_EN */
>;
};
@@ -728,8 +743,7 @@
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie>;
- wake-up-gpio = <&gpio6 18 GPIO_ACTIVE_HIGH>;
- reset-gpio = <&gpio3 13 GPIO_ACTIVE_HIGH>;
+ reset-gpio = <&gpio3 13 GPIO_ACTIVE_LOW>;
};
/* LCD_BKLT_PWM */
@@ -797,12 +811,12 @@
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&reg_sdio>;
no-1-8-v;
};
/* SDMMC */
&usdhc4 {
- /* Internal eMMC, optional on some boards */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
bus-width = <8>;
@@ -811,11 +825,13 @@
non-removable;
vmmc-supply = <&reg_3p3v_s0>;
vqmmc-supply = <&reg_1p8v_s0>;
+ status = "okay";
};
&wdog1 {
/* CPLD is feeded by watchdog (hardwired) */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wdog1>;
+ fsl,ext-reset-output;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi
index 238f3af42822..807f3c95e3ce 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi
@@ -22,12 +22,16 @@
compatible = "atmel,24c64";
reg = <0x57>;
pagesize = <32>;
- #address-cells = <1>;
- #size-cells = <1>;
vcc-supply = <&reg_mba6_3p3v>;
- mba_mac_address: mac-address@20 {
- reg = <0x20 0x6>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mba_mac_address: mac-address@20 {
+ reg = <0x20 0x6>;
+ };
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi
index a587bc88f76f..789733a45b95 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi
@@ -32,12 +32,16 @@
compatible = "atmel,24c64";
reg = <0x57>;
pagesize = <32>;
- #address-cells = <1>;
- #size-cells = <1>;
vcc-supply = <&reg_mba6_3p3v>;
- mba_mac_address: mac-address@20 {
- reg = <0x20 0x6>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mba_mac_address: mac-address@20 {
+ reg = <0x20 0x6>;
+ };
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
index 6656e2e762a1..0a3deaf92eea 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
@@ -786,7 +786,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: hsd100pxn1 {
+ timing0: timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lcd.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lcd.dtsi
index 79f2354886b7..ded241a39906 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lcd.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lcd.dtsi
@@ -110,7 +110,7 @@
};
display-timings {
- VGA {
+ timing-vga {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -126,8 +126,7 @@
pixelclk-active = <0>;
};
- ETV570 {
- u-boot,panel-name = "edt,et057090dhu";
+ timing-etv570 {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -143,8 +142,7 @@
pixelclk-active = <0>;
};
- ET0350 {
- u-boot,panel-name = "edt,et0350g0dh6";
+ timing-et0350 {
clock-frequency = <6413760>;
hactive = <320>;
vactive = <240>;
@@ -160,8 +158,7 @@
pixelclk-active = <0>;
};
- ET0430 {
- u-boot,panel-name = "edt,et0430g0dh6";
+ timing-et0430 {
clock-frequency = <9009000>;
hactive = <480>;
vactive = <272>;
@@ -177,7 +174,7 @@
pixelclk-active = <1>;
};
- ET0500 {
+ timing-et0500 {
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
@@ -193,8 +190,7 @@
pixelclk-active = <0>;
};
- ET0700 { /* same as ET0500 */
- u-boot,panel-name = "edt,etm0700g0dh6";
+ timing-et0700 { /* same as ET0500 */
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
@@ -210,7 +206,7 @@
pixelclk-active = <0>;
};
- ETQ570 {
+ timing-etq570 {
clock-frequency = <6596040>;
hactive = <320>;
vactive = <240>;
@@ -226,8 +222,7 @@
pixelclk-active = <0>;
};
- CoMTFT { /* same as ET0700 but with inverted pixel clock */
- u-boot,panel-name = "edt,etm0700g0edh6";
+ timing-comtft { /* same as ET0700 but with inverted pixel clock */
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lvds.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lvds.dtsi
index 2ca2eb37e14f..4eb53d5677a6 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lvds.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6-lvds.dtsi
@@ -127,8 +127,7 @@
};
display-timings {
- hsd100pxn1 {
- u-boot,panel-name = "hannstar,hsd100pxn1";
+ timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
@@ -142,7 +141,7 @@
pixelclk-active = <1>;
};
- VGA {
+ timing-vga {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -158,8 +157,7 @@
pixelclk-active = <0>;
};
- nl12880bc20 {
- u-boot,panel-name = "nlt,nl12880bc20-spwg-24";
+ timing-nl12880bc20 {
clock-frequency = <71000000>;
hactive = <1280>;
vactive = <800>;
@@ -175,8 +173,7 @@
pixelclk-active = <1>;
};
- ET0700 {
- u-boot,panel-name = "edt,etm0700g0dh6";
+ timing-et0700 {
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
@@ -192,8 +189,7 @@
pixelclk-active = <0>;
};
- ETV570 {
- u-boot,panel-name = "edt,et057090dhu";
+ timing-etv570 {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -224,7 +220,7 @@
};
display-timings {
- hsd100pxn1 {
+ timing-hsd100pxn1 {
clock-frequency = <65000000>;
hactive = <1024>;
vactive = <768>;
@@ -238,7 +234,7 @@
pixelclk-active = <1>;
};
- VGA {
+ timing-vga {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -254,7 +250,7 @@
pixelclk-active = <0>;
};
- nl12880bc20 {
+ timing-nl12880bc20 {
clock-frequency = <71000000>;
hactive = <1280>;
vactive = <800>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi
index 1db146ac1c17..864173e30709 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi
@@ -405,7 +405,7 @@
status = "okay";
display-timings {
- VGA {
+ timing-vga {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -421,7 +421,7 @@
pixelclk-active = <1>;
};
- ETV570 {
+ timing-etv570 {
clock-frequency = <25200000>;
hactive = <640>;
vactive = <480>;
@@ -437,7 +437,7 @@
pixelclk-active = <1>;
};
- ET0350 {
+ timing-et0350 {
clock-frequency = <6413760>;
hactive = <320>;
vactive = <240>;
@@ -453,7 +453,7 @@
pixelclk-active = <1>;
};
- ET0430 {
+ timing-et0430 {
clock-frequency = <9009000>;
hactive = <480>;
vactive = <272>;
@@ -469,7 +469,7 @@
pixelclk-active = <0>;
};
- ET0500 {
+ timing-et0500 {
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
@@ -485,7 +485,7 @@
pixelclk-active = <1>;
};
- ET0700 { /* same as ET0500 */
+ timing-et0700 { /* same as ET0500 */
clock-frequency = <33264000>;
hactive = <800>;
vactive = <480>;
@@ -501,7 +501,7 @@
pixelclk-active = <1>;
};
- ETQ570 {
+ timing-etq570 {
clock-frequency = <6596040>;
hactive = <320>;
vactive = <240>;
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts b/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts
index 5485fe118dc4..d38183edf0fd 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts
@@ -323,7 +323,6 @@
display-timings {
native-mode = <&timing5>;
timing0: timing0 {
- panel-name = "VGA";
clock-frequency = <25175000>;
hactive = <640>;
vactive = <480>;
@@ -340,7 +339,6 @@
};
timing1: timing1 {
- panel-name = "ETV570";
clock-frequency = <25175000>;
hactive = <640>;
vactive = <480>;
@@ -357,7 +355,6 @@
};
timing2: timing2 {
- panel-name = "ET0350";
clock-frequency = <6500000>;
hactive = <320>;
vactive = <240>;
@@ -374,7 +371,6 @@
};
timing3: timing3 {
- panel-name = "ET0430";
clock-frequency = <9000000>;
hactive = <480>;
vactive = <272>;
@@ -391,7 +387,6 @@
};
timing4: timing4 {
- panel-name = "ET0500", "ET0700";
clock-frequency = <33260000>;
hactive = <800>;
vactive = <480>;
@@ -408,7 +403,6 @@
};
timing5: timing5 {
- panel-name = "ETQ570";
clock-frequency = <6400000>;
hactive = <320>;
vactive = <240>;
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index e2e922bdc9e9..f06c6d425e91 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -6,6 +6,7 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-apq8026-huawei-sturgeon.dtb \
qcom-apq8026-lg-lenok.dtb \
qcom-apq8026-samsung-matisse-wifi.dtb \
+ qcom-apq8026-samsung-milletwifi.dtb \
qcom-apq8060-dragonboard.dtb \
qcom-apq8064-cm-qs600.dtb \
qcom-apq8064-ifc6410.dtb \
@@ -27,6 +28,7 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-msm8226-microsoft-dempsey.dtb \
qcom-msm8226-microsoft-makepeace.dtb \
qcom-msm8226-microsoft-moneypenny.dtb \
+ qcom-msm8226-samsung-ms013g.dtb \
qcom-msm8226-samsung-s3ve3g.dtb \
qcom-msm8660-surf.dtb \
qcom-msm8916-samsung-e5.dtb \
@@ -41,12 +43,15 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-msm8960-cdp.dtb \
qcom-msm8960-samsung-expressatt.dtb \
qcom-msm8974-lge-nexus5-hammerhead.dtb \
+ qcom-msm8974-samsung-hlte.dtb \
qcom-msm8974-sony-xperia-rhine-amami.dtb \
qcom-msm8974-sony-xperia-rhine-honami.dtb \
qcom-msm8974pro-fairphone-fp2.dtb \
+ qcom-msm8974pro-htc-m8.dtb \
qcom-msm8974pro-oneplus-bacon.dtb \
qcom-msm8974pro-samsung-klte.dtb \
qcom-msm8974pro-samsung-kltechn.dtb \
+ qcom-msm8974pro-sony-xperia-shinano-aries.dtb \
qcom-msm8974pro-sony-xperia-shinano-castor.dtb \
qcom-msm8974pro-sony-xperia-shinano-leo.dtb \
qcom-mdm9615-wp8548-mangoh-green.dtb \
diff --git a/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts b/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
index 029e1b1659c9..5dbca83f2230 100644
--- a/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
+++ b/arch/arm/boot/dts/qcom/msm8226-motorola-falcon.dts
@@ -96,6 +96,35 @@
};
};
+&blsp1_i2c2 {
+ status = "okay";
+
+ magnetometer@c {
+ compatible = "asahi-kasei,ak8963";
+ reg = <0xc>;
+ interrupts-extended = <&tlmm 66 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&pm8226_l19>;
+ vid-supply = <&pm8226_lvs1>;
+ pinctrl-0 = <&mag_int_default &mag_reset_default>;
+ pinctrl-names = "default";
+ };
+
+ accelerometer@19 {
+ compatible = "st,lis3dh-accel";
+ reg = <0x19>;
+ interrupts-extended = <&tlmm 63 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&pm8226_l19>;
+ vddio-supply = <&pm8226_lvs1>;
+ pinctrl-0 = <&accel_int_default>;
+ pinctrl-names = "default";
+ mount-matrix = "0", "1", "0",
+ "1", "0", "0",
+ "0", "0", "-1";
+ st,drdy-int-pin = <1>;
+ };
+};
+
&blsp1_i2c3 {
status = "okay";
@@ -321,6 +350,30 @@
};
&tlmm {
+ accel_int_default: accel-int-default-state {
+ pins = "gpio63";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-disable;
+ };
+
+ mag_int_default: mag-int-default-state {
+ pins = "gpio66";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-disable;
+ };
+
+ mag_reset_default: mag-reset-default-state {
+ pins = "gpio62";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
reg_lcd_default: reg-lcd-default-state {
pins = "gpio31", "gpio33";
function = "gpio";
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts
new file mode 100644
index 000000000000..7d519156d91d
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-milletwifi.dts
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2022, Matti Lehtimäki <matti.lehtimaki@gmail.com>
+ * Copyright (c) 2023, Bryant Mairs <bryant@mai.rs>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/power/summit,smb347-charger.h>
+#include "qcom-msm8226.dtsi"
+#include "pm8226.dtsi"
+
+/delete-node/ &adsp_region;
+/delete-node/ &smem_region;
+
+/ {
+ model = "Samsung Galaxy Tab 4 8.0 Wi-Fi";
+ compatible = "samsung,milletwifi", "qcom,apq8026";
+ chassis-type = "tablet";
+
+ aliases {
+ display0 = &framebuffer0;
+ mmc0 = &sdhc_1; /* SDC1 eMMC slot */
+ mmc1 = &sdhc_2; /* SDC2 SD card slot */
+ };
+
+ chosen {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ stdout-path = "display0";
+
+ framebuffer0: framebuffer@3200000 {
+ compatible = "simple-framebuffer";
+ reg = <0x03200000 0x800000>;
+ width = <800>;
+ height = <1280>;
+ stride = <(800 * 3)>;
+ format = "r8g8b8";
+ };
+ };
+
+ gpio-hall-sensor {
+ compatible = "gpio-keys";
+
+ event-hall-sensor {
+ label = "Cover";
+ gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ key-home {
+ label = "Home";
+ gpios = <&tlmm 108 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_HOMEPAGE>;
+ debounce-interval = <15>;
+ };
+
+ key-volume-down {
+ label = "Volume Down";
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <15>;
+ };
+
+ key-volume-up {
+ label = "Volume Up";
+ gpios = <&tlmm 106 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ i2c-backlight {
+ compatible = "i2c-gpio";
+ sda-gpios = <&tlmm 20 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&tlmm 21 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+
+ pinctrl-0 = <&backlight_i2c_default_state>;
+ pinctrl-names = "default";
+
+ i2c-gpio,delay-us = <4>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ backlight@2c {
+ compatible = "ti,lp8556";
+ reg = <0x2c>;
+ enable-supply = <&reg_backlight_vddio>;
+
+ dev-ctrl = /bits/ 8 <0x80>;
+ init-brt = /bits/ 8 <0x3f>;
+
+ /*
+ * Change transition duration: 200ms, Change
+ * transition strength: heavy, PWM hysteresis:
+ * 1-bit w/ 8-bit resolution
+ */
+ rom-a3h {
+ rom-addr = /bits/ 8 <0xa3>;
+ rom-val = /bits/ 8 <0x5e>;
+ };
+
+ /*
+ * PWM phase configuration: 3-phase/3 drivers
+ * (0, 120deg, 240deg, -, -, -),
+ * PWM frequency: 9616Hz (10-bit)
+ */
+ rom-a5h {
+ rom-addr = /bits/ 8 <0xa5>;
+ rom-val = /bits/ 8 <0x34>;
+ };
+
+ /*
+ * Enable LED drivers 2 & 3, Boot inductor
+ * current limit: 1.5A/2.6A
+ */
+ rom-a7h {
+ rom-addr = /bits/ 8 <0xa7>;
+ rom-val = /bits/ 8 <0xfa>;
+ };
+ };
+ };
+
+ reg_backlight_vddio: regulator-backlight-vddio {
+ compatible = "regulator-fixed";
+ regulator-name = "backlight_vddio";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 74 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&backlight_vddio_default_state>;
+ pinctrl-names = "default";
+ };
+
+ reg_tsp_1p8v: regulator-tsp-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "tsp_1p8v";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 114 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&tsp_en1_default_state>;
+ pinctrl-names = "default";
+ };
+
+ reg_tsp_3p3v: regulator-tsp-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "tsp_3p3v";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 31 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&tsp_en_default_state>;
+ pinctrl-names = "default";
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ framebuffer@3200000 {
+ reg = <0x03200000 0x800000>;
+ no-map;
+ };
+
+ mpss_region: mpss@8400000 {
+ reg = <0x08400000 0x1f00000>;
+ no-map;
+ };
+
+ mba_region: mba@a300000 {
+ reg = <0x0a300000 0x100000>;
+ no-map;
+ };
+
+ reserved@cb00000 {
+ reg = <0x0cb00000 0x700000>;
+ no-map;
+ };
+
+ wcnss_region: wcnss@d200000 {
+ reg = <0x0d200000 0x700000>;
+ no-map;
+ };
+
+ adsp_region: adsp@d900000 {
+ reg = <0x0d900000 0x1800000>;
+ no-map;
+ };
+
+ venus@f100000 {
+ reg = <0x0f100000 0x500000>;
+ no-map;
+ };
+
+ smem_region: smem@fa00000 {
+ reg = <0x0fa00000 0x100000>;
+ no-map;
+ };
+
+ reserved@fb00000 {
+ reg = <0x0fb00000 0x260000>;
+ no-map;
+ };
+
+ rfsa@fd60000 {
+ reg = <0x0fd60000 0x20000>;
+ no-map;
+ };
+
+ rmtfs@fd80000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x0fd80000 0x180000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ };
+ };
+};
+
+&blsp1_i2c2 {
+ status = "okay";
+
+ accelerometer@1d {
+ compatible = "st,lis2hh12";
+ reg = <0x1d>;
+
+ interrupts-extended = <&tlmm 54 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-0 = <&accel_int_default_state>;
+ pinctrl-names = "default";
+
+ vdd-supply = <&pm8226_l19>;
+ vddio-supply = <&pm8226_lvs1>;
+
+ mount-matrix = "0", "1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+
+ st,drdy-int-pin = <1>;
+ };
+};
+
+&blsp1_i2c3 {
+ status = "okay";
+
+ charger@6a {
+ compatible = "summit,smb358";
+ reg = <0x6a>;
+
+ interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&charger_int_default_state>;
+ pinctrl-names = "default";
+
+ summit,enable-usb-charging;
+ summit,enable-charge-control = <SMB3XX_CHG_ENABLE_SW>;
+ summit,fast-voltage-threshold-microvolt = <3000000>;
+ summit,chip-temperature-threshold-celsius = <130>;
+ summit,usb-current-limit-microamp = <1500000>;
+ };
+};
+
+&blsp1_i2c4 {
+ status = "okay";
+
+ muic: usb-switch@25 {
+ compatible = "siliconmitus,sm5502-muic";
+ reg = <0x25>;
+
+ interrupts-extended = <&tlmm 67 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&muic_int_default_state>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp1_i2c5 {
+ status = "okay";
+
+ touchscreen@48 {
+ compatible = "melfas,mms252", "melfas,mms114";
+ reg = <0x48>;
+ interrupts-extended = <&tlmm 17 IRQ_TYPE_EDGE_FALLING>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <1280>;
+ avdd-supply = <&reg_tsp_3p3v>;
+ vdd-supply = <&reg_tsp_1p8v>;
+ linux,keycodes = <KEY_APPSELECT KEY_BACK>;
+
+ pinctrl-0 = <&tsp_int_rst_default_state>;
+ pinctrl-names = "default";
+ };
+};
+
+&rpm_requests {
+ regulators {
+ compatible = "qcom,rpm-pm8226-regulators";
+
+ pm8226_s3: s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pm8226_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_s5: s5 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+
+ pm8226_l1: l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8226_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l3: l3 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1337500>;
+ regulator-always-on;
+ };
+
+ pm8226_l4: l4 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l5: l5 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ pm8226_l7: l7 {
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <1850000>;
+ };
+
+ pm8226_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ pm8226_l9: l9 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8226_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l14: l14 {
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <2750000>;
+ };
+
+ pm8226_l15: l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8226_l16: l16 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3350000>;
+ };
+
+ pm8226_l17: l17 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-system-load = <200000>;
+ regulator-allow-set-load;
+ regulator-always-on;
+ };
+
+ pm8226_l18: l18 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l19: l19 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8226_l20: l20 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ pm8226_l21: l21 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l22: l22 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8226_l23: l23 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8226_l24: l24 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ pm8226_l25: l25 {
+ regulator-min-microvolt = <1775000>;
+ regulator-max-microvolt = <2125000>;
+ };
+
+ pm8226_l26: l26 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pm8226_l27: l27 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l28: l28 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_lvs1: lvs1 {};
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8226_l17>;
+ vqmmc-supply = <&pm8226_l6>;
+
+ bus-width = <8>;
+ non-removable;
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ vmmc-supply = <&pm8226_l18>;
+ vqmmc-supply = <&pm8226_l21>;
+
+ bus-width = <4>;
+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&sdhc2_default_state>, <&sdc2_cd_default_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&tlmm {
+ accel_int_default_state: accel-int-default-state {
+ pins = "gpio54";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ backlight_i2c_default_state: backlight-i2c-default-state {
+ pins = "gpio20", "gpio21";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ backlight_vddio_default_state: backlight-vddio-default-state {
+ pins = "gpio74";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ charger_int_default_state: charger-int-default-state {
+ pins = "gpio115";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ muic_int_default_state: muic-int-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdc2_cd_default_state: sdc2-cd-default-state {
+ pins = "gpio38";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tsp_en_default_state: tsp-en-default-state {
+ pins = "gpio31";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tsp_en1_default_state: tsp-en1-default-state {
+ pins = "gpio114";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tsp_int_rst_default_state: tsp-int-rst-default-state {
+ pins = "gpio17";
+ function = "gpio";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+};
+
+&usb {
+ extcon = <&muic>, <&muic>;
+ status = "okay";
+};
+
+&usb_hs_phy {
+ extcon = <&muic>;
+ v1p8-supply = <&pm8226_l10>;
+ v3p3-supply = <&pm8226_l20>;
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
index 11e60b74c3c9..769e151747c3 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
@@ -666,12 +666,12 @@
qcom,controller-type = "pmic-arbiter";
};
- qfprom: qfprom@700000 {
+ qfprom: efuse@700000 {
compatible = "qcom,apq8064-qfprom", "qcom,qfprom";
reg = <0x00700000 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
- ranges;
+
tsens_calib: calib@404 {
reg = <0x404 0x10>;
};
@@ -684,7 +684,6 @@
compatible = "qcom,gcc-apq8064", "syscon";
reg = <0x00900000 0x4000>;
#clock-cells = <1>;
- #power-domain-cells = <1>;
#reset-cells = <1>;
clocks = <&cxo_board>,
<&pxo_board>,
@@ -993,7 +992,7 @@
reg = <0x1a400000 0x100>;
};
- gpu: adreno-3xx@4300000 {
+ gpu: gpu@4300000 {
compatible = "qcom,adreno-320.2", "qcom,adreno";
reg = <0x04300000 0x20000>;
reg-names = "kgsl_3d0_reg_memory";
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
index ca53dff820ef..2b52e5d5eb51 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
@@ -245,7 +245,7 @@
reg = <0xfc190000 0x10000>;
};
- qfprom: qfprom@fc4bc000 {
+ qfprom: efuse@fc4bc000 {
compatible = "qcom,apq8084-qfprom", "qcom,qfprom";
reg = <0xfc4bc000 0x1000>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
index 0fb65f2bbcdf..56415ab34083 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019.dtsi
@@ -187,7 +187,6 @@
gcc: clock-controller@1800000 {
compatible = "qcom,gcc-ipq4019";
#clock-cells = <1>;
- #power-domain-cells = <1>;
#reset-cells = <1>;
reg = <0x1800000 0x60000>;
clocks = <&xo>, <&sleep_clk>;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
index f128510d8445..da0fd75f4711 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
@@ -372,7 +372,7 @@
qcom,controller-type = "pmic-arbiter";
};
- qfprom: qfprom@700000 {
+ qfprom: efuse@700000 {
compatible = "qcom,ipq8064-qfprom", "qcom,qfprom";
reg = <0x00700000 0x1000>;
#address-cells = <1>;
@@ -519,7 +519,6 @@
reg = <0x00900000 0x4000>;
#clock-cells = <1>;
#reset-cells = <1>;
- #power-domain-cells = <1>;
tsens: thermal-sensor {
compatible = "qcom,ipq8064-tsens";
diff --git a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
index 34c60994d026..573feb3218c3 100644
--- a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
@@ -102,7 +102,6 @@
gcc: clock-controller@900000 {
compatible = "qcom,gcc-mdm9615";
#clock-cells = <1>;
- #power-domain-cells = <1>;
#reset-cells = <1>;
reg = <0x900000 0x4000>;
clocks = <&cxo_board>,
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
index 525d8c608b06..8839b23fc693 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
@@ -287,6 +287,10 @@
status = "okay";
};
+&smbb {
+ status = "okay";
+};
+
&usb {
extcon = <&smbb>;
dr_mode = "peripheral";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-samsung-ms013g.dts b/arch/arm/boot/dts/qcom/qcom-msm8226-samsung-ms013g.dts
new file mode 100644
index 000000000000..2ecc5983d365
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-samsung-ms013g.dts
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/dts-v1/;
+
+#include "qcom-msm8226.dtsi"
+#include "pm8226.dtsi"
+
+/delete-node/ &smem_region;
+
+/ {
+ model = "Samsung Galaxy Grand 2";
+ compatible = "samsung,ms013g", "qcom,msm8226";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &sdhc_1; /* SDC1 eMMC slot */
+ mmc1 = &sdhc_2; /* SDC2 SD card slot */
+ serial0 = &blsp1_uart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-hall-sensor {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_hall_sensor_default>;
+ pinctrl-names = "default";
+
+ label = "GPIO Hall Effect Sensor";
+
+ event-hall-sensor {
+ label = "Hall Effect Sensor";
+ gpios = <&tlmm 50 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ linux,can-disable;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ label = "GPIO Buttons";
+
+ button-volume-up {
+ label = "Volume Up";
+ gpios = <&tlmm 106 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ button-volume-down {
+ label = "Volume Down";
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+ button-home {
+ label = "Home Key";
+ gpios = <&tlmm 108 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_HOMEPAGE>;
+ };
+ };
+
+ reg_motor_vdd: regulator-motor-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "motor_vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 111 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&motor_en_default>;
+ pinctrl-names = "default";
+ };
+
+ reg_vdd_tsp_a: regulator-vdd-tsp-a {
+ compatible = "regulator-fixed";
+ regulator-name = "tsp_3p3v";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 31 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&tsp_en_default>;
+ pinctrl-names = "default";
+ };
+
+ reserved-memory {
+ smem_region: smem@fa00000 {
+ reg = <0x0fa00000 0x100000>;
+ no-map;
+ };
+ };
+
+ vibrator {
+ compatible = "regulator-haptic";
+ haptic-supply = <&reg_motor_vdd>;
+ min-microvolt = <3300000>;
+ max-microvolt = <3300000>;
+ };
+};
+
+&blsp1_i2c2 {
+ status = "okay";
+
+ accelerometer@18 {
+ compatible = "bosch,bma255";
+ reg = <0x18>;
+ interrupts-extended = <&tlmm 64 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&pm8226_l19>;
+ vddio-supply = <&pm8226_lvs1>;
+
+ pinctrl-0 = <&accel_int_default>;
+ pinctrl-names = "default";
+
+ mount-matrix = "0", "1", "0",
+ "-1", "0", "0",
+ "0", "0", "-1";
+ };
+};
+
+&blsp1_i2c5 {
+ status = "okay";
+
+ touchscreen@20 {
+ compatible = "zinitix,bt541";
+
+ reg = <0x20>;
+ interrupts-extended = <&tlmm 17 IRQ_TYPE_EDGE_FALLING>;
+
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+
+ vcca-supply = <&reg_vdd_tsp_a>;
+ vdd-supply = <&pm8226_lvs1>;
+
+ pinctrl-0 = <&tsp_int_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp1_uart3 {
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators {
+ compatible = "qcom,rpm-pm8226-regulators";
+
+ pm8226_s3: s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pm8226_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2200000>;
+ };
+
+ pm8226_s5: s5 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+
+ pm8226_l1: l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8226_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l3: l3 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1337500>;
+ };
+
+ pm8226_l4: l4 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l5: l5 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8226_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-always-on;
+ };
+
+ pm8226_l7: l7 {
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <1850000>;
+ };
+
+ pm8226_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l9: l9 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8226_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8226_l14: l14 {
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <2750000>;
+ };
+
+ pm8226_l15: l15 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ pm8226_l16: l16 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3350000>;
+ };
+
+ pm8226_l17: l17 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-system-load = <200000>;
+ regulator-allow-set-load;
+ regulator-always-on;
+ };
+
+ pm8226_l18: l18 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l19: l19 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8226_l20: l20 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ pm8226_l21: l21 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+
+ pm8226_l22: l22 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_l23: l23 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8226_l24: l24 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ pm8226_l25: l25 {
+ regulator-min-microvolt = <1775000>;
+ regulator-max-microvolt = <2125000>;
+ };
+
+ pm8226_l26: l26 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pm8226_l27: l27 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8226_l28: l28 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8226_lvs1: lvs1 {};
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8226_l17>;
+ vqmmc-supply = <&pm8226_l6>;
+
+ bus-width = <8>;
+ non-removable;
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ vmmc-supply = <&pm8226_l18>;
+ vqmmc-supply = <&pm8226_l21>;
+
+ bus-width = <4>;
+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&sdhc2_default_state &sdhc2_cd_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&tlmm {
+ accel_int_default: accel-int-default-state {
+ pins = "gpio64";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ gpio_hall_sensor_default: gpio-hall-sensor-default-state {
+ pins = "gpio50";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio106", "gpio107", "gpio108";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ motor_en_default: motor-en-default-state {
+ pins = "gpio111";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdhc2_cd_default: sdhc2-cd-default-state {
+ pins = "gpio38";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tsp_en_default: tsp-en-default-state {
+ pins = "gpio31";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tsp_int_default: tsp-int-default-state {
+ pins = "gpio17";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
index 270973e85625..b2f92ad6499a 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
@@ -635,7 +635,7 @@
reg = <0xfc4ab000 0x4>;
};
- qfprom: qfprom@fc4bc000 {
+ qfprom: efuse@fc4bc000 {
compatible = "qcom,msm8226-qfprom", "qcom,qfprom";
reg = <0xfc4bc000 0x1000>;
#address-cells = <1>;
@@ -1046,7 +1046,7 @@
};
};
- gpu: adreno@fdb00000 {
+ gpu: gpu@fdb00000 {
compatible = "qcom,adreno-305.18", "qcom,adreno";
reg = <0xfdb00000 0x10000>;
reg-names = "kgsl_3d0_reg_memory";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi
index 455ba4bf1bf4..a66c474cd1aa 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi
@@ -113,7 +113,6 @@
gcc: clock-controller@900000 {
compatible = "qcom,gcc-msm8660";
#clock-cells = <1>;
- #power-domain-cells = <1>;
#reset-cells = <1>;
reg = <0x900000 0x4000>;
clocks = <&pxo_board>, <&cxo_board>;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts
index 0cbe2d2fbbb1..376a33125941 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8926-motorola-peregrine.dts
@@ -29,6 +29,10 @@
height = <1280>;
stride = <(720 * 3)>;
format = "r8g8b8";
+ vsp-supply = <&reg_lcd_pos>;
+ vsn-supply = <&reg_lcd_neg>;
+ vdd-supply = <&pm8226_l28>;
+ vddio-supply = <&vddio_disp_vreg>;
};
};
@@ -51,6 +55,18 @@
};
};
+ vddio_disp_vreg: regulator-vddio-disp {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio_disp";
+ gpio = <&tlmm 34 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <300>;
+ enable-active-high;
+ regulator-boot-on;
+ vin-supply = <&pm8226_l8>;
+ pinctrl-0 = <&disp_vddio_default>;
+ pinctrl-names = "default";
+ };
+
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
@@ -68,12 +84,67 @@
};
};
+&blsp1_i2c2 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ magnetometer@c {
+ compatible = "asahi-kasei,ak8963";
+ reg = <0xc>;
+ interrupts-extended = <&tlmm 38 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&pm8226_l19>;
+ pinctrl-0 = <&mag_int_default &mag_reset_default>;
+ pinctrl-names = "default";
+ };
+
+ accelerometer@18 {
+ compatible = "st,lis3dh-accel";
+ reg = <0x18>;
+ interrupts-extended = <&tlmm 1 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&pm8226_l19>;
+ pinctrl-0 = <&accel_int_default>;
+ pinctrl-names = "default";
+ st,drdy-int-pin = <1>;
+ };
+};
+
&blsp1_i2c3 {
+ clock-frequency = <400000>;
status = "okay";
+ regulator@3e {
+ compatible = "ti,tps65132";
+ reg = <0x3e>;
+ pinctrl-0 = <&reg_lcd_default>;
+ pinctrl-names = "default";
+
+ reg_lcd_pos: outp {
+ regulator-name = "outp";
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <6000000>;
+ regulator-active-discharge = <1>;
+ regulator-boot-on;
+ enable-gpios = <&tlmm 31 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_lcd_neg: outn {
+ regulator-name = "outn";
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <6000000>;
+ regulator-active-discharge = <1>;
+ regulator-boot-on;
+ enable-gpios = <&tlmm 33 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
sensor@48 {
compatible = "ti,tmp108";
reg = <0x48>;
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&temp_alert_default>;
+ pinctrl-names = "default";
+ #thermal-sensor-cells = <0>;
};
};
@@ -278,6 +349,56 @@
status = "okay";
};
+&tlmm {
+ accel_int_default: accel-int-default-state {
+ pins = "gpio1";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-disable;
+ };
+
+ disp_vddio_default: disp-vddio-default-state {
+ pins = "gpio34";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
+ mag_int_default: mag-int-default-state {
+ pins = "gpio38";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-disable;
+ };
+
+ mag_reset_default: mag-reset-default-state {
+ pins = "gpio62";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
+ reg_lcd_default: reg-lcd-default-state {
+ pins = "gpio31", "gpio33";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
+ temp_alert_default: temp-alert-default-state {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-disable;
+ };
+};
+
&usb {
extcon = <&smbb>;
dr_mode = "peripheral";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi
index 922f9e49468a..ebc43c5c6e5f 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8960.dtsi
@@ -47,9 +47,9 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
- reg = <0x0 0x0>;
+ reg = <0x80000000 0>;
};
cpu-pmu {
@@ -129,7 +129,6 @@
gcc: clock-controller@900000 {
compatible = "qcom,gcc-msm8960";
#clock-cells = <1>;
- #power-domain-cells = <1>;
#reset-cells = <1>;
reg = <0x900000 0x4000>;
clocks = <&cxo_board>,
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts
index 4aaae8537a3f..fdb6e22986cf 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -182,7 +182,7 @@
status = "okay";
clock-frequency = <355000>;
- led-controller@38 {
+ backlight: led-controller@38 {
compatible = "ti,lm3630a";
status = "okay";
reg = <0x38>;
@@ -272,6 +272,8 @@
reg = <0>;
compatible = "lg,acx467akm-7";
+ backlight = <&backlight>;
+
pinctrl-names = "default";
pinctrl-0 = <&panel_pin>;
@@ -328,7 +330,7 @@
power-source = <PM8941_GPIO_S3>;
};
- otg {
+ otg-hog {
gpio-hog;
gpios = <35 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974-samsung-hlte.dts b/arch/arm/boot/dts/qcom/qcom-msm8974-samsung-hlte.dts
new file mode 100644
index 000000000000..903bb4d12513
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974-samsung-hlte.dts
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "qcom-msm8974.dtsi"
+#include "pm8841.dtsi"
+#include "pm8941.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+/ {
+ model = "Samsung Galaxy Note 3";
+ compatible = "samsung,hlte", "qcom,msm8974";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &sdhc_1; /* SDC1 eMMC slot */
+ mmc1 = &sdhc_3; /* SDC3 SD card slot */
+ serial0 = &blsp1_uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_pin_a>;
+ pinctrl-names = "default";
+
+ key-home {
+ label = "Home Key";
+ gpios = <&pm8941_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_HOMEPAGE>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+
+ key-volume-down {
+ label = "Volume Down";
+ gpios = <&pm8941_gpios 2 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <15>;
+ };
+
+ key-volume-up {
+ label = "Volume Up";
+ gpios = <&pm8941_gpios 5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ touch_ldo: regulator-touch {
+ compatible = "regulator-fixed";
+ regulator-name = "touch-ldo";
+
+ gpio = <&pm8941_gpios 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+
+ pinctrl-0 = <&touch_ldo_pin>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp1_i2c2 {
+ status = "okay";
+
+ touchscreen@20 {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x20>;
+
+ interrupt-parent = <&pm8941_gpios>;
+ interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&pm8941_l10>;
+ vio-supply = <&touch_ldo>;
+
+ pinctrl-0 = <&touch_pin>;
+ pinctrl-names = "default";
+
+ syna,startup-delay-ms = <100>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f12@12 {
+ reg = <0x12>;
+ syna,sensor-type = <1>;
+ };
+ };
+};
+
+&blsp2_i2c6 {
+ status = "okay";
+
+ fuelgauge@36 {
+ compatible = "maxim,max17048";
+ reg = <0x36>;
+
+ maxim,double-soc;
+ maxim,rcomp = /bits/ 8 <0x56>;
+
+ interrupt-parent = <&pm8941_gpios>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&fuelgauge_pin>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&pm8941_gpios {
+ gpio_keys_pin_a: gpio-keys-active-state {
+ pins = "gpio2", "gpio3", "gpio5";
+ function = "normal";
+ bias-pull-up;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ fuelgauge_pin: fuelgauge-int-state {
+ pins = "gpio26";
+ function = "normal";
+ bias-disable;
+ input-enable;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ touch_pin: touchscreen-int-state {
+ pins = "gpio30";
+ function = "normal";
+ bias-disable;
+ input-enable;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ touch_ldo_pin: touchscreen-ldo-state {
+ pins = "gpio9";
+ function = "normal";
+ output-high;
+ power-source = <PM8941_GPIO_S3>;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ };
+};
+
+&remoteproc_adsp {
+ cx-supply = <&pm8841_s2>;
+ status = "okay";
+};
+
+&remoteproc_mss {
+ cx-supply = <&pm8841_s2>;
+ mss-supply = <&pm8841_s3>;
+ mx-supply = <&pm8841_s1>;
+ pll-supply = <&pm8941_l12>;
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators-0 {
+ compatible = "qcom,rpm-pm8841-regulators";
+
+ pm8841_s1: s1 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s2: s2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s3: s3 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s4: s4 {
+ regulator-min-microvolt = <815000>;
+ regulator-max-microvolt = <900000>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,rpm-pm8941-regulators";
+
+ pm8941_s1: s1 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ };
+
+ pm8941_s2: s2 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+
+ pm8941_s3: s3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l1: l1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8941_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8941_l3: l3 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8941_l4: l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8941_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l9: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8941_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l11: l11 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ pm8941_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ pm8941_l13: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8941_l14: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l15: l15 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8941_l16: l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pm8941_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8941_l18: l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8941_l19: l19 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3350000>;
+ };
+
+ pm8941_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <200000>;
+ regulator-allow-set-load;
+ };
+
+ pm8941_l21: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <200000>;
+ regulator-allow-set-load;
+ };
+
+ pm8941_l22: l22 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8941_l23: l23 {
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8941_l24: l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8941_l20>;
+ vqmmc-supply = <&pm8941_s3>;
+
+ pinctrl-0 = <&sdhc1_pin_a>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&sdhc_3 {
+ max-frequency = <100000000>;
+
+ vmmc-supply = <&pm8941_l21>;
+ vqmmc-supply = <&pm8941_l21>;
+
+ pinctrl-0 = <&sdhc3_pin_a>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&tlmm {
+ sdhc1_pin_a: sdhc1-pin-active-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <4>;
+ bias-disable;
+ };
+
+ cmd-data-pins {
+ pins = "sdc1_cmd", "sdc1_data";
+ drive-strength = <4>;
+ bias-pull-up;
+ };
+ };
+
+ sdhc3_pin_a: sdhc3-pin-active-state {
+ pins = "gpio35", "gpio36", "gpio37", "gpio38", "gpio39", "gpio40";
+ function = "sdc3";
+ drive-strength = <8>;
+ bias-disable;
+ };
+};
+
+&usb {
+ phys = <&usb_hs1_phy>;
+ phy-select = <&tcsr 0xb000 0>;
+
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+
+ status = "okay";
+};
+
+&usb_hs1_phy {
+ v1p8-supply = <&pm8941_l6>;
+ v3p3-supply = <&pm8941_l24>;
+
+ qcom,init-seq = /bits/ 8 <0x1 0x64>;
+
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
index 5651bb31bd54..15568579459a 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
@@ -132,7 +132,7 @@
smd-edge {
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 0>;
+ mboxes = <&apcs 0>;
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
@@ -219,7 +219,7 @@
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 10>;
+ mboxes = <&apcs 10>;
qcom,local-pid = <0>;
qcom,remote-pid = <2>;
@@ -244,7 +244,7 @@
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 14>;
+ mboxes = <&apcs 14>;
qcom,local-pid = <0>;
qcom,remote-pid = <1>;
@@ -269,7 +269,7 @@
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 18>;
+ mboxes = <&apcs 18>;
qcom,local-pid = <0>;
qcom,remote-pid = <4>;
@@ -294,9 +294,7 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ipc-1 = <&apcs 8 13>;
- qcom,ipc-2 = <&apcs 8 9>;
- qcom,ipc-3 = <&apcs 8 19>;
+ mboxes = <0>, <&apcs 13>, <&apcs 9>, <&apcs 19>;
apps_smsm: apps@0 {
reg = <0>;
@@ -343,9 +341,11 @@
<0xf9002000 0x1000>;
};
- apcs: syscon@f9011000 {
- compatible = "syscon";
+ apcs: mailbox@f9011000 {
+ compatible = "qcom,msm8974-apcs-kpss-global",
+ "qcom,msm8994-apcs-kpss-global", "syscon";
reg = <0xf9011000 0x1000>;
+ #mbox-cells = <1>;
};
saw_l2: power-manager@f9012000 {
@@ -757,7 +757,7 @@
smd-edge {
interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 17>;
+ mboxes = <&apcs 17>;
qcom,smd-edge = <6>;
wcnss {
@@ -1233,7 +1233,7 @@
reg = <0xfc4ab000 0x4>;
};
- qfprom: qfprom@fc4bc000 {
+ qfprom: efuse@fc4bc000 {
compatible = "qcom,msm8974-qfprom", "qcom,qfprom";
reg = <0xfc4bc000 0x2100>;
#address-cells = <1>;
@@ -1576,7 +1576,7 @@
smd-edge {
interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 12>;
+ mboxes = <&apcs 12>;
qcom,smd-edge = <0>;
label = "modem";
@@ -2129,7 +2129,7 @@
};
};
- gpu: adreno@fdb00000 {
+ gpu: gpu@fdb00000 {
compatible = "qcom,adreno-330.1", "qcom,adreno";
reg = <0xfdb00000 0x10000>;
reg-names = "kgsl_3d0_reg_memory";
@@ -2213,7 +2213,7 @@
smd-edge {
interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 8>;
+ mboxes = <&apcs 8>;
qcom,smd-edge = <1>;
label = "lpass";
};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-htc-m8.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-htc-m8.dts
new file mode 100644
index 000000000000..b896cc1ad6f7
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-htc-m8.dts
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "qcom-msm8974pro.dtsi"
+#include "pm8841.dtsi"
+#include "pm8941.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "HTC One (M8)";
+ compatible = "htc,m8", "qcom,msm8974pro", "qcom,msm8974";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &sdhc_1;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ key-volume-down {
+ label = "volume_down";
+ gpios = <&tlmm 27 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <20>;
+ wakeup-source;
+ };
+
+ key-volume-up {
+ label = "volume_up";
+ gpios = <&tlmm 28 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <20>;
+ wakeup-source;
+ };
+ };
+
+ vreg_boost: vreg-boost {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vreg-boost";
+ regulator-min-microvolt = <3150000>;
+ regulator-max-microvolt = <3150000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&pm8941_gpios 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&boost_bypass_n_pin>;
+ pinctrl-names = "default";
+ };
+
+ vreg_vph_pwr: vreg-vph-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vph-pwr";
+
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+
+ regulator-always-on;
+ };
+};
+
+&pm8941_vib {
+ status = "okay";
+};
+
+&pronto {
+ vddmx-supply = <&pm8841_s1>;
+ vddcx-supply = <&pm8841_s2>;
+ vddpx-supply = <&pm8941_s3>;
+
+ pinctrl-0 = <&wcnss_pin_a>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ iris {
+ vddxo-supply = <&pm8941_l6>;
+ vddrfa-supply = <&pm8941_l11>;
+ vddpa-supply = <&pm8941_l19>;
+ vdddig-supply = <&pm8941_s3>;
+ };
+
+ smd-edge {
+ qcom,remote-pid = <4>;
+ label = "pronto";
+
+ wcnss {
+ status = "okay";
+ };
+ };
+};
+
+&rpm_requests {
+ regulators-0 {
+ compatible = "qcom,rpm-pm8841-regulators";
+
+ pm8841_s1: s1 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s2: s2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s3: s3 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pm8841_s4: s4 {
+ regulator-min-microvolt = <815000>;
+ regulator-max-microvolt = <900000>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,rpm-pm8941-regulators";
+
+ vdd_l1_l3-supply = <&pm8941_s1>;
+ vdd_l2_lvs1_2_3-supply = <&pm8941_s3>;
+ vdd_l4_l11-supply = <&pm8941_s1>;
+ vdd_l5_l7-supply = <&pm8941_s2>;
+ vdd_l6_l12_l14_l15-supply = <&pm8941_s2>;
+ vdd_l8_l16_l18_l19-supply = <&vreg_vph_pwr>;
+ vdd_l9_l10_l17_l22-supply = <&vreg_boost>;
+ vdd_l13_l20_l23_l24-supply = <&vreg_boost>;
+ vdd_l21-supply = <&vreg_boost>;
+
+ pm8941_s1: s1 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_s2: s2 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ regulator-boot-on;
+ };
+
+ pm8941_s3: s3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_l1: l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8941_l3: l3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8941_l4: l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8941_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l9: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8941_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8941_l11: l11 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ pm8941_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8941_l13: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l14: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_l15: l15 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8941_l16: l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pm8941_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8941_l18: l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8941_l19: l19 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3350000>;
+ };
+
+ pm8941_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <200000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm8941_l21: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-boot-on;
+ };
+
+ pm8941_l22: l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8941_l23: l23 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8941_l24: l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ regulator-boot-on;
+ };
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8941_l20>;
+ vqmmc-supply = <&pm8941_s3>;
+
+ pinctrl-0 = <&sdc1_on>;
+ pinctrl-1 = <&sdc1_off>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+};
+
+&smbb {
+ status = "okay";
+};
+
+&tlmm {
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio27", "gpio28";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ sdc1_on: sdc1-on-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <10>;
+ bias-disable;
+ };
+
+ cmd-data-pins {
+ pins = "sdc1_cmd", "sdc1_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ wcnss_pin_a: wcnss-pin-active-state {
+ pins = "gpio36", "gpio37", "gpio38", "gpio39", "gpio40";
+ function = "wlan";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+};
+
+&usb {
+ phys = <&usb_hs1_phy>;
+ phy-select = <&tcsr 0xb000 0>;
+ extcon = <&smbb>, <&usb_id>;
+ vbus-supply = <&chg_otg>;
+
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+
+ status = "okay";
+};
+
+&usb_hs1_phy {
+ v1p8-supply = <&pm8941_l6>;
+ v3p3-supply = <&pm8941_l24>;
+ extcon = <&smbb>;
+ qcom,init-seq = /bits/ 8 <0x1 0x63>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-aries.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-aries.dts
new file mode 100644
index 000000000000..2621c5928b6a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-aries.dts
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "qcom-msm8974pro-sony-xperia-shinano-common.dtsi"
+
+/ {
+ model = "Sony Xperia Z3 Compact";
+ compatible = "sony,xperia-aries", "qcom,msm8974pro", "qcom,msm8974";
+ chassis-type = "handset";
+
+ gpio-keys {
+ key-camera-snapshot {
+ label = "camera_snapshot";
+ gpios = <&pm8941_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_CAMERA>;
+ debounce-interval = <15>;
+ };
+
+ key-camera-focus {
+ label = "camera_focus";
+ gpios = <&pm8941_gpios 4 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_CAMERA_FOCUS>;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+&gpio_keys_pin_a {
+ pins = "gpio2", "gpio3", "gpio4", "gpio5";
+};
+
+&smbb {
+ usb-charge-current-limit = <1500000>;
+ qcom,fast-charge-safe-current = <2100000>;
+ qcom,fast-charge-current-limit = <1800000>;
+ qcom,fast-charge-safe-voltage = <4400000>;
+ qcom,fast-charge-high-threshold-voltage = <4350000>;
+ qcom,auto-recharge-threshold-voltage = <4280000>;
+ qcom,minimum-input-voltage = <4200000>;
+
+ status = "okay";
+};
+
+&synaptics_touchscreen {
+ vio-supply = <&pm8941_s3>;
+};
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi
index e129bb1bd6ec..6af7c71c7158 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-common.dtsi
@@ -380,6 +380,8 @@
pm8941_l21: l21 {
regulator-min-microvolt = <2950000>;
regulator-max-microvolt = <2950000>;
+ regulator-system-load = <500000>;
+ regulator-allow-set-load;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/renesas/r8a73a4.dtsi b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
index 9a2ae282a46b..85261684b5d5 100644
--- a/arch/arm/boot/dts/renesas/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
@@ -58,6 +58,7 @@
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
tmu0: timer@e61e0000 {
diff --git a/arch/arm/boot/dts/renesas/r8a7742.dtsi b/arch/arm/boot/dts/renesas/r8a7742.dtsi
index d55c344c1cd2..3a5d6b434d09 100644
--- a/arch/arm/boot/dts/renesas/r8a7742.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7742.dtsi
@@ -1938,6 +1938,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a7743.dtsi b/arch/arm/boot/dts/renesas/r8a7743.dtsi
index d917c0a971f5..8833898d5557 100644
--- a/arch/arm/boot/dts/renesas/r8a7743.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7743.dtsi
@@ -1846,6 +1846,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a7744.dtsi b/arch/arm/boot/dts/renesas/r8a7744.dtsi
index 754859c38a93..c66c1102fb72 100644
--- a/arch/arm/boot/dts/renesas/r8a7744.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7744.dtsi
@@ -1832,6 +1832,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a7745.dtsi b/arch/arm/boot/dts/renesas/r8a7745.dtsi
index 168298300490..6ddde364782b 100644
--- a/arch/arm/boot/dts/renesas/r8a7745.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7745.dtsi
@@ -1636,6 +1636,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a77470.dtsi b/arch/arm/boot/dts/renesas/r8a77470.dtsi
index 2375438d83c9..a8a12275c98a 100644
--- a/arch/arm/boot/dts/renesas/r8a77470.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a77470.dtsi
@@ -1061,6 +1061,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a7790.dtsi b/arch/arm/boot/dts/renesas/r8a7790.dtsi
index 583b74a9f071..20e4d4c6e748 100644
--- a/arch/arm/boot/dts/renesas/r8a7790.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7790.dtsi
@@ -2012,6 +2012,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a7791.dtsi b/arch/arm/boot/dts/renesas/r8a7791.dtsi
index de08ceb62230..f9c9e1d8f669 100644
--- a/arch/arm/boot/dts/renesas/r8a7791.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7791.dtsi
@@ -1938,6 +1938,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a7792.dtsi b/arch/arm/boot/dts/renesas/r8a7792.dtsi
index 7defeb8e4cd1..dd3bc32668b7 100644
--- a/arch/arm/boot/dts/renesas/r8a7792.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7792.dtsi
@@ -990,5 +990,6 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
};
diff --git a/arch/arm/boot/dts/renesas/r8a7793.dtsi b/arch/arm/boot/dts/renesas/r8a7793.dtsi
index d32a9d5d3faa..24e66ddf37e0 100644
--- a/arch/arm/boot/dts/renesas/r8a7793.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7793.dtsi
@@ -1517,6 +1517,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r8a7794.dtsi b/arch/arm/boot/dts/renesas/r8a7794.dtsi
index f37f094cecc8..8e6386a79aea 100644
--- a/arch/arm/boot/dts/renesas/r8a7794.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7794.dtsi
@@ -1484,6 +1484,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clock - can be overridden by the board */
diff --git a/arch/arm/boot/dts/renesas/r9a06g032.dtsi b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
index 45f60eeeaaa1..7548291c8d7e 100644
--- a/arch/arm/boot/dts/renesas/r9a06g032.dtsi
+++ b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
@@ -316,6 +316,24 @@
data-width = <8>;
};
+ gmac1: ethernet@44000000 {
+ compatible = "renesas,r9a06g032-gmac", "renesas,rzn1-gmac", "snps,dwmac";
+ reg = <0x44000000 0x2000>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
+ clocks = <&sysctrl R9A06G032_HCLK_GMAC0>;
+ clock-names = "stmmaceth";
+ power-domains = <&sysctrl>;
+ snps,multicast-filter-bins = <256>;
+ snps,perfect-filter-entries = <128>;
+ tx-fifo-depth = <2048>;
+ rx-fifo-depth = <4096>;
+ pcs-handle = <&mii_conv1>;
+ status = "disabled";
+ };
+
gmac2: ethernet@44002000 {
compatible = "renesas,r9a06g032-gmac", "renesas,rzn1-gmac", "snps,dwmac";
reg = <0x44002000 0x2000>;
@@ -466,6 +484,7 @@
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
usbphy: usb-phy {
diff --git a/arch/arm/boot/dts/rockchip/rk3036.dtsi b/arch/arm/boot/dts/rockchip/rk3036.dtsi
index 04af224005f8..96279d1e02fe 100644
--- a/arch/arm/boot/dts/rockchip/rk3036.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3036.dtsi
@@ -402,6 +402,7 @@
rockchip,grf = <&grf>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_ctl>;
+ #sound-dai-cells = <0>;
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/rockchip/rk3066a-mk808.dts b/arch/arm/boot/dts/rockchip/rk3066a-mk808.dts
index 06790f05b395..4de9a45c4883 100644
--- a/arch/arm/boot/dts/rockchip/rk3066a-mk808.dts
+++ b/arch/arm/boot/dts/rockchip/rk3066a-mk808.dts
@@ -143,6 +143,14 @@
};
};
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2s0 {
+ status = "okay";
+};
+
&mmc0 {
bus-width = <4>;
cap-mmc-highspeed;
diff --git a/arch/arm/boot/dts/rockchip/rk3066a.dtsi b/arch/arm/boot/dts/rockchip/rk3066a.dtsi
index 30139f21de64..3f6d49459734 100644
--- a/arch/arm/boot/dts/rockchip/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3066a.dtsi
@@ -53,6 +53,22 @@
ports = <&vop0_out>, <&vop1_out>;
};
+ hdmi_sound: hdmi-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "HDMI";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+ status = "disabled";
+
+ simple-audio-card,codec {
+ sound-dai = <&hdmi>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0>;
+ };
+ };
+
sram: sram@10080000 {
compatible = "mmio-sram";
reg = <0x10080000 0x10000>;
@@ -128,6 +144,7 @@
pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
power-domains = <&power RK3066_PD_VIO>;
rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
status = "disabled";
ports {
@@ -879,7 +896,3 @@
&wdt {
compatible = "rockchip,rk3066-wdt", "snps,dw-wdt";
};
-
-&emac {
- compatible = "rockchip,rk3066-emac";
-};
diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
index fb98873fd94e..23e633387c24 100644
--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
@@ -216,6 +216,8 @@
<&cru ACLK_LCDC0>,
<&cru HCLK_LCDC0>,
<&cru PCLK_MIPI>,
+ <&cru PCLK_MIPIPHY>,
+ <&cru SCLK_MIPI_24M>,
<&cru ACLK_RGA>,
<&cru HCLK_RGA>,
<&cru ACLK_VIO0>,
@@ -275,6 +277,43 @@
reg = <0>;
remote-endpoint = <&hdmi_in_vop>;
};
+
+ vop_out_dsi: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dsi_in_vop>;
+ };
+ };
+ };
+
+ dsi: dsi@10110000 {
+ compatible = "rockchip,rk3128-mipi-dsi", "snps,dw-mipi-dsi";
+ reg = <0x10110000 0x4000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_MIPI>;
+ clock-names = "pclk";
+ phys = <&dphy>;
+ phy-names = "dphy";
+ power-domains = <&power RK3128_PD_VIO>;
+ resets = <&cru SRST_VIO_MIPI_DSI>;
+ reset-names = "apb";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsi_in: port@0 {
+ reg = <0>;
+
+ dsi_in_vop: endpoint {
+ remote-endpoint = <&vop_out_dsi>;
+ };
+ };
+
+ dsi_out: port@1 {
+ reg = <1>;
+ };
};
};
@@ -360,6 +399,41 @@
status = "disabled";
};
+ i2s_8ch: i2s@10200000 {
+ compatible = "rockchip,rk3128-i2s", "rockchip,rk3066-i2s";
+ reg = <0x10200000 0x1000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S_8CH>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ dmas = <&pdma 14>, <&pdma 15>;
+ dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ spdif: spdif@10204000 {
+ compatible = "rockchip,rk3128-spdif", "rockchip,rk3066-spdif";
+ reg = <0x10204000 0x1000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_SPDIF>, <&cru HCLK_SPDIF>;
+ clock-names = "mclk", "hclk";
+ dmas = <&pdma 13>;
+ dma-names = "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_tx>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ sfc: spi@1020c000 {
+ compatible = "rockchip,sfc";
+ reg = <0x1020c000 0x8000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_SFC>, <&cru 479>;
+ clock-names = "clk_sfc", "hclk_sfc";
+ status = "disabled";
+ };
+
sdmmc: mmc@10214000 {
compatible = "rockchip,rk3128-dw-mshc", "rockchip,rk3288-dw-mshc";
reg = <0x10214000 0x4000>;
@@ -408,6 +482,21 @@
status = "disabled";
};
+ i2s_2ch: i2s@10220000 {
+ compatible = "rockchip,rk3128-i2s", "rockchip,rk3066-i2s";
+ reg = <0x10220000 0x1000>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_I2S1>, <&cru HCLK_I2S_2CH>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ dmas = <&pdma 0>, <&pdma 1>;
+ dma-names = "tx", "rx";
+ rockchip,playback-channels = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_bus>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
nfc: nand-controller@10500000 {
compatible = "rockchip,rk3128-nfc", "rockchip,rk2928-nfc";
reg = <0x10500000 0x4000>;
@@ -477,6 +566,7 @@
pinctrl-names = "default";
pinctrl-0 = <&hdmii2c_xfer &hdmi_hpd &hdmi_cec>;
power-domains = <&power RK3128_PD_VIO>;
+ #sound-dai-cells = <0>;
status = "disabled";
ports {
@@ -496,6 +586,18 @@
};
};
+ dphy: phy@20038000 {
+ compatible = "rockchip,rk3128-dsi-dphy";
+ reg = <0x20038000 0x4000>;
+ clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPIPHY>;
+ clock-names = "ref", "pclk";
+ #phy-cells = <0>;
+ power-domains = <&power RK3128_PD_VIO>;
+ resets = <&cru SRST_MIPIPHY_P>;
+ reset-names = "apb";
+ status = "disabled";
+ };
+
timer0: timer@20044000 {
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044000 0x20>;
@@ -1104,6 +1206,32 @@
};
};
+ sfc {
+ sfc_bus2: sfc-bus2 {
+ rockchip,pins = <1 RK_PD0 3 &pcfg_pull_default>,
+ <1 RK_PD1 3 &pcfg_pull_default>;
+ };
+
+ sfc_bus4: sfc-bus4 {
+ rockchip,pins = <1 RK_PD0 3 &pcfg_pull_default>,
+ <1 RK_PD1 3 &pcfg_pull_default>,
+ <1 RK_PD2 3 &pcfg_pull_default>,
+ <1 RK_PD3 3 &pcfg_pull_default>;
+ };
+
+ sfc_clk: sfc-clk {
+ rockchip,pins = <2 RK_PA4 3 &pcfg_pull_none>;
+ };
+
+ sfc_cs0: sfc-cs0 {
+ rockchip,pins = <2 RK_PA2 2 &pcfg_pull_default>;
+ };
+
+ sfc_cs1: sfc-cs1 {
+ rockchip,pins = <2 RK_PA3 2 &pcfg_pull_default>;
+ };
+ };
+
spdif {
spdif_tx: spdif-tx {
rockchip,pins = <3 RK_PD3 1 &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rockchip/rk3xxx.dtsi b/arch/arm/boot/dts/rockchip/rk3xxx.dtsi
index f37137f298d5..e6a78bcf9163 100644
--- a/arch/arm/boot/dts/rockchip/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3xxx.dtsi
@@ -194,17 +194,14 @@
};
emac: ethernet@10204000 {
- compatible = "snps,arc-emac";
+ compatible = "rockchip,rk3066-emac";
reg = <0x10204000 0x3c>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
-
- rockchip,grf = <&grf>;
-
clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>;
clock-names = "hclk", "macref";
max-speed = <100>;
phy-mode = "rmii";
-
+ rockchip,grf = <&grf>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rockchip/rv1126-edgeble-neu2-io.dts b/arch/arm/boot/dts/rockchip/rv1126-edgeble-neu2-io.dts
index 0c2396b8f8db..7707d1b01440 100644
--- a/arch/arm/boot/dts/rockchip/rv1126-edgeble-neu2-io.dts
+++ b/arch/arm/boot/dts/rockchip/rv1126-edgeble-neu2-io.dts
@@ -69,8 +69,7 @@
&mdio {
phy: ethernet-phy@0 {
- compatible = "ethernet-phy-id001c.c916",
- "ethernet-phy-ieee802.3-c22";
+ compatible = "ethernet-phy-id001c.c916";
reg = <0x0>;
pinctrl-names = "default";
pinctrl-0 = <&eth_phy_rst>;
diff --git a/arch/arm/boot/dts/st/Makefile b/arch/arm/boot/dts/st/Makefile
index 9fedd6776208..015903d09323 100644
--- a/arch/arm/boot/dts/st/Makefile
+++ b/arch/arm/boot/dts/st/Makefile
@@ -29,6 +29,7 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32h743i-eval.dtb \
stm32h743i-disco.dtb \
stm32h750i-art-pi.dtb \
+ stm32mp135f-dhcor-dhsbc.dtb \
stm32mp135f-dk.dtb \
stm32mp151a-prtt1a.dtb \
stm32mp151a-prtt1c.dtb \
diff --git a/arch/arm/boot/dts/st/stih407-family.dtsi b/arch/arm/boot/dts/st/stih407-family.dtsi
index 29302e74aa1d..35a55aef7f4b 100644
--- a/arch/arm/boot/dts/st/stih407-family.dtsi
+++ b/arch/arm/boot/dts/st/stih407-family.dtsi
@@ -33,7 +33,7 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
@@ -52,8 +52,9 @@
clock-latency = <100000>;
cpu0-supply = <&pwm_regulator>;
st,syscfg = <&syscfg_core 0x8e0>;
+ #cooling-cells = <2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
@@ -66,6 +67,7 @@
1200000 0
800000 0
500000 0>;
+ #cooling-cells = <2>;
};
};
diff --git a/arch/arm/boot/dts/st/stih410.dtsi b/arch/arm/boot/dts/st/stih410.dtsi
index 29e95e9d3229..a69231854f78 100644
--- a/arch/arm/boot/dts/st/stih410.dtsi
+++ b/arch/arm/boot/dts/st/stih410.dtsi
@@ -270,6 +270,7 @@
clock-names = "thermal";
clocks = <&clk_sysin>;
interrupts = <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <0>;
};
cec@94a087c {
diff --git a/arch/arm/boot/dts/st/stih418.dtsi b/arch/arm/boot/dts/st/stih418.dtsi
index b35b9b7a7ccc..8fb8b3af5e49 100644
--- a/arch/arm/boot/dts/st/stih418.dtsi
+++ b/arch/arm/boot/dts/st/stih418.dtsi
@@ -6,23 +6,26 @@
#include "stih418-clock.dtsi"
#include "stih407-family.dtsi"
#include "stih410-pinctrl.dtsi"
+#include <dt-bindings/thermal/thermal.h>
/ {
cpus {
#address-cells = <1>;
#size-cells = <0>;
- cpu@2 {
+ cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <2>;
/* u-boot puts hpen in SBC dmem at 0xa4 offset */
cpu-release-addr = <0x94100A4>;
+ #cooling-cells = <2>;
};
- cpu@3 {
+ cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <3>;
/* u-boot puts hpen in SBC dmem at 0xa4 offset */
cpu-release-addr = <0x94100A4>;
+ #cooling-cells = <2>;
};
};
@@ -44,6 +47,38 @@
reset-names = "global", "port";
};
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <250>; /* 250ms */
+ polling-delay = <1000>; /* 1000ms */
+
+ thermal-sensors = <&thermal>;
+
+ trips {
+ cpu_crit: cpu-crit {
+ temperature = <95000>; /* 95C */
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ cpu_alert: cpu-alert {
+ temperature = <85000>; /* 85C */
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ map {
+ trip = <&cpu_alert>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
soc {
rng11: rng@8a8a000 {
status = "disabled";
@@ -107,12 +142,13 @@
assigned-clock-rates = <200000000>;
};
- thermal@91a0000 {
+ thermal: thermal@91a0000 {
compatible = "st,stih407-thermal";
reg = <0x91a0000 0x28>;
clock-names = "thermal";
clocks = <&clk_sysin>;
interrupts = <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <0>;
};
};
};
diff --git a/arch/arm/boot/dts/st/stm32f429.dtsi b/arch/arm/boot/dts/st/stm32f429.dtsi
index 8efcda9ef8ae..ad91b74ddd0d 100644
--- a/arch/arm/boot/dts/st/stm32f429.dtsi
+++ b/arch/arm/boot/dts/st/stm32f429.dtsi
@@ -579,6 +579,7 @@
syscfg: syscon@40013800 {
compatible = "st,stm32-syscfg", "syscon";
reg = <0x40013800 0x400>;
+ clocks = <&rcc 0 STM32F4_APB2_CLOCK(SYSCFG)>;
};
exti: interrupt-controller@40013c00 {
diff --git a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
index 32c5d8a1e06a..c9f588a65094 100644
--- a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
@@ -6,6 +6,14 @@
#include <dt-bindings/pinctrl/stm32-pinfunc.h>
&pinctrl {
+ /omit-if-no-ref/
+ adc1_pins_a: adc1-pins-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 3, ANALOG)>; /* ADC1 in12 */
+ };
+ };
+
+ /omit-if-no-ref/
adc1_usb_cc_pins_a: adc1-usb-cc-pins-0 {
pins {
pinmux = <STM32_PINMUX('F', 12, ANALOG)>, /* ADC1 in6 */
@@ -13,6 +21,241 @@
};
};
+ /omit-if-no-ref/
+ adc1_usb_cc_pins_b: adc1-usb-cc-pins-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 5, ANALOG)>, /* ADC1_INP2 */
+ <STM32_PINMUX('F', 13, ANALOG)>; /* ADC1_INP11 */
+ };
+ };
+
+ /omit-if-no-ref/
+ dcmipp_pins_a: dcmi-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 8, AF13)>,/* DCMI_HSYNC */
+ <STM32_PINMUX('G', 9, AF13)>,/* DCMI_VSYNC */
+ <STM32_PINMUX('B', 7, AF14)>,/* DCMI_PIXCLK */
+ <STM32_PINMUX('A', 9, AF13)>,/* DCMI_D0 */
+ <STM32_PINMUX('D', 0, AF13)>,/* DCMI_D1 */
+ <STM32_PINMUX('G', 10, AF13)>,/* DCMI_D2 */
+ <STM32_PINMUX('E', 4, AF13)>,/* DCMI_D3 */
+ <STM32_PINMUX('D', 11, AF14)>,/* DCMI_D4 */
+ <STM32_PINMUX('D', 3, AF13)>,/* DCMI_D5 */
+ <STM32_PINMUX('B', 8, AF13)>,/* DCMI_D6 */
+ <STM32_PINMUX('E', 14, AF13)>;/* DCMI_D7 */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ dcmipp_sleep_pins_a: dcmi-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 8, ANALOG)>,/* DCMI_HSYNC */
+ <STM32_PINMUX('G', 9, ANALOG)>,/* DCMI_VSYNC */
+ <STM32_PINMUX('B', 7, ANALOG)>,/* DCMI_PIXCLK */
+ <STM32_PINMUX('A', 9, ANALOG)>,/* DCMI_D0 */
+ <STM32_PINMUX('D', 0, ANALOG)>,/* DCMI_D1 */
+ <STM32_PINMUX('G', 10, ANALOG)>,/* DCMI_D2 */
+ <STM32_PINMUX('E', 4, ANALOG)>,/* DCMI_D3 */
+ <STM32_PINMUX('D', 11, ANALOG)>,/* DCMI_D4 */
+ <STM32_PINMUX('D', 3, ANALOG)>,/* DCMI_D5 */
+ <STM32_PINMUX('B', 8, ANALOG)>,/* DCMI_D6 */
+ <STM32_PINMUX('E', 14, ANALOG)>;/* DCMI_D7 */
+ };
+ };
+
+ /omit-if-no-ref/
+ eth1_rgmii_pins_a: eth1-rgmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, AF11)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, AF11)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, AF11)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 5, AF10)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('C', 1, AF11)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('A', 2, AF11)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 2, AF11)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('B', 0, AF11)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('B', 1, AF11)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 7, AF11)>, /* ETH_RGMII_RX_CTL */
+ <STM32_PINMUX('D', 7, AF10)>; /* ETH_RGMII_RX_CLK */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ eth1_rgmii_sleep_pins_a: eth1-rgmii-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, ANALOG)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 5, ANALOG)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('C', 1, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 2, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('B', 0, ANALOG)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('B', 1, ANALOG)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 7, ANALOG)>, /* ETH_RGMII_RX_CTL */
+ <STM32_PINMUX('D', 7, ANALOG)>; /* ETH_RGMII_RX_CLK */
+ };
+ };
+
+ /omit-if-no-ref/
+ eth1_rmii_pins_a: eth1-rmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, AF11)>, /* ETH_RMII_TXD0 */
+ <STM32_PINMUX('G', 14, AF11)>, /* ETH_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH_RMII_TX_EN */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH_RMII_REF_CLK */
+ <STM32_PINMUX('A', 2, AF11)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 2, AF11)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH_RMII_RXD1 */
+ <STM32_PINMUX('C', 1, AF10)>; /* ETH_RMII_CRS_DV */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ eth1_rmii_sleep_pins_a: eth1-rmii-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, ANALOG)>, /* ETH_RMII_TXD0 */
+ <STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RMII_TX_EN */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH_RMII_REF_CLK */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 2, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RMII_RXD1 */
+ <STM32_PINMUX('C', 1, ANALOG)>; /* ETH_RMII_CRS_DV */
+ };
+ };
+
+ /omit-if-no-ref/
+ eth2_rgmii_pins_a: eth2-rgmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 7, AF11)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 11, AF10)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('G', 1, AF10)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 6, AF11)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('F', 6, AF11)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('G', 3, AF10)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('B', 6, AF11)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 5, AF10)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 4, AF11)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('E', 2, AF10)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, AF12)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('A', 8, AF11)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 12, AF11)>, /* ETH_RGMII_RX_CTL */
+ <STM32_PINMUX('H', 11, AF11)>; /* ETH_RGMII_RX_CLK */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ eth2_rgmii_sleep_pins_a: eth2-rgmii-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 7, ANALOG)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 11, ANALOG)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('G', 1, ANALOG)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 6, ANALOG)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('F', 6, ANALOG)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('G', 3, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('B', 6, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('F', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, ANALOG)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('A', 8, ANALOG)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 12, ANALOG)>, /* ETH_RGMII_RX_CTL */
+ <STM32_PINMUX('H', 11, ANALOG)>; /* ETH_RGMII_RX_CLK */
+ };
+ };
+
+ /omit-if-no-ref/
+ eth2_rmii_pins_a: eth2-rmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 7, AF11)>, /* ETH_RMII_TXD0 */
+ <STM32_PINMUX('G', 11, AF10)>, /* ETH_RMII_TXD1 */
+ <STM32_PINMUX('G', 8, AF13)>, /* ETH_RMII_ETHCK */
+ <STM32_PINMUX('F', 6, AF11)>, /* ETH_RMII_TX_EN */
+ <STM32_PINMUX('B', 2, AF11)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 5, AF10)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 4, AF11)>, /* ETH_RMII_RXD0 */
+ <STM32_PINMUX('E', 2, AF10)>, /* ETH_RMII_RXD1 */
+ <STM32_PINMUX('A', 12, AF11)>; /* ETH_RMII_CRS_DV */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ eth2_rmii_sleep_pins_a: eth2-rmii-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 7, ANALOG)>, /* ETH_RMII_TXD0 */
+ <STM32_PINMUX('G', 11, ANALOG)>, /* ETH_RMII_TXD1 */
+ <STM32_PINMUX('G', 8, ANALOG)>, /* ETH_RMII_ETHCK */
+ <STM32_PINMUX('F', 6, ANALOG)>, /* ETH_RMII_TX_EN */
+ <STM32_PINMUX('B', 2, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('F', 4, ANALOG)>, /* ETH_RMII_RXD0 */
+ <STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RMII_RXD1 */
+ <STM32_PINMUX('A', 12, ANALOG)>; /* ETH_RMII_CRS_DV */
+ };
+ };
+
+ /omit-if-no-ref/
+ goodix_pins_a: goodix-0 {
+ /*
+ * The touchscreen reset line must be configured via pinctrl,
+ * not by the driver: a pull-down resistor soldered onto the
+ * reset line would otherwise keep the touchscreen in reset.
+ */
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 2, GPIO)>;
+ output-high;
+ bias-pull-up;
+ };
+ /*
+ * The interrupt line must have a pull-down resistor
+ * in order to freeze the I2C address at 0x5D.
+ */
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 5, GPIO)>;
+ bias-pull-down;
+ };
+ };
+
+ /omit-if-no-ref/
i2c1_pins_a: i2c1-0 {
pins {
pinmux = <STM32_PINMUX('D', 12, AF5)>, /* I2C1_SCL */
@@ -23,6 +266,7 @@
};
};
+ /omit-if-no-ref/
i2c1_sleep_pins_a: i2c1-sleep-0 {
pins {
pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
@@ -30,6 +274,7 @@
};
};
+ /omit-if-no-ref/
i2c5_pins_a: i2c5-0 {
pins {
pinmux = <STM32_PINMUX('D', 1, AF4)>, /* I2C5_SCL */
@@ -40,6 +285,7 @@
};
};
+ /omit-if-no-ref/
i2c5_sleep_pins_a: i2c5-sleep-0 {
pins {
pinmux = <STM32_PINMUX('D', 1, ANALOG)>, /* I2C5_SCL */
@@ -47,6 +293,26 @@
};
};
+ /omit-if-no-ref/
+ i2c5_pins_b: i2c5-1 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 1, AF4)>, /* I2C5_SCL */
+ <STM32_PINMUX('E', 13, AF4)>; /* I2C5_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ /omit-if-no-ref/
+ i2c5_sleep_pins_b: i2c5-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 1, ANALOG)>, /* I2C5_SCL */
+ <STM32_PINMUX('E', 13, ANALOG)>; /* I2C5_SDA */
+ };
+ };
+
+ /omit-if-no-ref/
ltdc_pins_a: ltdc-0 {
pins {
pinmux = <STM32_PINMUX('D', 9, AF13)>, /* LCD_CLK */
@@ -77,6 +343,7 @@
};
};
+ /omit-if-no-ref/
ltdc_sleep_pins_a: ltdc-sleep-0 {
pins {
pinmux = <STM32_PINMUX('D', 9, ANALOG)>, /* LCD_CLK */
@@ -104,6 +371,51 @@
};
};
+ /omit-if-no-ref/
+ m_can1_pins_a: m-can1-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 10, AF9)>; /* CAN1_TX */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 0, AF9)>; /* CAN1_RX */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ m_can1_sleep_pins_a: m-can1-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 10, ANALOG)>, /* CAN1_TX */
+ <STM32_PINMUX('D', 0, ANALOG)>; /* CAN1_RX */
+ };
+ };
+
+ /omit-if-no-ref/
+ m_can2_pins_a: m-can2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 0, AF9)>; /* CAN2_TX */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 0, AF9)>; /* CAN2_RX */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ m_can2_sleep_pins_a: m-can2-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 0, ANALOG)>, /* CAN2_TX */
+ <STM32_PINMUX('E', 0, ANALOG)>; /* CAN2_RX */
+ };
+ };
+
+ /omit-if-no-ref/
mcp23017_pins_a: mcp23017-0 {
pins {
pinmux = <STM32_PINMUX('G', 12, GPIO)>;
@@ -111,6 +423,7 @@
};
};
+ /omit-if-no-ref/
pwm3_pins_a: pwm3-0 {
pins {
pinmux = <STM32_PINMUX('B', 1, AF2)>; /* TIM3_CH4 */
@@ -120,12 +433,14 @@
};
};
+ /omit-if-no-ref/
pwm3_sleep_pins_a: pwm3-sleep-0 {
pins {
pinmux = <STM32_PINMUX('B', 1, ANALOG)>; /* TIM3_CH4 */
};
};
+ /omit-if-no-ref/
pwm4_pins_a: pwm4-0 {
pins {
pinmux = <STM32_PINMUX('D', 13, AF2)>; /* TIM4_CH2 */
@@ -135,12 +450,31 @@
};
};
+ /omit-if-no-ref/
pwm4_sleep_pins_a: pwm4-sleep-0 {
pins {
pinmux = <STM32_PINMUX('D', 13, ANALOG)>; /* TIM4_CH2 */
};
};
+ /omit-if-no-ref/
+ pwm5_pins_a: pwm5-0 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 12, AF2)>; /* TIM5_CH3 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ /omit-if-no-ref/
+ pwm5_sleep_pins_a: pwm5-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 12, ANALOG)>; /* TIM5_CH3 */
+ };
+ };
+
+ /omit-if-no-ref/
pwm8_pins_a: pwm8-0 {
pins {
pinmux = <STM32_PINMUX('E', 5, AF3)>; /* TIM8_CH3 */
@@ -150,12 +484,31 @@
};
};
+ /omit-if-no-ref/
pwm8_sleep_pins_a: pwm8-sleep-0 {
pins {
pinmux = <STM32_PINMUX('E', 5, ANALOG)>; /* TIM8_CH3 */
};
};
+ /omit-if-no-ref/
+ pwm13_pins_a: pwm13-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 6, AF9)>; /* TIM13_CH1 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ /omit-if-no-ref/
+ pwm13_sleep_pins_a: pwm13-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 6, ANALOG)>; /* TIM13_CH1 */
+ };
+ };
+
+ /omit-if-no-ref/
pwm14_pins_a: pwm14-0 {
pins {
pinmux = <STM32_PINMUX('F', 9, AF9)>; /* TIM14_CH1 */
@@ -165,12 +518,107 @@
};
};
+ /omit-if-no-ref/
pwm14_sleep_pins_a: pwm14-sleep-0 {
pins {
pinmux = <STM32_PINMUX('F', 9, ANALOG)>; /* TIM14_CH1 */
};
};
+ /omit-if-no-ref/
+ qspi_clk_pins_a: qspi-clk-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 10, AF9)>; /* QSPI_CLK */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ };
+
+ /omit-if-no-ref/
+ qspi_clk_sleep_pins_a: qspi-clk-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 10, ANALOG)>; /* QSPI_CLK */
+ };
+ };
+
+ /omit-if-no-ref/
+ qspi_bk1_pins_a: qspi-bk1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 8, AF10)>, /* QSPI_BK1_IO0 */
+ <STM32_PINMUX('F', 9, AF10)>, /* QSPI_BK1_IO1 */
+ <STM32_PINMUX('D', 11, AF9)>, /* QSPI_BK1_IO2 */
+ <STM32_PINMUX('H', 7, AF13)>; /* QSPI_BK1_IO3 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+ };
+
+ /omit-if-no-ref/
+ qspi_bk1_sleep_pins_a: qspi-bk1-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 8, ANALOG)>, /* QSPI_BK1_IO0 */
+ <STM32_PINMUX('F', 9, ANALOG)>, /* QSPI_BK1_IO1 */
+ <STM32_PINMUX('D', 11, ANALOG)>, /* QSPI_BK1_IO2 */
+ <STM32_PINMUX('H', 7, ANALOG)>; /* QSPI_BK1_IO3 */
+ };
+ };
+
+ /omit-if-no-ref/
+ qspi_cs1_pins_a: qspi-cs1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 2, AF9)>; /* QSPI_BK1_NCS */
+ bias-pull-up;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+ };
+
+ /omit-if-no-ref/
+ qspi_cs1_sleep_pins_a: qspi-cs1-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 2, ANALOG)>; /* QSPI_BK1_NCS */
+ };
+ };
+
+ /omit-if-no-ref/
+ sai1a_pins_a: sai1a-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 4, AF12)>, /* SAI1_SCK_A */
+ <STM32_PINMUX('D', 6, AF6)>, /* SAI1_SD_A */
+ <STM32_PINMUX('E', 11, AF6)>; /* SAI1_FS_A */
+ slew-rate = <0>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ sai1a_sleep_pins_a: sai1a-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 4, ANALOG)>, /* SAI1_SCK_A */
+ <STM32_PINMUX('D', 6, ANALOG)>, /* SAI1_SD_A */
+ <STM32_PINMUX('E', 11, ANALOG)>; /* SAI1_FS_A */
+ };
+ };
+
+ /omit-if-no-ref/
+ sai1b_pins_a: sai1b-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 0, AF6)>; /* SAI1_SD_B */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ sai1b_sleep_pins_a: sai1b-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 0, ANALOG)>; /* SAI1_SD_B */
+ };
+ };
+
+ /omit-if-no-ref/
sdmmc1_b4_pins_a: sdmmc1-b4-0 {
pins {
pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
@@ -184,6 +632,7 @@
};
};
+ /omit-if-no-ref/
sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 {
pins1 {
pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
@@ -202,6 +651,7 @@
};
};
+ /omit-if-no-ref/
sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
pins {
pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
@@ -213,6 +663,7 @@
};
};
+ /omit-if-no-ref/
sdmmc1_clk_pins_a: sdmmc1-clk-0 {
pins {
pinmux = <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
@@ -222,6 +673,7 @@
};
};
+ /omit-if-no-ref/
sdmmc2_b4_pins_a: sdmmc2-b4-0 {
pins {
pinmux = <STM32_PINMUX('B', 14, AF10)>, /* SDMMC2_D0 */
@@ -235,6 +687,7 @@
};
};
+ /omit-if-no-ref/
sdmmc2_b4_od_pins_a: sdmmc2-b4-od-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 14, AF10)>, /* SDMMC2_D0 */
@@ -253,6 +706,7 @@
};
};
+ /omit-if-no-ref/
sdmmc2_b4_sleep_pins_a: sdmmc2-b4-sleep-0 {
pins {
pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC2_D0 */
@@ -264,6 +718,7 @@
};
};
+ /omit-if-no-ref/
sdmmc2_clk_pins_a: sdmmc2-clk-0 {
pins {
pinmux = <STM32_PINMUX('E', 3, AF10)>; /* SDMMC2_CK */
@@ -273,6 +728,80 @@
};
};
+ /omit-if-no-ref/
+ sdmmc2_d47_pins_a: sdmmc2-d47-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 0, AF10)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('B', 9, AF10)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, AF10)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('C', 7, AF10)>; /* SDMMC2_D7 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ sdmmc2_d47_sleep_pins_a: sdmmc2-d47-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 0, ANALOG)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('C', 7, ANALOG)>; /* SDMMC2_D7 */
+ };
+ };
+
+ /omit-if-no-ref/
+ spi2_pins_a: spi2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 10, AF6)>, /* SPI2_SCK */
+ <STM32_PINMUX('H', 10, AF6)>; /* SPI2_MOSI */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 5, AF5)>; /* SPI2_MISO */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ spi2_sleep_pins_a: spi2-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* SPI2_SCK */
+ <STM32_PINMUX('B', 5, ANALOG)>, /* SPI2_MISO */
+ <STM32_PINMUX('H', 10, ANALOG)>; /* SPI2_MOSI */
+ };
+ };
+
+ /omit-if-no-ref/
+ spi3_pins_a: spi3-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 13, AF6)>, /* SPI3_SCK */
+ <STM32_PINMUX('F', 1, AF5)>; /* SPI3_MOSI */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 4, AF5)>; /* SPI3_MISO */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ spi3_sleep_pins_a: spi3-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 13, ANALOG)>, /* SPI3_SCK */
+ <STM32_PINMUX('D', 4, ANALOG)>, /* SPI3_MISO */
+ <STM32_PINMUX('F', 1, ANALOG)>; /* SPI3_MOSI */
+ };
+ };
+
+ /omit-if-no-ref/
spi5_pins_a: spi5-0 {
pins1 {
pinmux = <STM32_PINMUX('H', 7, AF6)>, /* SPI5_SCK */
@@ -288,6 +817,7 @@
};
};
+ /omit-if-no-ref/
spi5_sleep_pins_a: spi5-sleep-0 {
pins {
pinmux = <STM32_PINMUX('H', 7, ANALOG)>, /* SPI5_SCK */
@@ -296,6 +826,7 @@
};
};
+ /omit-if-no-ref/
stm32g0_intn_pins_a: stm32g0-intn-0 {
pins {
pinmux = <STM32_PINMUX('I', 2, GPIO)>;
@@ -303,6 +834,7 @@
};
};
+ /omit-if-no-ref/
uart4_pins_a: uart4-0 {
pins1 {
pinmux = <STM32_PINMUX('D', 6, AF8)>; /* UART4_TX */
@@ -316,6 +848,7 @@
};
};
+ /omit-if-no-ref/
uart4_idle_pins_a: uart4-idle-0 {
pins1 {
pinmux = <STM32_PINMUX('D', 6, ANALOG)>; /* UART4_TX */
@@ -326,6 +859,7 @@
};
};
+ /omit-if-no-ref/
uart4_sleep_pins_a: uart4-sleep-0 {
pins {
pinmux = <STM32_PINMUX('D', 6, ANALOG)>, /* UART4_TX */
@@ -333,6 +867,84 @@
};
};
+ /omit-if-no-ref/
+ uart4_pins_b: uart4-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 9, AF8)>; /* UART4_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 8, AF8)>; /* UART4_RX */
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ uart4_idle_pins_b: uart4-idle-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 9, ANALOG)>; /* UART4_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 8, AF8)>; /* UART4_RX */
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ uart4_sleep_pins_b: uart4-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 9, ANALOG)>, /* UART4_TX */
+ <STM32_PINMUX('D', 8, ANALOG)>; /* UART4_RX */
+ };
+ };
+
+ /omit-if-no-ref/
+ uart7_pins_a: uart7-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 2, AF8)>, /* UART7_TX */
+ <STM32_PINMUX('B', 12, AF7)>; /* UART7_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 10, AF7)>, /* UART7_RX */
+ <STM32_PINMUX('G', 7, AF8)>; /* UART7_CTS_NSS */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ uart7_idle_pins_a: uart7-idle-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 2, ANALOG)>, /* UART7_TX */
+ <STM32_PINMUX('G', 7, ANALOG)>; /* UART7_CTS_NSS */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 12, AF7)>; /* UART7_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('E', 10, AF7)>; /* UART7_RX */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ uart7_sleep_pins_a: uart7-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 2, ANALOG)>, /* UART7_TX */
+ <STM32_PINMUX('B', 12, ANALOG)>, /* UART7_RTS */
+ <STM32_PINMUX('E', 10, ANALOG)>, /* UART7_RX */
+ <STM32_PINMUX('G', 7, ANALOG)>; /* UART7_CTS_NSS */
+ };
+ };
+
+ /omit-if-no-ref/
uart8_pins_a: uart8-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 1, AF8)>; /* UART8_TX */
@@ -346,6 +958,7 @@
};
};
+ /omit-if-no-ref/
uart8_idle_pins_a: uart8-idle-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 1, ANALOG)>; /* UART8_TX */
@@ -356,6 +969,7 @@
};
};
+ /omit-if-no-ref/
uart8_sleep_pins_a: uart8-sleep-0 {
pins {
pinmux = <STM32_PINMUX('E', 1, ANALOG)>, /* UART8_TX */
@@ -363,6 +977,7 @@
};
};
+ /omit-if-no-ref/
usart1_pins_a: usart1-0 {
pins1 {
pinmux = <STM32_PINMUX('C', 0, AF7)>, /* USART1_TX */
@@ -378,6 +993,7 @@
};
};
+ /omit-if-no-ref/
usart1_idle_pins_a: usart1-idle-0 {
pins1 {
pinmux = <STM32_PINMUX('C', 0, ANALOG)>, /* USART1_TX */
@@ -395,6 +1011,7 @@
};
};
+ /omit-if-no-ref/
usart1_sleep_pins_a: usart1-sleep-0 {
pins {
pinmux = <STM32_PINMUX('C', 0, ANALOG)>, /* USART1_TX */
@@ -404,6 +1021,40 @@
};
};
+ /omit-if-no-ref/
+ usart1_pins_b: usart1-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 0, AF7)>; /* USART1_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 14, AF7)>; /* USART1_RX */
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ usart1_idle_pins_b: usart1-idle-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 0, ANALOG)>; /* USART1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 14, AF7)>; /* USART1_RX */
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ usart1_sleep_pins_b: usart1-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 0, ANALOG)>, /* USART1_TX */
+ <STM32_PINMUX('D', 14, ANALOG)>; /* USART1_RX */
+ };
+ };
+
+ /omit-if-no-ref/
usart2_pins_a: usart2-0 {
pins1 {
pinmux = <STM32_PINMUX('H', 12, AF1)>, /* USART2_TX */
@@ -419,6 +1070,7 @@
};
};
+ /omit-if-no-ref/
usart2_idle_pins_a: usart2-idle-0 {
pins1 {
pinmux = <STM32_PINMUX('H', 12, ANALOG)>, /* USART2_TX */
@@ -436,6 +1088,7 @@
};
};
+ /omit-if-no-ref/
usart2_sleep_pins_a: usart2-sleep-0 {
pins {
pinmux = <STM32_PINMUX('H', 12, ANALOG)>, /* USART2_TX */
@@ -444,4 +1097,48 @@
<STM32_PINMUX('E', 11, ANALOG)>; /* USART2_CTS_NSS */
};
};
+
+ /omit-if-no-ref/
+ usart2_pins_b: usart2-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 11, AF1)>, /* USART2_TX */
+ <STM32_PINMUX('A', 1, AF7)>; /* USART2_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 15, AF1)>, /* USART2_RX */
+ <STM32_PINMUX('E', 15, AF3)>; /* USART2_CTS_NSS */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ usart2_idle_pins_b: usart2-idle-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 11, ANALOG)>, /* USART2_TX */
+ <STM32_PINMUX('E', 15, ANALOG)>; /* USART2_CTS_NSS */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 1, AF7)>; /* USART2_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('D', 15, AF1)>; /* USART2_RX */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ usart2_sleep_pins_b: usart2-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 11, ANALOG)>, /* USART2_TX */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* USART2_RTS */
+ <STM32_PINMUX('D', 15, ANALOG)>, /* USART2_RX */
+ <STM32_PINMUX('E', 15, ANALOG)>; /* USART2_CTS_NSS */
+ };
+ };
};
diff --git a/arch/arm/boot/dts/st/stm32mp131.dtsi b/arch/arm/boot/dts/st/stm32mp131.dtsi
index 6704ceef284d..e1a764d269d2 100644
--- a/arch/arm/boot/dts/st/stm32mp131.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp131.dtsi
@@ -979,6 +979,12 @@
ts_cal2: calib@5e {
reg = <0x5e 0x2>;
};
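+ /* OTP cells holding the MAC addresses used by ethernet1 and ethernet2 */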
+ ethernet_mac1_address: mac1@e4 {
+ reg = <0xe4 0x6>;
+ };
+ ethernet_mac2_address: mac2@ea {
+ reg = <0xea 0x6>;
+ };
};
etzpc: bus@5c007000 {
@@ -1505,6 +1511,38 @@
status = "disabled";
};
+ ethernet1: ethernet@5800a000 {
+ compatible = "st,stm32mp13-dwmac", "snps,dwmac-4.20a";
+ reg = <0x5800a000 0x2000>;
+ reg-names = "stmmaceth";
+ interrupts-extended = <&intc GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <&exti 68 1>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "ethstp",
+ "eth-ck";
+ clocks = <&rcc ETH1MAC>,
+ <&rcc ETH1TX>,
+ <&rcc ETH1RX>,
+ <&rcc ETH1STP>,
+ <&rcc ETH1CK_K>;
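+ /* syscfg register offset and mask used to select the ETH1 PHY interface mode */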
+ st,syscon = <&syscfg 0x4 0xff0000>;
+ snps,mixed-burst;
+ snps,pbl = <2>;
+ snps,axi-config = <&stmmac_axi_config_1>;
+ snps,tso;
+ access-controllers = <&etzpc 48>;
+ status = "disabled";
+
+ stmmac_axi_config_1: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,wr_osr_lmt = <0x7>;
+ };
+ };
+
usbphyc: usbphyc@5a006000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/st/stm32mp133.dtsi b/arch/arm/boot/dts/st/stm32mp133.dtsi
index 3e394c8e58b9..73e470019ce4 100644
--- a/arch/arm/boot/dts/st/stm32mp133.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp133.dtsi
@@ -68,4 +68,35 @@
};
};
};
+
+ ethernet2: ethernet@5800e000 {
+ compatible = "st,stm32mp13-dwmac", "snps,dwmac-4.20a";
+ reg = <0x5800e000 0x2000>;
+ reg-names = "stmmaceth";
+ interrupts-extended = <&intc GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "ethstp",
+ "eth-ck";
+ clocks = <&rcc ETH2MAC>,
+ <&rcc ETH2TX>,
+ <&rcc ETH2RX>,
+ <&rcc ETH2STP>,
+ <&rcc ETH2CK_K>;
+ st,syscon = <&syscfg 0x4 0xff000000>;
+ snps,mixed-burst;
+ snps,pbl = <2>;
+ snps,axi-config = <&stmmac_axi_config_2>;
+ snps,tso;
+ access-controllers = <&etzpc 49>;
+ status = "disabled";
+
+ stmmac_axi_config_2: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,wr_osr_lmt = <0x7>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
new file mode 100644
index 000000000000..bacb70b4256b
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2024 Marek Vasut <marex@denx.de>
+ *
+ * DHCOR STM32MP13 variant:
+ * DHCR-STM32MP135F-C100-R051-EE-F0409-SPI4-RTC-WBT-I-01LG
+ * DHCOR PCB number: 718-100 or newer
+ * DHSBC PCB number: 719-100 or newer
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/regulator/st,stm32mp13-regulator.h>
+#include "stm32mp135.dtsi"
+#include "stm32mp13xf.dtsi"
+#include "stm32mp13xx-dhcor-som.dtsi"
+
+/ {
+ model = "DH electronics STM32MP135F DHCOR DHSBC";
+ compatible = "dh,stm32mp135f-dhcor-dhsbc",
+ "dh,stm32mp135f-dhcor-som",
+ "st,stm32mp135";
+
+ aliases {
+ ethernet0 = &ethernet1;
+ ethernet1 = &ethernet2;
+ serial2 = &usart1;
+ serial3 = &usart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&adc_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc1_pins_a &adc1_usb_cc_pins_b>;
+ vdda-supply = <&vdd_adc>;
+ vref-supply = <&vdd_adc>;
+ status = "okay";
+
+ adc1: adc@0 {
+ status = "okay";
+
+ /*
+ * Type-C USB_PWR_CC1 & USB_PWR_CC2 on in2 & in11.
+ * Use at least 5 * RC as sampling time, e.g. 5 * (Rp + Rd) * C:
+ * 5 * (5.1 kOhm + 47 kOhm) * 5 pF => 1.3 us.
+ * An arbitrary margin is used here (5 us).
+ *
+ * The pinmux pins must be set as ANALOG. Use datasheet DS13483,
+ * Table 7 "STM32MP135C/F ball definitions", to find out which
+ * 'pin name' maps to which 'additional functions', i.e. the
+ * mapping between pin and ADC channel. In this case, PA5 maps
+ * to ADC1_INP2 and PF13 maps to ADC1_INP11.
+ */
+ channel@2 {
+ reg = <2>;
+ st,min-sample-time-ns = <5000>;
+ };
+
+ channel@11 {
+ reg = <11>;
+ st,min-sample-time-ns = <5000>;
+ };
+
+ /* Expansion connector: INP12:pin29 */
+ channel@12 {
+ reg = <12>;
+ st,min-sample-time-ns = <5000>;
+ };
+ };
+};
+
+&ethernet1 {
+ phy-handle = <&ethphy1>;
+ phy-mode = "rgmii-id";
+ pinctrl-0 = <&eth1_rgmii_pins_a>;
+ pinctrl-1 = <&eth1_rgmii_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ st,ext-phyclk;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ ethphy1: ethernet-phy@1 {
+ /* RTL8211F */
+ compatible = "ethernet-phy-id001c.c916";
+ interrupt-parent = <&gpiog>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ reg = <1>;
+ reset-assert-us = <15000>;
+ reset-deassert-us = <55000>;
+ reset-gpios = <&gpioa 11 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&ethernet2 {
+ phy-handle = <&ethphy2>;
+ phy-mode = "rgmii-id";
+ pinctrl-0 = <&eth2_rgmii_pins_a>;
+ pinctrl-1 = <&eth2_rgmii_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ st,ext-phyclk;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ ethphy2: ethernet-phy@1 {
+ /* RTL8211F */
+ compatible = "ethernet-phy-id001c.c916";
+ interrupt-parent = <&gpiog>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ reg = <1>;
+ reset-assert-us = <15000>;
+ reset-deassert-us = <55000>;
+ reset-gpios = <&gpiog 8 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&gpioa {
+ gpio-line-names = "", "", "", "",
+ "", "DHSBC_USB_PWR_CC1", "", "",
+ "", "", "", "DHSBC_nETH1_RST",
+ "", "DHCOR_HW-CODING_0", "", "";
+};
+
+&gpiob {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "DHCOR_BT_HOST_WAKE",
+ "", "", "", "",
+ "", "DHSBC_nTPM_CS", "", "";
+};
+
+&gpioc {
+ gpio-line-names = "", "", "", "DHSBC_USB_5V_MEAS",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpiod {
+ gpio-line-names = "", "", "", "",
+ "", "DHCOR_RAM-CODING_0", "", "",
+ "", "DHCOR_RAM-CODING_1", "", "",
+ "", "", "", "";
+};
+
+&gpioe {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "DHSBC_nTPM_RST", "", "",
+ "DHSBC_nTPM_PIRQ", "", "DHCOR_WL_HOST_WAKE", "";
+};
+
+&gpiof {
+ gpio-line-names = "", "", "DHSBC_USB_PWR_nFLT", "",
+ "", "", "", "",
+ "", "", "", "",
+ "DHCOR_WL_REG_ON", "DHSBC_USB_PWR_CC2", "", "";
+};
+
+&gpiog {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "DHSBC_nETH2_RST", "DHCOR_BT_DEV_WAKE", "", "",
+ "DHSBC_ETH1_INTB", "", "", "DHSBC_ETH2_INTB";
+};
+
+&gpioi {
+ gpio-line-names = "DHCOR_RTC_nINT", "DHCOR_HW-CODING_1",
+ "DHCOR_BT_REG_ON", "DHCOR_PMIC_nINT",
+ "DHSBC_BOOT0", "DHSBC_BOOT1",
+ "DHSBC_BOOT2", "DHSBC_USB-C_DATA_VBUS";
+};
+
+&i2c1 { /* Expansion connector: SDA:pin27 SCL:pin28 */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c1_pins_a>;
+ pinctrl-1 = <&i2c1_sleep_pins_a>;
+ i2c-scl-rising-time-ns = <96>;
+ i2c-scl-falling-time-ns = <3>;
+ clock-frequency = <400000>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+};
+
+&i2c5 { /* Expansion connector: SDA:pin3 SCL:pin5 */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c5_pins_b>;
+ pinctrl-1 = <&i2c5_sleep_pins_b>;
+ i2c-scl-rising-time-ns = <96>;
+ i2c-scl-falling-time-ns = <3>;
+ clock-frequency = <400000>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+};
+
+&m_can1 { /* Expansion connector: TX:pin16 RX:pin18 */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can1_pins_a>;
+ pinctrl-1 = <&m_can1_sleep_pins_a>;
+ status = "okay";
+};
+
+&m_can2 { /* Expansion connector: TX:pin22 RX:pin26 */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can2_pins_a>;
+ pinctrl-1 = <&m_can2_sleep_pins_a>;
+ status = "okay";
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+ status = "okay";
+};
+
+&sai1 { /* Expansion connector: SCK-A:pin12 FS-A:pin35 SD-A:pin38 SD-B:pin40 */
+ clocks = <&rcc SAI1>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+ clock-names = "pclk", "x8k", "x11k";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sai1a_pins_a &sai1b_pins_a>;
+ pinctrl-1 = <&sai1a_sleep_pins_a &sai1b_sleep_pins_a>;
+};
+
+&scmi_voltd {
+ status = "disabled";
+};
+
+&spi2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi2_pins_a>;
+ pinctrl-1 = <&spi2_sleep_pins_a>;
+ cs-gpios = <&gpiob 13 0>;
+ status = "okay";
+
+ st33htph: tpm@0 {
+ compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <24000000>;
+ };
+};
+
+&spi3 { /* Expansion connector: MOSI:pin19 MISO:pin21 SCK:pin22 nCS:pin24 */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi3_pins_a>;
+ pinctrl-1 = <&spi3_sleep_pins_a>;
+ cs-gpios = <&gpiof 3 0>;
+ status = "disabled";
+};
+
+&timers5 { /* Expansion connector: CH3:pin31 */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ pwm {
+ pinctrl-0 = <&pwm5_pins_a>;
+ pinctrl-1 = <&pwm5_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+ timer@4 {
+ status = "okay";
+ };
+};
+
+&timers13 { /* Expansion connector: CH1:pin32 */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ pwm {
+ pinctrl-0 = <&pwm13_pins_a>;
+ pinctrl-1 = <&pwm13_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+ timer@12 {
+ status = "okay";
+ };
+};
+
+&usart1 { /* Expansion connector: RX:pin33 TX:pin37 */
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&usart1_pins_b>;
+ pinctrl-1 = <&usart1_sleep_pins_b>;
+ pinctrl-2 = <&usart1_idle_pins_b>;
+ status = "okay";
+};
+
+&usart2 { /* Expansion connector: RX:pin10 TX:pin8 RTS:pin11 CTS:pin36 */
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&usart2_pins_b>;
+ pinctrl-1 = <&usart2_sleep_pins_b>;
+ pinctrl-2 = <&usart2_idle_pins_b>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usbh_ehci {
+ phys = <&usbphyc_port0>;
+ status = "okay";
+};
+
+&usbh_ohci {
+ phys = <&usbphyc_port0>;
+ status = "okay";
+};
+
+&usbotg_hs {
+ dr_mode = "peripheral";
+ phys = <&usbphyc_port1 0>;
+ phy-names = "usb2-phy";
+ usb33d-supply = <&usb33>;
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+ st,current-boost-microamp = <1000>;
+ st,decrease-hs-slew-rate;
+ st,tune-hs-dc-level = <2>;
+ st,enable-hs-rftime-reduction;
+ st,trim-hs-current = <11>;
+ st,trim-hs-impedance = <2>;
+ st,tune-squelch-level = <1>;
+ st,enable-hs-rx-gain-eq;
+ st,no-hs-ftime-ctrl;
+ st,no-lsfs-sc;
+ connector {
+ compatible = "usb-a-connector";
+ vbus-supply = <&vbus_sw>;
+ };
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+ st,current-boost-microamp = <1000>;
+ st,decrease-hs-slew-rate;
+ st,tune-hs-dc-level = <2>;
+ st,enable-hs-rftime-reduction;
+ st,trim-hs-current = <11>;
+ st,trim-hs-impedance = <2>;
+ st,tune-squelch-level = <1>;
+ st,enable-hs-rx-gain-eq;
+ st,no-hs-ftime-ctrl;
+ st,no-lsfs-sc;
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ vbus-gpios = <&gpioi 7 GPIO_ACTIVE_HIGH>;
+ label = "Type-C";
+ self-powered;
+ type = "micro";
+ };
+};
diff --git a/arch/arm/boot/dts/st/stm32mp135f-dk.dts b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
index 567e53ad285f..1af335a39993 100644
--- a/arch/arm/boot/dts/st/stm32mp135f-dk.dts
+++ b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
@@ -19,6 +19,7 @@
compatible = "st,stm32mp135f-dk", "st,stm32mp135";
aliases {
+ ethernet0 = &ethernet1;
serial0 = &uart4;
serial1 = &usart1;
serial2 = &uart8;
@@ -29,6 +30,20 @@
stdout-path = "serial0:115200n8";
};
+ clocks {
+ clk_ext_camera: clk-ext-camera {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+
+ clk_mco1: clk-mco1 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+ };
+
memory@c0000000 {
device_type = "memory";
reg = <0xc0000000 0x20000000>;
@@ -141,6 +156,45 @@
status = "okay";
};
+&dcmipp {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&dcmipp_pins_a>;
+ pinctrl-1 = <&dcmipp_sleep_pins_a>;
+ status = "okay";
+
+ port {
+ dcmipp_0: endpoint {
+ remote-endpoint = <&mipid02_2>;
+ bus-width = <8>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ pclk-sample = <0>;
+ };
+ };
+};
+
+&ethernet1 {
+ status = "okay";
+ pinctrl-0 = <&eth1_rmii_pins_a>;
+ pinctrl-1 = <&eth1_rmii_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rmii";
+ phy-handle = <&phy0_eth1>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0_eth1: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0007.c131";
+ reg = <0>;
+ reset-gpios = <&mcp23017 9 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+ };
+};
+
&i2c1 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c1_pins_a>;
@@ -201,6 +255,76 @@
/* spare dmas for other usage */
/delete-property/dmas;
/delete-property/dma-names;
+
+ stmipi: csi2rx@14 {
+ compatible = "st,st-mipid02";
+ reg = <0x14>;
+ clocks = <&clk_mco1>;
+ clock-names = "xclk";
+ VDDE-supply = <&scmi_v1v8_periph>;
+ VDDIN-supply = <&scmi_v1v8_periph>;
+ reset-gpios = <&mcp23017 2 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+
+ mipid02_0: endpoint {
+ data-lanes = <1 2>;
+ lane-polarities = <0 0 0>;
+ remote-endpoint = <&gc2145_ep>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+
+ mipid02_2: endpoint {
+ bus-width = <8>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ pclk-sample = <0>;
+ remote-endpoint = <&dcmipp_0>;
+ };
+ };
+ };
+ };
+
+ gc2145: camera@3c {
+ compatible = "galaxycore,gc2145";
+ reg = <0x3c>;
+ clocks = <&clk_ext_camera>;
+ iovdd-supply = <&scmi_v3v3_sw>;
+ avdd-supply = <&scmi_v3v3_sw>;
+ dvdd-supply = <&scmi_v3v3_sw>;
+ powerdown-gpios = <&mcp23017 3 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
+ reset-gpios = <&mcp23017 4 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
+ status = "okay";
+
+ port {
+ gc2145_ep: endpoint {
+ remote-endpoint = <&mipid02_0>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <120000000 192000000 240000000>;
+ };
+ };
+ };
+
+ goodix: goodix-ts@5d {
+ compatible = "goodix,gt911";
+ reg = <0x5d>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&goodix_pins_a>;
+ interrupt-parent = <&gpiof>;
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+ AVDD28-supply = <&scmi_v3v3_sw>;
+ VDDIO-supply = <&scmi_v3v3_sw>;
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <272>;
+ status = "okay";
+ };
};
&iwdg2 {
@@ -273,6 +397,7 @@
/delete-property/dma-names;
status = "disabled";
pwm {
+ /* PWM output on pin 7 of the expansion connector (CN8.7) using TIM3_CH4 func */
pinctrl-0 = <&pwm3_pins_a>;
pinctrl-1 = <&pwm3_sleep_pins_a>;
pinctrl-names = "default", "sleep";
@@ -288,6 +413,7 @@
/delete-property/dma-names;
status = "disabled";
pwm {
+ /* PWM output on pin 31 of the expansion connector (CN8.31) using TIM4_CH2 func */
pinctrl-0 = <&pwm4_pins_a>;
pinctrl-1 = <&pwm4_sleep_pins_a>;
pinctrl-names = "default", "sleep";
@@ -303,6 +429,7 @@
/delete-property/dma-names;
status = "disabled";
pwm {
+ /* PWM output on pin 32 of the expansion connector (CN8.32) using TIM8_CH3 func */
pinctrl-0 = <&pwm8_pins_a>;
pinctrl-1 = <&pwm8_sleep_pins_a>;
pinctrl-names = "default", "sleep";
@@ -316,6 +443,7 @@
&timers14 {
status = "disabled";
pwm {
+ /* PWM output on pin 33 of the expansion connector (CN8.33) using TIM14_CH1 func */
pinctrl-0 = <&pwm14_pins_a>;
pinctrl-1 = <&pwm14_sleep_pins_a>;
pinctrl-names = "default", "sleep";
diff --git a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
new file mode 100644
index 000000000000..ddad6497775b
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2024 Marek Vasut <marex@denx.de>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+#include <dt-bindings/regulator/st,stm32mp13-regulator.h>
+#include "stm32mp13-pinctrl.dtsi"
+
+/ {
+ model = "DH electronics STM32MP13xx DHCOR SoM";
+ compatible = "dh,stm32mp131a-dhcor-som",
+ "st,stm32mp131";
+
+ aliases {
+ mmc0 = &sdmmc2;
+ mmc1 = &sdmmc1;
+ serial0 = &uart4;
+ serial1 = &uart7;
+ rtc0 = &rv3032;
+ spi0 = &qspi;
+ };
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xc0000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ optee@dd000000 {
+ reg = <0xdd000000 0x3000000>;
+ no-map;
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpiof 12 GPIO_ACTIVE_LOW>;
+ };
+
+ vin: vin {
+ compatible = "regulator-fixed";
+ regulator-name = "vin";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+};
+
+&i2c3 {
+ i2c-scl-rising-time-ns = <96>;
+ i2c-scl-falling-time-ns = <3>;
+ clock-frequency = <400000>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioi 3 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "okay";
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+
+ ldo1-supply = <&vin>;
+ ldo2-supply = <&vin>;
+ ldo3-supply = <&vin>;
+ ldo4-supply = <&vin>;
+ ldo5-supply = <&vin>;
+ ldo6-supply = <&vin>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcpu: buck1 { /* VDD_CPU_1V2 */
+ regulator-name = "vddcpu";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 { /* VDD_DDR_1V35 */
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 { /* VDD_3V3_1V8 */
+ regulator-name = "vdd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vddcore: buck4 { /* VDD_CORE_1V2 */
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_adc: ldo1 { /* VDD_ADC_1V8 */
+ regulator-name = "vdd_adc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ interrupts = <IT_CURLIM_LDO1 0>;
+ };
+
+ vdd_ldo2: ldo2 { /* LDO2_OUT_1V8 */
+ regulator-name = "vdd_ldo2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ interrupts = <IT_CURLIM_LDO2 0>;
+ };
+
+ vdd_ldo3: ldo3 { /* LDO3_OUT */
+ regulator-name = "vdd_ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ interrupts = <IT_CURLIM_LDO3 0>;
+ };
+
+ vdd_usb: ldo4 { /* VDD_USB_3V3 */
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdd_sd: ldo5 { /* VDD_SD_3V3_1V8 */
+ regulator-name = "vdd_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ };
+
+ vdd_sd2: ldo6 { /* VDD_SD2_3V3_1V8 */
+ regulator-name = "vdd_sd2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ };
+
+ vref_ddr: vref_ddr { /* VREF_DDR_0V675 */
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ };
+
+ bst_out: boost { /* BST_OUT_5V2 */
+ regulator-name = "bst_out";
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge = <1>;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 1>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ status = "okay";
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+
+ eeprom0: eeprom@50 {
+ compatible = "atmel,24c256"; /* ST M24256 */
+ reg = <0x50>;
+ pagesize = <64>;
+ };
+
+ rv3032: rtc@51 {
+ compatible = "microcrystal,rv3032";
+ reg = <0x51>;
+ interrupts-extended = <&gpioi 0 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&qspi {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qspi_clk_pins_a
+ &qspi_bk1_pins_a
+ &qspi_cs1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a
+ &qspi_bk1_sleep_pins_a
+ &qspi_cs1_sleep_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash0: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+/* Console UART */
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_b>;
+ pinctrl-1 = <&uart4_sleep_pins_b>;
+ pinctrl-2 = <&uart4_idle_pins_b>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+/* Bluetooth */
+&uart7 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart7_pins_a>;
+ pinctrl-1 = <&uart7_sleep_pins_a>;
+ pinctrl-2 = <&uart7_idle_pins_a>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "infineon,cyw43439-bt", "brcm,bcm4329-bt";
+ max-speed = <3000000>;
+ device-wakeup-gpios = <&gpiog 9 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpioi 2 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+/* SDIO WiFi */
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_clk_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_clk_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ bus-width = <4>;
+ cap-power-off-card;
+ keep-power-in-suspend;
+ non-removable;
+ st,neg-edge;
+ vmmc-supply = <&vdd>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ brcmf: brcmf@1 { /* muRata 1YN */
+ reg = <1>;
+ compatible = "infineon,cyw43439-fmac", "brcm,bcm4329-fmac";
+ interrupt-parent = <&gpioe>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "host-wake";
+ };
+};
+
+/* eMMC */
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a &sdmmc2_clk_pins_a>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_a &sdmmc2_clk_pins_a>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_a>;
+ bus-width = <8>;
+ mmc-ddr-3_3v;
+ no-sd;
+ no-sdio;
+ non-removable;
+ st,neg-edge;
+ vmmc-supply = <&vdd>;
+ vqmmc-supply = <&vdd>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi
index 90c5c72c87ab..4f878ec102c1 100644
--- a/arch/arm/boot/dts/st/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp151.dtsi
@@ -50,6 +50,7 @@
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
interrupt-parent = <&intc>;
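+ /* The system counter does not tick while the platform is suspended */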
+ arm,no-tick-in-suspend;
};
clocks {
diff --git a/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
index 306e1bc2a514..847b360f02fc 100644
--- a/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
@@ -62,6 +62,11 @@
reset-names = "mcu_rst", "hold_boot";
};
+&optee {
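+ /* Interrupt used by OP-TEE for asynchronous notifications */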
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+};
+
&rcc {
compatible = "st,stm32mp1-rcc-secure", "syscon";
clock-names = "hse", "hsi", "csi", "lse", "lsi";
diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
index 956da5f26c1c..43280289759d 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
@@ -68,6 +68,11 @@
reset-names = "mcu_rst", "hold_boot";
};
+&optee {
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+};
+
&rcc {
compatible = "st,stm32mp1-rcc-secure", "syscon";
clock-names = "hse", "hsi", "csi", "lse", "lsi";
diff --git a/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
index 8e4b0db198c2..6f27d794d270 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
@@ -67,6 +67,11 @@
reset-names = "mcu_rst", "hold_boot";
};
+&optee {
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+};
+
&rcc {
compatible = "st,stm32mp1-rcc-secure", "syscon";
clock-names = "hse", "hsi", "csi", "lse", "lsi";
diff --git a/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
index 72b9cab2d990..6ae391bffee5 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
@@ -72,6 +72,11 @@
reset-names = "mcu_rst", "hold_boot";
};
+&optee {
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+};
+
&rcc {
compatible = "st,stm32mp1-rcc-secure", "syscon";
clock-names = "hse", "hsi", "csi", "lse", "lsi";
diff --git a/arch/arm/boot/dts/st/stm32mp157c-osd32mp1-red.dts b/arch/arm/boot/dts/st/stm32mp157c-osd32mp1-red.dts
index 527c33be66cc..36e6055b5665 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-osd32mp1-red.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-osd32mp1-red.dts
@@ -147,11 +147,6 @@
status = "okay";
};
-&pwr_regulators {
- vdd-supply = <&vdd>;
- vdd_3v3_usbfs-supply = <&vdd_usb>;
-};
-
&rtc {
status = "okay";
};
@@ -211,11 +206,3 @@
&usbphyc {
status = "okay";
};
-
-&usbphyc_port0 {
- phy-supply = <&vdd_usb>;
-};
-
-&usbphyc_port1 {
- phy-supply = <&vdd_usb>;
-};
diff --git a/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi b/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi
index cfaf8adde319..c87fd96cbd91 100644
--- a/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi
@@ -379,11 +379,6 @@ baseboard_eeprom: &sip_eeprom {
};
};
-&pwr_regulators {
- vdd-supply = <&vdd>;
- vdd_3v3_usbfs-supply = <&vdd_usb>;
-};
-
&rtc {
status = "okay";
};
@@ -590,14 +585,6 @@ baseboard_eeprom: &sip_eeprom {
status = "okay";
};
-&usbphyc_port0 {
- phy-supply = <&vdd_usb>;
-};
-
-&usbphyc_port1 {
- phy-supply = <&vdd_usb>;
-};
-
&vrefbuf {
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-osd32.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-osd32.dtsi
index aeb71c41a734..2022a1fa31ca 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-osd32.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-osd32.dtsi
@@ -214,3 +214,16 @@
&rng1 {
status = "okay";
};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+};
diff --git a/arch/arm/boot/dts/ti/davinci/da850-evm.dts b/arch/arm/boot/dts/ti/davinci/da850-evm.dts
index 6c5936278e75..1f5cd35f8b74 100644
--- a/arch/arm/boot/dts/ti/davinci/da850-evm.dts
+++ b/arch/arm/boot/dts/ti/davinci/da850-evm.dts
@@ -65,7 +65,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: 480x272 {
+ timing0: timing-480x272 {
clock-frequency = <9000000>;
hactive = <480>;
vactive = <272>;
diff --git a/arch/arm/boot/dts/ti/omap/am335x-guardian.dts b/arch/arm/boot/dts/ti/omap/am335x-guardian.dts
index 56e5d954a490..4b070e634b28 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-guardian.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-guardian.dts
@@ -74,7 +74,7 @@
pinctrl-1 = <&lcd_pins_sleep>;
display-timings {
- 320x240 {
+ timing-320x240 {
hactive = <320>;
vactive = <240>;
hback-porch = <68>;
diff --git a/arch/arm/boot/dts/ti/omap/am335x-pdu001.dts b/arch/arm/boot/dts/ti/omap/am335x-pdu001.dts
index f38f5bff2b96..17574d0d0525 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-pdu001.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-pdu001.dts
@@ -67,7 +67,7 @@
};
display-timings {
- 240x320p16 {
+ timing-240x320p16 {
clock-frequency = <6500000>;
hactive = <240>;
vactive = <320>;
diff --git a/arch/arm/boot/dts/ti/omap/am335x-pepper.dts b/arch/arm/boot/dts/ti/omap/am335x-pepper.dts
index d5a4a21889d1..e7d561a527fd 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-pepper.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-pepper.dts
@@ -202,7 +202,7 @@
};
display-timings {
native-mode = <&timing0>;
- timing0: 480x272 {
+ timing0: timing-480x272 {
clock-frequency = <18400000>;
hactive = <480>;
vactive = <272>;
diff --git a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
index eb1ec85aba28..e6a18954e449 100644
--- a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
+++ b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
@@ -196,7 +196,6 @@
extcon_usb1: extcon_usb1 {
compatible = "linux,extcon-usb-gpio";
- ti,enable-id-detection;
id-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/vt8500/vt8500-bv07.dts b/arch/arm/boot/dts/vt8500/vt8500-bv07.dts
index e9f55bd30bd4..38a2da5e2c5d 100644
--- a/arch/arm/boot/dts/vt8500/vt8500-bv07.dts
+++ b/arch/arm/boot/dts/vt8500/vt8500-bv07.dts
@@ -16,7 +16,7 @@
bits-per-pixel = <16>;
display-timings {
native-mode = <&timing0>;
- timing0: 800x480 {
+ timing0: timing-800x480 {
clock-frequency = <0>; /* unused but required */
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/vt8500/vt8500.dtsi b/arch/arm/boot/dts/vt8500/vt8500.dtsi
index b7e09eff5bb2..f23cb5ee11ae 100644
--- a/arch/arm/boot/dts/vt8500/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500/vt8500.dtsi
@@ -115,7 +115,7 @@
interrupts = <43>;
};
- uhci@d8007b00 {
+ usb@d8007b00 {
compatible = "platform-uhci";
reg = <0xd8007b00 0x200>;
interrupts = <43>;
diff --git a/arch/arm/boot/dts/vt8500/wm8505-ref.dts b/arch/arm/boot/dts/vt8500/wm8505-ref.dts
index 2d77c087676e..8ce9e2ef0a81 100644
--- a/arch/arm/boot/dts/vt8500/wm8505-ref.dts
+++ b/arch/arm/boot/dts/vt8500/wm8505-ref.dts
@@ -16,7 +16,7 @@
bits-per-pixel = <32>;
display-timings {
native-mode = <&timing0>;
- timing0: 800x480 {
+ timing0: timing-800x480 {
clock-frequency = <0>; /* unused but required */
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/vt8500/wm8505.dtsi b/arch/arm/boot/dts/vt8500/wm8505.dtsi
index 168cd12b07bc..d9e1280372c5 100644
--- a/arch/arm/boot/dts/vt8500/wm8505.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8505.dtsi
@@ -213,7 +213,7 @@
interrupts = <1>;
};
- uhci@d8007300 {
+ usb@d8007300 {
compatible = "platform-uhci";
reg = <0xd8007300 0x200>;
interrupts = <0>;
diff --git a/arch/arm/boot/dts/vt8500/wm8650-mid.dts b/arch/arm/boot/dts/vt8500/wm8650-mid.dts
index f6a42149a0a0..7977b6c1e8eb 100644
--- a/arch/arm/boot/dts/vt8500/wm8650-mid.dts
+++ b/arch/arm/boot/dts/vt8500/wm8650-mid.dts
@@ -17,7 +17,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: 800x480 {
+ timing0: timing-800x480 {
clock-frequency = <0>; /* unused but required */
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/vt8500/wm8650.dtsi b/arch/arm/boot/dts/vt8500/wm8650.dtsi
index bc057b6f7d16..35d12d77efc0 100644
--- a/arch/arm/boot/dts/vt8500/wm8650.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8650.dtsi
@@ -185,7 +185,7 @@
interrupts = <43>;
};
- uhci@d8007b00 {
+ usb@d8007b00 {
compatible = "platform-uhci";
reg = <0xd8007b00 0x200>;
interrupts = <43>;
diff --git a/arch/arm/boot/dts/vt8500/wm8750.dtsi b/arch/arm/boot/dts/vt8500/wm8750.dtsi
index 33aeb37491f4..b292f85d4e69 100644
--- a/arch/arm/boot/dts/vt8500/wm8750.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8750.dtsi
@@ -257,13 +257,13 @@
interrupts = <26>;
};
- uhci@d8007b00 {
+ usb@d8007b00 {
compatible = "platform-uhci";
reg = <0xd8007b00 0x200>;
interrupts = <26>;
};
- uhci@d8008d00 {
+ usb@d8008d00 {
compatible = "platform-uhci";
reg = <0xd8008d00 0x200>;
interrupts = <26>;
diff --git a/arch/arm/boot/dts/vt8500/wm8850-w70v2.dts b/arch/arm/boot/dts/vt8500/wm8850-w70v2.dts
index c7a6fe0ce48f..5d409323b10c 100644
--- a/arch/arm/boot/dts/vt8500/wm8850-w70v2.dts
+++ b/arch/arm/boot/dts/vt8500/wm8850-w70v2.dts
@@ -28,7 +28,7 @@
bits-per-pixel = <16>;
display-timings {
native-mode = <&timing0>;
- timing0: 800x480 {
+ timing0: timing-800x480 {
clock-frequency = <0>; /* unused but required */
hactive = <800>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/vt8500/wm8850.dtsi b/arch/arm/boot/dts/vt8500/wm8850.dtsi
index 65c9271050e6..c61717ebb4f1 100644
--- a/arch/arm/boot/dts/vt8500/wm8850.dtsi
+++ b/arch/arm/boot/dts/vt8500/wm8850.dtsi
@@ -244,13 +244,13 @@
interrupts = <26>;
};
- uhci@d8007b00 {
+ usb@d8007b00 {
compatible = "platform-uhci";
reg = <0xd8007b00 0x200>;
interrupts = <26>;
};
- uhci@d8008d00 {
+ usb@d8008d00 {
compatible = "platform-uhci";
reg = <0xd8008d00 0x200>;
interrupts = <26>;
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 1d53aec4c836..6eabe2313c9a 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -143,6 +143,7 @@ CONFIG_VIDEO_OV2640=m
CONFIG_VIDEO_OV7740=m
CONFIG_DRM=y
CONFIG_DRM_ATMEL_HLCDC=y
+CONFIG_DRM_MICROCHIP_LVDS_SERIALIZER=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_EDP=y
CONFIG_FB_ATMEL=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index cf2480dce285..b7c271ddf9c0 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -133,6 +133,7 @@ CONFIG_SMSC911X=y
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_MICREL_PHY=y
CONFIG_AT803X_PHY=y
+CONFIG_DP83867_PHY=y
CONFIG_CAN_FLEXCAN=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
@@ -180,6 +181,7 @@ CONFIG_TOUCHSCREEN_SX8654=y
CONFIG_TOUCHSCREEN_COLIBRI_VF50=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MMA8450=y
+CONFIG_INPUT_GPIO_BEEPER=m
CONFIG_SERIO_SERPORT=m
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_IMX=y
@@ -211,6 +213,7 @@ CONFIG_GPIO_SIOX=m
CONFIG_GPIO_VF610=y
CONFIG_GPIO_MAX732X=y
CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_PCF857X=y
CONFIG_GPIO_BD71815=y
CONFIG_GPIO_STMPE=y
@@ -226,6 +229,7 @@ CONFIG_RN5T618_POWER=m
CONFIG_SENSORS_MC13783_ADC=y
CONFIG_SENSORS_GPIO_FAN=y
CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_PWM_FAN=y
CONFIG_SENSORS_SY7636A=y
CONFIG_THERMAL_STATISTICS=y
@@ -282,6 +286,9 @@ CONFIG_DRM_PANEL_LVDS=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_EDP=y
CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
+CONFIG_DRM_DISPLAY_CONNECTOR=y
+CONFIG_DRM_LVDS_CODEC=m
+CONFIG_DRM_SII902X=y
CONFIG_DRM_TI_TFP410=y
CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
CONFIG_DRM_DW_HDMI_CEC=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 86bf057ac366..62734530a3d6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -469,6 +469,7 @@ CONFIG_SPI_XILINX=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_PINCTRL_AS3722=y
+CONFIG_PINCTRL_MCP23S08=m
CONFIG_PINCTRL_MICROCHIP_SGPIO=y
CONFIG_PINCTRL_OCELOT=y
CONFIG_PINCTRL_PALMAS=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index 96ad442089bd..cdb6065e04fd 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
CONFIG_ARCH_VEXPRESS=y
-CONFIG_ARCH_VEXPRESS_DCSCB=y
CONFIG_ARCH_VEXPRESS_TC2_PM=y
CONFIG_SMP=y
CONFIG_HAVE_ARM_ARCH_TIMER=y
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 44667bdb4707..9beb64d30586 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -5,6 +5,7 @@
#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>
+#include <linux/cmpxchg-emu.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
@@ -162,7 +163,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
prefetchw((const void *)ptr);
switch (size) {
-#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
+#ifdef CONFIG_CPU_V6 /* ARCH == ARMv6 */
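+ /* ARMv6 lacks byte exclusives (ldrexb/strexb); emulate 8-bit cmpxchg */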
+ case 1:
+ oldval = cmpxchg_emu_u8((volatile u8 *)ptr, old, new);
+ break;
+#else /* min ARCH > ARMv6 */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 78282ced5038..e408399d5f0e 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -14,6 +14,7 @@
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
+#include <asm/uaccess.h>
#ifdef CONFIG_EFI
void efi_init(void);
@@ -25,6 +26,18 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, boo
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
+#ifdef CONFIG_CPU_TTBR0_PAN
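+/*
+ * EFI runtime services are mapped via efi_mm in the TTBR0 (user) address
+ * range, so uaccess must be temporarily enabled around the call when
+ * TTBR0-based PAN is in effect.
+ */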
+#undef arch_efi_call_virt
+#define arch_efi_call_virt(p, f, args...) ({ \
+ unsigned int flags = uaccess_save_and_enable(); \
+ efi_status_t res = _Generic((p)->f(args), \
+ efi_status_t: (p)->f(args), \
+ default: ((p)->f(args), EFI_ABORTED)); \
+ uaccess_restore(flags); \
+ res; \
+})
+#endif
+
#define ARCH_EFI_IRQ_FLAGS_MASK \
(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
PSR_T_BIT | MODE_MASK)
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 3676e82cf95c..9fb00973c608 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -37,7 +37,6 @@
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
/*
* Unimplemented (or alternatively implemented) syscalls
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 89a77e3f51d2..aaae31b8c4a5 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -78,8 +78,6 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
- perf_event_v7.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_VDSO) += vdso.o
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index a0b6d1e3812f..e61591f33a6c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -232,11 +232,24 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long old;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
+err_out:
return;
if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
- /* FP points one word below parent's top of stack */
- frame_pointer += 4;
+ /*
+ * Usually, the stack frames are contiguous in memory but cases
+ * have been observed where the next stack frame does not live
+ * at 'frame_pointer + 4' as this code used to assume.
+ *
+ * Instead, dereference the field in the stack frame that
+ * stores the SP of the calling frame: to avoid unbounded
+ * recursion, this cannot involve any ftrace instrumented
+ * functions, so use the __get_kernel_nofault() primitive
+ * directly.
+ */
+ __get_kernel_nofault(&frame_pointer,
+ (unsigned long *)(frame_pointer - 8),
+ unsigned long, err_out);
} else {
struct stackframe frame = {
.fp = frame_pointer,
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 8aa39db095d7..2c5155bd376b 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -61,7 +61,7 @@ static void davinci_pm_suspend(void)
/* Configure sleep count in deep sleep register */
val = __raw_readl(pm_config.deepsleep_reg);
- val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
+ val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
val |= pm_config.sleepcount;
__raw_writel(val, pm_config.deepsleep_reg);
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 1e4cd502340e..7695cfce01a1 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/gpio-pxa.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/soc/pxa/cpu.h>
@@ -17,6 +18,7 @@
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/mmp_dma.h>
+#include "mfp-pxa2xx.h"
#include "regs-ost.h"
#include "reset.h"
#include "devices.h"
@@ -46,7 +48,7 @@ struct platform_device pxa_device_pmu = {
.num_resources = 1,
};
-static struct resource pxamci_resources[] = {
+static const struct resource pxamci_resources[] = {
[0] = {
.start = 0x41100000,
.end = 0x41100fff,
@@ -59,22 +61,26 @@ static struct resource pxamci_resources[] = {
},
};
-static u64 pxamci_dmamask = 0xffffffffUL;
-
-struct platform_device pxa_device_mci = {
- .name = "pxa2xx-mci",
- .id = 0,
- .dev = {
- .dma_mask = &pxamci_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(pxamci_resources),
- .resource = pxamci_resources,
-};
-
-void __init pxa_set_mci_info(struct pxamci_platform_data *info)
+void __init pxa_set_mci_info(const struct pxamci_platform_data *info,
+ const struct property_entry *props)
{
- pxa_register_device(&pxa_device_mci, info);
+ const struct platform_device_info mci_info = {
+ .name = "pxa2xx-mci",
+ .id = 0,
+ .res = pxamci_resources,
+ .num_res = ARRAY_SIZE(pxamci_resources),
+ .data = info,
+ .size_data = sizeof(*info),
+ .dma_mask = 0xffffffffUL,
+ .properties = props,
+ };
+ struct platform_device *mci_dev;
+ int err;
+
+ mci_dev = platform_device_register_full(&mci_info);
+ err = PTR_ERR_OR_ZERO(mci_dev);
+ if (err)
+ pr_err("Unable to create mci device: %d\n", err);
}
static struct pxa2xx_udc_mach_info pxa_udc_info = {
@@ -627,6 +633,11 @@ struct platform_device pxa27x_device_pwm1 = {
};
#endif /* CONFIG_PXA27x || CONFIG_PXA3xx */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
+const struct software_node pxa2xx_gpiochip_node = {
+ .name = "gpio-pxa",
+};
+
struct resource pxa_resource_gpio[] = {
{
.start = 0x40e00000,
@@ -650,11 +661,19 @@ struct resource pxa_resource_gpio[] = {
},
};
+static struct pxa_gpio_platform_data pxa2xx_gpio_info = {
+ .irq_base = PXA_GPIO_TO_IRQ(0),
+ .gpio_set_wake = gpio_set_wake,
+};
+
struct platform_device pxa25x_device_gpio = {
.name = "pxa25x-gpio",
.id = -1,
.num_resources = ARRAY_SIZE(pxa_resource_gpio),
.resource = pxa_resource_gpio,
+ .dev = {
+ .platform_data = &pxa2xx_gpio_info,
+ },
};
struct platform_device pxa27x_device_gpio = {
@@ -662,7 +681,11 @@ struct platform_device pxa27x_device_gpio = {
.id = -1,
.num_resources = ARRAY_SIZE(pxa_resource_gpio),
.resource = pxa_resource_gpio,
+ .dev = {
+ .platform_data = &pxa2xx_gpio_info,
+ },
};
+#endif /* CONFIG_PXA25x || CONFIG_PXA27x */
static struct resource pxa_dma_resource[] = {
[0] = {
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index 82c83939017a..72c556ff67db 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -4,7 +4,6 @@
struct mmp_dma_platdata;
extern struct platform_device pxa_device_pmu;
-extern struct platform_device pxa_device_mci;
extern struct platform_device pxa3xx_device_mci2;
extern struct platform_device pxa3xx_device_mci3;
extern struct platform_device pxa25x_device_udc;
@@ -53,8 +52,8 @@ extern struct platform_device pxa_device_asoc_ssp4;
extern struct platform_device pxa25x_device_gpio;
extern struct platform_device pxa27x_device_gpio;
-extern struct platform_device pxa3xx_device_gpio;
-extern struct platform_device pxa93x_device_gpio;
+
+extern const struct software_node pxa2xx_gpiochip_node;
void __init pxa_register_device(struct platform_device *dev, void *data);
void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata);
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index c9f0f62187bd..efa6faa62a2c 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -90,7 +90,7 @@ static struct pxamci_platform_data gumstix_mci_platform_data = {
static void __init gumstix_mmc_init(void)
{
- pxa_set_mci_info(&gumstix_mci_platform_data);
+ pxa_set_mci_info(&gumstix_mci_platform_data, NULL);
}
#else
static void __init gumstix_mmc_init(void)
@@ -100,26 +100,22 @@ static void __init gumstix_mmc_init(void)
#endif
#ifdef CONFIG_USB_PXA25X
-static struct gpiod_lookup_table gumstix_gpio_vbus_gpiod_table = {
- .dev_id = "gpio-vbus",
- .table = {
- GPIO_LOOKUP("gpio-pxa", GPIO_GUMSTIX_USB_GPIOn,
- "vbus", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-pxa", GPIO_GUMSTIX_USB_GPIOx,
- "pullup", GPIO_ACTIVE_HIGH),
- { },
- },
+static const struct property_entry gumstix_vbus_props[] __initconst = {
+ PROPERTY_ENTRY_GPIO("vbus-gpios", &pxa2xx_gpiochip_node,
+ GPIO_GUMSTIX_USB_GPIOn, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("pullup-gpios", &pxa2xx_gpiochip_node,
+ GPIO_GUMSTIX_USB_GPIOx, GPIO_ACTIVE_HIGH),
+ { }
};
-static struct platform_device gumstix_gpio_vbus = {
+static const struct platform_device_info gumstix_gpio_vbus_info __initconst = {
.name = "gpio-vbus",
- .id = -1,
+ .id = PLATFORM_DEVID_NONE,
+ .properties = gumstix_vbus_props,
};
static void __init gumstix_udc_init(void)
{
- gpiod_add_lookup_table(&gumstix_gpio_vbus_gpiod_table);
- platform_device_register(&gumstix_gpio_vbus);
+ platform_device_register_full(&gumstix_gpio_vbus_info);
}
#else
static void gumstix_udc_init(void)
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 02712d24be82..03e34841fc00 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -178,12 +178,8 @@ void __init pxa25x_map_io(void)
pxa25x_get_clk_frequency_khz(1);
}
-static struct pxa_gpio_platform_data pxa25x_gpio_info __initdata = {
- .irq_base = PXA_GPIO_TO_IRQ(0),
- .gpio_set_wake = gpio_set_wake,
-};
-
static struct platform_device *pxa25x_devices[] __initdata = {
+ &pxa25x_device_gpio,
&pxa25x_device_udc,
&pxa_device_pmu,
&pxa_device_i2s,
@@ -243,8 +239,8 @@ static int __init pxa25x_init(void)
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
if (!of_have_populated_dt()) {
+ software_node_register(&pxa2xx_gpiochip_node);
pxa2xx_set_dmac_info(&pxa25x_dma_pdata);
- pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
ret = platform_add_devices(pxa25x_devices,
ARRAY_SIZE(pxa25x_devices));
}
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index d71491e2e1d6..f8382477d629 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -276,12 +276,8 @@ void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info)
pxa_register_device(&pxa27x_device_i2c_power, info);
}
-static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = {
- .irq_base = PXA_GPIO_TO_IRQ(0),
- .gpio_set_wake = gpio_set_wake,
-};
-
static struct platform_device *devices[] __initdata = {
+ &pxa27x_device_gpio,
&pxa27x_device_udc,
&pxa_device_pmu,
&pxa_device_i2s,
@@ -345,8 +341,7 @@ static int __init pxa27x_init(void)
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
if (!of_have_populated_dt()) {
- pxa_register_device(&pxa27x_device_gpio,
- &pxa27x_gpio_info);
+ software_node_register(&pxa2xx_gpiochip_node);
pxa2xx_set_dmac_info(&pxa27x_dma_pdata);
ret = platform_add_devices(devices,
ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 3c5f5a3cb480..452bf7aac1fa 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -14,6 +14,7 @@
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
#include <linux/leds.h>
#include <linux/i2c.h>
#include <linux/platform_data/i2c-pxa.h>
@@ -28,6 +29,7 @@
#include <linux/input/matrix_keypad.h>
#include <linux/regulator/machine.h>
#include <linux/io.h>
+#include <linux/property.h>
#include <linux/reboot.h>
#include <linux/memblock.h>
@@ -128,6 +130,19 @@ static unsigned long spitz_pin_config[] __initdata = {
GPIO1_GPIO | WAKEUP_ON_EDGE_FALL, /* SPITZ_GPIO_RESET */
};
+static const struct software_node spitz_scoop_1_gpiochip_node = {
+ .name = "sharp-scoop.0",
+};
+
+/* Only on Spitz */
+static const struct software_node spitz_scoop_2_gpiochip_node = {
+ .name = "sharp-scoop.1",
+};
+
+/* Only on Akita */
+static const struct software_node akita_max7310_gpiochip_node = {
+ .name = "i2c-max7310",
+};
/******************************************************************************
* Scoop GPIO expander
@@ -452,35 +467,64 @@ static inline void spitz_keys_init(void) {}
* LEDs
******************************************************************************/
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
-static struct gpio_led spitz_gpio_leds[] = {
- {
- .name = "spitz:amber:charge",
- .default_trigger = "sharpsl-charge",
- .gpio = SPITZ_GPIO_LED_ORANGE,
- },
- {
- .name = "spitz:green:hddactivity",
- .default_trigger = "disk-activity",
- .gpio = SPITZ_GPIO_LED_GREEN,
- },
+static const struct software_node spitz_gpio_leds_node = {
+ .name = "spitz-leds",
};
-static struct gpio_led_platform_data spitz_gpio_leds_info = {
- .leds = spitz_gpio_leds,
- .num_leds = ARRAY_SIZE(spitz_gpio_leds),
+static const struct property_entry spitz_orange_led_props[] = {
+ PROPERTY_ENTRY_STRING("linux,default-trigger", "sharpsl-charge"),
+ PROPERTY_ENTRY_GPIO("gpios",
+ &spitz_scoop_1_gpiochip_node, 6, GPIO_ACTIVE_HIGH),
+ { }
};
-static struct platform_device spitz_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &spitz_gpio_leds_info,
- },
+static const struct software_node spitz_orange_led_node = {
+ .name = "spitz:amber:charge",
+ .parent = &spitz_gpio_leds_node,
+ .properties = spitz_orange_led_props,
+};
+
+static const struct property_entry spitz_green_led_props[] = {
+ PROPERTY_ENTRY_STRING("linux,default-trigger", "disk-activity"),
+ PROPERTY_ENTRY_GPIO("gpios",
+ &spitz_scoop_1_gpiochip_node, 0, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node spitz_green_led_node = {
+ .name = "spitz:green:hddactivity",
+ .parent = &spitz_gpio_leds_node,
+ .properties = spitz_green_led_props,
+};
+
+static const struct software_node *spitz_gpio_leds_swnodes[] = {
+ &spitz_gpio_leds_node,
+ &spitz_orange_led_node,
+ &spitz_green_led_node,
+ NULL
};
static void __init spitz_leds_init(void)
{
- platform_device_register(&spitz_led_device);
+ struct platform_device_info led_info = {
+ .name = "leds-gpio",
+ .id = PLATFORM_DEVID_NONE,
+ };
+ struct platform_device *led_dev;
+ int err;
+
+ err = software_node_register_node_group(spitz_gpio_leds_swnodes);
+ if (err) {
+ pr_err("failed to register LED software nodes: %d\n", err);
+ return;
+ }
+
+ led_info.fwnode = software_node_fwnode(&spitz_gpio_leds_node);
+
+ led_dev = platform_device_register_full(&led_info);
+ err = PTR_ERR_OR_ZERO(led_dev);
+ if (err)
+ pr_err("failed to create LED device: %d\n", err);
}
#else
static inline void spitz_leds_init(void) {}
@@ -490,53 +534,43 @@ static inline void spitz_leds_init(void) {}
* SSP Devices
******************************************************************************/
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
-static void spitz_ads7846_wait_for_hsync(void)
-{
- while (gpio_get_value(SPITZ_GPIO_HSYNC))
- cpu_relax();
- while (!gpio_get_value(SPITZ_GPIO_HSYNC))
- cpu_relax();
-}
+static const struct property_entry spitz_ads7846_props[] = {
+ PROPERTY_ENTRY_STRING("compatible", "ti,ads7846"),
+ PROPERTY_ENTRY_U32("touchscreen-max-pressure", 1024),
+ PROPERTY_ENTRY_U16("ti,x-plate-ohms", 419),
+ PROPERTY_ENTRY_U16("ti,y-plate-ohms", 486),
+ PROPERTY_ENTRY_U16("ti,vref-delay-usecs", 100),
+ PROPERTY_ENTRY_GPIO("pendown-gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_TP_INT, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_GPIO("ti,hsync-gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_HSYNC, GPIO_ACTIVE_LOW),
+ { }
+};
-static struct ads7846_platform_data spitz_ads7846_info = {
- .model = 7846,
- .vref_delay_usecs = 100,
- .x_plate_ohms = 419,
- .y_plate_ohms = 486,
- .pressure_max = 1024,
- .wait_for_sync = spitz_ads7846_wait_for_hsync,
+static const struct software_node spitz_ads7846_swnode = {
+ .name = "ads7846",
+ .properties = spitz_ads7846_props,
};
-static struct gpiod_lookup_table spitz_ads7846_gpio_table = {
- .dev_id = "spi2.0",
- .table = {
- GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_TP_INT,
- "pendown", GPIO_ACTIVE_LOW),
- { }
- },
+static const struct property_entry spitz_lcdcon_props[] = {
+ PROPERTY_ENTRY_GPIO("BL_CONT-gpios",
+ &spitz_scoop_2_gpiochip_node, 6, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_GPIO("BL_ON-gpios",
+ &spitz_scoop_2_gpiochip_node, 7, GPIO_ACTIVE_HIGH),
+ { }
};
-static struct gpiod_lookup_table spitz_lcdcon_gpio_table = {
- .dev_id = "spi2.1",
- .table = {
- GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_CONT,
- "BL_CONT", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_ON,
- "BL_ON", GPIO_ACTIVE_HIGH),
- { },
- },
+static const struct property_entry akita_lcdcon_props[] = {
+ PROPERTY_ENTRY_GPIO("BL_ON-gpios",
+ &akita_max7310_gpiochip_node, 3, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("BL_CONT-gpios",
+ &akita_max7310_gpiochip_node, 4, GPIO_ACTIVE_LOW),
+ { }
};
-static struct gpiod_lookup_table akita_lcdcon_gpio_table = {
- .dev_id = "spi2.1",
- .table = {
- GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_CONT,
- "BL_CONT", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_ON,
- "BL_ON", GPIO_ACTIVE_HIGH),
- { },
- },
+static struct software_node spitz_lcdcon_node = {
+ .name = "spitz-lcdcon",
};
static struct corgi_lcd_platform_data spitz_lcdcon_info = {
@@ -553,7 +587,7 @@ static struct spi_board_info spitz_spi_devices[] = {
.max_speed_hz = 1200000,
.bus_num = 2,
.chip_select = 0,
- .platform_data = &spitz_ads7846_info,
+ .swnode = &spitz_ads7846_swnode,
.irq = PXA_GPIO_TO_IRQ(SPITZ_GPIO_TP_INT),
}, {
.modalias = "corgi-lcd",
@@ -561,6 +595,7 @@ static struct spi_board_info spitz_spi_devices[] = {
.bus_num = 2,
.chip_select = 1,
.platform_data = &spitz_lcdcon_info,
+ .swnode = &spitz_lcdcon_node,
}, {
.modalias = "max1111",
.max_speed_hz = 450000,
@@ -569,53 +604,40 @@ static struct spi_board_info spitz_spi_devices[] = {
},
};
-static struct gpiod_lookup_table spitz_spi_gpio_table = {
- .dev_id = "spi2",
- .table = {
- GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_MAX1111_CS, "cs", 2, GPIO_ACTIVE_LOW),
- { },
- },
+static const struct software_node_ref_args spitz_spi_gpio_refs[] = {
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, SPITZ_GPIO_ADS7846_CS,
+ GPIO_ACTIVE_LOW),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, SPITZ_GPIO_LCDCON_CS,
+ GPIO_ACTIVE_LOW),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, SPITZ_GPIO_MAX1111_CS,
+ GPIO_ACTIVE_LOW),
};
static const struct property_entry spitz_spi_properties[] = {
- PROPERTY_ENTRY_U32("num-cs", 3),
+ PROPERTY_ENTRY_REF_ARRAY("gpios", spitz_spi_gpio_refs),
{ }
};
-static const struct software_node spitz_spi_node = {
+static const struct platform_device_info spitz_spi_device_info = {
+ .name = "pxa2xx-spi",
+ /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1 */
+ .id = 2,
.properties = spitz_spi_properties,
};
static void __init spitz_spi_init(void)
{
struct platform_device *pd;
- int id = 2;
int err;
- if (machine_is_akita())
- gpiod_add_lookup_table(&akita_lcdcon_gpio_table);
- else
- gpiod_add_lookup_table(&spitz_lcdcon_gpio_table);
-
- gpiod_add_lookup_table(&spitz_ads7846_gpio_table);
- gpiod_add_lookup_table(&spitz_spi_gpio_table);
-
- /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1 */
- pd = platform_device_alloc("pxa2xx-spi", id);
- if (pd == NULL) {
- pr_err("pxa2xx-spi: failed to allocate device id %d\n", id);
- } else {
- err = device_add_software_node(&pd->dev, &spitz_spi_node);
- if (err) {
- platform_device_put(pd);
- pr_err("pxa2xx-spi: failed to add software node\n");
- } else {
- platform_device_add(pd);
- }
- }
+ pd = platform_device_register_full(&spitz_spi_device_info);
+ err = PTR_ERR_OR_ZERO(pd);
+ if (err)
+ pr_err("pxa2xx-spi: failed to instantiate SPI controller: %d\n",
+ err);
+ spitz_lcdcon_node.properties = machine_is_akita() ?
+ akita_lcdcon_props : spitz_lcdcon_props;
spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices));
}
#else
@@ -648,21 +670,17 @@ static struct pxamci_platform_data spitz_mci_platform_data = {
.setpower = spitz_mci_setpower,
};
-static struct gpiod_lookup_table spitz_mci_gpio_table = {
- .dev_id = "pxa2xx-mci.0",
- .table = {
- GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_nSD_DETECT,
- "cd", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_nSD_WP,
- "wp", GPIO_ACTIVE_LOW),
- { },
- },
+static const struct property_entry spitz_mci_props[] __initconst = {
+ PROPERTY_ENTRY_GPIO("cd-gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_nSD_DETECT, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_GPIO("wp-gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_nSD_WP, GPIO_ACTIVE_LOW),
+ { }
};
static void __init spitz_mmc_init(void)
{
- gpiod_add_lookup_table(&spitz_mci_gpio_table);
- pxa_set_mci_info(&spitz_mci_platform_data);
+ pxa_set_mci_info(&spitz_mci_platform_data, spitz_mci_props);
}
#else
static inline void spitz_mmc_init(void) {}
@@ -961,30 +979,24 @@ static void __init spitz_i2c_init(void)
static inline void spitz_i2c_init(void) {}
#endif
-static struct gpiod_lookup_table spitz_audio_gpio_table = {
- .dev_id = "spitz-audio",
- .table = {
- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
- "mute-l", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
- "mute-r", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("sharp-scoop.1", SPITZ_GPIO_MIC_BIAS - SPITZ_SCP2_GPIO_BASE,
- "mic", GPIO_ACTIVE_HIGH),
- { },
- },
+static const struct property_entry spitz_audio_props[] = {
+ PROPERTY_ENTRY_GPIO("mute-l-gpios", &spitz_scoop_1_gpiochip_node, 3,
+ GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("mute-r-gpios", &spitz_scoop_1_gpiochip_node, 4,
+ GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("mic-gpios", &spitz_scoop_2_gpiochip_node, 8,
+ GPIO_ACTIVE_HIGH),
+ { }
};
-static struct gpiod_lookup_table akita_audio_gpio_table = {
- .dev_id = "spitz-audio",
- .table = {
- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
- "mute-l", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
- "mute-r", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("i2c-max7310", AKITA_GPIO_MIC_BIAS - AKITA_IOEXP_GPIO_BASE,
- "mic", GPIO_ACTIVE_HIGH),
- { },
- },
+static const struct property_entry akita_audio_props[] = {
+ PROPERTY_ENTRY_GPIO("mute-l-gpios", &spitz_scoop_1_gpiochip_node, 3,
+ GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("mute-r-gpios", &spitz_scoop_1_gpiochip_node, 4,
+ GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("mic-gpios", &akita_max7310_gpiochip_node, 2,
+ GPIO_ACTIVE_HIGH),
+ { }
};
/******************************************************************************
@@ -992,12 +1004,14 @@ static struct gpiod_lookup_table akita_audio_gpio_table = {
******************************************************************************/
static inline void spitz_audio_init(void)
{
- if (machine_is_akita())
- gpiod_add_lookup_table(&akita_audio_gpio_table);
- else
- gpiod_add_lookup_table(&spitz_audio_gpio_table);
-
- platform_device_register_simple("spitz-audio", -1, NULL, 0);
+ struct platform_device_info audio_info = {
+ .name = "spitz-audio",
+ .id = PLATFORM_DEVID_NONE,
+ .properties = machine_is_akita() ?
+ akita_audio_props : spitz_audio_props,
+ };
+
+ platform_device_register_full(&audio_info);
}
/******************************************************************************
@@ -1020,6 +1034,12 @@ static void spitz_restart(enum reboot_mode mode, const char *cmd)
static void __init spitz_init(void)
{
+ software_node_register(&spitz_scoop_1_gpiochip_node);
+ if (machine_is_akita())
+ software_node_register(&akita_max7310_gpiochip_node);
+ else
+ software_node_register(&spitz_scoop_2_gpiochip_node);
+
init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0);
pm_power_off = spitz_poweroff;
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index 18d37f90cdfe..3ec810b6f1a7 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -8,35 +8,49 @@
* Copyright (C) 2010 Google, Inc.
*/
-#include <linux/property.h>
+#include <linux/err.h>
#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/property.h>
#include "board.h"
-static struct property_entry wifi_rfkill_prop[] __initdata = {
- PROPERTY_ENTRY_STRING("name", "wifi_rfkill"),
- PROPERTY_ENTRY_STRING("type", "wlan"),
- { },
+static const struct software_node tegra_gpiochip_node = {
+ .name = "tegra-gpio",
};
-static struct platform_device wifi_rfkill_device = {
- .name = "rfkill_gpio",
- .id = -1,
+static const struct property_entry wifi_rfkill_prop[] __initconst = {
+ PROPERTY_ENTRY_STRING("name", "wifi_rfkill"),
+ PROPERTY_ENTRY_STRING("type", "wlan"),
+ PROPERTY_ENTRY_GPIO("reset-gpios",
+ &tegra_gpiochip_node, 25, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("shutdown-gpios",
+ &tegra_gpiochip_node, 85, GPIO_ACTIVE_HIGH),
+ { }
};
-static struct gpiod_lookup_table wifi_gpio_lookup = {
- .dev_id = "rfkill_gpio",
- .table = {
- GPIO_LOOKUP("tegra-gpio", 25, "reset", 0),
- GPIO_LOOKUP("tegra-gpio", 85, "shutdown", 0),
- { },
- },
+static const struct platform_device_info wifi_rfkill_info __initconst = {
+ .name = "rfkill_gpio",
+ .id = PLATFORM_DEVID_NONE,
+ .properties = wifi_rfkill_prop,
};
void __init tegra_paz00_wifikill_init(void)
{
- device_create_managed_software_node(&wifi_rfkill_device.dev, wifi_rfkill_prop, NULL);
- gpiod_add_lookup_table(&wifi_gpio_lookup);
- platform_device_register(&wifi_rfkill_device);
+ struct platform_device *pd;
+ int err;
+
+ err = software_node_register(&tegra_gpiochip_node);
+ if (err) {
+ pr_err("failed to register %s node: %d\n",
+ tegra_gpiochip_node.name, err);
+ return;
+ }
+
+ pd = platform_device_register_full(&wifi_rfkill_info);
+ err = PTR_ERR_OR_ZERO(pd);
+ if (err)
+ pr_err("failed to register WiFi rfkill device: %d\n", err);
}
diff --git a/arch/arm/mach-versatile/Kconfig b/arch/arm/mach-versatile/Kconfig
index e029270c2687..513618078440 100644
--- a/arch/arm/mach-versatile/Kconfig
+++ b/arch/arm/mach-versatile/Kconfig
@@ -278,15 +278,6 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
build a working kernel, you must also enable relevant core
tile support or Flattened Device Tree based support options.
-config ARCH_VEXPRESS_DCSCB
- bool "Dual Cluster System Control Block (DCSCB) support"
- depends on MCPM
- select ARM_CCI400_PORT_CTRL
- help
- Support for the Dual Cluster System Configuration Block (DCSCB).
- This is needed to provide CPU and cluster power management
- on RTSM implementing big.LITTLE.
-
config ARCH_VEXPRESS_SPC
bool "Versatile Express Serial Power Controller (SPC)"
select PM_OPP
diff --git a/arch/arm/mach-versatile/Makefile b/arch/arm/mach-versatile/Makefile
index 27d712bcf1af..d819fb2fc450 100644
--- a/arch/arm/mach-versatile/Makefile
+++ b/arch/arm/mach-versatile/Makefile
@@ -16,9 +16,6 @@ obj-$(CONFIG_ARCH_REALVIEW) += realview.o
# vexpress
obj-$(CONFIG_ARCH_VEXPRESS) := v2m.o
-obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
-CFLAGS_dcscb.o += -march=armv7-a
-CFLAGS_REMOVE_dcscb.o = -pg
obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
CFLAGS_REMOVE_spc.o = -pg
obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
diff --git a/arch/arm/mach-versatile/dcscb.c b/arch/arm/mach-versatile/dcscb.c
deleted file mode 100644
index d8797350996d..000000000000
--- a/arch/arm/mach-versatile/dcscb.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * dcscb.c - Dual Cluster System Configuration Block
- *
- * Created by: Nicolas Pitre, May 2012
- * Copyright: (C) 2012-2013 Linaro Limited
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/of_address.h>
-#include <linux/vexpress.h>
-#include <linux/arm-cci.h>
-
-#include <asm/mcpm.h>
-#include <asm/proc-fns.h>
-#include <asm/cacheflush.h>
-#include <asm/cputype.h>
-#include <asm/cp15.h>
-
-#include "vexpress.h"
-
-#define RST_HOLD0 0x0
-#define RST_HOLD1 0x4
-#define SYS_SWRESET 0x8
-#define RST_STAT0 0xc
-#define RST_STAT1 0x10
-#define EAG_CFG_R 0x20
-#define EAG_CFG_W 0x24
-#define KFC_CFG_R 0x28
-#define KFC_CFG_W 0x2c
-#define DCS_CFG_R 0x30
-
-static void __iomem *dcscb_base;
-static int dcscb_allcpus_mask[2];
-
-static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
-{
- unsigned int rst_hold, cpumask = (1 << cpu);
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
- return -EINVAL;
-
- rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
- rst_hold &= ~(cpumask | (cpumask << 4));
- writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
- return 0;
-}
-
-static int dcscb_cluster_powerup(unsigned int cluster)
-{
- unsigned int rst_hold;
-
- pr_debug("%s: cluster %u\n", __func__, cluster);
- if (cluster >= 2)
- return -EINVAL;
-
- /* remove cluster reset and add individual CPU's reset */
- rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
- rst_hold &= ~(1 << 8);
- rst_hold |= dcscb_allcpus_mask[cluster];
- writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
- return 0;
-}
-
-static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
-{
- unsigned int rst_hold;
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
-
- rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
- rst_hold |= (1 << cpu);
- writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
-}
-
-static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
-{
- unsigned int rst_hold;
-
- pr_debug("%s: cluster %u\n", __func__, cluster);
- BUG_ON(cluster >= 2);
-
- rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
- rst_hold |= (1 << 8);
- writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
-}
-
-static void dcscb_cpu_cache_disable(void)
-{
- /* Disable and flush the local CPU cache. */
- v7_exit_coherency_flush(louis);
-}
-
-static void dcscb_cluster_cache_disable(void)
-{
- /* Flush all cache levels for this cluster. */
- v7_exit_coherency_flush(all);
-
- /*
- * A full outer cache flush could be needed at this point
- * on platforms with such a cache, depending on where the
- * outer cache sits. In some cases the notion of a "last
- * cluster standing" would need to be implemented if the
- * outer cache is shared across clusters. In any case, when
- * the outer cache needs flushing, there is no concurrent
- * access to the cache controller to worry about and no
- * special locking besides what is already provided by the
- * MCPM state machinery is needed.
- */
-
- /*
- * Disable cluster-level coherency by masking
- * incoming snoops and DVM messages:
- */
- cci_disable_port_by_cpu(read_cpuid_mpidr());
-}
-
-static const struct mcpm_platform_ops dcscb_power_ops = {
- .cpu_powerup = dcscb_cpu_powerup,
- .cluster_powerup = dcscb_cluster_powerup,
- .cpu_powerdown_prepare = dcscb_cpu_powerdown_prepare,
- .cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
- .cpu_cache_disable = dcscb_cpu_cache_disable,
- .cluster_cache_disable = dcscb_cluster_cache_disable,
-};
-
-extern void dcscb_power_up_setup(unsigned int affinity_level);
-
-static int __init dcscb_init(void)
-{
- struct device_node *node;
- unsigned int cfg;
- int ret;
-
- if (!cci_probed())
- return -ENODEV;
-
- node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
- if (!node)
- return -ENODEV;
- dcscb_base = of_iomap(node, 0);
- of_node_put(node);
- if (!dcscb_base)
- return -EADDRNOTAVAIL;
- cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
- dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
- dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
-
- ret = mcpm_platform_register(&dcscb_power_ops);
- if (!ret)
- ret = mcpm_sync_init(dcscb_power_up_setup);
- if (ret) {
- iounmap(dcscb_base);
- return ret;
- }
-
- pr_info("VExpress DCSCB support installed\n");
-
- /*
- * Future entries into the kernel can now go
- * through the cluster entry vectors.
- */
- vexpress_flags_set(__pa_symbol(mcpm_entry_point));
-
- return 0;
-}
-
-early_initcall(dcscb_init);
diff --git a/arch/arm/mach-versatile/dcscb_setup.S b/arch/arm/mach-versatile/dcscb_setup.S
deleted file mode 100644
index 92d1fd9d7f6a..000000000000
--- a/arch/arm/mach-versatile/dcscb_setup.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Created by: Dave Martin, 2012-06-22
- * Copyright: (C) 2012-2013 Linaro Limited
- */
-
-#include <linux/linkage.h>
-
-
-ENTRY(dcscb_power_up_setup)
-
- cmp r0, #0 @ check affinity level
- beq 2f
-
-/*
- * Enable cluster-level coherency, in preparation for turning on the MMU.
- * The ACTLR SMP bit does not need to be set here, because cpu_resume()
- * already restores that.
- *
- * A15/A7 may not require explicit L2 invalidation on reset, dependent
- * on hardware integration decisions.
- * For now, this code assumes that L2 is either already invalidated,
- * or invalidation is not required.
- */
-
- b cci_enable_port_for_self
-
-2: @ Implementation-specific local CPU setup operations should go here,
- @ if any. In this case, there is nothing to do.
-
- bx lr
-
-ENDPROC(dcscb_power_up_setup)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 67c425341a95..ab01b51de559 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,8 @@
#include "fault.h"
+#ifdef CONFIG_MMU
+
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
unsigned long addr = (unsigned long)unsafe_src;
@@ -32,8 +34,6 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
}
-#ifdef CONFIG_MMU
-
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 2ed7d229c8f9..23c98203c40f 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# Linux system call numbers and entry vectors
#
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 309648c17f48..9da57a5b81c7 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -109,7 +109,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
- unmap.host_addr = map_ops[i].host_addr,
+ unmap.host_addr = map_ops[i].host_addr;
unmap.handle = map_ops[i].handle;
map_ops[i].handle = INVALID_GRANT_HANDLE;
if (map_ops[i].flags & GNTMAP_device_map)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d91259ee7b5..79a656a62cbc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -5,6 +5,7 @@ config ARM64
select ACPI_CCA_REQUIRED if ACPI
select ACPI_GENERIC_GSI if ACPI
select ACPI_GTDT if ACPI
+ select ACPI_HOTPLUG_CPU if ACPI_PROCESSOR && HOTPLUG_CPU
select ACPI_IORT if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ACPI_MCFG if (ACPI && PCI)
@@ -381,7 +382,7 @@ config BROKEN_GAS_INST
config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
bool
- # Clang's __builtin_return_adddress() strips the PAC since 12.0.0
+ # Clang's __builtin_return_address() strips the PAC since 12.0.0
# https://github.com/llvm/llvm-project/commit/2a96f47c5ffca84cd774ad402cacd137f4bf45e2
default y if CC_IS_CLANG
# GCC's __builtin_return_address() strips the PAC since 11.1.0,
@@ -1067,34 +1068,21 @@ config ARM64_ERRATUM_3117295
If unsure, say Y.
-config ARM64_WORKAROUND_SPECULATIVE_SSBS
- bool
-
config ARM64_ERRATUM_3194386
- bool "Cortex-X4: 3194386: workaround for MSR SSBS not self-synchronizing"
- select ARM64_WORKAROUND_SPECULATIVE_SSBS
+ bool "Cortex-{A720,X4,X925}/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
default y
help
- This option adds the workaround for ARM Cortex-X4 erratum 3194386.
+ This option adds the workaround for the following errata:
- On affected cores "MSR SSBS, #0" instructions may not affect
- subsequent speculative instructions, which may permit unexepected
- speculative store bypassing.
-
- Work around this problem by placing a speculation barrier after
- kernel changes to SSBS. The presence of the SSBS special-purpose
- register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
- that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
- SSBS.
-
- If unsure, say Y.
-
-config ARM64_ERRATUM_3312417
- bool "Neoverse-V3: 3312417: workaround for MSR SSBS not self-synchronizing"
- select ARM64_WORKAROUND_SPECULATIVE_SSBS
- default y
- help
- This option adds the workaround for ARM Neoverse-V3 erratum 3312417.
+ * ARM Cortex-A710 erratum 3324338
+ * ARM Cortex-A720 erratum 3456091
+ * ARM Cortex-X2 erratum 3324338
+ * ARM Cortex-X3 erratum 3324335
+ * ARM Cortex-X4 erratum 3194386
+ * ARM Cortex-X925 erratum 3324334
+ * ARM Neoverse N2 erratum 3324339
+ * ARM Neoverse V2 erratum 3324336
+ * ARM Neoverse-V3 erratum 3312417
On affected cores "MSR SSBS, #0" instructions may not affect
subsequent speculative instructions, which may permit unexpected
@@ -1108,7 +1096,6 @@ config ARM64_ERRATUM_3312417
If unsure, say Y.
-
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -1649,6 +1636,7 @@ config RODATA_FULL_DEFAULT_ENABLED
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+ depends on !KCSAN
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index a52618073de2..d1461335e78f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -207,7 +207,6 @@ if ARCH_NXP
config ARCH_LAYERSCAPE
bool "Freescale Layerscape SoC family"
- select EDAC_SUPPORT
help
This enables support for the Freescale Layerscape SoC family.
@@ -312,6 +311,8 @@ config ARCH_STM32
select STM32_EXTI
select ARM_SMC_MBOX
select ARM_SCMI_PROTOCOL
+ select REGULATOR
+ select REGULATOR_ARM_SCMI
select COMMON_CLK_SCMI
select STM32_FIREWALL
help
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 30dd6347a929..21cd3a87f385 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
subdir-y += actions
+subdir-y += airoha
subdir-y += allwinner
subdir-y += altera
subdir-y += amazon
diff --git a/arch/arm64/boot/dts/airoha/Makefile b/arch/arm64/boot/dts/airoha/Makefile
new file mode 100644
index 000000000000..ebea112ce1d7
--- /dev/null
+++ b/arch/arm64/boot/dts/airoha/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dtb-$(CONFIG_ARCH_AIROHA) += en7581-evb.dtb
diff --git a/arch/arm64/boot/dts/airoha/en7581-evb.dts b/arch/arm64/boot/dts/airoha/en7581-evb.dts
new file mode 100644
index 000000000000..cf58e43dd5b2
--- /dev/null
+++ b/arch/arm64/boot/dts/airoha/en7581-evb.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/dts-v1/;
+
+/* Bootloader installs ATF here */
+/memreserve/ 0x80000000 0x200000;
+
+#include "en7581.dtsi"
+
+/ {
+ model = "Airoha EN7581 Evaluation Board";
+ compatible = "airoha,en7581-evb", "airoha,en7581";
+
+ aliases {
+ serial0 = &uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ linux,usable-memory-range = <0x0 0x80200000 0x0 0x1fe00000>;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x2 0x00000000>;
+ };
+};
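For the numbers above: the /memreserve/ entry carves out 2 MiB (0x200000) at 0x80000000 for the firmware noted in the comment, and linux,usable-memory-range starts immediately after it at 0x80200000 with a length of 0x1fe00000 (510 MiB), so the kernel is confined to the window ending at 0xa0000000 even though the memory node itself describes 8 GiB (0x2 00000000) starting at 0x80000000.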
diff --git a/arch/arm64/boot/dts/airoha/en7581.dtsi b/arch/arm64/boot/dts/airoha/en7581.dtsi
new file mode 100644
index 000000000000..55eb1762fb11
--- /dev/null
+++ b/arch/arm64/boot/dts/airoha/en7581.dtsi
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ npu-binary@84000000 {
+ no-map;
+ reg = <0x0 0x84000000 0x0 0xa00000>;
+ };
+
+ npu-flag@84b00000 {
+ no-map;
+ reg = <0x0 0x84b00000 0x0 0x100000>;
+ };
+
+ npu-pkt@85000000 {
+ no-map;
+ reg = <0x0 0x85000000 0x0 0x1a00000>;
+ };
+
+ npu-phyaddr@86b00000 {
+ no-map;
+ reg = <0x0 0x86b00000 0x0 0x100000>;
+ };
+
+ npu-rxdesc@86d00000 {
+ no-map;
+ reg = <0x0 0x86d00000 0x0 0x100000>;
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ enable-method = "psci";
+ clock-frequency = <80000000>;
+ next-level-cache = <&l2>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x1>;
+ enable-method = "psci";
+ clock-frequency = <80000000>;
+ next-level-cache = <&l2>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x2>;
+ enable-method = "psci";
+ clock-frequency = <80000000>;
+ next-level-cache = <&l2>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x3>;
+ enable-method = "psci";
+ clock-frequency = <80000000>;
+ next-level-cache = <&l2>;
+ };
+
+ l2: l2-cache {
+ compatible = "cache";
+ cache-size = <0x80000>;
+ cache-line-size = <64>;
+ cache-level = <2>;
+ cache-unified;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic: interrupt-controller@9000000 {
+ compatible = "arm,gic-v3";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0 0x09000000 0x0 0x20000>,
+ <0x0 0x09080000 0x0 0x80000>,
+ <0x0 0x09400000 0x0 0x2000>,
+ <0x0 0x09500000 0x0 0x2000>,
+ <0x0 0x09600000 0x0 0x20000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ uart1: serial@1fbf0000 {
+ compatible = "ns16550";
+ reg = <0x0 0x1fbf0000 0x0 0x30>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <1843200>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
index 596a25907432..709fe650a360 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
@@ -5,7 +5,7 @@
#include "sun50i-a64-sopine-baseboard.dts"
/ {
- model = "Pine64 LTS";
+ model = "Pine64 PINE A64 LTS";
compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
"allwinner,sun50i-a64";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
index b54099b654c8..026d843cd7e0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -4,7 +4,7 @@
#include "sun50i-a64-pine64.dts"
/ {
- model = "Pine64+";
+ model = "Pine64 PINE A64+";
compatible = "pine64,pine64-plus", "allwinner,sun50i-a64";
/* TODO: Camera, touchscreen, etc. */
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 2accb5ddf783..09e71fd60785 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -9,7 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
/ {
- model = "Pine64";
+ model = "Pine64 PINE A64";
compatible = "pine64,pine64", "allwinner,sun50i-a64";
aliases {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 6c65d5bc16ba..379c2c8466f5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -13,7 +13,7 @@
#include <dt-bindings/pwm/pwm.h>
/ {
- model = "Pinebook";
+ model = "Pine64 Pinebook";
compatible = "pine64,pinebook", "allwinner,sun50i-a64";
chassis-type = "laptop";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts
index 6265360ce623..86cc85eb3d48 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts
@@ -9,7 +9,7 @@
#include "sun50i-a64-pinetab.dts"
/ {
- model = "PineTab, Early Adopter's version";
+ model = "Pine64 PineTab Early Adopter";
compatible = "pine64,pinetab-early-adopter", "allwinner,sun50i-a64";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
index c6007df99938..f5fb1ee32dad 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
@@ -14,7 +14,7 @@
#include <dt-bindings/pwm/pwm.h>
/ {
- model = "PineTab, Development Sample";
+ model = "Pine64 PineTab Developer Sample";
compatible = "pine64,pinetab", "allwinner,sun50i-a64";
chassis-type = "tablet";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 5e66ce1a334f..be2347c8f267 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -8,7 +8,7 @@
#include "sun50i-a64-sopine.dtsi"
/ {
- model = "SoPine with baseboard";
+ model = "Pine64 SOPINE on Baseboard carrier board";
compatible = "pine64,sopine-baseboard", "pine64,sopine",
"allwinner,sun50i-a64";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index ce4aa44c3353..e868ca5ae753 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -51,10 +51,16 @@
device_type = "cpu";
reg = <0>;
enable-method = "psci";
- next-level-cache = <&L2>;
clocks = <&ccu CLK_CPUX>;
clock-names = "cpu";
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu1: cpu@1 {
@@ -62,10 +68,16 @@
device_type = "cpu";
reg = <1>;
enable-method = "psci";
- next-level-cache = <&L2>;
clocks = <&ccu CLK_CPUX>;
clock-names = "cpu";
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu2: cpu@2 {
@@ -73,10 +85,16 @@
device_type = "cpu";
reg = <2>;
enable-method = "psci";
- next-level-cache = <&L2>;
clocks = <&ccu CLK_CPUX>;
clock-names = "cpu";
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu3: cpu@3 {
@@ -84,16 +102,25 @@
device_type = "cpu";
reg = <3>;
enable-method = "psci";
- next-level-cache = <&L2>;
clocks = <&ccu CLK_CPUX>;
clock-names = "cpu";
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
- L2: l2-cache {
+ l2_cache: l2-cache {
compatible = "cache";
cache-level = <2>;
cache-unified;
+ cache-size = <0x80000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
};
};
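The cache-sets values added in this file follow from sets = cache-size / (cache-line-size x ways). Assuming the usual Cortex-A53 arrangement of a 2-way L1 I-cache, a 4-way L1 D-cache and a 16-way unified L2 (per Arm's Cortex-A53 TRM), a 32 KiB L1 gives 32768 / (64 x 2) = 256 I-cache sets and 32768 / (64 x 4) = 128 D-cache sets, while the 512 KiB (0x80000) L2 gives 524288 / (64 x 16) = 512 sets, matching the properties in the hunks above; the same formula yields the 256 sets used for the 256 KiB (0x40000) H616 L2 further below.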
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
index 66fe03910d5e..066fbeff8bfa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
@@ -8,7 +8,7 @@
/delete-node/ &reg_gmac_3v3;
/ {
- model = "Pine H64 model B";
+ model = "Pine64 PINE H64 Model B";
compatible = "pine64,pine-h64-model-b", "allwinner,sun50i-h6";
wifi_pwrseq: pwrseq {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index 3910393be1f9..c8b275552872 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -9,7 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
/ {
- model = "Pine H64 model A";
+ model = "Pine64 PINE H64 Model A";
compatible = "pine64,pine-h64", "allwinner,sun50i-h6";
aliases {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 8a8591c4e7dd..2301c59b41b1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -29,6 +29,13 @@
clocks = <&ccu CLK_CPUX>;
clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu1: cpu@1 {
@@ -39,6 +46,13 @@
clocks = <&ccu CLK_CPUX>;
clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu2: cpu@2 {
@@ -49,6 +63,13 @@
clocks = <&ccu CLK_CPUX>;
clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu3: cpu@3 {
@@ -59,6 +80,22 @@
clocks = <&ccu CLK_CPUX>;
clock-latency-ns = <244144>; /* 8 32k periods */
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x80000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi
index aca22a7f0191..dd10aaf472b6 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-cpu-opp.dtsi
@@ -11,7 +11,7 @@
opp-hz = /bits/ 64 <480000000>;
opp-microvolt = <900000>;
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-supported-hw = <0x1f>;
+ opp-supported-hw = <0x3f>;
};
opp-600000000 {
@@ -25,7 +25,7 @@
opp-hz = /bits/ 64 <720000000>;
opp-microvolt = <900000>;
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-supported-hw = <0x0d>;
+ opp-supported-hw = <0x2d>;
};
opp-792000000 {
@@ -50,8 +50,16 @@
opp-microvolt-speed2 = <950000>;
opp-microvolt-speed3 = <950000>;
opp-microvolt-speed4 = <1020000>;
+ opp-microvolt-speed5 = <900000>;
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-supported-hw = <0x1f>;
+ opp-supported-hw = <0x3f>;
+ };
+
+ opp-1032000000 {
+ opp-hz = /bits/ 64 <1032000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-supported-hw = <0x20>;
};
opp-1104000000 {
@@ -59,8 +67,9 @@
opp-microvolt-speed0 = <1000000>;
opp-microvolt-speed2 = <1000000>;
opp-microvolt-speed3 = <1000000>;
+ opp-microvolt-speed5 = <950000>;
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-supported-hw = <0x0d>;
+ opp-supported-hw = <0x2d>;
};
opp-1200000000 {
@@ -70,8 +79,9 @@
opp-microvolt-speed2 = <1050000>;
opp-microvolt-speed3 = <1050000>;
opp-microvolt-speed4 = <1100000>;
+ opp-microvolt-speed5 = <1020000>;
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-supported-hw = <0x1f>;
+ opp-supported-hw = <0x3f>;
};
opp-1320000000 {
@@ -85,15 +95,16 @@
opp-hz = /bits/ 64 <1416000000>;
opp-microvolt = <1100000>;
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-supported-hw = <0x0d>;
+ opp-supported-hw = <0x2d>;
};
opp-1512000000 {
opp-hz = /bits/ 64 <1512000000>;
opp-microvolt-speed1 = <1100000>;
opp-microvolt-speed3 = <1100000>;
+ opp-microvolt-speed5 = <1160000>;
clock-latency-ns = <244144>; /* 8 32k periods */
- opp-supported-hw = <0x0a>;
+ opp-supported-hw = <0x2a>;
};
};
};
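opp-supported-hw is a per-OPP bitmask matched against the supported-hardware value the cpufreq driver derives from the SoC's speed bin, so widening 0x1f (0b011111, bins 0-4) to 0x3f (0b111111, bins 0-5) extends an existing operating point to the newly handled bin 5, the added opp-1032000000 node with opp-supported-hw = <0x20> (0b100000) applies to bin 5 only, and the new opp-microvolt-speed5 properties give that bin its own voltage at the frequencies it shares with other bins.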
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
index 921d5f61d8d6..b29ce7321317 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
@@ -27,6 +27,13 @@
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu1: cpu@1 {
@@ -36,6 +43,13 @@
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu2: cpu@2 {
@@ -45,6 +59,13 @@
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
};
cpu3: cpu@3 {
@@ -54,6 +75,22 @@
enable-method = "psci";
clocks = <&ccu CLK_CPUX>;
#cooling-cells = <2>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache>;
+ };
+
+ l2_cache: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x40000>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
};
};
@@ -113,6 +150,16 @@
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x40000000>;
+ crypto: crypto@1904000 {
+ compatible = "allwinner,sun50i-h616-crypto";
+ reg = <0x01904000 0x800>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_CE>, <&ccu CLK_CE>,
+ <&ccu CLK_MBUS_CE>, <&rtc CLK_IOSC>;
+ clock-names = "bus", "mod", "ram", "trng";
+ resets = <&ccu RST_BUS_CE>;
+ };
+
syscon: syscon@3000000 {
compatible = "allwinner,sun50i-h616-system-control";
reg = <0x03000000 0x1000>;
@@ -306,6 +353,15 @@
#interrupt-cells = <3>;
};
+ iommu: iommu@30f0000 {
+ compatible = "allwinner,sun50i-h616-iommu";
+ reg = <0x030f0000 0x10000>;
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_IOMMU>;
+ resets = <&ccu RST_BUS_IOMMU>;
+ #iommu-cells = <1>;
+ };
+
mmc0: mmc@4020000 {
compatible = "allwinner,sun50i-h616-mmc",
"allwinner,sun50i-a100-mmc";
@@ -589,6 +645,17 @@
status = "disabled";
};
+ gpadc: adc@5070000 {
+ compatible = "allwinner,sun50i-h616-gpadc",
+ "allwinner,sun20i-d1-gpadc";
+ reg = <0x05070000 0x400>;
+ clocks = <&ccu CLK_BUS_GPADC>;
+ resets = <&ccu RST_BUS_GPADC>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ #io-channel-cells = <1>;
+ };
+
ths: thermal-sensor@5070400 {
compatible = "allwinner,sun50i-h616-ths";
reg = <0x05070400 0x400>;
@@ -602,6 +669,16 @@
#thermal-sensor-cells = <1>;
};
+ lradc: lradc@5070800 {
+ compatible = "allwinner,sun50i-h616-lradc",
+ "allwinner,sun50i-r329-lradc";
+ reg = <0x05070800 0x400>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_KEYADC>;
+ resets = <&ccu RST_BUS_KEYADC>;
+ status = "disabled";
+ };
+
usbotg: usb@5100000 {
compatible = "allwinner,sun50i-h616-musb",
"allwinner,sun8i-h3-musb";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
index c204dd43c726..ce90327e1b2e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
@@ -191,7 +191,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en;
vin1-supply = <&reg_vcc5v>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
index ee30584b6ad7..afb49e65859f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
@@ -6,7 +6,7 @@
/dts-v1/;
#include "sun50i-h616.dtsi"
-
+#include "sun50i-h616-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -221,7 +221,7 @@
reg_dcdc1: dcdc1 {
regulator-always-on;
regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1100000>;
+ regulator-max-microvolt = <1160000>;
regulator-name = "vdd-cpu";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
index 63036256917f..ff453336eab1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
@@ -9,6 +9,78 @@
/ {
model = "Anbernic RG35XX H";
compatible = "anbernic,rg35xx-h", "allwinner,sun50i-h700";
+
+ adc-joystick {
+ compatible = "adc-joystick";
+ io-channels = <&adc_mux 0>,
+ <&adc_mux 1>,
+ <&adc_mux 2>,
+ <&adc_mux 3>;
+ pinctrl-0 = <&joy_mux_pin>;
+ pinctrl-names = "default";
+ poll-interval = <60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ axis@0 {
+ reg = <0>;
+ abs-flat = <32>;
+ abs-fuzz = <32>;
+ abs-range = <4096 0>;
+ linux,code = <ABS_X>;
+ };
+
+ axis@1 {
+ reg = <1>;
+ abs-flat = <32>;
+ abs-fuzz = <32>;
+ abs-range = <0 4096>;
+ linux,code = <ABS_Y>;
+ };
+
+ axis@2 {
+ reg = <2>;
+ abs-flat = <32>;
+ abs-fuzz = <32>;
+ abs-range = <0 4096>;
+ linux,code = <ABS_RX>;
+ };
+
+ axis@3 {
+ reg = <3>;
+ abs-flat = <32>;
+ abs-fuzz = <32>;
+ abs-range = <4096 0>;
+ linux,code = <ABS_RY>;
+ };
+ };
+
+ adc_mux: adc-mux {
+ compatible = "io-channel-mux";
+ channels = "left_x", "left_y", "right_x", "right_y";
+ #io-channel-cells = <1>;
+ io-channels = <&gpadc 0>;
+ io-channel-names = "parent";
+ mux-controls = <&gpio_mux>;
+ settle-time-us = <100>;
+ };
+
+ gpio_mux: mux-controller {
+ compatible = "gpio-mux";
+ mux-gpios = <&pio 8 1 GPIO_ACTIVE_LOW>,
+ <&pio 8 2 GPIO_ACTIVE_LOW>;
+ #mux-control-cells = <0>;
+ };
+};
+
+&gpadc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@0 {
+ reg = <0>;
+ };
};
&gpio_keys_gamepad {
@@ -34,3 +106,10 @@
&ohci1 {
status = "okay";
};
+
+&pio {
+ joy_mux_pin: joy-mux-pin {
+ pins = "PI0";
+ function = "gpio_out";
+ };
+};
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index cbbc53c47921..0def0b0daaf7 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -34,6 +34,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
+ next-level-cache = <&l2_shared>;
reg = <0x0>;
};
@@ -41,6 +42,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
+ next-level-cache = <&l2_shared>;
reg = <0x1>;
};
@@ -48,6 +50,7 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
+ next-level-cache = <&l2_shared>;
reg = <0x2>;
};
@@ -55,8 +58,15 @@
compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
+ next-level-cache = <&l2_shared>;
reg = <0x3>;
};
+
+ l2_shared: cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ };
};
firmware {
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index 26173f0b0051..4eee777ef1a1 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -180,8 +180,6 @@
&qspi {
status = "okay";
flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
index 81d0e914a77c..7c53cb9621e5 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
@@ -169,8 +169,6 @@
&qspi {
status = "okay";
flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 0f29517da5ec..29417f04f886 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -21,6 +21,8 @@ dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3-ts050.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-bananapi-cm4-cm4io.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-bananapi-cm4-mnt-reform2.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-dreambox-one.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-dreambox-two.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gsking-x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking.dtb
@@ -62,6 +64,8 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-libretech-cc-v2.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-libretech-cc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-vero4k.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxlx-s905l-p271.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-gt1-ultimate.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-khadas-vim2.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-mecool-kiii-pro.dtb
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
index 73ca1d7eed81..de10e7aebf21 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
@@ -4,6 +4,7 @@
*/
#include "amlogic-a4-common.dtsi"
+#include <dt-bindings/power/amlogic,a4-pwrc.h>
/ {
cpus {
#address-cells = <2>;
@@ -37,4 +38,13 @@
enable-method = "psci";
};
};
+
+ sm: secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+
+ pwrc: power-controller {
+ compatible = "amlogic,a4-pwrc";
+ #power-domain-cells = <1>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
index 32a754fe7990..f8fb060c49ae 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
@@ -111,8 +111,7 @@
};
gpio_intc: interrupt-controller@4080 {
- compatible = "amlogic,meson-gpio-intc",
- "amlogic,c3-gpio-intc";
+ compatible = "amlogic,c3-gpio-intc", "amlogic,meson-gpio-intc";
reg = <0x0 0x4080 0x0 0x0020>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-a1-ad402.dts b/arch/arm64/boot/dts/amlogic/meson-a1-ad402.dts
index 4bc30af05848..0d92f5253b64 100644
--- a/arch/arm64/boot/dts/amlogic/meson-a1-ad402.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-a1-ad402.dts
@@ -7,6 +7,7 @@
/dts-v1/;
#include "meson-a1.dtsi"
+#include <dt-bindings/thermal/thermal.h>
/ {
compatible = "amlogic,ad402", "amlogic,a1";
@@ -83,6 +84,50 @@
vin-supply = <&vddao_3v3>;
regulator-always-on;
};
+
+ thermal-zones {
+ soc_thermal: soc-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ sustainable-power = <130>;
+
+ thermal-sensors = <&cpu_temp>;
+
+ trips {
+ soc_passive: soc-passive {
+ temperature = <70000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ soc_hot: soc-hot {
+ temperature = <85000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ soc_critical: soc-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ soc_cooling_maps: cooling-maps {
+ map0 {
+ trip = <&soc_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&soc_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
};
/* Bluetooth HCI H4 */
diff --git a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
index c03e207ea6c5..e5366d4239b1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
@@ -28,6 +28,7 @@
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&l2>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -36,6 +37,7 @@
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&l2>;
+ #cooling-cells = <2>;
};
l2: l2-cache0 {
@@ -398,6 +400,17 @@
power-domains = <&pwrc PWRC_USB_ID>;
};
+ cpu_temp: temperature-sensor@4c00 {
+ compatible = "amlogic,a1-cpu-thermal";
+ reg = <0x0 0x4c00 0x0 0x50>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc_periphs CLKID_TS>;
+ assigned-clocks = <&clkc_periphs CLKID_TS>;
+ assigned-clock-rates = <500000>;
+ #thermal-sensor-cells = <0>;
+ amlogic,ao-secure = <&sec_AO>;
+ };
+
hwrng: rng@5118 {
compatible = "amlogic,meson-rng";
reg = <0x0 0x5118 0x0 0x4>;
@@ -419,7 +432,7 @@
clock-names = "fixpll_in", "hifipll_in";
};
- sd_emmc: sd@10000 {
+ sd_emmc: mmc@10000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0x10000 0x0 0x800>;
interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
@@ -453,7 +466,6 @@
assigned-clocks = <&clkc_periphs CLKID_USB_BUS>;
assigned-clock-rates = <64000000>;
resets = <&reset RESET_USBCTRL>;
- reset-name = "usb_ctrl";
dr_mode = "otg";
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index 6d12b760b90f..e9b22868983d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -25,10 +25,10 @@
compatible = "amlogic,axg-tdm-iface";
#sound-dai-cells = <0>;
sound-name-prefix = "TDM_A";
- clocks = <&clkc_audio AUD_CLKID_MST_A_MCLK>,
- <&clkc_audio AUD_CLKID_MST_A_SCLK>,
- <&clkc_audio AUD_CLKID_MST_A_LRCLK>;
- clock-names = "mclk", "sclk", "lrclk";
+ clocks = <&clkc_audio AUD_CLKID_MST_A_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_LRCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_MCLK>;
+ clock-names = "sclk", "lrclk", "mclk";
status = "disabled";
};
@@ -36,10 +36,10 @@
compatible = "amlogic,axg-tdm-iface";
#sound-dai-cells = <0>;
sound-name-prefix = "TDM_B";
- clocks = <&clkc_audio AUD_CLKID_MST_B_MCLK>,
- <&clkc_audio AUD_CLKID_MST_B_SCLK>,
- <&clkc_audio AUD_CLKID_MST_B_LRCLK>;
- clock-names = "mclk", "sclk", "lrclk";
+ clocks = <&clkc_audio AUD_CLKID_MST_B_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_B_LRCLK>,
+ <&clkc_audio AUD_CLKID_MST_B_MCLK>;
+ clock-names = "sclk", "lrclk", "mclk";
status = "disabled";
};
@@ -47,10 +47,10 @@
compatible = "amlogic,axg-tdm-iface";
#sound-dai-cells = <0>;
sound-name-prefix = "TDM_C";
- clocks = <&clkc_audio AUD_CLKID_MST_C_MCLK>,
- <&clkc_audio AUD_CLKID_MST_C_SCLK>,
- <&clkc_audio AUD_CLKID_MST_C_LRCLK>;
- clock-names = "mclk", "sclk", "lrclk";
+ clocks = <&clkc_audio AUD_CLKID_MST_C_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_C_LRCLK>,
+ <&clkc_audio AUD_CLKID_MST_C_MCLK>;
+ clock-names = "sclk", "lrclk", "mclk";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index b058ed78faf0..d08c97797010 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -215,6 +215,11 @@
#sound-dai-cells = <0>;
status = "disabled";
+ assigned-clocks = <&clkc CLKID_HDMI_SEL>,
+ <&clkc CLKID_HDMI>;
+ assigned-clock-parents = <&xtal>, <0>;
+ assigned-clock-rates = <0>, <24000000>;
+
/* VPU VENC Input */
hdmi_tx_venc_port: port@0 {
reg = <0>;
@@ -987,7 +992,7 @@
mux {
groups = "spdif_out_h";
function = "spdif_out";
- drive-strength-microamp = <500>;
+ drive-strength-microamp = <3000>;
bias-disable;
};
};
@@ -996,7 +1001,7 @@
mux {
groups = "spdif_out_a11";
function = "spdif_out";
- drive-strength-microamp = <500>;
+ drive-strength-microamp = <3000>;
bias-disable;
};
};
@@ -1005,7 +1010,7 @@
mux {
groups = "spdif_out_a13";
function = "spdif_out";
- drive-strength-microamp = <500>;
+ drive-strength-microamp = <3000>;
bias-disable;
};
};
@@ -1741,9 +1746,6 @@
compatible = "amlogic,meson-gx-ao-sysctrl",
"simple-mfd", "syscon";
reg = <0x0 0x0 0x0 0x100>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges = <0x0 0x0 0x0 0x0 0x0 0x100>;
clkc_AO: clock-controller {
compatible = "amlogic,meson-g12a-aoclkc";
@@ -1752,278 +1754,278 @@
clocks = <&xtal>, <&clkc CLKID_CLK81>;
clock-names = "xtal", "mpeg-clk";
};
+ };
- ao_pinctrl: pinctrl {
- compatible = "amlogic,meson-g12a-aobus-pinctrl";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ ao_pinctrl: pinctrl@14 {
+ compatible = "amlogic,meson-g12a-aobus-pinctrl";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio_ao: bank@14 {
+ reg = <0x0 0x14 0x0 0x8>,
+ <0x0 0x1c 0x0 0x8>,
+ <0x0 0x24 0x0 0x14>;
+ reg-names = "mux",
+ "ds",
+ "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&ao_pinctrl 0 0 15>;
+ };
- gpio_ao: bank@14 {
- reg = <0x0 0x14 0x0 0x8>,
- <0x0 0x1c 0x0 0x8>,
- <0x0 0x24 0x0 0x14>;
- reg-names = "mux",
- "ds",
- "gpio";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&ao_pinctrl 0 0 15>;
+ i2c_ao_sck_pins: i2c_ao_sck_pins {
+ mux {
+ groups = "i2c_ao_sck";
+ function = "i2c_ao";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- i2c_ao_sck_pins: i2c_ao_sck_pins {
- mux {
- groups = "i2c_ao_sck";
- function = "i2c_ao";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ i2c_ao_sda_pins: i2c_ao_sda {
+ mux {
+ groups = "i2c_ao_sda";
+ function = "i2c_ao";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- i2c_ao_sda_pins: i2c_ao_sda {
- mux {
- groups = "i2c_ao_sda";
- function = "i2c_ao";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ i2c_ao_sck_e_pins: i2c_ao_sck_e {
+ mux {
+ groups = "i2c_ao_sck_e";
+ function = "i2c_ao";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- i2c_ao_sck_e_pins: i2c_ao_sck_e {
- mux {
- groups = "i2c_ao_sck_e";
- function = "i2c_ao";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ i2c_ao_sda_e_pins: i2c_ao_sda_e {
+ mux {
+ groups = "i2c_ao_sda_e";
+ function = "i2c_ao";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- i2c_ao_sda_e_pins: i2c_ao_sda_e {
- mux {
- groups = "i2c_ao_sda_e";
- function = "i2c_ao";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ mclk0_ao_pins: mclk0-ao {
+ mux {
+ groups = "mclk0_ao";
+ function = "mclk0_ao";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- mclk0_ao_pins: mclk0-ao {
- mux {
- groups = "mclk0_ao";
- function = "mclk0_ao";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ tdm_ao_b_din0_pins: tdm-ao-b-din0 {
+ mux {
+ groups = "tdm_ao_b_din0";
+ function = "tdm_ao_b";
+ bias-disable;
};
+ };
- tdm_ao_b_din0_pins: tdm-ao-b-din0 {
- mux {
- groups = "tdm_ao_b_din0";
- function = "tdm_ao_b";
- bias-disable;
- };
+ spdif_ao_out_pins: spdif-ao-out {
+ mux {
+ groups = "spdif_ao_out";
+ function = "spdif_ao_out";
+ drive-strength-microamp = <3000>;
+ bias-disable;
};
+ };
- spdif_ao_out_pins: spdif-ao-out {
- mux {
- groups = "spdif_ao_out";
- function = "spdif_ao_out";
- drive-strength-microamp = <500>;
- bias-disable;
- };
+ tdm_ao_b_din1_pins: tdm-ao-b-din1 {
+ mux {
+ groups = "tdm_ao_b_din1";
+ function = "tdm_ao_b";
+ bias-disable;
};
+ };
- tdm_ao_b_din1_pins: tdm-ao-b-din1 {
- mux {
- groups = "tdm_ao_b_din1";
- function = "tdm_ao_b";
- bias-disable;
- };
+ tdm_ao_b_din2_pins: tdm-ao-b-din2 {
+ mux {
+ groups = "tdm_ao_b_din2";
+ function = "tdm_ao_b";
+ bias-disable;
};
+ };
- tdm_ao_b_din2_pins: tdm-ao-b-din2 {
- mux {
- groups = "tdm_ao_b_din2";
- function = "tdm_ao_b";
- bias-disable;
- };
+ tdm_ao_b_dout0_pins: tdm-ao-b-dout0 {
+ mux {
+ groups = "tdm_ao_b_dout0";
+ function = "tdm_ao_b";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- tdm_ao_b_dout0_pins: tdm-ao-b-dout0 {
- mux {
- groups = "tdm_ao_b_dout0";
- function = "tdm_ao_b";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ tdm_ao_b_dout1_pins: tdm-ao-b-dout1 {
+ mux {
+ groups = "tdm_ao_b_dout1";
+ function = "tdm_ao_b";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- tdm_ao_b_dout1_pins: tdm-ao-b-dout1 {
- mux {
- groups = "tdm_ao_b_dout1";
- function = "tdm_ao_b";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
- };
-
- tdm_ao_b_dout2_pins: tdm-ao-b-dout2 {
- mux {
- groups = "tdm_ao_b_dout2";
- function = "tdm_ao_b";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ tdm_ao_b_dout2_pins: tdm-ao-b-dout2 {
+ mux {
+ groups = "tdm_ao_b_dout2";
+ function = "tdm_ao_b";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- tdm_ao_b_fs_pins: tdm-ao-b-fs {
- mux {
- groups = "tdm_ao_b_fs";
- function = "tdm_ao_b";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ tdm_ao_b_fs_pins: tdm-ao-b-fs {
+ mux {
+ groups = "tdm_ao_b_fs";
+ function = "tdm_ao_b";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- tdm_ao_b_sclk_pins: tdm-ao-b-sclk {
- mux {
- groups = "tdm_ao_b_sclk";
- function = "tdm_ao_b";
- bias-disable;
- drive-strength-microamp = <3000>;
- };
+ tdm_ao_b_sclk_pins: tdm-ao-b-sclk {
+ mux {
+ groups = "tdm_ao_b_sclk";
+ function = "tdm_ao_b";
+ bias-disable;
+ drive-strength-microamp = <3000>;
};
+ };
- tdm_ao_b_slv_fs_pins: tdm-ao-b-slv-fs {
- mux {
- groups = "tdm_ao_b_slv_fs";
- function = "tdm_ao_b";
- bias-disable;
- };
+ tdm_ao_b_slv_fs_pins: tdm-ao-b-slv-fs {
+ mux {
+ groups = "tdm_ao_b_slv_fs";
+ function = "tdm_ao_b";
+ bias-disable;
};
+ };
- tdm_ao_b_slv_sclk_pins: tdm-ao-b-slv-sclk {
- mux {
- groups = "tdm_ao_b_slv_sclk";
- function = "tdm_ao_b";
- bias-disable;
- };
+ tdm_ao_b_slv_sclk_pins: tdm-ao-b-slv-sclk {
+ mux {
+ groups = "tdm_ao_b_slv_sclk";
+ function = "tdm_ao_b";
+ bias-disable;
};
+ };
- uart_ao_a_pins: uart-a-ao {
- mux {
- groups = "uart_ao_a_tx",
- "uart_ao_a_rx";
- function = "uart_ao_a";
- bias-disable;
- };
+ uart_ao_a_pins: uart-a-ao {
+ mux {
+ groups = "uart_ao_a_tx",
+ "uart_ao_a_rx";
+ function = "uart_ao_a";
+ bias-disable;
};
+ };
- uart_ao_a_cts_rts_pins: uart-ao-a-cts-rts {
- mux {
- groups = "uart_ao_a_cts",
- "uart_ao_a_rts";
- function = "uart_ao_a";
- bias-disable;
- };
+ uart_ao_a_cts_rts_pins: uart-ao-a-cts-rts {
+ mux {
+ groups = "uart_ao_a_cts",
+ "uart_ao_a_rts";
+ function = "uart_ao_a";
+ bias-disable;
};
+ };
- uart_ao_b_2_3_pins: uart-ao-b-2-3 {
- mux {
- groups = "uart_ao_b_tx_2",
- "uart_ao_b_rx_3";
- function = "uart_ao_b";
- bias-disable;
- };
+ uart_ao_b_2_3_pins: uart-ao-b-2-3 {
+ mux {
+ groups = "uart_ao_b_tx_2",
+ "uart_ao_b_rx_3";
+ function = "uart_ao_b";
+ bias-disable;
};
+ };
- uart_ao_b_8_9_pins: uart-ao-b-8-9 {
- mux {
- groups = "uart_ao_b_tx_8",
- "uart_ao_b_rx_9";
- function = "uart_ao_b";
- bias-disable;
- };
+ uart_ao_b_8_9_pins: uart-ao-b-8-9 {
+ mux {
+ groups = "uart_ao_b_tx_8",
+ "uart_ao_b_rx_9";
+ function = "uart_ao_b";
+ bias-disable;
};
+ };
- uart_ao_b_cts_rts_pins: uart-ao-b-cts-rts {
- mux {
- groups = "uart_ao_b_cts",
- "uart_ao_b_rts";
- function = "uart_ao_b";
- bias-disable;
- };
+ uart_ao_b_cts_rts_pins: uart-ao-b-cts-rts {
+ mux {
+ groups = "uart_ao_b_cts",
+ "uart_ao_b_rts";
+ function = "uart_ao_b";
+ bias-disable;
};
+ };
- pwm_a_e_pins: pwm-a-e {
- mux {
- groups = "pwm_a_e";
- function = "pwm_a_e";
- bias-disable;
- };
+ pwm_a_e_pins: pwm-a-e {
+ mux {
+ groups = "pwm_a_e";
+ function = "pwm_a_e";
+ bias-disable;
};
+ };
- pwm_ao_a_pins: pwm-ao-a {
- mux {
- groups = "pwm_ao_a";
- function = "pwm_ao_a";
- bias-disable;
- };
+ pwm_ao_a_pins: pwm-ao-a {
+ mux {
+ groups = "pwm_ao_a";
+ function = "pwm_ao_a";
+ bias-disable;
};
+ };
- pwm_ao_b_pins: pwm-ao-b {
- mux {
- groups = "pwm_ao_b";
- function = "pwm_ao_b";
- bias-disable;
- };
+ pwm_ao_b_pins: pwm-ao-b {
+ mux {
+ groups = "pwm_ao_b";
+ function = "pwm_ao_b";
+ bias-disable;
};
+ };
- pwm_ao_c_4_pins: pwm-ao-c-4 {
- mux {
- groups = "pwm_ao_c_4";
- function = "pwm_ao_c";
- bias-disable;
- };
+ pwm_ao_c_4_pins: pwm-ao-c-4 {
+ mux {
+ groups = "pwm_ao_c_4";
+ function = "pwm_ao_c";
+ bias-disable;
};
+ };
- pwm_ao_c_6_pins: pwm-ao-c-6 {
- mux {
- groups = "pwm_ao_c_6";
- function = "pwm_ao_c";
- bias-disable;
- };
+ pwm_ao_c_6_pins: pwm-ao-c-6 {
+ mux {
+ groups = "pwm_ao_c_6";
+ function = "pwm_ao_c";
+ bias-disable;
};
+ };
- pwm_ao_d_5_pins: pwm-ao-d-5 {
- mux {
- groups = "pwm_ao_d_5";
- function = "pwm_ao_d";
- bias-disable;
- };
+ pwm_ao_d_5_pins: pwm-ao-d-5 {
+ mux {
+ groups = "pwm_ao_d_5";
+ function = "pwm_ao_d";
+ bias-disable;
};
+ };
- pwm_ao_d_10_pins: pwm-ao-d-10 {
- mux {
- groups = "pwm_ao_d_10";
- function = "pwm_ao_d";
- bias-disable;
- };
+ pwm_ao_d_10_pins: pwm-ao-d-10 {
+ mux {
+ groups = "pwm_ao_d_10";
+ function = "pwm_ao_d";
+ bias-disable;
};
+ };
- pwm_ao_d_e_pins: pwm-ao-d-e {
- mux {
- groups = "pwm_ao_d_e";
- function = "pwm_ao_d";
- };
+ pwm_ao_d_e_pins: pwm-ao-d-e {
+ mux {
+ groups = "pwm_ao_d_e";
+ function = "pwm_ao_d";
};
+ };
- remote_input_ao_pins: remote-input-ao {
- mux {
- groups = "remote_ao_input";
- function = "remote_ao_input";
- bias-disable;
- };
+ remote_input_ao_pins: remote-input-ao {
+ mux {
+ groups = "remote_ao_input";
+ function = "remote_ao_input";
+ bias-disable;
};
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
index e732df3f3114..664912d1beaa 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
@@ -363,6 +363,10 @@
power-domains = <&pwrc PWRC_G12A_ETH_ID>;
};
+&hdmi_tx {
+ power-domains = <&pwrc PWRC_G12A_VPU_ID>;
+};
+
&vpu {
power-domains = <&pwrc PWRC_G12A_VPU_ID>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
index 3da7922d83f1..0e239939ade6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
@@ -24,7 +24,6 @@
compatible = "simple-audio-amplifier";
enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
VCC-supply = <&vcc_5v>;
- #sound-dai-cells = <0>;
sound-name-prefix = "10U2";
};
@@ -374,6 +373,7 @@
};
&acodec {
+ AVDD-supply = <&vddao_1v8>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
index 4b8db872bbf3..6a346cb86a53 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
@@ -44,13 +44,6 @@
reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
};
- fan0: pwm-fan {
- compatible = "pwm-fan";
- #cooling-cells = <2>;
- cooling-levels = <0 120 170 220>;
- pwms = <&pwm_cd 1 40000 0>;
- };
-
hdmi-connector {
compatible = "hdmi-connector";
type = "a";
@@ -374,13 +367,6 @@
clock-names = "clkin0";
};
-&pwm_cd {
- status = "okay";
- pinctrl-0 = <&pwm_d_x6_pins>;
- pinctrl-names = "default";
- pwm-gpios = <&gpio GPIOAO_10 GPIO_ACTIVE_HIGH>;
-};
-
&pwm_ef {
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-one.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-one.dts
new file mode 100644
index 000000000000..ecfa1c683dde
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-one.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-g12b-dreambox.dtsi"
+
+/ {
+ compatible = "dream,dreambox-one", "amlogic,s922x", "amlogic,g12b";
+ model = "Dreambox One";
+};
+
+&sd_emmc_a {
+ sd-uhs-sdr12;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-two.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-two.dts
new file mode 100644
index 000000000000..df0d71983c3d
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox-two.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-g12b-dreambox.dtsi"
+
+/ {
+ compatible = "dream,dreambox-two", "amlogic,s922x", "amlogic,g12b";
+ model = "Dreambox Two";
+};
+
+&sd_emmc_a {
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
new file mode 100644
index 000000000000..3a24c2411552
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+#include "meson-g12b-w400.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+ cvbs-connector {
+ status = "disabled";
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOA_11 GPIO_ACTIVE_LOW>;
+ clocks = <&wifi32k>;
+ clock-names = "ext_clock";
+ };
+
+ spdif_dit: audio-codec-1 {
+ #sound-dai-cells = <0>;
+ compatible = "linux,spdif-dit";
+ status = "okay";
+ sound-name-prefix = "DIT";
+ };
+
+ sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "DREAMBOX";
+ audio-aux-devs = <&tdmout_b>;
+ audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+ "TDMOUT_B IN 1", "FRDDR_B OUT 1",
+ "TDMOUT_B IN 2", "FRDDR_C OUT 1",
+ "TDM_B Playback", "TDMOUT_B OUT",
+ "SPDIFOUT_A IN 0", "FRDDR_A OUT 3",
+ "SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
+ "SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+ assigned-clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+
+ dai-link-0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&frddr_b>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&frddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-3 {
+ sound-dai = <&tdmif_b>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ dai-tdm-slot-tx-mask-1 = <1 1>;
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ mclk-fs = <256>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+ };
+ };
+
+ /* spdif hdmi or toslink interface */
+ dai-link-4 {
+ sound-dai = <&spdifout_a>;
+
+ codec-0 {
+ sound-dai = <&spdif_dit>;
+ };
+
+ codec-1 {
+ sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_A>;
+ };
+ };
+
+ /* spdif hdmi interface */
+ dai-link-5 {
+ sound-dai = <&spdifout_b>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_B>;
+ };
+ };
+
+ /* hdmi glue */
+ dai-link-6 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+ };
+};
+
+&arb {
+ status = "okay";
+};
+
+&frddr_a {
+ status = "okay";
+};
+
+&frddr_b {
+ status = "okay";
+};
+
+&frddr_c {
+ status = "okay";
+};
+
+&ir {
+ linux,rc-map-name = "rc-dreambox";
+};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vddao_1v8>;
+};
+
+&spdifout_a {
+ pinctrl-0 = <&spdif_out_h_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&spdifout_b {
+ status = "okay";
+};
+
+&tdmif_b {
+ status = "okay";
+};
+
+&tdmout_b {
+ status = "okay";
+};
+
+&tohdmitx {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
index 890f5bfebb03..8445701100d0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
@@ -33,6 +33,13 @@
reg = <0x0 0x0 0x0 0x80000000>;
};
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ cooling-levels = <0 64 128 192 255>;
+ pwms = <&pwm_AO_ab 0 40000 0>;
+ };
+
gpio-keys-polled {
compatible = "gpio-keys-polled";
poll-interval = <100>;
@@ -286,6 +293,23 @@
clock-latency = <50000>;
};
+&cpu_thermal {
+ trips {
+ cpu_active: cpu-active {
+ temperature = <70000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map2 {
+ trip = <&cpu_active>;
+ cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
&frddr_a {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index c431986e6a33..c37cc6b036cd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -408,6 +408,6 @@
compatible = "usb5e3,610";
reg = <1>;
vdd-supply = <&p5v0>;
- reset-gpio = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 12ef6e81c8bd..ed00e67e6923 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -311,10 +311,16 @@
<&reset RESET_HDMI_SYSTEM_RESET>,
<&reset RESET_HDMI_TX>;
reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
- clocks = <&clkc CLKID_HDMI_PCLK>,
- <&clkc CLKID_CLK81>,
+ clocks = <&clkc CLKID_HDMI>,
+ <&clkc CLKID_HDMI_PCLK>,
<&clkc CLKID_GCLK_VENCI_INT0>;
clock-names = "isfr", "iahb", "venci";
+ power-domains = <&pwrc PWRC_GXBB_VPU_ID>;
+
+ assigned-clocks = <&clkc CLKID_HDMI_SEL>,
+ <&clkc CLKID_HDMI>;
+ assigned-clock-parents = <&xtal>, <0>;
+ assigned-clock-rates = <0>, <24000000>;
};
&sysctrl {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts
new file mode 100644
index 000000000000..de996e930b82
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905x-p212.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/meson-aiu.h>
+
+/ {
+ compatible = "osmc,vero4k", "amlogic,s905x", "amlogic,meson-gxl";
+ model = "OSMC Vero 4K";
+
+ reserved-memory {
+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+ secmon_reserved_bl32: secmon@5300000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ poll-interval = <20>;
+
+ button {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-standby {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio GPIODV_24 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ panic-indicator;
+ };
+ };
+
+ dio2133: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ sound-name-prefix = "AU2";
+ VCC-supply = <&hdmi_5v>;
+ enable-gpios = <&gpio GPIOH_5 GPIO_ACTIVE_HIGH>;
+ };
+
+ spdif_dit: audio-codec-0 {
+ #sound-dai-cells = <0>;
+ compatible = "linux,spdif-dit";
+ sound-name-prefix = "DIT";
+ };
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "amlogic,gx-sound-card";
+ model = "VERO4K";
+ audio-aux-devs = <&dio2133>;
+ audio-widgets = "Line", "Lineout";
+ audio-routing = "AU2 INL", "ACODEC LOLP",
+ "AU2 INR", "ACODEC LORP",
+ "AU2 INL", "ACODEC LOLN",
+ "AU2 INR", "ACODEC LORN",
+ "Lineout", "AU2 OUTL",
+ "Lineout", "AU2 OUTR";
+ assigned-clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+
+ dai-link-0 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_FIFO>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&aiu AIU_CPU CPU_SPDIF_FIFO>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_ENCODER>;
+ dai-format = "i2s";
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&aiu AIU_HDMI CTRL_I2S>;
+ };
+
+ codec-1 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_I2S>;
+ };
+ };
+
+ dai-link-3 {
+ sound-dai = <&aiu AIU_CPU CPU_SPDIF_ENCODER>;
+
+ codec-0 {
+ sound-dai = <&spdif_dit>;
+ };
+ };
+
+ dai-link-4 {
+ sound-dai = <&aiu AIU_HDMI CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+
+ dai-link-5 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&acodec>;
+ };
+ };
+ };
+};
+
+&acodec {
+ AVDD-supply = <&vddio_ao18>;
+ status = "okay";
+};
+
+&aiu {
+ status = "okay";
+ pinctrl-0 = <&spdif_out_h_pins>;
+ pinctrl-names = "default";
+};
+
+&cec_AO {
+ status = "okay";
+ pinctrl-0 = <&ao_cec_pins>;
+ pinctrl-names = "default";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&ethmac {
+ phy-mode = "rmii";
+ phy-handle = <&internal_phy>;
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&hdmi_5v>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&internal_phy {
+ pinctrl-0 = <&eth_link_led_pins>, <&eth_act_led_pins>;
+ pinctrl-names = "default";
+};
+
+/* This UART is brought out to the DB9 connector */
+&uart_AO {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 17bcfa4702e1..f58d1790de1c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -323,10 +323,16 @@
<&reset RESET_HDMI_SYSTEM_RESET>,
<&reset RESET_HDMI_TX>;
reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
- clocks = <&clkc CLKID_HDMI_PCLK>,
- <&clkc CLKID_CLK81>,
+ clocks = <&clkc CLKID_HDMI>,
+ <&clkc CLKID_HDMI_PCLK>,
<&clkc CLKID_GCLK_VENCI_INT0>;
clock-names = "isfr", "iahb", "venci";
+ power-domains = <&pwrc PWRC_GXBB_VPU_ID>;
+
+ assigned-clocks = <&clkc CLKID_HDMI_SEL>,
+ <&clkc CLKID_HDMI>;
+ assigned-clock-parents = <&xtal>, <0>;
+ assigned-clock-rates = <0>, <24000000>;
};
&sysctrl {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
new file mode 100644
index 000000000000..1221f4545130
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905x.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+
+/ {
+ compatible = "amlogic,p271", "amlogic,s905l", "amlogic,meson-gxlx";
+ model = "Amlogic Meson GXLX (S905L) P271 Development Board";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ sound {
+ model = "P271";
+ };
+};
+
+&apb {
+ mali: gpu@c0000 {
+ /* Mali 450-MP2 */
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp", "gpmmu", "pp", "pmu",
+ "pp0", "ppmmu0", "pp1", "ppmmu1";
+ };
+};
+
+&saradc {
+ compatible = "amlogic,meson-gxlx-saradc", "amlogic,meson-saradc";
+};
+
+&usb {
+ dr_mode = "host";
+};
+
+&vdec {
+ compatible = "amlogic,gxlx-vdec", "amlogic,gx-vdec";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
index 10896f9df682..b686eacb9662 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
@@ -312,6 +312,160 @@
};
};
+ pwm_a_pins1: pwm-a-pins1 {
+ mux {
+ groups = "pwm_a_d";
+ function = "pwm_a";
+ };
+ };
+
+ pwm_a_pins2: pwm-a-pins2 {
+ mux {
+ groups = "pwm_a_x";
+ function = "pwm_a";
+ };
+ };
+
+ pwm_b_pins1: pwm-b-pins1 {
+ mux {
+ groups = "pwm_b_d";
+ function = "pwm_b";
+ };
+ };
+
+ pwm_b_pins2: pwm-b-pins2 {
+ mux {
+ groups = "pwm_b_x";
+ function = "pwm_b";
+ };
+ };
+
+ pwm_c_pins1: pwm-c-pins1 {
+ mux {
+ groups = "pwm_c_d";
+ function = "pwm_c";
+ };
+ };
+
+ pwm_c_pins2: pwm-c-pins2 {
+ mux {
+ groups = "pwm_c_x";
+ function = "pwm_c";
+ };
+ };
+
+ pwm_d_pins1: pwm-d-pins1 {
+ mux {
+ groups = "pwm_d_d";
+ function = "pwm_d";
+ };
+ };
+
+ pwm_d_pins2: pwm-d-pins2 {
+ mux {
+ groups = "pwm_d_h";
+ function = "pwm_d";
+ };
+ };
+
+ pwm_e_pins1: pwm-e-pins1 {
+ mux {
+ groups = "pwm_e_x";
+ function = "pwm_e";
+ };
+ };
+
+ pwm_e_pins2: pwm-e-pins2 {
+ mux {
+ groups = "pwm_e_z";
+ function = "pwm_e";
+ };
+ };
+
+ pwm_f_pins1: pwm-f-pins1 {
+ mux {
+ groups = "pwm_f_x";
+ function = "pwm_f";
+ };
+ };
+
+ pwm_f_pins2: pwm-f-pins2 {
+ mux {
+ groups = "pwm_f_z";
+ function = "pwm_f";
+ };
+ };
+
+ pwm_g_pins1: pwm-g-pins1 {
+ mux {
+ groups = "pwm_g_d";
+ function = "pwm_g";
+ };
+ };
+
+ pwm_g_pins2: pwm-g-pins2 {
+ mux {
+ groups = "pwm_g_z";
+ function = "pwm_g";
+ };
+ };
+
+ pwm_h_pins: pwm-h-pins {
+ mux {
+ groups = "pwm_h";
+ function = "pwm_h";
+ };
+ };
+
+ pwm_i_pins1: pwm-i-pins1 {
+ mux {
+ groups = "pwm_i_d";
+ function = "pwm_i";
+ };
+ };
+
+ pwm_i_pins2: pwm-i-pins2 {
+ mux {
+ groups = "pwm_i_h";
+ function = "pwm_i";
+ };
+ };
+
+ pwm_j_pins: pwm-j-pins {
+ mux {
+ groups = "pwm_j";
+ function = "pwm_j";
+ };
+ };
+
+ pwm_a_hiz_pins: pwm-a-hiz-pins {
+ mux {
+ groups = "pwm_a_hiz";
+ function = "pwm_a_hiz";
+ };
+ };
+
+ pwm_b_hiz_pins: pwm-b-hiz-pins {
+ mux {
+ groups = "pwm_b_hiz";
+ function = "pwm_b_hiz";
+ };
+ };
+
+ pwm_c_hiz_pins: pwm-c-hiz-pins {
+ mux {
+ groups = "pwm_c_hiz";
+ function = "pwm_c_hiz";
+ };
+ };
+
+ pwm_g_hiz_pins: pwm-g-hiz-pins {
+ mux {
+ groups = "pwm_g_hiz";
+ function = "pwm_g_hiz";
+ };
+ };
+
spicc0_pins_x: spicc0-pins_x {
mux {
groups = "spi_a_mosi_x",
@@ -399,6 +553,51 @@
status = "disabled";
};
+ pwm_ab: pwm@58000 {
+ compatible = "amlogic,meson-s4-pwm";
+ reg = <0x0 0x58000 0x0 0x24>;
+ clocks = <&clkc_periphs CLKID_PWM_A>,
+ <&clkc_periphs CLKID_PWM_B>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_cd: pwm@5a000 {
+ compatible = "amlogic,meson-s4-pwm";
+ reg = <0x0 0x5a000 0x0 0x24>;
+ clocks = <&clkc_periphs CLKID_PWM_C>,
+ <&clkc_periphs CLKID_PWM_D>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_ef: pwm@5c000 {
+ compatible = "amlogic,meson-s4-pwm";
+ reg = <0x0 0x5c000 0x0 0x24>;
+ clocks = <&clkc_periphs CLKID_PWM_E>,
+ <&clkc_periphs CLKID_PWM_F>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_gh: pwm@5e000 {
+ compatible = "amlogic,meson-s4-pwm";
+ reg = <0x0 0x5e000 0x0 0x24>;
+ clocks = <&clkc_periphs CLKID_PWM_G>,
+ <&clkc_periphs CLKID_PWM_H>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_ij: pwm@60000 {
+ compatible = "amlogic,meson-s4-pwm";
+ reg = <0x0 0x60000 0x0 0x24>;
+ clocks = <&clkc_periphs CLKID_PWM_I>,
+ <&clkc_periphs CLKID_PWM_J>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
i2c0: i2c@66000 {
compatible = "amlogic,meson-axg-i2c";
reg = <0x0 0x66000 0x0 0x20>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index 643f94d9d08e..97e4b52066dc 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -17,10 +17,10 @@
compatible = "amlogic,axg-tdm-iface";
#sound-dai-cells = <0>;
sound-name-prefix = "TDM_A";
- clocks = <&clkc_audio AUD_CLKID_MST_A_MCLK>,
- <&clkc_audio AUD_CLKID_MST_A_SCLK>,
- <&clkc_audio AUD_CLKID_MST_A_LRCLK>;
- clock-names = "mclk", "sclk", "lrclk";
+ clocks = <&clkc_audio AUD_CLKID_MST_A_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_LRCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_MCLK>;
+ clock-names = "sclk", "lrclk", "mclk";
status = "disabled";
};
@@ -28,10 +28,10 @@
compatible = "amlogic,axg-tdm-iface";
#sound-dai-cells = <0>;
sound-name-prefix = "TDM_B";
- clocks = <&clkc_audio AUD_CLKID_MST_B_MCLK>,
- <&clkc_audio AUD_CLKID_MST_B_SCLK>,
- <&clkc_audio AUD_CLKID_MST_B_LRCLK>;
- clock-names = "mclk", "sclk", "lrclk";
+ clocks = <&clkc_audio AUD_CLKID_MST_B_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_B_LRCLK>,
+ <&clkc_audio AUD_CLKID_MST_B_MCLK>;
+ clock-names = "sclk", "lrclk", "mclk";
status = "disabled";
};
@@ -39,10 +39,10 @@
compatible = "amlogic,axg-tdm-iface";
#sound-dai-cells = <0>;
sound-name-prefix = "TDM_C";
- clocks = <&clkc_audio AUD_CLKID_MST_C_MCLK>,
- <&clkc_audio AUD_CLKID_MST_C_SCLK>,
- <&clkc_audio AUD_CLKID_MST_C_LRCLK>;
- clock-names = "mclk", "sclk", "lrclk";
+ clocks = <&clkc_audio AUD_CLKID_MST_C_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_C_LRCLK>,
+ <&clkc_audio AUD_CLKID_MST_C_MCLK>;
+ clock-names = "sclk", "lrclk", "mclk";
status = "disabled";
};
@@ -275,8 +275,7 @@
};
tdmin_a: audio-controller@300 {
- compatible = "amlogic,sm1-tdmin",
- "amlogic,axg-tdmin";
+ compatible = "amlogic,sm1-tdmin";
reg = <0x0 0x300 0x0 0x40>;
sound-name-prefix = "TDMIN_A";
resets = <&clkc_audio AUD_RESET_TDMIN_A>;
@@ -291,8 +290,7 @@
};
tdmin_b: audio-controller@340 {
- compatible = "amlogic,sm1-tdmin",
- "amlogic,axg-tdmin";
+ compatible = "amlogic,sm1-tdmin";
reg = <0x0 0x340 0x0 0x40>;
sound-name-prefix = "TDMIN_B";
resets = <&clkc_audio AUD_RESET_TDMIN_B>;
@@ -307,8 +305,7 @@
};
tdmin_c: audio-controller@380 {
- compatible = "amlogic,sm1-tdmin",
- "amlogic,axg-tdmin";
+ compatible = "amlogic,sm1-tdmin";
reg = <0x0 0x380 0x0 0x40>;
sound-name-prefix = "TDMIN_C";
resets = <&clkc_audio AUD_RESET_TDMIN_C>;
@@ -323,8 +320,7 @@
};
tdmin_lb: audio-controller@3c0 {
- compatible = "amlogic,sm1-tdmin",
- "amlogic,axg-tdmin";
+ compatible = "amlogic,sm1-tdmin";
reg = <0x0 0x3c0 0x0 0x40>;
sound-name-prefix = "TDMIN_LB";
resets = <&clkc_audio AUD_RESET_TDMIN_LB>;
@@ -339,7 +335,7 @@
};
spdifin: audio-controller@400 {
- compatible = "amlogic,g12a-spdifin",
+ compatible = "amlogic,sm1-spdifin",
"amlogic,axg-spdifin";
reg = <0x0 0x400 0x0 0x30>;
#sound-dai-cells = <0>;
@@ -353,7 +349,7 @@
};
spdifout_a: audio-controller@480 {
- compatible = "amlogic,g12a-spdifout",
+ compatible = "amlogic,sm1-spdifout",
"amlogic,axg-spdifout";
reg = <0x0 0x480 0x0 0x50>;
#sound-dai-cells = <0>;
@@ -518,6 +514,10 @@
"amlogic,meson-gpio-intc";
};
+&hdmi_tx {
+ power-domains = <&pwrc PWRC_SM1_VPU_ID>;
+};
+
&pcie {
power-domains = <&pwrc PWRC_SM1_PCIE_ID>;
};
diff --git a/arch/arm64/boot/dts/apm/apm-merlin.dts b/arch/arm64/boot/dts/apm/apm-merlin.dts
index 6e05cf1a3df6..b1160780a2a6 100644
--- a/arch/arm64/boot/dts/apm/apm-merlin.dts
+++ b/arch/arm64/boot/dts/apm/apm-merlin.dts
@@ -32,7 +32,7 @@
};
poweroff_mbox: poweroff_mbox@10548000 {
- compatible = "syscon";
+ compatible = "apm,merlin-poweroff-mailbox", "syscon";
reg = <0x0 0x10548000 0x0 0x30>;
};
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index e7644cddf06f..2ef658796746 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -32,7 +32,7 @@
};
poweroff_mbox: poweroff_mbox@10548000 {
- compatible = "syscon";
+ compatible = "apm,mustang-poweroff-mailbox", "syscon";
reg = <0x0 0x10548000 0x0 0x30>;
};
diff --git a/arch/arm64/boot/dts/arm/corstone1000-fvp.dts b/arch/arm64/boot/dts/arm/corstone1000-fvp.dts
index 901a7fc83307..abd013562995 100644
--- a/arch/arm64/boot/dts/arm/corstone1000-fvp.dts
+++ b/arch/arm64/boot/dts/arm/corstone1000-fvp.dts
@@ -21,7 +21,7 @@
reg-io-width = <2>;
};
- vmmc_v3_3d: fixed_v3_3d {
+ vmmc_v3_3d: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc_supply";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm64/boot/dts/arm/corstone1000.dtsi b/arch/arm64/boot/dts/arm/corstone1000.dtsi
index 6ad7829f9e28..bb9b96fb5314 100644
--- a/arch/arm64/boot/dts/arm/corstone1000.dtsi
+++ b/arch/arm64/boot/dts/arm/corstone1000.dtsi
@@ -60,14 +60,14 @@
cache-sets = <1024>;
};
- refclk100mhz: refclk100mhz {
+ refclk100mhz: clock-100000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <100000000>;
clock-output-names = "apb_pclk";
};
- smbclk: refclk24mhzx2 {
+ smbclk: clock-48000000 {
/* Reference 24MHz clock x 2 */
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -83,7 +83,7 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
};
- uartclk: uartclk {
+ uartclk: clock-50000000 {
/* UART clock - 50MHz */
compatible = "fixed-clock";
#clock-cells = <0>;
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 7b41537731a6..93f1e7c026b8 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -99,21 +99,21 @@
timeout-sec = <30>;
};
- v2m_clk24mhz: clk24mhz {
+ v2m_clk24mhz: clock-24000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24000000>;
clock-output-names = "v2m:clk24mhz";
};
- v2m_refclk1mhz: refclk1mhz {
+ v2m_refclk1mhz: clock-1000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1000000>;
clock-output-names = "v2m:refclk1mhz";
};
- v2m_refclk32khz: refclk32khz {
+ v2m_refclk32khz: clock-32768 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 98ed2b329ed6..055764d0b9e5 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -663,7 +663,6 @@
dma-coherent;
/* The SMMU is only really of interest to bare-metal hypervisors */
/* iommus = <&smmu_gpu 0>; */
- status = "disabled";
};
sram: sram@2e000000 {
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index 2870b5eeb198..6d7d88e9591a 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,35 +8,35 @@
*/
/ {
/* SoC fixed clocks */
- soc_uartclk: refclk7372800hz {
+ soc_uartclk: clock-7372800 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <7372800>;
clock-output-names = "juno:uartclk";
};
- soc_usb48mhz: clk48mhz {
+ soc_usb48mhz: clock-48000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <48000000>;
clock-output-names = "clk48mhz";
};
- soc_smc50mhz: clk50mhz {
+ soc_smc50mhz: clock-50000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
clock-output-names = "smc_clk";
};
- soc_refclk100mhz: refclk100mhz {
+ soc_refclk100mhz: clock-100000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <100000000>;
clock-output-names = "apb_pclk";
};
- soc_faxiclk: refclk400mhz {
+ soc_faxiclk: clock-400000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <400000000>;
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index be42932f7e21..ffa4ba4f1fbc 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -8,35 +8,35 @@
*/
/ {
- mb_clk24mhz: clk24mhz {
+ mb_clk24mhz: clock-24000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24000000>;
clock-output-names = "juno_mb:clk24mhz";
};
- mb_clk25mhz: clk25mhz {
+ mb_clk25mhz: clock-25000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <25000000>;
clock-output-names = "juno_mb:clk25mhz";
};
- v2m_refclk1mhz: refclk1mhz {
+ v2m_refclk1mhz: clock-1000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1000000>;
clock-output-names = "juno_mb:refclk1mhz";
};
- v2m_refclk32khz: refclk32khz {
+ v2m_refclk32khz: clock-32768 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
clock-output-names = "juno_mb:refclk32khz";
};
- mb_fixed_3v3: mcc-sb-3v3 {
+ mb_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "MCC_SB_3V3";
regulator-min-microvolt = <3300000>;
@@ -158,7 +158,8 @@
};
apbregs@10000 {
- compatible = "syscon", "simple-mfd";
+ compatible = "arm,juno-fpga-apb-regs",
+ "syscon", "simple-mfd";
reg = <0x010000 0x1000>;
ranges = <0x0 0x10000 0x1000>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index ba8beef3fe99..66b1b74d27dc 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -8,28 +8,28 @@
* VEMotherBoard.lisa
*/
/ {
- v2m_clk24mhz: clk24mhz {
+ v2m_clk24mhz: clock-24000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24000000>;
clock-output-names = "v2m:clk24mhz";
};
- v2m_refclk1mhz: refclk1mhz {
+ v2m_refclk1mhz: clock-1000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1000000>;
clock-output-names = "v2m:refclk1mhz";
};
- v2m_refclk32khz: refclk32khz {
+ v2m_refclk32khz: clock-32768 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
clock-output-names = "v2m:refclk32khz";
};
- v2m_fixed_3v3: v2m-3v3 {
+ v2m_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "3V3";
regulator-min-microvolt = <3300000>;
@@ -41,7 +41,7 @@
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
- v2m_oscclk1: oscclk1 {
+ v2m_oscclk1: clock-controller {
/* CLCD clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 1>;
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
index 9115c99d0dc0..a0e1fa83eafa 100644
--- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -111,7 +111,7 @@
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
- smbclk: smclk {
+ smbclk: clock-controller {
/* SMC clock */
compatible = "arm,vexpress-osc";
arm,vexpress-sysreg,func = <1 4>;
@@ -120,7 +120,7 @@
clock-output-names = "smclk";
};
- volt-vio {
+ regulator-vio {
/* VIO to expansion board above */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 0>;
@@ -130,7 +130,7 @@
regulator-always-on;
};
- volt-12v {
+ regulator-12v {
/* 12V from power connector J6 */
compatible = "arm,vexpress-volt";
arm,vexpress-sysreg,func = <2 1>;
diff --git a/arch/arm64/boot/dts/exynos/exynos850.dtsi b/arch/arm64/boot/dts/exynos/exynos850.dtsi
index 0706c8534ceb..f1c8b4613cbc 100644
--- a/arch/arm64/boot/dts/exynos/exynos850.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos850.dtsi
@@ -416,6 +416,14 @@
interrupts = <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>;
};
+ trng: rng@12081400 {
+ compatible = "samsung,exynos850-trng";
+ reg = <0x12081400 0x100>;
+ clocks = <&cmu_core CLK_GOUT_SSS_ACLK>,
+ <&cmu_core CLK_GOUT_SSS_PCLK>;
+ clock-names = "secss", "pclk";
+ };
+
pinctrl_hsi: pinctrl@13430000 {
compatible = "samsung,exynos850-pinctrl";
reg = <0x13430000 0x1000>;
diff --git a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
index 5e8ffe065081..387fb779bd29 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
+++ b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
@@ -131,9 +131,9 @@
};
&usbdrd31 {
- status = "okay";
vdd10-supply = <&reg_placeholder>;
vdd33-supply = <&reg_placeholder>;
+ status = "okay";
};
&usbdrd31_dwc3 {
@@ -145,6 +145,13 @@
};
&usbdrd31_phy {
+ /* TODO: Update these once PMIC is implemented */
+ pll-supply = <&reg_placeholder>;
+ dvdd-usb20-supply = <&reg_placeholder>;
+ vddh-usb20-supply = <&reg_placeholder>;
+ vdd33-usb20-supply = <&reg_placeholder>;
+ vdda-usbdp-supply = <&reg_placeholder>;
+ vddh-usbdp-supply = <&reg_placeholder>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index a66e996666b8..eadb8822e6d4 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
@@ -213,9 +213,9 @@
pmu-3 {
compatible = "arm,dsu-pmu";
- interrupts = <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH 0>;
cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
<&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
+ interrupts = <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH 0>;
};
psci {
@@ -288,6 +288,8 @@
compatible = "google,gs101-mct",
"samsung,exynos4210-mct";
reg = <0x10050000 0x800>;
+ clocks = <&ext_24_5m>, <&cmu_misc CLK_GOUT_MISC_MCT_PCLK>;
+ clock-names = "fin_pll", "mct";
interrupts = <GIC_SPI 753 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 754 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 755 IRQ_TYPE_LEVEL_HIGH 0>,
@@ -300,17 +302,15 @@
<GIC_SPI 762 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 763 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&ext_24_5m>, <&cmu_misc CLK_GOUT_MISC_MCT_PCLK>;
- clock-names = "fin_pll", "mct";
};
watchdog_cl0: watchdog@10060000 {
compatible = "google,gs101-wdt";
reg = <0x10060000 0x100>;
- interrupts = <GIC_SPI 765 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cmu_misc CLK_GOUT_MISC_WDT_CLUSTER0_PCLK>,
<&ext_24_5m>;
clock-names = "watchdog", "watchdog_src";
+ interrupts = <GIC_SPI 765 IRQ_TYPE_LEVEL_HIGH 0>;
samsung,syscon-phandle = <&pmu_system_controller>;
samsung,cluster-index = <0>;
status = "disabled";
@@ -319,10 +319,10 @@
watchdog_cl1: watchdog@10070000 {
compatible = "google,gs101-wdt";
reg = <0x10070000 0x100>;
- interrupts = <GIC_SPI 766 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cmu_misc CLK_GOUT_MISC_WDT_CLUSTER1_PCLK>,
<&ext_24_5m>;
clock-names = "watchdog", "watchdog_src";
+ interrupts = <GIC_SPI 766 IRQ_TYPE_LEVEL_HIGH 0>;
samsung,syscon-phandle = <&pmu_system_controller>;
samsung,cluster-index = <1>;
status = "disabled";
@@ -776,12 +776,12 @@
compatible = "google,gs101-hsi2c",
"samsung,exynosautov9-hsi2c";
reg = <0x10970000 0xc0>;
- interrupts = <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH 0>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7>,
<&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_7>;
clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-0 = <&hsi2c8_bus>;
pinctrl-names = "default";
status = "disabled";
@@ -831,10 +831,10 @@
serial_0: serial@10a00000 {
compatible = "google,gs101-uart";
reg = <0x10a00000 0xc0>;
- interrupts = <GIC_SPI 634 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0>,
<&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0>;
clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 634 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-0 = <&uart0_bus>;
pinctrl-names = "default";
samsung,uart-fifosize = <256>;
@@ -1157,12 +1157,12 @@
compatible = "google,gs101-hsi2c",
"samsung,exynosautov9-hsi2c";
reg = <0x10d50000 0xc0>;
- interrupts = <GIC_SPI 655 IRQ_TYPE_LEVEL_HIGH 0>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5>,
<&cmu_peric1 CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_5>;
clock-names = "hsi2c", "hsi2c_pclk";
+ interrupts = <GIC_SPI 655 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-0 = <&hsi2c12_bus>;
pinctrl-names = "default";
status = "disabled";
@@ -1277,13 +1277,14 @@
<&cmu_hsi0 CLK_GOUT_HSI0_UASC_HSI0_CTRL_PCLK>,
<&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_SCL_APB_PCLK>;
clock-names = "phy", "ref", "ctrl_aclk", "ctrl_pclk", "scl_pclk";
- samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <1>;
+ samsung,pmu-syscon = <&pmu_system_controller>;
status = "disabled";
};
usbdrd31: usb@11110000 {
compatible = "google,gs101-dwusb3";
+ ranges = <0x0 0x11110000 0x10000>;
clocks = <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY>,
<&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_SUSPEND_CLK_26>,
<&cmu_hsi0 CLK_GOUT_HSI0_UASC_HSI0_LINK_ACLK>,
@@ -1291,14 +1292,13 @@
clock-names = "bus_early", "susp_clk", "link_aclk", "link_pclk";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x0 0x11110000 0x10000>;
status = "disabled";
usbdrd31_dwc3: usb@0 {
compatible = "snps,dwc3";
+ reg = <0x0 0x10000>;
clocks = <&cmu_hsi0 CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_REF_CLK_40>;
clock-names = "ref";
- reg = <0x0 0x10000>;
interrupts = <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH 0>;
phys = <&usbdrd31_phy 0>, <&usbdrd31_phy 1>;
phy-names = "usb2-phy", "usb3-phy";
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index bd443c2bc5a4..f04c22b7de72 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -114,6 +114,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evkb.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-ctouch2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-edimm2.2.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-iot-gateway.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-innocomm-wb15-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-kontron-bl.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-kontron-bl-osm-s.dtb
@@ -177,6 +178,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-hdmi.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-lt6.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-mi1010ait-1cp1.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw71xx-2x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw72xx-2x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw73xx-2x.dtb
@@ -191,6 +193,9 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-wifi-dev.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-wifi-mallow.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-wifi-yavia.dtb
+imx8mp-evk-mx8-dlvds-lcd1-dtbs += imx8mp-evk.dtb imx8mp-evk-mx8-dlvds-lcd1.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-mx8-dlvds-lcd1.dtb
+
imx8mp-tqma8mpql-mba8mpxl-lvds-dtbs += imx8mp-tqma8mpql-mba8mpxl.dtb imx8mp-tqma8mpql-mba8mpxl-lvds.dtbo
imx8mp-tqma8mpql-mba8mpxl-lvds-g133han01-dtbs += imx8mp-tqma8mpql-mba8mpxl.dtb imx8mp-tqma8mpql-mba8mpxl-lvds-g133han01.dtbo
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl-lvds.dtb
@@ -231,11 +236,13 @@ dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-iris-v2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-tqma8xqp-mba8xx.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8ulp-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx93-9x9-qsb.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-11x11-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-phyboard-segin.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxca.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxla.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-var-som-symphony.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx95-19x19-evk.dtb
imx8mm-venice-gw72xx-0x-imx219-dtbs := imx8mm-venice-gw72xx-0x.dtb imx8mm-venice-gw72xx-0x-imx219.dtbo
imx8mm-venice-gw72xx-0x-rpidsi-dtbs := imx8mm-venice-gw72xx-0x.dtb imx8mm-venice-gw72xx-0x-rpidsi.dtbo
@@ -263,6 +270,14 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw73xx-0x-rs485.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw74xx-imx219.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw74xx-rpidsi.dtb
+imx8mm-phygate-tauri-l-rs232-rs232-dtbs := imx8mm-phygate-tauri-l.dtb imx8mm-phygate-tauri-l-rs232-rs232.dtbo
+imx8mm-phygate-tauri-l-rs232-rts-cts-dtbs := imx8mm-phygate-tauri-l.dtb imx8mm-phygate-tauri-l-rs232-rts-cts.dtbo
+imx8mm-phygate-tauri-l-rs232-rs485-dtbs := imx8mm-phygate-tauri-l.dtb imx8mm-phygate-tauri-l-rs232-rs485.dtbo
+
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phygate-tauri-l-rs232-rs232.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phygate-tauri-l-rs232-rts-cts.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phygate-tauri-l-rs232-rs485.dtb
+
dtb-$(CONFIG_ARCH_S32) += s32g274a-evb.dtb
dtb-$(CONFIG_ARCH_S32) += s32g274a-rdb2.dtb
dtb-$(CONFIG_ARCH_S32) += s32g399a-rdb3.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index a0f7bbd691a0..e61ea7e0737e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -74,15 +74,15 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
- <1 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
- <1 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
- <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
};
pmu {
compatible = "arm,cortex-a53-pmu";
- interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
};
gic: interrupt-controller@1400000 {
@@ -93,7 +93,7 @@
<0x0 0x1402000 0 0x2000>, /* GICC */
<0x0 0x1404000 0 0x2000>, /* GICH */
<0x0 0x1406000 0 0x2000>; /* GICV */
- interrupts = <1 9 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
reboot {
@@ -156,10 +156,10 @@
status = "disabled";
};
- esdhc0: esdhc@1560000 {
+ esdhc0: mmc@1560000 {
compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
- interrupts = <0 62 0x4>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
voltage-ranges = <1800 1800 3300 3300>;
@@ -175,10 +175,10 @@
big-endian;
};
- esdhc1: esdhc@1580000 {
+ esdhc1: mmc@1580000 {
compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
reg = <0x0 0x1580000 0x0 0x10000>;
- interrupts = <0 65 0x4>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
voltage-ranges = <1800 1800 3300 3300>;
@@ -305,7 +305,7 @@
tmu: tmu@1f00000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f00000 0x0 0x10000>;
- interrupts = <0 33 0x4>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x60062>;
fsl,tmu-calibration =
<0x00000000 0x00000025>,
@@ -355,7 +355,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2180000 0x0 0x10000>;
- interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
scl-gpios = <&gpio0 2 0>;
@@ -367,7 +367,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2190000 0x0 0x10000>;
- interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
scl-gpios = <&gpio0 13 0>;
@@ -379,7 +379,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2100000 0x0 0x10000>;
- interrupts = <0 64 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
@@ -391,7 +391,7 @@
duart0: serial@21c0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0500 0x0 0x100>;
- interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
@@ -400,16 +400,16 @@
duart1: serial@21c0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0600 0x0 0x100>;
- interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
gpio0: gpio@2300000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls1021a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
- interrupts = <0 66 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -417,9 +417,9 @@
};
gpio1: gpio@2310000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls1021a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2310000 0x0 0x10000>;
- interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -430,7 +430,7 @@
compatible = "fsl,ls1012a-wdt",
"fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
- interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(1)>;
big-endian;
};
@@ -439,7 +439,7 @@
#sound-dai-cells = <0>;
compatible = "fsl,vf610-sai";
reg = <0x0 0x2b50000 0x0 0x10000>;
- interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>,
<&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -449,9 +449,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 47>,
- <&edma0 1 46>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 46>,
+ <&edma0 1 47>;
status = "disabled";
};
@@ -459,7 +459,7 @@
#sound-dai-cells = <0>;
compatible = "fsl,vf610-sai";
reg = <0x0 0x2b60000 0x0 0x10000>;
- interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>,
<&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -469,9 +469,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 45>,
- <&edma0 1 44>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 44>,
+ <&edma0 1 45>;
status = "disabled";
};
@@ -481,8 +481,8 @@
reg = <0x0 0x2c00000 0x0 0x10000>,
<0x0 0x2c10000 0x0 0x10000>,
<0x0 0x2c20000 0x0 0x10000>;
- interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>,
- <0 103 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "edma-tx", "edma-err";
dma-channels = <32>;
big-endian;
@@ -496,12 +496,11 @@
usb0: usb@2f00000 {
compatible = "snps,dwc3";
reg = <0x0 0x2f00000 0x0 0x10000>;
- interrupts = <0 60 0x4>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
- snps,host-vbus-glitches;
};
sata: sata@3200000 {
@@ -509,7 +508,7 @@
reg = <0x0 0x3200000 0x0 0x10000>,
<0x0 0x20140520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
- interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
dma-coherent;
@@ -519,7 +518,7 @@
usb1: usb@8600000 {
compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
reg = <0x0 0x8600000 0x0 0x1000>;
- interrupts = <0 139 0x4>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
phy_type = "ulpi";
};
@@ -528,7 +527,7 @@
compatible = "fsl,ls1012a-msi";
reg = <0x0 0x1572000 0x0 0x8>;
msi-controller;
- interrupts = <0 126 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
};
pcie1: pcie@3400000 {
@@ -536,9 +535,9 @@
reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
<0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <0 118 0x4>, /* controller interrupt */
- <0 117 0x4>; /* PME interrupt */
- interrupt-names = "aer", "pme";
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "pme", "aer";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
@@ -563,7 +562,7 @@
#fsl,rcpm-wakeup-cells = <1>;
};
- ftm_alarm0: timer@29d0000 {
+ ftm_alarm0: rtc@29d0000 {
compatible = "fsl,ls1012a-ftm-alarm";
reg = <0x0 0x29d0000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x20000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index ecd2c1ea177f..757a34ba7da3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -201,6 +201,37 @@
#address-cells = <1>;
#size-cells = <0>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ /* Atmel AT24C512C-XHD-B: 64 KB EEPROM */
+ eeprom@50 {
+ compatible = "atmel,24c512";
+ reg = <0x50>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ /* AT24C04C 512-byte DDR4 SPD EEPROM */
+ /* Documentation says 0x51, but the address must be even and i2cdetect reports 0x52 */
+ eeprom@52 {
+ compatible = "atmel,24c04";
+ reg = <0x52>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ /* Atmel AT24C02C-XHM-B: 256-byte EEPROM */
+ eeprom@57 {
+ compatible = "atmel,24c02";
+ reg = <0x57>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 70b8731029c4..6b6e3ee950e5 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -155,7 +155,7 @@
};
thermal-zones {
- ddr-controller {
+ ddr-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 0>;
@@ -175,7 +175,7 @@
};
};
- core-cluster {
+ core-cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
@@ -674,7 +674,7 @@
};
pcie_ep1: pcie-ep@3400000 {
- compatible = "fsl,ls1028a-pcie-ep","fsl,ls-pcie-ep";
+ compatible = "fsl,ls1028a-pcie-ep";
reg = <0x00 0x03400000 0x0 0x00100000
0x80 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -713,7 +713,7 @@
};
pcie_ep2: pcie-ep@3500000 {
- compatible = "fsl,ls1028a-pcie-ep","fsl,ls-pcie-ep";
+ compatible = "fsl,ls1028a-pcie-ep";
reg = <0x00 0x03500000 0x0 0x00100000
0x88 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -828,6 +828,7 @@
<GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "qdma-error", "qdma-queue0",
"qdma-queue1", "qdma-queue2", "qdma-queue3";
+ #dma-cells = <1>;
dma-channels = <8>;
block-number = <1>;
block-offset = <0x10000>;
@@ -859,8 +860,8 @@
malidp0: display@f080000 {
compatible = "arm,mali-dp500";
reg = <0x0 0xf080000 0x0 0x10000>;
- interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
- <0 223 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "DE", "SE";
clocks = <&dpclk>,
<&clockgen QORIQ_CLK_HWACCEL 2>,
@@ -902,9 +903,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 4>,
- <&edma0 1 3>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 3>,
+ <&edma0 1 4>;
fsl,sai-asynchronous;
status = "disabled";
};
@@ -923,9 +924,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 6>,
- <&edma0 1 5>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 5>,
+ <&edma0 1 6>;
fsl,sai-asynchronous;
status = "disabled";
};
@@ -944,9 +945,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 8>,
- <&edma0 1 7>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 7>,
+ <&edma0 1 8>;
fsl,sai-asynchronous;
status = "disabled";
};
@@ -965,9 +966,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 10>,
- <&edma0 1 9>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 9>,
+ <&edma0 1 10>;
fsl,sai-asynchronous;
status = "disabled";
};
@@ -986,9 +987,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 12>,
- <&edma0 1 11>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 11>,
+ <&edma0 1 12>;
fsl,sai-asynchronous;
status = "disabled";
};
@@ -1007,9 +1008,9 @@
<&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dma-names = "tx", "rx";
- dmas = <&edma0 1 14>,
- <&edma0 1 13>;
+ dma-names = "rx", "tx";
+ dmas = <&edma0 1 13>,
+ <&edma0 1 14>;
fsl,sai-asynchronous;
status = "disabled";
};
@@ -1024,7 +1025,7 @@
tmu: tmu@1f80000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f80000 0x0 0x10000>;
- interrupts = <0 23 0x4>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
fsl,tmu-calibration =
<0x00000000 0x00000024>,
@@ -1325,7 +1326,7 @@
little-endian;
};
- ftm_alarm0: timer@2800000 {
+ ftm_alarm0: rtc@2800000 {
compatible = "fsl,ls1028a-ftm-alarm";
reg = <0x0 0x2800000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x0 0x0 0x0 0x0 0x4000 0x0 0x0>;
@@ -1333,7 +1334,7 @@
status = "disabled";
};
- ftm_alarm1: timer@2810000 {
+ ftm_alarm1: rtc@2810000 {
compatible = "fsl,ls1028a-ftm-alarm";
reg = <0x0 0x2810000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x0 0x0 0x0 0x0 0x4000 0x0 0x0>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
index dda27ed7aaf2..11b1356e95d5 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -64,7 +64,7 @@
0x2 0x0 0x0 0x7fb00000 0x00000100>;
status = "okay";
- nor@0,0 {
+ flash@0,0 {
compatible = "cfi-flash";
reg = <0x0 0x0 0x8000000>;
big-endian;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index 26f8540cb101..c4532c809f0a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -71,7 +71,7 @@
0x1 0x0 0x0 0x7e800000 0x00010000
0x2 0x0 0x0 0x7fb00000 0x00000100>;
- nor@0,0 {
+ flash@0,0 {
compatible = "cfi-flash";
#address-cells = <1>;
#size-cells = <1>;
@@ -104,6 +104,12 @@
compatible = "n25q128a13", "jedec,spi-nor"; /* 16MB */
reg = <0>;
spi-max-frequency = <1000000>; /* input clock */
+ /*
+ * Standard CS timing properties replace the deprecated vendor
+ * variants below.
+ */
+ spi-cs-setup-delay-ns = <100>;
+ spi-cs-hold-delay-ns = <100>;
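+ /*
+ * Assumed mapping (not stated in this patch): fsl,spi-cs-sck-delay
+ * corresponds to spi-cs-setup-delay-ns and fsl,spi-sck-cs-delay to
+ * spi-cs-hold-delay-ns, both in nanoseconds, so the new values above
+ * mirror the deprecated properties below.
+ */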
fsl,spi-cs-sck-delay = <100>;
fsl,spi-sck-cs-delay = <100>;
};
@@ -112,6 +118,12 @@
compatible = "maxim,ds26522";
reg = <2>;
spi-max-frequency = <2000000>;
+ /*
+ * Standard CS timing properties replace the deprecated vendor
+ * variants below.
+ */
+ spi-cs-setup-delay-ns = <100>;
+ spi-cs-hold-delay-ns = <50>;
fsl,spi-cs-sck-delay = <100>;
fsl,spi-sck-cs-delay = <50>;
};
@@ -120,6 +132,12 @@
compatible = "maxim,ds26522";
reg = <3>;
spi-max-frequency = <2000000>;
+ /*
+ * Standard CS timing properties replace the deprecated vendor
+ * variants below.
+ */
+ spi-cs-setup-delay-ns = <100>;
+ spi-cs-hold-delay-ns = <50>;
fsl,spi-cs-sck-delay = <100>;
fsl,spi-sck-cs-delay = <50>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 8ee6d8c0ef61..17f4e3171120 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -154,7 +154,7 @@
};
thermal-zones {
- ddr-controller {
+ ddr-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 0>;
@@ -174,7 +174,7 @@
};
};
- serdes {
+ serdes-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
@@ -194,7 +194,7 @@
};
};
- fman {
+ fman-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 2>;
@@ -214,7 +214,7 @@
};
};
- core-cluster {
+ core-cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 3>;
@@ -245,7 +245,7 @@
};
};
- sec {
+ sec-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 4>;
@@ -268,19 +268,19 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xf08>, /* Physical Secure PPI */
- <1 14 0xf08>, /* Physical Non-Secure PPI */
- <1 11 0xf08>, /* Virtual PPI */
- <1 10 0xf08>; /* Hypervisor PPI */
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
fsl,erratum-a008585;
};
pmu {
compatible = "arm,cortex-a53-pmu";
- interrupts = <0 106 0x4>,
- <0 107 0x4>,
- <0 95 0x4>,
- <0 97 0x4>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
interrupt-affinity = <&cpu0>,
<&cpu1>,
<&cpu2>,
@@ -295,7 +295,7 @@
<0x0 0x1402000 0 0x2000>, /* GICC */
<0x0 0x1404000 0 0x2000>, /* GICH */
<0x0 0x1406000 0 0x2000>; /* GICV */
- interrupts = <1 9 0xf08>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
soc: soc {
@@ -352,7 +352,7 @@
#size-cells = <1>;
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
- interrupts = <0 75 0x4>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
dma-coherent;
sec_jr0: jr@10000 {
@@ -360,7 +360,7 @@
"fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x10000 0x10000>;
- interrupts = <0 71 0x4>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
};
sec_jr1: jr@20000 {
@@ -368,7 +368,7 @@
"fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x20000 0x10000>;
- interrupts = <0 72 0x4>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
};
sec_jr2: jr@30000 {
@@ -376,7 +376,7 @@
"fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x30000 0x10000>;
- interrupts = <0 73 0x4>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
};
sec_jr3: jr@40000 {
@@ -384,7 +384,7 @@
"fsl,sec-v5.0-job-ring",
"fsl,sec-v4.0-job-ring";
reg = <0x40000 0x10000>;
- interrupts = <0 74 0x4>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -405,7 +405,7 @@
ifc: memory-controller@1530000 {
compatible = "fsl,ifc";
reg = <0x0 0x1530000 0x0 0x10000>;
- interrupts = <0 43 0x4>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
};
qspi: spi@1550000 {
@@ -415,7 +415,7 @@
reg = <0x0 0x1550000 0x0 0x10000>,
<0x0 0x40000000 0x0 0x4000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
- interrupts = <0 99 0x4>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>,
@@ -424,10 +424,10 @@
status = "disabled";
};
- esdhc: esdhc@1560000 {
+ esdhc: mmc@1560000 {
compatible = "fsl,ls1043a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
- interrupts = <0 62 0x4>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <0>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
@@ -438,14 +438,14 @@
ddr: memory-controller@1080000 {
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
- interrupts = <0 144 0x4>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
big-endian;
};
tmu: tmu@1f00000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f00000 0x0 0x10000>;
- interrupts = <0 33 0x4>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x70062>;
fsl,tmu-calibration =
<0x00000000 0x00000023>,
@@ -505,11 +505,11 @@
memory-region = <&bman_fbpr>;
};
- bportals: bman-portals@508000000 {
+ bportals: bman-portals-bus@508000000 {
ranges = <0x0 0x5 0x08000000 0x8000000>;
};
- qportals: qman-portals@500000000 {
+ qportals: qman-portals-bus@500000000 {
ranges = <0x0 0x5 0x00000000 0x8000000>;
};
@@ -518,7 +518,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2100000 0x0 0x10000>;
- interrupts = <0 64 0x4>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
@@ -532,8 +532,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2180000 0x0 0x10000>;
- interrupts = <0 56 0x4>;
- clock-names = "i2c";
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
dmas = <&edma0 1 38>,
@@ -547,8 +547,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2190000 0x0 0x10000>;
- interrupts = <0 57 0x4>;
- clock-names = "i2c";
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
scl-gpios = <&gpio4 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -560,8 +560,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x21a0000 0x0 0x10000>;
- interrupts = <0 58 0x4>;
- clock-names = "i2c";
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
scl-gpios = <&gpio4 10 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -573,8 +573,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x21b0000 0x0 0x10000>;
- interrupts = <0 59 0x4>;
- clock-names = "i2c";
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -584,7 +584,7 @@
duart0: serial@21c0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0500 0x0 0x100>;
- interrupts = <0 54 0x4>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
};
@@ -592,7 +592,7 @@
duart1: serial@21c0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0600 0x0 0x100>;
- interrupts = <0 54 0x4>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
};
@@ -600,7 +600,7 @@
duart2: serial@21d0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0500 0x0 0x100>;
- interrupts = <0 55 0x4>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
};
@@ -608,7 +608,7 @@
duart3: serial@21d0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0600 0x0 0x100>;
- interrupts = <0 55 0x4>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
};
@@ -616,7 +616,7 @@
gpio1: gpio@2300000 {
compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
- interrupts = <0 66 0x4>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -626,7 +626,7 @@
gpio2: gpio@2310000 {
compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2310000 0x0 0x10000>;
- interrupts = <0 67 0x4>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -636,7 +636,7 @@
gpio3: gpio@2320000 {
compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2320000 0x0 0x10000>;
- interrupts = <0 68 0x4>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -646,7 +646,7 @@
gpio4: gpio@2330000 {
compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2330000 0x0 0x10000>;
- interrupts = <0 134 0x4>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -721,7 +721,7 @@
lpuart0: serial@2950000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2950000 0x0 0x1000>;
- interrupts = <0 48 0x4>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_SYSCLK 0>;
clock-names = "ipg";
status = "disabled";
@@ -730,7 +730,7 @@
lpuart1: serial@2960000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2960000 0x0 0x1000>;
- interrupts = <0 49 0x4>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
@@ -740,7 +740,7 @@
lpuart2: serial@2970000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2970000 0x0 0x1000>;
- interrupts = <0 50 0x4>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
@@ -750,7 +750,7 @@
lpuart3: serial@2980000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2980000 0x0 0x1000>;
- interrupts = <0 51 0x4>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
@@ -760,7 +760,7 @@
lpuart4: serial@2990000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2990000 0x0 0x1000>;
- interrupts = <0 52 0x4>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
@@ -770,7 +770,7 @@
lpuart5: serial@29a0000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x29a0000 0x0 0x1000>;
- interrupts = <0 53 0x4>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
@@ -780,10 +780,9 @@
wdog0: watchdog@2ad0000 {
compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
- interrupts = <0 83 0x4>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
- clock-names = "wdog";
big-endian;
};
@@ -793,8 +792,8 @@
reg = <0x0 0x2c00000 0x0 0x10000>,
<0x0 0x2c10000 0x0 0x10000>,
<0x0 0x2c20000 0x0 0x10000>;
- interrupts = <0 103 0x4>,
- <0 103 0x4>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "edma-tx", "edma-err";
dma-channels = <32>;
big-endian;
@@ -805,7 +804,7 @@
QORIQ_CLK_PLL_DIV(1)>;
};
- aux_bus: aux_bus {
+ aux_bus: aux-bus {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
@@ -815,7 +814,7 @@
usb0: usb@2f00000 {
compatible = "snps,dwc3";
reg = <0x0 0x2f00000 0x0 0x10000>;
- interrupts = <0 60 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
@@ -827,7 +826,7 @@
usb1: usb@3000000 {
compatible = "snps,dwc3";
reg = <0x0 0x3000000 0x0 0x10000>;
- interrupts = <0 61 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
@@ -839,7 +838,7 @@
usb2: usb@3100000 {
compatible = "snps,dwc3";
reg = <0x0 0x3100000 0x0 0x10000>;
- interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
@@ -853,7 +852,7 @@
reg = <0x0 0x3200000 0x0 0x10000>,
<0x0 0x20140520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
- interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(1)>;
dma-coherent;
@@ -864,21 +863,21 @@
compatible = "fsl,ls1043a-msi";
reg = <0x0 0x1571000 0x0 0x8>;
msi-controller;
- interrupts = <0 116 0x4>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
};
msi2: msi-controller2@1572000 {
compatible = "fsl,ls1043a-msi";
reg = <0x0 0x1572000 0x0 0x8>;
msi-controller;
- interrupts = <0 126 0x4>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
};
msi3: msi-controller3@1573000 {
compatible = "fsl,ls1043a-msi";
reg = <0x0 0x1573000 0x0 0x8>;
msi-controller;
- interrupts = <0 160 0x4>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>;
};
pcie1: pcie@3400000 {
@@ -886,8 +885,8 @@
reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
<0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <0 117 IRQ_TYPE_LEVEL_HIGH>,
- <0 118 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pme", "aer";
#address-cells = <3>;
#size-cells = <2>;
@@ -913,8 +912,8 @@
reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
<0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <0 127 IRQ_TYPE_LEVEL_HIGH>,
- <0 128 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pme", "aer";
#address-cells = <3>;
#size-cells = <2>;
@@ -940,8 +939,8 @@
reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
<0x50 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <0 161 IRQ_TYPE_LEVEL_HIGH>,
- <0 162 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pme", "aer";
#address-cells = <3>;
#size-cells = <2>;
@@ -974,6 +973,7 @@
<GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "qdma-error", "qdma-queue0",
"qdma-queue1", "qdma-queue2", "qdma-queue3";
+ #dma-cells = <1>;
dma-channels = <8>;
block-number = <1>;
block-offset = <0x10000>;
@@ -989,7 +989,7 @@
#fsl,rcpm-wakeup-cells = <1>;
};
- ftm_alarm0: timer@29d0000 {
+ ftm_alarm0: rtc@29d0000 {
compatible = "fsl,ls1043a-ftm-alarm";
reg = <0x0 0x29d0000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x20000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
index 3b0ed9305f2b..e5296e51f656 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
@@ -151,7 +151,7 @@
0x2 0x0 0x0 0x7fb00000 0x00000100>;
status = "okay";
- nor@0,0 {
+ flash@0,0 {
compatible = "cfi-flash";
reg = <0x0 0x0 0x8000000>;
big-endian;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 754a64be739c..200e52622f99 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -122,7 +122,7 @@
};
thermal-zones {
- ddr-controller {
+ ddr-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 0>;
@@ -142,7 +142,7 @@
};
};
- serdes {
+ serdes-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
@@ -162,7 +162,7 @@
};
};
- fman {
+ fman-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 2>;
@@ -182,7 +182,7 @@
};
};
- core-cluster {
+ core-cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 3>;
@@ -213,7 +213,7 @@
};
};
- sec {
+ sec-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 4>;
@@ -308,7 +308,7 @@
status = "disabled";
};
- esdhc: esdhc@1560000 {
+ esdhc: mmc@1560000 {
compatible = "fsl,ls1046a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
@@ -409,11 +409,11 @@
};
- qportals: qman-portals@500000000 {
+ qportals: qman-portals-bus@500000000 {
ranges = <0x0 0x5 0x00000000 0x8000000>;
};
- bportals: bman-portals@508000000 {
+ bportals: bman-portals-bus@508000000 {
ranges = <0x0 0x5 0x08000000 0x8000000>;
};
@@ -441,7 +441,7 @@
tmu: tmu@1f00000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f00000 0x0 0x10000>;
- interrupts = <0 33 0x4>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x70062>;
fsl,tmu-calibration =
/* Calibration data group 1 */
@@ -589,7 +589,7 @@
};
gpio0: gpio@2300000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls1046a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -599,7 +599,7 @@
};
gpio1: gpio@2310000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls1046a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2310000 0x0 0x10000>;
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -609,7 +609,7 @@
};
gpio2: gpio@2320000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls1046a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2320000 0x0 0x10000>;
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -619,7 +619,7 @@
};
gpio3: gpio@2330000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls1046a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2330000 0x0 0x10000>;
interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -715,7 +715,7 @@
QORIQ_CLK_PLL_DIV(2)>;
};
- aux_bus: aux_bus {
+ aux_bus: aux-bus {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
@@ -801,9 +801,9 @@
reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
<0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
- interrupt-names = "aer", "pme";
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "pme", "aer";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
@@ -840,9 +840,9 @@
reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
<0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
- <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
- interrupt-names = "aer", "pme";
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "pme", "aer";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
@@ -879,9 +879,9 @@
reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
<0x50 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
- <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
- interrupt-names = "aer", "pme";
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "pme", "aer";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
@@ -925,6 +925,7 @@
<GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "qdma-error", "qdma-queue0",
"qdma-queue1", "qdma-queue2", "qdma-queue3";
+ #dma-cells = <1>;
dma-channels = <8>;
block-number = <1>;
block-offset = <0x10000>;
@@ -940,7 +941,7 @@
#fsl,rcpm-wakeup-cells = <1>;
};
- ftm_alarm0: timer@29d0000 {
+ ftm_alarm0: rtc@29d0000 {
compatible = "fsl,ls1046a-ftm-alarm";
reg = <0x0 0x29d0000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x20000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
index aa52ff73ff9e..d238a8440a81 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
@@ -113,7 +113,7 @@
3 0 0x5 0x20000000 0x00010000>;
status = "okay";
- nor@0,0 {
+ flash@0,0 {
compatible = "cfi-flash";
reg = <0x0 0x0 0x8000000>;
bank-width = <2>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index 604bf88d70b3..8ce4b6aae79d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -118,7 +118,7 @@
<0x0 0x0c0c0000 0 0x2000>, /* GICC */
<0x0 0x0c0d0000 0 0x1000>, /* GICH */
<0x0 0x0c0e0000 0 0x20000>; /* GICV */
- interrupts = <1 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -131,7 +131,7 @@
};
thermal-zones {
- core-cluster {
+ core-cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 0>;
@@ -166,7 +166,7 @@
};
};
- soc {
+ soc-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
@@ -183,10 +183,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
- <1 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
- <1 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
- <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
};
pmu {
@@ -280,7 +280,7 @@
tmu: tmu@1f80000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f80000 0x0 0x10000>;
- interrupts = <0 23 0x4>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x70062>;
fsl,tmu-calibration =
/* Calibration data group 1 */
@@ -347,7 +347,7 @@
reg = <0x0 0x21c0500 0x0 0x100>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
- interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -356,14 +356,14 @@
reg = <0x0 0x21c0600 0x0 0x100>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
- interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
gpio0: gpio@2300000 {
compatible = "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
- interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
gpio-controller;
#gpio-cells = <2>;
@@ -374,7 +374,7 @@
gpio1: gpio@2310000 {
compatible = "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2310000 0x0 0x10000>;
- interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
gpio-controller;
#gpio-cells = <2>;
@@ -385,7 +385,7 @@
gpio2: gpio@2320000 {
compatible = "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2320000 0x0 0x10000>;
- interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
gpio-controller;
#gpio-cells = <2>;
@@ -396,7 +396,7 @@
gpio3: gpio@2330000 {
compatible = "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2330000 0x0 0x10000>;
- interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
gpio-controller;
#gpio-cells = <2>;
@@ -407,7 +407,7 @@
ifc: memory-controller@2240000 {
compatible = "fsl,ifc";
reg = <0x0 0x2240000 0x0 0x20000>;
- interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
#address-cells = <2>;
#size-cells = <1>;
@@ -419,7 +419,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2000000 0x0 0x10000>;
- interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
@@ -430,7 +430,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2010000 0x0 0x10000>;
- interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
@@ -441,7 +441,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2020000 0x0 0x10000>;
- interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
@@ -452,7 +452,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2030000 0x0 0x10000>;
- interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
@@ -474,10 +474,10 @@
status = "disabled";
};
- esdhc: esdhc@2140000 {
+ esdhc: mmc@2140000 {
compatible = "fsl,ls1088a-esdhc", "fsl,esdhc";
reg = <0x0 0x2140000 0x0 0x10000>;
- interrupts = <0 28 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <0>;
clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
voltage-ranges = <1800 1800 3300 3300>;
@@ -490,7 +490,7 @@
usb0: usb@3100000 {
compatible = "snps,dwc3";
reg = <0x0 0x3100000 0x0 0x10000>;
- interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
@@ -501,7 +501,7 @@
usb1: usb@3110000 {
compatible = "snps,dwc3";
reg = <0x0 0x3110000 0x0 0x10000>;
- interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
@@ -514,7 +514,7 @@
reg = <0x0 0x3200000 0x0 0x10000>,
<0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
- interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
@@ -565,7 +565,7 @@
reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
<0x20 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
interrupt-names = "aer";
#address-cells = <3>;
#size-cells = <2>;
@@ -604,7 +604,7 @@
reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
<0x28 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
interrupt-names = "aer";
#address-cells = <3>;
#size-cells = <2>;
@@ -642,7 +642,7 @@
reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
<0x30 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
- interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
interrupt-names = "aer";
#address-cells = <3>;
#size-cells = <2>;
@@ -880,7 +880,7 @@
};
};
- cluster1_core0_watchdog: wdt@c000000 {
+ cluster1_core0_watchdog: watchdog@c000000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc000000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -890,7 +890,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster1_core1_watchdog: wdt@c010000 {
+ cluster1_core1_watchdog: watchdog@c010000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc010000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -900,7 +900,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster1_core2_watchdog: wdt@c020000 {
+ cluster1_core2_watchdog: watchdog@c020000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc020000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -910,7 +910,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster1_core3_watchdog: wdt@c030000 {
+ cluster1_core3_watchdog: watchdog@c030000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc030000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -920,7 +920,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster2_core0_watchdog: wdt@c100000 {
+ cluster2_core0_watchdog: watchdog@c100000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc100000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -930,7 +930,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster2_core1_watchdog: wdt@c110000 {
+ cluster2_core1_watchdog: watchdog@c110000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc110000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -940,7 +940,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster2_core2_watchdog: wdt@c120000 {
+ cluster2_core2_watchdog: watchdog@c120000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc120000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -950,7 +950,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster2_core3_watchdog: wdt@c130000 {
+ cluster2_core3_watchdog: watchdog@c130000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc130000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -1040,7 +1040,7 @@
little-endian;
};
- ftm_alarm0: timer@2800000 {
+ ftm_alarm0: rtc@2800000 {
compatible = "fsl,ls1088a-ftm-alarm";
reg = <0x0 0x2800000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x0 0x0 0x0 0x0 0x4000 0x0>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 8352197cea6f..e9bc1f4fa13c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -15,7 +15,7 @@
/ {
pmu {
compatible = "arm,cortex-a57-pmu";
- interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
index 245bbd615c81..60c422560e33 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
@@ -15,7 +15,7 @@
/ {
pmu {
compatible = "arm,cortex-a72-pmu";
- interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
index e2c94da6d6e8..9178cd61c786 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
@@ -43,7 +43,7 @@
0x2 0x0 0x5 0x30000000 0x00010000
0x3 0x0 0x5 0x20000000 0x00010000>;
- nor@0,0 {
+ flash@0,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
index 537cecb13dd0..69cd05a30b85 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
@@ -21,7 +21,7 @@
0x2 0x0 0x5 0x30000000 0x00010000
0x3 0x0 0x5 0x20000000 0x00010000>;
- nor@0,0 {
+ flash@0,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index ccba0a135b24..bde89de2576e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -58,7 +58,7 @@
#size-cells = <2>;
ranges;
interrupt-controller;
- interrupts = <1 9 0x4>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
its: msi-controller@6020000 {
compatible = "arm,gic-v3-its";
@@ -80,7 +80,7 @@
};
thermal-zones {
- ddr-controller1 {
+ ddr-ctrl1-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
@@ -94,7 +94,7 @@
};
};
- ddr-controller2 {
+ ddr-ctrl2-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 2>;
@@ -108,7 +108,7 @@
};
};
- ddr-controller3 {
+ ddr-ctrl3-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 3>;
@@ -122,7 +122,7 @@
};
};
- core-cluster1 {
+ core-cluster1-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 4>;
@@ -151,7 +151,7 @@
};
};
- core-cluster2 {
+ core-cluster2-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 5>;
@@ -180,7 +180,7 @@
};
};
- core-cluster3 {
+ core-cluster3-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 6>;
@@ -209,7 +209,7 @@
};
};
- core-cluster4 {
+ core-cluster4-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 7>;
@@ -241,10 +241,10 @@
timer: timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
- <1 14 4>, /* Physical Non-Secure PPI, active-low */
- <1 11 4>, /* Virtual PPI, active-low */
- <1 10 4>; /* Hypervisor PPI, active-low */
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>, /* Physical Secure PPI */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>, /* Physical Non-Secure PPI */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>, /* Virtual PPI */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>; /* Hypervisor PPI */
};
psci {
@@ -314,7 +314,7 @@
tmu: tmu@1f80000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f80000 0x0 0x10000>;
- interrupts = <0 23 0x4>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
fsl,tmu-calibration =
<0x00000000 0x00000026>,
@@ -362,7 +362,7 @@
reg = <0x0 0x21c0500 0x0 0x100>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
- interrupts = <0 32 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
serial1: serial@21c0600 {
@@ -370,7 +370,7 @@
reg = <0x0 0x21c0600 0x0 0x100>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
- interrupts = <0 32 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
serial2: serial@21d0500 {
@@ -378,7 +378,7 @@
reg = <0x0 0x21d0500 0x0 0x100>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
- interrupts = <0 33 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
};
serial3: serial@21d0600 {
@@ -386,10 +386,10 @@
reg = <0x0 0x21d0600 0x0 0x100>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
- interrupts = <0 33 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
};
- cluster1_core0_watchdog: wdt@c000000 {
+ cluster1_core0_watchdog: watchdog@c000000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc000000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -399,7 +399,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster1_core1_watchdog: wdt@c010000 {
+ cluster1_core1_watchdog: watchdog@c010000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc010000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -409,7 +409,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster2_core0_watchdog: wdt@c100000 {
+ cluster2_core0_watchdog: watchdog@c100000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc100000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -419,7 +419,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster2_core1_watchdog: wdt@c110000 {
+ cluster2_core1_watchdog: watchdog@c110000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc110000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -429,7 +429,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster3_core0_watchdog: wdt@c200000 {
+ cluster3_core0_watchdog: watchdog@c200000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc200000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -439,7 +439,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster3_core1_watchdog: wdt@c210000 {
+ cluster3_core1_watchdog: watchdog@c210000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc210000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -449,7 +449,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster4_core0_watchdog: wdt@c300000 {
+ cluster4_core0_watchdog: watchdog@c300000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc300000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -459,7 +459,7 @@
clock-names = "wdog_clk", "apb_pclk";
};
- cluster4_core1_watchdog: wdt@c310000 {
+ cluster4_core1_watchdog: watchdog@c310000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc310000 0x0 0x1000>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
@@ -883,48 +883,48 @@
#iommu-cells = <1>;
stream-match-mask = <0x7C00>;
dma-coherent;
- interrupts = <0 13 4>, /* global secure fault */
- <0 14 4>, /* combined secure interrupt */
- <0 15 4>, /* global non-secure fault */
- <0 16 4>, /* combined non-secure interrupt */
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, /* global secure fault */
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, /* combined secure interrupt */
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, /* global non-secure fault */
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, /* combined non-secure interrupt */
/* performance counter interrupts 0-7 */
- <0 211 4>, <0 212 4>,
- <0 213 4>, <0 214 4>,
- <0 215 4>, <0 216 4>,
- <0 217 4>, <0 218 4>,
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
/* per context interrupt, 64 interrupts */
- <0 146 4>, <0 147 4>,
- <0 148 4>, <0 149 4>,
- <0 150 4>, <0 151 4>,
- <0 152 4>, <0 153 4>,
- <0 154 4>, <0 155 4>,
- <0 156 4>, <0 157 4>,
- <0 158 4>, <0 159 4>,
- <0 160 4>, <0 161 4>,
- <0 162 4>, <0 163 4>,
- <0 164 4>, <0 165 4>,
- <0 166 4>, <0 167 4>,
- <0 168 4>, <0 169 4>,
- <0 170 4>, <0 171 4>,
- <0 172 4>, <0 173 4>,
- <0 174 4>, <0 175 4>,
- <0 176 4>, <0 177 4>,
- <0 178 4>, <0 179 4>,
- <0 180 4>, <0 181 4>,
- <0 182 4>, <0 183 4>,
- <0 184 4>, <0 185 4>,
- <0 186 4>, <0 187 4>,
- <0 188 4>, <0 189 4>,
- <0 190 4>, <0 191 4>,
- <0 192 4>, <0 193 4>,
- <0 194 4>, <0 195 4>,
- <0 196 4>, <0 197 4>,
- <0 198 4>, <0 199 4>,
- <0 200 4>, <0 201 4>,
- <0 202 4>, <0 203 4>,
- <0 204 4>, <0 205 4>,
- <0 206 4>, <0 207 4>,
- <0 208 4>, <0 209 4>;
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
};
dspi: spi@2100000 {
@@ -933,18 +933,18 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2100000 0x0 0x10000>;
- interrupts = <0 26 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
clock-names = "dspi";
spi-num-chipselects = <5>;
};
- esdhc: esdhc@2140000 {
+ esdhc: mmc@2140000 {
status = "disabled";
compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
reg = <0x0 0x2140000 0x0 0x10000>;
- interrupts = <0 28 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
voltage-ranges = <1800 1800 3300 3300>;
@@ -956,7 +956,7 @@
gpio0: gpio@2300000 {
compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
- interrupts = <0 36 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
little-endian;
#gpio-cells = <2>;
@@ -967,7 +967,7 @@
gpio1: gpio@2310000 {
compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2310000 0x0 0x10000>;
- interrupts = <0 36 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
little-endian;
#gpio-cells = <2>;
@@ -978,7 +978,7 @@
gpio2: gpio@2320000 {
compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2320000 0x0 0x10000>;
- interrupts = <0 37 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
little-endian;
#gpio-cells = <2>;
@@ -989,7 +989,7 @@
gpio3: gpio@2330000 {
compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2330000 0x0 0x10000>;
- interrupts = <0 37 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
little-endian;
#gpio-cells = <2>;
@@ -1003,8 +1003,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2000000 0x0 0x10000>;
- interrupts = <0 34 0x4>; /* Level high type */
- clock-names = "i2c";
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
};
@@ -1015,8 +1015,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2010000 0x0 0x10000>;
- interrupts = <0 34 0x4>; /* Level high type */
- clock-names = "i2c";
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
};
@@ -1027,8 +1027,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2020000 0x0 0x10000>;
- interrupts = <0 35 0x4>; /* Level high type */
- clock-names = "i2c";
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
};
@@ -1039,8 +1039,8 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2030000 0x0 0x10000>;
- interrupts = <0 35 0x4>; /* Level high type */
- clock-names = "i2c";
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
};
@@ -1048,7 +1048,7 @@
ifc: memory-controller@2240000 {
compatible = "fsl,ifc";
reg = <0x0 0x2240000 0x0 0x20000>;
- interrupts = <0 21 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
#address-cells = <2>;
#size-cells = <1>;
@@ -1077,7 +1077,7 @@
pcie1: pcie@3400000 {
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
reg-names = "regs", "config";
- interrupts = <0 108 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
#address-cells = <3>;
#size-cells = <2>;
@@ -1099,7 +1099,7 @@
pcie2: pcie@3500000 {
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
reg-names = "regs", "config";
- interrupts = <0 113 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
#address-cells = <3>;
#size-cells = <2>;
@@ -1121,7 +1121,7 @@
pcie3: pcie@3600000 {
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
reg-names = "regs", "config";
- interrupts = <0 118 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
#address-cells = <3>;
#size-cells = <2>;
@@ -1143,7 +1143,7 @@
pcie4: pcie@3700000 {
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
reg-names = "regs", "config";
- interrupts = <0 123 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
#address-cells = <3>;
#size-cells = <2>;
@@ -1166,7 +1166,7 @@
status = "disabled";
compatible = "fsl,ls2080a-ahci";
reg = <0x0 0x3200000 0x0 0x10000>;
- interrupts = <0 133 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
@@ -1176,7 +1176,7 @@
status = "disabled";
compatible = "fsl,ls2080a-ahci";
reg = <0x0 0x3210000 0x0 0x10000>;
- interrupts = <0 136 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
@@ -1192,7 +1192,7 @@
usb0: usb@3100000 {
compatible = "snps,dwc3";
reg = <0x0 0x3100000 0x0 0x10000>;
- interrupts = <0 80 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
@@ -1203,7 +1203,7 @@
usb1: usb@3110000 {
compatible = "snps,dwc3";
reg = <0x0 0x3110000 0x0 0x10000>;
- interrupts = <0 81 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
@@ -1215,7 +1215,7 @@
ccn@4000000 {
compatible = "arm,ccn-504";
reg = <0x0 0x04000000 0x0 0x01000000>;
- interrupts = <0 12 4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
};
rcpm: power-controller@1e34040 {
@@ -1225,7 +1225,7 @@
little-endian;
};
- ftm_alarm0: timer@2800000 {
+ ftm_alarm0: rtc@2800000 {
compatible = "fsl,ls208xa-ftm-alarm";
reg = <0x0 0x2800000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x0 0x0 0x0 0x0 0x4000 0x0>;
@@ -1236,14 +1236,14 @@
ddr1: memory-controller@1080000 {
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
- interrupts = <0 17 0x4>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
};
ddr2: memory-controller@1090000 {
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1090000 0x0 0x1000>;
- interrupts = <0 18 0x4>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
little-endian;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index 96055593204a..26c7ca31e22e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -449,7 +449,7 @@
};
thermal-zones {
- cluster6-7 {
+ cluster6-7-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 0>;
@@ -492,7 +492,7 @@
};
};
- ddr-cluster5 {
+ ddr-cluster5-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
@@ -512,7 +512,7 @@
};
};
- wriop {
+ wriop-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 2>;
@@ -532,7 +532,7 @@
};
};
- dce-qbman-hsio2 {
+ dce-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 3>;
@@ -552,7 +552,7 @@
};
};
- ccn-dpaa-tbu {
+ ccn-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 4>;
@@ -572,7 +572,7 @@
};
};
- cluster4-hsio3 {
+ cluster4-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 5>;
@@ -592,7 +592,7 @@
};
};
- cluster2-3 {
+ cluster2-3-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 6>;
@@ -745,7 +745,7 @@
#size-cells = <0>;
reg = <0x0 0x2000000 0x0 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -761,7 +761,7 @@
#size-cells = <0>;
reg = <0x0 0x2010000 0x0 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -777,7 +777,7 @@
#size-cells = <0>;
reg = <0x0 0x2020000 0x0 0x10000>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -793,7 +793,7 @@
#size-cells = <0>;
reg = <0x0 0x2030000 0x0 0x10000>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -809,7 +809,7 @@
#size-cells = <0>;
reg = <0x0 0x2040000 0x0 0x10000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -825,7 +825,7 @@
#size-cells = <0>;
reg = <0x0 0x2050000 0x0 0x10000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -841,7 +841,7 @@
#size-cells = <0>;
reg = <0x0 0x2060000 0x0 0x10000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -857,7 +857,7 @@
#size-cells = <0>;
reg = <0x0 0x2070000 0x0 0x10000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
+ clock-names = "ipg";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
pinctrl-names = "default", "gpio";
@@ -925,10 +925,10 @@
status = "disabled";
};
- esdhc0: esdhc@2140000 {
- compatible = "fsl,esdhc";
+ esdhc0: mmc@2140000 {
+ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
reg = <0x0 0x2140000 0x0 0x10000>;
- interrupts = <0 28 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
dma-coherent;
@@ -939,10 +939,10 @@
status = "disabled";
};
- esdhc1: esdhc@2150000 {
- compatible = "fsl,esdhc";
+ esdhc1: mmc@2150000 {
+ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
reg = <0x0 0x2150000 0x0 0x10000>;
- interrupts = <0 63 0x4>; /* Level high type */
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
dma-coherent;
@@ -1027,7 +1027,7 @@
};
gpio0: gpio@2300000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -1038,7 +1038,7 @@
};
gpio1: gpio@2310000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2310000 0x0 0x10000>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -1049,7 +1049,7 @@
};
gpio2: gpio@2320000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2320000 0x0 0x10000>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -1060,7 +1060,7 @@
};
gpio3: gpio@2330000 {
- compatible = "fsl,qoriq-gpio";
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2330000 0x0 0x10000>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
@@ -1085,7 +1085,7 @@
little-endian;
};
- ftm_alarm0: timer@2800000 {
+ ftm_alarm0: rtc@2800000 {
compatible = "fsl,lx2160a-ftm-alarm";
reg = <0x0 0x2800000 0x0 0x10000>;
fsl,rcpm-wakeup = <&rcpm 0x0 0x0 0x0 0x0 0x4000 0x0 0x0>;
@@ -1702,8 +1702,8 @@
pinmux_i2crv: pinmux@70010012c {
compatible = "pinctrl-single";
reg = <0x00000007 0x0010012c 0x0 0xc>;
- #address-cells = <2>;
- #size-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
pinctrl-single,bit-per-mux;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0x7>;
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
index 897cbb7b6742..ff5df0fed9e9 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
@@ -447,7 +447,6 @@ audio_subsys: bus@59000000 {
<&lsio_mu13 2 1>,
<&lsio_mu13 3 0>,
<&lsio_mu13 3 1>;
- memory-region = <&dsp_reserved>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-cm41.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-cm41.dtsi
new file mode 100644
index 000000000000..d715f2a6b037
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-cm41.dtsi
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <dt-bindings/clock/imx8-lpcg.h>
+
+cm41_ipg_clk: clock-cm41-ipg {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <132000000>;
+ clock-output-names = "cm41_ipg_clk";
+};
+
+cm41_subsys: bus@38000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x38000000 0x0 0x38000000 0x4000000>;
+ interrupt-parent = <&cm41_intmux>;
+
+ cm41_i2c: i2c@3b230000 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x3b230000 0x1000>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cm41_i2c_lpcg IMX_LPCG_CLK_0>,
+ <&cm41_i2c_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_M4_1_I2C IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_M4_1_I2C>;
+ status = "disabled";
+ };
+
+ cm41_intmux: intmux@3b400000 {
+ compatible = "fsl,imx-intmux";
+ reg = <0x3b400000 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&cm41_ipg_clk>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_M4_1_INTMUX>;
+ status = "disabled";
+ };
+
+ cm41_i2c_lpcg: clock-controller@3b630000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x3b630000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_M4_1_I2C IMX_SC_PM_CLK_PER>,
+ <&cm41_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "cm41_lpcg_i2c_clk",
+ "cm41_lpcg_i2c_ipg_clk";
+ power-domains = <&pd IMX_SC_R_M4_1_I2C>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
index 4aaf5a0c1ed8..a4a10ce03bfe 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
@@ -28,6 +28,13 @@ conn_ipg_clk: clock-conn-ipg {
clock-output-names = "conn_ipg_clk";
};
+conn_bch_clk: clock-conn-bch {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <400000000>;
+ clock-output-names = "conn_bch_clk";
+};
+
conn_subsys: bus@5b000000 {
compatible = "simple-bus";
#address-cells = <1>;
@@ -302,4 +309,66 @@ conn_subsys: bus@5b000000 {
"usb3_aclk";
power-domains = <&pd IMX_SC_R_USB_2_PHY>;
};
+
+ rawnand_0_lpcg: clock-controller@5b290000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5b290000 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_NAND IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_NAND IMX_SC_PM_CLK_MST_BUS>,
+ <&conn_axi_clk>,
+ <&conn_axi_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>;
+ clock-output-names = "gpmi_bch",
+ "gpmi_io",
+ "gpmi_apb",
+ "gpmi_bch_apb";
+ power-domains = <&pd IMX_SC_R_NAND>;
+ };
+
+ rawnand_4_lpcg: clock-controller@5b290004 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5b290004 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&conn_axi_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>;
+ clock-output-names = "apbhdma_hclk";
+ power-domains = <&pd IMX_SC_R_NAND>;
+ };
+
+ dma_apbh: dma-controller@5b810000 {
+ compatible = "fsl,imx8qxp-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x5b810000 0x2000>;
+ interrupts = <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ dma-channels = <4>;
+ clocks = <&rawnand_4_lpcg IMX_LPCG_CLK_0>;
+ power-domains = <&pd IMX_SC_R_NAND>;
+ };
+
+ gpmi: nand-controller@5b812000 {
+ compatible = "fsl,imx8qxp-gpmi-nand";
+ reg = <0x5b812000 0x2000>, <0x5b814000 0x2000>;
+ reg-names = "gpmi-nand", "bch";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "bch";
+ clocks = <&rawnand_0_lpcg IMX_LPCG_CLK_1>,
+ <&rawnand_0_lpcg IMX_LPCG_CLK_4>,
+ <&rawnand_0_lpcg IMX_LPCG_CLK_0>,
+ <&rawnand_0_lpcg IMX_LPCG_CLK_5>;
+ clock-names = "gpmi_io", "gpmi_apb",
+ "gpmi_bch", "gpmi_bch_apb";
+ dmas = <&dma_apbh 0>;
+ dma-names = "rx-tx";
+ power-domains = <&pd IMX_SC_R_NAND>;
+ assigned-clocks = <&clk IMX_SC_R_NAND IMX_SC_PM_CLK_MST_BUS>;
+ assigned-clock-rates = <50000000>;
+ status = "disabled";
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
index 2412ab145c06..1a74ac3ee4ee 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
@@ -24,6 +24,19 @@
stdout-path = &lpuart0;
};
+ imx8dxl-cm4 {
+ compatible = "fsl,imx8qxp-cm4";
+ clocks = <&clk_dummy>;
+ mbox-names = "tx", "rx", "rxdb";
+ mboxes = <&lsio_mu5 0 1 &lsio_mu5 1 1 &lsio_mu5 3 1>;
+ memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>,
+ <&vdev1vring0>, <&vdev1vring1>, <&rsc_table>;
+ power-domains = <&pd IMX_SC_R_M4_0_PID0>, <&pd IMX_SC_R_M4_0_MU_1A>;
+ fsl,resource-id = <IMX_SC_R_M4_0_PID0>;
+ fsl,entry-address = <0x34fe0000>;
+ };
+
+
memory@80000000 {
device_type = "memory";
reg = <0x00000000 0x80000000 0 0x40000000>;
@@ -51,6 +64,37 @@
alloc-ranges = <0 0x98000000 0 0x14000000>;
linux,cma-default;
};
+
+ vdev0vring0: memory@90000000 {
+ reg = <0 0x90000000 0 0x8000>;
+ no-map;
+ };
+
+ vdev0vring1: memory@90008000 {
+ reg = <0 0x90008000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring0: memory@90010000 {
+ reg = <0 0x90010000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring1: memory@90018000 {
+ reg = <0 0x90018000 0 0x8000>;
+ no-map;
+ };
+
+ rsc_table: memory-rsc-table@900ff000 {
+ reg = <0 0x900ff000 0 0x1000>;
+ no-map;
+ };
+
+ vdevbuffer: memory-vdevbuffer@90400000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x90400000 0 0x100000>;
+ no-map;
+ };
};
m2_uart1_sel: regulator-m2uart1sel {
@@ -137,6 +181,76 @@
enable-active-high;
regulator-always-on;
};
+
+ bt_sco_codec: audio-codec-bt {
+ compatible = "linux,bt-sco";
+ #sound-dai-cells = <1>;
+ };
+
+ sound-bt-sco {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "bt-sco-audio";
+ simple-audio-card,format = "dsp_a";
+ simple-audio-card,bitclock-inversion;
+ simple-audio-card,frame-master = <&btcpu>;
+ simple-audio-card,bitclock-master = <&btcpu>;
+
+ btcpu: simple-audio-card,cpu {
+ sound-dai = <&sai0>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&bt_sco_codec 1>;
+ };
+ };
+
+ sound-wm8960-1 {
+ compatible = "fsl,imx-audio-wm8960";
+ model = "wm8960-audio";
+ audio-cpu = <&sai1>;
+ audio-codec = <&wm8960_1>;
+ audio-asrc = <&asrc0>;
+ audio-routing = "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R",
+ "Ext Spk", "SPK_LP",
+ "Ext Spk", "SPK_LN",
+ "Ext Spk", "SPK_RP",
+ "Ext Spk", "SPK_RN",
+ "LINPUT1", "Mic Jack",
+ "Mic Jack", "MICB";
+ };
+
+ sound-wm8960-2 {
+ compatible = "fsl,imx-audio-wm8960";
+ model = "wm8960-audio-2";
+ audio-cpu = <&sai2>;
+ audio-codec = <&wm8960_2>;
+ audio-routing = "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R",
+ "Ext Spk", "SPK_LP",
+ "Ext Spk", "SPK_LN",
+ "Ext Spk", "SPK_RP",
+ "Ext Spk", "SPK_RN",
+ "LINPUT1", "Mic Jack",
+ "Mic Jack", "MICB";
+ };
+
+ sound-wm8960-3 {
+ compatible = "fsl,imx-audio-wm8960";
+ model = "wm8960-audio-3";
+ audio-cpu = <&sai3>;
+ audio-codec = <&wm8960_3>;
+ audio-routing = "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R",
+ "Ext Spk", "SPK_LP",
+ "Ext Spk", "SPK_LN",
+ "Ext Spk", "SPK_RP",
+ "Ext Spk", "SPK_RN",
+ "LINPUT1", "Mic Jack",
+ "Mic Jack", "MICB";
+ };
};
&adc0 {
@@ -144,6 +258,11 @@
status = "okay";
};
+&asrc0 {
+ fsl,asrc-rate = <48000>;
+ status = "okay";
+};
+
&eqos {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_eqos>;
@@ -271,6 +390,78 @@
};
};
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+
+ wm8960_1: audio-codec@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+ clocks = <&mclkout1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "mclk";
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>,
+ <49152000>,
+ <12288000>,
+ <12288000>;
+ wlf,shared-lrclk;
+ wlf,hp-cfg = <2 2 3>;
+ wlf,gpio-cfg = <1 3>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+
+ wm8960_2: audio-codec@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+ clocks = <&mclkout1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "mclk";
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>,
+ <49152000>,
+ <12288000>,
+ <12288000>;
+ wlf,shared-lrclk;
+ wlf,hp-cfg = <2 2 3>;
+ wlf,gpio-cfg = <1 3>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ wm8960_3: audio-codec@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+ clocks = <&mclkout1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "mclk";
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>,
+ <49152000>,
+ <12288000>,
+ <12288000>;
+ wlf,shared-lrclk;
+ wlf,hp-cfg = <2 2 3>;
+ wlf,gpio-cfg = <1 3>;
+ };
+ };
+
i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
@@ -358,6 +549,10 @@
status = "okay";
};
+&lsio_mu5 {
+ status = "okay";
+};
+
&flexcan2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan2>;
@@ -390,6 +585,53 @@
status = "okay";
};
+&sai0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai0>;
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ status = "okay";
+};
+
+&sai1 {
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ status = "okay";
+};
+
+&sai2 {
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai2_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ fsl,sai-asynchronous;
+ status = "okay";
+};
+
+&sai3 {
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai3_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ fsl,sai-asynchronous;
+ status = "okay";
+};
+
&thermal_zones {
pmic-thermal {
polling-delay-passive = <250>;
@@ -632,6 +874,41 @@
>;
};
+ pinctrl_sai0: sai0grp {
+ fsl,pins = <
+ IMX8DXL_SPI0_CS0_ADMA_SAI0_RXD 0x06000060
+ IMX8DXL_SPI0_CS1_ADMA_SAI0_RXC 0x06000040
+ IMX8DXL_SPI0_SCK_ADMA_SAI0_TXC 0x06000060
+ IMX8DXL_SPI0_SDI_ADMA_SAI0_TXD 0x06000060
+ IMX8DXL_SPI0_SDO_ADMA_SAI0_TXFS 0x06000040
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ IMX8DXL_FLEXCAN0_RX_ADMA_SAI1_TXC 0x06000040
+ IMX8DXL_FLEXCAN0_TX_ADMA_SAI1_TXFS 0x06000040
+ IMX8DXL_FLEXCAN1_RX_ADMA_SAI1_TXD 0x06000060
+ IMX8DXL_FLEXCAN1_TX_ADMA_SAI1_RXD 0x06000060
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ IMX8DXL_SNVS_TAMPER_OUT3_ADMA_SAI2_RXC 0x06000040
+ IMX8DXL_SNVS_TAMPER_IN0_ADMA_SAI2_RXFS 0x06000040
+ IMX8DXL_SNVS_TAMPER_OUT4_ADMA_SAI2_RXD 0x06000060
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ IMX8DXL_SNVS_TAMPER_IN1_ADMA_SAI3_RXC 0x06000040
+ IMX8DXL_SNVS_TAMPER_IN3_ADMA_SAI3_RXFS 0x06000040
+ IMX8DXL_SNVS_TAMPER_IN2_ADMA_SAI3_RXD 0x06000060
+ >;
+ };
+
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
IMX8DXL_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-ss-adma.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl-ss-adma.dtsi
index 5d012c95222f..72434529f78e 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-ss-adma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-ss-adma.dtsi
@@ -3,6 +3,63 @@
* Copyright 2019~2020, 2022 NXP
*/
+/delete-node/ &asrc1;
+/delete-node/ &asrc1_lpcg;
+/delete-node/ &adc1;
+/delete-node/ &adc1_lpcg;
+/delete-node/ &amix;
+/delete-node/ &amix_lpcg;
+/delete-node/ &edma1;
+/delete-node/ &esai0;
+/delete-node/ &esai0_lpcg;
+/delete-node/ &sai4;
+/delete-node/ &sai4_lpcg;
+/delete-node/ &sai5;
+/delete-node/ &sai5_lpcg;
+
+&acm {
+ compatible = "fsl,imx8dxl-acm";
+ power-domains = <&pd IMX_SC_R_AUDIO_CLK_0>,
+ <&pd IMX_SC_R_AUDIO_CLK_1>,
+ <&pd IMX_SC_R_MCLK_OUT_0>,
+ <&pd IMX_SC_R_MCLK_OUT_1>,
+ <&pd IMX_SC_R_AUDIO_PLL_0>,
+ <&pd IMX_SC_R_AUDIO_PLL_1>,
+ <&pd IMX_SC_R_ASRC_0>,
+ <&pd IMX_SC_R_SAI_0>,
+ <&pd IMX_SC_R_SAI_1>,
+ <&pd IMX_SC_R_SAI_2>,
+ <&pd IMX_SC_R_SAI_3>,
+ <&pd IMX_SC_R_SPDIF_0>,
+ <&pd IMX_SC_R_MQS_0>;
+ clocks = <&aud_rec0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_rec1_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>,
+ <&clk_ext_aud_mclk0>,
+ <&clk_ext_aud_mclk1>,
+ <&clk_spdif0_rx>,
+ <&clk_sai0_rx_bclk>,
+ <&clk_sai0_tx_bclk>,
+ <&clk_sai1_rx_bclk>,
+ <&clk_sai1_tx_bclk>,
+ <&clk_sai2_rx_bclk>,
+ <&clk_sai3_rx_bclk>;
+ clock-names = "aud_rec_clk0_lpcg_clk",
+ "aud_rec_clk1_lpcg_clk",
+ "aud_pll_div_clk0_lpcg_clk",
+ "aud_pll_div_clk1_lpcg_clk",
+ "ext_aud_mclk0",
+ "ext_aud_mclk1",
+ "spdif0_rx",
+ "sai0_rx_bclk",
+ "sai0_tx_bclk",
+ "sai1_rx_bclk",
+ "sai1_tx_bclk",
+ "sai2_rx_bclk",
+ "sai3_rx_bclk";
+};
+
&audio_ipg_clk {
clock-frequency = <160000000>;
};
@@ -177,3 +234,24 @@
&lpspi3 {
interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
};
+
+&sai0 {
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&sai1 {
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&sai2 {
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&sai3 {
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&spdif0 {
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>, /* rx */
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>; /* tx */
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl-ss-conn.dtsi
index 6d13e4fafb76..1e02b04494e9 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-ss-conn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-ss-conn.dtsi
@@ -108,6 +108,13 @@
};
+&dma_apbh {
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+};
+
&enet0_lpcg {
clocks = <&conn_enet0_root_clk>,
<&conn_enet0_root_clk>,
@@ -127,6 +134,10 @@
assigned-clock-rates = <125000000>;
};
+&gpmi {
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+};
+
&usdhc1 {
compatible = "fsl,imx8dxl-usdhc", "fsl,imx8qxp-usdhc";
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index 90d1901df2b1..930e14fec423 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -400,7 +400,7 @@
pinctrl-0 = <&pinctrl_typec1>;
reg = <0x50>;
interrupt-parent = <&gpio2>;
- interrupts = <11 8>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
status = "okay";
typec1_con: connector {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-iot-gateway.dts b/arch/arm64/boot/dts/freescale/imx8mm-iot-gateway.dts
new file mode 100644
index 000000000000..370558a8ba46
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-iot-gateway.dts
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+//
+// Copyright 2020 CompuLab
+
+#include "imx8mm-ucm-som.dtsi"
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+/ {
+ model = "CompuLab i.MX8MM IoT Gateway";
+ compatible = "compulab,imx8mm-iot-gateway", "compulab,imx8mm-ucm-som", "fsl,imx8mm";
+
+ regulator-usbhub-ena {
+ compatible = "regulator-fixed";
+ regulator-name = "usbhub_ena";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-usbhub-rst {
+ compatible = "regulator-fixed";
+ regulator-name = "usbhub_rst";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-uart1-mode {
+ compatible = "regulator-fixed";
+ regulator-name = "uart1_mode";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio4 26 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-uart1-duplex {
+ compatible = "regulator-fixed";
+ regulator-name = "uart1_duplex";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio4 27 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-uart1-shdn {
+ compatible = "regulator-fixed";
+ regulator-name = "uart1_shdn";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio5 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-uart1-trmen {
+ compatible = "regulator-fixed";
+ regulator-name = "uart1_trmen";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ };
+
+ regulator-usdhc2-v {
+ compatible = "regulator-fixed";
+ regulator-name = "usdhc2_v";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-mpcie2-rst {
+ compatible = "regulator-fixed";
+ regulator-name = "mpcie2_rst";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-mpcie2lora-dis {
+ compatible = "regulator-fixed";
+ regulator-name = "mpcie2lora_dis";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ pcie0_refclk: clock-pcie0-refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ eeprom@54 {
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ pagesize = <16>;
+ };
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1_cs>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&pcie_phy {
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
+ fsl,tx-deemph-gen1 = <0x2d>;
+ fsl,tx-deemph-gen2 = <0xf>;
+ fsl,clkreq-unsupported;
+ clocks = <&pcie0_refclk>;
+ clock-names = "ref";
+ status = "okay";
+};
+
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie0>;
+ reset-gpio = <&gpio3 20 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbotg2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dr_mode = "host";
+ usb-role-switch;
+ status = "okay";
+
+ usbhub@1 {
+ compatible = "usb424,9514";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb9514>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet: ethernet@1 {
+ compatible = "usb424,ec00";
+ reg = <1>;
+ };
+ };
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <4>;
+ mmc-ddr-1_8v;
+ non-removable;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ /* mPCIe2 */
+ MX8MM_IOMUXC_SAI5_RXD0_GPIO3_IO21 0x140
+ MX8MM_IOMUXC_SAI5_RXD1_GPIO3_IO22 0x140
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x82
+ MX8MM_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x82
+ MX8MM_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x82
+ >;
+ };
+
+ pinctrl_ecspi1_cs: ecspi1csgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x40000
+ >;
+ };
+
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x140
+ >;
+ };
+
+ pinctrl_usb9514: usb9514grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x140 /* USB_PS_EN */
+ MX8MM_IOMUXC_SAI5_RXD3_GPIO3_IO24 0x140 /* HUB_RSTn */
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
new file mode 100644
index 000000000000..bf3e04651ba0
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2021 PHYTEC Messtechnik GmbH
+ * Author: Jens Lang <j.lang@phytec.de>
+ *
+ * Tauri-L 2 x RS232:
+ * - GPIO3_20 uart4_rs485_en needs to be driven low (inactive)
+ */
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx8mm-pinfunc.h"
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ compatible = "phytec,imx8mm-phygate-tauri-l";
+
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio3_hog>;
+
+ uart4_rs485_en {
+ gpio-hog;
+ gpios = <20 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "uart4_rs485_en";
+ };
+};
+
+/* UART2 - RS232 */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART2>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ status = "okay";
+};
+
+/* UART4 - RS232 */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART4>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_gpio3_hog: gpio3hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x49
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x00
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x00
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x49
+ MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x49
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
new file mode 100644
index 000000000000..f4448cde0407
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2021 PHYTEC Messtechnik GmbH
+ * Author: Jens Lang <j.lang@phytec.de>
+ *
+ * Tauri-L RS232 + RS485:
+ * - GPIO3_20 uart4_rs485_en needs to be driven high (active)
+ * - GPIO3_25 RS485_DE Driver enable
+ */
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx8mm-pinfunc.h"
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ compatible = "phytec,imx8mm-phygate-tauri-l";
+
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio3_hog>;
+
+ uart4_rs485_en {
+ gpio-hog;
+ gpios = <20 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "uart4_rs485_en";
+ };
+};
+
+/* UART2 - RS232 */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART2>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ status = "okay";
+};
+
+/* UART4 - RS485 */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART4>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ rts-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+ linux,rs485-enabled-at-boot-time;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_gpio3_hog: gpio3hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x49
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x00
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x00
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x49
+ MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x49
+ MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x49
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso
new file mode 100644
index 000000000000..107f743fbb1c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2023 PHYTEC Messtechnik GmbH
+ * Author: Jens Lang <j.lang@phytec.de>
+ *
+ * Tauri-L RS232 with RTS/CTS hardware flow control:
+ * - UART4_TX becomes RTS
+ * - UART4_RX becomes CTS
+ */
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+#include "imx8mm-pinfunc.h"
+
+/dts-v1/;
+/plugin/;
+
+
+&{/} {
+ compatible = "phytec,imx8mm-phygate-tauri-l";
+
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART2>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x00
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x00
+ MX8MM_IOMUXC_UART4_RXD_UART2_DCE_CTS_B 0x00
+ MX8MM_IOMUXC_UART4_TXD_UART2_DCE_RTS_B 0x00
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
index 27a902569e2a..ba6ce3c7f477 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
@@ -7,6 +7,7 @@
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
#include "imx8mm-phycore-som.dtsi"
/ {
@@ -185,6 +186,15 @@
status = "okay";
};
+&pcie_phy {
+ clocks = <&clk IMX8MM_CLK_PCIE1_PHY>;
+ fsl,clkreq-unsupported;
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_OUTPUT>;
+ fsl,tx-deemph-gen1 = <0x2d>;
+ fsl,tx-deemph-gen2 = <0xf>;
+ status = "okay";
+};
+
&pwm1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
index 8c0c6e715924..ca0205b9019e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
@@ -62,11 +62,15 @@
flash0: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
spi-max-frequency = <84000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-ucm-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-ucm-som.dtsi
new file mode 100644
index 000000000000..d3b21203c5f4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-ucm-som.dtsi
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+//
+// Copyright 2018 CompuLab
+
+/dts-v1/;
+
+#include "imx8mm.dtsi"
+#include <dt-bindings/leds/common.h>
+
+/ {
+ aliases {
+ rtc0 = &rtc_i2c;
+ rtc1 = &snvs_rtc;
+ mmc0 = &usdhc3;
+ };
+
+ chosen {
+ stdout-path = &uart3;
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm2 0 3000000 0>;
+ brightness-levels = <0 255>;
+ num-interpolated-steps = <255>;
+ default-brightness-level = <222>;
+ status = "okay";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_led>;
+
+ heartbeat-led {
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ pmic_osc: clock-pmic {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "pmic_osc";
+ };
+
+ wlreg_on: regulator-wlreg-on {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "wlreg_on";
+ gpio = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <100>;
+ enable-active-high;
+ regulator-always-on;
+ status = "okay";
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <100>;
+ off-on-delay-us = <12000>;
+ };
+
+ regulator-usdhc3rst {
+ compatible = "regulator-fixed";
+ regulator-name = "usdhc3_rst";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ enable-active-high;
+ };
+
+ regulator-fec1rst {
+ compatible = "regulator-fixed";
+ regulator-name = "fec1_rst";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ enable-active-high;
+ startup-delay-us = <500>;
+ regulator-boot-on;
+ };
+};
+
+&A53_0 {
+ arm-supply = <&buck2>;
+};
+
+&cpu_alert0 {
+ temperature = <105000>;
+};
+
+&cpu_crit0 {
+ temperature = <115000>;
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ pmic@4b {
+ reg = <0x4b>;
+ compatible = "rohm,bd71837";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ #clock-cells = <0>;
+ clocks = <&pmic_osc>;
+ clock-names = "osc";
+ clock-output-names = "pmic_clk";
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ rohm,reset-snvs-powered;
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "buck2";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ rohm,dvs-run-voltage = <1000000>;
+ rohm,dvs-idle-voltage = <900000>;
+ };
+
+ buck3: BUCK3 {
+ regulator-name = "buck3";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck4: BUCK4 {
+ regulator-name = "buck4";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: BUCK5 {
+ regulator-name = "buck5";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6: BUCK6 {
+ regulator-name = "buck6";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck7: BUCK7 {
+ regulator-name = "buck7";
+ regulator-min-microvolt = <1605000>;
+ regulator-max-microvolt = <1995000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck8: BUCK8 {
+ regulator-name = "buck8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2: LDO2 {
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3: LDO3 {
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: LDO4 {
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo5: LDO5 {
+ regulator-name = "ldo5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo6: LDO6 {
+ regulator-name = "ldo6";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo7: LDO7 {
+ regulator-name = "ldo7";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c08";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ rtc_i2c: rtc@69 {
+ compatible = "abracon,ab1805";
+ reg = <0x69>;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "disabled";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm_backlight>;
+ status = "okay";
+};
+
+&sai2 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ assigned-clocks = <&clk IMX8MM_CLK_SAI2>;
+ assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <49152000>;
+ clocks = <&clk IMX8MM_CLK_SAI2_IPG>, <&clk IMX8MM_CLK_DUMMY>,
+ <&clk IMX8MM_CLK_SAI2_ROOT>, <&clk IMX8MM_CLK_DUMMY>,
+ <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_AUDIO_PLL1_OUT>,
+ <&clk IMX8MM_AUDIO_PLL2_OUT>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3", "pll8k", "pll11k";
+ fsl,sai-asynchronous;
+ status = "okay";
+};
+
+&snvs {
+ status = "okay";
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART1>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART2>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ status = "disabled";
+};
+
+&uart3 { /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 { /* bluetooth */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART4>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ uart-has-rtscts;
+ status = "disabled";
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_bt>;
+ max-speed = <3000000>;
+ device-wakeup-gpios = <&gpio2 7 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio2 6 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ disable-over-current;
+ status = "disabled";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ hnp-disable;
+ srp-disable;
+ disable-over-current;
+ status = "disabled";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>, <&pinctrl_usdhc1_gpio>;
+ bus-width = <4>;
+ non-removable;
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+ no-1-8-v;
+ bus-width = <4>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ no-1-8-v;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x19
+ MX8MM_IOMUXC_NAND_READY_B_GPIO3_IO16 0x190
+ >;
+ };
+
+ pinctrl_bt: bt0grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_DATA4_GPIO2_IO6 0x19 /* BT_REG_ON */
+ MX8MM_IOMUXC_SD1_DATA5_GPIO2_IO7 0x19 /* BT_DEV_WU */
+ MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x19 /* BT_HST_WU */
+ >;
+ };
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ >;
+ };
+
+ pinctrl_gpio_led: gpioledgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x19
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x41
+ >;
+ };
+
+ pinctrl_pwm_backlight: pwmbacklightgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO13_PWM2_OUT 0x03
+ >;
+ };
+
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI2_MCLK_SAI2_MCLK 0xd6
+ MX8MM_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI2_RXFS_SAI2_RX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0xd6
+ MX8MM_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0xd6
+ MX8MM_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x49
+ MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x49
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_MISO_UART4_DCE_CTS_B 0x140
+ MX8MM_IOMUXC_ECSPI2_MOSI_UART4_DCE_TX 0x140
+ MX8MM_IOMUXC_ECSPI2_SS0_UART4_DCE_RTS_B 0x140
+ MX8MM_IOMUXC_ECSPI2_SCLK_UART4_DCE_RX 0x140
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1grpgpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x41
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ MX8MM_IOMUXC_GPIO1_IO03_USDHC1_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1grp100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x194
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d4
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d4
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d4
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d4
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d4
+ MX8MM_IOMUXC_GPIO1_IO03_USDHC1_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x196
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d6
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d6
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d6
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d6
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d6
+ MX8MM_IOMUXC_GPIO1_IO03_USDHC1_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2grpgpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x41
+ MX8MM_IOMUXC_SD2_WP_GPIO2_IO20 0x00
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2grp100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2grp200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x40000190
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x40000194
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x40000196
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
index de7f67a4ff2a..36803b038cd5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/net/ti-dp83867.h>
/ {
@@ -113,6 +114,25 @@
ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
index 35ae0faa815b..136cb30df03a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -364,8 +364,6 @@
interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
index c11260c26d0b..1d56f2a6c06a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
@@ -314,8 +314,6 @@
interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
index db1737bf637d..45470160f98f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
@@ -280,8 +280,6 @@
interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
index 05489a31e7fd..ef951bc9f0dd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
@@ -330,8 +330,6 @@
interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 4768b05fd765..5fa395914191 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/phy/phy-imx8-pcie.h>
#include <dt-bindings/pwm/pwm.h>
#include "imx8mm.dtsi"
+#include "imx8mm-overdrive.dtsi"
/ {
chosen {
@@ -227,15 +228,16 @@
pinctrl-0 = <&pinctrl_ecspi2>;
};
-/* Verdin CAN_1 (On-module) */
+/* On-module SPI */
&ecspi3 {
#address-cells = <1>;
#size-cells = <0>;
- cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>;
+ cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>, <&gpio4 19 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ecspi3>;
+ pinctrl-0 = <&pinctrl_ecspi3>, <&pinctrl_tpm_spi_cs>;
status = "okay";
+ /* Verdin CAN_1 */
can1: can@0 {
compatible = "microchip,mcp251xfd";
clocks = <&clk40m>;
@@ -245,6 +247,12 @@
reg = <0>;
spi-max-frequency = <8500000>;
};
+
+ verdin_som_tpm: tpm@1 {
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
+ reg = <0x1>;
+ spi-max-frequency = <36000000>;
+ };
};
/* Verdin ETH_1 (On-module PHY) */
@@ -547,7 +555,7 @@
/* Verdin I2C_2_DSI */
&i2c2 {
- clock-frequency = <10000>;
+ clock-frequency = <400000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
@@ -807,8 +815,7 @@
pinctrl-0 = <&pinctrl_gpio1>, <&pinctrl_gpio2>,
<&pinctrl_gpio3>, <&pinctrl_gpio4>,
<&pinctrl_gpio7>, <&pinctrl_gpio8>,
- <&pinctrl_gpio_hog1>, <&pinctrl_gpio_hog2>, <&pinctrl_gpio_hog3>,
- <&pinctrl_pmic_tpm_ena>;
+ <&pinctrl_gpio_hog1>, <&pinctrl_gpio_hog2>, <&pinctrl_gpio_hog3>;
pinctrl_can1_int: can1intgrp {
fsl,pins =
@@ -935,7 +942,7 @@
/* Verdin GPIO_9_DSI (pulled-up as active-low) */
pinctrl_gpio_9_dsi: gpio9dsigrp {
fsl,pins =
- <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
+ <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
};
/* Verdin GPIO_10_DSI (pulled-up as active-low) */
@@ -1110,7 +1117,7 @@
};
/* control signal for optional ATTPM20P or SE050 */
- pinctrl_pmic_tpm_ena: pmictpmenagrp {
+ pinctrl_tpm_spi_cs: tpmspicsgrp {
fsl,pins =
<MX8MM_IOMUXC_SAI1_TXD7_GPIO4_IO19 0x106>; /* PMIC_TPM_ENA */
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi
index fb24b9aa1b93..e68a3fd73e17 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi
@@ -60,11 +60,15 @@
flash0: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
spi-max-frequency = <84000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
index 0b1fa04f1d67..72004ab6bda5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
@@ -312,8 +312,6 @@
interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
index e5d3901f2913..17e2c19d8455 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
@@ -302,10 +302,18 @@
adv_bridge: hdmi@3d {
compatible = "adi,adv7535";
- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>;
+ reg-names = "main";
+ interrupt-parent = <&gpio4>;
+ interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
adi,dsi-lanes = <4>;
#sound-dai-cells = <0>;
+ avdd-supply = <&buck5>;
+ dvdd-supply = <&buck5>;
+ pvdd-supply = <&buck5>;
+ a2vdd-supply = <&buck5>;
+ v1p2-supply = <&buck5>;
+ v3p3-supply = <&buck4>;
ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
index 8be251b69378..15f7ab58db36 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
@@ -71,7 +71,6 @@
mtl_rx_setup: rx-queues-config {
snps,rx-queues-to-use = <5>;
- snps,rx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -106,7 +105,6 @@
mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <5>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
index 9b8f97a84e61..af02af9e5334 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
@@ -20,6 +20,18 @@
stdout-path = &uart2;
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -94,6 +106,28 @@
};
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
&i2c1 {
clock-frequency = <400000>;
pinctrl-names = "default";
@@ -239,6 +273,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
&snvs_pwrkey {
status = "okay";
};
@@ -356,6 +394,15 @@
>;
};
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x1c3
+ MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x1c3
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x19
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x19
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c2
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk2.dts b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk2.dts
index 3b1c940860e0..ebdf13e97b4e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk2.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk2.dts
@@ -69,6 +69,18 @@
};
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "X38";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
led {
compatible = "gpio-leds";
@@ -184,6 +196,33 @@
status = "okay";
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ ddc-i2c-bus = <&i2c5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
&pcie_phy {
clock-names = "ref";
clocks = <&hsio_blk_ctrl>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
index ac7ec7533a3c..ef012e8365b1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
@@ -75,6 +75,18 @@
};
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "X28";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
led {
compatible = "gpio-leds";
@@ -248,6 +260,33 @@
status = "okay";
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ ddc-i2c-bus = <&i2cmuxed1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
&pcie_phy {
clocks = <&pcieclk 1>;
clock-names = "ref";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
index 43f1d45ccc96..a90e28c07e3f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
@@ -78,6 +78,11 @@
cpu-supply = <&buck2>;
};
+&audio_blk_ctrl {
+ assigned-clocks = <&clk IMX8MP_AUDIO_PLL1>;
+ assigned-clock-rates = <393216000>;
+};
+
&ecspi1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi1>;
@@ -105,14 +110,14 @@
#size-cells = <0>;
/* Up to one of these two PHYs may be populated. */
- ethphy0f: ethernet-phy@0 { /* SMSC LAN8740Ai */
+ ethphy0f: ethernet-phy@1 { /* SMSC LAN8740Ai */
compatible = "ethernet-phy-id0007.c110",
"ethernet-phy-ieee802.3-c22";
interrupt-parent = <&gpio3>;
interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&pinctrl_ethphy0>;
pinctrl-names = "default";
- reg = <0>;
+ reg = <1>;
reset-assert-us = <1000>;
reset-deassert-us = <1000>;
reset-gpios = <&ioexp 4 GPIO_ACTIVE_LOW>;
@@ -151,14 +156,14 @@
#size-cells = <0>;
/* Up to one PHY may be populated. */
- ethphy1f: ethernet-phy@1 { /* SMSC LAN8740Ai */
+ ethphy1f: ethernet-phy@2 { /* SMSC LAN8740Ai */
compatible = "ethernet-phy-id0007.c110",
"ethernet-phy-ieee802.3-c22";
interrupt-parent = <&gpio4>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&pinctrl_ethphy1>;
pinctrl-names = "default";
- reg = <1>;
+ reg = <2>;
reset-assert-us = <1000>;
reset-deassert-us = <1000>;
reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
@@ -254,7 +259,7 @@
<&clk IMX8MP_CLK_CLKOUT2>,
<&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
- assigned-clock-rates = <13000000>, <13000000>, <156000000>;
+ assigned-clock-rates = <13000000>, <13000000>, <208000000>;
reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-mx8-dlvds-lcd1.dtso b/arch/arm64/boot/dts/freescale/imx8mp-evk-mx8-dlvds-lcd1.dtso
new file mode 100644
index 000000000000..1b71890d43d5
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-mx8-dlvds-lcd1.dtso
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ panel-lvds {
+ compatible = "koe,tx26d202vm0bwa";
+ backlight = <&backlight_lvds>;
+ power-supply = <&reg_vext_3v3>;
+
+ panel-timing {
+ clock-frequency = <148500000>;
+ hactive = <1920>;
+ vactive = <1200>;
+ hfront-porch = <130>;
+ hback-porch = <70>;
+ hsync-len = <30>;
+ vfront-porch = <5>;
+ vback-porch = <5>;
+ vsync-len = <5>;
+ de-active = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dual-lvds-odd-pixels;
+
+ panel_in_odd: endpoint {
+ remote-endpoint = <&ldb_lvds_ch0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dual-lvds-even-pixels;
+
+ panel_in_even: endpoint {
+ remote-endpoint = <&ldb_lvds_ch1>;
+ };
+ };
+ };
+ };
+};
+
+&backlight_lvds {
+ status = "okay";
+};
+
+&lcdif2 {
+ status = "okay";
+};
+
+&lvds_bridge {
+ status = "okay";
+
+ ports {
+ port@1 {
+ ldb_lvds_ch0: endpoint {
+ remote-endpoint = <&panel_in_odd>;
+ };
+ };
+
+ port@2 {
+ ldb_lvds_ch1: endpoint {
+ remote-endpoint = <&panel_in_even>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index 8be5b2a57f27..938347704136 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -16,6 +16,16 @@
stdout-path = &uart2;
};
+ backlight_lvds: backlight-lvds {
+ compatible = "pwm-backlight";
+ pwms = <&pwm2 0 100000 0>;
+ brightness-levels = <0 100>;
+ num-interpolated-steps = <100>;
+ default-brightness-level = <100>;
+ power-supply = <&reg_per_12v>;
+ status = "disabled";
+ };
+
hdmi-connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -96,6 +106,15 @@
enable-active-high;
};
+ reg_per_12v: regulator-per-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "PER_12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&pca6416 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
reg_usdhc2_vmmc: regulator-usdhc2 {
compatible = "regulator-fixed";
pinctrl-names = "default";
@@ -114,6 +133,11 @@
regulator-max-microvolt = <3300000>;
};
+ audio_codec_bt_sco: audio-codec-bt-sco {
+ compatible = "linux,bt-sco";
+ #sound-dai-cells = <1>;
+ };
+
sound {
compatible = "simple-audio-card";
simple-audio-card,name = "wm8960-audio";
@@ -145,6 +169,25 @@
};
+ sound-bt-sco {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "bt-sco-audio";
+ simple-audio-card,format = "dsp_a";
+ simple-audio-card,bitclock-inversion;
+ simple-audio-card,frame-master = <&btcpu>;
+ simple-audio-card,bitclock-master = <&btcpu>;
+
+ btcpu: simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&audio_codec_bt_sco 1>;
+ };
+ };
+
sound-hdmi {
compatible = "fsl,imx-audio-hdmi";
model = "audio-hdmi";
@@ -166,6 +209,19 @@
};
};
+ sound-xcvr {
+ compatible = "fsl,imx-audio-card";
+ model = "imx-audio-xcvr";
+
+ pri-dai-link {
+ link-name = "XCVR PCM";
+
+ cpu {
+ sound-dai = <&xcvr>;
+ };
+ };
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -251,7 +307,6 @@
mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <5>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -608,6 +663,17 @@
status = "okay";
};
+&sai2 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ assigned-clocks = <&clk IMX8MP_CLK_SAI2>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
@@ -694,7 +760,15 @@
status = "okay";
};
+&xcvr {
+ #sound-dai-cells = <0>;
+ status = "okay";
+};
+
&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
pinctrl_audio_pwr_reg: audiopwrreggrp {
fsl,pins = <
MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0xd6
@@ -784,6 +858,12 @@
>;
};
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c2
@@ -880,6 +960,15 @@
>;
};
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK 0xd6
+ MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC 0xd6
+ MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00 0xd6
+ MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_SAI2_RX_DATA00 0xd6
+ >;
+ };
+
pinctrl_sai3: sai3grp {
fsl,pins = <
MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI3_TX_SYNC 0xd6
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s-ep1.dts b/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s-ep1.dts
index da4b1807c275..83194ea7cb81 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s-ep1.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-msc-sm2s-ep1.dts
@@ -46,6 +46,24 @@
};
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
&i2c1 {
sgtl5000: audio-codec@a {
compatible = "fsl,sgtl5000";
@@ -92,6 +110,15 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_smarc_gpio>;
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x1c2
+ MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x1c2
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x10
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x10
+ >;
+ };
+
pinctrl_sai2: sai2grp {
fsl,pins = <
MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC 0xd6
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts
new file mode 100644
index 000000000000..d7fd9d36f824
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2023-2024 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Martin Schmiedel
+ * Author: Alexander Stein
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "imx8mp-tqma8mpql.dtsi"
+
+/ {
+ model = "TQ-Systems i.MX8MPlus TQMa8MPxL on MBa8MP-RAS314";
+ compatible = "tq,imx8mp-tqma8mpql-mba8mp-ras314", "tq,imx8mp-tqma8mpql", "fsl,imx8mp";
+ chassis-type = "embedded";
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ aliases {
+ mmc0 = &usdhc3;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc1;
+ rtc0 = &pcf85063;
+ rtc1 = &snvs_rtc;
+ };
+
+ /* X8 */
+ backlight_lvds: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_backlight>;
+ pwms = <&pwm2 0 5000000 0>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ power-supply = <&reg_vcc_12v0>;
+ enable-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+ };
+
+ /* X7 + X8 */
+ display: display {
+ /*
+ * Display is not fixed, so compatible has to be added from
+ * DT overlay
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvdsdisplay>;
+ power-supply = <&reg_vcc_3v3>;
+ enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ backlight = <&backlight_lvds>;
+ status = "disabled";
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpioled>;
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <0>;
+ gpios = <&gpio4 18 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-2 {
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_STATUS;
+ function-enumerator = <1>;
+ gpios = <&gpio4 19 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "X9";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <100>;
+ off-on-delay-us = <12000>;
+ };
+
+ reg_vcc_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_vcc_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_5V0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_vcc_12v0: regulator-12v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0 0x38000000>;
+ alloc-ranges = <0 0x40000000 0 0xB0000000>;
+ linux,cma-default;
+ };
+ };
+
+ rfkill {
+ compatible = "rfkill-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rfkill>;
+ label = "rfkill-pcie-wlan";
+ radio-type = "wlan";
+ shutdown-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-tlv320aic32x4";
+ model = "tq-mba8mp-ras314";
+ audio-cpu = <&sai5>;
+ audio-codec = <&tlv320aic3x04>;
+ audio-routing =
+ "IN3_L", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HPL",
+ "Headphone Jack", "HPR";
+ };
+};
+
+&ecspi3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi3>;
+ cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>, <&gpio1 6 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy3>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy3: ethernet-phy@3 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos_phy>;
+ reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ enet-phy-lane-no-swap;
+ interrupt-parent = <&gpio4>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_phy>;
+ reset-gpios = <&gpio4 0 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500000>;
+ reset-deassert-us = <50000>;
+ enet-phy-lane-no-swap;
+ interrupt-parent = <&gpio4>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,dp83867-rxctrl-strap-quirk;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+ };
+};
+
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio1>;
+
+ gpio-line-names = "WIFI_PMIC_EN", "LVDS_RESET#", "", "",
+ "", "", "GPIO8", "",
+ "", "", "", "",
+ "", "", "GPIO12", "GPIO13",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+
+ wifi-pmic-en-hog {
+ gpio-hog;
+ gpios = <0 0>;
+ output-high;
+ line-name = "WIFI_PMIC_EN";
+ };
+};
+
+&gpio2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio2>;
+
+ gpio-line-names = "GPIO22", "GPIO23", "GPIO24", "GPIO25",
+ "GPIO26", "GPIO27", "CAM_GPIO1", "CAM_GPIO2",
+ "", "", "GPIO1", "GPIO0",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio3>;
+
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "TEMP_EVENT#", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpio4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio4>;
+
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "HDMI_OC#", "GPIO14", "GPIO15", "GPIO16",
+ "GPIO17", "PCIE_WAKE#", "GPIO19", "GPIO20",
+ "PCIE_PERST#", "", "", "";
+
+ pewake-hog {
+ gpio-hog;
+ gpios = <25 0>;
+ input;
+ line-name = "PCIE_WAKE#";
+ };
+};
+
+&gpio5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio5>, <&pinctrl_gpt1_gpio>,
+ <&pinctrl_gpt2_gpio>, <&pinctrl_gpt3_gpio>;
+
+ gpio-line-names = "", "GPIO18", "", "GPIO3",
+ "GPIO2", "GPIO21", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "GPIO5", "GPIO6",
+ "", "", "GPIO11", "GPIO10",
+ "GPIO9", "GPIO7", "", "GPIO4",
+ "", "", "", "";
+};
+
+&gpt1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpt1>;
+ status = "disabled";
+};
+
+&gpt2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpt2>;
+ status = "disabled";
+};
+
+&gpt3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpt3>;
+ status = "disabled";
+};
+
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+/* X5 + X6 Camera & Display interface */
+&i2c2 {
+ clock-frequency = <384000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio5 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+};
+
+/* X1 ID_I2C */
+&i2c3 {
+ clock-frequency = <384000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ scl-gpios = <&gpio2 10 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio2 11 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <384000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ pinctrl-1 = <&pinctrl_i2c4_gpio>;
+ scl-gpios = <&gpio5 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ tlv320aic3x04: audio-codec@18 {
+ compatible = "ti,tlv320aic32x4";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tlv320aic3x04>;
+ reg = <0x18>;
+ clock-names = "mclk";
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1>;
+ reset-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
+ iov-supply = <&reg_vcc_3v3>;
+ ldoin-supply = <&reg_vcc_3v3>;
+ };
+};
+
+/* X1 I2C */
+&i2c5 {
+ clock-frequency = <384000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c5>;
+ pinctrl-1 = <&pinctrl_i2c5_gpio>;
+ scl-gpios = <&gpio5 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+};
+
+/* X1 I2C on GPIO24/GPIO25 */
+&i2c6 {
+ clock-frequency = <384000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c6>;
+ pinctrl-1 = <&pinctrl_i2c6_gpio>;
+ scl-gpios = <&gpio2 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio2 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "disabled";
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
+&pcf85063 {
+ /* RTC_EVENT# is connected on MBa8MP-RAS314 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcf85063>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
+};
+
+&pcie_phy {
+ clocks = <&hsio_blk_ctrl>;
+ clock-names = "ref";
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_OUTPUT>;
+ status = "okay";
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>;
+ status = "disabled";
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+ status = "okay";
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+ status = "okay";
+};
+
+&sai5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai5>;
+ assigned-clocks = <&clk IMX8MP_CLK_SAI5>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+/* X1 UART1 */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ uart-has-rtscts;
+ assigned-clocks = <&clk IMX8MP_CLK_UART1>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+ assigned-clocks = <&clk IMX8MP_CLK_UART2>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
+ status = "okay";
+
+ bluetooth {
+ compatible = "nxp,88w8987-bt";
+ };
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ assigned-clocks = <&clk IMX8MP_CLK_UART3>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
+ status = "okay";
+};
+
+&uart4 {
+ /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usb3_0 {
+ fsl,disable-port-power-control;
+ status = "okay";
+};
+
+&usb3_1 {
+ fsl,disable-port-power-control;
+ fsl,permanently-attached;
+ status = "okay";
+};
+
+&usb3_phy0 {
+ vbus-supply = <&reg_vcc_5v0>;
+ status = "okay";
+};
+
+&usb3_phy1 {
+ vbus-supply = <&reg_vcc_5v0>;
+ status = "okay";
+};
+
+&usb_dwc3_0 {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbhub>;
+ status = "okay";
+
+ hub_2_0: hub@1 {
+ compatible = "usb451,8142";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_vcc_3v3>;
+ };
+
+ hub_3_0: hub@2 {
+ compatible = "usb451,8140";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_vcc_3v3>;
+ };
+};
+
+/* X1 SD card on GPIO22-GPIO27 */
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ disable-wp;
+ bus-width = <4>;
+ status = "disabled";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ no-mmc;
+ no-sdio;
+ disable-wp;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_backlight: backlightgrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x14>;
+ };
+
+ pinctrl_ecspi3: ecspi3grp {
+ fsl,pins = <MX8MP_IOMUXC_UART1_RXD__ECSPI3_SCLK 0x140>,
+ <MX8MP_IOMUXC_UART1_TXD__ECSPI3_MOSI 0x140>,
+ <MX8MP_IOMUXC_UART2_RXD__ECSPI3_MISO 0x1c0>,
+ <MX8MP_IOMUXC_UART2_TXD__GPIO5_IO25 0x140>,
+ <MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x140>;
+ };
+
+ pinctrl_ecspi3_gpio: ecspi3gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_UART1_RXD__GPIO5_IO22 0x80>,
+ <MX8MP_IOMUXC_UART1_TXD__GPIO5_IO23 0x80>,
+ <MX8MP_IOMUXC_UART2_RXD__GPIO5_IO24 0x80>,
+ <MX8MP_IOMUXC_UART2_TXD__GPIO5_IO25 0x80>,
+ <MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x80>;
+ };
+
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x40000044>,
+ <MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x40000044>,
+ <MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x90>,
+ <MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x90>,
+ <MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x90>,
+ <MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x90>,
+ <MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x90>,
+ <MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x90>,
+ <MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x12>,
+ <MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x12>,
+ <MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x12>,
+ <MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x12>,
+ <MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x12>,
+ <MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x14>;
+ };
+
+ pinctrl_eqos_phy: eqosphygrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x100>,
+ <MX8MP_IOMUXC_SAI1_RXD1__GPIO4_IO03 0x1c0>;
+ };
+
+ pinctrl_fec: fecgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x40000044>,
+ <MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x40000044>,
+ <MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x90>,
+ <MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x90>,
+ <MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x90>,
+ <MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x90>,
+ <MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x90>,
+ <MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x90>,
+ <MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x12>,
+ <MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x12>,
+ <MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x12>,
+ <MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x12>,
+ <MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x12>,
+ <MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x14>;
+ };
+
+ pinctrl_fec_phy: fecphygrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_RXFS__GPIO4_IO00 0x100>,
+ <MX8MP_IOMUXC_SAI1_RXC__GPIO4_IO01 0x1c0>;
+ };
+
+ pinctrl_gpioled: gpioledgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18 0x14>,
+ <MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19 0x14>;
+ };
+
+ pinctrl_gpio1: gpio1grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO00__GPIO1_IO00 0x14>,
+ <MX8MP_IOMUXC_GPIO1_IO01__GPIO1_IO01 0x14>;
+ };
+
+ pinctrl_gpio2: gpio2grp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_CLK__GPIO2_IO00 0x94>,
+ <MX8MP_IOMUXC_SD1_CMD__GPIO2_IO01 0x94>,
+ <MX8MP_IOMUXC_SD1_DATA0__GPIO2_IO02 0x94>,
+ <MX8MP_IOMUXC_SD1_DATA1__GPIO2_IO03 0x94>,
+ <MX8MP_IOMUXC_SD1_DATA2__GPIO2_IO04 0x94>,
+ <MX8MP_IOMUXC_SD1_DATA3__GPIO2_IO05 0x94>,
+ <MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06 0x94>,
+ <MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07 0x94>;
+ };
+
+ pinctrl_gpio3: gpio3grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x180>;
+ };
+
+ pinctrl_gpio4: gpio4grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x80>,
+ /* PCIE_WAKE# */
+ <MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25 0x180>,
+ <MX8MP_IOMUXC_SAI2_TXD0__GPIO4_IO26 0x94>,
+ <MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x94>;
+ };
+
+ pinctrl_gpio5: gpio5grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI3_TXD__GPIO5_IO01 0x80>,
+ <MX8MP_IOMUXC_SPDIF_EXT_CLK__GPIO5_IO05 0x80>;
+ };
+
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c2>,
+ <MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c2>,
+ <MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010>,
+ <MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000154>;
+ };
+
+ pinctrl_gpt1: gpt1grp {
+ fsl,pins = <MX8MP_IOMUXC_UART3_TXD__GPT1_CLK 0x14>;
+ };
+
+ pinctrl_gpt1_gpio: gpt1gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_UART3_TXD__GPIO5_IO27 0x80>;
+ };
+
+ pinctrl_gpt2: gpt2grp {
+ fsl,pins = <MX8MP_IOMUXC_I2C3_SCL__GPT2_CLK 0x14>;
+ };
+
+ pinctrl_gpt2_gpio: gpt2gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x80>;
+ };
+
+ pinctrl_gpt3: gpt3grp {
+ fsl,pins = <MX8MP_IOMUXC_I2C3_SDA__GPT3_CLK 0x14>;
+ };
+
+ pinctrl_gpt3_gpio: gpt3gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C3_SDA__GPIO5_IO19 0x80>;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001e2>,
+ <MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001e2>;
+ };
+
+ pinctrl_i2c2_gpio: i2c2-gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x400001e2>,
+ <MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x400001e2>;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_RESET_B__I2C3_SCL 0x400001e2>,
+ <MX8MP_IOMUXC_SD1_STROBE__I2C3_SDA 0x400001e2>;
+ };
+
+ pinctrl_i2c3_gpio: i2c3-gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_RESET_B__GPIO2_IO10 0x400001e2>,
+ <MX8MP_IOMUXC_SD1_STROBE__GPIO2_IO11 0x400001e2>;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <MX8MP_IOMUXC_ECSPI2_MISO__I2C4_SCL 0x400001e2>,
+ <MX8MP_IOMUXC_ECSPI2_SS0__I2C4_SDA 0x400001e2>;
+ };
+
+ pinctrl_i2c4_gpio: i2c4-gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_ECSPI2_MISO__GPIO5_IO12 0x400001e2>,
+ <MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x400001e2>;
+ };
+
+ pinctrl_i2c5: i2c5grp {
+ fsl,pins = <MX8MP_IOMUXC_SPDIF_TX__I2C5_SCL 0x400001e2>,
+ <MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA 0x400001e2>;
+ };
+
+ pinctrl_i2c5_gpio: i2c5-gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_SPDIF_TX__GPIO5_IO03 0x400001e2>,
+ <MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x400001e2>;
+ };
+
+ pinctrl_i2c6: i2c6grp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_DATA0__I2C6_SCL 0x400001e2>,
+ <MX8MP_IOMUXC_SD1_DATA1__I2C6_SDA 0x400001e2>;
+ };
+
+ pinctrl_i2c6_gpio: i2c6-gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_DATA0__GPIO2_IO02 0x400001e2>,
+ <MX8MP_IOMUXC_SD1_DATA1__GPIO2_IO03 0x400001e2>;
+ };
+
+ pinctrl_pcf85063: pcf85063grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI5_RXFS__GPIO3_IO19 0x80>;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B 0x60>,
+ <MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x94>;
+ };
+
+ pinctrl_lvdsdisplay: lvdsdisplaygrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO07__GPIO1_IO07 0x10>;
+ };
+
+ pinctrl_pwm2: pwm2grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO09__PWM2_OUT 0x14>;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO14__PWM3_OUT 0x14>;
+ };
+
+ pinctrl_pwm3_gpio: pwm3grpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x80>;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO15__PWM4_OUT 0x14>;
+ };
+
+ pinctrl_pwm4_gpio: pwm4grpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_GPIO1_IO15__GPIO1_IO15 0x80>;
+ };
+
+ pinctrl_rfkill: rfkillgrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI3_MCLK__GPIO5_IO02 0x14>;
+ };
+
+ pinctrl_sai5: sai5grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI5_MCLK__AUDIOMIX_SAI5_MCLK 0x94>,
+ <MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_SAI5_RX_DATA00 0x94>,
+ <MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI5_TX_DATA00 0x94>,
+ <MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI5_TX_SYNC 0x94>,
+ <MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI5_TX_BCLK 0x94>;
+ };
+
+ pinctrl_tlv320aic3x04: tlv320aic3x04grp {
+ fsl,pins = <MX8MP_IOMUXC_ECSPI2_MOSI__GPIO5_IO11 0x180>;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI2_RXFS__UART1_DCE_TX 0x14>,
+ <MX8MP_IOMUXC_SAI2_RXC__UART1_DCE_RX 0x14>,
+ <MX8MP_IOMUXC_SAI2_RXD0__UART1_DTE_CTS 0x14>,
+ <MX8MP_IOMUXC_SAI2_TXFS__UART1_DTE_RTS 0x14>;
+ };
+
+ pinctrl_uart1_gpio: uart1gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_SAI2_RXFS__GPIO4_IO21 0x80>,
+ <MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x80>,
+ <MX8MP_IOMUXC_SAI2_RXD0__GPIO4_IO23 0x80>,
+ <MX8MP_IOMUXC_SAI2_TXFS__GPIO4_IO24 0x80>;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <MX8MP_IOMUXC_SAI3_TXC__UART2_DCE_TX 0x14>,
+ <MX8MP_IOMUXC_SAI3_TXFS__UART2_DCE_RX 0x14>,
+ <MX8MP_IOMUXC_SAI3_RXD__UART2_DCE_RTS 0x14>,
+ <MX8MP_IOMUXC_SAI3_RXC__UART2_DCE_CTS 0x14>;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_DATA6__UART3_DCE_TX 0x140>,
+ <MX8MP_IOMUXC_SD1_DATA7__UART3_DCE_RX 0x140>;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x140>,
+ <MX8MP_IOMUXC_UART4_RXD__UART4_DCE_RX 0x140>;
+ };
+
+ pinctrl_usbhub: usbhubgrp {
+ fsl,pins = <MX8MP_IOMUXC_UART3_RXD__GPIO5_IO26 0x10>;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x192>,
+ <MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d2>,
+ <MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d2>,
+ <MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d2>,
+ <MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d2>,
+ <MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d2>;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x192>,
+ <MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d2>,
+ <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d2>,
+ <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d2>,
+ <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d2>,
+ <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d2>,
+ <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194>,
+ <MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
+ <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194>,
+ <MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
+ <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
+ <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
+ fsl,pins = <MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
index c51ed7d991d1..ae64731266f3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
@@ -222,11 +222,6 @@
#size-cells = <2>;
ranges;
- ocram: ocram@900000 {
- no-map;
- reg = <0 0x900000 0 0x70000>;
- };
-
/* global autoconfigured region for contiguous allocations */
linux,cma {
compatible = "shared-dma-pool";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
index ebc29a950ba9..336785a9fba8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
@@ -45,12 +45,16 @@
flash0: flash@0 {
reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "jedec,spi-nor";
spi-max-frequency = <80000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw702x.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw702x.dtsi
index 560c68e4da6d..6c75a5ecf56b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw702x.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw702x.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/net/ti-dp83867.h>
/ {
@@ -102,6 +103,25 @@
ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
index dec57fad6828..e2b5e7ac3e46 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
@@ -219,7 +219,7 @@
bluetooth {
compatible = "brcm,bcm4330-bt";
- shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index a77e9a44d9fa..d765b7972841 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/net/ti-dp83867.h>
#include "imx8mp.dtsi"
@@ -225,6 +226,29 @@
ethphy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0x0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
index 6e6b9c2c4640..fbcd93e33aea 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
@@ -4,6 +4,18 @@
*/
/ {
+ native-hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "X21";
+ type = "a";
+
+ port {
+ native_hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
sound {
compatible = "simple-audio-card";
simple-audio-card,bitclock-master = <&codec_dai>;
@@ -94,6 +106,27 @@
pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
};
+/* Verdin HDMI_1 */
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&native_hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
/* Current measurement into module VCC */
&hwmon {
status = "okay";
@@ -139,6 +172,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
vpcie-supply = <&reg_pcie>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
index 42ed44a11711..09733fea036d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
@@ -4,6 +4,18 @@
*/
/ {
+ native-hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "X37";
+ type = "a";
+
+ port {
+ native_hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
reg_eth2phy: regulator-eth2phy {
compatible = "regulator-fixed";
enable-active-high;
@@ -103,6 +115,27 @@
vcc-supply = <&reg_1p8v>;
};
+/* Verdin HDMI_1 */
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&native_hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
/* Current measurement into module VCC */
&hwmon {
status = "okay";
@@ -141,6 +174,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
index 1d15f7449c58..3a40338cf2d8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
@@ -11,6 +11,18 @@
#include <dt-bindings/leds/common.h>
/ {
+ native-hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "X14";
+ type = "a";
+
+ port {
+ native_hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -91,6 +103,27 @@
status = "okay";
};
+/* Verdin HDMI_1 */
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&native_hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
/* Temperature sensor on Mallow */
&hwmon_temp {
compatible = "ti,tmp1075";
@@ -117,6 +150,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-nonwifi.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-nonwifi.dtsi
index 91d597391b7c..2ee91f31e7f0 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-nonwifi.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-nonwifi.dtsi
@@ -41,8 +41,7 @@
pinctrl-0 = <&pinctrl_gpio1>, <&pinctrl_gpio2>,
<&pinctrl_gpio3>, <&pinctrl_gpio4>,
<&pinctrl_gpio7>, <&pinctrl_gpio8>,
- <&pinctrl_gpio_hog1>, <&pinctrl_gpio_hog2>, <&pinctrl_gpio_hog3>,
- <&pinctrl_hdmi_hog>;
+ <&pinctrl_gpio_hog1>, <&pinctrl_gpio_hog2>, <&pinctrl_gpio_hog3>;
};
/*
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi
index ef94f9a57e20..efcab00c0142 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi
@@ -55,8 +55,7 @@
pinctrl-0 = <&pinctrl_gpio1>, <&pinctrl_gpio2>,
<&pinctrl_gpio3>, <&pinctrl_gpio4>,
<&pinctrl_gpio7>, <&pinctrl_gpio8>,
- <&pinctrl_gpio_hog2>, <&pinctrl_gpio_hog3>, <&pinctrl_gpio_hog4>,
- <&pinctrl_hdmi_hog>;
+ <&pinctrl_gpio_hog2>, <&pinctrl_gpio_hog3>, <&pinctrl_gpio_hog4>;
};
/* On-module Bluetooth */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
index a7b261ff3e4c..533b7fe218ce 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
@@ -6,6 +6,18 @@
#include <dt-bindings/leds/common.h>
/ {
+ native-hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "J15";
+ type = "a";
+
+ port {
+ native_hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
/* Carrier Board Supply +V1.8 */
reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
@@ -105,6 +117,27 @@
pinctrl-0 = <&pinctrl_ctrl_sleep_moci>;
};
+/* Verdin HDMI_1 */
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&native_hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
&hwmon_temp {
status = "okay";
};
@@ -127,6 +160,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
/* Verdin PCIE_1 */
&pcie {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
index aef4bef4bccd..d23a3942174d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
@@ -241,7 +241,6 @@
mtl_rx_setup: rx-queues-config {
snps,rx-queues-to-use = <5>;
- snps,rx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -276,7 +275,6 @@
mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <5>;
- snps,tx-sched-sp;
queue0 {
snps,dcb-algorithm;
@@ -457,6 +455,13 @@
"SODIMM_44";
};
+/* Verdin HDMI_1 */
+&hdmi_tx {
+ ddc-i2c-bus = <&i2c5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+};
+
/* On-module I2C */
&i2c1 {
clock-frequency = <400000>;
@@ -650,8 +655,7 @@
/* Verdin I2C_2_DSI */
&i2c2 {
- /* Lower frequency to avoid DDC/EDID issues with certain displays/screens. */
- clock-frequency = <10000>;
+ clock-frequency = <400000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
@@ -1117,10 +1121,10 @@
<MX8MP_IOMUXC_SAI1_RXFS__GPIO4_IO00 0x1c4>; /* SODIMM 252 */
};
- pinctrl_hdmi_hog: hdmihoggrp {
+ pinctrl_hdmi: hdmigrp {
fsl,pins =
- <MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000019>, /* SODIMM 63 */
- <MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000019>; /* SODIMM 61 */
+ <MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x140>, /* SODIMM 63 */
+ <MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x180>; /* SODIMM 61 */
};
/* On-module I2C */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index b92abb5a5c53..603dfe80216f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -789,6 +789,23 @@
reg = <IMX8MP_POWER_DOMAIN_USB2_PHY>;
};
+ pgc_mlmix: power-domain@4 {
+ #power-domain-cells = <0>;
+ reg = <IMX8MP_POWER_DOMAIN_MLMIX>;
+ clocks = <&clk IMX8MP_CLK_ML_AXI>,
+ <&clk IMX8MP_CLK_ML_AHB>,
+ <&clk IMX8MP_CLK_NPU_ROOT>;
+ assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
+ <&clk IMX8MP_CLK_ML_AXI>,
+ <&clk IMX8MP_CLK_ML_AHB>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <800000000>,
+ <800000000>,
+ <300000000>;
+ };
+
pgc_audio: power-domain@5 {
#power-domain-cells = <0>;
reg = <IMX8MP_POWER_DOMAIN_AUDIOMIX>;
@@ -821,6 +838,12 @@
assigned-clock-rates = <800000000>, <400000000>;
};
+ pgc_vpumix: power-domain@8 {
+ #power-domain-cells = <0>;
+ reg = <IMX8MP_POWER_DOMAIN_VPUMIX>;
+ clocks = <&clk IMX8MP_CLK_VPU_ROOT>;
+ };
+
pgc_gpu3d: power-domain@9 {
#power-domain-cells = <0>;
reg = <IMX8MP_POWER_DOMAIN_GPU3D>;
@@ -836,6 +859,28 @@
<&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
};
+ pgc_vpu_g1: power-domain@11 {
+ #power-domain-cells = <0>;
+ power-domains = <&pgc_vpumix>;
+ reg = <IMX8MP_POWER_DOMAIN_VPU_G1>;
+ clocks = <&clk IMX8MP_CLK_VPU_G1_ROOT>;
+ };
+
+ pgc_vpu_g2: power-domain@12 {
+ #power-domain-cells = <0>;
+ power-domains = <&pgc_vpumix>;
+ reg = <IMX8MP_POWER_DOMAIN_VPU_G2>;
+ clocks = <&clk IMX8MP_CLK_VPU_G2_ROOT>;
+
+ };
+
+ pgc_vpu_vc8000e: power-domain@13 {
+ #power-domain-cells = <0>;
+ power-domains = <&pgc_vpumix>;
+ reg = <IMX8MP_POWER_DOMAIN_VPU_VC8000E>;
+ clocks = <&clk IMX8MP_CLK_VPU_VC8KE_ROOT>;
+ };
+
pgc_hdmimix: power-domain@14 {
#power-domain-cells = <0>;
reg = <IMX8MP_POWER_DOMAIN_HDMIMIX>;
@@ -873,50 +918,6 @@
reg = <IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP>;
clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>;
};
-
- pgc_vpumix: power-domain@19 {
- #power-domain-cells = <0>;
- reg = <IMX8MP_POWER_DOMAIN_VPUMIX>;
- clocks = <&clk IMX8MP_CLK_VPU_ROOT>;
- };
-
- pgc_vpu_g1: power-domain@20 {
- #power-domain-cells = <0>;
- power-domains = <&pgc_vpumix>;
- reg = <IMX8MP_POWER_DOMAIN_VPU_G1>;
- clocks = <&clk IMX8MP_CLK_VPU_G1_ROOT>;
- };
-
- pgc_vpu_g2: power-domain@21 {
- #power-domain-cells = <0>;
- power-domains = <&pgc_vpumix>;
- reg = <IMX8MP_POWER_DOMAIN_VPU_G2>;
- clocks = <&clk IMX8MP_CLK_VPU_G2_ROOT>;
- };
-
- pgc_vpu_vc8000e: power-domain@22 {
- #power-domain-cells = <0>;
- power-domains = <&pgc_vpumix>;
- reg = <IMX8MP_POWER_DOMAIN_VPU_VC8000E>;
- clocks = <&clk IMX8MP_CLK_VPU_VC8KE_ROOT>;
- };
-
- pgc_mlmix: power-domain@24 {
- #power-domain-cells = <0>;
- reg = <IMX8MP_POWER_DOMAIN_MLMIX>;
- clocks = <&clk IMX8MP_CLK_ML_AXI>,
- <&clk IMX8MP_CLK_ML_AHB>,
- <&clk IMX8MP_CLK_NPU_ROOT>;
- assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
- <&clk IMX8MP_CLK_ML_AXI>,
- <&clk IMX8MP_CLK_ML_AHB>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
- <&clk IMX8MP_SYS_PLL1_800M>,
- <&clk IMX8MP_SYS_PLL1_800M>;
- assigned-clock-rates = <800000000>,
- <800000000>,
- <300000000>;
- };
};
};
};
@@ -1540,6 +1541,31 @@
dma-names = "tx";
status = "disabled";
};
+
+ xcvr: xcvr@30cc0000 {
+ compatible = "fsl,imx8mp-xcvr";
+ reg = <0x30cc0000 0x800>,
+ <0x30cc0800 0x400>,
+ <0x30cc0c00 0x080>,
+ <0x30cc0e00 0x080>;
+ reg-names = "ram", "regs", "rxfifo",
+ "txfifo";
+ interrupts = /* XCVR IRQ 0 */
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ /* XCVR IRQ 1 */
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ /* XCVR PHY - SPDIF wakeup IRQ */
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_EARC_IPG>,
+ <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_EARC_PHY>,
+ <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT>,
+ <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT>;
+ clock-names = "ipg", "phy", "spba", "pll_ipg";
+ dmas = <&sdma2 30 2 0>, <&sdma2 31 2 0>;
+ dma-names = "rx", "tx";
+ resets = <&audio_blk_ctrl 0>;
+ status = "disabled";
+ };
};
sdma3: dma-controller@30e00000 {
@@ -1568,6 +1594,7 @@
compatible = "fsl,imx8mp-audio-blk-ctrl";
reg = <0x30e20000 0x10000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
clocks = <&clk IMX8MP_CLK_AUDIO_ROOT>,
<&clk IMX8MP_CLK_SAI1>,
<&clk IMX8MP_CLK_SAI2>,
@@ -1579,6 +1606,9 @@
"sai1", "sai2", "sai3",
"sai5", "sai6", "sai7";
power-domains = <&pgc_audio>;
+ assigned-clocks = <&clk IMX8MP_AUDIO_PLL1>,
+ <&clk IMX8MP_AUDIO_PLL2>;
+ assigned-clock-rates = <393216000>, <361267200>;
};
};
@@ -1946,7 +1976,7 @@
};
irqsteer_hdmi: interrupt-controller@32fc2000 {
- compatible = "fsl,imx-irqsteer";
+ compatible = "fsl,imx8mp-irqsteer", "fsl,imx-irqsteer";
reg = <0x32fc2000 0x1000>;
interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
index ffb5fe61630d..1b39514d5c12 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
@@ -45,7 +45,6 @@
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
linux,code = <KEY_VOLUMEDOWN>;
debounce-interval = <50>;
- wakeup-source;
};
key-vol-up {
@@ -53,7 +52,6 @@
gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
linux,code = <KEY_VOLUMEUP>;
debounce-interval = <50>;
- wakeup-source;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
index 5ca6b2252546..01e5092e4c40 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
@@ -251,11 +251,15 @@
flash0: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
spi-max-frequency = <84000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
index 5c6b39c6933f..778741dbbb33 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
@@ -36,16 +36,103 @@
regulator-name = "SD1_SPWR";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
- gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
+ gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+ reg_fec2_supply: regulator-fec2-nvcc {
+ compatible = "regulator-fixed";
+ regulator-name = "fec2_nvcc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&max7322 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_can01_en: regulator-can01-gen {
+ compatible = "regulator-fixed";
+ regulator-name = "can01-en";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca6416 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_can2_en: regulator-can2-gen {
+ compatible = "regulator-fixed";
+ regulator-name = "can2-en";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca6416 4 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_can01_stby: regulator-can01-stby {
+ compatible = "regulator-fixed";
+ regulator-name = "can01-stby";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca6416 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_can01_en>;
+ };
+
+ reg_can2_stby: regulator-can2-stby {
+ compatible = "regulator-fixed";
+ regulator-name = "can2-stby";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca6416 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_can2_en>;
+ };
+
reg_vref_1v8: regulator-adc-vref {
compatible = "regulator-fixed";
regulator-name = "vref_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
+ bt_sco_codec: audio-codec-bt {
+ compatible = "linux,bt-sco";
+ #sound-dai-cells = <1>;
+ };
+
+ sound-bt-sco {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "bt-sco-audio";
+ simple-audio-card,format = "dsp_a";
+ simple-audio-card,bitclock-inversion;
+ simple-audio-card,frame-master = <&btcpu>;
+ simple-audio-card,bitclock-master = <&btcpu>;
+
+ btcpu: simple-audio-card,cpu {
+ sound-dai = <&sai0>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&bt_sco_codec 1>;
+ };
+ };
+
+ sound-wm8960 {
+ compatible = "fsl,imx-audio-wm8960";
+ model = "wm8960-audio";
+ audio-cpu = <&sai1>;
+ audio-codec = <&wm8960>;
+ hp-det-gpio = <&lsio_gpio0 31 GPIO_ACTIVE_HIGH>;
+ audio-routing = "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R",
+ "Ext Spk", "SPK_LP",
+ "Ext Spk", "SPK_LN",
+ "Ext Spk", "SPK_RP",
+ "Ext Spk", "SPK_RN",
+ "LINPUT1", "Mic Jack",
+ "Mic Jack", "MICB";
+ };
};
&adc0 {
@@ -55,6 +142,78 @@
status = "okay";
};
+&amix {
+ status = "okay";
+};
+
+&asrc0 {
+ fsl,asrc-rate = <48000>;
+ status = "okay";
+};
+
+&cm41_i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cm41_i2c>;
+ status = "okay";
+
+ pca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&cm41_intmux {
+ status = "okay";
+};
+
+&i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c0>;
+ status = "okay";
+
+ accelerometer@19 {
+ compatible = "st,lsm303agr-accel";
+ reg = <0x19>;
+ };
+
+ gyrometer@20 {
+ compatible = "nxp,fxas21002c";
+ reg = <0x20>;
+ };
+
+ light-sensor@44 {
+ compatible = "isil,isl29023";
+ reg = <0x44>;
+ interrupt-parent = <&lsio_gpio4>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ pressure-sensor@60 {
+ compatible = "fsl,mpl3115";
+ reg = <0x60>;
+ };
+
+ max7322: gpio@68 {
+ compatible = "maxim,max7322";
+ reg = <0x68>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gyrometer@69 {
+ compatible = "st,l3g4200d-gyro";
+ reg = <0x69>;
+ };
+};
+
&i2c1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -65,6 +224,42 @@
scl-gpios = <&lsio_gpio0 14 GPIO_ACTIVE_HIGH>;
sda-gpios = <&lsio_gpio0 15 GPIO_ACTIVE_HIGH>;
status = "okay";
+
+ wm8960: audio-codec@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+ clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "mclk";
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <12288000>;
+ wlf,shared-lrclk;
+ wlf,hp-cfg = <2 2 3>;
+ wlf,gpio-cfg = <1 3>;
+ };
+};
+
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_can01_stby>;
+ status = "okay";
+};
+
+&flexcan2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ xceiver-supply = <&reg_can01_stby>;
+ status = "okay";
+};
+
+&flexcan3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan3>;
+ xceiver-supply = <&reg_can2_stby>;
+ status = "okay";
};
&lpuart0 {
@@ -100,6 +295,14 @@
};
};
+&lsio_mu5 {
+ status = "okay";
+};
+
+&lsio_mu6 {
+ status = "okay";
+};
+
&flexspi0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexspi0>;
@@ -140,6 +343,19 @@
};
};
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec2>;
+ phy-mode = "rgmii-txid";
+ phy-handle = <&ethphy1>;
+ phy-supply = <&reg_fec2_supply>;
+ nvmem-cells = <&fec_mac1>;
+ nvmem-cell-names = "mac-address";
+ rx-internal-delay-ps = <2000>;
+ fsl,magic-packet;
+ status = "okay";
+};
+
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -160,7 +376,71 @@
status = "okay";
};
+&sai0 {
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai0_lpcg IMX_LPCG_CLK_4>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai0>;
+ status = "okay";
+};
+
+&sai1 {
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai1_lpcg IMX_LPCG_CLK_4>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <49152000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ status = "okay";
+};
+
+&sai6 {
+ assigned-clocks = <&acm IMX_ADMA_ACM_SAI6_MCLK_SEL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai6_lpcg IMX_LPCG_CLK_4>;
+ assigned-clock-parents = <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <0>, <786432000>, <98304000>, <12288000>, <98304000>;
+ fsl,sai-asynchronous;
+ status = "okay";
+};
+
+&sai7 {
+ assigned-clocks = <&acm IMX_ADMA_ACM_SAI7_MCLK_SEL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai7_lpcg IMX_LPCG_CLK_4>;
+ assigned-clock-parents = <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <0>, <786432000>, <98304000>, <12288000>, <98304000>;
+ fsl,sai-asynchronous;
+ status = "okay";
+};
+
&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ IMX8QM_MCLK_OUT0_AUD_ACM_MCLK_OUT0 0x0600004c
+ IMX8QM_SCU_GPIO0_03_LSIO_GPIO0_IO31 0x0600004c
+ >;
+ };
+
+ pinctrl_i2c0: i2c0grp {
+ fsl,pins = <
+ IMX8QM_HDMI_TX0_TS_SCL_DMA_I2C0_SCL 0x06000021
+ IMX8QM_HDMI_TX0_TS_SDA_DMA_I2C0_SDA 0x06000021
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
IMX8QM_GPT0_CLK_DMA_I2C1_SCL 0x0600004c
@@ -181,6 +461,13 @@
>;
};
+ pinctrl_cm41_i2c: cm41i2cgrp {
+ fsl,pins = <
+ IMX8QM_M41_I2C0_SDA_M41_I2C0_SDA 0x0600004c
+ IMX8QM_M41_I2C0_SCL_M41_I2C0_SCL 0x0600004c
+ >;
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
IMX8QM_ENET0_MDC_CONN_ENET0_MDC 0x06000020
@@ -235,6 +522,45 @@
>;
};
+ pinctrl_fec2: fec2grp {
+ fsl,pins = <
+ IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA_PAD 0x000014a0
+ IMX8QM_ENET1_RGMII_TX_CTL_CONN_ENET1_RGMII_TX_CTL 0x00000060
+ IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RGMII_TXC 0x00000060
+ IMX8QM_ENET1_RGMII_TXD0_CONN_ENET1_RGMII_TXD0 0x00000060
+ IMX8QM_ENET1_RGMII_TXD1_CONN_ENET1_RGMII_TXD1 0x00000060
+ IMX8QM_ENET1_RGMII_TXD2_CONN_ENET1_RGMII_TXD2 0x00000060
+ IMX8QM_ENET1_RGMII_TXD3_CONN_ENET1_RGMII_TXD3 0x00000060
+ IMX8QM_ENET1_RGMII_RXC_CONN_ENET1_RGMII_RXC 0x00000060
+ IMX8QM_ENET1_RGMII_RX_CTL_CONN_ENET1_RGMII_RX_CTL 0x00000060
+ IMX8QM_ENET1_RGMII_RXD0_CONN_ENET1_RGMII_RXD0 0x00000060
+ IMX8QM_ENET1_RGMII_RXD1_CONN_ENET1_RGMII_RXD1 0x00000060
+ IMX8QM_ENET1_RGMII_RXD2_CONN_ENET1_RGMII_RXD2 0x00000060
+ IMX8QM_ENET1_RGMII_RXD3_CONN_ENET1_RGMII_RXD3 0x00000060
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan0grp {
+ fsl,pins = <
+ IMX8QM_FLEXCAN0_TX_DMA_FLEXCAN0_TX 0x21
+ IMX8QM_FLEXCAN0_RX_DMA_FLEXCAN0_RX 0x21
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan1grp {
+ fsl,pins = <
+ IMX8QM_FLEXCAN1_TX_DMA_FLEXCAN1_TX 0x21
+ IMX8QM_FLEXCAN1_RX_DMA_FLEXCAN1_RX 0x21
+ >;
+ };
+
+ pinctrl_flexcan3: flexcan3grp {
+ fsl,pins = <
+ IMX8QM_FLEXCAN2_TX_DMA_FLEXCAN2_TX 0x21
+ IMX8QM_FLEXCAN2_RX_DMA_FLEXCAN2_RX 0x21
+ >;
+ };
+
pinctrl_lpuart0: lpuart0grp {
fsl,pins = <
IMX8QM_UART0_RX_DMA_UART0_RX 0x06000020
@@ -256,6 +582,24 @@
>;
};
+ pinctrl_sai0: sai0grp {
+ fsl,pins = <
+ IMX8QM_SPI0_CS1_AUD_SAI0_TXC 0x0600004c
+ IMX8QM_SPI2_CS1_AUD_SAI0_TXFS 0x0600004c
+ IMX8QM_SAI1_RXFS_AUD_SAI0_RXD 0x0600004c
+ IMX8QM_SAI1_RXC_AUD_SAI0_TXD 0x0600006c
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ IMX8QM_SAI1_RXD_AUD_SAI1_RXD 0x06000040
+ IMX8QM_SAI1_TXFS_AUD_SAI1_TXFS 0x06000040
+ IMX8QM_SAI1_TXD_AUD_SAI1_TXD 0x06000060
+ IMX8QM_SAI1_TXC_AUD_SAI1_TXC 0x06000040
+ >;
+ };
+
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi
new file mode 100644
index 000000000000..3036af49fc85
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+/delete-node/ &acm;
+/delete-node/ &sai4;
+/delete-node/ &sai5;
+/delete-node/ &sai4_lpcg;
+/delete-node/ &sai5_lpcg;
+
+&amix {
+ dais = <&sai6>, <&sai7>;
+};
+
+&asrc0 {
+ clocks = <&asrc0_lpcg IMX_LPCG_CLK_0>,
+ <&asrc0_lpcg IMX_LPCG_CLK_2>,
+ <&aud_pll_div0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>,
+ <&acm IMX_ADMA_ACM_AUD_CLK0_SEL>,
+ <&acm IMX_ADMA_ACM_AUD_CLK1_SEL>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ power-domains = <&pd IMX_SC_R_ASRC_0>;
+};
+
+&asrc0_lpcg {
+ clocks = <&audio_ipg_clk>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_2>;
+ clock-output-names = "asrc0_lpcg_ipg_clk", "asrc0_lpcg_mem_clk";
+};
+
+&asrc1 {
+ clocks = <&asrc1_lpcg IMX_LPCG_CLK_0>,
+ <&asrc1_lpcg IMX_LPCG_CLK_2>,
+ <&aud_pll_div0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>,
+ <&acm IMX_ADMA_ACM_AUD_CLK0_SEL>,
+ <&acm IMX_ADMA_ACM_AUD_CLK1_SEL>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ power-domains = <&pd IMX_SC_R_ASRC_1>;
+};
+
+&asrc1_lpcg {
+ clocks = <&audio_ipg_clk>, <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_2>;
+ clock-output-names = "asrc1_lpcg_ipg_clk", "asrc1_lpcg_mem_clk";
+};
+
+&audio_subsys {
+ sai4: sai@59080000 {
+ compatible = "fsl,imx8qm-sai";
+ reg = <0x59080000 0x10000>;
+ interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sai4_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai4_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dma-names = "rx";
+ dmas = <&edma0 18 0 1>;
+ fsl,dataline = <0 0xf 0x0>;
+ power-domains = <&pd IMX_SC_R_SAI_4>;
+ status = "disabled";
+ };
+
+ sai5: sai@59090000 {
+ compatible = "fsl,imx8qm-sai";
+ reg = <0x59090000 0x10000>;
+ interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sai5_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai5_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dma-names = "tx";
+ dmas = <&edma0 19 0 0>;
+ fsl,dataline = <0 0x0 0xf>;
+ power-domains = <&pd IMX_SC_R_SAI_5>;
+ status = "disabled";
+ };
+
+ sai4_lpcg: clock-controller@59480000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59480000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_SAI4_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai4_lpcg_mclk", "sai4_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_SAI_4>;
+ status = "disabled";
+ };
+
+ sai5_lpcg: clock-controller@59490000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59490000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_SAI5_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai5_lpcg_mclk", "sai5_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_SAI_5>;
+ status = "disabled";
+ };
+
+ esai1: esai@59810000 {
+ compatible = "fsl,imx8qm-esai";
+ reg = <0x59810000 0x10000>;
+ interrupts = <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&esai1_lpcg IMX_LPCG_CLK_0>,
+ <&esai1_lpcg IMX_LPCG_CLK_4>,
+ <&esai1_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>;
+ clock-names = "core", "extal", "fsys", "spba";
+ dmas = <&edma1 6 0 1>, <&edma1 7 0 0>;
+ dma-names = "rx", "tx";
+ power-domains = <&pd IMX_SC_R_ESAI_1>;
+ status = "disabled";
+ };
+
+ sai6: sai@59820000 {
+ compatible = "fsl,imx8qm-sai";
+ reg = <0x59820000 0x10000>;
+ interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sai6_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai6_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dma-names = "rx", "tx";
+ dmas = <&edma1 8 0 1>, <&edma1 9 0 0>;
+ power-domains = <&pd IMX_SC_R_SAI_6>;
+ status = "disabled";
+ };
+
+ sai7: sai@59830000 {
+ compatible = "fsl,imx8qm-sai";
+ reg = <0x59830000 0x10000>;
+ interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sai7_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai7_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dma-names = "tx";
+ dmas = <&edma1 10 0 0>;
+ power-domains = <&pd IMX_SC_R_SAI_7>;
+ status = "disabled";
+ };
+
+ esai1_lpcg: clock-controller@59c10000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c10000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_ESAI1_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "esai1_lpcg_extal_clk", "esai1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_ESAI_1>;
+ };
+
+ sai6_lpcg: clock-controller@59c20000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c20000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_SAI6_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai6_lpcg_mclk", "sai6_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_SAI_6>;
+ };
+
+ sai7_lpcg: clock-controller@59c30000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59c30000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&acm IMX_ADMA_ACM_SAI7_MCLK_SEL>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai7_lpcg_mclk", "sai7_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_SAI_7>;
+ };
+
+ acm: acm@59e00000 {
+ compatible = "fsl,imx8qm-acm";
+ reg = <0x59e00000 0x1d0000>;
+ #clock-cells = <1>;
+ power-domains = <&pd IMX_SC_R_AUDIO_CLK_0>,
+ <&pd IMX_SC_R_AUDIO_CLK_1>,
+ <&pd IMX_SC_R_MCLK_OUT_0>,
+ <&pd IMX_SC_R_MCLK_OUT_1>,
+ <&pd IMX_SC_R_AUDIO_PLL_0>,
+ <&pd IMX_SC_R_AUDIO_PLL_1>,
+ <&pd IMX_SC_R_ASRC_0>,
+ <&pd IMX_SC_R_ASRC_1>,
+ <&pd IMX_SC_R_ESAI_0>,
+ <&pd IMX_SC_R_ESAI_1>,
+ <&pd IMX_SC_R_SAI_0>,
+ <&pd IMX_SC_R_SAI_1>,
+ <&pd IMX_SC_R_SAI_2>,
+ <&pd IMX_SC_R_SAI_3>,
+ <&pd IMX_SC_R_SAI_4>,
+ <&pd IMX_SC_R_SAI_5>,
+ <&pd IMX_SC_R_SAI_6>,
+ <&pd IMX_SC_R_SAI_7>,
+ <&pd IMX_SC_R_SPDIF_0>,
+ <&pd IMX_SC_R_SPDIF_1>,
+ <&pd IMX_SC_R_MQS_0>;
+ clocks = <&aud_rec0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_rec1_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div0_lpcg IMX_LPCG_CLK_0>,
+ <&aud_pll_div1_lpcg IMX_LPCG_CLK_0>,
+ <&clk_mlb_clk>,
+ <&clk_hdmi_rx_mclk>,
+ <&clk_ext_aud_mclk0>,
+ <&clk_ext_aud_mclk1>,
+ <&clk_esai0_rx_clk>,
+ <&clk_esai0_rx_hf_clk>,
+ <&clk_esai0_tx_clk>,
+ <&clk_esai0_tx_hf_clk>,
+ <&clk_esai1_rx_clk>,
+ <&clk_esai1_rx_hf_clk>,
+ <&clk_esai1_tx_clk>,
+ <&clk_esai1_tx_hf_clk>,
+ <&clk_spdif0_rx>,
+ <&clk_spdif0_rx>,
+ <&clk_sai0_rx_bclk>,
+ <&clk_sai0_tx_bclk>,
+ <&clk_sai1_rx_bclk>,
+ <&clk_sai1_tx_bclk>,
+ <&clk_sai2_rx_bclk>,
+ <&clk_sai3_rx_bclk>,
+ <&clk_sai4_rx_bclk>,
+ <&clk_sai5_rx_bclk>,
+ <&clk_sai6_rx_bclk>;
+ clock-names = "aud_rec_clk0_lpcg_clk",
+ "aud_rec_clk1_lpcg_clk",
+ "aud_pll_div_clk0_lpcg_clk",
+ "aud_pll_div_clk1_lpcg_clk",
+ "mlb_clk",
+ "hdmi_rx_mclk",
+ "ext_aud_mclk0",
+ "ext_aud_mclk1",
+ "esai0_rx_clk",
+ "esai0_rx_hf_clk",
+ "esai0_tx_clk",
+ "esai0_tx_hf_clk",
+ "esai1_rx_clk",
+ "esai1_rx_hf_clk",
+ "esai1_tx_clk",
+ "esai1_tx_hf_clk",
+ "spdif0_rx",
+ "spdif1_rx",
+ "sai0_rx_bclk",
+ "sai0_tx_bclk",
+ "sai1_rx_bclk",
+ "sai1_tx_bclk",
+ "sai2_rx_bclk",
+ "sai3_rx_bclk",
+ "sai4_rx_bclk",
+ "sai5_tx_bclk",
+ "sai6_rx_bclk";
+ };
+};
+
+&dsp_lpcg {
+ status = "disabled";
+};
+
+&dsp_ram_lpcg {
+ status = "disabled";
+};
+
+/* Named edma2 in the i.MX8QM RM; it reuses the edma0 base address of the i.MX8QXP */
+&edma0 {
+ reg = <0x591f0000 0x150000>;
+ dma-channels = <20>;
+ dma-channel-mask = <0>;
+ interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>, /* asrc0 */
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 410 IRQ_TYPE_LEVEL_HIGH>, /* esai0 */
+ <GIC_SPI 410 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>, /* spdif0 */
+ <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>, /* spdif1 */
+ <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>, /* sai0 */
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>, /* sai1 */
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>, /* sai2 */
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>, /* sai3 */
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>, /* sai4 */
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>; /* sai5 */
+ power-domains = <&pd IMX_SC_R_DMA_2_CH0>,
+ <&pd IMX_SC_R_DMA_2_CH1>,
+ <&pd IMX_SC_R_DMA_2_CH2>,
+ <&pd IMX_SC_R_DMA_2_CH3>,
+ <&pd IMX_SC_R_DMA_2_CH4>,
+ <&pd IMX_SC_R_DMA_2_CH5>,
+ <&pd IMX_SC_R_DMA_2_CH6>,
+ <&pd IMX_SC_R_DMA_2_CH7>,
+ <&pd IMX_SC_R_DMA_2_CH8>,
+ <&pd IMX_SC_R_DMA_2_CH9>,
+ <&pd IMX_SC_R_DMA_2_CH10>,
+ <&pd IMX_SC_R_DMA_2_CH11>,
+ <&pd IMX_SC_R_DMA_2_CH12>,
+ <&pd IMX_SC_R_DMA_2_CH13>,
+ <&pd IMX_SC_R_DMA_2_CH14>,
+ <&pd IMX_SC_R_DMA_2_CH15>,
+ <&pd IMX_SC_R_DMA_2_CH16>,
+ <&pd IMX_SC_R_DMA_2_CH17>,
+ <&pd IMX_SC_R_DMA_2_CH18>,
+ <&pd IMX_SC_R_DMA_2_CH19>;
+};
+
+/* Named edma3 in the i.MX8QM RM; it reuses the edma1 base address of the i.MX8QXP */
+&edma1 {
+ reg = <0x599f0000 0xc0000>;
+ dma-channels = <11>;
+ dma-channel-mask = <0xc0>;
+ interrupts = <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>, /* asrc1 */
+ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 385 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 387 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, /* not used */
+ <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, /* not used */
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>, /* sai6 */
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>; /* sai7 */
+ power-domains = <&pd IMX_SC_R_DMA_3_CH0>,
+ <&pd IMX_SC_R_DMA_3_CH1>,
+ <&pd IMX_SC_R_DMA_3_CH2>,
+ <&pd IMX_SC_R_DMA_3_CH3>,
+ <&pd IMX_SC_R_DMA_3_CH4>,
+ <&pd IMX_SC_R_DMA_3_CH5>,
+ <&pd IMX_SC_R_DMA_3_CH6>,
+ <&pd IMX_SC_R_DMA_3_CH7>,
+ <&pd IMX_SC_R_DMA_3_CH8>,
+ <&pd IMX_SC_R_DMA_3_CH9>,
+ <&pd IMX_SC_R_DMA_3_CH10>;
+};
+
+&esai0 {
+ clocks = <&esai0_lpcg IMX_LPCG_CLK_0>,
+ <&esai0_lpcg IMX_LPCG_CLK_4>,
+ <&esai0_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>;
+ power-domains = <&pd IMX_SC_R_ESAI_0>;
+};
+
+&esai0_lpcg {
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "esai0_lpcg_extal_clk", "esai0_lpcg_ipg_clk";
+};
+
+&mqs0_lpcg {
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "mqs0_lpcg_mclk", "mqs0_lpcg_ipg_clk";
+};
+
+&sai0 {
+ clocks = <&sai0_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai0_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ power-domains = <&pd IMX_SC_R_SAI_0>;
+};
+
+&sai0_lpcg {
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai0_lpcg_mclk", "sai0_lpcg_ipg_clk";
+};
+
+&sai1 {
+ clocks = <&sai1_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai1_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ power-domains = <&pd IMX_SC_R_SAI_1>;
+};
+
+&sai1_lpcg {
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai1_lpcg_mclk", "sai1_lpcg_ipg_clk";
+};
+
+&sai2 {
+ clocks = <&sai2_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai2_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ power-domains = <&pd IMX_SC_R_SAI_2>;
+};
+
+&sai2_lpcg {
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai2_lpcg_mclk", "sai2_lpcg_ipg_clk";
+};
+
+&sai3 {
+ clocks = <&sai3_lpcg IMX_LPCG_CLK_0>,
+ <&clk_dummy>,
+ <&sai3_lpcg IMX_LPCG_CLK_4>,
+ <&clk_dummy>,
+ <&clk_dummy>;
+ power-domains = <&pd IMX_SC_R_SAI_3>;
+};
+
+&sai3_lpcg {
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_0>;
+ clock-output-names = "sai3_lpcg_mclk", "sai3_lpcg_ipg_clk";
+};
+
+&spdif0 {
+ clocks = <&spdif0_lpcg IMX_LPCG_CLK_4>, /* core */
+ <&clk_dummy>, /* rxtx0 */
+ <&spdif0_lpcg IMX_LPCG_CLK_5>, /* rxtx1 */
+ <&clk_dummy>, /* rxtx2 */
+ <&clk_dummy>, /* rxtx3 */
+ <&clk_dummy>, /* rxtx4 */
+ <&audio_ipg_clk>, /* rxtx5 */
+ <&clk_dummy>, /* rxtx6 */
+ <&clk_dummy>, /* rxtx7 */
+ <&clk_dummy>; /* spba */
+ power-domains = <&pd IMX_SC_R_SPDIF_0>;
+};
+
+&spdif0_lpcg {
+ clock-indices = <IMX_LPCG_CLK_5>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "spdif0_lpcg_tx_clk", "spdif0_lpcg_gclkw";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
index b3d01677b70c..61986e0639e5 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
@@ -333,6 +333,21 @@
compatible = "fsl,imx8qxp-sc-rtc";
};
+ ocotp: ocotp {
+ compatible = "fsl,imx8qm-scu-ocotp";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ read-only;
+
+ fec_mac0: mac@1c4 {
+ reg = <0x1c4 6>;
+ };
+
+ fec_mac1: mac@1c6 {
+ reg = <0x1c6 6>;
+ };
+ };
+
tsens: thermal-sensor {
compatible = "fsl,imx8qxp-sc-thermal", "fsl,imx-sc-thermal";
#thermal-sensor-cells = <1>;
@@ -461,8 +476,95 @@
};
};
+ clk_dummy: clock-dummy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "clk_dummy";
+ };
+
+ clk_esai1_rx_clk: clock-esai1-rx {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "esai1_rx_clk";
+ };
+
+ clk_esai1_rx_hf_clk: clock-esai1-rx-hf {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "esai1_rx_hf_clk";
+ };
+
+ clk_esai1_tx_clk: clock-esai1-tx {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "esai1_tx_clk";
+ };
+
+ clk_esai1_tx_hf_clk: clock-esai1-tx-hf {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "esai1_tx_hf_clk";
+ };
+
+ clk_hdmi_rx_mclk: clock-hdmi-rx-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "hdmi-rx-mclk";
+ };
+
+ clk_mlb_clk: clock-mlb-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "mlb_clk";
+ };
+
+ clk_sai5_rx_bclk: clock-sai5-rx-bclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai5_rx_bclk";
+ };
+
+ clk_sai5_tx_bclk: clock-sai5-tx-bclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai5_tx_bclk";
+ };
+
+ clk_sai6_rx_bclk: clock-sai6-rx-bclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai6_rx_bclk";
+ };
+
+ clk_sai6_tx_bclk: clock-sai6-tx-bclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai6_tx_bclk";
+ };
+
+ clk_spdif1_rx: clock-spdif1-rx {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "spdif1_rx";
+ };
+
/* sorted in register address */
+ #include "imx8-ss-cm41.dtsi"
+ #include "imx8-ss-audio.dtsi"
#include "imx8-ss-vpu.dtsi"
+ #include "imx8-ss-gpu0.dtsi"
#include "imx8-ss-img.dtsi"
#include "imx8-ss-dma.dtsi"
#include "imx8-ss-conn.dtsi"
@@ -473,3 +575,4 @@
#include "imx8qm-ss-dma.dtsi"
#include "imx8qm-ss-conn.dtsi"
#include "imx8qm-ss-lsio.dtsi"
+#include "imx8qm-ss-audio.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index cee13e58762c..936ba5ecdcac 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -63,6 +63,7 @@
};
&dsp {
+ memory-region = <&dsp_reserved>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index d400d85f42a9..a15987f49e8d 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -97,89 +97,6 @@
status = "okay";
};
-&mu1 {
- status = "okay";
-};
-
-&mu2 {
- status = "okay";
-};
-
-&lpi2c3 {
- #address-cells = <1>;
- #size-cells = <0>;
- clock-frequency = <400000>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_lpi2c3>;
- status = "okay";
-
- ptn5110: tcpc@50 {
- compatible = "nxp,ptn5110", "tcpci";
- reg = <0x50>;
- interrupt-parent = <&gpio3>;
- interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
-
- typec1_con: connector {
- compatible = "usb-c-connector";
- label = "USB-C";
- power-role = "dual";
- data-role = "dual";
- try-power-role = "sink";
- source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
- sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
- PDO_VAR(5000, 20000, 3000)>;
- op-sink-microwatt = <15000000>;
- self-powered;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- typec1_dr_sw: endpoint {
- remote-endpoint = <&usb1_drd_sw>;
- };
- };
- };
- };
- };
-
- ptn5110_2: tcpc@51 {
- compatible = "nxp,ptn5110", "tcpci";
- reg = <0x51>;
- interrupt-parent = <&gpio3>;
- interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
-
- typec2_con: connector {
- compatible = "usb-c-connector";
- label = "USB-C";
- power-role = "dual";
- data-role = "dual";
- try-power-role = "sink";
- source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
- sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
- PDO_VAR(5000, 20000, 3000)>;
- op-sink-microwatt = <15000000>;
- self-powered;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- typec2_dr_sw: endpoint {
- remote-endpoint = <&usb2_drd_sw>;
- };
- };
- };
- };
- };
-};
-
&eqos {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&pinctrl_eqos>;
@@ -228,82 +145,6 @@
};
};
-&lpuart1 { /* console */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1>;
- status = "okay";
-};
-
-&lpuart5 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart5>;
- status = "okay";
-};
-
-&usbotg1 {
- dr_mode = "otg";
- hnp-disable;
- srp-disable;
- adp-disable;
- usb-role-switch;
- disable-over-current;
- samsung,picophy-pre-emp-curr-control = <3>;
- samsung,picophy-dc-vol-level-adjust = <7>;
- status = "okay";
-
- port {
- usb1_drd_sw: endpoint {
- remote-endpoint = <&typec1_dr_sw>;
- };
- };
-};
-
-&usbotg2 {
- dr_mode = "otg";
- hnp-disable;
- srp-disable;
- adp-disable;
- usb-role-switch;
- disable-over-current;
- samsung,picophy-pre-emp-curr-control = <3>;
- samsung,picophy-dc-vol-level-adjust = <7>;
- status = "okay";
-
- port {
- usb2_drd_sw: endpoint {
- remote-endpoint = <&typec2_dr_sw>;
- };
- };
-};
-
-&usdhc1 {
- pinctrl-names = "default", "state_100mhz", "state_200mhz";
- pinctrl-0 = <&pinctrl_usdhc1>;
- pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
- pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
- bus-width = <8>;
- non-removable;
- status = "okay";
-};
-
-&usdhc2 {
- pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
- pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
- pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
- pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
- pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_gpio_sleep>;
- cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
- vmmc-supply = <&reg_usdhc2_vmmc>;
- bus-width = <4>;
- status = "okay";
- no-sdio;
- no-mmc;
-};
-
-&wdog3 {
- status = "okay";
-};
-
&lpi2c2 {
#address-cells = <1>;
#size-cells = <0>;
@@ -403,11 +244,79 @@
};
&lpi2c3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpi2c3>;
status = "okay";
+ ptn5110: tcpc@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+
+ typec1_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 20000, 3000)>;
+ op-sink-microwatt = <15000000>;
+ self-powered;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec1_dr_sw: endpoint {
+ remote-endpoint = <&usb1_drd_sw>;
+ };
+ };
+ };
+ };
+ };
+
+ ptn5110_2: tcpc@51 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x51>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+
+ typec2_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 20000, 3000)>;
+ op-sink-microwatt = <15000000>;
+ self-powered;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec2_dr_sw: endpoint {
+ remote-endpoint = <&usb2_drd_sw>;
+ };
+ };
+ };
+ };
+ };
+
pcf2131: rtc@53 {
compatible = "nxp,pcf2131";
reg = <0x53>;
@@ -416,6 +325,89 @@
};
};
+&lpuart1 { /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&lpuart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&mu1 {
+ status = "okay";
+};
+
+&mu2 {
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ usb1_drd_sw: endpoint {
+ remote-endpoint = <&typec1_dr_sw>;
+ };
+ };
+};
+
+&usbotg2 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ usb2_drd_sw: endpoint {
+ remote-endpoint = <&typec2_dr_sw>;
+ };
+ };
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_gpio_sleep>;
+ cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+ no-mmc;
+};
+
+&wdog3 {
+ status = "okay";
+};
+
&iomuxc {
pinctrl_eqos: eqosgrp {
fsl,pins = <
diff --git a/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
new file mode 100644
index 000000000000..950dece83c24
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/usb/pd.h>
+#include "imx93.dtsi"
+
+/ {
+ model = "NXP i.MX93 9x9 Quick Start Board";
+ compatible = "fsl,imx93-9x9-qsb", "fsl,imx93";
+
+ chosen {
+ stdout-path = &lpuart1;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0 0x10000000>;
+ linux,cma-default;
+ };
+
+ vdev0vring0: vdev0vring0@a4000000 {
+ reg = <0 0xa4000000 0 0x8000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@a4008000 {
+ reg = <0 0xa4008000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring0: vdev1vring0@a4010000 {
+ reg = <0 0xa4010000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring1: vdev1vring1@a4018000 {
+ reg = <0 0xa4018000 0 0x8000>;
+ no-map;
+ };
+
+ rsc_table: rsc-table@2021e000 {
+ reg = <0 0x2021e000 0 0x1000>;
+ no-map;
+ };
+
+ vdevbuffer: vdevbuffer@a4020000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0xa4020000 0 0x100000>;
+ no-map;
+ };
+
+
+ reg_vref_1v8: regulator-adc-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "VREF_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_rpi_3v3: regulator-rpi {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_RPI_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pcal6524 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ off-on-delay-us = <12000>;
+ };
+};
+
+&adc1 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+&cm33 {
+ mbox-names = "tx", "rx", "rxdb";
+ mboxes = <&mu1 0 1>,
+ <&mu1 1 1>,
+ <&mu1 3 1>;
+ memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>,
+ <&vdev1vring0>, <&vdev1vring1>, <&rsc_table>;
+ status = "okay";
+};
+
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <5000000>;
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ eee-broken-1000t;
+ reset-gpios = <&pcal6524 15 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
+ realtek,clkout-disable;
+ };
+ };
+};
+
+&lpi2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c1>;
+ status = "okay";
+
+ ptn5110: tcpc@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+
+ typec1_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 20000, 3000)>;
+ op-sink-microwatt = <15000000>;
+ self-powered;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec1_dr_sw: endpoint {
+ remote-endpoint = <&usb1_drd_sw>;
+ };
+ };
+ };
+ };
+ };
+
+ rtc@53 {
+ compatible = "nxp,pcf2131";
+ reg = <0x53>;
+ interrupt-parent = <&pcal6524>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&lpi2c2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c2>;
+ status = "okay";
+
+ pcal6524: gpio@22 {
+ compatible = "nxp,pcal6524";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcal6524>;
+ };
+
+ pmic@25 {
+ compatible = "nxp,pca9451a";
+ reg = <0x25>;
+ interrupt-parent = <&pcal6524>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <2237500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck4: BUCK4 {
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6: BUCK6 {
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: LDO4 {
+ regulator-name = "LDO4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo5: LDO5 {
+ regulator-name = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&lpuart1 { /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&mu1 {
+ status = "okay";
+};
+
+&mu2 {
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ usb1_drd_sw: endpoint {
+ remote-endpoint = <&typec1_dr_sw>;
+ };
+ };
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ no-mmc;
+ status = "okay";
+};
+
+&wdog3 {
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x57e
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x57e
+ MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e
+ MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e
+ MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e
+ MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e
+ MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x58e
+ MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x57e
+ MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x57e
+ MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x57e
+ MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x57e
+ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e
+ MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x57e
+ >;
+ };
+
+ pinctrl_lpi2c1: lpi2c1grp {
+ fsl,pins = <
+ MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e
+ MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c2: lpi2c2grp {
+ fsl,pins = <
+ MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e
+ MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_pcal6524: pcal6524grp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x31e
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
+ MX93_PAD_UART1_TXD__LPUART1_TX 0x31e
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX93_PAD_DAP_TDO_TRACESWO__LPUART5_TX 0x31e
+ MX93_PAD_DAP_TDI__LPUART5_RX 0x31e
+ MX93_PAD_DAP_TMS_SWDIO__LPUART5_RTS_B 0x31e
+ MX93_PAD_DAP_TCLK_SWCLK__LPUART5_CTS_B 0x31e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x1582
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x40001382
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x40001382
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x40001382
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x40001382
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x40001382
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x40001382
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x40001382
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x40001382
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x40001382
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1582
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000138e
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000138e
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000138e
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000138e
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000138e
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000138e
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000138e
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000138e
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x400013fe
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x400013fe
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x400013fe
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x400013fe
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x400013fe
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x400013fe
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x400013fe
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x400013fe
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x400013fe
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x31e
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x1582
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x40001382
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x40001382
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001382
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001382
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x40001382
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000138e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ /* need to configure SION for the data and cmd pads, refer to ERR052021 */
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x400013fe
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x400013fe
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x400013fe
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x400013fe
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x400013fe
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
index af795ecf678b..852dd3d2eac7 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
@@ -303,6 +303,32 @@
reg = <0x1c>;
};
+ ptn5110: usb-typec@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "X17";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ typec-power-opmode = "default";
+ pd-disable;
+ self-powered;
+
+ port {
+ typec_con_hs: endpoint {
+ remote-endpoint = <&typec_hs>;
+ };
+ };
+ };
+ };
+
eeprom2: eeprom@54 {
compatible = "nxp,se97b", "atmel,24c02";
reg = <0x54>;
@@ -371,18 +397,6 @@
"WLAN_PERST#", "12V_EN";
/*
- * Controls the on board USB Hub reset which is low
- * active as reset signal. The output-low states, the
- * signal is inactive, e.g. no reset
- */
- usb-reset-hog {
- gpio-hog;
- gpios = <2 GPIO_ACTIVE_LOW>;
- output-low;
- line-name = "USB_RESET#";
- };
-
- /*
* Controls the WiFi card PD pin which is low active
* as power down signal. The output-high states, the signal
* is active, e.g. card is powered down
@@ -492,6 +506,41 @@
status = "okay";
};
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ typec_hs: endpoint {
+ remote-endpoint = <&typec_con_hs>;
+ };
+ };
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ hub_2_0: hub@1 {
+ compatible = "usb424,2517";
+ reg = <1>;
+ reset-gpios = <&expander1 2 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_3v3>;
+ };
+};
+
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc2_hs>, <&pinctrl_usdhc2_gpio>;
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
index eb3f4cfb6986..da8f19a646a9 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
@@ -252,6 +252,32 @@
reg = <0x1c>;
};
+ ptn5110: usb-typec@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "X17";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
+ typec-power-opmode = "default";
+ pd-disable;
+ self-powered;
+
+ port {
+ typec_con_hs: endpoint {
+ remote-endpoint = <&typec_hs>;
+ };
+ };
+ };
+ };
+
eeprom2: eeprom@54 {
compatible = "nxp,se97b", "atmel,24c02";
reg = <0x54>;
@@ -433,6 +459,41 @@
pinctrl-0 = <&pinctrl_tpm5>;
};
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ port {
+ typec_hs: endpoint {
+ remote-endpoint = <&typec_con_hs>;
+ };
+ };
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+
+ hub_2_0: hub@1 {
+ compatible = "usb424,2517";
+ reg = <1>;
+ reset-gpios = <&expander1 2 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_3v3>;
+ };
+};
+
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc2_hs>, <&pinctrl_usdhc2_gpio>;
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
index 9d2328c185c9..edbd8cad35bc 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
@@ -75,6 +75,12 @@
spi-max-frequency = <62000000>;
spi-tx-bus-width = <4>;
spi-rx-bus-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
new file mode 100644
index 000000000000..d14a54ab4fd4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+/dts-v1/;
+
+#include "imx95.dtsi"
+
+/ {
+ model = "NXP i.MX95 19X19 board";
+ compatible = "fsl,imx95-19x19-evk", "fsl,imx95";
+
+ aliases {
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ serial0 = &lpuart1;
+ };
+
+ chosen {
+ stdout-path = &lpuart1;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ linux_cma: linux,cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x80000000 0 0x7f000000>;
+ size = <0 0x3c000000>;
+ linux,cma-default;
+ reusable;
+ };
+ };
+
+ reg_m2_pwr: regulator-m2-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "M.2-power";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&i2c7_pcal6524 20 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_pcie0: regulator-pcie {
+ compatible = "regulator-fixed";
+ regulator-name = "PCIE_WLAN_EN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_m2_pwr>;
+ gpio = <&i2c7_pcal6524 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_slot_pwr: regulator-slot-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "PCIe slot-power";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&i2c7_pcal6524 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VDD_SD2_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ off-on-delay-us = <12000>;
+ };
+};
+
+&lpi2c7 {
+ clock-frequency = <1000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c7>;
+ status = "okay";
+
+ i2c7_pcal6524: i2c7-gpio@22 {
+ compatible = "nxp,pcal6524";
+ reg = <0x22>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c7_pcal6524>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&lpuart1 {
+ /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&mu7 {
+ status = "okay";
+};
+
+&pcie0 {
+ pinctrl-0 = <&pinctrl_pcie0>;
+ pinctrl-names = "default";
+ reset-gpio = <&i2c7_pcal6524 5 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_pcie0>;
+ status = "okay";
+};
+
+&pcie1 {
+ pinctrl-0 = <&pinctrl_pcie1>;
+ pinctrl-names = "default";
+ reset-gpio = <&i2c7_pcal6524 16 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_slot_pwr>;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ pinctrl-3 = <&pinctrl_usdhc1>;
+ bus-width = <8>;
+ non-removable;
+ no-sdio;
+ no-sd;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-3 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&wdog3 {
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&scmi_iomuxc {
+ pinctrl_i2c7_pcal6524: i2c7pcal6524grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO36__GPIO5_IO_BIT16 0x31e
+ >;
+ };
+
+ pinctrl_lpi2c7: lpi2c7grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO08__LPI2C7_SDA 0x40000b9e
+ IMX95_PAD_GPIO_IO09__LPI2C7_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO32__HSIOMIX_TOP_PCIE1_CLKREQ_B 0x4000031e
+ >;
+ };
+
+ pinctrl_pcie1: pcie1grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO35__HSIOMIX_TOP_PCIE2_CLKREQ_B 0x4000031e
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ IMX95_PAD_UART1_RXD__AONMIX_TOP_LPUART1_RX 0x31e
+ IMX95_PAD_UART1_TXD__AONMIX_TOP_LPUART1_TX 0x31e
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ IMX95_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ IMX95_PAD_SD1_CMD__USDHC1_CMD 0x138e
+ IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x138e
+ IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x138e
+ IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x138e
+ IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x138e
+ IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x138e
+ IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x138e
+ IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x138e
+ IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x138e
+ IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ IMX95_PAD_SD1_CMD__USDHC1_CMD 0x138e
+ IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x138e
+ IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x138e
+ IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x138e
+ IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x138e
+ IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x138e
+ IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x138e
+ IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x138e
+ IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x138e
+ IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD1_CLK__USDHC1_CLK 0x15fe
+ IMX95_PAD_SD1_CMD__USDHC1_CMD 0x13fe
+ IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe
+ IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe
+ IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x13fe
+ IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x13fe
+ IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x13fe
+ IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe
+ IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe
+ IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe
+ IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_RESET_B__GPIO3_IO_BIT7 0x31e
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CD_B__GPIO3_IO_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ IMX95_PAD_SD2_CMD__USDHC2_CMD 0x138e
+ IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
+ IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
+ IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
+ IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
+ IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ IMX95_PAD_SD2_CMD__USDHC2_CMD 0x138e
+ IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
+ IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
+ IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
+ IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
+ IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CLK__USDHC2_CLK 0x15fe
+ IMX95_PAD_SD2_CMD__USDHC2_CMD 0x13fe
+ IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe
+ IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
+ IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x13fe
+ IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x13fe
+ IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx95-clock.h b/arch/arm64/boot/dts/freescale/imx95-clock.h
new file mode 100644
index 000000000000..e1f91203e794
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-clock.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __CLOCK_IMX95_H
+#define __CLOCK_IMX95_H
+
+/* The clock indices below must match the clock IDs exposed by the i.MX95 SCMI firmware */
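These IDs are consumed through the SCMI clock protocol provider rather than a conventional clock-controller node. A minimal consumer sketch, assuming the provider node is labelled scmi_clk with #clock-cells = <1> and that LPUART takes a single "ipg" clock (both taken as conventions from the i.MX95 SoC dtsi, not from this patch):

	&lpuart1 {
		/* IMX95_CLK_LPUART1 resolves to the SCMI clock ID defined below in this header */
		clocks = <&scmi_clk IMX95_CLK_LPUART1>;
		clock-names = "ipg";
	};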
+#define IMX95_CLK_32K 1
+#define IMX95_CLK_24M 2
+#define IMX95_CLK_FRO 3
+#define IMX95_CLK_SYSPLL1_VCO 4
+#define IMX95_CLK_SYSPLL1_PFD0_UNGATED 5
+#define IMX95_CLK_SYSPLL1_PFD0 6
+#define IMX95_CLK_SYSPLL1_PFD0_DIV2 7
+#define IMX95_CLK_SYSPLL1_PFD1_UNGATED 8
+#define IMX95_CLK_SYSPLL1_PFD1 9
+#define IMX95_CLK_SYSPLL1_PFD1_DIV2 10
+#define IMX95_CLK_SYSPLL1_PFD2_UNGATED 11
+#define IMX95_CLK_SYSPLL1_PFD2 12
+#define IMX95_CLK_SYSPLL1_PFD2_DIV2 13
+#define IMX95_CLK_AUDIOPLL1_VCO 14
+#define IMX95_CLK_AUDIOPLL1 15
+#define IMX95_CLK_AUDIOPLL2_VCO 16
+#define IMX95_CLK_AUDIOPLL2 17
+#define IMX95_CLK_VIDEOPLL1_VCO 18
+#define IMX95_CLK_VIDEOPLL1 19
+#define IMX95_CLK_RESERVED20 20
+#define IMX95_CLK_RESERVED21 21
+#define IMX95_CLK_RESERVED22 22
+#define IMX95_CLK_RESERVED23 23
+#define IMX95_CLK_ARMPLL_VCO 24
+#define IMX95_CLK_ARMPLL_PFD0_UNGATED 25
+#define IMX95_CLK_ARMPLL_PFD0 26
+#define IMX95_CLK_ARMPLL_PFD1_UNGATED 27
+#define IMX95_CLK_ARMPLL_PFD1 28
+#define IMX95_CLK_ARMPLL_PFD2_UNGATED 29
+#define IMX95_CLK_ARMPLL_PFD2 30
+#define IMX95_CLK_ARMPLL_PFD3_UNGATED 31
+#define IMX95_CLK_ARMPLL_PFD3 32
+#define IMX95_CLK_DRAMPLL_VCO 33
+#define IMX95_CLK_DRAMPLL 34
+#define IMX95_CLK_HSIOPLL_VCO 35
+#define IMX95_CLK_HSIOPLL 36
+#define IMX95_CLK_LDBPLL_VCO 37
+#define IMX95_CLK_LDBPLL 38
+#define IMX95_CLK_EXT1 39
+#define IMX95_CLK_EXT2 40
+
+#define IMX95_CCM_NUM_CLK_SRC 41
+
+#define IMX95_CLK_ADC (IMX95_CCM_NUM_CLK_SRC + 0)
+#define IMX95_CLK_TMU (IMX95_CCM_NUM_CLK_SRC + 1)
+#define IMX95_CLK_BUSAON (IMX95_CCM_NUM_CLK_SRC + 2)
+#define IMX95_CLK_CAN1 (IMX95_CCM_NUM_CLK_SRC + 3)
+#define IMX95_CLK_I3C1 (IMX95_CCM_NUM_CLK_SRC + 4)
+#define IMX95_CLK_I3C1SLOW (IMX95_CCM_NUM_CLK_SRC + 5)
+#define IMX95_CLK_LPI2C1 (IMX95_CCM_NUM_CLK_SRC + 6)
+#define IMX95_CLK_LPI2C2 (IMX95_CCM_NUM_CLK_SRC + 7)
+#define IMX95_CLK_LPSPI1 (IMX95_CCM_NUM_CLK_SRC + 8)
+#define IMX95_CLK_LPSPI2 (IMX95_CCM_NUM_CLK_SRC + 9)
+#define IMX95_CLK_LPTMR1 (IMX95_CCM_NUM_CLK_SRC + 10)
+#define IMX95_CLK_LPUART1 (IMX95_CCM_NUM_CLK_SRC + 11)
+#define IMX95_CLK_LPUART2 (IMX95_CCM_NUM_CLK_SRC + 12)
+#define IMX95_CLK_M33 (IMX95_CCM_NUM_CLK_SRC + 13)
+#define IMX95_CLK_M33SYSTICK (IMX95_CCM_NUM_CLK_SRC + 14)
+#define IMX95_CLK_MQS1 (IMX95_CCM_NUM_CLK_SRC + 15)
+#define IMX95_CLK_PDM (IMX95_CCM_NUM_CLK_SRC + 16)
+#define IMX95_CLK_SAI1 (IMX95_CCM_NUM_CLK_SRC + 17)
+#define IMX95_CLK_SENTINEL (IMX95_CCM_NUM_CLK_SRC + 18)
+#define IMX95_CLK_TPM2 (IMX95_CCM_NUM_CLK_SRC + 19)
+#define IMX95_CLK_TSTMR1 (IMX95_CCM_NUM_CLK_SRC + 20)
+#define IMX95_CLK_CAMAPB (IMX95_CCM_NUM_CLK_SRC + 21)
+#define IMX95_CLK_CAMAXI (IMX95_CCM_NUM_CLK_SRC + 22)
+#define IMX95_CLK_CAMCM0 (IMX95_CCM_NUM_CLK_SRC + 23)
+#define IMX95_CLK_CAMISI (IMX95_CCM_NUM_CLK_SRC + 24)
+#define IMX95_CLK_MIPIPHYCFG (IMX95_CCM_NUM_CLK_SRC + 25)
+#define IMX95_CLK_MIPIPHYPLLBYPASS (IMX95_CCM_NUM_CLK_SRC + 26)
+#define IMX95_CLK_MIPIPHYPLLREF (IMX95_CCM_NUM_CLK_SRC + 27)
+#define IMX95_CLK_MIPITESTBYTE (IMX95_CCM_NUM_CLK_SRC + 28)
+#define IMX95_CLK_A55 (IMX95_CCM_NUM_CLK_SRC + 29)
+#define IMX95_CLK_A55MTRBUS (IMX95_CCM_NUM_CLK_SRC + 30)
+#define IMX95_CLK_A55PERIPH (IMX95_CCM_NUM_CLK_SRC + 31)
+#define IMX95_CLK_DRAMALT (IMX95_CCM_NUM_CLK_SRC + 32)
+#define IMX95_CLK_DRAMAPB (IMX95_CCM_NUM_CLK_SRC + 33)
+#define IMX95_CLK_DISPAPB (IMX95_CCM_NUM_CLK_SRC + 34)
+#define IMX95_CLK_DISPAXI (IMX95_CCM_NUM_CLK_SRC + 35)
+#define IMX95_CLK_DISPDP (IMX95_CCM_NUM_CLK_SRC + 36)
+#define IMX95_CLK_DISPOCRAM (IMX95_CCM_NUM_CLK_SRC + 37)
+#define IMX95_CLK_DISPUSB31 (IMX95_CCM_NUM_CLK_SRC + 38)
+#define IMX95_CLK_DISP1PIX (IMX95_CCM_NUM_CLK_SRC + 39)
+#define IMX95_CLK_DISP2PIX (IMX95_CCM_NUM_CLK_SRC + 40)
+#define IMX95_CLK_DISP3PIX (IMX95_CCM_NUM_CLK_SRC + 41)
+#define IMX95_CLK_GPUAPB (IMX95_CCM_NUM_CLK_SRC + 42)
+#define IMX95_CLK_GPU (IMX95_CCM_NUM_CLK_SRC + 43)
+#define IMX95_CLK_HSIOACSCAN480M (IMX95_CCM_NUM_CLK_SRC + 44)
+#define IMX95_CLK_HSIOACSCAN80M (IMX95_CCM_NUM_CLK_SRC + 45)
+#define IMX95_CLK_HSIO (IMX95_CCM_NUM_CLK_SRC + 46)
+#define IMX95_CLK_HSIOPCIEAUX (IMX95_CCM_NUM_CLK_SRC + 47)
+#define IMX95_CLK_HSIOPCIETEST160M (IMX95_CCM_NUM_CLK_SRC + 48)
+#define IMX95_CLK_HSIOPCIETEST400M (IMX95_CCM_NUM_CLK_SRC + 49)
+#define IMX95_CLK_HSIOPCIETEST500M (IMX95_CCM_NUM_CLK_SRC + 50)
+#define IMX95_CLK_HSIOUSBTEST50M (IMX95_CCM_NUM_CLK_SRC + 51)
+#define IMX95_CLK_HSIOUSBTEST60M (IMX95_CCM_NUM_CLK_SRC + 52)
+#define IMX95_CLK_BUSM7 (IMX95_CCM_NUM_CLK_SRC + 53)
+#define IMX95_CLK_M7 (IMX95_CCM_NUM_CLK_SRC + 54)
+#define IMX95_CLK_M7SYSTICK (IMX95_CCM_NUM_CLK_SRC + 55)
+#define IMX95_CLK_BUSNETCMIX (IMX95_CCM_NUM_CLK_SRC + 56)
+#define IMX95_CLK_ENET (IMX95_CCM_NUM_CLK_SRC + 57)
+#define IMX95_CLK_ENETPHYTEST200M (IMX95_CCM_NUM_CLK_SRC + 58)
+#define IMX95_CLK_ENETPHYTEST500M (IMX95_CCM_NUM_CLK_SRC + 59)
+#define IMX95_CLK_ENETPHYTEST667M (IMX95_CCM_NUM_CLK_SRC + 60)
+#define IMX95_CLK_ENETREF (IMX95_CCM_NUM_CLK_SRC + 61)
+#define IMX95_CLK_ENETTIMER1 (IMX95_CCM_NUM_CLK_SRC + 62)
+#define IMX95_CLK_MQS2 (IMX95_CCM_NUM_CLK_SRC + 63)
+#define IMX95_CLK_SAI2 (IMX95_CCM_NUM_CLK_SRC + 64)
+#define IMX95_CLK_NOCAPB (IMX95_CCM_NUM_CLK_SRC + 65)
+#define IMX95_CLK_NOC (IMX95_CCM_NUM_CLK_SRC + 66)
+#define IMX95_CLK_NPUAPB (IMX95_CCM_NUM_CLK_SRC + 67)
+#define IMX95_CLK_NPU (IMX95_CCM_NUM_CLK_SRC + 68)
+#define IMX95_CLK_CCMCKO1 (IMX95_CCM_NUM_CLK_SRC + 69)
+#define IMX95_CLK_CCMCKO2 (IMX95_CCM_NUM_CLK_SRC + 70)
+#define IMX95_CLK_CCMCKO3 (IMX95_CCM_NUM_CLK_SRC + 71)
+#define IMX95_CLK_CCMCKO4 (IMX95_CCM_NUM_CLK_SRC + 72)
+#define IMX95_CLK_VPUAPB (IMX95_CCM_NUM_CLK_SRC + 73)
+#define IMX95_CLK_VPU (IMX95_CCM_NUM_CLK_SRC + 74)
+#define IMX95_CLK_VPUDSP (IMX95_CCM_NUM_CLK_SRC + 75)
+#define IMX95_CLK_VPUJPEG (IMX95_CCM_NUM_CLK_SRC + 76)
+#define IMX95_CLK_AUDIOXCVR (IMX95_CCM_NUM_CLK_SRC + 77)
+#define IMX95_CLK_BUSWAKEUP (IMX95_CCM_NUM_CLK_SRC + 78)
+#define IMX95_CLK_CAN2 (IMX95_CCM_NUM_CLK_SRC + 79)
+#define IMX95_CLK_CAN3 (IMX95_CCM_NUM_CLK_SRC + 80)
+#define IMX95_CLK_CAN4 (IMX95_CCM_NUM_CLK_SRC + 81)
+#define IMX95_CLK_CAN5 (IMX95_CCM_NUM_CLK_SRC + 82)
+#define IMX95_CLK_FLEXIO1 (IMX95_CCM_NUM_CLK_SRC + 83)
+#define IMX95_CLK_FLEXIO2 (IMX95_CCM_NUM_CLK_SRC + 84)
+#define IMX95_CLK_FLEXSPI1 (IMX95_CCM_NUM_CLK_SRC + 85)
+#define IMX95_CLK_I3C2 (IMX95_CCM_NUM_CLK_SRC + 86)
+#define IMX95_CLK_I3C2SLOW (IMX95_CCM_NUM_CLK_SRC + 87)
+#define IMX95_CLK_LPI2C3 (IMX95_CCM_NUM_CLK_SRC + 88)
+#define IMX95_CLK_LPI2C4 (IMX95_CCM_NUM_CLK_SRC + 89)
+#define IMX95_CLK_LPI2C5 (IMX95_CCM_NUM_CLK_SRC + 90)
+#define IMX95_CLK_LPI2C6 (IMX95_CCM_NUM_CLK_SRC + 91)
+#define IMX95_CLK_LPI2C7 (IMX95_CCM_NUM_CLK_SRC + 92)
+#define IMX95_CLK_LPI2C8 (IMX95_CCM_NUM_CLK_SRC + 93)
+#define IMX95_CLK_LPSPI3 (IMX95_CCM_NUM_CLK_SRC + 94)
+#define IMX95_CLK_LPSPI4 (IMX95_CCM_NUM_CLK_SRC + 95)
+#define IMX95_CLK_LPSPI5 (IMX95_CCM_NUM_CLK_SRC + 96)
+#define IMX95_CLK_LPSPI6 (IMX95_CCM_NUM_CLK_SRC + 97)
+#define IMX95_CLK_LPSPI7 (IMX95_CCM_NUM_CLK_SRC + 98)
+#define IMX95_CLK_LPSPI8 (IMX95_CCM_NUM_CLK_SRC + 99)
+#define IMX95_CLK_LPTMR2 (IMX95_CCM_NUM_CLK_SRC + 100)
+#define IMX95_CLK_LPUART3 (IMX95_CCM_NUM_CLK_SRC + 101)
+#define IMX95_CLK_LPUART4 (IMX95_CCM_NUM_CLK_SRC + 102)
+#define IMX95_CLK_LPUART5 (IMX95_CCM_NUM_CLK_SRC + 103)
+#define IMX95_CLK_LPUART6 (IMX95_CCM_NUM_CLK_SRC + 104)
+#define IMX95_CLK_LPUART7 (IMX95_CCM_NUM_CLK_SRC + 105)
+#define IMX95_CLK_LPUART8 (IMX95_CCM_NUM_CLK_SRC + 106)
+#define IMX95_CLK_SAI3 (IMX95_CCM_NUM_CLK_SRC + 107)
+#define IMX95_CLK_SAI4 (IMX95_CCM_NUM_CLK_SRC + 108)
+#define IMX95_CLK_SAI5 (IMX95_CCM_NUM_CLK_SRC + 109)
+#define IMX95_CLK_SPDIF (IMX95_CCM_NUM_CLK_SRC + 110)
+#define IMX95_CLK_SWOTRACE (IMX95_CCM_NUM_CLK_SRC + 111)
+#define IMX95_CLK_TPM4 (IMX95_CCM_NUM_CLK_SRC + 112)
+#define IMX95_CLK_TPM5 (IMX95_CCM_NUM_CLK_SRC + 113)
+#define IMX95_CLK_TPM6 (IMX95_CCM_NUM_CLK_SRC + 114)
+#define IMX95_CLK_TSTMR2 (IMX95_CCM_NUM_CLK_SRC + 115)
+#define IMX95_CLK_USBPHYBURUNIN (IMX95_CCM_NUM_CLK_SRC + 116)
+#define IMX95_CLK_USDHC1 (IMX95_CCM_NUM_CLK_SRC + 117)
+#define IMX95_CLK_USDHC2 (IMX95_CCM_NUM_CLK_SRC + 118)
+#define IMX95_CLK_USDHC3 (IMX95_CCM_NUM_CLK_SRC + 119)
+#define IMX95_CLK_V2XPK (IMX95_CCM_NUM_CLK_SRC + 120)
+#define IMX95_CLK_WAKEUPAXI (IMX95_CCM_NUM_CLK_SRC + 121)
+#define IMX95_CLK_XSPISLVROOT (IMX95_CCM_NUM_CLK_SRC + 122)
+#define IMX95_CLK_SEL_EXT (IMX95_CCM_NUM_CLK_SRC + 123 + 0)
+#define IMX95_CLK_SEL_A55C0 (IMX95_CCM_NUM_CLK_SRC + 123 + 1)
+#define IMX95_CLK_SEL_A55C1 (IMX95_CCM_NUM_CLK_SRC + 123 + 2)
+#define IMX95_CLK_SEL_A55C2 (IMX95_CCM_NUM_CLK_SRC + 123 + 3)
+#define IMX95_CLK_SEL_A55C3 (IMX95_CCM_NUM_CLK_SRC + 123 + 4)
+#define IMX95_CLK_SEL_A55C4 (IMX95_CCM_NUM_CLK_SRC + 123 + 5)
+#define IMX95_CLK_SEL_A55C5 (IMX95_CCM_NUM_CLK_SRC + 123 + 6)
+#define IMX95_CLK_SEL_A55P (IMX95_CCM_NUM_CLK_SRC + 123 + 7)
+#define IMX95_CLK_SEL_DRAM (IMX95_CCM_NUM_CLK_SRC + 123 + 8)
+#define IMX95_CLK_SEL_TEMPSENSE (IMX95_CCM_NUM_CLK_SRC + 123 + 9)
+
+#endif /* __CLOCK_IMX95_H */
diff --git a/arch/arm64/boot/dts/freescale/imx95-pinfunc.h b/arch/arm64/boot/dts/freescale/imx95-pinfunc.h
new file mode 100644
index 000000000000..9f614eea7c86
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-pinfunc.h
@@ -0,0 +1,865 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DTS_IMX95_PINFUNC_H
+#define __DTS_IMX95_PINFUNC_H
+
+/*
+ * The pin function ID is a tuple of
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ */
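Each define below bundles the mux register, config register, input (daisy-chain) register, mux mode and input value for one pad function; a board pinctrl group pairs it with a sixth, board-specific pad setting value. A minimal sketch, reusing the LPI2C7 group and the 0x40000b9e pad setting from the board file earlier in this patch:

	pinctrl_lpi2c7: lpi2c7grp {
		fsl,pins = <
			IMX95_PAD_GPIO_IO08__LPI2C7_SDA		0x40000b9e
			IMX95_PAD_GPIO_IO09__LPI2C7_SCL		0x40000b9e
		>;
	};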
+#define IMX95_PAD_DAP_TDI__JTAG_MUX_TDI 0x0000 0x0204 0x0610 0x00 0x00
+#define IMX95_PAD_DAP_TDI__NETCMIX_TOP_MQS2_LEFT 0x0000 0x0204 0x0000 0x01 0x00
+#define IMX95_PAD_DAP_TDI__NETCMIX_TOP_NETC_TMR_1588_ALARM1 0x0000 0x0204 0x0000 0x02 0x00
+#define IMX95_PAD_DAP_TDI__CAN2_TX 0x0000 0x0204 0x0000 0x03 0x00
+#define IMX95_PAD_DAP_TDI__FLEXIO2_FLEXIO_BIT30 0x0000 0x0204 0x0000 0x04 0x00
+#define IMX95_PAD_DAP_TDI__GPIO3_IO_BIT28 0x0000 0x0204 0x0000 0x05 0x00
+#define IMX95_PAD_DAP_TDI__LPUART5_RX 0x0000 0x0204 0x0570 0x06 0x00
+
+#define IMX95_PAD_DAP_TMS_SWDIO__JTAG_MUX_TMS 0x0004 0x0208 0x0614 0x00 0x00
+#define IMX95_PAD_DAP_TMS_SWDIO__CAN4_TX 0x0004 0x0208 0x0000 0x02 0x00
+#define IMX95_PAD_DAP_TMS_SWDIO__FLEXIO2_FLEXIO_BIT31 0x0004 0x0208 0x0000 0x04 0x00
+#define IMX95_PAD_DAP_TMS_SWDIO__GPIO3_IO_BIT29 0x0004 0x0208 0x0000 0x05 0x00
+#define IMX95_PAD_DAP_TMS_SWDIO__LPUART5_RTS_B 0x0004 0x0208 0x0000 0x06 0x00
+
+#define IMX95_PAD_DAP_TCLK_SWCLK__JTAG_MUX_TCK 0x0008 0x020C 0x060C 0x00 0x00
+#define IMX95_PAD_DAP_TCLK_SWCLK__CAN4_RX 0x0008 0x020C 0x044C 0x02 0x00
+#define IMX95_PAD_DAP_TCLK_SWCLK__FLEXIO1_FLEXIO_BIT30 0x0008 0x020C 0x0460 0x04 0x00
+#define IMX95_PAD_DAP_TCLK_SWCLK__GPIO3_IO_BIT30 0x0008 0x020C 0x0000 0x05 0x00
+#define IMX95_PAD_DAP_TCLK_SWCLK__LPUART5_CTS_B 0x0008 0x020C 0x056C 0x06 0x00
+
+#define IMX95_PAD_DAP_TDO_TRACESWO__JTAG_MUX_TDO 0x000C 0x0210 0x0000 0x00 0x00
+#define IMX95_PAD_DAP_TDO_TRACESWO__NETCMIX_TOP_MQS2_RIGHT 0x000C 0x0210 0x0000 0x01 0x00
+#define IMX95_PAD_DAP_TDO_TRACESWO__NETCMIX_TOP_NETC_TMR_1588_ALARM 0x000C 0x0210 0x0000 0x02 0x00
+#define IMX95_PAD_DAP_TDO_TRACESWO__CAN2_RX 0x000C 0x0210 0x0444 0x03 0x00
+#define IMX95_PAD_DAP_TDO_TRACESWO__FLEXIO1_FLEXIO_BIT31 0x000C 0x0210 0x0464 0x04 0x00
+#define IMX95_PAD_DAP_TDO_TRACESWO__GPIO3_IO_BIT31 0x000C 0x0210 0x0000 0x05 0x00
+#define IMX95_PAD_DAP_TDO_TRACESWO__LPUART5_TX 0x000C 0x0210 0x0574 0x06 0x00
+
+#define IMX95_PAD_GPIO_IO00__GPIO2_IO_BIT0 0x0010 0x0214 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO00__LPI2C3_SDA 0x0010 0x0214 0x0504 0x11 0x00
+#define IMX95_PAD_GPIO_IO00__LPSPI6_PCS0 0x0010 0x0214 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO00__LPUART5_TX 0x0010 0x0214 0x0574 0x05 0x01
+#define IMX95_PAD_GPIO_IO00__LPI2C5_SDA 0x0010 0x0214 0x0514 0x16 0x00
+#define IMX95_PAD_GPIO_IO00__FLEXIO1_FLEXIO_BIT0 0x0010 0x0214 0x0468 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO01__GPIO2_IO_BIT1 0x0014 0x0218 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO01__LPI2C3_SCL 0x0014 0x0218 0x0500 0x11 0x00
+#define IMX95_PAD_GPIO_IO01__LPSPI6_SIN 0x0014 0x0218 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO01__LPUART5_RX 0x0014 0x0218 0x0570 0x05 0x01
+#define IMX95_PAD_GPIO_IO01__LPI2C5_SCL 0x0014 0x0218 0x0510 0x16 0x00
+#define IMX95_PAD_GPIO_IO01__FLEXIO1_FLEXIO_BIT1 0x0014 0x0218 0x046C 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO02__GPIO2_IO_BIT2 0x0018 0x021C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO02__LPI2C4_SDA 0x0018 0x021C 0x050C 0x11 0x00
+#define IMX95_PAD_GPIO_IO02__LPSPI6_SOUT 0x0018 0x021C 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO02__LPUART5_CTS_B 0x0018 0x021C 0x056C 0x05 0x01
+#define IMX95_PAD_GPIO_IO02__LPI2C6_SDA 0x0018 0x021C 0x051C 0x16 0x00
+#define IMX95_PAD_GPIO_IO02__FLEXIO1_FLEXIO_BIT2 0x0018 0x021C 0x0470 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO03__GPIO2_IO_BIT3 0x001C 0x0220 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO03__LPI2C4_SCL 0x001C 0x0220 0x0508 0x11 0x00
+#define IMX95_PAD_GPIO_IO03__LPSPI6_SCK 0x001C 0x0220 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO03__LPUART5_RTS_B 0x001C 0x0220 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO03__LPI2C6_SCL 0x001C 0x0220 0x0518 0x16 0x00
+#define IMX95_PAD_GPIO_IO03__FLEXIO1_FLEXIO_BIT3 0x001C 0x0220 0x0474 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO04__GPIO2_IO_BIT4 0x0020 0x0224 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO04__TPM3_CH0 0x0020 0x0224 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO04__AONMIX_TOP_PDM_CLK 0x0020 0x0224 0x0000 0x02 0x00
+#define IMX95_PAD_GPIO_IO04__CAN4_TX 0x0020 0x0224 0x0000 0x03 0x00
+#define IMX95_PAD_GPIO_IO04__LPSPI7_PCS0 0x0020 0x0224 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO04__LPUART6_TX 0x0020 0x0224 0x0580 0x05 0x01
+#define IMX95_PAD_GPIO_IO04__LPI2C6_SDA 0x0020 0x0224 0x051C 0x16 0x01
+#define IMX95_PAD_GPIO_IO04__FLEXIO1_FLEXIO_BIT4 0x0020 0x0224 0x0478 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO05__GPIO2_IO_BIT5 0x0024 0x0228 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO05__TPM4_CH0 0x0024 0x0228 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO05__AONMIX_TOP_PDM_BIT_STREAM_BIT0 0x0024 0x0228 0x040C 0x02 0x01
+#define IMX95_PAD_GPIO_IO05__CAN4_RX 0x0024 0x0228 0x044C 0x03 0x01
+#define IMX95_PAD_GPIO_IO05__LPSPI7_SIN 0x0024 0x0228 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO05__LPUART6_RX 0x0024 0x0228 0x057C 0x05 0x01
+#define IMX95_PAD_GPIO_IO05__LPI2C6_SCL 0x0024 0x0228 0x0518 0x16 0x01
+#define IMX95_PAD_GPIO_IO05__FLEXIO1_FLEXIO_BIT5 0x0024 0x0228 0x047C 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO06__GPIO2_IO_BIT6 0x0028 0x022C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO06__TPM5_CH0 0x0028 0x022C 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO06__AONMIX_TOP_PDM_BIT_STREAM_BIT1 0x0028 0x022C 0x0410 0x02 0x01
+#define IMX95_PAD_GPIO_IO06__LPSPI7_SOUT 0x0028 0x022C 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO06__LPUART6_CTS_B 0x0028 0x022C 0x0578 0x05 0x01
+#define IMX95_PAD_GPIO_IO06__LPI2C7_SDA 0x0028 0x022C 0x0524 0x16 0x00
+#define IMX95_PAD_GPIO_IO06__FLEXIO1_FLEXIO_BIT6 0x0028 0x022C 0x0480 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO07__GPIO2_IO_BIT7 0x002C 0x0230 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO07__LPSPI3_PCS1 0x002C 0x0230 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO07__LPSPI7_SCK 0x002C 0x0230 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO07__LPUART6_RTS_B 0x002C 0x0230 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO07__LPI2C7_SCL 0x002C 0x0230 0x0520 0x16 0x00
+#define IMX95_PAD_GPIO_IO07__FLEXIO1_FLEXIO_BIT7 0x002C 0x0230 0x0484 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO08__GPIO2_IO_BIT8 0x0030 0x0234 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO08__LPSPI3_PCS0 0x0030 0x0234 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO08__TPM6_CH0 0x0030 0x0234 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO08__LPUART7_TX 0x0030 0x0234 0x0588 0x05 0x01
+#define IMX95_PAD_GPIO_IO08__LPI2C7_SDA 0x0030 0x0234 0x0524 0x16 0x01
+#define IMX95_PAD_GPIO_IO08__FLEXIO1_FLEXIO_BIT8 0x0030 0x0234 0x0488 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO09__GPIO2_IO_BIT9 0x0034 0x0238 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO09__LPSPI3_SIN 0x0034 0x0238 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO09__TPM3_EXTCLK 0x0034 0x0238 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO09__LPUART7_RX 0x0034 0x0238 0x0584 0x05 0x01
+#define IMX95_PAD_GPIO_IO09__LPI2C7_SCL 0x0034 0x0238 0x0520 0x16 0x01
+#define IMX95_PAD_GPIO_IO09__FLEXIO1_FLEXIO_BIT9 0x0034 0x0238 0x048C 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO10__GPIO2_IO_BIT10 0x0038 0x023C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO10__LPSPI3_SOUT 0x0038 0x023C 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO10__TPM4_EXTCLK 0x0038 0x023C 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO10__LPUART7_CTS_B 0x0038 0x023C 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO10__LPI2C8_SDA 0x0038 0x023C 0x052C 0x16 0x00
+#define IMX95_PAD_GPIO_IO10__FLEXIO1_FLEXIO_BIT10 0x0038 0x023C 0x0490 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO11__GPIO2_IO_BIT11 0x003C 0x0240 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO11__LPSPI3_SCK 0x003C 0x0240 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO11__TPM5_EXTCLK 0x003C 0x0240 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO11__LPUART7_RTS_B 0x003C 0x0240 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO11__LPI2C8_SCL 0x003C 0x0240 0x0528 0x16 0x00
+#define IMX95_PAD_GPIO_IO11__FLEXIO1_FLEXIO_BIT11 0x003C 0x0240 0x0494 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO12__GPIO2_IO_BIT12 0x0040 0x0244 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO12__TPM3_CH2 0x0040 0x0244 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO12__AONMIX_TOP_PDM_BIT_STREAM_BIT2 0x0040 0x0244 0x0414 0x02 0x00
+#define IMX95_PAD_GPIO_IO12__FLEXIO1_FLEXIO_BIT12 0x0040 0x0244 0x0498 0x03 0x00
+#define IMX95_PAD_GPIO_IO12__LPSPI8_PCS0 0x0040 0x0244 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO12__LPUART8_TX 0x0040 0x0244 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO12__LPI2C8_SDA 0x0040 0x0244 0x052C 0x16 0x01
+#define IMX95_PAD_GPIO_IO12__SAI3_RX_SYNC 0x0040 0x0244 0x0590 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO13__GPIO2_IO_BIT13 0x0044 0x0248 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO13__TPM4_CH2 0x0044 0x0248 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO13__AONMIX_TOP_PDM_BIT_STREAM_BIT3 0x0044 0x0248 0x0418 0x02 0x00
+#define IMX95_PAD_GPIO_IO13__LPSPI8_SIN 0x0044 0x0248 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO13__LPUART8_RX 0x0044 0x0248 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO13__LPI2C8_SCL 0x0044 0x0248 0x0528 0x16 0x01
+#define IMX95_PAD_GPIO_IO13__FLEXIO1_FLEXIO_BIT13 0x0044 0x0248 0x049C 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO14__GPIO2_IO_BIT14 0x0048 0x024C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO14__LPUART3_TX 0x0048 0x024C 0x055C 0x01 0x01
+#define IMX95_PAD_GPIO_IO14__LPSPI8_SOUT 0x0048 0x024C 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO14__LPUART8_CTS_B 0x0048 0x024C 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO14__LPUART4_TX 0x0048 0x024C 0x0568 0x06 0x01
+#define IMX95_PAD_GPIO_IO14__FLEXIO1_FLEXIO_BIT14 0x0048 0x024C 0x04A0 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO15__GPIO2_IO_BIT15 0x004C 0x0250 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO15__LPUART3_RX 0x004C 0x0250 0x0558 0x01 0x01
+#define IMX95_PAD_GPIO_IO15__LPSPI8_SCK 0x004C 0x0250 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO15__LPUART8_RTS_B 0x004C 0x0250 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO15__LPUART4_RX 0x004C 0x0250 0x0564 0x06 0x01
+#define IMX95_PAD_GPIO_IO15__FLEXIO1_FLEXIO_BIT15 0x004C 0x0250 0x04A4 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO16__GPIO2_IO_BIT16 0x0050 0x0254 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO16__SAI3_TX_BCLK 0x0050 0x0254 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO16__AONMIX_TOP_PDM_BIT_STREAM_BIT2 0x0050 0x0254 0x0414 0x02 0x01
+#define IMX95_PAD_GPIO_IO16__LPUART3_CTS_B 0x0050 0x0254 0x0554 0x04 0x01
+#define IMX95_PAD_GPIO_IO16__LPSPI4_PCS2 0x0050 0x0254 0x0538 0x05 0x01
+#define IMX95_PAD_GPIO_IO16__LPUART4_CTS_B 0x0050 0x0254 0x0560 0x06 0x01
+#define IMX95_PAD_GPIO_IO16__FLEXIO1_FLEXIO_BIT16 0x0050 0x0254 0x04A8 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO17__GPIO2_IO_BIT17 0x0054 0x0258 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO17__SAI3_MCLK 0x0054 0x0258 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO17__LPUART3_RTS_B 0x0054 0x0258 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO17__LPSPI4_PCS1 0x0054 0x0258 0x0534 0x05 0x01
+#define IMX95_PAD_GPIO_IO17__LPUART4_RTS_B 0x0054 0x0258 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO17__FLEXIO1_FLEXIO_BIT17 0x0054 0x0258 0x04AC 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO18__GPIO2_IO_BIT18 0x0058 0x025C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO18__SAI3_RX_BCLK 0x0058 0x025C 0x058C 0x01 0x00
+#define IMX95_PAD_GPIO_IO18__LPSPI5_PCS0 0x0058 0x025C 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO18__LPSPI4_PCS0 0x0058 0x025C 0x0530 0x05 0x01
+#define IMX95_PAD_GPIO_IO18__TPM5_CH2 0x0058 0x025C 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO18__FLEXIO1_FLEXIO_BIT18 0x0058 0x025C 0x04B0 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO19__GPIO2_IO_BIT19 0x005C 0x0260 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO19__SAI3_RX_SYNC 0x005C 0x0260 0x0590 0x01 0x01
+#define IMX95_PAD_GPIO_IO19__AONMIX_TOP_PDM_BIT_STREAM_BIT3 0x005C 0x0260 0x0418 0x02 0x01
+#define IMX95_PAD_GPIO_IO19__FLEXIO1_FLEXIO_BIT19 0x005C 0x0260 0x04B4 0x03 0x00
+#define IMX95_PAD_GPIO_IO19__LPSPI5_SIN 0x005C 0x0260 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO19__LPSPI4_SIN 0x005C 0x0260 0x0540 0x05 0x01
+#define IMX95_PAD_GPIO_IO19__TPM6_CH2 0x005C 0x0260 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO19__SAI3_TX_DATA_BIT0 0x005C 0x0260 0x0000 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO20__GPIO2_IO_BIT20 0x0060 0x0264 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO20__SAI3_RX_DATA_BIT0 0x0060 0x0264 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO20__AONMIX_TOP_PDM_BIT_STREAM_BIT0 0x0060 0x0264 0x040C 0x02 0x02
+#define IMX95_PAD_GPIO_IO20__LPSPI5_SOUT 0x0060 0x0264 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO20__LPSPI4_SOUT 0x0060 0x0264 0x0544 0x05 0x01
+#define IMX95_PAD_GPIO_IO20__TPM3_CH1 0x0060 0x0264 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO20__FLEXIO1_FLEXIO_BIT20 0x0060 0x0264 0x04B8 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO21__GPIO2_IO_BIT21 0x0064 0x0268 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO21__SAI3_TX_DATA_BIT0 0x0064 0x0268 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO21__AONMIX_TOP_PDM_CLK 0x0064 0x0268 0x0000 0x02 0x00
+#define IMX95_PAD_GPIO_IO21__FLEXIO1_FLEXIO_BIT21 0x0064 0x0268 0x04BC 0x03 0x00
+#define IMX95_PAD_GPIO_IO21__LPSPI5_SCK 0x0064 0x0268 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO21__LPSPI4_SCK 0x0064 0x0268 0x053C 0x05 0x01
+#define IMX95_PAD_GPIO_IO21__TPM4_CH1 0x0064 0x0268 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO21__SAI3_RX_BCLK 0x0064 0x0268 0x058C 0x07 0x01
+
+#define IMX95_PAD_GPIO_IO22__GPIO2_IO_BIT22 0x0068 0x026C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO22__USDHC3_CLK 0x0068 0x026C 0x05C8 0x01 0x00
+#define IMX95_PAD_GPIO_IO22__SPDIF_IN 0x0068 0x026C 0x0454 0x02 0x02
+#define IMX95_PAD_GPIO_IO22__CAN5_TX 0x0068 0x026C 0x0000 0x03 0x00
+#define IMX95_PAD_GPIO_IO22__TPM5_CH1 0x0068 0x026C 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO22__TPM6_EXTCLK 0x0068 0x026C 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO22__LPI2C5_SDA 0x0068 0x026C 0x0514 0x16 0x01
+#define IMX95_PAD_GPIO_IO22__FLEXIO1_FLEXIO_BIT22 0x0068 0x026C 0x04C0 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO23__GPIO2_IO_BIT23 0x006C 0x0270 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO23__USDHC3_CMD 0x006C 0x0270 0x05CC 0x01 0x00
+#define IMX95_PAD_GPIO_IO23__SPDIF_OUT 0x006C 0x0270 0x0000 0x02 0x00
+#define IMX95_PAD_GPIO_IO23__CAN5_RX 0x006C 0x0270 0x0450 0x03 0x00
+#define IMX95_PAD_GPIO_IO23__TPM6_CH1 0x006C 0x0270 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO23__LPI2C5_SCL 0x006C 0x0270 0x0510 0x16 0x01
+#define IMX95_PAD_GPIO_IO23__FLEXIO1_FLEXIO_BIT23 0x006C 0x0270 0x04C4 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO24__GPIO2_IO_BIT24 0x0070 0x0274 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO24__USDHC3_DATA0 0x0070 0x0274 0x05D0 0x01 0x00
+#define IMX95_PAD_GPIO_IO24__TPM3_CH3 0x0070 0x0274 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO24__JTAG_MUX_TDO 0x0070 0x0274 0x0000 0x05 0x00
+#define IMX95_PAD_GPIO_IO24__LPSPI6_PCS1 0x0070 0x0274 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO24__FLEXIO1_FLEXIO_BIT24 0x0070 0x0274 0x04C8 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO25__GPIO2_IO_BIT25 0x0074 0x0278 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO25__USDHC3_DATA1 0x0074 0x0278 0x05D4 0x01 0x00
+#define IMX95_PAD_GPIO_IO25__CAN2_TX 0x0074 0x0278 0x0000 0x02 0x00
+#define IMX95_PAD_GPIO_IO25__TPM4_CH3 0x0074 0x0278 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO25__JTAG_MUX_TCK 0x0074 0x0278 0x060C 0x05 0x01
+#define IMX95_PAD_GPIO_IO25__LPSPI7_PCS1 0x0074 0x0278 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO25__FLEXIO1_FLEXIO_BIT25 0x0074 0x0278 0x04CC 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO26__GPIO2_IO_BIT26 0x0078 0x027C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO26__USDHC3_DATA2 0x0078 0x027C 0x05D8 0x01 0x00
+#define IMX95_PAD_GPIO_IO26__AONMIX_TOP_PDM_BIT_STREAM_BIT1 0x0078 0x027C 0x0410 0x02 0x02
+#define IMX95_PAD_GPIO_IO26__FLEXIO1_FLEXIO_BIT26 0x0078 0x027C 0x0458 0x03 0x01
+#define IMX95_PAD_GPIO_IO26__TPM5_CH3 0x0078 0x027C 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO26__JTAG_MUX_TDI 0x0078 0x027C 0x0610 0x05 0x01
+#define IMX95_PAD_GPIO_IO26__LPSPI8_PCS1 0x0078 0x027C 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO26__SAI3_TX_SYNC 0x0078 0x027C 0x0000 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO27__GPIO2_IO_BIT27 0x007C 0x0280 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO27__USDHC3_DATA3 0x007C 0x0280 0x05DC 0x01 0x00
+#define IMX95_PAD_GPIO_IO27__CAN2_RX 0x007C 0x0280 0x0444 0x02 0x02
+#define IMX95_PAD_GPIO_IO27__TPM6_CH3 0x007C 0x0280 0x0000 0x04 0x00
+#define IMX95_PAD_GPIO_IO27__JTAG_MUX_TMS 0x007C 0x0280 0x0614 0x05 0x01
+#define IMX95_PAD_GPIO_IO27__LPSPI5_PCS1 0x007C 0x0280 0x0000 0x06 0x00
+#define IMX95_PAD_GPIO_IO27__FLEXIO1_FLEXIO_BIT27 0x007C 0x0280 0x045C 0x07 0x01
+
+#define IMX95_PAD_GPIO_IO28__GPIO2_IO_BIT28 0x0080 0x0284 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO28__LPI2C3_SDA 0x0080 0x0284 0x0504 0x11 0x01
+#define IMX95_PAD_GPIO_IO28__CAN3_TX 0x0080 0x0284 0x0000 0x02 0x00
+#define IMX95_PAD_GPIO_IO28__FLEXIO1_FLEXIO_BIT28 0x0080 0x0284 0x0000 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO29__GPIO2_IO_BIT29 0x0084 0x0288 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO29__LPI2C3_SCL 0x0084 0x0288 0x0500 0x11 0x01
+#define IMX95_PAD_GPIO_IO29__CAN3_RX 0x0084 0x0288 0x0448 0x02 0x01
+#define IMX95_PAD_GPIO_IO29__FLEXIO1_FLEXIO_BIT29 0x0084 0x0288 0x0000 0x07 0x00
+
+#define IMX95_PAD_GPIO_IO30__GPIO2_IO_BIT30 0x0088 0x028C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO30__LPI2C4_SDA 0x0088 0x028C 0x050C 0x11 0x01
+#define IMX95_PAD_GPIO_IO30__CAN5_TX 0x0088 0x028C 0x0000 0x02 0x00
+#define IMX95_PAD_GPIO_IO30__FLEXIO1_FLEXIO_BIT30 0x0088 0x028C 0x0460 0x07 0x01
+
+#define IMX95_PAD_GPIO_IO31__GPIO2_IO_BIT31 0x008C 0x0290 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO31__LPI2C4_SCL 0x008C 0x0290 0x0508 0x11 0x01
+#define IMX95_PAD_GPIO_IO31__CAN5_RX 0x008C 0x0290 0x0450 0x02 0x01
+#define IMX95_PAD_GPIO_IO31__FLEXIO1_FLEXIO_BIT31 0x008C 0x0290 0x0464 0x07 0x01
+
+#define IMX95_PAD_GPIO_IO32__GPIO5_IO_BIT12 0x0090 0x0294 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO32__HSIOMIX_TOP_PCIE1_CLKREQ_B 0x0090 0x0294 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO32__LPUART6_TX 0x0090 0x0294 0x0580 0x02 0x00
+#define IMX95_PAD_GPIO_IO32__LPSPI4_PCS2 0x0090 0x0294 0x0538 0x04 0x00
+
+#define IMX95_PAD_GPIO_IO33__GPIO5_IO_BIT13 0x0094 0x0298 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO33__LPUART6_RX 0x0094 0x0298 0x057C 0x02 0x00
+#define IMX95_PAD_GPIO_IO33__LPSPI4_PCS1 0x0094 0x0298 0x0534 0x04 0x00
+
+#define IMX95_PAD_GPIO_IO34__GPIO5_IO_BIT14 0x0098 0x029C 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO34__LPUART6_CTS_B 0x0098 0x029C 0x0578 0x02 0x00
+#define IMX95_PAD_GPIO_IO34__LPSPI4_PCS0 0x0098 0x029C 0x0530 0x04 0x00
+
+#define IMX95_PAD_GPIO_IO35__GPIO5_IO_BIT15 0x009C 0x02A0 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO35__HSIOMIX_TOP_PCIE2_CLKREQ_B 0x009C 0x02A0 0x0000 0x01 0x00
+#define IMX95_PAD_GPIO_IO35__LPUART6_RTS_B 0x009C 0x02A0 0x0000 0x02 0x00
+#define IMX95_PAD_GPIO_IO35__LPSPI4_SIN 0x009C 0x02A0 0x0540 0x04 0x00
+
+#define IMX95_PAD_GPIO_IO36__LPSPI4_SOUT 0x00A0 0x02A4 0x0544 0x04 0x00
+#define IMX95_PAD_GPIO_IO36__GPIO5_IO_BIT16 0x00A0 0x02A4 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO36__LPUART7_TX 0x00A0 0x02A4 0x0588 0x02 0x00
+
+#define IMX95_PAD_GPIO_IO37__GPIO5_IO_BIT17 0x00A4 0x02A8 0x0000 0x00 0x00
+#define IMX95_PAD_GPIO_IO37__LPUART7_RX 0x00A4 0x02A8 0x0584 0x02 0x00
+#define IMX95_PAD_GPIO_IO37__LPSPI4_SCK 0x00A4 0x02A8 0x053C 0x04 0x00
+
+#define IMX95_PAD_CCM_CLKO1__CCMSRCGPCMIX_TOP_CLKO_1 0x00A8 0x02AC 0x0000 0x00 0x00
+#define IMX95_PAD_CCM_CLKO1__NETCMIX_TOP_NETC_TMR_1588_TRIG1 0x00A8 0x02AC 0x0434 0x01 0x00
+#define IMX95_PAD_CCM_CLKO1__FLEXIO1_FLEXIO_BIT26 0x00A8 0x02AC 0x0458 0x04 0x00
+#define IMX95_PAD_CCM_CLKO1__GPIO3_IO_BIT26 0x00A8 0x02AC 0x0000 0x05 0x00
+
+#define IMX95_PAD_CCM_CLKO2__GPIO3_IO_BIT27 0x00AC 0x02B0 0x0000 0x05 0x00
+#define IMX95_PAD_CCM_CLKO2__CCMSRCGPCMIX_TOP_CLKO_2 0x00AC 0x02B0 0x0000 0x00 0x00
+#define IMX95_PAD_CCM_CLKO2__NETCMIX_TOP_NETC_TMR_1588_PP1 0x00AC 0x02B0 0x0000 0x01 0x00
+#define IMX95_PAD_CCM_CLKO2__FLEXIO1_FLEXIO_BIT27 0x00AC 0x02B0 0x045C 0x04 0x00
+
+#define IMX95_PAD_CCM_CLKO3__CCMSRCGPCMIX_TOP_CLKO_3 0x00B0 0x02B4 0x0000 0x00 0x00
+#define IMX95_PAD_CCM_CLKO3__NETCMIX_TOP_NETC_TMR_1588_TRIG2 0x00B0 0x02B4 0x0438 0x01 0x00
+#define IMX95_PAD_CCM_CLKO3__CAN3_TX 0x00B0 0x02B4 0x0000 0x02 0x00
+#define IMX95_PAD_CCM_CLKO3__FLEXIO2_FLEXIO_BIT28 0x00B0 0x02B4 0x0000 0x04 0x00
+#define IMX95_PAD_CCM_CLKO3__GPIO4_IO_BIT28 0x00B0 0x02B4 0x0000 0x05 0x00
+
+#define IMX95_PAD_CCM_CLKO4__CCMSRCGPCMIX_TOP_CLKO_4 0x00B4 0x02B8 0x0000 0x00 0x00
+#define IMX95_PAD_CCM_CLKO4__NETCMIX_TOP_NETC_TMR_1588_PP2 0x00B4 0x02B8 0x0000 0x01 0x00
+#define IMX95_PAD_CCM_CLKO4__CAN3_RX 0x00B4 0x02B8 0x0448 0x02 0x00
+#define IMX95_PAD_CCM_CLKO4__FLEXIO2_FLEXIO_BIT29 0x00B4 0x02B8 0x0000 0x04 0x00
+#define IMX95_PAD_CCM_CLKO4__GPIO4_IO_BIT29 0x00B4 0x02B8 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_MDC__NETCMIX_TOP_NETC_MDC 0x00B8 0x02BC 0x0424 0x00 0x00
+#define IMX95_PAD_ENET1_MDC__LPUART3_DCD_B 0x00B8 0x02BC 0x0000 0x01 0x00
+#define IMX95_PAD_ENET1_MDC__I3C2_SCL 0x00B8 0x02BC 0x04F8 0x02 0x00
+#define IMX95_PAD_ENET1_MDC__HSIOMIX_TOP_USB1_OTG_ID 0x00B8 0x02BC 0x0000 0x03 0x00
+#define IMX95_PAD_ENET1_MDC__FLEXIO2_FLEXIO_BIT0 0x00B8 0x02BC 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_MDC__GPIO4_IO_BIT0 0x00B8 0x02BC 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_MDIO__NETCMIX_TOP_NETC_MDIO 0x00BC 0x02C0 0x0428 0x00 0x00
+#define IMX95_PAD_ENET1_MDIO__LPUART3_RIN_B 0x00BC 0x02C0 0x0000 0x01 0x00
+#define IMX95_PAD_ENET1_MDIO__I3C2_SDA 0x00BC 0x02C0 0x04FC 0x02 0x00
+#define IMX95_PAD_ENET1_MDIO__HSIOMIX_TOP_USB1_OTG_PWR 0x00BC 0x02C0 0x0000 0x03 0x00
+#define IMX95_PAD_ENET1_MDIO__FLEXIO2_FLEXIO_BIT1 0x00BC 0x02C0 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_MDIO__GPIO4_IO_BIT1 0x00BC 0x02C0 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3 0x00C0 0x02C4 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_TD3__CAN2_TX 0x00C0 0x02C4 0x0000 0x02 0x00
+#define IMX95_PAD_ENET1_TD3__HSIOMIX_TOP_USB2_OTG_ID 0x00C0 0x02C4 0x0000 0x03 0x00
+#define IMX95_PAD_ENET1_TD3__FLEXIO2_FLEXIO_BIT2 0x00C0 0x02C4 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_TD3__GPIO4_IO_BIT2 0x00C0 0x02C4 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2 0x00C4 0x02C8 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RMII_REF50_CLK 0x00C4 0x02C8 0x0000 0x01 0x00
+#define IMX95_PAD_ENET1_TD2__CAN2_RX 0x00C4 0x02C8 0x0444 0x02 0x01
+#define IMX95_PAD_ENET1_TD2__HSIOMIX_TOP_USB2_OTG_OC 0x00C4 0x02C8 0x0000 0x03 0x00
+#define IMX95_PAD_ENET1_TD2__FLEXIO2_FLEXIO_BIT3 0x00C4 0x02C8 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_TD2__GPIO4_IO_BIT3 0x00C4 0x02C8 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1 0x00C8 0x02CC 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_TD1__LPUART3_RTS_B 0x00C8 0x02CC 0x0000 0x01 0x00
+#define IMX95_PAD_ENET1_TD1__I3C2_PUR 0x00C8 0x02CC 0x0000 0x02 0x00
+#define IMX95_PAD_ENET1_TD1__HSIOMIX_TOP_USB1_OTG_OC 0x00C8 0x02CC 0x0000 0x03 0x00
+#define IMX95_PAD_ENET1_TD1__FLEXIO2_FLEXIO_BIT4 0x00C8 0x02CC 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_TD1__GPIO4_IO_BIT4 0x00C8 0x02CC 0x0000 0x05 0x00
+#define IMX95_PAD_ENET1_TD1__I3C2_PUR_B 0x00C8 0x02CC 0x0000 0x06 0x00
+#define IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RMII_TXD1 0x00C8 0x02CC 0x0000 0x07 0x00
+
+#define IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0 0x00CC 0x02D0 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_TD0__LPUART3_TX 0x00CC 0x02D0 0x055C 0x01 0x00
+#define IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RMII_TXD0 0x00CC 0x02D0 0x0000 0x02 0x00
+#define IMX95_PAD_ENET1_TD0__FLEXIO2_FLEXIO_BIT5 0x00CC 0x02D0 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_TD0__GPIO4_IO_BIT5 0x00CC 0x02D0 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_TX_CTL__NETCMIX_TOP_ETH0_RGMII_TX_CTL 0x00D0 0x02D4 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_TX_CTL__LPUART3_DTR_B 0x00D0 0x02D4 0x0000 0x01 0x00
+#define IMX95_PAD_ENET1_TX_CTL__NETCMIX_TOP_ETH0_RMII_TX_EN 0x00D0 0x02D4 0x0000 0x02 0x00
+#define IMX95_PAD_ENET1_TX_CTL__FLEXIO2_FLEXIO_BIT6 0x00D0 0x02D4 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_TX_CTL__GPIO4_IO_BIT6 0x00D0 0x02D4 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_TXC__NETCMIX_TOP_ETH0_RGMII_TX_CLK 0x00D4 0x02D8 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_TXC__CCMSRCGPCMIX_TOP_ENET_CLK_ROOT 0x00D4 0x02D8 0x0000 0x01 0x00
+#define IMX95_PAD_ENET1_TXC__FLEXIO2_FLEXIO_BIT7 0x00D4 0x02D8 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_TXC__GPIO4_IO_BIT7 0x00D4 0x02D8 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_RX_CTL__NETCMIX_TOP_ETH0_RGMII_RX_CTL 0x00D8 0x02DC 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_RX_CTL__LPUART3_DSR_B 0x00D8 0x02DC 0x0000 0x01 0x00
+#define IMX95_PAD_ENET1_RX_CTL__NETCMIX_TOP_ETH0_RMII_CRS_DV 0x00D8 0x02DC 0x0000 0x02 0x00
+#define IMX95_PAD_ENET1_RX_CTL__HSIOMIX_TOP_USB2_OTG_PWR 0x00D8 0x02DC 0x0000 0x03 0x00
+#define IMX95_PAD_ENET1_RX_CTL__FLEXIO2_FLEXIO_BIT8 0x00D8 0x02DC 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_RX_CTL__GPIO4_IO_BIT8 0x00D8 0x02DC 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_RXC__NETCMIX_TOP_ETH0_RGMII_RX_CLK 0x00DC 0x02E0 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_RXC__NETCMIX_TOP_ETH0_RMII_RX_ER 0x00DC 0x02E0 0x042C 0x01 0x00
+#define IMX95_PAD_ENET1_RXC__FLEXIO2_FLEXIO_BIT9 0x00DC 0x02E0 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_RXC__GPIO4_IO_BIT9 0x00DC 0x02E0 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_RD0__NETCMIX_TOP_ETH0_RGMII_RD0 0x00E0 0x02E4 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_RD0__LPUART3_RX 0x00E0 0x02E4 0x0558 0x01 0x00
+#define IMX95_PAD_ENET1_RD0__NETCMIX_TOP_ETH0_RMII_RXD0 0x00E0 0x02E4 0x0000 0x02 0x00
+#define IMX95_PAD_ENET1_RD0__FLEXIO2_FLEXIO_BIT10 0x00E0 0x02E4 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_RD0__GPIO4_IO_BIT10 0x00E0 0x02E4 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_RD1__NETCMIX_TOP_ETH0_RGMII_RD1 0x00E4 0x02E8 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_RD1__LPUART3_CTS_B 0x00E4 0x02E8 0x0554 0x01 0x00
+#define IMX95_PAD_ENET1_RD1__NETCMIX_TOP_ETH0_RMII_RXD1 0x00E4 0x02E8 0x0000 0x02 0x00
+#define IMX95_PAD_ENET1_RD1__LPTMR2_ALT1 0x00E4 0x02E8 0x0548 0x03 0x00
+#define IMX95_PAD_ENET1_RD1__FLEXIO2_FLEXIO_BIT11 0x00E4 0x02E8 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_RD1__GPIO4_IO_BIT11 0x00E4 0x02E8 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_RD2__NETCMIX_TOP_ETH0_RGMII_RD2 0x00E8 0x02EC 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_RD2__NETCMIX_TOP_ETH0_RMII_RX_ER 0x00E8 0x02EC 0x042C 0x02 0x01
+#define IMX95_PAD_ENET1_RD2__LPTMR2_ALT2 0x00E8 0x02EC 0x054C 0x03 0x00
+#define IMX95_PAD_ENET1_RD2__FLEXIO2_FLEXIO_BIT12 0x00E8 0x02EC 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_RD2__GPIO4_IO_BIT12 0x00E8 0x02EC 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET1_RD3__NETCMIX_TOP_ETH0_RGMII_RD3 0x00EC 0x02F0 0x0000 0x00 0x00
+#define IMX95_PAD_ENET1_RD3__LPTMR2_ALT3 0x00EC 0x02F0 0x0550 0x03 0x00
+#define IMX95_PAD_ENET1_RD3__FLEXIO2_FLEXIO_BIT13 0x00EC 0x02F0 0x0000 0x04 0x00
+#define IMX95_PAD_ENET1_RD3__GPIO4_IO_BIT13 0x00EC 0x02F0 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET2_MDC__NETCMIX_TOP_NETC_MDC 0x00F0 0x02F4 0x0424 0x00 0x01
+#define IMX95_PAD_ENET2_MDC__LPUART4_DCD_B 0x00F0 0x02F4 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_MDC__NETCMIX_TOP_SAI2_RX_SYNC 0x00F0 0x02F4 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_MDC__FLEXIO2_FLEXIO_BIT14 0x00F0 0x02F4 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_MDC__GPIO4_IO_BIT14 0x00F0 0x02F4 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_NETC_MDIO 0x00F4 0x02F8 0x0428 0x00 0x01
+#define IMX95_PAD_ENET2_MDIO__LPUART4_RIN_B 0x00F4 0x02F8 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_SAI2_RX_BCLK 0x00F4 0x02F8 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_MDIO__FLEXIO2_FLEXIO_BIT15 0x00F4 0x02F8 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_MDIO__GPIO4_IO_BIT15 0x00F4 0x02F8 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET2_TD3__NETCMIX_TOP_SAI2_RX_DATA_BIT0 0x00F8 0x02FC 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_TD3__FLEXIO2_FLEXIO_BIT16 0x00F8 0x02FC 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_TD3__GPIO4_IO_BIT16 0x00F8 0x02FC 0x0000 0x05 0x00
+#define IMX95_PAD_ENET2_TD3__NETCMIX_TOP_ETH1_RGMII_TD3 0x00F8 0x02FC 0x0000 0x00 0x00
+
+#define IMX95_PAD_ENET2_TD2__NETCMIX_TOP_ETH1_RGMII_TD2 0x00FC 0x0300 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_TD2__NETCMIX_TOP_ETH1_RMII_REF50_CLK 0x00FC 0x0300 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_TD2__NETCMIX_TOP_SAI2_RX_DATA_BIT1 0x00FC 0x0300 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_TD2__SAI4_TX_SYNC 0x00FC 0x0300 0x05A4 0x03 0x00
+#define IMX95_PAD_ENET2_TD2__FLEXIO2_FLEXIO_BIT17 0x00FC 0x0300 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_TD2__GPIO4_IO_BIT17 0x00FC 0x0300 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET2_TD1__NETCMIX_TOP_ETH1_RGMII_TD1 0x0100 0x0304 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_TD1__LPUART4_RTS_B 0x0100 0x0304 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_TD1__NETCMIX_TOP_SAI2_RX_DATA_BIT2 0x0100 0x0304 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_TD1__SAI4_TX_BCLK 0x0100 0x0304 0x05A0 0x03 0x00
+#define IMX95_PAD_ENET2_TD1__FLEXIO2_FLEXIO_BIT18 0x0100 0x0304 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_TD1__GPIO4_IO_BIT18 0x0100 0x0304 0x0000 0x05 0x00
+#define IMX95_PAD_ENET2_TD1__NETCMIX_TOP_ETH1_RMII_TXD1 0x0100 0x0304 0x0000 0x06 0x00
+
+#define IMX95_PAD_ENET2_TD0__NETCMIX_TOP_ETH1_RGMII_TD0 0x0104 0x0308 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_TD0__LPUART4_TX 0x0104 0x0308 0x0568 0x01 0x00
+#define IMX95_PAD_ENET2_TD0__NETCMIX_TOP_SAI2_RX_DATA_BIT3 0x0104 0x0308 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_TD0__SAI4_TX_DATA_BIT0 0x0104 0x0308 0x0000 0x03 0x00
+#define IMX95_PAD_ENET2_TD0__FLEXIO2_FLEXIO_BIT19 0x0104 0x0308 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_TD0__GPIO4_IO_BIT19 0x0104 0x0308 0x0000 0x05 0x00
+#define IMX95_PAD_ENET2_TD0__NETCMIX_TOP_ETH1_RMII_TXD0 0x0104 0x0308 0x0000 0x06 0x00
+
+#define IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_ETH1_RGMII_TX_CTL 0x0108 0x030C 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_TX_CTL__LPUART4_DTR_B 0x0108 0x030C 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_SAI2_TX_SYNC 0x0108 0x030C 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_ETH1_RMII_TX_EN 0x0108 0x030C 0x0000 0x03 0x00
+#define IMX95_PAD_ENET2_TX_CTL__FLEXIO2_FLEXIO_BIT20 0x0108 0x030C 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_TX_CTL__GPIO4_IO_BIT20 0x0108 0x030C 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET2_TXC__NETCMIX_TOP_ETH1_RGMII_TX_CLK 0x010C 0x0310 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_TXC__CCMSRCGPCMIX_TOP_ENET_CLK_ROOT 0x010C 0x0310 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_TXC__NETCMIX_TOP_SAI2_TX_BCLK 0x010C 0x0310 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_TXC__FLEXIO2_FLEXIO_BIT21 0x010C 0x0310 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_TXC__GPIO4_IO_BIT21 0x010C 0x0310 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_ETH1_RGMII_RX_CTL 0x0110 0x0314 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_RX_CTL__LPUART4_DSR_B 0x0110 0x0314 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_SAI2_TX_DATA_BIT0 0x0110 0x0314 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_RX_CTL__FLEXIO2_FLEXIO_BIT22 0x0110 0x0314 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_RX_CTL__GPIO4_IO_BIT22 0x0110 0x0314 0x0000 0x05 0x00
+#define IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_ETH1_RMII_CRS_DV 0x0110 0x0314 0x0000 0x06 0x00
+
+#define IMX95_PAD_ENET2_RXC__NETCMIX_TOP_ETH1_RGMII_RX_CLK 0x0114 0x0318 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_RXC__NETCMIX_TOP_ETH1_RMII_RX_ER 0x0114 0x0318 0x0430 0x01 0x00
+#define IMX95_PAD_ENET2_RXC__NETCMIX_TOP_SAI2_TX_DATA_BIT1 0x0114 0x0318 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_RXC__SAI4_RX_SYNC 0x0114 0x0318 0x059C 0x03 0x00
+#define IMX95_PAD_ENET2_RXC__FLEXIO2_FLEXIO_BIT23 0x0114 0x0318 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_RXC__GPIO4_IO_BIT23 0x0114 0x0318 0x0000 0x05 0x00
+
+#define IMX95_PAD_ENET2_RD0__NETCMIX_TOP_ETH1_RGMII_RD0 0x0118 0x031C 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_RD0__LPUART4_RX 0x0118 0x031C 0x0564 0x01 0x00
+#define IMX95_PAD_ENET2_RD0__NETCMIX_TOP_SAI2_TX_DATA_BIT2 0x0118 0x031C 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_RD0__SAI4_RX_BCLK 0x0118 0x031C 0x0594 0x03 0x00
+#define IMX95_PAD_ENET2_RD0__FLEXIO2_FLEXIO_BIT24 0x0118 0x031C 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_RD0__GPIO4_IO_BIT24 0x0118 0x031C 0x0000 0x05 0x00
+#define IMX95_PAD_ENET2_RD0__NETCMIX_TOP_ETH1_RMII_RXD0 0x0118 0x031C 0x0000 0x06 0x00
+
+#define IMX95_PAD_ENET2_RD1__NETCMIX_TOP_ETH1_RGMII_RD1 0x011C 0x0320 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_RD1__SPDIF_IN 0x011C 0x0320 0x0454 0x01 0x00
+#define IMX95_PAD_ENET2_RD1__NETCMIX_TOP_SAI2_TX_DATA_BIT3 0x011C 0x0320 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_RD1__SAI4_RX_DATA_BIT0 0x011C 0x0320 0x0598 0x03 0x00
+#define IMX95_PAD_ENET2_RD1__FLEXIO2_FLEXIO_BIT25 0x011C 0x0320 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_RD1__GPIO4_IO_BIT25 0x011C 0x0320 0x0000 0x05 0x00
+#define IMX95_PAD_ENET2_RD1__NETCMIX_TOP_ETH1_RMII_RXD1 0x011C 0x0320 0x0000 0x06 0x00
+
+#define IMX95_PAD_ENET2_RD2__NETCMIX_TOP_ETH1_RGMII_RD2 0x0120 0x0324 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_RD2__LPUART4_CTS_B 0x0120 0x0324 0x0560 0x01 0x00
+#define IMX95_PAD_ENET2_RD2__NETCMIX_TOP_SAI2_MCLK 0x0120 0x0324 0x0000 0x02 0x00
+#define IMX95_PAD_ENET2_RD2__NETCMIX_TOP_MQS2_RIGHT 0x0120 0x0324 0x0000 0x03 0x00
+#define IMX95_PAD_ENET2_RD2__FLEXIO2_FLEXIO_BIT26 0x0120 0x0324 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_RD2__GPIO4_IO_BIT26 0x0120 0x0324 0x0000 0x05 0x00
+#define IMX95_PAD_ENET2_RD2__NETCMIX_TOP_ETH1_RMII_RX_ER 0x0120 0x0324 0x0430 0x06 0x01
+
+#define IMX95_PAD_ENET2_RD3__NETCMIX_TOP_ETH1_RGMII_RD3 0x0124 0x0328 0x0000 0x00 0x00
+#define IMX95_PAD_ENET2_RD3__SPDIF_OUT 0x0124 0x0328 0x0000 0x01 0x00
+#define IMX95_PAD_ENET2_RD3__SPDIF_IN 0x0124 0x0328 0x0454 0x02 0x01
+#define IMX95_PAD_ENET2_RD3__NETCMIX_TOP_MQS2_LEFT 0x0124 0x0328 0x0000 0x03 0x00
+#define IMX95_PAD_ENET2_RD3__FLEXIO2_FLEXIO_BIT27 0x0124 0x0328 0x0000 0x04 0x00
+#define IMX95_PAD_ENET2_RD3__GPIO4_IO_BIT27 0x0124 0x0328 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD1_CLK__FLEXIO1_FLEXIO_BIT8 0x0128 0x032C 0x0488 0x04 0x01
+#define IMX95_PAD_SD1_CLK__GPIO3_IO_BIT8 0x0128 0x032C 0x0000 0x05 0x00
+#define IMX95_PAD_SD1_CLK__USDHC1_CLK 0x0128 0x032C 0x0000 0x00 0x00
+
+#define IMX95_PAD_SD1_CMD__USDHC1_CMD 0x012C 0x0330 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_CMD__FLEXIO1_FLEXIO_BIT9 0x012C 0x0330 0x048C 0x04 0x01
+#define IMX95_PAD_SD1_CMD__GPIO3_IO_BIT9 0x012C 0x0330 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x0130 0x0334 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA0__FLEXIO1_FLEXIO_BIT10 0x0130 0x0334 0x0490 0x04 0x01
+#define IMX95_PAD_SD1_DATA0__GPIO3_IO_BIT10 0x0130 0x0334 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x0134 0x0338 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA1__FLEXIO1_FLEXIO_BIT11 0x0134 0x0338 0x0494 0x04 0x01
+#define IMX95_PAD_SD1_DATA1__GPIO3_IO_BIT11 0x0134 0x0338 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x0138 0x033C 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA2__FLEXIO1_FLEXIO_BIT12 0x0138 0x033C 0x0498 0x04 0x01
+#define IMX95_PAD_SD1_DATA2__GPIO3_IO_BIT12 0x0138 0x033C 0x0000 0x05 0x00
+#define IMX95_PAD_SD1_DATA2__CCMSRCGPCMIX_TOP_PMIC_READY 0x0138 0x033C 0x0000 0x06 0x00
+
+#define IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x013C 0x0340 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA3__FLEXSPI1_A_SS1_B 0x013C 0x0340 0x0000 0x01 0x00
+#define IMX95_PAD_SD1_DATA3__FLEXIO1_FLEXIO_BIT13 0x013C 0x0340 0x049C 0x04 0x01
+#define IMX95_PAD_SD1_DATA3__GPIO3_IO_BIT13 0x013C 0x0340 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x0140 0x0344 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA4__FLEXSPI1_A_DATA_BIT4 0x0140 0x0344 0x04E4 0x01 0x00
+#define IMX95_PAD_SD1_DATA4__FLEXIO1_FLEXIO_BIT14 0x0140 0x0344 0x04A0 0x04 0x01
+#define IMX95_PAD_SD1_DATA4__GPIO3_IO_BIT14 0x0140 0x0344 0x0000 0x05 0x00
+#define IMX95_PAD_SD1_DATA4__XSPI_DATA_BIT4 0x0140 0x0344 0x05FC 0x06 0x00
+
+#define IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x0144 0x0348 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA5__FLEXSPI1_A_DATA_BIT5 0x0144 0x0348 0x04E8 0x01 0x00
+#define IMX95_PAD_SD1_DATA5__USDHC1_RESET_B 0x0144 0x0348 0x0000 0x02 0x00
+#define IMX95_PAD_SD1_DATA5__FLEXIO1_FLEXIO_BIT15 0x0144 0x0348 0x04A4 0x04 0x01
+#define IMX95_PAD_SD1_DATA5__GPIO3_IO_BIT15 0x0144 0x0348 0x0000 0x05 0x00
+#define IMX95_PAD_SD1_DATA5__XSPI_DATA_BIT5 0x0144 0x0348 0x0600 0x06 0x00
+
+#define IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x0148 0x034C 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA6__FLEXSPI1_A_DATA_BIT6 0x0148 0x034C 0x04EC 0x01 0x00
+#define IMX95_PAD_SD1_DATA6__USDHC1_CD_B 0x0148 0x034C 0x0000 0x02 0x00
+#define IMX95_PAD_SD1_DATA6__FLEXIO1_FLEXIO_BIT16 0x0148 0x034C 0x04A8 0x04 0x01
+#define IMX95_PAD_SD1_DATA6__GPIO3_IO_BIT16 0x0148 0x034C 0x0000 0x05 0x00
+#define IMX95_PAD_SD1_DATA6__XSPI_DATA_BIT6 0x0148 0x034C 0x0604 0x06 0x00
+
+#define IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x014C 0x0350 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_DATA7__FLEXSPI1_A_DATA_BIT7 0x014C 0x0350 0x04F0 0x01 0x00
+#define IMX95_PAD_SD1_DATA7__USDHC1_WP 0x014C 0x0350 0x0000 0x02 0x00
+#define IMX95_PAD_SD1_DATA7__FLEXIO1_FLEXIO_BIT17 0x014C 0x0350 0x04AC 0x04 0x01
+#define IMX95_PAD_SD1_DATA7__GPIO3_IO_BIT17 0x014C 0x0350 0x0000 0x05 0x00
+#define IMX95_PAD_SD1_DATA7__XSPI_DATA_BIT7 0x014C 0x0350 0x0608 0x06 0x00
+
+#define IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x0150 0x0354 0x0000 0x00 0x00
+#define IMX95_PAD_SD1_STROBE__FLEXSPI1_A_DQS 0x0150 0x0354 0x04D0 0x01 0x00
+#define IMX95_PAD_SD1_STROBE__FLEXIO1_FLEXIO_BIT18 0x0150 0x0354 0x04B0 0x04 0x01
+#define IMX95_PAD_SD1_STROBE__GPIO3_IO_BIT18 0x0150 0x0354 0x0000 0x05 0x00
+#define IMX95_PAD_SD1_STROBE__XSPI_DQS 0x0150 0x0354 0x05E4 0x06 0x00
+
+#define IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x0154 0x0358 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_VSELECT__USDHC2_WP 0x0154 0x0358 0x0000 0x01 0x00
+#define IMX95_PAD_SD2_VSELECT__LPTMR2_ALT3 0x0154 0x0358 0x0550 0x02 0x01
+#define IMX95_PAD_SD2_VSELECT__FLEXIO1_FLEXIO_BIT19 0x0154 0x0358 0x04B4 0x04 0x01
+#define IMX95_PAD_SD2_VSELECT__GPIO3_IO_BIT19 0x0154 0x0358 0x0000 0x05 0x00
+#define IMX95_PAD_SD2_VSELECT__CCMSRCGPCMIX_TOP_EXT_CLK1 0x0154 0x0358 0x0420 0x06 0x01
+
+#define IMX95_PAD_SD3_CLK__USDHC3_CLK 0x0158 0x035C 0x05C8 0x00 0x01
+#define IMX95_PAD_SD3_CLK__FLEXSPI1_A_SCLK 0x0158 0x035C 0x04F4 0x01 0x00
+#define IMX95_PAD_SD3_CLK__SAI5_TX_DATA_BIT1 0x0158 0x035C 0x0000 0x02 0x00
+#define IMX95_PAD_SD3_CLK__SAI5_RX_DATA_BIT0 0x0158 0x035C 0x05AC 0x03 0x00
+#define IMX95_PAD_SD3_CLK__FLEXIO1_FLEXIO_BIT20 0x0158 0x035C 0x04B8 0x04 0x01
+#define IMX95_PAD_SD3_CLK__GPIO3_IO_BIT20 0x0158 0x035C 0x0000 0x05 0x00
+#define IMX95_PAD_SD3_CLK__XSPI_CLK 0x0158 0x035C 0x05E8 0x06 0x00
+
+#define IMX95_PAD_SD3_CMD__USDHC3_CMD 0x015C 0x0360 0x05CC 0x00 0x01
+#define IMX95_PAD_SD3_CMD__FLEXSPI1_A_SS0_B 0x015C 0x0360 0x0000 0x01 0x00
+#define IMX95_PAD_SD3_CMD__SAI5_TX_DATA_BIT2 0x015C 0x0360 0x0000 0x02 0x00
+#define IMX95_PAD_SD3_CMD__SAI5_RX_SYNC 0x015C 0x0360 0x05BC 0x03 0x00
+#define IMX95_PAD_SD3_CMD__FLEXIO1_FLEXIO_BIT21 0x015C 0x0360 0x04BC 0x04 0x01
+#define IMX95_PAD_SD3_CMD__GPIO3_IO_BIT21 0x015C 0x0360 0x0000 0x05 0x00
+#define IMX95_PAD_SD3_CMD__XSPI_CS 0x015C 0x0360 0x05E0 0x06 0x00
+
+#define IMX95_PAD_SD3_DATA0__USDHC3_DATA0 0x0160 0x0364 0x05D0 0x00 0x01
+#define IMX95_PAD_SD3_DATA0__FLEXSPI1_A_DATA_BIT0 0x0160 0x0364 0x04D4 0x01 0x00
+#define IMX95_PAD_SD3_DATA0__SAI5_TX_DATA_BIT3 0x0160 0x0364 0x0000 0x02 0x00
+#define IMX95_PAD_SD3_DATA0__SAI5_RX_BCLK 0x0160 0x0364 0x05A8 0x03 0x00
+#define IMX95_PAD_SD3_DATA0__FLEXIO1_FLEXIO_BIT22 0x0160 0x0364 0x04C0 0x04 0x01
+#define IMX95_PAD_SD3_DATA0__GPIO3_IO_BIT22 0x0160 0x0364 0x0000 0x05 0x00
+#define IMX95_PAD_SD3_DATA0__XSPI_DATA_BIT0 0x0160 0x0364 0x05EC 0x06 0x00
+
+#define IMX95_PAD_SD3_DATA1__USDHC3_DATA1 0x0164 0x0368 0x05D4 0x00 0x01
+#define IMX95_PAD_SD3_DATA1__FLEXSPI1_A_DATA_BIT1 0x0164 0x0368 0x04D8 0x01 0x00
+#define IMX95_PAD_SD3_DATA1__SAI5_RX_DATA_BIT1 0x0164 0x0368 0x05B0 0x02 0x00
+#define IMX95_PAD_SD3_DATA1__SAI5_TX_DATA_BIT0 0x0164 0x0368 0x0000 0x03 0x00
+#define IMX95_PAD_SD3_DATA1__FLEXIO1_FLEXIO_BIT23 0x0164 0x0368 0x04C4 0x04 0x01
+#define IMX95_PAD_SD3_DATA1__GPIO3_IO_BIT23 0x0164 0x0368 0x0000 0x05 0x00
+#define IMX95_PAD_SD3_DATA1__XSPI_DATA_BIT1 0x0164 0x0368 0x05F0 0x06 0x00
+
+#define IMX95_PAD_SD3_DATA2__USDHC3_DATA2 0x0168 0x036C 0x05D8 0x00 0x01
+#define IMX95_PAD_SD3_DATA2__FLEXSPI1_A_DATA_BIT2 0x0168 0x036C 0x04DC 0x01 0x00
+#define IMX95_PAD_SD3_DATA2__SAI5_RX_DATA_BIT2 0x0168 0x036C 0x05B4 0x02 0x00
+#define IMX95_PAD_SD3_DATA2__SAI5_TX_SYNC 0x0168 0x036C 0x05C4 0x03 0x00
+#define IMX95_PAD_SD3_DATA2__FLEXIO1_FLEXIO_BIT24 0x0168 0x036C 0x04C8 0x04 0x01
+#define IMX95_PAD_SD3_DATA2__GPIO3_IO_BIT24 0x0168 0x036C 0x0000 0x05 0x00
+#define IMX95_PAD_SD3_DATA2__XSPI_DATA_BIT2 0x0168 0x036C 0x05F4 0x06 0x00
+
+#define IMX95_PAD_SD3_DATA3__USDHC3_DATA3 0x016C 0x0370 0x05DC 0x00 0x01
+#define IMX95_PAD_SD3_DATA3__FLEXSPI1_A_DATA_BIT3 0x016C 0x0370 0x04E0 0x01 0x00
+#define IMX95_PAD_SD3_DATA3__SAI5_RX_DATA_BIT3 0x016C 0x0370 0x05B8 0x02 0x00
+#define IMX95_PAD_SD3_DATA3__SAI5_TX_BCLK 0x016C 0x0370 0x05C0 0x03 0x00
+#define IMX95_PAD_SD3_DATA3__FLEXIO1_FLEXIO_BIT25 0x016C 0x0370 0x04CC 0x04 0x01
+#define IMX95_PAD_SD3_DATA3__GPIO3_IO_BIT25 0x016C 0x0370 0x0000 0x05 0x00
+#define IMX95_PAD_SD3_DATA3__XSPI_DATA_BIT3 0x016C 0x0370 0x05F8 0x06 0x00
+
+#define IMX95_PAD_XSPI1_DATA0__FLEXSPI1_A_DATA_BIT0 0x0170 0x0374 0x04D4 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA0__NETCMIX_TOP_SAI2_TX_DATA_BIT4 0x0170 0x0374 0x0000 0x01 0x00
+#define IMX95_PAD_XSPI1_DATA0__SAI4_TX_BCLK 0x0170 0x0374 0x05A0 0x02 0x01
+#define IMX95_PAD_XSPI1_DATA0__SAI4_RX_DATA_BIT1 0x0170 0x0374 0x0000 0x03 0x00
+#define IMX95_PAD_XSPI1_DATA0__XSPI_DATA_BIT0 0x0170 0x0374 0x05EC 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA0__GPIO5_IO_BIT0 0x0170 0x0374 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DATA1__FLEXSPI1_A_DATA_BIT1 0x0174 0x0378 0x04D8 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA1__NETCMIX_TOP_SAI2_TX_DATA_BIT5 0x0174 0x0378 0x0000 0x01 0x00
+#define IMX95_PAD_XSPI1_DATA1__SAI4_TX_SYNC 0x0174 0x0378 0x05A4 0x02 0x01
+#define IMX95_PAD_XSPI1_DATA1__SAI4_TX_DATA_BIT1 0x0174 0x0378 0x0000 0x03 0x00
+#define IMX95_PAD_XSPI1_DATA1__XSPI_DATA_BIT1 0x0174 0x0378 0x05F0 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA1__GPIO5_IO_BIT1 0x0174 0x0378 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DATA2__FLEXSPI1_A_DATA_BIT2 0x0178 0x037C 0x04DC 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA2__NETCMIX_TOP_SAI2_TX_DATA_BIT6 0x0178 0x037C 0x0000 0x01 0x00
+#define IMX95_PAD_XSPI1_DATA2__SAI4_TX_DATA_BIT0 0x0178 0x037C 0x0000 0x02 0x00
+#define IMX95_PAD_XSPI1_DATA2__XSPI_DATA_BIT2 0x0178 0x037C 0x05F4 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA2__GPIO5_IO_BIT2 0x0178 0x037C 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DATA3__FLEXSPI1_A_DATA_BIT3 0x017C 0x0380 0x04E0 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA3__NETCMIX_TOP_SAI2_TX_DATA_BIT7 0x017C 0x0380 0x0000 0x01 0x00
+#define IMX95_PAD_XSPI1_DATA3__SAI4_RX_DATA_BIT0 0x017C 0x0380 0x0598 0x02 0x01
+#define IMX95_PAD_XSPI1_DATA3__XSPI_DATA_BIT3 0x017C 0x0380 0x05F8 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA3__GPIO5_IO_BIT3 0x017C 0x0380 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DATA4__FLEXSPI1_A_DATA_BIT4 0x0180 0x0384 0x04E4 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA4__SAI5_TX_DATA_BIT0 0x0180 0x0384 0x0000 0x01 0x00
+#define IMX95_PAD_XSPI1_DATA4__SAI5_RX_DATA_BIT1 0x0180 0x0384 0x05B0 0x02 0x01
+#define IMX95_PAD_XSPI1_DATA4__XSPI_DATA_BIT4 0x0180 0x0384 0x05FC 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA4__GPIO5_IO_BIT4 0x0180 0x0384 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DATA5__FLEXSPI1_A_DATA_BIT5 0x0184 0x0388 0x04E8 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA5__SAI5_TX_SYNC 0x0184 0x0388 0x05C4 0x01 0x01
+#define IMX95_PAD_XSPI1_DATA5__SAI5_RX_DATA_BIT2 0x0184 0x0388 0x05B4 0x02 0x01
+#define IMX95_PAD_XSPI1_DATA5__NETCMIX_TOP_SAI2_RX_DATA_BIT6 0x0184 0x0388 0x043C 0x03 0x00
+#define IMX95_PAD_XSPI1_DATA5__XSPI_DATA_BIT5 0x0184 0x0388 0x0600 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA5__GPIO5_IO_BIT5 0x0184 0x0388 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DATA6__FLEXSPI1_A_DATA_BIT6 0x0188 0x038C 0x04EC 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA6__SAI5_TX_BCLK 0x0188 0x038C 0x05C0 0x01 0x01
+#define IMX95_PAD_XSPI1_DATA6__SAI5_RX_DATA_BIT3 0x0188 0x038C 0x05B8 0x02 0x01
+#define IMX95_PAD_XSPI1_DATA6__NETCMIX_TOP_SAI2_RX_DATA_BIT7 0x0188 0x038C 0x0440 0x03 0x00
+#define IMX95_PAD_XSPI1_DATA6__XSPI_DATA_BIT6 0x0188 0x038C 0x0604 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA6__GPIO5_IO_BIT6 0x0188 0x038C 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DATA7__FLEXSPI1_A_DATA_BIT7 0x018C 0x0390 0x04F0 0x00 0x01
+#define IMX95_PAD_XSPI1_DATA7__SAI5_RX_DATA_BIT0 0x018C 0x0390 0x05AC 0x01 0x01
+#define IMX95_PAD_XSPI1_DATA7__SAI5_TX_DATA_BIT1 0x018C 0x0390 0x0000 0x02 0x00
+#define IMX95_PAD_XSPI1_DATA7__XSPI_DATA_BIT7 0x018C 0x0390 0x0608 0x04 0x01
+#define IMX95_PAD_XSPI1_DATA7__GPIO5_IO_BIT7 0x018C 0x0390 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_DQS__FLEXSPI1_A_DQS 0x0190 0x0394 0x04D0 0x00 0x01
+#define IMX95_PAD_XSPI1_DQS__SAI5_RX_SYNC 0x0190 0x0394 0x05BC 0x01 0x01
+#define IMX95_PAD_XSPI1_DQS__SAI5_TX_DATA_BIT2 0x0190 0x0394 0x0000 0x02 0x00
+#define IMX95_PAD_XSPI1_DQS__NETCMIX_TOP_SAI2_RX_DATA_BIT6 0x0190 0x0394 0x043C 0x03 0x01
+#define IMX95_PAD_XSPI1_DQS__XSPI_DQS 0x0190 0x0394 0x05E4 0x04 0x01
+#define IMX95_PAD_XSPI1_DQS__GPIO5_IO_BIT8 0x0190 0x0394 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_SCLK__FLEXSPI1_A_SCLK 0x0194 0x0398 0x04F4 0x00 0x01
+#define IMX95_PAD_XSPI1_SCLK__NETCMIX_TOP_SAI2_RX_DATA_BIT4 0x0194 0x0398 0x0000 0x01 0x00
+#define IMX95_PAD_XSPI1_SCLK__SAI4_RX_SYNC 0x0194 0x0398 0x059C 0x02 0x01
+#define IMX95_PAD_XSPI1_SCLK__EARC_DC_HPD_IN 0x0194 0x0398 0x0000 0x03 0x00
+#define IMX95_PAD_XSPI1_SCLK__XSPI_CLK 0x0194 0x0398 0x05E8 0x04 0x01
+#define IMX95_PAD_XSPI1_SCLK__GPIO5_IO_BIT9 0x0194 0x0398 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_SS0_B__FLEXSPI1_A_SS0_B 0x0198 0x039C 0x0000 0x00 0x00
+#define IMX95_PAD_XSPI1_SS0_B__NETCMIX_TOP_SAI2_RX_DATA_BIT5 0x0198 0x039C 0x0000 0x01 0x00
+#define IMX95_PAD_XSPI1_SS0_B__SAI4_RX_BCLK 0x0198 0x039C 0x0594 0x02 0x01
+#define IMX95_PAD_XSPI1_SS0_B__EARC_CEC_OUT 0x0198 0x039C 0x0000 0x03 0x00
+#define IMX95_PAD_XSPI1_SS0_B__XSPI_CS 0x0198 0x039C 0x05E0 0x04 0x01
+#define IMX95_PAD_XSPI1_SS0_B__GPIO5_IO_BIT10 0x0198 0x039C 0x0000 0x05 0x00
+
+#define IMX95_PAD_XSPI1_SS1_B__FLEXSPI1_A_SS1_B 0x019C 0x03A0 0x0000 0x00 0x00
+#define IMX95_PAD_XSPI1_SS1_B__SAI5_RX_BCLK 0x019C 0x03A0 0x05A8 0x01 0x01
+#define IMX95_PAD_XSPI1_SS1_B__SAI5_TX_DATA_BIT3 0x019C 0x03A0 0x0000 0x02 0x00
+#define IMX95_PAD_XSPI1_SS1_B__NETCMIX_TOP_SAI2_RX_DATA_BIT7 0x019C 0x03A0 0x0440 0x03 0x01
+#define IMX95_PAD_XSPI1_SS1_B__GPIO5_IO_BIT11 0x019C 0x03A0 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD2_CD_B__USDHC2_CD_B 0x01A0 0x03A4 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_CD_B__NETCMIX_TOP_NETC_TMR_1588_TRIG1 0x01A0 0x03A4 0x0434 0x01 0x01
+#define IMX95_PAD_SD2_CD_B__I3C2_SCL 0x01A0 0x03A4 0x04F8 0x02 0x01
+#define IMX95_PAD_SD2_CD_B__FLEXIO1_FLEXIO_BIT0 0x01A0 0x03A4 0x0468 0x04 0x01
+#define IMX95_PAD_SD2_CD_B__GPIO3_IO_BIT0 0x01A0 0x03A4 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD2_CLK__USDHC2_CLK 0x01A4 0x03A8 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_CLK__NETCMIX_TOP_NETC_TMR_1588_PP1 0x01A4 0x03A8 0x0000 0x01 0x00
+#define IMX95_PAD_SD2_CLK__I3C2_SDA 0x01A4 0x03A8 0x04FC 0x02 0x01
+#define IMX95_PAD_SD2_CLK__FLEXIO1_FLEXIO_BIT1 0x01A4 0x03A8 0x046C 0x04 0x01
+#define IMX95_PAD_SD2_CLK__GPIO3_IO_BIT1 0x01A4 0x03A8 0x0000 0x05 0x00
+#define IMX95_PAD_SD2_CLK__CCMSRCGPCMIX_TOP_OBSERVE_0 0x01A4 0x03A8 0x0000 0x06 0x00
+
+#define IMX95_PAD_SD2_CMD__USDHC2_CMD 0x01A8 0x03AC 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_CMD__NETCMIX_TOP_NETC_TMR_1588_TRIG2 0x01A8 0x03AC 0x0438 0x01 0x01
+#define IMX95_PAD_SD2_CMD__I3C2_PUR 0x01A8 0x03AC 0x0000 0x02 0x00
+#define IMX95_PAD_SD2_CMD__I3C2_PUR_B 0x01A8 0x03AC 0x0000 0x03 0x00
+#define IMX95_PAD_SD2_CMD__FLEXIO1_FLEXIO_BIT2 0x01A8 0x03AC 0x0470 0x04 0x01
+#define IMX95_PAD_SD2_CMD__GPIO3_IO_BIT2 0x01A8 0x03AC 0x0000 0x05 0x00
+#define IMX95_PAD_SD2_CMD__CCMSRCGPCMIX_TOP_OBSERVE_1 0x01A8 0x03AC 0x0000 0x06 0x00
+
+#define IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x01AC 0x03B0 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_DATA0__NETCMIX_TOP_NETC_TMR_1588_PP2 0x01AC 0x03B0 0x0000 0x01 0x00
+#define IMX95_PAD_SD2_DATA0__CAN2_TX 0x01AC 0x03B0 0x0000 0x02 0x00
+#define IMX95_PAD_SD2_DATA0__FLEXIO1_FLEXIO_BIT3 0x01AC 0x03B0 0x0474 0x04 0x01
+#define IMX95_PAD_SD2_DATA0__GPIO3_IO_BIT3 0x01AC 0x03B0 0x0000 0x05 0x00
+#define IMX95_PAD_SD2_DATA0__CCMSRCGPCMIX_TOP_OBSERVE_2 0x01AC 0x03B0 0x0000 0x06 0x00
+
+#define IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x01B0 0x03B4 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_DATA1__NETCMIX_TOP_NETC_TMR_1588_CLK 0x01B0 0x03B4 0x0000 0x01 0x00
+#define IMX95_PAD_SD2_DATA1__CAN2_RX 0x01B0 0x03B4 0x0444 0x02 0x03
+#define IMX95_PAD_SD2_DATA1__FLEXIO1_FLEXIO_BIT4 0x01B0 0x03B4 0x0478 0x04 0x01
+#define IMX95_PAD_SD2_DATA1__GPIO3_IO_BIT4 0x01B0 0x03B4 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x01B4 0x03B8 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_DATA2__NETCMIX_TOP_NETC_TMR_1588_PP3 0x01B4 0x03B8 0x0000 0x01 0x00
+#define IMX95_PAD_SD2_DATA2__NETCMIX_TOP_MQS2_RIGHT 0x01B4 0x03B8 0x0000 0x02 0x00
+#define IMX95_PAD_SD2_DATA2__FLEXIO1_FLEXIO_BIT5 0x01B4 0x03B8 0x047C 0x04 0x01
+#define IMX95_PAD_SD2_DATA2__GPIO3_IO_BIT5 0x01B4 0x03B8 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x01B8 0x03BC 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_DATA3__LPTMR2_ALT1 0x01B8 0x03BC 0x0548 0x01 0x01
+#define IMX95_PAD_SD2_DATA3__NETCMIX_TOP_MQS2_LEFT 0x01B8 0x03BC 0x0000 0x02 0x00
+#define IMX95_PAD_SD2_DATA3__NETCMIX_TOP_NETC_TMR_1588_ALARM1 0x01B8 0x03BC 0x0000 0x03 0x00
+#define IMX95_PAD_SD2_DATA3__FLEXIO1_FLEXIO_BIT6 0x01B8 0x03BC 0x0480 0x04 0x01
+#define IMX95_PAD_SD2_DATA3__GPIO3_IO_BIT6 0x01B8 0x03BC 0x0000 0x05 0x00
+
+#define IMX95_PAD_SD2_RESET_B__USDHC2_RESET_B 0x01BC 0x03C0 0x0000 0x00 0x00
+#define IMX95_PAD_SD2_RESET_B__LPTMR2_ALT2 0x01BC 0x03C0 0x054C 0x01 0x01
+#define IMX95_PAD_SD2_RESET_B__NETCMIX_TOP_NETC_TMR_1588_GCLK 0x01BC 0x03C0 0x0000 0x03 0x00
+#define IMX95_PAD_SD2_RESET_B__FLEXIO1_FLEXIO_BIT7 0x01BC 0x03C0 0x0484 0x04 0x01
+#define IMX95_PAD_SD2_RESET_B__GPIO3_IO_BIT7 0x01BC 0x03C0 0x0000 0x05 0x00
+
+#define IMX95_PAD_I2C1_SCL__AONMIX_TOP_LPI2C1_SCL 0x01C0 0x03C4 0x0000 0x00 0x00
+#define IMX95_PAD_I2C1_SCL__AONMIX_TOP_I3C1_SCL 0x01C0 0x03C4 0x0000 0x01 0x00
+#define IMX95_PAD_I2C1_SCL__AONMIX_TOP_LPUART1_DCD_B 0x01C0 0x03C4 0x0000 0x02 0x00
+#define IMX95_PAD_I2C1_SCL__AONMIX_TOP_TPM2_CH0 0x01C0 0x03C4 0x0000 0x03 0x00
+#define IMX95_PAD_I2C1_SCL__VPUMIX_TOP_UART_RX 0x01C0 0x03C4 0x0000 0x04 0x00
+#define IMX95_PAD_I2C1_SCL__AONMIX_TOP_GPIO1_IO_BIT0 0x01C0 0x03C4 0x0000 0x05 0x00
+
+#define IMX95_PAD_I2C1_SDA__AONMIX_TOP_LPI2C1_SDA 0x01C4 0x03C8 0x0000 0x00 0x00
+#define IMX95_PAD_I2C1_SDA__AONMIX_TOP_I3C1_SDA 0x01C4 0x03C8 0x0000 0x01 0x00
+#define IMX95_PAD_I2C1_SDA__AONMIX_TOP_LPUART1_RIN_B 0x01C4 0x03C8 0x0000 0x02 0x00
+#define IMX95_PAD_I2C1_SDA__AONMIX_TOP_TPM2_CH1 0x01C4 0x03C8 0x0000 0x03 0x00
+#define IMX95_PAD_I2C1_SDA__VPUMIX_TOP_UART_TX 0x01C4 0x03C8 0x0000 0x04 0x00
+#define IMX95_PAD_I2C1_SDA__AONMIX_TOP_GPIO1_IO_BIT1 0x01C4 0x03C8 0x0000 0x05 0x00
+
+#define IMX95_PAD_I2C2_SCL__AONMIX_TOP_LPI2C2_SCL 0x01C8 0x03CC 0x0000 0x00 0x00
+#define IMX95_PAD_I2C2_SCL__AONMIX_TOP_I3C1_PUR 0x01C8 0x03CC 0x0000 0x01 0x00
+#define IMX95_PAD_I2C2_SCL__AONMIX_TOP_LPUART2_DCD_B 0x01C8 0x03CC 0x0000 0x02 0x00
+#define IMX95_PAD_I2C2_SCL__AONMIX_TOP_TPM2_CH2 0x01C8 0x03CC 0x0000 0x03 0x00
+#define IMX95_PAD_I2C2_SCL__AONMIX_TOP_SAI1_RX_SYNC 0x01C8 0x03CC 0x0000 0x04 0x00
+#define IMX95_PAD_I2C2_SCL__AONMIX_TOP_GPIO1_IO_BIT2 0x01C8 0x03CC 0x0000 0x05 0x00
+#define IMX95_PAD_I2C2_SCL__AONMIX_TOP_I3C1_PUR_B 0x01C8 0x03CC 0x0000 0x06 0x00
+
+#define IMX95_PAD_I2C2_SDA__AONMIX_TOP_LPI2C2_SDA 0x01CC 0x03D0 0x0000 0x00 0x00
+#define IMX95_PAD_I2C2_SDA__AONMIX_TOP_LPUART2_RIN_B 0x01CC 0x03D0 0x0000 0x02 0x00
+#define IMX95_PAD_I2C2_SDA__AONMIX_TOP_TPM2_CH3 0x01CC 0x03D0 0x0000 0x03 0x00
+#define IMX95_PAD_I2C2_SDA__AONMIX_TOP_SAI1_RX_BCLK 0x01CC 0x03D0 0x0000 0x04 0x00
+#define IMX95_PAD_I2C2_SDA__AONMIX_TOP_GPIO1_IO_BIT3 0x01CC 0x03D0 0x0000 0x05 0x00
+
+#define IMX95_PAD_UART1_RXD__AONMIX_TOP_LPUART1_RX 0x01D0 0x03D4 0x0000 0x00 0x00
+#define IMX95_PAD_UART1_RXD__S400_UART_RX 0x01D0 0x03D4 0x0000 0x01 0x00
+#define IMX95_PAD_UART1_RXD__AONMIX_TOP_LPSPI2_SIN 0x01D0 0x03D4 0x0000 0x02 0x00
+#define IMX95_PAD_UART1_RXD__AONMIX_TOP_TPM1_CH0 0x01D0 0x03D4 0x0000 0x03 0x00
+#define IMX95_PAD_UART1_RXD__AONMIX_TOP_GPIO1_IO_BIT4 0x01D0 0x03D4 0x0000 0x05 0x00
+
+#define IMX95_PAD_UART1_TXD__AONMIX_TOP_LPUART1_TX 0x01D4 0x03D8 0x0000 0x00 0x00
+#define IMX95_PAD_UART1_TXD__S400_UART_TX 0x01D4 0x03D8 0x0000 0x01 0x00
+#define IMX95_PAD_UART1_TXD__AONMIX_TOP_LPSPI2_PCS0 0x01D4 0x03D8 0x0000 0x02 0x00
+#define IMX95_PAD_UART1_TXD__AONMIX_TOP_TPM1_CH1 0x01D4 0x03D8 0x0000 0x03 0x00
+#define IMX95_PAD_UART1_TXD__AONMIX_TOP_GPIO1_IO_BIT5 0x01D4 0x03D8 0x0000 0x05 0x00
+
+#define IMX95_PAD_UART2_RXD__AONMIX_TOP_LPUART2_RX 0x01D8 0x03DC 0x0000 0x00 0x00
+#define IMX95_PAD_UART2_RXD__AONMIX_TOP_LPUART1_CTS_B 0x01D8 0x03DC 0x0000 0x01 0x00
+#define IMX95_PAD_UART2_RXD__AONMIX_TOP_LPSPI2_SOUT 0x01D8 0x03DC 0x0000 0x02 0x00
+#define IMX95_PAD_UART2_RXD__AONMIX_TOP_TPM1_CH2 0x01D8 0x03DC 0x0000 0x03 0x00
+#define IMX95_PAD_UART2_RXD__AONMIX_TOP_SAI1_MCLK 0x01D8 0x03DC 0x041C 0x04 0x00
+#define IMX95_PAD_UART2_RXD__AONMIX_TOP_GPIO1_IO_BIT6 0x01D8 0x03DC 0x0000 0x05 0x00
+
+#define IMX95_PAD_UART2_TXD__AONMIX_TOP_LPUART2_TX 0x01DC 0x03E0 0x0000 0x00 0x00
+#define IMX95_PAD_UART2_TXD__AONMIX_TOP_LPUART1_RTS_B 0x01DC 0x03E0 0x0000 0x01 0x00
+#define IMX95_PAD_UART2_TXD__AONMIX_TOP_LPSPI2_SCK 0x01DC 0x03E0 0x0000 0x02 0x00
+#define IMX95_PAD_UART2_TXD__AONMIX_TOP_TPM1_CH3 0x01DC 0x03E0 0x0000 0x03 0x00
+#define IMX95_PAD_UART2_TXD__AONMIX_TOP_GPIO1_IO_BIT7 0x01DC 0x03E0 0x0000 0x05 0x00
+
+#define IMX95_PAD_PDM_CLK__AONMIX_TOP_PDM_CLK 0x01E0 0x03E4 0x0000 0x00 0x00
+#define IMX95_PAD_PDM_CLK__AONMIX_TOP_MQS1_LEFT 0x01E0 0x03E4 0x0000 0x01 0x00
+#define IMX95_PAD_PDM_CLK__AONMIX_TOP_LPTMR1_ALT1 0x01E0 0x03E4 0x0000 0x04 0x00
+#define IMX95_PAD_PDM_CLK__AONMIX_TOP_GPIO1_IO_BIT8 0x01E0 0x03E4 0x0000 0x05 0x00
+#define IMX95_PAD_PDM_CLK__AONMIX_TOP_CAN1_TX 0x01E0 0x03E4 0x0000 0x06 0x00
+
+#define IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_PDM_BIT_STREAM_BIT0 0x01E4 0x03E8 0x040C 0x00 0x00
+#define IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_MQS1_RIGHT 0x01E4 0x03E8 0x0000 0x01 0x00
+#define IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_LPSPI1_PCS1 0x01E4 0x03E8 0x0000 0x02 0x00
+#define IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_TPM1_EXTCLK 0x01E4 0x03E8 0x0000 0x03 0x00
+#define IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_LPTMR1_ALT2 0x01E4 0x03E8 0x0000 0x04 0x00
+#define IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_GPIO1_IO_BIT9 0x01E4 0x03E8 0x0000 0x05 0x00
+#define IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_CAN1_RX 0x01E4 0x03E8 0x0408 0x06 0x00
+
+#define IMX95_PAD_PDM_BIT_STREAM1__AONMIX_TOP_PDM_BIT_STREAM_BIT1 0x01E8 0x03EC 0x0410 0x00 0x00
+#define IMX95_PAD_PDM_BIT_STREAM1__NMI_GLUE_NMI 0x01E8 0x03EC 0x0000 0x01 0x00
+#define IMX95_PAD_PDM_BIT_STREAM1__AONMIX_TOP_LPSPI2_PCS1 0x01E8 0x03EC 0x0000 0x02 0x00
+#define IMX95_PAD_PDM_BIT_STREAM1__AONMIX_TOP_TPM2_EXTCLK 0x01E8 0x03EC 0x0000 0x03 0x00
+#define IMX95_PAD_PDM_BIT_STREAM1__AONMIX_TOP_LPTMR1_ALT3 0x01E8 0x03EC 0x0000 0x04 0x00
+#define IMX95_PAD_PDM_BIT_STREAM1__AONMIX_TOP_GPIO1_IO_BIT10 0x01E8 0x03EC 0x0000 0x05 0x00
+#define IMX95_PAD_PDM_BIT_STREAM1__CCMSRCGPCMIX_TOP_EXT_CLK1 0x01E8 0x03EC 0x0420 0x06 0x00
+
+#define IMX95_PAD_SAI1_TXFS__AONMIX_TOP_SAI1_TX_SYNC 0x01EC 0x03F0 0x0000 0x00 0x00
+#define IMX95_PAD_SAI1_TXFS__AONMIX_TOP_SAI1_TX_DATA_BIT1 0x01EC 0x03F0 0x0000 0x01 0x00
+#define IMX95_PAD_SAI1_TXFS__AONMIX_TOP_LPSPI1_PCS0 0x01EC 0x03F0 0x0000 0x02 0x00
+#define IMX95_PAD_SAI1_TXFS__AONMIX_TOP_LPUART2_DTR_B 0x01EC 0x03F0 0x0000 0x03 0x00
+#define IMX95_PAD_SAI1_TXFS__AONMIX_TOP_MQS1_LEFT 0x01EC 0x03F0 0x0000 0x04 0x00
+#define IMX95_PAD_SAI1_TXFS__AONMIX_TOP_GPIO1_IO_BIT11 0x01EC 0x03F0 0x0000 0x05 0x00
+
+#define IMX95_PAD_SAI1_TXC__AONMIX_TOP_SAI1_TX_BCLK 0x01F0 0x03F4 0x0000 0x00 0x00
+#define IMX95_PAD_SAI1_TXC__AONMIX_TOP_LPUART2_CTS_B 0x01F0 0x03F4 0x0000 0x01 0x00
+#define IMX95_PAD_SAI1_TXC__AONMIX_TOP_LPSPI1_SIN 0x01F0 0x03F4 0x0000 0x02 0x00
+#define IMX95_PAD_SAI1_TXC__AONMIX_TOP_LPUART1_DSR_B 0x01F0 0x03F4 0x0000 0x03 0x00
+#define IMX95_PAD_SAI1_TXC__AONMIX_TOP_CAN1_RX 0x01F0 0x03F4 0x0408 0x04 0x01
+#define IMX95_PAD_SAI1_TXC__AONMIX_TOP_GPIO1_IO_BIT12 0x01F0 0x03F4 0x0000 0x05 0x00
+
+#define IMX95_PAD_SAI1_TXD0__AONMIX_TOP_SAI1_TX_DATA_BIT0 0x01F4 0x03F8 0x0000 0x00 0x00
+#define IMX95_PAD_SAI1_TXD0__AONMIX_TOP_LPUART2_RTS_B 0x01F4 0x03F8 0x0000 0x01 0x00
+#define IMX95_PAD_SAI1_TXD0__AONMIX_TOP_LPSPI1_SCK 0x01F4 0x03F8 0x0000 0x02 0x00
+#define IMX95_PAD_SAI1_TXD0__AONMIX_TOP_LPUART1_DTR_B 0x01F4 0x03F8 0x0000 0x03 0x00
+#define IMX95_PAD_SAI1_TXD0__AONMIX_TOP_CAN1_TX 0x01F4 0x03F8 0x0000 0x04 0x00
+#define IMX95_PAD_SAI1_TXD0__AONMIX_TOP_GPIO1_IO_BIT13 0x01F4 0x03F8 0x0000 0x05 0x00
+
+#define IMX95_PAD_SAI1_RXD0__AONMIX_TOP_SAI1_RX_DATA_BIT0 0x01F8 0x03FC 0x0000 0x00 0x00
+#define IMX95_PAD_SAI1_RXD0__AONMIX_TOP_SAI1_MCLK 0x01F8 0x03FC 0x041C 0x01 0x01
+#define IMX95_PAD_SAI1_RXD0__AONMIX_TOP_LPSPI1_SOUT 0x01F8 0x03FC 0x0000 0x02 0x00
+#define IMX95_PAD_SAI1_RXD0__AONMIX_TOP_LPUART2_DSR_B 0x01F8 0x03FC 0x0000 0x03 0x00
+#define IMX95_PAD_SAI1_RXD0__AONMIX_TOP_MQS1_RIGHT 0x01F8 0x03FC 0x0000 0x04 0x00
+#define IMX95_PAD_SAI1_RXD0__AONMIX_TOP_GPIO1_IO_BIT14 0x01F8 0x03FC 0x0000 0x05 0x00
+
+#define IMX95_PAD_WDOG_ANY__AONMIX_TOP_WDOG_ANY 0x01FC 0x0400 0x0000 0x00 0x00
+#define IMX95_PAD_WDOG_ANY__AONMIX_TOP_FCCU_EOUT1 0x01FC 0x0400 0x0000 0x01 0x00
+#define IMX95_PAD_WDOG_ANY__AONMIX_TOP_GPIO1_IO_BIT15 0x01FC 0x0400 0x0000 0x05 0x00
+#endif /* __DTS_IMX95_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/freescale/imx95-power.h b/arch/arm64/boot/dts/freescale/imx95-power.h
new file mode 100644
index 000000000000..0b7f0bc30e19
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-power.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __IMX95_POWER_H__
+#define __IMX95_POWER_H__
+
+#define IMX95_PD_ANA 0
+#define IMX95_PD_AON 1
+#define IMX95_PD_BBSM 2
+#define IMX95_PD_CAMERA 3
+#define IMX95_PD_CCMSRCGPC 4
+#define IMX95_PD_A55C0 5
+#define IMX95_PD_A55C1 6
+#define IMX95_PD_A55C2 7
+#define IMX95_PD_A55C3 8
+#define IMX95_PD_A55C4 9
+#define IMX95_PD_A55C5 10
+#define IMX95_PD_A55P 11
+#define IMX95_PD_DDR 12
+#define IMX95_PD_DISPLAY 13
+#define IMX95_PD_GPU 14
+#define IMX95_PD_HSIO_TOP 15
+#define IMX95_PD_HSIO_WAON 16
+#define IMX95_PD_M7 17
+#define IMX95_PD_NETC 18
+#define IMX95_PD_NOC 19
+#define IMX95_PD_NPU 20
+#define IMX95_PD_VPU 21
+#define IMX95_PD_WAKEUP 22
+
+#define IMX95_PERF_ELE 0
+#define IMX95_PERF_M33 1
+#define IMX95_PERF_WAKEUP 2
+#define IMX95_PERF_M7 3
+#define IMX95_PERF_DRAM 4
+#define IMX95_PERF_HSIO 5
+#define IMX95_PERF_NPU 6
+#define IMX95_PERF_NOC 7
+#define IMX95_PERF_A55 8
+#define IMX95_PERF_GPU 9
+#define IMX95_PERF_VPU 10
+#define IMX95_PERF_CAM 11
+#define IMX95_PERF_DISP 12
+
+#endif
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
new file mode 100644
index 000000000000..1bbf9a0468f6
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -0,0 +1,1192 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
+
+#include "imx95-clock.h"
+#include "imx95-pinfunc.h"
+#include "imx95-power.h"
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ A55_0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l0>;
+ };
+
+ A55_1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x100>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l1>;
+ };
+
+ A55_2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x200>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l2>;
+ };
+
+ A55_3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x300>;
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domain-names = "perf";
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l3>;
+ };
+
+ A55_4: cpu@400 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x400>;
+ power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domain-names = "perf";
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l4>;
+ };
+
+ A55_5: cpu@500 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x500>;
+ power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domain-names = "perf";
+ enable-method = "psci";
+ #cooling-cells = <2>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l5>;
+ };
+
+ l2_cache_l0: l2-cache-l0 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l1: l2-cache-l1 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l2: l2-cache-l2 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l3: l2-cache-l3 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l4: l2-cache-l4 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l5: l2-cache-l5 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l3_cache: l3-cache {
+ compatible = "cache";
+ cache-size = <524288>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-level = <3>;
+ cache-unified;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&A55_0>;
+ };
+
+ core1 {
+ cpu = <&A55_1>;
+ };
+
+ core2 {
+ cpu = <&A55_2>;
+ };
+
+ core3 {
+ cpu = <&A55_3>;
+ };
+
+ core4 {
+ cpu = <&A55_4>;
+ };
+
+ core5 {
+ cpu = <&A55_5>;
+ };
+ };
+ };
+ };
+
+ clk_ext1: clock-ext1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext1";
+ };
+
+ sai1_mclk: clock-sai-mclk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai1_mclk";
+ };
+
+ sai2_mclk: clock-sai-mclk2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai2_mclk";
+ };
+
+ sai3_mclk: clock-sai-mclk3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai3_mclk";
+ };
+
+ sai4_mclk: clock-sai-mclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai4_mclk";
+ };
+
+ sai5_mclk: clock-sai-mclk5 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "sai5_mclk";
+ };
+
+ osc_24m: clock-24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "osc_24m";
+ };
+
+ sram1: sram@204c0000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x204c0000 0x0 0x18000>;
+ ranges = <0x0 0x0 0x204c0000 0x18000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ firmware {
+ scmi {
+ compatible = "arm,scmi";
+ mboxes = <&mu2 5 0>, <&mu2 3 0>, <&mu2 3 1>;
+ shmem = <&scmi_buf0>, <&scmi_buf1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_devpd: protocol@11 {
+ reg = <0x11>;
+ #power-domain-cells = <1>;
+ };
+
+ scmi_perf: protocol@13 {
+ reg = <0x13>;
+ #power-domain-cells = <1>;
+ };
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+
+ scmi_sensor: protocol@15 {
+ reg = <0x15>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ scmi_iomuxc: protocol@19 {
+ reg = <0x19>;
+ };
+
+ };
+ };
+
+ pmu {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ thermal-zones {
+ a55-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&scmi_sensor 1>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&A55_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
+ clock-frequency = <24000000>;
+ arm,no-tick-in-suspend;
+ interrupt-parent = <&gic>;
+ };
+
+ gic: interrupt-controller@48000000 {
+ compatible = "arm,gic-v3";
+ reg = <0 0x48000000 0 0x10000>,
+ <0 0x48060000 0 0xc0000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ dma-noncoherent;
+ ranges;
+
+ its: msi-controller@48040000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0 0x48040000 0 0x20000>;
+ msi-controller;
+ #msi-cells = <1>;
+ dma-noncoherent;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ aips2: bus@42000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x0 0x42000000 0x0 0x800000>;
+ ranges = <0x42000000 0x0 0x42000000 0x8000000>,
+ <0x28000000 0x0 0x28000000 0x10000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mu7: mailbox@42430000 {
+ compatible = "fsl,imx95-mu";
+ reg = <0x42430000 0x10000>;
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ wdog3: watchdog@42490000 {
+ compatible = "fsl,imx93-wdt";
+ reg = <0x42490000 0x10000>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ timeout-sec = <40>;
+ status = "disabled";
+ };
+
+ tpm3: pwm@424e0000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x424e0000 0x1000>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ tpm4: pwm@424f0000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x424f0000 0x1000>;
+ clocks = <&scmi_clk IMX95_CLK_TPM4>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ tpm5: pwm@42500000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x42500000 0x1000>;
+ clocks = <&scmi_clk IMX95_CLK_TPM5>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ tpm6: pwm@42510000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x42510000 0x1000>;
+ clocks = <&scmi_clk IMX95_CLK_TPM6>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ lpi2c3: i2c@42530000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x42530000 0x10000>;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C3>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpi2c4: i2c@42540000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x42540000 0x10000>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C4>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpspi3: spi@42550000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x42550000 0x10000>;
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI3>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ lpspi4: spi@42560000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x42560000 0x10000>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI4>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ lpuart3: serial@42570000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42570000 0x1000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART3>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ lpuart4: serial@42580000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42580000 0x1000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART4>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ lpuart5: serial@42590000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42590000 0x1000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART5>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ lpuart6: serial@425a0000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x425a0000 0x1000>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART6>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ lpuart7: serial@42690000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x42690000 0x1000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART7>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ lpuart8: serial@426a0000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x426a0000 0x1000>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART8>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ lpi2c5: i2c@426b0000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426b0000 0x10000>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C5>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpi2c6: i2c@426c0000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426c0000 0x10000>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C6>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpi2c7: i2c@426d0000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426d0000 0x10000>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C7>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpi2c8: i2c@426e0000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x426e0000 0x10000>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C8>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpspi5: spi@426f0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x426f0000 0x10000>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI5>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ lpspi6: spi@42700000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x42700000 0x10000>;
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI6>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ lpspi7: spi@42710000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x42710000 0x10000>;
+ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI7>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ lpspi8: spi@42720000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x42720000 0x10000>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI8>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ mu8: mailbox@42730000 {
+ compatible = "fsl,imx95-mu";
+ reg = <0x42730000 0x10000>;
+ interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+ };
+
+ aips3: bus@42800000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0 0x42800000 0 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x42800000 0x0 0x42800000 0x800000>;
+
+ usdhc1: mmc@42850000 {
+ compatible = "fsl,imx95-usdhc", "fsl,imx8mm-usdhc";
+ reg = <0x42850000 0x10000>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_WAKEUPAXI>,
+ <&scmi_clk IMX95_CLK_USDHC1>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_USDHC1>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ fsl,tuning-start-tap = <1>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+ usdhc2: mmc@42860000 {
+ compatible = "fsl,imx95-usdhc", "fsl,imx8mm-usdhc";
+ reg = <0x42860000 0x10000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_WAKEUPAXI>,
+ <&scmi_clk IMX95_CLK_USDHC2>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_USDHC2>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <4>;
+ fsl,tuning-start-tap = <1>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+ usdhc3: mmc@428b0000 {
+ compatible = "fsl,imx95-usdhc", "fsl,imx8mm-usdhc";
+ reg = <0x428b0000 0x10000>;
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_WAKEUPAXI>,
+ <&scmi_clk IMX95_CLK_USDHC3>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_USDHC3>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <4>;
+ fsl,tuning-start-tap = <1>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+ };
+
+ gpio2: gpio@43810000 {
+ compatible = "fsl,imx95-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43810000 0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "gpio", "port";
+ gpio-ranges = <&scmi_iomuxc 0 4 32>;
+ };
+
+ gpio3: gpio@43820000 {
+ compatible = "fsl,imx95-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43820000 0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "gpio", "port";
+ gpio-ranges = <&scmi_iomuxc 0 104 8>, <&scmi_iomuxc 8 74 18>,
+ <&scmi_iomuxc 26 42 2>, <&scmi_iomuxc 28 0 4>;
+ };
+
+ gpio4: gpio@43840000 {
+ compatible = "fsl,imx95-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43840000 0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "gpio", "port";
+ gpio-ranges = <&scmi_iomuxc 0 46 28>, <&scmi_iomuxc 28 44 2>;
+ };
+
+ gpio5: gpio@43850000 {
+ compatible = "fsl,imx95-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x43850000 0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "gpio", "port";
+ gpio-ranges = <&scmi_iomuxc 0 92 12>, <&scmi_iomuxc 12 36 6>;
+ };
+
+ aips1: bus@44000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x0 0x44000000 0x0 0x800000>;
+ ranges = <0x44000000 0x0 0x44000000 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mu1: mailbox@44220000 {
+ compatible = "fsl,imx95-mu";
+ reg = <0x44220000 0x10000>;
+ interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ tpm1: pwm@44310000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x44310000 0x1000>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ tpm2: pwm@44320000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x44320000 0x1000>;
+ clocks = <&scmi_clk IMX95_CLK_TPM2>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ lpi2c1: i2c@44340000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x44340000 0x10000>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C1>,
+ <&scmi_clk IMX95_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpi2c2: i2c@44350000 {
+ compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x44350000 0x10000>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPI2C2>,
+ <&scmi_clk IMX95_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ lpspi1: spi@44360000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x44360000 0x10000>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI1>,
+ <&scmi_clk IMX95_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ lpspi2: spi@44370000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx95-spi", "fsl,imx7ulp-spi";
+ reg = <0x44370000 0x10000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPSPI2>,
+ <&scmi_clk IMX95_CLK_BUSAON>;
+ clock-names = "per", "ipg";
+ status = "disabled";
+ };
+
+ lpuart1: serial@44380000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x44380000 0x1000>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART1>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ lpuart2: serial@44390000 {
+ compatible = "fsl,imx95-lpuart", "fsl,imx8ulp-lpuart",
+ "fsl,imx7ulp-lpuart";
+ reg = <0x44390000 0x1000>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_LPUART2>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ adc1: adc@44530000 {
+ compatible = "nxp,imx93-adc";
+ reg = <0x44530000 0x10000>;
+ interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_ADC>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ mu2: mailbox@445b0000 {
+ compatible = "fsl,imx95-mu";
+ reg = <0x445b0000 0x1000>;
+ ranges;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #mbox-cells = <2>;
+
+ sram0: sram@445b1000 {
+ compatible = "mmio-sram";
+ reg = <0x445b1000 0x400>;
+ ranges = <0x0 0x445b1000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scmi_buf0: scmi-sram-section@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x80>;
+ };
+
+ scmi_buf1: scmi-sram-section@80 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x80 0x80>;
+ };
+ };
+
+ };
+
+ mu3: mailbox@445d0000 {
+ compatible = "fsl,imx95-mu";
+ reg = <0x445d0000 0x10000>;
+ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu4: mailbox@445f0000 {
+ compatible = "fsl,imx95-mu";
+ reg = <0x445f0000 0x10000>;
+ interrupts = <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ mu6: mailbox@44630000 {
+ compatible = "fsl,imx95-mu";
+ reg = <0x44630000 0x10000>;
+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+ };
+
+ mailbox@47320000 {
+ compatible = "fsl,imx95-mu-v2x";
+ reg = <0x0 0x47320000 0x0 0x10000>;
+ interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ };
+
+ mailbox@47350000 {
+ compatible = "fsl,imx95-mu-v2x";
+ reg = <0x0 0x47350000 0x0 0x10000>;
+ interrupts = <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ };
+
+ /* GPIO1 is under exclusive control of System Manager */
+ gpio1: gpio@47400000 {
+ compatible = "fsl,imx95-gpio", "fsl,imx8ulp-gpio";
+ reg = <0x0 0x47400000 0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk IMX95_CLK_M33>,
+ <&scmi_clk IMX95_CLK_M33>;
+ clock-names = "gpio", "port";
+ gpio-ranges = <&scmi_iomuxc 0 112 16>;
+ status = "disabled";
+ };
+
+ elemu0: mailbox@47520000 {
+ compatible = "fsl,imx95-mu-ele";
+ reg = <0x0 0x47520000 0x0 0x10000>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ elemu1: mailbox@47530000 {
+ compatible = "fsl,imx95-mu-ele";
+ reg = <0x0 0x47530000 0x0 0x10000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ elemu2: mailbox@47540000 {
+ compatible = "fsl,imx95-mu-ele";
+ reg = <0x0 0x47540000 0x0 0x10000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ elemu3: mailbox@47550000 {
+ compatible = "fsl,imx95-mu-ele";
+ reg = <0x0 0x47550000 0x0 0x10000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ };
+
+ elemu4: mailbox@47560000 {
+ compatible = "fsl,imx95-mu-ele";
+ reg = <0x0 0x47560000 0x0 0x10000>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ elemu5: mailbox@47570000 {
+ compatible = "fsl,imx95-mu-ele";
+ reg = <0x0 0x47570000 0x0 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ aips4: bus@49000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x0 0x49000000 0x0 0x800000>;
+ ranges = <0x49000000 0x0 0x49000000 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ smmu: iommu@490d0000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x490d0000 0x100000>;
+ interrupts = <GIC_SPI 325 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 334 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 326 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+ };
+
+ pcie0: pcie@4c300000 {
+ compatible = "fsl,imx95-pcie";
+ reg = <0 0x4c300000 0 0x10000>,
+ <0 0x60100000 0 0xfe00000>,
+ <0 0x4c360000 0 0x10000>,
+ <0 0x4c340000 0 0x2000>;
+ reg-names = "dbi", "config", "atu", "app";
+ ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>,
+ <0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ num-viewport = <8>;
+ interrupts = <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 0 GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 0 GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 0 GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ assigned-clocks = <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
+ assigned-clock-parents = <0>, <0>,
+ <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ fsl,max-link-speed = <3>;
+ status = "disabled";
+ };
+
+ pcie0_ep: pcie-ep@4c300000 {
+ compatible = "fsl,imx95-pcie-ep";
+ reg = <0 0x4c300000 0 0x10000>,
+ <0 0x4c360000 0 0x1000>,
+ <0 0x4c320000 0 0x1000>,
+ <0 0x4c340000 0 0x2000>,
+ <0 0x4c370000 0 0x10000>,
+ <0x9 0 1 0>;
+ reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";
+ num-lanes = <1>;
+ interrupts = <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma";
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ assigned-clocks = <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
+ assigned-clock-parents = <0>, <0>,
+ <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ status = "disabled";
+ };
+
+ pcie1: pcie@4c380000 {
+ compatible = "fsl,imx95-pcie";
+ reg = <0 0x4c380000 0 0x10000>,
+ <8 0x80100000 0 0xfe00000>,
+ <0 0x4c3e0000 0 0x10000>,
+ <0 0x4c3c0000 0 0x2000>;
+ reg-names = "dbi", "config", "atu", "app";
+ ranges = <0x81000000 0 0x00000000 0x8 0x8ff00000 0 0x00100000>,
+ <0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ num-viewport = <8>;
+ interrupts = <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 0 GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 0 GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 0 GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ assigned-clocks = <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
+ assigned-clock-parents = <0>, <0>,
+ <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ fsl,max-link-speed = <3>;
+ status = "disabled";
+ };
+
+ pcie1_ep: pcie-ep@4c380000 {
+ compatible = "fsl,imx95-pcie-ep";
+ reg = <0 0x4c380000 0 0x10000>,
+ <0 0x4c3e0000 0 0x1000>,
+ <0 0x4c3a0000 0 0x1000>,
+ <0 0x4c3c0000 0 0x2000>,
+ <0 0x4c3f0000 0 0x10000>,
+ <0xa 0 1 0>;
+ reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";
+ num-lanes = <1>;
+ interrupts = <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma";
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ assigned-clocks = <&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
+ <&scmi_clk IMX95_CLK_HSIOPLL>,
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
+ assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
+ assigned-clock-parents = <0>, <0>,
+ <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
index dbd2fc3ba790..65f7b5a50eb5 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
@@ -32,7 +32,7 @@ fman@1a00000 {
mdio@f1000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xf1000 0x1000>;
pcsphy6: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
index 6fc5d2560057..3f70482c98c3 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
@@ -32,7 +32,7 @@ fman@1a00000 {
mdio@f3000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xf3000 0x1000>;
pcsphy7: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
index 4e02276fcf99..78841c1f3252 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
@@ -31,7 +31,7 @@ fman@1a00000 {
mdio@e1000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xe1000 0x1000>;
pcsphy0: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
index 0312fa43fa77..1f43fa666222 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
@@ -31,7 +31,7 @@ fman@1a00000 {
mdio@e3000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xe3000 0x1000>;
pcsphy1: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
index af2df07971dd..de0aa017701d 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
@@ -31,7 +31,7 @@ fman@1a00000 {
mdio@e5000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xe5000 0x1000>;
pcsphy2: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
index 4ac98dc8b227..6904aa5d8e54 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
@@ -31,7 +31,7 @@ fman@1a00000 {
mdio@e7000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xe7000 0x1000>;
pcsphy3: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
index bd932d8b0160..a3d29d470297 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
@@ -31,7 +31,7 @@ fman@1a00000 {
mdio@e9000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xe9000 0x1000>;
pcsphy4: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
index 7de1c5203f3e..01b78c0463a7 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
@@ -31,7 +31,7 @@ fman@1a00000 {
mdio@eb000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xeb000 0x1000>;
pcsphy5: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
index ae1c2abaaf36..b0390b711fef 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -67,14 +67,14 @@ fman0: fman@1a00000 {
mdio0: mdio@fc000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xfc000 0x1000>;
};
xmdio0: mdio@fd000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ compatible = "fsl,fman-memac-mdio";
reg = <0xfd000 0x1000>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/tqma8xx.dtsi b/arch/arm64/boot/dts/freescale/tqma8xx.dtsi
index d98469a7c47c..366912bf3d5e 100644
--- a/arch/arm64/boot/dts/freescale/tqma8xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/tqma8xx.dtsi
@@ -61,12 +61,16 @@
flash0: flash@0 {
reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "jedec,spi-nor";
spi-max-frequency = <66000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 7e137a884ae5..957a1b41f19b 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -1161,7 +1161,7 @@
};
usb3_otg_bc: usb3_otg_bc@ff200000 {
- compatible = "syscon", "simple-mfd";
+ compatible = "hisilicon,hi3660-usb3-otg-bc", "syscon", "simple-mfd";
reg = <0x0 0xff200000 0x0 0x1000>;
usb_phy: usb-phy {
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
index ad99aefeb185..b31cfa6b802d 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
@@ -106,8 +106,6 @@
&qspi {
status = "okay";
flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
index 2d70a92c2090..7952c7f47cc2 100644
--- a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
@@ -83,8 +83,6 @@
&qspi {
status = "okay";
flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
index 99b8cb3c49e1..ce751b5028e2 100644
--- a/arch/arm64/boot/dts/marvell/Makefile
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -28,3 +28,7 @@ dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-A.dtb
dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-B.dtb
dtb-$(CONFIG_ARCH_MVEBU) += ac5x-rd-carrier-cn9131.dtb
dtb-$(CONFIG_ARCH_MVEBU) += ac5-98dx35xx-rd.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += cn9130-cf-base.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += cn9130-cf-pro.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += cn9131-cf-solidwan.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += cn9132-clearfog.dtb
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts b/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts
index 63fbc8352161..56930f2ce481 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts
@@ -41,7 +41,7 @@
keys {
compatible = "gpio-keys";
- reset {
+ button-reset {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpionb 14 GPIO_ACTIVE_LOW>;
@@ -57,17 +57,17 @@
leds {
compatible = "gpio-leds";
- vpn {
+ led-vpn {
label = "green:vpn";
gpios = <&gpionb 11 GPIO_ACTIVE_LOW>;
};
- wan {
+ led-wan {
label = "green:wan";
gpios = <&gpionb 12 GPIO_ACTIVE_LOW>;
};
- led_power: power {
+ led_power: led-power {
label = "green:power";
gpios = <&gpionb 13 GPIO_ACTIVE_LOW>;
default-state = "on";
diff --git a/arch/arm64/boot/dts/marvell/cn9130-cf-base.dts b/arch/arm64/boot/dts/marvell/cn9130-cf-base.dts
new file mode 100644
index 000000000000..788a5c302b17
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/cn9130-cf-base.dts
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Josua Mayer <josua@solid-run.com>
+ *
+ * DTS for SolidRun CN9130 Clearfog Base.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "cn9130.dtsi"
+#include "cn9130-sr-som.dtsi"
+#include "cn9130-cf.dtsi"
+
+/ {
+ model = "SolidRun CN9130 Clearfog Base";
+ compatible = "solidrun,cn9130-clearfog-base",
+ "solidrun,cn9130-sr-som", "marvell,cn9130";
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&rear_button_pins>;
+ pinctrl-names = "default";
+
+ button-0 {
+ /* The rear SW3 button */
+ label = "Rear Button";
+ gpios = <&cp0_gpio1 31 GPIO_ACTIVE_LOW>;
+ linux,can-disable;
+ linux,code = <BTN_0>;
+ };
+ };
+
+ rfkill-m2-gnss {
+ compatible = "rfkill-gpio";
+ label = "m.2 GNSS";
+ radio-type = "gps";
+ /* rfkill-gpio inverts internally */
+ shutdown-gpios = <&expander0 9 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* M.2 is B-keyed, so w-disable is for WWAN */
+ rfkill-m2-wwan {
+ compatible = "rfkill-gpio";
+ label = "m.2 WWAN";
+ radio-type = "wwan";
+ /* rfkill-gpio inverts internally */
+ shutdown-gpios = <&expander0 8 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+/* SRDS #3 - SGMII 1GE */
+&cp0_eth1 {
+ phy = <&phy1>;
+ phys = <&cp0_comphy3 1>;
+ phy-mode = "sgmii";
+ status = "okay";
+};
+
+&cp0_eth2_phy {
+ /*
+ * Configure LEDs default behaviour:
+ * - LED[0]: link/activity: On/blink (green)
+ * - LED[1]: link is 100/1000Mbps: On (yellow)
+ * - LED[2]: high impedance (floating)
+ */
+ marvell,reg-init = <3 16 0xf000 0x0a61>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_WAN;
+ default-state = "keep";
+ };
+ };
+};
+
+&cp0_gpio1 {
+ sim-select-hog {
+ gpio-hog;
+ gpios = <27 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "sim-select";
+ };
+};
+
+&cp0_mdio {
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ /*
+ * Configure LEDs default behaviour:
+ * - LED[0]: link/activity: On/blink (green)
+ * - LED[1]: link is 100/1000Mbps: On (yellow)
+ * - LED[2]: high impedance (floating)
+ *
+ * Configure LEDs electrical polarity
+ * - on-state: low
+ * - off-state: high (not hi-z, to avoid residual glow)
+ */
+ marvell,reg-init = <3 16 0xf000 0x0a61>,
+ <3 17 0x003f 0x000a>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+};
+
+&cp0_pinctrl {
+ pinctrl-0 = <&sim_select_pins>;
+ pintrl-names = "default";
+
+ rear_button_pins: cp0-rear-button-pins {
+ marvell,pins = "mpp31";
+ marvell,function = "gpio";
+ };
+
+ sim_select_pins: cp0-sim-select-pins {
+ marvell,pins = "mpp27";
+ marvell,function = "gpio";
+ };
+};
+
+/*
+ * SRDS #4 - USB 3.0 host on M.2 connector
+ * USB-2.0 Host on Type-A connector
+ */
+&cp0_usb3_1 {
+ phys = <&cp0_comphy4 1>, <&cp0_utmi1>;
+ phy-names = "comphy", "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&expander0 {
+ m2-full-card-power-off-hog {
+ gpio-hog;
+ gpios = <2 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "m2-full-card-power-off";
+ };
+
+ m2-reset-hog {
+ gpio-hog;
+ gpios = <10 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "m2-reset";
+ };
+};
diff --git a/arch/arm64/boot/dts/marvell/cn9130-cf-pro.dts b/arch/arm64/boot/dts/marvell/cn9130-cf-pro.dts
new file mode 100644
index 000000000000..a27fe0042867
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/cn9130-cf-pro.dts
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Josua Mayer <josua@solid-run.com>
+ *
+ * DTS for SolidRun CN9130 Clearfog Pro.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "cn9130.dtsi"
+#include "cn9130-sr-som.dtsi"
+#include "cn9130-cf.dtsi"
+
+/ {
+ model = "SolidRun CN9130 Clearfog Pro";
+ compatible = "solidrun,cn9130-clearfog-pro",
+ "solidrun,cn9130-sr-som", "marvell,cn9130";
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&rear_button_pins>;
+ pinctrl-names = "default";
+
+ button-0 {
+ /* The rear SW3 button */
+ label = "Rear Button";
+ gpios = <&cp0_gpio2 0 GPIO_ACTIVE_LOW>;
+ linux,can-disable;
+ linux,code = <BTN_0>;
+ };
+ };
+};
+
+/* SRDS #3 - SGMII 1GE to L2 switch */
+&cp0_eth1 {
+ phys = <&cp0_comphy3 1>;
+ phy-mode = "sgmii";
+ status = "okay";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+};
+
+&cp0_eth2_phy {
+ /*
+ * Configure LEDs default behaviour similar to switch ports:
+ * - LED[0]: link/activity: On/blink (green)
+ * - LED[1]: link is 100/1000Mbps: On (red)
+ * - LED[2]: high impedance (floating)
+ *
+ * Switch port defaults:
+ * - LED0: link/activity: On/blink (green)
+ * - LED1: link is 1000Mbps: On (red)
+ *
+ * Identical configuration is impossible with hardware offload.
+ */
+ marvell,reg-init = <3 16 0xf000 0x0a61>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WAN;
+ label = "LED2";
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_WAN;
+ label = "LED1";
+ default-state = "keep";
+ };
+ };
+};
+
+&cp0_mdio {
+ ethernet-switch@4 {
+ compatible = "marvell,mv88e6085";
+ reg = <4>;
+ pinctrl-0 = <&dsa_clk_pins &dsa_pins>;
+ pinctrl-names = "default";
+ reset-gpios = <&cp0_gpio1 27 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&cp0_gpio1>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-port@0 {
+ reg = <0>;
+ label = "lan5";
+ phy = <&switch0phy0>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ label = "LED12";
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_LAN;
+ label = "LED11";
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@1 {
+ reg = <1>;
+ label = "lan4";
+ phy = <&switch0phy1>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ label = "LED10";
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_LAN;
+ label = "LED9";
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@2 {
+ reg = <2>;
+ label = "lan3";
+ phy = <&switch0phy2>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ label = "LED8";
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_LAN;
+ label = "LED7";
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@3 {
+ reg = <3>;
+ label = "lan2";
+ phy = <&switch0phy3>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ label = "LED6";
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_LAN;
+ label = "LED5";
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@4 {
+ reg = <4>;
+ label = "lan1";
+ phy = <&switch0phy4>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ label = "LED4";
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_LAN;
+ label = "LED3";
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&cp0_eth1>;
+ phy-mode = "sgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ ethernet-port@6 {
+ reg = <6>;
+ label = "lan6";
+ phy-mode = "rgmii";
+
+ /*
+ * Because of an mdio address conflict the
+ * external phy is not readable.
+ * Force a fixed link instead.
+ */
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch0phy0: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+
+ switch0phy1: ethernet-phy@1 {
+ reg = <0x1>;
+ /*
+ * Indirectly configure default behaviour
+ * for the lan6 port LEDs behind the external phy.
+ * Internal PHYs do not use page 3,
+ * therefore writing to it is safe.
+ */
+ marvell,reg-init = <3 16 0xf000 0x0a61>;
+ };
+
+ switch0phy2: ethernet-phy@2 {
+ reg = <0x2>;
+ };
+
+ switch0phy3: ethernet-phy@3 {
+ reg = <0x3>;
+ };
+
+ switch0phy4: ethernet-phy@4 {
+ reg = <0x4>;
+ };
+ };
+
+ /*
+ * There is an external phy on the switch mdio bus.
+ * Because its mdio address collides with internal phys,
+ * it is not readable.
+ *
+ * mdio-external {
+ * compatible = "marvell,mv88e6xxx-mdio-external";
+ * #address-cells = <1>;
+ * #size-cells = <0>;
+ *
+ * ethernet-phy@1 {
+ * reg = <0x1>;
+ * };
+ * };
+ */
+ };
+};
+
+/* SRDS #4 - miniPCIe (CON2) */
+&cp0_pcie1 {
+ num-lanes = <1>;
+ phys = <&cp0_comphy4 1>;
+ /* dw-pcie inverts internally */
+ reset-gpios = <&expander0 2 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&cp0_pinctrl {
+ dsa_clk_pins: cp0-dsa-clk-pins {
+ marvell,pins = "mpp40";
+ marvell,function = "synce1";
+ };
+
+ dsa_pins: cp0-dsa-pins {
+ marvell,pins = "mpp27", "mpp29";
+ marvell,function = "gpio";
+ };
+
+ rear_button_pins: cp0-rear-button-pins {
+ marvell,pins = "mpp32";
+ marvell,function = "gpio";
+ };
+
+ cp0_spi1_cs1_pins: cp0-spi1-cs1-pins {
+ marvell,pins = "mpp12";
+ marvell,function = "spi1";
+ };
+};
+
+&cp0_spi1 {
+ /* add pin for chip-select 1 on mikrobus */
+ pinctrl-0 = <&cp0_spi1_pins &cp0_spi1_cs1_pins>;
+};
+
+/* USB-2.0 Host on Type-A connector */
+&cp0_usb3_1 {
+ phys = <&cp0_utmi1>;
+ phy-names = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&expander0 {
+ /* CON2 */
+ pcie1-0-clkreq-hog {
+ gpio-hog;
+ gpios = <4 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "pcie1.0-clkreq";
+ };
+
+ /* CON2 */
+ pcie1-0-w-disable-hog {
+ gpio-hog;
+ gpios = <7 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "pcie1.0-w-disable";
+ };
+};
diff --git a/arch/arm64/boot/dts/marvell/cn9130-cf.dtsi b/arch/arm64/boot/dts/marvell/cn9130-cf.dtsi
new file mode 100644
index 000000000000..ad0ab34b6602
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/cn9130-cf.dtsi
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Josua Mayer <josua@solid-run.com>
+ *
+ * DTS for common base of SolidRun CN9130 Clearfog Base and Pro.
+ *
+ */
+
+/ {
+ aliases {
+ /* label NICs in the same order as Armada 388 Clearfog */
+ ethernet0 = &cp0_eth2;
+ ethernet1 = &cp0_eth1;
+ ethernet2 = &cp0_eth0;
+ i2c1 = &cp0_i2c1;
+ mmc1 = &cp0_sdhci0;
+ };
+
+ reg_usb3_vbus0: regulator-usb3-vbus0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpios = <&expander0 6 GPIO_ACTIVE_LOW>;
+ };
+
+ sfp: sfp {
+ compatible = "sff,sfp";
+ i2c-bus = <&cp0_i2c1>;
+ los-gpios = <&expander0 12 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&expander0 15 GPIO_ACTIVE_LOW>;
+ tx-disable-gpios = <&expander0 14 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpios = <&expander0 13 GPIO_ACTIVE_HIGH>;
+ maximum-power-milliwatt = <2000>;
+ };
+};
+
+/* SRDS #2 - SFP+ 10GE */
+&cp0_eth0 {
+ managed = "in-band-status";
+ phys = <&cp0_comphy2 0>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp>;
+ status = "okay";
+};
+
+&cp0_i2c0 {
+ expander0: gpio-expander@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-0 = <&expander0_pins>;
+ pinctrl-names = "default";
+ interrupt-parent = <&cp0_gpio1>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+
+ /* CON3 */
+ pcie2-0-clkreq-hog {
+ gpio-hog;
+ gpios = <0 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "pcie2.0-clkreq";
+ };
+
+ /* CON3 */
+ pcie2-0-w-disable-hog {
+ gpio-hog;
+ gpios = <3 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "pcie2.0-w-disable";
+ };
+
+ usb3-ilimit-hog {
+ gpio-hog;
+ gpios = <5 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "usb3-current-limit";
+ };
+
+ m2-devslp-hog {
+ gpio-hog;
+ gpios = <11 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "m.2 devslp";
+ };
+ };
+
+ /* The MCP3021 supports standard and fast modes */
+ adc@4c {
+ compatible = "microchip,mcp3021";
+ reg = <0x4c>;
+ };
+
+ carrier_eeprom: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <8>;
+ };
+};
+
+&cp0_i2c1 {
+ /*
+ * Routed to SFP, M.2, mikrobus, and miniPCIe
+ * SFP limits this to 100kHz, and requires an AT24C01A/02/04 with
+ * address pins tied low, which takes addresses 0x50 and 0x51.
+ * Mikrobus doesn't specify beyond an I2C bus being present.
+ * PCIe uses ARP to assign addresses, or 0x63-0x64.
+ */
+ clock-frequency = <100000>;
+ pinctrl-0 = <&cp0_i2c1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+/* SRDS #5 - miniPCIe (CON3) */
+&cp0_pcie2 {
+ num-lanes = <1>;
+ phys = <&cp0_comphy5 2>;
+ /* dw-pcie inverts internally */
+ reset-gpios = <&expander0 1 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&cp0_pinctrl {
+ cp0_i2c1_pins: cp0-i2c1-pins {
+ marvell,pins = "mpp35", "mpp36";
+ marvell,function = "i2c1";
+ };
+
+ cp0_mmc0_pins: cp0-mmc0-pins {
+ marvell,pins = "mpp43", "mpp56", "mpp57", "mpp58",
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+
+ mikro_spi_pins: cp0-spi1-cs1-pins {
+ marvell,pins = "mpp12";
+ marvell,function = "spi1";
+ };
+
+ mikro_uart_pins: cp0-uart-pins {
+ marvell,pins = "mpp2", "mpp3";
+ marvell,function = "uart1";
+ };
+
+ expander0_pins: cp0-expander0-pins {
+ marvell,pins = "mpp4";
+ marvell,function = "gpio";
+ };
+};
+
+/* SRDS #0 - SATA on M.2 connector */
+&cp0_sata0 {
+ phys = <&cp0_comphy0 1>;
+ status = "okay";
+
+ /* only port 1 is available */
+ /delete-node/ sata-port@0;
+};
+
+/* microSD */
+&cp0_sdhci0 {
+ pinctrl-0 = <&cp0_mmc0_pins>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+ no-1-8-v;
+ status = "okay";
+};
+
+&cp0_spi1 {
+ /* CS1 for mikrobus */
+ pinctrl-0 = <&cp0_spi1_pins &mikro_spi_pins>;
+};
+
+/*
+ * SRDS #1 - USB-3.0 Host on Type-A connector
+ * USB-2.0 Host on mPCI-e connector (CON3)
+ */
+&cp0_usb3_0 {
+ phys = <&cp0_comphy1 0>, <&cp0_utmi0>;
+ phy-names = "comphy", "utmi";
+ vbus-supply = <&reg_usb3_vbus0>;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&cp0_utmi {
+ status = "okay";
+};
+
+/* mikrobus uart */
+&cp0_uart0 {
+ pinctrl-0 = <&mikro_uart_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
new file mode 100644
index 000000000000..4676e3488f54
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Josua Mayer <josua@solid-run.com>
+ *
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "SolidRun CN9130 SoM";
+ compatible = "solidrun,cn9130-sr-som", "marvell,cn9130";
+
+ aliases {
+ ethernet0 = &cp0_eth0;
+ ethernet1 = &cp0_eth1;
+ ethernet2 = &cp0_eth2;
+ i2c0 = &cp0_i2c0;
+ mmc0 = &ap_sdhci0;
+ rtc0 = &cp0_rtc;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ v_1_8: regulator-1-8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* requires assembly of R9307 */
+ vhv: regulator-vhv-1-8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vhv-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ pinctrl-0 = <&cp0_reg_vhv_pins>;
+ pinctrl-names = "default";
+ gpios = <&cp0_gpio2 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&ap_pinctrl {
+ ap_mmc0_pins: ap-mmc0-pins {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3", "mpp4", "mpp5",
+ "mpp6", "mpp7", "mpp8", "mpp9", "mpp10", "mpp12";
+ marvell,function = "sdio";
+ /*
+ * mpp12 is emmc reset, function should be sdio (hw_rst),
+ * but pinctrl-mvebu does not support this.
+ *
+ * From pinctrl-mvebu.h:
+ * "The name will be used to switch to this setting in DT description, e.g.
+ * marvell,function = "uart2". subname is only for debugging purposes."
+ */
+ };
+};
+
+&ap_sdhci0 {
+ bus-width = <8>;
+ pinctrl-0 = <&ap_mmc0_pins>;
+ pinctrl-names = "default";
+ vqmmc-supply = <&v_1_8>;
+ status = "okay";
+};
+
+&cp0_ethernet {
+ status = "okay";
+};
+
+/* for assembly with phy */
+&cp0_eth2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_eth2_pins>;
+ phy-mode = "rgmii-id";
+ phy = <&cp0_eth2_phy>;
+ status = "okay";
+};
+
+&cp0_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_i2c0_pins>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ som_eeprom: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <8>;
+ };
+};
+
+&cp0_mdio {
+ pinctrl-0 = <&cp0_mdio_pins>;
+ status = "okay";
+
+ /* assembly option */
+ cp0_eth2_phy: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&cp0_spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_spi1_pins>;
+ /* max speed limited by a mux */
+ spi-max-frequency = <1800000000>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ /* read command supports max. 50MHz */
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&cp0_syscon0 {
+ cp0_pinctrl: pinctrl {
+ compatible = "marvell,cp115-standalone-pinctrl";
+
+ cp0_eth2_pins: cp0-ge2-rgmii-pins {
+ marvell,pins = "mpp44", "mpp45", "mpp46", "mpp47",
+ "mpp48", "mpp49", "mpp50", "mpp51",
+ "mpp52", "mpp53", "mpp54", "mpp55";
+ /* docs call it "ge2", but cp110-pinctrl "ge1" */
+ marvell,function = "ge1";
+ };
+
+ cp0_i2c0_pins: cp0-i2c0-pins {
+ marvell,pins = "mpp37", "mpp38";
+ marvell,function = "i2c0";
+ };
+
+ cp0_mdio_pins: cp0-mdio-pins {
+ marvell,pins = "mpp40", "mpp41";
+ marvell,function = "ge";
+ };
+
+ cp0_spi1_pins: cp0-spi1-pins {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+
+ cp0_reg_vhv_pins: cp0-reg-vhv-pins {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+ };
+};
+
+/* AP default console */
+&uart0 {
+ pinctrl-0 = <&uart0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
new file mode 100644
index 000000000000..b1ea7dcaed17
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Josua Mayer <josua@solid-run.com>
+ *
+ * DTS for SolidRun CN9131 SolidWAN.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "cn9130.dtsi"
+#include "cn9130-sr-som.dtsi"
+
+/*
+ * Instantiate the external CP115
+ */
+
+#define CP11X_NAME cp1
+#define CP11X_BASE f4000000
+#define CP11X_PCIEx_MEM_BASE(iface) (0xe2000000 + (iface * 0x1000000))
+#define CP11X_PCIEx_MEM_SIZE(iface) 0xf00000
+#define CP11X_PCIE0_BASE f4600000
+#define CP11X_PCIE1_BASE f4620000
+#define CP11X_PCIE2_BASE f4640000
+
+#include "armada-cp115.dtsi"
+
+#undef CP11X_NAME
+#undef CP11X_BASE
+#undef CP11X_PCIEx_MEM_BASE
+#undef CP11X_PCIEx_MEM_SIZE
+#undef CP11X_PCIE0_BASE
+#undef CP11X_PCIE1_BASE
+#undef CP11X_PCIE2_BASE
+
+/ {
+ model = "SolidRun CN9131 SolidWAN";
+ compatible = "solidrun,cn9131-solidwan",
+ "solidrun,cn9130-sr-som", "marvell,cn9130";
+
+ aliases {
+ ethernet0 = &cp1_eth1;
+ ethernet1 = &cp1_eth2;
+ ethernet2 = &cp0_eth1;
+ ethernet3 = &cp0_eth2;
+ ethernet4 = &cp0_eth0;
+ ethernet5 = &cp1_eth0;
+ gpio0 = &ap_gpio;
+ gpio1 = &cp0_gpio1;
+ gpio2 = &cp0_gpio2;
+ gpio3 = &cp1_gpio1;
+ gpio4 = &cp1_gpio2;
+ gpio5 = &expander0;
+ i2c0 = &cp0_i2c0;
+ i2c1 = &cp0_i2c1;
+ i2c2 = &cp1_i2c1;
+ mmc0 = &ap_sdhci0;
+ mmc1 = &cp0_sdhci0;
+ rtc0 = &cp0_rtc;
+ rtc1 = &carrier_rtc;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_led_pins &cp1_led_pins>;
+
+ /* for sfp-1 (J42) */
+ led-sfp1-activity {
+ label = "sfp1:green";
+ gpios = <&cp0_gpio1 7 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* for sfp-1 (J42) */
+ led-sfp1-link {
+ label = "sfp1:yellow";
+ gpios = <&cp0_gpio1 4 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* (J28) */
+ led-sfp0-activity {
+ label = "sfp0:green";
+ gpios = <&cp1_gpio2 22 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* (J28) */
+ led-sfp0-link {
+ label = "sfp0:yellow";
+ gpios = <&cp1_gpio2 23 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ /* Type-A port on J53 */
+ reg_usb_a_vbus0: regulator-usb-a-vbus0 {
+ compatible = "regulator-fixed";
+ pinctrl-0 = <&cp0_reg_usb_a_vbus0_pins>;
+ pinctrl-names = "default";
+ regulator-name = "vbus0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpios = <&cp0_gpio1 27 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_usb_a_vbus1: regulator-usb-a-vbus1 {
+ compatible = "regulator-fixed";
+ pinctrl-0 = <&cp0_reg_usb_a_vbus1_pins>;
+ pinctrl-names = "default";
+ regulator-name = "vbus1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpios = <&cp0_gpio1 28 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ sfp0: sfp-0 {
+ compatible = "sff,sfp";
+ pinctrl-0 = <&cp0_sfp0_pins>;
+ pinctrl-names = "default";
+ i2c-bus = <&cp0_i2c1>;
+ los-gpios = <&cp0_gpio2 2 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&cp0_gpio2 0 GPIO_ACTIVE_LOW>;
+ tx-disable-gpios = <&cp0_gpio2 1 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpios = <&cp0_gpio1 31 GPIO_ACTIVE_HIGH>;
+ maximum-power-milliwatt = <2000>;
+ };
+
+ sfp1: sfp-1 {
+ compatible = "sff,sfp";
+ pinctrl-0 = <&cp1_sfp1_pins>;
+ pinctrl-names = "default";
+ i2c-bus = <&cp1_i2c1>;
+ los-gpios = <&cp1_gpio2 2 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&cp1_gpio2 18 GPIO_ACTIVE_LOW>;
+ tx-disable-gpios = <&cp1_gpio2 1 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpios = <&cp1_gpio2 17 GPIO_ACTIVE_HIGH>;
+ maximum-power-milliwatt = <2000>;
+ };
+};
+
+&cp0_ethernet {
+ status = "okay";
+};
+
+/* SRDS #2 - SFP+ 10GE */
+&cp0_eth0 {
+ managed = "in-band-status";
+ phy-mode = "10gbase-r";
+ phys = <&cp0_comphy2 0>;
+ sfp = <&sfp0>;
+ status = "okay";
+};
+
+/* SRDS #3 - SGMII 1GE */
+&cp0_eth1 {
+ managed = "in-band-status";
+ phy-mode = "sgmii";
+ /* Without mdio phy access rely on sgmii auto-negotiation. */
+ phys = <&cp0_comphy3 1>;
+ status = "okay";
+};
+
+/* SRDS #1 - SGMII */
+&cp0_eth2 {
+ /delete-property/ pinctrl-0;
+ /delete-property/ pinctrl-names;
+ managed = "in-band-status";
+ phy-mode = "sgmii";
+ phy = <&cp0_phy1>;
+ phys = <&cp0_comphy1 2>;
+};
+
+&cp0_gpio1 {
+ pcie0-0-w-disable-hog {
+ gpio-hog;
+ gpios = <6 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "pcie0.0-w-disable";
+ };
+
+ /* J34 */
+ m2-full-card-power-off-hog {
+ gpio-hog;
+ gpios = <8 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "m2-full-card-power-off";
+ };
+};
+
+&cp0_i2c0 {
+ /* assembly option */
+ fan-controller@18 {
+ compatible = "ti,amc6821";
+ reg = <0x18>;
+ };
+
+ expander0: gpio@41 {
+ compatible = "nxp,pca9536";
+ reg = <0x41>;
+
+ usb-a-vbus0-ilimit-hog {
+ gpio-hog;
+ gpios = <0 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "vbus0-ilimit";
+ };
+
+ /* duplicate connection, controlled by soc gpio */
+ usb-vbus0-enable-hog {
+ gpio-hog;
+ gpios = <1 GPIO_ACTIVE_HIGH>;
+ input;
+ line-name = "vbus0-enable";
+ };
+
+ usb-a-vbus1-ilimit-hog {
+ gpio-hog;
+ gpios = <2 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "vbus1-ilimit";
+ };
+
+ /* duplicate connection, controlled by soc gpio */
+ usb-vbus1-enable-hog {
+ gpio-hog;
+ gpios = <3 GPIO_ACTIVE_HIGH>;
+ input;
+ line-name = "vbus1-enable";
+ };
+ };
+
+ carrier_eeprom: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <8>;
+ };
+
+ /* usb-hub@60 */
+
+ /* assembly option */
+ carrier_rtc: rtc@68 {
+ compatible = "st,m41t83";
+ reg = <0x68>;
+ pinctrl-0 = <&cp1_rtc_pins>;
+ pinctrl-names = "default";
+ interrupt-parent = <&cp1_gpio1>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&cp1_gpio1 13 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&cp0_i2c1 {
+ /*
+ * Routed to SFP.
+ * Limit to 100kHz for compatibility with SFP modules,
+ * featuring AT24C01A/02/04 at addresses 0x50/0x51.
+ */
+ clock-frequency = <100000>;
+ pinctrl-0 = <&cp0_i2c1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&cp0_mdio {
+ /*
+ * SoM + Carrier each have a PHY at address 0.
+ * Remove the SoM phy node, and skip adding the carrier node.
+ * SGMII auto-negotiation is enabled by the bootloader for
+ * autonomous operation without mdio control.
+ */
+ /delete-node/ ethernet-phy@0;
+
+ /* U17016 */
+ cp0_phy1: ethernet-phy@1 {
+ reg = <1>;
+ /*
+ * Configure LEDs default behaviour:
+ * - LED[0]: link is 1000Mbps: On (yellow)
+ * - LED[1]: link/activity: On/blink (green)
+ * - LED[2]: high impedance (floating)
+ */
+ marvell,reg-init = <3 16 0xf000 0x0a17>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+};
+
+/* SRDS #0 - miniPCIe */
+&cp0_pcie0 {
+ num-lanes = <1>;
+ phys = <&cp0_comphy0 0>;
+ status = "okay";
+};
+
+/* SRDS #5 - M.2 B-Key (J34) */
+&cp0_pcie2 {
+ num-lanes = <1>;
+ phys = <&cp0_comphy5 2>;
+ status = "okay";
+};
+
+&cp0_pinctrl {
+ pinctrl-0 = <&cp0_m2_0_shutdown_pins &cp0_mpcie_rfkill_pins>;
+ pinctrl-names = "default";
+
+ cp0_i2c1_pins: cp0-i2c1-pins {
+ marvell,pins = "mpp35", "mpp36";
+ marvell,function = "i2c1";
+ };
+
+ cp0_led_pins: cp0-led-pins {
+ marvell,pins = "mpp4", "mpp7";
+ marvell,function = "gpio";
+ };
+
+ cp0_m2_0_shutdown_pins: cp0-m2-0-shutdown-pins {
+ marvell,pins = "mpp8";
+ marvell,function = "gpio";
+ };
+
+ cp0_mmc0_pins: cp0-mmc0-pins {
+ marvell,pins = "mpp43", "mpp56", "mpp57", "mpp58",
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+
+ cp0_mpcie_rfkill_pins: cp0-mpcie-rfkill-pins {
+ marvell,pins = "mpp6";
+ marvell,function = "gpio";
+ };
+
+ cp0_reg_usb_a_vbus0_pins: cp0-reg-usb-a-vbus0-pins {
+ marvell,pins = "mpp27";
+ marvell,function = "gpio";
+ };
+
+ cp0_reg_usb_a_vbus1_pins: cp0-reg-usb-a-vbus1-pins {
+ marvell,pins = "mpp28";
+ marvell,function = "gpio";
+ };
+
+ cp0_sfp0_pins: cp0-sfp0-pins {
+ marvell,pins = "mpp31", "mpp32", "mpp33", "mpp34";
+ marvell,function = "gpio";
+ };
+
+ cp0_spi1_cs1_pins: cp0-spi1-cs1-pins {
+ marvell,pins = "mpp12";
+ marvell,function = "spi1";
+ };
+};
+
+/* microSD */
+&cp0_sdhci0 {
+ pinctrl-0 = <&cp0_mmc0_pins>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+ no-1-8-v;
+ status = "okay";
+};
+
+&cp0_spi1 {
+ /* add pin for chip-select 1 */
+ pinctrl-0 = <&cp0_spi1_pins &cp0_spi1_cs1_pins>;
+
+ flash@1 {
+ compatible = "jedec,spi-nor";
+ reg = <1>;
+ /* read command supports max. 50MHz */
+ spi-max-frequency = <50000000>;
+ };
+};
+
+/* USB-2.0 Host to USB-Hub */
+&cp0_usb3_0 {
+ phys = <&cp0_utmi0>;
+ phy-names = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* SRDS #4 - USB-3.0 Host to USB-Hub */
+&cp0_usb3_1 {
+ phys = <&cp0_comphy4 1>, <&cp0_utmi1>;
+ phy-names = "comphy", "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&cp0_utmi {
+ status = "okay";
+};
+
+&cp0_utmi1 {
+ status = "disabled";
+};
+
+&cp1_ethernet {
+ status = "okay";
+};
+
+/* SRDS #4 - SFP+ 10GE */
+&cp1_eth0 {
+ managed = "in-band-status";
+ phy-mode = "10gbase-r";
+ phys = <&cp1_comphy4 0>;
+ sfp = <&sfp1>;
+ status = "okay";
+};
+
+/* SRDS #3 - SGMII 1GE */
+&cp1_eth1 {
+ managed = "in-band-status";
+ phy-mode = "sgmii";
+ phy = <&cp1_phy0>;
+ phys = <&cp1_comphy3 1>;
+ status = "okay";
+};
+
+/* SRDS #5 - SGMII 1GE */
+&cp1_eth2 {
+ managed = "in-band-status";
+ phy-mode = "sgmii";
+ phy = <&cp1_phy1>;
+ phys = <&cp1_comphy5 2>;
+ status = "okay";
+};
+
+&cp1_gpio1 {
+ status = "okay";
+
+ /* J30 */
+ m2-full-card-power-off-hog-0 {
+ gpio-hog;
+ gpios = <29 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "m2-full-card-power-off";
+ };
+
+ /* J44 */
+ m2-full-card-power-off-hog-1 {
+ gpio-hog;
+ gpios = <30 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "m2-full-card-power-off";
+ };
+};
+
+&cp1_gpio2 {
+ status = "okay";
+};
+
+&cp1_i2c1 {
+ /*
+ * Routed to SFP.
+ * Limit to 100kHz for compatibility with SFP modules,
+ * featuring AT24C01A/02/04 at addresses 0x50/0x51.
+ */
+ clock-frequency = <100000>;
+ pinctrl-0 = <&cp1_i2c1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&cp1_mdio {
+ pinctrl-0 = <&cp1_mdio_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ cp1_phy0: ethernet-phy@0 {
+ reg = <0>;
+ /*
+ * Configure LEDs default behaviour:
+ * - LED[0]: link is 1000Mbps: On (yellow)
+ * - LED[1]: link/activity: On/blink (green)
+ * - LED[2]: high impedance (floating)
+ */
+ marvell,reg-init = <3 16 0xf000 0x0a17>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+
+ cp1_phy1: ethernet-phy@1 {
+ reg = <1>;
+ /*
+ * Configure LEDs default behaviour:
+ * - LED[0]: link is 1000Mbps: On (yellow)
+ * - LED[1]: link/activity: On/blink (green)
+ * - LED[2]: high impedance (floating)
+ */
+ marvell,reg-init = <3 16 0xf000 0x0a17>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+};
+
+/* SRDS #0 - M.2 (J30) */
+&cp1_pcie0 {
+ num-lanes = <1>;
+ phys = <&cp1_comphy0 0>;
+ status = "okay";
+};
+
+&cp1_rtc {
+ status = "disabled";
+};
+
+/* SRDS #1 - SATA on M.2 (J44) */
+&cp1_sata0 {
+ phys = <&cp1_comphy1 0>;
+ status = "okay";
+
+ /* only port 0 is available */
+ /delete-node/ sata-port@1;
+};
+
+&cp1_syscon0 {
+ cp1_pinctrl: pinctrl {
+ compatible = "marvell,cp115-standalone-pinctrl";
+ pinctrl-0 = <&cp1_m2_1_shutdown_pins &cp1_m2_2_shutdown_pins>;
+ pinctrl-names = "default";
+
+ cp1_i2c1_pins: cp1-i2c1-pins {
+ marvell,pins = "mpp35", "mpp36";
+ marvell,function = "i2c1";
+ };
+
+ cp1_led_pins: cp1-led-pins {
+ marvell,pins = "mpp54", "mpp55";
+ marvell,function = "gpio";
+ };
+
+ cp1_m2_1_shutdown_pins: cp1-m2-1-shutdown-pins {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+
+ cp1_m2_2_shutdown_pins: cp1-m2-2-shutdown-pins {
+ marvell,pins = "mpp30";
+ marvell,function = "gpio";
+ };
+
+ cp1_mdio_pins: cp1-mdio-pins {
+ marvell,pins = "mpp37", "mpp38";
+ marvell,function = "ge";
+ };
+
+ cp1_rtc_pins: cp1-rtc-pins {
+ marvell,pins = "mpp12", "mpp13";
+ marvell,function = "gpio";
+ };
+
+ cp1_sfp1_pins: cp1-sfp1-pins {
+ marvell,pins = "mpp33", "mpp34", "mpp49", "mpp50";
+ marvell,function = "gpio";
+ };
+ };
+};
+
+/*
+ * SRDS #2 - USB-3.0 Host to M.2 (J44)
+ * USB-2.0 Host to M.2 (J30)
+ */
+&cp1_usb3_0 {
+ phys = <&cp1_comphy2 0>, <&cp1_utmi0>;
+ phy-names = "comphy", "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* USB-2.0 Host to M.2 (J44) */
+&cp1_usb3_1 {
+ phys = <&cp1_utmi1>;
+ phy-names = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&cp1_utmi {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts b/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts
new file mode 100644
index 000000000000..0f53745a6fa0
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Josua Mayer <josua@solid-run.com>
+ *
+ * DTS for SolidRun CN9132 Clearfog.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "cn9130.dtsi"
+#include "cn9132-sr-cex7.dtsi"
+
+/ {
+ model = "SolidRun CN9132 Clearfog";
+ compatible = "solidrun,cn9132-clearfog",
+ "solidrun,cn9132-sr-cex7", "marvell,cn9130";
+
+ aliases {
+ ethernet1 = &cp0_eth2;
+ ethernet2 = &cp0_eth0;
+ ethernet3 = &cp2_eth0;
+ ethernet4 = &cp1_eth0;
+ i2c7 = &carrier_mpcie_i2c;
+ i2c8 = &carrier_ptp_i2c;
+ mmc1 = &cp0_sdhci0;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_wake0_pins>;
+
+ button-0 {
+ label = "SW2";
+ gpios = <&cp1_gpio2 8 GPIO_ACTIVE_LOW>;
+ linux,can-disable;
+ linux,code = <BTN_2>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_batlow_pins &cp2_rsvd4_pins>;
+
+ /* LED11 */
+ led-io-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DISK;
+ function-enumerator = <0>;
+ default-state = "off";
+ gpios = <&cp1_gpio1 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* LED12 */
+ led-io-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DISK;
+ function-enumerator = <1>;
+ default-state = "off";
+ gpios = <&cp2_gpio1 4 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ /* CON4 W_DISABLE1/W_DISABLE2 */
+ rfkill-m2-con4-wlan {
+ compatible = "rfkill-gpio";
+ label = "m.2 wlan (CON4)";
+ radio-type = "wlan";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_10g_phy_rst_01_pins>;
+ /* rfkill-gpio inverts internally */
+ shutdown-gpios = <&cp1_gpio2 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* CON5 W_DISABLE1/W_DISABLE2 */
+ rfkill-m2-con5-wlan {
+ compatible = "rfkill-gpio";
+ label = "m.2 wlan (CON5)";
+ radio-type = "wlan";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_10g_phy_rst_23_pins>;
+ /* rfkill-gpio inverts internally */
+ shutdown-gpios = <&cp1_gpio2 10 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* J21 W_DISABLE1 */
+ rfkill-m2-wwan {
+ compatible = "rfkill-gpio";
+ label = "m.2 wwan (J21)";
+ radio-type = "wwan";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp2_rsvd3_pins>;
+ /* rfkill-gpio inverts internally */
+ shutdown-gpios = <&cp2_gpio1 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* J21 W_DISABLE2 */
+ rfkill-m2-gnss {
+ compatible = "rfkill-gpio";
+ label = "m.2 gnss (J21)";
+ radio-type = "gps";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp2_rsvd8_pins>;
+ /* rfkill-gpio inverts internally */
+ shutdown-gpios = <&cp2_gpio1 8 GPIO_ACTIVE_HIGH>;
+ };
+
+ /* J14 W_DISABLE */
+ rfkill-mpcie-wlan {
+ compatible = "rfkill-gpio";
+ label = "mpcie wlan (J14)";
+ radio-type = "wlan";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp2_rsvd2_pins>;
+ /* rfkill-gpio inverts internally */
+ shutdown-gpios = <&cp2_gpio1 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp: sfp {
+ compatible = "sff,sfp";
+ i2c-bus = <&com_10g_sfp_i2c0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&com_10g_int0_pins>;
+ mod-def0-gpios = <&cp0_gpio1 24 GPIO_ACTIVE_LOW>;
+ maximum-power-milliwatt = <2000>;
+ };
+};
+
+&com_smbus {
+ /* This bus is also routed to STM32 BMC Microcontroller (U2) */
+
+ power-sensor@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ #io-channel-cells = <1>;
+ label = "vdd_12v0";
+ shunt-resistor = <2000>;
+ };
+
+ adc@48 {
+ compatible = "ti,tla2021";
+ reg = <0x48>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* supplied by chaoskey hardware noise generator circuit */
+ channel@0 {
+ reg = <0>;
+ };
+ };
+};
+
+&cp0_eth_phy0 {
+ /*
+ * Configure LEDs default behaviour:
+ * - LED[0]: link is 1000Mbps: On (yellow): 0111
+ * - LED[1]: link/activity: On/Blink (green): 0001
+ * - LED[2]: Off (green): 1000
+ */
+ marvell,reg-init = <3 16 0xf000 0x0817>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ /* link */
+ reg = <0>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ /* act */
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@2 {
+ /* 1000 */
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+};
+
+/* SRDS #4 - 10GE */
+&cp0_eth0 {
+ phys = <&cp0_comphy4 0>;
+ phy-mode = "10gbase-r";
+ managed = "in-band-status";
+ sfp = <&sfp>;
+ status = "okay";
+};
+
+&cp0_eth2 {
+ phy-mode = "2500base-x";
+ phys = <&cp0_comphy5 2>;
+ status = "okay";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+};
+
+&cp0_i2c1 {
+ /*
+ * Both COM and Carrier Board have a PCA9547 i2c mux at 0x77.
+ * Describe them as a single device merging each child bus.
+ */
+
+ i2c-mux@77 {
+ i2c@0 {
+ /* Routed to Full PCIe (J4) */
+ };
+
+ i2c@1 {
+ /* Routed to USB Hub (U29) */
+ };
+
+ i2c@2 {
+ /* Routed to M.2 (CON4) */
+ };
+
+ i2c@3 {
+ /* Routed to M.2 (CON5) */
+ };
+
+ i2c@4 {
+ /* Routed to M.2 (J21) */
+ };
+
+ carrier_mpcie_i2c: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+
+ /* Routed to mini-PCIe (J14) */
+ };
+
+ carrier_ptp_i2c: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+
+ /* Routed to various optional PTP related components */
+ };
+ };
+};
+
+&cp0_mdio {
+ ethernet-switch@4 {
+ compatible = "marvell,mv88e6085";
+ reg = <4>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sw_phy1: ethernet-phy@1 {
+ reg = <0x11>;
+ };
+
+ sw_phy2: ethernet-phy@2 {
+ reg = <0x12>;
+ };
+
+ sw_phy3: ethernet-phy@3 {
+ reg = <0x13>;
+ };
+
+ sw_phy4: ethernet-phy@4 {
+ reg = <0x14>;
+ };
+ };
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&sw_phy1>;
+ phy-mode = "internal";
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&sw_phy2>;
+ phy-mode = "internal";
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&sw_phy3>;
+ phy-mode = "internal";
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@4 {
+ reg = <4>;
+ label = "lan4";
+ phy-handle = <&sw_phy4>;
+ phy-mode = "internal";
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&cp0_eth2>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+};
+
+/* SRDS #0,#1,#2,#3 - PCIe */
+&cp0_pcie0 {
+ num-lanes = <4>;
+ phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>, <&cp0_comphy2 0>, <&cp0_comphy3 0>;
+ status = "okay";
+};
+
+&cp0_pinctrl {
+ /*
+ * configure unused gpios exposed via pin headers:
+ * - J7-10: PWRBTN
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_pwrbtn_pins>;
+};
+
+/* microSD */
+&cp0_sdhci0 {
+ pinctrl-0 = <&cp0_mmc0_pins>, <&cp0_mmc0_cd_pins>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+ no-1-8-v;
+ status = "okay";
+};
+
+&cp0_spi1 {
+ /* add CS1 */
+ pinctrl-0 = <&cp0_spi1_pins>, <&cp0_spi1_cs1_pins>;
+
+ flash@1 {
+ compatible = "jedec,spi-nor";
+ reg = <1>;
+ /* read command supports max. 50MHz */
+ spi-max-frequency = <50000000>;
+ };
+};
+
+/* J38 */
+&cp0_uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_uart2_pins>;
+ status = "okay";
+};
+
+&cp0_utmi {
+ /* M.2 "CON5" swaps D+/D- */
+ swap-dx-lanes = <1>;
+};
+
+&cp1_ethernet {
+ status = "okay";
+};
+
+/* SRDS #2 - 5GE */
+&cp1_eth0 {
+ phys = <&cp1_comphy2 0>;
+ phy-mode = "5gbase-r";
+ phy = <&cp1_eth_phy0>;
+ managed = "in-band-status";
+ status = "okay";
+};
+
+/* SRDS #0,#1 - PCIe */
+&cp1_pcie0 {
+ num-lanes = <2>;
+ phys = <&cp1_comphy0 0>, <&cp1_comphy1 0>;
+ status = "okay";
+};
+
+/* SRDS #4 - PCIe */
+&cp1_pcie1 {
+ num-lanes = <1>;
+ phys = <&cp1_comphy4 1>;
+ status = "okay";
+};
+
+/* SRDS #5 - PCIe */
+&cp1_pcie2 {
+ num-lanes = <1>;
+ phys = <&cp1_comphy5 2>;
+ status = "okay";
+};
+
+&cp1_pinctrl {
+ /*
+ * configure unused gpios exposed via pin headers:
+ * - J7-8: RSVD16
+ * - J7-10: THRM
+ * - J10-1: WAKE1
+ * - J10-2: SATA_ACT
+ * - J10-8: THERMTRIP
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_rsvd16_pins &cp1_sata_act_pins &cp1_thrm_irq_pins>,
+ <&cp1_thrm_trip_pins &cp1_wake1_pins>;
+};
+
+/* SRDS #3 - SATA */
+&cp1_sata0 {
+ status = "okay";
+
+ /* only port 1 is available */
+ /delete-node/ sata-port@0;
+
+ sata-port@1 {
+ phys = <&cp1_comphy3 1>;
+ };
+};
+
+&cp1_utmi {
+ /* M.2 "CON4" swaps D+/D- */
+ swap-dx-lanes = <0>;
+};
+
+&cp1_xmdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_xmdio_pins>;
+ status = "okay";
+
+ cp1_eth_phy0: ethernet-phy@8 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&com_10g_int1_pins>;
+ interrupt-parent = <&cp1_gpio2>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+};
+
+&cp2_ethernet {
+ status = "okay";
+};
+
+/* SRDS #2 - 5GE */
+&cp2_eth0 {
+ phys = <&cp2_comphy2 0>;
+ phy-mode = "5gbase-r";
+ phy = <&cp2_eth_phy0>;
+ managed = "in-band-status";
+ status = "okay";
+};
+
+&cp2_gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp2_rsvd9_pins>;
+
+ /* J21 */
+ m2-wwan-reset-hog {
+ gpio-hog;
+ gpios = <9 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ output-low;
+ line-name = "m2-wwan-reset";
+ };
+};
+
+/* SRDS #0 - PCIe */
+&cp2_pcie0 {
+ num-lanes = <1>;
+ phys = <&cp2_comphy0 0>;
+ status = "okay";
+};
+
+/* SRDS #4 - PCIe */
+&cp2_pcie1 {
+ num-lanes = <1>;
+ phys = <&cp2_comphy4 1>;
+ status = "okay";
+};
+
+/* SRDS #5 - PCIe */
+&cp2_pcie2 {
+ num-lanes = <1>;
+ phys = <&cp2_comphy5 2>;
+ status = "okay";
+};
+
+&cp2_pinctrl {
+ /*
+ * configure unused gpios exposed via pin headers:
+ * - J7-1: RSVD10
+ * - J7-3: RSVD11
+ * - J7-5: RSVD56
+ * - J7-6: RSVD7
+ * - J7-7: RSVD27
+ * - J10-3: RSVD31
+ * - J10-5: RSVD5
+ * - J10-6: RSVD32
+ * - J10-7: RSVD0
+ * - J10-9: RSVD1
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp2_rsvd0_pins &cp2_rsvd1_pins &cp2_rsvd5_pins>,
+ <&cp2_rsvd7_pins &cp2_rsvd10_pins &cp2_rsvd11_pins>,
+ <&cp2_rsvd27_pins &cp2_rsvd31_pins &cp2_rsvd32_pins>,
+ <&cp2_rsvd56_pins>;
+};
+
+/* SRDS #3 - SATA */
+&cp2_sata0 {
+ status = "okay";
+
+ /* only port 1 is available */
+ /delete-node/ sata-port@0;
+
+ sata-port@1 {
+ phys = <&cp2_comphy3 1>;
+ };
+};
+
+&cp2_xmdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp2_xmdio_pins>;
+ status = "okay";
+
+ cp2_eth_phy0: ethernet-phy@8 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&com_10g_int2_pins>;
+ interrupt-parent = <&cp2_gpio2>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi b/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi
new file mode 100644
index 000000000000..afc041c1c448
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi
@@ -0,0 +1,712 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Josua Mayer <josua@solid-run.com>
+ *
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/*
+ * Instantiate the first external CP115
+ */
+
+#define CP11X_NAME cp1
+#define CP11X_BASE f4000000
+#define CP11X_PCIEx_MEM_BASE(iface) (0xe2000000 + (iface * 0x1000000))
+#define CP11X_PCIEx_MEM_SIZE(iface) 0xf00000
+#define CP11X_PCIE0_BASE f4600000
+#define CP11X_PCIE1_BASE f4620000
+#define CP11X_PCIE2_BASE f4640000
+
+#include "armada-cp115.dtsi"
+
+#undef CP11X_NAME
+#undef CP11X_BASE
+#undef CP11X_PCIEx_MEM_BASE
+#undef CP11X_PCIEx_MEM_SIZE
+#undef CP11X_PCIE0_BASE
+#undef CP11X_PCIE1_BASE
+#undef CP11X_PCIE2_BASE
+
+/*
+ * Instantiate the second external CP115
+ */
+
+#define CP11X_NAME cp2
+#define CP11X_BASE f6000000
+#define CP11X_PCIEx_MEM_BASE(iface) (0xe5000000 + (iface * 0x1000000))
+#define CP11X_PCIEx_MEM_SIZE(iface) 0xf00000
+#define CP11X_PCIE0_BASE f6600000
+#define CP11X_PCIE1_BASE f6620000
+#define CP11X_PCIE2_BASE f6640000
+
+#include "armada-cp115.dtsi"
+
+#undef CP11X_NAME
+#undef CP11X_BASE
+#undef CP11X_PCIEx_MEM_BASE
+#undef CP11X_PCIEx_MEM_SIZE
+#undef CP11X_PCIE0_BASE
+#undef CP11X_PCIE1_BASE
+#undef CP11X_PCIE2_BASE
+
+/ {
+ model = "SolidRun CN9132 COM Express Type 7 Module";
+ compatible = "solidrun,cn9132-sr-cex7", "marvell,cn9130";
+
+ aliases {
+ ethernet0 = &cp0_eth1;
+ gpio3 = &cp1_gpio1;
+ gpio4 = &cp1_gpio2;
+ gpio5 = &cp2_gpio1;
+ gpio6 = &cp2_gpio2;
+ i2c0 = &cp0_i2c0;
+ i2c1 = &cp0_i2c1;
+ i2c2 = &com_clkgen_i2c;
+ i2c3 = &com_10g_led_i2c;
+ i2c4 = &com_10g_sfp_i2c0;
+ i2c5 = &com_smbus;
+ i2c6 = &com_fanctrl_i2c;
+ mmc0 = &ap_sdhci0;
+ rtc0 = &cp0_rtc;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <0 51 102 153 204 255>;
+ #cooling-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_fan_pwm_pins &cp0_fan_tacho_pins>;
+ pwms = <&cp0_gpio2 7 40000>;
+ interrupt-parent = <&cp0_gpio1>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ v_1_8: regulator-1-8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ap_vhv: regulator-ap-vhv-1-8 {
+ compatible = "regulator-fixed";
+ regulator-name = "ap-vhv-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ pinctrl-0 = <&cp0_reg_ap_vhv_pins>;
+ pinctrl-names = "default";
+ gpios = <&cp0_gpio2 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ cp_vhv: regulator-cp-vhv-1-8 {
+ compatible = "regulator-fixed";
+ regulator-name = "cp-vhv-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ pinctrl-0 = <&cp0_reg_cp_vhv_pins>;
+ pinctrl-names = "default";
+ gpios = <&cp0_gpio2 17 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&ap_pinctrl {
+ ap_mmc0_pins: ap-mmc0-pins {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3", "mpp4", "mpp5",
+ "mpp6", "mpp7", "mpp8", "mpp9", "mpp10", "mpp12";
+ marvell,function = "sdio";
+ /*
+ * mpp12 is emmc reset, function should be sdio (hw_rst),
+ * but pinctrl-mvebu does not support this.
+ *
+ * From pinctrl-mvebu.h:
+ * "The name will be used to switch to this setting in DT description, e.g.
+ * marvell,function = "uart2". subname is only for debugging purposes."
+ */
+ };
+};
+
+&ap_sdhci0 {
+ bus-width = <8>;
+ pinctrl-0 = <&ap_mmc0_pins>;
+ pinctrl-names = "default";
+ vqmmc-supply = <&v_1_8>;
+ status = "okay";
+};
+
+&ap_thermal_ic {
+ polling-delay = <1000>;
+
+ trips {
+ ap_active: trip-active {
+ temperature = <40000>;
+ hysteresis = <4000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&ap_active>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 4>;
+ };
+
+ map1 {
+ trip = <&ap_crit>;
+ cooling-device = <&fan 4 5>;
+ };
+ };
+};
+
+&cp0_ethernet {
+ status = "okay";
+};
+
+&cp0_eth1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_eth1_pins>;
+ phy-mode = "rgmii-id";
+ phy = <&cp0_eth_phy0>;
+ status = "okay";
+};
+
+&cp0_gpio1 {
+ status = "okay";
+
+ /*
+ * Tacho signal used as interrupt source by pwm-fan driver.
+ * Hog IO as input to ensure the mvebu-gpio irq driver's
+ * irq_set_type can succeed.
+ */
+ pwm-tacho-irq-hog {
+ gpio-hog;
+ gpios = <26 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ input;
+ line-name = "fan-tacho";
+ };
+};
+
+&cp0_gpio2 {
+ status = "okay";
+};
+
+&cp0_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_i2c0_pins>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ com_eeprom: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <8>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,spd";
+ reg = <0x53>;
+ };
+};
+
+&cp0_i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_i2c1_pins>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-mux@77 {
+ compatible = "nxp,pca9547";
+ reg = <0x77>;
+ i2c-mux-idle-disconnect;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ com_clkgen_i2c: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ /* clock-controller@6b */
+ };
+
+ com_10g_led_i2c: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ /* Routed to B2B Connector as I2C_10G_LED_SCL/SDA */
+ };
+
+ com_10g_sfp_i2c0: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ /* Routed to B2B Connector as I2C_SFP0_CP0_SCL/SDA */
+ };
+
+ com_smbus: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ /* Routed to B2B Connector as SBM_CLK/DAT */
+ };
+
+ com_fanctrl_i2c: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ /* fan-controller@2f (assembly option) */
+ };
+ };
+};
+
+&cp0_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_mdio_pins>;
+ status = "okay";
+
+ cp0_eth_phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&cp0_spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_spi1_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ /* read command supports max. 50MHz */
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&cp0_syscon0 {
+ cp0_pinctrl: pinctrl {
+ compatible = "marvell,cp115-standalone-pinctrl";
+
+ com_10g_int0_pins: cp0-10g-int-pins {
+ marvell,pins = "mpp24";
+ marvell,function = "gpio";
+ };
+
+ cp0_eth1_pins: cp0-eth1-pins {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3",
+ "mpp4", "mpp5", "mpp6", "mpp7",
+ "mpp8", "mpp9", "mpp10", "mpp11";
+ /* docs call it "ge1", but cp110-pinctrl "ge0" */
+ marvell,function = "ge0";
+ };
+
+ cp0_fan_pwm_pins: cp0-fan-pwm-pins {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+
+ cp0_fan_tacho_pins: cp0-fan-tacho-pins {
+ marvell,pins = "mpp26";
+ marvell,function = "gpio";
+ };
+
+ cp0_i2c0_pins: cp0-i2c0-pins {
+ marvell,pins = "mpp37", "mpp38";
+ marvell,function = "i2c0";
+ };
+
+ cp0_i2c1_pins: cp0-i2c1-pins {
+ marvell,pins = "mpp35", "mpp36";
+ marvell,function = "i2c1";
+ };
+
+ cp0_mdio_pins: cp0-mdio-pins {
+ marvell,pins = "mpp40", "mpp41";
+ marvell,function = "ge";
+ };
+
+ cp0_mmc0_pins: cp0-mmc0-pins {
+ marvell,pins = "mpp56", "mpp57", "mpp58", "mpp59",
+ "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+
+ cp0_mmc0_cd_pins: cp0-mmc0-cd-pins {
+ marvell,pins = "mpp55";
+ marvell,function = "sdio_cd";
+ };
+
+ cp0_pwrbtn_pins: cp0-pwrbtn-pins {
+ marvell,pins = "mpp31";
+ marvell,function = "gpio";
+ };
+
+ cp0_reg_ap_vhv_pins: cp0-reg-ap-vhv-pins {
+ marvell,pins = "mpp53";
+ marvell,function = "gpio";
+ };
+
+ cp0_reg_cp_vhv_pins: cp0-reg-cp-vhv-pins {
+ marvell,pins = "mpp49";
+ marvell,function = "gpio";
+ };
+
+ cp0_spi1_pins: cp0-spi1-pins {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+
+ cp0_spi1_cs1_pins: cp0-spi1-cs1-pins {
+ marvell,pins = "mpp12";
+ marvell,function = "spi1";
+ };
+
+ cp0_uart2_pins: cp0-uart2-pins {
+ marvell,pins = "mpp50", "mpp51";
+ marvell,function = "uart2";
+ };
+ };
+};
+
+&cp0_thermal_ic {
+ polling-delay = <1000>;
+
+ trips {
+ cp0_active: trip-active {
+ temperature = <40000>;
+ hysteresis = <4000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cp0_active>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 4>;
+ };
+
+ map1 {
+ trip = <&cp0_crit>;
+ cooling-device = <&fan 4 5>;
+ };
+ };
+};
+
+/* USB-2.0 Host */
+&cp0_usb3_0 {
+ phys = <&cp0_utmi0>;
+ phy-names = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* USB-2.0 Host */
+&cp0_usb3_1 {
+ phys = <&cp0_utmi1>;
+ phy-names = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&cp0_utmi {
+ status = "okay";
+};
+
+&cp1_gpio1 {
+ status = "okay";
+};
+
+&cp1_gpio2 {
+ status = "okay";
+};
+
+&cp1_rtc {
+ status = "disabled";
+};
+
+&cp1_spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_spi1_pins>;
+ status = "okay";
+
+ tpm@0 {
+ reg = <0>;
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <10000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp1_tpm_irq_pins>;
+ interrupt-parent = <&cp1_gpio1>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&cp1_syscon0 {
+ cp1_pinctrl: pinctrl {
+ compatible = "marvell,cp115-standalone-pinctrl";
+
+ com_10g_int1_pins: cp1-10g-int-pins {
+ marvell,pins = "mpp50";
+ marvell,function = "gpio";
+ };
+
+ cp1_10g_phy_rst_01_pins: cp1-10g-phy-rst-01-pins {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+
+ cp1_10g_phy_rst_23_pins: cp1-10g-phy-rst-23-pins {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+
+ cp1_batlow_pins: cp1-batlow-pins {
+ marvell,pins = "mpp11";
+ marvell,function = "gpio";
+ };
+
+ cp1_rsvd16_pins: cp1-rsvd16-pins {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+
+ cp1_sata_act_pins: cp1-sata-act-pins {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+
+ cp1_spi1_pins: cp1-spi1-pins {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+
+ cp1_thrm_irq_pins: cp1-thrm-irq-pins {
+ marvell,pins = "mpp34";
+ marvell,function = "gpio";
+ };
+
+ cp1_thrm_trip_pins: cp1-thrm-trip-pins {
+ marvell,pins = "mpp33";
+ marvell,function = "gpio";
+ };
+
+ cp1_tpm_irq_pins: cp1-tpm-irq-pins {
+ marvell,pins = "mpp17";
+ marvell,function = "gpio";
+ };
+
+ cp1_wake0_pins: cp1-wake0-pins {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+
+ cp1_wake1_pins: cp1-wake1-pins {
+ marvell,pins = "mpp51";
+ marvell,function = "gpio";
+ };
+
+ cp1_xmdio_pins: cp1-xmdio-pins {
+ marvell,pins = "mpp37", "mpp38";
+ marvell,function = "xg";
+ };
+ };
+};
+
+&cp1_thermal_ic {
+ polling-delay = <1000>;
+
+ trips {
+ cp1_active: trip-active {
+ temperature = <40000>;
+ hysteresis = <4000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cp1_active>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 4>;
+ };
+
+ map1 {
+ trip = <&cp1_crit>;
+ cooling-device = <&fan 4 5>;
+ };
+ };
+};
+
+/* USB-2.0 Host */
+&cp1_usb3_0 {
+ phys = <&cp1_utmi0>;
+ phy-names = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&cp1_utmi {
+ status = "okay";
+};
+
+&cp2_ethernet {
+ status = "okay";
+};
+
+&cp2_gpio1 {
+ status = "okay";
+};
+
+&cp2_gpio2 {
+ status = "okay";
+};
+
+&cp2_rtc {
+ status = "disabled";
+};
+
+&cp2_syscon0 {
+ cp2_pinctrl: pinctrl {
+ compatible = "marvell,cp115-standalone-pinctrl";
+
+ com_10g_int2_pins: cp2-10g-int-pins {
+ marvell,pins = "mpp50";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd0_pins: cp2-rsvd0-pins {
+ marvell,pins = "mpp0";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd1_pins: cp2-rsvd1-pins {
+ marvell,pins = "mpp1";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd2_pins: cp2-rsvd2-pins {
+ marvell,pins = "mpp2";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd3_pins: cp2-rsvd3-pins {
+ marvell,pins = "mpp3";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd4_pins: cp2-rsvd4-pins {
+ marvell,pins = "mpp4";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd5_pins: cp2-rsvd5-pins {
+ marvell,pins = "mpp54";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd7_pins: cp2-rsvd7-pins {
+ marvell,pins = "mpp7";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd8_pins: cp2-rsvd8-pins {
+ marvell,pins = "mpp8";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd9_pins: cp2-rsvd9-pins {
+ marvell,pins = "mpp9";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd10_pins: cp2-rsvd10-pins {
+ marvell,pins = "mpp10";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd11_pins: cp2-rsvd11-pins {
+ marvell,pins = "mpp11";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd27_pins: cp2-rsvd27-pins {
+ marvell,pins = "mpp11";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd31_pins: cp2-rsvd31-pins {
+ marvell,pins = "mpp31";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd32_pins: cp2-rsvd32-pins {
+ marvell,pins = "mpp32";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd55_pins: cp2-rsvd55-pins {
+ marvell,pins = "mpp55";
+ marvell,function = "gpio";
+ };
+
+ cp2_rsvd56_pins: cp2-rsvd56-pins {
+ marvell,pins = "mpp56";
+ marvell,function = "gpio";
+ };
+
+ cp2_xmdio_pins: cp2-xmdio-pins {
+ marvell,pins = "mpp37", "mpp38";
+ marvell,function = "xg";
+ };
+ };
+};
+
+&cp2_thermal_ic {
+ polling-delay = <1000>;
+
+ trips {
+ cp2_active: trip-active {
+ temperature = <40000>;
+ hysteresis = <4000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cp2_active>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 4>;
+ };
+
+ map1 {
+ trip = <&cp2_crit>;
+ cooling-device = <&fan 4 5>;
+ };
+ };
+};
+
+/* USB-2.0/3.0 Host */
+&cp2_usb3_0 {
+ phys = <&cp2_utmi0>, <&cp2_comphy1 0>;
+ phy-names = "utmi", "comphy";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&cp2_utmi {
+ status = "okay";
+};
+
+/* AP default console */
+&uart0 {
+ pinctrl-0 = <&uart0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 37b4ca3a87c9..8fd7b2bb7a15 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -8,9 +8,12 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-x20-dev.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-bananapi-bpi-r64.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt7981b-cudy-wr3000-v1.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt7981b-openwrt-one.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7981b-xiaomi-ax3000t.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-acelink-ew-7886cax.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-mini.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-emmc.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-nand.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-nor.dtbo
@@ -62,6 +65,8 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-tentacool-sku327681.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-tentacool-sku327683.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-tentacruel-sku262144.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-tentacruel-sku262148.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-voltorb-sku589824.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-voltorb-sku589825.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-hayato-r1.dtb
@@ -69,6 +74,7 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-hayato-r5-sku2.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-spherion-r0.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-spherion-r4.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-cherry-dojo-r1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-cherry-tomato-r1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-cherry-tomato-r2.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-cherry-tomato-r3.dtb
@@ -76,5 +82,11 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-demo.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8365-evk.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-genio-1200-evk.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8390-genio-700-evk.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-kontron-3-5-sbc-i1200.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-radxa-nio-12l.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
+
+# Device tree overlay support: -@ emits the __symbols__ node that overlays need
+DTC_FLAGS_mt7986a-bananapi-bpi-r3 := -@
+DTC_FLAGS_mt7986a-bananapi-bpi-r3-mini := -@
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 234e3b23d7a8..c84c47c1352f 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -137,7 +137,7 @@
<MT2712_PIN_74_GBE_TXD0__FUNC_GBE_TXD0>,
<MT2712_PIN_75_GBE_TXC__FUNC_GBE_TXC>,
<MT2712_PIN_76_GBE_TXEN__FUNC_GBE_TXEN>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
};
rx_pins {
pinmux = <MT2712_PIN_78_GBE_RXD3__FUNC_GBE_RXD3>,
@@ -151,7 +151,7 @@
mdio_pins {
pinmux = <MT2712_PIN_85_GBE_MDC__FUNC_GBE_MDC>,
<MT2712_PIN_86_GBE_MDIO__FUNC_GBE_MDIO>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
input-enable;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
index 7364c7278276..91de920c2245 100644
--- a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
+++ b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
@@ -288,25 +288,25 @@
<PINMUX_GPIO161__FUNC_MSDC0_DAT7>,
<PINMUX_GPIO162__FUNC_MSDC0_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
pins-clk {
pinmux = <PINMUX_GPIO163__FUNC_MSDC0_CLK>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
pins-rst {
pinmux = <PINMUX_GPIO165__FUNC_MSDC0_RSTB>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
};
pins-ds {
pinmux = <PINMUX_GPIO164__FUNC_MSDC0_DSL>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index 224bb289660c..d12eac9b3eeb 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -149,9 +149,9 @@
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7531";
- reg = <0>;
+ reg = <0x1f>;
interrupt-controller;
#interrupt-cells = <1>;
interrupts-extended = <&pio 53 IRQ_TYPE_LEVEL_HIGH>;
@@ -329,8 +329,8 @@
/* eMMC is shared pin with parallel NAND */
emmc_pins_default: emmc-pins-default {
mux {
- function = "emmc", "emmc_rst";
- groups = "emmc";
+ function = "emmc";
+ groups = "emmc", "emmc_rst";
};
/* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7",
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index 41629769bdc8..8c3e2e2578bc 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -268,8 +268,8 @@
/* eMMC is shared pin with parallel NAND */
emmc_pins_default: emmc-pins-default {
mux {
- function = "emmc", "emmc_rst";
- groups = "emmc";
+ function = "emmc";
+ groups = "emmc", "emmc_rst";
};
/* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7",
diff --git a/arch/arm64/boot/dts/mediatek/mt7981b-cudy-wr3000-v1.dts b/arch/arm64/boot/dts/mediatek/mt7981b-cudy-wr3000-v1.dts
new file mode 100644
index 000000000000..54101cc08a25
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7981b-cudy-wr3000-v1.dts
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
+#include "mt7981b.dtsi"
+
+/ {
+ compatible = "cudy,wr3000-v1", "mediatek,mt7981b";
+ model = "Cudy WR3000 V1";
+
+ memory@40000000 {
+ reg = <0 0x40000000 0 0x10000000>;
+ device_type = "memory";
+ };
+
+ keys {
+ compatible = "gpio-keys";
+
+ key-wps {
+ label = "WPS";
+ gpios = <&pio 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_WPS_BUTTON>;
+ };
+
+ key-reset {
+ label = "RESET";
+ gpios = <&pio 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_RESTART>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_WAN;
+ gpios = <&pio 5 GPIO_ACTIVE_LOW>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_WLAN_2GHZ;
+ gpios = <&pio 6 GPIO_ACTIVE_LOW>;
+ };
+
+ led-2 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_WLAN_5GHZ;
+ gpios = <&pio 7 GPIO_ACTIVE_LOW>;
+ };
+
+ led-3 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_LAN;
+ gpios = <&pio 9 GPIO_ACTIVE_LOW>;
+ };
+
+ led-4 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&pio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ led-5 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_WAN_ONLINE;
+ gpios = <&pio 11 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7981b-openwrt-one.dts b/arch/arm64/boot/dts/mediatek/mt7981b-openwrt-one.dts
new file mode 100644
index 000000000000..4f6cbb491287
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7981b-openwrt-one.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/dts-v1/;
+
+#include "mt7981b.dtsi"
+
+/ {
+ compatible = "openwrt,one", "mediatek,mt7981b";
+ model = "OpenWrt One";
+
+ memory@40000000 {
+ reg = <0 0x40000000 0 0x40000000>;
+ device_type = "memory";
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
index 4feff3d1c5f4..64aeeb24efac 100644
--- a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
@@ -2,6 +2,7 @@
#include <dt-bindings/clock/mediatek,mt7981-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/mt7986-resets.h>
/ {
compatible = "mediatek,mt7981b";
@@ -62,12 +63,19 @@
#clock-cells = <1>;
};
- clock-controller@1001b000 {
+ topckgen: clock-controller@1001b000 {
compatible = "mediatek,mt7981-topckgen", "syscon";
reg = <0 0x1001b000 0 0x1000>;
#clock-cells = <1>;
};
+ watchdog: watchdog@1001c000 {
+ compatible = "mediatek,mt7986-wdt";
+ reg = <0 0x1001c000 0 0x1000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ #reset-cells = <1>;
+ };
+
clock-controller@1001e000 {
compatible = "mediatek,mt7981-apmixedsys";
reg = <0 0x1001e000 0 0x1000>;
@@ -78,20 +86,80 @@
compatible = "mediatek,mt7981-pwm";
reg = <0 0x10048000 0 0x1000>;
clocks = <&infracfg CLK_INFRA_PWM_STA>,
- <&infracfg CLK_INFRA_PWM_HCK>,
- <&infracfg CLK_INFRA_PWM1_CK>,
- <&infracfg CLK_INFRA_PWM2_CK>,
- <&infracfg CLK_INFRA_PWM3_CK>;
+ <&infracfg CLK_INFRA_PWM_HCK>,
+ <&infracfg CLK_INFRA_PWM1_CK>,
+ <&infracfg CLK_INFRA_PWM2_CK>,
+ <&infracfg CLK_INFRA_PWM3_CK>;
clock-names = "top", "main", "pwm1", "pwm2", "pwm3";
#pwm-cells = <2>;
};
+ i2c@11007000 {
+ compatible = "mediatek,mt7981-i2c";
+ reg = <0 0x11007000 0 0x1000>,
+ <0 0x10217080 0 0x80>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&infracfg CLK_INFRA_I2C0_CK>,
+ <&infracfg CLK_INFRA_AP_DMA_CK>,
+ <&infracfg CLK_INFRA_I2C_MCK_CK>,
+ <&infracfg CLK_INFRA_I2C_PCK_CK>;
+ clock-names = "main", "dma", "arb", "pmic";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pio: pinctrl@11d00000 {
+ compatible = "mediatek,mt7981-pinctrl";
+ reg = <0 0x11d00000 0 0x1000>,
+ <0 0x11c00000 0 0x1000>,
+ <0 0x11c10000 0 0x1000>,
+ <0 0x11d20000 0 0x1000>,
+ <0 0x11e00000 0 0x1000>,
+ <0 0x11e20000 0 0x1000>,
+ <0 0x11f00000 0 0x1000>,
+ <0 0x11f10000 0 0x1000>,
+ <0 0x1000b000 0 0x1000>;
+ reg-names = "gpio", "iocfg_rt", "iocfg_rm", "iocfg_rb", "iocfg_lb",
+ "iocfg_bl", "iocfg_tm", "iocfg_tl", "eint";
+ interrupt-controller;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ gpio-ranges = <&pio 0 0 56>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ efuse@11f20000 {
+ compatible = "mediatek,mt7981-efuse", "mediatek,efuse";
+ reg = <0 0x11f20000 0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
clock-controller@15000000 {
compatible = "mediatek,mt7981-ethsys", "syscon";
reg = <0 0x15000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
+
+ wifi@18000000 {
+ compatible = "mediatek,mt7981-wmac";
+ reg = <0 0x18000000 0 0x1000000>,
+ <0 0x10003000 0 0x1000>,
+ <0 0x11d10000 0 0x1000>;
+ interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_NETSYS_MCU_SEL>,
+ <&topckgen CLK_TOP_AP2CNN_HOST_SEL>;
+ clock-names = "mcu", "ap2conn";
+ resets = <&watchdog MT7986_TOPRGU_CONSYS_SW_RST>;
+ reset-names = "consys";
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-emmc.dtso b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-emmc.dtso
index 779dc6782bb1..047a8388811e 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-emmc.dtso
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-emmc.dtso
@@ -9,21 +9,17 @@
/ {
compatible = "bananapi,bpi-r3", "mediatek,mt7986a";
-
- fragment@0 {
- target-path = "/soc/mmc@11230000";
- __overlay__ {
- bus-width = <8>;
- max-frequency = <200000000>;
- cap-mmc-highspeed;
- mmc-hs200-1_8v;
- mmc-hs400-1_8v;
- hs400-ds-delay = <0x14014>;
- non-removable;
- no-sd;
- no-sdio;
- status = "okay";
- };
- };
};
+&{/soc/mmc@11230000} {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ hs400-ds-delay = <0x14014>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-mini.dts b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-mini.dts
new file mode 100644
index 000000000000..e2a2fea7adf0
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-mini.dts
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ * Authors: Frank Wunderlich <frank-w@public-files.de>
+ * Eric Woudstra <ericwouds@gmail.com>
+ * Tianling Shen <cnsztl@immortalwrt.org>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#include "mt7986a.dtsi"
+
+/ {
+ model = "Bananapi BPI-R3 Mini";
+ chassis-type = "embedded";
+ compatible = "bananapi,bpi-r3mini", "mediatek,mt7986a";
+
+ aliases {
+ serial0 = &uart0;
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ dcin: regulator-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12vd";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ /*
+ * The fan signal is inverted on this board and the PWM driver does not
+ * support polarity inversion, so cooling levels 0, 1 and 2 map to the
+ * reversed duty cycles <255 96 0>, i.e. lowest to highest fan speed.
+ */
+ cooling-levels = <255 96 0>;
+ pwms = <&pwm 0 10000>;
+ };
+
+ reg_1p8v: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1.8vd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ vin-supply = <&dcin>;
+ };
+
+ reg_3p3v: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3.3vd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ vin-supply = <&dcin>;
+ };
+
+ usb_vbus: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpios = <&pio 20 GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ };
+
+ en8811_a: regulator-phy1 {
+ compatible = "regulator-fixed";
+ regulator-name = "phy1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 16 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ };
+
+ en8811_b: regulator-phy2 {
+ compatible = "regulator-fixed";
+ regulator-name = "phy2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 17 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ green_led: led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&pio 19 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ reset-key {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&pio 7 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&cpu_thermal {
+ cooling-maps {
+ map0 {
+ /* active: set fan to cooling level 2 */
+ cooling-device = <&fan 2 2>;
+ trip = <&cpu_trip_active_high>;
+ };
+
+ map1 {
+ /* active: set fan to cooling level 1 */
+ cooling-device = <&fan 1 1>;
+ trip = <&cpu_trip_active_med>;
+ };
+
+ map2 {
+ /* active: set fan to cooling level 0 */
+ cooling-device = <&fan 0 0>;
+ trip = <&cpu_trip_active_low>;
+ };
+ };
+};
+
+&crypto {
+ status = "okay";
+};
+
+&eth {
+ status = "okay";
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+ phy-handle = <&phy0>;
+ };
+
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+ phy-mode = "2500base-x";
+ phy-handle = <&phy1>;
+ };
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_default>;
+ pinctrl-1 = <&mmc0_pins_uhs>;
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&reg_1p8v>;
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins>;
+ status = "okay";
+
+ /* MAC Address EEPROM */
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+
+ address-width = <8>;
+ pagesize = <8>;
+ size = <256>;
+ };
+};
+
+&mdio {
+ phy0: ethernet-phy@14 {
+ reg = <14>;
+ interrupts-extended = <&pio 48 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pio 49 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <20000>;
+ phy-mode = "2500base-x";
+ full-duplex;
+ pause;
+ airoha,pnswap-rx;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 { /* en8811_a_gpio5 */
+ reg = <0>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <1>;
+ default-state = "keep";
+ };
+ led@1 { /* en8811_a_gpio4 */
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <2>;
+ default-state = "keep";
+ };
+ };
+ };
+
+ phy1: ethernet-phy@15 {
+ reg = <15>;
+ interrupts-extended = <&pio 46 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pio 47 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <20000>;
+ phy-mode = "2500base-x";
+ full-duplex;
+ pause;
+ airoha,pnswap-rx;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 { /* en8811_b_gpio5 */
+ reg = <0>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_WAN;
+ function-enumerator = <1>;
+ default-state = "keep";
+ };
+ led@1 { /* en8811_b_gpio4 */
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WAN;
+ function-enumerator = <2>;
+ default-state = "keep";
+ };
+ };
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pins>;
+ status = "okay";
+};
+
+&pcie_phy {
+ status = "okay";
+};
+
+&pio {
+ i2c_pins: i2c-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c";
+ };
+ };
+
+ mmc0_pins_default: mmc0-pins {
+ mux {
+ function = "emmc";
+ groups = "emmc_51";
+ };
+ conf-cmd-dat {
+ pins = "EMMC_DATA_0", "EMMC_DATA_1", "EMMC_DATA_2",
+ "EMMC_DATA_3", "EMMC_DATA_4", "EMMC_DATA_5",
+ "EMMC_DATA_6", "EMMC_DATA_7", "EMMC_CMD";
+ input-enable;
+ drive-strength = <4>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>; /* pull-up 10K */
+ };
+ conf-clk {
+ pins = "EMMC_CK";
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>; /* pull-down 50K */
+ };
+ conf-ds {
+ pins = "EMMC_DSL";
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>; /* pull-down 50K */
+ };
+ conf-rst {
+ pins = "EMMC_RSTB";
+ drive-strength = <4>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>; /* pull-up 10K */
+ };
+ };
+
+ mmc0_pins_uhs: mmc0-uhs-pins {
+ mux {
+ function = "emmc";
+ groups = "emmc_51";
+ };
+ conf-cmd-dat {
+ pins = "EMMC_DATA_0", "EMMC_DATA_1", "EMMC_DATA_2",
+ "EMMC_DATA_3", "EMMC_DATA_4", "EMMC_DATA_5",
+ "EMMC_DATA_6", "EMMC_DATA_7", "EMMC_CMD";
+ input-enable;
+ drive-strength = <4>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>; /* pull-up 10K */
+ };
+ conf-clk {
+ pins = "EMMC_CK";
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>; /* pull-down 50K */
+ };
+ conf-ds {
+ pins = "EMMC_DSL";
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>; /* pull-down 50K */
+ };
+ conf-rst {
+ pins = "EMMC_RSTB";
+ drive-strength = <4>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>; /* pull-up 10K */
+ };
+ };
+
+ pcie_pins: pcie-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_clk", "pcie_wake", "pcie_pereset";
+ };
+ };
+
+ pwm_pins: pwm-pins {
+ mux {
+ function = "pwm";
+ groups = "pwm0";
+ };
+ };
+
+ spi_flash_pins: spi-flash-pins {
+ mux {
+ function = "spi";
+ groups = "spi0", "spi0_wp_hold";
+ };
+ };
+
+ usb_ngff_pins: usb-ngff-pins {
+ ngff-gnss-off-conf {
+ pins = "GPIO_6";
+ drive-strength = <8>;
+ mediatek,pull-up-adv = <1>;
+ };
+ ngff-pe-rst-conf {
+ pins = "GPIO_7";
+ drive-strength = <8>;
+ mediatek,pull-up-adv = <1>;
+ };
+ ngff-wwan-off-conf {
+ pins = "GPIO_8";
+ drive-strength = <8>;
+ mediatek,pull-up-adv = <1>;
+ };
+ ngff-pwr-off-conf {
+ pins = "GPIO_9";
+ drive-strength = <8>;
+ mediatek,pull-up-adv = <1>;
+ };
+ ngff-rst-conf {
+ pins = "GPIO_10";
+ drive-strength = <8>;
+ mediatek,pull-up-adv = <1>;
+ };
+ ngff-coex-conf {
+ pins = "SPI1_CS";
+ drive-strength = <8>;
+ mediatek,pull-up-adv = <1>;
+ };
+ };
+
+ wf_2g_5g_pins: wf-2g-5g-pins {
+ mux {
+ function = "wifi";
+ groups = "wf_2g", "wf_5g";
+ };
+ conf {
+ pins = "WF0_HB1", "WF0_HB2", "WF0_HB3", "WF0_HB4",
+ "WF0_HB0", "WF0_HB0_B", "WF0_HB5", "WF0_HB6",
+ "WF0_HB7", "WF0_HB8", "WF0_HB9", "WF0_HB10",
+ "WF0_TOP_CLK", "WF0_TOP_DATA", "WF1_HB1",
+ "WF1_HB2", "WF1_HB3", "WF1_HB4", "WF1_HB0",
+ "WF1_HB5", "WF1_HB6", "WF1_HB7", "WF1_HB8",
+ "WF1_TOP_CLK", "WF1_TOP_DATA";
+ drive-strength = <4>;
+ };
+ };
+
+ wf_dbdc_pins: wf-dbdc-pins {
+ mux {
+ function = "wifi";
+ groups = "wf_dbdc";
+ };
+ conf {
+ pins = "WF0_HB1", "WF0_HB2", "WF0_HB3", "WF0_HB4",
+ "WF0_HB0", "WF0_HB0_B", "WF0_HB5", "WF0_HB6",
+ "WF0_HB7", "WF0_HB8", "WF0_HB9", "WF0_HB10",
+ "WF0_TOP_CLK", "WF0_TOP_DATA", "WF1_HB1",
+ "WF1_HB2", "WF1_HB3", "WF1_HB4", "WF1_HB0",
+ "WF1_HB5", "WF1_HB6", "WF1_HB7", "WF1_HB8",
+ "WF1_TOP_CLK", "WF1_TOP_DATA";
+ drive-strength = <4>;
+ };
+ };
+
+ wf_led_pins: wf-led-pins {
+ mux {
+ function = "led";
+ groups = "wifi_led";
+ };
+ };
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm_pins>;
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_flash_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "spi-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+
+ spi-max-frequency = <20000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+};
+
+&ssusb {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_ngff_pins>;
+ vusb33-supply = <&reg_3p3v>;
+ vbus-supply = <&usb_vbus>;
+ status = "okay";
+};
+
+&trng {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&wifi {
+ status = "okay";
+ pinctrl-names = "default", "dbdc";
+ pinctrl-0 = <&wf_2g_5g_pins>, <&wf_led_pins>;
+ pinctrl-1 = <&wf_dbdc_pins>, <&wf_led_pins>;
+
+ led {
+ led-active-low;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso
index 7b97c5c91bd0..24398f8a7da4 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso
@@ -9,46 +9,44 @@
/ {
compatible = "bananapi,bpi-r3", "mediatek,mt7986a";
+};
+
+&{/soc/spi@1100a000} {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ spi_nand: flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ spi-tx-buswidth = <4>;
+ spi-rx-buswidth = <4>;
- fragment@0 {
- target-path = "/soc/spi@1100a000";
- __overlay__ {
+ partitions {
+ compatible = "fixed-partitions";
#address-cells = <1>;
- #size-cells = <0>;
- spi_nand: flash@0 {
- compatible = "spi-nand";
- reg = <0>;
- spi-max-frequency = <10000000>;
- spi-tx-buswidth = <4>;
- spi-rx-buswidth = <4>;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "bl2";
- reg = <0x0 0x100000>;
- read-only;
- };
-
- partition@100000 {
- label = "reserved";
- reg = <0x100000 0x280000>;
- };
-
- partition@380000 {
- label = "fip";
- reg = <0x380000 0x200000>;
- read-only;
- };
-
- partition@580000 {
- label = "ubi";
- reg = <0x580000 0x7a80000>;
- };
- };
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bl2";
+ reg = <0x0 0x100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "reserved";
+ reg = <0x100000 0x280000>;
+ };
+
+ partition@380000 {
+ label = "fip";
+ reg = <0x380000 0x200000>;
+ read-only;
+ };
+
+ partition@580000 {
+ label = "ubi";
+ reg = <0x580000 0x7a80000>;
};
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nor.dtso b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nor.dtso
index e48881be4ed6..6a0d529b54ac 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nor.dtso
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nor.dtso
@@ -9,54 +9,52 @@
/ {
compatible = "bananapi,bpi-r3", "mediatek,mt7986a";
+};
+
+&{/soc/spi@1100a000} {
+ #address-cells = <1>;
+ #size-cells = <0>;
- fragment@0 {
- target-path = "/soc/spi@1100a000";
- __overlay__ {
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
#address-cells = <1>;
- #size-cells = <0>;
- flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <10000000>;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "bl2";
- reg = <0x0 0x40000>;
- read-only;
- };
-
- partition@40000 {
- label = "u-boot-env";
- reg = <0x40000 0x40000>;
- };
-
- partition@80000 {
- label = "reserved2";
- reg = <0x80000 0x80000>;
- };
-
- partition@100000 {
- label = "fip";
- reg = <0x100000 0x80000>;
- read-only;
- };
-
- partition@180000 {
- label = "recovery";
- reg = <0x180000 0xa80000>;
- };
-
- partition@c00000 {
- label = "fit";
- reg = <0xc00000 0x1400000>;
- };
- };
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bl2";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "u-boot-env";
+ reg = <0x40000 0x40000>;
+ };
+
+ partition@80000 {
+ label = "reserved2";
+ reg = <0x80000 0x80000>;
+ };
+
+ partition@100000 {
+ label = "fip";
+ reg = <0x100000 0x80000>;
+ read-only;
+ };
+
+ partition@180000 {
+ label = "recovery";
+ reg = <0x180000 0xa80000>;
+ };
+
+ partition@c00000 {
+ label = "fit";
+ reg = <0xc00000 0x1400000>;
};
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sd.dtso b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sd.dtso
index f623bce075ce..d9e01967acc4 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sd.dtso
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sd.dtso
@@ -9,15 +9,11 @@
/ {
compatible = "bananapi,bpi-r3", "mediatek,mt7986a";
-
- fragment@0 {
- target-path = "/soc/mmc@11230000";
- __overlay__ {
- bus-width = <4>;
- max-frequency = <52000000>;
- cap-sd-highspeed;
- status = "okay";
- };
- };
};
+&{/soc/mmc@11230000} {
+ bus-width = <4>;
+ max-frequency = <52000000>;
+ cap-sd-highspeed;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
index bba97de4fb44..aa728331e876 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#include <dt-bindings/clock/mediatek,mt7988-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/phy/phy.h>
/ {
compatible = "mediatek,mt7988a";
@@ -78,7 +80,7 @@
#interrupt-cells = <3>;
};
- clock-controller@10001000 {
+ infracfg: clock-controller@10001000 {
compatible = "mediatek,mt7988-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
@@ -103,6 +105,92 @@
#clock-cells = <1>;
};
+ pwm@10048000 {
+ compatible = "mediatek,mt7988-pwm";
+ reg = <0 0x10048000 0 0x1000>;
+ clocks = <&infracfg CLK_INFRA_66M_PWM_BCK>,
+ <&infracfg CLK_INFRA_66M_PWM_HCK>,
+ <&infracfg CLK_INFRA_66M_PWM_CK1>,
+ <&infracfg CLK_INFRA_66M_PWM_CK2>,
+ <&infracfg CLK_INFRA_66M_PWM_CK3>,
+ <&infracfg CLK_INFRA_66M_PWM_CK4>,
+ <&infracfg CLK_INFRA_66M_PWM_CK5>,
+ <&infracfg CLK_INFRA_66M_PWM_CK6>,
+ <&infracfg CLK_INFRA_66M_PWM_CK7>,
+ <&infracfg CLK_INFRA_66M_PWM_CK8>;
+ clock-names = "top", "main", "pwm1", "pwm2", "pwm3",
+ "pwm4", "pwm5", "pwm6", "pwm7", "pwm8";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ i2c@11003000 {
+ compatible = "mediatek,mt7981-i2c";
+ reg = <0 0x11003000 0 0x1000>,
+ <0 0x10217080 0 0x80>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ clock-names = "main", "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c@11004000 {
+ compatible = "mediatek,mt7981-i2c";
+ reg = <0 0x11004000 0 0x1000>,
+ <0 0x10217100 0 0x80>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ clock-names = "main", "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c@11005000 {
+ compatible = "mediatek,mt7981-i2c";
+ reg = <0 0x11005000 0 0x1000>,
+ <0 0x10217180 0 0x80>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ clock-names = "main", "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ usb@11190000 {
+ compatible = "mediatek,mt7988-xhci", "mediatek,mtk-xhci";
+ reg = <0 0x11190000 0 0x2e00>,
+ <0 0x11193e00 0 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&infracfg CLK_INFRA_USB_SYS>,
+ <&infracfg CLK_INFRA_USB_REF>,
+ <&infracfg CLK_INFRA_66M_USB_HCK>,
+ <&infracfg CLK_INFRA_133M_USB_HCK>,
+ <&infracfg CLK_INFRA_USB_XHCI>;
+ clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ };
+
+ usb@11200000 {
+ compatible = "mediatek,mt7988-xhci", "mediatek,mtk-xhci";
+ reg = <0 0x11200000 0 0x2e00>,
+ <0 0x11203e00 0 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&infracfg CLK_INFRA_USB_SYS_CK_P1>,
+ <&infracfg CLK_INFRA_USB_CK_P1>,
+ <&infracfg CLK_INFRA_66M_USB_HCK_CK_P1>,
+ <&infracfg CLK_INFRA_133M_USB_HCK_CK_P1>,
+ <&infracfg CLK_INFRA_USB_XHCI_CK_P1>;
+ clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ };
+
clock-controller@11f40000 {
compatible = "mediatek,mt7988-xfi-pll";
reg = <0 0x11f40000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
index 90cbbc18a483..8d1cbc92bce3 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
@@ -27,6 +27,15 @@
hid-descr-addr = <0x0020>;
interrupts-extended = <&pio 88 IRQ_TYPE_LEVEL_LOW>;
};
+
+ /* The Lenovo IdeaPad C330 uses a G2Touch touchscreen as a second-source touchscreen */
+ touchscreen@40 {
+ compatible = "hid-over-i2c";
+ reg = <0x40>;
+ hid-descr-addr = <0x0001>;
+ interrupt-parent = <&pio>;
+ interrupts = <88 IRQ_TYPE_LEVEL_LOW>;
+ };
};
&i2c4 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
index 6d962d437e02..b4d85147b77b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
@@ -1134,12 +1134,6 @@
rtc: mt6397rtc {
compatible = "mediatek,mt6397-rtc";
};
-
- syscfg_pctl_pmic: syscon@c000 {
- compatible = "mediatek,mt6397-pctl-pmic-syscfg",
- "syscon";
- reg = <0 0x0000c000 0 0x0108>;
- };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 3fab21f59d18..bb4671c18e3b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -213,14 +213,14 @@
<MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
<MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
};
pins_clk {
pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
bias-pull-down;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
};
pins_insert {
@@ -241,13 +241,13 @@
<MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
<MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_2mA>;
+ drive-strength = <2>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
pins_clk {
pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
- drive-strength = <MTK_DRIVE_2mA>;
+ drive-strength = <2>;
bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
};
@@ -265,13 +265,13 @@
<MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
<MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
};
pins_clk {
pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_4mA>;
+ drive-strength = <4>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
index 681deddffc2a..f04baea5d6cb 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -160,7 +160,6 @@
pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
<PINMUX_GPIO83__FUNC_SCL0>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -169,7 +168,6 @@
pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
<PINMUX_GPIO84__FUNC_SCL1>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -178,7 +176,6 @@
pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
<PINMUX_GPIO104__FUNC_SDA2>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -187,7 +184,6 @@
pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
<PINMUX_GPIO51__FUNC_SDA3>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -196,7 +192,6 @@
pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
<PINMUX_GPIO106__FUNC_SDA4>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -205,7 +200,6 @@
pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
<PINMUX_GPIO49__FUNC_SDA5>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi
index 8b57706ac814..586eee79c73c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi
@@ -27,7 +27,7 @@
dlg,btn-cfg = <50>;
dlg,mic-det-thr = <500>;
dlg,jack-ins-deb = <20>;
- dlg,jack-det-rate = "32ms_64ms";
+ dlg,jack-det-rate = "32_64";
dlg,jack-rem-deb = <1>;
dlg,a-d-btn-thr = <0xa>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts
index 072133fb0f01..f34964afe39b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts
@@ -9,6 +9,7 @@
/ {
model = "Google cozmo board";
+ chassis-type = "laptop";
compatible = "google,cozmo", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
index b595622e7bee..72852b760038 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
@@ -9,6 +9,7 @@
/ {
model = "Google fennel sku1 board";
+ chassis-type = "convertible";
compatible = "google,fennel-sku1", "google,fennel", "mediatek,mt8183";
pwmleds {
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
index 5a1c39318a6c..757d0afd14fb 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
@@ -9,6 +9,7 @@
/ {
model = "Google fennel sku6 board";
+ chassis-type = "convertible";
compatible = "google,fennel-sku6", "google,fennel", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts
index 3ea4fdb40118..6641b087e7c5 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts
@@ -9,6 +9,7 @@
/ {
model = "Google fennel sku7 board";
+ chassis-type = "convertible";
compatible = "google,fennel-sku7", "google,fennel", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14-sku2.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14-sku2.dts
index 3fc5a6181d7e..877256eab262 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14-sku2.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14-sku2.dts
@@ -9,6 +9,7 @@
/ {
model = "Google fennel14 sku2 board";
+ chassis-type = "laptop";
compatible = "google,fennel-sku2", "google,fennel", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts
index 23ad0b91e977..b981dd31a430 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts
@@ -9,6 +9,7 @@
/ {
model = "Google fennel14 sku0 board";
+ chassis-type = "laptop";
compatible = "google,fennel-sku0", "google,fennel", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts
index e5bd9191e426..f3ac9c074226 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts
@@ -9,6 +9,7 @@
/ {
model = "Google kappa board";
+ chassis-type = "laptop";
compatible = "google,kappa", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
index 8fa89db03e63..e8241587949b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
@@ -9,5 +9,6 @@
/ {
model = "Google kenzo sku17 board";
+ chassis-type = "laptop";
compatible = "google,juniper-sku17", "google,juniper", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku0.dts
index 4eb2a0d571af..ddb993521bbf 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku0.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku0.dts
@@ -19,6 +19,6 @@
&mmc1_pins_uhs {
pins-clk {
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku1.dts
index 6a733361e8ae..10c4f920a7d8 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-makomo-sku1.dts
@@ -19,6 +19,6 @@
&mmc1_pins_uhs {
pins-clk {
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts
index 6a7ae616512d..cce326aec1aa 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts
@@ -17,7 +17,7 @@
pinctrl-names = "default";
pinctrl-0 = <&bt_pins_wakeup>;
- wobt {
+ event-wobt {
label = "Wake on BT";
gpios = <&pio 42 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_WAKEUP>;
@@ -47,10 +47,8 @@
};
};
-&wifi_wakeup {
- wowlan {
- gpios = <&pio 113 GPIO_ACTIVE_LOW>;
- };
+&wifi_wakeup_event {
+ gpios = <&pio 113 GPIO_ACTIVE_LOW>;
};
&wifi_pwrseq {
@@ -68,16 +66,16 @@
&mmc1_pins_default {
pins-cmd-dat {
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
};
pins-clk {
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
};
};
&mmc1_pins_uhs {
pins-clk {
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts
index 89208b843b27..928b205a616a 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts
@@ -9,6 +9,7 @@
/ {
model = "Google willow board sku0";
+ chassis-type = "laptop";
compatible = "google,willow-sku0", "google,willow", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts
index c7b20441d053..71307a8052d6 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts
@@ -9,5 +9,6 @@
/ {
model = "Google willow board sku1";
+ chassis-type = "laptop";
compatible = "google,willow-sku1", "google,willow", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
index 7592e3b86037..fa4ab4d2899f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
@@ -155,21 +155,24 @@
vdd18-supply = <&pp1800_mipibrdg>;
vdd33-supply = <&vddio_mipibrdg>;
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- anx7625_in: endpoint {
- remote-endpoint = <&dsi_out>;
+ port@0 {
+ reg = <0>;
+
+ anx7625_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
};
- };
- port@1 {
- reg = <1>;
+ port@1 {
+ reg = <1>;
- anx7625_out: endpoint {
- remote-endpoint = <&panel_in>;
+ anx7625_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts
index 7739358008ee..5a416143b4a0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts
@@ -12,6 +12,7 @@
/ {
model = "MediaTek kodama sku32 board";
+ chassis-type = "tablet";
compatible = "google,kodama-sku32", "google,kodama", "mediatek,mt8183";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 100191c6453b..6345e969efae 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -152,7 +152,7 @@
pinctrl-names = "default";
pinctrl-0 = <&wifi_pins_wakeup>;
- button-wowlan {
+ wifi_wakeup_event: event-wowlan {
label = "Wake on WiFi";
gpios = <&pio 113 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_WAKEUP>;
@@ -488,7 +488,7 @@
<PINMUX_GPIO172__FUNC_TDM_DATA1_2ND>,
<PINMUX_GPIO173__FUNC_TDM_DATA2_2ND>,
<PINMUX_GPIO10__FUNC_TDM_DATA3>; /*8ch-i2s to it6505*/
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
};
};
@@ -502,7 +502,7 @@
<PINMUX_GPIO10__FUNC_GPIO10>;
input-enable;
bias-pull-down;
- drive-strength = <MTK_DRIVE_2mA>;
+ drive-strength = <2>;
};
};
@@ -533,7 +533,6 @@
pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
<PINMUX_GPIO83__FUNC_SCL0>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -542,7 +541,6 @@
pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
<PINMUX_GPIO84__FUNC_SCL1>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -551,7 +549,6 @@
pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
<PINMUX_GPIO104__FUNC_SDA2>;
bias-disable;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -560,7 +557,6 @@
pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
<PINMUX_GPIO51__FUNC_SDA3>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -569,7 +565,6 @@
pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
<PINMUX_GPIO106__FUNC_SDA4>;
bias-disable;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -578,7 +573,6 @@
pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
<PINMUX_GPIO49__FUNC_SDA5>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -679,14 +673,14 @@
<PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
<PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
input-enable;
mediatek,pull-up-adv = <10>;
};
pins-clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
mediatek,pull-down-adv = <10>;
input-enable;
};
@@ -803,7 +797,6 @@
};
pins-rts {
pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
- output-enable;
};
pins-cts {
pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
@@ -822,7 +815,6 @@
};
pins-rts {
pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
- output-enable;
};
pins-cts {
pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
index 333c516af490..1aa668c3ccf9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
@@ -197,7 +197,6 @@
pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
<PINMUX_GPIO83__FUNC_SCL0>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -206,7 +205,6 @@
pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
<PINMUX_GPIO84__FUNC_SCL1>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -215,7 +213,6 @@
pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
<PINMUX_GPIO104__FUNC_SDA2>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -224,7 +221,6 @@
pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
<PINMUX_GPIO51__FUNC_SDA3>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -233,7 +229,6 @@
pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
<PINMUX_GPIO106__FUNC_SDA4>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -242,7 +237,6 @@
pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
<PINMUX_GPIO49__FUNC_SDA5>;
mediatek,pull-up-adv = <3>;
- mediatek,drive-strength-adv = <00>;
};
};
@@ -356,14 +350,14 @@
<PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
<PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
input-enable;
mediatek,pull-up-adv = <10>;
};
pins_clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
mediatek,pull-down-adv = <10>;
input-enable;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 774ae5d9143f..fbf145639b8c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1183,7 +1183,7 @@
status = "disabled";
};
- thermal: thermal@1100b000 {
+ thermal: thermal-sensor@1100b000 {
#thermal-sensor-cells = <1>;
compatible = "mediatek,mt8183-thermal";
reg = <0 0x1100b000 0 0xc00>;
@@ -2090,61 +2090,129 @@
};
};
- /* The tzts1 ~ tzts6 don't need to polling */
- /* The tzts1 ~ tzts6 don't need to thermal throttle */
-
- tzts1: tzts1 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ tzts1: soc-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
thermal-sensors = <&thermal 1>;
sustainable-power = <5000>;
- trips {};
- cooling-maps {};
+ trips {
+ soc_alert: trip-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ soc_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
- tzts2: tzts2 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ tzts2: gpu-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
thermal-sensors = <&thermal 2>;
sustainable-power = <5000>;
- trips {};
- cooling-maps {};
+
+ trips {
+ gpu_alert: trip-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ gpu_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
- tzts3: tzts3 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ tzts3: md1-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
thermal-sensors = <&thermal 3>;
sustainable-power = <5000>;
- trips {};
- cooling-maps {};
+
+ trips {
+ md1_alert: trip-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ md1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
- tzts4: tzts4 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ tzts4: cpu-little-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
thermal-sensors = <&thermal 4>;
sustainable-power = <5000>;
- trips {};
- cooling-maps {};
+
+ trips {
+ cpul_alert: trip-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpul_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
- tzts5: tzts5 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ tzts5: cpu-big-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
thermal-sensors = <&thermal 5>;
sustainable-power = <5000>;
- trips {};
- cooling-maps {};
+
+ trips {
+ cpub_alert: trip-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpub_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
- tztsABB: tztsABB {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ tztsABB: tsabb-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
thermal-sensors = <&thermal 6>;
sustainable-power = <5000>;
- trips {};
- cooling-maps {};
+
+ trips {
+ tsabb_alert: trip-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ tsabb_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589824.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589824.dts
new file mode 100644
index 000000000000..d16834eec87a
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589824.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2022 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-voltorb.dtsi"
+
+/ {
+ model = "Google Voltorb sku589824 board";
+ compatible = "google,voltorb-sku589824", "google,voltorb",
+ "mediatek,mt8186";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589825.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589825.dts
new file mode 100644
index 000000000000..45e57f7706cc
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb-sku589825.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2022 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-voltorb.dtsi"
+
+/ {
+ model = "Google Voltorb sku589825 board";
+ compatible = "google,voltorb-sku589825", "google,voltorb",
+ "mediatek,mt8186";
+};
+
+&i2c1 {
+ touchscreen@10 {
+ compatible = "elan,ekth6915";
+ reg = <0x10>;
+ interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+ reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>;
+ vcc33-supply = <&pp3300_s3>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi
new file mode 100644
index 000000000000..52ec58128d56
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2022 Google LLC
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/spmi/spmi.h>
+
+#include "mt8186-corsola-steelix.dtsi"
+
+/ {
+ chassis-type = "laptop";
+
+ max98360a: max98360a {
+ compatible = "maxim,max98360a";
+ sdmode-gpios = <&pio 150 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&cpu6 {
+ proc-supply = <&mt6319_buck1>;
+};
+
+&cpu7 {
+ proc-supply = <&mt6319_buck1>;
+};
+
+&gpio_keys {
+ status = "disabled";
+};
+
+&keyboard_controller {
+ linux,keymap = <
+ MATRIX_KEY(0x00, 0x02, KEY_BACK)
+ MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
+ MATRIX_KEY(0x02, 0x02, KEY_ZOOM)
+ MATRIX_KEY(0x01, 0x02, KEY_SCALE)
+ MATRIX_KEY(0x03, 0x04, KEY_SYSRQ)
+ MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
+ MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
+ MATRIX_KEY(0x02, 0x09, KEY_MUTE)
+ MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
+ MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
+ CROS_STD_MAIN_KEYMAP
+ >;
+};
+
+&mt6366_vproc11_reg {
+ status = "disabled";
+};
+
+&cluster1_opp_14 {
+ opp-hz = /bits/ 64 <2050000000>;
+ opp-microvolt = <1118750>;
+};
+
+&cluster1_opp_15 {
+ opp-hz = /bits/ 64 <2200000000>;
+};
+
+&rt1019p {
+ status = "disabled";
+};
+
+&sound {
+ compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound";
+ status = "okay";
+
+ spk-hdmi-playback-dai-link {
+ codec {
+ sound-dai = <&it6505dptx>, <&max98360a>;
+ };
+ };
+};
+
+&spmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spmi_pins>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ status = "okay";
+
+ pmic@6 {
+ compatible = "mediatek,mt6319-regulator", "mediatek,mt6315-regulator";
+ reg = <0x6 SPMI_USID>;
+
+ regulators {
+ mt6319_buck1: vbuck1 {
+ regulator-name = "ppvar_dvdd_proc_bc_mt6319";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-allowed-modes = <0 1 2>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&touchscreen {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
index 1807e9d6cb0e..afdab5724eaa 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
@@ -42,7 +42,7 @@
default-brightness-level = <576>;
};
- bt-sco-codec {
+ bt-sco {
compatible = "linux,bt-sco";
#sound-dai-cells = <0>;
};
@@ -223,12 +223,44 @@
mediatek,adsp = <&adsp>;
mediatek,platform = <&afe>;
- playback-codecs {
- sound-dai = <&it6505dptx>, <&rt1019p>;
+ audio-routing =
+ "Headphone", "HPOL",
+ "Headphone", "HPOR",
+ "IN1P", "Headset Mic",
+ "Speakers", "Speaker",
+ "HDMI1", "TX";
+
+ hs-playback-dai-link {
+ link-name = "I2S0";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&rt5682s 0>;
+ };
+ };
+
+ hs-capture-dai-link {
+ link-name = "I2S1";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&rt5682s 0>;
+ };
};
- headset-codec {
- sound-dai = <&rt5682s 0>;
+ spk-share-dai-link {
+ link-name = "I2S2";
+ mediatek,clk-provider = "cpu";
+ };
+
+ spk-hdmi-playback-dai-link {
+ link-name = "I2S3";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ /* The RT1019P and the IT6505 are connected to the same I2S line */
+ codec {
+ sound-dai = <&it6505dptx>, <&rt1019p>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
index b4315c9214dc..29d012d28edb 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
@@ -8,6 +8,7 @@
#include <dt-bindings/clock/mediatek,mt8188-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/mailbox/mediatek,mt8188-gce.h>
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h>
#include <dt-bindings/power/mediatek,mt8188-power.h>
@@ -293,6 +294,112 @@
clock-output-names = "clk32k";
};
+ gpu_opp_table: opp-table-gpu {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-390000000 {
+ opp-hz = /bits/ 64 <390000000>;
+ opp-microvolt = <575000>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-431000000 {
+ opp-hz = /bits/ 64 <431000000>;
+ opp-microvolt = <587500>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-473000000 {
+ opp-hz = /bits/ 64 <473000000>;
+ opp-microvolt = <600000>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-515000000 {
+ opp-hz = /bits/ 64 <515000000>;
+ opp-microvolt = <612500>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-556000000 {
+ opp-hz = /bits/ 64 <556000000>;
+ opp-microvolt = <625000>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-598000000 {
+ opp-hz = /bits/ 64 <598000000>;
+ opp-microvolt = <637500>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-640000000 {
+ opp-hz = /bits/ 64 <640000000>;
+ opp-microvolt = <650000>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-670000000 {
+ opp-hz = /bits/ 64 <670000000>;
+ opp-microvolt = <662500>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <675000>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-730000000 {
+ opp-hz = /bits/ 64 <730000000>;
+ opp-microvolt = <687500>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-760000000 {
+ opp-hz = /bits/ 64 <760000000>;
+ opp-microvolt = <700000>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-790000000 {
+ opp-hz = /bits/ 64 <790000000>;
+ opp-microvolt = <712500>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-835000000 {
+ opp-hz = /bits/ 64 <835000000>;
+ opp-microvolt = <731250>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-880000000 {
+ opp-hz = /bits/ 64 <880000000>;
+ opp-microvolt = <750000>;
+ opp-supported-hw = <0xff>;
+ };
+ opp-915000000 {
+ opp-hz = /bits/ 64 <915000000>;
+ opp-microvolt = <775000>;
+ opp-supported-hw = <0x8f>;
+ };
+ opp-915000000-5 {
+ opp-hz = /bits/ 64 <915000000>;
+ opp-microvolt = <762500>;
+ opp-supported-hw = <0x30>;
+ };
+ opp-915000000-6 {
+ opp-hz = /bits/ 64 <915000000>;
+ opp-microvolt = <750000>;
+ opp-supported-hw = <0x70>;
+ };
+ opp-950000000 {
+ opp-hz = /bits/ 64 <950000000>;
+ opp-microvolt = <800000>;
+ opp-supported-hw = <0x8f>;
+ };
+ opp-950000000-5 {
+ opp-hz = /bits/ 64 <950000000>;
+ opp-microvolt = <775000>;
+ opp-supported-hw = <0x30>;
+ };
+ opp-950000000-6 {
+ opp-hz = /bits/ 64 <950000000>;
+ opp-microvolt = <750000>;
+ opp-supported-hw = <0x70>;
+ };
+ };
+
pmu-a55 {
compatible = "arm,cortex-a55-pmu";
interrupt-parent = <&gic>;
@@ -383,6 +490,329 @@
#interrupt-cells = <2>;
};
+ scpsys: syscon@10006000 {
+ compatible = "mediatek,mt8188-scpsys", "syscon", "simple-mfd";
+ reg = <0 0x10006000 0 0x1000>;
+
+ /* System Power Manager */
+ spm: power-controller {
+ compatible = "mediatek,mt8188-power-controller";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ /* power domain of the SoC */
+ mfg0: power-domain@MT8188_POWER_DOMAIN_MFG0 {
+ reg = <MT8188_POWER_DOMAIN_MFG0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_MFG1 {
+ reg = <MT8188_POWER_DOMAIN_MFG1>;
+ clocks = <&topckgen CLK_APMIXED_MFGPLL>,
+ <&topckgen CLK_TOP_MFG_CORE_TMP>;
+ clock-names = "mfg", "alt";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_MFG2 {
+ reg = <MT8188_POWER_DOMAIN_MFG2>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_MFG3 {
+ reg = <MT8188_POWER_DOMAIN_MFG3>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_MFG4 {
+ reg = <MT8188_POWER_DOMAIN_MFG4>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_VPPSYS0 {
+ reg = <MT8188_POWER_DOMAIN_VPPSYS0>;
+ clocks = <&topckgen CLK_TOP_VPP>,
+ <&topckgen CLK_TOP_CAM>,
+ <&topckgen CLK_TOP_CCU>,
+ <&topckgen CLK_TOP_IMG>,
+ <&topckgen CLK_TOP_VENC>,
+ <&topckgen CLK_TOP_VDEC>,
+ <&topckgen CLK_TOP_WPE_VPP>,
+ <&topckgen CLK_TOP_CFGREG_CLOCK_EN_VPP0>,
+ <&topckgen CLK_TOP_CFGREG_F26M_VPP0>,
+ <&vppsys0 CLK_VPP0_SMI_COMMON_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_VDO0_LARB0_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_VDO0_LARB1_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_VENCSYS_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_VENCSYS_CORE1_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_INFRA_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_CAMSYS_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_VPP1_LARB5_MMSRAM>,
+ <&vppsys0 CLK_VPP0_GALS_VPP1_LARB6_MMSRAM>,
+ <&vppsys0 CLK_VPP0_SMI_REORDER_MMSRAM>,
+ <&vppsys0 CLK_VPP0_SMI_IOMMU>,
+ <&vppsys0 CLK_VPP0_GALS_IMGSYS_CAMSYS>,
+ <&vppsys0 CLK_VPP0_GALS_EMI0_EMI1>,
+ <&vppsys0 CLK_VPP0_SMI_SUB_COMMON_REORDER>,
+ <&vppsys0 CLK_VPP0_SMI_RSI>,
+ <&vppsys0 CLK_VPP0_SMI_COMMON_LARB4>,
+ <&vppsys0 CLK_VPP0_GALS_VDEC_VDEC_CORE1>,
+ <&vppsys0 CLK_VPP0_GALS_VPP1_WPESYS>,
+ <&vppsys0 CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1>;
+ clock-names = "top", "cam", "ccu", "img", "venc",
+ "vdec", "wpe", "cfgck", "cfgxo",
+ "ss-sram-cmn", "ss-sram-v0l0", "ss-sram-v0l1",
+ "ss-sram-ve0", "ss-sram-ve1", "ss-sram-ifa",
+ "ss-sram-cam", "ss-sram-v1l5", "ss-sram-v1l6",
+ "ss-sram-rdr", "ss-iommu", "ss-imgcam",
+ "ss-emi", "ss-subcmn-rdr", "ss-rsi",
+ "ss-cmn-l4", "ss-vdec1", "ss-wpe",
+ "ss-cvdo-ve1";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_VDOSYS0 {
+ reg = <MT8188_POWER_DOMAIN_VDOSYS0>;
+ clocks = <&topckgen CLK_TOP_CFGREG_CLOCK_EN_VDO0>,
+ <&topckgen CLK_TOP_CFGREG_F26M_VDO0>,
+ <&vdosys0 CLK_VDO0_SMI_GALS>,
+ <&vdosys0 CLK_VDO0_SMI_COMMON>,
+ <&vdosys0 CLK_VDO0_SMI_EMI>,
+ <&vdosys0 CLK_VDO0_SMI_IOMMU>,
+ <&vdosys0 CLK_VDO0_SMI_LARB>,
+ <&vdosys0 CLK_VDO0_SMI_RSI>,
+ <&vdosys0 CLK_VDO0_APB_BUS>;
+ clock-names = "cfgck", "cfgxo", "ss-gals",
+ "ss-cmn", "ss-emi", "ss-iommu",
+ "ss-larb", "ss-rsi", "ss-bus";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_VPPSYS1 {
+ reg = <MT8188_POWER_DOMAIN_VPPSYS1>;
+ clocks = <&topckgen CLK_TOP_CFGREG_CLOCK_EN_VPP1>,
+ <&topckgen CLK_TOP_CFGREG_F26M_VPP1>,
+ <&vppsys1 CLK_VPP1_GALS5>,
+ <&vppsys1 CLK_VPP1_GALS6>,
+ <&vppsys1 CLK_VPP1_LARB5>,
+ <&vppsys1 CLK_VPP1_LARB6>;
+ clock-names = "cfgck", "cfgxo",
+ "ss-vpp1-g5", "ss-vpp1-g6",
+ "ss-vpp1-l5", "ss-vpp1-l6";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_VDEC1 {
+ reg = <MT8188_POWER_DOMAIN_VDEC1>;
+ clocks = <&vdecsys CLK_VDEC2_LARB1>;
+ clock-names = "ss-vdec";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_VDEC0 {
+ reg = <MT8188_POWER_DOMAIN_VDEC0>;
+ clocks = <&vdecsys_soc CLK_VDEC1_SOC_LARB1>;
+ clock-names = "ss-vdec";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ cam_vcore: power-domain@MT8188_POWER_DOMAIN_CAM_VCORE {
+ reg = <MT8188_POWER_DOMAIN_CAM_VCORE>;
+ clocks = <&topckgen CLK_TOP_CAM>,
+ <&topckgen CLK_TOP_CCU>,
+ <&topckgen CLK_TOP_CCU_AHB>,
+ <&topckgen CLK_TOP_CFGREG_CLOCK_ISP_AXI_GALS>;
+ clock-names = "cam", "ccu", "bus", "cfgck";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_CAM_MAIN {
+ reg = <MT8188_POWER_DOMAIN_CAM_MAIN>;
+ clocks = <&camsys CLK_CAM_MAIN_LARB13>,
+ <&camsys CLK_CAM_MAIN_LARB14>,
+ <&camsys CLK_CAM_MAIN_CAM2MM0_GALS>,
+ <&camsys CLK_CAM_MAIN_CAM2MM1_GALS>,
+ <&camsys CLK_CAM_MAIN_CAM2SYS_GALS>;
+ clock-names = "ss-cam-l13", "ss-cam-l14",
+ "ss-cam-mm0", "ss-cam-mm1",
+ "ss-camsys";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_CAM_SUBB {
+ reg = <MT8188_POWER_DOMAIN_CAM_SUBB>;
+ clocks = <&camsys CLK_CAM_MAIN_CAM_SUBB>,
+ <&camsys_rawb CLK_CAM_RAWB_LARBX>,
+ <&camsys_yuvb CLK_CAM_YUVB_LARBX>;
+ clock-names = "ss-camb-sub",
+ "ss-camb-raw",
+ "ss-camb-yuv";
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_CAM_SUBA {
+ reg = <MT8188_POWER_DOMAIN_CAM_SUBA>;
+ clocks = <&camsys CLK_CAM_MAIN_CAM_SUBA>,
+ <&camsys_rawa CLK_CAM_RAWA_LARBX>,
+ <&camsys_yuva CLK_CAM_YUVA_LARBX>;
+ clock-names = "ss-cama-sub",
+ "ss-cama-raw",
+ "ss-cama-yuv";
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_VDOSYS1 {
+ reg = <MT8188_POWER_DOMAIN_VDOSYS1>;
+ clocks = <&topckgen CLK_TOP_CFGREG_CLOCK_EN_VDO1>,
+ <&topckgen CLK_TOP_CFGREG_F26M_VDO1>,
+ <&vdosys1 CLK_VDO1_SMI_LARB2>,
+ <&vdosys1 CLK_VDO1_SMI_LARB3>,
+ <&vdosys1 CLK_VDO1_GALS>;
+ clock-names = "cfgck", "cfgxo", "ss-larb2",
+ "ss-larb3", "ss-gals";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_HDMI_TX {
+ reg = <MT8188_POWER_DOMAIN_HDMI_TX>;
+ clocks = <&topckgen CLK_TOP_HDMI_APB>,
+ <&topckgen CLK_TOP_HDCP_24M>;
+ clock-names = "bus", "hdcp";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_DP_TX {
+ reg = <MT8188_POWER_DOMAIN_DP_TX>;
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_EDP_TX {
+ reg = <MT8188_POWER_DOMAIN_EDP_TX>;
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_VENC {
+ reg = <MT8188_POWER_DOMAIN_VENC>;
+ clocks = <&vencsys CLK_VENC1_LARB>,
+ <&vencsys CLK_VENC1_VENC>,
+ <&vencsys CLK_VENC1_GALS>,
+ <&vencsys CLK_VENC1_GALS_SRAM>;
+ clock-names = "ss-ve1-larb", "ss-ve1-core",
+ "ss-ve1-gals", "ss-ve1-sram";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_WPE {
+ reg = <MT8188_POWER_DOMAIN_WPE>;
+ clocks = <&wpesys CLK_WPE_TOP_SMI_LARB7>,
+ <&wpesys CLK_WPE_TOP_SMI_LARB7_PCLK_EN>;
+ clock-names = "ss-wpe-l7", "ss-wpe-l7pce";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_PEXTP_MAC_P0 {
+ reg = <MT8188_POWER_DOMAIN_PEXTP_MAC_P0>;
+ mediatek,infracfg = <&infracfg_ao>;
+ clocks = <&pericfg_ao CLK_PERI_AO_PCIE_P0_FMEM>;
+ clock-names = "ss-pextp-fmem";
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_CSIRX_TOP {
+ reg = <MT8188_POWER_DOMAIN_CSIRX_TOP>;
+ clocks = <&topckgen CLK_TOP_SENINF>,
+ <&topckgen CLK_TOP_SENINF1>;
+ clock-names = "seninf0", "seninf1";
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_PEXTP_PHY_TOP {
+ reg = <MT8188_POWER_DOMAIN_PEXTP_PHY_TOP>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_ADSP_AO {
+ reg = <MT8188_POWER_DOMAIN_ADSP_AO>;
+ clocks = <&topckgen CLK_TOP_AUDIO_LOCAL_BUS>,
+ <&topckgen CLK_TOP_ADSP>;
+ clock-names = "bus", "main";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_ADSP_INFRA {
+ reg = <MT8188_POWER_DOMAIN_ADSP_INFRA>;
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8188_POWER_DOMAIN_AUDIO_ASRC {
+ reg = <MT8188_POWER_DOMAIN_AUDIO_ASRC>;
+ clocks = <&topckgen CLK_TOP_ASM_H>;
+ clock-names = "asm";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_AUDIO {
+ reg = <MT8188_POWER_DOMAIN_AUDIO>;
+ clocks = <&topckgen CLK_TOP_A1SYS_HP>,
+ <&topckgen CLK_TOP_AUD_INTBUS>,
+ <&adsp_audio26m CLK_AUDIODSP_AUDIO26M>;
+ clock-names = "a1sys", "intbus", "adspck";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_ADSP {
+ reg = <MT8188_POWER_DOMAIN_ADSP>;
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ power-domain@MT8188_POWER_DOMAIN_ETHER {
+ reg = <MT8188_POWER_DOMAIN_ETHER>;
+ clocks = <&pericfg_ao CLK_PERI_AO_ETHERNET_MAC>;
+ clock-names = "ethermac";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
watchdog: watchdog@10007000 {
compatible = "mediatek,mt8188-wdt";
reg = <0 0x10007000 0 0x100>;
@@ -413,6 +843,22 @@
clock-names = "spi", "wrap";
};
+ gce0: mailbox@10320000 {
+ compatible = "mediatek,mt8188-gce";
+ reg = <0 0x10320000 0 0x4000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH 0>;
+ #mbox-cells = <2>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_GCE>;
+ };
+
+ gce1: mailbox@10330000 {
+ compatible = "mediatek,mt8188-gce";
+ reg = <0 0x10330000 0 0x4000>;
+ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH 0>;
+ #mbox-cells = <2>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_GCE2>;
+ };
+
scp: scp@10500000 {
compatible = "mediatek,mt8188-scp";
reg = <0 0x10500000 0 0x100000>,
@@ -827,6 +1273,23 @@
#clock-cells = <1>;
};
+ gpu: gpu@13000000 {
+ compatible = "mediatek,mt8188-mali", "arm,mali-valhall-jm";
+ reg = <0 0x13000000 0 0x4000>;
+
+ clocks = <&mfgcfg CLK_MFGCFG_BG3D>;
+ interrupts = <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "job", "mmu", "gpu";
+ operating-points-v2 = <&gpu_opp_table>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_MFG2>,
+ <&spm MT8188_POWER_DOMAIN_MFG3>,
+ <&spm MT8188_POWER_DOMAIN_MFG4>;
+ power-domain-names = "core0", "core1", "core2";
+ status = "disabled";
+ };
+
mfgcfg: clock-controller@13fbf000 {
compatible = "mediatek,mt8188-mfgcfg";
reg = <0 0x13fbf000 0 0x1000>;
@@ -952,5 +1415,22 @@
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
};
+
+ vdosys0: syscon@1c01d000 {
+ compatible = "mediatek,mt8188-vdosys0", "syscon";
+ reg = <0 0x1c01d000 0 0x1000>;
+ #clock-cells = <1>;
+ mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xd000 0x1000>;
+ };
+
+ vdosys1: syscon@1c100000 {
+ compatible = "mediatek,mt8188-vdosys1", "syscon";
+ reg = <0 0x1c100000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ mboxes = <&gce0 1 CMDQ_THR_PRIO_4>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0 0x1000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r1.dts b/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r1.dts
index fd2cb8765a15..ac2673e56fb8 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r1.dts
@@ -7,6 +7,7 @@
/ {
model = "Google Hayato rev1";
+ chassis-type = "convertible";
compatible = "google,hayato-rev1", "google,hayato", "mediatek,mt8192";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts b/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts
index 3127ee5f6172..cd86ad9ba28a 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts
@@ -7,6 +7,7 @@
/ {
model = "Google Hayato rev5";
+ chassis-type = "convertible";
compatible = "google,hayato-rev5-sku2", "google,hayato-sku2",
"google,hayato", "mediatek,mt8192";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dts b/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dts
index bc88866ab2f5..29aa87e93888 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dts
@@ -8,6 +8,7 @@
/ {
model = "Google Spherion (rev0 - 3)";
+ chassis-type = "laptop";
compatible = "google,spherion-rev3", "google,spherion-rev2",
"google,spherion-rev1", "google,spherion-rev0",
"google,spherion", "mediatek,mt8192";
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts b/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts
index 0039158c9e60..5e9e598bab90 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts
@@ -8,6 +8,7 @@
/ {
model = "Google Spherion (rev4)";
+ chassis-type = "laptop";
compatible = "google,spherion-rev4", "google,spherion",
"mediatek,mt8192";
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index 7a704246678f..08d71ddf3668 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -147,6 +147,7 @@
regulator-boot-on;
gpio = <&pio 127 GPIO_ACTIVE_HIGH>;
vin-supply = <&pp3300_g>;
+ off-on-delay-us = <500000>;
};
/* separately switched 3.3V power rail */
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
index 84cbdf6e9eb0..47dea10dd3b8 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
@@ -2234,7 +2234,7 @@
};
};
- gpu0-thermal {
+ gpu-thermal {
polling-delay = <1000>;
polling-delay-passive = <250>;
thermal-sensors = <&lvts_ap MT8192_AP_GPU0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts
new file mode 100644
index 000000000000..88123842c818
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+/dts-v1/;
+#include "mt8195-cherry.dtsi"
+
+/ {
+ model = "HP Dojo (sku 1, 3, 5, 7) board";
+ chassis-type = "convertible";
+ compatible = "google,dojo-sku7", "google,dojo-sku5",
+ "google,dojo-sku3", "google,dojo-sku1",
+ "google,dojo", "mediatek,mt8195";
+};
+
+&audio_codec {
+ compatible = "realtek,rt5682s";
+ realtek,amic-delay-ms = <250>;
+};
+
+&i2c2 {
+ spk_r_amp: amplifier@38 {
+ compatible = "maxim,max98390";
+ reg = <0x38>;
+ reset-gpios = <&pio 100 GPIO_ACTIVE_LOW>;
+ sound-name-prefix = "Right";
+ #sound-dai-cells = <0>;
+ };
+
+ spk_l_amp: amplifier@39 {
+ compatible = "maxim,max98390";
+ reg = <0x39>;
+ sound-name-prefix = "Left";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2c4 {
+ touchscreen@15 {
+ compatible = "hid-over-i2c";
+ reg = <0x15>;
+ hid-descr-addr = <0x0001>;
+ interrupts-extended = <&pio 92 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+ post-power-on-delay-ms = <10>;
+ vdd-supply = <&pp3300_s3>;
+ };
+};
+
+&keyboard_controller {
+ linux,keymap = <
+ CROS_STD_MAIN_KEYMAP
+
+ MATRIX_KEY(0x00, 0x02, KEY_BACK)
+ MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
+ MATRIX_KEY(0x02, 0x02, KEY_ZOOM)
+ MATRIX_KEY(0x01, 0x02, KEY_SCALE)
+ MATRIX_KEY(0x03, 0x04, KEY_SYSRQ)
+ MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
+ MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
+ MATRIX_KEY(0x02, 0x09, KEY_KBDILLUMTOGGLE)
+ MATRIX_KEY(0x01, 0x09, KEY_PLAYPAUSE)
+ MATRIX_KEY(0x00, 0x04, KEY_MICMUTE)
+ MATRIX_KEY(0x00, 0x01, KEY_MUTE)
+ MATRIX_KEY(0x01, 0x05, KEY_VOLUMEDOWN)
+ MATRIX_KEY(0x03, 0x05, KEY_VOLUMEUP)
+ >;
+};
+
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_pins_default>;
+ status = "okay";
+};
+
+&pciephy {
+ status = "okay";
+};
+
+&pio_default {
+ pins-low-power-hdmi-disable {
+ pinmux = <PINMUX_GPIO31__FUNC_GPIO31>,
+ <PINMUX_GPIO32__FUNC_GPIO32>,
+ <PINMUX_GPIO33__FUNC_GPIO33>,
+ <PINMUX_GPIO34__FUNC_GPIO34>,
+ <PINMUX_GPIO35__FUNC_GPIO35>;
+ input-enable;
+ bias-pull-down;
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8195_mt6359_max98390_rt5682";
+ model = "m8195_m98390_5682s";
+
+ audio-routing =
+ "Headphone", "HPOL",
+ "Headphone", "HPOR",
+ "IN1P", "Headset Mic",
+ "Right Spk", "Right BE_OUT",
+ "Left Spk", "Left BE_OUT";
+
+ spk-playback-dai-link {
+ codec {
+ sound-dai = <&spk_r_amp>, <&spk_l_amp>;
+ };
+ };
+};
+
+&spk_amplifier {
+ /* Disable RT1019P, not present on Dojo */
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index 4a11918da370..fe5400e17b0f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -240,6 +240,7 @@
spk_amplifier: rt1019p {
compatible = "realtek,rt1019p";
label = "rt1019p";
+ #sound-dai-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&rt1019p_pins_default>;
sdb-gpios = <&pio 100 GPIO_ACTIVE_HIGH>;
@@ -366,6 +367,7 @@
&dp_tx {
status = "okay";
+ #sound-dai-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&dptx_pin>;
@@ -436,6 +438,7 @@
/* Realtek RT5682i or RT5682s, sharing the same configuration */
reg = <0x1a>;
interrupts-extended = <&pio 89 IRQ_TYPE_EDGE_BOTH>;
+ #sound-dai-cells = <0>;
realtek,jd-src = <1>;
AVDD-supply = <&mt6359_vio18_ldo_reg>;
@@ -1162,6 +1165,48 @@
"AFE_SOF_DL2", "AFE_SOF_DL3", "AFE_SOF_UL4", "AFE_SOF_UL5";
pinctrl-names = "default";
pinctrl-0 = <&aud_pins_default>;
+
+ audio-routing =
+ "Headphone", "HPOL",
+ "Headphone", "HPOR",
+ "IN1P", "Headset Mic",
+ "Ext Spk", "Speaker";
+
+ mm-dai-link {
+ link-name = "ETDM1_IN_BE";
+ mediatek,clk-provider = "cpu";
+ };
+
+ hs-playback-dai-link {
+ link-name = "ETDM1_OUT_BE";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&audio_codec>;
+ };
+ };
+
+ hs-capture-dai-link {
+ link-name = "ETDM2_IN_BE";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&audio_codec>;
+ };
+ };
+
+ spk-playback-dai-link {
+ link-name = "ETDM2_OUT_BE";
+ mediatek,clk-provider = "cpu";
+ codec {
+ sound-dai = <&spk_amplifier>;
+ };
+ };
+
+ displayport-dai-link {
+ link-name = "DPTX_BE";
+ codec {
+ sound-dai = <&dp_tx>;
+ };
+ };
};
&spi0 {
@@ -1389,6 +1434,11 @@
MATRIX_KEY(0x02, 0x09, 0) /* T8 */
MATRIX_KEY(0x01, 0x09, 0) /* T9 */
MATRIX_KEY(0x00, 0x04, 0) /* T10 */
+
+ /* T11 to T13 are present only on Dojo */
+ MATRIX_KEY(0x00, 0x01, 0) /* T11 */
+ MATRIX_KEY(0x01, 0x05, 0) /* T12 */
+ MATRIX_KEY(0x03, 0x05, 0) /* T13 */
>;
linux,keymap = <
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
index b82f7176b4a1..31d424b8fc7c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
@@ -305,14 +305,14 @@
<PINMUX_GPIO78__FUNC_GBE_TXD2>,
<PINMUX_GPIO79__FUNC_GBE_TXD1>,
<PINMUX_GPIO80__FUNC_GBE_TXD0>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
};
pins-cc {
pinmux = <PINMUX_GPIO85__FUNC_GBE_TXC>,
<PINMUX_GPIO88__FUNC_GBE_TXEN>,
<PINMUX_GPIO87__FUNC_GBE_RXDV>,
<PINMUX_GPIO86__FUNC_GBE_RXC>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
};
pins-rxd {
pinmux = <PINMUX_GPIO81__FUNC_GBE_RXD3>,
@@ -377,7 +377,7 @@
mmc0_default_pins: mmc0-default-pins {
pins-clk {
pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -392,13 +392,13 @@
<PINMUX_GPIO116__FUNC_MSDC0_DAT7>,
<PINMUX_GPIO121__FUNC_MSDC0_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
pins-rst {
pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
@@ -406,7 +406,7 @@
mmc0_uhs_pins: mmc0-uhs-pins {
pins-clk {
pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -421,19 +421,19 @@
<PINMUX_GPIO116__FUNC_MSDC0_DAT7>,
<PINMUX_GPIO121__FUNC_MSDC0_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
pins-ds {
pinmux = <PINMUX_GPIO127__FUNC_MSDC0_DSL>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
pins-rst {
pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
@@ -441,7 +441,7 @@
mmc1_default_pins: mmc1-default-pins {
pins-clk {
pinmux = <PINMUX_GPIO111__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -452,7 +452,7 @@
<PINMUX_GPIO114__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO115__FUNC_MSDC1_DAT3>;
input-enable;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
@@ -465,7 +465,7 @@
mmc1_uhs_pins: mmc1-uhs-pins {
pins-clk {
pinmux = <PINMUX_GPIO111__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -476,7 +476,7 @@
<PINMUX_GPIO114__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO115__FUNC_MSDC1_DAT3>;
input-enable;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-evb.dts b/arch/arm64/boot/dts/mediatek/mt8195-evb.dts
index 341b6e074139..83456d649ff7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-evb.dts
@@ -74,7 +74,6 @@
pinmux = <PINMUX_GPIO8__FUNC_SDA0>,
<PINMUX_GPIO9__FUNC_SCL0>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- mediatek,drive-strength-adv = <0>;
drive-strength = <6>;
};
};
@@ -84,7 +83,6 @@
pinmux = <PINMUX_GPIO10__FUNC_SDA1>,
<PINMUX_GPIO11__FUNC_SCL1>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- mediatek,drive-strength-adv = <0>;
drive-strength = <6>;
};
};
@@ -94,7 +92,7 @@
pinmux = <PINMUX_GPIO16__FUNC_SDA4>,
<PINMUX_GPIO17__FUNC_SCL4>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- mediatek,drive-strength-adv = <7>;
+ drive-strength-microamp = <1000>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 5d8b68f86ce4..2ee45752583c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -3880,7 +3880,7 @@
};
};
- gpu0-thermal {
+ gpu-thermal {
polling-delay = <1000>;
polling-delay-passive = <250>;
thermal-sensors = <&lvts_ap MT8195_AP_GPU0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
index 50cbaefa1a99..4211a992dd9d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
@@ -308,7 +308,7 @@
mmc1_uhs_pins: mmc1-uhs-pins {
clk-pins {
pinmux = <MT8365_PIN_88_MSDC1_CLK__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -319,7 +319,7 @@
<MT8365_PIN_92_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
<MT8365_PIN_87_MSDC1_CMD__FUNC_MSDC1_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8365.dtsi b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
index 24581f7410aa..eb449bfa8803 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
@@ -300,9 +300,8 @@
};
scpsys: syscon@10006000 {
- compatible = "mediatek,mt8365-syscfg", "syscon", "simple-mfd";
+ compatible = "mediatek,mt8365-scpsys", "syscon", "simple-mfd";
reg = <0 0x10006000 0 0x1000>;
- #power-domain-cells = <1>;
/* System Power Manager */
spm: power-controller {
diff --git a/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
new file mode 100644
index 000000000000..1474bef7e754
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ * Author: Chris Chen <chris-qj.chen@mediatek.com>
+ * Pablo Sun <pablo.sun@mediatek.com>
+ * Macpaul Lin <macpaul.lin@mediatek.com>
+ */
+/dts-v1/;
+
+#include "mt8188.dtsi"
+#include "mt6359.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h>
+#include <dt-bindings/regulator/mediatek,mt6360-regulator.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/usb/pd.h>
+
+/ {
+ model = "MediaTek Genio-700 EVK";
+ compatible = "mediatek,mt8390-evk", "mediatek,mt8390",
+ "mediatek,mt8188";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:921600n8";
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0x2 0x00000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * 12 MiB reserved for OP-TEE (BL32)
+ * +-----------------------+ 0x43e0_0000
+ * | SHMEM 2MiB |
+ * +-----------------------+ 0x43c0_0000
+ * | | TA_RAM 8MiB |
+ * + TZDRAM +--------------+ 0x4340_0000
+ * | | TEE_RAM 2MiB |
+ * +-----------------------+ 0x4320_0000
+ */
+ optee_reserved: optee@43200000 {
+ no-map;
+ reg = <0 0x43200000 0 0x00c00000>;
+ };
+
+ scp_mem: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+ };
+
+ /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+ bl31_secmon_reserved: memory@54600000 {
+ no-map;
+ reg = <0 0x54600000 0x0 0x200000>;
+ };
+
+ apu_mem: memory@55000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x55000000 0 0x1400000>; /* 20 MB */
+ };
+
+ vpu_mem: memory@57000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x57000000 0 0x1400000>; /* 20 MB */
+ };
+ };
+
+ common_fixed_5v: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "5v_en";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ edp_panel_fixed_3v3: regulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "edp_panel_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpio = <&pio 15 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&edp_panel_3v3_en_pins>;
+ };
+
+ gpio_fixed_3v3: regulator-2 {
+ compatible = "regulator-fixed";
+ regulator-name = "gpio_3v3_en";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ sdio_fixed_1v8: regulator-3 {
+ compatible = "regulator-fixed";
+ regulator-name = "sdio_io";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ sdio_fixed_3v3: regulator-4 {
+ compatible = "regulator-fixed";
+ regulator-name = "sdio_card";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 74 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ touch0_fixed_3v3: regulator-5 {
+ compatible = "regulator-fixed";
+ regulator-name = "touch_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 119 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_hub_fixed_3v3: regulator-6 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_hub_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 112 GPIO_ACTIVE_HIGH>; /* HUB_3V3_EN */
+ startup-delay-us = <10000>;
+ enable-active-high;
+ };
+
+ usb_hub_reset_1v8: regulator-7 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_hub_reset";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&pio 7 GPIO_ACTIVE_HIGH>; /* HUB_RESET */
+ vin-supply = <&usb_hub_fixed_3v3>;
+ };
+
+ usb_p0_vbus: regulator-8 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_p0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 84 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p1_vbus: regulator-9 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_p1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 87 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p2_vbus: regulator-10 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_p2_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ };
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ touchscreen@5d {
+ compatible = "goodix,gt9271";
+ reg = <0x5d>;
+ interrupt-parent = <&pio>;
+ interrupts-extended = <&pio 6 IRQ_TYPE_EDGE_RISING>;
+ irq-gpios = <&pio 6 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 5 GPIO_ACTIVE_HIGH>;
+ AVDD28-supply = <&touch0_fixed_3v3>;
+ VDDIO-supply = <&mt6359_vio18_ldo_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_pins>;
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-1 = <&rt1715_int_pins>;
+ clock-frequency = <1000000>;
+ status = "okay";
+};
+
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_default_pins>;
+ pinctrl-1 = <&mmc0_uhs_pins>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ supports-cqe;
+ cap-mmc-hw-reset;
+ no-sdio;
+ no-sd;
+ hs400-ds-delay = <0x1481b>;
+ vmmc-supply = <&mt6359_vemc_1_ldo_reg>;
+ vqmmc-supply = <&mt6359_vufs_ldo_reg>;
+ non-removable;
+};
+
+&mmc1 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_default_pins>;
+ pinctrl-1 = <&mmc1_uhs_pins>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ no-mmc;
+ no-sdio;
+ cd-gpios = <&pio 2 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&mt6359_vpa_buck_reg>;
+ vqmmc-supply = <&mt6359_vsim1_ldo_reg>;
+};
+
+&mt6359_vbbck_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vcn18_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vcn33_2_bt_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vcore_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vgpu11_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vpa_buck_reg {
+ regulator-max-microvolt = <3100000>;
+};
+
+&mt6359_vpu_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vrf12_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vsim1_ldo_reg {
+ regulator-enable-ramp-delay = <480>;
+};
+
+&mt6359_vufs_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359codec {
+ mediatek,mic-type-0 = <1>; /* ACC */
+ mediatek,mic-type-1 = <3>; /* DCC */
+};
+
+&pio {
+ audio_default_pins: audio-default-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI>,
+ <PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI>,
+ <PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0>,
+ <PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1>,
+ <PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0>,
+ <PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1>,
+ <PINMUX_GPIO107__FUNC_B0_I2SIN_MCK>,
+ <PINMUX_GPIO108__FUNC_B0_I2SIN_BCK>,
+ <PINMUX_GPIO109__FUNC_B0_I2SIN_WS>,
+ <PINMUX_GPIO110__FUNC_I0_I2SIN_D0>,
+ <PINMUX_GPIO114__FUNC_O_I2SO2_MCK>,
+ <PINMUX_GPIO115__FUNC_B0_I2SO2_BCK>,
+ <PINMUX_GPIO116__FUNC_B0_I2SO2_WS>,
+ <PINMUX_GPIO117__FUNC_O_I2SO2_D0>,
+ <PINMUX_GPIO118__FUNC_O_I2SO2_D1>,
+ <PINMUX_GPIO121__FUNC_B0_PCM_CLK>,
+ <PINMUX_GPIO122__FUNC_B0_PCM_SYNC>,
+ <PINMUX_GPIO124__FUNC_I0_PCM_DI>,
+ <PINMUX_GPIO125__FUNC_O_DMIC1_CLK>,
+ <PINMUX_GPIO126__FUNC_I0_DMIC1_DAT>,
+ <PINMUX_GPIO128__FUNC_O_DMIC2_CLK>,
+ <PINMUX_GPIO129__FUNC_I0_DMIC2_DAT>;
+ };
+ };
+
+ dptx_pins: dptx-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO46__FUNC_I0_DP_TX_HPD>;
+ bias-pull-up;
+ };
+ };
+
+ edp_panel_3v3_en_pins: edp-panel-3v3-en-pins {
+ pins1 {
+ pinmux = <PINMUX_GPIO15__FUNC_B_GPIO15>;
+ output-high;
+ };
+ };
+
+ eth_default_pins: eth-default-pins {
+ pins-cc {
+ pinmux = <PINMUX_GPIO139__FUNC_B0_GBE_TXC>,
+ <PINMUX_GPIO140__FUNC_I0_GBE_RXC>,
+ <PINMUX_GPIO141__FUNC_I0_GBE_RXDV>,
+ <PINMUX_GPIO142__FUNC_O_GBE_TXEN>;
+ drive-strength = <8>;
+ };
+
+ pins-mdio {
+ pinmux = <PINMUX_GPIO143__FUNC_O_GBE_MDC>,
+ <PINMUX_GPIO144__FUNC_B1_GBE_MDIO>;
+ drive-strength = <8>;
+ input-enable;
+ };
+
+ pins-power {
+ pinmux = <PINMUX_GPIO145__FUNC_B_GPIO145>,
+ <PINMUX_GPIO146__FUNC_B_GPIO146>;
+ output-high;
+ };
+
+ pins-rxd {
+ pinmux = <PINMUX_GPIO135__FUNC_I0_GBE_RXD3>,
+ <PINMUX_GPIO136__FUNC_I0_GBE_RXD2>,
+ <PINMUX_GPIO137__FUNC_I0_GBE_RXD1>,
+ <PINMUX_GPIO138__FUNC_I0_GBE_RXD0>;
+ drive-strength = <8>;
+ };
+
+ pins-txd {
+ pinmux = <PINMUX_GPIO131__FUNC_O_GBE_TXD3>,
+ <PINMUX_GPIO132__FUNC_O_GBE_TXD2>,
+ <PINMUX_GPIO133__FUNC_O_GBE_TXD1>,
+ <PINMUX_GPIO134__FUNC_O_GBE_TXD0>;
+ drive-strength = <8>;
+ };
+ };
+
+ eth_sleep_pins: eth-sleep-pins {
+ pins-cc {
+ pinmux = <PINMUX_GPIO139__FUNC_B_GPIO139>,
+ <PINMUX_GPIO140__FUNC_B_GPIO140>,
+ <PINMUX_GPIO141__FUNC_B_GPIO141>,
+ <PINMUX_GPIO142__FUNC_B_GPIO142>;
+ };
+
+ pins-mdio {
+ pinmux = <PINMUX_GPIO143__FUNC_B_GPIO143>,
+ <PINMUX_GPIO144__FUNC_B_GPIO144>;
+ input-disable;
+ bias-disable;
+ };
+
+ pins-rxd {
+ pinmux = <PINMUX_GPIO135__FUNC_B_GPIO135>,
+ <PINMUX_GPIO136__FUNC_B_GPIO136>,
+ <PINMUX_GPIO137__FUNC_B_GPIO137>,
+ <PINMUX_GPIO138__FUNC_B_GPIO138>;
+ };
+
+ pins-txd {
+ pinmux = <PINMUX_GPIO131__FUNC_B_GPIO131>,
+ <PINMUX_GPIO132__FUNC_B_GPIO132>,
+ <PINMUX_GPIO133__FUNC_B_GPIO133>,
+ <PINMUX_GPIO134__FUNC_B_GPIO134>;
+ };
+ };
+
+ i2c0_pins: i2c0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO56__FUNC_B1_SDA0>,
+ <PINMUX_GPIO55__FUNC_B1_SCL0>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ pins {
+ pinmux = <PINMUX_GPIO58__FUNC_B1_SDA1>,
+ <PINMUX_GPIO57__FUNC_B1_SCL1>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c2_pins: i2c2-pins {
+ pins {
+ pinmux = <PINMUX_GPIO60__FUNC_B1_SDA2>,
+ <PINMUX_GPIO59__FUNC_B1_SCL2>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c3_pins: i2c3-pins {
+ pins {
+ pinmux = <PINMUX_GPIO62__FUNC_B1_SDA3>,
+ <PINMUX_GPIO61__FUNC_B1_SCL3>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c4_pins: i2c4-pins {
+ pins {
+ pinmux = <PINMUX_GPIO64__FUNC_B1_SDA4>,
+ <PINMUX_GPIO63__FUNC_B1_SCL4>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c5_pins: i2c5-pins {
+ pins {
+ pinmux = <PINMUX_GPIO66__FUNC_B1_SDA5>,
+ <PINMUX_GPIO65__FUNC_B1_SCL5>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c6_pins: i2c6-pins {
+ pins {
+ pinmux = <PINMUX_GPIO68__FUNC_B1_SDA6>,
+ <PINMUX_GPIO67__FUNC_B1_SCL6>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ gpio_key_pins: gpio-key-pins {
+ pins {
+ pinmux = <PINMUX_GPIO42__FUNC_B1_KPCOL0>,
+ <PINMUX_GPIO43__FUNC_B1_KPCOL1>,
+ <PINMUX_GPIO44__FUNC_B1_KPROW0>;
+ };
+ };
+
+ mmc0_default_pins: mmc0-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
+ <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
+ <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
+ <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
+ <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
+ <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
+ <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
+ <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
+ <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc0_uhs_pins: mmc0-uhs-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
+ <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
+ <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
+ <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
+ <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
+ <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
+ <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
+ <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
+ <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-ds {
+ pinmux = <PINMUX_GPIO162__FUNC_B0_MSDC0_DSL>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc1_default_pins: mmc1-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>,
+ <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>,
+ <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>,
+ <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>,
+ <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-insert {
+ pinmux = <PINMUX_GPIO2__FUNC_B_GPIO2>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_uhs_pins: mmc1-uhs-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>,
+ <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>,
+ <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>,
+ <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>,
+ <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc2_default_pins: mmc2-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>;
+ drive-strength = <4>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>,
+ <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>,
+ <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>,
+ <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>,
+ <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-pcm {
+ pinmux = <PINMUX_GPIO123__FUNC_O_PCM_DO>;
+ };
+ };
+
+ mmc2_uhs_pins: mmc2-uhs-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>;
+ drive-strength = <4>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>,
+ <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>,
+ <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>,
+ <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>,
+ <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc2_eint_pins: mmc2-eint-pins {
+ pins-dat1 {
+ pinmux = <PINMUX_GPIO172__FUNC_B_GPIO172>;
+ input-enable;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc2_dat1_pins: mmc2-dat1-pins {
+ pins-dat1 {
+ pinmux = <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ panel_default_pins: panel-default-pins {
+ pins-dcdc {
+ pinmux = <PINMUX_GPIO45__FUNC_B_GPIO45>;
+ output-low;
+ };
+
+ pins-en {
+ pinmux = <PINMUX_GPIO111__FUNC_B_GPIO111>;
+ output-low;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO25__FUNC_B_GPIO25>;
+ output-high;
+ };
+ };
+
+ rt1715_int_pins: rt1715-int-pins {
+ pins_cmd0_dat {
+ pinmux = <PINMUX_GPIO12__FUNC_B_GPIO12>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+
+ spi0_pins: spi0-pins {
+ pins-spi {
+ pinmux = <PINMUX_GPIO69__FUNC_O_SPIM0_CSB>,
+ <PINMUX_GPIO70__FUNC_O_SPIM0_CLK>,
+ <PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI>,
+ <PINMUX_GPIO72__FUNC_B0_SPIM0_MISO>;
+ bias-disable;
+ };
+ };
+
+ spi1_pins: spi1-pins {
+ pins-spi {
+ pinmux = <PINMUX_GPIO75__FUNC_O_SPIM1_CSB>,
+ <PINMUX_GPIO76__FUNC_O_SPIM1_CLK>,
+ <PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI>,
+ <PINMUX_GPIO78__FUNC_B0_SPIM1_MISO>;
+ bias-disable;
+ };
+ };
+
+ spi2_pins: spi2-pins {
+ pins-spi {
+ pinmux = <PINMUX_GPIO79__FUNC_O_SPIM2_CSB>,
+ <PINMUX_GPIO80__FUNC_O_SPIM2_CLK>,
+ <PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI>,
+ <PINMUX_GPIO82__FUNC_B0_SPIM2_MISO>;
+ bias-disable;
+ };
+ };
+
+ touch_pins: touch-pins {
+ pins-irq {
+ pinmux = <PINMUX_GPIO6__FUNC_B_GPIO6>;
+ input-enable;
+ bias-disable;
+ };
+
+ pins-reset {
+ pinmux = <PINMUX_GPIO5__FUNC_B_GPIO5>;
+ output-high;
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO31__FUNC_O_UTXD0>,
+ <PINMUX_GPIO32__FUNC_I1_URXD0>;
+ bias-pull-up;
+ };
+ };
+
+ uart1_pins: uart1-pins {
+ pins {
+ pinmux = <PINMUX_GPIO33__FUNC_O_UTXD1>,
+ <PINMUX_GPIO34__FUNC_I1_URXD1>;
+ bias-pull-up;
+ };
+ };
+
+ uart2_pins: uart2-pins {
+ pins {
+ pinmux = <PINMUX_GPIO35__FUNC_O_UTXD2>,
+ <PINMUX_GPIO36__FUNC_I1_URXD2>;
+ bias-pull-up;
+ };
+ };
+
+ usb_default_pins: usb-default-pins {
+ pins-iddig {
+ pinmux = <PINMUX_GPIO83__FUNC_B_GPIO83>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins-valid {
+ pinmux = <PINMUX_GPIO85__FUNC_I0_VBUSVALID>;
+ input-enable;
+ };
+
+ pins-vbus {
+ pinmux = <PINMUX_GPIO84__FUNC_O_USB_DRVVBUS>;
+ output-high;
+ };
+ };
+
+ usb1_default_pins: usb1-default-pins {
+ pins-valid {
+ pinmux = <PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P>;
+ input-enable;
+ };
+
+ pins-usb-hub-3v3-en {
+ pinmux = <PINMUX_GPIO112__FUNC_B_GPIO112>;
+ output-high;
+ };
+ };
+
+ wifi_pwrseq_pins: wifi-pwrseq-pins {
+ pins-wifi-enable {
+ pinmux = <PINMUX_GPIO127__FUNC_B_GPIO127>;
+ output-low;
+ };
+ };
+};
+
+&pmic {
+ interrupt-parent = <&pio>;
+ interrupts = <222 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&scp {
+ memory-region = <&scp_mem>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-0 = <&uart0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-0 = <&uart1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&spi2 {
+ pinctrl-0 = <&spi2_pins>;
+ pinctrl-names = "default";
+ mediatek,pad-select = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+};
+
+&u3phy0 {
+ status = "okay";
+};
+
+&u3phy1 {
+ status = "okay";
+};
+
+&u3phy2 {
+ status = "okay";
+};
+
+&xhci0 {
+ status = "okay";
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+};
+
+&xhci1 {
+ status = "okay";
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ vbus-supply = <&usb_hub_reset_1v8>;
+};
+
+&xhci2 {
+ status = "okay";
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
index 1558649f633c..a06610fff8ad 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
@@ -475,7 +475,7 @@
<PINMUX_GPIO86__FUNC_GBE_RXC>,
<PINMUX_GPIO87__FUNC_GBE_RXDV>,
<PINMUX_GPIO88__FUNC_GBE_TXEN>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
};
pins-mdio {
@@ -502,7 +502,7 @@
<PINMUX_GPIO78__FUNC_GBE_TXD2>,
<PINMUX_GPIO79__FUNC_GBE_TXD1>,
<PINMUX_GPIO80__FUNC_GBE_TXD0>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
};
};
@@ -567,7 +567,7 @@
pinmux = <PINMUX_GPIO12__FUNC_SDA2>,
<PINMUX_GPIO13__FUNC_SCL2>;
bias-pull-up = <MTK_PULL_SET_RSEL_111>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
};
};
@@ -582,7 +582,7 @@
mmc0_default_pins: mmc0-default-pins {
pins-clk {
pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -597,13 +597,13 @@
<PINMUX_GPIO116__FUNC_MSDC0_DAT7>,
<PINMUX_GPIO121__FUNC_MSDC0_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
pins-rst {
pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>;
- drive-strength = <MTK_DRIVE_6mA>;
+ drive-strength = <6>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
@@ -611,7 +611,7 @@
mmc0_uhs_pins: mmc0-uhs-pins {
pins-clk {
pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -626,19 +626,19 @@
<PINMUX_GPIO116__FUNC_MSDC0_DAT7>,
<PINMUX_GPIO121__FUNC_MSDC0_CMD>;
input-enable;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
pins-ds {
pinmux = <PINMUX_GPIO127__FUNC_MSDC0_DSL>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
pins-rst {
pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
@@ -646,7 +646,7 @@
mmc1_default_pins: mmc1-default-pins {
pins-clk {
pinmux = <PINMUX_GPIO111__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -657,7 +657,7 @@
<PINMUX_GPIO114__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO115__FUNC_MSDC1_DAT3>;
input-enable;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
@@ -665,7 +665,7 @@
mmc1_uhs_pins: mmc1-uhs-pins {
pins-clk {
pinmux = <PINMUX_GPIO111__FUNC_MSDC1_CLK>;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
@@ -676,7 +676,7 @@
<PINMUX_GPIO114__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO115__FUNC_MSDC1_DAT3>;
input-enable;
- drive-strength = <MTK_DRIVE_8mA>;
+ drive-strength = <8>;
bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
};
};
@@ -854,6 +854,10 @@
&u3phy1 {
status = "okay";
+
+ u3port1: usb-phy@700 {
+ mediatek,force-mode;
+ };
};
&u3phy2 {
@@ -900,6 +904,8 @@
};
&xhci1 {
+ phys = <&u2port1 PHY_TYPE_USB2>,
+ <&u3port1 PHY_TYPE_USB3>;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
new file mode 100644
index 000000000000..e4b2af9489a8
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2024 Kontron Europe GmbH
+ *
+ * Author: Michael Walle <mwalle@kernel.org>
+ */
+/dts-v1/;
+
+#include "mt8195.dtsi"
+#include "mt6359.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/mt8195-pinfunc.h>
+#include <dt-bindings/regulator/mediatek,mt6360-regulator.h>
+#include <dt-bindings/spmi/spmi.h>
+
+/ {
+ model = "Kontron 3.5\"-SBC-i1200";
+ compatible = "kontron,3-5-sbc-i1200", "mediatek,mt8395", "mediatek,mt8195";
+
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pins>;
+
+ key-0 {
+ gpios = <&pio 106 GPIO_ACTIVE_LOW>;
+ label = "volume_up";
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ led-0 {
+ gpios = <&pio 107 GPIO_ACTIVE_HIGH>;
+ default-state = "keep";
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0x0 0x80000000>;
+ };
+
+ vsys: regulator-vsys {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * 12 MiB reserved for OP-TEE (BL32)
+ * +-----------------------+ 0x43e0_0000
+ * | SHMEM 2MiB |
+ * +-----------------------+ 0x43c0_0000
+ * | | TA_RAM 8MiB |
+ * + TZDRAM +--------------+ 0x4340_0000
+ * | | TEE_RAM 2MiB |
+ * +-----------------------+ 0x4320_0000
+ */
+ optee_reserved: optee@43200000 {
+ no-map;
+ reg = <0 0x43200000 0 0x00c00000>;
+ };
+
+ scp_mem: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+ };
+
+ vpu_mem: memory@53000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x53000000 0 0x1400000>; /* 20 MB */
+ };
+
+ /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+ bl31_secmon_mem: memory@54600000 {
+ no-map;
+ reg = <0 0x54600000 0x0 0x200000>;
+ };
+
+ snd_dma_mem: memory@60000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60000000 0 0x1100000>;
+ no-map;
+ };
+
+ apu_mem: memory@62000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x62000000 0 0x1400000>; /* 20 MB */
+ };
+ };
+
+ thermal_sensor0: thermal-sensor-0 {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&auxadc 0>;
+ io-channel-names = "sensor-channel";
+ temperature-lookup-table = <(-25000) 1474
+ (-20000) 1374
+ (-15000) 1260
+ (-10000) 1134
+ (-5000) 1004
+ 0 874
+ 5000 750
+ 10000 635
+ 15000 532
+ 20000 443
+ 25000 367
+ 30000 303
+ 35000 250
+ 40000 206
+ 45000 170
+ 50000 141
+ 55000 117
+ 60000 97
+ 65000 81
+ 70000 68
+ 75000 57
+ 80000 48
+ 85000 41
+ 90000 35
+ 95000 30
+ 100000 25
+ 105000 22
+ 110000 19
+ 115000 16
+ 120000 14
+ 125000 12
+ 130000 10
+ 135000 9
+ 140000 8
+ 145000 7
+ 150000 6>;
+ };
+
+ thermal_sensor1: thermal-sensor-1 {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&auxadc 1>;
+ io-channel-names = "sensor-channel";
+ temperature-lookup-table = <(-25000) 1474
+ (-20000) 1374
+ (-15000) 1260
+ (-10000) 1134
+ (-5000) 1004
+ 0 874
+ 5000 750
+ 10000 635
+ 15000 532
+ 20000 443
+ 25000 367
+ 30000 303
+ 35000 250
+ 40000 206
+ 45000 170
+ 50000 141
+ 55000 117
+ 60000 97
+ 65000 81
+ 70000 68
+ 75000 57
+ 80000 48
+ 85000 41
+ 90000 35
+ 95000 30
+ 100000 25
+ 105000 22
+ 110000 19
+ 115000 16
+ 120000 14
+ 125000 12
+ 130000 10
+ 135000 9
+ 140000 8
+ 145000 7
+ 150000 6>;
+ };
+
+ thermal_sensor2: thermal-sensor-2 {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&auxadc 2>;
+ io-channel-names = "sensor-channel";
+ temperature-lookup-table = <(-25000) 1474
+ (-20000) 1374
+ (-15000) 1260
+ (-10000) 1134
+ (-5000) 1004
+ 0 874
+ 5000 750
+ 10000 635
+ 15000 532
+ 20000 443
+ 25000 367
+ 30000 303
+ 35000 250
+ 40000 206
+ 45000 170
+ 50000 141
+ 55000 117
+ 60000 97
+ 65000 81
+ 70000 68
+ 75000 57
+ 80000 48
+ 85000 41
+ 90000 35
+ 95000 30
+ 100000 25
+ 105000 22
+ 110000 19
+ 115000 16
+ 120000 14
+ 125000 12
+ 130000 10
+ 135000 9
+ 140000 8
+ 145000 7
+ 150000 6>;
+ };
+};
+
+&auxadc {
+ status = "okay";
+};
+
+&eth {
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethernet_phy0>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&eth_default_pins>;
+ pinctrl-1 = <&eth_sleep_pins>;
+ status = "okay";
+
+ mdio {
+ ethernet_phy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-id001c.c916";
+ reg = <0x1>;
+ interrupts-extended = <&pio 94 IRQ_TYPE_LEVEL_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
+ reset-gpios = <&pio 93 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+ mali-supply = <&mt6315_7_vbuck1>;
+};
+
+/* CSI1/CSI2 connector */
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+/* CSI3 connector */
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ /* LVDS bridge @f */
+};
+
+/* Touch panel connector */
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+/* B2B connector */
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&i2c6 {
+ clock-frequency = <400000>;
+ pinctrl-0 = <&i2c6_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ mt6360: pmic@34 {
+ compatible = "mediatek,mt6360";
+ reg = <0x34>;
+ interrupt-controller;
+ interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "IRQB";
+ #interrupt-cells = <1>;
+
+ regulator {
+ compatible = "mediatek,mt6360-regulator";
+ LDO_VIN1-supply = <&vsys>;
+ LDO_VIN2-supply = <&vsys>;
+ LDO_VIN3-supply = <&vsys>;
+
+ mt6360_buck1: BUCK1 {
+ regulator-name = "emi_vdd2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP
+ MT6360_OPMODE_ULP>;
+ regulator-always-on;
+ };
+
+ mt6360_buck2: BUCK2 {
+ regulator-name = "emi_vddq";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP
+ MT6360_OPMODE_ULP>;
+ regulator-always-on;
+ };
+
+ mt6360_ldo1: LDO1 {
+ regulator-name = "mt6360_ldo1"; /* Test point */
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP>;
+ };
+
+ mt6360_ldo2: LDO2 {
+ regulator-name = "panel1_p1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP>;
+ };
+
+ mt6360_ldo3: LDO3 {
+ regulator-name = "vmc_pmu";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP>;
+ };
+
+ mt6360_ldo5: LDO5 {
+ regulator-name = "vmch_pmu";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP>;
+ };
+
+ mt6360_ldo6: LDO6 {
+ regulator-name = "mt6360_ldo6"; /* Test point */
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2100000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP>;
+ };
+
+ mt6360_ldo7: LDO7 {
+ regulator-name = "emi_vmddr_en";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allowed-modes = <MT6360_OPMODE_NORMAL
+ MT6360_OPMODE_LP>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_default_pins>;
+ pinctrl-1 = <&mmc0_uhs_pins>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ hs400-ds-delay = <0x14c11>;
+ cap-mmc-highspeed;
+ cap-mmc-hw-reset;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ no-sdio;
+ no-sd;
+ non-removable;
+ vmmc-supply = <&mt6359_vemc_1_ldo_reg>;
+ vqmmc-supply = <&mt6359_vufs_ldo_reg>;
+ status = "okay";
+};
+
+&mmc1 {
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_default_pins>, <&mmc1_detect_pins>;
+ pinctrl-1 = <&mmc1_default_pins>;
+ cd-gpios = <&pio 129 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ no-mmc;
+ vmmc-supply = <&mt6360_ldo5>;
+ vqmmc-supply = <&mt6360_ldo3>;
+ status = "okay";
+};
+
+&mt6359_vbbck_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vcore_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vgpu11_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vproc1_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vproc2_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vpu_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vrf12_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vsram_md_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vsram_others_ldo_reg {
+ regulator-always-on;
+};
+
+&nor_flash {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nor_pins_default>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <52000000>;
+ spi-rx-bus-width = <2>;
+ spi-tx-bus-width = <2>;
+ };
+};
+
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_pins_default>;
+ status = "okay";
+};
+
+&pcie1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_pins_default>;
+ status = "okay";
+};
+
+&pciephy {
+ status = "okay";
+};
+
+&pio {
+ eth_default_pins: eth-default-pins {
+ pins-txd {
+ pinmux = <PINMUX_GPIO77__FUNC_GBE_TXD3>,
+ <PINMUX_GPIO78__FUNC_GBE_TXD2>,
+ <PINMUX_GPIO79__FUNC_GBE_TXD1>,
+ <PINMUX_GPIO80__FUNC_GBE_TXD0>;
+ drive-strength = <8>;
+ };
+
+ pins-rxd {
+ pinmux = <PINMUX_GPIO81__FUNC_GBE_RXD3>,
+ <PINMUX_GPIO82__FUNC_GBE_RXD2>,
+ <PINMUX_GPIO83__FUNC_GBE_RXD1>,
+ <PINMUX_GPIO84__FUNC_GBE_RXD0>;
+ };
+
+ pins-cc {
+ pinmux = <PINMUX_GPIO85__FUNC_GBE_TXC>,
+ <PINMUX_GPIO86__FUNC_GBE_RXC>,
+ <PINMUX_GPIO87__FUNC_GBE_RXDV>,
+ <PINMUX_GPIO88__FUNC_GBE_TXEN>;
+ drive-strength = <8>;
+ };
+
+ pins-mdio {
+ pinmux = <PINMUX_GPIO89__FUNC_GBE_MDC>,
+ <PINMUX_GPIO90__FUNC_GBE_MDIO>;
+ input-enable;
+ };
+
+ pins-power {
+ pinmux = <PINMUX_GPIO91__FUNC_GPIO91>,
+ <PINMUX_GPIO92__FUNC_GPIO92>;
+ output-high;
+ };
+
+ pins-reset {
+ pinmux = <PINMUX_GPIO93__FUNC_GPIO93>;
+ output-high;
+ };
+
+ pins-interrupt {
+ pinmux = <PINMUX_GPIO94__FUNC_GPIO94>;
+ input-enable;
+ };
+ };
+
+ eth_sleep_pins: eth-sleep-pins {
+ pins-txd {
+ pinmux = <PINMUX_GPIO77__FUNC_GPIO77>,
+ <PINMUX_GPIO78__FUNC_GPIO78>,
+ <PINMUX_GPIO79__FUNC_GPIO79>,
+ <PINMUX_GPIO80__FUNC_GPIO80>;
+ };
+
+ pins-cc {
+ pinmux = <PINMUX_GPIO85__FUNC_GPIO85>,
+ <PINMUX_GPIO88__FUNC_GPIO88>,
+ <PINMUX_GPIO87__FUNC_GPIO87>,
+ <PINMUX_GPIO86__FUNC_GPIO86>;
+ };
+
+ pins-rxd {
+ pinmux = <PINMUX_GPIO81__FUNC_GPIO81>,
+ <PINMUX_GPIO82__FUNC_GPIO82>,
+ <PINMUX_GPIO83__FUNC_GPIO83>,
+ <PINMUX_GPIO84__FUNC_GPIO84>;
+ };
+
+ pins-mdio {
+ pinmux = <PINMUX_GPIO89__FUNC_GPIO89>,
+ <PINMUX_GPIO90__FUNC_GPIO90>;
+ input-disable;
+ bias-disable;
+ };
+ };
+
+ gpio_keys_pins: gpio-keys-pins {
+ pins {
+ pinmux = <PINMUX_GPIO106__FUNC_GPIO106>;
+ input-enable;
+ };
+ };
+
+ i2c0_pins: i2c0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO8__FUNC_SDA0>,
+ <PINMUX_GPIO9__FUNC_SCL0>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ pins {
+ pinmux = <PINMUX_GPIO10__FUNC_SDA1>,
+ <PINMUX_GPIO11__FUNC_SCL1>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c2_pins: i2c2-default-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO12__FUNC_SDA2>,
+ <PINMUX_GPIO13__FUNC_SCL2>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c3_pins: i2c3-pins {
+ pins {
+ pinmux = <PINMUX_GPIO14__FUNC_SDA3>,
+ <PINMUX_GPIO15__FUNC_SCL3>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c4_pins: i2c4-pins {
+ pins {
+ pinmux = <PINMUX_GPIO16__FUNC_SDA4>,
+ <PINMUX_GPIO17__FUNC_SCL4>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c6_pins: i2c6-pins {
+ pins {
+ pinmux = <PINMUX_GPIO25__FUNC_SDA6>,
+ <PINMUX_GPIO26__FUNC_SCL6>;
+ bias-pull-up;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ mmc0_default_pins: mmc0-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO126__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO124__FUNC_MSDC0_DAT2>,
+ <PINMUX_GPIO123__FUNC_MSDC0_DAT3>,
+ <PINMUX_GPIO119__FUNC_MSDC0_DAT4>,
+ <PINMUX_GPIO118__FUNC_MSDC0_DAT5>,
+ <PINMUX_GPIO117__FUNC_MSDC0_DAT6>,
+ <PINMUX_GPIO116__FUNC_MSDC0_DAT7>,
+ <PINMUX_GPIO121__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc0_uhs_pins: mmc0-uhs-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO126__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO124__FUNC_MSDC0_DAT2>,
+ <PINMUX_GPIO123__FUNC_MSDC0_DAT3>,
+ <PINMUX_GPIO119__FUNC_MSDC0_DAT4>,
+ <PINMUX_GPIO118__FUNC_MSDC0_DAT5>,
+ <PINMUX_GPIO117__FUNC_MSDC0_DAT6>,
+ <PINMUX_GPIO116__FUNC_MSDC0_DAT7>,
+ <PINMUX_GPIO121__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-ds {
+ pinmux = <PINMUX_GPIO127__FUNC_MSDC0_DSL>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc1_default_pins: mmc1-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO111__FUNC_MSDC1_CLK>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO110__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO112__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO113__FUNC_MSDC1_DAT1>,
+ <PINMUX_GPIO114__FUNC_MSDC1_DAT2>,
+ <PINMUX_GPIO115__FUNC_MSDC1_DAT3>;
+ input-enable;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc1_detect_pins: mmc1-detect-pins {
+ pins-insert {
+ pinmux = <PINMUX_GPIO129__FUNC_GPIO129>;
+ bias-pull-up;
+ };
+ };
+
+ nor_pins_default: nor-default-pins {
+ pins-ck-io {
+ pinmux = <PINMUX_GPIO142__FUNC_SPINOR_IO0>,
+ <PINMUX_GPIO141__FUNC_SPINOR_CK>,
+ <PINMUX_GPIO143__FUNC_SPINOR_IO1>;
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ pins-cs {
+ pinmux = <PINMUX_GPIO140__FUNC_SPINOR_CS>;
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
+ pcie0_pins_default: pcie0-default-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO19__FUNC_WAKEN>,
+ <PINMUX_GPIO20__FUNC_PERSTN>,
+ <PINMUX_GPIO21__FUNC_CLKREQN>;
+ bias-pull-up;
+ };
+ };
+
+ pcie1_pins_default: pcie1-default-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO0__FUNC_PERSTN_1>,
+ <PINMUX_GPIO1__FUNC_CLKREQN_1>,
+ <PINMUX_GPIO2__FUNC_WAKEN_1>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ led_pins: led-pins {
+ pins-power-en {
+ pinmux = <PINMUX_GPIO107__FUNC_GPIO107>;
+ output-high;
+ };
+ };
+
+ spi0_pins: spi0-default-pins {
+ pins-cs-mosi-clk {
+ pinmux = <PINMUX_GPIO132__FUNC_SPIM0_CSB>,
+ <PINMUX_GPIO134__FUNC_SPIM0_MO>,
+ <PINMUX_GPIO133__FUNC_SPIM0_CLK>;
+ bias-disable;
+ };
+
+ pins-miso {
+ pinmux = <PINMUX_GPIO135__FUNC_SPIM0_MI>;
+ bias-pull-down;
+ };
+ };
+
+ spi1_pins: spi1-default-pins {
+ pins-cs-mosi-clk {
+ pinmux = <PINMUX_GPIO136__FUNC_SPIM1_CSB>,
+ <PINMUX_GPIO138__FUNC_SPIM1_MO>,
+ <PINMUX_GPIO137__FUNC_SPIM1_CLK>;
+ bias-disable;
+ };
+
+ pins-miso {
+ pinmux = <PINMUX_GPIO139__FUNC_SPIM1_MI>;
+ bias-pull-down;
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ pins-rx {
+ pinmux = <PINMUX_GPIO99__FUNC_URXD0>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins-tx {
+ pinmux = <PINMUX_GPIO98__FUNC_UTXD0>;
+ };
+ };
+
+ uart1_pins: uart1-pins {
+ pins-rx {
+ pinmux = <PINMUX_GPIO103__FUNC_URXD1>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins-tx {
+ pinmux = <PINMUX_GPIO102__FUNC_UTXD1>;
+ };
+
+ pins-rts {
+ pinmux = <PINMUX_GPIO100__FUNC_URTS1>;
+ };
+
+ pins-cts {
+ pinmux = <PINMUX_GPIO101__FUNC_UCTS1>;
+ input-enable;
+ };
+ };
+
+ uart2_pins: uart2-pins {
+ pins-rx {
+ pinmux = <PINMUX_GPIO68__FUNC_URXD2>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins-tx {
+ pinmux = <PINMUX_GPIO67__FUNC_UTXD2>;
+ };
+
+ pins-rts {
+ pinmux = <PINMUX_GPIO66__FUNC_URTS2>;
+ };
+
+ pins-cts {
+ pinmux = <PINMUX_GPIO65__FUNC_UCTS2>;
+ input-enable;
+ };
+ };
+
+ uart3_pins: uart3-pins {
+ pins-rx {
+ pinmux = <PINMUX_GPIO5__FUNC_URXD3>;
+ input-enable;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-tx {
+ pinmux = <PINMUX_GPIO4__FUNC_UTXD3>;
+ };
+ };
+
+ uart4_pins: uart4-pins {
+ pins-rx {
+ pinmux = <PINMUX_GPIO7__FUNC_URXD4>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins-tx {
+ pinmux = <PINMUX_GPIO6__FUNC_UTXD4>;
+ };
+ };
+};
+
+&pmic {
+ interrupts-extended = <&pio 222 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&scp {
+ memory-region = <&scp_mem>;
+ firmware-name = "mediatek/mt8195/scp.img";
+ status = "okay";
+};
+
+&spmi {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ mt6315@6 {
+ compatible = "mediatek,mt6315-regulator";
+ reg = <0x6 SPMI_USID>;
+
+ regulators {
+ mt6315_6_vbuck1: vbuck1 {
+ regulator-name = "Vbcpu";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-ramp-delay = <6250>;
+ regulator-allowed-modes = <0 1 2>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ mt6315@7 {
+ compatible = "mediatek,mt6315-regulator";
+ reg = <0x7 SPMI_USID>;
+
+ regulators {
+ mt6315_7_vbuck1: vbuck1 {
+ regulator-name = "Vgpu";
+ regulator-min-microvolt = <625000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-ramp-delay = <6250>;
+ regulator-allowed-modes = <0 1 2>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+/* USB3.2 front port */
+&ssusb0 {
+ dr_mode = "host";
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ status = "okay";
+};
+
+/* USB2.0 M.2 Key-E */
+&ssusb2 {
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ status = "okay";
+};
+
+/* USB2.0 to on-board usb hub */
+&ssusb3 {
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ mediatek,pad-select = <0>;
+ status = "okay";
+
+ tpm: tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <18500000>;
+ };
+};
+
+/* B2B connector */
+&spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_pins>;
+ mediatek,pad-select = <0>;
+ status = "okay";
+};
+
+&thermal_zones {
+ cpu-thermal {
+ polling-delay = <1000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&thermal_sensor0>;
+
+ trips {
+ trip-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ pcb-top-thermal {
+ polling-delay = <1000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&thermal_sensor1>;
+
+ trips {
+ trip-alert {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-crit {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ pcb-bottom-thermal {
+ polling-delay = <1000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&thermal_sensor2>;
+
+ trips {
+ trip-alert {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-crit {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins>;
+ status = "okay";
+};
+
+/* USB3 */
+&u3phy0 {
+ status = "okay";
+};
+
+/* PCIe1/USB2 */
+&u3phy1 {
+ status = "okay";
+};
+
+/* USB2 */
+&u3phy2 {
+ status = "okay";
+};
+
+/* USB2 */
+&u3phy3 {
+ status = "okay";
+};
+
+/* USB3.2 front port */
+&xhci0 {
+ status = "okay";
+};
+
+/* USB2.0 M.2 Key-B */
+&xhci1 {
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ mediatek,u3p-dis-msk = <0x01>;
+ status = "okay";
+};
+
+/* USB2.0 M.2 Key-E */
+&xhci2 {
+ status = "okay";
+};
+
+/* USB2.0 to on-board usb hub */
+&xhci3 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
index e5d9b671a405..4b5f6cf16f70 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
@@ -140,6 +140,38 @@
};
};
+&cpu0 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu1 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu2 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu3 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu4 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu5 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu6 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu7 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
&eth {
phy-mode = "rgmii-rxid";
phy-handle = <&rgmii_phy>;
@@ -343,6 +375,14 @@
};
};
+&mfg0 {
+ domain-supply = <&mt6315_7_vbuck1>;
+};
+
+&mfg1 {
+ domain-supply = <&mt6359_vsram_others_ldo_reg>;
+};
+
/* MMC0 Controller: eMMC (HS400). Power lines are shared with UFS! */
&mmc0 {
pinctrl-names = "default", "state_uhs";
@@ -434,6 +474,8 @@
};
&pio {
+ mediatek,rsel-resistance-in-si-unit;
+
eth_default_pins: eth-default-pins {
pins-cc {
pinmux = <PINMUX_GPIO85__FUNC_GBE_TXC>,
@@ -509,7 +551,7 @@
pins-bus {
pinmux = <PINMUX_GPIO12__FUNC_SDA2>,
<PINMUX_GPIO13__FUNC_SCL2>;
- bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ bias-pull-up = <1000>;
drive-strength = <6>;
drive-strength-microamp = <1000>;
};
@@ -519,7 +561,7 @@
pins-bus {
pinmux = <PINMUX_GPIO16__FUNC_SDA4>,
<PINMUX_GPIO17__FUNC_SCL4>;
- bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ bias-pull-up = <1000>;
drive-strength-microamp = <1000>;
};
};
@@ -528,7 +570,7 @@
pins {
pinmux = <PINMUX_GPIO25__FUNC_SDA6>,
<PINMUX_GPIO26__FUNC_SCL6>;
- bias-pull-up = <MTK_PULL_SET_RSEL_111>;
+ bias-disable;
};
};
@@ -683,6 +725,26 @@
};
};
+ usb3_port0_pins: usb3p0-default-pins {
+ pins-vbus {
+ pinmux = <PINMUX_GPIO63__FUNC_VBUSVALID>;
+ input-enable;
+ };
+ };
+
+ usb2_port0_pins: usb2p0-default-pins {
+ pins-iddig {
+ pinmux = <PINMUX_GPIO130__FUNC_IDDIG_1P>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins-vbus {
+ pinmux = <PINMUX_GPIO131__FUNC_USB_DRVVBUS_1P>;
+ output-low;
+ };
+ };
+
wifi_vreg_pins: wifi-vreg-pins {
pins-wifi-pmu-en {
pinmux = <PINMUX_GPIO65__FUNC_GPIO65>;
@@ -707,6 +769,10 @@
status = "okay";
};
+&pciephy {
+ status = "okay";
+};
+
&pmic {
interrupts-extended = <&pio 222 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -774,6 +840,18 @@
};
};
+&u3phy0 {
+ status = "okay";
+};
+
+&u3phy1 {
+ status = "okay";
+};
+
+&u3phy2 {
+ status = "okay";
+};
+
&uart0 {
/* Exposed at 40 pin connector */
pinctrl-0 = <&uart0_pins>;
@@ -789,6 +867,8 @@
};
&ssusb0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_port0_pins>;
role-switch-default-mode = "host";
usb-role-switch;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
@@ -802,6 +882,8 @@
};
&ssusb2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_port0_pins>;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
index 2c5574734c9e..e60acc74e822 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
@@ -13,6 +13,20 @@
priority = <200>;
};
+ i2c0_imux: i2c-mux-0 {
+ compatible = "i2c-mux-pinctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-parent = <&i2c0>;
+ };
+
+ i2c0_emux: i2c-mux-1 {
+ compatible = "i2c-mux-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-parent = <&i2c0>;
+ };
+
leds {
compatible = "gpio-leds";
led-0 {
@@ -248,6 +262,186 @@
default-state = "off";
};
};
+
+ sfp_eth12: sfp-eth12 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 11 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 11 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 11 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 12 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth13: sfp-eth13 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 12 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 12 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 12 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 13 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth14: sfp-eth14 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 13 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 13 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 13 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 14 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth15: sfp-eth15 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 14 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 14 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 14 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 15 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth48: sfp-eth48 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp5>;
+ tx-disable-gpios = <&sgpio_out2 15 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 15 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 15 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 16 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth49: sfp-eth49 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp6>;
+ tx-disable-gpios = <&sgpio_out2 16 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 16 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 16 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 17 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth50: sfp-eth50 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp7>;
+ tx-disable-gpios = <&sgpio_out2 17 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 17 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 17 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 18 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth51: sfp-eth51 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp8>;
+ tx-disable-gpios = <&sgpio_out2 18 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 18 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 18 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 19 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth52: sfp-eth52 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp9>;
+ tx-disable-gpios = <&sgpio_out2 19 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 19 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 19 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 20 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth53: sfp-eth53 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp10>;
+ tx-disable-gpios = <&sgpio_out2 20 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 20 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 20 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 21 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth54: sfp-eth54 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp11>;
+ tx-disable-gpios = <&sgpio_out2 21 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 21 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 21 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 22 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth55: sfp-eth55 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp12>;
+ tx-disable-gpios = <&sgpio_out2 22 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 22 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 22 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 23 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth56: sfp-eth56 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp13>;
+ tx-disable-gpios = <&sgpio_out2 23 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 23 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 23 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 24 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth57: sfp-eth57 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp14>;
+ tx-disable-gpios = <&sgpio_out2 24 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 24 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 24 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 25 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth58: sfp-eth58 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp15>;
+ tx-disable-gpios = <&sgpio_out2 25 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 25 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 25 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 26 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth59: sfp-eth59 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp16>;
+ tx-disable-gpios = <&sgpio_out2 26 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 26 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 26 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 27 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp17>;
+ tx-disable-gpios = <&sgpio_out2 27 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 27 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 27 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp18>;
+ tx-disable-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp19>;
+ tx-disable-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp20>;
+ tx-disable-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ };
};
&sgpio0 {
@@ -385,21 +579,6 @@
};
};
-&axi {
- i2c0_imux: i2c-mux-0 {
- compatible = "i2c-mux-pinctrl";
- #address-cells = <1>;
- #size-cells = <0>;
- i2c-parent = <&i2c0>;
- };
- i2c0_emux: i2c-mux-1 {
- compatible = "i2c-mux-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- i2c-parent = <&i2c0>;
- };
-};
-
&i2c0_imux {
pinctrl-names =
"i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
@@ -535,169 +714,6 @@
};
};
-&axi {
- sfp_eth12: sfp-eth12 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp1>;
- tx-disable-gpios = <&sgpio_out2 11 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 11 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 11 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 12 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth13: sfp-eth13 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp2>;
- tx-disable-gpios = <&sgpio_out2 12 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 12 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 12 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 13 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth14: sfp-eth14 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp3>;
- tx-disable-gpios = <&sgpio_out2 13 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 13 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 13 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 14 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth15: sfp-eth15 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp4>;
- tx-disable-gpios = <&sgpio_out2 14 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 14 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 14 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 15 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth48: sfp-eth48 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp5>;
- tx-disable-gpios = <&sgpio_out2 15 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 15 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 15 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 16 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth49: sfp-eth49 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp6>;
- tx-disable-gpios = <&sgpio_out2 16 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 16 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 16 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 17 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth50: sfp-eth50 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp7>;
- tx-disable-gpios = <&sgpio_out2 17 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 17 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 17 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 18 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth51: sfp-eth51 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp8>;
- tx-disable-gpios = <&sgpio_out2 18 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 18 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 18 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 19 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth52: sfp-eth52 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp9>;
- tx-disable-gpios = <&sgpio_out2 19 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 19 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 19 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 20 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth53: sfp-eth53 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp10>;
- tx-disable-gpios = <&sgpio_out2 20 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 20 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 20 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 21 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth54: sfp-eth54 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp11>;
- tx-disable-gpios = <&sgpio_out2 21 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 21 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 21 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 22 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth55: sfp-eth55 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp12>;
- tx-disable-gpios = <&sgpio_out2 22 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 22 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 22 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 23 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth56: sfp-eth56 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp13>;
- tx-disable-gpios = <&sgpio_out2 23 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 23 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 23 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 24 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth57: sfp-eth57 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp14>;
- tx-disable-gpios = <&sgpio_out2 24 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 24 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 24 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 25 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth58: sfp-eth58 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp15>;
- tx-disable-gpios = <&sgpio_out2 25 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 25 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 25 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 26 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth59: sfp-eth59 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp16>;
- tx-disable-gpios = <&sgpio_out2 26 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 26 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 26 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 27 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth60: sfp-eth60 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp17>;
- tx-disable-gpios = <&sgpio_out2 27 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 27 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 27 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth61: sfp-eth61 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp18>;
- tx-disable-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth62: sfp-eth62 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp19>;
- tx-disable-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth63: sfp-eth63 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp20>;
- tx-disable-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_LOW>;
- los-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
- };
-};
-
&switch {
ethernet-ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
index af2f1831f07f..196868898f49 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
@@ -13,6 +13,13 @@
priority = <200>;
};
+ i2c0_imux: i2c-mux {
+ compatible = "i2c-mux-pinctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-parent = <&i2c0>;
+ };
+
leds {
compatible = "gpio-leds";
led-0 {
@@ -56,6 +63,46 @@
default-state = "off";
};
};
+
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 28 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 29 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 30 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 31 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 31 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 31 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 2 GPIO_ACTIVE_HIGH>;
+ };
};
&gpio {
@@ -119,15 +166,6 @@
microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>;
};
-&axi {
- i2c0_imux: i2c-mux {
- compatible = "i2c-mux-pinctrl";
- #address-cells = <1>;
- #size-cells = <0>;
- i2c-parent = <&i2c0>;
- };
-};
-
&i2c0_imux {
pinctrl-names =
"i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
@@ -159,45 +197,6 @@
};
};
-&axi {
- sfp_eth60: sfp-eth60 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp1>;
- tx-disable-gpios = <&sgpio_out2 28 0 GPIO_ACTIVE_LOW>;
- rate-select0-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_HIGH>;
- los-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth61: sfp-eth61 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp2>;
- tx-disable-gpios = <&sgpio_out2 29 0 GPIO_ACTIVE_LOW>;
- rate-select0-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_HIGH>;
- los-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth62: sfp-eth62 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp3>;
- tx-disable-gpios = <&sgpio_out2 30 0 GPIO_ACTIVE_LOW>;
- rate-select0-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_HIGH>;
- los-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_HIGH>;
- };
- sfp_eth63: sfp-eth63 {
- compatible = "sff,sfp";
- i2c-bus = <&i2c_sfp4>;
- tx-disable-gpios = <&sgpio_out2 31 0 GPIO_ACTIVE_LOW>;
- rate-select0-gpios = <&sgpio_out2 31 1 GPIO_ACTIVE_HIGH>;
- los-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
- mod-def0-gpios = <&sgpio_in2 31 1 GPIO_ACTIVE_LOW>;
- tx-fault-gpios = <&sgpio_in2 31 2 GPIO_ACTIVE_HIGH>;
- };
-};
-
&mdio0 {
status = "okay";
phy0: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts
index 1607ee14216f..82a59e33c46c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts
@@ -1,11 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-#include <dt-bindings/input/linux-event-codes.h>
-#include <dt-bindings/input/gpio-keys.h>
-
-#include "tegra234-p3767.dtsi"
-#include "tegra234-p3768-0000.dtsi"
+#include "tegra234-p3768-0000+p3767.dtsi"
/ {
compatible = "nvidia,p3768-0000+p3767-0000", "nvidia,p3767-0000", "nvidia,tegra234";
@@ -29,83 +25,12 @@
status = "okay";
};
- pwm@32a0000 {
- assigned-clocks = <&bpmp TEGRA234_CLK_PWM3>;
- assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLP_OUT0>;
- status = "okay";
- };
-
hda@3510000 {
nvidia,model = "NVIDIA Jetson Orin NX HDA";
};
-
- padctl@3520000 {
- status = "okay";
- };
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- key-force-recovery {
- label = "Force Recovery";
- gpios = <&gpio TEGRA234_MAIN_GPIO(G, 0) GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <BTN_1>;
- };
-
- key-power {
- label = "Power";
- gpios = <&gpio_aon TEGRA234_AON_GPIO(EE, 4) GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <KEY_POWER>;
- wakeup-event-action = <EV_ACT_ASSERTED>;
- wakeup-source;
- };
-
- key-suspend {
- label = "Suspend";
- gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <KEY_SLEEP>;
- };
- };
-
- pwm-fan {
- cooling-levels = <0 88 187 255>;
- };
-
- vdd_3v3_pcie: regulator-vdd-3v3-pcie {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_PCIE";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio_aon TEGRA234_AON_GPIO(AA, 5) GPIO_ACTIVE_HIGH>;
- enable-active-high;
};
sound {
label = "NVIDIA Jetson Orin NX APE";
};
-
- thermal-zones {
- tj-thermal {
- cooling-maps {
- map-active-0 {
- cooling-device = <&fan 0 1>;
- trip = <&tj_trip_active0>;
- };
-
- map-active-1 {
- cooling-device = <&fan 1 2>;
- trip = <&tj_trip_active1>;
- };
-
- map-active-2 {
- cooling-device = <&fan 2 3>;
- trip = <&tj_trip_active2>;
- };
- };
- };
- };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0005.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0005.dts
index dc2d4bef1e83..9f5e07012b87 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0005.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0005.dts
@@ -1,11 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-#include <dt-bindings/input/linux-event-codes.h>
-#include <dt-bindings/input/gpio-keys.h>
-
-#include "tegra234-p3767.dtsi"
-#include "tegra234-p3768-0000.dtsi"
+#include "tegra234-p3768-0000+p3767.dtsi"
/ {
compatible = "nvidia,p3768-0000+p3767-0005", "nvidia,p3767-0005", "nvidia,tegra234";
@@ -17,32 +13,7 @@
};
};
- pwm-fan {
- cooling-levels = <0 88 187 255>;
- };
-
sound {
label = "NVIDIA Jetson Orin Nano APE";
};
-
- thermal-zones {
- tj-thermal {
- cooling-maps {
- map-active-0 {
- cooling-device = <&fan 0 1>;
- trip = <&tj_trip_active0>;
- };
-
- map-active-1 {
- cooling-device = <&fan 1 2>;
- trip = <&tj_trip_active1>;
- };
-
- map-active-2 {
- cooling-device = <&fan 2 3>;
- trip = <&tj_trip_active2>;
- };
- };
- };
- };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
index 5d0298b6c30d..6d64a24fa251 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/input/gpio-keys.h>
+
+#include "tegra234-p3767.dtsi"
+
/ {
- compatible = "nvidia,p3768-0000";
aliases {
serial0 = &tcu;
@@ -210,6 +214,7 @@
compatible = "pwm-fan";
pwms = <&pwm3 0 45334>;
#cooling-cells = <2>;
+ cooling-levels = <0 88 187 255>;
};
vdd_1v8_sys: regulator-vdd-1v8-sys {
@@ -241,4 +246,25 @@
serial {
status = "okay";
};
+
+ thermal-zones {
+ tj-thermal {
+ cooling-maps {
+ map-active-0 {
+ cooling-device = <&fan 0 1>;
+ trip = <&tj_trip_active0>;
+ };
+
+ map-active-1 {
+ cooling-device = <&fan 1 2>;
+ trip = <&tj_trip_active1>;
+ };
+
+ map-active-2 {
+ cooling-device = <&fan 2 3>;
+ trip = <&tj_trip_active2>;
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index f63abb43e9fe..0e5c810304fb 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -5,11 +5,13 @@ apq8016-sbc-usb-host-dtbs := apq8016-sbc.dtb apq8016-sbc-usb-host.dtbo
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc-usb-host.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc-d3-camera-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8016-schneider-hmibsc.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8039-t2.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8094-sony-xperia-kitakami-karin_windy.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-ifc6640.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq5018-rdp432-c2.dtb
+dtb-$(CONFIG_ARCH_QCOM) += ipq5018-tplink-archer-ax55-v1.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq5332-rdp441.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq5332-rdp442.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq5332-rdp468.dtb
@@ -29,8 +31,13 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-alcatel-idol347.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-asus-z00l.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-gplus-fl8005a.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-huawei-g7.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-lg-c50.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-lg-m216.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-longcheer-l8150.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-longcheer-l8910.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-motorola-harpia.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-motorola-osprey.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-motorola-surnia.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-a3u-eur.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-a5u-eur.dtb
@@ -93,9 +100,11 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8998-sony-xperia-yoshino-poplar.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-xiaomi-sagit.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcm6490-fairphone-fp5.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcm6490-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += qcm6490-shift-otter.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-1000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-4000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs6490-rb3gen2.dtb
+dtb-$(CONFIG_ARCH_QCOM) += qcs8550-aim300-aiot.dtb
dtb-$(CONFIG_ARCH_QCOM) += qdu1000-idp.dtb
dtb-$(CONFIG_ARCH_QCOM) += qrb2210-rb1.dtb
dtb-$(CONFIG_ARCH_QCOM) += qrb4210-rb2.dtb
@@ -106,6 +115,7 @@ dtb-$(CONFIG_ARCH_QCOM) += sa8155p-adp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sa8295p-adp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sa8540p-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += sa8775p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sa8775p-ride-r3.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-acer-aspire1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-idp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1.dtb
@@ -175,6 +185,7 @@ dtb-$(CONFIG_ARCH_QCOM) += sc8180x-primus.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-crd.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-lenovo-thinkpad-x13s.dtb
dtb-$(CONFIG_ARCH_QCOM) += sda660-inforce-ifc6560.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sdm450-lenovo-tbx605f.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm450-motorola-ali.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-ganges-kirin.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-nile-discovery.dtb
@@ -241,8 +252,16 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8450-sony-xperia-nagara-pdx224.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8550-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8550-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8550-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8550-samsung-q5q.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8550-sony-xperia-yodo-pdx234.dtb
+
+sm8650-hdk-display-card-dtbs := sm8650-hdk.dtb sm8650-hdk-display-card.dtbo
+
+dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk-display-card.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-asus-vivobook-s15.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-lenovo-yoga-slim7x.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-qcp.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts b/arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts
new file mode 100644
index 000000000000..75c6137e5a11
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8016-schneider-hmibsc.dts
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Linaro Ltd.
+ */
+
+/dts-v1/;
+
+#include "msm8916-pm8916.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+#include <dt-bindings/sound/apq8016-lpass.h>
+
+/ {
+ model = "Schneider Electric HMIBSC Board";
+ compatible = "schneider,apq8016-hmibsc", "qcom,apq8016";
+
+ aliases {
+ i2c1 = &blsp_i2c6;
+ i2c3 = &blsp_i2c4;
+ i2c4 = &blsp_i2c3;
+ mmc0 = &sdhc_1; /* eMMC */
+ mmc1 = &sdhc_2; /* SD card */
+ serial0 = &blsp_uart1;
+ serial1 = &blsp_uart2;
+ spi0 = &blsp_spi5;
+ usid0 = &pm8916_0;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&adv7533_out>;
+ };
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ pinctrl-0 = <&msm_key_volp_n_default>;
+ pinctrl-names = "default";
+
+ button {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&pm8916_mpps_leds>;
+ pinctrl-names = "default";
+
+ led-1 {
+ function = LED_FUNCTION_WLAN;
+ color = <LED_COLOR_ID_YELLOW>;
+ gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "phy0tx";
+ default-state = "off";
+ };
+
+ led-2 {
+ function = LED_FUNCTION_BLUETOOTH;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "bluetooth-power";
+ default-state = "off";
+ };
+ };
+
+ memory@80000000 {
+ reg = <0 0x80000000 0 0x40000000>;
+ };
+
+ reserved-memory {
+ ramoops@bff00000 {
+ compatible = "ramoops";
+ reg = <0x0 0xbff00000 0x0 0x100000>;
+ record-size = <0x20000>;
+ console-size = <0x20000>;
+ ftrace-size = <0x20000>;
+ ecc-size = <16>;
+ };
+ };
+
+ usb-hub {
+ compatible = "smsc,usb3503";
+ reset-gpios = <&pm8916_gpios 1 GPIO_ACTIVE_LOW>;
+ initial-mode = <1>;
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb_id_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp_i2c3 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+};
+
+&blsp_i2c4 {
+ status = "okay";
+
+ adv_bridge: bridge@39 {
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ interrupts-extended = <&tlmm 31 IRQ_TYPE_EDGE_FALLING>;
+
+ adi,dsi-lanes = <4>;
+ clocks = <&rpmcc RPM_SMD_BB_CLK2>;
+ clock-names = "cec";
+ pd-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
+
+ avdd-supply = <&pm8916_l6>;
+ a2vdd-supply = <&pm8916_l6>;
+ dvdd-supply = <&pm8916_l6>;
+ pvdd-supply = <&pm8916_l6>;
+ v1p2-supply = <&pm8916_l6>;
+ v3p3-supply = <&pm8916_l17>;
+
+ pinctrl-0 = <&adv7533_int_active &adv7533_switch_active>;
+ pinctrl-1 = <&adv7533_int_suspend &adv7533_switch_suspend>;
+ pinctrl-names = "default","sleep";
+ #sound-dai-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7533_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7533_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
+};
+
+&blsp_i2c6 {
+ status = "okay";
+
+ rtc@30 {
+ compatible = "sii,s35390a";
+ reg = <0x30>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c256";
+ reg = <0x50>;
+ };
+};
+
+&blsp_spi5 {
+ cs-gpios = <&tlmm 18 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+ };
+};
+
+&blsp_uart1 {
+ label = "UART0";
+ status = "okay";
+};
+
+&blsp_uart2 {
+ label = "UART1";
+ status = "okay";
+};
+
+&lpass {
+ status = "okay";
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0_out {
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&adv7533_in>;
+};
+
+&pm8916_codec {
+ qcom,mbhc-vthreshold-low = <75 150 237 450 500>;
+ qcom,mbhc-vthreshold-high = <75 150 237 450 500>;
+ status = "okay";
+};
+
+&pm8916_gpios {
+ gpio-line-names =
+ "USB_HUB_RESET_N_PM",
+ "USB_SW_SEL_PM",
+ "NC",
+ "NC";
+
+ usb_hub_reset_pm: usb-hub-reset-pm-state {
+ pins = "gpio1";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-disable;
+ output-high;
+ };
+
+ usb_hub_reset_pm_device: usb-hub-reset-pm-device-state {
+ pins = "gpio1";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-disable;
+ output-low;
+ };
+
+ usb_sw_sel_pm: usb-sw-sel-pm-state {
+ pins = "gpio2";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ power-source = <PM8916_GPIO_VPH>;
+ input-disable;
+ output-high;
+ };
+
+ usb_sw_sel_pm_device: usb-sw-sel-pm-device-state {
+ pins = "gpio2";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ power-source = <PM8916_GPIO_VPH>;
+ input-disable;
+ output-low;
+ };
+};
+
+&pm8916_mpps {
+ gpio-line-names =
+ "NC",
+ "WLAN_LED_CTRL",
+ "BT_LED_CTRL",
+ "NC";
+
+ pm8916_mpps_leds: pm8916-mpps-state {
+ pins = "mpp2", "mpp3";
+ function = "digital";
+ output-low;
+ };
+};
+
+&pm8916_resin {
+ linux,code = <KEY_POWER>;
+ status = "okay";
+};
+
+&pm8916_rpm_regulators {
+ pm8916_l17: l17 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&sdhc_1 {
+ status = "okay";
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;
+ pinctrl-names = "default", "sleep";
+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&sound {
+ pinctrl-0 = <&cdc_pdm_default &sec_mi2s_default>;
+ pinctrl-1 = <&cdc_pdm_sleep &sec_mi2s_sleep>;
+ pinctrl-names = "default", "sleep";
+ model = "HMIBSC";
+ audio-routing =
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1";
+ status = "okay";
+
+ quaternary-dai-link {
+ link-name = "ADV7533";
+ cpu {
+ sound-dai = <&lpass MI2S_QUATERNARY>;
+ };
+ codec {
+ sound-dai = <&adv_bridge 0>;
+ };
+ };
+
+ primary-dai-link {
+ link-name = "WCD";
+ cpu {
+ sound-dai = <&lpass MI2S_PRIMARY>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 0>, <&pm8916_codec 0>;
+ };
+ };
+
+ tertiary-dai-link {
+ link-name = "WCD-Capture";
+ cpu {
+ sound-dai = <&lpass MI2S_TERTIARY>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 1>, <&pm8916_codec 1>;
+ };
+ };
+};
+
+&tlmm {
+ pinctrl-0 = <&uart1_mux0_rs232_high &uart1_mux1_rs232_low>;
+ pinctrl-names = "default";
+
+ adv7533_int_active: adv7533-int-active-state {
+ pins = "gpio31";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ adv7533_int_suspend: adv7533-int-suspend-state {
+ pins = "gpio31";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ adv7533_switch_active: adv7533-switch-active-state {
+ pins = "gpio32";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ adv7533_switch_suspend: adv7533-switch-suspend-state {
+ pins = "gpio32";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ msm_key_volp_n_default: msm-key-volp-n-default-state {
+ pins = "gpio107";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ sdc2_cd_default: sdc2-cd-default-state {
+ pins = "gpio38";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ /*
+ * UART1, which serves as the debug console, supports several modes
+ * of operation (RS-232/485/422). The mode is selected by a mux
+ * controlled through two GPIOs as follows:
+ *
+ * gpio100 gpio99 UART mode
+ * 0 0 loopback
+ * 0 1 RS-232
+ * 1 0 RS-485
+ * 1 1 RS-422
+ *
+ * The default mode configured here is RS-232 (an RS-485 variant is
+ * sketched below for comparison).
+ */
+ uart1_mux0_rs232_high: uart1-mux0-rs232-state {
+ bootph-all;
+ pins = "gpio99";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+
+ uart1_mux1_rs232_low: uart1-mux1-rs232-state {
+ bootph-all;
+ pins = "gpio100";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
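+
+ /*
+ * Illustrative sketch only, not part of the original board file:
+ * going by the table above, RS-485 mode would instead drive gpio99
+ * low and gpio100 high. The node and label names below are
+ * hypothetical and simply mirror the RS-232 states above.
+ *
+ * uart1_mux0_rs485_low: uart1-mux0-rs485-state {
+ *         pins = "gpio99";
+ *         function = "gpio";
+ *         drive-strength = <16>;
+ *         bias-disable;
+ *         output-low;
+ * };
+ *
+ * uart1_mux1_rs485_high: uart1-mux1-rs485-state {
+ *         pins = "gpio100";
+ *         function = "gpio";
+ *         drive-strength = <16>;
+ *         bias-disable;
+ *         output-high;
+ * };
+ */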
+
+ usb_id_default: usb-id-default-state {
+ pins = "gpio110";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
+
+&usb {
+ extcon = <&usb_id>, <&usb_id>;
+ pinctrl-0 = <&usb_sw_sel_pm &usb_hub_reset_pm>;
+ pinctrl-1 = <&usb_sw_sel_pm_device &usb_hub_reset_pm_device>;
+ pinctrl-names = "default", "device";
+ status = "okay";
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&wcnss {
+ firmware-name = "qcom/apq8016/wcnss.mbn";
+ status = "okay";
+};
+
+&wcnss_ctrl {
+ firmware-name = "qcom/apq8016/WCNSS_qcom_wlan_nv_sbc.bin";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3620";
+};
+
+&wcnss_mem {
+ status = "okay";
+};
+
+/* PINCTRL - additions to nodes defined in msm8916.dtsi */
+
+/*
+ * A 2 mA drive strength is not enough when connecting multiple
+ * I2C devices with different pull-up resistors.
+ */
+&blsp_i2c4_default {
+ drive-strength = <16>;
+};
+
+&blsp_i2c6_default {
+ drive-strength = <16>;
+};
+
+&blsp_uart1_default {
+ bootph-all;
+};
+
+/* Enable CoreSight */
+&cti0 { status = "okay"; };
+&cti1 { status = "okay"; };
+&cti12 { status = "okay"; };
+&cti13 { status = "okay"; };
+&cti14 { status = "okay"; };
+&cti15 { status = "okay"; };
+&debug0 { status = "okay"; };
+&debug1 { status = "okay"; };
+&debug2 { status = "okay"; };
+&debug3 { status = "okay"; };
+&etf { status = "okay"; };
+&etm0 { status = "okay"; };
+&etm1 { status = "okay"; };
+&etm2 { status = "okay"; };
+&etm3 { status = "okay"; };
+&etr { status = "okay"; };
+&funnel0 { status = "okay"; };
+&funnel1 { status = "okay"; };
+&replicator { status = "okay"; };
+&stm { status = "okay"; };
+&tpiu { status = "okay"; };
diff --git a/arch/arm64/boot/dts/qcom/ipq5018-tplink-archer-ax55-v1.dts b/arch/arm64/boot/dts/qcom/ipq5018-tplink-archer-ax55-v1.dts
new file mode 100644
index 000000000000..5bb021cb29cd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq5018-tplink-archer-ax55-v1.dts
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "ipq5018.dtsi"
+
+/ {
+ model = "TP-Link Archer AX55 v1";
+ compatible = "tplink,archer-ax55-v1", "qcom,ipq5018";
+
+ aliases {
+ serial0 = &blsp1_uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&led_pins>;
+ pinctrl-names = "default";
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ gpios = <&tlmm 10 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WAN_ONLINE;
+ gpios = <&tlmm 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-2 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WLAN_2GHZ;
+ gpios = <&tlmm 13 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-3 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-4 {
+ color = <LED_COLOR_ID_ORANGE>;
+ function = LED_FUNCTION_WAN;
+ gpios = <&tlmm 22 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-5 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_USB;
+ gpios = <&tlmm 38 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-6 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WLAN_5GHZ;
+ gpios = <&tlmm 39 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ buttons {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&button_pins>;
+ pinctrl-names = "default";
+
+ button-reset {
+ debounce-interval = <60>;
+ gpios = <&tlmm 25 GPIO_ACTIVE_LOW>;
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ };
+
+ button-wps {
+ debounce-interval = <60>;
+ gpios = <&tlmm 31 GPIO_ACTIVE_LOW>;
+ label = "wps";
+ linux,code = <KEY_WPS_BUTTON>;
+ };
+ };
+};
+
+&blsp1_uart1 {
+ pinctrl-0 = <&uart_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&sleep_clk {
+ clock-frequency = <32000>;
+};
+
+&tlmm {
+ button_pins: button-pins-state {
+ pins = "gpio25", "gpio31";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ led_pins: led-pins-state {
+ pins = "gpio10", "gpio11", "gpio13", "gpio18", "gpio22",
+ "gpio38", "gpio39";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ uart_pins: uart-pins-state {
+ pins = "gpio20", "gpio21";
+ function = "blsp0_uart0";
+ drive-strength = <8>;
+ bias-disable;
+ };
+};
+
+&xo_board_clk {
+ clock-frequency = <24000000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq5018.dtsi b/arch/arm64/boot/dts/qcom/ipq5018.dtsi
index 32b178b639f0..7e6e2c121979 100644
--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi
@@ -179,7 +179,6 @@
<0>;
#clock-cells = <1>;
#reset-cells = <1>;
- #power-domain-cells = <1>;
};
tcsr_mutex: hwlock@1905000 {
diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
index 770d9c2fb456..573656587c0d 100644
--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
@@ -208,7 +208,6 @@
reg = <0x01800000 0x80000>;
#clock-cells = <1>;
#reset-cells = <1>;
- #power-domain-cells = <1>;
clocks = <&xo_board>,
<&sleep_clk>,
<0>,
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index 17ab6c475958..e1e45da7f787 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -396,7 +396,7 @@
};
};
- gcc: gcc@1800000 {
+ gcc: clock-controller@1800000 {
compatible = "qcom,gcc-ipq6018";
reg = <0x0 0x01800000 0x0 0x80000>;
clocks = <&xo>, <&sleep_clk>;
@@ -457,6 +457,25 @@
};
};
+ sdhc: mmc@7804000 {
+ compatible = "qcom,ipq6018-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0x0 0x07804000 0x0 0x1000>,
+ <0x0 0x07805000 0x0 0x1000>;
+ reg-names = "hc", "cqhci";
+
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+ <&xo>;
+ clock-names = "iface", "core", "xo";
+ resets = <&gcc GCC_SDCC1_BCR>;
+ max-frequency = <192000000>;
+ status = "disabled";
+ };
+
blsp_dma: dma-controller@7884000 {
compatible = "qcom,bam-v1.7.0";
reg = <0x0 0x07884000 0x0 0x2b000>;
@@ -685,6 +704,7 @@
clocks = <&xo>;
clock-names = "ref";
tx-fifo-resize;
+ snps,parkmode-disable-ss-quirk;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
@@ -923,7 +943,6 @@
thermal-zones {
nss-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 4>;
trips {
@@ -937,7 +956,6 @@
nss-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 5>;
trips {
@@ -951,7 +969,6 @@
wcss-phya0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 7>;
trips {
@@ -979,7 +996,6 @@
cpu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 13>;
trips {
@@ -1009,7 +1025,6 @@
lpass-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 14>;
trips {
@@ -1023,7 +1038,6 @@
ddrss-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 15>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 5d42de829e75..284a4553070f 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -363,7 +363,7 @@
};
};
- gcc: gcc@1800000 {
+ gcc: clock-controller@1800000 {
compatible = "qcom,gcc-ipq8074";
reg = <0x01800000 0x80000>;
clocks = <&xo>,
@@ -666,6 +666,7 @@
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_0>, <&ssphy_0>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,parkmode-disable-ss-quirk;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
@@ -715,6 +716,7 @@
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_1>, <&ssphy_1>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,parkmode-disable-ss-quirk;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
@@ -982,7 +984,6 @@
thermal-zones {
nss-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 4>;
@@ -997,7 +998,6 @@
nss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 5>;
@@ -1012,7 +1012,6 @@
nss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 6>;
@@ -1027,7 +1026,6 @@
wcss-phya0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 7>;
@@ -1042,7 +1040,6 @@
wcss-phya1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 8>;
@@ -1057,7 +1054,6 @@
cpu0_thermal: cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 9>;
@@ -1072,7 +1068,6 @@
cpu1_thermal: cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 10>;
@@ -1087,7 +1082,6 @@
cpu2_thermal: cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 11>;
@@ -1102,7 +1096,6 @@
cpu3_thermal: cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 12>;
@@ -1117,7 +1110,6 @@
cluster_thermal: cluster-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 13>;
@@ -1132,7 +1124,6 @@
wcss-phyb0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 14>;
@@ -1147,7 +1138,6 @@
wcss-phyb1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 15>;
diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
index 7f2e5cbf3bbb..48dfafea46a7 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
@@ -8,6 +8,7 @@
#include <dt-bindings/clock/qcom,apss-ipq.h>
#include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq9574.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/qcom,ipq9574-gcc.h>
#include <dt-bindings/thermal/thermal.h>
@@ -232,6 +233,16 @@
clock-names = "core";
};
+ mdio: mdio@90000 {
+ compatible = "qcom,ipq9574-mdio", "qcom,ipq4019-mdio";
+ reg = <0x00090000 0x64>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&gcc GCC_MDIO_AHB_CLK>;
+ clock-names = "gcc_mdio_ahb_clk";
+ status = "disabled";
+ };
+
qfprom: efuse@a4000 {
compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
reg = <0x000a4000 0x5a1>;
@@ -305,7 +316,7 @@
<0>;
#clock-cells = <1>;
#reset-cells = <1>;
- #power-domain-cells = <1>;
+ #interconnect-cells = <1>;
};
tcsr_mutex: hwlock@1905000 {
@@ -749,8 +760,6 @@
thermal-zones {
nss-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 3>;
trips {
@@ -763,8 +772,6 @@
};
ubi-0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 4>;
trips {
@@ -777,8 +784,6 @@
};
ubi-1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 5>;
trips {
@@ -791,8 +796,6 @@
};
ubi-2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 6>;
trips {
@@ -805,8 +808,6 @@
};
ubi-3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 7>;
trips {
@@ -819,8 +820,6 @@
};
cpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 8>;
trips {
@@ -833,8 +832,6 @@
};
cpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 9>;
trips {
@@ -847,8 +844,6 @@
};
cpu0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 10>;
trips {
@@ -877,8 +872,6 @@
};
cpu1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 11>;
trips {
@@ -907,8 +900,6 @@
};
cpu2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 12>;
trips {
@@ -937,8 +928,6 @@
};
cpu3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 13>;
trips {
@@ -967,8 +956,6 @@
};
wcss-phyb-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 14>;
trips {
@@ -981,8 +968,6 @@
};
top-glue-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens 15>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/msm8216-samsung-fortuna3g.dts b/arch/arm64/boot/dts/qcom/msm8216-samsung-fortuna3g.dts
index 366914be7d53..fba68bf8bf79 100644
--- a/arch/arm64/boot/dts/qcom/msm8216-samsung-fortuna3g.dts
+++ b/arch/arm64/boot/dts/qcom/msm8216-samsung-fortuna3g.dts
@@ -9,3 +9,17 @@
compatible = "samsung,fortuna3g", "qcom,msm8916";
chassis-type = "handset";
};
+
+&battery {
+ charge-term-current-microamp = <200000>;
+ constant-charge-current-max-microamp = <1000000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+};
+
+&st_accel {
+ status = "okay";
+};
+
+&st_magn {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts b/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
index b32c7a97394d..b4ce14a79370 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
@@ -3,6 +3,7 @@
/dts-v1/;
#include "msm8916-pm8916.dtsi"
+#include "msm8916-modem-qdsp6.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
@@ -135,6 +136,17 @@
status = "okay";
};
+&mpss_mem {
+ reg = <0x0 0x86800000 0x0 0x4500000>;
+};
+
+&pm8916_codec {
+ qcom,micbias-lvl = <2800>;
+ qcom,mbhc-vthreshold-low = <150 237 450 500 590>;
+ qcom,mbhc-vthreshold-high = <150 237 450 500 590>;
+ qcom,hphl-jack-type-normally-open;
+};
+
&pm8916_resin {
linux,code = <KEY_VOLUMEDOWN>;
status = "okay";
@@ -170,6 +182,20 @@
status = "okay";
};
+&sound {
+ model = "acer-a1-724";
+ audio-routing =
+ "DMIC1", "MIC BIAS External1",
+ "DMIC1", "Digital Mic1",
+ "AMIC2", "MIC BIAS Internal2",
+ "DMIC2", "MIC BIAS External1",
+ "DMIC2", "Digital Mic2";
+
+ pinctrl-0 = <&cdc_pdm_default &sec_mi2s_default &pri_mi2s_mclk_default &cdc_dmic_default>;
+ pinctrl-1 = <&cdc_pdm_sleep &sec_mi2s_sleep &pri_mi2s_mclk_sleep &cdc_dmic_sleep>;
+ pinctrl-names = "default", "sleep";
+};
+
&usb {
extcon = <&usb_id>, <&usb_id>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts b/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
index b748d140b52e..f7be7e371820 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
@@ -3,6 +3,7 @@
/dts-v1/;
#include "msm8916-pm8916.dtsi"
+#include "msm8916-modem-qdsp6.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
@@ -23,6 +24,28 @@
stdout-path = "serial0";
};
+ battery: battery {
+ compatible = "simple-battery";
+ device-chemistry = "lithium-ion-polymer";
+ voltage-min-design-microvolt = <3700000>;
+ voltage-max-design-microvolt = <4200000>;
+ energy-full-design-microwatt-hours = <13690000>;
+ charge-full-design-microamp-hours = <3700000>;
+
+ ocv-capacity-celsius = <25>;
+ ocv-capacity-table-0 =
+ <4186000 100>, <4126000 95>, <4078000 90>,
+ <4036000 85>, <3997000 80>, <3962000 75>,
+ <3932000 70>, <3904000 65>, <3874000 60>,
+ <3839000 55>, <3809000 50>, <3792000 45>,
+ <3780000 40>, <3772000 35>, <3764000 30>,
+ <3752000 25>, <3731000 20>, <3704000 16>,
+ <3677000 13>, <3670000 11>, <3668000 10>,
+ <3666000 9>, <3662000 8>, <3658000 7>, <3648000 6>,
+ <3624000 5>, <3580000 4>, <3518000 3>, <3434000 2>,
+ <3310000 1>, <3000000 0>;
+ };
+
flash-led-controller {
/* Actually qcom,leds-gpio-flash */
compatible = "sgmicro,sgm3140";
@@ -111,6 +134,22 @@
status = "okay";
};
+&mpss_mem {
+ reg = <0x0 0x86800000 0x0 0x5000000>;
+};
+
+&pm8916_bms {
+ monitored-battery = <&battery>;
+ status = "okay";
+};
+
+&pm8916_codec {
+ qcom,micbias-lvl = <2800>;
+ qcom,mbhc-vthreshold-low = <150 180 237 450 500>;
+ qcom,mbhc-vthreshold-high = <150 180 237 450 500>;
+ qcom,hphl-jack-type-normally-open;
+};
+
&pm8916_resin {
linux,code = <KEY_VOLUMEDOWN>;
status = "okay";
@@ -141,6 +180,14 @@
status = "okay";
};
+&sound {
+ model = "msm8916-1mic";
+ audio-routing =
+ "AMIC1", "MIC BIAS External1",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1";
+};
+
&usb {
extcon = <&usb_id>, <&usb_id>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts b/arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts
new file mode 100644
index 000000000000..a823a1c40208
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-lg-c50.dts
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-pm8916.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "LG Leon LTE";
+ compatible = "lg,c50", "qcom,msm8916";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &sdhc_1; /* eMMC */
+ mmc1 = &sdhc_2; /* SD card */
+ serial0 = &blsp_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ label = "GPIO Buttons";
+
+ volume-up-button {
+ label = "Volume Up";
+ gpios = <&tlmm 108 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ volume-down-button {
+ label = "Volume Down";
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+
+ reg_sd_vmmc: regulator-sdcard-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "sdcard-vmmc";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ gpio = <&tlmm 60 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ startup-delay-us = <5000>;
+
+ pinctrl-0 = <&sd_vmmc_en_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp_uart2 {
+ status = "okay";
+};
+
+&pm8916_usbin {
+ status = "okay";
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+};
+
+&sdhc_2 {
+ vmmc-supply = <&reg_sd_vmmc>;
+
+ pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;
+ pinctrl-names = "default", "sleep";
+
+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_HIGH>;
+
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "peripheral";
+ extcon = <&pm8916_usbin>;
+ status = "okay";
+};
+
+&usb_hs_phy {
+ extcon = <&pm8916_usbin>;
+};
+
+&venus {
+ status = "okay";
+};
+
+&venus_mem {
+ status = "okay";
+};
+
+&wcnss {
+ status = "okay";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3620";
+};
+
+&wcnss_mem {
+ status = "okay";
+};
+
+&tlmm {
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio107", "gpio108";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ sd_vmmc_en_default: sd-vmmc-en-default-state {
+ pins = "gpio60";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdc2_cd_default: sdc2-cd-default-state {
+ pins = "gpio38";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts b/arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts
new file mode 100644
index 000000000000..07345e694f6f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-lg-m216.dts
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-pm8916.dtsi"
+#include "msm8916-modem-qdsp6.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "LG K10 (K420n)";
+ compatible = "lg,m216", "qcom,msm8916";
+ chassis-type = "handset";
+
+ aliases {
+ mmc0 = &sdhc_1; /* eMMC */
+ mmc1 = &sdhc_2; /* SD card */
+ serial0 = &blsp_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ battery: battery {
+ compatible = "simple-battery";
+ voltage-min-design-microvolt = <3300000>;
+ voltage-max-design-microvolt = <4350000>;
+ energy-full-design-microwatt-hours = <8800000>;
+ charge-full-design-microamp-hours = <2300000>;
+
+ ocv-capacity-celsius = <25>;
+ ocv-capacity-table-0 = <4342000 100>, <4266000 95>, <4206000 90>,
+ <4148000 85>, <4094000 80>, <4046000 75>, <3994000 70>,
+ <3956000 65>, <3916000 60>, <3866000 55>, <3831000 50>,
+ <3808000 45>, <3789000 40>, <3776000 35>, <3769000 30>,
+ <3760000 25>, <3740000 20>, <3712000 16>, <3684000 13>,
+ <3676000 11>, <3674000 10>, <3672000 9>, <3669000 8>,
+ <3665000 7>, <3660000 6>, <3643000 5>, <3602000 4>,
+ <3542000 3>, <3458000 2>, <3326000 1>, <3000000 0>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ label = "GPIO Buttons";
+
+ volume-up-button {
+ label = "Volume Up";
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ volume-down-button {
+ label = "Volume Down";
+ gpios = <&tlmm 108 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+};
+
+&blsp_i2c2 {
+ status = "okay";
+
+ accelerometer@11 {
+ compatible = "bosch,bmc150_accel";
+ reg = <0x11>;
+
+ interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_RISING>;
+
+ mount-matrix = "0", "1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ pinctrl-0 = <&accel_int_default>;
+ pinctrl-names = "default";
+ };
+
+ magnetometer@13 {
+ compatible = "bosch,bmc150_magn";
+ reg = <0x13>;
+
+ interrupts-extended = <&tlmm 69 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ pinctrl-0 = <&magn_int_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp_i2c5 {
+ status = "okay";
+
+ touchscreen@34 {
+ compatible = "melfas,mip4_ts";
+ reg = <0x34>;
+
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>;
+ ce-gpios = <&tlmm 12 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&touchscreen_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp_uart2 {
+ status = "okay";
+};
+
+&mpss_mem {
+ reg = <0x0 0x86800000 0x0 0x4a00000>;
+};
+
+&pm8916_bms {
+ monitored-battery = <&battery>;
+ power-supplies = <&pm8916_charger>;
+
+ status = "okay";
+};
+
+&pm8916_charger {
+ qcom,fast-charge-safe-current = <700000>;
+ qcom,fast-charge-safe-voltage = <4300000>;
+
+ monitored-battery = <&battery>;
+ status = "okay";
+};
+
+&pm8916_codec {
+ qcom,micbias1-ext-cap;
+ qcom,micbias-lvl = <2800>;
+ qcom,mbhc-vthreshold-low = <75 100 120 180 500>;
+ qcom,mbhc-vthreshold-high = <75 100 120 180 500>;
+ qcom,hphl-jack-type-normally-open;
+};
+
+&pm8916_rpm_regulators {
+ pm8916_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;
+ pinctrl-names = "default", "sleep";
+
+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
+
+&sound {
+ audio-routing =
+ "AMIC1", "MIC BIAS External1",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1";
+};
+
+&usb {
+ dr_mode = "peripheral";
+ extcon = <&pm8916_charger>;
+ status = "okay";
+};
+
+&usb_hs_phy {
+ extcon = <&pm8916_charger>;
+};
+
+&venus {
+ status = "okay";
+};
+
+&venus_mem {
+ status = "okay";
+};
+
+&wcnss {
+ status = "okay";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3620";
+};
+
+&wcnss_mem {
+ status = "okay";
+};
+
+&tlmm {
+ accel_int_default: accel-int-default-state {
+ pins = "gpio115";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio107", "gpio108";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ magn_int_default: magn-int-default-state {
+ pins = "gpio69";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdc2_cd_default: sdc2-cd-default-state {
+ pins = "gpio38";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ touchscreen_default: touchscreen-default-state {
+ touchscreen-pins {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ ce-pins {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi
new file mode 100644
index 000000000000..6a27d0ecd2ad
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-motorola-common.dtsi
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "msm8916-pm8916.dtsi"
+#include "msm8916-modem-qdsp6.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ aliases {
+ mmc0 = &sdhc_1; /* eMMC */
+ mmc1 = &sdhc_2; /* SD card */
+ serial0 = &blsp_uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ label = "GPIO Buttons";
+
+ volume-up-button {
+ label = "Volume Up";
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&tlmm 91 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb_id_default>;
+ pinctrl-1 = <&usb_id_sleep>;
+ pinctrl-names = "default", "sleep";
+ };
+};
+
+&blsp_i2c2 {
+ status = "okay";
+
+ touchscreen: touchscreen@20 {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vio-supply = <&pm8916_l6>;
+
+ syna,startup-delay-ms = <100>;
+
+ rmi4-f01@1 {
+ reg = <1>;
+ syna,nosleep-mode = <1>; /* Allow sleeping */
+ };
+
+ rmi4-f11@11 {
+ reg = <11>;
+ syna,sensor-type = <1>; /* Touchscreen */
+ };
+ };
+};
+
+&blsp_uart1 {
+ status = "okay";
+};
+
+&mpss_mem {
+ reg = <0x0 0x86800000 0x0 0x5500000>;
+};
+
+&pm8916_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&pm8916_rpm_regulators {
+ pm8916_l16: l16 {
+ regulator-min-microvolt = <3100000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+};
+
+&sdhc_2 {
+ status = "okay";
+};
+
+&usb {
+ extcon = <&usb_id>, <&usb_id>;
+ status = "okay";
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&venus {
+ status = "okay";
+};
+
+&venus_mem {
+ status = "okay";
+};
+
+&wcnss {
+ status = "okay";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3620";
+};
+
+&wcnss_mem {
+ status = "okay";
+};
+
+/* CTS/RTX are not used */
+&blsp_uart1_default {
+ pins = "gpio0", "gpio1";
+};
+&blsp_uart1_sleep {
+ pins = "gpio0", "gpio1";
+};
+
+&tlmm {
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio107";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ usb_id_default: usb-id-default-state {
+ pins = "gpio91";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ usb_id_sleep: usb-id-sleep-state {
+ pins = "gpio91";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-motorola-harpia.dts b/arch/arm64/boot/dts/qcom/msm8916-motorola-harpia.dts
new file mode 100644
index 000000000000..8380451ebbf6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-motorola-harpia.dts
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-motorola-common.dtsi"
+
+/ {
+ model = "Motorola Moto G4 Play";
+ compatible = "motorola,harpia", "qcom,msm8916";
+ chassis-type = "handset";
+};
+
+&blsp_i2c1 {
+ status = "okay";
+
+ battery@36 {
+ compatible = "maxim,max17050";
+ reg = <0x36>;
+
+ interrupts-extended = <&tlmm 62 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&battery_alert_default>;
+ pinctrl-names = "default";
+
+ maxim,rsns-microohm = <10000>;
+ maxim,over-heat-temp = <600>;
+ maxim,cold-temp = <(-200)>;
+ maxim,dead-volt = <3200>;
+ maxim,over-volt = <4500>;
+ };
+
+ /* charger@6b */
+};
+
+&blsp_i2c4 {
+ status = "okay";
+
+ accelerometer@19 {
+ compatible = "bosch,bma253";
+ reg = <0x19>;
+
+ interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_RISING>,
+ <&tlmm 119 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ mount-matrix = "1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "1";
+
+ pinctrl-0 = <&accel_int_default>;
+ pinctrl-names = "default";
+ };
+
+ /* proximity@49 */
+};
+
+&pm8916_codec {
+ qcom,micbias-lvl = <2800>;
+ qcom,mbhc-vthreshold-low = <75 150 237 450 500>;
+ qcom,mbhc-vthreshold-high = <75 150 237 450 500>;
+ qcom,micbias1-ext-cap;
+};
+
+&pm8916_rpm_regulators {
+ pm8916_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;
+ pinctrl-names = "default", "sleep";
+
+ cd-gpios = <&tlmm 118 GPIO_ACTIVE_LOW>;
+};
+
+&sound {
+ audio-routing =
+ "AMIC1", "MIC BIAS External1",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1";
+
+ pinctrl-0 = <&cdc_pdm_default &headset_switch_supply_en
+ &headset_switch_in>;
+ pinctrl-1 = <&cdc_pdm_sleep &headset_switch_supply_en
+ &headset_switch_in>;
+ pinctrl-names = "default", "sleep";
+};
+
+&touchscreen {
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&pm8916_l16>;
+
+ pinctrl-0 = <&ts_int_default>;
+ pinctrl-names = "default";
+};
+
+&tlmm {
+ accel_int_default: accel-int-default-state {
+ pins = "gpio115", "gpio119";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ battery_alert_default: battery-alert-default-state {
+ pins = "gpio62";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ headset_switch_in: headset-switch-in-state {
+ pins = "gpio112";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ headset_switch_supply_en: headset-switch-supply-en-state {
+ pins = "gpio111";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+
+ sdc2_cd_default: sdc2-cd-default-state {
+ pins = "gpio118";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ ts_int_default: ts-int-default-state {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-motorola-osprey.dts b/arch/arm64/boot/dts/qcom/msm8916-motorola-osprey.dts
new file mode 100644
index 000000000000..ec5589fc69bd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-motorola-osprey.dts
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-motorola-common.dtsi"
+
+/ {
+ model = "Motorola Moto G 2015";
+ compatible = "motorola,osprey", "qcom,msm8916";
+ chassis-type = "handset";
+
+ reg_touch_vdda: regulator-touch-vdda {
+ compatible = "regulator-fixed";
+ regulator-name = "touch_vdda";
+ gpio = <&tlmm 114 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-0 = <&touch_vdda_default>;
+ pinctrl-names = "default";
+ startup-delay-us = <300>;
+ vin-supply = <&pm8916_l16>;
+ };
+};
+
+&blsp_i2c1 {
+ status = "okay";
+
+ battery@36 {
+ compatible = "maxim,max17050";
+ reg = <0x36>;
+
+ interrupts-extended = <&tlmm 49 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&battery_alert_default>;
+ pinctrl-names = "default";
+
+ maxim,rsns-microohm = <10000>;
+ maxim,over-heat-temp = <600>;
+ maxim,cold-temp = <(-200)>;
+ maxim,dead-volt = <3200>;
+ maxim,over-volt = <4500>;
+
+ };
+};
+
+&blsp_i2c6 {
+ /* magnetometer@c */
+};
+
+&pm8916_codec {
+ qcom,micbias1-ext-cap;
+ qcom,micbias2-ext-cap;
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;
+ pinctrl-names = "default", "sleep";
+
+ cd-gpios = <&tlmm 25 GPIO_ACTIVE_LOW>;
+};
+
+&sound {
+ audio-routing =
+ "AMIC1", "MIC BIAS External1",
+ "AMIC3", "MIC BIAS External1";
+};
+
+&touchscreen {
+ interrupts-extended = <&tlmm 21 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&reg_touch_vdda>;
+
+ pinctrl-0 = <&ts_int_default>;
+ pinctrl-names = "default";
+};
+
+&tlmm {
+ battery_alert_default: battery-alert-default-state {
+ pins = "gpio49";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ sdc2_cd_default: sdc2-cd-default-state {
+ pins = "gpio25";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ ts_int_default: ts-int-default-state {
+ pins = "gpio21";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ touch_vdda_default: touch-vdda-default-state {
+ pins = "gpio114";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-motorola-surnia.dts b/arch/arm64/boot/dts/qcom/msm8916-motorola-surnia.dts
new file mode 100644
index 000000000000..eecf78ba45bb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-motorola-surnia.dts
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-motorola-common.dtsi"
+
+/ {
+ model = "Motorola Moto E 2015 LTE";
+ compatible = "motorola,surnia", "qcom,msm8916";
+ chassis-type = "handset";
+};
+
+&blsp_i2c4 {
+ status = "okay";
+
+ battery@36 {
+ compatible = "maxim,max17050";
+ reg = <0x36>;
+
+ interrupts-extended = <&tlmm 12 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&battery_alert_default>;
+ pinctrl-names = "default";
+
+ maxim,rsns-microohm = <10000>;
+ maxim,over-heat-temp = <600>;
+ maxim,cold-temp = <(-200)>;
+ maxim,dead-volt = <3200>;
+ maxim,over-volt = <4500>;
+
+ };
+};
+
+&pm8916_codec {
+ qcom,micbias1-ext-cap;
+ qcom,micbias2-ext-cap;
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;
+ pinctrl-names = "default", "sleep";
+
+ cd-gpios = <&tlmm 25 GPIO_ACTIVE_LOW>;
+};
+
+&sound {
+ audio-routing =
+ "AMIC1", "MIC BIAS External1",
+ "AMIC3", "MIC BIAS External1";
+};
+
+&touchscreen {
+ interrupts-extended = <&tlmm 21 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&pm8916_l16>;
+
+ pinctrl-0 = <&ts_int_default>;
+ pinctrl-names = "default";
+};
+
+&tlmm {
+ battery_alert_default: battery-alert-default-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ sdc2_cd_default: sdc2-cd-default-state {
+ pins = "gpio25";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ ts_int_default: ts-int-default-state {
+ pins = "gpio21";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
index 4bbbee80b5e4..e6355e5e2177 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
@@ -28,6 +28,12 @@
};
};
+ battery: battery {
+ compatible = "simple-battery";
+ precharge-current-microamp = <450000>;
+ precharge-upper-limit-microvolt = <3500000>;
+ };
+
clk_pwm: pwm {
compatible = "clk-pwm";
#pwm-cells = <2>;
@@ -245,7 +251,7 @@
&blsp_i2c4 {
status = "okay";
- battery@35 {
+ fuel-gauge@35 {
compatible = "richtek,rt5033-battery";
reg = <0x35>;
interrupt-parent = <&tlmm>;
@@ -253,6 +259,44 @@
pinctrl-names = "default";
pinctrl-0 = <&fg_alert_default>;
+
+ power-supplies = <&charger>;
+ };
+};
+
+&blsp_i2c6 {
+ status = "okay";
+
+ pmic@34 {
+ compatible = "richtek,rt5033";
+ reg = <0x34>;
+
+ interrupts-extended = <&tlmm 62 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&pmic_int_default>;
+ pinctrl-names = "default";
+
+ regulators {
+ rt5033_reg_safe_ldo: SAFE_LDO {
+ regulator-min-microvolt = <4900000>;
+ regulator-max-microvolt = <4900000>;
+ regulator-always-on;
+ };
+
+ /*
+ * Needed for camera, but not used yet.
+ * Define empty nodes to allow disabling the unused
+ * regulators.
+ */
+ LDO {};
+ BUCK {};
+ };
+
+ charger: charger {
+ compatible = "richtek,rt5033-charger";
+ monitored-battery = <&battery>;
+ richtek,usb-connector = <&usb_con>;
+ };
};
};
@@ -476,6 +520,13 @@
bias-disable;
};
+ pmic_int_default: pmic-int-default-state {
+ pins = "gpio62";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
sdc2_cd_default: sdc2-cd-default-state {
pins = "gpio38";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
index 3b934f5eba47..906d31f1ea21 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
@@ -55,6 +55,12 @@
"0", "0", "1";
};
+&battery {
+ charge-term-current-microamp = <150000>;
+ constant-charge-current-max-microamp = <1000000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+};
+
&blsp_i2c5 {
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
index 391befa22bb4..fe39be7a742b 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
@@ -29,6 +29,12 @@
"0", "0", "1";
};
+&battery {
+ charge-term-current-microamp = <200000>;
+ constant-charge-current-max-microamp = <1500000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+};
+
&blsp_i2c5 {
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-e5.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-e5.dts
index fad2535255f7..800cb1038da0 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-e5.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-e5.dts
@@ -23,6 +23,12 @@
chassis-type = "handset";
};
+&battery {
+ charge-term-current-microamp = <200000>;
+ constant-charge-current-max-microamp = <1500000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+};
+
&blsp_i2c5 {
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-e7.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-e7.dts
index b412b61ca258..ec1debd2e245 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-e7.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-e7.dts
@@ -23,6 +23,13 @@
chassis-type = "handset";
};
+&battery {
+ charge-term-current-microamp = <200000>;
+ constant-charge-current-max-microamp = <1500000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+};
+
&pm8916_l17 {
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
index 5e933fb8b363..81b3e0760154 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
aliases {
@@ -26,6 +27,12 @@
};
};
+ battery: battery {
+ compatible = "simple-battery";
+ precharge-current-microamp = <450000>;
+ precharge-upper-limit-microvolt = <3500000>;
+ };
+
clk_pwm_backlight: backlight {
compatible = "pwm-backlight";
pwms = <&clk_pwm 0 100000>;
@@ -78,6 +85,35 @@
max-microvolt = <3300000>;
};
+ i2c_nfc: i2c-nfc {
+ compatible = "i2c-gpio";
+ sda-gpios = <&tlmm 0 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&tlmm 1 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+
+ pinctrl-0 = <&nfc_i2c_default>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ s3fwrn5_nfc: nfc@27 {
+ compatible = "samsung,s3fwrn5-i2c";
+ reg = <0x27>;
+
+ interrupts-extended = <&tlmm 21 IRQ_TYPE_EDGE_RISING>;
+
+ en-gpios = <&tlmm 20 GPIO_ACTIVE_HIGH>;
+ wake-gpios = <&tlmm 49 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&rpmcc RPM_SMD_BB_CLK2_PIN>;
+
+ pinctrl-0 = <&nfc_default>, <&nfc_clk_req>;
+ pinctrl-names = "default";
+
+ status = "disabled";
+ };
+ };
+
reg_motor_vdd: regulator-motor-vdd {
compatible = "regulator-fixed";
regulator-name = "motor_vdd";
@@ -114,6 +150,82 @@
interrupts-extended = <&tlmm 12 IRQ_TYPE_EDGE_FALLING>;
pinctrl-0 = <&muic_int_default>;
pinctrl-names = "default";
+
+ usb_con: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
+ };
+};
+
+&blsp_i2c2 {
+ /* Available sensors vary depending on model variant */
+ status = "okay";
+
+ bosch_accel: accelerometer@10 {
+ compatible = "bosch,bmc150_accel";
+ reg = <0x10>;
+ interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&pm8916_l5>;
+ vddio-supply = <&pm8916_l5>;
+
+ pinctrl-0 = <&accel_int_default>;
+ pinctrl-names = "default";
+
+ mount-matrix = "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+
+ status = "disabled";
+ };
+
+ bosch_magn: magnetometer@12 {
+ compatible = "bosch,bmc150_magn";
+ reg = <0x12>;
+
+ vdd-supply = <&pm8916_l5>;
+ vddio-supply = <&pm8916_l5>;
+
+ mount-matrix = "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+
+ status = "disabled";
+ };
+
+ st_accel: accelerometer@1d {
+ compatible = "st,lsm303c-accel";
+ reg = <0x1d>;
+ interrupts-extended = <&tlmm 115 IRQ_TYPE_LEVEL_HIGH>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l5>;
+
+ pinctrl-0 = <&accel_int_default>;
+ pinctrl-names = "default";
+
+ st,drdy-int-pin = <1>;
+ mount-matrix = "0", "-1", "0",
+ "1", "0", "0",
+ "0", "0", "-1";
+
+ status = "disabled";
+ };
+
+ st_magn: magnetometer@1e {
+ compatible = "st,lsm303c-magn";
+ reg = <0x1e>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l5>;
+
+ mount-matrix = "0", "-1", "0",
+ "1", "0", "0",
+ "0", "0", "-1";
+
+ status = "disabled";
};
};
@@ -128,6 +240,8 @@
pinctrl-0 = <&fg_alert_default>;
pinctrl-names = "default";
+
+ power-supplies = <&charger>;
};
};
@@ -151,6 +265,42 @@
};
};
+&blsp_i2c6 {
+ status = "okay";
+
+ pmic@34 {
+ compatible = "richtek,rt5033";
+ reg = <0x34>;
+
+ interrupts-extended = <&tlmm 62 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&pmic_int_default>;
+ pinctrl-names = "default";
+
+ regulators {
+ rt5033_reg_safe_ldo: SAFE_LDO {
+ regulator-min-microvolt = <4900000>;
+ regulator-max-microvolt = <4900000>;
+ regulator-always-on;
+ };
+
+ /*
+ * Needed for camera, but not used yet.
+ * Define empty nodes to allow disabling the unused
+ * regulators.
+ */
+ LDO {};
+ BUCK {};
+ };
+
+ charger: charger {
+ compatible = "richtek,rt5033-charger";
+ monitored-battery = <&battery>;
+ richtek,usb-connector = <&usb_con>;
+ };
+ };
+};
+
&blsp_uart2 {
status = "okay";
};
@@ -223,6 +373,13 @@
};
&tlmm {
+ accel_int_default: accel-int-default-state {
+ pins = "gpio115";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
backlight_en_default: backlight-en-default-state {
pins = "gpio98";
function = "gpio";
@@ -263,6 +420,36 @@
bias-disable;
};
+ nfc_default: nfc-default-state {
+ irq-pins {
+ pins = "gpio21";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ nfc-pins {
+ pins = "gpio20", "gpio49";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ nfc_i2c_default: nfc-i2c-default-state {
+ pins = "gpio0", "gpio1";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pmic_int_default: pmic-int-default-state {
+ pins = "gpio62";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
sdc2_cd_default: sdc2-cd-default-state {
pins = "gpio38";
function = "gpio";
@@ -284,3 +471,13 @@
bias-disable;
};
};
+
+&pm8916_gpios {
+ nfc_clk_req: nfc-clk-req-state {
+ pins = "gpio2";
+ function = "func1";
+ power-source = <PM8916_GPIO_L2>;
+ bias-disable;
+ input-enable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-gprimeltecan.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-gprimeltecan.dts
index 9d65fa58ba92..677e4e286ac0 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-gprimeltecan.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-gprimeltecan.dts
@@ -21,6 +21,76 @@
};
};
+&battery {
+ charge-term-current-microamp = <200000>;
+ constant-charge-current-max-microamp = <1000000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+};
+
+&bosch_accel {
+ status = "okay";
+};
+
+&bosch_magn {
+ status = "okay";
+};
+
+&blsp_i2c6 {
+ /* pmic@34 is on i2c_nfc instead */
+ /delete-node/ pmic@34;
+
+ nfc@27 {
+ compatible = "samsung,s3fwrn5-i2c";
+ reg = <0x27>;
+
+ interrupts-extended = <&tlmm 21 IRQ_TYPE_EDGE_RISING>;
+
+ en-gpios = <&tlmm 20 GPIO_ACTIVE_HIGH>;
+ wake-gpios = <&tlmm 49 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&rpmcc RPM_SMD_BB_CLK2_PIN>;
+
+ pinctrl-0 = <&nfc_default>, <&nfc_clk_req>;
+ pinctrl-names = "default";
+ };
+};
+
+&i2c_nfc {
+ /* nfc@27 is on &blsp_i2c6 */
+
+ pmic@34 {
+ compatible = "richtek,rt5033";
+ reg = <0x34>;
+
+ interrupts-extended = <&tlmm 62 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&pmic_int_default>;
+ pinctrl-names = "default";
+
+ regulators {
+ rt5033_reg_safe_ldo: SAFE_LDO {
+ regulator-min-microvolt = <4900000>;
+ regulator-max-microvolt = <4900000>;
+ regulator-always-on;
+ };
+
+ /*
+ * Needed for camera, but not used yet.
+ * Define empty nodes to allow disabling the unused
+ * regulators.
+ */
+ LDO {};
+ BUCK {};
+ };
+
+ charger: charger {
+ compatible = "richtek,rt5033-charger";
+ monitored-battery = <&battery>;
+ richtek,usb-connector = <&usb_con>;
+ };
+ };
+};
+
&mpss_mem {
/* Firmware for gprimeltecan needs more space */
reg = <0x0 0x86800000 0x0 0x5400000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts
index 5882b3a593b8..135df1739dbd 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts
@@ -41,6 +41,12 @@
};
};
+&battery {
+ charge-term-current-microamp = <150000>;
+ constant-charge-current-max-microamp = <1000000>;
+ constant-charge-voltage-max-microvolt = <4400000>;
+};
+
&reg_motor_vdd {
gpio = <&tlmm 72 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandprimelte.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandprimelte.dts
index a66ce4b13547..582bfcb09684 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandprimelte.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandprimelte.dts
@@ -10,6 +10,20 @@
chassis-type = "handset";
};
+&battery {
+ charge-term-current-microamp = <200000>;
+ constant-charge-current-max-microamp = <1000000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+};
+
+&bosch_accel {
+ status = "okay";
+};
+
+&bosch_magn {
+ status = "okay";
+};
+
&mpss_mem {
/* Firmware for grandprimelte needs more space */
reg = <0x0 0x86800000 0x0 0x5400000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi
index b438fa81886c..e7f265e3c2ab 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa-common.dtsi
@@ -15,6 +15,12 @@
interrupts-extended = <&tlmm 12 IRQ_TYPE_EDGE_FALLING>;
pinctrl-0 = <&muic_int_default>;
pinctrl-names = "default";
+
+ usb_con: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
};
};
@@ -26,3 +32,15 @@
&clk_pwm_backlight {
status = "disabled";
};
+
+&s3fwrn5_nfc {
+ status = "okay";
+};
+
+&st_accel {
+ compatible = "st,lis2hh12";
+ mount-matrix = "1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "1";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts
index ebaa13c6b016..1981bb71f6a9 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts
@@ -10,6 +10,12 @@
chassis-type = "handset";
};
+&battery {
+ charge-term-current-microamp = <150000>;
+ constant-charge-current-max-microamp = <700000>;
+ constant-charge-voltage-max-microvolt = <4400000>;
+};
+
&mpss_mem {
/* Firmware for rossa needs more space */
reg = <0x0 0x86800000 0x0 0x5800000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index cedff4166bfb..7383bcc603ab 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -308,7 +308,7 @@
smd-edge {
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 0>;
+ mboxes = <&apcs 0>;
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
@@ -360,7 +360,7 @@
interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 14>;
+ mboxes = <&apcs 14>;
qcom,local-pid = <0>;
qcom,remote-pid = <1>;
@@ -385,7 +385,7 @@
interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 18>;
+ mboxes = <&apcs 18>;
qcom,local-pid = <0>;
qcom,remote-pid = <4>;
@@ -410,8 +410,7 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ipc-1 = <&apcs 8 13>;
- qcom,ipc-3 = <&apcs 8 19>;
+ mboxes = <0>, <&apcs 13>, <0>, <&apcs 19>;
apps_smsm: apps@0 {
reg = <0>;
@@ -1978,7 +1977,7 @@
interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
qcom,smd-edge = <0>;
- qcom,ipc = <&apcs 8 12>;
+ mboxes = <&apcs 12>;
qcom,remote-pid = <1>;
label = "hexagon";
@@ -2459,7 +2458,7 @@
smd-edge {
interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 17>;
+ mboxes = <&apcs 17>;
qcom,smd-edge = <6>;
qcom,remote-pid = <4>;
@@ -2626,7 +2625,6 @@
thermal-zones {
cpu0-1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 5>;
@@ -2656,7 +2654,6 @@
cpu2-3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 4>;
@@ -2686,7 +2683,6 @@
gpu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 2>;
@@ -2713,7 +2709,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 1>;
@@ -2728,7 +2723,6 @@
modem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 0>;
diff --git a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
index 0c599e71a464..91acdb160227 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
+++ b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
@@ -33,6 +33,15 @@
};
};
+ battery: battery {
+ compatible = "simple-battery";
+ charge-term-current-microamp = <150000>;
+ constant-charge-current-max-microamp = <1500000>;
+ constant-charge-voltage-max-microvolt = <4300000>;
+ precharge-current-microamp = <450000>;
+ precharge-upper-limit-microvolt = <3500000>;
+ };
+
gpio-hall-sensor {
compatible = "gpio-keys";
@@ -82,7 +91,7 @@
#address-cells = <1>;
#size-cells = <0>;
- battery@35 {
+ fuel-gauge@35 {
compatible = "richtek,rt5033-battery";
reg = <0x35>;
@@ -91,6 +100,8 @@
pinctrl-0 = <&fg_alert_default>;
pinctrl-names = "default";
+
+ power-supplies = <&charger>;
};
};
@@ -325,6 +336,42 @@
};
};
+&blsp_i2c6 {
+ status = "okay";
+
+ pmic@34 {
+ compatible = "richtek,rt5033";
+ reg = <0x34>;
+
+ interrupts-extended = <&tlmm 62 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&pmic_int_default>;
+ pinctrl-names = "default";
+
+ regulators {
+ rt5033_reg_safe_ldo: SAFE_LDO {
+ regulator-min-microvolt = <4900000>;
+ regulator-max-microvolt = <4900000>;
+ regulator-always-on;
+ };
+
+ /*
+ * Needed for camera, but not used yet.
+ * Define empty nodes to allow disabling the unused
+ * regulators.
+ */
+ LDO {};
+ BUCK {};
+ };
+
+ charger: charger {
+ compatible = "richtek,rt5033-charger";
+ monitored-battery = <&battery>;
+ richtek,usb-connector = <&usb_con>;
+ };
+ };
+};
+
&blsp_uart2 {
status = "okay";
};
@@ -510,6 +557,13 @@
bias-disable;
};
+ pmic_int_default: pmic-int-default-state {
+ pins = "gpio62";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
reg_tsp_en_default: reg-tsp-en-default-state {
pins = "gpio73";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
index dd45975682b2..46d9480cd464 100644
--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
@@ -248,7 +248,7 @@
smd-edge {
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs1_mbox 8 0>;
+ mboxes = <&apcs1_mbox 0>;
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
@@ -443,8 +443,7 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ipc-1 = <&apcs1_mbox 8 13>;
- qcom,ipc-3 = <&apcs1_mbox 8 19>;
+ mboxes = <0>, <&apcs1_mbox 13>, <0>, <&apcs1_mbox 19>;
apps_smsm: apps@0 {
reg = <0>;
@@ -2067,7 +2066,7 @@
smd-edge {
interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs1_mbox 8 17>;
+ mboxes = <&apcs1_mbox 17>;
qcom,smd-edge = <6>;
qcom,remote-pid = <4>;
@@ -2299,7 +2298,6 @@
thermal_zones: thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 5>;
@@ -2330,7 +2328,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 6>;
@@ -2361,7 +2358,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 7>;
@@ -2392,7 +2388,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 8>;
@@ -2423,7 +2418,6 @@
cpu4567-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 9>;
@@ -2454,7 +2448,6 @@
gpu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 3>;
@@ -2482,7 +2475,6 @@
modem1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 0>;
@@ -2497,7 +2489,6 @@
modem2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 2>;
@@ -2512,7 +2503,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-motorola-potter.dts b/arch/arm64/boot/dts/qcom/msm8953-motorola-potter.dts
index 711d84dad9d7..2edf804eb7c9 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-motorola-potter.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-motorola-potter.dts
@@ -301,5 +301,6 @@
};
&usb3_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts
index a5957e79b818..336b916729e4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-daisy.dts
@@ -321,5 +321,6 @@
};
&usb3_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
index 6b9245cd8b0c..bdf1bfc79c56 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
@@ -326,5 +326,6 @@
};
&usb3_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
index 9ac4f507e321..fccb9c4360ca 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
@@ -322,5 +322,6 @@
};
&usb3_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
index b0588f30f8f1..d46325e79917 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
@@ -357,5 +357,6 @@
};
&usb3_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 5d818fe057dd..a4bfb624fb8a 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -195,7 +195,7 @@
smd-edge {
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 0>;
+ mboxes = <&apcs 0>;
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
@@ -361,7 +361,7 @@
interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 14>;
+ mboxes = <&apcs 14>;
qcom,local-pid = <0>;
qcom,remote-pid = <1>;
@@ -386,7 +386,7 @@
interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 18>;
+ mboxes = <&apcs 18>;
qcom,local-pid = <0>;
qcom,remote-pid = <4>;
@@ -411,8 +411,7 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ipc-1 = <&apcs 8 13>;
- qcom,ipc-3 = <&apcs 8 19>;
+ mboxes = <0>, <&apcs 13>, <0>, <&apcs 19>;
apps_smsm: apps@0 {
reg = <0>;
@@ -1267,7 +1266,7 @@
interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
qcom,smd-edge = <0>;
- qcom,ipc = <&apcs 8 12>;
+ mboxes = <&apcs 12>;
qcom,remote-pid = <1>;
label = "modem";
@@ -1748,7 +1747,7 @@
smd-edge {
interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 17>;
+ mboxes = <&apcs 17>;
qcom,smd-edge = <6>;
qcom,remote-pid = <4>;
@@ -1968,8 +1967,9 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens0 9>;
+
trips {
cpu0_alert: trip-point0 {
temperature = <80000>;
@@ -1991,8 +1991,9 @@
};
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens0 10>;
+
trips {
cpu1_alert: trip-point0 {
temperature = <80000>;
@@ -2014,8 +2015,9 @@
};
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens0 11>;
+
trips {
cpu2_alert: trip-point0 {
temperature = <80000>;
@@ -2037,8 +2039,9 @@
};
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens0 12>;
+
trips {
cpu3_alert: trip-point0 {
temperature = <80000>;
@@ -2060,7 +2063,6 @@
};
cpu4-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
trips {
cpu4_alert: trip-point0 {
@@ -2083,7 +2085,6 @@
};
cpu5-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
trips {
cpu5_alert: trip-point0 {
@@ -2106,7 +2107,6 @@
};
cpu6-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
trips {
cpu6_alert: trip-point0 {
@@ -2129,7 +2129,6 @@
};
cpu7-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
trips {
cpu7_alert: trip-point0 {
@@ -2153,7 +2152,6 @@
gpu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 15>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/msm8956.dtsi b/arch/arm64/boot/dts/qcom/msm8956.dtsi
index 668e05185c21..fa36b62156bb 100644
--- a/arch/arm64/boot/dts/qcom/msm8956.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8956.dtsi
@@ -8,8 +8,8 @@
#include "msm8976.dtsi"
-&pmu {
- interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
+&pmu_a72 {
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_RAW(0x30) | IRQ_TYPE_LEVEL_HIGH)>;
};
&tsens {
diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
index d2bb1ada361a..d62dcb76fa48 100644
--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
@@ -222,11 +222,17 @@
reg = <0x0 0x80000000 0x0 0x0>;
};
- pmu: pmu {
- compatible = "arm,armv8-pmuv3";
- interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+ pmu-a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
+ pmu_a72: pmu-a72 {
+ compatible = "arm,cortex-a72-pmu";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_RAW(0xf0) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -237,7 +243,7 @@
smd-edge {
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 0>;
+ mboxes = <&apcs 0>;
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
@@ -361,7 +367,7 @@
smp2p-hexagon {
compatible = "qcom,smp2p";
interrupts = <GIC_SPI 291 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 10>;
+ mboxes = <&apcs 10>;
qcom,local-pid = <0>;
qcom,remote-pid = <2>;
@@ -384,7 +390,7 @@
smp2p-modem {
compatible = "qcom,smp2p";
interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 14>;
+ mboxes = <&apcs 14>;
qcom,local-pid = <0>;
qcom,remote-pid = <1>;
@@ -407,7 +413,7 @@
smp2p-wcnss {
compatible = "qcom,smp2p";
interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 18>;
+ mboxes = <&apcs 18>;
qcom,local-pid = <0>;
qcom,remote-pid = <4>;
@@ -433,9 +439,7 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ipc-1 = <&apcs 8 13>;
- qcom,ipc-2 = <&apcs 8 9>;
- qcom,ipc-3 = <&apcs 8 19>;
+ mboxes = <0>, <&apcs 13>, <&apcs 9>, <&apcs 19>;
apps_smsm: apps@0 {
reg = <0>;
@@ -771,6 +775,36 @@
drive-strength = <2>;
bias-disable;
};
+
+ wcss_wlan_default: wcss-wlan-default-state {
+ wcss-wlan2-pins {
+ pins = "gpio40";
+ function = "wcss_wlan2";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ wcss-wlan1-pins {
+ pins = "gpio41";
+ function = "wcss_wlan1";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ wcss-wlan0-pins {
+ pins = "gpio42";
+ function = "wcss_wlan0";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ wcss-wlan-pins {
+ pins = "gpio43", "gpio44";
+ function = "wcss_wlan";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
};
gcc: clock-controller@1800000 {
@@ -785,10 +819,10 @@
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
<&rpmcc RPM_SMD_XO_A_CLK_SRC>,
- <0>,
- <0>,
- <0>,
- <0>;
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy 0>,
+ <&mdss_dsi1_phy 1>,
+ <&mdss_dsi1_phy 0>;
clock-names = "xo",
"xo_a",
"dsi0pll",
@@ -808,6 +842,430 @@
reg = <0x01937000 0x30000>;
};
+ mdss: display-subsystem@1a00000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x01a00000 0x1000>,
+ <0x01ab0000 0x3000>;
+ reg-names = "mdss_phys", "vbif_phys";
+
+ power-domains = <&gcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>;
+ clock-names = "iface",
+ "bus",
+ "vsync",
+ "core";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ status = "disabled";
+
+ mdss_mdp: display-controller@1a01000 {
+ compatible = "qcom,msm8976-mdp5", "qcom,mdp5";
+ reg = <0x01a01000 0x89000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>,
+ <&gcc GCC_MDP_TBU_CLK>,
+ <&gcc GCC_MDP_RT_TBU_CLK>;
+ clock-names = "iface",
+ "bus",
+ "core",
+ "vsync",
+ "tbu",
+ "tbu_rt";
+
+ operating-points-v2 = <&mdp_opp_table>;
+ power-domains = <&gcc MDSS_GDSC>;
+
+ iommus = <&apps_iommu 22>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_mdp5_intf1_out: endpoint {
+ remote-endpoint = <&mdss_dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_mdp5_intf2_out: endpoint {
+ remote-endpoint = <&mdss_dsi1_in>;
+ };
+ };
+ };
+
+ mdp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-177780000 {
+ opp-hz = /bits/ 64 <177780000>;
+ required-opps = <&rpmpd_opp_svs>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ required-opps = <&rpmpd_opp_svs_plus>;
+ };
+
+ opp-320000000 {
+ opp-hz = /bits/ 64 <320000000>;
+ required-opps = <&rpmpd_opp_nom>;
+ };
+
+ opp-360000000 {
+ opp-hz = /bits/ 64 <360000000>;
+ required-opps = <&rpmpd_opp_turbo>;
+ };
+ };
+ };
+
+ mdss_dsi0: dsi@1a94000 {
+ compatible = "qcom,msm8976-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x01a94000 0x300>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ clocks = <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_BYTE0_CLK>,
+ <&gcc GCC_MDSS_PCLK0_CLK>,
+ <&gcc GCC_MDSS_ESC0_CLK>;
+ clock-names = "mdp_core",
+ "iface",
+ "bus",
+ "byte",
+ "pixel",
+ "core";
+
+ assigned-clocks = <&gcc GCC_MDSS_BYTE0_CLK_SRC>,
+ <&gcc GCC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>;
+
+ phys = <&mdss_dsi0_phy>;
+
+ operating-points-v2 = <&dsi0_opp_table>;
+ power-domains = <&gcc MDSS_GDSC>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dsi0_in: endpoint {
+ remote-endpoint = <&mdss_mdp5_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi0_out: endpoint {
+ };
+ };
+ };
+
+ dsi0_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-125000000 {
+ opp-hz = /bits/ 64 <125000000>;
+ required-opps = <&rpmpd_opp_svs>;
+ };
+
+ opp-161250000 {
+ opp-hz = /bits/ 64 <161250000>;
+ required-opps = <&rpmpd_opp_svs_plus>;
+ };
+
+ opp-187500000 {
+ opp-hz = /bits/ 64 <187500000>;
+ required-opps = <&rpmpd_opp_nom>;
+ };
+ };
+ };
+
+ mdss_dsi1: dsi@1a96000 {
+ compatible = "qcom,msm8976-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x01a96000 0x300>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <5>;
+
+ clocks = <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_BYTE1_CLK>,
+ <&gcc GCC_MDSS_PCLK1_CLK>,
+ <&gcc GCC_MDSS_ESC1_CLK>;
+ clock-names = "mdp_core",
+ "iface",
+ "bus",
+ "byte",
+ "pixel",
+ "core";
+
+ assigned-clocks = <&gcc GCC_MDSS_BYTE1_CLK_SRC>,
+ <&gcc GCC_MDSS_PCLK1_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi1_phy 0>,
+ <&mdss_dsi1_phy 1>;
+
+ phys = <&mdss_dsi1_phy>;
+
+ operating-points-v2 = <&dsi0_opp_table>;
+ power-domains = <&gcc MDSS_GDSC>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dsi1_in: endpoint {
+ remote-endpoint = <&mdss_mdp5_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ mdss_dsi0_phy: phy@1a94a00 {
+ compatible = "qcom,dsi-phy-28nm-hpm-fam-b";
+ reg = <0x01a94a00 0xd4>,
+ <0x01a94400 0x280>,
+ <0x01a94b80 0x30>;
+ reg-names = "dsi_pll",
+ "dsi_phy",
+ "dsi_phy_regulator";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ clock-names = "iface", "ref";
+
+ status = "disabled";
+ };
+
+ mdss_dsi1_phy: phy@1a96a00 {
+ compatible = "qcom,dsi-phy-28nm-hpm-fam-b";
+ reg = <0x01a96a00 0xd4>,
+ <0x01a96400 0x280>,
+ <0x01a96b80 0x30>;
+ reg-names = "dsi_pll",
+ "dsi_phy",
+ "dsi_phy_regulator";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ clock-names = "iface", "ref";
+
+ status = "disabled";
+ };
+ };
+
+ adreno_gpu: gpu@1c00000 {
+ compatible = "qcom,adreno-510.0", "qcom,adreno";
+
+ reg = <0x01c00000 0x40000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "kgsl_3d0_irq";
+
+ clocks = <&gcc GCC_GFX3D_OXILI_CLK>,
+ <&gcc GCC_GFX3D_OXILI_AHB_CLK>,
+ <&gcc GCC_GFX3D_OXILI_GMEM_CLK>,
+ <&gcc GCC_GFX3D_BIMC_CLK>,
+ <&gcc GCC_GFX3D_OXILI_TIMER_CLK>,
+ <&gcc GCC_GFX3D_OXILI_AON_CLK>;
+ clock-names = "core",
+ "iface",
+ "mem",
+ "mem_iface",
+ "rbbmtimer",
+ "alwayson";
+
+ power-domains = <&gcc OXILI_GX_GDSC>;
+
+ iommus = <&gpu_iommu 0>;
+
+ operating-points-v2 = <&gpu_opp_table>;
+
+ status = "disabled";
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ required-opps = <&rpmpd_opp_low_svs>;
+ opp-supported-hw = <0xff>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ required-opps = <&rpmpd_opp_svs>;
+ opp-supported-hw = <0xff>;
+ };
+
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ required-opps = <&rpmpd_opp_nom>;
+ opp-supported-hw = <0xff>;
+ };
+
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+ required-opps = <&rpmpd_opp_nom_plus>;
+ opp-supported-hw = <0xff>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ required-opps = <&rpmpd_opp_turbo>;
+ opp-supported-hw = <0xff>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ required-opps = <&rpmpd_opp_turbo>;
+ opp-supported-hw = <0xff>;
+ };
+ };
+ };
+
+ apps_iommu: iommu@1ee0000 {
+ compatible = "qcom,msm8976-iommu", "qcom,msm-iommu-v2";
+ reg = <0x01ee0000 0x3000>;
+ ranges = <0 0x01e20000 0x20000>;
+
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+ clock-names = "iface", "bus";
+
+ qcom,iommu-secure-id = <17>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #iommu-cells = <1>;
+
+ /* VFE */
+ iommu-ctx@15000 {
+ compatible = "qcom,msm-iommu-v2-ns";
+ reg = <0x15000 0x1000>;
+ qcom,ctx-asid = <20>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ /* VENUS NS */
+ iommu-ctx@16000 {
+ compatible = "qcom,msm-iommu-v2-ns";
+ reg = <0x16000 0x1000>;
+ qcom,ctx-asid = <21>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ /* MDP0 */
+ iommu-ctx@17000 {
+ compatible = "qcom,msm-iommu-v2-ns";
+ reg = <0x17000 0x1000>;
+ qcom,ctx-asid = <22>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gpu_iommu: iommu@1f08000 {
+ compatible = "qcom,msm8976-iommu", "qcom,msm-iommu-v2";
+ ranges = <0 0x01f08000 0x8000>;
+
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_GFX3D_TCU_CLK>;
+ clock-names = "iface", "bus";
+
+ power-domains = <&gcc OXILI_CX_GDSC>;
+
+ qcom,iommu-secure-id = <18>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #iommu-cells = <1>;
+
+ /* gfx3d user */
+ iommu-ctx@0 {
+ compatible = "qcom,msm-iommu-v2-ns";
+ reg = <0x0 0x1000>;
+ qcom,ctx-asid = <0>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ /* gfx3d secure */
+ iommu-ctx@1000 {
+ compatible = "qcom,msm-iommu-v2-sec";
+ reg = <0x1000 0x1000>;
+ qcom,ctx-asid = <2>;
+ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ /* gfx3d priv */
+ iommu-ctx@2000 {
+ compatible = "qcom,msm-iommu-v2-sec";
+ reg = <0x2000 0x1000>;
+ qcom,ctx-asid = <1>;
+ interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
spmi_bus: spmi@200f000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0x0200f000 0x1000>,
@@ -1034,6 +1492,81 @@
status = "disabled";
};
+ wcnss: remoteproc@a204000 {
+ compatible = "qcom,pronto-v3-pil", "qcom,pronto";
+ reg = <0x0a204000 0x2000>,
+ <0x0a202000 0x1000>,
+ <0x0a21b000 0x3000>;
+ reg-names = "ccu",
+ "dxe",
+ "pmu";
+
+ memory-region = <&wcnss_fw_mem>;
+
+ interrupts-extended = <&intc GIC_SPI 149 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ power-domains = <&rpmpd MSM8976_VDDCX>,
+ <&rpmpd MSM8976_VDDMX>;
+ power-domain-names = "cx", "mx";
+
+ qcom,smem-states = <&wcnss_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ pinctrl-0 = <&wcss_wlan_default>;
+ pinctrl-names = "default";
+
+ status = "disabled";
+
+ wcnss_iris: iris {
+ /* Separate chip, compatible is board-specific */
+ clocks = <&rpmcc RPM_SMD_RF_CLK2>;
+ clock-names = "xo";
+ };
+
+ smd-edge {
+ interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs 17>;
+ qcom,smd-edge = <6>;
+ qcom,remote-pid = <4>;
+
+ label = "pronto";
+
+ wcnss_ctrl: wcnss {
+ compatible = "qcom,wcnss";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+ qcom,mmio = <&wcnss>;
+
+ wcnss_bt: bluetooth {
+ compatible = "qcom,wcnss-bt";
+ };
+
+ wcnss_wifi: wifi {
+ compatible = "qcom,wcnss-wlan";
+
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+
+ qcom,smem-states = <&apps_smsm 10>,
+ <&apps_smsm 9>;
+ qcom,smem-state-names = "tx-enable",
+ "tx-rings-empty";
+ };
+ };
+ };
+ };
+
intc: interrupt-controller@b000000 {
compatible = "qcom,msm-qgic2";
reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>;
@@ -1124,7 +1657,6 @@
thermal-zones {
aoss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 0>;
@@ -1139,7 +1671,6 @@
modem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 1>;
trips {
@@ -1153,7 +1684,6 @@
qdsp-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 2>;
trips {
@@ -1167,7 +1697,6 @@
cam-isp-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 3>;
trips {
@@ -1181,7 +1710,7 @@
cpu4-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens 4>;
trips {
@@ -1205,7 +1734,7 @@
cpu5-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens 5>;
trips {
@@ -1229,7 +1758,7 @@
cpu6-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens 6>;
trips {
@@ -1253,7 +1782,7 @@
cpu7-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens 7>;
trips {
@@ -1277,7 +1806,7 @@
big-l2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens 8>;
trips {
@@ -1301,7 +1830,7 @@
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens 9>;
trips {
@@ -1325,7 +1854,7 @@
gpu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
+
thermal-sensors = <&tsens 10>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index 695e541832ad..917fa246857d 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -183,7 +183,7 @@
smd-edge {
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 0>;
+ mboxes = <&apcs 0>;
qcom,smd-edge = <15>;
qcom,remote-pid = <6>;
@@ -300,7 +300,7 @@
interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 10>;
+ mboxes = <&apcs 10>;
qcom,local-pid = <0>;
qcom,remote-pid = <2>;
@@ -325,7 +325,7 @@
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 14>;
+ mboxes = <&apcs 14>;
qcom,local-pid = <0>;
qcom,remote-pid = <1>;
@@ -1093,10 +1093,10 @@
timer: timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 2 0xff08>,
- <GIC_PPI 3 0xff08>,
- <GIC_PPI 4 0xff08>,
- <GIC_PPI 1 0xff08>;
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};
vph_pwr: vph-pwr-regulator {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
index 5ab583be9e0a..0386636a29f0 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
@@ -405,7 +405,6 @@
&hsusb_phy1 {
status = "okay";
- extcon = <&typec>;
vdda-pll-supply = <&vreg_l12a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 8d2cb6f41095..0fd2b1b944a5 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -982,6 +982,8 @@
<&mmcc MDSS_MDP_CLK>;
clock-names = "iface", "core";
+ resets = <&mmcc MDSS_BCR>;
+
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -2077,24 +2079,20 @@
power-domains = <&gcc UFS_GDSC>;
clock-names =
- "core_clk_src",
"core_clk",
"bus_clk",
"bus_aggr_clk",
"iface_clk",
- "core_clk_unipro_src",
"core_clk_unipro",
"core_clk_ice",
"ref_clk",
"tx_lane0_sync_clk",
"rx_lane0_sync_clk";
clocks =
- <&gcc UFS_AXI_CLK_SRC>,
<&gcc GCC_UFS_AXI_CLK>,
<&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
<&gcc GCC_AGGRE2_UFS_AXI_CLK>,
<&gcc GCC_UFS_AHB_CLK>,
- <&gcc UFS_ICE_CORE_CLK_SRC>,
<&gcc GCC_UFS_UNIPRO_CORE_CLK>,
<&gcc GCC_UFS_ICE_CORE_CLK>,
<&rpmcc RPM_SMD_LN_BB_CLK>,
@@ -2105,10 +2103,8 @@
<0 0>,
<0 0>,
<0 0>,
- <0 0>,
- <150000000 300000000>,
<75000000 150000000>,
- <0 0>,
+ <150000000 300000000>,
<0 0>,
<0 0>,
<0 0>;
@@ -2483,6 +2479,13 @@
status = "disabled";
+ glink-edge {
+ interrupts = <GIC_SPI 179 IRQ_TYPE_EDGE_RISING>;
+ label = "dsps";
+ qcom,remote-pid = <3>;
+ mboxes = <&apcs_glb 27>;
+ };
+
smd-edge {
interrupts = <GIC_SPI 176 IRQ_TYPE_EDGE_RISING>;
@@ -2552,6 +2555,13 @@
memory-region = <&mdata_mem>;
};
+ glink-edge {
+ interrupts = <GIC_SPI 452 IRQ_TYPE_EDGE_RISING>;
+ label = "modem";
+ qcom,remote-pid = <1>;
+ mboxes = <&apcs_glb 15>;
+ };
+
smd-edge {
interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
@@ -3091,6 +3101,7 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,is-utmi-l1-suspend;
+ snps,parkmode-disable-ss-quirk;
tx-fifo-resize;
};
};
@@ -3497,6 +3508,14 @@
status = "disabled";
+ glink-edge {
+ interrupts = <GIC_SPI 157 IRQ_TYPE_EDGE_RISING>;
+ label = "lpass";
+ qcom,remote-pid = <2>;
+ mboxes = <&apcs_glb 9>;
+ };
+
smd-edge {
interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
@@ -3553,6 +3572,63 @@
};
};
};
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,smd-channels = "fastrpcsmd-apps-dsp";
+ label = "adsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&lpass_q6_smmu 5>;
+ };
+
+ cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&lpass_q6_smmu 6>;
+ };
+
+ cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&lpass_q6_smmu 7>;
+ };
+
+ cb@8 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <8>;
+ iommus = <&lpass_q6_smmu 8>;
+ };
+
+ cb@9 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <9>;
+ iommus = <&lpass_q6_smmu 9>;
+ };
+
+ cb@10 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <10>;
+ iommus = <&lpass_q6_smmu 10>;
+ };
+
+ cb@11 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <11>;
+ iommus = <&lpass_q6_smmu 11>;
+ };
+
+ cb@12 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <12>;
+ iommus = <&lpass_q6_smmu 12>;
+ };
+ };
};
};
@@ -3654,7 +3730,6 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -3675,7 +3750,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -3696,7 +3770,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -3717,7 +3790,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
@@ -3738,7 +3810,6 @@
gpu-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 6>;
@@ -3760,7 +3831,6 @@
gpu-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 7>;
@@ -3782,7 +3852,6 @@
m4m-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -3797,7 +3866,6 @@
l3-or-venus-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -3812,7 +3880,6 @@
cluster0-l2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -3827,7 +3894,6 @@
cluster1-l2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 12>;
@@ -3842,7 +3908,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 1>;
@@ -3857,7 +3922,6 @@
q6-dsp-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 2>;
@@ -3872,7 +3936,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
@@ -3887,7 +3950,6 @@
modemtx-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 4>;
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index d795b2bbe133..7f44807b1b97 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -488,7 +488,6 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -509,7 +508,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -530,7 +528,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -551,7 +548,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
@@ -572,7 +568,6 @@
cpu4-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -593,7 +588,6 @@
cpu5-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -614,7 +608,6 @@
cpu6-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 9>;
@@ -635,7 +628,6 @@
cpu7-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
@@ -656,7 +648,6 @@
gpu-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 12>;
@@ -671,7 +662,6 @@
gpu-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 13>;
@@ -686,7 +676,6 @@
clust0-mhm-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -701,7 +690,6 @@
clust1-mhm-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
@@ -716,7 +704,6 @@
cluster1-l2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 11>;
@@ -731,7 +718,6 @@
modem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 1>;
@@ -746,7 +732,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 2>;
@@ -761,7 +746,6 @@
wlan-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
@@ -776,7 +760,6 @@
q6-dsp-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 4>;
@@ -791,7 +774,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 5>;
@@ -806,7 +788,6 @@
multimedia-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 6>;
@@ -1590,7 +1571,6 @@
* SoC VDDMX RPM Power Domain in the Adreno driver.
*/
power-domains = <&gpucc GPU_GX_GDSC>;
- status = "disabled";
};
gpucc: clock-controller@5065000 {
@@ -2164,6 +2144,7 @@
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,parkmode-disable-ss-quirk;
phys = <&qusb2phy>, <&usb3phy>;
phy-names = "usb2-phy", "usb3-phy";
snps,has-lpm-erratum;
@@ -3020,6 +3001,54 @@
};
};
+ venus: video-codec@cc00000 {
+ compatible = "qcom,msm8998-venus";
+ reg = <0x0cc00000 0xff000>;
+ interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&mmcc VIDEO_TOP_GDSC>;
+ clocks = <&mmcc VIDEO_CORE_CLK>,
+ <&mmcc VIDEO_AHB_CLK>,
+ <&mmcc VIDEO_AXI_CLK>,
+ <&mmcc VIDEO_MAXI_CLK>;
+ clock-names = "core", "iface", "bus", "mbus";
+ iommus = <&mmss_smmu 0x400>,
+ <&mmss_smmu 0x401>,
+ <&mmss_smmu 0x40a>,
+ <&mmss_smmu 0x407>,
+ <&mmss_smmu 0x40e>,
+ <&mmss_smmu 0x40f>,
+ <&mmss_smmu 0x408>,
+ <&mmss_smmu 0x409>,
+ <&mmss_smmu 0x40b>,
+ <&mmss_smmu 0x40c>,
+ <&mmss_smmu 0x40d>,
+ <&mmss_smmu 0x410>,
+ <&mmss_smmu 0x421>,
+ <&mmss_smmu 0x428>,
+ <&mmss_smmu 0x429>,
+ <&mmss_smmu 0x42b>,
+ <&mmss_smmu 0x42c>,
+ <&mmss_smmu 0x42d>,
+ <&mmss_smmu 0x411>,
+ <&mmss_smmu 0x431>;
+ memory-region = <&venus_mem>;
+ status = "disabled";
+
+ video-decoder {
+ compatible = "venus-decoder";
+ clocks = <&mmcc VIDEO_SUBCORE0_CLK>;
+ clock-names = "core";
+ power-domains = <&mmcc VIDEO_SUBCORE0_GDSC>;
+ };
+
+ video-encoder {
+ compatible = "venus-encoder";
+ clocks = <&mmcc VIDEO_SUBCORE1_CLK>;
+ clock-names = "core";
+ power-domains = <&mmcc VIDEO_SUBCORE1_GDSC>;
+ };
+ };
+
mmss_smmu: iommu@cd00000 {
compatible = "qcom,msm8998-smmu-v2", "qcom,smmu-v2";
reg = <0x0cd00000 0x40000>;
@@ -3195,6 +3224,7 @@
iommus = <&anoc2_smmu 0x1900>,
<&anoc2_smmu 0x1901>;
qcom,snoc-host-cap-8bit-quirk;
+ qcom,no-msa-ready-indicator;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm6125.dtsi b/arch/arm64/boot/dts/qcom/pm6125.dtsi
index 99369a0cdb61..d0db28336fa9 100644
--- a/arch/arm64/boot/dts/qcom/pm6125.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6125.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm6125-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm6125_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi
index 6de6ed562d97..59524609fb1e 100644
--- a/arch/arm64/boot/dts/qcom/pm6150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi
@@ -13,7 +13,7 @@
thermal-zones {
pm6150_thermal: pm6150-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pm6150_temp>;
trips {
@@ -166,5 +166,11 @@
reg = <0x1 SPMI_USID>;
#address-cells = <1>;
#size-cells = <0>;
+
+ pm6150_vib: vibrator@5300 {
+ compatible = "qcom,pm6150-vib", "qcom,pmi632-vib";
+ reg = <0x5300>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm6150l.dtsi b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
index 0fce45276e5c..334f976f1154 100644
--- a/arch/arm64/boot/dts/qcom/pm6150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
@@ -10,9 +10,6 @@
/ {
thermal-zones {
pm6150l-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&pm6150l_temp>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pm6350.dtsi b/arch/arm64/boot/dts/qcom/pm6350.dtsi
index 3a2a841e83f1..a20ee2457101 100644
--- a/arch/arm64/boot/dts/qcom/pm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6350.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm6350-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm6350_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index 98dc04962fe3..156b2ddff0dc 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -13,7 +13,6 @@
thermal-zones {
pm660-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&pm660_temp>;
@@ -74,6 +73,23 @@
};
};
+ pm660_charger: charger@1000 {
+ compatible = "qcom,pm660-charger";
+ reg = <0x1000>;
+
+ interrupts = <0x0 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x13 0x6 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "usb-plugin", "bat-ov", "wdog-bark", "usbin-icl-change";
+
+ io-channels = <&pm660_rradc 3>,
+ <&pm660_rradc 4>;
+ io-channel-names = "usbin_i", "usbin_v";
+
+ status = "disabled";
+ };
+
pm660_temp: temp-alarm@2400 {
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400>;
@@ -181,6 +197,14 @@
};
};
+ pm660_rradc: adc@4500 {
+ compatible = "qcom,pm660-rradc";
+ reg = <0x4500>;
+ #io-channel-cells = <1>;
+
+ status = "disabled";
+ };
+
pm660_gpios: gpio@c000 {
compatible = "qcom,pm660-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
index 6fdbf507c262..0094e0ef058b 100644
--- a/arch/arm64/boot/dts/qcom/pm660l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
@@ -13,7 +13,6 @@
thermal-zones {
pm660l-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&pm660l_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pm7250b.dtsi b/arch/arm64/boot/dts/qcom/pm7250b.dtsi
index 3bf7cf5d1700..0761e6b5fd8d 100644
--- a/arch/arm64/boot/dts/qcom/pm7250b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm7250b.dtsi
@@ -11,7 +11,6 @@
thermal-zones {
pm7250b-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm7250b_temp>;
@@ -45,6 +44,52 @@
#address-cells = <1>;
#size-cells = <0>;
+ pm7250b_vbus: usb-vbus-regulator@1100 {
+ compatible = "qcom,pm7250b-vbus-reg", "qcom,pm8150b-vbus-reg";
+ reg = <0x1100>;
+ status = "disabled";
+ };
+
+ pm7250b_typec: typec@1500 {
+ compatible = "qcom,pm7250b-typec", "qcom,pm8150b-typec";
+ reg = <0x1500>,
+ <0x1700>;
+ interrupts = <PM7250B_SID 0x15 0x00 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x15 0x01 IRQ_TYPE_EDGE_BOTH>,
+ <PM7250B_SID 0x15 0x02 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x15 0x03 IRQ_TYPE_EDGE_BOTH>,
+ <PM7250B_SID 0x15 0x04 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x15 0x05 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x15 0x06 IRQ_TYPE_EDGE_BOTH>,
+ <PM7250B_SID 0x15 0x07 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x00 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x01 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x02 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x03 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x04 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x05 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x06 IRQ_TYPE_EDGE_RISING>,
+ <PM7250B_SID 0x17 0x07 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "or-rid-detect-change",
+ "vpd-detect",
+ "cc-state-change",
+ "vconn-oc",
+ "vbus-change",
+ "attach-detach",
+ "legacy-cable-detect",
+ "try-snk-src-detect",
+ "sig-tx",
+ "sig-rx",
+ "msg-tx",
+ "msg-rx",
+ "msg-tx-failed",
+ "msg-tx-discarded",
+ "msg-rx-discarded",
+ "fr-swap";
+ vdd-vbus-supply = <&pm7250b_vbus>;
+ status = "disabled";
+ };
+
pm7250b_temp: temp-alarm@2400 {
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400>;
diff --git a/arch/arm64/boot/dts/qcom/pm7325.dtsi b/arch/arm64/boot/dts/qcom/pm7325.dtsi
index d1c5476af5ee..6e29468505b2 100644
--- a/arch/arm64/boot/dts/qcom/pm7325.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm7325.dtsi
@@ -35,7 +35,7 @@
&thermal_zones {
pm7325_thermal: pm7325-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pm7325_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pm7550ba.dtsi b/arch/arm64/boot/dts/qcom/pm7550ba.dtsi
index 8b00ece987d1..853a1d83a7f0 100644
--- a/arch/arm64/boot/dts/qcom/pm7550ba.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm7550ba.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm7550ba-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm7550ba_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pm8010.dtsi b/arch/arm64/boot/dts/qcom/pm8010.dtsi
index 0ea641e12209..ef330194946b 100644
--- a/arch/arm64/boot/dts/qcom/pm8010.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8010.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm8010-m-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8010_m_temp_alarm>;
@@ -31,7 +30,6 @@
pm8010-n-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8010_n_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index 3ba3ba5d8fce..a74a7ff660d2 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -13,7 +13,6 @@
thermal-zones {
pm8150-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8150_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index 1aee3270ce7b..3f7b0b6a1d10 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -12,7 +12,6 @@
thermal-zones {
pm8150b-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8150b_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index ac08a09c64c2..3911d6d0d2e2 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -12,7 +12,6 @@
thermal-zones {
pm8150l-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8150l_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pm8350.dtsi b/arch/arm64/boot/dts/qcom/pm8350.dtsi
index 9ed9ba23e81e..cb55b23688d6 100644
--- a/arch/arm64/boot/dts/qcom/pm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8350.dtsi
@@ -10,7 +10,7 @@
thermal-zones {
pm8350_thermal: pm8350-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pm8350_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pm8350b.dtsi b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
index 05c105898892..cf82f8a64a9b 100644
--- a/arch/arm64/boot/dts/qcom/pm8350b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
@@ -10,7 +10,7 @@
thermal-zones {
pm8350b_thermal: pm8350b-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pm8350b_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pm8350c.dtsi b/arch/arm64/boot/dts/qcom/pm8350c.dtsi
index aa74e21fe0dc..1a24e6439e36 100644
--- a/arch/arm64/boot/dts/qcom/pm8350c.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8350c.dtsi
@@ -48,7 +48,7 @@
thermal-zones {
pm8350c_thermal: pm8350c-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pm8350c_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pm8450.dtsi b/arch/arm64/boot/dts/qcom/pm8450.dtsi
index ae5bce3cf46e..decb8809fd36 100644
--- a/arch/arm64/boot/dts/qcom/pm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8450.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm8450-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8450_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pm8550.dtsi b/arch/arm64/boot/dts/qcom/pm8550.dtsi
index 797a18c249a4..896bcacb6490 100644
--- a/arch/arm64/boot/dts/qcom/pm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8550.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm8550-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8550_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pm8550b.dtsi b/arch/arm64/boot/dts/qcom/pm8550b.dtsi
index 72609f31c890..74d23b8970f4 100644
--- a/arch/arm64/boot/dts/qcom/pm8550b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8550b.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm8550b-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8550b_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pm8550ve.dtsi b/arch/arm64/boot/dts/qcom/pm8550ve.dtsi
index 4dc1f03ab2c7..9d4734eabf5a 100644
--- a/arch/arm64/boot/dts/qcom/pm8550ve.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8550ve.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm8550ve-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8550ve_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pm8550vs.dtsi b/arch/arm64/boot/dts/qcom/pm8550vs.dtsi
index 97b1c18aa7d8..6426b431616b 100644
--- a/arch/arm64/boot/dts/qcom/pm8550vs.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8550vs.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pm8550vs-c-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8550vs_c_temp_alarm>;
@@ -31,7 +30,6 @@
pm8550vs-d-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8550vs_d_temp_alarm>;
@@ -52,7 +50,6 @@
pm8550vs-e-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8550vs_e_temp_alarm>;
@@ -73,7 +70,6 @@
pm8550vs-g-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pm8550vs_g_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index 4b2e8fb47d2d..f8e4829ff7f7 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -4,8 +4,37 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
-&spmi_bus {
+/ {
+ thermal-zones {
+ pm8916-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8916_temp>;
+
+ trips {
+ trip0 {
+ temperature = <105000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+
+ trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
pm8916_0: pmic@0 {
compatible = "qcom,pm8916", "qcom,spmi-pmic";
reg = <0x0 SPMI_USID>;
diff --git a/arch/arm64/boot/dts/qcom/pm8953.dtsi b/arch/arm64/boot/dts/qcom/pm8953.dtsi
index 1067e141be6c..64258505f9ba 100644
--- a/arch/arm64/boot/dts/qcom/pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8953.dtsi
@@ -9,9 +9,6 @@
/ {
thermal-zones {
pm8953-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&pm8953_temp>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index d44a95caf04a..353e4a6bd088 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -8,7 +8,6 @@
thermal-zones {
pm8994-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&pm8994_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 3f82715392c6..3ecb330590e5 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -11,7 +11,6 @@
thermal-zones {
pm8998-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&pm8998_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index 94d53b1cf6c8..8c899d148e46 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -11,7 +11,6 @@
thermal-zones {
pmi632-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmi632_temp>;
@@ -200,5 +199,11 @@
status = "disabled";
};
+
+ pmi632_vib: vibrator@5700 {
+ compatible = "qcom,pmi632-vib";
+ reg = <0x5700>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 1029f3b1bb9a..b4822cb17a37 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -84,6 +84,14 @@
#address-cells = <1>;
#size-cells = <0>;
+ pmi8950_pwm: pwm@b000 {
+ compatible = "qcom,pmi8950-pwm";
+ reg = <0xb000 0x100>;
+ #pwm-cells = <2>;
+
+ status = "disabled";
+ };
+
pmi8950_wled: leds@d800 {
compatible = "qcom,pmi8950-wled";
reg = <0xd800>, <0xd900>;
diff --git a/arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi b/arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi
index dbd4b91dfe06..5084de66fc46 100644
--- a/arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi
@@ -12,7 +12,6 @@
thermal-zones {
pmm8155au-1-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmm8155au_1_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi b/arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi
index 1cee20ac2c9c..555e4a456ef1 100644
--- a/arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi
@@ -11,7 +11,6 @@
thermal-zones {
pmm8155au-2-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmm8155au_2_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pmr735a.dtsi b/arch/arm64/boot/dts/qcom/pmr735a.dtsi
index febda50779f9..f8efd8e5e68f 100644
--- a/arch/arm64/boot/dts/qcom/pmr735a.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmr735a.dtsi
@@ -36,7 +36,7 @@
thermal-zones {
pmr735a_thermal: pmr735a-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pmr735a_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pmr735b.dtsi b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
index f7473e247322..09affc05b397 100644
--- a/arch/arm64/boot/dts/qcom/pmr735b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
@@ -10,7 +10,7 @@
thermal-zones {
pmr735b_thermal: pmr735b-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pmr735b_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/pmr735d_a.dtsi b/arch/arm64/boot/dts/qcom/pmr735d_a.dtsi
index 37daaefe3431..f9f1793d310e 100644
--- a/arch/arm64/boot/dts/qcom/pmr735d_a.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmr735d_a.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pmr735d-k-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmr735d_k_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pmr735d_b.dtsi b/arch/arm64/boot/dts/qcom/pmr735d_b.dtsi
index 3b470f6ac46f..d91fbd3bff10 100644
--- a/arch/arm64/boot/dts/qcom/pmr735d_b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmr735d_b.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pmr735d-l-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmr735d_l_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/pms405.dtsi b/arch/arm64/boot/dts/qcom/pms405.dtsi
index 461ad97032f7..3f9100c7eff4 100644
--- a/arch/arm64/boot/dts/qcom/pms405.dtsi
+++ b/arch/arm64/boot/dts/qcom/pms405.dtsi
@@ -12,7 +12,6 @@
thermal-zones {
pms405-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&pms405_temp>;
diff --git a/arch/arm64/boot/dts/qcom/pmx75.dtsi b/arch/arm64/boot/dts/qcom/pmx75.dtsi
index 373e45f63dff..2e61b7849c92 100644
--- a/arch/arm64/boot/dts/qcom/pmx75.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmx75.dtsi
@@ -10,7 +10,6 @@
thermal-zones {
pmx75-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmx75_temp_alarm>;
diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
index 106110a9f551..8f3be4c75db3 100644
--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/clock/qcom,dispcc-qcm2290.h>
#include <dt-bindings/clock/qcom,gcc-qcm2290.h>
+#include <dt-bindings/clock/qcom,qcm2290-gpucc.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/firmware/qcom,scm.h>
@@ -758,6 +759,11 @@
reg = <0x25b 0x1>;
bits = <1 4>;
};
+
+ gpu_speed_bin: gpu-speed-bin@2006 {
+ reg = <0x2006 0x2>;
+ bits = <5 8>;
+ };
};
pmu@1b8e300 {
@@ -1425,6 +1431,154 @@
};
};
+ gpu: gpu@5900000 {
+ compatible = "qcom,adreno-07000200", "qcom,adreno";
+ reg = <0x0 0x05900000 0x0 0x40000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gpucc GPU_CC_GX_GFX3D_CLK>,
+ <&gpucc GPU_CC_AHB_CLK>,
+ <&gcc GCC_BIMC_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>;
+ clock-names = "core",
+ "iface",
+ "mem_iface",
+ "alt_mem_iface",
+ "gmu",
+ "xo";
+
+ interconnects = <&bimc MASTER_GFX3D RPM_ALWAYS_TAG
+ &bimc SLAVE_EBI1 RPM_ALWAYS_TAG>;
+ interconnect-names = "gfx-mem";
+
+ iommus = <&adreno_smmu 0 1>,
+ <&adreno_smmu 2 0>;
+ operating-points-v2 = <&gpu_opp_table>;
+ power-domains = <&rpmpd QCM2290_VDDCX>;
+ qcom,gmu = <&gmu_wrapper>;
+
+ nvmem-cells = <&gpu_speed_bin>;
+ nvmem-cell-names = "speed_bin";
+ #cooling-cells = <2>;
+
+ status = "disabled";
+
+ zap-shader {
+ memory-region = <&pil_gpu_mem>;
+ };
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* TODO: Scale RPM_SMD_BIMC_GPU_CLK w/ turbo freqs */
+ opp-1123200000 {
+ opp-hz = /bits/ 64 <1123200000>;
+ required-opps = <&rpmpd_opp_turbo_plus>;
+ opp-peak-kBps = <6881000>;
+ opp-supported-hw = <0x3>;
+ turbo-mode;
+ };
+
+ opp-1017600000 {
+ opp-hz = /bits/ 64 <1017600000>;
+ required-opps = <&rpmpd_opp_turbo>;
+ opp-peak-kBps = <6881000>;
+ opp-supported-hw = <0x3>;
+ turbo-mode;
+ };
+
+ opp-921600000 {
+ opp-hz = /bits/ 64 <921600000>;
+ required-opps = <&rpmpd_opp_nom_plus>;
+ opp-peak-kBps = <6881000>;
+ opp-supported-hw = <0x3>;
+ };
+
+ opp-844800000 {
+ opp-hz = /bits/ 64 <844800000>;
+ required-opps = <&rpmpd_opp_nom>;
+ opp-peak-kBps = <6881000>;
+ opp-supported-hw = <0x7>;
+ };
+
+ opp-672000000 {
+ opp-hz = /bits/ 64 <672000000>;
+ required-opps = <&rpmpd_opp_svs_plus>;
+ opp-peak-kBps = <3879000>;
+ opp-supported-hw = <0xf>;
+ };
+
+ opp-537600000 {
+ opp-hz = /bits/ 64 <537600000>;
+ required-opps = <&rpmpd_opp_svs>;
+ opp-peak-kBps = <2929000>;
+ opp-supported-hw = <0xf>;
+ };
+
+ opp-355200000 {
+ opp-hz = /bits/ 64 <355200000>;
+ required-opps = <&rpmpd_opp_low_svs>;
+ opp-peak-kBps = <1720000>;
+ opp-supported-hw = <0xf>;
+ };
+ };
+ };
+
+ gmu_wrapper: gmu@596a000 {
+ compatible = "qcom,adreno-gmu-wrapper";
+ reg = <0x0 0x0596a000 0x0 0x30000>;
+ reg-names = "gmu";
+ power-domains = <&gpucc GPU_CX_GDSC>,
+ <&gpucc GPU_GX_GDSC>;
+ power-domain-names = "cx",
+ "gx";
+ };
+
+ gpucc: clock-controller@5990000 {
+ compatible = "qcom,qcm2290-gpucc";
+ reg = <0x0 0x05990000 0x0 0x9000>;
+ clocks = <&gcc GCC_GPU_CFG_AHB_CLK>,
+ <&rpmcc RPM_SMD_XO_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ power-domains = <&rpmpd QCM2290_VDDCX>;
+ required-opps = <&rpmpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ adreno_smmu: iommu@59a0000 {
+ compatible = "qcom,qcm2290-smmu-500", "qcom,adreno-smmu",
+ "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x059a0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
+ <&gcc GCC_GPU_SNOC_DVM_GFX_CLK>;
+ clock-names = "mem",
+ "hlos",
+ "iface";
+
+ power-domains = <&gpucc GPU_CX_GDSC>;
+
+ #global-interrupts = <1>;
+ #iommu-cells = <2>;
+ };
+
mdss: display-subsystem@5e00000 {
compatible = "qcom,qcm2290-mdss";
reg = <0x0 0x05e00000 0x0 0x1000>;
@@ -1924,9 +2078,6 @@
thermal-zones {
mapss-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 0>;
trips {
@@ -1951,9 +2102,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 1>;
trips {
@@ -1978,9 +2126,6 @@
};
wlan-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 2>;
trips {
@@ -2005,9 +2150,6 @@
};
cpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 3>;
trips {
@@ -2032,9 +2174,6 @@
};
cpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 4>;
trips {
@@ -2059,9 +2198,6 @@
};
mdm0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 5>;
trips {
@@ -2086,9 +2222,6 @@
};
mdm1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 6>;
trips {
@@ -2113,9 +2246,6 @@
};
gpu-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 7>;
trips {
@@ -2140,9 +2270,6 @@
};
hm-center-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 8>;
trips {
@@ -2167,9 +2294,6 @@
};
camera-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 9>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
index f3432701945f..8ab30c01712e 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
@@ -167,7 +167,7 @@
thermal-zones {
camera-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 2>;
trips {
@@ -181,7 +181,7 @@
chg-skin-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
+
thermal-sensors = <&pm7250b_adc_tm 0>;
trips {
@@ -195,7 +195,7 @@
conn-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
+
thermal-sensors = <&pm7250b_adc_tm 1>;
trips {
@@ -207,9 +207,28 @@
};
};
+ pm8008-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&pm8008>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
quiet-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 1>;
trips {
@@ -223,7 +242,7 @@
rear-cam-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 4>;
trips {
@@ -237,7 +256,7 @@
sdm-skin-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 3>;
trips {
@@ -251,7 +270,7 @@
xo-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 0>;
trips {
@@ -271,46 +290,54 @@
qcom,pmic-id = "b";
vreg_s1b: smps1 {
+ regulator-name = "vreg_s1b";
regulator-min-microvolt = <1840000>;
regulator-max-microvolt = <2040000>;
};
vreg_s7b: smps7 {
+ regulator-name = "vreg_s7b";
regulator-min-microvolt = <535000>;
regulator-max-microvolt = <1120000>;
};
vreg_s8b: smps8 {
+ regulator-name = "vreg_s8b";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1500000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_RET>;
};
vreg_l1b: ldo1 {
+ regulator-name = "vreg_l1b";
regulator-min-microvolt = <825000>;
regulator-max-microvolt = <925000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2b: ldo2 {
+ regulator-name = "vreg_l2b";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3b: ldo3 {
+ regulator-name = "vreg_l3b";
regulator-min-microvolt = <312000>;
regulator-max-microvolt = <910000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6b: ldo6 {
+ regulator-name = "vreg_l6b";
regulator-min-microvolt = <1140000>;
regulator-max-microvolt = <1260000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7b: ldo7 {
+ regulator-name = "vreg_l7b";
/* Constrained for UFS VCC, at least until UFS driver scales voltage */
regulator-min-microvolt = <2952000>;
regulator-max-microvolt = <2952000>;
@@ -318,66 +345,77 @@
};
vreg_l8b: ldo8 {
+ regulator-name = "vreg_l8b";
regulator-min-microvolt = <870000>;
regulator-max-microvolt = <970000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9b: ldo9 {
+ regulator-name = "vreg_l9b";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1304000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11b: ldo11 {
+ regulator-name = "vreg_l11b";
regulator-min-microvolt = <1504000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12b: ldo12 {
+ regulator-name = "vreg_l12b";
regulator-min-microvolt = <751000>;
regulator-max-microvolt = <824000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13b: ldo13 {
+ regulator-name = "vreg_l13b";
regulator-min-microvolt = <530000>;
regulator-max-microvolt = <824000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l14b: ldo14 {
+ regulator-name = "vreg_l14b";
regulator-min-microvolt = <1080000>;
regulator-max-microvolt = <1304000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l15b: ldo15 {
+ regulator-name = "vreg_l15b";
regulator-min-microvolt = <765000>;
regulator-max-microvolt = <1020000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l16b: ldo16 {
+ regulator-name = "vreg_l16b";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l17b: ldo17 {
+ regulator-name = "vreg_l17b";
regulator-min-microvolt = <1700000>;
regulator-max-microvolt = <1900000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l18b: ldo18 {
+ regulator-name = "vreg_l18b";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l19b: ldo19 {
+ regulator-name = "vreg_l19b";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -389,60 +427,70 @@
qcom,pmic-id = "c";
vreg_s1c: smps1 {
+ regulator-name = "vreg_s1c";
regulator-min-microvolt = <2190000>;
regulator-max-microvolt = <2210000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_s9c: smps9 {
+ regulator-name = "vreg_s9c";
regulator-min-microvolt = <1010000>;
regulator-max-microvolt = <1170000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l1c: ldo1 {
+ regulator-name = "vreg_l1c";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2c: ldo2 {
+ regulator-name = "vreg_l2c";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1950000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3c: ldo3 {
+ regulator-name = "vreg_l3c";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3400000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4c: ldo4 {
+ regulator-name = "vreg_l4c";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l5c: ldo5 {
+ regulator-name = "vreg_l5c";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6c: ldo6 {
+ regulator-name = "vreg_l6c";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c: ldo7 {
+ regulator-name = "vreg_l7c";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8c: ldo8 {
+ regulator-name = "vreg_l8c";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -451,36 +499,42 @@
};
vreg_l9c: ldo9 {
+ regulator-name = "vreg_l9c";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c: ldo10 {
+ regulator-name = "vreg_l10c";
regulator-min-microvolt = <720000>;
regulator-max-microvolt = <1050000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11c: ldo11 {
+ regulator-name = "vreg_l11c";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12c: ldo12 {
+ regulator-name = "vreg_l12c";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13c: ldo13 {
+ regulator-name = "vreg_l13c";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_bob: bob {
+ regulator-name = "vreg_bob";
regulator-min-microvolt = <3008000>;
regulator-max-microvolt = <3960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
@@ -522,7 +576,76 @@
&i2c1 {
status = "okay";
- /* PM8008 PMIC @ 8 and 9 */
+ pm8008: pmic@8 {
+ compatible = "qcom,pm8008";
+ reg = <0x8>;
+
+ interrupts-extended = <&tlmm 25 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&pm8350c_gpios 3 GPIO_ACTIVE_LOW>;
+
+ vdd-l1-l2-supply = <&vreg_s8b>;
+ vdd-l3-l4-supply = <&vreg_bob>;
+ vdd-l5-supply = <&vreg_bob>;
+ vdd-l6-supply = <&vreg_s1b>;
+ vdd-l7-supply = <&vreg_bob>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pm8008_int_default>, <&pm8008_reset_n_default>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pm8008 0 0 2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ #thermal-sensor-cells = <0>;
+
+ regulators {
+ vreg_l1p: ldo1 {
+ regulator-name = "vreg_l1p";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vreg_l2p: ldo2 {
+ regulator-name = "vreg_l2p";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1152000>;
+ };
+
+ vreg_l3p: ldo3 {
+ regulator-name = "vreg_l3p";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ vreg_l4p: ldo4 {
+ regulator-name = "vreg_l4p";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ vreg_l5p: ldo5 {
+ regulator-name = "vreg_l5p";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ vreg_l6p: ldo6 {
+ regulator-name = "vreg_l6p";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1904000>;
+ };
+
+ vreg_l7p: ldo7 {
+ regulator-name = "vreg_l7p";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
+
/* Pixelworks @ 26 */
/* FSA4480 USB audio switch @ 42 */
/* AW86927FCR haptics @ 5a */
@@ -551,7 +674,7 @@
&ipa {
qcom,gsi-loader = "self";
memory-region = <&ipa_fw_mem>;
- firmware-name = "qcom/qcm6490/fairphone5/ipa_fws.mdt";
+ firmware-name = "qcom/qcm6490/fairphone5/ipa_fws.mbn";
status = "okay";
};
@@ -653,6 +776,14 @@
};
};
+&pm8350c_gpios {
+ pm8008_reset_n_default: pm8008-reset-n-default-state {
+ pins = "gpio3";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ bias-pull-down;
+ };
+};
+
&pmk8350_rtc {
status = "okay";
};
@@ -810,6 +941,13 @@
bias-pull-up;
};
+ pm8008_int_default: pm8008-int-default-state {
+ pins = "gpio25";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
qup_uart7_sleep_cts: qup-uart7-sleep-cts-state {
pins = "gpio28";
function = "gpio";
@@ -864,7 +1002,6 @@
};
&uart5 {
- compatible = "qcom,geni-debug-uart";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
index 47ca2d000341..a0668f767e4b 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
@@ -658,7 +658,6 @@
};
&uart5 {
- compatible = "qcom,geni-debug-uart";
status = "okay";
};
@@ -667,6 +666,7 @@
};
&usb_1_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
new file mode 100644
index 000000000000..4667e47a74bc
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023, Luca Weiss <luca.weiss@fairphone.com>
+ * Copyright (c) 2024, Caleb Connolly <caleb@postmarketos.org>
+ */
+
+/dts-v1/;
+
+#define PM7250B_SID 8
+#define PM7250B_SID1 9
+
+#include <dt-bindings/iio/qcom,spmi-adc7-pm7325.h>
+#include <dt-bindings/iio/qcom,spmi-adc7-pmk8350.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sc7280.dtsi"
+#include "pm7250b.dtsi"
+#include "pm7325.dtsi"
+#include "pm8350c.dtsi" /* PM7350C */
+#include "pmk8350.dtsi" /* PMK7325 */
+
+/delete-node/ &rmtfs_mem;
+
+/ {
+ model = "SHIFT SHIFTphone 8";
+ compatible = "shift,otter", "qcom,qcm6490";
+ chassis-type = "handset";
+
+ aliases {
+ serial0 = &uart5;
+ serial1 = &uart7;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ stdout-path = "serial0:115200n8";
+
+ framebuffer0: framebuffer@a000000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0xe1000000 0x0 (2400 * 1080 * 4)>;
+ width = <1080>;
+ height = <2400>;
+ stride = <(1080 * 4)>;
+ format = "a8r8g8b8";
+ clocks = <&gcc GCC_DISP_HF_AXI_CLK>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&volume_down_default>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume up";
+ gpios = <&pm7325_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,qcm6490-pmic-glink", "qcom,pmic-glink";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss_in: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_sbu: endpoint {
+ remote-endpoint = <&fsa4480_sbu_mux>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ cont_splash_mem: cont-splash@e1000000 {
+ reg = <0x0 0xe1000000 0x0 0x2300000>;
+ no-map;
+ };
+
+ cdsp_mem: cdsp@88f00000 {
+ reg = <0x0 0x88f00000 0x0 0x1e00000>;
+ no-map;
+ };
+
+ rmtfs_mem: rmtfs@f8500000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x0 0xf8500000 0x0 0x600000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <QCOM_SCM_VMID_MSS_MSA>, <QCOM_SCM_VMID_NAV>;
+ };
+ };
+
+ thermal-zones {
+ camera-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmk8350_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ chg-skin-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm7250b_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ conn-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm7250b_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ quiet-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmk8350_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ rear-cam-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmk8350_adc_tm 4>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ sdm-skin-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmk8350_adc_tm 3>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ xo-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmk8350_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm7325-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vreg_s1b: smps1 {
+ regulator-name = "vreg_s1b";
+ regulator-min-microvolt = <1840000>;
+ regulator-max-microvolt = <2040000>;
+ };
+
+ vreg_s7b: smps7 {
+ regulator-name = "vreg_s7b";
+ regulator-min-microvolt = <535000>;
+ regulator-max-microvolt = <1120000>;
+ };
+
+ vreg_s8b: smps8 {
+ regulator-name = "vreg_s8b";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_RET>;
+ };
+
+ vreg_l1b: ldo1 {
+ regulator-name = "vreg_l1b";
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <925000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b: ldo2 {
+ regulator-name = "vreg_l2b";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3b: ldo3 {
+ regulator-name = "vreg_l3b";
+ regulator-min-microvolt = <312000>;
+ regulator-max-microvolt = <910000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b: ldo6 {
+ regulator-name = "vreg_l6b";
+ regulator-min-microvolt = <1140000>;
+ regulator-max-microvolt = <1260000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b: ldo7 {
+ regulator-name = "vreg_l7b";
+ /* Constrained for UFS VCC, at least until UFS driver scales voltage */
+ regulator-min-microvolt = <2952000>;
+ regulator-max-microvolt = <2952000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b: ldo8 {
+ regulator-name = "vreg_l8b";
+ regulator-min-microvolt = <870000>;
+ regulator-max-microvolt = <970000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b: ldo9 {
+ regulator-name = "vreg_l9b";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11b: ldo11 {
+ regulator-name = "vreg_l11b";
+ regulator-min-microvolt = <1504000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b: ldo12 {
+ regulator-name = "vreg_l12b";
+ regulator-min-microvolt = <751000>;
+ regulator-max-microvolt = <824000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b: ldo13 {
+ regulator-name = "vreg_l13b";
+ regulator-min-microvolt = <530000>;
+ regulator-max-microvolt = <824000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b: ldo14 {
+ regulator-name = "vreg_l14b";
+ regulator-min-microvolt = <1080000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b: ldo15 {
+ regulator-name = "vreg_l15b";
+ regulator-min-microvolt = <765000>;
+ regulator-max-microvolt = <1020000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b: ldo16 {
+ regulator-name = "vreg_l16b";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b: ldo17 {
+ regulator-name = "vreg_l17b";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l18b: ldo18 {
+ regulator-name = "vreg_l18b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l19b: ldo19 {
+ regulator-name = "vreg_l19b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8350c-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vreg_s1c: smps1 {
+ regulator-name = "vreg_s1c";
+ regulator-min-microvolt = <2190000>;
+ regulator-max-microvolt = <2210000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s9c: smps9 {
+ regulator-name = "vreg_s9c";
+ regulator-min-microvolt = <1010000>;
+ regulator-max-microvolt = <1170000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c: ldo1 {
+ regulator-name = "vreg_l1c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1980000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c: ldo2 {
+ regulator-name = "vreg_l2c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1950000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c: ldo3 {
+ regulator-name = "vreg_l3c";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c: ldo4 {
+ regulator-name = "vreg_l4c";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5c: ldo5 {
+ regulator-name = "vreg_l5c";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c: ldo6 {
+ regulator-name = "vreg_l6c";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c: ldo7 {
+ regulator-name = "vreg_l7c";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c: ldo8 {
+ regulator-name = "vreg_l8c";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c: ldo9 {
+ regulator-name = "vreg_l9c";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10c: ldo10 {
+ regulator-name = "vreg_l10c";
+ regulator-min-microvolt = <720000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c: ldo11 {
+ regulator-name = "vreg_l11c";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12c: ldo12 {
+ regulator-name = "vreg_l12c";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13c: ldo13 {
+ regulator-name = "vreg_l13c";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+ };
+};
+
+&gcc {
+ protected-clocks = <GCC_CFG_NOC_LPASS_CLK>,
+ <GCC_EDP_CLKREF_EN>,
+ <GCC_MSS_CFG_AHB_CLK>,
+ <GCC_MSS_GPLL0_MAIN_DIV_CLK_SRC>,
+ <GCC_MSS_OFFLINE_AXI_CLK>,
+ <GCC_MSS_Q6SS_BOOT_CLK_SRC>,
+ <GCC_MSS_Q6_MEMNOC_AXI_CLK>,
+ <GCC_MSS_SNOC_AXI_CLK>,
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_QSPI_CORE_CLK>,
+ <GCC_QSPI_CORE_CLK_SRC>,
+ <GCC_SEC_CTRL_CLK_SRC>,
+ <GCC_WPSS_AHB_BDG_MST_CLK>,
+ <GCC_WPSS_AHB_CLK>,
+ <GCC_WPSS_RSCP_CLK>;
+};
+
+&gpi_dma0 {
+ status = "okay";
+};
+
+&gpi_dma1 {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+};
+
+&gpu_zap_shader {
+ firmware-name = "qcom/qcm6490/SHIFT/otter/a660_zap.mbn";
+};
+
+&i2c1 {
+ status = "okay";
+
+ /* PM8008 PMIC @ 8 and 9 */
+ /* rtc6226 FM receiver @ 64 */
+
+ typec-mux@42 {
+ compatible = "fcs,fsa4480";
+ reg = <0x42>;
+
+ vcc-supply = <&vreg_bob>;
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ fsa4480_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_sbu>;
+ };
+ };
+ };
+};
+
+&i2c4 {
+ status = "okay";
+
+ /* tas2563 audio codec @ 4d */
+};
+
+&i2c9 {
+ status = "okay";
+
+ /* TMS(?) NFC @ 28 */
+ /* TI drv2624 haptics @ 5a */
+};
+
+&i2c13 {
+ status = "okay";
+
+ /* focaltech FT3658U @ 38 */
+};
+
+&ipa {
+ qcom,gsi-loader = "self";
+ memory-region = <&ipa_fw_mem>;
+ firmware-name = "qcom/qcm6490/SHIFT/otter/ipa_fws.mbn";
+ status = "okay";
+};
+
+&pm7250b_adc {
+ channel@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "charger_skin_therm";
+ };
+
+ channel@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "conn_therm";
+ };
+};
+
+&pm7250b_adc_tm {
+ status = "okay";
+
+ charger-skin-therm@0 {
+ reg = <0>;
+ io-channels = <&pm7250b_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ conn-therm@1 {
+ reg = <1>;
+ io-channels = <&pm7250b_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm7325_gpios {
+ volume_down_default: volume-down-default-state {
+ pins = "gpio6";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ power-source = <1>;
+ bias-pull-up;
+ input-enable;
+ };
+};
+
+&pmk8350_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pmk8350_vadc PMK8350_ADC7_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ quiet-therm@1 {
+ reg = <1>;
+ io-channels = <&pmk8350_vadc PM7325_ADC7_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ cam-flash-therm@2 {
+ reg = <2>;
+ io-channels = <&pmk8350_vadc PM7325_ADC7_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ sdm-skin-therm@3 {
+ reg = <3>;
+ io-channels = <&pmk8350_vadc PM7325_ADC7_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ wide-rfc-therm@4 {
+ reg = <4>;
+ io-channels = <&pmk8350_vadc PM7325_ADC7_AMUX_THM4_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pmk8350_rtc {
+ status = "okay";
+};
+
+&pmk8350_vadc {
+ status = "okay";
+
+ channel@44 {
+ reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "pmk8350_xo_therm";
+ };
+
+ channel@144 {
+ reg = <PM7325_ADC7_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "pm7325_quiet_therm";
+ };
+
+ channel@145 {
+ reg = <PM7325_ADC7_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "pm7325_cam_flash_therm";
+ };
+
+ channel@146 {
+ reg = <PM7325_ADC7_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "pm7325_sdm_skin_therm";
+ };
+
+ channel@147 {
+ reg = <PM7325_ADC7_AMUX_THM4_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "pm7325_wide_rfc_therm";
+ };
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&qup_spi13_cs {
+ drive-strength = <6>;
+ bias-disable;
+};
+
+&qup_spi13_data_clk {
+ drive-strength = <6>;
+ bias-disable;
+};
+
+&qup_uart5_rx {
+ drive-strength = <2>;
+ bias-disable;
+};
+
+&qup_uart5_tx {
+ drive-strength = <2>;
+ bias-disable;
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/qcm6490/SHIFT/otter/adsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/qcm6490/SHIFT/otter/cdsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/qcm6490/SHIFT/otter/modem.mbn";
+ status = "okay";
+};
+
+&remoteproc_wpss {
+ firmware-name = "qcom/qcm6490/SHIFT/otter/wpss.mbn";
+ status = "okay";
+};
+
+&sdc2_clk {
+ drive-strength = <16>;
+ bias-disable;
+};
+
+&sdc2_cmd {
+ drive-strength = <10>;
+ bias-pull-up;
+};
+
+&sdc2_data {
+ drive-strength = <10>;
+ bias-pull-up;
+};
+
+&sdhc_2 {
+ vmmc-supply = <&vreg_l9c>;
+ vqmmc-supply = <&vreg_l6c>;
+
+ pinctrl-0 = <&sdc2_clk>, <&sdc2_cmd>, <&sdc2_data>;
+ pinctrl-1 = <&sdc2_clk_sleep>, <&sdc2_cmd_sleep>, <&sdc2_data_sleep>;
+
+ status = "okay";
+};
+
+&tlmm {
+ /*
+ * 48-52: protected by XPU, not sure why.
+ */
+ gpio-reserved-ranges = <48 4>;
+
+ bluetooth_enable_default: bluetooth-enable-default-state {
+ pins = "gpio85";
+ function = "gpio";
+ output-low;
+ bias-disable;
+ };
+
+ qup_uart7_sleep_cts: qup-uart7-sleep-cts-state {
+ pins = "gpio28";
+ function = "gpio";
+ /*
+ * Configure a bias-bus-hold on CTS to lower power
+ * usage when Bluetooth is turned off. Bus hold will
+ * maintain a low power state regardless of whether
+ * the Bluetooth module drives the pin in either
+ * direction or leaves the pin fully unpowered.
+ */
+ bias-bus-hold;
+ };
+
+ qup_uart7_sleep_rts: qup-uart7-sleep-rts-state {
+ pins = "gpio29";
+ function = "gpio";
+ /*
+ * Configure a pull-down on RTS. As RTS is an active-low
+ * signal, pull it low to indicate to the BT SoC that it
+ * can wake up the system at any time from suspend by
+ * pulling RX low (i.e. by sending wakeup bytes).
+ */
+ bias-pull-down;
+ };
+
+ qup_uart7_sleep_tx: qup-uart7-sleep-tx-state {
+ pins = "gpio30";
+ function = "gpio";
+ /*
+ * Configure a pull-up on TX when it isn't actively driven
+ * to prevent the BT SoC from receiving garbage during sleep.
+ */
+ bias-pull-up;
+ };
+
+ qup_uart7_sleep_rx: qup-uart7-sleep-rx-state {
+ pins = "gpio31";
+ function = "gpio";
+ /*
+ * Configure a pull-up on RX. This is needed to avoid
+ * garbage data when the TX pin of the Bluetooth module
+ * is floating, which may cause spurious wakeups.
+ */
+ bias-pull-up;
+ };
+
+ sw_ctrl_default: sw-ctrl-default-state {
+ pins = "gpio86";
+ function = "gpio";
+ bias-pull-down;
+ };
+};
+
+&uart5 {
+ compatible = "qcom,geni-debug-uart";
+ status = "okay";
+};
+
+&uart7 {
+ /delete-property/interrupts;
+ interrupts-extended = <&intc GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>,
+ <&tlmm 31 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-1 = <&qup_uart7_sleep_cts>, <&qup_uart7_sleep_rts>, <&qup_uart7_sleep_tx>, <&qup_uart7_sleep_rx>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+
+ bluetooth: bluetooth {
+ compatible = "qcom,wcn6750-bt";
+
+ pinctrl-0 = <&bluetooth_enable_default>, <&sw_ctrl_default>;
+ pinctrl-names = "default";
+
+ enable-gpios = <&tlmm 85 GPIO_ACTIVE_HIGH>;
+ swctrl-gpios = <&tlmm 86 GPIO_ACTIVE_HIGH>;
+
+ vddio-supply = <&vreg_l19b>;
+ vddaon-supply = <&vreg_s7b>;
+ vddbtcxmx-supply = <&vreg_s7b>;
+ vddrfacmn-supply = <&vreg_s7b>;
+ vddrfa0p8-supply = <&vreg_s7b>;
+ vddrfa1p7-supply = <&vreg_s1b>;
+ vddrfa1p2-supply = <&vreg_s8b>;
+ vddrfa2p2-supply = <&vreg_s1c>;
+ vddasd-supply = <&vreg_l11c>;
+
+ max-speed = <3200000>;
+ };
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 175 GPIO_ACTIVE_LOW>;
+
+ vcc-supply = <&vreg_l7b>;
+ vcc-max-microamp = <800000>;
+ /*
+ * Technically l9b enables an eLDO (supplied by s1b), which then powers
+ * VCCQ2 of the UFS.
+ */
+ vccq-supply = <&vreg_l9b>;
+ vccq-max-microamp = <900000>;
+
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l10c>;
+ vdda-pll-supply = <&vreg_l6b>;
+
+ status = "okay";
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "otg";
+ usb-role-switch;
+};
+
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_hs_in>;
+};
+
+&usb_dp_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss_in>;
+};
+
+&usb_1_hsphy {
+ vdda-pll-supply = <&vreg_l10c>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l2b>;
+
+ qcom,hs-crossover-voltage-microvolt = <28000>;
+ qcom,hs-output-impedance-micro-ohms = <2600000>;
+ qcom,hs-rise-fall-time-bp = <5430>;
+ qcom,hs-disconnect-bp = <1743>;
+ qcom,hs-amplitude-bp = <2430>;
+
+ qcom,pre-emphasis-amplitude-bp = <20000>;
+ qcom,pre-emphasis-duration-bp = <20000>;
+
+ qcom,squelch-detector-bp = <(-2090)>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy {
+ vdda-phy-supply = <&vreg_l6b>;
+ vdda-pll-supply = <&vreg_l1b>;
+
+ status = "okay";
+};
+
+&wifi {
+ qcom,ath11k-calibration-variant = "SHIFTphone_8";
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index ac451f378056..c291bbed6073 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -1324,7 +1324,7 @@
};
apcs_hfpll: clock-controller@b016000 {
- compatible = "qcom,hfpll";
+ compatible = "qcom,qcs404-hfpll";
reg = <0x0b016000 0x30>;
#clock-cells = <0>;
clock-output-names = "apcs_hfpll";
@@ -1531,10 +1531,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 2 0xff08>,
- <GIC_PPI 3 0xff08>,
- <GIC_PPI 4 0xff08>,
- <GIC_PPI 1 0xff08>;
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};
smp2p-adsp {
@@ -1600,7 +1600,6 @@
thermal-zones {
aoss-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 0>;
@@ -1615,7 +1614,6 @@
q6-hvx-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 1>;
@@ -1630,7 +1628,6 @@
lpass-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 2>;
@@ -1645,7 +1642,6 @@
wlan-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 3>;
@@ -1660,7 +1656,6 @@
cluster-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 4>;
@@ -1694,7 +1689,6 @@
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 5>;
@@ -1728,7 +1722,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 6>;
@@ -1762,7 +1755,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 7>;
@@ -1796,7 +1788,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 8>;
@@ -1830,7 +1821,6 @@
gpu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 9>;
diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
index a085ff5b5fb2..0d45662b8028 100644
--- a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
+++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
@@ -52,6 +52,25 @@
};
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&lt9611_out>;
+ };
+ };
+ };
+
+ lt9611_1v2: lt9611-vdd12-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "LT9611_1V2";
+
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
reserved-memory {
xbl_mem: xbl@80700000 {
reg = <0x0 0x80700000 0x0 0x100000>;
@@ -530,6 +549,54 @@
<GCC_WPSS_RSCP_CLK>;
};
+&gpi_dma0 {
+ status = "okay";
+};
+
+&gpi_dma1 {
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ lt9611_codec: hdmi-bridge@2b {
+ compatible = "lontium,lt9611uxc";
+ reg = <0x2b>;
+
+ interrupts-extended = <&tlmm 24 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pm7250b_gpios 2 GPIO_ACTIVE_HIGH>;
+
+ vdd-supply = <&lt9611_1v2>;
+ vcc-supply = <&vreg_l11c_2p8>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&lt9611_irq_pin &lt9611_rst_pin>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ lt9611_a: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ lt9611_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
+};
+
&i2c1 {
status = "okay";
@@ -587,6 +654,21 @@
remote-endpoint = <&usb_dp_qmpphy_dp_in>;
};
+&mdss_dsi {
+ vdda-supply = <&vreg_l6b_1p2>;
+ status = "okay";
+};
+
+&mdss_dsi0_out {
+ remote-endpoint = <&lt9611_a>;
+ data-lanes = <0 1 2 3>;
+};
+
+&mdss_dsi_phy {
+ vdds-supply = <&vreg_l10c_0p88>;
+ status = "okay";
+};
+
&mdss_edp {
status = "okay";
};
@@ -602,10 +684,18 @@
status = "okay";
};
+&pmk8350_rtc {
+ status = "okay";
+};
+
&qupv3_id_0 {
status = "okay";
};
+&qupv3_id_1 {
+ status = "okay";
+};
+
&remoteproc_adsp {
firmware-name = "qcom/qcs6490/adsp.mbn";
status = "okay";
@@ -632,7 +722,6 @@
};
&uart5 {
- compatible = "qcom,geni-debug-uart";
status = "okay";
};
@@ -711,3 +800,23 @@
function = "gpio";
bias-disable;
};
+
+&pm7250b_gpios {
+ lt9611_rst_pin: lt9611-rst-state {
+ pins = "gpio2";
+ function = "normal";
+
+ output-high;
+ input-disable;
+ power-source = <0>;
+ };
+};
+
+&tlmm {
+ lt9611_irq_pin: lt9611-irq-state {
+ pins = "gpio24";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8550-aim300-aiot.dts b/arch/arm64/boot/dts/qcom/qcs8550-aim300-aiot.dts
new file mode 100644
index 000000000000..2e2e46f214c7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs8550-aim300-aiot.dts
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include "qcs8550-aim300.dtsi"
+#include "pm8010.dtsi"
+#include "pmr735d_a.dtsi"
+#include "pmr735d_b.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. QCS8550 AIM300 AIOT";
+ compatible = "qcom,qcs8550-aim300-aiot", "qcom,qcs8550-aim300", "qcom,qcs8550",
+ "qcom,sm8550";
+
+ aliases {
+ serial0 = &uart7;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&volume_up_n>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume Up";
+ debounce-interval = <15>;
+ gpios = <&pm8550_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,sm8550-pmic-glink", "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 11 GPIO_ACTIVE_HIGH>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss_in: endpoint {
+ remote-endpoint = <&redriver_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_sbu: endpoint {
+ remote-endpoint = <&fsa4480_sbu_mux>;
+ };
+ };
+ };
+ };
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ };
+
+ regulators-3 {
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ };
+
+ regulators-4 {
+ vdd-s4-supply = <&vph_pwr>;
+ };
+
+ regulators-5 {
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ };
+};
+
+&i2c_hub_2 {
+ status = "okay";
+
+ typec-mux@42 {
+ compatible = "fcs,fsa4480";
+ reg = <0x42>;
+
+ vcc-supply = <&vreg_bob1>;
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ fsa4480_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_sbu>;
+ };
+ };
+ };
+
+ typec-retimer@1c {
+ compatible = "onnn,nb7vpq904m";
+ reg = <0x1c>;
+
+ vcc-supply = <&vreg_l15b_1p8>;
+
+ orientation-switch;
+ retimer-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ redriver_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ redriver_ss_in: endpoint {
+ data-lanes = <3 2 1 0>;
+ remote-endpoint = <&usb_dp_qmpphy_out>;
+ };
+ };
+ };
+ };
+};
+
+&mdss_dsi0 {
+ status = "okay";
+
+ panel@0 {
+ compatible = "visionox,vtdr6130";
+ reg = <0>;
+
+ pinctrl-0 = <&dsi_active>, <&te_default>;
+ pinctrl-1 = <&dsi_suspend>, <&te_default>;
+ pinctrl-names = "default", "sleep";
+
+ reset-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
+
+ vci-supply = <&vreg_l13b_3p0>;
+ vdd-supply = <&vreg_l11b_1p2>;
+ vddio-supply = <&vreg_l12b_1p8>;
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ remote-endpoint = <&panel0_in>;
+ data-lanes = <0 1 2 3>;
+};
+
+&mdss_dsi0_phy {
+ status = "okay";
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie0_phy {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pcie1_phy {
+ status = "okay";
+};
+
+&pm8550_gpios {
+ volume_up_n: volume-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ power-source = <1>;
+ bias-pull-up;
+ input-enable;
+ };
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+
+ status = "okay";
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/qcs8550/adsp.mbn",
+ "qcom/qcs8550/adsp_dtb.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/qcs8550/cdsp.mbn",
+ "qcom/qcs8550/cdsp_dtb.mbn";
+ status = "okay";
+};
+
+&swr1 {
+ status = "okay";
+};
+
+&swr2 {
+ status = "okay";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <32 8>;
+
+ dsi_active: dsi-active-state {
+ pins = "gpio133";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ dsi_suspend: dsi-suspend-state {
+ pins = "gpio133";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ te_default: te-default-state {
+ pins = "gpio86";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+};
+
+&uart7 {
+ status = "okay";
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_hs_in>;
+};
+
+&usb_1_hsphy {
+ status = "okay";
+};
+
+&usb_dp_qmpphy {
+ status = "okay";
+};
+
+&usb_dp_qmpphy_out {
+ remote-endpoint = <&redriver_ss_in>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
new file mode 100644
index 000000000000..f6960e2d466a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "qcs8550.dtsi"
+#include "pm8550.dtsi"
+#include "pm8550b.dtsi"
+#define PMK8550VE_SID 5
+#include "pm8550ve.dtsi"
+#include "pm8550vs.dtsi"
+#include "pmk8550.dtsi"
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-l1-l4-l10-supply = <&vreg_s6g_1p86>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l3-supply = <&vreg_s4g_1p25>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob1>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l11-supply = <&vreg_s4g_1p25>;
+ vdd-l12-supply = <&vreg_s6g_1p86>;
+ vdd-l15-supply = <&vreg_s6g_1p86>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3296000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2720000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p1: ldo5 {
+ regulator-name = "vreg_l5b_3p1";
+ regulator-min-microvolt = <3104000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_1p8: ldo7 {
+ regulator-name = "vreg_l7b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_1p8: ldo8 {
+ regulator-name = "vreg_l8b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11b_1p2: ldo11 {
+ regulator-name = "vreg_l11b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p8: ldo12 {
+ regulator-name = "vreg_l12b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p2: ldo14 {
+ regulator-name = "vreg_l14b_3p2";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b_2p8: ldo16 {
+ regulator-name = "vreg_l16b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s4g_1p25>;
+ vdd-l2-supply = <&vreg_s4e_0p95>;
+ vdd-l3-supply = <&vreg_s4e_0p95>;
+
+ vreg_l3c_0p9: ldo3 {
+ regulator-name = "vreg_l3c_0p9";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s4e_0p95>;
+ vdd-l2-supply = <&vreg_s4e_0p95>;
+ vdd-l3-supply = <&vreg_s4e_0p95>;
+
+ vreg_l1d_0p88: ldo1 {
+ regulator-name = "vreg_l1d_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l1-supply = <&vreg_s4e_0p95>;
+ vdd-l2-supply = <&vreg_s4e_0p95>;
+ vdd-l3-supply = <&vreg_s4g_1p25>;
+
+ vreg_s4e_0p95: smps4 {
+ regulator-name = "vreg_s4e_0p95";
+ regulator-min-microvolt = <904000>;
+ regulator-max-microvolt = <984000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5e_1p08: smps5 {
+ regulator-name = "vreg_s5e_1p08";
+ regulator-min-microvolt = <1010000>;
+ regulator-max-microvolt = <1120000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1e_0p88: ldo1 {
+ regulator-name = "vreg_l1e_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2e_0p9: ldo2 {
+ regulator-name = "vreg_l2e_0p9";
+ regulator-min-microvolt = <870000>;
+ regulator-max-microvolt = <970000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s4e_0p95>;
+ vdd-l2-supply = <&vreg_s4e_0p95>;
+ vdd-l3-supply = <&vreg_s4e_0p95>;
+
+ vreg_s4f_0p5: smps4 {
+ regulator-name = "vreg_s4f_0p5";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <700000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_0p9: ldo1 {
+ regulator-name = "vreg_l1f_0p9";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_0p88: ldo2 {
+ regulator-name = "vreg_l2f_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_0p88: ldo3 {
+ regulator-name = "vreg_l3f_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-5 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "g";
+ vdd-l1-supply = <&vreg_s4g_1p25>;
+ vdd-l2-supply = <&vreg_s4g_1p25>;
+ vdd-l3-supply = <&vreg_s4g_1p25>;
+
+ vreg_s1g_1p25: smps1 {
+ regulator-name = "vreg_s1g_1p25";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2g_0p85: smps2 {
+ regulator-name = "vreg_s2g_0p85";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1036000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3g_0p8: smps3 {
+ regulator-name = "vreg_s3g_0p8";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1004000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4g_1p25: smps4 {
+ regulator-name = "vreg_s4g_1p25";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1408000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5g_0p85: smps5 {
+ regulator-name = "vreg_s5g_0p85";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1004000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6g_1p86: smps6 {
+ regulator-name = "vreg_s6g_1p86";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1g_1p2: ldo1 {
+ regulator-name = "vreg_l1g_1p2";
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1272000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2g_1p2: ldo2 {
+ regulator-name = "vreg_l2g_1p2";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3g_1p2: ldo3 {
+ regulator-name = "vreg_l3g_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&mdss_dsi0 {
+ vdda-supply = <&vreg_l3e_1p2>;
+};
+
+&mdss_dsi0_phy {
+ vdds-supply = <&vreg_l1e_0p88>;
+};
+
+&pcie0 {
+ perst-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&pcie0_default_state>;
+ pinctrl-names = "default";
+};
+
+&pcie0_phy {
+ vdda-phy-supply = <&vreg_l1e_0p88>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+};
+
+&pcie1 {
+ perst-gpios = <&tlmm 97 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 99 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&pcie1_default_state>;
+ pinctrl-names = "default";
+};
+
+&pcie1_phy {
+ vdda-phy-supply = <&vreg_l3c_0p9>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+ vdda-qref-supply = <&vreg_l1e_0p88>;
+};
+
+&pm8550b_eusb2_repeater {
+ vdd18-supply = <&vreg_l15b_1p8>;
+ vdd3-supply = <&vreg_l5b_3p1>;
+};
+
+&sleep_clk {
+ clock-frequency = <32000>;
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 210 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&vreg_l17b_2p5>;
+ vcc-max-microamp = <1300000>;
+ vccq-supply = <&vreg_l1g_1p2>;
+ vccq-max-microamp = <1200000>;
+ vdd-hba-supply = <&vreg_l3g_1p2>;
+
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l1d_0p88>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&usb_1_hsphy {
+ phys = <&pm8550b_eusb2_repeater>;
+
+ vdd-supply = <&vreg_l1e_0p88>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+};
+
+&usb_dp_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3f_0p88>;
+};
+
+&xo_board {
+ clock-frequency = <76800000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8550.dtsi b/arch/arm64/boot/dts/qcom/qcs8550.dtsi
new file mode 100644
index 000000000000..07b314834d88
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs8550.dtsi
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "sm8550.dtsi"
+
+/delete-node/ &reserved_memory;
+
+/ {
+ reserved_memory: reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+
+ /* There are three types of reserved memory regions here:
+ * 1. Firmware-related regions which aren't shared with the kernel.
+ * The device tree source in the kernel doesn't need nodes to
+ * describe these firmware reservations; the bootloader conveys the
+ * information by updating the devicetree at runtime. Specifically,
+ * UEFI saves the physical address of the UEFI System Table to the
+ * chosen node of the dts, and the kernel reads this table and adds
+ * the reserved memory regions to the EFI config table. The current
+ * reservations may include regions which are not yet used; the
+ * firmware release notes carry that kind of information.
+ * 2. Firmware-related memory regions which are shared with the kernel.
+ * The device tree source in the kernel needs to include nodes
+ * that describe this firmware-related shared information. A label
+ * name is suggested because this type of shared information needs
+ * to be referenced by specific drivers for handling purposes.
+ * Unlike previous platforms, QCS8550 boots using EFI and describes
+ * most reserved regions in the ESRT memory map. As a result, reserved
+ * memory regions which aren't relevant to the kernel (like the
+ * hypervisor region) don't need to be described in DT.
+ * 3. Remoteproc regions.
+ * Remoteproc regions will be reserved and then assigned to
+ * subsystem firmware later.
+ * Here is a reserved memory map for this platform:
+ * 0x80000000 +-------------------+
+ * | |
+ * | Firmware Related |
+ * | |
+ * 0x8a800000 +-------------------+
+ * | |
+ * | Remoteproc Region |
+ * | |
+ * 0xa7000000 +-------------------+
+ * | |
+ * | Kernel Available |
+ * | |
+ * 0xd4d00000 +-------------------+
+ * | |
+ * | Firmware Related |
+ * | |
+ * 0x100000000 +-------------------+
+ */
+
+ aop_image_mem: aop-image-region@81c00000 {
+ reg = <0x0 0x81c00000 0x0 0x60000>;
+ no-map;
+ };
+
+ aop_cmd_db_mem: aop-cmd-db-region@81c60000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x81c60000 0x0 0x20000>;
+ no-map;
+ };
+
+ aop_config_mem: aop-config-region@81c80000 {
+ no-map;
+ reg = <0x0 0x81c80000 0x0 0x20000>;
+ };
+
+ smem_mem: smem-region@81d00000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x81d00000 0x0 0x200000>;
+ hwlocks = <&tcsr_mutex 3>;
+ no-map;
+ };
+
+ adsp_mhi_mem: adsp-mhi-region@81f00000 {
+ reg = <0x0 0x81f00000 0x0 0x20000>;
+ no-map;
+ };
+
+ mpss_mem: mpss-region@8a800000 {
+ reg = <0x0 0x8a800000 0x0 0x10800000>;
+ no-map;
+ };
+
+ q6_mpss_dtb_mem: q6-mpss-dtb-region@9b000000 {
+ reg = <0x0 0x9b000000 0x0 0x80000>;
+ no-map;
+ };
+
+ ipa_fw_mem: ipa-fw-region@9b080000 {
+ reg = <0x0 0x9b080000 0x0 0x10000>;
+ no-map;
+ };
+
+ ipa_gsi_mem: ipa-gsi-region@9b090000 {
+ reg = <0x0 0x9b090000 0x0 0xa000>;
+ no-map;
+ };
+
+ gpu_micro_code_mem: gpu-micro-code-region@9b09a000 {
+ reg = <0x0 0x9b09a000 0x0 0x2000>;
+ no-map;
+ };
+
+ spss_region_mem: spss-region@9b100000 {
+ reg = <0x0 0x9b100000 0x0 0x180000>;
+ no-map;
+ };
+
+ spu_secure_shared_memory_mem: spu-secure-shared-memory-region@9b280000 {
+ reg = <0x0 0x9b280000 0x0 0x80000>;
+ no-map;
+ };
+
+ camera_mem: camera-region@9b300000 {
+ reg = <0x0 0x9b300000 0x0 0x800000>;
+ no-map;
+ };
+
+ video_mem: video-region@9bb00000 {
+ reg = <0x0 0x9bb00000 0x0 0x700000>;
+ no-map;
+ };
+
+ cvp_mem: cvp-region@9c200000 {
+ reg = <0x0 0x9c200000 0x0 0x700000>;
+ no-map;
+ };
+
+ cdsp_mem: cdsp-region@9c900000 {
+ reg = <0x0 0x9c900000 0x0 0x2000000>;
+ no-map;
+ };
+
+ q6_cdsp_dtb_mem: q6-cdsp-dtb-region@9e900000 {
+ reg = <0x0 0x9e900000 0x0 0x80000>;
+ no-map;
+ };
+
+ q6_adsp_dtb_mem: q6-adsp-dtb-region@9e980000 {
+ reg = <0x0 0x9e980000 0x0 0x80000>;
+ no-map;
+ };
+
+ adspslpi_mem: adspslpi-region@9ea00000 {
+ reg = <0x0 0x9ea00000 0x0 0x4080000>;
+ no-map;
+ };
+
+ mpss_dsm_mem: mpss_dsm_region@d4d00000 {
+ reg = <0x0 0xd4d00000 0x0 0x3300000>;
+ no-map;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
index 5a25cdec969e..e65305f8136c 100644
--- a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
@@ -500,3 +500,26 @@
&uart7 {
status = "okay";
};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_1_hsphy {
+ vdda-pll-supply = <&vreg_l8a_0p91>;
+ vdda18-supply = <&vreg_l14a_1p8>;
+ vdda33-supply = <&vreg_l2a_2p3>;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy {
+ vdda-phy-supply = <&vreg_l8a_0p91>;
+ vdda-pll-supply = <&vreg_l3a_1p2>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
index f2a5e2e40461..642ca8f0236b 100644
--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
@@ -6,6 +6,8 @@
#include <dt-bindings/clock/qcom,qdu1000-gcc.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/dma/qcom-gpi.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,qdu1000-rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
@@ -913,6 +915,126 @@
};
};
+ usb_1_hsphy: phy@88e3000 {
+ compatible = "qcom,qdu1000-usb-hs-phy",
+ "qcom,usb-snps-hs-7nm-phy";
+ reg = <0x0 0x088e3000 0x0 0x120>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB2_CLKREF_EN>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+
+ status = "disabled";
+ };
+
+ usb_1_qmpphy: phy@88e5000 {
+ compatible = "qcom,qdu1000-qmp-usb3-uni-phy";
+ reg = <0x0 0x088e5000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&gcc GCC_USB2_CLKREF_EN>,
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "aux",
+ "ref",
+ "com_aux",
+ "pipe";
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3PHY_PHY_PRIM_BCR>;
+ reset-names = "phy",
+ "phy_phy";
+
+ #clock-cells = <0>;
+ clock-output-names = "usb3_uni_phy_pipe_clk_src";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_1: usb@a6f8800 {
+ compatible = "qcom,qdu1000-dwc3", "qcom,dwc3";
+ reg = <0 0x0a6f8800 0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "sleep",
+ "mock_utmi";
+
+ assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 9 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
+ "ss_phy_irq";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB30_PRIM_BCR>;
+
+ interconnects = <&system_noc MASTER_USB3 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_USB3_0 QCOM_ICC_TAG_ALWAYS>;
+
+ interconnect-names = "usb-ddr",
+ "apps-usb";
+
+ status = "disabled";
+
+ usb_1_dwc3: usb@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x0a600000 0 0xcd00>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+
+ iommus = <&apps_smmu 0xc0 0x0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ phys = <&usb_1_hsphy>,
+ <&usb_1_qmpphy>;
+ phy-names = "usb2-phy",
+ "usb3-phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_dwc3_ss: endpoint {
+ };
+ };
+ };
+ };
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,qdu1000-pdc", "qcom,pdc";
reg = <0x0 0xb220000 0x0 0x30000>, <0x0 0x174000f0 0x0 0x64>;
@@ -1459,11 +1581,40 @@
system-cache-controller@19200000 {
compatible = "qcom,qdu1000-llcc";
- reg = <0 0x19200000 0 0xd80000>,
+ reg = <0 0x19200000 0 0x80000>,
+ <0 0x19300000 0 0x80000>,
+ <0 0x19600000 0 0x80000>,
+ <0 0x19700000 0 0x80000>,
+ <0 0x19a00000 0 0x80000>,
+ <0 0x19b00000 0 0x80000>,
+ <0 0x19e00000 0 0x80000>,
+ <0 0x19f00000 0 0x80000>,
<0 0x1a200000 0 0x80000>;
reg-names = "llcc0_base",
+ "llcc1_base",
+ "llcc2_base",
+ "llcc3_base",
+ "llcc4_base",
+ "llcc5_base",
+ "llcc6_base",
+ "llcc7_base",
"llcc_broadcast_base";
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+
+ nvmem-cells = <&multi_chan_ddr>;
+ nvmem-cell-names = "multi-chan-ddr";
+ };
+
+ sec_qfprom: efuse@221c8000 {
+ compatible = "qcom,qdu1000-sec-qfprom", "qcom,sec-qfprom";
+ reg = <0 0x221c8000 0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ multi_chan_ddr: multi-chan-ddr@12b {
+ reg = <0x12b 0x1>;
+ bits = <0 2>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
index bb5191422660..e19790464a11 100644
--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
@@ -59,6 +59,17 @@
};
};
+ i2c2_gpio: i2c {
+ compatible = "i2c-gpio";
+
+ sda-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&tlmm 7 GPIO_ACTIVE_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
leds {
compatible = "gpio-leds";
@@ -199,7 +210,15 @@
status = "okay";
};
-&i2c2 {
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/qcm2290/a702_zap.mbn";
+ };
+};
+
+&i2c2_gpio {
clock-frequency = <400000>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
index 2c39bb1b97db..1888d99d398b 100644
--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
@@ -60,6 +60,17 @@
};
};
+ i2c2_gpio: i2c {
+ compatible = "i2c-gpio";
+
+ sda-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&tlmm 7 GPIO_ACTIVE_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
leds {
compatible = "gpio-leds";
@@ -190,7 +201,7 @@
};
};
-&i2c2 {
+&i2c2_gpio {
clock-frequency = <400000>;
status = "okay";
@@ -294,7 +305,7 @@
&pmi632_vbus {
regulator-min-microamp = <500000>;
- regulator-max-microamp = <3000000>;
+ regulator-max-microamp = <1000000>;
status = "okay";
};
@@ -403,6 +414,8 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-allow-set-load;
+ regulator-always-on;
+ regulator-boot-on;
};
vreg_l10a_1p8: l10 {
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index cd0db4f31d4a..ccff6cd73fdf 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -108,10 +108,69 @@
regulator-always-on;
};
+ qca6390-pmu {
+ compatible = "qcom,qca6390-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_state>, <&wlan_en_state>;
+
+ vddaon-supply = <&vreg_s6a_0p95>;
+ vddpmu-supply = <&vreg_s2f_0p95>;
+ vddrfa0p95-supply = <&vreg_s2f_0p95>;
+ vddrfa1p3-supply = <&vreg_s8c_1p3>;
+ vddrfa1p9-supply = <&vreg_s5a_1p9>;
+ vddpcie1p3-supply = <&vreg_s8c_1p3>;
+ vddpcie1p9-supply = <&vreg_s5a_1p9>;
+ vddio-supply = <&vreg_s4a_1p8>;
+
+ wlan-enable-gpios = <&tlmm 20 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p7: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p7";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
+
thermal-zones {
conn-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150b_adc_tm 0>;
trips {
@@ -124,8 +183,6 @@
};
pm8150l-pcb-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150l_adc_tm 1>;
trips {
@@ -138,8 +195,6 @@
};
skin-msm-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150l_adc_tm 0>;
trips {
@@ -152,8 +207,6 @@
};
wifi-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150_adc_tm 1>;
trips {
@@ -166,8 +219,6 @@
};
xo-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150_adc_tm 0>;
trips {
@@ -734,6 +785,23 @@
vdda-pll-supply = <&vreg_l9a_1p2>;
};
+&pcieport0 {
+ wifi@0 {
+ compatible = "pci17cb,1101";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p7-supply = <&vreg_pmu_rfa_1p7>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
&pcie1 {
status = "okay";
};
@@ -1303,6 +1371,14 @@
function = "gpio";
bias-pull-up;
};
+
+ wlan_en_state: wlan-default-state {
+ pins = "gpio20";
+ function = "gpio";
+ drive-strength = <16>;
+ output-low;
+ bias-pull-up;
+ };
};
&uart6 {
@@ -1311,17 +1387,12 @@
bluetooth {
compatible = "qcom,qca6390-bt";
- pinctrl-names = "default";
- pinctrl-0 = <&bt_en_state>;
-
- enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
-
- vddio-supply = <&vreg_s4a_1p8>;
- vddpmu-supply = <&vreg_s2f_0p95>;
- vddaon-supply = <&vreg_s6a_0p95>;
- vddrfa0p9-supply = <&vreg_s2f_0p95>;
- vddrfa1p3-supply = <&vreg_s8c_1p3>;
- vddrfa1p9-supply = <&vreg_s5a_1p9>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddbtcmx-supply = <&vreg_pmu_btcmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p7-supply = <&vreg_pmu_rfa_1p7>;
};
};
@@ -1356,8 +1427,8 @@
usb-role-switch;
};
-&usb_1_role_switch_out {
- remote-endpoint = <&pm8150b_role_switch_in>;
+&usb_1_dwc3_hs_out {
+ remote-endpoint = <&pm8150b_hs_in>;
};
&usb_1_hsphy {
@@ -1373,7 +1444,6 @@
vdda-phy-supply = <&vreg_l9a_1p2>;
vdda-pll-supply = <&vreg_l18a_0p92>;
- orientation-switch;
};
&usb_1_qmpphy_out {
@@ -1465,8 +1535,8 @@
port@0 {
reg = <0>;
- pm8150b_role_switch_in: endpoint {
- remote-endpoint = <&usb_1_role_switch_out>;
+ pm8150b_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs_out>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/qru1000-idp.dts b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
index 2a862c83309e..1c781d9e24cf 100644
--- a/arch/arm64/boot/dts/qcom/qru1000-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
@@ -467,3 +467,26 @@
&uart7 {
status = "okay";
};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_1_hsphy {
+ vdda-pll-supply = <&vreg_l8a_0p91>;
+ vdda18-supply = <&vreg_l14a_1p8>;
+ vdda33-supply = <&vreg_l2a_2p3>;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy {
+ vdda-phy-supply = <&vreg_l8a_0p91>;
+ vdda-pll-supply = <&vreg_l3a_1p2>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sa8155p.dtsi b/arch/arm64/boot/dts/qcom/sa8155p.dtsi
index ffb7ab695213..9e70effc72e1 100644
--- a/arch/arm64/boot/dts/qcom/sa8155p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8155p.dtsi
@@ -38,3 +38,7 @@
*/
compatible = "qcom,sa8155p-rpmhpd";
};
+
+&videocc {
+ power-domains = <&rpmhpd SA8155P_CX>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
index eaa43f022a65..1369c3d43f86 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
@@ -10,7 +10,7 @@
thermal-zones {
pmm8654au_0_thermal: pm8775-0-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pmm8654au_0_temp_alarm>;
trips {
@@ -30,7 +30,7 @@
pmm8654au_1_thermal: pm8775-1-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pmm8654au_1_temp_alarm>;
trips {
@@ -50,7 +50,7 @@
pmm8654au_2_thermal: pm8775-2-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pmm8654au_2_temp_alarm>;
trips {
@@ -70,7 +70,7 @@
pmm8654au_3_thermal: pm8775-3-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pmm8654au_3_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride-r3.dts b/arch/arm64/boot/dts/qcom/sa8775p-ride-r3.dts
new file mode 100644
index 000000000000..ae065ae92478
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sa8775p-ride-r3.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include "sa8775p-ride.dtsi"
+
+/ {
+ model = "Qualcomm SA8775P Ride Rev3";
+ compatible = "qcom,sa8775p-ride-r3", "qcom,sa8775p";
+};
+
+&ethernet0 {
+ phy-mode = "2500base-x";
+};
+
+&ethernet1 {
+ phy-mode = "2500base-x";
+};
+
+&mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sgmii_phy0: phy@8 {
+ compatible = "ethernet-phy-id31c3.1c33";
+ reg = <0x8>;
+ device_type = "ethernet-phy";
+ interrupts-extended = <&tlmm 7 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pmm8654au_2_gpios 8 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <11000>;
+ reset-deassert-us = <70000>;
+ };
+
+ sgmii_phy1: phy@0 {
+ compatible = "ethernet-phy-id31c3.1c33";
+ reg = <0x0>;
+ device_type = "ethernet-phy";
+ interrupts-extended = <&tlmm 26 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pmm8654au_2_gpios 9 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <11000>;
+ reset-deassert-us = <70000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dts b/arch/arm64/boot/dts/qcom/sa8775p-ride.dts
index 26ad05bd3b3f..2e87fd760dbd 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p-ride.dts
+++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dts
@@ -5,835 +5,43 @@
/dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
-
-#include "sa8775p.dtsi"
-#include "sa8775p-pmics.dtsi"
+#include "sa8775p-ride.dtsi"
/ {
model = "Qualcomm SA8775P Ride";
compatible = "qcom,sa8775p-ride", "qcom,sa8775p";
-
- aliases {
- ethernet0 = &ethernet0;
- ethernet1 = &ethernet1;
- i2c11 = &i2c11;
- i2c18 = &i2c18;
- serial0 = &uart10;
- serial1 = &uart12;
- serial2 = &uart17;
- spi16 = &spi16;
- ufshc1 = &ufs_mem_hc;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-};
-
-&apps_rsc {
- regulators-0 {
- compatible = "qcom,pmm8654au-rpmh-regulators";
- qcom,pmic-id = "a";
-
- vreg_s4a: smps4 {
- regulator-name = "vreg_s4a";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1816000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_s5a: smps5 {
- regulator-name = "vreg_s5a";
- regulator-min-microvolt = <1850000>;
- regulator-max-microvolt = <1996000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_s9a: smps9 {
- regulator-name = "vreg_s9a";
- regulator-min-microvolt = <535000>;
- regulator-max-microvolt = <1120000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l4a: ldo4 {
- regulator-name = "vreg_l4a";
- regulator-min-microvolt = <788000>;
- regulator-max-microvolt = <1050000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l5a: ldo5 {
- regulator-name = "vreg_l5a";
- regulator-min-microvolt = <870000>;
- regulator-max-microvolt = <950000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l6a: ldo6 {
- regulator-name = "vreg_l6a";
- regulator-min-microvolt = <870000>;
- regulator-max-microvolt = <970000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l7a: ldo7 {
- regulator-name = "vreg_l7a";
- regulator-min-microvolt = <720000>;
- regulator-max-microvolt = <950000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l8a: ldo8 {
- regulator-name = "vreg_l8a";
- regulator-min-microvolt = <2504000>;
- regulator-max-microvolt = <3300000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l9a: ldo9 {
- regulator-name = "vreg_l9a";
- regulator-min-microvolt = <2970000>;
- regulator-max-microvolt = <3544000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-1 {
- compatible = "qcom,pmm8654au-rpmh-regulators";
- qcom,pmic-id = "c";
-
- vreg_l1c: ldo1 {
- regulator-name = "vreg_l1c";
- regulator-min-microvolt = <1140000>;
- regulator-max-microvolt = <1260000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l2c: ldo2 {
- regulator-name = "vreg_l2c";
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1100000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l3c: ldo3 {
- regulator-name = "vreg_l3c";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1300000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l4c: ldo4 {
- regulator-name = "vreg_l4c";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- /*
- * FIXME: This should have regulator-allow-set-load but
- * we're getting an over-current fault from the PMIC
- * when switching to LPM.
- */
- };
-
- vreg_l5c: ldo5 {
- regulator-name = "vreg_l5c";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1300000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l6c: ldo6 {
- regulator-name = "vreg_l6c";
- regulator-min-microvolt = <1620000>;
- regulator-max-microvolt = <1980000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l7c: ldo7 {
- regulator-name = "vreg_l7c";
- regulator-min-microvolt = <1620000>;
- regulator-max-microvolt = <2000000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l8c: ldo8 {
- regulator-name = "vreg_l8c";
- regulator-min-microvolt = <2400000>;
- regulator-max-microvolt = <3300000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l9c: ldo9 {
- regulator-name = "vreg_l9c";
- regulator-min-microvolt = <1650000>;
- regulator-max-microvolt = <2700000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
- };
-
- regulators-2 {
- compatible = "qcom,pmm8654au-rpmh-regulators";
- qcom,pmic-id = "e";
-
- vreg_s4e: smps4 {
- regulator-name = "vreg_s4e";
- regulator-min-microvolt = <970000>;
- regulator-max-microvolt = <1520000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_s7e: smps7 {
- regulator-name = "vreg_s7e";
- regulator-min-microvolt = <1010000>;
- regulator-max-microvolt = <1170000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_s9e: smps9 {
- regulator-name = "vreg_s9e";
- regulator-min-microvolt = <300000>;
- regulator-max-microvolt = <570000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l6e: ldo6 {
- regulator-name = "vreg_l6e";
- regulator-min-microvolt = <1280000>;
- regulator-max-microvolt = <1450000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
-
- vreg_l8e: ldo8 {
- regulator-name = "vreg_l8e";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1950000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-allow-set-load;
- regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
- RPMH_REGULATOR_MODE_HPM>;
- };
- };
};

&ethernet0 {
phy-mode = "sgmii";
- phy-handle = <&sgmii_phy0>;
-
- pinctrl-0 = <&ethernet0_default>;
- pinctrl-names = "default";
-
- snps,mtl-rx-config = <&mtl_rx_setup>;
- snps,mtl-tx-config = <&mtl_tx_setup>;
- snps,ps-speed = <1000>;
-
- status = "okay";
-
- mdio {
- compatible = "snps,dwmac-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
-
- sgmii_phy0: phy@8 {
- compatible = "ethernet-phy-id0141.0dd4";
- reg = <0x8>;
- device_type = "ethernet-phy";
- interrupts-extended = <&tlmm 7 IRQ_TYPE_EDGE_FALLING>;
- reset-gpios = <&pmm8654au_2_gpios 8 GPIO_ACTIVE_LOW>;
- reset-assert-us = <11000>;
- reset-deassert-us = <70000>;
- };
-
- sgmii_phy1: phy@a {
- compatible = "ethernet-phy-id0141.0dd4";
- reg = <0xa>;
- device_type = "ethernet-phy";
- interrupts-extended = <&tlmm 26 IRQ_TYPE_EDGE_FALLING>;
- reset-gpios = <&pmm8654au_2_gpios 9 GPIO_ACTIVE_LOW>;
- reset-assert-us = <11000>;
- reset-deassert-us = <70000>;
- };
- };
-
- mtl_rx_setup: rx-queues-config {
- snps,rx-queues-to-use = <4>;
- snps,rx-sched-sp;
-
- queue0 {
- snps,dcb-algorithm;
- snps,map-to-dma-channel = <0x0>;
- snps,route-up;
- snps,priority = <0x1>;
- };
-
- queue1 {
- snps,dcb-algorithm;
- snps,map-to-dma-channel = <0x1>;
- snps,route-ptp;
- };
-
- queue2 {
- snps,avb-algorithm;
- snps,map-to-dma-channel = <0x2>;
- snps,route-avcp;
- };
-
- queue3 {
- snps,avb-algorithm;
- snps,map-to-dma-channel = <0x3>;
- snps,priority = <0xc>;
- };
- };
-
- mtl_tx_setup: tx-queues-config {
- snps,tx-queues-to-use = <4>;
- snps,tx-sched-sp;
-
- queue0 {
- snps,dcb-algorithm;
- };
-
- queue1 {
- snps,dcb-algorithm;
- };
-
- queue2 {
- snps,avb-algorithm;
- snps,send_slope = <0x1000>;
- snps,idle_slope = <0x1000>;
- snps,high_credit = <0x3e800>;
- snps,low_credit = <0xffc18000>;
- };
-
- queue3 {
- snps,avb-algorithm;
- snps,send_slope = <0x1000>;
- snps,idle_slope = <0x1000>;
- snps,high_credit = <0x3e800>;
- snps,low_credit = <0xffc18000>;
- };
- };
};

&ethernet1 {
phy-mode = "sgmii";
- phy-handle = <&sgmii_phy1>;
-
- snps,mtl-rx-config = <&mtl_rx_setup1>;
- snps,mtl-tx-config = <&mtl_tx_setup1>;
- snps,ps-speed = <1000>;
-
- status = "okay";
-
- mtl_rx_setup1: rx-queues-config {
- snps,rx-queues-to-use = <4>;
- snps,rx-sched-sp;
-
- queue0 {
- snps,dcb-algorithm;
- snps,map-to-dma-channel = <0x0>;
- snps,route-up;
- snps,priority = <0x1>;
- };
-
- queue1 {
- snps,dcb-algorithm;
- snps,map-to-dma-channel = <0x1>;
- snps,route-ptp;
- };
-
- queue2 {
- snps,avb-algorithm;
- snps,map-to-dma-channel = <0x2>;
- snps,route-avcp;
- };
-
- queue3 {
- snps,avb-algorithm;
- snps,map-to-dma-channel = <0x3>;
- snps,priority = <0xc>;
- };
- };
-
- mtl_tx_setup1: tx-queues-config {
- snps,tx-queues-to-use = <4>;
- snps,tx-sched-sp;
-
- queue0 {
- snps,dcb-algorithm;
- };
-
- queue1 {
- snps,dcb-algorithm;
- };
-
- queue2 {
- snps,avb-algorithm;
- snps,send_slope = <0x1000>;
- snps,idle_slope = <0x1000>;
- snps,high_credit = <0x3e800>;
- snps,low_credit = <0xffc18000>;
- };
-
- queue3 {
- snps,avb-algorithm;
- snps,send_slope = <0x1000>;
- snps,idle_slope = <0x1000>;
- snps,high_credit = <0x3e800>;
- snps,low_credit = <0xffc18000>;
- };
- };
-};
-
-&i2c11 {
- clock-frequency = <400000>;
- pinctrl-0 = <&qup_i2c11_default>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&i2c18 {
- clock-frequency = <400000>;
- pinctrl-0 = <&qup_i2c18_default>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&pmm8654au_0_gpios {
- gpio-line-names = "DS_EN",
- "POFF_COMPLETE",
- "UFS0_VER_ID",
- "FAST_POFF",
- "DBU1_PON_DONE",
- "AOSS_SLEEP",
- "CAM_DES0_EN",
- "CAM_DES1_EN",
- "CAM_DES2_EN",
- "CAM_DES3_EN",
- "UEFI",
- "ANALOG_PON_OPT";
-};
-
-&pmm8654au_0_pon_resin {
- linux,code = <KEY_VOLUMEDOWN>;
- status = "okay";
-};
-
-&pmm8654au_1_gpios {
- gpio-line-names = "PMIC_C_ID0",
- "PMIC_C_ID1",
- "UFS1_VER_ID",
- "IPA_PWR",
- "",
- "WLAN_DBU4_EN",
- "WLAN_EN",
- "BT_EN",
- "USB2_PWR_EN",
- "USB2_FAULT";
-
- usb2_en_state: usb2-en-state {
- pins = "gpio9";
- function = "normal";
- output-high;
- power-source = <0>;
- };
-};
-
-&pmm8654au_2_gpios {
- gpio-line-names = "PMIC_E_ID0",
- "PMIC_E_ID1",
- "USB0_PWR_EN",
- "USB0_FAULT",
- "SENSOR_IRQ_1",
- "SENSOR_IRQ_2",
- "SENSOR_RST",
- "SGMIIO0_RST",
- "SGMIIO1_RST",
- "USB1_PWR_ENABLE",
- "USB1_FAULT",
- "VMON_SPX8";
-
- usb0_en_state: usb0-en-state {
- pins = "gpio3";
- function = "normal";
- output-high;
- power-source = <0>;
- };
-
- usb1_en_state: usb1-en-state {
- pins = "gpio10";
- function = "normal";
- output-high;
- power-source = <0>;
- };
-};
-
-&pmm8654au_3_gpios {
- gpio-line-names = "PMIC_G_ID0",
- "PMIC_G_ID1",
- "GNSS_RST",
- "GNSS_EN",
- "GNSS_BOOT_MODE";
-};
-
-&qupv3_id_1 {
- status = "okay";
-};
-
-&qupv3_id_2 {
- status = "okay";
-};
-
-&serdes0 {
- phy-supply = <&vreg_l5a>;
- status = "okay";
-};
-
-&serdes1 {
- phy-supply = <&vreg_l5a>;
- status = "okay";
-};
-
-&sleep_clk {
- clock-frequency = <32764>;
-};
-
-&spi16 {
- pinctrl-0 = <&qup_spi16_default>;
- pinctrl-names = "default";
- status = "okay";
};

-&tlmm {
- ethernet0_default: ethernet0-default-state {
- ethernet0_mdc: ethernet0-mdc-pins {
- pins = "gpio8";
- function = "emac0_mdc";
- drive-strength = <16>;
- bias-pull-up;
- };
-
- ethernet0_mdio: ethernet0-mdio-pins {
- pins = "gpio9";
- function = "emac0_mdio";
- drive-strength = <16>;
- bias-pull-up;
- };
- };
-
- qup_uart10_default: qup-uart10-state {
- pins = "gpio46", "gpio47";
- function = "qup1_se3";
- };
-
- qup_spi16_default: qup-spi16-state {
- pins = "gpio86", "gpio87", "gpio88", "gpio89";
- function = "qup2_se2";
- drive-strength = <6>;
- bias-disable;
- };
-
- qup_i2c11_default: qup-i2c11-state {
- pins = "gpio48", "gpio49";
- function = "qup1_se4";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- qup_i2c18_default: qup-i2c18-state {
- pins = "gpio95", "gpio96";
- function = "qup2_se4";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- qup_uart12_default: qup-uart12-state {
- qup_uart12_cts: qup-uart12-cts-pins {
- pins = "gpio52";
- function = "qup1_se5";
- bias-disable;
- };
-
- qup_uart12_rts: qup-uart12-rts-pins {
- pins = "gpio53";
- function = "qup1_se5";
- bias-pull-down;
- };
-
- qup_uart12_tx: qup-uart12-tx-pins {
- pins = "gpio54";
- function = "qup1_se5";
- bias-pull-up;
- };
-
- qup_uart12_rx: qup-uart12-rx-pins {
- pins = "gpio55";
- function = "qup1_se5";
- bias-pull-down;
- };
- };
-
- qup_uart17_default: qup-uart17-state {
- qup_uart17_cts: qup-uart17-cts-pins {
- pins = "gpio91";
- function = "qup2_se3";
- bias-disable;
- };
-
- qup_uart17_rts: qup0-uart17-rts-pins {
- pins = "gpio92";
- function = "qup2_se3";
- bias-pull-down;
- };
-
- qup_uart17_tx: qup0-uart17-tx-pins {
- pins = "gpio93";
- function = "qup2_se3";
- bias-pull-up;
- };
-
- qup_uart17_rx: qup0-uart17-rx-pins {
- pins = "gpio94";
- function = "qup2_se3";
- bias-pull-down;
- };
- };
+&mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;

- pcie0_default_state: pcie0-default-state {
- perst-pins {
- pins = "gpio2";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-down;
- };
-
- clkreq-pins {
- pins = "gpio1";
- function = "pcie0_clkreq";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- wake-pins {
- pins = "gpio0";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
+ sgmii_phy0: phy@8 {
+ compatible = "ethernet-phy-id0141.0dd4";
+ reg = <0x8>;
+ device_type = "ethernet-phy";
+ interrupts-extended = <&tlmm 7 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pmm8654au_2_gpios 8 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <11000>;
+ reset-deassert-us = <70000>;
};

- pcie1_default_state: pcie1-default-state {
- perst-pins {
- pins = "gpio4";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-down;
- };
-
- clkreq-pins {
- pins = "gpio3";
- function = "pcie1_clkreq";
- drive-strength = <2>;
- bias-pull-up;
- };
-
- wake-pins {
- pins = "gpio5";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
+ sgmii_phy1: phy@a {
+ compatible = "ethernet-phy-id0141.0dd4";
+ reg = <0xa>;
+ device_type = "ethernet-phy";
+ interrupts-extended = <&tlmm 26 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pmm8654au_2_gpios 9 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <11000>;
+ reset-deassert-us = <70000>;
};
};
-
-&pcie0 {
- perst-gpios = <&tlmm 2 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 0 GPIO_ACTIVE_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pcie0_default_state>;
-
- status = "okay";
-};
-
-&pcie1 {
- perst-gpios = <&tlmm 4 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 5 GPIO_ACTIVE_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pcie1_default_state>;
-
- status = "okay";
-};
-
-&pcie0_phy {
- vdda-phy-supply = <&vreg_l5a>;
- vdda-pll-supply = <&vreg_l1c>;
-
- status = "okay";
-};
-
-&pcie1_phy {
- vdda-phy-supply = <&vreg_l5a>;
- vdda-pll-supply = <&vreg_l1c>;
-
- status = "okay";
-};
-
-&uart10 {
- compatible = "qcom,geni-debug-uart";
- pinctrl-0 = <&qup_uart10_default>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&uart12 {
- pinctrl-0 = <&qup_uart12_default>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&uart17 {
- pinctrl-0 = <&qup_uart17_default>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&ufs_mem_hc {
- reset-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
- vcc-supply = <&vreg_l8a>;
- vcc-max-microamp = <1100000>;
- vccq-supply = <&vreg_l4c>;
- vccq-max-microamp = <1200000>;
-
- status = "okay";
-};
-
-&ufs_mem_phy {
- vdda-phy-supply = <&vreg_l4a>;
- vdda-pll-supply = <&vreg_l1c>;
-
- status = "okay";
-};
-
-&usb_0 {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_en_state>;
-
- status = "okay";
-};
-
-&usb_0_dwc3 {
- dr_mode = "peripheral";
-};
-
-&usb_0_hsphy {
- vdda-pll-supply = <&vreg_l7a>;
- vdda18-supply = <&vreg_l6c>;
- vdda33-supply = <&vreg_l9a>;
-
- status = "okay";
-};
-
-&usb_0_qmpphy {
- vdda-phy-supply = <&vreg_l1c>;
- vdda-pll-supply = <&vreg_l7a>;
-
- status = "okay";
-};
-
-&usb_1 {
- pinctrl-names = "default";
- pinctrl-0 = <&usb1_en_state>;
-
- status = "okay";
-};
-
-&usb_1_dwc3 {
- dr_mode = "host";
-};
-
-&usb_1_hsphy {
- vdda-pll-supply = <&vreg_l7a>;
- vdda18-supply = <&vreg_l6c>;
- vdda33-supply = <&vreg_l9a>;
-
- status = "okay";
-};
-
-&usb_1_qmpphy {
- vdda-phy-supply = <&vreg_l1c>;
- vdda-pll-supply = <&vreg_l7a>;
-
- status = "okay";
-};
-
-&usb_2 {
- pinctrl-names = "default";
- pinctrl-0 = <&usb2_en_state>;
-
- status = "okay";
-};
-
-&usb_2_dwc3 {
- dr_mode = "host";
-};
-
-&usb_2_hsphy {
- vdda-pll-supply = <&vreg_l7a>;
- vdda18-supply = <&vreg_l6c>;
- vdda33-supply = <&vreg_l9a>;
-
- status = "okay";
-};
-
-&xo_board_clk {
- clock-frequency = <38400000>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
new file mode 100644
index 000000000000..2a6170623ea9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "sa8775p.dtsi"
+#include "sa8775p-pmics.dtsi"
+
+/ {
+ aliases {
+ ethernet0 = &ethernet0;
+ ethernet1 = &ethernet1;
+ i2c11 = &i2c11;
+ i2c18 = &i2c18;
+ serial0 = &uart10;
+ serial1 = &uart12;
+ serial2 = &uart17;
+ spi16 = &spi16;
+ ufshc1 = &ufs_mem_hc;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pmm8654au-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vreg_s4a: smps4 {
+ regulator-name = "vreg_s4a";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1816000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5a: smps5 {
+ regulator-name = "vreg_s5a";
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <1996000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s9a: smps9 {
+ regulator-name = "vreg_s9a";
+ regulator-min-microvolt = <535000>;
+ regulator-max-microvolt = <1120000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4a: ldo4 {
+ regulator-name = "vreg_l4a";
+ regulator-min-microvolt = <788000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5a: ldo5 {
+ regulator-name = "vreg_l5a";
+ regulator-min-microvolt = <870000>;
+ regulator-max-microvolt = <950000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a: ldo6 {
+ regulator-name = "vreg_l6a";
+ regulator-min-microvolt = <870000>;
+ regulator-max-microvolt = <970000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a: ldo7 {
+ regulator-name = "vreg_l7a";
+ regulator-min-microvolt = <720000>;
+ regulator-max-microvolt = <950000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8a: ldo8 {
+ regulator-name = "vreg_l8a";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9a: ldo9 {
+ regulator-name = "vreg_l9a";
+ regulator-min-microvolt = <2970000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pmm8654au-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vreg_l1c: ldo1 {
+ regulator-name = "vreg_l1c";
+ regulator-min-microvolt = <1140000>;
+ regulator-max-microvolt = <1260000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c: ldo2 {
+ regulator-name = "vreg_l2c";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c: ldo3 {
+ regulator-name = "vreg_l3c";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c: ldo4 {
+ regulator-name = "vreg_l4c";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ /*
+ * FIXME: This should have regulator-allow-set-load but
+ * we're getting an over-current fault from the PMIC
+ * when switching to LPM.
+ */
+ };
+
+ vreg_l5c: ldo5 {
+ regulator-name = "vreg_l5c";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c: ldo6 {
+ regulator-name = "vreg_l6c";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <1980000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c: ldo7 {
+ regulator-name = "vreg_l7c";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c: ldo8 {
+ regulator-name = "vreg_l8c";
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c: ldo9 {
+ regulator-name = "vreg_l9c";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <2700000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmm8654au-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vreg_s4e: smps4 {
+ regulator-name = "vreg_s4e";
+ regulator-min-microvolt = <970000>;
+ regulator-max-microvolt = <1520000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s7e: smps7 {
+ regulator-name = "vreg_s7e";
+ regulator-min-microvolt = <1010000>;
+ regulator-max-microvolt = <1170000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s9e: smps9 {
+ regulator-name = "vreg_s9e";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <570000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6e: ldo6 {
+ regulator-name = "vreg_l6e";
+ regulator-min-microvolt = <1280000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8e: ldo8 {
+ regulator-name = "vreg_l8e";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1950000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&ethernet0 {
+ phy-handle = <&sgmii_phy0>;
+
+ pinctrl-0 = <&ethernet0_default>;
+ pinctrl-names = "default";
+
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+ snps,ps-speed = <1000>;
+
+ status = "okay";
+
+ mdio: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <4>;
+ snps,rx-sched-sp;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x0>;
+ snps,route-up;
+ snps,priority = <0x1>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x1>;
+ snps,route-ptp;
+ };
+
+ queue2 {
+ snps,avb-algorithm;
+ snps,map-to-dma-channel = <0x2>;
+ snps,route-avcp;
+ };
+
+ queue3 {
+ snps,avb-algorithm;
+ snps,map-to-dma-channel = <0x3>;
+ snps,priority = <0xc>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+ snps,tx-sched-sp;
+
+ queue0 {
+ snps,dcb-algorithm;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ };
+
+ queue2 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3e800>;
+ snps,low_credit = <0xffc18000>;
+ };
+
+ queue3 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3e800>;
+ snps,low_credit = <0xffc18000>;
+ };
+ };
+};
+
+&ethernet1 {
+ phy-handle = <&sgmii_phy1>;
+
+ snps,mtl-rx-config = <&mtl_rx_setup1>;
+ snps,mtl-tx-config = <&mtl_tx_setup1>;
+ snps,ps-speed = <1000>;
+
+ status = "okay";
+
+ mtl_rx_setup1: rx-queues-config {
+ snps,rx-queues-to-use = <4>;
+ snps,rx-sched-sp;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x0>;
+ snps,route-up;
+ snps,priority = <0x1>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x1>;
+ snps,route-ptp;
+ };
+
+ queue2 {
+ snps,avb-algorithm;
+ snps,map-to-dma-channel = <0x2>;
+ snps,route-avcp;
+ };
+
+ queue3 {
+ snps,avb-algorithm;
+ snps,map-to-dma-channel = <0x3>;
+ snps,priority = <0xc>;
+ };
+ };
+
+ mtl_tx_setup1: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+ snps,tx-sched-sp;
+
+ queue0 {
+ snps,dcb-algorithm;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ };
+
+ queue2 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3e800>;
+ snps,low_credit = <0xffc18000>;
+ };
+
+ queue3 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3e800>;
+ snps,low_credit = <0xffc18000>;
+ };
+ };
+};
+
+&i2c11 {
+ clock-frequency = <400000>;
+ pinctrl-0 = <&qup_i2c11_default>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&i2c18 {
+ clock-frequency = <400000>;
+ pinctrl-0 = <&qup_i2c18_default>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&pmm8654au_0_gpios {
+ gpio-line-names = "DS_EN",
+ "POFF_COMPLETE",
+ "UFS0_VER_ID",
+ "FAST_POFF",
+ "DBU1_PON_DONE",
+ "AOSS_SLEEP",
+ "CAM_DES0_EN",
+ "CAM_DES1_EN",
+ "CAM_DES2_EN",
+ "CAM_DES3_EN",
+ "UEFI",
+ "ANALOG_PON_OPT";
+};
+
+&pmm8654au_0_pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&pmm8654au_1_gpios {
+ gpio-line-names = "PMIC_C_ID0",
+ "PMIC_C_ID1",
+ "UFS1_VER_ID",
+ "IPA_PWR",
+ "",
+ "WLAN_DBU4_EN",
+ "WLAN_EN",
+ "BT_EN",
+ "USB2_PWR_EN",
+ "USB2_FAULT";
+
+ usb2_en_state: usb2-en-state {
+ pins = "gpio9";
+ function = "normal";
+ output-high;
+ power-source = <0>;
+ };
+};
+
+&pmm8654au_2_gpios {
+ gpio-line-names = "PMIC_E_ID0",
+ "PMIC_E_ID1",
+ "USB0_PWR_EN",
+ "USB0_FAULT",
+ "SENSOR_IRQ_1",
+ "SENSOR_IRQ_2",
+ "SENSOR_RST",
+ "SGMIIO0_RST",
+ "SGMIIO1_RST",
+ "USB1_PWR_ENABLE",
+ "USB1_FAULT",
+ "VMON_SPX8";
+
+ usb0_en_state: usb0-en-state {
+ pins = "gpio3";
+ function = "normal";
+ output-high;
+ power-source = <0>;
+ };
+
+ usb1_en_state: usb1-en-state {
+ pins = "gpio10";
+ function = "normal";
+ output-high;
+ power-source = <0>;
+ };
+};
+
+&pmm8654au_3_gpios {
+ gpio-line-names = "PMIC_G_ID0",
+ "PMIC_G_ID1",
+ "GNSS_RST",
+ "GNSS_EN",
+ "GNSS_BOOT_MODE";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&qupv3_id_2 {
+ status = "okay";
+};
+
+&serdes0 {
+ phy-supply = <&vreg_l5a>;
+ status = "okay";
+};
+
+&serdes1 {
+ phy-supply = <&vreg_l5a>;
+ status = "okay";
+};
+
+&sleep_clk {
+ clock-frequency = <32764>;
+};
+
+&spi16 {
+ pinctrl-0 = <&qup_spi16_default>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&tlmm {
+ ethernet0_default: ethernet0-default-state {
+ ethernet0_mdc: ethernet0-mdc-pins {
+ pins = "gpio8";
+ function = "emac0_mdc";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+
+ ethernet0_mdio: ethernet0-mdio-pins {
+ pins = "gpio9";
+ function = "emac0_mdio";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ qup_uart10_default: qup-uart10-state {
+ pins = "gpio46", "gpio47";
+ function = "qup1_se3";
+ };
+
+ qup_spi16_default: qup-spi16-state {
+ pins = "gpio86", "gpio87", "gpio88", "gpio89";
+ function = "qup2_se2";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_i2c11_default: qup-i2c11-state {
+ pins = "gpio48", "gpio49";
+ function = "qup1_se4";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c18_default: qup-i2c18-state {
+ pins = "gpio95", "gpio96";
+ function = "qup2_se4";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_uart12_default: qup-uart12-state {
+ qup_uart12_cts: qup-uart12-cts-pins {
+ pins = "gpio52";
+ function = "qup1_se5";
+ bias-disable;
+ };
+
+ qup_uart12_rts: qup-uart12-rts-pins {
+ pins = "gpio53";
+ function = "qup1_se5";
+ bias-pull-down;
+ };
+
+ qup_uart12_tx: qup-uart12-tx-pins {
+ pins = "gpio54";
+ function = "qup1_se5";
+ bias-pull-up;
+ };
+
+ qup_uart12_rx: qup-uart12-rx-pins {
+ pins = "gpio55";
+ function = "qup1_se5";
+ bias-pull-down;
+ };
+ };
+
+ qup_uart17_default: qup-uart17-state {
+ qup_uart17_cts: qup-uart17-cts-pins {
+ pins = "gpio91";
+ function = "qup2_se3";
+ bias-disable;
+ };
+
+ qup_uart17_rts: qup0-uart17-rts-pins {
+ pins = "gpio92";
+ function = "qup2_se3";
+ bias-pull-down;
+ };
+
+ qup_uart17_tx: qup0-uart17-tx-pins {
+ pins = "gpio93";
+ function = "qup2_se3";
+ bias-pull-up;
+ };
+
+ qup_uart17_rx: qup0-uart17-rx-pins {
+ pins = "gpio94";
+ function = "qup2_se3";
+ bias-pull-down;
+ };
+ };
+
+ pcie0_default_state: pcie0-default-state {
+ perst-pins {
+ pins = "gpio2";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq-pins {
+ pins = "gpio1";
+ function = "pcie0_clkreq";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake-pins {
+ pins = "gpio0";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie1_default_state: pcie1-default-state {
+ perst-pins {
+ pins = "gpio4";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq-pins {
+ pins = "gpio3";
+ function = "pcie1_clkreq";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake-pins {
+ pins = "gpio5";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+};
+
+&pcie0 {
+ perst-gpios = <&tlmm 2 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 0 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_default_state>;
+
+ status = "okay";
+};
+
+&pcie1 {
+ perst-gpios = <&tlmm 4 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 5 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_default_state>;
+
+ status = "okay";
+};
+
+&pcie0_phy {
+ vdda-phy-supply = <&vreg_l5a>;
+ vdda-pll-supply = <&vreg_l1c>;
+
+ status = "okay";
+};
+
+&pcie1_phy {
+ vdda-phy-supply = <&vreg_l5a>;
+ vdda-pll-supply = <&vreg_l1c>;
+
+ status = "okay";
+};
+
+&uart10 {
+ compatible = "qcom,geni-debug-uart";
+ pinctrl-0 = <&qup_uart10_default>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart12 {
+ pinctrl-0 = <&qup_uart12_default>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart17 {
+ pinctrl-0 = <&qup_uart17_default>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&vreg_l8a>;
+ vcc-max-microamp = <1100000>;
+ vccq-supply = <&vreg_l4c>;
+ vccq-max-microamp = <1200000>;
+
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l4a>;
+ vdda-pll-supply = <&vreg_l1c>;
+
+ status = "okay";
+};
+
+&usb_0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_en_state>;
+
+ status = "okay";
+};
+
+&usb_0_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_0_hsphy {
+ vdda-pll-supply = <&vreg_l7a>;
+ vdda18-supply = <&vreg_l6c>;
+ vdda33-supply = <&vreg_l9a>;
+
+ status = "okay";
+};
+
+&usb_0_qmpphy {
+ vdda-phy-supply = <&vreg_l1c>;
+ vdda-pll-supply = <&vreg_l7a>;
+
+ status = "okay";
+};
+
+&usb_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_en_state>;
+
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_hsphy {
+ vdda-pll-supply = <&vreg_l7a>;
+ vdda18-supply = <&vreg_l6c>;
+ vdda33-supply = <&vreg_l9a>;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy {
+ vdda-phy-supply = <&vreg_l1c>;
+ vdda-pll-supply = <&vreg_l7a>;
+
+ status = "okay";
+};
+
+&usb_2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_en_state>;
+
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_2_hsphy {
+ vdda-pll-supply = <&vreg_l7a>;
+ vdda18-supply = <&vreg_l6c>;
+ vdda33-supply = <&vreg_l9a>;
+
+ status = "okay";
+};
+
+&xo_board_clk {
+ clock-frequency = <38400000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 31de73594839..23f1b2e5e624 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -205,9 +205,23 @@
};
};

+ dummy-sink {
+ compatible = "arm,coresight-dummy-sink";
+
+ in-ports {
+ port {
+ eud_in: endpoint {
+ remote-endpoint =
+ <&swao_rep_out1>;
+ };
+ };
+ };
+ };
+
firmware {
scm {
compatible = "qcom,scm-sa8775p", "qcom,scm";
+ memory-region = <&tz_ffi_mem>;
};
};

@@ -418,6 +432,12 @@
no-map;
};

+ tz_ffi_mem: tz-ffi@91c00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x91c00000 0x0 0x1400000>;
+ no-map;
+ };
+
lpass_machine_learning_mem: lpass-machine-learning@93b00000 {
reg = <0x0 0x93b00000 0x0 0xf00000>;
no-map;
@@ -1644,6 +1664,919 @@
clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
};

+ stm: stm@4002000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0x0 0x4002000 0x0 0x1000>,
+ <0x0 0x16280000 0x0 0x180000>;
+ reg-names = "stm-base", "stm-stimulus-base";
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ stm_out: endpoint {
+ remote-endpoint =
+ <&funnel0_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@4003000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x4003000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ qdss_tpdm0_out: endpoint {
+ remote-endpoint =
+ <&qdss_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpda@4004000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x4004000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ qdss_tpda_out: endpoint {
+ remote-endpoint =
+ <&funnel0_in6>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ qdss_tpda_in0: endpoint {
+ remote-endpoint =
+ <&qdss_tpdm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ qdss_tpda_in1: endpoint {
+ remote-endpoint =
+ <&qdss_tpdm1_out>;
+ };
+ };
+ };
+ };
+
+ tpdm@400f000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x400f000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ qdss_tpdm1_out: endpoint {
+ remote-endpoint =
+ <&qdss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@4041000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x4041000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ funnel0_out: endpoint {
+ remote-endpoint =
+ <&qdss_funnel_in0>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+ funnel0_in6: endpoint {
+ remote-endpoint =
+ <&qdss_tpda_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ funnel0_in7: endpoint {
+ remote-endpoint =
+ <&stm_out>;
+ };
+ };
+ };
+ };
+
+ funnel@4042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x4042000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ funnel1_out: endpoint {
+ remote-endpoint =
+ <&qdss_funnel_in1>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ funnel1_in4: endpoint {
+ remote-endpoint =
+ <&apss_funnel1_out>;
+ };
+ };
+ };
+ };
+
+ funnel@4045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x4045000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ qdss_funnel_out: endpoint {
+ remote-endpoint =
+ <&aoss_funnel_in7>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ qdss_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ qdss_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel1_out>;
+ };
+ };
+ };
+ };
+
+ funnel@4b04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x4b04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ aoss_funnel_out: endpoint {
+ remote-endpoint =
+ <&etf0_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+ aoss_funnel_in6: endpoint {
+ remote-endpoint =
+ <&aoss_tpda_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ aoss_funnel_in7: endpoint {
+ remote-endpoint =
+ <&qdss_funnel_out>;
+ };
+ };
+ };
+ };
+
+ tmc_etf: tmc@4b05000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x4b05000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ etf0_out: endpoint {
+ remote-endpoint =
+ <&swao_rep_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ etf0_in: endpoint {
+ remote-endpoint =
+ <&aoss_funnel_out>;
+ };
+ };
+ };
+ };
+
+ replicator@4b06000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x4b06000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ swao_rep_out1: endpoint {
+ remote-endpoint =
+ <&eud_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ swao_rep_in: endpoint {
+ remote-endpoint =
+ <&etf0_out>;
+ };
+ };
+ };
+ };
+
+ tpda@4b08000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x4b08000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ aoss_tpda_out: endpoint {
+ remote-endpoint =
+ <&aoss_funnel_in6>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ aoss_tpda_in0: endpoint {
+ remote-endpoint =
+ <&aoss_tpdm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ aoss_tpda_in1: endpoint {
+ remote-endpoint =
+ <&aoss_tpdm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ aoss_tpda_in2: endpoint {
+ remote-endpoint =
+ <&aoss_tpdm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ aoss_tpda_in3: endpoint {
+ remote-endpoint =
+ <&aoss_tpdm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ aoss_tpda_in4: endpoint {
+ remote-endpoint =
+ <&aoss_tpdm4_out>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b09000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x4b09000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm0_out: endpoint {
+ remote-endpoint =
+ <&aoss_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0a000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x4b0a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm1_out: endpoint {
+ remote-endpoint =
+ <&aoss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0b000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x4b0b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm2_out: endpoint {
+ remote-endpoint =
+ <&aoss_tpda_in2>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0c000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x4b0c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm3_out: endpoint {
+ remote-endpoint =
+ <&aoss_tpda_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0d000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x4b0d000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm4_out: endpoint {
+ remote-endpoint =
+ <&aoss_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ aoss_cti: cti@4b13000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x4b13000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@6040000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6040000 0x0 0x1000>;
+ cpu = <&CPU0>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm0_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in0>;
+ };
+ };
+ };
+ };
+
+ etm@6140000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6140000 0x0 0x1000>;
+ cpu = <&CPU1>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm1_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in1>;
+ };
+ };
+ };
+ };
+
+ etm@6240000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6240000 0x0 0x1000>;
+ cpu = <&CPU2>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm2_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in2>;
+ };
+ };
+ };
+ };
+
+ etm@6340000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6340000 0x0 0x1000>;
+ cpu = <&CPU3>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm3_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in3>;
+ };
+ };
+ };
+ };
+
+ etm@6440000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6440000 0x0 0x1000>;
+ cpu = <&CPU4>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm4_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in4>;
+ };
+ };
+ };
+ };
+
+ etm@6540000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6540000 0x0 0x1000>;
+ cpu = <&CPU5>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm5_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in5>;
+ };
+ };
+ };
+ };
+
+ etm@6640000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6640000 0x0 0x1000>;
+ cpu = <&CPU6>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm6_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in6>;
+ };
+ };
+ };
+ };
+
+ etm@6740000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x6740000 0x0 0x1000>;
+ cpu = <&CPU7>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm7_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_in7>;
+ };
+ };
+ };
+ };
+
+ funnel@6800000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x6800000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ apss_funnel0_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel1_in0>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ apss_funnel0_in0: endpoint {
+ remote-endpoint =
+ <&etm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ apss_funnel0_in1: endpoint {
+ remote-endpoint =
+ <&etm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ apss_funnel0_in2: endpoint {
+ remote-endpoint =
+ <&etm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ apss_funnel0_in3: endpoint {
+ remote-endpoint =
+ <&etm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ apss_funnel0_in4: endpoint {
+ remote-endpoint =
+ <&etm4_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ apss_funnel0_in5: endpoint {
+ remote-endpoint =
+ <&etm5_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ apss_funnel0_in6: endpoint {
+ remote-endpoint =
+ <&etm6_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ apss_funnel0_in7: endpoint {
+ remote-endpoint =
+ <&etm7_out>;
+ };
+ };
+ };
+ };
+
+ funnel@6810000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x6810000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ apss_funnel1_out: endpoint {
+ remote-endpoint =
+ <&funnel1_in4>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ apss_funnel1_in0: endpoint {
+ remote-endpoint =
+ <&apss_funnel0_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ apss_funnel1_in3: endpoint {
+ remote-endpoint =
+ <&apss_tpda_out>;
+ };
+ };
+ };
+ };
+
+ tpdm@6860000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x6860000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm3_out: endpoint {
+ remote-endpoint =
+ <&apss_tpda_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@6861000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x6861000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm4_out: endpoint {
+ remote-endpoint =
+ <&apss_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ tpda@6863000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x6863000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ apss_tpda_out: endpoint {
+ remote-endpoint =
+ <&apss_funnel1_in3>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ apss_tpda_in0: endpoint {
+ remote-endpoint =
+ <&apss_tpdm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ apss_tpda_in1: endpoint {
+ remote-endpoint =
+ <&apss_tpdm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ apss_tpda_in2: endpoint {
+ remote-endpoint =
+ <&apss_tpdm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ apss_tpda_in3: endpoint {
+ remote-endpoint =
+ <&apss_tpdm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ apss_tpda_in4: endpoint {
+ remote-endpoint =
+ <&apss_tpdm4_out>;
+ };
+ };
+ };
+ };
+
+ tpdm@68a0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x68a0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm0_out: endpoint {
+ remote-endpoint =
+ <&apss_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@68b0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x68b0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm1_out: endpoint {
+ remote-endpoint =
+ <&apss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ tpdm@68c0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x68c0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm2_out: endpoint {
+ remote-endpoint =
+ <&apss_tpda_in2>;
+ };
+ };
+ };
+ };
+
usb_0_hsphy: phy@88e4000 {
compatible = "qcom,sa8775p-usb-hs-phy",
"qcom,usb-snps-hs-5nm-phy";
@@ -1959,6 +2892,25 @@
status = "disabled";
};

+ llcc: system-cache-controller@9200000 {
+ compatible = "qcom,sa8775p-llcc";
+ reg = <0x0 0x09200000 0x0 0x80000>,
+ <0x0 0x09300000 0x0 0x80000>,
+ <0x0 0x09400000 0x0 0x80000>,
+ <0x0 0x09500000 0x0 0x80000>,
+ <0x0 0x09600000 0x0 0x80000>,
+ <0x0 0x09700000 0x0 0x80000>,
+ <0x0 0x09a00000 0x0 0x80000>;
+ reg-names = "llcc0_base",
+ "llcc1_base",
+ "llcc2_base",
+ "llcc3_base",
+ "llcc4_base",
+ "llcc5_base",
+ "llcc_broadcast_base";
+ interrupts = <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sa8775p-pdc", "qcom,pdc";
reg = <0x0 0x0b220000 0x0 0x30000>,
@@ -2099,6 +3051,20 @@
wakeup-parent = <&pdc>;
};

+ sram: sram@146d8000 {
+ compatible = "qcom,sa8775p-imem", "syscon", "simple-mfd";
+ reg = <0x0 0x146d8000 0x0 0x1000>;
+ ranges = <0x0 0x0 0x146d8000 0x1000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pil-reloc@94c {
+ compatible = "qcom,pil-reloc-info";
+ reg = <0x94c 0xc8>;
+ };
+ };
+
apps_smmu: iommu@15000000 {
compatible = "qcom,sa8775p-smmu-500", "qcom,smmu-500", "arm,mmu-500";
reg = <0x0 0x15000000 0x0 0x100000>;
@@ -2504,6 +3470,7 @@
phy-names = "serdes";
iommus = <&apps_smmu 0x140 0xf>;
+ dma-coherent;
snps,tso;
snps,pbl = <32>;
@@ -2538,6 +3505,7 @@
phy-names = "serdes";
iommus = <&apps_smmu 0x120 0xf>;
+ dma-coherent;
snps,tso;
snps,pbl = <32>;
@@ -2550,9 +3518,6 @@
thermal-zones {
aoss-0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 0>;
trips {
@@ -2572,7 +3537,6 @@
cpu-0-0-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
@@ -2593,7 +3557,6 @@
cpu-0-1-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
@@ -2614,7 +3577,6 @@
cpu-0-2-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
@@ -2635,7 +3597,6 @@
cpu-0-3-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
@@ -2656,7 +3617,6 @@
gpuss-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
@@ -2677,7 +3637,6 @@
gpuss-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
@@ -2698,7 +3657,6 @@
gpuss-2-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
@@ -2718,9 +3676,6 @@
};
audio-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 8>;
trips {
@@ -2739,9 +3694,6 @@
};
camss-0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 9>;
trips {
@@ -2760,9 +3712,6 @@
};
pcie-0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 10>;
trips {
@@ -2781,9 +3730,6 @@
};
cpuss-0-0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 11>;
trips {
@@ -2802,9 +3748,6 @@
};
aoss-1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 0>;
trips {
@@ -2824,7 +3767,6 @@
cpu-0-0-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 1>;
@@ -2845,7 +3787,6 @@
cpu-0-1-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 2>;
@@ -2866,7 +3807,6 @@
cpu-0-2-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 3>;
@@ -2887,7 +3827,6 @@
cpu-0-3-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 4>;
@@ -2908,7 +3847,6 @@
gpuss-3-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 5>;
@@ -2929,7 +3867,6 @@
gpuss-4-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 6>;
@@ -2950,7 +3887,6 @@
gpuss-5-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 7>;
@@ -2970,9 +3906,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 8>;
trips {
@@ -2991,9 +3924,6 @@
};
camss-1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 9>;
trips {
@@ -3012,9 +3942,6 @@
};
pcie-1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 10>;
trips {
@@ -3033,9 +3960,6 @@
};
cpuss-0-1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 11>;
trips {
@@ -3054,9 +3978,6 @@
};
aoss-2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens2 0>;
trips {
@@ -3076,7 +3997,6 @@
cpu-1-0-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 1>;
@@ -3097,7 +4017,6 @@
cpu-1-1-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 2>;
@@ -3118,7 +4037,6 @@
cpu-1-2-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 3>;
@@ -3139,7 +4057,6 @@
cpu-1-3-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 4>;
@@ -3160,7 +4077,6 @@
nsp-0-0-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 5>;
@@ -3181,7 +4097,6 @@
nsp-0-1-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 6>;
@@ -3202,7 +4117,6 @@
nsp-0-2-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 7>;
@@ -3223,7 +4137,6 @@
nsp-1-0-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 8>;
@@ -3244,7 +4157,6 @@
nsp-1-1-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 9>;
@@ -3265,7 +4177,6 @@
nsp-1-2-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 10>;
@@ -3285,9 +4196,6 @@
};
ddrss-0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens2 11>;
trips {
@@ -3306,9 +4214,6 @@
};
cpuss-1-0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens2 12>;
trips {
@@ -3327,9 +4232,6 @@
};
aoss-3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens3 0>;
trips {
@@ -3349,7 +4251,6 @@
cpu-1-0-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 1>;
@@ -3370,7 +4271,6 @@
cpu-1-1-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 2>;
@@ -3391,7 +4291,6 @@
cpu-1-2-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 3>;
@@ -3412,7 +4311,6 @@
cpu-1-3-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 4>;
@@ -3433,7 +4331,6 @@
nsp-0-0-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 5>;
@@ -3454,7 +4351,6 @@
nsp-0-1-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 6>;
@@ -3475,7 +4371,6 @@
nsp-0-2-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 7>;
@@ -3496,7 +4391,6 @@
nsp-1-0-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 8>;
@@ -3517,7 +4411,6 @@
nsp-1-1-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 9>;
@@ -3538,7 +4431,6 @@
nsp-1-2-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
thermal-sensors = <&tsens3 10>;
@@ -3558,9 +4450,6 @@
};
ddrss-1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens3 11>;
trips {
@@ -3579,9 +4468,6 @@
};
cpuss-1-1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens3 12>;
trips {
@@ -3605,7 +4491,7 @@
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 12 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};

pcie0: pcie@1c00000 {
@@ -3689,6 +4575,53 @@
};
};

+ pcie0_ep: pcie-ep@1c00000 {
+ compatible = "qcom,sa8775p-pcie-ep";
+ reg = <0x0 0x01c00000 0x0 0x3000>,
+ <0x0 0x40000000 0x0 0xf20>,
+ <0x0 0x40000f20 0x0 0xa8>,
+ <0x0 0x40001000 0x0 0x4000>,
+ <0x0 0x40200000 0x0 0x100000>,
+ <0x0 0x01c03000 0x0 0x1000>,
+ <0x0 0x40005000 0x0 0x2000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+ "mmio", "dma";
+
+ clocks = <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>;
+
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a";
+
+ interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 630 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-names = "global", "doorbell", "dma";
+
+ interconnects = <&pcie_anoc MASTER_PCIE_0 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_PCIE_0 0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ dma-coherent;
+ iommus = <&pcie_smmu 0x0000 0x7f>;
+ resets = <&gcc GCC_PCIE_0_BCR>;
+ reset-names = "core";
+ power-domains = <&gcc PCIE_0_GDSC>;
+ phys = <&pcie0_phy>;
+ phy-names = "pciephy";
+ max-link-speed = <3>; /* FIXME: Limiting the Gen speed due to stability issues */
+ num-lanes = <2>;
+
+ status = "disabled";
+ };
+
pcie0_phy: phy@1c04000 {
compatible = "qcom,sa8775p-qmp-gen4x2-pcie-phy";
reg = <0x0 0x1c04000 0x0 0x2000>;
@@ -3799,6 +4732,53 @@
};
};

+ pcie1_ep: pcie-ep@1c10000 {
+ compatible = "qcom,sa8775p-pcie-ep";
+ reg = <0x0 0x01c10000 0x0 0x3000>,
+ <0x0 0x60000000 0x0 0xf20>,
+ <0x0 0x60000f20 0x0 0xa8>,
+ <0x0 0x60001000 0x0 0x4000>,
+ <0x0 0x60200000 0x0 0x100000>,
+ <0x0 0x01c13000 0x0 0x1000>,
+ <0x0 0x60005000 0x0 0x2000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+ "mmio", "dma";
+
+ clocks = <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>;
+
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a";
+
+ interrupts = <GIC_SPI 518 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-names = "global", "doorbell", "dma";
+
+ interconnects = <&pcie_anoc MASTER_PCIE_1 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_PCIE_1 0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ dma-coherent;
+ iommus = <&pcie_smmu 0x80 0x7f>;
+ resets = <&gcc GCC_PCIE_1_BCR>;
+ reset-names = "core";
+ power-domains = <&gcc PCIE_1_GDSC>;
+ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
+ max-link-speed = <3>; /* FIXME: Limiting the Gen speed due to stability issues */
+ num-lanes = <4>;
+
+ status = "disabled";
+ };
+
pcie1_phy: phy@1c14000 {
compatible = "qcom,sa8775p-qmp-gen4x4-pcie-phy";
reg = <0x0 0x1c14000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-clamshell.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-clamshell.dtsi
new file mode 100644
index 000000000000..d91533b80e76
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-clamshell.dtsi
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Trogdor dts fragment for clamshells
+ *
+ * Copyright 2024 Google LLC.
+ */
+
+/* This file must be included after sc7180-trogdor.dtsi to modify cros_ec */
+#include <arm/cros-ec-keyboard.dtsi>
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
index 7765c8f64905..3c124bbe2f4c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
@@ -7,6 +7,7 @@
#include "sc7180-trogdor.dtsi"
#include "sc7180-trogdor-ti-sn65dsi86.dtsi"
+#include "sc7180-trogdor-detachable.dtsi"
/* Deleted nodes from sc7180-trogdor.dtsi */
@@ -25,7 +26,6 @@
thermal-zones {
skin_temp_thermal: skin-temp-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&pm6150_adc_tm 1>;
sustainable-power = <965>;
@@ -80,10 +80,6 @@
};
&cros_ec {
- keyboard-controller {
- compatible = "google,cros-ec-keyb-switches";
- };
-
cros_ec_proximity: proximity {
compatible = "google,cros-ec-mkbp-proximity";
label = "proximity-wifi";
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-detachable.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-detachable.dtsi
new file mode 100644
index 000000000000..7c5d8a57ef7f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-detachable.dtsi
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Trogdor dts fragment for detachables
+ *
+ * Copyright 2024 Google LLC.
+ */
+
+/* This file must be included after sc7180-trogdor.dtsi to modify cros_ec */
+&cros_ec {
+ keyboard-controller {
+ compatible = "google,cros-ec-keyb-switches";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
index 2ba3bbf3b9ad..b2df22faafe8 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
@@ -5,9 +5,8 @@
* Copyright 2021 Google LLC.
*/
-/* This file must be included after sc7180-trogdor.dtsi */
-
#include "sc7180-trogdor-rt5682i-sku.dtsi"
+#include "sc7180-trogdor-detachable.dtsi"
/ {
/* BOARD-SPECIFIC TOP LEVEL NODES */
@@ -45,7 +44,6 @@
thermal-zones {
skin_temp_thermal: skin-temp-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&pm6150_adc_tm 1>;
sustainable-power = <965>;
@@ -135,12 +133,6 @@ ap_ts_pen_1v8: &i2c4 {
status = "okay";
};
-&cros_ec {
- keyboard-controller {
- compatible = "google,cros-ec-keyb-switches";
- };
-};
-
&panel {
compatible = "samsung,atna33xc20";
enable-gpios = <&tlmm 12 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts
index d6db7d83adcf..655bea928e52 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dts
@@ -9,7 +9,7 @@
#include "sc7180-trogdor.dtsi"
#include "sc7180-trogdor-parade-ps8640.dtsi"
-#include <arm/cros-ec-keyboard.dtsi>
+#include "sc7180-trogdor-clamshell.dtsi"
#include "sc7180-trogdor-lte-sku.dtsi"
#include "sc7180-trogdor-rt5682s-sku.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts
index 919bfaea6189..340cb119d0a0 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts
@@ -12,6 +12,6 @@
compatible = "google,lazor-rev1-sku2", "google,lazor-rev2-sku2", "qcom,sc7180";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts
index eb20157f6af9..d45e60e3eb9e 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts
@@ -17,6 +17,6 @@
status = "okay";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts
index 45d34718a1bc..e906ce877b8c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts
@@ -18,6 +18,6 @@
compatible = "google,lazor-sku2", "qcom,sc7180";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts
index 79028d0dd1b0..4b9ee15b09f6 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts
@@ -22,6 +22,6 @@
status = "okay";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts
index 3459b81c5628..a960553f3994 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts
@@ -21,6 +21,6 @@
"qcom,sc7180";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts
index ff8f47da109d..82bd9ed7e21a 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts
@@ -25,6 +25,6 @@
status = "okay";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts
index faf527972977..6278c1715d3f 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts
@@ -18,6 +18,6 @@
compatible = "google,lazor-rev9-sku2", "qcom,sc7180";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts
index d737fd0637fb..0ec1697ae2c9 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts
@@ -22,6 +22,6 @@
status = "okay";
};
-&keyboard_backlight {
+&pwmleds {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
index e9f213d27711..c3fd6760de7a 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
@@ -5,8 +5,7 @@
* Copyright 2020 Google LLC.
*/
-/* This file must be included after sc7180-trogdor.dtsi */
-#include <arm/cros-ec-keyboard.dtsi>
+#include "sc7180-trogdor-clamshell.dtsi"
&ap_sar_sensor {
semtech,cs0-ground;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
index 8823edbb4d6e..cc2c5610a279 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
@@ -5,8 +5,7 @@
* Copyright 2021 Google LLC.
*/
-/* This file must be included after sc7180-trogdor.dtsi */
-#include <arm/cros-ec-keyboard.dtsi>
+#include "sc7180-trogdor-clamshell.dtsi"
&ap_sar_sensor {
compatible = "semtech,sx9324";
@@ -83,6 +82,8 @@
gpio = <&tlmm 67 GPIO_ACTIVE_HIGH>;
};
+/* PINCTRL - modifications to sc7180-trogdor.dtsi */
+
&en_pp3300_dx_edp {
pins = "gpio67";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
index 067813f5f437..ac8d4589e3fb 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
@@ -6,17 +6,13 @@
*/
#include "sc7180-trogdor.dtsi"
-/* Must come after sc7180-trogdor.dtsi to modify cros_ec */
-#include <arm/cros-ec-keyboard.dtsi>
+#include "sc7180-trogdor-clamshell.dtsi"
#include "sc7180-trogdor-rt5682i-sku.dtsi"
#include "sc7180-trogdor-ti-sn65dsi86.dtsi"
/ {
thermal-zones {
5v-choke-thermal {
- polling-delay-passive = <0>;
- polling-delay = <250>;
-
thermal-sensors = <&pm6150_adc_tm 1>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
index 5f06842c683b..00229b1515e6 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
@@ -9,9 +9,7 @@
#include "sc7180-trogdor.dtsi"
#include "sc7180-trogdor-rt5682i-sku.dtsi"
-
-/* This board only has 1 USB Type-C port. */
-/delete-node/ &usb_c1;
+#include "sc7180-trogdor-detachable.dtsi"
/ {
ppvar_lcd: ppvar-lcd-regulator {
@@ -47,12 +45,6 @@
status = "okay";
};
-&cros_ec {
- keyboard-controller {
- compatible = "google,cros-ec-keyb-switches";
- };
-};
-
&gpio_keys {
status = "okay";
};
@@ -136,6 +128,11 @@ pp3300_disp_on: &pp3300_dx_edp {
gpio = <&tlmm 67 GPIO_ACTIVE_HIGH>;
};
+/* This board only has 1 USB Type-C port. */
+&usb_c1 {
+ status = "disabled";
+};
+
/* PINCTRL - modifications to sc7180-trogdor.dtsi */
/*
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts
index c9667751a990..d393a2712ce6 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts
@@ -8,8 +8,7 @@
/dts-v1/;
#include "sc7180-trogdor.dtsi"
-/* Must come after sc7180-trogdor.dtsi to modify cros_ec */
-#include <arm/cros-ec-keyboard.dtsi>
+#include "sc7180-trogdor-clamshell.dtsi"
#include "sc7180-trogdor-rt5682i-sku.dtsi"
#include "sc7180-trogdor-ti-sn65dsi86.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
index 305ad127246e..af89d80426ab 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
@@ -8,6 +8,7 @@
/dts-v1/;
#include "sc7180-trogdor.dtsi"
+#include "sc7180-trogdor-detachable.dtsi"
/ {
avdd_lcd: avdd-lcd-regulator {
@@ -50,7 +51,6 @@
thermal-zones {
skin_temp_thermal: skin-temp-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&pm6150_adc_tm 1>;
sustainable-power = <574>;
@@ -104,10 +104,6 @@
base_detection: cbas {
compatible = "google,cros-cbas";
};
-
- keyboard-controller {
- compatible = "google,cros-ec-keyb-switches";
- };
};
&i2c4 {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
index 8513be297120..74ab321d3333 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
@@ -21,9 +21,6 @@
/ {
thermal-zones {
charger_thermal: charger-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&pm6150_adc_tm 0>;
trips {
@@ -359,10 +356,11 @@
#sound-dai-cells = <0>;
};
- pwmleds {
+ pwmleds: pwmleds {
compatible = "pwm-leds";
+ status = "disabled";
+
keyboard_backlight: led-0 {
- status = "disabled";
label = "cros_ec::kbd_backlight";
function = LED_FUNCTION_KBD_BACKLIGHT;
pwms = <&cros_ec_pwm 0>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 4774a859bd7e..b5ebf8980325 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -1582,8 +1582,7 @@
};
ufs_mem_phy: phy@1d87000 {
- compatible = "qcom,sc7180-qmp-ufs-phy",
- "qcom,sm7150-qmp-ufs-phy";
+ compatible = "qcom,sc7180-qmp-ufs-phy";
reg = <0 0x01d87000 0 0x1000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
@@ -3067,6 +3066,7 @@
iommus = <&apps_smmu 0x540 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,parkmode-disable-ss-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
@@ -4036,7 +4036,6 @@
thermal-zones {
cpu0_thermal: cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
sustainable-power = <1052>;
@@ -4085,7 +4084,6 @@
cpu1_thermal: cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
sustainable-power = <1052>;
@@ -4134,7 +4132,6 @@
cpu2_thermal: cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
sustainable-power = <1052>;
@@ -4183,7 +4180,6 @@
cpu3_thermal: cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
sustainable-power = <1052>;
@@ -4232,7 +4228,6 @@
cpu4_thermal: cpu4-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
sustainable-power = <1052>;
@@ -4281,7 +4276,6 @@
cpu5_thermal: cpu5-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
sustainable-power = <1052>;
@@ -4330,7 +4324,6 @@
cpu6_thermal: cpu6-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 9>;
sustainable-power = <1425>;
@@ -4371,7 +4364,6 @@
cpu7_thermal: cpu7-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 10>;
sustainable-power = <1425>;
@@ -4412,7 +4404,6 @@
cpu8_thermal: cpu8-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 11>;
sustainable-power = <1425>;
@@ -4453,7 +4444,6 @@
cpu9_thermal: cpu9-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 12>;
sustainable-power = <1425>;
@@ -4494,7 +4484,6 @@
aoss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 0>;
@@ -4515,7 +4504,6 @@
cpuss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
@@ -4535,7 +4523,6 @@
cpuss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 8>;
@@ -4555,7 +4542,6 @@
gpuss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 13>;
@@ -4583,7 +4569,6 @@
gpuss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 14>;
@@ -4611,7 +4596,6 @@
aoss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 0>;
@@ -4632,7 +4616,6 @@
cwlan-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 1>;
@@ -4653,7 +4636,6 @@
audio-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 2>;
@@ -4674,7 +4656,6 @@
ddr-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 3>;
@@ -4695,7 +4676,6 @@
q6-hvx-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 4>;
@@ -4716,7 +4696,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 5>;
@@ -4737,7 +4716,6 @@
mdm-core-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 6>;
@@ -4758,7 +4736,6 @@
mdm-dsp-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 7>;
@@ -4779,7 +4756,6 @@
npu-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 8>;
@@ -4800,7 +4776,6 @@
video-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 9>;
diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
index a0059527d9e4..7370aa0dbf0e 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
@@ -495,7 +495,6 @@
};
&uart5 {
- compatible = "qcom,geni-debug-uart";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
index f9b96bd2477e..7d1d5bbbbbd9 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
@@ -427,7 +427,6 @@
};
uart_dbg: &uart5 {
- compatible = "qcom,geni-debug-uart";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index fc9ec367e3a5..3d8410683402 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -24,6 +24,7 @@
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>
#include <dt-bindings/reset/qcom,sdm845-pdc.h>
+#include <dt-bindings/soc/qcom,apr.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/sound/qcom,lpass.h>
#include <dt-bindings/thermal/thermal.h>
@@ -710,6 +711,7 @@
firmware {
scm: scm {
compatible = "qcom,scm-sc7280", "qcom,scm";
+ qcom,dload-mode = <&tcsr_2 0x13000>;
};
};
@@ -1440,12 +1442,12 @@
};
uart5: serial@994000 {
- compatible = "qcom,geni-uart";
+ compatible = "qcom,geni-debug-uart";
reg = <0 0x00994000 0 0x4000>;
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
clock-names = "se";
pinctrl-names = "default";
- pinctrl-0 = <&qup_uart5_cts>, <&qup_uart5_rts>, <&qup_uart5_tx>, <&qup_uart5_rx>;
+ pinctrl-0 = <&qup_uart5_tx>, <&qup_uart5_rx>;
interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&rpmhpd SC7280_CX>;
operating-points-v2 = <&qup_opp_table>;
@@ -2129,6 +2131,8 @@
reg = <0 0x016e0000 0 0x1c080>;
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
+ clocks = <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
};
aggre2_noc: interconnect@1700000 {
@@ -2136,6 +2140,7 @@
compatible = "qcom,sc7280-aggre2-noc";
#interconnect-cells = <2>;
qcom,bcm-voters = <&apps_bcm_voter>;
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
};
mmss_noc: interconnect@1740000 {
@@ -2989,6 +2994,18 @@
dma-coherent;
};
+ gfx_0_tbu: tbu@3dd9000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x3dd9000 0x0 0x1000>;
+ qcom,stream-id-range = <&adreno_smmu 0x0 0x400>;
+ };
+
+ gfx_1_tbu: tbu@3ddd000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x3ddd000 0x0 0x1000>;
+ qcom,stream-id-range = <&adreno_smmu 0x400 0x400>;
+ };
+
remoteproc_mpss: remoteproc@4080000 {
compatible = "qcom,sc7280-mpss-pas";
reg = <0 0x04080000 0 0x10000>;
@@ -3762,6 +3779,75 @@
label = "lpass";
qcom,remote-pid = <2>;
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,glink-channels = "apr_audio_svc";
+ qcom,domain = <APR_DOMAIN_ADSP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ service@3 {
+ reg = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ };
+
+ q6afe: service@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+
+ q6afedai: dais {
+ compatible = "qcom,q6afe-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6afecc: clock-controller {
+ compatible = "qcom,q6afe-clocks";
+ #clock-cells = <2>;
+ };
+ };
+
+ q6asm: service@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+
+ q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ iommus = <&apps_smmu 0x1801 0x0>;
+
+ dai@0 {
+ reg = <0>;
+ };
+
+ dai@1 {
+ reg = <1>;
+ };
+
+ dai@2 {
+ reg = <2>;
+ };
+ };
+ };
+
+ q6adm: service@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+
+ q6routing: routing {
+ compatible = "qcom,q6adm-routing";
+ #sound-dai-cells = <0>;
+ };
+ };
+ };
+
fastrpc {
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
@@ -4150,6 +4236,7 @@
iommus = <&apps_smmu 0xe0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,parkmode-disable-ss-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
@@ -5407,16 +5494,6 @@
function = "qup04";
};
- qup_uart5_cts: qup-uart5-cts-state {
- pins = "gpio20";
- function = "qup05";
- };
-
- qup_uart5_rts: qup-uart5-rts-state {
- pins = "gpio21";
- function = "qup05";
- };
-
qup_uart5_tx: qup-uart5-tx-state {
pins = "gpio22";
function = "qup05";
@@ -5802,6 +5879,83 @@
<GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
};
+ anoc_1_tbu: tbu@151dd000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151dd000 0x0 0x1000>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc3 SLAVE_TCU QCOM_ICC_TAG_ACTIVE_ONLY>;
+ qcom,stream-id-range = <&apps_smmu 0x0 0x400>;
+ };
+
+ anoc_2_tbu: tbu@151e1000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151e1000 0x0 0x1000>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc3 SLAVE_TCU QCOM_ICC_TAG_ACTIVE_ONLY>;
+ qcom,stream-id-range = <&apps_smmu 0x400 0x400>;
+ };
+
+ mnoc_hf_0_tbu: tbu@151e5000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151e5000 0x0 0x1000>;
+ interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x800 0x400>;
+ };
+
+ mnoc_hf_1_tbu: tbu@151e9000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151e9000 0x0 0x1000>;
+ interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0xc00 0x400>;
+ };
+
+ compute_dsp_1_tbu: tbu@151ed000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151ed000 0x0 0x1000>;
+ interconnects = <&nsp_noc MASTER_CDSP_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_TURING_MMU_TBU1_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x1000 0x400>;
+ };
+
+ compute_dsp_0_tbu: tbu@151f1000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151f1000 0x0 0x1000>;
+ interconnects = <&nsp_noc MASTER_CDSP_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_TURING_MMU_TBU0_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x1400 0x400>;
+ };
+
+ adsp_tbu: tbu@151f5000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151f5000 0x0 0x1000>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &lpass_ag_noc SLAVE_LPASS_CORE_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ qcom,stream-id-range = <&apps_smmu 0x1800 0x400>;
+ };
+
+ anoc_1_pcie_tbu: tbu@151f9000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151f9000 0x0 0x1000>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc3 SLAVE_TCU QCOM_ICC_TAG_ACTIVE_ONLY>;
+ qcom,stream-id-range = <&apps_smmu 0x1c00 0x400>;
+ };
+
+ mnoc_sf_0_tbu: tbu@151fd000 {
+ compatible = "qcom,sc7280-tbu";
+ reg = <0x0 0x151fd000 0x0 0x1000>;
+ interconnects = <&mmss_noc MASTER_CAMNOC_SF QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x2000 0x400>;
+ };
+
intc: interrupt-controller@17a00000 {
compatible = "arm,gic-v3";
reg = <0 0x17a00000 0 0x10000>, /* GICD */
@@ -5991,10 +6145,12 @@
};
};
+ sound: sound {
+ };
+
thermal_zones: thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
@@ -6038,7 +6194,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
@@ -6082,7 +6237,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
@@ -6126,7 +6280,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
@@ -6170,7 +6323,6 @@
cpu4-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
@@ -6214,7 +6366,6 @@
cpu5-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 8>;
@@ -6258,7 +6409,6 @@
cpu6-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 9>;
@@ -6302,7 +6452,6 @@
cpu7-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 10>;
@@ -6346,7 +6495,6 @@
cpu8-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 11>;
@@ -6390,7 +6538,6 @@
cpu9-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 12>;
@@ -6434,7 +6581,6 @@
cpu10-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 13>;
@@ -6478,7 +6624,6 @@
cpu11-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 14>;
@@ -6522,7 +6667,6 @@
aoss0-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 0>;
@@ -6543,7 +6687,6 @@
aoss1-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 0>;
@@ -6564,7 +6707,6 @@
cpuss0-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
@@ -6584,7 +6726,6 @@
cpuss1-thermal {
polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
@@ -6604,7 +6745,6 @@
gpuss0-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 1>;
@@ -6632,7 +6772,6 @@
gpuss1-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 2>;
@@ -6659,9 +6798,6 @@
};
nspss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 3>;
trips {
@@ -6680,9 +6816,6 @@
};
nspss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 4>;
trips {
@@ -6701,9 +6834,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 5>;
trips {
@@ -6722,9 +6852,6 @@
};
ddr-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 6>;
trips {
@@ -6743,9 +6870,6 @@
};
mdmss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 7>;
trips {
@@ -6764,9 +6888,6 @@
};
mdmss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 8>;
trips {
@@ -6785,9 +6906,6 @@
};
mdmss2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 9>;
trips {
@@ -6806,9 +6924,6 @@
};
mdmss3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 10>;
trips {
@@ -6827,9 +6942,6 @@
};
camera0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 11>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
index 6af99116c715..5b226577f9d8 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
@@ -68,7 +68,7 @@
reg = <0>;
pmic_glink_con0_hs: endpoint {
- remote-endpoint = <&usb_prim_role_switch>;
+ remote-endpoint = <&usb_prim_dwc3_hs>;
};
};
@@ -103,7 +103,7 @@
reg = <0>;
pmic_glink_con1_hs: endpoint {
- remote-endpoint = <&usb_sec_role_switch>;
+ remote-endpoint = <&usb_sec_dwc3_hs>;
};
};
@@ -582,6 +582,10 @@
dr_mode = "host";
};
+&usb_prim_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con0_hs>;
+};
+
&usb_prim_qmpphy_dp_in {
remote-endpoint = <&mdss_dp0_out>;
};
@@ -590,8 +594,8 @@
remote-endpoint = <&pmic_glink_con0_ss>;
};
-&usb_prim_role_switch {
- remote-endpoint = <&pmic_glink_con0_hs>;
+&usb_sec_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con1_hs>;
};
&usb_sec_hsphy {
@@ -619,10 +623,6 @@
remote-endpoint = <&pmic_glink_con1_ss>;
};
-&usb_sec_role_switch {
- remote-endpoint = <&pmic_glink_con1_hs>;
-};
-
&usb_sec {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
index ddc84282f142..1c6f12fafe1d 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
@@ -13,7 +13,6 @@
thermal-zones {
pmc8180-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmc8180_temp>;
@@ -40,7 +39,6 @@
pmc8180c-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
thermal-sensors = <&pmc8180c_temp>;
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
index bfee60c93ccc..65d923497a05 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
@@ -71,7 +71,7 @@
reg = <0>;
pmic_glink_con0_hs: endpoint {
- remote-endpoint = <&usb_prim_role_switch>;
+ remote-endpoint = <&usb_prim_dwc3_hs>;
};
};
@@ -106,7 +106,7 @@
reg = <0>;
pmic_glink_con1_hs: endpoint {
- remote-endpoint = <&usb_sec_role_switch>;
+ remote-endpoint = <&usb_sec_dwc3_hs>;
};
};
@@ -648,6 +648,10 @@
dr_mode = "host";
};
+&usb_prim_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con0_hs>;
+};
+
&usb_prim_qmpphy_dp_in {
remote-endpoint = <&mdss_dp0_out>;
};
@@ -656,10 +660,6 @@
remote-endpoint = <&pmic_glink_con0_ss>;
};
-&usb_prim_role_switch {
- remote-endpoint = <&pmic_glink_con0_hs>;
-};
-
&usb_sec_hsphy {
vdda-pll-supply = <&vreg_l5e_0p88>;
vdda18-supply = <&vreg_l12a_1p8>;
@@ -685,10 +685,6 @@
remote-endpoint = <&pmic_glink_con1_ss>;
};
-&usb_sec_role_switch {
- remote-endpoint = <&pmic_glink_con1_hs>;
-};
-
&usb_sec {
status = "okay";
};
@@ -697,6 +693,10 @@
dr_mode = "host";
};
+&usb_sec_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con1_hs>;
+};
+
&wifi {
memory-region = <&wlan_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 067712310560..6e707d993aeb 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sc8180x.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/phy/phy-qcom-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/thermal/thermal.h>
@@ -1890,7 +1891,7 @@
power-domains = <&gcc PCIE_3_GDSC>;
interconnects = <&aggre2_noc MASTER_PCIE_3 0 &mc_virt SLAVE_EBI_CH0 0>,
- <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
+ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_3 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
phys = <&pcie3_phy>;
@@ -2012,7 +2013,7 @@
power-domains = <&gcc PCIE_1_GDSC>;
interconnects = <&aggre2_noc MASTER_PCIE_1 0 &mc_virt SLAVE_EBI_CH0 0>,
- <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
+ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_1 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
phys = <&pcie1_phy>;
@@ -2134,7 +2135,7 @@
power-domains = <&gcc PCIE_2_GDSC>;
interconnects = <&aggre2_noc MASTER_PCIE_2 0 &mc_virt SLAVE_EBI_CH0 0>,
- <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
+ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_2 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
phys = <&pcie2_phy>;
@@ -2245,18 +2246,13 @@
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
#phy-cells = <0>;
status = "disabled";
};
- ipa_virt: interconnect@1e00000 {
- compatible = "qcom,sc8180x-ipa-virt";
- reg = <0 0x01e00000 0 0x1000>;
- #interconnect-cells = <2>;
- qcom,bcm-voters = <&apps_bcm_voter>;
- };
-
tcsr_mutex: hwlock@1f40000 {
compatible = "qcom,tcsr-mutex";
reg = <0x0 0x01f40000 0x0 0x40000>;
@@ -2511,28 +2507,25 @@
status = "disabled";
};
- usb_prim_qmpphy: phy@88e9000 {
+ usb_prim_qmpphy: phy@88e8000 {
compatible = "qcom,sc8180x-qmp-usb3-dp-phy";
- reg = <0 0x088e9000 0 0x18c>,
- <0 0x088e8000 0 0x38>,
- <0 0x088ea000 0 0x40>;
- reg-names = "reg-base", "dp_com";
+ reg = <0 0x088e8000 0 0x3000>;
+
clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
- <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_USB3_PRIM_CLKREF_CLK>,
- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "aux",
- "ref_clk_src",
"ref",
- "com_aux";
+ "com_aux",
+ "usb3_pipe";
+
resets = <&gcc GCC_USB3_DP_PHY_PRIM_SP0_BCR>,
<&gcc GCC_USB3_PHY_PRIM_SP0_BCR>;
reset-names = "phy", "common";
#clock-cells = <1>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ #phy-cells = <1>;
status = "disabled";
@@ -2546,59 +2539,40 @@
usb_prim_qmpphy_out: endpoint {};
};
+ port@1 {
+ reg = <1>;
+
+ usb_prim_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_prim_dwc3_ss>;
+ };
+ };
+
port@2 {
reg = <2>;
usb_prim_qmpphy_dp_in: endpoint {};
};
};
-
- usb_prim_ssphy: usb3-phy@88e9200 {
- reg = <0 0x088e9200 0 0x200>,
- <0 0x088e9400 0 0x200>,
- <0 0x088e9c00 0 0x218>,
- <0 0x088e9600 0 0x200>,
- <0 0x088e9800 0 0x200>,
- <0 0x088e9a00 0 0x100>;
- #phy-cells = <0>;
- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "usb3_prim_phy_pipe_clk_src";
- };
-
- usb_prim_dpphy: dp-phy@88ea200 {
- reg = <0 0x088ea200 0 0x200>,
- <0 0x088ea400 0 0x200>,
- <0 0x088eaa00 0 0x200>,
- <0 0x088ea600 0 0x200>,
- <0 0x088ea800 0 0x200>;
- #clock-cells = <1>;
- #phy-cells = <0>;
- };
};
usb_sec_qmpphy: phy@88ee000 {
compatible = "qcom,sc8180x-qmp-usb3-dp-phy";
- reg = <0 0x088ee000 0 0x18c>,
- <0 0x088ed000 0 0x10>,
- <0 0x088ef000 0 0x40>;
- reg-names = "reg-base", "dp_com";
+ reg = <0 0x088ed000 0 0x3000>;
+
clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
- <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_USB3_SEC_CLKREF_CLK>,
- <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
+ <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
clock-names = "aux",
- "ref_clk_src",
"ref",
- "com_aux";
+ "com_aux",
+ "usb3_pipe";
resets = <&gcc GCC_USB3_DP_PHY_SEC_BCR>,
<&gcc GCC_USB3_PHY_SEC_BCR>;
reset-names = "phy", "common";
#clock-cells = <1>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ #phy-cells = <1>;
status = "disabled";
@@ -2612,46 +2586,32 @@
usb_sec_qmpphy_out: endpoint {};
};
+ port@1 {
+ reg = <1>;
+
+ usb_sec_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_sec_dwc3_ss>;
+ };
+ };
+
port@2 {
reg = <2>;
usb_sec_qmpphy_dp_in: endpoint {};
};
};
-
- usb_sec_ssphy: usb3-phy@88e9200 {
- reg = <0 0x088ee200 0 0x200>,
- <0 0x088ee400 0 0x200>,
- <0 0x088eec00 0 0x218>,
- <0 0x088ee600 0 0x200>,
- <0 0x088ee800 0 0x200>,
- <0 0x088eea00 0 0x100>;
- #phy-cells = <0>;
- clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "usb3_sec_phy_pipe_clk_src";
- };
-
- usb_sec_dpphy: dp-phy@88ef200 {
- reg = <0 0x088ef200 0 0x200>,
- <0 0x088ef400 0 0x200>,
- <0 0x088efa00 0 0x200>,
- <0 0x088ef600 0 0x200>,
- <0 0x088ef800 0 0x200>;
- #clock-cells = <1>;
- #phy-cells = <0>;
- clock-output-names = "qmp_dptx1_phy_pll_link_clk",
- "qmp_dptx1_phy_pll_vco_div_clk";
- };
};
system-cache-controller@9200000 {
compatible = "qcom,sc8180x-llcc";
- reg = <0 0x09200000 0 0x50000>, <0 0x09280000 0 0x50000>,
- <0 0x09300000 0 0x50000>, <0 0x09380000 0 0x50000>,
- <0 0x09600000 0 0x50000>;
+ reg = <0 0x09200000 0 0x58000>, <0 0x09280000 0 0x58000>,
+ <0 0x09300000 0 0x58000>, <0 0x09380000 0 0x58000>,
+ <0 0x09400000 0 0x58000>, <0 0x09480000 0 0x58000>,
+ <0 0x09500000 0 0x58000>, <0 0x09580000 0 0x58000>,
+ <0 0x09600000 0 0x58000>;
reg-names = "llcc0_base", "llcc1_base", "llcc2_base",
- "llcc3_base", "llcc_broadcast_base";
+ "llcc3_base", "llcc4_base", "llcc5_base",
+ "llcc6_base", "llcc7_base", "llcc_broadcast_base";
interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -2711,11 +2671,26 @@
iommus = <&apps_smmu 0x140 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
- phys = <&usb_prim_hsphy>, <&usb_prim_ssphy>;
+ phys = <&usb_prim_hsphy>, <&usb_prim_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
- port {
- usb_prim_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_prim_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_prim_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_prim_qmpphy_usb_ss_in>;
+ };
};
};
};
@@ -2768,11 +2743,26 @@
iommus = <&apps_smmu 0x160 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
- phys = <&usb_sec_hsphy>, <&usb_sec_ssphy>;
+ phys = <&usb_sec_hsphy>, <&usb_sec_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
- port {
- usb_sec_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_sec_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_sec_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_sec_qmpphy_usb_ss_in>;
+ };
};
};
};
@@ -3086,9 +3076,10 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
<&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
- assigned-clock-parents = <&usb_prim_dpphy 0>, <&usb_prim_dpphy 1>;
+ assigned-clock-parents = <&usb_prim_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_prim_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
- phys = <&usb_prim_dpphy>;
+ phys = <&usb_prim_qmpphy QMP_USB43DP_DP_PHY>;
phy-names = "dp";
#sound-dai-cells = <0>;
@@ -3163,9 +3154,10 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK1_CLK_SRC>,
<&dispcc DISP_CC_MDSS_DP_PIXEL2_CLK_SRC>;
- assigned-clock-parents = <&usb_sec_dpphy 0>, <&usb_sec_dpphy 1>;
+ assigned-clock-parents = <&usb_sec_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_sec_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
- phys = <&usb_sec_dpphy>;
+ phys = <&usb_sec_qmpphy QMP_USB43DP_DP_PHY>;
phy-names = "dp";
#sound-dai-cells = <0>;
@@ -3308,21 +3300,27 @@
compatible = "qcom,sc8180x-dispcc";
reg = <0 0x0af00000 0 0x20000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&sleep_clk>,
- <&usb_prim_dpphy 0>,
- <&usb_prim_dpphy 1>,
- <&usb_sec_dpphy 0>,
- <&usb_sec_dpphy 1>,
+ <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi1_phy 0>,
+ <&mdss_dsi1_phy 1>,
+ <&usb_prim_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_prim_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<&edp_phy 0>,
- <&edp_phy 1>;
+ <&edp_phy 1>,
+ <&usb_sec_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_sec_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
- "sleep_clk",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi0_phy_pll_out_dsiclk",
+ "dsi1_phy_pll_out_byteclk",
+ "dsi1_phy_pll_out_dsiclk",
"dp_phy_pll_link_clk",
"dp_phy_pll_vco_div_clk",
- "dptx1_phy_pll_link_clk",
- "dptx1_phy_pll_vco_div_clk",
"edp_phy_pll_link_clk",
- "edp_phy_pll_vco_div_clk";
+ "edp_phy_pll_vco_div_clk",
+ "dptx1_phy_pll_link_clk",
+ "dptx1_phy_pll_vco_div_clk";
power-domains = <&rpmhpd SC8180X_MMCX>;
required-opps = <&rpmhpd_opp_low_svs>;
#clock-cells = <1>;
@@ -3368,7 +3366,6 @@
mboxes = <&apss_shared 0>;
#clock-cells = <0>;
- #power-domain-cells = <1>;
};
sram@c3f0000 {
@@ -3771,7 +3768,6 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -3786,7 +3782,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -3801,7 +3796,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -3816,7 +3810,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
@@ -3831,7 +3824,6 @@
cpu4-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -3846,7 +3838,6 @@
cpu5-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -3861,7 +3852,6 @@
cpu6-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 9>;
@@ -3876,7 +3866,6 @@
cpu7-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
@@ -3891,7 +3880,6 @@
cpu4-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 11>;
@@ -3906,7 +3894,6 @@
cpu5-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 12>;
@@ -3921,7 +3908,6 @@
cpu6-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 13>;
@@ -3936,7 +3922,6 @@
cpu7-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 14>;
@@ -3951,7 +3936,6 @@
aoss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 0>;
@@ -3966,7 +3950,6 @@
cluster0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -3981,7 +3964,6 @@
cluster1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
@@ -3996,7 +3978,6 @@
gpu-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 15>;
@@ -4009,16 +3990,27 @@
trips {
gpu_top_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
aoss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 0>;
@@ -4033,7 +4025,6 @@
wlan-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 1>;
@@ -4048,7 +4039,6 @@
video-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 2>;
@@ -4063,7 +4053,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
@@ -4078,7 +4067,6 @@
q6-hvx-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 4>;
@@ -4093,7 +4081,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 5>;
@@ -4108,7 +4095,6 @@
compute-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 6>;
@@ -4123,7 +4109,6 @@
mdm-dsp-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 7>;
@@ -4138,7 +4123,6 @@
npu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 8>;
@@ -4153,7 +4137,6 @@
gpu-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 11>;
@@ -4166,10 +4149,22 @@
trips {
gpu_bottom_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
index 41215567b3ae..b98b2f7752b5 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
@@ -56,7 +56,7 @@
reg = <0>;
pmic_glink_con0_hs: endpoint {
- remote-endpoint = <&usb_0_role_switch>;
+ remote-endpoint = <&usb_0_dwc3_hs>;
};
};
@@ -91,7 +91,7 @@
reg = <0>;
pmic_glink_con1_hs: endpoint {
- remote-endpoint = <&usb_1_role_switch>;
+ remote-endpoint = <&usb_1_dwc3_hs>;
};
};
@@ -675,6 +675,10 @@
dr_mode = "host";
};
+&usb_0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con0_hs>;
+};
+
&usb_0_hsphy {
vdda-pll-supply = <&vreg_l9d>;
vdda18-supply = <&vreg_l1c>;
@@ -700,10 +704,6 @@
remote-endpoint = <&pmic_glink_con0_ss>;
};
-&usb_0_role_switch {
- remote-endpoint = <&pmic_glink_con0_hs>;
-};
-
&usb_1 {
status = "okay";
};
@@ -712,6 +712,10 @@
dr_mode = "host";
};
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con1_hs>;
+};
+
&usb_1_hsphy {
vdda-pll-supply = <&vreg_l4b>;
vdda18-supply = <&vreg_l1c>;
@@ -737,10 +741,6 @@
remote-endpoint = <&pmic_glink_con1_ss>;
};
-&usb_1_role_switch {
- remote-endpoint = <&pmic_glink_con1_hs>;
-};
-
&xo_board_clk {
clock-frequency = <38400000>;
};
@@ -977,8 +977,7 @@
reset-n-pins {
pins = "gpio99";
function = "gpio";
- output-high;
- drive-strength = <16>;
+ bias-disable;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index e937732abede..b27143f81867 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -117,7 +117,7 @@
reg = <0>;
pmic_glink_con0_hs: endpoint {
- remote-endpoint = <&usb_0_role_switch>;
+ remote-endpoint = <&usb_0_dwc3_hs>;
};
};
@@ -152,7 +152,7 @@
reg = <0>;
pmic_glink_con1_hs: endpoint {
- remote-endpoint = <&usb_1_role_switch>;
+ remote-endpoint = <&usb_1_dwc3_hs>;
};
};
@@ -297,9 +297,30 @@
};
thermal-zones {
+ pm8008-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&pm8008>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
skin-temp-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8280_adc_tm 5>;
trips {
@@ -655,21 +676,101 @@
status = "okay";
- /* FIXME: verify */
touchscreen@10 {
- compatible = "hid-over-i2c";
+ compatible = "elan,ekth5015m", "elan,ekth6915";
reg = <0x10>;
- hid-descr-addr = <0x1>;
interrupts-extended = <&tlmm 175 IRQ_TYPE_LEVEL_LOW>;
- vdd-supply = <&vreg_misc_3p3>;
- vddl-supply = <&vreg_s10b>;
+ reset-gpios = <&tlmm 99 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ no-reset-on-power-off;
+
+ vcc33-supply = <&vreg_misc_3p3>;
+ vccio-supply = <&vreg_misc_3p3>;
pinctrl-names = "default";
pinctrl-0 = <&ts0_default>;
};
};
+&i2c11 {
+ clock-frequency = <400000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c11_default>;
+
+ status = "okay";
+
+ pm8008: pmic@c {
+ compatible = "qcom,pm8008";
+ reg = <0xc>;
+
+ interrupts-extended = <&tlmm 41 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&tlmm 42 GPIO_ACTIVE_LOW>;
+
+ vdd-l1-l2-supply = <&vreg_s11b>;
+ vdd-l3-l4-supply = <&vreg_bob>;
+ vdd-l5-supply = <&vreg_bob>;
+ vdd-l6-supply = <&vreg_bob>;
+ vdd-l7-supply = <&vreg_bob>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pm8008_default>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pm8008 0 0 2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ #thermal-sensor-cells = <0>;
+
+ regulators {
+ vreg_l1q: ldo1 {
+ regulator-name = "vreg_l1q";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vreg_l2q: ldo2 {
+ regulator-name = "vreg_l2q";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vreg_l3q: ldo3 {
+ regulator-name = "vreg_l3q";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ vreg_l4q: ldo4 {
+ regulator-name = "vreg_l4q";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ vreg_l5q: ldo5 {
+ regulator-name = "vreg_l5q";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_l6q: ldo6 {
+ regulator-name = "vreg_l6q";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_l7q: ldo7 {
+ regulator-name = "vreg_l7q";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ };
+ };
+};
+
&i2c21 {
clock-frequency = <400000>;
@@ -1131,6 +1232,10 @@
dr_mode = "host";
};
+&usb_0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con0_hs>;
+};
+
&usb_0_hsphy {
vdda-pll-supply = <&vreg_l9d>;
vdda18-supply = <&vreg_l1c>;
@@ -1156,10 +1261,6 @@
remote-endpoint = <&pmic_glink_con0_ss>;
};
-&usb_0_role_switch {
- remote-endpoint = <&pmic_glink_con0_hs>;
-};
-
&usb_1 {
status = "okay";
};
@@ -1168,6 +1269,10 @@
dr_mode = "host";
};
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con1_hs>;
+};
+
&usb_1_hsphy {
vdda-pll-supply = <&vreg_l4b>;
vdda18-supply = <&vreg_l1c>;
@@ -1193,10 +1298,6 @@
remote-endpoint = <&pmic_glink_con1_ss>;
};
-&usb_1_role_switch {
- remote-endpoint = <&pmic_glink_con1_hs>;
-};
-
&usb_2 {
status = "okay";
};
@@ -1355,6 +1456,13 @@
bias-disable;
};
+ i2c11_default: i2c11-default-state {
+ pins = "gpio18", "gpio19";
+ function = "qup11";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
i2c21_default: i2c21-default-state {
pins = "gpio81", "gpio82";
function = "qup21";
@@ -1458,6 +1566,22 @@
};
};
+ pm8008_default: pm8008-default-state {
+ int-pins {
+ pins = "gpio41";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ reset-n-pins {
+ pins = "gpio42";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
spkr_1_sd_n_default: spkr-1-sd-n-default-state {
perst-n-pins {
pins = "gpio178";
@@ -1496,8 +1620,8 @@
reset-n-pins {
pins = "gpio99";
function = "gpio";
- output-high;
- drive-strength = <16>;
+ drive-strength = <2>;
+ bias-disable;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
index 945de77911de..1e3babf2e40d 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
@@ -14,7 +14,7 @@
thermal-zones {
pm8280_1_thermal: pm8280-1-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pm8280_1_temp_alarm>;
trips {
@@ -34,7 +34,7 @@
pm8280_2_thermal: pm8280-2-thermal {
polling-delay-passive = <100>;
- polling-delay = <0>;
+
thermal-sensors = <&pm8280_2_temp_alarm>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index 0549ba1fbeea..80a57aa22839 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -3222,6 +3222,14 @@
usb_0_qmpphy_out: endpoint {};
};
+ port@1 {
+ reg = <1>;
+
+ usb_0_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_0_dwc3_ss>;
+ };
+ };
+
port@2 {
reg = <2>;
@@ -3275,6 +3283,14 @@
usb_1_qmpphy_out: endpoint {};
};
+ port@1 {
+ reg = <1>;
+
+ usb_1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
+ };
+ };
+
port@2 {
reg = <2>;
@@ -3560,8 +3576,23 @@
phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
- port {
- usb_0_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_0_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_0_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_0_qmpphy_usb_ss_in>;
+ };
};
};
};
@@ -3622,8 +3653,23 @@
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
- port {
- usb_1_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
+ };
};
};
};
@@ -4623,6 +4669,8 @@
restart@c264000 {
compatible = "qcom,pshold";
reg = <0 0x0c264000 0 0x4>;
+ /* TZ seems to block access */
+ status = "reserved";
};
tsens1: thermal-sensor@c265000 {
@@ -5831,7 +5879,6 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -5846,7 +5893,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -5861,7 +5907,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -5876,7 +5921,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
@@ -5891,7 +5935,6 @@
cpu4-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -5906,7 +5949,6 @@
cpu5-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
@@ -5921,7 +5963,6 @@
cpu6-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -5936,7 +5977,6 @@
cpu7-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -5951,7 +5991,6 @@
cluster0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 9>;
@@ -5965,13 +6004,25 @@
};
gpu-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay-passive = <250>;
thermal-sensors = <&tsens2 2>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- gpu-crit {
+ gpu_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
@@ -5981,7 +6032,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 15>;
diff --git a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
index 702ab49bbc59..60412281ab27 100644
--- a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
+++ b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
@@ -96,6 +96,18 @@
vin-supply = <&vph_pwr>;
};
+
+ /*
+ * This rail is also used for APC1 CPU power; touching it resets the board
+ */
+ vreg_l10a_1p8: vreg-l10a-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_l10a_1p8";
+ regulator-min-microvolt = <1804000>;
+ regulator-max-microvolt = <1896000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
&adsp_pil {
@@ -220,6 +232,7 @@
status = "okay";
vdd-supply = <&vreg_l1b_0p925>;
+ vdda-pll-supply = <&vreg_l10a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l7b_3p125>;
};
@@ -227,6 +240,7 @@
status = "okay";
vdd-supply = <&vreg_l1b_0p925>;
+ vdda-pll-supply = <&vreg_l10a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l7b_3p125>;
};
@@ -464,5 +478,6 @@
&usb3_qmpphy {
vdda-phy-supply = <&vreg_l1b_0p925>;
+ vdda-pll-supply = <&vreg_l10a_1p8>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts b/arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts
new file mode 100644
index 000000000000..175befc02b22
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Neil Armstrong <neil.armstrong@linaro.org>
+ */
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "pm8953.dtsi"
+#include "pmi8950.dtsi"
+
+/ {
+ model = "Lenovo Smart Tab M10";
+ compatible = "lenovo,tbx605f", "qcom,sdm450";
+ chassis-type = "tablet";
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer@90001000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0x90001000 0 (1200 * 1920 * 3)>;
+
+ width = <1200>;
+ height = <1920>;
+ stride = <(1200 * 3)>;
+ format = "r8g8b8";
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_BYTE0_CLK>,
+ <&gcc GCC_MDSS_PCLK0_CLK>,
+ <&gcc GCC_MDSS_ESC0_CLK>;
+ };
+ };
+
+ reserved-memory {
+ other_ext_region@0 {
+ no-map;
+ reg = <0x00 0x84500000 0x00 0x2300000>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ key-volume-up {
+ label = "volume_up";
+ gpios = <&tlmm 85 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&hsusb_phy {
+ vdd-supply = <&pm8953_l3>;
+ vdda-pll-supply = <&pm8953_l7>;
+ vdda-phy-dpdm-supply = <&pm8953_l13>;
+
+ status = "okay";
+};
+
+&i2c_3 {
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5506";
+ reg = <0x38>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <65 IRQ_TYPE_EDGE_FALLING>;
+ vcc-supply = <&pm8953_l10>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+
+ reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <1200>;
+ touchscreen-size-y = <1920>;
+ };
+};
+
+&pm8953_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators {
+ compatible = "qcom,rpm-pm8953-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_s2-supply = <&vph_pwr>;
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_s4-supply = <&vph_pwr>;
+ vdd_s5-supply = <&vph_pwr>;
+ vdd_s6-supply = <&vph_pwr>;
+ vdd_s7-supply = <&vph_pwr>;
+ vdd_l1-supply = <&pm8953_s3>;
+ vdd_l2_l3-supply = <&pm8953_s3>;
+ vdd_l4_l5_l6_l7_l16_l19-supply = <&pm8953_s4>;
+ vdd_l8_l11_l12_l13_l14_l15-supply = <&vph_pwr>;
+ vdd_l9_l10_l17_l18_l22-supply = <&vph_pwr>;
+
+ pm8953_s1: s1 {
+ regulator-min-microvolt = <870000>;
+ regulator-max-microvolt = <1156000>;
+ };
+
+ pm8953_s3: s3 {
+ regulator-min-microvolt = <1224000>;
+ regulator-max-microvolt = <1224000>;
+ };
+
+ pm8953_s4: s4 {
+ regulator-min-microvolt = <1900000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8953_l1: l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ pm8953_l2: l2 {
+ regulator-min-microvolt = <975000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8953_l3: l3 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ };
+
+ pm8953_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8953_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8953_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1900000>;
+ };
+
+ pm8953_l8: l8 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ pm8953_l9: l9 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8953_l10: l10 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8953_l11: l11 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8953_l12: l12 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8953_l13: l13 {
+ regulator-min-microvolt = <3125000>;
+ regulator-max-microvolt = <3125000>;
+ };
+
+ pm8953_l16: l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8953_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8953_l19: l19 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ pm8953_l22: l22 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8953_l23: l23 {
+ regulator-min-microvolt = <975000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8953_l8>;
+ vqmmc-supply = <&pm8953_l5>;
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ vmmc-supply = <&pm8953_l11>;
+ vqmmc-supply = <&pm8953_l12>;
+
+ cd-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_off>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ status = "okay";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <0 4>, <135 4>;
+
+ ts_int_active: ts-int-active-state {
+ pins = "gpio65";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ ts_reset_active: ts-reset-active-state {
+ pins = "gpio64";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
+
+&usb3 {
+ status = "okay";
+};
+
+&usb3_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&wcnss {
+ vddpx-supply = <&pm8953_l5>;
+
+ status = "okay";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3660b";
+
+ vddxo-supply = <&pm8953_l7>;
+ vddrfa-supply = <&pm8953_l19>;
+ vddpa-supply = <&pm8953_l9>;
+ vdddig-supply = <&pm8953_l5>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-motorola-ali.dts b/arch/arm64/boot/dts/qcom/sdm450-motorola-ali.dts
index e27f3c5d5bba..a288d52fb6d7 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-motorola-ali.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-motorola-ali.dts
@@ -248,5 +248,6 @@
};
&usb3_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
index f5921b80ef94..c7e3764a8cf3 100644
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
@@ -1302,6 +1302,7 @@
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,parkmode-disable-ss-quirk;
phys = <&qusb2phy0>, <&usb3_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
@@ -2422,7 +2423,6 @@
thermal-zones {
aoss-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 0>;
@@ -2437,7 +2437,6 @@
cpuss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 1>;
@@ -2452,7 +2451,6 @@
cpuss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 2>;
@@ -2467,7 +2465,6 @@
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 3>;
@@ -2488,7 +2485,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 4>;
@@ -2509,7 +2505,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 5>;
@@ -2530,7 +2525,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 6>;
@@ -2557,7 +2551,6 @@
pwr-cluster-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 7>;
@@ -2578,7 +2571,6 @@
gpu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens 8>;
@@ -2591,20 +2583,32 @@
trips {
gpu_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
};
timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 1 0xf08>,
- <GIC_PPI 2 0xf08>,
- <GIC_PPI 3 0xf08>,
- <GIC_PPI 0 0xf08>;
+ interrupts = <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts b/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
index e2708c74e95a..2c1172aa97e4 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-fairphone-fp3.dts
@@ -143,6 +143,10 @@
status = "okay";
};
+&pmi632_vib {
+ status = "okay";
+};
+
&sdhc_1 {
status = "okay";
vmmc-supply = <&pm8953_l8>;
diff --git a/arch/arm64/boot/dts/qcom/sdm632-motorola-ocean.dts b/arch/arm64/boot/dts/qcom/sdm632-motorola-ocean.dts
index c82d6e628d2c..2f55db0c8ce3 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-motorola-ocean.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-motorola-ocean.dts
@@ -287,5 +287,6 @@
};
&usb3_dwc3 {
+ /delete-property/ usb-role-switch;
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 80e81c4233b3..187c6698835d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -509,6 +509,18 @@
no-map;
};
+ smem@86000000 {
+ compatible = "qcom,smem";
+ reg = <0 0x86000000 0 0x200000>;
+ no-map;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ tz_mem: tz@86200000 {
+ reg = <0 0x86200000 0 0x2d00000>;
+ no-map;
+ };
+
camera_mem: camera-mem@8ab00000 {
reg = <0 0x8ab00000 0 0x500000>;
no-map;
@@ -1139,6 +1151,12 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0 0x01f40000 0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
tlmm: pinctrl@3400000 {
compatible = "qcom,sdm670-tlmm";
reg = <0 0x03400000 0 0xc00000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 76bfa786612c..2391f842c903 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -51,9 +51,6 @@
thermal-zones {
xo_thermal: xo-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&pm8998_adc_tm 1>;
trips {
@@ -66,9 +63,6 @@
};
msm_thermal: msm-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&pm8998_adc_tm 2>;
trips {
@@ -81,9 +75,6 @@
};
pa_thermal: pa-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&pm8998_adc_tm 3>;
trips {
@@ -96,9 +87,6 @@
};
quiet_thermal: quiet-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&pm8998_adc_tm 4>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 10de2bd46ffc..54077549b9da 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -15,6 +15,7 @@
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/firmware/qcom,scm.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sdm845.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -2666,6 +2667,8 @@
"ref_aux",
"qref";
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
@@ -4028,6 +4031,35 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_qmpphy_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usb_1_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&dp_out>;
+ };
+ };
+ };
};
usb_2_qmpphy: phy@88eb000 {
@@ -4106,8 +4138,29 @@
iommus = <&apps_smmu 0x740 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,parkmode-disable-ss-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
+ };
+ };
+ };
};
};
@@ -4161,6 +4214,7 @@
iommus = <&apps_smmu 0x760 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,parkmode-disable-ss-quirk;
phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
@@ -4598,7 +4652,9 @@
port@1 {
reg = <1>;
- dp_out: endpoint { };
+ dp_out: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_dp_in>;
+ };
};
};
@@ -5105,6 +5161,78 @@
<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
};
+ anoc_1_tbu: tbu@150c5000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150c5000 0x0 0x1000>;
+ interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x0 0x400>;
+ };
+
+ anoc_2_tbu: tbu@150c9000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150c9000 0x0 0x1000>;
+ interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x400 0x400>;
+ };
+
+ mnoc_hf_0_tbu: tbu@150cd000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150cd000 0x0 0x1000>;
+ interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ACTIVE_ONLY
+ &mmss_noc SLAVE_MNOC_HF_MEM_NOC QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x800 0x400>;
+ };
+
+ mnoc_hf_1_tbu: tbu@150d1000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150d1000 0x0 0x1000>;
+ interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ACTIVE_ONLY
+ &mmss_noc SLAVE_MNOC_HF_MEM_NOC QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0xc00 0x400>;
+ };
+
+ mnoc_sf_0_tbu: tbu@150d5000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150d5000 0x0 0x1000>;
+ interconnects = <&mmss_noc MASTER_CAMNOC_SF QCOM_ICC_TAG_ACTIVE_ONLY
+ &mmss_noc SLAVE_MNOC_SF_MEM_NOC QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x1000 0x400>;
+ };
+
+ compute_dsp_tbu: tbu@150d9000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150d9000 0x0 0x1000>;
+ interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ qcom,stream-id-range = <&apps_smmu 0x1400 0x400>;
+ };
+
+ adsp_tbu: tbu@150dd000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150dd000 0x0 0x1000>;
+ interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x1800 0x400>;
+ };
+
+ anoc_1_pcie_tbu: tbu@150e1000 {
+ compatible = "qcom,sdm845-tbu";
+ reg = <0x0 0x150e1000 0x0 0x1000>;
+ clocks = <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
+ interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ power-domains = <&gcc HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC>;
+ qcom,stream-id-range = <&apps_smmu 0x1c00 0x400>;
+ };
+
lpasscc: clock-controller@17014000 {
compatible = "qcom,sdm845-lpasscc";
reg = <0 0x17014000 0 0x1f004>, <0 0x17300000 0 0x200>;
@@ -5358,7 +5486,6 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -5385,7 +5512,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -5412,7 +5538,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -5439,7 +5564,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
@@ -5466,7 +5590,6 @@
cpu4-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -5493,7 +5616,6 @@
cpu5-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -5520,7 +5642,6 @@
cpu6-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 9>;
@@ -5547,7 +5668,6 @@
cpu7-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
@@ -5574,7 +5694,6 @@
aoss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 0>;
@@ -5589,7 +5708,6 @@
cluster0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -5609,7 +5727,6 @@
cluster1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
@@ -5629,7 +5746,6 @@
gpu-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 11>;
@@ -5642,16 +5758,27 @@
trips {
gpu_top_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
gpu-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 12>;
@@ -5664,16 +5791,27 @@
trips {
gpu_bottom_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
aoss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 0>;
@@ -5688,7 +5826,6 @@
q6-modem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 1>;
@@ -5703,7 +5840,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 2>;
@@ -5718,7 +5854,6 @@
wlan-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
@@ -5733,7 +5868,6 @@
q6-hvx-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 4>;
@@ -5748,7 +5882,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 5>;
@@ -5763,7 +5896,6 @@
video-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 6>;
@@ -5778,7 +5910,6 @@
modem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 7>;
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 47dc42f6e936..f18050848cd8 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -370,6 +370,66 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+
+ embedded-controller@70 {
+ compatible = "lenovo,yoga-c630-ec";
+ reg = <0x70>;
+
+ interrupts-extended = <&tlmm 20 IRQ_TYPE_LEVEL_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ec_int_state>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "host";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ucsi0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ucsi0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ ucsi0_sbu: endpoint {
+ };
+ };
+ };
+ };
+
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "host";
+
+ /*
+ * Connected to the onboard USB hub; orientation is
+ * handled by the controller.
+ */
+ };
+ };
};
&i2c3 {
@@ -494,6 +554,7 @@
&ipa {
qcom,gsi-loader = "self";
memory-region = <&ipa_fw_mem>;
+ firmware-name = "qcom/sdm850/LENOVO/81JL/ipa_fws.elf";
status = "okay";
};
@@ -694,6 +755,13 @@
bias-disable;
};
+
+ ec_int_state: ec-int-state {
+ pins = "gpio20";
+ function = "gpio";
+
+ bias-disable;
+ };
};
&uart6 {
@@ -741,6 +809,10 @@
dr_mode = "host";
};
+&usb_1_dwc3_hs {
+ remote-endpoint = <&ucsi0_hs_in>;
+};
+
&usb_1_hsphy {
status = "okay";
@@ -761,6 +833,10 @@
vdda-pll-supply = <&vdda_usb1_ss_core>;
};
+&usb_1_qmpphy_out {
+ remote-endpoint = <&ucsi0_ss_in>;
+};
+
&usb_2 {
status = "okay";
};
@@ -834,6 +910,7 @@
vdd-3.3-ch1-supply = <&vreg_l23a_3p3>;
qcom,snoc-host-cap-8bit-quirk;
+ qcom,ath10k-calibration-variant = "Lenovo_C630";
};
&crypto {
diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
index f76e72fb2072..fde16308c7e2 100644
--- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts
+++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
@@ -41,6 +41,29 @@
vin-supply = <&vph_ext>;
};
+
+ reg_2v952_vcc: regulator-2v952-vcc {
+ compatible = "regulator-gpio";
+ regulator-name = "2v952_vcc";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <3600000>;
+ enable-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;
+ gpios = <&tlmm 84 GPIO_ACTIVE_HIGH>;
+ states = <1650000 0>, <3600000 1>;
+ startup-delay-us = <5000>;
+ enable-active-high;
+ regulator-boot-on;
+
+ vin-supply = <&vph_ext>;
+ };
+
+ reg_2v95_vdd: regulator-2v95-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "2v95_vdd";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ vin-supply = <&reg_2v952_vcc>;
+ };
};
&apps_rsc {
@@ -259,8 +282,30 @@
status = "okay";
};
+&sdhc {
+ cd-gpios = <&tlmm 103 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_2v95_vdd>;
+ vqmmc-supply = <&reg_2v952_vcc>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+
+ pinctrl-0 = <&sdc1_default &sd_cd>;
+ pinctrl-1 = <&sdc1_sleep &sd_cd>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+};
+
&tlmm {
gpio-reserved-ranges = <110 6>;
+
+ sd_cd: sd-cd-state {
+ pins = "gpio103";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
&uart1 {
diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
index da1704061d58..9b93f6501d55 100644
--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
@@ -8,9 +8,12 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,sdx75-gcc.h>
+#include <dt-bindings/dma/qcom-gpi.h>
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,sdx75.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
@@ -405,7 +408,42 @@
};
};
- smem: qcom,smem {
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ smp2p_modem_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_modem_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smem: smem {
compatible = "qcom,smem";
memory-region = <&smem_mem>;
hwlocks = <&tcsr_mutex 3>;
@@ -441,6 +479,37 @@
#power-domain-cells = <1>;
};
+ ipcc: mailbox@408000 {
+ compatible = "qcom,sdx75-ipcc", "qcom,ipcc";
+ reg = <0 0x00408000 0 0x1000>;
+ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #mbox-cells = <2>;
+ };
+
+ gpi_dma: dma-controller@900000 {
+ compatible = "qcom,sdx75-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0x0 0x00900000 0x0 0x60000>;
+ #dma-cells = <3>;
+ interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <12>;
+ dma-channel-mask = <0x7f>;
+ iommus = <&apps_smmu 0xf6 0x0>;
+ status = "disabled";
+ };
+
qupv3_id_0: geniqup@9c0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x009c0000 0x0 0x2000>;
@@ -457,6 +526,52 @@
ranges;
status = "disabled";
+ i2c0: i2c@980000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00980000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_i2c0_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ spi0: spi@980000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00980000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_spi0_data_clk>, <&qup_spi0_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
uart1: serial@984000 {
compatible = "qcom,geni-debug-uart";
reg = <0x0 0x00984000 0x0 0x4000>;
@@ -475,6 +590,229 @@
"sleep";
status = "disabled";
};
+
+ i2c2: i2c@988000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00988000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_i2c2_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ spi2: spi@988000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00988000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_spi2_data_clk>, <&qup_spi2_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2c3: i2c@98c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x0098c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_i2c3_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ spi3: spi@98c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x0098c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_spi3_data_clk>, <&qup_spi3_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 3 QCOM_GPI_SPI>,
+ <&gpi_dma 1 3 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ uart4: serial@990000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x00990000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_uart4_default>, <&qup_uart4_cts_rts>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config";
+ status = "disabled";
+ };
+
+ i2c5: i2c@994000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00994000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_i2c5_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 5 QCOM_GPI_I2C>,
+ <&gpi_dma 1 5 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2c6: i2c@998000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00998000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_i2c6_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 6 QCOM_GPI_I2C>,
+ <&gpi_dma 1 6 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ spi6: spi@998000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00998000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_spi6_data_clk>, <&qup_spi6_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 6 QCOM_GPI_SPI>,
+ <&gpi_dma 1 6 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2c7: i2c@99c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x0099c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_i2c7_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 7 QCOM_GPI_I2C>,
+ <&gpi_dma 1 7 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ spi7: spi@99c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x0099c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&qup_spi7_data_clk>, <&qup_spi7_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &system_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma 0 7 QCOM_GPI_SPI>,
+ <&gpi_dma 1 7 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
};
usb_hsphy: phy@ff4000 {
@@ -538,6 +876,59 @@
#hwlock-cells = <1>;
};
+ tcsr: syscon@1fc0000 {
+ compatible = "qcom,sdx75-tcsr", "syscon";
+ reg = <0x0 0x01fc0000 0x0 0x30000>;
+ };
+
+ sdhc: mmc@8804000 {
+ compatible = "qcom,sdx75-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0x0 0x08804000 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq",
+ "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface",
+ "core",
+ "xo";
+ iommus = <&apps_smmu 0x00a0 0x0>;
+ qcom,dll-config = <0x0007442c>;
+ qcom,ddr-config = <0x80040868>;
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&sdhc1_opp_table>;
+
+ interconnects = <&system_noc MASTER_SDCC_1 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &system_noc SLAVE_SDCC_1>;
+ interconnect-names = "sdhc-ddr",
+ "cpu-sdhc";
+ bus-width = <4>;
+ dma-coherent;
+
+ /* Forbid SDR104/SDR50 - broken hw! */
+ sdhci-caps-mask = <0x3 0>;
+
+ status = "disabled";
+
+ sdhc1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-384000000 {
+ opp-hz = /bits/ 64 <384000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
usb: usb@a6f8800 {
compatible = "qcom,sdx75-dwc3", "qcom,dwc3";
reg = <0x0 0x0a6f8800 0x0 0x400>;
@@ -627,6 +1018,17 @@
interrupt-controller;
};
+ aoss_qmp: power-controller@c310000 {
+ compatible = "qcom,sdx75-aoss-qmp", "qcom,aoss-qmp";
+ reg = <0 0x0c310000 0 0x1000>;
+ interrupt-parent = <&ipcc>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ #clock-cells = <0>;
+ };
+
spmi_bus: spmi@c400000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0x0 0x0c400000 0x0 0x3000>,
@@ -661,6 +1063,145 @@
#interrupt-cells = <2>;
wakeup-parent = <&pdc>;
+ qup_i2c0_data_clk: qup-i2c0-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio8", "gpio9";
+ function = "qup_se0";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c2_data_clk: qup-i2c2-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio14", "gpio15";
+ function = "qup_se2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c3_data_clk: qup-i2c3-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio52", "gpio53";
+ function = "qup_se3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c5_data_clk: qup-i2c5-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio110", "gpio111";
+ function = "qup_se5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c6_data_clk: qup-i2c6-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio112", "gpio113";
+ function = "qup_se6";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c7_data_clk: qup-i2c7-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio116", "gpio117";
+ function = "qup_se7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_spi0_cs: qup-spi0-cs-state {
+ pins = "gpio11";
+ function = "qup_se0";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi0_data_clk: qup-spi0-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio8", "gpio9", "gpio10";
+ function = "qup_se0";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi2_cs: qup-spi2-cs-state {
+ pins = "gpio17";
+ function = "qup_se2";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi2_data_clk: qup-spi2-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio14", "gpio15", "gpio16";
+ function = "qup_se2";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi3_cs: qup-spi3-cs-state {
+ pins = "gpio55";
+ function = "qup_se3";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi3_data_clk: qup-spi3-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio52", "gpio53", "gpio54";
+ function = "qup_se3";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi6_cs: qup-spi6-cs-state {
+ pins = "gpio115";
+ function = "qup_se6";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi6_data_clk: qup-spi6-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio112", "gpio113", "gpio114";
+ function = "qup_se6";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi7_cs: qup-spi7-cs-state {
+ pins = "gpio119";
+ function = "qup_se7";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_spi7_data_clk: qup-spi7-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio116", "gpio117", "gpio118";
+ function = "qup_se7";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ qup_uart4_cts_rts: qup-uart4-cts-rts-state {
+ /* CTS, RTS */
+ pins = "gpio52", "gpio53";
+ function = "qup_se3";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ qup_uart4_default: qup-uart4-default-state {
+ /* TX, RX */
+ pins = "gpio54", "gpio55";
+ function = "qup_se3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
qupv3_se1_2uart_active: qupv3-se1-2uart-active-state {
tx-pins {
pins = "gpio12";
@@ -683,6 +1224,46 @@
drive-strength = <2>;
bias-pull-down;
};
+
+ sdc1_default: sdc1-default-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ sdc1_sleep: sdc1-sleep-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
};
apps_smmu: iommu@15000000 {
diff --git a/arch/arm64/boot/dts/qcom/sm4250-oneplus-billie2.dts b/arch/arm64/boot/dts/qcom/sm4250-oneplus-billie2.dts
index 2c7a12983dae..9153a5a55ed9 100644
--- a/arch/arm64/boot/dts/qcom/sm4250-oneplus-billie2.dts
+++ b/arch/arm64/boot/dts/qcom/sm4250-oneplus-billie2.dts
@@ -240,6 +240,7 @@
};
&usb_dwc3 {
+ /delete-property/ usb-role-switch;
maximum-speed = "high-speed";
dr_mode = "peripheral";
diff --git a/arch/arm64/boot/dts/qcom/sm4450.dtsi b/arch/arm64/boot/dts/qcom/sm4450.dtsi
index 603c962661cc..9c9919e78fbd 100644
--- a/arch/arm64/boot/dts/qcom/sm4450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm4450.dtsi
@@ -29,6 +29,14 @@
clock-frequency = <32000>;
#clock-cells = <0>;
};
+
+ bi_tcxo_div2: bi-tcxo-div2-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
};
cpus {
@@ -39,10 +47,12 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x0 0x0>;
+ clocks = <&cpufreq_hw 0>;
enable-method = "psci";
next-level-cache = <&L2_0>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
L2_0: l2-cache {
@@ -63,10 +73,12 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x0 0x100>;
+ clocks = <&cpufreq_hw 0>;
enable-method = "psci";
next-level-cache = <&L2_100>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
L2_100: l2-cache {
@@ -81,10 +93,12 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x0 0x200>;
+ clocks = <&cpufreq_hw 0>;
enable-method = "psci";
next-level-cache = <&L2_200>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
L2_200: l2-cache {
@@ -99,10 +113,12 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x0 0x300>;
+ clocks = <&cpufreq_hw 0>;
enable-method = "psci";
next-level-cache = <&L2_300>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
L2_300: l2-cache {
@@ -117,10 +133,12 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x0 0x400>;
+ clocks = <&cpufreq_hw 0>;
enable-method = "psci";
next-level-cache = <&L2_400>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
L2_400: l2-cache {
@@ -135,10 +153,12 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x0 0x500>;
+ clocks = <&cpufreq_hw 0>;
enable-method = "psci";
next-level-cache = <&L2_500>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
L2_500: l2-cache {
@@ -153,10 +173,12 @@
device_type = "cpu";
compatible = "arm,cortex-a78";
reg = <0x0 0x600>;
+ clocks = <&cpufreq_hw 1>;
enable-method = "psci";
next-level-cache = <&L2_600>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
L2_600: l2-cache {
@@ -171,10 +193,12 @@
device_type = "cpu";
compatible = "arm,cortex-a78";
reg = <0x0 0x700>;
+ clocks = <&cpufreq_hw 1>;
enable-method = "psci";
next-level-cache = <&L2_700>;
power-domains = <&CPU_PD0>;
power-domain-names = "psci";
+ qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
L2_700: l2-cache {
@@ -268,9 +292,14 @@
reg = <0x0 0xa0000000 0x0 0x0>;
};
- pmu {
- compatible = "arm,armv8-pmuv3";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-a78 {
+ compatible = "arm,cortex-a78-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
psci {
@@ -526,6 +555,19 @@
};
};
+ cpufreq_hw: cpufreq@17d91000 {
+ compatible = "qcom,sm4450-cpufreq-epss", "qcom,cpufreq-epss";
+ reg = <0 0x17d91000 0 0x1000>,
+ <0 0x17d92000 0 0x1000>;
+ reg-names = "freq-domain0", "freq-domain1";
+ clocks = <&bi_tcxo_div2>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dcvsh-irq-0", "dcvsh-irq-1";
+ #freq-domain-cells = <1>;
+ #clock-cells = <1>;
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
index 98eb072fa912..4a30024aa48f 100644
--- a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
+++ b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
@@ -234,6 +234,7 @@
};
&usb_dwc3 {
+ /delete-property/ usb-role-switch;
maximum-speed = "high-speed";
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index aca0a87092e4..e374733f3b85 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -1088,8 +1088,11 @@
<&gcc GCC_SDCC1_ICE_CORE_CLK>;
clock-names = "iface", "core", "xo", "ice";
+ resets = <&gcc GCC_SDCC1_BCR>;
+
power-domains = <&rpmpd SM6115_VDDCX>;
operating-points-v2 = <&sdhc1_opp_table>;
+ iommus = <&apps_smmu 0x00c0 0x0>;
interconnects = <&system_noc MASTER_SDCC_1 RPM_ALWAYS_TAG
&bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>,
<&bimc MASTER_AMPSS_M0 RPM_ALWAYS_TAG
@@ -1230,6 +1233,8 @@
"ref_aux",
"qref";
+ power-domains = <&gcc GCC_UFS_PHY_GDSC>;
+
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
@@ -1655,6 +1660,7 @@
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
snps,usb3_lpm_capable;
+ snps,parkmode-disable-ss-quirk;
usb-role-switch;
@@ -3011,8 +3017,6 @@
thermal-zones {
mapss-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 0>;
trips {
@@ -3031,8 +3035,6 @@
};
cdsp-hvx-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
trips {
@@ -3051,8 +3053,6 @@
};
wlan-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
trips {
@@ -3071,8 +3071,6 @@
};
camera-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
trips {
@@ -3091,8 +3089,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
trips {
@@ -3111,8 +3107,6 @@
};
modem1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
trips {
@@ -3131,8 +3125,6 @@
};
cpu4-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
trips {
@@ -3157,8 +3149,6 @@
};
cpu5-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
trips {
@@ -3183,8 +3173,6 @@
};
cpu6-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 8>;
trips {
@@ -3209,8 +3197,6 @@
};
cpu7-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 9>;
trips {
@@ -3235,8 +3221,6 @@
};
cpu45-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 10>;
trips {
@@ -3261,8 +3245,6 @@
};
cpu67-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 11>;
trips {
@@ -3287,8 +3269,6 @@
};
cpu0123-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 12>;
trips {
@@ -3313,8 +3293,6 @@
};
modem0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 13>;
trips {
@@ -3333,8 +3311,6 @@
};
display-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 14>;
trips {
@@ -3353,8 +3329,8 @@
};
gpu-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay-passive = <250>;
+
thermal-sensors = <&tsens0 15>;
cooling-maps {
@@ -3366,13 +3342,13 @@
trips {
gpu_alert0: trip-point0 {
- temperature = <115000>;
- hysteresis = <5000>;
+ temperature = <85000>;
+ hysteresis = <1000>;
type = "passive";
};
trip-point1 {
- temperature = <125000>;
+ temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
diff --git a/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts b/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts
index 54da053a8042..9d78bb3f7190 100644
--- a/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts
+++ b/arch/arm64/boot/dts/qcom/sm6115p-lenovo-j606f.dts
@@ -359,6 +359,7 @@
};
&usb_dwc3 {
+ /delete-property/ usb-role-switch;
maximum-speed = "high-speed";
dr_mode = "peripheral";
diff --git a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
index 08046f866f60..dcd05f303b78 100644
--- a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+++ b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
@@ -90,8 +90,6 @@
thermal-zones {
rf-pa0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm6125_adc_tm 0>;
trips {
@@ -104,8 +102,6 @@
};
quiet-thermal {
- polling-delay-passive = <0>;
- polling-delay = <5000>;
thermal-sensors = <&pm6125_adc_tm 1>;
trips {
@@ -118,8 +114,6 @@
};
xo-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm6125_adc_tm 2>;
trips {
@@ -132,8 +126,6 @@
};
rf-pa1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm6125_adc_tm 3>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts b/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts
index a49d3ebb1931..994fb0412fcb 100644
--- a/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts
+++ b/arch/arm64/boot/dts/qcom/sm6125-xiaomi-laurel-sprout.dts
@@ -84,8 +84,6 @@
thermal-zones {
rf-pa0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm6125_adc_tm 0>;
trips {
@@ -98,8 +96,6 @@
};
quiet-thermal {
- polling-delay-passive = <0>;
- polling-delay = <5000>;
thermal-sensors = <&pm6125_adc_tm 1>;
trips {
@@ -112,8 +108,6 @@
};
xo-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm6125_adc_tm 2>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
index 98ab08356088..777c380c2fa0 100644
--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
@@ -1588,10 +1588,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 1 0xf08
- GIC_PPI 2 0xf08
- GIC_PPI 3 0xf08
- GIC_PPI 0 0xf08>;
+ interrupts = <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
clock-frequency = <19200000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts b/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
index dddd6e44d280..bf23033a294e 100644
--- a/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
+++ b/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
@@ -293,7 +293,7 @@
compatible = "samsung,s6sy761";
reg = <0x48>;
interrupt-parent = <&tlmm>;
- interrupts = <22 0x2008>;
+ interrupts = <22 IRQ_TYPE_LEVEL_LOW>;
vdd-supply = <&pm6350_l11>;
avdd-supply = <&touch_en_vreg>;
@@ -375,6 +375,7 @@
};
&usb_1_dwc3 {
+ /delete-property/ usb-role-switch;
maximum-speed = "super-speed";
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index 84ff20a96c83..7986ddb30f6e 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -1197,6 +1197,8 @@
"ref_aux",
"qref";
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
@@ -1321,6 +1323,7 @@
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
label = "adsp";
+ qcom,non-secure-domain;
#address-cells = <1>;
#size-cells = <0>;
@@ -1580,6 +1583,7 @@
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
label = "cdsp";
+ qcom,non-secure-domain;
#address-cells = <1>;
#size-cells = <0>;
@@ -1713,10 +1717,39 @@
<&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";
+ orientation-switch;
+
#clock-cells = <1>;
#phy-cells = <1>;
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_qmpphy_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usb_1_qmpphy_dp_in: endpoint {
+ };
+ };
+ };
};
dc_noc: interconnect@9160000 {
@@ -1890,8 +1923,30 @@
snps,dis_enblslpm_quirk;
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
+ snps,parkmode-disable-ss-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
+ usb-role-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_dwc3_hs_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_dwc3_ss_out: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
+ };
+ };
+ };
};
};
@@ -2831,9 +2886,6 @@
thermal-zones {
aoss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 0>;
trips {
@@ -2846,9 +2898,6 @@
};
aoss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 0>;
trips {
@@ -2861,9 +2910,6 @@
};
audio-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 2>;
trips {
@@ -2876,9 +2922,6 @@
};
camera-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 5>;
trips {
@@ -2891,9 +2934,6 @@
};
cpu0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 1>;
trips {
@@ -2919,9 +2959,6 @@
};
cpu1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 2>;
trips {
@@ -2947,9 +2984,6 @@
};
cpu2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 3>;
trips {
@@ -2975,9 +3009,6 @@
};
cpu3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 4>;
trips {
@@ -3003,9 +3034,6 @@
};
cpu4-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 5>;
trips {
@@ -3031,9 +3059,6 @@
};
cpu5-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 6>;
trips {
@@ -3059,9 +3084,6 @@
};
cpu6-left-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 9>;
trips {
@@ -3087,9 +3109,6 @@
};
cpu6-right-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 10>;
trips {
@@ -3115,9 +3134,6 @@
};
cpu7-left-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 11>;
trips {
@@ -3143,9 +3159,6 @@
};
cpu7-right-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 12>;
trips {
@@ -3171,9 +3184,6 @@
};
cpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 7>;
trips {
@@ -3186,9 +3196,6 @@
};
cpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 8>;
trips {
@@ -3201,9 +3208,6 @@
};
cwlan-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 1>;
trips {
@@ -3216,9 +3220,6 @@
};
ddr-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 3>;
trips {
@@ -3231,21 +3232,20 @@
};
gpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay-passive = <250>;
thermal-sensors = <&tsens0 13>;
trips {
gpuss0_alert0: trip-point0 {
- temperature = <95000>;
+ temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
gpuss0-crit {
- temperature = <115000>;
- hysteresis = <0>;
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -3259,21 +3259,20 @@
};
gpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay-passive = <250>;
thermal-sensors = <&tsens0 14>;
trips {
gpuss1_alert0: trip-point0 {
- temperature = <95000>;
+ temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
gpuss1-crit {
- temperature = <115000>;
- hysteresis = <0>;
+ temperature = <110000>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -3287,9 +3286,6 @@
};
modem-core0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 6>;
trips {
@@ -3302,9 +3298,6 @@
};
modem-core1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 7>;
trips {
@@ -3317,9 +3310,6 @@
};
modem-scl-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 9>;
trips {
@@ -3332,9 +3322,6 @@
};
modem-vec-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 8>;
trips {
@@ -3347,9 +3334,6 @@
};
npu-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 10>;
trips {
@@ -3362,9 +3346,6 @@
};
q6-hvx-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 4>;
trips {
@@ -3377,9 +3358,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 11>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts b/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts
index cca2c2eb88ad..e04a3b8f81c5 100644
--- a/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts
+++ b/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts
@@ -142,7 +142,7 @@
compatible = "samsung,s6sy761";
reg = <0x48>;
interrupt-parent = <&tlmm>;
- interrupts = <22 0x2008>;
+ interrupts = <22 IRQ_TYPE_LEVEL_LOW>;
vdd-supply = <&pm6125_l13>;
avdd-supply = <&touch_avdd>;
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index f40509d91bbd..ddea681b536d 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -1837,9 +1837,6 @@
thermal-zones {
mapss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 0>;
trips {
@@ -1864,9 +1861,6 @@
};
cpu0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 1>;
trips {
@@ -1891,9 +1885,6 @@
};
cpu1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 2>;
trips {
@@ -1918,9 +1909,6 @@
};
cpu2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 3>;
trips {
@@ -1945,9 +1933,6 @@
};
cpu3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 4>;
trips {
@@ -1972,9 +1957,6 @@
};
cpu4-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 5>;
trips {
@@ -1999,9 +1981,6 @@
};
cpu5-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 6>;
trips {
@@ -2026,9 +2005,6 @@
};
cluster0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 7>;
trips {
@@ -2053,9 +2029,6 @@
};
cluster1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 8>;
trips {
@@ -2080,9 +2053,6 @@
};
cpu6-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 9>;
trips {
@@ -2107,9 +2077,6 @@
};
cpu7-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 10>;
trips {
@@ -2134,9 +2101,6 @@
};
cpu-unk0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 11>;
trips {
@@ -2161,9 +2125,6 @@
};
cpu-unk1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 12>;
trips {
@@ -2188,9 +2149,6 @@
};
gpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 13>;
trips {
@@ -2215,9 +2173,6 @@
};
gpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens0 14>;
trips {
@@ -2242,9 +2197,6 @@
};
mapss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 0>;
trips {
@@ -2269,9 +2221,6 @@
};
cwlan-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 1>;
trips {
@@ -2296,9 +2245,6 @@
};
audio-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 2>;
trips {
@@ -2323,9 +2269,6 @@
};
ddr-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 3>;
trips {
@@ -2350,9 +2293,6 @@
};
q6hvx-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 4>;
trips {
@@ -2377,9 +2317,6 @@
};
camera-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 5>;
trips {
@@ -2404,9 +2341,6 @@
};
mdm-core0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 6>;
trips {
@@ -2431,9 +2365,6 @@
};
mdm-core1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 7>;
trips {
@@ -2458,9 +2389,6 @@
};
mdm-vec-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 8>;
trips {
@@ -2485,9 +2413,6 @@
};
msm-scl-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 9>;
trips {
@@ -2512,9 +2437,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
thermal-sensors = <&tsens1 10>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
index bc67e8c1fe4d..2ee2561b57b1 100644
--- a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
+++ b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
@@ -19,6 +19,7 @@
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/usb/pd.h>
#include "sm7225.dtsi"
#include "pm6150l.dtsi"
#include "pm6350.dtsi"
@@ -92,10 +93,22 @@
};
};
+ msm_therm_sensor: thermal-sensor-msm {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&pm6150l_adc ADC5_AMUX_THM2_100K_PU>;
+ io-channel-names = "sensor-channel";
+ };
+
+ rear_cam_sensor: thermal-sensor-rear-cam {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&pm6150l_adc ADC5_GPIO2_100K_PU>;
+ io-channel-names = "sensor-channel";
+ };
+
thermal-zones {
chg-skin-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm7250b_adc_tm 0>;
trips {
@@ -108,8 +121,6 @@
};
conn-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm7250b_adc_tm 1>;
trips {
@@ -120,6 +131,119 @@
};
};
};
+
+ pa0-thermal {
+ thermal-sensors = <&pm6150l_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ pa1-thermal {
+ thermal-sensors = <&pm6150l_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ pm8008-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&pm8008>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ quiet-thermal {
+ thermal-sensors = <&pm6150l_adc_tm 3>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ rear-cam-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&rear_cam_sensor>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ rfc-flash-thermal {
+ thermal-sensors = <&pm6150l_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ sdm-skin-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&msm_therm_sensor>;
+
+ trips {
+ trip0 {
+ temperature = <45000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <55000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ xo-thermal {
+ thermal-sensors = <&pmk8350_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
};
};
@@ -134,124 +258,145 @@
qcom,pmic-id = "a";
vreg_s1a: smps1 {
+ regulator-name = "vreg_s1a";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1200000>;
};
vreg_s2a: smps2 {
+ regulator-name = "vreg_s2a";
regulator-min-microvolt = <1503000>;
regulator-max-microvolt = <2048000>;
};
vreg_l2a: ldo2 {
+ regulator-name = "vreg_l2a";
regulator-min-microvolt = <1503000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3a: ldo3 {
+ regulator-name = "vreg_l3a";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4a: ldo4 {
+ regulator-name = "vreg_l4a";
regulator-min-microvolt = <352000>;
regulator-max-microvolt = <801000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l5a: ldo5 {
+ regulator-name = "vreg_l5a";
regulator-min-microvolt = <1503000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6a: ldo6 {
+ regulator-name = "vreg_l6a";
regulator-min-microvolt = <1710000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7a: ldo7 {
+ regulator-name = "vreg_l7a";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8a: ldo8 {
+ regulator-name = "vreg_l8a";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9a: ldo9 {
+ regulator-name = "vreg_l9a";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <3401000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11a: ldo11 {
+ regulator-name = "vreg_l11a";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12a: ldo12 {
+ regulator-name = "vreg_l12a";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13a: ldo13 {
+ regulator-name = "vreg_l13a";
regulator-min-microvolt = <570000>;
regulator-max-microvolt = <650000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l14a: ldo14 {
+ regulator-name = "vreg_l14a";
regulator-min-microvolt = <1700000>;
regulator-max-microvolt = <1900000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l15a: ldo15 {
+ regulator-name = "vreg_l15a";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1305000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l16a: ldo16 {
+ regulator-name = "vreg_l16a";
regulator-min-microvolt = <830000>;
regulator-max-microvolt = <921000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l18a: ldo18 {
+ regulator-name = "vreg_l18a";
regulator-min-microvolt = <788000>;
regulator-max-microvolt = <1049000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l19a: ldo19 {
+ regulator-name = "vreg_l19a";
regulator-min-microvolt = <1080000>;
regulator-max-microvolt = <1305000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l20a: ldo20 {
+ regulator-name = "vreg_l20a";
regulator-min-microvolt = <530000>;
regulator-max-microvolt = <801000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l21a: ldo21 {
+ regulator-name = "vreg_l21a";
regulator-min-microvolt = <751000>;
regulator-max-microvolt = <825000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l22a: ldo22 {
+ regulator-name = "vreg_l22a";
regulator-min-microvolt = <1080000>;
regulator-max-microvolt = <1305000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -263,41 +408,48 @@
qcom,pmic-id = "e";
vreg_s8e: smps8 {
+ regulator-name = "vreg_s8e";
regulator-min-microvolt = <313000>;
regulator-max-microvolt = <1395000>;
};
vreg_l1e: ldo1 {
+ regulator-name = "vreg_l1e";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <1980000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2e: ldo2 {
+ regulator-name = "vreg_l2e";
regulator-min-microvolt = <1170000>;
regulator-max-microvolt = <1305000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3e: ldo3 {
+ regulator-name = "vreg_l3e";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1299000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4e: ldo4 {
+ regulator-name = "vreg_l4e";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l5e: ldo5 {
+ regulator-name = "vreg_l5e";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <3300000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6e: ldo6 {
+ regulator-name = "vreg_l6e";
regulator-min-microvolt = <1700000>;
regulator-max-microvolt = <2950000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -307,18 +459,21 @@
};
vreg_l7e: ldo7 {
+ regulator-name = "vreg_l7e";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3544000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8e: ldo8 {
+ regulator-name = "vreg_l8e";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9e: ldo9 {
+ regulator-name = "vreg_l9e";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -328,18 +483,21 @@
};
vreg_l10e: ldo10 {
+ regulator-name = "vreg_l10e";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3401000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11e: ldo11 {
+ regulator-name = "vreg_l11e";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3401000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_bob: bob {
+ regulator-name = "vreg_bob";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <5492000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
@@ -407,7 +565,79 @@
};
&i2c10 {
- /* PM8008 PMIC @ 8 and 9 */
+ clock-frequency = <400000>;
+ status = "okay";
+
+ pm8008: pmic@8 {
+ compatible = "qcom,pm8008";
+ reg = <0x8>;
+
+ interrupts-extended = <&tlmm 59 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&tlmm 58 GPIO_ACTIVE_LOW>;
+
+ vdd-l1-l2-supply = <&vreg_s8e>;
+ vdd-l3-l4-supply = <&vreg_bob>;
+ vdd-l5-supply = <&vreg_bob>;
+ vdd-l6-supply = <&vreg_s2a>;
+ vdd-l7-supply = <&vreg_bob>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pm8008_default>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pm8008 0 0 2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ #thermal-sensor-cells = <0>;
+
+ regulators {
+ vreg_l1p: ldo1 {
+ regulator-name = "vreg_l1p";
+ regulator-min-microvolt = <528000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vreg_l2p: ldo2 {
+ regulator-name = "vreg_l2p";
+ regulator-min-microvolt = <528000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vreg_l3p: ldo3 {
+ regulator-name = "vreg_l3p";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ vreg_l4p: ldo4 {
+ regulator-name = "vreg_l4p";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2904000>;
+ };
+
+ vreg_l5p: ldo5 {
+ regulator-name = "vreg_l5p";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ vreg_l6p: ldo6 {
+ regulator-name = "vreg_l6p";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_l7p: ldo7 {
+ regulator-name = "vreg_l7p";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <3140000>;
+ };
+ };
+ };
+
/* PX8618 @ 26 */
/* SMB1395 PMIC @ 34 */
/* awinic,aw8695 @ 5a */
@@ -462,6 +692,91 @@
status = "okay";
};
+&pm6150l_adc {
+ pinctrl-0 = <&pm6150l_adc_default>;
+ pinctrl-names = "default";
+
+ channel@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ label = "pa_therm1";
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ qcom,ratiometric;
+ };
+
+ channel@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ label = "msm_therm";
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ qcom,ratiometric;
+ };
+
+ channel@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ label = "pa_therm0";
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ qcom,ratiometric;
+ };
+
+ channel@53 {
+ reg = <ADC5_GPIO2_100K_PU>;
+ label = "rear_cam_therm";
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ qcom,ratiometric;
+ };
+
+ channel@54 {
+ reg = <ADC5_GPIO3_100K_PU>;
+ label = "rear_cam_flash_therm";
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ qcom,ratiometric;
+ };
+
+ channel@55 {
+ reg = <ADC5_GPIO4_100K_PU>;
+ label = "quiet_therm";
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ qcom,ratiometric;
+ };
+};
+
+&pm6150l_adc_tm {
+ status = "okay";
+
+ pa-therm1@0 {
+ reg = <0>;
+ io-channels = <&pm6150l_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,ratiometric;
+ };
+
+ pa-therm0@1 {
+ reg = <1>;
+ io-channels = <&pm6150l_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,ratiometric;
+ };
+
+ rear-cam-flash-therm@2 {
+ reg = <2>;
+ io-channels = <&pm6150l_adc ADC5_GPIO3_100K_PU>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,ratiometric;
+ };
+
+ quiet-therm@3 {
+ reg = <3>;
+ io-channels = <&pm6150l_adc ADC5_GPIO4_100K_PU>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,ratiometric;
+ };
+};
+
&pm6150l_flash {
status = "okay";
@@ -484,6 +799,14 @@
};
};
+&pm6150l_gpios {
+ pm6150l_adc_default: adc-default-state {
+ pins = "gpio6", "gpio7", "gpio10";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ bias-high-impedance;
+ };
+};
+
&pm6150l_wled {
qcom,switching-freq = <800>;
qcom,current-limit-microamp = <20000>;
@@ -543,6 +866,64 @@
};
};
+&pm7250b_typec {
+ vdd-pdphy-supply = <&vreg_l3a>;
+
+ status = "okay";
+
+ connector {
+ compatible = "usb-c-connector";
+
+ power-role = "dual";
+ data-role = "dual";
+ self-powered;
+
+ /*
+ * Disable USB Power Delivery for now; it seems to need extra work
+ * to support role switching while still letting the battery
+ * charge, since there is no charger driver yet.
+ */
+ typec-power-opmode = "default";
+ pd-disable;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ pm7250b_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ pm7250b_ss_in: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_out>;
+ };
+ };
+ };
+ };
+};
+
+&pm7250b_vbus {
+ regulator-min-microamp = <500000>;
+ regulator-max-microamp = <1500000>;
+ status = "okay";
+};
+
+&pmk8350_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pmk8350_vadc PMK8350_ADC7_AMUX_THM1_100K_PU>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,ratiometric;
+ };
+};
+
&pmk8350_rtc {
status = "okay";
};
@@ -673,6 +1054,22 @@
*/
bias-pull-up;
};
+
+ pm8008_default: pm8008-default-state {
+ int-pins {
+ pins = "gpio59";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ reset-n-pins {
+ pins = "gpio58";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
};
&uart1 {
@@ -726,7 +1123,11 @@
&usb_1_dwc3 {
maximum-speed = "super-speed";
- dr_mode = "peripheral";
+ dr_mode = "otg";
+};
+
+&usb_1_dwc3_hs_out {
+ remote-endpoint = <&pm7250b_hs_in>;
};
&usb_1_hsphy {
@@ -744,6 +1145,10 @@
status = "okay";
};
+&usb_1_qmpphy_out {
+ remote-endpoint = <&pm7250b_ss_in>;
+};
+
&wifi {
vdd-0.8-cx-mx-supply = <&vreg_l4a>;
vdd-1.8-xo-supply = <&vreg_l7a>;
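
The Fairphone FP4 additions above wire each board thermistor through three layers: a VADC channel on pm6150l, then either a PMIC ADC-TM monitoring channel (for the pa0/pa1/quiet/rfc-flash/xo zones) or a generic-adc-thermal sensor node (for the polled zones), and finally a thermal zone that consumes the sensor. Condensed from the hunks above, the chain for the MSM skin thermistor looks roughly like this (trip values as in the patch; the ADC settle-time and pre-scaling properties are omitted here for brevity):

&pm6150l_adc {
    channel@4e {
        reg = <ADC5_AMUX_THM2_100K_PU>;  /* raw VADC channel */
        label = "msm_therm";
        qcom,ratiometric;
    };
};

/* Bridges the IIO channel into the thermal framework */
msm_therm_sensor: thermal-sensor-msm {
    compatible = "generic-adc-thermal";
    #thermal-sensor-cells = <0>;
    io-channels = <&pm6150l_adc ADC5_AMUX_THM2_100K_PU>;
    io-channel-names = "sensor-channel";
};

/* An ADC-backed sensor has no interrupt, so this zone is polled */
sdm-skin-thermal {
    polling-delay-passive = <1000>;
    polling-delay = <5000>;
    thermal-sensors = <&msm_therm_sensor>;
    /* trips: 45 °C passive, 55 °C critical, as in the hunk above */
};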
diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
index 6cb6f503fdac..bac08f00b303 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
@@ -470,7 +470,6 @@
&mdss_dp_out {
data-lanes = <0 1>;
- remote-endpoint = <&usb_1_qmpphy_dp_in>;
};
&mdss_dsi0 {
@@ -556,7 +555,7 @@
port@0 {
reg = <0>;
- pm8150b_role_switch_in: endpoint {
+ pm8150b_hs_in: endpoint {
remote-endpoint = <&usb_1_dwc3_hs>;
};
};
@@ -676,18 +675,10 @@
orientation-switch;
};
-&usb_1_qmpphy_dp_in {
- remote-endpoint = <&mdss_dp_out>;
-};
-
&usb_1_qmpphy_out {
remote-endpoint = <&pm8150b_typec_mux_in>;
};
-&usb_1_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&usb_2_qmpphy {
status = "okay";
vdda-phy-supply = <&vreg_l3c_1p2>;
@@ -708,11 +699,7 @@
};
&usb_1_dwc3_hs {
- remote-endpoint = <&pm8150b_role_switch_in>;
-};
-
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
+ remote-endpoint = <&pm8150b_hs_in>;
};
&usb_2_dwc3 {
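
The HDK change above is one half of a graph rework that continues in the sm8150.dtsi hunks that follow: the SoC dtsi now carries the fixed SoC-internal links (dwc3 SS port to the QMP combo PHY, QMP DP input to the MDSS DP output), so a board file only connects the Type-C block to the controller's HS port and to the PHY output. In rough outline, using the endpoint labels from this patch, the split ends up as:

/* Board file: only the connector-facing endpoints remain */
&usb_1_dwc3_hs {
    remote-endpoint = <&pm8150b_hs_in>;         /* HS data to the PMIC Type-C block */
};

&usb_1_qmpphy_out {
    remote-endpoint = <&pm8150b_typec_mux_in>;  /* SS/DP lanes to the Type-C mux */
};

/* SoC dtsi: fixed internal link, identical on every board */
usb_1_dwc3_ss: endpoint {
    remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
};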
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index ff22e4346660..3e236adb9397 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/clock/qcom,dispcc-sm8150.h>
#include <dt-bindings/clock/qcom,gcc-sm8150.h>
#include <dt-bindings/clock/qcom,gpucc-sm8150.h>
+#include <dt-bindings/clock/qcom,videocc-sm8150.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sm8150.h>
#include <dt-bindings/thermal/thermal.h>
@@ -3507,6 +3508,7 @@
reg = <1>;
usb_1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
};
};
@@ -3514,6 +3516,7 @@
reg = <2>;
usb_1_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp_out>;
};
};
};
@@ -3672,6 +3675,7 @@
reg = <1>;
usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
};
};
};
@@ -3735,6 +3739,19 @@
};
};
+ videocc: clock-controller@ab00000 {
+ compatible = "qcom,sm8150-videocc";
+ reg = <0 0x0ab00000 0 0x10000>;
+ clocks = <&gcc GCC_VIDEO_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "bi_tcxo";
+ power-domains = <&rpmhpd SM8150_MMCX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
camnoc_virt: interconnect@ac00000 {
compatible = "qcom,sm8150-camnoc-virt";
reg = <0 0x0ac00000 0 0x1000>;
@@ -3894,6 +3911,7 @@
reg = <1>;
mdss_dp_out: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_dp_in>;
};
};
};
@@ -4577,7 +4595,6 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -4621,7 +4638,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -4665,7 +4681,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -4709,7 +4724,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
@@ -4753,7 +4767,6 @@
cpu4-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -4797,7 +4810,6 @@
cpu5-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -4841,7 +4853,6 @@
cpu6-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 9>;
@@ -4885,7 +4896,6 @@
cpu7-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
@@ -4929,7 +4939,6 @@
cpu4-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 11>;
@@ -4973,7 +4982,6 @@
cpu5-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 12>;
@@ -5017,7 +5025,6 @@
cpu6-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 13>;
@@ -5061,7 +5068,6 @@
cpu7-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 14>;
@@ -5105,7 +5111,6 @@
aoss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 0>;
@@ -5120,7 +5125,6 @@
cluster0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -5140,7 +5144,6 @@
cluster1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
@@ -5160,7 +5163,6 @@
gpu-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 15>;
@@ -5173,16 +5175,27 @@
trips {
gpu_top_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
aoss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 0>;
@@ -5197,7 +5210,6 @@
wlan-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 1>;
@@ -5212,7 +5224,6 @@
video-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 2>;
@@ -5227,7 +5238,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
@@ -5242,7 +5252,6 @@
q6-hvx-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 4>;
@@ -5257,7 +5266,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 5>;
@@ -5272,7 +5280,6 @@
compute-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 6>;
@@ -5287,7 +5294,6 @@
modem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 7>;
@@ -5302,7 +5308,6 @@
npu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 8>;
@@ -5317,7 +5322,6 @@
modem-vec-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 9>;
@@ -5332,7 +5336,6 @@
modem-scl-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 10>;
@@ -5347,7 +5350,6 @@
gpu-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 11>;
@@ -5360,10 +5362,22 @@
trips {
gpu_bottom_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
};
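
Besides dropping the active polling interval, the sm8150.dtsi hunks rework the GPU zones from a single 90 °C "hot" trip into a three-level scheme: a labelled 85 °C passive trip (the point a cooling map can bind to, re-evaluated every 250 ms via polling-delay-passive), a 90 °C hot trip, and a 110 °C critical trip that lets the thermal core force a shutdown, each with 1 °C hysteresis. Pulled together from the hunks above:

gpu-top-thermal {
    polling-delay-passive = <250>;
    thermal-sensors = <&tsens0 15>;

    trips {
        gpu_top_alert0: trip-point0 {
            temperature = <85000>;   /* start passive throttling */
            hysteresis = <1000>;
            type = "passive";
        };

        trip-point1 {
            temperature = <90000>;   /* hot: warning level, no cooling bound here */
            hysteresis = <1000>;
            type = "hot";
        };

        trip-point2 {
            temperature = <110000>;  /* critical: emergency shutdown */
            hysteresis = <1000>;
            type = "critical";
        };
    };
};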
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index 7ef99038cb37..21b2ca1def83 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -53,8 +53,6 @@
thermal-zones {
camera-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150l_adc_tm 0>;
trips {
@@ -67,8 +65,6 @@
};
conn-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150b_adc_tm 0>;
trips {
@@ -81,8 +77,6 @@
};
mmw-pa1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150_adc_tm 2>;
trips {
@@ -95,8 +89,6 @@
};
mmw-pa2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150l_adc_tm 2>;
trips {
@@ -109,8 +101,6 @@
};
skin-msm-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150l_adc_tm 1>;
trips {
@@ -123,8 +113,6 @@
};
skin-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150_adc_tm 1>;
trips {
@@ -137,8 +125,6 @@
};
xo-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pm8150_adc_tm 0>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
index e07d0311ecb5..f6870d3f2886 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
@@ -520,7 +520,7 @@
compatible = "samsung,s6sy761";
reg = <0x48>;
interrupt-parent = <&tlmm>;
- interrupts = <39 0x2008>;
+ interrupts = <39 IRQ_TYPE_LEVEL_LOW>;
/* It's "vddio" downstream but it works anyway! */
vdd-supply = <&vreg_l1c_1p8>;
avdd-supply = <&vreg_l10c_3p3>;
diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
index 41f117474872..3596dd328c31 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
@@ -659,8 +659,8 @@
port@0 {
reg = <0>;
- pm8150b_role_switch_in: endpoint {
- remote-endpoint = <&usb_1_role_switch_out>;
+ pm8150b_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs_out>;
};
};
};
@@ -725,8 +725,8 @@
status = "okay";
};
-&usb_1_role_switch_out {
- remote-endpoint = <&pm8150b_role_switch_in>;
+&usb_1_dwc3_hs_out {
+ remote-endpoint = <&pm8150b_hs_in>;
};
&ufs_mem_hc {
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 8ccade628f1f..9d6c97d1fd9d 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -2204,7 +2204,7 @@
status = "disabled";
- pcie@0 {
+ pcieport0: pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
bus-range = <0x01 0xff>;
@@ -2580,6 +2580,8 @@
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
#phy-cells = <0>;
status = "disabled";
@@ -3936,6 +3938,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -3947,6 +3951,10 @@
port@1 {
reg = <1>;
+
+ usb_1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss_out>;
+ };
};
port@2 {
@@ -4225,8 +4233,24 @@
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
- port {
- usb_1_role_switch_out: endpoint {};
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_dwc3_hs_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_dwc3_ss_out: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
+ };
+ };
};
};
};
@@ -6275,7 +6299,6 @@
thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -6319,7 +6342,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -6363,7 +6385,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -6407,7 +6428,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
@@ -6451,7 +6471,6 @@
cpu4-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -6495,7 +6514,6 @@
cpu5-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -6539,7 +6557,6 @@
cpu6-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 9>;
@@ -6583,7 +6600,6 @@
cpu7-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
@@ -6627,7 +6643,6 @@
cpu4-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 11>;
@@ -6671,7 +6686,6 @@
cpu5-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 12>;
@@ -6715,7 +6729,6 @@
cpu6-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 13>;
@@ -6759,7 +6772,6 @@
cpu7-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 14>;
@@ -6803,7 +6815,6 @@
aoss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 0>;
@@ -6818,7 +6829,6 @@
cluster0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -6838,7 +6848,6 @@
cluster1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
@@ -6858,7 +6867,6 @@
gpu-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 15>;
@@ -6871,16 +6879,27 @@
trips {
gpu_top_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
aoss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 0>;
@@ -6895,7 +6914,6 @@
wlan-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 1>;
@@ -6910,7 +6928,6 @@
video-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 2>;
@@ -6925,7 +6942,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
@@ -6940,7 +6956,6 @@
q6-hvx-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 4>;
@@ -6955,7 +6970,6 @@
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 5>;
@@ -6970,7 +6984,6 @@
compute-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 6>;
@@ -6985,7 +6998,6 @@
npu-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 7>;
@@ -7000,7 +7012,6 @@
gpu-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 8>;
@@ -7013,10 +7024,22 @@
trips {
gpu_bottom_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
index 4c25ab2f5670..895adce59e75 100644
--- a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
@@ -486,17 +486,10 @@
&mdss_dp {
status = "okay";
+};
- ports {
- port@1 {
- reg = <1>;
-
- mdss_dp0_out: endpoint {
- data-lanes = <0 1>;
- remote-endpoint = <&usb_1_qmpphy_dp_in>;
- };
- };
- };
+&mdss_dp_out {
+ data-lanes = <0 1>;
};
&mpss {
@@ -864,10 +857,6 @@
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
-};
-
&usb_1_hsphy {
status = "okay";
@@ -881,22 +870,12 @@
vdda-phy-supply = <&vreg_l6b_1p2>;
vdda-pll-supply = <&vreg_l1b_0p88>;
-
- orientation-switch;
-};
-
-&usb_1_qmpphy_dp_in {
- remote-endpoint = <&mdss_dp0_out>;
};
&usb_1_qmpphy_out {
remote-endpoint = <&pmic_glink_ss_in>;
};
-&usb_1_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&usb_2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index f7c4700f00c3..38ee0850c335 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -301,8 +301,18 @@
reg = <0x0 0x80000000 0x0 0x0>;
};
- pmu {
- compatible = "arm,armv8-pmuv3";
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-a78 {
+ compatible = "arm,cortex-a78-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-x1 {
+ compatible = "arm,cortex-x1-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
@@ -1779,6 +1789,8 @@
"ref_aux",
"qref";
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
@@ -2256,6 +2268,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
status = "disabled";
ports {
@@ -2273,6 +2287,7 @@
reg = <1>;
usb_1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
};
};
@@ -2280,6 +2295,7 @@
reg = <2>;
usb_1_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp_out>;
};
};
};
@@ -2405,6 +2421,7 @@
reg = <1>;
usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
};
};
};
@@ -2626,6 +2643,14 @@
remote-endpoint = <&dpu_intf0_out>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dp_out: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_dp_in>;
+ };
+ };
};
dp_opp_table: opp-table {
@@ -3665,7 +3690,6 @@
thermal_zones: thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 1>;
@@ -3709,7 +3733,6 @@
cpu1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 2>;
@@ -3753,7 +3776,6 @@
cpu2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
@@ -3797,7 +3819,6 @@
cpu3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 4>;
@@ -3841,7 +3862,6 @@
cpu4-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 7>;
@@ -3885,7 +3905,6 @@
cpu5-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
@@ -3929,7 +3948,6 @@
cpu6-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 9>;
@@ -3973,7 +3991,6 @@
cpu7-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
@@ -4017,7 +4034,6 @@
cpu4-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 11>;
@@ -4061,7 +4077,6 @@
cpu5-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 12>;
@@ -4105,7 +4120,6 @@
cpu6-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 13>;
@@ -4149,7 +4163,6 @@
cpu7-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 14>;
@@ -4193,7 +4206,6 @@
aoss0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 0>;
@@ -4208,7 +4220,6 @@
cluster0-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
@@ -4228,7 +4239,6 @@
cluster1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens0 6>;
@@ -4248,7 +4258,6 @@
aoss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 0>;
@@ -4263,7 +4272,6 @@
gpu-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 1>;
@@ -4276,16 +4284,27 @@
trips {
gpu_top_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
gpu-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 2>;
@@ -4298,16 +4317,27 @@
trips {
gpu_bottom_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
hysteresis = <1000>;
type = "hot";
};
+
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
nspss1-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
@@ -4322,7 +4352,6 @@
nspss2-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 4>;
@@ -4337,7 +4366,6 @@
nspss3-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 5>;
@@ -4352,7 +4380,6 @@
video-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 6>;
@@ -4367,7 +4394,6 @@
mem-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 7>;
@@ -4382,7 +4408,6 @@
modem1-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 8>;
@@ -4397,7 +4422,6 @@
modem2-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 9>;
@@ -4412,7 +4436,6 @@
modem3-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 10>;
@@ -4427,7 +4450,6 @@
modem4-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 11>;
@@ -4442,7 +4464,6 @@
camera-top-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 12>;
@@ -4457,7 +4478,6 @@
cam-bottom-thermal {
polling-delay-passive = <250>;
- polling-delay = <1000>;
thermal-sensors = <&tsens1 13>;
diff --git a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
index 3be46b56c723..a754b8fe9167 100644
--- a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
@@ -138,7 +138,7 @@
thermal-zones {
camera-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 2>;
trips {
@@ -152,7 +152,7 @@
rear-tof-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 5>;
trips {
@@ -166,7 +166,7 @@
skin-msm-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 1>;
trips {
@@ -180,7 +180,7 @@
therm1-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 3>;
trips {
@@ -194,7 +194,7 @@
therm2-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 6>;
trips {
@@ -208,7 +208,7 @@
usb-conn-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 7>;
trips {
@@ -222,7 +222,7 @@
wide-rfc-thermal {
polling-delay-passive = <250>;
- polling-delay = <0>;
+
thermal-sensors = <&pmk8350_adc_tm 4>;
trips {
@@ -235,8 +235,6 @@
};
xo-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&pmk8350_adc_tm 0>;
trips {
@@ -674,17 +672,10 @@
&mdss_dp0 {
status = "okay";
+};
- ports {
- port@1 {
- reg = <1>;
-
- mdss_dp0_out: endpoint {
- data-lanes = <0 1>;
- remote-endpoint = <&usb_1_qmpphy_dp_in>;
- };
- };
- };
+&mdss_dp0_out {
+ data-lanes = <0 1>;
};
&pcie0 {
@@ -1114,10 +1105,6 @@
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
-};
-
&usb_1_hsphy {
status = "okay";
@@ -1131,22 +1118,12 @@
vdda-phy-supply = <&vreg_l6b_1p2>;
vdda-pll-supply = <&vreg_l1b_0p91>;
-
- orientation-switch;
-};
-
-&usb_1_qmpphy_dp_in {
- remote-endpoint = <&mdss_dp0_out>;
};
&usb_1_qmpphy_out {
remote-endpoint = <&pmic_glink_ss_in>;
};
-&usb_1_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&vamacro {
pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi
index 8b29fcf483a3..17dbb67868ae 100644
--- a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi
@@ -488,7 +488,7 @@
compatible = "samsung,s6sy761";
reg = <0x48>;
interrupt-parent = <&tlmm>;
- interrupts = <21 0x2008>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
vdd-supply = <&pm8350c_l2>;
avdd-supply = <&pm8350c_l3>;
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index 616461fcbab9..9bafb3b350ff 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -754,8 +754,8 @@
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&sleep_clk>,
<&pcie0_phy>,
- <&pcie1_phy>,
- <0>,
+ <&pcie1_phy QMP_PCIE_PIPE_CLK>,
+ <&pcie1_phy QMP_PCIE_PHY_AUX_CLK>,
<&ufs_mem_phy 0>,
<&ufs_mem_phy 1>,
<&ufs_mem_phy 2>,
@@ -1803,6 +1803,12 @@
<0 0 0 3 &intc 0 0 0 151 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
<0 0 0 4 &intc 0 0 0 152 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ interconnects = <&pcie_noc MASTER_PCIE_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_PCIE_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
<&gcc GCC_PCIE_0_PIPE_CLK_SRC>,
<&pcie0_phy>,
@@ -1845,8 +1851,35 @@
pinctrl-names = "default";
pinctrl-0 = <&pcie0_default_state>;
+ operating-points-v2 = <&pcie0_opp_table>;
+
status = "disabled";
+ pcie0_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* GEN 1 x1 */
+ opp-2500000 {
+ opp-hz = /bits/ 64 <2500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <250000 1>;
+ };
+
+ /* GEN 2 x1 */
+ opp-5000000 {
+ opp-hz = /bits/ 64 <5000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 1>;
+ };
+
+ /* GEN 3 x1 */
+ opp-8000000 {
+ opp-hz = /bits/ 64 <8000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <984500 1>;
+ };
+ };
+
pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -1932,6 +1965,12 @@
<0 0 0 3 &intc 0 0 0 438 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
<0 0 0 4 &intc 0 0 0 439 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ interconnects = <&pcie_noc MASTER_PCIE_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_PCIE_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
<&gcc GCC_PCIE_1_PIPE_CLK_SRC>,
<&pcie1_phy>,
@@ -1972,8 +2011,56 @@
pinctrl-names = "default";
pinctrl-0 = <&pcie1_default_state>;
+ operating-points-v2 = <&pcie1_opp_table>;
+
status = "disabled";
+ pcie1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* GEN 1 x1 */
+ opp-2500000 {
+ opp-hz = /bits/ 64 <2500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <250000 1>;
+ };
+
+ /* GEN 1 x2 and GEN 2 x1 */
+ opp-5000000 {
+ opp-hz = /bits/ 64 <5000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 1>;
+ };
+
+ /* GEN 2 x2 */
+ opp-10000000 {
+ opp-hz = /bits/ 64 <10000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <1000000 1>;
+ };
+
+ /* GEN 3 x1 */
+ opp-8000000 {
+ opp-hz = /bits/ 64 <8000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <984500 1>;
+ };
+
+ /* GEN 3 x2 and GEN 4 x1 */
+ opp-16000000 {
+ opp-hz = /bits/ 64 <16000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <1969000 1>;
+ };
+
+ /* GEN 4 x2 */
+ opp-32000000 {
+ opp-hz = /bits/ 64 <32000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <3938000 1>;
+ };
+ };
+
pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -2001,7 +2088,7 @@
"pipe";
clock-output-names = "pcie_1_pipe_clk";
- #clock-cells = <0>;
+ #clock-cells = <1>;
#phy-cells = <0>;
@@ -2304,6 +2391,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
status = "disabled";
ports {
@@ -2321,6 +2410,7 @@
reg = <1>;
usb_1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
};
};
@@ -2328,6 +2418,7 @@
reg = <2>;
usb_1_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp0_out>;
};
};
};
@@ -3119,6 +3210,14 @@
remote-endpoint = <&dpu_intf0_out>;
};
};
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dp0_out: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_dp_in>;
+ };
+ };
};
dp_opp_table: opp-table {
@@ -4362,9 +4461,10 @@
compatible = "qcom,sm8450-llcc";
reg = <0 0x19200000 0 0x80000>, <0 0x19600000 0 0x80000>,
<0 0x19300000 0 0x80000>, <0 0x19700000 0 0x80000>,
- <0 0x19a00000 0 0x80000>;
+ <0 0x19a00000 0 0x80000>, <0 0x19c00000 0 0x80000>;
reg-names = "llcc0_base", "llcc1_base", "llcc2_base",
- "llcc3_base", "llcc_broadcast_base";
+ "llcc3_base", "llcc_broadcast_base",
+ "llcc_broadcast_and_base";
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -4429,6 +4529,8 @@
<&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
<&gcc GCC_UFS_0_CLKREF_EN>;
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
resets = <&ufs_mem_hc 0>;
reset-names = "ufsphy";
@@ -4584,6 +4686,7 @@
reg = <1>;
usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_usb_ss_in>;
};
};
};
@@ -4610,8 +4713,6 @@
thermal-zones {
aoss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 0>;
trips {
@@ -4630,8 +4731,6 @@
};
cpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
trips {
@@ -4650,8 +4749,6 @@
};
cpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
trips {
@@ -4670,8 +4767,6 @@
};
cpuss3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
trips {
@@ -4690,8 +4785,6 @@
};
cpuss4-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
trips {
@@ -4710,8 +4803,6 @@
};
cpu4-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
trips {
@@ -4736,8 +4827,6 @@
};
cpu4-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
trips {
@@ -4762,8 +4851,6 @@
};
cpu5-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
trips {
@@ -4788,8 +4875,6 @@
};
cpu5-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 8>;
trips {
@@ -4814,8 +4899,6 @@
};
cpu6-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 9>;
trips {
@@ -4840,8 +4923,6 @@
};
cpu6-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 10>;
trips {
@@ -4866,8 +4947,6 @@
};
cpu7-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 11>;
trips {
@@ -4892,8 +4971,6 @@
};
cpu7-middle-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 12>;
trips {
@@ -4918,8 +4995,6 @@
};
cpu7-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 13>;
trips {
@@ -4945,7 +5020,7 @@
gpu-top-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens0 14>;
cooling-maps {
@@ -4956,35 +5031,29 @@
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu_top_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
+ type = "hot";
};
- reset-mon-cfg {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
- };
-
- gpu_top_alert0: trip-point0 {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpu-bottom-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens0 15>;
cooling-maps {
@@ -4995,35 +5064,27 @@
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu_bottom_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
- };
-
- reset-mon-cfg {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
+ type = "hot";
};
- gpu_bottom_alert0: trip-point0 {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
aoss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 0>;
trips {
@@ -5042,8 +5103,6 @@
};
cpu0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 1>;
trips {
@@ -5068,8 +5127,6 @@
};
cpu1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 2>;
trips {
@@ -5094,8 +5151,6 @@
};
cpu2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 3>;
trips {
@@ -5120,8 +5175,6 @@
};
cpu3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 4>;
trips {
@@ -5147,7 +5200,7 @@
cdsp0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens1 5>;
trips {
@@ -5179,7 +5232,7 @@
cdsp1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens1 6>;
trips {
@@ -5211,7 +5264,7 @@
cdsp2-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens1 7>;
trips {
@@ -5242,8 +5295,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 8>;
trips {
@@ -5263,7 +5314,7 @@
mem-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens1 9>;
trips {
@@ -5288,8 +5339,6 @@
};
modem0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 10>;
trips {
@@ -5320,8 +5369,6 @@
};
modem1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 11>;
trips {
@@ -5352,8 +5399,6 @@
};
modem2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 12>;
trips {
@@ -5384,8 +5429,6 @@
};
modem3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 13>;
trips {
@@ -5416,8 +5459,6 @@
};
camera0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 14>;
trips {
@@ -5436,8 +5477,6 @@
};
camera1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 15>;
trips {
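
The new pcie0/pcie1 OPP tables above tie each supported link configuration to an RPMh power-domain corner (required-opps: low SVS up to Gen2 x2, nominal from Gen3 up) and to a bandwidth vote on the newly added "pcie-mem" interconnect path via opp-peak-kBps. The opp-hz values mirror the per-lane link rate times the lane count, as the GEN comments spell out, and the peak-kBps figures follow from the PCIe line encoding overhead; a quick per-lane check of the numbers used in the table:

/*
 * Per-lane peak bandwidth behind opp-peak-kBps (1 kB = 1000 B):
 *   Gen1: 2.5 GT/s * 8b/10b    / 8 =  250000 kB/s
 *   Gen2: 5.0 GT/s * 8b/10b    / 8 =  500000 kB/s
 *   Gen3: 8.0 GT/s * 128b/130b / 8 ~  984500 kB/s (984615, rounded in the table)
 *   Gen4: 16  GT/s * 128b/130b / 8 ~ 1969000 kB/s, so a Gen4 x2 link votes 3938000 kB/s
 */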
diff --git a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
index 12d60a0ee095..2e12219006c9 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
@@ -940,7 +940,6 @@
};
&mdss_dp0_out {
- remote-endpoint = <&usb_dp_qmpphy_dp_in>;
data-lanes = <0 1>;
};
@@ -979,10 +978,6 @@
status = "okay";
};
-&pcie_1_phy_aux_clk {
- clock-frequency = <1000>;
-};
-
&pm8550_gpios {
sdc2_card_det_n: sdc2-card-det-state {
pins = "gpio12";
@@ -1111,6 +1106,7 @@
#sound-dai-cells = <0>;
sound-name-prefix = "SpkrLeft";
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845, Speaker South */
@@ -1128,6 +1124,7 @@
#sound-dai-cells = <0>;
sound-name-prefix = "SpkrRight";
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
@@ -1258,19 +1255,10 @@
status = "okay";
};
-&usb_1_dwc3 {
- dr_mode = "otg";
- usb-role-switch;
-};
-
&usb_1_dwc3_hs {
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
-};
-
&usb_1_hsphy {
vdd-supply = <&vreg_l1e_0p88>;
vdda12-supply = <&vreg_l3e_1p2>;
@@ -1284,23 +1272,13 @@
vdda-phy-supply = <&vreg_l3e_1p2>;
vdda-pll-supply = <&vreg_l3f_0p88>;
- orientation-switch;
-
status = "okay";
};
-&usb_dp_qmpphy_dp_in {
- remote-endpoint = <&mdss_dp0_out>;
-};
-
&usb_dp_qmpphy_out {
remote-endpoint = <&pmic_glink_ss_in>;
};
-&usb_dp_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&xo_board {
clock-frequency = <76800000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
index 3d4ad5aac70f..ab447fc252f7 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
@@ -736,11 +736,6 @@
&mdss_dp0_out {
data-lanes = <0 1>;
- remote-endpoint = <&usb_dp_qmpphy_dp_in>;
-};
-
-&pcie_1_phy_aux_clk {
- clock-frequency = <1000>;
};
&pcie0 {
@@ -847,6 +842,7 @@
sound-name-prefix = "SpkrLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3g_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845 */
@@ -860,6 +856,7 @@
sound-name-prefix = "SpkrRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3g_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
@@ -951,19 +948,10 @@
status = "okay";
};
-&usb_1_dwc3 {
- dr_mode = "otg";
- usb-role-switch;
-};
-
&usb_1_dwc3_hs {
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
-};
-
&usb_1_hsphy {
vdd-supply = <&vreg_l1e_0p88>;
vdda12-supply = <&vreg_l3e_1p2>;
@@ -977,23 +965,13 @@
vdda-phy-supply = <&vreg_l3e_1p2>;
vdda-pll-supply = <&vreg_l3f_0p91>;
- orientation-switch;
-
status = "okay";
};
-&usb_dp_qmpphy_dp_in {
- remote-endpoint = <&mdss_dp0_out>;
-};
-
&usb_dp_qmpphy_out {
remote-endpoint = <&pmic_glink_ss_in>;
};
-&usb_dp_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&xo_board {
clock-frequency = <76800000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
index 92f015017418..774bdfcffec3 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
@@ -214,6 +214,68 @@
regulator-always-on;
regulator-boot-on;
};
+
+ wcn7850-pmu {
+ compatible = "qcom,wcn7850-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en>, <&pmk8550_sleep_clk>;
+
+ wlan-enable-gpios = <&tlmm 80 GPIO_ACTIVE_HIGH>;
+ /*
+ * TODO Add bt-enable-gpios once the Bluetooth driver is
+ * converted to using the power sequencer.
+ */
+
+ vdd-supply = <&vreg_s5g_0p85>;
+ vddio-supply = <&vreg_l15b_1p8>;
+ vddaon-supply = <&vreg_s2g_0p85>;
+ vdddig-supply = <&vreg_s4e_0p95>;
+ vddrfa1p2-supply = <&vreg_s4g_1p25>;
+ vddrfa1p8-supply = <&vreg_s6g_1p86>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
};
&apps_rsc {
@@ -720,17 +782,6 @@
status = "okay";
};
-&gcc {
- clocks = <&bi_tcxo_div2>, <&sleep_clk>,
- <&pcie0_phy>,
- <&pcie1_phy>,
- <0>,
- <&ufs_mem_phy 0>,
- <&ufs_mem_phy 1>,
- <&ufs_mem_phy 2>,
- <&usb_dp_qmpphy QMP_USB43DP_USB3_PIPE_CLK>;
-};
-
&gpi_dma1 {
status = "okay";
};
@@ -807,11 +858,6 @@
&mdss_dp0_out {
data-lanes = <0 1>;
- remote-endpoint = <&usb_dp_qmpphy_dp_in>;
-};
-
-&pcie_1_phy_aux_clk {
- status = "disabled";
};
&pcie0 {
@@ -824,6 +870,23 @@
status = "okay";
};
+&pcieport0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
&pcie0_phy {
vdda-phy-supply = <&vreg_l1e_0p88>;
vdda-pll-supply = <&vreg_l3e_1p2>;
@@ -907,8 +970,15 @@
status = "okay";
};
-&pcie_1_phy_aux_clk {
- clock-frequency = <1000>;
+&pmk8550_gpios {
+ pmk8550_sleep_clk: sleep-clk-state {
+ pins = "gpio3";
+ function = "func1";
+ input-disable;
+ output-enable;
+ bias-disable;
+ power-source = <0>;
+ };
};
&qupv3_id_0 {
@@ -955,6 +1025,7 @@
sound-name-prefix = "SpkrLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3g_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845, Speaker South */
@@ -968,6 +1039,7 @@
sound-name-prefix = "SpkrRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3g_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
@@ -1084,6 +1156,13 @@
bias-disable;
output-low;
};
+
+ wlan_en: wlan-en-state {
+ pins = "gpio80";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
};
&uart7 {
@@ -1135,19 +1214,10 @@
status = "okay";
};
-&usb_1_dwc3 {
- dr_mode = "otg";
- usb-role-switch;
-};
-
&usb_1_dwc3_hs {
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
-};
-
&usb_1_hsphy {
vdd-supply = <&vreg_l1e_0p88>;
vdda12-supply = <&vreg_l3e_1p2>;
@@ -1161,23 +1231,13 @@
vdda-phy-supply = <&vreg_l3e_1p2>;
vdda-pll-supply = <&vreg_l3f_0p88>;
- orientation-switch;
-
status = "okay";
};
-&usb_dp_qmpphy_dp_in {
- remote-endpoint = <&mdss_dp0_out>;
-};
-
&usb_dp_qmpphy_out {
remote-endpoint = <&redriver_ss_in>;
};
-&usb_dp_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&xo_board {
clock-frequency = <76800000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
new file mode 100644
index 000000000000..3d351e90bb39
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Alexandru Marc Serdeliuc <serdeliuk@yahoo.com>
+ * Copyright (c) 2024, David Wronek <david@mainlining.org>
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8550.dtsi"
+#include "pm8550.dtsi"
+#include "pm8550vs.dtsi"
+#include "pmk8550.dtsi"
+
+/delete-node/ &adspslpi_mem;
+/delete-node/ &cdsp_mem;
+/delete-node/ &mpss_dsm_mem;
+/delete-node/ &mpss_mem;
+/delete-node/ &rmtfs_mem;
+
+/ {
+ model = "Samsung Galaxy Z Fold5";
+ compatible = "samsung,q5q", "qcom,sm8550";
+ chassis-type = "handset";
+
+ aliases {
+ serial0 = &uart7;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer: framebuffer@b8000000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0xb8000000 0x0 0x2b00000>;
+ width = <2176>;
+ height = <1812>;
+ stride = <(2176 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&volume_up_n>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8550_gpios 6 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reserved-memory {
+ adspslpi_mem: adspslpi@9ea00000 {
+ reg = <0x0 0x9ea00000 0x0 0x59b4000>;
+ no-map;
+ };
+
+ cdsp_mem: cdsp-region@9c900000 {
+ reg = <0 0x9c900000 0 0x2000000>;
+ no-map;
+ };
+
+ mpss_dsm_mem: mpss-dsm@d4d00000 {
+ reg = <0x0 0xd4d00000 0x0 0x3300000>;
+ no-map;
+ };
+
+ mpss_mem: mpss@8b400000 {
+ reg = <0x0 0x8b400000 0x0 0xfc00000>;
+ no-map;
+ };
+
+ rmtfs_mem: rmtfs-region@d4a80000 {
+ reg = <0x0 0xd4a80000 0x0 0x280000>;
+ no-map;
+ };
+
+ /*
+ * The bootloader will only keep display hardware enabled
+ * if this memory region is named exactly 'splash_region'
+ */
+ splash_region@b8000000 {
+ reg = <0x0 0xb8000000 0x0 0x2b00000>;
+ no-map;
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3296000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2720000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p1: ldo5 {
+ regulator-name = "vreg_l5b_3p1";
+ regulator-min-microvolt = <3104000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_1p8: ldo7 {
+ regulator-name = "vreg_l7b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_1p8: ldo8 {
+ regulator-name = "vreg_l8b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11b_1p2: ldo11 {
+ regulator-name = "vreg_l11b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p8: ldo12 {
+ regulator-name = "vreg_l12b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p2: ldo14 {
+ regulator-name = "vreg_l14b_3p2";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l16b_2p8: ldo16 {
+ regulator-name = "vreg_l16b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vreg_l3c_0p91: ldo3 {
+ regulator-name = "vreg_l3c_0p9";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vreg_l1d_0p88: ldo1 {
+ regulator-name = "vreg_l1d_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vreg_s4e_0p9: smps4 {
+ regulator-name = "vreg_s4e_0p9";
+ regulator-min-microvolt = <904000>;
+ regulator-max-microvolt = <984000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5e_1p1: smps5 {
+ regulator-name = "vreg_s5e_1p1";
+ regulator-min-microvolt = <1080000>;
+ regulator-max-microvolt = <1120000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1e_0p88: ldo1 {
+ regulator-name = "vreg_l1e_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2e_0p9: ldo2 {
+ regulator-name = "vreg_l2e_0p9";
+ regulator-min-microvolt = <904000>;
+ regulator-max-microvolt = <970000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vreg_s4f_0p5: smps4 {
+ regulator-name = "vreg_s4f_0p5";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <700000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_0p9: ldo1 {
+ regulator-name = "vreg_l1f_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_0p88: ldo2 {
+ regulator-name = "vreg_l2f_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_0p91: ldo3 {
+ regulator-name = "vreg_l3f_0p91";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-5 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+ qcom,pmic-id = "g";
+
+ vreg_s1g_1p2: smps1 {
+ regulator-name = "vreg_s1g_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2g_0p8: smps2 {
+ regulator-name = "vreg_s2g_0p8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3g_0p7: smps3 {
+ regulator-name = "vreg_s3g_0p7";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1004000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4g_1p3: smps4 {
+ regulator-name = "vreg_s4g_1p3";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5g_0p8: smps5 {
+ regulator-name = "vreg_s5g_0p8";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1004000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6g_1p8: smps6 {
+ regulator-name = "vreg_s6g_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1g_1p2: ldo1 {
+ regulator-name = "vreg_l1g_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2g_1p2: ldo2 {
+ regulator-name = "vreg_l2g_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3g_1p2: ldo3 {
+ regulator-name = "vreg_l3g_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "m";
+
+ vreg_l1m_1p056: ldo1 {
+ regulator-name = "vreg_l1m_1p056";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2m_1p056: ldo2 {
+ regulator-name = "vreg_l2m_1p056";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3m_2p8: ldo3 {
+ regulator-name = "vreg_l3m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4m_2p8: ldo4 {
+ regulator-name = "vreg_l4m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5m_1p8: ldo5 {
+ regulator-name = "vreg_l5m_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6m_1p8: ldo6 {
+ regulator-name = "vreg_l6m_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7m_2p9: ldo7 {
+ regulator-name = "vreg_l7m_2p9";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "n";
+
+ vreg_l1n_1p1: ldo1 {
+ regulator-name = "vreg_l1n_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2n_1p1: ldo2 {
+ regulator-name = "vreg_l2n_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3n_2p8: ldo3 {
+ regulator-name = "vreg_l3n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4n_2p8: ldo4 {
+ regulator-name = "vreg_l4n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5n_1p8: ldo5 {
+ regulator-name = "vreg_l5n_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6n_3p3: ldo6 {
+ regulator-name = "vreg_l6n_3p3";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7n_2p96: ldo7 {
+ regulator-name = "vreg_l7n_2p96";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&dispcc {
+ status = "disabled";
+};
+
+&i2c_master_hub_0 {
+ status = "okay";
+};
+
+&pcie0 {
+ wake-gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+ perst-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&pcie0_default_state>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&pcie0_phy {
+ vdda-phy-supply = <&vreg_l1e_0p88>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+ status = "okay";
+};
+
+&pm8550_gpios {
+ volume_up_n: volume-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ power-source = <1>;
+ bias-pull-up;
+ input-enable;
+ };
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ status = "okay";
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/sm8550/adsp.mdt",
+ "qcom/sm8550/adsp_dtb.mdt";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/sm8550/cdsp.mdt",
+ "qcom/sm8550/cdsp_dtb.mdt";
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/sm8550/modem.mdt",
+ "qcom/sm8550/modem_dtb.mdt";
+ status = "okay";
+};
+
+&sleep_clk {
+ clock-frequency = <32000>;
+};
+
+&tlmm {
+ gpio-reserved-ranges = <36 4>, <50 2>;
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 210 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&vreg_l17b_2p5>;
+ vcc-max-microamp = <1300000>;
+ vccq-supply = <&vreg_l1g_1p2>;
+ vccq-max-microamp = <1200000>;
+ vdd-hba-supply = <&vreg_l3g_1p2>;
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l1d_0p88>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+ status = "okay";
+};
+
+&xo_board {
+ clock-frequency = <76800000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
index 85e0d3d66e16..85d487ef80a0 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
@@ -737,19 +737,10 @@
status = "okay";
};
-&usb_1_dwc3 {
- dr_mode = "otg";
- usb-role-switch;
-};
-
&usb_1_dwc3_hs {
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
-};
-
&usb_1_hsphy {
vdd-supply = <&pm8550vs_2_l1>;
vdda12-supply = <&pm8550vs_2_l3>;
@@ -761,7 +752,6 @@
&usb_dp_qmpphy {
vdda-phy-supply = <&pm8550vs_2_l3>;
vdda-pll-supply = <&pm8550ve_l3>;
- orientation-switch;
status = "okay";
};
@@ -770,10 +760,6 @@
remote-endpoint = <&pmic_glink_ss_in>;
};
-&usb_dp_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&xo_board {
clock-frequency = <76800000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index bc5aeb05ffc3..4c9820adcf52 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -58,11 +58,6 @@
clock-mult = <1>;
clock-div = <2>;
};
-
- pcie_1_phy_aux_clk: pcie-1-phy-aux-clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- };
};
cpus {
@@ -357,8 +352,23 @@
reg = <0 0xa0000000 0 0>;
};
- pmu {
- compatible = "arm,armv8-pmuv3";
+ pmu-a510 {
+ compatible = "arm,cortex-a510-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-a710 {
+ compatible = "arm,cortex-a710-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-a715 {
+ compatible = "arm,cortex-a715-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-x3 {
+ compatible = "arm,cortex-x3-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
@@ -776,8 +786,8 @@
#power-domain-cells = <1>;
clocks = <&bi_tcxo_div2>, <&sleep_clk>,
<&pcie0_phy>,
- <&pcie1_phy>,
- <&pcie_1_phy_aux_clk>,
+ <&pcie1_phy QMP_PCIE_PIPE_CLK>,
+ <&pcie1_phy QMP_PCIE_PHY_AUX_CLK>,
<&ufs_mem_phy 0>,
<&ufs_mem_phy 1>,
<&ufs_mem_phy 2>,
@@ -1774,7 +1784,7 @@
status = "disabled";
- pcie@0 {
+ pcieport0: pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
bus-range = <0x01 0xff>;
@@ -1928,7 +1938,7 @@
power-domains = <&gcc PCIE_1_PHY_GDSC>;
- #clock-cells = <0>;
+ #clock-cells = <1>;
clock-output-names = "pcie1_pipe_clk";
#phy-cells = <0>;
@@ -2910,6 +2920,7 @@
port@1 {
reg = <1>;
mdss_dp0_out: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_dp_in>;
};
};
};
@@ -3169,6 +3180,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
status = "disabled";
ports {
@@ -3186,6 +3199,7 @@
reg = <1>;
usb_dp_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
};
};
@@ -3193,6 +3207,7 @@
reg = <2>;
usb_dp_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp0_out>;
};
};
};
@@ -3264,6 +3279,7 @@
snps,has-lpm-erratum;
tx-fifo-resize;
dma-coherent;
+ usb-role-switch;
ports {
#address-cells = <1>;
@@ -3280,6 +3296,7 @@
reg = <1>;
usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
};
};
};
@@ -4295,12 +4312,14 @@
<0 0x25200000 0 0x200000>,
<0 0x25400000 0 0x200000>,
<0 0x25600000 0 0x200000>,
- <0 0x25800000 0 0x200000>;
+ <0 0x25800000 0 0x200000>,
+ <0 0x25a00000 0 0x200000>;
reg-names = "llcc0_base",
"llcc1_base",
"llcc2_base",
"llcc3_base",
- "llcc_broadcast_base";
+ "llcc_broadcast_base",
+ "llcc_broadcast_and_base";
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -4571,8 +4590,6 @@
thermal-zones {
aoss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 0>;
trips {
@@ -4591,8 +4608,6 @@
};
cpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
trips {
@@ -4611,8 +4626,6 @@
};
cpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
trips {
@@ -4631,8 +4644,6 @@
};
cpuss2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
trips {
@@ -4651,8 +4662,6 @@
};
cpuss3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
trips {
@@ -4671,8 +4680,6 @@
};
cpu3-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
trips {
@@ -4697,8 +4704,6 @@
};
cpu3-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
trips {
@@ -4723,8 +4728,6 @@
};
cpu4-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
trips {
@@ -4749,8 +4752,6 @@
};
cpu4-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 8>;
trips {
@@ -4775,8 +4776,6 @@
};
cpu5-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 9>;
trips {
@@ -4801,8 +4800,6 @@
};
cpu5-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 10>;
trips {
@@ -4827,8 +4824,6 @@
};
cpu6-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 11>;
trips {
@@ -4853,8 +4848,6 @@
};
cpu6-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 12>;
trips {
@@ -4879,8 +4872,6 @@
};
cpu7-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 13>;
trips {
@@ -4905,8 +4896,6 @@
};
cpu7-middle-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 14>;
trips {
@@ -4931,8 +4920,6 @@
};
cpu7-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 15>;
trips {
@@ -4957,8 +4944,6 @@
};
aoss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 0>;
trips {
@@ -4977,8 +4962,6 @@
};
cpu0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 1>;
trips {
@@ -5003,8 +4986,6 @@
};
cpu1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 2>;
trips {
@@ -5029,8 +5010,6 @@
};
cpu2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 3>;
trips {
@@ -5056,7 +5035,7 @@
cdsp0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 4>;
trips {
@@ -5088,7 +5067,7 @@
cdsp1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 5>;
trips {
@@ -5120,7 +5099,7 @@
cdsp2-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 6>;
trips {
@@ -5152,7 +5131,7 @@
cdsp3-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 7>;
trips {
@@ -5183,8 +5162,6 @@
};
video-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 8>;
trips {
@@ -5204,7 +5181,7 @@
mem-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens1 9>;
trips {
@@ -5229,8 +5206,6 @@
};
modem0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 10>;
trips {
@@ -5261,8 +5236,6 @@
};
modem1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 11>;
trips {
@@ -5293,8 +5266,6 @@
};
modem2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 12>;
trips {
@@ -5325,8 +5296,6 @@
};
modem3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 13>;
trips {
@@ -5357,8 +5326,6 @@
};
camera0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 14>;
trips {
@@ -5377,8 +5344,6 @@
};
camera1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 15>;
trips {
@@ -5397,8 +5362,6 @@
};
aoss2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 0>;
trips {
@@ -5418,312 +5381,264 @@
gpuss-0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 1>;
cooling-maps {
map0 {
- trip = <&gpu0_junction_config>;
+ trip = <&gpu0_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu0_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
- };
-
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
+ type = "hot";
};
- gpu0_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpuss-1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 2>;
cooling-maps {
map0 {
- trip = <&gpu1_junction_config>;
+ trip = <&gpu1_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu1_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
+ type = "hot";
};
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
- };
-
- gpu1_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpuss-2-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 3>;
cooling-maps {
map0 {
- trip = <&gpu2_junction_config>;
+ trip = <&gpu2_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu2_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
+ type = "hot";
};
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
- };
-
- gpu2_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpuss-3-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 4>;
cooling-maps {
map0 {
- trip = <&gpu3_junction_config>;
+ trip = <&gpu3_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu3_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
- };
-
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
+ type = "hot";
};
- gpu3_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpuss-4-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 5>;
cooling-maps {
map0 {
- trip = <&gpu4_junction_config>;
+ trip = <&gpu4_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu4_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
- };
-
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
+ type = "hot";
};
- gpu4_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpuss-5-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 6>;
cooling-maps {
map0 {
- trip = <&gpu5_junction_config>;
+ trip = <&gpu5_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu5_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
+ type = "hot";
};
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
- };
-
- gpu5_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpuss-6-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 7>;
cooling-maps {
map0 {
- trip = <&gpu6_junction_config>;
+ trip = <&gpu6_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu6_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
+ type = "hot";
};
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
- };
-
- gpu6_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
gpuss-7-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 8>;
cooling-maps {
map0 {
- trip = <&gpu7_junction_config>;
+ trip = <&gpu7_alert0>;
cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
trips {
- thermal-engine-config {
- temperature = <125000>;
+ gpu7_alert0: trip-point0 {
+ temperature = <85000>;
hysteresis = <1000>;
type = "passive";
};
- thermal-hal-config {
- temperature = <125000>;
+ trip-point1 {
+ temperature = <90000>;
hysteresis = <1000>;
- type = "passive";
+ type = "hot";
};
- reset-mon-config {
- temperature = <115000>;
- hysteresis = <5000>;
- type = "passive";
- };
-
- gpu7_junction_config: junction-config {
- temperature = <95000>;
- hysteresis = <5000>;
- type = "passive";
+ trip-point2 {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8650-hdk-display-card.dtso b/arch/arm64/boot/dts/qcom/sm8650-hdk-display-card.dtso
new file mode 100644
index 000000000000..cb102535838d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8650-hdk-display-card.dtso
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+/*
+ * Display Card kit overlay
+ * This requires S5702 Switch 7 to be turned to OFF to route DSI0 to the display panel
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/dts-v1/;
+/plugin/;
+
+/* Disable HDMI bridge related nodes (mutually exclusive with the display card) */
+
+&i2c6 {
+ status = "disabled";
+};
+
+&lt9611_1v2 {
+ status = "disabled";
+};
+
+&lt9611_3v3 {
+ status = "disabled";
+};
+
+&vreg_bob_3v3 {
+ status = "disabled";
+};
+
+&lt9611_codec {
+ status = "disabled";
+};
+
+&mdss_dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "visionox,vtdr6130";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
+
+ vddio-supply = <&vreg_l12b_1p8>;
+ vci-supply = <&vreg_l13b_3p0>;
+ vdd-supply = <&vreg_l11b_1p2>;
+
+ pinctrl-0 = <&disp0_reset_n_active>, <&mdp_vsync>;
+ pinctrl-1 = <&disp0_reset_n_suspend>, <&mdp_vsync>;
+ pinctrl-names = "default", "sleep";
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi0_out: endpoint {
+ remote-endpoint = <&panel0_in>;
+ };
+ };
+ };
+};
+
+&spi4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "okay";
+
+ touchscreen@0 {
+ compatible = "goodix,gt9916";
+ reg = <0>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <162 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&tlmm 161 GPIO_ACTIVE_LOW>;
+
+ avdd-supply = <&vreg_l14b_3p2>;
+
+ spi-max-frequency = <1000000>;
+
+ touchscreen-size-x = <1080>;
+ touchscreen-size-y = <2400>;
+
+ pinctrl-0 = <&ts_irq>, <&ts_reset>;
+ pinctrl-names = "default";
+ };
+};
+
+&tlmm {
+ disp0_reset_n_active: disp0-reset-n-active-state {
+ pins = "gpio133";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ disp0_reset_n_suspend: disp0-reset-n-suspend-state {
+ pins = "gpio133";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ mdp_vsync: mdp-vsync-state {
+ pins = "gpio86";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ ts_irq: ts-irq-state {
+ pins = "gpio161";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ output-disable;
+ };
+
+ ts_reset: ts-reset-state {
+ pins = "gpio162";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
new file mode 100644
index 000000000000..591e6ab9bf5b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
@@ -0,0 +1,1355 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8650.dtsi"
+#include "pm8010.dtsi"
+#include "pm8550.dtsi"
+#include "pm8550b.dtsi"
+#define PMK8550VE_SID 8
+#include "pm8550ve.dtsi"
+#include "pm8550vs.dtsi"
+#include "pmk8550.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SM8650 HDK";
+ compatible = "qcom,sm8650-hdk", "qcom,sm8650";
+ chassis-type = "embedded";
+
+ aliases {
+ serial0 = &uart15;
+ serial1 = &uart14;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_out: endpoint {
+ remote-endpoint = <&lt9611_out>;
+ };
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&volume_up_n>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8550_gpios 6 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_BLUETOOTH;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "bluetooth-power";
+ default-state = "off";
+ };
+
+ led-1 {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pm8550b_gpios 9 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ panic-indicator;
+ };
+
+ led-2 {
+ function = LED_FUNCTION_WLAN;
+ color = <LED_COLOR_ID_ORANGE>;
+ gpios = <&pm8550b_gpios 10 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "phy0tx";
+ default-state = "off";
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,sm8650-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 29 GPIO_ACTIVE_HIGH>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss_in: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_sbu: endpoint {
+ remote-endpoint = <&wcd_usbss_sbu_mux>;
+ };
+ };
+ };
+ };
+ };
+
+ lt9611_1v2: regulator-lt9611-1v2 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "LT9611_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+
+ vin-supply = <&vph_pwr>;
+ gpio = <&tlmm 79 GPIO_ACTIVE_HIGH>;
+
+ enable-active-high;
+ };
+
+ lt9611_3v3: regulator-lt9611-3v3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "LT9611_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ vin-supply = <&vreg_bob_3v3>;
+ gpio = <&tlmm 78 GPIO_ACTIVE_HIGH>;
+
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "qcom,sm8650-sndcard", "qcom,sm8450-sndcard";
+ model = "SM8650-HDK";
+ audio-routing = "SpkrLeft IN", "WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC1", "MIC BIAS1",
+ "AMIC2", "MIC BIAS2",
+ "AMIC5", "MIC BIAS4",
+ "TX SWR_INPUT0", "ADC1_OUTPUT",
+ "TX SWR_INPUT1", "ADC2_OUTPUT",
+ "TX SWR_INPUT3", "ADC4_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd939x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd939x 1>, <&swr2 0>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&north_spkr>, <&south_spkr>, <&swr0 0>, <&lpass_wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_bob_3v3: regulator-vreg-bob-3v3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_BOB_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ wcd939x: audio-codec {
+ compatible = "qcom,wcd9395-codec", "qcom,wcd9390-codec";
+
+ pinctrl-0 = <&wcd_default>;
+ pinctrl-names = "default";
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ reset-gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_l15b_1p8>;
+ vdd-rxtx-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l15b_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ #sound-dai-cells = <1>;
+ };
+
+ wcn7850-pmu {
+ compatible = "qcom,wcn7850-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en>;
+
+ wlan-enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
+ /*
+ * TODO Add bt-enable-gpios once the Bluetooth driver is
+ * converted to using the power sequencer.
+ */
+
+ vdd-supply = <&vreg_s4i_0p85>;
+ vddio-supply = <&vreg_l15b_1p8>;
+ vddio1p2-supply = <&vreg_l3c_1p2>;
+ vddaon-supply = <&vreg_s2c_0p8>;
+ vdddig-supply = <&vreg_s3c_0p9>;
+ vddrfa1p2-supply = <&vreg_s1c_1p2>;
+ vddrfa1p8-supply = <&vreg_s6c_1p8>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK1>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l3-supply = <&vreg_s1c_1p2>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob1>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l11-supply = <&vreg_s1c_1p2>;
+ vdd-l12-supply = <&vreg_s6c_1p8>;
+ vdd-l15-supply = <&vreg_s6c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ qcom,pmic-id = "b";
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3296000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2720000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p1: ldo5 {
+ regulator-name = "vreg_l5b_3p1";
+ regulator-min-microvolt = <3104000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_1p8: ldo7 {
+ regulator-name = "vreg_l7b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_1p8: ldo8 {
+ regulator-name = "vreg_l8b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11b_1p2: ldo11 {
+ regulator-name = "vreg_l11b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p8: ldo12 {
+ regulator-name = "vreg_l12b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p2: ldo14 {
+ regulator-name = "vreg_l14b_3p2";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b_2p8: ldo16 {
+ regulator-name = "vreg_l16b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s1c_1p2>;
+ vdd-l2-supply = <&vreg_s1c_1p2>;
+ vdd-l3-supply = <&vreg_s1c_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "c";
+
+ vreg_s1c_1p2: smps1 {
+ regulator-name = "vreg_s1c_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1348000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2c_0p8: smps2 {
+ regulator-name = "vreg_s2c_0p8";
+ regulator-min-microvolt = <852000>;
+ regulator-max-microvolt = <1036000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3c_0p9: smps3 {
+ regulator-name = "vreg_s3c_0p9";
+ regulator-min-microvolt = <976000>;
+ regulator-max-microvolt = <1064000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4c_1p2: smps4 {
+ regulator-name = "vreg_s4c_1p2";
+ regulator-min-microvolt = <1224000>;
+ regulator-max-microvolt = <1280000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5c_0p7: smps5 {
+ regulator-name = "vreg_s5c_0p7";
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6c_1p8: smps6 {
+ regulator-name = "vreg_s6c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p2: ldo1 {
+ regulator-name = "vreg_l1c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_1p2: ldo3 {
+ regulator-name = "vreg_l3c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s3c_0p9>;
+
+ qcom,pmic-id = "d";
+
+ vreg_l1d_0p88: ldo1 {
+ regulator-name = "vreg_l1d_0p88";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+
+ vdd-l3-supply = <&vreg_s3c_0p9>;
+
+ qcom,pmic-id = "e";
+
+ vreg_l3e_0p9: ldo3 {
+ regulator-name = "vreg_l3e_0p9";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s3c_0p9>;
+ vdd-l3-supply = <&vreg_s3c_0p9>;
+
+ qcom,pmic-id = "g";
+
+ vreg_l1g_0p91: ldo1 {
+ regulator-name = "vreg_l1g_0p91";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3g_0p91: ldo3 {
+ regulator-name = "vreg_l3g_0p91";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-5 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s3c_0p9>;
+ vdd-l2-supply = <&vreg_s3c_0p9>;
+ vdd-l3-supply = <&vreg_s1c_1p2>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "i";
+
+ vreg_s4i_0p85: smps4 {
+ regulator-name = "vreg_s4i_0p85";
+ regulator-min-microvolt = <852000>;
+ regulator-max-microvolt = <1004000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i_0p88: ldo1 {
+ regulator-name = "vreg_l1i_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_0p88: ldo2 {
+ regulator-name = "vreg_l2i_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_1p2: ldo3 {
+ regulator-name = "vreg_l3i_0p91";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "m";
+
+ vdd-l1-l2-supply = <&vreg_s1c_1p2>;
+ vdd-l3-l4-supply = <&vreg_bob2>;
+ vdd-l5-supply = <&vreg_s6c_1p8>;
+ vdd-l6-supply = <&vreg_bob1>;
+ vdd-l7-supply = <&vreg_bob1>;
+
+ vreg_l1m_1p1: ldo1 {
+ regulator-name = "vreg_l1m_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2m_1p056: ldo2 {
+ regulator-name = "vreg_l2m_1p056";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3m_2p8: ldo3 {
+ regulator-name = "vreg_l3m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4m_2p8: ldo4 {
+ regulator-name = "vreg_l4m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5m_1p8: ldo5 {
+ regulator-name = "vreg_l5m_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6m_2p8: ldo6 {
+ regulator-name = "vreg_l6m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7m_2p96: ldo7 {
+ regulator-name = "vreg_l7m_2p96";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "n";
+
+ vdd-l1-l2-supply = <&vreg_s1c_1p2>;
+ vdd-l3-l4-supply = <&vreg_s6c_1p8>;
+ vdd-l5-supply = <&vreg_bob2>;
+ vdd-l6-supply = <&vreg_bob2>;
+ vdd-l7-supply = <&vreg_bob1>;
+
+ vreg_l1n_1p1: ldo1 {
+ regulator-name = "vreg_l1n_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2n_1p056: ldo2 {
+ regulator-name = "vreg_l2n_1p056";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3n_1p8: ldo3 {
+ regulator-name = "vreg_l3n_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4n_1p8: ldo4 {
+ regulator-name = "vreg_l4n_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5n_2p8: ldo5 {
+ regulator-name = "vreg_l5n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6n_2p8: ldo6 {
+ regulator-name = "vreg_l6n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7n_3p3: ldo7 {
+ regulator-name = "vreg_l7n_3p3";
+ regulator-min-microvolt = <3304000>;
+ regulator-max-microvolt = <3304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&dispcc {
+ status = "okay";
+};
+
+&gpi_dma1 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ wcd_usbss: typec-mux@e {
+ compatible = "qcom,wcd9395-usbss", "qcom,wcd9390-usbss";
+ reg = <0xe>;
+
+ vdd-supply = <&vreg_l15b_1p8>;
+ reset-gpios = <&tlmm 152 GPIO_ACTIVE_HIGH>;
+
+ mode-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ wcd_usbss_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_sbu>;
+ };
+ };
+ };
+ };
+};
+
+&i2c6 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ lt9611_codec: hdmi-bridge@2b {
+ compatible = "lontium,lt9611uxc";
+ reg = <0x2b>;
+
+ interrupts-extended = <&tlmm 85 IRQ_TYPE_EDGE_FALLING>;
+
+ reset-gpios = <&tlmm 28 GPIO_ACTIVE_HIGH>;
+
+ vdd-supply = <&lt9611_1v2>;
+ vcc-supply = <&lt9611_3v3>;
+
+ pinctrl-0 = <&lt9611_irq_pin>, <&lt9611_rst_pin>;
+ pinctrl-names = "default";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ lt9611_a: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ lt9611_out: endpoint {
+ remote-endpoint = <&hdmi_connector_out>;
+ };
+ };
+ };
+ };
+};
+
+&ipa {
+ qcom,gsi-loader = "self";
+ memory-region = <&ipa_fw_mem>;
+ firmware-name = "qcom/sm8650/ipa_fws.mbn";
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/sm8650/gen70900_zap.mbn";
+ };
+};
+
+&lpass_tlmm {
+ spkr_1_sd_n_active: spkr-1-sd-n-active-state {
+ pins = "gpio21";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0 {
+ vdda-supply = <&vreg_l3i_1p2>;
+
+ status = "okay";
+};
+
+&mdss_dsi0_out {
+ remote-endpoint = <&lt9611_a>;
+ data-lanes = <0 1 2 3>;
+};
+
+&mdss_dsi0_phy {
+ vdds-supply = <&vreg_l1i_0p88>;
+
+ status = "okay";
+};
+
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+};
+
+&pcie0 {
+ wake-gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+ perst-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie0_default_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcieport0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
+&pcie0_phy {
+ vdda-phy-supply = <&vreg_l1i_0p88>;
+ vdda-pll-supply = <&vreg_l3i_1p2>;
+
+ status = "okay";
+};
+
+&pcie1 {
+ wake-gpios = <&tlmm 99 GPIO_ACTIVE_HIGH>;
+ perst-gpios = <&tlmm 97 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie1_default_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie1_phy {
+ vdda-phy-supply = <&vreg_l3e_0p9>;
+ vdda-pll-supply = <&vreg_l3i_1p2>;
+ vdda-qref-supply = <&vreg_l1i_0p88>;
+
+ status = "okay";
+};
+
+&pm8550_gpios {
+ sdc2_card_det_n: sdc2-card-det-state {
+ pins = "gpio12";
+ function = "normal";
+ bias-pull-up;
+ input-enable;
+ output-disable;
+ power-source = <1>; /* 1.8 V */
+ };
+
+ volume_up_n: volume-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ bias-pull-up;
+ input-enable;
+ power-source = <1>;
+ };
+};
+
+/* The RGB signals are routed to 3 separate LEDs on the HDK8650 */
+&pm8550_pwm {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "okay";
+
+ led@1 {
+ reg = <1>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ };
+
+ led@2 {
+ reg = <2>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ };
+
+ led@3 {
+ reg = <3>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "off";
+ };
+};
+
+&pm8550b_eusb2_repeater {
+ vdd18-supply = <&vreg_l15b_1p8>;
+ vdd3-supply = <&vreg_l5b_3p1>;
+};
+
+&pmk8550_rtc {
+ status = "okay";
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+
+ status = "okay";
+};
+
+&qup_i2c3_data_clk {
+ /* Use internal I2C pull-up */
+ bias-pull-up = <2200>;
+};
+
+&qupv3_id_0 {
+ iommus = <&apps_smmu 0xa3 0x3>;
+
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/sm8650/adsp.mbn",
+ "qcom/sm8650/adsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/sm8650/cdsp.mbn",
+ "qcom/sm8650/cdsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/sm8650/modem.mbn",
+ "qcom/sm8650/modem_dtb.mbn";
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ cd-gpios = <&pm8550_gpios 12 GPIO_ACTIVE_HIGH>;
+
+ vmmc-supply = <&vreg_l9b_2p9>;
+ vqmmc-supply = <&vreg_l8b_1p8>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+
+ pinctrl-0 = <&sdc2_default>, <&sdc2_card_det_n>;
+ pinctrl-1 = <&sdc2_sleep>, <&sdc2_card_det_n>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+};
+
+&sleep_clk {
+ clock-frequency = <32000>;
+};
+
+&swr0 {
+ status = "okay";
+
+ /* WSA8845, Speaker North */
+ north_spkr: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ pinctrl-0 = <&spkr_1_sd_n_active>;
+ pinctrl-names = "default";
+ powerdown-gpios = <&lpass_tlmm 21 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l3c_1p2>;
+
+ /*
+ * WSA8845 Port 1 (DAC) <=> SWR0 Port 1 (SPKR_L)
+ * WSA8845 Port 2 (COMP) <=> SWR0 Port 2 (SPKR_L_COMP)
+ * WSA8845 Port 3 (BOOST) <=> SWR0 Port 3 (SPKR_L_BOOST)
+ * WSA8845 Port 4 (PBR) <=> SWR0 Port 7 (PBR)
+ * WSA8845 Port 5 (VISENSE) <=> SWR0 Port 10 (SPKR_L_VI)
+ * WSA8845 Port 6 (CPS) <=> SWR0 Port 13 (CPS)
+ */
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Speaker South */
+ south_spkr: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ pinctrl-0 = <&spkr_2_sd_n_active>;
+ pinctrl-names = "default";
+ powerdown-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l3c_1p2>;
+
+ /*
+ * WSA8845 Port 1 (DAC) <=> SWR0 Port 4 (SPKR_R)
+ * WSA8845 Port 2 (COMP) <=> SWR0 Port 5 (SPKR_R_COMP)
+ * WSA8845 Port 3 (BOOST) <=> SWR0 Port 6 (SPKR_R_BOOST)
+ * WSA8845 Port 4 (PBR) <=> SWR0 Port 7 (PBR)
+ * WSA8845 Port 5 (VISENSE) <=> SWR0 Port 11 (SPKR_R_VI)
+ * WSA8845 Port 6 (CPS) <=> SWR0 Port 13 (CPS)
+ */
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9395 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010e00";
+ reg = <0 4>;
+
+ /*
+ * WCD9395 RX Port 1 (HPH_L/R) <=> SWR1 Port 1 (HPH_L/R)
+ * WCD9395 RX Port 2 (CLSH) <=> SWR1 Port 2 (CLSH)
+ * WCD9395 RX Port 3 (COMP_L/R) <=> SWR1 Port 3 (COMP_L/R)
+ * WCD9395 RX Port 4 (LO) <=> SWR1 Port 4 (LO)
+ * WCD9395 RX Port 5 (DSD_L/R) <=> SWR1 Port 5 (DSD_L/R)
+ * WCD9395 RX Port 6 (HIFI_PCM_L/R) <=> SWR1 Port 9 (HIFI_PCM_L/R)
+ */
+ qcom,rx-port-mapping = <1 2 3 4 5 9>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9395 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010e00";
+ reg = <0 3>;
+
+ /*
+ * WCD9395 TX Port 1 (ADC1,2,3,4) <=> SWR2 Port 2 (TX SWR_INPUT 0,1,2,3)
+ * WCD9395 TX Port 2 (ADC3,4 & DMIC0,1) <=> SWR2 Port 2 (TX SWR_INPUT 0,1,2,3)
+ * WCD9395 TX Port 3 (DMIC0,1,2,3 & MBHC) <=> SWR2 Port 3 (TX SWR_INPUT 4,5,6,7)
+ * WCD9395 TX Port 4 (DMIC4,5,6,7) <=> SWR2 Port 4 (TX SWR_INPUT 8,9,10,11)
+ */
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
+&tlmm {
+ /* Reserved I/Os for NFC */
+ gpio-reserved-ranges = <32 8>, <74 1>;
+
+ bt_default: bt-default-state {
+ bt-en-pins {
+ pins = "gpio17";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ sw-ctrl-pins {
+ pins = "gpio18";
+ function = "gpio";
+ bias-pull-down;
+ };
+ };
+
+ lt9611_irq_pin: lt9611-irq-state {
+ pins = "gpio85";
+ function = "gpio";
+ bias-disable;
+ };
+
+ lt9611_rst_pin: lt9611-rst-state {
+ pins = "gpio28";
+ function = "gpio";
+ output-high;
+ };
+
+ spkr_2_sd_n_active: spkr-2-sd-n-active-state {
+ pins = "gpio77";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio107";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ wlan_en: wlan-en-state {
+ pins = "gpio16";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+};
+
+&uart14 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn7850-bt";
+
+ vddio-supply = <&vreg_l3c_1p2>;
+ vddaon-supply = <&vreg_l15b_1p8>;
+ vdddig-supply = <&vreg_s3c_0p9>;
+ vddrfa0p8-supply = <&vreg_s3c_0p9>;
+ vddrfa1p2-supply = <&vreg_s1c_1p2>;
+ vddrfa1p9-supply = <&vreg_s6c_1p8>;
+
+ max-speed = <3200000>;
+
+ enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>;
+ swctrl-gpios = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&bt_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&uart15 {
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 210 GPIO_ACTIVE_LOW>;
+
+ vcc-supply = <&vreg_l17b_2p5>;
+ vcc-max-microamp = <1300000>;
+ vccq-supply = <&vreg_l1c_1p2>;
+ vccq-max-microamp = <1200000>;
+
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l1d_0p88>;
+ vdda-pll-supply = <&vreg_l3i_1p2>;
+
+ status = "okay";
+};
+
+/*
+ * DPAUX -> WCD9395 -> USB_SBU -> USB-C
+ * eUSB2 DP/DM -> PM85550HS -> eUSB2 DP/DM -> WCD9395 -> USB-C
+ * USB SS -> USB-C
+ */
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "otg";
+ usb-role-switch;
+};
+
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_hs_in>;
+};
+
+&usb_1_hsphy {
+ vdd-supply = <&vreg_l1i_0p88>;
+ vdda12-supply = <&vreg_l3i_1p2>;
+
+ phys = <&pm8550b_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_dp_qmpphy {
+ vdda-phy-supply = <&vreg_l3i_1p2>;
+ vdda-pll-supply = <&vreg_l3g_0p91>;
+
+ status = "okay";
+};
+
+&usb_dp_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss_in>;
+};
+
+&xo_board {
+ clock-frequency = <76800000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
index d04ceaa73c2b..c63822f5b127 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
@@ -59,7 +59,7 @@
reg = <1>;
pmic_glink_ss_in: endpoint {
- remote-endpoint = <&usb_1_dwc3_ss>;
+ remote-endpoint = <&usb_dp_qmpphy_out>;
};
};
};
@@ -641,10 +641,6 @@
status = "okay";
};
-&pcie_1_phy_aux_clk {
- clock-frequency = <1000>;
-};
-
&pcie0 {
wake-gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
perst-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
@@ -755,6 +751,16 @@
sound-name-prefix = "SpkrLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3c_1p2>;
+
+ /*
+ * WSA8845 Port 1 (DAC) <=> SWR0 Port 1 (SPKR_L)
+ * WSA8845 Port 2 (COMP) <=> SWR0 Port 2 (SPKR_L_COMP)
+ * WSA8845 Port 3 (BOOST) <=> SWR0 Port 3 (SPKR_L_BOOST)
+ * WSA8845 Port 4 (PBR) <=> SWR0 Port 7 (PBR)
+ * WSA8845 Port 5 (VISENSE) <=> SWR0 Port 10 (SPKR_L_VI)
+ * WSA8845 Port 6 (CPS) <=> SWR0 Port 13 (CPS)
+ */
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845, Speaker Right */
@@ -768,6 +774,16 @@
sound-name-prefix = "SpkrRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3c_1p2>;
+
+ /*
+ * WSA8845 Port 1 (DAC) <=> SWR0 Port 4 (SPKR_R)
+ * WSA8845 Port 2 (COMP) <=> SWR0 Port 5 (SPKR_R_COMP)
+ * WSA8845 Port 3 (BOOST) <=> SWR0 Port 6 (SPKR_R_BOOST)
+ * WSA8845 Port 4 (PBR) <=> SWR0 Port 7 (PBR)
+ * WSA8845 Port 5 (VISENSE) <=> SWR0 Port 11 (SPKR_R_VI)
+ * WSA8845 Port 6 (CPS) <=> SWR0 Port 13 (CPS)
+ */
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
@@ -853,10 +869,6 @@
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&pmic_glink_ss_in>;
-};
-
&usb_1_hsphy {
vdd-supply = <&vreg_l1i_0p88>;
vdda12-supply = <&vreg_l3i_1p2>;
@@ -873,6 +885,10 @@
status = "okay";
};
+&usb_dp_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss_in>;
+};
+
&xo_board {
clock-frequency = <76800000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
index 4e94f7fe4d2d..b0d7927b708f 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
@@ -203,6 +203,71 @@
};
};
};
+
+ wcn7850-pmu {
+ compatible = "qcom,wcn7850-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en>;
+
+ wlan-enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
+ /*
+ * TODO Add bt-enable-gpios once the Bluetooth driver is
+ * converted to using the power sequencer.
+ */
+
+ vdd-supply = <&vreg_s4i_0p85>;
+ vddio-supply = <&vreg_l15b_1p8>;
+ vddio1p2-supply = <&vreg_l3c_1p2>;
+ vddaon-supply = <&vreg_s2c_0p8>;
+ vdddig-supply = <&vreg_s3c_0p9>;
+ vddrfa1p2-supply = <&vreg_s1c_1p2>;
+ vddrfa1p8-supply = <&vreg_s6c_1p8>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK1>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
};
&apps_rsc {
@@ -832,11 +897,6 @@
&mdss_dp0_out {
data-lanes = <0 1>;
- remote-endpoint = <&usb_dp_qmpphy_dp_in>;
-};
-
-&pcie_1_phy_aux_clk {
- clock-frequency = <1000>;
};
&pcie0 {
@@ -849,6 +909,23 @@
status = "okay";
};
+&pcieport0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
&pcie0_phy {
vdda-phy-supply = <&vreg_l1i_0p88>;
vdda-pll-supply = <&vreg_l3i_1p2>;
@@ -1012,6 +1089,16 @@
sound-name-prefix = "SpkrLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3c_1p2>;
+
+ /*
+ * WSA8845 Port 1 (DAC) <=> SWR0 Port 1 (SPKR_L)
+ * WSA8845 Port 2 (COMP) <=> SWR0 Port 2 (SPKR_L_COMP)
+ * WSA8845 Port 3 (BOOST) <=> SWR0 Port 3 (SPKR_L_BOOST)
+ * WSA8845 Port 4 (PBR) <=> SWR0 Port 7 (PBR)
+ * WSA8845 Port 5 (VISENSE) <=> SWR0 Port 10 (SPKR_L_VI)
+ * WSA8845 Port 6 (CPS) <=> SWR0 Port 13 (CPS)
+ */
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845, Speaker Right */
@@ -1025,6 +1112,16 @@
sound-name-prefix = "SpkrRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l3c_1p2>;
+
+ /*
+ * WSA8845 Port 1 (DAC) <=> SWR0 Port 4 (SPKR_R)
+ * WSA8845 Port 2 (COMP) <=> SWR0 Port 5 (SPKR_R_COMP)
+ * WSA8845 Port 3 (BOOST) <=> SWR0 Port 6 (SPKR_R_BOOST)
+ * WSA8845 Port 4 (PBR) <=> SWR0 Port 7 (PBR)
+ * WSA8845 Port 5 (VISENSE) <=> SWR0 Port 11 (SPKR_R_VI)
+ * WSA8845 Port 6 (CPS) <=> SWR0 Port 13 (CPS)
+ */
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
@@ -1143,6 +1240,13 @@
bias-disable;
output-low;
};
+
+ wlan_en: wlan-en-state {
+ pins = "gpio16";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
};
&uart14 {
@@ -1211,10 +1315,6 @@
remote-endpoint = <&pmic_glink_hs_in>;
};
-&usb_1_dwc3_ss {
- remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
-};
-
&usb_1_hsphy {
vdd-supply = <&vreg_l1i_0p88>;
vdda12-supply = <&vreg_l3i_1p2>;
@@ -1228,23 +1328,13 @@
vdda-phy-supply = <&vreg_l3i_1p2>;
vdda-pll-supply = <&vreg_l3g_0p91>;
- orientation-switch;
-
status = "okay";
};
-&usb_dp_qmpphy_dp_in {
- remote-endpoint = <&mdss_dp0_out>;
-};
-
&usb_dp_qmpphy_out {
remote-endpoint = <&redriver_ss_in>;
};
-&usb_dp_qmpphy_usb_ss_in {
- remote-endpoint = <&usb_1_dwc3_ss>;
-};
-
&xo_board {
clock-frequency = <76800000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index 62a6e77730bc..9d9bbb9aca64 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -4,10 +4,12 @@
*/
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,sm8650-camcc.h>
#include <dt-bindings/clock/qcom,sm8650-dispcc.h>
#include <dt-bindings/clock/qcom,sm8650-gcc.h>
#include <dt-bindings/clock/qcom,sm8650-gpucc.h>
#include <dt-bindings/clock/qcom,sm8650-tcsr.h>
+#include <dt-bindings/clock/qcom,sm8650-videocc.h>
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/firmware/qcom,scm.h>
#include <dt-bindings/gpio/gpio.h>
@@ -60,11 +62,6 @@
clock-mult = <1>;
clock-div = <2>;
};
-
- pcie_1_phy_aux_clk: pcie-1-phy-aux-clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- };
};
cpus {
@@ -371,6 +368,7 @@
firmware {
scm: scm {
compatible = "qcom,scm-sm8650", "qcom,scm";
+ qcom,dload-mode = <&tcsr 0x19000>;
interconnects = <&aggre2_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
};
@@ -394,8 +392,18 @@
reg = <0 0xa0000000 0 0>;
};
- pmu {
- compatible = "arm,armv8-pmuv3";
+ pmu-a520 {
+ compatible = "arm,cortex-a520-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-a720 {
+ compatible = "arm,cortex-a720-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-x4 {
+ compatible = "arm,cortex-x4-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
@@ -758,8 +766,8 @@
<&bi_tcxo_ao_div2>,
<&sleep_clk>,
<&pcie0_phy>,
- <&pcie1_phy>,
- <&pcie_1_phy_aux_clk>,
+ <&pcie1_phy QMP_PCIE_PIPE_CLK>,
+ <&pcie1_phy QMP_PCIE_PHY_AUX_CLK>,
<&ufs_mem_phy 0>,
<&ufs_mem_phy 1>,
<&ufs_mem_phy 2>,
@@ -2208,7 +2216,7 @@
reg = <0 0x010c3000 0 0x1000>;
};
- pcie0: pci@1c00000 {
+ pcie0: pcie@1c00000 {
device_type = "pci";
compatible = "qcom,pcie-sm8650", "qcom,pcie-sm8550";
reg = <0 0x01c00000 0 0x3000>,
@@ -2294,7 +2302,7 @@
status = "disabled";
- pcie@0 {
+ pcieport0: pcie@0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
bus-range = <0x01 0xff>;
@@ -2336,7 +2344,7 @@
status = "disabled";
};
- pcie1: pci@1c08000 {
+ pcie1: pcie@1c08000 {
device_type = "pci";
compatible = "qcom,pcie-sm8650", "qcom,pcie-sm8550";
reg = <0 0x01c08000 0 0x3000>,
@@ -2467,7 +2475,7 @@
power-domains = <&gcc PCIE_1_PHY_GDSC>;
- #clock-cells = <0>;
+ #clock-cells = <1>;
clock-output-names = "pcie1_pipe_clk";
#phy-cells = <0>;
@@ -2626,6 +2634,7 @@
operating-points-v2 = <&gpu_opp_table>;
qcom,gmu = <&gmu>;
+ #cooling-cells = <2>;
status = "disabled";
@@ -3309,6 +3318,30 @@
};
};
+ videocc: clock-controller@aaf0000 {
+ compatible = "qcom,sm8650-videocc";
+ reg = <0 0x0aaf0000 0 0x10000>;
+ clocks = <&bi_tcxo_div2>,
+ <&gcc GCC_VIDEO_AHB_CLK>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ camcc: clock-controller@ade0000 {
+ compatible = "qcom,sm8650-camcc";
+ reg = <0 0x0ade0000 0 0x20000>;
+ clocks = <&gcc GCC_CAMERA_AHB_CLK>,
+ <&bi_tcxo_div2>,
+ <&bi_tcxo_ao_div2>,
+ <&sleep_clk>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
mdss: display-subsystem@ae00000 {
compatible = "qcom,sm8650-mdss";
reg = <0 0x0ae00000 0 0x1000>;
@@ -3675,6 +3708,7 @@
reg = <1>;
mdss_dp0_out: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_dp_in>;
};
};
};
@@ -3750,6 +3784,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
status = "disabled";
ports {
@@ -3767,6 +3803,7 @@
reg = <1>;
usb_dp_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
};
};
@@ -3774,6 +3811,7 @@
reg = <2>;
usb_dp_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp0_out>;
};
};
};
@@ -3864,6 +3902,7 @@
reg = <1>;
usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
};
};
};
@@ -4982,12 +5021,14 @@
<0 0x25400000 0 0x200000>,
<0 0x25200000 0 0x200000>,
<0 0x25600000 0 0x200000>,
- <0 0x25800000 0 0x200000>;
+ <0 0x25800000 0 0x200000>,
+ <0 0x25a00000 0 0x200000>;
reg-names = "llcc0_base",
"llcc1_base",
"llcc2_base",
"llcc3_base",
- "llcc_broadcast_base";
+ "llcc_broadcast_base",
+ "llcc_broadcast_and_base";
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -5328,8 +5369,6 @@
thermal-zones {
aoss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 0>;
trips {
@@ -5348,8 +5387,6 @@
};
cpuss0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
trips {
@@ -5368,8 +5405,6 @@
};
cpuss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
trips {
@@ -5388,8 +5423,6 @@
};
cpuss2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
trips {
@@ -5408,8 +5441,6 @@
};
cpuss3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
trips {
@@ -5428,8 +5459,6 @@
};
cpu2-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
trips {
@@ -5454,8 +5483,6 @@
};
cpu2-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
trips {
@@ -5480,8 +5507,6 @@
};
cpu3-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
trips {
@@ -5506,8 +5531,6 @@
};
cpu3-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 8>;
trips {
@@ -5532,8 +5555,6 @@
};
cpu4-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 9>;
trips {
@@ -5558,8 +5579,6 @@
};
cpu4-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 10>;
trips {
@@ -5584,8 +5603,6 @@
};
cpu5-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 11>;
trips {
@@ -5610,8 +5627,6 @@
};
cpu5-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 12>;
trips {
@@ -5636,8 +5651,6 @@
};
cpu6-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 13>;
trips {
@@ -5662,8 +5675,6 @@
};
cpu6-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens0 14>;
trips {
@@ -5688,8 +5699,6 @@
};
aoss1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 0>;
trips {
@@ -5708,8 +5717,6 @@
};
cpu7-top-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 1>;
trips {
@@ -5734,8 +5741,6 @@
};
cpu7-middle-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 2>;
trips {
@@ -5760,8 +5765,6 @@
};
cpu7-bottom-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 3>;
trips {
@@ -5786,8 +5789,6 @@
};
cpu0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 4>;
trips {
@@ -5812,8 +5813,6 @@
};
cpu1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 5>;
trips {
@@ -5839,7 +5838,7 @@
nsphvx0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 6>;
trips {
@@ -5859,7 +5858,7 @@
nsphvx1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 7>;
trips {
@@ -5879,7 +5878,7 @@
nsphmx0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 8>;
trips {
@@ -5899,7 +5898,7 @@
nsphmx1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 9>;
trips {
@@ -5919,7 +5918,7 @@
nsphmx2-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 10>;
trips {
@@ -5939,7 +5938,7 @@
nsphmx3-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 11>;
trips {
@@ -5959,7 +5958,7 @@
video-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens1 12>;
trips {
@@ -5979,7 +5978,7 @@
ddr-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens1 13>;
trips {
@@ -5998,8 +5997,6 @@
};
camera0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 14>;
trips {
@@ -6018,8 +6015,6 @@
};
camera1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens1 15>;
trips {
@@ -6038,8 +6033,6 @@
};
aoss2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 0>;
trips {
@@ -6059,19 +6052,32 @@
gpuss0-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 1>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu0_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu0_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss0-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -6079,19 +6085,32 @@
gpuss1-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 2>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu1_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu1_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss1-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -6099,19 +6118,32 @@
gpuss2-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 3>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu2_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu2_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss2-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -6119,19 +6151,32 @@
gpuss3-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 4>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu3_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu3_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss3-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -6139,19 +6184,32 @@
gpuss4-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 5>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu4_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu4_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss4-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -6159,19 +6217,32 @@
gpuss5-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 6>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu5_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu5_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss5-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -6179,19 +6250,32 @@
gpuss6-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 7>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu6_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu6_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss6-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
@@ -6199,27 +6283,38 @@
gpuss7-thermal {
polling-delay-passive = <10>;
- polling-delay = <0>;
+
thermal-sensors = <&tsens2 8>;
+ cooling-maps {
+ map0 {
+ trip = <&gpu7_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
- trip-point0 {
+ gpu7_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
temperature = <90000>;
- hysteresis = <2000>;
+ hysteresis = <1000>;
type = "hot";
};
- gpuss7-critical {
+ trip-point2 {
temperature = <110000>;
- hysteresis = <0>;
+ hysteresis = <1000>;
type = "critical";
};
};
};
modem0-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 9>;
trips {
@@ -6238,8 +6333,6 @@
};
modem1-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 10>;
trips {
@@ -6258,8 +6351,6 @@
};
modem2-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 11>;
trips {
@@ -6278,8 +6369,6 @@
};
modem3-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
thermal-sensors = <&tsens2 12>;
trips {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
new file mode 100644
index 000000000000..7fb980fcb307
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Xilin Wu <wuxilin123@gmail.com>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ model = "ASUS Vivobook S 15";
+ compatible = "asus,vivobook-s15", "qcom,x1e80100";
+ chassis-type = "laptop";
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Left-side port, closer to the screen */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ /* Left-side port, farther from the screen */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_edp_3p3: regulator-edp-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_EDP_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&edp_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j_1p2>;
+ vdd-l15-supply = <&vreg_s4c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p0: ldo14 {
+ regulator-name = "vreg_l14b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s4c_1p8: smps4 {
+ regulator-name = "vreg_s4c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s4c_1p8>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_l1d_0p8: ldo1 {
+ regulator-name = "vreg_l1d_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p9: ldo2 {
+ regulator-name = "vreg_l2d_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_1p8: ldo3 {
+ regulator-name = "vreg_l3d_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+
+ vreg_l2e_0p8: ldo2 {
+ regulator-name = "vreg_l2e_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_s1f_0p7: smps1 {
+ regulator-name = "vreg_s1f_0p7";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c_1p8>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ vreg_s5j_1p2: smps5 {
+ regulator-name = "vreg_s5j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p8: ldo1 {
+ regulator-name = "vreg_l1j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j_0p8: ldo3 {
+ regulator-name = "vreg_l3j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ touchpad@15 {
+ compatible = "hid-over-i2c";
+ reg = <0x15>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&tpad_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ /* PS8830 USB4 Retimer? @ 0x8 */
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ /* PS8830 USB4 Retimer? @ 0x8 */
+};
+
+&i2c5 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ keyboard@3a {
+ compatible = "hid-over-i2c";
+ reg = <0x3a>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&kybd_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+
+ /* EC? @ 0x5b, 0x76 */
+};
+
+&i2c7 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ /* PS8830 USB4 Retimer? @ 0x8 */
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp3 {
+ compatible = "qcom,x1e80100-dp";
+ /delete-property/ #sound-dai-cells;
+
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "edp-panel";
+ power-supply = <&vreg_edp_3p3>;
+
+ port {
+ edp_panel_in: endpoint {
+ remote-endpoint = <&mdss_dp3_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+};
+
+&mdss_dp3_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie6a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/ASUSTeK/vivobook-s15/qcadsp8380.mbn",
+ "qcom/x1e80100/ASUSTeK/vivobook-s15/adsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/ASUSTeK/vivobook-s15/qccdsp8380.mbn",
+ "qcom/x1e80100/ASUSTeK/vivobook-s15/cdsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&smb2360_2 {
+ status = "disabled";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <34 2>, /* Unused */
+ <44 4>, /* SPI (TPM) */
+ <238 1>; /* UFS Reset */
+
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio70";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ kybd_default: kybd-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ bias-disable;
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie6a_default: pcie2a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ tpad_default: tpad-default-state {
+ pins = "gpio3";
+ function = "gpio";
+ bias-disable;
+ };
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index c5c2895b37c7..6152bcd0bc1f 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -49,6 +49,113 @@
stdout-path = "serial0:115200n8";
};
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>,
+ <&tlmm 125 GPIO_ACTIVE_HIGH>;
+
+ /* Left-side rear port */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ /* Left-side front port */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ /* Right-side port */
+ connector@2 {
+ compatible = "usb-c-connector";
+ reg = <2>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss2_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss2_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
sound {
compatible = "qcom,x1e80100-sndcard";
model = "X1E80100-CRD";
@@ -93,7 +200,7 @@
};
codec {
- sound-dai = <&wcd938x 1>, <&swr2 0>, <&lpass_txmacro 0>;
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
};
platform {
@@ -164,6 +271,20 @@
regulator-always-on;
regulator-boot-on;
};
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nvme_reg_en>;
+ };
};
&apps_rsc {
@@ -646,11 +767,19 @@
};
&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie6a_default>;
+
status = "okay";
};
&pcie6a_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-phy-supply = <&vreg_l1d_0p8>;
vdda-pll-supply = <&vreg_l2j_1p2>;
status = "okay";
@@ -744,7 +873,7 @@
wcd_tx: codec@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
- qcom,tx-port-mapping = <1 1 2 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
};
};
@@ -795,6 +924,36 @@
bias-disable;
};
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie6a_default: pcie2a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
tpad_default: tpad-default-state {
pins = "gpio3";
function = "gpio";
@@ -831,8 +990,8 @@
};
&usb_1_ss0_hsphy {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
phys = <&smb2360_0_eusb2_repeater>;
@@ -840,6 +999,9 @@
};
&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
status = "okay";
};
@@ -849,12 +1011,19 @@
&usb_1_ss0_dwc3 {
dr_mode = "host";
- usb-role-switch;
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
};
&usb_1_ss1_hsphy {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
phys = <&smb2360_1_eusb2_repeater>;
@@ -862,6 +1031,9 @@
};
&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
status = "okay";
};
@@ -871,12 +1043,19 @@
&usb_1_ss1_dwc3 {
dr_mode = "host";
- usb-role-switch;
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
};
&usb_1_ss2_hsphy {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
phys = <&smb2360_2_eusb2_repeater>;
@@ -884,6 +1063,9 @@
};
&usb_1_ss2_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
status = "okay";
};
@@ -893,5 +1075,12 @@
&usb_1_ss2_dwc3 {
dr_mode = "host";
- usb-role-switch;
+};
+
+&usb_1_ss2_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss2_hs_in>;
+};
+
+&usb_1_ss2_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss2_ss_in>;
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
new file mode 100644
index 000000000000..fbff558f5b07
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
@@ -0,0 +1,929 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ model = "Lenovo Yoga Slim 7x";
+ compatible = "lenovo,yoga-slim7x", "qcom,x1e80100";
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>,
+ <&tlmm 125 GPIO_ACTIVE_HIGH>;
+
+ /* Left-side rear port */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ /* Left-side front port */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ /* Right-side port */
+ connector@2 {
+ compatible = "usb-c-connector";
+ reg = <2>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss2_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss2_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E80100-LENOVO-Yoga-Slim7x";
+ audio-routing = "WooferLeft IN", "WSA WSA_SPK1 OUT",
+ "TweeterLeft IN", "WSA WSA_SPK2 OUT",
+ "WooferRight IN", "WSA2 WSA_SPK2 OUT",
+ "TweeterRight IN", "WSA2 WSA_SPK2 OUT";
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_woofer>, <&left_tweeter>,
+ <&swr0 0>, <&lpass_wsamacro 0>,
+ <&right_woofer>, <&right_tweeter>,
+ <&swr3 0>, <&lpass_wsa2macro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ codec {
+ sound-dai = <&lpass_vamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_edp_3p3: regulator-edp-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_EDP_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&edp_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j_1p2>;
+ vdd-l15-supply = <&vreg_s4c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_3p0: ldo8 {
+ regulator-name = "vreg_l8b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p2: ldo12 {
+ regulator-name = "vreg_l12b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p0: ldo14 {
+ regulator-name = "vreg_l14b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s4c_1p8: smps4 {
+ regulator-name = "vreg_s4c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p2: ldo1 {
+ regulator-name = "vreg_l1c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_0p8: ldo2 {
+ regulator-name = "vreg_l2c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p8: ldo3 {
+ regulator-name = "vreg_l3c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s4c_1p8>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_l1d_0p8: ldo1 {
+ regulator-name = "vreg_l1d_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p9: ldo2 {
+ regulator-name = "vreg_l2d_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_1p8: ldo3 {
+ regulator-name = "vreg_l3d_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+
+ vreg_l2e_0p8: ldo2 {
+ regulator-name = "vreg_l2e_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_s1f_0p7: smps1 {
+ regulator-name = "vreg_s1f_0p7";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_1p0: ldo1 {
+ regulator-name = "vreg_l1f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_1p0: ldo2 {
+ regulator-name = "vreg_l2f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_1p0: ldo3 {
+ regulator-name = "vreg_l3f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c_1p8>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+
+ vreg_s1i_0p9: smps1 {
+ regulator-name = "vreg_s1i_0p9";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2i_1p0: smps2 {
+ regulator-name = "vreg_s2i_1p0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i_1p8: ldo1 {
+ regulator-name = "vreg_l1i_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p8: ldo3 {
+ regulator-name = "vreg_l3i_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ vreg_s5j_1p2: smps5 {
+ regulator-name = "vreg_s5j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p8: ldo1 {
+ regulator-name = "vreg_l1j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j_0p8: ldo3 {
+ regulator-name = "vreg_l3j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/LENOVO/83ED/qcdxkmsuc8380.mbn";
+ };
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ touchpad@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+
+ hid-descr-addr = <0x20>;
+ interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&tpad_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+
+ keyboard@3a {
+ compatible = "hid-over-i2c";
+ reg = <0x3a>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&kybd_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+};
+
+&i2c8 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ touchscreen@14 {
+ compatible = "hid-over-i2c";
+ reg = <0x14>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&ts0_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&lpass_tlmm {
+ spkr_01_sd_n_active: spkr-01-sd-n-active-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ spkr_23_sd_n_active: spkr-23-sd-n-active-state {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&lpass_vamacro {
+ pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
+ pinctrl-names = "default";
+
+ vdd-micb-supply = <&vreg_l1b_1p8>;
+ qcom,dmic-sample-rate = <4800000>;
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp3 {
+ compatible = "qcom,x1e80100-dp";
+ /delete-property/ #sound-dai-cells;
+
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "edp-panel";
+ power-supply = <&vreg_edp_3p3>;
+
+ port {
+ edp_panel_in: endpoint {
+ remote-endpoint = <&mdss_dp3_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+};
+
+&mdss_dp3_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie6a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/LENOVO/83ED/qcadsp8380.mbn",
+ "qcom/x1e80100/LENOVO/83ED/adsp_dtbs.elf";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/LENOVO/83ED/qccdsp8380.mbn",
+ "qcom/x1e80100/LENOVO/83ED/cdsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&smb2360_2_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l8b_3p0>;
+};
+
+&swr0 {
+ status = "okay";
+
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
+ /* WSA8845, Left Woofer */
+ left_woofer: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "WooferLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Left Tweeter */
+ left_tweeter: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "TweeterLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&swr3 {
+ status = "okay";
+
+ pinctrl-0 = <&wsa2_swr_active>, <&spkr_23_sd_n_active>;
+ pinctrl-names = "default";
+
+ /* WSA8845, Right Woofer */
+ right_woofer: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "WooferRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Right Tweeter */
+ right_tweeter: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "TweeterRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&tlmm {
+ gpio-reserved-ranges = <34 2>, /* Unused */
+ <44 4>, /* SPI (TPM) */
+ <238 1>; /* UFS Reset */
+
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio70";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ kybd_default: kybd-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ bias-disable;
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ tpad_default: tpad-default-state {
+ pins = "gpio3";
+ function = "gpio";
+ bias-disable;
+ };
+
+ ts0_default: ts0-default-state {
+ int-n-pins {
+ pins = "gpio51";
+ function = "gpio";
+ bias-disable;
+ };
+
+ reset-n-pins {
+ pins = "gpio48";
+ function = "gpio";
+ output-high;
+ drive-strength = <16>;
+ };
+ };
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+};
+
+&usb_1_ss2_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_2_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss2_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss2 {
+ status = "okay";
+};
+
+&usb_1_ss2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss2_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss2_hs_in>;
+};
+
+&usb_1_ss2_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss2_ss_in>;
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
index 04301f772fbd..e34e70922cd3 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
@@ -3,10 +3,477 @@
* Copyright (c) 2024, Linaro Limited
*/
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
/ {
+ thermal-zones {
+ pm8550-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pm8550ve-2-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550ve_2_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pmc8380-3-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pmc8380_3_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pmc8380-4-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pmc8380_4_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pmc8380-5-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pmc8380_5_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pmc8380-6-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pmc8380_6_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pm8550ve-8-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550ve_8_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pm8550ve-9-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550ve_9_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pm8010-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8010_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus0 {
+ /* PMK8380 */
+ pmk8550: pmic@0 {
+ compatible = "qcom,pm8550", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmk8550_pon: pon@1300 {
+ compatible = "qcom,pmk8350-pon";
+ reg = <0x1300>, <0x800>;
+ reg-names = "hlos", "pbs";
+
+ pon_pwrkey: pwrkey {
+ compatible = "qcom,pmk8350-pwrkey";
+ interrupts = <0x0 0x13 0x7 IRQ_TYPE_EDGE_BOTH>;
+ linux,code = <KEY_POWER>;
+ };
+
+ pon_resin: resin {
+ compatible = "qcom,pmk8350-resin";
+ interrupts = <0x0 0x13 0x6 IRQ_TYPE_EDGE_BOTH>;
+ status = "disabled";
+ };
+ };
+
+ pmk8550_rtc: rtc@6100 {
+ compatible = "qcom,pmk8350-rtc";
+ reg = <0x6100>, <0x6200>;
+ reg-names = "rtc", "alarm";
+ interrupts = <0x0 0x62 0x1 IRQ_TYPE_EDGE_RISING>;
+ /* Not yet sure what blocks access */
+ status = "reserved";
+ };
+
+ pmk8550_sdam_2: nvram@7100 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x7100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x7100 0x100>;
+
+ reboot_reason: reboot-reason@48 {
+ reg = <0x48 0x1>;
+ bits = <1 7>;
+ };
+ };
+
+ pmk8550_gpios: gpio@b800 {
+ compatible = "qcom,pmk8550-gpio", "qcom,spmi-gpio";
+ reg = <0xb800>;
+ gpio-controller;
+ gpio-ranges = <&pmk8550_gpios 0 0 6>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ /* PMC8380C */
+ pm8550: pmic@1 {
+ compatible = "qcom,pm8550", "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x1 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550_gpios: gpio@8800 {
+ compatible = "qcom,pm8550-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550_gpios 0 0 12>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pm8550_flash: led-controller@ee00 {
+ compatible = "qcom,pm8550-flash-led", "qcom,spmi-flash-led";
+ reg = <0xee00>;
+ status = "disabled";
+ };
+
+ pm8550_pwm: pwm {
+ compatible = "qcom,pm8550-pwm", "qcom,pm8350c-pwm";
+ #pwm-cells = <2>;
+
+ status = "disabled";
+ };
+ };
+
+ /* PMC8380VE */
+ pm8550ve_2: pmic@2 {
+ compatible = "qcom,pm8550", "qcom,spmi-pmic";
+ reg = <0x2 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550ve_2_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x2 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550ve_2_gpios: gpio@8800 {
+ compatible = "qcom,pm8550ve-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550ve_2_gpios 0 0 8>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ /* PMC8380 is actually not a PM8550 series rebrand */
+ pmc8380_3: pmic@3 {
+ compatible = "qcom,pmc8380", "qcom,spmi-pmic";
+ reg = <0x3 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmc8380_3_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x3 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmc8380_3_gpios: gpio@8800 {
+ compatible = "qcom,pmc8380-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmc8380_3_gpios 0 0 10>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pmc8380_4: pmic@4 {
+ compatible = "qcom,pmc8380", "qcom,spmi-pmic";
+ reg = <0x4 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmc8380_4_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x4 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmc8380_4_gpios: gpio@8800 {
+ compatible = "qcom,pmc8380-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmc8380_4_gpios 0 0 10>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pmc8380_5: pmic@5 {
+ compatible = "qcom,pmc8380", "qcom,spmi-pmic";
+ reg = <0x5 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmc8380_5_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x5 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmc8380_5_gpios: gpio@8800 {
+ compatible = "qcom,pmc8380-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmc8380_5_gpios 0 0 10>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pmc8380_6: pmic@6 {
+ compatible = "qcom,pmc8380", "qcom,spmi-pmic";
+ reg = <0x6 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmc8380_6_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x6 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmc8380_6_gpios: gpio@8800 {
+ compatible = "qcom,pmc8380-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmc8380_6_gpios 0 0 10>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ /* PMC8380VE */
+ pm8550ve_8: pmic@8 {
+ compatible = "qcom,pm8550", "qcom,spmi-pmic";
+ reg = <0x8 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550ve_8_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x8 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550ve_8_gpios: gpio@8800 {
+ compatible = "qcom,pm8550ve-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550ve_8_gpios 0 0 8>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ /* PMC8380VE */
+ pm8550ve_9: pmic@9 {
+ compatible = "qcom,pm8550", "qcom,spmi-pmic";
+ reg = <0x9 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550ve_9_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x9 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550ve_9_gpios: gpio@8800 {
+ compatible = "qcom,pm8550ve-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550ve_9_gpios 0 0 8>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pm8010: pmic@c {
+ compatible = "qcom,pm8010", "qcom,spmi-pmic";
+ reg = <0xc SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8010_temp_alarm: temp-alarm@2400 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0x2400>;
+ interrupts = <0xc 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+ };
};
&spmi_bus1 {
@@ -48,4 +515,19 @@
#phy-cells = <0>;
};
};
+
+ smb2360_3: pmic@c {
+ compatible = "qcom,smb2360", "qcom,spmi-pmic";
+ reg = <0xc SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ smb2360_3_eusb2_repeater: phy@fd00 {
+ compatible = "qcom,smb2360-eusb2-repeater";
+ reg = <0xfd00>;
+ #phy-cells = <0>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
index 2061fbe7b75a..72a4f4138616 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
@@ -19,10 +19,200 @@
serial0 = &uart21;
};
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9385-codec";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_default>;
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_l15b_1p8>;
+ vdd-rxtx-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l15b_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ #sound-dai-cells = <1>;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>,
+ <&tlmm 125 GPIO_ACTIVE_HIGH>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ connector@2 {
+ compatible = "usb-c-connector";
+ reg = <2>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss2_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss2_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E80100-QCP";
+ audio-routing = "SpkrLeft IN", "WSA WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "TX SWR_INPUT1", "ADC2_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>,
+ <&swr0 0>, <&lpass_wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
+
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
@@ -50,6 +240,20 @@
regulator-always-on;
regulator-boot-on;
};
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nvme_reg_en>;
+ };
};
&apps_rsc {
@@ -402,6 +606,16 @@
};
};
+&lpass_tlmm {
+ spkr_01_sd_n_active: spkr-01-sd-n-active-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
&mdss {
status = "okay";
};
@@ -457,11 +671,19 @@
};
&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie6a_default>;
+
status = "okay";
};
&pcie6a_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-phy-supply = <&vreg_l1d_0p8>;
vdda-pll-supply = <&vreg_l2j_1p2>;
status = "okay";
@@ -493,6 +715,10 @@
status = "okay";
};
+&smb2360_3 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
@@ -508,6 +734,57 @@
vdd3-supply = <&vreg_l8b_3p0>;
};
+&swr0 {
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ /* WSA8845, Left Speaker */
+ left_spkr: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ #sound-dai-cells = <0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ sound-name-prefix = "SpkrLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ };
+
+ /* WSA8845, Right Speaker */
+ right_spkr: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ #sound-dai-cells = <0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ sound-name-prefix = "SpkrRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9385 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9385 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
&tlmm {
gpio-reserved-ranges = <33 3>, /* Unused */
<44 4>, /* SPI (TPM) */
@@ -519,6 +796,44 @@
drive-strength = <16>;
bias-disable;
};
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio191";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
};
&uart21 {
@@ -527,8 +842,8 @@
};
&usb_1_ss0_hsphy {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
phys = <&smb2360_0_eusb2_repeater>;
@@ -536,6 +851,9 @@
};
&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
status = "okay";
};
@@ -545,12 +863,19 @@
&usb_1_ss0_dwc3 {
dr_mode = "host";
- usb-role-switch;
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
};
&usb_1_ss1_hsphy {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
phys = <&smb2360_1_eusb2_repeater>;
@@ -558,6 +883,9 @@
};
&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
status = "okay";
};
@@ -567,12 +895,19 @@
&usb_1_ss1_dwc3 {
dr_mode = "host";
- usb-role-switch;
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
};
&usb_1_ss2_hsphy {
- vdd-supply = <&vreg_l2e_0p8>;
- vdda12-supply = <&vreg_l3e_1p2>;
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
phys = <&smb2360_2_eusb2_repeater>;
@@ -580,6 +915,9 @@
};
&usb_1_ss2_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
status = "okay";
};
@@ -589,5 +927,12 @@
&usb_1_ss2_dwc3 {
dr_mode = "host";
- usb-role-switch;
+};
+
+&usb_1_ss2_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss2_hs_in>;
+};
+
+&usb_1_ss2_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss2_ss_in>;
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 5f90a0b3c016..7bca5fcd7d52 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,x1e80100-dispcc.h>
#include <dt-bindings/clock/qcom,x1e80100-gcc.h>
+#include <dt-bindings/clock/qcom,x1e80100-gpucc.h>
#include <dt-bindings/clock/qcom,x1e80100-tcsr.h>
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/interconnect/qcom,icc.h>
@@ -2505,6 +2506,66 @@
};
};
+ tsens0: thermal-sensor@c271000 {
+ compatible = "qcom,x1e80100-tsens", "qcom,tsens-v2";
+ reg = <0 0x0c271000 0 0x1000>,
+ <0 0x0c222000 0 0x1000>;
+
+ interrupts-extended = <&pdc 26 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow",
+ "critical";
+
+ #qcom,sensors = <16>;
+
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens1: thermal-sensor@c272000 {
+ compatible = "qcom,x1e80100-tsens", "qcom,tsens-v2";
+ reg = <0 0x0c272000 0 0x1000>,
+ <0 0x0c223000 0 0x1000>;
+
+ interrupts-extended = <&pdc 27 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow",
+ "critical";
+
+ #qcom,sensors = <16>;
+
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens2: thermal-sensor@c273000 {
+ compatible = "qcom,x1e80100-tsens", "qcom,tsens-v2";
+ reg = <0 0x0c273000 0 0x1000>,
+ <0 0x0c224000 0 0x1000>;
+
+ interrupts-extended = <&pdc 28 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 643 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow",
+ "critical";
+
+ #qcom,sensors = <16>;
+
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens3: thermal-sensor@c274000 {
+ compatible = "qcom,x1e80100-tsens", "qcom,tsens-v2";
+ reg = <0 0x0c274000 0 0x1000>,
+ <0 0x0c225000 0 0x1000>;
+
+ interrupts-extended = <&pdc 29 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 770 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow",
+ "critical";
+
+ #qcom,sensors = <16>;
+
+ #thermal-sensor-cells = <1>;
+ };
+
usb_1_ss0_hsphy: phy@fd3000 {
compatible = "qcom,x1e80100-snps-eusb2-phy",
"qcom,sm8550-snps-eusb2-phy";
@@ -2543,6 +2604,34 @@
#phy-cells = <1>;
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_ss0_qmpphy_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_ss0_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usb_1_ss0_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp0_out>;
+ };
+ };
+ };
};
usb_1_ss1_hsphy: phy@fd9000 {
@@ -2583,6 +2672,34 @@
#phy-cells = <1>;
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_ss1_qmpphy_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_ss1_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usb_1_ss1_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp1_out>;
+ };
+ };
+ };
};
usb_1_ss2_hsphy: phy@fde000 {
@@ -2623,6 +2740,34 @@
#phy-cells = <1>;
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_ss2_qmpphy_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_ss2_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_dwc3_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usb_1_ss2_qmpphy_dp_in: endpoint {
+ remote-endpoint = <&mdss_dp2_out>;
+ };
+ };
+ };
};
cnoc_main: interconnect@1500000 {
@@ -2737,15 +2882,17 @@
device_type = "pci";
compatible = "qcom,pcie-x1e80100";
reg = <0 0x01bf8000 0 0x3000>,
- <0 0x70000000 0 0xf1d>,
- <0 0x70000f20 0 0xa8>,
+ <0 0x70000000 0 0xf20>,
+ <0 0x70000f40 0 0xa8>,
<0 0x70001000 0 0x1000>,
- <0 0x70100000 0 0x100000>;
+ <0 0x70100000 0 0x100000>,
+ <0 0x01bfb000 0 0x1000>;
reg-names = "parf",
"dbi",
"elbi",
"atu",
- "config";
+ "config",
+ "mhi";
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x01000000 0 0x00000000 0 0x70200000 0 0x100000>,
@@ -2985,6 +3132,200 @@
#reset-cells = <1>;
};
+ gpu: gpu@3d00000 {
+ compatible = "qcom,adreno-43050c01", "qcom,adreno";
+ reg = <0x0 0x03d00000 0x0 0x40000>,
+ <0x0 0x03d9e000 0x0 0x1000>,
+ <0x0 0x03d61000 0x0 0x800>;
+
+ reg-names = "kgsl_3d0_reg_memory",
+ "cx_mem",
+ "cx_dbgc";
+
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+
+ iommus = <&adreno_smmu 0 0x0>,
+ <&adreno_smmu 1 0x0>;
+
+ operating-points-v2 = <&gpu_opp_table>;
+
+ qcom,gmu = <&gmu>;
+ #cooling-cells = <2>;
+
+ interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
+ interconnect-names = "gfx-mem";
+
+ zap-shader {
+ memory-region = <&gpu_microcode_mem>;
+ firmware-name = "qcom/gen70500_zap.mbn";
+ };
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ opp-peak-kBps = <16500000>;
+ };
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ opp-peak-kBps = <14398438>;
+ };
+
+ opp-925000000 {
+ opp-hz = /bits/ 64 <925000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ opp-peak-kBps = <14398438>;
+ };
+
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ opp-peak-kBps = <12449219>;
+ };
+
+ opp-744000000 {
+ opp-hz = /bits/ 64 <744000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
+ opp-peak-kBps = <10687500>;
+ };
+
+ opp-687000000 {
+ opp-hz = /bits/ 64 <687000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ opp-peak-kBps = <8171875>;
+ };
+
+ opp-550000000 {
+ opp-hz = /bits/ 64 <550000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ opp-peak-kBps = <6074219>;
+ };
+
+ opp-390000000 {
+ opp-hz = /bits/ 64 <390000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ opp-peak-kBps = <3000000>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ opp-peak-kBps = <2136719>;
+ };
+ };
+ };
+
+ gmu: gmu@3d6a000 {
+ compatible = "qcom,adreno-gmu-x185.1", "qcom,adreno-gmu";
+ reg = <0x0 0x03d6a000 0x0 0x35000>,
+ <0x0 0x03d50000 0x0 0x10000>,
+ <0x0 0x0b280000 0x0 0x10000>;
+ reg-names = "gmu", "rscc", "gmu_pdc";
+
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+
+ clocks = <&gpucc GPU_CC_AHB_CLK>,
+ <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gpucc GPU_CC_HUB_CX_INT_CLK>,
+ <&gpucc GPU_CC_DEMET_CLK>;
+ clock-names = "ahb",
+ "gmu",
+ "cxo",
+ "axi",
+ "memnoc",
+ "hub",
+ "demet";
+
+ power-domains = <&gpucc GPU_CX_GDSC>,
+ <&gpucc GPU_GX_GDSC>;
+ power-domain-names = "cx",
+ "gx";
+
+ iommus = <&adreno_smmu 5 0x0>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ operating-points-v2 = <&gmu_opp_table>;
+
+ gmu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-550000000 {
+ opp-hz = /bits/ 64 <550000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ opp-220000000 {
+ opp-hz = /bits/ 64 <220000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+ };
+ };
+
+ gpucc: clock-controller@3d90000 {
+ compatible = "qcom,x1e80100-gpucc";
+ reg = <0 0x03d90000 0 0xa000>;
+ clocks = <&bi_tcxo_div2>,
+ <&gcc GCC_GPU_GPLL0_CPH_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CPH_CLK_SRC>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ adreno_smmu: iommu@3da0000 {
+ compatible = "qcom,x1e80100-smmu-500", "qcom,adreno-smmu",
+ "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x03da0000 0x0 0x40000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 687 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 688 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 574 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 575 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 576 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 577 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 660 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 662 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 665 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 666 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 667 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 669 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 670 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 700 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+ <&gpucc GPU_CC_AHB_CLK>;
+ clock-names = "hlos",
+ "bus",
+ "iface",
+ "ahb";
+ power-domains = <&gpucc GPU_CX_GDSC>;
+ dma-coherent;
+ };
+
gem_noc: interconnect@26400000 {
compatible = "qcom,x1e80100-gem-noc";
reg = <0 0x26400000 0 0x311200>;
@@ -3445,8 +3786,23 @@
dma-coherent;
- port {
- usb_1_ss2_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_ss2_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_ss2_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_usb_ss_in>;
+ };
};
};
};
@@ -3514,8 +3870,15 @@
phy-names = "usb2-phy";
maximum-speed = "high-speed";
- port {
- usb_2_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_2_dwc3_hs: endpoint {
+ };
};
};
};
@@ -3590,8 +3953,23 @@
dma-coherent;
- port {
- usb_1_ss0_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_ss0_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_ss0_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_usb_ss_in>;
+ };
};
};
};
@@ -3673,8 +4051,23 @@
dma-coherent;
- port {
- usb_1_ss1_role_switch: endpoint {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_ss1_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_ss1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_usb_ss_in>;
+ };
};
};
};
@@ -3860,6 +4253,7 @@
reg = <1>;
mdss_dp0_out: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_dp_in>;
};
};
};
@@ -3942,6 +4336,7 @@
reg = <1>;
mdss_dp1_out: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_dp_in>;
};
};
};
@@ -4021,6 +4416,10 @@
port@1 {
reg = <1>;
+
+ mdss_dp2_out: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_dp_in>;
+ };
};
};
@@ -5155,6 +5554,129 @@
};
};
+ pmu@24091000 {
+ compatible = "qcom,x1e80100-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
+ reg = <0 0x24091000 0 0x1000>;
+
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+
+ interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&llcc_bwmon_opp_table>;
+
+ llcc_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <800000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <2188000>;
+ };
+
+ opp-2 {
+ opp-peak-kBps = <3072000>;
+ };
+
+ opp-3 {
+ opp-peak-kBps = <6220800>;
+ };
+
+ opp-4 {
+ opp-peak-kBps = <6835200>;
+ };
+
+ opp-5 {
+ opp-peak-kBps = <8371200>;
+ };
+
+ opp-6 {
+ opp-peak-kBps = <10944000>;
+ };
+
+ opp-7 {
+ opp-peak-kBps = <12748800>;
+ };
+
+ opp-8 {
+ opp-peak-kBps = <14745600>;
+ };
+
+ opp-9 {
+ opp-peak-kBps = <16896000>;
+ };
+ };
+ };
+
+ /* cluster0 */
+ pmu@240b3400 {
+ compatible = "qcom,x1e80100-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0 0x240b3400 0 0x600>;
+
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+
+ cpu_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <4800000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <7464000>;
+ };
+
+ opp-2 {
+ opp-peak-kBps = <9600000>;
+ };
+
+ opp-3 {
+ opp-peak-kBps = <12896000>;
+ };
+
+ opp-4 {
+ opp-peak-kBps = <14928000>;
+ };
+
+ opp-5 {
+ opp-peak-kBps = <17064000>;
+ };
+ };
+ };
+
+ /* cluster2 */
+ pmu@240b5400 {
+ compatible = "qcom,x1e80100-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0 0x240b5400 0 0x600>;
+
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+ };
+
+ /* cluster1 */
+ pmu@240b6400 {
+ compatible = "qcom,x1e80100-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0 0x240b6400 0 0x600>;
+
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+ };
+
system-cache-controller@25000000 {
compatible = "qcom,x1e80100-llcc";
reg = <0 0x25000000 0 0x200000>,
@@ -5224,6 +5746,55 @@
label = "lpass";
qcom,remote-pid = <2>;
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x1003 0x80>,
+ <&apps_smmu 0x1063 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x1004 0x80>,
+ <&apps_smmu 0x1064 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x1005 0x80>,
+ <&apps_smmu 0x1065 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x1006 0x80>,
+ <&apps_smmu 0x1066 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x1007 0x80>,
+ <&apps_smmu 0x1067 0x0>;
+ dma-coherent;
+ };
+ };
+
gpr {
compatible = "qcom,gpr";
qcom,glink-channels = "adsp_apps";
@@ -5313,6 +5884,101 @@
label = "cdsp";
qcom,remote-pid = <5>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "cdsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ iommus = <&apps_smmu 0x0c01 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ iommus = <&apps_smmu 0x0c02 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x0c03 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x0c04 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x0c05 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x0c06 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x0c07 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@8 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <8>;
+ iommus = <&apps_smmu 0x0c08 0x20>;
+ dma-coherent;
+ };
+
+ /* note: compute-cb@9 is secure */
+
+ compute-cb@10 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <10>;
+ iommus = <&apps_smmu 0x0c0c 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@11 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <11>;
+ iommus = <&apps_smmu 0x0c0d 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@12 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <12>;
+ iommus = <&apps_smmu 0x0c0e 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@13 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <13>;
+ iommus = <&apps_smmu 0x0c0f 0x20>;
+ dma-coherent;
+ };
+ };
};
};
};
@@ -5325,4 +5991,1158 @@
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
};
+
+ thermal-zones {
+ aoss0-thermal {
+ thermal-sensors = <&tsens0 0>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ aoss0-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-0-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 1>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-0-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 2>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-1-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-1-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 4>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-2-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-2-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 6>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-3-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 7>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-3-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss0-top-thermal {
+ thermal-sensors = <&tsens0 9>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpuss2-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss0-btm-thermal {
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpuss2-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ mem-thermal {
+ thermal-sensors = <&tsens0 11>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ mem-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ video-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 12>;
+
+ trips {
+ trip-point0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ aoss1-thermal {
+ thermal-sensors = <&tsens1 0>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ aoss0-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-0-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 1>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-0-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 2>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-1-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 3>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-1-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 4>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-2-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 5>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-2-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 6>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-3-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 7>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-3-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens1 8>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss1-top-thermal {
+ thermal-sensors = <&tsens1 9>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpuss2-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss1-btm-thermal {
+ thermal-sensors = <&tsens1 10>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpuss2-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ aoss2-thermal {
+ thermal-sensors = <&tsens2 0>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ aoss0-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-0-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 1>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-0-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 2>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-1-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 3>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-1-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 4>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-2-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 5>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-2-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 6>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-3-top-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 7>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-3-btm-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens2 8>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-critical {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss2-top-thermal {
+ thermal-sensors = <&tsens2 9>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpuss2-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss2-btm-thermal {
+ thermal-sensors = <&tsens2 10>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpuss2-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ aoss3-thermal {
+ thermal-sensors = <&tsens3 0>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ aoss0-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ nsp0-thermal {
+ thermal-sensors = <&tsens3 1>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ nsp0-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ nsp1-thermal {
+ thermal-sensors = <&tsens3 2>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ nsp1-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ nsp2-thermal {
+ thermal-sensors = <&tsens3 3>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ nsp2-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ nsp3-thermal {
+ thermal-sensors = <&tsens3 4>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ nsp3-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-0-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 5>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-1-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 6>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-2-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 7>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-3-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 8>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-4-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 9>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-5-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 10>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-6-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 11>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss-7-thermal {
+ polling-delay-passive = <10>;
+
+ thermal-sensors = <&tsens3 12>;
+
+ trips {
+ trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ camera0-thermal {
+ thermal-sensors = <&tsens3 13>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ camera0-critical {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ camera1-thermal {
+ thermal-sensors = <&tsens3 14>;
+
+ trips {
+ trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ camera0-critical {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/renesas/condor-common.dtsi b/arch/arm64/boot/dts/renesas/condor-common.dtsi
index 7c34d14dcd7e..8b7c0c34eadc 100644
--- a/arch/arm64/boot/dts/renesas/condor-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/condor-common.dtsi
@@ -227,6 +227,12 @@
};
};
};
+
+ eeprom@50 {
+ compatible = "rohm,br24t01", "atmel,24c01";
+ reg = <0x50>;
+ pagesize = <8>;
+ };
};
&i2c1 {
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index a8a44fe5e83b..1dbf9d56c68d 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -2853,6 +2853,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 4fff511e994c..10f22c52e79e 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -2704,6 +2704,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 1ef43d78c3a5..3e2af50ce7c6 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1990,6 +1990,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index be55ae83944c..1eeb4c7b4c4b 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -2985,6 +2985,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index bea4edd17d53..96f3b5fe7e92 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -3473,6 +3473,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 7846fea8e40d..1122c470b72f 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -3068,6 +3068,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index 58f9286a5ab5..bf1130af7de3 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -2889,6 +2889,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 692940662d38..f02d1547b881 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -2877,6 +2877,7 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
/* External USB clocks - can be overridden by the board */
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index d2d3cecc76d5..64fb95b1c89a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -1223,5 +1223,6 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
index 68d1f1d53b3a..1d326552e2fa 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
@@ -14,11 +14,3 @@
model = "Renesas Condor board based on r8a77980";
compatible = "renesas,condor", "renesas,r8a77980";
};
-
-&i2c0 {
- eeprom@50 {
- compatible = "rohm,br24t01", "atmel,24c01";
- reg = <0x50>;
- pagesize = <8>;
- };
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index c0ba110c74d6..0c2b157036e7 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1630,5 +1630,6 @@
IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 37063e3f4e1b..233af3081e84 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -2157,5 +2157,6 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index 89990dd8ebf7..5f0828a4675b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -1476,5 +1476,6 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index cfa70b441e32..d76347001cc1 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -2919,6 +2919,9 @@
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi
index 477f3114d2fd..4ed8d4c37906 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi
@@ -15,6 +15,12 @@
compatible = "renesas,spider-cpu", "renesas,r8a779f0";
aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
serial0 = &hscif0;
serial1 = &scif0;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
index 72cf30341fc4..9629adb47d99 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
@@ -1324,7 +1324,10 @@
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
ufs30_clk: ufs30-clk {
diff --git a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
index bc65a7b4d999..fa910b85859e 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
@@ -14,6 +14,12 @@
compatible = "renesas,s4sk", "renesas,r8a779f4", "renesas,r8a779f0";
aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
serial0 = &hscif0;
serial1 = &hscif1;
ethernet0 = &rswitch;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso
index e6f53377ecd9..e6cf304c77ee 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso
+++ b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso
@@ -155,11 +155,7 @@
pinctrl-0 = <&sound_clk_pins>, <&sound_pins>;
pinctrl-names = "default";
- /* Single DAI */
- #sound-dai-cells = <0>;
-
/* audio_clkout */
- #clock-cells = <0>;
clock-frequency = <5644800>; /* 44.1kHz groups [(C) clock] */
// clock-frequency = <6144000>; /* 48 kHz groups [(C) clock] */
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index 9bc542bc6169..53d1d4d8197a 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -815,8 +815,6 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -862,8 +860,6 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -909,8 +905,6 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -1724,18 +1718,6 @@
};
rcar_sound: sound@ec5a0000 {
- /*
- * #sound-dai-cells is required
- *
- * Single DAI : #sound-dai-cells = <0>; <&rcar_sound>;
- * Multi DAI : #sound-dai-cells = <1>; <&rcar_sound N>;
- */
- /*
- * #clock-cells is required
- *
- * clkout : #clock-cells = <0>; <&rcar_sound>;
- * audio_clkout0/1/2/3 : #clock-cells = <1>; <&rcar_sound N>;
- */
compatible = "renesas,rcar_sound-r8a779g0", "renesas,rcar_sound-gen4";
reg = <0 0xec5a0000 0 0x020>,
<0 0xec540000 0 0x1000>,
@@ -1745,6 +1727,11 @@
clocks = <&cpg CPG_MOD 2926>, <&cpg CPG_MOD 2927>, <&audio_clkin>;
clock-names = "ssiu.0", "ssi.0", "clkin";
+ /* #clock-cells is fixed */
+ #clock-cells = <0>;
+ /* #sound-dai-cells is fixed */
+ #sound-dai-cells = <0>;
+
power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
resets = <&cpg 2926>, <&cpg 2927>;
reset-names = "ssiu.0", "ssi.0";
@@ -2359,6 +2346,9 @@
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
index cfbe8c8680cd..2b9a19bb1c5d 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
@@ -17,6 +17,10 @@
compatible = "renesas,gray-hawk-single", "renesas,r8a779h0";
aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
serial0 = &hscif0;
serial1 = &hscif2;
ethernet0 = &avb0;
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
index 6d791024cabe..a03ab2b6a859 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
@@ -14,9 +14,15 @@
#address-cells = <2>;
#size-cells = <2>;
+ /* External Audio clock - to be overridden by boards that provide it */
+ audio_clkin: audio_clkin {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
cluster0_opp: opp-table-0 {
compatible = "operating-points-v2";
- opp-shared;
opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
@@ -939,6 +945,454 @@
status = "disabled";
};
+ vin00: video@e6ef0000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef0000 0 0x1000>;
+ interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 730>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 730>;
+ renesas,id = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin00isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin00>;
+ };
+ };
+ };
+ };
+
+ vin01: video@e6ef1000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef1000 0 0x1000>;
+ interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 731>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 731>;
+ renesas,id = <1>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin01isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin01>;
+ };
+ };
+ };
+ };
+
+ vin02: video@e6ef2000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef2000 0 0x1000>;
+ interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 800>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 800>;
+ renesas,id = <2>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin02isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin02>;
+ };
+ };
+ };
+ };
+
+ vin03: video@e6ef3000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef3000 0 0x1000>;
+ interrupts = <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 801>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 801>;
+ renesas,id = <3>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin03isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin03>;
+ };
+ };
+ };
+ };
+
+ vin04: video@e6ef4000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef4000 0 0x1000>;
+ interrupts = <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 802>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 802>;
+ renesas,id = <4>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin04isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin04>;
+ };
+ };
+ };
+ };
+
+ vin05: video@e6ef5000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef5000 0 0x1000>;
+ interrupts = <GIC_SPI 534 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 803>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 803>;
+ renesas,id = <5>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin05isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin05>;
+ };
+ };
+ };
+ };
+
+ vin06: video@e6ef6000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef6000 0 0x1000>;
+ interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 804>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 804>;
+ renesas,id = <6>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin06isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin06>;
+ };
+ };
+ };
+ };
+
+ vin07: video@e6ef7000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef7000 0 0x1000>;
+ interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 805>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 805>;
+ renesas,id = <7>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin07isp0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&isp0vin07>;
+ };
+ };
+ };
+ };
+
+ vin08: video@e6ef8000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef8000 0 0x1000>;
+ interrupts = <GIC_SPI 537 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 806>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 806>;
+ renesas,id = <8>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin08isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin08>;
+ };
+ };
+ };
+ };
+
+ vin09: video@e6ef9000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6ef9000 0 0x1000>;
+ interrupts = <GIC_SPI 538 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 807>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 807>;
+ renesas,id = <9>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin09isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin09>;
+ };
+ };
+ };
+ };
+
+ vin10: video@e6efa000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6efa000 0 0x1000>;
+ interrupts = <GIC_SPI 539 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 808>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 808>;
+ renesas,id = <10>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin10isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin10>;
+ };
+ };
+ };
+ };
+
+ vin11: video@e6efb000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6efb000 0 0x1000>;
+ interrupts = <GIC_SPI 540 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 809>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 809>;
+ renesas,id = <11>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin11isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin11>;
+ };
+ };
+ };
+ };
+
+ vin12: video@e6efc000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6efc000 0 0x1000>;
+ interrupts = <GIC_SPI 541 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 810>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 810>;
+ renesas,id = <12>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin12isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin12>;
+ };
+ };
+ };
+ };
+
+ vin13: video@e6efd000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6efd000 0 0x1000>;
+ interrupts = <GIC_SPI 542 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 811>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 811>;
+ renesas,id = <13>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin13isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin13>;
+ };
+ };
+ };
+ };
+
+ vin14: video@e6efe000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6efe000 0 0x1000>;
+ interrupts = <GIC_SPI 543 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 812>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 812>;
+ renesas,id = <14>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin14isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin14>;
+ };
+ };
+ };
+ };
+
+ vin15: video@e6eff000 {
+ compatible = "renesas,vin-r8a779h0";
+ reg = <0 0xe6eff000 0 0x1000>;
+ interrupts = <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 813>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 813>;
+ renesas,id = <15>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <2>;
+
+ vin15isp1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&isp1vin15>;
+ };
+ };
+ };
+ };
+
dmac1: dma-controller@e7350000 {
compatible = "renesas,dmac-r8a779h0",
"renesas,rcar-gen4-dmac";
@@ -1011,6 +1465,67 @@
<&ipmmu_ds0 22>, <&ipmmu_ds0 23>;
};
+ rcar_sound: sound@ec400000 {
+ compatible = "renesas,rcar_sound-r8a779h0", "renesas,rcar_sound-gen4";
+ reg = <0 0xec400000 0 0x40000>,
+ <0 0xec540000 0 0x1000>,
+ <0 0xec541000 0 0x050>,
+ <0 0xec5a0000 0 0x020>;
+ reg-names = "sdmc", "ssiu", "ssi", "adg";
+ clocks = <&cpg CPG_MOD 2926>, <&cpg CPG_MOD 2927>, <&audio_clkin>;
+ clock-names = "ssiu.0", "ssi.0", "clkin";
+ /* #clock-cells is fixed */
+ #clock-cells = <0>;
+ /* #sound-dai-cells is fixed */
+ #sound-dai-cells = <0>;
+
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 2926>, <&cpg 2927>;
+ reset-names = "ssiu.0", "ssi.0";
+ status = "disabled";
+
+ rcar_sound,ssiu {
+ ssiu00: ssiu-0 {
+ dmas = <&dmac1 0x6e>, <&dmac1 0x6f>;
+ dma-names = "tx", "rx";
+ };
+ ssiu01: ssiu-1 {
+ dmas = <&dmac1 0x6c>, <&dmac1 0x6d>;
+ dma-names = "tx", "rx";
+ };
+ ssiu02: ssiu-2 {
+ dmas = <&dmac1 0x6a>, <&dmac1 0x6b>;
+ dma-names = "tx", "rx";
+ };
+ ssiu03: ssiu-3 {
+ dmas = <&dmac1 0x68>, <&dmac1 0x69>;
+ dma-names = "tx", "rx";
+ };
+ ssiu04: ssiu-4 {
+ dmas = <&dmac1 0x66>, <&dmac1 0x67>;
+ dma-names = "tx", "rx";
+ };
+ ssiu05: ssiu-5 {
+ dmas = <&dmac1 0x64>, <&dmac1 0x65>;
+ dma-names = "tx", "rx";
+ };
+ ssiu06: ssiu-6 {
+ dmas = <&dmac1 0x62>, <&dmac1 0x63>;
+ dma-names = "tx", "rx";
+ };
+ ssiu07: ssiu-7 {
+ dmas = <&dmac1 0x60>, <&dmac1 0x61>;
+ dma-names = "tx", "rx";
+ };
+ };
+
+ rcar_sound,ssi {
+ ssi0: ssi-0 {
+ interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+ };
+
mmc0: mmc@ee140000 {
compatible = "renesas,sdhi-r8a779h0",
"renesas,rcar-gen4-sdhi";
@@ -1152,6 +1667,224 @@
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
+ csi40: csi2@fe500000 {
+ compatible = "renesas,r8a779h0-csi2";
+ reg = <0 0xfe500000 0 0x40000>;
+ interrupts = <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 331>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 331>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ csi40isp0: endpoint {
+ remote-endpoint = <&isp0csi40>;
+ };
+ };
+ };
+ };
+
+ csi41: csi2@fe540000 {
+ compatible = "renesas,r8a779h0-csi2";
+ reg = <0 0xfe540000 0 0x40000>;
+ interrupts = <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 400>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 400>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ csi41isp1: endpoint {
+ remote-endpoint = <&isp1csi41>;
+ };
+ };
+ };
+ };
+
+ isp0: isp@fed00000 {
+ compatible = "renesas,r8a779h0-isp";
+ reg = <0 0xfed00000 0 0x10000>;
+ interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&cpg CPG_MOD 612>;
+ power-domains = <&sysc R8A779H0_PD_A3ISP0>;
+ resets = <&cpg 612>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0>;
+
+ isp0csi40: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi40isp0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ isp0vin00: endpoint {
+ remote-endpoint = <&vin00isp0>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ isp0vin01: endpoint {
+ remote-endpoint = <&vin01isp0>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ isp0vin02: endpoint {
+ remote-endpoint = <&vin02isp0>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ isp0vin03: endpoint {
+ remote-endpoint = <&vin03isp0>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ isp0vin04: endpoint {
+ remote-endpoint = <&vin04isp0>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ isp0vin05: endpoint {
+ remote-endpoint = <&vin05isp0>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ isp0vin06: endpoint {
+ remote-endpoint = <&vin06isp0>;
+ };
+ };
+
+ port@8 {
+ reg = <8>;
+ isp0vin07: endpoint {
+ remote-endpoint = <&vin07isp0>;
+ };
+ };
+ };
+ };
+
+ isp1: isp@fed20000 {
+ compatible = "renesas,r8a779h0-isp";
+ reg = <0 0xfed20000 0 0x10000>;
+ interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&cpg CPG_MOD 613>;
+ power-domains = <&sysc R8A779H0_PD_A3ISP0>;
+ resets = <&cpg 613>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0>;
+
+ isp1csi41: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&csi41isp1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ isp1vin08: endpoint {
+ remote-endpoint = <&vin08isp1>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ isp1vin09: endpoint {
+ remote-endpoint = <&vin09isp1>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ isp1vin10: endpoint {
+ remote-endpoint = <&vin10isp1>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ isp1vin11: endpoint {
+ remote-endpoint = <&vin11isp1>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ isp1vin12: endpoint {
+ remote-endpoint = <&vin12isp1>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ isp1vin13: endpoint {
+ remote-endpoint = <&vin13isp1>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ isp1vin14: endpoint {
+ remote-endpoint = <&vin14isp1>;
+ };
+ };
+
+ port@8 {
+ reg = <8>;
+ isp1vin15: endpoint {
+ remote-endpoint = <&vin15isp1>;
+ };
+ };
+ };
+ };
+
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
@@ -1195,5 +1928,7 @@
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
index 6212ee550f33..2eccab9c8962 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
@@ -646,7 +646,7 @@
sdhi0: mmc@11c00000 {
compatible = "renesas,sdhi-r9a07g043",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x11c00000 0 0x10000>;
interrupts = <SOC_PERIPHERAL_IRQ(104) IRQ_TYPE_LEVEL_HIGH>,
<SOC_PERIPHERAL_IRQ(105) IRQ_TYPE_LEVEL_HIGH>;
@@ -662,7 +662,7 @@
sdhi1: mmc@11c10000 {
compatible = "renesas,sdhi-r9a07g043",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x11c10000 0 0x10000>;
interrupts = <SOC_PERIPHERAL_IRQ(106) IRQ_TYPE_LEVEL_HIGH>,
<SOC_PERIPHERAL_IRQ(107) IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
index 165bfcfef3bc..18ef297db933 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
@@ -50,7 +50,10 @@
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
index 88634ae43287..d3838e5820fc 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -1050,7 +1050,7 @@
sdhi0: mmc@11c00000 {
compatible = "renesas,sdhi-r9a07g044",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x11c00000 0 0x10000>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
@@ -1066,7 +1066,7 @@
sdhi1: mmc@11c10000 {
compatible = "renesas,sdhi-r9a07g044",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x11c10000 0 0x10000>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
@@ -1334,6 +1334,9 @@
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
index e89bfe4085f5..1de2e5f0917d 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
@@ -1058,7 +1058,7 @@
sdhi0: mmc@11c00000 {
compatible = "renesas,sdhi-r9a07g054",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x11c00000 0 0x10000>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
@@ -1074,7 +1074,7 @@
sdhi1: mmc@11c10000 {
compatible = "renesas,sdhi-r9a07g054",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x11c10000 0 0x10000>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
@@ -1342,6 +1342,9 @@
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
index f5f3f4f4c8d6..0d5c47a65e46 100644
--- a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
@@ -182,7 +182,7 @@
};
sdhi0: mmc@11c00000 {
- compatible = "renesas,sdhi-r9a08g045", "renesas,rcar-gen3-sdhi";
+ compatible = "renesas,sdhi-r9a08g045", "renesas,rzg2l-sdhi";
reg = <0x0 0x11c00000 0 0x10000>;
interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
@@ -197,7 +197,7 @@
};
sdhi1: mmc@11c10000 {
- compatible = "renesas,sdhi-r9a08g045", "renesas,rcar-gen3-sdhi";
+ compatible = "renesas,sdhi-r9a08g045", "renesas,rzg2l-sdhi";
reg = <0x0 0x11c10000 0 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
@@ -212,7 +212,7 @@
};
sdhi2: mmc@11c20000 {
- compatible = "renesas,sdhi-r9a08g045", "renesas,rcar-gen3-sdhi";
+ compatible = "renesas,sdhi-r9a08g045", "renesas,rzg2l-sdhi";
reg = <0x0 0x11c20000 0 0x10000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
@@ -294,6 +294,9 @@
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
+ "hyp-virt";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
index 50ed66d42a24..9a4cbef704c1 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
@@ -71,7 +71,7 @@
sdhi0: mmc@85000000 {
compatible = "renesas,sdhi-r9a09g011",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x85000000 0 0x2000>;
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
@@ -87,7 +87,7 @@
sdhi1: mmc@85010000 {
compatible = "renesas,sdhi-r9a09g011",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x85010000 0 0x2000>;
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
@@ -103,7 +103,7 @@
emmc: mmc@85020000 {
compatible = "renesas,sdhi-r9a09g011",
- "renesas,rcar-gen3-sdhi";
+ "renesas,rzg2l-sdhi";
reg = <0x0 0x85020000 0 0x2000>;
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
@@ -372,5 +372,6 @@
<&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
<&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys";
};
};
diff --git a/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi b/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi
index b7a3e6caa386..b34855956ae0 100644
--- a/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi
@@ -54,14 +54,6 @@
};
};
- usb0_vbus_otg: regulator-usb0-vbus-otg {
- compatible = "regulator-fixed";
-
- regulator-name = "USB0_VBUS_OTG";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
-
vccq_sdhi1: regulator-vccq-sdhi1 {
compatible = "regulator-gpio";
regulator-name = "SDHI1 VccQ";
@@ -139,6 +131,9 @@
&phyrst {
status = "okay";
+ usb0_vbus_otg: regulator-vbus {
+ regulator-name = "vbus";
+ };
};
&scif0 {
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
index 8ac17370ff36..80496fb3d476 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
@@ -13,6 +13,12 @@
/ {
aliases {
ethernet0 = &avb0;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
serial0 = &hscif0;
};
@@ -142,18 +148,23 @@
&avb0 {
pinctrl-0 = <&avb0_pins>;
pinctrl-names = "default";
- phy-handle = <&phy0>;
+ phy-handle = <&avb0_phy>;
tx-internal-delay-ps = <2000>;
status = "okay";
- phy0: ethernet-phy@0 {
- compatible = "ethernet-phy-id0022.1622",
- "ethernet-phy-ieee802.3-c22";
- rxc-skew-ps = <1500>;
- reg = <0>;
- interrupt-parent = <&gpio7>;
- interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
- reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ avb0_phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0022.1622",
+ "ethernet-phy-ieee802.3-c22";
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi
index a218fda337cf..595ec4ff4cdd 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi
+++ b/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi
@@ -6,6 +6,57 @@
* Copyright (C) 2022 Glider bv
*/
+/ {
+ aliases {
+ ethernet1 = &avb1;
+ ethernet2 = &avb2;
+ };
+};
+
+&avb1 {
+ pinctrl-0 = <&avb1_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb1_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio6 1 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb1_phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&avb2 {
+ pinctrl-0 = <&avb2_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb2_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio5 5 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb2_phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
&i2c0 {
eeprom@53 {
compatible = "rohm,br24g01", "atmel,24c01";
@@ -14,3 +65,55 @@
pagesize = <8>;
};
};
+
+&pfc {
+ avb1_pins: avb1 {
+ mux {
+ groups = "avb1_link", "avb1_mdio", "avb1_rgmii",
+ "avb1_txcrefclk";
+ function = "avb1";
+ };
+
+ mdio {
+ groups = "avb1_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb1_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ link {
+ groups = "avb1_link";
+ bias-disable;
+ };
+ };
+
+ avb2_pins: avb2 {
+ mux {
+ groups = "avb2_link", "avb2_mdio", "avb2_rgmii",
+ "avb2_txcrefclk";
+ function = "avb2";
+ };
+
+ mdio {
+ groups = "avb2_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb2_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ link {
+ groups = "avb2_link";
+ bias-disable;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index f42fa62b4064..fda1b980eb4b 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -7,6 +7,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-roc-cc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-rock-pi-s.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-rock-s0.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3318-a95x-z2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-anbernic-rg351m.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-anbernic-rg351v.dtb
@@ -27,6 +28,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-roc-cc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-geekbox.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-lba3368.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-lion-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-orion-r68-meta.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-px5-evb.dtb
@@ -79,6 +81,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353ps.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353v.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353vs.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg503.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-orangepi-3b-v1.1.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-orangepi-3b-v2.1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-pinenote-v1.1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-pinenote-v1.2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-pinetab2-v0.1.dtb
@@ -90,6 +94,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-powkiddy-x55.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-quartz64-a.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-quartz64-b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-radxa-cm3-io.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-radxa-zero-3e.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-radxa-zero-3w.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-rock-3c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-blade.dtb
@@ -110,6 +116,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-qnap-ts433.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-radxa-e25.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-rock-3a.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-rock-3b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-io-expander.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-sige7.dtb
@@ -118,12 +125,16 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-wifi.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6b-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-evb1-v10.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-friendlyelec-cm3588-nas.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-ok3588-c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-quartzpro64.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5-itx.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-pcie-ep.dtbo
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-pcie-srns.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-tiger-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-toybrick-x0.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-turing-rk1.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
index b47fe02c33fb..62d18ca769a1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
@@ -5,6 +5,8 @@
*/
/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
#include "rk3308.dtsi"
/ {
@@ -15,6 +17,7 @@
ethernet0 = &gmac;
mmc0 = &emmc;
mmc1 = &sdmmc;
+ mmc2 = &sdio;
};
chosen {
@@ -24,17 +27,21 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&green_led_gio>, <&heartbeat_led_gpio>;
+ pinctrl-0 = <&green_led>, <&heartbeat_led>;
green-led {
+ color = <LED_COLOR_ID_GREEN>;
default-state = "on";
+ function = LED_FUNCTION_POWER;
gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
label = "rockpis:green:power";
linux,default-trigger = "default-on";
};
blue-led {
+ color = <LED_COLOR_ID_BLUE>;
default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
label = "rockpis:blue:user";
linux,default-trigger = "heartbeat";
@@ -126,21 +133,37 @@
};
&emmc {
- bus-width = <4>;
cap-mmc-highspeed;
- mmc-hs200-1_8v;
+ cap-sd-highspeed;
+ no-sdio;
non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
vmmc-supply = <&vcc_io>;
status = "okay";
};
&gmac {
clock_in_out = "output";
+ phy-handle = <&rtl8201f>;
phy-supply = <&vcc_io>;
- snps,reset-gpio = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
- snps,reset-active-low;
- snps,reset-delays-us = <0 50000 50000>;
status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtl8201f: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+ };
+ };
};
&gpio0 {
@@ -209,16 +232,46 @@
status = "okay";
};
+&io_domains {
+ vccio0-supply = <&vcc_io>;
+ vccio1-supply = <&vcc_io>;
+ vccio2-supply = <&vcc_io>;
+ vccio3-supply = <&vcc_io>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_io>;
+ status = "okay";
+};
+
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&rtc_32k>;
+ bluetooth {
+ bt_reg_on: bt-reg-on {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host: bt-wake-host {
+ rockchip,pins = <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ host_wake_bt: host-wake-bt {
+ rockchip,pins = <4 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ gmac {
+ mac_rst: mac-rst {
+ rockchip,pins = <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
leds {
- green_led_gio: green-led-gpio {
+ green_led: green-led {
rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
};
- heartbeat_led_gpio: heartbeat-led-gpio {
+ heartbeat_led: heartbeat-led {
rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
@@ -256,15 +309,31 @@
cap-sd-highspeed;
cap-sdio-irq;
keep-power-in-suspend;
- max-frequency = <1000000>;
+ max-frequency = <100000000>;
mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ no-sd;
non-removable;
- sd-uhs-sdr104;
+ sd-uhs-sdr50;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vcc_1v8>;
status = "okay";
+
+ rtl8723ds: wifi@1 {
+ reg = <1>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_host_wake>;
+ };
};
&sdmmc {
+ cap-mmc-highspeed;
cap-sd-highspeed;
+ disable-wp;
+ vmmc-supply = <&vcc_io>;
status = "okay";
};
@@ -283,16 +352,22 @@
};
&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer>;
status = "okay";
};
&uart4 {
+ uart-has-rtscts;
status = "okay";
bluetooth {
- compatible = "realtek,rtl8723bs-bt";
- device-wake-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
+ compatible = "realtek,rtl8723ds-bt";
+ device-wake-gpios = <&gpio4 RK_PB2 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
host-wake-gpios = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_reg_on &bt_wake_host &host_wake_bt>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
new file mode 100644
index 000000000000..bd6419a5c20a
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include "rk3308.dtsi"
+
+/ {
+ model = "Radxa ROCK S0";
+ compatible = "radxa,rock-s0", "rockchip,rk3308";
+
+ aliases {
+ ethernet0 = &gmac;
+ mmc0 = &emmc;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
+ };
+
+ chosen {
+ stdout-path = "serial0:1500000n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_led>;
+
+ led-green {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vdd_log: regulator-1v04-vdd-log {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1040000>;
+ regulator-max-microvolt = <1040000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc_ddr: regulator-1v5-vcc-ddr {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc_1v8: regulator-1v8-vcc {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_io>;
+ };
+
+ vcc_io: regulator-3v3-vcc-io {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_io";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vdd_core: regulator-vdd-core {
+ compatible = "pwm-regulator";
+ pwms = <&pwm0 0 5000 1>;
+ pwm-supply = <&vcc5v0_sys>;
+ regulator-name = "vdd_core";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <827000>;
+ regulator-max-microvolt = <1340000>;
+ regulator-settling-time-up-us = <250>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on>;
+ reset-gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_core>;
+};
+
+&emmc {
+ cap-mmc-highspeed;
+ no-sd;
+ no-sdio;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_pwren>;
+ vmmc-supply = <&vcc_io>;
+ status = "okay";
+};
+
+&gmac {
+ clock_in_out = "output";
+ phy-handle = <&rtl8201f>;
+ phy-supply = <&vcc_io>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtl8201f: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&io_domains {
+ vccio0-supply = <&vcc_io>;
+ vccio1-supply = <&vcc_io>;
+ vccio2-supply = <&vcc_io>;
+ vccio3-supply = <&vcc_io>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_io>;
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_32k>;
+
+ bluetooth {
+ bt_reg_on: bt-reg-on {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host: bt-wake-host {
+ rockchip,pins = <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ host_wake_bt: host-wake-bt {
+ rockchip,pins = <4 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ gmac {
+ mac_rst: mac-rst {
+ rockchip,pins = <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ pwr_led: pwr-led {
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_reg_on: wifi-reg-on {
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_wake_host: wifi-wake-host {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+};
+
+&pwm0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pin_pull_down>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ max-frequency = <50000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ no-sd;
+ non-removable;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm43430a1-fmac", "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_wake_host>;
+ };
+};
+
+&sdmmc {
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ vmmc-supply = <&vcc_io>;
+ status = "okay";
+};
+
+&u2phy {
+ status = "okay";
+};
+
+&u2phy_host {
+ status = "okay";
+};
+
+&u2phy_otg {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer>;
+ status = "okay";
+};
+
+&uart4 {
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43430a1-bt";
+ clocks = <&cru SCLK_RTC32K>;
+ clock-names = "lpo";
+ interrupt-parent = <&gpio4>;
+ interrupts = <RK_PB4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wakeup";
+ device-wakeup-gpios = <&gpio4 RK_PB2 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_reg_on &bt_wake_host &host_wake_bt>;
+ vbat-supply = <&vcc_io>;
+ vddio-supply = <&vcc_1v8>;
+ };
+};
+
+&usb_host_ehci {
+ status = "okay";
+};
+
+&usb_host_ohci {
+ status = "okay";
+};
+
+&usb20_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 962ea893999b..31c25de2d689 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -173,6 +173,11 @@
compatible = "rockchip,rk3308-grf", "syscon", "simple-mfd";
reg = <0x0 0xff000000 0x0 0x08000>;
+ io_domains: io-domains {
+ compatible = "rockchip,rk3308-io-voltage-domain";
+ status = "disabled";
+ };
+
reboot-mode {
compatible = "syscon-reboot-mode";
offset = <0x500>;
@@ -556,6 +561,30 @@
status = "disabled";
};
+ otp: efuse@ff210000 {
+ compatible = "rockchip,rk3308-otp";
+ reg = <0x0 0xff210000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cru SCLK_OTP_USR>, <&cru PCLK_OTP_NS>,
+ <&cru PCLK_OTP_PHY>;
+ clock-names = "otp", "apb_pclk", "phy";
+ resets = <&cru SRST_OTP_PHY>;
+ reset-names = "phy";
+
+ cpu_id: id@7 {
+ reg = <0x07 0x10>;
+ };
+
+ cpu_leakage: cpu-leakage@17 {
+ reg = <0x17 0x1>;
+ };
+
+ logic_leakage: logic-leakage@18 {
+ reg = <0x18 0x1>;
+ };
+ };
+
dmac0: dma-controller@ff2c0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x0 0xff2c0000 0x0 0x4000>;
@@ -811,7 +840,7 @@
clocks = <&cru SCLK_I2S2_8CH_TX_OUT>,
<&cru SCLK_I2S2_8CH_RX_OUT>,
<&cru PCLK_ACODEC>;
- reset-names = "codec-reset";
+ reset-names = "codec";
resets = <&cru SRST_ACODEC_P>;
#sound-dai-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
index f09d60bbe6c4..a608a219543e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -241,8 +241,8 @@
rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
- interrupt-parent = <&gpio2>;
- interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <1>;
clock-output-names = "xin32k", "rk805-clkout2";
gpio-controller;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 229fe9da9c2d..90fef766f3ae 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -154,6 +154,8 @@
};

&hdmi {
+ avdd-0v9-supply = <&vdd_10>;
+ avdd-1v8-supply = <&vcc_18>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 07dcc949b899..b01efd6d042c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -850,8 +850,8 @@
<0>, <24000000>,
<24000000>, <24000000>,
<15000000>, <15000000>,
- <100000000>, <100000000>,
- <100000000>, <100000000>,
+ <300000000>, <100000000>,
+ <400000000>, <100000000>,
<50000000>, <100000000>,
<100000000>, <100000000>,
<50000000>, <50000000>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lba3368.dts b/arch/arm64/boot/dts/rockchip/rk3368-lba3368.dts
new file mode 100644
index 000000000000..e0cc4da7f392
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lba3368.dts
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/clock/rockchip,rk808.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/rt5640.h>
+#include "rk3368.dtsi"
+
+/ {
+ model = "Neardi LBA3368";
+ compatible = "neardi,lba3368", "rockchip,rk3368";
+
+ aliases {
+ ethernet0 = &gmac;
+ mmc0 = &emmc;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio0;
+ rtc0 = &hym8563;
+ rtc1 = &rk808;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ adc-key {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ poll-interval = <100>;
+ keyup-threshold-microvolt = <1800000>;
+
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <0>;
+ };
+ };
+
+ analog-sound {
+ compatible = "audio-graph-card";
+ dais = <&i2s_8ch_p0>;
+ hp-det-gpio = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>;
+ label = "alc5640";
+ routing = "Mic Jack", "MICBIAS1",
+ "IN1P", "Mic Jack",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "Speakers", "SPORP",
+ "Speakers", "SPORN",
+ "Speakers", "SPOLP",
+ "Speakers", "SPOLN";
+ widgets = "Microphone", "Mic Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "Speakers";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_det>;
+ };
+
+ dc_12v: dc-12v-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "dc_12v";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ext_gmac: gmac-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ clock-output-names = "ext_gmac";
+ #clock-cells = <0>;
+ };
+
+ hub_avdd: hub-avdd-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "hub_avdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_io>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power-led {
+ gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ pinctrl-names = "default";
+ pinctrl-0 = <&power_led>;
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 RK808_CLKOUT1>;
+ clock-names = "ext_clock";
+ reset-gpios = <&gpio3 RK_PA5 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on>;
+ };
+
+ vcc_host: vcc-host-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_sys>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&host_vbus_drv>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ vcc_lan: vcc-lan-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_lan";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_io>;
+ regulator-always-on;
+ };
+
+ vcc_otg: vcc-otg-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio0 RK_PC1 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_sys>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&otg_vbus_drv>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ vcc_sys: vcc-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_12v>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd10_usb: vdd10-usb-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd10_usb";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ vin-supply = <&vdd_10>;
+ regulator-always-on;
+ };
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&emmc {
+ bus-width = <8>;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vcc_18>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ cap-mmc-highspeed;
+ non-removable;
+ no-sd;
+ no-sdio;
+ mmc-hs200-1_8v;
+ status = "okay";
+};
+
+&gmac {
+ clock_in_out = "input";
+ phy-handle = <&phy>;
+ phy-mode = "rmii";
+ phy-supply = <&vcc_lan>;
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rmii_pins>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ max-speed = <100>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <1000000>;
+ reset-gpios = <&gpio3 RK_PB4 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&phy_rst>;
+ };
+ };
+};
+
+&io_domains {
+ audio-supply = <&vcca1v8_codec>;
+ dvp-supply = <&vcc_18>;
+ flash0-supply = <&vcc_18>;
+ gpio1830-supply = <&vcc_io>;
+ gpio30-supply = <&vcc_io>;
+ sdcard-supply = <&vccio_sd>;
+ wifi-supply = <&vdd1v8_wl>;
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ interrupts-extended = <&gpio0 RK_PA5 IRQ_TYPE_LEVEL_LOW>;
+ clock-output-names = "rk808-clkout1", "xin32k_wifi_bt";
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc6-supply = <&vcc_sys>;
+ vcc7-supply = <&vcc_sys>;
+ vcc8-supply = <&vcc_io>;
+ vcc9-supply = <&vcc_sys>;
+ vcc10-supply = <&vcc_sys>;
+ vcc11-supply = <&vcc_sys>;
+ vcc12-supply = <&vcc_io>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int &pmic_sleep>;
+ system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+
+ regulators {
+ vdd_cpu: DCDC_REG1 {
+ regulator-name = "vdd_cpu";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log: DCDC_REG2 {
+ regulator-name = "vdd_log";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-suspend-microvolt = <1000000>;
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-name = "vcc_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-suspend-microvolt = <3300000>;
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcca1v8_codec: LDO_REG1 {
+ regulator-name = "vcca1v8_codec";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca3v0_codec: LDO_REG2 {
+ regulator-name = "vcca3v0_codec";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_10: LDO_REG3 {
+ regulator-name = "vdd_10";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-suspend-microvolt = <1000000>;
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd1v8_wl: LDO_REG4 {
+ regulator-name = "vdd1v8_wl";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd10_lcd: LDO_REG6 {
+ regulator-name = "vdd10_lcd";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_18: LDO_REG7 {
+ regulator-name = "vcc_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-suspend-microvolt = <1800000>;
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc18_lcd: LDO_REG8 {
+ regulator-name = "vcc18_lcd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_tp: SWITCH_REG1 {
+ regulator-name = "vcc_tp";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_sd: SWITCH_REG2 {
+ regulator-name = "vcc_sd";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ codec@1c {
+ compatible = "realtek,rt5640";
+ reg = <0x1c>;
+ clocks = <&cru SCLK_I2S_8CH_OUT>;
+ clock-names = "mclk";
+ realtek,dmic1-data-pin = <RT5640_DMIC1_DATA_PIN_IN1P>;
+ realtek,dmic2-data-pin = <RT5640_DMIC2_DATA_PIN_IN1N>;
+ realtek,in1-differential;
+ #sound-dai-cells = <0>;
+
+ port {
+ rt5640_p0_0: endpoint {
+ remote-endpoint = <&i2s_8ch_p0_0>;
+ };
+ };
+ };
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ clock-output-names = "xin32k";
+ #clock-cells = <0>;
+ };
+};
+
+&i2s_8ch {
+ status = "okay";
+
+ i2s_8ch_p0: port {
+ i2s_8ch_p0_0: endpoint {
+ dai-format = "i2s";
+ mclk-fs = <256>;
+ remote-endpoint = <&rt5640_p0_0>;
+ };
+ };
+};
+
+&pinctrl {
+ bluetooth {
+ bt_host_wake: bt-host-wake {
+ rockchip,pins = <3 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_reg_on: bt-reg-on {
+ rockchip,pins = <3 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ bt_wake: bt-wake {
+ rockchip,pins = <3 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ power_led: power-led {
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ phy {
+ phy_rst: phy-rst {
+ rockchip,pins = <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ pmic_sleep: pmic-sleep {
+ rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_reg_on: wifi-reg-on {
+ rockchip,pins = <3 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sound {
+ hp_det: hp-det {
+ rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ host_vbus_drv: host-vbus-drv {
+ rockchip,pins = <3 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ otg_vbus_drv: otg-vbus-drv {
+ rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_host_wake: wifi-host-wake {
+ rockchip,pins = <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmu-supply = <&vcc_io>;
+ vop-supply = <&vcc_io>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_18>;
+ status = "okay";
+};
+
+&sdio0 {
+ bus-width = <4>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vdd1v8_wl>;
+ assigned-clocks = <&cru SCLK_SDIO0>;
+ assigned-clock-parents = <&cru PLL_CPLL>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ no-sd;
+ no-mmc;
+ non-removable;
+ sd-uhs-sdr104;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ wifi@1 {
+ compatible = "brcm,bcm43455-fmac", "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupts-extended = <&gpio3 RK_PA6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_host_wake>;
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4 &sdmmc_cd>;
+ cap-sd-highspeed;
+ disable-wp;
+ no-mmc;
+ no-sdio;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <0>;
+ rockchip,hw-tshut-polarity = <1>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4345c5";
+ interrupts-extended = <&gpio3 RK_PA7 GPIO_ACTIVE_HIGH>;
+ interrupt-names = "host-wakeup";
+ clocks = <&rk808 RK808_CLKOUT1>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio3 RK_PA3 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio3 RK_PA2 GPIO_ACTIVE_HIGH>;
+ max-speed = <15000000>;
+ vbat-supply = <&vcc_io>;
+ vddio-supply = <&vdd1v8_wl>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake &bt_wake &bt_reg_on>;
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_xfer>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_xfer>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ hub@1 {
+ compatible = "usb5e3,610";
+ reg = <1>;
+ vdd-supply = <&hub_avdd>;
+ };
+};
+
+&usb_otg {
+ vbus-supply = <&vcc_otg>;
+ vusb_a-supply = <&vcc_io>;
+ vusb_d-supply = <&vdd10_usb>;
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 734f87db4d11..73618df7a889 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -793,6 +793,7 @@
dma-names = "tx";
pinctrl-names = "default";
pinctrl-0 = <&spdif_tx>;
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -804,6 +805,7 @@
clocks = <&cru SCLK_I2S_2CH>, <&cru HCLK_I2S_2CH>;
dmas = <&dmac_bus 6>, <&dmac_bus 7>;
dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -817,6 +819,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&i2s_8ch_bus>;
+ #sound-dai-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 789fd0dcc88b..3cd63d1e8f15 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -450,7 +450,7 @@ ap_i2c_audio: &i2c8 {
dlg,btn-cfg = <50>;
dlg,mic-det-thr = <500>;
dlg,jack-ins-deb = <20>;
- dlg,jack-det-rate = "32ms_64ms";
+ dlg,jack-det-rate = "32_64";
dlg,jack-rem-deb = <1>;
dlg,a-d-btn-thr = <0xa>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
index e5709c7ee06a..ef754ea30a94 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
@@ -12,6 +12,7 @@
/dts-v1/;
#include <dt-bindings/input/gpio-keys.h>
#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/leds/common.h>
#include "rk3399.dtsi"
#include "rk3399-opp.dtsi"
@@ -69,6 +70,34 @@
};
};

+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&red_led_pin &green_led_pin &blue_led_pin>;
+
+ led_red: led-0 {
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+ };
+
+ led_green: led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpio4 RK_PD5 GPIO_ACTIVE_HIGH>;
+ };
+
+ led_blue: led-2 {
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio4 RK_PD6 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ multi-led {
+ compatible = "leds-group-multicolor";
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_INDICATOR;
+ leds = <&led_red>, <&led_green>, <&led_blue>;
+ };
+
vcc_sys: vcc-sys-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc_sys";
@@ -152,6 +181,12 @@
gpio = <&gpio3 RK_PA1 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
};
+
+ vibrator {
+ compatible = "gpio-vibrator";
+ enable-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&vcc3v3_sys>;
+ };
};

&cpu_alert0 {
@@ -407,6 +442,21 @@
};
};

+&i2c4 {
+ i2c-scl-rising-time-ns = <600>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+
+ /* Accelerometer/gyroscope */
+ mpu6500@68 {
+ compatible = "invensense,mpu6500";
+ reg = <0x68>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PC6 IRQ_TYPE_LEVEL_LOW>;
+ vddio-supply = <&vcc_1v8>;
+ };
+};
+
&cluster0_opp {
opp04 {
status = "disabled";
@@ -481,6 +531,20 @@
};
};

+ leds {
+ red_led_pin: red-led-pin {
+ rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ green_led_pin: green-led-pin {
+ rockchip,pins = <4 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ blue_led_pin: blue-led-pin {
+ rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
@@ -565,6 +629,16 @@
status = "okay";
};

+&spi1 {
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ };
+};
+
&tsadc {
rockchip,hw-tshut-mode = <1>;
rockchip,hw-tshut-polarity = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro.dtsi b/arch/arm64/boot/dts/rockchip/rk3399pro.dtsi
deleted file mode 100644
index bb5ebf6608b9..000000000000
--- a/arch/arm64/boot/dts/rockchip/rk3399pro.dtsi
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-// Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd.
-
-#include "rk3399.dtsi"
-
-/ {
- compatible = "rockchip,rk3399pro";
-};
-
-/* Default to enabled since AP talk to NPU part over pcie */
-&pcie_phy {
- status = "okay";
-};
-
-/* Default to enabled since AP talk to NPU part over pcie */
-&pcie0 {
- ep-gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_HIGH>;
- num-lanes = <4>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie_clkreqn_cpm>;
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v1.1.dts b/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v1.1.dts
new file mode 100644
index 000000000000..074e93bd4b85
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v1.1.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include "rk3566-orangepi-3b.dtsi"
+
+/ {
+ model = "Xunlong Orange Pi 3B v1.1";
+ compatible = "xunlong,orangepi-3b-v1.1", "xunlong,orangepi-3b", "rockchip,rk3566";
+};
+
+&pmu_io_domains {
+ vccio5-supply = <&vcc_3v3>;
+};
+
+&gmac1 {
+ phy-handle = <&rgmii_phy1>;
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio3 RK_PC2 GPIO_ACTIVE_LOW>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v2.1.dts b/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v2.1.dts
new file mode 100644
index 000000000000..d894bff41e61
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b-v2.1.dts
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include "rk3566-orangepi-3b.dtsi"
+
+/ {
+ model = "Xunlong Orange Pi 3B v2.1";
+ compatible = "xunlong,orangepi-3b-v2.1", "xunlong,orangepi-3b", "rockchip,rk3566";
+
+ vccio_phy1: regulator-1v8-vccio-phy {
+ compatible = "regulator-fixed";
+ regulator-name = "vccio_phy1";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ };
+};
+
+&pmu_io_domains {
+ vccio5-supply = <&vccio_phy1>;
+};
+
+&gmac1 {
+ phy-handle = <&rgmii_phy1>;
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio4 RK_PC4 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&sdmmc1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm43456-fmac", "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_wake_host_h>;
+ };
+};
+
+&uart1 {
+ bluetooth {
+ compatible = "brcm,bcm4345c5";
+ clocks = <&rk809 1>;
+ clock-names = "lpo";
+ interrupt-parent = <&gpio2>;
+ interrupts = <RK_PC0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wakeup";
+ device-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_reg_on_h &bt_wake_host_h &host_wake_bt_h>;
+ vbat-supply = <&vcc_3v3>;
+ vddio-supply = <&vcc_1v8>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b.dtsi
new file mode 100644
index 000000000000..d539570f531e
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-orangepi-3b.dtsi
@@ -0,0 +1,678 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3566.dtsi"
+
+/ {
+ model = "Xunlong Orange Pi 3B";
+ compatible = "xunlong,orangepi-3b", "rockchip,rk3566";
+
+ aliases {
+ ethernet0 = &gmac1;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ mmc2 = &sdmmc1;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&work_led>;
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vcc3v3_pcie30: regulator-3v3-vcc-pcie30 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20_pwren>;
+ regulator-name = "vcc3v3_pcie30";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc3v3_sys: regulator-3v3-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_usb_host: regulator-5v0-vcc-usb-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren_h>;
+ regulator-name = "vcc5v0_usb_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb_otg: regulator-5v0-vcc-usb-otg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_otg_pwren_h>;
+ regulator-name = "vcc5v0_usb_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk809 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on_h>;
+ post-power-on-delay-ms = <200>;
+ power-off-delay-us = <5000000>;
+ reset-gpios = <&gpio0 RK_PD3 GPIO_ACTIVE_LOW>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "Analog RK809";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&rk809>;
+ };
+ };
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ clock_in_out = "input";
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m0_miim
+ &gmac1m0_tx_bus2
+ &gmac1m0_rx_bus2
+ &gmac1m0_rgmii_clk
+ &gmac1m0_rgmii_bus
+ &gmac1m0_clkinout>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda0v9_image>;
+ avdd-1v8-supply = <&vcca1v8_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ assigned-clocks = <&cru I2S1_MCLKOUT_TX>;
+ assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>;
+ #clock-cells = <1>;
+ clocks = <&cru I2S1_MCLKOUT_TX>;
+ clock-names = "mclk";
+ clock-output-names = "rk809-clkout1", "rk809-clkout2";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>, <&i2s1m0_mclk>;
+ #sound-dai-cells = <0>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-name = "vdd_npu";
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-name = "vdda0v9_image";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-name = "vcca1v8_image";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-name = "vcc3v3_sd";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ vdd_cpu: regulator@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <830000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2s0_8ch {
+ status = "okay";
+};
+
+&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx
+ &i2s1m0_lrcktx
+ &i2s1m0_sdi0
+ &i2s1m0_sdo0>;
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+};
+
+&pcie2x1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20_pins>;
+ reset-gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pinctrl {
+ bluetooth {
+ bt_reg_on_h: bt-reg-on-h {
+ rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host_h: bt-wake-host-h {
+ rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ host_wake_bt_h: host-wake-bt-h {
+ rockchip,pins = <2 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ work_led: work-led {
+ rockchip,pins = <0 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie20_pins: pcie20-pins {
+ rockchip,pins =
+ <1 RK_PB0 4 &pcfg_pull_none>,
+ <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>,
+ <1 RK_PB1 4 &pcfg_pull_none>;
+ };
+
+ pcie20_pwren: pcie20-pwren {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb {
+ usb_host_pwren_h: usb-host-pwren-h {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb_otg_pwren_h: usb-otg-pwren-h {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_reg_on_h: wifi-reg-on-h {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_wake_host_h: wifi-wake-host-h {
+ rockchip,pins = <0 RK_PD6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ vccio1-supply = <&vccio_acodec>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio6-supply = <&vcc_3v3>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ max-frequency = <200000000>;
+ mmc-hs200-1_8v;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ no-sd;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_clk &sdmmc1_cmd>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sfc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc5v0_usb_host>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_usb_otg>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_host {
+ phy-supply = <&vcc5v0_usb_host>;
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_usb_host>;
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
index d899087bf0b5..ae2536c65a83 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
@@ -674,6 +674,7 @@
};

&uart1 {
+ dma-names = "tx", "rx";
pinctrl-0 = <&uart1m0_ctsn>, <&uart1m0_rtsn>, <&uart1m0_xfer>;
pinctrl-names = "default";
uart-has-rtscts;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
index 0b191d8462ad..37a1303d9a34 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
@@ -738,6 +738,7 @@
};

&uart1 {
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
index 26322a358d91..13e599a85eb8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
@@ -289,7 +289,7 @@
regulator-name = "vdd_gpu";
regulator-always-on;
regulator-boot-on;
- regulator-min-microvolt = <900000>;
+ regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1350000>;
regulator-ramp-delay = <6001>;
@@ -652,6 +652,7 @@
};

&uart1 {
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi
new file mode 100644
index 000000000000..9cc7aa3298d0
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3566.dtsi"
+
+/ {
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "d";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_led2>;
+
+ led-green {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vcc_1v8: regulator-1v8-vcc {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_p>;
+ };
+
+ vcca_1v8: regulator-1v8-vcca {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_p>;
+ };
+
+ vcca1v8_image: regulator-1v8-vcca-image {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca1v8_image";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_p>;
+ };
+
+ vcc_3v3: regulator-3v3-vcc {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gpio0 {
+ gpio-line-names =
+ /* GPIO0_A0 - A7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO0_B0 - B7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO0_C0 - C7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO0_D0 - D7 */
+ "pin-10 [GPIO0_D0]", "pin-08 [GPIO0_D1]", "",
+ "", "", "", "", "";
+};
+
+&gpio1 {
+ gpio-line-names =
+ /* GPIO1_A0 - A7 */
+ "pin-03 [GPIO1_A0]", "pin-05 [GPIO1_A1]", "",
+ "", "pin-37 [GPIO1_A4]", "",
+ "", "",
+ /* GPIO1_B0 - B7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO1_C0 - C7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO1_D0 - D7 */
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio2 {
+ gpio-line-names =
+ /* GPIO2_A0 - A7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO2_B0 - B7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO2_C0 - C7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO2_D0 - D7 */
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ /* GPIO3_A0 - A7 */
+ "", "pin-11 [GPIO3_A1]", "pin-13 [GPIO3_A2]",
+ "pin-12 [GPIO3_A3]", "pin-35 [GPIO3_A4]", "pin-40 [GPIO3_A5]",
+ "pin-38 [GPIO3_A6]", "pin-36 [GPIO3_A7]",
+ /* GPIO3_B0 - B7 */
+ "pin-15 [GPIO3_B0]", "pin-16 [GPIO3_B1]", "pin-18 [GPIO3_B2]",
+ "pin-29 [GPIO3_B3]", "pin-31 [GPIO3_B4]", "",
+ "", "",
+ /* GPIO3_C0 - C7 */
+ "", "pin-22 [GPIO3_C1]", "pin-32 [GPIO3_C2]",
+ "pin-33 [GPIO3_C3]", "pin-07 [GPIO3_C4]", "",
+ "", "",
+ /* GPIO3_D0 - D7 */
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names =
+ /* GPIO4_A0 - A7 */
+ "", "", "", "", "", "", "", "",
+ /* GPIO4_B0 - B7 */
+ "", "", "pin-27 [GPIO4_B2]",
+ "pin-28 [GPIO4_B3]", "", "", "", "",
+ /* GPIO4_C0 - C7 */
+ "", "", "pin-23 [GPIO4_C2]",
+ "pin-19 [GPIO4_C3]", "", "pin-21 [GPIO4_C5]",
+ "pin-24 [GPIO4_C6]", "",
+ /* GPIO4_D0 - D7 */
+ "", "", "", "", "", "", "", "";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_npu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda_0v9>;
+ avdd-1v8-supply = <&vcca1v8_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ rk817: pmic@20 {
+ compatible = "rockchip,rk817";
+ reg = <0x20>;
+ #clock-cells = <1>;
+ clock-output-names = "rk817-clkout1", "rk817-clkout2";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc5-supply = <&vcc_sys>;
+ vcc6-supply = <&vcc_sys>;
+ vcc7-supply = <&vcc_sys>;
+ vcc8-supply = <&vcc_sys>;
+ vcc9-supply = <&vcc5v_midu>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vdd_gpu_npu: DCDC_REG2 {
+ regulator-name = "vdd_gpu_npu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc3v3_sys: DCDC_REG4 {
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG1 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_1v8_p: LDO_REG7 {
+ regulator-name = "vcc_1v8_p";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v8_dvp: LDO_REG8 {
+ regulator-name = "vcc1v8_dvp";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc2v8_dvp: LDO_REG9 {
+ regulator-name = "vcc2v8_dvp";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc5v_midu: BOOST {
+ regulator-name = "vcc5v_midu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vbus: OTG_SWITCH {
+ regulator-name = "vbus";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ vdd_cpu: regulator@40 {
+ compatible = "rockchip,rk8600";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1390000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2s0_8ch {
+ status = "okay";
+};
+
+&pinctrl {
+ leds {
+ user_led2: user-led2 {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcca1v8_pmu>;
+ vccio1-supply = <&vccio_acodec>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_3v3>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ vmmc-supply = <&vcc3v3_sys>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3e.dts b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3e.dts
new file mode 100644
index 000000000000..4a830eb09f0b
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3e.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include "rk3566-radxa-zero-3.dtsi"
+
+/ {
+ model = "Radxa ZERO 3E";
+ compatible = "radxa,zero-3e", "rockchip,rk3566";
+
+ aliases {
+ ethernet0 = &gmac1;
+ mmc0 = &sdmmc0;
+ };
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ clock_in_out = "input";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus
+ &gmac1m1_clkinout>;
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1_rstn>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pinctrl {
+ gmac1 {
+ gmac1_rstn: gmac1-rstn {
+ rockchip,pins = <3 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3w.dts b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3w.dts
new file mode 100644
index 000000000000..f92475c59deb
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3w.dts
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include "rk3566-radxa-zero-3.dtsi"
+
+/ {
+ model = "Radxa ZERO 3W";
+ compatible = "radxa,zero-3w", "rockchip,rk3566";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ mmc2 = &sdmmc1;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk817 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on_h>;
+ post-power-on-delay-ms = <100>;
+ power-off-delay-us = <5000000>;
+ reset-gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pinctrl {
+ bluetooth {
+ bt_reg_on_h: bt-reg-on-h {
+ rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host_h: bt-wake-host-h {
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ host_wake_bt_h: host-wake-bt-h {
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_reg_on_h: wifi-reg-on-h {
+ rockchip,pins = <0 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_wake_host_h: wifi-wake-host-h {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&sdhci {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ max-frequency = <200000000>;
+ mmc-hs200-1_8v;
+ no-sd;
+ no-sdio;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ no-sd;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_clk &sdmmc1_cmd>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
+ uart-has-rtscts;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
index 63eea27293fe..67e7801bd489 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
@@ -269,7 +269,7 @@
vcc9-supply = <&vcc3v3_sys>;

codec {
- mic-in-differential;
+ rockchip,mic-in-differential;
};

regulators {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
index b242409d378c..f2cc086e5001 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
@@ -633,7 +633,7 @@
flash@0 {
compatible = "jedec,spi-nor";
reg = <0x0>;
- spi-max-frequency = <120000000>;
+ spi-max-frequency = <104000000>;
spi-rx-bus-width = <4>;
spi-tx-bus-width = <1>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
index dd4e9c1893c6..e42c474ef4ad 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
@@ -614,6 +614,7 @@
};

&uart1 {
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
uart-has-rtscts;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
index 19f8fc369b13..8c3ab07d3807 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
@@ -475,7 +475,7 @@
};

codec {
- mic-in-differential;
+ rockchip,mic-in-differential;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
index 58ab7e9971db..b5e67990dd0f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
@@ -11,6 +11,10 @@
};
};

+&pmu_io_domains {
+ vccio3-supply = <&vccio_sd>;
+};
+
&sdmmc0 {
bus-width = <4>;
cap-mmc-highspeed;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
index 89e84e3a9262..25c49bdbadbc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
@@ -39,9 +39,9 @@
};
};

- dc_12v: dc-12v-regulator {
+ vcc12v_dcin: vcc12v-dcin-regulator {
compatible = "regulator-fixed";
- regulator-name = "dc_12v";
+ regulator-name = "vcc12v_dcin";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <12000000>;
@@ -65,7 +65,7 @@
regulator-boot-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- vin-supply = <&dc_12v>;
+ vin-supply = <&vcc12v_dcin>;
};

vcc5v0_sys: vcc5v0-sys-regulator {
@@ -75,16 +75,7 @@
regulator-boot-on;
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
- vin-supply = <&dc_12v>;
- };
-
- vcc5v0_usb_host: vcc5v0-usb-host-regulator {
- compatible = "regulator-fixed";
- regulator-name = "vcc5v0_usb_host";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
};

vcc5v0_usb_otg: vcc5v0-usb-otg-regulator {
@@ -94,8 +85,9 @@
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_usb_otg_en>;
regulator-name = "vcc5v0_usb_otg";
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
};
};
@@ -123,6 +115,10 @@
cpu-supply = <&vdd_cpu>;
};

+&display_subsystem {
+ status = "disabled";
+};
+
&gpu {
mali-supply = <&vdd_gpu>;
status = "okay";
@@ -405,8 +401,8 @@
&pmu_io_domains {
pmuio1-supply = <&vcc3v3_pmu>;
pmuio2-supply = <&vcc3v3_pmu>;
- vccio1-supply = <&vccio_acodec>;
- vccio3-supply = <&vccio_sd>;
+ vccio1-supply = <&vcc_3v3>;
+ vccio2-supply = <&vcc_1v8>;
vccio4-supply = <&vcc_1v8>;
vccio5-supply = <&vcc_3v3>;
vccio6-supply = <&vcc_1v8>;
@@ -429,28 +425,12 @@
status = "okay";
};

-&usb_host0_ehci {
- status = "okay";
-};
-
-&usb_host0_ohci {
- status = "okay";
-};
-
&usb_host0_xhci {
dr_mode = "host";
extcon = <&usb2phy0>;
status = "okay";
};

-&usb_host1_ehci {
- status = "okay";
-};
-
-&usb_host1_ohci {
- status = "okay";
-};
-
&usb_host1_xhci {
status = "okay";
};
@@ -460,7 +440,7 @@
};
&usb2phy0_host {
- phy-supply = <&vcc5v0_usb_host>;
+ phy-supply = <&vcc5v0_sys>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
index e1fe5e442689..ce2a5e1ccefc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
@@ -39,7 +39,7 @@
&gmac0_rx_bus2
&gmac0_rgmii_clk
&gmac0_rgmii_bus>;
- snps,reset-gpio = <&gpio0 RK_PB0 GPIO_ACTIVE_LOW>;
+ snps,reset-gpio = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
/* Reset time is 15ms, 50ms for rtl8211f */
snps,reset-delays-us = <0 15000 50000>;
@@ -61,7 +61,7 @@
&gmac1m1_rx_bus2
&gmac1m1_rgmii_clk
&gmac1m1_rgmii_bus>;
- snps,reset-gpio = <&gpio0 RK_PB1 GPIO_ACTIVE_LOW>;
+ snps,reset-gpio = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
/* Reset time is 15ms, 50ms for rtl8211f */
snps,reset-delays-us = <0 15000 50000>;
@@ -71,18 +71,18 @@
};
&mdio0 {
- rgmii_phy0: ethernet-phy@0 {
+ rgmii_phy0: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0>;
+ reg = <0x1>;
pinctrl-0 = <&eth_phy0_reset_pin>;
pinctrl-names = "default";
};
};
&mdio1 {
- rgmii_phy1: ethernet-phy@0 {
+ rgmii_phy1: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0>;
+ reg = <0x1>;
pinctrl-0 = <&eth_phy1_reset_pin>;
pinctrl-names = "default";
};
@@ -102,6 +102,10 @@
};
};
+&pmu_io_domains {
+ vccio3-supply = <&vcc_3v3>;
+};
+
&sdhci {
bus-width = <8>;
max-frequency = <200000000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
index ebdedea15ad1..59f1403b4fa5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
@@ -531,10 +531,6 @@
};
};
};
-
- codec {
- mic-in-differential;
- };
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3b.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3b.dts
new file mode 100644
index 000000000000..3d0c1ccfaa79
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3b.dts
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3568.dtsi"
+
+/ {
+ model = "Radxa ROCK 3B";
+ compatible = "radxa,rock-3b", "rockchip,rk3568";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ mmc2 = &sdmmc2;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PC2 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3_ir>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led>;
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ /* pi6c pcie clock generator */
+ vcc3v3_pi6c_03: regulator-3v3-vcc-pi6c-03 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwren_h>;
+ regulator-name = "vcc3v3_pi6c_03";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <10000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sys: regulator-3v3-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sys2: regulator-3v3-vcc-sys2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys2";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_usb_host: regulator-5v0-vcc-usb-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren_h>;
+ regulator-name = "vcc5v0_usb_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb_otg: regulator-5v0-vcc-usb-otg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_otg_pwren_h>;
+ regulator-name = "vcc5v0_usb_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk809 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on_h>;
+ post-power-on-delay-ms = <100>;
+ power-off-delay-us = <5000000>;
+ reset-gpios = <&gpio3 RK_PD4 GPIO_ACTIVE_LOW>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "Analog RK809";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&rk809>;
+ };
+ };
+};
+
+&combphy0 {
+ status = "okay";
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gmac0 {
+ assigned-clocks = <&cru SCLK_GMAC0_RX_TX>, <&cru SCLK_GMAC0>;
+ assigned-clock-parents = <&cru SCLK_GMAC0_RGMII_SPEED>, <&cru CLK_MAC0_2TOP>;
+ clock_in_out = "input";
+ phy-handle = <&rgmii_phy0>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac0_miim
+ &gmac0_tx_bus2
+ &gmac0_rx_bus2
+ &gmac0_rgmii_clk
+ &gmac0_rgmii_bus
+ &gmac0_clkinout>;
+ status = "okay";
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ clock_in_out = "input";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus
+ &gmac1m1_clkinout>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda0v9_image>;
+ avdd-1v8-supply = <&vcca1v8_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ assigned-clocks = <&cru I2S1_MCLKOUT_TX>;
+ assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>;
+ #clock-cells = <1>;
+ clocks = <&cru I2S1_MCLKOUT_TX>;
+ clock-names = "mclk";
+ clock-output-names = "rk809-clkout1", "rk809-clkout2";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>, <&i2s1m0_mclk>;
+ #sound-dai-cells = <0>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-name = "vdd_npu";
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-name = "vdda0v9_image";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-name = "vcca1v8_image";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-name = "vcc3v3_sd";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2c5 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <0>;
+ clock-output-names = "rtcic_32kout";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtcic_int_l>;
+ wakeup-source;
+ };
+};
+
+&i2s0_8ch {
+ status = "okay";
+};
+
+&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx
+ &i2s1m0_lrcktx
+ &i2s1m0_sdi0
+ &i2s1m0_sdo0>;
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+};
+
+&mdio0 {
+ rgmii_phy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pcie2x1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20m1_pins>;
+ reset-gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_sys2>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x2m1_pins>;
+ reset-gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&pinctrl {
+ bluetooth {
+ bt_reg_on_h: bt-reg-on-h {
+ rockchip,pins = <4 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_host_h: bt-wake-host-h {
+ rockchip,pins = <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ host_wake_bt_h: host-wake-bt-h {
+ rockchip,pins = <4 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ ir-receiver {
+ pwm3_ir: pwm3-ir {
+ rockchip,pins = <0 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ led: led {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie_pwren_h: pcie-pwren-h {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie20 {
+ pcie20m1_pins: pcie20m1-pins {
+ rockchip,pins =
+ <2 RK_PD0 4 &pcfg_pull_none>,
+ <3 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>,
+ <2 RK_PD1 4 &pcfg_pull_none>;
+ };
+ };
+
+ pcie30x2 {
+ pcie30x2m1_pins: pcie30x2m1-pins {
+ rockchip,pins =
+ <2 RK_PD4 4 &pcfg_pull_none>,
+ <2 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>,
+ <2 RK_PD5 4 &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ rtc {
+ rtcic_int_l: rtcic-int-l {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb {
+ usb_host_pwren_h: usb-host-pwren-h {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb_otg_pwren_h: usb-otg-pwren-h {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_reg_on_h: wifi-reg-on-h {
+ rockchip,pins = <3 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_wake_host_h: wifi-wake-host-h {
+ rockchip,pins = <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ vccio1-supply = <&vccio_acodec>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_1v8>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ max-frequency = <200000000>;
+ mmc-hs200-1_8v;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sdmmc2 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc2m0_bus4 &sdmmc2m0_clk &sdmmc2m0_cmd>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_sys2>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "disabled";
+};
+
+&sfc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart8m0_xfer &uart8m0_ctsn &uart8m0_rtsn>;
+ uart-has-rtscts;
+ status = "disabled";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ extcon = <&usb2phy0>;
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc5v0_usb_host>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_usb_otg>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_usb_host>;
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index d8543b5557ee..4690be841a1c 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -195,32 +195,32 @@
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
- opp-microvolt = <825000>;
+ opp-microvolt = <850000 850000 1000000>;
};
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
- opp-microvolt = <825000>;
+ opp-microvolt = <850000 850000 1000000>;
};
opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
- opp-microvolt = <825000>;
+ opp-microvolt = <850000 850000 1000000>;
};
opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <825000>;
+ opp-microvolt = <900000 900000 1000000>;
};
opp-700000000 {
opp-hz = /bits/ 64 <700000000>;
- opp-microvolt = <900000>;
+ opp-microvolt = <950000 950000 1000000>;
};
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <1000000>;
+ opp-microvolt = <1000000 1000000 1000000>;
};
};
@@ -790,6 +790,7 @@
clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
clock-names = "aclk", "iface";
#iommu-cells = <0>;
+ power-domains = <&power RK3568_PD_VO>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts b/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
index 98c622b27647..c667704ba985 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
@@ -673,6 +673,10 @@
};
};
+&tsadc {
+ status = "okay";
+};
+
&u2phy0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi
index 30db12c4fc82..30db12c4fc82 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
new file mode 100644
index 000000000000..b6e4df180f0b
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
@@ -0,0 +1,2799 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ */
+
+#include <dt-bindings/clock/rockchip,rk3588-cru.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/power/rk3588-power.h>
+#include <dt-bindings/reset/rockchip,rk3588-cru.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/ata/ahci.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ compatible = "rockchip,rk3588";
+
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ i2c7 = &i2c7;
+ i2c8 = &i2c8;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ serial4 = &uart4;
+ serial5 = &uart5;
+ serial6 = &uart6;
+ serial7 = &uart7;
+ serial8 = &uart8;
+ serial9 = &uart9;
+ spi0 = &spi0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ spi3 = &spi3;
+ spi4 = &spi4;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu_l0>;
+ };
+ core1 {
+ cpu = <&cpu_l1>;
+ };
+ core2 {
+ cpu = <&cpu_l2>;
+ };
+ core3 {
+ cpu = <&cpu_l3>;
+ };
+ };
+ cluster1 {
+ core0 {
+ cpu = <&cpu_b0>;
+ };
+ core1 {
+ cpu = <&cpu_b1>;
+ };
+ };
+ cluster2 {
+ core0 {
+ cpu = <&cpu_b2>;
+ };
+ core1 {
+ cpu = <&cpu_b3>;
+ };
+ };
+ };
+
+ cpu_l0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <530>;
+ clocks = <&scmi_clk SCMI_CLK_CPUL>;
+ assigned-clocks = <&scmi_clk SCMI_CLK_CPUL>;
+ assigned-clock-rates = <816000000>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l0>;
+ dynamic-power-coefficient = <228>;
+ #cooling-cells = <2>;
+ };
+
+ cpu_l1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x100>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <530>;
+ clocks = <&scmi_clk SCMI_CLK_CPUL>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l1>;
+ dynamic-power-coefficient = <228>;
+ #cooling-cells = <2>;
+ };
+
+ cpu_l2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x200>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <530>;
+ clocks = <&scmi_clk SCMI_CLK_CPUL>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l2>;
+ dynamic-power-coefficient = <228>;
+ #cooling-cells = <2>;
+ };
+
+ cpu_l3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x300>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <530>;
+ clocks = <&scmi_clk SCMI_CLK_CPUL>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l3>;
+ dynamic-power-coefficient = <228>;
+ #cooling-cells = <2>;
+ };
+
+ cpu_b0: cpu@400 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x400>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ clocks = <&scmi_clk SCMI_CLK_CPUB01>;
+ assigned-clocks = <&scmi_clk SCMI_CLK_CPUB01>;
+ assigned-clock-rates = <816000000>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <65536>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <65536>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_b0>;
+ dynamic-power-coefficient = <416>;
+ #cooling-cells = <2>;
+ };
+
+ cpu_b1: cpu@500 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x500>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ clocks = <&scmi_clk SCMI_CLK_CPUB01>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <65536>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <65536>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_b1>;
+ dynamic-power-coefficient = <416>;
+ #cooling-cells = <2>;
+ };
+
+ cpu_b2: cpu@600 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x600>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ clocks = <&scmi_clk SCMI_CLK_CPUB23>;
+ assigned-clocks = <&scmi_clk SCMI_CLK_CPUB23>;
+ assigned-clock-rates = <816000000>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <65536>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <65536>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_b2>;
+ dynamic-power-coefficient = <416>;
+ #cooling-cells = <2>;
+ };
+
+ cpu_b3: cpu@700 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x700>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ clocks = <&scmi_clk SCMI_CLK_CPUB23>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ i-cache-size = <65536>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <65536>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_b3>;
+ dynamic-power-coefficient = <416>;
+ #cooling-cells = <2>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+ CPU_SLEEP: cpu-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x0010000>;
+ entry-latency-us = <100>;
+ exit-latency-us = <120>;
+ min-residency-us = <1000>;
+ };
+ };
+
+ l2_cache_l0: l2-cache-l0 {
+ compatible = "cache";
+ cache-size = <131072>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l1: l2-cache-l1 {
+ compatible = "cache";
+ cache-size = <131072>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l2: l2-cache-l2 {
+ compatible = "cache";
+ cache-size = <131072>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l3: l2-cache-l3 {
+ compatible = "cache";
+ cache-size = <131072>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_b0: l2-cache-b0 {
+ compatible = "cache";
+ cache-size = <524288>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_b1: l2-cache-b1 {
+ compatible = "cache";
+ cache-size = <524288>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_b2: l2-cache-b2 {
+ compatible = "cache";
+ cache-size = <524288>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_b3: l2-cache-b3 {
+ compatible = "cache";
+ cache-size = <524288>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l3_cache: l3-cache {
+ compatible = "cache";
+ cache-size = <3145728>;
+ cache-line-size = <64>;
+ cache-sets = <4096>;
+ cache-level = <3>;
+ cache-unified;
+ };
+ };
+
+ display_subsystem: display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vop_out>;
+ };
+
+ firmware {
+ optee: optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+
+ scmi: scmi {
+ compatible = "arm,scmi-smc";
+ arm,smc-id = <0x82000010>;
+ shmem = <&scmi_shmem>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+
+ scmi_reset: protocol@16 {
+ reg = <0x16>;
+ #reset-cells = <1>;
+ };
+ };
+ };
+
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH &ppi_partition0>;
+ };
+
+ pmu-a76 {
+ compatible = "arm,cortex-a76-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH &ppi_partition1>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ spll: clock-0 {
+ compatible = "fixed-clock";
+ clock-frequency = <702000000>;
+ clock-output-names = "spll";
+ #clock-cells = <0>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_PPI 12 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", "hyp-virt";
+ };
+
+ xin24m: clock-1 {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xin24m";
+ #clock-cells = <0>;
+ };
+
+ xin32k: clock-2 {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ #clock-cells = <0>;
+ };
+
+ pmu_sram: sram@10f000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x0010f000 0x0 0x100>;
+ ranges = <0 0x0 0x0010f000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scmi_shmem: sram@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x100>;
+ };
+ };
+
+ gpu: gpu@fb000000 {
+ compatible = "rockchip,rk3588-mali", "arm,mali-valhall-csf";
+ reg = <0x0 0xfb000000 0x0 0x200000>;
+ #cooling-cells = <2>;
+ assigned-clocks = <&scmi_clk SCMI_CLK_GPU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru CLK_GPU>, <&cru CLK_GPU_COREGROUP>,
+ <&cru CLK_GPU_STACKS>;
+ clock-names = "core", "coregroup", "stacks";
+ dynamic-power-coefficient = <2982>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "job", "mmu", "gpu";
+ power-domains = <&power RK3588_PD_GPU>;
+ status = "disabled";
+ };
+
+ usb_host0_xhci: usb@fc000000 {
+ compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
+ reg = <0x0 0xfc000000 0x0 0x400000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru REF_CLK_USB3OTG0>, <&cru SUSPEND_CLK_USB3OTG0>,
+ <&cru ACLK_USB3OTG0>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk";
+ dr_mode = "otg";
+ phys = <&u2phy0_otg>, <&usbdp_phy0 PHY_TYPE_USB3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ power-domains = <&power RK3588_PD_USB>;
+ resets = <&cru SRST_A_USB3OTG0>;
+ snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ status = "disabled";
+ };
+
+ usb_host0_ehci: usb@fc800000 {
+ compatible = "rockchip,rk3588-ehci", "generic-ehci";
+ reg = <0x0 0xfc800000 0x0 0x40000>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST_ARB0>, <&cru ACLK_USB>, <&u2phy2>;
+ phys = <&u2phy2_host>;
+ phy-names = "usb";
+ power-domains = <&power RK3588_PD_USB>;
+ status = "disabled";
+ };
+
+ usb_host0_ohci: usb@fc840000 {
+ compatible = "rockchip,rk3588-ohci", "generic-ohci";
+ reg = <0x0 0xfc840000 0x0 0x40000>;
+ interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST_ARB0>, <&cru ACLK_USB>, <&u2phy2>;
+ phys = <&u2phy2_host>;
+ phy-names = "usb";
+ power-domains = <&power RK3588_PD_USB>;
+ status = "disabled";
+ };
+
+ usb_host1_ehci: usb@fc880000 {
+ compatible = "rockchip,rk3588-ehci", "generic-ehci";
+ reg = <0x0 0xfc880000 0x0 0x40000>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST_ARB1>, <&cru ACLK_USB>, <&u2phy3>;
+ phys = <&u2phy3_host>;
+ phy-names = "usb";
+ power-domains = <&power RK3588_PD_USB>;
+ status = "disabled";
+ };
+
+ usb_host1_ohci: usb@fc8c0000 {
+ compatible = "rockchip,rk3588-ohci", "generic-ohci";
+ reg = <0x0 0xfc8c0000 0x0 0x40000>;
+ interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST_ARB1>, <&cru ACLK_USB>, <&u2phy3>;
+ phys = <&u2phy3_host>;
+ phy-names = "usb";
+ power-domains = <&power RK3588_PD_USB>;
+ status = "disabled";
+ };
+
+ usb_host2_xhci: usb@fcd00000 {
+ compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
+ reg = <0x0 0xfcd00000 0x0 0x400000>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru REF_CLK_USB3OTG2>, <&cru SUSPEND_CLK_USB3OTG2>,
+ <&cru ACLK_USB3OTG2>, <&cru CLK_UTMI_OTG2>,
+ <&cru CLK_PIPEPHY2_PIPE_U3_G>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk", "utmi", "pipe";
+ dr_mode = "host";
+ phys = <&combphy2_psu PHY_TYPE_USB3>;
+ phy-names = "usb3-phy";
+ phy_type = "utmi_wide";
+ resets = <&cru SRST_A_USB3OTG2>;
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ snps,dis_rxdet_inp3_quirk;
+ status = "disabled";
+ };
+
+ mmu600_pcie: iommu@fc900000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xfc900000 0x0 0x200000>;
+ interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ mmu600_php: iommu@fcb00000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xfcb00000 0x0 0x200000>;
+ interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ pmu1grf: syscon@fd58a000 {
+ compatible = "rockchip,rk3588-pmugrf", "syscon", "simple-mfd";
+ reg = <0x0 0xfd58a000 0x0 0x10000>;
+ };
+
+ sys_grf: syscon@fd58c000 {
+ compatible = "rockchip,rk3588-sys-grf", "syscon";
+ reg = <0x0 0xfd58c000 0x0 0x1000>;
+ };
+
+ vop_grf: syscon@fd5a4000 {
+ compatible = "rockchip,rk3588-vop-grf", "syscon";
+ reg = <0x0 0xfd5a4000 0x0 0x2000>;
+ };
+
+ vo0_grf: syscon@fd5a6000 {
+ compatible = "rockchip,rk3588-vo-grf", "syscon";
+ reg = <0x0 0xfd5a6000 0x0 0x2000>;
+ clocks = <&cru PCLK_VO0GRF>;
+ };
+
+ vo1_grf: syscon@fd5a8000 {
+ compatible = "rockchip,rk3588-vo-grf", "syscon";
+ reg = <0x0 0xfd5a8000 0x0 0x100>;
+ clocks = <&cru PCLK_VO1GRF>;
+ };
+
+ usb_grf: syscon@fd5ac000 {
+ compatible = "rockchip,rk3588-usb-grf", "syscon";
+ reg = <0x0 0xfd5ac000 0x0 0x4000>;
+ };
+
+ php_grf: syscon@fd5b0000 {
+ compatible = "rockchip,rk3588-php-grf", "syscon";
+ reg = <0x0 0xfd5b0000 0x0 0x1000>;
+ };
+
+ pipe_phy0_grf: syscon@fd5bc000 {
+ compatible = "rockchip,rk3588-pipe-phy-grf", "syscon";
+ reg = <0x0 0xfd5bc000 0x0 0x100>;
+ };
+
+ pipe_phy2_grf: syscon@fd5c4000 {
+ compatible = "rockchip,rk3588-pipe-phy-grf", "syscon";
+ reg = <0x0 0xfd5c4000 0x0 0x100>;
+ };
+
+ usbdpphy0_grf: syscon@fd5c8000 {
+ compatible = "rockchip,rk3588-usbdpphy-grf", "syscon";
+ reg = <0x0 0xfd5c8000 0x0 0x4000>;
+ };
+
+ usb2phy0_grf: syscon@fd5d0000 {
+ compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xfd5d0000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy0: usb2phy@0 {
+ compatible = "rockchip,rk3588-usb2phy";
+ reg = <0x0 0x10>;
+ #clock-cells = <0>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy0";
+ interrupts = <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U3_0>, <&cru SRST_P_USB2PHY_U3_0_GRF0>;
+ reset-names = "phy", "apb";
+ status = "disabled";
+
+ u2phy0_otg: otg-port {
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
+ usb2phy2_grf: syscon@fd5d8000 {
+ compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xfd5d8000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy2: usb2phy@8000 {
+ compatible = "rockchip,rk3588-usb2phy";
+ reg = <0x8000 0x10>;
+ #clock-cells = <0>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy2";
+ interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U2_0>, <&cru SRST_P_USB2PHY_U2_0_GRF0>;
+ reset-names = "phy", "apb";
+ status = "disabled";
+
+ u2phy2_host: host-port {
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
+ usb2phy3_grf: syscon@fd5dc000 {
+ compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xfd5dc000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy3: usb2phy@c000 {
+ compatible = "rockchip,rk3588-usb2phy";
+ reg = <0xc000 0x10>;
+ #clock-cells = <0>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy3";
+ interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U2_1>, <&cru SRST_P_USB2PHY_U2_1_GRF0>;
+ reset-names = "phy", "apb";
+ status = "disabled";
+
+ u2phy3_host: host-port {
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
+ hdptxphy0_grf: syscon@fd5e0000 {
+ compatible = "rockchip,rk3588-hdptxphy-grf", "syscon";
+ reg = <0x0 0xfd5e0000 0x0 0x100>;
+ };
+
+ ioc: syscon@fd5f0000 {
+ compatible = "rockchip,rk3588-ioc", "syscon";
+ reg = <0x0 0xfd5f0000 0x0 0x10000>;
+ };
+
+ system_sram1: sram@fd600000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0xfd600000 0x0 0x100000>;
+ ranges = <0x0 0x0 0xfd600000 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ cru: clock-controller@fd7c0000 {
+ compatible = "rockchip,rk3588-cru";
+ reg = <0x0 0xfd7c0000 0x0 0x5c000>;
+ assigned-clocks =
+ <&cru PLL_PPLL>, <&cru PLL_AUPLL>,
+ <&cru PLL_NPLL>, <&cru PLL_GPLL>,
+ <&cru ACLK_CENTER_ROOT>,
+ <&cru HCLK_CENTER_ROOT>, <&cru ACLK_CENTER_LOW_ROOT>,
+ <&cru ACLK_TOP_ROOT>, <&cru PCLK_TOP_ROOT>,
+ <&cru ACLK_LOW_TOP_ROOT>, <&cru PCLK_PMU0_ROOT>,
+ <&cru HCLK_PMU_CM0_ROOT>, <&cru ACLK_VOP>,
+ <&cru ACLK_BUS_ROOT>, <&cru CLK_150M_SRC>,
+ <&cru CLK_GPU>;
+ assigned-clock-rates =
+ <1100000000>, <786432000>,
+ <850000000>, <1188000000>,
+ <702000000>,
+ <400000000>, <500000000>,
+ <800000000>, <100000000>,
+ <400000000>, <100000000>,
+ <200000000>, <500000000>,
+ <375000000>, <150000000>,
+ <200000000>;
+ rockchip,grf = <&php_grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ i2c0: i2c@fd880000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfd880000 0x0 0x1000>;
+ interrupts = <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru CLK_I2C0>, <&cru PCLK_I2C0>;
+ clock-names = "i2c", "pclk";
+ pinctrl-0 = <&i2c0m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart0: serial@fd890000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfd890000 0x0 0x100>;
+ interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 6>, <&dmac0 7>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart0m1_xfer>;
+ pinctrl-names = "default";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ pwm0: pwm@fd8b0000 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfd8b0000 0x0 0x10>;
+ clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm0m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@fd8b0010 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfd8b0010 0x0 0x10>;
+ clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm1m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@fd8b0020 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfd8b0020 0x0 0x10>;
+ clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm2m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@fd8b0030 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfd8b0030 0x0 0x10>;
+ clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm3m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pmu: power-management@fd8d8000 {
+ compatible = "rockchip,rk3588-pmu", "syscon", "simple-mfd";
+ reg = <0x0 0xfd8d8000 0x0 0x400>;
+
+ power: power-controller {
+ compatible = "rockchip,rk3588-power-controller";
+ #address-cells = <1>;
+ #power-domain-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ /* These power domains are grouped by VD_NPU */
+ power-domain@RK3588_PD_NPU {
+ reg = <RK3588_PD_NPU>;
+ #power-domain-cells = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@RK3588_PD_NPUTOP {
+ reg = <RK3588_PD_NPUTOP>;
+ clocks = <&cru HCLK_NPU_ROOT>,
+ <&cru PCLK_NPU_ROOT>,
+ <&cru CLK_NPU_DSU0>,
+ <&cru HCLK_NPU_CM0_ROOT>;
+ pm_qos = <&qos_npu0_mwr>,
+ <&qos_npu0_mro>,
+ <&qos_mcu_npu>;
+ #power-domain-cells = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@RK3588_PD_NPU1 {
+ reg = <RK3588_PD_NPU1>;
+ clocks = <&cru HCLK_NPU_ROOT>,
+ <&cru PCLK_NPU_ROOT>,
+ <&cru CLK_NPU_DSU0>;
+ pm_qos = <&qos_npu1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_NPU2 {
+ reg = <RK3588_PD_NPU2>;
+ clocks = <&cru HCLK_NPU_ROOT>,
+ <&cru PCLK_NPU_ROOT>,
+ <&cru CLK_NPU_DSU0>;
+ pm_qos = <&qos_npu2>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+ /* These power domains are grouped by VD_GPU */
+ power-domain@RK3588_PD_GPU {
+ reg = <RK3588_PD_GPU>;
+ clocks = <&cru CLK_GPU>,
+ <&cru CLK_GPU_COREGROUP>,
+ <&cru CLK_GPU_STACKS>;
+ pm_qos = <&qos_gpu_m0>,
+ <&qos_gpu_m1>,
+ <&qos_gpu_m2>,
+ <&qos_gpu_m3>;
+ #power-domain-cells = <0>;
+ };
+ /* These power domains are grouped by VD_VCODEC */
+ power-domain@RK3588_PD_VCODEC {
+ reg = <RK3588_PD_VCODEC>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ power-domain@RK3588_PD_RKVDEC0 {
+ reg = <RK3588_PD_RKVDEC0>;
+ clocks = <&cru HCLK_RKVDEC0>,
+ <&cru HCLK_VDPU_ROOT>,
+ <&cru ACLK_VDPU_ROOT>,
+ <&cru ACLK_RKVDEC0>,
+ <&cru ACLK_RKVDEC_CCU>;
+ pm_qos = <&qos_rkvdec0>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_RKVDEC1 {
+ reg = <RK3588_PD_RKVDEC1>;
+ clocks = <&cru HCLK_RKVDEC1>,
+ <&cru HCLK_VDPU_ROOT>,
+ <&cru ACLK_VDPU_ROOT>,
+ <&cru ACLK_RKVDEC1>;
+ pm_qos = <&qos_rkvdec1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_VENC0 {
+ reg = <RK3588_PD_VENC0>;
+ clocks = <&cru HCLK_RKVENC0>,
+ <&cru ACLK_RKVENC0>;
+ pm_qos = <&qos_rkvenc0_m0ro>,
+ <&qos_rkvenc0_m1ro>,
+ <&qos_rkvenc0_m2wo>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ power-domain@RK3588_PD_VENC1 {
+ reg = <RK3588_PD_VENC1>;
+ clocks = <&cru HCLK_RKVENC1>,
+ <&cru HCLK_RKVENC0>,
+ <&cru ACLK_RKVENC0>,
+ <&cru ACLK_RKVENC1>;
+ pm_qos = <&qos_rkvenc1_m0ro>,
+ <&qos_rkvenc1_m1ro>,
+ <&qos_rkvenc1_m2wo>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+ /* These power domains are grouped by VD_LOGIC */
+ power-domain@RK3588_PD_VDPU {
+ reg = <RK3588_PD_VDPU>;
+ clocks = <&cru HCLK_VDPU_ROOT>,
+ <&cru ACLK_VDPU_LOW_ROOT>,
+ <&cru ACLK_VDPU_ROOT>,
+ <&cru ACLK_JPEG_DECODER_ROOT>,
+ <&cru ACLK_IEP2P0>,
+ <&cru HCLK_IEP2P0>,
+ <&cru ACLK_JPEG_ENCODER0>,
+ <&cru HCLK_JPEG_ENCODER0>,
+ <&cru ACLK_JPEG_ENCODER1>,
+ <&cru HCLK_JPEG_ENCODER1>,
+ <&cru ACLK_JPEG_ENCODER2>,
+ <&cru HCLK_JPEG_ENCODER2>,
+ <&cru ACLK_JPEG_ENCODER3>,
+ <&cru HCLK_JPEG_ENCODER3>,
+ <&cru ACLK_JPEG_DECODER>,
+ <&cru HCLK_JPEG_DECODER>,
+ <&cru ACLK_RGA2>,
+ <&cru HCLK_RGA2>;
+ pm_qos = <&qos_iep>,
+ <&qos_jpeg_dec>,
+ <&qos_jpeg_enc0>,
+ <&qos_jpeg_enc1>,
+ <&qos_jpeg_enc2>,
+ <&qos_jpeg_enc3>,
+ <&qos_rga2_mro>,
+ <&qos_rga2_mwo>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+
+ power-domain@RK3588_PD_AV1 {
+ reg = <RK3588_PD_AV1>;
+ clocks = <&cru PCLK_AV1>,
+ <&cru ACLK_AV1>,
+ <&cru HCLK_VDPU_ROOT>;
+ pm_qos = <&qos_av1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_RKVDEC0 {
+ reg = <RK3588_PD_RKVDEC0>;
+ clocks = <&cru HCLK_RKVDEC0>,
+ <&cru HCLK_VDPU_ROOT>,
+ <&cru ACLK_VDPU_ROOT>,
+ <&cru ACLK_RKVDEC0>;
+ pm_qos = <&qos_rkvdec0>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_RKVDEC1 {
+ reg = <RK3588_PD_RKVDEC1>;
+ clocks = <&cru HCLK_RKVDEC1>,
+ <&cru HCLK_VDPU_ROOT>,
+ <&cru ACLK_VDPU_ROOT>;
+ pm_qos = <&qos_rkvdec1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_RGA30 {
+ reg = <RK3588_PD_RGA30>;
+ clocks = <&cru ACLK_RGA3_0>,
+ <&cru HCLK_RGA3_0>;
+ pm_qos = <&qos_rga3_0>;
+ #power-domain-cells = <0>;
+ };
+ };
+ power-domain@RK3588_PD_VOP {
+ reg = <RK3588_PD_VOP>;
+ clocks = <&cru PCLK_VOP_ROOT>,
+ <&cru HCLK_VOP_ROOT>,
+ <&cru ACLK_VOP>;
+ pm_qos = <&qos_vop_m0>,
+ <&qos_vop_m1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ power-domain@RK3588_PD_VO0 {
+ reg = <RK3588_PD_VO0>;
+ clocks = <&cru PCLK_VO0_ROOT>,
+ <&cru PCLK_VO0_S_ROOT>,
+ <&cru HCLK_VO0_S_ROOT>,
+ <&cru ACLK_VO0_ROOT>,
+ <&cru HCLK_HDCP0>,
+ <&cru ACLK_HDCP0>,
+ <&cru HCLK_VOP_ROOT>;
+ pm_qos = <&qos_hdcp0>;
+ #power-domain-cells = <0>;
+ };
+ };
+ power-domain@RK3588_PD_VO1 {
+ reg = <RK3588_PD_VO1>;
+ clocks = <&cru PCLK_VO1_ROOT>,
+ <&cru PCLK_VO1_S_ROOT>,
+ <&cru HCLK_VO1_S_ROOT>,
+ <&cru HCLK_HDCP1>,
+ <&cru ACLK_HDCP1>,
+ <&cru ACLK_HDMIRX_ROOT>,
+ <&cru HCLK_VO1USB_TOP_ROOT>;
+ pm_qos = <&qos_hdcp1>,
+ <&qos_hdmirx>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_VI {
+ reg = <RK3588_PD_VI>;
+ clocks = <&cru HCLK_VI_ROOT>,
+ <&cru PCLK_VI_ROOT>,
+ <&cru HCLK_ISP0>,
+ <&cru ACLK_ISP0>,
+ <&cru HCLK_VICAP>,
+ <&cru ACLK_VICAP>;
+ pm_qos = <&qos_isp0_mro>,
+ <&qos_isp0_mwo>,
+ <&qos_vicap_m0>,
+ <&qos_vicap_m1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ power-domain@RK3588_PD_ISP1 {
+ reg = <RK3588_PD_ISP1>;
+ clocks = <&cru HCLK_ISP1>,
+ <&cru ACLK_ISP1>,
+ <&cru HCLK_VI_ROOT>,
+ <&cru PCLK_VI_ROOT>;
+ pm_qos = <&qos_isp1_mwo>,
+ <&qos_isp1_mro>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_FEC {
+ reg = <RK3588_PD_FEC>;
+ clocks = <&cru HCLK_FISHEYE0>,
+ <&cru ACLK_FISHEYE0>,
+ <&cru HCLK_FISHEYE1>,
+ <&cru ACLK_FISHEYE1>,
+ <&cru PCLK_VI_ROOT>;
+ pm_qos = <&qos_fisheye0>,
+ <&qos_fisheye1>;
+ #power-domain-cells = <0>;
+ };
+ };
+ power-domain@RK3588_PD_RGA31 {
+ reg = <RK3588_PD_RGA31>;
+ clocks = <&cru HCLK_RGA3_1>,
+ <&cru ACLK_RGA3_1>;
+ pm_qos = <&qos_rga3_1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_USB {
+ reg = <RK3588_PD_USB>;
+ clocks = <&cru PCLK_PHP_ROOT>,
+ <&cru ACLK_USB_ROOT>,
+ <&cru ACLK_USB>,
+ <&cru HCLK_USB_ROOT>,
+ <&cru HCLK_HOST0>,
+ <&cru HCLK_HOST_ARB0>,
+ <&cru HCLK_HOST1>,
+ <&cru HCLK_HOST_ARB1>;
+ pm_qos = <&qos_usb3_0>,
+ <&qos_usb3_1>,
+ <&qos_usb2host_0>,
+ <&qos_usb2host_1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_GMAC {
+ reg = <RK3588_PD_GMAC>;
+ clocks = <&cru PCLK_PHP_ROOT>,
+ <&cru ACLK_PCIE_ROOT>,
+ <&cru ACLK_PHP_ROOT>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_PCIE {
+ reg = <RK3588_PD_PCIE>;
+ clocks = <&cru PCLK_PHP_ROOT>,
+ <&cru ACLK_PCIE_ROOT>,
+ <&cru ACLK_PHP_ROOT>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_SDIO {
+ reg = <RK3588_PD_SDIO>;
+ clocks = <&cru HCLK_SDIO>,
+ <&cru HCLK_NVM_ROOT>;
+ pm_qos = <&qos_sdio>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_AUDIO {
+ reg = <RK3588_PD_AUDIO>;
+ clocks = <&cru HCLK_AUDIO_ROOT>,
+ <&cru PCLK_AUDIO_ROOT>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3588_PD_SDMMC {
+ reg = <RK3588_PD_SDMMC>;
+ pm_qos = <&qos_sdmmc>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ av1d: video-codec@fdc70000 {
+ compatible = "rockchip,rk3588-av1-vpu";
+ reg = <0x0 0xfdc70000 0x0 0x800>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "vdpu";
+ assigned-clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
+ assigned-clock-rates = <400000000>, <400000000>;
+ clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
+ clock-names = "aclk", "hclk";
+ power-domains = <&power RK3588_PD_AV1>;
+ resets = <&cru SRST_A_AV1>, <&cru SRST_P_AV1>, <&cru SRST_A_AV1_BIU>, <&cru SRST_P_AV1_BIU>;
+ };
+
+ vop: vop@fdd90000 {
+ compatible = "rockchip,rk3588-vop";
+ reg = <0x0 0xfdd90000 0x0 0x4200>, <0x0 0xfdd95000 0x0 0x1000>;
+ reg-names = "vop", "gamma-lut";
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VOP>,
+ <&cru HCLK_VOP>,
+ <&cru DCLK_VOP0>,
+ <&cru DCLK_VOP1>,
+ <&cru DCLK_VOP2>,
+ <&cru DCLK_VOP3>,
+ <&cru PCLK_VOP_ROOT>;
+ clock-names = "aclk",
+ "hclk",
+ "dclk_vp0",
+ "dclk_vp1",
+ "dclk_vp2",
+ "dclk_vp3",
+ "pclk_vop";
+ iommus = <&vop_mmu>;
+ power-domains = <&power RK3588_PD_VOP>;
+ rockchip,grf = <&sys_grf>;
+ rockchip,vop-grf = <&vop_grf>;
+ rockchip,vo1-grf = <&vo1_grf>;
+ rockchip,pmu = <&pmu>;
+ status = "disabled";
+
+ vop_out: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vp0: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ vp1: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ vp2: port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ vp3: port@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+ };
+
+ vop_mmu: iommu@fdd97e00 {
+ compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0xfdd97e00 0x0 0x100>, <0x0 0xfdd97f00 0x0 0x100>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ power-domains = <&power RK3588_PD_VOP>;
+ status = "disabled";
+ };
+
+ i2s4_8ch: i2s@fddc0000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfddc0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S4_8CH_TX>, <&cru MCLK_I2S4_8CH_TX>, <&cru HCLK_I2S4_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S4_8CH_TX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac2 0>;
+ dma-names = "tx";
+ power-domains = <&power RK3588_PD_VO0>;
+ resets = <&cru SRST_M_I2S4_8CH_TX>;
+ reset-names = "tx-m";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s5_8ch: i2s@fddf0000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfddf0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S5_8CH_TX>, <&cru MCLK_I2S5_8CH_TX>, <&cru HCLK_I2S5_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S5_8CH_TX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac2 2>;
+ dma-names = "tx";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_M_I2S5_8CH_TX>;
+ reset-names = "tx-m";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s9_8ch: i2s@fddfc000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfddfc000 0x0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S9_8CH_RX>, <&cru MCLK_I2S9_8CH_RX>, <&cru HCLK_I2S9_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S9_8CH_RX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac2 23>;
+ dma-names = "rx";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_M_I2S9_8CH_RX>;
+ reset-names = "rx-m";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ qos_gpu_m0: qos@fdf35000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf35000 0x0 0x20>;
+ };
+
+ qos_gpu_m1: qos@fdf35200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf35200 0x0 0x20>;
+ };
+
+ qos_gpu_m2: qos@fdf35400 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf35400 0x0 0x20>;
+ };
+
+ qos_gpu_m3: qos@fdf35600 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf35600 0x0 0x20>;
+ };
+
+ qos_rga3_1: qos@fdf36000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf36000 0x0 0x20>;
+ };
+
+ qos_sdio: qos@fdf39000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf39000 0x0 0x20>;
+ };
+
+ qos_sdmmc: qos@fdf3d800 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf3d800 0x0 0x20>;
+ };
+
+ qos_usb3_1: qos@fdf3e000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf3e000 0x0 0x20>;
+ };
+
+ qos_usb3_0: qos@fdf3e200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf3e200 0x0 0x20>;
+ };
+
+ qos_usb2host_0: qos@fdf3e400 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf3e400 0x0 0x20>;
+ };
+
+ qos_usb2host_1: qos@fdf3e600 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf3e600 0x0 0x20>;
+ };
+
+ qos_fisheye0: qos@fdf40000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf40000 0x0 0x20>;
+ };
+
+ qos_fisheye1: qos@fdf40200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf40200 0x0 0x20>;
+ };
+
+ qos_isp0_mro: qos@fdf40400 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf40400 0x0 0x20>;
+ };
+
+ qos_isp0_mwo: qos@fdf40500 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf40500 0x0 0x20>;
+ };
+
+ qos_vicap_m0: qos@fdf40600 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf40600 0x0 0x20>;
+ };
+
+ qos_vicap_m1: qos@fdf40800 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf40800 0x0 0x20>;
+ };
+
+ qos_isp1_mwo: qos@fdf41000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf41000 0x0 0x20>;
+ };
+
+ qos_isp1_mro: qos@fdf41100 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf41100 0x0 0x20>;
+ };
+
+ qos_rkvenc0_m0ro: qos@fdf60000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf60000 0x0 0x20>;
+ };
+
+ qos_rkvenc0_m1ro: qos@fdf60200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf60200 0x0 0x20>;
+ };
+
+ qos_rkvenc0_m2wo: qos@fdf60400 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf60400 0x0 0x20>;
+ };
+
+ qos_rkvenc1_m0ro: qos@fdf61000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf61000 0x0 0x20>;
+ };
+
+ qos_rkvenc1_m1ro: qos@fdf61200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf61200 0x0 0x20>;
+ };
+
+ qos_rkvenc1_m2wo: qos@fdf61400 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf61400 0x0 0x20>;
+ };
+
+ qos_rkvdec0: qos@fdf62000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf62000 0x0 0x20>;
+ };
+
+ qos_rkvdec1: qos@fdf63000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf63000 0x0 0x20>;
+ };
+
+ qos_av1: qos@fdf64000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf64000 0x0 0x20>;
+ };
+
+ qos_iep: qos@fdf66000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66000 0x0 0x20>;
+ };
+
+ qos_jpeg_dec: qos@fdf66200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66200 0x0 0x20>;
+ };
+
+ qos_jpeg_enc0: qos@fdf66400 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66400 0x0 0x20>;
+ };
+
+ qos_jpeg_enc1: qos@fdf66600 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66600 0x0 0x20>;
+ };
+
+ qos_jpeg_enc2: qos@fdf66800 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66800 0x0 0x20>;
+ };
+
+ qos_jpeg_enc3: qos@fdf66a00 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66a00 0x0 0x20>;
+ };
+
+ qos_rga2_mro: qos@fdf66c00 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66c00 0x0 0x20>;
+ };
+
+ qos_rga2_mwo: qos@fdf66e00 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf66e00 0x0 0x20>;
+ };
+
+ qos_rga3_0: qos@fdf67000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf67000 0x0 0x20>;
+ };
+
+ qos_vdpu: qos@fdf67200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf67200 0x0 0x20>;
+ };
+
+ qos_npu1: qos@fdf70000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf70000 0x0 0x20>;
+ };
+
+ qos_npu2: qos@fdf71000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf71000 0x0 0x20>;
+ };
+
+ qos_npu0_mwr: qos@fdf72000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf72000 0x0 0x20>;
+ };
+
+ qos_npu0_mro: qos@fdf72200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf72200 0x0 0x20>;
+ };
+
+ qos_mcu_npu: qos@fdf72400 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf72400 0x0 0x20>;
+ };
+
+ qos_hdcp0: qos@fdf80000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf80000 0x0 0x20>;
+ };
+
+ qos_hdcp1: qos@fdf81000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf81000 0x0 0x20>;
+ };
+
+ qos_hdmirx: qos@fdf81200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf81200 0x0 0x20>;
+ };
+
+ qos_vop_m0: qos@fdf82000 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf82000 0x0 0x20>;
+ };
+
+ qos_vop_m1: qos@fdf82200 {
+ compatible = "rockchip,rk3588-qos", "syscon";
+ reg = <0x0 0xfdf82200 0x0 0x20>;
+ };
+
+ dfi: dfi@fe060000 {
+ reg = <0x00 0xfe060000 0x00 0x10000>;
+ compatible = "rockchip,rk3588-dfi";
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
+ rockchip,pmu = <&pmu1grf>;
+ };
+
+ pcie2x1l1: pcie@fe180000 {
+ compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
+ bus-range = <0x30 0x3f>;
+ clocks = <&cru ACLK_PCIE_1L1_MSTR>, <&cru ACLK_PCIE_1L1_SLV>,
+ <&cru ACLK_PCIE_1L1_DBI>, <&cru PCLK_PCIE_1L1>,
+ <&cru CLK_PCIE_AUX3>, <&cru CLK_PCIE1L1_PIPE>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe";
+ device_type = "pci";
+ interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie2x1l1_intc 0>,
+ <0 0 0 2 &pcie2x1l1_intc 1>,
+ <0 0 0 3 &pcie2x1l1_intc 2>,
+ <0 0 0 4 &pcie2x1l1_intc 3>;
+ linux,pci-domain = <3>;
+ max-link-speed = <2>;
+ msi-map = <0x3000 &its0 0x3000 0x1000>;
+ num-lanes = <1>;
+ phys = <&combphy2_psu PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf3100000 0x0 0xf3100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf3200000 0x0 0xf3200000 0x0 0x00e00000>,
+ <0x03000000 0x0 0x40000000 0x9 0xc0000000 0x0 0x40000000>;
+ reg = <0xa 0x40c00000 0x0 0x00400000>,
+ <0x0 0xfe180000 0x0 0x00010000>,
+ <0x0 0xf3000000 0x0 0x00100000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE3_POWER_UP>, <&cru SRST_P_PCIE3>;
+ reset-names = "pwr", "pipe";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ pcie2x1l1_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 245 IRQ_TYPE_EDGE_RISING 0>;
+ };
+ };
+
+ pcie2x1l2: pcie@fe190000 {
+ compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
+ bus-range = <0x40 0x4f>;
+ clocks = <&cru ACLK_PCIE_1L2_MSTR>, <&cru ACLK_PCIE_1L2_SLV>,
+ <&cru ACLK_PCIE_1L2_DBI>, <&cru PCLK_PCIE_1L2>,
+ <&cru CLK_PCIE_AUX4>, <&cru CLK_PCIE1L2_PIPE>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe";
+ device_type = "pci";
+ interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie2x1l2_intc 0>,
+ <0 0 0 2 &pcie2x1l2_intc 1>,
+ <0 0 0 3 &pcie2x1l2_intc 2>,
+ <0 0 0 4 &pcie2x1l2_intc 3>;
+ linux,pci-domain = <4>;
+ max-link-speed = <2>;
+ msi-map = <0x4000 &its0 0x4000 0x1000>;
+ num-lanes = <1>;
+ phys = <&combphy0_ps PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x00e00000>,
+ <0x03000000 0x0 0x40000000 0xa 0x00000000 0x0 0x40000000>;
+ reg = <0xa 0x41000000 0x0 0x00400000>,
+ <0x0 0xfe190000 0x0 0x00010000>,
+ <0x0 0xf4000000 0x0 0x00100000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE4_POWER_UP>, <&cru SRST_P_PCIE4>;
+ reset-names = "pwr", "pipe";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ pcie2x1l2_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 250 IRQ_TYPE_EDGE_RISING 0>;
+ };
+ };
+
+ gmac1: ethernet@fe1c0000 {
+ compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
+ reg = <0x0 0xfe1c0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ clocks = <&cru CLK_GMAC_125M>, <&cru CLK_GMAC_50M>,
+ <&cru PCLK_GMAC1>, <&cru ACLK_GMAC1>,
+ <&cru CLK_GMAC1_PTP_REF>;
+ clock-names = "stmmaceth", "clk_mac_ref",
+ "pclk_mac", "aclk_mac",
+ "ptp_ref";
+ power-domains = <&power RK3588_PD_GMAC>;
+ resets = <&cru SRST_A_GMAC1>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&sys_grf>;
+ rockchip,php-grf = <&php_grf>;
+ snps,axi-config = <&gmac1_stmmac_axi_setup>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&gmac1_mtl_rx_setup>;
+ snps,mtl-tx-config = <&gmac1_mtl_tx_setup>;
+ snps,tso;
+ status = "disabled";
+
+ mdio1: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ };
+
+ gmac1_stmmac_axi_setup: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,wr_osr_lmt = <4>;
+ snps,rd_osr_lmt = <8>;
+ };
+
+ gmac1_mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+
+ gmac1_mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+ };
+
+ sata0: sata@fe210000 {
+ compatible = "rockchip,rk3588-dwc-ahci", "snps,dwc-ahci";
+ reg = <0 0xfe210000 0 0x1000>;
+ interrupts = <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_SATA0>, <&cru CLK_PMALIVE0>,
+ <&cru CLK_RXOOB0>, <&cru CLK_PIPEPHY0_REF>,
+ <&cru CLK_PIPEPHY0_PIPE_ASIC_G>;
+ clock-names = "sata", "pmalive", "rxoob", "ref", "asic";
+ ports-implemented = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata-port@0 {
+ reg = <0>;
+ hba-port-cap = <HBA_PORT_FBSCP>;
+ phys = <&combphy0_ps PHY_TYPE_SATA>;
+ phy-names = "sata-phy";
+ snps,rx-ts-max = <32>;
+ snps,tx-ts-max = <32>;
+ };
+ };
+
+ sata2: sata@fe230000 {
+ compatible = "rockchip,rk3588-dwc-ahci", "snps,dwc-ahci";
+ reg = <0 0xfe230000 0 0x1000>;
+ interrupts = <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_SATA2>, <&cru CLK_PMALIVE2>,
+ <&cru CLK_RXOOB2>, <&cru CLK_PIPEPHY2_REF>,
+ <&cru CLK_PIPEPHY2_PIPE_ASIC_G>;
+ clock-names = "sata", "pmalive", "rxoob", "ref", "asic";
+ ports-implemented = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata-port@0 {
+ reg = <0>;
+ hba-port-cap = <HBA_PORT_FBSCP>;
+ phys = <&combphy2_psu PHY_TYPE_SATA>;
+ phy-names = "sata-phy";
+ snps,rx-ts-max = <32>;
+ snps,tx-ts-max = <32>;
+ };
+ };
+
+ sfc: spi@fe2b0000 {
+ compatible = "rockchip,sfc";
+ reg = <0x0 0xfe2b0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
+ clock-names = "clk_sfc", "hclk_sfc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdmmc: mmc@fe2c0000 {
+ compatible = "rockchip,rk3588-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xfe2c0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&scmi_clk SCMI_HCLK_SD>, <&scmi_clk SCMI_CCLK_SD>,
+ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_det &sdmmc_bus4>;
+ power-domains = <&power RK3588_PD_SDMMC>;
+ status = "disabled";
+ };
+
+ sdio: mmc@fe2d0000 {
+ compatible = "rockchip,rk3588-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x00 0xfe2d0000 0x00 0x4000>;
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_SDIO>, <&cru CCLK_SRC_SDIO>,
+ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdiom1_pins>;
+ power-domains = <&power RK3588_PD_SDIO>;
+ status = "disabled";
+ };
+
+ sdhci: mmc@fe2e0000 {
+ compatible = "rockchip,rk3588-dwcmshc";
+ reg = <0x0 0xfe2e0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&cru BCLK_EMMC>, <&cru TMCLK_EMMC>, <&cru CCLK_EMMC>;
+ assigned-clock-rates = <200000000>, <24000000>, <200000000>;
+ clocks = <&cru CCLK_EMMC>, <&cru HCLK_EMMC>,
+ <&cru ACLK_EMMC>, <&cru BCLK_EMMC>,
+ <&cru TMCLK_EMMC>;
+ clock-names = "core", "bus", "axi", "block", "timer";
+ max-frequency = <200000000>;
+ pinctrl-0 = <&emmc_rstnout>, <&emmc_bus8>, <&emmc_clk>,
+ <&emmc_cmd>, <&emmc_data_strobe>;
+ pinctrl-names = "default";
+ resets = <&cru SRST_C_EMMC>, <&cru SRST_H_EMMC>,
+ <&cru SRST_A_EMMC>, <&cru SRST_B_EMMC>,
+ <&cru SRST_T_EMMC>;
+ reset-names = "core", "bus", "axi", "block", "timer";
+ status = "disabled";
+ };
+
+ i2s0_8ch: i2s@fe470000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfe470000 0x0 0x1000>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S0_8CH_TX>, <&cru MCLK_I2S0_8CH_RX>, <&cru HCLK_I2S0_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S0_8CH_TX_SRC>, <&cru CLK_I2S0_8CH_RX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>, <&cru PLL_AUPLL>;
+ dmas = <&dmac0 0>, <&dmac0 1>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3588_PD_AUDIO>;
+ resets = <&cru SRST_M_I2S0_8CH_TX>, <&cru SRST_M_I2S0_8CH_RX>;
+ reset-names = "tx-m", "rx-m";
+ rockchip,trcm-sync-tx-only;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdi1
+ &i2s0_sdi2
+ &i2s0_sdi3
+ &i2s0_sdo0
+ &i2s0_sdo1
+ &i2s0_sdo2
+ &i2s0_sdo3>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s1_8ch: i2s@fe480000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfe480000 0x0 0x1000>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S1_8CH_TX>, <&cru MCLK_I2S1_8CH_RX>, <&cru HCLK_I2S1_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ dmas = <&dmac0 2>, <&dmac0 3>;
+ dma-names = "tx", "rx";
+ resets = <&cru SRST_M_I2S1_8CH_TX>, <&cru SRST_M_I2S1_8CH_RX>;
+ reset-names = "tx-m", "rx-m";
+ rockchip,trcm-sync-tx-only;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_lrck
+ &i2s1m0_sclk
+ &i2s1m0_sdi0
+ &i2s1m0_sdi1
+ &i2s1m0_sdi2
+ &i2s1m0_sdi3
+ &i2s1m0_sdo0
+ &i2s1m0_sdo1
+ &i2s1m0_sdo2
+ &i2s1m0_sdo3>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s2_2ch: i2s@fe490000 {
+ compatible = "rockchip,rk3588-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xfe490000 0x0 0x1000>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S2_2CH>, <&cru HCLK_I2S2_2CH>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ assigned-clocks = <&cru CLK_I2S2_2CH_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac1 0>, <&dmac1 1>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3588_PD_AUDIO>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2m1_lrck
+ &i2s2m1_sclk
+ &i2s2m1_sdi
+ &i2s2m1_sdo>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s3_2ch: i2s@fe4a0000 {
+ compatible = "rockchip,rk3588-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xfe4a0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S3_2CH>, <&cru HCLK_I2S3_2CH>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ assigned-clocks = <&cru CLK_I2S3_2CH_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac1 2>, <&dmac1 3>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3588_PD_AUDIO>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s3_lrck
+ &i2s3_sclk
+ &i2s3_sdi
+ &i2s3_sdo>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@fe600000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0xfe600000 0 0x10000>, /* GICD */
+ <0x0 0xfe680000 0 0x100000>; /* GICR */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-controller;
+ mbi-alias = <0x0 0xfe610000>;
+ mbi-ranges = <424 56>;
+ msi-controller;
+ ranges;
+ #address-cells = <2>;
+ #interrupt-cells = <4>;
+ #size-cells = <2>;
+
+ its0: msi-controller@fe640000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0xfe640000 0x0 0x20000>;
+ msi-controller;
+ #msi-cells = <1>;
+ };
+
+ its1: msi-controller@fe660000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0xfe660000 0x0 0x20000>;
+ msi-controller;
+ #msi-cells = <1>;
+ };
+
+ ppi-partitions {
+ ppi_partition0: interrupt-partition-0 {
+ affinity = <&cpu_l0 &cpu_l1 &cpu_l2 &cpu_l3>;
+ };
+
+ ppi_partition1: interrupt-partition-1 {
+ affinity = <&cpu_b0 &cpu_b1 &cpu_b2 &cpu_b3>;
+ };
+ };
+ };
+
+ dmac0: dma-controller@fea10000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xfea10000 0x0 0x4000>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH 0>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC0>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ dmac1: dma-controller@fea30000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xfea30000 0x0 0x4000>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH 0>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC1>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ i2c1: i2c@fea90000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfea90000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C1>, <&cru PCLK_I2C1>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c1m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@feaa0000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfeaa0000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C2>, <&cru PCLK_I2C2>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c2m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@feab0000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfeab0000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c3m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@feac0000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfeac0000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C4>, <&cru PCLK_I2C4>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c4m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@fead0000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfead0000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C5>, <&cru PCLK_I2C5>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c5m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ timer0: timer@feae0000 {
+ compatible = "rockchip,rk3588-timer", "rockchip,rk3288-timer";
+ reg = <0x0 0xfeae0000 0x0 0x20>;
+ interrupts = <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_BUSTIMER0>, <&cru CLK_BUSTIMER0>;
+ clock-names = "pclk", "timer";
+ };
+
+ wdt: watchdog@feaf0000 {
+ compatible = "rockchip,rk3588-wdt", "snps,dw-wdt";
+ reg = <0x0 0xfeaf0000 0x0 0x100>;
+ clocks = <&cru TCLK_WDT0>, <&cru PCLK_WDT0>;
+ clock-names = "tclk", "pclk";
+ interrupts = <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH 0>;
+ };
+
+ spi0: spi@feb00000 {
+ compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xfeb00000 0x0 0x1000>;
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru CLK_SPI0>, <&cru PCLK_SPI0>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac0 14>, <&dmac0 15>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-0 = <&spi0m0_cs0 &spi0m0_cs1 &spi0m0_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@feb10000 {
+ compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xfeb10000 0x0 0x1000>;
+ interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru CLK_SPI1>, <&cru PCLK_SPI1>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac0 16>, <&dmac0 17>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-0 = <&spi1m1_cs0 &spi1m1_cs1 &spi1m1_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@feb20000 {
+ compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xfeb20000 0x0 0x1000>;
+ interrupts = <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru CLK_SPI2>, <&cru PCLK_SPI2>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac1 15>, <&dmac1 16>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_cs1 &spi2m2_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi3: spi@feb30000 {
+ compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xfeb30000 0x0 0x1000>;
+ interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru CLK_SPI3>, <&cru PCLK_SPI3>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac1 17>, <&dmac1 18>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-0 = <&spi3m1_cs0 &spi3m1_cs1 &spi3m1_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart1: serial@feb40000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfeb40000 0x0 0x100>;
+ interrupts = <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 8>, <&dmac0 9>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart1m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart2: serial@feb50000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfeb50000 0x0 0x100>;
+ interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 10>, <&dmac0 11>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart2m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart3: serial@feb60000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfeb60000 0x0 0x100>;
+ interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 12>, <&dmac0 13>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart3m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart4: serial@feb70000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfeb70000 0x0 0x100>;
+ interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac1 9>, <&dmac1 10>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart4m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart5: serial@feb80000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfeb80000 0x0 0x100>;
+ interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART5>, <&cru PCLK_UART5>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac1 11>, <&dmac1 12>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart5m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart6: serial@feb90000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfeb90000 0x0 0x100>;
+ interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART6>, <&cru PCLK_UART6>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac1 13>, <&dmac1 14>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart6m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart7: serial@feba0000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfeba0000 0x0 0x100>;
+ interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART7>, <&cru PCLK_UART7>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac2 7>, <&dmac2 8>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart7m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart8: serial@febb0000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfebb0000 0x0 0x100>;
+ interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART8>, <&cru PCLK_UART8>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac2 9>, <&dmac2 10>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart8m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart9: serial@febc0000 {
+ compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfebc0000 0x0 0x100>;
+ interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_UART9>, <&cru PCLK_UART9>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac2 11>, <&dmac2 12>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&uart9m1_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@febd0000 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebd0000 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm4m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@febd0010 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebd0010 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm5m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@febd0020 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebd0020 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm6m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm7: pwm@febd0030 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebd0030 0x0 0x10>;
+ clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm7m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm8: pwm@febe0000 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebe0000 0x0 0x10>;
+ clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm8m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm9: pwm@febe0010 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebe0010 0x0 0x10>;
+ clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm9m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm10: pwm@febe0020 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebe0020 0x0 0x10>;
+ clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm10m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm11: pwm@febe0030 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebe0030 0x0 0x10>;
+ clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm11m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm12: pwm@febf0000 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebf0000 0x0 0x10>;
+ clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm12m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm13: pwm@febf0010 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebf0010 0x0 0x10>;
+ clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm13m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm14: pwm@febf0020 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebf0020 0x0 0x10>;
+ clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm14m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm15: pwm@febf0030 {
+ compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
+ reg = <0x0 0xfebf0030 0x0 0x10>;
+ clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
+ clock-names = "pwm", "pclk";
+ pinctrl-0 = <&pwm15m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ thermal_zones: thermal-zones {
+ /* sensor near the center of the SoC */
+ package_thermal: package-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsadc 0>;
+
+ trips {
+ package_crit: package-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ /* sensor between A76 cores 0 and 1 */
+ bigcore0_thermal: bigcore0-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsadc 1>;
+
+ trips {
+ bigcore0_alert: bigcore0-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ bigcore0_crit: bigcore0-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&bigcore0_alert>;
+ cooling-device =
+ <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ /* sensor between A76 cores 2 and 3 */
+ bigcore2_thermal: bigcore2-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsadc 2>;
+
+ trips {
+ bigcore2_alert: bigcore2-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ bigcore2_crit: bigcore2-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&bigcore2_alert>;
+ cooling-device =
+ <&cpu_b2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_b3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ /* sensor between the four A55 cores */
+ little_core_thermal: littlecore-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsadc 3>;
+
+ trips {
+ littlecore_alert: littlecore-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ littlecore_crit: littlecore-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&littlecore_alert>;
+ cooling-device =
+ <&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_l1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_l2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_l3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ /* sensor near the PD_CENTER power domain */
+ center_thermal: center-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsadc 4>;
+
+ trips {
+ center_crit: center-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpu_thermal: gpu-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsadc 5>;
+
+ trips {
+ gpu_alert: gpu-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ gpu_crit: gpu-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert>;
+ cooling-device =
+ <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ npu_thermal: npu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsadc 6>;
+
+ trips {
+ npu_crit: npu-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ tsadc: tsadc@fec00000 {
+ compatible = "rockchip,rk3588-tsadc";
+ reg = <0x0 0xfec00000 0x0 0x400>;
+ interrupts = <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru CLK_TSADC>, <&cru PCLK_TSADC>;
+ clock-names = "tsadc", "apb_pclk";
+ assigned-clocks = <&cru CLK_TSADC>;
+ assigned-clock-rates = <2000000>;
+ resets = <&cru SRST_P_TSADC>, <&cru SRST_TSADC>;
+ reset-names = "tsadc-apb", "tsadc";
+ rockchip,hw-tshut-temp = <120000>;
+ rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
+ pinctrl-0 = <&tsadc_gpio_func>;
+ pinctrl-1 = <&tsadc_shut>;
+ pinctrl-names = "gpio", "otpout";
+ #thermal-sensor-cells = <1>;
+ status = "disabled";
+ };
+
+ saradc: adc@fec10000 {
+ compatible = "rockchip,rk3588-saradc";
+ reg = <0x0 0xfec10000 0x0 0x10000>;
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH 0>;
+ #io-channel-cells = <1>;
+ clocks = <&cru CLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_P_SARADC>;
+ reset-names = "saradc-apb";
+ status = "disabled";
+ };
+
+ i2c6: i2c@fec80000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfec80000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C6>, <&cru PCLK_I2C6>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c6m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@fec90000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfec90000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C7>, <&cru PCLK_I2C7>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c7m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@feca0000 {
+ compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfeca0000 0x0 0x1000>;
+ clocks = <&cru CLK_I2C8>, <&cru PCLK_I2C8>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&i2c8m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi4: spi@fecb0000 {
+ compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xfecb0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru CLK_SPI4>, <&cru PCLK_SPI4>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac2 13>, <&dmac2 14>;
+ dma-names = "tx", "rx";
+ num-cs = <2>;
+ pinctrl-0 = <&spi4m0_cs0 &spi4m0_cs1 &spi4m0_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ otp: efuse@fecc0000 {
+ compatible = "rockchip,rk3588-otp";
+ reg = <0x0 0xfecc0000 0x0 0x400>;
+ clocks = <&cru CLK_OTPC_NS>, <&cru PCLK_OTPC_NS>,
+ <&cru CLK_OTP_PHY_G>, <&cru CLK_OTPC_ARB>;
+ clock-names = "otp", "apb_pclk", "phy", "arb";
+ resets = <&cru SRST_OTPC_NS>, <&cru SRST_P_OTPC_NS>,
+ <&cru SRST_OTPC_ARB>;
+ reset-names = "otp", "apb", "arb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_code: cpu-code@2 {
+ reg = <0x02 0x2>;
+ };
+
+ otp_id: id@7 {
+ reg = <0x07 0x10>;
+ };
+
+ cpub0_leakage: cpu-leakage@17 {
+ reg = <0x17 0x1>;
+ };
+
+ cpub1_leakage: cpu-leakage@18 {
+ reg = <0x18 0x1>;
+ };
+
+ cpul_leakage: cpu-leakage@19 {
+ reg = <0x19 0x1>;
+ };
+
+ log_leakage: log-leakage@1a {
+ reg = <0x1a 0x1>;
+ };
+
+ gpu_leakage: gpu-leakage@1b {
+ reg = <0x1b 0x1>;
+ };
+
+ otp_cpu_version: cpu-version@1c {
+ reg = <0x1c 0x1>;
+ bits = <3 3>;
+ };
+
+ npu_leakage: npu-leakage@28 {
+ reg = <0x28 0x1>;
+ };
+
+ codec_leakage: codec-leakage@29 {
+ reg = <0x29 0x1>;
+ };
+ };
+
+ dmac2: dma-controller@fed10000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xfed10000 0x0 0x4000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH 0>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC2>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ hdptxphy_hdmi0: phy@fed60000 {
+ compatible = "rockchip,rk3588-hdptx-phy";
+ reg = <0x0 0xfed60000 0x0 0x2000>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>, <&cru PCLK_HDPTX0>;
+ clock-names = "ref", "apb";
+ #phy-cells = <0>;
+ resets = <&cru SRST_HDPTX0>, <&cru SRST_P_HDPTX0>,
+ <&cru SRST_HDPTX0_INIT>, <&cru SRST_HDPTX0_CMN>,
+ <&cru SRST_HDPTX0_LANE>, <&cru SRST_HDPTX0_ROPLL>,
+ <&cru SRST_HDPTX0_LCPLL>;
+ reset-names = "phy", "apb", "init", "cmn", "lane", "ropll",
+ "lcpll";
+ rockchip,grf = <&hdptxphy0_grf>;
+ status = "disabled";
+ };
+
+ usbdp_phy0: phy@fed80000 {
+ compatible = "rockchip,rk3588-usbdp-phy";
+ reg = <0x0 0xfed80000 0x0 0x10000>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>,
+ <&cru CLK_USBDP_PHY0_IMMORTAL>,
+ <&cru PCLK_USBDPPHY0>,
+ <&u2phy0>;
+ clock-names = "refclk", "immortal", "pclk", "utmi";
+ resets = <&cru SRST_USBDP_COMBO_PHY0_INIT>,
+ <&cru SRST_USBDP_COMBO_PHY0_CMN>,
+ <&cru SRST_USBDP_COMBO_PHY0_LANE>,
+ <&cru SRST_USBDP_COMBO_PHY0_PCS>,
+ <&cru SRST_P_USBDPPHY0>;
+ reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
+ rockchip,u2phy-grf = <&usb2phy0_grf>;
+ rockchip,usb-grf = <&usb_grf>;
+ rockchip,usbdpphy-grf = <&usbdpphy0_grf>;
+ rockchip,vo-grf = <&vo0_grf>;
+ status = "disabled";
+ };
+
+ combphy0_ps: phy@fee00000 {
+ compatible = "rockchip,rk3588-naneng-combphy";
+ reg = <0x0 0xfee00000 0x0 0x100>;
+ clocks = <&cru CLK_REF_PIPE_PHY0>, <&cru PCLK_PCIE_COMBO_PIPE_PHY0>,
+ <&cru PCLK_PHP_ROOT>;
+ clock-names = "ref", "apb", "pipe";
+ assigned-clocks = <&cru CLK_REF_PIPE_PHY0>;
+ assigned-clock-rates = <100000000>;
+ #phy-cells = <1>;
+ resets = <&cru SRST_REF_PIPE_PHY0>, <&cru SRST_P_PCIE2_PHY0>;
+ reset-names = "phy", "apb";
+ rockchip,pipe-grf = <&php_grf>;
+ rockchip,pipe-phy-grf = <&pipe_phy0_grf>;
+ status = "disabled";
+ };
+
+ combphy2_psu: phy@fee20000 {
+ compatible = "rockchip,rk3588-naneng-combphy";
+ reg = <0x0 0xfee20000 0x0 0x100>;
+ clocks = <&cru CLK_REF_PIPE_PHY2>, <&cru PCLK_PCIE_COMBO_PIPE_PHY2>,
+ <&cru PCLK_PHP_ROOT>;
+ clock-names = "ref", "apb", "pipe";
+ assigned-clocks = <&cru CLK_REF_PIPE_PHY2>;
+ assigned-clock-rates = <100000000>;
+ #phy-cells = <1>;
+ resets = <&cru SRST_REF_PIPE_PHY2>, <&cru SRST_P_PCIE2_PHY2>;
+ reset-names = "phy", "apb";
+ rockchip,pipe-grf = <&php_grf>;
+ rockchip,pipe-phy-grf = <&pipe_phy2_grf>;
+ status = "disabled";
+ };
+
+ system_sram2: sram@ff001000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0xff001000 0x0 0xef000>;
+ ranges = <0x0 0x0 0xff001000 0xef000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3588-pinctrl";
+ ranges;
+ rockchip,grf = <&ioc>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ gpio0: gpio@fd8a0000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfd8a0000 0x0 0x100>;
+ interrupts = <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_GPIO0>, <&cru DBCLK_GPIO0>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@fec20000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfec20000 0x0 0x100>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_GPIO1>, <&cru DBCLK_GPIO1>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@fec30000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfec30000 0x0 0x100>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_GPIO2>, <&cru DBCLK_GPIO2>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@fec40000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfec40000 0x0 0x100>;
+ interrupts = <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_GPIO3>, <&cru DBCLK_GPIO3>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@fec50000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfec50000 0x0 0x100>;
+ interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_GPIO4>, <&cru DBCLK_GPIO4>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+ };
+};
+
+#include "rk3588-base-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
index 709d348cf06b..03fd193be253 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
@@ -466,3 +466,7 @@
};
};
};
+
+&tsadc {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
index 7be2190244ba..00f660d50127 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
@@ -878,6 +878,8 @@
vdd_cpu_big1_s0: dcdc-reg1 {
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big1_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -890,6 +892,8 @@
vdd_cpu_big0_s0: dcdc-reg2 {
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big0_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -902,6 +906,8 @@
vdd_cpu_lit_s0: dcdc-reg3 {
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_lit_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
@@ -926,6 +932,8 @@
vdd_cpu_big1_mem_s0: dcdc-reg5 {
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big1_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -939,6 +947,8 @@
vdd_cpu_big0_mem_s0: dcdc-reg6 {
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big0_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -963,6 +973,8 @@
vdd_cpu_lit_mem_s0: dcdc-reg8 {
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_lit_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
@@ -1131,6 +1143,10 @@
status = "okay";
};

+&tsadc {
+ status = "okay";
+};
+
&u2phy0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra-pinctrl.dtsi
index 244c66faa161..244c66faa161 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-extra-pinctrl.dtsi
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
new file mode 100644
index 000000000000..0ce0934ec6b7
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ */
+
+#include "rk3588-base.dtsi"
+#include "rk3588-extra-pinctrl.dtsi"
+
+/ {
+ usb_host1_xhci: usb@fc400000 {
+ compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
+ reg = <0x0 0xfc400000 0x0 0x400000>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru REF_CLK_USB3OTG1>, <&cru SUSPEND_CLK_USB3OTG1>,
+ <&cru ACLK_USB3OTG1>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk";
+ dr_mode = "otg";
+ phys = <&u2phy1_otg>, <&usbdp_phy1 PHY_TYPE_USB3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ power-domains = <&power RK3588_PD_USB>;
+ resets = <&cru SRST_A_USB3OTG1>;
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ status = "disabled";
+ };
+
+ pcie30_phy_grf: syscon@fd5b8000 {
+ compatible = "rockchip,rk3588-pcie3-phy-grf", "syscon";
+ reg = <0x0 0xfd5b8000 0x0 0x10000>;
+ };
+
+ pipe_phy1_grf: syscon@fd5c0000 {
+ compatible = "rockchip,rk3588-pipe-phy-grf", "syscon";
+ reg = <0x0 0xfd5c0000 0x0 0x100>;
+ };
+
+ usbdpphy1_grf: syscon@fd5cc000 {
+ compatible = "rockchip,rk3588-usbdpphy-grf", "syscon";
+ reg = <0x0 0xfd5cc000 0x0 0x4000>;
+ };
+
+ usb2phy1_grf: syscon@fd5d4000 {
+ compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xfd5d4000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy1: usb2phy@4000 {
+ compatible = "rockchip,rk3588-usb2phy";
+ reg = <0x4000 0x10>;
+ #clock-cells = <0>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy1";
+ interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH 0>;
+ resets = <&cru SRST_OTGPHY_U3_1>, <&cru SRST_P_USB2PHY_U3_1_GRF0>;
+ reset-names = "phy", "apb";
+ status = "disabled";
+
+ u2phy1_otg: otg-port {
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
+ i2s8_8ch: i2s@fddc8000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfddc8000 0x0 0x1000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S8_8CH_TX>, <&cru MCLK_I2S8_8CH_TX>, <&cru HCLK_I2S8_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S8_8CH_TX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac2 22>;
+ dma-names = "tx";
+ power-domains = <&power RK3588_PD_VO0>;
+ resets = <&cru SRST_M_I2S8_8CH_TX>;
+ reset-names = "tx-m";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s6_8ch: i2s@fddf4000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfddf4000 0x0 0x1000>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S6_8CH_TX>, <&cru MCLK_I2S6_8CH_TX>, <&cru HCLK_I2S6_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S6_8CH_TX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac2 4>;
+ dma-names = "tx";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_M_I2S6_8CH_TX>;
+ reset-names = "tx-m";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s7_8ch: i2s@fddf8000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfddf8000 0x0 0x1000>;
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S7_8CH_RX>, <&cru MCLK_I2S7_8CH_RX>, <&cru HCLK_I2S7_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S7_8CH_RX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac2 21>;
+ dma-names = "rx";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_M_I2S7_8CH_RX>;
+ reset-names = "rx-m";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s10_8ch: i2s@fde00000 {
+ compatible = "rockchip,rk3588-i2s-tdm";
+ reg = <0x0 0xfde00000 0x0 0x1000>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru MCLK_I2S10_8CH_RX>, <&cru MCLK_I2S10_8CH_RX>, <&cru HCLK_I2S10_8CH>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ assigned-clocks = <&cru CLK_I2S10_8CH_RX_SRC>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ dmas = <&dmac2 24>;
+ dma-names = "rx";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_M_I2S10_8CH_RX>;
+ reset-names = "rx-m";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ pcie3x4: pcie@fe150000 {
+ compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0x0f>;
+ clocks = <&cru ACLK_PCIE_4L_MSTR>, <&cru ACLK_PCIE_4L_SLV>,
+ <&cru ACLK_PCIE_4L_DBI>, <&cru PCLK_PCIE_4L>,
+ <&cru CLK_PCIE_AUX0>, <&cru CLK_PCIE4L_PIPE>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe";
+ device_type = "pci";
+ interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie3x4_intc 0>,
+ <0 0 0 2 &pcie3x4_intc 1>,
+ <0 0 0 3 &pcie3x4_intc 2>,
+ <0 0 0 4 &pcie3x4_intc 3>;
+ linux,pci-domain = <0>;
+ max-link-speed = <3>;
+ msi-map = <0x0000 &its1 0x0000 0x1000>;
+ num-lanes = <4>;
+ phys = <&pcie30phy>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x00e00000>,
+ <0x03000000 0x0 0x40000000 0x9 0x00000000 0x0 0x40000000>;
+ reg = <0xa 0x40000000 0x0 0x00400000>,
+ <0x0 0xfe150000 0x0 0x00010000>,
+ <0x0 0xf0000000 0x0 0x00100000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE0_POWER_UP>, <&cru SRST_P_PCIE0>;
+ reset-names = "pwr", "pipe";
+ status = "disabled";
+
+ pcie3x4_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 260 IRQ_TYPE_EDGE_RISING 0>;
+ };
+ };
+
+ pcie3x4_ep: pcie-ep@fe150000 {
+ compatible = "rockchip,rk3588-pcie-ep";
+ reg = <0xa 0x40000000 0x0 0x00100000>,
+ <0xa 0x40100000 0x0 0x00100000>,
+ <0x0 0xfe150000 0x0 0x00010000>,
+ <0x9 0x00000000 0x0 0x40000000>,
+ <0xa 0x40300000 0x0 0x00100000>;
+ reg-names = "dbi", "dbi2", "apb", "addr_space", "atu";
+ clocks = <&cru ACLK_PCIE_4L_MSTR>, <&cru ACLK_PCIE_4L_SLV>,
+ <&cru ACLK_PCIE_4L_DBI>, <&cru PCLK_PCIE_4L>,
+ <&cru CLK_PCIE_AUX0>, <&cru CLK_PCIE4L_PIPE>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe";
+ interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err",
+ "dma0", "dma1", "dma2", "dma3";
+ max-link-speed = <3>;
+ num-lanes = <4>;
+ phys = <&pcie30phy>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3588_PD_PCIE>;
+ resets = <&cru SRST_PCIE0_POWER_UP>, <&cru SRST_P_PCIE0>;
+ reset-names = "pwr", "pipe";
+ status = "disabled";
+ };
+
+ pcie3x2: pcie@fe160000 {
+ compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x10 0x1f>;
+ clocks = <&cru ACLK_PCIE_2L_MSTR>, <&cru ACLK_PCIE_2L_SLV>,
+ <&cru ACLK_PCIE_2L_DBI>, <&cru PCLK_PCIE_2L>,
+ <&cru CLK_PCIE_AUX1>, <&cru CLK_PCIE2L_PIPE>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe";
+ device_type = "pci";
+ interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie3x2_intc 0>,
+ <0 0 0 2 &pcie3x2_intc 1>,
+ <0 0 0 3 &pcie3x2_intc 2>,
+ <0 0 0 4 &pcie3x2_intc 3>;
+ linux,pci-domain = <1>;
+ max-link-speed = <3>;
+ msi-map = <0x1000 &its1 0x1000 0x1000>;
+ num-lanes = <2>;
+ phys = <&pcie30phy>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf1100000 0x0 0xf1100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf1200000 0x0 0xf1200000 0x0 0x00e00000>,
+ <0x03000000 0x0 0x40000000 0x9 0x40000000 0x0 0x40000000>;
+ reg = <0xa 0x40400000 0x0 0x00400000>,
+ <0x0 0xfe160000 0x0 0x00010000>,
+ <0x0 0xf1000000 0x0 0x00100000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE1_POWER_UP>, <&cru SRST_P_PCIE1>;
+ reset-names = "pwr", "pipe";
+ status = "disabled";
+
+ pcie3x2_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 255 IRQ_TYPE_EDGE_RISING 0>;
+ };
+ };
+
+ pcie2x1l0: pcie@fe170000 {
+ compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
+ bus-range = <0x20 0x2f>;
+ clocks = <&cru ACLK_PCIE_1L0_MSTR>, <&cru ACLK_PCIE_1L0_SLV>,
+ <&cru ACLK_PCIE_1L0_DBI>, <&cru PCLK_PCIE_1L0>,
+ <&cru CLK_PCIE_AUX2>, <&cru CLK_PCIE1L0_PIPE>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe";
+ device_type = "pci";
+ interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie2x1l0_intc 0>,
+ <0 0 0 2 &pcie2x1l0_intc 1>,
+ <0 0 0 3 &pcie2x1l0_intc 2>,
+ <0 0 0 4 &pcie2x1l0_intc 3>;
+ linux,pci-domain = <2>;
+ max-link-speed = <2>;
+ msi-map = <0x2000 &its0 0x2000 0x1000>;
+ num-lanes = <1>;
+ phys = <&combphy1_ps PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x00e00000>,
+ <0x03000000 0x0 0x40000000 0x9 0x80000000 0x0 0x40000000>;
+ reg = <0xa 0x40800000 0x0 0x00400000>,
+ <0x0 0xfe170000 0x0 0x00010000>,
+ <0x0 0xf2000000 0x0 0x00100000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE2_POWER_UP>, <&cru SRST_P_PCIE2>;
+ reset-names = "pwr", "pipe";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ pcie2x1l0_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_EDGE_RISING 0>;
+ };
+ };
+
+ gmac0: ethernet@fe1b0000 {
+ compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
+ reg = <0x0 0xfe1b0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ clocks = <&cru CLK_GMAC_125M>, <&cru CLK_GMAC_50M>,
+ <&cru PCLK_GMAC0>, <&cru ACLK_GMAC0>,
+ <&cru CLK_GMAC0_PTP_REF>;
+ clock-names = "stmmaceth", "clk_mac_ref",
+ "pclk_mac", "aclk_mac",
+ "ptp_ref";
+ power-domains = <&power RK3588_PD_GMAC>;
+ resets = <&cru SRST_A_GMAC0>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&sys_grf>;
+ rockchip,php-grf = <&php_grf>;
+ snps,axi-config = <&gmac0_stmmac_axi_setup>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&gmac0_mtl_rx_setup>;
+ snps,mtl-tx-config = <&gmac0_mtl_tx_setup>;
+ snps,tso;
+ status = "disabled";
+
+ mdio0: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ };
+
+ gmac0_stmmac_axi_setup: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,wr_osr_lmt = <4>;
+ snps,rd_osr_lmt = <8>;
+ };
+
+ gmac0_mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+
+ gmac0_mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+ };
+
+ sata1: sata@fe220000 {
+ compatible = "rockchip,rk3588-dwc-ahci", "snps,dwc-ahci";
+ reg = <0 0xfe220000 0 0x1000>;
+ interrupts = <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_SATA1>, <&cru CLK_PMALIVE1>,
+ <&cru CLK_RXOOB1>, <&cru CLK_PIPEPHY1_REF>,
+ <&cru CLK_PIPEPHY1_PIPE_ASIC_G>;
+ clock-names = "sata", "pmalive", "rxoob", "ref", "asic";
+ ports-implemented = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata-port@0 {
+ reg = <0>;
+ hba-port-cap = <HBA_PORT_FBSCP>;
+ phys = <&combphy1_ps PHY_TYPE_SATA>;
+ phy-names = "sata-phy";
+ snps,rx-ts-max = <32>;
+ snps,tx-ts-max = <32>;
+ };
+ };
+
+ usbdp_phy1: phy@fed90000 {
+ compatible = "rockchip,rk3588-usbdp-phy";
+ reg = <0x0 0xfed90000 0x0 0x10000>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>,
+ <&cru CLK_USBDP_PHY1_IMMORTAL>,
+ <&cru PCLK_USBDPPHY1>,
+ <&u2phy1>;
+ clock-names = "refclk", "immortal", "pclk", "utmi";
+ resets = <&cru SRST_USBDP_COMBO_PHY1_INIT>,
+ <&cru SRST_USBDP_COMBO_PHY1_CMN>,
+ <&cru SRST_USBDP_COMBO_PHY1_LANE>,
+ <&cru SRST_USBDP_COMBO_PHY1_PCS>,
+ <&cru SRST_P_USBDPPHY1>;
+ reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
+ rockchip,u2phy-grf = <&usb2phy1_grf>;
+ rockchip,usb-grf = <&usb_grf>;
+ rockchip,usbdpphy-grf = <&usbdpphy1_grf>;
+ rockchip,vo-grf = <&vo0_grf>;
+ status = "disabled";
+ };
+
+ combphy1_ps: phy@fee10000 {
+ compatible = "rockchip,rk3588-naneng-combphy";
+ reg = <0x0 0xfee10000 0x0 0x100>;
+ clocks = <&cru CLK_REF_PIPE_PHY1>, <&cru PCLK_PCIE_COMBO_PIPE_PHY1>,
+ <&cru PCLK_PHP_ROOT>;
+ clock-names = "ref", "apb", "pipe";
+ assigned-clocks = <&cru CLK_REF_PIPE_PHY1>;
+ assigned-clock-rates = <100000000>;
+ #phy-cells = <1>;
+ resets = <&cru SRST_REF_PIPE_PHY1>, <&cru SRST_P_PCIE2_PHY1>;
+ reset-names = "phy", "apb";
+ rockchip,pipe-grf = <&php_grf>;
+ rockchip,pipe-phy-grf = <&pipe_phy1_grf>;
+ status = "disabled";
+ };
+
+ pcie30phy: phy@fee80000 {
+ compatible = "rockchip,rk3588-pcie3-phy";
+ reg = <0x0 0xfee80000 0x0 0x20000>;
+ #phy-cells = <0>;
+ clocks = <&cru PCLK_PCIE_COMBO_PIPE_PHY>;
+ clock-names = "pclk";
+ resets = <&cru SRST_PCIE30_PHY>;
+ reset-names = "phy";
+ rockchip,pipe-grf = <&php_grf>;
+ rockchip,phy-grf = <&pcie30_phy_grf>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts
new file mode 100644
index 000000000000..83103e4c7216
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts
@@ -0,0 +1,778 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2023 Thomas McKahan
+ * Copyright (c) 2024 Sebastian Kropatsch
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/usb/pd.h>
+#include "rk3588-friendlyelec-cm3588.dtsi"
+
+/ {
+ model = "FriendlyElec CM3588 NAS";
+ compatible = "friendlyarm,cm3588-nas", "friendlyarm,cm3588", "rockchip,rk3588";
+
+ adc_key_recovery: adc-key-recovery {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <17000>;
+ };
+ };
+
+ analog-sound {
+ compatible = "simple-audio-card";
+ pinctrl-names = "default";
+ pinctrl-0 = <&headphone_detect>;
+
+ simple-audio-card,format = "i2s";
+ simple-audio-card,hp-det-gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_LOW>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "realtek,rt5616-codec";
+
+ simple-audio-card,routing =
+ "Headphones", "HPOL",
+ "Headphones", "HPOR",
+ "MIC1", "Microphone Jack",
+ "Microphone Jack", "micbias1";
+ simple-audio-card,widgets =
+ "Headphone", "Headphones",
+ "Microphone", "Microphone Jack";
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&rt5616>;
+ };
+ };
+
+ buzzer: pwm-beeper {
+ compatible = "pwm-beeper";
+ amp-supply = <&vcc_5v0_sys>;
+ beeper-hz = <500>;
+ pwms = <&pwm8 0 500000 0>;
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
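+ /* Six fan steps; cooling-levels values are PWM duty cycles out of 255 */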
+ cooling-levels = <0 50 80 120 160 220>;
+ fan-supply = <&vcc_5v0_sys>;
+ pwms = <&pwm1 0 50000 0>;
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&key1_pin>;
+
+ button-user {
+ debounce-interval = <50>;
+ gpios = <&gpio0 RK_PD5 GPIO_ACTIVE_LOW>;
+ label = "User Button";
+ linux,code = <BTN_1>;
+ wakeup-source;
+ };
+ };
+
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_LOW>;
+ };
+
+ vcc_12v_dcin: regulator-vcc-12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc_3v3_m2_a: regulator-vcc-3v3-m2-a {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_m2_a";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_12v_dcin>;
+ };
+
+ vcc_3v3_m2_b: regulator-vcc-3v3-m2-b {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_m2_b";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_12v_dcin>;
+ };
+
+ vcc_3v3_m2_c: regulator-vcc-3v3-m2-c {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_m2_c";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_12v_dcin>;
+ };
+
+ vcc_3v3_m2_d: regulator-vcc-3v3-m2-d {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_m2_d";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_12v_dcin>;
+ };
+
+ /* vcc_5v0_sys powers the peripherals */
+ vcc_5v0_sys: regulator-vcc-5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_12v_dcin>;
+ };
+
+ /* SY6280AAC power switch (U14 in schematics) */
+ vcc_5v0_host_20: regulator-vcc-5v0-host-20 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PA4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc_5v0_host20_en>;
+ regulator-name = "vcc_5v0_host_20";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ /* SY6280AAC power switch (U8 in schematics) */
+ vcc_5v0_host_30_p1: regulator-vcc-5v0-host-30-p1 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc_5v0_host30p1_en>;
+ regulator-name = "vcc_5v0_host_30_p1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ /* SY6280AAC power switch (U9 in schematics) */
+ vcc_5v0_host_30_p2: regulator-vcc-5v0-host-30-p2 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio3 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc_5v0_host30p2_en>;
+ regulator-name = "vcc_5v0_host_30_p2";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ /* SY6280AAC power switch (U10 in schematics) */
+ vbus_5v0_typec: regulator-vbus-5v0-typec {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&typec_5v_pwr_en>;
+ regulator-name = "vbus_5v0_typec";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v0_sys>;
+ };
+};
+
+/* vcc_4v0_sys powers the RK806 and the RK860s */
+&vcc_4v0_sys {
+ vin-supply = <&vcc_12v_dcin>;
+};
+
+/* Combo PHY 1 is configured to act as a PCIe 2.0 PHY */
+/* Used by PCIe controller 2 (pcie2x1l0) */
+&combphy1_ps {
+ status = "okay";
+};
+
+/* Combo PHY 2 is configured to act as USB3 PHY */
+/* Used by USB 3.0 OTG 2 controller (USB 3.0 Type-A port 2) */
+/* CM3588 USB Controller Config Table: USB30 HOST2 */
+&combphy2_psu {
+ status = "okay";
+};
+
+/* GPIO names are in the format "Human-readable-name [SIGNAL_LABEL]" */
+/* Signal labels match the official CM3588 NAS SDK schematic revision 2309 */
+&gpio0 {
+ gpio-line-names =
+ /* GPIO0 A0-A7 */
+ "", "", "", "",
+ "MicroSD detect [SDMMC_DET_L]", "", "", "",
+ /* GPIO0 B0-B7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO0 C0-C7 */
+ "", "", "", "",
+ "Pin 10 [UART0_RX_M0]", "Pin 08 [UART0_TX_M0/PWM4_M0]", "Pin 32 [PWM5_M1]", "",
+ /* GPIO0 D0-D7 */
+ "", "", "", "USB3 Type-C [CC_INT_L]",
+ "IR receiver [PWM3_IR_M0]", "User Button", "", "";
+};
+
+&gpio1 {
+ gpio-line-names =
+ /* GPIO1 A0-A7 */
+ "Pin 27 [UART6_RX_M1]", "Pin 28 [UART6_TX_M1]", "", "",
+ "USB2 Type-A [USB2_PWREN]", "", "", "Pin 15",
+ /* GPIO1 B0-B7 */
+ "Pin 26", "Pin 21 [SPI0_MISO_M2]", "Pin 19 [SPI0_MOSI_M2/UART4_RX_M2]", "Pin 23 [SPI0_CLK_M2/UART4_TX_M2]",
+ "Pin 24 [SPI0_CS0_M2/UART7_RX_M2]", "Pin 22 [SPI0_CS1_M0/UART7_TX_M2]", "", "CSI-Pin 14 [MIPI_CAM2_CLKOUT]",
+ /* GPIO1 C0-C7 */
+ "", "", "", "",
+ "Headphone detect [HP_DET_L]", "", "", "",
+ /* GPIO1 D0-D7 */
+ "", "", "USB3 Type-C [TYPEC5V_PWREN_H]", "5V Fan [PWM1_M1]",
+ "", "HDMI-in detect [HDMIIRX_DET_L]", "Pin 05 [I2C8_SCL_M2]", "Pin 03 [I2C8_SDA_M2]";
+};
+
+&gpio2 {
+ gpio-line-names =
+ /* GPIO2 A0-A7 */
+ "", "", "", "",
+ "", "", "SPI NOR Flash [FSPI_D0_M1]", "SPI NOR Flash [FSPI_D1_M1]",
+ /* GPIO2 B0-B7 */
+ "SPI NOR Flash [FSPI_D2_M1]", "SPI NOR Flash [FSPI_D3_M1]", "", "SPI NOR Flash [FSPI_CLK_M1]",
+ "SPI NOR Flash [FSPI_CSN0_M1]", "", "", "",
+ /* GPIO2 C0-C7 */
+ "", "CSI-Pin 11 [MIPI_CAM2_RESET_L]", "CSI-Pin 12 [MIPI_CAM2_PDN_L]", "",
+ "", "", "", "",
+ /* GPIO2 D0-D7 */
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ /* GPIO3 A0-A7 */
+ "Pin 35 [SPI4_MISO_M1/PWM10_M0]", "Pin 38 [SPI4_MOSI_M1]", "Pin 40 [SPI4_CLK_M1/UART8_TX_M1]", "Pin 36 [SPI4_CS0_M1/UART8_RX_M1]",
+ "Pin 37 [SPI4_CS1_M1]", "USB3-A #2 [USB3_2_PWREN]", "DSI-Pin 12 [LCD_RST]", "Buzzer [PWM8_M0]",
+ /* GPIO3 B0-B7 */
+ "Pin 33 [PWM9_M0]", "DSI-Pin 10 [PWM2_M1/LCD_BL]", "Pin 07", "Pin 16",
+ "Pin 18", "Pin 29 [UART3_TX_M1/PWM12_M0]", "Pin 31 [UART3_RX_M1/PWM13_M0]", "Pin 12",
+ /* GPIO3 C0-C7 */
+ "DSI-Pin 08 [TP_INT_L]", "DSI-Pin 14 [TP_RST_L]", "Pin 11 [PWM14_M0]", "Pin 13 [PWM15_IR_M0]",
+ "", "", "", "DSI-Pin 06 [I2C5_SCL_M0_TP]",
+ /* GPIO3 D0-D7 */
+ "DSI-Pin 05 [I2C5_SDA_M0_TP]", "", "", "",
+ "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names =
+ /* GPIO4 A0-A7 */
+ "", "", "M.2 M-Key Slot4 [M2_D_PERST_L]", "",
+ "", "", "", "",
+ /* GPIO4 B0-B7 */
+ "USB3-A #1 [USB3_TYPEC1_PWREN]", "", "", "M.2 M-Key Slot3 [M2_C_PERST_L]",
+ "M.2 M-Key Slot2 [M2_B_PERST_L]", "M.2 M-Key Slot1 [M2_A_CLKREQ_L]", "M.2 M-Key Slot1 [M2_A_PERST_L]", "",
+ /* GPIO4 C0-C7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO4 D0-D7 */
+ "", "", "", "",
+ "", "", "", "";
+};
+
+/* Connected to MIPI-DSI0 */
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5m0_xfer>;
+ status = "disabled";
+};
+
+&i2c6 {
+ fusb302: typec-portc@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbc0_int>;
+ vbus-supply = <&vbus_5v0_typec>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USB-C";
+ power-role = "source";
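+ /* Advertise a single fixed 5 V / 2 A PDO; PDO_FIXED() takes millivolts, milliamps and flags */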
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ try-power-role = "source";
+ vbus-supply = <&vbus_5v0_typec>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_orien_sw: endpoint {
+ remote-endpoint = <&usbdp_phy0_orientation_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_role_sw: endpoint {
+ remote-endpoint = <&dwc3_0_role_switch>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ dp_altmode_mux: endpoint {
+ remote-endpoint = <&usbdp_phy0_dp_altmode_mux>;
+ };
+ };
+ };
+ };
+ };
+};
+
+/* Connected to MIPI-CSI1 */
+/* &i2c7 */
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+&i2c8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c8m2_xfer>;
+ status = "okay";
+};
+
+&pcie2x1l0 {
+ /* 2. M.2 socket, CON14: pcie30phy port0 lane1, @fe170000 */
+ max-link-speed = <3>;
+ num-lanes = <1>;
+ phys = <&pcie30phy>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_rst>;
+ reset-gpios = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_m2_b>;
+ status = "okay";
+};
+
+&pcie2x1l1 {
+ /* 4. M.2 socket, CON16: pcie30phy port1 lane1, @fe180000 */
+ max-link-speed = <3>;
+ num-lanes = <1>;
+ phys = <&pcie30phy>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_1_rst>;
+ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_m2_d>;
+ status = "okay";
+};
+
+&pcie30phy {
+ /*
+ * Data lane mapping <1 3 2 4> = x1x1 x1x1 (bifurcation of both ports)
+ * port 0 lane 0 - always mapped to controller 0 (4L)
+ * port 0 lane 1 - map to controller 2 (1L0)
+ * port 1 lane 0 - map to controller 1 (2L)
+ * port 1 lane 1 - map to controller 3 (1L1)
+ */
+ data-lanes = <1 3 2 4>;
+ status = "okay";
+};
+
+&pcie3x4 {
+ /* 1. M.2 socket, CON13: pcie30phy port0 lane0, @fe150000 */
+ max-link-speed = <3>;
+ num-lanes = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3x4_rst>;
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_m2_a>;
+ status = "okay";
+};
+
+&pcie3x2 {
+ /* 3. M.2 socket, CON15: pcie30phy port1 lane0, @fe160000 */
+ max-link-speed = <3>;
+ num-lanes = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3x2_rst>;
+ reset-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_m2_c>;
+ status = "okay";
+};
+
+&pinctrl {
+ audio {
+ headphone_detect: headphone-detect {
+ rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ gpio-key {
+ key1_pin: key1-pin {
+ rockchip,pins = <0 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ pcie {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_1_rst: pcie2-1-rst {
+ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie3x2_rst: pcie3x2-rst {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie3x4_rst: pcie3x4-rst {
+ rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ vcc_5v0_host20_en: vcc-5v0-host20-en {
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc_5v0_host30p1_en: vcc-5v0-host30p1-en {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc_5v0_host30p2_en: vcc-5v0-host30p2-en {
+ rockchip,pins = <3 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb-typec {
+ usbc0_int: usbc0-int {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ typec_5v_pwr_en: typec-5v-pwr-en {
+ rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+/* Connected to 5V Fan */
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1m1_pins>;
+ status = "okay";
+};
+
+/* Connected to MIPI-DSI0 */
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2m1_pins>;
+};
+
+/* Connected to IR Receiver */
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3m0_pins>;
+ status = "okay";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with UART0 */
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm4m1_pins>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+&pwm5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm5m1_pins>;
+ status = "okay";
+};
+
+/* Connected to Buzzer */
+&pwm8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm8m0_pins>;
+ status = "okay";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+&pwm9 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm9m0_pins>;
+ status = "okay";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with SPI4 */
+&pwm10 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm10m0_pins>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with UART3 */
+&pwm12 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm12m0_pins>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with UART3 */
+&pwm13 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm13m0_pins>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+&pwm14 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm14m0_pins>;
+ status = "okay";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Optimized for infrared applications */
+&pwm15 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm15m0_pins>;
+ status = "disabled";
+};
+
+/* microSD card */
+&sdmmc {
+ status = "okay";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with UART4, UART7 and PWM10 */
+&spi0 {
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0m2_cs0 &spi0m2_pins>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with UART8 */
+&spi4 {
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi4m1_cs0 &spi4m1_pins>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with PWM4 */
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0m0_xfer>;
+ status = "disabled";
+};
+
+/* Debug UART */
+&uart2 {
+ status = "okay";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with PWM12 and PWM13 */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3m1_xfer>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with SPI0 */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4m2_xfer>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+&uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart6m1_xfer>;
+ status = "okay";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with SPI0 */
+&uart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7m2_xfer>;
+ status = "disabled";
+};
+
+/* GPIO Connector, connected to 40-pin GPIO header */
+/* Shared with SPI4 */
+&uart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart8m1_xfer>;
+ status = "disabled";
+};
+
+/* USB2 PHY for USB Type-C port */
+/* CM3588 USB Controller Config Table: USB20 OTG0 */
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ phy-supply = <&vbus_5v0_typec>;
+ status = "okay";
+};
+
+/* USB2 PHY for USB 3.0 Type-A port 1 */
+/* CM3588 USB Controller Config Table: USB20 OTG1 */
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ phy-supply = <&vcc_5v0_host_30_p1>;
+ status = "okay";
+};
+
+/* USB2 PHY for USB 2.0 Type-A */
+/* CM3588 USB Controller Config Table: USB20 HOST0 */
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc_5v0_host_20>;
+ status = "okay";
+};
+
+/* USB2 PHY for USB 3.0 Type-A port 2 */
+/* CM3588 USB Controller Config Table: USB20 HOST1 */
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc_5v0_host_30_p2>;
+ status = "okay";
+};
+
+/* USB 2.0 Type-A */
+/* PHY: <&u2phy2_host> */
+&usb_host0_ehci {
+ status = "okay";
+};
+
+/* USB 2.0 Type-A */
+/* PHY: <&u2phy2_host> */
+&usb_host0_ohci {
+ status = "okay";
+};
+
+/* USB Type-C */
+/* PHYs: <&u2phy0_otg>, <&usbdp_phy0 PHY_TYPE_USB3> */
+&usb_host0_xhci {
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ dwc3_0_role_switch: endpoint {
+ remote-endpoint = <&usbc0_role_sw>;
+ };
+ };
+};
+
+/* Lower USB 3.0 Type-A (port 2) */
+/* PHY: <&u2phy3_host> */
+&usb_host1_ehci {
+ status = "okay";
+};
+
+/* Lower USB 3.0 Type-A (port 2) */
+/* PHY: <&u2phy3_host> */
+&usb_host1_ohci {
+ status = "okay";
+};
+
+/* Upper USB 3.0 Type-A (port 1) */
+/* PHYs: <&u2phy1_otg>, <&usbdp_phy1 PHY_TYPE_USB3> */
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* Lower USB 3.0 Type-A (port 2) */
+/* PHYs: <&combphy2_psu PHY_TYPE_USB3> */
+&usb_host2_xhci {
+ status = "okay";
+};
+
+/* USB3 PHY for USB Type-C port */
+/* CM3588 USB Controller Config Table: USB30 OTG0 */
+&usbdp_phy0 {
+ mode-switch;
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA6 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_orientation_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_orien_sw>;
+ };
+
+ usbdp_phy0_dp_altmode_mux: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dp_altmode_mux>;
+ };
+ };
+};
+
+/* USB3 PHY for USB 3.0 Type-A port 1 */
+/* CM3588 USB Controller Config Table: USB30 OTG1 */
+&usbdp_phy1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
new file mode 100644
index 000000000000..e3a9598b99fc
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
@@ -0,0 +1,653 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2023 Thomas McKahan
+ * Copyright (c) 2024 Sebastian Kropatsch
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3588.dtsi"
+
+/ {
+ model = "FriendlyElec CM3588";
+ compatible = "friendlyarm,cm3588", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led_sys: led-0 {
+ color = <LED_COLOR_ID_AMBER>;
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_sys_pin>;
+ };
+
+ led_usr: led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_usr_pin>;
+ };
+ };
+
+ /* vcc_4v0_sys powers the RK806 and the RK860s */
+ vcc_4v0_sys: regulator-vcc-4v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_4v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <4000000>;
+ };
+
+ vcc_3v3_pcie20: regulator-vcc-3v3-pcie20 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_pcie20";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc_3v3_sd_s0: regulator-vcc-3v3-sd-s0 {
+ compatible = "regulator-fixed";
+ gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd_s0_pwr>;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_sd_s0";
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-1v1-nldo-s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc_4v0_sys>;
+ };
+};
+
+/* Combo PHY 0 is configured to act as a PCIe 2.0 PHY */
+/* Used by PCIe controller 4 (pcie2x1l2) */
+&combphy0_ps {
+ status = "okay";
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ vdd_npu_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_npu_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c6 {
+ clock-frequency = <200000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6m0_xfer>;
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ wakeup-source;
+ };
+};
+
+&i2c7 {
+ clock-frequency = <200000>;
+ status = "okay";
+
+ rt5616: audio-codec@1b {
+ compatible = "realtek,rt5616";
+ reg = <0x1b>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+};
+
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
+&i2s7_8ch {
+ status = "okay";
+};
+
+&pcie2x1l2 {
+ /* r8125 ethernet, @fe190000 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_2_rst>;
+ reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_pcie20>;
+ status = "okay";
+};
+
+&pinctrl {
+ gpio-leds {
+ led_sys_pin: led-sys-pin {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ led_usr_pin: led-usr-pin {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hym8563 {
+ hym8563_int: rtc-int {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ pcie {
+ pcie2_2_rst: pcie2-2-rst {
+ rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc {
+ sd_s0_pwr: sd-s0-pwr {
+ rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+/* eMMC */
+&sdhci {
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ no-sd;
+ no-sdio;
+ non-removable;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vcc_1v8_s3>;
+ status = "okay";
+};
+
+/* microSD card */
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <150000000>;
+ no-mmc;
+ no-sdio;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_sd_s0>;
+ vqmmc-supply = <&vccio_sd_s0>;
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ status = "okay";
+
+ rk806_single: pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc_4v0_sys>;
+ vcc2-supply = <&vcc_4v0_sys>;
+ vcc3-supply = <&vcc_4v0_sys>;
+ vcc4-supply = <&vcc_4v0_sys>;
+ vcc5-supply = <&vcc_4v0_sys>;
+ vcc6-supply = <&vcc_4v0_sys>;
+ vcc7-supply = <&vcc_4v0_sys>;
+ vcc8-supply = <&vcc_4v0_sys>;
+ vcc9-supply = <&vcc_4v0_sys>;
+ vcc10-supply = <&vcc_4v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc_4v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc_4v0_sys>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+/* Debug UART */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2m0_xfer>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts b/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
index 009566d881f3..c2a08bdf09e8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
@@ -261,8 +261,7 @@
&mdio0 {
rgmii_phy0: ethernet-phy@1 {
/* RTL8211F */
- compatible = "ethernet-phy-id001c.c916",
- "ethernet-phy-ieee802.3-c22";
+ compatible = "ethernet-phy-id001c.c916";
reg = <0x1>;
pinctrl-names = "default";
pinctrl-0 = <&rtl8211f_0_rst>;
@@ -275,8 +274,7 @@
&mdio1 {
rgmii_phy1: ethernet-phy@2 {
/* RTL8211F */
- compatible = "ethernet-phy-id001c.c916",
- "ethernet-phy-ieee802.3-c22";
+ compatible = "ethernet-phy-id001c.c916";
reg = <0x2>;
pinctrl-names = "default";
pinctrl-0 = <&rtl8211f_1_rst>;
@@ -376,6 +374,10 @@
status = "okay";
};
+&tsadc {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi
new file mode 100644
index 000000000000..0f1a77697351
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/ {
+ cluster0_opp_table: opp-table-cluster0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
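+ /* opp-microvolt entries are <target min max> microvolts, per the operating-points-v2 binding */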
+ opp-1008000000 {
+ opp-hz = /bits/ 64 <1008000000>;
+ opp-microvolt = <675000 675000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <712500 712500 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <762500 762500 950000>;
+ clock-latency-ns = <40000>;
+ opp-suspend;
+ };
+ opp-1608000000 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <850000 850000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <950000 950000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ };
+
+ cluster1_opp_table: opp-table-cluster1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <675000 675000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <725000 725000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1608000000 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <762500 762500 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <850000 850000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2016000000 {
+ opp-hz = /bits/ 64 <2016000000>;
+ opp-microvolt = <925000 925000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2208000000 {
+ opp-hz = /bits/ 64 <2208000000>;
+ opp-microvolt = <987500 987500 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2400000000 {
+ opp-hz = /bits/ 64 <2400000000>;
+ opp-microvolt = <1000000 1000000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ };
+
+ cluster2_opp_table: opp-table-cluster2 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <675000 675000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <725000 725000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1608000000 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <762500 762500 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <850000 850000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2016000000 {
+ opp-hz = /bits/ 64 <2016000000>;
+ opp-microvolt = <925000 925000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2208000000 {
+ opp-hz = /bits/ 64 <2208000000>;
+ opp-microvolt = <987500 987500 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2400000000 {
+ opp-hz = /bits/ 64 <2400000000>;
+ opp-microvolt = <1000000 1000000 1000000>;
+ clock-latency-ns = <40000>;
+ };
+ };
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <675000 675000 850000>;
+ };
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <700000 700000 850000>;
+ };
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <750000 750000 850000>;
+ };
+ opp-900000000 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <800000 800000 850000>;
+ };
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <850000 850000 850000>;
+ };
+ };
+};
+
+&cpu_b0 {
+ operating-points-v2 = <&cluster1_opp_table>;
+};
+
+&cpu_b1 {
+ operating-points-v2 = <&cluster1_opp_table>;
+};
+
+&cpu_b2 {
+ operating-points-v2 = <&cluster2_opp_table>;
+};
+
+&cpu_b3 {
+ operating-points-v2 = <&cluster2_opp_table>;
+};
+
+&cpu_l0 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&cpu_l1 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&cpu_l2 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&cpu_l3 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&gpu {
+ operating-points-v2 = <&gpu_opp_table>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
index 1a604429fb26..e74871491ef5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
@@ -444,6 +444,7 @@
&sdmmc {
bus-width = <4>;
cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
disable-wp;
max-frequency = <150000000>;
no-sdio;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
index b4f22d95ac0e..e4a20cda65ed 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
@@ -435,6 +435,7 @@
&sdmmc {
bus-width = <4>;
cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
disable-wp;
max-frequency = <150000000>;
no-sdio;
@@ -832,6 +833,8 @@
regulator-name = "vdd_cpu_big1_s0";
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big1_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -845,6 +848,8 @@
regulator-name = "vdd_cpu_big0_s0";
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big0_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -858,6 +863,8 @@
regulator-name = "vdd_cpu_lit_s0";
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_lit_mem_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <550000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
@@ -884,6 +891,8 @@
regulator-name = "vdd_cpu_big1_mem_s0";
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big1_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -898,6 +907,8 @@
regulator-name = "vdd_cpu_big0_mem_s0";
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_big0_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <1050000>;
regulator-ramp-delay = <12500>;
@@ -924,6 +935,8 @@
regulator-name = "vdd_cpu_lit_mem_s0";
regulator-always-on;
regulator-boot-on;
+ regulator-coupled-with = <&vdd_cpu_lit_s0>;
+ regulator-coupled-max-spread = <10000>;
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <12500>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts
new file mode 100644
index 000000000000..d0b922b8d67e
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts
@@ -0,0 +1,1177 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Radxa Limited
+ * Copyright (c) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "dt-bindings/usb/pd.h"
+#include "rk3588.dtsi"
+
+/ {
+ model = "Radxa ROCK 5 ITX";
+ compatible = "radxa,rock-5-itx", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ adc_keys: adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-maskrom {
+ label = "Mask Rom";
+ linux,code = <KEY_SETUP>;
+ press-threshold-microvolt = <1750>;
+ };
+ };
+
+ analog-sound {
+ compatible = "audio-graph-card";
+ label = "rk3588-es8316";
+ dais = <&i2s0_8ch_p0>;
+ hp-det-gpio = <&gpio1 RK_PD5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_detect>;
+ routing = "MIC2", "Mic Jack",
+ "Headphones", "HPOL",
+ "Headphones", "HPOR";
+ widgets = "Microphone", "Mic Jack",
+ "Headphone", "Headphones";
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ power-led1 {
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ hdd-led2 {
+ gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "disk-activity";
+ };
+ };
+
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
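+ /* Five fan steps; cooling-levels values are PWM duty cycles out of 255 */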
+ cooling-levels = <0 64 128 192 255>;
+ fan-supply = <&vcc12v_dcin>;
+ pwms = <&pwm14 0 10000 0>;
+ };
+
+ /* M.2 E-KEY */
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&hym8563>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+ reset-gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_LOW>;
+ };
+
+ typec_vin: regulator-typec-vin {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PB6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vbus5v0_typec_en>;
+ regulator-name = "typec_vin";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc12v_dcin: regulator-vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc33_io64: regulator-vcc33-io64 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc33_io64";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc3v3_ekey: regulator-vcc3v3-ekey {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ekey_en>;
+ regulator-name = "vcc3v3_ekey";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <50000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_lan: vcc3v3_lan_phy2: regulator-vcc3v3-lan {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_lan";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc3v3_mkey: regulator-vcc3v3-mkey {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PA4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x4_pwren_h>;
+ regulator-name = "vcc3v3_mkey";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sys: regulator-vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb20: vcc5v0_usb12: vcc5v0_usb34: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren_h>;
+ regulator-name = "vcc5v0_usb";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy1_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1m2_xfer>;
+ status = "okay";
+
+ vdd_npu_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_npu_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+/* CAM0 connector */
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3m0_xfer>;
+};
+
+/* M.2 E-key */
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4m1_xfer>;
+};
+
+/* RTC and LCD0 connector */
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6m0_xfer>;
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "wifi_32kout";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_int>;
+ };
+};
+
+/* Audio codec and CAM1 connector */
+&i2c7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7m0_xfer>;
+ status = "okay";
+
+ es8316: audio-codec@11 {
+ compatible = "everest,es8316";
+ reg = <0x11>;
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
+
+ port {
+ es8316_p0_0: endpoint {
+ remote-endpoint = <&i2s0_8ch_p0_0>;
+ };
+ };
+ };
+};
+
+/* FUSB302 and LCD1 connector */
+&i2c8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c8m4_xfer>;
+ status = "okay";
+
+ usbc0: usb-typec@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <RK_PB4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbc0_int>;
+ vbus-supply = <&typec_vin>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USB-C";
+ power-role = "source";
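+ /* Advertise a single fixed 5 V / 3 A PDO; PDO_FIXED() takes millivolts, milliamps and flags */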
+ source-pdos =
+ <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_orien_sw: endpoint {
+ remote-endpoint = <&usbdp_phy0_orientation_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_role_sw: endpoint {
+ remote-endpoint = <&dwc3_0_role_switch>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ dp_altmode_mux: endpoint {
+ remote-endpoint = <&usbdp_phy0_dp_altmode_mux>;
+ };
+ };
+ };
+ };
+ };
+};
+
+&i2c8m4_xfer {
+ rockchip,pins =
+ /* i2c8_scl_m4 */
+ <3 RK_PC2 9 &pcfg_pull_up_drv_level_6>,
+ /* i2c8_sda_m4 */
+ <3 RK_PC3 9 &pcfg_pull_up_drv_level_6>;
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+
+ i2s0_8ch_p0: port {
+ i2s0_8ch_p0_0: endpoint {
+ dai-format = "i2s";
+ mclk-fs = <256>;
+ remote-endpoint = <&es8316_p0_0>;
+ };
+ };
+};
+
+&package_thermal {
+ polling-delay = <1000>;
+
+ trips {
+ package_fan0: package-fan0 {
+ hysteresis = <2000>;
+ temperature = <50000>;
+ type = "active";
+ };
+
+ package_fan1: package-fan1 {
+ hysteresis = <2000>;
+ temperature = <65000>;
+ type = "active";
+ };
+ };
+
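+ /* Cap the fan at state 1 for the 50C trip; run at state 2 or higher once the 65C trip fires */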
+ cooling-maps {
+ map0 {
+ cooling-device = <&fan0 THERMAL_NO_LIMIT 1>;
+ trip = <&package_fan0>;
+ };
+ map1 {
+ cooling-device = <&fan0 2 THERMAL_NO_LIMIT>;
+ trip = <&package_fan1>;
+ };
+ };
+};
+
+/* M.2 E-key */
+&pcie2x1l0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x1_0_perstn_m1_l>;
+ reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_ekey>;
+ status = "okay";
+};
+
+/* RTL8125B_1 */
+&pcie2x1l1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x1_1_perstn>;
+ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_lan>;
+ status = "okay";
+};
+
+/* RTL8125B_2 */
+&pcie2x1l2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20x1_2_perstn>;
+ reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_lan_phy2>;
+ status = "okay";
+};
+
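+/* Data lane mapping <1 1 2 2> = x2 x2: port 0 lanes to controller 0 (pcie3x4, M.2 M-key), */
+/* port 1 lanes to controller 1 (pcie3x2, SATA controller) */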
+&pcie30phy {
+ data-lanes = <1 1 2 2>;
+ /* Separate reference clock lines run from the clock generator to the PHY and the devices */
+ rockchip,rx-common-refclk-mode = <0 0 0 0>;
+ status = "okay";
+};
+
+/* ASMedia ASM1164 SATA controller */
+&pcie3x2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x2_perstn_m1_l>;
+ reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc33_io64>;
+ status = "okay";
+};
+
+/* M.2 M-key */
+&pcie3x4 {
+ num-lanes = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x4_perstn_m1_l>;
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_mkey>;
+ status = "okay";
+};
+
+&pinctrl {
+ hym8563 {
+ rtc_int: rtc-int {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ led_pins: led-pins {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>,
+ <0 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie20x1_2_perstn: pcie20x1-2-perstn {
+ rockchip,pins = <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie30x1_0_perstn_m1_l: pcie30x1-0-perstn-m1-l {
+ rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie30x1_1_perstn: pcie30x1-1-perstn {
+ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie30x2_perstn_m1_l: pcie30x2-perstn-m1-l {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie30x4_perstn_m1_l: pcie30x4-perstn-m1-l {
+ rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ ekey_en: ekey-en {
+ rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ pcie30x4_pwren_h: pcie30x4-pwren-h {
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ sound {
+ hp_detect: hp-detect {
+ rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ usb {
+ usb_host_pwren_h: usb-host-pwren-h {
+ rockchip,pins = <3 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_otg_en: vcc5v0-otg-en {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ gl3523_reset: gl3523-reset {
+ rockchip,pins = <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb-typec {
+ usbc0_int: usbc0-int {
+ rockchip,pins = <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ vbus5v0_typec_en: vbus5v0-typec-en {
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hdmirx {
+ hdmirx_det: hdmirx-det {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wireless-wlan {
+ wifi_host_wake_irq: wifi-host-wake-irq {
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ bt {
+ bt_enable_h: bt-enable-h {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_l: bt-host-wake-l {
+ rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_l: bt-wake-l {
+ rockchip,pins = <4 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ dp {
+ dp1_hpd: dp1-hpd {
+ rockchip,pins = <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm14 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm14m1_pins>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ mmc-hs200-1_8v;
+ no-sdio;
+ no-sd;
+ non-removable;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <200000000>;
+ no-sdio;
+ no-mmc;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_bus4 &sdmmc_clk &sdmmc_cmd &sdmmc_det>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+/* M.2 E-KEY */
+&sdio {
+ broken-cd;
+ bus-width = <4>;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ max-frequency = <150000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ no-sd;
+ no-mmc;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdiom0_pins>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_ekey>;
+ status = "okay";
+};
+
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim2_pins>;
+ status = "okay";
+
+ spi_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&spi2 {
+ status = "okay";
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <837500>;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+/* Connected to M.2 E-key */
+&uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart6m1_xfer &uart6m1_ctsn &uart6m1_rtsn>;
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ /* connected to USB3 hub, which is powered by vcc5v0_usb12 */
+ phy-supply = <&vcc5v0_usb12>;
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ /* connected to USB2 hub, which is powered by vcc5v0_usb20 */
+ phy-supply = <&vcc5v0_usb20>;
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_usb20>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dwc3_0_role_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_role_sw>;
+ };
+ };
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ /* 2.0 hub on port 1 */
+ hub_2_0: hub@1 {
+ compatible = "usb5e3,610";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ vdd-supply = <&vcc_3v3_s3>;
+ };
+
+ /* 3.0 hub on port 4 */
+ hub_3_0: hub@2 {
+ compatible = "usb5e3,620";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gl3523_reset>;
+ reset-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&vcc_3v3_s3>;
+ };
+};
+
+&usbdp_phy0 {
+ mode-switch;
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PB7 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PC0 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ usbdp_phy0_orientation_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_orien_sw>;
+ };
+
+ usbdp_phy0_dp_altmode_mux: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dp_altmode_mux>;
+ };
+ };
+};
+
+&usbdp_phy1 {
+ rockchip,dp-lane-mux = <2 3>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso
new file mode 100644
index 000000000000..672d748fcc67
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * DT-overlay to run the PCIe3_4L Dual Mode controller in Endpoint mode
+ * in the SRNS (Separate Reference Clock No Spread) configuration.
+ *
+ * NOTE: If using a setup with two ROCK 5Bs, with one board running in
+ * RC mode and the other board running in EP mode, see also the device
+ * tree overlay: rk3588-rock-5b-pcie-srns.dtso.
+ */
+
+/dts-v1/;
+/plugin/;
+
+&pcie30phy {
+ rockchip,rx-common-refclk-mode = <0 0 0 0>;
+};
+
+&pcie3x4 {
+ status = "disabled";
+};
+
+&pcie3x4_ep {
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-srns.dtso b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-srns.dtso
new file mode 100644
index 000000000000..1a0f1af65c43
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-srns.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * DT-overlay to run the PCIe3_4L Dual Mode controller in Root Complex
+ * mode in the SRNS (Separate Reference Clock No Spread) configuration.
+ *
+ * This device tree overlay is only needed (on the RC side) when running
+ * a setup with two ROCK 5Bs, with one board running in RC mode and the
+ * other board running in EP mode.
+ */
+
+/dts-v1/;
+/plugin/;
+
+&pcie30phy {
+ rockchip,rx-common-refclk-mode = <0 0 0 0>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
index b8e15b76a8a6..966bbc582d89 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
@@ -52,7 +52,7 @@
fan: pwm-fan {
compatible = "pwm-fan";
- cooling-levels = <0 95 145 195 255>;
+ cooling-levels = <0 120 150 180 210 240 255>;
fan-supply = <&vcc5v0_sys>;
pwms = <&pwm1 0 50000 0>;
#cooling-cells = <2>;
@@ -65,6 +65,13 @@
shutdown-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
};
+ rfkill-bt {
+ compatible = "rfkill-gpio";
+ label = "rfkill-m2-bt";
+ radio-type = "bluetooth";
+ shutdown-gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
+ };
+
vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
compatible = "regulator-fixed";
enable-active-high;
@@ -279,6 +286,36 @@
};
};
+&package_thermal {
+ polling-delay = <1000>;
+
+ trips {
+ package_fan0: package-fan0 {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ package_fan1: package-fan1 {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map1 {
+ trip = <&package_fan0>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+ };
+
+ map2 {
+ trip = <&package_fan1>;
+ cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
&pcie2x1l0 {
pinctrl-names = "default";
pinctrl-0 = <&pcie2_0_rst>;
@@ -383,6 +420,7 @@
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
disable-wp;
sd-uhs-sdr104;
vmmc-supply = <&vcc_3v3_s3>;
@@ -411,6 +449,20 @@
status = "okay";
};
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim2_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
&uart6 {
pinctrl-names = "default";
pinctrl-0 = <&uart6m1_xfer &uart6m1_ctsn &uart6m1_rtsn>;
@@ -742,6 +794,10 @@
};
};
+&tsadc {
+ status = "okay";
+};
+
&uart2 {
pinctrl-0 = <&uart2m0_xfer>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
index aebe1fedd2d8..615094bb8ba3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
@@ -344,6 +344,11 @@
};
};
+&pwm0 {
+ pinctrl-0 = <&pwm0m1_pins>;
+ pinctrl-names = "default";
+};
+
&saradc {
vref-supply = <&vcc_1v8_s0>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts b/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts
index 9090c5c99f2a..d0021524e7f9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts
@@ -648,6 +648,10 @@
};
};
+&tsadc {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
index 6b9206ce4a03..dbaa94ca69f4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
@@ -198,8 +198,7 @@
&mdio1 {
rgmii_phy: ethernet-phy@1 {
/* RTL8211F */
- compatible = "ethernet-phy-id001c.c916",
- "ethernet-phy-ieee802.3-c22";
+ compatible = "ethernet-phy-id001c.c916";
reg = <0x1>;
pinctrl-names = "default";
pinctrl-0 = <&rtl8211f_rst>;
@@ -601,6 +600,10 @@
};
};
+&tsadc {
+ status = "okay";
+};
+
&uart2 {
pinctrl-0 = <&uart2m0_xfer>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588.dtsi b/arch/arm64/boot/dts/rockchip/rk3588.dtsi
index 5984016b5f96..7462cc1e1007 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588.dtsi
@@ -1,413 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
- * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ *
*/
-#include "rk3588s.dtsi"
-#include "rk3588-pinctrl.dtsi"
-
-/ {
- usb_host1_xhci: usb@fc400000 {
- compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
- reg = <0x0 0xfc400000 0x0 0x400000>;
- interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru REF_CLK_USB3OTG1>, <&cru SUSPEND_CLK_USB3OTG1>,
- <&cru ACLK_USB3OTG1>;
- clock-names = "ref_clk", "suspend_clk", "bus_clk";
- dr_mode = "otg";
- phys = <&u2phy1_otg>, <&usbdp_phy1 PHY_TYPE_USB3>;
- phy-names = "usb2-phy", "usb3-phy";
- phy_type = "utmi_wide";
- power-domains = <&power RK3588_PD_USB>;
- resets = <&cru SRST_A_USB3OTG1>;
- snps,dis_enblslpm_quirk;
- snps,dis-u2-freeclk-exists-quirk;
- snps,dis-del-phy-power-chg-quirk;
- snps,dis-tx-ipgap-linecheck-quirk;
- status = "disabled";
- };
-
- pcie30_phy_grf: syscon@fd5b8000 {
- compatible = "rockchip,rk3588-pcie3-phy-grf", "syscon";
- reg = <0x0 0xfd5b8000 0x0 0x10000>;
- };
-
- pipe_phy1_grf: syscon@fd5c0000 {
- compatible = "rockchip,rk3588-pipe-phy-grf", "syscon";
- reg = <0x0 0xfd5c0000 0x0 0x100>;
- };
-
- usbdpphy1_grf: syscon@fd5cc000 {
- compatible = "rockchip,rk3588-usbdpphy-grf", "syscon";
- reg = <0x0 0xfd5cc000 0x0 0x4000>;
- };
-
- usb2phy1_grf: syscon@fd5d4000 {
- compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
- reg = <0x0 0xfd5d4000 0x0 0x4000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- u2phy1: usb2phy@4000 {
- compatible = "rockchip,rk3588-usb2phy";
- reg = <0x4000 0x10>;
- #clock-cells = <0>;
- clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
- clock-names = "phyclk";
- clock-output-names = "usb480m_phy1";
- interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH 0>;
- resets = <&cru SRST_OTGPHY_U3_1>, <&cru SRST_P_USB2PHY_U3_1_GRF0>;
- reset-names = "phy", "apb";
- status = "disabled";
-
- u2phy1_otg: otg-port {
- #phy-cells = <0>;
- status = "disabled";
- };
- };
- };
-
- i2s8_8ch: i2s@fddc8000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfddc8000 0x0 0x1000>;
- interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S8_8CH_TX>, <&cru MCLK_I2S8_8CH_TX>, <&cru HCLK_I2S8_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S8_8CH_TX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac2 22>;
- dma-names = "tx";
- power-domains = <&power RK3588_PD_VO0>;
- resets = <&cru SRST_M_I2S8_8CH_TX>;
- reset-names = "tx-m";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s6_8ch: i2s@fddf4000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfddf4000 0x0 0x1000>;
- interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S6_8CH_TX>, <&cru MCLK_I2S6_8CH_TX>, <&cru HCLK_I2S6_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S6_8CH_TX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac2 4>;
- dma-names = "tx";
- power-domains = <&power RK3588_PD_VO1>;
- resets = <&cru SRST_M_I2S6_8CH_TX>;
- reset-names = "tx-m";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s7_8ch: i2s@fddf8000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfddf8000 0x0 0x1000>;
- interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S7_8CH_RX>, <&cru MCLK_I2S7_8CH_RX>, <&cru HCLK_I2S7_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S7_8CH_RX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac2 21>;
- dma-names = "rx";
- power-domains = <&power RK3588_PD_VO1>;
- resets = <&cru SRST_M_I2S7_8CH_RX>;
- reset-names = "rx-m";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s10_8ch: i2s@fde00000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfde00000 0x0 0x1000>;
- interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S10_8CH_RX>, <&cru MCLK_I2S10_8CH_RX>, <&cru HCLK_I2S10_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S10_8CH_RX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac2 24>;
- dma-names = "rx";
- power-domains = <&power RK3588_PD_VO1>;
- resets = <&cru SRST_M_I2S10_8CH_RX>;
- reset-names = "rx-m";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- pcie3x4: pcie@fe150000 {
- compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
- #address-cells = <3>;
- #size-cells = <2>;
- bus-range = <0x00 0x0f>;
- clocks = <&cru ACLK_PCIE_4L_MSTR>, <&cru ACLK_PCIE_4L_SLV>,
- <&cru ACLK_PCIE_4L_DBI>, <&cru PCLK_PCIE_4L>,
- <&cru CLK_PCIE_AUX0>, <&cru CLK_PCIE4L_PIPE>;
- clock-names = "aclk_mst", "aclk_slv",
- "aclk_dbi", "pclk",
- "aux", "pipe";
- device_type = "pci";
- interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sys", "pmc", "msg", "legacy", "err";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie3x4_intc 0>,
- <0 0 0 2 &pcie3x4_intc 1>,
- <0 0 0 3 &pcie3x4_intc 2>,
- <0 0 0 4 &pcie3x4_intc 3>;
- linux,pci-domain = <0>;
- max-link-speed = <3>;
- msi-map = <0x0000 &its1 0x0000 0x1000>;
- num-lanes = <4>;
- phys = <&pcie30phy>;
- phy-names = "pcie-phy";
- power-domains = <&power RK3588_PD_PCIE>;
- ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
- <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x00e00000>,
- <0x03000000 0x0 0x40000000 0x9 0x00000000 0x0 0x40000000>;
- reg = <0xa 0x40000000 0x0 0x00400000>,
- <0x0 0xfe150000 0x0 0x00010000>,
- <0x0 0xf0000000 0x0 0x00100000>;
- reg-names = "dbi", "apb", "config";
- resets = <&cru SRST_PCIE0_POWER_UP>, <&cru SRST_P_PCIE0>;
- reset-names = "pwr", "pipe";
- status = "disabled";
-
- pcie3x4_intc: legacy-interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 260 IRQ_TYPE_EDGE_RISING 0>;
- };
- };
-
- pcie3x2: pcie@fe160000 {
- compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
- #address-cells = <3>;
- #size-cells = <2>;
- bus-range = <0x10 0x1f>;
- clocks = <&cru ACLK_PCIE_2L_MSTR>, <&cru ACLK_PCIE_2L_SLV>,
- <&cru ACLK_PCIE_2L_DBI>, <&cru PCLK_PCIE_2L>,
- <&cru CLK_PCIE_AUX1>, <&cru CLK_PCIE2L_PIPE>;
- clock-names = "aclk_mst", "aclk_slv",
- "aclk_dbi", "pclk",
- "aux", "pipe";
- device_type = "pci";
- interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sys", "pmc", "msg", "legacy", "err";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie3x2_intc 0>,
- <0 0 0 2 &pcie3x2_intc 1>,
- <0 0 0 3 &pcie3x2_intc 2>,
- <0 0 0 4 &pcie3x2_intc 3>;
- linux,pci-domain = <1>;
- max-link-speed = <3>;
- msi-map = <0x1000 &its1 0x1000 0x1000>;
- num-lanes = <2>;
- phys = <&pcie30phy>;
- phy-names = "pcie-phy";
- power-domains = <&power RK3588_PD_PCIE>;
- ranges = <0x01000000 0x0 0xf1100000 0x0 0xf1100000 0x0 0x00100000>,
- <0x02000000 0x0 0xf1200000 0x0 0xf1200000 0x0 0x00e00000>,
- <0x03000000 0x0 0x40000000 0x9 0x40000000 0x0 0x40000000>;
- reg = <0xa 0x40400000 0x0 0x00400000>,
- <0x0 0xfe160000 0x0 0x00010000>,
- <0x0 0xf1000000 0x0 0x00100000>;
- reg-names = "dbi", "apb", "config";
- resets = <&cru SRST_PCIE1_POWER_UP>, <&cru SRST_P_PCIE1>;
- reset-names = "pwr", "pipe";
- status = "disabled";
-
- pcie3x2_intc: legacy-interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 255 IRQ_TYPE_EDGE_RISING 0>;
- };
- };
-
- pcie2x1l0: pcie@fe170000 {
- compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
- bus-range = <0x20 0x2f>;
- clocks = <&cru ACLK_PCIE_1L0_MSTR>, <&cru ACLK_PCIE_1L0_SLV>,
- <&cru ACLK_PCIE_1L0_DBI>, <&cru PCLK_PCIE_1L0>,
- <&cru CLK_PCIE_AUX2>, <&cru CLK_PCIE1L0_PIPE>;
- clock-names = "aclk_mst", "aclk_slv",
- "aclk_dbi", "pclk",
- "aux", "pipe";
- device_type = "pci";
- interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sys", "pmc", "msg", "legacy", "err";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie2x1l0_intc 0>,
- <0 0 0 2 &pcie2x1l0_intc 1>,
- <0 0 0 3 &pcie2x1l0_intc 2>,
- <0 0 0 4 &pcie2x1l0_intc 3>;
- linux,pci-domain = <2>;
- max-link-speed = <2>;
- msi-map = <0x2000 &its0 0x2000 0x1000>;
- num-lanes = <1>;
- phys = <&combphy1_ps PHY_TYPE_PCIE>;
- phy-names = "pcie-phy";
- power-domains = <&power RK3588_PD_PCIE>;
- ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
- <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x00e00000>,
- <0x03000000 0x0 0x40000000 0x9 0x80000000 0x0 0x40000000>;
- reg = <0xa 0x40800000 0x0 0x00400000>,
- <0x0 0xfe170000 0x0 0x00010000>,
- <0x0 0xf2000000 0x0 0x00100000>;
- reg-names = "dbi", "apb", "config";
- resets = <&cru SRST_PCIE2_POWER_UP>, <&cru SRST_P_PCIE2>;
- reset-names = "pwr", "pipe";
- #address-cells = <3>;
- #size-cells = <2>;
- status = "disabled";
-
- pcie2x1l0_intc: legacy-interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 240 IRQ_TYPE_EDGE_RISING 0>;
- };
- };
-
- gmac0: ethernet@fe1b0000 {
- compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
- reg = <0x0 0xfe1b0000 0x0 0x10000>;
- interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "macirq", "eth_wake_irq";
- clocks = <&cru CLK_GMAC_125M>, <&cru CLK_GMAC_50M>,
- <&cru PCLK_GMAC0>, <&cru ACLK_GMAC0>,
- <&cru CLK_GMAC0_PTP_REF>;
- clock-names = "stmmaceth", "clk_mac_ref",
- "pclk_mac", "aclk_mac",
- "ptp_ref";
- power-domains = <&power RK3588_PD_GMAC>;
- resets = <&cru SRST_A_GMAC0>;
- reset-names = "stmmaceth";
- rockchip,grf = <&sys_grf>;
- rockchip,php-grf = <&php_grf>;
- snps,axi-config = <&gmac0_stmmac_axi_setup>;
- snps,mixed-burst;
- snps,mtl-rx-config = <&gmac0_mtl_rx_setup>;
- snps,mtl-tx-config = <&gmac0_mtl_tx_setup>;
- snps,tso;
- status = "disabled";
-
- mdio0: mdio {
- compatible = "snps,dwmac-mdio";
- #address-cells = <0x1>;
- #size-cells = <0x0>;
- };
-
- gmac0_stmmac_axi_setup: stmmac-axi-config {
- snps,blen = <0 0 0 0 16 8 4>;
- snps,wr_osr_lmt = <4>;
- snps,rd_osr_lmt = <8>;
- };
-
- gmac0_mtl_rx_setup: rx-queues-config {
- snps,rx-queues-to-use = <2>;
- queue0 {};
- queue1 {};
- };
-
- gmac0_mtl_tx_setup: tx-queues-config {
- snps,tx-queues-to-use = <2>;
- queue0 {};
- queue1 {};
- };
- };
-
- sata1: sata@fe220000 {
- compatible = "rockchip,rk3588-dwc-ahci", "snps,dwc-ahci";
- reg = <0 0xfe220000 0 0x1000>;
- interrupts = <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_SATA1>, <&cru CLK_PMALIVE1>,
- <&cru CLK_RXOOB1>, <&cru CLK_PIPEPHY1_REF>,
- <&cru CLK_PIPEPHY1_PIPE_ASIC_G>;
- clock-names = "sata", "pmalive", "rxoob", "ref", "asic";
- ports-implemented = <0x1>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata-port@0 {
- reg = <0>;
- hba-port-cap = <HBA_PORT_FBSCP>;
- phys = <&combphy1_ps PHY_TYPE_SATA>;
- phy-names = "sata-phy";
- snps,rx-ts-max = <32>;
- snps,tx-ts-max = <32>;
- };
- };
-
- usbdp_phy1: phy@fed90000 {
- compatible = "rockchip,rk3588-usbdp-phy";
- reg = <0x0 0xfed90000 0x0 0x10000>;
- #phy-cells = <1>;
- clocks = <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>,
- <&cru CLK_USBDP_PHY1_IMMORTAL>,
- <&cru PCLK_USBDPPHY1>,
- <&u2phy1>;
- clock-names = "refclk", "immortal", "pclk", "utmi";
- resets = <&cru SRST_USBDP_COMBO_PHY1_INIT>,
- <&cru SRST_USBDP_COMBO_PHY1_CMN>,
- <&cru SRST_USBDP_COMBO_PHY1_LANE>,
- <&cru SRST_USBDP_COMBO_PHY1_PCS>,
- <&cru SRST_P_USBDPPHY1>;
- reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
- rockchip,u2phy-grf = <&usb2phy1_grf>;
- rockchip,usb-grf = <&usb_grf>;
- rockchip,usbdpphy-grf = <&usbdpphy1_grf>;
- rockchip,vo-grf = <&vo0_grf>;
- status = "disabled";
- };
-
- combphy1_ps: phy@fee10000 {
- compatible = "rockchip,rk3588-naneng-combphy";
- reg = <0x0 0xfee10000 0x0 0x100>;
- clocks = <&cru CLK_REF_PIPE_PHY1>, <&cru PCLK_PCIE_COMBO_PIPE_PHY1>,
- <&cru PCLK_PHP_ROOT>;
- clock-names = "ref", "apb", "pipe";
- assigned-clocks = <&cru CLK_REF_PIPE_PHY1>;
- assigned-clock-rates = <100000000>;
- #phy-cells = <1>;
- resets = <&cru SRST_REF_PIPE_PHY1>, <&cru SRST_P_PCIE2_PHY1>;
- reset-names = "phy", "apb";
- rockchip,pipe-grf = <&php_grf>;
- rockchip,pipe-phy-grf = <&pipe_phy1_grf>;
- status = "disabled";
- };
-
- pcie30phy: phy@fee80000 {
- compatible = "rockchip,rk3588-pcie3-phy";
- reg = <0x0 0xfee80000 0x0 0x20000>;
- #phy-cells = <0>;
- clocks = <&cru PCLK_PCIE_COMBO_PIPE_PHY>;
- clock-names = "pclk";
- resets = <&cru SRST_PCIE30_PHY>;
- reset-names = "phy";
- rockchip,pipe-grf = <&php_grf>;
- rockchip,phy-grf = <&pcie30_phy_grf>;
- status = "disabled";
- };
-};
+#include "rk3588-extra.dtsi"
+#include "rk3588-opp.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
index 38b9dbf38a21..bce72bac4503 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
@@ -4,4 +4,145 @@
*
*/
-#include "rk3588.dtsi"
+#include "rk3588-extra.dtsi"
+
+/ {
+ cluster0_opp_table: opp-table-cluster0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <750000 750000 950000>;
+ clock-latency-ns = <40000>;
+ opp-suspend;
+ };
+ opp-1608000000 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <887500 887500 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1704000000 {
+ opp-hz = /bits/ 64 <1704000000>;
+ opp-microvolt = <937500 937500 950000>;
+ clock-latency-ns = <40000>;
+ };
+ };
+
+ cluster1_opp_table: opp-table-cluster1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <750000 750000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1608000000 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <787500 787500 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <875000 875000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2016000000 {
+ opp-hz = /bits/ 64 <2016000000>;
+ opp-microvolt = <950000 950000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ };
+
+ cluster2_opp_table: opp-table-cluster2 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <750000 750000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1608000000 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <787500 787500 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <875000 875000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ opp-2016000000 {
+ opp-hz = /bits/ 64 <2016000000>;
+ opp-microvolt = <950000 950000 950000>;
+ clock-latency-ns = <40000>;
+ };
+ };
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <750000 750000 850000>;
+ };
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <750000 750000 850000>;
+ };
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <750000 750000 850000>;
+ };
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <750000 750000 850000>;
+ };
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <750000 750000 850000>;
+ };
+ opp-850000000 {
+ opp-hz = /bits/ 64 <850000000>;
+ opp-microvolt = <787500 787500 850000>;
+ };
+ };
+};
+
+&cpu_b0 {
+ operating-points-v2 = <&cluster1_opp_table>;
+};
+
+&cpu_b1 {
+ operating-points-v2 = <&cluster1_opp_table>;
+};
+
+&cpu_b2 {
+ operating-points-v2 = <&cluster2_opp_table>;
+};
+
+&cpu_b3 {
+ operating-points-v2 = <&cluster2_opp_table>;
+};
+
+&cpu_l0 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&cpu_l1 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&cpu_l2 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&cpu_l3 {
+ operating-points-v2 = <&cluster0_opp_table>;
+};
+
+&gpu {
+ operating-points-v2 = <&gpu_opp_table>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
index 3b2ec1d0c542..074c316a9a69 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
@@ -288,9 +288,9 @@
pinctrl-0 = <&i2c7m0_xfer>;
status = "okay";
- es8316: audio-codec@11 {
+ es8316: audio-codec@10 {
compatible = "everest,es8316";
- reg = <0x11>;
+ reg = <0x10>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clock-rates = <12288000>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
index 8e2a07612d17..03ed48246d36 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
@@ -366,6 +366,7 @@
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
disable-wp;
max-frequency = <150000000>;
no-sdio;
@@ -376,6 +377,19 @@
status = "okay";
};
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim0_pins>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
&spi2 {
status = "okay";
assigned-clocks = <&cru CLK_SPI2>;
@@ -393,6 +407,7 @@
pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
@@ -697,6 +712,10 @@
};
};
+&tsadc {
+ status = "okay";
+};
+
&u2phy0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
index 6ac5ac8b48ab..c7fecf8fe7ec 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
@@ -1,2670 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
- * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ *
*/
-#include <dt-bindings/clock/rockchip,rk3588-cru.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/power/rk3588-power.h>
-#include <dt-bindings/reset/rockchip,rk3588-cru.h>
-#include <dt-bindings/phy/phy.h>
-#include <dt-bindings/ata/ahci.h>
-
-/ {
- compatible = "rockchip,rk3588";
-
- interrupt-parent = <&gic>;
- #address-cells = <2>;
- #size-cells = <2>;
-
- aliases {
- gpio0 = &gpio0;
- gpio1 = &gpio1;
- gpio2 = &gpio2;
- gpio3 = &gpio3;
- gpio4 = &gpio4;
- i2c0 = &i2c0;
- i2c1 = &i2c1;
- i2c2 = &i2c2;
- i2c3 = &i2c3;
- i2c4 = &i2c4;
- i2c5 = &i2c5;
- i2c6 = &i2c6;
- i2c7 = &i2c7;
- i2c8 = &i2c8;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &uart6;
- serial7 = &uart7;
- serial8 = &uart8;
- serial9 = &uart9;
- spi0 = &spi0;
- spi1 = &spi1;
- spi2 = &spi2;
- spi3 = &spi3;
- spi4 = &spi4;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&cpu_l0>;
- };
- core1 {
- cpu = <&cpu_l1>;
- };
- core2 {
- cpu = <&cpu_l2>;
- };
- core3 {
- cpu = <&cpu_l3>;
- };
- };
- cluster1 {
- core0 {
- cpu = <&cpu_b0>;
- };
- core1 {
- cpu = <&cpu_b1>;
- };
- };
- cluster2 {
- core0 {
- cpu = <&cpu_b2>;
- };
- core1 {
- cpu = <&cpu_b3>;
- };
- };
- };
-
- cpu_l0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a55";
- reg = <0x0>;
- enable-method = "psci";
- capacity-dmips-mhz = <530>;
- clocks = <&scmi_clk SCMI_CLK_CPUL>;
- assigned-clocks = <&scmi_clk SCMI_CLK_CPUL>;
- assigned-clock-rates = <816000000>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <32768>;
- i-cache-line-size = <64>;
- i-cache-sets = <128>;
- d-cache-size = <32768>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l0>;
- dynamic-power-coefficient = <228>;
- #cooling-cells = <2>;
- };
-
- cpu_l1: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a55";
- reg = <0x100>;
- enable-method = "psci";
- capacity-dmips-mhz = <530>;
- clocks = <&scmi_clk SCMI_CLK_CPUL>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <32768>;
- i-cache-line-size = <64>;
- i-cache-sets = <128>;
- d-cache-size = <32768>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l1>;
- dynamic-power-coefficient = <228>;
- #cooling-cells = <2>;
- };
-
- cpu_l2: cpu@200 {
- device_type = "cpu";
- compatible = "arm,cortex-a55";
- reg = <0x200>;
- enable-method = "psci";
- capacity-dmips-mhz = <530>;
- clocks = <&scmi_clk SCMI_CLK_CPUL>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <32768>;
- i-cache-line-size = <64>;
- i-cache-sets = <128>;
- d-cache-size = <32768>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l2>;
- dynamic-power-coefficient = <228>;
- #cooling-cells = <2>;
- };
-
- cpu_l3: cpu@300 {
- device_type = "cpu";
- compatible = "arm,cortex-a55";
- reg = <0x300>;
- enable-method = "psci";
- capacity-dmips-mhz = <530>;
- clocks = <&scmi_clk SCMI_CLK_CPUL>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <32768>;
- i-cache-line-size = <64>;
- i-cache-sets = <128>;
- d-cache-size = <32768>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l3>;
- dynamic-power-coefficient = <228>;
- #cooling-cells = <2>;
- };
-
- cpu_b0: cpu@400 {
- device_type = "cpu";
- compatible = "arm,cortex-a76";
- reg = <0x400>;
- enable-method = "psci";
- capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk SCMI_CLK_CPUB01>;
- assigned-clocks = <&scmi_clk SCMI_CLK_CPUB01>;
- assigned-clock-rates = <816000000>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <65536>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <65536>;
- d-cache-line-size = <64>;
- d-cache-sets = <256>;
- next-level-cache = <&l2_cache_b0>;
- dynamic-power-coefficient = <416>;
- #cooling-cells = <2>;
- };
-
- cpu_b1: cpu@500 {
- device_type = "cpu";
- compatible = "arm,cortex-a76";
- reg = <0x500>;
- enable-method = "psci";
- capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk SCMI_CLK_CPUB01>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <65536>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <65536>;
- d-cache-line-size = <64>;
- d-cache-sets = <256>;
- next-level-cache = <&l2_cache_b1>;
- dynamic-power-coefficient = <416>;
- #cooling-cells = <2>;
- };
-
- cpu_b2: cpu@600 {
- device_type = "cpu";
- compatible = "arm,cortex-a76";
- reg = <0x600>;
- enable-method = "psci";
- capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk SCMI_CLK_CPUB23>;
- assigned-clocks = <&scmi_clk SCMI_CLK_CPUB23>;
- assigned-clock-rates = <816000000>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <65536>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <65536>;
- d-cache-line-size = <64>;
- d-cache-sets = <256>;
- next-level-cache = <&l2_cache_b2>;
- dynamic-power-coefficient = <416>;
- #cooling-cells = <2>;
- };
-
- cpu_b3: cpu@700 {
- device_type = "cpu";
- compatible = "arm,cortex-a76";
- reg = <0x700>;
- enable-method = "psci";
- capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk SCMI_CLK_CPUB23>;
- cpu-idle-states = <&CPU_SLEEP>;
- i-cache-size = <65536>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <65536>;
- d-cache-line-size = <64>;
- d-cache-sets = <256>;
- next-level-cache = <&l2_cache_b3>;
- dynamic-power-coefficient = <416>;
- #cooling-cells = <2>;
- };
-
- idle-states {
- entry-method = "psci";
- CPU_SLEEP: cpu-sleep {
- compatible = "arm,idle-state";
- local-timer-stop;
- arm,psci-suspend-param = <0x0010000>;
- entry-latency-us = <100>;
- exit-latency-us = <120>;
- min-residency-us = <1000>;
- };
- };
-
- l2_cache_l0: l2-cache-l0 {
- compatible = "cache";
- cache-size = <131072>;
- cache-line-size = <64>;
- cache-sets = <512>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l2_cache_l1: l2-cache-l1 {
- compatible = "cache";
- cache-size = <131072>;
- cache-line-size = <64>;
- cache-sets = <512>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l2_cache_l2: l2-cache-l2 {
- compatible = "cache";
- cache-size = <131072>;
- cache-line-size = <64>;
- cache-sets = <512>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l2_cache_l3: l2-cache-l3 {
- compatible = "cache";
- cache-size = <131072>;
- cache-line-size = <64>;
- cache-sets = <512>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l2_cache_b0: l2-cache-b0 {
- compatible = "cache";
- cache-size = <524288>;
- cache-line-size = <64>;
- cache-sets = <1024>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l2_cache_b1: l2-cache-b1 {
- compatible = "cache";
- cache-size = <524288>;
- cache-line-size = <64>;
- cache-sets = <1024>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l2_cache_b2: l2-cache-b2 {
- compatible = "cache";
- cache-size = <524288>;
- cache-line-size = <64>;
- cache-sets = <1024>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l2_cache_b3: l2-cache-b3 {
- compatible = "cache";
- cache-size = <524288>;
- cache-line-size = <64>;
- cache-sets = <1024>;
- cache-level = <2>;
- cache-unified;
- next-level-cache = <&l3_cache>;
- };
-
- l3_cache: l3-cache {
- compatible = "cache";
- cache-size = <3145728>;
- cache-line-size = <64>;
- cache-sets = <4096>;
- cache-level = <3>;
- cache-unified;
- };
- };
-
- display_subsystem: display-subsystem {
- compatible = "rockchip,display-subsystem";
- ports = <&vop_out>;
- };
-
- firmware {
- optee: optee {
- compatible = "linaro,optee-tz";
- method = "smc";
- };
-
- scmi: scmi {
- compatible = "arm,scmi-smc";
- arm,smc-id = <0x82000010>;
- shmem = <&scmi_shmem>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- scmi_clk: protocol@14 {
- reg = <0x14>;
- #clock-cells = <1>;
- };
-
- scmi_reset: protocol@16 {
- reg = <0x16>;
- #reset-cells = <1>;
- };
- };
- };
-
- pmu-a55 {
- compatible = "arm,cortex-a55-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH &ppi_partition0>;
- };
-
- pmu-a76 {
- compatible = "arm,cortex-a76-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH &ppi_partition1>;
- };
-
- psci {
- compatible = "arm,psci-1.0";
- method = "smc";
- };
-
- spll: clock-0 {
- compatible = "fixed-clock";
- clock-frequency = <702000000>;
- clock-output-names = "spll";
- #clock-cells = <0>;
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_PPI 12 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", "hyp-virt";
- };
-
- xin24m: clock-1 {
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- clock-output-names = "xin24m";
- #clock-cells = <0>;
- };
-
- xin32k: clock-2 {
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- clock-output-names = "xin32k";
- #clock-cells = <0>;
- };
-
- pmu_sram: sram@10f000 {
- compatible = "mmio-sram";
- reg = <0x0 0x0010f000 0x0 0x100>;
- ranges = <0 0x0 0x0010f000 0x100>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- scmi_shmem: sram@0 {
- compatible = "arm,scmi-shmem";
- reg = <0x0 0x100>;
- };
- };
-
- gpu: gpu@fb000000 {
- compatible = "rockchip,rk3588-mali", "arm,mali-valhall-csf";
- reg = <0x0 0xfb000000 0x0 0x200000>;
- #cooling-cells = <2>;
- assigned-clocks = <&scmi_clk SCMI_CLK_GPU>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru CLK_GPU>, <&cru CLK_GPU_COREGROUP>,
- <&cru CLK_GPU_STACKS>;
- clock-names = "core", "coregroup", "stacks";
- dynamic-power-coefficient = <2982>;
- interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "job", "mmu", "gpu";
- operating-points-v2 = <&gpu_opp_table>;
- power-domains = <&power RK3588_PD_GPU>;
- status = "disabled";
-
- gpu_opp_table: opp-table {
- compatible = "operating-points-v2";
-
- opp-300000000 {
- opp-hz = /bits/ 64 <300000000>;
- opp-microvolt = <675000 675000 850000>;
- };
- opp-400000000 {
- opp-hz = /bits/ 64 <400000000>;
- opp-microvolt = <675000 675000 850000>;
- };
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <675000 675000 850000>;
- };
- opp-600000000 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <675000 675000 850000>;
- };
- opp-700000000 {
- opp-hz = /bits/ 64 <700000000>;
- opp-microvolt = <700000 700000 850000>;
- };
- opp-800000000 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <750000 750000 850000>;
- };
- opp-900000000 {
- opp-hz = /bits/ 64 <900000000>;
- opp-microvolt = <800000 800000 850000>;
- };
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <850000 850000 850000>;
- };
- };
- };
-
- usb_host0_xhci: usb@fc000000 {
- compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
- reg = <0x0 0xfc000000 0x0 0x400000>;
- interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru REF_CLK_USB3OTG0>, <&cru SUSPEND_CLK_USB3OTG0>,
- <&cru ACLK_USB3OTG0>;
- clock-names = "ref_clk", "suspend_clk", "bus_clk";
- dr_mode = "otg";
- phys = <&u2phy0_otg>, <&usbdp_phy0 PHY_TYPE_USB3>;
- phy-names = "usb2-phy", "usb3-phy";
- phy_type = "utmi_wide";
- power-domains = <&power RK3588_PD_USB>;
- resets = <&cru SRST_A_USB3OTG0>;
- snps,dis_enblslpm_quirk;
- snps,dis-u1-entry-quirk;
- snps,dis-u2-entry-quirk;
- snps,dis-u2-freeclk-exists-quirk;
- snps,dis-del-phy-power-chg-quirk;
- snps,dis-tx-ipgap-linecheck-quirk;
- status = "disabled";
- };
-
- usb_host0_ehci: usb@fc800000 {
- compatible = "rockchip,rk3588-ehci", "generic-ehci";
- reg = <0x0 0xfc800000 0x0 0x40000>;
- interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST_ARB0>, <&cru ACLK_USB>, <&u2phy2>;
- phys = <&u2phy2_host>;
- phy-names = "usb";
- power-domains = <&power RK3588_PD_USB>;
- status = "disabled";
- };
-
- usb_host0_ohci: usb@fc840000 {
- compatible = "rockchip,rk3588-ohci", "generic-ohci";
- reg = <0x0 0xfc840000 0x0 0x40000>;
- interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST_ARB0>, <&cru ACLK_USB>, <&u2phy2>;
- phys = <&u2phy2_host>;
- phy-names = "usb";
- power-domains = <&power RK3588_PD_USB>;
- status = "disabled";
- };
-
- usb_host1_ehci: usb@fc880000 {
- compatible = "rockchip,rk3588-ehci", "generic-ehci";
- reg = <0x0 0xfc880000 0x0 0x40000>;
- interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST_ARB1>, <&cru ACLK_USB>, <&u2phy3>;
- phys = <&u2phy3_host>;
- phy-names = "usb";
- power-domains = <&power RK3588_PD_USB>;
- status = "disabled";
- };
-
- usb_host1_ohci: usb@fc8c0000 {
- compatible = "rockchip,rk3588-ohci", "generic-ohci";
- reg = <0x0 0xfc8c0000 0x0 0x40000>;
- interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST_ARB1>, <&cru ACLK_USB>, <&u2phy3>;
- phys = <&u2phy3_host>;
- phy-names = "usb";
- power-domains = <&power RK3588_PD_USB>;
- status = "disabled";
- };
-
- usb_host2_xhci: usb@fcd00000 {
- compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
- reg = <0x0 0xfcd00000 0x0 0x400000>;
- interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru REF_CLK_USB3OTG2>, <&cru SUSPEND_CLK_USB3OTG2>,
- <&cru ACLK_USB3OTG2>, <&cru CLK_UTMI_OTG2>,
- <&cru CLK_PIPEPHY2_PIPE_U3_G>;
- clock-names = "ref_clk", "suspend_clk", "bus_clk", "utmi", "pipe";
- dr_mode = "host";
- phys = <&combphy2_psu PHY_TYPE_USB3>;
- phy-names = "usb3-phy";
- phy_type = "utmi_wide";
- resets = <&cru SRST_A_USB3OTG2>;
- snps,dis_enblslpm_quirk;
- snps,dis-u2-freeclk-exists-quirk;
- snps,dis-del-phy-power-chg-quirk;
- snps,dis-tx-ipgap-linecheck-quirk;
- snps,dis_rxdet_inp3_quirk;
- status = "disabled";
- };
-
- mmu600_pcie: iommu@fc900000 {
- compatible = "arm,smmu-v3";
- reg = <0x0 0xfc900000 0x0 0x200000>;
- interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
- #iommu-cells = <1>;
- status = "disabled";
- };
-
- mmu600_php: iommu@fcb00000 {
- compatible = "arm,smmu-v3";
- reg = <0x0 0xfcb00000 0x0 0x200000>;
- interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
- #iommu-cells = <1>;
- status = "disabled";
- };
-
- pmu1grf: syscon@fd58a000 {
- compatible = "rockchip,rk3588-pmugrf", "syscon", "simple-mfd";
- reg = <0x0 0xfd58a000 0x0 0x10000>;
- };
-
- sys_grf: syscon@fd58c000 {
- compatible = "rockchip,rk3588-sys-grf", "syscon";
- reg = <0x0 0xfd58c000 0x0 0x1000>;
- };
-
- vop_grf: syscon@fd5a4000 {
- compatible = "rockchip,rk3588-vop-grf", "syscon";
- reg = <0x0 0xfd5a4000 0x0 0x2000>;
- };
-
- vo0_grf: syscon@fd5a6000 {
- compatible = "rockchip,rk3588-vo-grf", "syscon";
- reg = <0x0 0xfd5a6000 0x0 0x2000>;
- clocks = <&cru PCLK_VO0GRF>;
- };
-
- vo1_grf: syscon@fd5a8000 {
- compatible = "rockchip,rk3588-vo-grf", "syscon";
- reg = <0x0 0xfd5a8000 0x0 0x100>;
- clocks = <&cru PCLK_VO1GRF>;
- };
-
- usb_grf: syscon@fd5ac000 {
- compatible = "rockchip,rk3588-usb-grf", "syscon";
- reg = <0x0 0xfd5ac000 0x0 0x4000>;
- };
-
- php_grf: syscon@fd5b0000 {
- compatible = "rockchip,rk3588-php-grf", "syscon";
- reg = <0x0 0xfd5b0000 0x0 0x1000>;
- };
-
- pipe_phy0_grf: syscon@fd5bc000 {
- compatible = "rockchip,rk3588-pipe-phy-grf", "syscon";
- reg = <0x0 0xfd5bc000 0x0 0x100>;
- };
-
- pipe_phy2_grf: syscon@fd5c4000 {
- compatible = "rockchip,rk3588-pipe-phy-grf", "syscon";
- reg = <0x0 0xfd5c4000 0x0 0x100>;
- };
-
- usbdpphy0_grf: syscon@fd5c8000 {
- compatible = "rockchip,rk3588-usbdpphy-grf", "syscon";
- reg = <0x0 0xfd5c8000 0x0 0x4000>;
- };
-
- usb2phy0_grf: syscon@fd5d0000 {
- compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
- reg = <0x0 0xfd5d0000 0x0 0x4000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- u2phy0: usb2phy@0 {
- compatible = "rockchip,rk3588-usb2phy";
- reg = <0x0 0x10>;
- #clock-cells = <0>;
- clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
- clock-names = "phyclk";
- clock-output-names = "usb480m_phy0";
- interrupts = <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH 0>;
- resets = <&cru SRST_OTGPHY_U3_0>, <&cru SRST_P_USB2PHY_U3_0_GRF0>;
- reset-names = "phy", "apb";
- status = "disabled";
-
- u2phy0_otg: otg-port {
- #phy-cells = <0>;
- status = "disabled";
- };
- };
- };
-
- usb2phy2_grf: syscon@fd5d8000 {
- compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
- reg = <0x0 0xfd5d8000 0x0 0x4000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- u2phy2: usb2phy@8000 {
- compatible = "rockchip,rk3588-usb2phy";
- reg = <0x8000 0x10>;
- #clock-cells = <0>;
- clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
- clock-names = "phyclk";
- clock-output-names = "usb480m_phy2";
- interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH 0>;
- resets = <&cru SRST_OTGPHY_U2_0>, <&cru SRST_P_USB2PHY_U2_0_GRF0>;
- reset-names = "phy", "apb";
- status = "disabled";
-
- u2phy2_host: host-port {
- #phy-cells = <0>;
- status = "disabled";
- };
- };
- };
-
- usb2phy3_grf: syscon@fd5dc000 {
- compatible = "rockchip,rk3588-usb2phy-grf", "syscon", "simple-mfd";
- reg = <0x0 0xfd5dc000 0x0 0x4000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- u2phy3: usb2phy@c000 {
- compatible = "rockchip,rk3588-usb2phy";
- reg = <0xc000 0x10>;
- #clock-cells = <0>;
- clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>;
- clock-names = "phyclk";
- clock-output-names = "usb480m_phy3";
- interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH 0>;
- resets = <&cru SRST_OTGPHY_U2_1>, <&cru SRST_P_USB2PHY_U2_1_GRF0>;
- reset-names = "phy", "apb";
- status = "disabled";
-
- u2phy3_host: host-port {
- #phy-cells = <0>;
- status = "disabled";
- };
- };
- };
-
- hdptxphy0_grf: syscon@fd5e0000 {
- compatible = "rockchip,rk3588-hdptxphy-grf", "syscon";
- reg = <0x0 0xfd5e0000 0x0 0x100>;
- };
-
- ioc: syscon@fd5f0000 {
- compatible = "rockchip,rk3588-ioc", "syscon";
- reg = <0x0 0xfd5f0000 0x0 0x10000>;
- };
-
- system_sram1: sram@fd600000 {
- compatible = "mmio-sram";
- reg = <0x0 0xfd600000 0x0 0x100000>;
- ranges = <0x0 0x0 0xfd600000 0x100000>;
- #address-cells = <1>;
- #size-cells = <1>;
- };
-
- cru: clock-controller@fd7c0000 {
- compatible = "rockchip,rk3588-cru";
- reg = <0x0 0xfd7c0000 0x0 0x5c000>;
- assigned-clocks =
- <&cru PLL_PPLL>, <&cru PLL_AUPLL>,
- <&cru PLL_NPLL>, <&cru PLL_GPLL>,
- <&cru ACLK_CENTER_ROOT>,
- <&cru HCLK_CENTER_ROOT>, <&cru ACLK_CENTER_LOW_ROOT>,
- <&cru ACLK_TOP_ROOT>, <&cru PCLK_TOP_ROOT>,
- <&cru ACLK_LOW_TOP_ROOT>, <&cru PCLK_PMU0_ROOT>,
- <&cru HCLK_PMU_CM0_ROOT>, <&cru ACLK_VOP>,
- <&cru ACLK_BUS_ROOT>, <&cru CLK_150M_SRC>,
- <&cru CLK_GPU>;
- assigned-clock-rates =
- <1100000000>, <786432000>,
- <850000000>, <1188000000>,
- <702000000>,
- <400000000>, <500000000>,
- <800000000>, <100000000>,
- <400000000>, <100000000>,
- <200000000>, <500000000>,
- <375000000>, <150000000>,
- <200000000>;
- rockchip,grf = <&php_grf>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- i2c0: i2c@fd880000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfd880000 0x0 0x1000>;
- interrupts = <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru CLK_I2C0>, <&cru PCLK_I2C0>;
- clock-names = "i2c", "pclk";
- pinctrl-0 = <&i2c0m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- uart0: serial@fd890000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfd890000 0x0 0x100>;
- interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac0 6>, <&dmac0 7>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart0m1_xfer>;
- pinctrl-names = "default";
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
-
- pwm0: pwm@fd8b0000 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfd8b0000 0x0 0x10>;
- clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm0m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm1: pwm@fd8b0010 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfd8b0010 0x0 0x10>;
- clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm1m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm2: pwm@fd8b0020 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfd8b0020 0x0 0x10>;
- clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm2m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm3: pwm@fd8b0030 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfd8b0030 0x0 0x10>;
- clocks = <&cru CLK_PMU1PWM>, <&cru PCLK_PMU1PWM>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm3m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pmu: power-management@fd8d8000 {
- compatible = "rockchip,rk3588-pmu", "syscon", "simple-mfd";
- reg = <0x0 0xfd8d8000 0x0 0x400>;
-
- power: power-controller {
- compatible = "rockchip,rk3588-power-controller";
- #address-cells = <1>;
- #power-domain-cells = <1>;
- #size-cells = <0>;
- status = "okay";
-
- /* These power domains are grouped by VD_NPU */
- power-domain@RK3588_PD_NPU {
- reg = <RK3588_PD_NPU>;
- #power-domain-cells = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- power-domain@RK3588_PD_NPUTOP {
- reg = <RK3588_PD_NPUTOP>;
- clocks = <&cru HCLK_NPU_ROOT>,
- <&cru PCLK_NPU_ROOT>,
- <&cru CLK_NPU_DSU0>,
- <&cru HCLK_NPU_CM0_ROOT>;
- pm_qos = <&qos_npu0_mwr>,
- <&qos_npu0_mro>,
- <&qos_mcu_npu>;
- #power-domain-cells = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- power-domain@RK3588_PD_NPU1 {
- reg = <RK3588_PD_NPU1>;
- clocks = <&cru HCLK_NPU_ROOT>,
- <&cru PCLK_NPU_ROOT>,
- <&cru CLK_NPU_DSU0>;
- pm_qos = <&qos_npu1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_NPU2 {
- reg = <RK3588_PD_NPU2>;
- clocks = <&cru HCLK_NPU_ROOT>,
- <&cru PCLK_NPU_ROOT>,
- <&cru CLK_NPU_DSU0>;
- pm_qos = <&qos_npu2>;
- #power-domain-cells = <0>;
- };
- };
- };
- /* These power domains are grouped by VD_GPU */
- power-domain@RK3588_PD_GPU {
- reg = <RK3588_PD_GPU>;
- clocks = <&cru CLK_GPU>,
- <&cru CLK_GPU_COREGROUP>,
- <&cru CLK_GPU_STACKS>;
- pm_qos = <&qos_gpu_m0>,
- <&qos_gpu_m1>,
- <&qos_gpu_m2>,
- <&qos_gpu_m3>;
- #power-domain-cells = <0>;
- };
- /* These power domains are grouped by VD_VCODEC */
- power-domain@RK3588_PD_VCODEC {
- reg = <RK3588_PD_VCODEC>;
- #address-cells = <1>;
- #size-cells = <0>;
- #power-domain-cells = <0>;
-
- power-domain@RK3588_PD_RKVDEC0 {
- reg = <RK3588_PD_RKVDEC0>;
- clocks = <&cru HCLK_RKVDEC0>,
- <&cru HCLK_VDPU_ROOT>,
- <&cru ACLK_VDPU_ROOT>,
- <&cru ACLK_RKVDEC0>,
- <&cru ACLK_RKVDEC_CCU>;
- pm_qos = <&qos_rkvdec0>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_RKVDEC1 {
- reg = <RK3588_PD_RKVDEC1>;
- clocks = <&cru HCLK_RKVDEC1>,
- <&cru HCLK_VDPU_ROOT>,
- <&cru ACLK_VDPU_ROOT>,
- <&cru ACLK_RKVDEC1>;
- pm_qos = <&qos_rkvdec1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_VENC0 {
- reg = <RK3588_PD_VENC0>;
- clocks = <&cru HCLK_RKVENC0>,
- <&cru ACLK_RKVENC0>;
- pm_qos = <&qos_rkvenc0_m0ro>,
- <&qos_rkvenc0_m1ro>,
- <&qos_rkvenc0_m2wo>;
- #address-cells = <1>;
- #size-cells = <0>;
- #power-domain-cells = <0>;
-
- power-domain@RK3588_PD_VENC1 {
- reg = <RK3588_PD_VENC1>;
- clocks = <&cru HCLK_RKVENC1>,
- <&cru HCLK_RKVENC0>,
- <&cru ACLK_RKVENC0>,
- <&cru ACLK_RKVENC1>;
- pm_qos = <&qos_rkvenc1_m0ro>,
- <&qos_rkvenc1_m1ro>,
- <&qos_rkvenc1_m2wo>;
- #power-domain-cells = <0>;
- };
- };
- };
- /* These power domains are grouped by VD_LOGIC */
- power-domain@RK3588_PD_VDPU {
- reg = <RK3588_PD_VDPU>;
- clocks = <&cru HCLK_VDPU_ROOT>,
- <&cru ACLK_VDPU_LOW_ROOT>,
- <&cru ACLK_VDPU_ROOT>,
- <&cru ACLK_JPEG_DECODER_ROOT>,
- <&cru ACLK_IEP2P0>,
- <&cru HCLK_IEP2P0>,
- <&cru ACLK_JPEG_ENCODER0>,
- <&cru HCLK_JPEG_ENCODER0>,
- <&cru ACLK_JPEG_ENCODER1>,
- <&cru HCLK_JPEG_ENCODER1>,
- <&cru ACLK_JPEG_ENCODER2>,
- <&cru HCLK_JPEG_ENCODER2>,
- <&cru ACLK_JPEG_ENCODER3>,
- <&cru HCLK_JPEG_ENCODER3>,
- <&cru ACLK_JPEG_DECODER>,
- <&cru HCLK_JPEG_DECODER>,
- <&cru ACLK_RGA2>,
- <&cru HCLK_RGA2>;
- pm_qos = <&qos_iep>,
- <&qos_jpeg_dec>,
- <&qos_jpeg_enc0>,
- <&qos_jpeg_enc1>,
- <&qos_jpeg_enc2>,
- <&qos_jpeg_enc3>,
- <&qos_rga2_mro>,
- <&qos_rga2_mwo>;
- #address-cells = <1>;
- #size-cells = <0>;
- #power-domain-cells = <0>;
-
-
- power-domain@RK3588_PD_AV1 {
- reg = <RK3588_PD_AV1>;
- clocks = <&cru PCLK_AV1>,
- <&cru ACLK_AV1>,
- <&cru HCLK_VDPU_ROOT>;
- pm_qos = <&qos_av1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_RKVDEC0 {
- reg = <RK3588_PD_RKVDEC0>;
- clocks = <&cru HCLK_RKVDEC0>,
- <&cru HCLK_VDPU_ROOT>,
- <&cru ACLK_VDPU_ROOT>,
- <&cru ACLK_RKVDEC0>;
- pm_qos = <&qos_rkvdec0>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_RKVDEC1 {
- reg = <RK3588_PD_RKVDEC1>;
- clocks = <&cru HCLK_RKVDEC1>,
- <&cru HCLK_VDPU_ROOT>,
- <&cru ACLK_VDPU_ROOT>;
- pm_qos = <&qos_rkvdec1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_RGA30 {
- reg = <RK3588_PD_RGA30>;
- clocks = <&cru ACLK_RGA3_0>,
- <&cru HCLK_RGA3_0>;
- pm_qos = <&qos_rga3_0>;
- #power-domain-cells = <0>;
- };
- };
- power-domain@RK3588_PD_VOP {
- reg = <RK3588_PD_VOP>;
- clocks = <&cru PCLK_VOP_ROOT>,
- <&cru HCLK_VOP_ROOT>,
- <&cru ACLK_VOP>;
- pm_qos = <&qos_vop_m0>,
- <&qos_vop_m1>;
- #address-cells = <1>;
- #size-cells = <0>;
- #power-domain-cells = <0>;
-
- power-domain@RK3588_PD_VO0 {
- reg = <RK3588_PD_VO0>;
- clocks = <&cru PCLK_VO0_ROOT>,
- <&cru PCLK_VO0_S_ROOT>,
- <&cru HCLK_VO0_S_ROOT>,
- <&cru ACLK_VO0_ROOT>,
- <&cru HCLK_HDCP0>,
- <&cru ACLK_HDCP0>,
- <&cru HCLK_VOP_ROOT>;
- pm_qos = <&qos_hdcp0>;
- #power-domain-cells = <0>;
- };
- };
- power-domain@RK3588_PD_VO1 {
- reg = <RK3588_PD_VO1>;
- clocks = <&cru PCLK_VO1_ROOT>,
- <&cru PCLK_VO1_S_ROOT>,
- <&cru HCLK_VO1_S_ROOT>,
- <&cru HCLK_HDCP1>,
- <&cru ACLK_HDCP1>,
- <&cru ACLK_HDMIRX_ROOT>,
- <&cru HCLK_VO1USB_TOP_ROOT>;
- pm_qos = <&qos_hdcp1>,
- <&qos_hdmirx>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_VI {
- reg = <RK3588_PD_VI>;
- clocks = <&cru HCLK_VI_ROOT>,
- <&cru PCLK_VI_ROOT>,
- <&cru HCLK_ISP0>,
- <&cru ACLK_ISP0>,
- <&cru HCLK_VICAP>,
- <&cru ACLK_VICAP>;
- pm_qos = <&qos_isp0_mro>,
- <&qos_isp0_mwo>,
- <&qos_vicap_m0>,
- <&qos_vicap_m1>;
- #address-cells = <1>;
- #size-cells = <0>;
- #power-domain-cells = <0>;
-
- power-domain@RK3588_PD_ISP1 {
- reg = <RK3588_PD_ISP1>;
- clocks = <&cru HCLK_ISP1>,
- <&cru ACLK_ISP1>,
- <&cru HCLK_VI_ROOT>,
- <&cru PCLK_VI_ROOT>;
- pm_qos = <&qos_isp1_mwo>,
- <&qos_isp1_mro>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_FEC {
- reg = <RK3588_PD_FEC>;
- clocks = <&cru HCLK_FISHEYE0>,
- <&cru ACLK_FISHEYE0>,
- <&cru HCLK_FISHEYE1>,
- <&cru ACLK_FISHEYE1>,
- <&cru PCLK_VI_ROOT>;
- pm_qos = <&qos_fisheye0>,
- <&qos_fisheye1>;
- #power-domain-cells = <0>;
- };
- };
- power-domain@RK3588_PD_RGA31 {
- reg = <RK3588_PD_RGA31>;
- clocks = <&cru HCLK_RGA3_1>,
- <&cru ACLK_RGA3_1>;
- pm_qos = <&qos_rga3_1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_USB {
- reg = <RK3588_PD_USB>;
- clocks = <&cru PCLK_PHP_ROOT>,
- <&cru ACLK_USB_ROOT>,
- <&cru ACLK_USB>,
- <&cru HCLK_USB_ROOT>,
- <&cru HCLK_HOST0>,
- <&cru HCLK_HOST_ARB0>,
- <&cru HCLK_HOST1>,
- <&cru HCLK_HOST_ARB1>;
- pm_qos = <&qos_usb3_0>,
- <&qos_usb3_1>,
- <&qos_usb2host_0>,
- <&qos_usb2host_1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_GMAC {
- reg = <RK3588_PD_GMAC>;
- clocks = <&cru PCLK_PHP_ROOT>,
- <&cru ACLK_PCIE_ROOT>,
- <&cru ACLK_PHP_ROOT>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_PCIE {
- reg = <RK3588_PD_PCIE>;
- clocks = <&cru PCLK_PHP_ROOT>,
- <&cru ACLK_PCIE_ROOT>,
- <&cru ACLK_PHP_ROOT>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_SDIO {
- reg = <RK3588_PD_SDIO>;
- clocks = <&cru HCLK_SDIO>,
- <&cru HCLK_NVM_ROOT>;
- pm_qos = <&qos_sdio>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_AUDIO {
- reg = <RK3588_PD_AUDIO>;
- clocks = <&cru HCLK_AUDIO_ROOT>,
- <&cru PCLK_AUDIO_ROOT>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3588_PD_SDMMC {
- reg = <RK3588_PD_SDMMC>;
- pm_qos = <&qos_sdmmc>;
- #power-domain-cells = <0>;
- };
- };
- };
-
- av1d: video-codec@fdc70000 {
- compatible = "rockchip,rk3588-av1-vpu";
- reg = <0x0 0xfdc70000 0x0 0x800>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "vdpu";
- assigned-clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
- assigned-clock-rates = <400000000>, <400000000>;
- clocks = <&cru ACLK_AV1>, <&cru PCLK_AV1>;
- clock-names = "aclk", "hclk";
- power-domains = <&power RK3588_PD_AV1>;
- resets = <&cru SRST_A_AV1>, <&cru SRST_P_AV1>, <&cru SRST_A_AV1_BIU>, <&cru SRST_P_AV1_BIU>;
- };
-
- vop: vop@fdd90000 {
- compatible = "rockchip,rk3588-vop";
- reg = <0x0 0xfdd90000 0x0 0x4200>, <0x0 0xfdd95000 0x0 0x1000>;
- reg-names = "vop", "gamma-lut";
- interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VOP>,
- <&cru HCLK_VOP>,
- <&cru DCLK_VOP0>,
- <&cru DCLK_VOP1>,
- <&cru DCLK_VOP2>,
- <&cru DCLK_VOP3>,
- <&cru PCLK_VOP_ROOT>;
- clock-names = "aclk",
- "hclk",
- "dclk_vp0",
- "dclk_vp1",
- "dclk_vp2",
- "dclk_vp3",
- "pclk_vop";
- iommus = <&vop_mmu>;
- power-domains = <&power RK3588_PD_VOP>;
- rockchip,grf = <&sys_grf>;
- rockchip,vop-grf = <&vop_grf>;
- rockchip,vo1-grf = <&vo1_grf>;
- rockchip,pmu = <&pmu>;
- status = "disabled";
-
- vop_out: ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- vp0: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
-
- vp1: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
-
- vp2: port@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
-
- vp3: port@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
- };
- };
-
- vop_mmu: iommu@fdd97e00 {
- compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
- reg = <0x0 0xfdd97e00 0x0 0x100>, <0x0 0xfdd97f00 0x0 0x100>;
- interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
- clock-names = "aclk", "iface";
- #iommu-cells = <0>;
- power-domains = <&power RK3588_PD_VOP>;
- status = "disabled";
- };
-
- i2s4_8ch: i2s@fddc0000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfddc0000 0x0 0x1000>;
- interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S4_8CH_TX>, <&cru MCLK_I2S4_8CH_TX>, <&cru HCLK_I2S4_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S4_8CH_TX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac2 0>;
- dma-names = "tx";
- power-domains = <&power RK3588_PD_VO0>;
- resets = <&cru SRST_M_I2S4_8CH_TX>;
- reset-names = "tx-m";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s5_8ch: i2s@fddf0000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfddf0000 0x0 0x1000>;
- interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S5_8CH_TX>, <&cru MCLK_I2S5_8CH_TX>, <&cru HCLK_I2S5_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S5_8CH_TX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac2 2>;
- dma-names = "tx";
- power-domains = <&power RK3588_PD_VO1>;
- resets = <&cru SRST_M_I2S5_8CH_TX>;
- reset-names = "tx-m";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s9_8ch: i2s@fddfc000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfddfc000 0x0 0x1000>;
- interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S9_8CH_RX>, <&cru MCLK_I2S9_8CH_RX>, <&cru HCLK_I2S9_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S9_8CH_RX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac2 23>;
- dma-names = "rx";
- power-domains = <&power RK3588_PD_VO1>;
- resets = <&cru SRST_M_I2S9_8CH_RX>;
- reset-names = "rx-m";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- qos_gpu_m0: qos@fdf35000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf35000 0x0 0x20>;
- };
-
- qos_gpu_m1: qos@fdf35200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf35200 0x0 0x20>;
- };
-
- qos_gpu_m2: qos@fdf35400 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf35400 0x0 0x20>;
- };
-
- qos_gpu_m3: qos@fdf35600 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf35600 0x0 0x20>;
- };
-
- qos_rga3_1: qos@fdf36000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf36000 0x0 0x20>;
- };
-
- qos_sdio: qos@fdf39000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf39000 0x0 0x20>;
- };
-
- qos_sdmmc: qos@fdf3d800 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf3d800 0x0 0x20>;
- };
-
- qos_usb3_1: qos@fdf3e000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf3e000 0x0 0x20>;
- };
-
- qos_usb3_0: qos@fdf3e200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf3e200 0x0 0x20>;
- };
-
- qos_usb2host_0: qos@fdf3e400 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf3e400 0x0 0x20>;
- };
-
- qos_usb2host_1: qos@fdf3e600 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf3e600 0x0 0x20>;
- };
-
- qos_fisheye0: qos@fdf40000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf40000 0x0 0x20>;
- };
-
- qos_fisheye1: qos@fdf40200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf40200 0x0 0x20>;
- };
-
- qos_isp0_mro: qos@fdf40400 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf40400 0x0 0x20>;
- };
-
- qos_isp0_mwo: qos@fdf40500 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf40500 0x0 0x20>;
- };
-
- qos_vicap_m0: qos@fdf40600 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf40600 0x0 0x20>;
- };
-
- qos_vicap_m1: qos@fdf40800 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf40800 0x0 0x20>;
- };
-
- qos_isp1_mwo: qos@fdf41000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf41000 0x0 0x20>;
- };
-
- qos_isp1_mro: qos@fdf41100 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf41100 0x0 0x20>;
- };
-
- qos_rkvenc0_m0ro: qos@fdf60000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf60000 0x0 0x20>;
- };
-
- qos_rkvenc0_m1ro: qos@fdf60200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf60200 0x0 0x20>;
- };
-
- qos_rkvenc0_m2wo: qos@fdf60400 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf60400 0x0 0x20>;
- };
-
- qos_rkvenc1_m0ro: qos@fdf61000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf61000 0x0 0x20>;
- };
-
- qos_rkvenc1_m1ro: qos@fdf61200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf61200 0x0 0x20>;
- };
-
- qos_rkvenc1_m2wo: qos@fdf61400 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf61400 0x0 0x20>;
- };
-
- qos_rkvdec0: qos@fdf62000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf62000 0x0 0x20>;
- };
-
- qos_rkvdec1: qos@fdf63000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf63000 0x0 0x20>;
- };
-
- qos_av1: qos@fdf64000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf64000 0x0 0x20>;
- };
-
- qos_iep: qos@fdf66000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66000 0x0 0x20>;
- };
-
- qos_jpeg_dec: qos@fdf66200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66200 0x0 0x20>;
- };
-
- qos_jpeg_enc0: qos@fdf66400 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66400 0x0 0x20>;
- };
-
- qos_jpeg_enc1: qos@fdf66600 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66600 0x0 0x20>;
- };
-
- qos_jpeg_enc2: qos@fdf66800 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66800 0x0 0x20>;
- };
-
- qos_jpeg_enc3: qos@fdf66a00 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66a00 0x0 0x20>;
- };
-
- qos_rga2_mro: qos@fdf66c00 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66c00 0x0 0x20>;
- };
-
- qos_rga2_mwo: qos@fdf66e00 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf66e00 0x0 0x20>;
- };
-
- qos_rga3_0: qos@fdf67000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf67000 0x0 0x20>;
- };
-
- qos_vdpu: qos@fdf67200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf67200 0x0 0x20>;
- };
-
- qos_npu1: qos@fdf70000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf70000 0x0 0x20>;
- };
-
- qos_npu2: qos@fdf71000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf71000 0x0 0x20>;
- };
-
- qos_npu0_mwr: qos@fdf72000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf72000 0x0 0x20>;
- };
-
- qos_npu0_mro: qos@fdf72200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf72200 0x0 0x20>;
- };
-
- qos_mcu_npu: qos@fdf72400 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf72400 0x0 0x20>;
- };
-
- qos_hdcp0: qos@fdf80000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf80000 0x0 0x20>;
- };
-
- qos_hdcp1: qos@fdf81000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf81000 0x0 0x20>;
- };
-
- qos_hdmirx: qos@fdf81200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf81200 0x0 0x20>;
- };
-
- qos_vop_m0: qos@fdf82000 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf82000 0x0 0x20>;
- };
-
- qos_vop_m1: qos@fdf82200 {
- compatible = "rockchip,rk3588-qos", "syscon";
- reg = <0x0 0xfdf82200 0x0 0x20>;
- };
-
- dfi: dfi@fe060000 {
- reg = <0x00 0xfe060000 0x00 0x10000>;
- compatible = "rockchip,rk3588-dfi";
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
- rockchip,pmu = <&pmu1grf>;
- };
-
- pcie2x1l1: pcie@fe180000 {
- compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
- bus-range = <0x30 0x3f>;
- clocks = <&cru ACLK_PCIE_1L1_MSTR>, <&cru ACLK_PCIE_1L1_SLV>,
- <&cru ACLK_PCIE_1L1_DBI>, <&cru PCLK_PCIE_1L1>,
- <&cru CLK_PCIE_AUX3>, <&cru CLK_PCIE1L1_PIPE>;
- clock-names = "aclk_mst", "aclk_slv",
- "aclk_dbi", "pclk",
- "aux", "pipe";
- device_type = "pci";
- interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sys", "pmc", "msg", "legacy", "err";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie2x1l1_intc 0>,
- <0 0 0 2 &pcie2x1l1_intc 1>,
- <0 0 0 3 &pcie2x1l1_intc 2>,
- <0 0 0 4 &pcie2x1l1_intc 3>;
- linux,pci-domain = <3>;
- max-link-speed = <2>;
- msi-map = <0x3000 &its0 0x3000 0x1000>;
- num-lanes = <1>;
- phys = <&combphy2_psu PHY_TYPE_PCIE>;
- phy-names = "pcie-phy";
- power-domains = <&power RK3588_PD_PCIE>;
- ranges = <0x01000000 0x0 0xf3100000 0x0 0xf3100000 0x0 0x00100000>,
- <0x02000000 0x0 0xf3200000 0x0 0xf3200000 0x0 0x00e00000>,
- <0x03000000 0x0 0x40000000 0x9 0xc0000000 0x0 0x40000000>;
- reg = <0xa 0x40c00000 0x0 0x00400000>,
- <0x0 0xfe180000 0x0 0x00010000>,
- <0x0 0xf3000000 0x0 0x00100000>;
- reg-names = "dbi", "apb", "config";
- resets = <&cru SRST_PCIE3_POWER_UP>, <&cru SRST_P_PCIE3>;
- reset-names = "pwr", "pipe";
- #address-cells = <3>;
- #size-cells = <2>;
- status = "disabled";
-
- pcie2x1l1_intc: legacy-interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 245 IRQ_TYPE_EDGE_RISING 0>;
- };
- };
-
- pcie2x1l2: pcie@fe190000 {
- compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
- bus-range = <0x40 0x4f>;
- clocks = <&cru ACLK_PCIE_1L2_MSTR>, <&cru ACLK_PCIE_1L2_SLV>,
- <&cru ACLK_PCIE_1L2_DBI>, <&cru PCLK_PCIE_1L2>,
- <&cru CLK_PCIE_AUX4>, <&cru CLK_PCIE1L2_PIPE>;
- clock-names = "aclk_mst", "aclk_slv",
- "aclk_dbi", "pclk",
- "aux", "pipe";
- device_type = "pci";
- interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sys", "pmc", "msg", "legacy", "err";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie2x1l2_intc 0>,
- <0 0 0 2 &pcie2x1l2_intc 1>,
- <0 0 0 3 &pcie2x1l2_intc 2>,
- <0 0 0 4 &pcie2x1l2_intc 3>;
- linux,pci-domain = <4>;
- max-link-speed = <2>;
- msi-map = <0x4000 &its0 0x4000 0x1000>;
- num-lanes = <1>;
- phys = <&combphy0_ps PHY_TYPE_PCIE>;
- phy-names = "pcie-phy";
- power-domains = <&power RK3588_PD_PCIE>;
- ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
- <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x00e00000>,
- <0x03000000 0x0 0x40000000 0xa 0x00000000 0x0 0x40000000>;
- reg = <0xa 0x41000000 0x0 0x00400000>,
- <0x0 0xfe190000 0x0 0x00010000>,
- <0x0 0xf4000000 0x0 0x00100000>;
- reg-names = "dbi", "apb", "config";
- resets = <&cru SRST_PCIE4_POWER_UP>, <&cru SRST_P_PCIE4>;
- reset-names = "pwr", "pipe";
- #address-cells = <3>;
- #size-cells = <2>;
- status = "disabled";
-
- pcie2x1l2_intc: legacy-interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 250 IRQ_TYPE_EDGE_RISING 0>;
- };
- };
-
- gmac1: ethernet@fe1c0000 {
- compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
- reg = <0x0 0xfe1c0000 0x0 0x10000>;
- interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "macirq", "eth_wake_irq";
- clocks = <&cru CLK_GMAC_125M>, <&cru CLK_GMAC_50M>,
- <&cru PCLK_GMAC1>, <&cru ACLK_GMAC1>,
- <&cru CLK_GMAC1_PTP_REF>;
- clock-names = "stmmaceth", "clk_mac_ref",
- "pclk_mac", "aclk_mac",
- "ptp_ref";
- power-domains = <&power RK3588_PD_GMAC>;
- resets = <&cru SRST_A_GMAC1>;
- reset-names = "stmmaceth";
- rockchip,grf = <&sys_grf>;
- rockchip,php-grf = <&php_grf>;
- snps,axi-config = <&gmac1_stmmac_axi_setup>;
- snps,mixed-burst;
- snps,mtl-rx-config = <&gmac1_mtl_rx_setup>;
- snps,mtl-tx-config = <&gmac1_mtl_tx_setup>;
- snps,tso;
- status = "disabled";
-
- mdio1: mdio {
- compatible = "snps,dwmac-mdio";
- #address-cells = <0x1>;
- #size-cells = <0x0>;
- };
-
- gmac1_stmmac_axi_setup: stmmac-axi-config {
- snps,blen = <0 0 0 0 16 8 4>;
- snps,wr_osr_lmt = <4>;
- snps,rd_osr_lmt = <8>;
- };
-
- gmac1_mtl_rx_setup: rx-queues-config {
- snps,rx-queues-to-use = <2>;
- queue0 {};
- queue1 {};
- };
-
- gmac1_mtl_tx_setup: tx-queues-config {
- snps,tx-queues-to-use = <2>;
- queue0 {};
- queue1 {};
- };
- };
-
- sata0: sata@fe210000 {
- compatible = "rockchip,rk3588-dwc-ahci", "snps,dwc-ahci";
- reg = <0 0xfe210000 0 0x1000>;
- interrupts = <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_SATA0>, <&cru CLK_PMALIVE0>,
- <&cru CLK_RXOOB0>, <&cru CLK_PIPEPHY0_REF>,
- <&cru CLK_PIPEPHY0_PIPE_ASIC_G>;
- clock-names = "sata", "pmalive", "rxoob", "ref", "asic";
- ports-implemented = <0x1>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata-port@0 {
- reg = <0>;
- hba-port-cap = <HBA_PORT_FBSCP>;
- phys = <&combphy0_ps PHY_TYPE_SATA>;
- phy-names = "sata-phy";
- snps,rx-ts-max = <32>;
- snps,tx-ts-max = <32>;
- };
- };
-
- sata2: sata@fe230000 {
- compatible = "rockchip,rk3588-dwc-ahci", "snps,dwc-ahci";
- reg = <0 0xfe230000 0 0x1000>;
- interrupts = <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_SATA2>, <&cru CLK_PMALIVE2>,
- <&cru CLK_RXOOB2>, <&cru CLK_PIPEPHY2_REF>,
- <&cru CLK_PIPEPHY2_PIPE_ASIC_G>;
- clock-names = "sata", "pmalive", "rxoob", "ref", "asic";
- ports-implemented = <0x1>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata-port@0 {
- reg = <0>;
- hba-port-cap = <HBA_PORT_FBSCP>;
- phys = <&combphy2_psu PHY_TYPE_SATA>;
- phy-names = "sata-phy";
- snps,rx-ts-max = <32>;
- snps,tx-ts-max = <32>;
- };
- };
-
- sfc: spi@fe2b0000 {
- compatible = "rockchip,sfc";
- reg = <0x0 0xfe2b0000 0x0 0x4000>;
- interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
- clock-names = "clk_sfc", "hclk_sfc";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- sdmmc: mmc@fe2c0000 {
- compatible = "rockchip,rk3588-dw-mshc", "rockchip,rk3288-dw-mshc";
- reg = <0x0 0xfe2c0000 0x0 0x4000>;
- interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&scmi_clk SCMI_HCLK_SD>, <&scmi_clk SCMI_CCLK_SD>,
- <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
- clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
- fifo-depth = <0x100>;
- max-frequency = <200000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_det &sdmmc_bus4>;
- power-domains = <&power RK3588_PD_SDMMC>;
- status = "disabled";
- };
-
- sdio: mmc@fe2d0000 {
- compatible = "rockchip,rk3588-dw-mshc", "rockchip,rk3288-dw-mshc";
- reg = <0x00 0xfe2d0000 0x00 0x4000>;
- interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_SDIO>, <&cru CCLK_SRC_SDIO>,
- <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
- clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
- fifo-depth = <0x100>;
- max-frequency = <200000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&sdiom1_pins>;
- power-domains = <&power RK3588_PD_SDIO>;
- status = "disabled";
- };
-
- sdhci: mmc@fe2e0000 {
- compatible = "rockchip,rk3588-dwcmshc";
- reg = <0x0 0xfe2e0000 0x0 0x10000>;
- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH 0>;
- assigned-clocks = <&cru BCLK_EMMC>, <&cru TMCLK_EMMC>, <&cru CCLK_EMMC>;
- assigned-clock-rates = <200000000>, <24000000>, <200000000>;
- clocks = <&cru CCLK_EMMC>, <&cru HCLK_EMMC>,
- <&cru ACLK_EMMC>, <&cru BCLK_EMMC>,
- <&cru TMCLK_EMMC>;
- clock-names = "core", "bus", "axi", "block", "timer";
- max-frequency = <200000000>;
- pinctrl-0 = <&emmc_rstnout>, <&emmc_bus8>, <&emmc_clk>,
- <&emmc_cmd>, <&emmc_data_strobe>;
- pinctrl-names = "default";
- resets = <&cru SRST_C_EMMC>, <&cru SRST_H_EMMC>,
- <&cru SRST_A_EMMC>, <&cru SRST_B_EMMC>,
- <&cru SRST_T_EMMC>;
- reset-names = "core", "bus", "axi", "block", "timer";
- status = "disabled";
- };
-
- i2s0_8ch: i2s@fe470000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfe470000 0x0 0x1000>;
- interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S0_8CH_TX>, <&cru MCLK_I2S0_8CH_RX>, <&cru HCLK_I2S0_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- assigned-clocks = <&cru CLK_I2S0_8CH_TX_SRC>, <&cru CLK_I2S0_8CH_RX_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>, <&cru PLL_AUPLL>;
- dmas = <&dmac0 0>, <&dmac0 1>;
- dma-names = "tx", "rx";
- power-domains = <&power RK3588_PD_AUDIO>;
- resets = <&cru SRST_M_I2S0_8CH_TX>, <&cru SRST_M_I2S0_8CH_RX>;
- reset-names = "tx-m", "rx-m";
- rockchip,trcm-sync-tx-only;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_lrck
- &i2s0_sclk
- &i2s0_sdi0
- &i2s0_sdi1
- &i2s0_sdi2
- &i2s0_sdi3
- &i2s0_sdo0
- &i2s0_sdo1
- &i2s0_sdo2
- &i2s0_sdo3>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s1_8ch: i2s@fe480000 {
- compatible = "rockchip,rk3588-i2s-tdm";
- reg = <0x0 0xfe480000 0x0 0x1000>;
- interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S1_8CH_TX>, <&cru MCLK_I2S1_8CH_RX>, <&cru HCLK_I2S1_8CH>;
- clock-names = "mclk_tx", "mclk_rx", "hclk";
- dmas = <&dmac0 2>, <&dmac0 3>;
- dma-names = "tx", "rx";
- resets = <&cru SRST_M_I2S1_8CH_TX>, <&cru SRST_M_I2S1_8CH_RX>;
- reset-names = "tx-m", "rx-m";
- rockchip,trcm-sync-tx-only;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s1m0_lrck
- &i2s1m0_sclk
- &i2s1m0_sdi0
- &i2s1m0_sdi1
- &i2s1m0_sdi2
- &i2s1m0_sdi3
- &i2s1m0_sdo0
- &i2s1m0_sdo1
- &i2s1m0_sdo2
- &i2s1m0_sdo3>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s2_2ch: i2s@fe490000 {
- compatible = "rockchip,rk3588-i2s", "rockchip,rk3066-i2s";
- reg = <0x0 0xfe490000 0x0 0x1000>;
- interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S2_2CH>, <&cru HCLK_I2S2_2CH>;
- clock-names = "i2s_clk", "i2s_hclk";
- assigned-clocks = <&cru CLK_I2S2_2CH_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac1 0>, <&dmac1 1>;
- dma-names = "tx", "rx";
- power-domains = <&power RK3588_PD_AUDIO>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s2m1_lrck
- &i2s2m1_sclk
- &i2s2m1_sdi
- &i2s2m1_sdo>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s3_2ch: i2s@fe4a0000 {
- compatible = "rockchip,rk3588-i2s", "rockchip,rk3066-i2s";
- reg = <0x0 0xfe4a0000 0x0 0x1000>;
- interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru MCLK_I2S3_2CH>, <&cru HCLK_I2S3_2CH>;
- clock-names = "i2s_clk", "i2s_hclk";
- assigned-clocks = <&cru CLK_I2S3_2CH_SRC>;
- assigned-clock-parents = <&cru PLL_AUPLL>;
- dmas = <&dmac1 2>, <&dmac1 3>;
- dma-names = "tx", "rx";
- power-domains = <&power RK3588_PD_AUDIO>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s3_lrck
- &i2s3_sclk
- &i2s3_sdi
- &i2s3_sdo>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- gic: interrupt-controller@fe600000 {
- compatible = "arm,gic-v3";
- reg = <0x0 0xfe600000 0 0x10000>, /* GICD */
- <0x0 0xfe680000 0 0x100000>; /* GICR */
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-controller;
- mbi-alias = <0x0 0xfe610000>;
- mbi-ranges = <424 56>;
- msi-controller;
- ranges;
- #address-cells = <2>;
- #interrupt-cells = <4>;
- #size-cells = <2>;
-
- its0: msi-controller@fe640000 {
- compatible = "arm,gic-v3-its";
- reg = <0x0 0xfe640000 0x0 0x20000>;
- msi-controller;
- #msi-cells = <1>;
- };
-
- its1: msi-controller@fe660000 {
- compatible = "arm,gic-v3-its";
- reg = <0x0 0xfe660000 0x0 0x20000>;
- msi-controller;
- #msi-cells = <1>;
- };
-
- ppi-partitions {
- ppi_partition0: interrupt-partition-0 {
- affinity = <&cpu_l0 &cpu_l1 &cpu_l2 &cpu_l3>;
- };
-
- ppi_partition1: interrupt-partition-1 {
- affinity = <&cpu_b0 &cpu_b1 &cpu_b2 &cpu_b3>;
- };
- };
- };
-
- dmac0: dma-controller@fea10000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xfea10000 0x0 0x4000>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH 0>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC0>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- };
-
- dmac1: dma-controller@fea30000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xfea30000 0x0 0x4000>;
- interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH 0>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC1>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- };
-
- i2c1: i2c@fea90000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfea90000 0x0 0x1000>;
- clocks = <&cru CLK_I2C1>, <&cru PCLK_I2C1>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c1m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c2: i2c@feaa0000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfeaa0000 0x0 0x1000>;
- clocks = <&cru CLK_I2C2>, <&cru PCLK_I2C2>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c2m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c3: i2c@feab0000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfeab0000 0x0 0x1000>;
- clocks = <&cru CLK_I2C3>, <&cru PCLK_I2C3>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c3m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c4: i2c@feac0000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfeac0000 0x0 0x1000>;
- clocks = <&cru CLK_I2C4>, <&cru PCLK_I2C4>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c4m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c5: i2c@fead0000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfead0000 0x0 0x1000>;
- clocks = <&cru CLK_I2C5>, <&cru PCLK_I2C5>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c5m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- timer0: timer@feae0000 {
- compatible = "rockchip,rk3588-timer", "rockchip,rk3288-timer";
- reg = <0x0 0xfeae0000 0x0 0x20>;
- interrupts = <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_BUSTIMER0>, <&cru CLK_BUSTIMER0>;
- clock-names = "pclk", "timer";
- };
-
- wdt: watchdog@feaf0000 {
- compatible = "rockchip,rk3588-wdt", "snps,dw-wdt";
- reg = <0x0 0xfeaf0000 0x0 0x100>;
- clocks = <&cru TCLK_WDT0>, <&cru PCLK_WDT0>;
- clock-names = "tclk", "pclk";
- interrupts = <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH 0>;
- };
-
- spi0: spi@feb00000 {
- compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xfeb00000 0x0 0x1000>;
- interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru CLK_SPI0>, <&cru PCLK_SPI0>;
- clock-names = "spiclk", "apb_pclk";
- dmas = <&dmac0 14>, <&dmac0 15>;
- dma-names = "tx", "rx";
- num-cs = <2>;
- pinctrl-0 = <&spi0m0_cs0 &spi0m0_cs1 &spi0m0_pins>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi1: spi@feb10000 {
- compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xfeb10000 0x0 0x1000>;
- interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru CLK_SPI1>, <&cru PCLK_SPI1>;
- clock-names = "spiclk", "apb_pclk";
- dmas = <&dmac0 16>, <&dmac0 17>;
- dma-names = "tx", "rx";
- num-cs = <2>;
- pinctrl-0 = <&spi1m1_cs0 &spi1m1_cs1 &spi1m1_pins>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi2: spi@feb20000 {
- compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xfeb20000 0x0 0x1000>;
- interrupts = <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru CLK_SPI2>, <&cru PCLK_SPI2>;
- clock-names = "spiclk", "apb_pclk";
- dmas = <&dmac1 15>, <&dmac1 16>;
- dma-names = "tx", "rx";
- num-cs = <2>;
- pinctrl-0 = <&spi2m2_cs0 &spi2m2_cs1 &spi2m2_pins>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi3: spi@feb30000 {
- compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xfeb30000 0x0 0x1000>;
- interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru CLK_SPI3>, <&cru PCLK_SPI3>;
- clock-names = "spiclk", "apb_pclk";
- dmas = <&dmac1 17>, <&dmac1 18>;
- dma-names = "tx", "rx";
- num-cs = <2>;
- pinctrl-0 = <&spi3m1_cs0 &spi3m1_cs1 &spi3m1_pins>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- uart1: serial@feb40000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfeb40000 0x0 0x100>;
- interrupts = <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac0 8>, <&dmac0 9>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart1m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart2: serial@feb50000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfeb50000 0x0 0x100>;
- interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac0 10>, <&dmac0 11>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart2m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart3: serial@feb60000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfeb60000 0x0 0x100>;
- interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac0 12>, <&dmac0 13>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart3m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart4: serial@feb70000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfeb70000 0x0 0x100>;
- interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac1 9>, <&dmac1 10>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart4m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart5: serial@feb80000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfeb80000 0x0 0x100>;
- interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART5>, <&cru PCLK_UART5>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac1 11>, <&dmac1 12>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart5m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart6: serial@feb90000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfeb90000 0x0 0x100>;
- interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART6>, <&cru PCLK_UART6>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac1 13>, <&dmac1 14>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart6m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart7: serial@feba0000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfeba0000 0x0 0x100>;
- interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART7>, <&cru PCLK_UART7>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac2 7>, <&dmac2 8>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart7m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart8: serial@febb0000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfebb0000 0x0 0x100>;
- interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART8>, <&cru PCLK_UART8>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac2 9>, <&dmac2 10>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart8m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- uart9: serial@febc0000 {
- compatible = "rockchip,rk3588-uart", "snps,dw-apb-uart";
- reg = <0x0 0xfebc0000 0x0 0x100>;
- interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_UART9>, <&cru PCLK_UART9>;
- clock-names = "baudclk", "apb_pclk";
- dmas = <&dmac2 11>, <&dmac2 12>;
- dma-names = "tx", "rx";
- pinctrl-0 = <&uart9m1_xfer>;
- pinctrl-names = "default";
- reg-io-width = <4>;
- reg-shift = <2>;
- status = "disabled";
- };
-
- pwm4: pwm@febd0000 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebd0000 0x0 0x10>;
- clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm4m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm5: pwm@febd0010 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebd0010 0x0 0x10>;
- clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm5m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm6: pwm@febd0020 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebd0020 0x0 0x10>;
- clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm6m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm7: pwm@febd0030 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebd0030 0x0 0x10>;
- clocks = <&cru CLK_PWM1>, <&cru PCLK_PWM1>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm7m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm8: pwm@febe0000 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebe0000 0x0 0x10>;
- clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm8m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm9: pwm@febe0010 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebe0010 0x0 0x10>;
- clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm9m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm10: pwm@febe0020 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebe0020 0x0 0x10>;
- clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm10m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm11: pwm@febe0030 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebe0030 0x0 0x10>;
- clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm11m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm12: pwm@febf0000 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebf0000 0x0 0x10>;
- clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm12m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm13: pwm@febf0010 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebf0010 0x0 0x10>;
- clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm13m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm14: pwm@febf0020 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebf0020 0x0 0x10>;
- clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm14m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- pwm15: pwm@febf0030 {
- compatible = "rockchip,rk3588-pwm", "rockchip,rk3328-pwm";
- reg = <0x0 0xfebf0030 0x0 0x10>;
- clocks = <&cru CLK_PWM3>, <&cru PCLK_PWM3>;
- clock-names = "pwm", "pclk";
- pinctrl-0 = <&pwm15m0_pins>;
- pinctrl-names = "default";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- tsadc: tsadc@fec00000 {
- compatible = "rockchip,rk3588-tsadc";
- reg = <0x0 0xfec00000 0x0 0x400>;
- interrupts = <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru CLK_TSADC>, <&cru PCLK_TSADC>;
- clock-names = "tsadc", "apb_pclk";
- assigned-clocks = <&cru CLK_TSADC>;
- assigned-clock-rates = <2000000>;
- resets = <&cru SRST_P_TSADC>, <&cru SRST_TSADC>;
- reset-names = "tsadc-apb", "tsadc";
- rockchip,hw-tshut-temp = <120000>;
- rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
- rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
- pinctrl-0 = <&tsadc_gpio_func>;
- pinctrl-1 = <&tsadc_shut>;
- pinctrl-names = "gpio", "otpout";
- #thermal-sensor-cells = <1>;
- status = "disabled";
- };
-
- saradc: adc@fec10000 {
- compatible = "rockchip,rk3588-saradc";
- reg = <0x0 0xfec10000 0x0 0x10000>;
- interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH 0>;
- #io-channel-cells = <1>;
- clocks = <&cru CLK_SARADC>, <&cru PCLK_SARADC>;
- clock-names = "saradc", "apb_pclk";
- resets = <&cru SRST_P_SARADC>;
- reset-names = "saradc-apb";
- status = "disabled";
- };
-
- i2c6: i2c@fec80000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfec80000 0x0 0x1000>;
- clocks = <&cru CLK_I2C6>, <&cru PCLK_I2C6>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c6m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c7: i2c@fec90000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfec90000 0x0 0x1000>;
- clocks = <&cru CLK_I2C7>, <&cru PCLK_I2C7>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c7m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c8: i2c@feca0000 {
- compatible = "rockchip,rk3588-i2c", "rockchip,rk3399-i2c";
- reg = <0x0 0xfeca0000 0x0 0x1000>;
- clocks = <&cru CLK_I2C8>, <&cru PCLK_I2C8>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-0 = <&i2c8m0_xfer>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi4: spi@fecb0000 {
- compatible = "rockchip,rk3588-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xfecb0000 0x0 0x1000>;
- interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru CLK_SPI4>, <&cru PCLK_SPI4>;
- clock-names = "spiclk", "apb_pclk";
- dmas = <&dmac2 13>, <&dmac2 14>;
- dma-names = "tx", "rx";
- num-cs = <2>;
- pinctrl-0 = <&spi4m0_cs0 &spi4m0_cs1 &spi4m0_pins>;
- pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- otp: efuse@fecc0000 {
- compatible = "rockchip,rk3588-otp";
- reg = <0x0 0xfecc0000 0x0 0x400>;
- clocks = <&cru CLK_OTPC_NS>, <&cru PCLK_OTPC_NS>,
- <&cru CLK_OTP_PHY_G>, <&cru CLK_OTPC_ARB>;
- clock-names = "otp", "apb_pclk", "phy", "arb";
- resets = <&cru SRST_OTPC_NS>, <&cru SRST_P_OTPC_NS>,
- <&cru SRST_OTPC_ARB>;
- reset-names = "otp", "apb", "arb";
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpu_code: cpu-code@2 {
- reg = <0x02 0x2>;
- };
-
- otp_id: id@7 {
- reg = <0x07 0x10>;
- };
-
- cpub0_leakage: cpu-leakage@17 {
- reg = <0x17 0x1>;
- };
-
- cpub1_leakage: cpu-leakage@18 {
- reg = <0x18 0x1>;
- };
-
- cpul_leakage: cpu-leakage@19 {
- reg = <0x19 0x1>;
- };
-
- log_leakage: log-leakage@1a {
- reg = <0x1a 0x1>;
- };
-
- gpu_leakage: gpu-leakage@1b {
- reg = <0x1b 0x1>;
- };
-
- otp_cpu_version: cpu-version@1c {
- reg = <0x1c 0x1>;
- bits = <3 3>;
- };
-
- npu_leakage: npu-leakage@28 {
- reg = <0x28 0x1>;
- };
-
- codec_leakage: codec-leakage@29 {
- reg = <0x29 0x1>;
- };
- };
-
- dmac2: dma-controller@fed10000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xfed10000 0x0 0x4000>;
- interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH 0>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC2>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- };
-
- hdptxphy_hdmi0: phy@fed60000 {
- compatible = "rockchip,rk3588-hdptx-phy";
- reg = <0x0 0xfed60000 0x0 0x2000>;
- clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>, <&cru PCLK_HDPTX0>;
- clock-names = "ref", "apb";
- #phy-cells = <0>;
- resets = <&cru SRST_HDPTX0>, <&cru SRST_P_HDPTX0>,
- <&cru SRST_HDPTX0_INIT>, <&cru SRST_HDPTX0_CMN>,
- <&cru SRST_HDPTX0_LANE>, <&cru SRST_HDPTX0_ROPLL>,
- <&cru SRST_HDPTX0_LCPLL>;
- reset-names = "phy", "apb", "init", "cmn", "lane", "ropll",
- "lcpll";
- rockchip,grf = <&hdptxphy0_grf>;
- status = "disabled";
- };
-
- usbdp_phy0: phy@fed80000 {
- compatible = "rockchip,rk3588-usbdp-phy";
- reg = <0x0 0xfed80000 0x0 0x10000>;
- #phy-cells = <1>;
- clocks = <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>,
- <&cru CLK_USBDP_PHY0_IMMORTAL>,
- <&cru PCLK_USBDPPHY0>,
- <&u2phy0>;
- clock-names = "refclk", "immortal", "pclk", "utmi";
- resets = <&cru SRST_USBDP_COMBO_PHY0_INIT>,
- <&cru SRST_USBDP_COMBO_PHY0_CMN>,
- <&cru SRST_USBDP_COMBO_PHY0_LANE>,
- <&cru SRST_USBDP_COMBO_PHY0_PCS>,
- <&cru SRST_P_USBDPPHY0>;
- reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
- rockchip,u2phy-grf = <&usb2phy0_grf>;
- rockchip,usb-grf = <&usb_grf>;
- rockchip,usbdpphy-grf = <&usbdpphy0_grf>;
- rockchip,vo-grf = <&vo0_grf>;
- status = "disabled";
- };
-
- combphy0_ps: phy@fee00000 {
- compatible = "rockchip,rk3588-naneng-combphy";
- reg = <0x0 0xfee00000 0x0 0x100>;
- clocks = <&cru CLK_REF_PIPE_PHY0>, <&cru PCLK_PCIE_COMBO_PIPE_PHY0>,
- <&cru PCLK_PHP_ROOT>;
- clock-names = "ref", "apb", "pipe";
- assigned-clocks = <&cru CLK_REF_PIPE_PHY0>;
- assigned-clock-rates = <100000000>;
- #phy-cells = <1>;
- resets = <&cru SRST_REF_PIPE_PHY0>, <&cru SRST_P_PCIE2_PHY0>;
- reset-names = "phy", "apb";
- rockchip,pipe-grf = <&php_grf>;
- rockchip,pipe-phy-grf = <&pipe_phy0_grf>;
- status = "disabled";
- };
-
- combphy2_psu: phy@fee20000 {
- compatible = "rockchip,rk3588-naneng-combphy";
- reg = <0x0 0xfee20000 0x0 0x100>;
- clocks = <&cru CLK_REF_PIPE_PHY2>, <&cru PCLK_PCIE_COMBO_PIPE_PHY2>,
- <&cru PCLK_PHP_ROOT>;
- clock-names = "ref", "apb", "pipe";
- assigned-clocks = <&cru CLK_REF_PIPE_PHY2>;
- assigned-clock-rates = <100000000>;
- #phy-cells = <1>;
- resets = <&cru SRST_REF_PIPE_PHY2>, <&cru SRST_P_PCIE2_PHY2>;
- reset-names = "phy", "apb";
- rockchip,pipe-grf = <&php_grf>;
- rockchip,pipe-phy-grf = <&pipe_phy2_grf>;
- status = "disabled";
- };
-
- system_sram2: sram@ff001000 {
- compatible = "mmio-sram";
- reg = <0x0 0xff001000 0x0 0xef000>;
- ranges = <0x0 0x0 0xff001000 0xef000>;
- #address-cells = <1>;
- #size-cells = <1>;
- };
-
- pinctrl: pinctrl {
- compatible = "rockchip,rk3588-pinctrl";
- ranges;
- rockchip,grf = <&ioc>;
- #address-cells = <2>;
- #size-cells = <2>;
-
- gpio0: gpio@fd8a0000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xfd8a0000 0x0 0x100>;
- interrupts = <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_GPIO0>, <&cru DBCLK_GPIO0>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 0 32>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio1: gpio@fec20000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xfec20000 0x0 0x100>;
- interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_GPIO1>, <&cru DBCLK_GPIO1>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 32 32>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio2: gpio@fec30000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xfec30000 0x0 0x100>;
- interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_GPIO2>, <&cru DBCLK_GPIO2>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 64 32>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio3: gpio@fec40000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xfec40000 0x0 0x100>;
- interrupts = <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_GPIO3>, <&cru DBCLK_GPIO3>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 96 32>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
-
- gpio4: gpio@fec50000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xfec50000 0x0 0x100>;
- interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_GPIO4>, <&cru DBCLK_GPIO4>;
- gpio-controller;
- gpio-ranges = <&pinctrl 0 128 32>;
- interrupt-controller;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- };
- };
-};
-
-#include "rk3588s-pinctrl.dtsi"
+#include "rk3588-base.dtsi"
+#include "rk3588-opp.dtsi"
diff --git a/arch/arm64/boot/dts/sprd/ums512.dtsi b/arch/arm64/boot/dts/sprd/ums512.dtsi
index dbdb79f8e959..4c080df48724 100644
--- a/arch/arm64/boot/dts/sprd/ums512.dtsi
+++ b/arch/arm64/boot/dts/sprd/ums512.dtsi
@@ -136,16 +136,22 @@
<GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>; /* Hypervisor PPI */
};
- pmu {
- compatible = "arm,armv8-pmuv3";
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&CPU0>, <&CPU1>, <&CPU2>, <&CPU3>, <&CPU4>, <&CPU5>;
+ };
+
+ pmu-a75 {
+ compatible = "arm,cortex-a75-pmu";
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&CPU6>, <&CPU7>;
};
soc: soc {
diff --git a/arch/arm64/boot/dts/sprd/ums9620.dtsi b/arch/arm64/boot/dts/sprd/ums9620.dtsi
index 2191f0a4811b..2458071320c9 100644
--- a/arch/arm64/boot/dts/sprd/ums9620.dtsi
+++ b/arch/arm64/boot/dts/sprd/ums9620.dtsi
@@ -144,16 +144,22 @@
<GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>; /* Hypervisor PPI */
};
- pmu {
- compatible = "arm,armv8-pmuv3";
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&CPU0>, <&CPU1>, <&CPU2>, <&CPU3>;
+ };
+
+ pmu-a76 {
+ compatible = "arm,cortex-a76-pmu";
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&CPU4>, <&CPU5>, <&CPU6>, <&CPU7>;
};
soc: soc {
diff --git a/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi b/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
index 7a82896dcbf6..8fdd5f020425 100644
--- a/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp25-pinctrl.dtsi
@@ -6,6 +6,65 @@
#include <dt-bindings/pinctrl/stm32-pinfunc.h>
&pinctrl {
+ eth2_rgmii_pins_a: eth2-rgmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 7, AF10)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('C', 8, AF10)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 9, AF10)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('C', 10, AF10)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('C', 4, AF10)>; /* ETH_RGMII_TX_CTL */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 8, AF10)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('F', 7, AF10)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('C', 6, AF10)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('C', 5, AF10)>; /* ETH_MDIO */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins4 {
+ pinmux = <STM32_PINMUX('G', 0, AF10)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 12, AF10)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('F', 9, AF10)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('C', 11, AF10)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('C', 3, AF10)>; /* ETH_RGMII_RX_CTL */
+ bias-disable;
+ };
+ pins5 {
+ pinmux = <STM32_PINMUX('F', 6, AF10)>; /* ETH_RGMII_RX_CLK */
+ bias-disable;
+ };
+ };
+
+ eth2_rgmii_sleep_pins_a: eth2-rgmii-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 7, ANALOG)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('C', 8, ANALOG)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 9, ANALOG)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('C', 10, ANALOG)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('F', 8, ANALOG)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('F', 7, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('C', 6, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 0, ANALOG)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 12, ANALOG)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('F', 9, ANALOG)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('C', 11, ANALOG)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('C', 3, ANALOG)>, /* ETH_RGMII_RX_CTL */
+ <STM32_PINMUX('F', 6, ANALOG)>; /* ETH_RGMII_RX_CLK */
+ };
+ };
+
i2c2_pins_a: i2c2-0 {
pins {
pinmux = <STM32_PINMUX('B', 5, AF9)>, /* I2C2_SCL */
@@ -128,6 +187,47 @@
<STM32_PINMUX('A', 8, ANALOG)>; /* USART2_RX */
};
};
+
+ usart6_pins_a: usart6-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 13, AF3)>, /* USART6_TX */
+ <STM32_PINMUX('G', 5, AF3)>; /* USART6_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 14, AF3)>, /* USART6_RX */
+ <STM32_PINMUX('F', 15, AF3)>; /* USART6_CTS_NSS */
+ bias-pull-up;
+ };
+ };
+
+ usart6_idle_pins_a: usart6-idle-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 13, ANALOG)>, /* USART6_TX */
+ <STM32_PINMUX('F', 15, ANALOG)>; /* USART6_CTS_NSS */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('G', 5, AF3)>; /* USART6_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('F', 14, AF3)>; /* USART6_RX */
+ bias-pull-up;
+ };
+ };
+
+ usart6_sleep_pins_a: usart6-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 13, ANALOG)>, /* USART6_TX */
+ <STM32_PINMUX('G', 5, ANALOG)>, /* USART6_RTS */
+ <STM32_PINMUX('F', 15, ANALOG)>, /* USART6_CTS_NSS */
+ <STM32_PINMUX('F', 14, ANALOG)>; /* USART6_RX */
+ };
+ };
};
&pinctrl_z {
diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi
index dcd0656d67a8..1167cf63d7e8 100644
--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/st,stm32mp25-rcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/st,stm32mp25-rcc.h>
+#include <dt-bindings/regulator/st,stm32mp25-regulator.h>
/ {
#address-cells = <2>;
@@ -20,6 +21,8 @@
device_type = "cpu";
reg = <0>;
enable-method = "psci";
+ power-domains = <&CPU_PD0>;
+ power-domain-names = "psci";
};
};
@@ -51,9 +54,11 @@
};
firmware {
- optee {
+ optee: optee {
compatible = "linaro,optee-tz";
method = "smc";
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
};
scmi {
@@ -71,6 +76,40 @@
reg = <0x16>;
#reset-cells = <1>;
};
+
+ scmi_voltd: protocol@17 {
+ reg = <0x17>;
+
+ scmi_regu: regulators {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_vddio1: regulator@0 {
+ reg = <VOLTD_SCMI_VDDIO1>;
+ regulator-name = "vddio1";
+ };
+ scmi_vddio2: regulator@1 {
+ reg = <VOLTD_SCMI_VDDIO2>;
+ regulator-name = "vddio2";
+ };
+ scmi_vddio3: regulator@2 {
+ reg = <VOLTD_SCMI_VDDIO3>;
+ regulator-name = "vddio3";
+ };
+ scmi_vddio4: regulator@3 {
+ reg = <VOLTD_SCMI_VDDIO4>;
+ regulator-name = "vddio4";
+ };
+ scmi_vdd33ucpd: regulator@5 {
+ reg = <VOLTD_SCMI_UCPD>;
+ regulator-name = "vdd33ucpd";
+ };
+ scmi_vdda18adc: regulator@7 {
+ reg = <VOLTD_SCMI_ADC>;
+ regulator-name = "vdda18adc";
+ };
+ };
+ };
};
};
@@ -88,6 +127,20 @@
psci {
compatible = "arm,psci-1.0";
method = "smc";
+
+ CPU_PD0: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ };
+
+ CLUSTER_PD: power-domain-cluster {
+ #power-domain-cells = <0>;
+ power-domains = <&RET_PD>;
+ };
+
+ RET_PD: power-domain-retention {
+ #power-domain-cells = <0>;
+ };
};
timer {
@@ -107,6 +160,75 @@
interrupt-parent = <&intc>;
ranges = <0x0 0x0 0x0 0x80000000>;
+ hpdma: dma-controller@40400000 {
+ compatible = "st,stm32mp25-dma3";
+ reg = <0x40400000 0x1000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk CK_SCMI_HPDMA1>;
+ #dma-cells = <3>;
+ };
+
+ hpdma2: dma-controller@40410000 {
+ compatible = "st,stm32mp25-dma3";
+ reg = <0x40410000 0x1000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk CK_SCMI_HPDMA2>;
+ #dma-cells = <3>;
+ };
+
+ hpdma3: dma-controller@40420000 {
+ compatible = "st,stm32mp25-dma3";
+ reg = <0x40420000 0x1000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk CK_SCMI_HPDMA3>;
+ #dma-cells = <3>;
+ };
+
rifsc: bus@42080000 {
compatible = "st,stm32mp25-rifsc", "simple-bus";
reg = <0x42080000 0x1000>;
@@ -148,6 +270,33 @@
status = "disabled";
};
+ usart3: serial@400f0000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x400f0000 0x400>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_USART3>;
+ access-controllers = <&rifsc 33>;
+ status = "disabled";
+ };
+
+ uart4: serial@40100000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40100000 0x400>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART4>;
+ access-controllers = <&rifsc 34>;
+ status = "disabled";
+ };
+
+ uart5: serial@40110000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40110000 0x400>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART5>;
+ access-controllers = <&rifsc 35>;
+ status = "disabled";
+ };
+
i2c1: i2c@40120000 {
compatible = "st,stm32mp25-i2c";
reg = <0x40120000 0x400>;
@@ -239,6 +388,15 @@
status = "disabled";
};
+ usart6: serial@40220000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40220000 0x400>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_USART6>;
+ access-controllers = <&rifsc 36>;
+ status = "disabled";
+ };
+
spi1: spi@40230000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -275,6 +433,24 @@
status = "disabled";
};
+ uart9: serial@402c0000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x402c0000 0x400>;
+ interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART9>;
+ access-controllers = <&rifsc 39>;
+ status = "disabled";
+ };
+
+ usart1: serial@40330000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40330000 0x400>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_USART1>;
+ access-controllers = <&rifsc 31>;
+ status = "disabled";
+ };
+
spi6: spi@40350000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -299,6 +475,24 @@
status = "disabled";
};
+ uart7: serial@40370000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40370000 0x400>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART7>;
+ access-controllers = <&rifsc 37>;
+ status = "disabled";
+ };
+
+ uart8: serial@40380000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40380000 0x400>;
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART8>;
+ access-controllers = <&rifsc 38>;
+ status = "disabled";
+ };
+
spi8: spi@46020000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -338,6 +532,55 @@
access-controllers = <&rifsc 76>;
status = "disabled";
};
+
+ ethernet1: ethernet@482c0000 {
+ compatible = "st,stm32mp25-dwmac", "snps,dwmac-5.20";
+ reg = <0x482c0000 0x4000>;
+ reg-names = "stmmaceth";
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "ptp_ref",
+ "ethstp",
+ "eth-ck";
+ clocks = <&rcc CK_ETH1_MAC>,
+ <&rcc CK_ETH1_TX>,
+ <&rcc CK_ETH1_RX>,
+ <&rcc CK_KER_ETH1PTP>,
+ <&rcc CK_ETH1_STP>,
+ <&rcc CK_KER_ETH1>;
+ snps,axi-config = <&stmmac_axi_config_1>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&mtl_rx_setup_1>;
+ snps,mtl-tx-config = <&mtl_tx_setup_1>;
+ snps,pbl = <2>;
+ snps,tso;
+ st,syscon = <&syscfg 0x3000>;
+ access-controllers = <&rifsc 60>;
+ status = "disabled";
+
+ mtl_rx_setup_1: rx-queues-config {
+ snps,rx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+
+ mtl_tx_setup_1: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+ queue0 {};
+ queue1 {};
+ queue2 {};
+ queue3 {};
+ };
+
+ stmmac_axi_config_1: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,wr_osr_lmt = <0x7>;
+ };
+ };
};
bsec: efuse@44000000 {
@@ -441,6 +684,7 @@
<&scmi_clk CK_SCMI_TIMG2>,
<&scmi_clk CK_SCMI_PLL3>,
<&clk_dsi_txbyte>;
+ access-controllers = <&rifsc 156>;
};
exti1: interrupt-controller@44220000 {
diff --git a/arch/arm64/boot/dts/st/stm32mp253.dtsi b/arch/arm64/boot/dts/st/stm32mp253.dtsi
index 029f88981961..eeceb086252b 100644
--- a/arch/arm64/boot/dts/st/stm32mp253.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp253.dtsi
@@ -12,6 +12,8 @@
device_type = "cpu";
reg = <1>;
enable-method = "psci";
+ power-domains = <&CPU_PD1>;
+ power-domain-names = "psci";
};
};
@@ -21,6 +23,13 @@
interrupt-affinity = <&cpu0>, <&cpu1>;
};
+ psci {
+ CPU_PD1: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ };
+ };
+
timer {
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
@@ -28,3 +37,58 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
};
};
+
+&optee {
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+};
+
+&rifsc {
+ ethernet2: ethernet@482d0000 {
+ compatible = "st,stm32mp25-dwmac", "snps,dwmac-5.20";
+ reg = <0x482d0000 0x4000>;
+ reg-names = "stmmaceth";
+ interrupts-extended = <&intc GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "ptp_ref",
+ "ethstp",
+ "eth-ck";
+ clocks = <&rcc CK_ETH2_MAC>,
+ <&rcc CK_ETH2_TX>,
+ <&rcc CK_ETH2_RX>,
+ <&rcc CK_KER_ETH2PTP>,
+ <&rcc CK_ETH2_STP>,
+ <&rcc CK_KER_ETH2>;
+ snps,axi-config = <&stmmac_axi_config_2>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&mtl_rx_setup_2>;
+ snps,mtl-tx-config = <&mtl_tx_setup_2>;
+ snps,pbl = <2>;
+ snps,tso;
+ st,syscon = <&syscfg 0x3400>;
+ access-controllers = <&rifsc 61>;
+ status = "disabled";
+
+ mtl_rx_setup_2: rx-queues-config {
+ snps,rx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+
+ mtl_tx_setup_2: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+ queue0 {};
+ queue1 {};
+ queue2 {};
+ queue3 {};
+ };
+
+ stmmac_axi_config_2: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,wr_osr_lmt = <0x7>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
index 27b7360e5dba..214191a8322b 100644
--- a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
+++ b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
@@ -7,6 +7,7 @@
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/st,stm32mp25-regulator.h>
#include "stm32mp257.dtsi"
#include "stm32mp25xf.dtsi"
#include "stm32mp25-pinctrl.dtsi"
@@ -17,7 +18,9 @@
compatible = "st,stm32mp257f-ev1", "st,stm32mp257";
aliases {
+ ethernet0 = &ethernet2;
serial0 = &usart2;
+ serial1 = &usart6;
};
chosen {
@@ -40,14 +43,6 @@
no-map;
};
};
-
- vdd_sdcard: vdd-sdcard {
- compatible = "regulator-fixed";
- regulator-name = "vdd_sdcard";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
};
&arm_wdt {
@@ -55,6 +50,29 @@
status = "okay";
};
+&ethernet2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&eth2_rgmii_pins_a>;
+ pinctrl-1 = <&eth2_rgmii_sleep_pins_a>;
+ max-speed = <1000>;
+ phy-handle = <&phy0_eth2>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0_eth2: ethernet-phy@1 {
+ compatible = "ethernet-phy-id001c.c916";
+ reg = <1>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ reset-gpios = <&gpiog 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
&i2c2 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c2_pins_a>;
@@ -75,6 +93,37 @@
status = "disabled";
};
+&scmi_regu {
+ scmi_vddio1: regulator@0 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ scmi_vddcore: regulator@11 {
+ reg = <VOLTD_SCMI_STPMIC2_BUCK2>;
+ regulator-name = "vddcore";
+ };
+ scmi_v1v8: regulator@14 {
+ reg = <VOLTD_SCMI_STPMIC2_BUCK5>;
+ regulator-name = "v1v8";
+ };
+ scmi_v3v3: regulator@16 {
+ reg = <VOLTD_SCMI_STPMIC2_BUCK7>;
+ regulator-name = "v3v3";
+ };
+ scmi_vdd_emmc: regulator@18 {
+ reg = <VOLTD_SCMI_STPMIC2_LDO2>;
+ regulator-name = "vdd_emmc";
+ };
+ scmi_vdd3v3_usb: regulator@20 {
+ reg = <VOLTD_SCMI_STPMIC2_LDO4>;
+ regulator-name = "vdd3v3_usb";
+ };
+ scmi_vdd_sdcard: regulator@23 {
+ reg = <VOLTD_SCMI_STPMIC2_LDO7>;
+ regulator-name = "vdd_sdcard";
+ };
+};
+
&sdmmc1 {
pinctrl-names = "default", "opendrain", "sleep";
pinctrl-0 = <&sdmmc1_b4_pins_a>;
@@ -84,7 +133,8 @@
disable-wp;
st,neg-edge;
bus-width = <4>;
- vmmc-supply = <&vdd_sdcard>;
+ vmmc-supply = <&scmi_vdd_sdcard>;
+ vqmmc-supply = <&scmi_vddio1>;
status = "okay";
};
@@ -109,3 +159,12 @@
pinctrl-2 = <&usart2_sleep_pins_a>;
status = "okay";
};
+
+&usart6 {
+ pinctrl-names = "default", "idle", "sleep";
+ pinctrl-0 = <&usart6_pins_a>;
+ pinctrl-1 = <&usart6_idle_pins_a>;
+ pinctrl-2 = <&usart6_sleep_pins_a>;
+ uart-has-rtscts;
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index 2c327cc320cf..e20b27ddf901 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -22,11 +22,14 @@ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dahlia.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dev.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-mallow.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-yavia.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am625-phyboard-lyra-1-4-ghz-opp.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am62x-phyboard-lyra-gpio-fan.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk-nand.dtbo
# Boards with AM62Ax SoC
dtb-$(CONFIG_ARCH_K3) += k3-am62a7-sk.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am62a7-phyboard-lyra-rdk.dtb
# Boards with AM62Px SoC
dtb-$(CONFIG_ARCH_K3) += k3-am62p5-sk.dtb
@@ -44,16 +47,26 @@ k3-am642-hummingboard-t-usb3-dtbs := \
k3-am642-hummingboard-t.dtb k3-am642-hummingboard-t-usb3.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac-mii.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-hummingboard-t.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-hummingboard-t-pcie.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-hummingboard-t-usb3.dtb
+k3-am642-evm-nand-dtbs := k3-am642-evm.dtb k3-am642-evm-nand.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-nand.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-rdk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-gpio-fan.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-pcie-usb2.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-sk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-tqma64xxl-mbax4xxl.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am64-tqma64xxl-mbax4xxl-sdcard.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am64-tqma64xxl-mbax4xxl-wlan.dtbo
+# Common overlays for the phyCORE-AM6* family of boards
+dtb-$(CONFIG_ARCH_K3) += k3-am6xx-phycore-disable-eth-phy.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am6xx-phycore-disable-rtc.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am6xx-phycore-disable-spi-nor.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am6xx-phycore-qspi-nor.dtbo
+
# Boards with AM65x SoC
k3-am654-gp-evm-dtbs := k3-am654-base-board.dtb \
k3-am654-base-board-rocktech-rk101-panel.dtbo \
@@ -81,6 +94,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-j7200-evm.dtb
# Boards with J721e SoC
k3-j721e-evm-dtbs := k3-j721e-common-proc-board.dtb k3-j721e-evm-quad-port-eth-exp.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721e-beagleboneai64.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-j721e-common-proc-board-infotainment.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721e-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j721e-evm-gesi-exp-board.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721e-evm-pcie0-ep.dtbo
@@ -101,14 +115,27 @@ dtb-$(CONFIG_ARCH_K3) += k3-j722s-evm.dtb
# Boards with J784s4 SoC
dtb-$(CONFIG_ARCH_K3) += k3-am69-sk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-pcie0-pcie1-ep.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-quad-port-eth-exp1.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-usxgmii-exp1-exp2.dtbo
# Build time test only, enabled by CONFIG_OF_ALL_DTBS
k3-am625-beagleplay-csi2-ov5640-dtbs := k3-am625-beagleplay.dtb \
k3-am625-beagleplay-csi2-ov5640.dtbo
k3-am625-beagleplay-csi2-tevi-ov5640-dtbs := k3-am625-beagleplay.dtb \
k3-am625-beagleplay-csi2-tevi-ov5640.dtbo
k3-am625-phyboard-lyra-1-4-ghz-opp-dtbs := k3-am625-phyboard-lyra-rdk.dtb \
 k3-am625-phyboard-lyra-1-4-ghz-opp.dtbo
+k3-am625-phyboard-lyra-disable-eth-phy-dtbs := k3-am625-phyboard-lyra-rdk.dtb \
+ k3-am6xx-phycore-disable-eth-phy.dtbo
+k3-am625-phyboard-lyra-disable-rtc-dtbs := k3-am625-phyboard-lyra-rdk.dtb \
+ k3-am6xx-phycore-disable-rtc.dtbo
+k3-am625-phyboard-lyra-disable-spi-nor-dtbs := k3-am625-phyboard-lyra-rdk.dtb \
+ k3-am6xx-phycore-disable-spi-nor.dtbo
k3-am625-phyboard-lyra-gpio-fan-dtbs := k3-am625-phyboard-lyra-rdk.dtb \
k3-am62x-phyboard-lyra-gpio-fan.dtbo
+k3-am625-phyboard-lyra-qspi-nor-dtbs := k3-am625-phyboard-lyra-rdk.dtb \
+ k3-am6xx-phycore-qspi-nor.dtbo
k3-am625-sk-csi2-imx219-dtbs := k3-am625-sk.dtb \
k3-am62x-sk-csi2-imx219.dtbo
k3-am625-sk-csi2-ov5640-dtbs := k3-am625-sk.dtb \
@@ -132,8 +159,20 @@ k3-am62p5-sk-csi2-tevi-ov5640-dtbs := k3-am62p5-sk.dtb \
k3-am62x-sk-csi2-tevi-ov5640.dtbo
k3-am642-evm-icssg1-dualemac-dtbs := \
k3-am642-evm.dtb k3-am642-evm-icssg1-dualemac.dtbo
+k3-am642-evm-icssg1-dualemac-mii-dtbs := \
+ k3-am642-evm.dtb k3-am642-evm-icssg1-dualemac-mii.dtbo
+k3-am642-phyboard-electra-disable-eth-phy-dtbs := \
+ k3-am642-phyboard-electra-rdk.dtb k3-am6xx-phycore-disable-eth-phy.dtbo
+k3-am642-phyboard-electra-disable-rtc-dtbs := \
+ k3-am642-phyboard-electra-rdk.dtb k3-am6xx-phycore-disable-rtc.dtbo
+k3-am642-phyboard-electra-disable-spi-nor-dtbs := \
+ k3-am642-phyboard-electra-rdk.dtb k3-am6xx-phycore-disable-spi-nor.dtbo
+k3-am642-phyboard-electra-qspi-nor-dtbs := \
+ k3-am642-phyboard-electra-rdk.dtb k3-am6xx-phycore-qspi-nor.dtbo
k3-am642-phyboard-electra-gpio-fan-dtbs := \
k3-am642-phyboard-electra-rdk.dtb k3-am642-phyboard-electra-gpio-fan.dtbo
+k3-am642-phyboard-electra-pcie-usb2-dtbs := \
+ k3-am642-phyboard-electra-rdk.dtb k3-am642-phyboard-electra-pcie-usb2.dtbo
k3-am642-tqma64xxl-mbax4xxl-sdcard-dtbs := \
k3-am642-tqma64xxl-mbax4xxl.dtb k3-am64-tqma64xxl-mbax4xxl-sdcard.dtbo
k3-am642-tqma64xxl-mbax4xxl-wlan-dtbs := \
@@ -142,12 +181,20 @@ k3-am68-sk-base-board-csi2-dual-imx219-dtbs := k3-am68-sk-base-board.dtb \
k3-j721e-sk-csi2-dual-imx219.dtbo
k3-am69-sk-csi2-dual-imx219-dtbs := k3-am69-sk.dtb \
k3-j721e-sk-csi2-dual-imx219.dtbo
+k3-j721e-common-proc-board-infotainment-dtbs := k3-j721e-common-proc-board.dtb \
+ k3-j721e-common-proc-board-infotainment.dtbo
k3-j721e-evm-pcie0-ep-dtbs := k3-j721e-common-proc-board.dtb \
k3-j721e-evm-pcie0-ep.dtbo
k3-j721e-sk-csi2-dual-imx219-dtbs := k3-j721e-sk.dtb \
k3-j721e-sk-csi2-dual-imx219.dtbo
k3-j721s2-evm-pcie1-ep-dtbs := k3-j721s2-common-proc-board.dtb \
k3-j721s2-evm-pcie1-ep.dtbo
+k3-j784s4-evm-pcie0-pcie1-ep-dtbs := k3-j784s4-evm.dtb \
+ k3-j784s4-evm-pcie0-pcie1-ep.dtbo
+k3-j784s4-evm-quad-port-eth-exp1-dtbs := k3-j784s4-evm.dtb \
+ k3-j784s4-evm-quad-port-eth-exp1.dtbo
+k3-j784s4-evm-usxgmii-exp1-exp2-dtbs := k3-j784s4-evm.dtb \
+ k3-j784s4-evm-usxgmii-exp1-exp2.dtbo
dtb- += k3-am625-beagleplay-csi2-ov5640.dtb \
k3-am625-beagleplay-csi2-tevi-ov5640.dtb \
k3-am625-sk-csi2-imx219.dtb \
@@ -162,17 +209,23 @@ dtb- += k3-am625-beagleplay-csi2-ov5640.dtb \
k3-am62p5-sk-csi2-ov5640.dtb \
k3-am62p5-sk-csi2-tevi-ov5640.dtb \
k3-am642-evm-icssg1-dualemac.dtb \
+ k3-am642-evm-icssg1-dualemac-mii.dtb \
k3-am642-tqma64xxl-mbax4xxl-sdcard.dtb \
k3-am642-tqma64xxl-mbax4xxl-wlan.dtb \
k3-am68-sk-base-board-csi2-dual-imx219.dtb \
k3-am69-sk-csi2-dual-imx219.dtb \
+ k3-j721e-common-proc-board-infotainment.dtb \
k3-j721e-evm-pcie0-ep.dtb \
k3-j721e-sk-csi2-dual-imx219.dtb \
- k3-j721s2-evm-pcie1-ep.dtb
+ k3-j721s2-evm-pcie1-ep.dtb \
+ k3-j784s4-evm-pcie0-pcie1-ep.dtb \
+ k3-j784s4-evm-quad-port-eth-exp1.dtb \
+ k3-j784s4-evm-usxgmii-exp1-exp2.dtb
# Enable support for device-tree overlays
DTC_FLAGS_k3-am625-beagleplay += -@
DTC_FLAGS_k3-am625-phyboard-lyra-rdk += -@
+DTC_FLAGS_k3-am62a7-phyboard-lyra-rdk += -@
DTC_FLAGS_k3-am625-sk += -@
DTC_FLAGS_k3-am62-lp-sk += -@
DTC_FLAGS_k3-am62a7-sk += -@
@@ -186,3 +239,4 @@ DTC_FLAGS_k3-am69-sk += -@
DTC_FLAGS_k3-j721e-common-proc-board += -@
DTC_FLAGS_k3-j721e-sk += -@
DTC_FLAGS_k3-j721s2-common-proc-board += -@
+DTC_FLAGS_k3-j784s4-evm += -@
diff --git a/arch/arm64/boot/dts/ti/k3-am62-lp-sk-nand.dtso b/arch/arm64/boot/dts/ti/k3-am62-lp-sk-nand.dtso
new file mode 100644
index 000000000000..173ac60723b6
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62-lp-sk-nand.dtso
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "k3-pinctrl.h"
+
+&mcasp1 {
+ status = "disabled";
+};
+
+&main_pmx0 {
+ gpmc0_pins_default: gpmc0-pins-default {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x003c, PIN_INPUT, 0) /* (K19) GPMC0_AD0 */
+ AM62X_IOPAD(0x0040, PIN_INPUT, 0) /* (L19) GPMC0_AD1 */
+ AM62X_IOPAD(0x0044, PIN_INPUT, 0) /* (L20) GPMC0_AD2 */
+ AM62X_IOPAD(0x0048, PIN_INPUT, 0) /* (L21) GPMC0_AD3 */
+ AM62X_IOPAD(0x004c, PIN_INPUT, 0) /* (M21) GPMC0_AD4 */
+ AM62X_IOPAD(0x0050, PIN_INPUT, 0) /* (L17) GPMC0_AD5 */
+ AM62X_IOPAD(0x0054, PIN_INPUT, 0) /* (L18) GPMC0_AD6 */
+ AM62X_IOPAD(0x0058, PIN_INPUT, 0) /* (M20) GPMC0_AD7 */
+ AM62X_IOPAD(0x0098, PIN_INPUT, 0) /* (P21) GPMC0_WAIT0 */
+ AM62X_IOPAD(0x00a8, PIN_OUTPUT, 0) /* (J18) GPMC0_CSn0 */
+ AM62X_IOPAD(0x0084, PIN_OUTPUT, 0) /* (K20) GPMC0_ADVn_ALE */
+ AM62X_IOPAD(0x0088, PIN_OUTPUT, 0) /* (K21) GPMC0_OEn_REn */
+ AM62X_IOPAD(0x008c, PIN_OUTPUT, 0) /* (J17) GPMC0_WEn */
+ AM62X_IOPAD(0x0090, PIN_OUTPUT, 0) /* (K17) GPMC0_BE0n_CLE */
+ AM62X_IOPAD(0x00a0, PIN_OUTPUT, 0) /* (J20) GPMC0_WPn */
+ >;
+ };
+};
+
+&elm0 {
+ status = "okay";
+};
+
+&gpmc0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmc0_pins_default>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ nand@0,0 {
+ compatible = "ti,am64-nand";
+ reg = <0 0 64>; /* device IO registers */
+ interrupt-parent = <&gpmc0>;
+ interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+ <1 IRQ_TYPE_NONE>; /* termcount */
+ rb-gpios = <&gpmc0 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
+ ti,nand-xfer-type = "prefetch-polled";
+ ti,nand-ecc-opt = "bch8"; /* BCH8: Bootrom limitation */
+ ti,elm-id = <&elm0>;
+ nand-bus-width = <8>;
+ gpmc,device-width = <1>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <40>;
+ gpmc,cs-wr-off-ns = <40>;
+ gpmc,adv-on-ns = <0>;
+ gpmc,adv-rd-off-ns = <25>;
+ gpmc,adv-wr-off-ns = <25>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <20>;
+ gpmc,oe-on-ns = <3>;
+ gpmc,oe-off-ns = <30>;
+ gpmc,access-ns = <30>;
+ gpmc,rd-cycle-ns = <40>;
+ gpmc,wr-cycle-ns = <40>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,clk-activation-ns = <0>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "NAND.tiboot3";
+ reg = <0x00000000 0x00200000>; /* 2M */
+ };
+ partition@200000 {
+ label = "NAND.tispl";
+ reg = <0x00200000 0x00200000>; /* 2M */
+ };
+ partition@400000 {
+ label = "NAND.tiboot3.backup"; /* 2M */
+ reg = <0x00400000 0x00200000>; /* BootROM looks at 4M */
+ };
+ partition@600000 {
+ label = "NAND.u-boot";
+ reg = <0x00600000 0x00400000>; /* 4M */
+ };
+ partition@a00000 {
+ label = "NAND.u-boot-env";
+ reg = <0x00a00000 0x00040000>; /* 256K */
+ };
+ partition@a40000 {
+ label = "NAND.u-boot-env.backup";
+ reg = <0x00a40000 0x00040000>; /* 256K */
+ };
+ partition@a80000 {
+ label = "NAND.file-system";
+ reg = <0x00a80000 0x3f580000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
index 9a17bd3e59c9..8e9fc00a6b3c 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
@@ -228,3 +228,7 @@
&tlv320aic3106 {
DVDD-supply = <&buck2_reg>;
};
+
+&gpmc0 {
+ ranges = <0 0 0x00 0x51000000 0x01000000>; /* CS0 space. Min partition = 16MB */
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
index 448a59dc53a7..328929c740dc 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
@@ -141,8 +141,8 @@
compatible = "ti,am64-dmss-pktdma";
reg = <0x00 0x485c0000 0x00 0x100>,
<0x00 0x4a800000 0x00 0x20000>,
- <0x00 0x4aa00000 0x00 0x40000>,
- <0x00 0x4b800000 0x00 0x400000>,
+ <0x00 0x4aa00000 0x00 0x20000>,
+ <0x00 0x4b800000 0x00 0x200000>,
<0x00 0x485e0000 0x00 0x10000>,
<0x00 0x484a0000 0x00 0x2000>,
<0x00 0x484c0000 0x00 0x2000>,
@@ -207,10 +207,6 @@
crypto: crypto@40900000 {
compatible = "ti,am62-sa3ul";
reg = <0x00 0x40900000 0x00 0x1200>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges = <0x00 0x40900000 0x00 0x40900000 0x00 0x30000>;
-
dmas = <&main_pktdma 0xf501 0>, <&main_pktdma 0x7506 0>,
<&main_pktdma 0x7507 0>;
dma-names = "tx", "rx1", "rx2";
@@ -739,7 +735,7 @@
label = "port1";
phys = <&phy_gmii_sel 1>;
mac-address = [00 00 00 00 00 00];
- ti,syscon-efuse = <&wkup_conf 0x200>;
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
};
cpsw_port2: port@2 {
@@ -1057,4 +1053,33 @@
status = "disabled";
};
+ gpmc0: memory-controller@3b000000 {
+ compatible = "ti,am64-gpmc";
+ power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 80 0>;
+ clock-names = "fck";
+ reg = <0x00 0x03b000000 0x00 0x400>,
+ <0x00 0x050000000 0x00 0x8000000>;
+ reg-names = "cfg", "data";
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ gpmc,num-cs = <3>;
+ gpmc,num-waitpins = <2>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ elm0: ecc@25010000 {
+ compatible = "ti,am64-elm";
+ reg = <0x00 0x25010000 0x00 0x2000>;
+ interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 54 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 54 0>;
+ clock-names = "fck";
+ status = "disabled";
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi
index 74eec1a1abca..5c1284b802ad 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-dev.dtsi
@@ -14,6 +14,7 @@
simple-audio-card,bitclock-master = <&codec_dai>;
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&codec_dai>;
+ simple-audio-card,mclk-fs = <256>;
simple-audio-card,name = "verdin-nau8822";
simple-audio-card,routing =
"Headphones", "LHP",
@@ -34,7 +35,6 @@
"Line", "Line In";
codec_dai: simple-audio-card,codec {
- clocks = <&audio_refclk1>;
sound-dai = <&nau8822_1a>;
};
@@ -107,6 +107,8 @@
reg = <0x1a>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2s1_mclk>;
+ clock-names = "mclk";
+ clocks = <&audio_refclk1>;
#sound-dai-cells = <0>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
index 2038c5e04639..359f53f3e019 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
@@ -1364,8 +1364,6 @@
0 0 0 0
>;
tdm-slots = <2>;
- rx-num-evt = <32>;
- tx-num-evt = <32>;
#sound-dai-cells = <0>;
status = "disabled";
};
@@ -1382,8 +1380,6 @@
0 0 0 0
>;
tdm-slots = <2>;
- rx-num-evt = <32>;
- tx-num-evt = <32>;
#sound-dai-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
index 66ddf2dc51af..e0afafd532a5 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-wakeup.dtsi
@@ -22,6 +22,11 @@
reg = <0x14 0x4>;
};
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
+
usb0_phy_ctrl: syscon@4008 {
compatible = "ti,am62-usb-phy-ctrl", "syscon";
reg = <0x4008 0x4>;
diff --git a/arch/arm64/boot/dts/ti/k3-am62.dtsi b/arch/arm64/boot/dts/ti/k3-am62.dtsi
index f0781f2bea29..bfb55ca11323 100644
--- a/arch/arm64/boot/dts/ti/k3-am62.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62.dtsi
@@ -68,11 +68,13 @@
<0x00 0x30200000 0x00 0x30200000 0x00 0x00010000>, /* DSS */
<0x00 0x31000000 0x00 0x31000000 0x00 0x00050000>, /* USB0 DWC3 Core window */
<0x00 0x31100000 0x00 0x31100000 0x00 0x00050000>, /* USB1 DWC3 Core window */
+ <0x00 0x3b000000 0x00 0x3b000000 0x00 0x00000400>, /* GPMC0_CFG */
<0x00 0x40900000 0x00 0x40900000 0x00 0x00030000>, /* SA3UL */
<0x00 0x43600000 0x00 0x43600000 0x00 0x00010000>, /* SA3 sproxy data */
<0x00 0x44043000 0x00 0x44043000 0x00 0x00000fe0>, /* TI SCI DEBUG */
<0x00 0x44860000 0x00 0x44860000 0x00 0x00040000>, /* SA3 sproxy config */
<0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>, /* DMSS */
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x08000000>, /* GPMC0 DATA */
<0x00 0x60000000 0x00 0x60000000 0x00 0x08000000>, /* FSS0 DAT1 */
<0x00 0x70000000 0x00 0x70000000 0x00 0x00010000>, /* OCSRAM */
<0x01 0x00000000 0x01 0x00000000 0x00 0x00310000>, /* A53 PERIPHBASE */
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index 18e3070a8683..70de288d728e 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -924,6 +924,4 @@
0 0 0 0
0 0 0 0
>;
- tx-num-evt = <32>;
- rx-num-evt = <32>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-1-4-ghz-opp.dtso b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-1-4-ghz-opp.dtso
new file mode 100644
index 000000000000..6ec6d57ec49c
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-1-4-ghz-opp.dtso
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2024 PHYTEC America LLC
+ * Author: Nathan Morrisson <nmorrisson@phytec.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+&vdd_core {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+};
+
+&a53_opp_table {
+ opp-1400000000 {
+ opp-hz = /bits/ 64 <1400000000>;
+ opp-supported-hw = <0x01 0x0004>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts
index 50d2573c840e..4fa5efdffcd7 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts
@@ -7,477 +7,12 @@
* https://www.phytec.com/product/phyboard-am62x
*/
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/leds/common.h>
-#include <dt-bindings/net/ti-dp83867.h>
#include "k3-am625.dtsi"
#include "k3-am62-phycore-som.dtsi"
+#include "k3-am62x-phyboard-lyra.dtsi"
/ {
compatible = "phytec,am625-phyboard-lyra-rdk",
"phytec,am62-phycore-som", "ti,am625";
model = "PHYTEC phyBOARD-Lyra AM625";
-
- aliases {
- serial2 = &main_uart0;
- serial3 = &main_uart1;
- mmc1 = &sdhci1;
- usb0 = &usb0;
- usb1 = &usb1;
- ethernet1 = &cpsw_port2;
- };
-
- can_tc1: can-phy0 {
- compatible = "ti,tcan1042";
- #phy-cells = <0>;
- max-bitrate = <8000000>;
- standby-gpios = <&gpio_exp 1 GPIO_ACTIVE_HIGH>;
- };
-
- hdmi0: connector-hdmi {
- compatible = "hdmi-connector";
- label = "hdmi";
- type = "a";
-
- port {
- hdmi_connector_in: endpoint {
- remote-endpoint = <&sii9022_out>;
- };
- };
- };
-
- keys {
- compatible = "gpio-keys";
- autorepeat;
- pinctrl-names = "default";
- pinctrl-0 = <&gpio_keys_pins_default>;
-
- key-home {
- label = "home";
- linux,code = <KEY_HOME>;
- gpios = <&main_gpio1 23 GPIO_ACTIVE_HIGH>;
- };
-
- key-menu {
- label = "menu";
- linux,code = <KEY_MENU>;
- gpios = <&gpio_exp 4 GPIO_ACTIVE_HIGH>;
- };
- };
-
- sound {
- compatible = "simple-audio-card";
- simple-audio-card,name = "phyBOARD-Lyra";
- simple-audio-card,widgets =
- "Microphone", "Mic Jack",
- "Headphone", "Headphone Jack",
- "Speaker", "External Speaker";
- simple-audio-card,routing =
- "MIC3R", "Mic Jack",
- "Mic Jack", "Mic Bias",
- "Headphone Jack", "HPLOUT",
- "Headphone Jack", "HPROUT",
- "External Speaker", "SPOP",
- "External Speaker", "SPOM";
- simple-audio-card,format = "dsp_b";
- simple-audio-card,bitclock-master = <&sound_master>;
- simple-audio-card,frame-master = <&sound_master>;
- simple-audio-card,bitclock-inversion;
-
- simple-audio-card,cpu {
- sound-dai = <&mcasp2>;
- };
-
- sound_master: simple-audio-card,codec {
- sound-dai = <&audio_codec>;
- clocks = <&audio_refclk1>;
- };
- };
-
- leds {
- compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&leds_pins_default>, <&user_leds_pins_default>;
-
- led-1 {
- gpios = <&main_gpio0 32 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc0";
- };
-
- led-2 {
- gpios = <&gpio_exp 2 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc1";
- };
- };
-
- vcc_1v8: regulator-vcc-1v8 {
- compatible = "regulator-fixed";
- regulator-name = "VCC_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vcc_3v3_mmc: regulator-vcc-3v3-mmc {
- compatible = "regulator-fixed";
- regulator-name = "VCC_3V3_MMC";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vcc_3v3_sw: regulator-vcc-3v3-sw {
- compatible = "regulator-fixed";
- regulator-name = "VCC_3V3_SW";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-};
-
-&main_pmx0 {
- audio_ext_refclk1_pins_default: audio-ext-refclk1-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x0a0, PIN_OUTPUT, 1) /* (K25) GPMC0_WPn.AUDIO_EXT_REFCLK1 */
- >;
- };
-
- gpio_keys_pins_default: gpio-keys-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x1d4, PIN_INPUT, 7) /* (B15) UART0_RTSn.GPIO1_23 */
- >;
- };
-
- gpio_exp_int_pins_default: gpio-exp-int-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x244, PIN_INPUT, 7) /* (C17) MMC1_SDWP.GPIO1_49 */
- >;
- };
-
- hdmi_int_pins_default: hdmi-int-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x040, PIN_INPUT, 7) /* (N23) GPMC0_AD1.GPIO0_16 */
- >;
- };
-
- main_dss0_pins_default: main-dss0-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x0b8, PIN_OUTPUT, 0) /* (U22) VOUT0_DATA0 */
- AM62X_IOPAD(0x0bc, PIN_OUTPUT, 0) /* (V24) VOUT0_DATA1 */
- AM62X_IOPAD(0x0e0, PIN_OUTPUT, 0) /* (V20) VOUT0_DATA10 */
- AM62X_IOPAD(0x0e4, PIN_OUTPUT, 0) /* (AA23) VOUT0_DATA11 */
- AM62X_IOPAD(0x0e8, PIN_OUTPUT, 0) /* (AB25) VOUT0_DATA12 */
- AM62X_IOPAD(0x0ec, PIN_OUTPUT, 0) /* (AA24) VOUT0_DATA13 */
- AM62X_IOPAD(0x0f0, PIN_OUTPUT, 0) /* (Y22) VOUT0_DATA14 */
- AM62X_IOPAD(0x0f4, PIN_OUTPUT, 0) /* (AA21) VOUT0_DATA15 */
- AM62X_IOPAD(0x0c0, PIN_OUTPUT, 0) /* (W25) VOUT0_DATA2 */
- AM62X_IOPAD(0x0c4, PIN_OUTPUT, 0) /* (W24) VOUT0_DATA3 */
- AM62X_IOPAD(0x0c8, PIN_OUTPUT, 0) /* (Y25) VOUT0_DATA4 */
- AM62X_IOPAD(0x0cc, PIN_OUTPUT, 0) /* (Y24) VOUT0_DATA5 */
- AM62X_IOPAD(0x0d0, PIN_OUTPUT, 0) /* (Y23) VOUT0_DATA6 */
- AM62X_IOPAD(0x0d4, PIN_OUTPUT, 0) /* (AA25) VOUT0_DATA7 */
- AM62X_IOPAD(0x0d8, PIN_OUTPUT, 0) /* (V21) VOUT0_DATA8 */
- AM62X_IOPAD(0x0dc, PIN_OUTPUT, 0) /* (W21) VOUT0_DATA9 */
- AM62X_IOPAD(0x0fc, PIN_OUTPUT, 0) /* (Y20) VOUT0_DE */
- AM62X_IOPAD(0x0f8, PIN_OUTPUT, 0) /* (AB24) VOUT0_HSYNC */
- AM62X_IOPAD(0x104, PIN_OUTPUT, 0) /* (AC24) VOUT0_PCLK */
- AM62X_IOPAD(0x100, PIN_OUTPUT, 0) /* (AC25) VOUT0_VSYNC */
- >;
- };
-
- main_i2c1_pins_default: main-i2c1-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x1e8, PIN_INPUT_PULLUP, 0) /* (B17) I2C1_SCL */
- AM62X_IOPAD(0x1ec, PIN_INPUT_PULLUP, 0) /* (A17) I2C1_SDA */
- >;
- };
-
- main_mcan0_pins_default: main-mcan0-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x1dc, PIN_INPUT, 0) /* (E15) MCAN0_RX */
- AM62X_IOPAD(0x1d8, PIN_OUTPUT, 0) /* (C15) MCAN0_TX */
- >;
- };
-
- main_mcasp2_pins_default: main-mcasp2-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x070, PIN_INPUT, 3) /* (T24) GPMC0_AD13.MCASP2_ACLKX */
- AM62X_IOPAD(0x06c, PIN_INPUT, 3) /* (T22) GPMC0_AD12.MCASP2_AFSX */
- AM62X_IOPAD(0x064, PIN_OUTPUT, 3) /* (T25) GPMC0_AD10.MCASP2_AXR2 */
- AM62X_IOPAD(0x068, PIN_INPUT, 3) /* (R21) GPMC0_AD11.MCASP2_AXR3 */
- >;
- };
-
- main_mmc1_pins_default: main-mmc1-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x23c, PIN_INPUT_PULLUP, 0) /* (A21) MMC1_CMD */
- AM62X_IOPAD(0x234, PIN_INPUT_PULLDOWN, 0) /* (B22) MMC1_CLK */
- AM62X_IOPAD(0x230, PIN_INPUT_PULLUP, 0) /* (A22) MMC1_DAT0 */
- AM62X_IOPAD(0x22c, PIN_INPUT_PULLUP, 0) /* (B21) MMC1_DAT1 */
- AM62X_IOPAD(0x228, PIN_INPUT_PULLUP, 0) /* (C21) MMC1_DAT2 */
- AM62X_IOPAD(0x224, PIN_INPUT_PULLUP, 0) /* (D22) MMC1_DAT3 */
- AM62X_IOPAD(0x240, PIN_INPUT_PULLUP, 0) /* (D17) MMC1_SDCD */
- >;
- };
-
- main_rgmii2_pins_default: main-rgmii2-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x184, PIN_INPUT, 0) /* (AE23) RGMII2_RD0 */
- AM62X_IOPAD(0x188, PIN_INPUT, 0) /* (AB20) RGMII2_RD1 */
- AM62X_IOPAD(0x18c, PIN_INPUT, 0) /* (AC21) RGMII2_RD2 */
- AM62X_IOPAD(0x190, PIN_INPUT, 0) /* (AE22) RGMII2_RD3 */
- AM62X_IOPAD(0x180, PIN_INPUT, 0) /* (AD23) RGMII2_RXC */
- AM62X_IOPAD(0x17c, PIN_INPUT, 0) /* (AD22) RGMII2_RX_CTL */
- AM62X_IOPAD(0x16c, PIN_OUTPUT, 0) /* (Y18) RGMII2_TD0 */
- AM62X_IOPAD(0x170, PIN_OUTPUT, 0) /* (AA18) RGMII2_TD1 */
- AM62X_IOPAD(0x174, PIN_OUTPUT, 0) /* (AD21) RGMII2_TD2 */
- AM62X_IOPAD(0x178, PIN_OUTPUT, 0) /* (AC20) RGMII2_TD3 */
- AM62X_IOPAD(0x168, PIN_OUTPUT, 0) /* (AE21) RGMII2_TXC */
- AM62X_IOPAD(0x164, PIN_OUTPUT, 0) /* (AA19) RGMII2_TX_CTL */
- >;
- };
-
- main_uart0_pins_default: main-uart0-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x1c8, PIN_INPUT, 0) /* (D14) UART0_RXD */
- AM62X_IOPAD(0x1cc, PIN_OUTPUT, 0) /* (E14) UART0_TXD */
- >;
- };
-
- main_uart1_pins_default: main-uart1-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x194, PIN_INPUT, 2) /* (B19) MCASP0_AXR3.UART1_CTSn */
- AM62X_IOPAD(0x198, PIN_OUTPUT, 2) /* (A19) MCASP0_AXR2.UART1_RTSn */
- AM62X_IOPAD(0x1ac, PIN_INPUT, 2) /* (E19) MCASP0_AFSR.UART1_RXD */
- AM62X_IOPAD(0x1b0, PIN_OUTPUT, 2) /* (A20) MCASP0_ACLKR.UART1_TXD */
- >;
- };
-
- main_usb1_pins_default: main-usb1-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x258, PIN_OUTPUT, 0) /* (F18) USB1_DRVVBUS */
- >;
- };
-
- user_leds_pins_default: user-leds-default-pins {
- pinctrl-single,pins = <
- AM62X_IOPAD(0x084, PIN_OUTPUT, 7) /* (L23) GPMC0_ADVn_ALE.GPIO0_32 */
- >;
- };
-};
-
-&cpsw3g {
- pinctrl-names = "default";
- pinctrl-0 = <&main_rgmii1_pins_default>, <&main_rgmii2_pins_default>;
-};
-
-&cpsw_port2 {
- phy-mode = "rgmii-rxid";
- phy-handle = <&cpsw3g_phy3>;
-};
-
-&cpsw3g_mdio {
- cpsw3g_phy3: ethernet-phy@3 {
- compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
- reg = <3>;
- ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
- ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
- ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
- };
-};
-
-&dss {
- pinctrl-names = "default";
- pinctrl-0 = <&main_dss0_pins_default>;
- status = "okay";
-};
-
-&dss_ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* VP2: DPI/HDMI Output */
- port@1 {
- reg = <1>;
-
- dpi1_out: endpoint {
- remote-endpoint = <&sii9022_in>;
- };
- };
-};
-
-&main_i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&main_i2c1_pins_default>;
- clock-frequency = <100000>;
- status = "okay";
-
- audio_codec: audio-codec@18 {
- pinctrl-names = "default";
- pinctrl-0 = <&audio_ext_refclk1_pins_default>;
-
- #sound-dai-cells = <0>;
- compatible = "ti,tlv320aic3007";
- reg = <0x18>;
- ai3x-micbias-vg = <2>;
-
- AVDD-supply = <&vcc_3v3_sw>;
- IOVDD-supply = <&vcc_3v3_sw>;
- DRVDD-supply = <&vcc_3v3_sw>;
- DVDD-supply = <&vcc_1v8>;
- };
-
- gpio_exp: gpio-expander@21 {
- pinctrl-names = "default";
- pinctrl-0 = <&gpio_exp_int_pins_default>;
- compatible = "nxp,pcf8574";
- reg = <0x21>;
- interrupt-parent = <&main_gpio1>;
- interrupts = <49 0>;
- #gpio-cells = <2>;
- gpio-controller;
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-line-names = "", "GPIO1_CAN0_nEN",
- "GPIO2_LED2", "GPIO3_LVDS_GPIO",
- "GPIO4_BUT2", "GPIO5_LVDS_BKLT_EN",
- "GPIO6_ETH1_USER_RESET", "GPIO7_AUDIO_USER_RESET";
- };
-
- usb-pd@22 {
- compatible = "ti,tps6598x";
- reg = <0x22>;
-
- connector {
- compatible = "usb-c-connector";
- label = "USB-C";
- self-powered;
- data-role = "dual";
- power-role = "sink";
- port {
- usb_con_hs: endpoint {
- remote-endpoint = <&typec_hs>;
- };
- };
- };
- };
-
- sii9022: bridge-hdmi@39 {
- compatible = "sil,sii9022";
- reg = <0x39>;
-
- interrupt-parent = <&main_gpio0>;
- interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
- pinctrl-names = "default";
- pinctrl-0 = <&hdmi_int_pins_default>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- sii9022_in: endpoint {
- remote-endpoint = <&dpi1_out>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- sii9022_out: endpoint {
- remote-endpoint = <&hdmi_connector_in>;
- };
- };
- };
- };
-
- eeprom@51 {
- compatible = "atmel,24c02";
- pagesize = <16>;
- reg = <0x51>;
- };
-};
-
-&main_mcan0 {
- pinctrl-names = "default";
- pinctrl-0 = <&main_mcan0_pins_default>;
- phys = <&can_tc1>;
- status = "okay";
-};
-
-&main_uart0 {
- pinctrl-names = "default";
- pinctrl-0 = <&main_uart0_pins_default>;
- status = "okay";
-};
-
-&main_uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&main_uart1_pins_default>;
- /* Main UART1 may be used by TIFS firmware */
- status = "okay";
-};
-
-&mcasp2 {
- #sound-dai-cells = <0>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&main_mcasp2_pins_default>;
-
- /* MCASP_IIS_MODE */
- op-mode = <0>;
- tdm-slots = <2>;
-
- /* 0: INACTIVE, 1: TX, 2: RX */
- serial-dir = <
- 0 0 1 2
- 0 0 0 0
- 0 0 0 0
- 0 0 0 0
- >;
- tx-num-evt = <32>;
- rx-num-evt = <32>;
- status = "okay";
-};
-
-&sdhci1 {
- vmmc-supply = <&vcc_3v3_mmc>;
- vqmmc-supply = <&vddshv5_sdio>;
- pinctrl-names = "default";
- pinctrl-0 = <&main_mmc1_pins_default>;
- disable-wp;
- no-1-8-v;
- status = "okay";
-};
-
-&usbss0 {
- ti,vbus-divider;
- status = "okay";
-};
-
-&usbss1 {
- ti,vbus-divider;
- status = "okay";
-};
-
-&usb0 {
- usb-role-switch;
-
- port {
- typec_hs: endpoint {
- remote-endpoint = <&usb_con_hs>;
- };
- };
-};
-
-&usb1 {
- dr_mode = "host";
- pinctrl-names = "default";
- pinctrl-0 = <&main_usb1_pins_default>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
index bf9c2d9c6439..916fcf3cc57d 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
@@ -59,6 +59,24 @@
reg = <0x4130 0x4>;
#clock-cells = <1>;
};
+
+ audio_refclk0: clock-controller@82e0 {
+ compatible = "ti,am62-audio-refclk";
+ reg = <0x82e0 0x4>;
+ clocks = <&k3_clks 157 0>;
+ assigned-clocks = <&k3_clks 157 0>;
+ assigned-clock-parents = <&k3_clks 157 8>;
+ #clock-cells = <0>;
+ };
+
+ audio_refclk1: clock-controller@82e4 {
+ compatible = "ti,am62-audio-refclk";
+ reg = <0x82e4 0x4>;
+ clocks = <&k3_clks 157 10>;
+ assigned-clocks = <&k3_clks 157 10>;
+ assigned-clock-parents = <&k3_clks 157 18>;
+ #clock-cells = <0>;
+ };
};
dmss: bus@48000000 {
@@ -120,8 +138,8 @@
compatible = "ti,am64-dmss-pktdma";
reg = <0x00 0x485c0000 0x00 0x100>,
<0x00 0x4a800000 0x00 0x20000>,
- <0x00 0x4aa00000 0x00 0x40000>,
- <0x00 0x4b800000 0x00 0x400000>,
+ <0x00 0x4aa00000 0x00 0x20000>,
+ <0x00 0x4b800000 0x00 0x200000>,
<0x00 0x485e0000 0x00 0x10000>,
<0x00 0x484a0000 0x00 0x2000>,
<0x00 0x484c0000 0x00 0x2000>,
@@ -216,6 +234,14 @@
};
};
+ crypto: crypto@40900000 {
+ compatible = "ti,am62-sa3ul";
+ reg = <0x00 0x40900000 0x00 0x1200>;
+ dmas = <&main_pktdma 0xf501 0>, <&main_pktdma 0x7506 0>,
+ <&main_pktdma 0x7507 0>;
+ dma-names = "tx", "rx1", "rx2";
+ };
+
secure_proxy_sa3: mailbox@43600000 {
compatible = "ti,am654-secure-proxy";
#mbox-cells = <1>;
@@ -713,7 +739,7 @@
label = "port1";
phys = <&phy_gmii_sel 1>;
mac-address = [00 00 00 00 00 00];
- ti,syscon-efuse = <&wkup_conf 0x200>;
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
};
cpsw_port2: port@2 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
new file mode 100644
index 000000000000..a5aceaa39670
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2023 - 2024 PHYTEC America LLC
+ * Author: Garrett Giordano <ggiordano@phytec.com>
+ *
+ * Product homepage:
+ * https://www.phytec.com/product/phycore-am62a
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+ model = "PHYTEC phyCORE-AM62Ax";
+ compatible = "phytec,am62a-phycore-som", "ti,am62a7";
+
+ aliases {
+ ethernet0 = &cpsw_port1;
+ gpio0 = &main_gpio0;
+ gpio1 = &main_gpio1;
+ i2c0 = &main_i2c0;
+ mmc0 = &sdhci0;
+ rtc0 = &i2c_som_rtc;
+ spi0 = &ospi0;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_pins_default>;
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&main_gpio0 13 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* 2G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* global cma region */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x00 0x24000000>;
+ alloc-ranges = <0x00 0xc0000000 0x00 0x24000000>;
+ linux,cma-default;
+ };
+
+ secure_tfa_ddr: tfa@9e780000 {
+ reg = <0x00 0x9e780000 0x00 0x80000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */
+ alignment = <0x1000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_memory_region: r5f-dma-memory@9c900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c900000 0x00 0x01e00000>;
+ no-map;
+ };
+ };
+
+ vcc_5v0_som: regulator-vcc-5v0-som {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_5V0_SOM";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&main_pmx0 {
+ leds_pins_default: leds-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x034, PIN_OUTPUT, 7) /* (K20) OSPI0_CSN2.GPIO0_13 */
+ >;
+ };
+
+ main_i2c0_pins_default: main-i2c0-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x1e0, PIN_INPUT_PULLUP, 0) /* (D17) I2C0_SCL */
+ AM62AX_IOPAD(0x1e4, PIN_INPUT_PULLUP, 0) /* (E16) I2C0_SDA */
+ >;
+ };
+
+ main_mdio1_pins_default: main-mdio1-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x160, PIN_OUTPUT, 0) /* (V12) MDIO0_MDC */
+ AM62AX_IOPAD(0x15c, PIN_INPUT, 0) /* (V13) MDIO0_MDIO */
+ >;
+ };
+
+ main_mmc0_pins_default: main-mmc0-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x220, PIN_INPUT_PULLUP, 0) /* (Y6) MMC0_CMD */
+ AM62AX_IOPAD(0x218, PIN_INPUT_PULLDOWN, 0) /* (AB7) MMC0_CLK */
+ AM62AX_IOPAD(0x214, PIN_INPUT_PULLUP, 0) /* (AA6) MMC0_DAT0 */
+ AM62AX_IOPAD(0x210, PIN_INPUT_PULLUP, 0) /* (AB6) MMC0_DAT1 */
+ AM62AX_IOPAD(0x20c, PIN_INPUT_PULLUP, 0) /* (Y7) MMC0_DAT2 */
+ AM62AX_IOPAD(0x208, PIN_INPUT_PULLUP, 0) /* (AA7) MMC0_DAT3 */
+ AM62AX_IOPAD(0x204, PIN_INPUT_PULLUP, 0) /* (Y8) MMC0_DAT4 */
+ AM62AX_IOPAD(0x200, PIN_INPUT_PULLUP, 0) /* (W7) MMC0_DAT5 */
+ AM62AX_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (W9) MMC0_DAT6 */
+ AM62AX_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AB8) MMC0_DAT7 */
+ >;
+ };
+
+ main_rgmii1_pins_default: main-rgmii1-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x14c, PIN_INPUT, 0) /* (AB16) RGMII1_RD0 */
+ AM62AX_IOPAD(0x150, PIN_INPUT, 0) /* (V15) RGMII1_RD1 */
+ AM62AX_IOPAD(0x154, PIN_INPUT, 0) /* (W15) RGMII1_RD2 */
+ AM62AX_IOPAD(0x158, PIN_INPUT, 0) /* (V14) RGMII1_RD3 */
+ AM62AX_IOPAD(0x148, PIN_INPUT, 0) /* (AA16) RGMII1_RXC */
+ AM62AX_IOPAD(0x144, PIN_INPUT, 0) /* (AA15) RGMII1_RX_CTL */
+ AM62AX_IOPAD(0x134, PIN_OUTPUT, 0) /* (Y17) RGMII1_TD0 */
+ AM62AX_IOPAD(0x138, PIN_OUTPUT, 0) /* (V16) RGMII1_TD1 */
+ AM62AX_IOPAD(0x13c, PIN_OUTPUT, 0) /* (Y16) RGMII1_TD2 */
+ AM62AX_IOPAD(0x140, PIN_OUTPUT, 0) /* (AA17) RGMII1_TD3 */
+ AM62AX_IOPAD(0x130, PIN_OUTPUT, 0) /* (AB17) RGMII1_TXC */
+ AM62AX_IOPAD(0x12c, PIN_OUTPUT, 0) /* (W16) RGMII1_TX_CTL */
+ >;
+ };
+
+ ospi0_pins_default: ospi0-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x000, PIN_OUTPUT, 0) /* (L22) OSPI0_CLK */
+ AM62AX_IOPAD(0x02c, PIN_OUTPUT, 0) /* (H21) OSPI0_CSn0 */
+ AM62AX_IOPAD(0x038, PIN_OUTPUT, 0) /* (G20) OSPI0_CSn3 */
+ AM62AX_IOPAD(0x00c, PIN_INPUT, 0) /* (J21) OSPI0_D0 */
+ AM62AX_IOPAD(0x010, PIN_INPUT, 0) /* (J18) OSPI0_D1 */
+ AM62AX_IOPAD(0x014, PIN_INPUT, 0) /* (J19) OSPI0_D2 */
+ AM62AX_IOPAD(0x018, PIN_INPUT, 0) /* (H18) OSPI0_D3 */
+ AM62AX_IOPAD(0x01c, PIN_INPUT, 0) /* (K21) OSPI0_D4 */
+ AM62AX_IOPAD(0x020, PIN_INPUT, 0) /* (H19) OSPI0_D5 */
+ AM62AX_IOPAD(0x024, PIN_INPUT, 0) /* (J20) OSPI0_D6 */
+ AM62AX_IOPAD(0x028, PIN_INPUT, 0) /* (J22) OSPI0_D7 */
+ AM62AX_IOPAD(0x008, PIN_INPUT, 0) /* (L21) OSPI0_DQS */
+ >;
+ };
+
+ pmic_irq_pins_default: pmic-irq-default-pins {
+ pinctrl-single,pins = <
+ AM62AX_IOPAD(0x1f4, PIN_INPUT, 0) /* (D16) EXTINTn */
+ >;
+ };
+};
+
+&cpsw3g {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_rgmii1_pins_default>;
+};
+
+&cpsw_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy1>;
+};
+
+&cpsw3g_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mdio1_pins_default>;
+
+ cpsw3g_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+};
+
+&fss {
+ status = "okay";
+};
+
+&main_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ pmic@30 {
+ compatible = "ti,tps65219";
+ reg = <0x30>;
+ buck1-supply = <&vcc_5v0_som>;
+ buck2-supply = <&vcc_5v0_som>;
+ buck3-supply = <&vcc_5v0_som>;
+ ldo1-supply = <&vdd_3v3>;
+ ldo2-supply = <&vdd_1v8>;
+ ldo3-supply = <&vcc_5v0_som>;
+ ldo4-supply = <&vcc_5v0_som>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq_pins_default>;
+ interrupt-parent = <&gic500>;
+ interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ ti,power-button;
+ system-power-controller;
+
+ regulators {
+ vdd_3v3: buck1 {
+ regulator-name = "VDD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_1v8: buck2 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_lpddr4: buck3 {
+ regulator-name = "VDD_LPDDR4";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddshv5_sdio: ldo1 {
+ regulator-name = "VDDSHV5_SDIO";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-allow-bypass;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddr_core: ldo2 {
+ regulator-name = "VDDR_CORE";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdda_1v8: ldo3 {
+ regulator-name = "VDDA_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_2v5: ldo4 {
+ regulator-name = "VDD_2V5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ pagesize = <32>;
+ reg = <0x50>;
+ };
+
+ i2c_som_rtc: rtc@52 {
+ compatible = "microcrystal,rv3028";
+ reg = <0x52>;
+ };
+};
+
+&main_gpio0 {
+ status = "okay";
+};
+
+&main_gpio1 {
+ status = "okay";
+};
+
+&main_gpio_intr {
+ status = "okay";
+};
+
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ospi0_pins_default>;
+ status = "okay";
+
+ serial_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ spi-max-frequency = <25000000>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <0>;
+ };
+};
+
+&sdhci0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc0_pins_default>;
+ disable-wp;
+ non-removable;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
index 98043e9aa316..f5ac101a04df 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
@@ -6,9 +6,8 @@
*/
&cbass_wakeup {
- wkup_conf: syscon@43000000 {
- compatible = "ti,j721e-system-controller", "syscon", "simple-mfd";
- reg = <0x00 0x43000000 0x00 0x20000>;
+ wkup_conf: bus@43000000 {
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00 0x00 0x43000000 0x20000>;
@@ -18,6 +17,11 @@
reg = <0x14 0x4>;
};
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
+
usb0_phy_ctrl: syscon@4008 {
compatible = "ti,am62-usb-phy-ctrl", "syscon";
reg = <0x4008 0x4>;
@@ -59,7 +63,6 @@
clock-names = "vbus", "osc32k";
power-domains = <&k3_pds 117 TI_SCI_PD_EXCLUSIVE>;
wakeup-source;
- status = "disabled";
};
wkup_rti0: watchdog@2b000000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-phyboard-lyra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-phyboard-lyra-rdk.dts
new file mode 100644
index 000000000000..3b93409b23e7
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62a7-phyboard-lyra-rdk.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2023 - 2024 PHYTEC America LLC
+ * Author: Garrett Giordano <ggiordano@phytec.com>
+ *
+ * Product homepage:
+ * https://www.phytec.com/product/phyboard-am62a
+ */
+
+#include "k3-am62a7.dtsi"
+#include "k3-am62a-phycore-som.dtsi"
+#include "k3-am62x-phyboard-lyra.dtsi"
+
+/ {
+ compatible = "phytec,am62a7-phyboard-lyra-rdk",
+ "phytec,am62a-phycore-som", "ti,am62a7";
+ model = "PHYTEC phyBOARD-Lyra AM62A7";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
index fa43cd0b631e..67faf46d7a35 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
@@ -40,6 +40,15 @@
#size-cells = <2>;
ranges;
+ /* global cma region */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x00 0x24000000>;
+ alloc-ranges = <0x00 0xc0000000 0x00 0x24000000>;
+ linux,cma-default;
+ };
+
secure_tfa_ddr: tfa@9e780000 {
reg = <0x00 0x9e780000 0x00 0x80000>;
alignment = <0x1000>;
@@ -701,8 +710,6 @@
0 0 0 0
0 0 0 0
>;
- tx-num-evt = <32>;
- rx-num-evt = <32>;
};
&dss {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
new file mode 100644
index 000000000000..9701fc69aed9
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
@@ -0,0 +1,1062 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Device Tree file for the MAIN domain peripherals shared by AM62P and J722S
+ *
+ * Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+&cbass_main {
+ oc_sram: sram@70000000 {
+ compatible = "mmio-sram";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ gic500: interrupt-controller@1800000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
+ <0x00 0x01880000 0x00 0xc0000>, /* GICR */
+ <0x01 0x00000000 0x00 0x2000>, /* GICC */
+ <0x01 0x00010000 0x00 0x1000>, /* GICH */
+ <0x01 0x00020000 0x00 0x2000>; /* GICV */
+ /*
+ * vcpumntirq:
+ * virtual CPU interface maintenance interrupt
+ */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic_its: msi-controller@1820000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x00 0x01820000 0x00 0x10000>;
+ socionext,synquacer-pre-its = <0x1000000 0x400000>;
+ msi-controller;
+ #msi-cells = <1>;
+ };
+ };
+
+ main_conf: bus@100000 {
+ compatible = "simple-bus";
+ reg = <0x00 0x00100000 0x00 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00 0x00 0x00100000 0x20000>;
+
+ phy_gmii_sel: phy@4044 {
+ compatible = "ti,am654-phy-gmii-sel";
+ reg = <0x4044 0x8>;
+ #phy-cells = <1>;
+ };
+
+ epwm_tbclk: clock-controller@4130 {
+ compatible = "ti,am62-epwm-tbclk";
+ reg = <0x4130 0x4>;
+ #clock-cells = <1>;
+ };
+ };
+
+ dmss: bus@48000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-ranges;
+ ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
+ bootph-all;
+
+ ti,sci-dev-id = <25>;
+
+ secure_proxy_main: mailbox@4d000000 {
+ compatible = "ti,am654-secure-proxy";
+ #mbox-cells = <1>;
+ reg-names = "target_data", "rt", "scfg";
+ reg = <0x00 0x4d000000 0x00 0x80000>,
+ <0x00 0x4a600000 0x00 0x80000>,
+ <0x00 0x4a400000 0x00 0x80000>;
+ interrupt-names = "rx_012";
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ bootph-all;
+ };
+
+ inta_main_dmss: interrupt-controller@48000000 {
+ compatible = "ti,sci-inta";
+ reg = <0x00 0x48000000 0x00 0x100000>;
+ #interrupt-cells = <0>;
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ msi-controller;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <28>;
+ ti,unmapped-event-sources = <&main_bcdma>, <&main_pktdma>;
+ };
+
+ main_bcdma: dma-controller@485c0100 {
+ compatible = "ti,am64-dmss-bcdma";
+ reg = <0x00 0x485c0100 0x00 0x100>,
+ <0x00 0x4c000000 0x00 0x20000>,
+ <0x00 0x4a820000 0x00 0x20000>,
+ <0x00 0x4aa40000 0x00 0x20000>,
+ <0x00 0x4bc00000 0x00 0x100000>,
+ <0x00 0x48600000 0x00 0x8000>,
+ <0x00 0x484a4000 0x00 0x2000>,
+ <0x00 0x484c2000 0x00 0x2000>,
+ <0x00 0x48420000 0x00 0x2000>;
+ reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt",
+ "ring", "tchan", "rchan", "bchan";
+ msi-parent = <&inta_main_dmss>;
+ #dma-cells = <3>;
+
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <26>;
+ ti,sci-rm-range-bchan = <0x20>; /* BLOCK_COPY_CHAN */
+ ti,sci-rm-range-rchan = <0x21>; /* SPLIT_TR_RX_CHAN */
+ ti,sci-rm-range-tchan = <0x22>; /* SPLIT_TR_TX_CHAN */
+ bootph-all;
+ };
+
+ main_pktdma: dma-controller@485c0000 {
+ compatible = "ti,am64-dmss-pktdma";
+ reg = <0x00 0x485c0000 0x00 0x100>,
+ <0x00 0x4a800000 0x00 0x20000>,
+ <0x00 0x4aa00000 0x00 0x20000>,
+ <0x00 0x4b800000 0x00 0x200000>,
+ <0x00 0x485e0000 0x00 0x10000>,
+ <0x00 0x484a0000 0x00 0x2000>,
+ <0x00 0x484c0000 0x00 0x2000>,
+ <0x00 0x48430000 0x00 0x1000>;
+ reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt",
+ "ring", "tchan", "rchan", "rflow";
+ msi-parent = <&inta_main_dmss>;
+ #dma-cells = <2>;
+ bootph-all;
+
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <30>;
+ ti,sci-rm-range-tchan = <0x23>, /* UNMAPPED_TX_CHAN */
+ <0x24>, /* CPSW_TX_CHAN */
+ <0x25>, /* SAUL_TX_0_CHAN */
+ <0x26>; /* SAUL_TX_1_CHAN */
+ ti,sci-rm-range-tflow = <0x10>, /* RING_UNMAPPED_TX_CHAN */
+ <0x11>, /* RING_CPSW_TX_CHAN */
+ <0x12>, /* RING_SAUL_TX_0_CHAN */
+ <0x13>; /* RING_SAUL_TX_1_CHAN */
+ ti,sci-rm-range-rchan = <0x29>, /* UNMAPPED_RX_CHAN */
+ <0x2b>, /* CPSW_RX_CHAN */
+ <0x2d>, /* SAUL_RX_0_CHAN */
+ <0x2f>, /* SAUL_RX_1_CHAN */
+ <0x31>, /* SAUL_RX_2_CHAN */
+ <0x33>; /* SAUL_RX_3_CHAN */
+ ti,sci-rm-range-rflow = <0x2a>, /* FLOW_UNMAPPED_RX_CHAN */
+ <0x2c>, /* FLOW_CPSW_RX_CHAN */
+ <0x2e>, /* FLOW_SAUL_RX_0/1_CHAN */
+ <0x32>; /* FLOW_SAUL_RX_2/3_CHAN */
+ };
+ };
+
+ dmss_csi: bus@4e000000 {
+ compatible = "simple-bus";
+ ranges = <0x00 0x4e000000 0x00 0x4e000000 0x00 0x408000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-ranges;
+ ti,sci-dev-id = <198>;
+
+ inta_main_dmss_csi: interrupt-controller@4e400000 {
+ compatible = "ti,sci-inta";
+ reg = <0x00 0x4e400000 0x00 0x8000>;
+ #interrupt-cells = <0>;
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ msi-controller;
+ power-domains = <&k3_pds 182 TI_SCI_PD_EXCLUSIVE>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <200>;
+ ti,interrupt-ranges = <0 237 8>;
+ ti,unmapped-event-sources = <&main_bcdma_csi>;
+ };
+
+ main_bcdma_csi: dma-controller@4e230000 {
+ compatible = "ti,am62a-dmss-bcdma-csirx";
+ reg = <0x00 0x4e230000 0x00 0x100>,
+ <0x00 0x4e180000 0x00 0x8000>,
+ <0x00 0x4e100000 0x00 0x10000>;
+ reg-names = "gcfg", "rchanrt", "ringrt";
+ #dma-cells = <3>;
+ msi-parent = <&inta_main_dmss_csi>;
+ power-domains = <&k3_pds 182 TI_SCI_PD_EXCLUSIVE>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <199>;
+ ti,sci-rm-range-rchan = <0x21>;
+ };
+ };
+
+ dmsc: system-controller@44043000 {
+ compatible = "ti,k2g-sci";
+ ti,host-id = <12>;
+ mbox-names = "rx", "tx";
+ mboxes = <&secure_proxy_main 12>,
+ <&secure_proxy_main 13>;
+ reg-names = "debug_messages";
+ reg = <0x00 0x44043000 0x00 0xfe0>;
+ bootph-all;
+
+ k3_pds: power-controller {
+ compatible = "ti,sci-pm-domain";
+ #power-domain-cells = <2>;
+ bootph-all;
+ };
+
+ k3_clks: clock-controller {
+ compatible = "ti,k2g-sci-clk";
+ #clock-cells = <2>;
+ bootph-all;
+ };
+
+ k3_reset: reset-controller {
+ compatible = "ti,sci-reset";
+ #reset-cells = <2>;
+ bootph-all;
+ };
+ };
+
+ crypto: crypto@40900000 {
+ compatible = "ti,am62-sa3ul";
+ reg = <0x00 0x40900000 0x00 0x1200>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dmas = <&main_pktdma 0xf501 0>, <&main_pktdma 0x7506 0>,
+ <&main_pktdma 0x7507 0>;
+ dma-names = "tx", "rx1", "rx2";
+ };
+
+ secure_proxy_sa3: mailbox@43600000 {
+ compatible = "ti,am654-secure-proxy";
+ #mbox-cells = <1>;
+ reg-names = "target_data", "rt", "scfg";
+ reg = <0x00 0x43600000 0x00 0x10000>,
+ <0x00 0x44880000 0x00 0x20000>,
+ <0x00 0x44860000 0x00 0x20000>;
+ /*
+ * Marked Disabled:
+ * Node is incomplete as it is meant for bootloaders and
+ * firmware on non-MPU processors
+ */
+ status = "disabled";
+ bootph-all;
+ };
+
+ main_pmx0: pinctrl@f4000 {
+ compatible = "pinctrl-single";
+ reg = <0x00 0xf4000 0x00 0x2ac>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0xffffffff>;
+ bootph-all;
+ };
+
+ main_esm: esm@420000 {
+ compatible = "ti,j721e-esm";
+ reg = <0x00 0x420000 0x00 0x1000>;
+ ti,esm-pins = <160>, <161>, <162>, <163>, <177>, <178>;
+ bootph-pre-ram;
+ };
+
+ main_timer0: timer@2400000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2400000 0x00 0x400>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 36 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 36 2>;
+ assigned-clock-parents = <&k3_clks 36 3>;
+ power-domains = <&k3_pds 36 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ bootph-all;
+ };
+
+ main_timer1: timer@2410000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2410000 0x00 0x400>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 37 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 37 2>;
+ assigned-clock-parents = <&k3_clks 37 3>;
+ power-domains = <&k3_pds 37 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ };
+
+ main_timer2: timer@2420000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2420000 0x00 0x400>;
+ interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 38 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 38 2>;
+ assigned-clock-parents = <&k3_clks 38 3>;
+ power-domains = <&k3_pds 38 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ };
+
+ main_timer3: timer@2430000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2430000 0x00 0x400>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 39 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 39 2>;
+ assigned-clock-parents = <&k3_clks 39 3>;
+ power-domains = <&k3_pds 39 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ };
+
+ main_timer4: timer@2440000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2440000 0x00 0x400>;
+ interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 40 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 40 2>;
+ assigned-clock-parents = <&k3_clks 40 3>;
+ power-domains = <&k3_pds 40 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ };
+
+ main_timer5: timer@2450000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2450000 0x00 0x400>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 41 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 41 2>;
+ assigned-clock-parents = <&k3_clks 41 3>;
+ power-domains = <&k3_pds 41 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ };
+
+ main_timer6: timer@2460000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2460000 0x00 0x400>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 42 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 42 2>;
+ assigned-clock-parents = <&k3_clks 42 3>;
+ power-domains = <&k3_pds 42 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ };
+
+ main_timer7: timer@2470000 {
+ compatible = "ti,am654-timer";
+ reg = <0x00 0x2470000 0x00 0x400>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 43 2>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 43 2>;
+ assigned-clock-parents = <&k3_clks 43 3>;
+ power-domains = <&k3_pds 43 TI_SCI_PD_EXCLUSIVE>;
+ ti,timer-pwm;
+ };
+
+ main_uart0: serial@2800000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02800000 0x00 0x100>;
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 146 0>;
+ clock-names = "fclk";
+ status = "disabled";
+ };
+
+ main_uart1: serial@2810000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02810000 0x00 0x100>;
+ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 152 0>;
+ clock-names = "fclk";
+ status = "disabled";
+ };
+
+ main_uart2: serial@2820000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02820000 0x00 0x100>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 153 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 153 0>;
+ clock-names = "fclk";
+ status = "disabled";
+ };
+
+ main_uart3: serial@2830000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02830000 0x00 0x100>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 154 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 154 0>;
+ clock-names = "fclk";
+ status = "disabled";
+ };
+
+ main_uart4: serial@2840000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02840000 0x00 0x100>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 155 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 155 0>;
+ clock-names = "fclk";
+ status = "disabled";
+ };
+
+ main_uart5: serial@2850000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02850000 0x00 0x100>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 156 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 156 0>;
+ clock-names = "fclk";
+ status = "disabled";
+ };
+
+ main_uart6: serial@2860000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02860000 0x00 0x100>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 158 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 158 0>;
+ clock-names = "fclk";
+ status = "disabled";
+ };
+
+ main_i2c0: i2c@20000000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20000000 0x00 0x100>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 102 2>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+
+ main_i2c1: i2c@20010000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20010000 0x00 0x100>;
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 103 2>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+
+ main_i2c2: i2c@20020000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20020000 0x00 0x100>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 104 2>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+
+ main_i2c3: i2c@20030000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20030000 0x00 0x100>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 105 2>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+
+ main_spi0: spi@20100000 {
+ compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
+ reg = <0x00 0x20100000 0x00 0x400>;
+ interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 141 0>;
+ status = "disabled";
+ };
+
+ main_spi1: spi@20110000 {
+ compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+ reg = <0x00 0x20110000 0x00 0x400>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 142 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 142 0>;
+ status = "disabled";
+ };
+
+ main_spi2: spi@20120000 {
+ compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+ reg = <0x00 0x20120000 0x00 0x400>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 143 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 143 0>;
+ status = "disabled";
+ };
+
+ main_gpio_intr: interrupt-controller@a00000 {
+ compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
+ ti,intr-trigger-type = <1>;
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ #interrupt-cells = <1>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <3>;
+ ti,interrupt-ranges = <0 32 16>;
+ };
+
+ main_gpio0: gpio@600000 {
+ compatible = "ti,am64-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x00600000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <190>, <191>, <192>,
+ <193>, <194>, <195>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 77 0>;
+ clock-names = "gpio";
+ };
+
+ main_gpio1: gpio@601000 {
+ compatible = "ti,am64-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x00601000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <180>, <181>, <182>,
+ <183>, <184>, <185>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 78 0>;
+ clock-names = "gpio";
+ };
+
+ sdhci0: mmc@fa10000 {
+ compatible = "ti,am64-sdhci-8bit";
+ reg = <0x00 0x0fa10000 0x00 0x1000>, <0x00 0x0fa18000 0x00 0x400>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 57 1>, <&k3_clks 57 2>;
+ clock-names = "clk_ahb", "clk_xin";
+ assigned-clocks = <&k3_clks 57 2>;
+ assigned-clock-parents = <&k3_clks 57 4>;
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ ti,clkbuf-sel = <0x7>;
+ ti,strobe-sel = <0x77>;
+ ti,trm-icp = <0x8>;
+ ti,otap-del-sel-legacy = <0x1>;
+ ti,otap-del-sel-mmc-hs = <0x1>;
+ ti,otap-del-sel-ddr52 = <0x6>;
+ ti,otap-del-sel-hs200 = <0x8>;
+ ti,otap-del-sel-hs400 = <0x5>;
+ ti,itap-del-sel-legacy = <0x10>;
+ ti,itap-del-sel-mmc-hs = <0xa>;
+ ti,itap-del-sel-ddr52 = <0x3>;
+ status = "disabled";
+ };
+
+ sdhci1: mmc@fa00000 {
+ compatible = "ti,am62-sdhci";
+ reg = <0x00 0x0fa00000 0x00 0x1000>, <0x00 0x0fa08000 0x00 0x400>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 58 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 58 5>, <&k3_clks 58 6>;
+ clock-names = "clk_ahb", "clk_xin";
+ bus-width = <4>;
+ ti,clkbuf-sel = <0x7>;
+ ti,otap-del-sel-legacy = <0x0>;
+ ti,otap-del-sel-sd-hs = <0x0>;
+ ti,otap-del-sel-sdr12 = <0xf>;
+ ti,otap-del-sel-sdr25 = <0xf>;
+ ti,otap-del-sel-sdr50 = <0xc>;
+ ti,otap-del-sel-ddr50 = <0x9>;
+ ti,otap-del-sel-sdr104 = <0x6>;
+ ti,itap-del-sel-legacy = <0x0>;
+ ti,itap-del-sel-sd-hs = <0x0>;
+ ti,itap-del-sel-sdr12 = <0x0>;
+ ti,itap-del-sel-sdr25 = <0x0>;
+ status = "disabled";
+ };
+
+ sdhci2: mmc@fa20000 {
+ compatible = "ti,am62-sdhci";
+ reg = <0x00 0x0fa20000 0x00 0x1000>, <0x00 0x0fa28000 0x00 0x400>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 184 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 184 5>, <&k3_clks 184 6>;
+ clock-names = "clk_ahb", "clk_xin";
+ bus-width = <4>;
+ ti,clkbuf-sel = <0x7>;
+ ti,otap-del-sel-legacy = <0x0>;
+ ti,otap-del-sel-sd-hs = <0x0>;
+ ti,otap-del-sel-sdr12 = <0xf>;
+ ti,otap-del-sel-sdr25 = <0xf>;
+ ti,otap-del-sel-sdr50 = <0xc>;
+ ti,otap-del-sel-ddr50 = <0x9>;
+ ti,otap-del-sel-sdr104 = <0x6>;
+ ti,itap-del-sel-legacy = <0x0>;
+ ti,itap-del-sel-sd-hs = <0x0>;
+ ti,itap-del-sel-sdr12 = <0x0>;
+ ti,itap-del-sel-sdr25 = <0x0>;
+ status = "disabled";
+ };
+
+ usbss0: usb@f900000 {
+ compatible = "ti,am62-usb";
+ reg = <0x00 0x0f900000 0x00 0x800>,
+ <0x00 0x0f908000 0x00 0x400>;
+ clocks = <&k3_clks 161 3>;
+ clock-names = "ref";
+ ti,syscon-phy-pll-refclk = <&usb0_phy_ctrl 0x0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ power-domains = <&k3_pds 178 TI_SCI_PD_EXCLUSIVE>;
+ ranges;
+ status = "disabled";
+
+ usb0: usb@31000000 {
+ compatible = "snps,dwc3";
+ reg = <0x00 0x31000000 0x00 0x50000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>; /* irq.0 */
+ interrupt-names = "host", "peripheral";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
+ snps,usb2-gadget-lpm-disable;
+ snps,usb2-lpm-disable;
+ };
+ };
+
+ fss: bus@fc00000 {
+ compatible = "simple-bus";
+ reg = <0x00 0x0fc00000 0x00 0x70000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ospi0: spi@fc40000 {
+ compatible = "ti,am654-ospi", "cdns,qspi-nor";
+ reg = <0x00 0x0fc40000 0x00 0x100>,
+ <0x05 0x00000000 0x01 0x00000000>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ cdns,fifo-depth = <256>;
+ cdns,fifo-width = <4>;
+ cdns,trigger-address = <0x0>;
+ clocks = <&k3_clks 75 7>;
+ assigned-clocks = <&k3_clks 75 7>;
+ assigned-clock-parents = <&k3_clks 75 8>;
+ assigned-clock-rates = <166666666>;
+ power-domains = <&k3_pds 75 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ cpsw3g: ethernet@8000000 {
+ compatible = "ti,am642-cpsw-nuss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ reg = <0x00 0x08000000 0x00 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x00 0x00 0x00 0x08000000 0x00 0x200000>;
+ clocks = <&k3_clks 13 0>;
+ assigned-clocks = <&k3_clks 13 3>;
+ assigned-clock-parents = <&k3_clks 13 11>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 13 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ dmas = <&main_pktdma 0xc600 15>,
+ <&main_pktdma 0xc601 15>,
+ <&main_pktdma 0xc602 15>,
+ <&main_pktdma 0xc603 15>,
+ <&main_pktdma 0xc604 15>,
+ <&main_pktdma 0xc605 15>,
+ <&main_pktdma 0xc606 15>,
+ <&main_pktdma 0xc607 15>,
+ <&main_pktdma 0x4600 15>;
+ dma-names = "tx0", "tx1", "tx2", "tx3", "tx4", "tx5", "tx6",
+ "tx7", "rx";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ ti,mac-only;
+ label = "port1";
+ phys = <&phy_gmii_sel 1>;
+ mac-address = [00 00 00 00 00 00];
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
+ status = "disabled";
+ };
+
+ cpsw_port2: port@2 {
+ reg = <2>;
+ ti,mac-only;
+ label = "port2";
+ phys = <&phy_gmii_sel 2>;
+ mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ };
+ };
+
+ cpsw3g_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x00 0xf00 0x00 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 13 0>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+ status = "disabled";
+ };
+
+ cpts@3d000 {
+ compatible = "ti,j721e-cpts";
+ reg = <0x00 0x3d000 0x00 0x400>;
+ clocks = <&k3_clks 13 3>;
+ clock-names = "cpts";
+ interrupts-extended = <&gic500 GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cpts";
+ ti,cpts-ext-ts-inputs = <4>;
+ ti,cpts-periodic-outputs = <2>;
+ };
+ };
+
+ hwspinlock: spinlock@2a000000 {
+ compatible = "ti,am64-hwspinlock";
+ reg = <0x00 0x2a000000 0x00 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ mailbox0_cluster0: mailbox@29000000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29000000 0x00 0x200>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster1: mailbox@29010000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29010000 0x00 0x200>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster2: mailbox@29020000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29020000 0x00 0x200>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster3: mailbox@29030000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29030000 0x00 0x200>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ ecap0: pwm@23100000 {
+ compatible = "ti,am3352-ecap";
+ #pwm-cells = <3>;
+ reg = <0x00 0x23100000 0x00 0x100>;
+ power-domains = <&k3_pds 51 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 51 0>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+
+ ecap1: pwm@23110000 {
+ compatible = "ti,am3352-ecap";
+ #pwm-cells = <3>;
+ reg = <0x00 0x23110000 0x00 0x100>;
+ power-domains = <&k3_pds 52 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 52 0>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+
+ ecap2: pwm@23120000 {
+ compatible = "ti,am3352-ecap";
+ #pwm-cells = <3>;
+ reg = <0x00 0x23120000 0x00 0x100>;
+ power-domains = <&k3_pds 53 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 53 0>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+
+ main_mcan0: can@20701000 {
+ compatible = "bosch,m_can";
+ reg = <0x00 0x20701000 0x00 0x200>,
+ <0x00 0x20708000 0x00 0x8000>;
+ reg-names = "m_can", "message_ram";
+ power-domains = <&k3_pds 98 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 98 6>, <&k3_clks 98 1>;
+ clock-names = "hclk", "cclk";
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "int0", "int1";
+ bosch,mram-cfg = <0x0 128 64 64 64 64 32 32>;
+ status = "disabled";
+ };
+
+ main_mcan1: can@20711000 {
+ compatible = "bosch,m_can";
+ reg = <0x00 0x20711000 0x00 0x200>,
+ <0x00 0x20718000 0x00 0x8000>;
+ reg-names = "m_can", "message_ram";
+ power-domains = <&k3_pds 99 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 99 6>, <&k3_clks 99 1>;
+ clock-names = "hclk", "cclk";
+ interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "int0", "int1";
+ bosch,mram-cfg = <0x0 128 64 64 64 64 32 32>;
+ status = "disabled";
+ };
+
+ main_rti0: watchdog@e000000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x00 0x0e000000 0x00 0x100>;
+ clocks = <&k3_clks 125 0>;
+ power-domains = <&k3_pds 125 TI_SCI_PD_EXCLUSIVE>;
+ assigned-clocks = <&k3_clks 125 0>;
+ assigned-clock-parents = <&k3_clks 125 2>;
+ };
+
+ main_rti1: watchdog@e010000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x00 0x0e010000 0x00 0x100>;
+ clocks = <&k3_clks 126 0>;
+ power-domains = <&k3_pds 126 TI_SCI_PD_EXCLUSIVE>;
+ assigned-clocks = <&k3_clks 126 0>;
+ assigned-clock-parents = <&k3_clks 126 2>;
+ };
+
+ main_rti2: watchdog@e020000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x00 0x0e020000 0x00 0x100>;
+ clocks = <&k3_clks 127 0>;
+ power-domains = <&k3_pds 127 TI_SCI_PD_EXCLUSIVE>;
+ assigned-clocks = <&k3_clks 127 0>;
+ assigned-clock-parents = <&k3_clks 127 2>;
+ };
+
+ main_rti3: watchdog@e030000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x00 0x0e030000 0x00 0x100>;
+ clocks = <&k3_clks 128 0>;
+ power-domains = <&k3_pds 128 TI_SCI_PD_EXCLUSIVE>;
+ assigned-clocks = <&k3_clks 128 0>;
+ assigned-clock-parents = <&k3_clks 128 2>;
+ };
+
+ main_rti15: watchdog@e0f0000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x00 0x0e0f0000 0x00 0x100>;
+ clocks = <&k3_clks 130 0>;
+ power-domains = <&k3_pds 130 TI_SCI_PD_EXCLUSIVE>;
+ assigned-clocks = <&k3_clks 130 0>;
+ assigned-clock-parents = <&k3_clks 130 2>;
+ };
+
+ epwm0: pwm@23000000 {
+ compatible = "ti,am64-epwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x00 0x23000000 0x00 0x100>;
+ power-domains = <&k3_pds 86 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&epwm_tbclk 0>, <&k3_clks 86 0>;
+ clock-names = "tbclk", "fck";
+ status = "disabled";
+ };
+
+ epwm1: pwm@23010000 {
+ compatible = "ti,am64-epwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x00 0x23010000 0x00 0x100>;
+ power-domains = <&k3_pds 87 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&epwm_tbclk 1>, <&k3_clks 87 0>;
+ clock-names = "tbclk", "fck";
+ status = "disabled";
+ };
+
+ epwm2: pwm@23020000 {
+ compatible = "ti,am64-epwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x00 0x23020000 0x00 0x100>;
+ power-domains = <&k3_pds 88 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&epwm_tbclk 2>, <&k3_clks 88 0>;
+ clock-names = "tbclk", "fck";
+ status = "disabled";
+ };
+
+ mcasp0: audio-controller@2b00000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b00000 0x00 0x2000>,
+ <0x00 0x02b08000 0x00 0x400>;
+ reg-names = "mpu", "dat";
+ interrupts = <GIC_SPI 236 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+
+ dmas = <&main_bcdma 0 0xc500 0>, <&main_bcdma 0 0x4500 0>;
+ dma-names = "tx", "rx";
+
+ clocks = <&k3_clks 190 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 190 0>;
+ assigned-clock-parents = <&k3_clks 190 2>;
+ power-domains = <&k3_pds 190 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ mcasp1: audio-controller@2b10000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b10000 0x00 0x2000>,
+ <0x00 0x02b18000 0x00 0x400>;
+ reg-names = "mpu", "dat";
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 237 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+
+ dmas = <&main_bcdma 0 0xc501 0>, <&main_bcdma 0 0x4501 0>;
+ dma-names = "tx", "rx";
+
+ clocks = <&k3_clks 191 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 191 0>;
+ assigned-clock-parents = <&k3_clks 191 2>;
+ power-domains = <&k3_pds 191 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ mcasp2: audio-controller@2b20000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b20000 0x00 0x2000>,
+ <0x00 0x02b28000 0x00 0x400>;
+ reg-names = "mpu", "dat";
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+
+ dmas = <&main_bcdma 0 0xc502 0>, <&main_bcdma 0 0x4502 0>;
+ dma-names = "tx", "rx";
+
+ clocks = <&k3_clks 192 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 192 0>;
+ assigned-clock-parents = <&k3_clks 192 2>;
+ power-domains = <&k3_pds 192 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ ti_csi2rx0: ticsi2rx@30102000 {
+ compatible = "ti,j721e-csi2rx-shim";
+ reg = <0x00 0x30102000 0x00 0x1000>;
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dmas = <&main_bcdma_csi 0 0x5000 0>;
+ dma-names = "rx0";
+ power-domains = <&k3_pds 182 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ cdns_csi2rx0: csi-bridge@30101000 {
+ compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+ reg = <0x00 0x30101000 0x00 0x1000>;
+ clocks = <&k3_clks 182 0>, <&k3_clks 182 3>, <&k3_clks 182 0>,
+ <&k3_clks 182 0>, <&k3_clks 182 4>, <&k3_clks 182 4>;
+ clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+ "pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+ phys = <&dphy0>;
+ phy-names = "dphy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi0_port0: port@0 {
+ reg = <0>;
+ status = "disabled";
+ };
+
+ csi0_port1: port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ csi0_port2: port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ csi0_port3: port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+
+ csi0_port4: port@4 {
+ reg = <4>;
+ status = "disabled";
+ };
+ };
+ };
+ };
+
+ dphy0: phy@30110000 {
+ compatible = "cdns,dphy-rx";
+ reg = <0x00 0x30110000 0x00 0x1100>;
+ #phy-cells = <0>;
+ power-domains = <&k3_pds 185 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ vpu: video-codec@30210000 {
+ compatible = "ti,j721s2-wave521c", "cnm,wave521c";
+ reg = <0x00 0x30210000 0x00 0x10000>;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 204 2>;
+ power-domains = <&k3_pds 204 TI_SCI_PD_EXCLUSIVE>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
index b973b550eb9d..e65db6ce02bf 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
- * Device Tree file for the AM62P MCU domain peripherals
+ * Device Tree file for the MCU domain peripherals shared by AM62P and J722S
+ *
* Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
@@ -11,7 +12,15 @@
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xffffffff>;
+ pinctrl-single,gpio-range =
+ <&mcu_pmx_range 0 21 PIN_GPIO_RANGE_IOPAD>,
+ <&mcu_pmx_range 23 1 PIN_GPIO_RANGE_IOPAD>,
+ <&mcu_pmx_range 32 2 PIN_GPIO_RANGE_IOPAD>;
bootph-all;
+
+ mcu_pmx_range: gpio-range {
+ #pinctrl-single,gpio-range-cells = <3>;
+ };
};
mcu_esm: esm@4100000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-thermal.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-thermal.dtsi
index c7486fb2a5b4..c7486fb2a5b4 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-thermal.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-thermal.dtsi
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi
index c71d9624ea27..315d0092e736 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
- * Device Tree file for the AM62P wakeup domain peripherals
+ * Device Tree file for the WAKEUP domain peripherals shared by AM62P and J722S
+ *
* Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
@@ -19,6 +20,11 @@
bootph-all;
};
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
+
usb0_phy_ctrl: syscon@4008 {
compatible = "ti,am62-usb-phy-ctrl", "syscon";
reg = <0x4008 0x4>;
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
index 900d1f9530a2..57383bd2eaeb 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
@@ -1,666 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
- * Device Tree file for the AM62P main domain peripherals
+ * Device Tree file for the AM62P MAIN domain peripherals
+ *
* Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
&cbass_main {
- oc_sram: sram@70000000 {
- compatible = "mmio-sram";
- reg = <0x00 0x70000000 0x00 0x10000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00 0x00 0x70000000 0x10000>;
- };
-
- gic500: interrupt-controller@1800000 {
- compatible = "arm,gic-v3";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- #interrupt-cells = <3>;
- interrupt-controller;
- reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
- <0x00 0x01880000 0x00 0xc0000>, /* GICR */
- <0x01 0x00000000 0x00 0x2000>, /* GICC */
- <0x01 0x00010000 0x00 0x1000>, /* GICH */
- <0x01 0x00020000 0x00 0x2000>; /* GICV */
- /*
- * vcpumntirq:
- * virtual CPU interface maintenance interrupt
- */
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
-
- gic_its: msi-controller@1820000 {
- compatible = "arm,gic-v3-its";
- reg = <0x00 0x01820000 0x00 0x10000>;
- socionext,synquacer-pre-its = <0x1000000 0x400000>;
- msi-controller;
- #msi-cells = <1>;
- };
- };
-
- main_conf: bus@100000 {
- compatible = "simple-bus";
- reg = <0x00 0x00100000 0x00 0x20000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00 0x00 0x00100000 0x20000>;
-
- phy_gmii_sel: phy@4044 {
- compatible = "ti,am654-phy-gmii-sel";
- reg = <0x4044 0x8>;
- #phy-cells = <1>;
- };
-
- epwm_tbclk: clock-controller@4130 {
- compatible = "ti,am62-epwm-tbclk";
- reg = <0x4130 0x4>;
- #clock-cells = <1>;
- };
- };
-
- dmss: bus@48000000 {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- dma-ranges;
- ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
- bootph-all;
-
- ti,sci-dev-id = <25>;
-
- secure_proxy_main: mailbox@4d000000 {
- compatible = "ti,am654-secure-proxy";
- #mbox-cells = <1>;
- reg-names = "target_data", "rt", "scfg";
- reg = <0x00 0x4d000000 0x00 0x80000>,
- <0x00 0x4a600000 0x00 0x80000>,
- <0x00 0x4a400000 0x00 0x80000>;
- interrupt-names = "rx_012";
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- bootph-all;
- };
-
- inta_main_dmss: interrupt-controller@48000000 {
- compatible = "ti,sci-inta";
- reg = <0x00 0x48000000 0x00 0x100000>;
- #interrupt-cells = <0>;
- interrupt-controller;
- interrupt-parent = <&gic500>;
- msi-controller;
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <28>;
- ti,interrupt-ranges = <5 69 35>;
- ti,unmapped-event-sources = <&main_bcdma>, <&main_pktdma>;
- };
-
- main_bcdma: dma-controller@485c0100 {
- compatible = "ti,am64-dmss-bcdma";
- reg = <0x00 0x485c0100 0x00 0x100>,
- <0x00 0x4c000000 0x00 0x20000>,
- <0x00 0x4a820000 0x00 0x20000>,
- <0x00 0x4aa40000 0x00 0x20000>,
- <0x00 0x4bc00000 0x00 0x100000>,
- <0x00 0x48600000 0x00 0x8000>,
- <0x00 0x484a4000 0x00 0x2000>,
- <0x00 0x484c2000 0x00 0x2000>,
- <0x00 0x48420000 0x00 0x2000>;
- reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt",
- "ring", "tchan", "rchan", "bchan";
- msi-parent = <&inta_main_dmss>;
- #dma-cells = <3>;
-
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <26>;
- ti,sci-rm-range-bchan = <0x20>; /* BLOCK_COPY_CHAN */
- ti,sci-rm-range-rchan = <0x21>; /* SPLIT_TR_RX_CHAN */
- ti,sci-rm-range-tchan = <0x22>; /* SPLIT_TR_TX_CHAN */
- bootph-all;
- };
-
- main_pktdma: dma-controller@485c0000 {
- compatible = "ti,am64-dmss-pktdma";
- reg = <0x00 0x485c0000 0x00 0x100>,
- <0x00 0x4a800000 0x00 0x20000>,
- <0x00 0x4aa00000 0x00 0x40000>,
- <0x00 0x4b800000 0x00 0x400000>,
- <0x00 0x485e0000 0x00 0x10000>,
- <0x00 0x484a0000 0x00 0x2000>,
- <0x00 0x484c0000 0x00 0x2000>,
- <0x00 0x48430000 0x00 0x1000>;
- reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt",
- "ring", "tchan", "rchan", "rflow";
- msi-parent = <&inta_main_dmss>;
- #dma-cells = <2>;
- bootph-all;
-
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <30>;
- ti,sci-rm-range-tchan = <0x23>, /* UNMAPPED_TX_CHAN */
- <0x24>, /* CPSW_TX_CHAN */
- <0x25>, /* SAUL_TX_0_CHAN */
- <0x26>; /* SAUL_TX_1_CHAN */
- ti,sci-rm-range-tflow = <0x10>, /* RING_UNMAPPED_TX_CHAN */
- <0x11>, /* RING_CPSW_TX_CHAN */
- <0x12>, /* RING_SAUL_TX_0_CHAN */
- <0x13>; /* RING_SAUL_TX_1_CHAN */
- ti,sci-rm-range-rchan = <0x29>, /* UNMAPPED_RX_CHAN */
- <0x2b>, /* CPSW_RX_CHAN */
- <0x2d>, /* SAUL_RX_0_CHAN */
- <0x2f>, /* SAUL_RX_1_CHAN */
- <0x31>, /* SAUL_RX_2_CHAN */
- <0x33>; /* SAUL_RX_3_CHAN */
- ti,sci-rm-range-rflow = <0x2a>, /* FLOW_UNMAPPED_RX_CHAN */
- <0x2c>, /* FLOW_CPSW_RX_CHAN */
- <0x2e>, /* FLOW_SAUL_RX_0/1_CHAN */
- <0x32>; /* FLOW_SAUL_RX_2/3_CHAN */
- };
- };
-
- dmss_csi: bus@4e000000 {
- compatible = "simple-bus";
- ranges = <0x00 0x4e000000 0x00 0x4e000000 0x00 0x408000>;
- #address-cells = <2>;
- #size-cells = <2>;
- dma-ranges;
- ti,sci-dev-id = <198>;
-
- inta_main_dmss_csi: interrupt-controller@4e400000 {
- compatible = "ti,sci-inta";
- reg = <0x00 0x4e400000 0x00 0x8000>;
- #interrupt-cells = <0>;
- interrupt-controller;
- interrupt-parent = <&gic500>;
- msi-controller;
- power-domains = <&k3_pds 182 TI_SCI_PD_EXCLUSIVE>;
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <200>;
- ti,interrupt-ranges = <0 237 8>;
- ti,unmapped-event-sources = <&main_bcdma_csi>;
- };
-
- main_bcdma_csi: dma-controller@4e230000 {
- compatible = "ti,am62a-dmss-bcdma-csirx";
- reg = <0x00 0x4e230000 0x00 0x100>,
- <0x00 0x4e180000 0x00 0x8000>,
- <0x00 0x4e100000 0x00 0x10000>;
- reg-names = "gcfg", "rchanrt", "ringrt";
- #dma-cells = <3>;
- msi-parent = <&inta_main_dmss_csi>;
- power-domains = <&k3_pds 182 TI_SCI_PD_EXCLUSIVE>;
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <199>;
- ti,sci-rm-range-rchan = <0x21>;
- };
- };
-
- dmsc: system-controller@44043000 {
- compatible = "ti,k2g-sci";
- ti,host-id = <12>;
- mbox-names = "rx", "tx";
- mboxes = <&secure_proxy_main 12>,
- <&secure_proxy_main 13>;
- reg-names = "debug_messages";
- reg = <0x00 0x44043000 0x00 0xfe0>;
- bootph-all;
-
- k3_pds: power-controller {
- compatible = "ti,sci-pm-domain";
- #power-domain-cells = <2>;
- bootph-all;
- };
-
- k3_clks: clock-controller {
- compatible = "ti,k2g-sci-clk";
- #clock-cells = <2>;
- bootph-all;
- };
-
- k3_reset: reset-controller {
- compatible = "ti,sci-reset";
- #reset-cells = <2>;
- bootph-all;
- };
- };
-
- crypto: crypto@40900000 {
- compatible = "ti,am62-sa3ul";
- reg = <0x00 0x40900000 0x00 0x1200>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges = <0x00 0x40900000 0x00 0x40900000 0x00 0x30000>;
-
- dmas = <&main_pktdma 0xf501 0>, <&main_pktdma 0x7506 0>,
- <&main_pktdma 0x7507 0>;
- dma-names = "tx", "rx1", "rx2";
- };
-
- secure_proxy_sa3: mailbox@43600000 {
- compatible = "ti,am654-secure-proxy";
- #mbox-cells = <1>;
- reg-names = "target_data", "rt", "scfg";
- reg = <0x00 0x43600000 0x00 0x10000>,
- <0x00 0x44880000 0x00 0x20000>,
- <0x00 0x44860000 0x00 0x20000>;
- /*
- * Marked Disabled:
- * Node is incomplete as it is meant for bootloaders and
- * firmware on non-MPU processors
- */
- status = "disabled";
- bootph-all;
- };
-
- main_pmx0: pinctrl@f4000 {
- compatible = "pinctrl-single";
- reg = <0x00 0xf4000 0x00 0x2ac>;
- #pinctrl-cells = <1>;
- pinctrl-single,register-width = <32>;
- pinctrl-single,function-mask = <0xffffffff>;
- bootph-all;
- };
-
- main_esm: esm@420000 {
- compatible = "ti,j721e-esm";
- reg = <0x00 0x420000 0x00 0x1000>;
- ti,esm-pins = <160>, <161>, <162>, <163>, <177>, <178>;
- bootph-pre-ram;
- };
-
- main_timer0: timer@2400000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2400000 0x00 0x400>;
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 36 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 36 2>;
- assigned-clock-parents = <&k3_clks 36 3>;
- power-domains = <&k3_pds 36 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- bootph-all;
- };
-
- main_timer1: timer@2410000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2410000 0x00 0x400>;
- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 37 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 37 2>;
- assigned-clock-parents = <&k3_clks 37 3>;
- power-domains = <&k3_pds 37 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- };
-
- main_timer2: timer@2420000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2420000 0x00 0x400>;
- interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 38 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 38 2>;
- assigned-clock-parents = <&k3_clks 38 3>;
- power-domains = <&k3_pds 38 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- };
-
- main_timer3: timer@2430000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2430000 0x00 0x400>;
- interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 39 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 39 2>;
- assigned-clock-parents = <&k3_clks 39 3>;
- power-domains = <&k3_pds 39 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- };
-
- main_timer4: timer@2440000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2440000 0x00 0x400>;
- interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 40 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 40 2>;
- assigned-clock-parents = <&k3_clks 40 3>;
- power-domains = <&k3_pds 40 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- };
-
- main_timer5: timer@2450000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2450000 0x00 0x400>;
- interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 41 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 41 2>;
- assigned-clock-parents = <&k3_clks 41 3>;
- power-domains = <&k3_pds 41 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- };
-
- main_timer6: timer@2460000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2460000 0x00 0x400>;
- interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 42 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 42 2>;
- assigned-clock-parents = <&k3_clks 42 3>;
- power-domains = <&k3_pds 42 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- };
-
- main_timer7: timer@2470000 {
- compatible = "ti,am654-timer";
- reg = <0x00 0x2470000 0x00 0x400>;
- interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 43 2>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 43 2>;
- assigned-clock-parents = <&k3_clks 43 3>;
- power-domains = <&k3_pds 43 TI_SCI_PD_EXCLUSIVE>;
- ti,timer-pwm;
- };
-
- main_uart0: serial@2800000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x02800000 0x00 0x100>;
- interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 146 0>;
- clock-names = "fclk";
- status = "disabled";
- };
-
- main_uart1: serial@2810000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x02810000 0x00 0x100>;
- interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 152 0>;
- clock-names = "fclk";
- status = "disabled";
- };
-
- main_uart2: serial@2820000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x02820000 0x00 0x100>;
- interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 153 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 153 0>;
- clock-names = "fclk";
- status = "disabled";
- };
-
- main_uart3: serial@2830000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x02830000 0x00 0x100>;
- interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 154 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 154 0>;
- clock-names = "fclk";
- status = "disabled";
- };
-
- main_uart4: serial@2840000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x02840000 0x00 0x100>;
- interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 155 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 155 0>;
- clock-names = "fclk";
- status = "disabled";
- };
-
- main_uart5: serial@2850000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x02850000 0x00 0x100>;
- interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 156 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 156 0>;
- clock-names = "fclk";
- status = "disabled";
- };
-
- main_uart6: serial@2860000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x02860000 0x00 0x100>;
- interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 158 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 158 0>;
- clock-names = "fclk";
- status = "disabled";
- };
-
- main_i2c0: i2c@20000000 {
- compatible = "ti,am64-i2c", "ti,omap4-i2c";
- reg = <0x00 0x20000000 0x00 0x100>;
- interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 102 2>;
- clock-names = "fck";
- status = "disabled";
- };
-
- main_i2c1: i2c@20010000 {
- compatible = "ti,am64-i2c", "ti,omap4-i2c";
- reg = <0x00 0x20010000 0x00 0x100>;
- interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 103 2>;
- clock-names = "fck";
- status = "disabled";
- };
-
- main_i2c2: i2c@20020000 {
- compatible = "ti,am64-i2c", "ti,omap4-i2c";
- reg = <0x00 0x20020000 0x00 0x100>;
- interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 104 2>;
- clock-names = "fck";
- status = "disabled";
- };
-
- main_i2c3: i2c@20030000 {
- compatible = "ti,am64-i2c", "ti,omap4-i2c";
- reg = <0x00 0x20030000 0x00 0x100>;
- interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 105 2>;
- clock-names = "fck";
- status = "disabled";
- };
-
- main_spi0: spi@20100000 {
- compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
- reg = <0x00 0x20100000 0x00 0x400>;
- interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 141 0>;
- status = "disabled";
- };
-
- main_spi1: spi@20110000 {
- compatible = "ti,am654-mcspi","ti,omap4-mcspi";
- reg = <0x00 0x20110000 0x00 0x400>;
- interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- power-domains = <&k3_pds 142 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 142 0>;
- status = "disabled";
- };
-
- main_spi2: spi@20120000 {
- compatible = "ti,am654-mcspi","ti,omap4-mcspi";
- reg = <0x00 0x20120000 0x00 0x400>;
- interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- power-domains = <&k3_pds 143 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 143 0>;
- status = "disabled";
- };
-
- main_gpio_intr: interrupt-controller@a00000 {
- compatible = "ti,sci-intr";
- reg = <0x00 0x00a00000 0x00 0x800>;
- ti,intr-trigger-type = <1>;
- interrupt-controller;
- interrupt-parent = <&gic500>;
- #interrupt-cells = <1>;
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <3>;
- ti,interrupt-ranges = <0 32 16>;
- };
-
- main_gpio0: gpio@600000 {
- compatible = "ti,am64-gpio", "ti,keystone-gpio";
- reg = <0x00 0x00600000 0x00 0x100>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-parent = <&main_gpio_intr>;
- interrupts = <190>, <191>, <192>,
- <193>, <194>, <195>;
- interrupt-controller;
- #interrupt-cells = <2>;
- ti,ngpio = <92>;
- ti,davinci-gpio-unbanked = <0>;
- power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 77 0>;
- clock-names = "gpio";
- };
-
- main_gpio1: gpio@601000 {
- compatible = "ti,am64-gpio", "ti,keystone-gpio";
- reg = <0x00 0x00601000 0x00 0x100>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-parent = <&main_gpio_intr>;
- interrupts = <180>, <181>, <182>,
- <183>, <184>, <185>;
- interrupt-controller;
- #interrupt-cells = <2>;
- ti,ngpio = <52>;
- ti,davinci-gpio-unbanked = <0>;
- power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 78 0>;
- clock-names = "gpio";
- };
-
- sdhci0: mmc@fa10000 {
- compatible = "ti,am64-sdhci-8bit";
- reg = <0x00 0x0fa10000 0x00 0x1000>, <0x00 0x0fa18000 0x00 0x400>;
- interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 57 1>, <&k3_clks 57 2>;
- clock-names = "clk_ahb", "clk_xin";
- assigned-clocks = <&k3_clks 57 2>;
- assigned-clock-parents = <&k3_clks 57 4>;
- bus-width = <8>;
- mmc-ddr-1_8v;
- mmc-hs200-1_8v;
- mmc-hs400-1_8v;
- ti,clkbuf-sel = <0x7>;
- ti,strobe-sel = <0x77>;
- ti,trm-icp = <0x8>;
- ti,otap-del-sel-legacy = <0x1>;
- ti,otap-del-sel-mmc-hs = <0x1>;
- ti,otap-del-sel-ddr52 = <0x6>;
- ti,otap-del-sel-hs200 = <0x8>;
- ti,otap-del-sel-hs400 = <0x5>;
- ti,itap-del-sel-legacy = <0x10>;
- ti,itap-del-sel-mmc-hs = <0xa>;
- ti,itap-del-sel-ddr52 = <0x3>;
- status = "disabled";
- };
-
- sdhci1: mmc@fa00000 {
- compatible = "ti,am62-sdhci";
- reg = <0x00 0x0fa00000 0x00 0x1000>, <0x00 0x0fa08000 0x00 0x400>;
- interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 58 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 58 5>, <&k3_clks 58 6>;
- clock-names = "clk_ahb", "clk_xin";
- bus-width = <4>;
- ti,clkbuf-sel = <0x7>;
- ti,otap-del-sel-legacy = <0x0>;
- ti,otap-del-sel-sd-hs = <0x0>;
- ti,otap-del-sel-sdr12 = <0xf>;
- ti,otap-del-sel-sdr25 = <0xf>;
- ti,otap-del-sel-sdr50 = <0xc>;
- ti,otap-del-sel-ddr50 = <0x9>;
- ti,otap-del-sel-sdr104 = <0x6>;
- ti,itap-del-sel-legacy = <0x0>;
- ti,itap-del-sel-sd-hs = <0x0>;
- ti,itap-del-sel-sdr12 = <0x0>;
- ti,itap-del-sel-sdr25 = <0x0>;
- status = "disabled";
- };
-
- sdhci2: mmc@fa20000 {
- compatible = "ti,am62-sdhci";
- reg = <0x00 0x0fa20000 0x00 0x1000>, <0x00 0x0fa28000 0x00 0x400>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&k3_pds 184 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 184 5>, <&k3_clks 184 6>;
- clock-names = "clk_ahb", "clk_xin";
- bus-width = <4>;
- ti,clkbuf-sel = <0x7>;
- ti,otap-del-sel-legacy = <0x0>;
- ti,otap-del-sel-sd-hs = <0x0>;
- ti,otap-del-sel-sdr12 = <0xf>;
- ti,otap-del-sel-sdr25 = <0xf>;
- ti,otap-del-sel-sdr50 = <0xc>;
- ti,otap-del-sel-ddr50 = <0x9>;
- ti,otap-del-sel-sdr104 = <0x6>;
- ti,itap-del-sel-legacy = <0x0>;
- ti,itap-del-sel-sd-hs = <0x0>;
- ti,itap-del-sel-sdr12 = <0x0>;
- ti,itap-del-sel-sdr25 = <0x0>;
- status = "disabled";
- };
-
- usbss0: usb@f900000 {
- compatible = "ti,am62-usb";
- reg = <0x00 0x0f900000 0x00 0x800>,
- <0x00 0x0f908000 0x00 0x400>;
- clocks = <&k3_clks 161 3>;
- clock-names = "ref";
- ti,syscon-phy-pll-refclk = <&usb0_phy_ctrl 0x0>;
- #address-cells = <2>;
- #size-cells = <2>;
- power-domains = <&k3_pds 178 TI_SCI_PD_EXCLUSIVE>;
- ranges;
- status = "disabled";
-
- usb0: usb@31000000 {
- compatible = "snps,dwc3";
- reg = <0x00 0x31000000 0x00 0x50000>;
- interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
- <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>; /* irq.0 */
- interrupt-names = "host", "peripheral";
- maximum-speed = "high-speed";
- dr_mode = "otg";
- snps,usb2-gadget-lpm-disable;
- snps,usb2-lpm-disable;
- };
- };
-
usbss1: usb@f910000 {
compatible = "ti,am62-usb";
reg = <0x00 0x0f910000 0x00 0x800>,
@@ -686,408 +31,38 @@
snps,usb2-lpm-disable;
};
};
+};
- fss: bus@fc00000 {
- compatible = "simple-bus";
- reg = <0x00 0x0fc00000 0x00 0x70000>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- ospi0: spi@fc40000 {
- compatible = "ti,am654-ospi", "cdns,qspi-nor";
- reg = <0x00 0x0fc40000 0x00 0x100>,
- <0x05 0x00000000 0x01 0x00000000>;
- interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
- cdns,fifo-depth = <256>;
- cdns,fifo-width = <4>;
- cdns,trigger-address = <0x0>;
- clocks = <&k3_clks 75 7>;
- assigned-clocks = <&k3_clks 75 7>;
- assigned-clock-parents = <&k3_clks 75 8>;
- assigned-clock-rates = <166666666>;
- power-domains = <&k3_pds 75 TI_SCI_PD_EXCLUSIVE>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
- };
-
- cpsw3g: ethernet@8000000 {
- compatible = "ti,am642-cpsw-nuss";
- #address-cells = <2>;
- #size-cells = <2>;
- reg = <0x00 0x08000000 0x00 0x200000>;
- reg-names = "cpsw_nuss";
- ranges = <0x00 0x00 0x00 0x08000000 0x00 0x200000>;
- clocks = <&k3_clks 13 0>;
- assigned-clocks = <&k3_clks 13 3>;
- assigned-clock-parents = <&k3_clks 13 11>;
- clock-names = "fck";
- power-domains = <&k3_pds 13 TI_SCI_PD_EXCLUSIVE>;
- status = "disabled";
-
- dmas = <&main_pktdma 0xc600 15>,
- <&main_pktdma 0xc601 15>,
- <&main_pktdma 0xc602 15>,
- <&main_pktdma 0xc603 15>,
- <&main_pktdma 0xc604 15>,
- <&main_pktdma 0xc605 15>,
- <&main_pktdma 0xc606 15>,
- <&main_pktdma 0xc607 15>,
- <&main_pktdma 0x4600 15>;
- dma-names = "tx0", "tx1", "tx2", "tx3", "tx4", "tx5", "tx6",
- "tx7", "rx";
-
- ethernet-ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpsw_port1: port@1 {
- reg = <1>;
- ti,mac-only;
- label = "port1";
- phys = <&phy_gmii_sel 1>;
- mac-address = [00 00 00 00 00 00];
- status = "disabled";
- };
-
- cpsw_port2: port@2 {
- reg = <2>;
- ti,mac-only;
- label = "port2";
- phys = <&phy_gmii_sel 2>;
- mac-address = [00 00 00 00 00 00];
- status = "disabled";
- };
- };
-
- cpsw3g_mdio: mdio@f00 {
- compatible = "ti,cpsw-mdio","ti,davinci_mdio";
- reg = <0x00 0xf00 0x00 0x100>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&k3_clks 13 0>;
- clock-names = "fck";
- bus_freq = <1000000>;
- status = "disabled";
- };
-
- cpts@3d000 {
- compatible = "ti,j721e-cpts";
- reg = <0x00 0x3d000 0x00 0x400>;
- clocks = <&k3_clks 13 3>;
- clock-names = "cpts";
- interrupts-extended = <&gic500 GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "cpts";
- ti,cpts-ext-ts-inputs = <4>;
- ti,cpts-periodic-outputs = <2>;
- };
- };
-
- hwspinlock: spinlock@2a000000 {
- compatible = "ti,am64-hwspinlock";
- reg = <0x00 0x2a000000 0x00 0x1000>;
- #hwlock-cells = <1>;
- };
-
- mailbox0_cluster0: mailbox@29000000 {
- compatible = "ti,am64-mailbox";
- reg = <0x00 0x29000000 0x00 0x200>;
- interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <1>;
- ti,mbox-num-users = <4>;
- ti,mbox-num-fifos = <16>;
- };
-
- mailbox0_cluster1: mailbox@29010000 {
- compatible = "ti,am64-mailbox";
- reg = <0x00 0x29010000 0x00 0x200>;
- interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <1>;
- ti,mbox-num-users = <4>;
- ti,mbox-num-fifos = <16>;
- };
-
- mailbox0_cluster2: mailbox@29020000 {
- compatible = "ti,am64-mailbox";
- reg = <0x00 0x29020000 0x00 0x200>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <1>;
- ti,mbox-num-users = <4>;
- ti,mbox-num-fifos = <16>;
- };
-
- mailbox0_cluster3: mailbox@29030000 {
- compatible = "ti,am64-mailbox";
- reg = <0x00 0x29030000 0x00 0x200>;
- interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <1>;
- ti,mbox-num-users = <4>;
- ti,mbox-num-fifos = <16>;
- };
-
- ecap0: pwm@23100000 {
- compatible = "ti,am3352-ecap";
- #pwm-cells = <3>;
- reg = <0x00 0x23100000 0x00 0x100>;
- power-domains = <&k3_pds 51 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 51 0>;
- clock-names = "fck";
- status = "disabled";
- };
-
- ecap1: pwm@23110000 {
- compatible = "ti,am3352-ecap";
- #pwm-cells = <3>;
- reg = <0x00 0x23110000 0x00 0x100>;
- power-domains = <&k3_pds 52 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 52 0>;
- clock-names = "fck";
- status = "disabled";
- };
-
- ecap2: pwm@23120000 {
- compatible = "ti,am3352-ecap";
- #pwm-cells = <3>;
- reg = <0x00 0x23120000 0x00 0x100>;
- power-domains = <&k3_pds 53 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 53 0>;
- clock-names = "fck";
- status = "disabled";
- };
-
- main_mcan0: can@20701000 {
- compatible = "bosch,m_can";
- reg = <0x00 0x20701000 0x00 0x200>,
- <0x00 0x20708000 0x00 0x8000>;
- reg-names = "m_can", "message_ram";
- power-domains = <&k3_pds 98 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 98 6>, <&k3_clks 98 1>;
- clock-names = "hclk", "cclk";
- interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "int0", "int1";
- bosch,mram-cfg = <0x0 128 64 64 64 64 32 32>;
- status = "disabled";
- };
-
- main_mcan1: can@20711000 {
- compatible = "bosch,m_can";
- reg = <0x00 0x20711000 0x00 0x200>,
- <0x00 0x20718000 0x00 0x8000>;
- reg-names = "m_can", "message_ram";
- power-domains = <&k3_pds 99 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 99 6>, <&k3_clks 99 1>;
- clock-names = "hclk", "cclk";
- interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "int0", "int1";
- bosch,mram-cfg = <0x0 128 64 64 64 64 32 32>;
- status = "disabled";
- };
-
- main_rti0: watchdog@e000000 {
- compatible = "ti,j7-rti-wdt";
- reg = <0x00 0x0e000000 0x00 0x100>;
- clocks = <&k3_clks 125 0>;
- power-domains = <&k3_pds 125 TI_SCI_PD_EXCLUSIVE>;
- assigned-clocks = <&k3_clks 125 0>;
- assigned-clock-parents = <&k3_clks 125 2>;
- };
-
- main_rti1: watchdog@e010000 {
- compatible = "ti,j7-rti-wdt";
- reg = <0x00 0x0e010000 0x00 0x100>;
- clocks = <&k3_clks 126 0>;
- power-domains = <&k3_pds 126 TI_SCI_PD_EXCLUSIVE>;
- assigned-clocks = <&k3_clks 126 0>;
- assigned-clock-parents = <&k3_clks 126 2>;
- };
-
- main_rti2: watchdog@e020000 {
- compatible = "ti,j7-rti-wdt";
- reg = <0x00 0x0e020000 0x00 0x100>;
- clocks = <&k3_clks 127 0>;
- power-domains = <&k3_pds 127 TI_SCI_PD_EXCLUSIVE>;
- assigned-clocks = <&k3_clks 127 0>;
- assigned-clock-parents = <&k3_clks 127 2>;
- };
-
- main_rti3: watchdog@e030000 {
- compatible = "ti,j7-rti-wdt";
- reg = <0x00 0x0e030000 0x00 0x100>;
- clocks = <&k3_clks 128 0>;
- power-domains = <&k3_pds 128 TI_SCI_PD_EXCLUSIVE>;
- assigned-clocks = <&k3_clks 128 0>;
- assigned-clock-parents = <&k3_clks 128 2>;
- };
-
- main_rti15: watchdog@e0f0000 {
- compatible = "ti,j7-rti-wdt";
- reg = <0x00 0x0e0f0000 0x00 0x100>;
- clocks = <&k3_clks 130 0>;
- power-domains = <&k3_pds 130 TI_SCI_PD_EXCLUSIVE>;
- assigned-clocks = <&k3_clks 130 0>;
- assigned-clock-parents = <&k3_clks 130 2>;
- };
-
- epwm0: pwm@23000000 {
- compatible = "ti,am64-epwm", "ti,am3352-ehrpwm";
- #pwm-cells = <3>;
- reg = <0x00 0x23000000 0x00 0x100>;
- power-domains = <&k3_pds 86 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&epwm_tbclk 0>, <&k3_clks 86 0>;
- clock-names = "tbclk", "fck";
- status = "disabled";
- };
-
- epwm1: pwm@23010000 {
- compatible = "ti,am64-epwm", "ti,am3352-ehrpwm";
- #pwm-cells = <3>;
- reg = <0x00 0x23010000 0x00 0x100>;
- power-domains = <&k3_pds 87 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&epwm_tbclk 1>, <&k3_clks 87 0>;
- clock-names = "tbclk", "fck";
- status = "disabled";
- };
-
- epwm2: pwm@23020000 {
- compatible = "ti,am64-epwm", "ti,am3352-ehrpwm";
- #pwm-cells = <3>;
- reg = <0x00 0x23020000 0x00 0x100>;
- power-domains = <&k3_pds 88 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&epwm_tbclk 2>, <&k3_clks 88 0>;
- clock-names = "tbclk", "fck";
- status = "disabled";
- };
-
- mcasp0: audio-controller@2b00000 {
- compatible = "ti,am33xx-mcasp-audio";
- reg = <0x00 0x02b00000 0x00 0x2000>,
- <0x00 0x02b08000 0x00 0x400>;
- reg-names = "mpu", "dat";
- interrupts = <GIC_SPI 236 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tx", "rx";
-
- dmas = <&main_bcdma 0 0xc500 0>, <&main_bcdma 0 0x4500 0>;
- dma-names = "tx", "rx";
-
- clocks = <&k3_clks 190 0>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 190 0>;
- assigned-clock-parents = <&k3_clks 190 2>;
- power-domains = <&k3_pds 190 TI_SCI_PD_EXCLUSIVE>;
- status = "disabled";
- };
-
- mcasp1: audio-controller@2b10000 {
- compatible = "ti,am33xx-mcasp-audio";
- reg = <0x00 0x02b10000 0x00 0x2000>,
- <0x00 0x02b18000 0x00 0x400>;
- reg-names = "mpu", "dat";
- interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 237 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tx", "rx";
-
- dmas = <&main_bcdma 0 0xc501 0>, <&main_bcdma 0 0x4501 0>;
- dma-names = "tx", "rx";
-
- clocks = <&k3_clks 191 0>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 191 0>;
- assigned-clock-parents = <&k3_clks 191 2>;
- power-domains = <&k3_pds 191 TI_SCI_PD_EXCLUSIVE>;
- status = "disabled";
- };
-
- mcasp2: audio-controller@2b20000 {
- compatible = "ti,am33xx-mcasp-audio";
- reg = <0x00 0x02b20000 0x00 0x2000>,
- <0x00 0x02b28000 0x00 0x400>;
- reg-names = "mpu", "dat";
- interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tx", "rx";
-
- dmas = <&main_bcdma 0 0xc502 0>, <&main_bcdma 0 0x4502 0>;
- dma-names = "tx", "rx";
-
- clocks = <&k3_clks 192 0>;
- clock-names = "fck";
- assigned-clocks = <&k3_clks 192 0>;
- assigned-clock-parents = <&k3_clks 192 2>;
- power-domains = <&k3_pds 192 TI_SCI_PD_EXCLUSIVE>;
- status = "disabled";
- };
-
- ti_csi2rx0: ticsi2rx@30102000 {
- compatible = "ti,j721e-csi2rx-shim";
- reg = <0x00 0x30102000 0x00 0x1000>;
- ranges;
- #address-cells = <2>;
- #size-cells = <2>;
- dmas = <&main_bcdma_csi 0 0x5000 0>;
- dma-names = "rx0";
- power-domains = <&k3_pds 182 TI_SCI_PD_EXCLUSIVE>;
- status = "disabled";
-
- cdns_csi2rx0: csi-bridge@30101000 {
- compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
- reg = <0x00 0x30101000 0x00 0x1000>;
- clocks = <&k3_clks 182 0>, <&k3_clks 182 3>, <&k3_clks 182 0>,
- <&k3_clks 182 0>, <&k3_clks 182 4>, <&k3_clks 182 4>;
- clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
- "pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
- phys = <&dphy0>;
- phy-names = "dphy";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- csi0_port0: port@0 {
- reg = <0>;
- status = "disabled";
- };
-
- csi0_port1: port@1 {
- reg = <1>;
- status = "disabled";
- };
+&oc_sram {
+ reg = <0x00 0x70000000 0x00 0x10000>;
+ ranges = <0x00 0x00 0x70000000 0x10000>;
+};
- csi0_port2: port@2 {
- reg = <2>;
- status = "disabled";
- };
+&inta_main_dmss {
+ ti,interrupt-ranges = <5 69 35>;
+};
- csi0_port3: port@3 {
- reg = <3>;
- status = "disabled";
- };
+&main_pmx0 {
+ pinctrl-single,gpio-range =
+ <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 33 92 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>;
- csi0_port4: port@4 {
- reg = <4>;
- status = "disabled";
- };
- };
- };
+ main_pmx0_range: gpio-range {
+ #pinctrl-single,gpio-range-cells = <3>;
};
+};
- dphy0: phy@30110000 {
- compatible = "cdns,dphy-rx";
- reg = <0x00 0x30110000 0x00 0x1100>;
- #phy-cells = <0>;
- power-domains = <&k3_pds 185 TI_SCI_PD_EXCLUSIVE>;
- status = "disabled";
- };
+&main_gpio0 {
+ gpio-ranges = <&main_pmx0 0 0 32>, <&main_pmx0 32 33 38>,
+ <&main_pmx0 70 72 22>;
+ ti,ngpio = <92>;
+};
- vpu: video-codec@30210000 {
- compatible = "ti,j721s2-wave521c", "cnm,wave521c";
- reg = <0x00 0x30210000 0x00 0x10000>;
- interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&k3_clks 204 2>;
- power-domains = <&k3_pds 204 TI_SCI_PD_EXCLUSIVE>;
- };
+&main_gpio1 {
+ gpio-ranges = <&main_pmx0 0 94 32>, <&main_pmx0 42 137 5>,
+ <&main_pmx0 47 143 3>, <&main_pmx0 50 149 2>;
+ ti,ngpio = <52>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p.dtsi b/arch/arm64/boot/dts/ti/k3-am62p.dtsi
index 94babc412575..75a15c368c11 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p.dtsi
@@ -116,10 +116,13 @@
};
};
- #include "k3-am62p-thermal.dtsi"
+ #include "k3-am62p-j722s-common-thermal.dtsi"
};
/* Now include peripherals for each bus segment */
+#include "k3-am62p-j722s-common-main.dtsi"
+#include "k3-am62p-j722s-common-mcu.dtsi"
+#include "k3-am62p-j722s-common-wakeup.dtsi"
+
+/* Include AM62P specific peripherals */
#include "k3-am62p-main.dtsi"
-#include "k3-am62p-mcu.dtsi"
-#include "k3-am62p-wakeup.dtsi"
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index 6e7234659111..ff65955551a3 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -207,7 +207,7 @@
pinctrl-single,pins = <
AM62PX_IOPAD(0x0090, PIN_INPUT, 2) /* (U24) GPMC0_BE0n_CLE.MCASP1_ACLKX */
AM62PX_IOPAD(0x0098, PIN_INPUT, 2) /* (AA24) GPMC0_WAIT0.MCASP1_AFSX */
- AM62PX_IOPAD(0x008c, PIN_INPUT, 2) /* (T25) GPMC0_WEn.MCASP1_AXR0 */
+ AM62PX_IOPAD(0x008c, PIN_OUTPUT, 2) /* (T25) GPMC0_WEn.MCASP1_AXR0 */
AM62PX_IOPAD(0x0084, PIN_INPUT, 2) /* (R25) GPMC0_ADVn_ALE.MCASP1_AXR2 */
>;
};
@@ -364,14 +364,9 @@
self-powered;
data-role = "dual";
power-role = "sink";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- usb_con_hs: endpoint {
- remote-endpoint = <&usb0_hs_ep>;
- };
+ port {
+ usb_con_hs: endpoint {
+ remote-endpoint = <&usb0_hs_ep>;
};
};
};
@@ -516,11 +511,8 @@
&usb0 {
usb-role-switch;
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
+ port {
usb0_hs_ep: endpoint {
remote-endpoint = <&usb_con_hs>;
};
@@ -549,8 +541,6 @@
0 0 0 0
0 0 0 0
>;
- tx-num-evt = <32>;
- rx-num-evt = <32>;
};
&fss {
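
Note: the two USB hunks above replace a single-entry ports { port@0 { ... } } container with a bare port node, the usual of-graph shorthand when a device exposes exactly one port; the #address-cells/#size-cells and reg bookkeeping then becomes unnecessary. A minimal sketch of the resulting endpoint pair, using placeholder labels (ctrl_ep and conn_ep are illustrative only):

    &usb_controller {
        port {
            ctrl_ep: endpoint {
                remote-endpoint = <&conn_ep>;
            };
        };
    };

    &usb_connector {
        port {
            conn_ep: endpoint {
                remote-endpoint = <&ctrl_ep>;
            };
        };
    };
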
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
new file mode 100644
index 000000000000..e4633af87eb9
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2022-2024 PHYTEC Messtechnik GmbH
+ * Author: Wadim Egorov <w.egorov@phytec.de>
+ *
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+ aliases {
+ serial2 = &main_uart0;
+ serial3 = &main_uart1;
+ mmc1 = &sdhci1;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ ethernet1 = &cpsw_port2;
+ };
+
+ can_tc1: can-phy0 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <8000000>;
+ standby-gpios = <&gpio_exp 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ hdmi0: connector-hdmi {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&sii9022_out>;
+ };
+ };
+ };
+
+ keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pins_default>;
+
+ key-home {
+ label = "home";
+ linux,code = <KEY_HOME>;
+ gpios = <&main_gpio1 23 GPIO_ACTIVE_HIGH>;
+ };
+
+ key-menu {
+ label = "menu";
+ linux,code = <KEY_MENU>;
+ gpios = <&gpio_exp 4 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "phyBOARD-Lyra";
+ simple-audio-card,widgets =
+ "Microphone", "Mic Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "External Speaker";
+ simple-audio-card,routing =
+ "MIC3R", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT",
+ "External Speaker", "SPOP",
+ "External Speaker", "SPOM";
+ simple-audio-card,format = "dsp_b";
+ simple-audio-card,bitclock-master = <&sound_master>;
+ simple-audio-card,frame-master = <&sound_master>;
+ simple-audio-card,bitclock-inversion;
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp2>;
+ };
+
+ sound_master: simple-audio-card,codec {
+ sound-dai = <&audio_codec>;
+ clocks = <&audio_refclk1>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_pins_default>, <&user_leds_pins_default>;
+
+ led-1 {
+ gpios = <&main_gpio0 32 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ };
+
+ led-2 {
+ gpios = <&gpio_exp 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ };
+ };
+
+ vcc_1v8: regulator-vcc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_3v3_mmc: regulator-vcc-3v3-mmc {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3_MMC";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_3v3_sw: regulator-vcc-3v3-sw {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3_SW";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&main_pmx0 {
+ audio_ext_refclk1_pins_default: audio-ext-refclk1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x0a0, PIN_OUTPUT, 1) /* (K25) GPMC0_WPn.AUDIO_EXT_REFCLK1 */
+ >;
+ };
+
+ gpio_keys_pins_default: gpio-keys-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x1d4, PIN_INPUT, 7) /* (B15) UART0_RTSn.GPIO1_23 */
+ >;
+ };
+
+ gpio_exp_int_pins_default: gpio-exp-int-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x244, PIN_INPUT, 7) /* (C17) MMC1_SDWP.GPIO1_49 */
+ >;
+ };
+
+ hdmi_int_pins_default: hdmi-int-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x040, PIN_INPUT, 7) /* (N23) GPMC0_AD1.GPIO0_16 */
+ >;
+ };
+
+ main_dss0_pins_default: main-dss0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x0b8, PIN_OUTPUT, 0) /* (U22) VOUT0_DATA0 */
+ AM62X_IOPAD(0x0bc, PIN_OUTPUT, 0) /* (V24) VOUT0_DATA1 */
+ AM62X_IOPAD(0x0e0, PIN_OUTPUT, 0) /* (V20) VOUT0_DATA10 */
+ AM62X_IOPAD(0x0e4, PIN_OUTPUT, 0) /* (AA23) VOUT0_DATA11 */
+ AM62X_IOPAD(0x0e8, PIN_OUTPUT, 0) /* (AB25) VOUT0_DATA12 */
+ AM62X_IOPAD(0x0ec, PIN_OUTPUT, 0) /* (AA24) VOUT0_DATA13 */
+ AM62X_IOPAD(0x0f0, PIN_OUTPUT, 0) /* (Y22) VOUT0_DATA14 */
+ AM62X_IOPAD(0x0f4, PIN_OUTPUT, 0) /* (AA21) VOUT0_DATA15 */
+ AM62X_IOPAD(0x0c0, PIN_OUTPUT, 0) /* (W25) VOUT0_DATA2 */
+ AM62X_IOPAD(0x0c4, PIN_OUTPUT, 0) /* (W24) VOUT0_DATA3 */
+ AM62X_IOPAD(0x0c8, PIN_OUTPUT, 0) /* (Y25) VOUT0_DATA4 */
+ AM62X_IOPAD(0x0cc, PIN_OUTPUT, 0) /* (Y24) VOUT0_DATA5 */
+ AM62X_IOPAD(0x0d0, PIN_OUTPUT, 0) /* (Y23) VOUT0_DATA6 */
+ AM62X_IOPAD(0x0d4, PIN_OUTPUT, 0) /* (AA25) VOUT0_DATA7 */
+ AM62X_IOPAD(0x0d8, PIN_OUTPUT, 0) /* (V21) VOUT0_DATA8 */
+ AM62X_IOPAD(0x0dc, PIN_OUTPUT, 0) /* (W21) VOUT0_DATA9 */
+ AM62X_IOPAD(0x0fc, PIN_OUTPUT, 0) /* (Y20) VOUT0_DE */
+ AM62X_IOPAD(0x0f8, PIN_OUTPUT, 0) /* (AB24) VOUT0_HSYNC */
+ AM62X_IOPAD(0x104, PIN_OUTPUT, 0) /* (AC24) VOUT0_PCLK */
+ AM62X_IOPAD(0x100, PIN_OUTPUT, 0) /* (AC25) VOUT0_VSYNC */
+ >;
+ };
+
+ main_i2c1_pins_default: main-i2c1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x1e8, PIN_INPUT_PULLUP, 0) /* (B17) I2C1_SCL */
+ AM62X_IOPAD(0x1ec, PIN_INPUT_PULLUP, 0) /* (A17) I2C1_SDA */
+ >;
+ };
+
+ main_mcan0_pins_default: main-mcan0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x1dc, PIN_INPUT, 0) /* (E15) MCAN0_RX */
+ AM62X_IOPAD(0x1d8, PIN_OUTPUT, 0) /* (C15) MCAN0_TX */
+ >;
+ };
+
+ main_mcasp2_pins_default: main-mcasp2-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x070, PIN_INPUT, 3) /* (T24) GPMC0_AD13.MCASP2_ACLKX */
+ AM62X_IOPAD(0x06c, PIN_INPUT, 3) /* (T22) GPMC0_AD12.MCASP2_AFSX */
+ AM62X_IOPAD(0x064, PIN_OUTPUT, 3) /* (T25) GPMC0_AD10.MCASP2_AXR2 */
+ AM62X_IOPAD(0x068, PIN_INPUT, 3) /* (R21) GPMC0_AD11.MCASP2_AXR3 */
+ >;
+ };
+
+ main_mmc1_pins_default: main-mmc1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x23c, PIN_INPUT_PULLUP, 0) /* (A21) MMC1_CMD */
+ AM62X_IOPAD(0x234, PIN_INPUT_PULLDOWN, 0) /* (B22) MMC1_CLK */
+ AM62X_IOPAD(0x230, PIN_INPUT_PULLUP, 0) /* (A22) MMC1_DAT0 */
+ AM62X_IOPAD(0x22c, PIN_INPUT_PULLUP, 0) /* (B21) MMC1_DAT1 */
+ AM62X_IOPAD(0x228, PIN_INPUT_PULLUP, 0) /* (C21) MMC1_DAT2 */
+ AM62X_IOPAD(0x224, PIN_INPUT_PULLUP, 0) /* (D22) MMC1_DAT3 */
+ AM62X_IOPAD(0x240, PIN_INPUT_PULLUP, 0) /* (D17) MMC1_SDCD */
+ >;
+ };
+
+ main_rgmii2_pins_default: main-rgmii2-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x184, PIN_INPUT, 0) /* (AE23) RGMII2_RD0 */
+ AM62X_IOPAD(0x188, PIN_INPUT, 0) /* (AB20) RGMII2_RD1 */
+ AM62X_IOPAD(0x18c, PIN_INPUT, 0) /* (AC21) RGMII2_RD2 */
+ AM62X_IOPAD(0x190, PIN_INPUT, 0) /* (AE22) RGMII2_RD3 */
+ AM62X_IOPAD(0x180, PIN_INPUT, 0) /* (AD23) RGMII2_RXC */
+ AM62X_IOPAD(0x17c, PIN_INPUT, 0) /* (AD22) RGMII2_RX_CTL */
+ AM62X_IOPAD(0x16c, PIN_OUTPUT, 0) /* (Y18) RGMII2_TD0 */
+ AM62X_IOPAD(0x170, PIN_OUTPUT, 0) /* (AA18) RGMII2_TD1 */
+ AM62X_IOPAD(0x174, PIN_OUTPUT, 0) /* (AD21) RGMII2_TD2 */
+ AM62X_IOPAD(0x178, PIN_OUTPUT, 0) /* (AC20) RGMII2_TD3 */
+ AM62X_IOPAD(0x168, PIN_OUTPUT, 0) /* (AE21) RGMII2_TXC */
+ AM62X_IOPAD(0x164, PIN_OUTPUT, 0) /* (AA19) RGMII2_TX_CTL */
+ >;
+ };
+
+ main_uart0_pins_default: main-uart0-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x1c8, PIN_INPUT, 0) /* (D14) UART0_RXD */
+ AM62X_IOPAD(0x1cc, PIN_OUTPUT, 0) /* (E14) UART0_TXD */
+ >;
+ };
+
+ main_uart1_pins_default: main-uart1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x194, PIN_INPUT, 2) /* (B19) MCASP0_AXR3.UART1_CTSn */
+ AM62X_IOPAD(0x198, PIN_OUTPUT, 2) /* (A19) MCASP0_AXR2.UART1_RTSn */
+ AM62X_IOPAD(0x1ac, PIN_INPUT, 2) /* (E19) MCASP0_AFSR.UART1_RXD */
+ AM62X_IOPAD(0x1b0, PIN_OUTPUT, 2) /* (A20) MCASP0_ACLKR.UART1_TXD */
+ >;
+ };
+
+ main_usb1_pins_default: main-usb1-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x258, PIN_OUTPUT, 0) /* (F18) USB1_DRVVBUS */
+ >;
+ };
+
+ user_leds_pins_default: user-leds-default-pins {
+ pinctrl-single,pins = <
+ AM62X_IOPAD(0x084, PIN_OUTPUT, 7) /* (L23) GPMC0_ADVn_ALE.GPIO0_32 */
+ >;
+ };
+};
+
+&cpsw3g {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_rgmii1_pins_default>, <&main_rgmii2_pins_default>;
+};
+
+&cpsw_port2 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy3>;
+};
+
+&cpsw3g_mdio {
+ cpsw3g_phy3: ethernet-phy@3 {
+ compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
+ reg = <3>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+};
+
+&dss {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_dss0_pins_default>;
+ status = "okay";
+};
+
+&dss_ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* VP2: DPI/HDMI Output */
+ port@1 {
+ reg = <1>;
+
+ dpi1_out: endpoint {
+ remote-endpoint = <&sii9022_in>;
+ };
+ };
+};
+
+&main_i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ audio_codec: audio-codec@18 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_ext_refclk1_pins_default>;
+
+ #sound-dai-cells = <0>;
+ compatible = "ti,tlv320aic3007";
+ reg = <0x18>;
+ ai3x-micbias-vg = <2>;
+
+ AVDD-supply = <&vcc_3v3_sw>;
+ IOVDD-supply = <&vcc_3v3_sw>;
+ DRVDD-supply = <&vcc_3v3_sw>;
+ DVDD-supply = <&vcc_1v8>;
+ };
+
+ gpio_exp: gpio-expander@21 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_exp_int_pins_default>;
+ compatible = "nxp,pcf8574";
+ reg = <0x21>;
+ interrupt-parent = <&main_gpio1>;
+ interrupts = <49 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-line-names = "", "GPIO1_CAN0_nEN",
+ "GPIO2_LED2", "GPIO3_LVDS_GPIO",
+ "GPIO4_BUT2", "GPIO5_LVDS_BKLT_EN",
+ "GPIO6_ETH1_USER_RESET", "GPIO7_AUDIO_USER_RESET";
+ };
+
+ usb-pd@22 {
+ compatible = "ti,tps6598x";
+ reg = <0x22>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ self-powered;
+ data-role = "dual";
+ power-role = "sink";
+ port {
+ usb_con_hs: endpoint {
+ remote-endpoint = <&typec_hs>;
+ };
+ };
+ };
+ };
+
+ sii9022: bridge-hdmi@39 {
+ compatible = "sil,sii9022";
+ reg = <0x39>;
+
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_int_pins_default>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ sii9022_in: endpoint {
+ remote-endpoint = <&dpi1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ sii9022_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c02";
+ pagesize = <16>;
+ reg = <0x51>;
+ };
+};
+
+&main_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan0_pins_default>;
+ phys = <&can_tc1>;
+ status = "okay";
+};
+
+&main_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart0_pins_default>;
+ status = "okay";
+};
+
+&main_uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart1_pins_default>;
+ /* Main UART1 may be used by TIFS firmware */
+ status = "okay";
+};
+
+&mcasp2 {
+ #sound-dai-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcasp2_pins_default>;
+
+ /* MCASP_IIS_MODE */
+ op-mode = <0>;
+ tdm-slots = <2>;
+
+ /* 0: INACTIVE, 1: TX, 2: RX */
+ serial-dir = <
+ 0 0 1 2
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ >;
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+ status = "okay";
+};
+
+&sdhci1 {
+ vmmc-supply = <&vcc_3v3_mmc>;
+ vqmmc-supply = <&vddshv5_sdio>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc1_pins_default>;
+ disable-wp;
+ no-1-8-v;
+ status = "okay";
+};
+
+&usbss0 {
+ ti,vbus-divider;
+ status = "okay";
+};
+
+&usbss1 {
+ ti,vbus-divider;
+ status = "okay";
+};
+
+&usb0 {
+ usb-role-switch;
+
+ port {
+ typec_hs: endpoint {
+ remote-endpoint = <&usb_con_hs>;
+ };
+ };
+};
+
+&usb1 {
+ dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_usb1_pins_default>;
+};
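
Note: in the new phyboard-lyra include above, the PCF8574 expander (&gpio_exp) is declared as both a gpio-controller and an interrupt-controller, and the keys, LEDs and CAN transceiver then consume individual expander lines by phandle. A minimal consumer sketch following the same pattern (example-device and the line numbers are hypothetical):

    example-device {
        /* expander line 6 used as an active-low reset */
        reset-gpios = <&gpio_exp 6 GPIO_ACTIVE_LOW>;
        /* interrupt delivered through the expander's INT output */
        interrupt-parent = <&gpio_exp>;
        interrupts = <4 IRQ_TYPE_EDGE_FALLING>;  /* from dt-bindings/interrupt-controller/irq.h */
    };
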
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
index 3c45782ab2b7..44ff67b6bf1e 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
@@ -48,6 +48,14 @@
pmsg-size = <0x8000>;
};
+ /* global cma region */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x00 0x8000000>;
+ linux,cma-default;
+ };
+
secure_tfa_ddr: tfa@9e780000 {
reg = <0x00 0x9e780000 0x00 0x80000>;
alignment = <0x1000>;
@@ -128,6 +136,10 @@
};
};
+&phy_gmii_sel {
+ bootph-all;
+};
+
&main_pmx0 {
/* First pad number is ALW package and second is AMC package */
main_uart0_pins_default: main-uart0-default-pins {
@@ -156,6 +168,7 @@
};
main_i2c1_pins_default: main-i2c1-default-pins {
+ bootph-all;
pinctrl-single,pins = <
AM62X_IOPAD(0x1e8, PIN_INPUT_PULLUP, 0) /* (B17/A17) I2C1_SCL */
AM62X_IOPAD(0x1ec, PIN_INPUT_PULLUP, 0) /* (A17/A16) I2C1_SDA */
@@ -335,15 +348,9 @@
self-powered;
data-role = "dual";
power-role = "sink";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- usb_con_hs: endpoint {
- remote-endpoint = <&usb0_hs_ep>;
- };
+ port {
+ usb_con_hs: endpoint {
+ remote-endpoint = <&usb0_hs_ep>;
};
};
};
@@ -470,12 +477,9 @@
&usb0 {
bootph-all;
- #address-cells = <1>;
- #size-cells = <0>;
usb-role-switch;
- port@0 {
- reg = <0>;
+ port {
usb0_hs_ep: endpoint {
remote-endpoint = <&usb_con_hs>;
};
@@ -504,8 +508,6 @@
0 0 0 0
0 0 0 0
>;
- tx-num-evt = <32>;
- rx-num-evt = <32>;
};
&dss {
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index 6f9aa5e02138..f8370dd03350 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -1283,6 +1283,9 @@
<0x22400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-pru0_0-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <16 2 2>;
+ interrupt-names = "vring";
};
rtu0_0: rtu@4000 {
@@ -1292,6 +1295,9 @@
<0x23400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-rtu0_0-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <20 4 4>;
+ interrupt-names = "vring";
};
tx_pru0_0: txpru@a000 {
@@ -1310,6 +1316,9 @@
<0x24400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-pru0_1-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <18 3 3>;
+ interrupt-names = "vring";
};
rtu0_1: rtu@6000 {
@@ -1319,6 +1328,9 @@
<0x23c00 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-rtu0_1-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <22 5 5>;
+ interrupt-names = "vring";
};
tx_pru0_1: txpru@c000 {
@@ -1436,6 +1448,9 @@
<0x22400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-pru1_0-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <16 2 2>;
+ interrupt-names = "vring";
};
rtu1_0: rtu@4000 {
@@ -1445,6 +1460,9 @@
<0x23400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-rtu1_0-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <20 4 4>;
+ interrupt-names = "vring";
};
tx_pru1_0: txpru@a000 {
@@ -1463,6 +1481,9 @@
<0x24400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-pru1_1-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <18 3 3>;
+ interrupt-names = "vring";
};
rtu1_1: rtu@6000 {
@@ -1472,6 +1493,9 @@
<0x23c00 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am64x-rtu1_1-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <22 5 5>;
+ interrupt-names = "vring";
};
tx_pru1_1: txpru@c000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
index 125e507966fb..ea7c58fb67e2 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
@@ -265,6 +265,50 @@
interrupts = <70 IRQ_TYPE_EDGE_FALLING>;
wakeup-source;
};
+
+ pmic@61 {
+ compatible = "ti,lp8733";
+ reg = <0x61>;
+
+ buck0-in-supply = <&vcc_5v0_som>;
+ buck1-in-supply = <&vcc_5v0_som>;
+ ldo0-in-supply = <&vdd_3v3>;
+ ldo1-in-supply = <&vdd_3v3>;
+
+ regulators {
+ vdd_core: buck0 {
+ regulator-name = "VDD_CORE";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_3v3: buck1 {
+ regulator-name = "VDD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8_ldo0: ldo0 {
+ regulator-name = "VDD_1V8_LDO0";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdda_1v8: ldo1 {
+ regulator-name = "VDDA_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
};
&main_r5fss0_core0 {
@@ -296,7 +340,7 @@
pinctrl-names = "default";
pinctrl-0 = <&ospi0_pins_default>;
- flash@0 {
+ serial_flash: flash@0 {
compatible = "jedec,spi-nor";
reg = <0x0>;
spi-tx-bus-width = <8>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-sdcard.dtso b/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-sdcard.dtso
index 79ed19c6c0e9..c4525024ba5d 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-sdcard.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-sdcard.dtso
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
- * Copyright (c) 2022-2023 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
+ * Copyright (c) 2022-2024 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-wlan.dtso b/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-wlan.dtso
index 32596a84b7ba..82f8a21b6cbf 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-wlan.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am64-tqma64xxl-mbax4xxl-wlan.dtso
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
- * Copyright (c) 2022-2023 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
+ * Copyright (c) 2022-2024 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm-icssg1-dualemac-mii.dtso b/arch/arm64/boot/dts/ti/k3-am642-evm-icssg1-dualemac-mii.dtso
new file mode 100644
index 000000000000..423d6027278d
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm-icssg1-dualemac-mii.dtso
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/**
+ * DT overlay for enabling both ICSSG1 ports on AM642 EVM in MII mode
+ *
+ * Copyright (C) 2020-2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+#include <dt-bindings/gpio/gpio.h>
+#include "k3-pinctrl.h"
+
+&{/} {
+ aliases {
+ ethernet1 = "/icssg1-eth/ethernet-ports/port@1";
+ };
+
+ mdio-mux-2 {
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mdio_mux>;
+ mdio-parent-bus = <&icssg1_mdio>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ icssg1_phy2: ethernet-phy@3 {
+ reg = <3>;
+ };
+ };
+ };
+};
+
+&main_pmx0 {
+ icssg1_mii1_pins_default: icssg1-mii1-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x00f8, PIN_INPUT, 1) /* (V9) PRG1_PRU0_GPO16.PR1_MII_MT0_CLK */
+ AM64X_IOPAD(0x00f4, PIN_OUTPUT, 0) /* (Y9) PRG1_PRU0_GPO15.PR1_MII0_TXEN */
+ AM64X_IOPAD(0x00f0, PIN_OUTPUT, 0) /* (AA9) PRG1_PRU0_GPO14.PR1_MII0_TXD3 */
+ AM64X_IOPAD(0x00ec, PIN_OUTPUT, 0) /* (W9) PRG1_PRU0_GPO13.PR1_MII0_TXD2 */
+ AM64X_IOPAD(0x00e8, PIN_OUTPUT, 0) /* (U9) PRG1_PRU0_GPO12.PR1_MII0_TXD1 */
+ AM64X_IOPAD(0x00e4, PIN_OUTPUT, 0) /* (AA8) PRG1_PRU0_GPO11.PR1_MII0_TXD0 */
+ AM64X_IOPAD(0x00c8, PIN_INPUT, 1) /* (Y8) PRG1_PRU0_GPO4.PR1_MII0_RXDV */
+ AM64X_IOPAD(0x00d0, PIN_INPUT, 1) /* (AA7) PRG1_PRU0_GPO6.PR1_MII_MR0_CLK */
+ AM64X_IOPAD(0x00c4, PIN_INPUT, 1) /* (V8) PRG1_PRU0_GPO3.PR1_MII0_RXD3 */
+ AM64X_IOPAD(0x00c0, PIN_INPUT, 1) /* (W8) PRG1_PRU0_GPO2.PR1_MII0_RXD2 */
+ AM64X_IOPAD(0x00cc, PIN_INPUT, 1) /* (V13) PRG1_PRU0_GPO5.PR1_MII0_RXER */
+ AM64X_IOPAD(0x00bc, PIN_INPUT, 1) /* (U8) PRG1_PRU0_GPO1.PR1_MII0_RXD1 */
+ AM64X_IOPAD(0x00b8, PIN_INPUT, 1) /* (Y7) PRG1_PRU0_GPO0.PR1_MII0_RXD0 */
+ AM64X_IOPAD(0x00d8, PIN_INPUT, 1) /* (W13) PRG1_PRU0_GPO8.PR1_MII0_RXLINK */
+ >;
+ };
+
+ icssg1_mii2_pins_default: icssg1-mii2-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0148, PIN_INPUT, 1) /* (Y10) PRG1_PRU1_GPO16.PR1_MII_MT1_CLK */
+ AM64X_IOPAD(0x0144, PIN_OUTPUT, 0) /* (Y11) PRG1_PRU1_GPO15.PR1_MII1_TXEN */
+ AM64X_IOPAD(0x0140, PIN_OUTPUT, 0) /* (AA11) PRG1_PRU1_GPO14.PR1_MII1_TXD3 */
+ AM64X_IOPAD(0x013c, PIN_OUTPUT, 0) /* (U10) PRG1_PRU1_GPO13.PR1_MII1_TXD2 */
+ AM64X_IOPAD(0x0138, PIN_OUTPUT, 0) /* (V10) PRG1_PRU1_GPO12.PR1_MII1_TXD1 */
+ AM64X_IOPAD(0x0134, PIN_OUTPUT, 0) /* (AA10) PRG1_PRU1_GPO11.PR1_MII1_TXD0 */
+ AM64X_IOPAD(0x0118, PIN_INPUT, 1) /* (W12) PRG1_PRU1_GPO4.PR1_MII1_RXDV */
+ AM64X_IOPAD(0x0120, PIN_INPUT, 1) /* (U11) PRG1_PRU1_GPO6.PR1_MII_MR1_CLK */
+ AM64X_IOPAD(0x0114, PIN_INPUT, 1) /* (Y12) PRG1_PRU1_GPO3.PR1_MII1_RXD3 */
+ AM64X_IOPAD(0x0110, PIN_INPUT, 1) /* (AA12) PRG1_PRU1_GPO2.PR1_MII1_RXD2 */
+ AM64X_IOPAD(0x011c, PIN_INPUT, 1) /* (AA13) PRG1_PRU1_GPO5.PR1_MII1_RXER */
+ AM64X_IOPAD(0x010c, PIN_INPUT, 1) /* (V11) PRG1_PRU1_GPO1.PR1_MII1_RXD1 */
+ AM64X_IOPAD(0x0108, PIN_INPUT, 1) /* (W11) PRG1_PRU1_GPO0.PR1_MII1_RXD0 */
+ AM64X_IOPAD(0x0128, PIN_INPUT, 1) /* (U12) PRG1_PRU1_GPO8.PR1_MII1_RXLINK */
+ >;
+ };
+};
+
+&cpsw3g {
+ pinctrl-0 = <&rgmii1_pins_default>;
+};
+
+&cpsw_port2 {
+ status = "disabled";
+};
+
+&mdio_mux_1 {
+ status = "disabled";
+};
+
+&icssg1_eth {
+ pinctrl-0 = <&icssg1_mii1_pins_default &icssg1_mii2_pins_default>;
+};
+
+&icssg1_emac0 {
+ phy-mode = "mii";
+};
+
+&icssg1_emac1 {
+ status = "okay";
+ phy-handle = <&icssg1_phy2>;
+ phy-mode = "mii";
+};
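
Note: like the other .dtso files in this series, the overlay above combines /plugin/ with two targeting styles: &label to patch an existing labelled node and &{/} to add nodes under the root of the base tree. A bare-bones overlay skeleton along those lines (vendor,example and some_label are placeholders):

    /dts-v1/;
    /plugin/;

    &{/} {
        /* add a new node under the base tree's root */
        example-node {
            compatible = "vendor,example";
        };
    };

    &some_label {
        /* patch an existing labelled node from the base dtb */
        status = "okay";
    };
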
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso b/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso
new file mode 100644
index 000000000000..f08c0e272b53
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/**
+ * DT overlay for HSE NAND expansion card on AM642 EVM
+ *
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "k3-pinctrl.h"
+
+&main_pmx0 {
+ gpmc0_pins_default: gpmc0-pins-default {
+ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0094, PIN_INPUT, 7) /* (T19) GPMC0_BE1n.GPIO0_36 */
+ AM64X_IOPAD(0x003c, PIN_INPUT, 0) /* (T20) GPMC0_AD0 */
+ AM64X_IOPAD(0x0040, PIN_INPUT, 0) /* (U21) GPMC0_AD1 */
+ AM64X_IOPAD(0x0064, PIN_INPUT, 0) /* (R16) GPMC0_AD10 */
+ AM64X_IOPAD(0x0068, PIN_INPUT, 0) /* (W20) GPMC0_AD11 */
+ AM64X_IOPAD(0x006c, PIN_INPUT, 0) /* (W21) GPMC0_AD12 */
+ AM64X_IOPAD(0x0070, PIN_INPUT, 0) /* (V18) GPMC0_AD13 */
+ AM64X_IOPAD(0x0074, PIN_INPUT, 0) /* (Y21) GPMC0_AD14 */
+ AM64X_IOPAD(0x0078, PIN_INPUT, 0) /* (Y20) GPMC0_AD15 */
+ AM64X_IOPAD(0x0044, PIN_INPUT, 0) /* (T18) GPMC0_AD2 */
+ AM64X_IOPAD(0x0048, PIN_INPUT, 0) /* (U20) GPMC0_AD3 */
+ AM64X_IOPAD(0x004c, PIN_INPUT, 0) /* (U18) GPMC0_AD4 */
+ AM64X_IOPAD(0x0050, PIN_INPUT, 0) /* (U19) GPMC0_AD5 */
+ AM64X_IOPAD(0x0054, PIN_INPUT, 0) /* (V20) GPMC0_AD6 */
+ AM64X_IOPAD(0x0058, PIN_INPUT, 0) /* (V21) GPMC0_AD7 */
+ AM64X_IOPAD(0x005c, PIN_INPUT, 0) /* (V19) GPMC0_AD8 */
+ AM64X_IOPAD(0x0060, PIN_INPUT, 0) /* (T17) GPMC0_AD9 */
+ AM64X_IOPAD(0x0098, PIN_INPUT_PULLUP, 0) /* (W19) GPMC0_WAIT0 */
+ AM64X_IOPAD(0x009c, PIN_INPUT_PULLUP, 0) /* (Y18) GPMC0_WAIT1 */
+ AM64X_IOPAD(0x00a8, PIN_OUTPUT_PULLUP, 0) /* (R19) GPMC0_CSn0 */
+ AM64X_IOPAD(0x00ac, PIN_OUTPUT_PULLUP, 0) /* (R20) GPMC0_CSn1 */
+ AM64X_IOPAD(0x00b0, PIN_OUTPUT_PULLUP, 0) /* (P19) GPMC0_CSn2 */
+ AM64X_IOPAD(0x00b4, PIN_OUTPUT_PULLUP, 0) /* (R21) GPMC0_CSn3 */
+ AM64X_IOPAD(0x007c, PIN_OUTPUT, 0) /* (R17) GPMC0_CLK */
+ AM64X_IOPAD(0x0084, PIN_OUTPUT, 0) /* (P16) GPMC0_ADVn_ALE */
+ AM64X_IOPAD(0x0088, PIN_OUTPUT, 0) /* (R18) GPMC0_OEn_REn */
+ AM64X_IOPAD(0x008c, PIN_OUTPUT, 0) /* (T21) GPMC0_WEn */
+ AM64X_IOPAD(0x0090, PIN_OUTPUT, 0) /* (P17) GPMC0_BE0n_CLE */
+ AM64X_IOPAD(0x00a0, PIN_OUTPUT_PULLUP, 0) /* (N16) GPMC0_WPn */
+ AM64X_IOPAD(0x00a4, PIN_OUTPUT, 0) /* (N17) GPMC0_DIR */
+ >;
+ };
+};
+
+&main_gpio0 {
+ gpio0-36 {
+ bootph-all;
+ gpio-hog;
+ gpios = <36 0>;
+ input;
+ line-name = "GPMC0_MUX_DIR";
+ };
+};
+
+&elm0 {
+ bootph-all;
+ status = "okay";
+};
+
+&gpmc0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmc0_pins_default>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ nand@0,0 {
+ compatible = "ti,am64-nand";
+ reg = <0 0 64>; /* device IO registers */
+ interrupt-parent = <&gpmc0>;
+ interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+ <1 IRQ_TYPE_NONE>; /* termcount */
+ rb-gpios = <&gpmc0 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
+ ti,nand-xfer-type = "prefetch-polled";
+ ti,nand-ecc-opt = "bch8"; /* BCH8: Bootrom limitation */
+ ti,elm-id = <&elm0>;
+ nand-bus-width = <8>;
+ gpmc,device-width = <1>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <40>;
+ gpmc,cs-wr-off-ns = <40>;
+ gpmc,adv-on-ns = <0>;
+ gpmc,adv-rd-off-ns = <25>;
+ gpmc,adv-wr-off-ns = <25>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <20>;
+ gpmc,oe-on-ns = <3>;
+ gpmc,oe-off-ns = <30>;
+ gpmc,access-ns = <30>;
+ gpmc,rd-cycle-ns = <40>;
+ gpmc,wr-cycle-ns = <40>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,clk-activation-ns = <0>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ bootph-all;
+ label = "NAND.tiboot3";
+ reg = <0x00000000 0x00200000>; /* 2M */
+ };
+ partition@200000 {
+ bootph-all;
+ label = "NAND.tispl";
+ reg = <0x00200000 0x00200000>; /* 2M */
+ };
+ partition@400000 {
+ bootph-all;
+ label = "NAND.tiboot3.backup"; /* 2M */
+ reg = <0x00400000 0x00200000>; /* BootROM looks at 4M */
+ };
+ partition@600000 {
+ bootph-all;
+ label = "NAND.u-boot";
+ reg = <0x00600000 0x00400000>; /* 4M */
+ };
+ partition@a00000 {
+ bootph-all;
+ label = "NAND.u-boot-env";
+ reg = <0x00a00000 0x00040000>; /* 256K */
+ };
+ partition@a40000 {
+ bootph-all;
+ label = "NAND.u-boot-env.backup";
+ reg = <0x00a40000 0x00040000>; /* 256K */
+ };
+ partition@a80000 {
+ bootph-all;
+ label = "NAND.file-system";
+ reg = <0x00a80000 0x3f580000>;
+ };
+ };
+ };
+};
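
Note: the NAND layout above uses the fixed-partitions binding, where each partition's unit-address mirrors the offset cell of its reg property and the second cell gives the size. A minimal sketch with hypothetical offsets and labels:

    partitions {
        compatible = "fixed-partitions";
        #address-cells = <1>;
        #size-cells = <1>;

        partition@0 {
            label = "boot";
            reg = <0x0 0x100000>;        /* offset 0, size 1 MiB */
        };

        partition@100000 {
            label = "rootfs";
            reg = <0x100000 0x700000>;   /* offset 1 MiB, size 7 MiB */
        };
    };
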
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
index e20e4ffd0f1f..6bb1ad2e56ec 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -466,6 +466,12 @@
AM64X_IOPAD(0x00f4, PIN_INPUT, 2) /* (Y9) PRG1_PRU0_GPO15.PRG1_RGMII1_TX_CTL */
>;
};
+
+ icssg1_iep0_pins_default: icssg1-iep0-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0104, PIN_OUTPUT, 2) /* (W7) PRG1_PRU0_GPO19.PRG1_IEP0_EDC_SYNC_OUT0 */
+ >;
+ };
};
&main_uart0 {
@@ -817,3 +823,12 @@
rx-internal-delay-ps = <2000>;
};
};
+
+&gpmc0 {
+ ranges = <0 0 0x00 0x51000000 0x01000000>; /* CS0 space. Min partition = 16MB */
+};
+
+&icssg1_iep0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&icssg1_iep0_pins_default>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts
index 234d76e4e944..5b5e9eeec5ac 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts
@@ -282,7 +282,6 @@
pinctrl-names = "default";
pinctrl-0 = <&main_uart3_default_pins>;
uart-has-rtscts;
- rs485-rts-active-low;
linux,rs485-enabled-at-boot-time;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-pcie-usb2.dtso b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-pcie-usb2.dtso
new file mode 100644
index 000000000000..7a5ce4bc02f3
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-pcie-usb2.dtso
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * DT overlay for PCIe support (limits USB to 2.0/high-speed)
+ *
+ * Copyright (C) 2021 PHYTEC America, LLC - https://www.phytec.com
+ * Author: Matt McKee <mmckee@phytec.com>
+ *
+ * Copyright (C) 2024 PHYTEC America, LLC - https://www.phytec.com
+ * Author: Nathan Morrisson <nmorrisson@phytec.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
+
+#include "k3-pinctrl.h"
+#include "k3-serdes.h"
+
+&{/} {
+ pcie_refclk0: pcie-refclk0 {
+ compatible = "gpio-gate-clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_usb_sel_pins_default>;
+ clocks = <&serdes_refclk>;
+ #clock-cells = <0>;
+ enable-gpios = <&main_gpio1 7 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&main_pmx0 {
+ pcie_usb_sel_pins_default: pcie-usb-sel-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x017c, PIN_OUTPUT, 7) /* (T1) PRG0_PRU0_GPO7.GPIO1_7 */
+ >;
+ };
+
+ pcie_pins_default: pcie-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0098, PIN_OUTPUT, 7) /* (W19) GPMC0_WAIT0.GPIO0_37 */
+ >;
+ };
+};
+
+&pcie0_rc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pins_default>;
+ reset-gpios = <&main_gpio0 37 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes0_pcie_usb_link>;
+ phy-names = "pcie-phy";
+ num-lanes = <1>;
+ status = "okay";
+};
+
+&serdes0_pcie_usb_link {
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+};
+
+&serdes_ln_ctrl {
+ idle-states = <AM64_SERDES0_LANE0_PCIE0>;
+};
+
+&serdes0 {
+ assigned-clock-parents = <&pcie_refclk0>, <&pcie_refclk0>, <&pcie_refclk0>;
+};
+
+&serdes_refclk {
+ clock-frequency = <100000000>;
+};
+
+/*
+ * Assign pcie_refclk0 to serdes_wiz0 as ext_ref_clk.
+ * This makes sure that the clock generator gets enabled at the right time.
+ */
+&serdes_wiz0 {
+ clocks = <&k3_clks 162 0>, <&k3_clks 162 1>, <&pcie_refclk0>;
+};
+
+&usbss0 {
+ ti,usb2-only;
+};
+
+&usb0 {
+ maximum-speed = "high-speed";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
index 6df331ccb970..30729b49dd69 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
@@ -190,18 +190,6 @@
>;
};
- pcie_usb_sel_pins_default: pcie-usb-sel-default-pins {
- pinctrl-single,pins = <
- AM64X_IOPAD(0x017c, PIN_OUTPUT, 7) /* (T1) PRG0_PRU0_GPO7.GPIO1_7 */
- >;
- };
-
- pcie0_pins_default: pcie0-default-pins {
- pinctrl-single,pins = <
- AM64X_IOPAD(0x0098, PIN_OUTPUT, 7) /* (W19) GPMC0_WAIT0.GPIO0_37 */
- >;
- };
-
user_leds_pins_default: user-leds-default-pins {
pinctrl-single,pins = <
AM64X_IOPAD(0x003c, PIN_OUTPUT, 7) /* (T20) GPMC0_AD0.GPIO0_15 */
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
index 5b028b3a3192..44ecbcf1c844 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -430,6 +430,18 @@
#gpio-cells = <2>;
gpio-line-names = "LED1","LED2","LED3","LED4","LED5","LED6","LED7","LED8";
};
+
+ /* SoC power supply temperature */
+ tmp100@48 {
+ compatible = "ti,tmp100";
+ reg = <0x48>;
+ };
+
+ /* DDR power supply temperature */
+ tmp100@49 {
+ compatible = "ti,tmp100";
+ reg = <0x49>;
+ };
};
/* mcu_gpio0 and mcu_gpio_intr are reserved for mcu firmware usage */
diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
index 1f4dc5ad1696..c40ad67cee01 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
@@ -1,7 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
- * Copyright (c) 2022-2023 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
+ * Copyright (c) 2022-2024 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
index 6c785eff7d2f..828d815d6bdf 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
@@ -1,7 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
- * Copyright (c) 2022-2023 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
+ * Copyright (c) 2022-2024 TQ-Systems GmbH <linux@ew.tq-group.com>, D-82229 Seefeld, Germany.
*/
#include "k3-am642.dtsi"
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi
index ef7897763ef8..0a29ed172215 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi
@@ -73,3 +73,15 @@
"rx0", "rx1",
"rxmgm0", "rxmgm1";
};
+
+&icssg0_iep0 {
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <7 7 7>;
+ interrupt-names = "iep_cap_cmp";
+};
+
+&icssg0_iep1 {
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <56 8 8>;
+ interrupt-names = "iep_cap_cmp";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index ed71561c5bd9..1af3dedde1f6 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -1185,6 +1185,9 @@
<0x22400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru0_0-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <16 2 2>;
+ interrupt-names = "vring";
};
rtu0_0: rtu@4000 {
@@ -1194,6 +1197,9 @@
<0x23400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu0_0-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <20 4 4>;
+ interrupt-names = "vring";
};
tx_pru0_0: txpru@a000 {
@@ -1212,6 +1218,9 @@
<0x24400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru0_1-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <18 3 3>;
+ interrupt-names = "vring";
};
rtu0_1: rtu@6000 {
@@ -1221,6 +1230,9 @@
<0x23c00 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu0_1-fw";
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <22 5 5>;
+ interrupt-names = "vring";
};
tx_pru0_1: txpru@c000 {
@@ -1339,6 +1351,9 @@
<0x22400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru1_0-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <16 2 2>;
+ interrupt-names = "vring";
};
rtu1_0: rtu@4000 {
@@ -1348,6 +1363,9 @@
<0x23400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu1_0-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <20 4 4>;
+ interrupt-names = "vring";
};
tx_pru1_0: txpru@a000 {
@@ -1366,6 +1384,9 @@
<0x24400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru1_1-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <18 3 3>;
+ interrupt-names = "vring";
};
rtu1_1: rtu@6000 {
@@ -1375,6 +1396,9 @@
<0x23c00 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu1_1-fw";
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <22 5 5>;
+ interrupt-names = "vring";
};
tx_pru1_1: txpru@c000 {
@@ -1493,6 +1517,9 @@
<0x22400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru2_0-fw";
+ interrupt-parent = <&icssg2_intc>;
+ interrupts = <16 2 2>;
+ interrupt-names = "vring";
};
rtu2_0: rtu@4000 {
@@ -1502,6 +1529,9 @@
<0x23400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu2_0-fw";
+ interrupt-parent = <&icssg2_intc>;
+ interrupts = <20 4 4>;
+ interrupt-names = "vring";
};
tx_pru2_0: txpru@a000 {
@@ -1520,6 +1550,9 @@
<0x24400 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-pru2_1-fw";
+ interrupt-parent = <&icssg2_intc>;
+ interrupts = <18 3 3>;
+ interrupt-names = "vring";
};
rtu2_1: rtu@6000 {
@@ -1529,6 +1562,9 @@
<0x23c00 0x100>;
reg-names = "iram", "control", "debug";
firmware-name = "am65x-rtu2_1-fw";
+ interrupt-parent = <&icssg2_intc>;
+ interrupts = <22 5 5>;
+ interrupt-names = "vring";
};
tx_pru2_1: txpru@c000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 8feab9317644..43c6118d2bf0 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -6,13 +6,17 @@
*/
&cbass_mcu {
- mcu_conf: scm-conf@40f00000 {
- compatible = "syscon", "simple-mfd";
- reg = <0x0 0x40f00000 0x0 0x20000>;
+ mcu_conf: bus@40f00000 {
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x40f00000 0x20000>;
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
+
phy_gmii_sel: phy@4040 {
compatible = "ti,am654-phy-gmii-sel";
reg = <0x4040 0x4>;
@@ -358,7 +362,7 @@
reg = <1>;
ti,mac-only;
label = "port1";
- ti,syscon-efuse = <&mcu_conf 0x200>;
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
phys = <&phy_gmii_sel 1>;
};
};
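
Note: the mcu_conf conversion above (repeated for J7200 and J721E further down) turns the old catch-all syscon into a simple-bus with a small, dedicated MAC-efuse syscon child, so the CPSW port now takes <&cpsw_mac_syscon 0x0>, i.e. a phandle plus an offset relative to the child's own reg window rather than an offset into the whole 128 KiB region. A minimal sketch of the pattern with placeholder labels (soc_cfg and mac_efuse are illustrative):

    soc_cfg: bus@40f00000 {
        compatible = "simple-bus";
        #address-cells = <1>;
        #size-cells = <1>;
        ranges = <0x0 0x0 0x40f00000 0x20000>;

        mac_efuse: ethernet-mac-syscon@200 {
            compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
            reg = <0x200 0x8>;
        };
    };

    /* consumer side: ti,syscon-efuse = <&mac_efuse 0x0>; */
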
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index aba0c52b1213..aa7139cc8a92 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -33,6 +33,7 @@
memory@80000000 {
device_type = "memory";
+ bootph-all;
/* 4G RAM */
reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
<0x00000008 0x80000000 0x00000000 0x80000000>;
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
index d743f023cdd9..90dbe31c5b81 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
@@ -414,6 +414,82 @@
pinctrl-0 = <&wkup_uart0_pins_default>;
};
+&wkup_i2c0 {
+ bootph-all;
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_i2c0_pins_default>;
+ status = "okay";
+
+ lp8733: pmic@60 {
+ compatible = "ti,lp8733";
+ reg = <0x60>;
+ buck0-in-supply = <&vsys_3v3>;
+ buck1-in-supply = <&vsys_3v3>;
+ ldo0-in-supply = <&vsys_3v3>;
+ ldo1-in-supply = <&vsys_3v3>;
+
+ lp8733_regulators: regulators {
+ lp8733_buck0_reg: buck0 {
+ /* FB_B0 -> LP8733-BUCK1 - VDD_MCU_0V85 */
+ regulator-name = "lp8733-buck0";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ lp8733_buck1_reg: buck1 {
+ /* FB_B1 -> LP8733-BUCK2 - VDD_DDR_1V1 */
+ regulator-name = "lp8733-buck1";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ lp8733_ldo0_reg: ldo0 {
+ /* LDO0 -> LP8733-LDO1 - VDA_DLL_0V8 */
+ regulator-name = "lp8733-ldo0";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ lp8733_ldo1_reg: ldo1 {
+ /* LDO1 -> LP8733-LDO2 - VDA_LN_1V8 */
+ regulator-name = "lp8733-ldo1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
+
+ tps62873a: regulator@40 {
+ compatible = "ti,tps62873";
+ reg = <0x40>;
+ bootph-pre-ram;
+ regulator-name = "VDD_CPU_AVS";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ tps62873b: regulator@43 {
+ compatible = "ti,tps62873";
+ reg = <0x43>;
+ regulator-name = "VDD_CORE_0V8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
&mcu_uart0 {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
index 0f4a5da0ebc4..5c66e0ec6e82 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
@@ -11,9 +11,10 @@
/ {
memory@80000000 {
device_type = "memory";
+ bootph-all;
/* 16 GB RAM */
- reg = <0x00 0x80000000 0x00 0x80000000>,
- <0x08 0x80000000 0x03 0x80000000>;
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000003 0x80000000>;
};
reserved_memory: reserved-memory {
@@ -130,6 +131,25 @@
};
};
+&wkup_pmx0 {
+ mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-pins {
+ bootph-all;
+ pinctrl-single,pins = <
+ J721S2_WKUP_IOPAD(0x000, PIN_OUTPUT, 0) /* (D19) MCU_OSPI0_CLK */
+ J721S2_WKUP_IOPAD(0x02c, PIN_OUTPUT, 0) /* (F15) MCU_OSPI0_CSn0 */
+ J721S2_WKUP_IOPAD(0x00c, PIN_INPUT, 0) /* (C19) MCU_OSPI0_D0 */
+ J721S2_WKUP_IOPAD(0x010, PIN_INPUT, 0) /* (F16) MCU_OSPI0_D1 */
+ J721S2_WKUP_IOPAD(0x014, PIN_INPUT, 0) /* (G15) MCU_OSPI0_D2 */
+ J721S2_WKUP_IOPAD(0x018, PIN_INPUT, 0) /* (F18) MCU_OSPI0_D3 */
+ J721S2_WKUP_IOPAD(0x01c, PIN_INPUT, 0) /* (E19) MCU_OSPI0_D4 */
+ J721S2_WKUP_IOPAD(0x020, PIN_INPUT, 0) /* (G19) MCU_OSPI0_D5 */
+ J721S2_WKUP_IOPAD(0x024, PIN_INPUT, 0) /* (F19) MCU_OSPI0_D6 */
+ J721S2_WKUP_IOPAD(0x028, PIN_INPUT, 0) /* (F20) MCU_OSPI0_D7 */
+ J721S2_WKUP_IOPAD(0x008, PIN_INPUT, 0) /* (E18) MCU_OSPI0_DQS */
+ >;
+ };
+};
+
&wkup_pmx2 {
wkup_i2c0_pins_default: wkup-i2c0-default-pins {
pinctrl-single,pins = <
@@ -152,6 +172,68 @@
};
};
+&ospi0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ spi-max-frequency = <25000000>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <4>;
+
+ partitions {
+ bootph-all;
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "ospi.tiboot3";
+ reg = <0x0 0x80000>;
+ };
+
+ partition@80000 {
+ label = "ospi.tispl";
+ reg = <0x80000 0x200000>;
+ };
+
+ partition@280000 {
+ label = "ospi.u-boot";
+ reg = <0x280000 0x400000>;
+ };
+
+ partition@680000 {
+ label = "ospi.env";
+ reg = <0x680000 0x40000>;
+ };
+
+ partition@740000 {
+ label = "ospi.env.backup";
+ reg = <0x740000 0x40000>;
+ };
+
+ partition@800000 {
+ label = "ospi.rootfs";
+ reg = <0x800000 0x37c0000>;
+ };
+
+ partition@3fc0000 {
+ bootph-pre-ram;
+ label = "ospi.phypattern";
+ reg = <0x3fc0000 0x40000>;
+ };
+ };
+ };
+};
+
&mailbox0_cluster0 {
status = "okay";
interrupts = <436>;
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
index d88651c297a2..3f655852244e 100644
--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
@@ -35,8 +35,8 @@
device_type = "memory";
bootph-all;
/* 32G RAM */
- reg = <0x00 0x80000000 0x00 0x80000000>,
- <0x08 0x80000000 0x07 0x80000000>;
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000007 0x80000000>;
};
reserved_memory: reserved-memory {
@@ -814,6 +814,27 @@
};
};
};
+
+ tps62873a: regulator@40 {
+ compatible = "ti,tps62873";
+ reg = <0x40>;
+ bootph-pre-ram;
+ regulator-name = "VDD_CPU_AVS";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ tps62873b: regulator@43 {
+ compatible = "ti,tps62873";
+ reg = <0x43>;
+ regulator-name = "VDD_CORE_0V8";
+ regulator-min-microvolt = <760000>;
+ regulator-max-microvolt = <840000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
};
&wkup_gpio0 {
@@ -1203,3 +1224,65 @@
};
};
};
+
+&serdes_ln_ctrl {
+ idle-states = <J784S4_SERDES0_LANE0_PCIE1_LANE0>, <J784S4_SERDES0_LANE1_PCIE1_LANE1>,
+ <J784S4_SERDES0_LANE2_PCIE3_LANE0>, <J784S4_SERDES0_LANE3_USB>,
+ <J784S4_SERDES1_LANE0_PCIE0_LANE0>, <J784S4_SERDES1_LANE1_PCIE0_LANE1>,
+ <J784S4_SERDES1_LANE2_PCIE0_LANE2>, <J784S4_SERDES1_LANE3_PCIE0_LANE3>;
+};
+
+&serdes_wiz0 {
+ status = "okay";
+};
+
+&serdes0 {
+ status = "okay";
+
+ serdes0_pcie_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <3>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>, <&serdes_wiz0 3>;
+ };
+};
+
+&serdes_wiz1 {
+ status = "okay";
+};
+
+&serdes1 {
+ status = "okay";
+
+ serdes1_pcie_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <4>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ resets = <&serdes_wiz1 1>, <&serdes_wiz1 2>, <&serdes_wiz1 3>, <&serdes_wiz1 4>;
+ };
+};
+
+&pcie0_rc {
+ status = "okay";
+ reset-gpios = <&exp1 4 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes1_pcie_link>;
+ phy-names = "pcie-phy";
+};
+
+&pcie1_rc {
+ status = "okay";
+ reset-gpios = <&exp1 5 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ num-lanes = <2>;
+};
+
+&pcie3_rc {
+ status = "okay";
+ reset-gpios = <&exp1 6 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ num-lanes = <1>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-eth-phy.dtso b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-eth-phy.dtso
new file mode 100644
index 000000000000..356c82bbe143
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-eth-phy.dtso
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2023 PHYTEC America, LLC
+ * Author: Garrett Giordano <ggiordano@phytec.com>
+ *
+ * Copyright (C) 2024 PHYTEC America, LLC
+ * Author: Nathan Morrisson <nmorrisson@phytec.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+&cpsw3g_phy1 {
+ status = "disabled";
+};
+
+&cpsw_port1 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-rtc.dtso b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-rtc.dtso
new file mode 100644
index 000000000000..8b24191f5948
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-rtc.dtso
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2023 PHYTEC America, LLC
+ * Author: Garrett Giordano <ggiordano@phytec.com>
+ *
+ * Copyright (C) 2024 PHYTEC America, LLC
+ * Author: Nathan Morrisson <nmorrisson@phytec.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+&i2c_som_rtc {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-spi-nor.dtso b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-spi-nor.dtso
new file mode 100644
index 000000000000..cc0cf269b6e4
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-disable-spi-nor.dtso
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2023 PHYTEC America, LLC
+ * Author: Garrett Giordano <ggiordano@phytec.com>
+ *
+ * Copyright (C) 2024 PHYTEC America, LLC
+ * Author: Nathan Morrisson <nmorrisson@phytec.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+&serial_flash {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am6xx-phycore-qspi-nor.dtso b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-qspi-nor.dtso
new file mode 100644
index 000000000000..969dfebcd637
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6xx-phycore-qspi-nor.dtso
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2024 PHYTEC America LLC
+ * Author: Nathan Morrisson <nmorrisson@phytec.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "k3-pinctrl.h"
+
+&serial_flash {
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index fccaabfb1348..5097d192c2b2 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -164,12 +164,16 @@
ti,timer-pwm;
};
- mcu_conf: syscon@40f00000 {
- compatible = "syscon", "simple-mfd";
- reg = <0x00 0x40f00000 0x00 0x20000>;
+ mcu_conf: bus@40f00000 {
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x00 0x00 0x40f00000 0x20000>;
+ ranges = <0x0 0x0 0x40f00000 0x20000>;
+
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
phy_gmii_sel: phy@4040 {
compatible = "ti,am654-phy-gmii-sel";
@@ -420,7 +424,7 @@
reg = <1>;
ti,mac-only;
label = "port1";
- ti,syscon-efuse = <&mcu_conf 0x200>;
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
phys = <&phy_gmii_sel 1>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
index 7e6a584ac6f0..21fe194a5766 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
@@ -12,9 +12,10 @@
/ {
memory@80000000 {
device_type = "memory";
+ bootph-all;
/* 4G RAM */
- reg = <0x00 0x80000000 0x00 0x80000000>,
- <0x08 0x80000000 0x00 0x80000000>;
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000000 0x80000000>;
};
reserved_memory: reserved-memory {
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso
new file mode 100644
index 000000000000..65a7e54f0884
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment.dtso
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Infotainment Expansion Board for j721e-evm
+ * User Guide: <https://www.ti.com/lit/ug/spruit0a/spruit0a.pdf>
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "k3-pinctrl.h"
+
+&{/} {
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "a";
+ ddc-i2c-bus = <&main_i2c1>;
+ digital;
+ /* P12 - HDMI_HPD */
+ hpd-gpios = <&exp6 10 GPIO_ACTIVE_HIGH>;
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&tfp410_out>;
+ };
+ };
+ };
+
+ dvi-bridge {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "ti,tfp410";
+ /* P10 - HDMI_PDn */
+ powerdown-gpios = <&exp6 8 GPIO_ACTIVE_LOW>;
+
+ port@0 {
+ reg = <0>;
+
+ tfp410_in: endpoint {
+ remote-endpoint = <&dpi_out0>;
+ pclk-sample = <1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ tfp410_out: endpoint {
+ remote-endpoint =
+ <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&main_pmx0 {
+ main_i2c1_exp6_pins_default: main-i2c1-exp6-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x264, PIN_INPUT, 7) /* (T29) MMC2_DAT2.GPIO1_24 */
+ >;
+ };
+
+ dss_vout0_pins_default: dss-vout0-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x58, PIN_OUTPUT, 10) /* (AE22) PRG1_PRU1_GPO0.VOUT0_DATA0 */
+ J721E_IOPAD(0x5c, PIN_OUTPUT, 10) /* (AG23) PRG1_PRU1_GPO1.VOUT0_DATA1 */
+ J721E_IOPAD(0x60, PIN_OUTPUT, 10) /* (AF23) PRG1_PRU1_GPO2.VOUT0_DATA2 */
+ J721E_IOPAD(0x64, PIN_OUTPUT, 10) /* (AD23) PRG1_PRU1_GPO3.VOUT0_DATA3 */
+ J721E_IOPAD(0x68, PIN_OUTPUT, 10) /* (AH24) PRG1_PRU1_GPO4.VOUT0_DATA4 */
+ J721E_IOPAD(0x6c, PIN_OUTPUT, 10) /* (AG21) PRG1_PRU1_GPO5.VOUT0_DATA5 */
+ J721E_IOPAD(0x70, PIN_OUTPUT, 10) /* (AE23) PRG1_PRU1_GPO6.VOUT0_DATA6 */
+ J721E_IOPAD(0x74, PIN_OUTPUT, 10) /* (AC21) PRG1_PRU1_GPO7.VOUT0_DATA7 */
+ J721E_IOPAD(0x78, PIN_OUTPUT, 10) /* (Y23) PRG1_PRU1_GPO8.VOUT0_DATA8 */
+ J721E_IOPAD(0x7c, PIN_OUTPUT, 10) /* (AF21) PRG1_PRU1_GPO9.VOUT0_DATA9 */
+ J721E_IOPAD(0x80, PIN_OUTPUT, 10) /* (AB23) PRG1_PRU1_GPO10.VOUT0_DATA10 */
+ J721E_IOPAD(0x84, PIN_OUTPUT, 10) /* (AJ25) PRG1_PRU1_GPO11.VOUT0_DATA11 */
+ J721E_IOPAD(0x88, PIN_OUTPUT, 10) /* (AH25) PRG1_PRU1_GPO12.VOUT0_DATA12 */
+ J721E_IOPAD(0x8c, PIN_OUTPUT, 10) /* (AG25) PRG1_PRU1_GPO13.VOUT0_DATA13 */
+ J721E_IOPAD(0x90, PIN_OUTPUT, 10) /* (AH26) PRG1_PRU1_GPO14.VOUT0_DATA14 */
+ J721E_IOPAD(0x94, PIN_OUTPUT, 10) /* (AJ27) PRG1_PRU1_GPO15.VOUT0_DATA15 */
+ J721E_IOPAD(0x30, PIN_OUTPUT, 10) /* (AF24) PRG1_PRU0_GPO11.VOUT0_DATA16 */
+ J721E_IOPAD(0x34, PIN_OUTPUT, 10) /* (AJ24) PRG1_PRU0_GPO12.VOUT0_DATA17 */
+ J721E_IOPAD(0x38, PIN_OUTPUT, 10) /* (AG24) PRG1_PRU0_GPO13.VOUT0_DATA18 */
+ J721E_IOPAD(0x3c, PIN_OUTPUT, 10) /* (AD24) PRG1_PRU0_GPO14.VOUT0_DATA19 */
+ J721E_IOPAD(0x40, PIN_OUTPUT, 10) /* (AC24) PRG1_PRU0_GPO15.VOUT0_DATA20 */
+ J721E_IOPAD(0x44, PIN_OUTPUT, 10) /* (AE24) PRG1_PRU0_GPO16.VOUT0_DATA21 */
+ J721E_IOPAD(0x24, PIN_OUTPUT, 10) /* (AJ20) PRG1_PRU0_GPO8.VOUT0_DATA22 */
+ J721E_IOPAD(0x28, PIN_OUTPUT, 10) /* (AG20) PRG1_PRU0_GPO9.VOUT0_DATA23 */
+ J721E_IOPAD(0x9c, PIN_OUTPUT, 10) /* (AC22) PRG1_PRU1_GPO17.VOUT0_DE */
+ J721E_IOPAD(0x98, PIN_OUTPUT, 10) /* (AJ26) PRG1_PRU1_GPO16.VOUT0_HSYNC */
+ J721E_IOPAD(0xa4, PIN_OUTPUT, 10) /* (AH22) PRG1_PRU1_GPO19.VOUT0_PCLK */
+ J721E_IOPAD(0xa0, PIN_OUTPUT, 10) /* (AJ22) PRG1_PRU1_GPO18.VOUT0_VSYNC */
+ >;
+ };
+};
+
+&exp1 {
+ p14-hog {
+ /* P14 - VINOUT_MUX_SEL0 */
+ gpio-hog;
+ gpios = <12 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "VINOUT_MUX_SEL0";
+ };
+
+ p15-hog {
+ /* P15 - VINOUT_MUX_SEL1 */
+ gpio-hog;
+ gpios = <13 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "VINOUT_MUX_SEL1";
+ };
+};
+
+&main_i2c1 {
+ /* i2c1 is used for DVI DDC, so we need to use 100kHz */
+ clock-frequency = <100000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ exp6: gpio@21 {
+ compatible = "ti,tca6416";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_exp6_pins_default>;
+ interrupt-parent = <&main_gpio1>;
+ interrupts = <24 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ p11-hog {
+ /* P11 - HDMI_DDC_OE */
+ gpio-hog;
+ gpios = <9 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "HDMI_DDC_OE";
+ };
+ };
+};
+
+&dss {
+ pinctrl-names = "default";
+ pinctrl-0 = <&dss_vout0_pins_default>;
+};
+
+&dss_ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ dpi_out0: endpoint {
+ remote-endpoint = <&tfp410_in>;
+ };
+ };
+};
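
Note: the infotainment overlay above wires up the display pipeline with of-graph links: DSS video port 1 feeds the TFP410 DPI-to-DVI bridge, whose second port feeds the HDMI connector, each hop being two endpoint nodes that reference each other via remote-endpoint. A one-hop sketch of that linkage with placeholder labels (disp_out and bridge_in are illustrative):

    &display_controller {
        port {
            disp_out: endpoint {
                remote-endpoint = <&bridge_in>;
            };
        };
    };

    hdmi-bridge {
        port {
            bridge_in: endpoint {
                remote-endpoint = <&disp_out>;
            };
        };
    };
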
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 9349ae07c046..6b6ef6a30614 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -34,13 +34,17 @@
};
};
- mcu_conf: syscon@40f00000 {
- compatible = "syscon", "simple-mfd";
- reg = <0x0 0x40f00000 0x0 0x20000>;
+ mcu_conf: bus@40f00000 {
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x40f00000 0x20000>;
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
+
phy_gmii_sel: phy@4040 {
compatible = "ti,am654-phy-gmii-sel";
reg = <0x4040 0x4>;
@@ -546,7 +550,7 @@
reg = <1>;
ti,mac-only;
label = "port1";
- ti,syscon-efuse = <&mcu_conf 0x200>;
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
phys = <&phy_gmii_sel 1>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
index 0c4575ad8d7c..89fbfb21e5d3 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
@@ -31,6 +31,7 @@
memory@80000000 {
device_type = "memory";
+ bootph-all;
/* 4G RAM */
reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
<0x00000008 0x80000000 0x00000000 0x80000000>;
@@ -210,6 +211,42 @@
<3300000 0x1>;
};
+ transceiver1: can-phy1 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_gpio_pins_default>;
+ standby-gpios = <&wkup_gpio0 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ transceiver2: can-phy2 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan0_gpio_pins_default>;
+ standby-gpios = <&main_gpio0 65 GPIO_ACTIVE_HIGH>;
+ };
+
+ transceiver3: can-phy3 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan5_gpio_pins_default>;
+ standby-gpios = <&main_gpio0 66 GPIO_ACTIVE_HIGH>;
+ };
+
+ transceiver4: can-phy4 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan9_gpio_pins_default>;
+ standby-gpios = <&main_gpio0 67 GPIO_ACTIVE_HIGH>;
+ };
+
dp_pwr_3v3: fixedregulator-dp-prw {
compatible = "regulator-fixed";
regulator-name = "dp-pwr";
@@ -367,6 +404,45 @@
>;
};
+ main_mcan0_pins_default: main-mcan0-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x208, PIN_INPUT, 0) /* (W5) MCAN0_RX */
+ J721E_IOPAD(0x20c, PIN_OUTPUT, 0) /* (W6) MCAN0_TX */
+ >;
+ };
+
+ main_mcan0_gpio_pins_default: main-mcan0-gpio-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x108, PIN_INPUT, 7) /* (AD27) PRG0_PRU1_GPO2.GPIO0_65 */
+ >;
+ };
+
+ main_mcan5_pins_default: main-mcan5-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x050, PIN_INPUT, 6) /* (AE21) PRG1_PRU0_GPO18.MCAN5_RX */
+ J721E_IOPAD(0x04c, PIN_OUTPUT, 6) /* (AJ21) PRG1_PRU0_GPO17.MCAN5_TX */
+ >;
+ };
+
+ main_mcan5_gpio_pins_default: main-mcan5-gpio-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x10c, PIN_INPUT, 7) /* (AC25) PRG0_PRU1_GPO3.GPIO0_66 */
+ >;
+ };
+
+ main_mcan9_pins_default: main-mcan9-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x0d0, PIN_INPUT, 6) /* (AC27) PRG0_PRU0_GPO8.MCAN9_RX */
+ J721E_IOPAD(0x0cc, PIN_OUTPUT, 6) /* (AC28) PRG0_PRU0_GPO7.MCAN9_TX */
+ >;
+ };
+
+ main_mcan9_gpio_pins_default: main-mcan9-gpio-default-pins {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0x110, PIN_INPUT, 7) /* (AD29) PRG0_PRU1_GPO4.GPIO0_67 */
+ >;
+ };
+
dp0_pins_default: dp0-default-pins {
pinctrl-single,pins = <
J721E_IOPAD(0x1c4, PIN_INPUT, 5) /* SPI0_CS1.DP0_HPD */
@@ -555,6 +631,19 @@
>;
};
+ mcu_mcan0_pins_default: mcu-mcan0-default-pins {
+ pinctrl-single,pins = <
+ J721E_WKUP_IOPAD(0x0ac, PIN_INPUT, 0) /* (C29) MCU_MCAN0_RX */
+ J721E_WKUP_IOPAD(0x0a8, PIN_OUTPUT, 0) /* (D29) MCU_MCAN0_TX */
+ >;
+ };
+
+ mcu_mcan0_gpio_pins_default: mcu-mcan0-gpio-default-pins {
+ pinctrl-single,pins = <
+ J721E_WKUP_IOPAD(0x0bc, PIN_INPUT, 7) /* (F27) WKUP_GPIO0_3 */
+ >;
+ };
+
/* Reset for M.2 M Key slot on PCIe1 */
mkey_reset_pins_default: mkey-reset-pns-default-pins {
pinctrl-single,pins = <
@@ -1108,6 +1197,34 @@
num-lanes = <2>;
};
+&mcu_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_pins_default>;
+ phys = <&transceiver1>;
+ status = "okay";
+};
+
+&main_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan0_pins_default>;
+ phys = <&transceiver2>;
+ status = "okay";
+};
+
+&main_mcan5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan5_pins_default>;
+ phys = <&transceiver3>;
+ status = "okay";
+};
+
+&main_mcan9 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan9_pins_default>;
+ phys = <&transceiver4>;
+ status = "okay";
+};
+
&ufs_wrapper {
status = "disabled";
};
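Each of the four CAN interfaces enabled on the SK board follows the same recipe: one pinmux group for the controller's RX/TX pads, one routing a pad to the GPIO that drives the transceiver standby pin, a tcan1042 PHY node, and the M_CAN controller picking the PHY up through phys. Condensed to a single instance, copied from the additions above with explanatory comments only:

    transceiver1: can-phy1 {
        compatible = "ti,tcan1042";
        #phy-cells = <0>;
        max-bitrate = <5000000>;                        /* upper bound advertised to the MCAN driver */
        pinctrl-names = "default";
        pinctrl-0 = <&mcu_mcan0_gpio_pins_default>;     /* WKUP_GPIO0_3 muxed as plain GPIO */
        standby-gpios = <&wkup_gpio0 3 GPIO_ACTIVE_HIGH>;
    };

    &mcu_mcan0 {
        pinctrl-names = "default";
        pinctrl-0 = <&mcu_mcan0_pins_default>;          /* MCU_MCAN0_RX/TX pads */
        phys = <&transceiver1>;                         /* ties standby and bitrate limits to the port */
        status = "okay";
    };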
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
index 1fae6495db07..5ba947771b84 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
@@ -12,6 +12,7 @@
/ {
memory@80000000 {
device_type = "memory";
+ bootph-all;
/* 4G RAM */
reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
<0x00000008 0x80000000 0x00000000 0x80000000>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
index 5ccb04c7c462..8feb42c89e47 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
@@ -139,13 +139,17 @@
ti,interrupt-ranges = <16 960 16>;
};
- mcu_conf: syscon@40f00000 {
- compatible = "syscon", "simple-mfd";
- reg = <0x0 0x40f00000 0x0 0x20000>;
+ mcu_conf: bus@40f00000 {
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x40f00000 0x20000>;
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
+
phy_gmii_sel: phy@4040 {
compatible = "ti,am654-phy-gmii-sel";
reg = <0x4040 0x4>;
@@ -544,7 +548,7 @@
reg = <1>;
ti,mac-only;
label = "port1";
- ti,syscon-efuse = <&mcu_conf 0x200>;
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
phys = <&phy_gmii_sel 1>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
index 623c8421525d..82aacc01e8fe 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
@@ -13,9 +13,10 @@
/ {
memory@80000000 {
device_type = "memory";
+ bootph-all;
/* 16 GB RAM */
- reg = <0x00 0x80000000 0x00 0x80000000>,
- <0x08 0x80000000 0x03 0x80000000>;
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000003 0x80000000>;
};
/* Reserving memory regions still pending */
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
index bf3c246d13d1..dd3b5f7039d7 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
@@ -9,7 +9,9 @@
/dts-v1/;
#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/phy/phy.h>
#include "k3-j722s.dtsi"
+#include "k3-serdes.h"
/ {
compatible = "ti,j722s-evm", "ti,j722s";
@@ -105,6 +107,15 @@
<3300000 0x1>;
};
+ vsys_io_3v3: regulator-vsys-io-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_io_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vsys_io_1v8: regulator-vsys-io-1v8 {
compatible = "regulator-fixed";
regulator-name = "vsys_io_1v8";
@@ -122,6 +133,35 @@
regulator-always-on;
regulator-boot-on;
};
+
+ codec_audio: sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "J722S-EVM";
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack",
+ "Line", "Line In",
+ "Microphone", "Microphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT",
+ "LINE1L", "Line In",
+ "LINE1R", "Line In",
+ "MIC3R", "Microphone Jack",
+ "Microphone Jack", "Mic Bias";
+ simple-audio-card,format = "dsp_b";
+ simple-audio-card,bitclock-master = <&sound_master>;
+ simple-audio-card,frame-master = <&sound_master>;
+ simple-audio-card,bitclock-inversion;
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp1>;
+ };
+
+ sound_master: simple-audio-card,codec {
+ sound-dai = <&tlv320aic3106>;
+ clocks = <&audio_refclk1>;
+ };
+ };
};
&main_pmx0 {
@@ -202,6 +242,27 @@
J722S_IOPAD(0x012c, PIN_OUTPUT, 0) /* (AF25) RGMII1_TX_CTL */
>;
};
+
+ main_usb1_pins_default: main-usb1-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x0258, PIN_INPUT, 0) /* (B27) USB1_DRVVBUS */
+ >;
+ };
+
+ main_mcasp1_pins_default: main-mcasp1-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x0090, PIN_INPUT, 2) /* (P27) GPMC0_BE0n_CLE.MCASP1_ACLKX */
+ J722S_IOPAD(0x0098, PIN_INPUT, 2) /* (V21) GPMC0_WAIT0.MCASP1_AFSX */
+ J722S_IOPAD(0x008c, PIN_OUTPUT, 2) /* (N23) GPMC0_WEn.MCASP1_AXR0 */
+ J722S_IOPAD(0x0084, PIN_INPUT, 2) /* (N21) GPMC0_ADVn_ALE.MCASP1_AXR2 */
+ >;
+ };
+
+ audio_ext_refclk1_pins_default: audio-ext-refclk1-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x00a0, PIN_OUTPUT, 1) /* (N24) GPMC0_WPn.AUDIO_EXT_REFCLK1 */
+ >;
+ };
};
&cpsw3g {
@@ -277,6 +338,12 @@
bootph-all;
};
+&k3_clks {
+ /* Configure AUDIO_EXT_REFCLK1 pin as output */
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_ext_refclk1_pins_default>;
+};
+
&main_i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&main_i2c0_pins_default>;
@@ -301,6 +368,48 @@
"PCIe0_1L_RC_RSTz", "PCIe0_1L_PRSNT#",
"ENET1_EXP_SPARE2", "ENET1_EXP_PWRDN",
"PD_I2ENET1_I2CMUX_SELC_IRQ", "ENET1_EXP_RESETZ";
+
+ p05-hog {
+ /* P05 - USB2.0_MUX_SEL */
+ gpio-hog;
+ gpios = <5 GPIO_ACTIVE_HIGH>;
+ output-high;
+ };
+
+ p01_hog: p01-hog {
+ /* P01 - TRC_MUX_SEL */
+ gpio-hog;
+ gpios = <0 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "TRC_MUX_SEL";
+ };
+
+ p02_hog: p02-hog {
+ /* P02 - MCASP1_FET_SEL */
+ gpio-hog;
+ gpios = <2 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "MCASP1_FET_SEL";
+ };
+
+ p13_hog: p13-hog {
+ /* P13 - GPIO_AUD_RSTn */
+ gpio-hog;
+ gpios = <13 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "GPIO_AUD_RSTn";
+ };
+ };
+
+ tlv320aic3106: audio-codec@1b {
+ #sound-dai-cells = <0>;
+ compatible = "ti,tlv320aic3106";
+ reg = <0x1b>;
+ ai3x-micbias-vg = <1>; /* 2.0V */
+ AVDD-supply = <&vsys_io_3v3>;
+ IOVDD-supply = <&vsys_io_3v3>;
+ DRVDD-supply = <&vsys_io_3v3>;
+ DVDD-supply = <&vsys_io_1v8>;
};
};
@@ -384,3 +493,76 @@
status = "okay";
bootph-all;
};
+
+&serdes_ln_ctrl {
+ idle-states = <J722S_SERDES0_LANE0_USB>,
+ <J722S_SERDES1_LANE0_PCIE0_LANE0>;
+};
+
+&serdes0 {
+ status = "okay";
+ serdes0_usb_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_USB3>;
+ resets = <&serdes_wiz0 1>;
+ };
+};
+
+&serdes1 {
+ status = "okay";
+ serdes1_pcie_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ resets = <&serdes_wiz1 1>;
+ };
+};
+
+&pcie0_rc {
+ reset-gpios = <&exp1 18 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes1_pcie_link>;
+ phy-names = "pcie-phy";
+ status = "okay";
+};
+
+&usbss0 {
+ ti,vbus-divider;
+ status = "okay";
+};
+
+&usb0 {
+ dr_mode = "otg";
+ usb-role-switch;
+};
+
+&usbss1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_usb1_pins_default>;
+ ti,vbus-divider;
+ status = "okay";
+};
+
+&usb1 {
+ dr_mode = "host";
+ maximum-speed = "super-speed";
+ phys = <&serdes0_usb_link>;
+ phy-names = "cdns3,usb3-phy";
+};
+
+&mcasp1 {
+ status = "okay";
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcasp1_pins_default>;
+ op-mode = <0>; /* MCASP_IIS_MODE */
+ tdm-slots = <2>;
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 1 0 2 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ >;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
new file mode 100644
index 000000000000..c797980528ec
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Device Tree file for the J722S MAIN domain peripherals
+ *
+ * Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <dt-bindings/phy/phy-cadence.h>
+#include <dt-bindings/phy/phy-ti.h>
+
+/ {
+ serdes_refclk: clk-0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+};
+
+&cbass_main {
+ serdes_wiz0: phy@f000000 {
+ compatible = "ti,am64-wiz-10g";
+ ranges = <0x0f000000 0x0 0x0f000000 0x00010000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ power-domains = <&k3_pds 279 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 279 0>, <&k3_clks 279 1>, <&serdes_refclk>;
+ clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ num-lanes = <1>;
+ #reset-cells = <1>;
+ #clock-cells = <1>;
+
+ assigned-clocks = <&k3_clks 279 1>;
+ assigned-clock-parents = <&k3_clks 279 5>;
+
+ serdes0: serdes@f000000 {
+ compatible = "ti,j721e-serdes-10g";
+ reg = <0x0f000000 0x00010000>;
+ reg-names = "torrent_phy";
+ resets = <&serdes_wiz0 0>;
+ reset-names = "torrent_reset";
+ clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
+ <&serdes_wiz0 TI_WIZ_PHY_EN_REFCLK>;
+ clock-names = "refclk", "phy_en_refclk";
+ assigned-clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
+ <&serdes_wiz0 TI_WIZ_PLL1_REFCLK>,
+ <&serdes_wiz0 TI_WIZ_REFCLK_DIG>;
+ assigned-clock-parents = <&k3_clks 279 1>,
+ <&k3_clks 279 1>,
+ <&k3_clks 279 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+
+ status = "disabled"; /* Needs lane config */
+ };
+ };
+
+ serdes_wiz1: phy@f010000 {
+ compatible = "ti,am64-wiz-10g";
+ ranges = <0x0f010000 0x0 0x0f010000 0x00010000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ power-domains = <&k3_pds 280 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 280 0>, <&k3_clks 280 1>, <&serdes_refclk>;
+ clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ num-lanes = <1>;
+ #reset-cells = <1>;
+ #clock-cells = <1>;
+
+ assigned-clocks = <&k3_clks 280 1>;
+ assigned-clock-parents = <&k3_clks 280 5>;
+
+ serdes1: serdes@f010000 {
+ compatible = "ti,j721e-serdes-10g";
+ reg = <0x0f010000 0x00010000>;
+ reg-names = "torrent_phy";
+ resets = <&serdes_wiz1 0>;
+ reset-names = "torrent_reset";
+ clocks = <&serdes_wiz1 TI_WIZ_PLL0_REFCLK>,
+ <&serdes_wiz1 TI_WIZ_PHY_EN_REFCLK>;
+ clock-names = "refclk", "phy_en_refclk";
+ assigned-clocks = <&serdes_wiz1 TI_WIZ_PLL0_REFCLK>,
+ <&serdes_wiz1 TI_WIZ_PLL1_REFCLK>,
+ <&serdes_wiz1 TI_WIZ_REFCLK_DIG>;
+ assigned-clock-parents = <&k3_clks 280 1>,
+ <&k3_clks 280 1>,
+ <&k3_clks 280 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+
+ status = "disabled"; /* Needs lane config */
+ };
+ };
+
+ pcie0_rc: pcie@f102000 {
+ compatible = "ti,j722s-pcie-host", "ti,j721e-pcie-host";
+ reg = <0x00 0x0f102000 0x00 0x1000>,
+ <0x00 0x0f100000 0x00 0x400>,
+ <0x00 0x0d000000 0x00 0x00800000>,
+ <0x00 0x68000000 0x00 0x00001000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+ ranges = <0x01000000 0x00 0x68001000 0x00 0x68001000 0x00 0x0010000>,
+ <0x02000000 0x00 0x68011000 0x00 0x68011000 0x00 0x7fef000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 99 IRQ_TYPE_EDGE_RISING>;
+ device_type = "pci";
+ max-link-speed = <3>;
+ num-lanes = <1>;
+ power-domains = <&k3_pds 259 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 259 0>, <&serdes1 CDNS_TORRENT_REFCLK_DRIVER>;
+ clock-names = "fck", "pcie_refclk";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xff>;
+ vendor-id = <0x104c>;
+ device-id = <0xb010>;
+ cdns,no-bar-match-nbits = <64>;
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
+ msi-map = <0x0 &gic_its 0x0 0x10000>;
+ status = "disabled";
+ };
+
+ usbss1: usb@f920000 {
+ compatible = "ti,j721e-usb";
+ reg = <0x00 0x0f920000 0x00 0x100>;
+ power-domains = <&k3_pds 278 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 278 3>, <&k3_clks 278 1>;
+ clock-names = "ref", "lpm";
+ assigned-clocks = <&k3_clks 278 3>; /* USB2_REFCLK */
+ assigned-clock-parents = <&k3_clks 278 4>; /* HF0SC0 */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb1: usb@31200000 {

+ compatible = "cdns,usb3";
+ reg = <0x00 0x31200000 0x00 0x10000>,
+ <0x00 0x31210000 0x00 0x10000>,
+ <0x00 0x31220000 0x00 0x10000>;
+ reg-names = "otg",
+ "xhci",
+ "dev";
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>; /* otgirq */
+ interrupt-names = "host",
+ "peripheral",
+ "otg";
+ maximum-speed = "super-speed";
+ dr_mode = "otg";
+ };
+ };
+};
+
+&main_conf {
+ serdes_ln_ctrl: mux-controller@4080 {
+ compatible = "reg-mux";
+ reg = <0x4080 0x14>;
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x00 0x3>, /* SERDES0 lane0 select */
+ <0x10 0x3>; /* SERDES1 lane0 select */
+ };
+
+ audio_refclk1: clock@82e4 {
+ compatible = "ti,am62-audio-refclk";
+ reg = <0x82e4 0x4>;
+ clocks = <&k3_clks 157 18>;
+ assigned-clocks = <&k3_clks 157 18>;
+ assigned-clock-parents = <&k3_clks 157 33>;
+ #clock-cells = <0>;
+ };
+};
+
+&wkup_conf {
+ pcie0_ctrl: pcie0-ctrl@4070 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4070 0x4>;
+ };
+};
+
+&oc_sram {
+ reg = <0x00 0x70000000 0x00 0x40000>;
+ ranges = <0x00 0x00 0x70000000 0x40000>;
+};
+
+&inta_main_dmss {
+ ti,interrupt-ranges = <7 71 21>;
+};
+
+&main_pmx0 {
+ pinctrl-single,gpio-range =
+ <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 33 55 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 101 25 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,
+ <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>;
+
+ main_pmx0_range: gpio-range {
+ #pinctrl-single,gpio-range-cells = <3>;
+ };
+};
+
+&main_gpio0 {
+ gpio-ranges = <&main_pmx0 0 0 32>, <&main_pmx0 32 33 38>,
+ <&main_pmx0 70 72 17>;
+ ti,ngpio = <87>;
+};
+
+&main_gpio1 {
+ gpio-ranges = <&main_pmx0 7 101 25>, <&main_pmx0 42 137 5>,
+ <&main_pmx0 47 143 3>, <&main_pmx0 50 149 2>;
+ ti,ngpio = <73>;
+};
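Both SERDES instances added here stay disabled because the lane assignment is a board decision. A board file enables one by adding a lane child with the Cadence PHY type, pointing the consumer at it, and setting the lane-mux idle state, which is exactly what k3-j722s-evm.dts does earlier in this series. A minimal sketch for the USB3 case, using the labels from the nodes above and the PHY_TYPE_USB3 constant from dt-bindings/phy/phy.h:

    &serdes0 {
        status = "okay";

        serdes0_usb_link: phy@0 {
            reg = <0>;                          /* lane 0 of the single-lane SERDES */
            cdns,num-lanes = <1>;
            #phy-cells = <0>;
            cdns,phy-type = <PHY_TYPE_USB3>;
            resets = <&serdes_wiz0 1>;          /* per-lane reset from the WIZ wrapper */
        };
    };

    &usb1 {
        phys = <&serdes0_usb_link>;
        phy-names = "cdns3,usb3-phy";
    };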
diff --git a/arch/arm64/boot/dts/ti/k3-j722s.dtsi b/arch/arm64/boot/dts/ti/k3-j722s.dtsi
index c75744edb143..14c6c6a332ef 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j722s.dtsi
@@ -10,11 +10,133 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
-#include "k3-am62p5.dtsi"
+#include "k3-pinctrl.h"
/ {
model = "Texas Instruments K3 J722S SoC";
compatible = "ti,j722s";
+ interrupt-parent = <&gic500>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0: cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a53";
+ reg = <0x000>;
+ device_type = "cpu";
+ enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+ clocks = <&k3_clks 135 0>;
+ };
+
+ cpu1: cpu@1 {
+ compatible = "arm,cortex-a53";
+ reg = <0x001>;
+ device_type = "cpu";
+ enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+ clocks = <&k3_clks 136 0>;
+ };
+
+ cpu2: cpu@2 {
+ compatible = "arm,cortex-a53";
+ reg = <0x002>;
+ device_type = "cpu";
+ enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+ clocks = <&k3_clks 137 0>;
+ };
+
+ cpu3: cpu@3 {
+ compatible = "arm,cortex-a53";
+ reg = <0x003>;
+ device_type = "cpu";
+ enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+ clocks = <&k3_clks 138 0>;
+ };
+ };
+
+ l2_0: l2-cache0 {
+ compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
+ cache-size = <0x80000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+
+ psci: psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+ };
+
+ a53_timer0: timer-cl0-cpu0 {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */
+ };
+
+ pmu: pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
cbass_main: bus@f0000 {
compatible = "simple-bus";
@@ -74,16 +196,39 @@
<0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>,
<0x00 0x78000000 0x00 0x78000000 0x00 0x00008000>,
<0x00 0x78100000 0x00 0x78100000 0x00 0x00008000>;
- };
-};
-/* Main domain overrides */
+ cbass_mcu: bus@4000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x00 0x04000000 0x00 0x04000000 0x00 0x01ff1400>, /* Peripheral window */
+ <0x00 0x79000000 0x00 0x79000000 0x00 0x00008000>, /* MCU R5 ATCM */
+ <0x00 0x79020000 0x00 0x79020000 0x00 0x00008000>, /* MCU R5 BTCM */
+ <0x00 0x79100000 0x00 0x79100000 0x00 0x00040000>, /* MCU IRAM0 */
+ <0x00 0x79140000 0x00 0x79140000 0x00 0x00040000>; /* MCU IRAM1 */
+ bootph-all;
+ };
-&inta_main_dmss {
- ti,interrupt-ranges = <7 71 21>;
-};
+ cbass_wakeup: bus@b00000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x00 0x00b00000 0x00 0x00b00000 0x00 0x00002400>, /* VTM */
+ <0x00 0x2b000000 0x00 0x2b000000 0x00 0x00300400>, /* Peripheral Window */
+ <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>, /* WKUP CTRL MMR */
+ <0x00 0x78000000 0x00 0x78000000 0x00 0x00008000>, /* DM R5 ATCM */
+ <0x00 0x78100000 0x00 0x78100000 0x00 0x00008000>; /* DM R5 BTCM */
+ bootph-all;
+ };
+ };
-&oc_sram {
- reg = <0x00 0x70000000 0x00 0x40000>;
- ranges = <0x00 0x00 0x70000000 0x40000>;
+ #include "k3-am62p-j722s-common-thermal.dtsi"
};
+
+/* Include peripherals shared with AM62P */
+#include "k3-am62p-j722s-common-main.dtsi"
+#include "k3-am62p-j722s-common-mcu.dtsi"
+#include "k3-am62p-j722s-common-wakeup.dtsi"
+
+/* Include J722S specific peripherals */
+#include "k3-j722s-main.dtsi"
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm-pcie0-pcie1-ep.dtso b/arch/arm64/boot/dts/ti/k3-j784s4-evm-pcie0-pcie1-ep.dtso
new file mode 100644
index 000000000000..685305092bd8
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm-pcie0-pcie1-ep.dtso
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/**
+ * DT Overlay for enabling PCIE0 and PCIE1 instances in Endpoint Configuration
+ * on J784S4 EVM.
+ *
+ * J784S4 EVM Product Link: https://www.ti.com/tool/J784S4XEVM
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+#include "k3-pinctrl.h"
+
+/*
+ * Since Root Complex and Endpoint modes are mutually exclusive,
+ * disable Root Complex mode.
+ */
+&pcie0_rc {
+ status = "disabled";
+};
+
+&pcie1_rc {
+ status = "disabled";
+};
+
+&cbass_main {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic500>;
+
+ pcie0_ep: pcie-ep@2900000 {
+ compatible = "ti,j784s4-pcie-ep";
+ reg = <0x00 0x02900000 0x00 0x1000>,
+ <0x00 0x02907000 0x00 0x400>,
+ <0x00 0x0d000000 0x00 0x00800000>,
+ <0x00 0x10000000 0x00 0x08000000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "mem";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
+ max-link-speed = <3>;
+ num-lanes = <4>;
+ power-domains = <&k3_pds 332 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 332 0>;
+ clock-names = "fck";
+ max-functions = /bits/ 8 <6>;
+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>;
+ dma-coherent;
+ phys = <&serdes1_pcie0_link>;
+ phy-names = "pcie-phy";
+ };
+
+ pcie1_ep: pcie-ep@2910000 {
+ compatible = "ti,j784s4-pcie-ep";
+ reg = <0x00 0x02910000 0x00 0x1000>,
+ <0x00 0x02917000 0x00 0x400>,
+ <0x00 0x0d800000 0x00 0x00800000>,
+ <0x00 0x18000000 0x00 0x08000000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "mem";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
+ max-link-speed = <3>;
+ num-lanes = <2>;
+ power-domains = <&k3_pds 333 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 333 0>;
+ clock-names = "fck";
+ max-functions = /bits/ 8 <6>;
+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>;
+ dma-coherent;
+ phys = <&serdes0_pcie1_link>;
+ phy-names = "pcie-phy";
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso b/arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso
new file mode 100644
index 000000000000..dcd2c7c39ec3
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/**
+ * DT Overlay for CPSW9G in QSGMII mode using the J7 Quad Port ETH EXP Add-On Ethernet Card
+ * with the J784S4 EVM. The Add-On Ethernet Card has to be connected to the ENET Expansion 1
+ * slot on the board.
+ *
+ * Product Datasheet: https://www.ti.com/lit/ug/spruj74/spruj74.pdf
+ *
+ * Link to QSGMII Daughtercard: https://www.ti.com/tool/J721EXENETXPANEVM
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy-cadence.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "k3-pinctrl.h"
+#include "k3-serdes.h"
+
+&{/} {
+ aliases {
+ ethernet1 = "/bus@100000/ethernet@c000000/ethernet-ports/port@5";
+ ethernet2 = "/bus@100000/ethernet@c000000/ethernet-ports/port@6";
+ ethernet3 = "/bus@100000/ethernet@c000000/ethernet-ports/port@7";
+ ethernet4 = "/bus@100000/ethernet@c000000/ethernet-ports/port@8";
+ ethernet5 = "/bus@100000/ethernet@c200000/ethernet-ports/port@1";
+ };
+};
+
+&main_cpsw0 {
+ status = "okay";
+};
+
+&main_cpsw0_port5 {
+ phy-handle = <&cpsw9g_phy1>;
+ phy-mode = "qsgmii";
+ mac-address = [00 00 00 00 00 00];
+ phys = <&cpsw0_phy_gmii_sel 5>, <&serdes2_qsgmii_link>;
+ phy-names = "mac", "serdes";
+ status = "okay";
+};
+
+&main_cpsw0_port6 {
+ phy-handle = <&cpsw9g_phy2>;
+ phy-mode = "qsgmii";
+ mac-address = [00 00 00 00 00 00];
+ phys = <&cpsw0_phy_gmii_sel 6>, <&serdes2_qsgmii_link>;
+ phy-names = "mac", "serdes";
+ status = "okay";
+};
+
+&main_cpsw0_port7 {
+ phy-handle = <&cpsw9g_phy0>;
+ phy-mode = "qsgmii";
+ mac-address = [00 00 00 00 00 00];
+ phys = <&cpsw0_phy_gmii_sel 7>, <&serdes2_qsgmii_link>;
+ phy-names = "mac", "serdes";
+ status = "okay";
+};
+
+&main_cpsw0_port8 {
+ phy-handle = <&cpsw9g_phy3>;
+ phy-mode = "qsgmii";
+ mac-address = [00 00 00 00 00 00];
+ phys = <&cpsw0_phy_gmii_sel 8>, <&serdes2_qsgmii_link>;
+ phy-names = "mac", "serdes";
+ status = "okay";
+};
+
+&main_cpsw0_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio0_default_pins>;
+ bus_freq = <1000000>;
+ reset-gpios = <&exp2 17 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <120000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ cpsw9g_phy0: ethernet-phy@16 {
+ reg = <16>;
+ };
+ cpsw9g_phy1: ethernet-phy@17 {
+ reg = <17>;
+ };
+ cpsw9g_phy2: ethernet-phy@18 {
+ reg = <18>;
+ };
+ cpsw9g_phy3: ethernet-phy@19 {
+ reg = <19>;
+ };
+};
+
+&exp2 {
+ /* Power up the ENET1 EXPANDER PHY. */
+ qsgmii-line-hog {
+ gpio-hog;
+ gpios = <16 GPIO_ACTIVE_HIGH>;
+ output-low;
+ };
+
+ /* Toggle MUX2 for MDIO lines */
+ mux-sel-hog {
+ gpio-hog;
+ gpios = <13 GPIO_ACTIVE_HIGH>, <14 GPIO_ACTIVE_HIGH>, <15 GPIO_ACTIVE_HIGH>;
+ output-high;
+ };
+};
+
+&main_pmx0 {
+ mdio0_default_pins: mdio0-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x05c, PIN_INPUT, 4) /* (AC36) MCASP2_AXR0.MDIO1_MDIO */
+ J784S4_IOPAD(0x058, PIN_INPUT, 4) /* (AE37) MCASP2_AFSX.MDIO1_MDC */
+ >;
+ };
+};
+
+&serdes_ln_ctrl {
+ idle-states = <J784S4_SERDES0_LANE0_PCIE1_LANE0>, <J784S4_SERDES0_LANE1_PCIE1_LANE1>,
+ <J784S4_SERDES0_LANE2_IP3_UNUSED>, <J784S4_SERDES0_LANE3_USB>,
+ <J784S4_SERDES1_LANE0_PCIE0_LANE0>, <J784S4_SERDES1_LANE1_PCIE0_LANE1>,
+ <J784S4_SERDES1_LANE2_PCIE0_LANE2>, <J784S4_SERDES1_LANE3_PCIE0_LANE3>,
+ <J784S4_SERDES2_LANE0_QSGMII_LANE5>, <J784S4_SERDES2_LANE1_QSGMII_LANE6>,
+ <J784S4_SERDES2_LANE2_QSGMII_LANE7>, <J784S4_SERDES2_LANE3_QSGMII_LANE8>;
+};
+
+&serdes_wiz2 {
+ status = "okay";
+};
+
+&serdes2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ serdes2_qsgmii_link: phy@0 {
+ reg = <2>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_QSGMII>;
+ resets = <&serdes_wiz2 3>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso b/arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso
new file mode 100644
index 000000000000..d5f8c8531923
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm-usxgmii-exp1-exp2.dtso
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/**
+ * DT Overlay for CPSW9G in dual-port fixed-link USXGMII mode using the ENET-1
+ * and ENET-2 Expansion slots of the J784S4 EVM.
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy-cadence.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "k3-serdes.h"
+
+&{/} {
+ aliases {
+ ethernet1 = "/bus@100000/ethernet@c000000/ethernet-ports/port@1";
+ ethernet2 = "/bus@100000/ethernet@c000000/ethernet-ports/port@2";
+ ethernet3 = "/bus@100000/ethernet@c200000/ethernet-ports/port@1";
+ };
+};
+
+&main_cpsw0 {
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&main_cpsw0_port1 {
+ phy-mode = "usxgmii";
+ mac-address = [00 00 00 00 00 00];
+ phys = <&cpsw0_phy_gmii_sel 1>, <&serdes2_usxgmii_link>;
+ phy-names = "mac", "serdes";
+ status = "okay";
+ fixed-link {
+ speed = <5000>;
+ full-duplex;
+ };
+};
+
+&main_cpsw0_port2 {
+ phy-mode = "usxgmii";
+ mac-address = [00 00 00 00 00 00];
+ phys = <&cpsw0_phy_gmii_sel 2>, <&serdes2_usxgmii_link>;
+ phy-names = "mac", "serdes";
+ status = "okay";
+ fixed-link {
+ speed = <5000>;
+ full-duplex;
+ };
+};
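Because there is no copper PHY on these USXGMII links, both ports use a fixed-link sub-node rather than a phy-handle: the MAC is brought up at a fixed speed and duplex with no autonegotiation or MDIO state to poll. The shape used above, for reference (speed is in Mbit/s, so 5000 corresponds to the 5G USXGMII rate configured here):

    fixed-link {
        speed = <5000>;     /* Mbit/s; no autoneg over the SERDES link */
        full-duplex;
    };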
+
+&serdes_wiz2 {
+ assigned-clock-parents = <&k3_clks 406 9>; /* Use 156.25 MHz clock for USXGMII */
+ status = "okay";
+};
+
+&serdes2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ serdes2_usxgmii_link: phy@2 {
+ reg = <2>;
+ cdns,num-lanes = <2>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_USXGMII>;
+ resets = <&serdes_wiz2 3>, <&serdes_wiz2 4>;
+ };
+};
+
+&serdes_ln_ctrl {
+ idle-states = <J784S4_SERDES0_LANE0_PCIE1_LANE0>, <J784S4_SERDES0_LANE1_PCIE1_LANE1>,
+ <J784S4_SERDES0_LANE2_IP3_UNUSED>, <J784S4_SERDES0_LANE3_USB>,
+ <J784S4_SERDES1_LANE0_PCIE0_LANE0>, <J784S4_SERDES1_LANE1_PCIE0_LANE1>,
+ <J784S4_SERDES1_LANE2_PCIE0_LANE2>, <J784S4_SERDES1_LANE3_PCIE0_LANE3>,
+ <J784S4_SERDES2_LANE0_IP2_UNUSED>, <J784S4_SERDES2_LANE1_IP2_UNUSED>,
+ <J784S4_SERDES2_LANE2_QSGMII_LANE1>, <J784S4_SERDES2_LANE3_QSGMII_LANE2>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
index d511b25d62e3..9338d987180d 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
@@ -27,14 +27,16 @@
mmc1 = &main_sdhci1;
i2c0 = &wkup_i2c0;
i2c3 = &main_i2c0;
+ ethernet0 = &mcu_cpsw_port1;
+ ethernet1 = &main_cpsw1_port1;
};
memory@80000000 {
device_type = "memory";
bootph-all;
/* 32G RAM */
- reg = <0x00 0x80000000 0x00 0x80000000>,
- <0x08 0x80000000 0x07 0x80000000>;
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000007 0x80000000>;
};
reserved_memory: reserved-memory {
@@ -272,6 +274,59 @@
};
};
};
+
+ transceiver0: can-phy0 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_gpio_pins_default>;
+ standby-gpios = <&wkup_gpio0 69 GPIO_ACTIVE_HIGH>;
+ };
+
+ transceiver1: can-phy1 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan1_gpio_pins_default>;
+ standby-gpios = <&wkup_gpio0 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ transceiver2: can-phy2 {
+ /* standby pin is grounded by default */
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ };
+
+ transceiver3: can-phy3 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ standby-gpios = <&exp2 7 GPIO_ACTIVE_HIGH>;
+ mux-states = <&mux1 1>;
+ };
+
+ mux1: mux-controller {
+ compatible = "gpio-mux";
+ #mux-state-cells = <1>;
+ mux-gpios = <&exp2 14 GPIO_ACTIVE_HIGH>;
+ idle-state = <1>;
+ };
+
+ codec_audio: sound {
+ compatible = "ti,j7200-cpb-audio";
+ model = "j784s4-cpb";
+
+ ti,cpb-mcasp = <&mcasp0>;
+ ti,cpb-codec = <&pcm3168a_1>;
+
+ clocks = <&k3_clks 265 0>, <&k3_clks 265 1>,
+ <&k3_clks 157 34>, <&k3_clks 157 63>;
+ clock-names = "cpb-mcasp-auxclk", "cpb-mcasp-auxclk-48000",
+ "cpb-codec-scki", "cpb-codec-scki-48000";
+ };
};
&wkup_gpio0 {
@@ -280,6 +335,30 @@
&main_pmx0 {
bootph-all;
+ main_cpsw2g_default_pins: main-cpsw2g-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x0b8, PIN_INPUT, 6) /* (AC34) MCASP1_ACLKX.RGMII1_RD0 */
+ J784S4_IOPAD(0x0a0, PIN_INPUT, 6) /* (AD34) MCASP0_AXR12.RGMII1_RD1 */
+ J784S4_IOPAD(0x0a4, PIN_INPUT, 6) /* (AJ36) MCASP0_AXR13.RGMII1_RD2 */
+ J784S4_IOPAD(0x0a8, PIN_INPUT, 6) /* (AF34) MCASP0_AXR14.RGMII1_RD3 */
+ J784S4_IOPAD(0x0b0, PIN_INPUT, 6) /* (AL33) MCASP1_AXR3.RGMII1_RXC */
+ J784S4_IOPAD(0x0ac, PIN_INPUT, 6) /* (AE34) MCASP0_AXR15.RGMII1_RX_CTL */
+ J784S4_IOPAD(0x08c, PIN_INPUT, 6) /* (AE35) MCASP0_AXR7.RGMII1_TD0 */
+ J784S4_IOPAD(0x090, PIN_INPUT, 6) /* (AC35) MCASP0_AXR8.RGMII1_TD1 */
+ J784S4_IOPAD(0x094, PIN_INPUT, 6) /* (AG35) MCASP0_AXR9.RGMII1_TD2 */
+ J784S4_IOPAD(0x098, PIN_INPUT, 6) /* (AH36) MCASP0_AXR10.RGMII1_TD3 */
+ J784S4_IOPAD(0x0b4, PIN_INPUT, 6) /* (AL34) MCASP1_AXR4.RGMII1_TXC */
+ J784S4_IOPAD(0x09c, PIN_INPUT, 6) /* (AF35) MCASP0_AXR11.RGMII1_TX_CTL */
+ >;
+ };
+
+ main_cpsw2g_mdio_default_pins: main-cpsw2g-mdio-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x0c0, PIN_INPUT, 6) /* (AD38) MCASP1_AXR0.MDIO0_MDC */
+ J784S4_IOPAD(0x0bc, PIN_INPUT, 6) /* (AD33) MCASP1_AFSX.MDIO0_MDIO */
+ >;
+ };
+
main_uart8_pins_default: main-uart8-default-pins {
bootph-all;
pinctrl-single,pins = <
@@ -336,6 +415,49 @@
J784S4_IOPAD(0x010, PIN_INPUT_PULLUP, 8) /* (AH33) MCAN13_RX.I2C4_SDA */
>;
};
+
+ main_mcan4_pins_default: main-mcan4-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x088, PIN_INPUT, 0) /* (AF36) MCAN4_RX */
+ J784S4_IOPAD(0x084, PIN_OUTPUT, 0) /* (AG38) MCAN4_TX */
+ >;
+ };
+
+ main_mcan16_pins_default: main-mcan16-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x028, PIN_INPUT, 0) /* (AE33) MCAN16_RX */
+ J784S4_IOPAD(0x024, PIN_OUTPUT, 0) /* (AH34) MCAN16_TX */
+ >;
+ };
+
+ main_usbss0_pins_default: main-usbss0-default-pins {
+ bootph-all;
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x0ec, PIN_OUTPUT, 6) /* (AN37) TIMER_IO1.USB0_DRVVBUS */
+ >;
+ };
+
+ main_i2c3_pins_default: main-i2c3-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x064, PIN_INPUT, 13) /* (AF38) MCAN0_TX.I2C3_SCL */
+ J784S4_IOPAD(0x060, PIN_INPUT, 13) /* (AE36) MCASP2_AXR1.I2C3_SDA */
+ >;
+ };
+
+ main_mcasp0_pins_default: main-mcasp0-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x038, PIN_OUTPUT_PULLDOWN, 1) /* (AK35) MCASP0_ACLKX */
+ J784S4_IOPAD(0x03c, PIN_OUTPUT_PULLDOWN, 1) /* (AK38) MCASP0_AFSX */
+ J784S4_IOPAD(0x07c, PIN_OUTPUT_PULLDOWN, 1) /* (AJ38) MCASP0_AXR3 */
+ J784S4_IOPAD(0x080, PIN_INPUT_PULLDOWN, 1) /* (AK34) MCASP0_AXR4 */
+ >;
+ };
+
+ audio_ext_refclk1_pins_default: audio-ext-refclk1-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x078, PIN_OUTPUT, 1) /* (AH37) MCAN2_RX.AUDIO_EXT_REFCLK1 */
+ >;
+ };
};
&wkup_pmx2 {
@@ -415,6 +537,32 @@
J784S4_WKUP_IOPAD(0x108, PIN_INPUT, 0) /* (Y36) MCU_ADC1_AIN7 */
>;
};
+
+ mcu_mcan0_pins_default: mcu-mcan0-default-pins {
+ pinctrl-single,pins = <
+ J784S4_WKUP_IOPAD(0x050, PIN_OUTPUT, 0) /* (K33) MCU_MCAN0_TX */
+ J784S4_WKUP_IOPAD(0x054, PIN_INPUT, 0) /* (F38) MCU_MCAN0_RX */
+ >;
+ };
+
+ mcu_mcan1_pins_default: mcu-mcan1-default-pins {
+ pinctrl-single,pins = <
+ J784S4_WKUP_IOPAD(0x068, PIN_OUTPUT, 0) /* (H35) WKUP_GPIO0_4.MCU_MCAN1_TX */
+ J784S4_WKUP_IOPAD(0x06c, PIN_INPUT, 0) /* (K36) WKUP_GPIO0_5.MCU_MCAN1_RX */
+ >;
+ };
+
+ mcu_mcan0_gpio_pins_default: mcu-mcan0-gpio-default-pins {
+ pinctrl-single,pins = <
+ J784S4_WKUP_IOPAD(0x040, PIN_INPUT, 7) /* (J38) MCU_SPI0_D1.WKUP_GPIO0_69 */
+ >;
+ };
+
+ mcu_mcan1_gpio_pins_default: mcu-mcan1-gpio-default-pins {
+ pinctrl-single,pins = <
+ J784S4_WKUP_IOPAD(0x060, PIN_INPUT, 7) /* (J35) WKUP_GPIO0_2 */
+ >;
+ };
};
&wkup_pmx1 {
@@ -579,6 +727,27 @@
};
};
};
+
+ tps62873a: regulator@40 {
+ compatible = "ti,tps62873";
+ reg = <0x40>;
+ bootph-pre-ram;
+ regulator-name = "VDD_CPU_AVS";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1330000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ tps62873b: regulator@43 {
+ compatible = "ti,tps62873";
+ reg = <0x43>;
+ regulator-name = "VDD_CORE_0V8";
+ regulator-min-microvolt = <760000>;
+ regulator-max-microvolt = <840000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
};
&mcu_uart0 {
@@ -748,6 +917,14 @@
"PCIE0_4L_RC_RSTZ", "PCIE0_4L_EP_RST_EN", "PCIE1_4L_PRSNT#",
"PCIE0_4L_PRSNT#", "CDCI1_OE1/OE4", "CDCI1_OE2/OE3",
"AUDIO_MUX_SEL", "EXP_MUX2", "EXP_MUX3", "GESI_EXP_PHY_RSTZ";
+
+ p12-hog {
+ /* P12 - AUDIO_MUX_SEL */
+ gpio-hog;
+ gpios = <12 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "AUDIO_MUX_SEL";
+ };
};
exp2: gpio@22 {
@@ -763,6 +940,22 @@
"CANUART_MUX1_SEL1", "ENET1_EXP_PWRDN", "ENET1_EXP_RESETZ",
"ENET1_I2CMUX_SEL", "ENET1_EXP_SPARE2", "ENET2_EXP_RESETZ",
"USER_INPUT1", "USER_LED1", "USER_LED2";
+
+ p13-hog {
+ /* P13 - CANUART_MUX_SEL0 */
+ gpio-hog;
+ gpios = <13 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "CANUART_MUX_SEL0";
+ };
+
+ p15-hog {
+ /* P15 - CANUART_MUX1_SEL1 */
+ gpio-hog;
+ gpios = <15 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "CANUART_MUX1_SEL1";
+ };
};
};
@@ -832,6 +1025,31 @@
phy-handle = <&mcu_phy0>;
};
+&main_cpsw1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_cpsw2g_default_pins>;
+ status = "okay";
+};
+
+&main_cpsw1_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_cpsw2g_mdio_default_pins>;
+ status = "okay";
+
+ main_cpsw1_phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,min-output-impedance;
+ };
+};
+
+&main_cpsw1_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&main_cpsw1_phy0>;
+ status = "okay";
+};
+
&mailbox0_cluster0 {
status = "okay";
interrupts = <436>;
@@ -1041,6 +1259,40 @@
<&k3_clks 218 22>;
};
+&serdes0 {
+ status = "okay";
+
+ serdes0_usb_link: phy@3 {
+ reg = <3>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_USB3>;
+ resets = <&serdes_wiz0 4>;
+ };
+};
+
+&serdes_wiz0 {
+ status = "okay";
+};
+
+&usb_serdes_mux {
+ idle-states = <0>; /* USB0 to SERDES lane 3 */
+};
+
+&usbss0 {
+ status = "okay";
+ pinctrl-0 = <&main_usbss0_pins_default>;
+ pinctrl-names = "default";
+ ti,vbus-divider;
+};
+
+&usb0 {
+ dr_mode = "otg";
+ maximum-speed = "super-speed";
+ phys = <&serdes0_usb_link>;
+ phy-names = "cdns3,usb3-phy";
+};
+
&serdes_wiz4 {
status = "okay";
};
@@ -1105,3 +1357,130 @@
};
};
};
+
+&mcu_mcan0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_pins_default>;
+ phys = <&transceiver0>;
+};
+
+&mcu_mcan1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan1_pins_default>;
+ phys = <&transceiver1>;
+};
+
+&main_mcan16 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan16_pins_default>;
+ phys = <&transceiver2>;
+};
+
+&main_mcan4 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan4_pins_default>;
+ phys = <&transceiver3>;
+};
+
+&serdes0 {
+ status = "okay";
+
+ serdes0_pcie1_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <4>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>,
+ <&serdes_wiz0 3>, <&serdes_wiz0 4>;
+ };
+};
+
+&serdes_wiz0 {
+ status = "okay";
+};
+
+&pcie1_rc {
+ status = "okay";
+ num-lanes = <2>;
+ reset-gpios = <&exp1 2 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes0_pcie1_link>;
+ phy-names = "pcie-phy";
+};
+
+&serdes1 {
+ status = "okay";
+
+ serdes1_pcie0_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <2>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ resets = <&serdes_wiz1 1>, <&serdes_wiz1 2>;
+ };
+};
+
+&serdes_wiz1 {
+ status = "okay";
+};
+
+&pcie0_rc {
+ status = "okay";
+ reset-gpios = <&exp1 6 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes1_pcie0_link>;
+ phy-names = "pcie-phy";
+};
+
+&k3_clks {
+ /* Configure AUDIO_EXT_REFCLK1 pin as output */
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_ext_refclk1_pins_default>;
+};
+
+&main_i2c3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c3_pins_default>;
+ clock-frequency = <400000>;
+
+ exp3: gpio@20 {
+ compatible = "ti,tca6408";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pcm3168a_1: audio-codec@44 {
+ compatible = "ti,pcm3168a";
+ reg = <0x44>;
+ #sound-dai-cells = <1>;
+ reset-gpios = <&exp3 0 GPIO_ACTIVE_LOW>;
+ clocks = <&audio_refclk1>;
+ clock-names = "scki";
+ VDD1-supply = <&vsys_3v3>;
+ VDD2-supply = <&vsys_3v3>;
+ VCCAD1-supply = <&vsys_5v0>;
+ VCCAD2-supply = <&vsys_5v0>;
+ VCCDA1-supply = <&vsys_5v0>;
+ VCCDA2-supply = <&vsys_5v0>;
+ };
+};
+
+&mcasp0 {
+ status = "okay";
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcasp0_pins_default>;
+ op-mode = <0>; /* MCASP_IIS_MODE */
+ tdm-slots = <2>;
+ auxclk-fs-ratio = <256>;
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 0 0 0 1
+ 2 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ >;
+};
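The serial-dir array has one entry per McASP serializer (AXR pin), in order, with the 0/1/2 legend from the comment. For mcasp0 above only AXR3 (an output in main_mcasp0_pins_default, TX towards the codec) and AXR4 (an input, RX from the codec) are wired, so the 16-entry map reads as:

    serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
        0 0 0 1     /* AXR0..AXR3: only AXR3 transmits */
        2 0 0 0     /* AXR4..AXR7: only AXR4 receives */
        0 0 0 0
        0 0 0 0
    >;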
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
index 6a4554c6c9c1..f170f80f00c1 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
@@ -48,6 +48,39 @@
#size-cells = <1>;
ranges = <0x00 0x00 0x00100000 0x1c000>;
+ cpsw1_phy_gmii_sel: phy@4034 {
+ compatible = "ti,am654-phy-gmii-sel";
+ reg = <0x4034 0x4>;
+ #phy-cells = <1>;
+ };
+
+ cpsw0_phy_gmii_sel: phy@4044 {
+ compatible = "ti,j784s4-cpsw9g-phy-gmii-sel";
+ reg = <0x4044 0x20>;
+ #phy-cells = <1>;
+ ti,qsgmii-main-ports = <7>, <7>;
+ };
+
+ pcie0_ctrl: pcie0-ctrl@4070 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4070 0x4>;
+ };
+
+ pcie1_ctrl: pcie1-ctrl@4074 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4074 0x4>;
+ };
+
+ pcie2_ctrl: pcie2-ctrl@4078 {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x4078 0x4>;
+ };
+
+ pcie3_ctrl: pcie3-ctrl@407c {
+ compatible = "ti,j784s4-pcie-ctrl", "syscon";
+ reg = <0x407c 0x4>;
+ };
+
serdes_ln_ctrl: mux-controller@4080 {
compatible = "reg-mux";
reg = <0x00004080 0x30>;
@@ -75,6 +108,88 @@
<J784S4_SERDES4_LANE2_EDP_LANE2>,
<J784S4_SERDES4_LANE3_EDP_LANE3>;
};
+
+ usb_serdes_mux: mux-controller@4000 {
+ compatible = "reg-mux";
+ reg = <0x4000 0x4>;
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x0 0x8000000>; /* USB0 to SERDES0 lane 3 mux */
+ };
+
+ ehrpwm_tbclk: clock-controller@4140 {
+ compatible = "ti,am654-ehrpwm-tbclk";
+ reg = <0x4140 0x18>;
+ #clock-cells = <1>;
+ };
+
+ audio_refclk1: clock@82e4 {
+ compatible = "ti,am62-audio-refclk";
+ reg = <0x82e4 0x4>;
+ clocks = <&k3_clks 157 34>;
+ assigned-clocks = <&k3_clks 157 34>;
+ assigned-clock-parents = <&k3_clks 157 63>;
+ #clock-cells = <0>;
+ };
+ };
+
+ main_ehrpwm0: pwm@3000000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ reg = <0x00 0x3000000 0x00 0x100>;
+ clocks = <&ehrpwm_tbclk 0>, <&k3_clks 219 0>;
+ clock-names = "tbclk", "fck";
+ power-domains = <&k3_pds 219 TI_SCI_PD_EXCLUSIVE>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ main_ehrpwm1: pwm@3010000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ reg = <0x00 0x3010000 0x00 0x100>;
+ clocks = <&ehrpwm_tbclk 1>, <&k3_clks 220 0>;
+ clock-names = "tbclk", "fck";
+ power-domains = <&k3_pds 220 TI_SCI_PD_EXCLUSIVE>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ main_ehrpwm2: pwm@3020000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ reg = <0x00 0x3020000 0x00 0x100>;
+ clocks = <&ehrpwm_tbclk 2>, <&k3_clks 221 0>;
+ clock-names = "tbclk", "fck";
+ power-domains = <&k3_pds 221 TI_SCI_PD_EXCLUSIVE>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ main_ehrpwm3: pwm@3030000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ reg = <0x00 0x3030000 0x00 0x100>;
+ clocks = <&ehrpwm_tbclk 3>, <&k3_clks 222 0>;
+ clock-names = "tbclk", "fck";
+ power-domains = <&k3_pds 222 TI_SCI_PD_EXCLUSIVE>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ main_ehrpwm4: pwm@3040000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ reg = <0x00 0x3040000 0x00 0x100>;
+ clocks = <&ehrpwm_tbclk 4>, <&k3_clks 223 0>;
+ clock-names = "tbclk", "fck";
+ power-domains = <&k3_pds 223 TI_SCI_PD_EXCLUSIVE>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ main_ehrpwm5: pwm@3050000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ reg = <0x00 0x3050000 0x00 0x100>;
+ clocks = <&ehrpwm_tbclk 5>, <&k3_clks 224 0>;
+ clock-names = "tbclk", "fck";
+ power-domains = <&k3_pds 224 TI_SCI_PD_EXCLUSIVE>;
+ #pwm-cells = <3>;
+ status = "disabled";
};
gic500: interrupt-controller@1800000 {
@@ -568,6 +683,38 @@
status = "disabled";
};
+ usbss0: usb@4104000 {
+ bootph-all;
+ compatible = "ti,j721e-usb";
+ reg = <0x00 0x4104000 0x00 0x100>;
+ dma-coherent;
+ power-domains = <&k3_pds 398 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 398 21>, <&k3_clks 398 2>;
+ clock-names = "ref", "lpm";
+ assigned-clocks = <&k3_clks 398 21>; /* USB2_REFCLK */
+ assigned-clock-parents = <&k3_clks 398 22>; /* HFOSC0 */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled"; /* Needs lane config */
+
+ usb0: usb@6000000 {
+ bootph-all;
+ compatible = "cdns,usb3";
+ reg = <0x00 0x6000000 0x00 0x10000>,
+ <0x00 0x6010000 0x00 0x10000>,
+ <0x00 0x6020000 0x00 0x10000>;
+ reg-names = "otg", "xhci", "dev";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; /* otgirq.0 */
+ interrupt-names = "host",
+ "peripheral",
+ "otg";
+ };
+ };
+
main_i2c0: i2c@2000000 {
compatible = "ti,j721e-i2c", "ti,omap4-i2c";
reg = <0x00 0x02000000 0x00 0x100>;
@@ -907,6 +1054,122 @@
status = "disabled";
};
+ pcie0_rc: pcie@2900000 {
+ compatible = "ti,j784s4-pcie-host";
+ reg = <0x00 0x02900000 0x00 0x1000>,
+ <0x00 0x02907000 0x00 0x400>,
+ <0x00 0x0d000000 0x00 0x00800000>,
+ <0x00 0x10000000 0x00 0x00001000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
+ device_type = "pci";
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
+ max-link-speed = <3>;
+ num-lanes = <4>;
+ power-domains = <&k3_pds 332 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 332 0>;
+ clock-names = "fck";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xff>;
+ vendor-id = <0x104c>;
+ device-id = <0xb012>;
+ msi-map = <0x0 &gic_its 0x0 0x10000>;
+ dma-coherent;
+ ranges = <0x01000000 0x0 0x10001000 0x0 0x10001000 0x0 0x0010000>,
+ <0x02000000 0x0 0x10011000 0x0 0x10011000 0x0 0x7fef000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+ status = "disabled";
+ };
+
+ pcie1_rc: pcie@2910000 {
+ compatible = "ti,j784s4-pcie-host";
+ reg = <0x00 0x02910000 0x00 0x1000>,
+ <0x00 0x02917000 0x00 0x400>,
+ <0x00 0x0d800000 0x00 0x00800000>,
+ <0x00 0x18000000 0x00 0x00001000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
+ device_type = "pci";
+ ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
+ max-link-speed = <3>;
+ num-lanes = <4>;
+ power-domains = <&k3_pds 333 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 333 0>;
+ clock-names = "fck";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xff>;
+ vendor-id = <0x104c>;
+ device-id = <0xb012>;
+ msi-map = <0x0 &gic_its 0x10000 0x10000>;
+ dma-coherent;
+ ranges = <0x01000000 0x0 0x18001000 0x00 0x18001000 0x0 0x0010000>,
+ <0x02000000 0x0 0x18011000 0x00 0x18011000 0x0 0x7fef000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+ status = "disabled";
+ };
+
+ pcie2_rc: pcie@2920000 {
+ compatible = "ti,j784s4-pcie-host";
+ reg = <0x00 0x02920000 0x00 0x1000>,
+ <0x00 0x02927000 0x00 0x400>,
+ <0x00 0x0e000000 0x00 0x00800000>,
+ <0x44 0x00000000 0x00 0x00001000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 342 IRQ_TYPE_EDGE_RISING>;
+ device_type = "pci";
+ ti,syscon-pcie-ctrl = <&pcie2_ctrl 0x0>;
+ max-link-speed = <3>;
+ num-lanes = <2>;
+ power-domains = <&k3_pds 334 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 334 0>;
+ clock-names = "fck";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xff>;
+ vendor-id = <0x104c>;
+ device-id = <0xb012>;
+ msi-map = <0x0 &gic_its 0x20000 0x10000>;
+ dma-coherent;
+ ranges = <0x01000000 0x0 0x00001000 0x44 0x00001000 0x0 0x0010000>,
+ <0x02000000 0x0 0x00011000 0x44 0x00011000 0x0 0x7fef000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+ status = "disabled";
+ };
+
+ pcie3_rc: pcie@2930000 {
+ compatible = "ti,j784s4-pcie-host";
+ reg = <0x00 0x02930000 0x00 0x1000>,
+ <0x00 0x02937000 0x00 0x400>,
+ <0x00 0x0e800000 0x00 0x00800000>,
+ <0x44 0x10000000 0x00 0x00001000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_EDGE_RISING>;
+ device_type = "pci";
+ ti,syscon-pcie-ctrl = <&pcie3_ctrl 0x0>;
+ max-link-speed = <3>;
+ num-lanes = <2>;
+ power-domains = <&k3_pds 335 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 335 0>;
+ clock-names = "fck";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xff>;
+ vendor-id = <0x104c>;
+ device-id = <0xb012>;
+ msi-map = <0x0 &gic_its 0x30000 0x10000>;
+ dma-coherent;
+ ranges = <0x01000000 0x0 0x00001000 0x44 0x10001000 0x0 0x0010000>,
+ <0x02000000 0x0 0x00011000 0x44 0x10011000 0x0 0x7fef000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+ status = "disabled";
+ };
+
serdes_wiz0: wiz@5060000 {
compatible = "ti,j784s4-wiz-10g";
#address-cells = <1>;
@@ -1427,6 +1690,180 @@
};
};
+ main_cpsw0: ethernet@c000000 {
+ compatible = "ti,j784s4-cpswxg-nuss";
+ reg = <0x00 0xc000000 0x00 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x00 0x00 0x00 0xc000000 0x00 0x200000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-coherent;
+ clocks = <&k3_clks 64 0>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 64 TI_SCI_PD_EXCLUSIVE>;
+
+ dmas = <&main_udmap 0xca00>,
+ <&main_udmap 0xca01>,
+ <&main_udmap 0xca02>,
+ <&main_udmap 0xca03>,
+ <&main_udmap 0xca04>,
+ <&main_udmap 0xca05>,
+ <&main_udmap 0xca06>,
+ <&main_udmap 0xca07>,
+ <&main_udmap 0x4a00>;
+ dma-names = "tx0", "tx1", "tx2", "tx3",
+ "tx4", "tx5", "tx6", "tx7",
+ "rx";
+
+ status = "disabled";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ main_cpsw0_port1: port@1 {
+ reg = <1>;
+ label = "port1";
+ ti,mac-only;
+ status = "disabled";
+ };
+
+ main_cpsw0_port2: port@2 {
+ reg = <2>;
+ label = "port2";
+ ti,mac-only;
+ status = "disabled";
+ };
+
+ main_cpsw0_port3: port@3 {
+ reg = <3>;
+ label = "port3";
+ ti,mac-only;
+ status = "disabled";
+ };
+
+ main_cpsw0_port4: port@4 {
+ reg = <4>;
+ label = "port4";
+ ti,mac-only;
+ status = "disabled";
+ };
+
+ main_cpsw0_port5: port@5 {
+ reg = <5>;
+ label = "port5";
+ ti,mac-only;
+ status = "disabled";
+ };
+
+ main_cpsw0_port6: port@6 {
+ reg = <6>;
+ label = "port6";
+ ti,mac-only;
+ status = "disabled";
+ };
+
+ main_cpsw0_port7: port@7 {
+ reg = <7>;
+ label = "port7";
+ ti,mac-only;
+ status = "disabled";
+ };
+
+ main_cpsw0_port8: port@8 {
+ reg = <8>;
+ label = "port8";
+ ti,mac-only;
+ status = "disabled";
+ };
+ };
+
+ main_cpsw0_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x00 0xf00 0x00 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 64 0>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+ status = "disabled";
+ };
+
+ cpts@3d000 {
+ compatible = "ti,am65-cpts";
+ reg = <0x00 0x3d000 0x00 0x400>;
+ clocks = <&k3_clks 64 3>;
+ clock-names = "cpts";
+ interrupts-extended = <&gic500 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cpts";
+ ti,cpts-ext-ts-inputs = <4>;
+ ti,cpts-periodic-outputs = <2>;
+ };
+ };
+
+ main_cpsw1: ethernet@c200000 {
+ compatible = "ti,j721e-cpsw-nuss";
+ reg = <0x00 0xc200000 0x00 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x00 0x00 0x00 0xc200000 0x00 0x200000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-coherent;
+ clocks = <&k3_clks 62 0>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 62 TI_SCI_PD_EXCLUSIVE>;
+
+ dmas = <&main_udmap 0xc640>,
+ <&main_udmap 0xc641>,
+ <&main_udmap 0xc642>,
+ <&main_udmap 0xc643>,
+ <&main_udmap 0xc644>,
+ <&main_udmap 0xc645>,
+ <&main_udmap 0xc646>,
+ <&main_udmap 0xc647>,
+ <&main_udmap 0x4640>;
+ dma-names = "tx0", "tx1", "tx2", "tx3",
+ "tx4", "tx5", "tx6", "tx7",
+ "rx";
+
+ status = "disabled";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ main_cpsw1_port1: port@1 {
+ reg = <1>;
+ label = "port1";
+ phys = <&cpsw1_phy_gmii_sel 1>;
+ ti,mac-only;
+ status = "disabled";
+ };
+ };
+
+ main_cpsw1_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
+ reg = <0x00 0xf00 0x00 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 62 0>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+ status = "disabled";
+ };
+
+ cpts@3d000 {
+ compatible = "ti,am65-cpts";
+ reg = <0x00 0x3d000 0x00 0x400>;
+ clocks = <&k3_clks 62 3>;
+ clock-names = "cpts";
+ interrupts-extended = <&gic500 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cpts";
+ ti,cpts-ext-ts-inputs = <4>;
+ ti,cpts-periodic-outputs = <2>;
+ };
+ };
+
main_mcan0: can@2701000 {
compatible = "bosch,m_can";
reg = <0x00 0x02701000 0x00 0x200>,
@@ -2255,4 +2692,94 @@
*/
};
};
+
+ mcasp0: mcasp@2b00000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b00000 0x00 0x2000>,
+ <0x00 0x02b08000 0x00 0x1000>;
+ reg-names = "mpu","dat";
+ interrupts = <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 545 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+ dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+ dma-names = "tx", "rx";
+ clocks = <&k3_clks 265 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 265 0>;
+ assigned-clock-parents = <&k3_clks 265 1>;
+ power-domains = <&k3_pds 265 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ mcasp1: mcasp@2b10000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b10000 0x00 0x2000>,
+ <0x00 0x02b18000 0x00 0x1000>;
+ reg-names = "mpu","dat";
+ interrupts = <GIC_SPI 546 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 547 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+ dmas = <&main_udmap 0xc401>, <&main_udmap 0x4401>;
+ dma-names = "tx", "rx";
+ clocks = <&k3_clks 266 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 266 0>;
+ assigned-clock-parents = <&k3_clks 266 1>;
+ power-domains = <&k3_pds 266 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ mcasp2: mcasp@2b20000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b20000 0x00 0x2000>,
+ <0x00 0x02b28000 0x00 0x1000>;
+ reg-names = "mpu","dat";
+ interrupts = <GIC_SPI 548 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 549 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+ dmas = <&main_udmap 0xc402>, <&main_udmap 0x4402>;
+ dma-names = "tx", "rx";
+ clocks = <&k3_clks 267 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 267 0>;
+ assigned-clock-parents = <&k3_clks 267 1>;
+ power-domains = <&k3_pds 267 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ mcasp3: mcasp@2b30000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b30000 0x00 0x2000>,
+ <0x00 0x02b38000 0x00 0x1000>;
+ reg-names = "mpu","dat";
+ interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+ dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
+ dma-names = "tx", "rx";
+ clocks = <&k3_clks 268 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 268 0>;
+ assigned-clock-parents = <&k3_clks 268 1>;
+ power-domains = <&k3_pds 268 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ mcasp4: mcasp@2b40000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ reg = <0x00 0x02b40000 0x00 0x2000>,
+ <0x00 0x02b48000 0x00 0x1000>;
+ reg-names = "mpu","dat";
+ interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+ dmas = <&main_udmap 0xc501>, <&main_udmap 0x4501>;
+ dma-names = "tx", "rx";
+ clocks = <&k3_clks 269 0>;
+ clock-names = "fck";
+ assigned-clocks = <&k3_clks 269 0>;
+ assigned-clock-parents = <&k3_clks 269 1>;
+ power-domains = <&k3_pds 269 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
index 2e18d91ae92f..f3a6ed1c979d 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
@@ -145,12 +145,16 @@
status = "reserved";
};
- mcu_conf: syscon@40f00000 {
- compatible = "ti,j721e-system-controller", "syscon", "simple-mfd";
- reg = <0x00 0x40f00000 0x00 0x20000>;
+ mcu_conf: bus@40f00000 {
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x00 0x00 0x40f00000 0x20000>;
+ ranges = <0x0 0x0 0x40f00000 0x20000>;
+
+ cpsw_mac_syscon: ethernet-mac-syscon@200 {
+ compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+ reg = <0x200 0x8>;
+ };
phy_gmii_sel: phy@4040 {
compatible = "ti,am654-phy-gmii-sel";
@@ -553,7 +557,7 @@
reg = <1>;
ti,mac-only;
label = "port1";
- ti,syscon-efuse = <&mcu_conf 0x200>;
+ ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
phys = <&phy_gmii_sel 1>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
index da7368ed6b52..73cc3c1fec08 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
@@ -238,7 +238,10 @@
<0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
<0x00 0x04210000 0x00 0x04210000 0x00 0x00010000>, /* VPU0 */
<0x00 0x04220000 0x00 0x04220000 0x00 0x00010000>, /* VPU1 */
- <0x00 0x0d000000 0x00 0x0d000000 0x00 0x01000000>, /* PCIe Core*/
+ <0x00 0x0d000000 0x00 0x0d000000 0x00 0x00800000>, /* PCIe0 Core*/
+ <0x00 0x0d800000 0x00 0x0d800000 0x00 0x00800000>, /* PCIe1 Core*/
+ <0x00 0x0e000000 0x00 0x0e000000 0x00 0x00800000>, /* PCIe2 Core*/
+ <0x00 0x0e800000 0x00 0x0e800000 0x00 0x00800000>, /* PCIe3 Core*/
<0x00 0x10000000 0x00 0x10000000 0x00 0x08000000>, /* PCIe0 DAT0 */
<0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
<0x00 0x64800000 0x00 0x64800000 0x00 0x0070c000>, /* C71_1 */
@@ -248,7 +251,12 @@
<0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
<0x00 0x70000000 0x00 0x70000000 0x00 0x00400000>, /* MSMC RAM */
<0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
+ <0x40 0x00000000 0x40 0x00000000 0x01 0x00000000>, /* PCIe0 DAT1 */
<0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
+ <0x42 0x00000000 0x42 0x00000000 0x01 0x00000000>, /* PCIe2 DAT1 */
+ <0x43 0x00000000 0x43 0x00000000 0x01 0x00000000>, /* PCIe3 DAT1 */
+ <0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT0 */
+ <0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT0 */
<0x4e 0x20000000 0x4e 0x20000000 0x00 0x00080000>, /* GPU */
/* MCUSS_WKUP Range */
diff --git a/arch/arm64/boot/dts/ti/k3-pinctrl.h b/arch/arm64/boot/dts/ti/k3-pinctrl.h
index 4cd2df467d0b..22b8d73cfd32 100644
--- a/arch/arm64/boot/dts/ti/k3-pinctrl.h
+++ b/arch/arm64/boot/dts/ti/k3-pinctrl.h
@@ -38,6 +38,9 @@
#define PIN_DEBOUNCE_CONF5 (5 << DEBOUNCE_SHIFT)
#define PIN_DEBOUNCE_CONF6 (6 << DEBOUNCE_SHIFT)
+/* Default mux configuration for gpio-ranges to use with pinctrl */
+#define PIN_GPIO_RANGE_IOPAD (PIN_INPUT | 7)
+
#define AM62AX_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define AM62AX_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
diff --git a/arch/arm64/boot/dts/ti/k3-serdes.h b/arch/arm64/boot/dts/ti/k3-serdes.h
index a011ad893b44..ef3606068140 100644
--- a/arch/arm64/boot/dts/ti/k3-serdes.h
+++ b/arch/arm64/boot/dts/ti/k3-serdes.h
@@ -201,4 +201,12 @@
#define J784S4_SERDES4_LANE3_USB 0x2
#define J784S4_SERDES4_LANE3_IP4_UNUSED 0x3
+/* J722S */
+
+#define J722S_SERDES0_LANE0_USB 0x0
+#define J722S_SERDES0_LANE0_QSGMII_LANE2 0x1
+
+#define J722S_SERDES1_LANE0_PCIE0_LANE0 0x0
+#define J722S_SERDES1_LANE0_QSGMII_LANE1 0x1
+
#endif /* DTS_ARM64_TI_K3_SERDES_H */
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
index dd4569e7bd95..60d1b1acf9a0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -70,6 +70,22 @@
clocks = <&zynqmp_clk ACPU>;
};
+&cpu0_debug {
+ clocks = <&zynqmp_clk DBF_FPD>;
+};
+
+&cpu1_debug {
+ clocks = <&zynqmp_clk DBF_FPD>;
+};
+
+&cpu2_debug {
+ clocks = <&zynqmp_clk DBF_FPD>;
+};
+
+&cpu3_debug {
+ clocks = <&zynqmp_clk DBF_FPD>;
+};
+
&fpd_dma_chan1 {
clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso
index d7535a77b45e..95d16904d765 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso
@@ -22,6 +22,17 @@
/plugin/;
&{/} {
+ compatible = "xlnx,zynqmp-sk-kv260-revA",
+ "xlnx,zynqmp-sk-kv260-revY",
+ "xlnx,zynqmp-sk-kv260-revZ",
+ "xlnx,zynqmp-sk-kv260", "xlnx,zynqmp";
+ model = "ZynqMP KV260 revA";
+
+ ina260-u14 {
+ compatible = "iio-hwmon";
+ io-channels = <&u14 0>, <&u14 1>, <&u14 2>;
+ };
+
si5332_0: si5332-0 { /* u17 */
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -68,7 +79,12 @@
scl-gpios = <&gpio 24 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio 25 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
- /* u14 - 0x40 - ina260 */
+ u14: ina260@40 { /* u14 */
+ compatible = "ti,ina260";
+ #io-channel-cells = <1>;
+ label = "ina260-u14";
+ reg = <0x40>;
+ };
/* u27 - 0xe0 - STDP4320 DP/HDMI splitter */
};
@@ -321,6 +337,7 @@
slew-rate = <SLEW_RATE_SLOW>;
power-source = <IO_STANDARD_LVCMOS18>;
bias-disable;
+ output-enable;
};
conf-cd {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso
index a7b8fffad499..a74d0ac7e07a 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso
@@ -17,6 +17,17 @@
/plugin/;
&{/} {
+ compatible = "xlnx,zynqmp-sk-kv260-rev2",
+ "xlnx,zynqmp-sk-kv260-rev1",
+ "xlnx,zynqmp-sk-kv260-revB",
+ "xlnx,zynqmp-sk-kv260", "xlnx,zynqmp";
+ model = "ZynqMP KV260 revB";
+
+ ina260-u14 {
+ compatible = "iio-hwmon";
+ io-channels = <&u14 0>, <&u14 1>, <&u14 2>;
+ };
+
si5332_0: si5332-0 { /* u17 */
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -52,6 +63,18 @@
#clock-cells = <0>;
clock-frequency = <27000000>;
};
+
+ dpcon {
+ compatible = "dp-connector";
+ label = "P11";
+ type = "full-size";
+
+ port {
+ dpcon_in: endpoint {
+ remote-endpoint = <&dpsub_dp_out>;
+ };
+ };
+ };
};
&i2c1 { /* I2C_SCK C23/C24 - MIO from SOM */
@@ -63,8 +86,13 @@
scl-gpios = <&gpio 24 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio 25 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
- /* u14 - 0x40 - ina260 */
- /* u43 - 0x2d - usb5744 */
+ u14: ina260@40 { /* u14 */
+ compatible = "ti,ina260";
+ #io-channel-cells = <1>;
+ label = "ina260-u14";
+ reg = <0x40>;
+ };
+ /* u43 - 0x2d - USB hub */
/* u27 - 0xe0 - STDP4320 DP/HDMI splitter */
};
@@ -81,6 +109,14 @@
phy-names = "dp-phy0", "dp-phy1";
phys = <&psgtr 1 PHY_TYPE_DP 0 0>, <&psgtr 0 PHY_TYPE_DP 1 0>;
assigned-clock-rates = <27000000>, <25000000>, <300000000>;
+
+ ports {
+ port@5 {
+ dpsub_dp_out: endpoint {
+ remote-endpoint = <&dpcon_in>;
+ };
+ };
+ };
};
&zynqmp_dpdma {
@@ -305,6 +341,7 @@
slew-rate = <SLEW_RATE_SLOW>;
power-source = <IO_STANDARD_LVCMOS18>;
bias-disable;
+ output-enable;
};
conf-cd {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts
index 51622896b1b1..86e6c4990560 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * dts file for Xilinx ZynqMP SM-K26 rev1/B/A
+ * dts file for Xilinx ZynqMP SM-K26 rev2/1/B/A
*
* (C) Copyright 2020 - 2021, Xilinx, Inc.
+ * (C) Copyright 2023 - 2024, Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
*/
@@ -17,8 +18,9 @@
#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
- model = "ZynqMP SM-K26 Rev1/B/A";
- compatible = "xlnx,zynqmp-sm-k26-rev1", "xlnx,zynqmp-sm-k26-revB",
+ model = "ZynqMP SM-K26 Rev2/1/B/A";
+ compatible = "xlnx,zynqmp-sm-k26-rev2",
+ "xlnx,zynqmp-sm-k26-rev1", "xlnx,zynqmp-sm-k26-revB",
"xlnx,zynqmp-sm-k26-revA", "xlnx,zynqmp-sm-k26",
"xlnx,zynqmp";
@@ -101,12 +103,23 @@
<&xilinx_ams 24>, <&xilinx_ams 25>, <&xilinx_ams 26>,
<&xilinx_ams 27>, <&xilinx_ams 28>, <&xilinx_ams 29>;
};
+
+ pwm-fan {
+ compatible = "pwm-fan";
+ status = "okay";
+ pwms = <&ttc0 2 40000 0>;
+ };
};
&modepin_gpio {
label = "modepin";
};
+&ttc0 {
+ status = "okay";
+ #pwm-cells = <3>;
+};
+
&uart1 { /* MIO36/MIO37 */
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-smk-k26-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-smk-k26-revA.dts
index 85b0d1677240..b804abe89d1d 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-smk-k26-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-smk-k26-revA.dts
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * dts file for Xilinx ZynqMP SMK-K26 rev1/B/A
+ * dts file for Xilinx ZynqMP SMK-K26 rev2/1/B/A
*
* (C) Copyright 2020 - 2021, Xilinx, Inc.
+ * (C) Copyright 2023 - 2024, Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
*/
@@ -10,8 +11,9 @@
#include "zynqmp-sm-k26-revA.dts"
/ {
- model = "ZynqMP SMK-K26 Rev1/B/A";
- compatible = "xlnx,zynqmp-smk-k26-rev1", "xlnx,zynqmp-smk-k26-revB",
+ model = "ZynqMP SMK-K26 Rev2/1/B/A";
+ compatible = "xlnx,zynqmp-smk-k26-rev2",
+ "xlnx,zynqmp-smk-k26-rev1", "xlnx,zynqmp-smk-k26-revB",
"xlnx,zynqmp-smk-k26-revA", "xlnx,zynqmp-smk-k26",
"xlnx,zynqmp";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
index c8f71a1aec89..495ca94b45db 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
@@ -14,6 +14,14 @@
compatible = "xlnx,zynqmp-zcu102-rev1.0", "xlnx,zynqmp-zcu102", "xlnx,zynqmp";
};
+&rproc_split {
+ status = "okay";
+};
+
+&rproc_lockstep {
+ status = "disabled";
+};
+
&eeprom {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index d99830c9b85f..b1b31dcf6291 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -207,13 +207,71 @@
mbox-names = "tx", "rx";
};
- nvmem-firmware {
+ soc-nvmem {
compatible = "xlnx,zynqmp-nvmem-fw";
- #address-cells = <1>;
- #size-cells = <1>;
-
- soc_revision: soc-revision@0 {
- reg = <0x0 0x4>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ soc_revision: soc-revision@0 {
+ reg = <0x0 0x4>;
+ };
+ /* efuse access */
+ efuse_dna: efuse-dna@c {
+ reg = <0xc 0xc>;
+ };
+ efuse_usr0: efuse-usr0@20 {
+ reg = <0x20 0x4>;
+ };
+ efuse_usr1: efuse-usr1@24 {
+ reg = <0x24 0x4>;
+ };
+ efuse_usr2: efuse-usr2@28 {
+ reg = <0x28 0x4>;
+ };
+ efuse_usr3: efuse-usr3@2c {
+ reg = <0x2c 0x4>;
+ };
+ efuse_usr4: efuse-usr4@30 {
+ reg = <0x30 0x4>;
+ };
+ efuse_usr5: efuse-usr5@34 {
+ reg = <0x34 0x4>;
+ };
+ efuse_usr6: efuse-usr6@38 {
+ reg = <0x38 0x4>;
+ };
+ efuse_usr7: efuse-usr7@3c {
+ reg = <0x3c 0x4>;
+ };
+ efuse_miscusr: efuse-miscusr@40 {
+ reg = <0x40 0x4>;
+ };
+ efuse_chash: efuse-chash@50 {
+ reg = <0x50 0x4>;
+ };
+ efuse_pufmisc: efuse-pufmisc@54 {
+ reg = <0x54 0x4>;
+ };
+ efuse_sec: efuse-sec@58 {
+ reg = <0x58 0x4>;
+ };
+ efuse_spkid: efuse-spkid@5c {
+ reg = <0x5c 0x4>;
+ };
+ efuse_aeskey: efuse-aeskey@60 {
+ reg = <0x60 0x20>;
+ };
+ efuse_ppk0hash: efuse-ppk0hash@a0 {
+ reg = <0xa0 0x30>;
+ };
+ efuse_ppk1hash: efuse-ppk1hash@d0 {
+ reg = <0xd0 0x30>;
+ };
+ efuse_pufuser: efuse-pufuser@100 {
+ reg = <0x100 0x7F>;
+ };
};
};
@@ -252,7 +310,7 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
- fpga_full: fpga-full {
+ fpga_full: fpga-region {
compatible = "fpga-region";
fpga-mgr = <&zynqmp_pcap>;
#address-cells = <2>;
@@ -260,19 +318,76 @@
ranges;
};
- remoteproc {
+ rproc_lockstep: remoteproc@ffe00000 {
compatible = "xlnx,zynqmp-r5fss";
xlnx,cluster-mode = <1>;
+ xlnx,tcm-mode = <1>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
- r5f-0 {
+ ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x10000>,
+ <0x0 0x20000 0x0 0xffe20000 0x0 0x10000>,
+ <0x0 0x10000 0x0 0xffe10000 0x0 0x10000>,
+ <0x0 0x30000 0x0 0xffe30000 0x0 0x10000>;
+
+ r5f@0 {
compatible = "xlnx,zynqmp-r5f";
- power-domains = <&zynqmp_firmware PD_RPU_0>;
+ reg = <0x0 0x0 0x0 0x10000>,
+ <0x0 0x20000 0x0 0x10000>,
+ <0x0 0x10000 0x0 0x10000>,
+ <0x0 0x30000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0", "atcm1", "btcm1";
+ power-domains = <&zynqmp_firmware PD_RPU_0>,
+ <&zynqmp_firmware PD_R5_0_ATCM>,
+ <&zynqmp_firmware PD_R5_0_BTCM>,
+ <&zynqmp_firmware PD_R5_1_ATCM>,
+ <&zynqmp_firmware PD_R5_1_BTCM>;
memory-region = <&rproc_0_fw_image>;
};
- r5f-1 {
+ r5f@1 {
compatible = "xlnx,zynqmp-r5f";
- power-domains = <&zynqmp_firmware PD_RPU_1>;
+ reg = <0x1 0x0 0x0 0x10000>, <0x1 0x20000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0";
+ power-domains = <&zynqmp_firmware PD_RPU_1>,
+ <&zynqmp_firmware PD_R5_1_ATCM>,
+ <&zynqmp_firmware PD_R5_1_BTCM>;
+ memory-region = <&rproc_1_fw_image>;
+ };
+ };
+
+ rproc_split: remoteproc-split@ffe00000 {
+ status = "disabled";
+ compatible = "xlnx,zynqmp-r5fss";
+ xlnx,cluster-mode = <0>;
+ xlnx,tcm-mode = <0>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x10000>,
+ <0x0 0x20000 0x0 0xffe20000 0x0 0x10000>,
+ <0x1 0x0 0x0 0xffe90000 0x0 0x10000>,
+ <0x1 0x20000 0x0 0xffeb0000 0x0 0x10000>;
+
+ r5f@0 {
+ compatible = "xlnx,zynqmp-r5f";
+ reg = <0x0 0x0 0x0 0x10000>, <0x0 0x20000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0";
+ power-domains = <&zynqmp_firmware PD_RPU_0>,
+ <&zynqmp_firmware PD_R5_0_ATCM>,
+ <&zynqmp_firmware PD_R5_0_BTCM>;
+ memory-region = <&rproc_0_fw_image>;
+ };
+
+ r5f@1 {
+ compatible = "xlnx,zynqmp-r5f";
+ reg = <0x1 0x0 0x0 0x10000>, <0x1 0x20000 0x0 0x10000>;
+ reg-names = "atcm0", "btcm0";
+ power-domains = <&zynqmp_firmware PD_RPU_1>,
+ <&zynqmp_firmware PD_R5_1_ATCM>,
+ <&zynqmp_firmware PD_R5_1_BTCM>;
memory-region = <&rproc_1_fw_image>;
};
};
@@ -330,6 +445,34 @@
};
};
+ cpu0_debug: debug@fec10000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0x0 0xfec10000 0x0 0x1000>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu0>;
+ };
+
+ cpu1_debug: debug@fed10000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0x0 0xfed10000 0x0 0x1000>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu1>;
+ };
+
+ cpu2_debug: debug@fee10000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0x0 0xfee10000 0x0 0x1000>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu2>;
+ };
+
+ cpu3_debug: debug@fef10000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0x0 0xfef10000 0x0 0x1000>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu3>;
+ };
+
/* GDMA */
fpd_dma_chan1: dma-controller@fd500000 {
status = "disabled";
@@ -684,6 +827,13 @@
power-domains = <&zynqmp_firmware PD_I2C_1>;
};
+ ocm: memory-controller@ff960000 {
+ compatible = "xlnx,zynqmp-ocmc-1.0";
+ reg = <0x0 0xff960000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
pcie: pcie@fd0e0000 {
compatible = "xlnx,nwl-pcie-2.11";
status = "disabled";
@@ -941,10 +1091,11 @@
status = "disabled";
reg = <0x0 0xfe200000 0x0 0x40000>;
interrupt-parent = <&gic>;
- interrupt-names = "host", "peripheral", "otg";
+ interrupt-names = "host", "peripheral", "otg", "wakeup";
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ref";
/* iommus = <&smmu 0x860>; */
snps,quirk-frame-length-adjustment = <0x20>;
@@ -972,10 +1123,11 @@
status = "disabled";
reg = <0x0 0xfe300000 0x0 0x40000>;
interrupt-parent = <&gic>;
- interrupt-names = "host", "peripheral", "otg";
+ interrupt-names = "host", "peripheral", "otg", "wakeup";
interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ref";
/* iommus = <&smmu 0x861>; */
snps,quirk-frame-length-adjustment = <0x20>;
@@ -1024,8 +1176,6 @@
compatible = "xlnx,zynqmp-ams-pl";
status = "disabled";
reg = <0x400 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 57a9abe78ee4..ef2235838c44 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -233,6 +233,8 @@ CONFIG_PCIE_HISI_STB=y
CONFIG_PCIE_ARMADA_8K=y
CONFIG_PCIE_TEGRA194_HOST=m
CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_RCAR_GEN4_HOST=m
+CONFIG_PCIE_RCAR_GEN4_EP=m
CONFIG_PCIE_ROCKCHIP_DW_HOST=y
CONFIG_PCIE_VISCONTI_HOST=y
CONFIG_PCIE_LAYERSCAPE_GEN4=y
@@ -256,6 +258,7 @@ CONFIG_GOOGLE_CBMEM=m
CONFIG_GOOGLE_COREBOOT_TABLE=m
CONFIG_EFI_CAPSULE_LOADER=y
CONFIG_IMX_SCU=y
+CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE=y
CONFIG_QCOM_QSEECOM=y
CONFIG_QCOM_QSEECOM_UEFISECAPP=y
CONFIG_GNSS=m
@@ -279,6 +282,8 @@ CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_NAND_QCOM=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=m
+CONFIG_MTD_HYPERBUS=m
+CONFIG_HBMC_AM654=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_VIRTIO_BLK=y
@@ -375,6 +380,7 @@ CONFIG_AQUANTIA_PHY=y
CONFIG_BCM54140_PHY=m
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=y
+CONFIG_MARVELL_88Q2XXX_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_AT803X_PHY=y
@@ -414,6 +420,9 @@ CONFIG_ATH11K_AHB=m
CONFIG_ATH11K_PCI=m
CONFIG_ATH12K=m
CONFIG_BRCMFMAC=m
+CONFIG_IWLWIFI=m
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
CONFIG_MWIFIEX_PCIE=m
@@ -745,6 +754,7 @@ CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_SL28CPLD=y
CONFIG_RZ_MTU3=y
CONFIG_MFD_TI_AM335X_TSCADC=m
+CONFIG_MFD_TI_LP873X=m
CONFIG_MFD_TPS65219=y
CONFIG_MFD_TPS6594_I2C=m
CONFIG_MFD_ROHM_BD718XX=y
@@ -760,6 +770,7 @@ CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_HI6421V530=y
CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_LP873X=m
CONFIG_REGULATOR_MAX77620=y
CONFIG_REGULATOR_MAX8973=y
CONFIG_REGULATOR_MAX20411=m
@@ -1036,6 +1047,7 @@ CONFIG_SND_AUDIO_GRAPH_CARD2=m
CONFIG_HID_MULTITOUCH=m
CONFIG_I2C_HID_ACPI=m
CONFIG_I2C_HID_OF=m
+CONFIG_I2C_HID_OF_ELAN=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
@@ -1062,6 +1074,7 @@ CONFIG_USB_MTU3=y
CONFIG_USB_MUSB_HDRC=y
CONFIG_USB_MUSB_SUNXI=y
CONFIG_USB_DWC3=y
+CONFIG_OMAP_USB2=m
CONFIG_USB_DWC2=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
@@ -1215,6 +1228,7 @@ CONFIG_QCOM_BAM_DMA=y
CONFIG_QCOM_GPI_DMA=m
CONFIG_QCOM_HIDMA_MGMT=y
CONFIG_QCOM_HIDMA=y
+CONFIG_DW_EDMA=m
CONFIG_RCAR_DMAC=y
CONFIG_RENESAS_USB_DMAC=m
CONFIG_RZ_DMAC=y
@@ -1333,6 +1347,7 @@ CONFIG_SM_GCC_8650=y
CONFIG_SM_GPUCC_6115=m
CONFIG_SM_GPUCC_8150=y
CONFIG_SM_GPUCC_8250=y
+CONFIG_SM_GPUCC_8350=m
CONFIG_SM_GPUCC_8450=m
CONFIG_SM_GPUCC_8550=m
CONFIG_SM_GPUCC_8650=m
@@ -1555,6 +1570,7 @@ CONFIG_ARM_SPE_PMU=m
CONFIG_ARM_DMC620_PMU=m
CONFIG_HISI_PMU=y
CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU=m
+CONFIG_NVIDIA_CORESIGHT_PMU_ARCH_SYSTEM_PMU=m
CONFIG_MESON_DDR_PMU=m
CONFIG_NVMEM_LAYOUT_SL28_VPD=m
CONFIG_NVMEM_IMX_OCOTP=y
@@ -1564,6 +1580,7 @@ CONFIG_NVMEM_LAYERSCAPE_SFP=m
CONFIG_NVMEM_MESON_EFUSE=m
CONFIG_NVMEM_MTK_EFUSE=y
CONFIG_NVMEM_QCOM_QFPROM=y
+CONFIG_NVMEM_QCOM_SEC_QFPROM=m
CONFIG_NVMEM_RMEM=m
CONFIG_NVMEM_ROCKCHIP_EFUSE=y
CONFIG_NVMEM_ROCKCHIP_OTP=y
@@ -1593,7 +1610,7 @@ CONFIG_INTERCONNECT_IMX8MQ=m
CONFIG_INTERCONNECT_IMX8MP=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
-CONFIG_INTERCONNECT_QCOM_MSM8996=m
+CONFIG_INTERCONNECT_QCOM_MSM8996=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=m
CONFIG_INTERCONNECT_QCOM_QCM2290=y
CONFIG_INTERCONNECT_QCOM_QCS404=m
@@ -1606,9 +1623,9 @@ CONFIG_INTERCONNECT_QCOM_SC8280XP=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SDX75=y
CONFIG_INTERCONNECT_QCOM_SM6115=y
-CONFIG_INTERCONNECT_QCOM_SM8150=m
+CONFIG_INTERCONNECT_QCOM_SM8150=y
CONFIG_INTERCONNECT_QCOM_SM8250=y
-CONFIG_INTERCONNECT_QCOM_SM8350=m
+CONFIG_INTERCONNECT_QCOM_SM8350=y
CONFIG_INTERCONNECT_QCOM_SM8450=y
CONFIG_INTERCONNECT_QCOM_SM8550=y
CONFIG_INTERCONNECT_QCOM_SM8650=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 4b6d2d52053e..7d7d97ad3cd5 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,4 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+syscall-y += syscall_table_64.h
+
+# arm32 syscall table used by lib/compat_audit.c:
+syscall-y += unistd_32.h
+# same constants with prefixes, used by vdso, seccomp and sigreturn:
+syscall-y += unistd_compat_32.h
+
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 6792a1f83f2a..a407f9cd549e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
return acpi_cpu_get_madt_gicc(cpu)->uid;
}
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (acpi_cpu_get_madt_gicc(cpu) &&
+ uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
+
+ return -EINVAL;
+}
+
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
int apei_claim_sea(struct pt_regs *regs);
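The get_cpu_for_acpi_id() helper added above is a linear reverse lookup from an ACPI processor UID to a logical CPU number. A toy user-space version of the same idea, with a plain array standing in for the MADT GICC entries (the UID values are made up for illustration):

/* Sketch of the UID -> logical CPU reverse lookup added above; the
 * cpu_acpi_uid[] table is a hypothetical stand-in for MADT GICC data. */
#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical per-CPU ACPI UIDs, indexed by logical CPU number. */
static const unsigned int cpu_acpi_uid[NR_CPUS] = { 0x100, 0x101, 0x200, 0x201 };

static int get_cpu_for_acpi_id(unsigned int uid)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (uid == cpu_acpi_uid[cpu])
			return cpu;
	return -1;	/* the kernel helper returns -EINVAL */
}

int main(void)
{
	printf("uid 0x200 -> cpu %d\n", get_cpu_for_acpi_id(0x200));
	printf("uid 0x999 -> cpu %d\n", get_cpu_for_acpi_id(0x999));
	return 0;
}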
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 5f172611654b..9e96f024b2f1 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -175,21 +175,6 @@ static inline bool gic_prio_masking_enabled(void)
static inline void gic_pmr_mask_irqs(void)
{
- BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF |
- GIC_PRIO_PSR_I_SET));
- BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
- /*
- * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
- * and non-secure PMR accesses are not subject to the shifts that
- * are applied to IRQ priorities
- */
- BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
- /*
- * Same situation as above, but now we make sure that we can mask
- * regular interrupts.
- */
- BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS |
- GIC_PRIO_PSR_I_SET));
gic_write_pmr(GIC_PRIO_IRQOFF);
}
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 934c658ee947..f5794d50f51d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -15,7 +15,7 @@
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
-#include <linux/smp.h>
+#include <linux/percpu.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
index c27404fa4418..a4697a0b6835 100644
--- a/arch/arm64/include/asm/arm_pmuv3.h
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -6,7 +6,7 @@
#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H
-#include <linux/kvm_host.h>
+#include <asm/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index 980d1dd8e1a3..b8a5861dc7b7 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -112,6 +112,9 @@
#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
+#define _ASM_EXTABLE_KACCESS(insn, fixup) \
+ _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 7529c0263933..a6e5b07b64fd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -59,7 +59,7 @@ cpucap_is_possible(const unsigned int cap)
case ARM64_WORKAROUND_REPEAT_TLBI:
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
case ARM64_WORKAROUND_SPECULATIVE_SSBS:
- return IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS);
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
}
return true;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8b904a757bd3..558434267271 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -588,14 +588,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL1_AARCH32;
}
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL0_AARCH32;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7b32b99023a2..1cb0704c6163 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,9 +86,12 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
+#define ARM_CPU_PART_CORTEX_X925 0xD85
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -162,9 +165,12 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index e4546b29dd0c..fd87c4b8f984 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -146,7 +146,7 @@
/* Coprocessor traps */
.macro __init_el2_cptr
__check_hvhe .LnVHE_\@, x1
- mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
+ mov x0, #CPACR_ELx_FPEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
.LnVHE_\@:
@@ -277,7 +277,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SVE traps
- orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+ orr x0, x0, #CPACR_ELx_ZEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
@@ -298,7 +298,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SME traps
- orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
+ orr x0, x0, #CPACR_ELx_SMEN
msr cpacr_el1, x0
b .Lskip_set_cptr_sme_\@
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7abf09df7033..3f482500f71f 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -121,6 +121,14 @@
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
+/* Status codes for individual page table levels */
+#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + n)
+#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + n)
+
+#define ESR_ELx_FSC_FAULT_nL (0x2C)
+#define ESR_ELx_FSC_FAULT_L(n) (((n) < 0 ? ESR_ELx_FSC_FAULT_nL : \
+ ESR_ELx_FSC_FAULT) + (n))
+
/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
@@ -388,20 +396,33 @@ static inline bool esr_is_data_abort(unsigned long esr)
static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
- /* Translation fault, level -1 */
- if ((esr & ESR_ELx_FSC) == 0b101011)
- return true;
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_FAULT_L(3)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(2)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(1)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(0)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(-1));
}
static inline bool esr_fsc_is_permission_fault(unsigned long esr)
{
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_PERM_L(3)) ||
+ (esr == ESR_ELx_FSC_PERM_L(2)) ||
+ (esr == ESR_ELx_FSC_PERM_L(1)) ||
+ (esr == ESR_ELx_FSC_PERM_L(0));
}
static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
{
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_ACCESS_L(3)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(2)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(1)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(0));
}
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
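The esr.h hunk above replaces the old ESR_ELx_FSC_TYPE masking with explicit per-level comparisons. A minimal standalone sketch, reusing the constant values shown in the hunk and assuming 0x3f for the ESR_ELx_FSC mask (not part of this hunk), shows how the level-parameterised macros resolve:

/* Standalone sketch of the per-level FSC checks introduced above. The
 * constants mirror the hunk; they are repeated only so this compiles alone. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define ESR_ELx_FSC		0x3f	/* assumed mask value */
#define ESR_ELx_FSC_FAULT	0x04
#define ESR_ELx_FSC_ACCESS	0x08
#define ESR_ELx_FSC_PERM	0x0c

#define ESR_ELx_FSC_ACCESS_L(n)	(ESR_ELx_FSC_ACCESS + (n))
#define ESR_ELx_FSC_PERM_L(n)	(ESR_ELx_FSC_PERM + (n))
#define ESR_ELx_FSC_FAULT_nL	(0x2C)
#define ESR_ELx_FSC_FAULT_L(n)	(((n) < 0 ? ESR_ELx_FSC_FAULT_nL : \
				  ESR_ELx_FSC_FAULT) + (n))

static bool fsc_is_translation_fault(unsigned long esr)
{
	esr &= ESR_ELx_FSC;
	return esr == ESR_ELx_FSC_FAULT_L(3) ||
	       esr == ESR_ELx_FSC_FAULT_L(2) ||
	       esr == ESR_ELx_FSC_FAULT_L(1) ||
	       esr == ESR_ELx_FSC_FAULT_L(0) ||
	       esr == ESR_ELx_FSC_FAULT_L(-1);
}

int main(void)
{
	/* 0x2b is the level -1 translation fault previously special-cased. */
	assert(fsc_is_translation_fault(0x2b));
	assert(fsc_is_translation_fault(0x07));			/* level 3 */
	assert(!fsc_is_translation_fault(ESR_ELx_FSC_PERM_L(1)));
	printf("FSC level checks behave as expected\n");
	return 0;
}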
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4ff0ae3f6d66..41fd90895dfc 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
* emit the large TLP from the CPU.
*/
-static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
- const u32 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
- if (count == 8 || count == 4 || count == 2 || count == 1) {
+ if (__builtin_constant_p(count) &&
+ (count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned32(to, from, count);
dgh();
} else {
__iowrite32_copy_full(to, from, count);
}
}
+#define __iowrite32_copy __iowrite32_copy
-#define __iowrite32_copy(to, from, count) \
- (__builtin_constant_p(count) ? \
- __const_iowrite32_copy(to, from, count) : \
- __iowrite32_copy_full(to, from, count))
-
-static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
- const u64 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
- if (count == 8 || count == 4 || count == 2 || count == 1) {
+ if (__builtin_constant_p(count) &&
+ (count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned64(to, from, count);
dgh();
} else {
__iowrite64_copy_full(to, from, count);
}
}
-
-#define __iowrite64_copy(to, from, count) \
- (__builtin_constant_p(count) ? \
- __const_iowrite64_copy(to, from, count) : \
- __iowrite64_copy_full(to, from, count))
+#define __iowrite64_copy __iowrite64_copy
/*
* I/O memory mapping functions.
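The io.h changes above fold the constant-size fast path into __always_inline helpers guarded by __builtin_constant_p(), replacing the old wrapper macros. A rough user-space sketch of the same dispatch pattern, with illustrative names and plain stores standing in for the MMIO accessors:

/* Sketch of the constant-count dispatch behind __iowrite32_copy(): a
 * compile-time-constant count can take a fully unrolled path, anything
 * else falls back to an out-of-line loop. Names here are not the kernel's. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void copy32_full(volatile uint32_t *to, const uint32_t *from, size_t count)
{
	for (size_t i = 0; i < count; i++)
		to[i] = from[i];
}

static inline __attribute__((always_inline))
void copy32(volatile uint32_t *to, const uint32_t *from, size_t count)
{
	if (__builtin_constant_p(count) && count == 4) {
		/* Unrolled stores the compiler can merge into wider accesses. */
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
		to[3] = from[3];
	} else {
		copy32_full(to, from, count);
	}
}

int main(void)
{
	uint32_t src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, dst[8] = { 0 };
	volatile size_t n = 4;			/* runtime count */

	copy32(dst, src, 4);			/* constant count: unrolled path */
	copy32(dst + 4, src + 4, n);		/* non-constant: fallback loop */
	printf("%u %u\n", dst[3], dst[7]);
	return 0;
}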
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index e01bb5ca13b7..b2adc2c6c82a 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -305,6 +305,12 @@
GENMASK(19, 14) | \
BIT(11))
+#define CPTR_VHE_EL2_RES0 (GENMASK(63, 32) | \
+ GENMASK(27, 26) | \
+ GENMASK(23, 22) | \
+ GENMASK(19, 18) | \
+ GENMASK(15, 0))
+
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_E2TB_MASK (UL(0x3))
#define MDCR_EL2_E2TB_SHIFT (UL(24))
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 501e3e019c93..21650e7924d4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -557,6 +557,68 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu_set_flag((v), e); \
} while (0)
+#define __build_check_all_or_none(r, bits) \
+ BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
+
+#define __cpacr_to_cptr_clr(clr, set) \
+ ({ \
+ u64 cptr = 0; \
+ \
+ if ((set) & CPACR_ELx_FPEN) \
+ cptr |= CPTR_EL2_TFP; \
+ if ((set) & CPACR_ELx_ZEN) \
+ cptr |= CPTR_EL2_TZ; \
+ if ((set) & CPACR_ELx_SMEN) \
+ cptr |= CPTR_EL2_TSM; \
+ if ((clr) & CPACR_ELx_TTA) \
+ cptr |= CPTR_EL2_TTA; \
+ if ((clr) & CPTR_EL2_TAM) \
+ cptr |= CPTR_EL2_TAM; \
+ if ((clr) & CPTR_EL2_TCPAC) \
+ cptr |= CPTR_EL2_TCPAC; \
+ \
+ cptr; \
+ })
+
+#define __cpacr_to_cptr_set(clr, set) \
+ ({ \
+ u64 cptr = 0; \
+ \
+ if ((clr) & CPACR_ELx_FPEN) \
+ cptr |= CPTR_EL2_TFP; \
+ if ((clr) & CPACR_ELx_ZEN) \
+ cptr |= CPTR_EL2_TZ; \
+ if ((clr) & CPACR_ELx_SMEN) \
+ cptr |= CPTR_EL2_TSM; \
+ if ((set) & CPACR_ELx_TTA) \
+ cptr |= CPTR_EL2_TTA; \
+ if ((set) & CPTR_EL2_TAM) \
+ cptr |= CPTR_EL2_TAM; \
+ if ((set) & CPTR_EL2_TCPAC) \
+ cptr |= CPTR_EL2_TCPAC; \
+ \
+ cptr; \
+ })
+
+#define cpacr_clear_set(clr, set) \
+ do { \
+ BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
+ BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \
+ __build_check_all_or_none((clr), CPACR_ELx_FPEN); \
+ __build_check_all_or_none((set), CPACR_ELx_FPEN); \
+ __build_check_all_or_none((clr), CPACR_ELx_ZEN); \
+ __build_check_all_or_none((set), CPACR_ELx_ZEN); \
+ __build_check_all_or_none((clr), CPACR_ELx_SMEN); \
+ __build_check_all_or_none((set), CPACR_ELx_SMEN); \
+ \
+ if (has_vhe() || has_hvhe()) \
+ sysreg_clear_set(cpacr_el1, clr, set); \
+ else \
+ sysreg_clear_set(cptr_el2, \
+ __cpacr_to_cptr_clr(clr, set), \
+ __cpacr_to_cptr_set(clr, set));\
+ } while (0)
+
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
@@ -570,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
u64 val;
if (has_vhe()) {
- val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
- CPACR_EL1_ZEN_EL1EN);
+ val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
} else if (has_hvhe()) {
- val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+ val = CPACR_ELx_FPEN;
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
- val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+ val |= CPACR_ELx_ZEN;
if (cpus_have_final_cap(ARM64_SME))
- val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
+ val |= CPACR_ELx_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;
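__build_check_all_or_none() in the kvm_emulate.h hunk rejects masks that touch only part of a multi-bit CPACR field. The same predicate, recast as a runtime assert on a made-up two-bit field rather than the real CPACR layout:

/* Sketch of the all-or-none field check used by cpacr_clear_set() above,
 * as a runtime assert instead of BUILD_BUG_ON(). Field value is illustrative. */
#include <assert.h>
#include <stdio.h>

#define FIELD_FPEN 0x3u		/* pretend two-bit enable field */

#define check_all_or_none(r, bits) \
	assert(!(((r) & (bits)) && ((r) & (bits)) != (bits)))

int main(void)
{
	check_all_or_none(0x0u, FIELD_FPEN);	/* field untouched: fine */
	check_all_or_none(0x3u, FIELD_FPEN);	/* both bits set: fine   */
	/* check_all_or_none(0x1u, FIELD_FPEN); would trip: partial set  */
	printf("all-or-none checks pass\n");
	return 0;
}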
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8170c04fde91..36b8e97bf49e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
extern unsigned int __ro_after_init kvm_sve_max_vl;
+extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);
u32 __attribute_const__ kvm_target_cpu(void);
@@ -521,6 +522,20 @@ struct kvm_cpu_context {
u64 *vncr_array;
};
+struct cpu_sve_state {
+ __u64 zcr_el1;
+
+ /*
+ * Ordering is important since __sve_save_state/__sve_restore_state
+ * relies on it.
+ */
+ __u32 fpsr;
+ __u32 fpcr;
+
+ /* Must be SVE_VQ_BYTES (128 bit) aligned. */
+ __u8 sve_regs[];
+};
+
/*
* This structure is instantiated on a per-CPU basis, and contains
* data that is:
@@ -534,7 +549,15 @@ struct kvm_cpu_context {
*/
struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
- struct user_fpsimd_state *fpsimd_state; /* hyp VA */
+
+ /*
+ * All pointers in this union are hyp VA.
+ * sve_state is only used in pKVM and if system_supports_sve().
+ */
+ union {
+ struct user_fpsimd_state *fpsimd_state;
+ struct cpu_sve_state *sve_state;
+ };
/* Ownership of the FP regs */
enum {
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 3e80464f8953..b05bceca3385 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -111,7 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-void __sve_restore_state(void *sve_pffr, u32 *fpsr);
+void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr);
u64 __guest_enter(struct kvm_vcpu *vcpu);
@@ -142,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
+extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index ad9cfb5c1ff4..cd56acd9a842 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
+static inline size_t pkvm_host_sve_state_size(void)
+{
+ if (!system_supports_sve())
+ return 0;
+
+ return size_add(sizeof(struct cpu_sve_state),
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
+}
+
#endif /* __ARM64_KVM_PKVM_H__ */
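pkvm_host_sve_state_size() above sizes a structure that ends in a flexible array, adding the per-system SVE register area to the fixed header. A small sketch of the same sizing, with a placeholder register-area size instead of SVE_SIG_REGS_SIZE():

/* Sketch of sizing a flexible-array state buffer as pkvm_host_sve_state_size()
 * does above; the register-area size below is a made-up placeholder. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sve_state {
	uint64_t zcr_el1;
	uint32_t fpsr;
	uint32_t fpcr;
	uint8_t  regs[];	/* sized per-system at allocation time */
};

static size_t sve_state_size(size_t regs_bytes)
{
	return sizeof(struct sve_state) + regs_bytes;	/* size_add() in-kernel */
}

int main(void)
{
	size_t sz = sve_state_size(34 * 16);	/* arbitrary example area */
	struct sve_state *st = calloc(1, sz);

	printf("header %zu bytes, total %zu bytes\n", sizeof(*st), sz);
	free(st);
	return 0;
}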
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c768d16b81a4..bd19f4c758b7 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -72,11 +72,11 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
unsigned long tcr = read_sysreg(tcr_el1);
- if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
+ if ((tcr & TCR_T0SZ_MASK) == t0sz)
return;
tcr &= ~TCR_T0SZ_MASK;
- tcr |= t0sz << TCR_T0SZ_OFFSET;
+ tcr |= t0sz;
write_sysreg(tcr, tcr_el1);
isb();
}
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 91fbd5c8a391..0f84518632b4 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -182,7 +182,7 @@ void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
- if (!system_supports_mte())
+ if (!kasan_hw_tags_enabled())
return;
mte_check_tfsr_el1();
@@ -190,7 +190,7 @@ static inline void mte_check_tfsr_entry(void)
static inline void mte_check_tfsr_exit(void)
{
- if (!system_supports_mte())
+ if (!kasan_hw_tags_enabled())
return;
/*
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9943ff0af4c9..1f60aa1bc750 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -170,6 +170,7 @@
#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
+#define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 47ec58031f11..0abe975d68a8 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,35 +21,12 @@
#define INIT_PSTATE_EL2 \
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)
-/*
- * PMR values used to mask/unmask interrupts.
- *
- * GIC priority masking works as follows: if an IRQ's priority is a higher value
- * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
- * means masking more IRQs (or at least that the same IRQs remain masked).
- *
- * To mask interrupts, we clear the most significant bit of PMR.
- *
- * Some code sections either automatically switch back to PSR.I or explicitly
- * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
- * in the priority mask, it indicates that PSR.I should be set and
- * interrupt disabling temporarily does not rely on IRQ priorities.
- */
-#define GIC_PRIO_IRQON 0xe0
-#define __GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
-#define __GIC_PRIO_IRQOFF_NS 0xa0
-#define GIC_PRIO_PSR_I_SET (1 << 4)
-
-#define GIC_PRIO_IRQOFF \
- ({ \
- extern struct static_key_false gic_nonsecure_priorities;\
- u8 __prio = __GIC_PRIO_IRQOFF; \
- \
- if (static_branch_unlikely(&gic_nonsecure_priorities)) \
- __prio = __GIC_PRIO_IRQOFF_NS; \
- \
- __prio; \
- })
+#include <linux/irqchip/arm-gic-v3-prio.h>
+
+#define GIC_PRIO_IRQON GICV3_PRIO_UNMASKED
+#define GIC_PRIO_IRQOFF GICV3_PRIO_IRQ
+
+#define GIC_PRIO_PSR_I_SET GICV3_PRIO_PSR_I_SET
/* Additional SPSR bits not exposed in the UABI */
#define PSR_MODE_THREAD_BIT (1 << 0)
diff --git a/arch/arm64/include/asm/runtime-const.h b/arch/arm64/include/asm/runtime-const.h
new file mode 100644
index 000000000000..be5915669d23
--- /dev/null
+++ b/arch/arm64/include/asm/runtime-const.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+#include <asm/cacheflush.h>
+
+/* Sigh. You can still run arm64 in BE mode */
+#include <asm/byteorder.h>
+
+#define runtime_const_ptr(sym) ({ \
+ typeof(sym) __ret; \
+ asm_inline("1:\t" \
+ "movz %0, #0xcdef\n\t" \
+ "movk %0, #0x89ab, lsl #16\n\t" \
+ "movk %0, #0x4567, lsl #32\n\t" \
+ "movk %0, #0x0123, lsl #48\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ :"=r" (__ret)); \
+ __ret; })
+
+#define runtime_const_shift_right_32(val, sym) ({ \
+ unsigned long __ret; \
+ asm_inline("1:\t" \
+ "lsr %w0,%w1,#12\n\t" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ :"=r" (__ret) \
+ :"r" (0u+(val))); \
+ __ret; })
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+/* 16-bit immediate for wide move (movz and movk) in bits 5..20 */
+static inline void __runtime_fixup_16(__le32 *p, unsigned int val)
+{
+ u32 insn = le32_to_cpu(*p);
+ insn &= 0xffe0001f;
+ insn |= (val & 0xffff) << 5;
+ *p = cpu_to_le32(insn);
+}
+
+static inline void __runtime_fixup_caches(void *where, unsigned int insns)
+{
+ unsigned long va = (unsigned long)where;
+ caches_clean_inval_pou(va, va + 4*insns);
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+ __le32 *p = lm_alias(where);
+ __runtime_fixup_16(p, val);
+ __runtime_fixup_16(p+1, val >> 16);
+ __runtime_fixup_16(p+2, val >> 32);
+ __runtime_fixup_16(p+3, val >> 48);
+ __runtime_fixup_caches(where, 4);
+}
+
+/* Immediate value is 6 bits starting at bit #16 */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ __le32 *p = lm_alias(where);
+ u32 insn = le32_to_cpu(*p);
+ insn &= 0xffc0ffff;
+ insn |= (val & 63) << 16;
+ *p = cpu_to_le32(insn);
+ __runtime_fixup_caches(where, 1);
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif
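The new runtime-const.h emits movz/movk placeholder immediates and rewrites them in place once the real value is known. A self-contained sketch of the 16-bit immediate patching that __runtime_fixup_16() performs, using a hand-assembled MOVZ encoding rather than compiler output:

/* Standalone sketch of the patching above: an AArch64 MOVZ/MOVK keeps its
 * 16-bit immediate in bits 5..20, so fixing up the constant is mask-and-or. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fixup_16(uint32_t insn, uint16_t val)
{
	insn &= 0xffe0001f;		/* clear imm16 (bits 5..20)   */
	insn |= (uint32_t)val << 5;	/* splice in the new constant */
	return insn;
}

int main(void)
{
	/* movz x0, #0xcdef -- the placeholder emitted by runtime_const_ptr() */
	uint32_t insn = 0xd2800000 | (0xcdefu << 5);
	uint32_t patched = fixup_16(insn, 0x1234);

	assert(((patched >> 5) & 0xffff) == 0x1234);	/* new immediate    */
	assert((patched & 0xffe0001f) == 0xd2800000);	/* opcode/Rd intact */
	printf("patched encoding: 0x%08x\n", patched);
	return 0;
}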
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
index 30256233788b..b83975555314 100644
--- a/arch/arm64/include/asm/seccomp.h
+++ b/arch/arm64/include/asm/seccomp.h
@@ -8,13 +8,13 @@
#ifndef _ASM_SECCOMP_H
#define _ASM_SECCOMP_H
-#include <asm/unistd.h>
+#include <asm/unistd_compat_32.h>
#ifdef CONFIG_COMPAT
-#define __NR_seccomp_read_32 __NR_compat_read
-#define __NR_seccomp_write_32 __NR_compat_write
-#define __NR_seccomp_exit_32 __NR_compat_exit
-#define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn
+#define __NR_seccomp_read_32 __NR_compat32_read
+#define __NR_seccomp_write_32 __NR_compat32_write
+#define __NR_seccomp_exit_32 __NR_compat32_exit
+#define __NR_seccomp_sigreturn_32 __NR_compat32_rt_sigreturn
#endif /* CONFIG_COMPAT */
#include <asm-generic/seccomp.h>
@@ -23,8 +23,9 @@
#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
#define SECCOMP_ARCH_NATIVE_NAME "aarch64"
#ifdef CONFIG_COMPAT
+#include <asm/unistd_compat_32.h>
# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_ARM
-# define SECCOMP_ARCH_COMPAT_NR __NR_compat_syscalls
+# define SECCOMP_ARCH_COMPAT_NR __NR_compat32_syscalls
# define SECCOMP_ARCH_COMPAT_NAME "arm"
#endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index efb13112b408..2510eec026f7 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -25,22 +25,11 @@
#ifndef __ASSEMBLY__
-#include <asm/percpu.h>
-
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
-
-/*
- * We don't use this_cpu_read(cpu_number) as that has implicit writes to
- * preempt_count, and associated (compiler) barriers, that we'd like to avoid
- * the expense of. If we're preemptible, the value can be stale at use anyway.
- * And we can't use this_cpu_ptr() either, as that winds up recursing back
- * here under CONFIG_DEBUG_PREEMPT=y.
- */
-#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
+#define raw_smp_processor_id() (current_thread_info()->cpu)
/*
* Logical CPU mapping.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index af3b206fa423..1b6e436dbb55 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -872,10 +872,6 @@
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
-/* id_aa64pfr0 */
-#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
-#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
-
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 14be5000c5a0..28f665e0975a 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -184,29 +184,40 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_mem_asm(load, reg, x, addr, err, type) \
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_mem_asm(load, reg, x, addr, label, type) \
+ asm_goto_output( \
+ "1: " load " " reg "0, [%1]\n" \
+ _ASM_EXTABLE_##type##ACCESS_ERR(1b, %l2, %w0) \
+ : "=r" (x) \
+ : "r" (addr) : : label)
+#else
+#define __get_mem_asm(load, reg, x, addr, label, type) do { \
+ int __gma_err = 0; \
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
- : "+r" (err), "=r" (x) \
- : "r" (addr))
+ : "+r" (__gma_err), "=r" (x) \
+ : "r" (addr)); \
+ if (__gma_err) goto label; } while (0)
+#endif
-#define __raw_get_mem(ldr, x, ptr, err, type) \
+#define __raw_get_mem(ldr, x, ptr, label, type) \
do { \
unsigned long __gu_val; \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type); \
break; \
case 2: \
- __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type); \
break; \
case 4: \
- __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type); \
break; \
case 8: \
- __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr, "%x", __gu_val, (ptr), label, type); \
break; \
default: \
BUILD_BUG(); \
@@ -219,27 +230,34 @@ do { \
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
* we must evaluate these outside of the critical section.
*/
-#define __raw_get_user(x, ptr, err) \
+#define __raw_get_user(x, ptr, label) \
do { \
__typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
__typeof__(x) __rgu_val; \
__chk_user_ptr(ptr); \
- \
- uaccess_ttbr0_enable(); \
- __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U); \
- uaccess_ttbr0_disable(); \
- \
- (x) = __rgu_val; \
+ do { \
+ __label__ __rgu_failed; \
+ uaccess_ttbr0_enable(); \
+ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U); \
+ uaccess_ttbr0_disable(); \
+ (x) = __rgu_val; \
+ break; \
+ __rgu_failed: \
+ uaccess_ttbr0_disable(); \
+ goto label; \
+ } while (0); \
} while (0)
#define __get_user_error(x, ptr, err) \
do { \
+ __label__ __gu_failed; \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __raw_get_user((x), __p, (err)); \
+ __raw_get_user((x), __p, __gu_failed); \
} else { \
+ __gu_failed: \
(x) = (__force __typeof__(x))0; (err) = -EFAULT; \
} \
} while (0)
@@ -262,40 +280,42 @@ do { \
do { \
__typeof__(dst) __gkn_dst = (dst); \
__typeof__(src) __gkn_src = (src); \
- int __gkn_err = 0; \
- \
- __mte_enable_tco_async(); \
- __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
- (__force type *)(__gkn_src), __gkn_err, K); \
- __mte_disable_tco_async(); \
+ do { \
+ __label__ __gkn_label; \
\
- if (unlikely(__gkn_err)) \
+ __mte_enable_tco_async(); \
+ __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
+ (__force type *)(__gkn_src), __gkn_label, K); \
+ __mte_disable_tco_async(); \
+ break; \
+ __gkn_label: \
+ __mte_disable_tco_async(); \
goto err_label; \
+ } while (0); \
} while (0)
-#define __put_mem_asm(store, reg, x, addr, err, type) \
- asm volatile( \
- "1: " store " " reg "1, [%2]\n" \
+#define __put_mem_asm(store, reg, x, addr, label, type) \
+ asm goto( \
+ "1: " store " " reg "0, [%1]\n" \
"2:\n" \
- _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
- : "+r" (err) \
- : "rZ" (x), "r" (addr))
+ _ASM_EXTABLE_##type##ACCESS(1b, %l2) \
+ : : "rZ" (x), "r" (addr) : : label)
-#define __raw_put_mem(str, x, ptr, err, type) \
+#define __raw_put_mem(str, x, ptr, label, type) \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type); \
break; \
case 2: \
- __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type); \
break; \
case 4: \
- __put_mem_asm(str, "%w", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str, "%w", __pu_val, (ptr), label, type); \
break; \
case 8: \
- __put_mem_asm(str, "%x", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str, "%x", __pu_val, (ptr), label, type); \
break; \
default: \
BUILD_BUG(); \
@@ -307,25 +327,34 @@ do { \
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
* we must evaluate these outside of the critical section.
*/
-#define __raw_put_user(x, ptr, err) \
+#define __raw_put_user(x, ptr, label) \
do { \
+ __label__ __rpu_failed; \
__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
__typeof__(*(ptr)) __rpu_val = (x); \
__chk_user_ptr(__rpu_ptr); \
\
- uaccess_ttbr0_enable(); \
- __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U); \
- uaccess_ttbr0_disable(); \
+ do { \
+ uaccess_ttbr0_enable(); \
+ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U); \
+ uaccess_ttbr0_disable(); \
+ break; \
+ __rpu_failed: \
+ uaccess_ttbr0_disable(); \
+ goto label; \
+ } while (0); \
} while (0)
#define __put_user_error(x, ptr, err) \
do { \
+ __label__ __pu_failed; \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __raw_put_user((x), __p, (err)); \
+ __raw_put_user((x), __p, __pu_failed); \
} else { \
+ __pu_failed: \
(err) = -EFAULT; \
} \
} while (0)
@@ -348,15 +377,18 @@ do { \
do { \
__typeof__(dst) __pkn_dst = (dst); \
__typeof__(src) __pkn_src = (src); \
- int __pkn_err = 0; \
\
- __mte_enable_tco_async(); \
- __raw_put_mem("str", *((type *)(__pkn_src)), \
- (__force type *)(__pkn_dst), __pkn_err, K); \
- __mte_disable_tco_async(); \
- \
- if (unlikely(__pkn_err)) \
+ do { \
+ __label__ __pkn_err; \
+ __mte_enable_tco_async(); \
+ __raw_put_mem("str", *((type *)(__pkn_src)), \
+ (__force type *)(__pkn_dst), __pkn_err, K); \
+ __mte_disable_tco_async(); \
+ break; \
+ __pkn_err: \
+ __mte_disable_tco_async(); \
goto err_label; \
+ } while (0); \
} while(0)
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
@@ -381,6 +413,51 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
__actu_ret; \
})
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr,len)))
+ return 0;
+ uaccess_ttbr0_enable();
+ return 1;
+}
+#define user_access_begin(a,b) user_access_begin(a,b)
+#define user_access_end() uaccess_ttbr0_disable()
+#define unsafe_put_user(x, ptr, label) \
+ __raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
+#define unsafe_get_user(x, ptr, label) \
+ __raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
+
+/*
+ * KCSAN uses these to save and restore ttbr state.
+ * We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
+ * they are no-ops.
+ */
+static inline unsigned long user_access_save(void) { return 0; }
+static inline void user_access_restore(unsigned long enabled) { }
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label) \
+ while (len >= sizeof(type)) { \
+ unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
+
+#define unsafe_copy_to_user(_dst,_src,_len,label) \
+do { \
+ char __user *__ucu_dst = (_dst); \
+ const char *__ucu_src = (_src); \
+ size_t __ucu_len = (_len); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
+} while (0)
+
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
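unsafe_copy_to_user() above drains the buffer in descending power-of-two chunks so each access uses the widest store that still fits. A standalone sketch of that chunking, with memcpy() standing in for unsafe_put_user() and no fault handling or label-based error exits:

/* Sketch of the descending-chunk strategy behind unsafe_copy_to_user();
 * purely illustrative, ordinary stores instead of user accessors. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COPY_LOOP(dst, src, len, type)			\
	while ((len) >= sizeof(type)) {			\
		memcpy((dst), (src), sizeof(type));	\
		(dst) += sizeof(type);			\
		(src) += sizeof(type);			\
		(len) -= sizeof(type);			\
	}

static void copy_chunked(char *dst, const char *src, size_t len)
{
	/* Widest chunks first: one u64 store covers eight u8 stores. */
	COPY_LOOP(dst, src, len, uint64_t);
	COPY_LOOP(dst, src, len, uint32_t);
	COPY_LOOP(dst, src, len, uint16_t);
	COPY_LOOP(dst, src, len, uint8_t);
}

int main(void)
{
	char src[13] = "hello, world";
	char dst[13] = { 0 };

	copy_chunked(dst, src, sizeof(src));	/* 13 bytes = 8 + 4 + 1 */
	assert(memcmp(dst, src, sizeof(src)) == 0);
	printf("%s\n", dst);
	return 0;
}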
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 1346579f802f..80618c9bbcd8 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -17,35 +17,17 @@
#define __ARCH_WANT_SYS_VFORK
/*
- * Compat syscall numbers used by the AArch64 kernel.
- */
-#define __NR_compat_restart_syscall 0
-#define __NR_compat_exit 1
-#define __NR_compat_read 3
-#define __NR_compat_write 4
-#define __NR_compat_gettimeofday 78
-#define __NR_compat_sigreturn 119
-#define __NR_compat_rt_sigreturn 173
-#define __NR_compat_clock_gettime 263
-#define __NR_compat_clock_getres 264
-#define __NR_compat_clock_gettime64 403
-#define __NR_compat_clock_getres_time64 406
-
-/*
* The following SVCs are ARM private.
*/
#define __ARM_NR_COMPAT_BASE 0x0f0000
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-
-#define __NR_compat_syscalls 463
#endif
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_NEW_STAT
-#ifndef __COMPAT_SYSCALL_NR
-#include <uapi/asm/unistd.h>
-#endif
+#include <asm/unistd_64.h>
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 266b96acc014..e0b1a0b57f75 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -1,938 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AArch32 (compat) system call definitions.
- *
- * Copyright (C) 2001-2005 Russell King
- * Copyright (C) 2012 ARM Ltd.
- */
+#ifndef _UAPI__ASM_ARM_UNISTD_H
+#define _UAPI__ASM_ARM_UNISTD_H
-#ifndef __SYSCALL
-#define __SYSCALL(x, y)
-#endif
+#include <asm/unistd_32.h>
-#define __NR_restart_syscall 0
-__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
-#define __NR_exit 1
-__SYSCALL(__NR_exit, sys_exit)
-#define __NR_fork 2
-__SYSCALL(__NR_fork, sys_fork)
-#define __NR_read 3
-__SYSCALL(__NR_read, sys_read)
-#define __NR_write 4
-__SYSCALL(__NR_write, sys_write)
-#define __NR_open 5
-__SYSCALL(__NR_open, compat_sys_open)
-#define __NR_close 6
-__SYSCALL(__NR_close, sys_close)
- /* 7 was sys_waitpid */
-__SYSCALL(7, sys_ni_syscall)
-#define __NR_creat 8
-__SYSCALL(__NR_creat, sys_creat)
-#define __NR_link 9
-__SYSCALL(__NR_link, sys_link)
-#define __NR_unlink 10
-__SYSCALL(__NR_unlink, sys_unlink)
-#define __NR_execve 11
-__SYSCALL(__NR_execve, compat_sys_execve)
-#define __NR_chdir 12
-__SYSCALL(__NR_chdir, sys_chdir)
- /* 13 was sys_time */
-__SYSCALL(13, sys_ni_syscall)
-#define __NR_mknod 14
-__SYSCALL(__NR_mknod, sys_mknod)
-#define __NR_chmod 15
-__SYSCALL(__NR_chmod, sys_chmod)
-#define __NR_lchown 16
-__SYSCALL(__NR_lchown, sys_lchown16)
- /* 17 was sys_break */
-__SYSCALL(17, sys_ni_syscall)
- /* 18 was sys_stat */
-__SYSCALL(18, sys_ni_syscall)
-#define __NR_lseek 19
-__SYSCALL(__NR_lseek, compat_sys_lseek)
-#define __NR_getpid 20
-__SYSCALL(__NR_getpid, sys_getpid)
-#define __NR_mount 21
-__SYSCALL(__NR_mount, sys_mount)
- /* 22 was sys_umount */
-__SYSCALL(22, sys_ni_syscall)
-#define __NR_setuid 23
-__SYSCALL(__NR_setuid, sys_setuid16)
-#define __NR_getuid 24
-__SYSCALL(__NR_getuid, sys_getuid16)
- /* 25 was sys_stime */
-__SYSCALL(25, sys_ni_syscall)
-#define __NR_ptrace 26
-__SYSCALL(__NR_ptrace, compat_sys_ptrace)
- /* 27 was sys_alarm */
-__SYSCALL(27, sys_ni_syscall)
- /* 28 was sys_fstat */
-__SYSCALL(28, sys_ni_syscall)
-#define __NR_pause 29
-__SYSCALL(__NR_pause, sys_pause)
- /* 30 was sys_utime */
-__SYSCALL(30, sys_ni_syscall)
- /* 31 was sys_stty */
-__SYSCALL(31, sys_ni_syscall)
- /* 32 was sys_gtty */
-__SYSCALL(32, sys_ni_syscall)
-#define __NR_access 33
-__SYSCALL(__NR_access, sys_access)
-#define __NR_nice 34
-__SYSCALL(__NR_nice, sys_nice)
- /* 35 was sys_ftime */
-__SYSCALL(35, sys_ni_syscall)
-#define __NR_sync 36
-__SYSCALL(__NR_sync, sys_sync)
-#define __NR_kill 37
-__SYSCALL(__NR_kill, sys_kill)
-#define __NR_rename 38
-__SYSCALL(__NR_rename, sys_rename)
-#define __NR_mkdir 39
-__SYSCALL(__NR_mkdir, sys_mkdir)
-#define __NR_rmdir 40
-__SYSCALL(__NR_rmdir, sys_rmdir)
-#define __NR_dup 41
-__SYSCALL(__NR_dup, sys_dup)
-#define __NR_pipe 42
-__SYSCALL(__NR_pipe, sys_pipe)
-#define __NR_times 43
-__SYSCALL(__NR_times, compat_sys_times)
- /* 44 was sys_prof */
-__SYSCALL(44, sys_ni_syscall)
-#define __NR_brk 45
-__SYSCALL(__NR_brk, sys_brk)
-#define __NR_setgid 46
-__SYSCALL(__NR_setgid, sys_setgid16)
-#define __NR_getgid 47
-__SYSCALL(__NR_getgid, sys_getgid16)
- /* 48 was sys_signal */
-__SYSCALL(48, sys_ni_syscall)
-#define __NR_geteuid 49
-__SYSCALL(__NR_geteuid, sys_geteuid16)
-#define __NR_getegid 50
-__SYSCALL(__NR_getegid, sys_getegid16)
-#define __NR_acct 51
-__SYSCALL(__NR_acct, sys_acct)
-#define __NR_umount2 52
-__SYSCALL(__NR_umount2, sys_umount)
- /* 53 was sys_lock */
-__SYSCALL(53, sys_ni_syscall)
-#define __NR_ioctl 54
-__SYSCALL(__NR_ioctl, compat_sys_ioctl)
-#define __NR_fcntl 55
-__SYSCALL(__NR_fcntl, compat_sys_fcntl)
- /* 56 was sys_mpx */
-__SYSCALL(56, sys_ni_syscall)
-#define __NR_setpgid 57
-__SYSCALL(__NR_setpgid, sys_setpgid)
- /* 58 was sys_ulimit */
-__SYSCALL(58, sys_ni_syscall)
- /* 59 was sys_olduname */
-__SYSCALL(59, sys_ni_syscall)
-#define __NR_umask 60
-__SYSCALL(__NR_umask, sys_umask)
-#define __NR_chroot 61
-__SYSCALL(__NR_chroot, sys_chroot)
-#define __NR_ustat 62
-__SYSCALL(__NR_ustat, compat_sys_ustat)
-#define __NR_dup2 63
-__SYSCALL(__NR_dup2, sys_dup2)
-#define __NR_getppid 64
-__SYSCALL(__NR_getppid, sys_getppid)
-#define __NR_getpgrp 65
-__SYSCALL(__NR_getpgrp, sys_getpgrp)
-#define __NR_setsid 66
-__SYSCALL(__NR_setsid, sys_setsid)
-#define __NR_sigaction 67
-__SYSCALL(__NR_sigaction, compat_sys_sigaction)
- /* 68 was sys_sgetmask */
-__SYSCALL(68, sys_ni_syscall)
- /* 69 was sys_ssetmask */
-__SYSCALL(69, sys_ni_syscall)
-#define __NR_setreuid 70
-__SYSCALL(__NR_setreuid, sys_setreuid16)
-#define __NR_setregid 71
-__SYSCALL(__NR_setregid, sys_setregid16)
-#define __NR_sigsuspend 72
-__SYSCALL(__NR_sigsuspend, sys_sigsuspend)
-#define __NR_sigpending 73
-__SYSCALL(__NR_sigpending, compat_sys_sigpending)
-#define __NR_sethostname 74
-__SYSCALL(__NR_sethostname, sys_sethostname)
-#define __NR_setrlimit 75
-__SYSCALL(__NR_setrlimit, compat_sys_setrlimit)
- /* 76 was compat_sys_getrlimit */
-__SYSCALL(76, sys_ni_syscall)
-#define __NR_getrusage 77
-__SYSCALL(__NR_getrusage, compat_sys_getrusage)
-#define __NR_gettimeofday 78
-__SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday)
-#define __NR_settimeofday 79
-__SYSCALL(__NR_settimeofday, compat_sys_settimeofday)
-#define __NR_getgroups 80
-__SYSCALL(__NR_getgroups, sys_getgroups16)
-#define __NR_setgroups 81
-__SYSCALL(__NR_setgroups, sys_setgroups16)
- /* 82 was compat_sys_select */
-__SYSCALL(82, sys_ni_syscall)
-#define __NR_symlink 83
-__SYSCALL(__NR_symlink, sys_symlink)
- /* 84 was sys_lstat */
-__SYSCALL(84, sys_ni_syscall)
-#define __NR_readlink 85
-__SYSCALL(__NR_readlink, sys_readlink)
-#define __NR_uselib 86
-__SYSCALL(__NR_uselib, sys_uselib)
-#define __NR_swapon 87
-__SYSCALL(__NR_swapon, sys_swapon)
-#define __NR_reboot 88
-__SYSCALL(__NR_reboot, sys_reboot)
- /* 89 was sys_readdir */
-__SYSCALL(89, sys_ni_syscall)
- /* 90 was sys_mmap */
-__SYSCALL(90, sys_ni_syscall)
-#define __NR_munmap 91
-__SYSCALL(__NR_munmap, sys_munmap)
-#define __NR_truncate 92
-__SYSCALL(__NR_truncate, compat_sys_truncate)
-#define __NR_ftruncate 93
-__SYSCALL(__NR_ftruncate, compat_sys_ftruncate)
-#define __NR_fchmod 94
-__SYSCALL(__NR_fchmod, sys_fchmod)
-#define __NR_fchown 95
-__SYSCALL(__NR_fchown, sys_fchown16)
-#define __NR_getpriority 96
-__SYSCALL(__NR_getpriority, sys_getpriority)
-#define __NR_setpriority 97
-__SYSCALL(__NR_setpriority, sys_setpriority)
- /* 98 was sys_profil */
-__SYSCALL(98, sys_ni_syscall)
-#define __NR_statfs 99
-__SYSCALL(__NR_statfs, compat_sys_statfs)
-#define __NR_fstatfs 100
-__SYSCALL(__NR_fstatfs, compat_sys_fstatfs)
- /* 101 was sys_ioperm */
-__SYSCALL(101, sys_ni_syscall)
- /* 102 was sys_socketcall */
-__SYSCALL(102, sys_ni_syscall)
-#define __NR_syslog 103
-__SYSCALL(__NR_syslog, sys_syslog)
-#define __NR_setitimer 104
-__SYSCALL(__NR_setitimer, compat_sys_setitimer)
-#define __NR_getitimer 105
-__SYSCALL(__NR_getitimer, compat_sys_getitimer)
-#define __NR_stat 106
-__SYSCALL(__NR_stat, compat_sys_newstat)
-#define __NR_lstat 107
-__SYSCALL(__NR_lstat, compat_sys_newlstat)
-#define __NR_fstat 108
-__SYSCALL(__NR_fstat, compat_sys_newfstat)
- /* 109 was sys_uname */
-__SYSCALL(109, sys_ni_syscall)
- /* 110 was sys_iopl */
-__SYSCALL(110, sys_ni_syscall)
-#define __NR_vhangup 111
-__SYSCALL(__NR_vhangup, sys_vhangup)
- /* 112 was sys_idle */
-__SYSCALL(112, sys_ni_syscall)
- /* 113 was sys_syscall */
-__SYSCALL(113, sys_ni_syscall)
-#define __NR_wait4 114
-__SYSCALL(__NR_wait4, compat_sys_wait4)
-#define __NR_swapoff 115
-__SYSCALL(__NR_swapoff, sys_swapoff)
-#define __NR_sysinfo 116
-__SYSCALL(__NR_sysinfo, compat_sys_sysinfo)
- /* 117 was sys_ipc */
-__SYSCALL(117, sys_ni_syscall)
-#define __NR_fsync 118
-__SYSCALL(__NR_fsync, sys_fsync)
-#define __NR_sigreturn 119
-__SYSCALL(__NR_sigreturn, compat_sys_sigreturn)
-#define __NR_clone 120
-__SYSCALL(__NR_clone, sys_clone)
-#define __NR_setdomainname 121
-__SYSCALL(__NR_setdomainname, sys_setdomainname)
-#define __NR_uname 122
-__SYSCALL(__NR_uname, sys_newuname)
- /* 123 was sys_modify_ldt */
-__SYSCALL(123, sys_ni_syscall)
-#define __NR_adjtimex 124
-__SYSCALL(__NR_adjtimex, sys_adjtimex_time32)
-#define __NR_mprotect 125
-__SYSCALL(__NR_mprotect, sys_mprotect)
-#define __NR_sigprocmask 126
-__SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask)
- /* 127 was sys_create_module */
-__SYSCALL(127, sys_ni_syscall)
-#define __NR_init_module 128
-__SYSCALL(__NR_init_module, sys_init_module)
-#define __NR_delete_module 129
-__SYSCALL(__NR_delete_module, sys_delete_module)
- /* 130 was sys_get_kernel_syms */
-__SYSCALL(130, sys_ni_syscall)
-#define __NR_quotactl 131
-__SYSCALL(__NR_quotactl, sys_quotactl)
-#define __NR_getpgid 132
-__SYSCALL(__NR_getpgid, sys_getpgid)
-#define __NR_fchdir 133
-__SYSCALL(__NR_fchdir, sys_fchdir)
-#define __NR_bdflush 134
-__SYSCALL(__NR_bdflush, sys_ni_syscall)
-#define __NR_sysfs 135
-__SYSCALL(__NR_sysfs, sys_sysfs)
-#define __NR_personality 136
-__SYSCALL(__NR_personality, sys_personality)
- /* 137 was sys_afs_syscall */
-__SYSCALL(137, sys_ni_syscall)
-#define __NR_setfsuid 138
-__SYSCALL(__NR_setfsuid, sys_setfsuid16)
-#define __NR_setfsgid 139
-__SYSCALL(__NR_setfsgid, sys_setfsgid16)
-#define __NR__llseek 140
-__SYSCALL(__NR__llseek, sys_llseek)
-#define __NR_getdents 141
-__SYSCALL(__NR_getdents, compat_sys_getdents)
-#define __NR__newselect 142
-__SYSCALL(__NR__newselect, compat_sys_select)
-#define __NR_flock 143
-__SYSCALL(__NR_flock, sys_flock)
-#define __NR_msync 144
-__SYSCALL(__NR_msync, sys_msync)
-#define __NR_readv 145
-__SYSCALL(__NR_readv, sys_readv)
-#define __NR_writev 146
-__SYSCALL(__NR_writev, sys_writev)
-#define __NR_getsid 147
-__SYSCALL(__NR_getsid, sys_getsid)
-#define __NR_fdatasync 148
-__SYSCALL(__NR_fdatasync, sys_fdatasync)
- /* 149 was sys_sysctl */
-__SYSCALL(149, sys_ni_syscall)
-#define __NR_mlock 150
-__SYSCALL(__NR_mlock, sys_mlock)
-#define __NR_munlock 151
-__SYSCALL(__NR_munlock, sys_munlock)
-#define __NR_mlockall 152
-__SYSCALL(__NR_mlockall, sys_mlockall)
-#define __NR_munlockall 153
-__SYSCALL(__NR_munlockall, sys_munlockall)
-#define __NR_sched_setparam 154
-__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
-#define __NR_sched_getparam 155
-__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
-#define __NR_sched_setscheduler 156
-__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
-#define __NR_sched_getscheduler 157
-__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
-#define __NR_sched_yield 158
-__SYSCALL(__NR_sched_yield, sys_sched_yield)
-#define __NR_sched_get_priority_max 159
-__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
-#define __NR_sched_get_priority_min 160
-__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
-#define __NR_sched_rr_get_interval 161
-__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32)
-#define __NR_nanosleep 162
-__SYSCALL(__NR_nanosleep, sys_nanosleep_time32)
-#define __NR_mremap 163
-__SYSCALL(__NR_mremap, sys_mremap)
-#define __NR_setresuid 164
-__SYSCALL(__NR_setresuid, sys_setresuid16)
-#define __NR_getresuid 165
-__SYSCALL(__NR_getresuid, sys_getresuid16)
- /* 166 was sys_vm86 */
-__SYSCALL(166, sys_ni_syscall)
- /* 167 was sys_query_module */
-__SYSCALL(167, sys_ni_syscall)
-#define __NR_poll 168
-__SYSCALL(__NR_poll, sys_poll)
-#define __NR_nfsservctl 169
-__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-#define __NR_setresgid 170
-__SYSCALL(__NR_setresgid, sys_setresgid16)
-#define __NR_getresgid 171
-__SYSCALL(__NR_getresgid, sys_getresgid16)
-#define __NR_prctl 172
-__SYSCALL(__NR_prctl, sys_prctl)
-#define __NR_rt_sigreturn 173
-__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn)
-#define __NR_rt_sigaction 174
-__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
-#define __NR_rt_sigprocmask 175
-__SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
-#define __NR_rt_sigpending 176
-__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
-#define __NR_rt_sigtimedwait 177
-__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
-#define __NR_rt_sigqueueinfo 178
-__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
-#define __NR_rt_sigsuspend 179
-__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
-#define __NR_pread64 180
-__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64)
-#define __NR_pwrite64 181
-__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64)
-#define __NR_chown 182
-__SYSCALL(__NR_chown, sys_chown16)
-#define __NR_getcwd 183
-__SYSCALL(__NR_getcwd, sys_getcwd)
-#define __NR_capget 184
-__SYSCALL(__NR_capget, sys_capget)
-#define __NR_capset 185
-__SYSCALL(__NR_capset, sys_capset)
-#define __NR_sigaltstack 186
-__SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack)
-#define __NR_sendfile 187
-__SYSCALL(__NR_sendfile, compat_sys_sendfile)
- /* 188 reserved */
-__SYSCALL(188, sys_ni_syscall)
- /* 189 reserved */
-__SYSCALL(189, sys_ni_syscall)
-#define __NR_vfork 190
-__SYSCALL(__NR_vfork, sys_vfork)
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2)
-#define __NR_truncate64 193
-__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64)
-#define __NR_ftruncate64 194
-__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64)
-#define __NR_stat64 195
-__SYSCALL(__NR_stat64, sys_stat64)
-#define __NR_lstat64 196
-__SYSCALL(__NR_lstat64, sys_lstat64)
-#define __NR_fstat64 197
-__SYSCALL(__NR_fstat64, sys_fstat64)
-#define __NR_lchown32 198
-__SYSCALL(__NR_lchown32, sys_lchown)
-#define __NR_getuid32 199
-__SYSCALL(__NR_getuid32, sys_getuid)
-#define __NR_getgid32 200
-__SYSCALL(__NR_getgid32, sys_getgid)
-#define __NR_geteuid32 201
-__SYSCALL(__NR_geteuid32, sys_geteuid)
-#define __NR_getegid32 202
-__SYSCALL(__NR_getegid32, sys_getegid)
-#define __NR_setreuid32 203
-__SYSCALL(__NR_setreuid32, sys_setreuid)
-#define __NR_setregid32 204
-__SYSCALL(__NR_setregid32, sys_setregid)
-#define __NR_getgroups32 205
-__SYSCALL(__NR_getgroups32, sys_getgroups)
-#define __NR_setgroups32 206
-__SYSCALL(__NR_setgroups32, sys_setgroups)
-#define __NR_fchown32 207
-__SYSCALL(__NR_fchown32, sys_fchown)
-#define __NR_setresuid32 208
-__SYSCALL(__NR_setresuid32, sys_setresuid)
-#define __NR_getresuid32 209
-__SYSCALL(__NR_getresuid32, sys_getresuid)
-#define __NR_setresgid32 210
-__SYSCALL(__NR_setresgid32, sys_setresgid)
-#define __NR_getresgid32 211
-__SYSCALL(__NR_getresgid32, sys_getresgid)
-#define __NR_chown32 212
-__SYSCALL(__NR_chown32, sys_chown)
-#define __NR_setuid32 213
-__SYSCALL(__NR_setuid32, sys_setuid)
-#define __NR_setgid32 214
-__SYSCALL(__NR_setgid32, sys_setgid)
-#define __NR_setfsuid32 215
-__SYSCALL(__NR_setfsuid32, sys_setfsuid)
-#define __NR_setfsgid32 216
-__SYSCALL(__NR_setfsgid32, sys_setfsgid)
-#define __NR_getdents64 217
-__SYSCALL(__NR_getdents64, sys_getdents64)
-#define __NR_pivot_root 218
-__SYSCALL(__NR_pivot_root, sys_pivot_root)
-#define __NR_mincore 219
-__SYSCALL(__NR_mincore, sys_mincore)
-#define __NR_madvise 220
-__SYSCALL(__NR_madvise, sys_madvise)
-#define __NR_fcntl64 221
-__SYSCALL(__NR_fcntl64, compat_sys_fcntl64)
- /* 222 for tux */
-__SYSCALL(222, sys_ni_syscall)
- /* 223 is unused */
-__SYSCALL(223, sys_ni_syscall)
-#define __NR_gettid 224
-__SYSCALL(__NR_gettid, sys_gettid)
-#define __NR_readahead 225
-__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead)
-#define __NR_setxattr 226
-__SYSCALL(__NR_setxattr, sys_setxattr)
-#define __NR_lsetxattr 227
-__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
-#define __NR_fsetxattr 228
-__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
-#define __NR_getxattr 229
-__SYSCALL(__NR_getxattr, sys_getxattr)
-#define __NR_lgetxattr 230
-__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
-#define __NR_fgetxattr 231
-__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
-#define __NR_listxattr 232
-__SYSCALL(__NR_listxattr, sys_listxattr)
-#define __NR_llistxattr 233
-__SYSCALL(__NR_llistxattr, sys_llistxattr)
-#define __NR_flistxattr 234
-__SYSCALL(__NR_flistxattr, sys_flistxattr)
-#define __NR_removexattr 235
-__SYSCALL(__NR_removexattr, sys_removexattr)
-#define __NR_lremovexattr 236
-__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
-#define __NR_fremovexattr 237
-__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-#define __NR_tkill 238
-__SYSCALL(__NR_tkill, sys_tkill)
-#define __NR_sendfile64 239
-__SYSCALL(__NR_sendfile64, sys_sendfile64)
-#define __NR_futex 240
-__SYSCALL(__NR_futex, sys_futex_time32)
-#define __NR_sched_setaffinity 241
-__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
-#define __NR_sched_getaffinity 242
-__SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity)
-#define __NR_io_setup 243
-__SYSCALL(__NR_io_setup, compat_sys_io_setup)
-#define __NR_io_destroy 244
-__SYSCALL(__NR_io_destroy, sys_io_destroy)
-#define __NR_io_getevents 245
-__SYSCALL(__NR_io_getevents, sys_io_getevents_time32)
-#define __NR_io_submit 246
-__SYSCALL(__NR_io_submit, compat_sys_io_submit)
-#define __NR_io_cancel 247
-__SYSCALL(__NR_io_cancel, sys_io_cancel)
-#define __NR_exit_group 248
-__SYSCALL(__NR_exit_group, sys_exit_group)
- /* 249 was lookup_dcookie */
-__SYSCALL(249, sys_ni_syscall)
-#define __NR_epoll_create 250
-__SYSCALL(__NR_epoll_create, sys_epoll_create)
-#define __NR_epoll_ctl 251
-__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
-#define __NR_epoll_wait 252
-__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
-#define __NR_remap_file_pages 253
-__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
- /* 254 for set_thread_area */
-__SYSCALL(254, sys_ni_syscall)
- /* 255 for get_thread_area */
-__SYSCALL(255, sys_ni_syscall)
-#define __NR_set_tid_address 256
-__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
-#define __NR_timer_create 257
-__SYSCALL(__NR_timer_create, compat_sys_timer_create)
-#define __NR_timer_settime 258
-__SYSCALL(__NR_timer_settime, sys_timer_settime32)
-#define __NR_timer_gettime 259
-__SYSCALL(__NR_timer_gettime, sys_timer_gettime32)
-#define __NR_timer_getoverrun 260
-__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
-#define __NR_timer_delete 261
-__SYSCALL(__NR_timer_delete, sys_timer_delete)
-#define __NR_clock_settime 262
-__SYSCALL(__NR_clock_settime, sys_clock_settime32)
-#define __NR_clock_gettime 263
-__SYSCALL(__NR_clock_gettime, sys_clock_gettime32)
-#define __NR_clock_getres 264
-__SYSCALL(__NR_clock_getres, sys_clock_getres_time32)
-#define __NR_clock_nanosleep 265
-__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep_time32)
-#define __NR_statfs64 266
-__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
-#define __NR_fstatfs64 267
-__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
-#define __NR_tgkill 268
-__SYSCALL(__NR_tgkill, sys_tgkill)
-#define __NR_utimes 269
-__SYSCALL(__NR_utimes, sys_utimes_time32)
-#define __NR_arm_fadvise64_64 270
-__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
-#define __NR_pciconfig_iobase 271
-__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
-#define __NR_pciconfig_read 272
-__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read)
-#define __NR_pciconfig_write 273
-__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write)
-#define __NR_mq_open 274
-__SYSCALL(__NR_mq_open, compat_sys_mq_open)
-#define __NR_mq_unlink 275
-__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
-#define __NR_mq_timedsend 276
-__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend_time32)
-#define __NR_mq_timedreceive 277
-__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive_time32)
-#define __NR_mq_notify 278
-__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
-#define __NR_mq_getsetattr 279
-__SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr)
-#define __NR_waitid 280
-__SYSCALL(__NR_waitid, compat_sys_waitid)
-#define __NR_socket 281
-__SYSCALL(__NR_socket, sys_socket)
-#define __NR_bind 282
-__SYSCALL(__NR_bind, sys_bind)
-#define __NR_connect 283
-__SYSCALL(__NR_connect, sys_connect)
-#define __NR_listen 284
-__SYSCALL(__NR_listen, sys_listen)
-#define __NR_accept 285
-__SYSCALL(__NR_accept, sys_accept)
-#define __NR_getsockname 286
-__SYSCALL(__NR_getsockname, sys_getsockname)
-#define __NR_getpeername 287
-__SYSCALL(__NR_getpeername, sys_getpeername)
-#define __NR_socketpair 288
-__SYSCALL(__NR_socketpair, sys_socketpair)
-#define __NR_send 289
-__SYSCALL(__NR_send, sys_send)
-#define __NR_sendto 290
-__SYSCALL(__NR_sendto, sys_sendto)
-#define __NR_recv 291
-__SYSCALL(__NR_recv, compat_sys_recv)
-#define __NR_recvfrom 292
-__SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
-#define __NR_shutdown 293
-__SYSCALL(__NR_shutdown, sys_shutdown)
-#define __NR_setsockopt 294
-__SYSCALL(__NR_setsockopt, sys_setsockopt)
-#define __NR_getsockopt 295
-__SYSCALL(__NR_getsockopt, sys_getsockopt)
-#define __NR_sendmsg 296
-__SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
-#define __NR_recvmsg 297
-__SYSCALL(__NR_recvmsg, compat_sys_recvmsg)
-#define __NR_semop 298
-__SYSCALL(__NR_semop, sys_semop)
-#define __NR_semget 299
-__SYSCALL(__NR_semget, sys_semget)
-#define __NR_semctl 300
-__SYSCALL(__NR_semctl, compat_sys_old_semctl)
-#define __NR_msgsnd 301
-__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
-#define __NR_msgrcv 302
-__SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
-#define __NR_msgget 303
-__SYSCALL(__NR_msgget, sys_msgget)
-#define __NR_msgctl 304
-__SYSCALL(__NR_msgctl, compat_sys_old_msgctl)
-#define __NR_shmat 305
-__SYSCALL(__NR_shmat, compat_sys_shmat)
-#define __NR_shmdt 306
-__SYSCALL(__NR_shmdt, sys_shmdt)
-#define __NR_shmget 307
-__SYSCALL(__NR_shmget, sys_shmget)
-#define __NR_shmctl 308
-__SYSCALL(__NR_shmctl, compat_sys_old_shmctl)
-#define __NR_add_key 309
-__SYSCALL(__NR_add_key, sys_add_key)
-#define __NR_request_key 310
-__SYSCALL(__NR_request_key, sys_request_key)
-#define __NR_keyctl 311
-__SYSCALL(__NR_keyctl, compat_sys_keyctl)
-#define __NR_semtimedop 312
-__SYSCALL(__NR_semtimedop, sys_semtimedop_time32)
-#define __NR_vserver 313
-__SYSCALL(__NR_vserver, sys_ni_syscall)
-#define __NR_ioprio_set 314
-__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
-#define __NR_ioprio_get 315
-__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-#define __NR_inotify_init 316
-__SYSCALL(__NR_inotify_init, sys_inotify_init)
-#define __NR_inotify_add_watch 317
-__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
-#define __NR_inotify_rm_watch 318
-__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-#define __NR_mbind 319
-__SYSCALL(__NR_mbind, sys_mbind)
-#define __NR_get_mempolicy 320
-__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
-#define __NR_set_mempolicy 321
-__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
-#define __NR_openat 322
-__SYSCALL(__NR_openat, compat_sys_openat)
-#define __NR_mkdirat 323
-__SYSCALL(__NR_mkdirat, sys_mkdirat)
-#define __NR_mknodat 324
-__SYSCALL(__NR_mknodat, sys_mknodat)
-#define __NR_fchownat 325
-__SYSCALL(__NR_fchownat, sys_fchownat)
-#define __NR_futimesat 326
-__SYSCALL(__NR_futimesat, sys_futimesat_time32)
-#define __NR_fstatat64 327
-__SYSCALL(__NR_fstatat64, sys_fstatat64)
-#define __NR_unlinkat 328
-__SYSCALL(__NR_unlinkat, sys_unlinkat)
-#define __NR_renameat 329
-__SYSCALL(__NR_renameat, sys_renameat)
-#define __NR_linkat 330
-__SYSCALL(__NR_linkat, sys_linkat)
-#define __NR_symlinkat 331
-__SYSCALL(__NR_symlinkat, sys_symlinkat)
-#define __NR_readlinkat 332
-__SYSCALL(__NR_readlinkat, sys_readlinkat)
-#define __NR_fchmodat 333
-__SYSCALL(__NR_fchmodat, sys_fchmodat)
-#define __NR_faccessat 334
-__SYSCALL(__NR_faccessat, sys_faccessat)
-#define __NR_pselect6 335
-__SYSCALL(__NR_pselect6, compat_sys_pselect6_time32)
-#define __NR_ppoll 336
-__SYSCALL(__NR_ppoll, compat_sys_ppoll_time32)
-#define __NR_unshare 337
-__SYSCALL(__NR_unshare, sys_unshare)
-#define __NR_set_robust_list 338
-__SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list)
-#define __NR_get_robust_list 339
-__SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
-#define __NR_splice 340
-__SYSCALL(__NR_splice, sys_splice)
-#define __NR_sync_file_range2 341
-__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2)
-#define __NR_tee 342
-__SYSCALL(__NR_tee, sys_tee)
-#define __NR_vmsplice 343
-__SYSCALL(__NR_vmsplice, sys_vmsplice)
-#define __NR_move_pages 344
-__SYSCALL(__NR_move_pages, sys_move_pages)
-#define __NR_getcpu 345
-__SYSCALL(__NR_getcpu, sys_getcpu)
-#define __NR_epoll_pwait 346
-__SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
-#define __NR_kexec_load 347
-__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
-#define __NR_utimensat 348
-__SYSCALL(__NR_utimensat, sys_utimensat_time32)
-#define __NR_signalfd 349
-__SYSCALL(__NR_signalfd, compat_sys_signalfd)
-#define __NR_timerfd_create 350
-__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
-#define __NR_eventfd 351
-__SYSCALL(__NR_eventfd, sys_eventfd)
-#define __NR_fallocate 352
-__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
-#define __NR_timerfd_settime 353
-__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime32)
-#define __NR_timerfd_gettime 354
-__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime32)
-#define __NR_signalfd4 355
-__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
-#define __NR_eventfd2 356
-__SYSCALL(__NR_eventfd2, sys_eventfd2)
-#define __NR_epoll_create1 357
-__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
-#define __NR_dup3 358
-__SYSCALL(__NR_dup3, sys_dup3)
-#define __NR_pipe2 359
-__SYSCALL(__NR_pipe2, sys_pipe2)
-#define __NR_inotify_init1 360
-__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
-#define __NR_preadv 361
-__SYSCALL(__NR_preadv, compat_sys_preadv)
-#define __NR_pwritev 362
-__SYSCALL(__NR_pwritev, compat_sys_pwritev)
-#define __NR_rt_tgsigqueueinfo 363
-__SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
-#define __NR_perf_event_open 364
-__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
-#define __NR_recvmmsg 365
-__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg_time32)
-#define __NR_accept4 366
-__SYSCALL(__NR_accept4, sys_accept4)
-#define __NR_fanotify_init 367
-__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
-#define __NR_fanotify_mark 368
-__SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark)
-#define __NR_prlimit64 369
-__SYSCALL(__NR_prlimit64, sys_prlimit64)
-#define __NR_name_to_handle_at 370
-__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
-#define __NR_open_by_handle_at 371
-__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
-#define __NR_clock_adjtime 372
-__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime32)
-#define __NR_syncfs 373
-__SYSCALL(__NR_syncfs, sys_syncfs)
-#define __NR_sendmmsg 374
-__SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg)
-#define __NR_setns 375
-__SYSCALL(__NR_setns, sys_setns)
-#define __NR_process_vm_readv 376
-__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
-#define __NR_process_vm_writev 377
-__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
-#define __NR_kcmp 378
-__SYSCALL(__NR_kcmp, sys_kcmp)
-#define __NR_finit_module 379
-__SYSCALL(__NR_finit_module, sys_finit_module)
-#define __NR_sched_setattr 380
-__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
-#define __NR_sched_getattr 381
-__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
-#define __NR_renameat2 382
-__SYSCALL(__NR_renameat2, sys_renameat2)
-#define __NR_seccomp 383
-__SYSCALL(__NR_seccomp, sys_seccomp)
-#define __NR_getrandom 384
-__SYSCALL(__NR_getrandom, sys_getrandom)
-#define __NR_memfd_create 385
-__SYSCALL(__NR_memfd_create, sys_memfd_create)
-#define __NR_bpf 386
-__SYSCALL(__NR_bpf, sys_bpf)
-#define __NR_execveat 387
-__SYSCALL(__NR_execveat, compat_sys_execveat)
-#define __NR_userfaultfd 388
-__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
-#define __NR_membarrier 389
-__SYSCALL(__NR_membarrier, sys_membarrier)
-#define __NR_mlock2 390
-__SYSCALL(__NR_mlock2, sys_mlock2)
-#define __NR_copy_file_range 391
-__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
-#define __NR_preadv2 392
-__SYSCALL(__NR_preadv2, compat_sys_preadv2)
-#define __NR_pwritev2 393
-__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
-#define __NR_pkey_mprotect 394
-__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
-#define __NR_pkey_alloc 395
-__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
-#define __NR_pkey_free 396
-__SYSCALL(__NR_pkey_free, sys_pkey_free)
-#define __NR_statx 397
-__SYSCALL(__NR_statx, sys_statx)
-#define __NR_rseq 398
-__SYSCALL(__NR_rseq, sys_rseq)
-#define __NR_io_pgetevents 399
-__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
-#define __NR_migrate_pages 400
-__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
-#define __NR_kexec_file_load 401
-__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
-/* 402 is unused */
-#define __NR_clock_gettime64 403
-__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
-#define __NR_clock_settime64 404
-__SYSCALL(__NR_clock_settime64, sys_clock_settime)
-#define __NR_clock_adjtime64 405
-__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
-#define __NR_clock_getres_time64 406
-__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
-#define __NR_clock_nanosleep_time64 407
-__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
-#define __NR_timer_gettime64 408
-__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
-#define __NR_timer_settime64 409
-__SYSCALL(__NR_timer_settime64, sys_timer_settime)
-#define __NR_timerfd_gettime64 410
-__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
-#define __NR_timerfd_settime64 411
-__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
-#define __NR_utimensat_time64 412
-__SYSCALL(__NR_utimensat_time64, sys_utimensat)
-#define __NR_pselect6_time64 413
-__SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
-#define __NR_ppoll_time64 414
-__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
-#define __NR_io_pgetevents_time64 416
-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
-#define __NR_recvmmsg_time64 417
-__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
-#define __NR_mq_timedsend_time64 418
-__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
-#define __NR_mq_timedreceive_time64 419
-__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
-#define __NR_semtimedop_time64 420
-__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
-#define __NR_rt_sigtimedwait_time64 421
-__SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
-#define __NR_futex_time64 422
-__SYSCALL(__NR_futex_time64, sys_futex)
-#define __NR_sched_rr_get_interval_time64 423
-__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
-#define __NR_pidfd_send_signal 424
-__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
-#define __NR_io_uring_setup 425
-__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
-#define __NR_io_uring_enter 426
-__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
-#define __NR_io_uring_register 427
-__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
-#define __NR_open_tree 428
-__SYSCALL(__NR_open_tree, sys_open_tree)
-#define __NR_move_mount 429
-__SYSCALL(__NR_move_mount, sys_move_mount)
-#define __NR_fsopen 430
-__SYSCALL(__NR_fsopen, sys_fsopen)
-#define __NR_fsconfig 431
-__SYSCALL(__NR_fsconfig, sys_fsconfig)
-#define __NR_fsmount 432
-__SYSCALL(__NR_fsmount, sys_fsmount)
-#define __NR_fspick 433
-__SYSCALL(__NR_fspick, sys_fspick)
-#define __NR_pidfd_open 434
-__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
-#define __NR_clone3 435
-__SYSCALL(__NR_clone3, sys_clone3)
-#define __NR_close_range 436
-__SYSCALL(__NR_close_range, sys_close_range)
-#define __NR_openat2 437
-__SYSCALL(__NR_openat2, sys_openat2)
-#define __NR_pidfd_getfd 438
-__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
-#define __NR_faccessat2 439
-__SYSCALL(__NR_faccessat2, sys_faccessat2)
-#define __NR_process_madvise 440
-__SYSCALL(__NR_process_madvise, sys_process_madvise)
-#define __NR_epoll_pwait2 441
-__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
-#define __NR_mount_setattr 442
-__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_fd 443
-__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
-#define __NR_landlock_create_ruleset 444
-__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
-#define __NR_landlock_add_rule 445
-__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
-#define __NR_landlock_restrict_self 446
-__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
-#define __NR_process_mrelease 448
-__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
-#define __NR_futex_waitv 449
-__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
-#define __NR_set_mempolicy_home_node 450
-__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
-#define __NR_cachestat 451
-__SYSCALL(__NR_cachestat, sys_cachestat)
-#define __NR_fchmodat2 452
-__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
-#define __NR_map_shadow_stack 453
-__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
-#define __NR_futex_wake 454
-__SYSCALL(__NR_futex_wake, sys_futex_wake)
-#define __NR_futex_wait 455
-__SYSCALL(__NR_futex_wait, sys_futex_wait)
-#define __NR_futex_requeue 456
-__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
-#define __NR_statmount 457
-__SYSCALL(__NR_statmount, sys_statmount)
-#define __NR_listmount 458
-__SYSCALL(__NR_listmount, sys_listmount)
-#define __NR_lsm_get_self_attr 459
-__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
-#define __NR_lsm_set_self_attr 460
-__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
-#define __NR_lsm_list_modules 461
-__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
-#define __NR_mseal 462
-__SYSCALL(__NR_mseal, sys_mseal)
+#define __NR_sync_file_range2 __NR_arm_sync_file_range
-/*
- * Please add new compat syscalls above this comment and update
- * __NR_compat_syscalls in asm/unistd.h.
- */
+#endif /* _UAPI__ASM_ARM_UNISTD_H */
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index ecb6fd4c3c64..778c1202bbbf 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -8,7 +8,7 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
-#include <asm/unistd.h>
+#include <asm/unistd_compat_32.h>
#include <asm/errno.h>
#include <asm/vdso/compat_barrier.h>
@@ -24,7 +24,7 @@ int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
register struct timezone *tz asm("r1") = _tz;
register struct __kernel_old_timeval *tv asm("r0") = _tv;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_gettimeofday;
+ register long nr asm("r7") = __NR_compat32_gettimeofday;
asm volatile(
" swi #0\n"
@@ -41,7 +41,7 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
register struct __kernel_timespec *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_gettime64;
+ register long nr asm("r7") = __NR_compat32_clock_gettime64;
asm volatile(
" swi #0\n"
@@ -58,7 +58,7 @@ long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
register struct old_timespec32 *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_gettime;
+ register long nr asm("r7") = __NR_compat32_clock_gettime;
asm volatile(
" swi #0\n"
@@ -75,7 +75,7 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
register struct __kernel_timespec *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_getres_time64;
+ register long nr asm("r7") = __NR_compat32_clock_getres_time64;
asm volatile(
" swi #0\n"
@@ -92,7 +92,7 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
register struct old_timespec32 *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_getres;
+ register long nr asm("r7") = __NR_compat32_clock_getres;
asm volatile(
" swi #0\n"
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 14251abee23c..824ca6987a51 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -27,20 +27,15 @@ static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
}
#define prep_zero_mask(a, bits, c) (bits)
+#define create_zero_mask(bits) (bits)
+#define find_zero(bits) (__ffs(bits) >> 3)
-static inline unsigned long create_zero_mask(unsigned long bits)
+static inline unsigned long zero_bytemask(unsigned long bits)
{
bits = (bits - 1) & ~bits;
return bits >> 7;
}
-static inline unsigned long find_zero(unsigned long mask)
-{
- return fls64(mask) >> 3;
-}
-
-#define zero_bytemask(mask) (mask)
-
#else /* __AARCH64EB__ */
#include <asm-generic/word-at-a-time.h>
#endif
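As a sanity check on the simplified little-endian helpers above, here is the arithmetic for a word whose first zero byte sits at index 2, written as a standalone userspace sketch (__builtin_ctzl stands in for the kernel's __ffs(), and the example value is invented):

#include <stdio.h>

int main(void)
{
	/* has_zero() marker word: 0x80 set in byte index 2, the first zero byte */
	unsigned long bits = 0x0000000000800000UL;

	/* find_zero(): __ffs(bits) >> 3 -> lowest set bit is 23, 23 >> 3 == 2 */
	unsigned long idx = (unsigned long)__builtin_ctzl(bits) >> 3;

	/* zero_bytemask(): ((bits - 1) & ~bits) >> 7 masks the bytes before the zero */
	unsigned long mask = ((bits - 1) & ~bits) >> 7;

	printf("zero byte index = %lu, bytemask = 0x%lx\n", idx, mask);	/* 2, 0xffff */
	return 0;
}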
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 602d137932dc..c6d141d7b7d7 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_64.h
generic-y += kvm_para.h
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index ce2ee8f1e361..df36f23876e8 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -1,25 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_TIME32_SYSCALLS
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_MEMFD_SECRET
-
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 763824963ed1..2b112f3b7510 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -46,7 +46,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
diff --git a/arch/arm64/kernel/Makefile.syscalls b/arch/arm64/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..3cfafd003b2d
--- /dev/null
+++ b/arch/arm64/kernel/Makefile.syscalls
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 +=
+syscall_abis_64 += renameat newstat rlimit memfd_secret
+
+syscalltbl = arch/arm64/tools/syscall_%.tbl
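The new Makefile.syscalls fragment points the generic syscall-table machinery at per-ABI .tbl files. Entries in those tables follow the usual <number> <abi> <name> <native entry point> [<compat entry point>] layout; the lines below are illustrative only and are not copied from the arm64 tables.

# <number> <abi>   <name>        <entry point>     <compat entry point>
0          common  io_setup      sys_io_setup      compat_sys_io_setup
1          common  io_destroy    sys_io_destroy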
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index e0e7b93c16cc..e6f66491fbe9 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -30,6 +30,7 @@
#include <linux/pgtable.h>
#include <acpi/ghes.h>
+#include <acpi/processor.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
@@ -45,6 +46,7 @@ EXPORT_SYMBOL(acpi_pci_disabled);
static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;
+static bool param_acpi_nospcr __initdata;
static int __init parse_acpi(char *arg)
{
@@ -58,6 +60,8 @@ static int __init parse_acpi(char *arg)
param_acpi_on = true;
else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
param_acpi_force = true;
+ else if (strcmp(arg, "nospcr") == 0) /* disable SPCR as default console */
+ param_acpi_nospcr = true;
else
return -EINVAL; /* Core will print when we return error */
@@ -237,7 +241,20 @@ done:
acpi_put_table(facs);
}
#endif
- acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
+
+ /*
+ * For various privacy and security reasons it is sometimes
+ * necessary to completely silence the serial console output,
+ * and only enable it when needed.
+ * However, many existing systems depend on the current
+ * behaviour; pass acpi=nospcr to prevent the console described
+ * by the ACPI SPCR table from being used as the default serial
+ * console.
+ */
+ acpi_parse_spcr(earlycon_acpi_spcr_enable,
+ !param_acpi_nospcr);
+ pr_info("Use ACPI SPCR as default console: %s\n",
+ param_acpi_nospcr ? "No" : "Yes");
+
if (IS_ENABLED(CONFIG_ACPI_BGRT))
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
}
@@ -423,107 +440,23 @@ void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
memblock_mark_nomap(addr, size);
}
-#ifdef CONFIG_ACPI_FFH
-/*
- * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as
- * specified in https://developer.arm.com/docs/den0048/latest
- */
-struct acpi_ffh_data {
- struct acpi_ffh_info info;
- void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5,
- unsigned long a6, unsigned long a7,
- struct arm_smccc_res *args,
- struct arm_smccc_quirk *res);
- void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args,
- struct arm_smccc_1_2_regs *res);
-};
-
-int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 apci_id,
+ int *pcpu)
{
- enum arm_smccc_conduit conduit;
- struct acpi_ffh_data *ffh_ctxt;
-
- if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
- return -EOPNOTSUPP;
-
- conduit = arm_smccc_1_1_get_conduit();
- if (conduit == SMCCC_CONDUIT_NONE) {
- pr_err("%s: invalid SMCCC conduit\n", __func__);
- return -EOPNOTSUPP;
+ /* If an error code is passed in, this stub can't fix it */
+ if (*pcpu < 0) {
+ pr_warn_once("Unable to map CPU to valid ID\n");
+ return *pcpu;
}
- ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
- if (!ffh_ctxt)
- return -ENOMEM;
-
- if (conduit == SMCCC_CONDUIT_SMC) {
- ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
- ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc;
- } else {
- ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc;
- ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc;
- }
-
- memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info));
-
- *region_ctxt = ffh_ctxt;
- return AE_OK;
-}
-
-static bool acpi_ffh_smccc_owner_allowed(u32 fid)
-{
- int owner = ARM_SMCCC_OWNER_NUM(fid);
-
- if (owner == ARM_SMCCC_OWNER_STANDARD ||
- owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM)
- return true;
-
- return false;
+ return 0;
}
+EXPORT_SYMBOL(acpi_map_cpu);
-int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context)
+int acpi_unmap_cpu(int cpu)
{
- int ret = 0;
- struct acpi_ffh_data *ffh_ctxt = region_context;
-
- if (ffh_ctxt->info.offset == 0) {
- /* SMC/HVC 32bit call */
- struct arm_smccc_res res;
- u32 a[8] = { 0 }, *ptr = (u32 *)value;
-
- if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) ||
- !acpi_ffh_smccc_owner_allowed(*ptr) ||
- ffh_ctxt->info.length > 32) {
- ret = AE_ERROR;
- } else {
- int idx, len = ffh_ctxt->info.length >> 2;
-
- for (idx = 0; idx < len; idx++)
- a[idx] = *(ptr + idx);
-
- ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4],
- a[5], a[6], a[7], &res, NULL);
- memcpy(value, &res, sizeof(res));
- }
-
- } else if (ffh_ctxt->info.offset == 1) {
- /* SMC/HVC 64bit call */
- struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value;
-
- if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) ||
- !acpi_ffh_smccc_owner_allowed(r->a0) ||
- ffh_ctxt->info.length > sizeof(*r)) {
- ret = AE_ERROR;
- } else {
- ffh_ctxt->invoke_ffh64_fn(r, r);
- memcpy(value, r, ffh_ctxt->info.length);
- }
- } else {
- ret = AE_ERROR;
- }
-
- return ret;
+ return 0;
}
-#endif /* CONFIG_ACPI_FFH */
+EXPORT_SYMBOL(acpi_unmap_cpu);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index e51535a5f939..0c036a9a3c33 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu)
return acpi_early_node_map[cpu];
}
-static inline int get_cpu_for_acpi_id(u32 uid)
-{
- int cpu;
-
- for (cpu = 0; cpu < nr_cpu_ids; cpu++)
- if (uid == get_acpi_id_for_cpu(cpu))
- return cpu;
-
- return -EINVAL;
-}
-
static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
const unsigned long end)
{
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index dd6ce86d4332..b776e7424fe9 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -462,6 +462,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
struct insn_emulation *insn = insn_emulations[i];
bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
+ if (insn->status == INSN_UNAVAILABLE)
+ continue;
+
if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
pr_warn("CPU[%u] cannot support the emulation of %s",
cpu, insn->name);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 828be635e7e1..617424b73f8c 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -432,14 +432,17 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
};
#endif
-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
-static const struct midr_range erratum_spec_ssbs_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3194386
+static const struct midr_range erratum_spec_ssbs_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
-#endif
-#ifdef CONFIG_ARM64_ERRATUM_3312417
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
-#endif
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
{}
};
#endif
@@ -741,9 +744,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
},
#endif
-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
+#ifdef CONFIG_ARM64_ERRATUM_3194386
{
- .desc = "ARM errata 3194386, 3312417",
+ .desc = "SSBS not fully self-synchronizing",
.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
},
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 48e7029f1054..646ecd3069fd 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -285,8 +285,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_EL1_IMP),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_EL0_IMP),
ARM64_FTR_END,
};
@@ -429,6 +429,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ECBHB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0),
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 4a92096db34e..712718aed5dd 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -9,6 +9,7 @@
#include <linux/efi.h>
#include <linux/init.h>
+#include <linux/kmemleak.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>
@@ -213,6 +214,7 @@ l: if (!p) {
return -ENOMEM;
}
+ kmemleak_not_leak(p);
efi_rt_stack_top = p + THREAD_SIZE;
return 0;
}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index ba4f8f7d6a91..8f5422ed1b75 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -105,11 +105,6 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-/* Static key checked in GIC_PRIO_IRQOFF. */
-KVM_NVHE_ALIAS(gic_nonsecure_priorities);
-#endif
-
/* EL2 exception handling */
KVM_NVHE_ALIAS(__start___kvm_ex_table);
KVM_NVHE_ALIAS(__stop___kvm_ex_table);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index dcdcccd40891..6174671be7c1 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -582,12 +582,9 @@ subsys_initcall(register_mte_tcf_preferred_sysctl);
size_t mte_probe_user_range(const char __user *uaddr, size_t size)
{
const char __user *end = uaddr + size;
- int err = 0;
char val;
- __raw_get_user(val, uaddr, err);
- if (err)
- return size;
+ __raw_get_user(val, uaddr, efault);
uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
while (uaddr < end) {
@@ -595,12 +592,13 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size)
* A read is sufficient for mte, the caller should have probed
* for the pte write permission if required.
*/
- __raw_get_user(val, uaddr, err);
- if (err)
- return end - uaddr;
+ __raw_get_user(val, uaddr, efault);
uaddr += MTE_GRANULE_SIZE;
}
(void)val;
return 0;
+
+efault:
+ return end - uaddr;
}
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index 5fa08e13e17e..f374a3e5a5fe 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -173,7 +173,7 @@ static void __init remap_idmap_for_lpa2(void)
* Don't bother with the FDT, we no longer need it after this.
*/
memset(init_idmap_pg_dir, 0,
- (u64)init_idmap_pg_dir - (u64)init_idmap_pg_end);
+ (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
create_init_idmap(init_idmap_pg_dir, mask);
dsb(ishst);
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index baca47bd443c..da53722f95d4 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -567,7 +567,7 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
* Mitigate this with an unconditional speculation barrier, as CPUs
* could mis-speculate branches and bypass a conditional barrier.
*/
- if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS))
+ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
spec_bar();
return SPECTRE_MITIGATED;
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 29a8e444db83..fabd732d0a2d 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -40,7 +40,7 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
{
phys_addr_t pa_secondary_entry = __pa_symbol(secondary_entry);
int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry);
- if (err)
+ if (err && err != -EPERM)
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
return err;
diff --git a/arch/arm64/kernel/reloc_test_core.c b/arch/arm64/kernel/reloc_test_core.c
index 99f2ffe9fc05..5b0891146054 100644
--- a/arch/arm64/kernel/reloc_test_core.c
+++ b/arch/arm64/kernel/reloc_test_core.c
@@ -74,4 +74,5 @@ static void __exit reloc_test_exit(void)
module_init(reloc_test_init);
module_exit(reloc_test_exit);
+MODULE_DESCRIPTION("Relocation testing module");
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index bbd542704730..81e798b6dada 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -17,7 +17,7 @@
#include <asm/signal32.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
-#include <asm/unistd.h>
+#include <asm/unistd_compat_32.h>
#include <asm/vdso.h>
struct compat_vfp_sigframe {
@@ -451,7 +451,7 @@ int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
void compat_setup_restart_syscall(struct pt_regs *regs)
{
- regs->regs[7] = __NR_compat_restart_syscall;
+ regs->regs[7] = __NR_compat32_restart_syscall;
}
/*
diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S
index ccbd4aab4ba4..6f486b95b413 100644
--- a/arch/arm64/kernel/sigreturn32.S
+++ b/arch/arm64/kernel/sigreturn32.S
@@ -13,7 +13,7 @@
* need two 16-bit instructions.
*/
-#include <asm/unistd.h>
+#include <asm/unistd_compat_32.h>
.section .rodata
.globl __aarch32_sigret_code_start
@@ -22,26 +22,26 @@ __aarch32_sigret_code_start:
/*
* ARM Code
*/
- .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+ .byte __NR_compat32_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat32_sigreturn
+ .byte __NR_compat32_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat32_sigreturn
/*
* Thumb code
*/
- .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat32_sigreturn, 0x27 // svc #__NR_compat32_sigreturn
+ .byte __NR_compat32_sigreturn, 0xdf // mov r7, #__NR_compat32_sigreturn
/*
* ARM code
*/
- .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+ .byte __NR_compat32_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat32_rt_sigreturn
+ .byte __NR_compat32_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat32_rt_sigreturn
/*
* Thumb code
*/
- .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat32_rt_sigreturn, 0x27 // svc #__NR_compat32_rt_sigreturn
+ .byte __NR_compat32_rt_sigreturn, 0xdf // mov r7, #__NR_compat32_rt_sigreturn
.globl __aarch32_sigret_code_end
__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 31c8b3094dd7..5e18fbcee9a2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -55,9 +55,6 @@
#include <trace/events/ipi.h>
-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
-EXPORT_PER_CPU_SYMBOL(cpu_number);
-
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -132,7 +129,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
/* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
if (ret) {
- pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+ if (ret != -EPERM)
+ pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
return ret;
}
@@ -510,6 +508,59 @@ static int __init smp_cpu_setup(int cpu)
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;
+int arch_register_cpu(int cpu)
+{
+ acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ if (!acpi_disabled && !acpi_handle &&
+ IS_ENABLED(CONFIG_ACPI_HOTPLUG_CPU))
+ return -EPROBE_DEFER;
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ /* For now, block anything that looks like physical CPU hotplug */
+ if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) {
+ pr_err_once("Changing CPU present bit is not supported\n");
+ return -ENODEV;
+ }
+#endif
+
+ /*
+ * Availability of the acpi handle is sufficient to establish
+ * that _STA has already been checked. No need to recheck here.
+ */
+ c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
+
+ return register_cpu(c, cpu);
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+void arch_unregister_cpu(int cpu)
+{
+ acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+ acpi_status status;
+ unsigned long long sta;
+
+ if (!acpi_handle) {
+ pr_err_once("Removing a CPU without associated ACPI handle\n");
+ return;
+ }
+
+ status = acpi_evaluate_integer(acpi_handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return;
+
+ /* For now, do not allow anything that looks like physical CPU hotplug */
+ if (cpu_present(cpu) && !(sta & ACPI_STA_DEVICE_PRESENT)) {
+ pr_err_once("Changing CPU present bit is not supported\n");
+ return;
+ }
+
+ unregister_cpu(c);
+}
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
@@ -530,7 +581,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
u64 hwid = processor->arm_mpidr;
- if (!acpi_gicc_is_usable(processor)) {
+ if (!(processor->flags &
+ (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) {
pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
return;
}
@@ -749,8 +801,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
*/
for_each_possible_cpu(cpu) {
- per_cpu(cpu_number, cpu) = cpu;
-
if (cpu == smp_processor_id())
continue;
@@ -767,13 +817,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-static const char *ipi_types[NR_IPI] __tracepoint_string = {
+static const char *ipi_types[MAX_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+ [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
@@ -784,7 +836,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
{
unsigned int cpu, i;
- for (i = 0; i < NR_IPI; i++) {
+ for (i = 0; i < MAX_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
@@ -1028,12 +1080,12 @@ void __init set_smp_ipi_range(int ipi_base, int n)
if (ipi_should_be_nmi(i)) {
err = request_percpu_nmi(ipi_base + i, ipi_handler,
- "IPI", &cpu_number);
+ "IPI", &irq_stat);
WARN(err, "Could not request IPI %d as NMI, err=%d\n",
i, err);
} else {
err = request_percpu_irq(ipi_base + i, ipi_handler,
- "IPI", &cpu_number);
+ "IPI", &irq_stat);
WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
i, err);
}
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index d5ffaaab31a7..f08408b6e826 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -48,14 +48,16 @@ asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
*/
#define __arm64_sys_personality __arm64_sys_arm64_personality
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
-#include <asm/unistd.h>
+#include <asm/syscall_table_64.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_64.h>
};
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index fc40386afb1b..96bcfb907443 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -5,17 +5,12 @@
* Copyright (C) 2015 ARM Ltd.
*/
-/*
- * Needed to avoid conflicting __NR_* macros between uapi/asm/unistd.h and
- * asm/unistd32.h.
- */
-#define __COMPAT_SYSCALL_NR
-
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <asm/syscall.h>
+#include <asm/unistd_compat_32.h>
asmlinkage long compat_sys_sigreturn(void);
asmlinkage long compat_sys_rt_sigreturn(void);
@@ -122,14 +117,16 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
+#define __SYSCALL_WITH_COMPAT(nr, sym, compat) __SYSCALL(nr, compat)
+
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
-#include <asm/unistd32.h>
+#include <asm/syscall_table_32.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
-const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
- [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
-#include <asm/unistd32.h>
+const syscall_fn_t compat_sys_call_table[__NR_compat32_syscalls] = {
+ [0 ... __NR_compat32_syscalls - 1] = __arm64_sys_ni_syscall,
+#include <asm/syscall_table_32.h>
};
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index ad198262b981..c442fcec6b9e 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -14,6 +14,7 @@
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
+#include <asm/unistd_compat_32.h>
long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void);
@@ -53,17 +54,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
syscall_set_return_value(current, regs, 0, ret);
/*
- * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
- * but not enough for arm64 stack utilization comfort. To keep
- * reasonable stack head room, reduce the maximum offset to 9 bits.
+ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+ * bits. The actual entropy will be further reduced by the compiler
+ * when applying stack alignment constraints: the AAPCS mandates a
+ * 16-byte aligned SP at function boundaries, which will remove the
+ * 4 low bits from any entropy chosen here.
*
- * The actual entropy will be further reduced by the compiler when
- * applying stack alignment constraints: the AAPCS mandates a
- * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
- *
- * The resulting 5 bits of entropy is seen in SP[8:4].
+ * The resulting 6 bits of entropy is seen in SP[9:4].
*/
- choose_random_kstack_offset(get_random_u16() & 0x1FF);
+ choose_random_kstack_offset(get_random_u16());
}
static inline bool has_syscall_work(unsigned long flags)
@@ -155,7 +154,7 @@ void do_el0_svc(struct pt_regs *regs)
#ifdef CONFIG_COMPAT
void do_el0_svc_compat(struct pt_regs *regs)
{
- el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
+ el0_svc_common(regs, regs->regs[7], __NR_compat32_syscalls,
compat_sys_call_table);
}
#endif
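
The reworked comment in invoke_syscall() pins down the arithmetic: KSTACK_OFFSET_MAX() keeps only 10 bits of the random value, and the AAPCS 16-byte stack alignment strips the low 4, so SP[9:4] carries the entropy. A minimal sketch of that masking, assuming the 10-bit cap described in the comment (helper names below are illustrative, not kernel APIs):

    /* Sketch: 16 random bits -> 6 usable bits of stack-offset entropy. */
    #include <stdint.h>

    static unsigned long cap_to_10_bits(unsigned long offset)
    {
            return offset & 0x3FF;          /* stands in for KSTACK_OFFSET_MAX() */
    }

    static unsigned long applied_sp_offset(uint16_t rnd)
    {
            unsigned long off = cap_to_10_bits(rnd);

            return off & ~0xFUL;            /* AAPCS keeps SP 16-byte aligned */
    }
    /* Only bits [9:4] of the result can vary: 6 bits of entropy. */
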
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 755a22d4f840..55a8e310ea12 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -264,6 +264,9 @@ SECTIONS
EXIT_DATA
}
+ RUNTIME_CONST(shift, d_hash_shift)
+ RUNTIME_CONST(ptr, dentry_hashtable)
+
PERCPU_SECTION(L1_CACHE_BYTES)
HYPERVISOR_PERCPU_SECTION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9996a989b52e..59716789fe0f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
return size ? get_order(size) : 0;
}
+static size_t pkvm_host_sve_state_order(void)
+{
+ return get_order(pkvm_host_sve_state_size());
+}
+
/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
@@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)
static void __init teardown_hyp_mode(void)
{
+ bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
int cpu;
free_hyp_pgds();
for_each_possible_cpu(cpu) {
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+
+ if (free_sve) {
+ struct cpu_sve_state *sve_state;
+
+ sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+ free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
+ }
}
}
@@ -2398,6 +2411,58 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
return 0;
}
+static int init_pkvm_host_sve_state(void)
+{
+ int cpu;
+
+ if (!system_supports_sve())
+ return 0;
+
+ /* Allocate pages for host sve state in protected mode. */
+ for_each_possible_cpu(cpu) {
+ struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
+
+ if (!page)
+ return -ENOMEM;
+
+ per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
+ }
+
+ /*
+ * Don't map the pages in hyp since these are only used in protected
+ * mode, which will (re)create its own mapping when initialized.
+ */
+
+ return 0;
+}
+
+/*
+ * Finalizes the initialization of hyp mode, once everything else is initialized
+ * and the initialization process cannot fail.
+ */
+static void finalize_init_hyp_mode(void)
+{
+ int cpu;
+
+ if (system_supports_sve() && is_protected_kvm_enabled()) {
+ for_each_possible_cpu(cpu) {
+ struct cpu_sve_state *sve_state;
+
+ sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+ per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
+ kern_hyp_va(sve_state);
+ }
+ } else {
+ for_each_possible_cpu(cpu) {
+ struct user_fpsimd_state *fpsimd_state;
+
+ fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
+ per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
+ kern_hyp_va(fpsimd_state);
+ }
+ }
+}
+
static void pkvm_hyp_init_ptrauth(void)
{
struct kvm_cpu_context *hyp_ctxt;
@@ -2566,6 +2631,10 @@ static int __init init_hyp_mode(void)
goto out_err;
}
+ err = init_pkvm_host_sve_state();
+ if (err)
+ goto out_err;
+
err = kvm_hyp_init_protection(hyp_va_bits);
if (err) {
kvm_err("Failed to init hyp memory protection\n");
@@ -2730,6 +2799,13 @@ static __init int kvm_arm_init(void)
if (err)
goto out_subs;
+ /*
+ * This should be called after initialization is done and failure isn't
+ * possible anymore.
+ */
+ if (!in_hyp_mode)
+ finalize_init_hyp_mode();
+
kvm_arm_initialised = true;
return 0;
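
pkvm_host_sve_state_order() above turns the per-CPU SVE buffer size into a page-allocation order for alloc_pages()/free_pages(): the order is the smallest n such that 2^n pages cover the size. A rough stand-in for that conversion, assuming a 4 KiB page (illustrative helper, not the kernel's get_order()):

    #define EX_PAGE_SIZE 4096UL             /* assumed page size */

    static int order_for_size(unsigned long size)
    {
            unsigned long span = EX_PAGE_SIZE;
            int order = 0;

            while (span < size) {
                    span <<= 1;
                    order++;
            }
            return order;                   /* 4 KiB -> 0, 8 KiB -> 1, 12 KiB -> 2 */
    }
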
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 72d733c74a38..54090967a335 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2181,16 +2181,23 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
if (forward_traps(vcpu, HCR_NV))
return;
+ spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
+ spsr = kvm_check_illegal_exception_return(vcpu, spsr);
+
/* Check for an ERETAx */
esr = kvm_vcpu_get_esr(vcpu);
if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
/*
- * Oh no, ERETAx failed to authenticate. If we have
- * FPACCOMBINE, deliver an exception right away. If we
- * don't, then let the mangled ELR value trickle down the
+ * Oh no, ERETAx failed to authenticate.
+ *
+ * If we have FPACCOMBINE and we don't have a pending
+ * Illegal Execution State exception (which has priority
+ * over FPAC), deliver an exception right away.
+ *
+ * Otherwise, let the mangled ELR value trickle down the
* ERET handling, and the guest will have a little surprise.
*/
- if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+ if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE) && !(spsr & PSR_IL_BIT)) {
esr &= ESR_ELx_ERET_ISS_ERETA;
esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
kvm_inject_nested_sync(vcpu, esr);
@@ -2201,17 +2208,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
preempt_disable();
kvm_arch_vcpu_put(vcpu);
- spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
- spsr = kvm_check_illegal_exception_return(vcpu, spsr);
if (!esr_iss_is_eretax(esr))
elr = __vcpu_sys_reg(vcpu, ELR_EL2);
trace_kvm_nested_eret(vcpu, elr, spsr);
- /*
- * Note that the current exception level is always the virtual EL2,
- * since we set HCR_EL2.NV bit only when entering the virtual EL2.
- */
*vcpu_pc(vcpu) = elr;
*vcpu_cpsr(vcpu) = spsr;
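
The rewritten ERETAx path reads SPSR_EL2 before deciding whether to inject an FPAC exception, because a pending Illegal Execution State (SPSR.IL set) takes priority over FPAC. A hedged sketch of that check; the bit position used here is the architectural PSTATE.IL bit and is stated as an assumption:

    #define EX_PSR_IL_BIT (1UL << 20)       /* assumed PSTATE.IL position */

    /* Inject FPAC only when no Illegal Execution State exception is pending. */
    static int should_inject_fpac(unsigned long spsr, int has_fpaccombine)
    {
            return has_fpaccombine && !(spsr & EX_PSR_IL_BIT);
    }
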
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 1807d3a79a8a..521b32868d0d 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -90,6 +90,13 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
fpsimd_save_and_flush_cpu_state();
}
}
+
+ /*
+ * If normal guests gain SME support, maintain this behavior for pKVM
+ * guests, which don't support SME.
+ */
+ WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
+ read_sysreg_s(SYS_SVCR));
}
/*
@@ -161,9 +168,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
- sysreg_clear_set(CPACR_EL1, 0,
- CPACR_EL1_SMEN_EL0EN |
- CPACR_EL1_SMEN_EL1EN);
+ sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
else
sysreg_clear_set(CPACR_EL1,
CPACR_EL1_SMEN_EL0EN,
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index e2f762d959bb..11098eb7eb44 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
+ case PSR_AA32_MODE_SYS:
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
@@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
int i, nr_reg;
- switch (*vcpu_cpsr(vcpu)) {
+ switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
/*
* Either we are dealing with user mode, and only the
* first 15 registers (+ PC) must be narrowed to 32bit.
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index 8d9670e6615d..449fa58cf3b6 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
u32 cpsr_cond;
int cond;
- /* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_esr(vcpu) >> 30)
+ /*
+ * These are the exception classes that could fire with a
+ * conditional instruction.
+ */
+ switch (kvm_vcpu_trap_get_class(vcpu)) {
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_LS:
+ case ESR_ELx_EC_FP_ASIMD:
+ case ESR_ELx_EC_CP10_ID:
+ case ESR_ELx_EC_CP14_64:
+ case ESR_ELx_EC_SVC32:
+ break;
+ default:
return true;
+ }
/* Is condition field valid? */
cond = kvm_vcpu_get_condition(vcpu);
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index 61e6f3ba7b7d..e950875e31ce 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -25,3 +25,9 @@ SYM_FUNC_START(__sve_restore_state)
sve_load 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+ mov x2, #1
+ sve_save 0, x1, x2, 3
+ ret
+SYM_FUNC_END(__sve_save_state)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a92566f36022..0c4de44534b7 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -316,10 +316,24 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
__sve_restore_state(vcpu_sve_pffr(vcpu),
- &vcpu->arch.ctxt.fp_regs.fpsr);
+ &vcpu->arch.ctxt.fp_regs.fpsr,
+ true);
write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}
+static inline void __hyp_sve_save_host(void)
+{
+ struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+ sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+ __sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+ &sve_state->fpsr,
+ true);
+}
+
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
+
/*
* We trap the first access to the FP/SIMD to save the host context and
* restore the guest context lazily.
@@ -330,7 +344,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
- u64 reg;
if (!system_supports_fpsimd())
return false;
@@ -353,24 +366,15 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
/* Valid trap. Switch the context: */
/* First disable enough traps to allow us to update the registers */
- if (has_vhe() || has_hvhe()) {
- reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
- if (sve_guest)
- reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
-
- sysreg_clear_set(cpacr_el1, 0, reg);
- } else {
- reg = CPTR_EL2_TFP;
- if (sve_guest)
- reg |= CPTR_EL2_TZ;
-
- sysreg_clear_set(cptr_el2, reg, 0);
- }
+ if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
+ cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+ else
+ cpacr_clear_set(0, CPACR_ELx_FPEN);
isb();
/* Write out the host state if it's in the registers */
if (host_owns_fp_regs())
- __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+ kvm_hyp_save_fpsimd_host(vcpu);
/* Restore the guest state */
if (sve_guest)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 51f043649146..f957890c7e38 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -52,11 +52,11 @@
* Supported by KVM
*/
#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, RAS, IMP) \
)
/*
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index 22f374e9f532..24a9a8330d19 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -59,7 +59,6 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
}
void pkvm_hyp_vm_table_init(void *tbl);
-void pkvm_host_fpsimd_state_init(void);
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
unsigned long pgd_hva);
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 02746f9d0980..efb053af331c 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -177,6 +177,14 @@ static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
res);
}
+static void ffa_rx_release(struct arm_smccc_res *res)
+{
+ arm_smccc_1_1_smc(FFA_RX_RELEASE,
+ 0, 0,
+ 0, 0, 0, 0, 0,
+ res);
+}
+
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
@@ -543,16 +551,19 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
if (WARN_ON(offset > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
ret = FFA_RET_ABORTED;
+ ffa_rx_release(res);
goto out_unlock;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
+ ffa_rx_release(res);
goto out_unlock;
}
buf = ffa_desc_buf.buf;
memcpy(buf, hyp_buffers.rx, fraglen);
+ ffa_rx_release(res);
for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
@@ -563,6 +574,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
fraglen = res->a3;
memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
+ ffa_rx_release(res);
}
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index d5c48dc98f67..f43d845f3c4e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
+{
+ __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+ /*
+ * On saving/restoring guest sve state, always use the maximum VL for
+ * the guest. The layout of the data when saving the sve state depends
+ * on the VL, so use a consistent (i.e., the maximum) guest VL.
+ */
+ sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+ __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+}
+
+static void __hyp_sve_restore_host(void)
+{
+ struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+ /*
+ * On saving/restoring host sve state, always use the maximum VL for
+ * the host. The layout of the data when saving the sve state depends
+ * on the VL, so use a consistent (i.e., the maximum) host VL.
+ *
+ * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
+ * supported by the system (or limited at EL3).
+ */
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+ __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+ &sve_state->fpsr,
+ true);
+ write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
+}
+
+static void fpsimd_sve_flush(void)
+{
+ *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
+static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
+{
+ if (!guest_owns_fp_regs())
+ return;
+
+ cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+ isb();
+
+ if (vcpu_has_sve(vcpu))
+ __hyp_sve_save_guest(vcpu);
+ else
+ __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
+
+ if (system_supports_sve())
+ __hyp_sve_restore_host();
+ else
+ __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+
+ *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+ fpsimd_sve_flush();
+
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
- hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+ /* Limit guest vector length to the maximum supported by the host. */
+ hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
- hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
@@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
unsigned int i;
+ fpsimd_sve_sync(&hyp_vcpu->vcpu);
+
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
- host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
@@ -79,6 +140,17 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
struct pkvm_hyp_vcpu *hyp_vcpu;
struct kvm *host_kvm;
+ /*
+ * KVM (and pKVM) doesn't support SME guests for now, and
+ * ensures that SME features aren't enabled in pstate when
+ * loading a vcpu. Therefore, if SME features are enabled, the
+ * host is misbehaving.
+ */
+ if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
host_kvm = kern_hyp_va(host_vcpu->kvm);
hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
host_vcpu->vcpu_idx);
@@ -405,11 +477,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
- if (has_hvhe())
- sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
- CPACR_EL1_ZEN_EL0EN));
- else
- sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+ cpacr_clear_set(0, CPACR_ELx_ZEN);
isb();
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
break;
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 16aa4875ddb8..187a5f4d56c0 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -18,6 +18,8 @@ unsigned long __icache_flags;
/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;
+unsigned int kvm_host_sve_max_vl;
+
/*
* Set trap register values based on features in ID_AA64PFR0.
*/
@@ -31,9 +33,9 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
/* Protected KVM does not support AArch32 guests. */
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL0_IMP);
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL1_IMP);
/*
* Linux guests assume support for floating-point and Advanced SIMD. Do
@@ -63,7 +65,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
/* Trap SVE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
if (has_hvhe())
- cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+ cptr_clear |= CPACR_ELx_ZEN;
else
cptr_set |= CPTR_EL2_TZ;
}
@@ -247,17 +249,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
vm_table = tbl;
}
-void pkvm_host_fpsimd_state_init(void)
-{
- unsigned long i;
-
- for (i = 0; i < hyp_nr_cpus; i++) {
- struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
-
- host_data->fpsimd_state = &host_data->host_ctxt.fp_regs;
- }
-}
-
/*
* Return the hyp vm structure corresponding to the handle.
*/
@@ -586,6 +577,8 @@ unlock:
if (ret)
unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+ hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
+
return ret;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 859f22f754d3..f4350ba07b0b 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
return 0;
}
+static int pkvm_create_host_sve_mappings(void)
+{
+ void *start, *end;
+ int ret, i;
+
+ if (!system_supports_sve())
+ return 0;
+
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
+ struct cpu_sve_state *sve_state = host_data->sve_state;
+
+ start = kern_hyp_va(sve_state);
+ end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
+ ret = pkvm_create_mappings(start, end, PAGE_HYP);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
unsigned long *per_cpu_base,
u32 hyp_va_bits)
@@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
return ret;
}
+ pkvm_create_host_sve_mappings();
+
/*
* Map the host sections RO in the hypervisor, but transfer the
* ownership from the host to the hypervisor itself to make sure they
@@ -300,7 +324,6 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
- pkvm_host_fpsimd_state_init();
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6758cd905570..6af179c6356d 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -48,15 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
if (cpus_have_final_cap(ARM64_SME)) {
if (has_hvhe())
- val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+ val &= ~CPACR_ELx_SMEN;
else
val |= CPTR_EL2_TSM;
}
if (!guest_owns_fp_regs()) {
if (has_hvhe())
- val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
- CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+ val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
else
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
@@ -182,6 +181,25 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_handle_pvm_sysreg(vcpu, exit_code));
}
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Non-protected kvm relies on the host restoring its sve state.
+ * Protected kvm restores the host's sve state as not to reveal that
+ * fpsimd was used by a guest nor leak upper sve bits.
+ */
+ if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
+ __hyp_sve_save_host();
+
+ /* Re-enable SVE traps if not supported for the guest vcpu. */
+ if (!vcpu_has_sve(vcpu))
+ cpacr_clear_set(CPACR_ELx_ZEN, 0);
+
+ } else {
+ __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+ }
+}
+
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index edd969a1f36b..2860548d4250 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -276,7 +276,7 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
* of AArch32 feature id registers.
*/
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_EL1_IMP);
return pvm_access_raz_wi(vcpu, p, r);
}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index d7af5f46f22a..8fbb6a2e0559 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -93,8 +93,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_ELx_TTA;
- val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
- CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
+ val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN);
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -109,9 +108,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
if (guest_owns_fp_regs()) {
if (vcpu_has_sve(vcpu))
- val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+ val |= CPACR_ELx_ZEN;
} else {
- val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+ val &= ~CPACR_ELx_FPEN;
__activate_traps_fpsimd32(vcpu);
}
@@ -262,6 +261,11 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
}
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+ __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+}
+
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 6813c7c7f00a..bae8536cbf00 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -58,8 +58,10 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
break;
case SYS_ID_AA64PFR1_EL1:
- /* Only support SSBS */
- val &= NV_FTR(PFR1, SSBS);
+ /* Only support BTI, SSBS, CSV2_frac */
+ val &= (NV_FTR(PFR1, BT) |
+ NV_FTR(PFR1, SSBS) |
+ NV_FTR(PFR1, CSV2_frac));
break;
case SYS_ID_AA64MMFR0_EL1:
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index a35ce10e0a9f..d1a476b08f54 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -14,7 +14,6 @@
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
-#include <asm/arm_pmuv3.h>
#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 1b7b58cb121f..3fc8ca164dbe 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;
+unsigned int __ro_after_init kvm_host_sve_max_vl;
/*
* ARMv8 Reset Values
@@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
{
if (system_supports_sve()) {
kvm_sve_max_vl = sve_max_virtualisable_vl();
+ kvm_host_sve_max_vl = sve_max_vl();
+ kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
/*
* The get_sve_reg()/set_sve_reg() ioctl interface will need
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 8f5b7a3e7009..7f68cf58b978 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -391,7 +391,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
} else {
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index a3983a631b5a..9e50928f5d7d 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -919,8 +919,19 @@ free:
return ret;
}
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
+ struct kvm_vcpu *vcpu;
+ unsigned long c;
+
+ lockdep_assert_held(&kvm->arch.config_lock);
+
+ /* Garbage collect the region */
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (vcpu->arch.vgic_cpu.rdreg == rdreg)
+ vcpu->arch.vgic_cpu.rdreg = NULL;
+ }
+
list_del(&rdreg->list);
kfree(rdreg);
}
@@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
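
The extra loop added to vgic_v3_free_redist_region() clears each vcpu's cached rdreg pointer before the region is freed, so no stale reference survives the kfree(). A generic sketch of that pattern, with made-up types standing in for the vgic structures:

    #include <stdlib.h>

    struct region { int index; };
    struct user   { struct region *ref; };

    /* Clear back-references first, then free: no user is left dangling. */
    static void free_region(struct user *users, int nr_users, struct region *reg)
    {
            for (int i = 0; i < nr_users; i++) {
                    if (users[i].ref == reg)
                            users[i].ref = NULL;
            }
            free(reg);
    }
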
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 6106ebd5ba42..03d356a12377 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -316,7 +316,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 9f9486de0004..a3edced29ac1 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -376,7 +376,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
* clearing access/dirty for the whole block.
*/
unsigned long start = addr;
- unsigned long end = start + nr;
+ unsigned long end = start + nr * PAGE_SIZE;
if (pte_cont(__ptep_get(ptep + nr - 1)))
end = ALIGN(end, CONT_PTE_SIZE);
@@ -386,7 +386,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
ptep = contpte_align_down(ptep);
}
- __clear_young_dirty_ptes(vma, start, ptep, end - start, flags);
+ __clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
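
The contpte fix is a unit-conversion bug: nr counts PTEs while start/end are byte addresses, so the range must be scaled by PAGE_SIZE going in and divided back before calling __clear_young_dirty_ptes(). A reduced sketch of that arithmetic, assuming a 4 KiB page and a 16-entry contpte block, and widening unconditionally where the kernel widens only when the edge PTEs are contiguous:

    #define EX_PAGE_SIZE       4096UL
    #define EX_CONT_PTE_SIZE   (16 * EX_PAGE_SIZE)
    #define EX_ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1UL))

    /* nr is a PTE count; start/end are the byte addresses those PTEs map. */
    static unsigned int ptes_to_clear(unsigned long addr, unsigned int nr)
    {
            unsigned long start = addr;
            unsigned long end = start + nr * EX_PAGE_SIZE;

            start &= ~(EX_CONT_PTE_SIZE - 1);
            end = EX_ALIGN(end, EX_CONT_PTE_SIZE);

            return (end - start) / EX_PAGE_SIZE;    /* back to a PTE count */
    }
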
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c927e9312f10..353ea5dc32b8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -124,7 +124,8 @@ bool pgattr_change_is_safe(u64 old, u64 new)
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
- pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+ pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG |
+ PTE_SWBITS_MASK;
/* creating or taking down mappings is always safe */
if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 720336d28856..dd0bb069df4b 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1244,6 +1244,13 @@ emit_cond_jmp:
break;
}
+ /* Implement helper call to bpf_get_current_task/_btf() inline */
+ if (insn->src_reg == 0 && (insn->imm == BPF_FUNC_get_current_task ||
+ insn->imm == BPF_FUNC_get_current_task_btf)) {
+ emit(A64_MRS_SP_EL0(r0), ctx);
+ break;
+ }
+
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&func_addr, &func_addr_fixed);
if (ret < 0)
@@ -1829,8 +1836,7 @@ skip_init_ctx:
prog->jited_len = 0;
goto out_free_hdr;
}
- if (WARN_ON(bpf_jit_binary_pack_finalize(prog, ro_header,
- header))) {
+ if (WARN_ON(bpf_jit_binary_pack_finalize(ro_header, header))) {
/* ro_header has been freed */
ro_header = NULL;
prog = orig_prog;
@@ -2141,7 +2147,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+ emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
emit_call((const u64)__bpf_tramp_enter, ctx);
}
@@ -2185,7 +2191,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = ctx->ro_image + ctx->idx;
- emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+ emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
emit_call((const u64)__bpf_tramp_exit, ctx);
}
@@ -2581,6 +2587,8 @@ bool bpf_jit_inlines_helper_call(s32 imm)
{
switch (imm) {
case BPF_FUNC_get_smp_processor_id:
+ case BPF_FUNC_get_current_task:
+ case BPF_FUNC_get_current_task_btf:
return true;
default:
return false;
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile
index fa2251d9762d..c2b34e761006 100644
--- a/arch/arm64/tools/Makefile
+++ b/arch/arm64/tools/Makefile
@@ -3,12 +3,16 @@
gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
-kapi-hdrs-y := $(kapi)/cpucap-defs.h $(kapi)/sysreg-defs.h
+kapisyshdr-y := cpucap-defs.h sysreg-defs.h
+
+kapi-hdrs-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
targets += $(addprefix ../../../, $(kapi-hdrs-y))
PHONY += kapi
+all: $(syscall64) kapi
+
kapi: $(kapi-hdrs-y)
quiet_cmd_gen_cpucaps = GEN $@
diff --git a/arch/arm64/tools/syscall_32.tbl b/arch/arm64/tools/syscall_32.tbl
new file mode 100644
index 000000000000..9a37930d4e26
--- /dev/null
+++ b/arch/arm64/tools/syscall_32.tbl
@@ -0,0 +1,476 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AArch32 (compat) system call definitions.
+#
+# Copyright (C) 2001-2005 Russell King
+# Copyright (C) 2012 ARM Ltd.
+#
+# This file corresponds to arch/arm/tools/syscall.tbl
+# for the native EABI syscalls and should be kept in sync.
+# Instead of the OABI syscalls, it contains pointers to
+# the compat entry points where they differ from the native
+# syscalls.
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+# 7 was sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve compat_sys_execve
+12 common chdir sys_chdir
+# 13 was sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown16
+# 17 was sys_break
+# 18 was sys_stat
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+# 22 was sys_umount
+23 common setuid sys_setuid16
+24 common getuid sys_getuid16
+# 25 was sys_stime
+26 common ptrace sys_ptrace compat_sys_ptrace
+# 27 was sys_alarm
+# 28 was sys_fstat
+29 common pause sys_pause
+# 30 was sys_utime
+# 31 was sys_stty
+# 32 was sys_gtty
+33 common access sys_access
+34 common nice sys_nice
+# 35 was sys_ftime
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times compat_sys_times
+# 44 was sys_prof
+45 common brk sys_brk
+46 common setgid sys_setgid16
+47 common getgid sys_getgid16
+# 48 was sys_signal
+49 common geteuid sys_geteuid16
+50 common getegid sys_getegid16
+51 common acct sys_acct
+52 common umount2 sys_umount
+# 53 was sys_lock
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+# 56 was sys_mpx
+57 common setpgid sys_setpgid
+# 58 was sys_ulimit
+# 59 was sys_olduname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction compat_sys_sigaction
+# 68 was sys_sgetmask
+# 69 was sys_ssetmask
+70 common setreuid sys_setreuid16
+71 common setregid sys_setregid16
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending compat_sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+# 76 was compat_sys_getrlimit
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 common getgroups sys_getgroups16
+81 common setgroups sys_setgroups16
+# 82 was compat_sys_select
+83 common symlink sys_symlink
+# 84 was sys_lstat
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+# 89 was sys_readdir
+# 90 was sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown16
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+# 98 was sys_profil
+99 common statfs sys_statfs compat_sys_statfs
+100 common fstatfs sys_fstatfs compat_sys_fstatfs
+# 101 was sys_ioperm
+# 102 was sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common stat sys_newstat compat_sys_newstat
+107 common lstat sys_newlstat compat_sys_newlstat
+108 common fstat sys_newfstat compat_sys_newfstat
+# 109 was sys_uname
+# 110 was sys_iopl
+111 common vhangup sys_vhangup
+# 112 was sys_idle
+# 113 was sys_syscall
+114 common wait4 sys_wait4 compat_sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+# 117 was sys_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn_wrapper compat_sys_sigreturn
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+# 123 was sys_modify_ldt
+124 common adjtimex sys_adjtimex_time32
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
+# 127 was sys_create_module
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+# 130 was sys_get_kernel_syms
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_ni_syscall
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+# 137 was sys_afs_syscall
+138 common setfsuid sys_setfsuid16
+139 common setfsgid sys_setfsgid16
+140 common _llseek sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 common _newselect sys_select compat_sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid16
+165 common getresuid sys_getresuid16
+# 166 was sys_vm86
+# 167 was sys_query_module
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+170 common setresgid sys_setresgid16
+171 common getresgid sys_getresgid16
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn_wrapper compat_sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+180 common pread64 sys_pread64 compat_sys_aarch32_pread64
+181 common pwrite64 sys_pwrite64 compat_sys_aarch32_pwrite64
+182 common chown sys_chown16
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+187 common sendfile sys_sendfile compat_sys_sendfile
+# 188 reserved
+# 189 reserved
+190 common vfork sys_vfork
+# SuS compliant getrlimit
+191 common ugetrlimit sys_getrlimit compat_sys_getrlimit
+192 common mmap2 sys_mmap2 compat_sys_aarch32_mmap2
+193 common truncate64 sys_truncate64 compat_sys_aarch32_truncate64
+194 common ftruncate64 sys_ftruncate64 compat_sys_aarch32_ftruncate64
+195 common stat64 sys_stat64
+196 common lstat64 sys_lstat64
+197 common fstat64 sys_fstat64
+198 common lchown32 sys_lchown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common chown32 sys_chown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common getdents64 sys_getdents64
+218 common pivot_root sys_pivot_root
+219 common mincore sys_mincore
+220 common madvise sys_madvise
+221 common fcntl64 sys_fcntl64 compat_sys_fcntl64
+# 222 for tux
+# 223 is unused
+224 common gettid sys_gettid
+225 common readahead sys_readahead compat_sys_aarch32_readahead
+226 common setxattr sys_setxattr
+227 common lsetxattr sys_lsetxattr
+228 common fsetxattr sys_fsetxattr
+229 common getxattr sys_getxattr
+230 common lgetxattr sys_lgetxattr
+231 common fgetxattr sys_fgetxattr
+232 common listxattr sys_listxattr
+233 common llistxattr sys_llistxattr
+234 common flistxattr sys_flistxattr
+235 common removexattr sys_removexattr
+236 common lremovexattr sys_lremovexattr
+237 common fremovexattr sys_fremovexattr
+238 common tkill sys_tkill
+239 common sendfile64 sys_sendfile64
+240 common futex sys_futex_time32
+241 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+242 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+243 common io_setup sys_io_setup compat_sys_io_setup
+244 common io_destroy sys_io_destroy
+245 common io_getevents sys_io_getevents_time32
+246 common io_submit sys_io_submit compat_sys_io_submit
+247 common io_cancel sys_io_cancel
+248 common exit_group sys_exit_group
+249 common lookup_dcookie sys_ni_syscall
+250 common epoll_create sys_epoll_create
+251 common epoll_ctl sys_epoll_ctl
+252 common epoll_wait sys_epoll_wait
+253 common remap_file_pages sys_remap_file_pages
+# 254 for set_thread_area
+# 255 for get_thread_area
+256 common set_tid_address sys_set_tid_address
+257 common timer_create sys_timer_create compat_sys_timer_create
+258 common timer_settime sys_timer_settime32
+259 common timer_gettime sys_timer_gettime32
+260 common timer_getoverrun sys_timer_getoverrun
+261 common timer_delete sys_timer_delete
+262 common clock_settime sys_clock_settime32
+263 common clock_gettime sys_clock_gettime32
+264 common clock_getres sys_clock_getres_time32
+265 common clock_nanosleep sys_clock_nanosleep_time32
+266 common statfs64 sys_statfs64_wrapper compat_sys_aarch32_statfs64
+267 common fstatfs64 sys_fstatfs64_wrapper compat_sys_aarch32_fstatfs64
+268 common tgkill sys_tgkill
+269 common utimes sys_utimes_time32
+270 common arm_fadvise64_64 sys_arm_fadvise64_64 compat_sys_aarch32_fadvise64_64
+271 common pciconfig_iobase sys_pciconfig_iobase
+272 common pciconfig_read sys_pciconfig_read
+273 common pciconfig_write sys_pciconfig_write
+274 common mq_open sys_mq_open compat_sys_mq_open
+275 common mq_unlink sys_mq_unlink
+276 common mq_timedsend sys_mq_timedsend_time32
+277 common mq_timedreceive sys_mq_timedreceive_time32
+278 common mq_notify sys_mq_notify compat_sys_mq_notify
+279 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+280 common waitid sys_waitid compat_sys_waitid
+281 common socket sys_socket
+282 common bind sys_bind
+283 common connect sys_connect
+284 common listen sys_listen
+285 common accept sys_accept
+286 common getsockname sys_getsockname
+287 common getpeername sys_getpeername
+288 common socketpair sys_socketpair
+289 common send sys_send
+290 common sendto sys_sendto
+291 common recv sys_recv compat_sys_recv
+292 common recvfrom sys_recvfrom compat_sys_recvfrom
+293 common shutdown sys_shutdown
+294 common setsockopt sys_setsockopt
+295 common getsockopt sys_getsockopt
+296 common sendmsg sys_sendmsg compat_sys_sendmsg
+297 common recvmsg sys_recvmsg compat_sys_recvmsg
+298 common semop sys_semop
+299 common semget sys_semget
+300 common semctl sys_old_semctl compat_sys_old_semctl
+301 common msgsnd sys_msgsnd compat_sys_msgsnd
+302 common msgrcv sys_msgrcv compat_sys_msgrcv
+303 common msgget sys_msgget
+304 common msgctl sys_old_msgctl compat_sys_old_msgctl
+305 common shmat sys_shmat compat_sys_shmat
+306 common shmdt sys_shmdt
+307 common shmget sys_shmget
+308 common shmctl sys_old_shmctl compat_sys_old_shmctl
+309 common add_key sys_add_key
+310 common request_key sys_request_key
+311 common keyctl sys_keyctl compat_sys_keyctl
+312 common semtimedop sys_semtimedop_time32
+313 common vserver sys_ni_syscall
+314 common ioprio_set sys_ioprio_set
+315 common ioprio_get sys_ioprio_get
+316 common inotify_init sys_inotify_init
+317 common inotify_add_watch sys_inotify_add_watch
+318 common inotify_rm_watch sys_inotify_rm_watch
+319 common mbind sys_mbind
+320 common get_mempolicy sys_get_mempolicy
+321 common set_mempolicy sys_set_mempolicy
+322 common openat sys_openat compat_sys_openat
+323 common mkdirat sys_mkdirat
+324 common mknodat sys_mknodat
+325 common fchownat sys_fchownat
+326 common futimesat sys_futimesat_time32
+327 common fstatat64 sys_fstatat64
+328 common unlinkat sys_unlinkat
+329 common renameat sys_renameat
+330 common linkat sys_linkat
+331 common symlinkat sys_symlinkat
+332 common readlinkat sys_readlinkat
+333 common fchmodat sys_fchmodat
+334 common faccessat sys_faccessat
+335 common pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+336 common ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+337 common unshare sys_unshare
+338 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+339 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+340 common splice sys_splice
+341 common arm_sync_file_range sys_sync_file_range2 compat_sys_aarch32_sync_file_range2
+342 common tee sys_tee
+343 common vmsplice sys_vmsplice
+344 common move_pages sys_move_pages
+345 common getcpu sys_getcpu
+346 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+347 common kexec_load sys_kexec_load compat_sys_kexec_load
+348 common utimensat sys_utimensat_time32
+349 common signalfd sys_signalfd compat_sys_signalfd
+350 common timerfd_create sys_timerfd_create
+351 common eventfd sys_eventfd
+352 common fallocate sys_fallocate compat_sys_aarch32_fallocate
+353 common timerfd_settime sys_timerfd_settime32
+354 common timerfd_gettime sys_timerfd_gettime32
+355 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+356 common eventfd2 sys_eventfd2
+357 common epoll_create1 sys_epoll_create1
+358 common dup3 sys_dup3
+359 common pipe2 sys_pipe2
+360 common inotify_init1 sys_inotify_init1
+361 common preadv sys_preadv compat_sys_preadv
+362 common pwritev sys_pwritev compat_sys_pwritev
+363 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+364 common perf_event_open sys_perf_event_open
+365 common recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+366 common accept4 sys_accept4
+367 common fanotify_init sys_fanotify_init
+368 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+369 common prlimit64 sys_prlimit64
+370 common name_to_handle_at sys_name_to_handle_at
+371 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+372 common clock_adjtime sys_clock_adjtime32
+373 common syncfs sys_syncfs
+374 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+375 common setns sys_setns
+376 common process_vm_readv sys_process_vm_readv
+377 common process_vm_writev sys_process_vm_writev
+378 common kcmp sys_kcmp
+379 common finit_module sys_finit_module
+380 common sched_setattr sys_sched_setattr
+381 common sched_getattr sys_sched_getattr
+382 common renameat2 sys_renameat2
+383 common seccomp sys_seccomp
+384 common getrandom sys_getrandom
+385 common memfd_create sys_memfd_create
+386 common bpf sys_bpf
+387 common execveat sys_execveat compat_sys_execveat
+388 common userfaultfd sys_userfaultfd
+389 common membarrier sys_membarrier
+390 common mlock2 sys_mlock2
+391 common copy_file_range sys_copy_file_range
+392 common preadv2 sys_preadv2 compat_sys_preadv2
+393 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+394 common pkey_mprotect sys_pkey_mprotect
+395 common pkey_alloc sys_pkey_alloc
+396 common pkey_free sys_pkey_free
+397 common statx sys_statx
+398 common rseq sys_rseq
+399 common io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+400 common migrate_pages sys_migrate_pages
+401 common kexec_file_load sys_kexec_file_load
+# 402 is unused
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 common ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 common io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 common recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
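
Each row above is <number> <abi> <name> <native entry> [<compat entry>]; the generated asm/syscall_table_32.h turns rows into __SYSCALL_WITH_COMPAT() invocations, and the redefinitions in sys32.c select the compat column when building compat_sys_call_table[]. A compile-time sketch of that expansion for row 5 ("open"), assuming the generator emits one macro invocation per row:

    struct pt_regs;
    typedef long (*syscall_fn_t)(const struct pt_regs *);

    long __arm64_sys_open(const struct pt_regs *);          /* native entry */
    long __arm64_compat_sys_open(const struct pt_regs *);   /* compat entry */

    /* The sys32.c definitions pick the compat column: */
    #define __SYSCALL_WITH_COMPAT(nr, sym, compat)  __SYSCALL(nr, compat)
    #define __SYSCALL(nr, sym)                      [nr] = __arm64_##sym,

    /* Assumed generated form of row 5 of syscall_32.tbl: */
    static const syscall_fn_t example_compat_table[16] = {
            __SYSCALL_WITH_COMPAT(5, sys_open, compat_sys_open)
    };
    /* example_compat_table[5] now points at __arm64_compat_sys_open. */
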
diff --git a/arch/arm64/tools/syscall_64.tbl b/arch/arm64/tools/syscall_64.tbl
new file mode 120000
index 000000000000..63135cf34b65
--- /dev/null
+++ b/arch/arm64/tools/syscall_64.tbl
@@ -0,0 +1 @@
+../../../scripts/syscall.tbl
\ No newline at end of file
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 1117c28cb7e8..9a9bc65b57a9 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y := syscall_table_32.h
+
generic-y += asm-offsets.h
generic-y += extable.h
-generic-y += gpio.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
index 9cf97de9a26d..2c2c24de95d8 100644
--- a/arch/csky/include/asm/unistd.h
+++ b/arch/csky/include/asm/unistd.h
@@ -2,4 +2,7 @@
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/csky/include/uapi/asm/Kbuild
+++ b/arch/csky/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index 7ff6a2466af1..44882179a6e1 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -1,14 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_TIME32_SYSCALLS
-#include <asm-generic/unistd.h>
+#include <asm/unistd_32.h>
-#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
-__SYSCALL(__NR_set_thread_area, sys_set_thread_area)
-#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_cacheflush, sys_cacheflush)
+#define __NR_sync_file_range2 84
+#undef __NR_sync_file_range
diff --git a/arch/csky/kernel/Makefile.syscalls b/arch/csky/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..3df3b5822fce
--- /dev/null
+++ b/arch/csky/kernel/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += csky time32 stat64 rlimit
+
diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c
index 3d30e58a45d2..4540a271ee39 100644
--- a/arch/csky/kernel/syscall.c
+++ b/arch/csky/kernel/syscall.c
@@ -20,7 +20,7 @@ SYSCALL_DEFINE6(mmap2,
unsigned long, prot,
unsigned long, flags,
unsigned long, fd,
- off_t, offset)
+ unsigned long, offset)
{
if (unlikely(offset & (~PAGE_MASK >> 12)))
return -EINVAL;
diff --git a/arch/csky/kernel/syscall_table.c b/arch/csky/kernel/syscall_table.c
index a0c238c5377a..a6eb91a0e2f6 100644
--- a/arch/csky/kernel/syscall_table.c
+++ b/arch/csky/kernel/syscall_table.c
@@ -6,9 +6,11 @@
#undef __SYSCALL
#define __SYSCALL(nr, call)[nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define sys_fadvise64_64 sys_csky_fadvise64_64
+#define sys_sync_file_range sys_sync_file_range2
void * const sys_call_table[__NR_syscalls] __page_aligned_data = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
};
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 3ece3c93fe08..8c1a78c8f527 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
generic-y += extable.h
generic-y += iomap.h
generic-y += kvm_para.h
diff --git a/arch/hexagon/include/asm/syscalls.h b/arch/hexagon/include/asm/syscalls.h
new file mode 100644
index 000000000000..40f2d08bec92
--- /dev/null
+++ b/arch/hexagon/include/asm/syscalls.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm-generic/syscalls.h>
+
+asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
+ u32 a2, u32 a3, u32 a4, u32 a5);
diff --git a/arch/hexagon/include/asm/unistd.h b/arch/hexagon/include/asm/unistd.h
new file mode 100644
index 000000000000..1f462bade75c
--- /dev/null
+++ b/arch/hexagon/include/asm/unistd.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define __ARCH_BROKEN_SYS_CLONE3
+
+#include <uapi/asm/unistd.h>
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
index 432c4db1b623..a3b0cac25580 100644
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -27,14 +27,7 @@
* See also: syscalltab.c
*/
-#define sys_mmap2 sys_mmap_pgoff
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_TIME32_SYSCALLS
+#include <asm/unistd_32.h>
-#include <asm-generic/unistd.h>
+#define __NR_sync_file_range2 84
+#undef __NR_sync_file_range
diff --git a/arch/hexagon/kernel/Makefile.syscalls b/arch/hexagon/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..d2b7c5d44d95
--- /dev/null
+++ b/arch/hexagon/kernel/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += hexagon time32 stat64 rlimit renameat
diff --git a/arch/hexagon/kernel/syscalltab.c b/arch/hexagon/kernel/syscalltab.c
index 0fadd582cfc7..b53e2eead4ac 100644
--- a/arch/hexagon/kernel/syscalltab.c
+++ b/arch/hexagon/kernel/syscalltab.c
@@ -11,9 +11,20 @@
#include <asm/syscall.h>
-#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
+#define sys_mmap2 sys_mmap_pgoff
+
+SYSCALL_DEFINE6(hexagon_fadvise64_64, int, fd, int, advice,
+ SC_ARG64(offset), SC_ARG64(len))
+{
+ return ksys_fadvise64_64(fd, SC_VAL64(loff_t, offset), SC_VAL64(loff_t, len), advice);
+}
+#define sys_fadvise64_64 sys_hexagon_fadvise64_64
+
+#define sys_sync_file_range sys_sync_file_range2
void *sys_call_table[__NR_syscalls] = {
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
};
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index e38139c576ee..ddc042895d01 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -143,7 +143,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -261,6 +261,9 @@ config AS_HAS_EXPLICIT_RELOCS
config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
+config AS_HAS_THIN_ADD_SUB
+ def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
+
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
index 98d60630c3d4..8b2ce5b5d43e 100644
--- a/arch/loongarch/Kconfig.debug
+++ b/arch/loongarch/Kconfig.debug
@@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
config UNWINDER_ORC
bool "ORC unwinder"
+ depends on HAVE_OBJTOOL
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for
diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
index 8aefb0c12672..a34734a6c3ce 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
@@ -44,14 +44,14 @@
&gmac0 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
bus_id = <0x0>;
};
&gmac1 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
bus_id = <0x1>;
};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index 8463fe035386..23cf26cc3e5f 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -43,7 +43,7 @@
&gmac0 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy0>;
mdio {
compatible = "snps,dwmac-mdio";
@@ -58,7 +58,7 @@
&gmac1 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy1>;
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
index 74b99bd234cc..ea9e6985d0e9 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -92,7 +92,7 @@
&gmac2 {
status = "okay";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&phy2>;
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index c862672ed953..2bb3676429c0 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,28 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_64.h
generated-y += orc_hash.h
-generic-y += dma-contiguous.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += rwsem.h
-generic-y += segment.h
generic-y += user.h
-generic-y += stat.h
-generic-y += fcntl.h
generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
generic-y += statfs.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += poll.h
generic-y += param.h
-generic-y += posix_types.h
-generic-y += resource.h
diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
index 21447fb1efc7..d78330916bd1 100644
--- a/arch/loongarch/include/asm/hw_breakpoint.h
+++ b/arch/loongarch/include/asm/hw_breakpoint.h
@@ -75,6 +75,8 @@ do { \
#define CSR_MWPC_NUM 0x3f
#define CTRL_PLV_ENABLE 0x1e
+#define CTRL_PLV0_ENABLE 0x02
+#define CTRL_PLV3_ENABLE 0x10
#define MWPnCFG3_LoadEn 8
#define MWPnCFG3_StoreEn 9
@@ -101,7 +103,7 @@ struct perf_event;
struct perf_event_attr;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset);
+ int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
index 27f319b49862..b5f9de9f102e 100644
--- a/arch/loongarch/include/asm/numa.h
+++ b/arch/loongarch/include/asm/numa.h
@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
static inline void early_numa_add_cpu(int cpuid, s16 node) { }
static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void set_cpuid_to_node(int cpuid, s16 node) { }
static inline int early_cpu_to_node(int cpu)
{
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 45b507a7b06f..d9eafd3ee3d1 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -42,7 +42,7 @@
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
pcaddi \temp2, 0
- or \temp1, \temp1, \temp2
+ bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm
diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h
index cfddb0116a8c..fc0a481a7416 100644
--- a/arch/loongarch/include/asm/unistd.h
+++ b/arch/loongarch/include/asm/unistd.h
@@ -8,4 +8,6 @@
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_SYS_CLONE
+
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
index 4aa680ca2e5f..c6d141d7b7d7 100644
--- a/arch/loongarch/include/uapi/asm/Kbuild
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_64.h
+
generic-y += kvm_para.h
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
index fcb668984f03..1f01980f9c94 100644
--- a/arch/loongarch/include/uapi/asm/unistd.h
+++ b/arch/loongarch/include/uapi/asm/unistd.h
@@ -1,5 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#include <asm-generic/unistd.h>
+#include <asm/unistd_64.h>
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..ab7d9baa2915
--- /dev/null
+++ b/arch/loongarch/kernel/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# No special ABIs on loongarch so far
+syscall_abis_64 +=
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index c4f7de2e2805..4677ea8fa8e9 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -22,7 +22,7 @@
_head:
.word MZ_MAGIC /* "MZ", MS-DOS header */
.org 0x8
- .dword kernel_entry /* Kernel entry point */
+ .dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
.quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x38 /* 0x20 ~ 0x37 reserved */
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index fc55c4de2a11..621ad7634df7 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
static int hw_breakpoint_control(struct perf_event *bp,
enum hw_breakpoint_ops ops)
{
- u32 ctrl;
+ u32 ctrl, privilege;
int i, max_slots, enable;
+ struct pt_regs *regs;
struct perf_event **slots;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ if (arch_check_bp_in_kernelspace(info))
+ privilege = CTRL_PLV0_ENABLE;
+ else
+ privilege = CTRL_PLV3_ENABLE;
+
+ /* Whether bp belongs to a task. */
+ if (bp->hw.target)
+ regs = task_pt_regs(bp->hw.target);
+
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
/* Breakpoint */
slots = this_cpu_ptr(bp_on_reg);
@@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
switch (ops) {
case HW_BREAKPOINT_INSTALL:
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
- write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
- write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
- write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
- write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
} else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
+ if (bp->hw.target)
+ regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
- write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
- write_wb_reg(CSR_CFG_MASK, i, 0, 0);
- write_wb_reg(CSR_CFG_MASK, i, 1, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ } else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ }
+ if (bp->hw.target)
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
break;
}
@@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset)
+ int *gen_len, int *gen_type)
{
/* Type */
switch (ctrl.type) {
@@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
- if (!ctrl.len)
- return -EINVAL;
-
- *offset = __ffs(ctrl.len);
-
/* Len */
switch (ctrl.len) {
case LOONGARCH_BREAKPOINT_LEN_1:
@@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
struct arch_hw_breakpoint *hw)
{
int ret;
- u64 alignment_mask, offset;
+ u64 alignment_mask;
/* Build the arch_hw_breakpoint. */
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
- if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
- alignment_mask = 0x7;
- else
+ if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
alignment_mask = 0x3;
- offset = hw->address & alignment_mask;
-
- hw->address &= ~alignment_mask;
- hw->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ }
return 0;
}
@@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(bp_on_reg);
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
- bp = slots[i];
- if (bp == NULL)
- continue;
- perf_bp_event(bp, regs);
+ if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
+ bp = slots[i];
+ if (bp == NULL)
+ continue;
+ perf_bp_event(bp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
+ update_bp_registers(regs, 0, 0);
+ }
}
- update_bp_registers(regs, 0, 0);
}
NOKPROBE_SYMBOL(breakpoint_handler);
@@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
- wp = slots[i];
- if (wp == NULL)
- continue;
- perf_bp_event(wp, regs);
+ if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
+ wp = slots[i];
+ if (wp == NULL)
+ continue;
+ perf_bp_event(wp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
+ update_bp_registers(regs, 0, 1);
+ }
}
- update_bp_registers(regs, 0, 1);
}
NOKPROBE_SYMBOL(watchpoint_handler);
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index c114c5ef1332..200109de1971 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type, offset;
+ int err, len, type;
- err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
+ err = arch_bp_generic_fields(ctrl, &len, &type);
if (err)
return err;
- switch (note_type) {
- case NT_LOONGARCH_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_LOONGARCH_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
attr->bp_len = len;
attr->bp_type = type;
- attr->bp_addr += offset;
return 0;
}
@@ -609,10 +595,27 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
return PTR_ERR(bp);
attr = bp->attr;
- decode_ctrl_reg(uctrl, &ctrl);
- err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
- if (err)
- return err;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
+ ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ decode_ctrl_reg(uctrl, &ctrl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (uctrl & CTRL_PLV_ENABLE) {
+ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+ if (err)
+ return err;
+ attr.disabled = 0;
+ } else {
+ attr.disabled = 1;
+ }
return modify_user_hw_breakpoint(bp, &attr);
}
@@ -643,6 +646,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
+ /* Kernel-space address cannot be monitored by user-space */
+ if ((unsigned long)addr >= XKPRANGE)
+ return -EINVAL;
+
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 60e0fe97f61a..3d048f1be143 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -282,7 +282,7 @@ static void __init fdt_setup(void)
return;
/* Prefer to use built-in dtb, checking its legality first. */
- if (!fdt_check_header(__dtb_start))
+ if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
fdt_pointer = __dtb_start;
else
fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
@@ -351,10 +351,8 @@ void __init platform_init(void)
arch_reserve_vmcore();
arch_reserve_crashkernel();
-#ifdef CONFIG_ACPI_TABLE_UPGRADE
- acpi_table_upgrade();
-#endif
#ifdef CONFIG_ACPI
+ acpi_table_upgrade();
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 0dfe2388ef41..1436d2465939 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -273,7 +273,6 @@ static void __init fdt_smp_setup(void)
if (cpuid == loongson_sysconf.boot_cpu_id) {
cpu = 0;
- numa_add_cpu(cpu);
} else {
cpu = cpumask_next_zero(-1, cpu_present_mask);
}
@@ -283,6 +282,9 @@ static void __init fdt_smp_setup(void)
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
+
+ early_numa_add_cpu(cpu, 0);
+ set_cpuid_to_node(cpuid, 0);
}
loongson_sysconf.nr_cpus = num_processors;
@@ -468,6 +470,7 @@ void smp_prepare_boot_cpu(void)
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
+ numa_add_cpu(0);
rr_node = first_node(node_online_map);
for_each_possible_cpu(cpu) {
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index b4c5acd7aa3b..ec17cd5163b7 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -20,9 +20,10 @@
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
- prot, unsigned long, flags, unsigned long, fd, off_t, offset)
+ prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;
@@ -32,7 +33,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_64.h>
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index e8e97dbf9ca4..3c7595342730 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
+#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -142,10 +143,11 @@ SECTIONS
#ifdef CONFIG_EFI_STUB
/* header symbols */
- _kernel_asize = _end - _text;
- _kernel_fsize = _edata - _text;
- _kernel_vsize = _end - __initdata_begin;
- _kernel_rsize = _edata - __initdata_begin;
+ _kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
+ _kernel_asize = ABSOLUTE(_end - _text);
+ _kernel_fsize = ABSOLUTE(_edata - _text);
+ _kernel_vsize = ABSOLUTE(_end - __initdata_begin);
+ _kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
#endif
.gptab.sdata : {
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index c86e099af5ca..a68573e091c0 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -761,7 +761,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
default:
ret = KVM_HCALL_INVALID_CODE;
break;
- };
+ }
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index d4b170c861bf..0147130dc34e 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -180,6 +180,15 @@ int __init amiga_parse_bootinfo(const struct bi_record *record)
dev->slotsize = be16_to_cpu(cd->cd_SlotSize);
dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr);
dev->boardsize = be32_to_cpu(cd->cd_BoardSize);
+
+ /* CS-LAB Warp 1260 workaround */
+ if (be16_to_cpu(dev->rom.er_Manufacturer) == ZORRO_MANUF(ZORRO_PROD_CSLAB_WARP_1260) &&
+ dev->rom.er_Product == ZORRO_PROD(ZORRO_PROD_CSLAB_WARP_1260)) {
+
+ /* turn off all interrupts */
+ pr_info("Warp 1260 card detected: applying interrupt storm workaround\n");
+ *(uint32_t *)(dev->boardaddr + 0x1000) = 0xfff;
+ }
} else
pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n");
#endif /* CONFIG_ZORRO */
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 23256434191c..0465444ceb21 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -301,11 +301,7 @@ void __init atari_init_IRQ(void)
if (ATARIHW_PRESENT(SCU)) {
/* init the SCU if present */
- tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and
- * disable HSYNC interrupts (who
- * needs them?) MFP and SCC are
- * enabled in VME mask
- */
+ tt_scu.sys_mask = 0x0; /* disable all interrupts */
tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */
} else {
/* If no SCU and no Hades, the HSYNC interrupt needs to be
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 6bb202d03d34..9a084943a68a 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -373,6 +373,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 3598e0cc66b0..58ec725bf392 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -353,6 +353,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 3fbb8a9a307d..63ab7e892e59 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -368,6 +368,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 9a2b0c7871ce..ca40744eec60 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -351,6 +351,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 2408785e0e48..77bcc6faf468 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -352,6 +352,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 6789d03b7b52..e73d4032b659 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -364,6 +364,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b57af39db3b4..638df8442c98 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -407,6 +407,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 29b70f27aedd..2248db426081 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -350,6 +350,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 757c582ffe84..2975b66521f6 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -351,6 +351,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index f15d1009108a..0a0e32344033 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -357,6 +357,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 5218c1e614b5..f16f1a99c2ba 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -347,6 +347,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index acdf4eb7af28..667bd61ae9b3 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -348,6 +348,7 @@ CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_BAREUDP=m
CONFIG_GTP=m
+CONFIG_PFCP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 642fb80c5c4e..83410f8184ec 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -71,7 +71,7 @@ static void nfhd_submit_bio(struct bio *bio)
len = bvec.bv_len;
len >>= 9;
nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
- page_to_phys(bvec.bv_page) + bvec.bv_offset);
+ bvec_phys(&bvec));
sec += len;
}
bio_endio(bio);
@@ -98,6 +98,7 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
{
struct queue_limits lim = {
.logical_block_size = bsize,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct nfhd_device *dev;
int dev_id = id - NFHD_DEV_OFFSET;
@@ -193,4 +194,5 @@ static void __exit nfhd_exit(void)
module_init(nfhd_init);
module_exit(nfhd_exit);
+MODULE_DESCRIPTION("Atari NatFeat block device driver");
MODULE_LICENSE("GPL");
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c
index 17b2987c2bf5..d41260672e24 100644
--- a/arch/m68k/emu/nfcon.c
+++ b/arch/m68k/emu/nfcon.c
@@ -173,4 +173,5 @@ static void __exit nfcon_exit(void)
module_init(nfcon_init);
module_exit(nfcon_exit);
+MODULE_DESCRIPTION("Atari NatFeat console driver");
MODULE_LICENSE("GPL");
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index d7f3de9c5d6f..4ba14f3535fc 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -32,7 +32,7 @@ static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, in
x = tmp;
break;
default:
- tmp = __invalid_xchg_size(x, ptr, size);
+ x = __invalid_xchg_size(x, ptr, size);
break;
}
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 4ae52414cd9d..2e0047cf86f8 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -30,6 +30,5 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_CLONE3
#endif /* _ASM_M68K_UNISTD_H_ */
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index ed9f34da1a2a..0850b099f300 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -35,7 +35,7 @@
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
- off_t, pgoff)
+ unsigned long, pgoff)
{
if (pgoff & ~PAGE_MASK)
return -EINVAL;
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index ec180ab92eaa..66a8ba19c287 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
* RAC flush causes kernel panics on BCM6358 when booting from TP1
* because the bootloader is not initializing it properly.
*/
- bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
+ !!BMIPS_GET_CBR();
}
static void bcm6368_quirks(void)
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 30e86861c206..b1ee3c48e84b 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -322,7 +322,7 @@ static inline void ehb(void)
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MFTC0 \
- " mftc0 $1, " #rt ", " #sel " \n" \
+ " mftc0 %0, " #rt ", " #sel " \n" \
_ASM_UNSET_MFTC0 \
" .set pop \n" \
: "=r" (__res)); \
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 25a5253db7f4..ba83d3fb0a84 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -58,7 +58,6 @@
# endif
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
/* whitelists for checksyscalls */
#define __IGNORE_fadvise64_64
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index cc869f5d5693..953f5b7dc723 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -354,7 +354,7 @@
412 n32 utimensat_time64 sys_utimensat
413 n32 pselect6_time64 compat_sys_pselect6_time64
414 n32 ppoll_time64 compat_sys_ppoll_time64
-416 n32 io_pgetevents_time64 sys_io_pgetevents
+416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64
417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
418 n32 mq_timedsend_time64 sys_mq_timedsend
419 n32 mq_timedreceive_time64 sys_mq_timedreceive
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 008ebe60263e..2439a2491cff 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -27,7 +27,7 @@
17 o32 break sys_ni_syscall
# 18 was sys_stat
18 o32 unused18 sys_ni_syscall
-19 o32 lseek sys_lseek
+19 o32 lseek sys_lseek compat_sys_lseek
20 o32 getpid sys_getpid
21 o32 mount sys_mount
22 o32 umount sys_oldumount
@@ -403,7 +403,7 @@
412 o32 utimensat_time64 sys_utimensat sys_utimensat
413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
index 874ed6df9768..34b9323bdabb 100644
--- a/arch/mips/pci/ops-rc32434.c
+++ b/arch/mips/pci/ops-rc32434.c
@@ -112,8 +112,8 @@ retry:
* gives them time to settle
*/
if (where == PCI_VENDOR_ID) {
- if (ret == 0xffffffff || ret == 0x00000000 ||
- ret == 0x0000ffff || ret == 0xffff0000) {
+ if (*val == 0xffffffff || *val == 0x00000000 ||
+ *val == 0x0000ffff || *val == 0xffff0000) {
if (delay > 4)
return 0;
delay *= 2;
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 7fe7437555fb..0d09829ed144 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
diff --git a/arch/nios2/include/asm/unistd.h b/arch/nios2/include/asm/unistd.h
new file mode 100644
index 000000000000..1146e56473c5
--- /dev/null
+++ b/arch/nios2/include/asm/unistd.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_UNISTD_H
+#define __ASM_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+
+#define __ARCH_BROKEN_SYS_CLONE3
+
+#endif
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h
index 0b4bb1d41b28..1f0e0f5538d9 100644
--- a/arch/nios2/include/uapi/asm/unistd.h
+++ b/arch/nios2/include/uapi/asm/unistd.h
@@ -16,16 +16,4 @@
*
*/
- #define sys_mmap2 sys_mmap_pgoff
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_TIME32_SYSCALLS
-
-/* Use the standard ABI for syscalls */
-#include <asm-generic/unistd.h>
-
-/* Additional Nios II specific syscalls. */
-#define __NR_cacheflush (__NR_arch_specific_syscall)
-__SYSCALL(__NR_cacheflush, sys_cacheflush)
+#include <asm/unistd_32.h>
diff --git a/arch/nios2/kernel/Makefile.syscalls b/arch/nios2/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..579a9daec272
--- /dev/null
+++ b/arch/nios2/kernel/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += nios2 time32 stat64 renameat rlimit
diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c
index c2875a6dd5a4..434694067d8f 100644
--- a/arch/nios2/kernel/syscall_table.c
+++ b/arch/nios2/kernel/syscall_table.c
@@ -9,10 +9,12 @@
#include <asm/syscalls.h>
-#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
+#define sys_mmap2 sys_mmap_pgoff
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls-1] = sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
};
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index c8c99b554ca4..cef49d60d74c 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
generic-y += extable.h
generic-y += kvm_para.h
generic-y += parport.h
diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h
index aa1c7e98722e..9f4c47961bea 100644
--- a/arch/openrisc/include/asm/syscalls.h
+++ b/arch/openrisc/include/asm/syscalls.h
@@ -25,8 +25,4 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size);
asmlinkage long __sys_fork(void);
-#define sys_clone __sys_clone
-#define sys_clone3 __sys_clone3
-#define sys_fork __sys_fork
-
#endif /* __ASM_OPENRISC_SYSCALLS_H */
diff --git a/arch/openrisc/include/asm/unistd.h b/arch/openrisc/include/asm/unistd.h
new file mode 100644
index 000000000000..c73f65e18d3b
--- /dev/null
+++ b/arch/openrisc/include/asm/unistd.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#include <uapi/asm/unistd.h>
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index fae34c60fa88..46b94d454efd 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -17,17 +17,4 @@
* (at your option) any later version.
*/
-#define sys_mmap2 sys_mmap_pgoff
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_TIME32_SYSCALLS
-
-#include <asm-generic/unistd.h>
-
-#define __NR_or1k_atomic __NR_arch_specific_syscall
-__SYSCALL(__NR_or1k_atomic, sys_or1k_atomic)
+#include <asm/unistd_32.h>
diff --git a/arch/openrisc/kernel/Makefile.syscalls b/arch/openrisc/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..525a1e7e7fc9
--- /dev/null
+++ b/arch/openrisc/kernel/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += or1k time32 stat64 rlimit renameat
diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c
index 3d18008310e4..b2f57e2538f7 100644
--- a/arch/openrisc/kernel/sys_call_table.c
+++ b/arch/openrisc/kernel/sys_call_table.c
@@ -16,9 +16,14 @@
#include <asm/syscalls.h>
-#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
+#define sys_mmap2 sys_mmap_pgoff
+#define sys_clone __sys_clone
+#define sys_clone3 __sys_clone3
+#define sys_fork __sys_fork
void *sys_call_table[__NR_syscalls] = {
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
};
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index daafeb20f993..dc9b902de8ea 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,6 +16,7 @@ config PARISC
select ARCH_HAS_UBSAN
select ARCH_HAS_PTE_SPECIAL
select ARCH_NO_SG_CHAIN
+ select ARCH_SPLIT_ARG64 if !64BIT
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index ba4c05bc24d6..8394718870e1 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
-void flush_kernel_dcache_page_addr(const void *addr);
-
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
+/* The only way to flush a vmap range is to flush whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
-#define flush_cache_vmap(start, end) flush_cache_all()
+void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) flush_cache_all()
+void flush_cache_vunmap(unsigned long start, unsigned long end);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
@@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-/* defined in pacache.S exported in cache.c used by flush_anon_page */
-void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(const void *addr)
-{
- flush_kernel_dcache_page_addr(addr);
-}
+void kunmap_flush_on_unmap(const void *addr);
#endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 974accac05cd..babf65751e81 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#define ptep_get ptep_get
+
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
- if (!pte_young(*ptep))
- return 0;
-
- pte = *ptep;
+ pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}
@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
return 1;
}
-struct mm_struct;
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- pte_t old_pte;
-
- old_pte = *ptep;
- set_pte(ptep, __pte(0));
-
- return old_pte;
-}
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));
@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index e38f9a90ac15..98851ff7699a 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -160,7 +160,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#define __ARCH_WANT_COMPAT_STAT
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 422f3e1e6d9c..483bfafd930c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
+#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
+#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
+
+/*
+ * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
+ * of page flushes done flush_cache_page_if_present. There are some
+ * pros and cons in using this option. It may increase the risk of
+ * random segmentation faults.
+ */
+#define CONFIG_FLUSH_PAGE_ACCESSED 0
+
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
+/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
-/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
+static void flush_kernel_dcache_page_addr(const void *addr);
+
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
+
+ /*
+ * The TLB is the engine of coherence on parisc. The CPU is
+ * entitled to speculate any page with a TLB mapping, so here
+ * we kill the mapping then flush the page along a special flush
+ * only alias mapping. This guarantees that the page is no-longer
+ * in the cache for any process and nor may it be speculatively
+ * read in (until the user or kernel specifically accesses it,
+ * of course).
+ */
+ flush_tlb_page(vma, vmaddr);
+
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+static void flush_kernel_dcache_page_addr(const void *addr)
{
- unsigned long flags, space, pgd, prot;
-#ifdef CONFIG_TLB_PTLOCK
- unsigned long pgd_lock;
-#endif
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- vmaddr &= PAGE_MASK;
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
+ flush_dcache_page_asm(__pa(vaddr), vaddr);
+ preempt_enable();
+}
- /* Set context for flush */
- local_irq_save(flags);
- prot = mfctl(8);
- space = mfsp(SR_USER);
- pgd = mfctl(25);
-#ifdef CONFIG_TLB_PTLOCK
- pgd_lock = mfctl(28);
-#endif
- switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
- local_irq_restore(flags);
-
- flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- flush_tlb_page(vma, vmaddr);
+static void flush_kernel_icache_page_addr(const void *addr)
+{
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- /* Restore previous context */
- local_irq_save(flags);
-#ifdef CONFIG_TLB_PTLOCK
- mtctl(pgd_lock, 28);
-#endif
- mtctl(pgd, 25);
- mtsp(space, SR_USER);
- mtctl(prot, 8);
- local_irq_restore(flags);
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent instruction cache move-in */
+ preempt_disable();
+ flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
+void kunmap_flush_on_unmap(const void *addr)
+{
+ flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_flush_on_unmap);
+
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
- flush_kernel_icache_page(kaddr);
+ flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
+/*
+ * Walk page directory for MM to find PTEP pointer for address ADDR.
+ */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
+/*
+ * Return user physical address. Returns 0 if page is not present.
+ */
+static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long flags, space, pgd, prot, pa;
+#ifdef CONFIG_TLB_PTLOCK
+ unsigned long pgd_lock;
+#endif
+
+ /* Save context */
+ local_irq_save(flags);
+ prot = mfctl(8);
+ space = mfsp(SR_USER);
+ pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+ pgd_lock = mfctl(28);
+#endif
+
+ /* Set context for lpa_user */
+ switch_mm_irqs_off(NULL, mm, NULL);
+ pa = lpa_user(addr);
+
+ /* Restore previous context */
+#ifdef CONFIG_TLB_PTLOCK
+ mtctl(pgd_lock, 28);
+#endif
+ mtctl(pgd, 25);
+ mtsp(space, SR_USER);
+ mtctl(prot, 8);
+ local_irq_restore(flags);
+
+ return pa;
+}
+
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
@@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
- if (parisc_requires_coherency()) {
- for (i = 0; i < nr; i++) {
- pte_t *ptep = get_ptep(vma->vm_mm,
- addr + i * PAGE_SIZE);
- if (!ptep)
- continue;
- if (pte_needs_flush(*ptep))
- flush_user_cache_page(vma,
- addr + i * PAGE_SIZE);
- /* Optimise accesses to the same table? */
- pte_unmap(ptep);
- }
- } else {
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ for (i = 0; i < nr; i++)
+ __flush_cache_page(vma,
+ addr + i * PAGE_SIZE,
+ (pfn + i) * PAGE_SIZE);
/*
- * The TLB is the engine of coherence on parisc:
- * The CPU is entitled to speculate any page
- * with a TLB mapping, so here we kill the
- * mapping then flush the page along a special
- * flush only alias mapping. This guarantees that
- * the page is no-longer in the cache for any
- * process and nor may it be speculatively read
- * in (until the user or kernel specifically
- * accesses it, of course)
+ * Software is allowed to have any number
+ * of private mappings to a page.
*/
- for (i = 0; i < nr; i++)
- flush_tlb_page(vma, addr + i * PAGE_SIZE);
- if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
- != (addr & (SHM_COLOUR - 1))) {
- for (i = 0; i < nr; i++)
- __flush_cache_page(vma,
- addr + i * PAGE_SIZE,
- (pfn + i) * PAGE_SIZE);
- /*
- * Software is allowed to have any number
- * of private mappings to a page.
- */
- if (!(vma->vm_flags & VM_SHARED))
- continue;
- if (old_addr)
- pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
- old_addr, addr, vma->vm_file);
- if (nr == folio_nr_pages(folio))
- old_addr = addr;
- }
+ if (!(vma->vm_flags & VM_SHARED))
+ continue;
+ if (old_addr)
+ pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+ old_addr, addr, vma->vm_file);
+ if (nr == folio_nr_pages(folio))
+ old_addr = addr;
}
WARN_ON(++count == 4096);
}
@@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
-void flush_kernel_dcache_page_addr(const void *addr)
-{
- unsigned long flags;
-
- flush_kernel_dcache_page_asm(addr);
- purge_tlb_start(flags);
- pdtlb(SR_KERNEL, addr);
- purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-
static void flush_cache_page_if_present(struct vm_area_struct *vma,
- unsigned long vmaddr, unsigned long pfn)
+ unsigned long vmaddr)
{
+#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
- pte_t *ptep;
+ pte_t *ptep, pte;
- /*
- * The pte check is racy and sometimes the flush will trigger
- * a non-access TLB miss. Hopefully, the page has already been
- * flushed.
- */
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
+ pte = ptep_get(ptep);
+ needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
- flush_cache_page(vma, vmaddr, pfn);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
+#else
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long physaddr = get_upa(mm, vmaddr);
+
+ if (physaddr)
+ __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
+#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
- flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+ __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
/* __flush_tlb_range()
@@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- unsigned long addr, pfn;
- pte_t *ptep;
-
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- bool needs_flush = false;
- /*
- * The vma can contain pages that aren't present. Although
- * the pte search is expensive, we need the pte to find the
- * page pfn and to check whether the page should be flushed.
- */
- ptep = get_ptep(vma->vm_mm, addr);
- if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
- pfn = pte_pfn(*ptep);
- pte_unmap(ptep);
- }
- if (needs_flush) {
- if (parisc_requires_coherency()) {
- flush_user_cache_page(vma, addr);
- } else {
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- }
- }
- }
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE)
+ flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
- flush_cache_all();
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_all();
+ else
+ flush_data_cache();
return;
}
- flush_cache_pages(vma, start, end);
+ flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- if (parisc_requires_coherency())
- flush_user_cache_page(vma, vmaddr);
- else
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
if (!PageAnon(page))
return;
- if (parisc_requires_coherency()) {
- if (vma->vm_flags & VM_SHARED)
- flush_data_cache();
- else
- flush_user_cache_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
+}
+
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = ptep_get(ptep);
+
+ if (!pte_young(pte))
+ return 0;
+ set_pte(ptep, pte_mkold(pte));
+#if CONFIG_FLUSH_PAGE_ACCESSED
+ __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
+#endif
+ return 1;
+}
+
+/*
+ * After a PTE is cleared, we have no way to flush the cache for
+ * the physical page. On PA8800 and PA8900 processors, these lines
+ * can cause random cache corruption. Thus, we must flush the cache
+ * as well as the TLB when clearing a PTE that's valid.
+ */
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ struct mm_struct *mm = (vma)->vm_mm;
+ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ else if (pte_accessible(mm, pte))
+ flush_tlb_page(vma, addr);
+
+ return pte;
+}
+
+/*
+ * The physical address for pages in the ioremap case can be obtained
+ * from the vm_struct struct. I wasn't able to successfully handle the
+ * vmalloc and vmap cases. We have an array of struct page pointers in
+ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
+ */
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ unsigned long addr, physaddr;
+ struct vm_struct *vm;
+
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+
+ if (end - start >= parisc_cache_flush_threshold) {
+ flush_cache_all();
return;
}
- flush_tlb_page(vma, vmaddr);
- preempt_disable();
- flush_dcache_page_asm(page_to_phys(page), vmaddr);
- preempt_enable();
+ if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
+ flush_cache_all();
+ return;
+ }
+
+ vm = find_vm_area((void *)start);
+ if (WARN_ON_ONCE(!vm)) {
+ flush_cache_all();
+ return;
+ }
+
+ /* The physical addresses of IOREMAP regions are contiguous */
+ if (vm->flags & VM_IOREMAP) {
+ physaddr = vm->phys_addr;
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ preempt_disable();
+ flush_dcache_page_asm(physaddr, start);
+ flush_icache_page_asm(physaddr, start);
+ preempt_enable();
+ physaddr += PAGE_SIZE;
+ }
+ return;
+ }
+
+ flush_cache_all();
}
+EXPORT_SYMBOL(flush_cache_vmap);
+/*
+ * The vm_struct has been retired and the page table is set up. The
+ * last page in the range is a guard page. Its physical address can't
+ * be determined using lpa, so there is no way to flush the range
+ * using flush_dcache_page_asm.
+ */
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+ flush_data_cache();
+}
+EXPORT_SYMBOL(flush_cache_vunmap);
+
+/*
+ * On systems with PA8800/PA8900 processors, there is no way to flush
+ * a vmap range other than using the architected loop to flush the
+ * entire cache. The page directory is not set up, so we can't use
+ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
+ * L2 is physically indexed but FDCE/FICE instructions in virtual
+ * mode output their virtual address on the core bus, not their
+ * real address. As a result, the L2 cache index formed from the
+ * virtual address will most likely not be the same as the L2 index
+ * formed from the real address.
+ */
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- flush_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
@@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- purge_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
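The two exported helpers reworked above are the documented entry points for doing I/O on a buffer that is mapped through vmap()/vmalloc(). A minimal usage sketch follows (hypothetical driver code, not part of this patch; the declarations are assumed to come via linux/highmem.h):

#include <linux/highmem.h>

static void dma_on_vmap_buffer(void *vaddr, int len)
{
	/* CPU filled the buffer: push dirty lines out before the device reads it. */
	flush_kernel_vmap_range(vaddr, len);
	/* ... start DMA from the buffer to the device and wait for completion ... */

	/* Device wrote the buffer: discard stale lines before the CPU reads it. */
	invalidate_kernel_vmap_range(vaddr, len);
	/* ... parse the freshly DMA'd data through vaddr ... */
}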
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 2a12a547b447..826c8e51b585 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
current->comm, current->pid, r20);
return -ENOSYS;
}
-
-asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
- compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
- const char __user * pathname)
-{
- return sys_fanotify_mark(fanotify_fd, flags,
- ((__u64)mask1 << 32) | mask0,
- dfd, pathname);
-}
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index b13c21373974..66dc406b12e4 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -108,7 +108,7 @@
95 common fchown sys_fchown
96 common getpriority sys_getpriority
97 common setpriority sys_setpriority
-98 common recv sys_recv
+98 common recv sys_recv compat_sys_recv
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 common stat64 sys_stat64
@@ -135,7 +135,7 @@
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
-123 common recvfrom sys_recvfrom
+123 common recvfrom sys_recvfrom compat_sys_recvfrom
124 32 adjtimex sys_adjtimex_time32
124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
@@ -364,7 +364,7 @@
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
-323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
+323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
324 32 clock_adjtime sys_clock_adjtime32
324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3c968f2f4ac4..c88c6d46a5bc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -137,7 +137,7 @@ config PPC
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_KCOV
- select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC_FPU
+ select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU
diff --git a/arch/powerpc/crypto/.gitignore b/arch/powerpc/crypto/.gitignore
index e1094f08f713..e9fe73aac8b6 100644
--- a/arch/powerpc/crypto/.gitignore
+++ b/arch/powerpc/crypto/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
aesp10-ppc.S
+aesp8-ppc.S
ghashp10-ppc.S
+ghashp8-ppc.S
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index de10437fd206..fd594bf6c6a9 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
+#else
+#define __put_user_asm2_goto(x, addr, label) \
+ asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \
@@ -165,8 +181,19 @@ do { \
#endif
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label) \
__get_user_asm_goto(x, addr, label, "ld")
+#else
+#define __get_user_asm2_goto(x, addr, label) \
+ asm_goto_output( \
+ "1: ld%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
asm_goto_output( \
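For reference, these asm-goto helpers sit underneath the ordinary get_user()/put_user() accessors, so callers only ever see the usual 0/-EFAULT return convention. A small hypothetical caller, assuming nothing beyond linux/uaccess.h:

#include <linux/types.h>
#include <linux/uaccess.h>

static long read_u64_from_user(const u64 __user *uptr, u64 *out)
{
	u64 val;

	/* On ppc64 this expands, via __get_user(), to __get_user_asm2_goto(). */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}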
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 659a996c75aa..027ef94a12fb 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -51,7 +51,6 @@
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index d1030bc52564..d283d281d28e 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -849,6 +849,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
struct eeh_dev *edev;
struct pci_dev *pdev;
+ struct pci_bus *bus = NULL;
if (pe->type & EEH_PE_PHB)
return pe->phb->bus;
@@ -859,9 +860,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
/* Retrieve the parent PCI bus of first (top) PCI device */
edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
+ pci_lock_rescan_remove();
pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
- return pdev->bus;
+ bus = pdev->bus;
+ pci_unlock_rescan_remove();
- return NULL;
+ return bus;
}
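The pattern applied above is general: any dereference of PCI topology that can race with hotplug-driven removal should happen under the rescan/remove lock. A stripped-down sketch of the same shape (hypothetical helper, names invented for illustration):

#include <linux/pci.h>

static struct pci_bus *bus_of_dev_locked(struct pci_dev *pdev)
{
	struct pci_bus *bus = NULL;

	pci_lock_rescan_remove();
	if (pdev)
		bus = pdev->bus;	/* safe: removal is excluded while the lock is held */
	pci_unlock_rescan_remove();

	return bus;
}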
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 4690c219bfa4..63432a33ec49 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -647,8 +647,9 @@ __after_prom_start:
* Note: This process overwrites the OF exception vectors.
*/
LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
- mr. r4,r26 /* In some cases the loader may */
- beq 9f /* have already put us at zero */
+ mr r4,r26 /* Load the virtual source address into r4 */
+ cmpld r3,r4 /* Check if source == dest */
+ beq 9f /* If so skip the copy */
li r6,0x100 /* Start offset, the first 0x100 */
/* bytes were copied earlier. */
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 3656f1ca7a21..ebae8415dfbb 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -230,8 +230,10 @@
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
179 64 pread64 sys_pread64
+179 spu pread64 sys_pread64
180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
180 64 pwrite64 sys_pwrite64
+180 spu pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@@ -246,6 +248,7 @@
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
191 64 readahead sys_readahead
+191 spu readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
@@ -293,6 +296,7 @@
232 nospu set_tid_address sys_set_tid_address
233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
233 64 fadvise64 sys_fadvise64
+233 spu fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_ni_syscall
236 common epoll_create sys_epoll_create
@@ -502,7 +506,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 85050be08a23..72b12bc10f90 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -27,6 +27,7 @@
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
+#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/svm.h>
@@ -317,6 +318,16 @@ void default_machine_kexec(struct kimage *image)
if (!kdump_in_progress())
kexec_prepare_cpus();
+#ifdef CONFIG_PPC_PSERIES
+ /*
+ * This must be done after other CPUs have shut down, otherwise they
+ * could execute the 'scv' instruction, which is not supported with
+ * reloc disabled (see configure_exceptions()).
+ */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ pseries_disable_reloc_on_exc();
+#endif
+
printk("kexec: Starting switchover sequence.\n");
/* switch to a staticly allocated stack. Based on irq stack code.
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index b569ebaa590e..3ff3de9a52ac 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -130,14 +130,16 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
}
rcu_read_unlock();
- fdput(f);
-
- if (!found)
+ if (!found) {
+ fdput(f);
return -EINVAL;
+ }
table_group = iommu_group_get_iommudata(grp);
- if (WARN_ON(!table_group))
+ if (WARN_ON(!table_group)) {
+ fdput(f);
return -EFAULT;
+ }
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbltmp = table_group->tables[i];
@@ -158,8 +160,10 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
break;
}
}
- if (!tbl)
+ if (!tbl) {
+ fdput(f);
return -EINVAL;
+ }
rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -170,6 +174,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
/* stit is being destroyed */
iommu_tce_table_put(tbl);
rcu_read_unlock();
+ fdput(f);
return -ENOTTY;
}
/*
@@ -177,6 +182,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
* its KVM reference counter and can return.
*/
rcu_read_unlock();
+ fdput(f);
return 0;
}
rcu_read_unlock();
@@ -184,6 +190,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
if (!stit) {
iommu_tce_table_put(tbl);
+ fdput(f);
return -ENOMEM;
}
@@ -192,6 +199,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
list_add_rcu(&stit->next, &stt->iommu_tables);
+ fdput(f);
return 0;
}
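The fix above restores the basic fdget()/fdput() contract: the file reference must be held for as long as anything reached through it is used, and every exit path must drop it exactly once. A condensed sketch of that contract (hypothetical helper, not taken from this file):

#include <linux/file.h>

static int with_table_fd(int tablefd)
{
	struct fd f = fdget(tablefd);
	int ret = -EBADF;

	if (!f.file)
		return ret;

	/* ... use data reachable from f.file while the reference is held ... */
	ret = 0;

	fdput(f);	/* single exit point keeps the pairing obvious */
	return ret;
}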
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 984655419da5..2a36cc2e7e9e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -225,7 +225,7 @@ skip_init_ctx:
fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
if (!fp->is_func || extra_pass) {
- if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
+ if (bpf_jit_binary_pack_finalize(fhdr, hdr)) {
fp = org_fp;
goto out_addrs;
}
@@ -348,7 +348,7 @@ void bpf_jit_free(struct bpf_prog *fp)
* before freeing it.
*/
if (jit_data) {
- bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
+ bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr);
kvfree(jit_data->addrs);
kfree(jit_data);
}
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 43b97032a91c..a0c4f1bde83e 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -900,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
@@ -953,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 8afc14a4a125..7703dcf48be8 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -846,6 +846,15 @@ emit_clear:
/* Get offset into TMP_REG_1 */
EMIT(PPC_RAW_LI(tmp1_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
if (size == BPF_DW)
@@ -908,6 +917,9 @@ emit_clear:
PPC_BCC_SHORT(COND_NE, tmp_idx);
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, _R0));
/*
* Skip unnecessary zero-extension for 32-bit cmpxchg.
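The ordering requirement described in the new comments maps onto the classic powerpc read-modify-write shape; the sync instructions are only emitted on SMP builds, as the IS_ENABLED(CONFIG_SMP) guards above show. As a sketch (instruction-level shape only, register allocation elided, not verbatim JIT output), a fully ordered 32-bit atomic fetch-add now comes out roughly as:

/*
 *	sync			# full barrier before the RMW (newly emitted)
 * 1:	lwarx	r0, tmp, dst	# load-reserve the old value
 *	add	r0, r0, src	# apply the BPF_ADD operation
 *	stwcx.	r0, tmp, dst	# store-conditional
 *	bne-	1b		# retry if the reservation was lost
 *	sync			# full barrier after the RMW (newly emitted)
 *	mr	ret, old	# BPF_FETCH: hand back the pre-update value
 */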
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 096d09ed89f6..431be156ca9b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
} else
xics_kexec_teardown_cpu(secondary);
}
-
-void pseries_machine_kexec(struct kimage *image)
-{
- if (firmware_has_feature(FW_FEATURE_SET_MODE))
- pseries_disable_reloc_on_exc();
-
- default_machine_kexec(image);
-}
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 6e7029640c0c..62da20f9700a 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -371,8 +371,8 @@ static int read_dt_lpar_name(struct seq_file *m)
static void read_lpar_name(struct seq_file *m)
{
- if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
- pr_err_once("Error can't get the LPAR name");
+ if (read_rtas_lpar_name(m))
+ read_dt_lpar_name(m);
}
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index bba4ad192b0f..3968a6970fa8 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
#endif
extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
-void pseries_machine_kexec(struct kimage *image);
extern void pSeries_final_fixup(void);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 284a6fa04b0c..b10a25325238 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void)
{
void (*ctor)(void *) = get_dtl_cache_ctor();
- dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
- DISPATCH_LOG_BYTES, 0, ctor);
+ dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
+ DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
if (!dtl_cache) {
pr_warn("Failed to create dispatch trace log buffer cache\n");
pr_warn("Stolen time statistics will be unreliable\n");
@@ -1159,7 +1159,6 @@ define_machine(pseries) {
.machine_check_exception = pSeries_machine_check_exception,
.machine_check_log_err = pSeries_machine_check_log_err,
#ifdef CONFIG_KEXEC_CORE
- .machine_kexec = pseries_machine_kexec,
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
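The kmem_cache_create_usercopy() call above takes two extra arguments compared with kmem_cache_create(); annotated as a sketch, the whitelist covers the whole dispatch-log object so it may be copied to user space under CONFIG_HARDENED_USERCOPY:

	dtl_cache = kmem_cache_create_usercopy("dtl",
			DISPATCH_LOG_BYTES,	/* object size             */
			DISPATCH_LOG_BYTES,	/* alignment                */
			0,			/* slab flags               */
			0,			/* useroffset: from byte 0  */
			DISPATCH_LOG_BYTES,	/* usersize: whole object   */
			ctor);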
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b94176e25be1..9f38a5ecbee3 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -106,7 +106,7 @@ config RISCV
select HAS_IOPORT if MMU
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
- select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
+ select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
@@ -610,6 +610,18 @@ config TOOLCHAIN_HAS_VECTOR_CRYPTO
def_bool $(as-instr, .option arch$(comma) +v$(comma) +zvkb)
depends on AS_HAS_OPTION_ARCH
+config RISCV_ISA_ZBA
+ bool "Zba extension support for bit manipulation instructions"
+ default y
+ help
+ Add support for enabling optimisations in the kernel when the Zba
+ extension is detected at boot.
+
+ The Zba extension provides instructions to accelerate the generation
+ of addresses that index into arrays of basic data types.
+
+ If you don't know what to do here, say Y.
+
config RISCV_ISA_ZBB
bool "Zbb extension support for bit manipulation instructions"
depends on TOOLCHAIN_HAS_ZBB
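Code that wants to use the new option generally combines the compile-time symbol with a runtime check that the CPU actually reports the extension. A sketch under that assumption (the cpufeature helper and extension ID are the existing RISC-V ones, not introduced by this change):

#include <asm/cpufeature.h>
#include <asm/hwcap.h>

static bool use_zba_fast_path(void)
{
	/* Both the Kconfig gate and the boot-time ISA string must agree. */
	return IS_ENABLED(CONFIG_RISCV_ISA_ZBA) &&
	       riscv_has_extension_likely(RISCV_ISA_EXT_ZBA);
}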
diff --git a/arch/riscv/boot/dts/allwinner/Makefile b/arch/riscv/boot/dts/allwinner/Makefile
index 87f70b1af6b4..1c91be38ea16 100644
--- a/arch/riscv/boot/dts/allwinner/Makefile
+++ b/arch/riscv/boot/dts/allwinner/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-clockworkpi-v3.14.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-devterm-v3.14.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-dongshan-nezha-stu.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-lichee-rv-86-panel-480p.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-lichee-rv-86-panel-720p.dtb
diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1-clockworkpi-v3.14.dts b/arch/riscv/boot/dts/allwinner/sun20i-d1-clockworkpi-v3.14.dts
new file mode 100644
index 000000000000..750aec6cf2f2
--- /dev/null
+++ b/arch/riscv/boot/dts/allwinner/sun20i-d1-clockworkpi-v3.14.dts
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+// Copyright (C) 2022 Samuel Holland <samuel@sholland.org>
+
+#include <dt-bindings/gpio/gpio.h>
+
+/dts-v1/;
+
+#include "sun20i-d1.dtsi"
+#include "sun20i-common-regulators.dtsi"
+
+/ {
+ model = "ClockworkPi v3.14 (R-01)";
+ compatible = "clockwork,r-01-clockworkpi-v3.14", "allwinner,sun20i-d1";
+
+ aliases {
+ ethernet0 = &ap6256;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ /*
+ * This regulator is PWM-controlled, but the PWM controller is not
+ * yet supported, so fix the regulator to its default voltage.
+ */
+ reg_vdd_cpu: vdd-cpu {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-cpu";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&reg_vcc>;
+ };
+
+ wifi_pwrseq: wifi-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>; /* PG11/GPIO3 */
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_vdd_cpu>;
+};
+
+&dcxo {
+ clock-frequency = <24000000>;
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-0 = <&i2c0_pb10_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ axp221: pmic@34 {
+ compatible = "x-powers,axp228", "x-powers,axp221";
+ reg = <0x34>;
+ interrupt-parent = <&pio>;
+ interrupts = <4 9 IRQ_TYPE_LEVEL_LOW>; /* PE9/GPIO2 */
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ ac_power_supply: ac-power {
+ compatible = "x-powers,axp221-ac-power-supply";
+ };
+
+ axp_adc: adc {
+ compatible = "x-powers,axp221-adc";
+ #io-channel-cells = <1>;
+ };
+
+ battery_power_supply: battery-power {
+ compatible = "x-powers,axp221-battery-power-supply";
+ };
+
+ axp_gpio: gpio {
+ compatible = "x-powers,axp221-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ regulators {
+ x-powers,dcdc-freq = <3000>;
+
+ reg_dcdc1: dcdc1 {
+ regulator-name = "sys-3v3";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-name = "sys-1v8";
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_aldo1: aldo1 {
+ regulator-name = "aud-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_aldo2: aldo2 {
+ regulator-name = "disp-3v3";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_aldo3: aldo3 {
+ regulator-name = "vdd-wifi";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* DLDO1 and ELDO1-3 are connected in parallel. */
+ reg_dldo1: dldo1 {
+ regulator-name = "vbat-wifi-a";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ /* DLDO2-DLDO4 are connected in parallel. */
+ reg_dldo2: dldo2 {
+ regulator-name = "vcc-3v3-ext-a";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_dldo3: dldo3 {
+ regulator-name = "vcc-3v3-ext-b";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_dldo4: dldo4 {
+ regulator-name = "vcc-3v3-ext-c";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_eldo1: eldo1 {
+ regulator-name = "vbat-wifi-b";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_eldo2: eldo2 {
+ regulator-name = "vbat-wifi-c";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_eldo3: eldo3 {
+ regulator-name = "vbat-wifi-d";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+
+ usb_power_supply: usb-power {
+ compatible = "x-powers,axp221-usb-power-supply";
+ status = "disabled";
+ };
+ };
+};
+
+&mmc0 {
+ broken-cd;
+ bus-width = <4>;
+ disable-wp;
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_vcc_3v3>;
+ pinctrl-0 = <&mmc0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&mmc1 {
+ bus-width = <4>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ non-removable;
+ vmmc-supply = <&reg_dldo1>;
+ vqmmc-supply = <&reg_aldo3>;
+ pinctrl-0 = <&mmc1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ ap6256: wifi@1 {
+ compatible = "brcm,bcm43456-fmac", "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <6 10 IRQ_TYPE_LEVEL_LOW>; /* PG10/GPIO4 */
+ interrupt-names = "host-wake";
+ };
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pg-supply = <&reg_ldoa>;
+};
+
+&uart0 {
+ pinctrl-0 = <&uart0_pb8_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart1 {
+ uart-has-rtscts;
+ pinctrl-0 = <&uart1_pg6_pins>, <&uart1_pg8_rts_cts_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4345c5";
+ interrupt-parent = <&pio>;
+ interrupts = <6 17 IRQ_TYPE_LEVEL_HIGH>; /* PG17/GPIO6 */
+ device-wakeup-gpios = <&pio 6 16 GPIO_ACTIVE_HIGH>; /* PG16/GPIO7 */
+ shutdown-gpios = <&pio 6 18 GPIO_ACTIVE_HIGH>; /* PG18/GPIO5 */
+ max-speed = <1500000>;
+ vbat-supply = <&reg_dldo1>;
+ vddio-supply = <&reg_aldo3>;
+ };
+};
+
+&usb_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usbphy {
+ usb0_vbus_power-supply = <&ac_power_supply>;
+ usb1_vbus-supply = <&reg_vcc>;
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1-devterm-v3.14.dts b/arch/riscv/boot/dts/allwinner/sun20i-d1-devterm-v3.14.dts
new file mode 100644
index 000000000000..bc5c84f22762
--- /dev/null
+++ b/arch/riscv/boot/dts/allwinner/sun20i-d1-devterm-v3.14.dts
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+// Copyright (C) 2022 Samuel Holland <samuel@sholland.org>
+
+#include "sun20i-d1-clockworkpi-v3.14.dts"
+
+/ {
+ model = "Clockwork DevTerm (R-01)";
+ compatible = "clockwork,r-01-devterm-v3.14",
+ "clockwork,r-01-clockworkpi-v3.14",
+ "allwinner,sun20i-d1";
+
+ fan {
+ compatible = "gpio-fan";
+ gpios = <&pio 3 10 GPIO_ACTIVE_HIGH>; /* PD10/GPIO41 */
+ gpio-fan,speed-map = <0 0>,
+ <6000 1>;
+ #cooling-cells = <2>;
+ };
+
+ i2c-gpio-0 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&pio 3 14 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; /* PD14/GPIO44 */
+ scl-gpios = <&pio 3 15 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; /* PD15/GPIO45 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@54 {
+ compatible = "ti,adc101c";
+ reg = <0x54>;
+ interrupt-parent = <&pio>;
+ interrupts = <4 12 IRQ_TYPE_LEVEL_LOW>; /* PE12/GPIO35 */
+ vref-supply = <&reg_dldo2>;
+ #io-channel-cells = <1>;
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/allwinner/sunxi-d1s-t113.dtsi b/arch/riscv/boot/dts/allwinner/sunxi-d1s-t113.dtsi
index 5a9d7f5a75b4..e4175adb028d 100644
--- a/arch/riscv/boot/dts/allwinner/sunxi-d1s-t113.dtsi
+++ b/arch/riscv/boot/dts/allwinner/sunxi-d1s-t113.dtsi
@@ -396,6 +396,17 @@
ranges;
#address-cells = <1>;
#size-cells = <1>;
+
+ regulators@3000150 {
+ compatible = "allwinner,sun20i-d1-system-ldos";
+ reg = <0x3000150 0x4>;
+
+ reg_ldoa: ldoa {
+ };
+
+ reg_ldob: ldob {
+ };
+ };
};
dma: dma-controller@3002000 {
diff --git a/arch/riscv/boot/dts/canaan/canaan_kd233.dts b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
index 8df4cf3656f2..a7d753b6fdfd 100644
--- a/arch/riscv/boot/dts/canaan/canaan_kd233.dts
+++ b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
@@ -15,6 +15,10 @@
model = "Kendryte KD233";
compatible = "canaan,kendryte-kd233", "canaan,kendryte-k210";
+ aliases {
+ serial0 = &uarths0;
+ };
+
chosen {
bootargs = "earlycon console=ttySIF0";
stdout-path = "serial0:115200n8";
@@ -46,7 +50,6 @@
&fpioa {
pinctrl-0 = <&jtag_pinctrl>;
pinctrl-names = "default";
- status = "okay";
jtag_pinctrl: jtag-pinmux {
pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
@@ -118,6 +121,7 @@
#sound-dai-cells = <1>;
pinctrl-0 = <&i2s0_pinctrl>;
pinctrl-names = "default";
+ status = "okay";
};
&spi0 {
@@ -125,6 +129,7 @@
pinctrl-names = "default";
num-cs = <1>;
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
panel@0 {
compatible = "canaan,kd233-tft", "ilitek,ili9341";
diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi
index f87c5164d9cf..4f5d40fa1e77 100644
--- a/arch/riscv/boot/dts/canaan/k210.dtsi
+++ b/arch/riscv/boot/dts/canaan/k210.dtsi
@@ -16,13 +16,6 @@
#size-cells = <1>;
compatible = "canaan,kendryte-k210";
- aliases {
- serial0 = &uarths0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- };
-
/*
* The K210 has an sv39 MMU following the privileged specification v1.9.
* Since this is a non-ratified draft specification, the kernel does not
@@ -137,6 +130,7 @@
reg = <0x38000000 0x1000>;
interrupts = <33>;
clocks = <&sysclk K210_CLK_CPU>;
+ status = "disabled";
};
gpio0: gpio-controller@38001000 {
@@ -152,6 +146,7 @@
<62>, <63>, <64>, <65>;
gpio-controller;
ngpios = <32>;
+ status = "disabled";
};
dmac0: dma-controller@50000000 {
@@ -187,6 +182,7 @@
<&sysclk K210_CLK_GPIO>;
clock-names = "bus", "db";
resets = <&sysrst K210_RST_GPIO>;
+ status = "disabled";
gpio1_0: gpio-port@0 {
#gpio-cells = <2>;
@@ -214,6 +210,7 @@
dsr-override;
cts-override;
ri-override;
+ status = "disabled";
};
uart2: serial@50220000 {
@@ -230,6 +227,7 @@
dsr-override;
cts-override;
ri-override;
+ status = "disabled";
};
uart3: serial@50230000 {
@@ -246,6 +244,7 @@
dsr-override;
cts-override;
ri-override;
+ status = "disabled";
};
spi2: spi@50240000 {
@@ -259,6 +258,7 @@
<&sysclk K210_CLK_APB0>;
clock-names = "ssi_clk", "pclk";
resets = <&sysrst K210_RST_SPI2>;
+ status = "disabled";
};
i2s0: i2s@50250000 {
@@ -268,6 +268,7 @@
clocks = <&sysclk K210_CLK_I2S0>;
clock-names = "i2sclk";
resets = <&sysrst K210_RST_I2S0>;
+ status = "disabled";
};
i2s1: i2s@50260000 {
@@ -277,6 +278,7 @@
clocks = <&sysclk K210_CLK_I2S1>;
clock-names = "i2sclk";
resets = <&sysrst K210_RST_I2S1>;
+ status = "disabled";
};
i2s2: i2s@50270000 {
@@ -286,6 +288,7 @@
clocks = <&sysclk K210_CLK_I2S2>;
clock-names = "i2sclk";
resets = <&sysrst K210_RST_I2S2>;
+ status = "disabled";
};
i2c0: i2c@50280000 {
@@ -296,6 +299,7 @@
<&sysclk K210_CLK_APB0>;
clock-names = "ref", "pclk";
resets = <&sysrst K210_RST_I2C0>;
+ status = "disabled";
};
i2c1: i2c@50290000 {
@@ -306,6 +310,7 @@
<&sysclk K210_CLK_APB0>;
clock-names = "ref", "pclk";
resets = <&sysrst K210_RST_I2C1>;
+ status = "disabled";
};
i2c2: i2c@502a0000 {
@@ -316,6 +321,7 @@
<&sysclk K210_CLK_APB0>;
clock-names = "ref", "pclk";
resets = <&sysrst K210_RST_I2C2>;
+ status = "disabled";
};
fpioa: pinmux@502b0000 {
@@ -464,6 +470,7 @@
reset-names = "spi";
num-cs = <4>;
reg-io-width = <4>;
+ status = "disabled";
};
spi1: spi@53000000 {
@@ -479,6 +486,7 @@
reset-names = "spi";
num-cs = <4>;
reg-io-width = <4>;
+ status = "disabled";
};
spi3: spi@54000000 {
@@ -495,6 +503,7 @@
num-cs = <4>;
reg-io-width = <4>;
+ status = "disabled";
};
};
};
diff --git a/arch/riscv/boot/dts/canaan/k210_generic.dts b/arch/riscv/boot/dts/canaan/k210_generic.dts
index 396c8ca4d24d..5734cc03753b 100644
--- a/arch/riscv/boot/dts/canaan/k210_generic.dts
+++ b/arch/riscv/boot/dts/canaan/k210_generic.dts
@@ -15,6 +15,10 @@
model = "Kendryte K210 generic";
compatible = "canaan,kendryte-k210";
+ aliases {
+ serial0 = &uarths0;
+ };
+
chosen {
bootargs = "earlycon console=ttySIF0";
stdout-path = "serial0:115200n8";
@@ -24,7 +28,6 @@
&fpioa {
pinctrl-0 = <&jtag_pins>;
pinctrl-names = "default";
- status = "okay";
jtag_pins: jtag-pinmux {
pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
index 6d25bf07481a..2ab376d609d2 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -17,6 +17,10 @@
compatible = "sipeed,maix-bit", "sipeed,maix-bitm",
"canaan,kendryte-k210";
+ aliases {
+ serial0 = &uarths0;
+ };
+
chosen {
bootargs = "earlycon console=ttySIF0";
stdout-path = "serial0:115200n8";
@@ -58,7 +62,6 @@
&fpioa {
pinctrl-names = "default";
pinctrl-0 = <&jtag_pinctrl>;
- status = "okay";
jtag_pinctrl: jtag-pinmux {
pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
@@ -156,6 +159,7 @@
#sound-dai-cells = <1>;
pinctrl-0 = <&i2s0_pinctrl>;
pinctrl-names = "default";
+ status = "okay";
};
&i2c1 {
@@ -170,6 +174,7 @@
pinctrl-names = "default";
num-cs = <1>;
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
panel@0 {
compatible = "sitronix,st7789v";
@@ -199,6 +204,8 @@
};
&spi3 {
+ status = "okay";
+
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
index f4f4d8d5e8b8..d98e20775c07 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -17,6 +17,10 @@
compatible = "sipeed,maix-dock-m1", "sipeed,maix-dock-m1w",
"canaan,kendryte-k210";
+ aliases {
+ serial0 = &uarths0;
+ };
+
chosen {
bootargs = "earlycon console=ttySIF0";
stdout-path = "serial0:115200n8";
@@ -63,7 +67,6 @@
&fpioa {
pinctrl-0 = <&jtag_pinctrl>;
pinctrl-names = "default";
- status = "okay";
jtag_pinctrl: jtag-pinmux {
pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
@@ -159,6 +162,7 @@
#sound-dai-cells = <1>;
pinctrl-0 = <&i2s0_pinctrl>;
pinctrl-names = "default";
+ status = "okay";
};
&i2c1 {
@@ -173,6 +177,7 @@
pinctrl-names = "default";
num-cs = <1>;
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
panel@0 {
compatible = "sitronix,st7789v";
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
index 0d86df47e1ed..79ecd549700a 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -16,6 +16,10 @@
model = "SiPeed MAIX GO";
compatible = "sipeed,maix-go", "canaan,kendryte-k210";
+ aliases {
+ serial0 = &uarths0;
+ };
+
chosen {
bootargs = "earlycon console=ttySIF0";
stdout-path = "serial0:115200n8";
@@ -69,7 +73,6 @@
&fpioa {
pinctrl-0 = <&jtag_pinctrl>;
pinctrl-names = "default";
- status = "okay";
jtag_pinctrl: jtag-pinmux {
pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
@@ -167,6 +170,7 @@
#sound-dai-cells = <1>;
pinctrl-0 = <&i2s0_pinctrl>;
pinctrl-names = "default";
+ status = "okay";
};
&i2c1 {
@@ -181,6 +185,7 @@
pinctrl-names = "default";
num-cs = <1>;
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
panel@0 {
compatible = "sitronix,st7789v";
@@ -209,6 +214,8 @@
};
&spi3 {
+ status = "okay";
+
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
index 5c05c498e2b8..019c03ae51f6 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -15,6 +15,10 @@
model = "SiPeed MAIXDUINO";
compatible = "sipeed,maixduino", "canaan,kendryte-k210";
+ aliases {
+ serial0 = &uarths0;
+ };
+
chosen {
bootargs = "earlycon console=ttySIF0";
stdout-path = "serial0:115200n8";
@@ -39,8 +43,6 @@
};
&fpioa {
- status = "okay";
-
uarths_pinctrl: uarths-pinmux {
pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, /* Header "0" */
<K210_FPIOA(5, K210_PCF_UARTHS_TX)>; /* Header "1" */
@@ -132,6 +134,7 @@
#sound-dai-cells = <1>;
pinctrl-0 = <&i2s0_pinctrl>;
pinctrl-names = "default";
+ status = "okay";
};
&i2c1 {
@@ -146,6 +149,7 @@
pinctrl-names = "default";
num-cs = <1>;
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
panel@0 {
compatible = "sitronix,st7789v";
@@ -174,6 +178,8 @@
};
&spi3 {
+ status = "okay";
+
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
index e177815bf1a2..f51aeeb9fd3b 100644
--- a/arch/riscv/boot/dts/microchip/Makefile
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_MICROCHIP_POLARFIRE) += mpfs-beaglev-fire.dtb
dtb-$(CONFIG_ARCH_MICROCHIP_POLARFIRE) += mpfs-icicle-kit.dtb
dtb-$(CONFIG_ARCH_MICROCHIP_POLARFIRE) += mpfs-m100pfsevp.dtb
dtb-$(CONFIG_ARCH_MICROCHIP_POLARFIRE) += mpfs-polarberry.dtb
diff --git a/arch/riscv/boot/dts/microchip/mpfs-beaglev-fire-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-beaglev-fire-fabric.dtsi
new file mode 100644
index 000000000000..e153eaf9b90e
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-beaglev-fire-fabric.dtsi
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+/ {
+ fabric_clk3: fabric-clk3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ fabric_clk1: fabric-clk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ fabric-bus@40000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x40000000 0x0 0x40000000 0x0 0x20000000>, /* FIC3-FAB */
+ <0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>, /* FIC0, LO */
+ <0x0 0xe0000000 0x0 0xe0000000 0x0 0x20000000>, /* FIC1, LO */
+ <0x20 0x0 0x20 0x0 0x10 0x0>, /* FIC0,HI */
+ <0x30 0x0 0x30 0x0 0x10 0x0>; /* FIC1,HI */
+
+ cape_gpios_p8: gpio@41100000 {
+ compatible = "microchip,coregpio-rtl-v3";
+ reg = <0x0 0x41100000 0x0 0x1000>;
+ clocks = <&fabric_clk3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <16>;
+ gpio-line-names = "P8_PIN31", "P8_PIN32", "P8_PIN33", "P8_PIN34",
+ "P8_PIN35", "P8_PIN36", "P8_PIN37", "P8_PIN38",
+ "P8_PIN39", "P8_PIN40", "P8_PIN41", "P8_PIN42",
+ "P8_PIN43", "P8_PIN44", "P8_PIN45", "P8_PIN46";
+ };
+
+ cape_gpios_p9: gpio@41200000 {
+ compatible = "microchip,coregpio-rtl-v3";
+ reg = <0x0 0x41200000 0x0 0x1000>;
+ clocks = <&fabric_clk3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <20>;
+ gpio-line-names = "P9_PIN11", "P9_PIN12", "P9_PIN13", "P9_PIN14",
+ "P9_PIN15", "P9_PIN16", "P9_PIN17", "P9_PIN18",
+ "P9_PIN21", "P9_PIN22", "P9_PIN23", "P9_PIN24",
+ "P9_PIN25", "P9_PIN26", "P9_PIN27", "P9_PIN28",
+ "P9_PIN29", "P9_PIN31", "P9_PIN41", "P9_PIN42";
+ };
+
+ hsi_gpios: gpio@44000000 {
+ compatible = "microchip,coregpio-rtl-v3";
+ reg = <0x0 0x44000000 0x0 0x1000>;
+ clocks = <&fabric_clk3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <20>;
+ gpio-line-names = "B0_HSIO70N", "B0_HSIO71N", "B0_HSIO83N",
+ "B0_HSIO73N_C2P_CLKN", "B0_HSIO70P", "B0_HSIO71P",
+ "B0_HSIO83P", "B0_HSIO73N_C2P_CLKP", "XCVR1_RX_VALID",
+ "XCVR1_LOCK", "XCVR1_ERROR", "XCVR2_RX_VALID",
+ "XCVR2_LOCK", "XCVR2_ERROR", "XCVR3_RX_VALID",
+ "XCVR3_LOCK", "XCVR3_ERROR", "XCVR_0B_REF_CLK_PLL_LOCK",
+ "XCVR_0C_REF_CLK_PLL_LOCK", "B0_HSIO81N";
+ };
+ };
+
+ refclk_ccc: cccrefclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+};
+
+&ccc_nw {
+ clocks = <&refclk_ccc>, <&refclk_ccc>, <&refclk_ccc>, <&refclk_ccc>,
+ <&refclk_ccc>, <&refclk_ccc>;
+ clock-names = "pll0_ref0", "pll0_ref1", "pll1_ref0", "pll1_ref1",
+ "dll0_ref", "dll1_ref";
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-beaglev-fire.dts b/arch/riscv/boot/dts/microchip/mpfs-beaglev-fire.dts
new file mode 100644
index 000000000000..47cf693beb68
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-beaglev-fire.dts
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020-2021 Microchip Technology Inc */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "mpfs.dtsi"
+#include "mpfs-beaglev-fire-fabric.dtsi"
+
+/* Clock frequency (in Hz) of MTIMER */
+#define MTIMER_FREQ 1000000
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "BeagleBoard BeagleV-Fire";
+ compatible = "beagle,beaglev-fire", "microchip,mpfs";
+
+ aliases {
+ serial0 = &mmuart0;
+ serial1 = &mmuart1;
+ serial2 = &mmuart2;
+ serial3 = &mmuart3;
+ serial4 = &mmuart4;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ cpus {
+ timebase-frequency = <MTIMER_FREQ>;
+ };
+
+ ddrc_cache_lo: memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x40000000>;
+ status = "okay";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ hss: hss-buffer@103fc00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10 0x3fc00000 0x0 0x400000>;
+ no-map;
+ };
+ };
+
+ imx219_clk: camera-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ imx219_vana: fixedregulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "imx219_vana";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ imx219_vdig: fixedregulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "imx219_vdig";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ imx219_vddl: fixedregulator-2 {
+ compatible = "regulator-fixed";
+ regulator-name = "imx219_vddl";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+};
+
+&gpio2 {
+ interrupts = <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>;
+ ngpios=<32>;
+ gpio-line-names = "P8_PIN3_USER_LED_0", "P8_PIN4_USER_LED_1", "P8_PIN5_USER_LED_2",
+ "P8_PIN6_USER_LED_3", "P8_PIN7_USER_LED_4", "P8_PIN8_USER_LED_5",
+ "P8_PIN9_USER_LED_6", "P8_PIN10_USER_LED_7", "P8_PIN11_USER_LED_8",
+ "P8_PIN12_USER_LED_9", "P8_PIN13_USER_LED_10", "P8_PIN14_USER_LED_11",
+ "P8_PIN15", "P8_PIN16", "P8_PIN17", "P8_PIN18", "P8_PIN19", "P8_PIN20",
+ "P8_PIN21", "P8_PIN22", "P8_PIN23", "P8_PIN24", "P8_PIN25", "P8_PIN26",
+ "P8_PIN27", "P8_PIN28", "P8_PIN29", "P8_PIN30", "M2_W_DISABLE1",
+ "M2_W_DISABLE2", "VIO_ENABLE", "SD_DET";
+ status = "okay";
+
+ vio-enable-hog {
+ gpio-hog;
+ gpios = <30 30>;
+ output-high;
+ line-name = "VIO_ENABLE";
+ };
+
+ sd-det-hog {
+ gpio-hog;
+ gpios = <31 31>;
+ input;
+ line-name = "SD_DET";
+ };
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ eeprom: eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+
+ imx219: sensor@10 {
+ compatible = "sony,imx219";
+ reg = <0x10>;
+ clocks = <&imx219_clk>;
+ VANA-supply = <&imx219_vana>; /* 2.8v */
+ VDIG-supply = <&imx219_vdig>; /* 1.8v */
+ VDDL-supply = <&imx219_vddl>; /* 1.2v */
+
+ port {
+ imx219_0: endpoint {
+ data-lanes = <1 2>;
+ clock-noncontinuous;
+ link-frequencies = /bits/ 64 <456000000>;
+ };
+ };
+ };
+};
+
+&mac0 {
+ status = "okay";
+ phy-mode = "sgmii";
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&mbox {
+ status = "okay";
+};
+
+&mmc {
+ bus-width = <4>;
+ disable-wp;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&mmuart0 {
+ status = "okay";
+};
+
+&mmuart1 {
+ status = "okay";
+};
+
+&refclk {
+ clock-frequency = <125000000>;
+};
+
+&refclk_ccc {
+ clock-frequency = <50000000>;
+};
+
+&rtc {
+ status = "okay";
+};
+
+&spi0 {
+ status = "okay";
+};
+
+&spi1 {
+ status = "okay";
+};
+
+&syscontroller {
+ microchip,bitstream-flash = <&sys_ctrl_flash>;
+ status = "okay";
+};
+
+&syscontroller_qspi {
+ status = "okay";
+
+ sys_ctrl_flash: flash@0 { // MT25QL01GBBB8ESF-0SIT
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <20000000>;
+ spi-rx-bus-width = <1>;
+ reg = <0>;
+ };
+};
+
+&usb {
+ status = "okay";
+ dr_mode = "otg";
+};
diff --git a/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts b/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
index cd013588adc0..375ff2661b6e 100644
--- a/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
+++ b/arch/riscv/boot/dts/sophgo/cv1800b-milkv-duo.dts
@@ -45,6 +45,7 @@
no-1-8-v;
no-mmc;
no-sdio;
+ disable-wp;
};
&uart0 {
diff --git a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
index 49b4b9c2c101..80cb017974d8 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
+++ b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
@@ -14,6 +14,18 @@
};
};
+&cgi_main {
+ clock-frequency = <25000000>;
+};
+
+&cgi_dpll0 {
+ clock-frequency = <25000000>;
+};
+
+&cgi_dpll1 {
+ clock-frequency = <25000000>;
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
index 81fda312f988..34c802bd3f9b 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi
+++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
@@ -4,8 +4,10 @@
*/
/dts-v1/;
+#include <dt-bindings/clock/sophgo,sg2042-clkgen.h>
+#include <dt-bindings/clock/sophgo,sg2042-pll.h>
+#include <dt-bindings/clock/sophgo,sg2042-rpgate.h>
#include <dt-bindings/interrupt-controller/irq.h>
-
#include <dt-bindings/reset/sophgo,sg2042-reset.h>
#include "sg2042-cpus.dtsi"
@@ -20,12 +22,60 @@
serial0 = &uart0;
};
+ cgi_main: oscillator0 {
+ compatible = "fixed-clock";
+ clock-output-names = "cgi_main";
+ #clock-cells = <0>;
+ };
+
+ cgi_dpll0: oscillator1 {
+ compatible = "fixed-clock";
+ clock-output-names = "cgi_dpll0";
+ #clock-cells = <0>;
+ };
+
+ cgi_dpll1: oscillator2 {
+ compatible = "fixed-clock";
+ clock-output-names = "cgi_dpll1";
+ #clock-cells = <0>;
+ };
+
soc: soc {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ pllclk: clock-controller@70300100c0 {
+ compatible = "sophgo,sg2042-pll";
+ reg = <0x70 0x300100c0 0x0 0x40>;
+ clocks = <&cgi_main>, <&cgi_dpll0>, <&cgi_dpll1>;
+ clock-names = "cgi_main", "cgi_dpll0", "cgi_dpll1";
+ #clock-cells = <1>;
+ };
+
+ rpgate: clock-controller@7030010368 {
+ compatible = "sophgo,sg2042-rpgate";
+ reg = <0x70 0x30010368 0x0 0x98>;
+ clocks = <&clkgen GATE_CLK_RP_CPU_NORMAL>;
+ clock-names = "rpgate";
+ #clock-cells = <1>;
+ };
+
+ clkgen: clock-controller@7030012000 {
+ compatible = "sophgo,sg2042-clkgen";
+ reg = <0x70 0x30012000 0x0 0x1000>;
+ clocks = <&pllclk MPLL_CLK>,
+ <&pllclk FPLL_CLK>,
+ <&pllclk DPLL0_CLK>,
+ <&pllclk DPLL1_CLK>;
+ clock-names = "mpll",
+ "fpll",
+ "dpll0",
+ "dpll1";
+ #clock-cells = <1>;
+ };
+
clint_mswi: interrupt-controller@7094000000 {
compatible = "sophgo,sg2042-aclint-mswi", "thead,c900-aclint-mswi";
reg = <0x00000070 0x94000000 0x00000000 0x00004000>;
@@ -341,6 +391,9 @@
interrupt-parent = <&intc>;
interrupts = <112 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <500000000>;
+ clocks = <&clkgen GATE_CLK_UART_500M>,
+ <&clkgen GATE_CLK_APB_UART>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
resets = <&rstgen RST_UART0>;
diff --git a/arch/riscv/boot/dts/starfive/Makefile b/arch/riscv/boot/dts/starfive/Makefile
index 2fa0cd7f31c3..7a163a7d6ba3 100644
--- a/arch/riscv/boot/dts/starfive/Makefile
+++ b/arch/riscv/boot/dts/starfive/Makefile
@@ -9,5 +9,6 @@ dtb-$(CONFIG_ARCH_STARFIVE) += jh7100-beaglev-starlight.dtb
dtb-$(CONFIG_ARCH_STARFIVE) += jh7100-starfive-visionfive-v1.dtb
dtb-$(CONFIG_ARCH_STARFIVE) += jh7110-milkv-mars.dtb
+dtb-$(CONFIG_ARCH_STARFIVE) += jh7110-pine64-star64.dtb
dtb-$(CONFIG_ARCH_STARFIVE) += jh7110-starfive-visionfive-2-v1.2a.dtb
dtb-$(CONFIG_ARCH_STARFIVE) += jh7110-starfive-visionfive-2-v1.3b.dtb
diff --git a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
index 8ff6ea64f048..ca2d44d59d48 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
@@ -244,7 +244,7 @@
regulator-boot-on;
regulator-always-on;
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
regulator-name = "emmc_vdd";
};
};
@@ -294,6 +294,20 @@
status = "okay";
};
+&pcie0 {
+ perst-gpios = <&sysgpio 26 GPIO_ACTIVE_LOW>;
+ phys = <&pciephy0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_pins>;
+};
+
+&pcie1 {
+ perst-gpios = <&sysgpio 28 GPIO_ACTIVE_LOW>;
+ phys = <&pciephy1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_pins>;
+};
+
&pwmdac {
pinctrl-names = "default";
pinctrl-0 = <&pwmdac_pins>;
@@ -321,16 +335,13 @@
#size-cells = <1>;
spl@0 {
- reg = <0x0 0x80000>;
+ reg = <0x0 0xf0000>;
};
uboot-env@f0000 {
reg = <0xf0000 0x10000>;
};
uboot@100000 {
- reg = <0x100000 0x400000>;
- };
- reserved-data@600000 {
- reg = <0x600000 0xa00000>;
+ reg = <0x100000 0xf00000>;
};
};
};
@@ -476,6 +487,54 @@
};
};
+ pcie0_pins: pcie0-0 {
+ clkreq-pins {
+ pinmux = <GPIOMUX(27, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_NONE)>;
+ bias-pull-down;
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ wake-pins {
+ pinmux = <GPIOMUX(32, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_NONE)>;
+ bias-pull-up;
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+
+ pcie1_pins: pcie1-0 {
+ clkreq-pins {
+ pinmux = <GPIOMUX(29, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_NONE)>;
+ bias-pull-down;
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ wake-pins {
+ pinmux = <GPIOMUX(21, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_NONE)>;
+ bias-pull-up;
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+
pwmdac_pins: pwmdac-0 {
pwmdac-pins {
pinmux = <GPIOMUX(33, GPOUT_SYS_PWMDAC_LEFT,
diff --git a/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
index fa0eac78e0ba..5cb9e99e1dac 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
@@ -17,6 +17,13 @@
assigned-clock-parents = <&aoncrg JH7110_AONCLK_GMAC0_RMII_RTX>;
};
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
&phy0 {
motorcomm,tx-clk-adj-enabled;
diff --git a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
new file mode 100644
index 000000000000..b720cdd15ed6
--- /dev/null
+++ b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2024 Henry Bell <dmoo_dv@protonmail.com>
+ */
+
+/dts-v1/;
+#include "jh7110-common.dtsi"
+
+/ {
+ model = "Pine64 Star64";
+ compatible = "pine64,star64", "starfive,jh7110";
+ aliases {
+ ethernet1 = &gmac1;
+ };
+};
+
+&gmac0 {
+ starfive,tx-use-rgmii-clk;
+ assigned-clocks = <&aoncrg JH7110_AONCLK_GMAC0_TX>;
+ assigned-clock-parents = <&aoncrg JH7110_AONCLK_GMAC0_RMII_RTX>;
+};
+
+&gmac1 {
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii-id";
+ starfive,tx-use-rgmii-clk;
+ assigned-clocks = <&syscrg JH7110_SYSCLK_GMAC1_TX>;
+ assigned-clock-parents = <&syscrg JH7110_SYSCLK_GMAC1_RMII_RTX>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&phy0 {
+ rx-internal-delay-ps = <1900>;
+ tx-internal-delay-ps = <1500>;
+ motorcomm,rx-clk-drv-microamp = <2910>;
+ motorcomm,rx-data-drv-microamp = <2910>;
+ motorcomm,tx-clk-adj-enabled;
+ motorcomm,tx-clk-10-inverted;
+ motorcomm,tx-clk-100-inverted;
+ motorcomm,tx-clk-1000-inverted;
+};
+
+&phy1 {
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <300>;
+ motorcomm,rx-clk-drv-microamp = <2910>;
+ motorcomm,rx-data-drv-microamp = <2910>;
+ motorcomm,tx-clk-adj-enabled;
+ motorcomm,tx-clk-10-inverted;
+ motorcomm,tx-clk-100-inverted;
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
index 9d70f21c86fc..18f38fc790a4 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
@@ -32,3 +32,11 @@
&mmc0 {
non-removable;
};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi
index 18047195c600..5ac70759e0ab 100644
--- a/arch/riscv/boot/dts/starfive/jh7110.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi
@@ -1214,5 +1214,91 @@
#reset-cells = <1>;
power-domains = <&pwrc JH7110_PD_VOUT>;
};
+
+ pcie0: pcie@940000000 {
+ compatible = "starfive,jh7110-pcie";
+ reg = <0x9 0x40000000 0x0 0x1000000>,
+ <0x0 0x2b000000 0x0 0x100000>;
+ reg-names = "cfg", "apb";
+ linux,pci-domain = <0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0x0 0x30000000 0x0 0x30000000 0x0 0x08000000>,
+ <0xc3000000 0x9 0x00000000 0x9 0x00000000 0x0 0x40000000>;
+ interrupts = <56>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &pcie_intc0 0x1>,
+ <0x0 0x0 0x0 0x2 &pcie_intc0 0x2>,
+ <0x0 0x0 0x0 0x3 &pcie_intc0 0x3>,
+ <0x0 0x0 0x0 0x4 &pcie_intc0 0x4>;
+ msi-controller;
+ device_type = "pci";
+ starfive,stg-syscon = <&stg_syscon>;
+ bus-range = <0x0 0xff>;
+ clocks = <&syscrg JH7110_SYSCLK_NOC_BUS_STG_AXI>,
+ <&stgcrg JH7110_STGCLK_PCIE0_TL>,
+ <&stgcrg JH7110_STGCLK_PCIE0_AXI_MST0>,
+ <&stgcrg JH7110_STGCLK_PCIE0_APB>;
+ clock-names = "noc", "tl", "axi_mst0", "apb";
+ resets = <&stgcrg JH7110_STGRST_PCIE0_AXI_MST0>,
+ <&stgcrg JH7110_STGRST_PCIE0_AXI_SLV0>,
+ <&stgcrg JH7110_STGRST_PCIE0_AXI_SLV>,
+ <&stgcrg JH7110_STGRST_PCIE0_BRG>,
+ <&stgcrg JH7110_STGRST_PCIE0_CORE>,
+ <&stgcrg JH7110_STGRST_PCIE0_APB>;
+ reset-names = "mst0", "slv0", "slv", "brg",
+ "core", "apb";
+ status = "disabled";
+
+ pcie_intc0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+
+ pcie1: pcie@9c0000000 {
+ compatible = "starfive,jh7110-pcie";
+ reg = <0x9 0xc0000000 0x0 0x1000000>,
+ <0x0 0x2c000000 0x0 0x100000>;
+ reg-names = "cfg", "apb";
+ linux,pci-domain = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0x0 0x38000000 0x0 0x38000000 0x0 0x08000000>,
+ <0xc3000000 0x9 0x80000000 0x9 0x80000000 0x0 0x40000000>;
+ interrupts = <57>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &pcie_intc1 0x1>,
+ <0x0 0x0 0x0 0x2 &pcie_intc1 0x2>,
+ <0x0 0x0 0x0 0x3 &pcie_intc1 0x3>,
+ <0x0 0x0 0x0 0x4 &pcie_intc1 0x4>;
+ msi-controller;
+ device_type = "pci";
+ starfive,stg-syscon = <&stg_syscon>;
+ bus-range = <0x0 0xff>;
+ clocks = <&syscrg JH7110_SYSCLK_NOC_BUS_STG_AXI>,
+ <&stgcrg JH7110_STGCLK_PCIE1_TL>,
+ <&stgcrg JH7110_STGCLK_PCIE1_AXI_MST0>,
+ <&stgcrg JH7110_STGCLK_PCIE1_APB>;
+ clock-names = "noc", "tl", "axi_mst0", "apb";
+ resets = <&stgcrg JH7110_STGRST_PCIE1_AXI_MST0>,
+ <&stgcrg JH7110_STGRST_PCIE1_AXI_SLV0>,
+ <&stgcrg JH7110_STGRST_PCIE1_AXI_SLV>,
+ <&stgcrg JH7110_STGRST_PCIE1_BRG>,
+ <&stgcrg JH7110_STGRST_PCIE1_CORE>,
+ <&stgcrg JH7110_STGRST_PCIE1_APB>;
+ reset-names = "mst0", "slv0", "slv", "brg",
+ "core", "apb";
+ status = "disabled";
+
+ pcie_intc1: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
};
};
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index d2fa25839012..3c9974062c20 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -122,6 +122,87 @@
};
};
+ pmu {
+ compatible = "riscv,pmu";
+ riscv,event-to-mhpmcounters =
+ <0x00003 0x00003 0x0007fff8>,
+ <0x00004 0x00004 0x0007fff8>,
+ <0x00005 0x00005 0x0007fff8>,
+ <0x00006 0x00006 0x0007fff8>,
+ <0x00007 0x00007 0x0007fff8>,
+ <0x00008 0x00008 0x0007fff8>,
+ <0x00009 0x00009 0x0007fff8>,
+ <0x0000a 0x0000a 0x0007fff8>,
+ <0x10000 0x10000 0x0007fff8>,
+ <0x10001 0x10001 0x0007fff8>,
+ <0x10002 0x10002 0x0007fff8>,
+ <0x10003 0x10003 0x0007fff8>,
+ <0x10010 0x10010 0x0007fff8>,
+ <0x10011 0x10011 0x0007fff8>,
+ <0x10012 0x10012 0x0007fff8>,
+ <0x10013 0x10013 0x0007fff8>;
+ riscv,event-to-mhpmevent =
+ <0x00003 0x00000000 0x00000001>,
+ <0x00004 0x00000000 0x00000002>,
+ <0x00006 0x00000000 0x00000006>,
+ <0x00005 0x00000000 0x00000007>,
+ <0x00007 0x00000000 0x00000008>,
+ <0x00008 0x00000000 0x00000009>,
+ <0x00009 0x00000000 0x0000000a>,
+ <0x0000a 0x00000000 0x0000000b>,
+ <0x10000 0x00000000 0x0000000c>,
+ <0x10001 0x00000000 0x0000000d>,
+ <0x10002 0x00000000 0x0000000e>,
+ <0x10003 0x00000000 0x0000000f>,
+ <0x10010 0x00000000 0x00000010>,
+ <0x10011 0x00000000 0x00000011>,
+ <0x10012 0x00000000 0x00000012>,
+ <0x10013 0x00000000 0x00000013>;
+ riscv,raw-event-to-mhpmcounters =
+ <0x00000000 0x00000001 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000002 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000003 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000004 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000005 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000006 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000007 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000008 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000009 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000000a 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000000b 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000000c 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000000d 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000000e 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000000f 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000010 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000011 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000012 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000013 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000014 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000015 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000016 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000017 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000018 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000019 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000001a 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000001b 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000001c 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000001d 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000001e 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000001f 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000020 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000021 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000022 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000023 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000024 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000025 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000026 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000027 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000028 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x00000029 0xffffffff 0xffffffff 0x0007fff8>,
+ <0x00000000 0x0000002a 0xffffffff 0xffffffff 0x0007fff8>;
+ };
+
osc: oscillator {
compatible = "fixed-clock";
clock-output-names = "osc_24m";
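The PMU node added above maps SBI PMU event IDs onto hardware counters; under the generic riscv,pmu binding each riscv,event-to-mhpmcounters entry reads as <first-event last-event counter-bitmap>, so 0x0007fff8 would select mhpmcounter3..mhpmcounter18 (that layout is an assumption about the binding, not spelled out in this hunk). A minimal sketch that decodes the bitmap:

#include <stdio.h>

int main(void)
{
    /* counter bitmap taken from the entries above */
    unsigned int bitmap = 0x0007fff8;

    /* one bit per mhpmcounter: bit 3 -> mhpmcounter3, and so on */
    for (int i = 0; i < 32; i++)
        if (bitmap & (1u << i))
            printf("mhpmcounter%d\n", i);   /* prints counters 3..18 */
    return 0;
}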
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 12dc8c73a8ac..3f1f055866af 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -110,8 +110,10 @@ CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
CONFIG_PCIE_FU740=y
+CONFIG_PCIE_STARFIVE_HOST=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_SIFIVE_CCACHE=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
@@ -143,6 +145,7 @@ CONFIG_RAVB=y
CONFIG_STMMAC_ETH=m
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
+CONFIG_MOTORCOMM_PHY=y
CONFIG_CAN_RCAR_CANFD=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
@@ -155,24 +158,35 @@ CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_HW_RANDOM_JH7110=m
+CONFIG_I2C=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_MV64XXX=m
CONFIG_I2C_RIIC=y
CONFIG_SPI=y
+CONFIG_SPI_CADENCE_QUADSPI=m
+CONFIG_SPI_PL022=m
CONFIG_SPI_RSPI=m
CONFIG_SPI_SIFIVE=y
CONFIG_SPI_SUN6I=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_GPIO_SIFIVE=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_SENSORS_SFCTEMP=m
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_RENESAS_RZG2LWDT=y
+CONFIG_MFD_AXP20X_I2C=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_VIDEO_CADENCE_CSI2RX=m
CONFIG_DRM=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_NOUVEAU=m
@@ -184,6 +198,10 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_RZ=m
+CONFIG_SND_DESIGNWARE_I2S=m
+CONFIG_SND_SOC_STARFIVE=m
+CONFIG_SND_SOC_JH7110_PWMDAC=m
+CONFIG_SND_SOC_JH7110_TDM=m
CONFIG_SND_SOC_WM8978=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_USB=y
@@ -197,6 +215,11 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_USB_CDNS_SUPPORT=m
+CONFIG_USB_CDNS3=m
+CONFIG_USB_CDNS3_GADGET=y
+CONFIG_USB_CDNS3_HOST=y
+CONFIG_USB_CDNS3_STARFIVE=m
CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SUNXI=m
CONFIG_NOP_USB_XCEIV=m
@@ -246,6 +269,9 @@ CONFIG_RZG2L_ADC=m
CONFIG_RESET_RZG2L_USBPHY_CTRL=y
CONFIG_PHY_SUN4I_USB=m
CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
+CONFIG_PHY_STARFIVE_JH7110_PCIE=m
+CONFIG_PHY_STARFIVE_JH7110_USB=m
CONFIG_LIBNVDIMM=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_EXT4_FS=y
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 504f8b7e72d4..5c589770f2a8 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+syscall-y += syscall_table_64.h
+
generic-y += early_ioremap.h
generic-y += flat.h
generic-y += kvm_para.h
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index ddb002ed89de..808b4c78462e 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -10,7 +10,7 @@
#include <asm/fence.h>
-#define __arch_xchg_masked(prepend, append, r, p, n) \
+#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \
({ \
u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
@@ -25,7 +25,7 @@
"0: lr.w %0, %2\n" \
" and %1, %0, %z4\n" \
" or %1, %1, %z3\n" \
- " sc.w %1, %1, %2\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
append \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
@@ -46,7 +46,8 @@
: "memory"); \
})
-#define _arch_xchg(ptr, new, sfx, prepend, append) \
+#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \
+ sc_append, swap_append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(__ptr)) __new = (new); \
@@ -55,15 +56,15 @@
switch (sizeof(*__ptr)) { \
case 1: \
case 2: \
- __arch_xchg_masked(prepend, append, \
+ __arch_xchg_masked(sc_sfx, prepend, sc_append, \
__ret, __ptr, __new); \
break; \
case 4: \
- __arch_xchg(".w" sfx, prepend, append, \
+ __arch_xchg(".w" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
case 8: \
- __arch_xchg(".d" sfx, prepend, append, \
+ __arch_xchg(".d" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
default: \
@@ -73,16 +74,17 @@
})
#define arch_xchg_relaxed(ptr, x) \
- _arch_xchg(ptr, x, "", "", "")
+ _arch_xchg(ptr, x, "", "", "", "", "")
#define arch_xchg_acquire(ptr, x) \
- _arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
+ _arch_xchg(ptr, x, "", "", "", \
+ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)
#define arch_xchg_release(ptr, x) \
- _arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
+ _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")
#define arch_xchg(ptr, x) \
- _arch_xchg(ptr, x, ".aqrl", "", "")
+ _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")
#define xchg32(ptr, x) \
({ \
diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
index 06e439eeef9a..09fde95a5e8f 100644
--- a/arch/riscv/include/asm/insn.h
+++ b/arch/riscv/include/asm/insn.h
@@ -145,7 +145,7 @@
/* parts of opcode for RVF, RVD and RVQ */
#define RVFDQ_FL_FS_WIDTH_OFF 12
-#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0)
+#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(2, 0)
#define RVFDQ_FL_FS_WIDTH_W 2
#define RVFDQ_FL_FS_WIDTH_D 3
#define RVFDQ_LS_FS_WIDTH_Q 4
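The GENMASK(3, 0) -> GENMASK(2, 0) change above narrows the width mask to the 3-bit funct3 field at bits [14:12]; with a 4-bit mask, bit 15 (the low bit of rs1) leaks into the decoded width. A small decode sketch, using a hand-assembled fld f0, 0(a1) as the test value (the encoding is this sketch's own assumption, not taken from the hunk):

#include <stdint.h>
#include <stdio.h>

#define WIDTH_OFF  12
#define WIDTH_MASK 0x7u     /* 3-bit funct3 field, bits [14:12] */

int main(void)
{
    uint32_t insn = 0x0005b007; /* fld f0, 0(a1): rs1 = x11, funct3 = 3 */

    /* correct 3-bit mask: width 3 = double precision */
    printf("width (3-bit mask) = %u\n", (insn >> WIDTH_OFF) & WIDTH_MASK);
    /* the old 4-bit mask also captures bit 15 and decodes 11 (0xb) */
    printf("width (4-bit mask) = %u\n", (insn >> WIDTH_OFF) & 0xfu);
    return 0;
}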
diff --git a/arch/riscv/include/asm/syscall_table.h b/arch/riscv/include/asm/syscall_table.h
new file mode 100644
index 000000000000..0c2d61782813
--- /dev/null
+++ b/arch/riscv/include/asm/syscall_table.h
@@ -0,0 +1,7 @@
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 64
+#include <asm/syscall_table_64.h>
+#else
+#include <asm/syscall_table_32.h>
+#endif
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 221630bdbd07..e6d904fa67c5 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -3,11 +3,6 @@
* Copyright (C) 2012 Regents of the University of California
*/
-/*
- * There is explicitly no include guard here because this file is expected to
- * be included multiple times.
- */
-
#define __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_COMPAT
@@ -21,6 +16,14 @@
#define __ARCH_WANT_COMPAT_FADVISE64_64
#endif
+#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#endif /* __LP64__ */
+
+#define __ARCH_WANT_MEMFD_SECRET
+
+
#include <uapi/asm/unistd.h>
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild
index f66554cd5c45..89ac01faa5ae 100644
--- a/arch/riscv/include/uapi/asm/Kbuild
+++ b/arch/riscv/include/uapi/asm/Kbuild
@@ -1 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+syscall-y += unistd_64.h
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 950ab3fd4409..81896bbbf727 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -14,41 +14,10 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
+#include <asm/bitsperlong.h>
-#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
-#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SET_GET_RLIMIT
-#endif /* __LP64__ */
-
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_MEMFD_SECRET
-
-#include <asm-generic/unistd.h>
-
-/*
- * Allows the instruction cache to be flushed from userspace. Despite RISC-V
- * having a direct 'fence.i' instruction available to userspace (which we
- * can't trap!), that's not actually viable when running on Linux because the
- * kernel might schedule a process on another hart. There is no way for
- * userspace to handle this without invoking the kernel (as it doesn't know the
- * thread->hart mappings), so we've defined a RISC-V specific system call to
- * flush the instruction cache.
- *
- * __NR_riscv_flush_icache is defined to flush the instruction cache over an
- * address range, with the flush applying to either all threads or just the
- * caller. We don't currently do anything with the address range, that's just
- * in there for forwards compatibility.
- */
-#ifndef __NR_riscv_flush_icache
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-#endif
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
-
-/*
- * Allows userspace to query the kernel for CPU architecture and
- * microarchitecture details across a given set of CPUs.
- */
-#ifndef __NR_riscv_hwprobe
-#define __NR_riscv_hwprobe (__NR_arch_specific_syscall + 14)
+#if __BITS_PER_LONG == 64
+#include <asm/unistd_64.h>
+#else
+#include <asm/unistd_32.h>
#endif
-__SYSCALL(__NR_riscv_hwprobe, sys_riscv_hwprobe)
diff --git a/arch/riscv/kernel/Makefile.syscalls b/arch/riscv/kernel/Makefile.syscalls
new file mode 100644
index 000000000000..52087a023b3d
--- /dev/null
+++ b/arch/riscv/kernel/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += riscv memfd_secret
+syscall_abis_64 += riscv newstat rlimit memfd_secret
diff --git a/arch/riscv/kernel/compat_syscall_table.c b/arch/riscv/kernel/compat_syscall_table.c
index ad7f2d712f5f..e884c069e88f 100644
--- a/arch/riscv/kernel/compat_syscall_table.c
+++ b/arch/riscv/kernel/compat_syscall_table.c
@@ -8,9 +8,11 @@
#include <asm-generic/syscalls.h>
#include <asm/syscall.h>
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+
#undef __SYSCALL
#define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *);
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = __riscv_##call,
@@ -19,5 +21,5 @@ asmlinkage long compat_sys_rt_sigreturn(void);
void * const compat_sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table_32.h>
};
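The compat table is still built with the usual double expansion of __SYSCALL: the generated header is included once to emit prototypes and once to emit designated initializers. A toy, self-contained version of that pattern (toy syscall names and an inline list standing in for the generated asm/syscall_table_32.h):

#include <stdio.h>

/* stand-in for the generated header's list of __SYSCALL(nr, call) lines */
#define SYSCALL_LIST \
    __SYSCALL(0, sys_read) \
    __SYSCALL(1, sys_write)

/* first expansion: prototypes */
#define __SYSCALL(nr, call) long call(void);
SYSCALL_LIST
#undef __SYSCALL

/* second expansion: table entries (the kernel uses void * const here) */
#define __SYSCALL(nr, call) [nr] = call,
static long (*const sys_call_table[])(void) = { SYSCALL_LIST };
#undef __SYSCALL

long sys_read(void)  { return 0; }
long sys_write(void) { return 1; }

int main(void)
{
    printf("%zu entries, table[1]() = %ld\n",
           sizeof(sys_call_table) / sizeof(sys_call_table[0]),
           sys_call_table[1]());
    return 0;
}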
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 1cc7df740edd..e6fbaaf54956 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
/* Make sure tidle is updated */
smp_mb();
bdata->task_ptr = tidle;
- bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
+ bdata->stack_ptr = task_pt_regs(tidle);
/* Make sure boot data is updated */
smp_mb();
hsm_data = __pa(bdata);
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index 613872b0a21a..24869eb88908 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
/* Make sure tidle is updated */
smp_mb();
- WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
- task_stack_page(tidle) + THREAD_SIZE);
+ WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
}
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 87cbd86576b2..4b95c574fd04 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -120,9 +120,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
mutex_unlock(&text_mutex);
- if (!mod)
- local_flush_icache_range(rec->ip, rec->ip + MCOUNT_INSN_SIZE);
-
return out;
}
@@ -156,9 +153,9 @@ static int __ftrace_modify_code(void *data)
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
- }
- local_flush_icache_all();
+ local_flush_icache_all();
+ }
return 0;
}
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index ed9cad20c039..3c830a6f7ef4 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -121,20 +121,12 @@ static void machine_kexec_mask_interrupts(void)
for_each_irq_desc(i, desc) {
struct irq_chip *chip;
- int ret;
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
- /*
- * First try to remove the active state. If this
- * fails, try to EOI the interrupt.
- */
- ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
-
- if (ret && irqd_irq_inprogress(&desc->irq_data) &&
- chip->irq_eoi)
+ if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
chip->irq_eoi(&desc->irq_data);
if (chip->irq_mask)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 4007563fb607..ab03732d06c4 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -89,6 +89,14 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
memset(waddr, c, len);
+ /*
+ * We could have just patched a function that is about to be
+ * called so make sure we don't execute partially patched
+ * instructions by flushing the icache as soon as possible.
+ */
+ local_flush_icache_range((unsigned long)waddr,
+ (unsigned long)waddr + len);
+
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
@@ -135,6 +143,14 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
ret = copy_to_kernel_nofault(waddr, insn, len);
+ /*
+ * We could have just patched a function that is about to be
+ * called so make sure we don't execute partially patched
+ * instructions by flushing the icache as soon as possible.
+ */
+ local_flush_icache_range((unsigned long)waddr,
+ (unsigned long)waddr + len);
+
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
@@ -189,9 +205,6 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
ret = patch_insn_set(tp, c, len);
- if (!ret)
- flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);
-
return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
@@ -224,9 +237,6 @@ int patch_text_nosync(void *addr, const void *insns, size_t len)
ret = patch_insn_write(tp, insns, len);
- if (!ret)
- flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
-
return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
@@ -253,9 +263,9 @@ static int patch_text_cb(void *data)
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_relax();
- }
- local_flush_icache_all();
+ local_flush_icache_all();
+ }
return ret;
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 528ec7cc9a62..10e311b2759d 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
+ int graph_idx = 0;
int level = 0;
if (regs) {
@@ -68,7 +69,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
pc = regs->ra;
} else {
fp = frame->fp;
- pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+ pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
&frame->ra);
if (pc == (unsigned long)ret_from_exception) {
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
@@ -156,7 +157,7 @@ unsigned long __get_wchan(struct task_struct *task)
return pc;
}
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 64155323cc92..d77afe05578f 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -23,7 +23,7 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, offset)
+ unsigned long, fd, unsigned long, offset)
{
return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
@@ -32,7 +32,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, offset)
+ unsigned long, fd, unsigned long, offset)
{
/*
* Note that the shift for mmap2 is constant (12),
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index dda913764903..6f1a36cb0f3f 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -9,14 +9,16 @@
#include <asm-generic/syscalls.h>
#include <asm/syscall.h>
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
#undef __SYSCALL
#define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *);
-#include <asm/unistd.h>
+#include <asm/syscall_table.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = __riscv_##call,
void * const sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall,
-#include <asm/unistd.h>
+#include <asm/syscall_table.h>
};
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index 0eb689351b7d..5cd407c6a8e4 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
- u32 hart, group = 0;
+ u32 hart = 0, group = 0;
- hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
- GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+ if (aia->nr_hart_bits)
+ hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+ GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_group_bits)
group = (addr >> aia->nr_group_shift) &
GENMASK_ULL(aia->nr_group_bits - 1, 0);
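The change above guards the hart-index extraction: with nr_hart_bits == 0 the old code built the mask from GENMASK_ULL(-1, 0), an undefined shift, instead of simply returning hart 0. A user-space sketch of the guarded extraction (the open-coded mask and the example layout are illustrative, not the kernel macros):

#include <stdint.h>
#include <stdio.h>

static uint32_t hart_index(uint64_t addr, unsigned int guest_bits,
                           unsigned int hart_bits, unsigned int page_shift)
{
    /* no hart bits configured: every address belongs to hart 0 */
    if (!hart_bits)
        return 0;
    return (addr >> (guest_bits + page_shift)) & ((1ULL << hart_bits) - 1);
}

int main(void)
{
    /* hypothetical IMSIC layout: 4 KiB pages, 1 guest bit, 2 hart bits */
    printf("%u\n", hart_index(0x3000, 1, 2, 12));  /* hart 1 */
    printf("%u\n", hart_index(0x3000, 1, 0, 12));  /* 0, no hart bits */
    return 0;
}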
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index c676275ea0a0..62874fbca29f 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -724,9 +724,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
switch (reg_subtype) {
case KVM_REG_RISCV_ISA_SINGLE:
return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
- case KVM_REG_RISCV_SBI_MULTI_EN:
+ case KVM_REG_RISCV_ISA_MULTI_EN:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
- case KVM_REG_RISCV_SBI_MULTI_DIS:
+ case KVM_REG_RISCV_ISA_MULTI_DIS:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
default:
return -ENOENT;
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 04db1f993c47..bcf41d6e0df0 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -327,7 +327,7 @@ static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_att
event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc);
if (IS_ERR(event)) {
- pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
+ pr_debug("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
return PTR_ERR(event);
}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index b3fcf7d67efb..5224f3733802 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -293,8 +293,8 @@ void handle_page_fault(struct pt_regs *regs)
if (unlikely(access_error(cause, vma))) {
vma_end_read(vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
- tsk->thread.bad_cause = SEGV_ACCERR;
- bad_area_nosemaphore(regs, code, addr);
+ tsk->thread.bad_cause = cause;
+ bad_area_nosemaphore(regs, SEGV_ACCERR, addr);
return;
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index e3218d65f21d..e3405e4b99af 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -250,18 +250,19 @@ static void __init setup_bootmem(void)
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
- * memblock allocator is not aware of the fact that last 4K bytes of
- * the addressable memory can not be mapped because of IS_ERR_VALUE
- * macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory. For 64-bit
- * kernel, this problem can't happen here as the end of the virtual
- * address space is occupied by the kernel mapping then this check must
- * be done as soon as the kernel mapping base address is determined.
+ * Reserve physical address space that would be mapped to virtual
+ * addresses greater than (void *)(-PAGE_SIZE) because:
+ * - This memory would overlap with ERR_PTR
+ * - This memory belongs to high memory, which is not supported
+ *
+ * This is not applicable to 64-bit kernel, because virtual addresses
+ * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
+ * occupied by kernel mapping. Also it is unrealistic for high memory
+ * to exist on 64-bit platforms.
*/
if (!IS_ENABLED(CONFIG_64BIT)) {
- max_mapped_addr = __pa(~(ulong)0);
- if (max_mapped_addr == (phys_ram_end - 1))
- memblock_set_current_limit(max_mapped_addr - 4096);
+ max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
+ memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
min_low_pfn = PFN_UP(phys_ram_base);
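The new comment describes the 32-bit case: any physical page whose linear-map alias would land at or above (void *)(-PAGE_SIZE) collides with the ERR_PTR range, so the tail of DRAM that would map there is reserved outright rather than capping memblock by 4 KiB. A sketch of the same arithmetic under an assumed flat va = pa - phys_ram_base + PAGE_OFFSET mapping (the constants are made up):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096u
#define PAGE_OFFSET   0xc0000000u   /* assumed 32-bit linear map base */
#define PHYS_RAM_BASE 0x80000000u   /* assumed start of DRAM */

/* inverse of the linear mapping: virtual -> physical */
static uint32_t va_to_pa(uint32_t va)
{
    return va - PAGE_OFFSET + PHYS_RAM_BASE;
}

int main(void)
{
    /* first physical address whose alias reaches the ERR_PTR window */
    uint32_t bad_pa = va_to_pa((uint32_t)-PAGE_SIZE);
    /* mirrors memblock_reserve(max_mapped_addr, -max_mapped_addr) */
    uint32_t len = (uint32_t)-bad_pa;

    printf("reserve [%#x, +%#x)\n", bad_pa, len);
    return 0;
}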
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index fdbf88ca8b70..1d1c78d4cff1 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -18,6 +18,11 @@ static inline bool rvc_enabled(void)
return IS_ENABLED(CONFIG_RISCV_ISA_C);
}
+static inline bool rvzba_enabled(void)
+{
+ return IS_ENABLED(CONFIG_RISCV_ISA_ZBA) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBA);
+}
+
static inline bool rvzbb_enabled(void)
{
return IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBB);
@@ -737,6 +742,17 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
return rv_css_insn(0x6, imm, rs2, 0x2);
}
+/* RVZBA instructions. */
+static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x10, rs2, rs1, 0x4, rd, 0x33);
+}
+
+static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x10, rs2, rs1, 0x6, rd, 0x33);
+}
+
/* RVZBB instructions. */
static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
{
@@ -939,6 +955,14 @@ static inline u16 rvc_sdsp(u32 imm9, u8 rs2)
return rv_css_insn(0x7, imm, rs2, 0x2);
}
+/* RV64-only ZBA instructions. */
+
+static inline u32 rvzba_zextw(u8 rd, u8 rs1)
+{
+ /* add.uw rd, rs1, ZERO */
+ return rv_r_insn(0x04, RV_REG_ZERO, rs1, 0, rd, 0x3b);
+}
+
#endif /* __riscv_xlen == 64 */
/* Helper functions that emit RVC instructions when possible. */
@@ -1082,6 +1106,28 @@ static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
emit(rv_sw(rs1, off, rs2), ctx);
}
+static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvzba_enabled()) {
+ emit(rvzba_sh2add(rd, rs1, rs2), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs1, 2, ctx);
+ emit_add(rd, rd, rs2, ctx);
+}
+
+static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvzba_enabled()) {
+ emit(rvzba_sh3add(rd, rs1, rs2), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs1, 3, ctx);
+ emit_add(rd, rd, rs2, ctx);
+}
+
/* RV64-only helper functions. */
#if __riscv_xlen == 64
@@ -1161,6 +1207,11 @@ static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
{
+ if (rvzba_enabled()) {
+ emit(rvzba_zextw(rd, rs), ctx);
+ return;
+ }
+
emit_slli(rd, rs, 32, ctx);
emit_srli(rd, rd, 32, ctx);
}
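The emit_sh2add()/emit_sh3add() helpers above pick the single Zba instruction when it is available and otherwise fall back to the slli+add pair; both compute the same scaled-index address, which is what the tail-call and per-CPU lookups need. A quick equivalence sketch in plain C:

#include <stdint.h>
#include <stdio.h>

/* sh3add semantics: rd = (rs1 << 3) + rs2 */
static uint64_t sh3add(uint64_t rs1, uint64_t rs2)
{
    return (rs1 << 3) + rs2;
}

/* the two-instruction fallback emitted without Zba */
static uint64_t slli_then_add(uint64_t rs1, uint64_t rs2)
{
    uint64_t t = rs1 << 3;  /* slli t, rs1, 3 */
    return t + rs2;         /* add  rd, t, rs2 */
}

int main(void)
{
    /* e.g. indexing an array of 8-byte pointers: base + (idx << 3) */
    uint64_t base = 0x1000, idx = 5;

    printf("%#llx %#llx\n",
           (unsigned long long)sh3add(idx, base),
           (unsigned long long)slli_then_add(idx, base)); /* both 0x1028 */
    return 0;
}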
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index f5ba73bb153d..592dd86fbf81 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -811,8 +811,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
* if (!prog)
* goto out;
*/
- emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
- emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
+ emit_sh2add(RV_REG_T0, lo(idx_reg), lo(arr_reg), ctx);
off = offsetof(struct bpf_array, ptrs);
if (is_12b_check(off, insn))
return -1;
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 79a001d5533e..0795efdd3519 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -15,7 +15,10 @@
#include <asm/percpu.h>
#include "bpf_jit.h"
+#define RV_MAX_REG_ARGS 8
#define RV_FENTRY_NINSNS 2
+/* imm that allows emit_imm to emit max count insns */
+#define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF
#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
@@ -380,8 +383,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
* if (!prog)
* goto out;
*/
- emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
+ emit_sh3add(RV_REG_T2, RV_REG_A2, RV_REG_A1, ctx);
off = offsetof(struct bpf_array, ptrs);
if (is_12b_check(off, insn))
return -1;
@@ -537,8 +539,10 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
case BPF_CMPXCHG:
r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
- emit(is64 ? rv_addi(RV_REG_T2, r0, 0) :
- rv_addiw(RV_REG_T2, r0, 0), ctx);
+ if (is64)
+ emit_mv(RV_REG_T2, r0, ctx);
+ else
+ emit_addiw(RV_REG_T2, r0, 0, ctx);
emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
rv_lr_w(r0, 0, rd, 0, 0), ctx);
jmp_offset = ninsns_rvoff(8);
@@ -689,26 +693,45 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
return ret;
}
-static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
+static void store_args(int nr_arg_slots, int args_off, struct rv_jit_context *ctx)
{
int i;
- for (i = 0; i < nregs; i++) {
- emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
+ for (i = 0; i < nr_arg_slots; i++) {
+ if (i < RV_MAX_REG_ARGS) {
+ emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
+ } else {
+ /* skip slots for T0 and FP of traced function */
+ emit_ld(RV_REG_T1, 16 + (i - RV_MAX_REG_ARGS) * 8, RV_REG_FP, ctx);
+ emit_sd(RV_REG_FP, -args_off, RV_REG_T1, ctx);
+ }
args_off -= 8;
}
}
-static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
+static void restore_args(int nr_reg_args, int args_off, struct rv_jit_context *ctx)
{
int i;
- for (i = 0; i < nregs; i++) {
+ for (i = 0; i < nr_reg_args; i++) {
emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
args_off -= 8;
}
}
+static void restore_stack_args(int nr_stack_args, int args_off, int stk_arg_off,
+ struct rv_jit_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < nr_stack_args; i++) {
+ emit_ld(RV_REG_T1, -(args_off - RV_MAX_REG_ARGS * 8), RV_REG_FP, ctx);
+ emit_sd(RV_REG_FP, -stk_arg_off, RV_REG_T1, ctx);
+ args_off -= 8;
+ stk_arg_off -= 8;
+ }
+}
+
static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
{
@@ -781,8 +804,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
{
int i, ret, offset;
int *branches_off = NULL;
- int stack_size = 0, nregs = m->nr_args;
- int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off;
+ int stack_size = 0, nr_arg_slots = 0;
+ int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off, stk_arg_off;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
@@ -828,20 +851,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
* FP - sreg_off [ callee saved reg ]
*
* [ pads ] pads for 16 bytes alignment
+ *
+ * [ stack_argN ]
+ * [ ... ]
+ * FP - stk_arg_off [ stack_arg1 ] BPF_TRAMP_F_CALL_ORIG
*/
if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
return -ENOTSUPP;
- /* extra regiters for struct arguments */
- for (i = 0; i < m->nr_args; i++)
- if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
- nregs += round_up(m->arg_size[i], 8) / 8 - 1;
-
- /* 8 arguments passed by registers */
- if (nregs > 8)
+ if (m->nr_args > MAX_BPF_FUNC_ARGS)
return -ENOTSUPP;
+ for (i = 0; i < m->nr_args; i++)
+ nr_arg_slots += round_up(m->arg_size[i], 8) / 8;
+
/* room of trampoline frame to store return address and frame pointer */
stack_size += 16;
@@ -851,7 +875,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
retval_off = stack_size;
}
- stack_size += nregs * 8;
+ stack_size += nr_arg_slots * 8;
args_off = stack_size;
stack_size += 8;
@@ -868,7 +892,13 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
stack_size += 8;
sreg_off = stack_size;
- stack_size = round_up(stack_size, 16);
+ if ((flags & BPF_TRAMP_F_CALL_ORIG) && (nr_arg_slots - RV_MAX_REG_ARGS > 0))
+ stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8;
+
+ stack_size = round_up(stack_size, STACK_ALIGN);
+
+ /* room for args on stack must be at the top of stack */
+ stk_arg_off = stack_size;
if (!is_struct_ops) {
/* For the trampoline called from function entry,
@@ -905,17 +935,17 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
}
- emit_li(RV_REG_T1, nregs, ctx);
+ emit_li(RV_REG_T1, nr_arg_slots, ctx);
emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
- store_args(nregs, args_off, ctx);
+ store_args(nr_arg_slots, args_off, ctx);
/* skip to actual body of traced function */
if (flags & BPF_TRAMP_F_SKIP_FRAME)
orig_call += RV_FENTRY_NINSNS * 4;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- emit_imm(RV_REG_A0, (const s64)im, ctx);
+ emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
if (ret)
return ret;
@@ -948,13 +978,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- restore_args(nregs, args_off, ctx);
+ restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
+ restore_stack_args(nr_arg_slots - RV_MAX_REG_ARGS, args_off, stk_arg_off, ctx);
ret = emit_call((const u64)orig_call, true, ctx);
if (ret)
goto out;
emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
- im->ip_after_call = ctx->insns + ctx->ninsns;
+ im->ip_after_call = ctx->ro_insns + ctx->ninsns;
/* 2 nops reserved for auipc+jalr pair */
emit(rv_nop(), ctx);
emit(rv_nop(), ctx);
@@ -975,15 +1006,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- im->ip_epilogue = ctx->insns + ctx->ninsns;
- emit_imm(RV_REG_A0, (const s64)im, ctx);
+ im->ip_epilogue = ctx->ro_insns + ctx->ninsns;
+ emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
if (ret)
goto out;
}
if (flags & BPF_TRAMP_F_RESTORE_REGS)
- restore_args(nregs, args_off, ctx);
+ restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
if (save_ret) {
emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
@@ -1038,31 +1069,52 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
return ret < 0 ? ret : ninsns_rvoff(ctx.ninsns);
}
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
- void *image_end, const struct btf_func_model *m,
+void *arch_alloc_bpf_trampoline(unsigned int size)
+{
+ return bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
+}
+
+void arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+ bpf_prog_pack_free(image, size);
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
+ void *ro_image_end, const struct btf_func_model *m,
u32 flags, struct bpf_tramp_links *tlinks,
void *func_addr)
{
int ret;
+ void *image, *res;
struct rv_jit_context ctx;
+ u32 size = ro_image_end - ro_image;
+
+ image = kvmalloc(size, GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
ctx.ninsns = 0;
- /*
- * The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the
- * JITed instructions and later copies it to a RX region (ctx.ro_insns).
- * It also uses ctx.ro_insns to calculate offsets for jumps etc. As the
- * trampoline image uses the same memory area for writing and execution,
- * both ctx.insns and ctx.ro_insns can be set to image.
- */
ctx.insns = image;
- ctx.ro_insns = image;
+ ctx.ro_insns = ro_image;
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
if (ret < 0)
- return ret;
+ goto out;
- bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);
+ if (WARN_ON(size < ninsns_rvoff(ctx.ninsns))) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ res = bpf_arch_text_copy(ro_image, image, size);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ goto out;
+ }
- return ninsns_rvoff(ret);
+ bpf_flush_icache(ro_image, ro_image_end);
+out:
+ kvfree(image);
+ return ret < 0 ? ret : size;
}
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
@@ -1097,12 +1149,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* Load current CPU number in T1 */
emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
RV_REG_TP, ctx);
- /* << 3 because offsets are 8 bytes */
- emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
/* Load address of __per_cpu_offset array in T2 */
emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
- /* Add offset of current CPU to __per_cpu_offset */
- emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+ /* Get address of __per_cpu_offset[cpu] in T1 */
+ emit_sh3add(RV_REG_T1, RV_REG_T1, RV_REG_T2, ctx);
/* Load __per_cpu_offset[cpu] in T1 */
emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
/* Add the offset to Rd */
@@ -1960,7 +2010,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
{
int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
- bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+ bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
if (bpf_stack_adjust)
mark_fp(ctx);
@@ -1982,7 +2032,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
if (ctx->arena_vm_start)
stack_adjust += 8;
- stack_adjust = round_up(stack_adjust, 16);
+ stack_adjust = round_up(stack_adjust, STACK_ALIGN);
stack_adjust += bpf_stack_adjust;
store_offset = stack_adjust - 8;
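With the argument handling above, the first eight 8-byte argument slots still come from a0..a7, while any remaining slots are read out of the caller's frame, skipping the 16 bytes that hold the traced function's saved T0 and FP. A small sketch of where each slot of a hypothetical ten-slot call lives (the offsets mirror the 16 + (i - 8) * 8 expression in the hunk):

#include <stdio.h>

#define RV_MAX_REG_ARGS 8

int main(void)
{
    int nr_arg_slots = 10;  /* hypothetical traced function */

    for (int i = 0; i < nr_arg_slots; i++) {
        if (i < RV_MAX_REG_ARGS)
            printf("slot %d: register a%d\n", i, i);
        else
            /* past a0..a7: caller frame, after the saved T0/FP pair */
            printf("slot %d: caller frame, FP + %d\n",
                   i, 16 + (i - RV_MAX_REG_ARGS) * 8);
    }
    return 0;
}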
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 0a96abdaca65..6de753c667f4 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -178,8 +178,7 @@ skip_init_ctx:
prog->jited_len = prog_size - cfi_get_offset();
if (!prog->is_func || extra_pass) {
- if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header,
- jit_data->header))) {
+ if (WARN_ON(bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header))) {
/* ro_header has been freed */
jit_data->ro_header = NULL;
prog = orig_prog;
@@ -258,7 +257,7 @@ void bpf_jit_free(struct bpf_prog *prog)
* before freeing it.
*/
if (jit_data) {
- bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header);
+ bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header);
kfree(jit_data);
}
hdr = bpf_jit_binary_pack_hdr(prog);
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 182aac6a0f77..5a36d5538dae 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -170,11 +170,14 @@ static void kaslr_adjust_got(unsigned long offset)
u64 *entry;
/*
- * Even without -fPIE, Clang still uses a global offset table for some
- * reason. Adjust the GOT entries.
+ * Adjust GOT entries, except for ones for undefined weak symbols
+ * that resolved to zero. This also skips the first three reserved
+ * entries on s390x that are zero.
*/
- for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
- *entry += offset - __START_KERNEL;
+ for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
+ if (*entry)
+ *entry += offset - __START_KERNEL;
+ }
}
/*
@@ -384,7 +387,7 @@ static void fixup_vmlinux_info(void)
void startup_kernel(void)
{
unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
- unsigned long nokaslr_offset_phys = mem_safe_offset();
+ unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
unsigned long amode31_lma = 0;
unsigned long max_physmem_end;
unsigned long asce_limit;
@@ -393,6 +396,12 @@ void startup_kernel(void)
fixup_vmlinux_info();
setup_lpp();
+
+ /*
+ * Non-randomized kernel physical start address must be _SEGMENT_SIZE
+ * aligned (see blow).
+ */
+ nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
/*
@@ -425,10 +434,25 @@ void startup_kernel(void)
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);
- if (kaslr_enabled())
- __kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
+ /*
+ * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
+ * 20 bits (the offset within a large page) are zero. Copy the last
+ * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
+ * __kaslr_offset_phys.
+ *
+ * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
+ * are identical, which is required to allow for large mappings of the
+ * kernel image.
+ */
+ kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
+ if (kaslr_enabled()) {
+ unsigned long end = ident_map_size - kaslr_large_page_offset;
+
+ __kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
+ }
if (!__kaslr_offset_phys)
__kaslr_offset_phys = nokaslr_offset_phys;
+ __kaslr_offset_phys |= kaslr_large_page_offset;
kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
deploy_kernel((void *)__kaslr_offset_phys);
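The KASLR change above randomizes the physical load address at _SEGMENT_SIZE granularity and then copies the low 20 bits of the already chosen, THREAD_SIZE-aligned virtual offset into it, so the two offsets agree modulo 1 MiB and the image can be mapped with large pages. A sketch of that bit manipulation with made-up offsets:

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_SIZE (1ULL << 20)           /* 1 MiB */
#define SEGMENT_MASK (~(SEGMENT_SIZE - 1))

int main(void)
{
    uint64_t kaslr_offset      = 0x52f4c000;   /* virtual, 16 KiB aligned */
    uint64_t kaslr_offset_phys = 0x7be00000;   /* randomized, 1 MiB aligned */

    uint64_t large_page_offset = kaslr_offset & ~SEGMENT_MASK;

    kaslr_offset_phys |= large_page_offset;

    /* low 20 bits now match, which is what permits 1 MiB mappings */
    printf("virt low20=%#llx phys low20=%#llx\n",
           (unsigned long long)(kaslr_offset & ~SEGMENT_MASK),
           (unsigned long long)(kaslr_offset_phys & ~SEGMENT_MASK));
    return 0;
}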
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 96d48b7112d4..40cfce2687c4 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -261,21 +261,27 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
static bool large_allowed(enum populate_mode mode)
{
- return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
+ return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
}
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
+ unsigned long size = end - addr;
+
return machine.has_edat2 && large_allowed(mode) &&
- IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
+ IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
+ IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
}
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
+ unsigned long size = end - addr;
+
return machine.has_edat1 && large_allowed(mode) &&
- IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
+ IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
+ IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
}
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
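can_large_pud()/can_large_pmd() above now also check the physical side of the mapping: a block mapping is only legal when the virtual range covers a whole block and the physical address it resolves to starts on a block boundary. A tiny predicate sketch for the PMD case (1 MiB block size assumed for illustration; not the kernel helpers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (1ULL << 20)

static bool aligned(uint64_t x, uint64_t a) { return (x & (a - 1)) == 0; }

/* both the virtual range and the target physical address must be
 * block-aligned before a large mapping is used */
static bool can_large_pmd(uint64_t vaddr, uint64_t vend, uint64_t paddr)
{
    uint64_t size = vend - vaddr;

    return aligned(vaddr, PMD_SIZE) && size >= PMD_SIZE &&
           aligned(paddr, PMD_SIZE);
}

int main(void)
{
    /* physical start off by 4 KiB: falls back to 4 KiB PTEs */
    printf("%d\n", can_large_pmd(0x40000000, 0x40200000, 0x80001000));
    /* both sides aligned: large mapping allowed */
    printf("%d\n", can_large_pmd(0x40000000, 0x40200000, 0x80100000));
    return 0;
}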
diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index 1fe5a1d3ff60..a750711d44c8 100644
--- a/arch/s390/boot/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -109,6 +109,7 @@ SECTIONS
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = ALIGN(PAGE_SIZE);
. += AMODE31_SIZE; /* .amode31 section */
+ . = ALIGN(1 << 20); /* _SEGMENT_SIZE */
#else
. = ALIGN(8);
#endif
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 145342e46ea8..f3602414a961 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -43,7 +43,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
@@ -51,6 +50,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
@@ -76,6 +76,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -100,7 +101,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@@ -119,6 +119,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
+CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -133,7 +134,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -167,6 +167,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
@@ -183,17 +184,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
-CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -206,8 +229,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -216,6 +241,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -230,6 +256,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@@ -247,6 +274,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -302,7 +330,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m
@@ -373,7 +400,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@@ -462,6 +488,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
+CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@@ -574,18 +601,16 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
-# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+CONFIG_DRM=m
+CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB=y
# CONFIG_FB_DEVICE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
-CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_MLX5_VFIO_PCI=m
@@ -645,7 +670,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -663,6 +687,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
+CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -879,6 +904,5 @@ CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index dc237896f99d..d0d8925fdf09 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -41,7 +41,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
-CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
@@ -49,6 +48,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
@@ -71,6 +71,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -110,6 +111,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
+CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -124,7 +126,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@@ -158,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
@@ -174,17 +176,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
-CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -197,8 +221,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -207,6 +233,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -221,6 +248,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@@ -238,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -293,7 +322,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m
@@ -363,7 +391,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@@ -452,6 +479,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
+CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@@ -564,17 +592,16 @@ CONFIG_WATCHDOG_CORE=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
+CONFIG_DRM=m
+CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB=y
# CONFIG_FB_DEVICE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
-CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_MLX5_VFIO_PCI=m
@@ -630,7 +657,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -649,6 +675,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
+CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index c51f3ec4eb28..8c2b61363bab 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -9,25 +9,22 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KEXEC=y
-CONFIG_CRASH_DUMP=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
+# CONFIG_AP is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
-# CONFIG_GCC_PLUGINS is not set
# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SWAP is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
# CONFIG_PCPU_DEV_REFCNT is not set
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 7f5004065e8a..35555c944630 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -54,7 +54,7 @@ static __always_inline void arch_exit_to_user_mode(void)
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{
- choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
+ choose_random_kstack_offset(get_tod_clock_fast());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 95990461888f..9281063636a7 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
u64 instruction_io_other;
u64 instruction_lpsw;
u64 instruction_lpswe;
+ u64 instruction_lpswey;
u64 instruction_pfmf;
u64 instruction_ptff;
u64 instruction_sck;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 4260bc5ce7f8..70fc671397da 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -35,6 +35,5 @@
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#endif /* _ASM_S390_UNISTD_H_ */
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 9863ebe75019..edae13416196 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -451,7 +451,7 @@ static void *nt_final(void *ptr)
/*
* Initialize ELF header (new kernel)
*/
-static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
+static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count)
{
memset(ehdr, 0, sizeof(*ehdr));
memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
@@ -465,11 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
- /*
- * Number of memory chunk PT_LOAD program headers plus one kernel
- * image PT_LOAD program header plus one PT_NOTE program header.
- */
- ehdr->e_phnum = mem_chunk_cnt + 1 + 1;
+ /* Number of PT_LOAD program headers plus PT_NOTE program header */
+ ehdr->e_phnum = phdr_count + 1;
return ehdr + 1;
}
@@ -503,12 +500,14 @@ static int get_mem_chunk_cnt(void)
/*
* Initialize ELF loads (new kernel)
*/
-static void loads_init(Elf64_Phdr *phdr)
+static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm)
{
- unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
+ unsigned long old_identity_base = 0;
phys_addr_t start, end;
u64 idx;
+ if (os_info_has_vm)
+ old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
for_each_physmem_range(idx, &oldmem_type, &start, &end) {
phdr->p_type = PT_LOAD;
phdr->p_vaddr = old_identity_base + start;
@@ -522,6 +521,11 @@ static void loads_init(Elf64_Phdr *phdr)
}
}
+static bool os_info_has_vm(void)
+{
+ return os_info_old_value(OS_INFO_KASLR_OFFSET);
+}
+
/*
* Prepare PT_LOAD type program header for kernel image region
*/
@@ -566,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
return ptr;
}
-static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+static size_t get_elfcorehdr_size(int phdr_count)
{
size_t size;
@@ -581,10 +585,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
size += nt_vmcoreinfo_size();
/* nt_final */
size += sizeof(Elf64_Nhdr);
- /* PT_LOAD type program header for kernel text region */
- size += sizeof(Elf64_Phdr);
/* PT_LOADS */
- size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+ size += phdr_count * sizeof(Elf64_Phdr);
return size;
}
@@ -595,8 +597,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text;
+ int mem_chunk_cnt, phdr_text_cnt;
size_t alloc_size;
- int mem_chunk_cnt;
void *ptr, *hdr;
u64 hdr_off;
@@ -615,12 +617,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
}
mem_chunk_cnt = get_mem_chunk_cnt();
+ phdr_text_cnt = os_info_has_vm() ? 1 : 0;
- alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+ alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt);
hdr = kzalloc(alloc_size, GFP_KERNEL);
- /* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
+ /*
+ * Without elfcorehdr /proc/vmcore cannot be created. Thus creating
* a dump with this crash kernel will fail. Panic now to allow other
* dump mechanisms to take over.
*/
@@ -628,21 +632,23 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
panic("s390 kdump allocating elfcorehdr failed");
/* Init elf header */
- ptr = ehdr_init(hdr, mem_chunk_cnt);
+ phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt);
/* Init program headers */
- phdr_notes = ptr;
- ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
- phdr_text = ptr;
- ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
- phdr_loads = ptr;
- ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
+ if (phdr_text_cnt) {
+ phdr_text = phdr_notes + 1;
+ phdr_loads = phdr_text + 1;
+ } else {
+ phdr_loads = phdr_notes + 1;
+ }
+ ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt);
/* Init notes */
hdr_off = PTR_DIFF(ptr, hdr);
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init kernel text program header */
- text_init(phdr_text);
+ if (phdr_text_cnt)
+ text_init(phdr_text);
/* Init loads */
- loads_init(phdr_loads);
+ loads_init(phdr_loads, phdr_text_cnt);
/* Finalize program headers */
hdr_off = PTR_DIFF(ptr, hdr);
*addr = (unsigned long long) hdr;
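
As an aside, a minimal stand-alone sketch of the new program-header accounting (the helper name and flag are illustrative, not taken from the patch): one PT_LOAD per memory chunk, one optional PT_LOAD for the kernel image when the old kernel's virtual-memory info is available, plus one PT_NOTE.

#include <stdbool.h>

/* Illustrative only: mirrors ehdr_init()'s "phdr_count + 1" with
 * phdr_count = mem_chunk_cnt + phdr_text_cnt as used in elfcorehdr_alloc(). */
static int example_phnum(int mem_chunk_cnt, bool old_kernel_has_vm_info)
{
	int phdr_text_cnt = old_kernel_has_vm_info ? 1 : 0;

	return mem_chunk_cnt + phdr_text_cnt + 1; /* + PT_NOTE */
}
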
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index dc2355c623d6..50cbcbbaa03d 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -38,33 +38,6 @@
#include "entry.h"
-/*
- * Perform the mmap() system call. Linux for S/390 isn't able to handle more
- * than 5 system call parameters, so this system call uses a memory block
- * for parameter passing.
- */
-
-struct s390_mmap_arg_struct {
- unsigned long addr;
- unsigned long len;
- unsigned long prot;
- unsigned long flags;
- unsigned long fd;
- unsigned long offset;
-};
-
-SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
-{
- struct s390_mmap_arg_struct a;
- int error = -EFAULT;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- goto out;
- error = ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
-out:
- return error;
-}
-
#ifdef CONFIG_SYSVIPC
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index bd0fee24ad10..01071182763e 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -418,7 +418,7 @@
412 32 utimensat_time64 - sys_utimensat
413 32 pselect6_time64 - compat_sys_pselect6_time64
414 32 ppoll_time64 - compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 - sys_io_pgetevents
+416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 - sys_mq_timedsend
419 32 mq_timedreceive_time64 - sys_mq_timedreceive
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 82e9631cd9ef..54b5b2565df8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, instruction_io_other),
STATS_DESC_COUNTER(VCPU, instruction_lpsw),
STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+ STATS_DESC_COUNTER(VCPU, instruction_lpswey),
STATS_DESC_COUNTER(VCPU, instruction_pfmf),
STATS_DESC_COUNTER(VCPU, instruction_ptff),
STATS_DESC_COUNTER(VCPU, instruction_sck),
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 111eb5c74784..bf8534218af3 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -138,6 +138,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
+static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
+{
+ u32 base1 = vcpu->arch.sie_block->ipb >> 28;
+ s64 disp1;
+
+ /* The displacement is a 20bit _SIGNED_ value */
+ disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+ ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
+
+ if (ar)
+ *ar = base1;
+
+ return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
+}
+
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
u64 *address1, u64 *address2,
u8 *ar_b1, u8 *ar_b2)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 1be19cc9d73c..1a49b89706f8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
return 0;
}
+static int handle_lpswey(struct kvm_vcpu *vcpu)
+{
+ psw_t new_psw;
+ u64 addr;
+ int rc;
+ u8 ar;
+
+ vcpu->stat.instruction_lpswey++;
+
+ if (!test_kvm_facility(vcpu->kvm, 193))
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+ addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
+ if (addr & 7)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+
+ vcpu->arch.sie_block->gpsw = new_psw;
+ if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ return 0;
+}
+
static int handle_stidp(struct kvm_vcpu *vcpu)
{
u64 stidp_data = vcpu->kvm->arch.model.cpuid;
@@ -1462,6 +1492,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
case 0x61:
case 0x62:
return handle_ri(vcpu);
+ case 0x71:
+ return handle_lpswey(vcpu);
default:
return -EOPNOTSUPP;
}
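
For clarity, a stand-alone sketch of the displacement decoding used by kvm_s390_get_base_disp_siy() above (an assumed re-implementation, kept apart from the KVM code): the 12-bit DL and 8-bit DH fields of the IPB are combined into a 20-bit value and sign-extended, so for example DL=0xfff with DH=0xff yields -1.

#include <stdint.h>

/* Illustrative only; mirrors the helper added above. */
static int64_t siy_displacement(uint32_t ipb)
{
	uint32_t dl = (ipb & 0x0fff0000) >> 16; /* 12-bit low part                 */
	uint32_t dh = (ipb & 0x0000ff00) << 4;  /* 8-bit high part -> bits 12..19  */
	uint64_t raw = dl + dh;                 /* 20-bit unsigned result          */

	/* sign-extend from bit 19 */
	return (raw & 0x80000) ? (int64_t)raw - 0x100000 : (int64_t)raw;
}
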
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index abb629d7e131..7e3e767ab87d 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -55,6 +55,8 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
+ if (!table)
+ return;
pagetable_free(virt_to_ptdesc(table));
}
@@ -262,6 +264,8 @@ static unsigned long *base_crst_alloc(unsigned long val)
static void base_crst_free(unsigned long *table)
{
+ if (!table)
+ return;
pagetable_free(virt_to_ptdesc(table));
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 4be8f5cadd02..9d440a0b729e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -31,11 +31,12 @@
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include <asm/text-patching.h>
+#include <asm/unwind.h>
#include "bpf_jit.h"
struct bpf_jit {
u32 seen; /* Flags to remember seen eBPF instructions */
- u32 seen_reg[16]; /* Array to remember which registers are used */
+ u16 seen_regs; /* Mask to remember which registers are used */
u32 *addrs; /* Array with relative instruction addresses */
u8 *prg_buf; /* Start of program */
int size; /* Size of program and literal pool */
@@ -53,6 +54,8 @@ struct bpf_jit {
int excnt; /* Number of exception table entries */
int prologue_plt_ret; /* Return address for prologue hotpatch PLT */
int prologue_plt; /* Start of prologue hotpatch PLT */
+ int kern_arena; /* Pool offset of kernel arena address */
+ u64 user_arena; /* User arena address */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
@@ -60,6 +63,8 @@ struct bpf_jit {
#define SEEN_FUNC BIT(2) /* calls C functions */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
+#define NVREGS 0xffc0 /* %r6-%r15 */
+
/*
* s390 registers
*/
@@ -118,8 +123,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
u32 r1 = reg2hex[b1];
- if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
- jit->seen_reg[r1] = 1;
+ if (r1 >= 6 && r1 <= 15)
+ jit->seen_regs |= (1 << r1);
}
#define REG_SET_SEEN(b1) \
@@ -127,8 +132,6 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
reg_set_seen(jit, b1); \
})
-#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
-
/*
* EMIT macros for code generation
*/
@@ -436,12 +439,12 @@ static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
/*
* Return first seen register (from start)
*/
-static int get_start(struct bpf_jit *jit, int start)
+static int get_start(u16 seen_regs, int start)
{
int i;
for (i = start; i <= 15; i++) {
- if (jit->seen_reg[i])
+ if (seen_regs & (1 << i))
return i;
}
return 0;
@@ -450,15 +453,15 @@ static int get_start(struct bpf_jit *jit, int start)
/*
* Return last seen register (from start) (gap >= 2)
*/
-static int get_end(struct bpf_jit *jit, int start)
+static int get_end(u16 seen_regs, int start)
{
int i;
for (i = start; i < 15; i++) {
- if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
+ if (!(seen_regs & (3 << i)))
return i - 1;
}
- return jit->seen_reg[15] ? 15 : 14;
+ return (seen_regs & (1 << 15)) ? 15 : 14;
}
#define REGS_SAVE 1
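
A worked example of the mask-based chunking above (stand-alone copies of the two helpers, written here purely for illustration): with %r6, %r7, %r10, %r12, %r14 and %r15 seen, the save/restore chunks come out as %r6-%r7 and %r10-%r15, because a chunk only ends once two consecutive registers are unused.

#include <assert.h>
#include <stdint.h>

static int first_seen(uint16_t m, int start)
{
	for (int i = start; i <= 15; i++)
		if (m & (1 << i))
			return i;
	return 0;
}

static int last_seen(uint16_t m, int start)
{
	for (int i = start; i < 15; i++)
		if (!(m & (3 << i)))	/* two unused registers end the chunk */
			return i - 1;
	return (m & (1 << 15)) ? 15 : 14;
}

int main(void)
{
	uint16_t m = (1 << 6) | (1 << 7) | (1 << 10) | (1 << 12) | (1 << 14) | (1 << 15);

	assert(first_seen(m, 6) == 6 && last_seen(m, 7) == 7);		/* %r6-%r7   */
	assert(first_seen(m, 8) == 10 && last_seen(m, 11) == 15);	/* %r10-%r15 */
	return 0;
}
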
@@ -467,8 +470,10 @@ static int get_end(struct bpf_jit *jit, int start)
* Save and restore clobbered registers (6-15) on stack.
* We save/restore registers in chunks with gap >= 2 registers.
*/
-static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
+static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
+ u16 extra_regs)
{
+ u16 seen_regs = jit->seen_regs | extra_regs;
const int last = 15, save_restore_size = 6;
int re = 6, rs;
@@ -482,10 +487,10 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
}
do {
- rs = get_start(jit, re);
+ rs = get_start(seen_regs, re);
if (!rs)
break;
- re = get_end(jit, rs + 1);
+ re = get_end(seen_regs, rs + 1);
if (op == REGS_SAVE)
save_regs(jit, rs, re);
else
@@ -570,8 +575,21 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
}
/* Tail calls have to skip above initialization */
jit->tail_call_start = jit->prg;
- /* Save registers */
- save_restore_regs(jit, REGS_SAVE, stack_depth);
+ if (fp->aux->exception_cb) {
+ /*
+ * Switch stack, the new address is in the 2nd parameter.
+ *
+ * Arrange the restoration of %r6-%r15 in the epilogue.
+ * Do not restore them now, the prog does not need them.
+ */
+ /* lgr %r15,%r3 */
+ EMIT4(0xb9040000, REG_15, REG_3);
+ jit->seen_regs |= NVREGS;
+ } else {
+ /* Save registers */
+ save_restore_regs(jit, REGS_SAVE, stack_depth,
+ fp->aux->exception_boundary ? NVREGS : 0);
+ }
/* Setup literal pool */
if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
if (!is_first_pass(jit) &&
@@ -647,7 +665,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
/* Load exit code: lgr %r2,%b0 */
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
- save_restore_regs(jit, REGS_RESTORE, stack_depth);
+ save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
if (nospec_uses_trampoline()) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
@@ -667,50 +685,111 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
jit->prg += sizeof(struct bpf_plt);
}
-static int get_probe_mem_regno(const u8 *insn)
-{
- /*
- * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have
- * destination register at the same position.
- */
- if (insn[0] != 0xe3) /* common prefix */
- return -1;
- if (insn[5] != 0x90 && /* llgc */
- insn[5] != 0x91 && /* llgh */
- insn[5] != 0x16 && /* llgf */
- insn[5] != 0x04 && /* lg */
- insn[5] != 0x77 && /* lgb */
- insn[5] != 0x15 && /* lgh */
- insn[5] != 0x14) /* lgf */
- return -1;
- return insn[1] >> 4;
-}
-
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
regs->psw.addr = extable_fixup(x);
- regs->gprs[x->data] = 0;
+ if (x->data != -1)
+ regs->gprs[x->data] = 0;
return true;
}
-static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
- int probe_prg, int nop_prg)
+/*
+ * A single BPF probe instruction
+ */
+struct bpf_jit_probe {
+ int prg; /* JITed instruction offset */
+ int nop_prg; /* JITed nop offset */
+ int reg; /* Register to clear on exception */
+ int arena_reg; /* Register to use for arena addressing */
+};
+
+static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
+{
+ probe->prg = -1;
+ probe->nop_prg = -1;
+ probe->reg = -1;
+ probe->arena_reg = REG_0;
+}
+
+/*
+ * Handlers of certain exceptions leave psw.addr pointing to the instruction
+ * directly after the failing one. Therefore, create two exception table
+ * entries and also add a nop in case two probing instructions come directly
+ * after each other.
+ */
+static void bpf_jit_probe_emit_nop(struct bpf_jit *jit,
+ struct bpf_jit_probe *probe)
+{
+ if (probe->prg == -1 || probe->nop_prg != -1)
+ /* The probe is not armed or nop is already emitted. */
+ return;
+
+ probe->nop_prg = jit->prg;
+ /* bcr 0,%0 */
+ _EMIT2(0x0700);
+}
+
+static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn,
+ struct bpf_jit_probe *probe)
+{
+ if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32)
+ return;
+
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ /* lgrl %r1,kern_arena */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+ probe->arena_reg = REG_W1;
+ }
+ probe->prg = jit->prg;
+ probe->reg = reg2hex[insn->dst_reg];
+}
+
+static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn,
+ struct bpf_jit_probe *probe)
+{
+ if (BPF_MODE(insn->code) != BPF_PROBE_MEM32)
+ return;
+
+ /* lgrl %r1,kern_arena */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+ probe->arena_reg = REG_W1;
+ probe->prg = jit->prg;
+}
+
+static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit,
+ struct bpf_insn *insn,
+ struct bpf_jit_probe *probe)
+{
+ if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
+ return;
+
+ /* lgrl %r1,kern_arena */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+ /* agr %r1,%dst */
+ EMIT4(0xb9080000, REG_W1, insn->dst_reg);
+ probe->arena_reg = REG_W1;
+ probe->prg = jit->prg;
+}
+
+static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp,
+ struct bpf_jit_probe *probe)
{
struct exception_table_entry *ex;
- int reg, prg;
+ int i, prg;
s64 delta;
u8 *insn;
- int i;
+ if (probe->prg == -1)
+ /* The probe is not armed. */
+ return 0;
+ bpf_jit_probe_emit_nop(jit, probe);
if (!fp->aux->extable)
/* Do nothing during early JIT passes. */
return 0;
- insn = jit->prg_buf + probe_prg;
- reg = get_probe_mem_regno(insn);
- if (WARN_ON_ONCE(reg < 0))
- /* JIT bug - unexpected probe instruction. */
- return -1;
- if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
+ insn = jit->prg_buf + probe->prg;
+ if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg))
/* JIT bug - gap between probe and nop instructions. */
return -1;
for (i = 0; i < 2; i++) {
@@ -719,23 +798,24 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
return -1;
ex = &fp->aux->extable[jit->excnt];
/* Add extable entries for probe and nop instructions. */
- prg = i == 0 ? probe_prg : nop_prg;
+ prg = i == 0 ? probe->prg : probe->nop_prg;
delta = jit->prg_buf + prg - (u8 *)&ex->insn;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - code and extable must be close. */
return -1;
ex->insn = delta;
/*
- * Always land on the nop. Note that extable infrastructure
- * ignores fixup field, it is handled by ex_handler_bpf().
+ * Land on the current instruction. Note that the extable
+ * infrastructure ignores the fixup field; it is handled by
+ * ex_handler_bpf().
*/
- delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
+ delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - landing pad and extable must be close. */
return -1;
ex->fixup = delta;
ex->type = EX_TYPE_BPF;
- ex->data = reg;
+ ex->data = probe->reg;
jit->excnt++;
}
return 0;
@@ -782,19 +862,15 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
s32 branch_oc_off = insn->off;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
+ struct bpf_jit_probe probe;
int last, insn_count = 1;
u32 *addrs = jit->addrs;
s32 imm = insn->imm;
s16 off = insn->off;
- int probe_prg = -1;
unsigned int mask;
- int nop_prg;
int err;
- if (BPF_CLASS(insn->code) == BPF_LDX &&
- (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
- BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
- probe_prg = jit->prg;
+ bpf_jit_probe_init(&probe);
switch (insn->code) {
/*
@@ -823,6 +899,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
}
break;
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn_is_cast_user(insn)) {
+ int patch_brc;
+
+ /* ltgr %dst,%src */
+ EMIT4(0xb9020000, dst_reg, src_reg);
+ /* brc 8,0f */
+ patch_brc = jit->prg;
+ EMIT4_PCREL_RIC(0xa7040000, 8, 0);
+ /* iihf %dst,user_arena>>32 */
+ EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32);
+ /* 0: */
+ if (jit->prg_buf)
+ *(u16 *)(jit->prg_buf + patch_brc + 2) =
+ (jit->prg - patch_brc) >> 1;
+ break;
+ }
switch (insn->off) {
case 0: /* DST = SRC */
/* lgr %dst,%src */
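
The intended effect of the ltgr/brc/iihf sequence emitted for cast_user above, as a hedged C sketch (the helper name is made up; the semantics are inferred from the instruction comments): a non-NULL 32-bit arena offset gets the upper half of the user arena base patched in, while NULL stays NULL.

#include <stdint.h>

/* Illustrative only; mirrors the ltgr / brc 8 / iihf sequence above. */
static uint64_t cast_user_sketch(uint64_t src, uint64_t user_arena)
{
	uint64_t dst = src;			/* ltgr %dst,%src (sets cc)  */

	if (dst == 0)				/* brc 8,0f: skip when zero  */
		return 0;
	/* iihf %dst,user_arena>>32: replace the upper 32 bits */
	return (user_arena & 0xffffffff00000000ULL) | (uint32_t)dst;
}
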
@@ -1366,51 +1458,99 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
- /* stcy %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stcy %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
- /* sthy %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sthy %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
- /* sty %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sty %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
- /* stg %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stg %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
/* lhi %w0,imm */
EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
- /* stcy %w0,off(dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stcy %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
/* lhi %w0,imm */
EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
- /* sthy %w0,off(dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sthy %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
/* llilf %w0,imm */
EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
- /* sty %w0,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sty %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
/* lgfi %w0,imm */
EMIT6_IMM(0xc0010000, REG_W0, imm);
- /* stg %w0,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stg %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
/*
@@ -1418,15 +1558,30 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_STX | BPF_ATOMIC | BPF_DW:
case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
{
bool is32 = BPF_SIZE(insn->code) == BPF_W;
+ /*
+ * Unlike loads and stores, atomics have only a base register,
+ * but no index register. For the non-arena case, simply use
+ * %dst as a base. For the arena case, use the work register
+ * %r1: first, load the arena base into it, and then add %dst
+ * to it.
+ */
+ probe.arena_reg = dst_reg;
+
switch (insn->imm) {
-/* {op32|op64} {%w0|%src},%src,off(%dst) */
#define EMIT_ATOMIC(op32, op64) do { \
+ bpf_jit_probe_atomic_pre(jit, insn, &probe); \
+ /* {op32|op64} {%w0|%src},%src,off(%arena) */ \
EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
(insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
- src_reg, dst_reg, off); \
+ src_reg, probe.arena_reg, off); \
+ err = bpf_jit_probe_post(jit, fp, &probe); \
+ if (err < 0) \
+ return err; \
if (insn->imm & BPF_FETCH) { \
/* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
_EMIT2(0x07e0); \
@@ -1455,25 +1610,50 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ATOMIC(0x00f7, 0x00e7);
break;
#undef EMIT_ATOMIC
- case BPF_XCHG:
- /* {ly|lg} %w0,off(%dst) */
+ case BPF_XCHG: {
+ struct bpf_jit_probe load_probe = probe;
+ int loop_start;
+
+ bpf_jit_probe_atomic_pre(jit, insn, &load_probe);
+ /* {ly|lg} %w0,off(%arena) */
EMIT6_DISP_LH(0xe3000000,
is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
- dst_reg, off);
- /* 0: {csy|csg} %w0,%src,off(%dst) */
+ load_probe.arena_reg, off);
+ bpf_jit_probe_emit_nop(jit, &load_probe);
+ /* Reuse {ly|lg}'s arena_reg for {csy|csg}. */
+ if (load_probe.prg != -1) {
+ probe.prg = jit->prg;
+ probe.arena_reg = load_probe.arena_reg;
+ }
+ loop_start = jit->prg;
+ /* 0: {csy|csg} %w0,%src,off(%arena) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
- REG_W0, src_reg, dst_reg, off);
+ REG_W0, src_reg, probe.arena_reg, off);
+ bpf_jit_probe_emit_nop(jit, &probe);
/* brc 4,0b */
- EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
+ EMIT4_PCREL_RIC(0xa7040000, 4, loop_start);
/* {llgfr|lgr} %src,%w0 */
EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
+ /* Both probes should land here on exception. */
+ err = bpf_jit_probe_post(jit, fp, &load_probe);
+ if (err < 0)
+ return err;
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
if (is32 && insn_is_zext(&insn[1]))
insn_count = 2;
break;
+ }
case BPF_CMPXCHG:
- /* 0: {csy|csg} %b0,%src,off(%dst) */
+ bpf_jit_probe_atomic_pre(jit, insn, &probe);
+ /* 0: {csy|csg} %b0,%src,off(%arena) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
- BPF_REG_0, src_reg, dst_reg, off);
+ BPF_REG_0, src_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
break;
default:
pr_err("Unknown atomic operation %02x\n", insn->imm);
@@ -1488,51 +1668,87 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
- /* llgc %dst,0(off,%src) */
- EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
+ /* llgc %dst,off(%src,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
- /* lgb %dst,0(off,%src) */
+ bpf_jit_probe_load_pre(jit, insn, &probe);
+ /* lgb %dst,off(%src) */
EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
- /* llgh %dst,0(off,%src) */
- EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
+ /* llgh %dst,off(%src,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
- /* lgh %dst,0(off,%src) */
+ bpf_jit_probe_load_pre(jit, insn, &probe);
+ /* lgh %dst,off(%src) */
EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* llgf %dst,off(%src) */
jit->seen |= SEEN_MEM;
- EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
+ EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* lgf %dst,off(%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
break;
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
- /* lg %dst,0(off,%src) */
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
+ /* lg %dst,off(%src,%arena) */
jit->seen |= SEEN_MEM;
- EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
+ EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
break;
/*
* BPF_JMP / CALL
@@ -1647,7 +1863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/*
* Restore registers before calling function
*/
- save_restore_regs(jit, REGS_RESTORE, stack_depth);
+ save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
/*
* goto *(prog->bpf_func + tail_call_start);
@@ -1897,22 +2113,6 @@ branch_oc:
return -1;
}
- if (probe_prg != -1) {
- /*
- * Handlers of certain exceptions leave psw.addr pointing to
- * the instruction directly after the failing one. Therefore,
- * create two exception table entries and also add a nop in
- * case two probing instructions come directly after each
- * other.
- */
- nop_prg = jit->prg;
- /* bcr 0,%0 */
- _EMIT2(0x0700);
- err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
- if (err < 0)
- return err;
- }
-
return insn_count;
}
@@ -1958,12 +2158,18 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
bool extra_pass, u32 stack_depth)
{
int i, insn_count, lit32_size, lit64_size;
+ u64 kern_arena;
jit->lit32 = jit->lit32_start;
jit->lit64 = jit->lit64_start;
jit->prg = 0;
jit->excnt = 0;
+ kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
+ if (kern_arena)
+ jit->kern_arena = _EMIT_CONST_U64(kern_arena);
+ jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);
+
bpf_jit_prologue(jit, fp, stack_depth);
if (bpf_set_addr(jit, 0) < 0)
return -1;
@@ -2011,9 +2217,25 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
struct bpf_prog *fp)
{
struct bpf_binary_header *header;
+ struct bpf_insn *insn;
u32 extable_size;
u32 code_size;
+ int i;
+
+ for (i = 0; i < fp->len; i++) {
+ insn = &fp->insnsi[i];
+ if (BPF_CLASS(insn->code) == BPF_STX &&
+ BPF_MODE(insn->code) == BPF_PROBE_ATOMIC &&
+ (BPF_SIZE(insn->code) == BPF_DW ||
+ BPF_SIZE(insn->code) == BPF_W) &&
+ insn->imm == BPF_XCHG)
+ /*
+ * bpf_jit_insn() emits a load and a compare-and-swap,
+ * both of which need to be probed.
+ */
+ fp->aux->num_exentries += 1;
+ }
/* We need two entries per insn. */
fp->aux->num_exentries *= 2;
@@ -2689,3 +2911,52 @@ bool bpf_jit_supports_subprog_tailcalls(void)
{
return true;
}
+
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+ /*
+ * Currently the verifier uses this function only to check which
+ * atomic stores to arena are supported, and they all are.
+ */
+ return true;
+}
+
+bool bpf_jit_supports_exceptions(void)
+{
+ /*
+ * Exceptions require unwinding support, which is always available,
+ * because the kernel is always built with backchain.
+ */
+ return true;
+}
+
+void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64),
+ void *cookie)
+{
+ unsigned long addr, prev_addr = 0;
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, NULL, NULL, 0) {
+ addr = unwind_get_return_address(&state);
+ if (!addr)
+ break;
+ /*
+ * addr is a return address and state.sp is the value of %r15
+ * at this address. exception_cb needs %r15 at entry to the
+ * function containing addr, so take the next state.sp.
+ *
+ * There is no bp, and the exception_cb prog does not need one
+ * to perform a quasi-longjmp. The common code requires a
+ * non-zero bp, so pass sp there as well.
+ */
+ if (prev_addr && !consume_fn(cookie, prev_addr, state.sp,
+ state.sp))
+ break;
+ prev_addr = addr;
+ }
+}
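
To tie the exception-table sizing together, a small illustrative helper (not part of the patch; parameter names are made up): each probed instruction contributes two entries, one for the instruction and one for its trailing nop, and an arena XCHG is counted as an extra probed instruction because it emits both a probed load and a probed compare-and-swap.

/* Illustrative accounting only. */
static unsigned int bpf_jit_exentries_sketch(unsigned int probed_insns,
					     unsigned int arena_xchg_insns)
{
	/* XCHG adds one extra probed instruction on top of its own count. */
	return (probed_insns + arena_xchg_insns) * 2;
}
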
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index ff8f24854c64..0ef83b6ac0db 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -410,7 +410,7 @@ static void __init cpu_enable_directed_irq(void *unused)
union zpci_sic_iib iib = {{0}};
union zpci_sic_iib ziib = {{0}};
- iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
+ iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 689ea14a6678..bca54e489e11 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -14,9 +14,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/io.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/sh_mmcif.h>
+#include <linux/platform_data/tmio.h>
#include <linux/sh_eth.h>
#include <linux/sh_intc.h>
#include <linux/usb/renesas_usbhs.h>
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 645cccf3da88..bb5004a8ac02 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -24,10 +24,10 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 30d117f9ad7e..6f13557eecd6 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -17,13 +17,13 @@
#include <linux/input/sh_keysc.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/sh_mmcif.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/platform_data/gpio_backlight.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_data/tsc2007.h>
#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 6b775eae85c0..70236859919d 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -22,10 +22,10 @@
#include <linux/input/sh_keysc.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_data/lv5207lp.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 773ee767d0c4..1853e6319a66 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -7,6 +7,7 @@
#include <linux/clkdev.h>
#include <linux/dma-map-ops.h>
#include <linux/init.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
@@ -14,7 +15,6 @@
#include <linux/memblock.h>
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
-#include <linux/mfd/tmio.h>
#include <linux/mtd/platnand.h>
#include <linux/i2c.h>
#include <linux/regulator/fixed.h>
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 787ddd3c627a..e500feb91053 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -21,9 +21,9 @@
#include <linux/input/sh_keysc.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index d6e126250136..3d7f35650a13 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -28,4 +28,6 @@
# define __ARCH_WANT_SYS_VFORK
# define __ARCH_WANT_SYS_CLONE
+#define __ARCH_BROKEN_SYS_CLONE3
+
#include <uapi/asm/unistd.h>
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index 9dca568509a5..d6f4afcb0e87 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -59,3 +59,14 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
(u64)len0 << 32 | len1, advice);
#endif
}
+
+/*
+ * Swap the arguments into the order that libc expects instead of
+ * moving flags ahead of the 64-bit nbytes argument.
+ */
+SYSCALL_DEFINE6(sh_sync_file_range6, int, fd, SC_ARG64(offset),
+ SC_ARG64(nbytes), unsigned int, flags)
+{
+ return ksys_sync_file_range(fd, SC_VAL64(loff_t, offset),
+ SC_VAL64(loff_t, nbytes), flags);
+}
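
A hedged sketch of what the SC_ARG64/SC_VAL64 plumbing boils down to (the exact ordering of the halves is endian-dependent and not shown here): each 64-bit argument arrives as two 32-bit registers and is recombined before calling the generic helper, much like the fadvise64_64 wrapper above does by hand.

#include <stdint.h>

/* Illustrative only: recombine a 64-bit value from its 32-bit halves. */
static uint64_t combine_halves(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}
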
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index bbf83a2db986..c55fd7696d40 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -321,7 +321,7 @@
311 common set_robust_list sys_set_robust_list
312 common get_robust_list sys_get_robust_list
313 common splice sys_splice
-314 common sync_file_range sys_sync_file_range
+314 common sync_file_range sys_sh_sync_file_range6
315 common tee sys_tee
316 common vmsplice sys_vmsplice
317 common move_pages sys_move_pages
@@ -395,6 +395,7 @@
385 common pkey_alloc sys_pkey_alloc
386 common pkey_free sys_pkey_free
387 common rseq sys_rseq
+388 common sync_file_range2 sys_sync_file_range2
# room for arch specific syscalls
393 common semget sys_semget
394 common semctl sys_semctl
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d6bc76706a7a..3380411a4537 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -49,6 +49,8 @@
#define __ARCH_WANT_COMPAT_STAT
#endif
+#define __ARCH_BROKEN_SYS_CLONE3
+
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
* it never had the plain ones and there is no value to adding those
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index a45f0f31fe51..a3d308f2043e 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -18,224 +18,3 @@ sys32_mmap2:
sethi %hi(sys_mmap), %g1
jmpl %g1 + %lo(sys_mmap), %g0
sllx %o5, 12, %o5
-
- .align 32
- .globl sys32_socketcall
-sys32_socketcall: /* %o0=call, %o1=args */
- cmp %o0, 1
- bl,pn %xcc, do_einval
- cmp %o0, 18
- bg,pn %xcc, do_einval
- sub %o0, 1, %o0
- sllx %o0, 5, %o0
- sethi %hi(__socketcall_table_begin), %g2
- or %g2, %lo(__socketcall_table_begin), %g2
- jmpl %g2 + %o0, %g0
- nop
-do_einval:
- retl
- mov -EINVAL, %o0
-
- .align 32
-__socketcall_table_begin:
-
- /* Each entry is exactly 32 bytes. */
-do_sys_socket: /* sys_socket(int, int, int) */
-1: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_socket), %g1
-2: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_socket), %g0
-3: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
-4: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_bind), %g1
-5: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_bind), %g0
-6: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
-7: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_connect), %g1
-8: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_connect), %g0
-9: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_listen: /* sys_listen(int, int) */
-10: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_listen), %g1
- jmpl %g1 + %lo(sys_listen), %g0
-11: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
- nop
-do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
-12: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_accept), %g1
-13: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_accept), %g0
-14: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
-15: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getsockname), %g1
-16: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_getsockname), %g0
-17: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
-18: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getpeername), %g1
-19: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_getpeername), %g0
-20: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
-21: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_socketpair), %g1
-22: ldswa [%o1 + 0x8] %asi, %o2
-23: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_socketpair), %g0
-24: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
-25: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_send), %g1
-26: lduwa [%o1 + 0x8] %asi, %o2
-27: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_send), %g0
-28: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
-29: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_recv), %g1
-30: lduwa [%o1 + 0x8] %asi, %o2
-31: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_recv), %g0
-32: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
-33: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_sendto), %g1
-34: lduwa [%o1 + 0x8] %asi, %o2
-35: lduwa [%o1 + 0xc] %asi, %o3
-36: lduwa [%o1 + 0x10] %asi, %o4
-37: ldswa [%o1 + 0x14] %asi, %o5
- jmpl %g1 + %lo(sys_sendto), %g0
-38: lduwa [%o1 + 0x4] %asi, %o1
-do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
-39: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_recvfrom), %g1
-40: lduwa [%o1 + 0x8] %asi, %o2
-41: lduwa [%o1 + 0xc] %asi, %o3
-42: lduwa [%o1 + 0x10] %asi, %o4
-43: lduwa [%o1 + 0x14] %asi, %o5
- jmpl %g1 + %lo(sys_recvfrom), %g0
-44: lduwa [%o1 + 0x4] %asi, %o1
-do_sys_shutdown: /* sys_shutdown(int, int) */
-45: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_shutdown), %g1
- jmpl %g1 + %lo(sys_shutdown), %g0
-46: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
- nop
-do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
-47: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_setsockopt), %g1
-48: ldswa [%o1 + 0x8] %asi, %o2
-49: lduwa [%o1 + 0xc] %asi, %o3
-50: ldswa [%o1 + 0x10] %asi, %o4
- jmpl %g1 + %lo(sys_setsockopt), %g0
-51: ldswa [%o1 + 0x4] %asi, %o1
- nop
-do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
-52: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getsockopt), %g1
-53: ldswa [%o1 + 0x8] %asi, %o2
-54: lduwa [%o1 + 0xc] %asi, %o3
-55: lduwa [%o1 + 0x10] %asi, %o4
- jmpl %g1 + %lo(sys_getsockopt), %g0
-56: ldswa [%o1 + 0x4] %asi, %o1
- nop
-do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
-57: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_sendmsg), %g1
-58: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(compat_sys_sendmsg), %g0
-59: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
-60: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_recvmsg), %g1
-61: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(compat_sys_recvmsg), %g0
-62: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
-63: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_accept4), %g1
-64: lduwa [%o1 + 0x8] %asi, %o2
-65: ldswa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_accept4), %g0
-66: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-
- .section __ex_table,"a"
- .align 4
- .word 1b, __retl_efault, 2b, __retl_efault
- .word 3b, __retl_efault, 4b, __retl_efault
- .word 5b, __retl_efault, 6b, __retl_efault
- .word 7b, __retl_efault, 8b, __retl_efault
- .word 9b, __retl_efault, 10b, __retl_efault
- .word 11b, __retl_efault, 12b, __retl_efault
- .word 13b, __retl_efault, 14b, __retl_efault
- .word 15b, __retl_efault, 16b, __retl_efault
- .word 17b, __retl_efault, 18b, __retl_efault
- .word 19b, __retl_efault, 20b, __retl_efault
- .word 21b, __retl_efault, 22b, __retl_efault
- .word 23b, __retl_efault, 24b, __retl_efault
- .word 25b, __retl_efault, 26b, __retl_efault
- .word 27b, __retl_efault, 28b, __retl_efault
- .word 29b, __retl_efault, 30b, __retl_efault
- .word 31b, __retl_efault, 32b, __retl_efault
- .word 33b, __retl_efault, 34b, __retl_efault
- .word 35b, __retl_efault, 36b, __retl_efault
- .word 37b, __retl_efault, 38b, __retl_efault
- .word 39b, __retl_efault, 40b, __retl_efault
- .word 41b, __retl_efault, 42b, __retl_efault
- .word 43b, __retl_efault, 44b, __retl_efault
- .word 45b, __retl_efault, 46b, __retl_efault
- .word 47b, __retl_efault, 48b, __retl_efault
- .word 49b, __retl_efault, 50b, __retl_efault
- .word 51b, __retl_efault, 52b, __retl_efault
- .word 53b, __retl_efault, 54b, __retl_efault
- .word 55b, __retl_efault, 56b, __retl_efault
- .word 57b, __retl_efault, 58b, __retl_efault
- .word 59b, __retl_efault, 60b, __retl_efault
- .word 61b, __retl_efault, 62b, __retl_efault
- .word 63b, __retl_efault, 64b, __retl_efault
- .word 65b, __retl_efault, 66b, __retl_efault
- .previous
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index ac6c281ccfe0..cfdfb3707c16 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -117,7 +117,7 @@
90 common dup2 sys_dup2
91 32 setfsuid32 sys_setfsuid
92 common fcntl sys_fcntl compat_sys_fcntl
-93 common select sys_select
+93 common select sys_select compat_sys_select
94 32 setfsgid32 sys_setfsgid
95 common fsync sys_fsync
96 common setpriority sys_setpriority
@@ -155,7 +155,7 @@
123 32 fchown sys_fchown16
123 64 fchown sys_fchown
124 common fchmod sys_fchmod
-125 common recvfrom sys_recvfrom
+125 common recvfrom sys_recvfrom compat_sys_recvfrom
126 32 setreuid sys_setreuid16
126 64 setreuid sys_setreuid
127 32 setregid sys_setregid16
@@ -247,7 +247,7 @@
204 32 readdir sys_old_readdir compat_sys_old_readdir
204 64 readdir sys_nis_syscall
205 common readahead sys_readahead compat_sys_readahead
-206 common socketcall sys_socketcall sys32_socketcall
+206 common socketcall sys_socketcall compat_sys_socketcall
207 common syslog sys_syslog
208 common lookup_dcookie sys_ni_syscall
209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
@@ -461,7 +461,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index ef805eaa9e01..9f1e76ddda5a 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -447,43 +447,31 @@ static int bulk_req_safe_read(
return n;
}
-/* Called without dev->lock held, and only in interrupt context. */
-static void ubd_handler(void)
+static void ubd_end_request(struct io_thread_req *io_req)
{
- int n;
- int count;
-
- while(1){
- n = bulk_req_safe_read(
- thread_fd,
- irq_req_buffer,
- &irq_remainder,
- &irq_remainder_size,
- UBD_REQ_BUFFER_SIZE
- );
- if (n < 0) {
- if(n == -EAGAIN)
- break;
- printk(KERN_ERR "spurious interrupt in ubd_handler, "
- "err = %d\n", -n);
- return;
- }
- for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
- struct io_thread_req *io_req = (*irq_req_buffer)[count];
-
- if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
- blk_queue_max_discard_sectors(io_req->req->q, 0);
- blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
- }
- blk_mq_end_request(io_req->req, io_req->error);
- kfree(io_req);
- }
+ if (io_req->error == BLK_STS_NOTSUPP) {
+ if (req_op(io_req->req) == REQ_OP_DISCARD)
+ blk_queue_disable_discard(io_req->req->q);
+ else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES)
+ blk_queue_disable_write_zeroes(io_req->req->q);
}
+ blk_mq_end_request(io_req->req, io_req->error);
+ kfree(io_req);
}
static irqreturn_t ubd_intr(int irq, void *dev)
{
- ubd_handler();
+ int len, i;
+
+ while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
+ &irq_remainder, &irq_remainder_size,
+ UBD_REQ_BUFFER_SIZE)) >= 0) {
+ for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
+ ubd_end_request((*irq_req_buffer)[i]);
+ }
+
+ if (len < 0 && len != -EAGAIN)
+ pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
return IRQ_HANDLED;
}
@@ -847,6 +835,7 @@ static int ubd_add(int n, char **error_out)
struct queue_limits lim = {
.max_segments = MAX_SG,
.seg_boundary_mask = PAGE_SIZE - 1,
+ .features = BLK_FEAT_WRITE_CACHE,
};
struct gendisk *disk;
int err = 0;
@@ -893,8 +882,6 @@ static int ubd_add(int n, char **error_out)
goto out_cleanup_tags;
}
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_write_cache(disk->queue, true, false);
disk->major = UBD_MAJOR;
disk->first_minor = n << UBD_SHIFT;
disk->minors = 1 << UBD_SHIFT;
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 6fe34779291a..18f902da8e99 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += bpf_perf_event.h
generic-y += bug.h
generic-y += compat.h
generic-y += current.h
@@ -20,6 +19,7 @@ generic-y += param.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
+generic-y += runtime-const.h
generic-y += softirq_stack.h
generic-y += switch_to.h
generic-y += topology.h
diff --git a/arch/um/include/asm/bpf_perf_event.h b/arch/um/include/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..287221342d2c
--- /dev/null
+++ b/arch/um/include/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * asm-generic/bpf_perf_event.h is part of the uapi headers, but since
+ * arch/um has no uapi of its own, we can't use the "generic-y"
+ * Kbuild rule to generate the wrapper
+ */
+
+#include <asm-generic/bpf_perf_event.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1d7122a1883e..7cd41bbaf875 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1118,6 +1118,13 @@ config X86_LOCAL_APIC
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
select IRQ_DOMAIN_HIERARCHY
+config ACPI_MADT_WAKEUP
+ def_bool y
+ depends on X86_64
+ depends on ACPI
+ depends on SMP
+ depends on X86_LOCAL_APIC
+
config X86_IO_APIC
def_bool y
depends on X86_LOCAL_APIC || X86_UP_IOAPIC
@@ -2038,26 +2045,6 @@ config EFI_MIXED
If unsure, say N.
-config EFI_FAKE_MEMMAP
- bool "Enable EFI fake memory map"
- depends on EFI
- help
- Saying Y here will enable "efi_fake_mem" boot option. By specifying
- this parameter, you can add arbitrary attribute to specific memory
- range by updating original (firmware provided) EFI memmap. This is
- useful for debugging of EFI memmap related feature, e.g., Address
- Range Mirroring feature.
-
-config EFI_MAX_FAKE_MEM
- int "maximum allowable number of ranges in efi_fake_mem boot option"
- depends on EFI_FAKE_MEMMAP
- range 1 128
- default 8
- help
- Maximum allowable number of ranges in efi_fake_mem boot option.
- Ranges can be set up to this value using comma-separated list.
- The default value is 8.
-
config EFI_RUNTIME_MAP
bool "Export EFI runtime maps to sysfs" if EXPERT
depends on EFI
@@ -2427,6 +2414,15 @@ config STRICT_SIGALTSTACK_SIZE
Say 'N' unless you want to really enforce this check.
+config CFI_AUTO_DEFAULT
+ bool "Attempt to use FineIBT by default at boot time"
+ depends on FINEIBT
+ default y
+ help
+ Attempt to use FineIBT by default at boot time. If enabled,
+ this is the same as booting with "cfi=auto". If disabled,
+ this is the same as booting with "cfi=kcfi".
+
source "kernel/livepatch/Kconfig"
endmenu
diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler
index 59aedf32c4ea..6d20a6ce0507 100644
--- a/arch/x86/Kconfig.assembler
+++ b/arch/x86/Kconfig.assembler
@@ -36,6 +36,6 @@ config AS_VPCLMULQDQ
Supported by binutils >= 2.30 and LLVM integrated assembler
config AS_WRUSS
- def_bool $(as-instr,wrussq %rax$(comma)(%rbx))
+ def_bool $(as-instr64,wrussq %rax$(comma)(%rbx))
help
Supported by binutils >= 2.31 and LLVM integrated assembler
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 243ee86cb1b1..f2051644de94 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -105,9 +105,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
-vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
-$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index dec961c6d16a..f4d82379bf44 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -119,13 +119,8 @@ char *skip_spaces(const char *str)
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
-enum parse_mode {
- PARSE_MEMMAP,
- PARSE_EFI,
-};
-
static int
-parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode)
+parse_memmap(char *p, u64 *start, u64 *size)
{
char *oldp;
@@ -148,29 +143,11 @@ parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode)
*start = memparse(p + 1, &p);
return 0;
case '@':
- if (mode == PARSE_MEMMAP) {
- /*
- * memmap=nn@ss specifies usable region, should
- * be skipped
- */
- *size = 0;
- } else {
- u64 flags;
-
- /*
- * efi_fake_mem=nn@ss:attr the attr specifies
- * flags that might imply a soft-reservation.
- */
- *start = memparse(p + 1, &p);
- if (p && *p == ':') {
- p++;
- if (kstrtoull(p, 0, &flags) < 0)
- *size = 0;
- else if (flags & EFI_MEMORY_SP)
- return 0;
- }
- *size = 0;
- }
+ /*
+ * memmap=nn@ss specifies usable region, should
+ * be skipped
+ */
+ *size = 0;
fallthrough;
default:
/*
@@ -185,7 +162,7 @@ parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode)
return -EINVAL;
}
-static void mem_avoid_memmap(enum parse_mode mode, char *str)
+static void mem_avoid_memmap(char *str)
{
static int i;
@@ -200,7 +177,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str)
if (k)
*k++ = 0;
- rc = parse_memmap(str, &start, &size, mode);
+ rc = parse_memmap(str, &start, &size);
if (rc < 0)
break;
str = k;
@@ -281,7 +258,7 @@ static void handle_mem_options(void)
break;
if (!strcmp(param, "memmap")) {
- mem_avoid_memmap(PARSE_MEMMAP, val);
+ mem_avoid_memmap(val);
} else if (IS_ENABLED(CONFIG_X86_64) && strstr(param, "hugepages")) {
parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
@@ -295,8 +272,6 @@ static void handle_mem_options(void)
if (mem_size < mem_limit)
mem_limit = mem_size;
- } else if (!strcmp(param, "efi_fake_mem")) {
- mem_avoid_memmap(PARSE_EFI, val);
}
}
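
For readers unfamiliar with the memmap= grammar that the simplified parse_memmap() above handles: only the size prefix and the separator character matter to KASLR, and the plain "size@start" form marks a usable region that is skipped. The user-space sketch below models that parsing; parse_size() is a rough stand-in for the kernel's memparse(), and the K/M/G suffix handling is an assumption for illustration only.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Rough user-space model of memparse(): a number with an optional suffix. */
static uint64_t parse_size(const char *s, const char **endp)
{
	char *end;
	uint64_t v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; end++; break;
	}
	*endp = end;
	return v;
}

int main(void)
{
	const char *p;
	uint64_t start = 0;
	uint64_t size = parse_size("64M@0x100000000", &p);

	if (*p == '@') {
		/* memmap=nn@ss is a usable region: KASLR need not avoid it */
		start = parse_size(p + 1, &p);
		size = 0;
	}
	printf("start=%#llx size=%llu\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}
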
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b70e4a21c15f..944454306ef4 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -531,8 +531,3 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
return output + entry_offset;
}
-
-void __fortify_panic(const u8 reason, size_t avail, size_t size)
-{
- error("detected buffer overflow");
-}
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 0457a9d7e515..cd44e120fe53 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -127,7 +127,35 @@ static bool fault_in_kernel_space(unsigned long address)
#include "../../lib/insn.c"
/* Include code for early handlers */
-#include "../../kernel/sev-shared.c"
+#include "../../coco/sev/shared.c"
+
+static struct svsm_ca *svsm_get_caa(void)
+{
+ return boot_svsm_caa;
+}
+
+static u64 svsm_get_caa_pa(void)
+{
+ return boot_svsm_caa_pa;
+}
+
+static int svsm_perform_call_protocol(struct svsm_call *call)
+{
+ struct ghcb *ghcb;
+ int ret;
+
+ if (boot_ghcb)
+ ghcb = boot_ghcb;
+ else
+ ghcb = NULL;
+
+ do {
+ ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
+ : svsm_perform_msr_protocol(call);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
bool sev_snp_enabled(void)
{
@@ -145,8 +173,8 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
* If private -> shared then invalidate the page before requesting the
* state change in the RMP table.
*/
- if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+ if (op == SNP_PAGE_STATE_SHARED)
+ pvalidate_4k_page(paddr, paddr, false);
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
@@ -161,8 +189,8 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
* Now that page state is changed in the RMP table, validate it so that it is
* consistent with the RMP entry.
*/
- if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+ if (op == SNP_PAGE_STATE_PRIVATE)
+ pvalidate_4k_page(paddr, paddr, true);
}
void snp_set_page_private(unsigned long paddr)
@@ -256,6 +284,16 @@ void sev_es_shutdown_ghcb(void)
error("SEV-ES CPU Features missing.");
/*
+ * This denotes whether to use the GHCB MSR protocol or the GHCB
+ * shared page to perform a GHCB request. Since the GHCB page is
+ * being changed to encrypted, it can't be used to perform GHCB
+ * requests. Clear the boot_ghcb variable so that the GHCB MSR
+ * protocol is used to change the GHCB page over to an encrypted
+ * page.
+ */
+ boot_ghcb = NULL;
+
+ /*
* GHCB Page must be flushed from the cache and mapped encrypted again.
* Otherwise the running kernel will see strange cache effects when
* trying to use that page.
@@ -463,6 +501,13 @@ static bool early_snp_init(struct boot_params *bp)
setup_cpuid_table(cc_info);
/*
+ * Record the SVSM Calling Area (CA) address if the guest is not
+ * running at VMPL0. The CA will be used to communicate with the
+ * SVSM and request its services.
+ */
+ svsm_setup_ca(cc_info);
+
+ /*
* Pass run-time kernel a pointer to CC info via boot_params so EFI
* config table doesn't need to be searched again during early startup
* phase.
@@ -565,22 +610,31 @@ void sev_enable(struct boot_params *bp)
* features.
*/
if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
- if (!(get_hv_features() & GHCB_HV_FT_SNP))
+ u64 hv_features;
+ int ret;
+
+ hv_features = get_hv_features();
+ if (!(hv_features & GHCB_HV_FT_SNP))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
/*
- * Enforce running at VMPL0.
- *
- * RMPADJUST modifies RMP permissions of a lesser-privileged (numerically
- * higher) privilege level. Here, clear the VMPL1 permission mask of the
- * GHCB page. If the guest is not running at VMPL0, this will fail.
+ * Enforce running at VMPL0 or with an SVSM.
*
- * If the guest is running at VMPL0, it will succeed. Even if that operation
- * modifies permission bits, it is still ok to do so currently because Linux
- * SNP guests running at VMPL0 only run at VMPL0, so VMPL1 or higher
- * permission mask changes are a don't-care.
+ * Use RMPADJUST (see the rmpadjust() function for a description of
+ * what the instruction does) to update the VMPL1 permissions of a
+ * page. If the guest is running at VMPL0, this will succeed. If the
+ * guest is running at any other VMPL, this will fail. Linux SNP guests
+ * only ever run at a single VMPL level so permission mask changes of a
+ * lesser-privileged VMPL are a don't-care.
+ */
+ ret = rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, 1);
+
+ /*
+ * Running at VMPL0 is not required if an SVSM is present and the hypervisor
+ * supports the required SVSM GHCB events.
*/
- if (rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, 1))
+ if (ret &&
+ !(snp_vmpl && (hv_features & GHCB_HV_FT_SNP_MULTI_VMPL)))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}
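
The new svsm_perform_call_protocol() in the decompressor above boils down to a transport-selection plus retry-on-EAGAIN pattern. A minimal user-space sketch of that shape follows; fast_transport()/slow_transport() are stand-ins for the GHCB and MSR protocol helpers, not real kernel APIs.

#include <stdio.h>
#include <errno.h>

struct call { int payload; };

static int fast_transport(struct call *c) { (void)c; return 0; }
static int slow_transport(struct call *c) { (void)c; return 0; }

/* Prefer the faster transport when it exists, otherwise fall back, and
 * retry while the callee reports the request as incomplete. */
static int perform_call(struct call *c, int fast_available)
{
	int ret;

	do {
		ret = fast_available ? fast_transport(c)
				     : slow_transport(c);
	} while (ret == -EAGAIN);

	return ret;
}

int main(void)
{
	struct call c = { .payload = 1 };

	printf("ret=%d\n", perform_call(&c, 0));
	return 0;
}
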
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index fed8d13ce252..0aae4d4ed615 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -203,7 +203,7 @@ int check_knl_erratum(void)
*/
if (!is_intel() ||
cpu.family != 6 ||
- cpu.model != INTEL_FAM6_XEON_PHI_KNL)
+ cpu.model != 0x57 /*INTEL_XEON_PHI_KNL*/)
return 0;
/*
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index 9049f390d834..9d0fea18d3c8 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -27,34 +27,32 @@ char *heap_end = _end; /* Default end of heap = no heap */
* screws up the old-style command line protocol, adjust by
* filling in the new-style command line pointer instead.
*/
-
static void copy_boot_params(void)
{
struct old_cmdline {
u16 cl_magic;
u16 cl_offset;
};
- const struct old_cmdline * const oldcmd =
- absolute_pointer(OLD_CL_ADDRESS);
+ const struct old_cmdline * const oldcmd = absolute_pointer(OLD_CL_ADDRESS);
BUILD_BUG_ON(sizeof(boot_params) != 4096);
memcpy(&boot_params.hdr, &hdr, sizeof(hdr));
- if (!boot_params.hdr.cmd_line_ptr &&
- oldcmd->cl_magic == OLD_CL_MAGIC) {
- /* Old-style command line protocol. */
+ if (!boot_params.hdr.cmd_line_ptr && oldcmd->cl_magic == OLD_CL_MAGIC) {
+ /* Old-style command line protocol */
u16 cmdline_seg;
- /* Figure out if the command line falls in the region
- of memory that an old kernel would have copied up
- to 0x90000... */
+ /*
+ * Figure out if the command line falls in the region
+ * of memory that an old kernel would have copied up
+ * to 0x90000...
+ */
if (oldcmd->cl_offset < boot_params.hdr.setup_move_size)
cmdline_seg = ds();
else
cmdline_seg = 0x9000;
- boot_params.hdr.cmd_line_ptr =
- (cmdline_seg << 4) + oldcmd->cl_offset;
+ boot_params.hdr.cmd_line_ptr = (cmdline_seg << 4) + oldcmd->cl_offset;
}
}
@@ -66,6 +64,7 @@ static void copy_boot_params(void)
static void keyboard_init(void)
{
struct biosregs ireg, oreg;
+
initregs(&ireg);
ireg.ah = 0x02; /* Get keyboard status */
@@ -83,8 +82,10 @@ static void query_ist(void)
{
struct biosregs ireg, oreg;
- /* Some older BIOSes apparently crash on this call, so filter
- it from machines too old to have SpeedStep at all. */
+ /*
+ * Some older BIOSes apparently crash on this call, so filter
+ * it from machines too old to have SpeedStep at all.
+ */
if (cpu.level < 6)
return;
@@ -119,17 +120,13 @@ static void init_heap(void)
char *stack_end;
if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
- asm("leal %n1(%%esp),%0"
- : "=r" (stack_end) : "i" (STACK_SIZE));
-
- heap_end = (char *)
- ((size_t)boot_params.hdr.heap_end_ptr + 0x200);
+ stack_end = (char *) (current_stack_pointer - STACK_SIZE);
+ heap_end = (char *) ((size_t)boot_params.hdr.heap_end_ptr + 0x200);
if (heap_end > stack_end)
heap_end = stack_end;
} else {
/* Boot protocol 2.00 only, no heap available */
- puts("WARNING: Ancient bootloader, some functionality "
- "may be limited!\n");
+ puts("WARNING: Ancient bootloader, some functionality may be limited!\n");
}
}
@@ -150,12 +147,11 @@ void main(void)
/* Make sure we have all the proper CPU support */
if (validate_cpu()) {
- puts("Unable to boot - please use a kernel appropriate "
- "for your CPU.\n");
+ puts("Unable to boot - please use a kernel appropriate for your CPU.\n");
die();
}
- /* Tell the BIOS what CPU mode we intend to run in. */
+ /* Tell the BIOS what CPU mode we intend to run in */
set_bios_mode();
/* Detect memory layout */
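
The reworked init_heap() above is plain pointer arithmetic: the heap ends at heap_end_ptr + 0x200 but is clamped so it never overlaps the stack. A stand-alone model with made-up numbers (STACK_SIZE and the addresses are invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define STACK_SIZE 1024	/* illustrative, not the real boot stack size */

int main(void)
{
	uintptr_t sp = 0x9000;			/* pretend stack pointer */
	uintptr_t heap_end_ptr = 0x8e00;	/* from the setup header */

	uintptr_t stack_end = sp - STACK_SIZE;
	uintptr_t heap_end = heap_end_ptr + 0x200;

	if (heap_end > stack_end)
		heap_end = stack_end;		/* never run into the stack */

	printf("heap_end=%#lx stack_end=%#lx\n",
	       (unsigned long)heap_end, (unsigned long)stack_end);
	return 0;
}
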
diff --git a/arch/x86/coco/Makefile b/arch/x86/coco/Makefile
index c816acf78b6a..eabdc7486538 100644
--- a/arch/x86/coco/Makefile
+++ b/arch/x86/coco/Makefile
@@ -6,3 +6,4 @@ CFLAGS_core.o += -fno-stack-protector
obj-y += core.o
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx/
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev/
diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index b31ef2424d19..0f81f70aca82 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -29,7 +29,6 @@ static bool noinstr intel_cc_platform_has(enum cc_attr attr)
{
switch (attr) {
case CC_ATTR_GUEST_UNROLL_STRING_IO:
- case CC_ATTR_HOTPLUG_DISABLED:
case CC_ATTR_GUEST_MEM_ENCRYPT:
case CC_ATTR_MEM_ENCRYPT:
return true;
diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile
new file mode 100644
index 000000000000..4e375e7305ac
--- /dev/null
+++ b/arch/x86/coco/sev/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y += core.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_core.o = -pg
+endif
+
+KASAN_SANITIZE_core.o := n
+KMSAN_SANITIZE_core.o := n
+KCOV_INSTRUMENT_core.o := n
+
+# With some compiler versions the generated code results in boot hangs, caused
+# by several compilation units. To be safe, disable all instrumentation.
+KCSAN_SANITIZE := n
diff --git a/arch/x86/kernel/sev.c b/arch/x86/coco/sev/core.c
index 3342ed58e168..082d61d85dfc 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/coco/sev/core.c
@@ -133,16 +133,20 @@ struct ghcb_state {
struct ghcb *ghcb;
};
+/* For early boot SVSM communication */
+static struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
+
static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
+static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
+static DEFINE_PER_CPU(u64, svsm_caa_pa);
struct sev_config {
__u64 debug : 1,
/*
- * A flag used by __set_pages_state() that indicates when the
- * per-CPU GHCB has been created and registered and thus can be
- * used by the BSP instead of the early boot GHCB.
+ * Indicates when the per-CPU GHCB has been created and registered
+ * and thus can be used by the BSP instead of the early boot GHCB.
*
* For APs, the per-CPU GHCB is created before they are started
* and registered upon startup, so this flag can be used globally
@@ -150,6 +154,15 @@ struct sev_config {
*/
ghcbs_initialized : 1,
+ /*
+ * Indicates when the per-CPU SVSM CA is to be used instead of the
+ * boot SVSM CA.
+ *
+ * For APs, the per-CPU SVSM CA is created as part of the AP
+ * bringup, so this flag can be used globally for the BSP and APs.
+ */
+ use_cas : 1,
+
__reserved : 62;
};
@@ -572,8 +585,61 @@ fault:
return ES_EXCEPTION;
}
+static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
+{
+ long error_code = ctxt->fi.error_code;
+ int trapnr = ctxt->fi.vector;
+
+ ctxt->regs->orig_ax = ctxt->fi.error_code;
+
+ switch (trapnr) {
+ case X86_TRAP_GP:
+ exc_general_protection(ctxt->regs, error_code);
+ break;
+ case X86_TRAP_UD:
+ exc_invalid_op(ctxt->regs);
+ break;
+ case X86_TRAP_PF:
+ write_cr2(ctxt->fi.cr2);
+ exc_page_fault(ctxt->regs, error_code);
+ break;
+ case X86_TRAP_AC:
+ exc_alignment_check(ctxt->regs, error_code);
+ break;
+ default:
+ pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
+ BUG();
+ }
+}
+
/* Include code shared with pre-decompression boot stage */
-#include "sev-shared.c"
+#include "shared.c"
+
+static inline struct svsm_ca *svsm_get_caa(void)
+{
+ /*
+ * Use rIP-relative references when called early in the boot. If
+ * ->use_cas is set, then it is late in the boot and no need
+ * to worry about rIP-relative references.
+ */
+ if (RIP_REL_REF(sev_cfg).use_cas)
+ return this_cpu_read(svsm_caa);
+ else
+ return RIP_REL_REF(boot_svsm_caa);
+}
+
+static u64 svsm_get_caa_pa(void)
+{
+ /*
+ * Use rIP-relative references when called early in the boot. If
+ * ->use_cas is set, then it is late in the boot and no need
+ * to worry about rIP-relative references.
+ */
+ if (RIP_REL_REF(sev_cfg).use_cas)
+ return this_cpu_read(svsm_caa_pa);
+ else
+ return RIP_REL_REF(boot_svsm_caa_pa);
+}
static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
@@ -600,6 +666,44 @@ static noinstr void __sev_put_ghcb(struct ghcb_state *state)
}
}
+static int svsm_perform_call_protocol(struct svsm_call *call)
+{
+ struct ghcb_state state;
+ unsigned long flags;
+ struct ghcb *ghcb;
+ int ret;
+
+ /*
+ * This can be called very early in the boot, use native functions in
+ * order to avoid paravirt issues.
+ */
+ flags = native_local_irq_save();
+
+ /*
+ * Use rip-relative references when called early in the boot. If
+ * ghcbs_initialized is set, then it is late in the boot and no need
+ * to worry about rip-relative references in called functions.
+ */
+ if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
+ ghcb = __sev_get_ghcb(&state);
+ else if (RIP_REL_REF(boot_ghcb))
+ ghcb = RIP_REL_REF(boot_ghcb);
+ else
+ ghcb = NULL;
+
+ do {
+ ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
+ : svsm_perform_msr_protocol(call);
+ } while (ret == -EAGAIN);
+
+ if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
+ __sev_put_ghcb(&state);
+
+ native_local_irq_restore(flags);
+
+ return ret;
+}
+
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
@@ -709,7 +813,6 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
{
unsigned long paddr_end;
u64 val;
- int ret;
vaddr = vaddr & PAGE_MASK;
@@ -717,12 +820,9 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
paddr_end = paddr + (npages << PAGE_SHIFT);
while (paddr < paddr_end) {
- if (op == SNP_PAGE_STATE_SHARED) {
- /* Page validation must be rescinded before changing to shared */
- ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
- if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
- goto e_term;
- }
+ /* Page validation must be rescinded before changing to shared */
+ if (op == SNP_PAGE_STATE_SHARED)
+ pvalidate_4k_page(vaddr, paddr, false);
/*
* Use the MSR protocol because this function can be called before
@@ -744,12 +844,9 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
paddr, GHCB_MSR_PSC_RESP_VAL(val)))
goto e_term;
- if (op == SNP_PAGE_STATE_PRIVATE) {
- /* Page validation must be performed after changing to private */
- ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
- if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
- goto e_term;
- }
+ /* Page validation must be performed after changing to private */
+ if (op == SNP_PAGE_STATE_PRIVATE)
+ pvalidate_4k_page(vaddr, paddr, true);
vaddr += PAGE_SIZE;
paddr += PAGE_SIZE;
@@ -913,22 +1010,49 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
}
-static int snp_set_vmsa(void *va, bool vmsa)
+static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
{
- u64 attrs;
+ int ret;
- /*
- * Running at VMPL0 allows the kernel to change the VMSA bit for a page
- * using the RMPADJUST instruction. However, for the instruction to
- * succeed it must target the permissions of a lesser privileged
- * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
- * instruction in the AMD64 APM Volume 3).
- */
- attrs = 1;
- if (vmsa)
- attrs |= RMPADJUST_VMSA_PAGE_BIT;
+ if (snp_vmpl) {
+ struct svsm_call call = {};
+ unsigned long flags;
+
+ local_irq_save(flags);
- return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+ call.caa = this_cpu_read(svsm_caa);
+ call.rcx = __pa(va);
+
+ if (make_vmsa) {
+ /* Protocol 0, Call ID 2 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
+ call.rdx = __pa(caa);
+ call.r8 = apic_id;
+ } else {
+ /* Protocol 0, Call ID 3 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
+ }
+
+ ret = svsm_perform_call_protocol(&call);
+
+ local_irq_restore(flags);
+ } else {
+ /*
+ * If the kernel runs at VMPL0, it can change the VMSA
+ * bit for a page using the RMPADJUST instruction.
+ * However, for the instruction to succeed it must
+ * target the permissions of a lesser privileged (higher
+ * numbered) VMPL level, so use VMPL1.
+ */
+ u64 attrs = 1;
+
+ if (make_vmsa)
+ attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+ ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+ }
+
+ return ret;
}
#define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
@@ -962,11 +1086,11 @@ static void *snp_alloc_vmsa_page(int cpu)
return page_address(p + 1);
}
-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
{
int err;
- err = snp_set_vmsa(vmsa, false);
+ err = snp_set_vmsa(vmsa, NULL, apic_id, false);
if (err)
pr_err("clear VMSA page failed (%u), leaking page\n", err);
else
@@ -977,6 +1101,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
{
struct sev_es_save_area *cur_vmsa, *vmsa;
struct ghcb_state state;
+ struct svsm_ca *caa;
unsigned long flags;
struct ghcb *ghcb;
u8 sipi_vector;
@@ -1023,6 +1148,9 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
if (!vmsa)
return -ENOMEM;
+ /* If an SVSM is present, the SVSM per-CPU CAA will be !NULL */
+ caa = per_cpu(svsm_caa, cpu);
+
/* CR4 should maintain the MCE value */
cr4 = native_read_cr4() & X86_CR4_MCE;
@@ -1070,11 +1198,11 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
* VMPL level
* SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
*/
- vmsa->vmpl = 0;
+ vmsa->vmpl = snp_vmpl;
vmsa->sev_features = sev_status >> 2;
/* Switch the page over to a VMSA page now that it is initialized */
- ret = snp_set_vmsa(vmsa, true);
+ ret = snp_set_vmsa(vmsa, caa, apic_id, true);
if (ret) {
pr_err("set VMSA page failed (%u)\n", ret);
free_page((unsigned long)vmsa);
@@ -1090,7 +1218,10 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
vc_ghcb_invalidate(ghcb);
ghcb_set_rax(ghcb, vmsa->sev_features);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
- ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
+ ghcb_set_sw_exit_info_1(ghcb,
+ ((u64)apic_id << 32) |
+ ((u64)snp_vmpl << 16) |
+ SVM_VMGEXIT_AP_CREATE);
ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
sev_es_wr_ghcb_msr(__pa(ghcb));
@@ -1108,13 +1239,13 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
/* Perform cleanup if there was an error */
if (ret) {
- snp_cleanup_vmsa(vmsa);
+ snp_cleanup_vmsa(vmsa, apic_id);
vmsa = NULL;
}
/* Free up any previous VMSA page */
if (cur_vmsa)
- snp_cleanup_vmsa(cur_vmsa);
+ snp_cleanup_vmsa(cur_vmsa, apic_id);
/* Record the current VMSA page */
per_cpu(sev_vmsa, cpu) = vmsa;
@@ -1209,6 +1340,17 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
/* Is it a WRMSR? */
exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
+ if (regs->cx == MSR_SVSM_CAA) {
+ /* Writes to the SVSM CAA MSR are ignored */
+ if (exit_info_1)
+ return ES_OK;
+
+ regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
+ regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
+
+ return ES_OK;
+ }
+
ghcb_set_rcx(ghcb, regs->cx);
if (exit_info_1) {
ghcb_set_rax(ghcb, regs->ax);
@@ -1346,6 +1488,18 @@ static void __init alloc_runtime_data(int cpu)
panic("Can't allocate SEV-ES runtime data");
per_cpu(runtime_data, cpu) = data;
+
+ if (snp_vmpl) {
+ struct svsm_ca *caa;
+
+ /* Allocate the SVSM CA page if an SVSM is present */
+ caa = memblock_alloc(sizeof(*caa), PAGE_SIZE);
+ if (!caa)
+ panic("Can't allocate SVSM CA page\n");
+
+ per_cpu(svsm_caa, cpu) = caa;
+ per_cpu(svsm_caa_pa, cpu) = __pa(caa);
+ }
}
static void __init init_ghcb(int cpu)
@@ -1395,6 +1549,32 @@ void __init sev_es_init_vc_handling(void)
init_ghcb(cpu);
}
+ /* If running under an SVSM, switch to the per-cpu CA */
+ if (snp_vmpl) {
+ struct svsm_call call = {};
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+
+ /*
+ * SVSM_CORE_REMAP_CA call:
+ * RAX = 0 (Protocol=0, CallID=0)
+ * RCX = New CA GPA
+ */
+ call.caa = svsm_get_caa();
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
+ call.rcx = this_cpu_read(svsm_caa_pa);
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n",
+ ret, call.rax_out);
+
+ sev_cfg.use_cas = true;
+
+ local_irq_restore(flags);
+ }
+
sev_es_setup_play_dead();
/* Secondary CPUs use the runtime #VC handler */
@@ -1819,33 +1999,6 @@ static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
return result;
}
-static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
-{
- long error_code = ctxt->fi.error_code;
- int trapnr = ctxt->fi.vector;
-
- ctxt->regs->orig_ax = ctxt->fi.error_code;
-
- switch (trapnr) {
- case X86_TRAP_GP:
- exc_general_protection(ctxt->regs, error_code);
- break;
- case X86_TRAP_UD:
- exc_invalid_op(ctxt->regs);
- break;
- case X86_TRAP_PF:
- write_cr2(ctxt->fi.cr2);
- exc_page_fault(ctxt->regs, error_code);
- break;
- case X86_TRAP_AC:
- exc_alignment_check(ctxt->regs, error_code);
- break;
- default:
- pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
- BUG();
- }
-}
-
static __always_inline bool is_vc2_stack(unsigned long sp)
{
return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
@@ -2095,6 +2248,47 @@ found_cc_info:
return cc_info;
}
+static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
+{
+ struct svsm_call call = {};
+ int ret;
+ u64 pa;
+
+ /*
+ * Record the SVSM Calling Area address (CAA) if the guest is not
+ * running at VMPL0. The CA will be used to communicate with the
+ * SVSM to perform the SVSM services.
+ */
+ if (!svsm_setup_ca(cc_info))
+ return;
+
+ /*
+ * It is very early in the boot and the kernel is running identity
+ * mapped but without having adjusted the pagetables to where the
+ * kernel was loaded (physbase), so get the CA address using
+ * RIP-relative addressing.
+ */
+ pa = (u64)&RIP_REL_REF(boot_svsm_ca_page);
+
+ /*
+ * Switch over to the boot SVSM CA while the current CA is still
+ * addressable. There is no GHCB at this point so use the MSR protocol.
+ *
+ * SVSM_CORE_REMAP_CA call:
+ * RAX = 0 (Protocol=0, CallID=0)
+ * RCX = New CA GPA
+ */
+ call.caa = svsm_get_caa();
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
+ call.rcx = pa;
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n", ret, call.rax_out);
+
+ RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)pa;
+ RIP_REL_REF(boot_svsm_caa_pa) = pa;
+}
+
bool __head snp_init(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
@@ -2108,6 +2302,8 @@ bool __head snp_init(struct boot_params *bp)
setup_cpuid_table(cc_info);
+ svsm_setup(cc_info);
+
/*
* The CC blob will be used later to access the secrets page. Cache
* it here like the boot kernel does.
@@ -2156,23 +2352,27 @@ static void dump_cpuid_table(void)
* expected, but that initialization happens too early in boot to print any
* sort of indicator, and there's not really any other good place to do it,
* so do it here.
+ *
+ * If running as an SNP guest, report the current VM privilege level (VMPL).
*/
-static int __init report_cpuid_table(void)
+static int __init report_snp_info(void)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
- if (!cpuid_table->count)
- return 0;
+ if (cpuid_table->count) {
+ pr_info("Using SNP CPUID table, %d entries present.\n",
+ cpuid_table->count);
- pr_info("Using SNP CPUID table, %d entries present.\n",
- cpuid_table->count);
+ if (sev_cfg.debug)
+ dump_cpuid_table();
+ }
- if (sev_cfg.debug)
- dump_cpuid_table();
+ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ pr_info("SNP running at VMPL%u.\n", snp_vmpl);
return 0;
}
-arch_initcall(report_cpuid_table);
+arch_initcall(report_snp_info);
static int __init init_sev_config(char *str)
{
@@ -2191,6 +2391,56 @@ static int __init init_sev_config(char *str)
}
__setup("sev=", init_sev_config);
+static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
+{
+ /* If (new) lengths have been returned, propagate them up */
+ if (call->rcx_out != call->rcx)
+ input->manifest_buf.len = call->rcx_out;
+
+ if (call->rdx_out != call->rdx)
+ input->certificates_buf.len = call->rdx_out;
+
+ if (call->r8_out != call->r8)
+ input->report_buf.len = call->r8_out;
+}
+
+int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
+ struct svsm_attest_call *input)
+{
+ struct svsm_attest_call *ac;
+ unsigned long flags;
+ u64 attest_call_pa;
+ int ret;
+
+ if (!snp_vmpl)
+ return -EINVAL;
+
+ local_irq_save(flags);
+
+ call->caa = svsm_get_caa();
+
+ ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
+ attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+ *ac = *input;
+
+ /*
+ * Set input registers for the request and set RDX and R8 to known
+ * values in order to detect length values being returned in them.
+ */
+ call->rax = call_id;
+ call->rcx = attest_call_pa;
+ call->rdx = -1;
+ call->r8 = -1;
+ ret = svsm_perform_call_protocol(call);
+ update_attest_input(call, input);
+
+ local_irq_restore(flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
+
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
struct ghcb_state state;
@@ -2299,3 +2549,58 @@ void sev_show_status(void)
}
pr_cont("\n");
}
+
+void __init snp_update_svsm_ca(void)
+{
+ if (!snp_vmpl)
+ return;
+
+ /* Update the CAA to a proper kernel address */
+ boot_svsm_caa = &boot_svsm_ca_page;
+}
+
+#ifdef CONFIG_SYSFS
+static ssize_t vmpl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", snp_vmpl);
+}
+
+static struct kobj_attribute vmpl_attr = __ATTR_RO(vmpl);
+
+static struct attribute *vmpl_attrs[] = {
+ &vmpl_attr.attr,
+ NULL
+};
+
+static struct attribute_group sev_attr_group = {
+ .attrs = vmpl_attrs,
+};
+
+static int __init sev_sysfs_init(void)
+{
+ struct kobject *sev_kobj;
+ struct device *dev_root;
+ int ret;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return -ENODEV;
+
+ dev_root = bus_get_dev_root(&cpu_subsys);
+ if (!dev_root)
+ return -ENODEV;
+
+ sev_kobj = kobject_create_and_add("sev", &dev_root->kobj);
+ put_device(dev_root);
+
+ if (!sev_kobj)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(sev_kobj, &sev_attr_group);
+ if (ret)
+ kobject_put(sev_kobj);
+
+ return ret;
+}
+arch_initcall(sev_sysfs_init);
+#endif // CONFIG_SYSFS
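
The sysfs attribute added at the end of core.c above surfaces the guest VMPL to user space. Assuming the kobject lands under the cpu subsystem root, i.e. /sys/devices/system/cpu/sev/vmpl (treat the exact path as an assumption), a quick user-space check could look like this:

#include <stdio.h>

int main(void)
{
	/* Path assumed from the kobject being created under the cpu
	 * subsystem root; a missing file simply means "not an SNP guest". */
	FILE *f = fopen("/sys/devices/system/cpu/sev/vmpl", "r");
	int vmpl;

	if (!f) {
		puts("no SNP VMPL attribute");
		return 1;
	}
	if (fscanf(f, "%d", &vmpl) == 1)
		printf("running at VMPL%d\n", vmpl);
	fclose(f);
	return 0;
}
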
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/coco/sev/shared.c
index b4f8fa0f722c..71de53194089 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/coco/sev/shared.c
@@ -21,8 +21,30 @@
#define WARN(condition, format...) (!!(condition))
#define sev_printk(fmt, ...)
#define sev_printk_rtl(fmt, ...)
+#undef vc_forward_exception
+#define vc_forward_exception(c) panic("SNP: Hypervisor requested exception\n")
#endif
+/*
+ * SVSM related information:
+ * When running under an SVSM, the VMPL that Linux is executing at must be
+ * non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
+ *
+ * During boot, the page tables are set up as identity mapped and later
+ * changed to use kernel virtual addresses. Maintain separate virtual and
+ * physical addresses for the CAA to allow SVSM functions to be used during
+ * early boot, both with identity mapped virtual addresses and proper kernel
+ * virtual addresses.
+ */
+u8 snp_vmpl __ro_after_init;
+EXPORT_SYMBOL_GPL(snp_vmpl);
+static struct svsm_ca *boot_svsm_caa __ro_after_init;
+static u64 boot_svsm_caa_pa __ro_after_init;
+
+static struct svsm_ca *svsm_get_caa(void);
+static u64 svsm_get_caa_pa(void);
+static int svsm_perform_call_protocol(struct svsm_call *call);
+
/* I/O parameters for CPUID-related helpers */
struct cpuid_leaf {
u32 fn;
@@ -229,6 +251,126 @@ static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt
return ES_VMM_ERROR;
}
+static inline int svsm_process_result_codes(struct svsm_call *call)
+{
+ switch (call->rax_out) {
+ case SVSM_SUCCESS:
+ return 0;
+ case SVSM_ERR_INCOMPLETE:
+ case SVSM_ERR_BUSY:
+ return -EAGAIN;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Issue a VMGEXIT to call the SVSM:
+ * - Load the SVSM register state (RAX, RCX, RDX, R8 and R9)
+ * - Set the CA call pending field to 1
+ * - Issue VMGEXIT
+ * - Save the SVSM return register state (RAX, RCX, RDX, R8 and R9)
+ * - Perform atomic exchange of the CA call pending field
+ *
+ * - See the "Secure VM Service Module for SEV-SNP Guests" specification for
+ * details on the calling convention.
+ * - The calling convention loosely follows the Microsoft X64 calling
+ * convention by putting arguments in RCX, RDX, R8 and R9.
+ * - RAX specifies the SVSM protocol/callid as input and the return code
+ * as output.
+ */
+static __always_inline void svsm_issue_call(struct svsm_call *call, u8 *pending)
+{
+ register unsigned long rax asm("rax") = call->rax;
+ register unsigned long rcx asm("rcx") = call->rcx;
+ register unsigned long rdx asm("rdx") = call->rdx;
+ register unsigned long r8 asm("r8") = call->r8;
+ register unsigned long r9 asm("r9") = call->r9;
+
+ call->caa->call_pending = 1;
+
+ asm volatile("rep; vmmcall\n\t"
+ : "+r" (rax), "+r" (rcx), "+r" (rdx), "+r" (r8), "+r" (r9)
+ : : "memory");
+
+ *pending = xchg(&call->caa->call_pending, *pending);
+
+ call->rax_out = rax;
+ call->rcx_out = rcx;
+ call->rdx_out = rdx;
+ call->r8_out = r8;
+ call->r9_out = r9;
+}
+
+static int svsm_perform_msr_protocol(struct svsm_call *call)
+{
+ u8 pending = 0;
+ u64 val, resp;
+
+ /*
+ * When using the MSR protocol, be sure to save and restore
+ * the current MSR value.
+ */
+ val = sev_es_rd_ghcb_msr();
+
+ sev_es_wr_ghcb_msr(GHCB_MSR_VMPL_REQ_LEVEL(0));
+
+ svsm_issue_call(call, &pending);
+
+ resp = sev_es_rd_ghcb_msr();
+
+ sev_es_wr_ghcb_msr(val);
+
+ if (pending)
+ return -EINVAL;
+
+ if (GHCB_RESP_CODE(resp) != GHCB_MSR_VMPL_RESP)
+ return -EINVAL;
+
+ if (GHCB_MSR_VMPL_RESP_VAL(resp))
+ return -EINVAL;
+
+ return svsm_process_result_codes(call);
+}
+
+static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
+{
+ struct es_em_ctxt ctxt;
+ u8 pending = 0;
+
+ vc_ghcb_invalidate(ghcb);
+
+ /*
+ * Fill in protocol and format specifiers. This can be called very early
+ * in the boot, so use rip-relative references as needed.
+ */
+ ghcb->protocol_version = RIP_REL_REF(ghcb_version);
+ ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
+
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+ sev_es_wr_ghcb_msr(__pa(ghcb));
+
+ svsm_issue_call(call, &pending);
+
+ if (pending)
+ return -EINVAL;
+
+ switch (verify_exception_info(ghcb, &ctxt)) {
+ case ES_OK:
+ break;
+ case ES_EXCEPTION:
+ vc_forward_exception(&ctxt);
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+
+ return svsm_process_result_codes(call);
+}
+
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
struct es_em_ctxt *ctxt,
u64 exit_code, u64 exit_info_1,
@@ -1079,38 +1221,268 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
}
}
-static void pvalidate_pages(struct snp_psc_desc *desc)
+static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
+ int ret, u64 svsm_ret)
+{
+ WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
+ pfn, action, page_size, ret, svsm_ret);
+
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+}
+
+static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
+{
+ unsigned int page_size;
+ bool action;
+ u64 pfn;
+
+ pfn = pc->entry[pc->cur_index].pfn;
+ action = pc->entry[pc->cur_index].action;
+ page_size = pc->entry[pc->cur_index].page_size;
+
+ __pval_terminate(pfn, action, page_size, ret, svsm_ret);
+}
+
+static void svsm_pval_4k_page(unsigned long paddr, bool validate)
+{
+ struct svsm_pvalidate_call *pc;
+ struct svsm_call call = {};
+ unsigned long flags;
+ u64 pc_pa;
+ int ret;
+
+ /*
+ * This can be called very early in the boot, use native functions in
+ * order to avoid paravirt issues.
+ */
+ flags = native_local_irq_save();
+
+ call.caa = svsm_get_caa();
+
+ pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+ pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+ pc->num_entries = 1;
+ pc->cur_index = 0;
+ pc->entry[0].page_size = RMP_PG_SIZE_4K;
+ pc->entry[0].action = validate;
+ pc->entry[0].ignore_cf = 0;
+ pc->entry[0].pfn = paddr >> PAGE_SHIFT;
+
+ /* Protocol 0, Call ID 1 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+ call.rcx = pc_pa;
+
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ svsm_pval_terminate(pc, ret, call.rax_out);
+
+ native_local_irq_restore(flags);
+}
+
+static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
+{
+ int ret;
+
+ /*
+ * This can be called very early during boot, so use rIP-relative
+ * references as needed.
+ */
+ if (RIP_REL_REF(snp_vmpl)) {
+ svsm_pval_4k_page(paddr, validate);
+ } else {
+ ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+ if (ret)
+ __pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret, 0);
+ }
+}
+
+static void pval_pages(struct snp_psc_desc *desc)
{
struct psc_entry *e;
unsigned long vaddr;
unsigned int size;
unsigned int i;
bool validate;
+ u64 pfn;
int rc;
for (i = 0; i <= desc->hdr.end_entry; i++) {
e = &desc->entries[i];
- vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
+ pfn = e->gfn;
+ vaddr = (unsigned long)pfn_to_kaddr(pfn);
size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
validate = e->operation == SNP_PAGE_STATE_PRIVATE;
rc = pvalidate(vaddr, size, validate);
+ if (!rc)
+ continue;
+
if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
unsigned long vaddr_end = vaddr + PMD_SIZE;
- for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
+ for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
if (rc)
- break;
+ __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
}
+ } else {
+ __pval_terminate(pfn, validate, size, rc, 0);
}
+ }
+}
+
+static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
+ struct svsm_pvalidate_call *pc)
+{
+ struct svsm_pvalidate_entry *pe;
+
+ /* Nothing in the CA yet */
+ pc->num_entries = 0;
+ pc->cur_index = 0;
+
+ pe = &pc->entry[0];
+
+ while (pfn < pfn_end) {
+ pe->page_size = RMP_PG_SIZE_4K;
+ pe->action = action;
+ pe->ignore_cf = 0;
+ pe->pfn = pfn;
+
+ pe++;
+ pfn++;
+
+ pc->num_entries++;
+ if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+ break;
+ }
+
+ return pfn;
+}
+
+static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
+ struct svsm_pvalidate_call *pc)
+{
+ struct svsm_pvalidate_entry *pe;
+ struct psc_entry *e;
+
+ /* Nothing in the CA yet */
+ pc->num_entries = 0;
+ pc->cur_index = 0;
+
+ pe = &pc->entry[0];
+ e = &desc->entries[desc_entry];
+
+ while (desc_entry <= desc->hdr.end_entry) {
+ pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+ pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
+ pe->ignore_cf = 0;
+ pe->pfn = e->gfn;
+
+ pe++;
+ e++;
+
+ desc_entry++;
+ pc->num_entries++;
+ if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+ break;
+ }
+
+ return desc_entry;
+}
+
+static void svsm_pval_pages(struct snp_psc_desc *desc)
+{
+ struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
+ unsigned int i, pv_4k_count = 0;
+ struct svsm_pvalidate_call *pc;
+ struct svsm_call call = {};
+ unsigned long flags;
+ bool action;
+ u64 pc_pa;
+ int ret;
+
+ /*
+ * This can be called very early in the boot, use native functions in
+ * order to avoid paravirt issues.
+ */
+ flags = native_local_irq_save();
+
+ /*
+ * The SVSM calling area (CA) can support processing 510 entries at a
+ * time. Loop through the Page State Change descriptor until the CA is
+ * full or the last entry in the descriptor is reached, at which time
+ * the SVSM is invoked. This repeats until all entries in the descriptor
+ * are processed.
+ */
+ call.caa = svsm_get_caa();
+
+ pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+ pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+ /* Protocol 0, Call ID 1 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+ call.rcx = pc_pa;
+
+ for (i = 0; i <= desc->hdr.end_entry;) {
+ i = svsm_build_ca_from_psc_desc(desc, i, pc);
+
+ do {
+ ret = svsm_perform_call_protocol(&call);
+ if (!ret)
+ continue;
+
+ /*
+ * Check if the entry failed because of an RMP mismatch (a
+ * PVALIDATE at 2M was requested, but the page is mapped in
+ * the RMP as 4K).
+ */
- if (rc) {
- WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, rc);
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+ if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
+ pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
+ /* Save this entry for post-processing at 4K */
+ pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
+
+ /* Skip to the next one unless at the end of the list */
+ pc->cur_index++;
+ if (pc->cur_index < pc->num_entries)
+ ret = -EAGAIN;
+ else
+ ret = 0;
+ }
+ } while (ret == -EAGAIN);
+
+ if (ret)
+ svsm_pval_terminate(pc, ret, call.rax_out);
+ }
+
+ /* Process any entries that failed to be validated at 2M and validate them at 4K */
+ for (i = 0; i < pv_4k_count; i++) {
+ u64 pfn, pfn_end;
+
+ action = pv_4k[i].action;
+ pfn = pv_4k[i].pfn;
+ pfn_end = pfn + 512;
+
+ while (pfn < pfn_end) {
+ pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
+
+ ret = svsm_perform_call_protocol(&call);
+ if (ret)
+ svsm_pval_terminate(pc, ret, call.rax_out);
}
}
+
+ native_local_irq_restore(flags);
+}
+
+static void pvalidate_pages(struct snp_psc_desc *desc)
+{
+ if (snp_vmpl)
+ svsm_pval_pages(desc);
+ else
+ pval_pages(desc);
}
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
@@ -1269,3 +1641,77 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
return ES_UNSUPPORTED;
}
+
+/*
+ * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
+ * services needed when not running in VMPL0.
+ */
+static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
+{
+ struct snp_secrets_page *secrets_page;
+ struct snp_cpuid_table *cpuid_table;
+ unsigned int i;
+ u64 caa;
+
+ BUILD_BUG_ON(sizeof(*secrets_page) != PAGE_SIZE);
+
+ /*
+ * Check if running at VMPL0.
+ *
+ * Use RMPADJUST (see the rmpadjust() function for a description of what
+ * the instruction does) to update the VMPL1 permissions of a page. If
+ * the guest is running at VMPL0, this will succeed and implies there is
+ * no SVSM. If the guest is running at any other VMPL, this will fail.
+ * Linux SNP guests only ever run at a single VMPL level so permission mask
+ * changes of a lesser-privileged VMPL are a don't-care.
+ *
+ * Use a rip-relative reference to obtain the proper address, since this
+ * routine is running identity mapped when called, both by the decompressor
+ * code and the early kernel code.
+ */
+ if (!rmpadjust((unsigned long)&RIP_REL_REF(boot_ghcb_page), RMP_PG_SIZE_4K, 1))
+ return false;
+
+ /*
+ * Not running at VMPL0, ensure everything has been properly supplied
+ * for running under an SVSM.
+ */
+ if (!cc_info || !cc_info->secrets_phys || cc_info->secrets_len != PAGE_SIZE)
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECRETS_PAGE);
+
+ secrets_page = (struct snp_secrets_page *)cc_info->secrets_phys;
+ if (!secrets_page->svsm_size)
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NO_SVSM);
+
+ if (!secrets_page->svsm_guest_vmpl)
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_VMPL0);
+
+ RIP_REL_REF(snp_vmpl) = secrets_page->svsm_guest_vmpl;
+
+ caa = secrets_page->svsm_caa;
+
+ /*
+ * An open-coded PAGE_ALIGNED() in order to avoid including
+ * kernel-proper headers into the decompressor.
+ */
+ if (caa & (PAGE_SIZE - 1))
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CAA);
+
+ /*
+ * The CA is identity mapped when this routine is called, both by the
+ * decompressor code and the early kernel code.
+ */
+ RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)caa;
+ RIP_REL_REF(boot_svsm_caa_pa) = caa;
+
+ /* Advertise the SVSM presence via CPUID. */
+ cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table();
+ for (i = 0; i < cpuid_table->count; i++) {
+ struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
+
+ if (fn->eax_in == 0x8000001f)
+ fn->eax |= BIT(28);
+ }
+
+ return true;
+}
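
The calling-area batching in svsm_pval_pages()/svsm_build_ca_from_pfn_range() above is easier to see in isolation: fill a fixed-size table, issue one SVSM call per batch, and continue from the returned pfn. The sketch below models only that batching arithmetic; MAX_ENTRIES stands in for SVSM_PVALIDATE_MAX_COUNT and the entry layout is simplified.

#include <stdio.h>

#define MAX_ENTRIES 510	/* stands in for SVSM_PVALIDATE_MAX_COUNT */

struct entry { unsigned long long pfn; int action; };

/* Fill one batch with 4K entries and return the next pfn to continue from. */
static unsigned long long fill_batch(unsigned long long pfn,
				     unsigned long long pfn_end, int action,
				     struct entry *tbl, int *count)
{
	*count = 0;
	while (pfn < pfn_end && *count < MAX_ENTRIES) {
		tbl[*count].pfn = pfn++;
		tbl[*count].action = action;
		(*count)++;
	}
	return pfn;
}

int main(void)
{
	struct entry tbl[MAX_ENTRIES];
	unsigned long long pfn = 0, end = 512;	/* one 2M page of 4K pfns */
	int n, batches = 0;

	while (pfn < end) {
		pfn = fill_batch(pfn, end, 1, tbl, &n);
		batches++;			/* one SVSM call per batch */
	}
	printf("%d batches\n", batches);	/* 510 + 2 entries -> 2 calls */
	return 0;
}
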
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index c1cb90369915..078e2bac2553 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -7,6 +7,7 @@
#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/kexec.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
@@ -14,6 +15,7 @@
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
+#include <asm/set_memory.h>
/* MMIO direction */
#define EPT_READ 0
@@ -38,6 +40,8 @@
#define TDREPORT_SUBTYPE_0 0
+static atomic_long_t nr_shared;
+
/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __noreturn __tdx_hypercall_failed(void)
{
@@ -798,28 +802,124 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
return true;
}
-static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
- bool enc)
+static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
+ bool enc)
{
/*
* Only handle shared->private conversion here.
* See the comment in tdx_early_init().
*/
- if (enc)
- return tdx_enc_status_changed(vaddr, numpages, enc);
- return true;
+ if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+ return -EIO;
+
+ return 0;
}
-static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
+static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
bool enc)
{
/*
* Only handle private->shared conversion here.
* See the comment in tdx_early_init().
*/
- if (!enc)
- return tdx_enc_status_changed(vaddr, numpages, enc);
- return true;
+ if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+ return -EIO;
+
+ if (enc)
+ atomic_long_sub(numpages, &nr_shared);
+ else
+ atomic_long_add(numpages, &nr_shared);
+
+ return 0;
+}
+
+/* Stop new private<->shared conversions */
+static void tdx_kexec_begin(void)
+{
+ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+ return;
+
+ /*
+ * Crash kernel reaches here with interrupts disabled: can't wait for
+ * conversions to finish.
+ *
+ * If a race happened, just report and proceed.
+ */
+ if (!set_memory_enc_stop_conversion())
+ pr_warn("Failed to stop shared<->private conversions\n");
+}
+
+/* Walk direct mapping and convert all shared memory back to private */
+static void tdx_kexec_finish(void)
+{
+ unsigned long addr, end;
+ long found = 0, shared;
+
+ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+ return;
+
+ lockdep_assert_irqs_disabled();
+
+ addr = PAGE_OFFSET;
+ end = PAGE_OFFSET + get_max_mapped();
+
+ while (addr < end) {
+ unsigned long size;
+ unsigned int level;
+ pte_t *pte;
+
+ pte = lookup_address(addr, &level);
+ size = page_level_size(level);
+
+ if (pte && pte_decrypted(*pte)) {
+ int pages = size / PAGE_SIZE;
+
+ /*
+ * Touching memory with shared bit set triggers implicit
+ * conversion to shared.
+ *
+ * Make sure nobody touches the shared range from
+ * now on.
+ */
+ set_pte(pte, __pte(0));
+
+ /*
+ * Memory encryption state persists across kexec.
+ * If tdx_enc_status_changed() fails in the first
+ * kernel, it leaves memory in an unknown state.
+ *
+ * If that memory remains shared, accessing it in the
+ * *next* kernel through a private mapping will result
+ * in an unrecoverable guest shutdown.
+ *
+ * The kdump kernel boot is not impacted as it uses
+ * a pre-reserved memory range that is always private.
+ * However, gathering crash information could lead to
+ * a crash if it accesses unconverted memory through
+ * a private mapping which is possible when accessing
+ * that memory through /proc/vmcore, for example.
+ *
+ * In all cases, print error info in order to leave
+ * enough bread crumbs for debugging.
+ */
+ if (!tdx_enc_status_changed(addr, pages, true)) {
+ pr_err("Failed to unshare range %#lx-%#lx\n",
+ addr, addr + size);
+ }
+
+ found += pages;
+ }
+
+ addr += size;
+ }
+
+ __flush_tlb_all();
+
+ shared = atomic_long_read(&nr_shared);
+ if (shared != found) {
+ pr_err("shared page accounting is off\n");
+ pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found);
+ }
}
void __init tdx_early_init(void)
@@ -881,6 +981,9 @@ void __init tdx_early_init(void)
x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;
+ x86_platform.guest.enc_kexec_begin = tdx_kexec_begin;
+ x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;
+
/*
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
* bringup low level code. That raises #VE which cannot be handled
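
The nr_shared accounting added to the TDX conversion paths above follows a simple invariant: private-to-shared conversions increment the counter, shared-to-private conversions decrement it, and the kexec teardown cross-checks the counter against what the direct-map walk actually found. A user-space model using C11 atomics (the page counts are invented):

#include <stdio.h>
#include <stdatomic.h>

static atomic_long nr_shared;

/* Shares add to the counter, unshares subtract from it. */
static void convert(long pages, int to_shared)
{
	if (to_shared)
		atomic_fetch_add(&nr_shared, pages);
	else
		atomic_fetch_sub(&nr_shared, pages);
}

int main(void)
{
	long found;

	convert(8, 1);		/* share 8 pages */
	convert(3, 0);		/* unshare 3 pages */

	found = 5;		/* pretend the direct-map walk found 5 */
	if (atomic_load(&nr_shared) != found)
		puts("shared page accounting is off");
	else
		puts("accounting consistent");
	return 0;
}
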
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 11c9b8efdc4c..ed0a5f2dc129 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -89,10 +89,6 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
cld
- IBRS_ENTER
- UNTRAIN_RET
- CLEAR_BRANCH_HISTORY
-
/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
* ourselves. To save a few cycles, we can check whether
@@ -116,6 +112,16 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
jnz .Lsysenter_fix_flags
.Lsysenter_flags_fixed:
+ /*
+ * CPU bug mitigation mechanisms can call other functions. They
+ * should be invoked after making sure TF is cleared because
+ * single-step is ignored only for instructions inside the
+ * entry_SYSENTER_compat function.
+ */
+ IBRS_ENTER
+ UNTRAIN_RET
+ CLEAR_BRANCH_HISTORY
+
movq %rsp, %rdi
call do_SYSENTER_32
jmp sysret32_from_system_call
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index c2235bae17ef..8cc9950d7104 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -14,9 +14,12 @@
#endif
#define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
-
+#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __ia32_##sym(const struct pt_regs *);
#include <asm/syscalls_32.h>
-#undef __SYSCALL
+#undef __SYSCALL
+
+#undef __SYSCALL_NORETURN
+#define __SYSCALL_NORETURN __SYSCALL
/*
* The sys_call_table[] is no longer used for system calls, but
@@ -28,11 +31,10 @@
const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_32.h>
};
-#undef __SYSCALL
+#undef __SYSCALL
#endif
#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
-
long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
{
switch (nr) {
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 33b3f09e6f15..ba8354424860 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -8,8 +8,12 @@
#include <asm/syscall.h>
#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
-#undef __SYSCALL
+#undef __SYSCALL
+
+#undef __SYSCALL_NORETURN
+#define __SYSCALL_NORETURN __SYSCALL
/*
* The sys_call_table[] is no longer used for system calls, but
@@ -20,10 +24,9 @@
const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
-#undef __SYSCALL
+#undef __SYSCALL
#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
-
long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
{
switch (nr) {
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index 03de4a932131..fb77908f44f3 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -8,11 +8,14 @@
#include <asm/syscall.h>
#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_x32.h>
-#undef __SYSCALL
+#undef __SYSCALL
-#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
+#undef __SYSCALL_NORETURN
+#define __SYSCALL_NORETURN __SYSCALL
+#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
{
switch (nr) {
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 7fd1f57ad3d3..534c74b14fab 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# 32-bit system call numbers and entry vectors
#
# The format is:
-# <number> <abi> <name> <entry point> <compat entry point>
+# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]]
#
# The __ia32_sys and __ia32_compat_sys stubs are created on-the-fly for
# sys_*() system calls and compat_sys_*() compat system calls if
@@ -12,7 +13,7 @@
# The abi is always "i386" for this file.
#
0 i386 restart_syscall sys_restart_syscall
-1 i386 exit sys_exit
+1 i386 exit sys_exit - noreturn
2 i386 fork sys_fork
3 i386 read sys_read
4 i386 write sys_write
@@ -263,7 +264,7 @@
249 i386 io_cancel sys_io_cancel
250 i386 fadvise64 sys_ia32_fadvise64
# 251 is available for reuse (was briefly sys_set_zone_reclaim)
-252 i386 exit_group sys_exit_group
+252 i386 exit_group sys_exit_group - noreturn
253 i386 lookup_dcookie
254 i386 epoll_create sys_epoll_create
255 i386 epoll_ctl sys_epoll_ctl
@@ -420,7 +421,7 @@
412 i386 utimensat_time64 sys_utimensat
413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 i386 io_pgetevents_time64 sys_io_pgetevents
+416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 i386 mq_timedsend_time64 sys_mq_timedsend
419 i386 mq_timedreceive_time64 sys_mq_timedreceive
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index a396f6e6ab5b..097e5a18db52 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# 64-bit system call numbers and entry vectors
#
# The format is:
-# <number> <abi> <name> <entry point>
+# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]]
#
# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls
#
@@ -68,7 +69,7 @@
57 common fork sys_fork
58 common vfork sys_vfork
59 64 execve sys_execve
-60 common exit sys_exit
+60 common exit sys_exit - noreturn
61 common wait4 sys_wait4
62 common kill sys_kill
63 common uname sys_newuname
@@ -239,7 +240,7 @@
228 common clock_gettime sys_clock_gettime
229 common clock_getres sys_clock_getres
230 common clock_nanosleep sys_clock_nanosleep
-231 common exit_group sys_exit_group
+231 common exit_group sys_exit_group - noreturn
232 common epoll_wait sys_epoll_wait
233 common epoll_ctl sys_epoll_ctl
234 common tgkill sys_tgkill
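
The __SYSCALL_NORETURN changes in the syscall_*.c files above rely on the usual X-macro trick: the generated table header is included twice with different macro definitions, once to emit declarations (where noreturn entries get the attribute) and once to emit the dispatch switch. A self-contained toy version, with a two-entry list standing in for asm/syscalls_64.h:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for asm/syscalls_64.h: two entries, one of them noreturn. */
#define SYSCALL_LIST \
	__SYSCALL(0, toy_read) \
	__SYSCALL_NORETURN(60, toy_exit)

/* Pass 1: declarations; noreturn entries get the attribute. */
#define __SYSCALL(nr, sym)          static long sym(long arg);
#define __SYSCALL_NORETURN(nr, sym) static _Noreturn long sym(long arg);
SYSCALL_LIST
#undef __SYSCALL
#undef __SYSCALL_NORETURN

/* Pass 2: the dispatcher treats both kinds identically. */
#define __SYSCALL_NORETURN __SYSCALL
#define __SYSCALL(nr, sym) case nr: return sym(arg);
static long dispatch(unsigned int nr, long arg)
{
	switch (nr) {
	SYSCALL_LIST
	default: return -1;
	}
}
#undef __SYSCALL
#undef __SYSCALL_NORETURN

static long toy_read(long arg) { return arg; }
static _Noreturn long toy_exit(long arg) { exit((int)arg); }

int main(void)
{
	printf("dispatch(0, 42) = %ld\n", dispatch(0, 42));
	dispatch(60, 0);	/* does not return */
	return 0;
}
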
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 1fc4ce44e743..920e3a640cad 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -432,8 +432,10 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
- for (i = 0; i < x86_pmu.num_counters; i++) {
- if (cmpxchg(nb->owners + i, event, NULL) == event)
+ for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
+ struct perf_event *tmp = event;
+
+ if (try_cmpxchg(nb->owners + i, &tmp, NULL))
break;
}
}
@@ -499,7 +501,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
* because of successive calls to x86_schedule_events() from
* hw_perf_group_sched_in() without hw_perf_enable()
*/
- for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+ for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
if (new == -1 || hwc->idx == idx)
/* assign free slot, prefer hwc->idx */
old = cmpxchg(nb->owners + idx, NULL, event);
@@ -542,7 +544,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
/*
* initialize all possible NB constraints
*/
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
@@ -735,7 +737,7 @@ static void amd_pmu_check_overflow(void)
* counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -755,7 +757,7 @@ static void amd_pmu_enable_all(int added)
amd_brs_enable_all();
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
/* only activate events which are marked as active */
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -978,7 +980,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
/* Clear any reserved bits set by buggy microcode */
status &= amd_pmu_global_cntr_mask;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -1313,7 +1315,7 @@ static __initconst const struct x86_pmu amd_pmu = {
.addr_offset = amd_pmu_addr_offset,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS,
+ .cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
.add = amd_pmu_add_event,
.del = amd_pmu_del_event,
.cntval_bits = 48,
@@ -1412,7 +1414,7 @@ static int __init amd_core_pmu_init(void)
*/
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
- x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
+ x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);
/* Check for Performance Monitoring v2 support */
if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
@@ -1422,9 +1424,9 @@ static int __init amd_core_pmu_init(void)
x86_pmu.version = 2;
/* Find the number of available Core PMCs */
- x86_pmu.num_counters = ebx.split.num_core_pmc;
+ x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);
- amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+ amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;
/* Update PMC handling functions */
x86_pmu.enable_all = amd_pmu_v2_enable_all;
@@ -1452,12 +1454,12 @@ static int __init amd_core_pmu_init(void)
* even numbered counter that has a consecutive adjacent odd
* numbered counter following it.
*/
- for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+ for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
even_ctr_mask |= BIT_ULL(i);
pair_constraint = (struct event_constraint)
__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
- x86_pmu.num_counters / 2, 0,
+ x86_pmu_max_num_counters(NULL) / 2, 0,
PERF_X86_EVENT_PAIR);
x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
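
The recurring change in this file, and in core.c below, is that the plain x86_pmu.num_counters count becomes a per-counter bitmask (cntr_mask64/cntr_mask), built with GENMASK_ULL() and walked with for_each_set_bit(), so sparse counter layouts can be described as well as contiguous ones. A small userspace sketch of the representation (GENMASK_ULL and the iteration are re-implemented locally; the kernel versions live in linux/bits.h and linux/find.h):

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's GENMASK_ULL(h, l) */
#define GENMASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	int num_counters = 6;				/* old representation */
	uint64_t cntr_mask = GENMASK_ULL(num_counters - 1, 0);

	/* a hole (say, counter 3 claimed elsewhere) is now expressible */
	cntr_mask &= ~(1ULL << 3);

	for (int i = 0; i < 64; i++) {	/* stand-in for for_each_set_bit() */
		if (!(cntr_mask & (1ULL << i)))
			continue;
		printf("counter %d usable\n", i);
	}
	return 0;
}
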
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 4ccb8fa483e6..0bfde2ea5cb8 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -162,7 +162,9 @@ static int amd_uncore_add(struct perf_event *event, int flags)
/* if not, take the first available counter */
hwc->idx = -1;
for (i = 0; i < pmu->num_counters; i++) {
- if (cmpxchg(&ctx->events[i], NULL, event) == NULL) {
+ struct perf_event *tmp = NULL;
+
+ if (try_cmpxchg(&ctx->events[i], &tmp, event)) {
hwc->idx = i;
break;
}
@@ -196,7 +198,9 @@ static void amd_uncore_del(struct perf_event *event, int flags)
event->pmu->stop(event, PERF_EF_UPDATE);
for (i = 0; i < pmu->num_counters; i++) {
- if (cmpxchg(&ctx->events[i], event, NULL) == event)
+ struct perf_event *tmp = event;
+
+ if (try_cmpxchg(&ctx->events[i], &tmp, NULL))
break;
}
@@ -639,7 +643,7 @@ void amd_uncore_df_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
info.split.aux_data = 0;
info.split.num_pmcs = NUM_COUNTERS_NB;
info.split.gid = 0;
- info.split.cid = topology_die_id(cpu);
+ info.split.cid = topology_logical_package_id(cpu);
if (pmu_version >= 2) {
ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
@@ -654,17 +658,20 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
{
struct attribute **df_attr = amd_uncore_df_format_attr;
struct amd_uncore_pmu *pmu;
+ int num_counters;
/* Run just once */
if (uncore->init_done)
return amd_uncore_ctx_init(uncore, cpu);
+ num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+ if (!num_counters)
+ goto done;
+
/* No grouping, single instance for a system */
uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL);
- if (!uncore->pmus) {
- uncore->num_pmus = 0;
+ if (!uncore->pmus)
goto done;
- }
/*
* For Family 17h and above, the Northbridge counters are repurposed
@@ -674,7 +681,7 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
pmu = &uncore->pmus[0];
strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_df" : "amd_nb",
sizeof(pmu->name));
- pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+ pmu->num_counters = num_counters;
pmu->msr_base = MSR_F15H_NB_PERF_CTL;
pmu->rdpmc_base = RDPMC_BASE_NB;
pmu->group = amd_uncore_ctx_gid(uncore, cpu);
@@ -785,17 +792,20 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
{
struct attribute **l3_attr = amd_uncore_l3_format_attr;
struct amd_uncore_pmu *pmu;
+ int num_counters;
/* Run just once */
if (uncore->init_done)
return amd_uncore_ctx_init(uncore, cpu);
+ num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+ if (!num_counters)
+ goto done;
+
/* No grouping, single instance for a system */
uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL);
- if (!uncore->pmus) {
- uncore->num_pmus = 0;
+ if (!uncore->pmus)
goto done;
- }
/*
* For Family 17h and above, L3 cache counters are available instead
@@ -805,7 +815,7 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
pmu = &uncore->pmus[0];
strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_l3" : "amd_l2",
sizeof(pmu->name));
- pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+ pmu->num_counters = num_counters;
pmu->msr_base = MSR_F16H_L2I_PERF_CTL;
pmu->rdpmc_base = RDPMC_BASE_LLC;
pmu->group = amd_uncore_ctx_gid(uncore, cpu);
@@ -893,8 +903,8 @@ void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
cpuid(EXT_PERFMON_DEBUG_FEATURES, &eax, &ebx.full, &ecx, &edx);
info.split.aux_data = ecx; /* stash active mask */
info.split.num_pmcs = ebx.split.num_umc_pmc;
- info.split.gid = topology_die_id(cpu);
- info.split.cid = topology_die_id(cpu);
+ info.split.gid = topology_logical_package_id(cpu);
+ info.split.cid = topology_logical_package_id(cpu);
*per_cpu_ptr(uncore->info, cpu) = info;
}
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 5b0dd07b1ef1..12f2a0c14d33 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -189,29 +189,31 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC
-static inline int get_possible_num_counters(void)
+static inline u64 get_possible_counter_mask(void)
{
- int i, num_counters = x86_pmu.num_counters;
+ u64 cntr_mask = x86_pmu.cntr_mask64;
+ int i;
if (!is_hybrid())
- return num_counters;
+ return cntr_mask;
for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
- num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters);
+ cntr_mask |= x86_pmu.hybrid_pmu[i].cntr_mask64;
- return num_counters;
+ return cntr_mask;
}
static bool reserve_pmc_hardware(void)
{
- int i, num_counters = get_possible_num_counters();
+ u64 cntr_mask = get_possible_counter_mask();
+ int i, end;
- for (i = 0; i < num_counters; i++) {
+ for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
goto perfctr_fail;
}
- for (i = 0; i < num_counters; i++) {
+ for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
goto eventsel_fail;
}
@@ -219,13 +221,14 @@ static bool reserve_pmc_hardware(void)
return true;
eventsel_fail:
- for (i--; i >= 0; i--)
+ end = i;
+ for_each_set_bit(i, (unsigned long *)&cntr_mask, end)
release_evntsel_nmi(x86_pmu_config_addr(i));
-
- i = num_counters;
+ i = X86_PMC_IDX_MAX;
perfctr_fail:
- for (i--; i >= 0; i--)
+ end = i;
+ for_each_set_bit(i, (unsigned long *)&cntr_mask, end)
release_perfctr_nmi(x86_pmu_event_addr(i));
return false;
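
The reworked error paths above only undo the bits that were actually reserved: the failing index is captured in 'end' and the second walk is bounded by it. The same idiom in a self-contained userspace form (reserve() and release() below are placeholders, not kernel functions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool reserve(int idx) { return idx != 3; }	/* pretend bit 3 fails */
static void release(int idx) { printf("released %d\n", idx); }

int main(void)
{
	uint64_t mask = 0x2f;	/* bits 0, 1, 2, 3, 5 */
	int i, end;

	for (i = 0; i < 64; i++) {		/* for_each_set_bit(i, &mask, 64) */
		if (!(mask & (1ULL << i)))
			continue;
		if (!reserve(i))
			goto fail;
	}
	return 0;

fail:
	end = i;			/* undo only what was taken: bits below i */
	for (i = 0; i < end; i++) {	/* for_each_set_bit(i, &mask, end) */
		if (!(mask & (1ULL << i)))
			continue;
		release(i);
	}
	return 1;
}
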
@@ -233,9 +236,10 @@ perfctr_fail:
static void release_pmc_hardware(void)
{
- int i, num_counters = get_possible_num_counters();
+ u64 cntr_mask = get_possible_counter_mask();
+ int i;
- for (i = 0; i < num_counters; i++) {
+ for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
release_perfctr_nmi(x86_pmu_event_addr(i));
release_evntsel_nmi(x86_pmu_config_addr(i));
}
@@ -248,7 +252,8 @@ static void release_pmc_hardware(void) {}
#endif
-bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
+bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
+ unsigned long *fixed_cntr_mask)
{
u64 val, val_fail = -1, val_new= ~0;
int i, reg, reg_fail = -1, ret = 0;
@@ -259,7 +264,7 @@ bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
* Check to see if the BIOS enabled any of the counters, if so
* complain and bail.
*/
- for (i = 0; i < num_counters; i++) {
+ for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) {
reg = x86_pmu_config_addr(i);
ret = rdmsrl_safe(reg, &val);
if (ret)
@@ -273,12 +278,12 @@ bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
}
}
- if (num_counters_fixed) {
+ if (*(u64 *)fixed_cntr_mask) {
reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
- for (i = 0; i < num_counters_fixed; i++) {
+ for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) {
if (fixed_counter_disabled(i, pmu))
continue;
if (val & (0x03ULL << i*4)) {
@@ -619,7 +624,7 @@ int x86_pmu_hw_config(struct perf_event *event)
event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
if (event->attr.type == event->pmu->type)
- event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+ event->hw.config |= x86_pmu_get_event_config(event);
if (event->attr.sample_period && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
@@ -679,7 +684,7 @@ void x86_pmu_disable_all(void)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
u64 val;
@@ -736,7 +741,7 @@ void x86_pmu_enable_all(int added)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask))
@@ -975,7 +980,6 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
- int num_counters = hybrid(cpuc->pmu, num_counters);
struct event_constraint *c;
struct perf_event *e;
int n0, i, wmin, wmax, unsched = 0;
@@ -1051,7 +1055,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
/* slow path */
if (i != n) {
- int gpmax = num_counters;
+ int gpmax = x86_pmu_max_num_counters(cpuc->pmu);
/*
* Do not allow scheduling of more than half the available
@@ -1072,7 +1076,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* the extra Merge events needed by large increment events.
*/
if (x86_pmu.flags & PMU_FL_PAIR) {
- gpmax = num_counters - cpuc->n_pair;
+ gpmax -= cpuc->n_pair;
WARN_ON(gpmax <= 0);
}
@@ -1157,12 +1161,10 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
*/
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
- int num_counters = hybrid(cpuc->pmu, num_counters);
- int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
struct perf_event *event;
int n, max_count;
- max_count = num_counters + num_counters_fixed;
+ max_count = x86_pmu_num_counters(cpuc->pmu) + x86_pmu_num_counters_fixed(cpuc->pmu);
/* current number of events already accepted */
n = cpuc->n_events;
@@ -1234,8 +1236,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
fallthrough;
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
- (idx - INTEL_PMC_IDX_FIXED);
+ hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED);
hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
INTEL_PMC_FIXED_RDPMC_BASE;
break;
@@ -1522,13 +1523,13 @@ void perf_event_print_debug(void)
u64 pebs, debugctl;
int cpu = smp_processor_id();
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int num_counters = hybrid(cpuc->pmu, num_counters);
- int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+ unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
+ unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
unsigned long flags;
int idx;
- if (!num_counters)
+ if (!*(u64 *)cntr_mask)
return;
local_irq_save(flags);
@@ -1555,7 +1556,7 @@ void perf_event_print_debug(void)
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
- for (idx = 0; idx < num_counters; idx++) {
+ for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) {
rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
rdmsrl(x86_pmu_event_addr(idx), pmc_count);
@@ -1568,10 +1569,10 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
- for (idx = 0; idx < num_counters_fixed; idx++) {
+ for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
if (fixed_counter_disabled(idx, cpuc->pmu))
continue;
- rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
+ rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
@@ -1682,7 +1683,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -2038,18 +2039,15 @@ static void _x86_pmu_read(struct perf_event *event)
static_call(x86_pmu_update)(event);
}
-void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
- u64 intel_ctrl)
+void x86_pmu_show_pmu_cap(struct pmu *pmu)
{
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
- pr_info("... generic registers: %d\n", num_counters);
+ pr_info("... generic registers: %d\n", x86_pmu_num_counters(pmu));
pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose events: %lu\n",
- hweight64((((1ULL << num_counters_fixed) - 1)
- << INTEL_PMC_IDX_FIXED) & intel_ctrl));
- pr_info("... event mask: %016Lx\n", intel_ctrl);
+ pr_info("... fixed-purpose events: %d\n", x86_pmu_num_counters_fixed(pmu));
+ pr_info("... event mask: %016Lx\n", hybrid(pmu, intel_ctrl));
}
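
With the counts replaced by masks, the x86_pmu_num_counters() and x86_pmu_num_counters_fixed() helpers used above presumably reduce to population counts over the (possibly hybrid-merged) masks; the helpers themselves are not part of this hunk. A worked userspace equivalent:

#include <stdint.h>
#include <stdio.h>

static int popcount64(uint64_t v)
{
	int n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each round */
		n++;
	return n;
}

int main(void)
{
	uint64_t cntr_mask = 0x3f;	/* six generic counters      */
	uint64_t fixed_cntr_mask = 0x7;	/* three fixed-purpose ones  */

	printf("... generic registers:      %d\n", popcount64(cntr_mask));
	printf("... fixed-purpose events:   %d\n", popcount64(fixed_cntr_mask));
	return 0;
}
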
static int __init init_hw_perf_events(void)
@@ -2086,7 +2084,7 @@ static int __init init_hw_perf_events(void)
pmu_check_apic();
/* sanity check that the hardware exists or is emulated */
- if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed))
+ if (!check_hw_exists(&pmu, x86_pmu.cntr_mask, x86_pmu.fixed_cntr_mask))
goto out_bad_pmu;
pr_cont("%s PMU driver.\n", x86_pmu.name);
@@ -2097,14 +2095,17 @@ static int __init init_hw_perf_events(void)
quirk->func();
if (!x86_pmu.intel_ctrl)
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+ x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
+
+ if (!x86_pmu.config_mask)
+ x86_pmu.config_mask = X86_RAW_EVENT_MASK;
perf_events_lapic_init();
register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
unconstrained = (struct event_constraint)
- __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
- 0, x86_pmu.num_counters, 0, 0);
+ __EVENT_CONSTRAINT(0, x86_pmu.cntr_mask64,
+ 0, x86_pmu_num_counters(NULL), 0, 0);
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
@@ -2113,11 +2114,8 @@ static int __init init_hw_perf_events(void)
pmu.attr_update = x86_pmu.attr_update;
- if (!is_hybrid()) {
- x86_pmu_show_pmu_cap(x86_pmu.num_counters,
- x86_pmu.num_counters_fixed,
- x86_pmu.intel_ctrl);
- }
+ if (!is_hybrid())
+ x86_pmu_show_pmu_cap(NULL);
if (!x86_pmu.read)
x86_pmu.read = _x86_pmu_read;
@@ -2481,10 +2479,10 @@ void perf_clear_dirty_counters(void)
for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
if (i >= INTEL_PMC_IDX_FIXED) {
/* Metrics and fake events don't have corresponding HW counters. */
- if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
+ if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
continue;
- wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+ wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
} else {
wrmsrl(x86_pmu_event_addr(i), 0);
}
@@ -2547,6 +2545,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ static DEFINE_MUTEX(rdpmc_mutex);
unsigned long val;
ssize_t ret;
@@ -2560,6 +2559,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (x86_pmu.attr_rdpmc_broken)
return -ENOTSUPP;
+ guard(mutex)(&rdpmc_mutex);
+
if (val != x86_pmu.attr_rdpmc) {
/*
* Changing into or out of never available or always available,
@@ -2983,8 +2984,8 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
* base PMU holds the correct number of counters for P-cores.
*/
cap->version = x86_pmu.version;
- cap->num_counters_gp = x86_pmu.num_counters;
- cap->num_counters_fixed = x86_pmu.num_counters_fixed;
+ cap->num_counters_gp = x86_pmu_num_counters(NULL);
+ cap->num_counters_fixed = x86_pmu_num_counters_fixed(NULL);
cap->bit_width_gp = x86_pmu.cntval_bits;
cap->bit_width_fixed = x86_pmu.cntval_bits;
cap->events_mask = (unsigned int)x86_pmu.events_maskl;
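
The set_attr_rdpmc() hunk above serializes writers with guard(mutex)(&rdpmc_mutex), the kernel's scope-based lock guard: the mutex is dropped automatically on every return path. The same effect can be sketched in plain C with the compiler cleanup attribute and pthreads (this only models the idea, it is not the linux/cleanup.h implementation):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rdpmc_mutex = PTHREAD_MUTEX_INITIALIZER;

static void unlock_on_exit(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static int set_attr(long val)
{
	pthread_mutex_t *guard __attribute__((cleanup(unlock_on_exit))) =
								&rdpmc_mutex;

	pthread_mutex_lock(guard);
	if (val < 0)
		return -1;	/* early return: still unlocked on exit */
	printf("accepted %ld\n", val);
	return 0;		/* normal return: also unlocked on exit */
}

int main(void)
{
	set_attr(1);
	set_attr(-1);
	return 0;
}
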
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 38c1b1f1deaa..0c9c2706d4ec 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -220,6 +220,17 @@ static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
EVENT_CONSTRAINT_END
};
+static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
+ FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
+ FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
+ FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
+ EVENT_CONSTRAINT_END
+};
+
static struct event_constraint intel_skl_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -370,6 +381,55 @@ static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+static struct event_constraint intel_lnc_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
+ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
+
+ INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
+
+ INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
+ INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
+ /*
+ * Generally event codes < 0x90 are restricted to counters 0-3.
+	 * The 0x2E and 0x3C are exceptions, which have no restriction.
+ */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
+
+ INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
+ INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
+ INTEL_EVENT_CONSTRAINT(0xce, 0x1),
+
+ INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
+ /*
+ * Generally event codes >= 0x90 are likely to have no restrictions.
+	 * The exceptions are defined above.
+ */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),
+
+ EVENT_CONSTRAINT_END
+};
+
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
@@ -2874,26 +2934,26 @@ static void intel_pmu_reset(void)
{
struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
- int num_counters = hybrid(cpuc->pmu, num_counters);
+ unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
+ unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
unsigned long flags;
int idx;
- if (!num_counters)
+ if (!*(u64 *)cntr_mask)
return;
local_irq_save(flags);
pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
- for (idx = 0; idx < num_counters; idx++) {
+ for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
}
- for (idx = 0; idx < num_counters_fixed; idx++) {
+ for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
if (fixed_counter_disabled(idx, cpuc->pmu))
continue;
- wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+ wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
}
if (ds)
@@ -2940,8 +3000,7 @@ static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
!guest_pebs_idxs)
return;
- for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs,
- INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) {
+ for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
event = cpuc->events[bit];
if (!event->attr.precise_ip)
continue;
@@ -4199,7 +4258,7 @@ static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[idx];
arr[idx].msr = x86_pmu_config_addr(idx);
@@ -4217,7 +4276,7 @@ static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
}
- *nr = x86_pmu.num_counters;
+ *nr = x86_pmu_max_num_counters(cpuc->pmu);
return arr;
}
@@ -4232,7 +4291,7 @@ static void core_pmu_enable_all(int added)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask) ||
@@ -4573,8 +4632,55 @@ PMU_FORMAT_ATTR(pc, "config:19" );
PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
PMU_FORMAT_ATTR(inv, "config:23" );
PMU_FORMAT_ATTR(cmask, "config:24-31" );
-PMU_FORMAT_ATTR(in_tx, "config:32");
-PMU_FORMAT_ATTR(in_tx_cp, "config:33");
+PMU_FORMAT_ATTR(in_tx, "config:32" );
+PMU_FORMAT_ATTR(in_tx_cp, "config:33" );
+PMU_FORMAT_ATTR(eq, "config:36" ); /* v6 + */
+
+static ssize_t umask2_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2;
+
+ if (mask == ARCH_PERFMON_EVENTSEL_UMASK2)
+ return sprintf(page, "config:8-15,40-47\n");
+
+ /* Roll back to the old format if umask2 is not supported. */
+ return sprintf(page, "config:8-15\n");
+}
+
+static struct device_attribute format_attr_umask2 =
+ __ATTR(umask, 0444, umask2_show, NULL);
+
+static struct attribute *format_evtsel_ext_attrs[] = {
+ &format_attr_umask2.attr,
+ &format_attr_eq.attr,
+ NULL
+};
+
+static umode_t
+evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ u64 mask;
+
+ /*
+ * The umask and umask2 have different formats but share the
+ * same attr name. In update mode, the previous value of the
+ * umask is unconditionally removed before is_visible. If
+ * umask2 format is not enumerated, it's impossible to roll
+ * back to the old format.
+	 * Do the check in umask2_show rather than in is_visible.
+ */
+ if (i == 0)
+ return attr->mode;
+
+ mask = hybrid(dev_get_drvdata(dev), config_mask);
+ if (i == 1)
+ return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0;
+
+ return 0;
+}
static struct attribute *intel_arch_formats_attr[] = {
&format_attr_event.attr,
@@ -4684,13 +4790,33 @@ static void flip_smm_bit(void *data)
}
}
-static void intel_pmu_check_num_counters(int *num_counters,
- int *num_counters_fixed,
- u64 *intel_ctrl, u64 fixed_mask);
+static void intel_pmu_check_counters_mask(u64 *cntr_mask,
+ u64 *fixed_cntr_mask,
+ u64 *intel_ctrl)
+{
+ unsigned int bit;
+
+ bit = fls64(*cntr_mask);
+ if (bit > INTEL_PMC_MAX_GENERIC) {
+ WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+ bit, INTEL_PMC_MAX_GENERIC);
+ *cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
+ }
+ *intel_ctrl = *cntr_mask;
+
+ bit = fls64(*fixed_cntr_mask);
+ if (bit > INTEL_PMC_MAX_FIXED) {
+ WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+ bit, INTEL_PMC_MAX_FIXED);
+ *fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
+ }
+
+ *intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
+}
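
A worked example of what the new intel_pmu_check_counters_mask() does: clip an oversized mask with GENMASK_ULL(), then fold the general-purpose and fixed bits into the global-control layout (fixed-counter enable bits live at bit 32 and up). Userspace sketch with local stand-ins for the kernel macros; the generic-counter limit is an illustrative value:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & (~0ULL << (l)))
#define MAX_GENERIC	8	/* illustrative limit, not the kernel constant */
#define IDX_FIXED	32	/* fixed enable bits start at bit 32 */

static int fls64(uint64_t v)	/* 0 for v == 0, else highest set bit + 1 */
{
	int bit = 0;

	while (v) {
		bit++;
		v >>= 1;
	}
	return bit;
}

int main(void)
{
	uint64_t cntr_mask = GENMASK_ULL(9, 0);	/* pretend 10 were enumerated */
	uint64_t fixed_cntr_mask = GENMASK_ULL(2, 0);
	uint64_t intel_ctrl;

	if (fls64(cntr_mask) > MAX_GENERIC)
		cntr_mask &= GENMASK_ULL(MAX_GENERIC - 1, 0);	/* clip */

	intel_ctrl = cntr_mask;
	intel_ctrl |= fixed_cntr_mask << IDX_FIXED;

	printf("cntr_mask  = %#llx\n", (unsigned long long)cntr_mask);
	printf("intel_ctrl = %#llx\n", (unsigned long long)intel_ctrl);
	return 0;
}
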
static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
- int num_counters,
- int num_counters_fixed,
+ u64 cntr_mask,
+ u64 fixed_cntr_mask,
u64 intel_ctrl);
static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
@@ -4698,8 +4824,8 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
static inline bool intel_pmu_broken_perf_cap(void)
{
/* The Perf Metric (Bit 15) is always cleared */
- if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) ||
- (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L))
+ if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
+ boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
return true;
return false;
@@ -4707,17 +4833,22 @@ static inline bool intel_pmu_broken_perf_cap(void)
static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
{
- unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF);
- unsigned int eax, ebx, ecx, edx;
+ unsigned int sub_bitmaps, eax, ebx, ecx, edx;
+
+ cpuid(ARCH_PERFMON_EXT_LEAF, &sub_bitmaps, &ebx, &ecx, &edx);
+
+ if (ebx & ARCH_PERFMON_EXT_UMASK2)
+ pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
+ if (ebx & ARCH_PERFMON_EXT_EQ)
+ pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
&eax, &ebx, &ecx, &edx);
- pmu->num_counters = fls(eax);
- pmu->num_counters_fixed = fls(ebx);
+ pmu->cntr_mask64 = eax;
+ pmu->fixed_cntr_mask64 = ebx;
}
-
if (!intel_pmu_broken_perf_cap()) {
/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
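
update_pmu_cap() now issues a full cpuid() on sub-leaf 0 of the arch-PerfMon extension leaf so it can pick up the UMASK2/EQ capability bits in EBX before looking at the counter-bitmap sub-leaf. A userspace approximation using the compiler's cpuid.h; the leaf number, sub-leaf number, and bit positions here are assumptions for illustration, the kernel uses its own defines:

#include <cpuid.h>
#include <stdio.h>

#define ARCH_PERFMON_EXT_LEAF	0x23	/* assumed value of the kernel define */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, 0, &eax, &ebx, &ecx, &edx)) {
		printf("extension leaf not supported\n");
		return 0;
	}

	printf("sub-leaf bitmap (eax): %#x\n", eax);
	printf("umask2 capable:        %u\n", ebx & 1);		/* bit assumed */
	printf("eq capable:            %u\n", (ebx >> 1) & 1);	/* bit assumed */

	/* counter bitmaps are enumerated in a further sub-leaf (1 assumed) */
	if (eax & 0x2) {
		__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, 1, &eax, &ebx, &ecx, &edx);
		printf("gp counter mask:    %#x\n", eax);
		printf("fixed counter mask: %#x\n", ebx);
	}
	return 0;
}
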
@@ -4726,12 +4857,12 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
{
- intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
- &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1);
- pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+ intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64,
+ &pmu->intel_ctrl);
+ pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
pmu->unconstrained = (struct event_constraint)
- __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
- 0, pmu->num_counters, 0, 0);
+ __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
+ 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
if (pmu->intel_cap.perf_metrics)
pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
@@ -4744,8 +4875,8 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
intel_pmu_check_event_constraints(pmu->event_constraints,
- pmu->num_counters,
- pmu->num_counters_fixed,
+ pmu->cntr_mask64,
+ pmu->fixed_cntr_mask64,
pmu->intel_ctrl);
intel_pmu_check_extra_regs(pmu->extra_regs);
@@ -4806,7 +4937,7 @@ static bool init_hybrid_pmu(int cpu)
intel_pmu_check_hybrid_pmus(pmu);
- if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
+ if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask))
return false;
pr_info("%s PMU driver: ", pmu->name);
@@ -4816,8 +4947,7 @@ static bool init_hybrid_pmu(int cpu)
pr_cont("\n");
- x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
- pmu->intel_ctrl);
+ x86_pmu_show_pmu_cap(&pmu->pmu);
end:
cpumask_set_cpu(cpu, &pmu->supported_cpus);
@@ -5058,6 +5188,7 @@ static __initconst const struct x86_pmu core_pmu = {
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
@@ -5111,6 +5242,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0,
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
@@ -5187,35 +5319,35 @@ static __init void intel_clovertown_quirk(void)
}
static const struct x86_cpu_desc isolation_ucodes[] = {
- INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
- INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
- INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
- INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
- INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_HASWELL, 3, 0x0000001f),
+ INTEL_CPU_DESC(INTEL_HASWELL_L, 1, 0x0000001e),
+ INTEL_CPU_DESC(INTEL_HASWELL_G, 1, 0x00000015),
+ INTEL_CPU_DESC(INTEL_HASWELL_X, 2, 0x00000037),
+ INTEL_CPU_DESC(INTEL_HASWELL_X, 4, 0x0000000a),
+ INTEL_CPU_DESC(INTEL_BROADWELL, 4, 0x00000023),
+ INTEL_CPU_DESC(INTEL_BROADWELL_G, 1, 0x00000014),
+ INTEL_CPU_DESC(INTEL_BROADWELL_D, 2, 0x00000010),
+ INTEL_CPU_DESC(INTEL_BROADWELL_D, 3, 0x07000009),
+ INTEL_CPU_DESC(INTEL_BROADWELL_D, 4, 0x0f000009),
+ INTEL_CPU_DESC(INTEL_BROADWELL_D, 5, 0x0e000002),
+ INTEL_CPU_DESC(INTEL_BROADWELL_X, 1, 0x0b000014),
+ INTEL_CPU_DESC(INTEL_SKYLAKE_X, 3, 0x00000021),
+ INTEL_CPU_DESC(INTEL_SKYLAKE_X, 4, 0x00000000),
+ INTEL_CPU_DESC(INTEL_SKYLAKE_X, 5, 0x00000000),
+ INTEL_CPU_DESC(INTEL_SKYLAKE_X, 6, 0x00000000),
+ INTEL_CPU_DESC(INTEL_SKYLAKE_X, 7, 0x00000000),
+ INTEL_CPU_DESC(INTEL_SKYLAKE_X, 11, 0x00000000),
+ INTEL_CPU_DESC(INTEL_SKYLAKE_L, 3, 0x0000007c),
+ INTEL_CPU_DESC(INTEL_SKYLAKE, 3, 0x0000007c),
+ INTEL_CPU_DESC(INTEL_KABYLAKE, 9, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE_L, 9, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE_L, 10, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE_L, 11, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE_L, 12, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE, 10, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE, 11, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE, 12, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_KABYLAKE, 13, 0x0000004e),
{}
};
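
The table above is part of the tree-wide move from bare INTEL_FAM6_* model numbers to packed vendor-family-model (VFM) identifiers such as INTEL_SKYLAKE_X, so a single integer comparison can never match the same model number from a different family. A toy encoding to show the idea; the bit layout below is an assumption for illustration, not the kernel's:

#include <stdio.h>

#define VFM(vendor, family, model) \
	(((vendor) << 16) | ((family) << 8) | (model))

#define VENDOR_INTEL	0			/* illustrative */
#define MY_SKYLAKE_L	VFM(VENDOR_INTEL, 6, 0x4e)	/* illustrative */

int main(void)
{
	unsigned int cpu_vfm = VFM(VENDOR_INTEL, 6, 0x4e);

	if (cpu_vfm == MY_SKYLAKE_L)
		printf("matched Skylake-L with one comparison\n");
	return 0;
}
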
@@ -5232,9 +5364,9 @@ static __init void intel_pebs_isolation_quirk(void)
}
static const struct x86_cpu_desc pebs_ucodes[] = {
- INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
- INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
- INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
+ INTEL_CPU_DESC(INTEL_SANDYBRIDGE, 7, 0x00000028),
+ INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 6, 0x00000618),
+ INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 7, 0x0000070c),
{}
};
@@ -5698,8 +5830,22 @@ exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
return x86_pmu.version >= 2 ? attr->mode : 0;
}
+static umode_t
+td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ /*
+ * Hide the perf metrics topdown events
+ * if the feature is not enumerated.
+ */
+ if (x86_pmu.num_topdown_events)
+ return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
+
+ return attr->mode;
+}
+
static struct attribute_group group_events_td = {
.name = "events",
+ .is_visible = td_is_visible,
};
static struct attribute_group group_events_mem = {
@@ -5733,6 +5879,12 @@ static struct attribute_group group_format_extra_skl = {
.is_visible = exra_is_visible,
};
+static struct attribute_group group_format_evtsel_ext = {
+ .name = "format",
+ .attrs = format_evtsel_ext_attrs,
+ .is_visible = evtsel_ext_is_visible,
+};
+
static struct attribute_group group_default = {
.attrs = intel_pmu_attrs,
.is_visible = default_is_visible,
@@ -5746,6 +5898,7 @@ static const struct attribute_group *attr_update[] = {
&group_caps_lbr,
&group_format_extra,
&group_format_extra_skl,
+ &group_format_evtsel_ext,
&group_default,
NULL,
};
@@ -5773,6 +5926,23 @@ static struct attribute *adl_hybrid_events_attrs[] = {
NULL,
};
+EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
+
+static struct attribute *lnl_hybrid_events_attrs[] = {
+ EVENT_PTR(slots_adl),
+ EVENT_PTR(td_retiring_lnl),
+ EVENT_PTR(td_bad_spec_adl),
+ EVENT_PTR(td_fe_bound_lnl),
+ EVENT_PTR(td_be_bound_lnl),
+ EVENT_PTR(td_heavy_ops_adl),
+ EVENT_PTR(td_br_mis_adl),
+ EVENT_PTR(td_fetch_lat_adl),
+ EVENT_PTR(td_mem_bound_adl),
+ NULL
+};
+
/* Must be in IDX order */
EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
@@ -5901,9 +6071,27 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj,
return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
}
+static umode_t hybrid_td_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct x86_hybrid_pmu *pmu =
+ container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+
+ if (!is_attr_for_this_pmu(kobj, attr))
+ return 0;
+
+ /* Only the big core supports perf metrics */
+ if (pmu->pmu_type == hybrid_big)
+ return pmu->intel_cap.perf_metrics ? attr->mode : 0;
+
+ return attr->mode;
+}
+
static struct attribute_group hybrid_group_events_td = {
.name = "events",
- .is_visible = hybrid_events_is_visible,
+ .is_visible = hybrid_td_is_visible,
};
static struct attribute_group hybrid_group_events_mem = {
@@ -5948,6 +6136,7 @@ static const struct attribute_group *hybrid_attr_update[] = {
&group_caps_gen,
&group_caps_lbr,
&hybrid_group_format_extra,
+ &group_format_evtsel_ext,
&group_default,
&hybrid_group_cpus,
NULL,
@@ -5955,29 +6144,9 @@ static const struct attribute_group *hybrid_attr_update[] = {
static struct attribute *empty_attrs;
-static void intel_pmu_check_num_counters(int *num_counters,
- int *num_counters_fixed,
- u64 *intel_ctrl, u64 fixed_mask)
-{
- if (*num_counters > INTEL_PMC_MAX_GENERIC) {
- WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
- *num_counters, INTEL_PMC_MAX_GENERIC);
- *num_counters = INTEL_PMC_MAX_GENERIC;
- }
- *intel_ctrl = (1ULL << *num_counters) - 1;
-
- if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
- WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
- *num_counters_fixed, INTEL_PMC_MAX_FIXED);
- *num_counters_fixed = INTEL_PMC_MAX_FIXED;
- }
-
- *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
-}
-
static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
- int num_counters,
- int num_counters_fixed,
+ u64 cntr_mask,
+ u64 fixed_cntr_mask,
u64 intel_ctrl)
{
struct event_constraint *c;
@@ -6014,10 +6183,9 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con
* generic counters
*/
if (!use_fixed_pseudo_encoding(c->code))
- c->idxmsk64 |= (1ULL << num_counters) - 1;
+ c->idxmsk64 |= cntr_mask;
}
- c->idxmsk64 &=
- ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
+ c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED);
c->weight = hweight64(c->idxmsk64);
}
}
@@ -6042,6 +6210,11 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
}
}
+static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
+{
+ return MSR_IA32_PMC_V6_STEP * index;
+}
+
static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
{ hybrid_small, "cpu_atom" },
{ hybrid_big, "cpu_core" },
@@ -6068,12 +6241,13 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
pmu->name = intel_hybrid_pmu_type_map[bit].name;
- pmu->num_counters = x86_pmu.num_counters;
- pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
- pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+ pmu->cntr_mask64 = x86_pmu.cntr_mask64;
+ pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
+ pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
+ pmu->config_mask = X86_RAW_EVENT_MASK;
pmu->unconstrained = (struct event_constraint)
- __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
- 0, pmu->num_counters, 0, 0);
+ __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
+ 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
if (pmu->pmu_type & hybrid_small) {
@@ -6143,6 +6317,21 @@ static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
intel_pmu_ref_cycles_ext();
}
+static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
+{
+ intel_pmu_init_glc(pmu);
+ hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
+ hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
+ hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
+}
+
+static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
+{
+ intel_pmu_init_grt(pmu);
+ hybrid(pmu, event_constraints) = intel_skt_event_constraints;
+ hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
+}
+
__init int intel_pmu_init(void)
{
struct attribute **extra_skl_attr = &empty_attrs;
@@ -6186,14 +6375,14 @@ __init int intel_pmu_init(void)
x86_pmu = intel_pmu;
x86_pmu.version = version;
- x86_pmu.num_counters = eax.split.num_counters;
+ x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
x86_pmu.events_maskl = ebx.full;
x86_pmu.events_mask_len = eax.split.mask_length;
- x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+ x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64);
x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
/*
@@ -6203,12 +6392,10 @@ __init int intel_pmu_init(void)
if (version > 1 && version < 5) {
int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
- x86_pmu.num_counters_fixed =
- max((int)edx.split.num_counters_fixed, assume);
-
- fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
+ x86_pmu.fixed_cntr_mask64 =
+ GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0);
} else if (version >= 5)
- x86_pmu.num_counters_fixed = fls(fixed_mask);
+ x86_pmu.fixed_cntr_mask64 = fixed_mask;
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
@@ -6238,19 +6425,19 @@ __init int intel_pmu_init(void)
/*
* Install the hw-cache-events table:
*/
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_CORE_YONAH:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_CORE_YONAH:
pr_cont("Core events, ");
name = "core";
break;
- case INTEL_FAM6_CORE2_MEROM:
+ case INTEL_CORE2_MEROM:
x86_add_quirk(intel_clovertown_quirk);
fallthrough;
- case INTEL_FAM6_CORE2_MEROM_L:
- case INTEL_FAM6_CORE2_PENRYN:
- case INTEL_FAM6_CORE2_DUNNINGTON:
+ case INTEL_CORE2_MEROM_L:
+ case INTEL_CORE2_PENRYN:
+ case INTEL_CORE2_DUNNINGTON:
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6262,9 +6449,9 @@ __init int intel_pmu_init(void)
name = "core2";
break;
- case INTEL_FAM6_NEHALEM:
- case INTEL_FAM6_NEHALEM_EP:
- case INTEL_FAM6_NEHALEM_EX:
+ case INTEL_NEHALEM:
+ case INTEL_NEHALEM_EP:
+ case INTEL_NEHALEM_EX:
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -6296,11 +6483,11 @@ __init int intel_pmu_init(void)
name = "nehalem";
break;
- case INTEL_FAM6_ATOM_BONNELL:
- case INTEL_FAM6_ATOM_BONNELL_MID:
- case INTEL_FAM6_ATOM_SALTWELL:
- case INTEL_FAM6_ATOM_SALTWELL_MID:
- case INTEL_FAM6_ATOM_SALTWELL_TABLET:
+ case INTEL_ATOM_BONNELL:
+ case INTEL_ATOM_BONNELL_MID:
+ case INTEL_ATOM_SALTWELL:
+ case INTEL_ATOM_SALTWELL_MID:
+ case INTEL_ATOM_SALTWELL_TABLET:
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6313,11 +6500,11 @@ __init int intel_pmu_init(void)
name = "bonnell";
break;
- case INTEL_FAM6_ATOM_SILVERMONT:
- case INTEL_FAM6_ATOM_SILVERMONT_D:
- case INTEL_FAM6_ATOM_SILVERMONT_MID:
- case INTEL_FAM6_ATOM_AIRMONT:
- case INTEL_FAM6_ATOM_AIRMONT_MID:
+ case INTEL_ATOM_SILVERMONT:
+ case INTEL_ATOM_SILVERMONT_D:
+ case INTEL_ATOM_SILVERMONT_MID:
+ case INTEL_ATOM_AIRMONT:
+ case INTEL_ATOM_AIRMONT_MID:
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -6335,8 +6522,8 @@ __init int intel_pmu_init(void)
name = "silvermont";
break;
- case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GOLDMONT_D:
+ case INTEL_ATOM_GOLDMONT:
+ case INTEL_ATOM_GOLDMONT_D:
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -6362,7 +6549,7 @@ __init int intel_pmu_init(void)
name = "goldmont";
break;
- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_ATOM_GOLDMONT_PLUS:
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
@@ -6391,9 +6578,9 @@ __init int intel_pmu_init(void)
name = "goldmont_plus";
break;
- case INTEL_FAM6_ATOM_TREMONT_D:
- case INTEL_FAM6_ATOM_TREMONT:
- case INTEL_FAM6_ATOM_TREMONT_L:
+ case INTEL_ATOM_TREMONT_D:
+ case INTEL_ATOM_TREMONT:
+ case INTEL_ATOM_TREMONT_L:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6420,10 +6607,10 @@ __init int intel_pmu_init(void)
name = "Tremont";
break;
- case INTEL_FAM6_ATOM_GRACEMONT:
+ case INTEL_ATOM_GRACEMONT:
intel_pmu_init_grt(NULL);
intel_pmu_pebs_data_source_grt();
- x86_pmu.pebs_latency_data = adl_latency_data_small;
+ x86_pmu.pebs_latency_data = grt_latency_data;
x86_pmu.get_event_constraints = tnt_get_event_constraints;
td_attr = tnt_events_attrs;
mem_attr = grt_mem_attrs;
@@ -6432,12 +6619,12 @@ __init int intel_pmu_init(void)
name = "gracemont";
break;
- case INTEL_FAM6_ATOM_CRESTMONT:
- case INTEL_FAM6_ATOM_CRESTMONT_X:
+ case INTEL_ATOM_CRESTMONT:
+ case INTEL_ATOM_CRESTMONT_X:
intel_pmu_init_grt(NULL);
x86_pmu.extra_regs = intel_cmt_extra_regs;
intel_pmu_pebs_data_source_cmt();
- x86_pmu.pebs_latency_data = mtl_latency_data_small;
+ x86_pmu.pebs_latency_data = cmt_latency_data;
x86_pmu.get_event_constraints = cmt_get_event_constraints;
td_attr = cmt_events_attrs;
mem_attr = grt_mem_attrs;
@@ -6446,9 +6633,9 @@ __init int intel_pmu_init(void)
name = "crestmont";
break;
- case INTEL_FAM6_WESTMERE:
- case INTEL_FAM6_WESTMERE_EP:
- case INTEL_FAM6_WESTMERE_EX:
+ case INTEL_WESTMERE:
+ case INTEL_WESTMERE_EP:
+ case INTEL_WESTMERE_EX:
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -6477,8 +6664,8 @@ __init int intel_pmu_init(void)
name = "westmere";
break;
- case INTEL_FAM6_SANDYBRIDGE:
- case INTEL_FAM6_SANDYBRIDGE_X:
+ case INTEL_SANDYBRIDGE:
+ case INTEL_SANDYBRIDGE_X:
x86_add_quirk(intel_sandybridge_quirk);
x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
@@ -6491,7 +6678,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
+ if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -6517,8 +6704,8 @@ __init int intel_pmu_init(void)
name = "sandybridge";
break;
- case INTEL_FAM6_IVYBRIDGE:
- case INTEL_FAM6_IVYBRIDGE_X:
+ case INTEL_IVYBRIDGE:
+ case INTEL_IVYBRIDGE_X:
x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6534,7 +6721,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
x86_pmu.pebs_prec_dist = true;
- if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
+ if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -6556,10 +6743,10 @@ __init int intel_pmu_init(void)
break;
- case INTEL_FAM6_HASWELL:
- case INTEL_FAM6_HASWELL_X:
- case INTEL_FAM6_HASWELL_L:
- case INTEL_FAM6_HASWELL_G:
+ case INTEL_HASWELL:
+ case INTEL_HASWELL_X:
+ case INTEL_HASWELL_L:
+ case INTEL_HASWELL_G:
x86_add_quirk(intel_ht_bug);
x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
@@ -6589,10 +6776,10 @@ __init int intel_pmu_init(void)
name = "haswell";
break;
- case INTEL_FAM6_BROADWELL:
- case INTEL_FAM6_BROADWELL_D:
- case INTEL_FAM6_BROADWELL_G:
- case INTEL_FAM6_BROADWELL_X:
+ case INTEL_BROADWELL:
+ case INTEL_BROADWELL_D:
+ case INTEL_BROADWELL_G:
+ case INTEL_BROADWELL_X:
x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -6631,8 +6818,8 @@ __init int intel_pmu_init(void)
name = "broadwell";
break;
- case INTEL_FAM6_XEON_PHI_KNL:
- case INTEL_FAM6_XEON_PHI_KNM:
+ case INTEL_XEON_PHI_KNL:
+ case INTEL_XEON_PHI_KNM:
memcpy(hw_cache_event_ids,
slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs,
@@ -6651,15 +6838,15 @@ __init int intel_pmu_init(void)
name = "knights-landing";
break;
- case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_SKYLAKE_X:
pmem = true;
fallthrough;
- case INTEL_FAM6_SKYLAKE_L:
- case INTEL_FAM6_SKYLAKE:
- case INTEL_FAM6_KABYLAKE_L:
- case INTEL_FAM6_KABYLAKE:
- case INTEL_FAM6_COMETLAKE_L:
- case INTEL_FAM6_COMETLAKE:
+ case INTEL_SKYLAKE_L:
+ case INTEL_SKYLAKE:
+ case INTEL_KABYLAKE_L:
+ case INTEL_KABYLAKE:
+ case INTEL_COMETLAKE_L:
+ case INTEL_COMETLAKE:
x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -6708,16 +6895,16 @@ __init int intel_pmu_init(void)
name = "skylake";
break;
- case INTEL_FAM6_ICELAKE_X:
- case INTEL_FAM6_ICELAKE_D:
+ case INTEL_ICELAKE_X:
+ case INTEL_ICELAKE_D:
x86_pmu.pebs_ept = 1;
pmem = true;
fallthrough;
- case INTEL_FAM6_ICELAKE_L:
- case INTEL_FAM6_ICELAKE:
- case INTEL_FAM6_TIGERLAKE_L:
- case INTEL_FAM6_TIGERLAKE:
- case INTEL_FAM6_ROCKETLAKE:
+ case INTEL_ICELAKE_L:
+ case INTEL_ICELAKE:
+ case INTEL_TIGERLAKE_L:
+ case INTEL_TIGERLAKE:
+ case INTEL_ROCKETLAKE:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -6752,16 +6939,22 @@ __init int intel_pmu_init(void)
name = "icelake";
break;
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
- case INTEL_FAM6_EMERALDRAPIDS_X:
+ case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
x86_pmu.extra_regs = intel_glc_extra_regs;
- fallthrough;
- case INTEL_FAM6_GRANITERAPIDS_X:
- case INTEL_FAM6_GRANITERAPIDS_D:
+ pr_cont("Sapphire Rapids events, ");
+ name = "sapphire_rapids";
+ goto glc_common;
+
+ case INTEL_GRANITERAPIDS_X:
+ case INTEL_GRANITERAPIDS_D:
+ x86_pmu.extra_regs = intel_rwc_extra_regs;
+ pr_cont("Granite Rapids events, ");
+ name = "granite_rapids";
+
+ glc_common:
intel_pmu_init_glc(NULL);
- if (!x86_pmu.extra_regs)
- x86_pmu.extra_regs = intel_rwc_extra_regs;
x86_pmu.pebs_ept = 1;
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = glc_get_event_constraints;
@@ -6772,15 +6965,13 @@ __init int intel_pmu_init(void)
td_attr = glc_td_events_attrs;
tsx_attr = glc_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(true);
- pr_cont("Sapphire Rapids events, ");
- name = "sapphire_rapids";
break;
- case INTEL_FAM6_ALDERLAKE:
- case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_RAPTORLAKE:
- case INTEL_FAM6_RAPTORLAKE_P:
- case INTEL_FAM6_RAPTORLAKE_S:
+ case INTEL_ALDERLAKE:
+ case INTEL_ALDERLAKE_L:
+ case INTEL_RAPTORLAKE:
+ case INTEL_RAPTORLAKE_P:
+ case INTEL_RAPTORLAKE_S:
/*
* Alder Lake has 2 types of CPU, core and atom.
*
@@ -6788,7 +6979,7 @@ __init int intel_pmu_init(void)
*/
intel_pmu_init_hybrid(hybrid_big_small);
- x86_pmu.pebs_latency_data = adl_latency_data_small;
+ x86_pmu.pebs_latency_data = grt_latency_data;
x86_pmu.get_event_constraints = adl_get_event_constraints;
x86_pmu.hw_config = adl_hw_config;
x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
@@ -6803,11 +6994,13 @@ __init int intel_pmu_init(void)
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
intel_pmu_init_glc(&pmu->pmu);
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
- pmu->num_counters = x86_pmu.num_counters + 2;
- pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
+ pmu->cntr_mask64 <<= 2;
+ pmu->cntr_mask64 |= 0x3;
+ pmu->fixed_cntr_mask64 <<= 1;
+ pmu->fixed_cntr_mask64 |= 0x1;
} else {
- pmu->num_counters = x86_pmu.num_counters;
- pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+ pmu->cntr_mask64 = x86_pmu.cntr_mask64;
+ pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
}
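
The shift-and-OR above is the mask form of the old "num_counters + 2 / num_counters_fixed + 1" adjustment for hybrid parts with SMT disabled: extending the mask at the low end adds the extra counters while keeping it contiguous. A worked userspace example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cntr_mask = 0xff;	/* counters 0-7 */
	uint64_t fixed_mask = 0x7;	/* fixed counters 0-2 */

	cntr_mask <<= 2;
	cntr_mask |= 0x3;		/* now counters 0-9 */
	fixed_mask <<= 1;
	fixed_mask |= 0x1;		/* now fixed counters 0-3 */

	printf("cntr_mask  = %#llx\n", (unsigned long long)cntr_mask);
	printf("fixed_mask = %#llx\n", (unsigned long long)fixed_mask);
	return 0;
}
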
/*
@@ -6817,15 +7010,16 @@ __init int intel_pmu_init(void)
* mistakenly add extra counters for P-cores. Correct the number of
* counters here.
*/
- if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
- pmu->num_counters = x86_pmu.num_counters;
- pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+ if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) {
+ pmu->cntr_mask64 = x86_pmu.cntr_mask64;
+ pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
}
- pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+ pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
pmu->unconstrained = (struct event_constraint)
- __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
- 0, pmu->num_counters, 0, 0);
+ __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
+ 0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
+
pmu->extra_regs = intel_glc_extra_regs;
/* Initialize Atom core specific PerfMon capabilities.*/
@@ -6838,11 +7032,11 @@ __init int intel_pmu_init(void)
name = "alderlake_hybrid";
break;
- case INTEL_FAM6_METEORLAKE:
- case INTEL_FAM6_METEORLAKE_L:
+ case INTEL_METEORLAKE:
+ case INTEL_METEORLAKE_L:
intel_pmu_init_hybrid(hybrid_big_small);
- x86_pmu.pebs_latency_data = mtl_latency_data_small;
+ x86_pmu.pebs_latency_data = cmt_latency_data;
x86_pmu.get_event_constraints = mtl_get_event_constraints;
x86_pmu.hw_config = adl_hw_config;
@@ -6867,6 +7061,33 @@ __init int intel_pmu_init(void)
name = "meteorlake_hybrid";
break;
+ case INTEL_LUNARLAKE_M:
+ case INTEL_ARROWLAKE:
+ intel_pmu_init_hybrid(hybrid_big_small);
+
+ x86_pmu.pebs_latency_data = lnl_latency_data;
+ x86_pmu.get_event_constraints = mtl_get_event_constraints;
+ x86_pmu.hw_config = adl_hw_config;
+
+ td_attr = lnl_hybrid_events_attrs;
+ mem_attr = mtl_hybrid_mem_attrs;
+ tsx_attr = adl_hybrid_tsx_attrs;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
+
+ /* Initialize big core specific PerfMon capabilities.*/
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
+ intel_pmu_init_lnc(&pmu->pmu);
+
+ /* Initialize Atom core specific PerfMon capabilities.*/
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
+ intel_pmu_init_skt(&pmu->pmu);
+
+ intel_pmu_pebs_data_source_lnl();
+ pr_cont("Lunarlake Hybrid events, ");
+ name = "lunarlake_hybrid";
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
@@ -6892,9 +7113,9 @@ __init int intel_pmu_init(void)
* The constraints may be cut according to the CPUID enumeration
* by inserting the EVENT_CONSTRAINT_END.
*/
- if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED)
- x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
- intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1;
+ if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED)
+ x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0);
+ intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1;
x86_pmu.event_constraints = intel_v5_gen_event_constraints;
pr_cont("generic architected perfmon, ");
name = "generic_arch_v5+";
@@ -6921,18 +7142,17 @@ __init int intel_pmu_init(void)
x86_pmu.attr_update = hybrid_attr_update;
}
- intel_pmu_check_num_counters(&x86_pmu.num_counters,
- &x86_pmu.num_counters_fixed,
- &x86_pmu.intel_ctrl,
- (u64)fixed_mask);
+ intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
+ &x86_pmu.fixed_cntr_mask64,
+ &x86_pmu.intel_ctrl);
/* AnyThread may be deprecated on arch perfmon v5 or later */
if (x86_pmu.intel_cap.anythread_deprecated)
x86_pmu.format_attrs = intel_arch_formats_attr;
intel_pmu_check_event_constraints(x86_pmu.event_constraints,
- x86_pmu.num_counters,
- x86_pmu.num_counters_fixed,
+ x86_pmu.cntr_mask64,
+ x86_pmu.fixed_cntr_mask64,
x86_pmu.intel_ctrl);
/*
* Access LBR MSR may cause #GP under certain circumstances.
@@ -6973,6 +7193,14 @@ __init int intel_pmu_init(void)
pr_cont("full-width counters, ");
}
+ /* Support V6+ MSR Aliasing */
+ if (x86_pmu.version >= 6) {
+ x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
+ x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
+ x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
+ x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
+ }
+
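
With version 6 the counter MSRs move to an aliased, evenly strided layout, which is why the new addr_offset helper above is just MSR_IA32_PMC_V6_STEP * index added to the per-type base. A sketch of the address computation; the base and step values below are made up for illustration, not taken from the SDM:

#include <stdio.h>

#define PMC_V6_GP0_CTR	0x1900U		/* illustrative base, not the real MSR */
#define PMC_V6_STEP	4U		/* illustrative stride */

static unsigned int v6_ctr_msr(int index)
{
	return PMC_V6_GP0_CTR + index * PMC_V6_STEP;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("GP%d counter MSR: %#x\n", i, v6_ctr_msr(i));
	return 0;
}
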
if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index e64eaa8dda5a..be58cfb012dd 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -41,7 +41,7 @@
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
* Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
- * MTL,SRF,GRR
+ * MTL,SRF,GRR,ARL,LNL
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
@@ -53,50 +53,50 @@
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
* TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF,
- * GRR
+ * GRR,ARL,LNL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
* Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
- * ICL,TGL,RKL,ADL,RPL,MTL
+ * ICL,TGL,RKL,ADL,RPL,MTL,ARL,LNL
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- * RPL,SPR,MTL
+ * RPL,SPR,MTL,ARL,LNL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
* GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
- * ADL,RPL,MTL
+ * ADL,RPL,MTL,ARL,LNL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF
+ * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF,
+ * ARL,LNL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
- * KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
+ * KBL,CML,ICL,TGL,RKL
* Scope: Package (physical package)
* MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
* perf code: 0x04
* Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- * ADL,RPL,MTL
+ * ADL,RPL,MTL,ARL
* Scope: Package (physical package)
* MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
* perf code: 0x05
- * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- * ADL,RPL,MTL
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
* Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
- * TNT,RKL,ADL,RPL,MTL
+ * TNT,RKL,ADL,RPL,MTL,ARL,LNL
* Scope: Package (physical package)
* MSR_MODULE_C6_RES_MS: Module C6 Residency Counter.
* perf code: 0x00
@@ -114,6 +114,7 @@
#include "../perf_event.h"
#include "../probe.h"
+MODULE_DESCRIPTION("Support for Intel cstate performance events");
MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
@@ -636,9 +637,18 @@ static const struct cstate_model adl_cstates __initconst = {
.pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
BIT(PERF_CSTATE_PKG_C3_RES) |
BIT(PERF_CSTATE_PKG_C6_RES) |
- BIT(PERF_CSTATE_PKG_C7_RES) |
BIT(PERF_CSTATE_PKG_C8_RES) |
- BIT(PERF_CSTATE_PKG_C9_RES) |
+ BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
+static const struct cstate_model lnl_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
+ BIT(PERF_CSTATE_CORE_C6_RES) |
+ BIT(PERF_CSTATE_CORE_C7_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES) |
BIT(PERF_CSTATE_PKG_C10_RES),
};
@@ -762,6 +772,10 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_cstates),
X86_MATCH_VFM(INTEL_METEORLAKE, &adl_cstates),
X86_MATCH_VFM(INTEL_METEORLAKE_L, &adl_cstates),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, &adl_cstates),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, &adl_cstates),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_U, &adl_cstates),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index e010bfed8417..fa5ea65de0d0 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -63,6 +63,15 @@ union intel_x86_pebs_dse {
unsigned int mtl_fwd_blk:1;
unsigned int ld_reserved4:24;
};
+ struct {
+ unsigned int lnc_dse:8;
+ unsigned int ld_reserved5:2;
+ unsigned int lnc_stlb_miss:1;
+ unsigned int lnc_locked:1;
+ unsigned int lnc_data_blk:1;
+ unsigned int lnc_addr_blk:1;
+ unsigned int ld_reserved6:18;
+ };
};
@@ -77,7 +86,7 @@ union intel_x86_pebs_dse {
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
/* Version for Sandy Bridge and later */
-static u64 pebs_data_source[] = {
+static u64 pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = {
P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 local */
OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
@@ -173,6 +182,40 @@ void __init intel_pmu_pebs_data_source_cmt(void)
__intel_pmu_pebs_data_source_cmt(pebs_data_source);
}
+/* Version for Lion Cove and later */
+static u64 lnc_pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = {
+ P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* 0x00: ukn L3 */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 hit */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x02: L1 hit */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x03: LFB/L1 Miss Handling Buffer hit */
+ 0, /* 0x04: Reserved */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x05: L2 Hit */
+ OP_LH | LEVEL(L2_MHB) | P(SNOOP, NONE), /* 0x06: L2 Miss Handling Buffer Hit */
+ 0, /* 0x07: Reserved */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x08: L3 Hit */
+ 0, /* 0x09: Reserved */
+ 0, /* 0x0a: Reserved */
+ 0, /* 0x0b: Reserved */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* 0x0c: L3 Hit Snoop Fwd */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x0d: L3 Hit Snoop HitM */
+ 0, /* 0x0e: Reserved */
+ P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x0f: L3 Miss Snoop HitM */
+ OP_LH | LEVEL(MSC) | P(SNOOP, NONE), /* 0x10: Memory-side Cache Hit */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE), /* 0x11: Local Memory Hit */
+};
+
+void __init intel_pmu_pebs_data_source_lnl(void)
+{
+ u64 *data_source;
+
+ data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
+ memcpy(data_source, lnc_pebs_data_source, sizeof(lnc_pebs_data_source));
+
+ data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
+ memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
+ __intel_pmu_pebs_data_source_cmt(data_source);
+}
+
static u64 precise_store_data(u64 status)
{
union intel_x86_pebs_dse dse;
@@ -257,14 +300,14 @@ static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
}
/* Retrieve the latency data for e-core of ADL */
-static u64 __adl_latency_data_small(struct perf_event *event, u64 status,
- u8 dse, bool tlb, bool lock, bool blk)
+static u64 __grt_latency_data(struct perf_event *event, u64 status,
+ u8 dse, bool tlb, bool lock, bool blk)
{
u64 val;
WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
- dse &= PERF_PEBS_DATA_SOURCE_MASK;
+ dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
val = hybrid_var(event->pmu, pebs_data_source)[dse];
pebs_set_tlb_lock(&val, tlb, lock);
@@ -277,27 +320,72 @@ static u64 __adl_latency_data_small(struct perf_event *event, u64 status,
return val;
}
-u64 adl_latency_data_small(struct perf_event *event, u64 status)
+u64 grt_latency_data(struct perf_event *event, u64 status)
{
union intel_x86_pebs_dse dse;
dse.val = status;
- return __adl_latency_data_small(event, status, dse.ld_dse,
- dse.ld_locked, dse.ld_stlb_miss,
- dse.ld_data_blk);
+ return __grt_latency_data(event, status, dse.ld_dse,
+ dse.ld_locked, dse.ld_stlb_miss,
+ dse.ld_data_blk);
}
/* Retrieve the latency data for e-core of MTL */
-u64 mtl_latency_data_small(struct perf_event *event, u64 status)
+u64 cmt_latency_data(struct perf_event *event, u64 status)
{
union intel_x86_pebs_dse dse;
dse.val = status;
- return __adl_latency_data_small(event, status, dse.mtl_dse,
- dse.mtl_stlb_miss, dse.mtl_locked,
- dse.mtl_fwd_blk);
+ return __grt_latency_data(event, status, dse.mtl_dse,
+ dse.mtl_stlb_miss, dse.mtl_locked,
+ dse.mtl_fwd_blk);
+}
+
+static u64 lnc_latency_data(struct perf_event *event, u64 status)
+{
+ union intel_x86_pebs_dse dse;
+ union perf_mem_data_src src;
+ u64 val;
+
+ dse.val = status;
+
+ /* LNC core latency data */
+ val = hybrid_var(event->pmu, pebs_data_source)[status & PERF_PEBS_DATA_SOURCE_MASK];
+ if (!val)
+ val = P(OP, LOAD) | LEVEL(NA) | P(SNOOP, NA);
+
+ if (dse.lnc_stlb_miss)
+ val |= P(TLB, MISS) | P(TLB, L2);
+ else
+ val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
+
+ if (dse.lnc_locked)
+ val |= P(LOCK, LOCKED);
+
+ if (dse.lnc_data_blk)
+ val |= P(BLK, DATA);
+ if (dse.lnc_addr_blk)
+ val |= P(BLK, ADDR);
+ if (!dse.lnc_data_blk && !dse.lnc_addr_blk)
+ val |= P(BLK, NA);
+
+ src.val = val;
+ if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
+ src.mem_op = P(OP, STORE);
+
+ return src.val;
+}
+
+u64 lnl_latency_data(struct perf_event *event, u64 status)
+{
+ struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+ if (pmu->pmu_type == hybrid_small)
+ return cmt_latency_data(event, status);
+
+ return lnc_latency_data(event, status);
}
static u64 load_latency_data(struct perf_event *event, u64 status)
@@ -1086,6 +1174,32 @@ struct event_constraint intel_glc_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_lnc_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
+
+ INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff),
+ INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),
+
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),
+
+ /*
+ * Everything else is handled by PMU_FL_PEBS_ALL, because we
+ * need the full constraints from the main table.
+ */
+
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
@@ -1137,8 +1251,7 @@ void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sche
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
- int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
- int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+ int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu);
u64 threshold;
int reserved;
@@ -1146,7 +1259,7 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
return;
if (x86_pmu.flags & PMU_FL_PEBS_ALL)
- reserved = max_pebs_events + num_counters_fixed;
+ reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu);
else
reserved = max_pebs_events;
@@ -1831,8 +1944,12 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
set_linear_ip(regs, basic->ip);
regs->flags = PERF_EFLAGS_EXACT;
- if ((sample_type & PERF_SAMPLE_WEIGHT_STRUCT) && (x86_pmu.flags & PMU_FL_RETIRE_LATENCY))
- data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
+ if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)
+ data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
+ else
+ data->weight.var3_w = 0;
+ }
/*
* The record for MEMINFO is in front of GP
@@ -2157,6 +2274,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
void *base, *at, *top;
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ int max_pebs_events = intel_pmu_max_num_pebs(NULL);
int bit, i, size;
u64 mask;
@@ -2168,11 +2286,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
ds->pebs_index = ds->pebs_buffer_base;
- mask = (1ULL << x86_pmu.max_pebs_events) - 1;
- size = x86_pmu.max_pebs_events;
+ mask = x86_pmu.pebs_events_mask;
+ size = max_pebs_events;
if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
- mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
- size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+ mask |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;
+ size = INTEL_PMC_IDX_FIXED + x86_pmu_max_num_counters_fixed(NULL);
}
if (unlikely(base >= top)) {
@@ -2208,8 +2326,9 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
pebs_status = p->status = cpuc->pebs_enabled;
bit = find_first_bit((unsigned long *)&pebs_status,
- x86_pmu.max_pebs_events);
- if (bit >= x86_pmu.max_pebs_events)
+ max_pebs_events);
+
+ if (!(x86_pmu.pebs_events_mask & (1 << bit)))
continue;
/*
@@ -2267,12 +2386,10 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
{
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
- int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
struct debug_store *ds = cpuc->ds;
struct perf_event *event;
void *base, *at, *top;
- int bit, size;
+ int bit;
u64 mask;
if (!x86_pmu.pebs_active)
@@ -2283,12 +2400,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
ds->pebs_index = ds->pebs_buffer_base;
- mask = ((1ULL << max_pebs_events) - 1) |
- (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
- size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
+ mask = hybrid(cpuc->pmu, pebs_events_mask) |
+ (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
if (unlikely(base >= top)) {
- intel_pmu_pebs_event_update_no_drain(cpuc, size);
+ intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX);
return;
}
@@ -2298,11 +2414,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
pebs_status &= mask;
- for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
+ for_each_set_bit(bit, (unsigned long *)&pebs_status, X86_PMC_IDX_MAX)
counts[bit]++;
}
- for_each_set_bit(bit, (unsigned long *)&mask, size) {
+ for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) {
if (counts[bit] == 0)
continue;
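Editorial aside (not part of the patch): the ds.c hunks above add lnc_latency_data(), which decodes the Lion Cove DSE word — the low bits select an entry in lnc_pebs_data_source[] and the remaining single-bit fields OR in TLB, lock and block information. A compressed standalone sketch of that decode shape; the field layout, table contents and flag values below are invented for illustration only:

/* Sketch of a DSE -> data-source decode in the shape of
 * lnc_latency_data(): low bits index a lookup table, single
 * bits OR extra flags in. All widths and values are made up.
 */
#include <stdint.h>
#include <stdio.h>

union example_dse {
	uint64_t val;
	struct {
		uint64_t source    : 8;   /* index into the data-source table */
		uint64_t rsvd      : 2;
		uint64_t stlb_miss : 1;
		uint64_t locked    : 1;
		uint64_t data_blk  : 1;
		uint64_t addr_blk  : 1;
		uint64_t rsvd2     : 50;
	};
};

#define F_TLB_MISS  (1ULL << 0)
#define F_TLB_HIT   (1ULL << 1)
#define F_LOCKED    (1ULL << 2)
#define F_BLK_DATA  (1ULL << 3)
#define F_BLK_ADDR  (1ULL << 4)

static const uint64_t data_source[256] = {
	[0x01] = 0x10,   /* say: L1 hit     */
	[0x08] = 0x30,   /* say: L3 hit     */
	[0x11] = 0x40,   /* say: local RAM  */
};

static uint64_t decode_latency_data(uint64_t status)
{
	union example_dse dse = { .val = status };
	uint64_t val = data_source[dse.source];

	val |= dse.stlb_miss ? F_TLB_MISS : F_TLB_HIT;
	if (dse.locked)
		val |= F_LOCKED;
	if (dse.data_blk)
		val |= F_BLK_DATA;
	if (dse.addr_blk)
		val |= F_BLK_ADDR;
	return val;
}

int main(void)
{
	/* source 0x08 ("L3 hit") with stlb_miss and locked set */
	uint64_t status = 0x08 | (1ULL << 10) | (1ULL << 11);

	printf("decoded: 0x%llx\n",
	       (unsigned long long)decode_latency_data(status));
	return 0;
}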
diff --git a/arch/x86/events/intel/knc.c b/arch/x86/events/intel/knc.c
index 618001c208e8..034a1f6a457c 100644
--- a/arch/x86/events/intel/knc.c
+++ b/arch/x86/events/intel/knc.c
@@ -303,7 +303,7 @@ static const struct x86_pmu knc_pmu __initconst = {
.apic = 1,
.max_period = (1ULL << 39) - 1,
.version = 0,
- .num_counters = 2,
+ .cntr_mask64 = 0x3,
.cntval_bits = 40,
.cntval_mask = (1ULL << 40) - 1,
.get_event_constraints = x86_get_event_constraints,
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index 35936188db01..844bc4fc4724 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -919,7 +919,7 @@ static void p4_pmu_disable_all(void)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -998,7 +998,7 @@ static void p4_pmu_enable_all(int added)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -1040,7 +1040,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
cpuc = this_cpu_ptr(&cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
int overflow;
if (!test_bit(idx, cpuc->active_mask)) {
@@ -1353,7 +1353,7 @@ static __initconst const struct x86_pmu p4_pmu = {
* though leave it restricted at moment assuming
* HT is on
*/
- .num_counters = ARCH_P4_MAX_CCCR,
+ .cntr_mask64 = GENMASK_ULL(ARCH_P4_MAX_CCCR - 1, 0),
.apic = 1,
.cntval_bits = ARCH_P4_CNTRVAL_BITS,
.cntval_mask = ARCH_P4_CNTRVAL_MASK,
@@ -1395,7 +1395,7 @@ __init int p4_pmu_init(void)
*
* Solve this by zero'ing out the registers to mimic a reset.
*/
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
reg = x86_pmu_config_addr(i);
wrmsrl_safe(reg, 0ULL);
}
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index 408879b0c0d4..a6cffb4f4ef5 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -214,7 +214,7 @@ static __initconst const struct x86_pmu p6_pmu = {
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
- .num_counters = 2,
+ .cntr_mask64 = 0x3,
/*
* Events have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 14db6d9d318b..b4aa8daa4773 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -878,7 +878,7 @@ static void pt_update_head(struct pt *pt)
*/
static void *pt_buffer_region(struct pt_buffer *buf)
{
- return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
+ return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
}
/**
@@ -990,7 +990,7 @@ pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
* order allocations, there shouldn't be many of these.
*/
list_for_each_entry(topa, &buf->tables, list) {
- if (topa->offset + topa->size > pg << PAGE_SHIFT)
+ if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT)
goto found;
}
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 96906a62aacd..f5e46c04c145 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -33,8 +33,8 @@ struct topa_entry {
u64 rsvd2 : 1;
u64 size : 4;
u64 rsvd3 : 2;
- u64 base : 36;
- u64 rsvd4 : 16;
+ u64 base : 40;
+ u64 rsvd4 : 12;
};
/* TSC to Core Crystal Clock Ratio */
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 419c517b8594..64ca8625eb58 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -34,6 +34,7 @@ static struct event_constraint uncore_constraint_fixed =
struct event_constraint uncore_constraint_empty =
EVENT_CONSTRAINT(0, 0, 0);
+MODULE_DESCRIPTION("Support for Intel uncore performance events");
MODULE_LICENSE("GPL");
int uncore_pcibus_to_dieid(struct pci_bus *bus)
@@ -263,6 +264,9 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
return;
}
+ if (intel_generic_uncore_assign_hw_event(event, box))
+ return;
+
hwc->config_base = uncore_event_ctl(box, hwc->idx);
hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
@@ -843,7 +847,9 @@ static void uncore_pmu_disable(struct pmu *pmu)
static ssize_t uncore_get_attr_cpumask(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
+ struct intel_uncore_pmu *pmu = container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, &pmu->cpu_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
@@ -860,7 +866,10 @@ static const struct attribute_group uncore_pmu_attr_group = {
static inline int uncore_get_box_id(struct intel_uncore_type *type,
struct intel_uncore_pmu *pmu)
{
- return type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx;
+ if (type->boxes)
+ return intel_uncore_find_discovery_unit_id(type->boxes, -1, pmu->pmu_idx);
+
+ return pmu->pmu_idx;
}
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
@@ -961,6 +970,9 @@ static void uncore_type_exit(struct intel_uncore_type *type)
if (type->cleanup_mapping)
type->cleanup_mapping(type);
+ if (type->cleanup_extra_boxes)
+ type->cleanup_extra_boxes(type);
+
if (pmu) {
for (i = 0; i < type->num_boxes; i++, pmu++) {
uncore_pmu_unregister(pmu);
@@ -969,10 +981,7 @@ static void uncore_type_exit(struct intel_uncore_type *type)
kfree(type->pmus);
type->pmus = NULL;
}
- if (type->box_ids) {
- kfree(type->box_ids);
- type->box_ids = NULL;
- }
+
kfree(type->events_group);
type->events_group = NULL;
}
@@ -1076,22 +1085,19 @@ static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev)
{
struct intel_uncore_type **types = uncore_pci_uncores;
+ struct intel_uncore_discovery_unit *unit;
struct intel_uncore_type *type;
- u64 box_ctl;
- int i, die;
+ struct rb_node *node;
for (; *types; types++) {
type = *types;
- for (die = 0; die < __uncore_max_dies; die++) {
- for (i = 0; i < type->num_boxes; i++) {
- if (!type->box_ctls[die])
- continue;
- box_ctl = type->box_ctls[die] + type->pci_offsets[i];
- if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) &&
- pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) &&
- pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl))
- return &type->pmus[i];
- }
+
+ for (node = rb_first(type->boxes); node; node = rb_next(node)) {
+ unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
+ if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(unit->addr) &&
+ pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(unit->addr) &&
+ pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr))
+ return &type->pmus[unit->pmu_idx];
}
}
@@ -1367,28 +1373,25 @@ static struct notifier_block uncore_pci_notifier = {
static void uncore_pci_pmus_register(void)
{
struct intel_uncore_type **types = uncore_pci_uncores;
+ struct intel_uncore_discovery_unit *unit;
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
+ struct rb_node *node;
struct pci_dev *pdev;
- u64 box_ctl;
- int i, die;
for (; *types; types++) {
type = *types;
- for (die = 0; die < __uncore_max_dies; die++) {
- for (i = 0; i < type->num_boxes; i++) {
- if (!type->box_ctls[die])
- continue;
- box_ctl = type->box_ctls[die] + type->pci_offsets[i];
- pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl),
- UNCORE_DISCOVERY_PCI_BUS(box_ctl),
- UNCORE_DISCOVERY_PCI_DEVFN(box_ctl));
- if (!pdev)
- continue;
- pmu = &type->pmus[i];
-
- uncore_pci_pmu_register(pdev, type, pmu, die);
- }
+
+ for (node = rb_first(type->boxes); node; node = rb_next(node)) {
+ unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
+ pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr),
+ UNCORE_DISCOVERY_PCI_BUS(unit->addr),
+ UNCORE_DISCOVERY_PCI_DEVFN(unit->addr));
+
+ if (!pdev)
+ continue;
+ pmu = &type->pmus[unit->pmu_idx];
+ uncore_pci_pmu_register(pdev, type, pmu, unit->die);
}
}
@@ -1453,6 +1456,18 @@ static void uncore_pci_exit(void)
}
}
+static bool uncore_die_has_box(struct intel_uncore_type *type,
+ int die, unsigned int pmu_idx)
+{
+ if (!type->boxes)
+ return true;
+
+ if (intel_uncore_find_discovery_unit_id(type->boxes, die, pmu_idx) < 0)
+ return false;
+
+ return true;
+}
+
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
int new_cpu)
{
@@ -1468,18 +1483,25 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
if (old_cpu < 0) {
WARN_ON_ONCE(box->cpu != -1);
- box->cpu = new_cpu;
+ if (uncore_die_has_box(type, die, pmu->pmu_idx)) {
+ box->cpu = new_cpu;
+ cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
+ }
continue;
}
- WARN_ON_ONCE(box->cpu != old_cpu);
+ WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu);
box->cpu = -1;
+ cpumask_clear_cpu(old_cpu, &pmu->cpu_mask);
if (new_cpu < 0)
continue;
+ if (!uncore_die_has_box(type, die, pmu->pmu_idx))
+ continue;
uncore_pmu_cancel_hrtimer(box);
perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
box->cpu = new_cpu;
+ cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
}
}
@@ -1502,7 +1524,7 @@ static void uncore_box_unref(struct intel_uncore_type **types, int id)
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[id];
- if (box && atomic_dec_return(&box->refcnt) == 0)
+ if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0)
uncore_box_exit(box);
}
}
@@ -1592,7 +1614,7 @@ static int uncore_box_ref(struct intel_uncore_type **types,
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[id];
- if (box && atomic_inc_return(&box->refcnt) == 1)
+ if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1)
uncore_box_init(box);
}
}
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 4838502d89ae..027ef292c602 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -62,7 +62,6 @@ struct intel_uncore_type {
unsigned fixed_ctr;
unsigned fixed_ctl;
unsigned box_ctl;
- u64 *box_ctls; /* Unit ctrl addr of the first box of each die */
union {
unsigned msr_offset;
unsigned mmio_offset;
@@ -76,7 +75,6 @@ struct intel_uncore_type {
u64 *pci_offsets;
u64 *mmio_offsets;
};
- unsigned *box_ids;
struct event_constraint unconstrainted;
struct event_constraint *constraints;
struct intel_uncore_pmu *pmus;
@@ -86,6 +84,7 @@ struct intel_uncore_type {
const struct attribute_group *attr_groups[4];
const struct attribute_group **attr_update;
struct pmu *pmu; /* for custom pmu ops */
+ struct rb_root *boxes;
/*
* Uncore PMU would store relevant platform topology configuration here
* to identify which platform component each PMON block of that type is
@@ -98,6 +97,10 @@ struct intel_uncore_type {
int (*get_topology)(struct intel_uncore_type *type);
void (*set_mapping)(struct intel_uncore_type *type);
void (*cleanup_mapping)(struct intel_uncore_type *type);
+ /*
+ * Optional callbacks for extra uncore units cleanup
+ */
+ void (*cleanup_extra_boxes)(struct intel_uncore_type *type);
};
#define pmu_group attr_groups[0]
@@ -125,6 +128,7 @@ struct intel_uncore_pmu {
int func_id;
bool registered;
atomic_t activeboxes;
+ cpumask_t cpu_mask;
struct intel_uncore_type *type;
struct intel_uncore_box **boxes;
};
diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c
index 9a698a92962a..571e44b49691 100644
--- a/arch/x86/events/intel/uncore_discovery.c
+++ b/arch/x86/events/intel/uncore_discovery.c
@@ -89,9 +89,7 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit)
if (!type)
return NULL;
- type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
- if (!type->box_ctrl_die)
- goto free_type;
+ type->units = RB_ROOT;
type->access_type = unit->access_type;
num_discovered_types[type->access_type]++;
@@ -100,12 +98,6 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit)
rb_add(&type->node, &discovery_tables, __type_less);
return type;
-
-free_type:
- kfree(type);
-
- return NULL;
-
}
static struct intel_uncore_discovery_type *
@@ -120,14 +112,118 @@ get_uncore_discovery_type(struct uncore_unit_discovery *unit)
return add_uncore_discovery_type(unit);
}
+static inline int pmu_idx_cmp(const void *key, const struct rb_node *b)
+{
+ struct intel_uncore_discovery_unit *unit;
+ const unsigned int *id = key;
+
+ unit = rb_entry(b, struct intel_uncore_discovery_unit, node);
+
+ if (unit->pmu_idx > *id)
+ return -1;
+ else if (unit->pmu_idx < *id)
+ return 1;
+
+ return 0;
+}
+
+static struct intel_uncore_discovery_unit *
+intel_uncore_find_discovery_unit(struct rb_root *units, int die,
+ unsigned int pmu_idx)
+{
+ struct intel_uncore_discovery_unit *unit;
+ struct rb_node *pos;
+
+ if (!units)
+ return NULL;
+
+ pos = rb_find_first(&pmu_idx, units, pmu_idx_cmp);
+ if (!pos)
+ return NULL;
+ unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);
+
+ if (die < 0)
+ return unit;
+
+ for (; pos; pos = rb_next(pos)) {
+ unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);
+
+ if (unit->pmu_idx != pmu_idx)
+ break;
+
+ if (unit->die == die)
+ return unit;
+ }
+
+ return NULL;
+}
+
+int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die,
+ unsigned int pmu_idx)
+{
+ struct intel_uncore_discovery_unit *unit;
+
+ unit = intel_uncore_find_discovery_unit(units, die, pmu_idx);
+ if (unit)
+ return unit->id;
+
+ return -1;
+}
+
+static inline bool unit_less(struct rb_node *a, const struct rb_node *b)
+{
+ struct intel_uncore_discovery_unit *a_node, *b_node;
+
+ a_node = rb_entry(a, struct intel_uncore_discovery_unit, node);
+ b_node = rb_entry(b, struct intel_uncore_discovery_unit, node);
+
+ if (a_node->pmu_idx < b_node->pmu_idx)
+ return true;
+ if (a_node->pmu_idx > b_node->pmu_idx)
+ return false;
+
+ if (a_node->die < b_node->die)
+ return true;
+ if (a_node->die > b_node->die)
+ return false;
+
+ return false;
+}
+
+static inline struct intel_uncore_discovery_unit *
+uncore_find_unit(struct rb_root *root, unsigned int id)
+{
+ struct intel_uncore_discovery_unit *unit;
+ struct rb_node *node;
+
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
+ if (unit->id == id)
+ return unit;
+ }
+
+ return NULL;
+}
+
+void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
+ struct rb_root *root, u16 *num_units)
+{
+ struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id);
+
+ if (unit)
+ node->pmu_idx = unit->pmu_idx;
+ else if (num_units)
+ node->pmu_idx = (*num_units)++;
+
+ rb_add(&node->node, root, unit_less);
+}
+
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
- int die, bool parsed)
+ int die)
{
+ struct intel_uncore_discovery_unit *node;
struct intel_uncore_discovery_type *type;
- unsigned int *ids;
- u64 *box_offset;
- int i;
if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
pr_info("Invalid address is detected for uncore type %d box %d, "
@@ -136,71 +232,29 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
return;
}
- if (parsed) {
- type = search_uncore_discovery_type(unit->box_type);
- if (!type) {
- pr_info("A spurious uncore type %d is detected, "
- "Disable the uncore type.\n",
- unit->box_type);
- return;
- }
- /* Store the first box of each die */
- if (!type->box_ctrl_die[die])
- type->box_ctrl_die[die] = unit->ctl;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
return;
- }
- type = get_uncore_discovery_type(unit);
- if (!type)
- return;
+ node->die = die;
+ node->id = unit->box_id;
+ node->addr = unit->ctl;
- box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL);
- if (!box_offset)
+ type = get_uncore_discovery_type(unit);
+ if (!type) {
+ kfree(node);
return;
+ }
- ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
- if (!ids)
- goto free_box_offset;
+ uncore_find_add_unit(node, &type->units, &type->num_units);
/* Store generic information for the first box */
- if (!type->num_boxes) {
- type->box_ctrl = unit->ctl;
- type->box_ctrl_die[die] = unit->ctl;
+ if (type->num_units == 1) {
type->num_counters = unit->num_regs;
type->counter_width = unit->bit_width;
type->ctl_offset = unit->ctl_offset;
type->ctr_offset = unit->ctr_offset;
- *ids = unit->box_id;
- goto end;
- }
-
- for (i = 0; i < type->num_boxes; i++) {
- ids[i] = type->ids[i];
- box_offset[i] = type->box_offset[i];
-
- if (unit->box_id == ids[i]) {
- pr_info("Duplicate uncore type %d box ID %d is detected, "
- "Drop the duplicate uncore unit.\n",
- unit->box_type, unit->box_id);
- goto free_ids;
- }
}
- ids[i] = unit->box_id;
- box_offset[i] = unit->ctl - type->box_ctrl;
- kfree(type->ids);
- kfree(type->box_offset);
-end:
- type->ids = ids;
- type->box_offset = box_offset;
- type->num_boxes++;
- return;
-
-free_ids:
- kfree(ids);
-
-free_box_offset:
- kfree(box_offset);
-
}
static bool
@@ -279,7 +333,7 @@ static int parse_discovery_table(struct pci_dev *dev, int die,
if (uncore_ignore_unit(&unit, ignore))
continue;
- uncore_insert_box_info(&unit, die, *parsed);
+ uncore_insert_box_info(&unit, die);
}
*parsed = true;
@@ -339,9 +393,16 @@ err:
void intel_uncore_clear_discovery_tables(void)
{
struct intel_uncore_discovery_type *type, *next;
+ struct intel_uncore_discovery_unit *pos;
+ struct rb_node *node;
rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
- kfree(type->box_ctrl_die);
+ while (!RB_EMPTY_ROOT(&type->units)) {
+ node = rb_first(&type->units);
+ pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
+ rb_erase(node, &type->units);
+ kfree(pos);
+ }
kfree(type);
}
}
@@ -366,19 +427,31 @@ static const struct attribute_group generic_uncore_format_group = {
.attrs = generic_uncore_formats_attr,
};
+static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box)
+{
+ struct intel_uncore_discovery_unit *unit;
+
+ unit = intel_uncore_find_discovery_unit(box->pmu->type->boxes,
+ -1, box->pmu->pmu_idx);
+ if (WARN_ON_ONCE(!unit))
+ return 0;
+
+ return unit->addr;
+}
+
void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
- wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
+ wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}
void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
- wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
+ wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}
void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
- wrmsrl(uncore_msr_box_ctl(box), 0);
+ wrmsrl(intel_generic_uncore_box_ctl(box), 0);
}
static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
@@ -406,10 +479,47 @@ static struct intel_uncore_ops generic_uncore_msr_ops = {
.read_counter = uncore_msr_read_counter,
};
+bool intel_generic_uncore_assign_hw_event(struct perf_event *event,
+ struct intel_uncore_box *box)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 box_ctl;
+
+ if (!box->pmu->type->boxes)
+ return false;
+
+ if (box->io_addr) {
+ hwc->config_base = uncore_pci_event_ctl(box, hwc->idx);
+ hwc->event_base = uncore_pci_perf_ctr(box, hwc->idx);
+ return true;
+ }
+
+ box_ctl = intel_generic_uncore_box_ctl(box);
+ if (!box_ctl)
+ return false;
+
+ if (box->pci_dev) {
+ box_ctl = UNCORE_DISCOVERY_PCI_BOX_CTRL(box_ctl);
+ hwc->config_base = box_ctl + uncore_pci_event_ctl(box, hwc->idx);
+ hwc->event_base = box_ctl + uncore_pci_perf_ctr(box, hwc->idx);
+ return true;
+ }
+
+ hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx;
+ hwc->event_base = box_ctl + box->pmu->type->perf_ctr + hwc->idx;
+
+ return true;
+}
+
+static inline int intel_pci_uncore_box_ctl(struct intel_uncore_box *box)
+{
+ return UNCORE_DISCOVERY_PCI_BOX_CTRL(intel_generic_uncore_box_ctl(box));
+}
+
void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
struct pci_dev *pdev = box->pci_dev;
- int box_ctl = uncore_pci_box_ctl(box);
+ int box_ctl = intel_pci_uncore_box_ctl(box);
__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
@@ -418,7 +528,7 @@ void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
struct pci_dev *pdev = box->pci_dev;
- int box_ctl = uncore_pci_box_ctl(box);
+ int box_ctl = intel_pci_uncore_box_ctl(box);
pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}
@@ -426,7 +536,7 @@ void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
struct pci_dev *pdev = box->pci_dev;
- int box_ctl = uncore_pci_box_ctl(box);
+ int box_ctl = intel_pci_uncore_box_ctl(box);
pci_write_config_dword(pdev, box_ctl, 0);
}
@@ -473,34 +583,30 @@ static struct intel_uncore_ops generic_uncore_pci_ops = {
#define UNCORE_GENERIC_MMIO_SIZE 0x4000
-static u64 generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
-{
- struct intel_uncore_type *type = box->pmu->type;
-
- if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
- return 0;
-
- return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
-}
-
void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
- u64 box_ctl = generic_uncore_mmio_box_ctl(box);
+ struct intel_uncore_discovery_unit *unit;
struct intel_uncore_type *type = box->pmu->type;
resource_size_t addr;
- if (!box_ctl) {
+ unit = intel_uncore_find_discovery_unit(type->boxes, box->dieid, box->pmu->pmu_idx);
+ if (!unit) {
+ pr_warn("Uncore type %d id %d: Cannot find box control address.\n",
+ type->type_id, box->pmu->pmu_idx);
+ return;
+ }
+
+ if (!unit->addr) {
pr_warn("Uncore type %d box %d: Invalid box control address.\n",
- type->type_id, type->box_ids[box->pmu->pmu_idx]);
+ type->type_id, unit->id);
return;
}
- addr = box_ctl;
+ addr = unit->addr;
box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
if (!box->io_addr) {
pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
- type->type_id, type->box_ids[box->pmu->pmu_idx],
- (unsigned long long)addr);
+ type->type_id, unit->id, (unsigned long long)addr);
return;
}
@@ -560,34 +666,22 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id,
struct intel_uncore_discovery_type *type)
{
uncore->type_id = type->type;
- uncore->num_boxes = type->num_boxes;
uncore->num_counters = type->num_counters;
uncore->perf_ctr_bits = type->counter_width;
- uncore->box_ids = type->ids;
+ uncore->perf_ctr = (unsigned int)type->ctr_offset;
+ uncore->event_ctl = (unsigned int)type->ctl_offset;
+ uncore->boxes = &type->units;
+ uncore->num_boxes = type->num_units;
switch (type_id) {
case UNCORE_ACCESS_MSR:
uncore->ops = &generic_uncore_msr_ops;
- uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
- uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
- uncore->box_ctl = (unsigned int)type->box_ctrl;
- uncore->msr_offsets = type->box_offset;
break;
case UNCORE_ACCESS_PCI:
uncore->ops = &generic_uncore_pci_ops;
- uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
- uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
- uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
- uncore->box_ctls = type->box_ctrl_die;
- uncore->pci_offsets = type->box_offset;
break;
case UNCORE_ACCESS_MMIO:
uncore->ops = &generic_uncore_mmio_ops;
- uncore->perf_ctr = (unsigned int)type->ctr_offset;
- uncore->event_ctl = (unsigned int)type->ctl_offset;
- uncore->box_ctl = (unsigned int)type->box_ctrl;
- uncore->box_ctls = type->box_ctrl_die;
- uncore->mmio_offsets = type->box_offset;
uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
break;
default:
diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h
index 22e769a81103..0e94aa7db8e7 100644
--- a/arch/x86/events/intel/uncore_discovery.h
+++ b/arch/x86/events/intel/uncore_discovery.h
@@ -113,19 +113,24 @@ struct uncore_unit_discovery {
};
};
+struct intel_uncore_discovery_unit {
+ struct rb_node node;
+ unsigned int pmu_idx; /* The idx of the corresponding PMU */
+ unsigned int id; /* Unit ID */
+ unsigned int die; /* Die ID */
+ u64 addr; /* Unit Control Address */
+};
+
struct intel_uncore_discovery_type {
struct rb_node node;
enum uncore_access_type access_type;
- u64 box_ctrl; /* Unit ctrl addr of the first box */
- u64 *box_ctrl_die; /* Unit ctrl addr of the first box of each die */
+ struct rb_root units; /* Unit ctrl addr for all units */
u16 type; /* Type ID of the uncore block */
u8 num_counters;
u8 counter_width;
u8 ctl_offset; /* Counter Control 0 offset */
u8 ctr_offset; /* Counter 0 offset */
- u16 num_boxes; /* number of boxes for the uncore block */
- unsigned int *ids; /* Box IDs */
- u64 *box_offset; /* Box offset */
+ u16 num_units; /* number of units */
};
bool intel_uncore_has_discovery_tables(int *ignore);
@@ -156,3 +161,10 @@ u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra);
+
+int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die,
+ unsigned int pmu_idx);
+bool intel_generic_uncore_assign_hw_event(struct perf_event *event,
+ struct intel_uncore_box *box);
+void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
+ struct rb_root *root, u16 *num_units);
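Editorial aside (not part of the patch): the discovery rework above keys every uncore unit by (pmu_idx, die) in an rb-tree instead of per-die box-control arrays. The standalone sketch below illustrates only the ordering and the two-step lookup — match pmu_idx first, then die, with die < 0 meaning "any die" — using a sorted array in place of the kernel's rb-tree; all names and values are invented:

/* Illustrative sketch of the (pmu_idx, die) ordered unit lookup.
 * A sorted array stands in for the rb-tree; everything is made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct unit {
	unsigned int pmu_idx;   /* index of the PMU this unit backs */
	unsigned int die;       /* die the unit lives on            */
	unsigned int id;        /* platform unit ID                 */
	uint64_t addr;          /* unit control address             */
};

/* Primary key pmu_idx, secondary key die -- the same order unit_less() uses. */
static int unit_cmp(const void *a, const void *b)
{
	const struct unit *ua = a, *ub = b;

	if (ua->pmu_idx != ub->pmu_idx)
		return ua->pmu_idx < ub->pmu_idx ? -1 : 1;
	if (ua->die != ub->die)
		return ua->die < ub->die ? -1 : 1;
	return 0;
}

/* die < 0 means "any die": return the first unit for this pmu_idx. */
static const struct unit *find_unit(const struct unit *units, int n,
				    int die, unsigned int pmu_idx)
{
	for (int i = 0; i < n; i++) {
		if (units[i].pmu_idx != pmu_idx)
			continue;
		if (die < 0 || units[i].die == (unsigned int)die)
			return &units[i];
	}
	return NULL;
}

int main(void)
{
	struct unit units[] = {
		{ .pmu_idx = 1, .die = 1, .id = 3, .addr = 0x2000 },
		{ .pmu_idx = 0, .die = 0, .id = 0, .addr = 0x1000 },
		{ .pmu_idx = 1, .die = 0, .id = 3, .addr = 0x1800 },
	};
	int n = sizeof(units) / sizeof(units[0]);
	const struct unit *u;

	qsort(units, n, sizeof(units[0]), unit_cmp);

	u = find_unit(units, n, 1, 1);
	if (u)
		printf("pmu 1 on die 1 -> unit %u at 0x%llx\n",
		       u->id, (unsigned long long)u->addr);
	return 0;
}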
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 74b8b21e8990..ca98744343b8 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -462,6 +462,7 @@
#define SPR_UBOX_DID 0x3250
/* SPR CHA */
+#define SPR_CHA_EVENT_MASK_EXT 0xffffffff
#define SPR_CHA_PMON_CTL_TID_EN (1 << 16)
#define SPR_CHA_PMON_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
SPR_CHA_PMON_CTL_TID_EN)
@@ -478,6 +479,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
@@ -5933,10 +5935,11 @@ static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev
struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
struct intel_uncore_type *type = box->pmu->type;
+ int id = intel_uncore_find_discovery_unit_id(type->boxes, -1, box->pmu->pmu_idx);
if (tie_en) {
reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
- HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
+ HSWEP_CBO_MSR_OFFSET * id;
reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
reg1->idx = 0;
}
@@ -5958,7 +5961,7 @@ static struct intel_uncore_ops spr_uncore_chabox_ops = {
static struct attribute *spr_uncore_cha_formats_attr[] = {
&format_attr_event.attr,
- &format_attr_umask_ext4.attr,
+ &format_attr_umask_ext5.attr,
&format_attr_tid_en2.attr,
&format_attr_edge.attr,
&format_attr_inv.attr,
@@ -5994,7 +5997,7 @@ ATTRIBUTE_GROUPS(uncore_alias);
static struct intel_uncore_type spr_uncore_chabox = {
.name = "cha",
.event_mask = SPR_CHA_PMON_EVENT_MASK,
- .event_mask_ext = SPR_RAW_EVENT_MASK_EXT,
+ .event_mask_ext = SPR_CHA_EVENT_MASK_EXT,
.num_shared_regs = 1,
.constraints = skx_uncore_chabox_constraints,
.ops = &spr_uncore_chabox_ops,
@@ -6162,7 +6165,55 @@ static struct intel_uncore_type spr_uncore_mdf = {
.name = "mdf",
};
-#define UNCORE_SPR_NUM_UNCORE_TYPES 12
+static void spr_uncore_mmio_offs8_init_box(struct intel_uncore_box *box)
+{
+ __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
+ intel_generic_uncore_mmio_init_box(box);
+}
+
+static struct intel_uncore_ops spr_uncore_mmio_offs8_ops = {
+ .init_box = spr_uncore_mmio_offs8_init_box,
+ .exit_box = uncore_mmio_exit_box,
+ .disable_box = intel_generic_uncore_mmio_disable_box,
+ .enable_box = intel_generic_uncore_mmio_enable_box,
+ .disable_event = intel_generic_uncore_mmio_disable_event,
+ .enable_event = spr_uncore_mmio_enable_event,
+ .read_counter = uncore_mmio_read_counter,
+};
+
+#define SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT() \
+ SPR_UNCORE_COMMON_FORMAT(), \
+ .ops = &spr_uncore_mmio_offs8_ops
+
+static struct event_constraint spr_uncore_cxlcm_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x02, 0x0f),
+ UNCORE_EVENT_CONSTRAINT(0x05, 0x0f),
+ UNCORE_EVENT_CONSTRAINT(0x40, 0xf0),
+ UNCORE_EVENT_CONSTRAINT(0x41, 0xf0),
+ UNCORE_EVENT_CONSTRAINT(0x42, 0xf0),
+ UNCORE_EVENT_CONSTRAINT(0x43, 0xf0),
+ UNCORE_EVENT_CONSTRAINT(0x4b, 0xf0),
+ UNCORE_EVENT_CONSTRAINT(0x52, 0xf0),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type spr_uncore_cxlcm = {
+ SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
+ .name = "cxlcm",
+ .constraints = spr_uncore_cxlcm_constraints,
+};
+
+static struct intel_uncore_type spr_uncore_cxldp = {
+ SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
+ .name = "cxldp",
+};
+
+static struct intel_uncore_type spr_uncore_hbm = {
+ SPR_UNCORE_COMMON_FORMAT(),
+ .name = "hbm",
+};
+
+#define UNCORE_SPR_NUM_UNCORE_TYPES 15
#define UNCORE_SPR_CHA 0
#define UNCORE_SPR_IIO 1
#define UNCORE_SPR_IMC 6
@@ -6186,6 +6237,9 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
NULL,
NULL,
&spr_uncore_mdf,
+ &spr_uncore_cxlcm,
+ &spr_uncore_cxldp,
+ &spr_uncore_hbm,
};
/*
@@ -6198,6 +6252,24 @@ static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
0, 0x8000, 0x10000, 0x18000
};
+static void spr_extra_boxes_cleanup(struct intel_uncore_type *type)
+{
+ struct intel_uncore_discovery_unit *pos;
+ struct rb_node *node;
+
+ if (!type->boxes)
+ return;
+
+ while (!RB_EMPTY_ROOT(type->boxes)) {
+ node = rb_first(type->boxes);
+ pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
+ rb_erase(node, type->boxes);
+ kfree(pos);
+ }
+ kfree(type->boxes);
+ type->boxes = NULL;
+}
+
static struct intel_uncore_type spr_uncore_upi = {
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
.event_mask_ext = SPR_RAW_EVENT_MASK_EXT,
@@ -6212,10 +6284,11 @@ static struct intel_uncore_type spr_uncore_upi = {
.num_counters = 4,
.num_boxes = SPR_UNCORE_UPI_NUM_BOXES,
.perf_ctr_bits = 48,
- .perf_ctr = ICX_UPI_PCI_PMON_CTR0,
- .event_ctl = ICX_UPI_PCI_PMON_CTL0,
+ .perf_ctr = ICX_UPI_PCI_PMON_CTR0 - ICX_UPI_PCI_PMON_BOX_CTL,
+ .event_ctl = ICX_UPI_PCI_PMON_CTL0 - ICX_UPI_PCI_PMON_BOX_CTL,
.box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
.pci_offsets = spr_upi_pci_offsets,
+ .cleanup_extra_boxes = spr_extra_boxes_cleanup,
};
static struct intel_uncore_type spr_uncore_m3upi = {
@@ -6225,11 +6298,12 @@ static struct intel_uncore_type spr_uncore_m3upi = {
.num_counters = 4,
.num_boxes = SPR_UNCORE_UPI_NUM_BOXES,
.perf_ctr_bits = 48,
- .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0,
- .event_ctl = ICX_M3UPI_PCI_PMON_CTL0,
+ .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
+ .event_ctl = ICX_M3UPI_PCI_PMON_CTL0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
.box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
.pci_offsets = spr_upi_pci_offsets,
.constraints = icx_uncore_m3upi_constraints,
+ .cleanup_extra_boxes = spr_extra_boxes_cleanup,
};
enum perf_uncore_spr_iio_freerunning_type_id {
@@ -6460,18 +6534,21 @@ uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
static int uncore_type_max_boxes(struct intel_uncore_type **types,
int type_id)
{
+ struct intel_uncore_discovery_unit *unit;
struct intel_uncore_type *type;
- int i, max = 0;
+ struct rb_node *node;
+ int max = 0;
type = uncore_find_type_by_id(types, type_id);
if (!type)
return 0;
- for (i = 0; i < type->num_boxes; i++) {
- if (type->box_ids[i] > max)
- max = type->box_ids[i];
- }
+ for (node = rb_first(type->boxes); node; node = rb_next(node)) {
+ unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
+ if (unit->id > max)
+ max = unit->id;
+ }
return max + 1;
}
@@ -6513,10 +6590,11 @@ void spr_uncore_cpu_init(void)
static void spr_update_device_location(int type_id)
{
+ struct intel_uncore_discovery_unit *unit;
struct intel_uncore_type *type;
struct pci_dev *dev = NULL;
+ struct rb_root *root;
u32 device, devfn;
- u64 *ctls;
int die;
if (type_id == UNCORE_SPR_UPI) {
@@ -6530,27 +6608,35 @@ static void spr_update_device_location(int type_id)
} else
return;
- ctls = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
- if (!ctls) {
+ root = kzalloc(sizeof(struct rb_root), GFP_KERNEL);
+ if (!root) {
type->num_boxes = 0;
return;
}
+ *root = RB_ROOT;
while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
- if (devfn != dev->devfn)
- continue;
die = uncore_device_to_die(dev);
if (die < 0)
continue;
- ctls[die] = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
- dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
- devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
- type->box_ctl;
+ unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+ if (!unit)
+ continue;
+ unit->die = die;
+ unit->id = PCI_SLOT(dev->devfn) - PCI_SLOT(devfn);
+ unit->addr = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
+ dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
+ devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
+ type->box_ctl;
+
+ unit->pmu_idx = unit->id;
+
+ uncore_find_add_unit(unit, root, NULL);
}
- type->box_ctls = ctls;
+ type->boxes = root;
}
int spr_uncore_pci_init(void)
@@ -6623,7 +6709,7 @@ static struct intel_uncore_type gnr_uncore_b2cmi = {
};
static struct intel_uncore_type gnr_uncore_b2cxl = {
- SPR_UNCORE_MMIO_COMMON_FORMAT(),
+ SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
.name = "b2cxl",
};
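Editorial aside (not part of the patch): in the SPR/GNR hunks above, .perf_ctr and .event_ctl become offsets relative to the box control register, so a counter's address is simply the unit control address reported by discovery plus a fixed offset, as intel_generic_uncore_assign_hw_event() computes. A small standalone sketch of that base-plus-offset addressing; all constants are invented:

/* Sketch of base-plus-offset register addressing: per-unit control
 * address from discovery, fixed per-counter offsets on top.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CTL0_OFFSET 0x318u  /* event select 0, relative to box ctl */
#define EXAMPLE_CTR0_OFFSET 0x320u  /* counter 0, relative to box ctl      */
#define EXAMPLE_REG_STRIDE  0x8u    /* spacing between counters            */

struct hw_event {
	uint64_t config_base;
	uint64_t event_base;
};

static void assign_hw_event(struct hw_event *hwc, uint64_t box_ctl, int idx)
{
	/* Everything derives from the unit control address, so the same
	 * code works for whichever boxes the discovery table reports.
	 */
	hwc->config_base = box_ctl + EXAMPLE_CTL0_OFFSET + idx * EXAMPLE_REG_STRIDE;
	hwc->event_base  = box_ctl + EXAMPLE_CTR0_OFFSET + idx * EXAMPLE_REG_STRIDE;
}

int main(void)
{
	struct hw_event hwc;

	assign_hw_event(&hwc, 0xfe000000ULL, 2);  /* unit ctl addr, counter 2 */
	printf("ctl=0x%llx ctr=0x%llx\n",
	       (unsigned long long)hwc.config_base,
	       (unsigned long long)hwc.event_base);
	return 0;
}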
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 72b022a1e16c..ac1182141bf6 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -476,6 +476,14 @@ struct cpu_hw_events {
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID)
+#define INTEL_HYBRID_LDLAT_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID|PERF_X86_EVENT_PEBS_LD_HSW)
+
+#define INTEL_HYBRID_STLAT_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID|PERF_X86_EVENT_PEBS_ST_HSW)
+
/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
@@ -655,8 +663,10 @@ enum {
x86_lbr_exclusive_max,
};
-#define PERF_PEBS_DATA_SOURCE_MAX 0x10
+#define PERF_PEBS_DATA_SOURCE_MAX 0x100
#define PERF_PEBS_DATA_SOURCE_MASK (PERF_PEBS_DATA_SOURCE_MAX - 1)
+#define PERF_PEBS_DATA_SOURCE_GRT_MAX 0x10
+#define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
enum hybrid_cpu_type {
HYBRID_INTEL_NONE,
@@ -684,9 +694,16 @@ struct x86_hybrid_pmu {
cpumask_t supported_cpus;
union perf_capabilities intel_cap;
u64 intel_ctrl;
- int max_pebs_events;
- int num_counters;
- int num_counters_fixed;
+ u64 pebs_events_mask;
+ u64 config_mask;
+ union {
+ u64 cntr_mask64;
+ unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
+ union {
+ u64 fixed_cntr_mask64;
+ unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
struct event_constraint unconstrained;
u64 hw_cache_event_ids
@@ -770,12 +787,20 @@ struct x86_pmu {
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
+ unsigned fixedctr;
int (*addr_offset)(int index, bool eventsel);
int (*rdpmc_index)(int index);
u64 (*event_map)(int);
int max_events;
- int num_counters;
- int num_counters_fixed;
+ u64 config_mask;
+ union {
+ u64 cntr_mask64;
+ unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
+ union {
+ u64 fixed_cntr_mask64;
+ unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
int cntval_bits;
u64 cntval_mask;
union {
@@ -852,7 +877,7 @@ struct x86_pmu {
pebs_ept :1;
int pebs_record_size;
int pebs_buffer_size;
- int max_pebs_events;
+ u64 pebs_events_mask;
void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
@@ -1120,13 +1145,19 @@ static inline unsigned int x86_pmu_event_addr(int index)
x86_pmu.addr_offset(index, false) : index);
}
+static inline unsigned int x86_pmu_fixed_ctr_addr(int index)
+{
+ return x86_pmu.fixedctr + (x86_pmu.addr_offset ?
+ x86_pmu.addr_offset(index, false) : index);
+}
+
static inline int x86_pmu_rdpmc_index(int index)
{
return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
-bool check_hw_exists(struct pmu *pmu, int num_counters,
- int num_counters_fixed);
+bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
+ unsigned long *fixed_cntr_mask);
int x86_add_exclusive(unsigned int what);
@@ -1197,8 +1228,32 @@ void x86_pmu_enable_event(struct perf_event *event);
int x86_pmu_handle_irq(struct pt_regs *regs);
-void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
- u64 intel_ctrl);
+void x86_pmu_show_pmu_cap(struct pmu *pmu);
+
+static inline int x86_pmu_num_counters(struct pmu *pmu)
+{
+ return hweight64(hybrid(pmu, cntr_mask64));
+}
+
+static inline int x86_pmu_max_num_counters(struct pmu *pmu)
+{
+ return fls64(hybrid(pmu, cntr_mask64));
+}
+
+static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
+{
+ return hweight64(hybrid(pmu, fixed_cntr_mask64));
+}
+
+static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
+{
+ return fls64(hybrid(pmu, fixed_cntr_mask64));
+}
+
+static inline u64 x86_pmu_get_event_config(struct perf_event *event)
+{
+ return event->attr.config & hybrid(event->pmu, config_mask);
+}
extern struct event_constraint emptyconstraint;
@@ -1517,9 +1572,11 @@ void intel_pmu_disable_bts(void);
int intel_pmu_drain_bts_buffer(void);
-u64 adl_latency_data_small(struct perf_event *event, u64 status);
+u64 grt_latency_data(struct perf_event *event, u64 status);
-u64 mtl_latency_data_small(struct perf_event *event, u64 status);
+u64 cmt_latency_data(struct perf_event *event, u64 status);
+
+u64 lnl_latency_data(struct perf_event *event, u64 status);
extern struct event_constraint intel_core2_pebs_event_constraints[];
@@ -1551,6 +1608,8 @@ extern struct event_constraint intel_icl_pebs_event_constraints[];
extern struct event_constraint intel_glc_pebs_event_constraints[];
+extern struct event_constraint intel_lnc_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_add(struct perf_event *event);
@@ -1640,6 +1699,8 @@ void intel_pmu_pebs_data_source_mtl(void);
void intel_pmu_pebs_data_source_cmt(void);
+void intel_pmu_pebs_data_source_lnl(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
@@ -1661,6 +1722,17 @@ static inline int is_ht_workaround_enabled(void)
return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}
+static inline u64 intel_pmu_pebs_mask(u64 cntr_mask)
+{
+ return MAX_PEBS_EVENTS_MASK & cntr_mask;
+}
+
+static inline int intel_pmu_max_num_pebs(struct pmu *pmu)
+{
+ static_assert(MAX_PEBS_EVENTS == 32);
+ return fls((u32)hybrid(pmu, pebs_events_mask));
+}
+
#else /* CONFIG_CPU_SUP_INTEL */
static inline void reserve_ds_buffers(void)
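Editorial aside (not part of the patch): perf_event.h above also pairs each new mask with a bitmap view through an anonymous union, so the same storage can be used as one u64 for mask arithmetic (e.g. building intel_ctrl) and walked bit by bit when programming counters. A standalone sketch of that dual view — little-endian/LP64 assumed, as on x86, and the names are invented:

/* Sketch of the u64 <-> bitmap dual view of a counter mask. */
#include <stdint.h>
#include <stdio.h>

#define PMC_IDX_MAX   64
#define BITS_PER_LONG (8 * sizeof(unsigned long))

union counter_mask {
	uint64_t mask64;
	unsigned long bits[(PMC_IDX_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

int main(void)
{
	union counter_mask cntr = { .mask64 = 0xf3 };  /* counters 0,1,4..7 */

	/* Whole-mask use, e.g. folding into an enable-register image. */
	printf("ctrl image: 0x%llx\n", (unsigned long long)cntr.mask64);

	/* Same storage viewed as a bitmap word (LP64: bits[0] == mask64). */
	printf("bitmap word 0: 0x%lx\n", cntr.bits[0]);

	/* Per-bit walk, the open-coded shape of for_each_set_bit(). */
	for (uint64_t m = cntr.mask64; m; m &= m - 1)
		printf("program counter %d\n", __builtin_ctzll(m));

	return 0;
}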
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 46e673585560..b985ca79cf97 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -64,6 +64,7 @@
#include "perf_event.h"
#include "probe.h"
+MODULE_DESCRIPTION("Support Intel/AMD RAPL energy consumption counters");
MODULE_LICENSE("GPL");
/*
@@ -764,51 +765,51 @@ static struct rapl_model model_amd_hygon = {
};
static const struct x86_cpu_id rapl_model_match[] __initconst = {
- X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &model_snb),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &model_snbep),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &model_snb),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &model_snbep),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &model_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &model_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &model_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &model_knl),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &model_knl),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &model_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &model_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &model_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &model_spr),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &model_skl),
+ X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &model_snb),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &model_snbep),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &model_snb),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &model_snbep),
+ X86_MATCH_VFM(INTEL_HASWELL, &model_hsw),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &model_hsx),
+ X86_MATCH_VFM(INTEL_HASWELL_L, &model_hsw),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &model_hsw),
+ X86_MATCH_VFM(INTEL_BROADWELL, &model_hsw),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &model_hsw),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &model_hsx),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &model_hsx),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &model_knl),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &model_knl),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &model_hsx),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &model_hsw),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &model_hsw),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &model_hsw),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_ICELAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &model_hsx),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &model_hsx),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &model_skl),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &model_spr),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &model_spr),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &model_skl),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &model_skl),
+ X86_MATCH_VFM(INTEL_METEORLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
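
The X86_MATCH_VFM() conversion above keys each table entry on a single packed vendor/family/model (VFM) constant instead of an implicit family-6 model macro. A minimal sketch of the packing idea, with hypothetical EX_* names and illustrative bit positions (neither is taken from this diff):

	#define EX_VFM_MODEL_BIT	0
	#define EX_VFM_FAMILY_BIT	8
	#define EX_VFM_VENDOR_BIT	16

	/* Pack vendor, family and model into one value... */
	#define EX_VFM_MAKE(vendor, family, model)			\
		(((vendor) << EX_VFM_VENDOR_BIT) |			\
		 ((family) << EX_VFM_FAMILY_BIT) |			\
		 ((model)  << EX_VFM_MODEL_BIT))

	/* ...and pull the three fields back out. */
	#define EX_VFM_MODEL(vfm)	(((vfm) >> EX_VFM_MODEL_BIT)  & 0xff)
	#define EX_VFM_FAMILY(vfm)	(((vfm) >> EX_VFM_FAMILY_BIT) & 0xff)
	#define EX_VFM_VENDOR(vfm)	(((vfm) >> EX_VFM_VENDOR_BIT) & 0xff)

Matching on a constant such as INTEL_SANDYBRIDGE therefore checks vendor, family and model together, rather than encoding "family 6" in the macro name itself.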
diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c
index 3e9acdaeed1e..2fd9b0cf9a5e 100644
--- a/arch/x86/events/zhaoxin/core.c
+++ b/arch/x86/events/zhaoxin/core.c
@@ -530,13 +530,13 @@ __init int zhaoxin_pmu_init(void)
pr_info("Version check pass!\n");
x86_pmu.version = version;
- x86_pmu.num_counters = eax.split.num_counters;
+ x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
x86_pmu.events_maskl = ebx.full;
x86_pmu.events_mask_len = eax.split.mask_length;
- x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
+ x86_pmu.fixed_cntr_mask64 = GENMASK_ULL(edx.split.num_counters_fixed - 1, 0);
x86_add_quirk(zhaoxin_arch_events_quirk);
switch (boot_cpu_data.x86) {
@@ -604,13 +604,13 @@ __init int zhaoxin_pmu_init(void)
return -ENODEV;
}
- x86_pmu.intel_ctrl = (1 << (x86_pmu.num_counters)) - 1;
- x86_pmu.intel_ctrl |= ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+ x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
+ x86_pmu.intel_ctrl |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
- c->weight += x86_pmu.num_counters;
+ c->idxmsk64 |= x86_pmu.cntr_mask64;
+ c->weight += x86_pmu_num_counters(NULL);
}
}
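
The zhaoxin change above follows the same counter-mask conversion as the rest of the x86 PMU code: a bitmask of available counters replaces the plain count. A small sketch of the equivalence, assuming only GENMASK_ULL() from <linux/bits.h> and hweight64() from <linux/bitops.h>:

	#include <linux/bits.h>
	#include <linux/bitops.h>

	static int ex_counter_mask_demo(int num_counters)
	{
		/* Bit i set means general-purpose counter i exists. */
		u64 cntr_mask64 = GENMASK_ULL(num_counters - 1, 0);

		/* The plain count is still recoverable from the mask. */
		return hweight64(cntr_mask64);	/* == num_counters */
	}

Presumably x86_pmu_num_counters() performs an equivalent population count over the mask, which is why the constraint weight above can keep adding a counter count while idxmsk64 takes the mask directly.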
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 768d73de0d09..b4a851d27c7c 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -523,9 +523,9 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
* transition is complete, hv_vtom_set_host_visibility() marks the pages
* as "present" again.
*/
-static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
+static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{
- return !set_memory_np(kbuffer, pagecount);
+ return set_memory_np(kbuffer, pagecount);
}
/*
@@ -536,20 +536,19 @@ static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc
* with host. This function works as a wrapper around hv_mark_gpa_visibility()
* with memory base and size.
*/
-static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
+static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
enum hv_mem_host_visibility visibility = enc ?
VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
u64 *pfn_array;
phys_addr_t paddr;
+ int i, pfn, err;
void *vaddr;
int ret = 0;
- bool result = true;
- int i, pfn;
pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!pfn_array) {
- result = false;
+ ret = -ENOMEM;
goto err_set_memory_p;
}
@@ -568,10 +567,8 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
ret = hv_mark_gpa_visibility(pfn, pfn_array,
visibility);
- if (ret) {
- result = false;
+ if (ret)
goto err_free_pfn_array;
- }
pfn = 0;
}
}
@@ -586,10 +583,11 @@ err_set_memory_p:
* order to avoid leaving the memory range in a "broken" state. Setting
* the PRESENT bits shouldn't fail, but return an error if it does.
*/
- if (set_memory_p(kbuffer, pagecount))
- result = false;
+ err = set_memory_p(kbuffer, pagecount);
+ if (err && !ret)
+ ret = err;
- return result;
+ return ret;
}
static bool hv_vtom_tlb_flush_required(bool private)
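
The rework above converts hv_vtom_set_host_visibility() from a bool success flag to the usual 0/-errno convention and takes care to report the first failure even when the clean-up path fails as well. A generic sketch of that pattern, with hypothetical ex_*() helpers standing in for the real calls:

	static int ex_update_then_restore(void)
	{
		int ret, err;

		ret = ex_do_update();		/* hypothetical: 0 or -errno */
		err = ex_restore_state();	/* hypothetical clean-up: 0 or -errno */

		/* Only report the clean-up error if the update itself succeeded. */
		if (err && !ret)
			ret = err;

		return ret;
	}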
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 5af926c050f0..21bc53f5ed0c 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -78,6 +78,13 @@ static inline bool acpi_skip_set_wakeup_address(void)
#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
+union acpi_subtable_headers;
+
+int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
+ const unsigned long end);
+
+void asm_acpi_mp_play_dead(u64 reset_vector, u64 pgd_pa);
+
/*
* Check if the CPU can handle C2 and deeper
*/
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index ba99ef75f56c..ca9ae606aab9 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -156,102 +156,50 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define ALT_CALL_INSTR "call BUG_func"
-#define b_replacement(num) "664"#num
-#define e_replacement(num) "665"#num
+#define alt_slen "772b-771b"
+#define alt_total_slen "773b-771b"
+#define alt_rlen "775f-774f"
-#define alt_end_marker "663"
-#define alt_slen "662b-661b"
-#define alt_total_slen alt_end_marker"b-661b"
-#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
-
-#define OLDINSTR(oldinstr, num) \
- "# ALT: oldnstr\n" \
- "661:\n\t" oldinstr "\n662:\n" \
+#define OLDINSTR(oldinstr) \
+ "# ALT: oldinstr\n" \
+ "771:\n\t" oldinstr "\n772:\n" \
"# ALT: padding\n" \
- ".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
- "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" \
- alt_end_marker ":\n"
-
-/*
- * gas compatible max based on the idea from:
- * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
- *
- * The additional "-" is needed because gas uses a "true" value of -1.
- */
-#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
-
-/*
- * Pad the second replacement alternative with additional NOPs if it is
- * additionally longer than the first replacement alternative.
- */
-#define OLDINSTR_2(oldinstr, num1, num2) \
- "# ALT: oldinstr2\n" \
- "661:\n\t" oldinstr "\n662:\n" \
- "# ALT: padding2\n" \
- ".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
- "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
- alt_end_marker ":\n"
-
-#define OLDINSTR_3(oldinsn, n1, n2, n3) \
- "# ALT: oldinstr3\n" \
- "661:\n\t" oldinsn "\n662:\n" \
- "# ALT: padding3\n" \
- ".skip -((" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \
- " - (" alt_slen ")) > 0) * " \
- "(" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \
- " - (" alt_slen ")), 0x90\n" \
- alt_end_marker ":\n"
-
-#define ALTINSTR_ENTRY(ft_flags, num) \
- " .long 661b - .\n" /* label */ \
- " .long " b_replacement(num)"f - .\n" /* new instruction */ \
+ ".skip -(((" alt_rlen ")-(" alt_slen ")) > 0) * " \
+ "((" alt_rlen ")-(" alt_slen ")),0x90\n" \
+ "773:\n"
+
+#define ALTINSTR_ENTRY(ft_flags) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ " .long 771b - .\n" /* label */ \
+ " .long 774f - .\n" /* new instruction */ \
" .4byte " __stringify(ft_flags) "\n" /* feature + flags */ \
" .byte " alt_total_slen "\n" /* source len */ \
- " .byte " alt_rlen(num) "\n" /* replacement len */
+ " .byte " alt_rlen "\n" /* replacement len */ \
+ ".popsection\n"
-#define ALTINSTR_REPLACEMENT(newinstr, num) /* replacement */ \
- "# ALT: replacement " #num "\n" \
- b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n"
+#define ALTINSTR_REPLACEMENT(newinstr) /* replacement */ \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ "# ALT: replacement\n" \
+ "774:\n\t" newinstr "\n775:\n" \
+ ".popsection\n"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, ft_flags) \
- OLDINSTR(oldinstr, 1) \
- ".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(ft_flags, 1) \
- ".popsection\n" \
- ".pushsection .altinstr_replacement, \"ax\"\n" \
- ALTINSTR_REPLACEMENT(newinstr, 1) \
- ".popsection\n"
+ OLDINSTR(oldinstr) \
+ ALTINSTR_ENTRY(ft_flags) \
+ ALTINSTR_REPLACEMENT(newinstr)
#define ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2) \
- OLDINSTR_2(oldinstr, 1, 2) \
- ".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(ft_flags1, 1) \
- ALTINSTR_ENTRY(ft_flags2, 2) \
- ".popsection\n" \
- ".pushsection .altinstr_replacement, \"ax\"\n" \
- ALTINSTR_REPLACEMENT(newinstr1, 1) \
- ALTINSTR_REPLACEMENT(newinstr2, 2) \
- ".popsection\n"
+ ALTERNATIVE(ALTERNATIVE(oldinstr, newinstr1, ft_flags1), newinstr2, ft_flags2)
/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
#define ALTERNATIVE_TERNARY(oldinstr, ft_flags, newinstr_yes, newinstr_no) \
- ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \
- newinstr_yes, ft_flags)
-
-#define ALTERNATIVE_3(oldinsn, newinsn1, ft_flags1, newinsn2, ft_flags2, \
- newinsn3, ft_flags3) \
- OLDINSTR_3(oldinsn, 1, 2, 3) \
- ".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(ft_flags1, 1) \
- ALTINSTR_ENTRY(ft_flags2, 2) \
- ALTINSTR_ENTRY(ft_flags3, 3) \
- ".popsection\n" \
- ".pushsection .altinstr_replacement, \"ax\"\n" \
- ALTINSTR_REPLACEMENT(newinsn1, 1) \
- ALTINSTR_REPLACEMENT(newinsn2, 2) \
- ALTINSTR_REPLACEMENT(newinsn3, 3) \
- ".popsection\n"
+ ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS, newinstr_yes, ft_flags)
+
+#define ALTERNATIVE_3(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2, \
+ newinstr3, ft_flags3) \
+ ALTERNATIVE(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2), \
+ newinstr3, ft_flags3)
/*
* Alternative instructions for different CPU types or capabilities.
@@ -266,14 +214,11 @@ static inline int alternatives_text_reserved(void *start, void *end)
* without volatile and memory clobber.
*/
#define alternative(oldinstr, newinstr, ft_flags) \
- asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, ft_flags) : : : "memory")
+ asm_inline volatile(ALTERNATIVE(oldinstr, newinstr, ft_flags) : : : "memory")
#define alternative_2(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2) \
asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2) ::: "memory")
-#define alternative_ternary(oldinstr, ft_flags, newinstr_yes, newinstr_no) \
- asm_inline volatile(ALTERNATIVE_TERNARY(oldinstr, ft_flags, newinstr_yes, newinstr_no) ::: "memory")
-
/*
* Alternative inline assembly with input.
*
@@ -283,18 +228,28 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Leaving an unused argument 0 to keep API compatibility.
*/
#define alternative_input(oldinstr, newinstr, ft_flags, input...) \
- asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, ft_flags) \
+ asm_inline volatile(ALTERNATIVE(oldinstr, newinstr, ft_flags) \
: : "i" (0), ## input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, ft_flags, output, input...) \
- asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, ft_flags) \
+ asm_inline volatile(ALTERNATIVE(oldinstr, newinstr, ft_flags) \
: output : "i" (0), ## input)
-/* Like alternative_io, but for replacing a direct call with another one. */
-#define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
- asm_inline volatile (ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
- : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+/*
+ * Like alternative_io, but for replacing a direct call with another one.
+ *
+ * Use the %c operand modifier which is the generic way to print a bare
+ * constant expression with all syntax-specific punctuation omitted. %P
+ * is the x86-specific variant which can handle constants too, for
+ * historical reasons, but it should be used primarily for PIC
+ * references: i.e., if used for a function, it would add the PLT
+ * suffix.
+ */
+#define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
+ asm_inline volatile(ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
+ : ALT_OUTPUT_SP(output) \
+ : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
* Like alternative_call, but there are two features and respective functions.
@@ -302,12 +257,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Otherwise, if CPU has feature1, function1 is used.
* Otherwise, old function is used.
*/
-#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
- output, input...) \
- asm_inline volatile (ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
- "call %c[new2]", ft_flags2) \
- : output, ASM_CALL_CONSTRAINT \
- : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
+ output, input...) \
+ asm_inline volatile(ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
+ "call %c[new2]", ft_flags2) \
+ : ALT_OUTPUT_SP(output) \
+ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input)
/*
@@ -322,6 +277,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
*/
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+#define ALT_OUTPUT_SP(...) ASM_CALL_CONSTRAINT, ## __VA_ARGS__
+
/* Macro for creating assembler functions avoiding any C magic. */
#define DEFINE_ASM_FUNC(func, instr, sec) \
asm (".pushsection " #sec ", \"ax\"\n" \
@@ -388,22 +345,23 @@ void nop_func(void);
* @newinstr. ".skip" directive takes care of proper instruction padding
* in case @newinstr is longer than @oldinstr.
*/
-.macro ALTERNATIVE oldinstr, newinstr, ft_flags
-140:
- \oldinstr
-141:
- .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
-142:
-
- .pushsection .altinstructions,"a"
- altinstr_entry 140b,143f,\ft_flags,142b-140b,144f-143f
- .popsection
+#define __ALTERNATIVE(oldinst, newinst, flag) \
+740: \
+ oldinst ; \
+741: \
+ .skip -(((744f-743f)-(741b-740b)) > 0) * ((744f-743f)-(741b-740b)),0x90 ;\
+742: \
+ .pushsection .altinstructions,"a" ; \
+ altinstr_entry 740b,743f,flag,742b-740b,744f-743f ; \
+ .popsection ; \
+ .pushsection .altinstr_replacement,"ax" ; \
+743: \
+ newinst ; \
+744: \
+ .popsection ;
- .pushsection .altinstr_replacement,"ax"
-143:
- \newinstr
-144:
- .popsection
+.macro ALTERNATIVE oldinstr, newinstr, ft_flags
+ __ALTERNATIVE(\oldinstr, \newinstr, \ft_flags)
.endm
#define old_len 141b-140b
@@ -412,65 +370,18 @@ void nop_func(void);
#define new_len3 146f-145f
/*
- * gas compatible max based on the idea from:
- * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
- *
- * The additional "-" is needed because gas uses a "true" value of -1.
- */
-#define alt_max_2(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
-#define alt_max_3(a, b, c) (alt_max_2(alt_max_2(a, b), c))
-
-
-/*
* Same as ALTERNATIVE macro above but for two alternatives. If CPU
* has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
* @feature2, it replaces @oldinstr with @newinstr2.
*/
.macro ALTERNATIVE_2 oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2
-140:
- \oldinstr
-141:
- .skip -((alt_max_2(new_len1, new_len2) - (old_len)) > 0) * \
- (alt_max_2(new_len1, new_len2) - (old_len)),0x90
-142:
-
- .pushsection .altinstructions,"a"
- altinstr_entry 140b,143f,\ft_flags1,142b-140b,144f-143f
- altinstr_entry 140b,144f,\ft_flags2,142b-140b,145f-144f
- .popsection
-
- .pushsection .altinstr_replacement,"ax"
-143:
- \newinstr1
-144:
- \newinstr2
-145:
- .popsection
+ __ALTERNATIVE(__ALTERNATIVE(\oldinstr, \newinstr1, \ft_flags1),
+ \newinstr2, \ft_flags2)
.endm
.macro ALTERNATIVE_3 oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2, newinstr3, ft_flags3
-140:
- \oldinstr
-141:
- .skip -((alt_max_3(new_len1, new_len2, new_len3) - (old_len)) > 0) * \
- (alt_max_3(new_len1, new_len2, new_len3) - (old_len)),0x90
-142:
-
- .pushsection .altinstructions,"a"
- altinstr_entry 140b,143f,\ft_flags1,142b-140b,144f-143f
- altinstr_entry 140b,144f,\ft_flags2,142b-140b,145f-144f
- altinstr_entry 140b,145f,\ft_flags3,142b-140b,146f-145f
- .popsection
-
- .pushsection .altinstr_replacement,"ax"
-143:
- \newinstr1
-144:
- \newinstr2
-145:
- \newinstr3
-146:
- .popsection
+ __ALTERNATIVE(ALTERNATIVE_2(\oldinstr, \newinstr1, \ft_flags1, \newinstr2, \ft_flags2),
+ \newinstr3, \ft_flags3)
.endm
/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
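
Both the C string macros and the assembler .macro versions above now derive ALTERNATIVE_2/ALTERNATIVE_3 by feeding the output of one ALTERNATIVE back in as the "old" instruction of the next level. A toy string-pasting model of that nesting (the EX_* macros below are purely illustrative and emit none of the real section bookkeeping):

	/* Each level appends one replacement record to whatever it is given. */
	#define EX_ALT(old, new)	old "|alt:" new
	#define EX_ALT_2(old, n1, n2)	EX_ALT(EX_ALT(old, n1), n2)

	/*
	 * EX_ALT_2("insn", "a", "b") expands to
	 *	"insn" "|alt:" "a" "|alt:" "b"
	 * i.e. the inner expansion becomes the outer level's old text, so the
	 * padding .skip and the .altinstructions entry are emitted once per
	 * level instead of needing hand-written two- and three-way variants.
	 */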
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 5c37944c8a5e..6f3b6aef47ba 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -21,8 +21,8 @@ extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
extern int amd_set_subcaches(int, unsigned long);
-extern int amd_smn_read(u16 node, u32 address, u32 *value);
-extern int amd_smn_write(u16 node, u32 address, u32 value);
+int __must_check amd_smn_read(u16 node, u32 address, u32 *value);
+int __must_check amd_smn_write(u16 node, u32 address, u32 value);
struct amd_l3_cache {
unsigned indices;
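
With amd_smn_read()/amd_smn_write() now tagged __must_check, a caller that drops the return value gets a -Wunused-result warning. A minimal caller sketch (the wrapper name is made up):

	static int ex_read_smn_reg(u16 node, u32 address, u32 *value)
	{
		int err = amd_smn_read(node, address, value);

		if (err)		/* silently ignoring err would now warn */
			return err;

		return 0;
	}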
diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h
index 7cd752557905..31d19c815f99 100644
--- a/arch/x86/include/asm/cfi.h
+++ b/arch/x86/include/asm/cfi.h
@@ -93,7 +93,7 @@
*
*/
enum cfi_mode {
- CFI_DEFAULT, /* FineIBT if hardware has IBT, otherwise kCFI */
+ CFI_AUTO, /* FineIBT if hardware has IBT, otherwise kCFI */
CFI_OFF, /* Traditional / IBT depending on .config */
CFI_KCFI, /* Optionally CALL_PADDING, IBT, RETPOLINE */
CFI_FINEIBT, /* see arch/x86/kernel/alternative.c */
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ed2797f132ce..62cef2113ca7 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -93,10 +93,9 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
\
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
- _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
- : [ptr] "+m" (*(_ptr)), \
- "+a" (o.low), "+d" (o.high) \
- : "b" (n.low), "c" (n.high), "S" (_ptr) \
+ _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
+ : "+a" (o.low), "+d" (o.high) \
+ : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
o.full; \
@@ -122,12 +121,11 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
\
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
- _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+ _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
CC_SET(e) \
: CC_OUT(e) (ret), \
- [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
- : "b" (n.low), "c" (n.high), "S" (_ptr) \
+ : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
if (unlikely(!ret)) \
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index b6325ee30871..3831f612e89c 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -280,10 +280,10 @@ struct x86_cpu_desc {
u32 x86_microcode_rev;
};
-#define INTEL_CPU_DESC(model, stepping, revision) { \
- .x86_family = 6, \
- .x86_vendor = X86_VENDOR_INTEL, \
- .x86_model = (model), \
+#define INTEL_CPU_DESC(vfm, stepping, revision) { \
+ .x86_family = VFM_FAMILY(vfm), \
+ .x86_vendor = VFM_VENDOR(vfm), \
+ .x86_model = VFM_MODEL(vfm), \
.x86_stepping = (stepping), \
.x86_microcode_rev = (revision), \
}
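
With the VFM-based INTEL_CPU_DESC() above, descriptor tables name the CPU by its packed VFM constant rather than a bare family-6 model number. An illustrative entry (the stepping and microcode revision below are made-up example values):

	static const struct x86_cpu_desc ex_ucode_table[] = {
		INTEL_CPU_DESC(INTEL_SKYLAKE_X, 4, 0x02000014),
		{},
	};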
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 3c7434329661..dd4682857c12 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -18,170 +18,170 @@
/*
* Note: If the comment begins with a quoted string, that string is used
- * in /proc/cpuinfo instead of the macro name. If the string is "",
- * this feature bit is not displayed in /proc/cpuinfo at all.
+ * in /proc/cpuinfo instead of the macro name. Otherwise, this feature
+ * bit is not displayed in /proc/cpuinfo at all.
*
* When adding new features here that depend on other features,
* please update the table in kernel/cpu/cpuid-deps.c as well.
*/
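
Under the updated rule above, a feature bit shows up in /proc/cpuinfo only when its comment starts with a quoted name, so the old trick of hiding a flag with an empty "" string is gone. Two hypothetical entries illustrating the new convention (word and bit positions are made up):

	#define X86_FEATURE_EX_SHOWN	(99*32+ 0) /* "ex_shown" Displayed as "ex_shown" in /proc/cpuinfo */
	#define X86_FEATURE_EX_HIDDEN	(99*32+ 1) /* Synthetic flag, never displayed in /proc/cpuinfo */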
/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* "fpu" Onboard FPU */
+#define X86_FEATURE_VME ( 0*32+ 1) /* "vme" Virtual Mode Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* "de" Debugging Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* "pse" Page Size Extensions */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* "tsc" Time Stamp Counter */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* "msr" Model-Specific Registers */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* "pae" Physical Address Extensions */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* "mce" Machine Check Exception */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* "cx8" CMPXCHG8 instruction */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* "apic" Onboard APIC */
+#define X86_FEATURE_SEP ( 0*32+11) /* "sep" SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR ( 0*32+12) /* "mtrr" Memory Type Range Registers */
+#define X86_FEATURE_PGE ( 0*32+13) /* "pge" Page Global Enable */
+#define X86_FEATURE_MCA ( 0*32+14) /* "mca" Machine Check Architecture */
+#define X86_FEATURE_CMOV ( 0*32+15) /* "cmov" CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT ( 0*32+16) /* "pat" Page Attribute Table */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* "pse36" 36-bit PSEs */
+#define X86_FEATURE_PN ( 0*32+18) /* "pn" Processor serial number */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* "clflush" CLFLUSH instruction */
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_ACPI ( 0*32+22) /* "acpi" ACPI via MSR */
+#define X86_FEATURE_MMX ( 0*32+23) /* "mmx" Multimedia Extensions */
+#define X86_FEATURE_FXSR ( 0*32+24) /* "fxsr" FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_HT ( 0*32+28) /* "ht" Hyper-Threading */
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+#define X86_FEATURE_IA64 ( 0*32+30) /* "ia64" IA-64 processor */
+#define X86_FEATURE_PBE ( 0*32+31) /* "pbe" Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* "syscall" SYSCALL/SYSRET */
+#define X86_FEATURE_MP ( 1*32+19) /* "mp" MP Capable */
+#define X86_FEATURE_NX ( 1*32+20) /* "nx" Execute Disable */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* "mmxext" AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* "fxsr_opt" FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* "rdtscp" RDTSCP */
+#define X86_FEATURE_LM ( 1*32+29) /* "lm" Long Mode (x86-64, 64-bit support) */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* "3dnowext" AMD 3DNow extensions */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* "3dnow" 3DNow */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* "recovery" CPU in recovery mode */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* "longrun" Longrun power control */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* "lrti" LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
-#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
-#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
-#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
-#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* "cxmmx" Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* "k6_mtrr" AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* "cyrix_arr" Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
+#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* P3 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
+#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
+#define X86_FEATURE_ART ( 3*32+10) /* "art" Always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* "arch_perfmon" Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */
+#define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */
+#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
+#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */
+#define X86_FEATURE_ACC_POWER ( 3*32+19) /* "acc_power" AMD Accumulated Power Mechanism */
+#define X86_FEATURE_NOPL ( 3*32+20) /* "nopl" The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* Always-present feature */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* "xtopology" CPU topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* "tsc_reliable" TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* "nonstop_tsc" TSC does not stop in C states */
+#define X86_FEATURE_CPUID ( 3*32+25) /* "cpuid" CPU has CPUID instruction itself */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* "extd_apicid" Extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* "amd_dcm" AMD multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* "aperfmperf" P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* "rapl" AMD/Hygon RAPL interface */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* "nonstop_tsc_s3" TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* "tsc_known_freq" TSC has known frequency */
/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* "pclmulqdq" PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* "dtes64" 64-bit Debug Store */
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* "vmx" Hardware virtualization */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* "smx" Safer Mode eXtensions */
+#define X86_FEATURE_EST ( 4*32+ 7) /* "est" Enhanced SpeedStep */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* "tm2" Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* "ssse3" Supplemental SSE-3 */
+#define X86_FEATURE_CID ( 4*32+10) /* "cid" Context ID */
+#define X86_FEATURE_SDBG ( 4*32+11) /* "sdbg" Silicon Debug */
+#define X86_FEATURE_FMA ( 4*32+12) /* "fma" Fused multiply-add */
+#define X86_FEATURE_CX16 ( 4*32+13) /* "cx16" CMPXCHG16B instruction */
+#define X86_FEATURE_XTPR ( 4*32+14) /* "xtpr" Send Task Priority Messages */
+#define X86_FEATURE_PDCM ( 4*32+15) /* "pdcm" Perf/Debug Capabilities MSR */
+#define X86_FEATURE_PCID ( 4*32+17) /* "pcid" Process Context Identifiers */
+#define X86_FEATURE_DCA ( 4*32+18) /* "dca" Direct Cache Access */
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
-#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* "x2apic" X2APIC */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* "movbe" MOVBE instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* "popcnt" POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* "tsc_deadline_timer" TSC deadline timer */
+#define X86_FEATURE_AES ( 4*32+25) /* "aes" AES instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* "xsave" XSAVE/XRSTOR/XSETBV/XGETBV instructions */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* XSAVE instruction enabled in the OS */
+#define X86_FEATURE_AVX ( 4*32+28) /* "avx" Advanced Vector Extensions */
+#define X86_FEATURE_F16C ( 4*32+29) /* "f16c" 16-bit FP conversions */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* "rdrand" RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* "hypervisor" Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* "ace2" Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* "ace2_en" ACE v2 enabled */
+#define X86_FEATURE_PHE ( 5*32+10) /* "phe" PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* "phe_en" PHE enabled */
+#define X86_FEATURE_PMM ( 5*32+12) /* "pmm" PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* "pmm_en" PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
-#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
-#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
+#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* "lahf_lm" LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* "cmp_legacy" If yes HyperThreading not valid */
+#define X86_FEATURE_SVM ( 6*32+ 2) /* "svm" Secure Virtual Machine */
+#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* "extapic" Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* "cr8_legacy" CR8 in 32-bit mode */
+#define X86_FEATURE_ABM ( 6*32+ 5) /* "abm" Advanced bit manipulation */
+#define X86_FEATURE_SSE4A ( 6*32+ 6) /* "sse4a" SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* "misalignsse" Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* "3dnowprefetch" 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW ( 6*32+ 9) /* "osvw" OS Visible Workaround */
+#define X86_FEATURE_IBS ( 6*32+10) /* "ibs" Instruction Based Sampling */
+#define X86_FEATURE_XOP ( 6*32+11) /* "xop" Extended AVX instructions */
+#define X86_FEATURE_SKINIT ( 6*32+12) /* "skinit" SKINIT/STGI instructions */
+#define X86_FEATURE_WDT ( 6*32+13) /* "wdt" Watchdog timer */
+#define X86_FEATURE_LWP ( 6*32+15) /* "lwp" Light Weight Profiling */
+#define X86_FEATURE_FMA4 ( 6*32+16) /* "fma4" 4 operands MAC instructions */
+#define X86_FEATURE_TCE ( 6*32+17) /* "tce" Translation Cache Extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* "nodeid_msr" NodeId MSR */
+#define X86_FEATURE_TBM ( 6*32+21) /* "tbm" Trailing Bit Manipulations */
+#define X86_FEATURE_TOPOEXT ( 6*32+22) /* "topoext" Topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* "perfctr_core" Core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* "perfctr_nb" NB performance counter extensions */
+#define X86_FEATURE_BPEXT ( 6*32+26) /* "bpext" Data breakpoint extension */
+#define X86_FEATURE_PTSC ( 6*32+27) /* "ptsc" Performance time-stamp counter */
+#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* "perfctr_llc" Last Level Cache performance counter extensions */
+#define X86_FEATURE_MWAITX ( 6*32+29) /* "mwaitx" MWAIT extension (MONITORX/MWAITX instructions) */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -189,93 +189,93 @@
*
* Reuse free bits when adding new feature flags!
*/
-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
-#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
-#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
-#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */
-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
-#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
-#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
-#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
-#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
-#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
-#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* AMD Performance Monitoring Version 2 */
-#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
-#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
-#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
-#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
-#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
-#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
-#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
+#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* "ring3mwait" Ring 3 MONITOR/MWAIT instructions */
+#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* "cpuid_fault" Intel CPUID faulting */
+#define X86_FEATURE_CPB ( 7*32+ 2) /* "cpb" AMD Core Performance Boost */
+#define X86_FEATURE_EPB ( 7*32+ 3) /* "epb" IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* "cat_l3" Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* "cat_l2" Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* "cdp_l3" Code and Data Prioritization L3 */
+#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* "tdx_host_platform" Platform supports being a TDX host */
+#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* "hw_pstate" AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* "proc_feedback" AMD ProcFeedbackInterface */
+#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* Use compacted XSTATE (XSAVES or XSAVEC) */
+#define X86_FEATURE_PTI ( 7*32+11) /* "pti" Kernel Page Table Isolation enabled */
+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* Fill RSB on VM-Exit */
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* "intel_ppin" Intel Processor Inventory Number */
+#define X86_FEATURE_CDP_L2 ( 7*32+15) /* "cdp_l2" Code and Data Prioritization L2 */
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD ( 7*32+17) /* "ssbd" Speculative Store Bypass Disable */
+#define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
+#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */
+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
+#define X86_FEATURE_IBRS ( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP ( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN ( 7*32+28) /* Generic flag for all Zen and newer */
+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* L1TF workaround PTE inversion */
+#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* "ibrs_enhanced" Enhanced IBRS */
+#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* MSR IA32_FEAT_CTL configured */
/* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* Intel FlexPriority */
-#define X86_FEATURE_EPT ( 8*32+ 2) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID ( 8*32+ 3) /* Intel Virtual Processor ID */
+#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* "tpr_shadow" Intel TPR Shadow */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
+#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
+#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
-#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
-#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
-#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
-#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */
-#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */
-#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* Intel Trust Domain Extensions Guest */
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
+#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */
+#define X86_FEATURE_EPT_AD ( 8*32+17) /* "ept_ad" Intel Extended Page Table access-dirty bit */
+#define X86_FEATURE_VMCALL ( 8*32+18) /* Hypervisor supports the VMCALL instruction */
+#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* PV vcpu_is_preempted function */
+#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* "tdx_guest" Intel Trust Domain Extensions Guest */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
-#define X86_FEATURE_SGX ( 9*32+ 2) /* Software Guard Extensions */
-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
-#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
-#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
-#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* "fsgsbase" RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* "tsc_adjust" TSC adjustment MSR 0x3B */
+#define X86_FEATURE_SGX ( 9*32+ 2) /* "sgx" Software Guard Extensions */
+#define X86_FEATURE_BMI1 ( 9*32+ 3) /* "bmi1" 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE ( 9*32+ 4) /* "hle" Hardware Lock Elision */
+#define X86_FEATURE_AVX2 ( 9*32+ 5) /* "avx2" AVX2 instructions */
+#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* FPU data pointer updated only on x87 exceptions */
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* "smep" Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 ( 9*32+ 8) /* "bmi2" 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS ( 9*32+ 9) /* "erms" Enhanced REP MOVSB/STOSB instructions */
+#define X86_FEATURE_INVPCID ( 9*32+10) /* "invpcid" Invalidate Processor Context ID */
+#define X86_FEATURE_RTM ( 9*32+11) /* "rtm" Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* "cqm" Cache QoS Monitoring */
+#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* Zero out FPU CS and FPU DS */
+#define X86_FEATURE_MPX ( 9*32+14) /* "mpx" Memory Protection Extension */
+#define X86_FEATURE_RDT_A ( 9*32+15) /* "rdt_a" Resource Director Technology Allocation */
+#define X86_FEATURE_AVX512F ( 9*32+16) /* "avx512f" AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ ( 9*32+17) /* "avx512dq" AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_RDSEED ( 9*32+18) /* "rdseed" RDSEED instruction */
+#define X86_FEATURE_ADX ( 9*32+19) /* "adx" ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP ( 9*32+20) /* "smap" Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* "avx512ifma" AVX-512 Integer Fused Multiply-Add instructions */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* "clflushopt" CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* "clwb" CLWB instruction */
+#define X86_FEATURE_INTEL_PT ( 9*32+25) /* "intel_pt" Intel Processor Trace */
+#define X86_FEATURE_AVX512PF ( 9*32+26) /* "avx512pf" AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER ( 9*32+27) /* "avx512er" AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD ( 9*32+28) /* "avx512cd" AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI ( 9*32+29) /* "sha_ni" SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_AVX512BW ( 9*32+30) /* "avx512bw" AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512VL ( 9*32+31) /* "avx512vl" AVX-512 VL (128/256 Vector Length) Extensions */
/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
-#define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */
+#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* "xsaveopt" XSAVEOPT instruction */
+#define X86_FEATURE_XSAVEC (10*32+ 1) /* "xsavec" XSAVEC instruction */
+#define X86_FEATURE_XGETBV1 (10*32+ 2) /* "xgetbv1" XGETBV with ECX = 1 instruction */
+#define X86_FEATURE_XSAVES (10*32+ 3) /* "xsaves" XSAVES/XRSTORS instructions */
+#define X86_FEATURE_XFD (10*32+ 4) /* eXtended Feature Disabling */
/*
* Extended auxiliary flags: Linux defined - for features scattered in various
@@ -283,181 +283,183 @@
*
* Reuse free bits when adding new feature flags!
*/
-#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */
-#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
-#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
-#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
-#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
-#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
-#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
-#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
-#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
-#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
-#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
-#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
-#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
-#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
-#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
-#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
-#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */
-#define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */
-#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
-#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
-#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
-#define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
-#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
-#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
-#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
-#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
-#define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */
-#define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */
-#define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */
-#define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */
+#define X86_FEATURE_CQM_LLC (11*32+ 0) /* "cqm_llc" LLC QoS if 1 */
+#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* "cqm_occup_llc" LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* "cqm_mbm_total" LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* "cqm_mbm_local" LLC Local MBM monitoring */
+#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* "split_lock_detect" #AC for split lock */
+#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* Per-thread Memory Bandwidth Allocation */
+#define X86_FEATURE_SGX1 (11*32+ 8) /* Basic SGX */
+#define X86_FEATURE_SGX2 (11*32+ 9) /* SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* RET prediction control */
+#define X86_FEATURE_RETPOLINE (11*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK (11*32+14) /* Use REturn THUNK */
+#define X86_FEATURE_UNRET (11*32+15) /* AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* Use IBPB during runtime firmware calls */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* Fill RSB on VM exit when EIBRS is enabled */
+#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* SGX EDECCSSA user leaf function */
+#define X86_FEATURE_CALL_DEPTH (11*32+19) /* Call depth tracking for RSB stuffing */
+#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* MSR IA32_TSX_CTRL (Intel) implemented */
+#define X86_FEATURE_SMBA (11*32+21) /* Slow Memory Bandwidth Allocation */
+#define X86_FEATURE_BMEC (11*32+22) /* Bandwidth Monitoring Event Configuration */
+#define X86_FEATURE_USER_SHSTK (11*32+23) /* "user_shstk" Shadow stack support for user mode applications */
+#define X86_FEATURE_SRSO (11*32+24) /* AMD BTB untrain RETs */
+#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* AMD BTB untrain RETs through aliasing */
+#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* Issue an IBPB only on VMEXIT */
+#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
+#define X86_FEATURE_ZEN2 (11*32+28) /* CPU based on Zen2 microarchitecture */
+#define X86_FEATURE_ZEN3 (11*32+29) /* CPU based on Zen3 microarchitecture */
+#define X86_FEATURE_ZEN4 (11*32+30) /* CPU based on Zen4 microarchitecture */
+#define X86_FEATURE_ZEN1 (11*32+31) /* CPU based on Zen1 microarchitecture */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
-#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
-#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
-#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* "" CMPccXADD instructions */
-#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* "" Intel Architectural PerfMon Extension */
-#define X86_FEATURE_FZRM (12*32+10) /* "" Fast zero-length REP MOVSB */
-#define X86_FEATURE_FSRS (12*32+11) /* "" Fast short REP STOSB */
-#define X86_FEATURE_FSRC (12*32+12) /* "" Fast short REP {CMPSB,SCASB} */
-#define X86_FEATURE_FRED (12*32+17) /* Flexible Return and Event Delivery */
-#define X86_FEATURE_LKGS (12*32+18) /* "" Load "kernel" (userspace) GS */
-#define X86_FEATURE_WRMSRNS (12*32+19) /* "" Non-serializing WRMSR */
-#define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */
-#define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */
-#define X86_FEATURE_LAM (12*32+26) /* Linear Address Masking */
+#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* "avx_vnni" AVX VNNI instructions */
+#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* "avx512_bf16" AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* CMPccXADD instructions */
+#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* Intel Architectural PerfMon Extension */
+#define X86_FEATURE_FZRM (12*32+10) /* Fast zero-length REP MOVSB */
+#define X86_FEATURE_FSRS (12*32+11) /* Fast short REP STOSB */
+#define X86_FEATURE_FSRC (12*32+12) /* Fast short REP {CMPSB,SCASB} */
+#define X86_FEATURE_FRED (12*32+17) /* "fred" Flexible Return and Event Delivery */
+#define X86_FEATURE_LKGS (12*32+18) /* Load "kernel" (userspace) GS */
+#define X86_FEATURE_WRMSRNS (12*32+19) /* Non-serializing WRMSR */
+#define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */
+#define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */
+#define X86_FEATURE_LAM (12*32+26) /* "lam" Linear Address Masking */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
-#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
-#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
-#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
-#define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */
-#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
-#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
-#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
-#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
-#define X86_FEATURE_AMD_PPIN (13*32+23) /* Protected Processor Inventory Number */
-#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
-#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
-#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
-#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
-#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
-#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
-#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
+#define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */
+#define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */
+#define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */
+#define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */
+#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* Single Thread Indirect Branch Predictors always-on preferred */
+#define X86_FEATURE_AMD_PPIN (13*32+23) /* "amd_ppin" Protected Processor Inventory Number */
+#define X86_FEATURE_AMD_SSBD (13*32+24) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_VIRT_SSBD (13*32+25) /* "virt_ssbd" Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* Speculative Store Bypass is fixed in hardware. */
+#define X86_FEATURE_CPPC (13*32+27) /* "cppc" Collaborative Processor Performance Control */
+#define X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */
+#define X86_FEATURE_BTC_NO (13*32+29) /* Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_BRS (13*32+31) /* "brs" Branch Sampling available */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
-#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
-#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
-#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
-#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
-#define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */
+#define X86_FEATURE_DTHERM (14*32+ 0) /* "dtherm" Digital Thermal Sensor */
+#define X86_FEATURE_IDA (14*32+ 1) /* "ida" Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT (14*32+ 2) /* "arat" Always Running APIC Timer */
+#define X86_FEATURE_PLN (14*32+ 4) /* "pln" Intel Power Limit Notification */
+#define X86_FEATURE_PTS (14*32+ 6) /* "pts" Intel Package Thermal Status */
+#define X86_FEATURE_HWP (14*32+ 7) /* "hwp" Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* "hwp_notify" HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* "hwp_act_window" HWP Activity Window */
+#define X86_FEATURE_HWP_EPP (14*32+10) /* "hwp_epp" HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* "hwp_pkg_req" HWP Package Level Request */
+#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* HWP Highest perf change */
+#define X86_FEATURE_HFI (14*32+19) /* "hfi" Hardware Feedback Interface */
/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
-#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
-#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_NPT (15*32+ 0) /* "npt" Nested Page Table support */
+#define X86_FEATURE_LBRV (15*32+ 1) /* "lbrv" LBR Virtualization support */
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
-#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
-#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
-#define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */
-#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */
-#define X86_FEATURE_VNMI (15*32+25) /* Virtual NMI */
-#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* "flushbyasid" Flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* "decodeassists" Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* "pausefilter" Filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* "pfthreshold" Pause filter threshold */
+#define X86_FEATURE_AVIC (15*32+13) /* "avic" Virtual Interrupt Controller */
+#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* "v_vmsave_vmload" Virtual VMSAVE VMLOAD */
+#define X86_FEATURE_VGIF (15*32+16) /* "vgif" Virtual GIF */
+#define X86_FEATURE_X2AVIC (15*32+18) /* "x2avic" Virtual x2apic */
+#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
+#define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
+#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
-#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
-#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
-#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
-#define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */
-#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
-#define X86_FEATURE_SHSTK (16*32+ 7) /* "" Shadow stack */
-#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
-#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
-#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
-#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
-#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
-#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
-#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
-#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* Bus Lock detect */
-#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
-#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
-#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
-#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
-#define X86_FEATURE_SGX_LC (16*32+30) /* Software Guard Extensions Launch Control */
+#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* "avx512vbmi" AVX512 Vector Bit Manipulation instructions*/
+#define X86_FEATURE_UMIP (16*32+ 2) /* "umip" User Mode Instruction Protection */
+#define X86_FEATURE_PKU (16*32+ 3) /* "pku" Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE (16*32+ 4) /* "ospke" OS Protection Keys Enable */
+#define X86_FEATURE_WAITPKG (16*32+ 5) /* "waitpkg" UMONITOR/UMWAIT/TPAUSE Instructions */
+#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* "avx512_vbmi2" Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_SHSTK (16*32+ 7) /* Shadow stack */
+#define X86_FEATURE_GFNI (16*32+ 8) /* "gfni" Galois Field New Instructions */
+#define X86_FEATURE_VAES (16*32+ 9) /* "vaes" Vector AES */
+#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* "vpclmulqdq" Carry-Less Multiplication Double Quadword */
+#define X86_FEATURE_AVX512_VNNI (16*32+11) /* "avx512_vnni" Vector Neural Network Instructions */
+#define X86_FEATURE_AVX512_BITALG (16*32+12) /* "avx512_bitalg" Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME (16*32+13) /* "tme" Intel Total Memory Encryption */
+#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* "avx512_vpopcntdq" POPCNT for vectors of DW/QW */
+#define X86_FEATURE_LA57 (16*32+16) /* "la57" 5-level page tables */
+#define X86_FEATURE_RDPID (16*32+22) /* "rdpid" RDPID instruction */
+#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* "bus_lock_detect" Bus Lock detect */
+#define X86_FEATURE_CLDEMOTE (16*32+25) /* "cldemote" CLDEMOTE instruction */
+#define X86_FEATURE_MOVDIRI (16*32+27) /* "movdiri" MOVDIRI instruction */
+#define X86_FEATURE_MOVDIR64B (16*32+28) /* "movdir64b" MOVDIR64B instruction */
+#define X86_FEATURE_ENQCMD (16*32+29) /* "enqcmd" ENQCMD and ENQCMDS instructions */
+#define X86_FEATURE_SGX_LC (16*32+30) /* "sgx_lc" Software Guard Extensions Launch Control */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
-#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
-#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
-#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* "overflow_recov" MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR (17*32+ 1) /* "succor" Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA (17*32+ 3) /* "smca" Scalable MCA */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
-#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
-#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
-#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
-#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
-#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
-#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
-#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
-#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
-#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
-#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
-#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
-#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
-#define X86_FEATURE_IBT (18*32+20) /* Indirect Branch Tracking */
-#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */
-#define X86_FEATURE_AVX512_FP16 (18*32+23) /* AVX512 FP16 */
-#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
-#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */
-#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
-#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
-#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */
-#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* "avx512_4vnniw" AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* "avx512_4fmaps" AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_FSRM (18*32+ 4) /* "fsrm" Fast Short Rep Mov */
+#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* "avx512_vp2intersect" AVX-512 Intersect for D/Q */
+#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* SRBDS mitigation MSR available */
+#define X86_FEATURE_MD_CLEAR (18*32+10) /* "md_clear" VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* RTM transaction always aborts */
+#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* TSX_FORCE_ABORT */
+#define X86_FEATURE_SERIALIZE (18*32+14) /* "serialize" SERIALIZE instruction */
+#define X86_FEATURE_HYBRID_CPU (18*32+15) /* This part has CPUs of more than one type */
+#define X86_FEATURE_TSXLDTRK (18*32+16) /* "tsxldtrk" TSX Suspend Load Address Tracking */
+#define X86_FEATURE_PCONFIG (18*32+18) /* "pconfig" Intel PCONFIG */
+#define X86_FEATURE_ARCH_LBR (18*32+19) /* "arch_lbr" Intel ARCH LBR */
+#define X86_FEATURE_IBT (18*32+20) /* "ibt" Indirect Branch Tracking */
+#define X86_FEATURE_AMX_BF16 (18*32+22) /* "amx_bf16" AMX bf16 Support */
+#define X86_FEATURE_AVX512_FP16 (18*32+23) /* "avx512_fp16" AVX512 FP16 */
+#define X86_FEATURE_AMX_TILE (18*32+24) /* "amx_tile" AMX tile Support */
+#define X86_FEATURE_AMX_INT8 (18*32+25) /* "amx_int8" AMX int8 Support */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D (18*32+28) /* "flush_l1d" Flush L1D cache */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* "arch_capabilities" IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* IA32_CORE_CAPABILITIES MSR */
+#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* Speculative Store Bypass Disable */
/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
-#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
-#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
-#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
-#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
-#define X86_FEATURE_SEV_SNP (19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
-#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
-#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
-#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */
+#define X86_FEATURE_SME (19*32+ 0) /* "sme" AMD Secure Memory Encryption */
+#define X86_FEATURE_SEV (19*32+ 1) /* "sev" AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
+#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" AMD Secure Encrypted Virtualization - Secure Nested Paging */
+#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
+#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
+#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
+#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
-#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
-#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* "" WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
-#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
-#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
-#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
-#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
+#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
+#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
+#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
+#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
+#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */
-#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */
-#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
-#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
+#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
+#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
/*
* Extended auxiliary flags: Linux defined - for features scattered in various
@@ -465,59 +467,60 @@
*
* Reuse free bits when adding new feature flags!
*/
-#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
-#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
-#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
-#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
-#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* "amd_lbr_pmc_freeze" AMD LBR and PMC Freeze */
+#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* Clear branch history at syscall entry using SW loop */
+#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
+#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
+#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_F00F X86_BUG(0) /* "f00f" Intel F00F */
+#define X86_BUG_FDIV X86_BUG(1) /* "fdiv" FPU FDIV */
+#define X86_BUG_COMA X86_BUG(2) /* "coma" Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_11AP X86_BUG(5) /* "11ap" Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* "fxsave_leak" FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* "clflush_monitor" AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* "sysret_ss_attrs" SYSRET doesn't fix up SS attrs */
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
* to avoid confusion.
*/
-#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#define X86_BUG_ESPFIX X86_BUG(9) /* IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
-#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
-#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
-#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
-#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
-#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
-#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
-#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
-#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
-#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
-#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
-#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
-#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
-#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
-#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
-#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
-#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
-#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
-#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
-#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */
-#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */
+#define X86_BUG_NULL_SEG X86_BUG(10) /* "null_seg" Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* "swapgs_fence" SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR X86_BUG(12) /* "monitor" IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400 X86_BUG(13) /* "amd_e400" CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* "cpu_meltdown" CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* "spectre_v1" CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* "spectre_v2" CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* "spec_store_bypass" CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF X86_BUG(18) /* "l1tf" CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS X86_BUG(19) /* "mds" CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* "msbds_only" CPU is only affected by the MSBDS variant of BUG_MDS */
+#define X86_BUG_SWAPGS X86_BUG(21) /* "swapgs" CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA X86_BUG(22) /* "taa" CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* "itlb_multihit" CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS X86_BUG(24) /* "srbds" CPU may leak RNG bits if not mitigated */
+#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* "mmio_stale_data" CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED X86_BUG(27) /* "retbleed" CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* "eibrs_pbrsb" EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_SMT_RSB X86_BUG(29) /* "smt_rsb" CPU is vulnerable to Cross-Thread Return Address Predictions */
+#define X86_BUG_GDS X86_BUG(30) /* "gds" CPU is affected by Gather Data Sampling */
+#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */
/* BUG word 2 */
-#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
-#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
-#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
-#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
+#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */
+#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
+#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
+#define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 1dc600fa3ba5..521aad70e41b 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -229,7 +229,8 @@ static inline bool efi_is_native(void)
static inline void *efi64_zero_upper(void *p)
{
- ((u32 *)p)[1] = 0;
+ if (p)
+ ((u32 *)p)[1] = 0;
return p;
}
@@ -315,6 +316,10 @@ static inline u32 efi64_convert_status(efi_status_t status)
#define __efi64_argmap_clear_memory_attributes(protocol, phys, size, flags) \
((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))
+/* EFI SMBIOS protocol */
+#define __efi64_argmap_get_next(protocol, smbioshandle, type, record, phandle) \
+ ((protocol), (smbioshandle), (type), efi64_zero_upper(record), \
+ efi64_zero_upper(phandle))
/*
* The macros below handle the plumbing for the argument mapping. To add a
* mapping for a specific EFI method, simply define a macro
@@ -384,24 +389,8 @@ static inline void efi_reserve_boot_services(void)
}
#endif /* CONFIG_EFI */
-#ifdef CONFIG_EFI_FAKE_MEMMAP
-extern void __init efi_fake_memmap_early(void);
-extern void __init efi_fake_memmap(void);
-#else
-static inline void efi_fake_memmap_early(void)
-{
-}
-
-static inline void efi_fake_memmap(void)
-{
-}
-#endif
-
extern int __init efi_memmap_alloc(unsigned int num_entries,
struct efi_memory_map_data *data);
-extern void __efi_memmap_free(u64 phys, unsigned long size,
- unsigned long flags);
-#define __efi_memmap_free __efi_memmap_free
extern int __init efi_memmap_install(struct efi_memory_map_data *data);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 7e523bb3d2d3..fb2809b20b0a 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -73,19 +73,16 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#endif
/*
- * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
- * but not enough for x86 stack utilization comfort. To keep
- * reasonable stack head room, reduce the maximum offset to 8 bits.
- *
- * The actual entropy will be further reduced by the compiler when
- * applying stack alignment constraints (see cc_stack_align4/8 in
+ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+ * bits. The actual entropy will be further reduced by the compiler
+ * when applying stack alignment constraints (see cc_stack_align4/8 in
* arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
* low bits from any entropy chosen here.
*
- * Therefore, final stack offset entropy will be 5 (x86_64) or
- * 6 (ia32) bits.
+ * Therefore, final stack offset entropy will be 7 (x86_64) or
+ * 8 (ia32) bits.
*/
- choose_random_kstack_offset(rdtsc() & 0xFF);
+ choose_random_kstack_offset(rdtsc());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
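
/*
 * Illustrative sketch of the entropy math described in the comment above
 * (assumes the generic KSTACK_OFFSET_MAX() mask of 0x3FF, i.e. 10 bits;
 * not part of the patch itself):
 *
 *	offset = rdtsc();	// full TSC value, no pre-masking needed
 *	offset &= 0x3FF;	// KSTACK_OFFSET_MAX(): keeps 10 bits
 *	offset &= ~0x7;		// cc_stack_align8 on x86_64 drops 3 low bits
 *				// (cc_stack_align4 on ia32 drops 2)
 *
 *	// => 10 - 3 = 7 effective bits on x86_64, 10 - 2 = 8 bits on ia32,
 *	// matching the updated comment.
 */
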
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index cc9ccf61b6bd..14d72727d7ee 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -6,6 +6,7 @@
struct x86_mapping_info {
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+ void (*free_pgt_page)(void *, void *); /* free buf for page table */
void *context; /* context for alloc_pgt_page */
unsigned long page_flag; /* page flag for PMD or PUD entry */
unsigned long offset; /* ident mapping offset */
@@ -16,4 +17,6 @@ struct x86_mapping_info {
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
unsigned long pstart, unsigned long pend);
+void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd);
+
#endif /* _ASM_X86_INIT_H */
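
/*
 * Illustrative sketch of how the new free_pgt_page() hook pairs with
 * kernel_ident_mapping_free(). The helpers and the page_flag choice are
 * hypothetical (they assume <linux/gfp.h> and <asm/pgtable_types.h>) and
 * are not taken from this patch:
 */
static void *example_alloc_pgt_page(void *context)
{
	/* hand out zeroed pages for page-table levels */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static void example_free_pgt_page(void *pgt, void *context)
{
	/* inverse of example_alloc_pgt_page() */
	free_page((unsigned long)pgt);
}

static int example_build_and_tear_down(pgd_t *pgd, unsigned long start,
				       unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,
		.free_pgt_page	= example_free_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,	/* assumed typical flag */
	};
	int ret = kernel_ident_mapping_init(&info, pgd, start, end);

	if (ret)
		return ret;
	/* ... use the identity mapping ... */
	kernel_ident_mapping_free(&info, pgd);
	return 0;
}
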
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
index 2f9eeb5c3069..5dbeac48a5b9 100644
--- a/arch/x86/include/asm/intel_ds.h
+++ b/arch/x86/include/asm/intel_ds.h
@@ -9,6 +9,7 @@
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS_FMT4 8
#define MAX_PEBS_EVENTS 32
+#define MAX_PEBS_EVENTS_MASK GENMASK_ULL(MAX_PEBS_EVENTS - 1, 0)
#define MAX_FIXED_PEBS_EVENTS 16
/*
diff --git a/arch/x86/include/asm/intel_pconfig.h b/arch/x86/include/asm/intel_pconfig.h
deleted file mode 100644
index 994638ef171b..000000000000
--- a/arch/x86/include/asm/intel_pconfig.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef _ASM_X86_INTEL_PCONFIG_H
-#define _ASM_X86_INTEL_PCONFIG_H
-
-#include <asm/asm.h>
-#include <asm/processor.h>
-
-enum pconfig_target {
- INVALID_TARGET = 0,
- MKTME_TARGET = 1,
- PCONFIG_TARGET_NR
-};
-
-int pconfig_target_supported(enum pconfig_target target);
-
-enum pconfig_leaf {
- MKTME_KEY_PROGRAM = 0,
- PCONFIG_LEAF_INVALID,
-};
-
-#define PCONFIG ".byte 0x0f, 0x01, 0xc5"
-
-/* Defines and structure for MKTME_KEY_PROGRAM of PCONFIG instruction */
-
-/* mktme_key_program::keyid_ctrl COMMAND, bits [7:0] */
-#define MKTME_KEYID_SET_KEY_DIRECT 0
-#define MKTME_KEYID_SET_KEY_RANDOM 1
-#define MKTME_KEYID_CLEAR_KEY 2
-#define MKTME_KEYID_NO_ENCRYPT 3
-
-/* mktme_key_program::keyid_ctrl ENC_ALG, bits [23:8] */
-#define MKTME_AES_XTS_128 (1 << 8)
-
-/* Return codes from the PCONFIG MKTME_KEY_PROGRAM */
-#define MKTME_PROG_SUCCESS 0
-#define MKTME_INVALID_PROG_CMD 1
-#define MKTME_ENTROPY_ERROR 2
-#define MKTME_INVALID_KEYID 3
-#define MKTME_INVALID_ENC_ALG 4
-#define MKTME_DEVICE_BUSY 5
-
-/* Hardware requires the structure to be 256 byte aligned. Otherwise #GP(0). */
-struct mktme_key_program {
- u16 keyid;
- u32 keyid_ctrl;
- u8 __rsvd[58];
- u8 key_field_1[64];
- u8 key_field_2[64];
-} __packed __aligned(256);
-
-static inline int mktme_key_program(struct mktme_key_program *key_program)
-{
- unsigned long rax = MKTME_KEY_PROGRAM;
-
- if (!pconfig_target_supported(MKTME_TARGET))
- return -ENXIO;
-
- asm volatile(PCONFIG
- : "=a" (rax), "=b" (key_program)
- : "0" (rax), "1" (key_program)
- : "memory", "cc");
-
- return rax;
-}
-
-#endif /* _ASM_X86_INTEL_PCONFIG_H */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 8c5ae649d2df..cf7fc2b8e3ce 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -54,6 +54,26 @@ static __always_inline void native_halt(void)
asm volatile("hlt": : :"memory");
}
+static __always_inline int native_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & X86_EFLAGS_IF);
+}
+
+static __always_inline unsigned long native_local_irq_save(void)
+{
+ unsigned long flags = native_save_fl();
+
+ native_irq_disable();
+
+ return flags;
+}
+
+static __always_inline void native_local_irq_restore(unsigned long flags)
+{
+ if (!native_irqs_disabled_flags(flags))
+ native_irq_enable();
+}
+
#endif
#ifdef CONFIG_PARAVIRT_XXL
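
/*
 * Illustrative sketch of the intended save/restore pairing for the new
 * native_local_irq_*() helpers (hypothetical caller, not from this patch):
 */
static void example_native_irq_critical_section(void)
{
	/* read EFLAGS.IF, then disable interrupts */
	unsigned long flags = native_local_irq_save();

	/* ... code that must run with IRQs off ... */

	/* re-enable only if IF was set on entry */
	native_local_irq_restore(flags);
}
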
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ece45b3f6f20..f8ca74e7678f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2154,6 +2154,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len);
+void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, unsigned long roots);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index dfd2e9699bd7..3ad29b128943 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -261,7 +261,8 @@ enum mcp_flags {
MCP_DONTLOG = BIT(2), /* only clear, don't log */
MCP_QUEUE_LOG = BIT(3), /* only queue to genpool */
};
-bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
+void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
int mce_notify_irq(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e022e6eb766c..82c6a4d350e0 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -566,6 +566,12 @@
#define MSR_RELOAD_PMC0 0x000014c1
#define MSR_RELOAD_FIXED_CTR0 0x00001309
+/* V6 PMON MSR range */
+#define MSR_IA32_PMC_V6_GP0_CTR 0x1900
+#define MSR_IA32_PMC_V6_GP0_CFG_A 0x1901
+#define MSR_IA32_PMC_V6_FX0_CTR 0x1980
+#define MSR_IA32_PMC_V6_STEP 4
+
/* KeyID partitioning between MKTME and TDX */
#define MSR_IA32_MKTME_KEYID_PARTITIONING 0x00000087
@@ -660,6 +666,8 @@
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133
+#define MSR_SVSM_CAA 0xc001f000
+
/* AMD Collaborative Processor Performance Control MSRs */
#define MSR_AMD_CPPC_CAP1 0xc00102b0
#define MSR_AMD_CPPC_ENABLE 0xc00102b1
@@ -781,6 +789,8 @@
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
+#define MSR_K7_HWCR_CPB_DIS_BIT 25
+#define MSR_K7_HWCR_CPB_DIS BIT_ULL(MSR_K7_HWCR_CPB_DIS_BIT)
/* K6 MSRs */
#define MSR_K6_WHCR 0xc0000082
@@ -1164,6 +1174,7 @@
#define MSR_IA32_QM_CTR 0xc8e
#define MSR_IA32_PQR_ASSOC 0xc8f
#define MSR_IA32_L3_CBM_BASE 0xc90
+#define MSR_RMID_SNC_CONFIG 0xca0
#define MSR_IA32_L2_CBM_BASE 0xd10
#define MSR_IA32_MBA_THRTL_BASE 0xd50
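
/*
 * Illustrative sketch (an assumption about how the new V6 PMON range is
 * laid out; the stride usage is not spelled out in this patch): the Nth
 * general-purpose counter and its config register are presumably reached
 * by stepping from the GP0 base:
 *
 *	ctr = MSR_IA32_PMC_V6_GP0_CTR   + idx * MSR_IA32_PMC_V6_STEP;
 *	cfg = MSR_IA32_PMC_V6_GP0_CFG_A + idx * MSR_IA32_PMC_V6_STEP;
 *
 * e.g. GP counter 3 would sit at 0x1900 + 3 * 4 = 0x190c.
 */
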
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index cc6b8e087192..af4302d79b59 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -54,7 +54,7 @@ static inline void clear_page(void *page)
clear_page_rep, X86_FEATURE_REP_GOOD,
clear_page_erms, X86_FEATURE_ERMS,
"=D" (page),
- "0" (page)
+ "D" (page)
: "cc", "memory", "rax", "rcx");
}
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 7f1e17250546..91b73571412f 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -32,6 +32,8 @@
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35)
+#define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36)
+#define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40)
#define INTEL_FIXED_BITS_MASK 0xFULL
#define INTEL_FIXED_BITS_STRIDE 4
@@ -185,6 +187,8 @@ union cpuid10_edx {
* detection/enumeration details:
*/
#define ARCH_PERFMON_EXT_LEAF 0x00000023
+#define ARCH_PERFMON_EXT_UMASK2 0x1
+#define ARCH_PERFMON_EXT_EQ 0x2
#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT 0x1
#define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1
@@ -307,6 +311,10 @@ struct x86_pmu_capability {
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
+/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
+/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
+/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */
+
static inline bool use_fixed_pseudo_encoding(u64 code)
{
return !(code & 0xff);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 65b8e5bb902c..e39311a89bf4 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -140,6 +140,11 @@ static inline int pte_young(pte_t pte)
return pte_flags(pte) & _PAGE_ACCESSED;
}
+static inline bool pte_decrypted(pte_t pte)
+{
+ return cc_mkdec(pte_val(pte)) == pte_val(pte);
+}
+
#define pmd_dirty pmd_dirty
static inline bool pmd_dirty(pmd_t pmd)
{
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index b78644962626..2f321137736c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -549,6 +549,7 @@ enum pg_level {
PG_LEVEL_2M,
PG_LEVEL_1G,
PG_LEVEL_512G,
+ PG_LEVEL_256T,
PG_LEVEL_NUM
};
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cb4f6c513c48..a75a07f4931f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -692,7 +692,17 @@ static inline u32 per_cpu_l2c_id(unsigned int cpu)
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_highest_perf(void);
-extern void amd_clear_divider(void);
+
+/*
+ * Issue a DIV 0/1 insn to clear any division data from previous DIV
+ * operations.
+ */
+static __always_inline void amd_clear_divider(void)
+{
+ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+ :: "a" (0), "d" (0), "r" (1));
+}
+
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_highest_perf(void) { return 0; }
diff --git a/arch/x86/include/asm/runtime-const.h b/arch/x86/include/asm/runtime-const.h
new file mode 100644
index 000000000000..24e3a53ca255
--- /dev/null
+++ b/arch/x86/include/asm/runtime-const.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+#define runtime_const_ptr(sym) ({ \
+ typeof(sym) __ret; \
+ asm_inline("mov %1,%0\n1:\n" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - %c2 - .\n\t" \
+ ".popsection" \
+ :"=r" (__ret) \
+ :"i" ((unsigned long)0x0123456789abcdefull), \
+ "i" (sizeof(long))); \
+ __ret; })
+
+// The 'typeof' will create at _least_ a 32-bit type, but
+// will happily also take a bigger type and the 'shrl' will
+// clear the upper bits
+#define runtime_const_shift_right_32(val, sym) ({ \
+ typeof(0u+(val)) __ret = (val); \
+ asm_inline("shrl $12,%k0\n1:\n" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
+ ".long 1b - 1 - .\n\t" \
+ ".popsection" \
+ :"+r" (__ret)); \
+ __ret; })
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+/*
+ * The text patching is trivial - you can only do this at init time,
+ * when the text section hasn't been marked RO, and before the text
+ * has ever been executed.
+ */
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+ *(unsigned long *)where = val;
+}
+
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ *(unsigned char *)where = val;
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif
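
/*
 * Illustrative usage sketch for the new runtime-const machinery. The symbol
 * and the linker-script wiring (a runtime_ptr_example_base section collected
 * into __start/__stop arrays) are assumptions, not shown in this patch:
 *
 *	// Some pointer-sized value that is fixed once at init time:
 *	extern unsigned long example_base;
 *
 *	// Hot path: the value is patched directly into the mov immediate,
 *	// so no memory load is needed at runtime.
 *	unsigned long base = runtime_const_ptr(example_base);
 *
 *	// Once, early at init time, before the text is marked read-only
 *	// and before the hot path has ever executed:
 *	runtime_const_init(ptr, example_base);
 *
 * runtime_const_shift_right_32() works the same way: runtime_const_init(shift, sym)
 * rewrites the 'shrl' immediate byte with the init-time value of sym.
 */
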
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 9aee31862b4a..4b2abce2e3e7 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -49,8 +49,11 @@ int set_memory_wb(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_p(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
+
+bool set_memory_enc_stop_conversion(void);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
+
int set_memory_np_noalias(unsigned long addr, int numpages);
int set_memory_nonglobal(unsigned long addr, int numpages);
int set_memory_global(unsigned long addr, int numpages);
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index e61e68d71cba..0667b2a88614 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -28,6 +28,8 @@
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
#ifndef __ASSEMBLY__
+#include <linux/cache.h>
+
#include <asm/bootparam.h>
#include <asm/x86_init.h>
@@ -133,6 +135,12 @@ asmlinkage void __init __noreturn x86_64_start_reservations(char *real_mode_data
#endif /* __i386__ */
#endif /* _SETUP */
+#ifdef CONFIG_CMDLINE_BOOL
+extern bool builtin_cmdline_added __ro_after_init;
+#else
+#define builtin_cmdline_added 0
+#endif
+
#else /* __ASSEMBLY */
.macro __RESERVE_BRK name, size
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 5a8246dd532f..e90d403f2068 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -98,6 +98,19 @@ enum psc_op {
/* GHCBData[63:32] */ \
(((u64)(val) & GENMASK_ULL(63, 32)) >> 32)
+/* GHCB Run at VMPL Request/Response */
+#define GHCB_MSR_VMPL_REQ 0x016
+#define GHCB_MSR_VMPL_REQ_LEVEL(v) \
+ /* GHCBData[39:32] */ \
+ (((u64)(v) & GENMASK_ULL(7, 0) << 32) | \
+ /* GHCBDdata[11:0] */ \
+ GHCB_MSR_VMPL_REQ)
+
+#define GHCB_MSR_VMPL_RESP 0x017
+#define GHCB_MSR_VMPL_RESP_VAL(v) \
+ /* GHCBData[63:32] */ \
+ (((u64)(v) & GENMASK_ULL(63, 32)) >> 32)
+
/* GHCB Hypervisor Feature Request/Response */
#define GHCB_MSR_HV_FT_REQ 0x080
#define GHCB_MSR_HV_FT_RESP 0x081
@@ -109,6 +122,7 @@ enum psc_op {
#define GHCB_HV_FT_SNP BIT_ULL(0)
#define GHCB_HV_FT_SNP_AP_CREATION BIT_ULL(1)
+#define GHCB_HV_FT_SNP_MULTI_VMPL BIT_ULL(5)
/*
* SNP Page State Change NAE event
@@ -163,6 +177,10 @@ struct snp_psc_desc {
#define GHCB_TERM_NOT_VMPL0 3 /* SNP guest is not running at VMPL-0 */
#define GHCB_TERM_CPUID 4 /* CPUID-validation failure */
#define GHCB_TERM_CPUID_HV 5 /* CPUID failure during hypervisor fallback */
+#define GHCB_TERM_SECRETS_PAGE 6 /* Secrets page failure */
+#define GHCB_TERM_NO_SVSM 7 /* SVSM is not advertised in the secrets page */
+#define GHCB_TERM_SVSM_VMPL0 8 /* SVSM is present but has set VMPL to 0 */
+#define GHCB_TERM_SVSM_CAA 9 /* SVSM is present but CAA is not page aligned */
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
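
/*
 * Illustrative sketch of driving the new VMPL request over the GHCB MSR
 * protocol. It assumes the existing SEV-ES helpers sev_es_wr_ghcb_msr(),
 * sev_es_rd_ghcb_msr() and VMGEXIT(); the zero-on-success response value is
 * also an assumption, not spelled out in this hunk:
 *
 *	u64 val;
 *
 *	sev_es_wr_ghcb_msr(GHCB_MSR_VMPL_REQ_LEVEL(2));	// ask to run at VMPL2
 *	VMGEXIT();
 *	val = sev_es_rd_ghcb_msr();
 *
 *	if (GHCB_RESP_CODE(val) != GHCB_MSR_VMPL_RESP ||
 *	    GHCB_MSR_VMPL_RESP_VAL(val))
 *		// the VMPL switch was refused
 */
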
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index ca20cc4e5826..ac5886ce252e 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -152,10 +152,119 @@ struct snp_secrets_page {
u8 vmpck2[VMPCK_KEY_LEN];
u8 vmpck3[VMPCK_KEY_LEN];
struct secrets_os_area os_area;
- u8 rsvd3[3840];
+
+ u8 vmsa_tweak_bitmap[64];
+
+ /* SVSM fields */
+ u64 svsm_base;
+ u64 svsm_size;
+ u64 svsm_caa;
+ u32 svsm_max_version;
+ u8 svsm_guest_vmpl;
+ u8 rsvd3[3];
+
+ /* Remainder of page */
+ u8 rsvd4[3744];
} __packed;
+/*
+ * The SVSM Calling Area (CA) related structures.
+ */
+struct svsm_ca {
+ u8 call_pending;
+ u8 mem_available;
+ u8 rsvd1[6];
+
+ u8 svsm_buffer[PAGE_SIZE - 8];
+};
+
+#define SVSM_SUCCESS 0
+#define SVSM_ERR_INCOMPLETE 0x80000000
+#define SVSM_ERR_UNSUPPORTED_PROTOCOL 0x80000001
+#define SVSM_ERR_UNSUPPORTED_CALL 0x80000002
+#define SVSM_ERR_INVALID_ADDRESS 0x80000003
+#define SVSM_ERR_INVALID_FORMAT 0x80000004
+#define SVSM_ERR_INVALID_PARAMETER 0x80000005
+#define SVSM_ERR_INVALID_REQUEST 0x80000006
+#define SVSM_ERR_BUSY 0x80000007
+#define SVSM_PVALIDATE_FAIL_SIZEMISMATCH 0x80001006
+
+/*
+ * The SVSM PVALIDATE related structures
+ */
+struct svsm_pvalidate_entry {
+ u64 page_size : 2,
+ action : 1,
+ ignore_cf : 1,
+ rsvd : 8,
+ pfn : 52;
+};
+
+struct svsm_pvalidate_call {
+ u16 num_entries;
+ u16 cur_index;
+
+ u8 rsvd1[4];
+
+ struct svsm_pvalidate_entry entry[];
+};
+
+#define SVSM_PVALIDATE_MAX_COUNT ((sizeof_field(struct svsm_ca, svsm_buffer) - \
+ offsetof(struct svsm_pvalidate_call, entry)) / \
+ sizeof(struct svsm_pvalidate_entry))
+
+/*
+ * The SVSM Attestation related structures
+ */
+struct svsm_loc_entry {
+ u64 pa;
+ u32 len;
+ u8 rsvd[4];
+};
+
+struct svsm_attest_call {
+ struct svsm_loc_entry report_buf;
+ struct svsm_loc_entry nonce;
+ struct svsm_loc_entry manifest_buf;
+ struct svsm_loc_entry certificates_buf;
+
+ /* For attesting a single service */
+ u8 service_guid[16];
+ u32 service_manifest_ver;
+ u8 rsvd[4];
+};
+
+/*
+ * SVSM protocol structure
+ */
+struct svsm_call {
+ struct svsm_ca *caa;
+ u64 rax;
+ u64 rcx;
+ u64 rdx;
+ u64 r8;
+ u64 r9;
+ u64 rax_out;
+ u64 rcx_out;
+ u64 rdx_out;
+ u64 r8_out;
+ u64 r9_out;
+};
+
+#define SVSM_CORE_CALL(x) ((0ULL << 32) | (x))
+#define SVSM_CORE_REMAP_CA 0
+#define SVSM_CORE_PVALIDATE 1
+#define SVSM_CORE_CREATE_VCPU 2
+#define SVSM_CORE_DELETE_VCPU 3
+
+#define SVSM_ATTEST_CALL(x) ((1ULL << 32) | (x))
+#define SVSM_ATTEST_SERVICES 0
+#define SVSM_ATTEST_SINGLE_SERVICE 1
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+extern u8 snp_vmpl;
+
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
@@ -181,6 +290,14 @@ static __always_inline void sev_es_nmi_complete(void)
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
extern void sev_enable(struct boot_params *bp);
+/*
+ * RMPADJUST modifies the RMP permissions of a page of a lesser-
+ * privileged (numerically higher) VMPL.
+ *
+ * If the guest is running at a higher-privilege than the privilege
+ * level the instruction is targeting, the instruction will succeed,
+ * otherwise, it will fail.
+ */
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
int rc;
@@ -225,11 +342,16 @@ bool snp_init(struct boot_params *bp);
void __noreturn snp_abort(void);
void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
void sev_show_status(void);
-#else
+void snp_update_svsm_ca(void);
+
+#else /* !CONFIG_AMD_MEM_ENCRYPT */
+
+#define snp_vmpl 0
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
@@ -253,12 +375,17 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
{
return -ENOTTY;
}
-
+static inline int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input)
+{
+ return -ENOTTY;
+}
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
static inline void sev_show_status(void) { }
-#endif
+static inline void snp_update_svsm_ca(void) { }
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
#ifdef CONFIG_KVM_AMD_SEV
bool snp_probe_rmptable_info(void);
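
/*
 * Illustrative sketch (an interpretation of the new snp_vmpl symbol, not
 * stated explicitly in this hunk): snp_vmpl is expected to be 0 when the
 * guest itself owns VMPL0 and non-zero when an SVSM runs underneath it:
 */
static inline bool example_running_under_svsm(void)
{
	/* with !CONFIG_AMD_MEM_ENCRYPT, snp_vmpl is the constant 0 */
	return snp_vmpl != 0;
}
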
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index a35936b512fe..ca073f40698f 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -35,6 +35,7 @@ struct smp_ops {
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
void (*play_dead)(void);
+ void (*stop_this_cpu)(void);
void (*send_call_func_ipi)(const struct cpumask *mask);
void (*send_call_func_single_ipi)(int cpu);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 405efb3e4996..94408a784c8e 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -28,9 +28,6 @@ static inline cycles_t get_cycles(void)
}
#define get_cycles get_cycles
-extern struct system_counterval_t convert_art_to_tsc(u64 art);
-extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
-
extern void tsc_early_init(void);
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 0f9bab92a43d..3a7755c1a441 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -78,10 +78,10 @@ extern int __get_user_bad(void);
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
- asm volatile("call __" #fn "_%c4" \
+ asm volatile("call __" #fn "_%c[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
- : "0" (ptr), "i" (sizeof(*(ptr)))); \
+ : "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 761173ccc33c..6c9e5bdd3916 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -56,6 +56,5 @@
# define __ARCH_WANT_SYS_FORK
# define __ARCH_WANT_SYS_VFORK
# define __ARCH_WANT_SYS_CLONE
-# define __ARCH_WANT_SYS_CLONE3
#endif /* _ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 0ef36190abe6..b2d2df026f6e 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -328,9 +328,8 @@ static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles,
* due to unsigned comparison.
*
* Due to the MSB/Sign-bit being used as invalid marker (see
- * arch_vdso_cycles_valid() above), the effective mask is S64_MAX,
- * but that case is also unlikely and will also take the unlikely path
- * here.
+ * arch_vdso_cycles_ok() above), the effective mask is S64_MAX, but that
+ * case is also unlikely and will also take the unlikely path here.
*/
if (unlikely(delta > vd->max_cycles)) {
/*
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index be199a9b2676..93226281b450 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -4,7 +4,6 @@
#ifndef __ASSEMBLY__
-#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/vgtod.h>
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 7aa38b2ad8a9..a0ce291abcae 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -14,11 +14,6 @@
#include <uapi/linux/time.h>
-#ifdef BUILD_VDSO32_64
-typedef u64 gtod_long_t;
-#else
-typedef unsigned long gtod_long_t;
-#endif
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
#endif /* _ASM_X86_VGTOD_H */
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index ac9fc51e2b18..c9cf43d5ef23 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -7,51 +7,321 @@
#include <linux/stringify.h>
/*
- * The hypercall definitions differ in the low word of the %edx argument
- * in the following way: the old port base interface uses the port
- * number to distinguish between high- and low bandwidth versions.
+ * VMware hypercall ABI.
+ *
+ * - Low bandwidth (LB) hypercalls (I/O port based, vmcall and vmmcall)
+ * have up to 6 input and 6 output arguments passed and returned using
+ * registers: %eax (arg0), %ebx (arg1), %ecx (arg2), %edx (arg3),
+ * %esi (arg4), %edi (arg5).
+ * The following input arguments must be initialized by the caller:
+ * arg0 - VMWARE_HYPERVISOR_MAGIC
+ * arg2 - Hypercall command
+ * arg3 bits [15:0] - Port number, LB and direction flags
+ *
+ * - Low bandwidth TDX hypercalls (x86_64 only) are similar to LB
+ *   hypercalls. They also have up to 6 input and 6 output arguments passed
+ *   and returned in registers, with a different argument-to-register mapping:
+ * %r12 (arg0), %rbx (arg1), %r13 (arg2), %rdx (arg3),
+ * %rsi (arg4), %rdi (arg5).
+ *
+ * - High bandwidth (HB) hypercalls are I/O port based only. They have
+ * up to 7 input and 7 output arguments passed and returned using
+ * registers: %eax (arg0), %ebx (arg1), %ecx (arg2), %edx (arg3),
+ * %esi (arg4), %edi (arg5), %ebp (arg6).
+ * The following input arguments must be initialized by the caller:
+ * arg0 - VMWARE_HYPERVISOR_MAGIC
+ * arg1 - Hypercall command
+ * arg3 bits [15:0] - Port number, HB and direction flags
+ *
+ * For compatibility purposes, x86_64 systems use only the lower 32 bits
+ * for input and output arguments.
+ *
+ * The hypercall definitions differ in the low word of the %edx (arg3)
+ * in the following way: the old I/O port based interface uses the port
+ * number to distinguish between high- and low bandwidth versions, and
+ * uses IN/OUT instructions to define transfer direction.
*
* The new vmcall interface instead uses a set of flags to select
* bandwidth mode and transfer direction. The flags should be loaded
- * into %dx by any user and are automatically replaced by the port
- * number if the VMWARE_HYPERVISOR_PORT method is used.
- *
- * In short, new driver code should strictly use the new definition of
- * %dx content.
+ * into arg3 by any user and are automatically replaced by the port
+ * number if the I/O port method is used.
*/
-/* Old port-based version */
-#define VMWARE_HYPERVISOR_PORT 0x5658
-#define VMWARE_HYPERVISOR_PORT_HB 0x5659
+#define VMWARE_HYPERVISOR_HB BIT(0)
+#define VMWARE_HYPERVISOR_OUT BIT(1)
-/* Current vmcall / vmmcall version */
-#define VMWARE_HYPERVISOR_HB BIT(0)
-#define VMWARE_HYPERVISOR_OUT BIT(1)
+#define VMWARE_HYPERVISOR_PORT 0x5658
+#define VMWARE_HYPERVISOR_PORT_HB (VMWARE_HYPERVISOR_PORT | \
+ VMWARE_HYPERVISOR_HB)
-/* The low bandwidth call. The low word of edx is presumed clear. */
-#define VMWARE_HYPERCALL \
- ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT) ", %%dx; " \
- "inl (%%dx), %%eax", \
- "vmcall", X86_FEATURE_VMCALL, \
- "vmmcall", X86_FEATURE_VMW_VMMCALL)
+#define VMWARE_HYPERVISOR_MAGIC 0x564d5868U
+#define VMWARE_CMD_GETVERSION 10
+#define VMWARE_CMD_GETHZ 45
+#define VMWARE_CMD_GETVCPU_INFO 68
+#define VMWARE_CMD_STEALCLOCK 91
/*
- * The high bandwidth out call. The low word of edx is presumed to have the
- * HB and OUT bits set.
+ * Hypercall command mask:
+ * bits [6:0] command, range [0, 127]
+ * bits [19:16] sub-command, range [0, 15]
*/
-#define VMWARE_HYPERCALL_HB_OUT \
- ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
- "rep outsb", \
- "vmcall", X86_FEATURE_VMCALL, \
- "vmmcall", X86_FEATURE_VMW_VMMCALL)
+#define VMWARE_CMD_MASK 0xf007fU
+
+#define CPUID_VMWARE_FEATURES_ECX_VMMCALL BIT(0)
+#define CPUID_VMWARE_FEATURES_ECX_VMCALL BIT(1)
+
+extern unsigned long vmware_hypercall_slow(unsigned long cmd,
+ unsigned long in1, unsigned long in3,
+ unsigned long in4, unsigned long in5,
+ u32 *out1, u32 *out2, u32 *out3,
+ u32 *out4, u32 *out5);
+
+#define VMWARE_TDX_VENDOR_LEAF 0x1af7e4909ULL
+#define VMWARE_TDX_HCALL_FUNC 1
+
+extern unsigned long vmware_tdx_hypercall(unsigned long cmd,
+ unsigned long in1, unsigned long in3,
+ unsigned long in4, unsigned long in5,
+ u32 *out1, u32 *out2, u32 *out3,
+ u32 *out4, u32 *out5);
/*
- * The high bandwidth in call. The low word of edx is presumed to have the
- * HB bit set.
+ * The low bandwidth call. The low word of %edx is presumed to have the OUT
+ * bit set. The high word of %edx may contain input data from the caller.
*/
-#define VMWARE_HYPERCALL_HB_IN \
- ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
- "rep insb", \
- "vmcall", X86_FEATURE_VMCALL, \
+#define VMWARE_HYPERCALL \
+ ALTERNATIVE_2("movw %[port], %%dx\n\t" \
+ "inl (%%dx), %%eax", \
+ "vmcall", X86_FEATURE_VMCALL, \
"vmmcall", X86_FEATURE_VMW_VMMCALL)
+
+static inline
+unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
+{
+ unsigned long out0;
+
+ if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+ return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+
+ if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
+ return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+
+ asm_inline volatile (VMWARE_HYPERCALL
+ : "=a" (out0)
+ : [port] "i" (VMWARE_HYPERVISOR_PORT),
+ "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (0)
+ : "cc", "memory");
+ return out0;
+}
+
+static inline
+unsigned long vmware_hypercall3(unsigned long cmd, unsigned long in1,
+ u32 *out1, u32 *out2)
+{
+ unsigned long out0;
+
+ if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+ return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
+ out1, out2, NULL, NULL, NULL);
+
+ if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
+ return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
+ out1, out2, NULL, NULL, NULL);
+
+ asm_inline volatile (VMWARE_HYPERCALL
+ : "=a" (out0), "=b" (*out1), "=c" (*out2)
+ : [port] "i" (VMWARE_HYPERVISOR_PORT),
+ "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (0)
+ : "cc", "memory");
+ return out0;
+}
+
+static inline
+unsigned long vmware_hypercall4(unsigned long cmd, unsigned long in1,
+ u32 *out1, u32 *out2, u32 *out3)
+{
+ unsigned long out0;
+
+ if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+ return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
+ out1, out2, out3, NULL, NULL);
+
+ if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
+ return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
+ out1, out2, out3, NULL, NULL);
+
+ asm_inline volatile (VMWARE_HYPERCALL
+ : "=a" (out0), "=b" (*out1), "=c" (*out2), "=d" (*out3)
+ : [port] "i" (VMWARE_HYPERVISOR_PORT),
+ "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (0)
+ : "cc", "memory");
+ return out0;
+}
+
+static inline
+unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, u32 *out2)
+{
+ unsigned long out0;
+
+ if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+ return vmware_tdx_hypercall(cmd, in1, in3, in4, in5,
+ NULL, out2, NULL, NULL, NULL);
+
+ if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
+ return vmware_hypercall_slow(cmd, in1, in3, in4, in5,
+ NULL, out2, NULL, NULL, NULL);
+
+ asm_inline volatile (VMWARE_HYPERCALL
+ : "=a" (out0), "=c" (*out2)
+ : [port] "i" (VMWARE_HYPERVISOR_PORT),
+ "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (in3),
+ "S" (in4),
+ "D" (in5)
+ : "cc", "memory");
+ return out0;
+}
+
+static inline
+unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1,
+ unsigned long in3, u32 *out2,
+ u32 *out3, u32 *out4, u32 *out5)
+{
+ unsigned long out0;
+
+ if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+ return vmware_tdx_hypercall(cmd, in1, in3, 0, 0,
+ NULL, out2, out3, out4, out5);
+
+ if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
+ return vmware_hypercall_slow(cmd, in1, in3, 0, 0,
+ NULL, out2, out3, out4, out5);
+
+ asm_inline volatile (VMWARE_HYPERCALL
+ : "=a" (out0), "=c" (*out2), "=d" (*out3), "=S" (*out4),
+ "=D" (*out5)
+ : [port] "i" (VMWARE_HYPERVISOR_PORT),
+ "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (in3)
+ : "cc", "memory");
+ return out0;
+}
+
+static inline
+unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, u32 *out1,
+ u32 *out2, u32 *out3)
+{
+ unsigned long out0;
+
+ if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+ return vmware_tdx_hypercall(cmd, in1, in3, in4, in5,
+ out1, out2, out3, NULL, NULL);
+
+ if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
+ return vmware_hypercall_slow(cmd, in1, in3, in4, in5,
+ out1, out2, out3, NULL, NULL);
+
+ asm_inline volatile (VMWARE_HYPERCALL
+ : "=a" (out0), "=b" (*out1), "=c" (*out2), "=d" (*out3)
+ : [port] "i" (VMWARE_HYPERVISOR_PORT),
+ "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (in3),
+ "S" (in4),
+ "D" (in5)
+ : "cc", "memory");
+ return out0;
+}
+
+#ifdef CONFIG_X86_64
+#define VMW_BP_CONSTRAINT "r"
+#else
+#define VMW_BP_CONSTRAINT "m"
+#endif
+
+/*
+ * High bandwidth calls are not supported on encrypted memory guests.
+ * The caller should check cc_platform_has(CC_ATTR_MEM_ENCRYPT) and use
+ * a low bandwidth hypercall if memory encryption is set.
+ * This assumption simplifies the HB hypercall implementation to just an
+ * I/O port based approach without alternative patching.
+ */
+static inline
+unsigned long vmware_hypercall_hb_out(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1)
+{
+ unsigned long out0;
+
+ asm_inline volatile (
+ UNWIND_HINT_SAVE
+ "push %%" _ASM_BP "\n\t"
+ UNWIND_HINT_UNDEFINED
+ "mov %[in6], %%" _ASM_BP "\n\t"
+ "rep outsb\n\t"
+ "pop %%" _ASM_BP "\n\t"
+ UNWIND_HINT_RESTORE
+ : "=a" (out0), "=b" (*out1)
+ : "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (cmd),
+ "c" (in2),
+ "d" (in3 | VMWARE_HYPERVISOR_PORT_HB),
+ "S" (in4),
+ "D" (in5),
+ [in6] VMW_BP_CONSTRAINT (in6)
+ : "cc", "memory");
+ return out0;
+}
+
+static inline
+unsigned long vmware_hypercall_hb_in(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1)
+{
+ unsigned long out0;
+
+ asm_inline volatile (
+ UNWIND_HINT_SAVE
+ "push %%" _ASM_BP "\n\t"
+ UNWIND_HINT_UNDEFINED
+ "mov %[in6], %%" _ASM_BP "\n\t"
+ "rep insb\n\t"
+ "pop %%" _ASM_BP "\n\t"
+ UNWIND_HINT_RESTORE
+ : "=a" (out0), "=b" (*out1)
+ : "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (cmd),
+ "c" (in2),
+ "d" (in3 | VMWARE_HYPERVISOR_PORT_HB),
+ "S" (in4),
+ "D" (in5),
+ [in6] VMW_BP_CONSTRAINT (in6)
+ : "cc", "memory");
+ return out0;
+}
+#undef VMW_BP_CONSTRAINT
+#undef VMWARE_HYPERCALL
+
#endif
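[Editor's note] A hedged sketch of how a caller might use the new inline wrappers instead of open-coding the VMWARE_HYPERCALL asm. It is loosely modeled on the hypervisor-detection path; the function name, the UINT_MAX failure convention and the magic-echo check are assumptions, not part of this patch.

/* Kernel-style fragment; assumes <asm/vmware.h> (this header) is included. */
static bool example_vmware_present(void)
{
	u32 out1, out2;
	unsigned long out0;

	/* out0 is the returned %eax; out1/out2 come back in %ebx/%ecx. */
	out0 = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &out1, &out2);

	/* Assumed convention: failure returns all-ones, success echoes the magic. */
	return out0 != UINT_MAX && out1 == VMWARE_HYPERVISOR_MAGIC;
}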
diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h
index 266daf5b5b84..09b1d7e607c1 100644
--- a/arch/x86/include/asm/vmxfeatures.h
+++ b/arch/x86/include/asm/vmxfeatures.h
@@ -9,85 +9,85 @@
/*
* Note: If the comment begins with a quoted string, that string is used
- * in /proc/cpuinfo instead of the macro name. If the string is "",
- * this feature bit is not displayed in /proc/cpuinfo at all.
+ * in /proc/cpuinfo instead of the macro name. Otherwise, this feature bit
+ * is not displayed in /proc/cpuinfo at all.
*/
/* Pin-Based VM-Execution Controls, EPT/VPID, APIC and VM-Functions, word 0 */
-#define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* "" VM-Exit on vectored interrupts */
-#define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* "" VM-Exit on NMIs */
+#define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* VM-Exit on vectored interrupts */
+#define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* VM-Exit on NMIs */
#define VMX_FEATURE_VIRTUAL_NMIS ( 0*32+ 5) /* "vnmi" NMI virtualization */
-#define VMX_FEATURE_PREEMPTION_TIMER ( 0*32+ 6) /* VMX Preemption Timer */
-#define VMX_FEATURE_POSTED_INTR ( 0*32+ 7) /* Posted Interrupts */
+#define VMX_FEATURE_PREEMPTION_TIMER ( 0*32+ 6) /* "preemption_timer" VMX Preemption Timer */
+#define VMX_FEATURE_POSTED_INTR ( 0*32+ 7) /* "posted_intr" Posted Interrupts */
/* EPT/VPID features, scattered to bits 16-23 */
-#define VMX_FEATURE_INVVPID ( 0*32+ 16) /* INVVPID is supported */
+#define VMX_FEATURE_INVVPID ( 0*32+ 16) /* "invvpid" INVVPID is supported */
#define VMX_FEATURE_EPT_EXECUTE_ONLY ( 0*32+ 17) /* "ept_x_only" EPT entries can be execute only */
-#define VMX_FEATURE_EPT_AD ( 0*32+ 18) /* EPT Accessed/Dirty bits */
-#define VMX_FEATURE_EPT_1GB ( 0*32+ 19) /* 1GB EPT pages */
-#define VMX_FEATURE_EPT_5LEVEL ( 0*32+ 20) /* 5-level EPT paging */
+#define VMX_FEATURE_EPT_AD ( 0*32+ 18) /* "ept_ad" EPT Accessed/Dirty bits */
+#define VMX_FEATURE_EPT_1GB ( 0*32+ 19) /* "ept_1gb" 1GB EPT pages */
+#define VMX_FEATURE_EPT_5LEVEL ( 0*32+ 20) /* "ept_5level" 5-level EPT paging */
/* Aggregated APIC features 24-27 */
-#define VMX_FEATURE_FLEXPRIORITY ( 0*32+ 24) /* TPR shadow + virt APIC */
-#define VMX_FEATURE_APICV ( 0*32+ 25) /* TPR shadow + APIC reg virt + virt intr delivery + posted interrupts */
+#define VMX_FEATURE_FLEXPRIORITY ( 0*32+ 24) /* "flexpriority" TPR shadow + virt APIC */
+#define VMX_FEATURE_APICV ( 0*32+ 25) /* "apicv" TPR shadow + APIC reg virt + virt intr delivery + posted interrupts */
/* VM-Functions, shifted to bits 28-31 */
-#define VMX_FEATURE_EPTP_SWITCHING ( 0*32+ 28) /* EPTP switching (in guest) */
+#define VMX_FEATURE_EPTP_SWITCHING ( 0*32+ 28) /* "eptp_switching" EPTP switching (in guest) */
/* Primary Processor-Based VM-Execution Controls, word 1 */
-#define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* "" VM-Exit if INTRs are unblocked in guest */
+#define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* VM-Exit if INTRs are unblocked in guest */
#define VMX_FEATURE_USE_TSC_OFFSETTING ( 1*32+ 3) /* "tsc_offset" Offset hardware TSC when read in guest */
-#define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* "" VM-Exit on HLT */
-#define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* "" VM-Exit on INVLPG */
-#define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* "" VM-Exit on MWAIT */
-#define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* "" VM-Exit on RDPMC */
-#define VMX_FEATURE_RDTSC_EXITING ( 1*32+ 12) /* "" VM-Exit on RDTSC */
-#define VMX_FEATURE_CR3_LOAD_EXITING ( 1*32+ 15) /* "" VM-Exit on writes to CR3 */
-#define VMX_FEATURE_CR3_STORE_EXITING ( 1*32+ 16) /* "" VM-Exit on reads from CR3 */
-#define VMX_FEATURE_TERTIARY_CONTROLS ( 1*32+ 17) /* "" Enable Tertiary VM-Execution Controls */
-#define VMX_FEATURE_CR8_LOAD_EXITING ( 1*32+ 19) /* "" VM-Exit on writes to CR8 */
-#define VMX_FEATURE_CR8_STORE_EXITING ( 1*32+ 20) /* "" VM-Exit on reads from CR8 */
+#define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* VM-Exit on HLT */
+#define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* VM-Exit on INVLPG */
+#define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* VM-Exit on MWAIT */
+#define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* VM-Exit on RDPMC */
+#define VMX_FEATURE_RDTSC_EXITING ( 1*32+ 12) /* VM-Exit on RDTSC */
+#define VMX_FEATURE_CR3_LOAD_EXITING ( 1*32+ 15) /* VM-Exit on writes to CR3 */
+#define VMX_FEATURE_CR3_STORE_EXITING ( 1*32+ 16) /* VM-Exit on reads from CR3 */
+#define VMX_FEATURE_TERTIARY_CONTROLS ( 1*32+ 17) /* Enable Tertiary VM-Execution Controls */
+#define VMX_FEATURE_CR8_LOAD_EXITING ( 1*32+ 19) /* VM-Exit on writes to CR8 */
+#define VMX_FEATURE_CR8_STORE_EXITING ( 1*32+ 20) /* VM-Exit on reads from CR8 */
#define VMX_FEATURE_VIRTUAL_TPR ( 1*32+ 21) /* "vtpr" TPR virtualization, a.k.a. TPR shadow */
-#define VMX_FEATURE_NMI_WINDOW_EXITING ( 1*32+ 22) /* "" VM-Exit if NMIs are unblocked in guest */
-#define VMX_FEATURE_MOV_DR_EXITING ( 1*32+ 23) /* "" VM-Exit on accesses to debug registers */
-#define VMX_FEATURE_UNCOND_IO_EXITING ( 1*32+ 24) /* "" VM-Exit on *all* IN{S} and OUT{S}*/
-#define VMX_FEATURE_USE_IO_BITMAPS ( 1*32+ 25) /* "" VM-Exit based on I/O port */
+#define VMX_FEATURE_NMI_WINDOW_EXITING ( 1*32+ 22) /* VM-Exit if NMIs are unblocked in guest */
+#define VMX_FEATURE_MOV_DR_EXITING ( 1*32+ 23) /* VM-Exit on accesses to debug registers */
+#define VMX_FEATURE_UNCOND_IO_EXITING ( 1*32+ 24) /* VM-Exit on *all* IN{S} and OUT{S}*/
+#define VMX_FEATURE_USE_IO_BITMAPS ( 1*32+ 25) /* VM-Exit based on I/O port */
#define VMX_FEATURE_MONITOR_TRAP_FLAG ( 1*32+ 27) /* "mtf" VMX single-step VM-Exits */
-#define VMX_FEATURE_USE_MSR_BITMAPS ( 1*32+ 28) /* "" VM-Exit based on MSR index */
-#define VMX_FEATURE_MONITOR_EXITING ( 1*32+ 29) /* "" VM-Exit on MONITOR (MWAIT's accomplice) */
-#define VMX_FEATURE_PAUSE_EXITING ( 1*32+ 30) /* "" VM-Exit on PAUSE (unconditionally) */
-#define VMX_FEATURE_SEC_CONTROLS ( 1*32+ 31) /* "" Enable Secondary VM-Execution Controls */
+#define VMX_FEATURE_USE_MSR_BITMAPS ( 1*32+ 28) /* VM-Exit based on MSR index */
+#define VMX_FEATURE_MONITOR_EXITING ( 1*32+ 29) /* VM-Exit on MONITOR (MWAIT's accomplice) */
+#define VMX_FEATURE_PAUSE_EXITING ( 1*32+ 30) /* VM-Exit on PAUSE (unconditionally) */
+#define VMX_FEATURE_SEC_CONTROLS ( 1*32+ 31) /* Enable Secondary VM-Execution Controls */
/* Secondary Processor-Based VM-Execution Controls, word 2 */
#define VMX_FEATURE_VIRT_APIC_ACCESSES ( 2*32+ 0) /* "vapic" Virtualize memory mapped APIC accesses */
-#define VMX_FEATURE_EPT ( 2*32+ 1) /* Extended Page Tables, a.k.a. Two-Dimensional Paging */
-#define VMX_FEATURE_DESC_EXITING ( 2*32+ 2) /* "" VM-Exit on {S,L}*DT instructions */
-#define VMX_FEATURE_RDTSCP ( 2*32+ 3) /* "" Enable RDTSCP in guest */
-#define VMX_FEATURE_VIRTUAL_X2APIC ( 2*32+ 4) /* "" Virtualize X2APIC for the guest */
-#define VMX_FEATURE_VPID ( 2*32+ 5) /* Virtual Processor ID (TLB ASID modifier) */
-#define VMX_FEATURE_WBINVD_EXITING ( 2*32+ 6) /* "" VM-Exit on WBINVD */
-#define VMX_FEATURE_UNRESTRICTED_GUEST ( 2*32+ 7) /* Allow Big Real Mode and other "invalid" states */
+#define VMX_FEATURE_EPT ( 2*32+ 1) /* "ept" Extended Page Tables, a.k.a. Two-Dimensional Paging */
+#define VMX_FEATURE_DESC_EXITING ( 2*32+ 2) /* VM-Exit on {S,L}*DT instructions */
+#define VMX_FEATURE_RDTSCP ( 2*32+ 3) /* Enable RDTSCP in guest */
+#define VMX_FEATURE_VIRTUAL_X2APIC ( 2*32+ 4) /* Virtualize X2APIC for the guest */
+#define VMX_FEATURE_VPID ( 2*32+ 5) /* "vpid" Virtual Processor ID (TLB ASID modifier) */
+#define VMX_FEATURE_WBINVD_EXITING ( 2*32+ 6) /* VM-Exit on WBINVD */
+#define VMX_FEATURE_UNRESTRICTED_GUEST ( 2*32+ 7) /* "unrestricted_guest" Allow Big Real Mode and other "invalid" states */
#define VMX_FEATURE_APIC_REGISTER_VIRT ( 2*32+ 8) /* "vapic_reg" Hardware emulation of reads to the virtual-APIC */
#define VMX_FEATURE_VIRT_INTR_DELIVERY ( 2*32+ 9) /* "vid" Evaluation and delivery of pending virtual interrupts */
#define VMX_FEATURE_PAUSE_LOOP_EXITING ( 2*32+ 10) /* "ple" Conditionally VM-Exit on PAUSE at CPL0 */
-#define VMX_FEATURE_RDRAND_EXITING ( 2*32+ 11) /* "" VM-Exit on RDRAND*/
-#define VMX_FEATURE_INVPCID ( 2*32+ 12) /* "" Enable INVPCID in guest */
-#define VMX_FEATURE_VMFUNC ( 2*32+ 13) /* "" Enable VM-Functions (leaf dependent) */
-#define VMX_FEATURE_SHADOW_VMCS ( 2*32+ 14) /* VMREAD/VMWRITE in guest can access shadow VMCS */
-#define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* "" VM-Exit on ENCLS (leaf dependent) */
-#define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* "" VM-Exit on RDSEED */
+#define VMX_FEATURE_RDRAND_EXITING ( 2*32+ 11) /* VM-Exit on RDRAND*/
+#define VMX_FEATURE_INVPCID ( 2*32+ 12) /* Enable INVPCID in guest */
+#define VMX_FEATURE_VMFUNC ( 2*32+ 13) /* Enable VM-Functions (leaf dependent) */
+#define VMX_FEATURE_SHADOW_VMCS ( 2*32+ 14) /* "shadow_vmcs" VMREAD/VMWRITE in guest can access shadow VMCS */
+#define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* VM-Exit on ENCLS (leaf dependent) */
+#define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* VM-Exit on RDSEED */
#define VMX_FEATURE_PAGE_MOD_LOGGING ( 2*32+ 17) /* "pml" Log dirty pages into buffer */
-#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* "" Conditionally reflect EPT violations as #VE exceptions */
-#define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* "" Suppress VMX indicators in Processor Trace */
-#define VMX_FEATURE_XSAVES ( 2*32+ 20) /* "" Enable XSAVES and XRSTORS in guest */
+#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* "ept_violation_ve" Conditionally reflect EPT violations as #VE exceptions */
+#define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* Suppress VMX indicators in Processor Trace */
+#define VMX_FEATURE_XSAVES ( 2*32+ 20) /* Enable XSAVES and XRSTORS in guest */
#define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
-#define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* "" Processor Trace logs GPAs */
-#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */
-#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */
-#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */
-#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */
-#define VMX_FEATURE_NOTIFY_VM_EXITING ( 2*32+ 31) /* VM-Exit when no event windows after notify window */
+#define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* Processor Trace logs GPAs */
+#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* "tsc_scaling" Scale hardware TSC when read in guest */
+#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* "usr_wait_pause" Enable TPAUSE, UMONITOR, UMWAIT in guest */
+#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* VM-Exit on ENCLV (leaf dependent) */
+#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* VM-Exit when bus lock caused */
+#define VMX_FEATURE_NOTIFY_VM_EXITING ( 2*32+ 31) /* "notify_vm_exiting" VM-Exit when no event windows after notify window */
/* Tertiary Processor-Based VM-Execution Controls, word 3 */
-#define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */
+#define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* "ipi_virt" Enable IPI virtualization */
#endif /* _ASM_X86_VMXFEATURES_H */
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index e8d7d4941c4c..422a47746657 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -5,45 +5,12 @@
#include <linux/bitops.h>
#include <linux/wordpart.h>
-/*
- * This is largely generic for little-endian machines, but the
- * optimal byte mask counting is probably going to be something
- * that is architecture-specific. If you have a reliably fast
- * bit count instruction, that might be better than the multiply
- * and shift, for example.
- */
struct word_at_a_time {
const unsigned long one_bits, high_bits;
};
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
-#ifdef CONFIG_64BIT
-
-/*
- * Jan Achrenius on G+: microoptimized version of
- * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
- * that works for the bytemasks without having to
- * mask them first.
- */
-static inline long count_masked_bytes(unsigned long mask)
-{
- return mask*0x0001020304050608ul >> 56;
-}
-
-#else /* 32-bit case */
-
-/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
-static inline long count_masked_bytes(long mask)
-{
- /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
- long a = (0x0ff0001+mask) >> 23;
- /* Fix the 1 for 00 case */
- return a & mask;
-}
-
-#endif
-
/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
@@ -57,6 +24,22 @@ static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits,
return bits;
}
+#ifdef CONFIG_64BIT
+
+/* Keep the initial has_zero() value for both bitmask and size calc */
+#define create_zero_mask(bits) (bits)
+
+static inline unsigned long zero_bytemask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+#define find_zero(bits) (__ffs(bits) >> 3)
+
+#else
+
+/* Create the final mask for both bytemask and size */
static inline unsigned long create_zero_mask(unsigned long bits)
{
bits = (bits - 1) & ~bits;
@@ -66,11 +49,17 @@ static inline unsigned long create_zero_mask(unsigned long bits)
/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline unsigned long find_zero(unsigned long mask)
{
- return count_masked_bytes(mask);
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
}
+#endif
+
/*
* Load an unaligned word from kernel space.
*
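[Editor's note] On 64-bit the rework keeps the raw has_zero() result and derives the byte index directly with __ffs(bits) >> 3. The stand-alone user-space model below (not kernel code) mirrors that path: has_zero() uses the same one-bits/high-bits constants as WORD_AT_A_TIME_CONSTANTS, and __builtin_ctzll() stands in for __ffs().

#include <stdint.h>
#include <stdio.h>

#define ONE_BYTES  0x0101010101010101ULL	/* REPEAT_BYTE(0x01) */
#define HIGH_BITS  0x8080808080808080ULL	/* REPEAT_BYTE(0x80) */

/* Non-zero iff some byte of 'a' is 0x00; set bits only at/above the first zero byte. */
static uint64_t has_zero(uint64_t a)
{
	return (a - ONE_BYTES) & ~a & HIGH_BITS;
}

/* 64-bit find_zero() from the patch: index of the first zero byte. */
static unsigned int find_zero(uint64_t bits)
{
	return __builtin_ctzll(bits) >> 3;
}

int main(void)
{
	/* "abc\0def\0" loaded as a little-endian 64-bit word */
	uint64_t word = 0x0066656400636261ULL;
	uint64_t bits = has_zero(word);

	if (bits)
		printf("first zero byte at offset %u\n", find_zero(bits));	/* 3 */
	return 0;
}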
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6149eabe200f..213cf5379a5a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -149,12 +149,22 @@ struct x86_init_acpi {
* @enc_status_change_finish Notify HV after the encryption status of a range is changed
* @enc_tlb_flush_required Returns true if a TLB flush is needed before changing page encryption status
* @enc_cache_flush_required Returns true if a cache flush is needed before changing page encryption status
+ * @enc_kexec_begin Begin the two-step process of converting shared memory back
+ *				to private. It stops new conversions from being started
+ *				and waits for in-flight conversions to finish, if possible.
+ * @enc_kexec_finish Finish the two-step process of converting shared memory to
+ *				private. All memory is private when the function
+ *				returns.
+ * It is called on only one CPU while the others are shut down
+ * and with interrupts disabled.
*/
struct x86_guest {
- bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
- bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+ int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+ int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
bool (*enc_tlb_flush_required)(bool enc);
bool (*enc_cache_flush_required)(void);
+ void (*enc_kexec_begin)(void);
+ void (*enc_kexec_finish)(void);
};
/**
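[Editor's note] A hedged sketch (not from this patch) of how a guest backend could register the reworked hooks. The example_*() names are placeholders; the point is only the new shapes: the status-change hooks return an int error code instead of bool, and kexec conversion is split into a begin step that stops new shared<->private conversions and a finish step after which all memory is private.

/* Kernel-style fragment; assumes <asm/x86_init.h> is included. */
static void example_kexec_begin(void)
{
	/* Stop new conversions and wait for in-flight ones, if possible. */
}

static void example_kexec_finish(void)
{
	/* Convert remaining shared memory back to private. */
}

static int example_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	return 0;	/* 0 on success, -errno on failure (was bool before) */
}

static void __init example_guest_init(void)
{
	x86_platform.guest.enc_status_change_finish = example_enc_status_change_finish;
	x86_platform.guest.enc_kexec_begin          = example_kexec_begin;
	x86_platform.guest.enc_kexec_finish         = example_kexec_finish;
}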
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 80e1df482337..1814b413fd57 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -115,6 +115,7 @@
#define SVM_VMGEXIT_AP_CREATE_ON_INIT 0
#define SVM_VMGEXIT_AP_CREATE 1
#define SVM_VMGEXIT_AP_DESTROY 2
+#define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 20a0dd51700a..a847180836e4 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -17,7 +17,6 @@ CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_head64.o = -pg
CFLAGS_REMOVE_head32.o = -pg
-CFLAGS_REMOVE_sev.o = -pg
CFLAGS_REMOVE_rethook.o = -pg
endif
@@ -26,19 +25,16 @@ KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
-KASAN_SANITIZE_sev.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
KCSAN_SANITIZE := n
KMSAN_SANITIZE_head$(BITS).o := n
KMSAN_SANITIZE_nmi.o := n
-KMSAN_SANITIZE_sev.o := n
# If instrumentation of the following files is enabled, boot hangs during
# first second.
KCOV_INSTRUMENT_head$(BITS).o := n
-KCOV_INSTRUMENT_sev.o := n
CFLAGS_irq.o := -I $(src)/../include/asm/trace
@@ -142,8 +138,6 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
-obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o
-
obj-$(CONFIG_CFI_CLANG) += cfi.o
obj-$(CONFIG_CALL_THUNKS) += callthunks.o
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index fc17b3f136fe..842a5f449404 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ACPI) += boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
+obj-$(CONFIG_ACPI_MADT_WAKEUP) += madt_wakeup.o madt_playdead.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4bf82dbd2a6b..9f4618dcd704 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -67,13 +67,6 @@ static bool has_lapic_cpus __initdata;
static bool acpi_support_online_capable;
#endif
-#ifdef CONFIG_X86_64
-/* Physical address of the Multiprocessor Wakeup Structure mailbox */
-static u64 acpi_mp_wake_mailbox_paddr;
-/* Virtual address of the Multiprocessor Wakeup Structure mailbox */
-static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
-#endif
-
#ifdef CONFIG_X86_IO_APIC
/*
* Locks related to IOAPIC hotplug
@@ -341,60 +334,6 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long e
return 0;
}
-
-#ifdef CONFIG_X86_64
-static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
-{
- /*
- * Remap mailbox memory only for the first call to acpi_wakeup_cpu().
- *
- * Wakeup of secondary CPUs is fully serialized in the core code.
- * No need to protect acpi_mp_wake_mailbox from concurrent accesses.
- */
- if (!acpi_mp_wake_mailbox) {
- acpi_mp_wake_mailbox = memremap(acpi_mp_wake_mailbox_paddr,
- sizeof(*acpi_mp_wake_mailbox),
- MEMREMAP_WB);
- }
-
- /*
- * Mailbox memory is shared between the firmware and OS. Firmware will
- * listen on mailbox command address, and once it receives the wakeup
- * command, the CPU associated with the given apicid will be booted.
- *
- * The value of 'apic_id' and 'wakeup_vector' must be visible to the
- * firmware before the wakeup command is visible. smp_store_release()
- * ensures ordering and visibility.
- */
- acpi_mp_wake_mailbox->apic_id = apicid;
- acpi_mp_wake_mailbox->wakeup_vector = start_ip;
- smp_store_release(&acpi_mp_wake_mailbox->command,
- ACPI_MP_WAKE_COMMAND_WAKEUP);
-
- /*
- * Wait for the CPU to wake up.
- *
- * The CPU being woken up is essentially in a spin loop waiting to be
- * woken up. It should not take long for it wake up and acknowledge by
- * zeroing out ->command.
- *
- * ACPI specification doesn't provide any guidance on how long kernel
- * has to wait for a wake up acknowledgement. It also doesn't provide
- * a way to cancel a wake up request if it takes too long.
- *
- * In TDX environment, the VMM has control over how long it takes to
- * wake up secondary. It can postpone scheduling secondary vCPU
- * indefinitely. Giving up on wake up request and reporting error opens
- * possible attack vector for VMM: it can wake up a secondary CPU when
- * kernel doesn't expect it. Wait until positive result of the wake up
- * request.
- */
- while (READ_ONCE(acpi_mp_wake_mailbox->command))
- cpu_relax();
-
- return 0;
-}
-#endif /* CONFIG_X86_64 */
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
@@ -1124,29 +1063,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
}
return 0;
}
-
-#ifdef CONFIG_X86_64
-static int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
- const unsigned long end)
-{
- struct acpi_madt_multiproc_wakeup *mp_wake;
-
- if (!IS_ENABLED(CONFIG_SMP))
- return -ENODEV;
-
- mp_wake = (struct acpi_madt_multiproc_wakeup *)header;
- if (BAD_MADT_ENTRY(mp_wake, end))
- return -EINVAL;
-
- acpi_table_print_madt_entry(&header->common);
-
- acpi_mp_wake_mailbox_paddr = mp_wake->base_address;
-
- apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu);
-
- return 0;
-}
-#endif /* CONFIG_X86_64 */
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
@@ -1343,7 +1259,7 @@ static void __init acpi_process_madt(void)
smp_found_config = 1;
}
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_ACPI_MADT_WAKEUP
/*
* Parse MADT MP Wake entry.
*/
diff --git a/arch/x86/kernel/acpi/madt_playdead.S b/arch/x86/kernel/acpi/madt_playdead.S
new file mode 100644
index 000000000000..4e498d28cdc8
--- /dev/null
+++ b/arch/x86/kernel/acpi/madt_playdead.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/nospec-branch.h>
+#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+
+ .text
+ .align PAGE_SIZE
+
+/*
+ * asm_acpi_mp_play_dead() - Hand over control of the CPU to the BIOS
+ *
+ * rdi: Address of the ACPI MADT MPWK ResetVector
+ * rsi: PGD of the identity mapping
+ */
+SYM_FUNC_START(asm_acpi_mp_play_dead)
+ /* Turn off global entries. Following CR3 write will flush them. */
+ movq %cr4, %rdx
+ andq $~(X86_CR4_PGE), %rdx
+ movq %rdx, %cr4
+
+ /* Switch to identity mapping */
+ movq %rsi, %cr3
+
+ /* Jump to reset vector */
+ ANNOTATE_RETPOLINE_SAFE
+ jmp *%rdi
+SYM_FUNC_END(asm_acpi_mp_play_dead)
diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
new file mode 100644
index 000000000000..6cfe762be28b
--- /dev/null
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/pgtable.h>
+#include <linux/sched/hotplug.h>
+#include <asm/apic.h>
+#include <asm/barrier.h>
+#include <asm/init.h>
+#include <asm/intel_pt.h>
+#include <asm/nmi.h>
+#include <asm/processor.h>
+#include <asm/reboot.h>
+
+/* Physical address of the Multiprocessor Wakeup Structure mailbox */
+static u64 acpi_mp_wake_mailbox_paddr __ro_after_init;
+
+/* Virtual address of the Multiprocessor Wakeup Structure mailbox */
+static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init;
+
+static u64 acpi_mp_pgd __ro_after_init;
+static u64 acpi_mp_reset_vector_paddr __ro_after_init;
+
+static void acpi_mp_stop_this_cpu(void)
+{
+ asm_acpi_mp_play_dead(acpi_mp_reset_vector_paddr, acpi_mp_pgd);
+}
+
+static void acpi_mp_play_dead(void)
+{
+ play_dead_common();
+ asm_acpi_mp_play_dead(acpi_mp_reset_vector_paddr, acpi_mp_pgd);
+}
+
+static void acpi_mp_cpu_die(unsigned int cpu)
+{
+ u32 apicid = per_cpu(x86_cpu_to_apicid, cpu);
+ unsigned long timeout;
+
+ /*
+	 * Use the TEST mailbox command to prove that the BIOS got control over
+	 * the CPU before declaring it dead.
+	 *
+	 * The BIOS has to clear the 'command' field of the mailbox.
+ */
+ acpi_mp_wake_mailbox->apic_id = apicid;
+ smp_store_release(&acpi_mp_wake_mailbox->command,
+ ACPI_MP_WAKE_COMMAND_TEST);
+
+ /* Don't wait longer than a second. */
+ timeout = USEC_PER_SEC;
+ while (READ_ONCE(acpi_mp_wake_mailbox->command) && --timeout)
+ udelay(1);
+
+ if (!timeout)
+ pr_err("Failed to hand over CPU %d to BIOS\n", cpu);
+}
+
+/* The argument is required to match type of x86_mapping_info::alloc_pgt_page */
+static void __init *alloc_pgt_page(void *dummy)
+{
+ return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+}
+
+static void __init free_pgt_page(void *pgt, void *dummy)
+{
+ return memblock_free(pgt, PAGE_SIZE);
+}
+
+/*
+ * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
+ * the same place as in the kernel page tables. asm_acpi_mp_play_dead() switches
+ * to the identity mapping and the function has to be present at the same spot in
+ * the virtual address space before and after switching page tables.
+ */
+static int __init init_transition_pgtable(pgd_t *pgd)
+{
+ pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
+ unsigned long vaddr, paddr;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ vaddr = (unsigned long)asm_acpi_mp_play_dead;
+ pgd += pgd_index(vaddr);
+ if (!pgd_present(*pgd)) {
+ p4d = (p4d_t *)alloc_pgt_page(NULL);
+ if (!p4d)
+ return -ENOMEM;
+ set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
+ }
+ p4d = p4d_offset(pgd, vaddr);
+ if (!p4d_present(*p4d)) {
+ pud = (pud_t *)alloc_pgt_page(NULL);
+ if (!pud)
+ return -ENOMEM;
+ set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
+ }
+ pud = pud_offset(p4d, vaddr);
+ if (!pud_present(*pud)) {
+ pmd = (pmd_t *)alloc_pgt_page(NULL);
+ if (!pmd)
+ return -ENOMEM;
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+ }
+ pmd = pmd_offset(pud, vaddr);
+ if (!pmd_present(*pmd)) {
+ pte = (pte_t *)alloc_pgt_page(NULL);
+ if (!pte)
+ return -ENOMEM;
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
+
+ paddr = __pa(vaddr);
+ set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+
+ return 0;
+}
+
+static int __init acpi_mp_setup_reset(u64 reset_vector)
+{
+ struct x86_mapping_info info = {
+ .alloc_pgt_page = alloc_pgt_page,
+ .free_pgt_page = free_pgt_page,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .kernpg_flag = _KERNPG_TABLE_NOENC,
+ };
+ pgd_t *pgd;
+
+ pgd = alloc_pgt_page(NULL);
+ if (!pgd)
+ return -ENOMEM;
+
+ for (int i = 0; i < nr_pfn_mapped; i++) {
+ unsigned long mstart, mend;
+
+ mstart = pfn_mapped[i].start << PAGE_SHIFT;
+ mend = pfn_mapped[i].end << PAGE_SHIFT;
+ if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
+ kernel_ident_mapping_free(&info, pgd);
+ return -ENOMEM;
+ }
+ }
+
+ if (kernel_ident_mapping_init(&info, pgd,
+ PAGE_ALIGN_DOWN(reset_vector),
+ PAGE_ALIGN(reset_vector + 1))) {
+ kernel_ident_mapping_free(&info, pgd);
+ return -ENOMEM;
+ }
+
+ if (init_transition_pgtable(pgd)) {
+ kernel_ident_mapping_free(&info, pgd);
+ return -ENOMEM;
+ }
+
+ smp_ops.play_dead = acpi_mp_play_dead;
+ smp_ops.stop_this_cpu = acpi_mp_stop_this_cpu;
+ smp_ops.cpu_die = acpi_mp_cpu_die;
+
+ acpi_mp_reset_vector_paddr = reset_vector;
+ acpi_mp_pgd = __pa(pgd);
+
+ return 0;
+}
+
+static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
+{
+ if (!acpi_mp_wake_mailbox_paddr) {
+ pr_warn_once("No MADT mailbox: cannot bringup secondary CPUs. Booting with kexec?\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Remap mailbox memory only for the first call to acpi_wakeup_cpu().
+ *
+ * Wakeup of secondary CPUs is fully serialized in the core code.
+ * No need to protect acpi_mp_wake_mailbox from concurrent accesses.
+ */
+ if (!acpi_mp_wake_mailbox) {
+ acpi_mp_wake_mailbox = memremap(acpi_mp_wake_mailbox_paddr,
+ sizeof(*acpi_mp_wake_mailbox),
+ MEMREMAP_WB);
+ }
+
+ /*
+ * Mailbox memory is shared between the firmware and OS. Firmware will
+ * listen on mailbox command address, and once it receives the wakeup
+ * command, the CPU associated with the given apicid will be booted.
+ *
+ * The value of 'apic_id' and 'wakeup_vector' must be visible to the
+ * firmware before the wakeup command is visible. smp_store_release()
+ * ensures ordering and visibility.
+ */
+ acpi_mp_wake_mailbox->apic_id = apicid;
+ acpi_mp_wake_mailbox->wakeup_vector = start_ip;
+ smp_store_release(&acpi_mp_wake_mailbox->command,
+ ACPI_MP_WAKE_COMMAND_WAKEUP);
+
+ /*
+ * Wait for the CPU to wake up.
+ *
+ * The CPU being woken up is essentially in a spin loop waiting to be
+	 * woken up. It should not take long for it to wake up and acknowledge by
+ * zeroing out ->command.
+ *
+	 * The ACPI specification doesn't provide any guidance on how long the
+	 * kernel has to wait for a wake up acknowledgment. It also doesn't
+	 * provide a way to cancel a wake up request if it takes too long.
+ *
+	 * In a TDX environment, the VMM has control over how long it takes to
+	 * wake up a secondary CPU. It can postpone scheduling the secondary
+	 * vCPU indefinitely. Giving up on the wake up request and reporting an
+	 * error opens a possible attack vector for the VMM: it can wake up a
+	 * secondary CPU when the kernel doesn't expect it. Wait until the wake
+	 * up request gets a positive result.
+ */
+ while (READ_ONCE(acpi_mp_wake_mailbox->command))
+ cpu_relax();
+
+ return 0;
+}
+
+static void acpi_mp_disable_offlining(struct acpi_madt_multiproc_wakeup *mp_wake)
+{
+ cpu_hotplug_disable_offlining();
+
+ /*
+	 * ACPI MADT doesn't allow offlining a CPU after it has been onlined. This
+	 * limits kexec: the second kernel won't be able to use more than one CPU.
+	 *
+	 * To prevent a kexec kernel from onlining secondary CPUs, invalidate the
+	 * mailbox address in the ACPI MADT wakeup structure, which prevents a
+	 * kexec kernel from using it.
+ *
+ * This is safe as the booting kernel has the mailbox address cached
+ * already and acpi_wakeup_cpu() uses the cached value to bring up the
+ * secondary CPUs.
+ *
+ * Note: This is a Linux specific convention and not covered by the
+ * ACPI specification.
+ */
+ mp_wake->mailbox_address = 0;
+}
+
+int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_multiproc_wakeup *mp_wake;
+
+ mp_wake = (struct acpi_madt_multiproc_wakeup *)header;
+
+ /*
+ * Cannot use the standard BAD_MADT_ENTRY() to sanity check the @mp_wake
+ * entry. 'sizeof (struct acpi_madt_multiproc_wakeup)' can be larger
+	 * than the actual size of the MP wakeup entry in the ACPI table because the
+ * 'reset_vector' is only available in the V1 MP wakeup structure.
+ */
+ if (!mp_wake)
+ return -EINVAL;
+ if (end - (unsigned long)mp_wake < ACPI_MADT_MP_WAKEUP_SIZE_V0)
+ return -EINVAL;
+ if (mp_wake->header.length < ACPI_MADT_MP_WAKEUP_SIZE_V0)
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(&header->common);
+
+ acpi_mp_wake_mailbox_paddr = mp_wake->mailbox_address;
+
+ if (mp_wake->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1 &&
+ mp_wake->header.length >= ACPI_MADT_MP_WAKEUP_SIZE_V1) {
+ if (acpi_mp_setup_reset(mp_wake->reset_vector)) {
+ pr_warn("Failed to setup MADT reset vector\n");
+ acpi_mp_disable_offlining(mp_wake);
+ }
+ } else {
+ /*
+ * CPU offlining requires version 1 of the ACPI MADT wakeup
+ * structure.
+ */
+ acpi_mp_disable_offlining(mp_wake);
+ }
+
+ apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu);
+
+ return 0;
+}
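[Editor's note] The wakeup path above depends on ordering: apic_id and wakeup_vector must be globally visible before the command word, and the kernel then spins until the firmware clears command. The stand-alone user-space model below (not kernel code) shows the same handshake with C11 atomics standing in for smp_store_release()/READ_ONCE(); the "firmware" is just a second thread and all values are made up.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define CMD_WAKEUP 1

static struct {
	unsigned int apic_id;
	unsigned long wakeup_vector;
	_Atomic unsigned short command;
} mbox;

static void *firmware(void *arg)
{
	/* Acquire-load pairs with the release-store of the command below. */
	while (atomic_load_explicit(&mbox.command, memory_order_acquire) != CMD_WAKEUP)
		;
	printf("waking apic_id %u at %#lx\n", mbox.apic_id, mbox.wakeup_vector);
	/* Acknowledge by clearing the command, as the BIOS is expected to do. */
	atomic_store_explicit(&mbox.command, 0, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, firmware, NULL);

	mbox.apic_id = 1;
	mbox.wakeup_vector = 0x9f000;
	/* Publish the fields above before the command becomes visible. */
	atomic_store_explicit(&mbox.command, CMD_WAKEUP, memory_order_release);

	/* Wait for the acknowledgment (command cleared). */
	while (atomic_load_explicit(&mbox.command, memory_order_acquire))
		;
	pthread_join(t, NULL);
	return 0;
}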
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 89de61243272..d17518ca19b8 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -432,6 +432,11 @@ static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
return 5;
}
+static inline u8 * instr_va(struct alt_instr *i)
+{
+ return (u8 *)&i->instr_offset + i->instr_offset;
+}
+
/*
* Replace instructions with better alternatives for this CPU type. This runs
* before SMP is initialized to avoid SMP problems with self modifying code.
@@ -447,7 +452,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
{
u8 insn_buff[MAX_PATCH_LEN];
u8 *instr, *replacement;
- struct alt_instr *a;
+ struct alt_instr *a, *b;
DPRINTK(ALT, "alt table %px, -> %px", start, end);
@@ -473,7 +478,18 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
for (a = start; a < end; a++) {
int insn_buff_sz = 0;
- instr = (u8 *)&a->instr_offset + a->instr_offset;
+ /*
+ * In case of nested ALTERNATIVE()s the outer alternative might
+ * add more padding. To ensure consistent patching find the max
+	 * add more padding. To ensure consistent patching, find the max
+ * alternatives result in consecutive entries).
+ */
+ for (b = a+1; b < end && instr_va(b) == instr_va(a); b++) {
+ u8 len = max(a->instrlen, b->instrlen);
+ a->instrlen = b->instrlen = len;
+ }
+
+ instr = instr_va(a);
replacement = (u8 *)&a->repl_offset + a->repl_offset;
BUG_ON(a->instrlen > sizeof(insn_buff));
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
@@ -885,8 +901,8 @@ void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
#endif /* CONFIG_X86_KERNEL_IBT */
-#ifdef CONFIG_FINEIBT
-#define __CFI_DEFAULT CFI_DEFAULT
+#ifdef CONFIG_CFI_AUTO_DEFAULT
+#define __CFI_DEFAULT CFI_AUTO
#elif defined(CONFIG_CFI_CLANG)
#define __CFI_DEFAULT CFI_KCFI
#else
@@ -994,7 +1010,7 @@ static __init int cfi_parse_cmdline(char *str)
}
if (!strcmp(str, "auto")) {
- cfi_mode = CFI_DEFAULT;
+ cfi_mode = CFI_AUTO;
} else if (!strcmp(str, "off")) {
cfi_mode = CFI_OFF;
cfi_rand = false;
@@ -1254,7 +1270,7 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
"FineIBT preamble wrong size: %ld", fineibt_preamble_size))
return;
- if (cfi_mode == CFI_DEFAULT) {
+ if (cfi_mode == CFI_AUTO) {
cfi_mode = CFI_KCFI;
if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
cfi_mode = CFI_FINEIBT;
@@ -1641,7 +1657,7 @@ static noinline void __init alt_reloc_selftest(void)
*/
asm_inline volatile (
ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
- : /* output */
+ : ASM_CALL_CONSTRAINT
: [mem] "m" (__alt_reloc_selftest_addr)
: _ASM_ARG1
);
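[Editor's note] A stand-alone illustration (not kernel code) of the padding normalization added to apply_alternatives(): consecutive alt_instr entries that target the same instruction address (nested ALTERNATIVE()s) are widened to a common length so every variant patches the same number of bytes. The struct and values below are invented for the demo.

#include <stdio.h>

struct fake_alt {
	unsigned long va;	/* patch site address */
	unsigned int len;	/* bytes to patch at that site */
};

/* Same shape as the kernel loop: widen same-site entries to the max length. */
static void normalize(struct fake_alt *a, int n)
{
	for (int i = 0; i < n; i++) {
		for (int j = i + 1; j < n && a[j].va == a[i].va; j++) {
			unsigned int len = a[i].len > a[j].len ? a[i].len : a[j].len;

			a[i].len = a[j].len = len;
		}
	}
}

int main(void)
{
	struct fake_alt alts[] = {
		{ 0x1000, 5 }, { 0x1000, 7 },	/* nested site: both end up as 7 */
		{ 0x2000, 3 },			/* independent site: unchanged */
	};

	normalize(alts, 3);
	for (int i = 0; i < 3; i++)
		printf("%#lx: %u bytes\n", alts[i].va, alts[i].len);
	return 0;
}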
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3cf156f70859..059e5c16af05 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -180,6 +180,43 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev,
return dev;
}
+/*
+ * SMN accesses may fail in ways that are difficult to detect here in the called
+ * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
+ * their own checking based on what behavior they expect.
+ *
+ * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
+ * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
+ * can be checked here, and a proper error code can be returned.
+ *
+ * But the Read-as-Zero response cannot be verified here. A value of 0 may be
+ * correct in some cases, so callers must check that this is correct for the
+ * register/fields they need.
+ *
+ * For SMN writes, success can be determined through a "write and read back".
+ * However, this is not robust when done here.
+ *
+ * Possible issues:
+ *
+ * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
+ * *not* match the write value.
+ *
+ * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
+ * known here.
+ *
+ * 3) Bits that are "Reserved / Set to 1". Ditto above.
+ *
+ * Callers of amd_smn_write() should do the "write and read back" check
+ * themselves, if needed.
+ *
+ * For #1, they can see if their target bits got cleared.
+ *
+ * For #2 and #3, they can check if their target bits got set as intended.
+ *
+ * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
+ * the operation is considered a success, and the caller does their own
+ * checking.
+ */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
struct pci_dev *root;
@@ -202,9 +239,6 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
err = (write ? pci_write_config_dword(root, 0x64, *value)
: pci_read_config_dword(root, 0x64, value));
- if (err)
- pr_warn("Error %s SMN address 0x%x.\n",
- (write ? "writing to" : "reading from"), address);
out_unlock:
mutex_unlock(&smn_mutex);
@@ -213,13 +247,20 @@ out:
return err;
}
-int amd_smn_read(u16 node, u32 address, u32 *value)
+int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{
- return __amd_smn_rw(node, address, value, false);
+ int err = __amd_smn_rw(node, address, value, false);
+
+ if (PCI_POSSIBLE_ERROR(*value)) {
+ err = -ENODEV;
+ *value = 0;
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);
-int amd_smn_write(u16 node, u32 address, u32 value)
+int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
return __amd_smn_rw(node, address, &value, true);
}
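[Editor's note] A hedged sketch of the caller-side verification the new comment asks for, now that both accessors are __must_check and the warning printk is gone. The helper name and the set-bits use case are invented; only the pattern matters: check the error code, then read back and test the bits of interest.

/* Kernel-style fragment; assumes <asm/amd_nb.h> and <linux/errno.h>. */
static int example_smn_set_bits(u16 node, u32 address, u32 bits)
{
	u32 val;
	int err;

	err = amd_smn_read(node, address, &val);
	if (err)
		return err;

	err = amd_smn_write(node, address, val | bits);
	if (err)
		return err;

	/* "Write and read back": confirm the target bits actually stuck. */
	err = amd_smn_read(node, address, &val);
	if (err)
		return err;

	return (val & bits) == bits ? 0 : -EIO;
}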
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a02bba0ed6b9..5857a0f5d514 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IA32_FEAT_CTL) += feat_ctl.o
ifdef CONFIG_CPU_SUP_INTEL
-obj-y += intel.o intel_pconfig.o tsx.o
+obj-y += intel.o tsx.o
obj-$(CONFIG_PM) += intel_epb.o
endif
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 44df3f11e731..be5889bded49 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1220,14 +1220,3 @@ void amd_check_microcode(void)
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
-
-/*
- * Issue a DIV 0/1 insn to clear any division data from previous DIV
- * operations.
- */
-void noinstr amd_clear_divider(void)
-{
- asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
- :: "a" (0), "d" (0), "r" (1));
-}
-EXPORT_SYMBOL_GPL(amd_clear_divider);
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index f9a8c7b7943f..b3fa61d45352 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -345,6 +345,7 @@ static DECLARE_WORK(disable_freq_invariance_work,
disable_freq_invariance_workfn);
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
static void scale_freq_tick(u64 acnt, u64 mcnt)
{
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index b6f927f6c567..45675da354f3 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1625,6 +1625,7 @@ static bool __init spec_ctrl_bhi_dis(void)
enum bhi_mitigations {
BHI_MITIGATION_OFF,
BHI_MITIGATION_ON,
+ BHI_MITIGATION_VMEXIT_ONLY,
};
static enum bhi_mitigations bhi_mitigation __ro_after_init =
@@ -1639,6 +1640,8 @@ static int __init spectre_bhi_parse_cmdline(char *str)
bhi_mitigation = BHI_MITIGATION_OFF;
else if (!strcmp(str, "on"))
bhi_mitigation = BHI_MITIGATION_ON;
+ else if (!strcmp(str, "vmexit"))
+ bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
else
pr_err("Ignoring unknown spectre_bhi option (%s)", str);
@@ -1659,19 +1662,22 @@ static void __init bhi_select_mitigation(void)
return;
}
+ /* Mitigate in hardware if supported */
if (spec_ctrl_bhi_dis())
return;
if (!IS_ENABLED(CONFIG_X86_64))
return;
- /* Mitigate KVM by default */
- setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
- pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
+ if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
+ pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
+ return;
+ }
- /* Mitigate syscalls when the mitigation is forced =on */
+ pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
- pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
}
static void __init spectre_v2_select_mitigation(void)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2b170da84f97..d4e539d4e158 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1075,6 +1075,10 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
+
+ /* Provide a sane default if not enumerated: */
+ if (!c->x86_clflush_size)
+ c->x86_clflush_size = 32;
}
c->x86_cache_bits = c->x86_phys_bits;
@@ -1585,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (have_cpuid_p()) {
cpu_detect(c);
get_cpu_vendor(c);
+ intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
get_cpu_address_sizes(c);
@@ -1744,7 +1749,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
cpu_detect(c);
get_cpu_vendor(c);
-
+ intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
get_cpu_address_sizes(c);
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index ea9e07d57c8d..1beccefbaff9 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -61,9 +61,11 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
void tsx_ap_init(void);
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
#else
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
+static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
#endif /* CONFIG_CPU_SUP_INTEL */
extern void init_spectral_chicken(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3c3e7e5695ba..08b95a35b5cb 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -72,19 +72,19 @@ static bool cpu_model_supports_sld __ro_after_init;
*/
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
- switch (c->x86_model) {
- case INTEL_FAM6_CORE_YONAH:
- case INTEL_FAM6_CORE2_MEROM:
- case INTEL_FAM6_CORE2_MEROM_L:
- case INTEL_FAM6_CORE2_PENRYN:
- case INTEL_FAM6_CORE2_DUNNINGTON:
- case INTEL_FAM6_NEHALEM:
- case INTEL_FAM6_NEHALEM_G:
- case INTEL_FAM6_NEHALEM_EP:
- case INTEL_FAM6_NEHALEM_EX:
- case INTEL_FAM6_WESTMERE:
- case INTEL_FAM6_WESTMERE_EP:
- case INTEL_FAM6_SANDYBRIDGE:
+ switch (c->x86_vfm) {
+ case INTEL_CORE_YONAH:
+ case INTEL_CORE2_MEROM:
+ case INTEL_CORE2_MEROM_L:
+ case INTEL_CORE2_PENRYN:
+ case INTEL_CORE2_DUNNINGTON:
+ case INTEL_NEHALEM:
+ case INTEL_NEHALEM_G:
+ case INTEL_NEHALEM_EP:
+ case INTEL_NEHALEM_EX:
+ case INTEL_WESTMERE:
+ case INTEL_WESTMERE_EP:
+ case INTEL_SANDYBRIDGE:
setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
}
}
@@ -106,9 +106,9 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
*/
if (c->x86 != 6)
return;
- switch (c->x86_model) {
- case INTEL_FAM6_XEON_PHI_KNL:
- case INTEL_FAM6_XEON_PHI_KNM:
+ switch (c->x86_vfm) {
+ case INTEL_XEON_PHI_KNL:
+ case INTEL_XEON_PHI_KNM:
break;
default:
return;
@@ -134,32 +134,32 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
* - Release note from 20180108 microcode release
*/
struct sku_microcode {
- u8 model;
+ u32 vfm;
u8 stepping;
u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
- { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 },
- { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 },
- { INTEL_FAM6_KABYLAKE, 0x09, 0x80 },
- { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 },
- { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 },
- { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
- { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
- { INTEL_FAM6_BROADWELL, 0x04, 0x28 },
- { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b },
- { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 },
- { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 },
- { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
- { INTEL_FAM6_HASWELL_L, 0x01, 0x21 },
- { INTEL_FAM6_HASWELL_G, 0x01, 0x18 },
- { INTEL_FAM6_HASWELL, 0x03, 0x23 },
- { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
- { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
- { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+ { INTEL_KABYLAKE, 0x0B, 0x80 },
+ { INTEL_KABYLAKE, 0x0A, 0x80 },
+ { INTEL_KABYLAKE, 0x09, 0x80 },
+ { INTEL_KABYLAKE_L, 0x0A, 0x80 },
+ { INTEL_KABYLAKE_L, 0x09, 0x80 },
+ { INTEL_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_SKYLAKE_X, 0x04, 0x0200003c },
+ { INTEL_BROADWELL, 0x04, 0x28 },
+ { INTEL_BROADWELL_G, 0x01, 0x1b },
+ { INTEL_BROADWELL_D, 0x02, 0x14 },
+ { INTEL_BROADWELL_D, 0x03, 0x07000011 },
+ { INTEL_BROADWELL_X, 0x01, 0x0b000025 },
+ { INTEL_HASWELL_L, 0x01, 0x21 },
+ { INTEL_HASWELL_G, 0x01, 0x18 },
+ { INTEL_HASWELL, 0x03, 0x23 },
+ { INTEL_HASWELL_X, 0x02, 0x3b },
+ { INTEL_HASWELL_X, 0x04, 0x10 },
+ { INTEL_IVYBRIDGE_X, 0x04, 0x42a },
/* Observed in the wild */
- { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
- { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
+ { INTEL_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_SANDYBRIDGE_X, 0x07, 0x712 },
};
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
@@ -173,11 +173,8 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return false;
- if (c->x86 != 6)
- return false;
-
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
- if (c->x86_model == spectre_bad_microcodes[i].model &&
+ if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)
return (c->microcode <= spectre_bad_microcodes[i].microcode);
}
@@ -190,98 +187,57 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
-#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128 0
-
#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
-#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
-
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED 0
-#define MKTME_DISABLED 1
-#define MKTME_UNINITIALIZED 2
-static int mktme_status = MKTME_UNINITIALIZED;
-
static void detect_tme_early(struct cpuinfo_x86 *c)
{
- u64 tme_activate, tme_policy, tme_crypto_algs;
- int keyid_bits = 0, nr_keyids = 0;
- static u64 tme_activate_cpu0 = 0;
+ u64 tme_activate;
+ int keyid_bits;
rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
- if (mktme_status != MKTME_UNINITIALIZED) {
- if (tme_activate != tme_activate_cpu0) {
- /* Broken BIOS? */
- pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
- pr_err_once("x86/tme: MKTME is not usable\n");
- mktme_status = MKTME_DISABLED;
-
- /* Proceed. We may need to exclude bits from x86_phys_bits. */
- }
- } else {
- tme_activate_cpu0 = tme_activate;
- }
-
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
- mktme_status = MKTME_DISABLED;
clear_cpu_cap(c, X86_FEATURE_TME);
return;
}
+ pr_info_once("x86/tme: enabled by BIOS\n");
+ keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+ if (!keyid_bits)
+ return;
- if (mktme_status != MKTME_UNINITIALIZED)
- goto detect_keyid_bits;
-
- pr_info("x86/tme: enabled by BIOS\n");
-
- tme_policy = TME_ACTIVATE_POLICY(tme_activate);
- if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
- pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+ /*
+ * KeyID bits are set by BIOS and can be present regardless
+ * of whether the kernel is using them. They effectively lower
+ * the number of physical address bits.
+ *
+ * Update cpuinfo_x86::x86_phys_bits accordingly.
+ */
+ c->x86_phys_bits -= keyid_bits;
+ pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
+ keyid_bits);
+}
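The rewritten detect_tme_early() reduces to plain bit slicing of the 64-bit TME_ACTIVATE value: bit 0 is the lock, bit 1 the enable, and bits 35:32 the KeyID width that gets subtracted from the usable physical address bits. A self-contained sketch of that decode on a sample value (msr_value and the baseline of 46 physical bits are made up for the example; no MSR is actually read):

#include <stdint.h>
#include <stdio.h>

#define TME_LOCKED(x)      ((x) & 0x1)
#define TME_ENABLED(x)     ((x) & 0x2)
#define TME_KEYID_BITS(x)  (((x) >> 32) & 0xf)	/* bits 35:32 */

int main(void)
{
	uint64_t msr_value = 0x600000003ULL;	/* made-up sample: locked, enabled, 6 KeyID bits */
	int phys_bits = 46;			/* made-up baseline for the example */

	if (!TME_LOCKED(msr_value) || !TME_ENABLED(msr_value)) {
		puts("TME not enabled by BIOS");
		return 0;
	}

	int keyid_bits = (int)TME_KEYID_BITS(msr_value);
	if (keyid_bits)
		phys_bits -= keyid_bits;	/* KeyIDs consume top physical address bits */

	printf("keyid_bits=%d usable phys_bits=%d\n", keyid_bits, phys_bits);
	return 0;
}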
- tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
- if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
- pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
- tme_crypto_algs);
- mktme_status = MKTME_DISABLED;
- }
-detect_keyid_bits:
- keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
- nr_keyids = (1UL << keyid_bits) - 1;
- if (nr_keyids) {
- pr_info_once("x86/mktme: enabled by BIOS\n");
- pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
- } else {
- pr_info_once("x86/mktme: disabled by BIOS\n");
- }
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
- if (mktme_status == MKTME_UNINITIALIZED) {
- /* MKTME is usable */
- mktme_status = MKTME_ENABLED;
- }
+ if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
+ return;
/*
- * KeyID bits effectively lower the number of physical address
- * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
+ * The BIOS can have limited CPUID to leaf 2, which breaks feature
+ * enumeration. Unlock it and update the maximum leaf info.
*/
- c->x86_phys_bits -= keyid_bits;
+ if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
+ c->cpuid_level = cpuid_eax(0);
}
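The helper above only re-reads CPUID leaf 0 when msr_clear_bit() reports that the limit bit was actually set, i.e. when clearing it changed something. A small sketch of that clear-and-report pattern on an ordinary 64-bit word (clear_bit_was_set() and the bit number are invented for the example; there is no real MSR access here):

#include <stdint.h>
#include <stdio.h>

/* Clear bit 'bit' in *val; return 1 if it was set, 0 if it was already clear. */
static int clear_bit_was_set(uint64_t *val, unsigned int bit)
{
	uint64_t mask = 1ULL << bit;

	if (!(*val & mask))
		return 0;
	*val &= ~mask;
	return 1;
}

int main(void)
{
	uint64_t misc_enable = 1ULL << 22;	/* pretend the CPUID-limit bit is set */

	if (clear_bit_was_set(&misc_enable, 22))
		puts("limit was set: re-read the maximum CPUID leaf now");
	else
		puts("nothing to do");
	return 0;
}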
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
- /* Unmask CPUID levels if masked: */
- if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
- if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
- MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
- c->cpuid_level = cpuid_eax(0);
- get_cpu_cap(c);
- }
- }
-
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
@@ -313,7 +269,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
* need the microcode to have already been loaded... so if it is
* not, recommend a BIOS update and disable large pages.
*/
- if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
+ if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
c->microcode < 0x20e) {
pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -345,17 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
}
/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
- if (c->x86 == 6) {
- switch (c->x86_model) {
- case INTEL_FAM6_ATOM_SALTWELL_MID:
- case INTEL_FAM6_ATOM_SALTWELL_TABLET:
- case INTEL_FAM6_ATOM_SILVERMONT_MID:
- case INTEL_FAM6_ATOM_AIRMONT_NP:
- set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
- break;
- default:
- break;
- }
+ switch (c->x86_vfm) {
+ case INTEL_ATOM_SALTWELL_MID:
+ case INTEL_ATOM_SALTWELL_TABLET:
+ case INTEL_ATOM_SILVERMONT_MID:
+ case INTEL_ATOM_AIRMONT_NP:
+ set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
+ break;
}
/*
@@ -394,7 +346,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
* should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
* to be modified.
*/
- if (c->x86 == 5 && c->x86_model == 9) {
+ if (c->x86_vfm == INTEL_QUARK_X1000) {
pr_info("Disabling PGE capability bit\n");
setup_clear_cpu_cap(X86_FEATURE_PGE);
}
@@ -626,12 +578,13 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_PEBS);
}
- if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
- (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+ if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
+ (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
+ c->x86_vfm == INTEL_NEHALEM_EX ||
+ c->x86_vfm == INTEL_WESTMERE_EX))
set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
- if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
- ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
+ if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
set_cpu_bug(c, X86_BUG_MONITOR);
#ifdef CONFIG_X86_64
@@ -1247,9 +1200,9 @@ void handle_bus_lock(struct pt_regs *regs)
* feature even though they do not enumerate IA32_CORE_CAPABILITIES.
*/
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
{}
};
diff --git a/arch/x86/kernel/cpu/intel_pconfig.c b/arch/x86/kernel/cpu/intel_pconfig.c
deleted file mode 100644
index 5be2b1790282..000000000000
--- a/arch/x86/kernel/cpu/intel_pconfig.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel PCONFIG instruction support.
- *
- * Copyright (C) 2017 Intel Corporation
- *
- * Author:
- * Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
- */
-#include <linux/bug.h>
-#include <linux/limits.h>
-
-#include <asm/cpufeature.h>
-#include <asm/intel_pconfig.h>
-
-#define PCONFIG_CPUID 0x1b
-
-#define PCONFIG_CPUID_SUBLEAF_MASK ((1 << 12) - 1)
-
-/* Subleaf type (EAX) for PCONFIG CPUID leaf (0x1B) */
-enum {
- PCONFIG_CPUID_SUBLEAF_INVALID = 0,
- PCONFIG_CPUID_SUBLEAF_TARGETID = 1,
-};
-
-/* Bitmask of supported targets */
-static u64 targets_supported __read_mostly;
-
-int pconfig_target_supported(enum pconfig_target target)
-{
- /*
- * We would need to re-think the implementation once we get > 64
- * PCONFIG targets. Spec allows up to 2^32 targets.
- */
- BUILD_BUG_ON(PCONFIG_TARGET_NR >= 64);
-
- if (WARN_ON_ONCE(target >= 64))
- return 0;
- return targets_supported & (1ULL << target);
-}
-
-static int __init intel_pconfig_init(void)
-{
- int subleaf;
-
- if (!boot_cpu_has(X86_FEATURE_PCONFIG))
- return 0;
-
- /*
- * Scan subleafs of PCONFIG CPUID leaf.
- *
- * Subleafs of the same type need not to be consecutive.
- *
- * Stop on the first invalid subleaf type. All subleafs after the first
- * invalid are invalid too.
- */
- for (subleaf = 0; subleaf < INT_MAX; subleaf++) {
- struct cpuid_regs regs;
-
- cpuid_count(PCONFIG_CPUID, subleaf,
- &regs.eax, &regs.ebx, &regs.ecx, &regs.edx);
-
- switch (regs.eax & PCONFIG_CPUID_SUBLEAF_MASK) {
- case PCONFIG_CPUID_SUBLEAF_INVALID:
- /* Stop on the first invalid subleaf */
- goto out;
- case PCONFIG_CPUID_SUBLEAF_TARGETID:
- /* Mark supported PCONFIG targets */
- if (regs.ebx < 64)
- targets_supported |= (1ULL << regs.ebx);
- if (regs.ecx < 64)
- targets_supported |= (1ULL << regs.ecx);
- if (regs.edx < 64)
- targets_supported |= (1ULL << regs.edx);
- break;
- default:
- /* Unknown CPUID.PCONFIG subleaf: ignore */
- break;
- }
- }
-out:
- return 0;
-}
-arch_initcall(intel_pconfig_init);
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index ad0623b659ed..b85ec7a4ec9e 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -677,10 +677,9 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
* is already totally confused. In this case it's likely it will
* not fully execute the machine check handler either.
*/
-bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
- bool error_seen = false;
struct mce m;
int i;
@@ -754,8 +753,6 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
continue;
log_it:
- error_seen = true;
-
if (flags & MCP_DONTLOG)
goto clear_it;
@@ -787,8 +784,6 @@ clear_it:
*/
sync_core();
-
- return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 94953d749475..49ed3428785d 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -487,12 +487,16 @@ static void prepare_msrs(void *info)
wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
}
- wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
+
+ if (m.misc)
+ wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
} else {
wrmsrl(MSR_IA32_MCx_STATUS(b), m.status);
wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr);
- wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
+
+ if (m.misc)
+ wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
}
}
@@ -795,4 +799,5 @@ static void __exit inject_exit(void)
module_init(inject_init);
module_exit(inject_exit);
+MODULE_DESCRIPTION("Machine check injection support");
MODULE_LICENSE("GPL");
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index 1db560ed2ca3..68f537347466 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -30,8 +30,7 @@ dump_array()
# If the /* comment */ starts with a quote string, grab that.
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
- [ -z "$VALUE" ] && VALUE="\"$NAME\""
- [ "$VALUE" = '""' ] && continue
+ [ ! "$VALUE" ] && continue
# Name is uppercase, VALUE is all lowercase
VALUE="$(echo "$VALUE" | tr A-Z a-z)"
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index a113d9aba553..1930fce9dfe9 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -19,7 +19,6 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>
#include <asm/cpu_device_id.h>
@@ -60,7 +59,8 @@ static void mba_wrmsr_intel(struct msr_param *m);
static void cat_wrmsr(struct msr_param *m);
static void mba_wrmsr_amd(struct msr_param *m);
-#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
+#define ctrl_domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.ctrl_domains)
+#define mon_domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.mon_domains)
struct rdt_hw_resource rdt_resources_all[] = {
[RDT_RESOURCE_L3] =
@@ -68,8 +68,10 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_L3,
.name = "L3",
- .cache_level = 3,
- .domains = domain_init(RDT_RESOURCE_L3),
+ .ctrl_scope = RESCTRL_L3_CACHE,
+ .mon_scope = RESCTRL_L3_CACHE,
+ .ctrl_domains = ctrl_domain_init(RDT_RESOURCE_L3),
+ .mon_domains = mon_domain_init(RDT_RESOURCE_L3),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
.fflags = RFTYPE_RES_CACHE,
@@ -82,8 +84,8 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_L2,
.name = "L2",
- .cache_level = 2,
- .domains = domain_init(RDT_RESOURCE_L2),
+ .ctrl_scope = RESCTRL_L2_CACHE,
+ .ctrl_domains = ctrl_domain_init(RDT_RESOURCE_L2),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
.fflags = RFTYPE_RES_CACHE,
@@ -96,8 +98,8 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_MBA,
.name = "MB",
- .cache_level = 3,
- .domains = domain_init(RDT_RESOURCE_MBA),
+ .ctrl_scope = RESCTRL_L3_CACHE,
+ .ctrl_domains = ctrl_domain_init(RDT_RESOURCE_MBA),
.parse_ctrlval = parse_bw,
.format_str = "%d=%*u",
.fflags = RFTYPE_RES_MB,
@@ -108,8 +110,8 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_SMBA,
.name = "SMBA",
- .cache_level = 3,
- .domains = domain_init(RDT_RESOURCE_SMBA),
+ .ctrl_scope = RESCTRL_L3_CACHE,
+ .ctrl_domains = ctrl_domain_init(RDT_RESOURCE_SMBA),
.parse_ctrlval = parse_bw,
.format_str = "%d=%*u",
.fflags = RFTYPE_RES_MB,
@@ -306,8 +308,8 @@ static void rdt_get_cdp_l2_config(void)
static void mba_wrmsr_amd(struct msr_param *m)
{
+ struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(m->dom);
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
for (i = m->low; i < m->high; i++)
@@ -330,8 +332,8 @@ static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
static void mba_wrmsr_intel(struct msr_param *m)
{
+ struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(m->dom);
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
/* Write the delay values for mba. */
@@ -341,23 +343,38 @@ static void mba_wrmsr_intel(struct msr_param *m)
static void cat_wrmsr(struct msr_param *m)
{
+ struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(m->dom);
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
for (i = m->low; i < m->high; i++)
wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}
-struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
+struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu, struct rdt_resource *r)
{
- struct rdt_domain *d;
+ struct rdt_ctrl_domain *d;
lockdep_assert_cpus_held();
- list_for_each_entry(d, &r->domains, list) {
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
/* Find the domain that contains this CPU */
- if (cpumask_test_cpu(cpu, &d->cpu_mask))
+ if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
+ return d;
+ }
+
+ return NULL;
+}
+
+struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu, struct rdt_resource *r)
+{
+ struct rdt_mon_domain *d;
+
+ lockdep_assert_cpus_held();
+
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ /* Find the domain that contains this CPU */
+ if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
return d;
}
@@ -379,24 +396,21 @@ void rdt_ctrl_update(void *arg)
}
/*
- * rdt_find_domain - Find a domain in a resource that matches input resource id
+ * rdt_find_domain - Search for a domain id in a resource domain list.
*
- * Search resource r's domain list to find the resource id. If the resource
- * id is found in a domain, return the domain. Otherwise, if requested by
- * caller, return the first domain whose id is bigger than the input id.
- * The domain list is sorted by id in ascending order.
+ * Search the domain list to find the domain id. If the domain id is
+ * found, return the domain. NULL otherwise. If the domain id is not
+ * found (and NULL returned) then the first domain with id bigger than
+ * the input id can be returned to the caller via @pos.
*/
-struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
- struct list_head **pos)
+struct rdt_domain_hdr *rdt_find_domain(struct list_head *h, int id,
+ struct list_head **pos)
{
- struct rdt_domain *d;
+ struct rdt_domain_hdr *d;
struct list_head *l;
- if (id < 0)
- return ERR_PTR(-ENODEV);
-
- list_for_each(l, &r->domains) {
- d = list_entry(l, struct rdt_domain, list);
+ list_for_each(l, h) {
+ d = list_entry(l, struct rdt_domain_hdr, list);
/* When id is found, return its domain. */
if (id == d->id)
return d;
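The reworked rdt_find_domain() walks a generic list sorted by ascending id and either returns the matching entry or, via @pos, the place where a new one would be inserted; callers then container_of() the header into a control or monitor domain. A user-space sketch of that search-or-insertion-point idiom with a plain singly linked list (struct node and find_domain are invented names for the example):

#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *next; };

/*
 * Walk a list sorted by ascending id. Return the matching node, or NULL;
 * when no match is found, *pos (if non-NULL) is set to the first node whose
 * id is larger than the requested one, i.e. the insertion point.
 */
static struct node *find_domain(struct node *head, int id, struct node **pos)
{
	for (struct node *n = head; n; n = n->next) {
		if (n->id == id)
			return n;
		if (n->id > id) {
			if (pos)
				*pos = n;
			return NULL;
		}
	}
	if (pos)
		*pos = NULL;	/* no larger id: append at the tail */
	return NULL;
}

int main(void)
{
	struct node c = { 4, NULL }, b = { 2, &c }, a = { 0, &b };
	struct node *pos = NULL;

	printf("lookup 2 -> %s\n", find_domain(&a, 2, NULL) ? "found" : "missing");
	find_domain(&a, 3, &pos);
	printf("insert 3 before id %d\n", pos ? pos->id : -1);
	return 0;
}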
@@ -425,18 +439,23 @@ static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
*dc = r->default_ctrl;
}
-static void domain_free(struct rdt_hw_domain *hw_dom)
+static void ctrl_domain_free(struct rdt_hw_ctrl_domain *hw_dom)
+{
+ kfree(hw_dom->ctrl_val);
+ kfree(hw_dom);
+}
+
+static void mon_domain_free(struct rdt_hw_mon_domain *hw_dom)
{
kfree(hw_dom->arch_mbm_total);
kfree(hw_dom->arch_mbm_local);
- kfree(hw_dom->ctrl_val);
kfree(hw_dom);
}
-static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
+static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{
+ struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
struct msr_param m;
u32 *dc;
@@ -461,7 +480,7 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
* @num_rmid: The size of the MBM counter array
* @hw_dom: The domain that owns the allocated arrays
*/
-static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
+static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_mon_domain *hw_dom)
{
size_t tsize;
@@ -484,37 +503,45 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
return 0;
}
-/*
- * domain_add_cpu - Add a cpu to a resource's domain list.
- *
- * If an existing domain in the resource r's domain list matches the cpu's
- * resource id, add the cpu in the domain.
- *
- * Otherwise, a new domain is allocated and inserted into the right position
- * in the domain list sorted by id in ascending order.
- *
- * The order in the domain list is visible to users when we print entries
- * in the schemata file and schemata input is validated to have the same order
- * as this list.
- */
-static void domain_add_cpu(int cpu, struct rdt_resource *r)
+static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
{
- int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
+ switch (scope) {
+ case RESCTRL_L2_CACHE:
+ case RESCTRL_L3_CACHE:
+ return get_cpu_cacheinfo_id(cpu, scope);
+ case RESCTRL_L3_NODE:
+ return cpu_to_node(cpu);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static void domain_add_cpu_ctrl(int cpu, struct rdt_resource *r)
+{
+ int id = get_domain_id_from_scope(cpu, r->ctrl_scope);
+ struct rdt_hw_ctrl_domain *hw_dom;
struct list_head *add_pos = NULL;
- struct rdt_hw_domain *hw_dom;
- struct rdt_domain *d;
+ struct rdt_domain_hdr *hdr;
+ struct rdt_ctrl_domain *d;
int err;
lockdep_assert_held(&domain_list_lock);
- d = rdt_find_domain(r, id, &add_pos);
- if (IS_ERR(d)) {
- pr_warn("Couldn't find cache id for CPU %d\n", cpu);
+ if (id < 0) {
+ pr_warn_once("Can't find control domain id for CPU:%d scope:%d for resource %s\n",
+ cpu, r->ctrl_scope, r->name);
return;
}
- if (d) {
- cpumask_set_cpu(cpu, &d->cpu_mask);
+ hdr = rdt_find_domain(&r->ctrl_domains, id, &add_pos);
+ if (hdr) {
+ if (WARN_ON_ONCE(hdr->type != RESCTRL_CTRL_DOMAIN))
+ return;
+ d = container_of(hdr, struct rdt_ctrl_domain, hdr);
+
+ cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
if (r->cache.arch_has_per_cpu_cfg)
rdt_domain_reconfigure_cdp(r);
return;
@@ -525,64 +552,187 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
return;
d = &hw_dom->d_resctrl;
- d->id = id;
- cpumask_set_cpu(cpu, &d->cpu_mask);
+ d->hdr.id = id;
+ d->hdr.type = RESCTRL_CTRL_DOMAIN;
+ cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
rdt_domain_reconfigure_cdp(r);
- if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
- domain_free(hw_dom);
+ if (domain_setup_ctrlval(r, d)) {
+ ctrl_domain_free(hw_dom);
return;
}
- if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
- domain_free(hw_dom);
+ list_add_tail_rcu(&d->hdr.list, add_pos);
+
+ err = resctrl_online_ctrl_domain(r, d);
+ if (err) {
+ list_del_rcu(&d->hdr.list);
+ synchronize_rcu();
+ ctrl_domain_free(hw_dom);
+ }
+}
+
+static void domain_add_cpu_mon(int cpu, struct rdt_resource *r)
+{
+ int id = get_domain_id_from_scope(cpu, r->mon_scope);
+ struct list_head *add_pos = NULL;
+ struct rdt_hw_mon_domain *hw_dom;
+ struct rdt_domain_hdr *hdr;
+ struct rdt_mon_domain *d;
+ int err;
+
+ lockdep_assert_held(&domain_list_lock);
+
+ if (id < 0) {
+ pr_warn_once("Can't find monitor domain id for CPU:%d scope:%d for resource %s\n",
+ cpu, r->mon_scope, r->name);
return;
}
- list_add_tail_rcu(&d->list, add_pos);
+ hdr = rdt_find_domain(&r->mon_domains, id, &add_pos);
+ if (hdr) {
+ if (WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN))
+ return;
+ d = container_of(hdr, struct rdt_mon_domain, hdr);
+
+ cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
+ return;
+ }
- err = resctrl_online_domain(r, d);
+ hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
+ if (!hw_dom)
+ return;
+
+ d = &hw_dom->d_resctrl;
+ d->hdr.id = id;
+ d->hdr.type = RESCTRL_MON_DOMAIN;
+ d->ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
+ if (!d->ci) {
+ pr_warn_once("Can't find L3 cache for CPU:%d resource %s\n", cpu, r->name);
+ mon_domain_free(hw_dom);
+ return;
+ }
+ cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
+
+ arch_mon_domain_online(r, d);
+
+ if (arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
+ mon_domain_free(hw_dom);
+ return;
+ }
+
+ list_add_tail_rcu(&d->hdr.list, add_pos);
+
+ err = resctrl_online_mon_domain(r, d);
if (err) {
- list_del_rcu(&d->list);
+ list_del_rcu(&d->hdr.list);
synchronize_rcu();
- domain_free(hw_dom);
+ mon_domain_free(hw_dom);
}
}
-static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+static void domain_add_cpu(int cpu, struct rdt_resource *r)
+{
+ if (r->alloc_capable)
+ domain_add_cpu_ctrl(cpu, r);
+ if (r->mon_capable)
+ domain_add_cpu_mon(cpu, r);
+}
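domain_add_cpu_ctrl() and domain_add_cpu_mon() share one shape: allocate, initialize, publish on the domain list, call the resctrl online hook, and unwind the publication if the hook fails. A stripped-down sketch of that shape with the RCU publication, locking and sorted insertion left out (online_cb() and its failure condition are invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct dom { int id; struct dom *next; };

static struct dom *domains;		/* head of a singly linked list */

static int online_cb(struct dom *d)	/* stand-in for the resctrl online hook */
{
	return d->id == 13 ? -1 : 0;	/* fail for one id, purely for demonstration */
}

static void add_domain(int id)
{
	struct dom *d = calloc(1, sizeof(*d));

	if (!d)
		return;
	d->id = id;
	d->next = domains;
	domains = d;			/* publish first ... */

	if (online_cb(d)) {		/* ... then roll back if the online hook fails */
		domains = d->next;
		free(d);
		printf("domain %d: online failed, rolled back\n", id);
		return;
	}
	printf("domain %d online\n", id);
}

int main(void)
{
	add_domain(7);
	add_domain(13);
	return 0;
}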
+
+static void domain_remove_cpu_ctrl(int cpu, struct rdt_resource *r)
{
- int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
- struct rdt_hw_domain *hw_dom;
- struct rdt_domain *d;
+ int id = get_domain_id_from_scope(cpu, r->ctrl_scope);
+ struct rdt_hw_ctrl_domain *hw_dom;
+ struct rdt_domain_hdr *hdr;
+ struct rdt_ctrl_domain *d;
lockdep_assert_held(&domain_list_lock);
- d = rdt_find_domain(r, id, NULL);
- if (IS_ERR_OR_NULL(d)) {
- pr_warn("Couldn't find cache id for CPU %d\n", cpu);
+ if (id < 0) {
+ pr_warn_once("Can't find control domain id for CPU:%d scope:%d for resource %s\n",
+ cpu, r->ctrl_scope, r->name);
+ return;
+ }
+
+ hdr = rdt_find_domain(&r->ctrl_domains, id, NULL);
+ if (!hdr) {
+ pr_warn("Can't find control domain for id=%d for CPU %d for resource %s\n",
+ id, cpu, r->name);
return;
}
- hw_dom = resctrl_to_arch_dom(d);
- cpumask_clear_cpu(cpu, &d->cpu_mask);
- if (cpumask_empty(&d->cpu_mask)) {
- resctrl_offline_domain(r, d);
- list_del_rcu(&d->list);
+ if (WARN_ON_ONCE(hdr->type != RESCTRL_CTRL_DOMAIN))
+ return;
+
+ d = container_of(hdr, struct rdt_ctrl_domain, hdr);
+ hw_dom = resctrl_to_arch_ctrl_dom(d);
+
+ cpumask_clear_cpu(cpu, &d->hdr.cpu_mask);
+ if (cpumask_empty(&d->hdr.cpu_mask)) {
+ resctrl_offline_ctrl_domain(r, d);
+ list_del_rcu(&d->hdr.list);
synchronize_rcu();
/*
- * rdt_domain "d" is going to be freed below, so clear
+ * rdt_ctrl_domain "d" is going to be freed below, so clear
* its pointer from pseudo_lock_region struct.
*/
if (d->plr)
d->plr->d = NULL;
- domain_free(hw_dom);
+ ctrl_domain_free(hw_dom);
return;
}
}
+static void domain_remove_cpu_mon(int cpu, struct rdt_resource *r)
+{
+ int id = get_domain_id_from_scope(cpu, r->mon_scope);
+ struct rdt_hw_mon_domain *hw_dom;
+ struct rdt_domain_hdr *hdr;
+ struct rdt_mon_domain *d;
+
+ lockdep_assert_held(&domain_list_lock);
+
+ if (id < 0) {
+ pr_warn_once("Can't find monitor domain id for CPU:%d scope:%d for resource %s\n",
+ cpu, r->mon_scope, r->name);
+ return;
+ }
+
+ hdr = rdt_find_domain(&r->mon_domains, id, NULL);
+ if (!hdr) {
+ pr_warn("Can't find monitor domain for id=%d for CPU %d for resource %s\n",
+ id, cpu, r->name);
+ return;
+ }
+
+ if (WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN))
+ return;
+
+ d = container_of(hdr, struct rdt_mon_domain, hdr);
+ hw_dom = resctrl_to_arch_mon_dom(d);
+
+ cpumask_clear_cpu(cpu, &d->hdr.cpu_mask);
+ if (cpumask_empty(&d->hdr.cpu_mask)) {
+ resctrl_offline_mon_domain(r, d);
+ list_del_rcu(&d->hdr.list);
+ synchronize_rcu();
+ mon_domain_free(hw_dom);
+
+ return;
+ }
+}
+
+static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+{
+ if (r->alloc_capable)
+ domain_remove_cpu_ctrl(cpu, r);
+ if (r->mon_capable)
+ domain_remove_cpu_mon(cpu, r);
+}
+
static void clear_closid_rmid(int cpu)
{
struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index b7291f60399c..50fa1fe9a073 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -60,7 +60,7 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
}
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_domain *d)
+ struct rdt_ctrl_domain *d)
{
struct resctrl_staged_config *cfg;
u32 closid = data->rdtgrp->closid;
@@ -69,7 +69,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
cfg = &d->staged_config[s->conf_type];
if (cfg->have_new_ctrl) {
- rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+ rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
return -EINVAL;
}
@@ -139,7 +139,7 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
* resource type.
*/
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_domain *d)
+ struct rdt_ctrl_domain *d)
{
struct rdtgroup *rdtgrp = data->rdtgrp;
struct resctrl_staged_config *cfg;
@@ -148,7 +148,7 @@ int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
cfg = &d->staged_config[s->conf_type];
if (cfg->have_new_ctrl) {
- rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+ rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
return -EINVAL;
}
@@ -208,8 +208,8 @@ static int parse_line(char *line, struct resctrl_schema *s,
struct resctrl_staged_config *cfg;
struct rdt_resource *r = s->res;
struct rdt_parse_data data;
+ struct rdt_ctrl_domain *d;
char *dom = NULL, *id;
- struct rdt_domain *d;
unsigned long dom_id;
/* Walking r->domains, ensure it can't race with cpuhp */
@@ -231,8 +231,8 @@ next:
return -EINVAL;
}
dom = strim(dom);
- list_for_each_entry(d, &r->domains, list) {
- if (d->id == dom_id) {
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
+ if (d->hdr.id == dom_id) {
data.buf = dom;
data.rdtgrp = rdtgrp;
if (r->parse_ctrlval(&data, s, d))
@@ -272,15 +272,15 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
}
}
-int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
+ struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
u32 idx = get_config_index(closid, t);
struct msr_param msr_param;
- if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
+ if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
return -EINVAL;
hw_dom->ctrl_val[idx] = cfg_val;
@@ -297,17 +297,17 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
struct resctrl_staged_config *cfg;
- struct rdt_hw_domain *hw_dom;
+ struct rdt_hw_ctrl_domain *hw_dom;
struct msr_param msr_param;
+ struct rdt_ctrl_domain *d;
enum resctrl_conf_type t;
- struct rdt_domain *d;
u32 idx;
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
- list_for_each_entry(d, &r->domains, list) {
- hw_dom = resctrl_to_arch_dom(d);
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
+ hw_dom = resctrl_to_arch_ctrl_dom(d);
msr_param.res = NULL;
for (t = 0; t < CDP_NUM_TYPES; t++) {
cfg = &hw_dom->d_resctrl.staged_config[t];
@@ -330,7 +330,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
}
}
if (msr_param.res)
- smp_call_function_any(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
+ smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
}
return 0;
@@ -430,10 +430,10 @@ out:
return ret ?: nbytes;
}
-u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type)
{
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+ struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
u32 idx = get_config_index(closid, type);
return hw_dom->ctrl_val[idx];
@@ -442,7 +442,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
struct rdt_resource *r = schema->res;
- struct rdt_domain *dom;
+ struct rdt_ctrl_domain *dom;
bool sep = false;
u32 ctrl_val;
@@ -450,7 +450,7 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
lockdep_assert_cpus_held();
seq_printf(s, "%*s:", max_name_width, schema->name);
- list_for_each_entry(dom, &r->domains, list) {
+ list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
if (sep)
seq_puts(s, ";");
@@ -460,7 +460,7 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
ctrl_val = resctrl_arch_get_config(r, dom, closid,
schema->conf_type);
- seq_printf(s, r->format_str, dom->id, max_data_width,
+ seq_printf(s, r->format_str, dom->hdr.id, max_data_width,
ctrl_val);
sep = true;
}
@@ -489,7 +489,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
} else {
seq_printf(s, "%s:%d=%x\n",
rdtgrp->plr->s->res->name,
- rdtgrp->plr->d->id,
+ rdtgrp->plr->d->hdr.id,
rdtgrp->plr->cbm);
}
} else {
@@ -514,8 +514,8 @@ static int smp_mon_event_count(void *arg)
}
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
- struct rdt_domain *d, struct rdtgroup *rdtgrp,
- int evtid, int first)
+ struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
+ cpumask_t *cpumask, int evtid, int first)
{
int cpu;
@@ -529,7 +529,6 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
rr->evtid = evtid;
rr->r = r;
rr->d = d;
- rr->val = 0;
rr->first = first;
rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
if (IS_ERR(rr->arch_mon_ctx)) {
@@ -537,7 +536,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
return;
}
- cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU);
+ cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);
/*
* cpumask_any_housekeeping() prefers housekeeping CPUs, but
@@ -546,7 +545,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
* counters on some platforms if its called in IRQ context.
*/
if (tick_nohz_full_cpu(cpu))
- smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
+ smp_call_function_any(cpumask, mon_event_count, rr, 1);
else
smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
@@ -556,12 +555,13 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
struct kernfs_open_file *of = m->private;
+ struct rdt_domain_hdr *hdr;
+ struct rmid_read rr = {0};
+ struct rdt_mon_domain *d;
u32 resid, evtid, domid;
struct rdtgroup *rdtgrp;
struct rdt_resource *r;
union mon_data_bits md;
- struct rdt_domain *d;
- struct rmid_read rr;
int ret = 0;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
@@ -574,15 +574,40 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
resid = md.u.rid;
domid = md.u.domid;
evtid = md.u.evtid;
-
r = &rdt_resources_all[resid].r_resctrl;
- d = rdt_find_domain(r, domid, NULL);
- if (IS_ERR_OR_NULL(d)) {
+
+ if (md.u.sum) {
+ /*
+ * This file requires summing across all domains that share
+ * the L3 cache id that was provided in the "domid" field of the
+ * mon_data_bits union. Search all domains in the resource for
+ * one that matches this cache id.
+ */
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ if (d->ci->id == domid) {
+ rr.ci = d->ci;
+ mon_event_read(&rr, r, NULL, rdtgrp,
+ &d->ci->shared_cpu_map, evtid, false);
+ goto checkresult;
+ }
+ }
ret = -ENOENT;
goto out;
+ } else {
+ /*
+ * This file provides data from a single domain. Search
+ * the resource to find the domain with "domid".
+ */
+ hdr = rdt_find_domain(&r->mon_domains, domid, NULL);
+ if (!hdr || WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN)) {
+ ret = -ENOENT;
+ goto out;
+ }
+ d = container_of(hdr, struct rdt_mon_domain, hdr);
+ mon_event_read(&rr, r, d, rdtgrp, &d->hdr.cpu_mask, evtid, false);
}
- mon_event_read(&rr, r, d, rdtgrp, evtid, false);
+checkresult:
if (rr.err == -EIO)
seq_puts(m, "Error\n");
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index f1d926832ec8..955999aecfca 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -127,29 +127,54 @@ struct mon_evt {
};
/**
- * union mon_data_bits - Monitoring details for each event file
+ * union mon_data_bits - Monitoring details for each event file.
* @priv: Used to store monitoring event data in @u
- * as kernfs private data
- * @rid: Resource id associated with the event file
- * @evtid: Event id associated with the event file
- * @domid: The domain to which the event file belongs
- * @u: Name of the bit fields struct
+ * as kernfs private data.
+ * @u.rid: Resource id associated with the event file.
+ * @u.evtid: Event id associated with the event file.
+ * @u.sum: Set when event must be summed across multiple
+ * domains.
+ * @u.domid: When @u.sum is zero this is the domain to which
+ * the event file belongs. When @u.sum is one this
+ * is the id of the L3 cache that all domains to be
+ * summed share.
+ * @u: Name of the bit fields struct.
*/
union mon_data_bits {
void *priv;
struct {
unsigned int rid : 10;
- enum resctrl_event_id evtid : 8;
+ enum resctrl_event_id evtid : 7;
+ unsigned int sum : 1;
unsigned int domid : 14;
} u;
};
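Shrinking evtid from 8 to 7 bits is what makes room for the new sum flag: rid(10) + evtid(7) + sum(1) + domid(14) still totals 32 bits, so the encoded identity keeps fitting inside the kernfs private pointer. A standalone sketch of that encode/decode round trip (union mon_data mirrors the layout above but is a separate example type, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Same idea as the union above: pack file identity into a private pointer. */
union mon_data {
	void *priv;
	struct {
		unsigned int rid   : 10;
		unsigned int evtid : 7;
		unsigned int sum   : 1;
		unsigned int domid : 14;
	} u;
};

int main(void)
{
	union mon_data md = { .priv = NULL };

	md.u.rid = 1;		/* resource */
	md.u.evtid = 3;		/* event */
	md.u.sum = 1;		/* aggregate across domains sharing an L3 */
	md.u.domid = 42;	/* L3 cache id when sum == 1 */

	void *stored = md.priv;				/* what would live in kernfs */
	union mon_data back = { .priv = stored };	/* decode on the read side */

	printf("rid=%u evtid=%u sum=%u domid=%u\n",
	       back.u.rid, back.u.evtid, back.u.sum, back.u.domid);
	return 0;
}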
+/**
+ * struct rmid_read - Data passed across smp_call*() to read event count.
+ * @rgrp: Resource group for which the counter is being read. If it is a parent
+ * resource group then its event count is summed with the count from all
+ * its child resource groups.
+ * @r: Resource describing the properties of the event being read.
+ * @d: Domain that the counter should be read from. If NULL then sum all
+ * domains in @r sharing L3 @ci.id
+ * @evtid: Which monitor event to read.
+ * @first: Initialize MBM counter when true.
+ * @ci: Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
+ * @err: Error encountered when reading counter.
+ * @val: Returned value of event counter. If @rgrp is a parent resource group,
+ * @val includes the sum of event counts from its child resource groups.
+ * If @d is NULL, @val includes the sum of all domains in @r sharing @ci.id
+ * (summed across child resource groups if @rgrp is a parent resource group).
+ * @arch_mon_ctx: Hardware monitor allocated for this read request (MPAM only).
+ */
struct rmid_read {
struct rdtgroup *rgrp;
struct rdt_resource *r;
- struct rdt_domain *d;
+ struct rdt_mon_domain *d;
enum resctrl_event_id evtid;
bool first;
+ struct cacheinfo *ci;
int err;
u64 val;
void *arch_mon_ctx;
@@ -232,7 +257,7 @@ struct mongroup {
*/
struct pseudo_lock_region {
struct resctrl_schema *s;
- struct rdt_domain *d;
+ struct rdt_ctrl_domain *d;
u32 cbm;
wait_queue_head_t lock_thread_wq;
int thread_done;
@@ -355,25 +380,41 @@ struct arch_mbm_state {
};
/**
- * struct rdt_hw_domain - Arch private attributes of a set of CPUs that share
- * a resource
+ * struct rdt_hw_ctrl_domain - Arch private attributes of a set of CPUs that share
+ * a resource for a control function
* @d_resctrl: Properties exposed to the resctrl file system
* @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
+ *
+ * Members of this structure are accessed via helpers that provide abstraction.
+ */
+struct rdt_hw_ctrl_domain {
+ struct rdt_ctrl_domain d_resctrl;
+ u32 *ctrl_val;
+};
+
+/**
+ * struct rdt_hw_mon_domain - Arch private attributes of a set of CPUs that share
+ * a resource for a monitor function
+ * @d_resctrl: Properties exposed to the resctrl file system
* @arch_mbm_total: arch private state for MBM total bandwidth
* @arch_mbm_local: arch private state for MBM local bandwidth
*
* Members of this structure are accessed via helpers that provide abstraction.
*/
-struct rdt_hw_domain {
- struct rdt_domain d_resctrl;
- u32 *ctrl_val;
+struct rdt_hw_mon_domain {
+ struct rdt_mon_domain d_resctrl;
struct arch_mbm_state *arch_mbm_total;
struct arch_mbm_state *arch_mbm_local;
};
-static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
+static inline struct rdt_hw_ctrl_domain *resctrl_to_arch_ctrl_dom(struct rdt_ctrl_domain *r)
{
- return container_of(r, struct rdt_hw_domain, d_resctrl);
+ return container_of(r, struct rdt_hw_ctrl_domain, d_resctrl);
+}
+
+static inline struct rdt_hw_mon_domain *resctrl_to_arch_mon_dom(struct rdt_mon_domain *r)
+{
+ return container_of(r, struct rdt_hw_mon_domain, d_resctrl);
}
/**
@@ -385,7 +426,7 @@ static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
*/
struct msr_param {
struct rdt_resource *res;
- struct rdt_domain *dom;
+ struct rdt_ctrl_domain *dom;
u32 low;
u32 high;
};
@@ -458,9 +499,9 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r
}
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_domain *d);
+ struct rdt_ctrl_domain *d);
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_domain *d);
+ struct rdt_ctrl_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -493,6 +534,8 @@ static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+void arch_mon_domain_online(struct rdt_resource *r, struct rdt_mon_domain *d);
+
/*
* To return the common struct rdt_resource, which is contained in struct
* rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource.
@@ -558,27 +601,28 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn);
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
umode_t mask);
-struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
- struct list_head **pos);
+struct rdt_domain_hdr *rdt_find_domain(struct list_head *h, int id,
+ struct list_head **pos);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v);
-bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
unsigned long cbm, int closid, bool exclusive);
-unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_ctrl_domain *d,
unsigned long cbm);
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
int rdtgroup_tasks_assigned(struct rdtgroup *r);
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
-bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm);
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d);
int rdt_pseudo_lock_init(void);
void rdt_pseudo_lock_release(void);
int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
-struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
+struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu, struct rdt_resource *r);
+struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu, struct rdt_resource *r);
int closids_supported(void);
void closid_free(int closid);
int alloc_rmid(u32 closid);
@@ -589,19 +633,19 @@ bool __init rdt_cpu_has(int flag);
void mon_event_count(void *info);
int rdtgroup_mondata_show(struct seq_file *m, void *arg);
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
- struct rdt_domain *d, struct rdtgroup *rdtgrp,
- int evtid, int first);
-void mbm_setup_overflow_handler(struct rdt_domain *dom,
+ struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
+ cpumask_t *cpumask, int evtid, int first);
+void mbm_setup_overflow_handler(struct rdt_mon_domain *dom,
unsigned long delay_ms,
int exclude_cpu);
void mbm_handle_overflow(struct work_struct *work);
void __init intel_rdt_mbm_apply_quirk(void);
bool is_mba_sc(struct rdt_resource *r);
-void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms,
+void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
int exclude_cpu);
void cqm_handle_limbo(struct work_struct *work);
-bool has_busy_rmid(struct rdt_domain *d);
-void __check_limbo(struct rdt_domain *d, bool force_free);
+bool has_busy_rmid(struct rdt_mon_domain *d);
+void __check_limbo(struct rdt_mon_domain *d, bool force_free);
void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
void __init thread_throttle_mode_init(void);
void __init mbm_config_rftype_init(const char *config);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 2345e6836593..851b561850e0 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -15,6 +15,8 @@
* Software Developer Manual June 2016, volume 3, section 17.17.
*/
+#define pr_fmt(fmt) "resctrl: " fmt
+
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/sizes.h>
@@ -97,6 +99,8 @@ unsigned int resctrl_rmid_realloc_limit;
#define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5))
+static int snc_nodes_per_l3_cache = 1;
+
/*
* The correction factor table is documented in Documentation/arch/x86/resctrl.rst.
* If rmid > rmid threshold, MBM total and local values should be multiplied
@@ -185,7 +189,43 @@ static inline struct rmid_entry *__rmid_entry(u32 idx)
return entry;
}
-static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
+/*
+ * When Sub-NUMA Cluster (SNC) mode is not enabled (as indicated by
+ * "snc_nodes_per_l3_cache == 1") no translation of the RMID value is
+ * needed. The physical RMID is the same as the logical RMID.
+ *
+ * On a platform with SNC mode enabled, Linux enables RMID sharing mode
+ * via MSR 0xCA0 (see the "RMID Sharing Mode" section in the "Intel
+ * Resource Director Technology Architecture Specification" for a full
+ * description of RMID sharing mode).
+ *
+ * In RMID sharing mode there are fewer "logical RMID" values available
+ * to accumulate data ("physical RMIDs" are divided evenly between SNC
+ * nodes that share an L3 cache). Linux creates an rdt_mon_domain for
+ * each SNC node.
+ *
+ * The value loaded into IA32_PQR_ASSOC is the "logical RMID".
+ *
+ * Data is collected independently on each SNC node and can be retrieved
+ * using the "physical RMID" value computed by this function and loaded
+ * into IA32_QM_EVTSEL. @cpu can be any CPU in the SNC node.
+ *
+ * The scope of the IA32_QM_EVTSEL and IA32_QM_CTR MSRs is at the L3
+ * cache. So a "physical RMID" may be read from any CPU that shares
+ * the L3 cache with the desired SNC node, not just from a CPU in
+ * the specific SNC node.
+ */
+static int logical_rmid_to_physical_rmid(int cpu, int lrmid)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+
+ if (snc_nodes_per_l3_cache == 1)
+ return lrmid;
+
+ return lrmid + (cpu_to_node(cpu) % snc_nodes_per_l3_cache) * r->num_rmid;
+}
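The mapping is pure arithmetic: with SNC enabled, each node owns num_rmid logical RMIDs, so the physical RMID is the logical one plus (node within the cache) * num_rmid. A standalone sketch with made-up topology numbers (lrmid_to_prmid takes the node index directly instead of deriving it from a CPU, and the names are invented for the example):

#include <stdio.h>

/*
 * With N SNC nodes per L3 cache, each node sees num_rmid logical RMIDs and
 * the physical RMID is the logical one offset by (node % N) * num_rmid.
 */
static int lrmid_to_prmid(int node, int snc_nodes_per_l3, int num_rmid, int lrmid)
{
	if (snc_nodes_per_l3 == 1)
		return lrmid;			/* no SNC: identity mapping */
	return lrmid + (node % snc_nodes_per_l3) * num_rmid;
}

int main(void)
{
	/* made-up numbers: 2 SNC nodes per L3, 128 RMIDs visible per node */
	printf("node 0, lrmid 5 -> prmid %d\n", lrmid_to_prmid(0, 2, 128, 5));
	printf("node 1, lrmid 5 -> prmid %d\n", lrmid_to_prmid(1, 2, 128, 5));
	return 0;
}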
+
+static int __rmid_read_phys(u32 prmid, enum resctrl_event_id eventid, u64 *val)
{
u64 msr_val;
@@ -197,7 +237,7 @@ static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
* IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
* are error bits.
*/
- wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+ wrmsr(MSR_IA32_QM_EVTSEL, eventid, prmid);
rdmsrl(MSR_IA32_QM_CTR, msr_val);
if (msr_val & RMID_VAL_ERROR)
@@ -209,7 +249,7 @@ static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
return 0;
}
-static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
+static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_mon_domain *hw_dom,
u32 rmid,
enum resctrl_event_id eventid)
{
@@ -228,19 +268,22 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
return NULL;
}
-void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 unused, u32 rmid,
enum resctrl_event_id eventid)
{
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+ struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
+ int cpu = cpumask_any(&d->hdr.cpu_mask);
struct arch_mbm_state *am;
+ u32 prmid;
am = get_arch_mbm_state(hw_dom, rmid, eventid);
if (am) {
memset(am, 0, sizeof(*am));
+ prmid = logical_rmid_to_physical_rmid(cpu, rmid);
/* Record any initial, non-zero count value. */
- __rmid_read(rmid, eventid, &am->prev_msr);
+ __rmid_read_phys(prmid, eventid, &am->prev_msr);
}
}
@@ -248,9 +291,9 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
* Assumes that hardware counters are also reset and thus that there is
* no need to record initial non-zero counts.
*/
-void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d)
{
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+ struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
if (is_mbm_total_enabled())
memset(hw_dom->arch_mbm_total, 0,
@@ -269,22 +312,22 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
return chunks >> shift;
}
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 unused, u32 rmid, enum resctrl_event_id eventid,
u64 *val, void *ignored)
{
+ struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+ int cpu = cpumask_any(&d->hdr.cpu_mask);
struct arch_mbm_state *am;
u64 msr_val, chunks;
+ u32 prmid;
int ret;
resctrl_arch_rmid_read_context_check();
- if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
- return -EINVAL;
-
- ret = __rmid_read(rmid, eventid, &msr_val);
+ prmid = logical_rmid_to_physical_rmid(cpu, rmid);
+ ret = __rmid_read_phys(prmid, eventid, &msr_val);
if (ret)
return ret;
@@ -320,7 +363,7 @@ static void limbo_release_entry(struct rmid_entry *entry)
* decrement the count. If the busy count gets to zero on an RMID, we
* free the RMID
*/
-void __check_limbo(struct rdt_domain *d, bool force_free)
+void __check_limbo(struct rdt_mon_domain *d, bool force_free)
{
struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
u32 idx_limit = resctrl_arch_system_num_rmid_idx();
@@ -364,7 +407,7 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
* CLOSID and RMID because there may be dependencies between them
* on some architectures.
*/
- trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->id, val);
+ trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
}
if (force_free || !rmid_dirty) {
@@ -378,7 +421,7 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
}
-bool has_busy_rmid(struct rdt_domain *d)
+bool has_busy_rmid(struct rdt_mon_domain *d)
{
u32 idx_limit = resctrl_arch_system_num_rmid_idx();
@@ -479,7 +522,7 @@ int alloc_rmid(u32 closid)
static void add_rmid_to_limbo(struct rmid_entry *entry)
{
struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
- struct rdt_domain *d;
+ struct rdt_mon_domain *d;
u32 idx;
lockdep_assert_held(&rdtgroup_mutex);
@@ -490,7 +533,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
entry->busy = 0;
- list_for_each_entry(d, &r->domains, list) {
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
/*
* For the first limbo RMID in the domain,
* setup up the limbo worker.
@@ -519,7 +562,8 @@ void free_rmid(u32 closid, u32 rmid)
* allows architectures that ignore the closid parameter to avoid an
* unnecessary check.
*/
- if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
+ if (!resctrl_arch_mon_capable() ||
+ idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
RESCTRL_RESERVED_RMID))
return;
@@ -531,7 +575,7 @@ void free_rmid(u32 closid, u32 rmid)
list_add_tail(&entry->list, &rmid_free_lru);
}
-static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid,
+static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
u32 rmid, enum resctrl_event_id evtid)
{
u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
@@ -548,7 +592,10 @@ static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid,
static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
{
+ int cpu = smp_processor_id();
+ struct rdt_mon_domain *d;
struct mbm_state *m;
+ int err, ret;
u64 tval = 0;
if (rr->first) {
@@ -559,14 +606,47 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
return 0;
}
- rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid,
- &tval, rr->arch_mon_ctx);
- if (rr->err)
- return rr->err;
+ if (rr->d) {
+ /* Reading a single domain, must be on a CPU in that domain. */
+ if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
+ return -EINVAL;
+ rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
+ rr->evtid, &tval, rr->arch_mon_ctx);
+ if (rr->err)
+ return rr->err;
- rr->val += tval;
+ rr->val += tval;
- return 0;
+ return 0;
+ }
+
+ /* Summing domains that share a cache, must be on a CPU for that cache. */
+ if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
+ return -EINVAL;
+
+ /*
+ * Legacy files must report the sum of an event across all
+ * domains that share the same L3 cache instance.
+ * Report success if a read from any domain succeeds, -EINVAL
+ * (translated to "Unavailable" for user space) if reading from
+ * all domains fail for any reason.
+ */
+ ret = -EINVAL;
+ list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
+ if (d->ci->id != rr->ci->id)
+ continue;
+ err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
+ rr->evtid, &tval, rr->arch_mon_ctx);
+ if (!err) {
+ rr->val += tval;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ rr->err = ret;
+
+ return ret;
}
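When rr->d is NULL, the code above sums the event over every monitor domain that shares the caller's L3 cache id and reports success if at least one per-domain read worked. A compact sketch of that policy over an array of fake domains (struct dom, sum_event and the sample data are invented for the example):

#include <stdio.h>

struct dom { int cache_id; int read_ok; unsigned long long count; };

/*
 * Add up the event counts of every domain that shares the requested cache
 * id; report success as long as at least one of those reads worked.
 */
static int sum_event(const struct dom *doms, int n, int cache_id,
		     unsigned long long *val)
{
	int ret = -1;	/* stands in for -EINVAL / "Unavailable" */

	for (int i = 0; i < n; i++) {
		if (doms[i].cache_id != cache_id)
			continue;
		if (doms[i].read_ok) {	/* a per-domain counter read succeeded */
			*val += doms[i].count;
			ret = 0;
		}
	}
	return ret;
}

int main(void)
{
	/* made-up domains: two SNC nodes under cache 0, one of them unreadable */
	struct dom doms[] = { { 0, 1, 100 }, { 0, 0, 0 }, { 1, 1, 7 } };
	unsigned long long val = 0;

	int err = sum_event(doms, 3, 0, &val);
	printf("err=%d val=%llu\n", err, val);
	return 0;
}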
/*
@@ -667,12 +747,12 @@ void mon_event_count(void *info)
* throttle MSRs already have low percentage values. To avoid
* unnecessarily restricting such rdtgroups, we also increase the bandwidth.
*/
-static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
{
u32 closid, rmid, cur_msr_val, new_msr_val;
struct mbm_state *pmbm_data, *cmbm_data;
+ struct rdt_ctrl_domain *dom_mba;
struct rdt_resource *r_mba;
- struct rdt_domain *dom_mba;
u32 cur_bw, user_bw, idx;
struct list_head *head;
struct rdtgroup *entry;
@@ -687,7 +767,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
idx = resctrl_arch_rmid_idx_encode(closid, rmid);
pmbm_data = &dom_mbm->mbm_local[idx];
- dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
+ dom_mba = get_ctrl_domain_from_cpu(smp_processor_id(), r_mba);
if (!dom_mba) {
pr_warn_once("Failure to get domain for MBA update\n");
return;
@@ -733,12 +813,11 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
}
-static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
+static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 closid, u32 rmid)
{
- struct rmid_read rr;
+ struct rmid_read rr = {0};
- rr.first = false;
rr.r = r;
rr.d = d;
@@ -791,17 +870,17 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
void cqm_handle_limbo(struct work_struct *work)
{
unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
- struct rdt_domain *d;
+ struct rdt_mon_domain *d;
cpus_read_lock();
mutex_lock(&rdtgroup_mutex);
- d = container_of(work, struct rdt_domain, cqm_limbo.work);
+ d = container_of(work, struct rdt_mon_domain, cqm_limbo.work);
__check_limbo(d, false);
if (has_busy_rmid(d)) {
- d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask,
+ d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
RESCTRL_PICK_ANY_CPU);
schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
delay);
@@ -819,13 +898,13 @@ void cqm_handle_limbo(struct work_struct *work)
* @exclude_cpu: Which CPU the handler should not run on,
* RESCTRL_PICK_ANY_CPU to pick any CPU.
*/
-void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms,
+void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
int exclude_cpu)
{
unsigned long delay = msecs_to_jiffies(delay_ms);
int cpu;
- cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu);
+ cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
dom->cqm_work_cpu = cpu;
if (cpu < nr_cpu_ids)
@@ -836,9 +915,9 @@ void mbm_handle_overflow(struct work_struct *work)
{
unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
struct rdtgroup *prgrp, *crgrp;
+ struct rdt_mon_domain *d;
struct list_head *head;
struct rdt_resource *r;
- struct rdt_domain *d;
cpus_read_lock();
mutex_lock(&rdtgroup_mutex);
@@ -851,7 +930,7 @@ void mbm_handle_overflow(struct work_struct *work)
goto out_unlock;
r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
- d = container_of(work, struct rdt_domain, mbm_over.work);
+ d = container_of(work, struct rdt_mon_domain, mbm_over.work);
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);
@@ -868,7 +947,7 @@ void mbm_handle_overflow(struct work_struct *work)
* Re-check for housekeeping CPUs. This allows the overflow handler to
* move off a nohz_full CPU quickly.
*/
- d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask,
+ d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
RESCTRL_PICK_ANY_CPU);
schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
@@ -885,7 +964,7 @@ out_unlock:
* @exclude_cpu: Which CPU the handler should not run on,
* RESCTRL_PICK_ANY_CPU to pick any CPU.
*/
-void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms,
+void mbm_setup_overflow_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
int exclude_cpu)
{
unsigned long delay = msecs_to_jiffies(delay_ms);
@@ -897,7 +976,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms,
*/
if (!resctrl_mounted || !resctrl_arch_mon_capable())
return;
- cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu);
+ cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
dom->mbm_work_cpu = cpu;
if (cpu < nr_cpu_ids)
@@ -1014,6 +1093,88 @@ static void l3_mon_evt_init(struct rdt_resource *r)
list_add_tail(&mbm_local_event.list, &r->evt_list);
}
+/*
+ * The power-on reset value of MSR_RMID_SNC_CONFIG is 0x1
+ * which indicates that RMIDs are configured in legacy mode.
+ * This mode is incompatible with Linux resctrl semantics
+ * as RMIDs are partitioned between SNC nodes, which requires
+ * a user to know which RMID is allocated to a task.
+ * Clearing bit 0 reconfigures the RMID counters for use
+ * in RMID sharing mode. This mode is better for Linux.
+ * The RMID space is divided between all SNC nodes with the
+ * RMIDs renumbered to start from zero in each node when
+ * counting operations from tasks. Code to read the counters
+ * must adjust RMID counter numbers based on SNC node. See
+ * logical_rmid_to_physical_rmid() for code that does this.
+ */
+void arch_mon_domain_online(struct rdt_resource *r, struct rdt_mon_domain *d)
+{
+ if (snc_nodes_per_l3_cache > 1)
+ msr_clear_bit(MSR_RMID_SNC_CONFIG, 0);
+}
+
+/* CPU models that support MSR_RMID_SNC_CONFIG */
+static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
+ X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, 0),
+ {}
+};
+
+/*
+ * There isn't a simple hardware bit that indicates whether a CPU is running
+ * in Sub-NUMA Cluster (SNC) mode. Infer the state by comparing the
+ * number of CPUs sharing the L3 cache with CPU0 to the number of CPUs in
+ * the same NUMA node as CPU0.
+ * It is not possible to accurately determine SNC state if the system is
+ * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes
+ * to L3 caches. It will be OK if the system is booted with hyperthreading
+ * disabled (since this doesn't affect the ratio).
+ */
+static __init int snc_get_config(void)
+{
+ struct cacheinfo *ci = get_cpu_cacheinfo_level(0, RESCTRL_L3_CACHE);
+ const cpumask_t *node0_cpumask;
+ int cpus_per_node, cpus_per_l3;
+ int ret;
+
+ if (!x86_match_cpu(snc_cpu_ids) || !ci)
+ return 1;
+
+ cpus_read_lock();
+ if (num_online_cpus() != num_present_cpus())
+ pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
+ cpus_read_unlock();
+
+ node0_cpumask = cpumask_of_node(cpu_to_node(0));
+
+ cpus_per_node = cpumask_weight(node0_cpumask);
+ cpus_per_l3 = cpumask_weight(&ci->shared_cpu_map);
+
+ if (!cpus_per_node || !cpus_per_l3)
+ return 1;
+
+ ret = cpus_per_l3 / cpus_per_node;
+
+ /* Sanity check: only valid results are 1, 2, 3, 4 */
+ switch (ret) {
+ case 1:
+ break;
+ case 2 ... 4:
+ pr_info("Sub-NUMA Cluster mode detected with %d nodes per L3 cache\n", ret);
+ rdt_resources_all[RDT_RESOURCE_L3].r_resctrl.mon_scope = RESCTRL_L3_NODE;
+ break;
+ default:
+ pr_warn("Ignore improbable SNC node count %d\n", ret);
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
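As a worked example of the ratio check above (the CPU counts are illustrative, not taken from this patch): on a two-node SNC socket where 96 CPUs share CPU0's L3 cache but only 48 CPUs sit in CPU0's NUMA node, the division yields 2, SNC mode is reported, and the L3 monitoring scope is switched to RESCTRL_L3_NODE:

	/* Illustrative numbers only */
	cpus_per_l3   = 96;			/* CPUs sharing CPU0's L3 cache */
	cpus_per_node = 48;			/* CPUs in CPU0's NUMA node     */
	ret = cpus_per_l3 / cpus_per_node;	/* 2 -> SNC mode detected      */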
+
int __init rdt_get_mon_l3_config(struct rdt_resource *r)
{
unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
@@ -1021,9 +1182,11 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
unsigned int threshold;
int ret;
+ snc_nodes_per_l3_cache = snc_get_config();
+
resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
- hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
- r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
+ hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale / snc_nodes_per_l3_cache;
+ r->num_rmid = (boot_cpu_data.x86_cache_max_rmid + 1) / snc_nodes_per_l3_cache;
hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
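Continuing a hypothetical SNC-2 configuration, the per-node scaling added to rdt_get_mon_l3_config() above splits the RMID space and the occupancy scale evenly between the nodes (the CPUID-reported values below are made up for illustration):

	/* e.g. x86_cache_max_rmid = 255, x86_cache_occ_scale = 65536, 2 SNC nodes */
	r->num_rmid       = (255 + 1) / 2;	/* 128 RMIDs usable within each SNC node */
	hw_res->mon_scale = 65536 / 2;		/* occupancy scale adjusted per node     */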
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index aacf236dfe3b..e69489d48625 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -11,7 +11,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
@@ -221,7 +220,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
int cpu;
int ret;
- for_each_cpu(cpu, &plr->d->cpu_mask) {
+ for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
if (!pm_req) {
rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
@@ -292,12 +291,15 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
*/
static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
{
- struct cpu_cacheinfo *ci;
+ enum resctrl_scope scope = plr->s->res->ctrl_scope;
+ struct cacheinfo *ci;
int ret;
- int i;
+
+ if (WARN_ON_ONCE(scope != RESCTRL_L2_CACHE && scope != RESCTRL_L3_CACHE))
+ return -ENODEV;
/* Pick the first cpu we find that is associated with the cache. */
- plr->cpu = cpumask_first(&plr->d->cpu_mask);
+ plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);
if (!cpu_online(plr->cpu)) {
rdt_last_cmd_printf("CPU %u associated with cache not online\n",
@@ -306,15 +308,11 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
goto out_region;
}
- ci = get_cpu_cacheinfo(plr->cpu);
-
- plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
-
- for (i = 0; i < ci->num_leaves; i++) {
- if (ci->info_list[i].level == plr->s->res->cache_level) {
- plr->line_size = ci->info_list[i].coherency_line_size;
- return 0;
- }
+ ci = get_cpu_cacheinfo_level(plr->cpu, scope);
+ if (ci) {
+ plr->line_size = ci->coherency_line_size;
+ plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
+ return 0;
}
ret = -1;
@@ -810,7 +808,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
* Return: true if @cbm overlaps with pseudo-locked region on @d, false
* otherwise.
*/
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
{
unsigned int cbm_len;
unsigned long cbm_b;
@@ -837,11 +835,11 @@ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm
* if it is not possible to test due to memory allocation issue,
* false otherwise.
*/
-bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
{
+ struct rdt_ctrl_domain *d_i;
cpumask_var_t cpu_with_psl;
struct rdt_resource *r;
- struct rdt_domain *d_i;
bool ret = false;
/* Walking r->domains, ensure it can't race with cpuhp */
@@ -855,10 +853,10 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
* associated with them.
*/
for_each_alloc_capable_rdt_resource(r) {
- list_for_each_entry(d_i, &r->domains, list) {
+ list_for_each_entry(d_i, &r->ctrl_domains, hdr.list) {
if (d_i->plr)
cpumask_or(cpu_with_psl, cpu_with_psl,
- &d_i->cpu_mask);
+ &d_i->hdr.cpu_mask);
}
}
@@ -866,7 +864,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
* Next test if new pseudo-locked region would intersect with
* existing region.
*/
- if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
+ if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))
ret = true;
free_cpumask_var(cpu_with_psl);
@@ -1198,7 +1196,7 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
}
plr->thread_done = 0;
- cpu = cpumask_first(&plr->d->cpu_mask);
+ cpu = cpumask_first(&plr->d->hdr.cpu_mask);
if (!cpu_online(cpu)) {
ret = -ENODEV;
goto out;
@@ -1528,7 +1526,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
* may be scheduled elsewhere and invalidate entries in the
* pseudo-locked region.
*/
- if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
+ if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
mutex_unlock(&rdtgroup_mutex);
return -EINVAL;
}
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 02f213f1c51c..d7163b764c62 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -12,7 +12,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
@@ -92,13 +91,13 @@ void rdt_last_cmd_printf(const char *fmt, ...)
void rdt_staged_configs_clear(void)
{
+ struct rdt_ctrl_domain *dom;
struct rdt_resource *r;
- struct rdt_domain *dom;
lockdep_assert_held(&rdtgroup_mutex);
for_each_alloc_capable_rdt_resource(r) {
- list_for_each_entry(dom, &r->domains, list)
+ list_for_each_entry(dom, &r->ctrl_domains, hdr.list)
memset(dom->staged_config, 0, sizeof(dom->staged_config));
}
}
@@ -317,7 +316,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
rdt_last_cmd_puts("Cache domain offline\n");
ret = -ENODEV;
} else {
- mask = &rdtgrp->plr->d->cpu_mask;
+ mask = &rdtgrp->plr->d->hdr.cpu_mask;
seq_printf(s, is_cpu_list(of) ?
"%*pbl\n" : "%*pb\n",
cpumask_pr_args(mask));
@@ -1012,7 +1011,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
unsigned long sw_shareable = 0, hw_shareable = 0;
unsigned long exclusive = 0, pseudo_locked = 0;
struct rdt_resource *r = s->res;
- struct rdt_domain *dom;
+ struct rdt_ctrl_domain *dom;
int i, hwb, swb, excl, psl;
enum rdtgrp_mode mode;
bool sep = false;
@@ -1021,12 +1020,12 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
cpus_read_lock();
mutex_lock(&rdtgroup_mutex);
hw_shareable = r->cache.shareable_bits;
- list_for_each_entry(dom, &r->domains, list) {
+ list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
if (sep)
seq_putc(seq, ';');
sw_shareable = 0;
exclusive = 0;
- seq_printf(seq, "%d=", dom->id);
+ seq_printf(seq, "%d=", dom->hdr.id);
for (i = 0; i < closids_supported(); i++) {
if (!closid_allocated(i))
continue;
@@ -1243,7 +1242,7 @@ static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
*
* Return: false if CBM does not overlap, true if it does.
*/
-static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_ctrl_domain *d,
unsigned long cbm, int closid,
enum resctrl_conf_type type, bool exclusive)
{
@@ -1298,7 +1297,7 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
*
* Return: true if CBM overlap detected, false if there is no overlap
*/
-bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
unsigned long cbm, int closid, bool exclusive)
{
enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
@@ -1329,10 +1328,10 @@ bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
int closid = rdtgrp->closid;
+ struct rdt_ctrl_domain *d;
struct resctrl_schema *s;
struct rdt_resource *r;
bool has_cache = false;
- struct rdt_domain *d;
u32 ctrl;
/* Walking r->domains, ensure it can't race with cpuhp */
@@ -1343,7 +1342,7 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
continue;
has_cache = true;
- list_for_each_entry(d, &r->domains, list) {
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
ctrl = resctrl_arch_get_config(r, d, closid,
s->conf_type);
if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
@@ -1448,20 +1447,19 @@ out:
* bitmap functions work correctly.
*/
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
- struct rdt_domain *d, unsigned long cbm)
+ struct rdt_ctrl_domain *d, unsigned long cbm)
{
- struct cpu_cacheinfo *ci;
unsigned int size = 0;
- int num_b, i;
+ struct cacheinfo *ci;
+ int num_b;
+
+ if (WARN_ON_ONCE(r->ctrl_scope != RESCTRL_L2_CACHE && r->ctrl_scope != RESCTRL_L3_CACHE))
+ return size;
num_b = bitmap_weight(&cbm, r->cache.cbm_len);
- ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
- for (i = 0; i < ci->num_leaves; i++) {
- if (ci->info_list[i].level == r->cache_level) {
- size = ci->info_list[i].size / r->cache.cbm_len * num_b;
- break;
- }
- }
+ ci = get_cpu_cacheinfo_level(cpumask_any(&d->hdr.cpu_mask), r->ctrl_scope);
+ if (ci)
+ size = ci->size / r->cache.cbm_len * num_b;
return size;
}
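A quick worked example of the size calculation above (cache size and CBM width are illustrative): with a 32 MB L3 (ci->size), a 16-bit CBM (r->cache.cbm_len) and 4 bits set in the mask, each CBM bit represents 2 MB, so the group is reported as 8 MB:

	/* Illustrative values only */
	size = (32 * 1024 * 1024) / 16 * 4;	/* = 8388608 bytes, i.e. 8 MB */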
@@ -1477,9 +1475,9 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
{
struct resctrl_schema *schema;
enum resctrl_conf_type type;
+ struct rdt_ctrl_domain *d;
struct rdtgroup *rdtgrp;
struct rdt_resource *r;
- struct rdt_domain *d;
unsigned int size;
int ret = 0;
u32 closid;
@@ -1503,7 +1501,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
rdtgrp->plr->d,
rdtgrp->plr->cbm);
- seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+ seq_printf(s, "%d=%u\n", rdtgrp->plr->d->hdr.id, size);
}
goto out;
}
@@ -1515,7 +1513,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
type = schema->conf_type;
sep = false;
seq_printf(s, "%*s:", max_name_width, schema->name);
- list_for_each_entry(d, &r->domains, list) {
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
if (sep)
seq_putc(s, ';');
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
@@ -1533,7 +1531,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
else
size = rdtgroup_cbm_to_size(r, d, ctrl);
}
- seq_printf(s, "%d=%u", d->id, size);
+ seq_printf(s, "%d=%u", d->hdr.id, size);
sep = true;
}
seq_putc(s, '\n');
@@ -1591,21 +1589,21 @@ static void mon_event_config_read(void *info)
mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
}
-static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info)
+static void mondata_config_read(struct rdt_mon_domain *d, struct mon_config_info *mon_info)
{
- smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1);
+ smp_call_function_any(&d->hdr.cpu_mask, mon_event_config_read, mon_info, 1);
}
static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
{
struct mon_config_info mon_info = {0};
- struct rdt_domain *dom;
+ struct rdt_mon_domain *dom;
bool sep = false;
cpus_read_lock();
mutex_lock(&rdtgroup_mutex);
- list_for_each_entry(dom, &r->domains, list) {
+ list_for_each_entry(dom, &r->mon_domains, hdr.list) {
if (sep)
seq_puts(s, ";");
@@ -1613,7 +1611,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid
mon_info.evtid = evtid;
mondata_config_read(dom, &mon_info);
- seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config);
+ seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config);
sep = true;
}
seq_puts(s, "\n");
@@ -1658,7 +1656,7 @@ static void mon_event_config_write(void *info)
}
static void mbm_config_write_domain(struct rdt_resource *r,
- struct rdt_domain *d, u32 evtid, u32 val)
+ struct rdt_mon_domain *d, u32 evtid, u32 val)
{
struct mon_config_info mon_info = {0};
@@ -1679,7 +1677,7 @@ static void mbm_config_write_domain(struct rdt_resource *r,
* are scoped at the domain level. A write to any of these MSRs
* on one CPU is observed by all the CPUs in the domain.
*/
- smp_call_function_any(&d->cpu_mask, mon_event_config_write,
+ smp_call_function_any(&d->hdr.cpu_mask, mon_event_config_write,
&mon_info, 1);
/*
@@ -1699,7 +1697,7 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
char *dom_str = NULL, *id_str;
unsigned long dom_id, val;
- struct rdt_domain *d;
+ struct rdt_mon_domain *d;
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
@@ -1729,8 +1727,8 @@ next:
return -EINVAL;
}
- list_for_each_entry(d, &r->domains, list) {
- if (d->id == dom_id) {
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ if (d->hdr.id == dom_id) {
mbm_config_write_domain(r, d, evtid, val);
goto next;
}
@@ -2258,9 +2256,9 @@ static inline bool is_mba_linear(void)
static int set_cache_qos_cfg(int level, bool enable)
{
void (*update)(void *arg);
+ struct rdt_ctrl_domain *d;
struct rdt_resource *r_l;
cpumask_var_t cpu_mask;
- struct rdt_domain *d;
int cpu;
/* Walking r->domains, ensure it can't race with cpuhp */
@@ -2277,14 +2275,14 @@ static int set_cache_qos_cfg(int level, bool enable)
return -ENOMEM;
r_l = &rdt_resources_all[level].r_resctrl;
- list_for_each_entry(d, &r_l->domains, list) {
+ list_for_each_entry(d, &r_l->ctrl_domains, hdr.list) {
if (r_l->cache.arch_has_per_cpu_cfg)
/* Pick all the CPUs in the domain instance */
- for_each_cpu(cpu, &d->cpu_mask)
+ for_each_cpu(cpu, &d->hdr.cpu_mask)
cpumask_set_cpu(cpu, cpu_mask);
else
/* Pick one CPU from each domain instance to update MSR */
- cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ cpumask_set_cpu(cpumask_any(&d->hdr.cpu_mask), cpu_mask);
}
/* Update QOS_CFG MSR on all the CPUs in cpu_mask */
@@ -2310,10 +2308,10 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
l3_qos_cfg_update(&hw_res->cdp_enabled);
}
-static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d)
+static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{
u32 num_closid = resctrl_arch_get_num_closid(r);
- int cpu = cpumask_any(&d->cpu_mask);
+ int cpu = cpumask_any(&d->hdr.cpu_mask);
int i;
d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
@@ -2328,7 +2326,7 @@ static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d)
}
static void mba_sc_domain_destroy(struct rdt_resource *r,
- struct rdt_domain *d)
+ struct rdt_ctrl_domain *d)
{
kfree(d->mbps_val);
d->mbps_val = NULL;
@@ -2336,14 +2334,18 @@ static void mba_sc_domain_destroy(struct rdt_resource *r,
/*
* MBA software controller is supported only if
- * MBM is supported and MBA is in linear scale.
+ * MBM is supported and MBA is in linear scale,
+ * and the MBM monitor scope is the same as the
+ * MBA control scope.
*/
static bool supports_mba_mbps(void)
{
+ struct rdt_resource *rmbm = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
return (is_mbm_local_enabled() &&
- r->alloc_capable && is_mba_linear());
+ r->alloc_capable && is_mba_linear() &&
+ r->ctrl_scope == rmbm->mon_scope);
}
/*
@@ -2354,7 +2356,7 @@ static int set_mba_sc(bool mba_sc)
{
struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
u32 num_closid = resctrl_arch_get_num_closid(r);
- struct rdt_domain *d;
+ struct rdt_ctrl_domain *d;
int i;
if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
@@ -2362,7 +2364,7 @@ static int set_mba_sc(bool mba_sc)
r->membw.mba_sc = mba_sc;
- list_for_each_entry(d, &r->domains, list) {
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
for (i = 0; i < num_closid; i++)
d->mbps_val[i] = MBA_MAX_MBPS;
}
@@ -2626,7 +2628,7 @@ static int rdt_get_tree(struct fs_context *fc)
{
struct rdt_fs_context *ctx = rdt_fc2context(fc);
unsigned long flags = RFTYPE_CTRL_BASE;
- struct rdt_domain *dom;
+ struct rdt_mon_domain *dom;
struct rdt_resource *r;
int ret;
@@ -2701,7 +2703,7 @@ static int rdt_get_tree(struct fs_context *fc)
if (is_mbm_enabled()) {
r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
- list_for_each_entry(dom, &r->domains, list)
+ list_for_each_entry(dom, &r->mon_domains, hdr.list)
mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL,
RESCTRL_PICK_ANY_CPU);
}
@@ -2751,6 +2753,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct rdt_fs_context *ctx = rdt_fc2context(fc);
struct fs_parse_result result;
+ const char *msg;
int opt;
opt = fs_parse(fc, rdt_fs_parameters, param, &result);
@@ -2765,8 +2768,9 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
ctx->enable_cdpl2 = true;
return 0;
case Opt_mba_mbps:
+ msg = "mba_MBps requires local MBM and linear scale MBA at L3 scope";
if (!supports_mba_mbps())
- return -EINVAL;
+ return invalfc(fc, msg);
ctx->enable_mba_mbps = true;
return 0;
case Opt_debug:
@@ -2811,9 +2815,9 @@ static int rdt_init_fs_context(struct fs_context *fc)
static int reset_all_ctrls(struct rdt_resource *r)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
- struct rdt_hw_domain *hw_dom;
+ struct rdt_hw_ctrl_domain *hw_dom;
struct msr_param msr_param;
- struct rdt_domain *d;
+ struct rdt_ctrl_domain *d;
int i;
/* Walking r->domains, ensure it can't race with cpuhp */
@@ -2825,16 +2829,16 @@ static int reset_all_ctrls(struct rdt_resource *r)
/*
* Disable resource control for this resource by setting all
- * CBMs in all domains to the maximum mask value. Pick one CPU
+ * CBMs in all ctrl_domains to the maximum mask value. Pick one CPU
* from each domain to update the MSRs below.
*/
- list_for_each_entry(d, &r->domains, list) {
- hw_dom = resctrl_to_arch_dom(d);
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
+ hw_dom = resctrl_to_arch_ctrl_dom(d);
for (i = 0; i < hw_res->num_closid; i++)
hw_dom->ctrl_val[i] = r->default_ctrl;
msr_param.dom = d;
- smp_call_function_any(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
+ smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
}
return 0;
@@ -3002,62 +3006,126 @@ static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
return ret;
}
+static void mon_rmdir_one_subdir(struct kernfs_node *pkn, char *name, char *subname)
+{
+ struct kernfs_node *kn;
+
+ kn = kernfs_find_and_get(pkn, name);
+ if (!kn)
+ return;
+ kernfs_put(kn);
+
+ if (kn->dir.subdirs <= 1)
+ kernfs_remove(kn);
+ else
+ kernfs_remove_by_name(kn, subname);
+}
+
/*
* Remove all subdirectories of mon_data of ctrl_mon groups
- * and monitor groups with given domain id.
+ * and monitor groups for the given domain.
+ * Remove files and directories containing "sum" of domain data
+ * when the last domain being summed is removed.
*/
static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
- unsigned int dom_id)
+ struct rdt_mon_domain *d)
{
struct rdtgroup *prgrp, *crgrp;
+ char subname[32];
+ bool snc_mode;
char name[32];
+ snc_mode = r->mon_scope == RESCTRL_L3_NODE;
+ sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id);
+ if (snc_mode)
+ sprintf(subname, "mon_sub_%s_%02d", r->name, d->hdr.id);
+
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
- sprintf(name, "mon_%s_%02d", r->name, dom_id);
- kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
+ mon_rmdir_one_subdir(prgrp->mon.mon_data_kn, name, subname);
list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
- kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
+ mon_rmdir_one_subdir(crgrp->mon.mon_data_kn, name, subname);
}
}
-static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
- struct rdt_domain *d,
- struct rdt_resource *r, struct rdtgroup *prgrp)
+static int mon_add_all_files(struct kernfs_node *kn, struct rdt_mon_domain *d,
+ struct rdt_resource *r, struct rdtgroup *prgrp,
+ bool do_sum)
{
+ struct rmid_read rr = {0};
union mon_data_bits priv;
- struct kernfs_node *kn;
struct mon_evt *mevt;
- struct rmid_read rr;
- char name[32];
int ret;
- sprintf(name, "mon_%s_%02d", r->name, d->id);
- /* create the directory */
- kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
- if (IS_ERR(kn))
- return PTR_ERR(kn);
-
- ret = rdtgroup_kn_set_ugid(kn);
- if (ret)
- goto out_destroy;
-
- if (WARN_ON(list_empty(&r->evt_list))) {
- ret = -EPERM;
- goto out_destroy;
- }
+ if (WARN_ON(list_empty(&r->evt_list)))
+ return -EPERM;
priv.u.rid = r->rid;
- priv.u.domid = d->id;
+ priv.u.domid = do_sum ? d->ci->id : d->hdr.id;
+ priv.u.sum = do_sum;
list_for_each_entry(mevt, &r->evt_list, list) {
priv.u.evtid = mevt->evtid;
ret = mon_addfile(kn, mevt->name, priv.priv);
if (ret)
+ return ret;
+
+ if (!do_sum && is_mbm_event(mevt->evtid))
+ mon_event_read(&rr, r, d, prgrp, &d->hdr.cpu_mask, mevt->evtid, true);
+ }
+
+ return 0;
+}
+
+static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
+ struct rdt_mon_domain *d,
+ struct rdt_resource *r, struct rdtgroup *prgrp)
+{
+ struct kernfs_node *kn, *ckn;
+ char name[32];
+ bool snc_mode;
+ int ret = 0;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ snc_mode = r->mon_scope == RESCTRL_L3_NODE;
+ sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id);
+ kn = kernfs_find_and_get(parent_kn, name);
+ if (kn) {
+ /*
+ * rdtgroup_mutex will prevent this directory from being
+ * removed. No need to keep this hold.
+ */
+ kernfs_put(kn);
+ } else {
+ kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
+ if (IS_ERR(kn))
+ return PTR_ERR(kn);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret)
+ goto out_destroy;
+ ret = mon_add_all_files(kn, d, r, prgrp, snc_mode);
+ if (ret)
+ goto out_destroy;
+ }
+
+ if (snc_mode) {
+ sprintf(name, "mon_sub_%s_%02d", r->name, d->hdr.id);
+ ckn = kernfs_create_dir(kn, name, parent_kn->mode, prgrp);
+ if (IS_ERR(ckn)) {
+ ret = -EINVAL;
+ goto out_destroy;
+ }
+
+ ret = rdtgroup_kn_set_ugid(ckn);
+ if (ret)
goto out_destroy;
- if (is_mbm_event(mevt->evtid))
- mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
+ ret = mon_add_all_files(ckn, d, r, prgrp, false);
+ if (ret)
+ goto out_destroy;
}
+
kernfs_activate(kn);
return 0;
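A hedged illustration of the mon_data layout the mkdir/rmdir helpers above produce when SNC is active; the ids are made up, and "L3" is simply the resource name used in the sprintf() calls (non-SNC systems keep the flat mon_L3_XX directories):

	/*
	 * mon_data/
	 *   mon_L3_00/          <- named after the L3 cache id (d->ci->id); its files sum all nodes
	 *     mon_sub_L3_00/    <- one per SNC node (d->hdr.id), with per-node event files
	 *     mon_sub_L3_01/
	 */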
@@ -3071,7 +3139,7 @@ out_destroy:
* and "monitor" groups with given domain id.
*/
static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
- struct rdt_domain *d)
+ struct rdt_mon_domain *d)
{
struct kernfs_node *parent_kn;
struct rdtgroup *prgrp, *crgrp;
@@ -3093,13 +3161,13 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
struct rdt_resource *r,
struct rdtgroup *prgrp)
{
- struct rdt_domain *dom;
+ struct rdt_mon_domain *dom;
int ret;
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
- list_for_each_entry(dom, &r->domains, list) {
+ list_for_each_entry(dom, &r->mon_domains, hdr.list) {
ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
if (ret)
return ret;
@@ -3198,7 +3266,7 @@ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
* Set the RDT domain up to start off with all usable allocations. That is,
* all shareable and unused bits. All-zero CBM is invalid.
*/
-static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
+static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s,
u32 closid)
{
enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
@@ -3258,7 +3326,7 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
*/
tmp_cbm = cfg->new_ctrl;
if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
+ rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->hdr.id);
return -ENOSPC;
}
cfg->have_new_ctrl = true;
@@ -3278,10 +3346,10 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
*/
static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
{
- struct rdt_domain *d;
+ struct rdt_ctrl_domain *d;
int ret;
- list_for_each_entry(d, &s->res->domains, list) {
+ list_for_each_entry(d, &s->res->ctrl_domains, hdr.list) {
ret = __init_one_rdt_domain(d, s, closid);
if (ret < 0)
return ret;
@@ -3294,9 +3362,9 @@ static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
{
struct resctrl_staged_config *cfg;
- struct rdt_domain *d;
+ struct rdt_ctrl_domain *d;
- list_for_each_entry(d, &r->domains, list) {
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
if (is_mba_sc(r)) {
d->mbps_val[closid] = MBA_MAX_MBPS;
continue;
@@ -3920,29 +3988,33 @@ static void __init rdtgroup_setup_default(void)
mutex_unlock(&rdtgroup_mutex);
}
-static void domain_destroy_mon_state(struct rdt_domain *d)
+static void domain_destroy_mon_state(struct rdt_mon_domain *d)
{
bitmap_free(d->rmid_busy_llc);
kfree(d->mbm_total);
kfree(d->mbm_local);
}
-void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
+void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{
mutex_lock(&rdtgroup_mutex);
if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
mba_sc_domain_destroy(r, d);
- if (!r->mon_capable)
- goto out_unlock;
+ mutex_unlock(&rdtgroup_mutex);
+}
+
+void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
+{
+ mutex_lock(&rdtgroup_mutex);
/*
* If resctrl is mounted, remove all the
* per domain monitor data directories.
*/
if (resctrl_mounted && resctrl_arch_mon_capable())
- rmdir_mondata_subdir_allrdtgrp(r, d->id);
+ rmdir_mondata_subdir_allrdtgrp(r, d);
if (is_mbm_enabled())
cancel_delayed_work(&d->mbm_over);
@@ -3961,11 +4033,10 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
domain_destroy_mon_state(d);
-out_unlock:
mutex_unlock(&rdtgroup_mutex);
}
-static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
+static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_mon_domain *d)
{
u32 idx_limit = resctrl_arch_system_num_rmid_idx();
size_t tsize;
@@ -3996,7 +4067,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
return 0;
}
-int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
+int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{
int err = 0;
@@ -4005,11 +4076,18 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) {
/* RDT_RESOURCE_MBA is never mon_capable */
err = mba_sc_domain_allocate(r, d);
- goto out_unlock;
}
- if (!r->mon_capable)
- goto out_unlock;
+ mutex_unlock(&rdtgroup_mutex);
+
+ return err;
+}
+
+int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
+{
+ int err;
+
+ mutex_lock(&rdtgroup_mutex);
err = domain_setup_mon_state(r, d);
if (err)
@@ -4060,8 +4138,8 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
void resctrl_offline_cpu(unsigned int cpu)
{
struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct rdt_mon_domain *d;
struct rdtgroup *rdtgrp;
- struct rdt_domain *d;
mutex_lock(&rdtgroup_mutex);
list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
@@ -4074,7 +4152,7 @@ void resctrl_offline_cpu(unsigned int cpu)
if (!l3->mon_capable)
goto out_unlock;
- d = get_domain_from_cpu(cpu, l3);
+ d = get_mon_domain_from_cpu(cpu, l3);
if (d) {
if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
cancel_delayed_work(&d->mbm_over);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index af5aa2c754c2..c84c30188fdf 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,6 +45,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index d419deed6a48..7d476fa697ca 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -84,9 +84,9 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
/*
* If leaf 0xb is available, then the domain shifts are set
- * already and nothing to do here.
+ * already and nothing to do here. Only valid for family >= 0x17.
*/
- if (!has_topoext) {
+ if (!has_topoext && tscan->c->x86 >= 0x17) {
/*
* Leaf 0x80000008 set the CORE domain shift already.
* Update the SMT domain, but do not propagate it.
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 11f83d07925e..00189cdeb775 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -41,80 +41,97 @@
#define CPUID_VMWARE_INFO_LEAF 0x40000000
#define CPUID_VMWARE_FEATURES_LEAF 0x40000010
-#define CPUID_VMWARE_FEATURES_ECX_VMMCALL BIT(0)
-#define CPUID_VMWARE_FEATURES_ECX_VMCALL BIT(1)
-#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
-
-#define VMWARE_CMD_GETVERSION 10
-#define VMWARE_CMD_GETHZ 45
-#define VMWARE_CMD_GETVCPU_INFO 68
-#define VMWARE_CMD_LEGACY_X2APIC 3
-#define VMWARE_CMD_VCPU_RESERVED 31
-#define VMWARE_CMD_STEALCLOCK 91
+#define GETVCPU_INFO_LEGACY_X2APIC BIT(3)
+#define GETVCPU_INFO_VCPU_RESERVED BIT(31)
#define STEALCLOCK_NOT_AVAILABLE (-1)
#define STEALCLOCK_DISABLED 0
#define STEALCLOCK_ENABLED 1
-#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
- __asm__("inl (%%dx), %%eax" : \
- "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
- "a"(VMWARE_HYPERVISOR_MAGIC), \
- "c"(VMWARE_CMD_##cmd), \
- "d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) : \
- "memory")
-
-#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx) \
- __asm__("vmcall" : \
- "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
- "a"(VMWARE_HYPERVISOR_MAGIC), \
- "c"(VMWARE_CMD_##cmd), \
- "d"(0), "b"(UINT_MAX) : \
- "memory")
-
-#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx) \
- __asm__("vmmcall" : \
- "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
- "a"(VMWARE_HYPERVISOR_MAGIC), \
- "c"(VMWARE_CMD_##cmd), \
- "d"(0), "b"(UINT_MAX) : \
- "memory")
-
-#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do { \
- switch (vmware_hypercall_mode) { \
- case CPUID_VMWARE_FEATURES_ECX_VMCALL: \
- VMWARE_VMCALL(cmd, eax, ebx, ecx, edx); \
- break; \
- case CPUID_VMWARE_FEATURES_ECX_VMMCALL: \
- VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx); \
- break; \
- default: \
- VMWARE_PORT(cmd, eax, ebx, ecx, edx); \
- break; \
- } \
- } while (0)
-
struct vmware_steal_time {
union {
- uint64_t clock; /* stolen time counter in units of vtsc */
+ u64 clock; /* stolen time counter in units of vtsc */
struct {
/* only for little-endian */
- uint32_t clock_low;
- uint32_t clock_high;
+ u32 clock_low;
+ u32 clock_high;
};
};
- uint64_t reserved[7];
+ u64 reserved[7];
};
static unsigned long vmware_tsc_khz __ro_after_init;
static u8 vmware_hypercall_mode __ro_after_init;
+unsigned long vmware_hypercall_slow(unsigned long cmd,
+ unsigned long in1, unsigned long in3,
+ unsigned long in4, unsigned long in5,
+ u32 *out1, u32 *out2, u32 *out3,
+ u32 *out4, u32 *out5)
+{
+ unsigned long out0, rbx, rcx, rdx, rsi, rdi;
+
+ switch (vmware_hypercall_mode) {
+ case CPUID_VMWARE_FEATURES_ECX_VMCALL:
+ asm_inline volatile ("vmcall"
+ : "=a" (out0), "=b" (rbx), "=c" (rcx),
+ "=d" (rdx), "=S" (rsi), "=D" (rdi)
+ : "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (in3),
+ "S" (in4),
+ "D" (in5)
+ : "cc", "memory");
+ break;
+ case CPUID_VMWARE_FEATURES_ECX_VMMCALL:
+ asm_inline volatile ("vmmcall"
+ : "=a" (out0), "=b" (rbx), "=c" (rcx),
+ "=d" (rdx), "=S" (rsi), "=D" (rdi)
+ : "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (in3),
+ "S" (in4),
+ "D" (in5)
+ : "cc", "memory");
+ break;
+ default:
+ asm_inline volatile ("movw %[port], %%dx; inl (%%dx), %%eax"
+ : "=a" (out0), "=b" (rbx), "=c" (rcx),
+ "=d" (rdx), "=S" (rsi), "=D" (rdi)
+ : [port] "i" (VMWARE_HYPERVISOR_PORT),
+ "a" (VMWARE_HYPERVISOR_MAGIC),
+ "b" (in1),
+ "c" (cmd),
+ "d" (in3),
+ "S" (in4),
+ "D" (in5)
+ : "cc", "memory");
+ break;
+ }
+
+ if (out1)
+ *out1 = rbx;
+ if (out2)
+ *out2 = rcx;
+ if (out3)
+ *out3 = rdx;
+ if (out4)
+ *out4 = rsi;
+ if (out5)
+ *out5 = rdi;
+
+ return out0;
+}
+
static inline int __vmware_platform(void)
{
- uint32_t eax, ebx, ecx, edx;
- VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx);
- return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
+ u32 eax, ebx, ecx;
+
+ eax = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &ebx, &ecx);
+ return eax != UINT_MAX && ebx == VMWARE_HYPERVISOR_MAGIC;
}
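The call sites above rely on vmware_hypercall1/3/5() wrappers that this hunk does not show (they come from the asm/vmware.h header changes). A rough sketch of how such a wrapper could be layered on vmware_hypercall_slow(); the name and the omission of the fast inline-asm path are assumptions for illustration only:

	/* Hypothetical three-output wrapper; out1/out2 map to %ebx/%ecx. */
	static inline unsigned long sketch_hypercall3(unsigned long cmd, unsigned long in1,
						      u32 *out1, u32 *out2)
	{
		return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
					     out1, out2, NULL, NULL, NULL);
	}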
static unsigned long vmware_get_tsc_khz(void)
@@ -166,21 +183,12 @@ static void __init vmware_cyc2ns_setup(void)
pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
}
-static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2)
+static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
{
- uint32_t result, info;
-
- asm volatile (VMWARE_HYPERCALL :
- "=a"(result),
- "=c"(info) :
- "a"(VMWARE_HYPERVISOR_MAGIC),
- "b"(0),
- "c"(VMWARE_CMD_STEALCLOCK),
- "d"(0),
- "S"(arg1),
- "D"(arg2) :
- "memory");
- return result;
+ u32 info;
+
+ return vmware_hypercall5(VMWARE_CMD_STEALCLOCK, 0, 0, addr_hi, addr_lo,
+ &info);
}
static bool stealclock_enable(phys_addr_t pa)
@@ -215,15 +223,15 @@ static bool vmware_is_stealclock_available(void)
* Return:
* The steal clock reading in ns.
*/
-static uint64_t vmware_steal_clock(int cpu)
+static u64 vmware_steal_clock(int cpu)
{
struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
- uint64_t clock;
+ u64 clock;
if (IS_ENABLED(CONFIG_64BIT))
clock = READ_ONCE(steal->clock);
else {
- uint32_t initial_high, low, high;
+ u32 initial_high, low, high;
do {
initial_high = READ_ONCE(steal->clock_high);
@@ -235,7 +243,7 @@ static uint64_t vmware_steal_clock(int cpu)
high = READ_ONCE(steal->clock_high);
} while (initial_high != high);
- clock = ((uint64_t)high << 32) | low;
+ clock = ((u64)high << 32) | low;
}
return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
@@ -389,13 +397,13 @@ static void __init vmware_set_capabilities(void)
static void __init vmware_platform_setup(void)
{
- uint32_t eax, ebx, ecx, edx;
- uint64_t lpj, tsc_khz;
+ u32 eax, ebx, ecx;
+ u64 lpj, tsc_khz;
- VMWARE_CMD(GETHZ, eax, ebx, ecx, edx);
+ eax = vmware_hypercall3(VMWARE_CMD_GETHZ, UINT_MAX, &ebx, &ecx);
if (ebx != UINT_MAX) {
- lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
+ lpj = tsc_khz = eax | (((u64)ebx) << 32);
do_div(tsc_khz, 1000);
WARN_ON(tsc_khz >> 32);
pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
@@ -446,7 +454,7 @@ static u8 __init vmware_select_hypercall(void)
* If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
* intentionally defaults to 0.
*/
-static uint32_t __init vmware_platform(void)
+static u32 __init vmware_platform(void)
{
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
unsigned int eax;
@@ -474,12 +482,65 @@ static uint32_t __init vmware_platform(void)
/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
- uint32_t eax, ebx, ecx, edx;
- VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
- return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
- (eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
+ u32 eax;
+
+ eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
+ return !(eax & GETVCPU_INFO_VCPU_RESERVED) &&
+ (eax & GETVCPU_INFO_LEGACY_X2APIC);
}
+#ifdef CONFIG_INTEL_TDX_GUEST
+/*
+ * TDCALL[TDG.VP.VMCALL] uses %rax (arg0) and %rcx (arg2). Therefore,
+ * we remap those registers to %r12 and %r13, respectively.
+ */
+unsigned long vmware_tdx_hypercall(unsigned long cmd,
+ unsigned long in1, unsigned long in3,
+ unsigned long in4, unsigned long in5,
+ u32 *out1, u32 *out2, u32 *out3,
+ u32 *out4, u32 *out5)
+{
+ struct tdx_module_args args = {};
+
+ if (!hypervisor_is_type(X86_HYPER_VMWARE)) {
+ pr_warn_once("Incorrect usage\n");
+ return ULONG_MAX;
+ }
+
+ if (cmd & ~VMWARE_CMD_MASK) {
+ pr_warn_once("Out of range command %lx\n", cmd);
+ return ULONG_MAX;
+ }
+
+ args.rbx = in1;
+ args.rdx = in3;
+ args.rsi = in4;
+ args.rdi = in5;
+ args.r10 = VMWARE_TDX_VENDOR_LEAF;
+ args.r11 = VMWARE_TDX_HCALL_FUNC;
+ args.r12 = VMWARE_HYPERVISOR_MAGIC;
+ args.r13 = cmd;
+ /* CPL */
+ args.r15 = 0;
+
+ __tdx_hypercall(&args);
+
+ if (out1)
+ *out1 = args.rbx;
+ if (out2)
+ *out2 = args.r13;
+ if (out3)
+ *out3 = args.rdx;
+ if (out4)
+ *out4 = args.rsi;
+ if (out5)
+ *out5 = args.rdi;
+
+ return args.r12;
+}
+EXPORT_SYMBOL_GPL(vmware_tdx_hypercall);
+#endif
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
struct pt_regs *regs)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index f06501445cd9..340af8155658 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -128,6 +128,18 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
+
+ /*
+ * Non-crash kexec calls enc_kexec_begin() while scheduling is still
+ * active. This allows the callback to wait until all in-flight
+ * shared<->private conversions are complete. In a crash scenario,
+ * enc_kexec_begin() gets called after all but one CPU have been shut
+ * down and interrupts have been disabled. This allows the callback to
+ * detect a race with the conversion and report it.
+ */
+ x86_platform.guest.enc_kexec_begin();
+ x86_platform.guest.enc_kexec_finish();
+
crash_save_cpu(regs, safe_smp_processor_id());
}
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 8e3c53b4d070..64280879c68c 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -83,7 +83,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
if (!pin)
return 0;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 68b09f718f10..4893d30ce438 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -828,7 +828,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align)
/*
* Find the highest page frame number we have available
*/
-static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type type)
+static unsigned long __init e820__end_ram_pfn(unsigned long limit_pfn)
{
int i;
unsigned long last_pfn = 0;
@@ -839,7 +839,8 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type
unsigned long start_pfn;
unsigned long end_pfn;
- if (entry->type != type)
+ if (entry->type != E820_TYPE_RAM &&
+ entry->type != E820_TYPE_ACPI)
continue;
start_pfn = entry->addr >> PAGE_SHIFT;
@@ -865,12 +866,12 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type
unsigned long __init e820__end_of_ram_pfn(void)
{
- return e820_end_pfn(MAX_ARCH_PFN, E820_TYPE_RAM);
+ return e820__end_ram_pfn(MAX_ARCH_PFN);
}
unsigned long __init e820__end_of_low_ram_pfn(void)
{
- return e820_end_pfn(1UL << (32 - PAGE_SHIFT), E820_TYPE_RAM);
+ return e820__end_ram_pfn(1UL << (32 - PAGE_SHIFT));
}
static void __init early_panic(char *msg)
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 05df04f39628..2ee0b9c53dcc 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -106,21 +106,17 @@ static inline u64 xfeatures_mask_independent(void)
* Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
* supports modified optimization which is not supported by XSAVE.
*
- * We use XSAVE as a fallback.
- *
- * The 661 label is defined in the ALTERNATIVE* macros as the address of the
- * original instruction which gets replaced. We need to use it here as the
- * address of the instruction where we might get an exception at.
+ * Use XSAVE as a fallback.
*/
#define XSTATE_XSAVE(st, lmask, hmask, err) \
- asm volatile(ALTERNATIVE_3(XSAVE, \
+ asm volatile("1: " ALTERNATIVE_3(XSAVE, \
XSAVEOPT, X86_FEATURE_XSAVEOPT, \
XSAVEC, X86_FEATURE_XSAVEC, \
XSAVES, X86_FEATURE_XSAVES) \
"\n" \
"xor %[err], %[err]\n" \
"3:\n" \
- _ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
+ _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@@ -130,11 +126,11 @@ static inline u64 xfeatures_mask_independent(void)
* XSAVE area format.
*/
#define XSTATE_XRESTORE(st, lmask, hmask) \
- asm volatile(ALTERNATIVE(XRSTOR, \
+ asm volatile("1: " ALTERNATIVE(XRSTOR, \
XRSTORS, X86_FEATURE_XSAVES) \
"\n" \
"3:\n" \
- _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE) \
+ _ASM_EXTABLE_TYPE(1b, 3b, EX_TYPE_FPU_RESTORE) \
: \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index b180d8e497c3..cc0f7f70b17b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -295,8 +295,15 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_kexec(struct kimage *image)
{
unsigned long page_list[PAGES_NR];
- void *control_page;
+ unsigned int host_mem_enc_active;
int save_ftrace_enabled;
+ void *control_page;
+
+ /*
+ * This must be done before load_segments(): if call depth tracking is
+ * used, GS must be valid to make any function calls.
+ */
+ host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
@@ -358,7 +365,7 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start,
image->preserve_context,
- cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
+ host_mem_enc_active);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b8441147eb5e..f63f8fd00a91 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -835,6 +835,13 @@ void __noreturn stop_this_cpu(void *dummy)
*/
cpumask_clear_cpu(cpu, &cpus_stop_mask);
+#ifdef CONFIG_SMP
+ if (smp_ops.stop_this_cpu) {
+ smp_ops.stop_this_cpu();
+ unreachable();
+ }
+#endif
+
for (;;) {
/*
* Use native_halt() so that memory contents don't change
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f3130f762784..0e0a4cf6b5eb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
+#include <linux/kexec.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -716,6 +717,14 @@ static void native_machine_emergency_restart(void)
void native_machine_shutdown(void)
{
+ /*
+ * Call enc_kexec_begin() while all CPUs are still active and
+ * interrupts are enabled. This will allow all in-flight memory
+ * conversions to finish cleanly.
+ */
+ if (kexec_in_progress)
+ x86_platform.guest.enc_kexec_begin();
+
/* Stop the cpus and apics */
#ifdef CONFIG_X86_IO_APIC
/*
@@ -752,6 +761,9 @@ void native_machine_shutdown(void)
#ifdef CONFIG_X86_64
x86_platform.iommu_shutdown();
#endif
+
+ if (kexec_in_progress)
+ x86_platform.guest.enc_kexec_finish();
}
static void __machine_emergency_restart(int emergency)
@@ -868,6 +880,12 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
cpu_emergency_disable_virtualization();
atomic_dec(&waiting_for_crash_ipi);
+
+ if (smp_ops.stop_this_cpu) {
+ smp_ops.stop_this_cpu();
+ unreachable();
+ }
+
/* Assume hlt works */
halt();
for (;;)
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 56cab1bb25f5..042c9a0334e9 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -5,6 +5,8 @@
*/
#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <asm/alternative.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/processor-flags.h>
@@ -145,16 +147,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
* Set cr4 to a known state:
* - physical address extension enabled
* - 5-level paging, if it was enabled before
+ * - Machine check exception on TDX guest, if it was enabled before.
+ * Clearing MCE might not be allowed in TDX guests, depending on setup.
+ *
+ * Use R13 that contains the original CR4 value, read in relocate_kernel().
+ * PAE is always set in the original CR4.
*/
- movl $X86_CR4_PAE, %eax
- testq $X86_CR4_LA57, %r13
- jz 1f
- orl $X86_CR4_LA57, %eax
-1:
- movq %rax, %cr4
-
- jmp 1f
-1:
+ andl $(X86_CR4_PAE | X86_CR4_LA57), %r13d
+ ALTERNATIVE "", __stringify(orl $X86_CR4_MCE, %r13d), X86_FEATURE_TDX_GUEST
+ movq %r13, %cr4
/* Flush the TLB (needed?) */
movq %r9, %cr3
@@ -165,9 +166,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
* used by kexec. Flush the caches before copying the kernel.
*/
testq %r12, %r12
- jz 1f
+ jz .Lsme_off
wbinvd
-1:
+.Lsme_off:
movq %rcx, %r11
call swap_pages
@@ -187,7 +188,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
*/
testq %r11, %r11
- jnz 1f
+ jnz .Lrelocate
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
@@ -208,7 +209,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
ret
int3
-1:
+.Lrelocate:
popq %rdx
leaq PAGE_SIZE(%r10), %rsp
ANNOTATE_RETPOLINE_SAFE
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 05c5aa951da7..5d34cad9b7b1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -165,6 +165,7 @@ unsigned long saved_video_mode;
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+bool builtin_cmdline_added __ro_after_init;
#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
@@ -765,6 +766,7 @@ void __init setup_arch(char **cmdline_p)
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
}
#endif
+ builtin_cmdline_added = true;
#endif
strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
@@ -995,7 +997,6 @@ void __init setup_arch(char **cmdline_p)
mem_encrypt_setup_arch();
cc_random_init();
- efi_fake_memmap();
efi_find_mirror();
efi_esrt_init();
efi_mokvar_table_init();
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index e42faa792c07..52e1f3f0b361 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -27,25 +27,7 @@
unsigned long profile_pc(struct pt_regs *regs)
{
- unsigned long pc = instruction_pointer(regs);
-
- if (!user_mode(regs) && in_lock_functions(pc)) {
-#ifdef CONFIG_FRAME_POINTER
- return *(unsigned long *)(regs->bp + sizeof(long));
-#else
- unsigned long *sp = (unsigned long *)regs->sp;
- /*
- * Return address is either directly at stack pointer
- * or above a saved flags. Eflags has bits 22-31 zero,
- * kernel addresses don't.
- */
- if (sp[0] >> 22)
- return sp[0];
- if (sp[1] >> 22)
- return sp[1];
-#endif
- }
- return pc;
+ return instruction_pointer(regs);
}
EXPORT_SYMBOL(profile_pc);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 06b170759e5b..d4462fb26299 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -50,9 +50,9 @@ int tsc_clocksource_reliable;
static int __read_mostly tsc_force_recalibrate;
-static u32 art_to_tsc_numerator;
-static u32 art_to_tsc_denominator;
-static u64 art_to_tsc_offset;
+static struct clocksource_base art_base_clk = {
+ .id = CSID_X86_ART,
+};
static bool have_art;
struct cyc2ns {
@@ -1074,7 +1074,7 @@ core_initcall(cpufreq_register_tsc_scaling);
*/
static void __init detect_art(void)
{
- unsigned int unused[2];
+ unsigned int unused;
if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
return;
@@ -1089,13 +1089,14 @@ static void __init detect_art(void)
tsc_async_resets)
return;
- cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
- &art_to_tsc_numerator, unused, unused+1);
+ cpuid(ART_CPUID_LEAF, &art_base_clk.denominator,
+ &art_base_clk.numerator, &art_base_clk.freq_khz, &unused);
- if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
+ art_base_clk.freq_khz /= KHZ;
+ if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
return;
- rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
+ rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
/* Make this sticky over multiple CPU init calls */
setup_force_cpu_cap(X86_FEATURE_ART);
@@ -1296,67 +1297,6 @@ int unsynchronized_tsc(void)
return 0;
}
-/*
- * Convert ART to TSC given numerator/denominator found in detect_art()
- */
-struct system_counterval_t convert_art_to_tsc(u64 art)
-{
- u64 tmp, res, rem;
-
- rem = do_div(art, art_to_tsc_denominator);
-
- res = art * art_to_tsc_numerator;
- tmp = rem * art_to_tsc_numerator;
-
- do_div(tmp, art_to_tsc_denominator);
- res += tmp + art_to_tsc_offset;
-
- return (struct system_counterval_t) {
- .cs_id = have_art ? CSID_X86_TSC : CSID_GENERIC,
- .cycles = res,
- };
-}
-EXPORT_SYMBOL(convert_art_to_tsc);
-
-/**
- * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
- * @art_ns: ART (Always Running Timer) in unit of nanoseconds
- *
- * PTM requires all timestamps to be in units of nanoseconds. When user
- * software requests a cross-timestamp, this function converts system timestamp
- * to TSC.
- *
- * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
- * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
- * that this flag is set before conversion to TSC is attempted.
- *
- * Return:
- * struct system_counterval_t - system counter value with the ID of the
- * corresponding clocksource:
- * cycles: System counter value
- * cs_id: The clocksource ID for validating comparability
- */
-
-struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
-{
- u64 tmp, res, rem;
-
- rem = do_div(art_ns, USEC_PER_SEC);
-
- res = art_ns * tsc_khz;
- tmp = rem * tsc_khz;
-
- do_div(tmp, USEC_PER_SEC);
- res += tmp;
-
- return (struct system_counterval_t) {
- .cs_id = have_art ? CSID_X86_TSC : CSID_GENERIC,
- .cycles = res,
- };
-}
-EXPORT_SYMBOL(convert_art_ns_to_tsc);
-
-
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
@@ -1458,8 +1398,10 @@ out:
if (tsc_unstable)
goto unreg;
- if (boot_cpu_has(X86_FEATURE_ART))
+ if (boot_cpu_has(X86_FEATURE_ART)) {
have_art = true;
+ clocksource_tsc.base = &art_base_clk;
+ }
clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
clocksource_unregister(&clocksource_tsc_early);
@@ -1484,8 +1426,10 @@ static int __init init_tsc_clocksource(void)
* the refined calibration and directly register it as a clocksource.
*/
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
- if (boot_cpu_has(X86_FEATURE_ART))
+ if (boot_cpu_has(X86_FEATURE_ART)) {
have_art = true;
+ clocksource_tsc.base = &art_base_clk;
+ }
clocksource_register_khz(&clocksource_tsc, tsc_khz);
clocksource_unregister(&clocksource_tsc_early);
@@ -1509,10 +1453,12 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
if (early) {
cpu_khz = x86_platform.calibrate_cpu();
- if (tsc_early_khz)
+ if (tsc_early_khz) {
tsc_khz = tsc_early_khz;
- else
+ } else {
tsc_khz = x86_platform.calibrate_tsc();
+ clocksource_tsc.freq_khz = tsc_khz;
+ }
} else {
/* We should not be here with non-native cpu calibration */
WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 3509afc6a672..6e73403e874f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -357,6 +357,9 @@ SECTIONS
PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif
+ RUNTIME_CONST(shift, d_hash_shift)
+ RUNTIME_CONST(ptr, dentry_hashtable)
+
. = ALIGN(PAGE_SIZE);
/* freed after init ends here */
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index d5dc5a92635a..82b128d3f309 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -134,10 +134,12 @@ struct x86_cpuinit_ops x86_cpuinit = {
static void default_nmi_init(void) { };
-static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
-static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return true; }
+static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
+static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
static bool enc_tlb_flush_required_noop(bool enc) { return false; }
static bool enc_cache_flush_required_noop(void) { return false; }
+static void enc_kexec_begin_noop(void) {}
+static void enc_kexec_finish_noop(void) {}
static bool is_private_mmio_noop(u64 addr) {return false; }
struct x86_platform_ops x86_platform __ro_after_init = {
@@ -161,6 +163,8 @@ struct x86_platform_ops x86_platform __ro_after_init = {
.enc_status_change_finish = enc_status_change_finish_noop,
.enc_tlb_flush_required = enc_tlb_flush_required_noop,
.enc_cache_flush_required = enc_cache_flush_required_noop,
+ .enc_kexec_begin = enc_kexec_begin_noop,
+ .enc_kexec_finish = enc_kexec_finish_noop,
},
};
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index d64fb2b3eb69..fec95a770270 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -44,6 +44,7 @@ config KVM
select KVM_VFIO
select HAVE_KVM_PM_NOTIFIER if PM
select KVM_GENERIC_HARDWARE_ENABLING
+ select KVM_WERROR if WERROR
help
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -66,7 +67,7 @@ config KVM_WERROR
# FRAME_WARN, i.e. KVM_WERROR=y with KASAN=y requires special tuning.
# Building KVM with -Werror and KASAN is still doable via enabling
# the kernel-wide WERROR=y.
- depends on KVM && EXPERT && !KASAN
+ depends on KVM && ((EXPERT && !KASAN) || WERROR)
help
Add -Werror to the build flags for KVM.
@@ -97,15 +98,17 @@ config KVM_INTEL
config KVM_INTEL_PROVE_VE
bool "Check that guests do not receive #VE exceptions"
- default KVM_PROVE_MMU || DEBUG_KERNEL
- depends on KVM_INTEL
+ depends on KVM_INTEL && EXPERT
help
-
Checks that KVM's page table management code will not incorrectly
let guests receive a virtualization exception. Virtualization
exceptions will be trapped by the hypervisor rather than injected
in the guest.
+ Note: some CPUs appear to generate spurious EPT violation #VEs
+ that trigger KVM's WARN, in particular with eptad=0 and/or nested
+ virtualization.
+
If unsure, say N.
config X86_SGX_KVM
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5d4c86133453..c8cc578646d0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1069,7 +1069,7 @@ static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; " CALL_NOSPEC
- : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
+ : "=a"(rc), ASM_CALL_CONSTRAINT : [thunk_target]"r"(fop), [flags]"r"(flags));
return rc;
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ebf41023be38..acd7d48100a1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -59,7 +59,17 @@
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
-static bool lapic_timer_advance_dynamic __read_mostly;
+/*
+ * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
+ * tuning. When enabled, KVM programs the host timer event to fire early, i.e.
+ * before the deadline expires, to account for the delay between taking the
+ * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
+ * the guest. This way the interrupt arrives in the guest with minimal latency
+ * relative to the deadline programmed by the guest.
+ */
+static bool lapic_timer_advance __read_mostly = true;
+module_param(lapic_timer_advance, bool, 0444);
+
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
@@ -1854,16 +1864,14 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
- if (lapic_timer_advance_dynamic) {
- adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
- /*
- * If the timer fired early, reread the TSC to account for the
- * overhead of the above adjustment to avoid waiting longer
- * than is necessary.
- */
- if (guest_tsc < tsc_deadline)
- guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- }
+ adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
+
+ /*
+ * If the timer fired early, reread the TSC to account for the overhead
+ * of the above adjustment to avoid waiting longer than is necessary.
+ */
+ if (guest_tsc < tsc_deadline)
+ guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
@@ -2812,7 +2820,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
-int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
+int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
@@ -2845,13 +2853,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_HARD);
apic->lapic_timer.timer.function = apic_timer_fn;
- if (timer_advance_ns == -1) {
+ if (lapic_timer_advance)
apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
- lapic_timer_advance_dynamic = true;
- } else {
- apic->lapic_timer.timer_advance_ns = timer_advance_ns;
- lapic_timer_advance_dynamic = false;
- }
/*
* Stuff the APIC ENABLE bit in lieu of temporarily incrementing
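
For readers following the adaptive-advance comment added above, here is a minimal userspace sketch of the same adjust-and-clamp idea. It is not kernel code; the step divisor and clamp are hypothetical stand-ins for the LAPIC_TIMER_ADVANCE_ADJUST_* constants in the patch.

/* Illustrative only: adapt an "advance" from observed early/late errors. */
#include <stdint.h>
#include <stdio.h>

#define ADVANCE_ADJUST_STEP	8	/* divisor applied to the observed error */
#define ADVANCE_NS_MAX		5000	/* clamp, like LAPIC_TIMER_ADVANCE_NS_MAX */

static uint64_t advance_ns = 1000;	/* like LAPIC_TIMER_ADVANCE_NS_INIT */

/* err_ns > 0: event arrived late; err_ns < 0: event arrived early. */
static void adjust_advance(int64_t err_ns)
{
	int64_t next = (int64_t)advance_ns + err_ns / ADVANCE_ADJUST_STEP;

	if (next < 0)
		next = 0;
	if (next > ADVANCE_NS_MAX)
		next = ADVANCE_NS_MAX;
	advance_ns = (uint64_t)next;
}

int main(void)
{
	/* Pretend three timer firings were observed 300ns late, 80ns early, 40ns late. */
	int64_t samples[] = { 300, -80, 40 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		adjust_advance(samples[i]);
		printf("sample %u: advance_ns now %llu\n", i,
		       (unsigned long long)advance_ns);
	}
	return 0;
}
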
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 0a0ea4b5dd8c..a69e706b9080 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -85,7 +85,7 @@ struct kvm_lapic {
struct dest_map;
-int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
+int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 662f62dfb2aa..8d74bdef68c1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -336,16 +336,19 @@ static int is_cpuid_PSE36(void)
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
WRITE_ONCE(*sptep, spte);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
WRITE_ONCE(*sptep, spte);
}
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
return xchg(sptep, spte);
}
@@ -4101,23 +4104,31 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
return leaf;
}
-/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
-static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
+static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level)
{
- u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
- struct rsvd_bits_validate *rsvd_check;
- int root, leaf, level;
- bool reserved = false;
+ int leaf;
walk_shadow_page_lockless_begin(vcpu);
if (is_tdp_mmu_active(vcpu))
- leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
+ leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
else
- leaf = get_walk(vcpu, addr, sptes, &root);
+ leaf = get_walk(vcpu, addr, sptes, root_level);
walk_shadow_page_lockless_end(vcpu);
+ return leaf;
+}
+
+/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
+static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
+{
+ u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
+ struct rsvd_bits_validate *rsvd_check;
+ int root, leaf, level;
+ bool reserved = false;
+ leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
if (unlikely(leaf < 0)) {
*sptep = 0ull;
return reserved;
@@ -4400,9 +4411,6 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
return RET_PF_EMULATE;
}
- fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
- smp_rmb();
-
/*
* Check for a relevant mmu_notifier invalidation event before getting
* the pfn from the primary MMU, and before acquiring mmu_lock.
@@ -5921,6 +5929,22 @@ emulate:
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
+{
+ u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
+ int root_level, leaf, level;
+
+ leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
+ if (unlikely(leaf < 0))
+ return;
+
+ pr_err("%s %llx", msg, gpa);
+ for (level = root_level; level >= leaf; level--)
+ pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
+ pr_cont("\n");
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
+
static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, hpa_t root_hpa)
{
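
The new kvm_mmu_print_sptes() simply replays the lockless walk and prints one entry per level. A compilable stand-alone analog of that printing loop, with made-up SPTE values, may help visualize the output format:

/* Illustrative only: print a recorded walk from root_level down to leaf. */
#include <stdint.h>
#include <stdio.h>

#define ROOT_MAX_LEVEL 5

int main(void)
{
	uint64_t sptes[ROOT_MAX_LEVEL + 1] = { 0 };
	int root_level = 4, leaf = 1, level;
	uint64_t gpa = 0x12345000;	/* hypothetical guest physical address */

	/* Hypothetical SPTE values a lockless walk might have recorded. */
	sptes[4] = 0x80000000001ff007ULL;
	sptes[3] = 0x80000000002aa007ULL;
	sptes[2] = 0x80000000003bb007ULL;
	sptes[1] = 0x86000000077cc877ULL;

	printf("#VE %llx", (unsigned long long)gpa);
	for (level = root_level; level >= leaf; level--)
		printf(", spte[%d] = 0x%llx", level,
		       (unsigned long long)sptes[level]);
	printf("\n");
	return 0;
}
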
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 5dd5405fa07a..52fa004a1fbc 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -3,6 +3,8 @@
#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H
+#include <asm/vmx.h>
+
#include "mmu.h"
#include "mmu_internal.h"
@@ -276,6 +278,13 @@ static inline bool is_shadow_present_pte(u64 pte)
return !!(pte & SPTE_MMU_PRESENT_MASK);
}
+static inline bool is_ept_ve_possible(u64 spte)
+{
+ return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) &&
+ !(spte & VMX_EPT_SUPPRESS_VE_BIT) &&
+ (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
+}
+
/*
* Returns true if A/D bits are supported in hardware and are enabled by KVM.
* When enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can
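
A self-contained sketch of the is_ept_ve_possible() predicate introduced above: the bit values are copied from the VMX definitions purely for illustration, and shadow_present_mask is assumed to carry the suppress-#VE bit, as it would when the feature is in use.

/* Illustrative only: an SPTE can cause a #VE iff the MMU uses the suppress
 * bit, the SPTE does not have it set, and the SPTE is not an EPT misconfig. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VMX_EPT_SUPPRESS_VE_BIT		(1ULL << 63)
#define VMX_EPT_RWX_MASK		0x7ULL
#define VMX_EPT_MISCONFIG_WX_VALUE	0x6ULL	/* write+execute, not readable */

static const uint64_t shadow_present_mask = VMX_EPT_SUPPRESS_VE_BIT;

static bool is_ept_ve_possible(uint64_t spte)
{
	return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) &&
	       !(spte & VMX_EPT_SUPPRESS_VE_BIT) &&
	       (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
}

int main(void)
{
	/* Non-present SPTE without the suppress bit: a #VE would be possible. */
	printf("zeroed SPTE:       %d\n", is_ept_ve_possible(0x0));
	/* SPTE with the suppress bit set: the CPU cannot deliver a #VE. */
	printf("suppress-bit SPTE: %d\n",
	       is_ept_ve_possible(VMX_EPT_SUPPRESS_VE_BIT));
	return 0;
}
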
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index fae559559a80..2880fd392e0c 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -21,11 +21,13 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
return xchg(rcu_dereference(sptep), new_spte);
}
static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
+ KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 1259dd63defc..36539c1b36cd 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -626,7 +626,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* SPTEs.
*/
handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
- 0, iter->level, true);
+ SHADOW_NONPRESENT_VALUE, iter->level, true);
return 0;
}
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0623cfaa7bb0..95095a233a45 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -779,6 +779,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
*/
fpstate_set_confidential(&vcpu->arch.guest_fpu);
vcpu->arch.guest_state_protected = true;
+
+ /*
+	 * An SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
+	 * only after setting guest_state_protected because KVM_SET_MSRS allows
+	 * dynamic toggling of LBRV (for performance reasons) on write access to
+	 * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
+ */
+ svm_enable_lbrv(vcpu);
return 0;
}
@@ -2406,6 +2414,12 @@ void __init sev_hardware_setup(void)
if (!boot_cpu_has(X86_FEATURE_SEV_ES))
goto out;
+ if (!lbrv) {
+ WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
+ "LBRV must be present for SEV-ES support");
+ goto out;
+ }
+
/* Has the system been allocated ASIDs for SEV-ES? */
if (min_sev_asid == 1)
goto out;
@@ -3216,7 +3230,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
- svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
/*
	 * An SEV-ES guest requires a VMSA area that is separate from the
@@ -3268,10 +3281,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/* Clear intercepts on selected MSRs */
set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
void sev_init_vmcb(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c8dc25886c16..c95d3900fe56 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_SPEC_CTRL, .always = false },
{ .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_FLUSH_CMD, .always = false },
+ { .index = MSR_IA32_DEBUGCTLMSR, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@@ -215,7 +216,7 @@ int vgif = true;
module_param(vgif, int, 0444);
/* enable/disable LBR virtualization */
-static int lbrv = true;
+int lbrv = true;
module_param(lbrv, int, 0444);
static int tsc_scaling = true;
@@ -990,7 +991,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}
-static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1000,6 +1001,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ if (sev_es_guest(vcpu->kvm))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
+
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
if (is_guest_mode(vcpu))
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
@@ -1009,6 +1013,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
+
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
@@ -2822,10 +2828,24 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
return 0;
}
+static bool
+sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ return sev_es_guest(vcpu->kvm) &&
+ vcpu->arch.guest_state_protected &&
+ svm_msrpm_offset(msr_info->index) != MSR_INVALID &&
+ !msr_write_intercepted(vcpu, msr_info->index);
+}
+
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (sev_es_prevent_msr_access(vcpu, msr_info)) {
+ msr_info->data = 0;
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
+ }
+
switch (msr_info->index) {
case MSR_AMD64_TSC_RATIO:
if (!msr_info->host_initiated &&
@@ -2976,6 +2996,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u32 ecx = msr->index;
u64 data = msr->data;
+
+ if (sev_es_prevent_msr_access(vcpu, msr))
+ return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
+
switch (ecx) {
case MSR_AMD64_TSC_RATIO:
@@ -3846,16 +3870,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
/*
- * KVM should never request an NMI window when vNMI is enabled, as KVM
- * allows at most one to-be-injected NMI and one pending NMI, i.e. if
- * two NMIs arrive simultaneously, KVM will inject one and set
- * V_NMI_PENDING for the other. WARN, but continue with the standard
- * single-step approach to try and salvage the pending NMI.
+ * If NMIs are outright masked, i.e. the vCPU is already handling an
+ * NMI, and KVM has not yet intercepted an IRET, then there is nothing
+ * more to do at this time as KVM has already enabled IRET intercepts.
+ * If KVM has already intercepted IRET, then single-step over the IRET,
+ * as NMIs aren't architecturally unmasked until the IRET completes.
+ *
+ * If vNMI is enabled, KVM should never request an NMI window if NMIs
+ * are masked, as KVM allows at most one to-be-injected NMI and one
+ * pending NMI. If two NMIs arrive simultaneously, KVM will inject one
+ * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
+ * unmasked. KVM _will_ request an NMI window in some situations, e.g.
+ * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
+ * inject the NMI. In those situations, KVM needs to single-step over
+ * the STI shadow or intercept STGI.
*/
- WARN_ON_ONCE(is_vnmi_enabled(svm));
+ if (svm_get_nmi_mask(vcpu)) {
+ WARN_ON_ONCE(is_vnmi_enabled(svm));
- if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
- return; /* IRET will cause a vm exit */
+ if (!svm->awaiting_iret_completion)
+ return; /* IRET will cause a vm exit */
+ }
/*
* SEV-ES guests are responsible for signaling when a vCPU is ready to
@@ -5265,6 +5300,12 @@ static __init int svm_hardware_setup(void)
nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
+ if (lbrv) {
+ if (!boot_cpu_has(X86_FEATURE_LBRV))
+ lbrv = false;
+ else
+ pr_info("LBR virtualization supported\n");
+ }
/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
* may be modified by svm_adjust_mmio_mask()), as well as nrips.
@@ -5318,14 +5359,6 @@ static __init int svm_hardware_setup(void)
svm_x86_ops.set_vnmi_pending = NULL;
}
-
- if (lbrv) {
- if (!boot_cpu_has(X86_FEATURE_LBRV))
- lbrv = false;
- else
- pr_info("LBR virtualization supported\n");
- }
-
if (!enable_pmu)
pr_info("PMU virtualization is disabled\n");
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be57213cd295..0f1472690b59 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
-#define MAX_DIRECT_ACCESS_MSRS 47
+#define MAX_DIRECT_ACCESS_MSRS 48
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
@@ -39,6 +39,7 @@ extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
+extern int lbrv;
/*
* Clean bits in VMCB.
@@ -552,6 +553,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
+void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d5b832126e34..643935a0f70a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2242,6 +2242,9 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
vmcs_write64(EPT_POINTER,
construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
+ if (vmx->ve_info)
+ vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info));
+
/* All VMFUNCs are currently emulated through L0 vmexits. */
if (cpu_has_vmx_vmfunc())
vmcs_write64(VM_FUNCTION_CONTROL, 0);
@@ -6230,6 +6233,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
else if (is_alignment_check(intr_info) &&
!vmx_guest_inject_ac(vcpu))
return true;
+ else if (is_ve_fault(intr_info))
+ return true;
return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6051fad5945f..b3c83c06f826 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5218,8 +5218,15 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
if (is_invalid_opcode(intr_info))
return handle_ud(vcpu);
- if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
- return -EIO;
+ if (WARN_ON_ONCE(is_ve_fault(intr_info))) {
+ struct vmx_ve_information *ve_info = vmx->ve_info;
+
+ WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION,
+ "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason);
+ dump_vmcs(vcpu);
+ kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE");
+ return 1;
+ }
error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 082ac6d95a3a..0763a0f72a06 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -164,15 +164,6 @@ module_param(kvmclock_periodic_sync, bool, 0444);
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, 0644);
-/*
- * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
- * adaptive tuning starting from default advancement of 1000ns. '0' disables
- * advancement entirely. Any other value is used as-is and disables adaptive
- * tuning, i.e. allows privileged userspace to set an exact advancement time.
- */
-static int __read_mostly lapic_timer_advance_ns = -1;
-module_param(lapic_timer_advance_ns, int, 0644);
-
static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, 0444);
@@ -10727,13 +10718,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
- else {
- static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
- if (ioapic_in_kernel(vcpu->kvm))
- kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
- }
+ else if (ioapic_in_kernel(vcpu->kvm))
+ kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
if (is_guest_mode(vcpu))
vcpu->arch.load_eoi_exitmap_pending = true;
@@ -12169,7 +12159,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (r < 0)
return r;
- r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
+ r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 80570eb3c89b..384da1fdd5c6 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -6,8 +6,10 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
+
#include <asm/setup.h>
#include <asm/cmdline.h>
+#include <asm/bug.h>
static inline int myisspace(u8 c)
{
@@ -205,12 +207,18 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
int cmdline_find_option_bool(const char *cmdline, const char *option)
{
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
+ WARN_ON_ONCE(!builtin_cmdline_added);
+
return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
}
int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
int bufsize)
{
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
+ WARN_ON_ONCE(!builtin_cmdline_added);
+
return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
buffer, bufsize);
}
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 10d5ed8b5990..a314622aa093 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -50,11 +50,17 @@
.endif
.endm
+.macro UACCESS op src dst
+1: \op \src,\dst
+ _ASM_EXTABLE_UA(1b, __get_user_handle_exception)
+.endm
+
+
.text
SYM_FUNC_START(__get_user_1)
check_range size=1
ASM_STAC
-1: movzbl (%_ASM_AX),%edx
+ UACCESS movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -64,7 +70,7 @@ EXPORT_SYMBOL(__get_user_1)
SYM_FUNC_START(__get_user_2)
check_range size=2
ASM_STAC
-2: movzwl (%_ASM_AX),%edx
+ UACCESS movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -74,7 +80,7 @@ EXPORT_SYMBOL(__get_user_2)
SYM_FUNC_START(__get_user_4)
check_range size=4
ASM_STAC
-3: movl (%_ASM_AX),%edx
+ UACCESS movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -85,10 +91,11 @@ SYM_FUNC_START(__get_user_8)
check_range size=8
ASM_STAC
#ifdef CONFIG_X86_64
-4: movq (%_ASM_AX),%rdx
+ UACCESS movq (%_ASM_AX),%rdx
#else
-4: movl (%_ASM_AX),%edx
-5: movl 4(%_ASM_AX),%ecx
+ xor %ecx,%ecx
+ UACCESS movl (%_ASM_AX),%edx
+ UACCESS movl 4(%_ASM_AX),%ecx
#endif
xor %eax,%eax
ASM_CLAC
@@ -100,7 +107,7 @@ EXPORT_SYMBOL(__get_user_8)
SYM_FUNC_START(__get_user_nocheck_1)
ASM_STAC
ASM_BARRIER_NOSPEC
-6: movzbl (%_ASM_AX),%edx
+ UACCESS movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -110,7 +117,7 @@ EXPORT_SYMBOL(__get_user_nocheck_1)
SYM_FUNC_START(__get_user_nocheck_2)
ASM_STAC
ASM_BARRIER_NOSPEC
-7: movzwl (%_ASM_AX),%edx
+ UACCESS movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -120,7 +127,7 @@ EXPORT_SYMBOL(__get_user_nocheck_2)
SYM_FUNC_START(__get_user_nocheck_4)
ASM_STAC
ASM_BARRIER_NOSPEC
-8: movl (%_ASM_AX),%edx
+ UACCESS movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -131,10 +138,11 @@ SYM_FUNC_START(__get_user_nocheck_8)
ASM_STAC
ASM_BARRIER_NOSPEC
#ifdef CONFIG_X86_64
-9: movq (%_ASM_AX),%rdx
+ UACCESS movq (%_ASM_AX),%rdx
#else
-9: movl (%_ASM_AX),%edx
-10: movl 4(%_ASM_AX),%ecx
+ xor %ecx,%ecx
+ UACCESS movl (%_ASM_AX),%edx
+ UACCESS movl 4(%_ASM_AX),%ecx
#endif
xor %eax,%eax
ASM_CLAC
@@ -150,36 +158,3 @@ SYM_CODE_START_LOCAL(__get_user_handle_exception)
mov $(-EFAULT),%_ASM_AX
RET
SYM_CODE_END(__get_user_handle_exception)
-
-#ifdef CONFIG_X86_32
-SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
- ASM_CLAC
-bad_get_user_8:
- xor %edx,%edx
- xor %ecx,%ecx
- mov $(-EFAULT),%_ASM_AX
- RET
-SYM_CODE_END(__get_user_8_handle_exception)
-#endif
-
-/* get_user */
- _ASM_EXTABLE_UA(1b, __get_user_handle_exception)
- _ASM_EXTABLE_UA(2b, __get_user_handle_exception)
- _ASM_EXTABLE_UA(3b, __get_user_handle_exception)
-#ifdef CONFIG_X86_64
- _ASM_EXTABLE_UA(4b, __get_user_handle_exception)
-#else
- _ASM_EXTABLE_UA(4b, __get_user_8_handle_exception)
- _ASM_EXTABLE_UA(5b, __get_user_8_handle_exception)
-#endif
-
-/* __get_user */
- _ASM_EXTABLE_UA(6b, __get_user_handle_exception)
- _ASM_EXTABLE_UA(7b, __get_user_handle_exception)
- _ASM_EXTABLE_UA(8b, __get_user_handle_exception)
-#ifdef CONFIG_X86_64
- _ASM_EXTABLE_UA(9b, __get_user_handle_exception)
-#else
- _ASM_EXTABLE_UA(9b, __get_user_8_handle_exception)
- _ASM_EXTABLE_UA(10b, __get_user_8_handle_exception)
-#endif
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index e0411a3774d4..5eecb45d05d5 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
{
+ const void *orig_to = to;
+ const size_t orig_n = n;
+
if (unlikely(!n))
return;
@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
}
rep_movs(to, (const void *)from, n);
/* KMSAN must treat values read from devices as initialized. */
- kmsan_unpoison_memory(to, n);
+ kmsan_unpoison_memory(orig_to, orig_n);
}
static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
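
The iomem fix above is an instance of a common bug class: the copy loop advances the destination pointer and shrinks the count, so any post-copy bookkeeping must use the saved originals. A small stand-alone example, where mark_initialized() is a made-up stand-in for kmsan_unpoison_memory():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void mark_initialized(const void *p, size_t n)
{
	printf("marked %zu bytes at %p\n", n, p);
}

static void copy_in_chunks(void *to, const void *from, size_t n)
{
	void *orig_to = to;
	size_t orig_n = n;

	while (n) {
		size_t chunk = n > 4 ? 4 : n;

		memcpy(to, from, chunk);
		to = (char *)to + chunk;
		from = (const char *)from + chunk;
		n -= chunk;
	}

	/* Using "to"/"n" here would mark zero bytes past the end of the buffer. */
	mark_initialized(orig_to, orig_n);
}

int main(void)
{
	char dst[10], src[10] = "abcdefghi";

	copy_in_chunks(dst, src, sizeof(src));
	return 0;
}
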
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 968d7005f4a7..c45127265f2f 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -4,6 +4,79 @@
* included by both the compressed kernel and the regular kernel.
*/
+static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
+{
+ pte_t *pte = pte_offset_kernel(pmd, 0);
+
+ info->free_pgt_page(pte, info->context);
+}
+
+static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
+{
+ pmd_t *pmd = pmd_offset(pud, 0);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_present(pmd[i]))
+ continue;
+
+ if (pmd_leaf(pmd[i]))
+ continue;
+
+ free_pte(info, &pmd[i]);
+ }
+
+ info->free_pgt_page(pmd, info->context);
+}
+
+static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
+{
+ pud_t *pud = pud_offset(p4d, 0);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++) {
+ if (!pud_present(pud[i]))
+ continue;
+
+ if (pud_leaf(pud[i]))
+ continue;
+
+ free_pmd(info, &pud[i]);
+ }
+
+ info->free_pgt_page(pud, info->context);
+}
+
+static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
+{
+ p4d_t *p4d = p4d_offset(pgd, 0);
+ int i;
+
+ for (i = 0; i < PTRS_PER_P4D; i++) {
+ if (!p4d_present(p4d[i]))
+ continue;
+
+ free_pud(info, &p4d[i]);
+ }
+
+ if (pgtable_l5_enabled())
+ info->free_pgt_page(p4d, info->context);
+}
+
+void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+ if (!pgd_present(pgd[i]))
+ continue;
+
+ free_p4d(info, &pgd[i]);
+ }
+
+ info->free_pgt_page(pgd, info->context);
+}
+
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
unsigned long addr, unsigned long end)
{
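
The freeing helpers added above walk each table bottom-up, skipping holes and leaf entries before releasing the table itself. A userspace sketch of the same traversal over a made-up two-level structure (only the visit order mirrors free_pmd()/free_pud(); the types are invented):

#include <stdbool.h>
#include <stdlib.h>

#define ENTRIES 8

struct level1 { bool present[ENTRIES]; };

struct level2 {
	bool present[ENTRIES];
	bool leaf[ENTRIES];
	struct level1 *next[ENTRIES];
};

static void free_level2(struct level2 *l2)
{
	for (int i = 0; i < ENTRIES; i++) {
		if (!l2->present[i] || l2->leaf[i])
			continue;		/* nothing below a hole or a leaf */
		free(l2->next[i]);		/* free the lower-level table first */
	}
	free(l2);				/* then free this table */
}

int main(void)
{
	struct level2 *l2 = calloc(1, sizeof(*l2));

	l2->present[0] = true;
	l2->next[0] = calloc(1, sizeof(struct level1));
	l2->present[1] = true;
	l2->leaf[1] = true;			/* large mapping: no lower table */

	free_level2(l2);
	return 0;
}
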
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7e177856ee4f..28002cc7a37d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -469,7 +469,9 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN))
+ E820_TYPE_RESERVED_KERN) &&
+ !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
+ E820_TYPE_ACPI))
set_pte_init(pte, __pte(0), init);
continue;
}
@@ -524,7 +526,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN))
+ E820_TYPE_RESERVED_KERN) &&
+ !e820__mapped_any(paddr & PMD_MASK, paddr_next,
+ E820_TYPE_ACPI))
set_pmd_init(pmd, __pmd(0), init);
continue;
}
@@ -611,7 +615,9 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN))
+ E820_TYPE_RESERVED_KERN) &&
+ !e820__mapped_any(paddr & PUD_MASK, paddr_next,
+ E820_TYPE_ACPI))
set_pud_init(pud, __pud(0), init);
continue;
}
@@ -698,7 +704,9 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN))
+ E820_TYPE_RESERVED_KERN) &&
+ !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+ E820_TYPE_ACPI))
set_p4d_init(p4d, __p4d(0), init);
continue;
}
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 422602f6039b..86a476a426c2 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -2,7 +2,7 @@
/*
* AMD Memory Encryption Support
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016-2024 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
@@ -283,7 +283,7 @@ static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
#endif
}
-static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
/*
* To maintain the security guarantees of SEV-SNP guests, make sure
@@ -292,11 +292,11 @@ static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
snp_set_memory_shared(vaddr, npages);
- return true;
+ return 0;
}
/* Return true unconditionally: return value doesn't matter for the SEV side */
-static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
/*
* After memory is mapped encrypted in the page table, validate it
@@ -308,7 +308,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
- return true;
+ return 0;
}
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
@@ -510,6 +510,12 @@ void __init sme_early_init(void)
*/
x86_init.resources.dmi_setup = snp_dmi_setup;
}
+
+ /*
+ * Switch the SVSM CA mapping (if active) from identity mapped to
+ * kernel mapped.
+ */
+ snp_update_svsm_ca();
}
void __init mem_encrypt_free_decrypted_mem(void)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index ce84ba86e69e..6ce10e3c6228 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,7 +493,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
for_each_reserved_mem_region(mb_region) {
int nid = memblock_get_region_node(mb_region);
- if (nid != MAX_NUMNODES)
+ if (nid != NUMA_NO_NODE)
node_set(nid, reserved_nodemask);
}
@@ -614,9 +614,9 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
- MAX_NUMNODES));
+ NUMA_NO_NODE));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
- MAX_NUMNODES));
+ NUMA_NO_NODE));
/* In case that parsing SRAT failed. */
WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
numa_reset_distance();
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 19fdfbb171ed..443a97e515c0 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -662,8 +662,9 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
/*
* Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry, the level of the mapping, and the effective
- * NX and RW bits of all page table levels.
+ * Return a pointer to the entry (or NULL if the entry does not exist),
+ * the level of the entry, and the effective NX and RW bits of all
+ * page table levels.
*/
pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
unsigned int *level, bool *nx, bool *rw)
@@ -672,13 +673,14 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
pud_t *pud;
pmd_t *pmd;
- *level = PG_LEVEL_NONE;
+ *level = PG_LEVEL_256T;
*nx = false;
*rw = true;
if (pgd_none(*pgd))
return NULL;
+ *level = PG_LEVEL_512G;
*nx |= pgd_flags(*pgd) & _PAGE_NX;
*rw &= pgd_flags(*pgd) & _PAGE_RW;
@@ -686,10 +688,10 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
if (p4d_none(*p4d))
return NULL;
- *level = PG_LEVEL_512G;
if (p4d_leaf(*p4d) || !p4d_present(*p4d))
return (pte_t *)p4d;
+ *level = PG_LEVEL_1G;
*nx |= p4d_flags(*p4d) & _PAGE_NX;
*rw &= p4d_flags(*p4d) & _PAGE_RW;
@@ -697,10 +699,10 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
if (pud_none(*pud))
return NULL;
- *level = PG_LEVEL_1G;
if (pud_leaf(*pud) || !pud_present(*pud))
return (pte_t *)pud;
+ *level = PG_LEVEL_2M;
*nx |= pud_flags(*pud) & _PAGE_NX;
*rw &= pud_flags(*pud) & _PAGE_RW;
@@ -708,15 +710,13 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
if (pmd_none(*pmd))
return NULL;
- *level = PG_LEVEL_2M;
if (pmd_leaf(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd;
+ *level = PG_LEVEL_4K;
*nx |= pmd_flags(*pmd) & _PAGE_NX;
*rw &= pmd_flags(*pmd) & _PAGE_RW;
- *level = PG_LEVEL_4K;
-
return pte_offset_kernel(pmd, address);
}
@@ -736,9 +736,8 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
* Lookup the page table entry for a virtual address. Return a pointer
* to the entry and the level of the mapping.
*
- * Note: We return pud and pmd either when the entry is marked large
- * or when the present bit is not set. Otherwise we would return a
- * pointer to a nonexisting mapping.
+ * Note: the function returns p4d, pud or pmd either when the entry is marked
+ * large or when the present bit is not set. Otherwise it returns NULL.
*/
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
@@ -2196,7 +2195,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
/* Notify hypervisor that we are about to set/clr encryption attribute. */
- if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
+ ret = x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
+ if (ret)
goto vmm_fail;
ret = __change_page_attr_set_clr(&cpa, 1);
@@ -2214,24 +2214,61 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
return ret;
/* Notify hypervisor that we have successfully set/clr encryption attribute. */
- if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+ ret = x86_platform.guest.enc_status_change_finish(addr, numpages, enc);
+ if (ret)
goto vmm_fail;
return 0;
vmm_fail:
- WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
- (void *)addr, numpages, enc ? "private" : "shared");
+ WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s: %d\n",
+ (void *)addr, numpages, enc ? "private" : "shared", ret);
+
+ return ret;
+}
+
+/*
+ * The lock serializes conversions between private and shared memory.
+ *
+ * It is taken for read on conversion. A write lock guarantees that no
+ * concurrent conversions are in progress.
+ */
+static DECLARE_RWSEM(mem_enc_lock);
+
+/*
+ * Stop new private<->shared conversions.
+ *
+ * Taking the exclusive mem_enc_lock waits for in-flight conversions to complete.
+ * The lock is not released to prevent new conversions from being started.
+ */
+bool set_memory_enc_stop_conversion(void)
+{
+ /*
+ * In a crash scenario, sleep is not allowed. Try to take the lock.
+ * Failure indicates that there is a race with the conversion.
+ */
+ if (oops_in_progress)
+ return down_write_trylock(&mem_enc_lock);
+
+ down_write(&mem_enc_lock);
- return -EIO;
+ return true;
}
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return __set_memory_enc_pgtable(addr, numpages, enc);
+ int ret = 0;
- return 0;
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
+ if (!down_read_trylock(&mem_enc_lock))
+ return -EBUSY;
+
+ ret = __set_memory_enc_pgtable(addr, numpages, enc);
+
+ up_read(&mem_enc_lock);
+ }
+
+ return ret;
}
int set_memory_encrypted(unsigned long addr, int numpages)
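
The mem_enc_lock scheme above maps naturally onto a reader/writer lock: conversions are readers, and stopping conversions takes (and deliberately keeps) the write side, falling back to a trylock when sleeping is not allowed. A pthreads analog, purely illustrative and not the kernel rwsem:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mem_enc_lock = PTHREAD_RWLOCK_INITIALIZER;

static int convert_range(void)
{
	if (pthread_rwlock_tryrdlock(&mem_enc_lock))
		return -1;			/* like returning -EBUSY */

	/* ... the encryption attribute would be flipped here ... */

	pthread_rwlock_unlock(&mem_enc_lock);
	return 0;
}

static bool stop_conversions(bool cannot_sleep)
{
	if (cannot_sleep)			/* crash/kexec path */
		return pthread_rwlock_trywrlock(&mem_enc_lock) == 0;

	pthread_rwlock_wrlock(&mem_enc_lock);
	return true;				/* lock intentionally kept held */
}

int main(void)
{
	printf("convert:            %d\n", convert_range());
	printf("stop:               %d\n", stop_conversions(false));
	printf("convert after stop: %d\n", convert_range());
	return 0;
}
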
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5159c7a22922..d25d81c8ecc0 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1234,13 +1234,11 @@ bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
}
static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
- bool *regs_used, bool *tail_call_seen)
+ bool *regs_used)
{
int i;
for (i = 1; i <= insn_cnt; i++, insn++) {
- if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
- *tail_call_seen = true;
if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
regs_used[0] = true;
if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
@@ -1324,7 +1322,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
struct bpf_insn *insn = bpf_prog->insnsi;
bool callee_regs_used[4] = {};
int insn_cnt = bpf_prog->len;
- bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
u64 arena_vm_start, user_vm_start;
@@ -1336,11 +1333,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
- detect_reg_usage(insn, insn_cnt, callee_regs_used,
- &tail_call_seen);
-
- /* tail call's presence in current prog implies it is reachable */
- tail_call_reachable |= tail_call_seen;
+ detect_reg_usage(insn, insn_cnt, callee_regs_used);
emit_prologue(&prog, bpf_prog->aux->stack_depth,
bpf_prog_was_classic(bpf_prog), tail_call_reachable,
@@ -3363,7 +3356,7 @@ out_image:
*
* Both cases are serious bugs and justify WARN_ON.
*/
- if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
+ if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
/* header has been freed */
header = NULL;
goto out_image;
@@ -3442,7 +3435,7 @@ void bpf_jit_free(struct bpf_prog *prog)
* before freeing it.
*/
if (jit_data) {
- bpf_jit_binary_pack_finalize(prog, jit_data->header,
+ bpf_jit_binary_pack_finalize(jit_data->header,
jit_data->rw_header);
kvfree(jit_data->addrs);
kfree(jit_data);
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 8edd62206604..b433b1753016 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -216,7 +216,7 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
}
static const struct x86_cpu_id intel_mid_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, NULL),
{}
};
@@ -233,9 +233,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
return 0;
ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
- if (ret < 0) {
+ if (ret) {
dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
- return ret;
+ return pcibios_err_to_errno(ret);
}
id = x86_match_cpu(intel_mid_cpu_ids);
@@ -243,7 +243,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
model = id->model;
switch (model) {
- case INTEL_FAM6_ATOM_SILVERMONT_MID:
+ case VFM_MODEL(INTEL_ATOM_SILVERMONT_MID):
polarity_low = false;
/* Special treatment for IRQ0 */
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 652cd53e77f6..0f2fe524f60d 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -38,10 +38,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
u8 gsi;
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
- if (rc < 0) {
+ if (rc) {
dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
rc);
- return rc;
+ return pcibios_err_to_errno(rc);
}
/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
pirq = gsi;
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
index 6b9c6deca8ba..44c30ce6360a 100644
--- a/arch/x86/platform/atom/punit_atom_debug.c
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -165,14 +165,13 @@ static void punit_s2idle_check_register(struct punit_device *punit_device) {}
static void punit_s2idle_check_unregister(void) {}
#endif
-#define X86_MATCH(model, data) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- X86_FEATURE_MWAIT, data)
+#define X86_MATCH(vfm, data) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_MWAIT, data)
static const struct x86_cpu_id intel_punit_cpu_ids[] = {
- X86_MATCH(ATOM_SILVERMONT, &punit_device_byt),
- X86_MATCH(ATOM_SILVERMONT_MID, &punit_device_tng),
- X86_MATCH(ATOM_AIRMONT, &punit_device_cht),
+ X86_MATCH(INTEL_ATOM_SILVERMONT, &punit_device_byt),
+ X86_MATCH(INTEL_ATOM_SILVERMONT_MID, &punit_device_tng),
+ X86_MATCH(INTEL_ATOM_AIRMONT, &punit_device_cht),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index 543df9a1379d..500cab4a7f7c 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -5,5 +5,4 @@ GCOV_PROFILE := n
obj-$(CONFIG_EFI) += memmap.o quirks.o efi.o efi_$(BITS).o \
efi_stub_$(BITS).o
obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o
-obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index f090ec972d7b..88a96816de9a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -226,8 +226,6 @@ int __init efi_memblock_x86_reserve_range(void)
if (add_efi_memmap || do_efi_soft_reserve())
do_add_efi_memmap();
- efi_fake_memmap_early();
-
WARN(efi.memmap.desc_version != 1,
"Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
efi.memmap.desc_version);
diff --git a/arch/x86/platform/efi/fake_mem.c b/arch/x86/platform/efi/fake_mem.c
deleted file mode 100644
index 41d57cad3d84..000000000000
--- a/arch/x86/platform/efi/fake_mem.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * fake_mem.c
- *
- * Copyright (C) 2015 FUJITSU LIMITED
- * Author: Taku Izumi <izumi.taku@jp.fujitsu.com>
- *
- * This code introduces new boot option named "efi_fake_mem"
- * By specifying this parameter, you can add arbitrary attribute to
- * specific memory range by updating original (firmware provided) EFI
- * memmap.
- */
-
-#include <linux/kernel.h>
-#include <linux/efi.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <linux/types.h>
-#include <linux/sort.h>
-#include <asm/e820/api.h>
-#include <asm/efi.h>
-
-#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
-
-static struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
-static int nr_fake_mem;
-
-static int __init cmp_fake_mem(const void *x1, const void *x2)
-{
- const struct efi_mem_range *m1 = x1;
- const struct efi_mem_range *m2 = x2;
-
- if (m1->range.start < m2->range.start)
- return -1;
- if (m1->range.start > m2->range.start)
- return 1;
- return 0;
-}
-
-static void __init efi_fake_range(struct efi_mem_range *efi_range)
-{
- struct efi_memory_map_data data = { 0 };
- int new_nr_map = efi.memmap.nr_map;
- efi_memory_desc_t *md;
- void *new_memmap;
-
- /* count up the number of EFI memory descriptor */
- for_each_efi_memory_desc(md)
- new_nr_map += efi_memmap_split_count(md, &efi_range->range);
-
- /* allocate memory for new EFI memmap */
- if (efi_memmap_alloc(new_nr_map, &data) != 0)
- return;
-
- /* create new EFI memmap */
- new_memmap = early_memremap(data.phys_map, data.size);
- if (!new_memmap) {
- __efi_memmap_free(data.phys_map, data.size, data.flags);
- return;
- }
-
- efi_memmap_insert(&efi.memmap, new_memmap, efi_range);
-
- /* swap into new EFI memmap */
- early_memunmap(new_memmap, data.size);
-
- efi_memmap_install(&data);
-}
-
-void __init efi_fake_memmap(void)
-{
- int i;
-
- if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
- return;
-
- for (i = 0; i < nr_fake_mem; i++)
- efi_fake_range(&efi_fake_mems[i]);
-
- /* print new EFI memmap */
- efi_print_memmap();
-}
-
-static int __init setup_fake_mem(char *p)
-{
- u64 start = 0, mem_size = 0, attribute = 0;
- int i;
-
- if (!p)
- return -EINVAL;
-
- while (*p != '\0') {
- mem_size = memparse(p, &p);
- if (*p == '@')
- start = memparse(p+1, &p);
- else
- break;
-
- if (*p == ':')
- attribute = simple_strtoull(p+1, &p, 0);
- else
- break;
-
- if (nr_fake_mem >= EFI_MAX_FAKEMEM)
- break;
-
- efi_fake_mems[nr_fake_mem].range.start = start;
- efi_fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
- efi_fake_mems[nr_fake_mem].attribute = attribute;
- nr_fake_mem++;
-
- if (*p == ',')
- p++;
- }
-
- sort(efi_fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
- cmp_fake_mem, NULL);
-
- for (i = 0; i < nr_fake_mem; i++)
- pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
- efi_fake_mems[i].attribute, efi_fake_mems[i].range.start,
- efi_fake_mems[i].range.end);
-
- return *p == '\0' ? 0 : -EINVAL;
-}
-
-early_param("efi_fake_mem", setup_fake_mem);
-
-void __init efi_fake_memmap_early(void)
-{
- int i;
-
- /*
- * The late efi_fake_mem() call can handle all requests if
- * EFI_MEMORY_SP support is disabled.
- */
- if (!efi_soft_reserve_enabled())
- return;
-
- if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
- return;
-
- /*
- * Given that efi_fake_memmap() needs to perform memblock
- * allocations it needs to run after e820__memblock_setup().
- * However, if efi_fake_mem specifies EFI_MEMORY_SP for a given
- * address range that potentially needs to mark the memory as
- * reserved prior to e820__memblock_setup(). Update e820
- * directly if EFI_MEMORY_SP is specified for an
- * EFI_CONVENTIONAL_MEMORY descriptor.
- */
- for (i = 0; i < nr_fake_mem; i++) {
- struct efi_mem_range *mem = &efi_fake_mems[i];
- efi_memory_desc_t *md;
- u64 m_start, m_end;
-
- if ((mem->attribute & EFI_MEMORY_SP) == 0)
- continue;
-
- m_start = mem->range.start;
- m_end = mem->range.end;
- for_each_efi_memory_desc(md) {
- u64 start, end, size;
-
- if (md->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-
- start = md->phys_addr;
- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
-
- if (m_start <= end && m_end >= start)
- /* fake range overlaps descriptor */;
- else
- continue;
-
- /*
- * Trim the boundary of the e820 update to the
- * descriptor in case the fake range overlaps
- * !EFI_CONVENTIONAL_MEMORY
- */
- start = max(start, m_start);
- end = min(end, m_end);
- size = end - start + 1;
-
- if (end <= start)
- continue;
-
- /*
- * Ensure each efi_fake_mem instance results in
- * a unique e820 resource
- */
- e820__range_remove(start, size, E820_TYPE_RAM, 1);
- e820__range_add(start, size, E820_TYPE_SOFT_RESERVED);
- e820__update_table(e820_table);
- }
- }
-}
diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c
index 4ef20b49eb5e..061b8ecc71a1 100644
--- a/arch/x86/platform/efi/memmap.c
+++ b/arch/x86/platform/efi/memmap.c
@@ -30,6 +30,7 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
return PFN_PHYS(page_to_pfn(p));
}
+static
void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
if (flags & EFI_MEMMAP_MEMBLOCK) {
@@ -92,12 +93,22 @@ int __init efi_memmap_alloc(unsigned int num_entries,
*/
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
+ unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
+ unsigned long flags = efi.memmap.flags;
+ u64 phys = efi.memmap.phys_map;
+ int ret;
+
efi_memmap_unmap();
if (efi_enabled(EFI_PARAVIRT))
return 0;
- return __efi_memmap_init(data);
+ ret = __efi_memmap_init(data);
+ if (ret)
+ return ret;
+
+ __efi_memmap_free(phys, size, flags);
+ return 0;
}
/**

diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 7be71c2cdc83..f83bbe0acd4a 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -22,6 +22,7 @@
#include <asm/mpspec_def.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
+#include <asm/cpu_device_id.h>
#include <asm/io_apic.h>
#include <asm/intel-mid.h>
#include <asm/io.h>
@@ -55,9 +56,8 @@ static void __init intel_mid_time_init(void)
static void intel_mid_arch_setup(void)
{
- switch (boot_cpu_data.x86_model) {
- case 0x3C:
- case 0x4A:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_ATOM_SILVERMONT_MID:
x86_platform.legacy.rtc = 1;
break;
default:
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index fdd49d70b437..c81cea208c2c 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -62,7 +62,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
fail_read:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
- return result;
+ return pcibios_err_to_errno(result);
}
static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
@@ -91,7 +91,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
fail_write:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
- return result;
+ return pcibios_err_to_errno(result);
}
int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 89df5d89d664..51655133eee3 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -9,6 +9,10 @@
#include <linux/cache.h>
#include <asm/syscall.h>
+extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
/*
* Below you can see, in terms of #define's, the differences between the x86-64
* and the UML syscall table.
@@ -22,15 +26,13 @@
#define sys_vm86 sys_ni_syscall
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL_NORETURN __SYSCALL
#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_32.h>
+#undef __SYSCALL
-#undef __SYSCALL
#define __SYSCALL(nr, sym) sym,
-
-extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
-
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
#include <asm/syscalls_32.h>
};
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index b0b4cfd2308c..943d414f2109 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -9,6 +9,10 @@
#include <linux/cache.h>
#include <asm/syscall.h>
+extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
/*
* Below you can see, in terms of #define's, the differences between the x86-64
* and the UML syscall table.
@@ -18,14 +22,13 @@
#define sys_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
+#define __SYSCALL_NORETURN __SYSCALL
+
#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_64.h>
+#undef __SYSCALL
-#undef __SYSCALL
#define __SYSCALL(nr, sym) sym,
-
-extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
-
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
#include <asm/syscalls_64.h>
};
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 0ae10535c699..0ce17766c0e5 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -120,7 +120,7 @@ static __init void snp_enable(void *arg)
bool snp_probe_rmptable_info(void)
{
- u64 max_rmp_pfn, calc_rmp_sz, rmp_sz, rmp_base, rmp_end;
+ u64 rmp_sz, rmp_base, rmp_end;
rdmsrl(MSR_AMD64_RMP_BASE, rmp_base);
rdmsrl(MSR_AMD64_RMP_END, rmp_end);
@@ -137,28 +137,11 @@ bool snp_probe_rmptable_info(void)
rmp_sz = rmp_end - rmp_base + 1;
- /*
- * Calculate the amount the memory that must be reserved by the BIOS to
- * address the whole RAM, including the bookkeeping area. The RMP itself
- * must also be covered.
- */
- max_rmp_pfn = max_pfn;
- if (PHYS_PFN(rmp_end) > max_pfn)
- max_rmp_pfn = PHYS_PFN(rmp_end);
-
- calc_rmp_sz = (max_rmp_pfn << 4) + RMPTABLE_CPU_BOOKKEEPING_SZ;
-
- if (calc_rmp_sz > rmp_sz) {
- pr_err("Memory reserved for the RMP table does not cover full system RAM (expected 0x%llx got 0x%llx)\n",
- calc_rmp_sz, rmp_sz);
- return false;
- }
-
probed_rmp_base = rmp_base;
probed_rmp_size = rmp_sz;
pr_info("RMP table physical range [0x%016llx - 0x%016llx]\n",
- probed_rmp_base, probed_rmp_base + probed_rmp_size - 1);
+ rmp_base, rmp_end);
return true;
}
@@ -206,9 +189,8 @@ void __init snp_fixup_e820_tables(void)
*/
static int __init snp_rmptable_init(void)
{
+ u64 max_rmp_pfn, calc_rmp_sz, rmptable_size, rmp_end, val;
void *rmptable_start;
- u64 rmptable_size;
- u64 val;
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
@@ -219,10 +201,28 @@ static int __init snp_rmptable_init(void)
if (!probed_rmp_size)
goto nosnp;
+ rmp_end = probed_rmp_base + probed_rmp_size - 1;
+
+ /*
+	 * Calculate the amount of memory that must be reserved by the BIOS to
+ * address the whole RAM, including the bookkeeping area. The RMP itself
+ * must also be covered.
+ */
+ max_rmp_pfn = max_pfn;
+ if (PFN_UP(rmp_end) > max_pfn)
+ max_rmp_pfn = PFN_UP(rmp_end);
+
+ calc_rmp_sz = (max_rmp_pfn << 4) + RMPTABLE_CPU_BOOKKEEPING_SZ;
+ if (calc_rmp_sz > probed_rmp_size) {
+ pr_err("Memory reserved for the RMP table does not cover full system RAM (expected 0x%llx got 0x%llx)\n",
+ calc_rmp_sz, probed_rmp_size);
+ goto nosnp;
+ }
+
rmptable_start = memremap(probed_rmp_base, probed_rmp_size, MEMREMAP_WB);
if (!rmptable_start) {
pr_err("Failed to map RMP table\n");
- return 1;
+ goto nosnp;
}
/*
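
The reservation check relocated into snp_rmptable_init() is simple arithmetic: 16 bytes of RMP entry per 4K page of RAM, plus a fixed bookkeeping area. A worked example with assumed sizes (the 16K bookkeeping constant below is only a stand-in for RMPTABLE_CPU_BOOKKEEPING_SZ):

#include <stdint.h>
#include <stdio.h>

#define BOOKKEEPING_SZ	0x4000ULL	/* assumed value for illustration */

int main(void)
{
	uint64_t max_rmp_pfn = 8ULL << 20;		/* 8M 4K pages == 32 GiB of RAM */
	uint64_t probed_rmp_size = 130ULL << 20;	/* what firmware reserved */

	/* 16 bytes per page, hence the shift by 4, plus the bookkeeping area. */
	uint64_t calc_rmp_sz = (max_rmp_pfn << 4) + BOOKKEEPING_SZ;

	printf("needed:   %llu bytes\n", (unsigned long long)calc_rmp_sz);
	printf("reserved: %llu bytes\n", (unsigned long long)probed_rmp_size);
	printf("%s\n", calc_rmp_sz > probed_rmp_size ?
	       "RMP reservation too small" : "RMP reservation OK");
	return 0;
}
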
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 49a1c6890b55..4e2b2e2ac9f9 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -33,7 +33,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/tdx.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include "tdx.h"
@@ -1426,9 +1426,9 @@ static void __init check_tdx_erratum(void)
* private memory poisons that memory, and a subsequent read of
* that memory triggers #MC.
*/
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
- case INTEL_FAM6_EMERALDRAPIDS_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
setup_force_cpu_bug(X86_BUG_TDX_PW_MCE);
}
}
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 8b045dd25196..bb0f3f368446 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -10,8 +10,6 @@
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include "xen-ops.h"
-#include "pmu.h"
-#include "smp.h"
static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
{
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index 532410998684..b8c9f2a7d9b6 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -3,7 +3,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include "debugfs.h"
+#include "xen-ops.h"
static struct dentry *d_xen_debug;
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
deleted file mode 100644
index 6b813ad1091c..000000000000
--- a/arch/x86/xen/debugfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_DEBUGFS_H
-#define _XEN_DEBUGFS_H
-
-struct dentry * __init xen_init_debugfs(void);
-
-#endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0305485edcd3..84e5adbd0925 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -20,8 +20,6 @@
#include <asm/setup.h>
#include "xen-ops.h"
-#include "smp.h"
-#include "pmu.h"
EXPORT_SYMBOL_GPL(hypercall_page);
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index c001a2296582..24d2957a4726 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -28,8 +28,6 @@
#include <asm/xen/page.h>
#include "xen-ops.h"
-#include "mmu.h"
-#include "smp.h"
static unsigned long shared_info_pfn;
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 9ba53814ed6a..2c12ae42dc8b 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -85,10 +85,6 @@
#endif
#include "xen-ops.h"
-#include "mmu.h"
-#include "smp.h"
-#include "multicalls.h"
-#include "pmu.h"
#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 60e9c37fd79f..c4c479373249 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -5,8 +5,7 @@
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>
-#include "multicalls.h"
-#include "mmu.h"
+#include "xen-ops.h"
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
deleted file mode 100644
index 6e4c6bd62203..000000000000
--- a/arch/x86/xen/mmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_MMU_H
-
-#include <linux/linkage.h>
-#include <asm/page.h>
-
-enum pt_level {
- PT_PGD,
- PT_P4D,
- PT_PUD,
- PT_PMD,
- PT_PTE
-};
-
-
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-
-void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
-
-pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
-void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep, pte_t pte);
-
-unsigned long xen_read_cr2_direct(void);
-
-extern void xen_init_mmu_ops(void);
-extern void xen_hvm_init_mmu_ops(void);
-#endif /* _XEN_MMU_H */
diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
index 509bdee3ab90..337955652202 100644
--- a/arch/x86/xen/mmu_hvm.c
+++ b/arch/x86/xen/mmu_hvm.c
@@ -5,7 +5,7 @@
#include <xen/interface/xen.h>
#include <xen/hvm.h>
-#include "mmu.h"
+#include "xen-ops.h"
#ifdef CONFIG_PROC_VMCORE
/*
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 54e0d311dcc9..f1ce39d6d32c 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -82,9 +82,7 @@
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
-#include "multicalls.h"
-#include "mmu.h"
-#include "debugfs.h"
+#include "xen-ops.h"
/*
* Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order
@@ -128,7 +126,7 @@ static DEFINE_SPINLOCK(xen_reservation_lock);
* looking at another vcpu's cr3 value, it should use this variable.
*/
DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
-DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
+static DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
static phys_addr_t xen_pt_base, xen_pt_size __initdata;
@@ -305,16 +303,17 @@ static void xen_set_pte(pte_t *ptep, pte_t pteval)
__xen_set_pte(ptep, pteval);
}
-pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
return *ptep;
}
-void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep, pte_t pte)
+static void xen_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
{
struct mmu_update u;
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 07054572297f..d4cefd8a9af4 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -23,26 +23,21 @@
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>
+#include <linux/jump_label.h>
+#include <linux/printk.h>
#include <asm/xen/hypercall.h>
-#include "multicalls.h"
-#include "debugfs.h"
+#include "xen-ops.h"
#define MC_BATCH 32
-#define MC_DEBUG 0
-
#define MC_ARGS (MC_BATCH * 16)
struct mc_buffer {
unsigned mcidx, argidx, cbidx;
struct multicall_entry entries[MC_BATCH];
-#if MC_DEBUG
- struct multicall_entry debug[MC_BATCH];
- void *caller[MC_BATCH];
-#endif
unsigned char args[MC_ARGS];
struct callback {
void (*fn)(void *);
@@ -50,13 +45,98 @@ struct mc_buffer {
} callbacks[MC_BATCH];
};
+struct mc_debug_data {
+ struct multicall_entry entries[MC_BATCH];
+ void *caller[MC_BATCH];
+ size_t argsz[MC_BATCH];
+ unsigned long *args[MC_BATCH];
+};
+
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
+static struct mc_debug_data mc_debug_data_early __initdata;
+static struct mc_debug_data __percpu *mc_debug_data __refdata =
+ &mc_debug_data_early;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
+static struct static_key mc_debug __ro_after_init;
+static bool mc_debug_enabled __initdata;
+
+static int __init xen_parse_mc_debug(char *arg)
+{
+ mc_debug_enabled = true;
+ static_key_slow_inc(&mc_debug);
+
+ return 0;
+}
+early_param("xen_mc_debug", xen_parse_mc_debug);
+
+static int __init mc_debug_enable(void)
+{
+ struct mc_debug_data __percpu *mcdb;
+ unsigned long flags;
+
+ if (!mc_debug_enabled)
+ return 0;
+
+ mcdb = alloc_percpu(struct mc_debug_data);
+ if (!mcdb) {
+ pr_err("xen_mc_debug inactive\n");
+ static_key_slow_dec(&mc_debug);
+ return -ENOMEM;
+ }
+
+ /* Be careful when switching to percpu debug data. */
+ local_irq_save(flags);
+ xen_mc_flush();
+ mc_debug_data = mcdb;
+ local_irq_restore(flags);
+
+ pr_info("xen_mc_debug active\n");
+
+ return 0;
+}
+early_initcall(mc_debug_enable);
+
+/* Number of parameters of hypercalls used via multicalls. */
+static const uint8_t hpcpars[] = {
+ [__HYPERVISOR_mmu_update] = 4,
+ [__HYPERVISOR_stack_switch] = 2,
+ [__HYPERVISOR_fpu_taskswitch] = 1,
+ [__HYPERVISOR_update_descriptor] = 2,
+ [__HYPERVISOR_update_va_mapping] = 3,
+ [__HYPERVISOR_mmuext_op] = 4,
+};
+
+static void print_debug_data(struct mc_buffer *b, struct mc_debug_data *mcdb,
+ int idx)
+{
+ unsigned int arg;
+ unsigned int opidx = mcdb->entries[idx].op & 0xff;
+ unsigned int pars = 0;
+
+ pr_err(" call %2d: op=%lu result=%ld caller=%pS ", idx + 1,
+ mcdb->entries[idx].op, b->entries[idx].result,
+ mcdb->caller[idx]);
+ if (opidx < ARRAY_SIZE(hpcpars))
+ pars = hpcpars[opidx];
+ if (pars) {
+ pr_cont("pars=");
+ for (arg = 0; arg < pars; arg++)
+ pr_cont("%lx ", mcdb->entries[idx].args[arg]);
+ }
+ if (mcdb->argsz[idx]) {
+ pr_cont("args=");
+ for (arg = 0; arg < mcdb->argsz[idx] / 8; arg++)
+ pr_cont("%lx ", mcdb->args[idx][arg]);
+ }
+ pr_cont("\n");
+}
+
void xen_mc_flush(void)
{
struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_entry *mc;
+ struct mc_debug_data *mcdb = NULL;
int ret = 0;
unsigned long flags;
int i;
@@ -69,10 +149,11 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
-#if MC_DEBUG
- memcpy(b->debug, b->entries,
- b->mcidx * sizeof(struct multicall_entry));
-#endif
+ if (static_key_false(&mc_debug)) {
+ mcdb = this_cpu_ptr(mc_debug_data);
+ memcpy(mcdb->entries, b->entries,
+ b->mcidx * sizeof(struct multicall_entry));
+ }
switch (b->mcidx) {
case 0:
@@ -103,21 +184,14 @@ void xen_mc_flush(void)
pr_err("%d of %d multicall(s) failed: cpu %d\n",
ret, b->mcidx, smp_processor_id());
for (i = 0; i < b->mcidx; i++) {
- if (b->entries[i].result < 0) {
-#if MC_DEBUG
- pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
- i + 1,
- b->debug[i].op,
- b->debug[i].args[0],
- b->entries[i].result,
- b->caller[i]);
-#else
+ if (static_key_false(&mc_debug)) {
+ print_debug_data(b, mcdb, i);
+ } else if (b->entries[i].result < 0) {
pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n",
i + 1,
b->entries[i].op,
b->entries[i].args[0],
b->entries[i].result);
-#endif
}
}
}
@@ -155,9 +229,13 @@ struct multicall_space __xen_mc_entry(size_t args)
}
ret.mc = &b->entries[b->mcidx];
-#if MC_DEBUG
- b->caller[b->mcidx] = __builtin_return_address(0);
-#endif
+ if (static_key_false(&mc_debug)) {
+ struct mc_debug_data *mcdb = this_cpu_ptr(mc_debug_data);
+
+ mcdb->caller[b->mcidx] = __builtin_return_address(0);
+ mcdb->argsz[b->mcidx] = args;
+ mcdb->args[b->mcidx] = (unsigned long *)(&b->args[argidx]);
+ }
b->mcidx++;
ret.args = &b->args[argidx];
b->argidx = argidx + args;
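
The multicalls.c changes above replace the old compile-time MC_DEBUG buffers with a runtime switch: a static key that stays patched out unless the kernel is booted with xen_mc_debug, plus per-CPU debug buffers set up by an early initcall. As a rough, self-contained sketch of that early_param + static-key gating pattern (all names below are illustrative, not taken from the patch):

  #include <linux/init.h>
  #include <linux/jump_label.h>
  #include <linux/printk.h>

  static struct static_key demo_debug __ro_after_init;    /* false unless enabled at boot */
  static bool demo_debug_requested __initdata;

  static int __init demo_parse_debug(char *arg)
  {
          demo_debug_requested = true;
          static_key_slow_inc(&demo_debug);        /* turn the fast-path branch on */
          return 0;
  }
  early_param("demo_debug", demo_parse_debug);

  static int __init demo_debug_enable(void)
  {
          if (!demo_debug_requested)
                  return 0;
          /* allocate per-CPU debug state here; static_key_slow_dec() on failure */
          pr_info("demo_debug active\n");
          return 0;
  }
  early_initcall(demo_debug_enable);

  static inline void demo_hot_path(void)
  {
          /* compiles to a patched-out branch when the key was never incremented */
          if (static_key_false(&demo_debug))
                  pr_debug("debug path taken\n");
  }

The point of the pattern is that the per-call bookkeeping in xen_mc_flush() and __xen_mc_entry() costs only a patched-out branch when xen_mc_debug is not given on the command line.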
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
deleted file mode 100644
index c3867b585e0d..000000000000
--- a/arch/x86/xen/multicalls.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_MULTICALLS_H
-#define _XEN_MULTICALLS_H
-
-#include <trace/events/xen.h>
-
-#include "xen-ops.h"
-
-/* Multicalls */
-struct multicall_space
-{
- struct multicall_entry *mc;
- void *args;
-};
-
-/* Allocate room for a multicall and its args */
-struct multicall_space __xen_mc_entry(size_t args);
-
-DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
-
-/* Call to start a batch of multiple __xen_mc_entry()s. Must be
- paired with xen_mc_issue() */
-static inline void xen_mc_batch(void)
-{
- unsigned long flags;
-
- /* need to disable interrupts until this entry is complete */
- local_irq_save(flags);
- trace_xen_mc_batch(xen_get_lazy_mode());
- __this_cpu_write(xen_mc_irq_flags, flags);
-}
-
-static inline struct multicall_space xen_mc_entry(size_t args)
-{
- xen_mc_batch();
- return __xen_mc_entry(args);
-}
-
-/* Flush all pending multicalls */
-void xen_mc_flush(void);
-
-/* Issue a multicall if we're not in a lazy mode */
-static inline void xen_mc_issue(unsigned mode)
-{
- trace_xen_mc_issue(mode);
-
- if ((xen_get_lazy_mode() & mode) == 0)
- xen_mc_flush();
-
- /* restore flags saved in xen_mc_batch */
- local_irq_restore(this_cpu_read(xen_mc_irq_flags));
-}
-
-/* Set up a callback to be called when the current batch is flushed */
-void xen_mc_callback(void (*fn)(void *), void *data);
-
-/*
- * Try to extend the arguments of the previous multicall command. The
- * previous command's op must match. If it does, then it attempts to
- * extend the argument space allocated to the multicall entry by
- * arg_size bytes.
- *
- * The returned multicall_space will return with mc pointing to the
- * command on success, or NULL on failure, and args pointing to the
- * newly allocated space.
- */
-struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
-
-#endif /* _XEN_MULTICALLS_H */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 99918beccd80..7c735b730acd 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -81,7 +81,6 @@
#include <xen/balloon.h>
#include <xen/grant_table.h>
-#include "multicalls.h"
#include "xen-ops.h"
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
@@ -730,7 +729,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
- unmap[0].host_addr = map_ops[i].host_addr,
+ unmap[0].host_addr = map_ops[i].host_addr;
unmap[0].handle = map_ops[i].handle;
map_ops[i].handle = INVALID_GRANT_HANDLE;
if (map_ops[i].flags & GNTMAP_device_map)
@@ -740,7 +739,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
if (kmap_ops) {
kmap_ops[i].status = GNTST_general_error;
- unmap[1].host_addr = kmap_ops[i].host_addr,
+ unmap[1].host_addr = kmap_ops[i].host_addr;
unmap[1].handle = kmap_ops[i].handle;
kmap_ops[i].handle = INVALID_GRANT_HANDLE;
if (kmap_ops[i].flags & GNTMAP_device_map)
@@ -795,7 +794,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
-#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
static const char * const type_name[] = {
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 246d67dab510..f06987b0efc3 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -10,7 +10,6 @@
#include <xen/interface/xenpmu.h>
#include "xen-ops.h"
-#include "pmu.h"
/* x86_pmu.handle_irq definition */
#include "../events/perf_event.h"
diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
deleted file mode 100644
index 65c58894fc79..000000000000
--- a/arch/x86/xen/pmu.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __XEN_PMU_H
-#define __XEN_PMU_H
-
-#include <xen/interface/xenpmu.h>
-
-extern bool is_xen_pmu;
-
-irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
-#ifdef CONFIG_XEN_HAVE_VPMU
-void xen_pmu_init(int cpu);
-void xen_pmu_finish(int cpu);
-#else
-static inline void xen_pmu_init(int cpu) {}
-static inline void xen_pmu_finish(int cpu) {}
-#endif
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
-int pmu_apic_update(uint32_t reg);
-unsigned long long xen_read_pmc(int counter);
-
-#endif /* __XEN_PMU_H */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 380591028cb8..a0c3e77e3d5b 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -34,7 +34,6 @@
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
-#include "mmu.h"
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 935771726f9c..05f92c812ac8 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -9,7 +9,6 @@
#include <xen/hvc-console.h>
#include "xen-ops.h"
-#include "smp.h"
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
deleted file mode 100644
index b8efdbc693f7..000000000000
--- a/arch/x86/xen/smp.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_SMP_H
-
-#ifdef CONFIG_SMP
-
-void asm_cpu_bringup_and_idle(void);
-asmlinkage void cpu_bringup_and_idle(void);
-
-extern void xen_send_IPI_mask(const struct cpumask *mask,
- int vector);
-extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector);
-extern void xen_send_IPI_allbutself(int vector);
-extern void xen_send_IPI_all(int vector);
-extern void xen_send_IPI_self(int vector);
-
-extern int xen_smp_intr_init(unsigned int cpu);
-extern void xen_smp_intr_free(unsigned int cpu);
-int xen_smp_intr_init_pv(unsigned int cpu);
-void xen_smp_intr_free_pv(unsigned int cpu);
-
-void xen_smp_count_cpus(void);
-void xen_smp_cpus_done(unsigned int max_cpus);
-
-void xen_smp_send_reschedule(int cpu);
-void xen_smp_send_call_function_ipi(const struct cpumask *mask);
-void xen_smp_send_call_function_single_ipi(int cpu);
-
-void __noreturn xen_cpu_bringup_again(unsigned long stack);
-
-struct xen_common_irq {
- int irq;
- char *name;
-};
-#else /* CONFIG_SMP */
-
-static inline int xen_smp_intr_init(unsigned int cpu)
-{
- return 0;
-}
-static inline void xen_smp_intr_free(unsigned int cpu) {}
-
-static inline int xen_smp_intr_init_pv(unsigned int cpu)
-{
- return 0;
-}
-static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
-static inline void xen_smp_count_cpus(void) { }
-#endif /* CONFIG_SMP */
-
-#endif
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index ac95d1981cc0..485c1d8804f7 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -5,8 +5,6 @@
#include <xen/events.h>
#include "xen-ops.h"
-#include "smp.h"
-
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index ac41d83b38d3..7ea57f728b89 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -46,9 +46,6 @@
#include <xen/hvc-console.h>
#include "xen-ops.h"
-#include "mmu.h"
-#include "smp.h"
-#include "pmu.h"
cpumask_var_t xen_cpu_initialized_map;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 5c6fc16e4b92..8e4efe0fb6f9 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -18,7 +18,6 @@
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
-static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
{
@@ -68,7 +67,7 @@ void xen_init_lock_cpu(int cpu)
int irq;
char *name;
- if (!xen_pvspin)
+ if (nopvspin)
return;
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
@@ -95,7 +94,7 @@ void xen_uninit_lock_cpu(int cpu)
{
int irq;
- if (!xen_pvspin)
+ if (nopvspin)
return;
kfree(per_cpu(irq_name, cpu));
@@ -125,10 +124,10 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
void __init xen_init_spinlocks(void)
{
/* Don't need to use pvqspinlock code if there is only 1 vCPU. */
- if (num_possible_cpus() == 1 || nopvspin)
- xen_pvspin = false;
+ if (num_possible_cpus() == 1)
+ nopvspin = true;
- if (!xen_pvspin) {
+ if (nopvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
static_branch_disable(&virt_spin_lock_key);
return;
@@ -143,12 +142,3 @@ void __init xen_init_spinlocks(void)
pv_ops.lock.kick = xen_qlock_kick;
pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
-
-static __init int xen_parse_nopvspin(char *arg)
-{
- pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
- xen_pvspin = false;
- return 0;
-}
-early_param("xen_nopvspin", xen_parse_nopvspin);
-
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 1d83152c761b..77a6ea1c60e4 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -15,8 +15,6 @@
#include <asm/fixmap.h>
#include "xen-ops.h"
-#include "mmu.h"
-#include "pmu.h"
static DEFINE_PER_CPU(u64, spec_ctrl);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 52fa5609b7f6..96521b1874ac 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -30,7 +30,7 @@
#include "xen-ops.h"
/* Minimum amount of time until next clock event fires */
-#define TIMER_SLOP 100000
+#define TIMER_SLOP 1
static u64 xen_sched_clock_offset __read_mostly;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 79cf93f2c92f..e7775dff9452 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -5,8 +5,15 @@
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/irqreturn.h>
+#include <linux/linkage.h>
+
+#include <xen/interface/xenpmu.h>
#include <xen/xen-ops.h>
+#include <asm/page.h>
+
+#include <trace/events/xen.h>
+
/* These are code, but not functions. Defined in entry.S */
extern const char xen_failsafe_callback[];
@@ -23,14 +30,11 @@ void xen_copy_trap_info(struct trap_info *traps);
DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
-DECLARE_PER_CPU(unsigned long, xen_current_cr3);
extern struct start_info *xen_start_info;
extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;
-extern bool xen_fifo_events;
-
void xen_setup_mfn_list_list(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
@@ -177,4 +181,142 @@ static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
void xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns);
+struct dentry * __init xen_init_debugfs(void);
+
+enum pt_level {
+ PT_PGD,
+ PT_P4D,
+ PT_PUD,
+ PT_PMD,
+ PT_PTE
+};
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+unsigned long xen_read_cr2_direct(void);
+void xen_init_mmu_ops(void);
+void xen_hvm_init_mmu_ops(void);
+
+/* Multicalls */
+struct multicall_space
+{
+ struct multicall_entry *mc;
+ void *args;
+};
+
+/* Allocate room for a multicall and its args */
+struct multicall_space __xen_mc_entry(size_t args);
+
+DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
+
+/* Call to start a batch of multiple __xen_mc_entry()s. Must be
+ paired with xen_mc_issue() */
+static inline void xen_mc_batch(void)
+{
+ unsigned long flags;
+
+ /* need to disable interrupts until this entry is complete */
+ local_irq_save(flags);
+ trace_xen_mc_batch(xen_get_lazy_mode());
+ __this_cpu_write(xen_mc_irq_flags, flags);
+}
+
+static inline struct multicall_space xen_mc_entry(size_t args)
+{
+ xen_mc_batch();
+ return __xen_mc_entry(args);
+}
+
+/* Flush all pending multicalls */
+void xen_mc_flush(void);
+
+/* Issue a multicall if we're not in a lazy mode */
+static inline void xen_mc_issue(unsigned mode)
+{
+ trace_xen_mc_issue(mode);
+
+ if ((xen_get_lazy_mode() & mode) == 0)
+ xen_mc_flush();
+
+ /* restore flags saved in xen_mc_batch */
+ local_irq_restore(this_cpu_read(xen_mc_irq_flags));
+}
+
+/* Set up a callback to be called when the current batch is flushed */
+void xen_mc_callback(void (*fn)(void *), void *data);
+
+/*
+ * Try to extend the arguments of the previous multicall command. The
+ * previous command's op must match. If it does, then it attempts to
+ * extend the argument space allocated to the multicall entry by
+ * arg_size bytes.
+ *
+ * The returned multicall_space will return with mc pointing to the
+ * command on success, or NULL on failure, and args pointing to the
+ * newly allocated space.
+ */
+struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
+
+extern bool is_xen_pmu;
+
+irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
+#ifdef CONFIG_XEN_HAVE_VPMU
+void xen_pmu_init(int cpu);
+void xen_pmu_finish(int cpu);
+#else
+static inline void xen_pmu_init(int cpu) {}
+static inline void xen_pmu_finish(int cpu) {}
+#endif
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+int pmu_apic_update(uint32_t reg);
+unsigned long long xen_read_pmc(int counter);
+
+#ifdef CONFIG_SMP
+
+void asm_cpu_bringup_and_idle(void);
+asmlinkage void cpu_bringup_and_idle(void);
+
+extern void xen_send_IPI_mask(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_allbutself(int vector);
+extern void xen_send_IPI_all(int vector);
+extern void xen_send_IPI_self(int vector);
+
+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+int xen_smp_intr_init_pv(unsigned int cpu);
+void xen_smp_intr_free_pv(unsigned int cpu);
+
+void xen_smp_count_cpus(void);
+void xen_smp_cpus_done(unsigned int max_cpus);
+
+void xen_smp_send_reschedule(int cpu);
+void xen_smp_send_call_function_ipi(const struct cpumask *mask);
+void xen_smp_send_call_function_single_ipi(int cpu);
+
+void __noreturn xen_cpu_bringup_again(unsigned long stack);
+
+struct xen_common_irq {
+ int irq;
+ char *name;
+};
+#else /* CONFIG_SMP */
+
+static inline int xen_smp_intr_init(unsigned int cpu)
+{
+ return 0;
+}
+static inline void xen_smp_intr_free(unsigned int cpu) {}
+
+static inline int xen_smp_intr_init_pv(unsigned int cpu)
+{
+ return 0;
+}
+static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
+static inline void xen_smp_count_cpus(void) { }
+#endif /* CONFIG_SMP */
+
#endif /* XEN_OPS_H */
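
The multicall helpers consolidated into xen-ops.h above keep their original contract: xen_mc_batch() disables interrupts and must be paired with xen_mc_issue(), and __xen_mc_entry() returns a multicall_space whose args area the caller fills before issuing. A hedged sketch of a typical caller, assuming the usual helpers from asm/xen/hypercall.h and asm/xen/page.h and the XEN_LAZY_MMU lazy-mode constant used by current kernels:

  /* Illustrative only; not part of this patch. */
  static void demo_queue_mmu_update(pte_t *ptep, pte_t pteval)
  {
          struct multicall_space mcs;
          struct mmu_update *u;

          xen_mc_batch();                          /* irqs stay off until issue */

          mcs = __xen_mc_entry(sizeof(*u));        /* argument space inside the batch */
          u = mcs.args;
          u->ptr = virt_to_machine(ptep).maddr;    /* machine address of the PTE */
          u->val = pte_val_ma(pteval);

          MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

          xen_mc_issue(XEN_LAZY_MMU);              /* flushes now unless in lazy MMU mode */
  }

xen_mc_extend_args() exists for the case where the previous entry used the same hypercall op and the new arguments can simply be appended instead of consuming another multicall slot.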
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 08010dbf5e09..df275d554788 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -19,7 +19,7 @@
struct task_struct;
-static inline struct task_struct *get_current(void)
+static __always_inline struct task_struct *get_current(void)
{
return current_thread_info()->task;
}
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 326db1c1d5d8..e0dffcc43b9e 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -91,7 +91,7 @@ struct thread_info {
}
/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
+static __always_inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index b52236245e51..30af4dc3ce7b 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -3,7 +3,6 @@
#define _XTENSA_UNISTD_H
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#include <uapi/asm/unistd.h>
#define __ARCH_WANT_NEW_STAT
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index defc67909a9c..d6d2b533a574 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -263,6 +263,9 @@ static const struct proc_ops simdisk_proc_ops = {
static int __init simdisk_setup(struct simdisk *dev, int which,
struct proc_dir_entry *procdir)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
char tmp[2] = { '0' + which, 0 };
int err;
@@ -271,7 +274,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
spin_lock_init(&dev->lock);
dev->users = 0;
- dev->gd = blk_alloc_disk(NULL, NUMA_NO_NODE);
+ dev->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
if (IS_ERR(dev->gd)) {
err = PTR_ERR(dev->gd);
goto out;
diff --git a/block/Kconfig b/block/Kconfig
index dc12af58dbae..5b623b876d3b 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -62,6 +62,8 @@ config BLK_DEV_BSGLIB
config BLK_DEV_INTEGRITY
bool "Block layer data integrity support"
+ select CRC_T10DIF
+ select CRC64_ROCKSOFT
help
Some storage devices allow extra information to be
stored/retrieved to help protect the data. The block layer
@@ -72,12 +74,6 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
-config BLK_DEV_INTEGRITY_T10
- tristate
- depends on BLK_DEV_INTEGRITY
- select CRC_T10DIF
- select CRC64_ROCKSOFT
-
config BLK_DEV_WRITE_MOUNTED
bool "Allow writing to mounted block devices"
default y
diff --git a/block/Makefile b/block/Makefile
index 168150b9c510..ddfd21c1a9ff 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -26,8 +26,7 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
-obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
-obj-$(CONFIG_BLK_DEV_INTEGRITY_T10) += t10-pi.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
diff --git a/block/bdev.c b/block/bdev.c
index 353677ac49b3..c5507b6f63b8 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -385,7 +385,7 @@ static struct file_system_type bd_type = {
};
struct super_block *blockdev_superblock __ro_after_init;
-struct vfsmount *blockdev_mnt __ro_after_init;
+static struct vfsmount *blockdev_mnt __ro_after_init;
EXPORT_SYMBOL_GPL(blockdev_superblock);
void __init bdev_cache_init(void)
@@ -1260,23 +1260,42 @@ void sync_bdevs(bool wait)
}
/*
- * Handle STATX_DIOALIGN for block devices.
- *
- * Note that the inode passed to this is the inode of a block device node file,
- * not the block device's internal inode. Therefore it is *not* valid to use
- * I_BDEV() here; the block device has to be looked up by i_rdev instead.
+ * Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices.
*/
-void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
+void bdev_statx(struct path *path, struct kstat *stat,
+ u32 request_mask)
{
+ struct inode *backing_inode;
struct block_device *bdev;
- bdev = blkdev_get_no_open(inode->i_rdev);
+ if (!(request_mask & (STATX_DIOALIGN | STATX_WRITE_ATOMIC)))
+ return;
+
+ backing_inode = d_backing_inode(path->dentry);
+
+ /*
+ * Note that backing_inode is the inode of a block device node file,
+ * not the block device's internal inode. Therefore it is *not* valid
+ * to use I_BDEV() here; the block device has to be looked up by i_rdev
+ * instead.
+ */
+ bdev = blkdev_get_no_open(backing_inode->i_rdev);
if (!bdev)
return;
- stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
- stat->dio_offset_align = bdev_logical_block_size(bdev);
- stat->result_mask |= STATX_DIOALIGN;
+ if (request_mask & STATX_DIOALIGN) {
+ stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
+ stat->dio_offset_align = bdev_logical_block_size(bdev);
+ stat->result_mask |= STATX_DIOALIGN;
+ }
+
+ if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) {
+ struct request_queue *bd_queue = bdev->bd_queue;
+
+ generic_fill_statx_atomic_writes(stat,
+ queue_atomic_write_unit_min_bytes(bd_queue),
+ queue_atomic_write_unit_max_bytes(bd_queue));
+ }
blkdev_put_no_open(bdev);
}
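
bdev_statx() above now reports direct-I/O alignment and, where the device advertises it, atomic write limits for stat(2)-family calls on a block device node. A hedged userspace sketch querying both via statx(2); STATX_WRITE_ATOMIC and the stx_atomic_write_unit_* fields are assumptions that need headers new enough to define them, hence the #ifdef:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/stat.h>

  int main(int argc, char **argv)
  {
          const char *path = argc > 1 ? argv[1] : "/dev/sda";    /* device node is illustrative */
          unsigned int mask = STATX_DIOALIGN;
          struct statx stx;

  #ifdef STATX_WRITE_ATOMIC
          mask |= STATX_WRITE_ATOMIC;
  #endif
          if (statx(AT_FDCWD, path, 0, mask, &stx)) {
                  perror("statx");
                  return 1;
          }

          if (stx.stx_mask & STATX_DIOALIGN)
                  printf("dio mem align %u, dio offset align %u\n",
                         stx.stx_dio_mem_align, stx.stx_dio_offset_align);
  #ifdef STATX_WRITE_ATOMIC
          if (stx.stx_mask & STATX_WRITE_ATOMIC)
                  printf("atomic write unit min %u, max %u\n",
                         stx.stx_atomic_write_unit_min,
                         stx.stx_atomic_write_unit_max);
  #endif
          return 0;
  }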
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index d442ee358fc2..b758693697c0 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -797,57 +797,6 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
*/
bfq_link_bfqg(bfqd, bfqg);
__bfq_bic_change_cgroup(bfqd, bic, bfqg);
- /*
- * Update blkg_path for bfq_log_* functions. We cache this
- * path, and update it here, for the following
- * reasons. Operations on blkg objects in blk-cgroup are
- * protected with the request_queue lock, and not with the
- * lock that protects the instances of this scheduler
- * (bfqd->lock). This exposes BFQ to the following sort of
- * race.
- *
- * The blkg_lookup performed in bfq_get_queue, protected
- * through rcu, may happen to return the address of a copy of
- * the original blkg. If this is the case, then the
- * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
- * the blkg, is useless: it does not prevent blk-cgroup code
- * from destroying both the original blkg and all objects
- * directly or indirectly referred by the copy of the
- * blkg.
- *
- * On the bright side, destroy operations on a blkg invoke, as
- * a first step, hooks of the scheduler associated with the
- * blkg. And these hooks are executed with bfqd->lock held for
- * BFQ. As a consequence, for any blkg associated with the
- * request queue this instance of the scheduler is attached
- * to, we are guaranteed that such a blkg is not destroyed, and
- * that all the pointers it contains are consistent, while we
- * are holding bfqd->lock. A blkg_lookup performed with
- * bfqd->lock held then returns a fully consistent blkg, which
- * remains consistent until this lock is held.
- *
- * Thanks to the last fact, and to the fact that: (1) bfqg has
- * been obtained through a blkg_lookup in the above
- * assignment, and (2) bfqd->lock is being held, here we can
- * safely use the policy data for the involved blkg (i.e., the
- * field bfqg->pd) to get to the blkg associated with bfqg,
- * and then we can safely use any field of blkg. After we
- * release bfqd->lock, even just getting blkg through this
- * bfqg may cause dangling references to be traversed, as
- * bfqg->pd may not exist any more.
- *
- * In view of the above facts, here we cache, in the bfqg, any
- * blkg data we may need for this bic, and for its associated
- * bfq_queue. As of now, we need to cache only the path of the
- * blkg, which is used in the bfq_log_* functions.
- *
- * Finally, note that bfqg itself needs to be protected from
- * destruction on the blkg_free of the original blkg (which
- * invokes bfq_pd_free). We use an additional private
- * refcounter for bfqg, to let it disappear only after no
- * bfq_queue refers to it any longer.
- */
- blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
bic->blkcg_serial_nr = serial_nr;
}
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4b88a54a9b76..36a4998c4b37 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5463,40 +5463,42 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
}
}
+static void _bfq_exit_icq(struct bfq_io_cq *bic, unsigned int num_actuators)
+{
+ struct bfq_iocq_bfqq_data *bfqq_data = bic->bfqq_data;
+ unsigned int act_idx;
+
+ for (act_idx = 0; act_idx < num_actuators; act_idx++) {
+ if (bfqq_data[act_idx].stable_merge_bfqq)
+ bfq_put_stable_ref(bfqq_data[act_idx].stable_merge_bfqq);
+
+ bfq_exit_icq_bfqq(bic, true, act_idx);
+ bfq_exit_icq_bfqq(bic, false, act_idx);
+ }
+}
+
static void bfq_exit_icq(struct io_cq *icq)
{
struct bfq_io_cq *bic = icq_to_bic(icq);
struct bfq_data *bfqd = bic_to_bfqd(bic);
unsigned long flags;
- unsigned int act_idx;
+
/*
* If bfqd and thus bfqd->num_actuators is not available any
* longer, then cycle over all possible per-actuator bfqqs in
* next loop. We rely on bic being zeroed on creation, and
* therefore on its unused per-actuator fields being NULL.
- */
- unsigned int num_actuators = BFQ_MAX_ACTUATORS;
- struct bfq_iocq_bfqq_data *bfqq_data = bic->bfqq_data;
-
- /*
+ *
* bfqd is NULL if scheduler already exited, and in that case
* this is the last time these queues are accessed.
*/
if (bfqd) {
spin_lock_irqsave(&bfqd->lock, flags);
- num_actuators = bfqd->num_actuators;
- }
-
- for (act_idx = 0; act_idx < num_actuators; act_idx++) {
- if (bfqq_data[act_idx].stable_merge_bfqq)
- bfq_put_stable_ref(bfqq_data[act_idx].stable_merge_bfqq);
-
- bfq_exit_icq_bfqq(bic, true, act_idx);
- bfq_exit_icq_bfqq(bic, false, act_idx);
- }
-
- if (bfqd)
+ _bfq_exit_icq(bic, bfqd->num_actuators);
spin_unlock_irqrestore(&bfqd->lock, flags);
+ } else {
+ _bfq_exit_icq(bic, BFQ_MAX_ACTUATORS);
+ }
}
/*
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 467e8cfc41a2..08ddf2cfae5b 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -1003,9 +1003,6 @@ struct bfq_group {
/* must be the first member */
struct blkg_policy_data pd;
- /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
- char blkg_path[128];
-
/* reference counter (see comments in bfq_bic_update_cgroup) */
refcount_t ref;
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 2e3e8e04961e..b78c145eb026 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -76,7 +76,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
&bip->bip_max_vcnt, gfp_mask);
if (!bip->bip_vec)
goto err;
- } else {
+ } else if (nr_vecs) {
bip->bip_vec = bip->bip_inline_vecs;
}
@@ -144,10 +144,10 @@ void bio_integrity_free(struct bio *bio)
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_set *bs = bio->bi_pool;
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ return;
if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
kfree(bvec_virt(bip->bip_vec));
- else if (bip->bip_flags & BIP_INTEGRITY_USER)
- bio_integrity_unmap_user(bip);
__bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
@@ -155,6 +155,28 @@ void bio_integrity_free(struct bio *bio)
}
/**
+ * bio_integrity_unmap_free_user - Unmap and free bio user integrity payload
+ * @bio: bio containing bip to be unmapped and freed
+ *
+ * Description: Used to unmap and free the user-mapped integrity portion of a
+ * bio. The submitter that attached the user integrity buffer is responsible
+ * for unmapping and freeing it during completion.
+ */
+void bio_integrity_unmap_free_user(struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_set *bs = bio->bi_pool;
+
+ if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER)))
+ return;
+ bio_integrity_unmap_user(bip);
+ __bio_integrity_free(bs, bip);
+ bio->bi_integrity = NULL;
+ bio->bi_opf &= ~REQ_INTEGRITY;
+}
+EXPORT_SYMBOL(bio_integrity_unmap_free_user);
+
+/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
* @page: page containing integrity metadata
@@ -254,6 +276,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
bip->bip_flags |= BIP_INTEGRITY_USER | BIP_COPY_USER;
bip->bip_iter.bi_sector = seed;
+ bip->bip_vcnt = nr_vecs;
return 0;
free_bip:
bio_integrity_free(bio);
@@ -275,6 +298,7 @@ static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
bip->bip_flags |= BIP_INTEGRITY_USER;
bip->bip_iter.bi_sector = seed;
bip->bip_iter.bi_size = len;
+ bip->bip_vcnt = nr_vecs;
return 0;
}
@@ -312,7 +336,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
u32 seed)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- unsigned int align = q->dma_pad_mask | queue_dma_alignment(q);
+ unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
unsigned int direction, nr_bvecs;
@@ -375,44 +399,6 @@ free_bvec:
EXPORT_SYMBOL_GPL(bio_integrity_map_user);
/**
- * bio_integrity_process - Process integrity metadata for a bio
- * @bio: bio to generate/verify integrity metadata for
- * @proc_iter: iterator to process
- * @proc_fn: Pointer to the relevant processing function
- */
-static blk_status_t bio_integrity_process(struct bio *bio,
- struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
-{
- struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
- struct blk_integrity_iter iter;
- struct bvec_iter bviter;
- struct bio_vec bv;
- struct bio_integrity_payload *bip = bio_integrity(bio);
- blk_status_t ret = BLK_STS_OK;
-
- iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
- iter.interval = 1 << bi->interval_exp;
- iter.tuple_size = bi->tuple_size;
- iter.seed = proc_iter->bi_sector;
- iter.prot_buf = bvec_virt(bip->bip_vec);
- iter.pi_offset = bi->pi_offset;
-
- __bio_for_each_segment(bv, bio, bviter, *proc_iter) {
- void *kaddr = bvec_kmap_local(&bv);
-
- iter.data_buf = kaddr;
- iter.data_size = bv.bv_len;
- ret = proc_fn(&iter);
- kunmap_local(kaddr);
-
- if (ret)
- break;
-
- }
- return ret;
-}
-
-/**
* bio_integrity_prep - Prepare bio for integrity I/O
* @bio: bio to prepare
*
@@ -428,17 +414,13 @@ bool bio_integrity_prep(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ unsigned int len;
void *buf;
- unsigned long start, end;
- unsigned int len, nr_pages;
- unsigned int bytes, offset, i;
+ gfp_t gfp = GFP_NOIO;
if (!bi)
return true;
- if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
- return true;
-
if (!bio_sectors(bio))
return true;
@@ -446,32 +428,36 @@ bool bio_integrity_prep(struct bio *bio)
if (bio_integrity(bio))
return true;
- if (bio_data_dir(bio) == READ) {
- if (!bi->profile->verify_fn ||
- !(bi->flags & BLK_INTEGRITY_VERIFY))
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ if (bi->flags & BLK_INTEGRITY_NOVERIFY)
return true;
- } else {
- if (!bi->profile->generate_fn ||
- !(bi->flags & BLK_INTEGRITY_GENERATE))
+ break;
+ case REQ_OP_WRITE:
+ if (bi->flags & BLK_INTEGRITY_NOGENERATE)
return true;
+
+ /*
+ * Zero the allocated buffer so that uninitialized kernel memory is
+ * not leaked to disk for non-integrity metadata, where nothing else
+ * initializes the memory.
+ */
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
+ gfp |= __GFP_ZERO;
+ break;
+ default:
+ return true;
}
/* Allocate kernel buffer for protection data */
len = bio_integrity_bytes(bi, bio_sectors(bio));
- buf = kmalloc(len, GFP_NOIO);
+ buf = kmalloc(len, gfp);
if (unlikely(buf == NULL)) {
- printk(KERN_ERR "could not allocate integrity buffer\n");
goto err_end_io;
}
- end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- start = ((unsigned long) buf) >> PAGE_SHIFT;
- nr_pages = end - start;
-
- /* Allocate bio integrity payload and integrity vectors */
- bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
+ bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
if (IS_ERR(bip)) {
- printk(KERN_ERR "could not allocate data integrity bioset\n");
kfree(buf);
goto err_end_io;
}
@@ -479,35 +465,20 @@ bool bio_integrity_prep(struct bio *bio)
bip->bip_flags |= BIP_BLOCK_INTEGRITY;
bip_set_seed(bip, bio->bi_iter.bi_sector);
- if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
bip->bip_flags |= BIP_IP_CHECKSUM;
- /* Map it */
- offset = offset_in_page(buf);
- for (i = 0; i < nr_pages && len > 0; i++) {
- bytes = PAGE_SIZE - offset;
-
- if (bytes > len)
- bytes = len;
-
- if (bio_integrity_add_page(bio, virt_to_page(buf),
- bytes, offset) < bytes) {
- printk(KERN_ERR "could not attach integrity payload\n");
- goto err_end_io;
- }
-
- buf += bytes;
- len -= bytes;
- offset = 0;
+ if (bio_integrity_add_page(bio, virt_to_page(buf), len,
+ offset_in_page(buf)) < len) {
+ printk(KERN_ERR "could not attach integrity payload\n");
+ goto err_end_io;
}
/* Auto-generate integrity metadata if this is a write */
- if (bio_data_dir(bio) == WRITE) {
- bio_integrity_process(bio, &bio->bi_iter,
- bi->profile->generate_fn);
- } else {
+ if (bio_data_dir(bio) == WRITE)
+ blk_integrity_generate(bio);
+ else
bip->bio_iter = bio->bi_iter;
- }
return true;
err_end_io:
@@ -530,15 +501,8 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
- struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
- /*
- * At the moment verify is called bio's iterator was advanced
- * during split and completion, we need to rewind iterator to
- * it's original position.
- */
- bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
- bi->profile->verify_fn);
+ blk_integrity_verify(bio);
bio_integrity_free(bio);
bio_endio(bio);
}
@@ -560,7 +524,7 @@ bool __bio_integrity_endio(struct bio *bio)
struct bio_integrity_payload *bip = bio_integrity(bio);
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
- (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
+ (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->csum_type) {
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work);
return false;
@@ -620,14 +584,11 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
BUG_ON(bip_src == NULL);
- bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+ bip = bio_integrity_alloc(bio, gfp_mask, 0);
if (IS_ERR(bip))
return PTR_ERR(bip);
- memcpy(bip->bip_vec, bip_src->bip_vec,
- bip_src->bip_vcnt * sizeof(struct bio_vec));
-
- bip->bip_vcnt = bip_src->bip_vcnt;
+ bip->bip_vec = bip_src->bip_vec;
bip->bip_iter = bip_src->bip_iter;
bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;
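
With bio_integrity_free() no longer unmapping user pages, a submitter that attached user metadata through bio_integrity_map_user() owns the teardown and must call the new bio_integrity_unmap_free_user() from its completion path. A hedged sketch of that lifecycle (declarations come from linux/bio.h on this tree; error handling and driver specifics omitted):

  static void demo_endio(struct bio *bio)
  {
          if (bio_integrity(bio))
                  bio_integrity_unmap_free_user(bio);    /* submitter unmaps at completion */
          bio_put(bio);
  }

  static int demo_submit(struct bio *bio, void __user *meta, ssize_t meta_len,
                         u32 seed)
  {
          int ret;

          ret = bio_integrity_map_user(bio, meta, meta_len, seed);
          if (ret)
                  return ret;

          bio->bi_end_io = demo_endio;
          submit_bio(bio);
          return 0;
  }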
diff --git a/block/bio.c b/block/bio.c
index e9e809a63c59..a3b1b2266c50 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -953,7 +953,7 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
bool *same_page)
{
unsigned long mask = queue_segment_boundary(q);
- phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+ phys_addr_t addr1 = bvec_phys(bv);
phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
if ((addr1 | mask) != (addr2 | mask))
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 90b3959d88cf..bd472a30bc61 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -301,19 +301,6 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
}
/**
- * blkg_path - format cgroup path of blkg
- * @blkg: blkg of interest
- * @buf: target buffer
- * @buflen: target buffer length
- *
- * Format the path of the cgroup of @blkg into @buf.
- */
-static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
-{
- return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-}
-
-/**
* blkg_get - get a blkg reference
* @blkg: blkg to get
*
diff --git a/block/blk-core.c b/block/blk-core.c
index 82c3ae22d76d..02bceeb36f2c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -94,20 +94,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
}
EXPORT_SYMBOL(blk_queue_flag_clear);
-/**
- * blk_queue_flag_test_and_set - atomically test and set a queue flag
- * @flag: flag to be set
- * @q: request queue
- *
- * Returns the previous value of @flag - 0 if the flag was not set and 1 if
- * the flag was already set.
- */
-bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
-{
- return test_and_set_bit(flag, &q->queue_flags);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
-
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
REQ_OP_NAME(READ),
@@ -174,6 +160,8 @@ static const struct {
/* Command duration limit device-side timeout */
[BLK_STS_DURATION_LIMIT] = { -ETIME, "duration limit exceeded" },
+ [BLK_STS_INVAL] = { -EINVAL, "invalid" },
+
/* everything else not covered above: */
[BLK_STS_IOERR] = { -EIO, "I/O" },
};
@@ -739,6 +727,18 @@ void submit_bio_noacct_nocheck(struct bio *bio)
__submit_bio_noacct(bio);
}
+static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
+ struct bio *bio)
+{
+ if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
+ return BLK_STS_INVAL;
+
+ if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
+ return BLK_STS_INVAL;
+
+ return BLK_STS_OK;
+}
+
/**
* submit_bio_noacct - re-submit a bio to the block device layer for I/O
* @bio: The bio describing the location in memory and on the device.
@@ -782,7 +782,7 @@ void submit_bio_noacct(struct bio *bio)
if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
bio_op(bio) != REQ_OP_ZONE_APPEND))
goto end_io;
- if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
+ if (!bdev_write_cache(bdev)) {
bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
if (!bio_sectors(bio)) {
status = BLK_STS_OK;
@@ -791,12 +791,17 @@ void submit_bio_noacct(struct bio *bio)
}
}
- if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ if (!(q->limits.features & BLK_FEAT_POLL))
bio_clear_polled(bio);
switch (bio_op(bio)) {
case REQ_OP_READ:
case REQ_OP_WRITE:
+ if (bio->bi_opf & REQ_ATOMIC) {
+ status = blk_validate_atomic_write_op_size(q, bio);
+ if (status != BLK_STS_OK)
+ goto end_io;
+ }
break;
case REQ_OP_FLUSH:
/*
@@ -825,11 +830,8 @@ void submit_bio_noacct(struct bio *bio)
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
- if (!bdev_is_zoned(bio->bi_bdev))
- goto not_supported;
- break;
case REQ_OP_ZONE_RESET_ALL:
- if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
+ if (!bdev_is_zoned(bio->bi_bdev))
goto not_supported;
break;
case REQ_OP_DRV_IN:
@@ -915,8 +917,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
return 0;
q = bdev_get_queue(bdev);
- if (cookie == BLK_QC_T_NONE ||
- !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
return 0;
blk_flush_plug(current->plug, false);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c17cf8ed8113..a72e2a83d075 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -100,23 +100,6 @@ blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}
-static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
-{
- unsigned int policy = 0;
-
- if (blk_rq_sectors(rq))
- policy |= REQ_FSEQ_DATA;
-
- if (fflags & (1UL << QUEUE_FLAG_WC)) {
- if (rq->cmd_flags & REQ_PREFLUSH)
- policy |= REQ_FSEQ_PREFLUSH;
- if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
- (rq->cmd_flags & REQ_FUA))
- policy |= REQ_FSEQ_POSTFLUSH;
- }
- return policy;
-}
-
static unsigned int blk_flush_cur_seq(struct request *rq)
{
return 1 << ffz(rq->flush.seq);
@@ -185,7 +168,7 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
- list_move_tail(&rq->queuelist, pending);
+ list_add_tail(&rq->queuelist, pending);
break;
case REQ_FSEQ_DATA:
@@ -263,6 +246,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+ list_del_init(&rq->queuelist);
blk_flush_complete_seq(rq, fq, seq, error);
}
@@ -398,19 +382,32 @@ static void blk_rq_init_flush(struct request *rq)
bool blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
- unsigned long fflags = q->queue_flags; /* may change, cache */
- unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
+ bool supports_fua = q->limits.features & BLK_FEAT_FUA;
+ unsigned int policy = 0;
/* FLUSH/FUA request must never be merged */
WARN_ON_ONCE(rq->bio != rq->biotail);
+ if (blk_rq_sectors(rq))
+ policy |= REQ_FSEQ_DATA;
+
+ /*
+ * Check which flushes we need to sequence for this operation.
+ */
+ if (blk_queue_write_cache(q)) {
+ if (rq->cmd_flags & REQ_PREFLUSH)
+ policy |= REQ_FSEQ_PREFLUSH;
+ if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
+ policy |= REQ_FSEQ_POSTFLUSH;
+ }
+
/*
* @policy now records what operations need to be done. Adjust
* REQ_PREFLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_PREFLUSH;
- if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
+ if (!supports_fua)
rq->cmd_flags &= ~REQ_FUA;
/*
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index ccbeb6dfa87a..010decc892ea 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -107,60 +107,6 @@ new_segment:
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
-/**
- * blk_integrity_compare - Compare integrity profile of two disks
- * @gd1: Disk to compare
- * @gd2: Disk to compare
- *
- * Description: Meta-devices like DM and MD need to verify that all
- * sub-devices use the same integrity format before advertising to
- * upper layers that they can send/receive integrity metadata. This
- * function can be used to check whether two gendisk devices have
- * compatible integrity formats.
- */
-int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
-{
- struct blk_integrity *b1 = &gd1->queue->integrity;
- struct blk_integrity *b2 = &gd2->queue->integrity;
-
- if (!b1->profile && !b2->profile)
- return 0;
-
- if (!b1->profile || !b2->profile)
- return -1;
-
- if (b1->interval_exp != b2->interval_exp) {
- pr_err("%s: %s/%s protection interval %u != %u\n",
- __func__, gd1->disk_name, gd2->disk_name,
- 1 << b1->interval_exp, 1 << b2->interval_exp);
- return -1;
- }
-
- if (b1->tuple_size != b2->tuple_size) {
- pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
- gd1->disk_name, gd2->disk_name,
- b1->tuple_size, b2->tuple_size);
- return -1;
- }
-
- if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
- pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
- gd1->disk_name, gd2->disk_name,
- b1->tag_size, b2->tag_size);
- return -1;
- }
-
- if (b1->profile != b2->profile) {
- pr_err("%s: %s/%s type %s != %s\n", __func__,
- gd1->disk_name, gd2->disk_name,
- b1->profile->name, b2->profile->name);
- return -1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(blk_integrity_compare);
-
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
struct request *next)
{
@@ -214,7 +160,64 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
- return &dev_to_disk(dev)->queue->integrity;
+ return &dev_to_disk(dev)->queue->limits.integrity;
+}
+
+const char *blk_integrity_profile_name(struct blk_integrity *bi)
+{
+ switch (bi->csum_type) {
+ case BLK_INTEGRITY_CSUM_IP:
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
+ return "T10-DIF-TYPE1-IP";
+ return "T10-DIF-TYPE3-IP";
+ case BLK_INTEGRITY_CSUM_CRC:
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
+ return "T10-DIF-TYPE1-CRC";
+ return "T10-DIF-TYPE3-CRC";
+ case BLK_INTEGRITY_CSUM_CRC64:
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
+ return "EXT-DIF-TYPE1-CRC64";
+ return "EXT-DIF-TYPE3-CRC64";
+ case BLK_INTEGRITY_CSUM_NONE:
+ break;
+ }
+
+ return "nop";
+}
+EXPORT_SYMBOL_GPL(blk_integrity_profile_name);
+
+static ssize_t flag_store(struct device *dev, const char *page, size_t count,
+ unsigned char flag)
+{
+ struct request_queue *q = dev_to_disk(dev)->queue;
+ struct queue_limits lim;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(page, 10, &val);
+ if (err)
+ return err;
+
+ /* note that the flags are inverted vs the values in the sysfs files */
+ lim = queue_limits_start_update(q);
+ if (val)
+ lim.integrity.flags &= ~flag;
+ else
+ lim.integrity.flags |= flag;
+
+ blk_mq_freeze_queue(q);
+ err = queue_limits_commit_update(q, &lim);
+ blk_mq_unfreeze_queue(q);
+ if (err)
+ return err;
+ return count;
+}
+
+static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
+{
+ struct blk_integrity *bi = dev_to_bi(dev);
+
+ return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}
static ssize_t format_show(struct device *dev, struct device_attribute *attr,
@@ -222,9 +225,9 @@ static ssize_t format_show(struct device *dev, struct device_attribute *attr,
{
struct blk_integrity *bi = dev_to_bi(dev);
- if (bi->profile && bi->profile->name)
- return sysfs_emit(page, "%s\n", bi->profile->name);
- return sysfs_emit(page, "none\n");
+ if (!bi->tuple_size)
+ return sysfs_emit(page, "none\n");
+ return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}
static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
@@ -249,49 +252,26 @@ static ssize_t read_verify_store(struct device *dev,
struct device_attribute *attr,
const char *page, size_t count)
{
- struct blk_integrity *bi = dev_to_bi(dev);
- char *p = (char *) page;
- unsigned long val = simple_strtoul(p, &p, 10);
-
- if (val)
- bi->flags |= BLK_INTEGRITY_VERIFY;
- else
- bi->flags &= ~BLK_INTEGRITY_VERIFY;
-
- return count;
+ return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}
static ssize_t read_verify_show(struct device *dev,
struct device_attribute *attr, char *page)
{
- struct blk_integrity *bi = dev_to_bi(dev);
-
- return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_VERIFY));
+ return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}
static ssize_t write_generate_store(struct device *dev,
struct device_attribute *attr,
const char *page, size_t count)
{
- struct blk_integrity *bi = dev_to_bi(dev);
-
- char *p = (char *) page;
- unsigned long val = simple_strtoul(p, &p, 10);
-
- if (val)
- bi->flags |= BLK_INTEGRITY_GENERATE;
- else
- bi->flags &= ~BLK_INTEGRITY_GENERATE;
-
- return count;
+ return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}
static ssize_t write_generate_show(struct device *dev,
struct device_attribute *attr, char *page)
{
- struct blk_integrity *bi = dev_to_bi(dev);
-
- return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_GENERATE));
+ return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}
static ssize_t device_is_integrity_capable_show(struct device *dev,
@@ -325,81 +305,3 @@ const struct attribute_group blk_integrity_attr_group = {
.name = "integrity",
.attrs = integrity_attrs,
};
-
-static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
-{
- return BLK_STS_OK;
-}
-
-static void blk_integrity_nop_prepare(struct request *rq)
-{
-}
-
-static void blk_integrity_nop_complete(struct request *rq,
- unsigned int nr_bytes)
-{
-}
-
-static const struct blk_integrity_profile nop_profile = {
- .name = "nop",
- .generate_fn = blk_integrity_nop_fn,
- .verify_fn = blk_integrity_nop_fn,
- .prepare_fn = blk_integrity_nop_prepare,
- .complete_fn = blk_integrity_nop_complete,
-};
-
-/**
- * blk_integrity_register - Register a gendisk as being integrity-capable
- * @disk: struct gendisk pointer to make integrity-aware
- * @template: block integrity profile to register
- *
- * Description: When a device needs to advertise itself as being able to
- * send/receive integrity metadata it must use this function to register
- * the capability with the block layer. The template is a blk_integrity
- * struct with values appropriate for the underlying hardware. See
- * Documentation/block/data-integrity.rst.
- */
-void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
-{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
- template->flags;
- bi->interval_exp = template->interval_exp ? :
- ilog2(queue_logical_block_size(disk->queue));
- bi->profile = template->profile ? template->profile : &nop_profile;
- bi->tuple_size = template->tuple_size;
- bi->tag_size = template->tag_size;
- bi->pi_offset = template->pi_offset;
-
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-
-#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- if (disk->queue->crypto_profile) {
- pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
- disk->queue->crypto_profile = NULL;
- }
-#endif
-}
-EXPORT_SYMBOL(blk_integrity_register);
-
-/**
- * blk_integrity_unregister - Unregister block integrity profile
- * @disk: disk whose integrity profile to unregister
- *
- * Description: This function unregisters the integrity capability from
- * a block device.
- */
-void blk_integrity_unregister(struct gendisk *disk)
-{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!bi->profile)
- return;
-
- /* ensure all bios are off the integrity workqueue */
- blk_flush_integrity();
- blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
- memset(bi, 0, sizeof(*bi));
-}
-EXPORT_SYMBOL(blk_integrity_unregister);
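
As the "flags are inverted" comment in flag_store() notes, the limits now record NOVERIFY/NOGENERATE internally, but the sysfs polarity is unchanged: writing 0 to read_verify or write_generate disables that step, writing 1 re-enables it. A hedged userspace sketch (the sysfs path and disk name are assumptions):

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  static int write_sysfs(const char *path, const char *val)
  {
          int fd = open(path, O_WRONLY);

          if (fd < 0 || write(fd, val, 1) != 1) {
                  if (fd >= 0)
                          close(fd);
                  return -1;
          }
          return close(fd);
  }

  int main(void)
  {
          /* Disable read-side verification, keep write-side generation on. */
          if (write_sysfs("/sys/block/sda/integrity/read_verify", "0"))
                  perror("read_verify");
          if (write_sysfs("/sys/block/sda/integrity/write_generate", "1"))
                  perror("write_generate");
          return 0;
  }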
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 442da9dad042..9f735efa6c94 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -103,38 +103,71 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL(blkdev_issue_discard);
-static int __blkdev_issue_write_zeroes(struct block_device *bdev,
- sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
- struct bio **biop, unsigned flags)
+static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
- struct bio *bio = *biop;
- unsigned int max_sectors;
-
- if (bdev_read_only(bdev))
- return -EPERM;
-
- /* Ensure that max_sectors doesn't overflow bi_size */
- max_sectors = bdev_write_zeroes_sectors(bdev);
+ sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
- if (max_sectors == 0)
- return -EOPNOTSUPP;
+ return min(bdev_write_zeroes_sectors(bdev),
+ (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
+}
+static void __blkdev_issue_write_zeroes(struct block_device *bdev,
+ sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+ struct bio **biop, unsigned flags)
+{
while (nr_sects) {
- unsigned int len = min_t(sector_t, nr_sects, max_sectors);
+ unsigned int len = min_t(sector_t, nr_sects,
+ bio_write_zeroes_limit(bdev));
+ struct bio *bio;
+
+ if ((flags & BLKDEV_ZERO_KILLABLE) &&
+ fatal_signal_pending(current))
+ break;
- bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
+ bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
bio->bi_iter.bi_sector = sector;
if (flags & BLKDEV_ZERO_NOUNMAP)
bio->bi_opf |= REQ_NOUNMAP;
bio->bi_iter.bi_size = len << SECTOR_SHIFT;
+ *biop = bio_chain_and_submit(*biop, bio);
+
nr_sects -= len;
sector += len;
cond_resched();
}
+}
- *biop = bio;
- return 0;
+static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp, unsigned flags)
+{
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int ret = 0;
+
+ blk_start_plug(&plug);
+ __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags);
+ if (bio) {
+ if ((flags & BLKDEV_ZERO_KILLABLE) &&
+ fatal_signal_pending(current)) {
+ bio_await_chain(bio);
+ blk_finish_plug(&plug);
+ return -EINTR;
+ }
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+ blk_finish_plug(&plug);
+
+ /*
+ * For some devices there is no non-destructive way to verify whether
+ * WRITE ZEROES is actually supported. These will clear the capability
+ * on an I/O error, in which case we'll turn any error into
+ * "not supported" here.
+ */
+ if (ret && !bdev_write_zeroes_sectors(bdev))
+ return -EOPNOTSUPP;
+ return ret;
}
/*
@@ -150,35 +183,63 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
return min(pages, (sector_t)BIO_MAX_VECS);
}
-static int __blkdev_issue_zero_pages(struct block_device *bdev,
+static void __blkdev_issue_zero_pages(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
- struct bio **biop)
+ struct bio **biop, unsigned int flags)
{
- struct bio *bio = *biop;
- int bi_size = 0;
- unsigned int sz;
-
- if (bdev_read_only(bdev))
- return -EPERM;
+ while (nr_sects) {
+ unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
+ struct bio *bio;
- while (nr_sects != 0) {
- bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
- REQ_OP_WRITE, gfp_mask);
+ bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
bio->bi_iter.bi_sector = sector;
- while (nr_sects != 0) {
- sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
- bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
- nr_sects -= bi_size >> 9;
- sector += bi_size >> 9;
- if (bi_size < sz)
+ if ((flags & BLKDEV_ZERO_KILLABLE) &&
+ fatal_signal_pending(current))
+ break;
+
+ do {
+ unsigned int len, added;
+
+ len = min_t(sector_t,
+ PAGE_SIZE, nr_sects << SECTOR_SHIFT);
+ added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
+ if (added < len)
break;
- }
+ nr_sects -= added >> SECTOR_SHIFT;
+ sector += added >> SECTOR_SHIFT;
+ } while (nr_sects);
+
+ *biop = bio_chain_and_submit(*biop, bio);
cond_resched();
}
+}
- *biop = bio;
- return 0;
+static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp, unsigned flags)
+{
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int ret = 0;
+
+ if (flags & BLKDEV_ZERO_NOFALLBACK)
+ return -EOPNOTSUPP;
+
+ blk_start_plug(&plug);
+ __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
+ if (bio) {
+ if ((flags & BLKDEV_ZERO_KILLABLE) &&
+ fatal_signal_pending(current)) {
+ bio_await_chain(bio);
+ blk_finish_plug(&plug);
+ return -EINTR;
+ }
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+ blk_finish_plug(&plug);
+
+ return ret;
}
/**
@@ -204,20 +265,19 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
unsigned flags)
{
- int ret;
- sector_t bs_mask;
-
- bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
- if ((sector | nr_sects) & bs_mask)
- return -EINVAL;
-
- ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
- biop, flags);
- if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
- return ret;
+ if (bdev_read_only(bdev))
+ return -EPERM;
- return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
- biop);
+ if (bdev_write_zeroes_sectors(bdev)) {
+ __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+ gfp_mask, biop, flags);
+ } else {
+ if (flags & BLKDEV_ZERO_NOFALLBACK)
+ return -EOPNOTSUPP;
+ __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
+ biop, flags);
+ }
+ return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
@@ -237,51 +297,21 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout);
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
- int ret = 0;
- sector_t bs_mask;
- struct bio *bio;
- struct blk_plug plug;
- bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
+ int ret;
- bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
- if ((sector | nr_sects) & bs_mask)
+ if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
return -EINVAL;
+ if (bdev_read_only(bdev))
+ return -EPERM;
-retry:
- bio = NULL;
- blk_start_plug(&plug);
- if (try_write_zeroes) {
- ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
- gfp_mask, &bio, flags);
- } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
- ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
- gfp_mask, &bio);
- } else {
- /* No zeroing offload support */
- ret = -EOPNOTSUPP;
- }
- if (ret == 0 && bio) {
- ret = submit_bio_wait(bio);
- bio_put(bio);
- }
- blk_finish_plug(&plug);
- if (ret && try_write_zeroes) {
- if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
- try_write_zeroes = false;
- goto retry;
- }
- if (!bdev_write_zeroes_sectors(bdev)) {
- /*
- * Zeroing offload support was indicated, but the
- * device reported ILLEGAL REQUEST (for some devices
- * there is no non-destructive way to verify whether
- * WRITE ZEROES is actually supported).
- */
- ret = -EOPNOTSUPP;
- }
+ if (bdev_write_zeroes_sectors(bdev)) {
+ ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+ gfp_mask, flags);
+ if (ret != -EOPNOTSUPP)
+ return ret;
}
- return ret;
+ return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
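To make the reworked zeroing flow concrete, a minimal caller sketch follows. The wrapper function is hypothetical; blkdev_issue_zeroout() and the BLKDEV_ZERO_* flags are the ones shown in the hunks above. It first requests only the WRITE ZEROES offload and falls back to the explicit zero-page path when the device reports that the offload is unsupported.

#include <linux/blkdev.h>

/*
 * Hypothetical helper: zero a range, preferring the hardware offload.
 * With BLKDEV_ZERO_NOFALLBACK the reworked blkdev_issue_zeroout()
 * returns -EOPNOTSUPP when no working WRITE ZEROES support exists, so
 * the caller decides whether to pay for the page-by-page fallback.
 */
static int example_zero_range(struct block_device *bdev, sector_t sector,
                              sector_t nr_sects)
{
        int ret;

        ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
                                   BLKDEV_ZERO_NOUNMAP |
                                   BLKDEV_ZERO_NOFALLBACK);
        if (ret == -EOPNOTSUPP)
                ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
                                           GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
        return ret;
}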
diff --git a/block/blk-map.c b/block/blk-map.c
index 71210cdb3442..bce144091128 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -634,7 +634,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
const struct iov_iter *iter, gfp_t gfp_mask)
{
bool copy = false, map_bvec = false;
- unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
+ unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
struct bio *bio = NULL;
struct iov_iter i;
int ret = -EINVAL;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8534c35e0497..de5281bcadc5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -154,6 +154,19 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}
+static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
+ bool is_atomic)
+{
+ /*
+ * chunk_sectors must be a multiple of atomic_write_boundary_sectors if
+ * both are non-zero.
+ */
+ if (is_atomic && lim->atomic_write_boundary_sectors)
+ return lim->atomic_write_boundary_sectors;
+
+ return lim->chunk_sectors;
+}
+
/*
* Return the maximum number of sectors from the start of a bio that may be
* submitted as a single request to a block device. If enough sectors remain,
@@ -167,12 +180,23 @@ static inline unsigned get_max_io_size(struct bio *bio,
{
unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
- unsigned max_sectors = lim->max_sectors, start, end;
+ bool is_atomic = bio->bi_opf & REQ_ATOMIC;
+ unsigned boundary_sectors = blk_boundary_sectors(lim, is_atomic);
+ unsigned max_sectors, start, end;
- if (lim->chunk_sectors) {
+ /*
+ * We ignore lim->max_sectors for atomic writes because it may be less
+ * than the actual bio size, which we cannot tolerate.
+ */
+ if (is_atomic)
+ max_sectors = lim->atomic_write_max_sectors;
+ else
+ max_sectors = lim->max_sectors;
+
+ if (boundary_sectors) {
max_sectors = min(max_sectors,
- blk_chunk_sectors_left(bio->bi_iter.bi_sector,
- lim->chunk_sectors));
+ blk_boundary_sectors_left(bio->bi_iter.bi_sector,
+ boundary_sectors));
}
start = bio->bi_iter.bi_sector & (pbs - 1);
@@ -185,23 +209,22 @@ static inline unsigned get_max_io_size(struct bio *bio,
/**
* get_max_segment_size() - maximum number of bytes to add as a single segment
* @lim: Request queue limits.
- * @start_page: See below.
- * @offset: Offset from @start_page where to add a segment.
+ * @paddr: address of the range to add
+ * @len: maximum length available to add at @paddr
*
- * Returns the maximum number of bytes that can be added as a single segment.
+ * Returns the maximum number of bytes of the range starting at @paddr that can
+ * be added to a single segment.
*/
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
- struct page *start_page, unsigned long offset)
+ phys_addr_t paddr, unsigned int len)
{
- unsigned long mask = lim->seg_boundary_mask;
-
- offset = mask & (page_to_phys(start_page) + offset);
-
/*
* Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
* after having calculated the minimum.
*/
- return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
+ return min_t(unsigned long, len,
+ min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
+ (unsigned long)lim->max_segment_size - 1) + 1);
}
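A quick numerical check of the physical-address based computation above, restated as standalone user-space C with sample limits; the kernel helper itself works on struct queue_limits, and the numbers here are purely illustrative.

#include <stdio.h>

/*
 * Same arithmetic as get_max_segment_size(): cap the usable length at
 * @paddr by the distance to the next segment boundary and by the
 * maximum segment size; doing "+ 1" after the min avoids an overflow
 * when the boundary mask is ULONG_MAX.
 */
static unsigned long max_seg_bytes(unsigned long seg_boundary_mask,
                                   unsigned long max_segment_size,
                                   unsigned long long paddr,
                                   unsigned long len)
{
        unsigned long to_boundary = seg_boundary_mask -
                                    (seg_boundary_mask & paddr);
        unsigned long cap = (to_boundary < max_segment_size - 1 ?
                             to_boundary : max_segment_size - 1) + 1;

        return len < cap ? len : cap;
}

int main(void)
{
        /* 4KiB boundary (mask 0xfff), 64KiB max segment, 512B to the boundary */
        printf("%lu\n", max_seg_bytes(0xfff, 65536, 0x10e00, 4096));
        /* prints 512: the segment is cut at the 4KiB physical boundary */
        return 0;
}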
/**
@@ -234,9 +257,7 @@ static bool bvec_split_segs(const struct queue_limits *lim,
unsigned seg_size = 0;
while (len && *nsegs < max_segs) {
- seg_size = get_max_segment_size(lim, bv->bv_page,
- bv->bv_offset + total_len);
- seg_size = min(seg_size, len);
+ seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);
(*nsegs)++;
total_len += seg_size;
@@ -305,6 +326,11 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
*segs = nsegs;
return NULL;
split:
+ if (bio->bi_opf & REQ_ATOMIC) {
+ bio->bi_status = BLK_STS_INVAL;
+ bio_endio(bio);
+ return ERR_PTR(-EINVAL);
+ }
/*
* We can't sanely support splitting for a REQ_NOWAIT bio. End it
* with EAGAIN if splitting is required and return an error pointer.
@@ -465,8 +491,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
while (nbytes > 0) {
unsigned offset = bvec->bv_offset + total;
- unsigned len = min(get_max_segment_size(&q->limits,
- bvec->bv_page, offset), nbytes);
+ unsigned len = get_max_segment_size(&q->limits,
+ bvec_phys(bvec) + total, nbytes);
struct page *page = bvec->bv_page;
/*
@@ -588,18 +614,22 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
sector_t offset)
{
struct request_queue *q = rq->q;
- unsigned int max_sectors;
+ struct queue_limits *lim = &q->limits;
+ unsigned int max_sectors, boundary_sectors;
+ bool is_atomic = rq->cmd_flags & REQ_ATOMIC;
if (blk_rq_is_passthrough(rq))
return q->limits.max_hw_sectors;
- max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
- if (!q->limits.chunk_sectors ||
+ boundary_sectors = blk_boundary_sectors(lim, is_atomic);
+ max_sectors = blk_queue_get_max_sectors(rq);
+
+ if (!boundary_sectors ||
req_op(rq) == REQ_OP_DISCARD ||
req_op(rq) == REQ_OP_SECURE_ERASE)
return max_sectors;
return min(max_sectors,
- blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
+ blk_boundary_sectors_left(offset, boundary_sectors));
}
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
@@ -797,6 +827,18 @@ static enum elv_merge blk_try_req_merge(struct request *req,
return ELEVATOR_NO_MERGE;
}
+static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
+ struct bio *bio)
+{
+ return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
+}
+
+static bool blk_atomic_write_mergeable_rqs(struct request *rq,
+ struct request *next)
+{
+ return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
+}
+
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
@@ -820,6 +862,9 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->ioprio != next->ioprio)
return NULL;
+ if (!blk_atomic_write_mergeable_rqs(req, next))
+ return NULL;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -951,6 +996,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (rq->ioprio != bio_prio(bio))
return false;
+ if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
+ return false;
+
return true;
}
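The boundary clamping shared by get_max_io_size() and blk_rq_get_max_sectors() can be illustrated with a standalone sketch of the "sectors left until the next boundary" arithmetic. This assumes a power-of-two boundary, which blk_validate_atomic_write_limits() enforces for the atomic write HW boundary; the in-tree blk_boundary_sectors_left() may also handle non-power-of-two chunk sizes, so treat this as an approximation.

#include <stdio.h>

/* Sectors from @offset to the next multiple of the power-of-two @boundary. */
static unsigned int boundary_sectors_left(unsigned long long offset,
                                          unsigned int boundary)
{
        return boundary - (offset & (boundary - 1));
}

int main(void)
{
        /* 64KiB boundary = 128 sectors; a bio starting 8 sectors past it */
        unsigned int left = boundary_sectors_left(8, 128);

        /* get_max_io_size() would clamp max_sectors to min(max_sectors, 120) */
        printf("sectors until boundary: %u\n", left);  /* prints 120 */
        return 0;
}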
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 770c0c2b72fa..344f9e503bdb 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -84,28 +84,15 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
QUEUE_FLAG_NAME(FAIL_IO),
- QUEUE_FLAG_NAME(NONROT),
- QUEUE_FLAG_NAME(IO_STAT),
QUEUE_FLAG_NAME(NOXMERGES),
- QUEUE_FLAG_NAME(ADD_RANDOM),
- QUEUE_FLAG_NAME(SYNCHRONOUS),
QUEUE_FLAG_NAME(SAME_FORCE),
QUEUE_FLAG_NAME(INIT_DONE),
- QUEUE_FLAG_NAME(STABLE_WRITES),
- QUEUE_FLAG_NAME(POLL),
- QUEUE_FLAG_NAME(WC),
- QUEUE_FLAG_NAME(FUA),
- QUEUE_FLAG_NAME(DAX),
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED),
- QUEUE_FLAG_NAME(PCI_P2PDMA),
- QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE),
- QUEUE_FLAG_NAME(NOWAIT),
QUEUE_FLAG_NAME(SQ_SCHED),
- QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE),
};
#undef QUEUE_FLAG_NAME
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3b4df8e5ac9e..e3c3c0c21b55 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -448,6 +448,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
if (data->cmd_flags & REQ_NOWAIT)
data->flags |= BLK_MQ_REQ_NOWAIT;
+retry:
+ data->ctx = blk_mq_get_ctx(q);
+ data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+
if (q->elevator) {
/*
* All requests use scheduler tags when an I/O scheduler is
@@ -469,13 +473,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
if (ops->limit_depth)
ops->limit_depth(data->cmd_flags, data);
}
- }
-
-retry:
- data->ctx = blk_mq_get_ctx(q);
- data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
- if (!(data->rq_flags & RQF_SCHED_TAGS))
+ } else {
blk_mq_tag_busy(data->hctx);
+ }
if (data->flags & BLK_MQ_REQ_RESERVED)
data->rq_flags |= RQF_RESV;
@@ -804,10 +804,8 @@ static void blk_complete_request(struct request *req)
if (!bio)
return;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
- req->q->integrity.profile->complete_fn(req, total_bytes);
-#endif
+ blk_integrity_complete(req, total_bytes);
/*
* Upper layers may call blk_crypto_evict_key() anytime after the last
@@ -875,11 +873,9 @@ bool blk_update_request(struct request *req, blk_status_t error,
if (!req->bio)
return false;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
error == BLK_STS_OK)
- req->q->integrity.profile->complete_fn(req, nr_bytes);
-#endif
+ blk_integrity_complete(req, nr_bytes);
/*
* Upper layers may call blk_crypto_evict_key() anytime after the last
@@ -1264,10 +1260,9 @@ void blk_mq_start_request(struct request *rq)
WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
rq->mq_hctx->tags->rqs[rq->tag] = rq;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
- q->integrity.profile->prepare_fn(rq);
-#endif
+ blk_integrity_prepare(rq);
+
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
}
@@ -2914,6 +2909,17 @@ static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
INIT_LIST_HEAD(&rq->queuelist);
}
+static bool bio_unaligned(const struct bio *bio, struct request_queue *q)
+{
+ unsigned int bs_mask = queue_logical_block_size(q) - 1;
+
+ /* .bi_sector of any zero-sized bio needs to be initialized */
+ if ((bio->bi_iter.bi_size & bs_mask) ||
+ ((bio->bi_iter.bi_sector << SECTOR_SHIFT) & bs_mask))
+ return true;
+ return false;
+}
+
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
@@ -2966,6 +2972,15 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
+ /*
+ * Device reconfiguration may change the logical block size, so the
+ * alignment check has to be done with the queue usage counter held
+ */
+ if (unlikely(bio_unaligned(bio, q))) {
+ bio_io_error(bio);
+ goto queue_exit;
+ }
+
if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
if (!bio)
@@ -3041,7 +3056,7 @@ queue_exit:
blk_status_t blk_insert_cloned_request(struct request *rq)
{
struct request_queue *q = rq->q;
- unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+ unsigned int max_sectors = blk_queue_get_max_sectors(rq);
unsigned int max_segments = blk_rq_get_max_segments(rq);
blk_status_t ret;
@@ -4114,6 +4129,12 @@ void blk_mq_release(struct request_queue *q)
blk_mq_sysfs_deinit(q);
}
+static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
+{
+ return set->nr_maps > HCTX_TYPE_POLL &&
+ set->map[HCTX_TYPE_POLL].nr_queues;
+}
+
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
struct queue_limits *lim, void *queuedata)
{
@@ -4121,7 +4142,13 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
struct request_queue *q;
int ret;
- q = blk_alloc_queue(lim ? lim : &default_lim, set->numa_node);
+ if (!lim)
+ lim = &default_lim;
+ lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
+ if (blk_mq_can_poll(set))
+ lim->features |= BLK_FEAT_POLL;
+
+ q = blk_alloc_queue(lim, set->numa_node);
if (IS_ERR(q))
return q;
q->queuedata = queuedata;
@@ -4274,17 +4301,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
mutex_unlock(&q->sysfs_lock);
}
-static void blk_mq_update_poll_flag(struct request_queue *q)
-{
- struct blk_mq_tag_set *set = q->tag_set;
-
- if (set->nr_maps > HCTX_TYPE_POLL &&
- set->map[HCTX_TYPE_POLL].nr_queues)
- blk_queue_flag_set(QUEUE_FLAG_POLL, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
-}
-
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q)
{
@@ -4312,7 +4328,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->tag_set = set;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
- blk_mq_update_poll_flag(q);
INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
INIT_LIST_HEAD(&q->flush_list);
@@ -4636,13 +4651,15 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
int ret;
unsigned long i;
+ if (WARN_ON_ONCE(!q->mq_freeze_depth))
+ return -EINVAL;
+
if (!set)
return -EINVAL;
if (q->nr_requests == nr)
return 0;
- blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
ret = 0;
@@ -4676,7 +4693,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
}
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
return ret;
}
@@ -4798,8 +4814,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
fallback:
blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ struct queue_limits lim;
+
blk_mq_realloc_hw_ctxs(set, q);
- blk_mq_update_poll_flag(q);
+
if (q->nr_hw_queues != set->nr_hw_queues) {
int i = prev_nr_hw_queues;
@@ -4811,6 +4829,13 @@ fallback:
set->nr_hw_queues = prev_nr_hw_queues;
goto fallback;
}
+ lim = queue_limits_start_update(q);
+ if (blk_mq_can_poll(set))
+ lim.features |= BLK_FEAT_POLL;
+ else
+ lim.features &= ~BLK_FEAT_POLL;
+ if (queue_limits_commit_update(q, &lim) < 0)
+ pr_warn("updating the poll flag failed\n");
blk_mq_map_swqueue(q);
}
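The alignment rule enforced by the new bio_unaligned() check can be restated on plain integers, with a worked example for a 4096-byte logical block size; bio_unaligned() itself takes the struct bio and request_queue as in the hunk above.

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Both the byte size and the byte offset must be logical-block aligned. */
static bool unaligned(unsigned long long sector, unsigned int size,
                      unsigned int logical_block_size)
{
        unsigned int bs_mask = logical_block_size - 1;

        return (size & bs_mask) || ((sector << SECTOR_SHIFT) & bs_mask);
}

int main(void)
{
        printf("%d\n", unaligned(8, 8192, 4096)); /* 0: sector 8 = 4KiB offset */
        printf("%d\n", unaligned(1, 8192, 4096)); /* 1: byte offset 512 */
        printf("%d\n", unaligned(8, 512, 4096));  /* 1: size below block size */
        return 0;
}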
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a7fe8e90240a..cd8a8eabc9a5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
-#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
@@ -55,7 +55,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
}
EXPORT_SYMBOL(blk_set_stacking_limits);
-static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
+void blk_apply_bdi_limits(struct backing_dev_info *bdi,
struct queue_limits *lim)
{
/*
@@ -68,7 +68,7 @@ static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
static int blk_validate_zoned_limits(struct queue_limits *lim)
{
- if (!lim->zoned) {
+ if (!(lim->features & BLK_FEAT_ZONED)) {
if (WARN_ON_ONCE(lim->max_open_zones) ||
WARN_ON_ONCE(lim->max_active_zones) ||
WARN_ON_ONCE(lim->zone_write_granularity) ||
@@ -80,6 +80,14 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
return -EINVAL;
+ /*
+ * Given that active zones include open zones, the maximum number of
+ * open zones cannot be larger than the maximum number of active zones.
+ */
+ if (lim->max_active_zones &&
+ lim->max_open_zones > lim->max_active_zones)
+ return -EINVAL;
+
if (lim->zone_write_granularity < lim->logical_block_size)
lim->zone_write_granularity = lim->logical_block_size;
@@ -97,6 +105,120 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
return 0;
}
+static int blk_validate_integrity_limits(struct queue_limits *lim)
+{
+ struct blk_integrity *bi = &lim->integrity;
+
+ if (!bi->tuple_size) {
+ if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
+ bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
+ pr_warn("invalid PI settings.\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
+ pr_warn("integrity support disabled.\n");
+ return -EINVAL;
+ }
+
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
+ (bi->flags & BLK_INTEGRITY_REF_TAG)) {
+ pr_warn("ref tag not support without checksum.\n");
+ return -EINVAL;
+ }
+
+ if (!bi->interval_exp)
+ bi->interval_exp = ilog2(lim->logical_block_size);
+
+ return 0;
+}
+
+/*
+ * Returns max guaranteed bytes which we can fit in a bio.
+ *
+ * We request that an atomic write is an ITER_UBUF iov_iter (so a single
+ * vector), so we assume that we can fit at least PAGE_SIZE in a segment,
+ * apart from the first and last segments.
+ */
+static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
+{
+ unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
+ unsigned int length;
+
+ length = min(max_segments, 2) * lim->logical_block_size;
+ if (max_segments > 2)
+ length += (max_segments - 2) * PAGE_SIZE;
+
+ return length;
+}
+
+static void blk_atomic_writes_update_limits(struct queue_limits *lim)
+{
+ unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
+ blk_queue_max_guaranteed_bio(lim));
+
+ unit_limit = rounddown_pow_of_two(unit_limit);
+
+ lim->atomic_write_max_sectors =
+ min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
+ lim->max_hw_sectors);
+ lim->atomic_write_unit_min =
+ min(lim->atomic_write_hw_unit_min, unit_limit);
+ lim->atomic_write_unit_max =
+ min(lim->atomic_write_hw_unit_max, unit_limit);
+ lim->atomic_write_boundary_sectors =
+ lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
+}
+
+static void blk_validate_atomic_write_limits(struct queue_limits *lim)
+{
+ unsigned int boundary_sectors;
+
+ if (!lim->atomic_write_hw_max)
+ goto unsupported;
+
+ boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
+
+ if (boundary_sectors) {
+ /*
+ * A feature of boundary support is that it disallows merging bios
+ * into a request that would cross either a chunk sector or atomic
+ * write HW boundary, even though chunk sectors may be set purely
+ * for performance.
+ * For simplicity, disallow atomic writes when the chunk sector size
+ * is non-zero and smaller than the atomic write HW boundary.
+ * Furthermore, chunk sectors must be a multiple of the atomic write
+ * HW boundary; otherwise boundary support becomes complicated.
+ * Devices which do not conform to these rules can be dealt with if
+ * and when they show up.
+ */
+ if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
+ goto unsupported;
+
+ /*
+ * The boundary size just needs to be a multiple of unit_max
+ * (and not necessarily a power-of-2), so the following check
+ * could be relaxed in the future.
+ * Furthermore, if needed, unit_max could even be reduced so
+ * that it is compliant with a !power-of-2 boundary.
+ */
+ if (!is_power_of_2(boundary_sectors))
+ goto unsupported;
+ }
+
+ blk_atomic_writes_update_limits(lim);
+ return;
+
+unsupported:
+ lim->atomic_write_max_sectors = 0;
+ lim->atomic_write_boundary_sectors = 0;
+ lim->atomic_write_unit_min = 0;
+ lim->atomic_write_unit_max = 0;
+}
+
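The unit-limit derivation above can be followed with concrete numbers. The sketch below is user-space C with illustrative limits (128 segments, 4KiB pages, 512-byte logical blocks, 1MiB max_hw_sectors); the BIO_MAX_VECS clamp and the hw unit_min/unit_max caps applied by blk_atomic_writes_update_limits() are omitted for brevity.

#include <stdio.h>

/* Tiny stand-in for the kernel's rounddown_pow_of_two(). */
static unsigned int rounddown_pow2(unsigned int x)
{
        while (x & (x - 1))
                x &= x - 1;     /* clear the lowest set bit */
        return x;
}

int main(void)
{
        unsigned int max_segments = 128, page_size = 4096, lbs = 512;
        unsigned int max_hw_bytes = 2048 * 512;         /* 1MiB */

        /* first and last segment only guarantee one logical block each */
        unsigned int guaranteed = 2 * lbs + (max_segments - 2) * page_size;
        unsigned int unit_limit = max_hw_bytes < guaranteed ?
                                  max_hw_bytes : guaranteed;

        unit_limit = rounddown_pow2(unit_limit);
        printf("guaranteed=%u unit_limit=%u\n", guaranteed, unit_limit);
        /* prints: guaranteed=517120 unit_limit=262144 (i.e. 256KiB) */
        return 0;
}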
/*
* Check that the limits in lim are valid, initialize defaults for unset
* values, and cap values based on others where needed.
@@ -104,6 +226,8 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
static int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
+ unsigned int logical_block_sectors;
+ int err;
/*
* Unless otherwise specified, default to 512 byte logical blocks and a
@@ -111,6 +235,10 @@ static int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->logical_block_size)
lim->logical_block_size = SECTOR_SIZE;
+ else if (blk_validate_block_size(lim->logical_block_size)) {
+ pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
+ return -EINVAL;
+ }
if (lim->physical_block_size < lim->logical_block_size)
lim->physical_block_size = lim->logical_block_size;
@@ -134,8 +262,11 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
return -EINVAL;
+ logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
+ if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
+ return -EINVAL;
lim->max_hw_sectors = round_down(lim->max_hw_sectors,
- lim->logical_block_size >> SECTOR_SHIFT);
+ logical_block_sectors);
/*
* The actual max_sectors value is a complex beast and also takes the
@@ -149,11 +280,17 @@ static int blk_validate_limits(struct queue_limits *lim)
if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
+ } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
+ lim->max_sectors =
+ min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
+ } else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
+ lim->max_sectors =
+ min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
} else {
lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
}
lim->max_sectors = round_down(lim->max_sectors,
- lim->logical_block_size >> SECTOR_SHIFT);
+ logical_block_sectors);
/*
* Random default for the maximum number of segments. Driver should not
@@ -216,9 +353,17 @@ static int blk_validate_limits(struct queue_limits *lim)
if (lim->alignment_offset) {
lim->alignment_offset &= (lim->physical_block_size - 1);
- lim->misaligned = 0;
+ lim->flags &= ~BLK_FLAG_MISALIGNED;
}
+ if (!(lim->features & BLK_FEAT_WRITE_CACHE))
+ lim->features &= ~BLK_FEAT_FUA;
+
+ blk_validate_atomic_write_limits(lim);
+
+ err = blk_validate_integrity_limits(lim);
+ if (err)
+ return err;
return blk_validate_zoned_limits(lim);
}
@@ -250,15 +395,25 @@ int blk_set_default_limits(struct queue_limits *lim)
*/
int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim)
- __releases(q->limits_lock)
{
- int error = blk_validate_limits(lim);
+ int error;
- if (!error) {
- q->limits = *lim;
- if (q->disk)
- blk_apply_bdi_limits(q->disk->bdi, lim);
+ error = blk_validate_limits(lim);
+ if (error)
+ goto out_unlock;
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ if (q->crypto_profile && lim->integrity.tag_size) {
+ pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
+ error = -EINVAL;
+ goto out_unlock;
}
+#endif
+
+ q->limits = *lim;
+ if (q->disk)
+ blk_apply_bdi_limits(q->disk->bdi, lim);
+out_unlock:
mutex_unlock(&q->limits_lock);
return error;
}
@@ -283,204 +438,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
EXPORT_SYMBOL_GPL(queue_limits_set);
/**
- * blk_queue_chunk_sectors - set size of the chunk for this queue
- * @q: the request queue for the device
- * @chunk_sectors: chunk sectors in the usual 512b unit
- *
- * Description:
- * If a driver doesn't want IOs to cross a given chunk size, it can set
- * this limit and prevent merging across chunks. Note that the block layer
- * must accept a page worth of data at any offset. So if the crossing of
- * chunks is a hard limitation in the driver, it must still be prepared
- * to split single page bios.
- **/
-void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
-{
- q->limits.chunk_sectors = chunk_sectors;
-}
-EXPORT_SYMBOL(blk_queue_chunk_sectors);
-
-/**
- * blk_queue_max_discard_sectors - set max sectors for a single discard
- * @q: the request queue for the device
- * @max_discard_sectors: maximum number of sectors to discard
- **/
-void blk_queue_max_discard_sectors(struct request_queue *q,
- unsigned int max_discard_sectors)
-{
- struct queue_limits *lim = &q->limits;
-
- lim->max_hw_discard_sectors = max_discard_sectors;
- lim->max_discard_sectors =
- min(max_discard_sectors, lim->max_user_discard_sectors);
-}
-EXPORT_SYMBOL(blk_queue_max_discard_sectors);
-
-/**
- * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
- * @q: the request queue for the device
- * @max_sectors: maximum number of sectors to secure_erase
- **/
-void blk_queue_max_secure_erase_sectors(struct request_queue *q,
- unsigned int max_sectors)
-{
- q->limits.max_secure_erase_sectors = max_sectors;
-}
-EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
-
-/**
- * blk_queue_max_write_zeroes_sectors - set max sectors for a single
- * write zeroes
- * @q: the request queue for the device
- * @max_write_zeroes_sectors: maximum number of sectors to write per command
- **/
-void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
- unsigned int max_write_zeroes_sectors)
-{
- q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
-}
-EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
-
-/**
- * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
- * @q: the request queue for the device
- * @max_zone_append_sectors: maximum number of sectors to write per command
- *
- * Sets the maximum number of sectors allowed for zone append commands. If
- * Specifying 0 for @max_zone_append_sectors indicates that the queue does
- * not natively support zone append operations and that the block layer must
- * emulate these operations using regular writes.
- **/
-void blk_queue_max_zone_append_sectors(struct request_queue *q,
- unsigned int max_zone_append_sectors)
-{
- unsigned int max_sectors = 0;
-
- if (WARN_ON(!blk_queue_is_zoned(q)))
- return;
-
- if (max_zone_append_sectors) {
- max_sectors = min(q->limits.max_hw_sectors,
- max_zone_append_sectors);
- max_sectors = min(q->limits.chunk_sectors, max_sectors);
-
- /*
- * Signal eventual driver bugs resulting in the max_zone_append
- * sectors limit being 0 due to the chunk_sectors limit (zone
- * size) not set or the max_hw_sectors limit not set.
- */
- WARN_ON_ONCE(!max_sectors);
- }
-
- q->limits.max_zone_append_sectors = max_sectors;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
-
-/**
- * blk_queue_logical_block_size - set logical block size for the queue
- * @q: the request queue for the device
- * @size: the logical block size, in bytes
- *
- * Description:
- * This should be set to the lowest possible block size that the
- * storage device can address. The default of 512 covers most
- * hardware.
- **/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
-{
- struct queue_limits *limits = &q->limits;
-
- limits->logical_block_size = size;
-
- if (limits->discard_granularity < limits->logical_block_size)
- limits->discard_granularity = limits->logical_block_size;
-
- if (limits->physical_block_size < size)
- limits->physical_block_size = size;
-
- if (limits->io_min < limits->physical_block_size)
- limits->io_min = limits->physical_block_size;
-
- limits->max_hw_sectors =
- round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
- limits->max_sectors =
- round_down(limits->max_sectors, size >> SECTOR_SHIFT);
-}
-EXPORT_SYMBOL(blk_queue_logical_block_size);
-
-/**
- * blk_queue_physical_block_size - set physical block size for the queue
- * @q: the request queue for the device
- * @size: the physical block size, in bytes
- *
- * Description:
- * This should be set to the lowest possible sector size that the
- * hardware can operate on without reverting to read-modify-write
- * operations.
- */
-void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
-{
- q->limits.physical_block_size = size;
-
- if (q->limits.physical_block_size < q->limits.logical_block_size)
- q->limits.physical_block_size = q->limits.logical_block_size;
-
- if (q->limits.discard_granularity < q->limits.physical_block_size)
- q->limits.discard_granularity = q->limits.physical_block_size;
-
- if (q->limits.io_min < q->limits.physical_block_size)
- q->limits.io_min = q->limits.physical_block_size;
-}
-EXPORT_SYMBOL(blk_queue_physical_block_size);
-
-/**
- * blk_queue_zone_write_granularity - set zone write granularity for the queue
- * @q: the request queue for the zoned device
- * @size: the zone write granularity size, in bytes
- *
- * Description:
- * This should be set to the lowest possible size allowing to write in
- * sequential zones of a zoned block device.
- */
-void blk_queue_zone_write_granularity(struct request_queue *q,
- unsigned int size)
-{
- if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
- return;
-
- q->limits.zone_write_granularity = size;
-
- if (q->limits.zone_write_granularity < q->limits.logical_block_size)
- q->limits.zone_write_granularity = q->limits.logical_block_size;
-}
-EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
-
-/**
- * blk_queue_alignment_offset - set physical block alignment offset
- * @q: the request queue for the device
- * @offset: alignment offset in bytes
- *
- * Description:
- * Some devices are naturally misaligned to compensate for things like
- * the legacy DOS partition table 63-sector offset. Low-level drivers
- * should call this function for devices whose first sector is not
- * naturally aligned.
- */
-void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
-{
- q->limits.alignment_offset =
- offset & (q->limits.physical_block_size - 1);
- q->limits.misaligned = 0;
-}
-EXPORT_SYMBOL(blk_queue_alignment_offset);
-
-void disk_update_readahead(struct gendisk *disk)
-{
- blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
-}
-EXPORT_SYMBOL_GPL(disk_update_readahead);
-
-/**
* blk_limits_io_min - set minimum request size for a device
* @limits: the queue limits
* @min: smallest I/O size in bytes
@@ -504,26 +461,6 @@ void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
EXPORT_SYMBOL(blk_limits_io_min);
/**
- * blk_queue_io_min - set minimum request size for the queue
- * @q: the request queue for the device
- * @min: smallest I/O size in bytes
- *
- * Description:
- * Storage devices may report a granularity or preferred minimum I/O
- * size which is the smallest request the device can perform without
- * incurring a performance penalty. For disk drives this is often the
- * physical block size. For RAID arrays it is often the stripe chunk
- * size. A properly aligned multiple of minimum_io_size is the
- * preferred request size for workloads where a high number of I/O
- * operations is desired.
- */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
-{
- blk_limits_io_min(&q->limits, min);
-}
-EXPORT_SYMBOL(blk_queue_io_min);
-
-/**
* blk_limits_io_opt - set optimal request size for a device
* @limits: the queue limits
* @opt: smallest I/O size in bytes
@@ -610,14 +547,30 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
{
unsigned int top, bottom, alignment, ret = 0;
+ t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
+
+ /*
+ * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
+ * stacking driver and all underlying devices. The stacking driver sets
+ * the flags before stacking the limits, and this will clear the flags
+ * if any of the underlying devices does not support it.
+ */
+ if (!(b->features & BLK_FEAT_NOWAIT))
+ t->features &= ~BLK_FEAT_NOWAIT;
+ if (!(b->features & BLK_FEAT_POLL))
+ t->features &= ~BLK_FEAT_POLL;
+
+ t->flags |= (b->flags & BLK_FLAG_MISALIGNED);
+
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_user_sectors = min_not_zero(t->max_user_sectors,
+ b->max_user_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
b->max_write_zeroes_sectors);
t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
queue_limits_max_zone_append_sectors(b));
- t->bounce = max(t->bounce, b->bounce);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
@@ -633,8 +586,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
- t->misaligned |= b->misaligned;
-
alignment = queue_limit_alignment_offset(b, start);
/* Bottom device has different alignment. Check that it is
@@ -648,7 +599,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
/* Verify that top and bottom intervals line up */
if (max(top, bottom) % min(top, bottom)) {
- t->misaligned = 1;
+ t->flags |= BLK_FLAG_MISALIGNED;
ret = -1;
}
}
@@ -670,42 +621,38 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
t->physical_block_size = t->logical_block_size;
- t->misaligned = 1;
+ t->flags |= BLK_FLAG_MISALIGNED;
ret = -1;
}
/* Minimum I/O a multiple of the physical block size? */
if (t->io_min & (t->physical_block_size - 1)) {
t->io_min = t->physical_block_size;
- t->misaligned = 1;
+ t->flags |= BLK_FLAG_MISALIGNED;
ret = -1;
}
/* Optimal I/O a multiple of the physical block size? */
if (t->io_opt & (t->physical_block_size - 1)) {
t->io_opt = 0;
- t->misaligned = 1;
+ t->flags |= BLK_FLAG_MISALIGNED;
ret = -1;
}
/* chunk_sectors a multiple of the physical block size? */
if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
t->chunk_sectors = 0;
- t->misaligned = 1;
+ t->flags |= BLK_FLAG_MISALIGNED;
ret = -1;
}
- t->raid_partial_stripes_expensive =
- max(t->raid_partial_stripes_expensive,
- b->raid_partial_stripes_expensive);
-
/* Find lowest common alignment_offset */
t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
% max(t->physical_block_size, t->io_min);
/* Verify that new alignment_offset is on a logical block boundary */
if (t->alignment_offset & (t->logical_block_size - 1)) {
- t->misaligned = 1;
+ t->flags |= BLK_FLAG_MISALIGNED;
ret = -1;
}
@@ -717,16 +664,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);
- if (t->discard_granularity != 0 &&
- t->discard_alignment != alignment) {
- top = t->discard_granularity + t->discard_alignment;
- bottom = b->discard_granularity + alignment;
-
- /* Verify that top and bottom intervals line up */
- if ((max(top, bottom) % min(top, bottom)) != 0)
- t->discard_misaligned = 1;
- }
-
t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
b->max_discard_sectors);
t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
@@ -740,8 +677,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_secure_erase_sectors);
t->zone_write_granularity = max(t->zone_write_granularity,
b->zone_write_granularity);
- t->zoned = max(t->zoned, b->zoned);
- if (!t->zoned) {
+ if (!(t->features & BLK_FEAT_ZONED)) {
t->zone_write_granularity = 0;
t->max_zone_append_sectors = 0;
}
@@ -775,21 +711,65 @@ void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
/**
- * blk_queue_update_dma_pad - update pad mask
- * @q: the request queue for the device
- * @mask: pad mask
+ * queue_limits_stack_integrity - stack integrity profile
+ * @t: target queue limits
+ * @b: base queue limits
*
- * Update dma pad mask.
+ * Check if the integrity profile in @b can be stacked into the
+ * target @t. Stacking is possible if either:
*
- * Appending pad buffer to a request modifies the last entry of a
- * scatter list such that it includes the pad buffer.
- **/
-void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
-{
- if (mask > q->dma_pad_mask)
- q->dma_pad_mask = mask;
+ * a) @t does not have any integrity information stacked into it yet
+ * b) the integrity profile in @b is identical to the one in @t
+ *
+ * If @b can be stacked into @t, return %true. Else return %false and clear the
+ * integrity information in @t.
+ */
+bool queue_limits_stack_integrity(struct queue_limits *t,
+ struct queue_limits *b)
+{
+ struct blk_integrity *ti = &t->integrity;
+ struct blk_integrity *bi = &b->integrity;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return true;
+
+ if (!ti->tuple_size) {
+ /* inherit the settings from the first underlying device */
+ if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
+ ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
+ (bi->flags & BLK_INTEGRITY_REF_TAG);
+ ti->csum_type = bi->csum_type;
+ ti->tuple_size = bi->tuple_size;
+ ti->pi_offset = bi->pi_offset;
+ ti->interval_exp = bi->interval_exp;
+ ti->tag_size = bi->tag_size;
+ goto done;
+ }
+ if (!bi->tuple_size)
+ goto done;
+ }
+
+ if (ti->tuple_size != bi->tuple_size)
+ goto incompatible;
+ if (ti->interval_exp != bi->interval_exp)
+ goto incompatible;
+ if (ti->tag_size != bi->tag_size)
+ goto incompatible;
+ if (ti->csum_type != bi->csum_type)
+ goto incompatible;
+ if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
+ (bi->flags & BLK_INTEGRITY_REF_TAG))
+ goto incompatible;
+
+done:
+ ti->flags |= BLK_INTEGRITY_STACKED;
+ return true;
+
+incompatible:
+ memset(ti, 0, sizeof(*ti));
+ return false;
}
-EXPORT_SYMBOL(blk_queue_update_dma_pad);
+EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
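A hypothetical stacking-driver sketch of how this helper might be used; the member-device iteration and the wrapper are invented, while queue_limits_stack_integrity() and bdev_get_queue() are taken from the code above.

#include <linux/blkdev.h>

static void example_stack_integrity(struct queue_limits *lim,
                                    struct block_device **members, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                struct request_queue *q = bdev_get_queue(members[i]);

                if (!queue_limits_stack_integrity(lim, &q->limits)) {
                        /* lim->integrity has already been cleared */
                        pr_warn("member %d has an incompatible integrity profile, disabling PI\n",
                                i);
                        break;
                }
        }
}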
/**
* blk_set_queue_depth - tell the block layer about the device queue depth
@@ -804,54 +784,11 @@ void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
}
EXPORT_SYMBOL(blk_set_queue_depth);
-/**
- * blk_queue_write_cache - configure queue's write cache
- * @q: the request queue for the device
- * @wc: write back cache on or off
- * @fua: device supports FUA writes, if true
- *
- * Tell the block layer about the write cache of @q.
- */
-void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
-{
- if (wc) {
- blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
- blk_queue_flag_set(QUEUE_FLAG_WC, q);
- } else {
- blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
- blk_queue_flag_clear(QUEUE_FLAG_WC, q);
- }
- if (fua)
- blk_queue_flag_set(QUEUE_FLAG_FUA, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
-}
-EXPORT_SYMBOL_GPL(blk_queue_write_cache);
-
-/**
- * disk_set_zoned - inidicate a zoned device
- * @disk: gendisk to configure
- */
-void disk_set_zoned(struct gendisk *disk)
-{
- struct request_queue *q = disk->queue;
-
- WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
-
- /*
- * Set the zone write granularity to the device logical block
- * size by default. The driver can change this value if needed.
- */
- q->limits.zoned = true;
- blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
-}
-EXPORT_SYMBOL_GPL(disk_set_zoned);
-
int bdev_alignment_offset(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q->limits.misaligned)
+ if (q->limits.flags & BLK_FLAG_MISALIGNED)
return -1;
if (bdev_is_partition(bdev))
return queue_limit_alignment_offset(&q->limits,
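With blk_queue_write_cache() gone, a driver describes its volatile write cache and FUA support through the limits API instead. A minimal sketch, assuming the BLK_FEAT_WRITE_CACHE/BLK_FEAT_FUA features and the start_update/commit_update pattern shown in this series; the wrapper function itself is hypothetical.

#include <linux/blkdev.h>

static int example_configure_cache(struct gendisk *disk, bool wc, bool fua)
{
        struct queue_limits lim;

        lim = queue_limits_start_update(disk->queue);
        if (wc)
                lim.features |= BLK_FEAT_WRITE_CACHE;
        else
                lim.features &= ~BLK_FEAT_WRITE_CACHE;
        if (fua)
                lim.features |= BLK_FEAT_FUA;
        else
                lim.features &= ~BLK_FEAT_FUA;

        /* blk_validate_limits() clears FUA when the write cache is off */
        return queue_limits_commit_update(disk->queue, &lim);
}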
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 17e1eb4ec7e2..5d7f18ba436d 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -64,7 +64,6 @@ struct blk_stat_callback {
struct blk_queue_stats *blk_alloc_queue_stats(void);
void blk_free_queue_stats(struct blk_queue_stats *);
-bool blk_stats_alloc_enable(struct request_queue *q);
void blk_stat_add(struct request *rq, u64 now);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index f0f9314ab65c..60116d13cb80 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -22,8 +22,8 @@
struct queue_sysfs_entry {
struct attribute attr;
- ssize_t (*show)(struct request_queue *, char *);
- ssize_t (*store)(struct request_queue *, const char *, size_t);
+ ssize_t (*show)(struct gendisk *disk, char *page);
+ ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
};
static ssize_t
@@ -47,18 +47,18 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
-static ssize_t queue_requests_show(struct request_queue *q, char *page)
+static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
- return queue_var_show(q->nr_requests, page);
+ return queue_var_show(disk->queue->nr_requests, page);
}
static ssize_t
-queue_requests_store(struct request_queue *q, const char *page, size_t count)
+queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
unsigned long nr;
int ret, err;
- if (!queue_is_mq(q))
+ if (!queue_is_mq(disk->queue))
return -EINVAL;
ret = queue_var_store(&nr, page, count);
@@ -68,111 +68,91 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
- err = blk_mq_update_nr_requests(q, nr);
+ err = blk_mq_update_nr_requests(disk->queue, nr);
if (err)
return err;
return ret;
}
-static ssize_t queue_ra_show(struct request_queue *q, char *page)
+static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
- unsigned long ra_kb;
-
- if (!q->disk)
- return -EINVAL;
- ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
- return queue_var_show(ra_kb, page);
+ return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
}
static ssize_t
-queue_ra_store(struct request_queue *q, const char *page, size_t count)
+queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
unsigned long ra_kb;
ssize_t ret;
- if (!q->disk)
- return -EINVAL;
ret = queue_var_store(&ra_kb, page, count);
if (ret < 0)
return ret;
- q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+ disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
-static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-{
- int max_sectors_kb = queue_max_sectors(q) >> 1;
-
- return queue_var_show(max_sectors_kb, page);
-}
-
-static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-{
- return queue_var_show(queue_max_segments(q), page);
-}
-
-static ssize_t queue_max_discard_segments_show(struct request_queue *q,
- char *page)
-{
- return queue_var_show(queue_max_discard_segments(q), page);
-}
-
-static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
-{
- return queue_var_show(q->limits.max_integrity_segments, page);
-}
-
-static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
-{
- return queue_var_show(queue_max_segment_size(q), page);
-}
-
-static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
-{
- return queue_var_show(queue_logical_block_size(q), page);
-}
-
-static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
-{
- return queue_var_show(queue_physical_block_size(q), page);
-}
-
-static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
-{
- return queue_var_show(q->limits.chunk_sectors, page);
-}
-
-static ssize_t queue_io_min_show(struct request_queue *q, char *page)
-{
- return queue_var_show(queue_io_min(q), page);
+#define QUEUE_SYSFS_LIMIT_SHOW(_field) \
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
+{ \
+ return queue_var_show(disk->queue->limits._field, page); \
+}
+
+QUEUE_SYSFS_LIMIT_SHOW(max_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
+QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
+QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
+QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
+QUEUE_SYSFS_LIMIT_SHOW(io_min)
+QUEUE_SYSFS_LIMIT_SHOW(io_opt)
+QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
+QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
+QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
+QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
+QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
+QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)
+
+#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field) \
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
+{ \
+ return sprintf(page, "%llu\n", \
+ (unsigned long long)disk->queue->limits._field << \
+ SECTOR_SHIFT); \
}
-static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
-{
- return queue_var_show(queue_io_opt(q), page);
-}
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
-static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
-{
- return queue_var_show(q->limits.discard_granularity, page);
+#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field) \
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
+{ \
+ return queue_var_show(disk->queue->limits._field >> 1, page); \
}
-static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
-{
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
- return sprintf(page, "%llu\n",
- (unsigned long long)q->limits.max_hw_discard_sectors << 9);
+#define QUEUE_SYSFS_SHOW_CONST(_name, _val) \
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
+{ \
+ return sprintf(page, "%d\n", _val); \
}
-static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
-{
- return sprintf(page, "%llu\n",
- (unsigned long long)q->limits.max_discard_sectors << 9);
-}
+/* deprecated fields */
+QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
+QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
+QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)
-static ssize_t queue_discard_max_store(struct request_queue *q,
- const char *page, size_t count)
+static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
+ const char *page, size_t count)
{
unsigned long max_discard_bytes;
struct queue_limits lim;
@@ -183,54 +163,34 @@ static ssize_t queue_discard_max_store(struct request_queue *q,
if (ret < 0)
return ret;
- if (max_discard_bytes & (q->limits.discard_granularity - 1))
+ if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
return -EINVAL;
if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
return -EINVAL;
- blk_mq_freeze_queue(q);
- lim = queue_limits_start_update(q);
+ lim = queue_limits_start_update(disk->queue);
lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
- err = queue_limits_commit_update(q, &lim);
- blk_mq_unfreeze_queue(q);
-
+ err = queue_limits_commit_update(disk->queue, &lim);
if (err)
return err;
return ret;
}
-static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
-{
- return queue_var_show(0, page);
-}
-
-static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
-{
- return queue_var_show(0, page);
-}
-
-static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
+/*
+ * For zone append, queue_max_zone_append_sectors() does not just return the
+ * underlying queue limit, but actually contains a calculation. Because of
+ * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here.
+ */
+static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page)
{
return sprintf(page, "%llu\n",
- (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
-}
-
-static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
- char *page)
-{
- return queue_var_show(queue_zone_write_granularity(q), page);
-}
-
-static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
-{
- unsigned long long max_sectors = queue_max_zone_append_sectors(q);
-
- return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
+ (u64)queue_max_zone_append_sectors(disk->queue) <<
+ SECTOR_SHIFT);
}
static ssize_t
-queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
{
unsigned long max_sectors_kb;
struct queue_limits lim;
@@ -241,94 +201,83 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
- blk_mq_freeze_queue(q);
- lim = queue_limits_start_update(q);
+ lim = queue_limits_start_update(disk->queue);
lim.max_user_sectors = max_sectors_kb << 1;
- err = queue_limits_commit_update(q, &lim);
- blk_mq_unfreeze_queue(q);
+ err = queue_limits_commit_update(disk->queue, &lim);
if (err)
return err;
return ret;
}
-static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
+ size_t count, blk_features_t feature)
{
- int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
-
- return queue_var_show(max_hw_sectors_kb, page);
-}
+ struct queue_limits lim;
+ unsigned long val;
+ ssize_t ret;
-static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
-{
- return queue_var_show(q->limits.virt_boundary_mask, page);
-}
+ ret = queue_var_store(&val, page, count);
+ if (ret < 0)
+ return ret;
-static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
-{
- return queue_var_show(queue_dma_alignment(q), page);
+ lim = queue_limits_start_update(disk->queue);
+ if (val)
+ lim.features |= feature;
+ else
+ lim.features &= ~feature;
+ ret = queue_limits_commit_update(disk->queue, &lim);
+ if (ret)
+ return ret;
+ return count;
}
-#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
-static ssize_t \
-queue_##name##_show(struct request_queue *q, char *page) \
+#define QUEUE_SYSFS_FEATURE(_name, _feature) \
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
{ \
- int bit; \
- bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
- return queue_var_show(neg ? !bit : bit, page); \
+ return sprintf(page, "%u\n", \
+ !!(disk->queue->limits.features & _feature)); \
} \
-static ssize_t \
-queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
+static ssize_t queue_##_name##_store(struct gendisk *disk, \
+ const char *page, size_t count) \
{ \
- unsigned long val; \
- ssize_t ret; \
- ret = queue_var_store(&val, page, count); \
- if (ret < 0) \
- return ret; \
- if (neg) \
- val = !val; \
- \
- if (val) \
- blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
- else \
- blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
- return ret; \
-}
-
-QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
-#undef QUEUE_SYSFS_BIT_FNS
-
-static ssize_t queue_zoned_show(struct request_queue *q, char *page)
-{
- if (blk_queue_is_zoned(q))
- return sprintf(page, "host-managed\n");
- return sprintf(page, "none\n");
+ return queue_feature_store(disk, page, count, _feature); \
}
-static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
-{
- return queue_var_show(disk_nr_zones(q->disk), page);
+QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
+QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
+QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
+QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);
+
+#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature) \
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
+{ \
+ return sprintf(page, "%u\n", \
+ !!(disk->queue->limits.features & _feature)); \
}
-static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
+QUEUE_SYSFS_FEATURE_SHOW(poll, BLK_FEAT_POLL);
+QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
+QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);
+
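For reference, QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL) above expands to roughly the following pair of handlers (whitespace adjusted); the store side funnels into queue_feature_store(), which flips the bit through a queue_limits update.

static ssize_t queue_rotational_show(struct gendisk *disk, char *page)
{
        return sprintf(page, "%u\n",
                       !!(disk->queue->limits.features & BLK_FEAT_ROTATIONAL));
}

static ssize_t queue_rotational_store(struct gendisk *disk,
                                      const char *page, size_t count)
{
        return queue_feature_store(disk, page, count, BLK_FEAT_ROTATIONAL);
}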
+static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
- return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
+ if (blk_queue_is_zoned(disk->queue))
+ return sprintf(page, "host-managed\n");
+ return sprintf(page, "none\n");
}
-static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
+static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
- return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
+ return queue_var_show(disk_nr_zones(disk), page);
}
-static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
- return queue_var_show((blk_queue_nomerges(q) << 1) |
- blk_queue_noxmerges(q), page);
+ return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
+ blk_queue_noxmerges(disk->queue), page);
}
-static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
size_t count)
{
unsigned long nm;
@@ -337,29 +286,30 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
if (ret < 0)
return ret;
- blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
- blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, disk->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, disk->queue);
if (nm == 2)
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, disk->queue);
else if (nm)
- blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, disk->queue);
return ret;
}
-static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
- bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
- bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
+ bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
+ bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);
return queue_var_show(set << force, page);
}
static ssize_t
-queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
+ struct request_queue *q = disk->queue;
unsigned long val;
ret = queue_var_store(&val, page, count);
@@ -380,38 +330,28 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
return ret;
}
-static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
-{
- return sprintf(page, "%d\n", -1);
-}
-
-static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
+static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
size_t count)
{
return count;
}
-static ssize_t queue_poll_show(struct request_queue *q, char *page)
-{
- return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
-}
-
-static ssize_t queue_poll_store(struct request_queue *q, const char *page,
+static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
size_t count)
{
- if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ if (!(disk->queue->limits.features & BLK_FEAT_POLL))
return -EINVAL;
pr_info_ratelimited("writes to the poll attribute are ignored.\n");
pr_info_ratelimited("please use driver specific parameters instead.\n");
return count;
}
-static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
+static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
- return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
+ return sprintf(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
}
-static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
+static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
size_t count)
{
unsigned int val;
@@ -421,46 +361,45 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
if (err || val == 0)
return -EINVAL;
- blk_queue_rq_timeout(q, msecs_to_jiffies(val));
+ blk_queue_rq_timeout(disk->queue, msecs_to_jiffies(val));
return count;
}
-static ssize_t queue_wc_show(struct request_queue *q, char *page)
+static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
- if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ if (blk_queue_write_cache(disk->queue))
return sprintf(page, "write back\n");
-
return sprintf(page, "write through\n");
}
-static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
size_t count)
{
+ struct queue_limits lim;
+ bool disable;
+ int err;
+
if (!strncmp(page, "write back", 10)) {
- if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
- return -EINVAL;
- blk_queue_flag_set(QUEUE_FLAG_WC, q);
+ disable = false;
} else if (!strncmp(page, "write through", 13) ||
- !strncmp(page, "none", 4)) {
- blk_queue_flag_clear(QUEUE_FLAG_WC, q);
+ !strncmp(page, "none", 4)) {
+ disable = true;
} else {
return -EINVAL;
}
+ lim = queue_limits_start_update(disk->queue);
+ if (disable)
+ lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
+ else
+ lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
+ err = queue_limits_commit_update(disk->queue, &lim);
+ if (err)
+ return err;
return count;
}
-static ssize_t queue_fua_show(struct request_queue *q, char *page)
-{
- return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
-}
-
-static ssize_t queue_dax_show(struct request_queue *q, char *page)
-{
- return queue_var_show(blk_queue_dax(q), page);
-}
-
#define QUEUE_RO_ENTRY(_prefix, _name) \
static struct queue_sysfs_entry _prefix##_entry = { \
.attr = { .name = _name, .mode = 0444 }, \
@@ -491,12 +430,18 @@ QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
-QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
-QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
+QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
+QUEUE_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
+QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
+QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
+ "atomic_write_boundary_bytes");
+QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
+QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
+
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
-QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
+QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
@@ -522,9 +467,9 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.show = queue_logical_block_size_show,
};
-QUEUE_RW_ENTRY(queue_nonrot, "rotational");
+QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
-QUEUE_RW_ENTRY(queue_random, "add_random");
+QUEUE_RW_ENTRY(queue_add_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
#ifdef CONFIG_BLK_WBT
@@ -541,20 +486,22 @@ static ssize_t queue_var_store64(s64 *var, const char *page)
return 0;
}
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
- if (!wbt_rq_qos(q))
+ if (!wbt_rq_qos(disk->queue))
return -EINVAL;
- if (wbt_disabled(q))
+ if (wbt_disabled(disk->queue))
return sprintf(page, "0\n");
- return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+ return sprintf(page, "%llu\n",
+ div_u64(wbt_get_min_lat(disk->queue), 1000));
}
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
size_t count)
{
+ struct request_queue *q = disk->queue;
struct rq_qos *rqos;
ssize_t ret;
s64 val;
@@ -567,7 +514,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
rqos = wbt_rq_qos(q);
if (!rqos) {
- ret = wbt_init(q->disk);
+ ret = wbt_init(disk);
if (ret)
return ret;
}
@@ -585,13 +532,11 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
* ends up either enabling or disabling wbt completely. We can't
* have IO inflight if that happens.
*/
- blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
wbt_set_min_lat(q, val);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
return count;
}
@@ -615,14 +560,18 @@ static struct attribute *queue_attrs[] = {
&queue_io_min_entry.attr,
&queue_io_opt_entry.attr,
&queue_discard_granularity_entry.attr,
- &queue_discard_max_entry.attr,
- &queue_discard_max_hw_entry.attr,
+ &queue_max_discard_sectors_entry.attr,
+ &queue_max_hw_discard_sectors_entry.attr,
&queue_discard_zeroes_data_entry.attr,
+ &queue_atomic_write_max_sectors_entry.attr,
+ &queue_atomic_write_boundary_sectors_entry.attr,
+ &queue_atomic_write_unit_min_entry.attr,
+ &queue_atomic_write_unit_max_entry.attr,
&queue_write_same_max_entry.attr,
- &queue_write_zeroes_max_entry.attr,
+ &queue_max_write_zeroes_sectors_entry.attr,
&queue_zone_append_max_entry.attr,
&queue_zone_write_granularity_entry.attr,
- &queue_nonrot_entry.attr,
+ &queue_rotational_entry.attr,
&queue_zoned_entry.attr,
&queue_nr_zones_entry.attr,
&queue_max_open_zones_entry.attr,
@@ -630,7 +579,7 @@ static struct attribute *queue_attrs[] = {
&queue_nomerges_entry.attr,
&queue_iostats_entry.attr,
&queue_stable_writes_entry.attr,
- &queue_random_entry.attr,
+ &queue_add_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_fua_entry.attr,
@@ -699,14 +648,13 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct queue_sysfs_entry *entry = to_queue(attr);
struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
- struct request_queue *q = disk->queue;
ssize_t res;
if (!entry->show)
return -EIO;
- mutex_lock(&q->sysfs_lock);
- res = entry->show(q, page);
- mutex_unlock(&q->sysfs_lock);
+ mutex_lock(&disk->queue->sysfs_lock);
+ res = entry->show(disk, page);
+ mutex_unlock(&disk->queue->sysfs_lock);
return res;
}
@@ -722,9 +670,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
if (!entry->store)
return -EIO;
+ blk_mq_freeze_queue(q);
mutex_lock(&q->sysfs_lock);
- res = entry->store(q, page, length);
+ res = entry->store(disk, page, length);
mutex_unlock(&q->sysfs_lock);
+ blk_mq_unfreeze_queue(q);
return res;
}
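
The blk-sysfs.c hunks above replace per-queue flag attributes (rotational, add_random, iostats, stable_writes) with tests on a feature bitmask kept in the queue limits. As a rough userspace sketch of the show-side pattern, with BLK_FEAT_* values and the queue_limits layout as illustrative stand-ins rather than the kernel definitions:

/* Userspace model of a feature-bit "show" attribute (illustrative only). */
#include <stdio.h>

enum {						/* assumed bit layout */
	BLK_FEAT_ROTATIONAL	= 1u << 0,
	BLK_FEAT_ADD_RANDOM	= 1u << 1,
	BLK_FEAT_IO_STAT	= 1u << 2,
	BLK_FEAT_STABLE_WRITES	= 1u << 3,
};

struct queue_limits { unsigned int features; };

static int feature_show(const struct queue_limits *lim, unsigned int feature,
			char *page, size_t len)
{
	/* mirrors sprintf(page, "%u\n", !!(features & feature)) */
	return snprintf(page, len, "%u\n", !!(lim->features & feature));
}

int main(void)
{
	struct queue_limits lim = { .features = BLK_FEAT_IO_STAT };
	char page[8];

	feature_show(&lim, BLK_FEAT_IO_STAT, page, sizeof(page));
	printf("iostats: %s", page);		/* prints 1 */
	feature_show(&lim, BLK_FEAT_ROTATIONAL, page, sizeof(page));
	printf("rotational: %s", page);		/* prints 0 */
	return 0;
}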
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0be180f9a789..dc6140fa3de0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -704,6 +704,9 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
/* Calc approx time to dispatch */
jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
+
+ /* make sure at least one IO can be dispatched after waiting */
+ jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1);
return jiffy_wait;
}
@@ -1399,32 +1402,32 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
bps_dft = U64_MAX;
iops_dft = UINT_MAX;
- if (tg->bps_conf[READ] == bps_dft &&
- tg->bps_conf[WRITE] == bps_dft &&
- tg->iops_conf[READ] == iops_dft &&
- tg->iops_conf[WRITE] == iops_dft)
+ if (tg->bps[READ] == bps_dft &&
+ tg->bps[WRITE] == bps_dft &&
+ tg->iops[READ] == iops_dft &&
+ tg->iops[WRITE] == iops_dft)
return 0;
seq_printf(sf, "%s", dname);
- if (tg->bps_conf[READ] == U64_MAX)
+ if (tg->bps[READ] == U64_MAX)
seq_printf(sf, " rbps=max");
else
- seq_printf(sf, " rbps=%llu", tg->bps_conf[READ]);
+ seq_printf(sf, " rbps=%llu", tg->bps[READ]);
- if (tg->bps_conf[WRITE] == U64_MAX)
+ if (tg->bps[WRITE] == U64_MAX)
seq_printf(sf, " wbps=max");
else
- seq_printf(sf, " wbps=%llu", tg->bps_conf[WRITE]);
+ seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
- if (tg->iops_conf[READ] == UINT_MAX)
+ if (tg->iops[READ] == UINT_MAX)
seq_printf(sf, " riops=max");
else
- seq_printf(sf, " riops=%u", tg->iops_conf[READ]);
+ seq_printf(sf, " riops=%u", tg->iops[READ]);
- if (tg->iops_conf[WRITE] == UINT_MAX)
+ if (tg->iops[WRITE] == UINT_MAX)
seq_printf(sf, " wiops=max");
else
- seq_printf(sf, " wiops=%u", tg->iops_conf[WRITE]);
+ seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
seq_printf(sf, "\n");
return 0;
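
The max() added to tg_within_iops_limit() guarantees the computed wait covers at least one IO's worth of budget. A small userspace model of that clamp, with HZ and the IOPS limit passed as plain parameters instead of the kernel's macros and throtl fields:

/* Model of the minimum-wait clamp added to tg_within_iops_limit(). */
#include <stdio.h>

static unsigned long min_iops_wait(unsigned long jiffy_wait,
				   unsigned int hz, unsigned int iops_limit)
{
	unsigned long floor = hz / iops_limit + 1;	/* >= one IO of budget */

	return jiffy_wait > floor ? jiffy_wait : floor;
}

int main(void)
{
	/*
	 * With HZ=1000 and a 250 IOPS limit, one IO needs roughly 4 jiffies
	 * of budget, so a computed wait of 1 jiffy is raised to 5.
	 */
	printf("%lu\n", min_iops_wait(1, 1000, 250));	/* 5 */
	printf("%lu\n", min_iops_wait(9, 1000, 250));	/* 9 */
	return 0;
}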
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 393c3d134b96..4d9ef5abdf21 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -95,15 +95,11 @@ struct throtl_grp {
bool has_rules_bps[2];
bool has_rules_iops[2];
- /* internally used bytes per second rate limits */
+ /* bytes per second rate limits */
uint64_t bps[2];
- /* user configured bps limits */
- uint64_t bps_conf[2];
- /* internally used IOPS limits */
+ /* IOPS limits */
unsigned int iops[2];
- /* user configured IOPS limits */
- unsigned int iops_conf[2];
/* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2];
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 64472134dd26..6dfc659d22e2 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -37,7 +37,7 @@
enum wbt_flags {
WBT_TRACKED = 1, /* write, tracked for throttling */
WBT_READ = 2, /* read */
- WBT_KSWAPD = 4, /* write, from kswapd */
+ WBT_SWAP = 4, /* write, from swap_writepage() */
WBT_DISCARD = 8, /* discard */
WBT_NR_BITS = 4, /* number of bits */
@@ -45,7 +45,7 @@ enum wbt_flags {
enum {
WBT_RWQ_BG = 0,
- WBT_RWQ_KSWAPD,
+ WBT_RWQ_SWAP,
WBT_RWQ_DISCARD,
WBT_NUM_RWQ,
};
@@ -172,8 +172,8 @@ static bool wb_recent_wait(struct rq_wb *rwb)
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
enum wbt_flags wb_acct)
{
- if (wb_acct & WBT_KSWAPD)
- return &rwb->rq_wait[WBT_RWQ_KSWAPD];
+ if (wb_acct & WBT_SWAP)
+ return &rwb->rq_wait[WBT_RWQ_SWAP];
else if (wb_acct & WBT_DISCARD)
return &rwb->rq_wait[WBT_RWQ_DISCARD];
@@ -206,8 +206,8 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
*/
if (wb_acct & WBT_DISCARD)
limit = rwb->wb_background;
- else if (test_bit(QUEUE_FLAG_WC, &rwb->rqos.disk->queue->queue_flags) &&
- !wb_recent_wait(rwb))
+ else if (blk_queue_write_cache(rwb->rqos.disk->queue) &&
+ !wb_recent_wait(rwb))
limit = 0;
else
limit = rwb->wb_normal;
@@ -528,7 +528,7 @@ static bool close_io(struct rq_wb *rwb)
time_before(now, rwb->last_comp + HZ / 10);
}
-#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
+#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO | REQ_SWAP)
static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
@@ -539,13 +539,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
/*
* At this point we know it's a buffered write. If this is
- * kswapd trying to free memory, or REQ_SYNC is set, then
+ * swap trying to free memory, or REQ_SYNC is set, then
* it's WB_SYNC_ALL writeback, and we'll use the max limit for
* that. If the write is marked as a background write, then use
* the idle limit, or go to normal if we haven't had competing
* IO for a bit.
*/
- if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
+ if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb))
limit = rwb->rq_depth.max_depth;
else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
/*
@@ -622,8 +622,8 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
if (bio_op(bio) == REQ_OP_READ) {
flags = WBT_READ;
} else if (wbt_should_throttle(bio)) {
- if (current_is_kswapd())
- flags |= WBT_KSWAPD;
+ if (bio->bi_opf & REQ_SWAP)
+ flags |= WBT_SWAP;
if (bio_op(bio) == REQ_OP_DISCARD)
flags |= WBT_DISCARD;
flags |= WBT_TRACKED;
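
bio_to_wbt_flags() now keys swap writeback off REQ_SWAP in the bio's operation flags instead of checking whether the submitting task is kswapd. A standalone sketch of just the throttled-write branch of that classification (reads still get WBT_READ in the kernel); the REQ_* and WBT_* values below are made-up placeholders:

/* Sketch of wbt flag classification keyed on the bio op flags. */
#include <stdio.h>

enum {					/* illustrative values only */
	REQ_SWAP	= 1u << 0,
	REQ_DISCARD	= 1u << 1,
};

enum {
	WBT_TRACKED	= 1u << 0,	/* write, tracked for throttling */
	WBT_SWAP	= 1u << 1,	/* write, from swap_writepage() */
	WBT_DISCARD	= 1u << 2,
};

static unsigned int classify_write(unsigned int opf)
{
	unsigned int flags = WBT_TRACKED;

	if (opf & REQ_SWAP)
		flags |= WBT_SWAP;
	if (opf & REQ_DISCARD)
		flags |= WBT_DISCARD;
	return flags;
}

int main(void)
{
	printf("%#x\n", classify_write(REQ_SWAP)); /* WBT_TRACKED | WBT_SWAP */
	return 0;
}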
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 03aa4eead39e..af19296fa50d 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -116,24 +116,6 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
/**
- * bdev_nr_zones - Get number of zones
- * @bdev: Target device
- *
- * Return the total number of zones of a zoned block device. For a block
- * device without zone capabilities, the number of zones is always 0.
- */
-unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- sector_t zone_sectors = bdev_zone_sectors(bdev);
-
- if (!bdev_is_zoned(bdev))
- return 0;
- return (bdev_nr_sectors(bdev) + zone_sectors - 1) >>
- ilog2(zone_sectors);
-}
-EXPORT_SYMBOL_GPL(bdev_nr_zones);
-
-/**
* blkdev_report_zones - Get zones information
* @bdev: Target block device
* @sector: Sector from which to report zones
@@ -168,77 +150,6 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
-static inline unsigned long *blk_alloc_zone_bitmap(int node,
- unsigned int nr_zones)
-{
- return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
- GFP_NOIO, node);
-}
-
-static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
-{
- /*
- * For an all-zones reset, ignore conventional, empty, read-only
- * and offline zones.
- */
- switch (zone->cond) {
- case BLK_ZONE_COND_NOT_WP:
- case BLK_ZONE_COND_EMPTY:
- case BLK_ZONE_COND_READONLY:
- case BLK_ZONE_COND_OFFLINE:
- return 0;
- default:
- set_bit(idx, (unsigned long *)data);
- return 0;
- }
-}
-
-static int blkdev_zone_reset_all_emulated(struct block_device *bdev)
-{
- struct gendisk *disk = bdev->bd_disk;
- sector_t capacity = bdev_nr_sectors(bdev);
- sector_t zone_sectors = bdev_zone_sectors(bdev);
- unsigned long *need_reset;
- struct bio *bio = NULL;
- sector_t sector = 0;
- int ret;
-
- need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
- if (!need_reset)
- return -ENOMEM;
-
- ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
- blk_zone_need_reset_cb, need_reset);
- if (ret < 0)
- goto out_free_need_reset;
-
- ret = 0;
- while (sector < capacity) {
- if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
- sector += zone_sectors;
- continue;
- }
-
- bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
- GFP_KERNEL);
- bio->bi_iter.bi_sector = sector;
- sector += zone_sectors;
-
- /* This may take a while, so be nice to others */
- cond_resched();
- }
-
- if (bio) {
- ret = submit_bio_wait(bio);
- bio_put(bio);
- }
-
-out_free_need_reset:
- kfree(need_reset);
- return ret;
-}
-
static int blkdev_zone_reset_all(struct block_device *bdev)
{
struct bio bio;
@@ -265,7 +176,6 @@ static int blkdev_zone_reset_all(struct block_device *bdev)
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sector, sector_t nr_sectors)
{
- struct request_queue *q = bdev_get_queue(bdev);
sector_t zone_sectors = bdev_zone_sectors(bdev);
sector_t capacity = bdev_nr_sectors(bdev);
sector_t end_sector = sector + nr_sectors;
@@ -293,16 +203,11 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
return -EINVAL;
/*
- * In the case of a zone reset operation over all zones,
- * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
- * command. For other devices, we emulate this command behavior by
- * identifying the zones needing a reset.
+ * In the case of a zone reset operation over all zones, use
+ * REQ_OP_ZONE_RESET_ALL.
*/
- if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
- if (!blk_queue_zone_resetall(q))
- return blkdev_zone_reset_all_emulated(bdev);
+ if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity)
return blkdev_zone_reset_all(bdev);
- }
while (sector < end_sector) {
bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
@@ -450,6 +355,25 @@ static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
+static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
+{
+ return zone->start + zone->len >= get_capacity(disk);
+}
+
+static bool disk_zone_is_full(struct gendisk *disk,
+ unsigned int zno, unsigned int offset_in_zone)
+{
+ if (zno < disk->nr_zones - 1)
+ return offset_in_zone >= disk->zone_capacity;
+ return offset_in_zone >= disk->last_zone_capacity;
+}
+
+static bool disk_zone_wplug_is_full(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
+}
+
static bool disk_insert_zone_wplug(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
@@ -543,7 +467,7 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
return false;
/* We can remove zone write plugs for zones that are empty or full. */
- return !zwplug->wp_offset || zwplug->wp_offset >= disk->zone_capacity;
+ return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
}
static void disk_remove_zone_wplug(struct gendisk *disk,
@@ -664,13 +588,12 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
- unsigned int zone_capacity = disk->zone_capacity;
unsigned int wp_offset = zwplug->wp_offset;
struct bio_list bl = BIO_EMPTY_LIST;
struct bio *bio;
while ((bio = bio_list_pop(&zwplug->bio_list))) {
- if (wp_offset >= zone_capacity ||
+ if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
(bio_op(bio) != REQ_OP_ZONE_APPEND &&
bio_offset_from_zone_start(bio) != wp_offset)) {
blk_zone_wplug_bio_io_error(zwplug, bio);
@@ -909,7 +832,6 @@ void blk_zone_write_plug_init_request(struct request *req)
sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
struct request_queue *q = req->q;
struct gendisk *disk = q->disk;
- unsigned int zone_capacity = disk->zone_capacity;
struct blk_zone_wplug *zwplug =
disk_get_zone_wplug(disk, blk_rq_pos(req));
unsigned long flags;
@@ -933,7 +855,7 @@ void blk_zone_write_plug_init_request(struct request *req)
* into the back of the request.
*/
spin_lock_irqsave(&zwplug->lock, flags);
- while (zwplug->wp_offset < zone_capacity) {
+ while (!disk_zone_wplug_is_full(disk, zwplug)) {
bio = bio_list_peek(&zwplug->bio_list);
if (!bio)
break;
@@ -979,7 +901,7 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
* We know such BIO will fail, and that would potentially overflow our
* write pointer offset beyond the end of the zone.
*/
- if (zwplug->wp_offset >= disk->zone_capacity)
+ if (disk_zone_wplug_is_full(disk, zwplug))
goto err;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
@@ -1535,6 +1457,9 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
void disk_free_zone_resources(struct gendisk *disk)
{
+ if (!disk->zone_wplugs_pool)
+ return;
+
cancel_work_sync(&disk->zone_wplugs_work);
if (disk->zone_wplugs_wq) {
@@ -1553,9 +1478,10 @@ void disk_free_zone_resources(struct gendisk *disk)
mempool_destroy(disk->zone_wplugs_pool);
disk->zone_wplugs_pool = NULL;
- kfree(disk->conv_zones_bitmap);
+ bitmap_free(disk->conv_zones_bitmap);
disk->conv_zones_bitmap = NULL;
disk->zone_capacity = 0;
+ disk->last_zone_capacity = 0;
disk->nr_zones = 0;
}
@@ -1600,6 +1526,7 @@ struct blk_revalidate_zone_args {
unsigned long *conv_zones_bitmap;
unsigned int nr_zones;
unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
sector_t sector;
};
@@ -1617,6 +1544,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
disk->nr_zones = args->nr_zones;
disk->zone_capacity = args->zone_capacity;
+ disk->last_zone_capacity = args->last_zone_capacity;
swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
if (disk->conv_zones_bitmap)
nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
@@ -1627,8 +1555,22 @@ static int disk_update_zone_resources(struct gendisk *disk,
return -ENODEV;
}
+ lim = queue_limits_start_update(q);
+
+ /*
+ * Some devices can advertise zone resource limits that are larger than
+ * the number of sequential zones of the zoned block device, e.g. a
+ * small ZNS namespace. In that case, assume that the zoned device has
+ * no zone resource limits.
+ */
+ nr_seq_zones = disk->nr_zones - nr_conv_zones;
+ if (lim.max_open_zones >= nr_seq_zones)
+ lim.max_open_zones = 0;
+ if (lim.max_active_zones >= nr_seq_zones)
+ lim.max_active_zones = 0;
+
if (!disk->zone_wplugs_pool)
- return 0;
+ goto commit;
/*
* If the device has no limit on the maximum number of open and active
@@ -1637,9 +1579,6 @@ static int disk_update_zone_resources(struct gendisk *disk,
* dynamic zone write plug allocation when simultaneously writing to
* more zones than the size of the mempool.
*/
- lim = queue_limits_start_update(q);
-
- nr_seq_zones = disk->nr_zones - nr_conv_zones;
pool_size = max(lim.max_open_zones, lim.max_active_zones);
if (!pool_size)
pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_seq_zones);
@@ -1653,6 +1592,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
lim.max_open_zones = 0;
}
+commit:
return queue_limits_commit_update(q, &lim);
}
@@ -1660,7 +1600,6 @@ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
struct blk_revalidate_zone_args *args)
{
struct gendisk *disk = args->disk;
- struct request_queue *q = disk->queue;
if (zone->capacity != zone->len) {
pr_warn("%s: Invalid conventional zone capacity\n",
@@ -1668,12 +1607,15 @@ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
+ if (disk_zone_is_last(disk, zone))
+ args->last_zone_capacity = zone->capacity;
+
if (!disk_need_zone_resources(disk))
return 0;
if (!args->conv_zones_bitmap) {
args->conv_zones_bitmap =
- blk_alloc_zone_bitmap(q->node, args->nr_zones);
+ bitmap_zalloc(args->nr_zones, GFP_NOIO);
if (!args->conv_zones_bitmap)
return -ENOMEM;
}
@@ -1693,11 +1635,14 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
/*
* Remember the capacity of the first sequential zone and check
- * if it is constant for all zones.
+ * if it is constant for all zones, ignoring the last zone as it can be
+ * smaller.
*/
if (!args->zone_capacity)
args->zone_capacity = zone->capacity;
- if (zone->capacity != args->zone_capacity) {
+ if (disk_zone_is_last(disk, zone)) {
+ args->last_zone_capacity = zone->capacity;
+ } else if (zone->capacity != args->zone_capacity) {
pr_warn("%s: Invalid variable zone capacity\n",
disk->disk_name);
return -ENODEV;
@@ -1732,7 +1677,6 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
{
struct blk_revalidate_zone_args *args = data;
struct gendisk *disk = args->disk;
- sector_t capacity = get_capacity(disk);
sector_t zone_sectors = disk->queue->limits.chunk_sectors;
int ret;
@@ -1743,7 +1687,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
- if (zone->start >= capacity || !zone->len) {
+ if (zone->start >= get_capacity(disk) || !zone->len) {
pr_warn("%s: Invalid zone start %llu, length %llu\n",
disk->disk_name, zone->start, zone->len);
return -ENODEV;
@@ -1753,7 +1697,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
* All zones must have the same size, with the exception on an eventual
* smaller last zone.
*/
- if (zone->start + zone->len < capacity) {
+ if (!disk_zone_is_last(disk, zone)) {
if (zone->len != zone_sectors) {
pr_warn("%s: Invalid zoned device with non constant zone size\n",
disk->disk_name);
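
The new disk_zone_is_full() helper distinguishes the (possibly smaller) last zone by comparing against last_zone_capacity rather than zone_capacity. A userspace model of that check, with the relevant gendisk fields reduced to a small stand-in struct:

/* Model of the last-zone-aware fullness check from blk-zoned.c. */
#include <stdbool.h>
#include <stdio.h>

struct disk_model {			/* illustrative subset of gendisk */
	unsigned int nr_zones;
	unsigned int zone_capacity;	/* capacity of all but the last zone */
	unsigned int last_zone_capacity;
};

static bool zone_is_full(const struct disk_model *d,
			 unsigned int zno, unsigned int offset_in_zone)
{
	if (zno < d->nr_zones - 1)
		return offset_in_zone >= d->zone_capacity;
	return offset_in_zone >= d->last_zone_capacity;
}

int main(void)
{
	struct disk_model d = {
		.nr_zones = 4, .zone_capacity = 256, .last_zone_capacity = 128,
	};

	printf("%d\n", zone_is_full(&d, 0, 200)); /* 0: below zone capacity */
	printf("%d\n", zone_is_full(&d, 3, 200)); /* 1: past the smaller last zone */
	return 0;
}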
diff --git a/block/blk.h b/block/blk.h
index 189bc25beb50..8e8936e97307 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -98,8 +98,8 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
struct bio_vec *vec1, struct bio_vec *vec2)
{
unsigned long mask = queue_segment_boundary(q);
- phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
- phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+ phys_addr_t addr1 = bvec_phys(vec1);
+ phys_addr_t addr2 = bvec_phys(vec2);
/*
* Merging adjacent physical pages may not work correctly under KMSAN
@@ -181,9 +181,11 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
return queue_max_segments(rq->q);
}
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- enum req_op op)
+static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
+ struct request_queue *q = rq->q;
+ enum req_op op = req_op(rq);
+
if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
return min(q->limits.max_discard_sectors,
UINT_MAX >> SECTOR_SHIFT);
@@ -191,6 +193,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
if (unlikely(op == REQ_OP_WRITE_ZEROES))
return q->limits.max_write_zeroes_sectors;
+ if (rq->cmd_flags & REQ_ATOMIC)
+ return q->limits.atomic_write_max_sectors;
+
return q->limits.max_sectors;
}
@@ -352,6 +357,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
int blk_set_default_limits(struct queue_limits *lim);
+void blk_apply_bdi_limits(struct backing_dev_info *bdi,
+ struct queue_limits *lim);
int blk_dev_init(void);
/*
@@ -393,7 +400,7 @@ struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
static inline bool blk_queue_may_bounce(struct request_queue *q)
{
return IS_ENABLED(CONFIG_BOUNCE) &&
- q->limits.bounce == BLK_BOUNCE_HIGH &&
+ (q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
max_low_pfn >= max_pfn;
}
@@ -673,4 +680,9 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);
+void blk_integrity_generate(struct bio *bio);
+void blk_integrity_verify(struct bio *bio);
+void blk_integrity_prepare(struct request *rq);
+void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/elevator.c b/block/elevator.c
index f64ebd726e58..f13d552a32c8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -709,24 +709,25 @@ static int elevator_change(struct request_queue *q, const char *elevator_name)
return ret;
}
-ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
+ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
size_t count)
{
char elevator_name[ELV_NAME_MAX];
int ret;
- if (!elv_support_iosched(q))
+ if (!elv_support_iosched(disk->queue))
return count;
strscpy(elevator_name, buf, sizeof(elevator_name));
- ret = elevator_change(q, strstrip(elevator_name));
+ ret = elevator_change(disk->queue, strstrip(elevator_name));
if (!ret)
return count;
return ret;
}
-ssize_t elv_iosched_show(struct request_queue *q, char *name)
+ssize_t elv_iosched_show(struct gendisk *disk, char *name)
{
+ struct request_queue *q = disk->queue;
struct elevator_queue *eq = q->elevator;
struct elevator_type *cur = NULL, *e;
int len = 0;
diff --git a/block/elevator.h b/block/elevator.h
index e9a050a96e53..3fe18e1a8692 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -147,8 +147,8 @@ extern void elv_unregister(struct elevator_type *);
/*
* io scheduler sysfs switching
*/
-extern ssize_t elv_iosched_show(struct request_queue *, char *);
-extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
+ssize_t elv_iosched_show(struct gendisk *disk, char *page);
+ssize_t elv_iosched_store(struct gendisk *disk, const char *page, size_t count);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
diff --git a/block/fops.c b/block/fops.c
index 376265935714..9825c1713a49 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -34,9 +34,12 @@ static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
return opf;
}
-static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
- struct iov_iter *iter)
+static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
+ struct iov_iter *iter, bool is_atomic)
{
+ if (is_atomic && !generic_atomic_write_valid(iter, pos))
+ return true;
+
return pos & (bdev_logical_block_size(bdev) - 1) ||
!bdev_iter_is_aligned(bdev, iter);
}
@@ -72,6 +75,8 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
bio.bi_ioprio = iocb->ki_ioprio;
+ if (iocb->ki_flags & IOCB_ATOMIC)
+ bio.bi_opf |= REQ_ATOMIC;
ret = bio_iov_iter_get_pages(&bio, iter);
if (unlikely(ret))
@@ -343,6 +348,9 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
task_io_account_write(bio->bi_iter.bi_size);
}
+ if (iocb->ki_flags & IOCB_ATOMIC)
+ bio->bi_opf |= REQ_ATOMIC;
+
if (iocb->ki_flags & IOCB_NOWAIT)
bio->bi_opf |= REQ_NOWAIT;
@@ -359,12 +367,13 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
+ bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
unsigned int nr_pages;
if (!iov_iter_count(iter))
return 0;
- if (blkdev_dio_unaligned(bdev, iocb->ki_pos, iter))
+ if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
return -EINVAL;
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
@@ -373,6 +382,8 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return __blkdev_direct_IO_simple(iocb, iter, bdev,
nr_pages);
return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
+ } else if (is_atomic) {
+ return -EINVAL;
}
return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}
@@ -383,10 +394,11 @@ static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
struct block_device *bdev = I_BDEV(inode);
loff_t isize = i_size_read(inode);
- iomap->bdev = bdev;
- iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
if (offset >= isize)
return -EIO;
+
+ iomap->bdev = bdev;
+ iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
iomap->type = IOMAP_MAPPED;
iomap->addr = iomap->offset;
iomap->length = isize - iomap->offset;
@@ -612,6 +624,9 @@ static int blkdev_open(struct inode *inode, struct file *filp)
if (!bdev)
return -ENXIO;
+ if (bdev_can_atomic_write(bdev) && filp->f_flags & O_DIRECT)
+ filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
+
ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
if (ret)
blkdev_put_no_open(bdev);
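
blkdev_dio_invalid() above rejects direct IO that is not aligned to the device's logical block size, and additionally runs atomic writes through generic_atomic_write_valid(). The alignment part reduces to a power-of-two mask test; a minimal standalone model:

/* Model of the logical-block alignment test used by blkdev_dio_invalid(). */
#include <stdbool.h>
#include <stdio.h>

static bool dio_pos_unaligned(long long pos, unsigned int lbs)
{
	/* lbs is assumed to be a power of two (512, 4096, ...) */
	return pos & (lbs - 1);
}

int main(void)
{
	printf("%d\n", dio_pos_unaligned(4096, 4096));	/* 0: aligned */
	printf("%d\n", dio_pos_unaligned(4608, 4096));	/* 1: rejected with -EINVAL */
	return 0;
}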
diff --git a/block/genhd.c b/block/genhd.c
index 8f1f3c6b4d67..4dc95a463505 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -524,7 +524,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor);
}
- disk_update_readahead(disk);
+ blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
disk_add_events(disk);
set_bit(GD_ADDED, &disk->state);
return 0;
diff --git a/block/ioctl.c b/block/ioctl.c
index d570e1695896..e8e4a4190f18 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -224,7 +224,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
goto fail;
err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
- BLKDEV_ZERO_NOUNMAP);
+ BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_KILLABLE);
fail:
filemap_invalidate_unlock(bdev->bd_mapping);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 94eede4fb9eb..acdc28756d9d 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -488,6 +488,20 @@ unlock:
}
/*
+ * 'depth' is a number in the range 1..INT_MAX representing a number of
+ * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
+ * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
+ * Values larger than q->nr_requests have the same effect as q->nr_requests.
+ */
+static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
+{
+ struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
+ const unsigned int nrr = hctx->queue->nr_requests;
+
+ return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
+}
+
+/*
* Called by __blk_mq_alloc_request(). The shallow_depth value set by this
* function is used by __blk_mq_get_tag().
*/
@@ -503,7 +517,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
* Throttle asynchronous requests and writes such that these requests
* do not block the allocation of synchronous requests.
*/
- data->shallow_depth = dd->async_depth;
+ data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
}
/* Called by blk_mq_update_nr_requests(). */
@@ -513,9 +527,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
struct deadline_data *dd = q->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
- dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+ dd->async_depth = q->nr_requests;
- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
+ sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
}
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
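
dd_to_word_depth() rescales an async depth expressed in requests (1..nr_requests) onto the 1..(1 << sb.shift) range that sbitmap_get_shallow() works with, rounding up. A worked example of that arithmetic, assuming a word shift of 6 (64-bit sbitmap words) and a 256-request queue:

/* Worked example of the deadline shallow-depth rescaling. */
#include <stdio.h>

static unsigned int to_word_depth(unsigned int qdepth,
				  unsigned int shift, unsigned int nr_requests)
{
	/* ceil(qdepth * (1 << shift) / nr_requests) */
	return ((qdepth << shift) + nr_requests - 1) / nr_requests;
}

int main(void)
{
	/* 192 of 256 requests maps to 48 of the 64 per-word slots */
	printf("%u\n", to_word_depth(192, 6, 256));	/* 48 */
	printf("%u\n", to_word_depth(256, 6, 256));	/* 64: full depth */
	return 0;
}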
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 14fe0fef811c..598fd3e7fcc8 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
&key_type_user, key_name, true);
if (IS_ERR(kref))
- ret = PTR_ERR(kref);
+ return PTR_ERR(kref);
key = key_ref_to_ptr(kref);
down_read(&key->sem);
diff --git a/block/t10-pi.c b/block/t10-pi.c
index f4cc91156da1..425e2836f3e1 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -11,17 +11,23 @@
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>
+#include "blk.h"
+
+struct blk_integrity_iter {
+ void *prot_buf;
+ void *data_buf;
+ sector_t seed;
+ unsigned int data_size;
+ unsigned short interval;
+ const char *disk_name;
+};
-typedef __be16 (csum_fn) (__be16, void *, unsigned int);
-
-static __be16 t10_pi_crc_fn(__be16 crc, void *data, unsigned int len)
-{
- return cpu_to_be16(crc_t10dif_update(be16_to_cpu(crc), data, len));
-}
-
-static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len)
+static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
+ unsigned char csum_type)
{
- return (__force __be16)ip_compute_csum(data, len);
+ if (csum_type == BLK_INTEGRITY_CSUM_IP)
+ return (__force __be16)ip_compute_csum(data, len);
+ return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum), data, len));
}
/*
@@ -29,48 +35,44 @@ static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len)
* 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
* tag.
*/
-static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
- csum_fn *fn, enum t10_dif_type type)
+static void t10_pi_generate(struct blk_integrity_iter *iter,
+ struct blk_integrity *bi)
{
- u8 offset = iter->pi_offset;
+ u8 offset = bi->pi_offset;
unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf + offset;
- pi->guard_tag = fn(0, iter->data_buf, iter->interval);
+ pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
+ bi->csum_type);
if (offset)
- pi->guard_tag = fn(pi->guard_tag, iter->prot_buf,
- offset);
+ pi->guard_tag = t10_pi_csum(pi->guard_tag,
+ iter->prot_buf, offset, bi->csum_type);
pi->app_tag = 0;
- if (type == T10_PI_TYPE1_PROTECTION)
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
else
pi->ref_tag = 0;
iter->data_buf += iter->interval;
- iter->prot_buf += iter->tuple_size;
+ iter->prot_buf += bi->tuple_size;
iter->seed++;
}
-
- return BLK_STS_OK;
}
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
- csum_fn *fn, enum t10_dif_type type)
+ struct blk_integrity *bi)
{
- u8 offset = iter->pi_offset;
+ u8 offset = bi->pi_offset;
unsigned int i;
- BUG_ON(type == T10_PI_TYPE0_PROTECTION);
-
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf + offset;
__be16 csum;
- if (type == T10_PI_TYPE1_PROTECTION ||
- type == T10_PI_TYPE2_PROTECTION) {
+ if (bi->flags & BLK_INTEGRITY_REF_TAG) {
if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next;
@@ -82,15 +84,17 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
iter->seed, be32_to_cpu(pi->ref_tag));
return BLK_STS_PROTECTION;
}
- } else if (type == T10_PI_TYPE3_PROTECTION) {
+ } else {
if (pi->app_tag == T10_PI_APP_ESCAPE &&
pi->ref_tag == T10_PI_REF_ESCAPE)
goto next;
}
- csum = fn(0, iter->data_buf, iter->interval);
+ csum = t10_pi_csum(0, iter->data_buf, iter->interval,
+ bi->csum_type);
if (offset)
- csum = fn(csum, iter->prot_buf, offset);
+ csum = t10_pi_csum(csum, iter->prot_buf, offset,
+ bi->csum_type);
if (pi->guard_tag != csum) {
pr_err("%s: guard tag error at sector %llu " \
@@ -102,33 +106,13 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
next:
iter->data_buf += iter->interval;
- iter->prot_buf += iter->tuple_size;
+ iter->prot_buf += bi->tuple_size;
iter->seed++;
}
return BLK_STS_OK;
}
-static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
-}
-
-static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
-}
-
-static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
-}
-
-static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
-}
-
/**
* t10_pi_type1_prepare - prepare PI prior submitting request to device
* @rq: request with PI that should be prepared
@@ -141,7 +125,7 @@ static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
*/
static void t10_pi_type1_prepare(struct request *rq)
{
- struct blk_integrity *bi = &rq->q->integrity;
+ struct blk_integrity *bi = &rq->q->limits.integrity;
const int tuple_sz = bi->tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
@@ -192,7 +176,7 @@ static void t10_pi_type1_prepare(struct request *rq)
*/
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
- struct blk_integrity *bi = &rq->q->integrity;
+ struct blk_integrity *bi = &rq->q->limits.integrity;
unsigned intervals = nr_bytes >> bi->interval_exp;
const int tuple_sz = bi->tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
@@ -225,81 +209,15 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
}
}
-static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-/* Type 3 does not have a reference tag so no remapping is required. */
-static void t10_pi_type3_prepare(struct request *rq)
-{
-}
-
-/* Type 3 does not have a reference tag so no remapping is required. */
-static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
-{
-}
-
-const struct blk_integrity_profile t10_pi_type1_crc = {
- .name = "T10-DIF-TYPE1-CRC",
- .generate_fn = t10_pi_type1_generate_crc,
- .verify_fn = t10_pi_type1_verify_crc,
- .prepare_fn = t10_pi_type1_prepare,
- .complete_fn = t10_pi_type1_complete,
-};
-EXPORT_SYMBOL(t10_pi_type1_crc);
-
-const struct blk_integrity_profile t10_pi_type1_ip = {
- .name = "T10-DIF-TYPE1-IP",
- .generate_fn = t10_pi_type1_generate_ip,
- .verify_fn = t10_pi_type1_verify_ip,
- .prepare_fn = t10_pi_type1_prepare,
- .complete_fn = t10_pi_type1_complete,
-};
-EXPORT_SYMBOL(t10_pi_type1_ip);
-
-const struct blk_integrity_profile t10_pi_type3_crc = {
- .name = "T10-DIF-TYPE3-CRC",
- .generate_fn = t10_pi_type3_generate_crc,
- .verify_fn = t10_pi_type3_verify_crc,
- .prepare_fn = t10_pi_type3_prepare,
- .complete_fn = t10_pi_type3_complete,
-};
-EXPORT_SYMBOL(t10_pi_type3_crc);
-
-const struct blk_integrity_profile t10_pi_type3_ip = {
- .name = "T10-DIF-TYPE3-IP",
- .generate_fn = t10_pi_type3_generate_ip,
- .verify_fn = t10_pi_type3_verify_ip,
- .prepare_fn = t10_pi_type3_prepare,
- .complete_fn = t10_pi_type3_complete,
-};
-EXPORT_SYMBOL(t10_pi_type3_ip);
-
static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
}
-static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
- enum t10_dif_type type)
+static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
+ struct blk_integrity *bi)
{
- u8 offset = iter->pi_offset;
+ u8 offset = bi->pi_offset;
unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
@@ -311,17 +229,15 @@ static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
iter->prot_buf, offset);
pi->app_tag = 0;
- if (type == T10_PI_TYPE1_PROTECTION)
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
put_unaligned_be48(iter->seed, pi->ref_tag);
else
put_unaligned_be48(0ULL, pi->ref_tag);
iter->data_buf += iter->interval;
- iter->prot_buf += iter->tuple_size;
+ iter->prot_buf += bi->tuple_size;
iter->seed++;
}
-
- return BLK_STS_OK;
}
static bool ext_pi_ref_escape(u8 *ref_tag)
@@ -332,9 +248,9 @@ static bool ext_pi_ref_escape(u8 *ref_tag)
}
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
- enum t10_dif_type type)
+ struct blk_integrity *bi)
{
- u8 offset = iter->pi_offset;
+ u8 offset = bi->pi_offset;
unsigned int i;
for (i = 0; i < iter->data_size; i += iter->interval) {
@@ -342,7 +258,7 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
u64 ref, seed;
__be64 csum;
- if (type == T10_PI_TYPE1_PROTECTION) {
+ if (bi->flags & BLK_INTEGRITY_REF_TAG) {
if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next;
@@ -353,7 +269,7 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
iter->disk_name, seed, ref);
return BLK_STS_PROTECTION;
}
- } else if (type == T10_PI_TYPE3_PROTECTION) {
+ } else {
if (pi->app_tag == T10_PI_APP_ESCAPE &&
ext_pi_ref_escape(pi->ref_tag))
goto next;
@@ -374,26 +290,16 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
next:
iter->data_buf += iter->interval;
- iter->prot_buf += iter->tuple_size;
+ iter->prot_buf += bi->tuple_size;
iter->seed++;
}
return BLK_STS_OK;
}
-static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter)
-{
- return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION);
-}
-
-static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter)
-{
- return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION);
-}
-
static void ext_pi_type1_prepare(struct request *rq)
{
- struct blk_integrity *bi = &rq->q->integrity;
+ struct blk_integrity *bi = &rq->q->limits.integrity;
const int tuple_sz = bi->tuple_size;
u64 ref_tag = ext_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
@@ -433,7 +339,7 @@ static void ext_pi_type1_prepare(struct request *rq)
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
- struct blk_integrity *bi = &rq->q->integrity;
+ struct blk_integrity *bi = &rq->q->limits.integrity;
unsigned intervals = nr_bytes >> bi->interval_exp;
const int tuple_sz = bi->tuple_size;
u64 ref_tag = ext_pi_ref_tag(rq);
@@ -467,33 +373,105 @@ static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
}
}
-static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter)
+void blk_integrity_generate(struct bio *bio)
{
- return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct blk_integrity_iter iter;
+ struct bvec_iter bviter;
+ struct bio_vec bv;
+
+ iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ iter.interval = 1 << bi->interval_exp;
+ iter.seed = bio->bi_iter.bi_sector;
+ iter.prot_buf = bvec_virt(bip->bip_vec);
+ bio_for_each_segment(bv, bio, bviter) {
+ void *kaddr = bvec_kmap_local(&bv);
+
+ iter.data_buf = kaddr;
+ iter.data_size = bv.bv_len;
+ switch (bi->csum_type) {
+ case BLK_INTEGRITY_CSUM_CRC64:
+ ext_pi_crc64_generate(&iter, bi);
+ break;
+ case BLK_INTEGRITY_CSUM_CRC:
+ case BLK_INTEGRITY_CSUM_IP:
+ t10_pi_generate(&iter, bi);
+ break;
+ default:
+ break;
+ }
+ kunmap_local(kaddr);
+ }
}
-static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter)
+void blk_integrity_verify(struct bio *bio)
{
- return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct blk_integrity_iter iter;
+ struct bvec_iter bviter;
+ struct bio_vec bv;
+
+ /*
+ * By the time verify is called, bi_iter has been advanced by splitting
+ * and completion, so use the copy created at submission time here.
+ */
+ iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ iter.interval = 1 << bi->interval_exp;
+ iter.seed = bip->bio_iter.bi_sector;
+ iter.prot_buf = bvec_virt(bip->bip_vec);
+ __bio_for_each_segment(bv, bio, bviter, bip->bio_iter) {
+ void *kaddr = bvec_kmap_local(&bv);
+ blk_status_t ret = BLK_STS_OK;
+
+ iter.data_buf = kaddr;
+ iter.data_size = bv.bv_len;
+ switch (bi->csum_type) {
+ case BLK_INTEGRITY_CSUM_CRC64:
+ ret = ext_pi_crc64_verify(&iter, bi);
+ break;
+ case BLK_INTEGRITY_CSUM_CRC:
+ case BLK_INTEGRITY_CSUM_IP:
+ ret = t10_pi_verify(&iter, bi);
+ break;
+ default:
+ break;
+ }
+ kunmap_local(kaddr);
+
+ if (ret) {
+ bio->bi_status = ret;
+ return;
+ }
+ }
}
-const struct blk_integrity_profile ext_pi_type1_crc64 = {
- .name = "EXT-DIF-TYPE1-CRC64",
- .generate_fn = ext_pi_type1_generate_crc64,
- .verify_fn = ext_pi_type1_verify_crc64,
- .prepare_fn = ext_pi_type1_prepare,
- .complete_fn = ext_pi_type1_complete,
-};
-EXPORT_SYMBOL_GPL(ext_pi_type1_crc64);
-
-const struct blk_integrity_profile ext_pi_type3_crc64 = {
- .name = "EXT-DIF-TYPE3-CRC64",
- .generate_fn = ext_pi_type3_generate_crc64,
- .verify_fn = ext_pi_type3_verify_crc64,
- .prepare_fn = t10_pi_type3_prepare,
- .complete_fn = t10_pi_type3_complete,
-};
-EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
+void blk_integrity_prepare(struct request *rq)
+{
+ struct blk_integrity *bi = &rq->q->limits.integrity;
+
+ if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
+ return;
+
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
+ ext_pi_type1_prepare(rq);
+ else
+ t10_pi_type1_prepare(rq);
+}
+
+void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
+{
+ struct blk_integrity *bi = &rq->q->limits.integrity;
+
+ if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
+ return;
+
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
+ ext_pi_type1_complete(rq, nr_bytes);
+ else
+ t10_pi_type1_complete(rq, nr_bytes);
+}
MODULE_DESCRIPTION("T10 Protection Information module");
MODULE_LICENSE("GPL");
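
With the per-profile function pointer tables removed, blk_integrity_generate()/verify() now select the PI handler by switching on the checksum type, grouping the 16-bit CRC and IP-checksum cases together as the kernel's t10_pi_csum() does. A stripped-down sketch of that dispatch shape; the enum values and handlers are placeholders, not the kernel's:

/* Sketch of checksum-type dispatch replacing the old integrity profiles. */
#include <stdio.h>

enum csum_type { CSUM_NONE, CSUM_IP, CSUM_CRC, CSUM_CRC64 };

static void generate_t10(void)   { puts("16-bit guard tags (CRC or IP checksum)"); }
static void generate_crc64(void) { puts("64-bit CRC guard tags"); }

static void integrity_generate(enum csum_type t)
{
	switch (t) {
	case CSUM_CRC64:
		generate_crc64();
		break;
	case CSUM_CRC:
	case CSUM_IP:
		generate_t10();
		break;
	default:
		break;		/* no protection information */
	}
}

int main(void)
{
	integrity_generate(CSUM_CRC64);
	return 0;
}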
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 39ea5cfa8326..61ca4afe83dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o
obj-$(CONFIG_ACPI_FAN) += fan.o
fan-objs := fan_core.o
fan-objs += fan_attr.o
+fan-$(CONFIG_HWMON) += fan_hwmon.o
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-$(CONFIG_ACPI_TAD) += acpi_tad.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2d4a35e6dd18..eaa70b23dd0b 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -112,7 +112,7 @@ static int get_ac_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property ac_props[] = {
+static const enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
@@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
dev_name(&adev->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(adev, event, (u32) ac->state);
- kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(ac->charger);
}
}
@@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev)
if (acpi_ac_get_state(ac))
return 0;
if (old_state != ac->state)
- kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(ac->charger);
return 0;
}
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index bd1ad07f0290..350d3a892889 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -25,6 +25,10 @@
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+
+#define ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS 0
+#define ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION 1
+
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
@@ -382,16 +386,23 @@ static void acpi_pad_handle_notify(acpi_handle handle)
.length = 4,
.pointer = (void *)&idle_cpus,
};
+ u32 status;
mutex_lock(&isolated_cpus_lock);
num_cpus = acpi_pad_pur(handle);
if (num_cpus < 0) {
- mutex_unlock(&isolated_cpus_lock);
- return;
+ /* The ACPI specification says that if no action was performed when
+ * processing the _PUR object, _OST should still be evaluated, albeit
+ * with a different status code.
+ */
+ status = ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION;
+ } else {
+ status = ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS;
+ acpi_pad_idle_cpus(num_cpus);
}
- acpi_pad_idle_cpus(num_cpus);
+
idle_cpus = acpi_pad_idle_cpus_num();
- acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, status, &param);
mutex_unlock(&isolated_cpus_lock);
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 7a0dd35d62c9..9916cc7ced39 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -35,6 +35,17 @@ EXPORT_PER_CPU_SYMBOL(processors);
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);
+acpi_handle acpi_get_processor_handle(int cpu)
+{
+ struct acpi_processor *pr;
+
+ pr = per_cpu(processors, cpu);
+ if (pr)
+ return pr->handle;
+
+ return NULL;
+}
+
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
u8 value1 = 0;
@@ -183,20 +194,44 @@ static void __init acpi_pcc_cpufreq_init(void) {}
#endif /* CONFIG_X86 */
/* Initialization */
+static DEFINE_PER_CPU(void *, processor_device_array);
+
+static int acpi_processor_set_per_cpu(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ BUG_ON(pr->id >= nr_cpu_ids);
+
+ /*
+ * Buggy BIOS check.
+ * ACPI id of processors can be reported wrongly by the BIOS.
+ * Don't trust it blindly
+ */
+ if (per_cpu(processor_device_array, pr->id) != NULL &&
+ per_cpu(processor_device_array, pr->id) != device) {
+ dev_warn(&device->dev,
+ "BIOS reported wrong ACPI id %d for the processor\n",
+ pr->id);
+ return -EINVAL;
+ }
+ /*
+ * processor_device_array is not cleared on errors to allow buggy BIOS
+ * checks.
+ */
+ per_cpu(processor_device_array, pr->id) = device;
+ per_cpu(processors, pr->id) = pr;
+
+ return 0;
+}
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static int acpi_processor_hotadd_init(struct acpi_processor *pr)
+static int acpi_processor_hotadd_init(struct acpi_processor *pr,
+ struct acpi_device *device)
{
- unsigned long long sta;
- acpi_status status;
int ret;
if (invalid_phys_cpuid(pr->phys_id))
return -ENODEV;
- status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
- return -ENODEV;
-
cpu_maps_update_begin();
cpus_write_lock();
@@ -204,19 +239,26 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
if (ret)
goto out;
+ ret = acpi_processor_set_per_cpu(pr, device);
+ if (ret) {
+ acpi_unmap_cpu(pr->id);
+ goto out;
+ }
+
ret = arch_register_cpu(pr->id);
if (ret) {
+ /* Leave the processor device array in place to detect a buggy BIOS */
+ per_cpu(processors, pr->id) = NULL;
acpi_unmap_cpu(pr->id);
goto out;
}
/*
- * CPU got hot-added, but cpu_data is not initialized yet. Set a flag
- * to delay cpu_idle/throttling initialization and do it when the CPU
- * gets online for the first time.
+ * CPU got hot-added, but cpu_data is not initialized yet. Do
+ * cpu_idle/throttling initialization when the CPU gets online for
+ * the first time.
*/
pr_info("CPU%d has been hot-added\n", pr->id);
- pr->flags.need_hotplug_init = 1;
out:
cpus_write_unlock();
@@ -224,7 +266,8 @@ out:
return ret;
}
#else
-static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
+static inline int acpi_processor_hotadd_init(struct acpi_processor *pr,
+ struct acpi_device *device)
{
return -ENODEV;
}
@@ -239,6 +282,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
+ int ret;
acpi_processor_errata();
@@ -315,19 +359,19 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
/*
- * Extra Processor objects may be enumerated on MP systems with
- * less than the max # of CPUs. They should be ignored _iff
- * they are physically not present.
- *
- * NOTE: Even if the processor has a cpuid, it may not be present
- * because cpuid <-> apicid mapping is persistent now.
+ * This code is not called unless we know the CPU is present and
+ * enabled. The two paths are:
+ * a) Initially present CPUs on architectures that do not defer
+ * their arch_register_cpu() calls until this point.
+ * b) Hotplugged CPUs (enabled bit in _STA has transitioned from not
+ * enabled to enabled)
*/
- if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
- int ret = acpi_processor_hotadd_init(pr);
-
- if (ret)
- return ret;
- }
+ if (!get_cpu_device(pr->id))
+ ret = acpi_processor_hotadd_init(pr, device);
+ else
+ ret = acpi_processor_set_per_cpu(pr, device);
+ if (ret)
+ return ret;
/*
* On some boxes several processors use the same processor bus id.
@@ -372,8 +416,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
* (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
* Such things have to be put in and set up by the processor driver's .probe().
*/
-static DEFINE_PER_CPU(void *, processor_device_array);
-
static int acpi_processor_add(struct acpi_device *device,
const struct acpi_device_id *id)
{
@@ -400,39 +442,17 @@ static int acpi_processor_add(struct acpi_device *device,
result = acpi_processor_get_info(device);
if (result) /* Processor is not physically present or unavailable */
- return 0;
-
- BUG_ON(pr->id >= nr_cpu_ids);
-
- /*
- * Buggy BIOS check.
- * ACPI id of processors can be reported wrongly by the BIOS.
- * Don't trust it blindly
- */
- if (per_cpu(processor_device_array, pr->id) != NULL &&
- per_cpu(processor_device_array, pr->id) != device) {
- dev_warn(&device->dev,
- "BIOS reported wrong ACPI id %d for the processor\n",
- pr->id);
- /* Give up, but do not abort the namespace scan. */
- goto err;
- }
- /*
- * processor_device_array is not cleared on errors to allow buggy BIOS
- * checks.
- */
- per_cpu(processor_device_array, pr->id) = device;
- per_cpu(processors, pr->id) = pr;
+ goto err_clear_driver_data;
dev = get_cpu_device(pr->id);
if (!dev) {
result = -ENODEV;
- goto err;
+ goto err_clear_per_cpu;
}
result = acpi_bind_one(dev, device);
if (result)
- goto err;
+ goto err_clear_per_cpu;
pr->dev = dev;
@@ -443,10 +463,11 @@ static int acpi_processor_add(struct acpi_device *device,
dev_err(dev, "Processor driver could not be attached\n");
acpi_unbind_one(dev);
- err:
- free_cpumask_var(pr->throttling.shared_cpu_map);
- device->driver_data = NULL;
+ err_clear_per_cpu:
per_cpu(processors, pr->id) = NULL;
+ err_clear_driver_data:
+ device->driver_data = NULL;
+ free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
kfree(pr);
return result;
@@ -454,7 +475,7 @@ static int acpi_processor_add(struct acpi_device *device,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Removal */
-static void acpi_processor_remove(struct acpi_device *device)
+static void acpi_processor_post_eject(struct acpi_device *device)
{
struct acpi_processor *pr;
@@ -476,10 +497,6 @@ static void acpi_processor_remove(struct acpi_device *device)
device_release_driver(pr->dev);
acpi_unbind_one(pr->dev);
- /* Clean up. */
- per_cpu(processor_device_array, pr->id) = NULL;
- per_cpu(processors, pr->id) = NULL;
-
cpu_maps_update_begin();
cpus_write_lock();
@@ -487,6 +504,10 @@ static void acpi_processor_remove(struct acpi_device *device)
arch_unregister_cpu(pr->id);
acpi_unmap_cpu(pr->id);
+ /* Clean up. */
+ per_cpu(processor_device_array, pr->id) = NULL;
+ per_cpu(processors, pr->id) = NULL;
+
cpus_write_unlock();
cpu_maps_update_done();
@@ -598,9 +619,9 @@ static bool __init acpi_early_processor_osc(void)
void __init acpi_early_processor_control_setup(void)
{
if (acpi_early_processor_osc()) {
- pr_info("_OSC evaluated successfully for all CPUs\n");
+ pr_debug("_OSC evaluated successfully for all CPUs\n");
} else {
- pr_info("_OSC evaluation for CPUs failed, trying _PDC\n");
+ pr_debug("_OSC evaluation for CPUs failed, trying _PDC\n");
acpi_early_processor_set_pdc();
}
}
@@ -622,7 +643,7 @@ static struct acpi_scan_handler processor_handler = {
.ids = processor_device_ids,
.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
- .detach = acpi_processor_remove,
+ .post_eject = acpi_processor_post_eject,
#endif
.hotplug = {
.enabled = true,
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 1d670dbe4d1d..b831cb8e53dc 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -27,6 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
+MODULE_DESCRIPTION("ACPI Time and Alarm (TAD) Device Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rafael J. Wysocki");
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 1fda30388297..8274a17872ed 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1128,8 +1128,8 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
return -ENOMEM;
}
- strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+ strscpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
data->device_id = device_id;
data->video = video;
@@ -2010,8 +2010,8 @@ static int acpi_video_bus_add(struct acpi_device *device)
}
video->device = device;
- strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
- strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+ strscpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
+ strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = video;
acpi_video_bus_find_cap(video);
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ddd072cbc738..2133085deda7 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -191,6 +191,10 @@ void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id, u32 function);
+void
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id);
+
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 18fdf2bc2d49..dc6004daf624 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[];
/* Local prototypes */
-static void
-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
- acpi_adr_space_type space_id);
-
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
*
******************************************************************************/
-static void
+void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id)
{
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 3197e6303c5b..624361a5f34d 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
}
ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_execute_orphan_reg_method
+ *
+ * PARAMETERS: device - Handle for the device
+ * space_id - The address space ID
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
+ * device. This is a _REG method that has no corresponding region
+ * within the device's scope.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_validate_handle(device);
+ if (node) {
+
+ /*
+ * If an "orphan" _REG method is present in the device's scope
+ * for the given address space ID, run it.
+ */
+
+ acpi_ev_execute_orphan_reg_method(node, space_id);
+ } else {
+ status = AE_BAD_PARAMETER;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
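
The newly exported acpi_execute_orphan_reg_method() is consumed by the EC driver further down in this diff. As an illustration only, a minimal sketch of the calling pattern; the wrapper name below is hypothetical, while acpi_execute_orphan_reg_method() and ACPI_ADR_SPACE_EC come from this series and ACPICA:

#include <linux/acpi.h>

/* Illustrative only: run a device's "orphan" _REG for the EC address space. */
static void example_run_orphan_reg(acpi_handle handle)
{
	acpi_status status;

	status = acpi_execute_orphan_reg_method(handle, ACPI_ADR_SPACE_EC);
	if (ACPI_FAILURE(status))
		pr_debug("orphan _REG evaluation failed: %s\n",
			 acpi_format_exception(status));
}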
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 8907b8bf4267..c49b9f8de723 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
struct acpi_mem_mapping *mm = mem_info->cur_mm;
u32 length;
acpi_size map_length;
- acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
u32 remainder;
#endif
@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
- /*
- * If mapping the entire remaining portion of the region will cross
- * a page boundary, just map up to the page boundary, do not cross.
- * On some systems, crossing a page boundary while mapping regions
- * can cause warnings if the pages have different attributes
- * due to resource management.
- *
- * This has the added benefit of constraining a single mapping to
- * one page, which is similar to the original code that used a 4k
- * maximum window.
- */
- page_boundary_map_length = (acpi_size)
- (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
- if (page_boundary_map_length == 0) {
- page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
- }
-
- if (map_length > page_boundary_map_length) {
- map_length = page_boundary_map_length;
- }
+ if (map_length > ACPI_DEFAULT_PAGE_SIZE)
+ map_length = ACPI_DEFAULT_PAGE_SIZE;
/* Create a new mapping starting at the address given */
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 9515bcfe5e97..73903a497d73 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -909,7 +909,7 @@ static void __exit einj_exit(void)
if (einj_initialized)
platform_driver_unregister(&einj_driver);
- platform_device_del(einj_dev);
+ platform_device_unregister(einj_dev);
}
module_init(einj_init);
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 726944648c9b..05ecde9eaabe 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ACPI_AGDI) += agdi.o
-obj-$(CONFIG_ACPI_IORT) += iort.o
-obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_APMT) += apmt.o
+obj-$(CONFIG_ACPI_FFH) += ffh.o
+obj-$(CONFIG_ACPI_GTDT) += gtdt.o
+obj-$(CONFIG_ACPI_IORT) += iort.o
+obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-y += dma.o init.o
obj-y += thermal_cpufreq.o
diff --git a/drivers/acpi/arm64/amba.c b/drivers/acpi/arm64/amba.c
index e1f0bbb8f393..1350083bce5f 100644
--- a/drivers/acpi/arm64/amba.c
+++ b/drivers/acpi/arm64/amba.c
@@ -27,11 +27,7 @@ static const struct acpi_device_id amba_id_list[] = {
static void amba_register_dummy_clk(void)
{
- static struct clk *amba_dummy_clk;
-
- /* If clock already registered */
- if (amba_dummy_clk)
- return;
+ struct clk *amba_dummy_clk;
amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
diff --git a/arch/arm64/kernel/cpuidle.c b/drivers/acpi/arm64/cpuidle.c
index f372295207fb..801f9c450142 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/drivers/acpi/arm64/cpuidle.c
@@ -10,9 +10,6 @@
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/psci.h>
-
-#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-
#include <acpi/processor.h>
#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
@@ -71,4 +68,3 @@ __cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter,
lpi->index, state);
}
-#endif
diff --git a/drivers/acpi/arm64/ffh.c b/drivers/acpi/arm64/ffh.c
new file mode 100644
index 000000000000..877edc6557e9
--- /dev/null
+++ b/drivers/acpi/arm64/ffh.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/slab.h>
+
+/*
+ * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as
+ * specified in https://developer.arm.com/docs/den0048/latest
+ */
+struct acpi_ffh_data {
+ struct acpi_ffh_info info;
+ void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *args,
+ struct arm_smccc_quirk *res);
+ void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+};
+
+int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
+{
+ enum arm_smccc_conduit conduit;
+ struct acpi_ffh_data *ffh_ctxt;
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return -EOPNOTSUPP;
+
+ conduit = arm_smccc_1_1_get_conduit();
+ if (conduit == SMCCC_CONDUIT_NONE) {
+ pr_err("%s: invalid SMCCC conduit\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
+ if (!ffh_ctxt)
+ return -ENOMEM;
+
+ if (conduit == SMCCC_CONDUIT_SMC) {
+ ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
+ ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc;
+ } else {
+ ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc;
+ ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc;
+ }
+
+ memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info));
+
+ *region_ctxt = ffh_ctxt;
+ return AE_OK;
+}
+
+static bool acpi_ffh_smccc_owner_allowed(u32 fid)
+{
+ int owner = ARM_SMCCC_OWNER_NUM(fid);
+
+ if (owner == ARM_SMCCC_OWNER_STANDARD ||
+ owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM)
+ return true;
+
+ return false;
+}
+
+int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context)
+{
+ int ret = 0;
+ struct acpi_ffh_data *ffh_ctxt = region_context;
+
+ if (ffh_ctxt->info.offset == 0) {
+ /* SMC/HVC 32bit call */
+ struct arm_smccc_res res;
+ u32 a[8] = { 0 }, *ptr = (u32 *)value;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) ||
+ !acpi_ffh_smccc_owner_allowed(*ptr) ||
+ ffh_ctxt->info.length > 32) {
+ ret = AE_ERROR;
+ } else {
+ int idx, len = ffh_ctxt->info.length >> 2;
+
+ for (idx = 0; idx < len; idx++)
+ a[idx] = *(ptr + idx);
+
+ ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4],
+ a[5], a[6], a[7], &res, NULL);
+ memcpy(value, &res, sizeof(res));
+ }
+
+ } else if (ffh_ctxt->info.offset == 1) {
+ /* SMC/HVC 64bit call */
+ struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) ||
+ !acpi_ffh_smccc_owner_allowed(r->a0) ||
+ ffh_ctxt->info.length > sizeof(*r)) {
+ ret = AE_ERROR;
+ } else {
+ ffh_ctxt->invoke_ffh64_fn(r, r);
+ memcpy(value, r, ffh_ctxt->info.length);
+ }
+ } else {
+ ret = AE_ERROR;
+ }
+
+ return ret;
+}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b379401ff1c2..da3a879d638a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -38,9 +38,10 @@
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
-#define ACPI_BATTERY_STATE_DISCHARGING 0x1
-#define ACPI_BATTERY_STATE_CHARGING 0x2
-#define ACPI_BATTERY_STATE_CRITICAL 0x4
+#define ACPI_BATTERY_STATE_DISCHARGING 0x1
+#define ACPI_BATTERY_STATE_CHARGING 0x2
+#define ACPI_BATTERY_STATE_CRITICAL 0x4
+#define ACPI_BATTERY_STATE_CHARGE_LIMITING 0x8
#define MAX_STRING_LENGTH 64
@@ -155,7 +156,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery);
static int acpi_battery_is_charged(struct acpi_battery *battery)
{
- /* charging, discharging or critical low */
+ /* charging, discharging, critical low or charge limited */
if (battery->state != 0)
return 0;
@@ -215,6 +216,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = acpi_battery_handle_discharging(battery);
else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (battery->state & ACPI_BATTERY_STATE_CHARGE_LIMITING)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
else
@@ -308,7 +311,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
return ret;
}
-static enum power_supply_property charge_battery_props[] = {
+static const enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -326,7 +329,7 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-static enum power_supply_property charge_battery_full_cap_broken_props[] = {
+static const enum power_supply_property charge_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -340,7 +343,7 @@ static enum power_supply_property charge_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-static enum power_supply_property energy_battery_props[] = {
+static const enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -358,7 +361,7 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-static enum power_supply_property energy_battery_full_cap_broken_props[] = {
+static const enum power_supply_property energy_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -661,7 +664,7 @@ static ssize_t acpi_battery_alarm_show(struct device *dev,
{
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- return sprintf(buf, "%d\n", battery->alarm * 1000);
+ return sysfs_emit(buf, "%d\n", battery->alarm * 1000);
}
static ssize_t acpi_battery_alarm_store(struct device *dev,
@@ -678,12 +681,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static const struct device_attribute alarm_attr = {
+static struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
+static struct attribute *acpi_battery_attrs[] = {
+ &alarm_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(acpi_battery);
+
/*
* The Battery Hooking API
*
@@ -756,6 +765,21 @@ end:
}
EXPORT_SYMBOL_GPL(battery_hook_register);
+static void devm_battery_hook_unregister(void *data)
+{
+ struct acpi_battery_hook *hook = data;
+
+ battery_hook_unregister(hook);
+}
+
+int devm_battery_hook_register(struct device *dev, struct acpi_battery_hook *hook)
+{
+ battery_hook_register(hook);
+
+ return devm_add_action_or_reset(dev, devm_battery_hook_unregister, hook);
+}
+EXPORT_SYMBOL_GPL(devm_battery_hook_register);
+
/*
* This function gets called right after the battery sysfs
* attributes have been added, so that the drivers that
@@ -823,7 +847,10 @@ static void __exit battery_hook_exit(void)
static int sysfs_add_battery(struct acpi_battery *battery)
{
- struct power_supply_config psy_cfg = { .drv_data = battery, };
+ struct power_supply_config psy_cfg = {
+ .drv_data = battery,
+ .attr_grp = acpi_battery_groups,
+ };
bool full_cap_broken = false;
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
@@ -868,7 +895,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
return result;
}
battery_hook_add_battery(battery);
- return device_create_file(&battery->bat->dev, &alarm_attr);
+ return 0;
}
static void sysfs_remove_battery(struct acpi_battery *battery)
@@ -879,7 +906,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
return;
}
battery_hook_remove_battery(battery);
- device_remove_file(&battery->bat->dev, &alarm_attr);
power_supply_unregister(battery->bat);
battery->bat = NULL;
mutex_unlock(&battery->sysfs_lock);
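
For the new devm_battery_hook_register() helper above, a minimal usage sketch; the driver, device and hook names are hypothetical, the hook's callbacks and .name are assumed to be filled in elsewhere, and the declaration is assumed to live alongside battery_hook_register() in <acpi/battery.h>:

#include <linux/platform_device.h>
#include <acpi/battery.h>

/* Illustrative only: callbacks and .name are assumed to be set elsewhere. */
static struct acpi_battery_hook example_hook;

static int example_probe(struct platform_device *pdev)
{
	/* The hook is unregistered automatically when the driver unbinds. */
	return devm_battery_hook_register(&pdev->dev, &example_hook);
}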
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 787eca838410..bdbd60ae8897 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -329,6 +329,8 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_THERMAL))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT;
+ if (IS_ENABLED(CONFIG_ACPI_BATTERY))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1d857978f5f4..dd3d3082c8c7 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -160,6 +160,7 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
+show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
@@ -196,6 +197,7 @@ static struct attribute *cppc_attrs[] = {
&highest_perf.attr,
&lowest_perf.attr,
&lowest_nonlinear_perf.attr,
+ &guaranteed_perf.attr,
&nominal_perf.attr,
&nominal_freq.attr,
&lowest_freq.attr,
@@ -1837,7 +1839,7 @@ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
u16 val = (u16)get_unaligned((const u16 *)
(dmi_data + DMI_PROCESSOR_MAX_SPEED));
- *mhz = val > *mhz ? val : *mhz;
+ *mhz = umax(val, *mhz);
}
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e7793ee9e649..299ec653388c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
- for (i = 0; i < bytes; ++i, ++address, ++value)
+ for (i = 0; i < bytes; ++i, ++address, ++value) {
result = (function == ACPI_READ) ?
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
+ if (result < 0)
+ break;
+ }
if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
@@ -1348,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
return AE_NOT_FOUND;
case -ETIME:
return AE_TIME;
- default:
+ case 0:
return AE_OK;
+ default:
+ return AE_ERROR;
}
}
@@ -1502,6 +1507,9 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
+ if (scope_handle != ec->handle)
+ acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
+
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index f89d19c922dc..db25a3898af7 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -10,6 +10,8 @@
#ifndef _ACPI_FAN_H_
#define _ACPI_FAN_H_
+#include <linux/kconfig.h>
+
#define ACPI_FAN_DEVICE_IDS \
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
@@ -57,4 +59,11 @@ struct acpi_fan {
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
+
+#if IS_REACHABLE(CONFIG_HWMON)
+int devm_acpi_fan_create_hwmon(struct acpi_device *device);
+#else
+static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
+#endif
+
#endif
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index ff72e4ef8738..7cea4495f19b 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -336,6 +336,10 @@ static int acpi_fan_probe(struct platform_device *pdev)
if (result)
return result;
+ result = devm_acpi_fan_create_hwmon(device);
+ if (result)
+ return result;
+
result = acpi_fan_create_attributes(device);
if (result)
return result;
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
new file mode 100644
index 000000000000..bd0d31a398fa
--- /dev/null
+++ b/drivers/acpi/fan_hwmon.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * hwmon interface for the ACPI Fan driver.
+ *
+ * Copyright (C) 2024 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "fan.h"
+
+/* Returned when the ACPI fan does not support speed or power reporting */
+#define FAN_SPEED_UNAVAILABLE U32_MAX
+#define FAN_POWER_UNAVAILABLE U32_MAX
+
+static struct acpi_fan_fps *acpi_fan_get_current_fps(struct acpi_fan *fan, u64 control)
+{
+ unsigned int i;
+
+ for (i = 0; i < fan->fps_count; i++) {
+ if (fan->fps[i].control == control)
+ return &fan->fps[i];
+ }
+
+ return NULL;
+}
+
+static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct acpi_fan *fan = drvdata;
+ unsigned int i;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_target:
+ /*
+ * When in fine grain control mode, not every fan control value
+ * has an associated fan performance state.
+ */
+ if (fan->fif.fine_grain_ctrl)
+ return 0;
+
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ /*
+ * When in fine grain control mode, not every fan control value
+ * has an associated fan performance state.
+ */
+ if (fan->fif.fine_grain_ctrl)
+ return 0;
+
+ /*
+ * When no fan performance state contains valid power data, the
+ * associated attribute should not be created.
+ */
+ for (i = 0; i < fan->fps_count; i++) {
+ if (fan->fps[i].power != FAN_POWER_UNAVAILABLE)
+ return 0444;
+ }
+
+ return 0;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct acpi_device *adev = to_acpi_device(dev->parent);
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ struct acpi_fan_fps *fps;
+ struct acpi_fan_fst fst;
+ int ret;
+
+ ret = acpi_fan_get_fst(adev, &fst);
+ if (ret < 0)
+ return ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ if (fst.speed == FAN_SPEED_UNAVAILABLE)
+ return -ENODEV;
+
+ if (fst.speed > LONG_MAX)
+ return -EOVERFLOW;
+
+ *val = fst.speed;
+ return 0;
+ case hwmon_fan_target:
+ fps = acpi_fan_get_current_fps(fan, fst.control);
+ if (!fps)
+ return -EIO;
+
+ if (fps->speed > LONG_MAX)
+ return -EOVERFLOW;
+
+ *val = fps->speed;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ fps = acpi_fan_get_current_fps(fan, fst.control);
+ if (!fps)
+ return -EIO;
+
+ if (fps->power == FAN_POWER_UNAVAILABLE)
+ return -ENODEV;
+
+ if (fps->power > LONG_MAX / MICROWATT_PER_MILLIWATT)
+ return -EOVERFLOW;
+
+ *val = fps->power * MICROWATT_PER_MILLIWATT;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops acpi_fan_hwmon_ops = {
+ .is_visible = acpi_fan_hwmon_is_visible,
+ .read = acpi_fan_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const acpi_fan_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_TARGET),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
+ .ops = &acpi_fan_hwmon_ops,
+ .info = acpi_fan_hwmon_info,
+};
+
+int devm_acpi_fan_create_hwmon(struct acpi_device *device)
+{
+ struct acpi_fan *fan = acpi_driver_data(device);
+ struct device *hdev;
+
+ hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
+ &acpi_fan_hwmon_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hdev);
+}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 2a0e9fc7b74c..601b670356e5 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -302,6 +302,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
void acpi_mipi_scan_crs_csi2(void);
void acpi_mipi_init_crs_csi2_swnodes(void);
void acpi_mipi_crs_csi2_cleanup(void);
+#ifdef CONFIG_X86
bool acpi_graph_ignore_port(acpi_handle handle);
+#else
+static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; }
+#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index d05413a0672a..92b658f92dc0 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void)
acpi_mipi_del_crs_csi2(csi2);
}
-static const struct dmi_system_id dmi_ignore_port_nodes[] = {
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
- },
- },
- { }
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */
+static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = {
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ {}
};
static const char *strnext(const char *s1, const char *s2)
@@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle)
static bool dmi_tested, ignore_port;
if (!dmi_tested) {
- ignore_port = dmi_first_match(dmi_ignore_port_nodes);
+ if (dmi_name_in_vendors("Dell Inc.") &&
+ x86_match_cpu(dell_broken_mipi_disco_cpu_gens))
+ ignore_port = true;
+
dmi_tested = true;
}
@@ -794,3 +803,4 @@ out_free:
kfree(orig_path);
return false;
}
+#endif
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 2c8ccc91ebe6..febd9e51350b 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -408,7 +408,7 @@ static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_px
if (target && target->processor_pxm == init_pxm) {
hmat_update_target_access(target, type, value,
ACCESS_COORDINATE_LOCAL);
- /* If the node has a CPU, update access 1 */
+ /* If the node has a CPU, update access ACCESS_COORDINATE_CPU */
if (node_state(pxm_to_node(init_pxm), N_CPU))
hmat_update_target_access(target, type, value,
ACCESS_COORDINATE_CPU);
@@ -948,7 +948,7 @@ static int hmat_set_default_dram_perf(void)
target = find_mem_target(pxm);
if (!target)
continue;
- attrs = &target->coord[1];
+ attrs = &target->coord[ACCESS_COORDINATE_CPU];
rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
if (rc)
return rc;
@@ -975,7 +975,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
mutex_unlock(&target_lock);
- perf = &target->coord[1];
+ perf = &target->coord[ACCESS_COORDINATE_CPU];
if (mt_perf_to_adistance(perf, adist))
return NOTIFY_OK;
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index 4a9704730224..d2f7fd7743a1 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -217,4 +217,5 @@ int platform_profile_remove(void)
EXPORT_SYMBOL_GPL(platform_profile_remove);
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_DESCRIPTION("ACPI platform profile sysfs interface");
MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index f20dbda1a831..134e9ca8eaa2 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -31,7 +31,7 @@ struct intel_pmic_opregion {
static struct intel_pmic_opregion *intel_pmic_opregion;
-static int pmic_get_reg_bit(int address, struct pmic_table *table,
+static int pmic_get_reg_bit(int address, const struct pmic_table *table,
int count, int *reg, int *bit)
{
int i;
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
index d956b03a6ca0..006f0780ffab 100644
--- a/drivers/acpi/pmic/intel_pmic.h
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -21,9 +21,9 @@ struct intel_pmic_opregion_data {
u32 reg_address, u32 value, u32 mask);
int (*lpat_raw_to_temp)(struct acpi_lpat_conversion_table *lpat_table,
int raw);
- struct pmic_table *power_table;
+ const struct pmic_table *power_table;
int power_table_count;
- struct pmic_table *thermal_table;
+ const struct pmic_table *thermal_table;
int thermal_table_count;
/* For generic exec_mipi_pmic_seq_element handling */
int pmic_i2c_address;
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index e247615189fa..c332afbf82bd 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -24,7 +24,7 @@
#define VSWITCH1_OUTPUT BIT(4)
#define VUSBPHY_CHARGE BIT(1)
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
{
.address = 0x0,
.reg = 0x63,
@@ -177,7 +177,7 @@ static struct pmic_table power_table[] = {
} /* MOFF -> MODEMCTRL Bit 0 */
};
-static struct pmic_table thermal_table[] = {
+static const struct pmic_table thermal_table[] = {
{
.address = 0x00,
.reg = 0x4F39
diff --git a/drivers/acpi/pmic/intel_pmic_bytcrc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index 2b09f8da5400..b4c21a75294a 100644
--- a/drivers/acpi/pmic/intel_pmic_bytcrc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -16,7 +16,7 @@
#define PMIC_A0LOCK_REG 0xc5
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
/* {
.address = 0x00,
.reg = ??,
@@ -134,7 +134,7 @@ static struct pmic_table power_table[] = {
}, /* V105 -> V1P05S, L2 SRAM */
};
-static struct pmic_table thermal_table[] = {
+static const struct pmic_table thermal_table[] = {
{
.address = 0x00,
.reg = 0x75
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index c84ef3d15181..ecb36fbc1e7f 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -8,18 +8,22 @@
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/init.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/platform_device.h>
+#include <asm/byteorder.h>
#include "intel_pmic.h"
/* registers stored in 16bit BE (high:low, total 10bit) */
+#define PMIC_REG_MASK GENMASK(9, 0)
+
#define CHTDC_TI_VBAT 0x54
#define CHTDC_TI_DIETEMP 0x56
#define CHTDC_TI_BPTHERM 0x58
#define CHTDC_TI_GPADC 0x5a
-static struct pmic_table chtdc_ti_power_table[] = {
+static const struct pmic_table chtdc_ti_power_table[] = {
{ .address = 0x00, .reg = 0x41 }, /* LDO1 */
{ .address = 0x04, .reg = 0x42 }, /* LDO2 */
{ .address = 0x08, .reg = 0x43 }, /* LDO3 */
@@ -35,7 +39,7 @@ static struct pmic_table chtdc_ti_power_table[] = {
{ .address = 0x30, .reg = 0x4e }, /* LD14 */
};
-static struct pmic_table chtdc_ti_thermal_table[] = {
+static const struct pmic_table chtdc_ti_thermal_table[] = {
{
.address = 0x00,
.reg = CHTDC_TI_GPADC
@@ -73,7 +77,7 @@ static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg, int bit,
if (regmap_read(regmap, reg, &data))
return -EIO;
- *value = data & 1;
+ *value = data & BIT(0);
return 0;
}
@@ -85,13 +89,12 @@ static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg, int bit,
static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
- u8 buf[2];
+ __be16 buf;
- if (regmap_bulk_read(regmap, reg, buf, 2))
+ if (regmap_bulk_read(regmap, reg, &buf, sizeof(buf)))
return -EIO;
- /* stored in big-endian */
- return ((buf[0] & 0x03) << 8) | buf[1];
+ return be16_to_cpu(buf) & PMIC_REG_MASK;
}
static const struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index f2c42f4c79ca..81caede51ca2 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -70,7 +70,7 @@
* "regulator: whiskey_cove: implements Whiskey Cove pmic VRF support"
* https://github.com/intel-aero/meta-intel-aero/blob/master/recipes-kernel/linux/linux-yocto/0019-regulator-whiskey_cove-implements-WhiskeyCove-pmic-V.patch
*/
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
{
.address = 0x0,
.reg = CHT_WC_V1P8A_CTRL,
@@ -236,11 +236,12 @@ static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
u32 reg_address,
u32 value, u32 mask)
{
+ struct device *dev = regmap_get_device(regmap);
u32 address;
if (i2c_client_address > 0xff || reg_address > 0xff) {
- pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
- __func__, i2c_client_address, reg_address);
+ dev_warn(dev, "warning addresses too big client 0x%x reg 0x%x\n",
+ i2c_client_address, reg_address);
return -ERANGE;
}
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 61bbe4c24d87..49bda5e0c8aa 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -26,7 +26,7 @@
#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
-static struct pmic_table power_table[] = {
+static const struct pmic_table power_table[] = {
{
.address = 0x00,
.reg = 0x13,
@@ -129,7 +129,7 @@ static struct pmic_table power_table[] = {
};
/* TMP0 - TMP5 are the same, all from GPADC */
-static struct pmic_table thermal_table[] = {
+static const struct pmic_table thermal_table[] = {
{
.address = 0x00,
.reg = XPOWER_GPADC_LOW
@@ -255,7 +255,7 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
if (ret)
return ret;
- ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
+ ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, sizeof(buf));
if (ret == 0)
ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
@@ -274,11 +274,12 @@ static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap,
u16 i2c_address, u32 reg_address,
u32 value, u32 mask)
{
+ struct device *dev = regmap_get_device(regmap);
int ret;
if (i2c_address != 0x34) {
- pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
- __func__, i2c_address, reg_address, value, mask);
+ dev_err(dev, "Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
+ i2c_address, reg_address, value, mask);
return -ENXIO;
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index b203cfe28550..b04b684f3190 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -90,7 +90,8 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
struct acpi_madt_generic_interrupt *gicc =
container_of(entry, struct acpi_madt_generic_interrupt, header);
- if (!acpi_gicc_is_usable(gicc))
+ if (!(gicc->flags &
+ (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
return -ENODEV;
/* device_declaration means Device object in DSDT, in the
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 67db60eda370..cb52dd000b95 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -33,7 +33,6 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
-static int acpi_processor_start(struct device *dev);
static int acpi_processor_stop(struct device *dev);
static const struct acpi_device_id processor_device_ids[] = {
@@ -47,7 +46,6 @@ static struct device_driver acpi_processor_driver = {
.name = "processor",
.bus = &cpu_subsys,
.acpi_match_table = processor_device_ids,
- .probe = acpi_processor_start,
.remove = acpi_processor_stop,
};
@@ -115,12 +113,9 @@ static int acpi_soft_cpu_online(unsigned int cpu)
* CPU got physically hotplugged and onlined for the first time:
* Initialize missing things.
*/
- if (pr->flags.need_hotplug_init) {
+ if (!pr->flags.previously_online) {
int ret;
- pr_info("Will online and init hotplugged CPU: %d\n",
- pr->id);
- pr->flags.need_hotplug_init = 0;
ret = __acpi_processor_start(device);
WARN(ret, "Failed to start CPU: %d\n", pr->id);
} else {
@@ -167,9 +162,6 @@ static int __acpi_processor_start(struct acpi_device *device)
if (!pr)
return -ENODEV;
- if (pr->flags.need_hotplug_init)
- return 0;
-
result = acpi_cppc_processor_probe(pr);
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
@@ -185,32 +177,21 @@ static int __acpi_processor_start(struct acpi_device *device)
status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
acpi_processor_notify, device);
- if (ACPI_SUCCESS(status))
- return 0;
+ if (!ACPI_SUCCESS(status)) {
+ result = -ENODEV;
+ goto err_thermal_exit;
+ }
+ pr->flags.previously_online = 1;
- result = -ENODEV;
- acpi_processor_thermal_exit(pr, device);
+ return 0;
+err_thermal_exit:
+ acpi_processor_thermal_exit(pr, device);
err_power_exit:
acpi_processor_power_exit(pr);
return result;
}
-static int acpi_processor_start(struct device *dev)
-{
- struct acpi_device *device = ACPI_COMPANION(dev);
- int ret;
-
- if (!device)
- return -ENODEV;
-
- /* Protect against concurrent CPU hotplug operations */
- cpu_hotplug_disable();
- ret = __acpi_processor_start(device);
- cpu_hotplug_enable();
- return ret;
-}
-
static int acpi_processor_stop(struct device *dev)
{
struct acpi_device *device = ACPI_COMPANION(dev);
@@ -279,9 +260,9 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
- result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "acpi/cpu-drv:online",
- acpi_soft_cpu_online, NULL);
+ result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "acpi/cpu-drv:online",
+ acpi_soft_cpu_online, NULL);
if (result < 0)
goto err;
hp_online = result;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index bd6a7857ce05..831fa4a12159 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -16,7 +16,6 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h> /* need_resched() */
-#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
@@ -386,25 +385,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}
-static int acpi_cst_latency_cmp(const void *a, const void *b)
+static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
{
- const struct acpi_processor_cx *x = a, *y = b;
+ int i, j, k;
- if (!(x->valid && y->valid))
- return 0;
- if (x->latency > y->latency)
- return 1;
- if (x->latency < y->latency)
- return -1;
- return 0;
-}
-static void acpi_cst_latency_swap(void *a, void *b, int n)
-{
- struct acpi_processor_cx *x = a, *y = b;
+ for (i = 1; i < length; i++) {
+ if (!states[i].valid)
+ continue;
- if (!(x->valid && y->valid))
- return;
- swap(x->latency, y->latency);
+ for (j = i - 1, k = i; j >= 0; j--) {
+ if (!states[j].valid)
+ continue;
+
+ if (states[j].latency > states[k].latency)
+ swap(states[j].latency, states[k].latency);
+
+ k = j;
+ }
+ }
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -449,10 +447,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
if (buggy_latency) {
pr_notice("FW issue: working around C-state latencies out of order\n");
- sort(&pr->power.states[1], max_cstate,
- sizeof(struct acpi_processor_cx),
- acpi_cst_latency_cmp,
- acpi_cst_latency_swap);
+ acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
}
lapic_timer_propagate_broadcast(pr);
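
The open-coded acpi_cst_latency_sort() above orders latencies among valid C-states only, leaving invalid slots untouched. A small standalone sketch of the same idea, using a simplified stand-in struct rather than the kernel's struct acpi_processor_cx:

#include <stdio.h>

/* Simplified stand-in for the relevant acpi_processor_cx fields. */
struct ex_cx { int valid; int latency; };

static void ex_swap(int *a, int *b) { int t = *a; *a = *b; *b = t; }

/* Same shape as acpi_cst_latency_sort(): insertion-style sort over valid entries. */
static void ex_latency_sort(struct ex_cx *s, int n)
{
	int i, j, k;

	for (i = 1; i < n; i++) {
		if (!s[i].valid)
			continue;
		for (j = i - 1, k = i; j >= 0; j--) {
			if (!s[j].valid)
				continue;
			if (s[j].latency > s[k].latency)
				ex_swap(&s[j].latency, &s[k].latency);
			k = j;
		}
	}
}

int main(void)
{
	struct ex_cx s[] = { {1, 10}, {1, 50}, {0, 0}, {1, 30} };
	int i;

	ex_latency_sort(s, 4);
	for (i = 0; i < 4; i++)
		printf("%d%s ", s[i].latency, s[i].valid ? "" : "(invalid)");
	/* Prints: 10 30 0(invalid) 50 -- invalid entries keep their slot. */
	return 0;
}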
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b5bf8b81a050..df5d5a554b38 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -525,6 +525,20 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506MU */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506MU"),
+ },
+ },
+ {
+ /* Asus Vivobook Pro N6506MJ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506MJ"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 94e3c000df2e..7a0914055fca 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -77,7 +77,6 @@ struct acpi_battery {
u16 spec;
u8 id;
u8 present:1;
- u8 have_sysfs_alarm:1;
};
#define to_acpi_battery(x) power_supply_get_drvdata(x)
@@ -241,11 +240,11 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property sbs_ac_props[] = {
+static const enum power_supply_property sbs_ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-static enum power_supply_property sbs_charge_battery_props[] = {
+static const enum power_supply_property sbs_charge_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -263,7 +262,7 @@ static enum power_supply_property sbs_charge_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
-static enum power_supply_property sbs_energy_battery_props[] = {
+static const enum power_supply_property sbs_energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static const struct device_attribute alarm_attr = {
+static struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
+static struct attribute *acpi_battery_attrs[] = {
+ &alarm_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(acpi_battery);
+
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery)
static int acpi_battery_add(struct acpi_sbs *sbs, int id)
{
struct acpi_battery *battery = &sbs->battery[id];
- struct power_supply_config psy_cfg = { .drv_data = battery, };
+ struct power_supply_config psy_cfg = {
+ .drv_data = battery,
+ .attr_grp = acpi_battery_groups,
+ };
int result;
battery->id = id;
@@ -548,10 +556,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
goto end;
}
- result = device_create_file(&battery->bat->dev, &alarm_attr);
- if (result)
- goto end;
- battery->have_sysfs_alarm = 1;
end:
pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
@@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
{
struct acpi_battery *battery = &sbs->battery[id];
- if (battery->bat) {
- if (battery->have_sysfs_alarm)
- device_remove_file(&battery->bat->dev, &alarm_attr);
+ if (battery->bat)
power_supply_unregister(battery->bat);
- }
}
static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -610,7 +611,7 @@ static void acpi_sbs_callback(void *context)
if (sbs->charger_exists) {
acpi_ac_get_present(sbs);
if (sbs->charger_present != saved_charger_state)
- kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(sbs->charger);
}
if (sbs->manager_present) {
@@ -622,7 +623,7 @@ static void acpi_sbs_callback(void *context)
acpi_battery_read(bat);
if (saved_battery_state == bat->present)
continue;
- kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE);
+ power_supply_changed(bat->bat);
}
}
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 503773707e01..0aa20623525a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -243,13 +243,17 @@ static int acpi_scan_try_to_offline(struct acpi_device *device)
return 0;
}
-static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
+#define ACPI_SCAN_CHECK_FLAG_STATUS BIT(0)
+#define ACPI_SCAN_CHECK_FLAG_EJECT BIT(1)
+
+static int acpi_scan_check_and_detach(struct acpi_device *adev, void *p)
{
struct acpi_scan_handler *handler = adev->handler;
+ uintptr_t flags = (uintptr_t)p;
- acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, check);
+ acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, p);
- if (check) {
+ if (flags & ACPI_SCAN_CHECK_FLAG_STATUS) {
acpi_bus_get_status(adev);
/*
* Skip devices that are still there and take the enabled
@@ -269,8 +273,6 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
if (handler) {
if (handler->detach)
handler->detach(adev);
-
- adev->handler = NULL;
} else {
device_release_driver(&adev->dev);
}
@@ -280,6 +282,28 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
*/
acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
adev->flags.initialized = false;
+
+ /* For eject this is deferred to acpi_bus_post_eject() */
+ if (!(flags & ACPI_SCAN_CHECK_FLAG_EJECT)) {
+ adev->handler = NULL;
+ acpi_device_clear_enumerated(adev);
+ }
+ return 0;
+}
+
+static int acpi_bus_post_eject(struct acpi_device *adev, void *not_used)
+{
+ struct acpi_scan_handler *handler = adev->handler;
+
+ acpi_dev_for_each_child_reverse(adev, acpi_bus_post_eject, NULL);
+
+ if (handler) {
+ if (handler->post_eject)
+ handler->post_eject(adev);
+
+ adev->handler = NULL;
+ }
+
acpi_device_clear_enumerated(adev);
return 0;
@@ -287,7 +311,9 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
static void acpi_scan_check_subtree(struct acpi_device *adev)
{
- acpi_scan_check_and_detach(adev, (void *)true);
+ uintptr_t flags = ACPI_SCAN_CHECK_FLAG_STATUS;
+
+ acpi_scan_check_and_detach(adev, (void *)flags);
}
static int acpi_scan_hot_remove(struct acpi_device *device)
@@ -295,6 +321,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
acpi_handle handle = device->handle;
unsigned long long sta;
acpi_status status;
+ uintptr_t flags = ACPI_SCAN_CHECK_FLAG_EJECT;
if (device->handler && device->handler->hotplug.demand_offline) {
if (!acpi_scan_is_offline(device, true))
@@ -307,7 +334,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
acpi_handle_debug(handle, "Ejecting\n");
- acpi_bus_trim(device);
+ acpi_scan_check_and_detach(device, (void *)flags);
acpi_evaluate_lck(handle, 0);
/*
@@ -330,6 +357,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
} else if (sta & ACPI_STA_DEVICE_ENABLED) {
acpi_handle_warn(handle,
"Eject incomplete - status 0x%llx\n", sta);
+ } else {
+ acpi_bus_post_eject(device, NULL);
}
return 0;
@@ -2596,7 +2625,9 @@ EXPORT_SYMBOL(acpi_bus_scan);
*/
void acpi_bus_trim(struct acpi_device *adev)
{
- acpi_scan_check_and_detach(adev, NULL);
+ uintptr_t flags = 0;
+
+ acpi_scan_check_and_detach(adev, (void *)flags);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
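
The detach path above now threads bit flags through the void *context argument via uintptr_t casts instead of a bare bool. A minimal standalone sketch of that pattern, with hypothetical names:

#include <stdint.h>

#define EX_CHECK_STATUS (1u << 0)
#define EX_CHECK_EJECT  (1u << 1)

/* Callback unpacks the flags from the opaque context pointer. */
static int ex_visit(void *p)
{
	uintptr_t flags = (uintptr_t)p;

	return (flags & EX_CHECK_EJECT) ? 1 : 0;
}

/* Caller packs the flags the same way acpi_bus_trim() and the eject path now do. */
static int ex_trim(void)
{
	uintptr_t flags = EX_CHECK_STATUS;

	return ex_visit((void *)flags);
}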
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index b976e5fc3fbc..9e1b01c35070 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -198,6 +198,20 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
break;
+ case ACPI_MADT_TYPE_MULTIPROC_WAKEUP:
+ {
+ struct acpi_madt_multiproc_wakeup *p =
+ (struct acpi_madt_multiproc_wakeup *)header;
+ u64 reset_vector = 0;
+
+ if (p->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1)
+ reset_vector = p->reset_vector;
+
+ pr_debug("MP Wakeup (version[%d], mailbox[%#llx], reset[%#llx])\n",
+ p->version, p->mailbox_address, reset_vector);
+ }
+ break;
+
case ACPI_MADT_TYPE_CORE_PIC:
{
struct acpi_madt_core_pic *p = (struct acpi_madt_core_pic *)header;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d67881b50bca..a0cfc857fb55 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
{
+ int temp;
+
if (temp_deci_k == THERMAL_TEMP_INVALID)
return THERMAL_TEMP_INVALID;
- return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
+ temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
tz->kelvin_offset);
+ if (temp <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ return temp;
}
static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 2cc3821b2b16..c11cbe5b6eaa 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -540,6 +540,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Apple MacBook Air 9,1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
@@ -550,6 +558,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Apple MacBook Pro 16,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Dell Inspiron N4010 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c
index 148e29c2c526..258440b899a9 100644
--- a/drivers/acpi/x86/lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -338,8 +338,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
};
static const struct x86_cpu_id lpss_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, NULL),
{}
};
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 7dca73417e2b..ab2b5fa83e1f 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -45,37 +45,37 @@ struct override_status_id {
unsigned long long status;
};
-#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \
+#define ENTRY(status, hid, uid, path, cpu_vfm, dmi...) { \
{ { hid, }, {} }, \
- { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \
+ { X86_MATCH_VFM(cpu_vfm, NULL), {} }, \
{ { .matches = dmi }, {} }, \
uid, \
path, \
status, \
}
-#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
- ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi)
+#define PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \
+ ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_vfm, dmi)
-#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
- ENTRY(0, hid, uid, NULL, cpu_model, dmi)
+#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \
+ ENTRY(0, hid, uid, NULL, cpu_vfm, dmi)
-#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
- ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi)
+#define PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) \
+ ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_vfm, dmi)
-#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
- ENTRY(0, "", NULL, path, cpu_model, dmi)
+#define NOT_PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) \
+ ENTRY(0, "", NULL, path, cpu_vfm, dmi)
static const struct override_status_id override_status_ids[] = {
/*
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
* but Linux uses a separate PWM driver, harmless if not used.
*/
- PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}),
- PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}),
+ PRESENT_ENTRY_HID("80860F09", "1", INTEL_ATOM_SILVERMONT, {}),
+ PRESENT_ENTRY_HID("80862288", "1", INTEL_ATOM_AIRMONT, {}),
/* The Xiaomi Mi Pad 2 uses PWM2 for touchkeys backlight control */
- PRESENT_ENTRY_HID("80862289", "2", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("80862289", "2", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
}),
@@ -84,18 +84,18 @@ static const struct override_status_id override_status_ids[] = {
* The INT0002 device is necessary to clear wakeup interrupt sources
* on Cherry Trail devices, without it we get nobody cared IRQ msgs.
*/
- PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}),
+ PRESENT_ENTRY_HID("INT0002", "1", INTEL_ATOM_AIRMONT, {}),
/*
* On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
* the touchscreen ACPI device until a certain time
* after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed
* *and* _STA has been called at least 3 times since.
*/
- PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
+ PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
- PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
+ PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
}),
@@ -104,7 +104,7 @@ static const struct override_status_id override_status_ids[] = {
* The Dell XPS 15 9550 has a SMO8110 accelerometer /
* HDD freefall sensor which is wrongly marked as not present.
*/
- PRESENT_ENTRY_HID("SMO8810", "1", SKYLAKE, {
+ PRESENT_ENTRY_HID("SMO8810", "1", INTEL_SKYLAKE, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9550"),
}),
@@ -121,19 +121,19 @@ static const struct override_status_id override_status_ids[] = {
* was copy-pasted from the GPD win, so it has a disabled KIOX000A
* node which we should not enable, thus we also check the BIOS date.
*/
- PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
}),
- PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
}),
- PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
+ PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
@@ -146,7 +146,7 @@ static const struct override_status_id override_status_ids[] = {
* method sets a GPIO causing the PCI wifi card to turn off.
* See above remark about uniqueness of the DMI match.
*/
- NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, {
+ NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", INTEL_ATOM_AIRMONT, {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
@@ -158,7 +158,7 @@ static const struct override_status_id override_status_ids[] = {
* as both ACCL0001 and MAGN0001. As we can only ever register an
* i2c client for one of them, ignore MAGN0001.
*/
- NOT_PRESENT_ENTRY_HID("MAGN0001", "1", ATOM_SILVERMONT, {
+ NOT_PRESENT_ENTRY_HID("MAGN0001", "1", INTEL_ATOM_SILVERMONT, {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"),
}),
@@ -206,16 +206,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
}
/*
- * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * AMD systems from Renoir onwards *require* that the NVME controller
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
*
* This is "typically" accomplished using the `StorageD3Enable`
* property in the _DSD that is checked via the `acpi_storage_d3` function
- * but this property was introduced after many of these systems launched
- * and most OEM systems don't have it in their BIOS.
+ * but some OEM systems still don't have it in their BIOS.
*
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
- * a hardcoded allowlist for D3 support, which was used for these platforms.
+ * a hardcoded allowlist for D3 support as well as a registry key to override
+ * the BIOS, which has been used for these cases.
*
* This allows quirking on Linux in a similar fashion.
*
@@ -228,19 +228,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
- disk in the system.
+ * disk in the system.
+ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
+ * NVME device.
*/
-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
- {}
-};
-
bool force_storage_d3(void)
{
- return x86_match_cpu(storage_d3_cpu_ids);
+ if (!cpu_feature_enabled(X86_FEATURE_ZEN))
+ return false;
+ return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
}
/*
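For reference, a hedged sketch (recalled from memory, not part of this patch) of how the quirk is consumed: the acpi_storage_d3() helper mentioned in the comment above treats force_storage_d3() as an override for a missing StorageD3Enable property, roughly:

	/* sketch of the caller side; the in-tree helper may differ in detail */
	bool acpi_storage_d3(struct device *dev)
	{
		struct acpi_device *adev = ACPI_COMPANION(dev);
		u8 val;

		if (force_storage_d3())		/* platform quirk above */
			return true;
		if (!adev || fwnode_property_read_u8(acpi_fwnode_handle(adev),
						     "StorageD3Enable", &val))
			return false;
		return val == 1;
	}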
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b595494ab9b4..e00536b49552 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -118,7 +118,7 @@ config SATA_AHCI
config SATA_MOBILE_LPM_POLICY
int "Default SATA Link Power Management policy"
range 0 4
- default 0
+ default 3
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6548f10e61d9..a05c17249448 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -429,7 +429,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
- { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1733,8 +1732,18 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
* Management Interaction in AHCI 1.3.1. Therefore, do not enable
* LPM if the port advertises itself as an external port.
*/
- if (ap->pflags & ATA_PFLAG_EXTERNAL)
+ if (ap->pflags & ATA_PFLAG_EXTERNAL) {
+ ata_port_dbg(ap, "external port, not enabling LPM\n");
return;
+ }
+
+ /* If no LPM states are supported by the HBA, do not bother with LPM */
+ if ((ap->host->flags & ATA_HOST_NO_PART) &&
+ (ap->host->flags & ATA_HOST_NO_SSC) &&
+ (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
+ ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
+ return;
+ }
/* user modified policy via module param */
if (mobile_lpm_policy != -1) {
@@ -1968,8 +1977,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
- if (!host)
- return -ENOMEM;
+ if (!host) {
+ rc = -ENOMEM;
+ goto err_rm_sysfs_file;
+ }
host->private_data = hpriv;
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
@@ -2024,11 +2035,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
rc = ahci_pci_reset_controller(host);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
ahci_pci_init_controller(host);
ahci_pci_print_info(host);
@@ -2037,10 +2048,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ahci_host_activate(host, &ahci_sht);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
pm_runtime_put_noidle(&pdev->dev);
return 0;
+
+err_rm_sysfs_file:
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr, NULL);
+ return rc;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 83431aae74d8..fdfa7b266218 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2075,13 +2075,6 @@ static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
- /*
- * rtf may already be filled (e.g. for successful NCQ commands).
- * If that is the case, we have nothing to do.
- */
- if (qc->flags & ATA_QCFLAG_RTF_FILLED)
- return;
-
if (pp->fbs_enabled)
rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
@@ -2095,7 +2088,6 @@ static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
!(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
- qc->flags |= ATA_QCFLAG_RTF_FILLED;
return;
}
@@ -2118,12 +2110,10 @@ static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
*/
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
- qc->flags |= ATA_QCFLAG_RTF_FILLED;
return;
}
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
@@ -2158,6 +2148,7 @@ static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
if (qc && ata_is_ncq(qc->tf.protocol)) {
qc->result_tf.status = status;
qc->result_tf.error = error;
+ qc->result_tf.flags = qc->tf.flags;
qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
done_mask &= ~(1ULL << tag);
@@ -2182,6 +2173,7 @@ static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
fis += RX_FIS_SDB;
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
+ qc->result_tf.flags = qc->tf.flags;
qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
done_mask &= ~(1ULL << tag);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4f35aab81a0a..c7752dc80028 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -86,7 +86,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
-atomic_t ata_print_id = ATOMIC_INIT(0);
+static DEFINE_IDA(ata_ida);
#ifdef CONFIG_ATA_FORCE
struct ata_force_param {
@@ -4136,8 +4136,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
{ "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
- /* Crucial BX100 SSD 500GB has broken LPM support */
- { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+ /* Crucial devices with broken LPM support */
+ { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4155,6 +4155,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
+ /* AMD Radeon devices with broken LPM support */
+ { "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
+
+ /* Apacer models with LPM issues */
+ { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
+
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
@@ -4794,8 +4800,16 @@ static void fill_result_tf(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ /*
+ * rtf may already be filled (e.g. for successful NCQ commands).
+ * If that is the case, we have nothing to do.
+ */
+ if (qc->flags & ATA_QCFLAG_RTF_FILLED)
+ return;
+
qc->result_tf.flags = qc->tf.flags;
ap->ops->qc_fill_rtf(qc);
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
static void ata_verify_xfer(struct ata_queued_cmd *qc)
@@ -5449,6 +5463,7 @@ int sata_link_init_spd(struct ata_link *link)
struct ata_port *ata_port_alloc(struct ata_host *host)
{
struct ata_port *ap;
+ int id;
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
@@ -5456,8 +5471,12 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
ap->lock = &host->lock;
- ap->print_id = -1;
- ap->local_port_no = -1;
+ id = ida_alloc_min(&ata_ida, 1, GFP_KERNEL);
+ if (id < 0) {
+ kfree(ap);
+ return NULL;
+ }
+ ap->print_id = id;
ap->host = host;
ap->dev = host->dev;
@@ -5482,6 +5501,20 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
return ap;
}
+EXPORT_SYMBOL_GPL(ata_port_alloc);
+
+void ata_port_free(struct ata_port *ap)
+{
+ if (!ap)
+ return;
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
+ kfree(ap->ncq_sense_buf);
+ ida_free(&ata_ida, ap->print_id);
+ kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_port_free);
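Usage sketch for the newly exported pair (hypothetical LLD error path; "setup_failed" is an assumed placeholder, not a real flag):

	struct ata_port *ap;

	ap = ata_port_alloc(host);
	if (!ap)
		return -ENOMEM;
	/* ... LLD-specific port setup ... */
	if (setup_failed) {
		/* also releases the print_id allocated from ata_ida */
		ata_port_free(ap);
		return -EIO;
	}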
static void ata_devres_release(struct device *gendev, void *res)
{
@@ -5509,12 +5542,7 @@ static void ata_host_release(struct kref *kref)
int i;
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- kfree(ap->pmp_link);
- kfree(ap->slave_link);
- kfree(ap->ncq_sense_buf);
- kfree(ap);
+ ata_port_free(host->ports[i]);
host->ports[i] = NULL;
}
kfree(host);
@@ -5534,24 +5562,19 @@ EXPORT_SYMBOL_GPL(ata_host_put);
/**
* ata_host_alloc - allocate and init basic ATA host resources
* @dev: generic device this host is associated with
- * @max_ports: maximum number of ATA ports associated with this host
+ * @n_ports: the number of ATA ports associated with this host
*
* Allocate and initialize basic ATA host resources. LLD calls
* this function to allocate a host, initializes it fully and
* attaches it using ata_host_register().
*
- * @max_ports ports are allocated and host->n_ports is
- * initialized to @max_ports. The caller is allowed to decrease
- * host->n_ports before calling ata_host_register(). The unused
- * ports will be automatically freed on registration.
- *
* RETURNS:
* Allocate ATA host on success, NULL on failure.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*/
-struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+struct ata_host *ata_host_alloc(struct device *dev, int n_ports)
{
struct ata_host *host;
size_t sz;
@@ -5559,13 +5582,15 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
void *dr;
/* alloc a container for our list of ATA ports (buses) */
- sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
+ sz = sizeof(struct ata_host) + n_ports * sizeof(void *);
host = kzalloc(sz, GFP_KERNEL);
if (!host)
return NULL;
- if (!devres_open_group(dev, NULL, GFP_KERNEL))
- goto err_free;
+ if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
+ kfree(host);
+ return NULL;
+ }
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
@@ -5577,11 +5602,11 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->dev = dev;
- host->n_ports = max_ports;
+ host->n_ports = n_ports;
kref_init(&host->kref);
/* allocate ports bound to this host */
- for (i = 0; i < max_ports; i++) {
+ for (i = 0; i < n_ports; i++) {
struct ata_port *ap;
ap = ata_port_alloc(host);
@@ -5597,8 +5622,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
err_out:
devres_release_group(dev, NULL);
- err_free:
- kfree(host);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
@@ -5892,19 +5915,6 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
return -EINVAL;
}
- /* Blow away unused ports. This happens when LLD can't
- * determine the exact number of ports to allocate at
- * allocation time.
- */
- for (i = host->n_ports; host->ports[i]; i++)
- kfree(host->ports[i]);
-
- /* give ports names and add SCSI hosts */
- for (i = 0; i < host->n_ports; i++) {
- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
- host->ports[i]->local_port_no = i + 1;
- }
-
/* Create associated sysfs transport objects */
for (i = 0; i < host->n_ports; i++) {
rc = ata_tport_add(host->dev,host->ports[i]);
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 9e047bf912b1..48660d445602 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1205,55 +1205,6 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/**
- * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
- * @host: ATA host container for all SAS ports
- * @port_info: Information from low-level host driver
- * @shost: SCSI host that the scsi device is attached to
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * ata_port pointer on success / NULL on failure.
- */
-
-struct ata_port *ata_sas_port_alloc(struct ata_host *host,
- struct ata_port_info *port_info,
- struct Scsi_Host *shost)
-{
- struct ata_port *ap;
-
- ap = ata_port_alloc(host);
- if (!ap)
- return NULL;
-
- ap->port_no = 0;
- ap->lock = &host->lock;
- ap->pio_mask = port_info->pio_mask;
- ap->mwdma_mask = port_info->mwdma_mask;
- ap->udma_mask = port_info->udma_mask;
- ap->flags |= port_info->flags;
- ap->ops = port_info->port_ops;
- ap->cbl = ATA_CBL_SATA;
- ap->print_id = atomic_inc_return(&ata_print_id);
-
- return ap;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
-
-int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
-{
- return ata_tport_add(parent, ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_add);
-
-void ata_sas_tport_delete(struct ata_port *ap)
-{
- ata_tport_delete(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
-
-/**
* ata_sas_device_configure - Default device_configure routine for libata
* devices
* @sdev: SCSI device to configure
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cdf29b178ddc..d6f5e25e1ed8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -230,6 +230,87 @@ void ata_scsi_set_sense_information(struct ata_device *dev,
SCSI_SENSE_BUFFERSIZE, information);
}
+/**
+ * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
+ * @qc: ATA PASS-THROUGH command.
+ *
+ * Populates "ATA Status Return sense data descriptor" / "Fixed format
+ * sense data" with ATA taskfile fields.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
+{
+ struct ata_device *dev = qc->dev;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->result_tf;
+ unsigned char *sb = cmd->sense_buffer;
+
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't set ATA PT sense fields\n");
+ return;
+ }
+
+ if ((sb[0] & 0x7f) >= 0x72) {
+ unsigned char *desc;
+ u8 len;
+
+ /* descriptor format */
+ len = sb[7];
+ desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+ if (!desc) {
+ if (SCSI_SENSE_BUFFERSIZE < len + 14)
+ return;
+ sb[7] = len + 14;
+ desc = sb + 8 + len;
+ }
+ desc[0] = 9;
+ desc[1] = 12;
+ /*
+ * Copy registers into sense buffer.
+ */
+ desc[2] = 0x00;
+ desc[3] = tf->error;
+ desc[5] = tf->nsect;
+ desc[7] = tf->lbal;
+ desc[9] = tf->lbam;
+ desc[11] = tf->lbah;
+ desc[12] = tf->device;
+ desc[13] = tf->status;
+
+ /*
+ * Fill in Extend bit, and the high order bytes
+ * if applicable.
+ */
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[2] |= 0x01;
+ desc[4] = tf->hob_nsect;
+ desc[6] = tf->hob_lbal;
+ desc[8] = tf->hob_lbam;
+ desc[10] = tf->hob_lbah;
+ }
+ } else {
+ /* Fixed sense format */
+ sb[0] |= 0x80;
+ sb[3] = tf->error;
+ sb[4] = tf->status;
+ sb[5] = tf->device;
+ sb[6] = tf->nsect;
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ sb[8] |= 0x80;
+ if (tf->hob_nsect)
+ sb[8] |= 0x40;
+ if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+ sb[8] |= 0x20;
+ }
+ sb[9] = tf->lbal;
+ sb[10] = tf->lbam;
+ sb[11] = tf->lbah;
+ }
+}
+
static void ata_scsi_set_invalid_field(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field, u8 bit)
{
@@ -711,7 +792,6 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
/**
* ata_to_sense_error - convert ATA error to SCSI error
- * @id: ATA device number
* @drv_stat: value contained in ATA status register
* @drv_err: value contained in ATA error register
* @sk: the sense key we'll fill out
@@ -725,8 +805,8 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
- u8 *asc, u8 *ascq)
+static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
+ u8 *ascq)
{
int i;
@@ -837,10 +917,8 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
* ata_gen_passthru_sense - Generate check condition sense block.
* @qc: Command that completed.
*
- * This function is specific to the ATA descriptor format sense
- * block specified for the ATA pass through commands. Regardless
- * of whether the command errored or not, return a sense
- * block. Copy all controller registers into the sense
+ * This function is specific to the ATA pass through commands.
+ * Regardless of whether the command errored or not, return a sense
* block. If there was no error, we get the request from an ATA
* passthrough command, so we use the following sense data:
* sk = RECOVERED ERROR
@@ -852,13 +930,16 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*/
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
+ struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- unsigned char *desc = sb + 8;
u8 sense_key, asc, ascq;
- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't generate ATA PT sense data\n");
+ return;
+ }
/*
* Use ata_to_sense_error() to map status register bits
@@ -866,71 +947,12 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
*/
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
+ ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
- /*
- * ATA PASS-THROUGH INFORMATION AVAILABLE
- * Always in descriptor format sense.
- */
- scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
- }
-
- if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
- u8 len;
-
- /* descriptor format */
- len = sb[7];
- desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
- if (!desc) {
- if (SCSI_SENSE_BUFFERSIZE < len + 14)
- return;
- sb[7] = len + 14;
- desc = sb + 8 + len;
- }
- desc[0] = 9;
- desc[1] = 12;
- /*
- * Copy registers into sense buffer.
- */
- desc[2] = 0x00;
- desc[3] = tf->error;
- desc[5] = tf->nsect;
- desc[7] = tf->lbal;
- desc[9] = tf->lbam;
- desc[11] = tf->lbah;
- desc[12] = tf->device;
- desc[13] = tf->status;
-
- /*
- * Fill in Extend bit, and the high order bytes
- * if applicable.
- */
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[2] |= 0x01;
- desc[4] = tf->hob_nsect;
- desc[6] = tf->hob_lbal;
- desc[8] = tf->hob_lbam;
- desc[10] = tf->hob_lbah;
- }
- } else {
- /* Fixed sense format */
- desc[0] = tf->error;
- desc[1] = tf->status;
- desc[2] = tf->device;
- desc[3] = tf->nsect;
- desc[7] = 0;
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[8] |= 0x80;
- if (tf->hob_nsect)
- desc[8] |= 0x40;
- if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
- desc[8] |= 0x20;
- }
- desc[9] = tf->lbal;
- desc[10] = tf->lbam;
- desc[11] = tf->lbah;
+ /* ATA PASS-THROUGH INFORMATION AVAILABLE */
+ ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D);
}
}
@@ -953,20 +975,25 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
u64 block;
u8 sense_key, asc, ascq;
- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
-
if (ata_dev_disabled(dev)) {
/* Device disabled after error recovery */
/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
return;
}
+
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't generate sense data\n");
+ return;
+ }
+
/* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
+ ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
} else {
@@ -1024,7 +1051,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
struct ata_device *dev)
{
- struct request_queue *q = sdev->request_queue;
int depth = 1;
if (!ata_id_has_unload(dev->id))
@@ -1038,7 +1064,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
sdev->sector_size = ATA_SECT_SIZE;
/* set DMA padding */
- blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
+ lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1;
/* make room for appending the drain */
lim->max_segments--;
@@ -1632,26 +1658,32 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
- int need_sense = (qc->err_mask != 0) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID);
+ bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID;
+ bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12;
+ bool is_ck_cond_request = cdb[2] & 0x20;
+ bool is_error = qc->err_mask != 0;
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
- * generate because the user forced us to [CK_COND =1], a check
+ * generate because the user forced us to [CK_COND=1], a check
* condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
- * was no error, we use the following sense data:
+ * was no error, and CK_COND=1, we use the following sense data:
* sk = RECOVERED ERROR
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
- if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
- ((cdb[2] & 0x20) || need_sense))
- ata_gen_passthru_sense(qc);
- else if (need_sense)
+ if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) {
+ if (!have_sense)
+ ata_gen_passthru_sense(qc);
+ ata_scsi_set_passthru_sense_fields(qc);
+ if (is_ck_cond_request)
+ set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
+ } else if (is_error && !have_sense) {
ata_gen_ata_sense(qc);
- else
+ } else {
/* Keep the SCSI ML and status byte, clear host byte. */
cmd->result &= 0x0000ffff;
+ }
ata_qc_done(qc);
}
@@ -1831,11 +1863,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- /* set scsi removable (RMB) bit per ata bit, or if the
- * AHCI port says it's external (Hotplug-capable, eSATA).
+ /*
+ * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
+ * device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id) ||
- (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
+ if (ata_id_removable(args->id))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
@@ -2590,14 +2622,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
/* handle completion from EH */
if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into a
- * sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
+ if (!(qc->flags & ATA_QCFLAG_SENSE_VALID))
ata_gen_passthru_sense(qc);
- }
/* SCSI EH automatically locks door if sdev->locked is
* set. Sometimes door lock request continues to
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 3e49a877500e..9e24c33388f9 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -217,7 +217,8 @@ static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL)
ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int);
ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long);
-ata_port_simple_attr(local_port_no, port_no, "%u\n", unsigned int);
+/* We want the port_no sysfs attribute to start at 1 (ap->port_no starts at 0) */
+ata_port_simple_attr(port_no + 1, port_no, "%u\n", unsigned int);
static DECLARE_TRANSPORT_CLASS(ata_port_class,
"ata_port", NULL, NULL, NULL);
@@ -265,6 +266,7 @@ void ata_tport_delete(struct ata_port *ap)
transport_destroy_device(dev);
put_device(dev);
}
+EXPORT_SYMBOL_GPL(ata_tport_delete);
static const struct device_type ata_port_sas_type = {
.name = ATA_PORT_TYPE_NAME,
@@ -329,6 +331,7 @@ int ata_tport_add(struct device *parent,
put_device(dev);
return error;
}
+EXPORT_SYMBOL_GPL(ata_tport_add);
/**
* ata_port_classify - determine device type based on ATA-spec signature
diff --git a/drivers/ata/libata-transport.h b/drivers/ata/libata-transport.h
index 08a57fb9dc61..50cd2cbe8eea 100644
--- a/drivers/ata/libata-transport.h
+++ b/drivers/ata/libata-transport.h
@@ -8,9 +8,6 @@ extern struct scsi_transport_template *ata_scsi_transport_template;
int ata_tlink_add(struct ata_link *link);
void ata_tlink_delete(struct ata_link *link);
-int ata_tport_add(struct device *parent, struct ata_port *ap);
-void ata_tport_delete(struct ata_port *ap);
-
struct scsi_transport_template *ata_attach_transport(void);
void ata_release_transport(struct scsi_transport_template *t);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 38ce13b55474..6abf265f626e 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -32,7 +32,6 @@ enum {
#define ATA_PORT_TYPE_NAME "ata_port"
-extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
@@ -82,7 +81,6 @@ extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
extern int sata_link_init_spd(struct ata_link *link);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
-extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 817838e2f70e..1b85e8bf4ef9 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -816,7 +816,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
/* OHare has issues with non cache aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
lim->dma_alignment = 31;
- blk_queue_update_dma_pad(sdev->request_queue, 31);
+ lim->dma_pad_mask = 31;
/* Tell the world about it */
ata_dev_info(dev, "OHare alignment limits applied\n");
@@ -831,7 +831,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
/* Allright these are bad, apply restrictions */
lim->dma_alignment = 15;
- blk_queue_update_dma_pad(sdev->request_queue, 15);
+ lim->dma_pad_mask = 15;
/* We enable MWI and hack cache line size directly here, this
* is specific to this chipset and not normal values, we happen
@@ -915,10 +915,13 @@ static const struct scsi_host_template pata_macio_sht = {
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
- /* Not sure what the real max is but we know it's less than 64K, let's
- * use 64K minus 256
+ /*
+ * The SCSI core requires the segment size to cover at least a page, so
+ * for 64K page size kernels this must be at least 64K. However the
+ * hardware can't handle 64K, so pata_macio_qc_prep() will split large
+ * requests.
*/
- .max_segment_size = MAX_DBDMA_SEG,
+ .max_segment_size = SZ_64K,
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 3079bfe53d04..7fb21768ca36 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_AUXILIARY_BUS) += auxiliary_sysfs.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index d3a2c40c2f12..3f01f4ec69e5 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -287,6 +287,7 @@ int auxiliary_device_init(struct auxiliary_device *auxdev)
dev->bus = &auxiliary_bus_type;
device_initialize(&auxdev->dev);
+ mutex_init(&auxdev->sysfs.lock);
return 0;
}
EXPORT_SYMBOL_GPL(auxiliary_device_init);
diff --git a/drivers/base/auxiliary_sysfs.c b/drivers/base/auxiliary_sysfs.c
new file mode 100644
index 000000000000..754f21730afd
--- /dev/null
+++ b/drivers/base/auxiliary_sysfs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/slab.h>
+
+#define AUXILIARY_MAX_IRQ_NAME 11
+
+struct auxiliary_irq_info {
+ struct device_attribute sysfs_attr;
+ char name[AUXILIARY_MAX_IRQ_NAME];
+};
+
+static struct attribute *auxiliary_irq_attrs[] = {
+ NULL
+};
+
+static const struct attribute_group auxiliary_irqs_group = {
+ .name = "irqs",
+ .attrs = auxiliary_irq_attrs,
+};
+
+static int auxiliary_irq_dir_prepare(struct auxiliary_device *auxdev)
+{
+ int ret = 0;
+
+ guard(mutex)(&auxdev->sysfs.lock);
+ if (auxdev->sysfs.irq_dir_exists)
+ return 0;
+
+ ret = devm_device_add_group(&auxdev->dev, &auxiliary_irqs_group);
+ if (ret)
+ return ret;
+
+ auxdev->sysfs.irq_dir_exists = true;
+ xa_init(&auxdev->sysfs.irqs);
+ return 0;
+}
+
+/**
+ * auxiliary_device_sysfs_irq_add - add a sysfs entry for the given IRQ
+ * @auxdev: auxiliary bus device to add the sysfs entry to.
+ * @irq: The associated interrupt number.
+ *
+ * This function should be called after the auxiliary device has successfully
+ * received the IRQ.
+ * The driver is responsible for adding a unique IRQ for the auxiliary device.
+ * The driver may invoke this function concurrently from multiple threads, as
+ * long as each call adds a unique IRQ. The driver must not invoke this API
+ * again for an IRQ that has already been added.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq)
+{
+ struct auxiliary_irq_info *info __free(kfree) = NULL;
+ struct device *dev = &auxdev->dev;
+ int ret;
+
+ ret = auxiliary_irq_dir_prepare(auxdev);
+ if (ret)
+ return ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sysfs_attr_init(&info->sysfs_attr.attr);
+ snprintf(info->name, AUXILIARY_MAX_IRQ_NAME, "%d", irq);
+
+ ret = xa_insert(&auxdev->sysfs.irqs, irq, info, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ info->sysfs_attr.attr.name = info->name;
+ ret = sysfs_add_file_to_group(&dev->kobj, &info->sysfs_attr.attr,
+ auxiliary_irqs_group.name);
+ if (ret)
+ goto sysfs_add_err;
+
+ xa_store(&auxdev->sysfs.irqs, irq, no_free_ptr(info), GFP_KERNEL);
+ return 0;
+
+sysfs_add_err:
+ xa_erase(&auxdev->sysfs.irqs, irq);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_add);
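A hedged usage sketch for the new sysfs IRQ helpers (hypothetical auxiliary driver; my_aux_handler and the IRQ source are assumptions):

	/* in the auxiliary driver's probe(), after an IRQ has been obtained */
	int irq = pci_irq_vector(pdev, 0);
	int ret;

	ret = request_irq(irq, my_aux_handler, 0, "my-aux", auxdev);
	if (ret)
		return ret;

	ret = auxiliary_device_sysfs_irq_add(auxdev, irq);
	if (ret) {
		free_irq(irq, auxdev);
		return ret;
	}

	/* on remove(), tear down in reverse order */
	auxiliary_device_sysfs_irq_remove(auxdev, irq);
	free_irq(irq, auxdev);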
+
+/**
+ * auxiliary_device_sysfs_irq_remove - remove a sysfs entry for the given IRQ
+ * @auxdev: auxiliary bus device to remove the sysfs entry from.
+ * @irq: the IRQ to remove.
+ *
+ * This function should be called to remove an IRQ sysfs entry.
+ * The driver must invoke this API when the IRQ is released by the device.
+ */
+void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq)
+{
+ struct auxiliary_irq_info *info __free(kfree) = xa_load(&auxdev->sysfs.irqs, irq);
+ struct device *dev = &auxdev->dev;
+
+ if (!info) {
+ dev_err(&auxdev->dev, "IRQ %d doesn't exist\n", irq);
+ return;
+ }
+ sysfs_remove_file_from_group(&dev->kobj, &info->sysfs_attr.attr,
+ auxiliary_irqs_group.name);
+ xa_erase(&auxdev->sysfs.irqs, irq);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_remove);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 131d96c6090b..2b4c0624b704 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2739,8 +2739,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
+ /* Synchronize with really_probe() */
+ device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ device_unlock(dev);
if (retval)
goto out;
@@ -2845,15 +2848,6 @@ static void devm_attr_group_remove(struct device *dev, void *res)
sysfs_remove_group(&dev->kobj, group);
}
-static void devm_attr_groups_remove(struct device *dev, void *res)
-{
- union device_attr_group_devres *devres = res;
- const struct attribute_group **groups = devres->groups;
-
- dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
- sysfs_remove_groups(&dev->kobj, groups);
-}
-
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
@@ -2886,42 +2880,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
-/**
- * devm_device_add_groups - create a bunch of managed attribute groups
- * @dev: The device to create the group for
- * @groups: The attribute groups to create, NULL terminated
- *
- * This function creates a bunch of managed attribute groups. If an error
- * occurs when creating a group, all previously created groups will be
- * removed, unwinding everything back to the original state when this
- * function was called. It will explicitly warn and error if any of the
- * attribute files being created already exist.
- *
- * Returns 0 on success or error code from sysfs_create_group on failure.
- */
-int devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- union device_attr_group_devres *devres;
- int error;
-
- devres = devres_alloc(devm_attr_groups_remove,
- sizeof(*devres), GFP_KERNEL);
- if (!devres)
- return -ENOMEM;
-
- error = sysfs_create_groups(&dev->kobj, groups);
- if (error) {
- devres_free(devres);
- return error;
- }
-
- devres->groups = groups;
- devres_add(dev, devres);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_device_add_groups);
-
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index c61ecb0c2ae2..b57326fd48d4 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -95,6 +95,7 @@ void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->dev.id;
+ set_cpu_enabled(logical_cpu, false);
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
device_unregister(&cpu->dev);
@@ -273,6 +274,13 @@ static ssize_t print_cpus_offline(struct device *dev,
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+static ssize_t print_cpus_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(cpu_enabled_mask));
+}
+static DEVICE_ATTR(enabled, 0444, print_cpus_enabled, NULL);
+
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -413,6 +421,7 @@ int register_cpu(struct cpu *cpu, int num)
register_cpu_under_node(num, cpu_to_node(num));
dev_pm_qos_expose_latency_limit(&cpu->dev,
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+ set_cpu_enabled(num, true);
return 0;
}
@@ -494,6 +503,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
+ &dev_attr_enabled.attr,
&dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
@@ -558,7 +568,7 @@ static void __init cpu_dev_register_generic(void)
for_each_present_cpu(i) {
ret = arch_register_cpu(i);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
pr_warn("register_cpu %d failed (%d)\n", i, ret);
}
}
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index e42433404854..f0df2da6d522 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -132,9 +132,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
lower_index = mas.index;
lower_last = min -1;
- lower = kmemdup(entry, ((min - mas.index) *
- sizeof(unsigned long)),
- map->alloc_flags);
+ lower = kmemdup_array(entry,
+ min - mas.index, sizeof(*lower),
+ map->alloc_flags);
if (!lower) {
ret = -ENOMEM;
goto out_unlocked;
@@ -145,10 +145,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;
- upper = kmemdup(&entry[max - mas.index + 1],
- ((mas.last - max) *
- sizeof(unsigned long)),
- map->alloc_flags);
+ upper = kmemdup_array(&entry[max - mas.index + 1],
+ mas.last - max, sizeof(*upper),
+ map->alloc_flags);
if (!upper) {
ret = -ENOMEM;
goto out_unlocked;
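For clarity, the kmemdup_array() form used in these hunks duplicates a count of fixed-size elements with overflow checking on the count * size product; a minimal hedged example (hypothetical names, argument order as used in this patch: source, count, element size, gfp flags):

	u32 *copy = kmemdup_array(src, nelems, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;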
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 2e41cb12b8e2..7ec1ec605335 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -170,8 +170,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
* a copy of it.
*/
if (config->reg_defaults) {
- tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
- sizeof(struct reg_default), GFP_KERNEL);
+ tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
+ sizeof(*map->reg_defaults), GFP_KERNEL);
if (!tmp_buf)
return -ENOMEM;
map->reg_defaults = tmp_buf;
@@ -407,7 +407,7 @@ out:
* have gone out of sync, force writes of all the paging
* registers.
*/
- rb_for_each(node, 0, &map->range_tree, rbtree_all) {
+ rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
struct regmap_range_node *this =
rb_entry(node, struct regmap_range_node, node);
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index b9f76bdf74a9..a561971c459c 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -86,4 +86,5 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
+MODULE_DESCRIPTION("Register map access API - AC'97 support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 3ec611dc0c09..c9b39a02278e 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
if (quirks->max_write_len &&
(bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
- max_write = quirks->max_write_len;
+ max_write = quirks->max_write_len -
+ (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_read || max_write) {
ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
@@ -396,4 +397,5 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
+MODULE_DESCRIPTION("Register map access API - I2C support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 45fd13ef13fc..d3ec1345b5b5 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -305,8 +305,8 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
unsigned int b)
{
const struct regmap_irq_chip *chip = data->chip;
+ const struct regmap_irq_sub_irq_map *subreg;
struct regmap *map = data->map;
- struct regmap_irq_sub_irq_map *subreg;
unsigned int reg;
int i, ret = 0;
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index be32cd4e84da..d790c7df5cac 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -145,9 +145,9 @@ static struct regmap *gen_regmap(struct kunit *test,
const struct regmap_test_param *param = test->param_value;
struct regmap_test_priv *priv = test->priv;
unsigned int *buf;
- struct regmap *ret;
+ struct regmap *ret = ERR_PTR(-ENOMEM);
size_t size;
- int i;
+ int i, error;
struct reg_default *defaults;
config->cache_type = param->cache;
@@ -163,7 +163,7 @@ static struct regmap *gen_regmap(struct kunit *test,
config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
}
- size = (config->max_register + 1) * sizeof(unsigned int);
+ size = array_size(config->max_register + 1, sizeof(*buf));
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -172,15 +172,17 @@ static struct regmap *gen_regmap(struct kunit *test,
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data))
- return ERR_PTR(-ENOMEM);
+ goto out_free;
(*data)->vals = buf;
if (config->num_reg_defaults) {
- defaults = kcalloc(config->num_reg_defaults,
- sizeof(struct reg_default),
- GFP_KERNEL);
+ defaults = kunit_kcalloc(test,
+ config->num_reg_defaults,
+ sizeof(struct reg_default),
+ GFP_KERNEL);
if (!defaults)
- return ERR_PTR(-ENOMEM);
+ goto out_free;
+
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
@@ -190,12 +192,19 @@ static struct regmap *gen_regmap(struct kunit *test,
}
ret = regmap_init_ram(priv->dev, config, *data);
- if (IS_ERR(ret)) {
- kfree(buf);
- kfree(*data);
- } else {
- kunit_add_action(test, regmap_exit_action, ret);
- }
+ if (IS_ERR(ret))
+ goto out_free;
+
+ /* This calls regmap_exit() on failure, which frees buf and *data */
+ error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
+ if (error)
+ ret = ERR_PTR(error);
+
+ return ret;
+
+out_free:
+ kfree(buf);
+ kfree(*data);
return ret;
}
@@ -295,6 +304,77 @@ static void bulk_read(struct kunit *test)
KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
+static void multi_write(struct kunit *test)
+{
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ struct reg_sequence sequence[BLOCK_TEST_SIZE];
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /*
+ * Data written via the multi API can be read back with single
+ * reads.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ sequence[i].reg = i;
+ sequence[i].def = val[i];
+ sequence[i].delay_us = 0;
+ }
+ KUNIT_EXPECT_EQ(test, 0,
+ regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
+
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
+static void multi_read(struct kunit *test)
+{
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int regs[BLOCK_TEST_SIZE];
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Data written as single writes can be read via the multi API */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ regs[i] = i;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
+ }
+ KUNIT_EXPECT_EQ(test, 0,
+ regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
static void read_bypassed(struct kunit *test)
{
const struct regmap_test_param *param = test->param_value;
@@ -759,10 +839,9 @@ static void stress_insert(struct kunit *test)
if (IS_ERR(map))
return;
- vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
- GFP_KERNEL);
+ buf_sz = array_size(sizeof(*vals), config.max_register);
+ vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
KUNIT_ASSERT_FALSE(test, vals == NULL);
- buf_sz = sizeof(unsigned long) * config.max_register;
get_random_bytes(vals, buf_sz);
@@ -1497,16 +1576,17 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
struct regmap_test_priv *priv = test->priv;
const struct regmap_test_param *param = test->param_value;
u16 *buf;
- struct regmap *ret;
- size_t size = (config->max_register + 1) * config->reg_bits / 8;
- int i;
+ struct regmap *ret = ERR_PTR(-ENOMEM);
+ int i, error;
struct reg_default *defaults;
+ size_t size;
config->cache_type = param->cache;
config->val_format_endian = param->val_endian;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
+ size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits));
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -1515,15 +1595,16 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data))
- return ERR_PTR(-ENOMEM);
+ goto out_free;
(*data)->vals = (void *)buf;
config->num_reg_defaults = config->max_register + 1;
- defaults = kcalloc(config->num_reg_defaults,
- sizeof(struct reg_default),
- GFP_KERNEL);
+ defaults = kunit_kcalloc(test,
+ config->num_reg_defaults,
+ sizeof(struct reg_default),
+ GFP_KERNEL);
if (!defaults)
- return ERR_PTR(-ENOMEM);
+ goto out_free;
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
@@ -1536,7 +1617,8 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
defaults[i].def = be16_to_cpu(buf[i]);
break;
default:
- return ERR_PTR(-EINVAL);
+ ret = ERR_PTR(-EINVAL);
+ goto out_free;
}
}
@@ -1548,12 +1630,19 @@ static struct regmap *gen_raw_regmap(struct kunit *test,
config->num_reg_defaults = 0;
ret = regmap_init_raw_ram(priv->dev, config, *data);
- if (IS_ERR(ret)) {
- kfree(buf);
- kfree(*data);
- } else {
- kunit_add_action(test, regmap_exit_action, ret);
- }
+ if (IS_ERR(ret))
+ goto out_free;
+
+ /* This calls regmap_exit() on failure, which frees buf and *data */
+ error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
+ if (error)
+ ret = ERR_PTR(error);
+
+ return ret;
+
+out_free:
+ kfree(buf);
+ kfree(*data);
return ret;
}
@@ -1597,7 +1686,7 @@ static void raw_read_defaults(struct kunit *test)
if (IS_ERR(map))
return;
- val_len = sizeof(*rval) * (config.max_register + 1);
+ val_len = array_size(sizeof(*rval), config.max_register + 1);
rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
@@ -1887,6 +1976,8 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
@@ -1958,4 +2049,5 @@ static struct kunit_suite regmap_test_suite = {
};
kunit_test_suite(regmap_test_suite);
+MODULE_DESCRIPTION("Regmap KUnit tests");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 5b4cbf982a11..4e5b4518ce4d 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -83,4 +83,5 @@ struct regmap *__regmap_init_ram(struct device *dev,
}
EXPORT_SYMBOL_GPL(__regmap_init_ram);
+MODULE_DESCRIPTION("Register map access API - Memory region");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index 69eabfb89eda..76c98814fb8a 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -142,4 +142,5 @@ struct regmap *__regmap_init_raw_ram(struct device *dev,
}
EXPORT_SYMBOL_GPL(__regmap_init_raw_ram);
+MODULE_DESCRIPTION("Register map access API - Memory region with raw access");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
index 986af26d88c2..12bbbb03e5f2 100644
--- a/drivers/base/regmap/regmap-sccb.c
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -125,4 +125,5 @@ struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
+MODULE_DESCRIPTION("Register map access API - SCCB support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 8075db788b39..54eb7d227cf4 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -68,4 +68,5 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
+MODULE_DESCRIPTION("Register map access API - SLIMbus support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
index 4c2b94b3e30b..d86a06cadcdb 100644
--- a/drivers/base/regmap/regmap-spi-avmm.c
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -710,4 +710,5 @@ struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
+MODULE_DESCRIPTION("Register map access API - SPI AVMM support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 094cf2a2ca3c..14b1d88997cb 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
return ERR_PTR(-ENOMEM);
max_msg_size = spi_max_message_size(spi);
- reg_reserve_size = config->reg_bits / BITS_PER_BYTE
- + config->pad_bits / BITS_PER_BYTE;
+ reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_size + reg_reserve_size > max_msg_size)
max_size -= reg_reserve_size;
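A quick worked example of why the grouped division matters: with reg_bits = 7 and pad_bits = 1, the old expression evaluates to 7/8 + 1/8 = 0 + 0 = 0 reserved bytes, whereas the corrected (7 + 1)/8 = 1 byte matches the actual register-plus-padding footprint on the wire.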
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index cdf12d2aa3a1..347bfe9544ce 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -222,4 +222,5 @@ struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
+MODULE_DESCRIPTION("Register map access API - SPMI support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 3a8b402db852..29fd24f9c7ed 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -234,4 +234,5 @@ struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
+MODULE_DESCRIPTION("Register map access API - W1 (1-Wire) support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0a34dd3c4f38..bfc6bc1eb3a4 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -2347,7 +2347,7 @@ out:
} else {
void *wval;
- wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
+ wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
if (!wval)
return -ENOMEM;
@@ -3101,8 +3101,53 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
+static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
+ unsigned int *regs, void *val, size_t val_count)
+{
+ u32 *u32 = val;
+ u16 *u16 = val;
+ u8 *u8 = val;
+ int ret, i;
+
+ map->lock(map->lock_arg);
+
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
+
+ if (regs) {
+ if (!IS_ALIGNED(regs[i], map->reg_stride)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = _regmap_read(map, regs[i], &ival);
+ } else {
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
+ }
+ if (ret != 0)
+ goto out;
+
+ switch (map->format.val_bytes) {
+ case 4:
+ u32[i] = ival;
+ break;
+ case 2:
+ u16[i] = ival;
+ break;
+ case 1:
+ u8[i] = ival;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+
/**
- * regmap_bulk_read() - Read multiple registers from the device
+ * regmap_bulk_read() - Read multiple sequential registers from the device
*
* @map: Register map to read from
* @reg: First register to be read from
@@ -3132,47 +3177,35 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(val + i);
} else {
- u32 *u32 = val;
- u16 *u16 = val;
- u8 *u8 = val;
-
- map->lock(map->lock_arg);
-
- for (i = 0; i < val_count; i++) {
- unsigned int ival;
-
- ret = _regmap_read(map, reg + regmap_get_offset(map, i),
- &ival);
- if (ret != 0)
- goto out;
-
- switch (map->format.val_bytes) {
- case 4:
- u32[i] = ival;
- break;
- case 2:
- u16[i] = ival;
- break;
- case 1:
- u8[i] = ival;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- }
-
-out:
- map->unlock(map->lock_arg);
+ ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
}
-
if (!ret)
trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
-
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
+/**
+ * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
+ *
+ * @map: Register map to read from
+ * @regs: Array of registers to read from
+ * @val: Pointer to store read value, in native register size for device
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
+ size_t val_count)
+{
+ if (val_count == 0)
+ return -EINVAL;
+
+ return _regmap_bulk_read(map, 0, regs, val, val_count);
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_read);
+
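A short usage sketch of the new API (hypothetical register addresses; "map" is an already-initialised regmap):

	unsigned int regs[] = { 0x02, 0x10, 0x3c };
	unsigned int vals[ARRAY_SIZE(regs)];
	int ret;

	ret = regmap_multi_reg_read(map, regs, vals, ARRAY_SIZE(regs));
	if (ret)
		return ret;
	/* vals[i] now holds the value read from regs[i] */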
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool force_write)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 5b9d4aaebb81..ed209f4f2798 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -354,6 +354,15 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
+config BLK_DEV_RUST_NULL
+ tristate "Rust null block driver (Experimental)"
+ depends on RUST
+ help
+ This is the Rust implementation of the null block driver. For now it
+ is only a minimal stub.
+
+ If unsure, say N.
+
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 101612cba303..1105a2d4fdcb 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,6 +9,9 @@
# needed for trace events
ccflags-y += -I$(src)
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
+rnull_mod-y := rnull.o
+
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a25414228e47..49ced65bef4c 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -232,6 +232,7 @@ static DEFINE_MUTEX(amiflop_mutex);
static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */
module_param(fd_def_df0, ulong, 0);
+MODULE_DESCRIPTION("Amiga floppy driver");
MODULE_LICENSE("GPL");
/*
@@ -1776,10 +1777,13 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct gendisk *disk;
int err;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index b6dac8cee70f..2028795ec61c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -337,6 +337,7 @@ aoeblk_gdalloc(void *vp)
struct queue_limits lim = {
.max_hw_sectors = aoe_maxsectors,
.io_opt = SZ_2M,
+ .features = BLK_FEAT_ROTATIONAL,
};
ulong flags;
int late = 0;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index cacc4ba942a8..4ba98c6654be 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1992,9 +1992,12 @@ static const struct blk_mq_ops ataflop_mq_ops = {
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct gendisk *disk;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
@@ -2197,4 +2200,5 @@ static void __exit atari_floppy_exit(void)
module_init(atari_floppy_init)
module_exit(atari_floppy_exit)
+MODULE_DESCRIPTION("Atari floppy driver");
MODULE_LICENSE("GPL");
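The amiflop and ataflop conversions above follow a pattern repeated throughout this section: queue features that used to be set with blk_queue_flag_set() after allocation are now declared up front in struct queue_limits and passed to blk_mq_alloc_disk(). A hedged sketch of that shape, with placeholder limits and driver data:

#include <linux/blk-mq.h>

static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
                                          void *drv_data)
{
        struct queue_limits lim = {
                .max_hw_sectors = 64,
                .features       = BLK_FEAT_ROTATIONAL,
        };

        /* Returns an ERR_PTR() on failure; callers check with IS_ERR(). */
        return blk_mq_alloc_disk(set, &lim, drv_data);
}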
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 558d8e670566..2fd1ed101748 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -296,6 +296,7 @@ static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
+MODULE_DESCRIPTION("Ram backed block device driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
@@ -335,6 +336,8 @@ static int brd_alloc(int i)
.max_hw_discard_sectors = UINT_MAX,
.max_discard_segments = 1,
.discard_granularity = PAGE_SIZE,
+ .features = BLK_FEAT_SYNCHRONOUS |
+ BLK_FEAT_NOWAIT,
};
list_for_each_entry(brd, &brd_devices, brd_list)
@@ -366,10 +369,6 @@ static int brd_alloc(int i)
strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
- /* Tell the block layer that this is not a rotational device */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 113b441d4d36..f92673f05c7a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2697,6 +2697,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
* connect.
*/
.max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8,
+ .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
+ BLK_FEAT_ROTATIONAL |
+ BLK_FEAT_STABLE_WRITES,
};
device = minor_to_device(minor);
@@ -2735,9 +2738,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
- blk_queue_write_cache(disk->queue, true, true);
-
device->md_io.page = alloc_page(GFP_KERNEL);
if (!device->md_io.page)
goto out_no_io_page;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 25c9d85667f1..3affb538b989 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4516,7 +4516,8 @@ static bool floppy_available(int drive)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
struct queue_limits lim = {
- .max_hw_sectors = 64,
+ .max_hw_sectors = 64,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct gendisk *disk;
@@ -5016,6 +5017,7 @@ module_param(floppy, charp, 0);
module_param(FLOPPY_IRQ, int, 0);
module_param(FLOPPY_DMA, int, 0);
MODULE_AUTHOR("Alain L. Knaff");
+MODULE_DESCRIPTION("Normal floppy disk support");
MODULE_LICENSE("GPL");
/* This doesn't actually get used other than for module information */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 93780f41646b..78a7bb28defe 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -211,13 +211,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
if (lo->lo_state == Lo_bound)
blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
- if (use_dio) {
- blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+ if (use_dio)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- } else {
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+ else
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- }
if (lo->lo_state == Lo_bound)
blk_mq_unfreeze_queue(lo->lo_queue);
}
@@ -302,6 +299,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
return 0;
}
+static void loop_clear_limits(struct loop_device *lo, int mode)
+{
+ struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
+
+ if (mode & FALLOC_FL_ZERO_RANGE)
+ lim.max_write_zeroes_sectors = 0;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ lim.max_hw_discard_sectors = 0;
+ lim.discard_granularity = 0;
+ }
+
+ queue_limits_commit_update(lo->lo_queue, &lim);
+}
+
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
@@ -320,6 +332,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
return -EIO;
+
+ /*
+ * We initially configure the limits in the hope that fallocate is
+ * supported and clear them here if that turns out not to be true.
+ */
+ if (unlikely(ret == -EOPNOTSUPP))
+ loop_clear_limits(lo, mode);
+
return ret;
}
@@ -916,24 +936,6 @@ static void loop_free_idle_workers_timer(struct timer_list *timer)
return loop_free_idle_workers(lo, false);
}
-static void loop_update_rotational(struct loop_device *lo)
-{
- struct file *file = lo->lo_backing_file;
- struct inode *file_inode = file->f_mapping->host;
- struct block_device *file_bdev = file_inode->i_sb->s_bdev;
- struct request_queue *q = lo->lo_queue;
- bool nonrot = true;
-
- /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
- if (file_bdev)
- nonrot = bdev_nonrot(file_bdev);
-
- if (nonrot)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
-}
-
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
@@ -975,17 +977,40 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize,
- bool update_discard_settings)
+static unsigned short loop_default_blocksize(struct loop_device *lo,
+ struct block_device *backing_bdev)
{
+ /* In case of direct I/O, match underlying block size */
+ if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
+ return bdev_logical_block_size(backing_bdev);
+ return SECTOR_SIZE;
+}
+
+static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *backing_bdev = NULL;
struct queue_limits lim;
+ if (S_ISBLK(inode->i_mode))
+ backing_bdev = I_BDEV(inode);
+ else if (inode->i_sb->s_bdev)
+ backing_bdev = inode->i_sb->s_bdev;
+
+ if (!bsize)
+ bsize = loop_default_blocksize(lo, backing_bdev);
+
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = bsize;
lim.physical_block_size = bsize;
lim.io_min = bsize;
- if (update_discard_settings)
- loop_config_discard(lo, &lim);
+ lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ if (backing_bdev && !bdev_nonrot(backing_bdev))
+ lim.features |= BLK_FEAT_ROTATIONAL;
+ loop_config_discard(lo, &lim);
return queue_limits_commit_update(lo->lo_queue, &lim);
}
@@ -994,12 +1019,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
const struct loop_config *config)
{
struct file *file = fget(config->fd);
- struct inode *inode;
struct address_space *mapping;
int error;
loff_t size;
bool partscan;
- unsigned short bsize;
bool is_loop;
if (!file)
@@ -1032,19 +1055,12 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
goto out_unlock;
mapping = file->f_mapping;
- inode = mapping->host;
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
}
- if (config->block_size) {
- error = blk_validate_block_size(config->block_size);
- if (error)
- goto out_unlock;
- }
-
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
@@ -1075,22 +1091,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
- blk_queue_write_cache(lo->lo_queue, true, false);
-
- if (config->block_size)
- bsize = config->block_size;
- else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
- /* In case of direct I/O, match underlying block size */
- bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
- else
- bsize = 512;
-
- error = loop_reconfigure_limits(lo, bsize, true);
- if (WARN_ON_ONCE(error))
+ error = loop_reconfigure_limits(lo, config->block_size);
+ if (error)
goto out_unlock;
- loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
@@ -1131,22 +1135,12 @@ out_putf:
return error;
}
-static void __loop_clr_fd(struct loop_device *lo, bool release)
+static void __loop_clr_fd(struct loop_device *lo)
{
+ struct queue_limits lim;
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
- if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
- blk_queue_write_cache(lo->lo_queue, false, false);
-
- /*
- * Freeze the request queue when unbinding on a live file descriptor and
- * thus an open device. When called from ->release we are guaranteed
- * that there is no I/O in progress already.
- */
- if (!release)
- blk_mq_freeze_queue(lo->lo_queue);
-
spin_lock_irq(&lo->lo_lock);
filp = lo->lo_backing_file;
lo->lo_backing_file = NULL;
@@ -1156,7 +1150,14 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- loop_reconfigure_limits(lo, 512, false);
+
+ /* reset the block size to the default */
+ lim = queue_limits_start_update(lo->lo_queue);
+ lim.logical_block_size = SECTOR_SIZE;
+ lim.physical_block_size = SECTOR_SIZE;
+ lim.io_min = SECTOR_SIZE;
+ queue_limits_commit_update(lo->lo_queue, &lim);
+
invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
/* let user-space know about this change */
@@ -1164,8 +1165,6 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- if (!release)
- blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk);
@@ -1180,11 +1179,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
* must be at least one and it can only become zero when the
* current holder is released.
*/
- if (!release)
- mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false);
- if (!release)
- mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo->lo_number, err);
@@ -1233,24 +1228,16 @@ static int loop_clr_fd(struct loop_device *lo)
return -ENXIO;
}
/*
- * If we've explicitly asked to tear down the loop device,
- * and it has an elevated reference count, set it for auto-teardown when
- * the last reference goes away. This stops $!~#$@ udev from
- * preventing teardown because it decided that it needs to run blkid on
- * the loopback device whenever they appear. xfstests is notorious for
- * failing tests because blkid via udev races with a losetup
- * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
- * command to fail with EBUSY.
+ * Mark the device for removing the backing device on last close.
+ * If we are the only opener, also switch the state to rundown here to
+ * prevent new openers from coming in.
*/
- if (disk_openers(lo->lo_disk) > 1) {
- lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- loop_global_unlock(lo, true);
- return 0;
- }
- lo->lo_state = Lo_rundown;
+
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+ if (disk_openers(lo->lo_disk) == 1)
+ lo->lo_state = Lo_rundown;
loop_global_unlock(lo, true);
- __loop_clr_fd(lo, false);
return 0;
}
@@ -1477,10 +1464,6 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- err = blk_validate_block_size(arg);
- if (err)
- return err;
-
if (lo->lo_queue->limits.logical_block_size == arg)
return 0;
@@ -1488,7 +1471,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
invalidate_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
- err = loop_reconfigure_limits(lo, arg, false);
+ err = loop_reconfigure_limits(lo, arg);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
@@ -1717,25 +1700,43 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
}
#endif
+static int lo_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct loop_device *lo = disk->private_data;
+ int err;
+
+ err = mutex_lock_killable(&lo->lo_mutex);
+ if (err)
+ return err;
+
+ if (lo->lo_state == Lo_deleting || lo->lo_state == Lo_rundown)
+ err = -ENXIO;
+ mutex_unlock(&lo->lo_mutex);
+ return err;
+}
+
static void lo_release(struct gendisk *disk)
{
struct loop_device *lo = disk->private_data;
+ bool need_clear = false;
if (disk_openers(disk) > 0)
return;
+ /*
+ * Clear the backing device information if this is the last close of
+ * a device that's been marked for auto clear, or on which LOOP_CLR_FD
+ * has been called.
+ */
mutex_lock(&lo->lo_mutex);
- if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) {
+ if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR))
lo->lo_state = Lo_rundown;
- mutex_unlock(&lo->lo_mutex);
- /*
- * In autoclear mode, stop the loop thread
- * and remove configuration after last close.
- */
- __loop_clr_fd(lo, true);
- return;
- }
+
+ need_clear = (lo->lo_state == Lo_rundown);
mutex_unlock(&lo->lo_mutex);
+
+ if (need_clear)
+ __loop_clr_fd(lo);
}
static void lo_free_disk(struct gendisk *disk)
@@ -1752,6 +1753,7 @@ static void lo_free_disk(struct gendisk *disk)
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
+ .open = lo_open,
.release = lo_release,
.ioctl = lo_ioctl,
#ifdef CONFIG_COMPAT
@@ -1830,6 +1832,7 @@ static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
+MODULE_DESCRIPTION("Loopback device support");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -2037,14 +2040,6 @@ static int loop_add(int i)
lo->lo_queue = lo->lo_disk->queue;
/*
- * By default, we do buffer IO, so it doesn't make sense to enable
- * merge because the I/O submitted to backing file is handled page by
- * page. For directio mode, merge does help to dispatch bigger request
- * to underlayer disk. We will enable merge once directio is enabled.
- */
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
-
- /*
* Disable partition scanning by default. The in-kernel partition
* scanning can be requested individually per-device during its
* setup. Userspace can always add and remove partitions from all
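loop_reconfigure_limits() above is one instance of the queue_limits_start_update()/queue_limits_commit_update() pattern used throughout this section. A simplified sketch of changing the block size on an existing queue (values and error handling are illustrative); the explicit blk_validate_block_size() calls could be dropped because the commit step validates the whole limits set:

#include <linux/blkdev.h>

static int example_set_block_size(struct request_queue *q, unsigned int bsize)
{
        struct queue_limits lim = queue_limits_start_update(q);

        lim.logical_block_size = bsize;
        lim.physical_block_size = bsize;
        lim.io_min = bsize;

        /* Validates and applies the whole limits set atomically. */
        return queue_limits_commit_update(q, &lim);
}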
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 43a187609ef7..c6ef0546ffc9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3485,8 +3485,6 @@ skip_create_disk:
goto start_service_thread;
/* Set device limits. */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
/* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 27b2187e7a6d..b9fdeff31caf 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -150,8 +150,6 @@ static int __init n64cart_probe(struct platform_device *pdev)
set_capacity(disk, size >> SECTOR_SHIFT);
set_disk_ro(disk, 1);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 22a79a62cc4e..41a90150b501 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -342,6 +342,14 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.max_hw_discard_sectors = UINT_MAX;
else
lim.max_hw_discard_sectors = 0;
+ if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
+ lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+ } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) {
+ lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
+ } else {
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ lim.features &= ~BLK_FEAT_FUA;
+ }
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
error = queue_limits_commit_update(nbd->disk->queue, &lim);
@@ -589,10 +597,11 @@ static inline int was_interrupted(int result)
}
/*
- * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
- * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
+ * Returns BLK_STS_IOERR if sending failed.
*/
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+ int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_config *config = nbd->config;
@@ -614,13 +623,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
type = req_to_nbd_cmd_type(req);
if (type == U32_MAX)
- return -EIO;
+ return BLK_STS_IOERR;
if (rq_data_dir(req) == WRITE &&
(config->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
- return -EIO;
+ return BLK_STS_IOERR;
}
if (req->cmd_flags & REQ_FUA)
@@ -674,11 +683,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->sent = sent;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return (__force int)BLK_STS_RESOURCE;
+ return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
- return -EAGAIN;
+ goto requeue;
}
send_pages:
if (type != NBD_CMD_WRITE)
@@ -715,12 +724,12 @@ send_pages:
nsock->pending = req;
nsock->sent = sent;
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return (__force int)BLK_STS_RESOURCE;
+ return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
- return -EAGAIN;
+ goto requeue;
}
/*
* The completion might already have come in,
@@ -737,7 +746,16 @@ out:
trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
- return 0;
+ __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ return BLK_STS_OK;
+
+requeue:
+ /* retry on a different socket */
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed, requeueing\n");
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ nbd_requeue_cmd(cmd);
+ return BLK_STS_OK;
}
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -1018,7 +1036,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
struct nbd_sock *nsock;
- int ret;
+ blk_status_t ret;
lockdep_assert_held(&cmd->lock);
@@ -1072,28 +1090,11 @@ again:
ret = BLK_STS_OK;
goto out;
}
- /*
- * Some failures are related to the link going down, so anything that
- * returns EAGAIN can be retried on a different socket.
- */
ret = nbd_send_cmd(nbd, cmd, index);
- /*
- * Access to this flag is protected by cmd->lock, thus it's safe to set
- * the flag after nbd_send_cmd() succeed to send request to server.
- */
- if (!ret)
- __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
- else if (ret == -EAGAIN) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Request send failed, requeueing\n");
- nbd_mark_nsock_dead(nbd, nsock, 1);
- nbd_requeue_cmd(cmd);
- ret = BLK_STS_OK;
- }
out:
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
- return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+ return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1286,19 +1287,10 @@ static void nbd_bdev_reset(struct nbd_device *nbd)
static void nbd_parse_flags(struct nbd_device *nbd)
{
- struct nbd_config *config = nbd->config;
- if (config->flags & NBD_FLAG_READ_ONLY)
+ if (nbd->config->flags & NBD_FLAG_READ_ONLY)
set_disk_ro(nbd->disk, true);
else
set_disk_ro(nbd->disk, false);
- if (config->flags & NBD_FLAG_SEND_FLUSH) {
- if (config->flags & NBD_FLAG_SEND_FUA)
- blk_queue_write_cache(nbd->disk->queue, true, true);
- else
- blk_queue_write_cache(nbd->disk->queue, true, false);
- }
- else
- blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
@@ -1808,7 +1800,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
struct queue_limits lim = {
.max_hw_sectors = 65536,
- .max_user_sectors = 256,
+ .io_opt = 256 << SECTOR_SHIFT,
.max_segments = USHRT_MAX,
.max_segment_size = UINT_MAX,
};
@@ -1868,11 +1860,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
goto out_err_disk;
}
- /*
- * Tell the block layer that we are not a rotational device
- */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);
/*
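The nbd_send_cmd() rework above removes a return value that mixed errnos with blk_status_t codes. For illustration, a hedged sketch of the convention it moves toward: translate a driver-internal errno once at the blk-mq boundary. example_send() is a hypothetical helper, not part of nbd.

#include <linux/blk-mq.h>

static int example_send(struct request *req)
{
        /* Hypothetical driver send path; pretend it always succeeds. */
        return 0;
}

static blk_status_t example_queue_rq(struct request *req)
{
        int err = example_send(req);

        if (err == -EAGAIN)
                return BLK_STS_RESOURCE;        /* blk-mq retries later */

        return errno_to_blk_status(err);        /* 0 maps to BLK_STS_OK */
}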
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index eb023d267369..2f0431e42c49 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -77,7 +77,7 @@ enum {
NULL_IRQ_TIMER = 2,
};
-static bool g_virt_boundary = false;
+static bool g_virt_boundary;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
@@ -227,7 +227,7 @@ MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
-MODULE_PARM_DESC(zoned, "Enable/disable FUA support when cache_size is used. Default: true");
+MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");
static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
@@ -262,6 +262,10 @@ module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444
MODULE_PARM_DESC(zone_append_max_sectors,
"Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");
+static bool g_zone_full;
+module_param_named(zone_full, g_zone_full, bool, 0444);
+MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -458,6 +462,7 @@ NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
+NULLB_DEVICE_ATTR(zone_full, bool, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
@@ -494,6 +499,7 @@ static ssize_t nullb_device_power_store(struct config_item *item,
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
+ ret = count;
} else if (dev->power && !newp) {
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
dev->power = newp;
@@ -609,6 +615,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_readonly,
&nullb_device_attr_zone_offline,
+ &nullb_device_attr_zone_full,
&nullb_device_attr_virt_boundary,
&nullb_device_attr_no_sched,
&nullb_device_attr_shared_tags,
@@ -699,7 +706,7 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"shared_tags,size,submit_queues,use_per_node_hctx,"
"virt_boundary,zoned,zone_capacity,zone_max_active,"
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors\n");
+ "zone_size,zone_append_max_sectors,zone_full\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -780,6 +787,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
dev->zone_append_max_sectors = g_zone_append_max_sectors;
+ dev->zone_full = g_zone_full;
dev->virt_boundary = g_virt_boundary;
dev->no_sched = g_no_sched;
dev->shared_tags = g_shared_tags;
@@ -1823,9 +1831,6 @@ static int null_validate_conf(struct nullb_device *dev)
dev->queue_mode = NULL_Q_MQ;
}
- dev->blocksize = round_down(dev->blocksize, 512);
- dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
-
if (dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
dev->submit_queues = nr_online_nodes;
@@ -1927,6 +1932,13 @@ static int null_add_dev(struct nullb_device *dev)
goto out_cleanup_tags;
}
+ if (dev->cache_size > 0) {
+ set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ if (dev->fua)
+ lim.features |= BLK_FEAT_FUA;
+ }
+
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
@@ -1939,13 +1951,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb_setup_bwtimer(nullb);
}
- if (dev->cache_size > 0) {
- set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
- blk_queue_write_cache(nullb->q, true, dev->fua);
- }
-
nullb->q->queuedata = nullb;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
if (rv < 0)
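One of the null_blk fixes above ("ret = count" in nullb_device_power_store()) restores the configfs convention that a successful ->store() returns the number of bytes consumed. A hedged reminder of that shape; the attribute and its effect are invented for the example:

#include <linux/configfs.h>
#include <linux/kstrtox.h>

static ssize_t example_flag_store(struct config_item *item, const char *page,
                                  size_t count)
{
        bool val;
        int ret = kstrtobool(page, &val);

        if (ret)
                return ret;

        /* ... apply @val to the object backing @item ... */

        return count;   /* success reports the bytes consumed, not 0 */
}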
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 3234e6c85eed..a7bb32f73ec3 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -101,6 +101,7 @@ struct nullb_device {
bool memory_backed; /* if data is stored in memory */
bool discard; /* if support discard */
bool zoned; /* if device is zoned */
+ bool zone_full; /* Initialize zones to be full */
bool virt_boundary; /* virtual boundary on/off for the device */
bool no_sched; /* no IO scheduler for the device */
bool shared_tags; /* share tag set between devices for blk-mq */
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 5b5a63adacc1..9bc768b2ca56 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -74,6 +74,17 @@ int null_init_zoned_dev(struct nullb_device *dev,
return -EINVAL;
}
+ /*
+ * If a smaller zone capacity was requested, do not allow a smaller last
+ * zone at the same time as such zone configuration does not correspond
+ * to any real zoned device.
+ */
+ if (dev->zone_capacity != dev->zone_size &&
+ dev->size & (dev->zone_size - 1)) {
+ pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
+ return -EINVAL;
+ }
+
zone_capacity_sects = mb_to_sects(dev->zone_capacity);
dev_capacity_sects = mb_to_sects(dev->size);
dev->zone_size_sects = mb_to_sects(dev->zone_size);
@@ -108,7 +119,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
dev->zone_max_open = dev->zone_max_active;
pr_info("changed the maximum number of open zones to %u\n",
- dev->nr_zones);
+ dev->zone_max_open);
} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");
@@ -134,7 +145,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
zone = &dev->zones[i];
null_init_zone_lock(dev, zone);
- zone->start = zone->wp = sector;
+ zone->start = sector;
if (zone->start + dev->zone_size_sects > dev_capacity_sects)
zone->len = dev_capacity_sects - zone->start;
else
@@ -142,12 +153,18 @@ int null_init_zoned_dev(struct nullb_device *dev,
zone->capacity =
min_t(sector_t, zone->len, zone_capacity_sects);
zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
- zone->cond = BLK_ZONE_COND_EMPTY;
+ if (dev->zone_full) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zone->capacity;
+ } else {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ }
sector += dev->zone_size_sects;
}
- lim->zoned = true;
+ lim->features |= BLK_FEAT_ZONED;
lim->chunk_sectors = dev->zone_size_sects;
lim->max_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
@@ -160,9 +177,6 @@ int null_register_zoned_dev(struct nullb *nullb)
struct request_queue *q = nullb->q;
struct gendisk *disk = nullb->disk;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- disk->nr_zones = bdev_nr_zones(disk->part0);
-
pr_info("%s: using %s zone append\n",
disk->disk_name,
queue_emulates_zone_append(q) ? "emulated" : "native");
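As a worked example of the smaller-last-zone check added above (numbers are illustrative): with zone_size = 256 MB and zone_capacity = 192 MB, a 1000 MB device would end with a 232 MB last zone (1000 mod 256 = 232) and is now rejected, while a 1024 MB device, an exact multiple of the zone size, is accepted. When zone_capacity equals zone_size, a smaller last zone remains allowed, as before.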
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 8a2ce8070010..7cece5884b9c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2622,6 +2622,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
struct queue_limits lim = {
.max_hw_sectors = PACKET_MAX_SECTORS,
.logical_block_size = CD_FRAMESIZE,
+ .features = BLK_FEAT_ROTATIONAL,
};
int idx;
int ret = -ENOMEM;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index b810ac0a5c4b..ff45ed766469 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -388,9 +388,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
.max_segments = -1,
.max_segment_size = dev->bounce_size,
.dma_alignment = dev->blk_size - 1,
+ .features = BLK_FEAT_WRITE_CACHE |
+ BLK_FEAT_ROTATIONAL,
};
-
- struct request_queue *queue;
struct gendisk *gendisk;
if (dev->blk_size < 512) {
@@ -447,10 +447,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
goto fail_free_tag_set;
}
- queue = gendisk->queue;
-
- blk_queue_write_cache(queue, true, false);
-
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 26ff5cd2bf0a..008e850555f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4949,14 +4949,12 @@ static const struct blk_mq_ops rbd_mq_ops = {
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
- struct request_queue *q;
unsigned int objset_bytes =
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
struct queue_limits lim = {
.max_hw_sectors = objset_bytes >> SECTOR_SHIFT,
- .max_user_sectors = objset_bytes >> SECTOR_SHIFT,
+ .io_opt = objset_bytes,
.io_min = rbd_dev->opts->alloc_size,
- .io_opt = rbd_dev->opts->alloc_size,
.max_segments = USHRT_MAX,
.max_segment_size = UINT_MAX,
};
@@ -4980,12 +4978,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
}
+ if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
+ lim.features |= BLK_FEAT_STABLE_WRITES;
+
disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_tag_set;
}
- q = disk->queue;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
@@ -4997,13 +4997,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->minors = RBD_MINORS_PER_MAJOR;
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
-
- if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
-
rbd_dev->disk = disk;
return 0;
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 39887556cf95..6ea7c12e3a87 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -475,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
}
}
-static struct kobj_type rnbd_dev_ktype = {
+static const struct kobj_type rnbd_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = rnbd_dev_groups,
};
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b7ffe03c6160..c34695d2eea7 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1352,10 +1352,6 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
- /*
- * Network device does not need rotational
- */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
put_disk(dev->gd);
@@ -1389,18 +1385,18 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
le32_to_cpu(rsp->max_discard_sectors);
}
+ if (rsp->cache_policy & RNBD_WRITEBACK) {
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ if (rsp->cache_policy & RNBD_FUA)
+ lim.features |= BLK_FEAT_FUA;
+ }
+
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev);
if (IS_ERR(dev->gd))
return PTR_ERR(dev->gd);
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
- blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
- blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
- blk_queue_write_cache(dev->queue,
- !!(rsp->cache_policy & RNBD_WRITEBACK),
- !!(rsp->cache_policy & RNBD_FUA));
-
return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index cba6ba43c2c2..64780094442c 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -33,7 +33,7 @@ static void rnbd_srv_dev_release(struct kobject *kobj)
kfree(dev);
}
-static struct kobj_type dev_ktype = {
+static const struct kobj_type dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rnbd_srv_dev_release
};
@@ -184,7 +184,7 @@ static void rnbd_srv_sess_dev_release(struct kobject *kobj)
rnbd_destroy_sess_dev(sess_dev, sess_dev->keep_id);
}
-static struct kobj_type rnbd_srv_sess_dev_ktype = {
+static const struct kobj_type rnbd_srv_sess_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rnbd_srv_sess_dev_release,
};
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
new file mode 100644
index 000000000000..b0227cf9ddd3
--- /dev/null
+++ b/drivers/block/rnull.rs
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This is a Rust implementation of the C null block driver.
+//!
+//! Supported features:
+//!
+//! - blk-mq interface
+//! - direct completion
+//! - block size 4k
+//!
+//! The driver is not configurable.
+
+use kernel::{
+ alloc::flags,
+ block::mq::{
+ self,
+ gen_disk::{self, GenDisk},
+ Operations, TagSet,
+ },
+ error::Result,
+ new_mutex, pr_info,
+ prelude::*,
+ sync::{Arc, Mutex},
+ types::ARef,
+};
+
+module! {
+ type: NullBlkModule,
+ name: "rnull_mod",
+ author: "Andreas Hindborg",
+ license: "GPL v2",
+}
+
+struct NullBlkModule {
+ _disk: Pin<Box<Mutex<GenDisk<NullBlkDevice>>>>,
+}
+
+impl kernel::Module for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust null_blk loaded\n");
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
+
+ let disk = gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(4096 << 11)
+ .logical_block_size(4096)?
+ .physical_block_size(4096)?
+ .rotational(false)
+ .build(format_args!("rnullb{}", 0), tagset)?;
+
+ let disk = Box::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
+
+ Ok(Self { _disk: disk })
+ }
+}
+
+struct NullBlkDevice;
+
+#[vtable]
+impl Operations for NullBlkDevice {
+ #[inline(always)]
+ fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
+ mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request");
+
+ Ok(())
+ }
+
+ fn commit_rqs() {}
+}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5286cb8e0824..2d38331ee667 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -791,6 +791,7 @@ static int probe_disk(struct vdc_port *port)
.seg_boundary_mask = PAGE_SIZE - 1,
.max_segment_size = PAGE_SIZE,
.max_segments = port->ring_cookies,
+ .features = BLK_FEAT_ROTATIONAL,
};
struct request_queue *q;
struct gendisk *g;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 6731678f3a41..126f151c4f2c 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -787,6 +787,9 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs)
static int swim_floppy_init(struct swim_priv *swd)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
int err;
int drive;
struct swim __iomem *base = swd->base;
@@ -820,7 +823,7 @@ static int swim_floppy_init(struct swim_priv *swd)
goto exit_put_disks;
swd->unit[drive].disk =
- blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL,
+ blk_mq_alloc_disk(&swd->unit[drive].tag_set, &lim,
&swd->unit[drive]);
if (IS_ERR(swd->unit[drive].disk)) {
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index a04756ac778e..90be1017f7bf 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1189,6 +1189,9 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ };
struct floppy_state *fs;
struct gendisk *disk;
int rc;
@@ -1210,7 +1213,7 @@ static int swim3_attach(struct macio_dev *mdev,
if (rc)
goto out_unregister;
- disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs);
+ disk = blk_mq_alloc_disk(&fs->tag_set, &lim, fs);
if (IS_ERR(disk)) {
rc = PTR_ERR(disk);
goto out_free_tag_set;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 4e159948c912..6fb799ec0d88 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -248,8 +248,6 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
-
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
}
@@ -484,16 +482,8 @@ static inline unsigned ublk_pos_to_tag(loff_t pos)
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
- struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_basic *p = &ub->params.basic;
- blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
- p->attrs & UBLK_ATTR_FUA);
- if (p->attrs & UBLK_ATTR_ROTATIONAL)
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-
if (p->attrs & UBLK_ATTR_READ_ONLY)
set_disk_ro(ub->ub_disk, true);
@@ -2204,12 +2194,21 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
return -EOPNOTSUPP;
- lim.zoned = true;
+ lim.features |= BLK_FEAT_ZONED;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
lim.max_zone_append_sectors = p->max_zone_append_sectors;
}
+ if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ if (ub->params.basic.attrs & UBLK_ATTR_FUA)
+ lim.features |= BLK_FEAT_FUA;
+ }
+
+ if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
@@ -3017,4 +3016,5 @@ module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
+MODULE_DESCRIPTION("Userspace block device");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2351f411fa46..e3147a611151 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- lim->zoned = true;
+ lim->features |= BLK_FEAT_ZONED;
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
@@ -1089,14 +1089,6 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
return writeback;
}
-static void virtblk_update_cache_mode(struct virtio_device *vdev)
-{
- u8 writeback = virtblk_get_cache_mode(vdev);
- struct virtio_blk *vblk = vdev->priv;
-
- blk_queue_write_cache(vblk->disk->queue, writeback, false);
-}
-
static const char *const virtblk_cache_types[] = {
"write through", "write back"
};
@@ -1108,6 +1100,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
+ struct queue_limits lim;
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
@@ -1116,7 +1109,17 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
- virtblk_update_cache_mode(vdev);
+
+ lim = queue_limits_start_update(disk->queue);
+ if (virtblk_get_cache_mode(vdev))
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+ else
+ lim.features &= ~BLK_FEAT_WRITE_CACHE;
+ blk_mq_freeze_queue(disk->queue);
+ i = queue_limits_commit_update(disk->queue, &lim);
+ blk_mq_unfreeze_queue(disk->queue);
+ if (i)
+ return i;
return count;
}
@@ -1247,7 +1250,7 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
struct queue_limits *lim)
{
struct virtio_device *vdev = vblk->vdev;
- u32 v, blk_size, max_size, sg_elems, opt_io_size;
+ u32 v, max_size, sg_elems, opt_io_size;
u32 max_discard_segs = 0;
u32 discard_granularity = 0;
u16 min_io_size;
@@ -1286,46 +1289,36 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
lim->max_segment_size = max_size;
/* Host can optionally specify the block size of the device */
- err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
+ virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
struct virtio_blk_config, blk_size,
- &blk_size);
- if (!err) {
- err = blk_validate_block_size(blk_size);
- if (err) {
- dev_err(&vdev->dev,
- "virtio_blk: invalid block size: 0x%x\n",
- blk_size);
- return err;
- }
-
- lim->logical_block_size = blk_size;
- } else
- blk_size = lim->logical_block_size;
+ &lim->logical_block_size);
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
&physical_block_exp);
if (!err && physical_block_exp)
- lim->physical_block_size = blk_size * (1 << physical_block_exp);
+ lim->physical_block_size =
+ lim->logical_block_size * (1 << physical_block_exp);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, alignment_offset,
&alignment_offset);
if (!err && alignment_offset)
- lim->alignment_offset = blk_size * alignment_offset;
+ lim->alignment_offset =
+ lim->logical_block_size * alignment_offset;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, min_io_size,
&min_io_size);
if (!err && min_io_size)
- lim->io_min = blk_size * min_io_size;
+ lim->io_min = lim->logical_block_size * min_io_size;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, opt_io_size,
&opt_io_size);
if (!err && opt_io_size)
- lim->io_opt = blk_size * opt_io_size;
+ lim->io_opt = lim->logical_block_size * opt_io_size;
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
@@ -1419,7 +1412,7 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
lim->discard_granularity =
discard_granularity << SECTOR_SHIFT;
else
- lim->discard_granularity = blk_size;
+ lim->discard_granularity = lim->logical_block_size;
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
@@ -1448,7 +1441,10 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
- struct queue_limits lim = { };
+ struct queue_limits lim = {
+ .features = BLK_FEAT_ROTATIONAL,
+ .logical_block_size = SECTOR_SIZE,
+ };
int err, index;
unsigned int queue_depth;
@@ -1512,6 +1508,9 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_free_tags;
+ if (virtblk_get_cache_mode(vdev))
+ lim.features |= BLK_FEAT_WRITE_CACHE;
+
vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
if (IS_ERR(vblk->disk)) {
err = PTR_ERR(vblk->disk);
@@ -1527,9 +1526,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->fops = &virtblk_fops;
vblk->index = index;
- /* configure queue flush support */
- virtblk_update_cache_mode(vdev);
-
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
set_disk_ro(vblk->disk, 1);
@@ -1541,8 +1537,8 @@ static int virtblk_probe(struct virtio_device *vdev)
* All steps that follow use the VQs therefore they need to be
* placed after the virtio_device_ready() call above.
*/
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (lim.features & BLK_FEAT_ZONED)) {
err = blk_revalidate_disk_zones(vblk->disk);
if (err)
goto out_cleanup_disk;
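The cache_type_store() conversion above shows how a feature flag is now toggled on a live queue. A generalized, hedged sketch of the same steps, with the freeze taken around the commit as in the virtio-blk change:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static int example_set_write_cache(struct gendisk *disk, bool enable)
{
        struct queue_limits lim = queue_limits_start_update(disk->queue);
        int err;

        if (enable)
                lim.features |= BLK_FEAT_WRITE_CACHE;
        else
                lim.features &= ~BLK_FEAT_WRITE_CACHE;

        /* Freeze so in-flight requests do not race with the new setting. */
        blk_mq_freeze_queue(disk->queue);
        err = queue_limits_commit_update(disk->queue, &lim);
        blk_mq_unfreeze_queue(disk->queue);
        return err;
}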
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 944576d582fb..838064593f62 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1563,5 +1563,6 @@ static void __exit xen_blkif_fini(void)
module_exit(xen_blkif_fini);
+MODULE_DESCRIPTION("Virtual block device back-end driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index fd7c0ff2139c..59ce113b882a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -788,6 +788,11 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* A barrier request a superset of FUA, so we can
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
+ *
+ * Note that we can end up here with a FUA write and the
+ * flags cleared. This happens when the flag was
+ * run-time disabled after a failing I/O, and we'll
+ * simply submit it as a normal write.
*/
if (info->feature_flush && info->feature_fua)
ring_req->operation =
@@ -795,8 +800,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
else if (info->feature_flush)
ring_req->operation =
BLKIF_OP_FLUSH_DISKCACHE;
- else
- ring_req->operation = 0;
}
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
@@ -887,16 +890,6 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
notify_remote_via_irq(rinfo->irq);
}
-static inline bool blkif_request_flush_invalid(struct request *req,
- struct blkfront_info *info)
-{
- return (blk_rq_is_passthrough(req) ||
- ((req_op(req) == REQ_OP_FLUSH) &&
- !info->feature_flush) ||
- ((req->cmd_flags & REQ_FUA) &&
- !info->feature_fua));
-}
-
static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
@@ -908,12 +901,22 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
- if (RING_FULL(&rinfo->ring))
- goto out_busy;
- if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
- goto out_err;
+ /*
+ * Check if the backend actually supports flushes.
+ *
+ * While the block layer won't send us flushes if we don't claim to
+ * support them, the Xen protocol allows the backend to revoke support
+ * at any time. That is of course a really bad idea and dangerous, but
+ * has been allowed for 10+ years. In that case we simply clear the
+ * flags, and directly return here for an empty flush and ignore the
+ * FUA flag later on.
+ */
+ if (unlikely(req_op(qd->rq) == REQ_OP_FLUSH && !info->feature_flush))
+ goto complete;
+ if (RING_FULL(&rinfo->ring))
+ goto out_busy;
if (blkif_queue_request(qd->rq, rinfo))
goto out_busy;
@@ -921,14 +924,14 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_OK;
-out_err:
- spin_unlock_irqrestore(&rinfo->ring_lock, flags);
- return BLK_STS_IOERR;
-
out_busy:
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_DEV_RESOURCE;
+complete:
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ blk_mq_end_request(qd->rq, BLK_STS_OK);
+ return BLK_STS_OK;
}
static void blkif_complete_rq(struct request *rq)
@@ -956,6 +959,12 @@ static void blkif_set_queue_limits(const struct blkfront_info *info,
lim->max_secure_erase_sectors = UINT_MAX;
}
+ if (info->feature_flush) {
+ lim->features |= BLK_FEAT_WRITE_CACHE;
+ if (info->feature_fua)
+ lim->features |= BLK_FEAT_FUA;
+ }
+
/* Hard sector size and max sectors impersonate the equiv. hardware. */
lim->logical_block_size = info->sector_size;
lim->physical_block_size = info->physical_sector_size;
@@ -984,8 +993,6 @@ static const char *flush_info(struct blkfront_info *info)
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
- info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
@@ -1063,8 +1070,7 @@ static char *encode_disk_name(char *ptr, unsigned int n)
}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
- struct blkfront_info *info, u16 sector_size,
- unsigned int physical_sector_size)
+ struct blkfront_info *info)
{
struct queue_limits lim = {};
struct gendisk *gd;
@@ -1139,7 +1145,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
err = PTR_ERR(gd);
goto out_free_tag_set;
}
- blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1159,8 +1164,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
info->rq = gd->queue;
info->gd = gd;
- info->sector_size = sector_size;
- info->physical_sector_size = physical_sector_size;
xlvbd_flush(info);
@@ -1605,8 +1608,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
blkif_req(req)->error = BLK_STS_NOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
- blk_queue_max_discard_sectors(rq, 0);
- blk_queue_max_secure_erase_sectors(rq, 0);
+ blk_queue_disable_discard(rq);
+ blk_queue_disable_secure_erase(rq);
}
break;
case BLKIF_OP_FLUSH_DISKCACHE:
@@ -1627,7 +1630,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
blkif_req(req)->error = BLK_STS_OK;
info->feature_fua = 0;
info->feature_flush = 0;
- xlvbd_flush(info);
}
fallthrough;
case BLKIF_OP_READ:
@@ -2315,8 +2317,6 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
static void blkfront_connect(struct blkfront_info *info)
{
unsigned long long sectors;
- unsigned long sector_size;
- unsigned int physical_sector_size;
int err, i;
struct blkfront_ring_info *rinfo;
@@ -2355,7 +2355,7 @@ static void blkfront_connect(struct blkfront_info *info)
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"sectors", "%llu", &sectors,
"info", "%u", &info->vdisk_info,
- "sector-size", "%lu", &sector_size,
+ "sector-size", "%lu", &info->sector_size,
NULL);
if (err) {
xenbus_dev_fatal(info->xbdev, err,
@@ -2369,9 +2369,9 @@ static void blkfront_connect(struct blkfront_info *info)
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
- physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
+ info->physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
"physical-sector-size",
- sector_size);
+ info->sector_size);
blkfront_gather_backend_features(info);
for_each_rinfo(info, rinfo, i) {
err = blkfront_setup_indirect(rinfo);
@@ -2383,8 +2383,7 @@ static void blkfront_connect(struct blkfront_info *info)
}
}
- err = xlvbd_alloc_gendisk(sectors, info, sector_size,
- physical_sector_size);
+ err = xlvbd_alloc_gendisk(sectors, info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
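xen-blkfront now reacts to a backend that rejects discards at runtime by calling the dedicated disable helpers instead of zeroing individual limits. A minimal sketch of the same reaction in a generic driver; the surrounding error-handling context is invented:

#include <linux/blkdev.h>

static void example_handle_discard_notsupp(struct gendisk *disk)
{
        /* The device stopped accepting discards; stop issuing them. */
        blk_queue_disable_discard(disk->queue);
        blk_queue_disable_secure_erase(disk->queue);
}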
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 7c5f4e4d9b50..4b7219be1bb8 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -409,4 +409,5 @@ static void __exit z2_exit(void)
module_init(z2_init);
module_exit(z2_exit);
+MODULE_DESCRIPTION("Amiga Zorro II ramdisk driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3acd7006ad2c..efcb8d9d274c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2208,6 +2208,8 @@ static int zram_add(void)
#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
.max_write_zeroes_sectors = UINT_MAX,
#endif
+ .features = BLK_FEAT_STABLE_WRITES |
+ BLK_FEAT_SYNCHRONOUS,
};
struct zram *zram;
int ret, device_id;
@@ -2245,10 +2247,6 @@ static int zram_add(void)
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
- /* zram devices sort of resembles non-rotational disks */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 0b5f218ac505..90a94a111e67 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -105,6 +105,7 @@ config BT_HCIUART
tristate "HCI UART driver"
depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
depends on NVMEM || !NVMEM
+ depends on POWER_SEQUENCING || !POWER_SEQUENCING
depends on TTY
help
Bluetooth HCI UART driver.
@@ -287,12 +288,12 @@ config BT_HCIBCM203X
config BT_HCIBCM4377
- tristate "HCI BCM4377/4378/4387 PCIe driver"
+ tristate "HCI BCM4377/4378/4387/4388 PCIe driver"
depends on PCI
select FW_LOADER
help
- Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via
- PCIe. These are usually found in Apple machines.
+ Support for Broadcom BCM4377/4378/4387/4388 Bluetooth chipsets
+ attached via PCIe. These are usually found in Apple machines.
Say Y here to compile support for HCI BCM4377 family devices into the
kernel or say M to compile it as module (hci_bcm4377).
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 0c855c3ee1c1..e7a612504ab1 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -26,21 +26,11 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
-#define BTINTEL_PPAG_NAME "PPAG"
-
enum {
DSM_SET_WDISABLE2_DELAY = 1,
DSM_SET_RESET_METHOD = 3,
};
-/* structure to store the PPAG data read from ACPI table */
-struct btintel_ppag {
- u32 domain;
- u32 mode;
- acpi_status status;
- struct hci_dev *hdev;
-};
-
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@@ -482,6 +472,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x19: /* Slr-F */
case 0x1b: /* Mgr */
case 0x1c: /* Gale Peak (GaP) */
+ case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
break;
default:
@@ -641,6 +632,10 @@ int btintel_parse_version_tlv(struct hci_dev *hdev,
case INTEL_TLV_GIT_SHA1:
version->git_sha1 = get_unaligned_le32(tlv->val);
break;
+ case INTEL_TLV_FW_ID:
+ snprintf(version->fw_id, sizeof(version->fw_id),
+ "%s", tlv->val);
+ break;
default:
/* Ignore rest of information */
break;
@@ -1324,65 +1319,6 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
return 0;
}
-static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data,
- void **ret)
-{
- acpi_status status;
- size_t len;
- struct btintel_ppag *ppag = data;
- union acpi_object *p, *elements;
- struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL};
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- struct hci_dev *hdev = ppag->hdev;
-
- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- if (ACPI_FAILURE(status)) {
- bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
- return status;
- }
-
- len = strlen(string.pointer);
- if (len < strlen(BTINTEL_PPAG_NAME)) {
- kfree(string.pointer);
- return AE_OK;
- }
-
- if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
- kfree(string.pointer);
- return AE_OK;
- }
- kfree(string.pointer);
-
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- ppag->status = status;
- bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
- return status;
- }
-
- p = buffer.pointer;
- ppag = (struct btintel_ppag *)data;
-
- if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
- kfree(buffer.pointer);
- bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
- p->type, p->package.count);
- ppag->status = AE_ERROR;
- return AE_ERROR;
- }
-
- elements = p->package.elements;
-
- /* PPAG table is located at element[1] */
- p = &elements[1];
-
- ppag->domain = (u32)p->package.elements[0].integer.value;
- ppag->mode = (u32)p->package.elements[1].integer.value;
- ppag->status = AE_OK;
- kfree(buffer.pointer);
- return AE_CTRL_TERMINATE;
-}
-
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
@@ -2202,30 +2138,61 @@ static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver,
const char *suffix)
{
const char *format;
- /* The firmware file name for new generation controllers will be
- * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>
- */
- switch (ver->cnvi_top & 0xfff) {
+ u32 cnvi, cnvr;
+
+ cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top));
+
+ cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvr_top));
+
/* Only Blazar product supports downloading of intermediate loader
* image
*/
- case BTINTEL_CNVI_BLAZARI:
- if (ver->img_type == BTINTEL_IMG_BOOTLOADER)
+ if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e) {
+ u8 zero[BTINTEL_FWID_MAXLEN];
+
+ if (ver->img_type == BTINTEL_IMG_BOOTLOADER) {
format = "intel/ibt-%04x-%04x-iml.%s";
- else
- format = "intel/ibt-%04x-%04x.%s";
- break;
- default:
- format = "intel/ibt-%04x-%04x.%s";
- break;
+ snprintf(fw_name, len, format, cnvi, cnvr, suffix);
+ return;
+ }
+
+ memset(zero, 0, sizeof(zero));
+
+ /* ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>-<fw_id> */
+ if (memcmp(ver->fw_id, zero, sizeof(zero))) {
+ format = "intel/ibt-%04x-%04x-%s.%s";
+ snprintf(fw_name, len, format, cnvi, cnvr,
+ ver->fw_id, suffix);
+ return;
+ }
+ /* If firmware id is not present, fall back to the legacy naming
+ * convention
+ */
}
+ /* Fall back to the legacy naming convention for other controllers
+ * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>
+ */
+ format = "intel/ibt-%04x-%04x.%s";
+ snprintf(fw_name, len, format, cnvi, cnvr, suffix);
+}
- snprintf(fw_name, len, format,
- INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
- INTEL_CNVX_TOP_STEP(ver->cnvi_top)),
- INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
- INTEL_CNVX_TOP_STEP(ver->cnvr_top)),
- suffix);
+static void btintel_get_iml_tlv(const struct intel_version_tlv *ver,
+ char *fw_name, size_t len,
+ const char *suffix)
+{
+ const char *format;
+ u32 cnvi, cnvr;
+
+ cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top));
+
+ cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
+ INTEL_CNVX_TOP_STEP(ver->cnvr_top));
+
+ format = "intel/ibt-%04x-%04x-iml.%s";
+ snprintf(fw_name, len, format, cnvi, cnvr, suffix);
}
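
Taken together, the two helpers above pick between three name patterns: the intermediate-loader name while the controller is still in its bootloader, a fw_id-suffixed name when the FWID TLV is reported, and the legacy name otherwise. A user-space sketch of the resulting strings, using hypothetical packed cnvi/cnvr values and a hypothetical fw_id (the driver derives the real values from the version TLV):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical values for illustration only */
            unsigned int cnvi = 0x0041, cnvr = 0x0041;
            const char *fw_id = "0290", *suffix = "sfi";
            char name[128];

            /* Bootloader stage on hw variant >= 0x1e: intermediate loader image */
            snprintf(name, sizeof(name), "intel/ibt-%04x-%04x-iml.%s",
                     cnvi, cnvr, suffix);
            puts(name);             /* intel/ibt-0041-0041-iml.sfi */

            /* Operational image with a FWID TLV present */
            snprintf(name, sizeof(name), "intel/ibt-%04x-%04x-%s.%s",
                     cnvi, cnvr, fw_id, suffix);
            puts(name);             /* intel/ibt-0041-0041-0290.sfi */

            /* Legacy fallback when no firmware id is reported */
            snprintf(name, sizeof(name), "intel/ibt-%04x-%04x.%s",
                     cnvi, cnvr, suffix);
            puts(name);             /* intel/ibt-0041-0041.sfi */
            return 0;
    }
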
static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
@@ -2233,7 +2200,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
u32 *boot_param)
{
const struct firmware *fw;
- char fwname[64];
+ char fwname[128];
int err;
ktime_t calltime;
@@ -2268,7 +2235,20 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
}
}
- btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
+ if (ver->img_type == BTINTEL_IMG_OP) {
+ /* Controller running OP image. In case of FW downgrade,
+ * FWID TLV may not be present and driver may attempt to load
+ * a firmware image which doesn't exist. Let's compare the version
+ * of the IML image instead.
+ */
+ if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e)
+ btintel_get_iml_tlv(ver, fwname, sizeof(fwname), "sfi");
+ else
+ btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
+ } else {
+ btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
+ }
+
err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
@@ -2427,10 +2407,13 @@ error:
static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
{
- struct btintel_ppag ppag;
struct sk_buff *skb;
struct hci_ppag_enable_cmd ppag_cmd;
acpi_handle handle;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *p, *elements;
+ u32 domain, mode;
+ acpi_status status;
/* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
switch (ver->cnvr_top & 0xFFF) {
@@ -2448,22 +2431,34 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
return;
}
- memset(&ppag, 0, sizeof(ppag));
-
- ppag.hdev = hdev;
- ppag.status = AE_NOT_FOUND;
- acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
- btintel_ppag_callback, &ppag, NULL);
-
- if (ACPI_FAILURE(ppag.status)) {
- if (ppag.status == AE_NOT_FOUND) {
+ status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
return;
}
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return;
}
- if (ppag.domain != 0x12) {
+ p = buffer.pointer;
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
+ bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ kfree(buffer.pointer);
+ return;
+ }
+
+ elements = p->package.elements;
+
+ /* PPAG table is located at element[1] */
+ p = &elements[1];
+
+ domain = (u32)p->package.elements[0].integer.value;
+ mode = (u32)p->package.elements[1].integer.value;
+ kfree(buffer.pointer);
+
+ if (domain != 0x12) {
bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware");
return;
}
@@ -2474,19 +2469,22 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
* BIT 1 : 0 Disabled in China
* 1 Enabled in China
*/
- if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) {
- bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS");
+ mode &= 0x03;
+
+ if (!mode) {
+ bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in BIOS");
return;
}
- ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode);
+ ppag_cmd.ppag_enable_flags = cpu_to_le32(mode);
- skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT);
+ skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd),
+ &ppag_cmd, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb));
return;
}
- bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode);
+ bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode);
kfree_skb(skb);
}
@@ -2600,6 +2598,24 @@ static void btintel_set_dsm_reset_method(struct hci_dev *hdev,
data->acpi_reset_method = btintel_acpi_reset_method;
}
+#define BTINTEL_ISODATA_HANDLE_BASE 0x900
+
+static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /*
+ * Distinguish ISO data packets from ACL data packets
+ * based on their connection handle value range.
+ */
+ if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE)
+ return HCI_ISODATA_PKT;
+ }
+
+ return hci_skb_pkt_type(skb);
+}
+
int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2635,7 +2651,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
return err;
/* If image type returned is BTINTEL_IMG_IML, then controller supports
- * intermediae loader image
+ * intermediate loader image
*/
if (ver->img_type == BTINTEL_IMG_IML) {
err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
@@ -2703,6 +2719,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x19:
case 0x1b:
case 0x1c:
+ case 0x1d:
case 0x1e:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
@@ -2713,7 +2730,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
}
EXPORT_SYMBOL_GPL(btintel_set_msft_opcode);
-static void btintel_print_fseq_info(struct hci_dev *hdev)
+void btintel_print_fseq_info(struct hci_dev *hdev)
{
struct sk_buff *skb;
u8 *p;
@@ -2825,6 +2842,7 @@ static void btintel_print_fseq_info(struct hci_dev *hdev)
kfree_skb(skb);
}
+EXPORT_SYMBOL_GPL(btintel_print_fseq_info);
static int btintel_setup_combined(struct hci_dev *hdev)
{
@@ -3039,11 +3057,15 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_bootloader_setup(hdev, &ver);
btintel_register_devcoredump_support(hdev);
break;
+ case 0x18: /* GfP2 */
+ case 0x1c: /* GaP */
+ /* Re-classify packet type for controllers with LE audio */
+ hdev->classify_pkt_type = btintel_classify_pkt_type;
+ fallthrough;
case 0x17:
- case 0x18:
case 0x19:
case 0x1b:
- case 0x1c:
+ case 0x1d:
case 0x1e:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index b5fea735e260..aa70e4c27416 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -42,7 +42,8 @@ enum {
INTEL_TLV_SBE_TYPE,
INTEL_TLV_OTP_BDADDR,
INTEL_TLV_UNLOCKED_STATE,
- INTEL_TLV_GIT_SHA1
+ INTEL_TLV_GIT_SHA1,
+ INTEL_TLV_FW_ID = 0x50
};
struct intel_tlv {
@@ -57,6 +58,8 @@ struct intel_tlv {
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
#define BTINTEL_IMG_OP 0x03 /* Operational image */
+#define BTINTEL_FWID_MAXLEN 64
+
struct intel_version_tlv {
u32 cnvi_top;
u32 cnvr_top;
@@ -77,6 +80,7 @@ struct intel_version_tlv {
u8 limited_cce;
u8 sbe_type;
u32 git_sha1;
+ u8 fw_id[BTINTEL_FWID_MAXLEN];
bdaddr_t otp_bd_addr;
};
@@ -244,6 +248,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver);
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
+void btintel_print_fseq_info(struct hci_dev *hdev);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -373,4 +378,8 @@ static inline int btintel_shutdown_combined(struct hci_dev *hdev)
static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
}
+
+static inline void btintel_print_fseq_info(struct hci_dev *hdev)
+{
+}
#endif
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 5b6805d87fcf..0d1a0415557b 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -382,7 +382,7 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
/* The first 4 bytes indicates the Intel PCIe specific packet type */
pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
- if (!data) {
+ if (!pdata) {
bt_dev_err(hdev, "Corrupted packet received");
ret = -EILSEQ;
goto exit_error;
@@ -797,7 +797,6 @@ static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
kfree(txq->bufs);
return -ENOMEM;
}
- memset(txq->buf_v_addr, 0, txq->count * BTINTEL_PCIE_BUFFER_SIZE);
/* Setup the allocated DMA buffer to bufs. Each data_buf should
* have virtual address and physical address
@@ -842,7 +841,6 @@ static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
kfree(rxq->bufs);
return -ENOMEM;
}
- memset(rxq->buf_v_addr, 0, rxq->count * BTINTEL_PCIE_BUFFER_SIZE);
/* Setup the allocated DMA buffer to bufs. Each data_buf should
* have virtual address and physical address
@@ -1197,9 +1195,11 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
err = -EINVAL;
+ goto exit_error;
break;
}
+ btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -1327,6 +1327,12 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
data = pci_get_drvdata(pdev);
btintel_pcie_reset_bt(data);
+ for (int i = 0; i < data->alloc_vecs; i++) {
+ struct msix_entry *msix_entry;
+
+ msix_entry = &data->msix_entries[i];
+ free_irq(msix_entry->vector, msix_entry);
+ }
pci_free_irq_vectors(pdev);
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 812fd2a8f853..b7c348687a77 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -4,6 +4,9 @@
*/
#include <linux/module.h>
#include <linux/firmware.h>
+#include <linux/usb.h>
+#include <linux/iopoll.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -19,6 +22,9 @@
#define MTK_SEC_MAP_COMMON_SIZE 12
#define MTK_SEC_MAP_NEED_SEND_SIZE 52
+/* mt79xx pads ISO data packets to this size for transmission */
+#define MTK_ISO_THRESHOLD 264
+
struct btmtk_patch_header {
u8 datetime[16];
u8 platform[4];
@@ -64,7 +70,7 @@ static void btmtk_coredump(struct hci_dev *hdev)
static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
char buf[80];
snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
@@ -85,7 +91,7 @@ static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
static void btmtk_coredump_notify(struct hci_dev *hdev, int state)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
switch (state) {
case HCI_DEVCOREDUMP_IDLE:
@@ -103,6 +109,24 @@ static void btmtk_coredump_notify(struct hci_dev *hdev, int state)
}
}
+void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver,
+ u32 fw_flavor)
+{
+ if (dev_id == 0x7925)
+ snprintf(buf, size,
+ "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, dev_id & 0xffff, (fw_ver & 0xff) + 1);
+ else if (dev_id == 0x7961 && fw_flavor)
+ snprintf(buf, size,
+ "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
+ dev_id & 0xffff, (fw_ver & 0xff) + 1);
+ else
+ snprintf(buf, size,
+ "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, (fw_ver & 0xff) + 1);
+}
+EXPORT_SYMBOL_GPL(btmtk_fw_get_filename);
+
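
A short usage sketch for the helper above; the version values are hypothetical and the commented strings are what each branch produces:

    char fwname[64];

    /* MT7925: per-chip subdirectory */
    btmtk_fw_get_filename(fwname, sizeof(fwname), 0x7925, 0x010a, 0);
    /* -> "mediatek/mt7925/BT_RAM_CODE_MT7925_1_b_hdr.bin" */

    /* MT7961 with a non-zero firmware flavor */
    btmtk_fw_get_filename(fwname, sizeof(fwname), 0x7961, 0x0100, 1);
    /* -> "mediatek/BT_RAM_CODE_MT7961_1a_1_hdr.bin" */

    /* Any other chip, e.g. MT7922 */
    btmtk_fw_get_filename(fwname, sizeof(fwname), 0x7922, 0x0100, 0);
    /* -> "mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin" */
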
int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
@@ -337,7 +361,7 @@ EXPORT_SYMBOL_GPL(btmtk_set_bdaddr);
void btmtk_reset_sync(struct hci_dev *hdev)
{
- struct btmediatek_data *reset_work = hci_get_priv(hdev);
+ struct btmtk_data *reset_work = hci_get_priv(hdev);
int err;
hci_dev_lock(hdev);
@@ -353,7 +377,7 @@ EXPORT_SYMBOL_GPL(btmtk_reset_sync);
int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
u32 fw_version)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
return -EOPNOTSUPP;
@@ -369,7 +393,7 @@ EXPORT_SYMBOL_GPL(btmtk_register_coredump);
int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct btmediatek_data *data = hci_get_priv(hdev);
+ struct btmtk_data *data = hci_get_priv(hdev);
int err;
if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
@@ -413,6 +437,1057 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
+static void btmtk_usb_wmt_recv(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btmtk_data *data = hci_get_priv(hdev);
+ struct sk_buff *skb;
+ int err;
+
+ if (urb->status == 0 && urb->actual_length > 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
+ /* WMT event shouldn't be fragmented and the size should be
+ * less than HCI_WMT_MAX_EVENT_SIZE.
+ */
+ skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+ kfree(urb->setup_packet);
+ return;
+ }
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
+
+ /* When someone waits for the WMT event, the skb is cloned here
+ * and the event is then processed from that copy.
+ */
+ if (test_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags)) {
+ data->evt_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!data->evt_skb) {
+ kfree_skb(skb);
+ kfree(urb->setup_packet);
+ return;
+ }
+ }
+
+ err = hci_recv_frame(hdev, skb);
+ if (err < 0) {
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+ kfree(urb->setup_packet);
+ return;
+ }
+
+ if (test_and_clear_bit(BTMTK_TX_WAIT_VND_EVT,
+ &data->flags)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&data->flags,
+ BTMTK_TX_WAIT_VND_EVT);
+ }
+ kfree(urb->setup_packet);
+ return;
+ } else if (urb->status == -ENOENT) {
+ /* Avoid suspend failures when usb_kill_urb() is in progress */
+ return;
+ }
+
+ usb_mark_last_busy(data->udev);
+
+ /* The URB complete handler is still called with urb->actual_length = 0
+ * when the event is not available, so we should keep re-submitting the
+ * URB until the WMT event returns. It is also necessary to wait a while
+ * between two consecutive control URBs so that the target device has
+ * time to generate the event. Otherwise, the WMT event cannot return
+ * from the device successfully.
+ */
+ udelay(500);
+
+ usb_anchor_urb(urb, data->ctrl_anchor);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ kfree(urb->setup_packet);
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected
+ */
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int btmtk_usb_submit_wmt_recv_urb(struct hci_dev *hdev)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ struct usb_ctrlrequest *dr;
+ unsigned char *buf;
+ int err, size = 64;
+ unsigned int pipe;
+ struct urb *urb;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(*dr), GFP_KERNEL);
+ if (!dr) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+ dr->bRequest = 1;
+ dr->wIndex = cpu_to_le16(0);
+ dr->wValue = cpu_to_le16(48);
+ dr->wLength = cpu_to_le16(size);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ kfree(dr);
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+
+ usb_fill_control_urb(urb, data->udev, pipe, (void *)dr,
+ buf, size, btmtk_usb_wmt_recv, hdev);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_anchor_urb(urb, data->ctrl_anchor);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
+ struct btmtk_hci_wmt_params *wmt_params)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ u32 hlen, status = BTMTK_WMT_INVALID;
+ struct btmtk_hci_wmt_evt *wmt_evt;
+ struct btmtk_hci_wmt_cmd *wc;
+ struct btmtk_wmt_hdr *hdr;
+ int err;
+
+ /* Send the WMT command and wait until the WMT event returns */
+ hlen = sizeof(*hdr) + wmt_params->dlen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
+
+ hdr = &wc->hdr;
+ hdr->dir = 1;
+ hdr->op = wmt_params->op;
+ hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
+ hdr->flag = wmt_params->flag;
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
+
+ set_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+
+ /* WMT cmd/event doesn't follow the generic HCI cmd/event handling; it
+ * requires constant polling of the control pipe until the host receives
+ * the WMT event. Thus, we must explicitly take a PM reference on the USB
+ * interface to prevent it from being auto-suspended while a WMT
+ * cmd/event is in progress.
+ */
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ goto err_free_wc;
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
+
+ if (err < 0) {
+ clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+ usb_autopm_put_interface(data->intf);
+ goto err_free_wc;
+ }
+
+ /* Submit control IN URB on demand to process the WMT event */
+ err = btmtk_usb_submit_wmt_recv_urb(hdev);
+
+ usb_autopm_put_interface(data->intf);
+
+ if (err < 0)
+ goto err_free_wc;
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for the BTMTK_TX_WAIT_VND_EVT
+ * state to be cleared. The driver specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+ goto err_free_wc;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+ err = -ETIMEDOUT;
+ goto err_free_wc;
+ }
+
+ if (data->evt_skb == NULL)
+ goto err_free_wc;
+
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+ bt_dev_err(hdev, "Wrong op received %d expected %d",
+ wmt_evt->whdr.op, hdr->op);
+ err = -EIO;
+ goto err_free_skb;
+ }
+
+ switch (wmt_evt->whdr.op) {
+ case BTMTK_WMT_SEMAPHORE:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_UNDONE;
+ else
+ status = BTMTK_WMT_PATCH_DONE;
+ break;
+ case BTMTK_WMT_FUNC_CTRL:
+ wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
+ if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
+ status = BTMTK_WMT_ON_DONE;
+ else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
+ status = BTMTK_WMT_ON_PROGRESS;
+ else
+ status = BTMTK_WMT_ON_UNDONE;
+ break;
+ case BTMTK_WMT_PATCH_DWNLD:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_DONE;
+ else if (wmt_evt->whdr.flag == 1)
+ status = BTMTK_WMT_PATCH_PROGRESS;
+ else
+ status = BTMTK_WMT_PATCH_UNDONE;
+ break;
+ }
+
+ if (wmt_params->status)
+ *wmt_params->status = status;
+
+err_free_skb:
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+err_free_wc:
+ kfree(wc);
+ return err;
+}
+
+static int btmtk_usb_func_query(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ int status, err;
+ u8 param = 0;
+
+ /* Query whether the function is enabled */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 4;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = &status;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query function status (%d)", err);
+ return err;
+ }
+
+ return status;
+}
+
+static int btmtk_usb_uhw_reg_write(struct hci_dev *hdev, u32 reg, u32 val)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ int pipe, err;
+ void *buf;
+
+ buf = kzalloc(4, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ put_unaligned_le32(val, buf);
+
+ pipe = usb_sndctrlpipe(data->udev, 0);
+ err = usb_control_msg(data->udev, pipe, 0x02,
+ 0x5E,
+ reg >> 16, reg & 0xffff,
+ buf, 4, USB_CTRL_SET_TIMEOUT);
+ if (err < 0)
+ bt_dev_err(hdev, "Failed to write uhw reg(%d)", err);
+
+ kfree(buf);
+
+ return err;
+}
+
+static int btmtk_usb_uhw_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ int pipe, err;
+ void *buf;
+
+ buf = kzalloc(4, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+ err = usb_control_msg(data->udev, pipe, 0x01,
+ 0xDE,
+ reg >> 16, reg & 0xffff,
+ buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to read uhw reg(%d)", err);
+ goto err_free_buf;
+ }
+
+ *val = get_unaligned_le32(buf);
+ bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val);
+
+err_free_buf:
+ kfree(buf);
+
+ return err;
+}
+
+static int btmtk_usb_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ int pipe, err, size = sizeof(u32);
+ void *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+ err = usb_control_msg(data->udev, pipe, 0x63,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ reg >> 16, reg & 0xffff,
+ buf, size, USB_CTRL_GET_TIMEOUT);
+ if (err < 0)
+ goto err_free_buf;
+
+ *val = get_unaligned_le32(buf);
+
+err_free_buf:
+ kfree(buf);
+
+ return err;
+}
+
+static int btmtk_usb_id_get(struct hci_dev *hdev, u32 reg, u32 *id)
+{
+ return btmtk_usb_reg_read(hdev, reg, id);
+}
+
+static u32 btmtk_usb_reset_done(struct hci_dev *hdev)
+{
+ u32 val = 0;
+
+ btmtk_usb_uhw_reg_read(hdev, MTK_BT_MISC, &val);
+
+ return val & MTK_BT_RST_DONE;
+}
+
+int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id)
+{
+ u32 val;
+ int err;
+
+ if (dev_id == 0x7922) {
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val);
+ if (err < 0)
+ return err;
+ val |= 0x00002020;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val);
+ if (err < 0)
+ return err;
+ val |= BIT(0);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val);
+ if (err < 0)
+ return err;
+ msleep(100);
+ } else if (dev_id == 0x7925) {
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val);
+ if (err < 0)
+ return err;
+ val |= (1 << 5);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val);
+ if (err < 0)
+ return err;
+ val &= 0xFFFF00FF;
+ val |= (1 << 13);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val);
+ if (err < 0)
+ return err;
+ val |= (1 << 0);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val);
+ if (err < 0)
+ return err;
+ msleep(100);
+ } else {
+ /* It's Device EndPoint Reset Option Register */
+ bt_dev_dbg(hdev, "Initiating reset mechanism via uhw");
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_WDT_STATUS, &val);
+ if (err < 0)
+ return err;
+ /* Reset the bluetooth chip via USB interface. */
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 1);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val);
+ if (err < 0)
+ return err;
+ /* MT7921 needs a 20ms delay between toggling the reset bit */
+ msleep(20);
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 0);
+ if (err < 0)
+ return err;
+ err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val);
+ if (err < 0)
+ return err;
+ }
+
+ err = readx_poll_timeout(btmtk_usb_reset_done, hdev, val,
+ val & MTK_BT_RST_DONE, 20000, 1000000);
+ if (err < 0)
+ bt_dev_err(hdev, "Reset timeout");
+
+ if (dev_id == 0x7922) {
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ if (err < 0)
+ return err;
+ }
+
+ err = btmtk_usb_id_get(hdev, 0x70010200, &val);
+ if (err < 0 || !val)
+ bt_dev_err(hdev, "Can't get device id, subsys reset fail.");
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_subsys_reset);
+
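
The readx_poll_timeout() call above polls btmtk_usb_reset_done() until MTK_BT_RST_DONE is observed, sleeping roughly 20 ms between reads with a 1 s budget. A hand-rolled equivalent, shown only as a sketch to make those semantics explicit:

    #include <linux/delay.h>
    #include <linux/ktime.h>

    static int example_wait_reset_done(struct hci_dev *hdev)
    {
            ktime_t timeout = ktime_add_us(ktime_get(), 1000000);
            u32 val;

            for (;;) {
                    val = btmtk_usb_reset_done(hdev);
                    if (val & MTK_BT_RST_DONE)
                            return 0;
                    if (ktime_after(ktime_get(), timeout))
                            return -ETIMEDOUT;
                    usleep_range(20000, 21000);
            }
    }
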
+int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtk_data *data = hci_get_priv(hdev);
+ u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ switch (handle) {
+ case 0xfc6f: /* Firmware dump from device */
+ /* When the firmware hangs, the device can no longer
+ * suspend, so disable auto-suspend.
+ */
+ usb_disable_autosuspend(data->udev);
+
+ /* We need to forward the diagnostic packet to the userspace daemon
+ * for backward compatibility, so we have to clone an extra copy of
+ * the packet for the in-kernel coredump support.
+ */
+ if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+
+ if (skb_cd)
+ btmtk_process_coredump(hdev, skb_cd);
+ }
+
+ fallthrough;
+ case 0x05ff: /* Firmware debug logging 1 */
+ case 0x05fe: /* Firmware debug logging 2 */
+ return hci_recv_diag(hdev, skb);
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_recv_acl);
+
+static int btmtk_isopkt_pad(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ if (skb->len > MTK_ISO_THRESHOLD)
+ return -EINVAL;
+
+ if (skb_pad(skb, MTK_ISO_THRESHOLD - skb->len))
+ return -ENOMEM;
+
+ __skb_put(skb, MTK_ISO_THRESHOLD - skb->len);
+
+ return 0;
+}
+
+static int __set_mtk_intr_interface(struct hci_dev *hdev)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct usb_interface *intf = btmtk_data->isopkt_intf;
+ int i, err;
+
+ if (!btmtk_data->isopkt_intf)
+ return -ENODEV;
+
+ err = usb_set_interface(btmtk_data->udev, MTK_ISO_IFNUM, 1);
+ if (err < 0) {
+ bt_dev_err(hdev, "setting interface failed (%d)", -err);
+ return err;
+ }
+
+ btmtk_data->isopkt_tx_ep = NULL;
+ btmtk_data->isopkt_rx_ep = NULL;
+
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ struct usb_endpoint_descriptor *ep_desc;
+
+ ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (!btmtk_data->isopkt_tx_ep &&
+ usb_endpoint_is_int_out(ep_desc)) {
+ btmtk_data->isopkt_tx_ep = ep_desc;
+ continue;
+ }
+
+ if (!btmtk_data->isopkt_rx_ep &&
+ usb_endpoint_is_int_in(ep_desc)) {
+ btmtk_data->isopkt_rx_ep = ep_desc;
+ continue;
+ }
+ }
+
+ if (!btmtk_data->isopkt_tx_ep ||
+ !btmtk_data->isopkt_rx_ep) {
+ bt_dev_err(hdev, "invalid interrupt descriptors");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb,
+ usb_complete_t tx_complete)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct urb *urb;
+ unsigned int pipe;
+
+ if (!btmtk_data->isopkt_tx_ep)
+ return ERR_PTR(-ENODEV);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return ERR_PTR(-ENOMEM);
+
+ if (btmtk_isopkt_pad(hdev, skb))
+ return ERR_PTR(-EINVAL);
+
+ pipe = usb_sndintpipe(btmtk_data->udev,
+ btmtk_data->isopkt_tx_ep->bEndpointAddress);
+
+ usb_fill_int_urb(urb, btmtk_data->udev, pipe,
+ skb->data, skb->len, tx_complete,
+ skb, btmtk_data->isopkt_tx_ep->bInterval);
+
+ skb->dev = (void *)hdev;
+
+ return urb;
+}
+EXPORT_SYMBOL_GPL(alloc_mtk_intr_urb);
+
+static int btmtk_recv_isopkt(struct hci_dev *hdev, void *buffer, int count)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct sk_buff *skb;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&btmtk_data->isorxlock, flags);
+ skb = btmtk_data->isopkt_skb;
+
+ while (count) {
+ int len;
+
+ if (!skb) {
+ skb = bt_skb_alloc(HCI_MAX_ISO_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ err = -ENOMEM;
+ break;
+ }
+
+ hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+ hci_skb_expect(skb) = HCI_ISO_HDR_SIZE;
+ }
+
+ len = min_t(uint, hci_skb_expect(skb), count);
+ skb_put_data(skb, buffer, len);
+
+ count -= len;
+ buffer += len;
+ hci_skb_expect(skb) -= len;
+
+ if (skb->len == HCI_ISO_HDR_SIZE) {
+ __le16 dlen = ((struct hci_iso_hdr *)skb->data)->dlen;
+
+ /* Complete ISO header */
+ hci_skb_expect(skb) = __le16_to_cpu(dlen);
+
+ if (skb_tailroom(skb) < hci_skb_expect(skb)) {
+ kfree_skb(skb);
+ skb = NULL;
+
+ err = -EILSEQ;
+ break;
+ }
+ }
+
+ if (!hci_skb_expect(skb)) {
+ /* Complete frame */
+ hci_recv_frame(hdev, skb);
+ skb = NULL;
+ }
+ }
+
+ btmtk_data->isopkt_skb = skb;
+ spin_unlock_irqrestore(&btmtk_data->isorxlock, flags);
+
+ return err;
+}
+
+static void btmtk_intr_complete(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ int err;
+
+ BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
+ urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return;
+
+ if (hdev->suspended)
+ return;
+
+ if (urb->status == 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
+ if (btmtk_recv_isopkt(hdev, urb->transfer_buffer,
+ urb->actual_length) < 0) {
+ bt_dev_err(hdev, "corrupted iso packet");
+ hdev->stat.err_rx++;
+ }
+ } else if (urb->status == -ENOENT) {
+ /* Avoid suspend failures when usb_kill_urb() is in progress */
+ return;
+ }
+
+ usb_mark_last_busy(btmtk_data->udev);
+ usb_anchor_urb(urb, &btmtk_data->isopkt_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected
+ */
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
+ if (err != -EPERM)
+ hci_cmd_sync_cancel(hdev, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int btmtk_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ unsigned char *buf;
+ unsigned int pipe;
+ struct urb *urb;
+ int err, size;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!btmtk_data->isopkt_rx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb)
+ return -ENOMEM;
+ size = le16_to_cpu(btmtk_data->isopkt_rx_ep->wMaxPacketSize);
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvintpipe(btmtk_data->udev,
+ btmtk_data->isopkt_rx_ep->bEndpointAddress);
+
+ usb_fill_int_urb(urb, btmtk_data->udev, pipe, buf, size,
+ btmtk_intr_complete, hdev,
+ btmtk_data->isopkt_rx_ep->bInterval);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_mark_last_busy(btmtk_data->udev);
+ usb_anchor_urb(urb, &btmtk_data->isopkt_anchor);
+
+ err = usb_submit_urb(urb, mem_flags);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static int btmtk_usb_isointf_init(struct hci_dev *hdev)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ u8 iso_param[2] = { 0x08, 0x01 };
+ struct sk_buff *skb;
+ int err;
+
+ init_usb_anchor(&btmtk_data->isopkt_anchor);
+ spin_lock_init(&btmtk_data->isorxlock);
+
+ __set_mtk_intr_interface(hdev);
+
+ err = btmtk_submit_intr_urb(hdev, GFP_KERNEL);
+ if (err < 0) {
+ usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+ bt_dev_err(hdev, "ISO intf not support (%d)", err);
+ return err;
+ }
+
+ skb = __hci_cmd_sync(hdev, 0xfd98, sizeof(iso_param), iso_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to apply iso setting (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+int btmtk_usb_resume(struct hci_dev *hdev)
+{
+ /* This function performs the MediaTek-specific additional steps taken
+ * when the Bluetooth USB driver's resume function is called.
+ */
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+
+ /* Resubmit urb for iso data transmission */
+ if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) {
+ if (btmtk_submit_intr_urb(hdev, GFP_NOIO) < 0)
+ clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_resume);
+
+int btmtk_usb_suspend(struct hci_dev *hdev)
+{
+ /* This function performs the MediaTek-specific additional steps taken
+ * when the Bluetooth USB driver's suspend function is called.
+ */
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+
+ /* Stop urb anchor for iso data transmission */
+ usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_suspend);
+
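
btmtk_usb_suspend() quiesces the anchored ISO URBs and btmtk_usb_resume() resubmits the interrupt URB if ISO traffic was running. A hypothetical host-driver glue sketch (the struct and callback names are assumptions, not the real btusb wiring):

    struct example_usb_data {
            struct hci_dev *hdev;
    };

    static int example_usb_suspend(struct usb_interface *intf, pm_message_t message)
    {
            struct example_usb_data *data = usb_get_intfdata(intf);

            /* Kill anchored ISO URBs before the bus is suspended */
            return btmtk_usb_suspend(data->hdev);
    }

    static int example_usb_resume(struct usb_interface *intf)
    {
            struct example_usb_data *data = usb_get_intfdata(intf);

            /* Resubmit the ISO interrupt URB if it was running */
            return btmtk_usb_resume(data->hdev);
    }
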
+int btmtk_usb_setup(struct hci_dev *hdev)
+{
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ ktime_t calltime, delta, rettime;
+ struct btmtk_tci_sleep tci_sleep;
+ unsigned long long duration;
+ struct sk_buff *skb;
+ const char *fwname;
+ int err, status;
+ u32 dev_id = 0;
+ char fw_bin_name[64];
+ u32 fw_version = 0, fw_flavor = 0;
+ u8 param;
+
+ calltime = ktime_get();
+
+ err = btmtk_usb_id_get(hdev, 0x80000008, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+
+ if (!dev_id || dev_id != 0x7663) {
+ err = btmtk_usb_id_get(hdev, 0x70010200, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+ err = btmtk_usb_id_get(hdev, 0x80021004, &fw_version);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw version (%d)", err);
+ return err;
+ }
+ err = btmtk_usb_id_get(hdev, 0x70010020, &fw_flavor);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
+ return err;
+ }
+ fw_flavor = (fw_flavor & 0x00000080) >> 7;
+ }
+
+ btmtk_data->dev_id = dev_id;
+
+ err = btmtk_register_coredump(hdev, btmtk_data->drv_name, fw_version);
+ if (err < 0)
+ bt_dev_err(hdev, "Failed to register coredump (%d)", err);
+
+ switch (dev_id) {
+ case 0x7663:
+ fwname = FIRMWARE_MT7663;
+ break;
+ case 0x7668:
+ fwname = FIRMWARE_MT7668;
+ break;
+ case 0x7922:
+ case 0x7961:
+ case 0x7925:
+ /* Reset the device to ensure it's in the initial state before
+ * downloading the firmware.
+ */
+
+ if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
+ btmtk_usb_subsys_reset(hdev, dev_id);
+
+ btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
+ fw_version, fw_flavor);
+
+ err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
+ btmtk_usb_hci_wmt_sync);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
+ clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
+ return err;
+ }
+
+ set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
+
+ /* It's Device EndPoint Reset Option Register */
+ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT,
+ MTK_EP_RST_IN_OUT_OPT);
+ if (err < 0)
+ return err;
+
+ /* Enable Bluetooth protocol */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ hci_set_msft_opcode(hdev, 0xFD30);
+ hci_set_aosp_capable(hdev);
+
+ /* Set up ISO interface after protocol enabled */
+ if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
+ if (!btmtk_usb_isointf_init(hdev))
+ set_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
+ }
+
+ goto done;
+ default:
+ bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
+ dev_id);
+ return -ENODEV;
+ }
+
+ /* Query whether the firmware is already downloaded */
+ wmt_params.op = BTMTK_WMT_SEMAPHORE;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = &status;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
+ return err;
+ }
+
+ if (status == BTMTK_WMT_PATCH_DONE) {
+ bt_dev_info(hdev, "firmware already downloaded");
+ goto ignore_setup_fw;
+ }
+
+ /* Set up the firmware which the device definitely requires */
+ err = btmtk_setup_firmware(hdev, fwname,
+ btmtk_usb_hci_wmt_sync);
+ if (err < 0)
+ return err;
+
+ignore_setup_fw:
+ err = readx_poll_timeout(btmtk_usb_func_query, hdev, status,
+ status < 0 || status != BTMTK_WMT_ON_PROGRESS,
+ 2000, 5000000);
+ /* -ETIMEDOUT happens */
+ if (err < 0)
+ return err;
+
+ /* The other errors happen in btmtk_usb_func_query */
+ if (status < 0)
+ return status;
+
+ if (status == BTMTK_WMT_ON_DONE) {
+ bt_dev_info(hdev, "function already on");
+ goto ignore_func_on;
+ }
+
+ /* Enable Bluetooth protocol */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ignore_func_on:
+ /* Apply the low power environment setup */
+ tci_sleep.mode = 0x5;
+ tci_sleep.duration = cpu_to_le16(0x640);
+ tci_sleep.host_duration = cpu_to_le16(0x640);
+ tci_sleep.host_wakeup_pin = 0;
+ tci_sleep.time_compensation = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+done:
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device setup in %llu usecs", duration);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_setup);
+
+int btmtk_usb_shutdown(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0;
+ int err;
+
+ /* Disable the device */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
+
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index cbcdb99a22e6..5df7c3296624 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -28,6 +28,21 @@
#define MTK_COREDUMP_END_LEN (sizeof(MTK_COREDUMP_END))
#define MTK_COREDUMP_NUM 255
+/* UHW CR mapping */
+#define MTK_BT_MISC 0x70002510
+#define MTK_BT_SUBSYS_RST 0x70002610
+#define MTK_UDMA_INT_STA_BT 0x74000024
+#define MTK_UDMA_INT_STA_BT1 0x74000308
+#define MTK_BT_WDT_STATUS 0x740003A0
+#define MTK_EP_RST_OPT 0x74011890
+#define MTK_EP_RST_IN_OUT_OPT 0x00010001
+#define MTK_BT_RST_DONE 0x00000100
+#define MTK_BT_RESET_REG_CONNV3 0x70028610
+#define MTK_BT_READ_DEV_ID 0x70010200
+
+/* MediaTek ISO Interface */
+#define MTK_ISO_IFNUM 2
+
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
BTMTK_WMT_TEST = 0x2,
@@ -126,6 +141,14 @@ struct btmtk_hci_wmt_params {
u32 *status;
};
+enum {
+ BTMTK_TX_WAIT_VND_EVT,
+ BTMTK_FIRMWARE_LOADED,
+ BTMTK_HW_RESET_ACTIVE,
+ BTMTK_ISOPKT_OVER_INTR,
+ BTMTK_ISOPKT_RUNNING,
+};
+
typedef int (*btmtk_reset_sync_func_t)(struct hci_dev *, void *);
struct btmtk_coredump_info {
@@ -135,10 +158,25 @@ struct btmtk_coredump_info {
int state;
};
-struct btmediatek_data {
+struct btmtk_data {
+ const char *drv_name;
+ unsigned long flags;
u32 dev_id;
btmtk_reset_sync_func_t reset_sync;
struct btmtk_coredump_info cd_info;
+
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct usb_anchor *ctrl_anchor;
+ struct sk_buff *evt_skb;
+ struct usb_endpoint_descriptor *isopkt_tx_ep;
+ struct usb_endpoint_descriptor *isopkt_rx_ep;
+ struct usb_interface *isopkt_intf;
+ struct usb_anchor isopkt_anchor;
+ struct sk_buff *isopkt_skb;
+
+ /* spinlock for ISO data transmission */
+ spinlock_t isorxlock;
};
typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *,
@@ -160,6 +198,24 @@ int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
u32 fw_version);
int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb);
+
+void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver,
+ u32 fw_flavor);
+
+int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id);
+
+int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb);
+
+struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb,
+ usb_complete_t tx_complete);
+
+int btmtk_usb_resume(struct hci_dev *hdev);
+
+int btmtk_usb_suspend(struct hci_dev *hdev);
+
+int btmtk_usb_setup(struct hci_dev *hdev);
+
+int btmtk_usb_shutdown(struct hci_dev *hdev);
#else
static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
@@ -168,29 +224,73 @@ static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
return -EOPNOTSUPP;
}
-static int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
- wmt_cmd_sync_func_t wmt_cmd_sync)
+static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev,
+ const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
-static int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
- wmt_cmd_sync_func_t wmt_cmd_sync)
+static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
-static void btmtk_reset_sync(struct hci_dev *hdev)
+static inline void btmtk_reset_sync(struct hci_dev *hdev)
+{
+}
+
+static inline int btmtk_register_coredump(struct hci_dev *hdev,
+ const char *name, u32 fw_version)
{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_process_coredump(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id,
+ u32 fw_ver, u32 fw_flavor)
+{
+}
+
+static inline int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev,
+ struct sk_buff *skb,
+ usb_complete_t tx_complete)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int btmtk_usb_resume(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_usb_suspend(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
}
-static int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
- u32 fw_version)
+static inline int btmtk_usb_setup(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
-static int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+static inline int btmtk_usb_shutdown(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 8ded9ef8089a..39d6898497a4 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
+#include <linux/usb.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
@@ -1117,6 +1118,9 @@ static int btmtksdio_setup(struct hci_dev *hdev)
return err;
}
+ btmtk_fw_get_filename(fwname, sizeof(fwname), dev_id,
+ fw_version, 0);
+
snprintf(fwname, sizeof(fwname),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e6bc4a73c9fc..aa87c3e78871 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -22,6 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
+#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 9d0c7e278114..31d3dd90b672 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -29,27 +29,38 @@
#define BTNXPUART_CHECK_BOOT_SIGNATURE 3
#define BTNXPUART_SERDEV_OPEN 4
#define BTNXPUART_IR_IN_PROGRESS 5
+#define BTNXPUART_FW_DOWNLOAD_ABORT 6
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
-#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin"
-#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin"
-#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin"
-#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin"
-#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se"
-#define FIRMWARE_IW624 "nxp/uartiw624_bt.bin"
-#define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se"
-#define FIRMWARE_AW693 "nxp/uartaw693_bt.bin"
-#define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se"
-#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin"
+#define FIRMWARE_W8987 "uart8987_bt_v0.bin"
+#define FIRMWARE_W8987_OLD "uartuart8987_bt.bin"
+#define FIRMWARE_W8997 "uart8997_bt_v4.bin"
+#define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin"
+#define FIRMWARE_W9098 "uart9098_bt_v1.bin"
+#define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin"
+#define FIRMWARE_IW416 "uartiw416_bt_v0.bin"
+#define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se"
+#define FIRMWARE_IW615 "uartspi_iw610_v0.bin"
+#define FIRMWARE_SECURE_IW615 "uartspi_iw610_v0.bin.se"
+#define FIRMWARE_IW624 "uartiw624_bt.bin"
+#define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se"
+#define FIRMWARE_AW693 "uartaw693_bt.bin"
+#define FIRMWARE_SECURE_AW693 "uartaw693_bt.bin.se"
+#define FIRMWARE_AW693_A1 "uartaw693_bt_v1.bin"
+#define FIRMWARE_SECURE_AW693_A1 "uartaw693_bt_v1.bin.se"
+#define FIRMWARE_HELPER "helper_uart_3000000.bin"
#define CHIP_ID_W9098 0x5c03
#define CHIP_ID_IW416 0x7201
#define CHIP_ID_IW612 0x7601
#define CHIP_ID_IW624a 0x8000
#define CHIP_ID_IW624c 0x8001
-#define CHIP_ID_AW693 0x8200
+#define CHIP_ID_AW693a0 0x8200
+#define CHIP_ID_AW693a1 0x8201
+#define CHIP_ID_IW615a0 0x8800
+#define CHIP_ID_IW615a1 0x8801
#define FW_SECURE_MASK 0xc0
#define FW_OPEN 0x00
@@ -144,6 +155,7 @@ struct psmode_cmd_payload {
struct btnxpuart_data {
const char *helper_fw_name;
const char *fw_name;
+ const char *fw_name_old;
};
struct btnxpuart_dev {
@@ -159,6 +171,7 @@ struct btnxpuart_dev {
u8 fw_name[MAX_FW_FILE_NAME_LEN];
u32 fw_dnld_v1_offset;
u32 fw_v1_sent_bytes;
+ u32 fw_dnld_v3_offset;
u32 fw_v3_offset_correction;
u32 fw_v1_expected_len;
u32 boot_reg_offset;
@@ -187,6 +200,11 @@ struct btnxpuart_dev {
#define NXP_NAK_V3 0x7b
#define NXP_CRC_ERROR_V3 0x7c
+/* Bootloader signature error codes */
+#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */
+#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */
+#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */
+
#define HDR_LEN 16
#define NXP_RECV_CHIP_VER_V1 \
@@ -277,11 +295,22 @@ struct nxp_bootloader_cmd {
__be32 crc;
} __packed;
+struct nxp_v3_rx_timeout_nak {
+ u8 nak;
+ __le32 offset;
+ u8 crc;
+} __packed;
+
+union nxp_v3_rx_timeout_nak_u {
+ struct nxp_v3_rx_timeout_nak pkt;
+ u8 buf[6];
+};
+
static u8 crc8_table[CRC8_TABLE_SIZE];
/* Default configurations */
#define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK
-#define DEFAULT_PS_MODE PS_MODE_DISABLE
+#define DEFAULT_PS_MODE PS_MODE_ENABLE
#define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE
static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
@@ -328,7 +357,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
struct ps_data *psdata = &nxpdev->psdata;
flush_work(&psdata->work);
- del_timer_sync(&psdata->ps_timer);
+ timer_shutdown_sync(&psdata->ps_timer);
}
static void ps_control(struct hci_dev *hdev, u8 ps_state)
@@ -550,6 +579,7 @@ static int nxp_download_firmware(struct hci_dev *hdev)
nxpdev->fw_v1_sent_bytes = 0;
nxpdev->fw_v1_expected_len = HDR_LEN;
nxpdev->boot_reg_offset = 0;
+ nxpdev->fw_dnld_v3_offset = 0;
nxpdev->fw_v3_offset_correction = 0;
nxpdev->baudrate_changed = false;
nxpdev->timeout_changed = false;
@@ -564,14 +594,23 @@ static int nxp_download_firmware(struct hci_dev *hdev)
!test_bit(BTNXPUART_FW_DOWNLOADING,
&nxpdev->tx_state),
msecs_to_jiffies(60000));
+
+ release_firmware(nxpdev->fw);
+ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+
if (err == 0) {
- bt_dev_err(hdev, "FW Download Timeout.");
+ bt_dev_err(hdev, "FW Download Timeout. offset: %d",
+ nxpdev->fw_dnld_v1_offset ?
+ nxpdev->fw_dnld_v1_offset :
+ nxpdev->fw_dnld_v3_offset);
return -ETIMEDOUT;
}
+ if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) {
+ bt_dev_err(hdev, "FW Download Aborted");
+ return -EINTR;
+ }
serdev_device_set_flow_control(nxpdev->serdev, true);
- release_firmware(nxpdev->fw);
- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
/* Allow the downloaded FW to initialize */
msleep(1200);
@@ -682,19 +721,30 @@ static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
return is_fw_downloading(nxpdev);
}
-static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name)
+static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name,
+ const char *fw_name_old)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ const char *fw_name_dt;
int err = 0;
if (!fw_name)
return -ENOENT;
if (!strlen(nxpdev->fw_name)) {
- snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name);
+ if (strcmp(fw_name, FIRMWARE_HELPER) &&
+ !device_property_read_string(&nxpdev->serdev->dev,
+ "firmware-name",
+ &fw_name_dt))
+ fw_name = fw_name_dt;
+ snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name);
+ err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
+ if (err < 0 && fw_name_old) {
+ snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name_old);
+ err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
+ }
- bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name);
- err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
+ bt_dev_info(hdev, "Request Firmware: %s", nxpdev->fw_name);
if (err < 0) {
bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
@@ -773,15 +823,15 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
}
if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) {
- if (nxp_request_firmware(hdev, nxp_data->fw_name))
+ if (nxp_request_firmware(hdev, nxp_data->fw_name, nxp_data->fw_name_old))
goto free_skb;
} else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
- if (nxp_request_firmware(hdev, nxp_data->helper_fw_name))
+ if (nxp_request_firmware(hdev, nxp_data->helper_fw_name, NULL))
goto free_skb;
}
if (!len) {
- bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
+ bt_dev_info(hdev, "FW Download Complete: %zu bytes",
nxpdev->fw->size);
if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
nxpdev->helper_downloaded = true;
@@ -863,7 +913,7 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
- case CHIP_ID_AW693:
+ case CHIP_ID_AW693a0:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
fw_name = FIRMWARE_AW693;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
@@ -871,6 +921,23 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
+ case CHIP_ID_AW693a1:
+ if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
+ fw_name = FIRMWARE_AW693_A1;
+ else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
+ fw_name = FIRMWARE_SECURE_AW693_A1;
+ else
+ bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
+ break;
+ case CHIP_ID_IW615a0:
+ case CHIP_ID_IW615a1:
+ if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
+ fw_name = FIRMWARE_IW615;
+ else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
+ fw_name = FIRMWARE_SECURE_IW615;
+ else
+ bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
+ break;
default:
bt_dev_err(hdev, "Unknown chip signature %04x", chipid);
break;
@@ -878,10 +945,25 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
return fw_name;
}
+static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
+ u8 loader_ver)
+{
+ char *fw_name_old = NULL;
+
+ switch (chipid) {
+ case CHIP_ID_W9098:
+ fw_name_old = FIRMWARE_W9098_OLD;
+ break;
+ }
+ return fw_name_old;
+}
+
static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req));
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ const char *fw_name;
+ const char *fw_name_old;
u16 chip_id;
u8 loader_ver;
@@ -890,8 +972,10 @@ static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
chip_id = le16_to_cpu(req->chip_id);
loader_ver = req->loader_ver;
- if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev,
- chip_id, loader_ver)))
+ bt_dev_info(hdev, "ChipID: %04x, Version: %d", chip_id, loader_ver);
+ fw_name = nxp_get_fw_name_from_chipid(hdev, chip_id, loader_ver);
+ fw_name_old = nxp_get_old_fw_name_from_chipid(hdev, chip_id, loader_ver);
+ if (!nxp_request_firmware(hdev, fw_name, fw_name_old))
nxp_send_ack(NXP_ACK_V3, hdev);
free_skb:
@@ -899,6 +983,32 @@ free_skb:
return 0;
}
+static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ __u32 offset = __le32_to_cpu(req->offset);
+ __u16 err = __le16_to_cpu(req->error);
+ union nxp_v3_rx_timeout_nak_u nak_tx_buf;
+
+ switch (err) {
+ case NXP_ACK_RX_TIMEOUT:
+ case NXP_HDR_RX_TIMEOUT:
+ case NXP_DATA_RX_TIMEOUT:
+ nak_tx_buf.pkt.nak = NXP_NAK_V3;
+ nak_tx_buf.pkt.offset = __cpu_to_le32(offset);
+ nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf,
+ sizeof(nak_tx_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf,
+ sizeof(nak_tx_buf));
+ break;
+ default:
+ bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err);
+ break;
+ }
+}
+
static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -913,7 +1023,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
if (!req || !nxpdev->fw)
goto free_skb;
- nxp_send_ack(NXP_ACK_V3, hdev);
+ if (!req->error) {
+ nxp_send_ack(NXP_ACK_V3, hdev);
+ } else {
+ nxp_handle_fw_download_error(hdev, req);
+ goto free_skb;
+ }
len = __le16_to_cpu(req->len);
@@ -934,15 +1049,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
}
if (req->len == 0) {
- bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
+ bt_dev_info(hdev, "FW Download Complete: %zu bytes",
nxpdev->fw->size);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
goto free_skb;
}
- if (req->error)
- bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip",
- req->error);
offset = __le32_to_cpu(req->offset);
if (offset < nxpdev->fw_v3_offset_correction) {
@@ -954,8 +1066,9 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
goto free_skb;
}
- serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset -
- nxpdev->fw_v3_offset_correction, len);
+ nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction;
+ serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data +
+ nxpdev->fw_dnld_v3_offset, len);
free_skb:
kfree_skb(skb);
@@ -1037,7 +1150,7 @@ static int nxp_setup(struct hci_dev *hdev)
if (err < 0)
return err;
} else {
- bt_dev_dbg(hdev, "FW already running.");
+ bt_dev_info(hdev, "FW already running.");
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
@@ -1253,8 +1366,10 @@ static int btnxpuart_close(struct hci_dev *hdev)
ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
skb_queue_purge(&nxpdev->txq);
- kfree_skb(nxpdev->rx_skb);
- nxpdev->rx_skb = NULL;
+ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
+ }
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
@@ -1269,8 +1384,10 @@ static int btnxpuart_flush(struct hci_dev *hdev)
cancel_work_sync(&nxpdev->tx_work);
- kfree_skb(nxpdev->rx_skb);
- nxpdev->rx_skb = NULL;
+ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
+ }
return 0;
}
@@ -1385,28 +1502,56 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
struct hci_dev *hdev = nxpdev->hdev;
- /* Restore FW baudrate to fw_init_baudrate if changed.
- * This will ensure FW baudrate is in sync with
- * driver baudrate in case this driver is re-inserted.
- */
- if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
- nxp_set_baudrate_cmd(hdev, NULL);
+ if (is_fw_downloading(nxpdev)) {
+ set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state);
+ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
+ wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
+ } else {
+ /* Restore FW baudrate to fw_init_baudrate if changed.
+ * This will ensure FW baudrate is in sync with
+ * driver baudrate in case this driver is re-inserted.
+ */
+ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
+ ps_cancel_timer(nxpdev);
}
-
- ps_cancel_timer(nxpdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
+#ifdef CONFIG_PM_SLEEP
+static int nxp_serdev_suspend(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ ps_control(psdata->hdev, PS_STATE_SLEEP);
+ return 0;
+}
+
+static int nxp_serdev_resume(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ ps_control(psdata->hdev, PS_STATE_AWAKE);
+ return 0;
+}
+#endif
+
static struct btnxpuart_data w8987_data __maybe_unused = {
.helper_fw_name = NULL,
.fw_name = FIRMWARE_W8987,
+ .fw_name_old = FIRMWARE_W8987_OLD,
};
static struct btnxpuart_data w8997_data __maybe_unused = {
.helper_fw_name = FIRMWARE_HELPER,
.fw_name = FIRMWARE_W8997,
+ .fw_name_old = FIRMWARE_W8997_OLD,
};
static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
@@ -1416,12 +1561,17 @@ static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
};
MODULE_DEVICE_TABLE(of, nxpuart_of_match_table);
+static const struct dev_pm_ops nxp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(nxp_serdev_suspend, nxp_serdev_resume)
+};
+
static struct serdev_device_driver nxp_serdev_driver = {
.probe = nxp_serdev_probe,
.remove = nxp_serdev_remove,
.driver = {
.name = "btnxpuart",
.of_match_table = of_match_ptr(nxpuart_of_match_table),
+ .pm = &nxp_pm_ops,
},
};
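For reference, a minimal sketch of the system-sleep PM wiring the btnxpuart hunks above adopt. The demo_* names are hypothetical; SET_SYSTEM_SLEEP_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is enabled, so the #ifdef around them avoids unused-function warnings without touching the ops table.

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/serdev.h>

#ifdef CONFIG_PM_SLEEP
static int demo_serdev_suspend(struct device *dev)
{
	/* Put the controller into its low-power state here. */
	return 0;
}

static int demo_serdev_resume(struct device *dev)
{
	/* Bring the controller back to the awake state here. */
	return 0;
}
#endif

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_serdev_suspend, demo_serdev_resume)
};

/* Hooked up via .driver.pm = &demo_pm_ops in the serdev_device_driver,
 * exactly as the btnxpuart hunk above does for nxp_pm_ops.
 */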
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 4f1e37b4f780..f2f37143c454 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -811,7 +811,7 @@ static int rtl_download_firmware(struct hci_dev *hdev,
struct sk_buff *skb;
struct hci_rp_read_local_version *rp;
- dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+ dl_cmd = kmalloc(sizeof(*dl_cmd), GFP_KERNEL);
if (!dl_cmd)
return -ENOMEM;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e384ef6ff050..acdba5d77694 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -479,6 +479,7 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0037), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED },
+ { USB_DEVICE(0x8087, 0x0039), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
BTUSB_INTEL_NO_WBS_SUPPORT |
@@ -555,6 +556,10 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852BT/8852BE-VT Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
@@ -891,6 +896,9 @@ struct btusb_data {
int (*setup_on_usb)(struct hci_dev *hdev);
+ int (*suspend)(struct hci_dev *hdev);
+ int (*resume)(struct hci_dev *hdev);
+
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
unsigned cmd_timeout_cnt;
@@ -2638,410 +2646,48 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
-/* UHW CR mapping */
-#define MTK_BT_MISC 0x70002510
-#define MTK_BT_SUBSYS_RST 0x70002610
-#define MTK_UDMA_INT_STA_BT 0x74000024
-#define MTK_UDMA_INT_STA_BT1 0x74000308
-#define MTK_BT_WDT_STATUS 0x740003A0
-#define MTK_EP_RST_OPT 0x74011890
-#define MTK_EP_RST_IN_OUT_OPT 0x00010001
-#define MTK_BT_RST_DONE 0x00000100
-#define MTK_BT_RESET_REG_CONNV3 0x70028610
-#define MTK_BT_READ_DEV_ID 0x70010200
-
-
-static void btusb_mtk_wmt_recv(struct urb *urb)
-{
- struct hci_dev *hdev = urb->context;
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct sk_buff *skb;
- int err;
-
- if (urb->status == 0 && urb->actual_length > 0) {
- hdev->stat.byte_rx += urb->actual_length;
-
- /* WMT event shouldn't be fragmented and the size should be
- * less than HCI_WMT_MAX_EVENT_SIZE.
- */
- skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
- if (!skb) {
- hdev->stat.err_rx++;
- kfree(urb->setup_packet);
- return;
- }
-
- hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
- skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
-
- /* When someone waits for the WMT event, the skb is being cloned
- * and being processed the events from there then.
- */
- if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
- data->evt_skb = skb_clone(skb, GFP_ATOMIC);
- if (!data->evt_skb) {
- kfree_skb(skb);
- kfree(urb->setup_packet);
- return;
- }
- }
-
- err = hci_recv_frame(hdev, skb);
- if (err < 0) {
- kfree_skb(data->evt_skb);
- data->evt_skb = NULL;
- kfree(urb->setup_packet);
- return;
- }
-
- if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
- &data->flags)) {
- /* Barrier to sync with other CPUs */
- smp_mb__after_atomic();
- wake_up_bit(&data->flags,
- BTUSB_TX_WAIT_VND_EVT);
- }
- kfree(urb->setup_packet);
- return;
- } else if (urb->status == -ENOENT) {
- /* Avoid suspend failed when usb_kill_urb */
- return;
- }
-
- usb_mark_last_busy(data->udev);
-
- /* The URB complete handler is still called with urb->actual_length = 0
- * when the event is not available, so we should keep re-submitting
- * URB until WMT event returns, Also, It's necessary to wait some time
- * between the two consecutive control URBs to relax the target device
- * to generate the event. Otherwise, the WMT event cannot return from
- * the device successfully.
- */
- udelay(500);
-
- usb_anchor_urb(urb, &data->ctrl_anchor);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- kfree(urb->setup_packet);
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected
- */
- if (err != -EPERM && err != -ENODEV)
- bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
- urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
-{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct usb_ctrlrequest *dr;
- unsigned char *buf;
- int err, size = 64;
- unsigned int pipe;
- struct urb *urb;
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
-
- dr = kmalloc(sizeof(*dr), GFP_KERNEL);
- if (!dr) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
- dr->bRequest = 1;
- dr->wIndex = cpu_to_le16(0);
- dr->wValue = cpu_to_le16(48);
- dr->wLength = cpu_to_le16(size);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf) {
- kfree(dr);
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvctrlpipe(data->udev, 0);
-
- usb_fill_control_urb(urb, data->udev, pipe, (void *)dr,
- buf, size, btusb_mtk_wmt_recv, hdev);
-
- urb->transfer_flags |= URB_FREE_BUFFER;
-
- usb_anchor_urb(urb, &data->ctrl_anchor);
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- bt_dev_err(hdev, "urb %p submission failed (%d)",
- urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-}
-
-static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
- struct btmtk_hci_wmt_params *wmt_params)
+static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
- u32 hlen, status = BTMTK_WMT_INVALID;
- struct btmtk_hci_wmt_evt *wmt_evt;
- struct btmtk_hci_wmt_cmd *wc;
- struct btmtk_wmt_hdr *hdr;
+ struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
int err;
- /* Send the WMT command and wait until the WMT event returns */
- hlen = sizeof(*hdr) + wmt_params->dlen;
- if (hlen > 255)
- return -EINVAL;
-
- wc = kzalloc(hlen, GFP_KERNEL);
- if (!wc)
- return -ENOMEM;
-
- hdr = &wc->hdr;
- hdr->dir = 1;
- hdr->op = wmt_params->op;
- hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
- hdr->flag = wmt_params->flag;
- memcpy(wc->data, wmt_params->data, wmt_params->dlen);
-
- set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
-
- /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling,
- * it needs constantly polling control pipe until the host received the
- * WMT event, thus, we should require to specifically acquire PM counter
- * on the USB to prevent the interface from entering auto suspended
- * while WMT cmd/event in progress.
- */
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- goto err_free_wc;
-
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
-
- if (err < 0) {
- clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- usb_autopm_put_interface(data->intf);
- goto err_free_wc;
- }
-
- /* Submit control IN URB on demand to process the WMT event */
- err = btusb_mtk_submit_wmt_recv_urb(hdev);
-
- usb_autopm_put_interface(data->intf);
-
- if (err < 0)
- goto err_free_wc;
-
- /* The vendor specific WMT commands are all answered by a vendor
- * specific event and will have the Command Status or Command
- * Complete as with usual HCI command flow control.
- *
- * After sending the command, wait for BTUSB_TX_WAIT_VND_EVT
- * state to be cleared. The driver specific event receive routine
- * will clear that state and with that indicate completion of the
- * WMT command.
- */
- err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT,
- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
- if (err == -EINTR) {
- bt_dev_err(hdev, "Execution of wmt command interrupted");
- clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- goto err_free_wc;
- }
-
- if (err) {
- bt_dev_err(hdev, "Execution of wmt command timed out");
- clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- err = -ETIMEDOUT;
- goto err_free_wc;
- }
-
- if (data->evt_skb == NULL)
- goto err_free_wc;
-
- /* Parse and handle the return WMT event */
- wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
- if (wmt_evt->whdr.op != hdr->op) {
- bt_dev_err(hdev, "Wrong op received %d expected %d",
- wmt_evt->whdr.op, hdr->op);
- err = -EIO;
- goto err_free_skb;
- }
-
- switch (wmt_evt->whdr.op) {
- case BTMTK_WMT_SEMAPHORE:
- if (wmt_evt->whdr.flag == 2)
- status = BTMTK_WMT_PATCH_UNDONE;
- else
- status = BTMTK_WMT_PATCH_DONE;
- break;
- case BTMTK_WMT_FUNC_CTRL:
- wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
- if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
- status = BTMTK_WMT_ON_DONE;
- else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
- status = BTMTK_WMT_ON_PROGRESS;
- else
- status = BTMTK_WMT_ON_UNDONE;
- break;
- case BTMTK_WMT_PATCH_DWNLD:
- if (wmt_evt->whdr.flag == 2)
- status = BTMTK_WMT_PATCH_DONE;
- else if (wmt_evt->whdr.flag == 1)
- status = BTMTK_WMT_PATCH_PROGRESS;
- else
- status = BTMTK_WMT_PATCH_UNDONE;
- break;
- }
-
- if (wmt_params->status)
- *wmt_params->status = status;
-
-err_free_skb:
- kfree_skb(data->evt_skb);
- data->evt_skb = NULL;
-err_free_wc:
- kfree(wc);
- return err;
-}
-
-static int btusb_mtk_func_query(struct hci_dev *hdev)
-{
- struct btmtk_hci_wmt_params wmt_params;
- int status, err;
- u8 param = 0;
-
- /* Query whether the function is enabled */
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 4;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = &status;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to query function status (%d)", err);
- return err;
- }
-
- return status;
-}
-
-static int btusb_mtk_uhw_reg_write(struct btusb_data *data, u32 reg, u32 val)
-{
- struct hci_dev *hdev = data->hdev;
- int pipe, err;
- void *buf;
-
- buf = kzalloc(4, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- put_unaligned_le32(val, buf);
-
- pipe = usb_sndctrlpipe(data->udev, 0);
- err = usb_control_msg(data->udev, pipe, 0x02,
- 0x5E,
- reg >> 16, reg & 0xffff,
- buf, 4, USB_CTRL_SET_TIMEOUT);
+ err = usb_driver_claim_interface(&btusb_driver,
+ btmtk_data->isopkt_intf, data);
if (err < 0) {
- bt_dev_err(hdev, "Failed to write uhw reg(%d)", err);
- goto err_free_buf;
+ btmtk_data->isopkt_intf = NULL;
+ bt_dev_err(data->hdev, "Failed to claim iso interface");
+ return;
}
-err_free_buf:
- kfree(buf);
-
- return err;
+ set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
-static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val)
+static void btusb_mtk_release_iso_intf(struct btusb_data *data)
{
- struct hci_dev *hdev = data->hdev;
- int pipe, err;
- void *buf;
+ struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
- buf = kzalloc(4, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (btmtk_data->isopkt_intf) {
+ usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+ clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
- pipe = usb_rcvctrlpipe(data->udev, 0);
- err = usb_control_msg(data->udev, pipe, 0x01,
- 0xDE,
- reg >> 16, reg & 0xffff,
- buf, 4, USB_CTRL_GET_TIMEOUT);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to read uhw reg(%d)", err);
- goto err_free_buf;
+ dev_kfree_skb_irq(btmtk_data->isopkt_skb);
+ btmtk_data->isopkt_skb = NULL;
+ usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
+ usb_driver_release_interface(&btusb_driver,
+ btmtk_data->isopkt_intf);
}
- *val = get_unaligned_le32(buf);
- bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val);
-
-err_free_buf:
- kfree(buf);
-
- return err;
-}
-
-static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val)
-{
- int pipe, err, size = sizeof(u32);
- void *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pipe = usb_rcvctrlpipe(data->udev, 0);
- err = usb_control_msg(data->udev, pipe, 0x63,
- USB_TYPE_VENDOR | USB_DIR_IN,
- reg >> 16, reg & 0xffff,
- buf, size, USB_CTRL_GET_TIMEOUT);
- if (err < 0)
- goto err_free_buf;
-
- *val = get_unaligned_le32(buf);
-
-err_free_buf:
- kfree(buf);
-
- return err;
-}
-
-static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
-{
- return btusb_mtk_reg_read(data, reg, id);
-}
-
-static u32 btusb_mtk_reset_done(struct hci_dev *hdev)
-{
- struct btusb_data *data = hci_get_drvdata(hdev);
- u32 val = 0;
-
- btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val);
-
- return val & MTK_BT_RST_DONE;
+ clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
{
struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmediatek_data *mediatek;
- u32 val;
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
int err;
/* It's MediaTek specific bluetooth reset mechanism via USB */
- if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ if (test_and_set_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags)) {
bt_dev_err(hdev, "last reset failed? Not resetting again");
return -EBUSY;
}
@@ -3050,302 +2696,68 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
if (err < 0)
return err;
+ if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
+ btusb_mtk_release_iso_intf(data);
+
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
- mediatek = hci_get_priv(hdev);
-
- if (mediatek->dev_id == 0x7925) {
- btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
- val |= (1 << 5);
- btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
- btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
- val &= 0xFFFF00FF;
- val |= (1 << 13);
- btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
- btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001);
- btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
- val |= (1 << 0);
- btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
- msleep(100);
- } else {
- /* It's Device EndPoint Reset Option Register */
- bt_dev_dbg(hdev, "Initiating reset mechanism via uhw");
- btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
- btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val);
-
- /* Reset the bluetooth chip via USB interface. */
- btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
- /* MT7921 need to delay 20ms between toggle reset bit */
- msleep(20);
- btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0);
- btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val);
- }
- err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val,
- val & MTK_BT_RST_DONE, 20000, 1000000);
- if (err < 0)
- bt_dev_err(hdev, "Reset timeout");
-
- btusb_mtk_id_get(data, 0x70010200, &val);
- if (!val)
- bt_dev_err(hdev, "Can't get device id, subsys reset fail.");
+ err = btmtk_usb_subsys_reset(hdev, btmtk_data->dev_id);
usb_queue_reset_device(data->intf);
-
- clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags);
+ clear_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags);
return err;
}
-static int btusb_mtk_setup(struct hci_dev *hdev)
+static int btusb_send_frame_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmtk_hci_wmt_params wmt_params;
- ktime_t calltime, delta, rettime;
- struct btmtk_tci_sleep tci_sleep;
- unsigned long long duration;
- struct sk_buff *skb;
- const char *fwname;
- int err, status;
- u32 dev_id = 0;
- char fw_bin_name[64];
- u32 fw_version = 0, fw_flavor = 0;
- u8 param;
- struct btmediatek_data *mediatek;
-
- calltime = ktime_get();
-
- err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get device id (%d)", err);
- return err;
- }
-
- if (!dev_id || dev_id != 0x7663) {
- err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get device id (%d)", err);
- return err;
- }
- err = btusb_mtk_id_get(data, 0x80021004, &fw_version);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get fw version (%d)", err);
- return err;
- }
- err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
- return err;
- }
- fw_flavor = (fw_flavor & 0x00000080) >> 7;
- }
-
- mediatek = hci_get_priv(hdev);
- mediatek->dev_id = dev_id;
- mediatek->reset_sync = btusb_mtk_reset;
-
- err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version);
- if (err < 0)
- bt_dev_err(hdev, "Failed to register coredump (%d)", err);
-
- switch (dev_id) {
- case 0x7663:
- fwname = FIRMWARE_MT7663;
- break;
- case 0x7668:
- fwname = FIRMWARE_MT7668;
- break;
- case 0x7922:
- case 0x7961:
- case 0x7925:
- if (dev_id == 0x7925)
- snprintf(fw_bin_name, sizeof(fw_bin_name),
- "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
- dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1);
- else if (dev_id == 0x7961 && fw_flavor)
- snprintf(fw_bin_name, sizeof(fw_bin_name),
- "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
- dev_id & 0xffff, (fw_version & 0xff) + 1);
- else
- snprintf(fw_bin_name, sizeof(fw_bin_name),
- "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
- dev_id & 0xffff, (fw_version & 0xff) + 1);
-
- err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
- btusb_mtk_hci_wmt_sync);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
- return err;
- }
-
- /* It's Device EndPoint Reset Option Register */
- btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
-
- /* Enable Bluetooth protocol */
- param = 1;
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 0;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
- return err;
- }
-
- hci_set_msft_opcode(hdev, 0xFD30);
- hci_set_aosp_capable(hdev);
- goto done;
- default:
- bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
- dev_id);
- return -ENODEV;
- }
-
- /* Query whether the firmware is already download */
- wmt_params.op = BTMTK_WMT_SEMAPHORE;
- wmt_params.flag = 1;
- wmt_params.dlen = 0;
- wmt_params.data = NULL;
- wmt_params.status = &status;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
- return err;
- }
-
- if (status == BTMTK_WMT_PATCH_DONE) {
- bt_dev_info(hdev, "firmware already downloaded");
- goto ignore_setup_fw;
- }
-
- /* Setup a firmware which the device definitely requires */
- err = btmtk_setup_firmware(hdev, fwname,
- btusb_mtk_hci_wmt_sync);
- if (err < 0)
- return err;
-
-ignore_setup_fw:
- err = readx_poll_timeout(btusb_mtk_func_query, hdev, status,
- status < 0 || status != BTMTK_WMT_ON_PROGRESS,
- 2000, 5000000);
- /* -ETIMEDOUT happens */
- if (err < 0)
- return err;
-
- /* The other errors happen in btusb_mtk_func_query */
- if (status < 0)
- return status;
-
- if (status == BTMTK_WMT_ON_DONE) {
- bt_dev_info(hdev, "function already on");
- goto ignore_func_on;
- }
-
- /* Enable Bluetooth protocol */
- param = 1;
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 0;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
+ struct urb *urb;
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
- return err;
- }
+ BT_DBG("%s", hdev->name);
-ignore_func_on:
- /* Apply the low power environment setup */
- tci_sleep.mode = 0x5;
- tci_sleep.duration = cpu_to_le16(0x640);
- tci_sleep.host_duration = cpu_to_le16(0x640);
- tci_sleep.host_wakeup_pin = 0;
- tci_sleep.time_compensation = 0;
+ if (hci_skb_pkt_type(skb) == HCI_ISODATA_PKT) {
+ urb = alloc_mtk_intr_urb(hdev, skb, btusb_tx_complete);
+ if (IS_ERR(urb))
+ return PTR_ERR(urb);
- skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
- return err;
+ return submit_or_queue_tx_urb(hdev, urb);
+ } else {
+ return btusb_send_frame(hdev, skb);
}
- kfree_skb(skb);
-
-done:
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long)ktime_to_ns(delta) >> 10;
-
- bt_dev_info(hdev, "Device setup in %llu usecs", duration);
-
- return 0;
}
-static int btusb_mtk_shutdown(struct hci_dev *hdev)
+static int btusb_mtk_setup(struct hci_dev *hdev)
{
- struct btmtk_hci_wmt_params wmt_params;
- u8 param = 0;
- int err;
-
- /* Disable the device */
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 0;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
- return err;
- }
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
- return 0;
+ /* The MediaTek WMT vendor command requires the USB resources below to
+ * complete the handshake.
+ */
+ btmtk_data->drv_name = btusb_driver.name;
+ btmtk_data->intf = data->intf;
+ btmtk_data->udev = data->udev;
+ btmtk_data->ctrl_anchor = &data->ctrl_anchor;
+ btmtk_data->reset_sync = btusb_mtk_reset;
+
+ /* Claim ISO data interface and endpoint */
+ btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
+ if (btmtk_data->isopkt_intf)
+ btusb_mtk_claim_iso_intf(data);
+
+ return btmtk_usb_setup(hdev);
}
-static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
+static int btusb_mtk_shutdown(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
- u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
-
- switch (handle) {
- case 0xfc6f: /* Firmware dump from device */
- /* When the firmware hangs, the device can no longer
- * suspend and thus disable auto-suspend.
- */
- usb_disable_autosuspend(data->udev);
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
- /* We need to forward the diagnostic packet to userspace daemon
- * for backward compatibility, so we have to clone the packet
- * extraly for the in-kernel coredump support.
- */
- if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
- struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+ if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
+ btusb_mtk_release_iso_intf(data);
- if (skb_cd)
- btmtk_process_coredump(hdev, skb_cd);
- }
-
- fallthrough;
- case 0x05ff: /* Firmware debug logging 1 */
- case 0x05fe: /* Firmware debug logging 2 */
- return hci_recv_diag(hdev, skb);
- }
-
- return hci_recv_frame(hdev, skb);
+ return btmtk_usb_shutdown(hdev);
}
#ifdef CONFIG_PM
@@ -4347,7 +3759,7 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_event = btusb_recv_event_realtek;
} else if (id->driver_info & BTUSB_MEDIATEK) {
/* Allocate extra space for Mediatek device */
- priv_size += sizeof(struct btmediatek_data);
+ priv_size += sizeof(struct btmtk_data);
}
data->recv_acl = hci_recv_frame;
@@ -4451,9 +3863,12 @@ static int btusb_probe(struct usb_interface *intf,
hdev->manufacturer = 70;
hdev->cmd_timeout = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
+ hdev->send = btusb_send_frame_mtk;
set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
- data->recv_acl = btusb_recv_acl_mtk;
+ data->recv_acl = btmtk_usb_recv_acl;
+ data->suspend = btmtk_usb_suspend;
+ data->resume = btmtk_usb_resume;
}
if (id->driver_info & BTUSB_SWAVE) {
@@ -4694,6 +4109,9 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
cancel_work_sync(&data->work);
+ if (data->suspend)
+ data->suspend(data->hdev);
+
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
@@ -4797,6 +4215,9 @@ static int btusb_resume(struct usb_interface *intf)
btusb_submit_isoc_urb(hdev, GFP_NOIO);
}
+ if (data->resume)
+ data->resume(hdev);
+
spin_lock_irq(&data->txlock);
play_deferred(data);
clear_bit(BTUSB_SUSPENDING, &data->flags);
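As a rough sketch (hypothetical demo_* names, not the driver's actual code), the optional suspend/resume hooks added to struct btusb_data above follow the usual "call only if a vendor setup routine installed it" pattern, so transports other than MediaTek are unaffected.

#include <net/bluetooth/hci_core.h>

struct demo_usb_data {
	struct hci_dev *hdev;
	int (*suspend)(struct hci_dev *hdev);	/* optional vendor hook */
	int (*resume)(struct hci_dev *hdev);	/* optional vendor hook */
};

static void demo_suspend(struct demo_usb_data *data)
{
	if (data->suspend)		/* installed only by e.g. the MediaTek setup */
		data->suspend(data->hdev);
	/* ...then stop traffic and kill anchored URBs... */
}

static void demo_resume(struct demo_usb_data *data)
{
	/* ...resubmit interrupt/bulk/isoc URBs first... */
	if (data->resume)
		data->resume(data->hdev);
}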
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 0c2f15235b4c..77a5454a8721 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
- * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe
+ * Bluetooth HCI driver for Broadcom 4377/4378/4387/4388 devices attached via PCIe
*
* Copyright (C) The Asahi Linux Contributors
*/
@@ -26,13 +26,16 @@ enum bcm4377_chip {
BCM4377 = 0,
BCM4378,
BCM4387,
+ BCM4388,
};
#define BCM4377_DEVICE_ID 0x5fa0
#define BCM4378_DEVICE_ID 0x5f69
#define BCM4387_DEVICE_ID 0x5f71
+#define BCM4388_DEVICE_ID 0x5f72
-#define BCM4377_TIMEOUT 1000
+#define BCM4377_TIMEOUT msecs_to_jiffies(1000)
+#define BCM4377_BOOT_TIMEOUT msecs_to_jiffies(5000)
/*
* These devices only support DMA transactions inside a 32bit window
@@ -487,6 +490,7 @@ struct bcm4377_data;
* second window in BAR0
* has_bar0_core2_window2: Set to true if this chip requires the second core's
* second window to be configured
+ * bar2_offset: Offset to the start of the variables in BAR2
* clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the
* vendor-specific subsystem control
* register has to be cleared
@@ -495,6 +499,10 @@ struct bcm4377_data;
* extended scanning
* broken_mws_transport_config: Set to true if the chip erroneously claims to
* support MWS Transport Configuration
+ * broken_le_ext_adv_report_phy: Set to true if this chip stuffs flags inside
+ * reserved bits of Primary/Secondary_PHY inside
+ * LE Extended Advertising Report events which
+ * have to be ignored
* send_calibration: Optional callback to send calibration data
* send_ptb: Callback to send "PTB" regulatory/calibration data
*/
@@ -506,6 +514,7 @@ struct bcm4377_hw {
u32 bar0_window1;
u32 bar0_window2;
u32 bar0_core2_window2;
+ u32 bar2_offset;
unsigned long has_bar0_core2_window2 : 1;
unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1;
@@ -513,6 +522,7 @@ struct bcm4377_hw {
unsigned long broken_ext_scan : 1;
unsigned long broken_mws_transport_config : 1;
unsigned long broken_le_coded : 1;
+ unsigned long broken_le_ext_adv_report_phy : 1;
int (*send_calibration)(struct bcm4377_data *bcm4377);
int (*send_ptb)(struct bcm4377_data *bcm4377,
@@ -716,7 +726,7 @@ static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
ring->events[msgid] = NULL;
}
- bitmap_release_region(ring->msgids, msgid, ring->n_entries);
+ bitmap_release_region(ring->msgids, msgid, 0);
unlock:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -830,8 +840,8 @@ static irqreturn_t bcm4377_irq(int irq, void *data)
struct bcm4377_data *bcm4377 = data;
u32 bootstage, rti_status;
- bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
- rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
+ bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
+ rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);
if (bootstage != bcm4377->bootstage ||
rti_status != bcm4377->rti_status) {
@@ -1191,6 +1201,14 @@ static int bcm4387_send_calibration(struct bcm4377_data *bcm4377)
bcm4377->taurus_cal_size);
}
+static int bcm4388_send_calibration(struct bcm4377_data *bcm4377)
+{
+ /* BCM4388 always uses beamforming */
+ return __bcm4378_send_calibration(
+ bcm4377, bcm4377->taurus_beamforming_cal_blob,
+ bcm4377->taurus_beamforming_cal_size);
+}
+
static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377,
const char *suffix)
{
@@ -1814,8 +1832,8 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377)
int ret = 0;
u32 bootstage, rti_status;
- bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
- rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
+ bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
+ rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);
if (bootstage != 0) {
dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n",
@@ -1849,15 +1867,18 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377)
iowrite32(BCM4377_DMA_MASK,
bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE);
- iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO);
- iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI);
- iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE);
+ iowrite32(lower_32_bits(fw_dma),
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_LO);
+ iowrite32(upper_32_bits(fw_dma),
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_HI);
+ iowrite32(fw->size,
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_SIZE);
iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL);
dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n");
ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
- BCM4377_TIMEOUT);
+ BCM4377_BOOT_TIMEOUT);
if (ret == 0) {
ret = -ETIMEDOUT;
goto out_dma_free;
@@ -1908,16 +1929,16 @@ static int bcm4377_setup_rti(struct bcm4377_data *bcm4377)
dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n");
/* allow access to the entire IOVA space again */
- iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO);
- iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI);
+ iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_LO);
+ iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_HI);
iowrite32(BCM4377_DMA_MASK,
- bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE);
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_SIZE);
/* setup "Converged IPC" context */
iowrite32(lower_32_bits(bcm4377->ctx_dma),
- bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO);
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_LO);
iowrite32(upper_32_bits(bcm4377->ctx_dma),
- bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI);
+ bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_HI);
iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);
ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
@@ -2373,6 +2394,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
if (bcm4377->hw->broken_le_coded)
set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ if (bcm4377->hw->broken_le_ext_adv_report_phy)
+ set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);
pci_set_drvdata(pdev, bcm4377);
hci_set_drvdata(hdev, bcm4377);
@@ -2477,9 +2500,25 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
.clear_pciecfg_subsystem_ctrl_bit19 = true,
.broken_mws_transport_config = true,
.broken_le_coded = true,
+ .broken_le_ext_adv_report_phy = true,
.send_calibration = bcm4387_send_calibration,
.send_ptb = bcm4378_send_ptb,
},
+
+ [BCM4388] = {
+ .id = 0x4388,
+ .otp_offset = 0x415c,
+ .bar2_offset = 0x200000,
+ .bar0_window1 = 0x18002000,
+ .bar0_window2 = 0x18109000,
+ .bar0_core2_window2 = 0x18106000,
+ .has_bar0_core2_window2 = true,
+ .broken_mws_transport_config = true,
+ .broken_le_coded = true,
+ .broken_le_ext_adv_report_phy = true,
+ .send_calibration = bcm4388_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
};
#define BCM4377_DEVID_ENTRY(id) \
@@ -2493,6 +2532,7 @@ static const struct pci_device_id bcm4377_devid_table[] = {
BCM4377_DEVID_ENTRY(4377),
BCM4377_DEVID_ENTRY(4378),
BCM4377_DEVID_ENTRY(4387),
+ BCM4377_DEVID_ENTRY(4388),
{},
};
MODULE_DEVICE_TABLE(pci, bcm4377_devid_table);
@@ -2507,7 +2547,7 @@ static struct pci_driver bcm4377_pci_driver = {
module_pci_driver(bcm4377_pci_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
-MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices");
+MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387/4388 devices");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_FIRMWARE("brcm/brcmbt4377*.bin");
MODULE_FIRMWARE("brcm/brcmbt4377*.ptb");
@@ -2515,3 +2555,5 @@ MODULE_FIRMWARE("brcm/brcmbt4378*.bin");
MODULE_FIRMWARE("brcm/brcmbt4378*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4387*.bin");
MODULE_FIRMWARE("brcm/brcmbt4387*.ptb");
+MODULE_FIRMWARE("brcm/brcmbt4388*.bin");
+MODULE_FIRMWARE("brcm/brcmbt4388*.ptb");
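The BAR2 hunks above add bcm4377->hw->bar2_offset by hand at every register access; a hypothetical helper (not part of this patch, field names taken from the hunks themselves) could fold that in once:

#include <linux/io.h>

static inline void __iomem *bcm4377_bar2_reg(struct bcm4377_data *bcm4377,
					     u32 reg)
{
	/* BCM4388 places the shared variables 0x200000 into BAR2; the older
	 * chips keep bar2_offset at 0, so the helper is a no-op for them.
	 */
	return bcm4377->bar2 + bcm4377->hw->bar2_offset + reg;
}

/* Usage would then read, for example:
 *	bootstage = ioread32(bcm4377_bar2_reg(bcm4377, BCM4377_BAR2_BOOTSTAGE));
 *	iowrite32(fw->size, bcm4377_bar2_reg(bcm4377, BCM4377_BAR2_FW_SIZE));
 */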
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 17a2f158a0df..30192bb08354 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -488,7 +488,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
- hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
+ hu = kzalloc(sizeof(*hu), GFP_KERNEL);
if (!hu) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 97da0b2bfd17..62633d9ba7c4 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -116,11 +116,6 @@ struct hci_nokia_neg_evt {
#define SETUP_BAUD_RATE 921600
#define INIT_BAUD_RATE 120000
-struct hci_nokia_radio_hdr {
- u8 evt;
- u8 dlen;
-} __packed;
-
struct nokia_bt_dev {
struct hci_uart hu;
struct serdev_device *serdev;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 0c9c9ee56592..ca6466676902 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -28,6 +28,7 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
+#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/mutex.h>
@@ -214,6 +215,7 @@ struct qca_power {
struct regulator_bulk_data *vreg_bulk;
int num_vregs;
bool vregs_on;
+ struct pwrseq_desc *pwrseq;
};
struct qca_serdev {
@@ -569,7 +571,7 @@ static int qca_open(struct hci_uart *hu)
if (!hci_uart_has_flow_control(hu))
return -EOPNOTSUPP;
- qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
+ qca = kzalloc(sizeof(*qca), GFP_KERNEL);
if (!qca)
return -ENOMEM;
@@ -1040,8 +1042,7 @@ static void qca_controller_memdump(struct work_struct *work)
}
if (!qca_memdump) {
- qca_memdump = kzalloc(sizeof(struct qca_memdump_info),
- GFP_ATOMIC);
+ qca_memdump = kzalloc(sizeof(*qca_memdump), GFP_ATOMIC);
if (!qca_memdump) {
mutex_unlock(&qca->hci_memdump_lock);
return;
@@ -1685,6 +1686,27 @@ static bool qca_wakeup(struct hci_dev *hdev)
return wakeup;
}
+static int qca_port_reopen(struct hci_uart *hu)
+{
+ int ret;
+
+ /* Now the device is in ready state to communicate with host.
+ * To sync host with device we need to reopen port.
+ * Without this, we will have RTS and CTS synchronization
+ * issues.
+ */
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hu->hdev, "failed to open port");
+ return ret;
+ }
+
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
static int qca_regulator_init(struct hci_uart *hu)
{
enum qca_btsoc_type soc_type = qca_soc_type(hu);
@@ -1696,6 +1718,7 @@ static int qca_regulator_init(struct hci_uart *hu)
* off the voltage regulator.
*/
qcadev = serdev_device_get_drvdata(hu->serdev);
+
if (!qcadev->bt_power->vregs_on) {
serdev_device_close(hu->serdev);
ret = qca_regulator_enable(qcadev);
@@ -1753,21 +1776,7 @@ static int qca_regulator_init(struct hci_uart *hu)
break;
}
- /* Now the device is in ready state to communicate with host.
- * To sync host with device we need to reopen port.
- * Without this, we will have RTS and CTS synchronization
- * issues.
- */
- serdev_device_close(hu->serdev);
- ret = serdev_device_open(hu->serdev);
- if (ret) {
- bt_dev_err(hu->hdev, "failed to open port");
- return ret;
- }
-
- hci_uart_set_flow_control(hu, false);
-
- return 0;
+ return qca_port_reopen(hu);
}
static int qca_power_on(struct hci_dev *hdev)
@@ -1792,6 +1801,7 @@ static int qca_power_on(struct hci_dev *hdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
+ case QCA_QCA6390:
ret = qca_regulator_init(hu);
break;
@@ -2130,6 +2140,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
unsigned long flags;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
bool sw_ctrl_state;
+ struct qca_power *power;
/* From this point we go into power off state. But serial port is
* still open, stop queueing the IBS data and flush all the buffered
@@ -2147,6 +2158,13 @@ static void qca_power_shutdown(struct hci_uart *hu)
return;
qcadev = serdev_device_get_drvdata(hu->serdev);
+ power = qcadev->bt_power;
+
+ if (power->pwrseq) {
+ pwrseq_power_off(power->pwrseq);
+ set_bit(QCA_BT_OFF, &qca->flags);
+ return;
+ }
switch (soc_type) {
case QCA_WCN3988:
@@ -2169,6 +2187,10 @@ static void qca_power_shutdown(struct hci_uart *hu)
}
break;
+ case QCA_QCA6390:
+ pwrseq_power_off(qcadev->bt_power->pwrseq);
+ break;
+
default:
gpiod_set_value_cansleep(qcadev->bt_en, 0);
}
@@ -2204,6 +2226,9 @@ static int qca_regulator_enable(struct qca_serdev *qcadev)
struct qca_power *power = qcadev->bt_power;
int ret;
+ if (power->pwrseq)
+ return pwrseq_power_on(power->pwrseq);
+
/* Already enabled */
if (power->vregs_on)
return 0;
@@ -2272,6 +2297,13 @@ static int qca_init_regulators(struct qca_power *qca,
return 0;
}
+static void qca_clk_disable_unprepare(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
@@ -2310,12 +2342,40 @@ static int qca_serdev_probe(struct serdev_device *serdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
+ case QCA_QCA6390:
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
if (!qcadev->bt_power)
return -ENOMEM;
+ break;
+ default:
+ break;
+ }
+ switch (qcadev->btsoc_type) {
+ case QCA_WCN6855:
+ case QCA_WCN7850:
+ if (!device_property_present(&serdev->dev, "enable-gpios")) {
+ /*
+ * Backward compatibility with old DT sources. If the
+ * node doesn't have the 'enable-gpios' property then
+ * let's use the power sequencer. Otherwise, let's
+ * drive everything ourselves.
+ */
+ qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
+ "bluetooth");
+ if (IS_ERR(qcadev->bt_power->pwrseq))
+ return PTR_ERR(qcadev->bt_power->pwrseq);
+
+ break;
+ }
+ fallthrough;
+ case QCA_WCN3988:
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ case QCA_WCN6750:
qcadev->bt_power->dev = &serdev->dev;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
@@ -2353,12 +2413,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
dev_err(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
+ break;
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err) {
- BT_ERR("wcn3990 serdev registration failed");
- return err;
- }
+ case QCA_QCA6390:
+ qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
+ "bluetooth");
+ if (IS_ERR(qcadev->bt_power->pwrseq))
+ return PTR_ERR(qcadev->bt_power->pwrseq);
break;
default:
@@ -2385,12 +2446,18 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (err)
return err;
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err) {
- BT_ERR("Rome serdev registration failed");
- clk_disable_unprepare(qcadev->susclk);
+ err = devm_add_action_or_reset(&serdev->dev,
+ qca_clk_disable_unprepare,
+ qcadev->susclk);
+ if (err)
return err;
- }
+
+ }
+
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err) {
+ BT_ERR("serdev registration failed");
+ return err;
}
hdev = qcadev->serdev_hu.hdev;
@@ -2428,15 +2495,11 @@ static void qca_serdev_remove(struct serdev_device *serdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- if (power->vregs_on) {
+ if (power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
- break;
- }
- fallthrough;
-
+ break;
default:
- if (qcadev->susclk)
- clk_disable_unprepare(qcadev->susclk);
+ break;
}
hci_uart_unregister_device(&qcadev->serdev_hu);
@@ -2450,15 +2513,27 @@ static void qca_serdev_shutdown(struct device *dev)
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
struct hci_uart *hu = &qcadev->serdev_hu;
struct hci_dev *hdev = hu->hdev;
- struct qca_data *qca = hu->priv;
const u8 ibs_wake_cmd[] = { 0xFD };
const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
if (qcadev->btsoc_type == QCA_QCA6390) {
- if (test_bit(QCA_BT_OFF, &qca->flags) ||
- !test_bit(HCI_RUNNING, &hdev->flags))
+ /* The purpose of sending the VSC is to reset the SoC into an initial
+ * state, which ensures the next hdev->setup() succeeds.
+ * If HCI_QUIRK_NON_PERSISTENT_SETUP is set, hdev->setup() can do its
+ * job regardless of the SoC state, so the VSC is not needed.
+ * If HCI_SETUP is set, hdev->setup() was never invoked and the SoC is
+ * already in the initial state, so the VSC is not needed either.
+ */
+ if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
+ hci_dev_test_flag(hdev, HCI_SETUP))
return;
+ /* The serdev must be in the open state when control reaches this
+ * point; this also avoids the use-after-free caused by flushing or
+ * writing to the serdev after it has been closed.
+ */
serdev_device_write_flush(serdev);
ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
sizeof(ibs_wake_cmd));
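A minimal consumer-side sketch of the power sequencer API the hci_qca hunks above rely on (devm_pwrseq_get(), pwrseq_power_on(), pwrseq_power_off()). The demo_* names are hypothetical; the "bluetooth" target string is the one used in the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwrseq/consumer.h>

static int demo_bt_power_up(struct device *dev, struct pwrseq_desc **out)
{
	struct pwrseq_desc *pwrseq;

	pwrseq = devm_pwrseq_get(dev, "bluetooth");
	if (IS_ERR(pwrseq))
		return PTR_ERR(pwrseq);

	*out = pwrseq;
	/* The provider (e.g. a combo WLAN/BT PMU driver) runs the clock,
	 * regulator and enable-GPIO sequence instead of this driver.
	 */
	return pwrseq_power_on(pwrseq);
}

static void demo_bt_power_down(struct pwrseq_desc *pwrseq)
{
	pwrseq_power_off(pwrseq);
}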
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 28750a40f0ed..c4046f8f1985 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -633,7 +633,7 @@ static int vhci_open(struct inode *inode, struct file *file)
{
struct vhci_data *data;
- data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1e29ba76615d..ac6c7e4900f4 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -457,7 +457,7 @@ static void regmap_sunxi_rsb_free_ctx(void *context)
kfree(ctx);
}
-static struct regmap_bus regmap_sunxi_rsb = {
+static const struct regmap_bus regmap_sunxi_rsb = {
.reg_write = regmap_sunxi_rsb_reg_write,
.reg_read = regmap_sunxi_rsb_reg_read,
.free_context = regmap_sunxi_rsb_free_ctx,
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index baf22a82c47a..b8af44c5cdbd 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -294,7 +294,7 @@ static int ts_nbus_probe(struct platform_device *pdev)
state.duty_cycle = state.period;
state.enabled = true;
- ret = pwm_apply_state(pwm, &state);
+ ret = pwm_apply_might_sleep(pwm, &state);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to configure PWM\n");
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index d2c7ada90186..64ee920721ee 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -414,4 +414,5 @@ static struct platform_driver vexpress_syscfg_driver = {
.probe = vexpress_syscfg_probe,
};
module_platform_driver(vexpress_syscfg_driver);
+MODULE_DESCRIPTION("Versatile Express configuration bus");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index 9345ce4976d7..94abd8f632a7 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -14,4 +14,13 @@ config SIFIVE_CCACHE
help
Support for the composable cache controller on SiFive platforms.
+config STARFIVE_STARLINK_CACHE
+ bool "StarFive StarLink Cache controller"
+ depends on RISCV
+ depends on ARCH_STARFIVE
+ select RISCV_DMA_NONCOHERENT
+ select RISCV_NONSTANDARD_CACHE_OPS
+ help
+ Support for the StarLink cache controller IP from StarFive.
+
endmenu
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
index 7657cff3bd6c..55c5e851034d 100644
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
-obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
+obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
+obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
+obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o
diff --git a/drivers/cache/starfive_starlink_cache.c b/drivers/cache/starfive_starlink_cache.c
new file mode 100644
index 000000000000..24c7d078ca22
--- /dev/null
+++ b/drivers/cache/starfive_starlink_cache.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cache Management Operations for StarFive's Starlink cache controller
+ *
+ * Copyright (C) 2024 Shanghai StarFive Technology Co., Ltd.
+ *
+ * Author: Joshua Yeong <joshua.yeong@starfivetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cacheflush.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+
+#include <asm/dma-noncoherent.h>
+
+#define STARLINK_CACHE_FLUSH_START_ADDR 0x0
+#define STARLINK_CACHE_FLUSH_END_ADDR 0x8
+#define STARLINK_CACHE_FLUSH_CTL 0x10
+#define STARLINK_CACHE_ALIGN 0x40
+
+#define STARLINK_CACHE_ADDRESS_RANGE_MASK GENMASK(39, 0)
+#define STARLINK_CACHE_FLUSH_CTL_MODE_MASK GENMASK(2, 1)
+#define STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK BIT(0)
+
+#define STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE 0
+#define STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE 1
+#define STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED 2
+#define STARLINK_CACHE_FLUSH_POLL_DELAY_US 1
+#define STARLINK_CACHE_FLUSH_TIMEOUT_US 5000000
+
+static void __iomem *starlink_cache_base;
+
+static void starlink_cache_flush_complete(void)
+{
+ volatile void __iomem *ctl = starlink_cache_base + STARLINK_CACHE_FLUSH_CTL;
+ u64 v;
+ int ret;
+
+ ret = readq_poll_timeout_atomic(ctl, v, !(v & STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK),
+ STARLINK_CACHE_FLUSH_POLL_DELAY_US,
+ STARLINK_CACHE_FLUSH_TIMEOUT_US);
+ if (ret)
+ WARN(1, "StarFive Starlink cache flush operation timeout\n");
+}
+
+static void starlink_cache_dma_cache_wback(phys_addr_t paddr, unsigned long size)
+{
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR);
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR);
+
+ mb();
+ writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK,
+ STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_CTL);
+
+ starlink_cache_flush_complete();
+}
+
+static void starlink_cache_dma_cache_invalidate(phys_addr_t paddr, unsigned long size)
+{
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR);
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR);
+
+ mb();
+ writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK,
+ STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_CTL);
+
+ starlink_cache_flush_complete();
+}
+
+static void starlink_cache_dma_cache_wback_inv(phys_addr_t paddr, unsigned long size)
+{
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR);
+ writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR);
+
+ mb();
+ writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK,
+ STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE),
+ starlink_cache_base + STARLINK_CACHE_FLUSH_CTL);
+
+ starlink_cache_flush_complete();
+}
+
+static const struct riscv_nonstd_cache_ops starlink_cache_ops = {
+ .wback = &starlink_cache_dma_cache_wback,
+ .inv = &starlink_cache_dma_cache_invalidate,
+ .wback_inv = &starlink_cache_dma_cache_wback_inv,
+};
+
+static const struct of_device_id starlink_cache_ids[] = {
+ { .compatible = "starfive,jh8100-starlink-cache" },
+ { /* sentinel */ }
+};
+
+static int __init starlink_cache_init(void)
+{
+ struct device_node *np;
+ u32 block_size;
+ int ret;
+
+ np = of_find_matching_node(NULL, starlink_cache_ids);
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ ret = of_property_read_u32(np, "cache-block-size", &block_size);
+ if (ret)
+ return ret;
+
+ if (block_size % STARLINK_CACHE_ALIGN)
+ return -EINVAL;
+
+ starlink_cache_base = of_iomap(np, 0);
+ if (!starlink_cache_base)
+ return -ENOMEM;
+
+ riscv_cbom_block_size = block_size;
+ riscv_noncoherent_supported();
+ riscv_noncoherent_register_cache_ops(&starlink_cache_ops);
+
+ return 0;
+}
+arch_initcall(starlink_cache_init);
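The three cache ops in the new file repeat the same start/end/mode/poll sequence; a hypothetical consolidation (not part of the patch, relying on the defines and includes already in the file) makes the shared register protocol explicit:

static void starlink_cache_flush_range(phys_addr_t paddr, unsigned long size,
				       u64 mode)
{
	/* Program the flush window [paddr, paddr + size). */
	writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr),
	       starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR);
	writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size),
	       starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR);

	/* Order the window writes before kicking off the operation. */
	mb();
	writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK, mode),
	       starlink_cache_base + STARLINK_CACHE_FLUSH_CTL);

	/* Poll until the controller clears the enable bit. */
	starlink_cache_flush_complete();
}

/* .wback would then reduce to:
 *	starlink_cache_flush_range(paddr, size, STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED);
 */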
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 20c90ebb3a3f..49e4829b7264 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3708,4 +3708,5 @@ static void __exit cdrom_exit(void)
module_init(cdrom_init);
module_exit(cdrom_exit);
+MODULE_DESCRIPTION("Uniform CD-ROM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index eefdd422ad8e..71cfe7a85913 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -744,6 +744,7 @@ static int probe_gdrom(struct platform_device *devptr)
.max_segments = 1,
/* set a large max size to get most from DMA */
.max_segment_size = 0x40000,
+ .features = BLK_FEAT_ROTATIONAL,
};
int err;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d51fc8321d41..da32e8ed0830 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -269,8 +269,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
if (!devp->hd_ireqfreq)
return -EIO;
- if (count < sizeof(unsigned long))
- return -EINVAL;
+ if (in_compat_syscall()) {
+ if (count < sizeof(compat_ulong_t))
+ return -EINVAL;
+ } else {
+ if (count < sizeof(unsigned long))
+ return -EINVAL;
+ }
add_wait_queue(&devp->hd_waitqueue, &wait);
@@ -294,9 +299,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
schedule();
}
- retval = put_user(data, (unsigned long __user *)buf);
- if (!retval)
- retval = sizeof(unsigned long);
+ if (in_compat_syscall()) {
+ retval = put_user(data, (compat_ulong_t __user *)buf);
+ if (!retval)
+ retval = sizeof(compat_ulong_t);
+ } else {
+ retval = put_user(data, (unsigned long __user *)buf);
+ if (!retval)
+ retval = sizeof(unsigned long);
+ }
+
out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&devp->hd_waitqueue, &wait);
@@ -651,12 +663,24 @@ struct compat_hpet_info {
unsigned short hi_timer;
};
+/* 32-bit types would lead to different command codes which should be
+ * translated into 64-bit ones before being passed to hpet_ioctl_common()
+ */
+#define COMPAT_HPET_INFO _IOR('h', 0x03, struct compat_hpet_info)
+#define COMPAT_HPET_IRQFREQ _IOW('h', 0x6, compat_ulong_t)
+
static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hpet_info info;
int err;
+ if (cmd == COMPAT_HPET_INFO)
+ cmd = HPET_INFO;
+
+ if (cmd == COMPAT_HPET_IRQFREQ)
+ cmd = HPET_IRQFREQ;
+
mutex_lock(&hpet_mutex);
err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
mutex_unlock(&hpet_mutex);
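
Note (illustration only, outside the applied diff): the command translation above is needed because _IOR()/_IOW() encode the size of the argument type into the ioctl number, so 32-bit userspace computes different values than a 64-bit kernel for requests that take an unsigned long or a struct whose layout differs in compat mode. A minimal sketch of the mismatch, assuming the standard asm-generic _IOC() layout (argument size in bits 16-29):

/* 64-bit kernel view of the uapi macro: sizeof(unsigned long) == 8 */
#define HPET_IRQFREQ		_IOW('h', 0x6, unsigned long)

/*
 * The same macro compiled by 32-bit userspace sees sizeof(unsigned long) == 4,
 * so the size field and hence the encoded number differ; the compat handler
 * therefore rewrites the command before reusing the common ioctl path:
 */
#define COMPAT_HPET_IRQFREQ	_IOW('h', 0x6, compat_ulong_t)

	if (cmd == COMPAT_HPET_IRQFREQ)
		cmd = HPET_IRQFREQ;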
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index f5c71a617a99..4084df65c9fa 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -64,19 +64,6 @@ static size_t rng_buffer_size(void)
return RNG_BUFFER_SIZE;
}
-static void add_early_randomness(struct hwrng *rng)
-{
- int bytes_read;
-
- mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
- mutex_unlock(&reading_mutex);
- if (bytes_read > 0) {
- size_t entropy = bytes_read * 8 * rng->quality / 1024;
- add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
- }
-}
-
static inline void cleanup_rng(struct kref *kref)
{
struct hwrng *rng = container_of(kref, struct hwrng, ref);
@@ -340,13 +327,12 @@ static ssize_t rng_current_store(struct device *dev,
const char *buf, size_t len)
{
int err;
- struct hwrng *rng, *old_rng, *new_rng;
+ struct hwrng *rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
- old_rng = current_rng;
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
} else {
@@ -362,11 +348,8 @@ static ssize_t rng_current_store(struct device *dev,
new_rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
- if (new_rng) {
- if (new_rng != old_rng)
- add_early_randomness(new_rng);
+ if (new_rng)
put_rng(new_rng);
- }
return err ? : len;
}
@@ -544,7 +527,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
- bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@@ -573,25 +555,8 @@ int hwrng_register(struct hwrng *rng)
err = set_current_rng(rng);
if (err)
goto out_unlock;
- /* to use current_rng in add_early_randomness() we need
- * to take a ref
- */
- is_new_current = true;
- kref_get(&rng->ref);
}
mutex_unlock(&rng_mutex);
- if (is_new_current || !rng->init) {
- /*
- * Use a new device's input to add some randomness to
- * the system. If this rng device isn't going to be
- * used right away, its init function hasn't been
- * called yet by set_current_rng(); so only use the
- * randomness from devices that don't need an init callback
- */
- add_early_randomness(rng);
- }
- if (is_new_current)
- put_rng(rng);
return 0;
out_unlock:
mutex_unlock(&rng_mutex);
@@ -602,12 +567,11 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
- struct hwrng *old_rng, *new_rng;
+ struct hwrng *new_rng;
int err;
mutex_lock(&rng_mutex);
- old_rng = current_rng;
list_del(&rng->list);
complete_all(&rng->dying);
if (current_rng == rng) {
@@ -626,11 +590,8 @@ void hwrng_unregister(struct hwrng *rng)
} else
mutex_unlock(&rng_mutex);
- if (new_rng) {
- if (old_rng != new_rng)
- add_early_randomness(new_rng);
+ if (new_rng)
put_rng(new_rng);
- }
wait_for_completion(&rng->cleanup_done);
}
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 49100845fcb7..7296127181ec 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -350,8 +350,8 @@ static void ipmb_remove(struct i2c_client *client)
}
static const struct i2c_device_id ipmb_id[] = {
- { "ipmb-dev", 0 },
- {},
+ { "ipmb-dev" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ipmb_id);
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 4e335832fc26..6a4f279c7c1f 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -561,8 +561,8 @@ MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match);
#endif
static const struct i2c_device_id ipmi_ipmb_id[] = {
- { DEVICE_NAME, 0 },
- {},
+ { DEVICE_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 3f509a22217b..96ad571d041a 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2049,7 +2049,7 @@ static int dmi_ipmi_probe(struct platform_device *pdev)
#endif
static const struct i2c_device_id ssif_id[] = {
- { DEVICE_NAME, 0 },
+ { DEVICE_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, ssif_id);
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
index 56346fb32872..a14fafc583d4 100644
--- a/drivers/char/ipmi/ssif_bmc.c
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -177,13 +177,15 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t
unsigned long flags;
ssize_t ret;
- if (count > sizeof(struct ipmi_ssif_msg))
+ if (count < sizeof(msg.len) ||
+ count > sizeof(struct ipmi_ssif_msg))
return -EINVAL;
if (copy_from_user(&msg, buf, count))
return -EFAULT;
- if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
+ if (!msg.len || msg.len > IPMI_SSIF_PAYLOAD_MAX ||
+ count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
return -EINVAL;
spin_lock_irqsave(&ssif_bmc->lock, flags);
@@ -850,8 +852,8 @@ static const struct of_device_id ssif_bmc_match[] = {
MODULE_DEVICE_TABLE(of, ssif_bmc_match);
static const struct i2c_device_id ssif_bmc_id[] = {
- { DEVICE_NAME, 0 },
- { },
+ { DEVICE_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ssif_bmc_id);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index e63a6a17793c..cf0be8a7939d 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,7 +29,7 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default y
+ default X86_64
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 4c695b0388f3..9bb142c75243 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -16,8 +16,8 @@ tpm-y += eventlog/common.o
tpm-y += eventlog/tpm1.o
tpm-y += eventlog/tpm2.o
tpm-y += tpm-buf.o
+tpm-y += tpm2-sessions.o
-tpm-$(CONFIG_TCG_TPM2_HMAC) += tpm2-sessions.o
tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 639c3f395a5a..4c0bbba64ee5 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode,
if (!err) {
seq = file->private_data;
seq->private = chip;
+ } else {
+ put_device(&chip->dev);
}
return err;
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index 647c6ca92ac3..cad0048bcc3c 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -223,30 +223,4 @@ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
}
EXPORT_SYMBOL_GPL(tpm_buf_read_u32);
-static u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-
-/**
- * tpm_buf_parameters - return the TPM response parameters area of the tpm_buf
- * @buf: tpm_buf to use
- *
- * Where the parameters are located depends on the tag of a TPM
- * command (it's immediately after the header for TPM_ST_NO_SESSIONS
- * or 4 bytes after for TPM_ST_SESSIONS). Evaluate this and return a
- * pointer to the first byte of the parameters area.
- *
- * @return: pointer to parameters area
- */
-u8 *tpm_buf_parameters(struct tpm_buf *buf)
-{
- int offset = TPM_HEADER_SIZE;
-
- if (tpm_buf_tag(buf) == TPM2_ST_SESSIONS)
- offset += 4;
- return &buf->data[offset];
-}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 6b8b9956ba69..7bb87fa5f7a1 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -28,7 +28,7 @@
#include <linux/tpm_eventlog.h>
#ifdef CONFIG_X86
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
#define TPM_MINOR 224 /* officially assigned */
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 0cdf892ec2a7..1e856259219e 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -281,6 +281,7 @@ struct tpm2_get_random_out {
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
struct tpm2_get_random_out *out;
+ struct tpm_header *head;
struct tpm_buf buf;
u32 recd;
u32 num_bytes = max;
@@ -288,6 +289,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
int total = 0;
int retries = 5;
u8 *dest_ptr = dest;
+ off_t offset;
if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
@@ -320,7 +322,13 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
goto out;
}
- out = (struct tpm2_get_random_out *)tpm_buf_parameters(&buf);
+ head = (struct tpm_header *)buf.data;
+ offset = TPM_HEADER_SIZE;
+ /* Skip the parameter size field: */
+ if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS)
+ offset += 4;
+
+ out = (struct tpm2_get_random_out *)&buf.data[offset];
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
TPM_HEADER_SIZE +
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index ea8860661876..2281d55df545 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -80,8 +80,8 @@
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
-static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
- u32 *handle, u8 *name);
+#define AES_KEY_BYTES AES_KEYSIZE_128
+#define AES_KEY_BITS (AES_KEY_BYTES*8)
/*
* This is the structure that carries all the auth information (like
@@ -145,6 +145,7 @@ struct tpm2_auth {
u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE];
};
+#ifdef CONFIG_TCG_TPM2_HMAC
/*
* Name Size based on TPM algorithm (assumes no hash bigger than 255)
*/
@@ -160,6 +161,226 @@ static u8 name_size(const u8 *name)
return size_map[alg] + 2;
}
+static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ off_t offset = TPM_HEADER_SIZE;
+ u32 tot_len = be32_to_cpu(head->length);
+ u32 val;
+
+ /* we're starting after the header so adjust the length */
+ tot_len -= TPM_HEADER_SIZE;
+
+ /* skip public */
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val > tot_len)
+ return -EINVAL;
+ offset += val;
+ /* name */
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val != name_size(&buf->data[offset]))
+ return -EINVAL;
+ memcpy(name, &buf->data[offset], val);
+ /* forget the rest */
+ return 0;
+}
+
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, handle);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
+ if (rc == TPM2_RC_SUCCESS)
+ rc = tpm2_parse_read_public(name, &buf);
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+#endif /* CONFIG_TCG_TPM2_HMAC */
+
+/**
+ * tpm_buf_append_name() - add a handle area to the buffer
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @handle: The handle to be appended
+ * @name: The name of the handle (may be NULL)
+ *
+ * In order to compute session HMACs, we need to know the names of the
+ * objects pointed to by the handles. For most objects, this is simply
+ * the actual 4 byte handle or an empty buf (in these cases @name
+ * should be NULL) but for volatile objects, permanent objects and NV
+ * areas, the name is defined as the hash (according to the name
+ * algorithm which should be set to sha256) of the public area to
+ * which the two byte algorithm id has been appended. For these
+ * objects, the @name pointer should point to this. If a name is
+ * required but @name is NULL, then TPM2_ReadPublic() will be called
+ * on the handle to obtain the name.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
+{
+#ifdef CONFIG_TCG_TPM2_HMAC
+ enum tpm2_mso_type mso = tpm2_handle_mso(handle);
+ struct tpm2_auth *auth;
+ int slot;
+#endif
+
+ if (!tpm2_chip_auth(chip)) {
+ tpm_buf_append_u32(buf, handle);
+ /* count the number of handles in the upper bits of flags */
+ buf->handles++;
+ return;
+ }
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+ slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
+ if (slot >= AUTH_MAX_NAMES) {
+ dev_err(&chip->dev, "TPM: too many handles\n");
+ return;
+ }
+ auth = chip->auth;
+ WARN(auth->session != tpm_buf_length(buf),
+ "name added in wrong place\n");
+ tpm_buf_append_u32(buf, handle);
+ auth->session += 4;
+
+ if (mso == TPM2_MSO_PERSISTENT ||
+ mso == TPM2_MSO_VOLATILE ||
+ mso == TPM2_MSO_NVRAM) {
+ if (!name)
+ tpm2_read_public(chip, handle, auth->name[slot]);
+ } else {
+ if (name)
+ dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+ }
+
+ auth->name_h[slot] = handle;
+ if (name)
+ memcpy(auth->name[slot], name, name_size(name));
+#endif
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_name);
+
+/**
+ * tpm_buf_append_hmac_session() - Append a TPM session element
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @attributes: The session attributes
+ * @passphrase: The session authority (NULL if none)
+ * @passphrase_len: The length of the session authority (0 if none)
+ *
+ * This fills in a session structure in the TPM command buffer, except
+ * for the HMAC which cannot be computed until the command buffer is
+ * complete. The type of session is controlled by the @attributes,
+ * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
+ * session won't terminate after tpm_buf_check_hmac_response(),
+ * TPM2_SA_DECRYPT which means this buffer's first parameter should be
+ * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
+ * response buffer's first parameter needs to be decrypted (confusing,
+ * but the defines are written from the point of view of the TPM).
+ *
+ * Any session appended by this command must be finalized by calling
+ * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
+ * and the TPM will reject the command.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphrase_len)
+{
+#ifdef CONFIG_TCG_TPM2_HMAC
+ u8 nonce[SHA256_DIGEST_SIZE];
+ struct tpm2_auth *auth;
+ u32 len;
+#endif
+
+ if (!tpm2_chip_auth(chip)) {
+ /* offset tells us where the sessions area begins */
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ u32 len = 9 + passphrase_len;
+
+ if (tpm_buf_length(buf) != offset) {
+ /* not the first session so update the existing length */
+ len += get_unaligned_be32(&buf->data[offset]);
+ put_unaligned_be32(len, &buf->data[offset]);
+ } else {
+ tpm_buf_append_u32(buf, len);
+ }
+ /* auth handle */
+ tpm_buf_append_u32(buf, TPM2_RS_PW);
+ /* nonce */
+ tpm_buf_append_u16(buf, 0);
+ /* attributes */
+ tpm_buf_append_u8(buf, 0);
+ /* passphrase */
+ tpm_buf_append_u16(buf, passphrase_len);
+ tpm_buf_append(buf, passphrase, passphrase_len);
+ return;
+ }
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+ /*
+ * The Architecture Guide requires us to strip trailing zeros
+ * before computing the HMAC
+ */
+ while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0')
+ passphrase_len--;
+
+ auth = chip->auth;
+ auth->attrs = attributes;
+ auth->passphrase_len = passphrase_len;
+ if (passphrase_len)
+ memcpy(auth->passphrase, passphrase, passphrase_len);
+
+ if (auth->session != tpm_buf_length(buf)) {
+ /* we're not the first session */
+ len = get_unaligned_be32(&buf->data[auth->session]);
+ if (4 + len + auth->session != tpm_buf_length(buf)) {
+ WARN(1, "session length mismatch, cannot append");
+ return;
+ }
+
+ /* add our new session */
+ len += 9 + 2 * SHA256_DIGEST_SIZE;
+ put_unaligned_be32(len, &buf->data[auth->session]);
+ } else {
+ tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
+ }
+
+ /* random number for our nonce */
+ get_random_bytes(nonce, sizeof(nonce));
+ memcpy(auth->our_nonce, nonce, sizeof(nonce));
+ tpm_buf_append_u32(buf, auth->handle);
+ /* our new nonce */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+ tpm_buf_append_u8(buf, auth->attrs);
+ /* and put a placeholder for the hmac */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+#endif
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session);
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+ u32 *handle, u8 *name);
+
/*
* It turns out the crypto hmac(sha256) is hard for us to consume
* because it assumes a fixed key and the TPM seems to change the key
@@ -341,82 +562,6 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
}
/**
- * tpm_buf_append_hmac_session() - Append a TPM session element
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @attributes: The session attributes
- * @passphrase: The session authority (NULL if none)
- * @passphrase_len: The length of the session authority (0 if none)
- *
- * This fills in a session structure in the TPM command buffer, except
- * for the HMAC which cannot be computed until the command buffer is
- * complete. The type of session is controlled by the @attributes,
- * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
- * session won't terminate after tpm_buf_check_hmac_response(),
- * TPM2_SA_DECRYPT which means this buffers first parameter should be
- * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
- * response buffer's first parameter needs to be decrypted (confusing,
- * but the defines are written from the point of view of the TPM).
- *
- * Any session appended by this command must be finalized by calling
- * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
- * and the TPM will reject the command.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
- u8 attributes, u8 *passphrase,
- int passphrase_len)
-{
- u8 nonce[SHA256_DIGEST_SIZE];
- u32 len;
- struct tpm2_auth *auth = chip->auth;
-
- /*
- * The Architecture Guide requires us to strip trailing zeros
- * before computing the HMAC
- */
- while (passphrase && passphrase_len > 0
- && passphrase[passphrase_len - 1] == '\0')
- passphrase_len--;
-
- auth->attrs = attributes;
- auth->passphrase_len = passphrase_len;
- if (passphrase_len)
- memcpy(auth->passphrase, passphrase, passphrase_len);
-
- if (auth->session != tpm_buf_length(buf)) {
- /* we're not the first session */
- len = get_unaligned_be32(&buf->data[auth->session]);
- if (4 + len + auth->session != tpm_buf_length(buf)) {
- WARN(1, "session length mismatch, cannot append");
- return;
- }
-
- /* add our new session */
- len += 9 + 2 * SHA256_DIGEST_SIZE;
- put_unaligned_be32(len, &buf->data[auth->session]);
- } else {
- tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
- }
-
- /* random number for our nonce */
- get_random_bytes(nonce, sizeof(nonce));
- memcpy(auth->our_nonce, nonce, sizeof(nonce));
- tpm_buf_append_u32(buf, auth->handle);
- /* our new nonce */
- tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
- tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
- tpm_buf_append_u8(buf, auth->attrs);
- /* and put a placeholder for the hmac */
- tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
- tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
-}
-EXPORT_SYMBOL(tpm_buf_append_hmac_session);
-
-/**
* tpm_buf_fill_hmac_session() - finalize the session HMAC
* @chip: the TPM chip structure
* @buf: The buffer to be appended
@@ -446,6 +591,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u8 cphash[SHA256_DIGEST_SIZE];
struct sha256_state sctx;
+ if (!auth)
+ return;
+
/* save the command code in BE format */
auth->ordinal = head->ordinal;
@@ -564,104 +712,6 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- off_t offset = TPM_HEADER_SIZE;
- u32 tot_len = be32_to_cpu(head->length);
- u32 val;
-
- /* we're starting after the header so adjust the length */
- tot_len -= TPM_HEADER_SIZE;
-
- /* skip public */
- val = tpm_buf_read_u16(buf, &offset);
- if (val > tot_len)
- return -EINVAL;
- offset += val;
- /* name */
- val = tpm_buf_read_u16(buf, &offset);
- if (val != name_size(&buf->data[offset]))
- return -EINVAL;
- memcpy(name, &buf->data[offset], val);
- /* forget the rest */
- return 0;
-}
-
-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
-{
- struct tpm_buf buf;
- int rc;
-
- rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
- if (rc == TPM2_RC_SUCCESS)
- rc = tpm2_parse_read_public(name, &buf);
-
- tpm_buf_destroy(&buf);
-
- return rc;
-}
-
-/**
- * tpm_buf_append_name() - add a handle area to the buffer
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @handle: The handle to be appended
- * @name: The name of the handle (may be NULL)
- *
- * In order to compute session HMACs, we need to know the names of the
- * objects pointed to by the handles. For most objects, this is simply
- * the actual 4 byte handle or an empty buf (in these cases @name
- * should be NULL) but for volatile objects, permanent objects and NV
- * areas, the name is defined as the hash (according to the name
- * algorithm which should be set to sha256) of the public area to
- * which the two byte algorithm id has been appended. For these
- * objects, the @name pointer should point to this. If a name is
- * required but @name is NULL, then TPM2_ReadPublic() will be called
- * on the handle to obtain the name.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name)
-{
- enum tpm2_mso_type mso = tpm2_handle_mso(handle);
- struct tpm2_auth *auth = chip->auth;
- int slot;
-
- slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE)/4;
- if (slot >= AUTH_MAX_NAMES) {
- dev_err(&chip->dev, "TPM: too many handles\n");
- return;
- }
- WARN(auth->session != tpm_buf_length(buf),
- "name added in wrong place\n");
- tpm_buf_append_u32(buf, handle);
- auth->session += 4;
-
- if (mso == TPM2_MSO_PERSISTENT ||
- mso == TPM2_MSO_VOLATILE ||
- mso == TPM2_MSO_NVRAM) {
- if (!name)
- tpm2_read_public(chip, handle, auth->name[slot]);
- } else {
- if (name)
- dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
- }
-
- auth->name_h[slot] = handle;
- if (name)
- memcpy(auth->name[slot], name, name_size(name));
-}
-EXPORT_SYMBOL(tpm_buf_append_name);
-
/**
* tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness
* @chip: the TPM chip structure
@@ -702,6 +752,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
u32 cc = be32_to_cpu(auth->ordinal);
int parm_len, len, i, handles;
+ if (!auth)
+ return rc;
+
if (auth->session >= TPM_HEADER_SIZE) {
WARN(1, "tpm session not filled correctly\n");
goto out;
@@ -821,8 +874,13 @@ EXPORT_SYMBOL(tpm_buf_check_hmac_response);
*/
void tpm2_end_auth_session(struct tpm_chip *chip)
{
- tpm2_flush_context(chip, chip->auth->handle);
- memzero_explicit(chip->auth, sizeof(*chip->auth));
+ struct tpm2_auth *auth = chip->auth;
+
+ if (!auth)
+ return;
+
+ tpm2_flush_context(chip, auth->handle);
+ memzero_explicit(auth, sizeof(*auth));
}
EXPORT_SYMBOL(tpm2_end_auth_session);
@@ -904,6 +962,11 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
int rc;
u32 null_key;
+ if (!auth) {
+ dev_warn_once(&chip->dev, "auth session is not active\n");
+ return 0;
+ }
+
rc = tpm2_load_null(chip, &null_key);
if (rc)
goto out;
@@ -954,6 +1017,20 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
}
EXPORT_SYMBOL(tpm2_start_auth_session);
+/*
+ * A mask containing the object attributes for the kernel-held null primary key
+ * used in HMAC encryption. For more information on the individual attributes,
+ * refer to section "8.3 TPMA_OBJECT (Object Attributes)".
+ */
+#define TPM2_OA_NULL_KEY ( \
+ TPM2_OA_NO_DA | \
+ TPM2_OA_FIXED_TPM | \
+ TPM2_OA_FIXED_PARENT | \
+ TPM2_OA_SENSITIVE_DATA_ORIGIN | \
+ TPM2_OA_USER_WITH_AUTH | \
+ TPM2_OA_DECRYPT | \
+ TPM2_OA_RESTRICTED)
+
/**
* tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY
*
@@ -1018,7 +1095,7 @@ static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf,
val = tpm_buf_read_u32(buf, &offset_t);
/* object properties */
- if (val != TPM2_OA_TMPL)
+ if (val != TPM2_OA_NULL_KEY)
return -EINVAL;
/* auth policy (empty) */
@@ -1178,7 +1255,7 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
tpm_buf_append_u16(&template, TPM_ALG_SHA256);
/* object properties */
- tpm_buf_append_u32(&template, TPM2_OA_TMPL);
+ tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY);
/* sauth policy (empty) */
tpm_buf_append_u16(&template, 0);
@@ -1284,3 +1361,4 @@ int tpm2_sessions_init(struct tpm_chip *chip)
return rc;
}
+#endif /* CONFIG_TCG_TPM2_HMAC */
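
Note (illustration only, outside the applied diff): with tpm_buf_append_name() and tpm_buf_append_hmac_session() moved outside the CONFIG_TCG_TPM2_HMAC guard, a caller can use one code path regardless of whether HMAC sessions are built in; without chip->auth the same calls fall back to a plain password (TPM2_RS_PW) authorization. A minimal sketch of the calling convention; the command code (TPM2_CC_UNSEAL), handle, name and passphrase below are illustrative placeholders, not taken from this patch:

static int example_authorized_command(struct tpm_chip *chip, u32 handle,
				      u8 *name, u8 *pass, int pass_len)
{
	struct tpm_buf buf;
	int rc;

	/* Returns 0 and warns once when no HMAC auth session is available. */
	rc = tpm2_start_auth_session(chip);
	if (rc)
		return rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
	if (rc)
		return rc;

	/* Handle area first, then the HMAC (or password) session element. */
	tpm_buf_append_name(chip, &buf, handle, name);
	tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_CONTINUE_SESSION,
				    pass, pass_len);
	/* ...command parameters would be appended here... */

	/* Finalize the session HMAC, transmit, then verify the response HMAC. */
	tpm_buf_fill_hmac_session(chip, &buf);
	rc = tpm_transmit_cmd(chip, &buf, 0, "example command");
	rc = tpm_buf_check_hmac_response(chip, &buf, rc);

	tpm_buf_destroy(&buf);
	return rc;
}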
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 176cd8dbf1db..fdef214b9f6b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
- flush_work(&priv->free_irq_work);
+ if (priv->free_irq_work.func)
+ flush_work(&priv->free_irq_work);
tpm_tis_clkrun_enable(chip, false);
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 13e99cf65efe..690ad8e9b731 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -210,7 +210,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
static inline bool is_bsw(void)
{
#ifdef CONFIG_X86
- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
+ return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0;
#else
return false;
#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 3f9eaf27b41b..61b42c83ced8 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -37,6 +37,7 @@
#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
+#define SPI_HDRSIZE 4
/*
* TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
@@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
int irq, const struct tpm_tis_phy_ops *phy_ops)
{
- phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
if (!phy->iobuf)
return -ENOMEM;
@@ -317,6 +318,7 @@ static void tpm_tis_spi_remove(struct spi_device *dev)
}
static const struct spi_device_id tpm_tis_spi_id[] = {
+ { "attpm20p", (unsigned long)tpm_tis_spi_probe },
{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
{ "slb9670", (unsigned long)tpm_tis_spi_probe },
{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 6a77d7e201a9..2f83fb97c6fb 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -204,8 +204,15 @@ fail:
pr_err("%pV:%s: %s ID is greater than %zu\n",
&vaf, con_id, failure, max_size);
va_end(ap_copy);
- kfree(cla);
- return NULL;
+
+ /*
+ * Don't fail in this case but, as the entry won't ever match, just
+ * fill it with something that also won't match.
+ */
+ strscpy(cla->con_id, "bad", sizeof(cla->con_id));
+ strscpy(cla->dev_id, "bad", sizeof(cla->dev_id));
+
+ return &cla->cl;
}
static struct clk_lookup *
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index ba504e19d420..62d876e150e1 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
static const struct mtk_clk_desc mfg_desc = {
.clks = mfg_clks,
.num_clks = ARRAY_SIZE(mfg_clks),
+ .need_runtime_pm = true,
};
static const struct of_device_id of_match_clk_mt8183_mfg[] = {
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index bd37ab4d1a9b..ba1d1c495bc2 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -496,14 +496,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
}
- devm_pm_runtime_enable(&pdev->dev);
- /*
- * Do a pm_runtime_resume_and_get() to workaround a possible
- * deadlock between clk_register() and the genpd framework.
- */
- r = pm_runtime_resume_and_get(&pdev->dev);
- if (r)
- return r;
+ if (mcd->need_runtime_pm) {
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+ * Do a pm_runtime_resume_and_get() to workaround a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+ }
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
@@ -585,7 +587,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
- pm_runtime_put(&pdev->dev);
+ if (mcd->need_runtime_pm)
+ pm_runtime_put(&pdev->dev);
return r;
@@ -618,7 +621,8 @@ free_base:
if (mcd->shared_io && base)
iounmap(base);
- pm_runtime_put(&pdev->dev);
+ if (mcd->need_runtime_pm)
+ pm_runtime_put(&pdev->dev);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 22096501a60a..c17fe1c2d732 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -237,6 +237,8 @@ struct mtk_clk_desc {
int (*clk_notifier_func)(struct device *dev, struct clk *clk);
unsigned int mfg_clk_idx;
+
+ bool need_runtime_pm;
};
int mtk_clk_pdev_probe(struct platform_device *pdev);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1bb51a058872..46369edfc07a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -827,6 +827,14 @@ config SM_CAMCC_8550
Support for the camera clock controller on SM8550 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_8650
+ tristate "SM8650 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_8650
+ help
+ Support for the camera clock controller on SM8650 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_DISPCC_6115
tristate "SM6115 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index dec5b6db6860..28bffa1eb8dd 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -109,6 +109,7 @@ obj-$(CONFIG_SM_CAMCC_6350) += camcc-sm6350.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o
+obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o
obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index 5f7f537e4ecb..e8632db2c542 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -70,7 +70,6 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
static const struct alpha_pll_config ipq5018_pll_config = {
.l = 0x2a,
.config_ctl_val = 0x4001075b,
- .config_ctl_hi_val = 0x304,
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
@@ -84,7 +83,6 @@ static const struct alpha_pll_config ipq5018_pll_config = {
static const struct alpha_pll_config ipq5332_pll_config = {
.l = 0x2d,
.config_ctl_val = 0x4001075b,
- .config_ctl_hi_val = 0x304,
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
diff --git a/drivers/clk/qcom/camcc-sm8650.c b/drivers/clk/qcom/camcc-sm8650.c
new file mode 100644
index 000000000000..1b28e086e519
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm8650.c
@@ -0,0 +1,3591 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8650-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL7_OUT_EVEN,
+ P_CAM_CC_PLL8_OUT_EVEN,
+ P_CAM_CC_PLL9_OUT_EVEN,
+ P_CAM_CC_PLL9_OUT_ODD,
+ P_CAM_CC_PLL10_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct pll_vco rivian_ole_vco[] = {
+ { 777000000, 1285000000, 0 },
+};
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x31,
+ .alpha = 0x7aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000030,
+ .config_ctl_hi_val = 0x80890263,
+ .config_ctl_hi1_val = 0x00000217,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = rivian_ole_vco,
+ .num_vco = ARRAY_SIZE(rivian_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x30,
+ .alpha = 0x8aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x30,
+ .alpha = 0x8aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x30,
+ .alpha = 0x8aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x30,
+ .alpha = 0x8aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll7_config = {
+ .l = 0x30,
+ .alpha = 0x8aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll7 = {
+ .offset = 0x7000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = {
+ .offset = 0x7000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll7_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll7_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll8_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll8 = {
+ .offset = 0x8000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
+ .offset = 0x8000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll8_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll8_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll9_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll9 = {
+ .offset = 0x9000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll9_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll9_out_even = {
+ .offset = 0x9000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll9_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll9_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll9_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll9.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll9_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll9_out_odd = {
+ .offset = 0x9000,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll9_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll9_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll9_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll9.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll10_config = {
+ .l = 0x30,
+ .alpha = 0x8aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll10 = {
+ .offset = 0xa000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll10",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll10_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll10_out_even = {
+ .offset = 0xa000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll10_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll10_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll10_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll10.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL9_OUT_ODD, 4 },
+ { P_CAM_CC_PLL9_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll9_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll9_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL8_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll8_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL7_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll7_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL10_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll10_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_10[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_10[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_11_ao[] = {
+ { P_BI_TCXO_AO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_11_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(785000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x10050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_rt_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_rt_clk_src = {
+ .cmd_rcgr = 0x1325c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_rt_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_rt_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x131cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x131e8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_2_clk_src = {
+ .cmd_rcgr = 0x13204,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x1104c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cre_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL9_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cre_clk_src = {
+ .cmd_rcgr = 0x13144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cre_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x150e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x15104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x15124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x15144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ .cmd_rcgr = 0x15164,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
+ .cmd_rcgr = 0x15184,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi6phytimer_clk_src = {
+ .cmd_rcgr = 0x151a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi6phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi7phytimer_clk_src = {
+ .cmd_rcgr = 0x151c4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi7phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csid_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csid_clk_src = {
+ .cmd_rcgr = 0x13238,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL9_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x131a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(466000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(785000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x11018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(466000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(785000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0x12018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(466000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(785000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0x12068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0x13000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0x13028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_nps_clk_src[] = {
+ F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(575000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(825000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_nps_clk_src = {
+ .cmd_rcgr = 0x10094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_ipe_nps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL9_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0x13168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_jpeg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x15000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x1501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x15038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x15054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x15070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x1508c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x150a8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x150c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_qdss_debug_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_qdss_debug_clk_src = {
+ .cmd_rcgr = 0x1329c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_qdss_debug_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_0_clk_src[] = {
+ F(466000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(785000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_0_clk_src = {
+ .cmd_rcgr = 0x1306c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_sfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_1_clk_src[] = {
+ F(466000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(785000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_1_clk_src = {
+ .cmd_rcgr = 0x130bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_sfe_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_2_clk_src[] = {
+ F(466000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0),
+ F(785000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_2_clk_src = {
+ .cmd_rcgr = 0x1310c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_sfe_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_2_clk_src",
+ .parent_data = cam_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x132f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_10,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x10034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO_AO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x132d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_11_ao,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_11_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_11_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_fast_ahb_clk = {
+ .halt_reg = 0x10030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_shift_clk = {
+ .halt_reg = 0x10078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x10078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_nrt_clk = {
+ .halt_reg = 0x13284,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13284,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_nrt_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_rt_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_rt_clk = {
+ .halt_reg = 0x13274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_rt_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_rt_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0x13290,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13290,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_xo_clk = {
+ .halt_reg = 0x13294,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13294,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x131e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x13200,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13200,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_2_clk = {
+ .halt_reg = 0x1321c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1321c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x132d0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x132d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x13220,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13220,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_bps_clk = {
+ .halt_reg = 0x10074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_cre_clk = {
+ .halt_reg = 0x13160,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_cre_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cre_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
+ .halt_reg = 0x1322c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1322c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_0_clk = {
+ .halt_reg = 0x1103c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1103c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_1_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_2_clk = {
+ .halt_reg = 0x1208c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_lite_clk = {
+ .halt_reg = 0x13024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ipe_nps_clk = {
+ .halt_reg = 0x100b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ipe_nps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sbi_clk = {
+ .halt_reg = 0x10104,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10104,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sbi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_0_clk = {
+ .halt_reg = 0x13090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_1_clk = {
+ .halt_reg = 0x130e0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_2_clk = {
+ .halt_reg = 0x13130,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13130,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_ahb_clk = {
+ .halt_reg = 0x13164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_clk = {
+ .halt_reg = 0x1315c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1315c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cre_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x150f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x1511c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1511c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x1513c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1513c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x1515c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1515c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi4phytimer_clk = {
+ .halt_reg = 0x1517c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1517c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi5phytimer_clk = {
+ .halt_reg = 0x1519c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1519c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi5phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi6phytimer_clk = {
+ .halt_reg = 0x151bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi6phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi6phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi7phytimer_clk = {
+ .halt_reg = 0x151dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi7phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi7phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_clk = {
+ .halt_reg = 0x13250,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13250,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
+ .halt_reg = 0x15100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_csiphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x150fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x15120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x15140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x15160,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy4_clk = {
+ .halt_reg = 0x15180,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15180,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy5_clk = {
+ .halt_reg = 0x151a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy6_clk = {
+ .halt_reg = 0x151c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy7_clk = {
+ .halt_reg = 0x151e0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0x131c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x131bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x11030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
+ .halt_reg = 0x11048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_shift_clk = {
+ .halt_reg = 0x11064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x11064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0x12030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
+ .halt_reg = 0x12048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_shift_clk = {
+ .halt_reg = 0x1204c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1204c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0x12080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_fast_ahb_clk = {
+ .halt_reg = 0x12098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_shift_clk = {
+ .halt_reg = 0x1209c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1209c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_ahb_clk = {
+ .halt_reg = 0x13050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0x13018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0x1304c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1304c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0x13040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_ahb_clk = {
+ .halt_reg = 0x100d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_clk = {
+ .halt_reg = 0x100ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = {
+ .halt_reg = 0x100d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_clk = {
+ .halt_reg = 0x100bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = {
+ .halt_reg = 0x100d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_shift_clk = {
+ .halt_reg = 0x100dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x100dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_1_clk = {
+ .halt_reg = 0x1318c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1318c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0x13180,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13180,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x15018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x15034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x15050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1506c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x15088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x150a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x150c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x150dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_clk = {
+ .halt_reg = 0x132b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x132b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_qdss_debug_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_xo_clk = {
+ .halt_reg = 0x132b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x132b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_clk = {
+ .halt_reg = 0x100f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_fast_ahb_clk = {
+ .halt_reg = 0x10108,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10108,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_shift_clk = {
+ .halt_reg = 0x1010c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1010c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_clk = {
+ .halt_reg = 0x13084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
+ .halt_reg = 0x1309c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1309c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_shift_clk = {
+ .halt_reg = 0x130a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x130a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_clk = {
+ .halt_reg = 0x130d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = {
+ .halt_reg = 0x130ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_shift_clk = {
+ .halt_reg = 0x130f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x130f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_2_clk = {
+ .halt_reg = 0x13124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_sfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_2_fast_ahb_clk = {
+ .halt_reg = 0x1313c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1313c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_2_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_2_shift_clk = {
+ .halt_reg = 0x13140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x13140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_2_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_titan_top_shift_clk = {
+ .halt_reg = 0x1330c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1330c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_titan_top_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cam_cc_titan_top_gdsc = {
+ .gdscr = 0x132bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_bps_gdsc = {
+ .gdscr = 0x10004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_0_gdsc = {
+ .gdscr = 0x11004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_1_gdsc = {
+ .gdscr = 0x12004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_2_gdsc = {
+ .gdscr = 0x12054,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ipe_0_gdsc = {
+ .gdscr = 0x10080,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_sbi_gdsc = {
+ .gdscr = 0x100e4,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_sbi_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_sfe_0_gdsc = {
+ .gdscr = 0x13058,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_sfe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_sfe_1_gdsc = {
+ .gdscr = 0x130a8,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_sfe_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_sfe_2_gdsc = {
+ .gdscr = 0x130f8,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_sfe_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_sm8650_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_BPS_FAST_AHB_CLK] = &cam_cc_bps_fast_ahb_clk.clkr,
+ [CAM_CC_BPS_SHIFT_CLK] = &cam_cc_bps_shift_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_NRT_CLK] = &cam_cc_camnoc_axi_nrt_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_RT_CLK] = &cam_cc_camnoc_axi_rt_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_RT_CLK_SRC] = &cam_cc_camnoc_axi_rt_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CAMNOC_XO_CLK] = &cam_cc_camnoc_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr,
+ [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPAS_BPS_CLK] = &cam_cc_cpas_bps_clk.clkr,
+ [CAM_CC_CPAS_CRE_CLK] = &cam_cc_cpas_cre_clk.clkr,
+ [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr,
+ [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr,
+ [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr,
+ [CAM_CC_CPAS_IFE_2_CLK] = &cam_cc_cpas_ife_2_clk.clkr,
+ [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr,
+ [CAM_CC_CPAS_IPE_NPS_CLK] = &cam_cc_cpas_ipe_nps_clk.clkr,
+ [CAM_CC_CPAS_SBI_CLK] = &cam_cc_cpas_sbi_clk.clkr,
+ [CAM_CC_CPAS_SFE_0_CLK] = &cam_cc_cpas_sfe_0_clk.clkr,
+ [CAM_CC_CPAS_SFE_1_CLK] = &cam_cc_cpas_sfe_1_clk.clkr,
+ [CAM_CC_CPAS_SFE_2_CLK] = &cam_cc_cpas_sfe_2_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr,
+ [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr,
+ [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK] = &cam_cc_csi5phytimer_clk.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK_SRC] = &cam_cc_csi5phytimer_clk_src.clkr,
+ [CAM_CC_CSI6PHYTIMER_CLK] = &cam_cc_csi6phytimer_clk.clkr,
+ [CAM_CC_CSI6PHYTIMER_CLK_SRC] = &cam_cc_csi6phytimer_clk_src.clkr,
+ [CAM_CC_CSI7PHYTIMER_CLK] = &cam_cc_csi7phytimer_clk.clkr,
+ [CAM_CC_CSI7PHYTIMER_CLK_SRC] = &cam_cc_csi7phytimer_clk_src.clkr,
+ [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr,
+ [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr,
+ [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
+ [CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
+ [CAM_CC_CSIPHY6_CLK] = &cam_cc_csiphy6_clk.clkr,
+ [CAM_CC_CSIPHY7_CLK] = &cam_cc_csiphy7_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_0_SHIFT_CLK] = &cam_cc_ife_0_shift_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_1_SHIFT_CLK] = &cam_cc_ife_1_shift_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_FAST_AHB_CLK] = &cam_cc_ife_2_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_2_SHIFT_CLK] = &cam_cc_ife_2_shift_clk.clkr,
+ [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_NPS_AHB_CLK] = &cam_cc_ipe_nps_ahb_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK] = &cam_cc_ipe_nps_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK_SRC] = &cam_cc_ipe_nps_clk_src.clkr,
+ [CAM_CC_IPE_NPS_FAST_AHB_CLK] = &cam_cc_ipe_nps_fast_ahb_clk.clkr,
+ [CAM_CC_IPE_PPS_CLK] = &cam_cc_ipe_pps_clk.clkr,
+ [CAM_CC_IPE_PPS_FAST_AHB_CLK] = &cam_cc_ipe_pps_fast_ahb_clk.clkr,
+ [CAM_CC_IPE_SHIFT_CLK] = &cam_cc_ipe_shift_clk.clkr,
+ [CAM_CC_JPEG_1_CLK] = &cam_cc_jpeg_1_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_PLL7] = &cam_cc_pll7.clkr,
+ [CAM_CC_PLL7_OUT_EVEN] = &cam_cc_pll7_out_even.clkr,
+ [CAM_CC_PLL8] = &cam_cc_pll8.clkr,
+ [CAM_CC_PLL8_OUT_EVEN] = &cam_cc_pll8_out_even.clkr,
+ [CAM_CC_PLL9] = &cam_cc_pll9.clkr,
+ [CAM_CC_PLL9_OUT_EVEN] = &cam_cc_pll9_out_even.clkr,
+ [CAM_CC_PLL9_OUT_ODD] = &cam_cc_pll9_out_odd.clkr,
+ [CAM_CC_PLL10] = &cam_cc_pll10.clkr,
+ [CAM_CC_PLL10_OUT_EVEN] = &cam_cc_pll10_out_even.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK] = &cam_cc_qdss_debug_clk.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK_SRC] = &cam_cc_qdss_debug_clk_src.clkr,
+ [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
+ [CAM_CC_SBI_CLK] = &cam_cc_sbi_clk.clkr,
+ [CAM_CC_SBI_FAST_AHB_CLK] = &cam_cc_sbi_fast_ahb_clk.clkr,
+ [CAM_CC_SBI_SHIFT_CLK] = &cam_cc_sbi_shift_clk.clkr,
+ [CAM_CC_SFE_0_CLK] = &cam_cc_sfe_0_clk.clkr,
+ [CAM_CC_SFE_0_CLK_SRC] = &cam_cc_sfe_0_clk_src.clkr,
+ [CAM_CC_SFE_0_FAST_AHB_CLK] = &cam_cc_sfe_0_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_0_SHIFT_CLK] = &cam_cc_sfe_0_shift_clk.clkr,
+ [CAM_CC_SFE_1_CLK] = &cam_cc_sfe_1_clk.clkr,
+ [CAM_CC_SFE_1_CLK_SRC] = &cam_cc_sfe_1_clk_src.clkr,
+ [CAM_CC_SFE_1_FAST_AHB_CLK] = &cam_cc_sfe_1_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_1_SHIFT_CLK] = &cam_cc_sfe_1_shift_clk.clkr,
+ [CAM_CC_SFE_2_CLK] = &cam_cc_sfe_2_clk.clkr,
+ [CAM_CC_SFE_2_CLK_SRC] = &cam_cc_sfe_2_clk_src.clkr,
+ [CAM_CC_SFE_2_FAST_AHB_CLK] = &cam_cc_sfe_2_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_2_SHIFT_CLK] = &cam_cc_sfe_2_shift_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_TITAN_TOP_SHIFT_CLK] = &cam_cc_titan_top_shift_clk.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sm8650_gdscs[] = {
+ [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc,
+ [CAM_CC_BPS_GDSC] = &cam_cc_bps_gdsc,
+ [CAM_CC_IFE_0_GDSC] = &cam_cc_ife_0_gdsc,
+ [CAM_CC_IFE_1_GDSC] = &cam_cc_ife_1_gdsc,
+ [CAM_CC_IFE_2_GDSC] = &cam_cc_ife_2_gdsc,
+ [CAM_CC_IPE_0_GDSC] = &cam_cc_ipe_0_gdsc,
+ [CAM_CC_SBI_GDSC] = &cam_cc_sbi_gdsc,
+ [CAM_CC_SFE_0_GDSC] = &cam_cc_sfe_0_gdsc,
+ [CAM_CC_SFE_1_GDSC] = &cam_cc_sfe_1_gdsc,
+ [CAM_CC_SFE_2_GDSC] = &cam_cc_sfe_2_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_sm8650_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x10000 },
+ [CAM_CC_DRV_BCR] = { 0x13310 },
+ [CAM_CC_ICP_BCR] = { 0x131a0 },
+ [CAM_CC_IFE_0_BCR] = { 0x11000 },
+ [CAM_CC_IFE_1_BCR] = { 0x12000 },
+ [CAM_CC_IFE_2_BCR] = { 0x12050 },
+ [CAM_CC_IPE_0_BCR] = { 0x1007c },
+ [CAM_CC_QDSS_DEBUG_BCR] = { 0x13298 },
+ [CAM_CC_SBI_BCR] = { 0x100e0 },
+ [CAM_CC_SFE_0_BCR] = { 0x13054 },
+ [CAM_CC_SFE_1_BCR] = { 0x130a4 },
+ [CAM_CC_SFE_2_BCR] = { 0x130f4 },
+};
+
+static const struct regmap_config cam_cc_sm8650_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1603c,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc cam_cc_sm8650_desc = {
+ .config = &cam_cc_sm8650_regmap_config,
+ .clks = cam_cc_sm8650_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm8650_clocks),
+ .resets = cam_cc_sm8650_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm8650_resets),
+ .gdscs = cam_cc_sm8650_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm8650_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm8650_match_table[] = {
+ { .compatible = "qcom,sm8650-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm8650_match_table);
+
+static int cam_cc_sm8650_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm8650_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config);
+
+ /* Keep clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13318); /* CAM_CC_DRV_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13314); /* CAM_CC_DRV_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x132ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13308); /* CAM_CC_SLEEP_CLK */
+
+ ret = qcom_cc_really_probe(pdev, &cam_cc_sm8650_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_sm8650_driver = {
+ .probe = cam_cc_sm8650_probe,
+ .driver = {
+ .name = "camcc-sm8650",
+ .of_match_table = cam_cc_sm8650_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm8650_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SM8650 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index d4227909d1fe..c51647e37df8 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -2574,6 +2574,9 @@ static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
a >> ALPHA_BITWIDTH);
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_ALPHA_EN, PLL_ALPHA_EN);
+
regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
/* Wait five micro seconds or more */
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index 0a3f846695b8..f8b9a1e93bef 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -2140,9 +2140,10 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
static struct clk_branch gcc_crypto_axi_clk = {
.halt_reg = 0x16010,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
- .enable_reg = 0x16010,
- .enable_mask = BIT(0),
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(15),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_crypto_axi_clk",
.parent_hws = (const struct clk_hw *[]) {
@@ -2156,9 +2157,10 @@ static struct clk_branch gcc_crypto_axi_clk = {
static struct clk_branch gcc_crypto_ahb_clk = {
.halt_reg = 0x16014,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
- .enable_reg = 0x16014,
- .enable_mask = BIT(0),
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(16),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_crypto_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index cf4a7b6e0b23..0559a33faf00 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "gpll6",
- .parent_hws = (const struct clk_hw*[]){
- &gpll0.clkr.hw,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll6_out_even",
.parent_hws = (const struct clk_hw*[]){
- &gpll0.clkr.hw,
+ &gpll6.clkr.hw,
},
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gpll7",
- .parent_hws = (const struct clk_hw*[]){
- &gpll0.clkr.hw,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index df9618ab7eea..fa5fe4c2a2ee 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -363,6 +363,43 @@ static int gdsc_disable(struct generic_pm_domain *domain)
return 0;
}
+static int gdsc_set_hwmode(struct generic_pm_domain *domain, struct device *dev, bool mode)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
+
+ ret = gdsc_hwctrl(sc, mode);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait for the GDSC to go through a power down and up cycle.
+ * If we poll the status register before the power cycle has
+ * finished, we might read incorrect values.
+ */
+ udelay(1);
+
+ /*
+ * When the GDSC is switched to HW mode, the hardware can disable the
+ * GDSC on its own. When the GDSC is switched back to SW mode, it is
+ * enabled again, hence we need to poll for the GDSC to complete the
+ * power-up.
+ */
+ if (!mode)
+ return gdsc_poll_status(sc, GDSC_ON);
+
+ return 0;
+}
+
+static bool gdsc_get_hwmode(struct generic_pm_domain *domain, struct device *dev)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+ u32 val;
+
+ regmap_read(sc->regmap, sc->gdscr, &val);
+
+ return !!(val & HW_CONTROL_MASK);
+}
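As a usage illustration of the two callbacks added above, here is a minimal sketch of how a consumer bound to an HW_CTRL_TRIGGER GDSC might flip between SW and HW control. It assumes dev_pm_genpd_set_hwmode() is the genpd entry point that ends up in gdsc_set_hwmode(); the session helpers are hypothetical and not part of this patch.

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

/* Hypothetical consumer helpers, illustrative only. */
static int example_session_start(struct device *dev)
{
	int ret;

	/* Power the domain up in SW mode (gdsc_enable()). */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	/* Hand the GDSC to hardware so it may collapse it between jobs. */
	ret = dev_pm_genpd_set_hwmode(dev, true);
	if (ret)
		pm_runtime_put(dev);

	return ret;
}

static void example_session_stop(struct device *dev)
{
	/* Back to SW mode; gdsc_set_hwmode() polls for GDSC_ON here. */
	dev_pm_genpd_set_hwmode(dev, false);
	pm_runtime_put(dev);
}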
+
static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
@@ -451,6 +488,10 @@ static int gdsc_init(struct gdsc *sc)
sc->pd.power_off = gdsc_disable;
if (!sc->pd.power_on)
sc->pd.power_on = gdsc_enable;
+ if (sc->flags & HW_CTRL_TRIGGER) {
+ sc->pd.set_hwmode_dev = gdsc_set_hwmode;
+ sc->pd.get_hwmode_dev = gdsc_get_hwmode;
+ }
ret = pm_genpd_init(&sc->pd, NULL, !on);
if (ret)
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 803512688336..1e2779b823d1 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -67,6 +67,7 @@ struct gdsc {
#define ALWAYS_ON BIT(6)
#define RETAIN_FF_ENABLE BIT(7)
#define NO_RET_PERIPH BIT(8)
+#define HW_CTRL_TRIGGER BIT(9)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
diff --git a/drivers/clk/qcom/videocc-sc7280.c b/drivers/clk/qcom/videocc-sc7280.c
index cdd59c6f60df..d55613a47ff7 100644
--- a/drivers/clk/qcom/videocc-sc7280.c
+++ b/drivers/clk/qcom/videocc-sc7280.c
@@ -236,7 +236,7 @@ static struct gdsc mvs0_gdsc = {
.name = "mvs0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
};
static struct gdsc mvsc_gdsc = {
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 016b596e03b3..cac10ccd362e 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -293,7 +293,7 @@ static struct gdsc mvs0_gdsc = {
.pd = {
.name = "mvs0_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -302,7 +302,7 @@ static struct gdsc mvs1_gdsc = {
.pd = {
.name = "mvs1_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index d73f747d2474..c601c35e6724 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <dt-bindings/clock/qcom,sm8450-videocc.h>
+#include <dt-bindings/clock/qcom,sm8650-videocc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -35,7 +35,7 @@ static const struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2300000000, 0 },
};
-static const struct alpha_pll_config video_cc_pll0_config = {
+static struct alpha_pll_config video_cc_pll0_config = {
.l = 0x25,
.alpha = 0x8000,
.config_ctl_val = 0x20485699,
@@ -66,7 +66,7 @@ static struct clk_alpha_pll video_cc_pll0 = {
},
};
-static const struct alpha_pll_config video_cc_pll1_config = {
+static struct alpha_pll_config video_cc_pll1_config = {
.l = 0x36,
.alpha = 0xb000,
.config_ctl_val = 0x20485699,
@@ -117,6 +117,14 @@ static const struct clk_parent_data video_cc_parent_data_1[] = {
{ .hw = &video_cc_pll1.clkr.hw },
};
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+};
+
static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
@@ -126,6 +134,16 @@ static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_sm8650[] = {
+ F(588000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(900000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1140000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1305000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1440000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1600000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs0_clk_src = {
.cmd_rcgr = 0x8000,
.mnd_width = 0,
@@ -149,6 +167,15 @@ static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_sm8650[] = {
+ F(840000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1110000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1500000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs1_clk_src = {
.cmd_rcgr = 0x8018,
.mnd_width = 0,
@@ -164,6 +191,26 @@ static struct clk_rcg2 video_cc_mvs1_clk_src = {
},
};
+static const struct freq_tbl ftbl_video_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x810c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
.reg = 0x80c4,
.shift = 0,
@@ -244,6 +291,26 @@ static struct clk_branch video_cc_mvs0_clk = {
},
};
+static struct clk_branch video_cc_mvs0_shift_clk = {
+ .halt_reg = 0x8128,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8128,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch video_cc_mvs0c_clk = {
.halt_reg = 0x8064,
.halt_check = BRANCH_HALT,
@@ -262,6 +329,26 @@ static struct clk_branch video_cc_mvs0c_clk = {
},
};
+static struct clk_branch video_cc_mvs0c_shift_clk = {
+ .halt_reg = 0x812c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x812c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x812c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch video_cc_mvs1_clk = {
.halt_reg = 0x80e0,
.halt_check = BRANCH_HALT_SKIP,
@@ -282,6 +369,26 @@ static struct clk_branch video_cc_mvs1_clk = {
},
};
+static struct clk_branch video_cc_mvs1_shift_clk = {
+ .halt_reg = 0x8130,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8130,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8130,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch video_cc_mvs1c_clk = {
.halt_reg = 0x8090,
.halt_check = BRANCH_HALT,
@@ -300,6 +407,26 @@ static struct clk_branch video_cc_mvs1c_clk = {
},
};
+static struct clk_branch video_cc_mvs1c_shift_clk = {
+ .halt_reg = 0x8134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8134,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc video_cc_mvs0c_gdsc = {
.gdscr = 0x804c,
.en_rest_wait_val = 0x2,
@@ -363,6 +490,7 @@ static struct clk_regmap *video_cc_sm8550_clocks[] = {
[VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
[VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
[VIDEO_CC_PLL1] = &video_cc_pll1.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = NULL,
};
static struct gdsc *video_cc_sm8550_gdscs[] = {
@@ -380,6 +508,7 @@ static const struct qcom_reset_map video_cc_sm8550_resets[] = {
[CVP_VIDEO_CC_MVS1C_BCR] = { 0x8074 },
[VIDEO_CC_MVS0C_CLK_ARES] = { .reg = 0x8064, .bit = 2, .udelay = 1000 },
[VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0x8090, .bit = 2, .udelay = 1000 },
+ [VIDEO_CC_XO_CLK_ARES] = { .reg = 0x8124, .bit = 2, .udelay = 100 },
};
static const struct regmap_config video_cc_sm8550_regmap_config = {
@@ -402,6 +531,7 @@ static struct qcom_cc_desc video_cc_sm8550_desc = {
static const struct of_device_id video_cc_sm8550_match_table[] = {
{ .compatible = "qcom,sm8550-videocc" },
+ { .compatible = "qcom,sm8650-videocc" },
{ }
};
MODULE_DEVICE_TABLE(of, video_cc_sm8550_match_table);
@@ -410,6 +540,7 @@ static int video_cc_sm8550_probe(struct platform_device *pdev)
{
struct regmap *regmap;
int ret;
+ u32 sleep_clk_offset = 0x8140;
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
@@ -425,12 +556,27 @@ static int video_cc_sm8550_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-videocc")) {
+ sleep_clk_offset = 0x8150;
+ video_cc_pll0_config.l = 0x1e;
+ video_cc_pll0_config.alpha = 0xa000;
+ video_cc_pll1_config.l = 0x2b;
+ video_cc_pll1_config.alpha = 0xc000;
+ video_cc_mvs0_clk_src.freq_tbl = ftbl_video_cc_mvs0_clk_src_sm8650;
+ video_cc_mvs1_clk_src.freq_tbl = ftbl_video_cc_mvs1_clk_src_sm8650;
+ video_cc_sm8550_clocks[VIDEO_CC_MVS0_SHIFT_CLK] = &video_cc_mvs0_shift_clk.clkr;
+ video_cc_sm8550_clocks[VIDEO_CC_MVS0C_SHIFT_CLK] = &video_cc_mvs0c_shift_clk.clkr;
+ video_cc_sm8550_clocks[VIDEO_CC_MVS1_SHIFT_CLK] = &video_cc_mvs1_shift_clk.clkr;
+ video_cc_sm8550_clocks[VIDEO_CC_MVS1C_SHIFT_CLK] = &video_cc_mvs1c_shift_clk.clkr;
+ video_cc_sm8550_clocks[VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr;
+ }
+
clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
/* Keep some clocks always-on */
qcom_branch_set_clk_en(regmap, 0x80f4); /* VIDEO_CC_AHB_CLK */
- qcom_branch_set_clk_en(regmap, 0x8140); /* VIDEO_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, sleep_clk_offset); /* VIDEO_CC_SLEEP_CLK */
qcom_branch_set_clk_en(regmap, 0x8124); /* VIDEO_CC_XO_CLK */
ret = qcom_cc_really_probe(pdev, &video_cc_sm8550_desc, regmap);
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index 25b8e1a80ddc..b32a59fe55e7 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -4,7 +4,6 @@
* Copyright (C) 2020 Zong Li
*/
-#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -537,13 +536,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
return r;
}
- r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
- if (r) {
- dev_warn(dev, "Failed to register clkdev for %s: %d\n",
- init.name, r);
- return r;
- }
-
pd->hw_clks.hws[i] = &pic->hw;
}
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index ac0091b4ce24..be375ce0149c 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -132,7 +132,6 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
for (i = 0; i < desc->hw_clks->num ; i++) {
struct clk_hw *hw = desc->hw_clks->hws[i];
- struct ccu_common *common = hw_to_ccu_common(hw);
const char *name;
if (!hw)
@@ -147,14 +146,21 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
pr_err("Couldn't register clock %d - %s\n", i, name);
goto err_clk_unreg;
}
+ }
+
+ for (i = 0; i < desc->num_ccu_clks; i++) {
+ struct ccu_common *cclk = desc->ccu_clks[i];
+
+ if (!cclk)
+ continue;
- if (common->max_rate)
- clk_hw_set_rate_range(hw, common->min_rate,
- common->max_rate);
+ if (cclk->max_rate)
+ clk_hw_set_rate_range(&cclk->hw, cclk->min_rate,
+ cclk->max_rate);
else
- WARN(common->min_rate,
+ WARN(cclk->min_rate,
"No max_rate, ignoring min_rate of clock %d - %s\n",
- i, name);
+ i, clk_hw_get_name(&cclk->hw));
}
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 34faa0320ece..95dd4660b5b6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -134,6 +134,16 @@ config RDA_TIMER
help
Enables the support for the RDA Micro timer driver.
+config REALTEK_OTTO_TIMER
+ bool "Clocksource/timer for the Realtek Otto platform" if COMPILE_TEST
+ select TIMER_OF
+ help
+ This driver adds support for the timers found in the Realtek RTL83xx
+ and RTL93xx SoC series. This includes chips such as RTL8380, RTL8381
+ and RTL8382, as well as chips from the RTL839x series, such as RTL8390,
+ RTL8391, RTL8392, RTL8393 and RTL8396, and chips of the RTL930x series
+ such as RTL9301, RTL9302 or RTL9303.
+
config SUN4I_TIMER
bool "Sun4i timer driver" if COMPILE_TEST
depends on HAS_IOMEM
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 4bb856e4df55..22743785299e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
obj-$(CONFIG_RDA_TIMER) += timer-rda.o
+obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5bb43cc1a8df..aeafc74181f0 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1556,7 +1556,7 @@ static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
void __iomem *base;
- int ret, irq = 0;
+ int ret, irq;
if (arch_timer_mem_use_virtual)
irq = frame->virt_irq;
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index ab1c8c2b66b8..a05cfaab5f84 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -343,7 +343,7 @@ static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
- int err = 0;
+ int err;
/*
* In A9 r2p0 the comparators for each processor with the global timer
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index b3ae38f36720..110347707ff9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -19,6 +19,7 @@
static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
static int gic_timer_irq;
static unsigned int gic_frequency;
+static unsigned int gic_count_width;
static bool __read_mostly gic_clock_unstable;
static void gic_clocksource_unstable(char *reason);
@@ -186,18 +187,21 @@ static void gic_clocksource_unstable(char *reason)
static int __init __gic_clocksource_init(void)
{
- unsigned int count_width;
int ret;
/* Set clocksource mask. */
- count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
- count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
- count_width *= 4;
- count_width += 32;
- gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
+ gic_count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
+ gic_count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
+ gic_count_width *= 4;
+ gic_count_width += 32;
+ gic_clocksource.mask = CLOCKSOURCE_MASK(gic_count_width);
/* Calculate a somewhat reasonable rating value. */
- gic_clocksource.rating = 200 + gic_frequency / 10000000;
+ if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ))
+ gic_clocksource.rating = 300; /* Good when frequency is stable */
+ else
+ gic_clocksource.rating = 200;
+ gic_clocksource.rating += clamp(gic_frequency / 10000000, 0, 99);
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
@@ -260,7 +264,7 @@ static int __init gic_clocksource_of_init(struct device_node *node)
if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) {
sched_clock_register(mips_cm_is64 ?
gic_read_count_64 : gic_read_count_2x32,
- 64, gic_frequency);
+ gic_count_width, gic_frequency);
}
return 0;
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 26919556ef5f..b72b36e0abed 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
struct sh_cmt_channel *ch = dev_id;
+ unsigned long flags;
/* clear flags */
sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
@@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_SKIPEVENT;
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
if (ch->flags & FLAG_REPROGRAM) {
ch->flags &= ~FLAG_REPROGRAM;
sh_cmt_clock_event_program_verify(ch, 1);
@@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_IRQCONTEXT;
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
return IRQ_HANDLED;
}
@@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
+ unsigned long flags;
BUG_ON(!clockevent_state_oneshot(ced));
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
if (likely(ch->flags & FLAG_IRQCONTEXT))
ch->next_match_value = delta - 1;
else
- sh_cmt_set_next(ch, delta - 1);
+ __sh_cmt_set_next(ch, delta - 1);
+
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
return 0;
}
diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c
new file mode 100644
index 000000000000..8a3068b36e75
--- /dev/null
+++ b/drivers/clocksource/timer-rtl-otto.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/printk.h>
+#include <linux/sched_clock.h>
+#include "timer-of.h"
+
+#define RTTM_DATA 0x0
+#define RTTM_CNT 0x4
+#define RTTM_CTRL 0x8
+#define RTTM_INT 0xc
+
+#define RTTM_CTRL_ENABLE BIT(28)
+#define RTTM_INT_PENDING BIT(16)
+#define RTTM_INT_ENABLE BIT(20)
+
+/*
+ * The Otto platform provides multiple 28-bit timers/counters with the
+ * following operating logic. When enabled, a timer counts up. For each timer
+ * a maximum counter value can be set as an end marker; when the end marker is
+ * reached, the timer fires an interrupt. If the timer "overflows", either by
+ * reaching the end marker or by wrapping past 0x0fffffff, the counter is
+ * reset to 0. When this happens in operating mode COUNTER the timer stops;
+ * in mode TIMER it continues to count up.
+ */
+#define RTTM_CTRL_COUNTER 0
+#define RTTM_CTRL_TIMER BIT(24)
+
+#define RTTM_BIT_COUNT 28
+#define RTTM_MIN_DELTA 8
+#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28)
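To make these limits concrete, a rough worked example (editorial sketch, not part of the driver): at the 3.125 MHz reference rate pinned below, one tick is 320 ns, so the 28-bit end marker bounds a one-shot event to roughly 2.6 microseconds up to about 86 seconds.

#include <linux/types.h>

/* Illustrative arithmetic only, assuming the 3.125 MHz tick rate. */
#define EXAMPLE_RATE_HZ		3125000ULL

static inline u64 example_delta_to_ns(u64 delta_ticks)
{
	/* 1 tick = 1e9 / 3125000 ns = 320 ns */
	return delta_ticks * 1000000000ULL / EXAMPLE_RATE_HZ;
}

/*
 * example_delta_to_ns(RTTM_MIN_DELTA) ->           2560 ns (~2.6 us)
 * example_delta_to_ns(RTTM_MAX_DELTA) -> 85899345600 ns (~85.9 s)
 */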
+
+/*
+ * Timers are derived from the LXB clock frequency. Usually this is a fixed
+ * multiple of the 25 MHz oscillator. The 930X SoC is an exception: its LXB
+ * clock has only dividers and uses the 2.45 GHz switch PLL as its base. The
+ * only meaningful frequencies we can achieve from that are 175.000 MHz and
+ * 153.125 MHz. The greatest common divisor of all of these possible speeds
+ * is 3125000 Hz, so pin the timers to this 3.125 MHz reference frequency.
+ */
+#define RTTM_TICKS_PER_SEC 3125000
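A quick sanity check of the common-divisor claim above, sketched with the kernel's gcd() helper from <linux/gcd.h>: 175 MHz, 153.125 MHz and any multiple of 25 MHz all divide evenly by 3.125 MHz.

#include <linux/gcd.h>

/* gcd(gcd(175000000, 153125000), 25000000) == 3125000 */
static inline unsigned long example_lxb_reference_hz(void)
{
	return gcd(gcd(175000000UL, 153125000UL), 25000000UL);
}

/* 175000000 / 3125000 == 56, 153125000 / 3125000 == 49, 25000000 / 3125000 == 8 */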
+
+struct rttm_cs {
+ struct timer_of to;
+ struct clocksource cs;
+};
+
+/* Simple internal register functions */
+static inline void rttm_set_counter(void __iomem *base, unsigned int counter)
+{
+ iowrite32(counter, base + RTTM_CNT);
+}
+
+static inline unsigned int rttm_get_counter(void __iomem *base)
+{
+ return ioread32(base + RTTM_CNT);
+}
+
+static inline void rttm_set_period(void __iomem *base, unsigned int period)
+{
+ iowrite32(period, base + RTTM_DATA);
+}
+
+static inline void rttm_disable_timer(void __iomem *base)
+{
+ iowrite32(0, base + RTTM_CTRL);
+}
+
+static inline void rttm_enable_timer(void __iomem *base, u32 mode, u32 divisor)
+{
+ iowrite32(RTTM_CTRL_ENABLE | mode | divisor, base + RTTM_CTRL);
+}
+
+static inline void rttm_ack_irq(void __iomem *base)
+{
+ iowrite32(ioread32(base + RTTM_INT) | RTTM_INT_PENDING, base + RTTM_INT);
+}
+
+static inline void rttm_enable_irq(void __iomem *base)
+{
+ iowrite32(RTTM_INT_ENABLE, base + RTTM_INT);
+}
+
+static inline void rttm_disable_irq(void __iomem *base)
+{
+ iowrite32(0, base + RTTM_INT);
+}
+
+/* Aggregated control functions for kernel clock framework */
+#define RTTM_DEBUG(base) \
+ pr_debug("------------- %d %p\n", \
+ smp_processor_id(), base)
+
+static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ rttm_ack_irq(to->of_base.base);
+ RTTM_DEBUG(to->of_base.base);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void rttm_stop_timer(void __iomem *base)
+{
+ rttm_disable_timer(base);
+ rttm_ack_irq(base);
+}
+
+static void rttm_start_timer(struct timer_of *to, u32 mode)
+{
+ rttm_set_counter(to->of_base.base, 0);
+ rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC);
+}
+
+static int rttm_next_event(unsigned long delta, struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+ rttm_set_period(to->of_base.base, delta);
+ rttm_start_timer(to, RTTM_CTRL_COUNTER);
+
+ return 0;
+}
+
+static int rttm_state_oneshot(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+ rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
+ rttm_start_timer(to, RTTM_CTRL_COUNTER);
+
+ return 0;
+}
+
+static int rttm_state_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+ rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
+ rttm_start_timer(to, RTTM_CTRL_TIMER);
+
+ return 0;
+}
+
+static int rttm_state_shutdown(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+
+ return 0;
+}
+
+static void rttm_setup_timer(void __iomem *base)
+{
+ RTTM_DEBUG(base);
+ rttm_stop_timer(base);
+ rttm_set_period(base, 0);
+}
+
+static u64 rttm_read_clocksource(struct clocksource *cs)
+{
+ struct rttm_cs *rcs = container_of(cs, struct rttm_cs, cs);
+
+ return rttm_get_counter(rcs->to.of_base.base);
+}
+
+/* Module initialization part. */
+static DEFINE_PER_CPU(struct timer_of, rttm_to) = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK | TIMER_OF_IRQ,
+ .of_irq = {
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .handler = rttm_timer_interrupt,
+ },
+ .clkevt = {
+ .rating = 400,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_periodic = rttm_state_periodic,
+ .set_state_shutdown = rttm_state_shutdown,
+ .set_state_oneshot = rttm_state_oneshot,
+ .set_next_event = rttm_next_event
+ },
+};
+
+static int rttm_enable_clocksource(struct clocksource *cs)
+{
+ struct rttm_cs *rcs = container_of(cs, struct rttm_cs, cs);
+
+ rttm_disable_irq(rcs->to.of_base.base);
+ rttm_setup_timer(rcs->to.of_base.base);
+ rttm_enable_timer(rcs->to.of_base.base, RTTM_CTRL_TIMER,
+ rcs->to.of_clk.rate / RTTM_TICKS_PER_SEC);
+
+ return 0;
+}
+
+static struct rttm_cs rttm_cs = {
+ .to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+ },
+ .cs = {
+ .name = "realtek_otto_timer",
+ .rating = 400,
+ .mask = CLOCKSOURCE_MASK(RTTM_BIT_COUNT),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = rttm_read_clocksource,
+ }
+};
+
+static u64 notrace rttm_read_clock(void)
+{
+ return rttm_get_counter(rttm_cs.to.of_base.base);
+}
+
+static int rttm_cpu_starting(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&rttm_to, cpu);
+
+ RTTM_DEBUG(to->of_base.base);
+ to->clkevt.cpumask = cpumask_of(cpu);
+ irq_force_affinity(to->of_irq.irq, to->clkevt.cpumask);
+ clockevents_config_and_register(&to->clkevt, RTTM_TICKS_PER_SEC,
+ RTTM_MIN_DELTA, RTTM_MAX_DELTA);
+ rttm_enable_irq(to->of_base.base);
+
+ return 0;
+}
+
+static int __init rttm_probe(struct device_node *np)
+{
+ unsigned int cpu, cpu_rollback;
+ struct timer_of *to;
+ unsigned int clkidx = num_possible_cpus();
+
+ /* Use the first n timers as per-CPU clock event generators */
+ for_each_possible_cpu(cpu) {
+ to = per_cpu_ptr(&rttm_to, cpu);
+ to->of_irq.index = to->of_base.index = cpu;
+ if (timer_of_init(np, to)) {
+ pr_err("setup of timer %d failed\n", cpu);
+ goto rollback;
+ }
+ rttm_setup_timer(to->of_base.base);
+ }
+
+ /* Activate the (n + 1)'th timer as a stable CPU clocksource. */
+ to = &rttm_cs.to;
+ to->of_base.index = clkidx;
+ timer_of_init(np, to);
+ if (rttm_cs.to.of_base.base && rttm_cs.to.of_clk.rate) {
+ rttm_enable_clocksource(&rttm_cs.cs);
+ clocksource_register_hz(&rttm_cs.cs, RTTM_TICKS_PER_SEC);
+ sched_clock_register(rttm_read_clock, RTTM_BIT_COUNT, RTTM_TICKS_PER_SEC);
+ } else {
+ pr_err("setup of timer %d as clocksource failed\n", clkidx);
+ }
+
+ return cpuhp_setup_state(CPUHP_AP_REALTEK_TIMER_STARTING,
+ "timer/realtek:online",
+ rttm_cpu_starting, NULL);
+rollback:
+ pr_err("timer registration failed\n");
+ for_each_possible_cpu(cpu_rollback) {
+ if (cpu_rollback == cpu)
+ break;
+ to = per_cpu_ptr(&rttm_to, cpu_rollback);
+ timer_of_cleanup(to);
+ }
+
+ return -EINVAL;
+}
+
+TIMER_OF_DECLARE(otto_timer, "realtek,otto-timer", rttm_probe);
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 0664ef969f79..186e73d6ccb4 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -465,7 +465,7 @@ static int stm32_count_events_configure(struct counter_device *counter)
ret = stm32_count_capture_configure(counter, event_node->channel, true);
if (ret)
return ret;
- dier |= TIM_DIER_CC_IE(event_node->channel);
+ dier |= TIM_DIER_CCxIE(event_node->channel + 1);
break;
default:
/* should never reach this path */
@@ -478,7 +478,7 @@ static int stm32_count_events_configure(struct counter_device *counter)
/* check for disabled capture events */
for (i = 0 ; i < priv->nchannels; i++) {
- if (!(dier & TIM_DIER_CC_IE(i))) {
+ if (!(dier & TIM_DIER_CCxIE(i + 1))) {
ret = stm32_count_capture_configure(counter, i, false);
if (ret)
return ret;
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 072b11fd6b32..825ae22c3ebc 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -376,6 +377,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
+ struct clk *clk;
int err;
counter = devm_counter_alloc(dev, sizeof(*priv));
@@ -415,6 +417,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n");
+
err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 94e55c40970a..10cda6f2fe1d 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -262,6 +262,18 @@ config LOONGSON2_CPUFREQ
If in doubt, say N.
endif
+if LOONGARCH
+config LOONGSON3_CPUFREQ
+ tristate "Loongson3 CPUFreq Driver"
+ help
+ This option adds a CPUFreq driver for Loongson processors which
+ support software-configurable CPU frequency.
+
+ Loongson-3 family processors support this feature.
+
+ If in doubt, say N.
+endif
+
if SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 438c9e75a04d..97c2d4f15d76 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -71,6 +71,7 @@ config X86_AMD_PSTATE_DEFAULT_MODE
config X86_AMD_PSTATE_UT
tristate "selftest for AMD Processor P-State driver"
depends on X86 && ACPI_PROCESSOR
+ depends on X86_AMD_PSTATE
default n
help
This kernel module is used for testing. It's safe to say M here.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 8d141c71b016..0f184031dd12 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
# Other platform drivers
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON3_CPUFREQ) += loongson3_cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 37f1cdf46d29..a8ca625a98b8 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -50,8 +50,6 @@ enum {
#define AMD_MSR_RANGE (0x7)
#define HYGON_MSR_RANGE (0x7)
-#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
-
struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
@@ -890,8 +888,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost)
+ if (acpi_cpufreq_driver.set_boost) {
set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+ policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
+ }
return result;
@@ -906,7 +906,7 @@ err_free:
return result;
}
-static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
@@ -919,8 +919,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
free_cpumask_var(data->freqdomain_cpus);
kfree(policy->freq_table);
kfree(data);
-
- return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index f04ae67dda37..66b73c308ce6 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -26,10 +26,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
-#include <linux/amd-pstate.h>
#include <acpi/cppc_acpi.h>
+#include "amd-pstate.h"
+
/*
* Abbreviations:
* amd_pstate_ut: used as a shortform for AMD P-State unit test.
@@ -201,6 +202,7 @@ static void amd_pstate_ut_check_freq(u32 index)
int cpu = 0;
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
+ u32 nominal_freq_khz;
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
@@ -208,13 +210,14 @@ static void amd_pstate_ut_check_freq(u32 index)
break;
cpudata = policy->driver_data;
- if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
- (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
+ nominal_freq_khz = cpudata->nominal_freq * 1000;
+ if (!((cpudata->max_freq >= nominal_freq_khz) &&
+ (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
(cpudata->min_freq > 0))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ __func__, cpu, cpudata->max_freq, nominal_freq_khz,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
goto skip_test;
}
@@ -228,13 +231,13 @@ static void amd_pstate_ut_check_freq(u32 index)
if (cpudata->boost_supported) {
if ((policy->max == cpudata->max_freq) ||
- (policy->max == cpudata->nominal_freq))
+ (policy->max == nominal_freq_khz))
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
- cpudata->nominal_freq);
+ nominal_freq_khz);
goto skip_test;
}
} else {
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1b7e82a0ad2e..68c616b572f2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
-#include <linux/amd-pstate.h>
#include <linux/topology.h>
#include <acpi/processor.h>
@@ -46,22 +45,47 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
+
+#include "amd-pstate.h"
#include "amd-pstate-trace.h"
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
+#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
#define CPPC_HIGHEST_PERF_PERFORMANCE 196
#define CPPC_HIGHEST_PERF_DEFAULT 166
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
/*
- * TODO: We need more time to fine tune processors with shared memory solution
- * with community together.
- *
- * There are some performance drops on the CPU benchmarks which reports from
- * Suse. We are co-working with them to fine tune the shared memory solution. So
- * we disable it by default to go acpi-cpufreq on these processors and add a
- * module parameter to be able to enable it manually for debugging.
+ * enum amd_pstate_mode - driver working modes of the AMD P-State driver
*/
+enum amd_pstate_mode {
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_UNDEFINED] = "undefined",
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ [AMD_PSTATE_GUIDED] = "guided",
+ NULL,
+};
+
+struct quirk_entry {
+ u32 nominal_freq;
+ u32 lowest_freq;
+};
+
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
@@ -125,7 +149,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
* broken BIOS lack of nominal_freq and lowest_freq capabilities
* definition in ACPI tables
*/
- if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
quirks = dmi->driver_data;
pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
return 1;
@@ -167,7 +191,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
u64 epp;
int ret;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
if (!cppc_req_cached) {
epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
&cppc_req_cached);
@@ -215,12 +239,32 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
return index;
}
+static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch)
+{
+ if (fast_switch)
+ wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+ else
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ READ_ONCE(cpudata->cppc_req_cached));
+}
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+
+static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+{
+ static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
+}
+
static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
int ret;
struct cppc_perf_ctrls perf_ctrls;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
u64 value = READ_ONCE(cpudata->cppc_req_cached);
value &= ~GENMASK_ULL(31, 24);
@@ -231,6 +275,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
if (!ret)
cpudata->epp_cached = epp;
} else {
+ amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, false);
+
perf_ctrls.energy_perf = epp;
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
if (ret) {
@@ -249,10 +296,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
int epp = -EINVAL;
int ret;
- if (!pref_index) {
- pr_debug("EPP pref_index is invalid\n");
- return -EINVAL;
- }
+ if (!pref_index)
+ epp = cpudata->epp_default;
if (epp == -EINVAL)
epp = epp_values[pref_index];
@@ -420,16 +465,6 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
-{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
-}
-
static void cppc_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
@@ -443,16 +478,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata,
cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
-
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
-{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
-}
-
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
u64 aperf, mperf, tsc;
@@ -489,7 +514,10 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
{
+ unsigned long max_freq;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
+ u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
u64 value = prev;
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
@@ -498,6 +526,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ max_freq = READ_ONCE(cpudata->max_limit_freq);
+ policy->cur = div_u64(des_perf * max_freq, max_perf);
+
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
des_perf = 0;
@@ -509,6 +540,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(des_perf);
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(unsigned long, nominal_perf, max_perf);
+
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(max_perf);
@@ -619,10 +654,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long capacity)
{
unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf, max_freq;
+ cap_perf, lowest_nonlinear_perf;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
- unsigned int target_freq;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
@@ -630,7 +664,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- max_freq = READ_ONCE(cpudata->max_freq);
des_perf = cap_perf;
if (target_perf < capacity)
@@ -648,51 +681,111 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
max_perf = min_perf;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
- target_freq = div_u64(des_perf * max_freq, max_perf);
- policy->cur = target_freq;
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
cpufreq_cpu_put(policy);
}
-static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ u32 highest_perf, nominal_perf, nominal_freq, max_freq;
int ret;
- if (!cpudata->boost_supported) {
- pr_err("Boost mode is not supported by this processor or SBIOS\n");
- return -EINVAL;
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~GENMASK_ULL(7, 0);
+ value |= on ? highest_perf : nominal_perf;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret) {
+ cpufreq_cpu_release(policy);
+ pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
+ cpudata->cpu, ret);
+ return ret;
+ }
}
- if (state)
- policy->cpuinfo.max_freq = cpudata->max_freq;
- else
- policy->cpuinfo.max_freq = cpudata->nominal_freq;
+ if (on)
+ policy->cpuinfo.max_freq = max_freq;
+ else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
+ policy->cpuinfo.max_freq = nominal_freq * 1000;
policy->max = policy->cpuinfo.max_freq;
- ret = freq_qos_update_request(&cpudata->req[1],
- policy->cpuinfo.max_freq);
- if (ret < 0)
- return ret;
+ if (cppc_state == AMD_PSTATE_PASSIVE) {
+ ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
+ if (ret < 0)
+ pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
+ }
- return 0;
+ return ret < 0 ? ret : 0;
}
-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
- u32 highest_perf, nominal_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EOPNOTSUPP;
+ }
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_cpu_boost_update(policy, state);
+ WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
+ policy->boost_enabled = !ret ? state : false;
+ refresh_frequency_limits(policy);
+ mutex_unlock(&amd_pstate_driver_lock);
- if (highest_perf <= nominal_perf)
- return;
+ return ret;
+}
+
+static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
+{
+ u64 boost_val;
+ int ret = -1;
- cpudata->boost_supported = true;
+ /*
+ * If the platform has no CPB support, or CPB is disabled, initialize the driver's
+ * boost_enabled state to false; this is not an error for the cpufreq core to handle.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
+ pr_debug_once("Boost CPB capabilities not present in the processor\n");
+ ret = 0;
+ goto exit_err;
+ }
+
+ /* At least one CPU supports CPB, even if others fail to set it up later on */
current_pstate_driver->boost_enabled = true;
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ if (ret) {
+ pr_err_once("failed to read initial CPU boost state!\n");
+ ret = -EIO;
+ goto exit_err;
+ }
+
+ if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
+ cpudata->boost_supported = true;
+
+ return 0;
+
+exit_err:
+ cpudata->boost_supported = false;
+ return ret;
}
static void amd_perf_ctl_reset(unsigned int cpu)
@@ -721,7 +814,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
{
int ret;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
u64 cap1;
ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
@@ -817,8 +910,12 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
u32 transition_delay_ns;
transition_delay_ns = cppc_get_transition_latency(cpu);
- if (transition_delay_ns == CPUFREQ_ETERNAL)
- return AMD_PSTATE_TRANSITION_DELAY;
+ if (transition_delay_ns == CPUFREQ_ETERNAL) {
+ if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+ return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
+ else
+ return AMD_PSTATE_TRANSITION_DELAY;
+ }
return transition_delay_ns / NSEC_PER_USEC;
}
@@ -889,12 +986,30 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
WRITE_ONCE(cpudata->max_freq, max_freq);
+ /*
+ * The values below must be initialized correctly, otherwise the driver will fail to load.
+ * max_freq is calculated as (nominal_freq * highest_perf) / nominal_perf.
+ * lowest_nonlinear_freq is a value in the range [min_freq, nominal_freq].
+ * Check the _CPC objects in the ACPI tables if any of these values are incorrect.
+ */
+ if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
+ pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
+ min_freq, max_freq, nominal_freq * 1000);
+ return -EINVAL;
+ }
+
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+ pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
+ lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+ return -EINVAL;
+ }
+
return 0;
}
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, ret;
+ int min_freq, max_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
@@ -923,18 +1038,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
min_freq = READ_ONCE(cpudata->min_freq);
max_freq = READ_ONCE(cpudata->max_freq);
- nominal_freq = READ_ONCE(cpudata->nominal_freq);
-
- if (min_freq <= 0 || max_freq <= 0 ||
- nominal_freq <= 0 || min_freq > max_freq) {
- dev_err(dev,
- "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
- min_freq, max_freq, nominal_freq);
- ret = -EINVAL;
- goto free_cpudata1;
- }
policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
@@ -945,10 +1054,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
+ policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- if (boot_cpu_has(X86_FEATURE_CPPC))
+ if (cpu_feature_enabled(X86_FEATURE_CPPC))
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
@@ -970,7 +1081,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- amd_pstate_boost_init(cpudata);
if (!current_pstate_driver->adjust_perf)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
@@ -983,7 +1093,7 @@ free_cpudata1:
return ret;
}
-static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -991,8 +1101,6 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
freq_qos_remove_request(&cpudata->req[0]);
policy->fast_switch_possible = false;
kfree(cpudata);
-
- return 0;
}
static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
@@ -1181,7 +1289,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
cppc_state = mode;
- if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+ if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0;
for_each_present_cpu(cpu) {
@@ -1354,7 +1462,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, ret;
+ int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
@@ -1385,17 +1493,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
min_freq = READ_ONCE(cpudata->min_freq);
max_freq = READ_ONCE(cpudata->max_freq);
- nominal_freq = READ_ONCE(cpudata->nominal_freq);
- if (min_freq <= 0 || max_freq <= 0 ||
- nominal_freq <= 0 || min_freq > max_freq) {
- dev_err(dev,
- "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
- min_freq, max_freq, nominal_freq);
- ret = -EINVAL;
- goto free_cpudata1;
- }
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
@@ -1404,11 +1507,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+ cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
+ policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+
/*
* Set the policy to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
@@ -1419,7 +1524,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret)
return ret;
@@ -1430,7 +1535,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
- amd_pstate_boost_init(cpudata);
return 0;
@@ -1439,7 +1543,7 @@ free_cpudata1:
return ret;
}
-static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1449,7 +1553,6 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
}
pr_debug("CPU %d exiting\n", policy->cpu);
- return 0;
}
static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
@@ -1509,7 +1612,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
epp = 0;
/* Set initial EPP value */
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
}
@@ -1532,6 +1635,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
amd_pstate_epp_update_limit(policy);
+ /*
+ * policy->cur is never updated with the amd_pstate_epp driver, but it
+ * is used as a stale frequency value. So, keep it within limits.
+ */
+ policy->cur = policy->min;
+
return 0;
}
@@ -1548,7 +1657,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.max_perf = max_perf;
@@ -1582,7 +1691,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
value = READ_ONCE(cpudata->cppc_req_cached);
mutex_lock(&amd_pstate_limits_lock);
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
/* Set max perf same as min perf */
@@ -1686,6 +1795,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
.update_limits = amd_pstate_update_limits,
+ .set_boost = amd_pstate_set_boost,
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
@@ -1709,6 +1819,46 @@ static int __init amd_pstate_set_driver(int mode_idx)
return -EINVAL;
}
+/*
+ * The CPPC feature is not supported on family 17H processors with model IDs ranging
+ * from 0x10 to 0x2F. Show a debug message that helps check whether the CPU has CPPC
+ * support when the driver fails to load.
+ */
+static bool amd_cppc_supported(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ bool warn = false;
+
+ if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
+ pr_debug_once("CPPC feature is not supported by the processor\n");
+ return false;
+ }
+
+ /*
+ * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC,
+ * the AMD Pstate driver may not function correctly.
+ * Check the CPPC flag and display a warning message if the platform supports CPPC.
+ * Note: the check below will not abort the driver registration process, because it
+ * is added only for debugging purposes.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ if (c->x86_model > 0x60 && c->x86_model < 0xaf)
+ warn = true;
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
+ (c->x86_model > 0x40 && c->x86_model < 0xaf))
+ warn = true;
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
+ warn = true;
+ }
+ }
+
+ if (warn)
+ pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
+ "Please enable it if your BIOS has the CPPC option.\n");
+ return true;
+}
+
static int __init amd_pstate_init(void)
{
struct device *dev_root;
@@ -1717,6 +1867,11 @@ static int __init amd_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
+ /* show debug message only if CPPC is not supported */
+ if (!amd_cppc_supported())
+ return -EOPNOTSUPP;
+
+ /* show warning message when BIOS broken or ACPI disabled */
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
@@ -1731,35 +1886,43 @@ static int __init amd_pstate_init(void)
/* check if this machine need CPPC quirks */
dmi_check_system(amd_pstate_quirks_table);
- switch (cppc_state) {
- case AMD_PSTATE_UNDEFINED:
+ /*
+ * Determine the driver mode from the command line or the kernel config.
+ * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
+ * Command line options will override the kernel config settings.
+ */
+
+ if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
* 2. Server platforms
- * 3. Shared memory designs
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
- amd_pstate_acpi_pm_profile_server() ||
- !boot_cpu_has(X86_FEATURE_CPPC)) {
+ amd_pstate_acpi_pm_profile_server()) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
- ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
- if (ret)
- return ret;
- break;
+ /* get driver mode from kernel config option [1:4] */
+ cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
+ }
+
+ switch (cppc_state) {
case AMD_PSTATE_DISABLE:
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
case AMD_PSTATE_PASSIVE:
case AMD_PSTATE_ACTIVE:
case AMD_PSTATE_GUIDED:
+ ret = amd_pstate_set_driver(cppc_state);
+ if (ret)
+ return ret;
break;
default:
return -EINVAL;
}
/* capability check */
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
if (cppc_state != AMD_PSTATE_ACTIVE)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
@@ -1773,13 +1936,15 @@ static int __init amd_pstate_init(void)
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
- pr_err("failed to enable with return %d\n", ret);
+ pr_err("failed to enable driver mode(%d)\n", cppc_state);
return ret;
}
ret = cpufreq_register_driver(current_pstate_driver);
- if (ret)
+ if (ret) {
pr_err("failed to register with return %d\n", ret);
+ goto disable_driver;
+ }
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
@@ -1795,6 +1960,8 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
+disable_driver:
+ amd_pstate_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
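
As a hedged illustration of the relation the new amd_pstate_init_freq() sanity checks rely on, max_freq = nominal_freq * highest_perf / nominal_perf (not part of the patch; the function name and the CPPC values are made up):

#include <stdint.h>

static uint32_t example_max_freq_khz(uint32_t nominal_freq_mhz,
				     uint32_t highest_perf, uint32_t nominal_perf)
{
	/* scale nominal_freq from MHz to kHz, then apply the perf ratio */
	return (uint32_t)((uint64_t)nominal_freq_mhz * 1000 * highest_perf / nominal_perf);
}

/* e.g. example_max_freq_khz(3000, 196, 140) == 4200000, i.e. 4.2 GHz in kHz */
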
diff --git a/include/linux/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index d58fc022ec46..cc8bb2bc325a 100644
--- a/include/linux/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * linux/include/linux/amd-pstate.h
- *
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*
* Author: Meng Li <li.meng@amd.com>
@@ -12,11 +10,6 @@
#include <linux/pm_qos.h>
-#define AMD_CPPC_EPP_PERFORMANCE 0x00
-#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
-#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
-#define AMD_CPPC_EPP_POWERSAVE 0xFF
-
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
@@ -106,32 +99,8 @@ struct amd_cpudata {
u32 policy;
u64 cppc_cap1_cached;
bool suspended;
-};
-
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
- AMD_PSTATE_UNDEFINED = 0,
- AMD_PSTATE_DISABLE,
- AMD_PSTATE_PASSIVE,
- AMD_PSTATE_ACTIVE,
- AMD_PSTATE_GUIDED,
- AMD_PSTATE_MAX,
-};
-
-static const char * const amd_pstate_mode_string[] = {
- [AMD_PSTATE_UNDEFINED] = "undefined",
- [AMD_PSTATE_DISABLE] = "disable",
- [AMD_PSTATE_PASSIVE] = "passive",
- [AMD_PSTATE_ACTIVE] = "active",
- [AMD_PSTATE_GUIDED] = "guided",
- NULL,
-};
-
-struct quirk_entry {
- u32 nominal_freq;
- u32 lowest_freq;
+ s16 epp_default;
+ bool boost_state;
};
#endif /* _LINUX_AMD_PSTATE_H */
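
The mode table that moved into amd-pstate.c is typically consumed by a string-to-index lookup; a sketch of such a helper is shown below (illustrative only, not part of the patch; the helper name is hypothetical):

static int example_mode_idx_from_str(const char *str, size_t size)
{
	int i;

	/* walk the amd_pstate_mode_string[] table defined in amd-pstate.c */
	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}

	return -EINVAL;
}
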
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 021f423705e1..af34c22fa273 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -305,7 +305,7 @@ out_iounmap:
return ret;
}
-static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
+static void apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct apple_cpu_priv *priv = policy->driver_data;
@@ -313,8 +313,6 @@ static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
iounmap(priv->reg_base);
kfree(priv);
-
- return 0;
}
static struct cpufreq_driver apple_soc_cpufreq_driver = {
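
The same conversion of the cpufreq ->exit() callback to a void return type repeats across the drivers below; a minimal sketch of the new callback shape (illustrative only, not part of the patch):

static void example_cpufreq_exit(struct cpufreq_policy *policy)
{
	/* release per-policy resources; no error code is propagated anymore */
	kfree(policy->driver_data);
}
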
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 39221a9a187a..17a4c174553d 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -121,11 +121,9 @@ static int bmips_cpufreq_target_index(struct cpufreq_policy *policy,
return 0;
}
-static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
+static void bmips_cpufreq_exit(struct cpufreq_policy *policy)
{
kfree(policy->freq_table);
-
- return 0;
}
static int bmips_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 15f1d41920a3..bafa32dd375d 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -291,15 +291,10 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
struct cpufreq_freqs freqs;
- u32 desired_perf;
int ret = 0;
- desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
- /* Return if it is exactly the same perf */
- if (desired_perf == cpu_data->perf_ctrls.desired_perf)
- return ret;
-
- cpu_data->perf_ctrls.desired_perf = desired_perf;
+ cpu_data->perf_ctrls.desired_perf =
+ cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -688,7 +683,7 @@ out:
return ret;
}
-static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
@@ -705,7 +700,6 @@ static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
caps->lowest_perf, cpu, ret);
cppc_cpufreq_put_cpu_data(policy);
- return 0;
}
static inline u64 get_delta(u64 t1, u64 t0)
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index c74dd1e01e0d..cac379ba006d 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -233,4 +233,5 @@ create_pdev:
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
+MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 907e22632fda..6532c4d71338 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -157,10 +157,9 @@ static int cpufreq_offline(struct cpufreq_policy *policy)
return 0;
}
-static int cpufreq_exit(struct cpufreq_policy *policy)
+static void cpufreq_exit(struct cpufreq_policy *policy)
{
clk_put(policy->clk);
- return 0;
}
static struct cpufreq_driver dt_cpufreq_driver = {
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index f7a7bcf6f52e..fedad1081973 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -359,11 +359,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int nforce2_cpu_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver nforce2_driver = {
.name = "nforce2",
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
@@ -371,7 +366,6 @@ static struct cpufreq_driver nforce2_driver = {
.target = nforce2_target,
.get = nforce2_get,
.init = nforce2_cpu_init,
- .exit = nforce2_cpu_exit,
};
#ifdef MODULE
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a45aac17c20f..04fc786dd2c0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -608,16 +608,15 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
static ssize_t show_boost(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
- int ret, enable;
+ bool enable;
- ret = sscanf(buf, "%d", &enable);
- if (ret != 1 || enable < 0 || enable > 1)
+ if (kstrtobool(buf, &enable))
return -EINVAL;
if (cpufreq_boost_trigger_state(enable)) {
@@ -641,10 +640,10 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- int ret, enable;
+ int ret;
+ bool enable;
- ret = kstrtoint(buf, 10, &enable);
- if (ret || enable < 0 || enable > 1)
+ if (kstrtobool(buf, &enable))
return -EINVAL;
if (!cpufreq_driver->boost_enabled)
@@ -739,7 +738,7 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
static ssize_t show_##file_name \
(struct cpufreq_policy *policy, char *buf) \
{ \
- return sprintf(buf, "%u\n", policy->object); \
+ return sysfs_emit(buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
@@ -760,11 +759,11 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
freq = arch_freq_get_on_cpu(policy->cpu);
if (freq)
- ret = sprintf(buf, "%u\n", freq);
+ ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
- ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+ ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
- ret = sprintf(buf, "%u\n", policy->cur);
+ ret = sysfs_emit(buf, "%u\n", policy->cur);
return ret;
}
@@ -798,9 +797,9 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
unsigned int cur_freq = __cpufreq_get(policy);
if (cur_freq)
- return sprintf(buf, "%u\n", cur_freq);
+ return sysfs_emit(buf, "%u\n", cur_freq);
- return sprintf(buf, "<unknown>\n");
+ return sysfs_emit(buf, "<unknown>\n");
}
/*
@@ -809,12 +808,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
- return sprintf(buf, "powersave\n");
+ return sysfs_emit(buf, "powersave\n");
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
- return sprintf(buf, "performance\n");
+ return sysfs_emit(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
- policy->governor->name);
+ return sysfs_emit(buf, "%s\n", policy->governor->name);
return -EINVAL;
}
@@ -873,7 +871,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
struct cpufreq_governor *t;
if (!has_target()) {
- i += sprintf(buf, "performance powersave");
+ i += sysfs_emit(buf, "performance powersave");
goto out;
}
@@ -882,11 +880,11 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
break;
- i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+ i += sysfs_emit_at(buf, i, "%s ", t->name);
}
mutex_unlock(&cpufreq_governor_mutex);
out:
- i += sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
return i;
}
@@ -896,7 +894,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
unsigned int cpu;
for_each_cpu(cpu, mask) {
- i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
+ i += sysfs_emit_at(buf, i, "%u ", cpu);
if (i >= (PAGE_SIZE - 5))
break;
}
@@ -904,7 +902,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
/* Remove the extra space at the end */
i--;
- i += sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
@@ -947,7 +945,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
if (!policy->governor || !policy->governor->show_setspeed)
- return sprintf(buf, "<unsupported>\n");
+ return sysfs_emit(buf, "<unsupported>\n");
return policy->governor->show_setspeed(policy, buf);
}
@@ -961,8 +959,8 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
int ret;
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
if (!ret)
- return sprintf(buf, "%u\n", limit);
- return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+ return sysfs_emit(buf, "%u\n", limit);
+ return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
@@ -1431,7 +1429,8 @@ static int cpufreq_online(unsigned int cpu)
}
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+ if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
+ policy->boost_enabled = true;
/*
* The initialization has succeeded and the policy is online.
@@ -2875,7 +2874,7 @@ int cpufreq_enable_boost_support(void)
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-int cpufreq_boost_enabled(void)
+bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
}
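
For the sysfs_emit() conversion above, a hedged sketch of the sysfs_emit_at() pattern used when several items are appended into one PAGE_SIZE sysfs buffer (illustrative only, not part of the patch):

static ssize_t show_example_list(char *buf)
{
	static const char * const names[] = { "performance", "powersave" };
	ssize_t len = 0;
	int i;

	/* sysfs_emit_at() bounds each write to the PAGE_SIZE sysfs buffer */
	for (i = 0; i < ARRAY_SIZE(names); i++)
		len += sysfs_emit_at(buf, len, "%s ", names[i]);

	len += sysfs_emit_at(buf, len, "\n");

	return len;
}
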
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index ab93bce8ae77..6e958b09e1b5 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -360,14 +360,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int eps_cpu_exit(struct cpufreq_policy *policy)
+static void eps_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
/* Bye */
kfree(eps_cpu[cpu]);
eps_cpu[cpu] = NULL;
- return 0;
}
static struct cpufreq_driver eps_driver = {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b986c044741..392a8000b238 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -300,6 +300,7 @@ static struct cpufreq_driver *intel_pstate_driver __read_mostly;
#define HYBRID_SCALING_FACTOR 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
+#define HYBRID_SCALING_FACTOR_LNL 86957
static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
@@ -355,15 +356,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
- if (ret)
- return;
-
/*
- * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
- * In this case we can't use CPPC.highest_perf to enable ITMT.
- * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+ * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
+ *
+ * Also, on some systems with overclocking enabled, CPPC.highest_perf is
+ * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
+ * Fall back to MSR_HWP_CAPABILITIES then too.
*/
- if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
/*
@@ -1153,7 +1153,8 @@ static void intel_pstate_update_policies(void)
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
{
- intel_pstate_get_hwp_cap(cpudata);
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
@@ -1301,12 +1302,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
no_turbo = !!clamp_t(int, input, 0, 1);
- if (no_turbo == global.no_turbo)
- goto unlock_driver;
-
- if (global.turbo_disabled) {
- pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
+ WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
+ if (global.turbo_disabled && !no_turbo) {
+ pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
count = -EPERM;
+ if (global.no_turbo)
+ goto unlock_driver;
+ else
+ no_turbo = 1;
+ }
+
+ if (no_turbo == global.no_turbo) {
goto unlock_driver;
}
@@ -1620,17 +1626,24 @@ static void intel_pstate_notify_work(struct work_struct *work)
static DEFINE_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;
+#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_STATUS BIT(3)
+
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
+ u64 value, status_mask;
unsigned long flags;
- u64 value;
- if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
+ status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS;
+ if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+ status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
+
rdmsrl_safe(MSR_HWP_STATUS, &value);
- if (!(value & 0x01))
+ if (!(value & status_mask))
return;
spin_lock_irqsave(&hwp_notify_lock, flags);
@@ -1654,7 +1667,7 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
bool cancel_work;
- if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
@@ -1668,17 +1681,25 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}
+#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_REQ BIT(2)
+
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
- /* Enable HWP notification interrupt for guaranteed performance change */
+ /* Enable HWP notification interrupt for performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+ u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
+
spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
spin_unlock_irq(&hwp_notify_lock);
+ if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+ interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
+
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -1761,7 +1782,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1926,7 +1947,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
return val;
@@ -2362,54 +2383,54 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-#define X86_MATCH(model, policy) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- X86_FEATURE_APERFMPERF, &policy)
+#define X86_MATCH(vfm, policy) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy)
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- X86_MATCH(SANDYBRIDGE, core_funcs),
- X86_MATCH(SANDYBRIDGE_X, core_funcs),
- X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
- X86_MATCH(IVYBRIDGE, core_funcs),
- X86_MATCH(HASWELL, core_funcs),
- X86_MATCH(BROADWELL, core_funcs),
- X86_MATCH(IVYBRIDGE_X, core_funcs),
- X86_MATCH(HASWELL_X, core_funcs),
- X86_MATCH(HASWELL_L, core_funcs),
- X86_MATCH(HASWELL_G, core_funcs),
- X86_MATCH(BROADWELL_G, core_funcs),
- X86_MATCH(ATOM_AIRMONT, airmont_funcs),
- X86_MATCH(SKYLAKE_L, core_funcs),
- X86_MATCH(BROADWELL_X, core_funcs),
- X86_MATCH(SKYLAKE, core_funcs),
- X86_MATCH(BROADWELL_D, core_funcs),
- X86_MATCH(XEON_PHI_KNL, knl_funcs),
- X86_MATCH(XEON_PHI_KNM, knl_funcs),
- X86_MATCH(ATOM_GOLDMONT, core_funcs),
- X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
- X86_MATCH(SKYLAKE_X, core_funcs),
- X86_MATCH(COMETLAKE, core_funcs),
- X86_MATCH(ICELAKE_X, core_funcs),
- X86_MATCH(TIGERLAKE, core_funcs),
- X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
- X86_MATCH(EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_SANDYBRIDGE, core_funcs),
+ X86_MATCH(INTEL_SANDYBRIDGE_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_SILVERMONT, silvermont_funcs),
+ X86_MATCH(INTEL_IVYBRIDGE, core_funcs),
+ X86_MATCH(INTEL_HASWELL, core_funcs),
+ X86_MATCH(INTEL_BROADWELL, core_funcs),
+ X86_MATCH(INTEL_IVYBRIDGE_X, core_funcs),
+ X86_MATCH(INTEL_HASWELL_X, core_funcs),
+ X86_MATCH(INTEL_HASWELL_L, core_funcs),
+ X86_MATCH(INTEL_HASWELL_G, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_G, core_funcs),
+ X86_MATCH(INTEL_ATOM_AIRMONT, airmont_funcs),
+ X86_MATCH(INTEL_SKYLAKE_L, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+ X86_MATCH(INTEL_XEON_PHI_KNL, knl_funcs),
+ X86_MATCH(INTEL_XEON_PHI_KNM, knl_funcs),
+ X86_MATCH(INTEL_ATOM_GOLDMONT, core_funcs),
+ X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+ X86_MATCH(INTEL_COMETLAKE, core_funcs),
+ X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+ X86_MATCH(INTEL_TIGERLAKE, core_funcs),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
- X86_MATCH(BROADWELL_D, core_funcs),
- X86_MATCH(BROADWELL_X, core_funcs),
- X86_MATCH(SKYLAKE_X, core_funcs),
- X86_MATCH(ICELAKE_X, core_funcs),
- X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+ X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
{}
};
#endif
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
- X86_MATCH(KABYLAKE, core_funcs),
+ X86_MATCH(INTEL_KABYLAKE, core_funcs),
{}
};
@@ -2694,13 +2715,11 @@ static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
return intel_cpufreq_cpu_offline(policy);
}
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
pr_debug("CPU %d exiting\n", policy->cpu);
policy->fast_switch_possible = false;
-
- return 0;
}
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -3031,7 +3050,7 @@ pstate_exit:
return ret;
}
-static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct freq_qos_request *req;
@@ -3041,7 +3060,7 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
freq_qos_remove_request(req);
kfree(req);
- return intel_pstate_cpu_exit(policy);
+ intel_pstate_cpu_exit(policy);
}
static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
@@ -3345,14 +3364,13 @@ static inline void intel_pstate_request_control_from_smm(void) {}
#define INTEL_PSTATE_HWP_BROADWELL 0x01
-#define X86_MATCH_HWP(model, hwp_mode) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- X86_FEATURE_HWP, hwp_mode)
+#define X86_MATCH_HWP(vfm, hwp_mode) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode)
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
- X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
- X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
- X86_MATCH_HWP(ANY, 0),
+ X86_MATCH_HWP(INTEL_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(INTEL_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(INTEL_ANY, 0),
{}
};
@@ -3385,15 +3403,19 @@ static const struct x86_cpu_id intel_epp_default[] = {
* which can result in one core turbo frequency for
* AlderLake Mobile CPUs.
*/
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
- HWP_EPP_BALANCE_POWERSAVE, 115, 16)),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ 179, 64, 16)),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ 179, 64, 16)),
{}
};
static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
{}
};
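
A hedged sketch of how the HWP interrupt mask introduced above is composed before it is written to MSR_HWP_INTERRUPT (illustrative only, not part of the patch):

static u64 example_hwp_interrupt_mask(bool highest_perf_change_supported)
{
	u64 mask = HWP_GUARANTEED_PERF_CHANGE_REQ;	/* BIT(0) */

	/* request highest-performance-change notifications only where supported */
	if (highest_perf_change_supported)
		mask |= HWP_HIGHEST_PERF_CHANGE_REQ;	/* BIT(2) */

	return mask;
}
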
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 4c57c6725c13..bd6fe8638d39 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -236,8 +236,9 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
}
/**
- * longhaul_set_cpu_frequency()
- * @mults_index : bitpattern of the new multiplier.
+ * longhaul_setstate()
+ * @policy: cpufreq_policy structure containing the current policy.
+ * @table_index: index of the frequency within the cpufreq_frequency_table.
*
* Sets a new clock ratio.
*/
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index afc59b292153..6a8e97896d38 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -85,18 +85,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
- .exit = loongson2_cpufreq_exit,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
new file mode 100644
index 000000000000..5f79b6de127c
--- /dev/null
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPUFreq driver for the Loongson-3 processors.
+ *
+ * All revisions of the Loongson-3 processor support the cpu_has_scalefreq feature.
+ *
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <asm/idle.h>
+#include <asm/loongarch.h>
+#include <asm/loongson.h>
+
+/* Message */
+union smc_message {
+ u32 value;
+ struct {
+ u32 id : 4;
+ u32 info : 4;
+ u32 val : 16;
+ u32 cmd : 6;
+ u32 extra : 1;
+ u32 complete : 1;
+ };
+};
+
+/* Command return values */
+#define CMD_OK 0 /* No error */
+#define CMD_ERROR 1 /* Regular error */
+#define CMD_NOCMD 2 /* Command not supported */
+#define CMD_INVAL 3 /* Invalid Parameter */
+
+/* Version commands */
+/*
+ * CMD_GET_VERSION - Get interface version
+ * Input: none
+ * Output: version
+ */
+#define CMD_GET_VERSION 0x1
+
+/* Feature commands */
+/*
+ * CMD_GET_FEATURE - Get feature state
+ * Input: feature ID
+ * Output: feature flag
+ */
+#define CMD_GET_FEATURE 0x2
+
+/*
+ * CMD_SET_FEATURE - Set feature state
+ * Input: feature ID, feature flag
+ * Output: none
+ */
+#define CMD_SET_FEATURE 0x3
+
+/* Feature IDs */
+#define FEATURE_SENSOR 0
+#define FEATURE_FAN 1
+#define FEATURE_DVFS 2
+
+/* Sensor feature flags */
+#define FEATURE_SENSOR_ENABLE BIT(0)
+#define FEATURE_SENSOR_SAMPLE BIT(1)
+
+/* Fan feature flags */
+#define FEATURE_FAN_ENABLE BIT(0)
+#define FEATURE_FAN_AUTO BIT(1)
+
+/* DVFS feature flags */
+#define FEATURE_DVFS_ENABLE BIT(0)
+#define FEATURE_DVFS_BOOST BIT(1)
+#define FEATURE_DVFS_AUTO BIT(2)
+#define FEATURE_DVFS_SINGLE_BOOST BIT(3)
+
+/* Sensor commands */
+/*
+ * CMD_GET_SENSOR_NUM - Get number of sensors
+ * Input: none
+ * Output: number
+ */
+#define CMD_GET_SENSOR_NUM 0x4
+
+/*
+ * CMD_GET_SENSOR_STATUS - Get sensor status
+ * Input: sensor ID, type
+ * Output: sensor status
+ */
+#define CMD_GET_SENSOR_STATUS 0x5
+
+/* Sensor types */
+#define SENSOR_INFO_TYPE 0
+#define SENSOR_INFO_TYPE_TEMP 1
+
+/* Fan commands */
+/*
+ * CMD_GET_FAN_NUM - Get number of fans
+ * Input: none
+ * Output: number
+ */
+#define CMD_GET_FAN_NUM 0x6
+
+/*
+ * CMD_GET_FAN_INFO - Get fan status
+ * Input: fan ID, type
+ * Output: fan info
+ */
+#define CMD_GET_FAN_INFO 0x7
+
+/*
+ * CMD_SET_FAN_INFO - Set fan status
+ * Input: fan ID, type, value
+ * Output: none
+ */
+#define CMD_SET_FAN_INFO 0x8
+
+/* Fan types */
+#define FAN_INFO_TYPE_LEVEL 0
+
+/* DVFS commands */
+/*
+ * CMD_GET_FREQ_LEVEL_NUM - Get number of freq levels
+ * Input: CPU ID
+ * Output: number
+ */
+#define CMD_GET_FREQ_LEVEL_NUM 0x9
+
+/*
+ * CMD_GET_FREQ_BOOST_LEVEL - Get the first boost level
+ * Input: CPU ID
+ * Output: number
+ */
+#define CMD_GET_FREQ_BOOST_LEVEL 0x10
+
+/*
+ * CMD_GET_FREQ_LEVEL_INFO - Get freq level info
+ * Input: CPU ID, level ID
+ * Output: level info
+ */
+#define CMD_GET_FREQ_LEVEL_INFO 0x11
+
+/*
+ * CMD_GET_FREQ_INFO - Get freq info
+ * Input: CPU ID, type
+ * Output: freq info
+ */
+#define CMD_GET_FREQ_INFO 0x12
+
+/*
+ * CMD_SET_FREQ_INFO - Set freq info
+ * Input: CPU ID, type, value
+ * Output: none
+ */
+#define CMD_SET_FREQ_INFO 0x13
+
+/* Freq types */
+#define FREQ_INFO_TYPE_FREQ 0
+#define FREQ_INFO_TYPE_LEVEL 1
+
+#define FREQ_MAX_LEVEL 16
+
+struct loongson3_freq_data {
+ unsigned int def_freq_level;
+ struct cpufreq_frequency_table table[];
+};
+
+static struct mutex cpufreq_mutex[MAX_PACKAGES];
+static struct cpufreq_driver loongson3_cpufreq_driver;
+static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data);
+
+static inline int do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra)
+{
+ int retries;
+ unsigned int cpu = smp_processor_id();
+ unsigned int package = cpu_data[cpu].package;
+ union smc_message msg, last;
+
+ mutex_lock(&cpufreq_mutex[package]);
+
+ last.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX);
+ if (!last.complete) {
+ mutex_unlock(&cpufreq_mutex[package]);
+ return -EPERM;
+ }
+
+ msg.id = id;
+ msg.info = info;
+ msg.cmd = cmd;
+ msg.val = val;
+ msg.extra = extra;
+ msg.complete = 0;
+
+ iocsr_write32(msg.value, LOONGARCH_IOCSR_SMCMBX);
+ iocsr_write32(iocsr_read32(LOONGARCH_IOCSR_MISC_FUNC) | IOCSR_MISC_FUNC_SOFT_INT,
+ LOONGARCH_IOCSR_MISC_FUNC);
+
+ for (retries = 0; retries < 10000; retries++) {
+ msg.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX);
+ if (msg.complete)
+ break;
+
+ usleep_range(8, 12);
+ }
+
+ if (!msg.complete || msg.cmd != CMD_OK) {
+ mutex_unlock(&cpufreq_mutex[package]);
+ return -EPERM;
+ }
+
+ mutex_unlock(&cpufreq_mutex[package]);
+
+ return msg.val;
+}
+
+static unsigned int loongson3_cpufreq_get(unsigned int cpu)
+{
+ int ret;
+
+ ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_INFO, 0, 0);
+
+ return ret * KILO;
+}
+
+static int loongson3_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ int ret;
+
+ ret = do_service_request(cpu_data[policy->cpu].core,
+ FREQ_INFO_TYPE_LEVEL, CMD_SET_FREQ_INFO, index, 0);
+
+ return (ret >= 0) ? 0 : ret;
+}
+
+static int configure_freq_table(int cpu)
+{
+ int i, ret, boost_level, max_level, freq_level;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct loongson3_freq_data *data;
+
+ if (per_cpu(freq_data, cpu))
+ return 0;
+
+ ret = do_service_request(cpu, 0, CMD_GET_FREQ_LEVEL_NUM, 0, 0);
+ if (ret < 0)
+ return ret;
+ max_level = ret;
+
+ ret = do_service_request(cpu, 0, CMD_GET_FREQ_BOOST_LEVEL, 0, 0);
+ if (ret < 0)
+ return ret;
+ boost_level = ret;
+
+ freq_level = min(max_level, FREQ_MAX_LEVEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, table, freq_level + 1), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->def_freq_level = boost_level - 1;
+
+ for (i = 0; i < freq_level; i++) {
+ ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_LEVEL_INFO, i, 0);
+ if (ret < 0) {
+ devm_kfree(&pdev->dev, data);
+ return ret;
+ }
+
+ data->table[i].frequency = ret * KILO;
+ data->table[i].flags = (i >= boost_level) ? CPUFREQ_BOOST_FREQ : 0;
+ }
+
+ data->table[freq_level].flags = 0;
+ data->table[freq_level].frequency = CPUFREQ_TABLE_END;
+
+ per_cpu(freq_data, cpu) = data;
+
+ return 0;
+}
+
+static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int i, ret, cpu = policy->cpu;
+
+ ret = configure_freq_table(cpu);
+ if (ret < 0)
+ return ret;
+
+ policy->cpuinfo.transition_latency = 10000;
+ policy->freq_table = per_cpu(freq_data, cpu)->table;
+ policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency;
+ cpumask_copy(policy->cpus, topology_sibling_cpumask(cpu));
+
+ for_each_cpu(i, policy->cpus) {
+ if (i != cpu)
+ per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
+ }
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret < 0) {
+ pr_warn("cpufreq: Failed to enable boost: %d\n", ret);
+ return ret;
+ }
+ loongson3_cpufreq_driver.boost_enabled = true;
+ }
+
+ return 0;
+}
+
+static void loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ int cpu = policy->cpu;
+
+ loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level);
+}
+
+static int loongson3_cpufreq_cpu_online(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int loongson3_cpufreq_cpu_offline(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver loongson3_cpufreq_driver = {
+ .name = "loongson3",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = loongson3_cpufreq_cpu_init,
+ .exit = loongson3_cpufreq_cpu_exit,
+ .online = loongson3_cpufreq_cpu_online,
+ .offline = loongson3_cpufreq_cpu_offline,
+ .get = loongson3_cpufreq_get,
+ .target_index = loongson3_cpufreq_target,
+ .attr = cpufreq_generic_attr,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .suspend = cpufreq_generic_suspend,
+};
+
+static int loongson3_cpufreq_probe(struct platform_device *pdev)
+{
+ int i, ret;
+
+ for (i = 0; i < MAX_PACKAGES; i++)
+ devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
+
+ ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0);
+ if (ret <= 0)
+ return -EPERM;
+
+ ret = do_service_request(FEATURE_DVFS, 0, CMD_SET_FEATURE,
+ FEATURE_DVFS_ENABLE | FEATURE_DVFS_BOOST, 0);
+ if (ret < 0)
+ return -EPERM;
+
+ loongson3_cpufreq_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&loongson3_cpufreq_driver);
+ if (ret)
+ return ret;
+
+ pr_info("cpufreq: Loongson-3 CPU frequency driver.\n");
+
+ return 0;
+}
+
+static void loongson3_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&loongson3_cpufreq_driver);
+}
+
+static struct platform_device_id cpufreq_id_table[] = {
+ { "loongson3_cpufreq", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cpufreq_id_table);
+
+static struct platform_driver loongson3_platform_driver = {
+ .driver = {
+ .name = "loongson3_cpufreq",
+ },
+ .id_table = cpufreq_id_table,
+ .probe = loongson3_cpufreq_probe,
+ .remove_new = loongson3_cpufreq_remove,
+};
+module_platform_driver(loongson3_platform_driver);
+
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("CPUFreq driver for Loongson-3 processors");
+MODULE_LICENSE("GPL");
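
To make the mailbox layout above concrete, a sketch of packing a CMD_GET_FREQ_INFO request into the SMC message word, following the union smc_message bitfields (illustrative only, not part of the patch; the function name is hypothetical):

static u32 example_pack_get_freq(u32 cpu_id)
{
	union smc_message msg = {
		.id		= cpu_id,		/* bits 3:0 */
		.info		= FREQ_INFO_TYPE_FREQ,	/* bits 7:4 */
		.val		= 0,			/* bits 23:8 */
		.cmd		= CMD_GET_FREQ_INFO,	/* bits 29:24 */
		.extra		= 0,			/* bit 30 */
		.complete	= 0,			/* bit 31, set by the firmware on completion */
	};

	return msg.value;
}
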
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 8d097dcddda4..8925e096d5b9 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -260,7 +260,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct mtk_cpufreq_data *data = policy->driver_data;
struct resource *res = data->res;
@@ -270,8 +270,6 @@ static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
iounmap(base);
release_mem_region(res->start, resource_size(res));
-
- return 0;
}
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 518606adf14e..3a1aadaa723c 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -390,28 +390,23 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
int ret;
cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- dev_err(cpu_dev, "failed to get cpu%d device\n", cpu);
- return -ENODEV;
- }
+ if (!cpu_dev)
+ return dev_err_probe(cpu_dev, -ENODEV, "failed to get cpu%d device\n", cpu);
info->cpu_dev = cpu_dev;
info->ccifreq_bound = false;
if (info->soc_data->ccifreq_supported) {
info->cci_dev = of_get_cci(info->cpu_dev);
- if (IS_ERR(info->cci_dev)) {
- ret = PTR_ERR(info->cci_dev);
- dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
- return -ENODEV;
- }
+ if (IS_ERR(info->cci_dev))
+ return dev_err_probe(cpu_dev, PTR_ERR(info->cci_dev),
+ "cpu%d: failed to get cci device\n",
+ cpu);
}
info->cpu_clk = clk_get(cpu_dev, "cpu");
- if (IS_ERR(info->cpu_clk)) {
- ret = PTR_ERR(info->cpu_clk);
- return dev_err_probe(cpu_dev, ret,
+ if (IS_ERR(info->cpu_clk))
+ return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk),
"cpu%d: failed to get cpu clk\n", cpu);
- }
info->inter_clk = clk_get(cpu_dev, "intermediate");
if (IS_ERR(info->inter_clk)) {
@@ -431,7 +426,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
ret = regulator_enable(info->proc_reg);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vproc\n", cpu);
goto out_free_proc_reg;
}
@@ -439,14 +434,17 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->sram_reg = regulator_get_optional(cpu_dev, "sram");
if (IS_ERR(info->sram_reg)) {
ret = PTR_ERR(info->sram_reg);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(cpu_dev, ret,
+ "cpu%d: Failed to get sram regulator\n", cpu);
goto out_disable_proc_reg;
+ }
info->sram_reg = NULL;
} else {
ret = regulator_enable(info->sram_reg);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vsram\n", cpu);
goto out_free_sram_reg;
}
}
@@ -454,31 +452,34 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
if (ret) {
- dev_err(cpu_dev,
+ dev_err_probe(cpu_dev, ret,
"cpu%d: failed to get OPP-sharing information\n", cpu);
goto out_disable_sram_reg;
}
ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: no OPP table\n", cpu);
goto out_disable_sram_reg;
}
ret = clk_prepare_enable(info->cpu_clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable cpu clk\n", cpu);
goto out_free_opp_table;
+ }
ret = clk_prepare_enable(info->inter_clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable inter clk\n", cpu);
goto out_disable_mux_clock;
+ }
if (info->soc_data->ccifreq_supported) {
info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
if (info->vproc_on_boot < 0) {
- ret = info->vproc_on_boot;
- dev_err(info->cpu_dev,
- "invalid Vproc value: %d\n", info->vproc_on_boot);
+ ret = dev_err_probe(info->cpu_dev, info->vproc_on_boot,
+ "invalid Vproc value\n");
goto out_disable_inter_clock;
}
}
@@ -487,8 +488,8 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
rate = clk_get_rate(info->inter_clk);
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
if (IS_ERR(opp)) {
- dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu);
- ret = PTR_ERR(opp);
+ ret = dev_err_probe(cpu_dev, PTR_ERR(opp),
+ "cpu%d: failed to get intermediate opp\n", cpu);
goto out_disable_inter_clock;
}
info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
@@ -501,7 +502,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier;
ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb);
if (ret) {
- dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to register opp notifier\n", cpu);
goto out_disable_inter_clock;
}
@@ -599,13 +600,11 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
+static void mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
-
- return 0;
}
static struct cpufreq_driver mtk_cpufreq_driver = {
@@ -629,11 +628,9 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
int cpu, ret;
data = dev_get_platdata(&pdev->dev);
- if (!data) {
- dev_err(&pdev->dev,
- "failed to get mtk cpufreq platform data\n");
- return -ENODEV;
- }
+ if (!data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to get mtk cpufreq platform data\n");
for_each_possible_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
@@ -642,25 +639,22 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
- ret = -ENOMEM;
+ ret = dev_err_probe(&pdev->dev, -ENOMEM,
+ "Failed to allocate dvfs_info\n");
goto release_dvfs_info_list;
}
info->soc_data = data;
ret = mtk_cpu_dvfs_info_init(info, cpu);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to initialize dvfs info for cpu%d\n",
- cpu);
+ if (ret)
goto release_dvfs_info_list;
- }
list_add(&info->list_head, &dvfs_info_list);
}
ret = cpufreq_register_driver(&mtk_cpufreq_driver);
if (ret) {
- dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
+ dev_err_probe(&pdev->dev, ret, "failed to register mtk cpufreq driver\n");
goto release_dvfs_info_list;
}
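The error-path rework above leans on dev_err_probe(): it logs the message (or, for -EPROBE_DEFER, records the reason for the deferred-probe log instead of spamming dmesg) and returns the error code it was given, so "return dev_err_probe(...)" replaces the separate print-then-return pairs. A hedged usage sketch (the "cpu" clock name is illustrative):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_get_cpu_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk = clk_get(dev, "cpu");

		if (IS_ERR(clk))
			/* Logs (or records the deferral) and returns PTR_ERR(clk). */
			return dev_err_probe(dev, PTR_ERR(clk),
					     "failed to get cpu clk\n");

		*out = clk;
		return 0;
	}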
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 895690856665..3458d5cc9b7f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -135,11 +135,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int omap_cpu_exit(struct cpufreq_policy *policy)
+static void omap_cpu_exit(struct cpufreq_policy *policy)
{
freq_table_free();
clk_put(policy->clk);
- return 0;
}
static struct cpufreq_driver omap_driver = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 039a66bbe1be..ee925b53b6b9 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -204,21 +204,19 @@ out:
return err;
}
-static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
/*
* We don't support CPU hotplug. Don't unmap after the system
* has already made it to a running state.
*/
if (system_state >= SYSTEM_RUNNING)
- return 0;
+ return;
if (sdcasr_mapbase)
iounmap(sdcasr_mapbase);
if (sdcpwr_mapbase)
iounmap(sdcpwr_mapbase);
-
- return 0;
}
static int pas_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 6f8b5ea7aeae..771efbf51a48 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -562,18 +562,12 @@ out:
return result;
}
-static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver pcc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.get = pcc_get_freq,
.verify = pcc_cpufreq_verify,
.target = pcc_cpufreq_target,
.init = pcc_cpufreq_cpu_init,
- .exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq",
};
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 41eefef95d87..f0a4a6c31204 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -219,7 +219,7 @@ have_busfreq:
}
-static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
+static void powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int i;
@@ -234,10 +234,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_target(policy, i);
cpufreq_freq_transition_end(policy, &freqs, 0);
- break;
+ return;
}
}
- return 0;
}
static unsigned int powernow_k6_get(unsigned int cpu)
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 5d515fc34836..4271446c8725 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -644,7 +644,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int powernow_cpu_exit(struct cpufreq_policy *policy)
+static void powernow_cpu_exit(struct cpufreq_policy *policy)
{
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) {
@@ -655,7 +655,6 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
#endif
kfree(powernow_table);
- return 0;
}
static struct cpufreq_driver powernow_driver = {
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index b10f7a1b77f1..a01170f7d01c 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1089,13 +1089,13 @@ err_out:
return -ENODEV;
}
-static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+static void powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
int cpu;
if (!data)
- return -EINVAL;
+ return;
powernow_k8_cpu_exit_acpi(data);
@@ -1104,8 +1104,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
/* pol->cpus will be empty here, use related_cpus instead. */
for_each_cpu(cpu, pol->related_cpus)
per_cpu(powernow_data, cpu) = NULL;
-
- return 0;
}
static void query_values_on_cpu(void *_err)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index fddbd1ea1635..50c62929f7ca 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -874,7 +874,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
struct global_pstate_info *gpstates = policy->driver_data;
@@ -886,8 +886,6 @@ static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
del_timer_sync(&gpstates->timer);
kfree(policy->driver_data);
-
- return 0;
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 88afc49941b7..5ee4c7bfdcc5 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -113,10 +113,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cbe_cpufreq_pmi_policy_exit(policy);
- return 0;
}
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index ec8df5496a0c..370fe6a0104b 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -573,7 +573,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return qcom_cpufreq_hw_lmh_init(policy, index);
}
-static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
struct qcom_cpufreq_data *data = policy->driver_data;
@@ -583,8 +583,6 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
-
- return 0;
}
static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index ea05d9d67490..939702dfa73f 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -191,6 +191,7 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ5312:
case QCOM_ID_IPQ5302:
case QCOM_ID_IPQ5300:
+ case QCOM_ID_IPQ5321:
case QCOM_ID_IPQ9514:
case QCOM_ID_IPQ9550:
case QCOM_ID_IPQ9554:
@@ -455,7 +456,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
struct nvmem_cell *speedbin_nvmem;
- struct device_node *np;
struct device *cpu_dev;
char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
char *pvs_name = pvs_name_buffer;
@@ -467,16 +467,15 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return -ENOENT;
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") ||
of_device_is_compatible(np, "operating-points-v2-krait-cpu");
- if (!ret) {
- of_node_put(np);
+ if (!ret)
return -ENOENT;
- }
drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
GFP_KERNEL);
@@ -502,7 +501,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
nvmem_cell_put(speedbin_nvmem);
}
- of_node_put(np);
for_each_possible_cpu(cpu) {
struct device **virt_devs = NULL;
@@ -638,7 +636,7 @@ MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
*/
static int __init qcom_cpufreq_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
int ret;
@@ -646,7 +644,6 @@ static int __init qcom_cpufreq_init(void)
return -ENODEV;
match = of_match_node(qcom_cpufreq_match_list, np);
- of_node_put(np);
if (!match)
return -ENODEV;
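The probe and init-time lookups above switch to the cleanup.h-based __free(device_node) annotation: of_node_put() runs automatically when the pointer leaves scope, which is why the explicit puts on the early-return paths could be deleted. A hedged sketch of the idiom in isolation:

	#include <linux/cleanup.h>
	#include <linux/of.h>

	static bool example_root_is_compatible(const char *compat)
	{
		/* of_node_put(np) runs on every return path automatically. */
		struct device_node *np __free(device_node) =
			of_find_node_by_path("/");

		if (!np)
			return false;

		return of_device_is_compatible(np, compat);
	}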
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 0aecaecbb0e6..3519bf34d397 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -225,7 +225,7 @@ err_np:
return -ENODEV;
}
-static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
@@ -233,8 +233,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
kfree(data->table);
kfree(data);
policy->driver_data = NULL;
-
- return 0;
}
static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 3b4f6bfb2f4c..5892c73e129d 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -63,9 +63,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct scmi_data *priv = policy->driver_data;
+ unsigned long freq = target_freq;
- if (!perf_ops->freq_set(ph, priv->domain_id,
- target_freq * 1000, true))
+ if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
return target_freq;
return 0;
@@ -308,7 +308,7 @@ out_free_priv:
return ret;
}
-static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
+static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
@@ -316,8 +316,6 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
-
- return 0;
}
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index d33be56983ed..8d73e6e8be2a 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -167,7 +167,7 @@ out_free_opp:
return ret;
}
-static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+static void scpi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scpi_data *priv = policy->driver_data;
@@ -175,8 +175,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
kfree(priv);
-
- return 0;
}
static struct cpufreq_driver scpi_cpufreq_driver = {
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index b8704232c27b..aa74036d0420 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -135,14 +135,12 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
clk_put(cpuclk);
-
- return 0;
}
static struct cpufreq_driver sh_cpufreq_driver = {
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 2783d3d55fce..8a0cd5312a59 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -296,10 +296,9 @@ static int us2e_freq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+static void us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
us2e_freq_target(policy, 0);
- return 0;
}
static struct cpufreq_driver cpufreq_us2e_driver = {
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 6c3657679a88..b50f9d13e6d2 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -140,10 +140,9 @@ static int us3_freq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+static void us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
us3_freq_target(policy, 0);
- return 0;
}
static struct cpufreq_driver cpufreq_us3_driver = {
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 75b10ecdb60f..3fafedb983b5 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -400,16 +400,12 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int centrino_cpu_exit(struct cpufreq_policy *policy)
+static void centrino_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- if (!per_cpu(centrino_model, cpu))
- return -ENODEV;
-
- per_cpu(centrino_model, cpu) = NULL;
-
- return 0;
+ if (per_cpu(centrino_model, cpu))
+ per_cpu(centrino_model, cpu) = NULL;
}
/**
@@ -520,10 +516,10 @@ static struct cpufreq_driver centrino_driver = {
* or ASCII model IDs.
*/
static const struct x86_cpu_id centrino_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM( 6, 9), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM( 6, 13), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM(15, 3), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM(15, 4), X86_FEATURE_EST, NULL),
{}
};
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 9c542e723a15..8e2e703c3865 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -18,7 +18,7 @@
#include <linux/regmap.h>
#define VERSION_ELEMENTS 3
-#define MAX_PCODE_NAME_LEN 7
+#define MAX_PCODE_NAME_LEN 16
#define VERSION_SHIFT 28
#define HW_INFO_INDEX 1
@@ -293,6 +293,7 @@ module_init(sti_cpufreq_init);
static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
{ .compatible = "st,stih407" },
{ .compatible = "st,stih410" },
+ { .compatible = "st,stih418" },
{ },
};
MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 0b882765cd66..95ac8d46c156 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -91,6 +91,9 @@ static u32 sun50i_h616_efuse_xlate(u32 speedbin)
case 0x5d00:
value = 0;
break;
+ case 0x6c00:
+ value = 5;
+ break;
default:
pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n",
speedbin & 0xffff);
@@ -131,26 +134,24 @@ static const struct of_device_id cpu_opp_match_list[] = {
static bool dt_has_supported_hw(void)
{
bool has_opp_supported_hw = false;
- struct device_node *np, *opp;
struct device *cpu_dev;
cpu_dev = get_cpu_device(0);
if (!cpu_dev)
return false;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return false;
- for_each_child_of_node(np, opp) {
+ for_each_child_of_node_scoped(np, opp) {
if (of_find_property(opp, "opp-supported-hw", NULL)) {
has_opp_supported_hw = true;
break;
}
}
- of_node_put(np);
-
return has_opp_supported_hw;
}
@@ -165,7 +166,6 @@ static int sun50i_cpufreq_get_efuse(void)
const struct sunxi_cpufreq_data *opp_data;
struct nvmem_cell *speedbin_nvmem;
const struct of_device_id *match;
- struct device_node *np;
struct device *cpu_dev;
u32 *speedbin;
int ret;
@@ -174,19 +174,18 @@ static int sun50i_cpufreq_get_efuse(void)
if (!cpu_dev)
return -ENODEV;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return -ENOENT;
match = of_match_node(cpu_opp_match_list, np);
- if (!match) {
- of_node_put(np);
+ if (!match)
return -ENOENT;
- }
+
opp_data = match->data;
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
- of_node_put(np);
if (IS_ERR(speedbin_nvmem))
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
@@ -301,14 +300,9 @@ MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
static const struct of_device_id *sun50i_cpufreq_match_node(void)
{
- const struct of_device_id *match;
- struct device_node *np;
-
- np = of_find_node_by_path("/");
- match = of_match_node(sun50i_cpufreq_match_list, np);
- of_node_put(np);
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
- return match;
+ return of_match_node(sun50i_cpufreq_match_list, np);
}
/*
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 59865ea455a8..07ea7ed61b68 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -551,14 +551,12 @@ static int tegra194_cpufreq_offline(struct cpufreq_policy *policy)
return 0;
}
-static int tegra194_cpufreq_exit(struct cpufreq_policy *policy)
+static void tegra194_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
-
- return 0;
}
static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 714ed53753fa..4d3f27958fbd 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -47,6 +47,35 @@
#define AM625_SUPPORT_S_MPU_OPP BIT(1)
#define AM625_SUPPORT_T_MPU_OPP BIT(2)
+enum {
+ AM62A7_EFUSE_M_MPU_OPP = 13,
+ AM62A7_EFUSE_N_MPU_OPP,
+ AM62A7_EFUSE_O_MPU_OPP,
+ AM62A7_EFUSE_P_MPU_OPP,
+ AM62A7_EFUSE_Q_MPU_OPP,
+ AM62A7_EFUSE_R_MPU_OPP,
+ AM62A7_EFUSE_S_MPU_OPP,
+ /*
+ * The V, U, and T speed grade numbering is out of order
+ * to align with the AM625 more uniformly. I promise I know
+ * my ABCs ;)
+ */
+ AM62A7_EFUSE_V_MPU_OPP,
+ AM62A7_EFUSE_U_MPU_OPP,
+ AM62A7_EFUSE_T_MPU_OPP,
+};
+
+#define AM62A7_SUPPORT_N_MPU_OPP BIT(0)
+#define AM62A7_SUPPORT_R_MPU_OPP BIT(1)
+#define AM62A7_SUPPORT_V_MPU_OPP BIT(2)
+
+#define AM62P5_EFUSE_O_MPU_OPP 15
+#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_U_MPU_OPP 21
+
+#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
+#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
@@ -112,6 +141,49 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
return BIT(efuse);
}
+static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
+
+ switch (efuse) {
+ case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
+ fallthrough;
+ case AM62P5_EFUSE_O_MPU_OPP:
+ calculated_efuse |= AM62P5_SUPPORT_O_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
+static unsigned long am62a7_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM62A7_SUPPORT_N_MPU_OPP;
+
+ switch (efuse) {
+ case AM62A7_EFUSE_V_MPU_OPP:
+ case AM62A7_EFUSE_U_MPU_OPP:
+ case AM62A7_EFUSE_T_MPU_OPP:
+ case AM62A7_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_V_MPU_OPP;
+ fallthrough;
+ case AM62A7_EFUSE_R_MPU_OPP:
+ case AM62A7_EFUSE_Q_MPU_OPP:
+ case AM62A7_EFUSE_P_MPU_OPP:
+ case AM62A7_EFUSE_O_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_R_MPU_OPP;
+ fallthrough;
+ case AM62A7_EFUSE_N_MPU_OPP:
+ case AM62A7_EFUSE_M_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_N_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long efuse)
{
@@ -234,6 +306,24 @@ static struct ti_cpufreq_soc_data am625_soc_data = {
.multi_regulator = false,
};
+static struct ti_cpufreq_soc_data am62a7_soc_data = {
+ .efuse_xlate = am62a7_efuse_xlate,
+ .efuse_offset = 0x0,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .rev_offset = 0x0014,
+ .multi_regulator = false,
+};
+
+static struct ti_cpufreq_soc_data am62p5_soc_data = {
+ .efuse_xlate = am62p5_efuse_xlate,
+ .efuse_offset = 0x0,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .rev_offset = 0x0014,
+ .multi_regulator = false,
+};
+
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
* @opp_data: pointer to ti_cpufreq_data context
@@ -337,8 +427,8 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
- { .compatible = "ti,am62a7", .data = &am625_soc_data, },
- { .compatible = "ti,am62p5", .data = &am625_soc_data, },
+ { .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
@@ -417,7 +507,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
if (ret < 0) {
- dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n");
goto fail_put_node;
}
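The am62a7/am62p5 efuse translators added earlier in this file use cascading switch fallthroughs so a higher speed grade implicitly collects every lower grade's support bit: an AM62P5 part fused as AM62P5_EFUSE_S_MPU_OPP (19) resolves to AM62P5_SUPPORT_O_MPU_OPP | AM62P5_SUPPORT_U_MPU_OPP = 0x5, and an AM62A7 part fused as AM62A7_EFUSE_T_MPU_OPP resolves to 0x7. A small self-contained sketch of the same accumulation idiom with invented grade names:

	/* Invented grades/bits; only the fallthrough-accumulation pattern matters. */
	#define GRADE_MID	2
	#define GRADE_FAST	3

	#define SUPPORT_SLOW	(1UL << 0)
	#define SUPPORT_MID	(1UL << 1)
	#define SUPPORT_FAST	(1UL << 2)

	static unsigned long grade_to_support(int grade)
	{
		unsigned long mask = SUPPORT_SLOW;	/* every part runs the slow OPP */

		switch (grade) {
		case GRADE_FAST:
			mask |= SUPPORT_FAST;
			/* fall through: fast parts also run the mid OPP */
		case GRADE_MID:
			mask |= SUPPORT_MID;
		}

		return mask;	/* FAST -> 0x7, MID -> 0x3, anything else -> 0x1 */
	}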
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 9ac4ea50b874..3fadf536c429 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -447,7 +447,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
@@ -455,11 +455,10 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu);
- return -ENODEV;
+ return;
}
put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
- return 0;
}
static struct cpufreq_driver ve_spc_cpufreq_driver = {
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index d8515d5c0853..bcd03e893a0a 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -141,5 +141,6 @@ static void __exit haltpoll_exit(void)
module_init(haltpoll_init);
module_exit(haltpoll_exit);
+MODULE_DESCRIPTION("cpuidle driver for haltpoll governor");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marcelo Tosatti <mtosatti@redhat.com>");
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b96e3da0fedd..f3c9d49f0f2a 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -14,8 +14,6 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>
@@ -95,16 +93,11 @@
* state, and thus the less likely a busy CPU will hit such a deep
* C state.
*
- * Two factors are used in determing this multiplier:
- * a value of 10 is added for each point of "per cpu load average" we have.
- * a value of 5 points is added for each process that is waiting for
- * IO on this CPU.
- * (these values are experimentally determined)
- *
- * The load average factor gives a longer term (few seconds) input to the
- * decision, while the iowait value gives a cpu local instantanious input.
- * The iowait factor may look low, but realize that this is also already
- * represented in the system load average.
+ * Currently there is only one value determining the factor:
+ * 10 points are added for each process that is waiting for IO on this CPU.
+ * (This value was experimentally determined.)
+ * Utilization is no longer a factor as it was shown that it never contributed
+ * significantly to the performance multiplier in the first place.
*
*/
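The rewritten comment above leaves iowait as the only input to the latency-tolerance multiplier. A hedged sketch of the computation the text describes (this mirrors the shape of menu's performance multiplier helper, not necessarily its exact current code):

	/*
	 * Each task waiting for IO on this CPU adds 10 to the multiplier,
	 * shrinking the tolerated exit latency and steering the governor
	 * away from deep C states while IO completions are imminent.
	 */
	static unsigned int iowait_performance_multiplier(unsigned int nr_iowaiters)
	{
		return 1 + 10 * nr_iowaiters;
	}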
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 7244f71c59c5..f2992f92d8db 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -2,13 +2,8 @@
/*
* Timer events oriented CPU idle governor
*
- * TEO governor:
* Copyright (C) 2018 - 2021 Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * Util-awareness mechanism:
- * Copyright (C) 2022 Arm Ltd.
- * Author: Kajetan Puchalski <kajetan.puchalski@arm.com>
*/
/**
@@ -59,16 +54,12 @@
* shallower than the one whose bin is fallen into by the sleep length (these
* situations are referred to as "intercepts" below).
*
- * In addition to the metrics described above, the governor counts recent
- * intercepts (that is, intercepts that have occurred during the last
- * %NR_RECENT invocations of it for the given CPU) for each bin.
- *
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
* 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 3 sums as
+ * the current sleep length (the candidate idle state) and compute 2 sums as
* follows:
*
* - The sum of the "hits" and "intercepts" metrics for the candidate state
@@ -81,20 +72,15 @@
* idle long enough to avoid being intercepted if the sleep length had been
* equal to the current one).
*
- * - The sum of the numbers of recent intercepts for all of the idle states
- * shallower than the candidate one.
- *
- * 2. If the second sum is greater than the first one or the third sum is
- * greater than %NR_RECENT / 2, the CPU is likely to wake up early, so look
- * for an alternative idle state to select.
+ * 2. If the second sum is greater than the first one the CPU is likely to wake
+ * up early, so look for an alternative idle state to select.
*
* - Traverse the idle states shallower than the candidate one in the
* descending order.
*
- * - For each of them compute the sum of the "intercepts" metrics and the sum
- * of the numbers of recent intercepts over all of the idle states between
- * it and the candidate one (including the former and excluding the
- * latter).
+ * - For each of them compute the sum of the "intercepts" metrics over all
+ * of the idle states between it and the candidate one (including the
+ * former and excluding the latter).
*
* - If each of these sums that needs to be taken into account (because the
* check related to it has indicated that the CPU is likely to wake up
@@ -104,79 +90,31 @@
* select the given idle state instead of the candidate one.
*
* 3. By default, select the candidate state.
- *
- * Util-awareness mechanism:
- *
- * The idea behind the util-awareness extension is that there are two distinct
- * scenarios for the CPU which should result in two different approaches to idle
- * state selection - utilized and not utilized.
- *
- * In this case, 'utilized' means that the average runqueue util of the CPU is
- * above a certain threshold.
- *
- * When the CPU is utilized while going into idle, more likely than not it will
- * be woken up to do more work soon and so a shallower idle state should be
- * selected to minimise latency and maximise performance. When the CPU is not
- * being utilized, the usual metrics-based approach to selecting the deepest
- * available idle state should be preferred to take advantage of the power
- * saving.
- *
- * In order to achieve this, the governor uses a utilization threshold.
- * The threshold is computed per-CPU as a percentage of the CPU's capacity
- * by bit shifting the capacity value. Based on testing, the shift of 6 (~1.56%)
- * seems to be getting the best results.
- *
- * Before selecting the next idle state, the governor compares the current CPU
- * util to the precomputed util threshold. If it's below, it defaults to the
- * TEO metrics mechanism. If it's above, the closest shallower idle state will
- * be selected instead, as long as is not a polling state.
*/
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/sched/clock.h>
-#include <linux/sched/topology.h>
#include <linux/tick.h>
#include "gov.h"
/*
- * The number of bits to shift the CPU's capacity by in order to determine
- * the utilized threshold.
- *
- * 6 was chosen based on testing as the number that achieved the best balance
- * of power and performance on average.
- *
- * The resulting threshold is high enough to not be triggered by background
- * noise and low enough to react quickly when activity starts to ramp up.
- */
-#define UTIL_THRESHOLD_SHIFT 6
-
-/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
#define PULSE 1024
#define DECAY_SHIFT 3
-/*
- * Number of the most recent idle duration values to take into consideration for
- * the detection of recent early wakeup patterns.
- */
-#define NR_RECENT 9
-
/**
* struct teo_bin - Metrics used by the TEO cpuidle governor.
* @intercepts: The "intercepts" metric.
* @hits: The "hits" metric.
- * @recent: The number of recent "intercepts".
*/
struct teo_bin {
unsigned int intercepts;
unsigned int hits;
- unsigned int recent;
};
/**
@@ -185,42 +123,19 @@ struct teo_bin {
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
- * @next_recent_idx: Index of the next @recent_idx entry to update.
- * @recent_idx: Indices of bins corresponding to recent "intercepts".
* @tick_hits: Number of "hits" after TICK_NSEC.
- * @util_threshold: Threshold above which the CPU is considered utilized
*/
struct teo_cpu {
s64 time_span_ns;
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
- int next_recent_idx;
- int recent_idx[NR_RECENT];
unsigned int tick_hits;
- unsigned long util_threshold;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
/**
- * teo_cpu_is_utilized - Check if the CPU's util is above the threshold
- * @cpu: Target CPU
- * @cpu_data: Governor CPU data for the target CPU
- */
-#ifdef CONFIG_SMP
-static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
-{
- return sched_cpu_util(cpu) > cpu_data->util_threshold;
-}
-#else
-static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
-{
- return false;
-}
-#endif
-
-/**
* teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
@@ -286,13 +201,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- i = cpu_data->next_recent_idx++;
- if (cpu_data->next_recent_idx >= NR_RECENT)
- cpu_data->next_recent_idx = 0;
-
- if (cpu_data->recent_idx[i] >= 0)
- cpu_data->state_bins[cpu_data->recent_idx[i]].recent--;
-
/*
* If the deepest state's target residency is below the tick length,
* make a record of it to help teo_select() decide whether or not
@@ -319,14 +227,10 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Otherwise, update the "intercepts" metric for the bin fallen into by
* the measured idle duration.
*/
- if (idx_timer == idx_duration) {
+ if (idx_timer == idx_duration)
cpu_data->state_bins[idx_timer].hits += PULSE;
- cpu_data->recent_idx[i] = -1;
- } else {
+ else
cpu_data->state_bins[idx_duration].intercepts += PULSE;
- cpu_data->state_bins[idx_duration].recent++;
- cpu_data->recent_idx[i] = idx_duration;
- }
end:
cpu_data->total += PULSE;
@@ -379,14 +283,11 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
unsigned int tick_intercept_sum = 0;
unsigned int idx_intercept_sum = 0;
unsigned int intercept_sum = 0;
- unsigned int idx_recent_sum = 0;
- unsigned int recent_sum = 0;
unsigned int idx_hit_sum = 0;
unsigned int hit_sum = 0;
int constraint_idx = 0;
int idx0 = 0, idx = -1;
- bool alt_intercepts, alt_recent;
- bool cpu_utilized;
+ int prev_intercept_idx;
s64 duration_ns;
int i;
@@ -411,32 +312,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (!dev->states_usage[0].disable)
idx = 0;
- cpu_utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
- /*
- * If the CPU is being utilized over the threshold and there are only 2
- * states to choose from, the metrics need not be considered, so choose
- * the shallowest non-polling state and exit.
- */
- if (drv->state_count < 3 && cpu_utilized) {
- /*
- * If state 0 is enabled and it is not a polling one, select it
- * right away unless the scheduler tick has been stopped, in
- * which case care needs to be taken to leave the CPU in a deep
- * enough state in case it is not woken up any time soon after
- * all. If state 1 is disabled, though, state 0 must be used
- * anyway.
- */
- if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
- teo_state_ok(0, drv)) || dev->states_usage[1].disable) {
- idx = 0;
- goto out_tick;
- }
- /* Assume that state 1 is not a polling one and use it. */
- idx = 1;
- duration_ns = drv->states[1].target_residency_ns;
- goto end;
- }
-
/* Compute the sums of metrics for early wakeup pattern detection. */
for (i = 1; i < drv->state_count; i++) {
struct teo_bin *prev_bin = &cpu_data->state_bins[i-1];
@@ -448,7 +323,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
intercept_sum += prev_bin->intercepts;
hit_sum += prev_bin->hits;
- recent_sum += prev_bin->recent;
if (dev->states_usage[i].disable)
continue;
@@ -464,7 +338,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/* Save the sums for the current state. */
idx_intercept_sum = intercept_sum;
idx_hit_sum = hit_sum;
- idx_recent_sum = recent_sum;
}
/* Avoid unnecessary overhead. */
@@ -489,37 +362,29 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* If the sum of the intercepts metric for all of the idle states
* shallower than the current candidate one (idx) is greater than the
* sum of the intercepts and hits metrics for the candidate state and
- * all of the deeper states, or the sum of the numbers of recent
- * intercepts over all of the states shallower than the candidate one
- * is greater than a half of the number of recent events taken into
- * account, a shallower idle state is likely to be a better choice.
+ * all of the deeper states, a shallower idle state is likely to be a
+ * better choice.
*/
- alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
- alt_recent = idx_recent_sum > NR_RECENT / 2;
- if (alt_recent || alt_intercepts) {
+ prev_intercept_idx = idx;
+ if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
int first_suitable_idx = idx;
/*
* Look for the deepest idle state whose target residency had
* not exceeded the idle duration in over a half of the relevant
- * cases (both with respect to intercepts overall and with
- * respect to the recent intercepts only) in the past.
+ * cases in the past.
*
* Take the possible duration limitation present if the tick
* has been stopped already into account.
*/
intercept_sum = 0;
- recent_sum = 0;
for (i = idx - 1; i >= 0; i--) {
struct teo_bin *bin = &cpu_data->state_bins[i];
intercept_sum += bin->intercepts;
- recent_sum += bin->recent;
- if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
- (!alt_intercepts ||
- 2 * intercept_sum > idx_intercept_sum)) {
+ if (2 * intercept_sum > idx_intercept_sum) {
/*
* Use the current state unless it is too
* shallow or disabled, in which case take the
@@ -552,6 +417,15 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
first_suitable_idx = i;
}
}
+ if (!idx && prev_intercept_idx) {
+ /*
+ * We have to query the sleep length here otherwise we don't
+ * know after wakeup if our guess was correct.
+ */
+ duration_ns = tick_nohz_get_sleep_length(&delta_tick);
+ cpu_data->sleep_length_ns = duration_ns;
+ goto out_tick;
+ }
/*
* If there is a latency constraint, it may be necessary to select an
@@ -561,18 +435,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = constraint_idx;
/*
- * If the CPU is being utilized over the threshold, choose a shallower
- * non-polling state to improve latency, unless the scheduler tick has
- * been stopped already and the shallower state's target residency is
- * not sufficiently large.
- */
- if (cpu_utilized) {
- i = teo_find_shallower_state(drv, dev, idx, KTIME_MAX, true);
- if (teo_state_ok(i, drv))
- idx = i;
- }
-
- /*
* Skip the timers check if state 0 is the current candidate one,
* because an immediate non-timer wakeup is expected in that case.
*/
@@ -592,7 +454,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
cpu_data->sleep_length_ns = duration_ns;
/*
- * If the closest expected timer is before the terget residency of the
+ * If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
*/
if (drv->states[idx].target_residency_ns > duration_ns) {
@@ -667,14 +529,8 @@ static int teo_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu);
- int i;
memset(cpu_data, 0, sizeof(*cpu_data));
- cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT;
-
- for (i = 0; i < NR_RECENT; i++)
- cpu_data->recent_idx[i] = -1;
return 0;
}
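With the recent-intercepts bookkeeping and the util-awareness path removed, the only trigger left in teo_select() for considering a shallower state is the comparison 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum. A hedged numeric illustration (all values invented):

	#include <linux/types.h>

	static bool teo_fallback_example(void)
	{
		unsigned int total = 8192;		/* hits + intercepts over all bins */
		unsigned int idx_hit_sum = 3072;	/* hits shallower than the candidate */
		unsigned int idx_intercept_sum = 2600;	/* intercepts shallower than the candidate */

		/* 2 * 2600 = 5200 > 8192 - 3072 = 5120: look for a shallower state. */
		return 2 * idx_intercept_sum > total - idx_hit_sum;
	}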
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index c631f99e415f..05210a0edb8a 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -10,7 +10,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore platform driver backend"
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
select SOC_BUS
select CRYPTO_DEV_FSL_CAAM_COMMON
imply FSL_MC_BUS
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a4f6884416a0..207dc422785a 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -4990,11 +4990,23 @@ err_dma_map:
return err;
}
+static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for_each_cpu(i, cpus) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ free_netdev(ppriv->net_dev);
+ }
+}
+
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
+ cpumask_t clean_mask;
int err, cpu;
u8 i;
@@ -5073,6 +5085,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
}
}
+ cpumask_clear(&clean_mask);
i = 0;
for_each_online_cpu(cpu) {
u8 j;
@@ -5096,15 +5109,23 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
priv->rx_queue_attr[j].fqid,
priv->tx_queue_attr[j].fqid);
- ppriv->net_dev.dev = *dev;
- INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
- netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
+ ppriv->net_dev = alloc_netdev_dummy(0);
+ if (!ppriv->net_dev) {
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ cpumask_set_cpu(cpu, &clean_mask);
+ ppriv->net_dev->dev = *dev;
+
+ netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
dpaa2_dpseci_poll,
DPAA2_CAAM_NAPI_WEIGHT);
}
return 0;
+err_alloc_netdev:
+ free_dpaa2_pcpu_netdev(priv, &clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
@@ -5153,6 +5174,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
ppriv = per_cpu_ptr(priv->ppriv, i);
napi_disable(&ppriv->napi);
netif_napi_del(&ppriv->napi);
+ free_netdev(ppriv->net_dev);
}
return 0;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index abb502bb675c..61d1219a202f 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -81,7 +81,7 @@ struct dpaa2_caam_priv {
*/
struct dpaa2_caam_priv_per_cpu {
struct napi_struct napi;
- struct net_device net_dev;
+ struct net_device *net_dev;
int req_fqid;
int rsp_fqid;
int prio;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bd418dea586d..d4b39184dbdb 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -80,6 +80,7 @@ static void build_deinstantiation_desc(u32 *desc, int handle)
append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
+#ifdef CONFIG_OF
static const struct of_device_id imx8m_machine_match[] = {
{ .compatible = "fsl,imx8mm", },
{ .compatible = "fsl,imx8mn", },
@@ -88,6 +89,7 @@ static const struct of_device_id imx8m_machine_match[] = {
{ .compatible = "fsl,imx8ulp", },
{ }
};
+#endif
/*
* run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 46a083849a8e..ba8fb5d8a7b2 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -57,7 +57,7 @@ struct caam_napi {
*/
struct caam_qi_pcpu_priv {
struct caam_napi caam_napi;
- struct net_device net_dev;
+ struct net_device *net_dev;
struct qman_fq *rsp_fq;
} ____cacheline_aligned;
@@ -144,7 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
{
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
- struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
struct caam_drv_private *priv = dev_get_drvdata(qidev);
fd = &msg->ern.fd;
@@ -530,6 +530,7 @@ static void caam_qi_shutdown(void *data)
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
+ free_netdev(per_cpu(pcpu_qipriv.net_dev, i));
}
qman_delete_cgr_safe(&priv->cgr);
@@ -573,7 +574,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
- struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
@@ -718,12 +719,24 @@ static void free_rsp_fqs(void)
kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
+static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
+{
+ struct caam_qi_pcpu_priv *priv;
+ int i;
+
+ for_each_cpu(i, cpus) {
+ priv = per_cpu_ptr(&pcpu_qipriv, i);
+ free_netdev(priv->net_dev);
+ }
+}
+
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
+ cpumask_t clean_mask;
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = ctrldev;
@@ -743,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
return err;
}
+ cpumask_clear(&clean_mask);
+
/*
* Enable the NAPI contexts on each of the core which has an affine
* portal.
@@ -751,10 +766,16 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
struct caam_napi *caam_napi = &priv->caam_napi;
struct napi_struct *irqtask = &caam_napi->irqtask;
- struct net_device *net_dev = &priv->net_dev;
+ struct net_device *net_dev;
+ net_dev = alloc_netdev_dummy(0);
+ if (!net_dev) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ cpumask_set_cpu(i, &clean_mask);
+ priv->net_dev = net_dev;
net_dev->dev = *qidev;
- INIT_LIST_HEAD(&net_dev->napi_list);
netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll,
CAAM_NAPI_WEIGHT);
@@ -766,16 +787,22 @@ int caam_qi_init(struct platform_device *caam_pdev)
dma_get_cache_alignment(), 0, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
- free_rsp_fqs();
- return -ENOMEM;
+ err = -ENOMEM;
+ goto fail2;
}
caam_debugfs_qi_init(ctrlpriv);
err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
if (err)
- return err;
+ goto fail2;
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
+
+fail2:
+ free_rsp_fqs();
+fail:
+ free_caam_qi_pcpu_netdev(&clean_mask);
+ return err;
}
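The caam/qi.c and caamalg_qi2.c changes above stop embedding a struct net_device in the per-CPU private data and instead allocate a dummy netdev with alloc_netdev_dummy(), recording which CPUs were actually set up in a local cpumask so the failure path only frees what exists. A hedged sketch of that allocate-and-record-for-unwind shape (the per-CPU structure and names are illustrative):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/percpu.h>

	struct example_pcpu {
		struct napi_struct napi;
		struct net_device *net_dev;
	};

	static DEFINE_PER_CPU(struct example_pcpu, example_pcpu);

	static int example_setup(const cpumask_t *cpus,
				 int (*poll)(struct napi_struct *, int))
	{
		cpumask_t clean_mask;
		int cpu;

		cpumask_clear(&clean_mask);

		for_each_cpu(cpu, cpus) {
			struct example_pcpu *p = per_cpu_ptr(&example_pcpu, cpu);

			p->net_dev = alloc_netdev_dummy(0);
			if (!p->net_dev)
				goto unwind;

			/* Only CPUs marked here need cleanup on failure. */
			cpumask_set_cpu(cpu, &clean_mask);
			netif_napi_add_tx_weight(p->net_dev, &p->napi, poll, 64);
		}

		return 0;

	unwind:
		for_each_cpu(cpu, &clean_mask)
			free_netdev(per_cpu_ptr(&example_pcpu, cpu)->net_dev);
		return -ENOMEM;
	}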
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 6f9266edc9f1..eac73cbfdd38 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -39,7 +39,8 @@ intel_qat-objs := adf_cfg.o \
adf_sysfs_rl.o \
qat_uclo.o \
qat_hal.o \
- qat_bl.o
+ qat_bl.o \
+ qat_mig_dev.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
@@ -56,6 +57,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o
+ adf_gen2_pfvf.o adf_gen4_pfvf.o
intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 784843fa2a22..3df10517a327 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
struct cxl_dport *dport = NULL;
int single_port_map[1];
unsigned long index;
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+
+ /*
+ * Capability checks are moot for passthrough decoders, support
+ * any and all possibilities.
+ */
+ cxlhdm->interleave_mask = ~0U;
+ cxlhdm->iw_cap_mask = ~0UL;
cxlsd = cxl_switch_decoder_alloc(port, 1);
if (IS_ERR(cxlsd))
@@ -79,6 +87,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(11, 8);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
cxlhdm->interleave_mask |= GENMASK(14, 12);
+ cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
+ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
+ cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
+ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
+ cxlhdm->iw_cap_mask |= BIT(16);
}
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index e69625a8d6a1..c00f3a933164 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,10 +62,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
+/**
+ * cxl_find_nvdimm_bridge() - find a bridge device relative to a port
+ * @port: any descendant port of an nvdimm-bridge associated
+ * root-cxl-port
+ */
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
{
- struct cxl_root *cxl_root __free(put_cxl_root) =
- find_cxl_root(cxlmd->endpoint);
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
struct device *dev;
if (!cxl_root)
@@ -242,18 +246,20 @@ static void cxlmd_release_nvdimm(void *_cxlmd)
/**
* devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
+ * @parent_port: parent port for the (to be added) @cxlmd endpoint port
* @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
*
* Return: 0 on success negative error code on failure.
*/
-int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
+int devm_cxl_add_nvdimm(struct cxl_port *parent_port,
+ struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
struct device *dev;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
+ cxl_nvb = cxl_find_nvdimm_bridge(parent_port);
if (!cxl_nvb)
return -ENODEV;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 00a9f0eef8dd..538ebd5a64fd 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1101,6 +1101,26 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
cxld = cxl_rr->decoder;
+ /*
+ * the number of targets should not exceed the target_count
+ * of the decoder
+ */
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = to_cxl_switch_decoder(&cxld->dev);
+ if (cxl_rr->nr_targets > cxlsd->nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
+ dev_name(port->uport_dev), dev_name(&port->dev),
+ dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxled->cxld.dev), pos,
+ cxlsd->nr_targets);
+ rc = -ENXIO;
+ goto out_erase;
+ }
+ }
+
rc = cxl_rr_ep_add(cxl_rr, cxled);
if (rc) {
dev_dbg(&cxlr->dev,
@@ -1210,6 +1230,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
return 0;
}
+static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ unsigned int interleave_mask;
+ u8 eiw;
+ u16 eig;
+ int high_pos, low_pos;
+
+ if (!test_bit(iw, &cxlhdm->iw_cap_mask))
+ return -ENXIO;
+ /*
+ * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
+ * if eiw < 8:
+ * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
+ * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
+ *
+ * when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
+ * interleave bits are none.
+ *
+ * if eiw >= 8:
+ * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
+ * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
+ *
+ * when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
+ * interleave bits are none.
+ */
+ ways_to_eiw(iw, &eiw);
+ if (eiw == 0 || eiw == 8)
+ return 0;
+
+ granularity_to_eig(ig, &eig);
+ if (eiw > 8)
+ high_pos = eiw + eig - 1;
+ else
+ high_pos = eiw + eig + 7;
+ low_pos = eig + 8;
+ interleave_mask = GENMASK(high_pos, low_pos);
+ if (interleave_mask & ~cxlhdm->interleave_mask)
+ return -ENXIO;
+
+ return 0;
+}
+
static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
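check_interleave_cap() above converts the requested ways/granularity into the HPA bit positions the decoder would have to interleave on and rejects the request if any of them are missing from interleave_mask. Worked example (hedged): iw = 4 gives eiw = 2 and ig = 256 bytes gives eig = 0 under the encodings used by ways_to_eiw()/granularity_to_eig(), so the bits that must be supported are GENMASK(eiw + eig + 7, eig + 8) = GENMASK(9, 8):

	#include <linux/bits.h>

	/* Hedged illustration for one request: iw = 4 (eiw = 2), ig = 256 (eig = 0). */
	static unsigned int example_interleave_bits(void)
	{
		unsigned int eiw = 2, eig = 0;

		/* eiw < 8: HPA bits [eig + 8 + eiw - 1 : eig + 8] select the target. */
		return GENMASK(eiw + eig + 7, eig + 8);	/* GENMASK(9, 8) = 0x300 */
	}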
@@ -1360,6 +1424,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
return -ENXIO;
}
} else {
+ rc = check_interleave_cap(cxld, iw, ig);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s iw: %d ig: %d is not supported\n",
+ dev_name(port->uport_dev),
+ dev_name(&port->dev), iw, ig);
+ return rc;
+ }
+
cxld->interleave_ways = iw;
cxld->interleave_granularity = ig;
cxld->hpa_range = (struct range) {
@@ -1796,6 +1869,15 @@ static int cxl_region_attach(struct cxl_region *cxlr,
struct cxl_dport *dport;
int rc = -ENXIO;
+ rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
+ p->interleave_granularity);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
+ dev_name(&cxled->cxld.dev), p->interleave_ways,
+ p->interleave_granularity);
+ return rc;
+ }
+
if (cxled->mode != cxlr->mode) {
dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
@@ -2352,15 +2434,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
struct device *dev;
int rc;
- switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
- break;
- default:
- dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
- return ERR_PTR(-EINVAL);
- }
-
cxlr = cxl_region_alloc(cxlrd, id);
if (IS_ERR(cxlr))
return cxlr;
@@ -2415,6 +2488,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
{
int rc;
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
+ default:
+ dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+ return ERR_PTR(-EINVAL);
+ }
+
rc = memregion_alloc(GFP_KERNEL);
if (rc < 0)
return ERR_PTR(rc);
@@ -2688,22 +2770,33 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
{
struct cxl_dpa_to_region_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
+ struct cxl_region *cxlr;
u64 dpa = ctx->dpa;
if (!is_endpoint_decoder(dev))
return 0;
cxled = to_cxl_endpoint_decoder(dev);
- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
return 0;
- dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
- dev_name(&cxled->cxld.region->dev));
+ /*
+ * Stop the region search (return 1) when an endpoint mapping is
+ * found. The region may not be fully constructed yet, so a valid
+ * cxlr in the context structure is not guaranteed.
+ */
+ cxlr = cxled->cxld.region;
+ if (cxlr)
+ dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+ dev_name(&cxlr->dev));
+ else
+ dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
+ dev_name(dev));
- ctx->cxlr = cxled->cxld.region;
+ ctx->cxlr = cxlr;
return 1;
}
@@ -2847,7 +2940,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
* bridge for one device is the same for all.
*/
if (i == 0) {
- cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
if (!cxl_nvb)
return -ENODEV;
cxlr->cxl_nvb = cxl_nvb;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 603c0120cff8..a6613a6f8923 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -47,6 +47,8 @@ extern const struct nvdimm_security_ops *cxl_security_ops;
#define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
#define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
#define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
+#define CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11)
+#define CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12)
#define CXL_HDM_DECODER_CTRL_OFFSET 0x4
#define CXL_HDM_DECODER_ENABLE BIT(1)
#define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
@@ -855,8 +857,8 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
-int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
+int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 19aba81cdf13..af8169ccdbc0 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -395,9 +395,9 @@ enum cxl_devtype {
/**
* struct cxl_dpa_perf - DPA performance property entry
- * @dpa_range - range for DPA address
- * @coord - QoS performance data (i.e. latency, bandwidth)
- * @qos_class - QoS Class cookies
+ * @dpa_range: range for DPA address
+ * @coord: QoS performance data (i.e. latency, bandwidth)
+ * @qos_class: QoS Class cookies
*/
struct cxl_dpa_perf {
struct range dpa_range;
@@ -464,13 +464,14 @@ struct cxl_dev_state {
* @active_persistent_bytes: sum of hard + soft persistent
* @next_volatile_bytes: volatile capacity change pending device reset
* @next_persistent_bytes: persistent capacity change pending device reset
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
* @event: event log driver state
* @poison: poison driver state info
* @security: security driver state info
* @fw: firmware upload / activation state
+ * @mbox_wait: RCU wait for mbox send to complete
* @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf: performance data entry matched to RAM partition
- * @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -851,11 +852,21 @@ static inline void cxl_mem_active_dec(void)
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
+/**
+ * struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities
+ * @regs: mapped registers, see devm_cxl_setup_hdm()
+ * @decoder_count: number of decoders for this port
+ * @target_count: for switch decoders, max downstream port targets
+ * @interleave_mask: interleave granularity capability, see check_interleave_cap()
+ * @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap()
+ * @port: mapped cxl_port, see devm_cxl_setup_hdm()
+ */
struct cxl_hdm {
struct cxl_component_regs regs;
unsigned int decoder_count;
unsigned int target_count;
unsigned int interleave_mask;
+ unsigned long iw_cap_mask;
struct cxl_port *port;
};
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 0c79d9ce877c..2f1b49bfe162 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -152,6 +152,15 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
+ if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
+ rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
+ if (rc) {
+ if (rc == -ENODEV)
+ dev_info(dev, "PMEM disabled by platform\n");
+ return rc;
+ }
+ }
+
if (dport->rch)
endpoint_parent = parent_port->uport_dev;
else
@@ -174,14 +183,6 @@ unlock:
if (rc)
return rc;
- if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
- rc = devm_cxl_add_nvdimm(cxlmd);
- if (rc == -ENODEV)
- dev_info(dev, "PMEM disabled by platform\n");
- else
- return rc;
- }
-
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index b7c6f7ea9e0c..6a1bfcd0cc21 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
t[i].before = pass;
t[i].task = kthread_run(thread_signal_callback, &t[i],
"dma-fence:%d", i);
+ if (IS_ERR(t[i].task)) {
+ ret = PTR_ERR(t[i].task);
+ while (--i >= 0)
+ kthread_stop_put(t[i].task);
+ return ret;
+ }
get_task_struct(t[i].task);
}
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 101394f16930..237bce21d1e7 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s: %d\n", obj->name, obj->value);
- spin_lock_irq(&obj->lock);
+ spin_lock(&obj->lock); /* Caller already disabled IRQ. */
list_for_each(pos, &obj->pt_list) {
struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false);
}
- spin_unlock_irq(&obj->lock);
+ spin_unlock(&obj->lock);
}
static void sync_print_sync_file(struct seq_file *s,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 002a5ec80620..9fc99cfbef08 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -394,7 +394,7 @@ config LS2X_APB_DMA
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
- depends on M5441x || COMPILE_TEST
+ depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 8dc029c86551..fc049c9c9892 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
spin_unlock(&irq_entry->list_lock);
- list_for_each_entry(desc, &flist, list) {
+ list_for_each_entry_safe(desc, n, &flist, list) {
/*
* Check against the original status as ABORT is software defined
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
+ list_del(&desc->list);
+
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
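The switch to list_for_each_entry_safe() above is the classic delete-while-iterating fix: once a descriptor has been completed it may be freed, so the iterator must cache the successor before the current node disappears. A tiny userspace sketch of the same idea (hypothetical, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

int main(void)
{
	struct node *head = NULL, *n, *next;

	for (int i = 3; i > 0; i--) {           /* build list 1 -> 2 -> 3 */
		n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/* The unsafe form "for (n = head; n; n = n->next) free(n);" would read
	 * n->next after free(n). The safe form caches the successor first,
	 * which is what list_for_each_entry_safe() does with its extra cursor. */
	for (n = head; n; n = next) {
		next = n->next;
		printf("completing desc %d\n", n->id);
		free(n);
	}
	return 0;
}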
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 9c364e92cb82..e8f45a7fded4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -534,18 +534,6 @@ err_out:
return err;
}
-static int ioat_register(struct ioatdma_device *ioat_dma)
-{
- int err = dma_async_device_register(&ioat_dma->dma_dev);
-
- if (err) {
- ioat_disable_interrupts(ioat_dma);
- dma_pool_destroy(ioat_dma->completion_pool);
- }
-
- return err;
-}
-
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
struct dma_device *dma = &ioat_dma->dma_dev;
@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
}
- err = ioat_register(ioat_dma);
+ err = dma_async_device_register(&ioat_dma->dma_dev);
if (err)
- return err;
+ goto err_disable_interrupts;
ioat_kobject_add(ioat_dma, &ioat_ktype);
@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
/* disable relaxed ordering */
err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
/* clear relaxed ordering enable */
val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
if (ioat_dma->cap & IOAT_CAP_DPS)
writeb(ioat_pending_level + 1,
ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
return 0;
+
+err_disable_interrupts:
+ ioat_disable_interrupts(ioat_dma);
+ dma_pool_destroy(ioat_dma->completion_pool);
+ return err;
}
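As a side note, the reshuffled error handling above follows the usual goto-unwind idiom: every failure after interrupts are enabled funnels through one cleanup label that releases resources in reverse order. A compressed, self-contained sketch of the pattern with hypothetical stand-in functions (enable_interrupts() and friends are made up for illustration):

#include <stdio.h>

static int enable_interrupts(void)   { return 0; }
static void disable_interrupts(void) { puts("interrupts disabled"); }
static int register_engine(void)     { return -1; /* simulate failure */ }

static int probe_sketch(void)
{
	int err;

	err = enable_interrupts();
	if (err)
		return err;

	err = register_engine();
	if (err)
		goto err_disable_interrupts;

	return 0;

err_disable_interrupts:
	disable_interrupts();   /* undo everything acquired before the failure */
	return err;
}

int main(void)
{
	printf("probe_sketch() = %d\n", probe_sketch());
	return 0;
}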
static void ioat_shutdown(struct pci_dev *pdev)
@@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct ioatdma_device *device;
+ unsigned int i;
+ u8 version;
int err;
err = pcim_enable_device(pdev);
@@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!iomap)
return -ENOMEM;
+ version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
+ if (version < IOAT_VER_3_0)
+ return -ENODEV;
+
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
return err;
@@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
pci_set_drvdata(pdev, device);
- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ device->version = version;
if (device->version >= IOAT_VER_3_4)
ioat_dca_enabled = 0;
- if (device->version >= IOAT_VER_3_0) {
- if (is_skx_ioat(pdev))
- device->version = IOAT_VER_3_2;
- err = ioat3_dma_probe(device, ioat_dca_enabled);
- } else
- return -ENODEV;
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
+
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
if (err) {
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(device->idx[i]);
+ kfree(device);
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
return -ENODEV;
}
@@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
pci_unregister_driver(&ioat_pci_driver);
+ kmem_cache_destroy(ioat_sed_cache);
kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c9b93055dc9d..dd1a068f905d 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu
ret = of_k3_udma_glue_parse(udmax_np, common);
if (ret)
- goto out_put_spec;
+ return ret;
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
-
-out_put_spec:
- of_node_put(udmax_np);
return ret;
}
@@ -1531,6 +1528,9 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
}
+ if (!flow->virq)
+ return -ENXIO;
+
return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index e143a7330816..718842fdaf98 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ spin_lock(&xchan->vchan.lock);
+
if (xchan->stop_requested)
complete(&xchan->last_interrupt);
- spin_lock(&xchan->vchan.lock);
-
/* get submitted request */
vd = vchan_next_desc(&xchan->vchan);
if (!vd)
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 9c09893695b7..4edfb83ffbee 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -54,11 +54,13 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o
layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o
obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o
-skx_edac-y := skx_common.o skx_base.o
-obj-$(CONFIG_EDAC_SKX) += skx_edac.o
+skx_edac_common-y := skx_common.o
-i10nm_edac-y := skx_common.o i10nm_base.o
-obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o
+skx_edac-y := skx_base.o
+obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
+
+i10nm_edac-y := i10nm_base.o
+obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
obj-$(CONFIG_EDAC_CELL) += cell_edac.o
obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 1f3520d76861..ddfbdb66b794 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -20,7 +20,6 @@ static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
return reg;
switch (reg) {
- case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5;
case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
}
@@ -81,7 +80,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error reading F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
@@ -94,7 +93,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error writing to F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
/*
@@ -1025,8 +1024,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt)
}
ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
- if (ret)
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
goto out;
+ }
gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
@@ -1339,22 +1340,15 @@ static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
- u32 i, tmp, umc_base;
+ u32 i;
for_each_umc(i) {
- umc_base = get_umc_base(i);
umc = &pvt->umc[i];
edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
-
- amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
- edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
-
- amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
- edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
@@ -1367,14 +1361,6 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
- if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
- amd_smn_read(pvt->mc_node_id,
- umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
- &tmp);
- edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
- i, 1 << ((tmp >> 4) & 0x3));
- }
-
umc_debug_display_dimm_sizes(pvt, i);
}
}
@@ -1452,6 +1438,7 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
u32 *base, *base_sec;
u32 *mask, *mask_sec;
int cs, umc;
+ u32 tmp;
for_each_umc(umc) {
umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
@@ -1464,13 +1451,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
base_reg = umc_base_reg + (cs * 4);
base_reg_sec = umc_base_reg_sec + (cs * 4);
- if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
+ if (!amd_smn_read(pvt->mc_node_id, base_reg, &tmp)) {
+ *base = tmp;
edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *base, base_reg);
+ }
- if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
+ if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, &tmp)) {
+ *base_sec = tmp;
edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *base_sec, base_reg_sec);
+ }
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
@@ -1483,13 +1474,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
mask_reg = umc_mask_reg + (cs * 4);
mask_reg_sec = umc_mask_reg_sec + (cs * 4);
- if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg, &tmp)) {
+ *mask = tmp;
edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *mask, mask_reg);
+ }
- if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, &tmp)) {
+ *mask_sec = tmp;
edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *mask_sec, mask_reg_sec);
+ }
}
}
}
@@ -2908,7 +2903,7 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt)
{
u8 nid = pvt->mc_node_id;
struct amd64_umc *umc;
- u32 i, umc_base;
+ u32 i, tmp, umc_base;
/* Read registers from each UMC */
for_each_umc(i) {
@@ -2916,11 +2911,20 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt)
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
- amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
- amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
- amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
- amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
- amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
+ if (!amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &tmp))
+ umc->dimm_cfg = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
+ umc->umc_cfg = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
+ umc->sdp_ctrl = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
+ umc->ecc_ctrl = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &tmp))
+ umc->umc_cap_hi = tmp;
}
}
@@ -3649,16 +3653,21 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt)
{
u8 nid = pvt->mc_node_id;
struct amd64_umc *umc;
- u32 i, umc_base;
+ u32 i, tmp, umc_base;
/* Read registers from each UMC */
for_each_umc(i) {
umc_base = gpu_get_umc_base(pvt, i, 0);
umc = &pvt->umc[i];
- amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
- amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
- amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
+ if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
+ umc->umc_cfg = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
+ umc->sdp_ctrl = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
+ umc->ecc_ctrl = tmp;
}
}
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index b879b12971e7..17228d07de4c 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -256,15 +256,11 @@
#define UMCCH_ADDR_MASK 0x20
#define UMCCH_ADDR_MASK_SEC 0x28
#define UMCCH_ADDR_MASK_SEC_DDR5 0x30
-#define UMCCH_ADDR_CFG 0x30
-#define UMCCH_ADDR_CFG_DDR5 0x40
#define UMCCH_DIMM_CFG 0x80
#define UMCCH_DIMM_CFG_DDR5 0x90
#define UMCCH_UMC_CFG 0x100
#define UMCCH_SDP_CTRL 0x104
#define UMCCH_ECC_CTRL 0x14C
-#define UMCCH_ECC_BAD_SYMBOL 0xD90
-#define UMCCH_UMC_CAP 0xDF0
#define UMCCH_UMC_CAP_HI 0xDF4
/* UMC CH bitfields */
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index 4e30b989a1a4..5e52d31db3b8 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -480,7 +480,6 @@ static int dmc520_edac_probe(struct platform_device *pdev)
struct mem_ctl_info *mci;
void __iomem *reg_base;
u32 irq_mask_all = 0;
- struct resource *res;
struct device *dev;
int ret, idx, irq;
u32 reg_val;
@@ -505,8 +504,7 @@ static int dmc520_edac_probe(struct platform_device *pdev)
}
/* Initialize dmc520 edac */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index cf2b618c1ada..1eb0136c6fbd 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -547,7 +547,7 @@ static int __init ghes_edac_init(void)
return -ENODEV;
if (list_empty(ghes_devs)) {
- pr_info("GHES probing device list is empty");
+ pr_info("GHES probing device list is empty\n");
return -ENODEV;
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 3fd22a1eb1a9..24dd896d9a9d 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -942,16 +942,16 @@ static struct res_config gnr_cfg = {
};
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
+ X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
+ X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
+ X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index cdd8480e7368..0fe75eed8973 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -258,6 +258,11 @@ static struct work_struct ecclog_work;
#define DID_MTL_P_SKU2 0x7d02
#define DID_MTL_P_SKU3 0x7d14
+/* Compute die IDs for Arrow Lake-UH with IBECC */
+#define DID_ARL_UH_SKU1 0x7d06
+#define DID_ARL_UH_SKU2 0x7d20
+#define DID_ARL_UH_SKU3 0x7d30
+
static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
{
union {
@@ -597,6 +602,9 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU1), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU2), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU3), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_ARL_UH_SKU1), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_ARL_UH_SKU2), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_ARL_UH_SKU3), (kernel_ulong_t)&mtl_p_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
@@ -800,7 +808,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
if (enable)
errcmd |= ERRCMD_CE | ERRSTS_UE;
@@ -809,7 +817,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
return 0;
}
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index d2f895033280..0d42c1238908 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -69,6 +69,7 @@ static void __exit fsl_ddr_mc_exit(void)
module_exit(fsl_ddr_mc_exit);
+MODULE_DESCRIPTION("Freescale Layerscape EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NXP Semiconductor");
module_param(edac_op_state, int, 0444);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index e8945d4adbad..d0266cbcbeda 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -704,6 +704,7 @@ static void __exit mpc85xx_mc_exit(void)
module_exit(mpc85xx_mc_exit);
+MODULE_DESCRIPTION("Freescale MPC85xx Memory Controller EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index 919095d10528..2adb9c8093f8 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -201,5 +201,6 @@ static struct platform_driver octeon_l2c_driver = {
};
module_platform_driver(octeon_l2c_driver);
+MODULE_DESCRIPTION("Cavium Octeon Secondary Caches (L2C) EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 18615cbcd9ea..4112c2ee34b8 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -319,5 +319,6 @@ static struct platform_driver octeon_lmc_edac_driver = {
};
module_platform_driver(octeon_lmc_edac_driver);
+MODULE_DESCRIPTION("Cavium Octeon DRAM Memory Controller (LMC) EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index b8404cc7b65f..d9eeb40d2784 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -137,5 +137,6 @@ static struct platform_driver co_cache_error_driver = {
};
module_platform_driver(co_cache_error_driver);
+MODULE_DESCRIPTION("Cavium Octeon Primary Caches EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 108ad9493cfb..4d368af2c5f0 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -104,5 +104,6 @@ static struct platform_driver octeon_pci_driver = {
};
module_platform_driver(octeon_pci_driver);
+MODULE_DESCRIPTION("Cavium Octeon PCI Controller EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 2afcd148fcf8..f93f2f2b1cf2 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1511,8 +1511,8 @@ static struct dunit_ops dnv_ops = {
};
static const struct x86_cpu_id pnd2_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &apl_ops),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &dnv_ops),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 26cca5a9322d..cbc92d3683e6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3546,13 +3546,13 @@ fail0:
}
static const struct x86_cpu_id sbridge_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &pci_dev_descr_haswell_table),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &pci_dev_descr_broadwell_table),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &pci_dev_descr_broadwell_table),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &pci_dev_descr_knl_table),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 0a862336a7ce..af3fa807acdb 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
};
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
+ X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 27996b7924c8..8d18099fd528 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -48,7 +48,7 @@ static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
static bool skx_mem_cfg_2lm;
-int __init skx_adxl_get(void)
+int skx_adxl_get(void)
{
const char * const *names;
int i, j;
@@ -110,12 +110,14 @@ err:
return -ENODEV;
}
+EXPORT_SYMBOL_GPL(skx_adxl_get);
-void __exit skx_adxl_put(void)
+void skx_adxl_put(void)
{
kfree(adxl_values);
kfree(adxl_msg);
}
+EXPORT_SYMBOL_GPL(skx_adxl_put);
static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
{
@@ -187,12 +189,14 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
{
skx_mem_cfg_2lm = mem_cfg_2lm;
}
+EXPORT_SYMBOL_GPL(skx_set_mem_cfg);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
driver_decode = decode;
skx_show_retry_rd_err_log = show_retry_log;
}
+EXPORT_SYMBOL_GPL(skx_set_decode);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
@@ -206,6 +210,7 @@ int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
*id = GET_BITFIELD(reg, 12, 14);
return 0;
}
+EXPORT_SYMBOL_GPL(skx_get_src_id);
int skx_get_node_id(struct skx_dev *d, u8 *id)
{
@@ -219,6 +224,7 @@ int skx_get_node_id(struct skx_dev *d, u8 *id)
*id = GET_BITFIELD(reg, 0, 2);
return 0;
}
+EXPORT_SYMBOL_GPL(skx_get_node_id);
static int get_width(u32 mtr)
{
@@ -284,6 +290,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
*list = &dev_edac_list;
return ndev;
}
+EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings);
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
{
@@ -323,6 +330,7 @@ fail:
pci_dev_put(pdev);
return -ENODEV;
}
+EXPORT_SYMBOL_GPL(skx_get_hi_lo);
static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
int minval, int maxval, const char *name)
@@ -394,6 +402,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
return 1;
}
+EXPORT_SYMBOL_GPL(skx_get_dimm_info);
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
int chan, int dimmno, const char *mod_str)
@@ -442,6 +451,7 @@ unknown_size:
return (size == 0 || size == ~0ull) ? 0 : 1;
}
+EXPORT_SYMBOL_GPL(skx_get_nvdimm_info);
int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
const char *ctl_name, const char *mod_str,
@@ -512,6 +522,7 @@ fail0:
imc->mci = NULL;
return rc;
}
+EXPORT_SYMBOL_GPL(skx_register_mci);
static void skx_unregister_mci(struct skx_imc *imc)
{
@@ -688,6 +699,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
mce->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_DONE;
}
+EXPORT_SYMBOL_GPL(skx_mce_check_error);
void skx_remove(void)
{
@@ -725,3 +737,8 @@ void skx_remove(void)
kfree(d);
}
}
+EXPORT_SYMBOL_GPL(skx_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel server processors");
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index b6d3607dffe2..11faf1db4fa4 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -231,8 +231,8 @@ typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
typedef bool (*skx_decode_f)(struct decoded_addr *res);
typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err);
-int __init skx_adxl_get(void);
-void __exit skx_adxl_put(void);
+int skx_adxl_get(void);
+void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index fab9891e569a..75c04dfc3962 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -35,12 +35,6 @@ enum {
ERR_UNKNOWN = 3,
};
-#define MAX_SYNDROME_REGS 4
-
-struct error_syndrome {
- u64 reg[MAX_SYNDROME_REGS];
-};
-
struct error_descr {
int type;
u64 mask;
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 869598b20e3a..5268b3f0a25a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -11,7 +11,7 @@ config FIREWIRE
This is the new-generation IEEE 1394 (FireWire) driver stack
a.k.a. Juju, a new implementation designed for robustness and
simplicity.
- See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ See http://ieee1394.docs.kernel.org/en/latest/migration.html
for information about migration from the older Linux 1394 stack
to the new driver stack.
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 127d87e3a153..f8b99dd6cd82 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -222,14 +222,14 @@ static int reset_bus(struct fw_card *card, bool short_reset)
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
- trace_bus_reset_initiate(card->generation, short_reset);
+ trace_bus_reset_initiate(card->index, card->generation, short_reset);
return card->driver->update_phy_reg(card, reg, 0, bit);
}
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
- trace_bus_reset_schedule(card->generation, short_reset);
+ trace_bus_reset_schedule(card->index, card->generation, short_reset);
/* We don't try hard to sort out requests of long vs. short resets. */
card->br_short = short_reset;
@@ -249,7 +249,7 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
- trace_bus_reset_postpone(card->generation, card->br_short);
+ trace_bus_reset_postpone(card->index, card->generation, card->br_short);
if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
fw_card_put(card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 55993c9e0b90..9a7dc90330a3 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1559,7 +1559,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
struct client *e_client = e->client;
u32 rcode;
- trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation,
+ trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
packet->timestamp);
switch (status) {
@@ -1659,8 +1659,8 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
memcpy(pp->data, a->data, sizeof(a->data));
}
- trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1],
- e->p.header[2]);
+ trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
+ e->p.header[1], e->p.header[2]);
card->driver->send_request(card, &e->p);
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 837cc44d8d9f..8107eebd4296 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -508,7 +508,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
- trace_bus_reset_handle(generation, node_id, bm_abdicate, self_ids, self_id_count);
+ trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
spin_lock_irqsave(&card->lock, flags);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 571fdff65c2b..76ab6a209768 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -174,8 +174,8 @@ static void transmit_complete_callback(struct fw_packet *packet,
struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet);
- trace_async_request_outbound_complete((uintptr_t)t, packet->generation, packet->speed,
- status, packet->timestamp);
+ trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
+ packet->speed, status, packet->timestamp);
switch (status) {
case ACK_COMPLETE:
@@ -398,7 +398,8 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
spin_unlock_irqrestore(&card->lock, flags);
- trace_async_request_outbound_initiate((uintptr_t)t, generation, speed, t->packet.header, payload,
+ trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
+ t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
card->driver->send_request(card, &t->packet);
@@ -463,7 +464,7 @@ static DECLARE_COMPLETION(phy_config_done);
static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
- trace_async_phy_outbound_complete((uintptr_t)packet, packet->generation, status,
+ trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status,
packet->timestamp);
complete(&phy_config_done);
}
@@ -503,7 +504,7 @@ void fw_send_phy_config(struct fw_card *card,
phy_config_packet.generation = generation;
reinit_completion(&phy_config_done);
- trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet,
+ trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
phy_config_packet.generation, phy_config_packet.header[1],
phy_config_packet.header[2]);
@@ -674,7 +675,7 @@ static void free_response_callback(struct fw_packet *packet,
{
struct fw_request *request = container_of(packet, struct fw_request, response);
- trace_async_response_outbound_complete((uintptr_t)request, packet->generation,
+ trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
packet->speed, status, packet->timestamp);
// Decrease the reference count since not at in-flight.
@@ -879,9 +880,10 @@ void fw_send_response(struct fw_card *card,
// Increase the reference count so that the object is kept during in-flight.
fw_request_get(request);
- trace_async_response_outbound_initiate((uintptr_t)request, request->response.generation,
- request->response.speed, request->response.header,
- data, data ? data_length / 4 : 0);
+ trace_async_response_outbound_initiate((uintptr_t)request, card->index,
+ request->response.generation, request->response.speed,
+ request->response.header, data,
+ data ? data_length / 4 : 0);
card->driver->send_response(card, &request->response);
}
@@ -995,7 +997,7 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
tcode = async_header_get_tcode(p->header);
if (tcode_is_link_internal(tcode)) {
- trace_async_phy_inbound((uintptr_t)p, p->generation, p->ack, p->timestamp,
+ trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp,
p->header[1], p->header[2]);
fw_cdev_handle_phy_packet(card, p);
return;
@@ -1007,8 +1009,8 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
return;
}
- trace_async_request_inbound((uintptr_t)request, p->generation, p->speed, p->ack,
- p->timestamp, p->header, request->data,
+ trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
+ p->ack, p->timestamp, p->header, request->data,
tcode_is_read_request(tcode) ? 0 : request->length / 4);
offset = async_header_get_offset(p->header);
@@ -1078,8 +1080,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
}
spin_unlock_irqrestore(&card->lock, flags);
- trace_async_response_inbound((uintptr_t)t, p->generation, p->speed, p->ack, p->timestamp,
- p->header, data, data_length / 4);
+ trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
+ p->timestamp, p->header, data, data_length / 4);
if (!t) {
timed_out:
diff --git a/drivers/firewire/packet-serdes-test.c b/drivers/firewire/packet-serdes-test.c
index f93c966e794d..e83b1fece780 100644
--- a/drivers/firewire/packet-serdes-test.c
+++ b/drivers/firewire/packet-serdes-test.c
@@ -579,4 +579,5 @@ static struct kunit_suite packet_serdes_test_suite = {
};
kunit_test_suite(packet_serdes_test_suite);
+MODULE_DESCRIPTION("FireWire packet serialization/deserialization unit test suite");
MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/uapi-test.c b/drivers/firewire/uapi-test.c
index 2fcbede4fab1..bc3f10a2e516 100644
--- a/drivers/firewire/uapi-test.c
+++ b/drivers/firewire/uapi-test.c
@@ -86,4 +86,5 @@ static struct kunit_suite structure_layout_test_suite = {
};
kunit_test_suite(structure_layout_test_suite);
+MODULE_DESCRIPTION("FireWire UAPI unit test suite");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_ffa/Makefile b/drivers/firmware/arm_ffa/Makefile
index 9d9f37523200..168990a7e792 100644
--- a/drivers/firmware/arm_ffa/Makefile
+++ b/drivers/firmware/arm_ffa/Makefile
@@ -2,5 +2,7 @@
ffa-bus-y = bus.o
ffa-driver-y = driver.o
ffa-transport-$(CONFIG_ARM_FFA_SMCCC) += smccc.o
-ffa-module-objs := $(ffa-bus-y) $(ffa-driver-y) $(ffa-transport-y)
-obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-module.o
+ffa-core-objs := $(ffa-bus-y)
+ffa-module-objs := $(ffa-driver-y) $(ffa-transport-y)
+obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-core.o
+obj-$(CONFIG_ARM_FFA_TRANSPORT) += ffa-module.o
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 2f557e90f2eb..0c83931485f6 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -30,12 +30,11 @@ static int ffa_device_match(struct device *dev, struct device_driver *drv)
while (!uuid_is_null(&id_table->uuid)) {
/*
* FF-A v1.0 doesn't provide discovery of UUIDs, just the
- * partition IDs, so fetch the partitions IDs for this
- * id_table UUID and assign the UUID to the device if the
- * partition ID matches
+ * partition IDs, so match the device unconditionally here and
+ * resolve its UUID via the installed bus notifier during driver
+ * binding.
*/
if (uuid_is_null(&ffa_dev->uuid))
- ffa_device_match_uuid(ffa_dev, &id_table->uuid);
+ return 1;
if (uuid_equal(&ffa_dev->uuid, &id_table->uuid))
return 1;
@@ -50,6 +49,10 @@ static int ffa_device_probe(struct device *dev)
struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
struct ffa_device *ffa_dev = to_ffa_dev(dev);
+ /* The UUID can still be NULL with FF-A v1.0, so just skip probing such devices */
+ if (uuid_is_null(&ffa_dev->uuid))
+ return -ENODEV;
+
return ffa_drv->probe(ffa_dev);
}
@@ -232,14 +235,21 @@ void ffa_device_unregister(struct ffa_device *ffa_dev)
}
EXPORT_SYMBOL_GPL(ffa_device_unregister);
-int arm_ffa_bus_init(void)
+static int __init arm_ffa_bus_init(void)
{
return bus_register(&ffa_bus_type);
}
+subsys_initcall(arm_ffa_bus_init);
-void arm_ffa_bus_exit(void)
+static void __exit arm_ffa_bus_exit(void)
{
ffa_devices_unregister();
bus_unregister(&ffa_bus_type);
ida_destroy(&ffa_bus_id);
}
+module_exit(arm_ffa_bus_exit);
+
+MODULE_ALIAS("ffa-core");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM FF-A bus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_ffa/common.h b/drivers/firmware/arm_ffa/common.h
index d6eccf1fd3f6..9c6425a81d0d 100644
--- a/drivers/firmware/arm_ffa/common.h
+++ b/drivers/firmware/arm_ffa/common.h
@@ -14,8 +14,6 @@ typedef struct arm_smccc_1_2_regs ffa_value_t;
typedef void (ffa_fn)(ffa_value_t, ffa_value_t *);
-int arm_ffa_bus_init(void);
-void arm_ffa_bus_exit(void);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 1609247cfafc..7ba98c7af2e9 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -1224,14 +1224,6 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
int count, idx;
struct ffa_partition_info *pbuf, *tpbuf;
- /*
- * FF-A v1.1 provides UUID for each partition as part of the discovery
- * API, the discovered UUID must be populated in the device's UUID and
- * there is no need to copy the same from the driver table.
- */
- if (drv_info->version > FFA_VERSION_1_0)
- return;
-
count = ffa_partition_probe(uuid, &pbuf);
if (count <= 0)
return;
@@ -1242,6 +1234,35 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
kfree(pbuf);
}
+static int
+ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct ffa_device *fdev = to_ffa_dev(dev);
+
+ if (action == BUS_NOTIFY_BIND_DRIVER) {
+ struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+ const struct ffa_device_id *id_table = ffa_drv->id_table;
+
+ /*
+ * FF-A v1.1 provides the UUID for each partition as part of the
+ * discovery API; the discovered UUID must be populated in the
+ * device's UUID, and there is no need to work around that by
+ * copying the same from the driver table.
+ */
+ if (uuid_is_null(&fdev->uuid))
+ ffa_device_match_uuid(fdev, &id_table->uuid);
+
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ffa_bus_nb = {
+ .notifier_call = ffa_bus_notifier,
+};
+
static int ffa_setup_partitions(void)
{
int count, idx, ret;
@@ -1250,6 +1271,12 @@ static int ffa_setup_partitions(void)
struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
+ if (drv_info->version == FFA_VERSION_1_0) {
+ ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
+ if (ret)
+ pr_err("Failed to register FF-A bus notifiers\n");
+ }
+
count = ffa_partition_probe(&uuid_null, &pbuf);
if (count <= 0) {
pr_info("%s: No partitions found, error %d\n", __func__, count);
@@ -1261,7 +1288,7 @@ static int ffa_setup_partitions(void)
import_uuid(&uuid, (u8 *)tpbuf->uuid);
/* Note that if the UUID is uuid_null, that will require
- * ffa_device_match() to find the UUID of this partition id
+ * ffa_bus_notifier() to find the UUID of this partition id
* with the help of ffa_device_match_uuid(). FF-A v1.1 and above
* provides the UUID here for each partition as part of the
* discovery API and the same is passed.
@@ -1581,14 +1608,9 @@ static int __init ffa_init(void)
if (ret)
return ret;
- ret = arm_ffa_bus_init();
- if (ret)
- return ret;
-
drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
if (!drv_info) {
- ret = -ENOMEM;
- goto ffa_bus_exit;
+ return -ENOMEM;
}
ret = ffa_version_check(&drv_info->version);
@@ -1649,11 +1671,9 @@ free_pages:
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
kfree(drv_info);
-ffa_bus_exit:
- arm_ffa_bus_exit();
return ret;
}
-subsys_initcall(ffa_init);
+module_init(ffa_init);
static void __exit ffa_exit(void)
{
@@ -1663,7 +1683,6 @@ static void __exit ffa_exit(void)
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
kfree(drv_info);
- arm_ffa_bus_exit();
}
module_exit(ffa_exit);
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index b5ac25dbc1ca..4b8c5250cdb5 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -326,6 +326,7 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
+bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem);
/* declarations for message passing transports */
struct scmi_msg_payld;
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 615a3b2ad83d..0219a12e3209 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -21,6 +21,7 @@
* @cl: Mailbox Client
* @chan: Transmit/Receive mailbox uni/bi-directional channel
* @chan_receiver: Optional Receiver mailbox unidirectional channel
+ * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
*/
@@ -28,6 +29,7 @@ struct scmi_mailbox {
struct mbox_client cl;
struct mbox_chan *chan;
struct mbox_chan *chan_receiver;
+ struct mbox_chan *chan_platform_receiver;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
};
@@ -91,6 +93,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx)
* for replies on the a2p channel. Set as zero if not present.
* @p2a_chan: A reference to the optional p2a channel.
* Set as zero if not present.
+ * @p2a_rx_chan: A reference to the optional p2a completion channel.
+ * Set as zero if not present.
*
* At first, validate the transport configuration as described in terms of
* 'mboxes' and 'shmem', then determine which mailbox channel indexes are
@@ -98,8 +102,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx)
*
* Return: 0 on Success or error
*/
-static int mailbox_chan_validate(struct device *cdev,
- int *a2p_rx_chan, int *p2a_chan)
+static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
+ int *p2a_chan, int *p2a_rx_chan)
{
int num_mb, num_sh, ret = 0;
struct device_node *np = cdev->of_node;
@@ -109,8 +113,9 @@ static int mailbox_chan_validate(struct device *cdev,
dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh);
/* Bail out if mboxes and shmem descriptors are inconsistent */
- if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 3 ||
- (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2)) {
+ if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 ||
+ (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
+ (num_mb == 4 && num_sh != 2)) {
dev_warn(cdev,
"Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
of_node_full_name(np), num_mb, num_sh);
@@ -139,6 +144,7 @@ static int mailbox_chan_validate(struct device *cdev,
case 1:
*a2p_rx_chan = 0;
*p2a_chan = 0;
+ *p2a_rx_chan = 0;
break;
case 2:
if (num_sh == 2) {
@@ -148,10 +154,17 @@ static int mailbox_chan_validate(struct device *cdev,
*a2p_rx_chan = 1;
*p2a_chan = 0;
}
+ *p2a_rx_chan = 0;
break;
case 3:
*a2p_rx_chan = 1;
*p2a_chan = 2;
+ *p2a_rx_chan = 0;
+ break;
+ case 4:
+ *a2p_rx_chan = 1;
+ *p2a_chan = 2;
+ *p2a_rx_chan = 3;
break;
}
}
@@ -166,12 +179,12 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
struct device *cdev = cinfo->dev;
struct scmi_mailbox *smbox;
struct device_node *shmem;
- int ret, a2p_rx_chan, p2a_chan, idx = tx ? 0 : 1;
+ int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan, idx = tx ? 0 : 1;
struct mbox_client *cl;
resource_size_t size;
struct resource res;
- ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan);
+ ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan);
if (ret)
return ret;
@@ -229,6 +242,17 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
}
}
+ if (!tx && p2a_rx_chan) {
+ smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan);
+ if (IS_ERR(smbox->chan_platform_receiver)) {
+ ret = PTR_ERR(smbox->chan_platform_receiver);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n");
+ return ret;
+ }
+ }
+
cinfo->transport_info = smbox;
smbox->cinfo = cinfo;
@@ -243,9 +267,11 @@ static int mailbox_chan_free(int id, void *p, void *data)
if (smbox && !IS_ERR(smbox->chan)) {
mbox_free_channel(smbox->chan);
mbox_free_channel(smbox->chan_receiver);
+ mbox_free_channel(smbox->chan_platform_receiver);
cinfo->transport_info = NULL;
smbox->chan = NULL;
smbox->chan_receiver = NULL;
+ smbox->chan_platform_receiver = NULL;
smbox->cinfo = NULL;
}
@@ -300,8 +326,27 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
+ struct mbox_chan *intr_chan;
+ int ret;
shmem_clear_channel(smbox->shmem);
+
+ if (!shmem_channel_intr_enabled(smbox->shmem))
+ return;
+
+ if (smbox->chan_platform_receiver)
+ intr_chan = smbox->chan_platform_receiver;
+ else if (smbox->chan)
+ intr_chan = smbox->chan;
+ else
+ return;
+
+ ret = mbox_send_message(intr_chan, NULL);
+ /* mbox_send_message() returns a non-negative value on success, so reset it */
+ if (ret > 0)
+ ret = 0;
+
+ mbox_client_txdone(intr_chan, ret);
}
static bool
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
index 6eb7d2a4b6b1..21f467a92942 100644
--- a/drivers/firmware/arm_scmi/scmi_power_control.c
+++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -50,6 +50,7 @@
#include <linux/reboot.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/time64.h>
#include <linux/timer.h>
#include <linux/types.h>
@@ -78,6 +79,7 @@ enum scmi_syspower_state {
* @reboot_nb: A notifier_block optionally used to track reboot progress
* @forceful_work: A worker used to trigger a forceful transition once a
* graceful has timed out.
+ * @suspend_work: A worker used to trigger system suspend
*/
struct scmi_syspower_conf {
struct device *dev;
@@ -90,6 +92,7 @@ struct scmi_syspower_conf {
struct notifier_block reboot_nb;
struct delayed_work forceful_work;
+ struct work_struct suspend_work;
};
#define userspace_nb_to_sconf(x) \
@@ -249,6 +252,9 @@ static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc,
case SCMI_SYSTEM_WARMRESET:
orderly_reboot();
break;
+ case SCMI_SYSTEM_SUSPEND:
+ schedule_work(&sc->suspend_work);
+ break;
default:
break;
}
@@ -277,7 +283,8 @@ static int scmi_userspace_notifier(struct notifier_block *nb,
struct scmi_system_power_state_notifier_report *er = data;
struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb);
- if (er->system_state >= SCMI_SYSTEM_POWERUP) {
+ if (er->system_state >= SCMI_SYSTEM_MAX ||
+ er->system_state == SCMI_SYSTEM_POWERUP) {
dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n",
er->system_state);
return NOTIFY_DONE;
@@ -315,6 +322,16 @@ static int scmi_userspace_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void scmi_suspend_work_func(struct work_struct *work)
+{
+ struct scmi_syspower_conf *sc =
+ container_of(work, struct scmi_syspower_conf, suspend_work);
+
+ pm_suspend(PM_SUSPEND_MEM);
+
+ sc->state = SCMI_SYSPOWER_IDLE;
+}
+
static int scmi_syspower_probe(struct scmi_device *sdev)
{
int ret;
@@ -338,6 +355,8 @@ static int scmi_syspower_probe(struct scmi_device *sdev)
sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
sc->dev = &sdev->dev;
+ INIT_WORK(&sc->suspend_work, scmi_suspend_work_func);
+
return handle->notify_ops->devm_event_notifier_register(sdev,
SCMI_PROTOCOL_SYSTEM,
SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 8bf495bcad09..b74e5a740f2c 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -128,3 +128,8 @@ bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
return (ioread32(&shmem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
+
+bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
+{
+ return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED;
+}
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 0d139e4de37c..42c9cd0ebfb7 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -275,6 +275,12 @@
#define HALO_MPU_VIO_ERR_SRC_MASK 0x00007fff
#define HALO_MPU_VIO_ERR_SRC_SHIFT 0
+/*
+ * Write Sequence
+ */
+#define WSEQ_OP_MAX_WORDS 3
+#define WSEQ_END_OF_SCRIPT 0xFFFFFF
+
struct cs_dsp_ops {
bool (*validate_version)(struct cs_dsp *dsp, unsigned int version);
unsigned int (*parse_sizes)(struct cs_dsp *dsp,
@@ -1107,9 +1113,16 @@ struct cs_dsp_coeff_parsed_coeff {
int len;
};
-static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
+static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, unsigned int avail,
+ const u8 **str)
{
- int length;
+ int length, total_field_len;
+
+ /* String fields are at least one __le32 */
+ if (sizeof(__le32) > avail) {
+ *pos = NULL;
+ return 0;
+ }
switch (bytes) {
case 1:
@@ -1122,10 +1135,16 @@ static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
return 0;
}
+ total_field_len = ((length + bytes) + 3) & ~0x03;
+ if ((unsigned int)total_field_len > avail) {
+ *pos = NULL;
+ return 0;
+ }
+
if (str)
*str = *pos + bytes;
- *pos += ((length + bytes) + 3) & ~0x03;
+ *pos += total_field_len;
return length;
}
@@ -1150,71 +1169,134 @@ static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
return val;
}
-static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
- struct cs_dsp_coeff_parsed_alg *blk)
+static int cs_dsp_coeff_parse_alg(struct cs_dsp *dsp,
+ const struct wmfw_region *region,
+ struct cs_dsp_coeff_parsed_alg *blk)
{
const struct wmfw_adsp_alg_data *raw;
+ unsigned int data_len = le32_to_cpu(region->len);
+ unsigned int pos;
+ const u8 *tmp;
+
+ raw = (const struct wmfw_adsp_alg_data *)region->data;
switch (dsp->fw_ver) {
case 0:
case 1:
- raw = (const struct wmfw_adsp_alg_data *)*data;
- *data = raw->data;
+ if (sizeof(*raw) > data_len)
+ return -EOVERFLOW;
blk->id = le32_to_cpu(raw->id);
blk->name = raw->name;
- blk->name_len = strlen(raw->name);
+ blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
blk->ncoeff = le32_to_cpu(raw->ncoeff);
+
+ pos = sizeof(*raw);
break;
default:
- blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
- blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
+ if (sizeof(raw->id) > data_len)
+ return -EOVERFLOW;
+
+ tmp = region->data;
+ blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), &tmp);
+ pos = tmp - region->data;
+
+ tmp = &region->data[pos];
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
&blk->name);
- cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
- blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
+ if (!tmp)
+ return -EOVERFLOW;
+
+ pos = tmp - region->data;
+ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
+ if (!tmp)
+ return -EOVERFLOW;
+
+ pos = tmp - region->data;
+ if (sizeof(raw->ncoeff) > (data_len - pos))
+ return -EOVERFLOW;
+
+ blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), &tmp);
+ pos += sizeof(raw->ncoeff);
break;
}
+ if ((int)blk->ncoeff < 0)
+ return -EOVERFLOW;
+
cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
+
+ return pos;
}
-static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
- struct cs_dsp_coeff_parsed_coeff *blk)
+static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp,
+ const struct wmfw_region *region,
+ unsigned int pos,
+ struct cs_dsp_coeff_parsed_coeff *blk)
{
const struct wmfw_adsp_coeff_data *raw;
+ unsigned int data_len = le32_to_cpu(region->len);
+ unsigned int blk_len, blk_end_pos;
const u8 *tmp;
- int length;
+
+ raw = (const struct wmfw_adsp_coeff_data *)&region->data[pos];
+ if (sizeof(raw->hdr) > (data_len - pos))
+ return -EOVERFLOW;
+
+ blk_len = le32_to_cpu(raw->hdr.size);
+ if (blk_len > S32_MAX)
+ return -EOVERFLOW;
+
+ if (blk_len > (data_len - pos - sizeof(raw->hdr)))
+ return -EOVERFLOW;
+
+ blk_end_pos = pos + sizeof(raw->hdr) + blk_len;
+
+ blk->offset = le16_to_cpu(raw->hdr.offset);
+ blk->mem_type = le16_to_cpu(raw->hdr.type);
switch (dsp->fw_ver) {
case 0:
case 1:
- raw = (const struct wmfw_adsp_coeff_data *)*data;
- *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
+ if (sizeof(*raw) > (data_len - pos))
+ return -EOVERFLOW;
- blk->offset = le16_to_cpu(raw->hdr.offset);
- blk->mem_type = le16_to_cpu(raw->hdr.type);
blk->name = raw->name;
- blk->name_len = strlen(raw->name);
+ blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
blk->ctl_type = le16_to_cpu(raw->ctl_type);
blk->flags = le16_to_cpu(raw->flags);
blk->len = le32_to_cpu(raw->len);
break;
default:
- tmp = *data;
- blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
- blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
- length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
- blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
+ pos += sizeof(raw->hdr);
+ tmp = &region->data[pos];
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
&blk->name);
- cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
- cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
+ if (!tmp)
+ return -EOVERFLOW;
+
+ pos = tmp - region->data;
+ cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, NULL);
+ if (!tmp)
+ return -EOVERFLOW;
+
+ pos = tmp - region->data;
+ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
+ if (!tmp)
+ return -EOVERFLOW;
+
+ pos = tmp - region->data;
+ if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) >
+ (data_len - pos))
+ return -EOVERFLOW;
+
blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
+ pos += sizeof(raw->ctl_type);
blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
+ pos += sizeof(raw->flags);
blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
-
- *data = *data + sizeof(raw->hdr) + length;
break;
}
@@ -1224,6 +1306,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
+
+ return blk_end_pos;
}
static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
@@ -1247,12 +1331,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
struct cs_dsp_alg_region alg_region = {};
struct cs_dsp_coeff_parsed_alg alg_blk;
struct cs_dsp_coeff_parsed_coeff coeff_blk;
- const u8 *data = region->data;
- int i, ret;
+ int i, pos, ret;
+
+ pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk);
+ if (pos < 0)
+ return pos;
- cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
for (i = 0; i < alg_blk.ncoeff; i++) {
- cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
+ pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk);
+ if (pos < 0)
+ return pos;
switch (coeff_blk.ctl_type) {
case WMFW_CTL_TYPE_BYTES:
@@ -1321,6 +1409,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
const struct wmfw_adsp1_sizes *adsp1_sizes;
adsp1_sizes = (void *)&firmware->data[pos];
+ if (sizeof(*adsp1_sizes) > firmware->size - pos) {
+ cs_dsp_err(dsp, "%s: file truncated\n", file);
+ return 0;
+ }
cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
@@ -1337,6 +1429,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
const struct wmfw_adsp2_sizes *adsp2_sizes;
adsp2_sizes = (void *)&firmware->data[pos];
+ if (sizeof(*adsp2_sizes) > firmware->size - pos) {
+ cs_dsp_err(dsp, "%s: file truncated\n", file);
+ return 0;
+ }
cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
@@ -1376,7 +1472,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
struct regmap *regmap = dsp->regmap;
unsigned int pos = 0;
const struct wmfw_header *header;
- const struct wmfw_adsp1_sizes *adsp1_sizes;
const struct wmfw_footer *footer;
const struct wmfw_region *region;
const struct cs_dsp_region *mem;
@@ -1392,10 +1487,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
ret = -EINVAL;
- pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
- if (pos >= firmware->size) {
- cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
- file, firmware->size);
+ if (sizeof(*header) >= firmware->size) {
+ ret = -EOVERFLOW;
goto out_fw;
}
@@ -1423,22 +1516,36 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
pos = sizeof(*header);
pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
+ if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) {
+ ret = -EOVERFLOW;
+ goto out_fw;
+ }
footer = (void *)&firmware->data[pos];
pos += sizeof(*footer);
if (le32_to_cpu(header->len) != pos) {
- cs_dsp_err(dsp, "%s: unexpected header length %d\n",
- file, le32_to_cpu(header->len));
+ ret = -EOVERFLOW;
goto out_fw;
}
cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
le64_to_cpu(footer->timestamp));
- while (pos < firmware->size &&
- sizeof(*region) < firmware->size - pos) {
+ while (pos < firmware->size) {
+ /* Is there enough data for a complete block header? */
+ if (sizeof(*region) > firmware->size - pos) {
+ ret = -EOVERFLOW;
+ goto out_fw;
+ }
+
region = (void *)&(firmware->data[pos]);
+
+ if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) {
+ ret = -EOVERFLOW;
+ goto out_fw;
+ }
+
region_name = "Unknown";
reg = 0;
text = NULL;
@@ -1495,16 +1602,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
regions, le32_to_cpu(region->len), offset,
region_name);
- if (le32_to_cpu(region->len) >
- firmware->size - pos - sizeof(*region)) {
- cs_dsp_err(dsp,
- "%s.%d: %s region len %d bytes exceeds file length %zu\n",
- file, regions, region_name,
- le32_to_cpu(region->len), firmware->size);
- ret = -EINVAL;
- goto out_fw;
- }
-
if (text) {
memcpy(text, region->data, le32_to_cpu(region->len));
cs_dsp_info(dsp, "%s: %s\n", file, text);
@@ -1555,6 +1652,9 @@ out_fw:
cs_dsp_buf_free(&buf_list);
kfree(text);
+ if (ret == -EOVERFLOW)
+ cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
+
return ret;
}
@@ -2122,10 +2222,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
pos = le32_to_cpu(hdr->len);
blocks = 0;
- while (pos < firmware->size &&
- sizeof(*blk) < firmware->size - pos) {
+ while (pos < firmware->size) {
+ /* Is there enough data for a complete block header? */
+ if (sizeof(*blk) > firmware->size - pos) {
+ ret = -EOVERFLOW;
+ goto out_fw;
+ }
+
blk = (void *)(&firmware->data[pos]);
+ if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) {
+ ret = -EOVERFLOW;
+ goto out_fw;
+ }
+
type = le16_to_cpu(blk->type);
offset = le16_to_cpu(blk->offset);
version = le32_to_cpu(blk->ver) >> 8;
@@ -2222,17 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
}
if (reg) {
- if (le32_to_cpu(blk->len) >
- firmware->size - pos - sizeof(*blk)) {
- cs_dsp_err(dsp,
- "%s.%d: %s region len %d bytes exceeds file length %zu\n",
- file, blocks, region_name,
- le32_to_cpu(blk->len),
- firmware->size);
- ret = -EINVAL;
- goto out_fw;
- }
-
buf = cs_dsp_buf_alloc(blk->data,
le32_to_cpu(blk->len),
&buf_list);
@@ -2272,6 +2371,10 @@ out_fw:
regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
kfree(text);
+
+ if (ret == -EOVERFLOW)
+ cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
+
return ret;
}
@@ -3398,6 +3501,278 @@ int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, FW_CS_DSP);
+
+struct cs_dsp_wseq_op {
+ struct list_head list;
+ u32 address;
+ u32 data;
+ u16 offset;
+ u8 operation;
+};
+
+static void cs_dsp_wseq_clear(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq)
+{
+ struct cs_dsp_wseq_op *op, *op_tmp;
+
+ list_for_each_entry_safe(op, op_tmp, &wseq->ops, list) {
+ list_del(&op->list);
+ devm_kfree(dsp->dev, op);
+ }
+}
+
+static int cs_dsp_populate_wseq(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq)
+{
+ struct cs_dsp_wseq_op *op = NULL;
+ struct cs_dsp_chunk chunk;
+ u8 *words;
+ int ret;
+
+ if (!wseq->ctl) {
+ cs_dsp_err(dsp, "No control for write sequence\n");
+ return -EINVAL;
+ }
+
+ words = kzalloc(wseq->ctl->len, GFP_KERNEL);
+ if (!words)
+ return -ENOMEM;
+
+ ret = cs_dsp_coeff_read_ctrl(wseq->ctl, 0, words, wseq->ctl->len);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read %s: %d\n", wseq->ctl->subname, ret);
+ goto err_free;
+ }
+
+ INIT_LIST_HEAD(&wseq->ops);
+
+ chunk = cs_dsp_chunk(words, wseq->ctl->len);
+
+ while (!cs_dsp_chunk_end(&chunk)) {
+ op = devm_kzalloc(dsp->dev, sizeof(*op), GFP_KERNEL);
+ if (!op) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ op->offset = cs_dsp_chunk_bytes(&chunk);
+ op->operation = cs_dsp_chunk_read(&chunk, 8);
+
+ switch (op->operation) {
+ case CS_DSP_WSEQ_END:
+ op->data = WSEQ_END_OF_SCRIPT;
+ break;
+ case CS_DSP_WSEQ_UNLOCK:
+ op->data = cs_dsp_chunk_read(&chunk, 16);
+ break;
+ case CS_DSP_WSEQ_ADDR8:
+ op->address = cs_dsp_chunk_read(&chunk, 8);
+ op->data = cs_dsp_chunk_read(&chunk, 32);
+ break;
+ case CS_DSP_WSEQ_H16:
+ case CS_DSP_WSEQ_L16:
+ op->address = cs_dsp_chunk_read(&chunk, 24);
+ op->data = cs_dsp_chunk_read(&chunk, 16);
+ break;
+ case CS_DSP_WSEQ_FULL:
+ op->address = cs_dsp_chunk_read(&chunk, 32);
+ op->data = cs_dsp_chunk_read(&chunk, 32);
+ break;
+ default:
+ ret = -EINVAL;
+ cs_dsp_err(dsp, "Unsupported op: %X\n", op->operation);
+ devm_kfree(dsp->dev, op);
+ goto err_free;
+ }
+
+ list_add_tail(&op->list, &wseq->ops);
+
+ if (op->operation == CS_DSP_WSEQ_END)
+ break;
+ }
+
+ if (op && op->operation != CS_DSP_WSEQ_END) {
+ cs_dsp_err(dsp, "%s missing end terminator\n", wseq->ctl->subname);
+ ret = -ENOENT;
+ }
+
+err_free:
+ kfree(words);
+
+ return ret;
+}
+
+/**
+ * cs_dsp_wseq_init() - Initialize write sequences contained within the loaded DSP firmware
+ * @dsp: Pointer to DSP structure
+ * @wseqs: List of write sequences to initialize
+ * @num_wseqs: Number of write sequences to initialize
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int num_wseqs)
+{
+ int i, ret;
+
+ lockdep_assert_held(&dsp->pwr_lock);
+
+ for (i = 0; i < num_wseqs; i++) {
+ ret = cs_dsp_populate_wseq(dsp, &wseqs[i]);
+ if (ret) {
+ cs_dsp_wseq_clear(dsp, &wseqs[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, FW_CS_DSP);
+
+static struct cs_dsp_wseq_op *cs_dsp_wseq_find_op(u32 addr, u8 op_code,
+ struct list_head *wseq_ops)
+{
+ struct cs_dsp_wseq_op *op;
+
+ list_for_each_entry(op, wseq_ops, list) {
+ if (op->operation == op_code && op->address == addr)
+ return op;
+ }
+
+ return NULL;
+}
+
+/**
+ * cs_dsp_wseq_write() - Add or update an entry in a write sequence
+ * @dsp: Pointer to a DSP structure
+ * @wseq: Write sequence to write to
+ * @addr: Address of the register to be written to
+ * @data: Data to be written
+ * @op_code: The type of operation of the new entry
+ * @update: If true, searches for the first entry in the write sequence with
+ * the same address and op_code, and replaces it. If false, creates a new entry
+ * at the tail
+ *
+ * This function formats register address and value pairs into the format
+ * required for write sequence entries, and either updates or adds the
+ * new entry into the write sequence.
+ *
+ * If update is set to true and no matching entry is found, it will add a new entry.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_wseq_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
+ u32 addr, u32 data, u8 op_code, bool update)
+{
+ struct cs_dsp_wseq_op *op_end, *op_new = NULL;
+ u32 words[WSEQ_OP_MAX_WORDS];
+ struct cs_dsp_chunk chunk;
+ int new_op_size, ret;
+
+ if (update)
+ op_new = cs_dsp_wseq_find_op(addr, op_code, &wseq->ops);
+
+ /* If entry to update is not found, treat it as a new operation */
+ if (!op_new) {
+ op_end = cs_dsp_wseq_find_op(0, CS_DSP_WSEQ_END, &wseq->ops);
+ if (!op_end) {
+ cs_dsp_err(dsp, "Missing terminator for %s\n", wseq->ctl->subname);
+ return -EINVAL;
+ }
+
+ op_new = devm_kzalloc(dsp->dev, sizeof(*op_new), GFP_KERNEL);
+ if (!op_new)
+ return -ENOMEM;
+
+ op_new->operation = op_code;
+ op_new->address = addr;
+ op_new->offset = op_end->offset;
+ update = false;
+ }
+
+ op_new->data = data;
+
+ chunk = cs_dsp_chunk(words, sizeof(words));
+ cs_dsp_chunk_write(&chunk, 8, op_new->operation);
+
+ switch (op_code) {
+ case CS_DSP_WSEQ_FULL:
+ cs_dsp_chunk_write(&chunk, 32, op_new->address);
+ cs_dsp_chunk_write(&chunk, 32, op_new->data);
+ break;
+ case CS_DSP_WSEQ_L16:
+ case CS_DSP_WSEQ_H16:
+ cs_dsp_chunk_write(&chunk, 24, op_new->address);
+ cs_dsp_chunk_write(&chunk, 16, op_new->data);
+ break;
+ default:
+ ret = -EINVAL;
+ cs_dsp_err(dsp, "Operation %X not supported\n", op_code);
+ goto op_new_free;
+ }
+
+ new_op_size = cs_dsp_chunk_bytes(&chunk);
+
+ if (!update) {
+ if (wseq->ctl->len - op_end->offset < new_op_size) {
+ cs_dsp_err(dsp, "Not enough memory in %s for entry\n", wseq->ctl->subname);
+ ret = -E2BIG;
+ goto op_new_free;
+ }
+
+ op_end->offset += new_op_size;
+
+ ret = cs_dsp_coeff_write_ctrl(wseq->ctl, op_end->offset / sizeof(u32),
+ &op_end->data, sizeof(u32));
+ if (ret)
+ goto op_new_free;
+
+ list_add_tail(&op_new->list, &op_end->list);
+ }
+
+ ret = cs_dsp_coeff_write_ctrl(wseq->ctl, op_new->offset / sizeof(u32),
+ words, new_op_size);
+ if (ret)
+ goto op_new_free;
+
+ return 0;
+
+op_new_free:
+ devm_kfree(dsp->dev, op_new);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, FW_CS_DSP);
+
+/**
+ * cs_dsp_wseq_multi_write() - Add or update multiple entries in a write sequence
+ * @dsp: Pointer to a DSP structure
+ * @wseq: Write sequence to write to
+ * @reg_seq: List of address-data pairs
+ * @num_regs: Number of address-data pairs
+ * @op_code: The types of operations of the new entries
+ * @update: If true, searches for the first entry in the write sequence with
+ * the same address and op_code, and replaces it. If false, creates a new entry
+ * at the tail
+ *
+ * This function calls cs_dsp_wseq_write() for multiple address-data pairs.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
+ const struct reg_sequence *reg_seq, int num_regs,
+ u8 op_code, bool update)
+{
+ int i, ret;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = cs_dsp_wseq_write(dsp, wseq, reg_seq[i].reg,
+ reg_seq[i].def, op_code, update);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, FW_CS_DSP);
+
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
MODULE_LICENSE("GPL v2");
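For reference, a hedged sketch of how a codec driver might consume the write-sequence API added above (cs_dsp_wseq_init() and cs_dsp_wseq_write(), whose signatures appear in this hunk). The register address and value are invented for illustration, wseq->ctl is assumed to already be bound to the firmware control holding the sequence, and dsp->pwr_lock is assumed to be the mutex that the lockdep assertion in cs_dsp_wseq_init() expects.

#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/mutex.h>

static int demo_patch_wseq(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq)
{
	int ret;

	/* cs_dsp_wseq_init() asserts that pwr_lock is held. */
	mutex_lock(&dsp->pwr_lock);

	/* Parse the sequence stored in the firmware control bound to wseq->ctl. */
	ret = cs_dsp_wseq_init(dsp, wseq, 1);
	if (ret)
		goto out_unlock;

	/* Update (or append) one full 32-bit address/data entry. */
	ret = cs_dsp_wseq_write(dsp, wseq, 0x2c04, 0x10, CS_DSP_WSEQ_FULL, true);

out_unlock:
	mutex_unlock(&dsp->pwr_lock);
	return ret;
}

For a table of register writes, cs_dsp_wseq_multi_write() applies the same operation to each struct reg_sequence entry in turn.
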
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 5b9dc26e6bcb..552c78f5f059 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -136,7 +136,7 @@ static int efi_pstore_read_func(struct pstore_record *record,
&size, record->buf);
if (status != EFI_SUCCESS) {
kfree(record->buf);
- return -EIO;
+ return efi_status_to_err(status);
}
/*
@@ -189,7 +189,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
return 0;
if (status != EFI_SUCCESS)
- return -EIO;
+ return efi_status_to_err(status);
/* skip variables that don't concern us */
if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID))
@@ -227,7 +227,7 @@ static int efi_pstore_write(struct pstore_record *record)
record->size, record->psi->buf,
true);
efivar_unlock();
- return status == EFI_SUCCESS ? 0 : -EIO;
+ return efi_status_to_err(status);
};
static int efi_pstore_erase(struct pstore_record *record)
@@ -238,7 +238,7 @@ static int efi_pstore_erase(struct pstore_record *record)
PSTORE_EFI_ATTRIBUTES, 0, NULL);
if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- return -EIO;
+ return efi_status_to_err(status);
return 0;
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 06f0428a723c..1f32d6cf98d6 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -76,7 +76,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
-lib-$(CONFIG_X86) += x86-stub.o
+lib-$(CONFIG_X86) += x86-stub.o smbios.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 452b7ccd330e..2c3869356147 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -21,7 +21,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_loaded_image_t *image,
efi_handle_t image_handle)
{
- efi_status_t status;
unsigned long kernel_size, kernel_codesize, kernel_memsize;
if (image->image_base != _text) {
@@ -39,15 +38,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*reserve_size = kernel_memsize;
*image_addr = (unsigned long)_text;
- status = efi_kaslr_relocate_kernel(image_addr,
- reserve_addr, reserve_size,
- kernel_size, kernel_codesize,
- kernel_memsize,
- efi_kaslr_get_phys_seed(image_handle));
- if (status != EFI_SUCCESS)
- return status;
-
- return EFI_SUCCESS;
+ return efi_kaslr_relocate_kernel(image_addr, reserve_addr, reserve_size,
+ kernel_size, kernel_codesize, kernel_memsize,
+ efi_kaslr_get_phys_seed(image_handle));
}
asmlinkage void primary_entry(void);
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index 446e35eaf3d9..e57cd3de0a00 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -39,8 +39,7 @@ static bool system_needs_vamap(void)
static char const emag[] = "eMAG";
default:
- version = efi_get_smbios_string(&record->header, 4,
- processor_version);
+ version = efi_get_smbios_string(record, processor_version);
if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
strncmp(version, emag, sizeof(emag) - 1)))
break;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 27abb4ce0291..d33ccbc4a2c6 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1204,14 +1204,13 @@ struct efi_smbios_type4_record {
u16 thread_enabled;
};
-#define efi_get_smbios_string(__record, __type, __name) ({ \
- int off = offsetof(struct efi_smbios_type ## __type ## _record, \
- __name); \
- __efi_get_smbios_string((__record), __type, off); \
+#define efi_get_smbios_string(__record, __field) ({ \
+ __typeof__(__record) __rec = __record; \
+ __efi_get_smbios_string(&__rec->header, &__rec->__field); \
})
const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
- u8 type, int offset);
+ const u8 *offset);
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
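The reworked efi_get_smbios_string() macro now takes the typed record pointer and the field name, deriving the string index from the field itself rather than from an offsetof() against a hard-coded type number. A minimal usage fragment mirroring the callers updated later in this diff (it assumes the libstub context provided by "efistub.h"; variable names are illustrative):

	const struct efi_smbios_type4_record *rec;
	const u8 *version;

	rec = (const struct efi_smbios_type4_record *)efi_get_smbios_record(4);
	if (rec)
		version = efi_get_smbios_string(rec, processor_version);
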
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 1a9808012abd..6318c40bda38 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -18,8 +18,6 @@
*/
u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
{
- efi_status_t status;
- u32 phys_seed;
efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
void *p;
@@ -32,18 +30,20 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
&li_fixed_proto, &p) == EFI_SUCCESS) {
efi_info("Image placement fixed by loader\n");
} else {
+ efi_status_t status;
+ u32 phys_seed;
+
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
- if (status == EFI_SUCCESS) {
+ if (status == EFI_SUCCESS)
return phys_seed;
- } else if (status == EFI_NOT_FOUND) {
+
+ if (status == EFI_NOT_FOUND)
efi_info("EFI_RNG_PROTOCOL unavailable\n");
- efi_nokaslr = true;
- } else if (status != EFI_SUCCESS) {
- efi_err("efi_get_random_bytes() failed (0x%lx)\n",
- status);
- efi_nokaslr = true;
- }
+ else
+ efi_err("efi_get_random_bytes() failed (0x%lx)\n", status);
+
+ efi_nokaslr = true;
}
return 0;
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
index 684c9354637c..d0ef93551c44 100644
--- a/drivers/firmware/efi/libstub/loongarch.c
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
unsigned long __weak kernel_entry_address(unsigned long kernel_addr,
efi_loaded_image_t *image)
{
- return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
+ return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr;
}
efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index bf6fbd5d22a1..d694bcfa1074 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -48,7 +48,7 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long m = (unsigned long)map->map;
u64 start, end;
- desc = efi_early_memdesc_ptr(m, map->desc_size, i);
+ desc = efi_memdesc_ptr(m, map->desc_size, i);
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
index c217de2cc8d5..f31410d7e7e1 100644
--- a/drivers/firmware/efi/libstub/smbios.c
+++ b/drivers/firmware/efi/libstub/smbios.c
@@ -6,20 +6,31 @@
#include "efistub.h"
-typedef struct efi_smbios_protocol efi_smbios_protocol_t;
-
-struct efi_smbios_protocol {
- efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t,
- u16 *, struct efi_smbios_record *);
- efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *,
- unsigned long *, u8 *);
- efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16);
- efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *,
- struct efi_smbios_record **,
- efi_handle_t *);
-
- u8 major_version;
- u8 minor_version;
+typedef union efi_smbios_protocol efi_smbios_protocol_t;
+
+union efi_smbios_protocol {
+ struct {
+ efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t,
+ u16 *, struct efi_smbios_record *);
+ efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *,
+ unsigned long *, u8 *);
+ efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16);
+ efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *,
+ struct efi_smbios_record **,
+ efi_handle_t *);
+
+ u8 major_version;
+ u8 minor_version;
+ };
+ struct {
+ u32 add;
+ u32 update_string;
+ u32 remove;
+ u32 get_next;
+
+ u8 major_version;
+ u8 minor_version;
+ } mixed_mode;
};
const struct efi_smbios_record *efi_get_smbios_record(u8 type)
@@ -38,7 +49,7 @@ const struct efi_smbios_record *efi_get_smbios_record(u8 type)
}
const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
- u8 type, int offset)
+ const u8 *offset)
{
const u8 *strtable;
@@ -46,7 +57,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
return NULL;
strtable = (u8 *)record + record->length;
- for (int i = 1; i < ((u8 *)record)[offset]; i++) {
+ for (int i = 1; i < *offset; i++) {
int len = strlen(strtable);
if (!len)
diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c
index 9a655f30ba47..c295ea3a6efc 100644
--- a/drivers/firmware/efi/libstub/unaccepted_memory.c
+++ b/drivers/firmware/efi/libstub/unaccepted_memory.c
@@ -29,7 +29,7 @@ efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
efi_memory_desc_t *d;
unsigned long m = (unsigned long)map->map;
- d = efi_early_memdesc_ptr(m, map->desc_size, i);
+ d = efi_memdesc_ptr(m, map->desc_size, i);
if (d->type != EFI_UNACCEPTED_MEMORY)
continue;
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 1983fd3bf392..078055b054e3 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -225,6 +225,68 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
+static bool apple_match_product_name(void)
+{
+ static const char type1_product_matches[][15] = {
+ "MacBookPro11,3",
+ "MacBookPro11,5",
+ "MacBookPro13,3",
+ "MacBookPro14,3",
+ "MacBookPro15,1",
+ "MacBookPro15,3",
+ "MacBookPro16,1",
+ "MacBookPro16,4",
+ };
+ const struct efi_smbios_type1_record *record;
+ const u8 *product;
+
+ record = (struct efi_smbios_type1_record *)efi_get_smbios_record(1);
+ if (!record)
+ return false;
+
+ product = efi_get_smbios_string(record, product_name);
+ if (!product)
+ return false;
+
+ for (int i = 0; i < ARRAY_SIZE(type1_product_matches); i++) {
+ if (!strcmp(product, type1_product_matches[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static void apple_set_os(void)
+{
+ struct {
+ unsigned long version;
+ efi_status_t (__efiapi *set_os_version)(const char *);
+ efi_status_t (__efiapi *set_os_vendor)(const char *);
+ } *set_os;
+ efi_status_t status;
+
+ if (!efi_is_64bit() || !apple_match_product_name())
+ return;
+
+ status = efi_bs_call(locate_protocol, &APPLE_SET_OS_PROTOCOL_GUID, NULL,
+ (void **)&set_os);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (set_os->version >= 2) {
+ status = set_os->set_os_vendor("Apple Inc.");
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to set OS vendor via apple_set_os\n");
+ }
+
+ if (set_os->version > 0) {
+ /* The version being set doesn't seem to matter */
+ status = set_os->set_os_version("Mac OS X 10.9");
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to set OS version via apple_set_os\n");
+ }
+}
+
efi_status_t efi_adjust_memory_range_protection(unsigned long start,
unsigned long size)
{
@@ -335,9 +397,12 @@ static const efi_char16_t apple[] = L"Apple";
static void setup_quirks(struct boot_params *boot_params)
{
- if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
- !memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
- retrieve_apple_device_properties(boot_params);
+ if (!memcmp(efistub_fw_vendor(), apple, sizeof(apple))) {
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+ retrieve_apple_device_properties(boot_params);
+
+ apple_set_os();
+ }
}
/*
@@ -476,9 +541,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_status_t status;
char *cmdline_ptr;
- if (efi_is_native())
- memset(_bss, 0, _ebss - _bss);
-
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
@@ -501,16 +563,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
/* Convert unicode cmdline to ascii */
cmdline_ptr = efi_convert_cmdline(image, &options_size);
if (!cmdline_ptr)
- goto fail;
+ efi_exit(handle, EFI_OUT_OF_RESOURCES);
efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
&boot_params.ext_cmd_line_ptr);
efi_stub_entry(handle, sys_table_arg, &boot_params);
/* not reached */
-
-fail:
- efi_exit(handle, status);
}
static void add_e820ext(struct boot_params *params,
@@ -555,7 +614,7 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
m |= (u64)efi->efi_memmap_hi << 32;
#endif
- d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i);
+ d = efi_memdesc_ptr(m, efi->efi_memdesc_size, i);
switch (d->type) {
case EFI_RESERVED_TYPE:
case EFI_RUNTIME_SERVICES_CODE:
@@ -781,7 +840,7 @@ static const char *cmdline_memmap_override;
static efi_status_t parse_options(const char *cmdline)
{
static const char opts[][14] = {
- "mem=", "memmap=", "efi_fake_mem=", "hugepages="
+ "mem=", "memmap=", "hugepages="
};
for (int i = 0; i < ARRAY_SIZE(opts); i++) {
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index ac8c0ef85158..af2c82f7bd90 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -41,6 +41,7 @@ SECTIONS
}
/DISCARD/ : {
+ *(.discard .discard.*)
*(.modinfo .init.modinfo)
}
}
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index ab85bf8e165a..164203429fa7 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -164,7 +164,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
bool valid;
char buf[64];
- valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+ valid = entry_is_valid(efi_memdesc_ptr(tbl->entry, tbl->desc_size, i),
&md);
size = md.num_pages << EFI_PAGE_SHIFT;
if (efi_enabled(EFI_DBG) || !valid)
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 3365944f7965..34109fd86c55 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -15,10 +15,6 @@
#include <asm/early_ioremap.h>
#include <asm/efi.h>
-#ifndef __efi_memmap_free
-#define __efi_memmap_free(phys, size, flags) do { } while (0)
-#endif
-
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
@@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
return -ENOMEM;
}
- if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
- __efi_memmap_free(efi.memmap.phys_map,
- efi.memmap.desc_size * efi.memmap.nr_map,
- efi.memmap.flags);
-
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 5d56bc40a79d..708b777857d3 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -213,7 +213,7 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
* Calls the appropriate efi_runtime_service() with the appropriate
* arguments.
*/
-static void efi_call_rts(struct work_struct *work)
+static void __nocfi efi_call_rts(struct work_struct *work)
{
const union efi_rts_args *args = efi_rts_work.args;
efi_status_t status = EFI_NOT_FOUND;
@@ -435,7 +435,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
return status;
}
-static efi_status_t
+static efi_status_t __nocfi
virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
unsigned long data_size, void *data)
{
@@ -469,7 +469,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
return status;
}
-static efi_status_t
+static efi_status_t __nocfi
virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
u64 *remaining_space, u64 *max_variable_size)
{
@@ -499,10 +499,9 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
return status;
}
-static void virt_efi_reset_system(int reset_type,
- efi_status_t status,
- unsigned long data_size,
- efi_char16_t *data)
+static void __nocfi
+virt_efi_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data)
{
if (down_trylock(&efi_runtime_lock)) {
pr_warn("failed to invoke the reset_system() runtime service:\n"
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 6f810d720f4d..66042160b361 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -131,4 +131,5 @@ static struct coreboot_driver cbmem_entry_driver = {
module_coreboot_driver(cbmem_entry_driver);
MODULE_AUTHOR("Jack Rosenthal <jrosenth@chromium.org>");
+MODULE_DESCRIPTION("Driver for exporting CBMEM entries in sysfs");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index fa7752f6e89b..a4e3bbd556a3 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -255,4 +255,5 @@ module_init(coreboot_table_driver_init);
module_exit(coreboot_table_driver_exit);
MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Module providing coreboot table access");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index 07c458bf64ec..daadd71d8ddd 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -97,4 +97,5 @@ static struct coreboot_driver framebuffer_driver = {
module_coreboot_driver(framebuffer_driver);
MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Memory based framebuffer accessed through coreboot table");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 96ea1fa76d35..d304913314e4 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -1090,4 +1090,5 @@ module_init(gsmi_init);
module_exit(gsmi_exit);
MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("EFI SMI interface for Google platforms");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index 24c97a70aa80..c5f08617aa8d 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -113,4 +113,5 @@ static struct coreboot_driver memconsole_driver = {
module_coreboot_driver(memconsole_driver);
MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Memory based BIOS console accessed through coreboot table");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
index 3d3c4f6b8194..a0974c376985 100644
--- a/drivers/firmware/google/memconsole-x86-legacy.c
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -154,4 +154,5 @@ module_init(memconsole_x86_init);
module_exit(memconsole_x86_exit);
MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("EBDA specific parts of the memory based BIOS console.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index 44d314ad69e4..b9d99fe1ff0f 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -50,4 +50,5 @@ void memconsole_exit(void)
EXPORT_SYMBOL(memconsole_exit);
MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Architecture-independent parts of the memory based BIOS console");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 8e4216714b29..1749529f63d4 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -323,4 +323,5 @@ static struct coreboot_driver vpd_driver = {
module_coreboot_driver(vpd_driver);
MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Driver for exporting Vital Product Data content to sysfs");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 5d7f62fe1d5f..f25a9746249b 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -340,4 +340,5 @@ static struct platform_driver meson_sm_driver = {
},
};
module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
+MODULE_DESCRIPTION("Amlogic Secure Monitor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 835a19a7a3a0..30de47895b1c 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -9,6 +9,7 @@
*
* Author: Conor Dooley <conor.dooley@microchip.com>
*/
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/math.h>
@@ -71,8 +72,9 @@
#define AUTO_UPDATE_UPGRADE_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_UPGRADE_INDEX)
#define AUTO_UPDATE_BLANK_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_BLANK_INDEX)
#define AUTO_UPDATE_DIRECTORY_SIZE SZ_1K
-#define AUTO_UPDATE_RESERVED_SIZE SZ_1M
-#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_RESERVED_SIZE)
+#define AUTO_UPDATE_INFO_BASE AUTO_UPDATE_DIRECTORY_SIZE
+#define AUTO_UPDATE_INFO_SIZE SZ_1M
+#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE)
#define AUTO_UPDATE_TIMEOUT_MS 60000
@@ -86,6 +88,17 @@ struct mpfs_auto_update_priv {
bool cancel_request;
};
+static bool mpfs_auto_update_is_bitstream_info(const u8 *data, u32 size)
+{
+ if (size < 4)
+ return false;
+
+ if (data[0] == 0x4d && data[1] == 0x43 && data[2] == 0x48 && data[3] == 0x50)
+ return true;
+
+ return false;
+}
+
static enum fw_upload_err mpfs_auto_update_prepare(struct fw_upload *fw_uploader, const u8 *data,
u32 size)
{
@@ -162,28 +175,17 @@ static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_up
static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
{
struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- struct mpfs_mss_response *response;
- struct mpfs_mss_msg *message;
- u32 *response_msg;
+ u32 *response_msg __free(kfree) =
+ kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL);
+ struct mpfs_mss_response *response __free(kfree) =
+ kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL);
+ struct mpfs_mss_msg *message __free(kfree) =
+ kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL);
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
- GFP_KERNEL);
- if (!response_msg)
+ if (!response_msg || !response || !message)
return -ENOMEM;
- response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL);
- if (!response) {
- ret = -ENOMEM;
- goto free_response_msg;
- }
-
- message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL);
- if (!message) {
- ret = -ENOMEM;
- goto free_response;
- }
-
/*
* The system controller can verify that an image in the flash is valid.
* Rather than duplicate the check in this driver, call the relevant
@@ -205,31 +207,25 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
ret = mpfs_blocking_transaction(priv->sys_controller, message);
if (ret | response->resp_status) {
dev_warn(priv->dev, "Verification of Upgrade Image failed!\n");
- ret = ret ? ret : -EBADMSG;
- goto free_message;
+ return ret ? ret : -EBADMSG;
}
dev_info(priv->dev, "Verification of Upgrade Image passed!\n");
-free_message:
- devm_kfree(priv->dev, message);
-free_response:
- devm_kfree(priv->dev, response);
-free_response_msg:
- devm_kfree(priv->dev, response_msg);
-
- return ret;
+ return 0;
}
-static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, char *buffer,
+static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv,
u32 image_address, loff_t directory_address)
{
struct erase_info erase;
- size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE;
+ size_t erase_size = round_up(AUTO_UPDATE_DIRECTORY_SIZE, (u64)priv->flash->erasesize);
size_t bytes_written = 0, bytes_read = 0;
+ char *buffer __free(kfree) = kzalloc(erase_size, GFP_KERNEL);
int ret;
- erase_size = round_up(erase_size, (u64)priv->flash->erasesize);
+ if (!buffer)
+ return -ENOMEM;
erase.addr = AUTO_UPDATE_DIRECTORY_BASE;
erase.len = erase_size;
@@ -275,7 +271,7 @@ static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv
return ret;
if (bytes_written != erase_size)
- return ret;
+ return -EIO;
return 0;
}
@@ -285,26 +281,36 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const
{
struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
struct erase_info erase;
- char *buffer;
loff_t directory_address = AUTO_UPDATE_UPGRADE_DIRECTORY;
size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE;
size_t bytes_written = 0;
+ bool is_info = mpfs_auto_update_is_bitstream_info(data, size);
u32 image_address;
int ret;
erase_size = round_up(erase_size, (u64)priv->flash->erasesize);
- image_address = AUTO_UPDATE_BITSTREAM_BASE +
- AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream;
-
- buffer = devm_kzalloc(priv->dev, erase_size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
+ if (is_info)
+ image_address = AUTO_UPDATE_INFO_BASE;
+ else
+ image_address = AUTO_UPDATE_BITSTREAM_BASE +
+ AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream;
- ret = mpfs_auto_update_set_image_address(priv, buffer, image_address, directory_address);
- if (ret) {
- dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret);
- goto out;
+ /*
+ * For bitstream info, the descriptor is written to a fixed offset,
+ * so there is no need to set the image address.
+ */
+ if (!is_info) {
+ ret = mpfs_auto_update_set_image_address(priv, image_address, directory_address);
+ if (ret) {
+ dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret);
+ return ret;
+ }
+ } else {
+ if (size > AUTO_UPDATE_INFO_SIZE) {
+ dev_err(priv->dev, "bitstream info exceeds permitted size\n");
+ return -ENOSPC;
+ }
}
/*
@@ -318,7 +324,7 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const
dev_info(priv->dev, "Erasing the flash at address (0x%x)\n", image_address);
ret = mtd_erase(priv->flash, &erase);
if (ret)
- goto out;
+ return ret;
/*
* No parsing etc of the bitstream is required. The system controller
@@ -328,18 +334,15 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const
dev_info(priv->dev, "Writing the image to the flash at address (0x%x)\n", image_address);
ret = mtd_write(priv->flash, (loff_t)image_address, size, &bytes_written, data);
if (ret)
- goto out;
+ return ret;
- if (bytes_written != size) {
- ret = -EIO;
- goto out;
- }
+ if (bytes_written != size)
+ return -EIO;
*written = bytes_written;
+ dev_info(priv->dev, "Wrote 0x%zx bytes to the flash\n", bytes_written);
-out:
- devm_kfree(priv->dev, buffer);
- return ret;
+ return 0;
}
static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, const u8 *data,
@@ -362,6 +365,9 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader,
goto out;
}
+ if (mpfs_auto_update_is_bitstream_info(data, size))
+ goto out;
+
ret = mpfs_auto_update_verify_image(fw_uploader);
if (ret)
err = FW_UPLOAD_ERR_FW_INVALID;
@@ -381,23 +387,15 @@ static const struct fw_upload_ops mpfs_auto_update_ops = {
static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
{
- struct mpfs_mss_response *response;
- struct mpfs_mss_msg *message;
- u32 *response_msg;
+ u32 *response_msg __free(kfree) =
+ kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL);
+ struct mpfs_mss_response *response __free(kfree) =
+ kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL);
+ struct mpfs_mss_msg *message __free(kfree) =
+ kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL);
int ret;
- response_msg = devm_kzalloc(priv->dev,
- AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
- GFP_KERNEL);
- if (!response_msg)
- return -ENOMEM;
-
- response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL);
- if (!response)
- return -ENOMEM;
-
- message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL);
- if (!message)
+ if (!response_msg || !response || !message)
return -ENOMEM;
/*
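The conversions above replace devm allocations and goto-based error unwinding with scope-based cleanup from <linux/cleanup.h>. A minimal sketch of the pattern with invented demo names, using nothing beyond what these hunks already use:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_transaction(size_t resp_words)
{
	/* Both buffers are kfree()d automatically when they go out of scope,
	 * so the early return below needs no explicit free or goto label. */
	u32 *resp __free(kfree) = kzalloc(resp_words * sizeof(*resp), GFP_KERNEL);
	char *msg __free(kfree) = kzalloc(64, GFP_KERNEL);

	if (!resp || !msg)
		return -ENOMEM;

	/* ... build 'msg', issue the transaction, parse 'resp' ... */

	return 0;
}
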
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index d9629ff87861..2328ca58bba6 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
static int psci_system_suspend(unsigned long unused)
{
+ int err;
phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
pa_cpu_resume, 0, 0);
+ return psci_to_linux_errno(err);
}
static int psci_system_suspend_enter(suspend_state_t state)
diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
index 3f05d9854ddf..73a1a41bf92d 100644
--- a/drivers/firmware/qcom/Kconfig
+++ b/drivers/firmware/qcom/Kconfig
@@ -7,8 +7,40 @@
menu "Qualcomm firmware drivers"
config QCOM_SCM
+ select QCOM_TZMEM
tristate
+config QCOM_TZMEM
+ tristate
+ select GENERIC_ALLOCATOR
+
+choice
+ prompt "TrustZone interface memory allocator mode"
+ depends on QCOM_TZMEM
+ default QCOM_TZMEM_MODE_GENERIC
+ help
+ Selects the mode of the memory allocator providing memory buffers of
+ suitable format for sharing with the TrustZone. If in doubt, select
+ 'Generic'.
+
+config QCOM_TZMEM_MODE_GENERIC
+ bool "Generic"
+ help
+ Use the generic allocator mode. The memory is page-aligned, non-cacheable
+ and physically contiguous.
+
+config QCOM_TZMEM_MODE_SHMBRIDGE
+ bool "SHM Bridge"
+ help
+ Use Qualcomm Shared Memory Bridge. The memory has the same alignment as
+ in the 'Generic' allocator but is also explicitly marked as an SHM Bridge
+ buffer.
+
+ With this selected, all buffers passed to the TrustZone must be allocated
+ using the TZMem allocator or else the TrustZone will refuse to use them.
+
+endchoice
+
config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
bool "Qualcomm download mode enabled by default"
depends on QCOM_SCM
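To illustrate how the allocator described by the QCOM_TZMEM help text is consumed, here is a hedged sketch based on the qcuefi conversion later in this diff. The pool pointer is assumed to come from the driver's probe path (pool creation is not shown in these hunks), and only the calls that appear in those hunks (qcom_tzmem_alloc() and the __free(qcom_tzmem) cleanup) are used.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/firmware/qcom/qcom_tzmem.h>

static int demo_tz_call(struct qcom_tzmem_pool *pool, size_t len)
{
	/* The buffer is returned to the TZMem pool automatically on any return. */
	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(pool, len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... fill 'buf' and hand it to the TrustZone call here ... */

	return 0;
}
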
diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile
index c9f12ee8224a..0be40a1abc13 100644
--- a/drivers/firmware/qcom/Makefile
+++ b/drivers/firmware/qcom/Makefile
@@ -5,5 +5,6 @@
obj-$(CONFIG_QCOM_SCM) += qcom-scm.o
qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
+obj-$(CONFIG_QCOM_TZMEM) += qcom_tzmem.o
obj-$(CONFIG_QCOM_QSEECOM) += qcom_qseecom.o
obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP) += qcom_qseecom_uefisecapp.o
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index bc550ad0dbe0..6fefa4fe80e8 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -13,11 +13,14 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ucs2_string.h>
#include <linux/firmware/qcom/qcom_qseecom.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
/* -- Qualcomm "uefisecapp" interface definitions. -------------------------- */
@@ -272,6 +275,7 @@ struct qsee_rsp_uefi_query_variable_info {
struct qcuefi_client {
struct qseecom_client *client;
struct efivars efivars;
+ struct qcom_tzmem_pool *mempool;
};
static struct device *qcuefi_dev(struct qcuefi_client *qcuefi)
@@ -293,12 +297,11 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
{
struct qsee_req_uefi_get_variable *req_data;
struct qsee_rsp_uefi_get_variable *rsp_data;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
unsigned long buffer_size = *data_size;
- efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
- dma_addr_t cmd_buf_dma;
+ efi_status_t efi_status;
size_t cmd_buf_size;
- void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
@@ -333,11 +336,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__reqdata_offs(rsp_size, &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -351,30 +352,22 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
req_data->length = req_size;
status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length);
- if (status < 0) {
- efi_status = EFI_INVALID_PARAMETER;
- goto out_free;
- }
+ if (status < 0)
+ return EFI_INVALID_PARAMETER;
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, req_size,
- cmd_buf_dma + rsp_offs, rsp_size);
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, req_size,
+ cmd_buf + rsp_offs, rsp_size);
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length < sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length < sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
@@ -388,18 +381,14 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
*attributes = rsp_data->attributes;
}
- goto out_free;
+ return qsee_uefi_status_to_efi(rsp_data->status);
}
- if (rsp_data->length > rsp_size) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length > rsp_size)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length)
+ return EFI_DEVICE_ERROR;
/*
* Note: We need to set attributes and data size even if the buffer is
@@ -422,22 +411,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
if (attributes)
*attributes = rsp_data->attributes;
- if (buffer_size == 0 && !data) {
- efi_status = EFI_SUCCESS;
- goto out_free;
- }
+ if (buffer_size == 0 && !data)
+ return EFI_SUCCESS;
- if (buffer_size < rsp_data->data_size) {
- efi_status = EFI_BUFFER_TOO_SMALL;
- goto out_free;
- }
+ if (buffer_size < rsp_data->data_size)
+ return EFI_BUFFER_TOO_SMALL;
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name,
@@ -446,11 +428,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
{
struct qsee_req_uefi_set_variable *req_data;
struct qsee_rsp_uefi_set_variable *rsp_data;
- efi_status_t efi_status = EFI_SUCCESS;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
unsigned long name_length;
- dma_addr_t cmd_buf_dma;
size_t cmd_buf_size;
- void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
@@ -486,11 +466,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__reqdata_offs(sizeof(*rsp_data), &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -506,10 +484,8 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
req_data->length = req_size;
status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length);
- if (status < 0) {
- efi_status = EFI_INVALID_PARAMETER;
- goto out_free;
- }
+ if (status < 0)
+ return EFI_INVALID_PARAMETER;
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
@@ -517,33 +493,24 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, req_size,
- cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, req_size,
+ cmd_buf + rsp_offs, sizeof(*rsp_data));
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length != sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length != sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
__func__, rsp_data->status);
- efi_status = qsee_uefi_status_to_efi(rsp_data->status);
+ return qsee_uefi_status_to_efi(rsp_data->status);
}
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
@@ -552,10 +519,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
{
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
- efi_status_t efi_status = EFI_SUCCESS;
- dma_addr_t cmd_buf_dma;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
+ efi_status_t efi_status;
size_t cmd_buf_size;
- void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
@@ -587,11 +553,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__reqdata_offs(rsp_size, &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -606,28 +570,20 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name,
*name_size / sizeof(*name));
- if (status < 0) {
- efi_status = EFI_INVALID_PARAMETER;
- goto out_free;
- }
+ if (status < 0)
+ return EFI_INVALID_PARAMETER;
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, req_size,
- cmd_buf_dma + rsp_offs, rsp_size);
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, req_size,
+ cmd_buf + rsp_offs, rsp_size);
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length < sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length < sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
@@ -642,53 +598,40 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
if (efi_status == EFI_BUFFER_TOO_SMALL)
*name_size = rsp_data->name_size;
- goto out_free;
+ return efi_status;
}
- if (rsp_data->length > rsp_size) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length > rsp_size)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length)
+ return EFI_DEVICE_ERROR;
if (rsp_data->name_size > *name_size) {
*name_size = rsp_data->name_size;
- efi_status = EFI_BUFFER_TOO_SMALL;
- goto out_free;
+ return EFI_BUFFER_TOO_SMALL;
}
- if (rsp_data->guid_size != sizeof(*guid)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->guid_size != sizeof(*guid))
+ return EFI_DEVICE_ERROR;
memcpy(guid, ((void *)rsp_data) + rsp_data->guid_offset, rsp_data->guid_size);
status = ucs2_strscpy(name, ((void *)rsp_data) + rsp_data->name_offset,
rsp_data->name_size / sizeof(*name));
*name_size = rsp_data->name_size;
- if (status < 0) {
+ if (status < 0)
/*
* Return EFI_DEVICE_ERROR here because the buffer size should
* have already been validated above, causing this function to
* bail with EFI_BUFFER_TOO_SMALL.
*/
- efi_status = EFI_DEVICE_ERROR;
- }
+ return EFI_DEVICE_ERROR;
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, u32 attr,
@@ -697,10 +640,8 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
{
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
- efi_status_t efi_status = EFI_SUCCESS;
- dma_addr_t cmd_buf_dma;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
size_t cmd_buf_size;
- void *cmd_buf;
size_t req_offs;
size_t rsp_offs;
int status;
@@ -710,11 +651,9 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
__reqdata_offs(sizeof(*rsp_data), &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -724,28 +663,21 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
req_data->length = sizeof(*req_data);
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, sizeof(*req_data),
- cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, sizeof(*req_data),
+ cmd_buf + rsp_offs, sizeof(*rsp_data));
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length != sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length != sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
__func__, rsp_data->status);
- efi_status = qsee_uefi_status_to_efi(rsp_data->status);
- goto out_free;
+ return qsee_uefi_status_to_efi(rsp_data->status);
}
if (storage_space)
@@ -757,10 +689,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
if (max_variable_size)
*max_variable_size = rsp_data->max_variable_size;
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
/* -- Global efivar interface. ---------------------------------------------- */
@@ -871,6 +800,7 @@ static const struct efivar_operations qcom_efivar_ops = {
static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *aux_dev_id)
{
+ struct qcom_tzmem_pool_config pool_config;
struct qcuefi_client *qcuefi;
int status;
@@ -889,6 +819,16 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (status)
qcuefi_set_reference(NULL);
+ memset(&pool_config, 0, sizeof(pool_config));
+ pool_config.initial_size = SZ_4K;
+ pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
+ pool_config.increment = 2;
+ pool_config.max_size = SZ_256K;
+
+ qcuefi->mempool = devm_qcom_tzmem_pool_new(&aux_dev->dev, &pool_config);
+ if (IS_ERR(qcuefi->mempool))
+ return PTR_ERR(qcuefi->mempool);
+
return status;
}
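
The conversion above leans on the scope-based cleanup helpers from <linux/cleanup.h>: a pointer declared with __free(qcom_tzmem) is handed back to qcom_tzmem_free() automatically when it goes out of scope, which is why the out_free/out labels disappear. Below is a minimal sketch of the idiom, assuming the DEFINE_FREE() hookup for qcom_tzmem is provided by the public <linux/firmware/qcom/qcom_tzmem.h> header (not shown in this diff) and using a made-up example_send() helper:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* Illustrative only; the pool would come from the caller's driver data. */
static int example_send(struct qcom_tzmem_pool *pool, size_t len)
{
	phys_addr_t phys;

	/* Released via qcom_tzmem_free() automatically when buf leaves scope. */
	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(pool, len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	phys = qcom_tzmem_to_phys(buf);	/* what the firmware call consumes */
	pr_debug("shared buffer at %pa\n", &phys);

	return 0;	/* no explicit free, no goto-based unwind needed */
}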
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index 16cf88acfa8e..dca5f3f1883b 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
@@ -9,6 +10,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
@@ -150,11 +152,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
+ struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
int arglen = desc->arginfo & 0xf;
int i, ret;
- dma_addr_t args_phys = 0;
- void *args_virt = NULL;
- size_t alloc_len;
+ void *args_virt __free(qcom_tzmem) = NULL;
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
@@ -172,9 +173,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
- alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
-
+ args_virt = qcom_tzmem_alloc(mempool,
+ SCM_SMC_N_EXT_ARGS * sizeof(u64),
+ flag);
if (!args_virt)
return -ENOMEM;
@@ -192,25 +193,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
SCM_SMC_FIRST_EXT_IDX]);
}
- args_phys = dma_map_single(dev, args_virt, alloc_len,
- DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, args_phys)) {
- kfree(args_virt);
- return -ENOMEM;
- }
-
- smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
+ smc.args[SCM_SMC_LAST_REG_IDX] = qcom_tzmem_to_phys(args_virt);
}
- /* ret error check follows after args_virt cleanup*/
ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
-
- if (args_virt) {
- dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
- kfree(args_virt);
- }
-
if (ret)
return ret;
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 68f4df7e6c3c..00c379a3cceb 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -6,12 +6,15 @@
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
@@ -20,11 +23,14 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include "qcom_scm.h"
+#include "qcom_tzmem.h"
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
@@ -43,6 +49,8 @@ struct qcom_scm {
int scm_vote_count;
u64 dload_mode_addr;
+
+ struct qcom_tzmem_pool *mempool;
};
struct qcom_scm_current_perm_info {
@@ -114,7 +122,6 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
};
#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
-#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
#define QCOM_DLOAD_MASK GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP 0
@@ -198,6 +205,11 @@ static void qcom_scm_bw_disable(void)
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
+struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
+{
+ return __scm->mempool;
+}
+
static enum qcom_scm_convention __get_convention(void)
{
unsigned long flags;
@@ -570,6 +582,13 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
* During the scm call memory protection will be enabled for the meta
* data blob, so make sure it's physically contiguous, 4K aligned and
* non-cacheable to avoid XPU violations.
+ *
+ * For PIL calls the hypervisor creates SHM Bridges for the blob
+ * buffers on behalf of Linux so we must not do it ourselves hence
+ * not using the TZMem allocator here.
+ *
+ * If we pass a buffer that is already part of an SHM Bridge to this
+ * call, it will fail.
*/
mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
GFP_KERNEL);
@@ -1008,14 +1027,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
- dma_addr_t ptr_phys;
+ phys_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
size_t ptr_sz;
int next_vm;
__le32 *src;
- void *ptr;
int ret, i, b;
u64 srcvm_bits = *srcvm;
@@ -1025,10 +1043,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ ptr_sz, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
+ ptr_phys = qcom_tzmem_to_phys(ptr);
+
/* Fill source vmid detail */
src = ptr;
i = 0;
@@ -1057,7 +1078,6 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
@@ -1205,32 +1225,21 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
.args[4] = data_unit_size,
.owner = ARM_SMCCC_OWNER_SIP,
};
- void *keybuf;
- dma_addr_t key_phys;
- int ret;
- /*
- * 'key' may point to vmalloc()'ed memory, but we need to pass a
- * physical address that's been properly flushed. The sanctioned way to
- * do this is by using the DMA API. But as is best practice for crypto
- * keys, we also must wipe the key after use. This makes kmemdup() +
- * dma_map_single() not clearly correct, since the DMA API can use
- * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
- * keys is normally rare and thus not performance-critical.
- */
+ int ret;
- keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
- GFP_KERNEL);
+ void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ key_size,
+ GFP_KERNEL);
if (!keybuf)
return -ENOMEM;
memcpy(keybuf, key, key_size);
- desc.args[1] = key_phys;
+ desc.args[1] = qcom_tzmem_to_phys(keybuf);
ret = qcom_scm_call(__scm->dev, &desc, NULL);
memzero_explicit(keybuf, key_size);
- dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
@@ -1342,6 +1351,66 @@ bool qcom_scm_lmh_dcvsh_available(void)
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
+int qcom_scm_shm_bridge_enable(void)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
+ .owner = ARM_SMCCC_OWNER_SIP
+ };
+
+ struct qcom_scm_res res;
+
+ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
+ return -EOPNOTSUPP;
+
+ return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
+
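+/*
+ * Note on the arguments, as used by the TZMem allocator added later in this
+ * series: @pfn_and_ns_perm_flags and @ipfn_and_s_perm_flags carry the
+ * buffer's physical address OR'ed with the QCOM_SCM_PERM_* bits,
+ * @size_and_flags packs the buffer size together with the number of VM
+ * entries (shifted by QCOM_SHM_BRIDGE_NUM_VM_SHIFT), and @ns_vmids selects
+ * the non-secure VM, e.g. QCOM_SCM_VMID_HLOS. The bridge handle returned in
+ * @handle is later passed to qcom_scm_shm_bridge_delete().
+ */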
+int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
+ u64 ipfn_and_s_perm_flags, u64 size_and_flags,
+ u64 ns_vmids, u64 *handle)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ .args[0] = pfn_and_ns_perm_flags,
+ .args[1] = ipfn_and_s_perm_flags,
+ .args[2] = size_and_flags,
+ .args[3] = ns_vmids,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ };
+
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ if (handle && !ret)
+ *handle = res.result[1];
+
+ return ret ?: res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
+
+int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ .args[0] = handle,
+ .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
+
int qcom_scm_lmh_profile_change(u32 profile_id)
{
struct qcom_scm_desc desc = {
@@ -1359,8 +1428,6 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
u64 limit_node, u32 node_id, u64 version)
{
- dma_addr_t payload_phys;
- u32 *payload_buf;
int ret, payload_size = 5 * sizeof(u32);
struct qcom_scm_desc desc = {
@@ -1375,7 +1442,9 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
.owner = ARM_SMCCC_OWNER_SIP,
};
- payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
+ u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ payload_size,
+ GFP_KERNEL);
if (!payload_buf)
return -ENOMEM;
@@ -1385,15 +1454,28 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
payload_buf[3] = 1;
payload_buf[4] = payload_val;
- desc.args[0] = payload_phys;
+ desc.args[0] = qcom_tzmem_to_phys(payload_buf);
ret = qcom_scm_call(__scm->dev, &desc, NULL);
- dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
+int qcom_scm_gpu_init_regs(u32 gpu_req)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_GPU,
+ .cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = gpu_req,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
+
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
@@ -1545,37 +1627,27 @@ int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
unsigned long app_name_len = strlen(app_name);
struct qcom_scm_desc desc = {};
struct qcom_scm_qseecom_resp res = {};
- dma_addr_t name_buf_phys;
- char *name_buf;
int status;
if (app_name_len >= name_buf_size)
return -EINVAL;
- name_buf = kzalloc(name_buf_size, GFP_KERNEL);
+ char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ name_buf_size,
+ GFP_KERNEL);
if (!name_buf)
return -ENOMEM;
memcpy(name_buf, app_name, app_name_len);
- name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, name_buf_phys);
- if (status) {
- kfree(name_buf);
- dev_err(__scm->dev, "qseecom: failed to map dma address\n");
- return status;
- }
-
desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
desc.svc = QSEECOM_TZ_SVC_APP_MGR;
desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
- desc.args[0] = name_buf_phys;
+ desc.args[0] = qcom_tzmem_to_phys(name_buf);
desc.args[1] = app_name_len;
status = qcom_scm_qseecom_call(&desc, &res);
- dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE);
- kfree(name_buf);
if (status)
return status;
@@ -1597,9 +1669,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: DMA address of the request buffer sent to the app.
+ * @req: Request buffer sent to the app (must be TZ memory)
* @req_size: Size of the request buffer.
- * @rsp: DMA address of the response buffer, written to by the app.
+ * @rsp: Response buffer, written to by the app (must be TZ memory)
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and read back
@@ -1610,13 +1682,18 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
+ phys_addr_t req_phys;
+ phys_addr_t rsp_phys;
int status;
+ req_phys = qcom_tzmem_to_phys(req);
+ rsp_phys = qcom_tzmem_to_phys(rsp);
+
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1624,9 +1701,9 @@ int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req;
+ desc.args[1] = req_phys;
desc.args[2] = req_size;
- desc.args[3] = rsp;
+ desc.args[3] = rsp_phys;
desc.args[4] = rsp_size;
status = qcom_scm_qseecom_call(&desc, &res);
@@ -1649,6 +1726,8 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "qcom,sc8180x-primus" },
+ { .compatible = "qcom,x1e80100-crd" },
+ { .compatible = "qcom,x1e80100-qcp" },
{ }
};
@@ -1793,9 +1872,8 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
goto out;
}
- if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
- flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
- dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
+ if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
+ dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
goto out;
}
@@ -1810,6 +1888,7 @@ out:
static int qcom_scm_probe(struct platform_device *pdev)
{
+ struct qcom_tzmem_pool_config pool_config;
struct qcom_scm *scm;
int irq, ret;
@@ -1885,6 +1964,26 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
qcom_scm_disable_sdi();
+ ret = of_reserved_mem_device_init(__scm->dev);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(__scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+
+ ret = qcom_tzmem_enable(__scm->dev);
+ if (ret)
+ return dev_err_probe(__scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+
+ memset(&pool_config, 0, sizeof(pool_config));
+ pool_config.initial_size = 0;
+ pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
+ pool_config.max_size = SZ_256K;
+
+ __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
+ if (IS_ERR(__scm->mempool))
+ return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
+
/*
* Initialize the QSEECOM interface.
*
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 4532907e8489..685b8f59e7a6 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -5,6 +5,7 @@
#define __QCOM_SCM_INT_H
struct device;
+struct qcom_tzmem_pool;
enum qcom_scm_convention {
SMC_CONVENTION_UNKNOWN,
@@ -78,6 +79,8 @@ int scm_legacy_call_atomic(struct device *dev, const struct qcom_scm_desc *desc,
int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res);
+struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
+
#define QCOM_SCM_SVC_BOOT 0x01
#define QCOM_SCM_BOOT_SET_ADDR 0x01
#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
@@ -113,6 +116,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
#define QCOM_SCM_MP_VIDEO_VAR 0x08
#define QCOM_SCM_MP_ASSIGN 0x16
+#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c
+#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d
+#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e
#define QCOM_SCM_SVC_OCMEM 0x0f
#define QCOM_SCM_OCMEM_LOCK_CMD 0x01
@@ -138,6 +144,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_WAITQ_RESUME 0x02
#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+#define QCOM_SCM_SVC_GPU 0x28
+#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
new file mode 100644
index 000000000000..17948cfc82e7
--- /dev/null
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Memory allocator for buffers shared with the TrustZone.
+ *
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/radix-tree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "qcom_tzmem.h"
+
+struct qcom_tzmem_area {
+ struct list_head list;
+ void *vaddr;
+ dma_addr_t paddr;
+ size_t size;
+ void *priv;
+};
+
+struct qcom_tzmem_pool {
+ struct gen_pool *genpool;
+ struct list_head areas;
+ enum qcom_tzmem_policy policy;
+ size_t increment;
+ size_t max_size;
+ spinlock_t lock;
+};
+
+struct qcom_tzmem_chunk {
+ phys_addr_t paddr;
+ size_t size;
+ struct qcom_tzmem_pool *owner;
+};
+
+static struct device *qcom_tzmem_dev;
+static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
+static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);
+
+#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)
+
+static int qcom_tzmem_init(void)
+{
+ return 0;
+}
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ return 0;
+}
+
+static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
+{
+
+}
+
+#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of.h>
+
+#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9
+
+static bool qcom_tzmem_using_shm_bridge;
+
+/* List of machines that are known to not support SHM bridge correctly. */
+static const char *const qcom_tzmem_blacklist[] = {
+ "qcom,sc8180x",
+ "qcom,sdm845", /* reset in rmtfs memory assignment */
+ "qcom,sm8150", /* reset in rmtfs memory assignment */
+ NULL
+};
+
+static int qcom_tzmem_init(void)
+{
+ const char *const *platform;
+ int ret;
+
+ for (platform = qcom_tzmem_blacklist; *platform; platform++) {
+ if (of_machine_is_compatible(*platform))
+ goto notsupp;
+ }
+
+ ret = qcom_scm_shm_bridge_enable();
+ if (ret == -EOPNOTSUPP)
+ goto notsupp;
+
+ if (!ret)
+ qcom_tzmem_using_shm_bridge = true;
+
+ return ret;
+
+notsupp:
+ dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
+ return 0;
+}
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
+ int ret;
+
+ if (!qcom_tzmem_using_shm_bridge)
+ return 0;
+
+ pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
+ ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
+ size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+
+ u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
+ ipfn_and_s_perm, size_and_flags,
+ QCOM_SCM_VMID_HLOS, handle);
+ if (ret)
+ return ret;
+
+ area->priv = no_free_ptr(handle);
+
+ return 0;
+}
+
+static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
+{
+ u64 *handle = area->priv;
+
+ if (!qcom_tzmem_using_shm_bridge)
+ return;
+
+ qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
+ kfree(handle);
+}
+
+#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */
+
+static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
+ size_t size, gfp_t gfp)
+{
+ int ret;
+
+ struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
+ gfp);
+ if (!area)
+ return -ENOMEM;
+
+ area->size = PAGE_ALIGN(size);
+
+ area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
+ &area->paddr, gfp);
+ if (!area->vaddr)
+ return -ENOMEM;
+
+ ret = qcom_tzmem_init_area(area);
+ if (ret) {
+ dma_free_coherent(qcom_tzmem_dev, area->size,
+ area->vaddr, area->paddr);
+ return ret;
+ }
+
+ ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
+ (phys_addr_t)area->paddr, size, -1);
+ if (ret) {
+ dma_free_coherent(qcom_tzmem_dev, area->size,
+ area->vaddr, area->paddr);
+ return ret;
+ }
+
+ scoped_guard(spinlock_irqsave, &pool->lock)
+ list_add_tail(&area->list, &pool->areas);
+
+ area = NULL;
+ return 0;
+}
+
+/**
+ * qcom_tzmem_pool_new() - Create a new TZ memory pool.
+ * @config: Pool configuration.
+ *
+ * Create a new pool of memory suitable for sharing with the TrustZone.
+ *
+ * Must not be used in atomic context.
+ *
+ * Return: New memory pool address or ERR_PTR() on error.
+ */
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
+{
+ int ret = -ENOMEM;
+
+ might_sleep();
+
+ switch (config->policy) {
+ case QCOM_TZMEM_POLICY_STATIC:
+ if (!config->initial_size)
+ return ERR_PTR(-EINVAL);
+ break;
+ case QCOM_TZMEM_POLICY_MULTIPLIER:
+ if (!config->increment)
+ return ERR_PTR(-EINVAL);
+ break;
+ case QCOM_TZMEM_POLICY_ON_DEMAND:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
+ GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!pool->genpool)
+ return ERR_PTR(-ENOMEM);
+
+ gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);
+
+ pool->policy = config->policy;
+ pool->increment = config->increment;
+ pool->max_size = config->max_size;
+ INIT_LIST_HEAD(&pool->areas);
+ spin_lock_init(&pool->lock);
+
+ if (config->initial_size) {
+ ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
+ GFP_KERNEL);
+ if (ret) {
+ gen_pool_destroy(pool->genpool);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return_ptr(pool);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
+
+/**
+ * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
+ * @pool: Memory pool to free.
+ *
+ * Must not be called if any of the allocated chunks has not been freed.
+ * Must not be used in atomic context.
+ */
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
+{
+ struct qcom_tzmem_area *area, *next;
+ struct qcom_tzmem_chunk *chunk;
+ struct radix_tree_iter iter;
+ bool non_empty = false;
+ void __rcu **slot;
+
+ might_sleep();
+
+ if (!pool)
+ return;
+
+ scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
+ radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
+ chunk = radix_tree_deref_slot_protected(slot,
+ &qcom_tzmem_chunks_lock);
+
+ if (chunk->owner == pool)
+ non_empty = true;
+ }
+ }
+
+ WARN(non_empty, "Freeing TZ memory pool with memory still allocated");
+
+ list_for_each_entry_safe(area, next, &pool->areas, list) {
+ list_del(&area->list);
+ qcom_tzmem_cleanup_area(area);
+ dma_free_coherent(qcom_tzmem_dev, area->size,
+ area->vaddr, area->paddr);
+ kfree(area);
+ }
+
+ gen_pool_destroy(pool->genpool);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);
+
+static void devm_qcom_tzmem_pool_free(void *data)
+{
+ struct qcom_tzmem_pool *pool = data;
+
+ qcom_tzmem_pool_free(pool);
+}
+
+/**
+ * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
+ * @dev: Device managing this resource.
+ * @config: Pool configuration.
+ *
+ * Must not be used in atomic context.
+ *
+ * Return: Address of the managed pool or ERR_PTR() on failure.
+ */
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+ const struct qcom_tzmem_pool_config *config)
+{
+ struct qcom_tzmem_pool *pool;
+ int ret;
+
+ pool = qcom_tzmem_pool_new(config);
+ if (IS_ERR(pool))
+ return pool;
+
+ ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
+
+static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
+ size_t requested, gfp_t gfp)
+{
+ size_t current_size = gen_pool_size(pool->genpool);
+
+ if (pool->max_size && (current_size + requested) > pool->max_size)
+ return false;
+
+ switch (pool->policy) {
+ case QCOM_TZMEM_POLICY_STATIC:
+ return false;
+ case QCOM_TZMEM_POLICY_MULTIPLIER:
+ requested = current_size * pool->increment;
+ break;
+ case QCOM_TZMEM_POLICY_ON_DEMAND:
+ break;
+ }
+
+ return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
+}
+
+/**
+ * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
+ * @pool: TZ memory pool from which to allocate memory.
+ * @size: Number of bytes to allocate.
+ * @gfp: GFP flags.
+ *
+ * Can be used in any context.
+ *
+ * Return:
+ * Address of the allocated buffer or NULL if no more memory can be allocated.
+ * The buffer must be released using qcom_tzmem_free().
+ */
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
+{
+ unsigned long vaddr;
+ int ret;
+
+ if (!size)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+
+ struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
+ gfp);
+ if (!chunk)
+ return NULL;
+
+again:
+ vaddr = gen_pool_alloc(pool->genpool, size);
+ if (!vaddr) {
+ if (qcom_tzmem_try_grow_pool(pool, size, gfp))
+ goto again;
+
+ return NULL;
+ }
+
+ chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr);
+ chunk->size = size;
+ chunk->owner = pool;
+
+ scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
+ ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
+ if (ret) {
+ gen_pool_free(pool->genpool, vaddr, size);
+ return NULL;
+ }
+
+ chunk = NULL;
+ }
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
+
+/**
+ * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
+ * @vaddr: Virtual address of the buffer.
+ *
+ * Can be used in any context.
+ */
+void qcom_tzmem_free(void *vaddr)
+{
+ struct qcom_tzmem_chunk *chunk;
+
+ scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
+ chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
+ (unsigned long)vaddr, NULL);
+
+ if (!chunk) {
+ WARN(1, "Virtual address %p not owned by TZ memory allocator",
+ vaddr);
+ return;
+ }
+
+ scoped_guard(spinlock_irqsave, &chunk->owner->lock)
+ gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
+ chunk->size);
+ kfree(chunk);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_free);
+
+/**
+ * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical.
+ * @vaddr: Virtual address of the buffer allocated from a TZ memory pool.
+ *
+ * Can be used in any context. The address must have been returned by a call
+ * to qcom_tzmem_alloc().
+ *
+ * Returns: Physical address of the buffer.
+ */
+phys_addr_t qcom_tzmem_to_phys(void *vaddr)
+{
+ struct qcom_tzmem_chunk *chunk;
+
+ guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);
+
+ chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr);
+ if (!chunk)
+ return 0;
+
+ return chunk->paddr;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
+
+int qcom_tzmem_enable(struct device *dev)
+{
+ if (qcom_tzmem_dev)
+ return -EBUSY;
+
+ qcom_tzmem_dev = dev;
+
+ return qcom_tzmem_init();
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_enable);
+
+MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/qcom/qcom_tzmem.h b/drivers/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..8fa8a3eb940e
--- /dev/null
+++ b/drivers/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_PRIV_H
+#define __QCOM_TZMEM_PRIV_H
+
+struct device;
+
+int qcom_tzmem_enable(struct device *dev);
+
+#endif /* __QCOM_TZMEM_PRIV_H */
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 880ffcb50088..921f61507ae8 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -101,8 +101,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si)
if (IS_ERR(pdev)) {
return ERR_CAST(pdev);
} else if (pdev) {
- if (!sysfb_pci_dev_is_enabled(pdev))
+ if (!sysfb_pci_dev_is_enabled(pdev)) {
+ pci_dev_put(pdev);
return ERR_PTR(-ENODEV);
+ }
return &pdev->dev;
}
@@ -137,7 +139,7 @@ static __init int sysfb_init(void)
if (compatible) {
pd = sysfb_create_simplefb(si, &mode, parent);
if (!IS_ERR(pd))
- goto unlock_mutex;
+ goto put_device;
}
/* if the FB is incompatible, create a legacy framebuffer device */
@@ -155,7 +157,7 @@ static __init int sysfb_init(void)
pd = platform_device_alloc(name, 0);
if (!pd) {
ret = -ENOMEM;
- goto unlock_mutex;
+ goto put_device;
}
pd->dev.parent = parent;
@@ -170,9 +172,11 @@ static __init int sysfb_init(void)
if (ret)
goto err;
- goto unlock_mutex;
+ goto put_device;
err:
platform_device_put(pd);
+put_device:
+ put_device(parent);
unlock_mutex:
mutex_unlock(&disable_lock);
return ret;
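
Both hunks above are reference-counting fixes: sysfb_parent_dev() hands back a device reference (hence the added pci_dev_put() when the PCI device turns out to be disabled), and once the platform device is registered the driver core holds its own reference on the parent, so the probe-time reference must be dropped on every exit path, which is what the new put_device label does. A generic sketch of the pattern, with made-up names:

#include <linux/device.h>
#include <linux/platform_device.h>

/* @parent arrives with a reference held, e.g. from a lookup helper. */
static int example_register(struct device *parent, struct platform_device *pd)
{
	int ret;

	pd->dev.parent = parent;
	ret = platform_device_add(pd);	/* device_add() takes its own parent ref */

	put_device(parent);		/* drop the lookup reference either way */
	return ret;			/* on failure the caller still owns pd */
}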
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index ef3a8214d002..5846c60220f5 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -4,7 +4,7 @@
*
* Communication protocol with TI SCI hardware
* The system works in a message response protocol
- * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ * See: https://software-dl.ti.com/tisci/esd/latest/index.html for details
*
* Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 31d962cdd6eb..3e7f186d239a 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -2,7 +2,7 @@
/*
* Turris Mox rWTM firmware driver
*
- * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
+ * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
*/
#include <linux/armada-37xx-rwtm-mailbox.h>
@@ -174,6 +174,9 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
struct armada_37xx_rwtm_rx_msg *msg = data;
+ if (completion_done(&rwtm->cmd_done))
+ return;
+
rwtm->reply = *msg;
complete(&rwtm->cmd_done);
}
@@ -199,9 +202,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
if (ret < 0)
return ret;
- ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
- if (ret < 0)
- return ret;
+ if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+ return -ETIMEDOUT;
ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
if (ret == -ENODATA) {
@@ -235,9 +237,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
if (ret < 0)
return ret;
- ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
- if (ret < 0)
- return ret;
+ if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+ return -ETIMEDOUT;
ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
if (ret == -ENODATA) {
@@ -274,9 +275,8 @@ static int check_get_random_support(struct mox_rwtm *rwtm)
if (ret < 0)
return ret;
- ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
- if (ret < 0)
- return ret;
+ if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+ return -ETIMEDOUT;
return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
}
@@ -499,6 +499,7 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rwtm);
mutex_init(&rwtm->busy);
+ init_completion(&rwtm->cmd_done);
rwtm->mbox_client.dev = dev;
rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
@@ -512,8 +513,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
goto remove_files;
}
- init_completion(&rwtm->cmd_done);
-
ret = mox_get_board_info(rwtm);
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 9bc45357e1a8..add8acf66a9c 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -41,9 +41,6 @@
/* IOCTL/QUERY feature payload size */
#define FEATURE_PAYLOAD_SIZE 2
-/* Firmware feature check version mask */
-#define FIRMWARE_VERSION_MASK GENMASK(15, 0)
-
static bool feature_check_enabled;
static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3dbddec07028..58f43bcced7c 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -219,7 +219,7 @@ config GPIO_BCM_XGS_IPROC
config GPIO_BRCMSTB
tristate "BRCMSTB GPIO support"
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
- depends on OF_GPIO && (ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF_GPIO && (ARCH_BRCMSTB || ARCH_BCM2835 || BMIPS_GENERIC || COMPILE_TEST)
select GPIO_GENERIC
select IRQ_DOMAIN
help
@@ -1576,7 +1576,7 @@ config GPIO_TPS68470
are "output only" GPIOs.
config GPIO_TQMX86
- tristate "TQ-Systems QTMX86 GPIO"
+ tristate "TQ-Systems TQMx86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP
@@ -1891,4 +1891,35 @@ config GPIO_SIM
endmenu
+menu "GPIO Debugging utilities"
+
+config GPIO_SLOPPY_LOGIC_ANALYZER
+ tristate "Sloppy GPIO logic analyzer"
+ depends on (GPIOLIB || COMPILE_TEST) && CPUSETS && DEBUG_FS && EXPERT
+ help
+ This option enables support for a sloppy logic analyzer using polled
+ GPIOs. Use the 'tools/gpio/gpio-sloppy-logic-analyzer' script with
+ this driver. The script will make it easier to use and will also
+ isolate a CPU for the polling task. Note that this is a last-resort
+ analyzer which can be affected by latencies, non-deterministic code
+ paths, or NMIs. However, e.g. for remote development, it may be useful
+ to get a first view and aid further debugging.
+
+ If this driver is built as a module it will be called
+ 'gpio-sloppy-logic-analyzer'.
+
+config GPIO_VIRTUSER
+ tristate "GPIO Virtual User Testing Module"
+ select DEBUG_FS
+ select CONFIGFS_FS
+ select IRQ_WORK
+ help
+ Say Y here to enable the configurable, configfs-based virtual GPIO
+ consumer testing driver.
+
+ This driver is intended as a helper for spotting regressions in
+ hot-unplug handling in GPIOLIB.
+
+endmenu
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index e2a53013780e..64dd6d9d730d 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -150,6 +150,7 @@ obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
obj-$(CONFIG_GPIO_SIM) += gpio-sim.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o
+obj-$(CONFIG_GPIO_SLOPPY_LOGIC_ANALYZER) += gpio-sloppy-logic-analyzer.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
@@ -181,6 +182,7 @@ obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
+obj-$(CONFIG_GPIO_VIRTUSER) += gpio-virtuser.o
obj-$(CONFIG_GPIO_VIRTIO) += gpio-virtio.o
obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 6f3ded619c8b..3377667a28de 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -195,8 +195,10 @@ static int __init amd_gpio_init(void)
found:
err = pci_read_config_dword(pdev, 0x58, &gp.pmbase);
- if (err)
+ if (err) {
+ err = pcibios_err_to_errno(err);
goto out;
+ }
err = -EIO;
gp.pmbase &= 0x0000FF00;
if (gp.pmbase == 0)
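
For context on the conversion above: the PCI config-space accessors return positive PCIBIOS_* error codes rather than negative errnos, so handing their result straight back (as this driver and gpio-rdc321x below used to do) leaks a positive value to callers that expect -Exx. pcibios_err_to_errno() performs the translation; a small illustrative sketch:

#include <linux/pci.h>

/* Illustrative only: read one config dword and return a proper errno. */
static int example_read_pmbase(struct pci_dev *pdev, u32 *pmbase)
{
	int err = pci_read_config_dword(pdev, 0x58, pmbase);

	/* PCIBIOS_SUCCESSFUL is 0; anything else is a positive PCIBIOS_* code. */
	return err ? pcibios_err_to_errno(err) : 0;
}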
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index f0c0c0f77eb0..6211d99a5770 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -273,8 +273,6 @@ static int ath79_gpio_probe(struct platform_device *pdev)
dev_err(dev, "bgpio_init failed\n");
return err;
}
- /* Use base 0 to stay compatible with legacy platforms */
- ctrl->gc.base = 0;
/* Optional interrupt setup */
if (!np || of_property_read_bool(np, "interrupt-controller")) {
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index bb499e362912..1d0175d6350b 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -225,6 +225,11 @@ static int davinci_gpio_probe(struct platform_device *pdev)
else
nirq = DIV_ROUND_UP(ngpio, 16);
+ if (nirq > MAX_INT_PER_BANK) {
+ dev_err(dev, "Too many IRQs!\n");
+ return -EINVAL;
+ }
+
chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
if (!chips)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index c693fe05d50f..f2e911a3d2ca 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -296,6 +296,8 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ raw_spin_lock_init(&priv->lock);
+
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index 899335da93c7..7e29a2d8de1a 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -130,5 +130,6 @@ static struct i2c_driver gw_pld_driver = {
};
module_i2c_driver(gw_pld_driver);
+MODULE_DESCRIPTION("Gateworks I2C PLD GPIO expander");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index cd9b16dbe1a9..5fb357d7b78a 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -99,7 +99,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
- mc->chip.label = DRIVER_NAME,
+ mc->chip.label = DRIVER_NAME;
mc->chip.set = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
@@ -168,5 +168,6 @@ static void __exit mc33880_exit(void)
module_exit(mc33880_exit);
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("MC33880 high-side/low-side switch GPIO driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 71e1af7c2184..d89e78f0ead3 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -619,8 +619,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
ret = gpiochip_get_ngpios(gc, dev);
if (ret)
gc->ngpio = gc->bgpio_bits;
- else
- gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8));
ret = bgpio_setup_io(gc, dat, set, clr, flags);
if (ret)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 77a2812f2974..8baf3edd5274 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -758,6 +758,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
int level;
if (chip->driver_data & PCA_PCAL) {
+ guard(mutex)(&chip->i2c_lock);
+
/* Enable latch on interrupt-enabled inputs */
pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
@@ -1313,6 +1315,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
+ { .compatible = "ti,tca9535", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca9538", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca9539", .data = OF_953X(16, PCA_INT), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 53b69abe6787..7c57eaeb0afe 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -438,5 +438,6 @@ static void __exit pcf857x_exit(void)
}
module_exit(pcf857x_exit);
+MODULE_DESCRIPTION("Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 9fc1f3dd4190..a211a02d4b4a 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -438,4 +438,5 @@ static struct amba_driver pl061_gpio_driver = {
};
module_amba_driver(pl061_gpio_driver);
+MODULE_DESCRIPTION("Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 01ed2517e9fd..ec7fb9220a47 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -102,7 +102,7 @@ static int rdc_gpio_config(struct gpio_chip *chip,
unlock:
spin_unlock(&gpch->lock);
- return err;
+ return pcibios_err_to_errno(err);
}
/* configure GPIO pin as input */
@@ -170,13 +170,13 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->reg1_data_base,
&rdc321x_gpio_dev->data_reg[0]);
if (err)
- return err;
+ return pcibios_err_to_errno(err);
err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
rdc321x_gpio_dev->reg2_data_base,
&rdc321x_gpio_dev->data_reg[1]);
if (err)
- return err;
+ return pcibios_err_to_errno(err);
dev_info(&pdev->dev, "registering %d GPIOs\n",
rdc321x_gpio_dev->chip.ngpio);
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 2ed5cbe7c8a8..dcca1d7f173e 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/array_size.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
#include <linux/completion.h>
@@ -20,7 +21,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
@@ -227,6 +227,27 @@ static void gpio_sim_free(struct gpio_chip *gc, unsigned int offset)
}
}
+static int gpio_sim_irq_requested(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data)
+{
+ struct gpio_sim_chip *chip = data;
+
+ return gpiochip_lock_as_irq(&chip->gc, hwirq);
+}
+
+static void gpio_sim_irq_released(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data)
+{
+ struct gpio_sim_chip *chip = data;
+
+ gpiochip_unlock_as_irq(&chip->gc, hwirq);
+}
+
+static const struct irq_sim_ops gpio_sim_irq_sim_ops = {
+ .irq_sim_irq_requested = gpio_sim_irq_requested,
+ .irq_sim_irq_released = gpio_sim_irq_released,
+};
+
static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
@@ -308,13 +329,6 @@ static ssize_t gpio_sim_sysfs_pull_store(struct device *dev,
return len;
}
-static void gpio_sim_mutex_destroy(void *data)
-{
- struct mutex *lock = data;
-
- mutex_destroy(lock);
-}
-
static void gpio_sim_put_device(void *data)
{
struct device *dev = data;
@@ -450,7 +464,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (!chip->pull_map)
return -ENOMEM;
- chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines);
+ chip->irq_sim = devm_irq_domain_create_sim_full(dev, swnode, num_lines,
+ &gpio_sim_irq_sim_ops,
+ chip);
if (IS_ERR(chip->irq_sim))
return PTR_ERR(chip->irq_sim);
@@ -458,9 +474,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (ret)
return ret;
- mutex_init(&chip->lock);
- ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy,
- &chip->lock);
+ ret = devm_mutex_init(dev, &chip->lock);
if (ret)
return ret;
@@ -581,19 +595,19 @@ static int gpio_sim_bus_notifier_call(struct notifier_block *nb,
snprintf(devname, sizeof(devname), "gpio-sim.%u", simdev->id);
- if (strcmp(dev_name(dev), devname) == 0) {
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- simdev->driver_bound = true;
- else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND)
- simdev->driver_bound = false;
- else
- return NOTIFY_DONE;
+ if (!device_match_name(dev, devname))
+ return NOTIFY_DONE;
- complete(&simdev->probe_completion);
- return NOTIFY_OK;
- }
+ if (action == BUS_NOTIFY_BOUND_DRIVER)
+ simdev->driver_bound = true;
+ else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND)
+ simdev->driver_bound = false;
+ else
+ return NOTIFY_DONE;
+
+ complete(&simdev->probe_completion);
- return NOTIFY_DONE;
+ return NOTIFY_OK;
}
static struct gpio_sim_device *to_gpio_sim_device(struct config_item *item)
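
Two of the gpio-sim cleanups above deserve a note: devm_mutex_init() registers the mutex_destroy() teardown with devres, replacing the hand-rolled gpio_sim_mutex_destroy() action, and the new irq_sim_ops callbacks let the simulated IRQ domain lock and unlock the corresponding GPIO line for the lifetime of a requested interrupt. A minimal devres sketch (example_chip and example_init are illustrative):

#include <linux/device.h>
#include <linux/mutex.h>

struct example_chip {
	struct mutex lock;
};

/* The mutex is destroyed automatically when @dev's devres group unwinds. */
static int example_init(struct device *dev, struct example_chip *chip)
{
	return devm_mutex_init(dev, &chip->lock);
}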
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
new file mode 100644
index 000000000000..aed6d1f6cfc3
--- /dev/null
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sloppy logic analyzer using GPIOs (to be run on an isolated CPU)
+ *
+ * Use the 'gpio-sloppy-logic-analyzer' script in the 'tools/gpio' folder for
+ * easier usage and further documentation. Note that this is a last-resort
+ * analyzer which can be affected by latencies and non-deterministic code
+ * paths. However, e.g. for remote development, it may be useful to get a first
+ * view and aid further debugging.
+ *
+ * Copyright (C) Wolfram Sang <wsa@sang-engineering.com>
+ * Copyright (C) Renesas Electronics Corporation
+ */
+
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#define GPIO_LA_NAME "gpio-sloppy-logic-analyzer"
+#define GPIO_LA_DEFAULT_BUF_SIZE SZ_256K
+/* can be increased but then we need to extend the u8 buffers */
+#define GPIO_LA_MAX_PROBES 8
+#define GPIO_LA_NUM_TESTS 1024
+
+struct gpio_la_poll_priv {
+ struct mutex blob_lock; /* serialize access to the blob (data) */
+ u32 buf_idx;
+ struct gpio_descs *descs;
+ unsigned long delay_ns;
+ unsigned long acq_delay;
+ struct debugfs_blob_wrapper blob;
+ struct dentry *debug_dir;
+ struct dentry *blob_dent;
+ struct debugfs_blob_wrapper meta;
+ struct device *dev;
+ unsigned int trig_len;
+ u8 *trig_data;
+};
+
+static struct dentry *gpio_la_poll_debug_dir;
+
+static __always_inline int gpio_la_get_array(struct gpio_descs *d, unsigned long *sptr)
+{
+ int ret;
+
+ ret = gpiod_get_array_value(d->ndescs, d->desc, d->info, sptr);
+ if (ret == 0 && fatal_signal_pending(current))
+ ret = -EINTR;
+
+ return ret;
+}
+
+static int fops_capture_set(void *data, u64 val)
+{
+ struct gpio_la_poll_priv *priv = data;
+ u8 *la_buf = priv->blob.data;
+ unsigned long state = 0; /* zeroed because GPIO arrays are bitfields */
+ unsigned long delay;
+ ktime_t start_time;
+ unsigned int i;
+ int ret;
+
+ if (!val)
+ return 0;
+
+ if (!la_buf)
+ return -ENOMEM;
+
+ if (!priv->delay_ns)
+ return -EINVAL;
+
+ mutex_lock(&priv->blob_lock);
+ if (priv->blob_dent) {
+ debugfs_remove(priv->blob_dent);
+ priv->blob_dent = NULL;
+ }
+
+ priv->buf_idx = 0;
+
+ local_irq_disable();
+ preempt_disable_notrace();
+
+ /* Measure delay of reading GPIOs */
+ start_time = ktime_get();
+ for (i = 0; i < GPIO_LA_NUM_TESTS; i++) {
+ ret = gpio_la_get_array(priv->descs, &state);
+ if (ret)
+ goto out;
+ }
+
+ priv->acq_delay = ktime_sub(ktime_get(), start_time) / GPIO_LA_NUM_TESTS;
+ if (priv->delay_ns < priv->acq_delay) {
+ ret = -ERANGE;
+ goto out;
+ }
+
+ delay = priv->delay_ns - priv->acq_delay;
+
+ /* Wait for triggers */
+ for (i = 0; i < priv->trig_len; i += 2) {
+ do {
+ ret = gpio_la_get_array(priv->descs, &state);
+ if (ret)
+ goto out;
+
+ ndelay(delay);
+ } while ((state & priv->trig_data[i]) != priv->trig_data[i + 1]);
+ }
+
+ /* With triggers, final state is also the first sample */
+ if (priv->trig_len)
+ la_buf[priv->buf_idx++] = state;
+
+ /* Sample */
+ while (priv->buf_idx < priv->blob.size) {
+ ret = gpio_la_get_array(priv->descs, &state);
+ if (ret)
+ goto out;
+
+ la_buf[priv->buf_idx++] = state;
+ ndelay(delay);
+ }
+out:
+ preempt_enable_notrace();
+ local_irq_enable();
+ if (ret)
+ dev_err(priv->dev, "couldn't read GPIOs: %d\n", ret);
+
+ kfree(priv->trig_data);
+ priv->trig_data = NULL;
+ priv->trig_len = 0;
+
+ priv->blob_dent = debugfs_create_blob("sample_data", 0400, priv->debug_dir, &priv->blob);
+ mutex_unlock(&priv->blob_lock);
+
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_capture, NULL, fops_capture_set, "%llu\n");
+
+static int fops_buf_size_get(void *data, u64 *val)
+{
+ struct gpio_la_poll_priv *priv = data;
+
+ *val = priv->blob.size;
+
+ return 0;
+}
+
+static int fops_buf_size_set(void *data, u64 val)
+{
+ struct gpio_la_poll_priv *priv = data;
+ int ret = 0;
+ void *p;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&priv->blob_lock);
+
+ vfree(priv->blob.data);
+ p = vzalloc(val);
+ if (!p) {
+ val = 0;
+ ret = -ENOMEM;
+ }
+
+ priv->blob.data = p;
+ priv->blob.size = val;
+
+ mutex_unlock(&priv->blob_lock);
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_buf_size, fops_buf_size_get, fops_buf_size_set, "%llu\n");
+
+static int trigger_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, inode->i_private);
+}
+
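+/*
+ * The trigger is supplied as a sequence of <mask, value> byte pairs: during
+ * capture, each pair is polled in order until (state & mask) == value. This
+ * is why the written length must be even (and is capped at 2048 bytes).
+ */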
+static ssize_t trigger_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct seq_file *m = file->private_data;
+ struct gpio_la_poll_priv *priv = m->private;
+ char *buf;
+
+ /* upper limit is arbitrary but should be less than PAGE_SIZE */
+ if (count > 2048 || count & 1)
+ return -EINVAL;
+
+ buf = memdup_user(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ priv->trig_data = buf;
+ priv->trig_len = count;
+
+ return count;
+}
+
+static const struct file_operations fops_trigger = {
+ .owner = THIS_MODULE,
+ .open = trigger_open,
+ .write = trigger_write,
+ .llseek = no_llseek,
+ .release = single_release,
+};
+
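+/*
+ * Overview of the debugfs interface created below (the root directory name
+ * comes from GPIO_LA_NAME, the per-device directory from dev_name()): each
+ * device directory contains meta_data, delay_ns, delay_ns_acquisition,
+ * buf_size, capture and trigger, plus sample_data after a capture run.
+ */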
+static int gpio_la_poll_probe(struct platform_device *pdev)
+{
+ struct gpio_la_poll_priv *priv;
+ struct device *dev = &pdev->dev;
+ const char *devname = dev_name(dev);
+ const char *gpio_names[GPIO_LA_MAX_PROBES];
+ char *meta = NULL;
+ unsigned int i, meta_len = 0;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ devm_mutex_init(dev, &priv->blob_lock);
+
+ fops_buf_size_set(priv, GPIO_LA_DEFAULT_BUF_SIZE);
+
+ priv->descs = devm_gpiod_get_array(dev, "probe", GPIOD_IN);
+ if (IS_ERR(priv->descs))
+ return PTR_ERR(priv->descs);
+
+ /* artificial limit to keep 1 byte per sample for now */
+ if (priv->descs->ndescs > GPIO_LA_MAX_PROBES)
+ return -EFBIG;
+
+ ret = device_property_read_string_array(dev, "probe-names", gpio_names,
+ priv->descs->ndescs);
+ if (ret >= 0 && ret != priv->descs->ndescs)
+ ret = -EBADR;
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "error naming the GPIOs");
+
+ for (i = 0; i < priv->descs->ndescs; i++) {
+ unsigned int add_len;
+ char *new_meta, *consumer_name;
+
+ if (gpiod_cansleep(priv->descs->desc[i]))
+ return -EREMOTE;
+
+ consumer_name = kasprintf(GFP_KERNEL, "%s: %s", devname, gpio_names[i]);
+ if (!consumer_name)
+ return -ENOMEM;
+ gpiod_set_consumer_name(priv->descs->desc[i], consumer_name);
+ kfree(consumer_name);
+
+ /* '10' is length of 'probe00=\n\0' */
+ add_len = strlen(gpio_names[i]) + 10;
+
+ new_meta = devm_krealloc(dev, meta, meta_len + add_len, GFP_KERNEL);
+ if (!new_meta)
+ return -ENOMEM;
+
+ meta = new_meta;
+ meta_len += snprintf(meta + meta_len, add_len, "probe%02u=%s\n",
+ i + 1, gpio_names[i]);
+ }
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = dev;
+
+ priv->meta.data = meta;
+ priv->meta.size = meta_len;
+ priv->debug_dir = debugfs_create_dir(devname, gpio_la_poll_debug_dir);
+ debugfs_create_blob("meta_data", 0400, priv->debug_dir, &priv->meta);
+ debugfs_create_ulong("delay_ns", 0600, priv->debug_dir, &priv->delay_ns);
+ debugfs_create_ulong("delay_ns_acquisition", 0400, priv->debug_dir, &priv->acq_delay);
+ debugfs_create_file_unsafe("buf_size", 0600, priv->debug_dir, priv, &fops_buf_size);
+ debugfs_create_file_unsafe("capture", 0200, priv->debug_dir, priv, &fops_capture);
+ debugfs_create_file_unsafe("trigger", 0200, priv->debug_dir, priv, &fops_trigger);
+
+ return 0;
+}
+
+static void gpio_la_poll_remove(struct platform_device *pdev)
+{
+ struct gpio_la_poll_priv *priv = platform_get_drvdata(pdev);
+
+ mutex_lock(&priv->blob_lock);
+ debugfs_remove_recursive(priv->debug_dir);
+ mutex_unlock(&priv->blob_lock);
+}
+
+static const struct of_device_id gpio_la_poll_of_match[] = {
+ { .compatible = GPIO_LA_NAME },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_la_poll_of_match);
+
+static struct platform_driver gpio_la_poll_device_driver = {
+ .probe = gpio_la_poll_probe,
+ .remove_new = gpio_la_poll_remove,
+ .driver = {
+ .name = GPIO_LA_NAME,
+ .of_match_table = gpio_la_poll_of_match,
+ }
+};
+
+static int __init gpio_la_poll_init(void)
+{
+ gpio_la_poll_debug_dir = debugfs_create_dir(GPIO_LA_NAME, NULL);
+
+ return platform_driver_register(&gpio_la_poll_device_driver);
+}
+/*
+ * Non-strict pin controllers can read GPIOs while being muxed to something else.
+ * To support that, we need to claim GPIOs before further pinmuxing happens. So,
+ * we probe early using 'late_initcall'
+ */
+late_initcall(gpio_la_poll_init);
+
+static void __exit gpio_la_poll_exit(void)
+{
+ platform_driver_unregister(&gpio_la_poll_device_driver);
+ debugfs_remove_recursive(gpio_la_poll_debug_dir);
+}
+module_exit(gpio_la_poll_exit);
+
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_DESCRIPTION("Sloppy logic analyzer using GPIOs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 6e1a2581e6ae..3a90a3a1caea 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -208,6 +208,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
struct syscon_gpio_priv *priv;
struct device_node *np = dev->of_node;
int ret;
+ bool use_parent_regmap = false;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -216,24 +217,28 @@ static int syscon_gpio_probe(struct platform_device *pdev)
priv->data = of_device_get_match_data(dev);
priv->syscon = syscon_regmap_lookup_by_phandle(np, "gpio,syscon-dev");
- if (IS_ERR(priv->syscon) && np->parent)
+ if (IS_ERR(priv->syscon) && np->parent) {
priv->syscon = syscon_node_to_regmap(np->parent);
+ use_parent_regmap = true;
+ }
if (IS_ERR(priv->syscon))
return PTR_ERR(priv->syscon);
- ret = of_property_read_u32_index(np, "gpio,syscon-dev", 1,
- &priv->dreg_offset);
- if (ret)
- dev_err(dev, "can't read the data register offset!\n");
+ if (!use_parent_regmap) {
+ ret = of_property_read_u32_index(np, "gpio,syscon-dev", 1,
+ &priv->dreg_offset);
+ if (ret)
+ dev_err(dev, "can't read the data register offset!\n");
- priv->dreg_offset <<= 3;
+ priv->dreg_offset <<= 3;
- ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
- &priv->dir_reg_offset);
- if (ret)
- dev_dbg(dev, "can't read the dir register offset!\n");
+ ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
+ &priv->dir_reg_offset);
+ if (ret)
+ dev_dbg(dev, "can't read the dir register offset!\n");
- priv->dir_reg_offset <<= 3;
+ priv->dir_reg_offset <<= 3;
+ }
priv->chip.parent = dev;
priv->chip.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 3a28c1f273c3..f2e7e8754d95 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -6,6 +6,7 @@
* Vadim V.Vlasov <vvlasov@dev.rtsoft.ru>
*/
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -28,16 +29,25 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
+#define TQMX86_GPII_NONE 0
#define TQMX86_GPII_FALLING BIT(0)
#define TQMX86_GPII_RISING BIT(1)
+/* Stored in irq_type as a trigger type, but not actually valid as a register
+ * value, so the name doesn't use "GPII"
+ */
+#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
#define TQMX86_GPII_BITS 2
+/* Stored in irq_type with GPII bits */
+#define TQMX86_INT_UNMASKED BIT(2)
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
int irq;
+ /* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
+ DECLARE_BITMAP(output, TQMX86_NGPIO);
u8 irq_type[TQMX86_NGPI];
};
@@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
unsigned long flags;
- u8 val;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
- if (value)
- val |= BIT(offset);
- else
- val &= ~BIT(offset);
- tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
+ __assign_bit(offset, gpio->output, value);
+ tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_OUT;
}
+static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
+ __must_hold(&gpio->spinlock)
+{
+ u8 type = TQMX86_GPII_NONE, gpiic;
+
+ if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
+ type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
+
+ if (type == TQMX86_INT_BOTH)
+ type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
+ ? TQMX86_GPII_FALLING
+ : TQMX86_GPII_RISING;
+ }
+
+ gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+ gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
+ gpiic |= type << (offset * TQMX86_GPII_BITS);
+ tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+}
+
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
- u8 gpiic, mask;
-
- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~mask;
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
@@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
- u8 gpiic, mask;
-
- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
+
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~mask;
- gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
unsigned long flags;
- u8 new_type, gpiic;
+ u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
@@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
new_type = TQMX86_GPII_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
+ new_type = TQMX86_INT_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
- gpio->irq_type[offset] = new_type;
-
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
- gpiic |= new_type << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
+ gpio->irq_type[offset] |= new_type;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
return 0;
@@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned long irq_bits;
- int i = 0;
+ unsigned long irq_bits, flags;
+ int i;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
irq_bits = irq_status;
+
+ raw_spin_lock_irqsave(&gpio->spinlock, flags);
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
+ /*
+ * Edge-both triggers are implemented by flipping the edge
+ * trigger after each interrupt, as the controller only supports
+ * either rising or falling edge triggers, but not both.
+ *
+ * Internally, the TQMx86 GPIO controller has separate status
+ * registers for rising and falling edge interrupts. GPIIC
+ * configures which bits from which register are visible in the
+ * interrupt status register GPIIS and defines what triggers the
+ * parent IRQ line. Writing to GPIIS always clears both rising
+ * and falling interrupt flags internally, regardless of the
+ * currently configured trigger.
+ *
+ * In consequence, we can cleanly implement the edge-both
+ * trigger in software by first clearing the interrupt and then
+ * setting the new trigger based on the current GPIO input in
+ * tqmx86_gpio_irq_config() - even if an edge arrives between
+ * reading the input and setting the trigger, we will have a new
+ * interrupt pending.
+ */
+ if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
+ tqmx86_gpio_irq_config(gpio, i);
+ }
+ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
i + TQMX86_NGPO);
@@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
+ /*
+ * Reading the previous output state is not possible with TQMx86 hardware.
+ * Initialize all outputs to 0 to have a defined state that matches the
+ * shadow register.
+ */
+ tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD);
+
chip = &gpio->chip;
chip->label = "gpio-tqmx86";
chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
new file mode 100644
index 000000000000..0e0d55da4f01
--- /dev/null
+++ b/drivers/gpio/gpio-virtuser.c
@@ -0,0 +1,1807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Configurable virtual GPIO consumer module.
+ *
+ * Copyright (C) 2023-2024 Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/array_size.h>
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/cleanup.h>
+#include <linux/completion.h>
+#include <linux/configfs.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/irq_work.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+
+#define GPIO_VIRTUSER_NAME_BUF_LEN 32
+
+static DEFINE_IDA(gpio_virtuser_ida);
+static struct dentry *gpio_virtuser_dbg_root;
+
+struct gpio_virtuser_attr_data {
+ union {
+ struct gpio_desc *desc;
+ struct gpio_descs *descs;
+ };
+ struct dentry *dbgfs_dir;
+};
+
+struct gpio_virtuser_line_array_data {
+ struct gpio_virtuser_attr_data ad;
+};
+
+struct gpio_virtuser_line_data {
+ struct gpio_virtuser_attr_data ad;
+ char consumer[GPIO_VIRTUSER_NAME_BUF_LEN];
+ struct mutex consumer_lock;
+ unsigned int debounce;
+ atomic_t irq;
+ atomic_t irq_count;
+};
+
+struct gpio_virtuser_dbgfs_attr_descr {
+ const char *name;
+ const struct file_operations *fops;
+};
+
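+/*
+ * The *_atomic debugfs operations below funnel the GPIO accessors through
+ * hard irq_work so that the non-sleeping gpiod_*() variants are exercised
+ * from atomic context; the completion lets the caller wait for the result.
+ */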
+struct gpio_virtuser_irq_work_context {
+ struct irq_work work;
+ struct completion work_completion;
+ union {
+ struct {
+ struct gpio_desc *desc;
+ int dir;
+ int val;
+ int ret;
+ };
+ struct {
+ struct gpio_descs *descs;
+ unsigned long *values;
+ };
+ };
+};
+
+static struct gpio_virtuser_irq_work_context *
+to_gpio_virtuser_irq_work_context(struct irq_work *work)
+{
+ return container_of(work, struct gpio_virtuser_irq_work_context, work);
+}
+
+static void
+gpio_virtuser_init_irq_work_context(struct gpio_virtuser_irq_work_context *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ init_completion(&ctx->work_completion);
+}
+
+static void
+gpio_virtuser_irq_work_queue_sync(struct gpio_virtuser_irq_work_context *ctx)
+{
+ irq_work_queue(&ctx->work);
+ wait_for_completion(&ctx->work_completion);
+}
+
+static void gpio_virtuser_dbgfs_emit_value_array(char *buf,
+ unsigned long *values,
+ size_t num_values)
+{
+ size_t i;
+
+ for (i = 0; i < num_values; i++)
+ buf[i] = test_bit(i, values) ? '1' : '0';
+
+ buf[i++] = '\n';
+}
+
+static void gpio_virtuser_get_value_array_atomic(struct irq_work *work)
+{
+ struct gpio_virtuser_irq_work_context *ctx =
+ to_gpio_virtuser_irq_work_context(work);
+ struct gpio_descs *descs = ctx->descs;
+
+ ctx->ret = gpiod_get_array_value(descs->ndescs, descs->desc,
+ descs->info, ctx->values);
+ complete(&ctx->work_completion);
+}
+
+static int gpio_virtuser_get_array_value(struct gpio_descs *descs,
+ unsigned long *values, bool atomic)
+{
+ struct gpio_virtuser_irq_work_context ctx;
+
+ if (!atomic)
+ return gpiod_get_array_value_cansleep(descs->ndescs,
+ descs->desc,
+ descs->info, values);
+
+ gpio_virtuser_init_irq_work_context(&ctx);
+ ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_get_value_array_atomic);
+ ctx.descs = descs;
+ ctx.values = values;
+
+ gpio_virtuser_irq_work_queue_sync(&ctx);
+
+ return ctx.ret;
+}
+
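+/*
+ * The "values" debugfs files use one ASCII '0'/'1' character per requested
+ * line plus a trailing newline. For example, assuming a four-line array, a
+ * read may return "0101\n", and writing "1100\n" drives lines 0 and 1
+ * active and lines 2 and 3 inactive.
+ */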
+static ssize_t gpio_virtuser_value_array_do_read(struct file *file,
+ char __user *user_buf,
+ size_t size, loff_t *ppos,
+ bool atomic)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ struct gpio_descs *descs = data->ad.descs;
+ size_t bufsize;
+ int ret;
+
+ unsigned long *values __free(bitmap) = bitmap_zalloc(descs->ndescs,
+ GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = gpio_virtuser_get_array_value(descs, values, atomic);
+ if (ret)
+ return ret;
+
+ bufsize = descs->ndescs + 2;
+
+ char *buf __free(kfree) = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ gpio_virtuser_dbgfs_emit_value_array(buf, values, descs->ndescs);
+
+ return simple_read_from_buffer(user_buf, size, ppos, buf,
+ descs->ndescs + 1);
+}
+
+static int gpio_virtuser_dbgfs_parse_value_array(const char *buf,
+ size_t len,
+ unsigned long *values)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (buf[i] == '0')
+ clear_bit(i, values);
+ else if (buf[i] == '1')
+ set_bit(i, values);
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void gpio_virtuser_set_value_array_atomic(struct irq_work *work)
+{
+ struct gpio_virtuser_irq_work_context *ctx =
+ to_gpio_virtuser_irq_work_context(work);
+ struct gpio_descs *descs = ctx->descs;
+
+ ctx->ret = gpiod_set_array_value(descs->ndescs, descs->desc,
+ descs->info, ctx->values);
+ complete(&ctx->work_completion);
+}
+
+static int gpio_virtuser_set_array_value(struct gpio_descs *descs,
+ unsigned long *values, bool atomic)
+{
+ struct gpio_virtuser_irq_work_context ctx;
+
+ if (!atomic)
+ return gpiod_set_array_value_cansleep(descs->ndescs,
+ descs->desc,
+ descs->info, values);
+
+ gpio_virtuser_init_irq_work_context(&ctx);
+ ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_array_atomic);
+ ctx.descs = descs;
+ ctx.values = values;
+
+ gpio_virtuser_irq_work_queue_sync(&ctx);
+
+ return ctx.ret;
+}
+
+static ssize_t gpio_virtuser_value_array_do_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos,
+ bool atomic)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ struct gpio_descs *descs = data->ad.descs;
+ int ret;
+
+ if (count - 1 != descs->ndescs)
+ return -EINVAL;
+
+ char *buf __free(kfree) = kzalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_write_to_buffer(buf, count, ppos, user_buf, count);
+ if (ret < 0)
+ return ret;
+
+ unsigned long *values __free(bitmap) = bitmap_zalloc(descs->ndescs,
+ GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = gpio_virtuser_dbgfs_parse_value_array(buf, count - 1, values);
+ if (ret)
+ return ret;
+
+ ret = gpio_virtuser_set_array_value(descs, values, atomic);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t gpio_virtuser_value_array_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return gpio_virtuser_value_array_do_read(file, user_buf, count, ppos,
+ false);
+}
+
+static ssize_t gpio_virtuser_value_array_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return gpio_virtuser_value_array_do_write(file, user_buf, count, ppos,
+ false);
+}
+
+static const struct file_operations gpio_virtuser_value_array_fops = {
+ .read = gpio_virtuser_value_array_read,
+ .write = gpio_virtuser_value_array_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+gpio_virtuser_value_array_atomic_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return gpio_virtuser_value_array_do_read(file, user_buf, count, ppos,
+ true);
+}
+
+static ssize_t
+gpio_virtuser_value_array_atomic_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return gpio_virtuser_value_array_do_write(file, user_buf, count, ppos,
+ true);
+}
+
+static const struct file_operations gpio_virtuser_value_array_atomic_fops = {
+ .read = gpio_virtuser_value_array_atomic_read,
+ .write = gpio_virtuser_value_array_atomic_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void gpio_virtuser_do_get_direction_atomic(struct irq_work *work)
+{
+ struct gpio_virtuser_irq_work_context *ctx =
+ to_gpio_virtuser_irq_work_context(work);
+
+ ctx->ret = gpiod_get_direction(ctx->desc);
+ complete(&ctx->work_completion);
+}
+
+static int gpio_virtuser_get_direction_atomic(struct gpio_desc *desc)
+{
+ struct gpio_virtuser_irq_work_context ctx;
+
+ gpio_virtuser_init_irq_work_context(&ctx);
+ ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_do_get_direction_atomic);
+ ctx.desc = desc;
+
+ gpio_virtuser_irq_work_queue_sync(&ctx);
+
+ return ctx.ret;
+}
+
+static ssize_t gpio_virtuser_direction_do_read(struct file *file,
+ char __user *user_buf,
+ size_t size, loff_t *ppos,
+ bool atomic)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ struct gpio_desc *desc = data->ad.desc;
+ char buf[32];
+ int dir;
+
+ if (!atomic)
+ dir = gpiod_get_direction(desc);
+ else
+ dir = gpio_virtuser_get_direction_atomic(desc);
+ if (dir < 0)
+ return dir;
+
+ snprintf(buf, sizeof(buf), "%s\n", dir ? "input" : "output");
+
+ return simple_read_from_buffer(user_buf, size, ppos, buf, strlen(buf));
+}
+
+static int gpio_virtuser_set_direction(struct gpio_desc *desc, int dir, int val)
+{
+ if (dir)
+ return gpiod_direction_input(desc);
+
+ return gpiod_direction_output(desc, val);
+}
+
+static void gpio_virtuser_do_set_direction_atomic(struct irq_work *work)
+{
+ struct gpio_virtuser_irq_work_context *ctx =
+ to_gpio_virtuser_irq_work_context(work);
+
+ ctx->ret = gpio_virtuser_set_direction(ctx->desc, ctx->dir, ctx->val);
+ complete(&ctx->work_completion);
+}
+
+static int gpio_virtuser_set_direction_atomic(struct gpio_desc *desc,
+ int dir, int val)
+{
+ struct gpio_virtuser_irq_work_context ctx;
+
+ gpio_virtuser_init_irq_work_context(&ctx);
+ ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_do_set_direction_atomic);
+ ctx.desc = desc;
+ ctx.dir = dir;
+ ctx.val = val;
+
+ gpio_virtuser_irq_work_queue_sync(&ctx);
+
+ return ctx.ret;
+}
+
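+/*
+ * The "direction" files accept the strings "input", "output-high" and
+ * "output-low", e.g. (path shown purely as an illustration):
+ *
+ * echo output-high > /sys/kernel/debug/gpio-virtuser/gpio-virtuser.0/gpiod:foo:0/direction
+ */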
+static ssize_t gpio_virtuser_direction_do_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos,
+ bool atomic)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ struct gpio_desc *desc = data->ad.desc;
+ char buf[32], *trimmed;
+ int ret, dir, val = 0;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (ret < 0)
+ return ret;
+
+ trimmed = strim(buf);
+
+ if (strcmp(trimmed, "input") == 0) {
+ dir = 1;
+ } else if (strcmp(trimmed, "output-high") == 0) {
+ dir = 0;
+ val = 1;
+ } else if (strcmp(trimmed, "output-low") == 0) {
+ dir = val = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ if (!atomic)
+ ret = gpio_virtuser_set_direction(desc, dir, val);
+ else
+ ret = gpio_virtuser_set_direction_atomic(desc, dir, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t gpio_virtuser_direction_read(struct file *file,
+ char __user *user_buf,
+ size_t size, loff_t *ppos)
+{
+ return gpio_virtuser_direction_do_read(file, user_buf, size, ppos,
+ false);
+}
+
+static ssize_t gpio_virtuser_direction_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return gpio_virtuser_direction_do_write(file, user_buf, count, ppos,
+ false);
+}
+
+static const struct file_operations gpio_virtuser_direction_fops = {
+ .read = gpio_virtuser_direction_read,
+ .write = gpio_virtuser_direction_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t gpio_virtuser_direction_atomic_read(struct file *file,
+ char __user *user_buf,
+ size_t size, loff_t *ppos)
+{
+ return gpio_virtuser_direction_do_read(file, user_buf, size, ppos,
+ true);
+}
+
+static ssize_t gpio_virtuser_direction_atomic_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return gpio_virtuser_direction_do_write(file, user_buf, count, ppos,
+ true);
+}
+
+static const struct file_operations gpio_virtuser_direction_atomic_fops = {
+ .read = gpio_virtuser_direction_atomic_read,
+ .write = gpio_virtuser_direction_atomic_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int gpio_virtuser_value_get(void *data, u64 *val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+ int ret;
+
+ ret = gpiod_get_value_cansleep(ld->ad.desc);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int gpio_virtuser_value_set(void *data, u64 val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+
+ if (val > 1)
+ return -EINVAL;
+
+ gpiod_set_value_cansleep(ld->ad.desc, (int)val);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_fops,
+ gpio_virtuser_value_get,
+ gpio_virtuser_value_set,
+ "%llu\n");
+
+static void gpio_virtuser_get_value_atomic(struct irq_work *work)
+{
+ struct gpio_virtuser_irq_work_context *ctx =
+ to_gpio_virtuser_irq_work_context(work);
+
+ ctx->val = gpiod_get_value(ctx->desc);
+ complete(&ctx->work_completion);
+}
+
+static int gpio_virtuser_value_atomic_get(void *data, u64 *val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+ struct gpio_virtuser_irq_work_context ctx;
+
+ gpio_virtuser_init_irq_work_context(&ctx);
+ ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_get_value_atomic);
+ ctx.desc = ld->ad.desc;
+
+ gpio_virtuser_irq_work_queue_sync(&ctx);
+
+ if (ctx.val < 0)
+ return ctx.val;
+
+ *val = ctx.val;
+
+ return 0;
+}
+
+static void gpio_virtuser_set_value_atomic(struct irq_work *work)
+{
+ struct gpio_virtuser_irq_work_context *ctx =
+ to_gpio_virtuser_irq_work_context(work);
+
+ gpiod_set_value(ctx->desc, ctx->val);
+ complete(&ctx->work_completion);
+}
+
+static int gpio_virtuser_value_atomic_set(void *data, u64 val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+ struct gpio_virtuser_irq_work_context ctx;
+
+ if (val > 1)
+ return -EINVAL;
+
+ gpio_virtuser_init_irq_work_context(&ctx);
+ ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_atomic);
+ ctx.desc = ld->ad.desc;
+ ctx.val = (int)val;
+
+ gpio_virtuser_irq_work_queue_sync(&ctx);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_atomic_fops,
+ gpio_virtuser_value_atomic_get,
+ gpio_virtuser_value_atomic_set,
+ "%llu\n");
+
+static int gpio_virtuser_debounce_get(void *data, u64 *val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+
+ *val = READ_ONCE(ld->debounce);
+
+ return 0;
+}
+
+static int gpio_virtuser_debounce_set(void *data, u64 val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+ int ret;
+
+ if (val > UINT_MAX)
+ return -E2BIG;
+
+ ret = gpiod_set_debounce(ld->ad.desc, (unsigned int)val);
+ if (ret)
+ /* Don't propagate an errno unknown to user-space. */
+ return ret == -ENOTSUPP ? -EOPNOTSUPP : ret;
+
+ WRITE_ONCE(ld->debounce, (unsigned int)val);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_debounce_fops,
+ gpio_virtuser_debounce_get,
+ gpio_virtuser_debounce_set,
+ "%llu\n");
+
+static ssize_t gpio_virtuser_consumer_read(struct file *file,
+ char __user *user_buf,
+ size_t size, loff_t *ppos)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 1];
+ ssize_t ret;
+
+ memset(buf, 0x0, sizeof(buf));
+
+ scoped_guard(mutex, &data->consumer_lock)
+ ret = snprintf(buf, sizeof(buf), "%s\n", data->consumer);
+
+ return simple_read_from_buffer(user_buf, size, ppos, buf, ret);
+}
+
+static ssize_t gpio_virtuser_consumer_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
+ int ret;
+
+ ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
+ user_buf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ if (ret > 0 && buf[ret - 1] == '\n')
+ buf[ret - 1] = '\0';
+
+ ret = gpiod_set_consumer_name(data->ad.desc, buf);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &data->consumer_lock)
+ strscpy(data->consumer, buf, sizeof(data->consumer));
+
+ return count;
+}
+
+static const struct file_operations gpio_virtuser_consumer_fops = {
+ .read = gpio_virtuser_consumer_read,
+ .write = gpio_virtuser_consumer_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int gpio_virtuser_interrupts_get(void *data, u64 *val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+
+ *val = atomic_read(&ld->irq_count);
+
+ return 0;
+}
+
+static irqreturn_t gpio_virtuser_irq_handler(int irq, void *data)
+{
+ struct gpio_virtuser_line_data *ld = data;
+
+ atomic_inc(&ld->irq_count);
+
+ return IRQ_HANDLED;
+}
+
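+/*
+ * Writing 1 to the "interrupts" attribute requests the line's IRQ for both
+ * edges and starts counting events; writing 0 releases it again. Reading
+ * the attribute returns the number of interrupts seen so far.
+ */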
+static int gpio_virtuser_interrupts_set(void *data, u64 val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+ int irq, ret;
+
+ if (val > 1)
+ return -EINVAL;
+
+ if (val) {
+ irq = gpiod_to_irq(ld->ad.desc);
+ if (irq < 0)
+ return irq;
+
+ ret = request_threaded_irq(irq, NULL,
+ gpio_virtuser_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ ld->consumer, data);
+ if (ret)
+ return ret;
+
+ atomic_set(&ld->irq, irq);
+ } else {
+ irq = atomic_xchg(&ld->irq, 0);
+ free_irq(irq, ld);
+ }
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_interrupts_fops,
+ gpio_virtuser_interrupts_get,
+ gpio_virtuser_interrupts_set,
+ "%llu\n");
+
+static const struct gpio_virtuser_dbgfs_attr_descr
+gpio_virtuser_line_array_dbgfs_attrs[] = {
+ {
+ .name = "values",
+ .fops = &gpio_virtuser_value_array_fops,
+ },
+ {
+ .name = "values_atomic",
+ .fops = &gpio_virtuser_value_array_atomic_fops,
+ },
+};
+
+static const struct gpio_virtuser_dbgfs_attr_descr
+gpio_virtuser_line_dbgfs_attrs[] = {
+ {
+ .name = "direction",
+ .fops = &gpio_virtuser_direction_fops,
+ },
+ {
+ .name = "direction_atomic",
+ .fops = &gpio_virtuser_direction_atomic_fops,
+ },
+ {
+ .name = "value",
+ .fops = &gpio_virtuser_value_fops,
+ },
+ {
+ .name = "value_atomic",
+ .fops = &gpio_virtuser_value_atomic_fops,
+ },
+ {
+ .name = "debounce",
+ .fops = &gpio_virtuser_debounce_fops,
+ },
+ {
+ .name = "consumer",
+ .fops = &gpio_virtuser_consumer_fops,
+ },
+ {
+ .name = "interrupts",
+ .fops = &gpio_virtuser_interrupts_fops,
+ },
+};
+
+static int gpio_virtuser_create_debugfs_attrs(
+ const struct gpio_virtuser_dbgfs_attr_descr *attr,
+ size_t num_attrs, struct dentry *parent, void *data)
+{
+ struct dentry *ret;
+ size_t i;
+
+ for (i = 0; i < num_attrs; i++, attr++) {
+ ret = debugfs_create_file(attr->name, 0644, parent, data,
+ attr->fops);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
+ return 0;
+}
+
+static int gpio_virtuser_dbgfs_init_line_array_attrs(struct device *dev,
+ struct gpio_descs *descs,
+ const char *id,
+ struct dentry *dbgfs_entry)
+{
+ struct gpio_virtuser_line_array_data *data;
+ char *name;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ad.descs = descs;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "gpiod:%s", id);
+ if (!name)
+ return -ENOMEM;
+
+ data->ad.dbgfs_dir = debugfs_create_dir(name, dbgfs_entry);
+ if (IS_ERR(data->ad.dbgfs_dir))
+ return PTR_ERR(data->ad.dbgfs_dir);
+
+ return gpio_virtuser_create_debugfs_attrs(
+ gpio_virtuser_line_array_dbgfs_attrs,
+ ARRAY_SIZE(gpio_virtuser_line_array_dbgfs_attrs),
+ data->ad.dbgfs_dir, data);
+}
+
+static int gpio_virtuser_dbgfs_init_line_attrs(struct device *dev,
+ struct gpio_desc *desc,
+ const char *id,
+ unsigned int index,
+ struct dentry *dbgfs_entry)
+{
+ struct gpio_virtuser_line_data *data;
+ char *name;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ad.desc = desc;
+ strscpy(data->consumer, id, sizeof(data->consumer));
+ atomic_set(&data->irq, 0);
+ atomic_set(&data->irq_count, 0);
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "gpiod:%s:%u", id, index);
+ if (!name)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &data->consumer_lock);
+ if (ret)
+ return ret;
+
+ data->ad.dbgfs_dir = debugfs_create_dir(name, dbgfs_entry);
+ if (IS_ERR(data->ad.dbgfs_dir))
+ return PTR_ERR(data->ad.dbgfs_dir);
+
+ return gpio_virtuser_create_debugfs_attrs(
+ gpio_virtuser_line_dbgfs_attrs,
+ ARRAY_SIZE(gpio_virtuser_line_dbgfs_attrs),
+ data->ad.dbgfs_dir, data);
+}
+
+static void gpio_virtuser_debugfs_remove(void *data)
+{
+ struct dentry *dbgfs_entry = data;
+
+ debugfs_remove_recursive(dbgfs_entry);
+}
+
+static int gpio_virtuser_prop_is_gpio(struct property *prop)
+{
+ char *dash = strrchr(prop->name, '-');
+
+ return dash && strcmp(dash, "-gpios") == 0;
+}
+
+/*
+ * If this is an OF-based system, then we iterate over properties and consider
+ * all whose names end in "-gpios". For configfs we expect an additional string
+ * array property - "gpio-virtuser,ids" - containing the list of all GPIO IDs
+ * to request.
+ */
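+/*
+ * As an illustration (node and property names are made up), an OF consumer
+ * node such as:
+ *
+ * gpio-virtuser {
+ * compatible = "gpio-virtuser";
+ * led-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ * reset-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ * };
+ *
+ * yields the two IDs "led" and "reset", each of which is then requested
+ * with devm_gpiod_get_array() in probe.
+ */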
+static int gpio_virtuser_count_ids(struct device *dev)
+{
+ struct device_node *of_node = dev_of_node(dev);
+ struct property *prop;
+ int ret = 0;
+
+ if (!of_node)
+ return device_property_string_array_count(dev,
+ "gpio-virtuser,ids");
+
+ for_each_property_of_node(of_node, prop) {
+ if (gpio_virtuser_prop_is_gpio(prop))
+ ++ret;
+ }
+
+ return ret;
+}
+
+static int gpio_virtuser_get_ids(struct device *dev, const char **ids,
+ int num_ids)
+{
+ struct device_node *of_node = dev_of_node(dev);
+ struct property *prop;
+ size_t pos = 0, diff;
+ char *dash, *tmp;
+
+ if (!of_node)
+ return device_property_read_string_array(dev,
+ "gpio-virtuser,ids",
+ ids, num_ids);
+
+ for_each_property_of_node(of_node, prop) {
+ if (!gpio_virtuser_prop_is_gpio(prop))
+ continue;
+
+ dash = strrchr(prop->name, '-');
+ diff = dash - prop->name;
+
+ tmp = devm_kmemdup(dev, prop->name, diff + 1,
+ GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp[diff] = '\0';
+ ids[pos++] = tmp;
+ }
+
+ return 0;
+}
+
+static int gpio_virtuser_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dentry *dbgfs_entry;
+ struct gpio_descs *descs;
+ int ret, num_ids = 0, i;
+ const char **ids;
+ unsigned int j;
+
+ num_ids = gpio_virtuser_count_ids(dev);
+ if (num_ids < 0)
+ return dev_err_probe(dev, num_ids,
+ "Failed to get the number of GPIOs to request\n");
+
+ if (num_ids == 0)
+ return dev_err_probe(dev, -EINVAL, "No GPIO IDs specified\n");
+
+ ids = devm_kcalloc(dev, num_ids, sizeof(*ids), GFP_KERNEL);
+ if (!ids)
+ return -ENOMEM;
+
+ ret = gpio_virtuser_get_ids(dev, ids, num_ids);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to get the IDs of GPIOs to request\n");
+
+ dbgfs_entry = debugfs_create_dir(dev_name(dev), gpio_virtuser_dbg_root);
+ ret = devm_add_action_or_reset(dev, gpio_virtuser_debugfs_remove,
+ dbgfs_entry);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_ids; i++) {
+ descs = devm_gpiod_get_array(dev, ids[i], GPIOD_ASIS);
+ if (IS_ERR(descs))
+ return dev_err_probe(dev, PTR_ERR(descs),
+ "Failed to request the '%s' GPIOs\n",
+ ids[i]);
+
+ ret = gpio_virtuser_dbgfs_init_line_array_attrs(dev, descs,
+ ids[i],
+ dbgfs_entry);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to setup the debugfs array interface for the '%s' GPIOs\n",
+ ids[i]);
+
+ for (j = 0; j < descs->ndescs; j++) {
+ ret = gpio_virtuser_dbgfs_init_line_attrs(dev,
+ descs->desc[j], ids[i],
+ j, dbgfs_entry);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to setup the debugfs line interface for the '%s' GPIOs\n",
+ ids[i]);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id gpio_virtuser_of_match[] = {
+ { .compatible = "gpio-virtuser" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_virtuser_of_match);
+
+static struct platform_driver gpio_virtuser_driver = {
+ .driver = {
+ .name = "gpio-virtuser",
+ .of_match_table = gpio_virtuser_of_match,
+ },
+ .probe = gpio_virtuser_probe,
+};
+
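+/*
+ * The configfs hierarchy built from the structures below looks roughly like
+ * this (all directory names except the subsystem root are user-chosen
+ * examples):
+ *
+ * /sys/kernel/config/gpio-virtuser/
+ * example-dev/ <- gpio_virtuser_device (dev_name, live)
+ * con-id/ <- gpio_virtuser_lookup
+ * entry0/ <- gpio_virtuser_lookup_entry (key, offset, drive,
+ * pull, active_low, transitory)
+ */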
+struct gpio_virtuser_device {
+ struct config_group group;
+
+ struct platform_device *pdev;
+ int id;
+ struct mutex lock;
+
+ struct notifier_block bus_notifier;
+ struct completion probe_completion;
+ bool driver_bound;
+
+ struct gpiod_lookup_table *lookup_table;
+
+ struct list_head lookup_list;
+};
+
+static int gpio_virtuser_bus_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct gpio_virtuser_device *vdev;
+ struct device *dev = data;
+ char devname[32];
+
+ vdev = container_of(nb, struct gpio_virtuser_device, bus_notifier);
+ snprintf(devname, sizeof(devname), "gpio-virtuser.%d", vdev->id);
+
+ if (!device_match_name(dev, devname))
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_BOUND_DRIVER:
+ vdev->driver_bound = true;
+ break;
+ case BUS_NOTIFY_DRIVER_NOT_BOUND:
+ vdev->driver_bound = false;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ complete(&vdev->probe_completion);
+ return NOTIFY_OK;
+}
+
+static struct gpio_virtuser_device *
+to_gpio_virtuser_device(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_virtuser_device, group);
+}
+
+static bool
+gpio_virtuser_device_is_live(struct gpio_virtuser_device *dev)
+{
+ lockdep_assert_held(&dev->lock);
+
+ return !!dev->pdev;
+}
+
+struct gpio_virtuser_lookup {
+ struct config_group group;
+
+ struct gpio_virtuser_device *parent;
+ struct list_head siblings;
+
+ char *con_id;
+
+ struct list_head entry_list;
+};
+
+static struct gpio_virtuser_lookup *
+to_gpio_virtuser_lookup(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_virtuser_lookup, group);
+}
+
+struct gpio_virtuser_lookup_entry {
+ struct config_group group;
+
+ struct gpio_virtuser_lookup *parent;
+ struct list_head siblings;
+
+ char *key;
+ /* Can be negative to indicate lookup by name. */
+ int offset;
+ enum gpio_lookup_flags flags;
+};
+
+static struct gpio_virtuser_lookup_entry *
+to_gpio_virtuser_lookup_entry(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_virtuser_lookup_entry, group);
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_key_show(struct config_item *item, char *page)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+
+ guard(mutex)(&dev->lock);
+
+ return sprintf(page, "%s\n", entry->key ?: "");
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+
+ char *key __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ strim(key);
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ kfree(entry->key);
+ entry->key = no_free_ptr(key);
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, key);
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_offset_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+ int offset;
+
+ scoped_guard(mutex, &dev->lock)
+ offset = entry->offset;
+
+ return sprintf(page, "%d\n", offset);
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_offset_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+ int offset, ret;
+
+ ret = kstrtoint(page, 0, &offset);
+ if (ret)
+ return ret;
+
+ /*
+ * Negative number here means: 'key' represents a line name to lookup.
+ * Non-negative means: 'key' represents the label of the chip with
+ * the 'offset' value representing the line within that chip.
+ *
+ * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
+ * the greatest offset we can accept is (U16_MAX - 1).
+ */
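+ /*
+ * Example (values are illustrative): key = "gpio-bank0" with offset = 3
+ * selects line 3 of the chip labelled "gpio-bank0", while key = "power-led"
+ * with offset = -1 matches a line named "power-led" on any chip.
+ */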
+ if (offset > (U16_MAX - 1))
+ return -EINVAL;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ entry->offset = offset;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, offset);
+
+static enum gpio_lookup_flags
+gpio_virtuser_lookup_get_flags(struct config_item *item)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+
+ guard(mutex)(&dev->lock);
+
+ return entry->flags;
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_drive_show(struct config_item *item, char *page)
+{
+ enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item);
+ const char *repr;
+
+ if (flags & GPIO_OPEN_DRAIN)
+ repr = "open-drain";
+ else if (flags & GPIO_OPEN_SOURCE)
+ repr = "open-source";
+ else
+ repr = "push-pull";
+
+ return sprintf(page, "%s\n", repr);
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_drive_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ if (sysfs_streq(page, "push-pull")) {
+ entry->flags &= ~(GPIO_OPEN_DRAIN | GPIO_OPEN_SOURCE);
+ } else if (sysfs_streq(page, "open-drain")) {
+ entry->flags &= ~GPIO_OPEN_SOURCE;
+ entry->flags |= GPIO_OPEN_DRAIN;
+ } else if (sysfs_streq(page, "open-source")) {
+ entry->flags &= ~GPIO_OPEN_DRAIN;
+ entry->flags |= GPIO_OPEN_SOURCE;
+ } else {
+ count = -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, drive);
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_pull_show(struct config_item *item, char *page)
+{
+ enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item);
+ const char *repr;
+
+ if (flags & GPIO_PULL_UP)
+ repr = "pull-up";
+ else if (flags & GPIO_PULL_DOWN)
+ repr = "pull-down";
+ else if (flags & GPIO_PULL_DISABLE)
+ repr = "pull-disabled";
+ else
+ repr = "as-is";
+
+ return sprintf(page, "%s\n", repr);
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_pull_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ if (sysfs_streq(page, "pull-up")) {
+ entry->flags &= ~(GPIO_PULL_DOWN | GPIO_PULL_DISABLE);
+ entry->flags |= GPIO_PULL_UP;
+ } else if (sysfs_streq(page, "pull-down")) {
+ entry->flags &= ~(GPIO_PULL_UP | GPIO_PULL_DISABLE);
+ entry->flags |= GPIO_PULL_DOWN;
+ } else if (sysfs_streq(page, "pull-disabled")) {
+ entry->flags &= ~(GPIO_PULL_UP | GPIO_PULL_DOWN);
+ entry->flags |= GPIO_PULL_DISABLE;
+ } else if (sysfs_streq(page, "as-is")) {
+ entry->flags &= ~(GPIO_PULL_UP | GPIO_PULL_DOWN |
+ GPIO_PULL_DISABLE);
+ } else {
+ count = -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, pull);
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_active_low_show(struct config_item *item,
+ char *page)
+{
+ enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item);
+
+ return sprintf(page, "%c\n", flags & GPIO_ACTIVE_LOW ? '1' : '0');
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_active_low_store(struct config_item *item,
+ const char *page,
+ size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+ bool active_low;
+ int ret;
+
+ ret = kstrtobool(page, &active_low);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ if (active_low)
+ entry->flags |= GPIO_ACTIVE_LOW;
+ else
+ entry->flags &= ~GPIO_ACTIVE_LOW;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, active_low);
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_transitory_show(struct config_item *item,
+ char *page)
+{
+ enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item);
+
+ return sprintf(page, "%c\n", flags & GPIO_TRANSITORY ? '1' : '0');
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_transitory_store(struct config_item *item,
+ const char *page,
+ size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+ bool transitory;
+ int ret;
+
+ ret = kstrtobool(page, &transitory);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ if (transitory)
+ entry->flags |= GPIO_TRANSITORY;
+ else
+ entry->flags &= ~GPIO_TRANSITORY;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, transitory);
+
+static struct configfs_attribute *gpio_virtuser_lookup_entry_config_attrs[] = {
+ &gpio_virtuser_lookup_entry_config_attr_key,
+ &gpio_virtuser_lookup_entry_config_attr_offset,
+ &gpio_virtuser_lookup_entry_config_attr_drive,
+ &gpio_virtuser_lookup_entry_config_attr_pull,
+ &gpio_virtuser_lookup_entry_config_attr_active_low,
+ &gpio_virtuser_lookup_entry_config_attr_transitory,
+ NULL
+};
+
+static ssize_t
+gpio_virtuser_device_config_dev_name_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
+ struct platform_device *pdev;
+
+ guard(mutex)(&dev->lock);
+
+ pdev = dev->pdev;
+ if (pdev)
+ return sprintf(page, "%s\n", dev_name(&pdev->dev));
+
+ return sprintf(page, "gpio-sim.%d\n", dev->id);
+}
+
+CONFIGFS_ATTR_RO(gpio_virtuser_device_config_, dev_name);
+
+static ssize_t gpio_virtuser_device_config_live_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
+ bool live;
+
+ scoped_guard(mutex, &dev->lock)
+ live = gpio_virtuser_device_is_live(dev);
+
+ return sprintf(page, "%c\n", live ? '1' : '0');
+}
+
+static size_t
+gpio_virtuser_get_lookup_count(struct gpio_virtuser_device *dev)
+{
+ struct gpio_virtuser_lookup *lookup;
+ size_t count = 0;
+
+ lockdep_assert_held(&dev->lock);
+
+ list_for_each_entry(lookup, &dev->lookup_list, siblings)
+ count += list_count_nodes(&lookup->entry_list);
+
+ return count;
+}
+
+static int
+gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
+{
+ size_t num_entries = gpio_virtuser_get_lookup_count(dev);
+ struct gpio_virtuser_lookup_entry *entry;
+ struct gpio_virtuser_lookup *lookup;
+ struct gpiod_lookup *curr;
+ unsigned int i = 0;
+
+ lockdep_assert_held(&dev->lock);
+
+ struct gpiod_lookup_table *table __free(kfree) =
+ kzalloc(struct_size(table, table, num_entries + 1), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ table->dev_id = kasprintf(GFP_KERNEL, "gpio-virtuser.%d", dev->id);
+ if (!table->dev_id)
+ return -ENOMEM;
+
+ list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ list_for_each_entry(entry, &lookup->entry_list, siblings) {
+ curr = &table->table[i];
+
+ curr->con_id = lookup->con_id;
+ curr->idx = i;
+ curr->key = entry->key;
+ curr->chip_hwnum = entry->offset < 0 ?
+ U16_MAX : entry->offset;
+ curr->flags = entry->flags;
+ i++;
+ }
+ }
+
+ gpiod_add_lookup_table(table);
+ dev->lookup_table = no_free_ptr(table);
+
+ return 0;
+}
+
+static struct fwnode_handle *
+gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev)
+{
+ struct property_entry properties[2];
+ struct gpio_virtuser_lookup *lookup;
+ unsigned int i = 0;
+ size_t num_ids;
+
+ memset(properties, 0, sizeof(properties));
+
+ num_ids = list_count_nodes(&dev->lookup_list);
+ char **ids __free(kfree) = kcalloc(num_ids + 1, sizeof(*ids),
+ GFP_KERNEL);
+ if (!ids)
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(lookup, &dev->lookup_list, siblings)
+ ids[i++] = lookup->con_id;
+
+ properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN("gpio-virtuser,ids",
+ ids, num_ids);
+
+ return fwnode_create_software_node(properties, NULL);
+}
+
+static int
+gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
+{
+ struct platform_device_info pdevinfo;
+ struct fwnode_handle *swnode;
+ struct platform_device *pdev;
+ int ret;
+
+ lockdep_assert_held(&dev->lock);
+
+ if (list_empty(&dev->lookup_list))
+ return -ENODATA;
+
+ swnode = gpio_virtuser_make_device_swnode(dev);
+ if (IS_ERR(swnode))
+ return PTR_ERR(swnode);
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = "gpio-virtuser";
+ pdevinfo.id = dev->id;
+ pdevinfo.fwnode = swnode;
+
+ ret = gpio_virtuser_make_lookup_table(dev);
+ if (ret) {
+ fwnode_remove_software_node(swnode);
+ return ret;
+ }
+
+ reinit_completion(&dev->probe_completion);
+ dev->driver_bound = false;
+ bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
+
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev)) {
+ bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+ fwnode_remove_software_node(swnode);
+ return PTR_ERR(pdev);
+ }
+
+ wait_for_completion(&dev->probe_completion);
+ bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+
+ if (!dev->driver_bound) {
+ platform_device_unregister(pdev);
+ fwnode_remove_software_node(swnode);
+ return -ENXIO;
+ }
+
+ dev->pdev = pdev;
+
+ return 0;
+}
+
+static void
+gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
+{
+ struct fwnode_handle *swnode;
+
+ lockdep_assert_held(&dev->lock);
+
+ swnode = dev_fwnode(&dev->pdev->dev);
+ platform_device_unregister(dev->pdev);
+ fwnode_remove_software_node(swnode);
+ dev->pdev = NULL;
+ gpiod_remove_lookup_table(dev->lookup_table);
+ kfree(dev->lookup_table);
+}
+
+static ssize_t
+gpio_virtuser_device_config_live_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
+ int ret = 0;
+ bool live;
+
+ ret = kstrtobool(page, &live);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ if (live == gpio_virtuser_device_is_live(dev))
+ return -EPERM;
+
+ if (live)
+ ret = gpio_virtuser_device_activate(dev);
+ else
+ gpio_virtuser_device_deactivate(dev);
+
+ return ret ?: count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_device_config_, live);
+
+static struct configfs_attribute *gpio_virtuser_device_config_attrs[] = {
+ &gpio_virtuser_device_config_attr_dev_name,
+ &gpio_virtuser_device_config_attr_live,
+ NULL
+};
+
+static void
+gpio_virtuser_lookup_entry_config_group_release(struct config_item *item)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+
+ guard(mutex)(&dev->lock);
+
+ list_del(&entry->siblings);
+
+ kfree(entry->key);
+ kfree(entry);
+}
+
+static struct
+configfs_item_operations gpio_virtuser_lookup_entry_config_item_ops = {
+ .release = gpio_virtuser_lookup_entry_config_group_release,
+};
+
+static const struct
+config_item_type gpio_virtuser_lookup_entry_config_group_type = {
+ .ct_item_ops = &gpio_virtuser_lookup_entry_config_item_ops,
+ .ct_attrs = gpio_virtuser_lookup_entry_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_virtuser_make_lookup_entry_group(struct config_group *group,
+ const char *name)
+{
+ struct gpio_virtuser_lookup *lookup =
+ to_gpio_virtuser_lookup(&group->cg_item);
+ struct gpio_virtuser_device *dev = lookup->parent;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return ERR_PTR(-EBUSY);
+
+ struct gpio_virtuser_lookup_entry *entry __free(kfree) =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&entry->group, name,
+ &gpio_virtuser_lookup_entry_config_group_type);
+ entry->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ entry->parent = lookup;
+ list_add_tail(&entry->siblings, &lookup->entry_list);
+
+ return &no_free_ptr(entry)->group;
+}
+
+static void gpio_virtuser_lookup_config_group_release(struct config_item *item)
+{
+ struct gpio_virtuser_lookup *lookup = to_gpio_virtuser_lookup(item);
+ struct gpio_virtuser_device *dev = lookup->parent;
+
+ guard(mutex)(&dev->lock);
+
+ list_del(&lookup->siblings);
+
+ kfree(lookup->con_id);
+ kfree(lookup);
+}
+
+static struct configfs_item_operations gpio_virtuser_lookup_config_item_ops = {
+ .release = gpio_virtuser_lookup_config_group_release,
+};
+
+static struct
+configfs_group_operations gpio_virtuser_lookup_config_group_ops = {
+ .make_group = gpio_virtuser_make_lookup_entry_group,
+};
+
+static const struct config_item_type gpio_virtuser_lookup_config_group_type = {
+ .ct_group_ops = &gpio_virtuser_lookup_config_group_ops,
+ .ct_item_ops = &gpio_virtuser_lookup_config_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_virtuser_make_lookup_group(struct config_group *group, const char *name)
+{
+ struct gpio_virtuser_device *dev =
+ to_gpio_virtuser_device(&group->cg_item);
+
+ if (strlen(name) > (GPIO_VIRTUSER_NAME_BUF_LEN - 1))
+ return ERR_PTR(-E2BIG);
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return ERR_PTR(-EBUSY);
+
+ struct gpio_virtuser_lookup *lookup __free(kfree) =
+ kzalloc(sizeof(*lookup), GFP_KERNEL);
+ if (!lookup)
+ return ERR_PTR(-ENOMEM);
+
+ lookup->con_id = kstrdup(name, GFP_KERNEL);
+ if (!lookup->con_id)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&lookup->group, name,
+ &gpio_virtuser_lookup_config_group_type);
+ INIT_LIST_HEAD(&lookup->entry_list);
+ lookup->parent = dev;
+ list_add_tail(&lookup->siblings, &dev->lookup_list);
+
+ return &no_free_ptr(lookup)->group;
+}
+
+static void gpio_virtuser_device_config_group_release(struct config_item *item)
+{
+ struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ gpio_virtuser_device_deactivate(dev);
+
+ mutex_destroy(&dev->lock);
+ ida_free(&gpio_virtuser_ida, dev->id);
+ kfree(dev);
+}
+
+static struct configfs_item_operations gpio_virtuser_device_config_item_ops = {
+ .release = gpio_virtuser_device_config_group_release,
+};
+
+static struct configfs_group_operations gpio_virtuser_device_config_group_ops = {
+ .make_group = gpio_virtuser_make_lookup_group,
+};
+
+static const struct config_item_type gpio_virtuser_device_config_group_type = {
+ .ct_group_ops = &gpio_virtuser_device_config_group_ops,
+ .ct_item_ops = &gpio_virtuser_device_config_item_ops,
+ .ct_attrs = gpio_virtuser_device_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_virtuser_config_make_device_group(struct config_group *group,
+ const char *name)
+{
+ struct gpio_virtuser_device *dev __free(kfree) = kzalloc(sizeof(*dev),
+ GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->id = ida_alloc(&gpio_virtuser_ida, GFP_KERNEL);
+ if (dev->id < 0)
+ return ERR_PTR(dev->id);
+
+ config_group_init_type_name(&dev->group, name,
+ &gpio_virtuser_device_config_group_type);
+ mutex_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->lookup_list);
+ dev->bus_notifier.notifier_call = gpio_virtuser_bus_notifier_call;
+ init_completion(&dev->probe_completion);
+
+ return &no_free_ptr(dev)->group;
+}
+
+static struct configfs_group_operations gpio_virtuser_config_group_ops = {
+ .make_group = gpio_virtuser_config_make_device_group,
+};
+
+static const struct config_item_type gpio_virtuser_config_type = {
+ .ct_group_ops = &gpio_virtuser_config_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem gpio_virtuser_config_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "gpio-virtuser",
+ .ci_type = &gpio_virtuser_config_type,
+ },
+ },
+};
+
+static int __init gpio_virtuser_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&gpio_virtuser_driver);
+ if (ret) {
+ pr_err("Failed to register the platform driver: %d\n", ret);
+ return ret;
+ }
+
+ config_group_init(&gpio_virtuser_config_subsys.su_group);
+ mutex_init(&gpio_virtuser_config_subsys.su_mutex);
+ ret = configfs_register_subsystem(&gpio_virtuser_config_subsys);
+ if (ret) {
+ pr_err("Failed to register the '%s' configfs subsystem: %d\n",
+ gpio_virtuser_config_subsys.su_group.cg_item.ci_namebuf,
+ ret);
+ goto err_plat_drv_unreg;
+ }
+
+ gpio_virtuser_dbg_root = debugfs_create_dir("gpio-virtuser", NULL);
+ if (IS_ERR(gpio_virtuser_dbg_root)) {
+ ret = PTR_ERR(gpio_virtuser_dbg_root);
+ pr_err("Failed to create the debugfs tree: %d\n", ret);
+ goto err_configfs_unreg;
+ }
+
+ return 0;
+
+err_configfs_unreg:
+ configfs_unregister_subsystem(&gpio_virtuser_config_subsys);
+err_plat_drv_unreg:
+ mutex_destroy(&gpio_virtuser_config_subsys.su_mutex);
+ platform_driver_unregister(&gpio_virtuser_driver);
+
+ return ret;
+}
+module_init(gpio_virtuser_init);
+
+static void __exit gpio_virtuser_exit(void)
+{
+ configfs_unregister_subsystem(&gpio_virtuser_config_subsys);
+ mutex_destroy(&gpio_virtuser_config_subsys.su_mutex);
+ platform_driver_unregister(&gpio_virtuser_driver);
+ debugfs_remove_recursive(gpio_virtuser_dbg_root);
+}
+module_exit(gpio_virtuser_exit);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Virtual GPIO consumer module");
+MODULE_LICENSE("GPL");
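The allocation paths in gpio_virtuser_make_lookup_group() and gpio_virtuser_config_make_device_group() above lean on the scope-based cleanup helpers from <linux/cleanup.h>. Below is a minimal stand-alone sketch of that pattern (not part of the patch; the demo_item type and demo_list are made up for illustration): __free(kfree) arms an automatic kfree() for early error returns, guard(mutex) drops the lock on scope exit, and no_free_ptr() disarms the cleanup once ownership has been handed off.

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_item {                      /* hypothetical type */
        struct list_head siblings;
        char *name;
};

static DEFINE_MUTEX(demo_lock);
static LIST_HEAD(demo_list);

static struct demo_item *demo_add(const char *name)
{
        guard(mutex)(&demo_lock);       /* unlocked automatically on return */

        struct demo_item *item __free(kfree) =
                kzalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return ERR_PTR(-ENOMEM);

        item->name = kstrdup(name, GFP_KERNEL);
        if (!item->name)
                return ERR_PTR(-ENOMEM); /* item is kfree()d by __free() */

        list_add_tail(&item->siblings, &demo_list);

        /* Ownership transferred to the list: disarm the auto-free. */
        return no_free_ptr(item);
}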
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index bb063b81cee6..69cd2be9c7f3 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -976,7 +976,7 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
int i;
/* Try first from _DSD */
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ for (i = 0; i < gpio_suffix_count; i++) {
if (con_id) {
snprintf(propname, sizeof(propname), "%s-%s",
con_id, gpio_suffixes[i]);
@@ -1453,7 +1453,7 @@ int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
unsigned int i;
/* Try first from _DSD */
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ for (i = 0; i < gpio_suffix_count; i++) {
if (con_id)
snprintf(propname, sizeof(propname), "%s-%s",
con_id, gpio_suffixes[i]);
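For reference, a minimal sketch (not from the patch) of how the suffix table now exported by gpiolib.c is walked: every lookup path builds the same "<con_id>-gpios" / "<con_id>-gpio" property names, so the suffixes and their count live in one place instead of a per-translation-unit ARRAY_SIZE(). The find_property() callback here is hypothetical.

#include <linux/kernel.h>
#include <linux/types.h>

extern const char *const gpio_suffixes[];
extern const size_t gpio_suffix_count;

static bool demo_lookup_gpio_prop(const char *con_id,
                                  bool (*find_property)(const char *name))
{
        char propname[32];
        size_t i;

        for (i = 0; i < gpio_suffix_count; i++) {
                if (con_id)
                        snprintf(propname, sizeof(propname), "%s-%s",
                                 con_id, gpio_suffixes[i]);
                else
                        snprintf(propname, sizeof(propname), "%s",
                                 gpio_suffixes[i]);

                if (find_property(propname))
                        return true;
        }

        return false;
}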
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 9dad67ea2597..ef08b23a56e2 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -89,6 +89,10 @@ struct linehandle_state {
GPIOHANDLE_REQUEST_OPEN_DRAIN | \
GPIOHANDLE_REQUEST_OPEN_SOURCE)
+#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT)
+
static int linehandle_validate_flags(u32 flags)
{
/* Return an error if an unknown flag is set */
@@ -169,21 +173,21 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (ret)
return ret;
+ /* Lines must be reconfigured explicitly as input or output. */
+ if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
+ return -EINVAL;
+
for (i = 0; i < lh->num_descs; i++) {
desc = lh->descs[i];
- linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
+ linehandle_flags_to_desc_flags(lflags, &desc->flags);
- /*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
- */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!gcnf.default_values[i];
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
- } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
+ } else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
@@ -1128,6 +1132,14 @@ static void edge_detector_stop(struct line *line)
/* do not change line->level - see comment in debounced_value() */
}
+static int edge_detector_fifo_init(struct linereq *req)
+{
+ if (kfifo_initialized(&req->events))
+ return 0;
+
+ return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
+}
+
static int edge_detector_setup(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx, u64 edflags)
@@ -1139,9 +1151,8 @@ static int edge_detector_setup(struct line *line,
char *label;
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
- if (eflags && !kfifo_initialized(&line->req->events)) {
- ret = kfifo_alloc(&line->req->events,
- line->req->event_buffer_size, GFP_KERNEL);
+ if (eflags) {
+ ret = edge_detector_fifo_init(line->req);
if (ret)
return ret;
}
@@ -1193,8 +1204,6 @@ static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx, u64 edflags)
{
- u64 eflags;
- int ret;
u64 active_edflags = READ_ONCE(line->edflags);
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
@@ -1210,14 +1219,9 @@ static int edge_detector_update(struct line *line,
* ensure event fifo is initialised if edge detection
* is now enabled.
*/
- eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
- if (eflags && !kfifo_initialized(&line->req->events)) {
- ret = kfifo_alloc(&line->req->events,
- line->req->event_buffer_size,
- GFP_KERNEL);
- if (ret)
- return ret;
- }
+ if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
+ return edge_detector_fifo_init(line->req);
+
return 0;
}
@@ -1530,12 +1534,14 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
line = &lr->lines[i];
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(&lc, i);
- gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
- edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
+ * Lines not explicitly reconfigured as input or output
+ * are left unchanged.
*/
+ if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
+ continue;
+ gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(&lc, i);
@@ -1543,7 +1549,7 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
- } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
+ } else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
@@ -1642,16 +1648,15 @@ static ssize_t linereq_read(struct file *file, char __user *buf,
return ret;
}
- ret = kfifo_out(&lr->events, &le, 1);
- }
- if (ret != 1) {
- /*
- * This should never happen - we were holding the
- * lock from the moment we learned the fifo is no
- * longer empty until now.
- */
- ret = -EIO;
- break;
+ if (kfifo_out(&lr->events, &le, 1) != 1) {
+ /*
+ * This should never happen - we hold the
+ * lock from the moment we learned the fifo
+ * is no longer empty until now.
+ */
+ WARN(1, "failed to read from non-empty kfifo");
+ return -EIO;
+ }
}
if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
@@ -1774,6 +1779,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
mutex_init(&lr->config_mutex);
init_waitqueue_head(&lr->wait);
+ INIT_KFIFO(lr->events);
lr->event_buffer_size = ulr.event_buffer_size;
if (lr->event_buffer_size == 0)
lr->event_buffer_size = ulr.num_lines * 16;
@@ -1994,16 +2000,15 @@ static ssize_t lineevent_read(struct file *file, char __user *buf,
return ret;
}
- ret = kfifo_out(&le->events, &ge, 1);
- }
- if (ret != 1) {
- /*
- * This should never happen - we were holding the lock
- * from the moment we learned the fifo is no longer
- * empty until now.
- */
- ret = -EIO;
- break;
+ if (kfifo_out(&le->events, &ge, 1) != 1) {
+ /*
+ * This should never happen - we hold the
+ * lock from the moment we learned the fifo
+ * is no longer empty until now.
+ */
+ WARN(1, "failed to read from non-empty kfifo");
+ return -EIO;
+ }
}
if (copy_to_user(buf + bytes_read, &ge, ge_size))
@@ -2706,12 +2711,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
if (count < event_size)
return -EINVAL;
#endif
- ret = kfifo_out(&cdev->events, &event, 1);
- }
- if (ret != 1) {
- ret = -EIO;
- break;
- /* We should never get here. See lineevent_read(). */
+ if (kfifo_out(&cdev->events, &event, 1) != 1) {
+ /*
+ * This should never happen - we hold the
+ * lock from the moment we learned the fifo
+ * is no longer empty until now.
+ */
+ WARN(1, "failed to read from non-empty kfifo");
+ return -EIO;
+ }
}
#ifdef CONFIG_GPIO_CDEV_V1
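A minimal, self-contained sketch (not part of the patch) of the kfifo lifecycle the cdev changes above converge on: INIT_KFIFO() at request creation, a lazy kfifo_alloc() only once edge events are actually enabled, and a WARN()-ed kfifo_out() for the "cannot happen" empty case. The demo_event type and the request structure are made up for illustration.

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_event {                     /* hypothetical event record */
        u64 timestamp_ns;
        u32 id;
};

struct demo_req {
        DECLARE_KFIFO_PTR(events, struct demo_event);
};

static void demo_req_init(struct demo_req *req)
{
        /* Safe to query with kfifo_initialized() before any allocation. */
        INIT_KFIFO(req->events);
}

static int demo_enable_events(struct demo_req *req, unsigned int nr_events)
{
        if (kfifo_initialized(&req->events))
                return 0;

        return kfifo_alloc(&req->events, nr_events, GFP_KERNEL);
}

static int demo_pop_event(struct demo_req *req, struct demo_event *ev)
{
        /* The caller checked the fifo was non-empty under its lock. */
        if (kfifo_out(&req->events, ev, 1) != 1) {
                WARN(1, "failed to read from non-empty kfifo");
                return -EIO;
        }

        return 0;
}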
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d75f6ee37028..f6af5e7be4d1 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -103,7 +103,7 @@ int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
if (ret > 0)
return ret;
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ for (i = 0; i < gpio_suffix_count; i++) {
if (con_id)
snprintf(propname, sizeof(propname), "%s-%s",
con_id, gpio_suffixes[i]);
@@ -203,6 +203,24 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "qi,lb60", "rb-gpios", true },
#endif
+#if IS_ENABLED(CONFIG_PCI_LANTIQ)
+ /*
+ * According to the PCI specification, the RST# pin is an
+ * active-low signal. However, most of the device trees that have
+ * been in wide use for a long time incorrectly describe the reset
+ * GPIO as active-high and also use the wrong property name.
+ */
+ { "lantiq,pci-xway", "gpio-reset", false },
+#endif
+#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
+ /*
+ * The DTS for the Nokia N900 incorrectly specifies "active high"
+ * polarity for the reset line, while the chip actually
+ * treats it as "active low".
+ */
+ { "ti,tsc2005", "reset-gpios", false },
+#endif
};
unsigned int i;
@@ -504,9 +522,9 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
{ "reset", "reset-n-io", "marvell,nfc-uart" },
{ "reset", "reset-n-io", "mrvl,nfc-uart" },
#endif
-#if !IS_ENABLED(CONFIG_PCI_LANTIQ)
+#if IS_ENABLED(CONFIG_PCI_LANTIQ)
/* MIPS Lantiq PCI */
- { "reset", "gpios-reset", "lantiq,pci-xway" },
+ { "reset", "gpio-reset", "lantiq,pci-xway" },
#endif
/*
@@ -676,7 +694,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id,
unsigned int i;
/* Try GPIO property "foo-gpios" and "foo-gpio" */
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ for (i = 0; i < gpio_suffix_count; i++) {
if (con_id)
snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
gpio_suffixes[i]);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fa62367ee929..edaeee53db75 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
#include <linux/compat.h>
@@ -17,6 +18,7 @@
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/seq_file.h>
@@ -88,6 +90,9 @@ DEFINE_STATIC_SRCU(gpio_devices_srcu);
static DEFINE_MUTEX(gpio_machine_hogs_mutex);
static LIST_HEAD(gpio_machine_hogs);
+const char *const gpio_suffixes[] = { "gpios", "gpio" };
+const size_t gpio_suffix_count = ARRAY_SIZE(gpio_suffixes);
+
static void gpiochip_free_hogs(struct gpio_chip *gc);
static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
@@ -105,16 +110,16 @@ const char *gpiod_get_label(struct gpio_desc *desc)
unsigned long flags;
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_USED_AS_IRQ, &flags) &&
- !test_bit(FLAG_REQUESTED, &flags))
- return "interrupt";
-
- if (!test_bit(FLAG_REQUESTED, &flags))
- return NULL;
label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu,
srcu_read_lock_held(&desc->gdev->desc_srcu));
+ if (test_bit(FLAG_USED_AS_IRQ, &flags))
+ return label->str ?: "interrupt";
+
+ if (!test_bit(FLAG_REQUESTED, &flags))
+ return NULL;
+
return label->str;
}
@@ -174,7 +179,6 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
{
return gpio_device_get_desc(gc->gpiodev, hwnum);
}
-EXPORT_SYMBOL_GPL(gpiochip_get_desc);
/**
* gpio_device_get_desc() - get the GPIO descriptor corresponding to the given
@@ -198,7 +202,7 @@ gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum)
if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
- return &gdev->descs[hwnum];
+ return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)];
}
EXPORT_SYMBOL_GPL(gpio_device_get_desc);
@@ -485,7 +489,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
* 1. Non-unique names are still accepted,
* 2. Name collisions within the same GPIO chip are not reported.
*/
-static int gpiochip_set_desc_names(struct gpio_chip *gc)
+static void gpiochip_set_desc_names(struct gpio_chip *gc)
{
struct gpio_device *gdev = gc->gpiodev;
int i;
@@ -504,8 +508,6 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
/* Then add all names to the GPIO descriptors */
for (i = 0; i != gc->ngpio; ++i)
gdev->descs[i].name = gc->names[i];
-
- return 0;
}
/*
@@ -999,11 +1001,9 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- if (gc->names) {
- ret = gpiochip_set_desc_names(gc);
- if (ret)
- goto err_cleanup_desc_srcu;
- }
+ if (gc->names)
+ gpiochip_set_desc_names(gc);
+
ret = gpiochip_set_names(gc);
if (ret)
goto err_cleanup_desc_srcu;
@@ -4798,11 +4798,11 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
for_each_gpio_desc(gc, desc) {
guard(srcu)(&desc->gdev->desc_srcu);
- if (test_bit(FLAG_REQUESTED, &desc->flags)) {
+ is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags);
+ if (is_irq || test_bit(FLAG_REQUESTED, &desc->flags)) {
gpiod_get_direction(desc);
is_out = test_bit(FLAG_IS_OUT, &desc->flags);
value = gpio_chip_get_value(gc, desc);
- is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags);
active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags);
seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
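The gpio_device_get_desc() hunk above pairs the existing bounds check with array_index_nospec() from the newly included <linux/nospec.h>, so a user-controlled hwnum cannot drive a speculative out-of-bounds read. A minimal sketch of the pattern (not part of the patch; the array and its size are made up):

#include <linux/errno.h>
#include <linux/nospec.h>

#define DEMO_NR_SLOTS 16                /* hypothetical table size */
static int demo_slots[DEMO_NR_SLOTS];

static int demo_read_slot(unsigned int idx, int *out)
{
        if (idx >= DEMO_NR_SLOTS)
                return -EINVAL;

        /* Clamp idx under speculation before it indexes the array. */
        idx = array_index_nospec(idx, DEMO_NR_SLOTS);
        *out = demo_slots[idx];

        return 0;
}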
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 48e086c2f416..4de0bf1a62d3 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -90,7 +90,8 @@ static inline struct gpio_device *to_gpio_device(struct device *dev)
}
/* gpio suffixes used for ACPI and device tree lookup */
-static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
+extern const char *const gpio_suffixes[];
+extern const size_t gpio_suffix_count;
/**
* struct gpio_array - Opaque descriptor for a structure of GPIO array attributes
@@ -242,6 +243,7 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags);
int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
const char *gpiod_get_label(struct gpio_desc *desc);
/*
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 026444eeb5c6..d0aa277fc3bf 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -450,6 +450,7 @@ config DRM_PRIVACY_SCREEN
config DRM_WERROR
bool "Compile the drm subsystem with warnings as errors"
depends on DRM && EXPERT
+ depends on !WERROR
default n
help
A kernel build should not cause any compiler warnings, and this
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 8975cf41a91a..48ad0c04aa72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
system_mem_needed = size;
ttm_mem_needed = size;
}
@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] +=
- (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+ (adev->flags & AMD_IS_APU) ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
@@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
- if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+ if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
@@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (size) {
if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+ ((adev->flags & AMD_IS_APU) &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
@@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
- !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+ !(adev->flags & AMD_IS_APU) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 108003bdf1e9..2e13c7c4b2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -400,7 +400,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
+ *vram_width = mem_channel_number * 16;
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ec888fc6ead8..916b6b8cf7d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1093,6 +1093,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
unsigned int i;
int r;
+ /*
+ * We can't use gang submit together with reserved VMIDs when the VM
+ * changes can't be invalidated by more than one engine at the same time.
+ */
+ if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+ for (i = 0; i < p->gang_size; ++i) {
+ struct drm_sched_entity *entity = p->entities[i];
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
+ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+ return -EINVAL;
+ }
+ }
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 861ccff78af9..33f791d92ddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5220,11 +5220,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU mode1 reset\n");
+ /* Cache the state before disabling bus mastering. The saved config
+ * space values are also used in other cases, such as restoring the
+ * state after a mode-2 reset.
+ */
+ amdgpu_device_cache_pci_state(adev->pdev);
+
/* disable BM */
pci_clear_master(adev->pdev);
- amdgpu_device_cache_pci_state(adev->pdev);
-
if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
dev_info(adev->dev, "GPU smu mode1 reset\n");
ret = amdgpu_dpm_mode1_reset(adev);
@@ -5944,13 +5947,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
- while ((parent = pci_upstream_bridge(parent))) {
- /* skip upstream/downstream switches internal to dGPU*/
- if (parent->vendor == PCI_VENDOR_ID_ATI)
- continue;
- *speed = pcie_get_speed_cap(parent);
- *width = pcie_get_width_cap(parent);
- break;
+ if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
+ while ((parent = pci_upstream_bridge(parent))) {
+ /* skip upstream/downstream switches internal to dGPU*/
+ if (parent->vendor == PCI_VENDOR_ID_ATI)
+ continue;
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ break;
+ }
+ } else {
+ /* use the current speeds rather than max if switching is not supported */
+ pcie_bandwidth_available(adev->pdev, NULL, speed, width);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 055ba2ea4c12..662d0f28f358 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -41,8 +41,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
-#include <linux/pm_runtime.h>
-#include "amdgpu_trace.h"
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -58,42 +56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int r;
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
- if (r < 0)
- goto out;
-
return 0;
-
-out:
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
- return r;
-}
-
-/**
- * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
- *
- * @dmabuf: DMA-buf where we remove the attachment from
- * @attach: the attachment to remove
- *
- * Called when an attachment is removed from the DMA-buf.
- */
-static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
}
/**
@@ -267,7 +234,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
- .detach = amdgpu_dma_buf_detach,
.pin = amdgpu_dma_buf_pin,
.unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 10832b470448..bc3ac73b6b8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
} while (last_seq != seq);
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 67c234bcf89f..3adaa4670103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -108,6 +108,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
memset(&bp, 0, sizeof(bp));
*obj = NULL;
+ flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bp.size = size;
bp.byte_align = alignment;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index be4629cdac04..08b9dfb65335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- signed long r;
+ int r;
uint32_t seq;
- if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
- !down_read_trylock(&adev->reset_domain->sem)) {
+ /*
+ * A GPU reset should flush all TLBs anyway, so no need to do
+ * this while one is ongoing.
+ */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return 0;
+ if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
@@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
flush_type, all_hub,
inst);
- return 0;
- }
+ r = 0;
+ } else {
+ /* 2 dwords flush + 8 dwords fence */
+ ndw = kiq->pmf->invalidate_tlbs_size + 8;
- /* 2 dwords flush + 8 dwords fence */
- ndw = kiq->pmf->invalidate_tlbs_size + 8;
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ if (adev->gmc.flush_tlb_needs_extra_type_0)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_0)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+ amdgpu_ring_alloc(ring, ndw);
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
- spin_lock(&adev->gfx.kiq[inst].ring_lock);
- amdgpu_ring_alloc(ring, ndw);
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+ if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
- if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r) {
- amdgpu_ring_undo(ring);
+ amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- goto error_unlock_reset;
- }
-
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
- if (r < 1) {
- dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- r = -ETIME;
- goto error_unlock_reset;
+ if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+ dev_err(adev->dev, "timeout waiting for kiq fence\n");
+ r = -ETIME;
+ }
}
- r = 0;
error_unlock_reset:
up_read(&adev->reset_domain->sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3d7fcdeaf8cf..e8f6e4dbc5a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -406,7 +406,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
+ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -456,6 +456,19 @@ error:
return r;
}
+/**
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+ return vm->reserved_vmid[vmhub] ||
+ (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+}
+
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index fa8c42c83d5d..240fa6751260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub);
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8d8c39be6129..c556c8b653fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -604,8 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
-
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
AMDGPU_GEM_DOMAIN_GDS))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4bd4602d11b1..cef9dd0a012b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -640,6 +640,20 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
}
}
+static bool psp_err_warn(struct psp_context *psp)
+{
+ struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
+
+ /* This response indicates that the reg list is already loaded */
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
+ cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
+ cmd->resp.status == TEE_ERROR_CANCEL)
+ return false;
+
+ return true;
+}
+
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
@@ -699,10 +713,13 @@ psp_cmd_submit_buf(struct psp_context *psp,
dev_warn(psp->adev->dev,
"failed to load ucode %s(0x%X) ",
amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
- dev_warn(psp->adev->dev,
- "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
- psp->cmd_buf_mem->resp.status);
+ if (psp_err_warn(psp))
+ dev_warn(
+ psp->adev->dev,
+ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status);
/* If any firmware (including CAP) load fails under SRIOV, it should
* return failure to stop the VF from initializing.
* Also return failure in case of timeout
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index c8980d5f6540..7021c4a66fb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -46,7 +46,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
-#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 7aafeb763e5d..383fce40d4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -554,21 +554,6 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
__entry->value)
);
-TRACE_EVENT(amdgpu_runpm_reference_dumps,
- TP_PROTO(uint32_t index, const char *func),
- TP_ARGS(index, func),
- TP_STRUCT__entry(
- __field(uint32_t, index)
- __string(func, func)
- ),
- TP_fast_assign(
- __entry->index = index;
- __assign_str(func);
- ),
- TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
- __entry->index,
- __get_str(func))
-);
#undef AMDGPU_JOB_GET_TIMELINE_NAME
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index e30eecd02ae1..fde66225c481 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -3,6 +3,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -314,7 +315,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
return 0;
}
afb = to_amdgpu_framebuffer(new_state->fb);
- obj = new_state->fb->obj[0];
+
+ obj = drm_gem_fb_get_obj(new_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return -EINVAL;
+ }
+
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
@@ -368,12 +375,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
+ struct drm_gem_object *obj;
int r;
if (!old_state->fb)
return;
- rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
+ obj = drm_gem_fb_get_obj(old_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return;
+ }
+
+ rbo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 7fdd306a48a0..f07647a9a9d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_bo_base *entry)
{
struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
- struct amdgpu_bo *bo = parent->bo, *pbo;
+ struct amdgpu_bo *bo, *pbo;
struct amdgpu_vm *vm = params->vm;
uint64_t pde, pt, flags;
unsigned int level;
+ if (WARN_ON(!parent))
+ return -EINVAL;
+
+ bo = parent->bo;
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 414ea3f560a7..d4e2aed2efa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->gmc.num_mem_partitions == num_xcc / 2)
return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
- AMDGPU_QPX_PARTITION_MODE;
+ AMDGPU_CPX_PARTITION_MODE;
if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
return AMDGPU_DPX_PARTITION_MODE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 7b16e8cca86a..f5b9f443cfdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -4195,9 +4195,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
- int i, j, k, counter, xcc_id, active_cu_number = 0;
- u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
unsigned disable_masks[4 * 4];
+ bool is_symmetric_cus;
if (!adev || !cu_info)
return -EINVAL;
@@ -4215,6 +4216,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+ is_symmetric_cus = true;
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
@@ -4242,6 +4244,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
+ if (i && is_symmetric_cus && prev_counter != counter)
+ is_symmetric_cus = false;
+ prev_counter = counter;
+ }
+ if (is_symmetric_cus) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
}
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
xcc_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 0d1407f25005..32d4519541c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -154,18 +154,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
- int ndw = size / 4;
- signed long r;
- union MESAPI__MISC *x_pkt = pkt;
- struct MES_API_STATUS *api_status;
+ union MESAPI__QUERY_MES_STATUS mes_status_pkt;
+ signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
- unsigned long flags;
- signed long timeout = 3000000; /* 3000 ms */
+ struct MES_API_STATUS *api_status;
+ union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
- u32 fence_offset;
- u64 fence_gpu_addr;
- u64 *fence_ptr;
+ unsigned long flags;
+ u64 status_gpu_addr;
+ u32 status_offset;
+ u64 *status_ptr;
+ signed long r;
int ret;
if (x_pkt->header.opcode >= MES_SCH_API_MAX)
@@ -177,28 +177,38 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
timeout = 15 * 600 * 1000;
}
- BUG_ON(size % 4 != 0);
- ret = amdgpu_device_wb_get(adev, &fence_offset);
+ ret = amdgpu_device_wb_get(adev, &status_offset);
if (ret)
return ret;
- fence_gpu_addr =
- adev->wb.gpu_addr + (fence_offset * 4);
- fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
- *fence_ptr = 0;
+
+ status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
+ status_ptr = (u64 *)&adev->wb.wb[status_offset];
+ *status_ptr = 0;
spin_lock_irqsave(&mes->ring_lock, flags);
- if (amdgpu_ring_alloc(ring, ndw)) {
- spin_unlock_irqrestore(&mes->ring_lock, flags);
- amdgpu_device_wb_free(adev, fence_offset);
- return -ENOMEM;
- }
+ r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
+ if (r)
+ goto error_unlock_free;
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
- api_status->api_completion_fence_addr = fence_gpu_addr;
+ api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
- amdgpu_ring_write_multiple(ring, pkt, ndw);
+ amdgpu_ring_write_multiple(ring, pkt, size / 4);
+
+ memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
+ mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
+ mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+ mes_status_pkt.api_status.api_completion_fence_addr =
+ ring->fence_drv.gpu_addr;
+ mes_status_pkt.api_status.api_completion_fence_value =
+ ++ring->fence_drv.sync_seq;
+
+ amdgpu_ring_write_multiple(ring, &mes_status_pkt,
+ sizeof(mes_status_pkt) / 4);
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
@@ -206,15 +216,16 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
if (misc_op_str)
- dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
+ dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
+ misc_op_str);
else if (op_str)
dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
else
- dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
+ dev_dbg(adev->dev, "MES msg=%d was emitted\n",
+ x_pkt->header.opcode);
- r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
- amdgpu_device_wb_free(adev, fence_offset);
- if (r < 1) {
+ r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ if (r < 1 || !*status_ptr) {
if (misc_op_str)
dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
@@ -229,10 +240,19 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
while (halt_if_hws_hang)
schedule();
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto error_wb_free;
}
+ amdgpu_device_wb_free(adev, status_offset);
return 0;
+
+error_unlock_free:
+ spin_unlock_irqrestore(&mes->ring_lock, flags);
+
+error_wb_free:
+ amdgpu_device_wb_free(adev, status_offset);
+ return r;
}
static int convert_to_mes_queue_type(int queue_type)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 7566973ed8f5..37b5ddd6f13b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -464,8 +464,9 @@ struct psp_gfx_rb_frame
#define PSP_ERR_UNKNOWN_COMMAND 0x00000100
enum tee_error_code {
- TEE_SUCCESS = 0x00000000,
- TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A,
+ TEE_SUCCESS = 0x00000000,
+ TEE_ERROR_CANCEL = 0xFFFF0002,
+ TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A,
};
#endif /* _PSP_TEE_GFX_IF_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index f08a32c18694..40b28298af30 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -32,7 +32,9 @@
#include "mp/mp_14_0_2_sh_mask.h"
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -66,6 +68,9 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9596bca57212..afc57df421cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
- if ((adev->pdev->device == 0x7460 &&
- adev->pdev->revision == 0x00) ||
- (adev->pdev->device == 0x7461 &&
- adev->pdev->revision == 0x00))
- /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
- gfx_target_version = 110005;
- else
- /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
- gfx_target_version = 110001;
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 5, 0):
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4816fcb9803a..8ee3d07ffbdf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
- if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return 0;
pgmap = &kfddev->pgmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 069b81eeea03..31e500859ab0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
- if (node->adev->gmc.is_app_apu ||
- node->adev->flags & AMD_IS_APU)
+ if (node->adev->flags & AMD_IS_APU)
return 0;
if (prange->preferred_loc == gpuid ||
@@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
goto out;
}
- if (bo_node->adev->gmc.is_app_apu ||
- bo_node->adev->flags & AMD_IS_APU) {
+ if (bo_node->adev->flags & AMD_IS_APU) {
best_loc = 0;
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 9c37bd0567ef..70c1776611c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
* is initialized to not 0 when page migration register device memory.
*/
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
- (adev)->gmc.is_app_apu ||\
((adev)->flags & AMD_IS_APU))
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 5fcd4f778dc3..47b8b49da8a7 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,7 +8,7 @@ config DRM_AMD_DC
depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG)
+ select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f1d67c6f4b98..a622aca8c649 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9169,9 +9169,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
trace_amdgpu_dm_atomic_commit_tail_begin(state);
- if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
- dc_allow_idle_optimizations(dm->dc, false);
-
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_dp_mst_atomic_wait_for_dependencies(state);
@@ -11184,6 +11181,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
return ret;
}
+static void parse_edid_displayid_vrr(struct drm_connector *connector,
+ struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ int j = 0;
+ u16 min_vfreq;
+ u16 max_vfreq;
+
+ if (edid == NULL || edid->extensions == 0)
+ return;
+
+ /* Find DisplayID extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (void *)(edid + (i + 1));
+ if (edid_ext[0] == DISPLAYID_EXT)
+ break;
+ }
+
+ if (edid_ext == NULL)
+ return;
+
+ while (j < EDID_LENGTH) {
+ /* Get dynamic video timing range from DisplayID if available */
+ if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
+ (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
+ min_vfreq = edid_ext[j+9];
+ if (edid_ext[j+1] & 7)
+ max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
+ else
+ max_vfreq = edid_ext[j+10];
+
+ if (max_vfreq && min_vfreq) {
+ connector->display_info.monitor_range.max_vfreq = max_vfreq;
+ connector->display_info.monitor_range.min_vfreq = min_vfreq;
+
+ return;
+ }
+ }
+ j++;
+ }
+}
+
static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
@@ -11305,6 +11345,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
+ /* Some eDP panels only have the refresh rate range info in DisplayID */
+ if ((connector->display_info.monitor_range.min_vfreq == 0 ||
+ connector->display_info.monitor_range.max_vfreq == 0))
+ parse_edid_displayid_vrr(connector, edid);
+
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
@@ -11312,9 +11357,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (is_dp_capable_without_timing_msa(adev->dm.dc,
amdgpu_dm_connector)) {
if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
- freesync_capable = true;
amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
} else {
edid_check_required = edid->version > 1 ||
(edid->version == 1 &&
@@ -11440,6 +11487,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
mutex_unlock(&adev->dm.dc_lock);
}
+static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
+ dc_exit_ips_for_hw_access(dc);
+}
+
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
u32 value, const char *func_name)
{
@@ -11450,6 +11503,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
return;
}
#endif
+
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
cgs_write_register(ctx->cgs_device, address, value);
trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
@@ -11473,6 +11528,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return 0;
}
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
+
value = cgs_read_register(ctx->cgs_device, address);
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 35733d5c5193..a5e1a93ddaea 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
&connector->base,
dev->mode_config.tile_property,
0);
+ connector->colorspace_property = master->base.colorspace_property;
+ if (connector->colorspace_property)
+ drm_connector_attach_colorspace_property(connector);
drm_connector_set_path_property(connector, pathprop);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 6c84b0fa40f4..0782a34689a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -3364,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.UrgentBurstFactorLumaPre[k],
&mode_lib->vba.UrgentBurstFactorChromaPre[k],
&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
+
+ v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
+ 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
}
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 60f251cf973b..beed7adbbd43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -177,7 +177,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index e4f333d4fb54..a201dbb743d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -215,7 +215,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index a41812598ce8..8ecc972dbffd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -234,6 +234,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
out->dispclk_dppclk_vco_speed_mhz = 3600;
+ out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 0f8b3336e26d..cbd1c1f26b7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -294,7 +294,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported)
+ if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0] == dml_fclock_change_unsupported)
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
else
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 5295f52e4fc8..dcced89c07b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1439,3 +1439,75 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
}
}
}
+
+static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
+{
+ /* Calculate the average pixel count per TU and return true if it is
+ * below ~2.00, so the caller can avoid generating empty TUs. This is
+ * only required for DPIA tunneling, as empty TUs are legal to generate
+ * for native DP links. Assume TU size 64 as there
+ * is currently no scenario where it's reprogrammed from HW default.
+ * MTPs have no such limitation, so this does not affect MST use cases.
+ */
+ unsigned int pix_clk_mhz;
+ unsigned int symclk_mhz;
+ unsigned int avg_pix_per_tu_x1000;
+ unsigned int tu_size_bytes = 64;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
+ const struct dc *dc = pipe_ctx->stream->link->dc;
+
+ if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ // Not necessary for MST configurations
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return false;
+
+ pix_clk_mhz = timing->pix_clk_100hz / 10000;
+
+ // If this is true, can't block due to dynamic ODM
+ if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
+ return false;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ symclk_mhz = 162;
+ break;
+ case LINK_RATE_HIGH:
+ symclk_mhz = 270;
+ break;
+ case LINK_RATE_HIGH2:
+ symclk_mhz = 540;
+ break;
+ case LINK_RATE_HIGH3:
+ symclk_mhz = 810;
+ break;
+ default:
+ // We shouldn't be tunneling any other rates, something is wrong
+ ASSERT(0);
+ return false;
+ }
+
+ avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
+ / (symclk_mhz * link_settings->lane_count);
+
+ // Add small empirically-decided margin to account for potential jitter
+ return (avg_pix_per_tu_x1000 < 2020);
+}
+
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
+ if (should_avoid_empty_tu(pipe_ctx))
+ return false;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+
+ return false;
+}
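
The empty-TU check above works in fixed point: it scales the average pixel count per transfer unit by 1000 and compares it against 2020, i.e. 2.00 pixels per TU plus a small jitter margin. A standalone C model of that arithmetic, with the link parameters passed in as plain integers (the example values in main() are illustrative, not taken from the driver):

/* Standalone model of the empty-TU arithmetic; not driver code. */
#include <stdio.h>

static unsigned int avg_pix_per_tu_x1000(unsigned int pix_clk_mhz,
					 unsigned int symclk_mhz,
					 unsigned int lane_count,
					 unsigned int tu_size_bytes)
{
	/* Fixed point x1000: pixels delivered per TU across all lanes. */
	return (1000u * pix_clk_mhz * tu_size_bytes) /
	       (symclk_mhz * lane_count);
}

int main(void)
{
	/* Example: ~148 MHz pixel clock, HBR (270 MHz symbol clock), 4 lanes. */
	unsigned int v = avg_pix_per_tu_x1000(148, 270, 4, 64);

	printf("avg pixels per TU x1000 = %u (threshold 2020)\n", v);
	return 0;
}
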
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index a731c8880d60..f0ea7d1511ae 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -95,4 +95,6 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index df3bf77f3fb4..199781233fd5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index a01d0842bf8e..d487dfcd219b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1590,9 +1590,17 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (dp_is_lttpr_present(link))
+ if (dp_is_lttpr_present(link)) {
configure_lttpr_mode_transparent(link);
+ // Echo TOTAL_LTTPR_CNT back downstream
+ core_link_write_dpcd(
+ link,
+ DP_TOTAL_LTTPR_CNT,
+ &link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
+ sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
+ }
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 914f28e9f224..aee5170f5fb2 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -177,4 +177,9 @@ enum dpcd_psr_sink_states {
#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
+/* Remove once drm_dp_helper.h is updated upstream */
+#ifndef DP_TOTAL_LTTPR_CNT
+#define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */
+#endif
+
#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 1acb2d2c5597..09cbc3afd6d8 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -734,7 +734,7 @@ struct atom_gpio_pin_lut_v2_1
{
struct atom_common_table_header table_header;
/*the real number of this included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
- struct atom_gpio_pin_assignment gpio_pin[8];
+ struct atom_gpio_pin_assignment gpio_pin[];
};
@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
- struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
+ struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
};
struct atom_svid2_voltage_object_v4
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 2e8e6c9875f6..f83ace2d7ec3 100644
--- a/drivers/gpu/drm/amd/include/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -477,31 +477,30 @@ typedef struct _ATOM_PPLIB_STATE_V2
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
- //how many states we have
- UCHAR ucNumEntries;
-
- ATOM_PPLIB_STATE_V2 states[1];
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
}StateArray;
typedef struct _ClockInfoArray{
- //how many clock levels we have
- UCHAR ucNumEntries;
-
- //sizeof(ATOM_PPLIB_CLOCK_INFO)
- UCHAR ucEntrySize;
-
- UCHAR clockInfo[];
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[];
}ClockInfoArray;
typedef struct _NonClockInfoArray{
+ //how many non-clock levels we have. normally should be same as number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
- //how many non-clock levels we have. normally should be same as number of states
- UCHAR ucNumEntries;
- //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
- UCHAR ucEntrySize;
-
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -513,8 +512,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
@@ -529,8 +530,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_Clock_Voltage_Limit_Table;
union _ATOM_PPLIB_CAC_Leakage_Record
@@ -553,8 +556,10 @@ typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
typedef struct _ATOM_PPLIB_CAC_Leakage_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_CAC_Leakage_Table;
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
@@ -568,8 +573,10 @@ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_PhaseSheddingLimits_Table;
typedef struct _VCEClockInfo{
@@ -580,8 +587,8 @@ typedef struct _VCEClockInfo{
}VCEClockInfo;
typedef struct _VCEClockInfoArray{
- UCHAR ucNumEntries;
- VCEClockInfo entries[1];
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[] __counted_by(ucNumEntries);
}VCEClockInfoArray;
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
@@ -592,8 +599,8 @@ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_VCE_State_Record
@@ -604,8 +611,8 @@ typedef struct _ATOM_PPLIB_VCE_State_Record
typedef struct _ATOM_PPLIB_VCE_State_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_State_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_VCE_State_Table;
@@ -626,8 +633,8 @@ typedef struct _UVDClockInfo{
}UVDClockInfo;
typedef struct _UVDClockInfoArray{
- UCHAR ucNumEntries;
- UVDClockInfo entries[1];
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[] __counted_by(ucNumEntries);
}UVDClockInfoArray;
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
@@ -638,8 +645,8 @@ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_UVD_Table
@@ -657,8 +664,8 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
+ UCHAR numEntries;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_SAMU_Table
@@ -675,8 +682,8 @@ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_ACP_Table
@@ -743,9 +750,9 @@ typedef struct ATOM_PPLIB_VQ_Budgeting_Record{
} ATOM_PPLIB_VQ_Budgeting_Record;
typedef struct ATOM_PPLIB_VQ_Budgeting_Table {
- UCHAR revid;
- UCHAR numEntries;
- ATOM_PPLIB_VQ_Budgeting_Record entries[1];
+ UCHAR revid;
+ UCHAR numEntries;
+ ATOM_PPLIB_VQ_Budgeting_Record entries[] __counted_by(numEntries);
} ATOM_PPLIB_VQ_Budgeting_Table;
#pragma pack()
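
The pptable.h hunks above replace one-element trailing arrays with C99 flexible array members, most of them annotated with __counted_by() so fortified kernel builds can bounds-check accesses against the count field. A minimal user-space illustration of the layout and allocation pattern (struct and field names are invented for the example; __counted_by itself is a kernel annotation and only appears here in a comment):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned short clk;
	unsigned short mv;
};

struct table {
	unsigned char num_entries;	/* runtime length of entries[] */
	struct entry entries[];		/* in-kernel: __counted_by(num_entries) */
};

int main(void)
{
	unsigned char n = 4;
	struct table *t = malloc(sizeof(*t) + n * sizeof(t->entries[0]));

	if (!t)
		return 1;

	t->num_entries = n;
	for (unsigned int i = 0; i < n; i++)
		t->entries[i] = (struct entry){ .clk = 100 * (i + 1), .mv = 800 };

	for (unsigned int i = 0; i < t->num_entries; i++)
		printf("entry %u: %u MHz @ %u mV\n",
		       i, (unsigned int)t->entries[i].clk,
		       (unsigned int)t->entries[i].mv);

	free(t);
	return 0;
}
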
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 6bb42d04b247..e8b6989a40f3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
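
The kv_dpm fix skips BIOS table rows whose voltage index would land past the end of the fixed-size mapping table; without the guard a malformed table could write out of bounds. The same guard in a self-contained sketch (the array size and field names are invented):

#include <stdio.h>

#define MAX_VOLTAGES 8

struct vid_entry {
	unsigned int vid_7bit;
};

static void build_mapping(struct vid_entry map[MAX_VOLTAGES],
			  const unsigned int *index,
			  const unsigned int *vid, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		if (index[i] >= MAX_VOLTAGES)	/* reject out-of-range rows */
			continue;
		map[index[i]].vid_7bit = vid[i];
	}
}

int main(void)
{
	struct vid_entry map[MAX_VOLTAGES] = { 0 };
	unsigned int index[] = { 1, 3, 42 };	/* 42 is bogus and gets skipped */
	unsigned int vid[]   = { 10, 20, 30 };

	build_mapping(map, index, vid, 3);
	printf("map[3].vid_7bit = %u\n", map[3].vid_7bit);
	return 0;
}
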
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7789b313285c..e1796ecf9c05 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -324,6 +324,18 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
return ret;
}
+static int smu_set_mall_enable(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->ppt_funcs->set_mall_enable)
+ return 0;
+
+ ret = smu->ppt_funcs->set_mall_enable(smu);
+
+ return ret;
+}
+
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -1791,6 +1803,7 @@ static int smu_hw_init(void *handle)
smu_dpm_set_jpeg_enable(smu, true);
smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
+ smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
}
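
smu_set_mall_enable() follows the usual swsmu pattern for optional ppt_funcs hooks: a NULL callback means the feature is simply not implemented for that ASIC, and the wrapper reports success rather than an error. The dispatch pattern in a stand-alone sketch (struct and function names here are placeholders, not the real SMU types):

#include <stdio.h>

struct ops {
	int (*set_mall_enable)(void *ctx);	/* optional: may be NULL */
};

static int set_mall_enable(const struct ops *ops, void *ctx)
{
	if (!ops->set_mall_enable)
		return 0;			/* not implemented: not an error */
	return ops->set_mall_enable(ctx);
}

static int real_hook(void *ctx)
{
	(void)ctx;
	printf("MALL power gating configured\n");
	return 0;
}

int main(void)
{
	struct ops with    = { .set_mall_enable = real_hook };
	struct ops without = { 0 };

	printf("with hook:    %d\n", set_mall_enable(&with, NULL));
	printf("without hook: %d\n", set_mall_enable(&without, NULL));
	return 0;
}
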
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 0917dec8efe3..64ccdb5f14ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1395,6 +1395,11 @@ struct pptable_funcs {
int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
/**
+ * @set_mall_enable: Init MALL power gating control.
+ */
+ int (*set_mall_enable)(struct smu_context *smu);
+
+ /**
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index c4dc5881d8df..e7f5ef49049f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -106,8 +106,8 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
-#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control
+#define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c48214e3dc8e..2e32b085824a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -272,7 +272,9 @@
__SMU_DUMMY_MAP(SetSoftMinVpe), \
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
- __SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
+ __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
+ __SMU_DUMMY_MAP(MALLPowerController), \
+ __SMU_DUMMY_MAP(MALLPowerState),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bc241b593db1..b6257f34a7c6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && adev->in_s4) {
- /* Adds a GFX reset as workaround just before sending the
- * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
- * an invalid state.
- */
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
- SMU_RESET_MODE_2, NULL);
- if (ret)
- return ret;
+ if (!en && !adev->in_s0ix) {
+ if (adev->in_s4) {
+ /* Adds a GFX reset as workaround just before sending the
+ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+ }
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index e4419e1561ef..18abfbd6d059 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -52,6 +52,19 @@
#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+/* MALLPowerController message arguments (Defines for the Cache mode control) */
+#define SMU_MALL_PMFW_CONTROL 0
+#define SMU_MALL_DRIVER_CONTROL 1
+
+/*
+ * MALLPowerState message arguments
+ * (Defines for the Allocate/Release Cache mode if in driver mode)
+ */
+#define SMU_MALL_EXIT_PG 0
+#define SMU_MALL_ENTER_PG 1
+
+#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -66,6 +79,12 @@
FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+enum smu_mall_pg_config {
+ SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2,
+};
+
static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
@@ -113,6 +132,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1),
MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1),
MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1),
+ MSG_MAP(MALLPowerController, PPSMC_MSG_MALLPowerController, 1),
+ MSG_MAP(MALLPowerState, PPSMC_MSG_MALLPowerState, 1),
};
static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -1423,6 +1444,57 @@ static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_cl
return 0;
}
+static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_PMFW_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n");
+ return ret;
+ }
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_DRIVER_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n");
+ return ret;
+ }
+
+ if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_EXIT_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "EXIT MALL PG Failure\n");
+ return ret;
+ }
+ } else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_ENTER_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Enter MALL PG Failure\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
+{
+ enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT;
+ int ret = 0;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config);
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1454,6 +1526,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
+ .set_mall_enable = smu_v14_0_common_set_mall_enable,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
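
smu_v14_0_1_init_mall_power_gating() reduces to a two-step message sequence: first select who owns the MALL (PMFW or driver), and only in the driver-owned case send an explicit enter/exit power-gating state. A compact model of that decision with message sending reduced to printing (the send() helper is a stand-in for smu_cmn_send_smc_msg_with_param(); the numeric arguments mirror the defines added above):

#include <stdio.h>

enum pg_config { PMFW_CONTROL, DRIVER_ALWAYS_ON, DRIVER_ALWAYS_OFF };

static int send(const char *msg, int arg)
{
	printf("send %s(%d)\n", msg, arg);	/* stand-in for the SMU message call */
	return 0;
}

static int init_mall_pg(enum pg_config cfg)
{
	int ret;

	if (cfg == PMFW_CONTROL)
		return send("MALLPowerController", 0);	/* SMU_MALL_PMFW_CONTROL */

	/* Driver-controlled: claim ownership, then set the PG state. */
	ret = send("MALLPowerController", 1);		/* SMU_MALL_DRIVER_CONTROL */
	if (ret)
		return ret;

	if (cfg == DRIVER_ALWAYS_ON)
		return send("MALLPowerState", 0);	/* SMU_MALL_EXIT_PG */
	return send("MALLPowerState", 1);		/* SMU_MALL_ENTER_PG */
}

int main(void)
{
	return init_mall_pg(DRIVER_ALWAYS_ON);
}
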
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 706265220292..90703f4542ab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v14_0_2_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
index d8e449e6ebda..50cb8f7ee6b2 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
@@ -72,11 +72,6 @@ struct gamma_curve_sector {
u32 segment_width;
};
-struct gamma_curve_segment {
- u32 start;
- u32 end;
-};
-
static struct gamma_curve_sector sector_tbl[] = {
{ 0, 4, 4 },
{ 16, 4, 4 },
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 14ee79becacb..5ba62e637a61 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -12,10 +12,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
-#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#endif
#include <drm/drm_print.h>
@@ -43,7 +41,6 @@ static int komeda_register_show(struct seq_file *sf, void *x)
DEFINE_SHOW_ATTRIBUTE(komeda_register);
-#ifdef CONFIG_DEBUG_FS
static void komeda_debugfs_init(struct komeda_dev *mdev)
{
if (!debugfs_initialized())
@@ -55,7 +52,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
&mdev->err_verbosity);
}
-#endif
static ssize_t
core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -265,9 +261,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
-#ifdef CONFIG_DEBUG_FS
komeda_debugfs_init(mdev);
-#endif
return mdev;
@@ -286,9 +280,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
-#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(mdev->debugfs_root);
-#endif
if (mdev->aclk)
clk_prepare_enable(mdev->aclk);
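
The komeda hunks can drop the #ifdef CONFIG_DEBUG_FS guards because <linux/debugfs.h> already provides inline no-op stubs when debugfs is disabled, so unconditional calls compile away. The same header-stub pattern in a generic, self-contained form (FEATURE_STATS and the stats_init() name are invented for the illustration):

#include <stdio.h>

/* In a header: real implementation or an inline no-op, chosen at build time. */
#ifdef FEATURE_STATS
static inline void stats_init(const char *name)
{
	printf("stats enabled for %s\n", name);
}
#else
static inline void stats_init(const char *name)
{
	(void)name;	/* compiles away when the feature is off */
}
#endif

int main(void)
{
	/* Callers stay free of #ifdef clutter either way. */
	stats_init("komeda-like-device");
	return 0;
}
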
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index f3e744172673..f4e76b46ca32 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
u32 avail_scalers;
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
- if (!pipe_st)
+ if (IS_ERR_OR_NULL(pipe_st))
return NULL;
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index ea271f62b214..ec0b7f3d889c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -401,7 +401,7 @@ struct adv7511 {
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
-void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 44451a9658a3..2e9c88a2b5ed 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -119,7 +119,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
cec_received_msg(adv7511->cec_adap, &msg);
}
-void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
+int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
{
unsigned int offset = adv7511->info->reg_cec_offset;
const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY |
@@ -131,16 +131,19 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
unsigned int rx_status;
int rx_order[3] = { -1, -1, -1 };
int i;
+ int irq_status = IRQ_NONE;
- if (irq1 & irq_tx_mask)
+ if (irq1 & irq_tx_mask) {
adv_cec_tx_raw_status(adv7511, irq1);
+ irq_status = IRQ_HANDLED;
+ }
if (!(irq1 & irq_rx_mask))
- return;
+ return irq_status;
if (regmap_read(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_STATUS + offset, &rx_status))
- return;
+ return irq_status;
/*
* ADV7511_REG_CEC_RX_STATUS[5:0] contains the reception order of RX
@@ -172,6 +175,8 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
adv7511_cec_rx(adv7511, rx_buf);
}
+
+ return IRQ_HANDLED;
}
static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 66ccb61e2a66..c8d2c4a157b2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -469,6 +469,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
+ int cec_status = IRQ_NONE;
+ int irq_status = IRQ_NONE;
ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
if (ret < 0)
@@ -478,29 +480,31 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
if (ret < 0)
return ret;
- /* If there is no IRQ to handle, exit indicating no IRQ data */
- if (!(irq0 & (ADV7511_INT0_HPD | ADV7511_INT0_EDID_READY)) &&
- !(irq1 & ADV7511_INT1_DDC_ERROR))
- return -ENODATA;
-
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
+ if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) {
schedule_work(&adv7511->hpd_work);
+ irq_status = IRQ_HANDLED;
+ }
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
if (adv7511->i2c_main->irq)
wake_up_all(&adv7511->wq);
+ irq_status = IRQ_HANDLED;
}
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
- adv7511_cec_irq_process(adv7511, irq1);
+ cec_status = adv7511_cec_irq_process(adv7511, irq1);
#endif
- return 0;
+ /* If there is no IRQ to handle, exit indicating no IRQ data */
+ if (irq_status == IRQ_HANDLED || cec_status == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
}
static irqreturn_t adv7511_irq_handler(int irq, void *devid)
@@ -509,7 +513,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
int ret;
ret = adv7511_irq_process(adv7511, true);
- return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+ return ret < 0 ? IRQ_NONE : ret;
}
/* -----------------------------------------------------------------------------
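
With this change adv7511_irq_process() reports IRQ_HANDLED only when at least one sub-source (HPD, EDID/DDC error, or CEC) actually did work, which keeps the kernel's spurious-interrupt accounting honest on shared lines. A minimal model of combining per-source results (IRQ_NONE/IRQ_HANDLED are redefined locally so the sketch builds in user space; the status bits are arbitrary example values):

#include <stdio.h>

enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

static enum irqreturn handle_hpd(unsigned int irq0)
{
	return (irq0 & 0x80) ? IRQ_HANDLED : IRQ_NONE;
}

static enum irqreturn handle_cec(unsigned int irq1)
{
	return (irq1 & 0x3f) ? IRQ_HANDLED : IRQ_NONE;
}

static enum irqreturn irq_process(unsigned int irq0, unsigned int irq1)
{
	enum irqreturn hpd = handle_hpd(irq0);
	enum irqreturn cec = handle_cec(irq1);

	/* Handled if any sub-handler consumed something. */
	return (hpd == IRQ_HANDLED || cec == IRQ_HANDLED) ? IRQ_HANDLED : IRQ_NONE;
}

int main(void)
{
	printf("%d\n", irq_process(0x80, 0x00));	/* 1: HPD work was done */
	printf("%d\n", irq_process(0x00, 0x00));	/* 0: nothing to handle */
	return 0;
}
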
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 32506524d9a2..fe5fb08c9fc4 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
{
- struct drm_bridge **bridge = res;
+ struct drm_bridge *bridge = *(struct drm_bridge **)res;
- drm_panel_bridge_remove(*bridge);
+ if (!bridge)
+ return;
+
+ drm_bridge_remove(bridge);
}
/**
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 94f8c34fc293..6a8e45e9d0ec 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -239,7 +239,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
if (size < chunk_size)
return -EINVAL;
- if (chunk_size < PAGE_SIZE)
+ if (chunk_size < SZ_4K)
return -EINVAL;
if (!is_power_of_2(chunk_size))
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d612133e2cf7..117237d3528b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -524,6 +524,9 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
if (!info)
return ERR_PTR(-ENOMEM);
+ if (!drm_leak_fbdev_smem)
+ info->flags |= FBINFO_HIDE_SMEM_START;
+
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret)
goto err_release;
@@ -1860,9 +1863,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info = fb_helper->info;
info->var.pixclock = 0;
- if (!drm_leak_fbdev_smem)
- info->flags |= FBINFO_HIDE_SMEM_START;
-
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
* register the fbdev emulation instance in kernel_fb_helper_list. */
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 6c9427bb4053..13cd754af311 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -130,7 +130,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
info->fix.smem_len = info->screen_size;
return 0;
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index 97e579c33d84..1e200d815e1a 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -84,7 +84,8 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
- format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
+ sizes->surface_depth);
buffer = drm_client_framebuffer_create(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 638ffa4444f5..714e42b05108 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -469,14 +469,12 @@ void drm_file_update_pid(struct drm_file *filp)
dev = filp->minor->dev;
mutex_lock(&dev->filelist_mutex);
+ get_pid(pid);
old = rcu_replace_pointer(filp->pid, pid, 1);
mutex_unlock(&dev->filelist_mutex);
- if (pid != old) {
- get_pid(pid);
- synchronize_rcu();
- put_pid(old);
- }
+ synchronize_rcu();
+ put_pid(old);
}
/**
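
The drm_file_update_pid() fix changes the ordering: the reference on the new pid is taken before it is published with rcu_replace_pointer(), and the grace period and put of the old pid now run unconditionally. A toy user-space model of that ordering (get_pid/put_pid/wait_for_readers are stand-ins; wait_for_readers() only marks where synchronize_rcu() would sit and does not implement RCU):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pid_obj {
	atomic_int refcount;
	int nr;
};

static _Atomic(struct pid_obj *) current_pid;

static struct pid_obj *get_pid(struct pid_obj *p)
{
	if (p)
		atomic_fetch_add(&p->refcount, 1);
	return p;
}

static void put_pid(struct pid_obj *p)
{
	if (p && atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);
}

static void wait_for_readers(void)
{
	/* Stand-in for synchronize_rcu(): when this returns, no reader can
	 * still be using the previously published pointer. */
}

static void update_pid(struct pid_obj *new_pid)
{
	struct pid_obj *old;

	get_pid(new_pid);				/* reference before publish */
	old = atomic_exchange(&current_pid, new_pid);	/* publish */
	wait_for_readers();				/* grace period */
	put_pid(old);					/* now safe to drop */
}

int main(void)
{
	struct pid_obj *a = calloc(1, sizeof(*a));
	struct pid_obj *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->refcount = 1; a->nr = 100;
	b->refcount = 1; b->nr = 200;

	update_pid(a);
	update_pid(b);
	printf("current pid: %d\n", atomic_load(&current_pid)->nr);

	put_pid(a);					/* creation references */
	put_pid(b);
	put_pid(atomic_exchange(&current_pid, NULL));	/* published reference */
	return 0;
}
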
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 177773bcdbfd..53c003983ad1 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -233,6 +233,8 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
+ drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+
ret = drm_gem_shmem_get_pages(shmem);
return ret;
@@ -611,6 +613,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index aa93129c3397..3860a8ce1e2d 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO KUN */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
@@ -414,13 +420,20 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
},
.driver_data = (void *)&lcd1280x1920_rightside_up,
- }, { /* Valve Steam Deck */
+ }, { /* Valve Steam Deck (Jupiter) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Valve Steam Deck (Galileo) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index f48c4343f469..3e6d4c6aa877 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -285,7 +285,6 @@ struct platform_driver dp_driver = {
.remove_new = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
- .owner = THIS_MODULE,
.pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index fab135308b70..11a720fef32b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
struct vidi_context *ctx = ctx_from_connector(connector);
struct edid *edid;
int edid_len;
+ int count;
/*
* the edid data comes from user side and it would be set
@@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
drm_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return count;
}
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e968824a4c72..1e26cd4f8347 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return 0;
+ goto no_edid;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return 0;
+ goto no_edid;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
+
+no_edid:
+ return drm_add_modes_noedid(connector, 640, 480);
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index f08a6803dc18..3adc2c9ab72d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -311,6 +311,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
if (mode_dev->panel_fixed_mode != NULL) {
struct drm_display_mode *mode =
drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ if (!mode)
+ return 0;
+
drm_mode_probed_add(connector, mode);
return 1;
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 8486de230ec9..8d1be94a443b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -504,6 +504,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
if (mode_dev->panel_fixed_mode != NULL) {
struct drm_display_mode *mode =
drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ if (!mode)
+ return 0;
+
drm_mode_probed_add(connector, mode);
return 1;
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ed81e1466c4b..40e7d862675e 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
- int ret;
-
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
- I915_COMPONENT_AUDIO);
- if (ret < 0) {
- drm_err(&i915->drm,
- "failed to add audio component (%d)\n", ret);
- /* continue with reduced functionality */
- return;
- }
if (DISPLAY_VER(i915) >= 9) {
aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
/* init with current cdclk */
intel_audio_cdclk_change_post(i915);
+}
+
+static void i915_audio_component_register(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = component_add_typed(i915->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
i915->display.audio.component_registered = true;
}
@@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
i915_audio_component_init(i915);
}
+void intel_audio_register(struct drm_i915_private *i915)
+{
+ if (!i915->display.audio.lpe.platdev)
+ i915_audio_component_register(i915);
+}
+
/**
* intel_audio_deinit() - deinitialize the audio driver
* @i915: the i915 drm device private data
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 9327954b801e..576c061d72a4 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 3c3fc53376ce..6bff169fa8d4 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2088,6 +2088,9 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
u32 ln0, ln1, pin_assignment;
u8 width;
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return;
+
if (!intel_encoder_is_tc(&dig_port->base) ||
intel_tc_port_in_tbt_alt_mode(dig_port))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 89bd032ed995..794b4af38055 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -540,6 +540,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_driver_enable_user_access(i915);
+ intel_audio_register(i915);
+
intel_display_debugfs_register(i915);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index e05e25cd4a94..5b3b6ae1e3d7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -442,6 +442,10 @@ bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ /* eDP MSO is not compatible with joiner */
+ if (intel_dp->mso_link_count)
+ return false;
+
return DISPLAY_VER(dev_priv) >= 12 ||
(DISPLAY_VER(dev_priv) == 11 &&
encoder->port != PORT_A);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 42619fc05de4..090724fa766c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -255,6 +255,7 @@ struct i915_execbuffer {
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -2685,6 +2686,7 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
+ struct intel_gt *gt;
unsigned int idx;
int err;
@@ -2708,10 +2710,17 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
+ gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
+ /*
+ * Keep GT0 active on MTL so that i915_vma_parked() doesn't
+ * free VMAs while execbuf ioctl is validating VMAs.
+ */
+ if (gt->info.id)
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2750,6 +2759,9 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
+ if (gt->info.id)
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
+
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
@@ -2763,6 +2775,12 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
+ /*
+ * This works in conjunction with eb_select_engine() to prevent
+ * i915_vma_parked() from interfering while execbuf validates vmas.
+ */
+ if (eb->gt->info.id)
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3560a062d287..5d7446a48ae7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
+ /* TODO: make DPT shrinkable when it has no bound vmas */
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
+ !obj->is_dpt;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 65a931ea80e9..3527b8f446fe 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -196,7 +196,7 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
- mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
+ mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d650beb8ed22..20b9b04ec1e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
+ /* Lazy irq enabling after HW submission */
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
intel_breadcrumbs_arm_irq(b);
+
+ /* And confirm that we still want irqs enabled before we yield */
+ if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
+ intel_breadcrumbs_disarm_irq(b);
}
struct intel_breadcrumbs *
@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
return;
/* Kick the work once more to drain the signalers, and disarm the irq */
- irq_work_sync(&b->irq_work);
- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
- local_irq_disable();
- signal_irq_work(&b->irq_work);
- local_irq_enable();
- cond_resched();
- }
+ irq_work_queue(&b->irq_work);
}
void intel_breadcrumbs_free(struct kref *kref)
@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
- if (!b->irq_armed || __i915_request_is_complete(rq))
+ if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
irq_work_queue(&b->irq_work);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5c8e9ee3b008..3b740ca25000 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -885,6 +885,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
if (IS_DG2(gt->i915)) {
u8 first_ccs = __ffs(CCS_MASK(gt));
+ /*
+ * Store the number of active cslices before
+ * changing the CCS engine configuration
+ */
+ gt->ccs.cslices = CCS_MASK(gt);
+
/* Mask off all the CCS engine */
info->engine_mask &= ~GENMASK(CCS3, CCS0);
/* Put back in the first CCS engine */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 40371b8a9bbb..93bc1cc1ee7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -298,6 +298,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
return;
GEM_BUG_ON(fence->vma != vma);
+ i915_active_wait(&fence->active);
GEM_BUG_ON(!i915_active_is_idle(&fence->active));
GEM_BUG_ON(atomic_read(&fence->pin_count));
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
index 99b71bb7da0a..3c62a44e9106 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
/* Build the value for the fixed CCS load balancing */
for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
- if (CCS_MASK(gt) & BIT(cslice))
+ if (gt->ccs.cslices & BIT(cslice))
/*
* If available, assign the cslice
* to the first available engine...
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index def7dd0eb6f1..cfdd2ad5e954 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -207,6 +207,14 @@ struct intel_gt {
[MAX_ENGINE_INSTANCE + 1];
enum intel_submission_method submission_method;
+ struct {
+ /*
+ * Mask of the non fused CCS slices
+ * to be used for the load balancing
+ */
+ intel_engine_mask_t cslices;
+ } ccs;
+
/*
* Default address space (either GGTT or ppGTT depending on arch).
*
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index bebf28e3c479..525587cfe1af 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -29,9 +29,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
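
The KLV mask fix adds a u suffix so that 0xffff << 16 is evaluated in unsigned arithmetic rather than shifting a signed int into its sign bit, which is undefined behaviour. A small sketch of packing and unpacking a key/length header with unsigned mask literals (the helper and field layout mirror the defines above but are simplified for the example):

#include <stdint.h>
#include <stdio.h>

#define KLV_KEY_MASK	(0xffffu << 16)		/* unsigned: no signed-shift UB */
#define KLV_LEN_MASK	(0xffffu << 0)

static uint32_t klv_header(uint16_t key, uint16_t len)
{
	return ((uint32_t)key << 16) | len;
}

int main(void)
{
	uint32_t hdr = klv_header(0x1234, 3);

	printf("key=0x%04x len=%u\n",
	       (hdr & KLV_KEY_MASK) >> 16, hdr & KLV_LEN_MASK);
	return 0;
}
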
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 7ea244d876ca..9bb997dbb4b9 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -185,7 +185,7 @@ static int lima_gem_pin(struct drm_gem_object *obj)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_pin(&bo->base);
+ return drm_gem_shmem_pin_locked(&bo->base);
}
static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b5f605751b0a..de811e2265da 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -952,6 +952,13 @@ static void mtk_drm_remove(struct platform_device *pdev)
of_node_put(private->comp_node[i]);
}
+static void mtk_drm_shutdown(struct platform_device *pdev)
+{
+ struct mtk_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(private->drm);
+}
+
static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
@@ -983,6 +990,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove_new = mtk_drm_remove,
+ .shutdown = mtk_drm_shutdown,
.driver = {
.name = "mediatek-drm",
.pm = &mtk_drm_pm_ops,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 17a5cca007e2..4bd0baa2a4f5 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -250,29 +250,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- goto free_drm;
- }
+ if (ret)
+ goto free_canvas_osd1;
ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- goto free_drm;
- }
+ if (ret)
+ goto free_canvas_vd1_0;
ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
- goto free_drm;
- }
+ if (ret)
+ goto free_canvas_vd1_1;
priv->vsync_irq = platform_get_irq(pdev, 0);
ret = drm_vblank_init(drm, 1);
if (ret)
- goto free_drm;
+ goto free_canvas_vd1_2;
/* Assign limits per soc revision/package */
for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
@@ -288,11 +279,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
*/
ret = drm_aperture_remove_framebuffers(&meson_driver);
if (ret)
- goto free_drm;
+ goto free_canvas_vd1_2;
ret = drmm_mode_config_init(drm);
if (ret)
- goto free_drm;
+ goto free_canvas_vd1_2;
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
@@ -307,7 +298,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (priv->afbcd.ops) {
ret = priv->afbcd.ops->init(priv);
if (ret)
- goto free_drm;
+ goto free_canvas_vd1_2;
}
/* Encoder Initialization */
@@ -371,6 +362,14 @@ uninstall_irq:
exit_afbcd:
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
+free_canvas_vd1_2:
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
+free_canvas_vd1_1:
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+free_canvas_vd1_0:
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+free_canvas_osd1:
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
free_drm:
drm_dev_put(drm);
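
The meson rework replaces repeated per-branch cleanup with a single ladder of goto labels in reverse acquisition order, so each new failure point only needs to jump to the right label. The shape of that pattern in a self-contained sketch (resources are reduced to numbered slots and acquire() is made to fail on the third one to show the unwind):

#include <stdio.h>

static int acquire(int id)
{
	printf("acquire %d\n", id);
	return id == 2 ? -1 : 0;	/* simulate a failure at the third step */
}

static void release(int id)
{
	printf("release %d\n", id);
}

static int bind(void)
{
	int ret;

	ret = acquire(0);
	if (ret)
		goto err;
	ret = acquire(1);
	if (ret)
		goto free0;
	ret = acquire(2);
	if (ret)
		goto free1;

	return 0;		/* success: everything stays acquired */

free1:
	release(1);		/* unwind in reverse order of acquisition */
free0:
	release(0);
err:
	return ret;
}

int main(void)
{
	return bind() ? 1 : 0;
}
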
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index fc3bfdc991d2..3926485bb197 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -538,7 +538,7 @@ class Parser(object):
self.variants.add(reg.domain)
def do_validate(self, schemafile):
- if self.validate == False:
+ if not self.validate:
return
try:
@@ -948,7 +948,8 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--rnn', type=str, required=True)
parser.add_argument('--xml', type=str, required=True)
- parser.add_argument('--validate', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--validate', default=False, action='store_true')
+ parser.add_argument('--no-validate', dest='validate', action='store_false')
subparsers = parser.add_subparsers()
subparsers.required = True
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 13705c5f1497..4b7497a8755c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
if (nv_two_heads(dev))
NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
if (!suspend)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 670c9739e5e1..2033214c4b78 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -209,6 +209,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode;
mode = drm_mode_duplicate(encoder->dev, tv_mode);
+ if (!mode)
+ continue;
mode->clock = tv_norm->tv_enc_mode.vrefresh *
mode->htotal / 1000 *
@@ -258,6 +260,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
if (modes[i].hdisplay == output_mode->hdisplay &&
modes[i].vdisplay == output_mode->vdisplay) {
mode = drm_mode_duplicate(encoder->dev, output_mode);
+ if (!mode)
+ continue;
mode->type |= DRM_MODE_TYPE_PREFERRED;
} else {
@@ -265,6 +269,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
modes[i].vdisplay, 60, false,
(output_mode->flags &
DRM_MODE_FLAG_INTERLACE), false);
+ if (!mode)
+ continue;
}
/* CVT modes are sometimes unsuitable... */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 88728a0b2c25..674dc567e179 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
nv50_mstm_fini(nouveau_encoder(encoder));
}
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 79cfab53f80e..8c3c1f1e01c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -43,11 +43,6 @@
#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
#define LOG_OLD_VALUE(x)
-struct init_exec {
- bool execute;
- bool repeat;
-};
-
static bool nv_cksum(const uint8_t *data, unsigned int length)
{
/*
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 856b3ef5edb8..0c71d761d378 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1001,6 +1001,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+ if (!mode)
+ return 0;
+
drm_mode_probed_add(connector, mode);
ret = 1;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index aed5d5b51b43..d4725a968827 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->headless)
+ return;
+
spin_lock_irq(&drm->hpd_lock);
drm->hpd_pending = ~0;
spin_unlock_irq(&drm->hpd_lock);
@@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
}
drm_connector_list_iter_end(&conn_iter);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
@@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev)
/* no display hw */
if (ret == -ENODEV) {
ret = 0;
+ drm->headless = true;
goto disp_create_err;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e239c6bf4afa..25fca98a20bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -276,6 +276,7 @@ struct nouveau_drm {
/* modesetting */
struct nvbios vbios;
struct nouveau_display *display;
+ bool headless;
struct work_struct hpd_work;
spinlock_t hpd_lock;
u32 hpd_pending;
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 4d1aaee8fe15..1d19c87eaec1 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -142,11 +142,16 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_mthd_v0 mthd;
} *args;
+ u32 args_size;
u8 stack[128];
int ret;
- if (sizeof(*args) + size > sizeof(stack)) {
- if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ if (check_add_overflow(sizeof(*args), size, &args_size))
+ return -ENOMEM;
+
+ if (args_size > sizeof(stack)) {
+ args = kmalloc(args_size, GFP_KERNEL);
+ if (!args)
return -ENOMEM;
} else {
args = (void *)stack;
@@ -157,7 +162,7 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
args->mthd.method = mthd;
memcpy(args->mthd.data, data, size);
- ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ ret = nvif_object_ioctl(object, args, args_size, NULL);
memcpy(data, args->mthd.data, size);
if (args != (void *)stack)
kfree(args);
@@ -276,7 +281,15 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
object->map.size = 0;
if (parent) {
- if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
+ u32 args_size;
+
+ if (check_add_overflow(sizeof(*args), size, &args_size)) {
+ nvif_object_dtor(object);
+ return -ENOMEM;
+ }
+
+ args = kmalloc(args_size, GFP_KERNEL);
+ if (!args) {
nvif_object_dtor(object);
return -ENOMEM;
}
@@ -293,8 +306,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
args->new.oclass = oclass;
memcpy(args->new.data, data, size);
- ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
- &object->priv);
+ ret = nvif_object_ioctl(parent, args, args_size, &object->priv);
memcpy(data, args->new.data, size);
kfree(args);
if (ret == 0)
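The nvif hunks above replace the open-coded sizeof(*args) + size arithmetic with check_add_overflow(), so a user-controlled size cannot wrap the allocation length. A minimal standalone sketch of that pattern, with hypothetical struct and helper names that are not part of the patch:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct msg { u32 len; u8 payload[]; };

    static struct msg *msg_alloc(u32 payload_len)
    {
        u32 total;

        /* Fail instead of silently wrapping the 32-bit sum. */
        if (check_add_overflow((u32)sizeof(struct msg), payload_len, &total))
            return NULL;

        return kmalloc(total, GFP_KERNEL);
    }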
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 982324ef5a41..2ae0eb0638f3 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -340,6 +340,8 @@ config DRM_PANEL_LG_SW43408
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for LG sw43408 panel.
The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
index 115f4702d59f..2b3a73696dce 100644
--- a/drivers/gpu/drm/panel/panel-lg-sw43408.c
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -182,7 +182,7 @@ static int sw43408_backlight_update_status(struct backlight_device *bl)
return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
}
-const struct backlight_ops sw43408_backlight_ops = {
+static const struct backlight_ops sw43408_backlight_ops = {
.update_status = sw43408_backlight_update_status,
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index dcb6d0b6ced0..c8cdc8356c58 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2752,6 +2752,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = {
.vfront_porch = { 3, 5, 10 },
.vback_porch = { 2, 5, 10 },
.vsync_len = { 5, 5, 5 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc koe_tx26d202vm0bwa = {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 88e80fe98112..28bfc48a9127 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
.clock = 6000,
.hdisplay = 240,
- .hsync_start = 240 + 28,
- .hsync_end = 240 + 28 + 10,
- .htotal = 240 + 28 + 10 + 10,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
.vdisplay = 280,
- .vsync_start = 280 + 8,
- .vsync_end = 280 + 8 + 4,
- .vtotal = 280 + 8 + 4 + 4,
- .width_mm = 43,
- .height_mm = 37,
+ .vsync_start = 280 + 48,
+ .vsync_end = 280 + 48 + 4,
+ .vtotal = 280 + 48 + 4 + 4,
+ .width_mm = 37,
+ .height_mm = 43,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
- of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index d47b40b82b0b..8e0ff3efede7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -192,7 +192,7 @@ static int panfrost_gem_pin(struct drm_gem_object *obj)
if (bo->is_heap)
return -EINVAL;
- return drm_gem_shmem_pin(&bo->base);
+ return drm_gem_shmem_pin_locked(&bo->base);
}
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index b8a84f26b3ef..b5e7b919f241 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -86,15 +86,15 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
int ret = 0;
void *out_alloc;
+ if (!in->count)
+ return NULL;
+
/* User stride must be at least the minimum object size, otherwise it might
* lack useful information.
*/
if (in->stride < min_stride)
return ERR_PTR(-EINVAL);
- if (!in->count)
- return NULL;
-
out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
if (!out_alloc)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 79ffcbc41d78..9a0ff48f7061 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -459,6 +459,16 @@ struct panthor_queue {
atomic64_t seqno;
/**
+ * @last_fence: Fence of the last submitted job.
+ *
+ * We return this fence when we get an empty command stream.
+ * This way, we are guaranteed that all earlier jobs have completed
+ * when drm_sched_job::s_fence::finished is signaled, without having to
+ * feed the CS ring buffer with a dummy job that only signals the fence.
+ */
+ struct dma_fence *last_fence;
+
+ /**
* @in_flight_jobs: List containing all in-flight jobs.
*
* Used to keep track and signal panthor_job::done_fence when the
@@ -829,6 +839,9 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
panthor_kernel_bo_destroy(queue->ringbuf);
panthor_kernel_bo_destroy(queue->iface.mem);
+ /* Release the last_fence we were holding, if any. */
+ dma_fence_put(queue->fence_ctx.last_fence);
+
kfree(queue);
}
@@ -2784,9 +2797,6 @@ static void group_sync_upd_work(struct work_struct *work)
spin_lock(&queue->fence_ctx.lock);
list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
- if (!job->call_info.size)
- continue;
-
if (syncobj->seqno < job->done_fence->seqno)
break;
@@ -2865,11 +2875,14 @@ queue_run_job(struct drm_sched_job *sched_job)
static_assert(sizeof(call_instrs) % 64 == 0,
"call_instrs is not aligned on a cacheline");
- /* Stream size is zero, nothing to do => return a NULL fence and let
- * drm_sched signal the parent.
+ /* Stream size is zero, nothing to do except making sure all previously
+ * submitted jobs are done before we signal the
+ * drm_sched_job::s_fence::finished fence.
*/
- if (!job->call_info.size)
- return NULL;
+ if (!job->call_info.size) {
+ job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
+ return dma_fence_get(job->done_fence);
+ }
ret = pm_runtime_resume_and_get(ptdev->base.dev);
if (drm_WARN_ON(&ptdev->base, ret))
@@ -2928,6 +2941,10 @@ queue_run_job(struct drm_sched_job *sched_job)
}
}
+ /* Update the last fence. */
+ dma_fence_put(queue->fence_ctx.last_fence);
+ queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
+
done_fence = dma_fence_get(job->done_fence);
out_unlock:
@@ -3378,10 +3395,15 @@ panthor_job_create(struct panthor_file *pfile,
goto err_put_job;
}
- job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
- if (!job->done_fence) {
- ret = -ENOMEM;
- goto err_put_job;
+ /* Empty command streams don't need a fence, they'll pick the one from
+ * the previously submitted job.
+ */
+ if (job->call_info.size) {
+ job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
+ if (!job->done_fence) {
+ ret = -ENOMEM;
+ goto err_put_job;
+ }
}
ret = drm_sched_job_init(&job->base,
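For reference, the "remember the last submitted fence" idea used in the panthor hunks above reduces to the following sketch (hypothetical types and names, not the driver's API): an empty submission hands back an extra reference to the previous job's fence instead of feeding the ring a dummy job.

    #include <linux/dma-fence.h>

    struct queue_ctx {
        struct dma_fence *last_fence;   /* holds one reference, may be NULL */
    };

    /* Returns a reference the caller must drop with dma_fence_put(). */
    static struct dma_fence *queue_submit(struct queue_ctx *q,
                                          struct dma_fence *job_fence)
    {
        /* Empty job: reuse the last fence so completion ordering is kept. */
        if (!job_fence)
            return dma_fence_get(q->last_fence);

        /* Cache the new fence, dropping the reference to the old one. */
        dma_fence_put(q->last_fence);
        q->last_fence = dma_fence_get(job_fence);

        return dma_fence_get(job_fence);
    }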
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 2ef201a072f1..e66a230331ee 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -642,7 +642,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (r)
goto error_unlock;
- if (bo_va->it.start)
+ if (bo_va->it.start && bo_va->bo)
r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
error_unlock:
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 21d27e6235f3..b11f7c5bbcbe 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
index e83c3e52251d..0250d5f00bf1 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
@@ -171,6 +171,13 @@ static void shmob_drm_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
}
+static void shmob_drm_shutdown(struct platform_device *pdev)
+{
+ struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&sdev->ddev);
+}
+
static int shmob_drm_probe(struct platform_device *pdev)
{
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
@@ -273,6 +280,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
.remove_new = shmob_drm_remove,
+ .shutdown = shmob_drm_shutdown,
.driver = {
.name = "shmob-drm",
.of_match_table = of_match_ptr(shmob_drm_of_table),
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index b3be68b03610..dd8fb9f8341a 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -505,8 +505,8 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
* Eventually we will have a fully 50% fragmented mm.
*/
- mm_size = PAGE_SIZE << max_order;
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ mm_size = SZ_4K << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
@@ -520,7 +520,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
for (order = top; order--;) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
mm_size, size, size,
&tmp, flags),
@@ -534,7 +534,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
/* There should be one final page for this sub-allocation */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM for hole\n");
@@ -544,7 +544,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
list_move_tail(&block->link, &holes);
- size = get_size(top, PAGE_SIZE);
+ size = get_size(top, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
@@ -555,7 +555,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
/* Nothing larger than blocks of chunk_size now available */
for (order = 1; order <= max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at order %d, it should be full!",
@@ -584,14 +584,14 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
* page left.
*/
- mm_size = PAGE_SIZE << max_order;
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ mm_size = SZ_4K << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order < max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -604,7 +604,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* And now the last remaining block available */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM on final alloc\n");
@@ -616,7 +616,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
/* Should be completely full! */
for (order = max_order; order--;) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@@ -632,7 +632,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
list_del(&block->link);
drm_buddy_free_block(&mm, block);
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -647,7 +647,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* To confirm, now the whole mm should be available */
- size = get_size(max_order, PAGE_SIZE);
+ size = get_size(max_order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
@@ -678,15 +678,15 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
* try to allocate them all.
*/
- mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+ mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order <= max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -699,7 +699,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
}
/* Should be completely full! */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@@ -716,7 +716,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
LIST_HEAD(allocated);
struct drm_buddy mm;
- KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
"mm.max_order(%d) != %d\n", mm.max_order,
@@ -724,7 +724,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
size = mm.chunk_size << mm.max_order;
KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
- PAGE_SIZE, &allocated, flags));
+ mm.chunk_size, &allocated, flags));
block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
KUNIT_EXPECT_TRUE(test, block);
@@ -734,10 +734,10 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_block_order(block), mm.max_order);
KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE,
+ BIT_ULL(mm.max_order) * mm.chunk_size,
"block size(%llu) != %llu\n",
drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE);
+ BIT_ULL(mm.max_order) * mm.chunk_size);
drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
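The buddy-allocator test changes above move from PAGE_SIZE to a fixed SZ_4K chunk so the expected order arithmetic is identical on 4K, 16K and 64K-page kernels. A minimal illustration, with sizes chosen only for this example:

    #include <drm/drm_buddy.h>
    #include <linux/sizes.h>

    static int buddy_example(void)
    {
        struct drm_buddy mm;
        int err;

        /* 16 MiB managed in fixed 4 KiB chunks: max_order is 12 on any page size. */
        err = drm_buddy_init(&mm, SZ_4K << 12, SZ_4K);
        if (err)
            return err;

        drm_buddy_fini(&mm);
        return 0;
    }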
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6396dece0db1..2427be8bc97f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -346,6 +346,7 @@ static void ttm_bo_release(struct kref *kref)
if (!dma_resv_test_signaled(bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP) ||
(want_init_on_free() && (bo->ttm != NULL)) ||
+ bo->type == ttm_bo_type_sg ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index faddae3d6ac2..6f1ac940cbae 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -2,7 +2,7 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && MMU
- depends on X86 || ARM64
+ depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_TTM
select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8f1730aeacc9..823d8d2da17c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -746,7 +746,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
dev->vram_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "Register MMIO at 0x%pa size is %llu kiB\n",
+ "Register MMIO at 0x%pa size is %llu KiB\n",
&rmmio_start, (uint64_t)rmmio_size / 1024);
dev->rmmio = devm_ioremap(dev->drm.dev,
rmmio_start,
@@ -765,7 +765,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
fifo_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "FIFO at %pa size is %llu kiB\n",
+ "FIFO at %pa size is %llu KiB\n",
&fifo_start, (uint64_t)fifo_size / 1024);
dev->fifo_mem = devm_memremap(dev->drm.dev,
fifo_start,
@@ -790,7 +790,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
* SVGA_REG_VRAM_SIZE.
*/
drm_info(&dev->drm,
- "VRAM at %pa size is %llu kiB\n",
+ "VRAM at %pa size is %llu KiB\n",
&dev->vram_start, (uint64_t)dev->vram_size / 1024);
return 0;
@@ -960,13 +960,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
- /*
- * Workaround for low memory 2D VMs to compensate for the
- * allocation taken by fbdev
- */
- if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 3;
-
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->max_primary_mem =
vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
@@ -991,13 +984,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->max_primary_mem = dev_priv->vram_size;
}
drm_info(&dev_priv->drm,
- "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
+ "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n",
(u64)dev_priv->vram_size / 1024,
(u64)dev_priv->fifo_mem_size / 1024,
dev_priv->memory_size / 1024);
drm_info(&dev_priv->drm,
- "MOB limits: max mob size = %u kB, max mob pages = %u\n",
+ "MOB limits: max mob size = %u KiB, max mob pages = %u\n",
dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
ret = vmw_dma_masks(dev_priv);
@@ -1015,7 +1008,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
(unsigned)dev_priv->max_gmr_pages);
}
drm_info(&dev_priv->drm,
- "Maximum display memory size is %llu kiB\n",
+ "Maximum display memory size is %llu KiB\n",
(uint64_t)dev_priv->max_primary_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ecaea0026fc..a1ce41e1c468 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1043,9 +1043,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index a0b47c9b33f5..5bd967fbcf55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
} else
new_max_pages = gman->max_gmr_pages * 2;
if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
- DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
+ DRM_WARN("vmwgfx: increasing guest mob limits to %u KiB.\n",
((new_max_pages) << (PAGE_SHIFT - 10)));
gman->max_gmr_pages = new_max_pages;
} else {
char buf[256];
snprintf(buf, sizeof(buf),
- "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
+ "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n",
((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
vmw_host_printf(buf);
DRM_WARN("%s", buf);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 13b2820cae51..00c4ff684130 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -224,7 +224,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
new_image = vmw_du_cursor_plane_acquire_image(new_vps);
changed = false;
- if (old_image && new_image)
+ if (old_image && new_image && old_image != new_image)
changed = memcmp(old_image, new_image, size) != 0;
return changed;
@@ -2171,13 +2171,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
+static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
+ u64 pitch,
+ u64 height)
{
- return ((u64) pitch * (u64) height) < (u64)
- ((dev_priv->active_display_unit == vmw_du_screen_target) ?
- dev_priv->max_primary_mem : dev_priv->vram_size);
+ return (pitch * height) < (u64)dev_priv->vram_size;
}
/**
@@ -2873,25 +2872,18 @@ out_unref:
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
- u32 max_width = dev_priv->texture_max_width;
- u32 max_height = dev_priv->texture_max_height;
u32 assumed_cpp = 4;
if (dev_priv->assume_16bpp)
assumed_cpp = 2;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- max_width = min(dev_priv->stdu_max_width, max_width);
- max_height = min(dev_priv->stdu_max_height, max_height);
- }
-
- if (max_width < mode->hdisplay)
- return MODE_BAD_HVALUE;
-
- if (max_height < mode->vdisplay)
- return MODE_BAD_VVALUE;
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
if (!vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_cpp,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 2651fe0ef518..1f15990d3934 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -48,8 +48,6 @@
#define RETRIES 3
-#define VMW_HYPERVISOR_MAGIC 0x564D5868
-
#define VMW_PORT_CMD_MSG 30
#define VMW_PORT_CMD_HB_MSG 0
#define VMW_PORT_CMD_OPEN_CHANNEL (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
@@ -104,20 +102,18 @@ static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
*/
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
+ u32 ecx, edx, esi, edi;
- VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
- (protocol | GUESTMSG_FLAG_COOKIE), si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall6(VMW_PORT_CMD_OPEN_CHANNEL,
+ (protocol | GUESTMSG_FLAG_COOKIE), 0,
+ &ecx, &edx, &esi, &edi);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
return -EINVAL;
channel->channel_id = HIGH_WORD(edx);
- channel->cookie_high = si;
- channel->cookie_low = di;
+ channel->cookie_high = esi;
+ channel->cookie_low = edi;
return 0;
}
@@ -133,17 +129,13 @@ static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
*/
static int vmw_close_channel(struct rpc_channel *channel)
{
- unsigned long eax, ebx, ecx, edx, si, di;
-
- /* Set up additional parameters */
- si = channel->cookie_high;
- di = channel->cookie_low;
+ u32 ecx;
- VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
- 0, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall5(VMW_PORT_CMD_CLOSE_CHANNEL,
+ 0, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
return -EINVAL;
@@ -163,24 +155,18 @@ static int vmw_close_channel(struct rpc_channel *channel)
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
const char *msg, bool hb)
{
- unsigned long si, di, eax, ebx, ecx, edx;
+ u32 ebx, ecx;
unsigned long msg_len = strlen(msg);
/* HB port can't access encrypted memory. */
if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
- unsigned long bp = channel->cookie_high;
- u32 channel_id = (channel->channel_id << 16);
-
- si = (uintptr_t) msg;
- di = channel->cookie_low;
-
- VMW_PORT_HB_OUT(
+ vmware_hypercall_hb_out(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- msg_len, si, di,
- VMWARE_HYPERVISOR_HB | channel_id |
- VMWARE_HYPERVISOR_OUT,
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
+ msg_len,
+ channel->channel_id << 16,
+ (uintptr_t) msg, channel->cookie_low,
+ channel->cookie_high,
+ &ebx);
return ebx;
}
@@ -194,14 +180,13 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
memcpy(&word, msg, bytes);
msg_len -= bytes;
msg += bytes;
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
- word, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+
+ vmware_hypercall5(VMW_PORT_CMD_MSG |
+ (MSG_TYPE_SENDPAYLOAD << 16),
+ word, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
}
return ecx;
@@ -220,22 +205,17 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
unsigned long reply_len, bool hb)
{
- unsigned long si, di, eax, ebx, ecx, edx;
+ u32 ebx, ecx, edx;
/* HB port can't access encrypted memory */
if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
- unsigned long bp = channel->cookie_low;
- u32 channel_id = (channel->channel_id << 16);
-
- si = channel->cookie_high;
- di = (uintptr_t) reply;
-
- VMW_PORT_HB_IN(
+ vmware_hypercall_hb_in(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- reply_len, si, di,
- VMWARE_HYPERVISOR_HB | channel_id,
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
+ reply_len,
+ channel->channel_id << 16,
+ channel->cookie_high,
+ (uintptr_t) reply, channel->cookie_low,
+ &ebx);
return ebx;
}
@@ -245,14 +225,13 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
while (reply_len) {
unsigned int bytes = min_t(unsigned long, reply_len, 4);
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
- MESSAGE_STATUS_SUCCESS, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall7(VMW_PORT_CMD_MSG |
+ (MSG_TYPE_RECVPAYLOAD << 16),
+ MESSAGE_STATUS_SUCCESS,
+ channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ebx, &ecx, &edx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
break;
@@ -276,22 +255,18 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
*/
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
- unsigned long eax, ebx, ecx, edx, si, di;
+ u32 ebx, ecx;
size_t msg_len = strlen(msg);
int retries = 0;
while (retries < RETRIES) {
retries++;
- /* Set up additional parameters */
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_SENDSIZE,
- msg_len, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall5(VMW_PORT_CMD_SENDSIZE,
+ msg_len, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
/* Expected success. Give up. */
@@ -329,7 +304,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
size_t *msg_len)
{
- unsigned long eax, ebx, ecx, edx, si, di;
+ u32 ebx, ecx, edx;
char *reply;
size_t reply_len;
int retries = 0;
@@ -341,15 +316,11 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
while (retries < RETRIES) {
retries++;
- /* Set up additional parameters */
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_RECVSIZE,
- 0, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall7(VMW_PORT_CMD_RECVSIZE,
+ 0, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ebx, &ecx, &edx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
DRM_ERROR("Failed to get reply size for host message.\n");
@@ -384,16 +355,12 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply[reply_len] = '\0';
-
- /* Ack buffer */
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
- MESSAGE_STATUS_SUCCESS, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall5(VMW_PORT_CMD_RECVSTATUS,
+ MESSAGE_STATUS_SUCCESS,
+ channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
@@ -652,13 +619,7 @@ static inline void reset_ppn_array(PPN64 *arr, size_t size)
*/
static inline void hypervisor_ppn_reset_all(void)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
-
- VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
- 0, si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall1(VMW_PORT_CMD_MKSGS_RESET, 0);
}
/**
@@ -669,13 +630,7 @@ static inline void hypervisor_ppn_reset_all(void)
*/
static inline void hypervisor_ppn_add(PPN64 pfn)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
-
- VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
- (unsigned long)pfn, si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn);
}
/**
@@ -686,13 +641,7 @@ static inline void hypervisor_ppn_add(PPN64 pfn)
*/
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
-
- VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
- (unsigned long)pfn, si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn);
}
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
index 4f40167ad61f..3c78e9338b54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
@@ -34,6 +34,8 @@
#define VMWARE_HYPERVISOR_HB BIT(0)
#define VMWARE_HYPERVISOR_OUT BIT(1)
+#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
+
#define X86_IO_MAGIC 0x86
#define X86_IO_W7_SIZE_SHIFT 0
@@ -45,86 +47,158 @@
#define X86_IO_W7_IMM_SHIFT 5
#define X86_IO_W7_IMM_MASK (0xff << X86_IO_W7_IMM_SHIFT)
-static inline void vmw_port(unsigned long cmd, unsigned long in_ebx,
- unsigned long in_si, unsigned long in_di,
- unsigned long flags, unsigned long magic,
- unsigned long *eax, unsigned long *ebx,
- unsigned long *ecx, unsigned long *edx,
- unsigned long *si, unsigned long *di)
+static inline
+unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
{
- register u64 x0 asm("x0") = magic;
- register u64 x1 asm("x1") = in_ebx;
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
register u64 x2 asm("x2") = cmd;
- register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT;
- register u64 x4 asm("x4") = in_si;
- register u64 x5 asm("x5") = in_di;
+ register u64 x3 asm("x3") = VMWARE_HYPERVISOR_PORT;
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0)
+ : "r" (x1), "r" (x2), "r" (x3), "r" (x7)
+ : "memory");
+
+ return x0;
+}
+
+static inline
+unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, u32 *out2)
+{
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4") = in4;
+ register u64 x5 asm("x5") = in5;
register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
X86_IO_W7_WITH |
X86_IO_W7_DIR |
(2 << X86_IO_W7_SIZE_SHIFT);
- asm volatile("mrs xzr, mdccsr_el0 \n\t"
- : "+r"(x0), "+r"(x1), "+r"(x2),
- "+r"(x3), "+r"(x4), "+r"(x5)
- : "r"(x7)
- :);
- *eax = x0;
- *ebx = x1;
- *ecx = x2;
- *edx = x3;
- *si = x4;
- *di = x5;
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x2)
+ : "r" (x1), "r" (x3), "r" (x4), "r" (x5), "r" (x7)
+ : "memory");
+
+ *out2 = x2;
+ return x0;
}
-static inline void vmw_port_hb(unsigned long cmd, unsigned long in_ecx,
- unsigned long in_si, unsigned long in_di,
- unsigned long flags, unsigned long magic,
- unsigned long bp, u32 w7dir,
- unsigned long *eax, unsigned long *ebx,
- unsigned long *ecx, unsigned long *edx,
- unsigned long *si, unsigned long *di)
+static inline
+unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1,
+ unsigned long in3, u32 *out2,
+ u32 *out3, u32 *out4, u32 *out5)
{
- register u64 x0 asm("x0") = magic;
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4");
+ register u64 x5 asm("x5");
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x2), "+r" (x3), "=r" (x4), "=r" (x5)
+ : "r" (x1), "r" (x7)
+ : "memory");
+
+ *out2 = x2;
+ *out3 = x3;
+ *out4 = x4;
+ *out5 = x5;
+ return x0;
+}
+
+static inline
+unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, u32 *out1,
+ u32 *out2, u32 *out3)
+{
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4") = in4;
+ register u64 x5 asm("x5") = in5;
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3)
+ : "r" (x4), "r" (x5), "r" (x7)
+ : "memory");
+
+ *out1 = x1;
+ *out2 = x2;
+ *out3 = x3;
+ return x0;
+}
+
+static inline
+unsigned long vmware_hypercall_hb(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1, int dir)
+{
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
register u64 x1 asm("x1") = cmd;
- register u64 x2 asm("x2") = in_ecx;
- register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT_HB;
- register u64 x4 asm("x4") = in_si;
- register u64 x5 asm("x5") = in_di;
- register u64 x6 asm("x6") = bp;
+ register u64 x2 asm("x2") = in2;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT_HB;
+ register u64 x4 asm("x4") = in4;
+ register u64 x5 asm("x5") = in5;
+ register u64 x6 asm("x6") = in6;
register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
X86_IO_W7_STR |
X86_IO_W7_WITH |
- w7dir;
-
- asm volatile("mrs xzr, mdccsr_el0 \n\t"
- : "+r"(x0), "+r"(x1), "+r"(x2),
- "+r"(x3), "+r"(x4), "+r"(x5)
- : "r"(x6), "r"(x7)
- :);
- *eax = x0;
- *ebx = x1;
- *ecx = x2;
- *edx = x3;
- *si = x4;
- *di = x5;
-}
+ dir;
-#define VMW_PORT(cmd, in_ebx, in_si, in_di, flags, magic, eax, ebx, ecx, edx, \
- si, di) \
- vmw_port(cmd, in_ebx, in_si, in_di, flags, magic, &eax, &ebx, &ecx, \
- &edx, &si, &di)
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x1)
+ : "r" (x2), "r" (x3), "r" (x4), "r" (x5),
+ "r" (x6), "r" (x7)
+ : "memory");
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
- ecx, edx, si, di) \
- vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
- 0, &eax, &ebx, &ecx, &edx, &si, &di)
+ *out1 = x1;
+ return x0;
+}
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
- ecx, edx, si, di) \
- vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
- X86_IO_W7_DIR, &eax, &ebx, &ecx, &edx, &si, &di)
+static inline
+unsigned long vmware_hypercall_hb_out(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1)
+{
+ return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1, 0);
+}
+static inline
+unsigned long vmware_hypercall_hb_in(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1)
+{
+ return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1,
+ X86_IO_W7_DIR);
+}
#endif
#endif /* _VMWGFX_MSG_ARM64_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
index 23899d743a90..13304d34cc6e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
@@ -37,191 +37,6 @@
#include <asm/vmware.h>
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last two parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ebx: [IN] Message Len, through EBX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set ot 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
- flags, magic, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile (VMWARE_HYPERCALL : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(in_ebx), \
- "c"(cmd), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di) : \
- "memory"); \
-})
-
-
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last 3 parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ecx: [IN] Message Len, through ECX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set to 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @bp: [IN]
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#ifdef __x86_64__
-
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ( \
- UNWIND_HINT_SAVE \
- "push %%rbp;" \
- UNWIND_HINT_UNDEFINED \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%rbp;" \
- UNWIND_HINT_RESTORE : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ( \
- UNWIND_HINT_SAVE \
- "push %%rbp;" \
- UNWIND_HINT_UNDEFINED \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%rbp;" \
- UNWIND_HINT_RESTORE : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-#elif defined(__i386__)
-
-/*
- * In the 32-bit version of this macro, we store bp in a memory location
- * because we've ran out of registers.
- * Now we can't reference that memory location while we've modified
- * %esp or %ebp, so we first push it on the stack, just before we push
- * %ebp, and then when we need it we read it from the stack where we
- * just pushed it.
- */
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-#endif /* defined(__i386__) */
-
#endif /* defined(__i386__) || defined(__x86_64__) */
#endif /* _VMWGFX_MSG_X86_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 2041c4d48daa..a04e0736318d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -43,7 +43,14 @@
#define vmw_connector_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.connector)
-
+/*
+ * Some renderers such as llvmpipe will align the width and height of their
+ * buffers to match their tile size. We need to keep this in mind when exposing
+ * modes to userspace so that this possible over-allocation will not exceed
+ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
+ * tile size of current renderers.
+ */
+#define GPU_TILE_SIZE 64
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
@@ -85,11 +92,6 @@ struct vmw_stdu_update {
SVGA3dCmdUpdateGBScreenTarget body;
};
-struct vmw_stdu_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
-};
-
struct vmw_stdu_surface_copy {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
@@ -414,6 +416,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
+ struct drm_crtc_state *new_crtc_state;
int ret;
if (!crtc) {
@@ -423,6 +426,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (dev_priv->vkms_enabled)
drm_crtc_vblank_off(crtc);
@@ -434,6 +438,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
(void) vmw_stdu_update_st(dev_priv, stdu);
+ /* Don't destroy the Screen Target if we are only setting the
+ * display as inactive
+ */
+ if (new_crtc_state->enable &&
+ !new_crtc_state->active &&
+ !new_crtc_state->mode_changed)
+ return;
+
ret = vmw_stdu_destroy_st(dev_priv, stdu);
if (ret)
DRM_ERROR("Failed to destroy Screen Target\n");
@@ -829,7 +841,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
vmw_stdu_destroy(vmw_connector_to_stdu(connector));
}
+static enum drm_mode_status
+vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ enum drm_mode_status ret;
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
+ /* Align width and height to account for GPU tile over-alignment */
+ u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
+ ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
+ assumed_cpp;
+ required_mem = ALIGN(required_mem, PAGE_SIZE);
+
+ ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
+ dev_priv->stdu_max_height);
+ if (ret != MODE_OK)
+ return ret;
+
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
+ if (required_mem > dev_priv->max_primary_mem)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
@@ -845,7 +891,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
- .mode_valid = vmw_connector_mode_valid
+ .mode_valid = vmw_stdu_connector_mode_valid
};
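As a worked example of the memory check introduced in vmw_stdu_connector_mode_valid() above (numbers picked for illustration, assuming 4 KiB pages):

    /*
     * For a 1920x1080 mode at 4 bytes per pixel:
     *
     *   required_mem = ALIGN(1920, 64) * ALIGN(1080, 64) * 4
     *                = 1920 * 1088 * 4
     *                = 8355840 bytes  (exactly 2040 pages of 4 KiB)
     *
     * The mode is only exposed if this fits within max_primary_mem,
     * max_mob_pages * PAGE_SIZE and max_mob_size.
     */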
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index d46f87a039f2..b3d3c065dd9d 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -159,12 +159,16 @@ void intel_hdcp_gsc_fini(struct xe_device *xe)
{
struct intel_hdcp_gsc_message *hdcp_message =
xe->display.hdcp.hdcp_message;
+ struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;
- if (!hdcp_message)
- return;
+ if (hdcp_message) {
+ xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
+ kfree(hdcp_message);
+ xe->display.hdcp.hdcp_message = NULL;
+ }
- xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
- kfree(hdcp_message);
+ kfree(arb);
+ xe->display.hdcp.arbiter = NULL;
}
static int xe_gsc_send_sync(struct xe_device *xe,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index bc1f794e3e61..b6f3a43d637f 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -317,7 +317,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
struct xe_device *xe = xe_bo_device(bo);
struct xe_ttm_tt *tt;
unsigned long extra_pages;
- enum ttm_caching caching;
+ enum ttm_caching caching = ttm_cached;
int err;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
@@ -331,26 +331,35 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
PAGE_SIZE);
- switch (bo->cpu_caching) {
- case DRM_XE_GEM_CPU_CACHING_WC:
- caching = ttm_write_combined;
- break;
- default:
- caching = ttm_cached;
- break;
- }
-
- WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
-
/*
- * Display scanout is always non-coherent with the CPU cache.
- *
- * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
- * require a CPU:WC mapping.
+ * DGFX system memory is always WB / ttm_cached, since
+ * other caching modes are only supported on x86. DGFX
+ * GPU system memory accesses are always coherent with the
+ * CPU.
*/
- if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
- (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
- caching = ttm_write_combined;
+ if (!IS_DGFX(xe)) {
+ switch (bo->cpu_caching) {
+ case DRM_XE_GEM_CPU_CACHING_WC:
+ caching = ttm_write_combined;
+ break;
+ default:
+ caching = ttm_cached;
+ break;
+ }
+
+ WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
+
+ /*
+ * Display scanout is always non-coherent with the CPU cache.
+ *
+ * For Xe_LPG and beyond, PPGTT PTE lookups are also
+ * non-coherent and require a CPU:WC mapping.
+ */
+ if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+ (xe->info.graphics_verx100 >= 1270 &&
+ bo->flags & XE_BO_FLAG_PAGETABLE))
+ caching = ttm_write_combined;
+ }
err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
if (err) {
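The xe_ttm_tt_create() rework above can be summarised by this hypothetical helper (a sketch, not the driver's API): on discrete GPUs system memory stays write-back, otherwise write-combining is used for explicit WC requests, scanout buffers and, on newer graphics IPs, page tables.

    #include <drm/ttm/ttm_caching.h>

    static enum ttm_caching pick_caching(bool is_dgfx, bool wants_wc,
                                         bool scanout_or_pagetable)
    {
        /* DGFX system memory is always WB; other modes are x86-only. */
        if (is_dgfx)
            return ttm_cached;

        if (wants_wc || scanout_or_pagetable)
            return ttm_write_combined;

        return ttm_cached;
    }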
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 86422e113d39..10450f1fbbde 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -66,7 +66,8 @@ struct xe_bo {
/**
* @cpu_caching: CPU caching mode. Currently only used for userspace
- * objects.
+ * objects. Exceptions are system memory on DGFX, which is always
+ * WB.
*/
u16 cpu_caching;
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 8fc0f3f6ecc5..944770fb2daf 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -147,6 +147,13 @@ static const struct attribute *gt_idle_attrs[] = {
static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
{
struct kobject *kobj = arg;
+ struct xe_gt *gt = kobj_to_gt(kobj->parent);
+
+ if (gt_to_xe(gt)->info.skip_guc_pc) {
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ xe_gt_idle_disable_c6(gt);
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
sysfs_remove_files(kobj, gt_idle_attrs);
kobject_put(kobj);
@@ -199,7 +206,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
void xe_gt_idle_disable_c6(struct xe_gt *gt)
{
xe_device_assert_mem_access(gt_to_xe(gt));
- xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
xe_mmio_write32(gt, PG_ENABLE, 0);
xe_mmio_write32(gt, RC_CONTROL, 0);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 577bd7043740..0443e07880a0 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -342,7 +342,7 @@ static void init_steering_oaddrm(struct xe_gt *gt)
else
gt->steering[OADDRM].group_target = 1;
- gt->steering[DSS].instance_target = 0; /* unused */
+ gt->steering[OADDRM].instance_target = 0; /* unused */
}
static void init_steering_sqidi_psmi(struct xe_gt *gt)
@@ -357,8 +357,8 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
static void init_steering_inst0(struct xe_gt *gt)
{
- gt->steering[DSS].group_target = 0; /* unused */
- gt->steering[DSS].instance_target = 0; /* unused */
+ gt->steering[INSTANCE0].group_target = 0; /* unused */
+ gt->steering[INSTANCE0].instance_target = 0; /* unused */
}
static const struct {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 79116ad58620..6c2cfc54442c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1274,6 +1274,9 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
struct xe_tile *tile;
unsigned int tid;
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
for_each_tile(tile, xe, tid) {
lmtt = &tile->sriov.pf.lmtt;
xe_lmtt_drop_pages(lmtt, vfid);
@@ -1292,6 +1295,9 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
unsigned int tid;
int err;
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
total = 0;
for_each_tile(tile, xe, tid)
total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
@@ -1337,6 +1343,7 @@ fail:
static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
+ xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -1355,6 +1362,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, IS_DGFX(xe));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
@@ -1745,10 +1753,14 @@ static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *c
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_device *xe = gt_to_xe(gt);
if (!xe_gt_is_media_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
- pf_release_vf_config_lmem(gt, config);
+ if (IS_DGFX(xe)) {
+ pf_release_vf_config_lmem(gt, config);
+ pf_update_vf_lmtt(xe, vfid);
+ }
}
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 240e7a4bbff1..5faca4fc2fef 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -631,8 +631,6 @@ int xe_guc_enable_communication(struct xe_guc *guc)
struct xe_device *xe = guc_to_xe(guc);
int err;
- guc_enable_irq(guc);
-
if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
@@ -640,6 +638,8 @@ int xe_guc_enable_communication(struct xe_guc *guc)
err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
if (err)
return err;
+ } else {
+ guc_enable_irq(guc);
}
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 509649d0e65e..23382ced4ea7 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -895,12 +895,6 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_pc *pc = arg;
- struct xe_device *xe = pc_to_xe(pc);
-
- if (xe->info.skip_guc_pc) {
- xe_gt_idle_disable_c6(pc_to_gt(pc));
- return;
- }
XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index c7d38469fb46..e4e3658e6a13 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1240,6 +1240,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
return 0;
err_entity:
+ mutex_unlock(&guc->submission_state.lock);
xe_sched_entity_fini(&ge->entity);
err_sched:
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 9f6e9b7f11c8..198f5c2189cb 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -34,7 +34,6 @@
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_vm.h"
-#include "xe_wa.h"
/**
* struct xe_migrate - migrate context.
@@ -300,10 +299,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
}
/*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
* Including the reserved copy engine is required to avoid deadlocks due to
* migrate jobs servicing the faults gets stuck behind the job that faulted.
*/
@@ -317,8 +312,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
if (hwe->class != XE_ENGINE_CLASS_COPY)
continue;
- if (!XE_WA(gt, 16017236439) ||
- xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+ if (xe_gt_is_usm_hwe(gt, hwe))
logical_mask |= BIT(hwe->logical_instance);
}
@@ -369,6 +363,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (!hwe || !logical_mask)
return ERR_PTR(-EINVAL);
+ /*
+ * XXX: Currently only reserving 1 (likely slow) BCS instance on
+ * PVC, may want to revisit if performance is needed.
+ */
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
@@ -1336,7 +1334,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
GFP_KERNEL, true, 0);
if (IS_ERR(sa_bo)) {
err = PTR_ERR(sa_bo);
- goto err;
+ goto err_bb;
}
ppgtt_ofs = NUM_KERNEL_PDE +
@@ -1387,7 +1385,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
update_idx);
if (IS_ERR(job)) {
err = PTR_ERR(job);
- goto err_bb;
+ goto err_sa;
}
/* Wait on BO move */
@@ -1436,12 +1434,12 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
err_job:
xe_sched_job_put(job);
+err_sa:
+ drm_suballoc_free(sa_bo, NULL);
err_bb:
if (!q)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, NULL);
-err:
- drm_suballoc_free(sa_bo, NULL);
return ERR_PTR(err);
}
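The relabelled gotos restore the usual rule that each failure jumps to a label releasing only what was actually acquired: a failed sub-allocation no longer tries to free the sa_bo it never got, and a failed job creation no longer leaks it. A generic sketch of the pattern, with hypothetical resources a and b:

	a = alloc_a();
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = alloc_b(a);				/* depends on a */
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;			/* only a exists at this point */
	}

	if (do_work(a, b)) {
		err = -EIO;
		goto err_b;			/* unwind in reverse order of acquisition */
	}
	return 0;

err_b:
	free_b(b);
err_a:
	free_a(a);
	return err;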
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index c010ef16fbf5..a5e7da8cf944 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -191,7 +191,7 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
preempt_disable();
ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- true, timeout_base_ms * 1000, true);
+ true, 50 * 1000, true);
preempt_enable();
out:
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index d42b3f33bd7a..aca7a9af6e84 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -80,6 +80,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
return i;
}
+static int emit_flush_dw(u32 *dw, int i)
+{
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+
+ return i;
+}
+
static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
u32 *dw, int i)
{
@@ -234,10 +244,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
@@ -293,10 +305,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
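emit_flush_dw() costs four dwords (MI_FLUSH_DW | MI_FLUSH_IMM_DW plus three zero dwords) and is emitted only for jobs that carry a user fence, so that prior writes are flushed before the fence value becomes visible at the PPGTT address user space is polling. Consolidated from the hunks above, the per-job emission order now reads roughly:

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
	if (job->user_fence.used) {
		i = emit_flush_dw(dw, i);		/* flush before the fence write */
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value, dw, i);
	}
	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);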
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index ce71b53ea6c5..e40f1ddebbb7 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -154,10 +154,8 @@ obj-$(CONFIG_HID_WINWING) += hid-winwing.o
obj-$(CONFIG_HID_SENSOR_HUB) += hid-sensor-hub.o
obj-$(CONFIG_HID_SENSOR_CUSTOM_SENSOR) += hid-sensor-custom.o
-hid-uclogic-test-objs := hid-uclogic-rdesc.o \
- hid-uclogic-params.o \
- hid-uclogic-rdesc-test.o
-obj-$(CONFIG_HID_KUNIT_TEST) += hid-uclogic-test.o
+hid-uclogic-test-objs := hid-uclogic-rdesc-test.o
+obj-$(CONFIG_HID_KUNIT_TEST) += hid-uclogic.o hid-uclogic-test.o
obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
diff --git a/drivers/hid/bpf/Makefile b/drivers/hid/bpf/Makefile
index cf55120cf7d6..d1f2b81788ca 100644
--- a/drivers/hid/bpf/Makefile
+++ b/drivers/hid/bpf/Makefile
@@ -8,4 +8,4 @@ LIBBPF_INCLUDE = $(srctree)/tools/lib
obj-$(CONFIG_HID_BPF) += hid_bpf.o
CFLAGS_hid_bpf_dispatch.o += -I$(LIBBPF_INCLUDE)
CFLAGS_hid_bpf_jmp_table.o += -I$(LIBBPF_INCLUDE)
-hid_bpf-objs += hid_bpf_dispatch.o hid_bpf_jmp_table.o
+hid_bpf-objs += hid_bpf_dispatch.o hid_bpf_struct_ops.o
diff --git a/drivers/hid/bpf/entrypoints/Makefile b/drivers/hid/bpf/entrypoints/Makefile
deleted file mode 100644
index 43b99b5575cf..000000000000
--- a/drivers/hid/bpf/entrypoints/Makefile
+++ /dev/null
@@ -1,93 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-OUTPUT := .output
-abs_out := $(abspath $(OUTPUT))
-
-CLANG ?= clang
-LLC ?= llc
-LLVM_STRIP ?= llvm-strip
-
-TOOLS_PATH := $(abspath ../../../../tools)
-BPFTOOL_SRC := $(TOOLS_PATH)/bpf/bpftool
-BPFTOOL_OUTPUT := $(abs_out)/bpftool
-DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)/bootstrap/bpftool
-BPFTOOL ?= $(DEFAULT_BPFTOOL)
-
-LIBBPF_SRC := $(TOOLS_PATH)/lib/bpf
-LIBBPF_OUTPUT := $(abs_out)/libbpf
-LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
-LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
-BPFOBJ := $(LIBBPF_OUTPUT)/libbpf.a
-
-INCLUDES := -I$(OUTPUT) -I$(LIBBPF_INCLUDE) -I$(TOOLS_PATH)/include/uapi
-CFLAGS := -g -Wall
-
-VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
- $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
- ../../../../vmlinux \
- /sys/kernel/btf/vmlinux \
- /boot/vmlinux-$(shell uname -r)
-VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
-ifeq ($(VMLINUX_BTF),)
-$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
-endif
-
-ifeq ($(V),1)
-Q =
-msg =
-else
-Q = @
-msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
-MAKEFLAGS += --no-print-directory
-submake_extras := feature_display=0
-endif
-
-.DELETE_ON_ERROR:
-
-.PHONY: all clean
-
-all: entrypoints.lskel.h
-
-clean:
- $(call msg,CLEAN)
- $(Q)rm -rf $(OUTPUT) entrypoints
-
-entrypoints.lskel.h: $(OUTPUT)/entrypoints.bpf.o | $(BPFTOOL)
- $(call msg,GEN-SKEL,$@)
- $(Q)$(BPFTOOL) gen skeleton -L $< > $@
-
-
-$(OUTPUT)/entrypoints.bpf.o: entrypoints.bpf.c $(OUTPUT)/vmlinux.h $(BPFOBJ) | $(OUTPUT)
- $(call msg,BPF,$@)
- $(Q)$(CLANG) -g -O2 --target=bpf $(INCLUDES) \
- -c $(filter %.c,$^) -o $@ && \
- $(LLVM_STRIP) -g $@
-
-$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
-ifeq ($(VMLINUX_H),)
- $(call msg,GEN,,$@)
- $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
-else
- $(call msg,CP,,$@)
- $(Q)cp "$(VMLINUX_H)" $@
-endif
-
-$(OUTPUT) $(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT):
- $(call msg,MKDIR,$@)
- $(Q)mkdir -p $@
-
-$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
- OUTPUT=$(abspath $(dir $@))/ prefix= \
- DESTDIR=$(LIBBPF_DESTDIR) $(abspath $@) install_headers
-
-ifeq ($(CROSS_COMPILE),)
-$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \
- OUTPUT=$(BPFTOOL_OUTPUT)/ \
- LIBBPF_BOOTSTRAP_OUTPUT=$(LIBBPF_OUTPUT)/ \
- LIBBPF_BOOTSTRAP_DESTDIR=$(LIBBPF_DESTDIR)/ bootstrap
-else
-$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \
- OUTPUT=$(BPFTOOL_OUTPUT)/ bootstrap
-endif
diff --git a/drivers/hid/bpf/entrypoints/README b/drivers/hid/bpf/entrypoints/README
deleted file mode 100644
index 147e0d41509f..000000000000
--- a/drivers/hid/bpf/entrypoints/README
+++ /dev/null
@@ -1,4 +0,0 @@
-WARNING:
-If you change "entrypoints.bpf.c" do "make -j" in this directory to rebuild "entrypoints.skel.h".
-Make sure to have clang 10 installed.
-See Documentation/bpf/bpf_devel_QA.rst
diff --git a/drivers/hid/bpf/entrypoints/entrypoints.bpf.c b/drivers/hid/bpf/entrypoints/entrypoints.bpf.c
deleted file mode 100644
index c22921125a1a..000000000000
--- a/drivers/hid/bpf/entrypoints/entrypoints.bpf.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2022 Benjamin Tissoires */
-
-#include ".output/vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-#define HID_BPF_MAX_PROGS 1024
-
-struct {
- __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
- __uint(max_entries, HID_BPF_MAX_PROGS);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(__u32));
-} hid_jmp_table SEC(".maps");
-
-SEC("fmod_ret/__hid_bpf_tail_call")
-int BPF_PROG(hid_tail_call, struct hid_bpf_ctx *hctx)
-{
- bpf_tail_call(ctx, &hid_jmp_table, hctx->index);
-
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
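The deleted entrypoints.bpf.c was the pre-loaded dispatcher of the old model: attached programs lived in the hid_jmp_table prog array and were reached through bpf_tail_call() from the __hid_bpf_tail_call fmod_ret hook. With the struct_ops rework in this series, a HID-BPF program implements struct hid_bpf_ops hooks directly instead. A sketch of the BPF side under the new model; the exact hook prototypes (for instance whether hid_device_event also receives the source argument visible in the dispatch code later in this diff) should be taken from the in-tree headers and selftests rather than from this sketch:

/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
			      unsigned int offset, const size_t __sz) __ksym;

SEC("struct_ops/hid_device_event")
int BPF_PROG(my_event, struct hid_bpf_ctx *hctx, enum hid_report_type type)
{
	__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4 /* size */);

	if (!data)
		return 0;		/* leave the event untouched */

	data[1] = 0x42;			/* example tweak of the incoming report */
	return 0;
}

SEC(".struct_ops.link")
struct hid_bpf_ops my_ops = {
	.hid_device_event = (void *)my_event,
};

char _license[] SEC("license") = "GPL";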
diff --git a/drivers/hid/bpf/entrypoints/entrypoints.lskel.h b/drivers/hid/bpf/entrypoints/entrypoints.lskel.h
deleted file mode 100644
index 35618051598c..000000000000
--- a/drivers/hid/bpf/entrypoints/entrypoints.lskel.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */
-#ifndef __ENTRYPOINTS_BPF_SKEL_H__
-#define __ENTRYPOINTS_BPF_SKEL_H__
-
-#include <bpf/skel_internal.h>
-
-struct entrypoints_bpf {
- struct bpf_loader_ctx ctx;
- struct {
- struct bpf_map_desc hid_jmp_table;
- } maps;
- struct {
- struct bpf_prog_desc hid_tail_call;
- } progs;
- struct {
- int hid_tail_call_fd;
- } links;
-};
-
-static inline int
-entrypoints_bpf__hid_tail_call__attach(struct entrypoints_bpf *skel)
-{
- int prog_fd = skel->progs.hid_tail_call.prog_fd;
- int fd = skel_raw_tracepoint_open(NULL, prog_fd);
-
- if (fd > 0)
- skel->links.hid_tail_call_fd = fd;
- return fd;
-}
-
-static inline int
-entrypoints_bpf__attach(struct entrypoints_bpf *skel)
-{
- int ret = 0;
-
- ret = ret < 0 ? ret : entrypoints_bpf__hid_tail_call__attach(skel);
- return ret < 0 ? ret : 0;
-}
-
-static inline void
-entrypoints_bpf__detach(struct entrypoints_bpf *skel)
-{
- skel_closenz(skel->links.hid_tail_call_fd);
-}
-static void
-entrypoints_bpf__destroy(struct entrypoints_bpf *skel)
-{
- if (!skel)
- return;
- entrypoints_bpf__detach(skel);
- skel_closenz(skel->progs.hid_tail_call.prog_fd);
- skel_closenz(skel->maps.hid_jmp_table.map_fd);
- skel_free(skel);
-}
-static inline struct entrypoints_bpf *
-entrypoints_bpf__open(void)
-{
- struct entrypoints_bpf *skel;
-
- skel = skel_alloc(sizeof(*skel));
- if (!skel)
- goto cleanup;
- skel->ctx.sz = (void *)&skel->links - (void *)skel;
- return skel;
-cleanup:
- entrypoints_bpf__destroy(skel);
- return NULL;
-}
-
-static inline int
-entrypoints_bpf__load(struct entrypoints_bpf *skel)
-{
- struct bpf_load_and_run_opts opts = {};
- int err;
-
- opts.ctx = (struct bpf_loader_ctx *)skel;
- opts.data_sz = 2856;
- opts.data = (void *)"\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x9f\xeb\x01\0\
-\x18\0\0\0\0\0\0\0\x60\x02\0\0\x60\x02\0\0\x12\x02\0\0\0\0\0\0\0\0\0\x02\x03\0\
-\0\0\x01\0\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\
-\0\0\x04\0\0\0\x03\0\0\0\x05\0\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\
-\x02\x06\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\0\x04\0\0\0\0\0\0\
-\0\0\0\x02\x08\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x04\0\0\0\0\
-\0\0\0\x04\0\0\x04\x20\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\
-\x40\0\0\0\x2a\0\0\0\x07\0\0\0\x80\0\0\0\x33\0\0\0\x07\0\0\0\xc0\0\0\0\x3e\0\0\
-\0\0\0\0\x0e\x09\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x0c\0\0\0\x4c\0\0\0\0\0\0\
-\x01\x08\0\0\0\x40\0\0\0\0\0\0\0\x01\0\0\x0d\x02\0\0\0\x5f\0\0\0\x0b\0\0\0\x63\
-\0\0\0\x01\0\0\x0c\x0d\0\0\0\x09\x01\0\0\x05\0\0\x04\x20\0\0\0\x15\x01\0\0\x10\
-\0\0\0\0\0\0\0\x1b\x01\0\0\x12\0\0\0\x40\0\0\0\x1f\x01\0\0\x10\0\0\0\x80\0\0\0\
-\x2e\x01\0\0\x14\0\0\0\xa0\0\0\0\0\0\0\0\x15\0\0\0\xc0\0\0\0\x3a\x01\0\0\0\0\0\
-\x08\x11\0\0\0\x40\x01\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x02\x13\
-\0\0\0\0\0\0\0\0\0\0\x0a\x1c\0\0\0\x4d\x01\0\0\x04\0\0\x06\x04\0\0\0\x5d\x01\0\
-\0\0\0\0\0\x6e\x01\0\0\x01\0\0\0\x80\x01\0\0\x02\0\0\0\x93\x01\0\0\x03\0\0\0\0\
-\0\0\0\x02\0\0\x05\x04\0\0\0\xa4\x01\0\0\x16\0\0\0\0\0\0\0\xab\x01\0\0\x16\0\0\
-\0\0\0\0\0\xb0\x01\0\0\0\0\0\x08\x02\0\0\0\xec\x01\0\0\0\0\0\x01\x01\0\0\0\x08\
-\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x17\0\0\0\x04\0\0\0\x04\0\0\0\xf1\x01\0\0\0\
-\0\0\x0e\x18\0\0\0\x01\0\0\0\xf9\x01\0\0\x01\0\0\x0f\x20\0\0\0\x0a\0\0\0\0\0\0\
-\0\x20\0\0\0\xff\x01\0\0\x01\0\0\x0f\x04\0\0\0\x19\0\0\0\0\0\0\0\x04\0\0\0\x07\
-\x02\0\0\0\0\0\x07\0\0\0\0\0\x69\x6e\x74\0\x5f\x5f\x41\x52\x52\x41\x59\x5f\x53\
-\x49\x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x74\x79\x70\x65\0\x6d\x61\x78\x5f\
-\x65\x6e\x74\x72\x69\x65\x73\0\x6b\x65\x79\x5f\x73\x69\x7a\x65\0\x76\x61\x6c\
-\x75\x65\x5f\x73\x69\x7a\x65\0\x68\x69\x64\x5f\x6a\x6d\x70\x5f\x74\x61\x62\x6c\
-\x65\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\x6e\x67\x20\x6c\x6f\x6e\x67\
-\0\x63\x74\x78\0\x68\x69\x64\x5f\x74\x61\x69\x6c\x5f\x63\x61\x6c\x6c\0\x66\x6d\
-\x6f\x64\x5f\x72\x65\x74\x2f\x5f\x5f\x68\x69\x64\x5f\x62\x70\x66\x5f\x74\x61\
-\x69\x6c\x5f\x63\x61\x6c\x6c\0\x2f\x68\x6f\x6d\x65\x2f\x62\x74\x69\x73\x73\x6f\
-\x69\x72\x2f\x53\x72\x63\x2f\x68\x69\x64\x2f\x64\x72\x69\x76\x65\x72\x73\x2f\
-\x68\x69\x64\x2f\x62\x70\x66\x2f\x65\x6e\x74\x72\x79\x70\x6f\x69\x6e\x74\x73\
-\x2f\x65\x6e\x74\x72\x79\x70\x6f\x69\x6e\x74\x73\x2e\x62\x70\x66\x2e\x63\0\x69\
-\x6e\x74\x20\x42\x50\x46\x5f\x50\x52\x4f\x47\x28\x68\x69\x64\x5f\x74\x61\x69\
-\x6c\x5f\x63\x61\x6c\x6c\x2c\x20\x73\x74\x72\x75\x63\x74\x20\x68\x69\x64\x5f\
-\x62\x70\x66\x5f\x63\x74\x78\x20\x2a\x68\x63\x74\x78\x29\0\x68\x69\x64\x5f\x62\
-\x70\x66\x5f\x63\x74\x78\0\x69\x6e\x64\x65\x78\0\x68\x69\x64\0\x61\x6c\x6c\x6f\
-\x63\x61\x74\x65\x64\x5f\x73\x69\x7a\x65\0\x72\x65\x70\x6f\x72\x74\x5f\x74\x79\
-\x70\x65\0\x5f\x5f\x75\x33\x32\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\
-\x74\0\x68\x69\x64\x5f\x72\x65\x70\x6f\x72\x74\x5f\x74\x79\x70\x65\0\x48\x49\
-\x44\x5f\x49\x4e\x50\x55\x54\x5f\x52\x45\x50\x4f\x52\x54\0\x48\x49\x44\x5f\x4f\
-\x55\x54\x50\x55\x54\x5f\x52\x45\x50\x4f\x52\x54\0\x48\x49\x44\x5f\x46\x45\x41\
-\x54\x55\x52\x45\x5f\x52\x45\x50\x4f\x52\x54\0\x48\x49\x44\x5f\x52\x45\x50\x4f\
-\x52\x54\x5f\x54\x59\x50\x45\x53\0\x72\x65\x74\x76\x61\x6c\0\x73\x69\x7a\x65\0\
-\x5f\x5f\x73\x33\x32\0\x30\x3a\x30\0\x09\x62\x70\x66\x5f\x74\x61\x69\x6c\x5f\
-\x63\x61\x6c\x6c\x28\x63\x74\x78\x2c\x20\x26\x68\x69\x64\x5f\x6a\x6d\x70\x5f\
-\x74\x61\x62\x6c\x65\x2c\x20\x68\x63\x74\x78\x2d\x3e\x69\x6e\x64\x65\x78\x29\
-\x3b\0\x63\x68\x61\x72\0\x4c\x49\x43\x45\x4e\x53\x45\0\x2e\x6d\x61\x70\x73\0\
-\x6c\x69\x63\x65\x6e\x73\x65\0\x68\x69\x64\x5f\x64\x65\x76\x69\x63\x65\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x8a\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\
-\0\0\0\x04\0\0\0\x04\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x69\x64\x5f\
-\x6a\x6d\x70\x5f\x74\x61\x62\x6c\x65\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\x79\x12\0\0\0\0\0\0\x61\x23\0\0\0\0\
-\0\0\x18\x52\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x85\0\0\0\x0c\0\0\0\xb7\0\0\0\0\0\0\0\
-\x95\0\0\0\0\0\0\0\0\0\0\0\x0e\0\0\0\0\0\0\0\x8e\0\0\0\xd3\0\0\0\x05\x48\0\0\
-\x01\0\0\0\x8e\0\0\0\xba\x01\0\0\x02\x50\0\0\x05\0\0\0\x8e\0\0\0\xd3\0\0\0\x05\
-\x48\0\0\x08\0\0\0\x0f\0\0\0\xb6\x01\0\0\0\0\0\0\x1a\0\0\0\x07\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x69\
-\x64\x5f\x74\x61\x69\x6c\x5f\x63\x61\x6c\x6c\0\0\0\0\0\0\0\x1a\0\0\0\0\0\0\0\
-\x08\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\x10\0\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\x01\0\
-\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x5f\
-\x5f\x68\x69\x64\x5f\x62\x70\x66\x5f\x74\x61\x69\x6c\x5f\x63\x61\x6c\x6c\0\0\0\
-\0\0";
- opts.insns_sz = 1192;
- opts.insns = (void *)"\
-\xbf\x16\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\x78\xff\xff\xff\xb7\x02\0\
-\0\x88\0\0\0\xb7\x03\0\0\0\0\0\0\x85\0\0\0\x71\0\0\0\x05\0\x11\0\0\0\0\0\x61\
-\xa1\x78\xff\0\0\0\0\xd5\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa1\x7c\xff\
-\0\0\0\0\xd5\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa1\x80\xff\0\0\0\0\xd5\
-\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\
-\x01\0\0\0\0\0\0\xd5\x01\x02\0\0\0\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\
-\xbf\x70\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\x61\x60\x08\0\0\0\0\0\x18\x61\0\0\0\0\0\
-\0\0\0\0\0\xa8\x09\0\0\x63\x01\0\0\0\0\0\0\x61\x60\x0c\0\0\0\0\0\x18\x61\0\0\0\
-\0\0\0\0\0\0\0\xa4\x09\0\0\x63\x01\0\0\0\0\0\0\x79\x60\x10\0\0\0\0\0\x18\x61\0\
-\0\0\0\0\0\0\0\0\0\x98\x09\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\
-\0\x05\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x90\x09\0\0\x7b\x01\0\0\0\0\0\0\xb7\x01\
-\0\0\x12\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\x90\x09\0\0\xb7\x03\0\0\x1c\0\0\0\
-\x85\0\0\0\xa6\0\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\xd7\xff\0\0\0\0\x63\x7a\x78\
-\xff\0\0\0\0\x61\x60\x1c\0\0\0\0\0\x15\0\x03\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\
-\0\0\xbc\x09\0\0\x63\x01\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x18\x62\0\0\0\0\0\0\0\
-\0\0\0\xb0\x09\0\0\xb7\x03\0\0\x48\0\0\0\x85\0\0\0\xa6\0\0\0\xbf\x07\0\0\0\0\0\
-\0\xc5\x07\xca\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x63\x71\0\0\0\0\
-\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\xf8\x09\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x90\
-\x0a\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\x0a\0\0\x18\x61\0\0\
-\0\0\0\0\0\0\0\0\x88\x0a\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\
-\x38\x0a\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xd0\x0a\0\0\x7b\x01\0\0\0\0\0\0\x18\
-\x60\0\0\0\0\0\0\0\0\0\0\x40\x0a\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xe0\x0a\0\0\
-\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x70\x0a\0\0\x18\x61\0\0\0\0\0\
-\0\0\0\0\0\0\x0b\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\x18\x61\0\0\0\0\0\0\0\0\0\0\xf8\x0a\0\0\x7b\x01\0\0\0\0\0\0\x61\x60\x08\0\0\0\
-\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x98\x0a\0\0\x63\x01\0\0\0\0\0\0\x61\x60\x0c\0\
-\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x9c\x0a\0\0\x63\x01\0\0\0\0\0\0\x79\x60\
-\x10\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xa0\x0a\0\0\x7b\x01\0\0\0\0\0\0\x61\
-\xa0\x78\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xc8\x0a\0\0\x63\x01\0\0\0\0\0\
-\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x10\x0b\0\0\xb7\x02\0\0\x14\0\0\0\xb7\x03\0\0\
-\x0c\0\0\0\xb7\x04\0\0\0\0\0\0\x85\0\0\0\xa7\0\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\
-\x91\xff\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x80\x0a\0\0\x63\x70\x6c\0\0\0\0\0\
-\x77\x07\0\0\x20\0\0\0\x63\x70\x70\0\0\0\0\0\xb7\x01\0\0\x05\0\0\0\x18\x62\0\0\
-\0\0\0\0\0\0\0\0\x80\x0a\0\0\xb7\x03\0\0\x8c\0\0\0\x85\0\0\0\xa6\0\0\0\xbf\x07\
-\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\xf0\x0a\0\0\x61\x01\0\0\0\0\0\0\xd5\
-\x01\x02\0\0\0\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\xc5\x07\x7f\xff\0\0\
-\0\0\x63\x7a\x80\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\xd5\x01\x02\0\0\0\0\0\xbf\
-\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa0\x80\xff\0\0\0\0\x63\x06\x28\0\0\0\
-\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\x10\0\0\0\0\0\0\x63\x06\x18\0\0\0\
-\0\0\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0";
- err = bpf_load_and_run(&opts);
- if (err < 0)
- return err;
- return 0;
-}
-
-static inline struct entrypoints_bpf *
-entrypoints_bpf__open_and_load(void)
-{
- struct entrypoints_bpf *skel;
-
- skel = entrypoints_bpf__open();
- if (!skel)
- return NULL;
- if (entrypoints_bpf__load(skel)) {
- entrypoints_bpf__destroy(skel);
- return NULL;
- }
- return skel;
-}
-
-__attribute__((unused)) static void
-entrypoints_bpf__assert(struct entrypoints_bpf *s __attribute__((unused)))
-{
-#ifdef __cplusplus
-#define _Static_assert static_assert
-#endif
-#ifdef __cplusplus
-#undef _Static_assert
-#endif
-}
-
-#endif /* __ENTRYPOINTS_BPF_SKEL_H__ */
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 10289f44d0cc..a272a086c950 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -3,7 +3,7 @@
/*
* HID-BPF support for Linux
*
- * Copyright (c) 2022 Benjamin Tissoires
+ * Copyright (c) 2022-2024 Benjamin Tissoires
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -17,47 +17,25 @@
#include <linux/kfifo.h>
#include <linux/minmax.h>
#include <linux/module.h>
-#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"
-#include "entrypoints/entrypoints.lskel.h"
-struct hid_bpf_ops *hid_bpf_ops;
-EXPORT_SYMBOL(hid_bpf_ops);
-
-/**
- * hid_bpf_device_event - Called whenever an event is coming in from the device
- *
- * @ctx: The HID-BPF context
- *
- * @return %0 on success and keep processing; a positive value to change the
- * incoming size buffer; a negative error code to interrupt the processing
- * of this event
- *
- * Declare an %fmod_ret tracing bpf program to this function and attach this
- * program through hid_bpf_attach_prog() to have this helper called for
- * any incoming event from the device itself.
- *
- * The function is called while on IRQ context, so we can not sleep.
- */
-/* never used by the kernel but declared so we can load and attach a tracepoint */
-__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
-{
- return 0;
-}
+struct hid_ops *hid_ops;
+EXPORT_SYMBOL(hid_ops);
u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
- u32 *size, int interrupt)
+ u32 *size, int interrupt, u64 source, bool from_bpf)
{
struct hid_bpf_ctx_kern ctx_kern = {
.ctx = {
.hid = hdev,
- .report_type = type,
.allocated_size = hdev->bpf.allocated_data,
.size = *size,
},
.data = hdev->bpf.device_data,
+ .from_bpf = from_bpf,
};
+ struct hid_bpf_ops *e;
int ret;
if (type >= HID_REPORT_TYPES)
@@ -70,10 +48,22 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
memcpy(ctx_kern.data, data, *size);
- ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
- if (ret < 0)
- return ERR_PTR(ret);
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
+ if (e->hid_device_event) {
+ ret = e->hid_device_event(&ctx_kern.ctx, type, source);
+ if (ret < 0) {
+ rcu_read_unlock();
+ return ERR_PTR(ret);
+ }
+
+ if (ret)
+ ctx_kern.ctx.size = ret;
+ }
+ }
+ rcu_read_unlock();
+ ret = ctx_kern.ctx.size;
if (ret) {
if (ret > ctx_kern.ctx.allocated_size)
return ERR_PTR(-EINVAL);
@@ -85,25 +75,78 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
-/**
- * hid_bpf_rdesc_fixup - Called when the probe function parses the report
- * descriptor of the HID device
- *
- * @ctx: The HID-BPF context
- *
- * @return 0 on success and keep processing; a positive value to change the
- * incoming size buffer; a negative error code to interrupt the processing
- * of this event
- *
- * Declare an %fmod_ret tracing bpf program to this function and attach this
- * program through hid_bpf_attach_prog() to have this helper called before any
- * parsing of the report descriptor by HID.
- */
-/* never used by the kernel but declared so we can load and attach a tracepoint */
-__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
+int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf)
{
- return 0;
+ struct hid_bpf_ctx_kern ctx_kern = {
+ .ctx = {
+ .hid = hdev,
+ .allocated_size = size,
+ .size = size,
+ },
+ .data = buf,
+ .from_bpf = from_bpf,
+ };
+ struct hid_bpf_ops *e;
+ int ret, idx;
+
+ if (rtype >= HID_REPORT_TYPES)
+ return -EINVAL;
+
+ idx = srcu_read_lock(&hdev->bpf.srcu);
+ list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
+ srcu_read_lock_held(&hdev->bpf.srcu)) {
+ if (!e->hid_hw_request)
+ continue;
+
+ ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);
+ if (ret)
+ goto out;
+ }
+ ret = 0;
+
+out:
+ srcu_read_unlock(&hdev->bpf.srcu, idx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dispatch_hid_bpf_raw_requests);
+
+int dispatch_hid_bpf_output_report(struct hid_device *hdev,
+ __u8 *buf, u32 size, u64 source,
+ bool from_bpf)
+{
+ struct hid_bpf_ctx_kern ctx_kern = {
+ .ctx = {
+ .hid = hdev,
+ .allocated_size = size,
+ .size = size,
+ },
+ .data = buf,
+ .from_bpf = from_bpf,
+ };
+ struct hid_bpf_ops *e;
+ int ret, idx;
+
+ idx = srcu_read_lock(&hdev->bpf.srcu);
+ list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
+ srcu_read_lock_held(&hdev->bpf.srcu)) {
+ if (!e->hid_hw_output_report)
+ continue;
+
+ ret = e->hid_hw_output_report(&ctx_kern.ctx, source);
+ if (ret)
+ goto out;
+ }
+ ret = 0;
+
+out:
+ srcu_read_unlock(&hdev->bpf.srcu, idx);
+ return ret;
}
+EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report);
u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
{
@@ -116,13 +159,16 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
},
};
+ if (!hdev->bpf.rdesc_ops)
+ goto ignore_bpf;
+
ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
if (!ctx_kern.data)
goto ignore_bpf;
memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
- ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
+ ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
if (ret < 0)
goto ignore_bpf;
@@ -150,6 +196,25 @@ static int device_match_id(struct device *dev, const void *id)
return hdev->id == *(int *)id;
}
+struct hid_device *hid_get_device(unsigned int hid_id)
+{
+ struct device *dev;
+
+ if (!hid_ops)
+ return ERR_PTR(-EINVAL);
+
+ dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ return to_hid_device(dev);
+}
+
+void hid_put_device(struct hid_device *hid)
+{
+ put_device(&hid->dev);
+}
+
static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
{
u8 *alloc_data;
@@ -186,7 +251,7 @@ static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size
return 0;
}
-static int hid_bpf_allocate_event_data(struct hid_device *hdev)
+int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
/* hdev->bpf.device_data is already allocated, abort */
if (hdev->bpf.device_data)
@@ -203,39 +268,6 @@ int hid_bpf_reconnect(struct hid_device *hdev)
return 0;
}
-static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
- __u32 flags)
-{
- int fd, err, prog_type;
-
- prog_type = hid_bpf_get_prog_attach_type(prog);
- if (prog_type < 0)
- return prog_type;
-
- if (prog_type >= HID_BPF_PROG_TYPE_MAX)
- return -EINVAL;
-
- if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
- err = hid_bpf_allocate_event_data(hdev);
- if (err)
- return err;
- }
-
- fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
- if (fd < 0)
- return fd;
-
- if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
- err = hid_bpf_reconnect(hdev);
- if (err) {
- close_fd(fd);
- return err;
- }
- }
-
- return fd;
-}
-
/* Disables missing prototype warnings */
__bpf_kfunc_start_defs();
@@ -265,63 +297,6 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr
}
/**
- * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
- *
- * @hid_id: the system unique identifier of the HID device
- * @prog_fd: an fd in the user process representing the program to attach
- * @flags: any logical OR combination of &enum hid_bpf_attach_flags
- *
- * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
- * Closing this fd will detach the program from the HID device (unless the bpf_link
- * is pinned to the BPF file system).
- */
-/* called from syscall */
-__bpf_kfunc int
-hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
-{
- struct hid_device *hdev;
- struct bpf_prog *prog;
- struct device *dev;
- int err, fd;
-
- if (!hid_bpf_ops)
- return -EINVAL;
-
- if ((flags & ~HID_BPF_FLAG_MASK))
- return -EINVAL;
-
- dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
- if (!dev)
- return -EINVAL;
-
- hdev = to_hid_device(dev);
-
- /*
- * take a ref on the prog itself, it will be released
- * on errors or when it'll be detached
- */
- prog = bpf_prog_get(prog_fd);
- if (IS_ERR(prog)) {
- err = PTR_ERR(prog);
- goto out_dev_put;
- }
-
- fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
- if (fd < 0) {
- err = fd;
- goto out_prog_put;
- }
-
- return fd;
-
- out_prog_put:
- bpf_prog_put(prog);
- out_dev_put:
- put_device(dev);
- return err;
-}
-
-/**
* hid_bpf_allocate_context - Allocate a context to the given HID device
*
* @hid_id: the system unique identifier of the HID device
@@ -333,20 +308,14 @@ hid_bpf_allocate_context(unsigned int hid_id)
{
struct hid_device *hdev;
struct hid_bpf_ctx_kern *ctx_kern = NULL;
- struct device *dev;
- if (!hid_bpf_ops)
+ hdev = hid_get_device(hid_id);
+ if (IS_ERR(hdev))
return NULL;
- dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
- if (!dev)
- return NULL;
-
- hdev = to_hid_device(dev);
-
ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
if (!ctx_kern) {
- put_device(dev);
+ hid_put_device(hdev);
return NULL;
}
@@ -373,7 +342,7 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
kfree(ctx_kern);
/* get_device() is called by bus_find_device() */
- put_device(&hid->dev);
+ hid_put_device(hid);
}
static int
@@ -386,7 +355,7 @@ __hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
u32 report_len;
/* check arguments */
- if (!ctx || !hid_bpf_ops || !buf)
+ if (!ctx || !hid_ops || !buf)
return -EINVAL;
switch (rtype) {
@@ -404,7 +373,7 @@ __hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
hdev = (struct hid_device *)ctx->hid; /* discard const */
report_enum = hdev->report_enum + rtype;
- report = hid_bpf_ops->hid_get_report(report_enum, buf);
+ report = hid_ops->hid_get_report(report_enum, buf);
if (!report)
return -EINVAL;
@@ -431,11 +400,17 @@ __bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
enum hid_report_type rtype, enum hid_class_request reqtype)
{
+ struct hid_bpf_ctx_kern *ctx_kern;
struct hid_device *hdev;
size_t size = buf__sz;
u8 *dma_data;
int ret;
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+
+ if (ctx_kern->from_bpf)
+ return -EDEADLOCK;
+
/* check arguments */
ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
if (ret)
@@ -459,12 +434,14 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
if (!dma_data)
return -ENOMEM;
- ret = hid_bpf_ops->hid_hw_raw_request(hdev,
+ ret = hid_ops->hid_hw_raw_request(hdev,
dma_data[0],
dma_data,
size,
rtype,
- reqtype);
+ reqtype,
+ (u64)(long)ctx,
+ true); /* prevent infinite recursions */
if (ret > 0)
memcpy(buf, dma_data, ret);
@@ -485,11 +462,16 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
+ struct hid_bpf_ctx_kern *ctx_kern;
struct hid_device *hdev;
size_t size = buf__sz;
u8 *dma_data;
int ret;
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+ if (ctx_kern->from_bpf)
+ return -EDEADLOCK;
+
/* check arguments */
ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
if (ret)
@@ -501,14 +483,56 @@ hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
if (!dma_data)
return -ENOMEM;
- ret = hid_bpf_ops->hid_hw_output_report(hdev,
- dma_data,
- size);
+ ret = hid_ops->hid_hw_output_report(hdev, dma_data, size, (u64)(long)ctx, true);
kfree(dma_data);
return ret;
}
+static int
+__hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
+ size_t size, bool lock_already_taken)
+{
+ struct hid_bpf_ctx_kern *ctx_kern;
+ int ret;
+
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+ if (ctx_kern->from_bpf)
+ return -EDEADLOCK;
+
+ /* check arguments */
+ ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
+ if (ret)
+ return ret;
+
+ return hid_ops->hid_input_report(ctx->hid, type, buf, size, 0, (u64)(long)ctx, true,
+ lock_already_taken);
+}
+
+/**
+ * hid_bpf_try_input_report - Inject a HID report in the kernel from a HID device
+ *
+ * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
+ * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
+ * @buf: a %PTR_TO_MEM buffer
+ * @buf__sz: the size of the data to transfer
+ *
+ * Returns %0 on success, a negative error code otherwise. This function will immediately
+ * fail if the device is not available, and can therefore be used safely in IRQ context.
+ */
+__bpf_kfunc int
+hid_bpf_try_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
+ const size_t buf__sz)
+{
+ struct hid_bpf_ctx_kern *ctx_kern;
+ bool from_hid_event_hook;
+
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+ from_hid_event_hook = ctx_kern->data && ctx_kern->data == ctx->hid->bpf.device_data;
+
+ return __hid_bpf_input_report(ctx, type, buf, buf__sz, from_hid_event_hook);
+}
+
/**
* hid_bpf_input_report - Inject a HID report in the kernel from a HID device
*
@@ -517,24 +541,26 @@ hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
* @buf: a %PTR_TO_MEM buffer
* @buf__sz: the size of the data to transfer
*
- * Returns %0 on success, a negative error code otherwise.
+ * Returns %0 on success, a negative error code otherwise. This function will wait for the
+ * device to be available before injecting the event, and thus needs to be called from a
+ * sleepable context.
*/
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
const size_t buf__sz)
{
- struct hid_device *hdev;
- size_t size = buf__sz;
int ret;
- /* check arguments */
- ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
+ ret = down_interruptible(&ctx->hid->driver_input_lock);
if (ret)
return ret;
- hdev = (struct hid_device *)ctx->hid; /* discard const */
+ /* check arguments */
+ ret = __hid_bpf_input_report(ctx, type, buf, buf__sz, true /* lock_already_taken */);
- return hid_bpf_ops->hid_input_report(hdev, type, buf, size, 0);
+ up(&ctx->hid->driver_input_lock);
+
+ return ret;
}
__bpf_kfunc_end_defs();
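The from_bpf flag threaded through hid_bpf_ctx_kern above acts as a recursion guard: a hook that was itself entered from BPF gets -EDEADLOCK if it tries to re-enter hid_bpf_hw_request(), hid_bpf_hw_output_report() or report injection on the same device. For injection there are now two kfuncs: hid_bpf_input_report() stays sleepable (it waits on driver_input_lock), while hid_bpf_try_input_report() fails fast and is the one to use from IRQ-time hooks. A short usage sketch from BPF, with buf standing in for a hypothetical report buffer of the right size:

	/* from a sleepable program (e.g. driven by a timer or a hidraw write): */
	err = hid_bpf_input_report(hctx, HID_INPUT_REPORT, buf, sizeof(buf));

	/* from the hid_device_event hook itself, which must never block: */
	err = hid_bpf_try_input_report(hctx, HID_INPUT_REPORT, buf, sizeof(buf));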
@@ -549,6 +575,7 @@ BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, hid_bpf_try_input_report)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
@@ -556,21 +583,8 @@ static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
.set = &hid_bpf_kfunc_ids,
};
-/* our HID-BPF entrypoints */
-BTF_SET8_START(hid_bpf_fmodret_ids)
-BTF_ID_FLAGS(func, hid_bpf_device_event)
-BTF_ID_FLAGS(func, hid_bpf_rdesc_fixup)
-BTF_ID_FLAGS(func, __hid_bpf_tail_call)
-BTF_SET8_END(hid_bpf_fmodret_ids)
-
-static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
- .owner = THIS_MODULE,
- .set = &hid_bpf_fmodret_ids,
-};
-
/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
-BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
@@ -585,14 +599,20 @@ static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
int hid_bpf_connect_device(struct hid_device *hdev)
{
- struct hid_bpf_prog_list *prog_list;
+ bool need_to_allocate = false;
+ struct hid_bpf_ops *e;
rcu_read_lock();
- prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
+ list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
+ if (e->hid_device_event) {
+ need_to_allocate = true;
+ break;
+ }
+ }
rcu_read_unlock();
/* only allocate BPF data if there are programs attached */
- if (!prog_list)
+ if (!need_to_allocate)
return 0;
return hid_bpf_allocate_event_data(hdev);
@@ -615,13 +635,18 @@ void hid_bpf_destroy_device(struct hid_device *hdev)
/* mark the device as destroyed in bpf so we don't reattach it */
hdev->bpf.destroyed = true;
- __hid_bpf_destroy_device(hdev);
+ __hid_bpf_ops_destroy_device(hdev);
+
+ synchronize_srcu(&hdev->bpf.srcu);
+ cleanup_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
-void hid_bpf_device_init(struct hid_device *hdev)
+int hid_bpf_device_init(struct hid_device *hdev)
{
- spin_lock_init(&hdev->bpf.progs_lock);
+ INIT_LIST_HEAD(&hdev->bpf.prog_list);
+ mutex_init(&hdev->bpf.prog_list_lock);
+ return init_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
@@ -632,30 +657,15 @@ static int __init hid_bpf_init(void)
/* Note: if we exit with an error any time here, we would entirely break HID, which
* is probably not something we want. So we log an error and return success.
*
- * This is not a big deal: the syscall allowing to attach a BPF program to a HID device
- * will not be available, so nobody will be able to use the functionality.
+ * This is not a big deal: nobody will be able to use the functionality.
*/
- err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
- if (err) {
- pr_warn("error while registering fmodret entrypoints: %d", err);
- return 0;
- }
-
- err = hid_bpf_preload_skel();
- if (err) {
- pr_warn("error while preloading HID BPF dispatcher: %d", err);
- return 0;
- }
-
- /* register tracing kfuncs after we are sure we can load our preloaded bpf program */
- err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
+ err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
if (err) {
pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
return 0;
}
- /* register syscalls after we are sure we can load our preloaded bpf program */
err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
if (err) {
pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
@@ -665,15 +675,6 @@ static int __init hid_bpf_init(void)
return 0;
}
-static void __exit hid_bpf_exit(void)
-{
- /* HID depends on us, so if we hit that code, we are guaranteed that hid
- * has been removed and thus we do not need to clear the HID devices
- */
- hid_bpf_free_links_and_skel();
-}
-
late_initcall(hid_bpf_init);
-module_exit(hid_bpf_exit);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");
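With the hid_bpf_attach_prog() kfunc gone, attaching follows the generic struct_ops flow from user space: set hid_id on the struct_ops map before load, then attach the map as a BPF link. A sketch of the libbpf side, assuming a hypothetical skeleton my_hid.skel.h generated from the BPF object sketched earlier (error handling trimmed):

#include <bpf/libbpf.h>
#include "my_hid.skel.h"		/* hypothetical skeleton name */

static int attach_to_hid(int hid_id)
{
	struct my_hid *skel;
	struct bpf_link *link;

	skel = my_hid__open();
	if (!skel)
		return -ENOMEM;

	/* consumed kernel-side by hid_bpf_ops_init_member() during load */
	skel->struct_ops.my_ops->hid_id = hid_id;

	if (my_hid__load(skel))
		goto err;

	link = bpf_map__attach_struct_ops(skel->maps.my_ops);
	if (!link)
		goto err;

	/* keeping (or pinning) the link keeps the hooks attached */
	return 0;
err:
	my_hid__destroy(skel);
	return -EINVAL;
}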
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.h b/drivers/hid/bpf/hid_bpf_dispatch.h
index fbe0639d09f2..44c6ea22233f 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.h
+++ b/drivers/hid/bpf/hid_bpf_dispatch.h
@@ -8,16 +8,13 @@
struct hid_bpf_ctx_kern {
struct hid_bpf_ctx ctx;
u8 *data;
+ bool from_bpf;
};
-int hid_bpf_preload_skel(void);
-void hid_bpf_free_links_and_skel(void);
-int hid_bpf_get_prog_attach_type(struct bpf_prog *prog);
-int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd,
- struct bpf_prog *prog, __u32 flags);
-void __hid_bpf_destroy_device(struct hid_device *hdev);
-int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
- struct hid_bpf_ctx_kern *ctx_kern);
+struct hid_device *hid_get_device(unsigned int hid_id);
+void hid_put_device(struct hid_device *hid);
+int hid_bpf_allocate_event_data(struct hid_device *hdev);
+void __hid_bpf_ops_destroy_device(struct hid_device *hdev);
int hid_bpf_reconnect(struct hid_device *hdev);
struct bpf_prog;
diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c
deleted file mode 100644
index aa8e1c79cdf5..000000000000
--- a/drivers/hid/bpf/hid_bpf_jmp_table.c
+++ /dev/null
@@ -1,565 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-/*
- * HID-BPF support for Linux
- *
- * Copyright (c) 2022 Benjamin Tissoires
- */
-
-#include <linux/bitops.h>
-#include <linux/btf.h>
-#include <linux/btf_ids.h>
-#include <linux/circ_buf.h>
-#include <linux/filter.h>
-#include <linux/hid.h>
-#include <linux/hid_bpf.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/workqueue.h>
-#include "hid_bpf_dispatch.h"
-#include "entrypoints/entrypoints.lskel.h"
-
-#define HID_BPF_MAX_PROGS 1024 /* keep this in sync with preloaded bpf,
- * needs to be a power of 2 as we use it as
- * a circular buffer
- */
-
-#define NEXT(idx) (((idx) + 1) & (HID_BPF_MAX_PROGS - 1))
-#define PREV(idx) (((idx) - 1) & (HID_BPF_MAX_PROGS - 1))
-
-/*
- * represents one attached program stored in the hid jump table
- */
-struct hid_bpf_prog_entry {
- struct bpf_prog *prog;
- struct hid_device *hdev;
- enum hid_bpf_prog_type type;
- u16 idx;
-};
-
-struct hid_bpf_jmp_table {
- struct bpf_map *map;
- struct hid_bpf_prog_entry entries[HID_BPF_MAX_PROGS]; /* compacted list, circular buffer */
- int tail, head;
- struct bpf_prog *progs[HID_BPF_MAX_PROGS]; /* idx -> progs mapping */
- unsigned long enabled[BITS_TO_LONGS(HID_BPF_MAX_PROGS)];
-};
-
-#define FOR_ENTRIES(__i, __start, __end) \
- for (__i = __start; CIRC_CNT(__end, __i, HID_BPF_MAX_PROGS); __i = NEXT(__i))
-
-static struct hid_bpf_jmp_table jmp_table;
-
-static DEFINE_MUTEX(hid_bpf_attach_lock); /* held when attaching/detaching programs */
-
-static void hid_bpf_release_progs(struct work_struct *work);
-
-static DECLARE_WORK(release_work, hid_bpf_release_progs);
-
-BTF_ID_LIST(hid_bpf_btf_ids)
-BTF_ID(func, hid_bpf_device_event) /* HID_BPF_PROG_TYPE_DEVICE_EVENT */
-BTF_ID(func, hid_bpf_rdesc_fixup) /* HID_BPF_PROG_TYPE_RDESC_FIXUP */
-
-static int hid_bpf_max_programs(enum hid_bpf_prog_type type)
-{
- switch (type) {
- case HID_BPF_PROG_TYPE_DEVICE_EVENT:
- return HID_BPF_MAX_PROGS_PER_DEV;
- case HID_BPF_PROG_TYPE_RDESC_FIXUP:
- return 1;
- default:
- return -EINVAL;
- }
-}
-
-static int hid_bpf_program_count(struct hid_device *hdev,
- struct bpf_prog *prog,
- enum hid_bpf_prog_type type)
-{
- int i, n = 0;
-
- if (type >= HID_BPF_PROG_TYPE_MAX)
- return -EINVAL;
-
- FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
- struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
-
- if (type != HID_BPF_PROG_TYPE_UNDEF && entry->type != type)
- continue;
-
- if (hdev && entry->hdev != hdev)
- continue;
-
- if (prog && entry->prog != prog)
- continue;
-
- n++;
- }
-
- return n;
-}
-
-__weak noinline int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx)
-{
- return 0;
-}
-
-int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
- struct hid_bpf_ctx_kern *ctx_kern)
-{
- struct hid_bpf_prog_list *prog_list;
- int i, idx, err = 0;
-
- rcu_read_lock();
- prog_list = rcu_dereference(hdev->bpf.progs[type]);
-
- if (!prog_list)
- goto out_unlock;
-
- for (i = 0; i < prog_list->prog_cnt; i++) {
- idx = prog_list->prog_idx[i];
-
- if (!test_bit(idx, jmp_table.enabled))
- continue;
-
- ctx_kern->ctx.index = idx;
- err = __hid_bpf_tail_call(&ctx_kern->ctx);
- if (err < 0)
- break;
- if (err)
- ctx_kern->ctx.retval = err;
- }
-
- out_unlock:
- rcu_read_unlock();
-
- return err;
-}
-
-/*
- * assign the list of programs attached to a given hid device.
- */
-static void __hid_bpf_set_hdev_progs(struct hid_device *hdev, struct hid_bpf_prog_list *new_list,
- enum hid_bpf_prog_type type)
-{
- struct hid_bpf_prog_list *old_list;
-
- spin_lock(&hdev->bpf.progs_lock);
- old_list = rcu_dereference_protected(hdev->bpf.progs[type],
- lockdep_is_held(&hdev->bpf.progs_lock));
- rcu_assign_pointer(hdev->bpf.progs[type], new_list);
- spin_unlock(&hdev->bpf.progs_lock);
- synchronize_rcu();
-
- kfree(old_list);
-}
-
-/*
- * allocate and populate the list of programs attached to a given hid device.
- *
- * Must be called under lock.
- */
-static int hid_bpf_populate_hdev(struct hid_device *hdev, enum hid_bpf_prog_type type)
-{
- struct hid_bpf_prog_list *new_list;
- int i;
-
- if (type >= HID_BPF_PROG_TYPE_MAX || !hdev)
- return -EINVAL;
-
- if (hdev->bpf.destroyed)
- return 0;
-
- new_list = kzalloc(sizeof(*new_list), GFP_KERNEL);
- if (!new_list)
- return -ENOMEM;
-
- FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
- struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
-
- if (entry->type == type && entry->hdev == hdev &&
- test_bit(entry->idx, jmp_table.enabled))
- new_list->prog_idx[new_list->prog_cnt++] = entry->idx;
- }
-
- __hid_bpf_set_hdev_progs(hdev, new_list, type);
-
- return 0;
-}
-
-static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
-{
- skel_map_delete_elem(map_fd, &idx);
- jmp_table.progs[idx] = NULL;
-}
-
-static void hid_bpf_release_progs(struct work_struct *work)
-{
- int i, j, n, map_fd = -1;
- bool hdev_destroyed;
-
- if (!jmp_table.map)
- return;
-
- /* retrieve a fd of our prog_array map in BPF */
- map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
- if (map_fd < 0)
- return;
-
- mutex_lock(&hid_bpf_attach_lock); /* protects against attaching new programs */
-
- /* detach unused progs from HID devices */
- FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
- struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
- enum hid_bpf_prog_type type;
- struct hid_device *hdev;
-
- if (test_bit(entry->idx, jmp_table.enabled))
- continue;
-
- /* we have an attached prog */
- if (entry->hdev) {
- hdev = entry->hdev;
- type = entry->type;
- /*
- * hdev is still valid, even if we are called after hid_destroy_device():
- * when hid_bpf_attach() gets called, it takes a ref on the dev through
- * bus_find_device()
- */
- hdev_destroyed = hdev->bpf.destroyed;
-
- hid_bpf_populate_hdev(hdev, type);
-
- /* mark all other disabled progs from hdev of the given type as detached */
- FOR_ENTRIES(j, i, jmp_table.head) {
- struct hid_bpf_prog_entry *next;
-
- next = &jmp_table.entries[j];
-
- if (test_bit(next->idx, jmp_table.enabled))
- continue;
-
- if (next->hdev == hdev && next->type == type) {
- /*
- * clear the hdev reference and decrement the device ref
- * that was taken during bus_find_device() while calling
- * hid_bpf_attach()
- */
- next->hdev = NULL;
- put_device(&hdev->dev);
- }
- }
-
- /* if type was rdesc fixup and the device is not gone, reconnect device */
- if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP && !hdev_destroyed)
- hid_bpf_reconnect(hdev);
- }
- }
-
- /* remove all unused progs from the jump table */
- FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
- struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
-
- if (test_bit(entry->idx, jmp_table.enabled))
- continue;
-
- if (entry->prog)
- __hid_bpf_do_release_prog(map_fd, entry->idx);
- }
-
- /* compact the entry list */
- n = jmp_table.tail;
- FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
- struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
-
- if (!test_bit(entry->idx, jmp_table.enabled))
- continue;
-
- jmp_table.entries[n] = jmp_table.entries[i];
- n = NEXT(n);
- }
-
- jmp_table.head = n;
-
- mutex_unlock(&hid_bpf_attach_lock);
-
- if (map_fd >= 0)
- close_fd(map_fd);
-}
-
-static void hid_bpf_release_prog_at(int idx)
-{
- int map_fd = -1;
-
- /* retrieve a fd of our prog_array map in BPF */
- map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
- if (map_fd < 0)
- return;
-
- __hid_bpf_do_release_prog(map_fd, idx);
-
- close(map_fd);
-}
-
-/*
- * Insert the given BPF program represented by its fd in the jmp table.
- * Returns the index in the jump table or a negative error.
- */
-static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
-{
- int i, index = -1, map_fd = -1, err = -EINVAL;
-
- /* retrieve a fd of our prog_array map in BPF */
- map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
-
- if (map_fd < 0) {
- err = -EINVAL;
- goto out;
- }
-
- /* find the first available index in the jmp_table */
- for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
- if (!jmp_table.progs[i] && index < 0) {
- /* mark the index as used */
- jmp_table.progs[i] = prog;
- index = i;
- __set_bit(i, jmp_table.enabled);
- }
- }
- if (index < 0) {
- err = -ENOMEM;
- goto out;
- }
-
- /* insert the program in the jump table */
- err = skel_map_update_elem(map_fd, &index, &prog_fd, 0);
- if (err)
- goto out;
-
- /* return the index */
- err = index;
-
- out:
- if (err < 0)
- __hid_bpf_do_release_prog(map_fd, index);
- if (map_fd >= 0)
- close_fd(map_fd);
- return err;
-}
-
-int hid_bpf_get_prog_attach_type(struct bpf_prog *prog)
-{
- int prog_type = HID_BPF_PROG_TYPE_UNDEF;
- int i;
-
- for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
- if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
- prog_type = i;
- break;
- }
- }
-
- return prog_type;
-}
-
-static void hid_bpf_link_release(struct bpf_link *link)
-{
- struct hid_bpf_link *hid_link =
- container_of(link, struct hid_bpf_link, link);
-
- __clear_bit(hid_link->hid_table_index, jmp_table.enabled);
- schedule_work(&release_work);
-}
-
-static void hid_bpf_link_dealloc(struct bpf_link *link)
-{
- struct hid_bpf_link *hid_link =
- container_of(link, struct hid_bpf_link, link);
-
- kfree(hid_link);
-}
-
-static void hid_bpf_link_show_fdinfo(const struct bpf_link *link,
- struct seq_file *seq)
-{
- seq_printf(seq,
- "attach_type:\tHID-BPF\n");
-}
-
-static const struct bpf_link_ops hid_bpf_link_lops = {
- .release = hid_bpf_link_release,
- .dealloc = hid_bpf_link_dealloc,
- .show_fdinfo = hid_bpf_link_show_fdinfo,
-};
-
-/* called from syscall */
-noinline int
-__hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
- int prog_fd, struct bpf_prog *prog, __u32 flags)
-{
- struct bpf_link_primer link_primer;
- struct hid_bpf_link *link;
- struct hid_bpf_prog_entry *prog_entry;
- int cnt, err = -EINVAL, prog_table_idx = -1;
-
- mutex_lock(&hid_bpf_attach_lock);
-
- link = kzalloc(sizeof(*link), GFP_USER);
- if (!link) {
- err = -ENOMEM;
- goto err_unlock;
- }
-
- bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC,
- &hid_bpf_link_lops, prog);
-
- /* do not attach too many programs to a given HID device */
- cnt = hid_bpf_program_count(hdev, NULL, prog_type);
- if (cnt < 0) {
- err = cnt;
- goto err_unlock;
- }
-
- if (cnt >= hid_bpf_max_programs(prog_type)) {
- err = -E2BIG;
- goto err_unlock;
- }
-
- prog_table_idx = hid_bpf_insert_prog(prog_fd, prog);
- /* if the jmp table is full, abort */
- if (prog_table_idx < 0) {
- err = prog_table_idx;
- goto err_unlock;
- }
-
- if (flags & HID_BPF_FLAG_INSERT_HEAD) {
- /* take the previous prog_entry slot */
- jmp_table.tail = PREV(jmp_table.tail);
- prog_entry = &jmp_table.entries[jmp_table.tail];
- } else {
- /* take the next prog_entry slot */
- prog_entry = &jmp_table.entries[jmp_table.head];
- jmp_table.head = NEXT(jmp_table.head);
- }
-
- /* we steal the ref here */
- prog_entry->prog = prog;
- prog_entry->idx = prog_table_idx;
- prog_entry->hdev = hdev;
- prog_entry->type = prog_type;
-
- /* finally store the index in the device list */
- err = hid_bpf_populate_hdev(hdev, prog_type);
- if (err) {
- hid_bpf_release_prog_at(prog_table_idx);
- goto err_unlock;
- }
-
- link->hid_table_index = prog_table_idx;
-
- err = bpf_link_prime(&link->link, &link_primer);
- if (err)
- goto err_unlock;
-
- mutex_unlock(&hid_bpf_attach_lock);
-
- return bpf_link_settle(&link_primer);
-
- err_unlock:
- mutex_unlock(&hid_bpf_attach_lock);
-
- kfree(link);
-
- return err;
-}
-
-void __hid_bpf_destroy_device(struct hid_device *hdev)
-{
- int type, i;
- struct hid_bpf_prog_list *prog_list;
-
- rcu_read_lock();
-
- for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++) {
- prog_list = rcu_dereference(hdev->bpf.progs[type]);
-
- if (!prog_list)
- continue;
-
- for (i = 0; i < prog_list->prog_cnt; i++)
- __clear_bit(prog_list->prog_idx[i], jmp_table.enabled);
- }
-
- rcu_read_unlock();
-
- for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++)
- __hid_bpf_set_hdev_progs(hdev, NULL, type);
-
- /* schedule release of all detached progs */
- schedule_work(&release_work);
-}
-
-#define HID_BPF_PROGS_COUNT 1
-
-static struct bpf_link *links[HID_BPF_PROGS_COUNT];
-static struct entrypoints_bpf *skel;
-
-void hid_bpf_free_links_and_skel(void)
-{
- int i;
-
- /* the following is enough to release all programs attached to hid */
- if (jmp_table.map)
- bpf_map_put_with_uref(jmp_table.map);
-
- for (i = 0; i < ARRAY_SIZE(links); i++) {
- if (!IS_ERR_OR_NULL(links[i]))
- bpf_link_put(links[i]);
- }
- entrypoints_bpf__destroy(skel);
-}
-
-#define ATTACH_AND_STORE_LINK(__name) do { \
- err = entrypoints_bpf__##__name##__attach(skel); \
- if (err) \
- goto out; \
- \
- links[idx] = bpf_link_get_from_fd(skel->links.__name##_fd); \
- if (IS_ERR(links[idx])) { \
- err = PTR_ERR(links[idx]); \
- goto out; \
- } \
- \
- /* Avoid taking over stdin/stdout/stderr of init process. Zeroing out \
- * makes skel_closenz() a no-op later in iterators_bpf__destroy(). \
- */ \
- close_fd(skel->links.__name##_fd); \
- skel->links.__name##_fd = 0; \
- idx++; \
-} while (0)
-
-int hid_bpf_preload_skel(void)
-{
- int err, idx = 0;
-
- skel = entrypoints_bpf__open();
- if (!skel)
- return -ENOMEM;
-
- err = entrypoints_bpf__load(skel);
- if (err)
- goto out;
-
- jmp_table.map = bpf_map_get_with_uref(skel->maps.hid_jmp_table.map_fd);
- if (IS_ERR(jmp_table.map)) {
- err = PTR_ERR(jmp_table.map);
- goto out;
- }
-
- ATTACH_AND_STORE_LINK(hid_tail_call);
-
- return 0;
-out:
- hid_bpf_free_links_and_skel();
- return err;
-}
diff --git a/drivers/hid/bpf/hid_bpf_struct_ops.c b/drivers/hid/bpf/hid_bpf_struct_ops.c
new file mode 100644
index 000000000000..f59cce6e437f
--- /dev/null
+++ b/drivers/hid/bpf/hid_bpf_struct_ops.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * HID-BPF support for Linux
+ *
+ * Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include <linux/bitops.h>
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/filter.h>
+#include <linux/hid.h>
+#include <linux/hid_bpf.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/workqueue.h>
+#include "hid_bpf_dispatch.h"
+
+static struct btf *hid_bpf_ops_btf;
+
+static int hid_bpf_ops_init(struct btf *btf)
+{
+ hid_bpf_ops_btf = btf;
+ return 0;
+}
+
+static bool hid_bpf_ops_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
+}
+
+static int hid_bpf_ops_check_member(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog)
+{
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct hid_bpf_ops, hid_rdesc_fixup):
+ case offsetof(struct hid_bpf_ops, hid_hw_request):
+ case offsetof(struct hid_bpf_ops, hid_hw_output_report):
+ break;
+ default:
+ if (prog->sleepable)
+ return -EINVAL;
+ }
+
+ return 0;
+}
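hid_bpf_ops_check_member() only allows hid_rdesc_fixup, hid_hw_request and hid_hw_output_report to be implemented as sleepable programs; hid_device_event stays non-sleepable since it runs from IRQ context. On the BPF side that maps onto the usual libbpf convention of the ".s" section suffix for sleepable struct_ops programs, roughly (prototype hedged as in the earlier sketch):

SEC("struct_ops.s/hid_rdesc_fixup")	/* sleepable hook, permitted by check_member() */
int BPF_PROG(my_rdesc_fixup, struct hid_bpf_ctx *hctx)
{
	return 0;			/* 0 keeps the original report descriptor */
}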
+
+struct hid_bpf_offset_write_range {
+ const char *struct_name;
+ u32 struct_length;
+ u32 start;
+ u32 end;
+};
+
+static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size)
+{
+#define WRITE_RANGE(_name, _field, _is_string) \
+ { \
+ .struct_name = #_name, \
+ .struct_length = sizeof(struct _name), \
+ .start = offsetof(struct _name, _field), \
+ .end = offsetofend(struct _name, _field) - !!(_is_string), \
+ }
+
+ const struct hid_bpf_offset_write_range write_ranges[] = {
+ WRITE_RANGE(hid_bpf_ctx, retval, false),
+ WRITE_RANGE(hid_device, name, true),
+ WRITE_RANGE(hid_device, uniq, true),
+ WRITE_RANGE(hid_device, phys, true),
+ };
+#undef WRITE_RANGE
+ const struct btf_type *state = NULL;
+ const struct btf_type *t;
+ const char *cur = NULL;
+ int i;
+
+ t = btf_type_by_id(reg->btf, reg->btf_id);
+
+ for (i = 0; i < ARRAY_SIZE(write_ranges); i++) {
+ const struct hid_bpf_offset_write_range *write_range = &write_ranges[i];
+ s32 type_id;
+
+ /* we already found a writeable struct, but there is a
+ * new one, let's break the loop.
+ */
+ if (t == state && write_range->struct_name != cur)
+ break;
+
+ /* new struct to look for */
+ if (write_range->struct_name != cur) {
+ type_id = btf_find_by_name_kind(reg->btf, write_range->struct_name,
+ BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+
+ state = btf_type_by_id(reg->btf, type_id);
+ }
+
+ /* this is not the struct we are looking for */
+ if (t != state) {
+ cur = write_range->struct_name;
+ continue;
+ }
+
+ /* first time we see this struct, check for out of bounds */
+ if (cur != write_range->struct_name &&
+ off + size > write_range->struct_length) {
+ bpf_log(log, "write access for struct %s at off %d with size %d\n",
+ write_range->struct_name, off, size);
+ return -EACCES;
+ }
+
+ /* now check if we are in our boundaries */
+ if (off >= write_range->start && off + size <= write_range->end)
+ return NOT_INIT;
+
+ cur = write_range->struct_name;
+ }
+
+
+ if (t != state)
+ bpf_log(log, "write access to this struct is not supported\n");
+ else
+ bpf_log(log,
+ "write access at off %d with size %d on read-only part of %s\n",
+ off, size, cur);
+
+ return -EACCES;
+}
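For illustration, the table-driven check above means a program may only write hid_bpf_ctx.retval and the name/uniq/phys strings of hid_device (minus their terminating byte); anything else hits the -EACCES paths. A minimal sketch of a program exercising the permitted write, using the SEC()/BPF_PROG() conventions of the in-tree programs (the program name is illustrative):

/* Sketch only: the assignment below falls inside the hid_bpf_ctx
 * entry of write_ranges[]; a write to any other offset of the
 * context would be rejected by the verifier.
 */
SEC(HID_BPF_DEVICE_EVENT)
int BPF_PROG(example_set_retval, struct hid_bpf_ctx *hctx)
{
	hctx->retval = -1;	/* permitted write */
	return 0;
}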
+
+static const struct bpf_verifier_ops hid_bpf_verifier_ops = {
+ .get_func_proto = bpf_base_func_proto,
+ .is_valid_access = hid_bpf_ops_is_valid_access,
+ .btf_struct_access = hid_bpf_ops_btf_struct_access,
+};
+
+static int hid_bpf_ops_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ const struct hid_bpf_ops *uhid_bpf_ops;
+ struct hid_bpf_ops *khid_bpf_ops;
+ u32 moff;
+
+ uhid_bpf_ops = (const struct hid_bpf_ops *)udata;
+ khid_bpf_ops = (struct hid_bpf_ops *)kdata;
+
+ moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct hid_bpf_ops, hid_id):
+ /* For the hid_id and flags fields, this function has to copy them
+ * and return 1 to indicate that the data has been handled by
+ * the struct_ops type, or the verifier will reject the map if
+ * the value of those fields is not zero.
+ */
+ khid_bpf_ops->hid_id = uhid_bpf_ops->hid_id;
+ return 1;
+ case offsetof(struct hid_bpf_ops, flags):
+ if (uhid_bpf_ops->flags & ~BPF_F_BEFORE)
+ return -EINVAL;
+ khid_bpf_ops->flags = uhid_bpf_ops->flags;
+ return 1;
+ }
+ return 0;
+}
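Seen from the loader side, this is why hid_id (and optionally flags) must be filled in before the struct_ops map is attached. A hedged libbpf sketch follows; the skeleton header, type and variable names are hypothetical, only the libbpf calls themselves are real:

#include <errno.h>
#include <bpf/libbpf.h>
#include "example.skel.h"	/* hypothetical bpftool-generated skeleton */

static int example_attach_to_device(int hid_id)
{
	struct example_bpf *skel;
	struct bpf_link *link;
	int err;

	skel = example_bpf__open();
	if (!skel)
		return -ENOMEM;

	/* must be set before the struct_ops map is attached; copied by init_member() above */
	skel->struct_ops.example_device->hid_id = hid_id;
	skel->struct_ops.example_device->flags = 0;	/* only BPF_F_BEFORE may be set */

	err = example_bpf__load(skel);
	if (err)
		goto out;

	link = bpf_map__attach_struct_ops(skel->maps.example_device);
	if (!link)
		err = -errno;
out:
	if (err)
		example_bpf__destroy(skel);
	return err;
}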
+
+static int hid_bpf_reg(void *kdata, struct bpf_link *link)
+{
+ struct hid_bpf_ops *ops = kdata;
+ struct hid_device *hdev;
+ int count, err = 0;
+
+ hdev = hid_get_device(ops->hid_id);
+ if (IS_ERR(hdev))
+ return PTR_ERR(hdev);
+
+ ops->hdev = hdev;
+
+ mutex_lock(&hdev->bpf.prog_list_lock);
+
+ count = list_count_nodes(&hdev->bpf.prog_list);
+ if (count >= HID_BPF_MAX_PROGS_PER_DEV) {
+ err = -E2BIG;
+ goto out_unlock;
+ }
+
+ if (ops->hid_rdesc_fixup) {
+ if (hdev->bpf.rdesc_ops) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ hdev->bpf.rdesc_ops = ops;
+ }
+
+ if (ops->hid_device_event) {
+ err = hid_bpf_allocate_event_data(hdev);
+ if (err)
+ goto out_unlock;
+ }
+
+ if (ops->flags & BPF_F_BEFORE)
+ list_add_rcu(&ops->list, &hdev->bpf.prog_list);
+ else
+ list_add_tail_rcu(&ops->list, &hdev->bpf.prog_list);
+ synchronize_srcu(&hdev->bpf.srcu);
+
+out_unlock:
+ mutex_unlock(&hdev->bpf.prog_list_lock);
+
+ if (err) {
+ if (hdev->bpf.rdesc_ops == ops)
+ hdev->bpf.rdesc_ops = NULL;
+ hid_put_device(hdev);
+ } else if (ops->hid_rdesc_fixup) {
+ hid_bpf_reconnect(hdev);
+ }
+
+ return err;
+}
+
+static void hid_bpf_unreg(void *kdata, struct bpf_link *link)
+{
+ struct hid_bpf_ops *ops = kdata;
+ struct hid_device *hdev;
+ bool reconnect = false;
+
+ hdev = ops->hdev;
+
+ /* check if __hid_bpf_ops_destroy_device() has been called */
+ if (!hdev)
+ return;
+
+ mutex_lock(&hdev->bpf.prog_list_lock);
+
+ list_del_rcu(&ops->list);
+ synchronize_srcu(&hdev->bpf.srcu);
+
+ reconnect = hdev->bpf.rdesc_ops == ops;
+ if (reconnect)
+ hdev->bpf.rdesc_ops = NULL;
+
+ mutex_unlock(&hdev->bpf.prog_list_lock);
+
+ if (reconnect)
+ hid_bpf_reconnect(hdev);
+
+ hid_put_device(hdev);
+}
+
+static int __hid_bpf_device_event(struct hid_bpf_ctx *ctx, enum hid_report_type type, u64 source)
+{
+ return 0;
+}
+
+static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
+{
+ return 0;
+}
+
+static struct hid_bpf_ops __bpf_hid_bpf_ops = {
+ .hid_device_event = __hid_bpf_device_event,
+ .hid_rdesc_fixup = __hid_bpf_rdesc_fixup,
+};
+
+static struct bpf_struct_ops bpf_hid_bpf_ops = {
+ .verifier_ops = &hid_bpf_verifier_ops,
+ .init = hid_bpf_ops_init,
+ .check_member = hid_bpf_ops_check_member,
+ .init_member = hid_bpf_ops_init_member,
+ .reg = hid_bpf_reg,
+ .unreg = hid_bpf_unreg,
+ .name = "hid_bpf_ops",
+ .cfi_stubs = &__bpf_hid_bpf_ops,
+ .owner = THIS_MODULE,
+};
+
+void __hid_bpf_ops_destroy_device(struct hid_device *hdev)
+{
+ struct hid_bpf_ops *e;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
+ hid_put_device(hdev);
+ e->hdev = NULL;
+ }
+ rcu_read_unlock();
+}
+
+static int __init hid_bpf_struct_ops_init(void)
+{
+ return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops);
+}
+late_initcall(hid_bpf_struct_ops_init);
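With the struct_ops machinery above in place, a device fixup reduces to the pattern the converted programs below follow. A minimal sketch with illustrative names:

SEC(HID_BPF_DEVICE_EVENT)
int BPF_PROG(example_fix_event, struct hid_bpf_ctx *hctx)
{
	__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 8 /* size */);

	if (!data)
		return 0; /* EPERM check */

	/* mangle data[] in place here */
	return 0;
}

HID_BPF_OPS(example_device) = {
	.hid_device_event = (void *)example_fix_event,
};

char _license[] SEC("license") = "GPL";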
diff --git a/drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c b/drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c
index dc26a7677d36..caec91391d32 100644
--- a/drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c
+++ b/drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c
@@ -133,7 +133,7 @@ HID_BPF_CONFIG(
* integer. We thus divide it by 30 to match what other joysticks are
* doing
*/
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc_raptor_mach_2, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
@@ -152,7 +152,7 @@ int BPF_PROG(hid_fix_rdesc_raptor_mach_2, struct hid_bpf_ctx *hctx)
* divide it by 30.
* Byte 34 is always null, so it is ignored.
*/
-SEC("fmod_ret/hid_bpf_device_event")
+SEC(HID_BPF_DEVICE_EVENT)
int BPF_PROG(raptor_mach_2_fix_hat_switch, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 64 /* size */);
@@ -168,6 +168,11 @@ int BPF_PROG(raptor_mach_2_fix_hat_switch, struct hid_bpf_ctx *hctx)
return 0;
}
+HID_BPF_OPS(raptor_mach_2) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_raptor_mach_2,
+ .hid_device_event = (void *)raptor_mach_2_fix_hat_switch,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c b/drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c
index 3d14bbb6f276..c2413fa80543 100644
--- a/drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c
+++ b/drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c
@@ -30,7 +30,7 @@ HID_BPF_CONFIG(
* pointer.
*/
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
@@ -45,6 +45,10 @@ int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
return 0;
}
+HID_BPF_OPS(hp_elite_presenter) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/Huion__Dial-2.bpf.c b/drivers/hid/bpf/progs/Huion__Dial-2.bpf.c
new file mode 100644
index 000000000000..2411dec6db08
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Dial-2.bpf.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256C
+#define PID_DIAL_2 0x0060
+
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_HUION, PID_DIAL_2),
+);
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+/* The prefix of the firmware ID we expect for this device. The full firmware
+ * string has a date suffix, e.g. HUION_T21j_221221
+ */
+char EXPECTED_FIRMWARE_ID[] = "HUION_T216_";
+
+/* How this BPF program works: the tablet has two modes, firmware mode and
+ * tablet mode. In firmware mode (out of the box) the tablet sends button events
+ * and the dial as keyboard combinations. In tablet mode it uses a vendor specific
+ * hid report to report everything instead.
+ * Depending on the mode some hid reports are never sent and the corresponding
+ * devices are mute.
+ *
+ * To switch the tablet use e.g. https://github.com/whot/huion-switcher
+ * or one of the tools from the digimend project.
+ *
+ * This BPF works for both modes. The huion-switcher tool sets the
+ * HUION_FIRMWARE_ID udev property - if that is set then we disable the firmware
+ * pad and pen reports (by making them vendor collections that are ignored).
+ * If that property is not set we fix all hidraw nodes so the tablet works in
+ * either mode, though the drawback is that the device will show up twice if
+ * you bind it to all event nodes.
+ *
+ * Default report descriptor for the first exposed hidraw node:
+ *
+ * # HUION Huion Tablet_Q630M
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x58, // Report Size (88) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * R: 18 06 00 ff 09 01 a1 01 85 08 75 58 95 01 09 01 81 02 c0
+ *
+ * This rdesc does nothing until the tablet is switched to raw mode, see
+ * https://github.com/whot/huion-switcher
+ *
+ *
+ * Second hidraw node is the Pen. This one sends events until the tablet is
+ * switched to raw mode, then it's mute.
+ *
+ * # Report descriptor length: 93 bytes
+ * # HUION Huion Tablet_Q630M
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x45, // Usage (Eraser) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18
+ * # 0x15, 0x00, // Logical Minimum (0) 20
+ * # 0x25, 0x01, // Logical Maximum (1) 22
+ * # 0x75, 0x01, // Report Size (1) 24
+ * # 0x95, 0x06, // Report Count (6) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x09, 0x32, // Usage (In Range) 30
+ * # 0x75, 0x01, // Report Size (1) 32
+ * # 0x95, 0x01, // Report Count (1) 34
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 40
+ * # 0x09, 0x30, // Usage (X) 42
+ * # 0x09, 0x31, // Usage (Y) 44
+ * # 0x55, 0x0d, // Unit Exponent (-3) 46
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 48
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 50
+ * # 0x35, 0x00, // Physical Minimum (0) 53
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 55
+ * # 0x75, 0x10, // Report Size (16) 58
+ * # 0x95, 0x02, // Report Count (2) 60
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 62
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 64
+ * # 0x09, 0x30, // Usage (Tip Pressure) 66
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 68
+ * # 0x75, 0x10, // Report Size (16) 71
+ * # 0x95, 0x01, // Report Count (1) 73
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 75
+ * # 0x09, 0x3d, // Usage (X Tilt) 77
+ * # 0x09, 0x3e, // Usage (Y Tilt) 79
+ * # 0x15, 0x81, // Logical Minimum (-127) 81
+ * # 0x25, 0x7f, // Logical Maximum (127) 83
+ * # 0x75, 0x08, // Report Size (8) 85
+ * # 0x95, 0x02, // Report Count (2) 87
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 89
+ * # 0xc0, // End Collection 91
+ * # 0xc0, // End Collection 92
+ * R: 93 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 45 09 3c 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 09 3d 09 3e 15 81 25 7f 75 08 95 02 81 02 c0 c0
+ *
+ * Third hidraw node is the pad which sends a combination of keyboard shortcuts until
+ * the tablet is switched to raw mode, then it's mute:
+ *
+ * # Report descriptor length: 148 bytes
+ * # HUION Huion Tablet_Q630M
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x0e, // Usage (System Multi-Axis Controller) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x11, // Report ID (17) 6
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 8
+ * # 0x09, 0x21, // Usage (Puck) 10
+ * # 0xa1, 0x02, // Collection (Logical) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x01, // Report Count (1) 20
+ * # 0xa1, 0x00, // Collection (Physical) 22
+ * # 0x05, 0x09, // Usage Page (Button) 24
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 30
+ * # 0x09, 0x33, // Usage (Touch) 32
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 34
+ * # 0x95, 0x06, // Report Count (6) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0xa1, 0x02, // Collection (Logical) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x37, // Usage (Dial) 44
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 46
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 49
+ * # 0x75, 0x10, // Report Size (16) 52
+ * # 0x95, 0x01, // Report Count (1) 54
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 56
+ * # 0x35, 0x00, // Physical Minimum (0) 58
+ * # 0x46, 0x10, 0x0e, // Physical Maximum (3600) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 63
+ * # 0x26, 0x10, 0x0e, // Logical Maximum (3600) 65
+ * # 0x09, 0x48, // Usage (Resolution Multiplier) 68
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 70
+ * # 0x45, 0x00, // Physical Maximum (0) 72
+ * # 0xc0, // End Collection 74
+ * # 0x75, 0x08, // Report Size (8) 75
+ * # 0x95, 0x01, // Report Count (1) 77
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 79
+ * # 0x75, 0x08, // Report Size (8) 81
+ * # 0x95, 0x01, // Report Count (1) 83
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x01, // Report Count (1) 89
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 91
+ * # 0x75, 0x08, // Report Size (8) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 97
+ * # 0x75, 0x08, // Report Size (8) 99
+ * # 0x95, 0x01, // Report Count (1) 101
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 103
+ * # 0xc0, // End Collection 105
+ * # 0xc0, // End Collection 106
+ * # 0xc0, // End Collection 107
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 108
+ * # 0x09, 0x06, // Usage (Keyboard) 110
+ * # 0xa1, 0x01, // Collection (Application) 112
+ * # 0x85, 0x03, // Report ID (3) 114
+ * # 0x05, 0x07, // Usage Page (Keyboard) 116
+ * # 0x19, 0xe0, // Usage Minimum (224) 118
+ * # 0x29, 0xe7, // Usage Maximum (231) 120
+ * # 0x15, 0x00, // Logical Minimum (0) 122
+ * # 0x25, 0x01, // Logical Maximum (1) 124
+ * # 0x75, 0x01, // Report Size (1) 126
+ * # 0x95, 0x08, // Report Count (8) 128
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 130
+ * # 0x05, 0x07, // Usage Page (Keyboard) 132
+ * # 0x19, 0x00, // Usage Minimum (0) 134
+ * # 0x29, 0xff, // Usage Maximum (255) 136
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 138
+ * # 0x75, 0x08, // Report Size (8) 141
+ * # 0x95, 0x06, // Report Count (6) 143
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 145
+ * # 0xc0, // End Collection 147
+ * R: 148 05 01 09 0e a1 01 85 11 05 0d 09 21 a1 02 15 00 25 01 75 01 95 01 a1 00 05 09 09 01 81 02 05 0d 09 33 81 02 95 06 81 03 a1 02 05 01 09 37 16 00 80 26 ff 7f 75 10 95 01 81 06 35 00 46 10 0e 15 00 26 10 0e 09 48 b1 02 45 00 c0 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 c0 c0 c0 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0
+ */
+
+#define PAD_REPORT_DESCRIPTOR_LENGTH 148
+#define PEN_REPORT_DESCRIPTOR_LENGTH 93
+#define VENDOR_REPORT_DESCRIPTOR_LENGTH 18
+#define PAD_REPORT_ID 3
+#define DIAL_REPORT_ID 17
+#define PEN_REPORT_ID 10
+#define VENDOR_REPORT_ID 8
+#define PAD_REPORT_LENGTH 9
+#define PEN_REPORT_LENGTH 10
+#define VENDOR_REPORT_LENGTH 12
+
+
+__u8 last_button_state;
+
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PAD_REPORT_ID)
+ LogicalRange_i8(0, 1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 in report - just exists so we get to be a tablet pad
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 2/3 in report - just exists so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 in report is the dial
+ Usage_GD_Wheel
+ LogicalRange_i8(-1, 1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ // Byte 5 is the button state
+ UsagePage_Button
+ UsageRange_i8(0x01, 0x8)
+ LogicalRange_i8(0x0, 0x1)
+ ReportCount(7)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(1) // padding
+ Input(Const)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+ )
+};
+
+static const __u8 fixed_rdesc_pen[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PEN_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // -- Byte 1 in report
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch // maps eraser to BTN_STYLUS2
+ LogicalRange_i8(0, 1)
+ ReportSize(1)
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ PhysicalRange_i16(0, 266)
+ LogicalRange_i16(0, 32767)
+ Usage_GD_X
+ Input(Var|Abs) // Bytes 2+3
+ PhysicalRange_i16(0, 166)
+ LogicalRange_i16(0, 32767)
+ Usage_GD_Y
+ Input(Var|Abs) // Bytes 4+5
+ )
+ UsagePage_Digitizers
+ Usage_Dig_TipPressure
+ LogicalRange_i16(0, 8191)
+ Input(Var|Abs) // Byte 6+7
+ ReportSize(8)
+ ReportCount(2)
+ LogicalRange_i8(-60, 60)
+ Usage_Dig_XTilt
+ Usage_Dig_YTilt
+ Input(Var|Abs) // Byte 8+9
+ )
+ )
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // Byte 0
+ // We leave the pen on the vendor report ID
+ ReportId(VENDOR_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // Byte 1 are the buttons
+ LogicalRange_i8(0, 1)
+ ReportSize(1)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ // Note: reported logical range differs
+ // from the pen report ID for x and y
+ LogicalRange_i16(0, 53340)
+ PhysicalRange_i16(0, 266)
+ // Bytes 2/3 in report
+ Usage_GD_X
+ Input(Var|Abs)
+ LogicalRange_i16(0, 33340)
+ PhysicalRange_i16(0, 166)
+ // Bytes 4/5 in report
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ // Bytes 6/7 in report
+ LogicalRange_i16(0, 8191)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ // Bytes 8/9 in report
+ ReportCount(1) // Padding
+ Input(Const)
+ LogicalRange_i8(-60, 60)
+ // Byte 10 in report
+ Usage_Dig_XTilt
+ // Byte 11 in report
+ Usage_Dig_YTilt
+ ReportSize(8)
+ ReportCount(2)
+ Input(Var|Abs)
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ ReportId(PAD_REPORT_ID)
+ LogicalRange_i8(0, 1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 are the buttons
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 is the button state
+ UsagePage_Button
+ UsageRange_i8(0x01, 0x8)
+ LogicalRange_i8(0x0, 0x1)
+ ReportCount(8)
+ ReportSize(1)
+ Input(Var|Abs)
+ // Byte 5 is the top dial
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalRange_i8(-1, 1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ // Byte 6 is the bottom dial
+ UsagePage_Consumer
+ Usage_Con_ACPan
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+static const __u8 disabled_rdesc_pen[] = {
+ FixedSizeVendorReport(PEN_REPORT_LENGTH)
+};
+
+static const __u8 disabled_rdesc_pad[] = {
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(dial_2_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* If we have a firmware ID and it matches our expected prefix, we
+ * disable the default pad/pen nodes. They won't send events
+ * but cause duplicate devices.
+ */
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+ if (rdesc_size == PAD_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pad, sizeof(disabled_rdesc_pad));
+ return sizeof(disabled_rdesc_pad);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ }
+ if (rdesc_size == PEN_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pen, sizeof(disabled_rdesc_pen));
+ return sizeof(disabled_rdesc_pen);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pen, sizeof(fixed_rdesc_pen));
+ return sizeof(fixed_rdesc_pen);
+ }
+ /* Always fix the vendor mode so the tablet will work even if nothing sets
+ * the udev property (e.g. huion-switcher run manually)
+ */
+ if (rdesc_size == VENDOR_REPORT_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(dial_2_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 16 /* size */);
+ static __u8 button;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Only sent if tablet is in default mode */
+ if (data[0] == PAD_REPORT_ID) {
+ /* Nicely enough, this device only supports one button down at a time so
+ * the reports are easy to match. Buttons numbered from the top
+ * Button released: 03 00 00 00 00 00 00 00
+ * Button 1: 03 00 05 00 00 00 00 00 -> b
+ * Button 2: 03 00 08 00 00 00 00 00 -> e
+ * Button 3: 03 00 0c 00 00 00 00 00 -> i
+ * Button 4: 03 00 e0 16 00 00 00 00 -> Ctrl S
+ * Button 5: 03 00 2c 00 00 00 00 00 -> space
+ * Button 6: 03 00 e0 e2 1d 00 00 00 -> Ctrl Alt Z
+ */
+ button &= 0xc0;
+
+ switch ((data[2] << 16) | (data[3] << 8) | data[4]) {
+ case 0x000000:
+ break;
+ case 0x050000:
+ button |= BIT(0);
+ break;
+ case 0x080000:
+ button |= BIT(1);
+ break;
+ case 0x0c0000:
+ button |= BIT(2);
+ break;
+ case 0xe01600:
+ button |= BIT(3);
+ break;
+ case 0x2c0000:
+ button |= BIT(4);
+ break;
+ case 0xe0e21d:
+ button |= BIT(5);
+ break;
+ }
+
+ __u8 report[8] = {PAD_REPORT_ID, 0x0, 0x0, 0x0, 0x00, button};
+
+ __builtin_memcpy(data, report, sizeof(report));
+ return sizeof(report);
+ }
+
+ /* Only sent if tablet is in default mode */
+ if (data[0] == DIAL_REPORT_ID) {
+ /*
+ * In default mode, both dials are merged together:
+ *
+ * Dial down: 11 00 ff ff 00 00 00 00 00 -> Dial -1
+ * Dial up: 11 00 01 00 00 00 00 00 00 -> Dial 1
+ */
+ __u16 dial = data[3] << 8 | data[2];
+
+ button &= 0x3f;
+ button |= !!data[1] << 6;
+
+ __u8 report[] = {PAD_REPORT_ID, 0x0, 0x0, 0x0, dial, button};
+
+ __builtin_memcpy(data, report, sizeof(report));
+ return sizeof(report);
+ }
+
+ /* Nothing to do for the PEN_REPORT_ID, it's already mapped */
+
+ /* Only sent if tablet is in raw mode */
+ if (data[0] == VENDOR_REPORT_ID) {
+ /* Pad reports */
+ if (data[1] & 0x20) {
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus;
+ __u8 x;
+ __u8 y;
+ __u8 buttons;
+ __u8 dial_1;
+ __u8 dial_2;
+ } __attribute__((packed)) *pad_report;
+ __u8 dial_1 = 0, dial_2 = 0;
+
+ /* Dial report */
+ if (data[1] == 0xf1) {
+ __u8 d = 0;
+
+ if (data[5] == 2)
+ d = 0xff;
+ else
+ d = data[5];
+
+ if (data[3] == 1)
+ dial_1 = d;
+ else
+ dial_2 = d;
+ } else {
+ /* data[4] are the buttons, mapped correctly */
+ last_button_state = data[4];
+ dial_1 = 0; // dial
+ dial_2 = 0;
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->dial_1 = dial_1;
+ pad_report->dial_2 = dial_2;
+
+ return sizeof(struct pad_report);
+ }
+
+ /* Pen reports need nothing done */
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(inspiroy_dial2) = {
+ .hid_device_event = (void *)dial_2_fix_events,
+ .hid_rdesc_fixup = (void *)dial_2_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ case VENDOR_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
new file mode 100644
index 000000000000..b09b80132368
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256C
+#define PID_INSPIROY_2_S 0x0066
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_HUION, PID_INSPIROY_2_S),
+);
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+/* The prefix of the firmware ID we expect for this device. The full firmware
+ * string has a date suffix, e.g. HUION_T21j_221221
+ */
+char EXPECTED_FIRMWARE_ID[] = "HUION_T21j_";
+
+/* How this BPF program works: the tablet has two modes, firmware mode and
+ * tablet mode. In firmware mode (out of the box) the tablet sends button events
+ * and the dial as keyboard combinations. In tablet mode it uses a vendor specific
+ * hid report to report everything instead.
+ * Depending on the mode some hid reports are never sent and the corresponding
+ * devices are mute.
+ *
+ * To switch the tablet use e.g. https://github.com/whot/huion-switcher
+ * or one of the tools from the digimend project.
+ *
+ * This BPF works for both modes. The huion-switcher tool sets the
+ * HUION_FIRMWARE_ID udev property - if that is set then we disable the firmware
+ * pad and pen reports (by making them vendor collections that are ignored).
+ * If that property is not set we fix all hidraw nodes so the tablet works in
+ * either mode, though the drawback is that the device will show up twice if
+ * you bind it to all event nodes.
+ *
+ * Default report descriptor for the first exposed hidraw node:
+ *
+ * # HUION Huion Tablet_H641P
+ * # Report descriptor length: 18 bytes
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 0xFF00) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x58, // Report Size (88) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * R: 18 06 00 ff 09 01 a1 01 85 08 75 58 95 01 09 01 81 02 c0
+ *
+ * This rdesc does nothing until the tablet is switched to raw mode, see
+ * https://github.com/whot/huion-switcher
+ *
+ *
+ * Second hidraw node is the Pen. This one sends events until the tablet is
+ * switched to raw mode, then it's mute.
+ *
+ * # Report descriptor length: 93 bytes
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x45, // Usage (Eraser) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18 <-- has no Invert eraser
+ * # 0x15, 0x00, // Logical Minimum (0) 20
+ * # 0x25, 0x01, // Logical Maximum (1) 22
+ * # 0x75, 0x01, // Report Size (1) 24
+ * # 0x95, 0x06, // Report Count (6) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x09, 0x32, // Usage (In Range) 30
+ * # 0x75, 0x01, // Report Size (1) 32
+ * # 0x95, 0x01, // Report Count (1) 34
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 40
+ * # 0x09, 0x30, // Usage (X) 42
+ * # 0x09, 0x31, // Usage (Y) 44
+ * # 0x55, 0x0d, // Unit Exponent (-3) 46 <-- change to -2
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 48 <-- change in³ to in
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 50
+ * # 0x35, 0x00, // Physical Minimum (0) 53
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 55 <-- invalid size
+ * # 0x75, 0x10, // Report Size (16) 58
+ * # 0x95, 0x02, // Report Count (2) 60
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 62
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 64
+ * # 0x09, 0x30, // Usage (Tip Pressure) 66
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 68
+ * # 0x75, 0x10, // Report Size (16) 71
+ * # 0x95, 0x01, // Report Count (1) 73
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 75
+ * # 0x09, 0x3d, // Usage (X Tilt) 77 <-- No tilt reported
+ * # 0x09, 0x3e, // Usage (Y Tilt) 79
+ * # 0x15, 0x81, // Logical Minimum (-127) 81
+ * # 0x25, 0x7f, // Logical Maximum (127) 83
+ * # 0x75, 0x08, // Report Size (8) 85
+ * # 0x95, 0x02, // Report Count (2) 87
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 89
+ * # 0xc0, // End Collection 91
+ * # 0xc0, // End Collection 92
+ * R: 93 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 45 09 3c 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 09 3d 09 3e 15 81 25 7f 75 08 95 02 81 02 c0 c0
+ *
+ * Third hidraw node is the pad which sends a combination of keyboard shortcuts until
+ * the tablet is switched to raw mode, then it's mute:
+ *
+ * # Report descriptor length: 65 bytes
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x06, // Usage (Keyboard) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x03, // Report ID (3) 6
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 8
+ * # 0x19, 0xe0, // UsageMinimum (224) 10
+ * # 0x29, 0xe7, // UsageMaximum (231) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x08, // Report Count (8) 20
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 22
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 24
+ * # 0x19, 0x00, // UsageMinimum (0) 26
+ * # 0x29, 0xff, // UsageMaximum (255) 28
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 30
+ * # 0x75, 0x08, // Report Size (8) 33
+ * # 0x95, 0x06, // Report Count (6) 35
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 37
+ * # 0xc0, // End Collection 39
+ * # 0x05, 0x0c, // Usage Page (Consumer) 40
+ * # 0x09, 0x01, // Usage (Consumer Control) 42
+ * # 0xa1, 0x01, // Collection (Application) 44
+ * # 0x85, 0x04, // Report ID (4) 46
+ * # 0x19, 0x00, // UsageMinimum (0) 48
+ * # 0x2a, 0x3c, 0x02, // UsageMaximum (572) 50
+ * # 0x15, 0x00, // Logical Minimum (0) 53
+ * # 0x26, 0x3c, 0x02, // Logical Maximum (572) 55
+ * # 0x95, 0x01, // Report Count (1) 58
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 62
+ * # 0xc0, // End Collection 64
+ * R: 65 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0 05 0c 09 01 a1 01 85 04 19 00 2a 3c 02 15 00 26 3c 02 95 01 75 10 81 00 c0
+ * N: HUION Huion Tablet_H641P
+ */
+
+#define PAD_REPORT_DESCRIPTOR_LENGTH 65
+#define PEN_REPORT_DESCRIPTOR_LENGTH 93
+#define VENDOR_REPORT_DESCRIPTOR_LENGTH 18
+#define PAD_REPORT_ID 3
+#define PEN_REPORT_ID 10
+#define VENDOR_REPORT_ID 8
+#define PAD_REPORT_LENGTH 8
+#define PEN_REPORT_LENGTH 10
+#define VENDOR_REPORT_LENGTH 12
+
+
+__u8 last_button_state;
+
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PAD_REPORT_ID)
+ LogicalRange_i8(0, 1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 in report - just exists so we get to be a tablet pad
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 2/3 in report - just exists so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 in report is the wheel
+ Usage_GD_Wheel
+ LogicalRange_i8(-1, 1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ // Byte 5 is the button state
+ UsagePage_Button
+ UsageRange_i8(0x01, 0x6)
+ LogicalRange_i8(0x01, 0x6)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Arr|Abs)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+ )
+};
+
+static const __u8 fixed_rdesc_pen[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PEN_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // -- Byte 1 in report
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch // maps eraser to BTN_STYLUS2
+ LogicalRange_i8(0, 1)
+ ReportSize(1)
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ PhysicalRange_i16(0, 160)
+ LogicalRange_i16(0, 32767)
+ Usage_GD_X
+ Input(Var|Abs) // Bytes 2+3
+ PhysicalRange_i16(0, 100)
+ LogicalRange_i16(0, 32767)
+ Usage_GD_Y
+ Input(Var|Abs) // Bytes 4+5
+ )
+ UsagePage_Digitizers
+ Usage_Dig_TipPressure
+ LogicalRange_i16(0, 8191)
+ Input(Var|Abs) // Byte 6+7
+ // Two bytes padding so we don't need to change the report at all
+ ReportSize(8)
+ ReportCount(2)
+ Input(Const) // Byte 6+7
+ )
+ )
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // Byte 0
+ // We leave the pen on the vendor report ID
+ ReportId(VENDOR_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // Byte 1 are the buttons
+ LogicalRange_i8(0, 1)
+ ReportSize(1)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ // Note: reported logical range differs
+ // from the pen report ID for x and y
+ LogicalRange_i16(0, 32000)
+ PhysicalRange_i16(0, 160)
+ // Bytes 2/3 in report
+ Usage_GD_X
+ Input(Var|Abs)
+ LogicalRange_i16(0, 20000)
+ PhysicalRange_i16(0, 100)
+ // Bytes 4/5 in report
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ // Bytes 6/7 in report
+ LogicalRange_i16(0, 8192)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ ReportId(PAD_REPORT_ID)
+ LogicalRange_i8(0, 1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 are the buttons
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 is the button state
+ UsagePage_Button
+ UsageRange_i8(0x01, 0x6)
+ LogicalRange_i8(0x0, 0x1)
+ ReportCount(6)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(2)
+ Input(Const)
+ // Byte 5 is the wheel
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalRange_i8(-1, 1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+static const __u8 disabled_rdesc_pen[] = {
+ FixedSizeVendorReport(PEN_REPORT_LENGTH)
+};
+
+static const __u8 disabled_rdesc_pad[] = {
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* If we have a firmware ID and it matches our expected prefix, we
+ * disable the default pad/pen nodes. They won't send events
+ * but cause duplicate devices.
+ */
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+ if (rdesc_size == PAD_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pad, sizeof(disabled_rdesc_pad));
+ return sizeof(disabled_rdesc_pad);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ }
+ if (rdesc_size == PEN_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pen, sizeof(disabled_rdesc_pen));
+ return sizeof(disabled_rdesc_pen);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pen, sizeof(fixed_rdesc_pen));
+ return sizeof(fixed_rdesc_pen);
+ }
+ /* Always fix the vendor mode so the tablet will work even if nothing sets
+ * the udev property (e.g. huion-switcher run manually)
+ */
+ if (rdesc_size == VENDOR_REPORT_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(inspiroy_2_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Only sent if tablet is in default mode */
+ if (data[0] == PAD_REPORT_ID) {
+ /* Nicely enough, this device only supports one button down at a time so
+ * the reports are easy to match. Buttons numbered from the top
+ * Button released: 03 00 00 00 00 00 00 00
+ * Button 1: 03 00 05 00 00 00 00 00 -> b
+ * Button 2: 03 00 0c 00 00 00 00 00 -> i
+ * Button 3: 03 00 08 00 00 00 00 00 -> e
+ * Button 4: 03 01 16 00 00 00 00 00 -> Ctrl S
+ * Button 5: 03 00 2c 00 00 00 00 00 -> space
+ * Button 6: 03 05 1d 00 00 00 00 00 -> Ctrl Alt Z
+ *
+ * Wheel down: 03 01 2d 00 00 00 00 00 -> Ctrl -
+ * Wheel up: 03 01 2e 00 00 00 00 00 -> Ctrl =
+ */
+ __u8 button = 0;
+ __u8 wheel = 0;
+
+ switch (data[1] << 8 | data[2]) {
+ case 0x0000:
+ break;
+ case 0x0005:
+ button = 1;
+ break;
+ case 0x000c:
+ button = 2;
+ break;
+ case 0x0008:
+ button = 3;
+ break;
+ case 0x0116:
+ button = 4;
+ break;
+ case 0x002c:
+ button = 5;
+ break;
+ case 0x051d:
+ button = 6;
+ break;
+ case 0x012d:
+ wheel = -1;
+ break;
+ case 0x012e:
+ wheel = 1;
+ break;
+
+ }
+
+ __u8 report[6] = {PAD_REPORT_ID, 0x0, 0x0, 0x0, wheel, button};
+
+ __builtin_memcpy(data, report, sizeof(report));
+ return sizeof(report);
+ }
+
+ /* Nothing to do for the PEN_REPORT_ID, it's already mapped */
+
+ /* Only sent if tablet is in raw mode */
+ if (data[0] == VENDOR_REPORT_ID) {
+ /* Pad reports */
+ if (data[1] & 0x20) {
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus;
+ __u8 x;
+ __u8 y;
+ __u8 buttons;
+ __u8 wheel;
+ } __attribute__((packed)) *pad_report;
+ __u8 wheel = 0;
+
+ /* Wheel report */
+ if (data[1] == 0xf1) {
+ if (data[5] == 2)
+ wheel = 0xff;
+ else
+ wheel = data[5];
+ } else {
+ /* data[4] are the buttons, mapped correctly */
+ last_button_state = data[4];
+ wheel = 0; // wheel
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->wheel = wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ /* Pen reports need nothing done */
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(inspiroy_2) = {
+ .hid_device_event = (void *)inspiroy_2_fix_events,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ case VENDOR_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
index ff759f2276f9..a4a4f324aedd 100644
--- a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
@@ -191,7 +191,7 @@ static const __u8 fixed_rdesc[] = {
0xc0, // End Collection 327
};
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
@@ -215,7 +215,7 @@ int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)
* - if there was this out-of-proximity event, we are entering
* eraser mode, and we will until the next out-of-proximity.
*/
-SEC("fmod_ret/hid_bpf_device_event")
+SEC(HID_BPF_DEVICE_EVENT)
int BPF_PROG(kamvas_pro_19_fix_3rd_button, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
@@ -255,6 +255,11 @@ int BPF_PROG(kamvas_pro_19_fix_3rd_button, struct hid_bpf_ctx *hctx)
return 0;
}
+HID_BPF_OPS(huion_Kamvas_pro_19) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_huion_kamvas_pro_19,
+ .hid_device_event = (void *)kamvas_pro_19_fix_3rd_button,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c b/drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c
index 225cbefdbf0e..82f1950445dd 100644
--- a/drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c
+++ b/drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c
@@ -21,7 +21,7 @@ HID_BPF_CONFIG(
* We just fix the report descriptor to enable those missing 7 buttons.
*/
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
{
const u8 offsets[] = {84, 112, 140};
@@ -45,6 +45,10 @@ int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
return 0;
}
+HID_BPF_OPS(iogear_kaliber_momentum) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/Makefile b/drivers/hid/bpf/progs/Makefile
index 63ed7e02adf1..ec1fc642fd63 100644
--- a/drivers/hid/bpf/progs/Makefile
+++ b/drivers/hid/bpf/progs/Makefile
@@ -56,7 +56,7 @@ clean:
%.bpf.o: %.bpf.c vmlinux.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BPF,$@)
- $(Q)$(CLANG) -g -O2 --target=bpf $(INCLUDES) \
+ $(Q)$(CLANG) -g -O2 --target=bpf -Wall -Werror $(INCLUDES) \
-c $(filter %.c,$^) -o $@ && \
$(LLVM_STRIP) -g $@
diff --git a/drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c b/drivers/hid/bpf/progs/Microsoft__Xbox-Elite-2.bpf.c
index c04abecab8ee..382bba735a2c 100644
--- a/drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c
+++ b/drivers/hid/bpf/progs/Microsoft__Xbox-Elite-2.bpf.c
@@ -15,20 +15,19 @@ HID_BPF_CONFIG(
);
/*
- * When using the XBox Wireless Controller Elite 2 over Bluetooth,
- * the device exports the paddle on the back of the device as a single
+ * When using the Xbox Wireless Controller Elite 2 over Bluetooth,
+ * the device exports the paddles on the back of the device as a single
* bitfield value of usage "Assign Selection".
*
- * The kernel doesn't process those usages properly and report KEY_UNKNOWN
- * for it.
+ * The kernel doesn't process the paddles usage properly and reports KEY_UNKNOWN.
*
- * SDL doesn't know how to interprete that KEY_UNKNOWN and thus ignores the paddles.
+ * SDL doesn't know how to interpret KEY_UNKNOWN and thus ignores the paddles.
*
* Given that over USB the kernel uses BTN_TRIGGER_HAPPY[5-8], we
- * can tweak the report descriptor to make the kernel interprete it properly:
- * - we need an application collection of gamepad (so we have to close the current
+ * can tweak the report descriptor to make the kernel interpret it properly:
+ * - We need an application collection of gamepad (so we have to close the current
* Consumer Control one)
- * - we need to change the usage to be buttons from 0x15 to 0x18
+ * - We need to change the usage to be buttons from 0x15 to 0x18
*/
#define OFFSET_ASSIGN_SELECTION 211
@@ -93,7 +92,7 @@ _Static_assert(sizeof(rdesc_assign_selection) == sizeof(fixed_rdesc_assign_selec
_Static_assert(sizeof(rdesc_assign_selection) + OFFSET_ASSIGN_SELECTION < ORIGINAL_RDESC_SIZE,
"Rdesc at given offset is too big");
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
@@ -114,6 +113,10 @@ int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
return 0;
}
+HID_BPF_OPS(xbox_elite_2) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/Thrustmaster__TCA-Yoke-Boeing.bpf.c b/drivers/hid/bpf/progs/Thrustmaster__TCA-Yoke-Boeing.bpf.c
new file mode 100644
index 000000000000..ecf775a99346
--- /dev/null
+++ b/drivers/hid/bpf/progs/Thrustmaster__TCA-Yoke-Boeing.bpf.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Kumar Swarnam Iyer (kumar.s.iyer65@gmail.com)
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_THRUSTMASTER 0x044F
+#define PID_TCA_YOKE_BOEING 0x0409
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_THRUSTMASTER, PID_TCA_YOKE_BOEING)
+);
+
+/* The original HID descriptor of the Thrustmaster TCA Yoke Boeing joystick contains
+ * an Input field that shows up as an axis, ABS_MISC in Linux. But it is not possible
+ * to assign an actual physical control to this axis as they're all taken up. There
+ * are 2 vendor-defined inputs where the Input type appears to be defined incorrectly.
+ * This bpf attempts to fix this by changing the Inputs so that the axis doesn't show
+ * up in Linux at all.
+ * This version is the short fix that only changes 2 fields in the descriptor
+ * instead of replacing the whole report descriptor.
+ * For reference, this is the original report descriptor:
+ *
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x04, // Usage (Joystick) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x01, // Report ID (1) 6
+ * 0x09, 0x39, // Usage (Hat switch) 8
+ * 0x15, 0x00, // Logical Minimum (0) 10
+ * 0x25, 0x07, // Logical Maximum (7) 12
+ * 0x35, 0x00, // Physical Minimum (0) 14
+ * 0x46, 0x3b, 0x01, // Physical Maximum (315) 16
+ * 0x65, 0x14, // Unit (EnglishRotation: deg) 19
+ * 0x75, 0x04, // Report Size (4) 21
+ * 0x95, 0x01, // Report Count (1) 23
+ * 0x81, 0x42, // Input (Data,Var,Abs,Null) 25
+ * 0x65, 0x00, // Unit (None) 27
+ * 0x05, 0x09, // Usage Page (Button) 29
+ * 0x19, 0x01, // Usage Minimum (1) 31
+ * 0x29, 0x12, // Usage Maximum (18) 33
+ * 0x15, 0x00, // Logical Minimum (0) 35
+ * 0x25, 0x01, // Logical Maximum (1) 37
+ * 0x75, 0x01, // Report Size (1) 39
+ * 0x95, 0x12, // Report Count (18) 41
+ * 0x81, 0x02, // Input (Data,Var,Abs) 43
+ * 0x95, 0x02, // Report Count (2) 45
+ * 0x81, 0x03, // Input (Cnst,Var,Abs) 47
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 49
+ * 0x09, 0x31, // Usage (Y) 51
+ * 0x09, 0x30, // Usage (X) 53
+ * 0x09, 0x32, // Usage (Z) 55
+ * 0x09, 0x34, // Usage (Ry) 57
+ * 0x09, 0x33, // Usage (Rx) 59
+ * 0x09, 0x35, // Usage (Rz) 61
+ * 0x15, 0x00, // Logical Minimum (0) 63
+ * 0x27, 0xff, 0xff, 0x00, 0x00, // Logical Maximum (65535) 65
+ * 0x75, 0x10, // Report Size (16) 70
+ * 0x95, 0x06, // Report Count (6) 72
+ * 0x81, 0x02, // Input (Data,Var,Abs) 74
+ * 0x06, 0xf0, 0xff, // Usage Page (Vendor Usage Page 0xfff0) 76
+ * 0x09, 0x59, // Usage (Vendor Usage 0x59) 79
+ * 0x15, 0x00, // Logical Minimum (0) 81
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 83
+ * 0x75, 0x08, // Report Size (8) 86
+ * 0x95, 0x01, // Report Count (1) 88
+ * 0x81, 0x02, // Input (Data,Var,Abs) 90 --> Needs to be changed
+ * 0x09, 0x51, // Usage (Vendor Usage 0x51) 92
+ * 0x15, 0x00, // Logical Minimum (0) 94
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 96
+ * 0x75, 0x08, // Report Size (8) 99
+ * 0x95, 0x20, // Report Count (32) 101
+ * 0x81, 0x02, // Input (Data,Var,Abs) 103 --> Needs to be changed
+ * 0x09, 0x50, // Usage (Vendor Usage 0x50) 105
+ * 0x15, 0x00, // Logical Minimum (0) 107
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 109
+ * 0x75, 0x08, // Report Size (8) 112
+ * 0x95, 0x0f, // Report Count (15) 114
+ * 0x81, 0x03, // Input (Cnst,Var,Abs) 116
+ * 0x09, 0x47, // Usage (Vendor Usage 0x47) 118
+ * 0x85, 0xf2, // Report ID (242) 120
+ * 0x15, 0x00, // Logical Minimum (0) 122
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 124
+ * 0x75, 0x08, // Report Size (8) 127
+ * 0x95, 0x3f, // Report Count (63) 129
+ * 0xb1, 0x02, // Feature (Data,Var,Abs) 131
+ * 0x09, 0x48, // Usage (Vendor Usage 0x48) 133
+ * 0x85, 0xf3, // Report ID (243) 135
+ * 0x15, 0x00, // Logical Minimum (0) 137
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 139
+ * 0x75, 0x08, // Report Size (8) 142
+ * 0x95, 0x3f, // Report Count (63) 144
+ * 0xb1, 0x02, // Feature (Data,Var,Abs) 146
+ * 0xc0, // End Collection 148
+ */
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc_tca_yoke, struct hid_bpf_ctx *hctx)
+{
+ const int expected_length = 148;
+
+ if (hctx->size != expected_length)
+ return 0;
+
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+
+ if (!data)
+ return 0; /* EPERM */
+
+ /* Safety check, our probe() should take care of this though */
+ if (data[1] != 0x01 /* Generic Desktop */ || data[3] != 0x04 /* Joystick */)
+ return 0;
+
+ /* The report descriptor sets incorrect Input items in 2 places, resulting in a
+ * non-existing axis showing up.
+ * This change sets the correct Input which prevents the axis from showing up in Linux.
+ */
+
+ if (data[90] == 0x81 && /* Input */
+ data[103] == 0x81) { /* Input */
+ data[91] = 0x03; /* Input set to 0x03: Constant, Variable, Absolute */
+ data[104] = 0x03; /* Input set to 0x03: Constant, Variable, Absolute */
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(tca_yoke) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_tca_yoke,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /* ensure the report descriptor isn't already fixed */
+ if (ctx->rdesc[91] != 0x02) /* Input for 0x59 Usage type has changed */
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c b/drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c
index dc05aa48faa7..2da680bc4e11 100644
--- a/drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c
+++ b/drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c
@@ -101,7 +101,7 @@ static inline __u8 *get_u8(__u8 *data, unsigned int offset)
return (__u8 *)get_bits(data, offset);
}
-SEC("fmod_ret/hid_bpf_device_event")
+SEC(HID_BPF_DEVICE_EVENT)
int BPF_PROG(artpen_pressure_interpolate, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PEN_REPORT_LEN /* size */);
@@ -139,6 +139,10 @@ int BPF_PROG(artpen_pressure_interpolate, struct hid_bpf_ctx *hctx)
return 0;
}
+HID_BPF_OPS(wacom_artpen) = {
+ .hid_device_event = (void *)artpen_pressure_interpolate,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/XPPen__Artist24.bpf.c b/drivers/hid/bpf/progs/XPPen__Artist24.bpf.c
index e1be6a12bb75..c24fe905c50d 100644
--- a/drivers/hid/bpf/progs/XPPen__Artist24.bpf.c
+++ b/drivers/hid/bpf/progs/XPPen__Artist24.bpf.c
@@ -78,8 +78,6 @@ static const __u8 fixed_rdesc[] = {
0xc0, // End Collection 106
};
-#define BIT(n) (1UL << n)
-
#define TIP_SWITCH BIT(0)
#define BARREL_SWITCH BIT(1)
#define ERASER BIT(2)
@@ -91,7 +89,7 @@ static const __u8 fixed_rdesc[] = {
#define U16(index) (data[index] | (data[index + 1] << 8))
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc_xppen_artist24, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
@@ -152,13 +150,12 @@ static __u8 prev_state = 0;
* E: TipSwitch InRange
*
*/
-SEC("fmod_ret/hid_bpf_device_event")
+SEC(HID_BPF_DEVICE_EVENT)
int BPF_PROG(xppen_24_fix_eraser, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
__u8 current_state, changed_state;
bool prev_tip;
- __u16 tilt;
if (!data)
return 0; /* EPERM check */
@@ -209,6 +206,11 @@ int BPF_PROG(xppen_24_fix_eraser, struct hid_bpf_ctx *hctx)
return 0;
}
+HID_BPF_OPS(xppen_artist_24) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_xppen_artist24,
+ .hid_device_event = (void *)xppen_24_fix_eraser,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
index 65ef10036126..a669525691aa 100644
--- a/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
+++ b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
@@ -82,7 +82,7 @@ static const __u8 fixed_rdesc[] = {
0xc0, // End Collection 112
};
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc_xppen_artistpro16gen2, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
@@ -105,8 +105,7 @@ int BPF_PROG(hid_fix_rdesc_xppen_artistpro16gen2, struct hid_bpf_ctx *hctx)
return sizeof(fixed_rdesc);
}
-SEC("fmod_ret/hid_bpf_device_event")
-int BPF_PROG(xppen_16_fix_eraser, struct hid_bpf_ctx *hctx)
+static int xppen_16_fix_eraser(struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
@@ -207,8 +206,7 @@ static void compensate_coordinates_by_tilt(__u8 *data, const __u8 idx,
data[idx+1] = coords >> 8;
}
-SEC("fmod_ret/hid_bpf_device_event")
-int BPF_PROG(xppen_16_fix_angle_offset, struct hid_bpf_ctx *hctx)
+static int xppen_16_fix_angle_offset(struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
@@ -254,6 +252,22 @@ int BPF_PROG(xppen_16_fix_angle_offset, struct hid_bpf_ctx *hctx)
return 0;
}
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(xppen_artist_pro_16_device_event, struct hid_bpf_ctx *hctx)
+{
+ int ret = xppen_16_fix_angle_offset(hctx);
+
+ if (ret)
+ return ret;
+
+ return xppen_16_fix_eraser(hctx);
+}
+
+HID_BPF_OPS(xppen_artist_pro_16) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_xppen_artistpro16gen2,
+ .hid_device_event = (void *)xppen_artist_pro_16_device_event,
+};
+
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
diff --git a/drivers/hid/bpf/progs/XPPen__DecoMini4.bpf.c b/drivers/hid/bpf/progs/XPPen__DecoMini4.bpf.c
new file mode 100644
index 000000000000..46d5c459d0c9
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__DecoMini4.bpf.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 José Expósito
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_UGEE 0x28BD
+#define PID_DECO_MINI_4 0x0929
+#define RDESC_SIZE_PAD 177
+#define RDESC_SIZE_PEN 109
+#define PAD_REPORT_ID 0x06
+
+/*
+ * XP-Pen devices return a descriptor with the values the driver should use when
+ * one of its interfaces is queried. For this device the descriptor is:
+ *
+ * 0E 03 60 4F 88 3B 06 00 FF 1F D8 13
+ * ----- ----- ----- -----
+ * | | | |
+ * | | | `- Resolution: 5080 (13d8)
+ * | | `- Maximum pressure: 8191 (1FFF)
+ * | `- Logical maximum Y: 15240 (3B88)
+ * `- Logical maximum X: 20320 (4F60)
+ *
+ * The physical maximum is calculated as (logical_max * 1000) / resolution.
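+ *
+ * For example, with the values above this works out to
+ * (20320 * 1000) / 5080 = 4000 (0x0FA0) for X and
+ * (15240 * 1000) / 5080 = 3000 (0x0BB8) for Y, which is what
+ * PHYSICAL_MAX_X and PHYSICAL_MAX_Y below encode.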
+ */
+#define LOGICAL_MAX_X 0x60, 0x4F
+#define LOGICAL_MAX_Y 0x88, 0x3B
+#define PHYSICAL_MAX_X 0xA0, 0x0F
+#define PHYSICAL_MAX_Y 0xB8, 0x0B
+#define PRESSURE_MAX 0xFF, 0x1F
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_DECO_MINI_4)
+);
+
+/*
+ * The tablet sends these values when the pad buttons are pressed individually:
+ *
+ * Buttons released: 06 00 00 00 00 00 00 00
+ * Button 1: 06 00 05 00 00 00 00 00 -> b
+ * Button 2: 06 00 08 00 00 00 00 00 -> e
+ * Button 3: 06 04 00 00 00 00 00 00 -> LAlt
+ * Button 4: 06 00 2c 00 00 00 00 00 -> Space
+ * Button 5: 06 01 16 00 00 00 00 00 -> LControl + s
+ * Button 6: 06 01 1d 00 00 00 00 00 -> LControl + z
+ *
+ * When multiple buttons are pressed at the same time, the values used to
+ * identify the buttons are identical, but they appear in different bytes of the
+ * report. For example, when button 2 (0x08) and button 1 (0x05) are pressed,
+ * this is the report:
+ *
+ * Buttons 2 and 1: 06 00 08 05 00 00 00 00 -> e + b
+ *
+ * Buttons 1, 2, 4, 5 and 6 can be matched by finding their values in the
+ * report.
+ *
+ * Button 3 is pressed when the 3rd bit is 1. For example, pressing buttons 3
+ * and 5 generates this report:
+ *
+ * Buttons 3 and 5: 06 05 16 00 00 00 00 00 -> LControl + LAlt + s
+ * -- --
+ * | |
+ * | `- Button 5 (0x16)
+ * `- 0x05 = 0101. Button 3 is pressed
+ * ^
+ *
+ * pad_buttons contains a list of buttons that can be matched in
+ * HID_BPF_DEVICE_EVENT. Button 3 is not in the list because it has a
+ * dedicated bit.
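+ *
+ * As an illustration of the matching done in the HID_BPF_DEVICE_EVENT
+ * program below, the "Buttons 2 and 1" report above yields
+ * button_mask = BIT(1) | BIT(0) = 0x03, because 0x08 sits at index 1 and
+ * 0x05 at index 0 of pad_buttons.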
+ */
+static const __u8 pad_buttons[] = { 0x05, 0x08, 0x00, 0x2C, 0x16, 0x1D };
+
+static const __u8 fixed_pad_rdesc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x07, /* Usage (Keypad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x06, /* Report ID (6), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x39, /* Usage (Tablet Function Keys), */
+ 0xA0, /* Collection (Physical), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x06, /* Usage Maximum (06h), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x32, /* Report Count (50), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+
+static const __u8 fixed_pen_rdesc[] = {
+ 0x05, 0x0d, /* Usage Page (Digitizers), */
+ 0x09, 0x01, /* Usage (Digitizer), */
+ 0xa1, 0x01, /* Collection (Application), */
+ 0x85, 0x07, /* Report ID (7), */
+ 0x09, 0x20, /* Usage (Stylus), */
+ 0xa1, 0x00, /* Collection (Physical), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x09, 0x44, /* Usage (Barrel Switch), */
+ 0x09, 0x46, /* Usage (Tablet Pick), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x09, 0x32, /* Usage (In Range), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x35, 0x00, /* Physical Minimum (0), */
+ 0xa4, /* Push, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x55, 0x0d, /* Unit Exponent (-3), */
+ 0x26, LOGICAL_MAX_X, /* Logical Maximum, */
+ 0x46, PHYSICAL_MAX_X, /* Physical Maximum, */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x26, LOGICAL_MAX_Y, /* Logical Maximum, */
+ 0x46, PHYSICAL_MAX_Y, /* Physical Maximum, */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xb4, /* Pop, */
+ 0x09, 0x30, /* Usage (Tip Pressure), */
+ 0x45, 0x00, /* Physical Maximum (0), */
+ 0x26, PRESSURE_MAX, /* Logical Maximum, */
+ 0x75, 0x0D, /* Report Size (13), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x13, /* Report Count (19), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xc0, /* End Collection, */
+ 0xc0, /* End Collection */
+};
+
+static const size_t fixed_pad_rdesc_size = sizeof(fixed_pad_rdesc);
+static const size_t fixed_pen_rdesc_size = sizeof(fixed_pen_rdesc);
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_rdesc_fixup_xppen_deco_mini_4, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (hctx->size == RDESC_SIZE_PAD) {
+ __builtin_memcpy(data, fixed_pad_rdesc, fixed_pad_rdesc_size);
+ return fixed_pad_rdesc_size;
+ } else if (hctx->size == RDESC_SIZE_PEN) {
+ __builtin_memcpy(data, fixed_pen_rdesc, fixed_pen_rdesc_size);
+ return fixed_pen_rdesc_size;
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(hid_device_event_xppen_deco_mini_4, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 8 /* size */);
+ __u8 button_mask = 0;
+ int d, b;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != PAD_REPORT_ID)
+ return 0;
+
+ /* data[1] stores the status of BTN_2 in the 3rd bit */
+ if (data[1] & BIT(2))
+ button_mask |= BIT(2);
+
+ /* The rest of the report stores the buttons as in pad_buttons */
+ for (d = 2; d < 8; d++) {
+ for (b = 0; b < sizeof(pad_buttons); b++) {
+ if (data[d] != 0 && data[d] == pad_buttons[b])
+ button_mask |= BIT(b);
+ }
+ }
+
+ __u8 report[8] = {PAD_REPORT_ID, button_mask, 0x00};
+
+ __builtin_memcpy(data, report, sizeof(report));
+
+ return 0;
+}
+
+HID_BPF_OPS(deco_mini_4) = {
+ .hid_device_event = (void *)hid_device_event_xppen_deco_mini_4,
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup_xppen_deco_mini_4,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /*
+ * The device has 2 modes: the compatibility mode, enabled by default,
+ * and the raw mode, which can be activated by sending a buffer of magic
+ * data to a certain USB endpoint.
+ *
+ * Depending on the mode, different interfaces of the device are used:
+ * - First interface: Pad in compatibility mode
+ * - Second interface: Pen in compatibility mode
+ * - Third interface: Only used in raw mode
+ *
+ * We'll use the device in compatibility mode.
+ */
+ ctx->retval = ctx->rdesc_size != RDESC_SIZE_PAD &&
+ ctx->rdesc_size != RDESC_SIZE_PEN;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/hid_bpf.h b/drivers/hid/bpf/progs/hid_bpf.h
index 7ee371cac2e1..7cabd1b2e837 100644
--- a/drivers/hid/bpf/progs/hid_bpf.h
+++ b/drivers/hid/bpf/progs/hid_bpf.h
@@ -5,6 +5,12 @@
#ifndef ____HID_BPF__H
#define ____HID_BPF__H
+#define HID_BPF_DEVICE_EVENT "struct_ops/hid_device_event"
+#define HID_BPF_RDESC_FIXUP "struct_ops/hid_rdesc_fixup"
+#define HID_BPF_OPS(name) SEC(".struct_ops.link") \
+ struct hid_bpf_ops name
+#define hid_set_name(_hdev, _name) __builtin_memcpy(_hdev->name, _name, sizeof(_name))
+
struct hid_bpf_probe_args {
unsigned int hid;
unsigned int rdesc_size;
diff --git a/drivers/hid/bpf/progs/hid_bpf_helpers.h b/drivers/hid/bpf/progs/hid_bpf_helpers.h
index 8f226f6e886b..3ba24d125a08 100644
--- a/drivers/hid/bpf/progs/hid_bpf_helpers.h
+++ b/drivers/hid/bpf/progs/hid_bpf_helpers.h
@@ -66,6 +66,7 @@ extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
#define HID_VID_ANY 0x0000
#define HID_PID_ANY 0x0000
+#define BIT(n) (1UL << (n))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
/* Helper macro to convert (foo, __LINE__) into foo134 so we can use __LINE__ for
diff --git a/drivers/hid/bpf/progs/hid_report_helpers.h b/drivers/hid/bpf/progs/hid_report_helpers.h
new file mode 100644
index 000000000000..0aa1df438eeb
--- /dev/null
+++ b/drivers/hid/bpf/progs/hid_report_helpers.h
@@ -0,0 +1,2960 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+// THIS FILE IS GENERATED, DO NOT EDIT
+
+#pragma once
+
+
+/* Macros for composing HID reports.
+ *
+ * HID Fields are added manually to the template; please add to it as needed
+ * for any individual device. The Usage Pages and Usages are generated.
+ *
+ * Some macros have a _i8, _i16, or _i32 suffix. Pick the
+ * right suffix given the passed-in value.
+ */
+
+/*
+ * This macro behaves like static_assert(), failing to
+ * compile if its argument is not true. However, it always
+ * returns 0, which allows using it everywhere an expression
+ * can be used.
+ */
+#define must_be(e, msg_) \
+( \
+ 0 * (int) sizeof( \
+ struct { \
+ _Static_assert(e, msg_); \
+ int ISO_C_forbids_a_struct_with_no_members; \
+ } \
+ ) \
+)
+
+/* Ensure the given value fits within 8/16/32 bits */
+#define i4(v_) (((__u8)(v_) & 0xf) + must_be((v_) >= -0x8 && (v_) <= 0x7, "not a i4"))
+#define i8(v_) ((__u8)(v_) + must_be((v_) >= -0x80 && (v_) <= 0xff, "not a i8/u8"))
+#define i16(v_) ((__u16)(v_) + must_be((v_) >= -0x8000 && (v_) <= 0xffff, "not a i16/u16"))
+#define i32(v_) ((__u32)(v_) + must_be((v_) >= -0x80000000L && (v_) <= 0xffffffffL, \
+ "not a i32/u16"))
+
+/* Split a value across multiple bytes in LE order */
+#define LE16(v_) i16(v_) & 0xff, ((v_) >> 8) & 0xff
+#define LE32(v_) i32(v_) & 0xff, ((v_) >> 8) & 0xff, ((v_) >> 16) & 0xff, ((v_) >> 24) & 0xff
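+/* e.g. LE16(0x3b88) expands to 0x88, 0x3b */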
+
+/* Collections require two items in the report descriptor, the start
+ * of the collection (0xa?) and the EndCollection item (0xc?).
+ * This macro provides both, use like this:
+ *
+ * static const __u8 fixed_rdesc[] = {
+ * UsagePage_GenericDesktop
+ * Usage_GD_Keyboard
+ * CollectionApplication( ← Open the collection
+ * ReportId(3)
+ * LogicalRange_i8(0, 1)
+ * // other fields
+ * ) ← Ends the collection (emits the EndCollection item)
+ *
+ * Collections may be nested.
+ */
+#define Collection(col_, ...) 0xa1, i8(col_), __VA_ARGS__ 0xc0,
+#define CollectionPhysical(...) Collection(0x00, __VA_ARGS__)
+#define CollectionApplication(...) Collection(0x01, __VA_ARGS__)
+#define CollectionLogical(...) Collection(0x02, __VA_ARGS__)
+
+/* See Collections, this macro provides Push and Pop with
+ * elements in between
+ */
+#define PushPop(...) 0xa4, __VA_ARGS__ 0xb4,
+
+/* Arguments to use in bitwise-or for Input, Output, Feature */
+#define Const 0x1
+#define Var 0x2
+#define Arr 0x0
+#define Abs 0x0
+#define Rel 0x4
+
+/* Use like this: Input(Var|Abs) */
+#define Input(i_) 0x081, i8(i_),
+#define Output(i_) 0x091, i8(i_),
+#define Feature(i_) 0x0b1, i8(i_),
+
+#define ReportId(id_) 0x85, i8(id_),
+#define ReportSize(sz_) 0x75, i8(sz_),
+#define ReportCount(cnt_) 0x95, i8(cnt_),
+
+#define LogicalRange_i8(min_, max_) 0x15, i8(min_), 0x25, i8(max_),
+#define LogicalRange_i16(min_, max_) 0x16, LE16(min_), 0x26, LE16(max_),
+#define LogicalRange_i32(min_, max_) 0x17, LE32(min_), 0x27, LE32(max_),
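+/* e.g. LogicalRange_i8(0, 1) for a 1-bit button,
+ * LogicalRange_i16(0, 8191) for a 13-bit pressure value
+ */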
+
+#define PhysicalRange_i8(min_, max_) 0x35, i8(min_), 0x45, i8(max_),
+#define PhysicalRange_i16(min_, max_) 0x36, LE16(min_), 0x46, LE16(max_),
+#define PhysicalRange_i32(min_, max_) 0x37, LE32(min_), 0x47, LE32(max_),
+
+#define UsageRange_i8(min_, max_) 0x19, i8(min_), 0x29, i8(max_),
+#define UsageRange_i16(min_, max_) 0x1a, LE16(min_), 0x2a, LE16(max_),
+
+#define UsagePage_i8(p_) 0x05, i8(p_),
+#define UsagePage_i16(p_) 0x06, LE16(p_),
+
+#define Usage_i8(u_) 0x09, i8(u_),
+#define Usage_i16(u_) 0x0a, LE16(u_),
+#define Usage_i32(u_) 0x0b, LE32(u_),
+
+#define SILinear 0x1
+#define SIRotation 0x2
+#define EnglishLinear 0x3
+#define EnglishRotation 0x4
+#define cm (SILinear | (0x1 << 4))
+#define rad (SIRotation | (0x1 << 4))
+#define deg (EnglishRotation | (0x1 << 4))
+#define in (EnglishLinear | (0x1 << 4))
+/* Use as Unit(cm) or Unit(rad) or similar.
+ * This macro currently defaults to exponent 1 only, so no
+ * cm^2 or others
+ */
+#define Unit(u_) Unit_i8(u_)
+#define Unit_i8(u_) 0x65, i8(u_),
+#define Unit_i16(u_) 0x66, i16(u_),
+#define Unit_i32(u_) 0x67, i32(u_),
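+
+/* For example, Unit(cm) expands to 0x65, 0x11, because
+ * cm is SILinear | (0x1 << 4) = 0x11
+ */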
+
+#define UnitExponent(u_) 0x55, i4(u_),
+
+/* A macro to generate a vendor-specific padding-only
+ * report with Report ID 0xac of the given size in bytes.
+ * The size is inclusive of the 1 byte Report ID prefix.
+ *
+ * HID-BPF requires that at least one report has
+ * the same size as the original report from the device.
+ * The easy way to ensure that is to add this
+ * macro as the last element of your CollectionApplication;
+ * other reports can be of any size less than this.
+ *
+ * e.g.
+ * static __u8 fixed_rdesc = [
+ * UsagePage_Generic_Desktop
+ * Usage_GD_Keyboard
+ * CollectionApplication(
+ * ... intended rdesc items go here ...
+ * FixedSizeVendorReport(12)
+ * )
+ * };
+ *
+ * If the FixedSizeVendorReport is placed outside
+ * a CollectionApplication it will result in
+ * an extra useless evdev node being created.
+ */
+#define FixedSizeVendorReport(bytes_) \
+ UsagePage_Vendor(0xffff) \
+ Usage_i8(0x01) \
+ CollectionPhysical( \
+ ReportId(0xac) \
+ ReportSize(8) \
+ ReportCount((bytes_) - 1) \
+ Input(Const) \
+ )
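+
+/* e.g. FixedSizeVendorReport(12) pads with ReportCount(11) * ReportSize(8),
+ * i.e. 11 constant bytes, which together with the 1-byte Report ID adds up
+ * to 12 bytes
+ */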
+
+/* ----- Generated Usage Pages and Usages ------ */
+#define UsagePage_GenericDesktop UsagePage_i8(0x1)
+#define UsagePage_SimulationControls UsagePage_i8(0x2)
+#define UsagePage_VRControls UsagePage_i8(0x3)
+#define UsagePage_SportControls UsagePage_i8(0x4)
+#define UsagePage_GameControls UsagePage_i8(0x5)
+#define UsagePage_GenericDeviceControls UsagePage_i8(0x6)
+#define UsagePage_KeyboardKeypad UsagePage_i8(0x7)
+#define UsagePage_LED UsagePage_i8(0x8)
+#define UsagePage_Button UsagePage_i8(0x9)
+#define UsagePage_Ordinal UsagePage_i8(0xa)
+#define UsagePage_TelephonyDevice UsagePage_i8(0xb)
+#define UsagePage_Consumer UsagePage_i8(0xc)
+#define UsagePage_Digitizers UsagePage_i8(0xd)
+#define UsagePage_Haptics UsagePage_i8(0xe)
+#define UsagePage_PhysicalInputDevice UsagePage_i8(0xf)
+#define UsagePage_Unicode UsagePage_i8(0x10)
+#define UsagePage_SoC UsagePage_i8(0x11)
+#define UsagePage_EyeandHeadTrackers UsagePage_i8(0x12)
+#define UsagePage_AuxiliaryDisplay UsagePage_i8(0x14)
+#define UsagePage_Sensors UsagePage_i8(0x20)
+#define UsagePage_MedicalInstrument UsagePage_i8(0x40)
+#define UsagePage_BrailleDisplay UsagePage_i8(0x41)
+#define UsagePage_LightingAndIllumination UsagePage_i8(0x59)
+#define UsagePage_Monitor UsagePage_i8(0x80)
+#define UsagePage_MonitorEnumerated UsagePage_i8(0x81)
+#define UsagePage_VESAVirtualControls UsagePage_i8(0x82)
+#define UsagePage_Power UsagePage_i8(0x84)
+#define UsagePage_BatterySystem UsagePage_i8(0x85)
+#define UsagePage_BarcodeScanner UsagePage_i8(0x8c)
+#define UsagePage_Scales UsagePage_i8(0x8d)
+#define UsagePage_MagneticStripeReader UsagePage_i8(0x8e)
+#define UsagePage_CameraControl UsagePage_i8(0x90)
+#define UsagePage_Arcade UsagePage_i8(0x91)
+#define UsagePage_FIDOAlliance UsagePage_i16(0xf1d0)
+#define UsagePage_Vendor(u_) \
+ UsagePage_i16((u_) + must_be(((u_) & 0xff00) == 0xff00, "not a 0xff00 vendor page"))
+
+#define Usage_GD_Pointer Usage_i8(0x1)
+#define Usage_GD_Mouse Usage_i8(0x2)
+#define Usage_GD_Joystick Usage_i8(0x4)
+#define Usage_GD_Gamepad Usage_i8(0x5)
+#define Usage_GD_Keyboard Usage_i8(0x6)
+#define Usage_GD_Keypad Usage_i8(0x7)
+#define Usage_GD_MultiaxisController Usage_i8(0x8)
+#define Usage_GD_TabletPCSystemControls Usage_i8(0x9)
+#define Usage_GD_WaterCoolingDevice Usage_i8(0xa)
+#define Usage_GD_ComputerChassisDevice Usage_i8(0xb)
+#define Usage_GD_WirelessRadioControls Usage_i8(0xc)
+#define Usage_GD_PortableDeviceControl Usage_i8(0xd)
+#define Usage_GD_SystemMultiAxisController Usage_i8(0xe)
+#define Usage_GD_SpatialController Usage_i8(0xf)
+#define Usage_GD_AssistiveControl Usage_i8(0x10)
+#define Usage_GD_DeviceDock Usage_i8(0x11)
+#define Usage_GD_DockableDevice Usage_i8(0x12)
+#define Usage_GD_CallStateManagementControl Usage_i8(0x13)
+#define Usage_GD_X Usage_i8(0x30)
+#define Usage_GD_Y Usage_i8(0x31)
+#define Usage_GD_Z Usage_i8(0x32)
+#define Usage_GD_Rx Usage_i8(0x33)
+#define Usage_GD_Ry Usage_i8(0x34)
+#define Usage_GD_Rz Usage_i8(0x35)
+#define Usage_GD_Slider Usage_i8(0x36)
+#define Usage_GD_Dial Usage_i8(0x37)
+#define Usage_GD_Wheel Usage_i8(0x38)
+#define Usage_GD_HatSwitch Usage_i8(0x39)
+#define Usage_GD_CountedBuffer Usage_i8(0x3a)
+#define Usage_GD_ByteCount Usage_i8(0x3b)
+#define Usage_GD_MotionWakeup Usage_i8(0x3c)
+#define Usage_GD_Start Usage_i8(0x3d)
+#define Usage_GD_Select Usage_i8(0x3e)
+#define Usage_GD_Vx Usage_i8(0x40)
+#define Usage_GD_Vy Usage_i8(0x41)
+#define Usage_GD_Vz Usage_i8(0x42)
+#define Usage_GD_Vbrx Usage_i8(0x43)
+#define Usage_GD_Vbry Usage_i8(0x44)
+#define Usage_GD_Vbrz Usage_i8(0x45)
+#define Usage_GD_Vno Usage_i8(0x46)
+#define Usage_GD_FeatureNotification Usage_i8(0x47)
+#define Usage_GD_ResolutionMultiplier Usage_i8(0x48)
+#define Usage_GD_Qx Usage_i8(0x49)
+#define Usage_GD_Qy Usage_i8(0x4a)
+#define Usage_GD_Qz Usage_i8(0x4b)
+#define Usage_GD_Qw Usage_i8(0x4c)
+#define Usage_GD_SystemControl Usage_i8(0x80)
+#define Usage_GD_SystemPowerDown Usage_i8(0x81)
+#define Usage_GD_SystemSleep Usage_i8(0x82)
+#define Usage_GD_SystemWakeUp Usage_i8(0x83)
+#define Usage_GD_SystemContextMenu Usage_i8(0x84)
+#define Usage_GD_SystemMainMenu Usage_i8(0x85)
+#define Usage_GD_SystemAppMenu Usage_i8(0x86)
+#define Usage_GD_SystemMenuHelp Usage_i8(0x87)
+#define Usage_GD_SystemMenuExit Usage_i8(0x88)
+#define Usage_GD_SystemMenuSelect Usage_i8(0x89)
+#define Usage_GD_SystemMenuRight Usage_i8(0x8a)
+#define Usage_GD_SystemMenuLeft Usage_i8(0x8b)
+#define Usage_GD_SystemMenuUp Usage_i8(0x8c)
+#define Usage_GD_SystemMenuDown Usage_i8(0x8d)
+#define Usage_GD_SystemColdRestart Usage_i8(0x8e)
+#define Usage_GD_SystemWarmRestart Usage_i8(0x8f)
+#define Usage_GD_DpadUp Usage_i8(0x90)
+#define Usage_GD_DpadDown Usage_i8(0x91)
+#define Usage_GD_DpadRight Usage_i8(0x92)
+#define Usage_GD_DpadLeft Usage_i8(0x93)
+#define Usage_GD_IndexTrigger Usage_i8(0x94)
+#define Usage_GD_PalmTrigger Usage_i8(0x95)
+#define Usage_GD_Thumbstick Usage_i8(0x96)
+#define Usage_GD_SystemFunctionShift Usage_i8(0x97)
+#define Usage_GD_SystemFunctionShiftLock Usage_i8(0x98)
+#define Usage_GD_SystemFunctionShiftLockIndicator Usage_i8(0x99)
+#define Usage_GD_SystemDismissNotification Usage_i8(0x9a)
+#define Usage_GD_SystemDoNotDisturb Usage_i8(0x9b)
+#define Usage_GD_SystemDock Usage_i8(0xa0)
+#define Usage_GD_SystemUndock Usage_i8(0xa1)
+#define Usage_GD_SystemSetup Usage_i8(0xa2)
+#define Usage_GD_SystemBreak Usage_i8(0xa3)
+#define Usage_GD_SystemDebuggerBreak Usage_i8(0xa4)
+#define Usage_GD_ApplicationBreak Usage_i8(0xa5)
+#define Usage_GD_ApplicationDebuggerBreak Usage_i8(0xa6)
+#define Usage_GD_SystemSpeakerMute Usage_i8(0xa7)
+#define Usage_GD_SystemHibernate Usage_i8(0xa8)
+#define Usage_GD_SystemMicrophoneMute Usage_i8(0xa9)
+#define Usage_GD_SystemDisplayInvert Usage_i8(0xb0)
+#define Usage_GD_SystemDisplayInternal Usage_i8(0xb1)
+#define Usage_GD_SystemDisplayExternal Usage_i8(0xb2)
+#define Usage_GD_SystemDisplayBoth Usage_i8(0xb3)
+#define Usage_GD_SystemDisplayDual Usage_i8(0xb4)
+#define Usage_GD_SystemDisplayToggleIntExtMode Usage_i8(0xb5)
+#define Usage_GD_SystemDisplaySwapPrimarySecondary Usage_i8(0xb6)
+#define Usage_GD_SystemDisplayToggleLCDAutoscale Usage_i8(0xb7)
+#define Usage_GD_SensorZone Usage_i8(0xc0)
+#define Usage_GD_RPM Usage_i8(0xc1)
+#define Usage_GD_CoolantLevel Usage_i8(0xc2)
+#define Usage_GD_CoolantCriticalLevel Usage_i8(0xc3)
+#define Usage_GD_CoolantPump Usage_i8(0xc4)
+#define Usage_GD_ChassisEnclosure Usage_i8(0xc5)
+#define Usage_GD_WirelessRadioButton Usage_i8(0xc6)
+#define Usage_GD_WirelessRadioLED Usage_i8(0xc7)
+#define Usage_GD_WirelessRadioSliderSwitch Usage_i8(0xc8)
+#define Usage_GD_SystemDisplayRotationLockButton Usage_i8(0xc9)
+#define Usage_GD_SystemDisplayRotationLockSliderSwitch Usage_i8(0xca)
+#define Usage_GD_ControlEnable Usage_i8(0xcb)
+#define Usage_GD_DockableDeviceUniqueID Usage_i8(0xd0)
+#define Usage_GD_DockableDeviceVendorID Usage_i8(0xd1)
+#define Usage_GD_DockableDevicePrimaryUsagePage Usage_i8(0xd2)
+#define Usage_GD_DockableDevicePrimaryUsageID Usage_i8(0xd3)
+#define Usage_GD_DockableDeviceDockingState Usage_i8(0xd4)
+#define Usage_GD_DockableDeviceDisplayOcclusion Usage_i8(0xd5)
+#define Usage_GD_DockableDeviceObjectType Usage_i8(0xd6)
+#define Usage_GD_CallActiveLED Usage_i8(0xe0)
+#define Usage_GD_CallMuteToggle Usage_i8(0xe1)
+#define Usage_GD_CallMuteLED Usage_i8(0xe2)
+#define Usage_SC_FlightSimulationDevice Usage_i8(0x1)
+#define Usage_SC_AutomobileSimulationDevice Usage_i8(0x2)
+#define Usage_SC_TankSimulationDevice Usage_i8(0x3)
+#define Usage_SC_SpaceshipSimulationDevice Usage_i8(0x4)
+#define Usage_SC_SubmarineSimulationDevice Usage_i8(0x5)
+#define Usage_SC_SailingSimulationDevice Usage_i8(0x6)
+#define Usage_SC_MotorcycleSimulationDevice Usage_i8(0x7)
+#define Usage_SC_SportsSimulationDevice Usage_i8(0x8)
+#define Usage_SC_AirplaneSimulationDevice Usage_i8(0x9)
+#define Usage_SC_HelicopterSimulationDevice Usage_i8(0xa)
+#define Usage_SC_MagicCarpetSimulationDevice Usage_i8(0xb)
+#define Usage_SC_BicycleSimulationDevice Usage_i8(0xc)
+#define Usage_SC_FlightControlStick Usage_i8(0x20)
+#define Usage_SC_FlightStick Usage_i8(0x21)
+#define Usage_SC_CyclicControl Usage_i8(0x22)
+#define Usage_SC_CyclicTrim Usage_i8(0x23)
+#define Usage_SC_FlightYoke Usage_i8(0x24)
+#define Usage_SC_TrackControl Usage_i8(0x25)
+#define Usage_SC_Aileron Usage_i8(0xb0)
+#define Usage_SC_AileronTrim Usage_i8(0xb1)
+#define Usage_SC_AntiTorqueControl Usage_i8(0xb2)
+#define Usage_SC_AutopilotEnable Usage_i8(0xb3)
+#define Usage_SC_ChaffRelease Usage_i8(0xb4)
+#define Usage_SC_CollectiveControl Usage_i8(0xb5)
+#define Usage_SC_DiveBrake Usage_i8(0xb6)
+#define Usage_SC_ElectronicCountermeasures Usage_i8(0xb7)
+#define Usage_SC_Elevator Usage_i8(0xb8)
+#define Usage_SC_ElevatorTrim Usage_i8(0xb9)
+#define Usage_SC_Rudder Usage_i8(0xba)
+#define Usage_SC_Throttle Usage_i8(0xbb)
+#define Usage_SC_FlightCommunications Usage_i8(0xbc)
+#define Usage_SC_FlareRelease Usage_i8(0xbd)
+#define Usage_SC_LandingGear Usage_i8(0xbe)
+#define Usage_SC_ToeBrake Usage_i8(0xbf)
+#define Usage_SC_Trigger Usage_i8(0xc0)
+#define Usage_SC_WeaponsArm Usage_i8(0xc1)
+#define Usage_SC_WeaponsSelect Usage_i8(0xc2)
+#define Usage_SC_WingFlaps Usage_i8(0xc3)
+#define Usage_SC_Accelerator Usage_i8(0xc4)
+#define Usage_SC_Brake Usage_i8(0xc5)
+#define Usage_SC_Clutch Usage_i8(0xc6)
+#define Usage_SC_Shifter Usage_i8(0xc7)
+#define Usage_SC_Steering Usage_i8(0xc8)
+#define Usage_SC_TurretDirection Usage_i8(0xc9)
+#define Usage_SC_BarrelElevation Usage_i8(0xca)
+#define Usage_SC_DivePlane Usage_i8(0xcb)
+#define Usage_SC_Ballast Usage_i8(0xcc)
+#define Usage_SC_BicycleCrank Usage_i8(0xcd)
+#define Usage_SC_HandleBars Usage_i8(0xce)
+#define Usage_SC_FrontBrake Usage_i8(0xcf)
+#define Usage_SC_RearBrake Usage_i8(0xd0)
+#define Usage_VRC_Belt Usage_i8(0x1)
+#define Usage_VRC_BodySuit Usage_i8(0x2)
+#define Usage_VRC_Flexor Usage_i8(0x3)
+#define Usage_VRC_Glove Usage_i8(0x4)
+#define Usage_VRC_HeadTracker Usage_i8(0x5)
+#define Usage_VRC_HeadMountedDisplay Usage_i8(0x6)
+#define Usage_VRC_HandTracker Usage_i8(0x7)
+#define Usage_VRC_Oculometer Usage_i8(0x8)
+#define Usage_VRC_Vest Usage_i8(0x9)
+#define Usage_VRC_AnimatronicDevice Usage_i8(0xa)
+#define Usage_VRC_StereoEnable Usage_i8(0x20)
+#define Usage_VRC_DisplayEnable Usage_i8(0x21)
+#define Usage_SC_BaseballBat Usage_i8(0x1)
+#define Usage_SC_GolfClub Usage_i8(0x2)
+#define Usage_SC_RowingMachine Usage_i8(0x3)
+#define Usage_SC_Treadmill Usage_i8(0x4)
+#define Usage_SC_Oar Usage_i8(0x30)
+#define Usage_SC_Slope Usage_i8(0x31)
+#define Usage_SC_Rate Usage_i8(0x32)
+#define Usage_SC_StickSpeed Usage_i8(0x33)
+#define Usage_SC_StickFaceAngle Usage_i8(0x34)
+#define Usage_SC_StickHeelToe Usage_i8(0x35)
+#define Usage_SC_StickFollowThrough Usage_i8(0x36)
+#define Usage_SC_StickTempo Usage_i8(0x37)
+#define Usage_SC_StickType Usage_i8(0x38)
+#define Usage_SC_StickHeight Usage_i8(0x39)
+#define Usage_SC_Putter Usage_i8(0x50)
+#define Usage_SC_OneIron Usage_i8(0x51)
+#define Usage_SC_TwoIron Usage_i8(0x52)
+#define Usage_SC_ThreeIron Usage_i8(0x53)
+#define Usage_SC_FourIron Usage_i8(0x54)
+#define Usage_SC_FiveIron Usage_i8(0x55)
+#define Usage_SC_SixIron Usage_i8(0x56)
+#define Usage_SC_SevenIron Usage_i8(0x57)
+#define Usage_SC_EightIron Usage_i8(0x58)
+#define Usage_SC_NineIron Usage_i8(0x59)
+#define Usage_SC_One0Iron Usage_i8(0x5a)
+#define Usage_SC_One1Iron Usage_i8(0x5b)
+#define Usage_SC_SandWedge Usage_i8(0x5c)
+#define Usage_SC_LoftWedge Usage_i8(0x5d)
+#define Usage_SC_PowerWedge Usage_i8(0x5e)
+#define Usage_SC_OneWood Usage_i8(0x5f)
+#define Usage_SC_ThreeWood Usage_i8(0x60)
+#define Usage_SC_FiveWood Usage_i8(0x61)
+#define Usage_SC_SevenWood Usage_i8(0x62)
+#define Usage_SC_NineWood Usage_i8(0x63)
+#define Usage_GC_ThreeDGameController Usage_i8(0x1)
+#define Usage_GC_PinballDevice Usage_i8(0x2)
+#define Usage_GC_GunDevice Usage_i8(0x3)
+#define Usage_GC_PointofView Usage_i8(0x20)
+#define Usage_GC_TurnRightLeft Usage_i8(0x21)
+#define Usage_GC_PitchForwardBackward Usage_i8(0x22)
+#define Usage_GC_RollRightLeft Usage_i8(0x23)
+#define Usage_GC_MoveRightLeft Usage_i8(0x24)
+#define Usage_GC_MoveForwardBackward Usage_i8(0x25)
+#define Usage_GC_MoveUpDown Usage_i8(0x26)
+#define Usage_GC_LeanRightLeft Usage_i8(0x27)
+#define Usage_GC_LeanForwardBackward Usage_i8(0x28)
+#define Usage_GC_HeightofPOV Usage_i8(0x29)
+#define Usage_GC_Flipper Usage_i8(0x2a)
+#define Usage_GC_SecondaryFlipper Usage_i8(0x2b)
+#define Usage_GC_Bump Usage_i8(0x2c)
+#define Usage_GC_NewGame Usage_i8(0x2d)
+#define Usage_GC_ShootBall Usage_i8(0x2e)
+#define Usage_GC_Player Usage_i8(0x2f)
+#define Usage_GC_GunBolt Usage_i8(0x30)
+#define Usage_GC_GunClip Usage_i8(0x31)
+#define Usage_GC_GunSelector Usage_i8(0x32)
+#define Usage_GC_GunSingleShot Usage_i8(0x33)
+#define Usage_GC_GunBurst Usage_i8(0x34)
+#define Usage_GC_GunAutomatic Usage_i8(0x35)
+#define Usage_GC_GunSafety Usage_i8(0x36)
+#define Usage_GC_GamepadFireJump Usage_i8(0x37)
+#define Usage_GC_GamepadTrigger Usage_i8(0x39)
+#define Usage_GC_FormfittingGamepad Usage_i8(0x3a)
+#define Usage_GDC_BackgroundNonuserControls Usage_i8(0x1)
+#define Usage_GDC_BatteryStrength Usage_i8(0x20)
+#define Usage_GDC_WirelessChannel Usage_i8(0x21)
+#define Usage_GDC_WirelessID Usage_i8(0x22)
+#define Usage_GDC_DiscoverWirelessControl Usage_i8(0x23)
+#define Usage_GDC_SecurityCodeCharacterEntered Usage_i8(0x24)
+#define Usage_GDC_SecurityCodeCharacterErased Usage_i8(0x25)
+#define Usage_GDC_SecurityCodeCleared Usage_i8(0x26)
+#define Usage_GDC_SequenceID Usage_i8(0x27)
+#define Usage_GDC_SequenceIDReset Usage_i8(0x28)
+#define Usage_GDC_RFSignalStrength Usage_i8(0x29)
+#define Usage_GDC_SoftwareVersion Usage_i8(0x2a)
+#define Usage_GDC_ProtocolVersion Usage_i8(0x2b)
+#define Usage_GDC_HardwareVersion Usage_i8(0x2c)
+#define Usage_GDC_Major Usage_i8(0x2d)
+#define Usage_GDC_Minor Usage_i8(0x2e)
+#define Usage_GDC_Revision Usage_i8(0x2f)
+#define Usage_GDC_Handedness Usage_i8(0x30)
+#define Usage_GDC_EitherHand Usage_i8(0x31)
+#define Usage_GDC_LeftHand Usage_i8(0x32)
+#define Usage_GDC_RightHand Usage_i8(0x33)
+#define Usage_GDC_BothHands Usage_i8(0x34)
+#define Usage_GDC_GripPoseOffset Usage_i8(0x40)
+#define Usage_GDC_PointerPoseOffset Usage_i8(0x41)
+#define Usage_KK_ErrorRollOver Usage_i8(0x1)
+#define Usage_KK_POSTFail Usage_i8(0x2)
+#define Usage_KK_ErrorUndefined Usage_i8(0x3)
+#define Usage_KK_KeyboardA Usage_i8(0x4)
+#define Usage_KK_KeyboardB Usage_i8(0x5)
+#define Usage_KK_KeyboardC Usage_i8(0x6)
+#define Usage_KK_KeyboardD Usage_i8(0x7)
+#define Usage_KK_KeyboardE Usage_i8(0x8)
+#define Usage_KK_KeyboardF Usage_i8(0x9)
+#define Usage_KK_KeyboardG Usage_i8(0xa)
+#define Usage_KK_KeyboardH Usage_i8(0xb)
+#define Usage_KK_KeyboardI Usage_i8(0xc)
+#define Usage_KK_KeyboardJ Usage_i8(0xd)
+#define Usage_KK_KeyboardK Usage_i8(0xe)
+#define Usage_KK_KeyboardL Usage_i8(0xf)
+#define Usage_KK_KeyboardM Usage_i8(0x10)
+#define Usage_KK_KeyboardN Usage_i8(0x11)
+#define Usage_KK_KeyboardO Usage_i8(0x12)
+#define Usage_KK_KeyboardP Usage_i8(0x13)
+#define Usage_KK_KeyboardQ Usage_i8(0x14)
+#define Usage_KK_KeyboardR Usage_i8(0x15)
+#define Usage_KK_KeyboardS Usage_i8(0x16)
+#define Usage_KK_KeyboardT Usage_i8(0x17)
+#define Usage_KK_KeyboardU Usage_i8(0x18)
+#define Usage_KK_KeyboardV Usage_i8(0x19)
+#define Usage_KK_KeyboardW Usage_i8(0x1a)
+#define Usage_KK_KeyboardX Usage_i8(0x1b)
+#define Usage_KK_KeyboardY Usage_i8(0x1c)
+#define Usage_KK_KeyboardZ Usage_i8(0x1d)
+#define Usage_KK_Keyboard1andBang Usage_i8(0x1e)
+#define Usage_KK_Keyboard2andAt Usage_i8(0x1f)
+#define Usage_KK_Keyboard3andHash Usage_i8(0x20)
+#define Usage_KK_Keyboard4andDollar Usage_i8(0x21)
+#define Usage_KK_Keyboard5andPercent Usage_i8(0x22)
+#define Usage_KK_Keyboard6andCaret Usage_i8(0x23)
+#define Usage_KK_Keyboard7andAmpersand Usage_i8(0x24)
+#define Usage_KK_Keyboard8andStar Usage_i8(0x25)
+#define Usage_KK_Keyboard9andLeftBracket Usage_i8(0x26)
+#define Usage_KK_Keyboard0andRightBracket Usage_i8(0x27)
+#define Usage_KK_KeyboardReturnEnter Usage_i8(0x28)
+#define Usage_KK_KeyboardEscape Usage_i8(0x29)
+#define Usage_KK_KeyboardDelete Usage_i8(0x2a)
+#define Usage_KK_KeyboardTab Usage_i8(0x2b)
+#define Usage_KK_KeyboardSpacebar Usage_i8(0x2c)
+#define Usage_KK_KeyboardDashandUnderscore Usage_i8(0x2d)
+#define Usage_KK_KeyboardEqualsandPlus Usage_i8(0x2e)
+#define Usage_KK_KeyboardLeftBrace Usage_i8(0x2f)
+#define Usage_KK_KeyboardRightBrace Usage_i8(0x30)
+#define Usage_KK_KeyboardBackslashandPipe Usage_i8(0x31)
+#define Usage_KK_KeyboardNonUSHashandTilde Usage_i8(0x32)
+#define Usage_KK_KeyboardSemiColonandColon Usage_i8(0x33)
+#define Usage_KK_KeyboardLeftAposandDouble Usage_i8(0x34)
+#define Usage_KK_KeyboardGraveAccentandTilde Usage_i8(0x35)
+#define Usage_KK_KeyboardCommaandLessThan Usage_i8(0x36)
+#define Usage_KK_KeyboardPeriodandGreaterThan Usage_i8(0x37)
+#define Usage_KK_KeyboardForwardSlashandQuestionMark Usage_i8(0x38)
+#define Usage_KK_KeyboardCapsLock Usage_i8(0x39)
+#define Usage_KK_KeyboardF1 Usage_i8(0x3a)
+#define Usage_KK_KeyboardF2 Usage_i8(0x3b)
+#define Usage_KK_KeyboardF3 Usage_i8(0x3c)
+#define Usage_KK_KeyboardF4 Usage_i8(0x3d)
+#define Usage_KK_KeyboardF5 Usage_i8(0x3e)
+#define Usage_KK_KeyboardF6 Usage_i8(0x3f)
+#define Usage_KK_KeyboardF7 Usage_i8(0x40)
+#define Usage_KK_KeyboardF8 Usage_i8(0x41)
+#define Usage_KK_KeyboardF9 Usage_i8(0x42)
+#define Usage_KK_KeyboardF10 Usage_i8(0x43)
+#define Usage_KK_KeyboardF11 Usage_i8(0x44)
+#define Usage_KK_KeyboardF12 Usage_i8(0x45)
+#define Usage_KK_KeyboardPrintScreen Usage_i8(0x46)
+#define Usage_KK_KeyboardScrollLock Usage_i8(0x47)
+#define Usage_KK_KeyboardPause Usage_i8(0x48)
+#define Usage_KK_KeyboardInsert Usage_i8(0x49)
+#define Usage_KK_KeyboardHome Usage_i8(0x4a)
+#define Usage_KK_KeyboardPageUp Usage_i8(0x4b)
+#define Usage_KK_KeyboardDeleteForward Usage_i8(0x4c)
+#define Usage_KK_KeyboardEnd Usage_i8(0x4d)
+#define Usage_KK_KeyboardPageDown Usage_i8(0x4e)
+#define Usage_KK_KeyboardRightArrow Usage_i8(0x4f)
+#define Usage_KK_KeyboardLeftArrow Usage_i8(0x50)
+#define Usage_KK_KeyboardDownArrow Usage_i8(0x51)
+#define Usage_KK_KeyboardUpArrow Usage_i8(0x52)
+#define Usage_KK_KeypadNumLockandClear Usage_i8(0x53)
+#define Usage_KK_KeypadForwardSlash Usage_i8(0x54)
+#define Usage_KK_KeypadStar Usage_i8(0x55)
+#define Usage_KK_KeypadDash Usage_i8(0x56)
+#define Usage_KK_KeypadPlus Usage_i8(0x57)
+#define Usage_KK_KeypadENTER Usage_i8(0x58)
+#define Usage_KK_Keypad1andEnd Usage_i8(0x59)
+#define Usage_KK_Keypad2andDownArrow Usage_i8(0x5a)
+#define Usage_KK_Keypad3andPageDn Usage_i8(0x5b)
+#define Usage_KK_Keypad4andLeftArrow Usage_i8(0x5c)
+#define Usage_KK_Keypad5 Usage_i8(0x5d)
+#define Usage_KK_Keypad6andRightArrow Usage_i8(0x5e)
+#define Usage_KK_Keypad7andHome Usage_i8(0x5f)
+#define Usage_KK_Keypad8andUpArrow Usage_i8(0x60)
+#define Usage_KK_Keypad9andPageUp Usage_i8(0x61)
+#define Usage_KK_Keypad0andInsert Usage_i8(0x62)
+#define Usage_KK_KeypadPeriodandDelete Usage_i8(0x63)
+#define Usage_KK_KeyboardNonUSBackslashandPipe Usage_i8(0x64)
+#define Usage_KK_KeyboardApplication Usage_i8(0x65)
+#define Usage_KK_KeyboardPower Usage_i8(0x66)
+#define Usage_KK_KeypadEquals Usage_i8(0x67)
+#define Usage_KK_KeyboardF13 Usage_i8(0x68)
+#define Usage_KK_KeyboardF14 Usage_i8(0x69)
+#define Usage_KK_KeyboardF15 Usage_i8(0x6a)
+#define Usage_KK_KeyboardF16 Usage_i8(0x6b)
+#define Usage_KK_KeyboardF17 Usage_i8(0x6c)
+#define Usage_KK_KeyboardF18 Usage_i8(0x6d)
+#define Usage_KK_KeyboardF19 Usage_i8(0x6e)
+#define Usage_KK_KeyboardF20 Usage_i8(0x6f)
+#define Usage_KK_KeyboardF21 Usage_i8(0x70)
+#define Usage_KK_KeyboardF22 Usage_i8(0x71)
+#define Usage_KK_KeyboardF23 Usage_i8(0x72)
+#define Usage_KK_KeyboardF24 Usage_i8(0x73)
+#define Usage_KK_KeyboardExecute Usage_i8(0x74)
+#define Usage_KK_KeyboardHelp Usage_i8(0x75)
+#define Usage_KK_KeyboardMenu Usage_i8(0x76)
+#define Usage_KK_KeyboardSelect Usage_i8(0x77)
+#define Usage_KK_KeyboardStop Usage_i8(0x78)
+#define Usage_KK_KeyboardAgain Usage_i8(0x79)
+#define Usage_KK_KeyboardUndo Usage_i8(0x7a)
+#define Usage_KK_KeyboardCut Usage_i8(0x7b)
+#define Usage_KK_KeyboardCopy Usage_i8(0x7c)
+#define Usage_KK_KeyboardPaste Usage_i8(0x7d)
+#define Usage_KK_KeyboardFind Usage_i8(0x7e)
+#define Usage_KK_KeyboardMute Usage_i8(0x7f)
+#define Usage_KK_KeyboardVolumeUp Usage_i8(0x80)
+#define Usage_KK_KeyboardVolumeDown Usage_i8(0x81)
+#define Usage_KK_KeyboardLockingCapsLock Usage_i8(0x82)
+#define Usage_KK_KeyboardLockingNumLock Usage_i8(0x83)
+#define Usage_KK_KeyboardLockingScrollLock Usage_i8(0x84)
+#define Usage_KK_KeypadComma Usage_i8(0x85)
+#define Usage_KK_KeypadEqualSign Usage_i8(0x86)
+#define Usage_KK_KeyboardInternational1 Usage_i8(0x87)
+#define Usage_KK_KeyboardInternational2 Usage_i8(0x88)
+#define Usage_KK_KeyboardInternational3 Usage_i8(0x89)
+#define Usage_KK_KeyboardInternational4 Usage_i8(0x8a)
+#define Usage_KK_KeyboardInternational5 Usage_i8(0x8b)
+#define Usage_KK_KeyboardInternational6 Usage_i8(0x8c)
+#define Usage_KK_KeyboardInternational7 Usage_i8(0x8d)
+#define Usage_KK_KeyboardInternational8 Usage_i8(0x8e)
+#define Usage_KK_KeyboardInternational9 Usage_i8(0x8f)
+#define Usage_KK_KeyboardLANG1 Usage_i8(0x90)
+#define Usage_KK_KeyboardLANG2 Usage_i8(0x91)
+#define Usage_KK_KeyboardLANG3 Usage_i8(0x92)
+#define Usage_KK_KeyboardLANG4 Usage_i8(0x93)
+#define Usage_KK_KeyboardLANG5 Usage_i8(0x94)
+#define Usage_KK_KeyboardLANG6 Usage_i8(0x95)
+#define Usage_KK_KeyboardLANG7 Usage_i8(0x96)
+#define Usage_KK_KeyboardLANG8 Usage_i8(0x97)
+#define Usage_KK_KeyboardLANG9 Usage_i8(0x98)
+#define Usage_KK_KeyboardAlternateErase Usage_i8(0x99)
+#define Usage_KK_KeyboardSysReqAttention Usage_i8(0x9a)
+#define Usage_KK_KeyboardCancel Usage_i8(0x9b)
+#define Usage_KK_KeyboardClear Usage_i8(0x9c)
+#define Usage_KK_KeyboardPrior Usage_i8(0x9d)
+#define Usage_KK_KeyboardReturn Usage_i8(0x9e)
+#define Usage_KK_KeyboardSeparator Usage_i8(0x9f)
+#define Usage_KK_KeyboardOut Usage_i8(0xa0)
+#define Usage_KK_KeyboardOper Usage_i8(0xa1)
+#define Usage_KK_KeyboardClearAgain Usage_i8(0xa2)
+#define Usage_KK_KeyboardCrSelProps Usage_i8(0xa3)
+#define Usage_KK_KeyboardExSel Usage_i8(0xa4)
+#define Usage_KK_KeypadDouble0 Usage_i8(0xb0)
+#define Usage_KK_KeypadTriple0 Usage_i8(0xb1)
+#define Usage_KK_ThousandsSeparator Usage_i8(0xb2)
+#define Usage_KK_DecimalSeparator Usage_i8(0xb3)
+#define Usage_KK_CurrencyUnit Usage_i8(0xb4)
+#define Usage_KK_CurrencySubunit Usage_i8(0xb5)
+#define Usage_KK_KeypadLeftBracket Usage_i8(0xb6)
+#define Usage_KK_KeypadRightBracket Usage_i8(0xb7)
+#define Usage_KK_KeypadLeftBrace Usage_i8(0xb8)
+#define Usage_KK_KeypadRightBrace Usage_i8(0xb9)
+#define Usage_KK_KeypadTab Usage_i8(0xba)
+#define Usage_KK_KeypadBackspace Usage_i8(0xbb)
+#define Usage_KK_KeypadA Usage_i8(0xbc)
+#define Usage_KK_KeypadB Usage_i8(0xbd)
+#define Usage_KK_KeypadC Usage_i8(0xbe)
+#define Usage_KK_KeypadD Usage_i8(0xbf)
+#define Usage_KK_KeypadE Usage_i8(0xc0)
+#define Usage_KK_KeypadF Usage_i8(0xc1)
+#define Usage_KK_KeypadXOR Usage_i8(0xc2)
+#define Usage_KK_KeypadCaret Usage_i8(0xc3)
+#define Usage_KK_KeypadPercentage Usage_i8(0xc4)
+#define Usage_KK_KeypadLess Usage_i8(0xc5)
+#define Usage_KK_KeypadGreater Usage_i8(0xc6)
+#define Usage_KK_KeypadAmpersand Usage_i8(0xc7)
+#define Usage_KK_KeypadDoubleAmpersand Usage_i8(0xc8)
+#define Usage_KK_KeypadBar Usage_i8(0xc9)
+#define Usage_KK_KeypadDoubleBar Usage_i8(0xca)
+#define Usage_KK_KeypadColon Usage_i8(0xcb)
+#define Usage_KK_KeypadHash Usage_i8(0xcc)
+#define Usage_KK_KeypadSpace Usage_i8(0xcd)
+#define Usage_KK_KeypadAt Usage_i8(0xce)
+#define Usage_KK_KeypadBang Usage_i8(0xcf)
+#define Usage_KK_KeypadMemoryStore Usage_i8(0xd0)
+#define Usage_KK_KeypadMemoryRecall Usage_i8(0xd1)
+#define Usage_KK_KeypadMemoryClear Usage_i8(0xd2)
+#define Usage_KK_KeypadMemoryAdd Usage_i8(0xd3)
+#define Usage_KK_KeypadMemorySubtract Usage_i8(0xd4)
+#define Usage_KK_KeypadMemoryMultiply Usage_i8(0xd5)
+#define Usage_KK_KeypadMemoryDivide Usage_i8(0xd6)
+#define Usage_KK_KeypadPlusMinus Usage_i8(0xd7)
+#define Usage_KK_KeypadClear Usage_i8(0xd8)
+#define Usage_KK_KeypadClearEntry Usage_i8(0xd9)
+#define Usage_KK_KeypadBinary Usage_i8(0xda)
+#define Usage_KK_KeypadOctal Usage_i8(0xdb)
+#define Usage_KK_KeypadDecimal Usage_i8(0xdc)
+#define Usage_KK_KeypadHexadecimal Usage_i8(0xdd)
+#define Usage_KK_KeyboardLeftControl Usage_i8(0xe0)
+#define Usage_KK_KeyboardLeftShift Usage_i8(0xe1)
+#define Usage_KK_KeyboardLeftAlt Usage_i8(0xe2)
+#define Usage_KK_KeyboardLeftGUI Usage_i8(0xe3)
+#define Usage_KK_KeyboardRightControl Usage_i8(0xe4)
+#define Usage_KK_KeyboardRightShift Usage_i8(0xe5)
+#define Usage_KK_KeyboardRightAlt Usage_i8(0xe6)
+#define Usage_KK_KeyboardRightGUI Usage_i8(0xe7)
+#define Usage_LED_NumLock Usage_i8(0x1)
+#define Usage_LED_CapsLock Usage_i8(0x2)
+#define Usage_LED_ScrollLock Usage_i8(0x3)
+#define Usage_LED_Compose Usage_i8(0x4)
+#define Usage_LED_Kana Usage_i8(0x5)
+#define Usage_LED_Power Usage_i8(0x6)
+#define Usage_LED_Shift Usage_i8(0x7)
+#define Usage_LED_DoNotDisturb Usage_i8(0x8)
+#define Usage_LED_Mute Usage_i8(0x9)
+#define Usage_LED_ToneEnable Usage_i8(0xa)
+#define Usage_LED_HighCutFilter Usage_i8(0xb)
+#define Usage_LED_LowCutFilter Usage_i8(0xc)
+#define Usage_LED_EqualizerEnable Usage_i8(0xd)
+#define Usage_LED_SoundFieldOn Usage_i8(0xe)
+#define Usage_LED_SurroundOn Usage_i8(0xf)
+#define Usage_LED_Repeat Usage_i8(0x10)
+#define Usage_LED_Stereo Usage_i8(0x11)
+#define Usage_LED_SamplingRateDetect Usage_i8(0x12)
+#define Usage_LED_Spinning Usage_i8(0x13)
+#define Usage_LED_CAV Usage_i8(0x14)
+#define Usage_LED_CLV Usage_i8(0x15)
+#define Usage_LED_RecordingFormatDetect Usage_i8(0x16)
+#define Usage_LED_OffHook Usage_i8(0x17)
+#define Usage_LED_Ring Usage_i8(0x18)
+#define Usage_LED_MessageWaiting Usage_i8(0x19)
+#define Usage_LED_DataMode Usage_i8(0x1a)
+#define Usage_LED_BatteryOperation Usage_i8(0x1b)
+#define Usage_LED_BatteryOK Usage_i8(0x1c)
+#define Usage_LED_BatteryLow Usage_i8(0x1d)
+#define Usage_LED_Speaker Usage_i8(0x1e)
+#define Usage_LED_Headset Usage_i8(0x1f)
+#define Usage_LED_Hold Usage_i8(0x20)
+#define Usage_LED_Microphone Usage_i8(0x21)
+#define Usage_LED_Coverage Usage_i8(0x22)
+#define Usage_LED_NightMode Usage_i8(0x23)
+#define Usage_LED_SendCalls Usage_i8(0x24)
+#define Usage_LED_CallPickup Usage_i8(0x25)
+#define Usage_LED_Conference Usage_i8(0x26)
+#define Usage_LED_Standby Usage_i8(0x27)
+#define Usage_LED_CameraOn Usage_i8(0x28)
+#define Usage_LED_CameraOff Usage_i8(0x29)
+#define Usage_LED_OnLine Usage_i8(0x2a)
+#define Usage_LED_OffLine Usage_i8(0x2b)
+#define Usage_LED_Busy Usage_i8(0x2c)
+#define Usage_LED_Ready Usage_i8(0x2d)
+#define Usage_LED_PaperOut Usage_i8(0x2e)
+#define Usage_LED_PaperJam Usage_i8(0x2f)
+#define Usage_LED_Remote Usage_i8(0x30)
+#define Usage_LED_Forward Usage_i8(0x31)
+#define Usage_LED_Reverse Usage_i8(0x32)
+#define Usage_LED_Stop Usage_i8(0x33)
+#define Usage_LED_Rewind Usage_i8(0x34)
+#define Usage_LED_FastForward Usage_i8(0x35)
+#define Usage_LED_Play Usage_i8(0x36)
+#define Usage_LED_Pause Usage_i8(0x37)
+#define Usage_LED_Record Usage_i8(0x38)
+#define Usage_LED_Error Usage_i8(0x39)
+#define Usage_LED_UsageSelectedIndicator Usage_i8(0x3a)
+#define Usage_LED_UsageInUseIndicator Usage_i8(0x3b)
+#define Usage_LED_UsageMultiModeIndicator Usage_i8(0x3c)
+#define Usage_LED_IndicatorOn Usage_i8(0x3d)
+#define Usage_LED_IndicatorFlash Usage_i8(0x3e)
+#define Usage_LED_IndicatorSlowBlink Usage_i8(0x3f)
+#define Usage_LED_IndicatorFastBlink Usage_i8(0x40)
+#define Usage_LED_IndicatorOff Usage_i8(0x41)
+#define Usage_LED_FlashOnTime Usage_i8(0x42)
+#define Usage_LED_SlowBlinkOnTime Usage_i8(0x43)
+#define Usage_LED_SlowBlinkOffTime Usage_i8(0x44)
+#define Usage_LED_FastBlinkOnTime Usage_i8(0x45)
+#define Usage_LED_FastBlinkOffTime Usage_i8(0x46)
+#define Usage_LED_UsageIndicatorColor Usage_i8(0x47)
+#define Usage_LED_IndicatorRed Usage_i8(0x48)
+#define Usage_LED_IndicatorGreen Usage_i8(0x49)
+#define Usage_LED_IndicatorAmber Usage_i8(0x4a)
+#define Usage_LED_GenericIndicator Usage_i8(0x4b)
+#define Usage_LED_SystemSuspend Usage_i8(0x4c)
+#define Usage_LED_ExternalPowerConnected Usage_i8(0x4d)
+#define Usage_LED_IndicatorBlue Usage_i8(0x4e)
+#define Usage_LED_IndicatorOrange Usage_i8(0x4f)
+#define Usage_LED_GoodStatus Usage_i8(0x50)
+#define Usage_LED_WarningStatus Usage_i8(0x51)
+#define Usage_LED_RGBLED Usage_i8(0x52)
+#define Usage_LED_RedLEDChannel Usage_i8(0x53)
+#define Usage_LED_BlueLEDChannel Usage_i8(0x54)
+#define Usage_LED_GreenLEDChannel Usage_i8(0x55)
+#define Usage_LED_LEDIntensity Usage_i8(0x56)
+#define Usage_LED_SystemMicrophoneMute Usage_i8(0x57)
+#define Usage_LED_PlayerIndicator Usage_i8(0x60)
+#define Usage_LED_Player1 Usage_i8(0x61)
+#define Usage_LED_Player2 Usage_i8(0x62)
+#define Usage_LED_Player3 Usage_i8(0x63)
+#define Usage_LED_Player4 Usage_i8(0x64)
+#define Usage_LED_Player5 Usage_i8(0x65)
+#define Usage_LED_Player6 Usage_i8(0x66)
+#define Usage_LED_Player7 Usage_i8(0x67)
+#define Usage_LED_Player8 Usage_i8(0x68)
+#define Usage_TD_Phone Usage_i8(0x1)
+#define Usage_TD_AnsweringMachine Usage_i8(0x2)
+#define Usage_TD_MessageControls Usage_i8(0x3)
+#define Usage_TD_Handset Usage_i8(0x4)
+#define Usage_TD_Headset Usage_i8(0x5)
+#define Usage_TD_TelephonyKeyPad Usage_i8(0x6)
+#define Usage_TD_ProgrammableButton Usage_i8(0x7)
+#define Usage_TD_HookSwitch Usage_i8(0x20)
+#define Usage_TD_Flash Usage_i8(0x21)
+#define Usage_TD_Feature Usage_i8(0x22)
+#define Usage_TD_Hold Usage_i8(0x23)
+#define Usage_TD_Redial Usage_i8(0x24)
+#define Usage_TD_Transfer Usage_i8(0x25)
+#define Usage_TD_Drop Usage_i8(0x26)
+#define Usage_TD_Park Usage_i8(0x27)
+#define Usage_TD_ForwardCalls Usage_i8(0x28)
+#define Usage_TD_AlternateFunction Usage_i8(0x29)
+#define Usage_TD_Line Usage_i8(0x2a)
+#define Usage_TD_SpeakerPhone Usage_i8(0x2b)
+#define Usage_TD_Conference Usage_i8(0x2c)
+#define Usage_TD_RingEnable Usage_i8(0x2d)
+#define Usage_TD_RingSelect Usage_i8(0x2e)
+#define Usage_TD_PhoneMute Usage_i8(0x2f)
+#define Usage_TD_CallerID Usage_i8(0x30)
+#define Usage_TD_Send Usage_i8(0x31)
+#define Usage_TD_SpeedDial Usage_i8(0x50)
+#define Usage_TD_StoreNumber Usage_i8(0x51)
+#define Usage_TD_RecallNumber Usage_i8(0x52)
+#define Usage_TD_PhoneDirectory Usage_i8(0x53)
+#define Usage_TD_VoiceMail Usage_i8(0x70)
+#define Usage_TD_ScreenCalls Usage_i8(0x71)
+#define Usage_TD_DoNotDisturb Usage_i8(0x72)
+#define Usage_TD_Message Usage_i8(0x73)
+#define Usage_TD_AnswerOnOff Usage_i8(0x74)
+#define Usage_TD_InsideDialTone Usage_i8(0x90)
+#define Usage_TD_OutsideDialTone Usage_i8(0x91)
+#define Usage_TD_InsideRingTone Usage_i8(0x92)
+#define Usage_TD_OutsideRingTone Usage_i8(0x93)
+#define Usage_TD_PriorityRingTone Usage_i8(0x94)
+#define Usage_TD_InsideRingback Usage_i8(0x95)
+#define Usage_TD_PriorityRingback Usage_i8(0x96)
+#define Usage_TD_LineBusyTone Usage_i8(0x97)
+#define Usage_TD_ReorderTone Usage_i8(0x98)
+#define Usage_TD_CallWaitingTone Usage_i8(0x99)
+#define Usage_TD_ConfirmationTone1 Usage_i8(0x9a)
+#define Usage_TD_ConfirmationTone2 Usage_i8(0x9b)
+#define Usage_TD_TonesOff Usage_i8(0x9c)
+#define Usage_TD_OutsideRingback Usage_i8(0x9d)
+#define Usage_TD_Ringer Usage_i8(0x9e)
+#define Usage_TD_PhoneKey0 Usage_i8(0xb0)
+#define Usage_TD_PhoneKey1 Usage_i8(0xb1)
+#define Usage_TD_PhoneKey2 Usage_i8(0xb2)
+#define Usage_TD_PhoneKey3 Usage_i8(0xb3)
+#define Usage_TD_PhoneKey4 Usage_i8(0xb4)
+#define Usage_TD_PhoneKey5 Usage_i8(0xb5)
+#define Usage_TD_PhoneKey6 Usage_i8(0xb6)
+#define Usage_TD_PhoneKey7 Usage_i8(0xb7)
+#define Usage_TD_PhoneKey8 Usage_i8(0xb8)
+#define Usage_TD_PhoneKey9 Usage_i8(0xb9)
+#define Usage_TD_PhoneKeyStar Usage_i8(0xba)
+#define Usage_TD_PhoneKeyPound Usage_i8(0xbb)
+#define Usage_TD_PhoneKeyA Usage_i8(0xbc)
+#define Usage_TD_PhoneKeyB Usage_i8(0xbd)
+#define Usage_TD_PhoneKeyC Usage_i8(0xbe)
+#define Usage_TD_PhoneKeyD Usage_i8(0xbf)
+#define Usage_TD_PhoneCallHistoryKey Usage_i8(0xc0)
+#define Usage_TD_PhoneCallerIDKey Usage_i8(0xc1)
+#define Usage_TD_PhoneSettingsKey Usage_i8(0xc2)
+#define Usage_TD_HostControl Usage_i8(0xf0)
+#define Usage_TD_HostAvailable Usage_i8(0xf1)
+#define Usage_TD_HostCallActive Usage_i8(0xf2)
+#define Usage_TD_ActivateHandsetAudio Usage_i8(0xf3)
+#define Usage_TD_RingType Usage_i8(0xf4)
+#define Usage_TD_RedialablePhoneNumber Usage_i8(0xf5)
+#define Usage_TD_StopRingTone Usage_i8(0xf8)
+#define Usage_TD_PSTNRingTone Usage_i8(0xf9)
+#define Usage_TD_HostRingTone Usage_i8(0xfa)
+#define Usage_TD_AlertSoundError Usage_i8(0xfb)
+#define Usage_TD_AlertSoundConfirm Usage_i8(0xfc)
+#define Usage_TD_AlertSoundNotification Usage_i8(0xfd)
+#define Usage_TD_SilentRing Usage_i8(0xfe)
+#define Usage_TD_EmailMessageWaiting Usage_i16(0x108)
+#define Usage_TD_VoicemailMessageWaiting Usage_i16(0x109)
+#define Usage_TD_HostHold Usage_i16(0x10a)
+#define Usage_TD_IncomingCallHistoryCount Usage_i16(0x110)
+#define Usage_TD_OutgoingCallHistoryCount Usage_i16(0x111)
+#define Usage_TD_IncomingCallHistory Usage_i16(0x112)
+#define Usage_TD_OutgoingCallHistory Usage_i16(0x113)
+#define Usage_TD_PhoneLocale Usage_i16(0x114)
+#define Usage_TD_PhoneTimeSecond Usage_i16(0x140)
+#define Usage_TD_PhoneTimeMinute Usage_i16(0x141)
+#define Usage_TD_PhoneTimeHour Usage_i16(0x142)
+#define Usage_TD_PhoneDateDay Usage_i16(0x143)
+#define Usage_TD_PhoneDateMonth Usage_i16(0x144)
+#define Usage_TD_PhoneDateYear Usage_i16(0x145)
+#define Usage_TD_HandsetNickname Usage_i16(0x146)
+#define Usage_TD_AddressBookID Usage_i16(0x147)
+#define Usage_TD_CallDuration Usage_i16(0x14a)
+#define Usage_TD_DualModePhone Usage_i16(0x14b)
+#define Usage_Con_ConsumerControl Usage_i8(0x1)
+#define Usage_Con_NumericKeyPad Usage_i8(0x2)
+#define Usage_Con_ProgrammableButtons Usage_i8(0x3)
+#define Usage_Con_Microphone Usage_i8(0x4)
+#define Usage_Con_Headphone Usage_i8(0x5)
+#define Usage_Con_GraphicEqualizer Usage_i8(0x6)
+#define Usage_Con_Plus10 Usage_i8(0x20)
+#define Usage_Con_Plus100 Usage_i8(0x21)
+#define Usage_Con_AMPM Usage_i8(0x22)
+#define Usage_Con_Power Usage_i8(0x30)
+#define Usage_Con_Reset Usage_i8(0x31)
+#define Usage_Con_Sleep Usage_i8(0x32)
+#define Usage_Con_SleepAfter Usage_i8(0x33)
+#define Usage_Con_SleepMode Usage_i8(0x34)
+#define Usage_Con_Illumination Usage_i8(0x35)
+#define Usage_Con_FunctionButtons Usage_i8(0x36)
+#define Usage_Con_Menu Usage_i8(0x40)
+#define Usage_Con_MenuPick Usage_i8(0x41)
+#define Usage_Con_MenuUp Usage_i8(0x42)
+#define Usage_Con_MenuDown Usage_i8(0x43)
+#define Usage_Con_MenuLeft Usage_i8(0x44)
+#define Usage_Con_MenuRight Usage_i8(0x45)
+#define Usage_Con_MenuEscape Usage_i8(0x46)
+#define Usage_Con_MenuValueIncrease Usage_i8(0x47)
+#define Usage_Con_MenuValueDecrease Usage_i8(0x48)
+#define Usage_Con_DataOnScreen Usage_i8(0x60)
+#define Usage_Con_ClosedCaption Usage_i8(0x61)
+#define Usage_Con_ClosedCaptionSelect Usage_i8(0x62)
+#define Usage_Con_VCRTV Usage_i8(0x63)
+#define Usage_Con_BroadcastMode Usage_i8(0x64)
+#define Usage_Con_Snapshot Usage_i8(0x65)
+#define Usage_Con_Still Usage_i8(0x66)
+#define Usage_Con_PictureinPictureToggle Usage_i8(0x67)
+#define Usage_Con_PictureinPictureSwap Usage_i8(0x68)
+#define Usage_Con_RedMenuButton Usage_i8(0x69)
+#define Usage_Con_GreenMenuButton Usage_i8(0x6a)
+#define Usage_Con_BlueMenuButton Usage_i8(0x6b)
+#define Usage_Con_YellowMenuButton Usage_i8(0x6c)
+#define Usage_Con_Aspect Usage_i8(0x6d)
+#define Usage_Con_ThreeDModeSelect Usage_i8(0x6e)
+#define Usage_Con_DisplayBrightnessIncrement Usage_i8(0x6f)
+#define Usage_Con_DisplayBrightnessDecrement Usage_i8(0x70)
+#define Usage_Con_DisplayBrightness Usage_i8(0x71)
+#define Usage_Con_DisplayBacklightToggle Usage_i8(0x72)
+#define Usage_Con_DisplaySetBrightnesstoMinimum Usage_i8(0x73)
+#define Usage_Con_DisplaySetBrightnesstoMaximum Usage_i8(0x74)
+#define Usage_Con_DisplaySetAutoBrightness Usage_i8(0x75)
+#define Usage_Con_CameraAccessEnabled Usage_i8(0x76)
+#define Usage_Con_CameraAccessDisabled Usage_i8(0x77)
+#define Usage_Con_CameraAccessToggle Usage_i8(0x78)
+#define Usage_Con_KeyboardBrightnessIncrement Usage_i8(0x79)
+#define Usage_Con_KeyboardBrightnessDecrement Usage_i8(0x7a)
+#define Usage_Con_KeyboardBacklightSetLevel Usage_i8(0x7b)
+#define Usage_Con_KeyboardBacklightOOC Usage_i8(0x7c)
+#define Usage_Con_KeyboardBacklightSetMinimum Usage_i8(0x7d)
+#define Usage_Con_KeyboardBacklightSetMaximum Usage_i8(0x7e)
+#define Usage_Con_KeyboardBacklightAuto Usage_i8(0x7f)
+#define Usage_Con_Selection Usage_i8(0x80)
+#define Usage_Con_AssignSelection Usage_i8(0x81)
+#define Usage_Con_ModeStep Usage_i8(0x82)
+#define Usage_Con_RecallLast Usage_i8(0x83)
+#define Usage_Con_EnterChannel Usage_i8(0x84)
+#define Usage_Con_OrderMovie Usage_i8(0x85)
+#define Usage_Con_Channel Usage_i8(0x86)
+#define Usage_Con_MediaSelection Usage_i8(0x87)
+#define Usage_Con_MediaSelectComputer Usage_i8(0x88)
+#define Usage_Con_MediaSelectTV Usage_i8(0x89)
+#define Usage_Con_MediaSelectWWW Usage_i8(0x8a)
+#define Usage_Con_MediaSelectDVD Usage_i8(0x8b)
+#define Usage_Con_MediaSelectTelephone Usage_i8(0x8c)
+#define Usage_Con_MediaSelectProgramGuide Usage_i8(0x8d)
+#define Usage_Con_MediaSelectVideoPhone Usage_i8(0x8e)
+#define Usage_Con_MediaSelectGames Usage_i8(0x8f)
+#define Usage_Con_MediaSelectMessages Usage_i8(0x90)
+#define Usage_Con_MediaSelectCD Usage_i8(0x91)
+#define Usage_Con_MediaSelectVCR Usage_i8(0x92)
+#define Usage_Con_MediaSelectTuner Usage_i8(0x93)
+#define Usage_Con_Quit Usage_i8(0x94)
+#define Usage_Con_Help Usage_i8(0x95)
+#define Usage_Con_MediaSelectTape Usage_i8(0x96)
+#define Usage_Con_MediaSelectCable Usage_i8(0x97)
+#define Usage_Con_MediaSelectSatellite Usage_i8(0x98)
+#define Usage_Con_MediaSelectSecurity Usage_i8(0x99)
+#define Usage_Con_MediaSelectHome Usage_i8(0x9a)
+#define Usage_Con_MediaSelectCall Usage_i8(0x9b)
+#define Usage_Con_ChannelIncrement Usage_i8(0x9c)
+#define Usage_Con_ChannelDecrement Usage_i8(0x9d)
+#define Usage_Con_MediaSelectSAP Usage_i8(0x9e)
+#define Usage_Con_VCRPlus Usage_i8(0xa0)
+#define Usage_Con_Once Usage_i8(0xa1)
+#define Usage_Con_Daily Usage_i8(0xa2)
+#define Usage_Con_Weekly Usage_i8(0xa3)
+#define Usage_Con_Monthly Usage_i8(0xa4)
+#define Usage_Con_Play Usage_i8(0xb0)
+#define Usage_Con_Pause Usage_i8(0xb1)
+#define Usage_Con_Record Usage_i8(0xb2)
+#define Usage_Con_FastForward Usage_i8(0xb3)
+#define Usage_Con_Rewind Usage_i8(0xb4)
+#define Usage_Con_ScanNextTrack Usage_i8(0xb5)
+#define Usage_Con_ScanPreviousTrack Usage_i8(0xb6)
+#define Usage_Con_Stop Usage_i8(0xb7)
+#define Usage_Con_Eject Usage_i8(0xb8)
+#define Usage_Con_RandomPlay Usage_i8(0xb9)
+#define Usage_Con_SelectDisc Usage_i8(0xba)
+#define Usage_Con_EnterDisc Usage_i8(0xbb)
+#define Usage_Con_Repeat Usage_i8(0xbc)
+#define Usage_Con_Tracking Usage_i8(0xbd)
+#define Usage_Con_TrackNormal Usage_i8(0xbe)
+#define Usage_Con_SlowTracking Usage_i8(0xbf)
+#define Usage_Con_FrameForward Usage_i8(0xc0)
+#define Usage_Con_FrameBack Usage_i8(0xc1)
+#define Usage_Con_Mark Usage_i8(0xc2)
+#define Usage_Con_ClearMark Usage_i8(0xc3)
+#define Usage_Con_RepeatFromMark Usage_i8(0xc4)
+#define Usage_Con_ReturnToMark Usage_i8(0xc5)
+#define Usage_Con_SearchMarkForward Usage_i8(0xc6)
+#define Usage_Con_SearchMarkBackwards Usage_i8(0xc7)
+#define Usage_Con_CounterReset Usage_i8(0xc8)
+#define Usage_Con_ShowCounter Usage_i8(0xc9)
+#define Usage_Con_TrackingIncrement Usage_i8(0xca)
+#define Usage_Con_TrackingDecrement Usage_i8(0xcb)
+#define Usage_Con_StopEject Usage_i8(0xcc)
+#define Usage_Con_PlayPause Usage_i8(0xcd)
+#define Usage_Con_PlaySkip Usage_i8(0xce)
+#define Usage_Con_VoiceCommand Usage_i8(0xcf)
+#define Usage_Con_InvokeCaptureInterface Usage_i8(0xd0)
+#define Usage_Con_StartorStopGameRecording Usage_i8(0xd1)
+#define Usage_Con_HistoricalGameCapture Usage_i8(0xd2)
+#define Usage_Con_CaptureGameScreenshot Usage_i8(0xd3)
+#define Usage_Con_ShoworHideRecordingIndicator Usage_i8(0xd4)
+#define Usage_Con_StartorStopMicrophoneCapture Usage_i8(0xd5)
+#define Usage_Con_StartorStopCameraCapture Usage_i8(0xd6)
+#define Usage_Con_StartorStopGameBroadcast Usage_i8(0xd7)
+#define Usage_Con_StartorStopVoiceDictationSession Usage_i8(0xd8)
+#define Usage_Con_InvokeDismissEmojiPicker Usage_i8(0xd9)
+#define Usage_Con_Volume Usage_i8(0xe0)
+#define Usage_Con_Balance Usage_i8(0xe1)
+#define Usage_Con_Mute Usage_i8(0xe2)
+#define Usage_Con_Bass Usage_i8(0xe3)
+#define Usage_Con_Treble Usage_i8(0xe4)
+#define Usage_Con_BassBoost Usage_i8(0xe5)
+#define Usage_Con_SurroundMode Usage_i8(0xe6)
+#define Usage_Con_Loudness Usage_i8(0xe7)
+#define Usage_Con_MPX Usage_i8(0xe8)
+#define Usage_Con_VolumeIncrement Usage_i8(0xe9)
+#define Usage_Con_VolumeDecrement Usage_i8(0xea)
+#define Usage_Con_SpeedSelect Usage_i8(0xf0)
+#define Usage_Con_PlaybackSpeed Usage_i8(0xf1)
+#define Usage_Con_StandardPlay Usage_i8(0xf2)
+#define Usage_Con_LongPlay Usage_i8(0xf3)
+#define Usage_Con_ExtendedPlay Usage_i8(0xf4)
+#define Usage_Con_Slow Usage_i8(0xf5)
+#define Usage_Con_FanEnable Usage_i16(0x100)
+#define Usage_Con_FanSpeed Usage_i16(0x101)
+#define Usage_Con_LightEnable Usage_i16(0x102)
+#define Usage_Con_LightIlluminationLevel Usage_i16(0x103)
+#define Usage_Con_ClimateControlEnable Usage_i16(0x104)
+#define Usage_Con_RoomTemperature Usage_i16(0x105)
+#define Usage_Con_SecurityEnable Usage_i16(0x106)
+#define Usage_Con_FireAlarm Usage_i16(0x107)
+#define Usage_Con_PoliceAlarm Usage_i16(0x108)
+#define Usage_Con_Proximity Usage_i16(0x109)
+#define Usage_Con_Motion Usage_i16(0x10a)
+#define Usage_Con_DuressAlarm Usage_i16(0x10b)
+#define Usage_Con_HoldupAlarm Usage_i16(0x10c)
+#define Usage_Con_MedicalAlarm Usage_i16(0x10d)
+#define Usage_Con_BalanceRight Usage_i16(0x150)
+#define Usage_Con_BalanceLeft Usage_i16(0x151)
+#define Usage_Con_BassIncrement Usage_i16(0x152)
+#define Usage_Con_BassDecrement Usage_i16(0x153)
+#define Usage_Con_TrebleIncrement Usage_i16(0x154)
+#define Usage_Con_TrebleDecrement Usage_i16(0x155)
+#define Usage_Con_SpeakerSystem Usage_i16(0x160)
+#define Usage_Con_ChannelLeft Usage_i16(0x161)
+#define Usage_Con_ChannelRight Usage_i16(0x162)
+#define Usage_Con_ChannelCenter Usage_i16(0x163)
+#define Usage_Con_ChannelFront Usage_i16(0x164)
+#define Usage_Con_ChannelCenterFront Usage_i16(0x165)
+#define Usage_Con_ChannelSide Usage_i16(0x166)
+#define Usage_Con_ChannelSurround Usage_i16(0x167)
+#define Usage_Con_ChannelLowFrequencyEnhancement Usage_i16(0x168)
+#define Usage_Con_ChannelTop Usage_i16(0x169)
+#define Usage_Con_ChannelUnknown Usage_i16(0x16a)
+#define Usage_Con_Subchannel Usage_i16(0x170)
+#define Usage_Con_SubchannelIncrement Usage_i16(0x171)
+#define Usage_Con_SubchannelDecrement Usage_i16(0x172)
+#define Usage_Con_AlternateAudioIncrement Usage_i16(0x173)
+#define Usage_Con_AlternateAudioDecrement Usage_i16(0x174)
+#define Usage_Con_ApplicationLaunchButtons Usage_i16(0x180)
+#define Usage_Con_ALLaunchButtonConfigurationTool Usage_i16(0x181)
+#define Usage_Con_ALProgrammableButtonConfiguration Usage_i16(0x182)
+#define Usage_Con_ALConsumerControlConfiguration Usage_i16(0x183)
+#define Usage_Con_ALWordProcessor Usage_i16(0x184)
+#define Usage_Con_ALTextEditor Usage_i16(0x185)
+#define Usage_Con_ALSpreadsheet Usage_i16(0x186)
+#define Usage_Con_ALGraphicsEditor Usage_i16(0x187)
+#define Usage_Con_ALPresentationApp Usage_i16(0x188)
+#define Usage_Con_ALDatabaseApp Usage_i16(0x189)
+#define Usage_Con_ALEmailReader Usage_i16(0x18a)
+#define Usage_Con_ALNewsreader Usage_i16(0x18b)
+#define Usage_Con_ALVoicemail Usage_i16(0x18c)
+#define Usage_Con_ALContactsAddressBook Usage_i16(0x18d)
+#define Usage_Con_ALCalendarSchedule Usage_i16(0x18e)
+#define Usage_Con_ALTaskProjectManager Usage_i16(0x18f)
+#define Usage_Con_ALLogJournalTimecard Usage_i16(0x190)
+#define Usage_Con_ALCheckbookFinance Usage_i16(0x191)
+#define Usage_Con_ALCalculator Usage_i16(0x192)
+#define Usage_Con_ALAVCapturePlayback Usage_i16(0x193)
+#define Usage_Con_ALLocalMachineBrowser Usage_i16(0x194)
+#define Usage_Con_ALLANWANBrowser Usage_i16(0x195)
+#define Usage_Con_ALInternetBrowser Usage_i16(0x196)
+#define Usage_Con_ALRemoteNetworkingISPConnect Usage_i16(0x197)
+#define Usage_Con_ALNetworkConference Usage_i16(0x198)
+#define Usage_Con_ALNetworkChat Usage_i16(0x199)
+#define Usage_Con_ALTelephonyDialer Usage_i16(0x19a)
+#define Usage_Con_ALLogon Usage_i16(0x19b)
+#define Usage_Con_ALLogoff Usage_i16(0x19c)
+#define Usage_Con_ALLogonLogoff Usage_i16(0x19d)
+#define Usage_Con_ALTerminalLockScreensaver Usage_i16(0x19e)
+#define Usage_Con_ALControlPanel Usage_i16(0x19f)
+#define Usage_Con_ALCommandLineProcessorRun Usage_i16(0x1a0)
+#define Usage_Con_ALProcessTaskManager Usage_i16(0x1a1)
+#define Usage_Con_ALSelectTaskApplication Usage_i16(0x1a2)
+#define Usage_Con_ALNextTaskApplication Usage_i16(0x1a3)
+#define Usage_Con_ALPreviousTaskApplication Usage_i16(0x1a4)
+#define Usage_Con_ALPreemptiveHaltTaskApplication Usage_i16(0x1a5)
+#define Usage_Con_ALIntegratedHelpCenter Usage_i16(0x1a6)
+#define Usage_Con_ALDocuments Usage_i16(0x1a7)
+#define Usage_Con_ALThesaurus Usage_i16(0x1a8)
+#define Usage_Con_ALDictionary Usage_i16(0x1a9)
+#define Usage_Con_ALDesktop Usage_i16(0x1aa)
+#define Usage_Con_ALSpellCheck Usage_i16(0x1ab)
+#define Usage_Con_ALGrammarCheck Usage_i16(0x1ac)
+#define Usage_Con_ALWirelessStatus Usage_i16(0x1ad)
+#define Usage_Con_ALKeyboardLayout Usage_i16(0x1ae)
+#define Usage_Con_ALVirusProtection Usage_i16(0x1af)
+#define Usage_Con_ALEncryption Usage_i16(0x1b0)
+#define Usage_Con_ALScreenSaver Usage_i16(0x1b1)
+#define Usage_Con_ALAlarms Usage_i16(0x1b2)
+#define Usage_Con_ALClock Usage_i16(0x1b3)
+#define Usage_Con_ALFileBrowser Usage_i16(0x1b4)
+#define Usage_Con_ALPowerStatus Usage_i16(0x1b5)
+#define Usage_Con_ALImageBrowser Usage_i16(0x1b6)
+#define Usage_Con_ALAudioBrowser Usage_i16(0x1b7)
+#define Usage_Con_ALMovieBrowser Usage_i16(0x1b8)
+#define Usage_Con_ALDigitalRightsManager Usage_i16(0x1b9)
+#define Usage_Con_ALDigitalWallet Usage_i16(0x1ba)
+#define Usage_Con_ALInstantMessaging Usage_i16(0x1bc)
+#define Usage_Con_ALOEMFeaturesTipsTutorialBrowser Usage_i16(0x1bd)
+#define Usage_Con_ALOEMHelp Usage_i16(0x1be)
+#define Usage_Con_ALOnlineCommunity Usage_i16(0x1bf)
+#define Usage_Con_ALEntertainmentContentBrowser Usage_i16(0x1c0)
+#define Usage_Con_ALOnlineShoppingBrowser Usage_i16(0x1c1)
+#define Usage_Con_ALSmartCardInformationHelp Usage_i16(0x1c2)
+#define Usage_Con_ALMarketMonitorFinanceBrowser Usage_i16(0x1c3)
+#define Usage_Con_ALCustomizedCorporateNewsBrowser Usage_i16(0x1c4)
+#define Usage_Con_ALOnlineActivityBrowser Usage_i16(0x1c5)
+#define Usage_Con_ALResearchSearchBrowser Usage_i16(0x1c6)
+#define Usage_Con_ALAudioPlayer Usage_i16(0x1c7)
+#define Usage_Con_ALMessageStatus Usage_i16(0x1c8)
+#define Usage_Con_ALContactSync Usage_i16(0x1c9)
+#define Usage_Con_ALNavigation Usage_i16(0x1ca)
+#define Usage_Con_ALContextawareDesktopAssistant Usage_i16(0x1cb)
+#define Usage_Con_GenericGUIApplicationControls Usage_i16(0x200)
+#define Usage_Con_ACNew Usage_i16(0x201)
+#define Usage_Con_ACOpen Usage_i16(0x202)
+#define Usage_Con_ACClose Usage_i16(0x203)
+#define Usage_Con_ACExit Usage_i16(0x204)
+#define Usage_Con_ACMaximize Usage_i16(0x205)
+#define Usage_Con_ACMinimize Usage_i16(0x206)
+#define Usage_Con_ACSave Usage_i16(0x207)
+#define Usage_Con_ACPrint Usage_i16(0x208)
+#define Usage_Con_ACProperties Usage_i16(0x209)
+#define Usage_Con_ACUndo Usage_i16(0x21a)
+#define Usage_Con_ACCopy Usage_i16(0x21b)
+#define Usage_Con_ACCut Usage_i16(0x21c)
+#define Usage_Con_ACPaste Usage_i16(0x21d)
+#define Usage_Con_ACSelectAll Usage_i16(0x21e)
+#define Usage_Con_ACFind Usage_i16(0x21f)
+#define Usage_Con_ACFindandReplace Usage_i16(0x220)
+#define Usage_Con_ACSearch Usage_i16(0x221)
+#define Usage_Con_ACGoTo Usage_i16(0x222)
+#define Usage_Con_ACHome Usage_i16(0x223)
+#define Usage_Con_ACBack Usage_i16(0x224)
+#define Usage_Con_ACForward Usage_i16(0x225)
+#define Usage_Con_ACStop Usage_i16(0x226)
+#define Usage_Con_ACRefresh Usage_i16(0x227)
+#define Usage_Con_ACPreviousLink Usage_i16(0x228)
+#define Usage_Con_ACNextLink Usage_i16(0x229)
+#define Usage_Con_ACBookmarks Usage_i16(0x22a)
+#define Usage_Con_ACHistory Usage_i16(0x22b)
+#define Usage_Con_ACSubscriptions Usage_i16(0x22c)
+#define Usage_Con_ACZoomIn Usage_i16(0x22d)
+#define Usage_Con_ACZoomOut Usage_i16(0x22e)
+#define Usage_Con_ACZoom Usage_i16(0x22f)
+#define Usage_Con_ACFullScreenView Usage_i16(0x230)
+#define Usage_Con_ACNormalView Usage_i16(0x231)
+#define Usage_Con_ACViewToggle Usage_i16(0x232)
+#define Usage_Con_ACScrollUp Usage_i16(0x233)
+#define Usage_Con_ACScrollDown Usage_i16(0x234)
+#define Usage_Con_ACScroll Usage_i16(0x235)
+#define Usage_Con_ACPanLeft Usage_i16(0x236)
+#define Usage_Con_ACPanRight Usage_i16(0x237)
+#define Usage_Con_ACPan Usage_i16(0x238)
+#define Usage_Con_ACNewWindow Usage_i16(0x239)
+#define Usage_Con_ACTileHorizontally Usage_i16(0x23a)
+#define Usage_Con_ACTileVertically Usage_i16(0x23b)
+#define Usage_Con_ACFormat Usage_i16(0x23c)
+#define Usage_Con_ACEdit Usage_i16(0x23d)
+#define Usage_Con_ACBold Usage_i16(0x23e)
+#define Usage_Con_ACItalics Usage_i16(0x23f)
+#define Usage_Con_ACUnderline Usage_i16(0x240)
+#define Usage_Con_ACStrikethrough Usage_i16(0x241)
+#define Usage_Con_ACSubscript Usage_i16(0x242)
+#define Usage_Con_ACSuperscript Usage_i16(0x243)
+#define Usage_Con_ACAllCaps Usage_i16(0x244)
+#define Usage_Con_ACRotate Usage_i16(0x245)
+#define Usage_Con_ACResize Usage_i16(0x246)
+#define Usage_Con_ACFlipHorizontal Usage_i16(0x247)
+#define Usage_Con_ACFlipVertical Usage_i16(0x248)
+#define Usage_Con_ACMirrorHorizontal Usage_i16(0x249)
+#define Usage_Con_ACMirrorVertical Usage_i16(0x24a)
+#define Usage_Con_ACFontSelect Usage_i16(0x24b)
+#define Usage_Con_ACFontColor Usage_i16(0x24c)
+#define Usage_Con_ACFontSize Usage_i16(0x24d)
+#define Usage_Con_ACJustifyLeft Usage_i16(0x24e)
+#define Usage_Con_ACJustifyCenterH Usage_i16(0x24f)
+#define Usage_Con_ACJustifyRight Usage_i16(0x250)
+#define Usage_Con_ACJustifyBlockH Usage_i16(0x251)
+#define Usage_Con_ACJustifyTop Usage_i16(0x252)
+#define Usage_Con_ACJustifyCenterV Usage_i16(0x253)
+#define Usage_Con_ACJustifyBottom Usage_i16(0x254)
+#define Usage_Con_ACJustifyBlockV Usage_i16(0x255)
+#define Usage_Con_ACIndentDecrease Usage_i16(0x256)
+#define Usage_Con_ACIndentIncrease Usage_i16(0x257)
+#define Usage_Con_ACNumberedList Usage_i16(0x258)
+#define Usage_Con_ACRestartNumbering Usage_i16(0x259)
+#define Usage_Con_ACBulletedList Usage_i16(0x25a)
+#define Usage_Con_ACPromote Usage_i16(0x25b)
+#define Usage_Con_ACDemote Usage_i16(0x25c)
+#define Usage_Con_ACYes Usage_i16(0x25d)
+#define Usage_Con_ACNo Usage_i16(0x25e)
+#define Usage_Con_ACCancel Usage_i16(0x25f)
+#define Usage_Con_ACCatalog Usage_i16(0x260)
+#define Usage_Con_ACBuyCheckout Usage_i16(0x261)
+#define Usage_Con_ACAddtoCart Usage_i16(0x262)
+#define Usage_Con_ACExpand Usage_i16(0x263)
+#define Usage_Con_ACExpandAll Usage_i16(0x264)
+#define Usage_Con_ACCollapse Usage_i16(0x265)
+#define Usage_Con_ACCollapseAll Usage_i16(0x266)
+#define Usage_Con_ACPrintPreview Usage_i16(0x267)
+#define Usage_Con_ACPasteSpecial Usage_i16(0x268)
+#define Usage_Con_ACInsertMode Usage_i16(0x269)
+#define Usage_Con_ACDelete Usage_i16(0x26a)
+#define Usage_Con_ACLock Usage_i16(0x26b)
+#define Usage_Con_ACUnlock Usage_i16(0x26c)
+#define Usage_Con_ACProtect Usage_i16(0x26d)
+#define Usage_Con_ACUnprotect Usage_i16(0x26e)
+#define Usage_Con_ACAttachComment Usage_i16(0x26f)
+#define Usage_Con_ACDeleteComment Usage_i16(0x270)
+#define Usage_Con_ACViewComment Usage_i16(0x271)
+#define Usage_Con_ACSelectWord Usage_i16(0x272)
+#define Usage_Con_ACSelectSentence Usage_i16(0x273)
+#define Usage_Con_ACSelectParagraph Usage_i16(0x274)
+#define Usage_Con_ACSelectColumn Usage_i16(0x275)
+#define Usage_Con_ACSelectRow Usage_i16(0x276)
+#define Usage_Con_ACSelectTable Usage_i16(0x277)
+#define Usage_Con_ACSelectObject Usage_i16(0x278)
+#define Usage_Con_ACRedoRepeat Usage_i16(0x279)
+#define Usage_Con_ACSort Usage_i16(0x27a)
+#define Usage_Con_ACSortAscending Usage_i16(0x27b)
+#define Usage_Con_ACSortDescending Usage_i16(0x27c)
+#define Usage_Con_ACFilter Usage_i16(0x27d)
+#define Usage_Con_ACSetClock Usage_i16(0x27e)
+#define Usage_Con_ACViewClock Usage_i16(0x27f)
+#define Usage_Con_ACSelectTimeZone Usage_i16(0x280)
+#define Usage_Con_ACEditTimeZones Usage_i16(0x281)
+#define Usage_Con_ACSetAlarm Usage_i16(0x282)
+#define Usage_Con_ACClearAlarm Usage_i16(0x283)
+#define Usage_Con_ACSnoozeAlarm Usage_i16(0x284)
+#define Usage_Con_ACResetAlarm Usage_i16(0x285)
+#define Usage_Con_ACSynchronize Usage_i16(0x286)
+#define Usage_Con_ACSendReceive Usage_i16(0x287)
+#define Usage_Con_ACSendTo Usage_i16(0x288)
+#define Usage_Con_ACReply Usage_i16(0x289)
+#define Usage_Con_ACReplyAll Usage_i16(0x28a)
+#define Usage_Con_ACForwardMsg Usage_i16(0x28b)
+#define Usage_Con_ACSend Usage_i16(0x28c)
+#define Usage_Con_ACAttachFile Usage_i16(0x28d)
+#define Usage_Con_ACUpload Usage_i16(0x28e)
+#define Usage_Con_ACDownloadSaveTargetAs Usage_i16(0x28f)
+#define Usage_Con_ACSetBorders Usage_i16(0x290)
+#define Usage_Con_ACInsertRow Usage_i16(0x291)
+#define Usage_Con_ACInsertColumn Usage_i16(0x292)
+#define Usage_Con_ACInsertFile Usage_i16(0x293)
+#define Usage_Con_ACInsertPicture Usage_i16(0x294)
+#define Usage_Con_ACInsertObject Usage_i16(0x295)
+#define Usage_Con_ACInsertSymbol Usage_i16(0x296)
+#define Usage_Con_ACSaveandClose Usage_i16(0x297)
+#define Usage_Con_ACRename Usage_i16(0x298)
+#define Usage_Con_ACMerge Usage_i16(0x299)
+#define Usage_Con_ACSplit Usage_i16(0x29a)
+#define Usage_Con_ACDistributeHorizontally Usage_i16(0x29b)
+#define Usage_Con_ACDistributeVertically Usage_i16(0x29c)
+#define Usage_Con_ACNextKeyboardLayoutSelect Usage_i16(0x29d)
+#define Usage_Con_ACNavigationGuidance Usage_i16(0x29e)
+#define Usage_Con_ACDesktopShowAllWindows Usage_i16(0x29f)
+#define Usage_Con_ACSoftKeyLeft Usage_i16(0x2a0)
+#define Usage_Con_ACSoftKeyRight Usage_i16(0x2a1)
+#define Usage_Con_ACDesktopShowAllApplications Usage_i16(0x2a2)
+#define Usage_Con_ACIdleKeepAlive Usage_i16(0x2b0)
+#define Usage_Con_ExtendedKeyboardAttributesCollection Usage_i16(0x2c0)
+#define Usage_Con_KeyboardFormFactor Usage_i16(0x2c1)
+#define Usage_Con_KeyboardKeyType Usage_i16(0x2c2)
+#define Usage_Con_KeyboardPhysicalLayout Usage_i16(0x2c3)
+#define Usage_Con_VendorSpecificKeyboardPhysicalLayout Usage_i16(0x2c4)
+#define Usage_Con_KeyboardIETFLanguageTagIndex Usage_i16(0x2c5)
+#define Usage_Con_ImplementedKeyboardInputAssistControls Usage_i16(0x2c6)
+#define Usage_Con_KeyboardInputAssistPrevious Usage_i16(0x2c7)
+#define Usage_Con_KeyboardInputAssistNext Usage_i16(0x2c8)
+#define Usage_Con_KeyboardInputAssistPreviousGroup Usage_i16(0x2c9)
+#define Usage_Con_KeyboardInputAssistNextGroup Usage_i16(0x2ca)
+#define Usage_Con_KeyboardInputAssistAccept Usage_i16(0x2cb)
+#define Usage_Con_KeyboardInputAssistCancel Usage_i16(0x2cc)
+#define Usage_Con_PrivacyScreenToggle Usage_i16(0x2d0)
+#define Usage_Con_PrivacyScreenLevelDecrement Usage_i16(0x2d1)
+#define Usage_Con_PrivacyScreenLevelIncrement Usage_i16(0x2d2)
+#define Usage_Con_PrivacyScreenLevelMinimum Usage_i16(0x2d3)
+#define Usage_Con_PrivacyScreenLevelMaximum Usage_i16(0x2d4)
+#define Usage_Con_ContactEdited Usage_i16(0x500)
+#define Usage_Con_ContactAdded Usage_i16(0x501)
+#define Usage_Con_ContactRecordActive Usage_i16(0x502)
+#define Usage_Con_ContactIndex Usage_i16(0x503)
+#define Usage_Con_ContactNickname Usage_i16(0x504)
+#define Usage_Con_ContactFirstName Usage_i16(0x505)
+#define Usage_Con_ContactLastName Usage_i16(0x506)
+#define Usage_Con_ContactFullName Usage_i16(0x507)
+#define Usage_Con_ContactPhoneNumberPersonal Usage_i16(0x508)
+#define Usage_Con_ContactPhoneNumberBusiness Usage_i16(0x509)
+#define Usage_Con_ContactPhoneNumberMobile Usage_i16(0x50a)
+#define Usage_Con_ContactPhoneNumberPager Usage_i16(0x50b)
+#define Usage_Con_ContactPhoneNumberFax Usage_i16(0x50c)
+#define Usage_Con_ContactPhoneNumberOther Usage_i16(0x50d)
+#define Usage_Con_ContactEmailPersonal Usage_i16(0x50e)
+#define Usage_Con_ContactEmailBusiness Usage_i16(0x50f)
+#define Usage_Con_ContactEmailOther Usage_i16(0x510)
+#define Usage_Con_ContactEmailMain Usage_i16(0x511)
+#define Usage_Con_ContactSpeedDialNumber Usage_i16(0x512)
+#define Usage_Con_ContactStatusFlag Usage_i16(0x513)
+#define Usage_Con_ContactMisc Usage_i16(0x514)
+#define Usage_Dig_Digitizer Usage_i8(0x1)
+#define Usage_Dig_Pen Usage_i8(0x2)
+#define Usage_Dig_LightPen Usage_i8(0x3)
+#define Usage_Dig_TouchScreen Usage_i8(0x4)
+#define Usage_Dig_TouchPad Usage_i8(0x5)
+#define Usage_Dig_Whiteboard Usage_i8(0x6)
+#define Usage_Dig_CoordinateMeasuringMachine Usage_i8(0x7)
+#define Usage_Dig_ThreeDDigitizer Usage_i8(0x8)
+#define Usage_Dig_StereoPlotter Usage_i8(0x9)
+#define Usage_Dig_ArticulatedArm Usage_i8(0xa)
+#define Usage_Dig_Armature Usage_i8(0xb)
+#define Usage_Dig_MultiplePointDigitizer Usage_i8(0xc)
+#define Usage_Dig_FreeSpaceWand Usage_i8(0xd)
+#define Usage_Dig_DeviceConfiguration Usage_i8(0xe)
+#define Usage_Dig_CapacitiveHeatMapDigitizer Usage_i8(0xf)
+#define Usage_Dig_Stylus Usage_i8(0x20)
+#define Usage_Dig_Puck Usage_i8(0x21)
+#define Usage_Dig_Finger Usage_i8(0x22)
+#define Usage_Dig_Devicesettings Usage_i8(0x23)
+#define Usage_Dig_CharacterGesture Usage_i8(0x24)
+#define Usage_Dig_TipPressure Usage_i8(0x30)
+#define Usage_Dig_BarrelPressure Usage_i8(0x31)
+#define Usage_Dig_InRange Usage_i8(0x32)
+#define Usage_Dig_Touch Usage_i8(0x33)
+#define Usage_Dig_Untouch Usage_i8(0x34)
+#define Usage_Dig_Tap Usage_i8(0x35)
+#define Usage_Dig_Quality Usage_i8(0x36)
+#define Usage_Dig_DataValid Usage_i8(0x37)
+#define Usage_Dig_TransducerIndex Usage_i8(0x38)
+#define Usage_Dig_TabletFunctionKeys Usage_i8(0x39)
+#define Usage_Dig_ProgramChangeKeys Usage_i8(0x3a)
+#define Usage_Dig_BatteryStrength Usage_i8(0x3b)
+#define Usage_Dig_Invert Usage_i8(0x3c)
+#define Usage_Dig_XTilt Usage_i8(0x3d)
+#define Usage_Dig_YTilt Usage_i8(0x3e)
+#define Usage_Dig_Azimuth Usage_i8(0x3f)
+#define Usage_Dig_Altitude Usage_i8(0x40)
+#define Usage_Dig_Twist Usage_i8(0x41)
+#define Usage_Dig_TipSwitch Usage_i8(0x42)
+#define Usage_Dig_SecondaryTipSwitch Usage_i8(0x43)
+#define Usage_Dig_BarrelSwitch Usage_i8(0x44)
+#define Usage_Dig_Eraser Usage_i8(0x45)
+#define Usage_Dig_TabletPick Usage_i8(0x46)
+#define Usage_Dig_TouchValid Usage_i8(0x47)
+#define Usage_Dig_Width Usage_i8(0x48)
+#define Usage_Dig_Height Usage_i8(0x49)
+#define Usage_Dig_ContactIdentifier Usage_i8(0x51)
+#define Usage_Dig_DeviceMode Usage_i8(0x52)
+#define Usage_Dig_DeviceIdentifier Usage_i8(0x53)
+#define Usage_Dig_ContactCount Usage_i8(0x54)
+#define Usage_Dig_ContactCountMaximum Usage_i8(0x55)
+#define Usage_Dig_ScanTime Usage_i8(0x56)
+#define Usage_Dig_SurfaceSwitch Usage_i8(0x57)
+#define Usage_Dig_ButtonSwitch Usage_i8(0x58)
+#define Usage_Dig_PadType Usage_i8(0x59)
+#define Usage_Dig_SecondaryBarrelSwitch Usage_i8(0x5a)
+#define Usage_Dig_TransducerSerialNumber Usage_i8(0x5b)
+#define Usage_Dig_PreferredColor Usage_i8(0x5c)
+#define Usage_Dig_PreferredColorisLocked Usage_i8(0x5d)
+#define Usage_Dig_PreferredLineWidth Usage_i8(0x5e)
+#define Usage_Dig_PreferredLineWidthisLocked Usage_i8(0x5f)
+#define Usage_Dig_LatencyMode Usage_i8(0x60)
+#define Usage_Dig_GestureCharacterQuality Usage_i8(0x61)
+#define Usage_Dig_CharacterGestureDataLength Usage_i8(0x62)
+#define Usage_Dig_CharacterGestureData Usage_i8(0x63)
+#define Usage_Dig_GestureCharacterEncoding Usage_i8(0x64)
+#define Usage_Dig_UTF8CharacterGestureEncoding Usage_i8(0x65)
+#define Usage_Dig_UTF16LittleEndianCharacterGestureEncoding Usage_i8(0x66)
+#define Usage_Dig_UTF16BigEndianCharacterGestureEncoding Usage_i8(0x67)
+#define Usage_Dig_UTF32LittleEndianCharacterGestureEncoding Usage_i8(0x68)
+#define Usage_Dig_UTF32BigEndianCharacterGestureEncoding Usage_i8(0x69)
+#define Usage_Dig_CapacitiveHeatMapProtocolVendorID Usage_i8(0x6a)
+#define Usage_Dig_CapacitiveHeatMapProtocolVersion Usage_i8(0x6b)
+#define Usage_Dig_CapacitiveHeatMapFrameData Usage_i8(0x6c)
+#define Usage_Dig_GestureCharacterEnable Usage_i8(0x6d)
+#define Usage_Dig_TransducerSerialNumberPart2 Usage_i8(0x6e)
+#define Usage_Dig_NoPreferredColor Usage_i8(0x6f)
+#define Usage_Dig_PreferredLineStyle Usage_i8(0x70)
+#define Usage_Dig_PreferredLineStyleisLocked Usage_i8(0x71)
+#define Usage_Dig_Ink Usage_i8(0x72)
+#define Usage_Dig_Pencil Usage_i8(0x73)
+#define Usage_Dig_Highlighter Usage_i8(0x74)
+#define Usage_Dig_ChiselMarker Usage_i8(0x75)
+#define Usage_Dig_Brush Usage_i8(0x76)
+#define Usage_Dig_NoPreference Usage_i8(0x77)
+#define Usage_Dig_DigitizerDiagnostic Usage_i8(0x80)
+#define Usage_Dig_DigitizerError Usage_i8(0x81)
+#define Usage_Dig_ErrNormalStatus Usage_i8(0x82)
+#define Usage_Dig_ErrTransducersExceeded Usage_i8(0x83)
+#define Usage_Dig_ErrFullTransFeaturesUnavailable Usage_i8(0x84)
+#define Usage_Dig_ErrChargeLow Usage_i8(0x85)
+#define Usage_Dig_TransducerSoftwareInfo Usage_i8(0x90)
+#define Usage_Dig_TransducerVendorId Usage_i8(0x91)
+#define Usage_Dig_TransducerProductId Usage_i8(0x92)
+#define Usage_Dig_DeviceSupportedProtocols Usage_i8(0x93)
+#define Usage_Dig_TransducerSupportedProtocols Usage_i8(0x94)
+#define Usage_Dig_NoProtocol Usage_i8(0x95)
+#define Usage_Dig_WacomAESProtocol Usage_i8(0x96)
+#define Usage_Dig_USIProtocol Usage_i8(0x97)
+#define Usage_Dig_MicrosoftPenProtocol Usage_i8(0x98)
+#define Usage_Dig_SupportedReportRates Usage_i8(0xa0)
+#define Usage_Dig_ReportRate Usage_i8(0xa1)
+#define Usage_Dig_TransducerConnected Usage_i8(0xa2)
+#define Usage_Dig_SwitchDisabled Usage_i8(0xa3)
+#define Usage_Dig_SwitchUnimplemented Usage_i8(0xa4)
+#define Usage_Dig_TransducerSwitches Usage_i8(0xa5)
+#define Usage_Dig_TransducerIndexSelector Usage_i8(0xa6)
+#define Usage_Dig_ButtonPressThreshold Usage_i8(0xb0)
+#define Usage_Hap_SimpleHapticController Usage_i8(0x1)
+#define Usage_Hap_WaveformList Usage_i8(0x10)
+#define Usage_Hap_DurationList Usage_i8(0x11)
+#define Usage_Hap_AutoTrigger Usage_i8(0x20)
+#define Usage_Hap_ManualTrigger Usage_i8(0x21)
+#define Usage_Hap_AutoTriggerAssociatedControl Usage_i8(0x22)
+#define Usage_Hap_Intensity Usage_i8(0x23)
+#define Usage_Hap_RepeatCount Usage_i8(0x24)
+#define Usage_Hap_RetriggerPeriod Usage_i8(0x25)
+#define Usage_Hap_WaveformVendorPage Usage_i8(0x26)
+#define Usage_Hap_WaveformVendorID Usage_i8(0x27)
+#define Usage_Hap_WaveformCutoffTime Usage_i8(0x28)
+#define Usage_Hap_WaveformNone Usage_i16(0x1001)
+#define Usage_Hap_WaveformStop Usage_i16(0x1002)
+#define Usage_Hap_WaveformClick Usage_i16(0x1003)
+#define Usage_Hap_WaveformBuzzContinuous Usage_i16(0x1004)
+#define Usage_Hap_WaveformRumbleContinuous Usage_i16(0x1005)
+#define Usage_Hap_WaveformPress Usage_i16(0x1006)
+#define Usage_Hap_WaveformRelease Usage_i16(0x1007)
+#define Usage_Hap_WaveformHover Usage_i16(0x1008)
+#define Usage_Hap_WaveformSuccess Usage_i16(0x1009)
+#define Usage_Hap_WaveformError Usage_i16(0x100a)
+#define Usage_Hap_WaveformInkContinuous Usage_i16(0x100b)
+#define Usage_Hap_WaveformPencilContinuous Usage_i16(0x100c)
+#define Usage_Hap_WaveformMarkerContinuous Usage_i16(0x100d)
+#define Usage_Hap_WaveformChiselMarkerContinuous Usage_i16(0x100e)
+#define Usage_Hap_WaveformBrushContinuous Usage_i16(0x100f)
+#define Usage_Hap_WaveformEraserContinuous Usage_i16(0x1010)
+#define Usage_Hap_WaveformSparkleContinuous Usage_i16(0x1011)
+#define Usage_PID_PhysicalInputDevice Usage_i8(0x1)
+#define Usage_PID_Normal Usage_i8(0x20)
+#define Usage_PID_SetEffectReport Usage_i8(0x21)
+#define Usage_PID_EffectParameterBlockIndex Usage_i8(0x22)
+#define Usage_PID_ParameterBlockOffset Usage_i8(0x23)
+#define Usage_PID_ROMFlag Usage_i8(0x24)
+#define Usage_PID_EffectType Usage_i8(0x25)
+#define Usage_PID_ETConstantForce Usage_i8(0x26)
+#define Usage_PID_ETRamp Usage_i8(0x27)
+#define Usage_PID_ETCustomForce Usage_i8(0x28)
+#define Usage_PID_ETSquare Usage_i8(0x30)
+#define Usage_PID_ETSine Usage_i8(0x31)
+#define Usage_PID_ETTriangle Usage_i8(0x32)
+#define Usage_PID_ETSawtoothUp Usage_i8(0x33)
+#define Usage_PID_ETSawtoothDown Usage_i8(0x34)
+#define Usage_PID_ETSpring Usage_i8(0x40)
+#define Usage_PID_ETDamper Usage_i8(0x41)
+#define Usage_PID_ETInertia Usage_i8(0x42)
+#define Usage_PID_ETFriction Usage_i8(0x43)
+#define Usage_PID_Duration Usage_i8(0x50)
+#define Usage_PID_SamplePeriod Usage_i8(0x51)
+#define Usage_PID_Gain Usage_i8(0x52)
+#define Usage_PID_TriggerButton Usage_i8(0x53)
+#define Usage_PID_TriggerRepeatInterval Usage_i8(0x54)
+#define Usage_PID_AxesEnable Usage_i8(0x55)
+#define Usage_PID_DirectionEnable Usage_i8(0x56)
+#define Usage_PID_Direction Usage_i8(0x57)
+#define Usage_PID_TypeSpecificBlockOffset Usage_i8(0x58)
+#define Usage_PID_BlockType Usage_i8(0x59)
+#define Usage_PID_SetEnvelopeReport Usage_i8(0x5a)
+#define Usage_PID_AttackLevel Usage_i8(0x5b)
+#define Usage_PID_AttackTime Usage_i8(0x5c)
+#define Usage_PID_FadeLevel Usage_i8(0x5d)
+#define Usage_PID_FadeTime Usage_i8(0x5e)
+#define Usage_PID_SetConditionReport Usage_i8(0x5f)
+#define Usage_PID_CenterPointOffset Usage_i8(0x60)
+#define Usage_PID_PositiveCoefficient Usage_i8(0x61)
+#define Usage_PID_NegativeCoefficient Usage_i8(0x62)
+#define Usage_PID_PositiveSaturation Usage_i8(0x63)
+#define Usage_PID_NegativeSaturation Usage_i8(0x64)
+#define Usage_PID_DeadBand Usage_i8(0x65)
+#define Usage_PID_DownloadForceSample Usage_i8(0x66)
+#define Usage_PID_IsochCustomForceEnable Usage_i8(0x67)
+#define Usage_PID_CustomForceDataReport Usage_i8(0x68)
+#define Usage_PID_CustomForceData Usage_i8(0x69)
+#define Usage_PID_CustomForceVendorDefinedData Usage_i8(0x6a)
+#define Usage_PID_SetCustomForceReport Usage_i8(0x6b)
+#define Usage_PID_CustomForceDataOffset Usage_i8(0x6c)
+#define Usage_PID_SampleCount Usage_i8(0x6d)
+#define Usage_PID_SetPeriodicReport Usage_i8(0x6e)
+#define Usage_PID_Offset Usage_i8(0x6f)
+#define Usage_PID_Magnitude Usage_i8(0x70)
+#define Usage_PID_Phase Usage_i8(0x71)
+#define Usage_PID_Period Usage_i8(0x72)
+#define Usage_PID_SetConstantForceReport Usage_i8(0x73)
+#define Usage_PID_SetRampForceReport Usage_i8(0x74)
+#define Usage_PID_RampStart Usage_i8(0x75)
+#define Usage_PID_RampEnd Usage_i8(0x76)
+#define Usage_PID_EffectOperationReport Usage_i8(0x77)
+#define Usage_PID_EffectOperation Usage_i8(0x78)
+#define Usage_PID_OpEffectStart Usage_i8(0x79)
+#define Usage_PID_OpEffectStartSolo Usage_i8(0x7a)
+#define Usage_PID_OpEffectStop Usage_i8(0x7b)
+#define Usage_PID_LoopCount Usage_i8(0x7c)
+#define Usage_PID_DeviceGainReport Usage_i8(0x7d)
+#define Usage_PID_DeviceGain Usage_i8(0x7e)
+#define Usage_PID_ParameterBlockPoolsReport Usage_i8(0x7f)
+#define Usage_PID_RAMPoolSize Usage_i8(0x80)
+#define Usage_PID_ROMPoolSize Usage_i8(0x81)
+#define Usage_PID_ROMEffectBlockCount Usage_i8(0x82)
+#define Usage_PID_SimultaneousEffectsMax Usage_i8(0x83)
+#define Usage_PID_PoolAlignment Usage_i8(0x84)
+#define Usage_PID_ParameterBlockMoveReport Usage_i8(0x85)
+#define Usage_PID_MoveSource Usage_i8(0x86)
+#define Usage_PID_MoveDestination Usage_i8(0x87)
+#define Usage_PID_MoveLength Usage_i8(0x88)
+#define Usage_PID_EffectParameterBlockLoadReport Usage_i8(0x89)
+#define Usage_PID_EffectParameterBlockLoadStatus Usage_i8(0x8b)
+#define Usage_PID_BlockLoadSuccess Usage_i8(0x8c)
+#define Usage_PID_BlockLoadFull Usage_i8(0x8d)
+#define Usage_PID_BlockLoadError Usage_i8(0x8e)
+#define Usage_PID_BlockHandle Usage_i8(0x8f)
+#define Usage_PID_EffectParameterBlockFreeReport Usage_i8(0x90)
+#define Usage_PID_TypeSpecificBlockHandle Usage_i8(0x91)
+#define Usage_PID_PIDStateReport Usage_i8(0x92)
+#define Usage_PID_EffectPlaying Usage_i8(0x94)
+#define Usage_PID_PIDDeviceControlReport Usage_i8(0x95)
+#define Usage_PID_PIDDeviceControl Usage_i8(0x96)
+#define Usage_PID_DCEnableActuators Usage_i8(0x97)
+#define Usage_PID_DCDisableActuators Usage_i8(0x98)
+#define Usage_PID_DCStopAllEffects Usage_i8(0x99)
+#define Usage_PID_DCReset Usage_i8(0x9a)
+#define Usage_PID_DCPause Usage_i8(0x9b)
+#define Usage_PID_DCContinue Usage_i8(0x9c)
+#define Usage_PID_DevicePaused Usage_i8(0x9f)
+#define Usage_PID_ActuatorsEnabled Usage_i8(0xa0)
+#define Usage_PID_SafetySwitch Usage_i8(0xa4)
+#define Usage_PID_ActuatorOverrideSwitch Usage_i8(0xa5)
+#define Usage_PID_ActuatorPower Usage_i8(0xa6)
+#define Usage_PID_StartDelay Usage_i8(0xa7)
+#define Usage_PID_ParameterBlockSize Usage_i8(0xa8)
+#define Usage_PID_DeviceManagedPool Usage_i8(0xa9)
+#define Usage_PID_SharedParameterBlocks Usage_i8(0xaa)
+#define Usage_PID_CreateNewEffectParameterBlockReport Usage_i8(0xab)
+#define Usage_PID_RAMPoolAvailable Usage_i8(0xac)
+#define Usage_SC_SocControl Usage_i8(0x1)
+#define Usage_SC_FirmwareTransfer Usage_i8(0x2)
+#define Usage_SC_FirmwareFileId Usage_i8(0x3)
+#define Usage_SC_FileOffsetInBytes Usage_i8(0x4)
+#define Usage_SC_FileTransferSizeMaxInBytes Usage_i8(0x5)
+#define Usage_SC_FilePayload Usage_i8(0x6)
+#define Usage_SC_FilePayloadSizeInBytes Usage_i8(0x7)
+#define Usage_SC_FilePayloadContainsLastBytes Usage_i8(0x8)
+#define Usage_SC_FileTransferStop Usage_i8(0x9)
+#define Usage_SC_FileTransferTillEnd Usage_i8(0xa)
+#define Usage_EHT_EyeTracker Usage_i8(0x1)
+#define Usage_EHT_HeadTracker Usage_i8(0x2)
+#define Usage_EHT_TrackingData Usage_i8(0x10)
+#define Usage_EHT_Capabilities Usage_i8(0x11)
+#define Usage_EHT_Configuration Usage_i8(0x12)
+#define Usage_EHT_Status Usage_i8(0x13)
+#define Usage_EHT_Control Usage_i8(0x14)
+#define Usage_EHT_SensorTimestamp Usage_i8(0x20)
+#define Usage_EHT_PositionX Usage_i8(0x21)
+#define Usage_EHT_PositionY Usage_i8(0x22)
+#define Usage_EHT_PositionZ Usage_i8(0x23)
+#define Usage_EHT_GazePoint Usage_i8(0x24)
+#define Usage_EHT_LeftEyePosition Usage_i8(0x25)
+#define Usage_EHT_RightEyePosition Usage_i8(0x26)
+#define Usage_EHT_HeadPosition Usage_i8(0x27)
+#define Usage_EHT_HeadDirectionPoint Usage_i8(0x28)
+#define Usage_EHT_RotationaboutXaxis Usage_i8(0x29)
+#define Usage_EHT_RotationaboutYaxis Usage_i8(0x2a)
+#define Usage_EHT_RotationaboutZaxis Usage_i8(0x2b)
+#define Usage_EHT_TrackerQuality Usage_i16(0x100)
+#define Usage_EHT_MinimumTrackingDistance Usage_i16(0x101)
+#define Usage_EHT_OptimumTrackingDistance Usage_i16(0x102)
+#define Usage_EHT_MaximumTrackingDistance Usage_i16(0x103)
+#define Usage_EHT_MaximumScreenPlaneWidth Usage_i16(0x104)
+#define Usage_EHT_MaximumScreenPlaneHeight Usage_i16(0x105)
+#define Usage_EHT_DisplayManufacturerID Usage_i16(0x200)
+#define Usage_EHT_DisplayProductID Usage_i16(0x201)
+#define Usage_EHT_DisplaySerialNumber Usage_i16(0x202)
+#define Usage_EHT_DisplayManufacturerDate Usage_i16(0x203)
+#define Usage_EHT_CalibratedScreenWidth Usage_i16(0x204)
+#define Usage_EHT_CalibratedScreenHeight Usage_i16(0x205)
+#define Usage_EHT_SamplingFrequency Usage_i16(0x300)
+#define Usage_EHT_ConfigurationStatus Usage_i16(0x301)
+#define Usage_EHT_DeviceModeRequest Usage_i16(0x400)
+#define Usage_AD_AlphanumericDisplay Usage_i8(0x1)
+#define Usage_AD_AuxiliaryDisplay Usage_i8(0x2)
+#define Usage_AD_DisplayAttributesReport Usage_i8(0x20)
+#define Usage_AD_ASCIICharacterSet Usage_i8(0x21)
+#define Usage_AD_DataReadBack Usage_i8(0x22)
+#define Usage_AD_FontReadBack Usage_i8(0x23)
+#define Usage_AD_DisplayControlReport Usage_i8(0x24)
+#define Usage_AD_ClearDisplay Usage_i8(0x25)
+#define Usage_AD_DisplayEnable Usage_i8(0x26)
+#define Usage_AD_ScreenSaverDelay Usage_i8(0x27)
+#define Usage_AD_ScreenSaverEnable Usage_i8(0x28)
+#define Usage_AD_VerticalScroll Usage_i8(0x29)
+#define Usage_AD_HorizontalScroll Usage_i8(0x2a)
+#define Usage_AD_CharacterReport Usage_i8(0x2b)
+#define Usage_AD_DisplayData Usage_i8(0x2c)
+#define Usage_AD_DisplayStatus Usage_i8(0x2d)
+#define Usage_AD_StatNotReady Usage_i8(0x2e)
+#define Usage_AD_StatReady Usage_i8(0x2f)
+#define Usage_AD_ErrNotaloadablecharacter Usage_i8(0x30)
+#define Usage_AD_ErrFontdatacannotberead Usage_i8(0x31)
+#define Usage_AD_CursorPositionReport Usage_i8(0x32)
+#define Usage_AD_Row Usage_i8(0x33)
+#define Usage_AD_Column Usage_i8(0x34)
+#define Usage_AD_Rows Usage_i8(0x35)
+#define Usage_AD_Columns Usage_i8(0x36)
+#define Usage_AD_CursorPixelPositioning Usage_i8(0x37)
+#define Usage_AD_CursorMode Usage_i8(0x38)
+#define Usage_AD_CursorEnable Usage_i8(0x39)
+#define Usage_AD_CursorBlink Usage_i8(0x3a)
+#define Usage_AD_FontReport Usage_i8(0x3b)
+#define Usage_AD_FontData Usage_i8(0x3c)
+#define Usage_AD_CharacterWidth Usage_i8(0x3d)
+#define Usage_AD_CharacterHeight Usage_i8(0x3e)
+#define Usage_AD_CharacterSpacingHorizontal Usage_i8(0x3f)
+#define Usage_AD_CharacterSpacingVertical Usage_i8(0x40)
+#define Usage_AD_UnicodeCharacterSet Usage_i8(0x41)
+#define Usage_AD_Font7Segment Usage_i8(0x42)
+#define Usage_AD_SevenSegmentDirectMap Usage_i8(0x43)
+#define Usage_AD_Font14Segment Usage_i8(0x44)
+#define Usage_AD_One4SegmentDirectMap Usage_i8(0x45)
+#define Usage_AD_DisplayBrightness Usage_i8(0x46)
+#define Usage_AD_DisplayContrast Usage_i8(0x47)
+#define Usage_AD_CharacterAttribute Usage_i8(0x48)
+#define Usage_AD_AttributeReadback Usage_i8(0x49)
+#define Usage_AD_AttributeData Usage_i8(0x4a)
+#define Usage_AD_CharAttrEnhance Usage_i8(0x4b)
+#define Usage_AD_CharAttrUnderline Usage_i8(0x4c)
+#define Usage_AD_CharAttrBlink Usage_i8(0x4d)
+#define Usage_AD_BitmapSizeX Usage_i8(0x80)
+#define Usage_AD_BitmapSizeY Usage_i8(0x81)
+#define Usage_AD_MaxBlitSize Usage_i8(0x82)
+#define Usage_AD_BitDepthFormat Usage_i8(0x83)
+#define Usage_AD_DisplayOrientation Usage_i8(0x84)
+#define Usage_AD_PaletteReport Usage_i8(0x85)
+#define Usage_AD_PaletteDataSize Usage_i8(0x86)
+#define Usage_AD_PaletteDataOffset Usage_i8(0x87)
+#define Usage_AD_PaletteData Usage_i8(0x88)
+#define Usage_AD_BlitReport Usage_i8(0x8a)
+#define Usage_AD_BlitRectangleX1 Usage_i8(0x8b)
+#define Usage_AD_BlitRectangleY1 Usage_i8(0x8c)
+#define Usage_AD_BlitRectangleX2 Usage_i8(0x8d)
+#define Usage_AD_BlitRectangleY2 Usage_i8(0x8e)
+#define Usage_AD_BlitData Usage_i8(0x8f)
+#define Usage_AD_SoftButton Usage_i8(0x90)
+#define Usage_AD_SoftButtonID Usage_i8(0x91)
+#define Usage_AD_SoftButtonSide Usage_i8(0x92)
+#define Usage_AD_SoftButtonOffset1 Usage_i8(0x93)
+#define Usage_AD_SoftButtonOffset2 Usage_i8(0x94)
+#define Usage_AD_SoftButtonReport Usage_i8(0x95)
+#define Usage_AD_SoftKeys Usage_i8(0xc2)
+#define Usage_AD_DisplayDataExtensions Usage_i8(0xcc)
+#define Usage_AD_CharacterMapping Usage_i8(0xcf)
+#define Usage_AD_UnicodeEquivalent Usage_i8(0xdd)
+#define Usage_AD_CharacterPageMapping Usage_i8(0xdf)
+#define Usage_AD_RequestReport Usage_i16(0xff)
+#define Usage_Sen_Sensor Usage_i8(0x1)
+#define Usage_Sen_Biometric Usage_i8(0x10)
+#define Usage_Sen_BiometricHumanPresence Usage_i8(0x11)
+#define Usage_Sen_BiometricHumanProximity Usage_i8(0x12)
+#define Usage_Sen_BiometricHumanTouch Usage_i8(0x13)
+#define Usage_Sen_BiometricBloodPressure Usage_i8(0x14)
+#define Usage_Sen_BiometricBodyTemperature Usage_i8(0x15)
+#define Usage_Sen_BiometricHeartRate Usage_i8(0x16)
+#define Usage_Sen_BiometricHeartRateVariability Usage_i8(0x17)
+#define Usage_Sen_BiometricPeripheralOxygenSaturation Usage_i8(0x18)
+#define Usage_Sen_BiometricRespiratoryRate Usage_i8(0x19)
+#define Usage_Sen_Electrical Usage_i8(0x20)
+#define Usage_Sen_ElectricalCapacitance Usage_i8(0x21)
+#define Usage_Sen_ElectricalCurrent Usage_i8(0x22)
+#define Usage_Sen_ElectricalPower Usage_i8(0x23)
+#define Usage_Sen_ElectricalInductance Usage_i8(0x24)
+#define Usage_Sen_ElectricalResistance Usage_i8(0x25)
+#define Usage_Sen_ElectricalVoltage Usage_i8(0x26)
+#define Usage_Sen_ElectricalPotentiometer Usage_i8(0x27)
+#define Usage_Sen_ElectricalFrequency Usage_i8(0x28)
+#define Usage_Sen_ElectricalPeriod Usage_i8(0x29)
+#define Usage_Sen_Environmental Usage_i8(0x30)
+#define Usage_Sen_EnvironmentalAtmosphericPressure Usage_i8(0x31)
+#define Usage_Sen_EnvironmentalHumidity Usage_i8(0x32)
+#define Usage_Sen_EnvironmentalTemperature Usage_i8(0x33)
+#define Usage_Sen_EnvironmentalWindDirection Usage_i8(0x34)
+#define Usage_Sen_EnvironmentalWindSpeed Usage_i8(0x35)
+#define Usage_Sen_EnvironmentalAirQuality Usage_i8(0x36)
+#define Usage_Sen_EnvironmentalHeatIndex Usage_i8(0x37)
+#define Usage_Sen_EnvironmentalSurfaceTemperature Usage_i8(0x38)
+#define Usage_Sen_EnvironmentalVolatileOrganicCompounds Usage_i8(0x39)
+#define Usage_Sen_EnvironmentalObjectPresence Usage_i8(0x3a)
+#define Usage_Sen_EnvironmentalObjectProximity Usage_i8(0x3b)
+#define Usage_Sen_Light Usage_i8(0x40)
+#define Usage_Sen_LightAmbientLight Usage_i8(0x41)
+#define Usage_Sen_LightConsumerInfrared Usage_i8(0x42)
+#define Usage_Sen_LightInfraredLight Usage_i8(0x43)
+#define Usage_Sen_LightVisibleLight Usage_i8(0x44)
+#define Usage_Sen_LightUltravioletLight Usage_i8(0x45)
+#define Usage_Sen_Location Usage_i8(0x50)
+#define Usage_Sen_LocationBroadcast Usage_i8(0x51)
+#define Usage_Sen_LocationDeadReckoning Usage_i8(0x52)
+#define Usage_Sen_LocationGPSGlobalPositioningSystem Usage_i8(0x53)
+#define Usage_Sen_LocationLookup Usage_i8(0x54)
+#define Usage_Sen_LocationOther Usage_i8(0x55)
+#define Usage_Sen_LocationStatic Usage_i8(0x56)
+#define Usage_Sen_LocationTriangulation Usage_i8(0x57)
+#define Usage_Sen_Mechanical Usage_i8(0x60)
+#define Usage_Sen_MechanicalBooleanSwitch Usage_i8(0x61)
+#define Usage_Sen_MechanicalBooleanSwitchArray Usage_i8(0x62)
+#define Usage_Sen_MechanicalMultivalueSwitch Usage_i8(0x63)
+#define Usage_Sen_MechanicalForce Usage_i8(0x64)
+#define Usage_Sen_MechanicalPressure Usage_i8(0x65)
+#define Usage_Sen_MechanicalStrain Usage_i8(0x66)
+#define Usage_Sen_MechanicalWeight Usage_i8(0x67)
+#define Usage_Sen_MechanicalHapticVibrator Usage_i8(0x68)
+#define Usage_Sen_MechanicalHallEffectSwitch Usage_i8(0x69)
+#define Usage_Sen_Motion Usage_i8(0x70)
+#define Usage_Sen_MotionAccelerometer1D Usage_i8(0x71)
+#define Usage_Sen_MotionAccelerometer2D Usage_i8(0x72)
+#define Usage_Sen_MotionAccelerometer3D Usage_i8(0x73)
+#define Usage_Sen_MotionGyrometer1D Usage_i8(0x74)
+#define Usage_Sen_MotionGyrometer2D Usage_i8(0x75)
+#define Usage_Sen_MotionGyrometer3D Usage_i8(0x76)
+#define Usage_Sen_MotionMotionDetector Usage_i8(0x77)
+#define Usage_Sen_MotionSpeedometer Usage_i8(0x78)
+#define Usage_Sen_MotionAccelerometer Usage_i8(0x79)
+#define Usage_Sen_MotionGyrometer Usage_i8(0x7a)
+#define Usage_Sen_MotionGravityVector Usage_i8(0x7b)
+#define Usage_Sen_MotionLinearAccelerometer Usage_i8(0x7c)
+#define Usage_Sen_Orientation Usage_i8(0x80)
+#define Usage_Sen_OrientationCompass1D Usage_i8(0x81)
+#define Usage_Sen_OrientationCompass2D Usage_i8(0x82)
+#define Usage_Sen_OrientationCompass3D Usage_i8(0x83)
+#define Usage_Sen_OrientationInclinometer1D Usage_i8(0x84)
+#define Usage_Sen_OrientationInclinometer2D Usage_i8(0x85)
+#define Usage_Sen_OrientationInclinometer3D Usage_i8(0x86)
+#define Usage_Sen_OrientationDistance1D Usage_i8(0x87)
+#define Usage_Sen_OrientationDistance2D Usage_i8(0x88)
+#define Usage_Sen_OrientationDistance3D Usage_i8(0x89)
+#define Usage_Sen_OrientationDeviceOrientation Usage_i8(0x8a)
+#define Usage_Sen_OrientationCompass Usage_i8(0x8b)
+#define Usage_Sen_OrientationInclinometer Usage_i8(0x8c)
+#define Usage_Sen_OrientationDistance Usage_i8(0x8d)
+#define Usage_Sen_OrientationRelativeOrientation Usage_i8(0x8e)
+#define Usage_Sen_OrientationSimpleOrientation Usage_i8(0x8f)
+#define Usage_Sen_Scanner Usage_i8(0x90)
+#define Usage_Sen_ScannerBarcode Usage_i8(0x91)
+#define Usage_Sen_ScannerRFID Usage_i8(0x92)
+#define Usage_Sen_ScannerNFC Usage_i8(0x93)
+#define Usage_Sen_Time Usage_i8(0xa0)
+#define Usage_Sen_TimeAlarmTimer Usage_i8(0xa1)
+#define Usage_Sen_TimeRealTimeClock Usage_i8(0xa2)
+#define Usage_Sen_PersonalActivity Usage_i8(0xb0)
+#define Usage_Sen_PersonalActivityActivityDetection Usage_i8(0xb1)
+#define Usage_Sen_PersonalActivityDevicePosition Usage_i8(0xb2)
+#define Usage_Sen_PersonalActivityFloorTracker Usage_i8(0xb3)
+#define Usage_Sen_PersonalActivityPedometer Usage_i8(0xb4)
+#define Usage_Sen_PersonalActivityStepDetection Usage_i8(0xb5)
+#define Usage_Sen_OrientationExtended Usage_i8(0xc0)
+#define Usage_Sen_OrientationExtendedGeomagneticOrientation Usage_i8(0xc1)
+#define Usage_Sen_OrientationExtendedMagnetometer Usage_i8(0xc2)
+#define Usage_Sen_Gesture Usage_i8(0xd0)
+#define Usage_Sen_GestureChassisFlipGesture Usage_i8(0xd1)
+#define Usage_Sen_GestureHingeFoldGesture Usage_i8(0xd2)
+#define Usage_Sen_Other Usage_i8(0xe0)
+#define Usage_Sen_OtherCustom Usage_i8(0xe1)
+#define Usage_Sen_OtherGeneric Usage_i8(0xe2)
+#define Usage_Sen_OtherGenericEnumerator Usage_i8(0xe3)
+#define Usage_Sen_OtherHingeAngle Usage_i8(0xe4)
+#define Usage_Sen_VendorReserved1 Usage_i8(0xf0)
+#define Usage_Sen_VendorReserved2 Usage_i8(0xf1)
+#define Usage_Sen_VendorReserved3 Usage_i8(0xf2)
+#define Usage_Sen_VendorReserved4 Usage_i8(0xf3)
+#define Usage_Sen_VendorReserved5 Usage_i8(0xf4)
+#define Usage_Sen_VendorReserved6 Usage_i8(0xf5)
+#define Usage_Sen_VendorReserved7 Usage_i8(0xf6)
+#define Usage_Sen_VendorReserved8 Usage_i8(0xf7)
+#define Usage_Sen_VendorReserved9 Usage_i8(0xf8)
+#define Usage_Sen_VendorReserved10 Usage_i8(0xf9)
+#define Usage_Sen_VendorReserved11 Usage_i8(0xfa)
+#define Usage_Sen_VendorReserved12 Usage_i8(0xfb)
+#define Usage_Sen_VendorReserved13 Usage_i8(0xfc)
+#define Usage_Sen_VendorReserved14 Usage_i8(0xfd)
+#define Usage_Sen_VendorReserved15 Usage_i8(0xfe)
+#define Usage_Sen_VendorReserved16 Usage_i16(0xff)
+#define Usage_Sen_Event Usage_i16(0x200)
+#define Usage_Sen_EventSensorState Usage_i16(0x201)
+#define Usage_Sen_EventSensorEvent Usage_i16(0x202)
+#define Usage_Sen_Property Usage_i16(0x300)
+#define Usage_Sen_PropertyFriendlyName Usage_i16(0x301)
+#define Usage_Sen_PropertyPersistentUniqueID Usage_i16(0x302)
+#define Usage_Sen_PropertySensorStatus Usage_i16(0x303)
+#define Usage_Sen_PropertyMinimumReportInterval Usage_i16(0x304)
+#define Usage_Sen_PropertySensorManufacturer Usage_i16(0x305)
+#define Usage_Sen_PropertySensorModel Usage_i16(0x306)
+#define Usage_Sen_PropertySensorSerialNumber Usage_i16(0x307)
+#define Usage_Sen_PropertySensorDescription Usage_i16(0x308)
+#define Usage_Sen_PropertySensorConnectionType Usage_i16(0x309)
+#define Usage_Sen_PropertySensorDevicePath Usage_i16(0x30a)
+#define Usage_Sen_PropertyHardwareRevision Usage_i16(0x30b)
+#define Usage_Sen_PropertyFirmwareVersion Usage_i16(0x30c)
+#define Usage_Sen_PropertyReleaseDate Usage_i16(0x30d)
+#define Usage_Sen_PropertyReportInterval Usage_i16(0x30e)
+#define Usage_Sen_PropertyChangeSensitivityAbsolute Usage_i16(0x30f)
+#define Usage_Sen_PropertyChangeSensitivityPercentofRange Usage_i16(0x310)
+#define Usage_Sen_PropertyChangeSensitivityPercentRelative Usage_i16(0x311)
+#define Usage_Sen_PropertyAccuracy Usage_i16(0x312)
+#define Usage_Sen_PropertyResolution Usage_i16(0x313)
+#define Usage_Sen_PropertyMaximum Usage_i16(0x314)
+#define Usage_Sen_PropertyMinimum Usage_i16(0x315)
+#define Usage_Sen_PropertyReportingState Usage_i16(0x316)
+#define Usage_Sen_PropertySamplingRate Usage_i16(0x317)
+#define Usage_Sen_PropertyResponseCurve Usage_i16(0x318)
+#define Usage_Sen_PropertyPowerState Usage_i16(0x319)
+#define Usage_Sen_PropertyMaximumFIFOEvents Usage_i16(0x31a)
+#define Usage_Sen_PropertyReportLatency Usage_i16(0x31b)
+#define Usage_Sen_PropertyFlushFIFOEvents Usage_i16(0x31c)
+#define Usage_Sen_PropertyMaximumPowerConsumption Usage_i16(0x31d)
+#define Usage_Sen_PropertyIsPrimary Usage_i16(0x31e)
+#define Usage_Sen_PropertyHumanPresenceDetectionType Usage_i16(0x31f)
+#define Usage_Sen_DataFieldLocation Usage_i16(0x400)
+#define Usage_Sen_DataFieldAltitudeAntennaSeaLevel Usage_i16(0x402)
+#define Usage_Sen_DataFieldDifferentialReferenceStationID Usage_i16(0x403)
+#define Usage_Sen_DataFieldAltitudeEllipsoidError Usage_i16(0x404)
+#define Usage_Sen_DataFieldAltitudeEllipsoid Usage_i16(0x405)
+#define Usage_Sen_DataFieldAltitudeSeaLevelError Usage_i16(0x406)
+#define Usage_Sen_DataFieldAltitudeSeaLevel Usage_i16(0x407)
+#define Usage_Sen_DataFieldDifferentialGPSDataAge Usage_i16(0x408)
+#define Usage_Sen_DataFieldErrorRadius Usage_i16(0x409)
+#define Usage_Sen_DataFieldFixQuality Usage_i16(0x40a)
+#define Usage_Sen_DataFieldFixType Usage_i16(0x40b)
+#define Usage_Sen_DataFieldGeoidalSeparation Usage_i16(0x40c)
+#define Usage_Sen_DataFieldGPSOperationMode Usage_i16(0x40d)
+#define Usage_Sen_DataFieldGPSSelectionMode Usage_i16(0x40e)
+#define Usage_Sen_DataFieldGPSStatus Usage_i16(0x40f)
+#define Usage_Sen_DataFieldPositionDilutionofPrecision Usage_i16(0x410)
+#define Usage_Sen_DataFieldHorizontalDilutionofPrecision Usage_i16(0x411)
+#define Usage_Sen_DataFieldVerticalDilutionofPrecision Usage_i16(0x412)
+#define Usage_Sen_DataFieldLatitude Usage_i16(0x413)
+#define Usage_Sen_DataFieldLongitude Usage_i16(0x414)
+#define Usage_Sen_DataFieldTrueHeading Usage_i16(0x415)
+#define Usage_Sen_DataFieldMagneticHeading Usage_i16(0x416)
+#define Usage_Sen_DataFieldMagneticVariation Usage_i16(0x417)
+#define Usage_Sen_DataFieldSpeed Usage_i16(0x418)
+#define Usage_Sen_DataFieldSatellitesinView Usage_i16(0x419)
+#define Usage_Sen_DataFieldSatellitesinViewAzimuth Usage_i16(0x41a)
+#define Usage_Sen_DataFieldSatellitesinViewElevation Usage_i16(0x41b)
+#define Usage_Sen_DataFieldSatellitesinViewIDs Usage_i16(0x41c)
+#define Usage_Sen_DataFieldSatellitesinViewPRNs Usage_i16(0x41d)
+#define Usage_Sen_DataFieldSatellitesinViewSNRatios Usage_i16(0x41e)
+#define Usage_Sen_DataFieldSatellitesUsedCount Usage_i16(0x41f)
+#define Usage_Sen_DataFieldSatellitesUsedPRNs Usage_i16(0x420)
+#define Usage_Sen_DataFieldNMEASentence Usage_i16(0x421)
+#define Usage_Sen_DataFieldAddressLine1 Usage_i16(0x422)
+#define Usage_Sen_DataFieldAddressLine2 Usage_i16(0x423)
+#define Usage_Sen_DataFieldCity Usage_i16(0x424)
+#define Usage_Sen_DataFieldStateorProvince Usage_i16(0x425)
+#define Usage_Sen_DataFieldCountryorRegion Usage_i16(0x426)
+#define Usage_Sen_DataFieldPostalCode Usage_i16(0x427)
+#define Usage_Sen_PropertyLocation Usage_i16(0x42a)
+#define Usage_Sen_PropertyLocationDesiredAccuracy Usage_i16(0x42b)
+#define Usage_Sen_DataFieldEnvironmental Usage_i16(0x430)
+#define Usage_Sen_DataFieldAtmosphericPressure Usage_i16(0x431)
+#define Usage_Sen_DataFieldRelativeHumidity Usage_i16(0x433)
+#define Usage_Sen_DataFieldTemperature Usage_i16(0x434)
+#define Usage_Sen_DataFieldWindDirection Usage_i16(0x435)
+#define Usage_Sen_DataFieldWindSpeed Usage_i16(0x436)
+#define Usage_Sen_DataFieldAirQualityIndex Usage_i16(0x437)
+#define Usage_Sen_DataFieldEquivalentCO2 Usage_i16(0x438)
+#define Usage_Sen_DataFieldVolatileOrganicCompoundConcentration Usage_i16(0x439)
+#define Usage_Sen_DataFieldObjectPresence Usage_i16(0x43a)
+#define Usage_Sen_DataFieldObjectProximityRange Usage_i16(0x43b)
+#define Usage_Sen_DataFieldObjectProximityOutofRange Usage_i16(0x43c)
+#define Usage_Sen_PropertyEnvironmental Usage_i16(0x440)
+#define Usage_Sen_PropertyReferencePressure Usage_i16(0x441)
+#define Usage_Sen_DataFieldMotion Usage_i16(0x450)
+#define Usage_Sen_DataFieldMotionState Usage_i16(0x451)
+#define Usage_Sen_DataFieldAcceleration Usage_i16(0x452)
+#define Usage_Sen_DataFieldAccelerationAxisX Usage_i16(0x453)
+#define Usage_Sen_DataFieldAccelerationAxisY Usage_i16(0x454)
+#define Usage_Sen_DataFieldAccelerationAxisZ Usage_i16(0x455)
+#define Usage_Sen_DataFieldAngularVelocity Usage_i16(0x456)
+#define Usage_Sen_DataFieldAngularVelocityaboutXAxis Usage_i16(0x457)
+#define Usage_Sen_DataFieldAngularVelocityaboutYAxis Usage_i16(0x458)
+#define Usage_Sen_DataFieldAngularVelocityaboutZAxis Usage_i16(0x459)
+#define Usage_Sen_DataFieldAngularPosition Usage_i16(0x45a)
+#define Usage_Sen_DataFieldAngularPositionaboutXAxis Usage_i16(0x45b)
+#define Usage_Sen_DataFieldAngularPositionaboutYAxis Usage_i16(0x45c)
+#define Usage_Sen_DataFieldAngularPositionaboutZAxis Usage_i16(0x45d)
+#define Usage_Sen_DataFieldMotionSpeed Usage_i16(0x45e)
+#define Usage_Sen_DataFieldMotionIntensity Usage_i16(0x45f)
+#define Usage_Sen_DataFieldOrientation Usage_i16(0x470)
+#define Usage_Sen_DataFieldHeading Usage_i16(0x471)
+#define Usage_Sen_DataFieldHeadingXAxis Usage_i16(0x472)
+#define Usage_Sen_DataFieldHeadingYAxis Usage_i16(0x473)
+#define Usage_Sen_DataFieldHeadingZAxis Usage_i16(0x474)
+#define Usage_Sen_DataFieldHeadingCompensatedMagneticNorth Usage_i16(0x475)
+#define Usage_Sen_DataFieldHeadingCompensatedTrueNorth Usage_i16(0x476)
+#define Usage_Sen_DataFieldHeadingMagneticNorth Usage_i16(0x477)
+#define Usage_Sen_DataFieldHeadingTrueNorth Usage_i16(0x478)
+#define Usage_Sen_DataFieldDistance Usage_i16(0x479)
+#define Usage_Sen_DataFieldDistanceXAxis Usage_i16(0x47a)
+#define Usage_Sen_DataFieldDistanceYAxis Usage_i16(0x47b)
+#define Usage_Sen_DataFieldDistanceZAxis Usage_i16(0x47c)
+#define Usage_Sen_DataFieldDistanceOutofRange Usage_i16(0x47d)
+#define Usage_Sen_DataFieldTilt Usage_i16(0x47e)
+#define Usage_Sen_DataFieldTiltXAxis Usage_i16(0x47f)
+#define Usage_Sen_DataFieldTiltYAxis Usage_i16(0x480)
+#define Usage_Sen_DataFieldTiltZAxis Usage_i16(0x481)
+#define Usage_Sen_DataFieldRotationMatrix Usage_i16(0x482)
+#define Usage_Sen_DataFieldQuaternion Usage_i16(0x483)
+#define Usage_Sen_DataFieldMagneticFlux Usage_i16(0x484)
+#define Usage_Sen_DataFieldMagneticFluxXAxis Usage_i16(0x485)
+#define Usage_Sen_DataFieldMagneticFluxYAxis Usage_i16(0x486)
+#define Usage_Sen_DataFieldMagneticFluxZAxis Usage_i16(0x487)
+#define Usage_Sen_DataFieldMagnetometerAccuracy Usage_i16(0x488)
+#define Usage_Sen_DataFieldSimpleOrientationDirection Usage_i16(0x489)
+#define Usage_Sen_DataFieldMechanical Usage_i16(0x490)
+#define Usage_Sen_DataFieldBooleanSwitchState Usage_i16(0x491)
+#define Usage_Sen_DataFieldBooleanSwitchArrayStates Usage_i16(0x492)
+#define Usage_Sen_DataFieldMultivalueSwitchValue Usage_i16(0x493)
+#define Usage_Sen_DataFieldForce Usage_i16(0x494)
+#define Usage_Sen_DataFieldAbsolutePressure Usage_i16(0x495)
+#define Usage_Sen_DataFieldGaugePressure Usage_i16(0x496)
+#define Usage_Sen_DataFieldStrain Usage_i16(0x497)
+#define Usage_Sen_DataFieldWeight Usage_i16(0x498)
+#define Usage_Sen_PropertyMechanical Usage_i16(0x4a0)
+#define Usage_Sen_PropertyVibrationState Usage_i16(0x4a1)
+#define Usage_Sen_PropertyForwardVibrationSpeed Usage_i16(0x4a2)
+#define Usage_Sen_PropertyBackwardVibrationSpeed Usage_i16(0x4a3)
+#define Usage_Sen_DataFieldBiometric Usage_i16(0x4b0)
+#define Usage_Sen_DataFieldHumanPresence Usage_i16(0x4b1)
+#define Usage_Sen_DataFieldHumanProximityRange Usage_i16(0x4b2)
+#define Usage_Sen_DataFieldHumanProximityOutofRange Usage_i16(0x4b3)
+#define Usage_Sen_DataFieldHumanTouchState Usage_i16(0x4b4)
+#define Usage_Sen_DataFieldBloodPressure Usage_i16(0x4b5)
+#define Usage_Sen_DataFieldBloodPressureDiastolic Usage_i16(0x4b6)
+#define Usage_Sen_DataFieldBloodPressureSystolic Usage_i16(0x4b7)
+#define Usage_Sen_DataFieldHeartRate Usage_i16(0x4b8)
+#define Usage_Sen_DataFieldRestingHeartRate Usage_i16(0x4b9)
+#define Usage_Sen_DataFieldHeartbeatInterval Usage_i16(0x4ba)
+#define Usage_Sen_DataFieldRespiratoryRate Usage_i16(0x4bb)
+#define Usage_Sen_DataFieldSpO2 Usage_i16(0x4bc)
+#define Usage_Sen_DataFieldHumanAttentionDetected Usage_i16(0x4bd)
+#define Usage_Sen_DataFieldHumanHeadAzimuth Usage_i16(0x4be)
+#define Usage_Sen_DataFieldHumanHeadAltitude Usage_i16(0x4bf)
+#define Usage_Sen_DataFieldHumanHeadRoll Usage_i16(0x4c0)
+#define Usage_Sen_DataFieldHumanHeadPitch Usage_i16(0x4c1)
+#define Usage_Sen_DataFieldHumanHeadYaw Usage_i16(0x4c2)
+#define Usage_Sen_DataFieldHumanCorrelationId Usage_i16(0x4c3)
+#define Usage_Sen_DataFieldLight Usage_i16(0x4d0)
+#define Usage_Sen_DataFieldIlluminance Usage_i16(0x4d1)
+#define Usage_Sen_DataFieldColorTemperature Usage_i16(0x4d2)
+#define Usage_Sen_DataFieldChromaticity Usage_i16(0x4d3)
+#define Usage_Sen_DataFieldChromaticityX Usage_i16(0x4d4)
+#define Usage_Sen_DataFieldChromaticityY Usage_i16(0x4d5)
+#define Usage_Sen_DataFieldConsumerIRSentenceReceive Usage_i16(0x4d6)
+#define Usage_Sen_DataFieldInfraredLight Usage_i16(0x4d7)
+#define Usage_Sen_DataFieldRedLight Usage_i16(0x4d8)
+#define Usage_Sen_DataFieldGreenLight Usage_i16(0x4d9)
+#define Usage_Sen_DataFieldBlueLight Usage_i16(0x4da)
+#define Usage_Sen_DataFieldUltravioletALight Usage_i16(0x4db)
+#define Usage_Sen_DataFieldUltravioletBLight Usage_i16(0x4dc)
+#define Usage_Sen_DataFieldUltravioletIndex Usage_i16(0x4dd)
+#define Usage_Sen_DataFieldNearInfraredLight Usage_i16(0x4de)
+#define Usage_Sen_PropertyLight Usage_i16(0x4df)
+#define Usage_Sen_PropertyConsumerIRSentenceSend Usage_i16(0x4e0)
+#define Usage_Sen_PropertyAutoBrightnessPreferred Usage_i16(0x4e2)
+#define Usage_Sen_PropertyAutoColorPreferred Usage_i16(0x4e3)
+#define Usage_Sen_DataFieldScanner Usage_i16(0x4f0)
+#define Usage_Sen_DataFieldRFIDTag40Bit Usage_i16(0x4f1)
+#define Usage_Sen_DataFieldNFCSentenceReceive Usage_i16(0x4f2)
+#define Usage_Sen_PropertyScanner Usage_i16(0x4f8)
+#define Usage_Sen_PropertyNFCSentenceSend Usage_i16(0x4f9)
+#define Usage_Sen_DataFieldElectrical Usage_i16(0x500)
+#define Usage_Sen_DataFieldCapacitance Usage_i16(0x501)
+#define Usage_Sen_DataFieldCurrent Usage_i16(0x502)
+#define Usage_Sen_DataFieldElectricalPower Usage_i16(0x503)
+#define Usage_Sen_DataFieldInductance Usage_i16(0x504)
+#define Usage_Sen_DataFieldResistance Usage_i16(0x505)
+#define Usage_Sen_DataFieldVoltage Usage_i16(0x506)
+#define Usage_Sen_DataFieldFrequency Usage_i16(0x507)
+#define Usage_Sen_DataFieldPeriod Usage_i16(0x508)
+#define Usage_Sen_DataFieldPercentofRange Usage_i16(0x509)
+#define Usage_Sen_DataFieldTime Usage_i16(0x520)
+#define Usage_Sen_DataFieldYear Usage_i16(0x521)
+#define Usage_Sen_DataFieldMonth Usage_i16(0x522)
+#define Usage_Sen_DataFieldDay Usage_i16(0x523)
+#define Usage_Sen_DataFieldDayofWeek Usage_i16(0x524)
+#define Usage_Sen_DataFieldHour Usage_i16(0x525)
+#define Usage_Sen_DataFieldMinute Usage_i16(0x526)
+#define Usage_Sen_DataFieldSecond Usage_i16(0x527)
+#define Usage_Sen_DataFieldMillisecond Usage_i16(0x528)
+#define Usage_Sen_DataFieldTimestamp Usage_i16(0x529)
+#define Usage_Sen_DataFieldJulianDayofYear Usage_i16(0x52a)
+#define Usage_Sen_DataFieldTimeSinceSystemBoot Usage_i16(0x52b)
+#define Usage_Sen_PropertyTime Usage_i16(0x530)
+#define Usage_Sen_PropertyTimeZoneOffsetfromUTC Usage_i16(0x531)
+#define Usage_Sen_PropertyTimeZoneName Usage_i16(0x532)
+#define Usage_Sen_PropertyDaylightSavingsTimeObserved Usage_i16(0x533)
+#define Usage_Sen_PropertyTimeTrimAdjustment Usage_i16(0x534)
+#define Usage_Sen_PropertyArmAlarm Usage_i16(0x535)
+#define Usage_Sen_DataFieldCustom Usage_i16(0x540)
+#define Usage_Sen_DataFieldCustomUsage Usage_i16(0x541)
+#define Usage_Sen_DataFieldCustomBooleanArray Usage_i16(0x542)
+#define Usage_Sen_DataFieldCustomValue Usage_i16(0x543)
+#define Usage_Sen_DataFieldCustomValue1 Usage_i16(0x544)
+#define Usage_Sen_DataFieldCustomValue2 Usage_i16(0x545)
+#define Usage_Sen_DataFieldCustomValue3 Usage_i16(0x546)
+#define Usage_Sen_DataFieldCustomValue4 Usage_i16(0x547)
+#define Usage_Sen_DataFieldCustomValue5 Usage_i16(0x548)
+#define Usage_Sen_DataFieldCustomValue6 Usage_i16(0x549)
+#define Usage_Sen_DataFieldCustomValue7 Usage_i16(0x54a)
+#define Usage_Sen_DataFieldCustomValue8 Usage_i16(0x54b)
+#define Usage_Sen_DataFieldCustomValue9 Usage_i16(0x54c)
+#define Usage_Sen_DataFieldCustomValue10 Usage_i16(0x54d)
+#define Usage_Sen_DataFieldCustomValue11 Usage_i16(0x54e)
+#define Usage_Sen_DataFieldCustomValue12 Usage_i16(0x54f)
+#define Usage_Sen_DataFieldCustomValue13 Usage_i16(0x550)
+#define Usage_Sen_DataFieldCustomValue14 Usage_i16(0x551)
+#define Usage_Sen_DataFieldCustomValue15 Usage_i16(0x552)
+#define Usage_Sen_DataFieldCustomValue16 Usage_i16(0x553)
+#define Usage_Sen_DataFieldCustomValue17 Usage_i16(0x554)
+#define Usage_Sen_DataFieldCustomValue18 Usage_i16(0x555)
+#define Usage_Sen_DataFieldCustomValue19 Usage_i16(0x556)
+#define Usage_Sen_DataFieldCustomValue20 Usage_i16(0x557)
+#define Usage_Sen_DataFieldCustomValue21 Usage_i16(0x558)
+#define Usage_Sen_DataFieldCustomValue22 Usage_i16(0x559)
+#define Usage_Sen_DataFieldCustomValue23 Usage_i16(0x55a)
+#define Usage_Sen_DataFieldCustomValue24 Usage_i16(0x55b)
+#define Usage_Sen_DataFieldCustomValue25 Usage_i16(0x55c)
+#define Usage_Sen_DataFieldCustomValue26 Usage_i16(0x55d)
+#define Usage_Sen_DataFieldCustomValue27 Usage_i16(0x55e)
+#define Usage_Sen_DataFieldCustomValue28 Usage_i16(0x55f)
+#define Usage_Sen_DataFieldGeneric Usage_i16(0x560)
+#define Usage_Sen_DataFieldGenericGUIDorPROPERTYKEY Usage_i16(0x561)
+#define Usage_Sen_DataFieldGenericCategoryGUID Usage_i16(0x562)
+#define Usage_Sen_DataFieldGenericTypeGUID Usage_i16(0x563)
+#define Usage_Sen_DataFieldGenericEventPROPERTYKEY Usage_i16(0x564)
+#define Usage_Sen_DataFieldGenericPropertyPROPERTYKEY Usage_i16(0x565)
+#define Usage_Sen_DataFieldGenericDataFieldPROPERTYKEY Usage_i16(0x566)
+#define Usage_Sen_DataFieldGenericEvent Usage_i16(0x567)
+#define Usage_Sen_DataFieldGenericProperty Usage_i16(0x568)
+#define Usage_Sen_DataFieldGenericDataField Usage_i16(0x569)
+#define Usage_Sen_DataFieldEnumeratorTableRowIndex Usage_i16(0x56a)
+#define Usage_Sen_DataFieldEnumeratorTableRowCount Usage_i16(0x56b)
+#define Usage_Sen_DataFieldGenericGUIDorPROPERTYKEYkind Usage_i16(0x56c)
+#define Usage_Sen_DataFieldGenericGUID Usage_i16(0x56d)
+#define Usage_Sen_DataFieldGenericPROPERTYKEY Usage_i16(0x56e)
+#define Usage_Sen_DataFieldGenericTopLevelCollectionID Usage_i16(0x56f)
+#define Usage_Sen_DataFieldGenericReportID Usage_i16(0x570)
+#define Usage_Sen_DataFieldGenericReportItemPositionIndex Usage_i16(0x571)
+#define Usage_Sen_DataFieldGenericFirmwareVARTYPE Usage_i16(0x572)
+#define Usage_Sen_DataFieldGenericUnitofMeasure Usage_i16(0x573)
+#define Usage_Sen_DataFieldGenericUnitExponent Usage_i16(0x574)
+#define Usage_Sen_DataFieldGenericReportSize Usage_i16(0x575)
+#define Usage_Sen_DataFieldGenericReportCount Usage_i16(0x576)
+#define Usage_Sen_PropertyGeneric Usage_i16(0x580)
+#define Usage_Sen_PropertyEnumeratorTableRowIndex Usage_i16(0x581)
+#define Usage_Sen_PropertyEnumeratorTableRowCount Usage_i16(0x582)
+#define Usage_Sen_DataFieldPersonalActivity Usage_i16(0x590)
+#define Usage_Sen_DataFieldActivityType Usage_i16(0x591)
+#define Usage_Sen_DataFieldActivityState Usage_i16(0x592)
+#define Usage_Sen_DataFieldDevicePosition Usage_i16(0x593)
+#define Usage_Sen_DataFieldStepCount Usage_i16(0x594)
+#define Usage_Sen_DataFieldStepCountReset Usage_i16(0x595)
+#define Usage_Sen_DataFieldStepDuration Usage_i16(0x596)
+#define Usage_Sen_DataFieldStepType Usage_i16(0x597)
+#define Usage_Sen_PropertyMinimumActivityDetectionInterval Usage_i16(0x5a0)
+#define Usage_Sen_PropertySupportedActivityTypes Usage_i16(0x5a1)
+#define Usage_Sen_PropertySubscribedActivityTypes Usage_i16(0x5a2)
+#define Usage_Sen_PropertySupportedStepTypes Usage_i16(0x5a3)
+#define Usage_Sen_PropertySubscribedStepTypes Usage_i16(0x5a4)
+#define Usage_Sen_PropertyFloorHeight Usage_i16(0x5a5)
+#define Usage_Sen_DataFieldCustomTypeID Usage_i16(0x5b0)
+#define Usage_Sen_PropertyCustom Usage_i16(0x5c0)
+#define Usage_Sen_PropertyCustomValue1 Usage_i16(0x5c1)
+#define Usage_Sen_PropertyCustomValue2 Usage_i16(0x5c2)
+#define Usage_Sen_PropertyCustomValue3 Usage_i16(0x5c3)
+#define Usage_Sen_PropertyCustomValue4 Usage_i16(0x5c4)
+#define Usage_Sen_PropertyCustomValue5 Usage_i16(0x5c5)
+#define Usage_Sen_PropertyCustomValue6 Usage_i16(0x5c6)
+#define Usage_Sen_PropertyCustomValue7 Usage_i16(0x5c7)
+#define Usage_Sen_PropertyCustomValue8 Usage_i16(0x5c8)
+#define Usage_Sen_PropertyCustomValue9 Usage_i16(0x5c9)
+#define Usage_Sen_PropertyCustomValue10 Usage_i16(0x5ca)
+#define Usage_Sen_PropertyCustomValue11 Usage_i16(0x5cb)
+#define Usage_Sen_PropertyCustomValue12 Usage_i16(0x5cc)
+#define Usage_Sen_PropertyCustomValue13 Usage_i16(0x5cd)
+#define Usage_Sen_PropertyCustomValue14 Usage_i16(0x5ce)
+#define Usage_Sen_PropertyCustomValue15 Usage_i16(0x5cf)
+#define Usage_Sen_PropertyCustomValue16 Usage_i16(0x5d0)
+#define Usage_Sen_DataFieldHinge Usage_i16(0x5e0)
+#define Usage_Sen_DataFieldHingeAngle Usage_i16(0x5e1)
+#define Usage_Sen_DataFieldGestureSensor Usage_i16(0x5f0)
+#define Usage_Sen_DataFieldGestureState Usage_i16(0x5f1)
+#define Usage_Sen_DataFieldHingeFoldInitialAngle Usage_i16(0x5f2)
+#define Usage_Sen_DataFieldHingeFoldFinalAngle Usage_i16(0x5f3)
+#define Usage_Sen_DataFieldHingeFoldContributingPanel Usage_i16(0x5f4)
+#define Usage_Sen_DataFieldHingeFoldType Usage_i16(0x5f5)
+#define Usage_Sen_SensorStateUndefined Usage_i16(0x800)
+#define Usage_Sen_SensorStateReady Usage_i16(0x801)
+#define Usage_Sen_SensorStateNotAvailable Usage_i16(0x802)
+#define Usage_Sen_SensorStateNoData Usage_i16(0x803)
+#define Usage_Sen_SensorStateInitializing Usage_i16(0x804)
+#define Usage_Sen_SensorStateAccessDenied Usage_i16(0x805)
+#define Usage_Sen_SensorStateError Usage_i16(0x806)
+#define Usage_Sen_SensorEventUnknown Usage_i16(0x810)
+#define Usage_Sen_SensorEventStateChanged Usage_i16(0x811)
+#define Usage_Sen_SensorEventPropertyChanged Usage_i16(0x812)
+#define Usage_Sen_SensorEventDataUpdated Usage_i16(0x813)
+#define Usage_Sen_SensorEventPollResponse Usage_i16(0x814)
+#define Usage_Sen_SensorEventChangeSensitivity Usage_i16(0x815)
+#define Usage_Sen_SensorEventRangeMaximumReached Usage_i16(0x816)
+#define Usage_Sen_SensorEventRangeMinimumReached Usage_i16(0x817)
+#define Usage_Sen_SensorEventHighThresholdCrossUpward Usage_i16(0x818)
+#define Usage_Sen_SensorEventHighThresholdCrossDownward Usage_i16(0x819)
+#define Usage_Sen_SensorEventLowThresholdCrossUpward Usage_i16(0x81a)
+#define Usage_Sen_SensorEventLowThresholdCrossDownward Usage_i16(0x81b)
+#define Usage_Sen_SensorEventZeroThresholdCrossUpward Usage_i16(0x81c)
+#define Usage_Sen_SensorEventZeroThresholdCrossDownward Usage_i16(0x81d)
+#define Usage_Sen_SensorEventPeriodExceeded Usage_i16(0x81e)
+#define Usage_Sen_SensorEventFrequencyExceeded Usage_i16(0x81f)
+#define Usage_Sen_SensorEventComplexTrigger Usage_i16(0x820)
+#define Usage_Sen_ConnectionTypePCIntegrated Usage_i16(0x830)
+#define Usage_Sen_ConnectionTypePCAttached Usage_i16(0x831)
+#define Usage_Sen_ConnectionTypePCExternal Usage_i16(0x832)
+#define Usage_Sen_ReportingStateReportNoEvents Usage_i16(0x840)
+#define Usage_Sen_ReportingStateReportAllEvents Usage_i16(0x841)
+#define Usage_Sen_ReportingStateReportThresholdEvents Usage_i16(0x842)
+#define Usage_Sen_ReportingStateWakeOnNoEvents Usage_i16(0x843)
+#define Usage_Sen_ReportingStateWakeOnAllEvents Usage_i16(0x844)
+#define Usage_Sen_ReportingStateWakeOnThresholdEvents Usage_i16(0x845)
+#define Usage_Sen_ReportingStateAnytime Usage_i16(0x846)
+#define Usage_Sen_PowerStateUndefined Usage_i16(0x850)
+#define Usage_Sen_PowerStateD0FullPower Usage_i16(0x851)
+#define Usage_Sen_PowerStateD1LowPower Usage_i16(0x852)
+#define Usage_Sen_PowerStateD2StandbyPowerwithWakeup Usage_i16(0x853)
+#define Usage_Sen_PowerStateD3SleepwithWakeup Usage_i16(0x854)
+#define Usage_Sen_PowerStateD4PowerOff Usage_i16(0x855)
+#define Usage_Sen_AccuracyDefault Usage_i16(0x860)
+#define Usage_Sen_AccuracyHigh Usage_i16(0x861)
+#define Usage_Sen_AccuracyMedium Usage_i16(0x862)
+#define Usage_Sen_AccuracyLow Usage_i16(0x863)
+#define Usage_Sen_FixQualityNoFix Usage_i16(0x870)
+#define Usage_Sen_FixQualityGPS Usage_i16(0x871)
+#define Usage_Sen_FixQualityDGPS Usage_i16(0x872)
+#define Usage_Sen_FixTypeNoFix Usage_i16(0x880)
+#define Usage_Sen_FixTypeGPSSPSModeFixValid Usage_i16(0x881)
+#define Usage_Sen_FixTypeDGPSSPSModeFixValid Usage_i16(0x882)
+#define Usage_Sen_FixTypeGPSPPSModeFixValid Usage_i16(0x883)
+#define Usage_Sen_FixTypeRealTimeKinematic Usage_i16(0x884)
+#define Usage_Sen_FixTypeFloatRTK Usage_i16(0x885)
+#define Usage_Sen_FixTypeEstimateddeadreckoned Usage_i16(0x886)
+#define Usage_Sen_FixTypeManualInputMode Usage_i16(0x887)
+#define Usage_Sen_FixTypeSimulatorMode Usage_i16(0x888)
+#define Usage_Sen_GPSOperationModeManual Usage_i16(0x890)
+#define Usage_Sen_GPSOperationModeAutomatic Usage_i16(0x891)
+#define Usage_Sen_GPSSelectionModeAutonomous Usage_i16(0x8a0)
+#define Usage_Sen_GPSSelectionModeDGPS Usage_i16(0x8a1)
+#define Usage_Sen_GPSSelectionModeEstimateddeadreckoned Usage_i16(0x8a2)
+#define Usage_Sen_GPSSelectionModeManualInput Usage_i16(0x8a3)
+#define Usage_Sen_GPSSelectionModeSimulator Usage_i16(0x8a4)
+#define Usage_Sen_GPSSelectionModeDataNotValid Usage_i16(0x8a5)
+#define Usage_Sen_GPSStatusDataValid Usage_i16(0x8b0)
+#define Usage_Sen_GPSStatusDataNotValid Usage_i16(0x8b1)
+#define Usage_Sen_DayofWeekSunday Usage_i16(0x8c0)
+#define Usage_Sen_DayofWeekMonday Usage_i16(0x8c1)
+#define Usage_Sen_DayofWeekTuesday Usage_i16(0x8c2)
+#define Usage_Sen_DayofWeekWednesday Usage_i16(0x8c3)
+#define Usage_Sen_DayofWeekThursday Usage_i16(0x8c4)
+#define Usage_Sen_DayofWeekFriday Usage_i16(0x8c5)
+#define Usage_Sen_DayofWeekSaturday Usage_i16(0x8c6)
+#define Usage_Sen_KindCategory Usage_i16(0x8d0)
+#define Usage_Sen_KindType Usage_i16(0x8d1)
+#define Usage_Sen_KindEvent Usage_i16(0x8d2)
+#define Usage_Sen_KindProperty Usage_i16(0x8d3)
+#define Usage_Sen_KindDataField Usage_i16(0x8d4)
+#define Usage_Sen_MagnetometerAccuracyLow Usage_i16(0x8e0)
+#define Usage_Sen_MagnetometerAccuracyMedium Usage_i16(0x8e1)
+#define Usage_Sen_MagnetometerAccuracyHigh Usage_i16(0x8e2)
+#define Usage_Sen_SimpleOrientationDirectionNotRotated Usage_i16(0x8f0)
+#define Usage_Sen_SimpleOrientationDirectionRotated90DegreesCCW Usage_i16(0x8f1)
+#define Usage_Sen_SimpleOrientationDirectionRotated180DegreesCCW Usage_i16(0x8f2)
+#define Usage_Sen_SimpleOrientationDirectionRotated270DegreesCCW Usage_i16(0x8f3)
+#define Usage_Sen_SimpleOrientationDirectionFaceUp Usage_i16(0x8f4)
+#define Usage_Sen_SimpleOrientationDirectionFaceDown Usage_i16(0x8f5)
+#define Usage_Sen_VT_NULL Usage_i16(0x900)
+#define Usage_Sen_VT_BOOL Usage_i16(0x901)
+#define Usage_Sen_VT_UI1 Usage_i16(0x902)
+#define Usage_Sen_VT_I1 Usage_i16(0x903)
+#define Usage_Sen_VT_UI2 Usage_i16(0x904)
+#define Usage_Sen_VT_I2 Usage_i16(0x905)
+#define Usage_Sen_VT_UI4 Usage_i16(0x906)
+#define Usage_Sen_VT_I4 Usage_i16(0x907)
+#define Usage_Sen_VT_UI8 Usage_i16(0x908)
+#define Usage_Sen_VT_I8 Usage_i16(0x909)
+#define Usage_Sen_VT_R4 Usage_i16(0x90a)
+#define Usage_Sen_VT_R8 Usage_i16(0x90b)
+#define Usage_Sen_VT_WSTR Usage_i16(0x90c)
+#define Usage_Sen_VT_STR Usage_i16(0x90d)
+#define Usage_Sen_VT_CLSID Usage_i16(0x90e)
+#define Usage_Sen_VT_VECTORVT_UI1 Usage_i16(0x90f)
+#define Usage_Sen_VT_F16E0 Usage_i16(0x910)
+#define Usage_Sen_VT_F16E1 Usage_i16(0x911)
+#define Usage_Sen_VT_F16E2 Usage_i16(0x912)
+#define Usage_Sen_VT_F16E3 Usage_i16(0x913)
+#define Usage_Sen_VT_F16E4 Usage_i16(0x914)
+#define Usage_Sen_VT_F16E5 Usage_i16(0x915)
+#define Usage_Sen_VT_F16E6 Usage_i16(0x916)
+#define Usage_Sen_VT_F16E7 Usage_i16(0x917)
+#define Usage_Sen_VT_F16E8 Usage_i16(0x918)
+#define Usage_Sen_VT_F16E9 Usage_i16(0x919)
+#define Usage_Sen_VT_F16EA Usage_i16(0x91a)
+#define Usage_Sen_VT_F16EB Usage_i16(0x91b)
+#define Usage_Sen_VT_F16EC Usage_i16(0x91c)
+#define Usage_Sen_VT_F16ED Usage_i16(0x91d)
+#define Usage_Sen_VT_F16EE Usage_i16(0x91e)
+#define Usage_Sen_VT_F16EF Usage_i16(0x91f)
+#define Usage_Sen_VT_F32E0 Usage_i16(0x920)
+#define Usage_Sen_VT_F32E1 Usage_i16(0x921)
+#define Usage_Sen_VT_F32E2 Usage_i16(0x922)
+#define Usage_Sen_VT_F32E3 Usage_i16(0x923)
+#define Usage_Sen_VT_F32E4 Usage_i16(0x924)
+#define Usage_Sen_VT_F32E5 Usage_i16(0x925)
+#define Usage_Sen_VT_F32E6 Usage_i16(0x926)
+#define Usage_Sen_VT_F32E7 Usage_i16(0x927)
+#define Usage_Sen_VT_F32E8 Usage_i16(0x928)
+#define Usage_Sen_VT_F32E9 Usage_i16(0x929)
+#define Usage_Sen_VT_F32EA Usage_i16(0x92a)
+#define Usage_Sen_VT_F32EB Usage_i16(0x92b)
+#define Usage_Sen_VT_F32EC Usage_i16(0x92c)
+#define Usage_Sen_VT_F32ED Usage_i16(0x92d)
+#define Usage_Sen_VT_F32EE Usage_i16(0x92e)
+#define Usage_Sen_VT_F32EF Usage_i16(0x92f)
+#define Usage_Sen_ActivityTypeUnknown Usage_i16(0x930)
+#define Usage_Sen_ActivityTypeStationary Usage_i16(0x931)
+#define Usage_Sen_ActivityTypeFidgeting Usage_i16(0x932)
+#define Usage_Sen_ActivityTypeWalking Usage_i16(0x933)
+#define Usage_Sen_ActivityTypeRunning Usage_i16(0x934)
+#define Usage_Sen_ActivityTypeInVehicle Usage_i16(0x935)
+#define Usage_Sen_ActivityTypeBiking Usage_i16(0x936)
+#define Usage_Sen_ActivityTypeIdle Usage_i16(0x937)
+#define Usage_Sen_UnitNotSpecified Usage_i16(0x940)
+#define Usage_Sen_UnitLux Usage_i16(0x941)
+#define Usage_Sen_UnitDegreesKelvin Usage_i16(0x942)
+#define Usage_Sen_UnitDegreesCelsius Usage_i16(0x943)
+#define Usage_Sen_UnitPascal Usage_i16(0x944)
+#define Usage_Sen_UnitNewton Usage_i16(0x945)
+#define Usage_Sen_UnitMetersSecond Usage_i16(0x946)
+#define Usage_Sen_UnitKilogram Usage_i16(0x947)
+#define Usage_Sen_UnitMeter Usage_i16(0x948)
+#define Usage_Sen_UnitMetersSecondSecond Usage_i16(0x949)
+#define Usage_Sen_UnitFarad Usage_i16(0x94a)
+#define Usage_Sen_UnitAmpere Usage_i16(0x94b)
+#define Usage_Sen_UnitWatt Usage_i16(0x94c)
+#define Usage_Sen_UnitHenry Usage_i16(0x94d)
+#define Usage_Sen_UnitOhm Usage_i16(0x94e)
+#define Usage_Sen_UnitVolt Usage_i16(0x94f)
+#define Usage_Sen_UnitHertz Usage_i16(0x950)
+#define Usage_Sen_UnitBar Usage_i16(0x951)
+#define Usage_Sen_UnitDegreesAnticlockwise Usage_i16(0x952)
+#define Usage_Sen_UnitDegreesClockwise Usage_i16(0x953)
+#define Usage_Sen_UnitDegrees Usage_i16(0x954)
+#define Usage_Sen_UnitDegreesSecond Usage_i16(0x955)
+#define Usage_Sen_UnitDegreesSecondSecond Usage_i16(0x956)
+#define Usage_Sen_UnitKnot Usage_i16(0x957)
+#define Usage_Sen_UnitPercent Usage_i16(0x958)
+#define Usage_Sen_UnitSecond Usage_i16(0x959)
+#define Usage_Sen_UnitMillisecond Usage_i16(0x95a)
+#define Usage_Sen_UnitG Usage_i16(0x95b)
+#define Usage_Sen_UnitBytes Usage_i16(0x95c)
+#define Usage_Sen_UnitMilligauss Usage_i16(0x95d)
+#define Usage_Sen_UnitBits Usage_i16(0x95e)
+#define Usage_Sen_ActivityStateNoStateChange Usage_i16(0x960)
+#define Usage_Sen_ActivityStateStartActivity Usage_i16(0x961)
+#define Usage_Sen_ActivityStateEndActivity Usage_i16(0x962)
+#define Usage_Sen_Exponent0 Usage_i16(0x970)
+#define Usage_Sen_Exponent1 Usage_i16(0x971)
+#define Usage_Sen_Exponent2 Usage_i16(0x972)
+#define Usage_Sen_Exponent3 Usage_i16(0x973)
+#define Usage_Sen_Exponent4 Usage_i16(0x974)
+#define Usage_Sen_Exponent5 Usage_i16(0x975)
+#define Usage_Sen_Exponent6 Usage_i16(0x976)
+#define Usage_Sen_Exponent7 Usage_i16(0x977)
+#define Usage_Sen_Exponent8 Usage_i16(0x978)
+#define Usage_Sen_Exponent9 Usage_i16(0x979)
+#define Usage_Sen_ExponentA Usage_i16(0x97a)
+#define Usage_Sen_ExponentB Usage_i16(0x97b)
+#define Usage_Sen_ExponentC Usage_i16(0x97c)
+#define Usage_Sen_ExponentD Usage_i16(0x97d)
+#define Usage_Sen_ExponentE Usage_i16(0x97e)
+#define Usage_Sen_ExponentF Usage_i16(0x97f)
+#define Usage_Sen_DevicePositionUnknown Usage_i16(0x980)
+#define Usage_Sen_DevicePositionUnchanged Usage_i16(0x981)
+#define Usage_Sen_DevicePositionOnDesk Usage_i16(0x982)
+#define Usage_Sen_DevicePositionInHand Usage_i16(0x983)
+#define Usage_Sen_DevicePositionMovinginBag Usage_i16(0x984)
+#define Usage_Sen_DevicePositionStationaryinBag Usage_i16(0x985)
+#define Usage_Sen_StepTypeUnknown Usage_i16(0x990)
+#define Usage_Sen_StepTypeWalking Usage_i16(0x991)
+#define Usage_Sen_StepTypeRunning Usage_i16(0x992)
+#define Usage_Sen_GestureStateUnknown Usage_i16(0x9a0)
+#define Usage_Sen_GestureStateStarted Usage_i16(0x9a1)
+#define Usage_Sen_GestureStateCompleted Usage_i16(0x9a2)
+#define Usage_Sen_GestureStateCancelled Usage_i16(0x9a3)
+#define Usage_Sen_HingeFoldContributingPanelUnknown Usage_i16(0x9b0)
+#define Usage_Sen_HingeFoldContributingPanelPanel1 Usage_i16(0x9b1)
+#define Usage_Sen_HingeFoldContributingPanelPanel2 Usage_i16(0x9b2)
+#define Usage_Sen_HingeFoldContributingPanelBoth Usage_i16(0x9b3)
+#define Usage_Sen_HingeFoldTypeUnknown Usage_i16(0x9b4)
+#define Usage_Sen_HingeFoldTypeIncreasing Usage_i16(0x9b5)
+#define Usage_Sen_HingeFoldTypeDecreasing Usage_i16(0x9b6)
+#define Usage_Sen_HumanPresenceDetectionTypeVendorDefinedNonBiometric Usage_i16(0x9c0)
+#define Usage_Sen_HumanPresenceDetectionTypeVendorDefinedBiometric Usage_i16(0x9c1)
+#define Usage_Sen_HumanPresenceDetectionTypeFacialBiometric Usage_i16(0x9c2)
+#define Usage_Sen_HumanPresenceDetectionTypeAudioBiometric Usage_i16(0x9c3)
+#define Usage_Sen_ModifierChangeSensitivityAbsolute Usage_i16(0x1000)
+#define Usage_Sen_ModifierMaximum Usage_i16(0x2000)
+#define Usage_Sen_ModifierMinimum Usage_i16(0x3000)
+#define Usage_Sen_ModifierAccuracy Usage_i16(0x4000)
+#define Usage_Sen_ModifierResolution Usage_i16(0x5000)
+#define Usage_Sen_ModifierThresholdHigh Usage_i16(0x6000)
+#define Usage_Sen_ModifierThresholdLow Usage_i16(0x7000)
+#define Usage_Sen_ModifierCalibrationOffset Usage_i16(0x8000)
+#define Usage_Sen_ModifierCalibrationMultiplier Usage_i16(0x9000)
+#define Usage_Sen_ModifierReportInterval Usage_i16(0xa000)
+#define Usage_Sen_ModifierFrequencyMax Usage_i16(0xb000)
+#define Usage_Sen_ModifierPeriodMax Usage_i16(0xc000)
+#define Usage_Sen_ModifierChangeSensitivityPercentofRange Usage_i16(0xd000)
+#define Usage_Sen_ModifierChangeSensitivityPercentRelative Usage_i16(0xe000)
+#define Usage_Sen_ModifierVendorReserved Usage_i16(0xf000)
+#define Usage_MI_MedicalUltrasound Usage_i8(0x1)
+#define Usage_MI_VCRAcquisition Usage_i8(0x20)
+#define Usage_MI_FreezeThaw Usage_i8(0x21)
+#define Usage_MI_ClipStore Usage_i8(0x22)
+#define Usage_MI_Update Usage_i8(0x23)
+#define Usage_MI_Next Usage_i8(0x24)
+#define Usage_MI_Save Usage_i8(0x25)
+#define Usage_MI_Print Usage_i8(0x26)
+#define Usage_MI_MicrophoneEnable Usage_i8(0x27)
+#define Usage_MI_Cine Usage_i8(0x40)
+#define Usage_MI_TransmitPower Usage_i8(0x41)
+#define Usage_MI_Volume Usage_i8(0x42)
+#define Usage_MI_Focus Usage_i8(0x43)
+#define Usage_MI_Depth Usage_i8(0x44)
+#define Usage_MI_SoftStepPrimary Usage_i8(0x60)
+#define Usage_MI_SoftStepSecondary Usage_i8(0x61)
+#define Usage_MI_DepthGainCompensation Usage_i8(0x70)
+#define Usage_MI_ZoomSelect Usage_i8(0x80)
+#define Usage_MI_ZoomAdjust Usage_i8(0x81)
+#define Usage_MI_SpectralDopplerModeSelect Usage_i8(0x82)
+#define Usage_MI_SpectralDopplerAdjust Usage_i8(0x83)
+#define Usage_MI_ColorDopplerModeSelect Usage_i8(0x84)
+#define Usage_MI_ColorDopplerAdjust Usage_i8(0x85)
+#define Usage_MI_MotionModeSelect Usage_i8(0x86)
+#define Usage_MI_MotionModeAdjust Usage_i8(0x87)
+#define Usage_MI_TwoDModeSelect Usage_i8(0x88)
+#define Usage_MI_TwoDModeAdjust Usage_i8(0x89)
+#define Usage_MI_SoftControlSelect Usage_i8(0xa0)
+#define Usage_MI_SoftControlAdjust Usage_i8(0xa1)
+#define Usage_BD_BrailleDisplay Usage_i8(0x1)
+#define Usage_BD_BrailleRow Usage_i8(0x2)
+#define Usage_BD_EightDotBrailleCell Usage_i8(0x3)
+#define Usage_BD_SixDotBrailleCell Usage_i8(0x4)
+#define Usage_BD_NumberofBrailleCells Usage_i8(0x5)
+#define Usage_BD_ScreenReaderControl Usage_i8(0x6)
+#define Usage_BD_ScreenReaderIdentifier Usage_i8(0x7)
+#define Usage_BD_RouterSet1 Usage_i8(0xfa)
+#define Usage_BD_RouterSet2 Usage_i8(0xfb)
+#define Usage_BD_RouterSet3 Usage_i8(0xfc)
+#define Usage_BD_RouterKey Usage_i16(0x100)
+#define Usage_BD_RowRouterKey Usage_i16(0x101)
+#define Usage_BD_BrailleButtons Usage_i16(0x200)
+#define Usage_BD_BrailleKeyboardDot1 Usage_i16(0x201)
+#define Usage_BD_BrailleKeyboardDot2 Usage_i16(0x202)
+#define Usage_BD_BrailleKeyboardDot3 Usage_i16(0x203)
+#define Usage_BD_BrailleKeyboardDot4 Usage_i16(0x204)
+#define Usage_BD_BrailleKeyboardDot5 Usage_i16(0x205)
+#define Usage_BD_BrailleKeyboardDot6 Usage_i16(0x206)
+#define Usage_BD_BrailleKeyboardDot7 Usage_i16(0x207)
+#define Usage_BD_BrailleKeyboardDot8 Usage_i16(0x208)
+#define Usage_BD_BrailleKeyboardSpace Usage_i16(0x209)
+#define Usage_BD_BrailleKeyboardLeftSpace Usage_i16(0x20a)
+#define Usage_BD_BrailleKeyboardRightSpace Usage_i16(0x20b)
+#define Usage_BD_BrailleFaceControls Usage_i16(0x20c)
+#define Usage_BD_BrailleLeftControls Usage_i16(0x20d)
+#define Usage_BD_BrailleRightControls Usage_i16(0x20e)
+#define Usage_BD_BrailleTopControls Usage_i16(0x20f)
+#define Usage_BD_BrailleJoystickCenter Usage_i16(0x210)
+#define Usage_BD_BrailleJoystickUp Usage_i16(0x211)
+#define Usage_BD_BrailleJoystickDown Usage_i16(0x212)
+#define Usage_BD_BrailleJoystickLeft Usage_i16(0x213)
+#define Usage_BD_BrailleJoystickRight Usage_i16(0x214)
+#define Usage_BD_BrailleDPadCenter Usage_i16(0x215)
+#define Usage_BD_BrailleDPadUp Usage_i16(0x216)
+#define Usage_BD_BrailleDPadDown Usage_i16(0x217)
+#define Usage_BD_BrailleDPadLeft Usage_i16(0x218)
+#define Usage_BD_BrailleDPadRight Usage_i16(0x219)
+#define Usage_BD_BraillePanLeft Usage_i16(0x21a)
+#define Usage_BD_BraillePanRight Usage_i16(0x21b)
+#define Usage_BD_BrailleRockerUp Usage_i16(0x21c)
+#define Usage_BD_BrailleRockerDown Usage_i16(0x21d)
+#define Usage_BD_BrailleRockerPress Usage_i16(0x21e)
+#define Usage_LAI_LampArray Usage_i8(0x1)
+#define Usage_LAI_LampArrayAttributesReport Usage_i8(0x2)
+#define Usage_LAI_LampCount Usage_i8(0x3)
+#define Usage_LAI_BoundingBoxWidthInMicrometers Usage_i8(0x4)
+#define Usage_LAI_BoundingBoxHeightInMicrometers Usage_i8(0x5)
+#define Usage_LAI_BoundingBoxDepthInMicrometers Usage_i8(0x6)
+#define Usage_LAI_LampArrayKind Usage_i8(0x7)
+#define Usage_LAI_MinUpdateIntervalInMicroseconds Usage_i8(0x8)
+#define Usage_LAI_LampAttributesRequestReport Usage_i8(0x20)
+#define Usage_LAI_LampId Usage_i8(0x21)
+#define Usage_LAI_LampAttributesResponseReport Usage_i8(0x22)
+#define Usage_LAI_PositionXInMicrometers Usage_i8(0x23)
+#define Usage_LAI_PositionYInMicrometers Usage_i8(0x24)
+#define Usage_LAI_PositionZInMicrometers Usage_i8(0x25)
+#define Usage_LAI_LampPurposes Usage_i8(0x26)
+#define Usage_LAI_UpdateLatencyInMicroseconds Usage_i8(0x27)
+#define Usage_LAI_RedLevelCount Usage_i8(0x28)
+#define Usage_LAI_GreenLevelCount Usage_i8(0x29)
+#define Usage_LAI_BlueLevelCount Usage_i8(0x2a)
+#define Usage_LAI_IntensityLevelCount Usage_i8(0x2b)
+#define Usage_LAI_IsProgrammable Usage_i8(0x2c)
+#define Usage_LAI_InputBinding Usage_i8(0x2d)
+#define Usage_LAI_LampMultiUpdateReport Usage_i8(0x50)
+#define Usage_LAI_RedUpdateChannel Usage_i8(0x51)
+#define Usage_LAI_GreenUpdateChannel Usage_i8(0x52)
+#define Usage_LAI_BlueUpdateChannel Usage_i8(0x53)
+#define Usage_LAI_IntensityUpdateChannel Usage_i8(0x54)
+#define Usage_LAI_LampUpdateFlags Usage_i8(0x55)
+#define Usage_LAI_LampRangeUpdateReport Usage_i8(0x60)
+#define Usage_LAI_LampIdStart Usage_i8(0x61)
+#define Usage_LAI_LampIdEnd Usage_i8(0x62)
+#define Usage_LAI_LampArrayControlReport Usage_i8(0x70)
+#define Usage_LAI_AutonomousMode Usage_i8(0x71)
+#define Usage_Mon_MonitorControl Usage_i8(0x1)
+#define Usage_Mon_EDIDInformation Usage_i8(0x2)
+#define Usage_Mon_VDIFInformation Usage_i8(0x3)
+#define Usage_Mon_VESAVersion Usage_i8(0x4)
+#define Usage_VESAVC_Degauss Usage_i8(0x1)
+#define Usage_VESAVC_Brightness Usage_i8(0x10)
+#define Usage_VESAVC_Contrast Usage_i8(0x12)
+#define Usage_VESAVC_RedVideoGain Usage_i8(0x16)
+#define Usage_VESAVC_GreenVideoGain Usage_i8(0x18)
+#define Usage_VESAVC_BlueVideoGain Usage_i8(0x1a)
+#define Usage_VESAVC_Focus Usage_i8(0x1c)
+#define Usage_VESAVC_HorizontalPosition Usage_i8(0x20)
+#define Usage_VESAVC_HorizontalSize Usage_i8(0x22)
+#define Usage_VESAVC_HorizontalPincushion Usage_i8(0x24)
+#define Usage_VESAVC_HorizontalPincushionBalance Usage_i8(0x26)
+#define Usage_VESAVC_HorizontalMisconvergence Usage_i8(0x28)
+#define Usage_VESAVC_HorizontalLinearity Usage_i8(0x2a)
+#define Usage_VESAVC_HorizontalLinearityBalance Usage_i8(0x2c)
+#define Usage_VESAVC_VerticalPosition Usage_i8(0x30)
+#define Usage_VESAVC_VerticalSize Usage_i8(0x32)
+#define Usage_VESAVC_VerticalPincushion Usage_i8(0x34)
+#define Usage_VESAVC_VerticalPincushionBalance Usage_i8(0x36)
+#define Usage_VESAVC_VerticalMisconvergence Usage_i8(0x38)
+#define Usage_VESAVC_VerticalLinearity Usage_i8(0x3a)
+#define Usage_VESAVC_VerticalLinearityBalance Usage_i8(0x3c)
+#define Usage_VESAVC_ParallelogramDistortionKeyBalance Usage_i8(0x40)
+#define Usage_VESAVC_TrapezoidalDistortionKey Usage_i8(0x42)
+#define Usage_VESAVC_TiltRotation Usage_i8(0x44)
+#define Usage_VESAVC_TopCornerDistortionControl Usage_i8(0x46)
+#define Usage_VESAVC_TopCornerDistortionBalance Usage_i8(0x48)
+#define Usage_VESAVC_BottomCornerDistortionControl Usage_i8(0x4a)
+#define Usage_VESAVC_BottomCornerDistortionBalance Usage_i8(0x4c)
+#define Usage_VESAVC_HorizontalMoiré Usage_i8(0x56)
+#define Usage_VESAVC_VerticalMoiré Usage_i8(0x58)
+#define Usage_VESAVC_InputLevelSelect Usage_i8(0x5e)
+#define Usage_VESAVC_InputSourceSelect Usage_i8(0x60)
+#define Usage_VESAVC_RedVideoBlackLevel Usage_i8(0x6c)
+#define Usage_VESAVC_GreenVideoBlackLevel Usage_i8(0x6e)
+#define Usage_VESAVC_BlueVideoBlackLevel Usage_i8(0x70)
+#define Usage_VESAVC_AutoSizeCenter Usage_i8(0xa2)
+#define Usage_VESAVC_PolarityHorizontalSynchronization Usage_i8(0xa4)
+#define Usage_VESAVC_PolarityVerticalSynchronization Usage_i8(0xa6)
+#define Usage_VESAVC_SynchronizationType Usage_i8(0xa8)
+#define Usage_VESAVC_ScreenOrientation Usage_i8(0xaa)
+#define Usage_VESAVC_HorizontalFrequency Usage_i8(0xac)
+#define Usage_VESAVC_VerticalFrequency Usage_i8(0xae)
+#define Usage_VESAVC_Settings Usage_i8(0xb0)
+#define Usage_VESAVC_OnScreenDisplay Usage_i8(0xca)
+#define Usage_VESAVC_StereoMode Usage_i8(0xd4)
+#define Usage_Pow_iName Usage_i8(0x1)
+#define Usage_Pow_PresentStatus Usage_i8(0x2)
+#define Usage_Pow_ChangedStatus Usage_i8(0x3)
+#define Usage_Pow_UPS Usage_i8(0x4)
+#define Usage_Pow_PowerSupply Usage_i8(0x5)
+#define Usage_Pow_BatterySystem Usage_i8(0x10)
+#define Usage_Pow_BatterySystemId Usage_i8(0x11)
+#define Usage_Pow_Battery Usage_i8(0x12)
+#define Usage_Pow_BatteryId Usage_i8(0x13)
+#define Usage_Pow_Charger Usage_i8(0x14)
+#define Usage_Pow_ChargerId Usage_i8(0x15)
+#define Usage_Pow_PowerConverter Usage_i8(0x16)
+#define Usage_Pow_PowerConverterId Usage_i8(0x17)
+#define Usage_Pow_OutletSystem Usage_i8(0x18)
+#define Usage_Pow_OutletSystemId Usage_i8(0x19)
+#define Usage_Pow_Input Usage_i8(0x1a)
+#define Usage_Pow_InputId Usage_i8(0x1b)
+#define Usage_Pow_Output Usage_i8(0x1c)
+#define Usage_Pow_OutputId Usage_i8(0x1d)
+#define Usage_Pow_Flow Usage_i8(0x1e)
+#define Usage_Pow_FlowId Usage_i8(0x1f)
+#define Usage_Pow_Outlet Usage_i8(0x20)
+#define Usage_Pow_OutletId Usage_i8(0x21)
+#define Usage_Pow_Gang Usage_i8(0x22)
+#define Usage_Pow_GangId Usage_i8(0x23)
+#define Usage_Pow_PowerSummary Usage_i8(0x24)
+#define Usage_Pow_PowerSummaryId Usage_i8(0x25)
+#define Usage_Pow_Voltage Usage_i8(0x30)
+#define Usage_Pow_Current Usage_i8(0x31)
+#define Usage_Pow_Frequency Usage_i8(0x32)
+#define Usage_Pow_ApparentPower Usage_i8(0x33)
+#define Usage_Pow_ActivePower Usage_i8(0x34)
+#define Usage_Pow_PercentLoad Usage_i8(0x35)
+#define Usage_Pow_Temperature Usage_i8(0x36)
+#define Usage_Pow_Humidity Usage_i8(0x37)
+#define Usage_Pow_BadCount Usage_i8(0x38)
+#define Usage_Pow_ConfigVoltage Usage_i8(0x40)
+#define Usage_Pow_ConfigCurrent Usage_i8(0x41)
+#define Usage_Pow_ConfigFrequency Usage_i8(0x42)
+#define Usage_Pow_ConfigApparentPower Usage_i8(0x43)
+#define Usage_Pow_ConfigActivePower Usage_i8(0x44)
+#define Usage_Pow_ConfigPercentLoad Usage_i8(0x45)
+#define Usage_Pow_ConfigTemperature Usage_i8(0x46)
+#define Usage_Pow_ConfigHumidity Usage_i8(0x47)
+#define Usage_Pow_SwitchOnControl Usage_i8(0x50)
+#define Usage_Pow_SwitchOffControl Usage_i8(0x51)
+#define Usage_Pow_ToggleControl Usage_i8(0x52)
+#define Usage_Pow_LowVoltageTransfer Usage_i8(0x53)
+#define Usage_Pow_HighVoltageTransfer Usage_i8(0x54)
+#define Usage_Pow_DelayBeforeReboot Usage_i8(0x55)
+#define Usage_Pow_DelayBeforeStartup Usage_i8(0x56)
+#define Usage_Pow_DelayBeforeShutdown Usage_i8(0x57)
+#define Usage_Pow_Test Usage_i8(0x58)
+#define Usage_Pow_ModuleReset Usage_i8(0x59)
+#define Usage_Pow_AudibleAlarmControl Usage_i8(0x5a)
+#define Usage_Pow_Present Usage_i8(0x60)
+#define Usage_Pow_Good Usage_i8(0x61)
+#define Usage_Pow_InternalFailure Usage_i8(0x62)
+#define Usage_Pow_VoltagOutOfRange Usage_i8(0x63)
+#define Usage_Pow_FrequencyOutOfRange Usage_i8(0x64)
+#define Usage_Pow_Overload Usage_i8(0x65)
+#define Usage_Pow_OverCharged Usage_i8(0x66)
+#define Usage_Pow_OverTemperature Usage_i8(0x67)
+#define Usage_Pow_ShutdownRequested Usage_i8(0x68)
+#define Usage_Pow_ShutdownImminent Usage_i8(0x69)
+#define Usage_Pow_SwitchOnOff Usage_i8(0x6b)
+#define Usage_Pow_Switchable Usage_i8(0x6c)
+#define Usage_Pow_Used Usage_i8(0x6d)
+#define Usage_Pow_Boost Usage_i8(0x6e)
+#define Usage_Pow_Buck Usage_i8(0x6f)
+#define Usage_Pow_Initialized Usage_i8(0x70)
+#define Usage_Pow_Tested Usage_i8(0x71)
+#define Usage_Pow_AwaitingPower Usage_i8(0x72)
+#define Usage_Pow_CommunicationLost Usage_i8(0x73)
+#define Usage_Pow_iManufacturer Usage_i8(0xfd)
+#define Usage_Pow_iProduct Usage_i8(0xfe)
+#define Usage_Pow_iSerialNumber Usage_i16(0xff)
+#define Usage_BS_SmartBatteryBatteryMode Usage_i8(0x1)
+#define Usage_BS_SmartBatteryBatteryStatus Usage_i8(0x2)
+#define Usage_BS_SmartBatteryAlarmWarning Usage_i8(0x3)
+#define Usage_BS_SmartBatteryChargerMode Usage_i8(0x4)
+#define Usage_BS_SmartBatteryChargerStatus Usage_i8(0x5)
+#define Usage_BS_SmartBatteryChargerSpecInfo Usage_i8(0x6)
+#define Usage_BS_SmartBatterySelectorState Usage_i8(0x7)
+#define Usage_BS_SmartBatterySelectorPresets Usage_i8(0x8)
+#define Usage_BS_SmartBatterySelectorInfo Usage_i8(0x9)
+#define Usage_BS_OptionalMfgFunction1 Usage_i8(0x10)
+#define Usage_BS_OptionalMfgFunction2 Usage_i8(0x11)
+#define Usage_BS_OptionalMfgFunction3 Usage_i8(0x12)
+#define Usage_BS_OptionalMfgFunction4 Usage_i8(0x13)
+#define Usage_BS_OptionalMfgFunction5 Usage_i8(0x14)
+#define Usage_BS_ConnectionToSMBus Usage_i8(0x15)
+#define Usage_BS_OutputConnection Usage_i8(0x16)
+#define Usage_BS_ChargerConnection Usage_i8(0x17)
+#define Usage_BS_BatteryInsertion Usage_i8(0x18)
+#define Usage_BS_UseNext Usage_i8(0x19)
+#define Usage_BS_OKToUse Usage_i8(0x1a)
+#define Usage_BS_BatterySupported Usage_i8(0x1b)
+#define Usage_BS_SelectorRevision Usage_i8(0x1c)
+#define Usage_BS_ChargingIndicator Usage_i8(0x1d)
+#define Usage_BS_ManufacturerAccess Usage_i8(0x28)
+#define Usage_BS_RemainingCapacityLimit Usage_i8(0x29)
+#define Usage_BS_RemainingTimeLimit Usage_i8(0x2a)
+#define Usage_BS_AtRate Usage_i8(0x2b)
+#define Usage_BS_CapacityMode Usage_i8(0x2c)
+#define Usage_BS_BroadcastToCharger Usage_i8(0x2d)
+#define Usage_BS_PrimaryBattery Usage_i8(0x2e)
+#define Usage_BS_ChargeController Usage_i8(0x2f)
+#define Usage_BS_TerminateCharge Usage_i8(0x40)
+#define Usage_BS_TerminateDischarge Usage_i8(0x41)
+#define Usage_BS_BelowRemainingCapacityLimit Usage_i8(0x42)
+#define Usage_BS_RemainingTimeLimitExpired Usage_i8(0x43)
+#define Usage_BS_Charging Usage_i8(0x44)
+#define Usage_BS_Discharging Usage_i8(0x45)
+#define Usage_BS_FullyCharged Usage_i8(0x46)
+#define Usage_BS_FullyDischarged Usage_i8(0x47)
+#define Usage_BS_ConditioningFlag Usage_i8(0x48)
+#define Usage_BS_AtRateOK Usage_i8(0x49)
+#define Usage_BS_SmartBatteryErrorCode Usage_i8(0x4a)
+#define Usage_BS_NeedReplacement Usage_i8(0x4b)
+#define Usage_BS_AtRateTimeToFull Usage_i8(0x60)
+#define Usage_BS_AtRateTimeToEmpty Usage_i8(0x61)
+#define Usage_BS_AverageCurrent Usage_i8(0x62)
+#define Usage_BS_MaxError Usage_i8(0x63)
+#define Usage_BS_RelativeStateOfCharge Usage_i8(0x64)
+#define Usage_BS_AbsoluteStateOfCharge Usage_i8(0x65)
+#define Usage_BS_RemainingCapacity Usage_i8(0x66)
+#define Usage_BS_FullChargeCapacity Usage_i8(0x67)
+#define Usage_BS_RunTimeToEmpty Usage_i8(0x68)
+#define Usage_BS_AverageTimeToEmpty Usage_i8(0x69)
+#define Usage_BS_AverageTimeToFull Usage_i8(0x6a)
+#define Usage_BS_CycleCount Usage_i8(0x6b)
+#define Usage_BS_BatteryPackModelLevel Usage_i8(0x80)
+#define Usage_BS_InternalChargeController Usage_i8(0x81)
+#define Usage_BS_PrimaryBatterySupport Usage_i8(0x82)
+#define Usage_BS_DesignCapacity Usage_i8(0x83)
+#define Usage_BS_SpecificationInfo Usage_i8(0x84)
+#define Usage_BS_ManufactureDate Usage_i8(0x85)
+#define Usage_BS_SerialNumber Usage_i8(0x86)
+#define Usage_BS_iManufacturerName Usage_i8(0x87)
+#define Usage_BS_iDeviceName Usage_i8(0x88)
+#define Usage_BS_iDeviceChemistry Usage_i8(0x89)
+#define Usage_BS_ManufacturerData Usage_i8(0x8a)
+#define Usage_BS_Rechargable Usage_i8(0x8b)
+#define Usage_BS_WarningCapacityLimit Usage_i8(0x8c)
+#define Usage_BS_CapacityGranularity1 Usage_i8(0x8d)
+#define Usage_BS_CapacityGranularity2 Usage_i8(0x8e)
+#define Usage_BS_iOEMInformation Usage_i8(0x8f)
+#define Usage_BS_InhibitCharge Usage_i8(0xc0)
+#define Usage_BS_EnablePolling Usage_i8(0xc1)
+#define Usage_BS_ResetToZero Usage_i8(0xc2)
+#define Usage_BS_ACPresent Usage_i8(0xd0)
+#define Usage_BS_BatteryPresent Usage_i8(0xd1)
+#define Usage_BS_PowerFail Usage_i8(0xd2)
+#define Usage_BS_AlarmInhibited Usage_i8(0xd3)
+#define Usage_BS_ThermistorUnderRange Usage_i8(0xd4)
+#define Usage_BS_ThermistorHot Usage_i8(0xd5)
+#define Usage_BS_ThermistorCold Usage_i8(0xd6)
+#define Usage_BS_ThermistorOverRange Usage_i8(0xd7)
+#define Usage_BS_VoltageOutOfRange Usage_i8(0xd8)
+#define Usage_BS_CurrentOutOfRange Usage_i8(0xd9)
+#define Usage_BS_CurrentNotRegulated Usage_i8(0xda)
+#define Usage_BS_VoltageNotRegulated Usage_i8(0xdb)
+#define Usage_BS_MasterMode Usage_i8(0xdc)
+#define Usage_BS_ChargerSelectorSupport Usage_i8(0xf0)
+#define Usage_BS_ChargerSpec Usage_i8(0xf1)
+#define Usage_BS_Level2 Usage_i8(0xf2)
+#define Usage_BS_Level3 Usage_i8(0xf3)
+#define Usage_BS_BarcodeBadgeReader Usage_i8(0x1)
+#define Usage_BS_BarcodeScanner Usage_i8(0x2)
+#define Usage_BS_DumbBarCodeScanner Usage_i8(0x3)
+#define Usage_BS_CordlessScannerBase Usage_i8(0x4)
+#define Usage_BS_BarCodeScannerCradle Usage_i8(0x5)
+#define Usage_BS_AttributeReport Usage_i8(0x10)
+#define Usage_BS_SettingsReport Usage_i8(0x11)
+#define Usage_BS_ScannedDataReport Usage_i8(0x12)
+#define Usage_BS_RawScannedDataReport Usage_i8(0x13)
+#define Usage_BS_TriggerReport Usage_i8(0x14)
+#define Usage_BS_StatusReport Usage_i8(0x15)
+#define Usage_BS_UPCEANControlReport Usage_i8(0x16)
+#define Usage_BS_EAN23LabelControlReport Usage_i8(0x17)
+#define Usage_BS_Code39ControlReport Usage_i8(0x18)
+#define Usage_BS_Interleaved2of5ControlReport Usage_i8(0x19)
+#define Usage_BS_Standard2of5ControlReport Usage_i8(0x1a)
+#define Usage_BS_MSIPlesseyControlReport Usage_i8(0x1b)
+#define Usage_BS_CodabarControlReport Usage_i8(0x1c)
+#define Usage_BS_Code128ControlReport Usage_i8(0x1d)
+#define Usage_BS_Misc1DControlReport Usage_i8(0x1e)
+#define Usage_BS_TwoDControlReport Usage_i8(0x1f)
+#define Usage_BS_AimingPointerMode Usage_i8(0x30)
+#define Usage_BS_BarCodePresentSensor Usage_i8(0x31)
+#define Usage_BS_Class1ALaser Usage_i8(0x32)
+#define Usage_BS_Class2Laser Usage_i8(0x33)
+#define Usage_BS_HeaterPresent Usage_i8(0x34)
+#define Usage_BS_ContactScanner Usage_i8(0x35)
+#define Usage_BS_ElectronicArticleSurveillanceNotification Usage_i8(0x36)
+#define Usage_BS_ConstantElectronicArticleSurveillance Usage_i8(0x37)
+#define Usage_BS_ErrorIndication Usage_i8(0x38)
+#define Usage_BS_FixedBeeper Usage_i8(0x39)
+#define Usage_BS_GoodDecodeIndication Usage_i8(0x3a)
+#define Usage_BS_HandsFreeScanning Usage_i8(0x3b)
+#define Usage_BS_IntrinsicallySafe Usage_i8(0x3c)
+#define Usage_BS_KlasseEinsLaser Usage_i8(0x3d)
+#define Usage_BS_LongRangeScanner Usage_i8(0x3e)
+#define Usage_BS_MirrorSpeedControl Usage_i8(0x3f)
+#define Usage_BS_NotOnFileIndication Usage_i8(0x40)
+#define Usage_BS_ProgrammableBeeper Usage_i8(0x41)
+#define Usage_BS_Triggerless Usage_i8(0x42)
+#define Usage_BS_Wand Usage_i8(0x43)
+#define Usage_BS_WaterResistant Usage_i8(0x44)
+#define Usage_BS_MultiRangeScanner Usage_i8(0x45)
+#define Usage_BS_ProximitySensor Usage_i8(0x46)
+#define Usage_BS_FragmentDecoding Usage_i8(0x4d)
+#define Usage_BS_ScannerReadConfidence Usage_i8(0x4e)
+#define Usage_BS_DataPrefix Usage_i8(0x4f)
+#define Usage_BS_PrefixAIMI Usage_i8(0x50)
+#define Usage_BS_PrefixNone Usage_i8(0x51)
+#define Usage_BS_PrefixProprietary Usage_i8(0x52)
+#define Usage_BS_ActiveTime Usage_i8(0x55)
+#define Usage_BS_AimingLaserPattern Usage_i8(0x56)
+#define Usage_BS_BarCodePresent Usage_i8(0x57)
+#define Usage_BS_BeeperState Usage_i8(0x58)
+#define Usage_BS_LaserOnTime Usage_i8(0x59)
+#define Usage_BS_LaserState Usage_i8(0x5a)
+#define Usage_BS_LockoutTime Usage_i8(0x5b)
+#define Usage_BS_MotorState Usage_i8(0x5c)
+#define Usage_BS_MotorTimeout Usage_i8(0x5d)
+#define Usage_BS_PowerOnResetScanner Usage_i8(0x5e)
+#define Usage_BS_PreventReadofBarcodes Usage_i8(0x5f)
+#define Usage_BS_InitiateBarcodeRead Usage_i8(0x60)
+#define Usage_BS_TriggerState Usage_i8(0x61)
+#define Usage_BS_TriggerMode Usage_i8(0x62)
+#define Usage_BS_TriggerModeBlinkingLaserOn Usage_i8(0x63)
+#define Usage_BS_TriggerModeContinuousLaserOn Usage_i8(0x64)
+#define Usage_BS_TriggerModeLaseronwhilePulled Usage_i8(0x65)
+#define Usage_BS_TriggerModeLaserstaysonafterrelease Usage_i8(0x66)
+#define Usage_BS_CommitParameterstoNVM Usage_i8(0x6d)
+#define Usage_BS_ParameterScanning Usage_i8(0x6e)
+#define Usage_BS_ParametersChanged Usage_i8(0x6f)
+#define Usage_BS_Setparameterdefaultvalues Usage_i8(0x70)
+#define Usage_BS_ScannerInCradle Usage_i8(0x75)
+#define Usage_BS_ScannerInRange Usage_i8(0x76)
+#define Usage_BS_AimDuration Usage_i8(0x7a)
+#define Usage_BS_GoodReadLampDuration Usage_i8(0x7b)
+#define Usage_BS_GoodReadLampIntensity Usage_i8(0x7c)
+#define Usage_BS_GoodReadLED Usage_i8(0x7d)
+#define Usage_BS_GoodReadToneFrequency Usage_i8(0x7e)
+#define Usage_BS_GoodReadToneLength Usage_i8(0x7f)
+#define Usage_BS_GoodReadToneVolume Usage_i8(0x80)
+#define Usage_BS_NoReadMessage Usage_i8(0x82)
+#define Usage_BS_NotonFileVolume Usage_i8(0x83)
+#define Usage_BS_PowerupBeep Usage_i8(0x84)
+#define Usage_BS_SoundErrorBeep Usage_i8(0x85)
+#define Usage_BS_SoundGoodReadBeep Usage_i8(0x86)
+#define Usage_BS_SoundNotOnFileBeep Usage_i8(0x87)
+#define Usage_BS_GoodReadWhentoWrite Usage_i8(0x88)
+#define Usage_BS_GRWTIAfterDecode Usage_i8(0x89)
+#define Usage_BS_GRWTIBeepLampaftertransmit Usage_i8(0x8a)
+#define Usage_BS_GRWTINoBeepLampuseatall Usage_i8(0x8b)
+#define Usage_BS_BooklandEAN Usage_i8(0x91)
+#define Usage_BS_ConvertEAN8to13Type Usage_i8(0x92)
+#define Usage_BS_ConvertUPCAtoEAN13 Usage_i8(0x93)
+#define Usage_BS_ConvertUPCEtoA Usage_i8(0x94)
+#define Usage_BS_EAN13 Usage_i8(0x95)
+#define Usage_BS_EAN8 Usage_i8(0x96)
+#define Usage_BS_EAN99128Mandatory Usage_i8(0x97)
+#define Usage_BS_EAN99P5128Optional Usage_i8(0x98)
+#define Usage_BS_EnableEANTwoLabel Usage_i8(0x99)
+#define Usage_BS_UPCEAN Usage_i8(0x9a)
+#define Usage_BS_UPCEANCouponCode Usage_i8(0x9b)
+#define Usage_BS_UPCEANPeriodicals Usage_i8(0x9c)
+#define Usage_BS_UPCA Usage_i8(0x9d)
+#define Usage_BS_UPCAwith128Mandatory Usage_i8(0x9e)
+#define Usage_BS_UPCAwith128Optional Usage_i8(0x9f)
+#define Usage_BS_UPCAwithP5Optional Usage_i8(0xa0)
+#define Usage_BS_UPCE Usage_i8(0xa1)
+#define Usage_BS_UPCE1 Usage_i8(0xa2)
+#define Usage_BS_Periodical Usage_i8(0xa9)
+#define Usage_BS_PeriodicalAutoDiscriminatePlus2 Usage_i8(0xaa)
+#define Usage_BS_PeriodicalOnlyDecodewithPlus2 Usage_i8(0xab)
+#define Usage_BS_PeriodicalIgnorePlus2 Usage_i8(0xac)
+#define Usage_BS_PeriodicalAutoDiscriminatePlus5 Usage_i8(0xad)
+#define Usage_BS_PeriodicalOnlyDecodewithPlus5 Usage_i8(0xae)
+#define Usage_BS_PeriodicalIgnorePlus5 Usage_i8(0xaf)
+#define Usage_BS_Check Usage_i8(0xb0)
+#define Usage_BS_CheckDisablePrice Usage_i8(0xb1)
+#define Usage_BS_CheckEnable4digitPrice Usage_i8(0xb2)
+#define Usage_BS_CheckEnable5digitPrice Usage_i8(0xb3)
+#define Usage_BS_CheckEnableEuropean4digitPrice Usage_i8(0xb4)
+#define Usage_BS_CheckEnableEuropean5digitPrice Usage_i8(0xb5)
+#define Usage_BS_EANTwoLabel Usage_i8(0xb7)
+#define Usage_BS_EANThreeLabel Usage_i8(0xb8)
+#define Usage_BS_EAN8FlagDigit1 Usage_i8(0xb9)
+#define Usage_BS_EAN8FlagDigit2 Usage_i8(0xba)
+#define Usage_BS_EAN8FlagDigit3 Usage_i8(0xbb)
+#define Usage_BS_EAN13FlagDigit1 Usage_i8(0xbc)
+#define Usage_BS_EAN13FlagDigit2 Usage_i8(0xbd)
+#define Usage_BS_EAN13FlagDigit3 Usage_i8(0xbe)
+#define Usage_BS_AddEAN23LabelDefinition Usage_i8(0xbf)
+#define Usage_BS_ClearallEAN23LabelDefinitions Usage_i8(0xc0)
+#define Usage_BS_Codabar Usage_i8(0xc3)
+#define Usage_BS_Code128 Usage_i8(0xc4)
+#define Usage_BS_Code39 Usage_i8(0xc7)
+#define Usage_BS_Code93 Usage_i8(0xc8)
+#define Usage_BS_FullASCIIConversion Usage_i8(0xc9)
+#define Usage_BS_Interleaved2of5 Usage_i8(0xca)
+#define Usage_BS_ItalianPharmacyCode Usage_i8(0xcb)
+#define Usage_BS_MSIPlessey Usage_i8(0xcc)
+#define Usage_BS_Standard2of5IATA Usage_i8(0xcd)
+#define Usage_BS_Standard2of5 Usage_i8(0xce)
+#define Usage_BS_TransmitStartStop Usage_i8(0xd3)
+#define Usage_BS_TriOptic Usage_i8(0xd4)
+#define Usage_BS_UCCEAN128 Usage_i8(0xd5)
+#define Usage_BS_CheckDigit Usage_i8(0xd6)
+#define Usage_BS_CheckDigitDisable Usage_i8(0xd7)
+#define Usage_BS_CheckDigitEnableInterleaved2of5OPCC Usage_i8(0xd8)
+#define Usage_BS_CheckDigitEnableInterleaved2of5USS Usage_i8(0xd9)
+#define Usage_BS_CheckDigitEnableStandard2of5OPCC Usage_i8(0xda)
+#define Usage_BS_CheckDigitEnableStandard2of5USS Usage_i8(0xdb)
+#define Usage_BS_CheckDigitEnableOneMSIPlessey Usage_i8(0xdc)
+#define Usage_BS_CheckDigitEnableTwoMSIPlessey Usage_i8(0xdd)
+#define Usage_BS_CheckDigitCodabarEnable Usage_i8(0xde)
+#define Usage_BS_CheckDigitCode39Enable Usage_i8(0xdf)
+#define Usage_BS_TransmitCheckDigit Usage_i8(0xf0)
+#define Usage_BS_DisableCheckDigitTransmit Usage_i8(0xf1)
+#define Usage_BS_EnableCheckDigitTransmit Usage_i8(0xf2)
+#define Usage_BS_SymbologyIdentifier1 Usage_i8(0xfb)
+#define Usage_BS_SymbologyIdentifier2 Usage_i8(0xfc)
+#define Usage_BS_SymbologyIdentifier3 Usage_i8(0xfd)
+#define Usage_BS_DecodedData Usage_i8(0xfe)
+#define Usage_BS_DecodeDataContinued Usage_i16(0xff)
+#define Usage_BS_BarSpaceData Usage_i16(0x100)
+#define Usage_BS_ScannerDataAccuracy Usage_i16(0x101)
+#define Usage_BS_RawDataPolarity Usage_i16(0x102)
+#define Usage_BS_PolarityInvertedBarCode Usage_i16(0x103)
+#define Usage_BS_PolarityNormalBarCode Usage_i16(0x104)
+#define Usage_BS_MinimumLengthtoDecode Usage_i16(0x106)
+#define Usage_BS_MaximumLengthtoDecode Usage_i16(0x107)
+#define Usage_BS_DiscreteLengthtoDecode1 Usage_i16(0x108)
+#define Usage_BS_DiscreteLengthtoDecode2 Usage_i16(0x109)
+#define Usage_BS_DataLengthMethod Usage_i16(0x10a)
+#define Usage_BS_DLMethodReadany Usage_i16(0x10b)
+#define Usage_BS_DLMethodCheckinRange Usage_i16(0x10c)
+#define Usage_BS_DLMethodCheckforDiscrete Usage_i16(0x10d)
+#define Usage_BS_AztecCode Usage_i16(0x110)
+#define Usage_BS_BC412 Usage_i16(0x111)
+#define Usage_BS_ChannelCode Usage_i16(0x112)
+#define Usage_BS_Code16 Usage_i16(0x113)
+#define Usage_BS_Code32 Usage_i16(0x114)
+#define Usage_BS_Code49 Usage_i16(0x115)
+#define Usage_BS_CodeOne Usage_i16(0x116)
+#define Usage_BS_Colorcode Usage_i16(0x117)
+#define Usage_BS_DataMatrix Usage_i16(0x118)
+#define Usage_BS_MaxiCode Usage_i16(0x119)
+#define Usage_BS_MicroPDF Usage_i16(0x11a)
+#define Usage_BS_PDF417 Usage_i16(0x11b)
+#define Usage_BS_PosiCode Usage_i16(0x11c)
+#define Usage_BS_QRCode Usage_i16(0x11d)
+#define Usage_BS_SuperCode Usage_i16(0x11e)
+#define Usage_BS_UltraCode Usage_i16(0x11f)
+#define Usage_BS_USD5SlugCode Usage_i16(0x120)
+#define Usage_BS_VeriCode Usage_i16(0x121)
+#define Usage_Sca_Scales Usage_i8(0x1)
+#define Usage_Sca_ScaleDevice Usage_i8(0x20)
+#define Usage_Sca_ScaleClass Usage_i8(0x21)
+#define Usage_Sca_ScaleClassIMetric Usage_i8(0x22)
+#define Usage_Sca_ScaleClassIIMetric Usage_i8(0x23)
+#define Usage_Sca_ScaleClassIIIMetric Usage_i8(0x24)
+#define Usage_Sca_ScaleClassIIILMetric Usage_i8(0x25)
+#define Usage_Sca_ScaleClassIVMetric Usage_i8(0x26)
+#define Usage_Sca_ScaleClassIIIEnglish Usage_i8(0x27)
+#define Usage_Sca_ScaleClassIIILEnglish Usage_i8(0x28)
+#define Usage_Sca_ScaleClassIVEnglish Usage_i8(0x29)
+#define Usage_Sca_ScaleClassGeneric Usage_i8(0x2a)
+#define Usage_Sca_ScaleAttributeReport Usage_i8(0x30)
+#define Usage_Sca_ScaleControlReport Usage_i8(0x31)
+#define Usage_Sca_ScaleDataReport Usage_i8(0x32)
+#define Usage_Sca_ScaleStatusReport Usage_i8(0x33)
+#define Usage_Sca_ScaleWeightLimitReport Usage_i8(0x34)
+#define Usage_Sca_ScaleStatisticsReport Usage_i8(0x35)
+#define Usage_Sca_DataWeight Usage_i8(0x40)
+#define Usage_Sca_DataScaling Usage_i8(0x41)
+#define Usage_Sca_WeightUnit Usage_i8(0x50)
+#define Usage_Sca_WeightUnitMilligram Usage_i8(0x51)
+#define Usage_Sca_WeightUnitGram Usage_i8(0x52)
+#define Usage_Sca_WeightUnitKilogram Usage_i8(0x53)
+#define Usage_Sca_WeightUnitCarats Usage_i8(0x54)
+#define Usage_Sca_WeightUnitTaels Usage_i8(0x55)
+#define Usage_Sca_WeightUnitGrains Usage_i8(0x56)
+#define Usage_Sca_WeightUnitPennyweights Usage_i8(0x57)
+#define Usage_Sca_WeightUnitMetricTon Usage_i8(0x58)
+#define Usage_Sca_WeightUnitAvoirTon Usage_i8(0x59)
+#define Usage_Sca_WeightUnitTroyOunce Usage_i8(0x5a)
+#define Usage_Sca_WeightUnitOunce Usage_i8(0x5b)
+#define Usage_Sca_WeightUnitPound Usage_i8(0x5c)
+#define Usage_Sca_CalibrationCount Usage_i8(0x60)
+#define Usage_Sca_ReZeroCount Usage_i8(0x61)
+#define Usage_Sca_ScaleStatus Usage_i8(0x70)
+#define Usage_Sca_ScaleStatusFault Usage_i8(0x71)
+#define Usage_Sca_ScaleStatusStableatCenterofZero Usage_i8(0x72)
+#define Usage_Sca_ScaleStatusInMotion Usage_i8(0x73)
+#define Usage_Sca_ScaleStatusWeightStable Usage_i8(0x74)
+#define Usage_Sca_ScaleStatusUnderZero Usage_i8(0x75)
+#define Usage_Sca_ScaleStatusOverWeightLimit Usage_i8(0x76)
+#define Usage_Sca_ScaleStatusRequiresCalibration Usage_i8(0x77)
+#define Usage_Sca_ScaleStatusRequiresRezeroing Usage_i8(0x78)
+#define Usage_Sca_ZeroScale Usage_i8(0x80)
+#define Usage_Sca_EnforcedZeroReturn Usage_i8(0x81)
+#define Usage_MSR_MSRDeviceReadOnly Usage_i8(0x1)
+#define Usage_MSR_Track1Length Usage_i8(0x11)
+#define Usage_MSR_Track2Length Usage_i8(0x12)
+#define Usage_MSR_Track3Length Usage_i8(0x13)
+#define Usage_MSR_TrackJISLength Usage_i8(0x14)
+#define Usage_MSR_TrackData Usage_i8(0x20)
+#define Usage_MSR_Track1Data Usage_i8(0x21)
+#define Usage_MSR_Track2Data Usage_i8(0x22)
+#define Usage_MSR_Track3Data Usage_i8(0x23)
+#define Usage_MSR_TrackJISData Usage_i8(0x24)
+#define Usage_CC_CameraAutofocus Usage_i8(0x20)
+#define Usage_CC_CameraShutter Usage_i8(0x21)
+#define Usage_Arc_GeneralPurposeIOCard Usage_i8(0x1)
+#define Usage_Arc_CoinDoor Usage_i8(0x2)
+#define Usage_Arc_WatchdogTimer Usage_i8(0x3)
+#define Usage_Arc_GeneralPurposeAnalogInputState Usage_i8(0x30)
+#define Usage_Arc_GeneralPurposeDigitalInputState Usage_i8(0x31)
+#define Usage_Arc_GeneralPurposeOpticalInputState Usage_i8(0x32)
+#define Usage_Arc_GeneralPurposeDigitalOutputState Usage_i8(0x33)
+#define Usage_Arc_NumberofCoinDoors Usage_i8(0x34)
+#define Usage_Arc_CoinDrawerDropCount Usage_i8(0x35)
+#define Usage_Arc_CoinDrawerStart Usage_i8(0x36)
+#define Usage_Arc_CoinDrawerService Usage_i8(0x37)
+#define Usage_Arc_CoinDrawerTilt Usage_i8(0x38)
+#define Usage_Arc_CoinDoorTest Usage_i8(0x39)
+#define Usage_Arc_CoinDoorLockout Usage_i8(0x40)
+#define Usage_Arc_WatchdogTimeout Usage_i8(0x41)
+#define Usage_Arc_WatchdogAction Usage_i8(0x42)
+#define Usage_Arc_WatchdogReboot Usage_i8(0x43)
+#define Usage_Arc_WatchdogRestart Usage_i8(0x44)
+#define Usage_Arc_AlarmInput Usage_i8(0x45)
+#define Usage_Arc_CoinDoorCounter Usage_i8(0x46)
+#define Usage_Arc_IODirectionMapping Usage_i8(0x47)
+#define Usage_Arc_SetIODirectionMapping Usage_i8(0x48)
+#define Usage_Arc_ExtendedOpticalInputState Usage_i8(0x49)
+#define Usage_Arc_PinPadInputState Usage_i8(0x4a)
+#define Usage_Arc_PinPadStatus Usage_i8(0x4b)
+#define Usage_Arc_PinPadOutput Usage_i8(0x4c)
+#define Usage_Arc_PinPadCommand Usage_i8(0x4d)
+#define Usage_FIDOA_U2FAuthenticatorDevice Usage_i8(0x1)
+#define Usage_FIDOA_InputReportData Usage_i8(0x20)
+#define Usage_FIDOA_OutputReportData Usage_i8(0x21)
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 2cbc32dda7f7..54bfaf61182b 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -163,4 +163,5 @@ static struct hid_driver a4_driver = {
};
module_hid_driver(a4_driver);
+MODULE_DESCRIPTION("HID driver for some a4tech \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index bd022e004356..af5cf94f9dea 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -8,6 +8,8 @@
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2019 Paul Pawlowski <paul@mrarm.io>
+ * Copyright (c) 2023 Orlando Chamberlain <orlandoch.dev@gmail.com>
+ * Copyright (c) 2024 Aditya Garg <gargaditya08@live.com>
*/
/*
@@ -23,6 +25,7 @@
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/leds.h>
+#include <dt-bindings/leds/common.h>
#include "hid-ids.h"
@@ -38,12 +41,17 @@
#define APPLE_RDESC_BATTERY BIT(9)
#define APPLE_BACKLIGHT_CTL BIT(10)
#define APPLE_IS_NON_APPLE BIT(11)
+#define APPLE_MAGIC_BACKLIGHT BIT(12)
#define APPLE_FLAG_FKEY 0x01
#define HID_COUNTRY_INTERNATIONAL_ISO 13
#define APPLE_BATTERY_TIMEOUT_MS 60000
+#define HID_USAGE_MAGIC_BL 0xff00000f
+#define APPLE_MAGIC_REPORT_ID_POWER 3
+#define APPLE_MAGIC_REPORT_ID_BRIGHTNESS 1
+
static unsigned int fnmode = 3;
module_param(fnmode, uint, 0644);
MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, "
@@ -81,6 +89,12 @@ struct apple_sc_backlight {
struct hid_device *hdev;
};
+struct apple_magic_backlight {
+ struct led_classdev cdev;
+ struct hid_report *brightness;
+ struct hid_report *power;
+};
+
struct apple_sc {
struct hid_device *hdev;
unsigned long quirks;
@@ -822,6 +836,66 @@ cleanup_and_exit:
return ret;
}
+static void apple_magic_backlight_report_set(struct hid_report *rep, s32 value, u8 rate)
+{
+ rep->field[0]->value[0] = value;
+ rep->field[1]->value[0] = 0x5e; /* Mimic Windows */
+ rep->field[1]->value[0] |= rate << 8;
+
+ hid_hw_request(rep->device, rep, HID_REQ_SET_REPORT);
+}
+
+static void apple_magic_backlight_set(struct apple_magic_backlight *backlight,
+ int brightness, char rate)
+{
+ apple_magic_backlight_report_set(backlight->power, brightness ? 1 : 0, rate);
+ if (brightness)
+ apple_magic_backlight_report_set(backlight->brightness, brightness, rate);
+}
+
+static int apple_magic_backlight_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct apple_magic_backlight *backlight = container_of(led_cdev,
+ struct apple_magic_backlight, cdev);
+
+ apple_magic_backlight_set(backlight, brightness, 1);
+ return 0;
+}
+
+static int apple_magic_backlight_init(struct hid_device *hdev)
+{
+ struct apple_magic_backlight *backlight;
+ struct hid_report_enum *report_enum;
+
+ /*
+ * Ensure this usb endpoint is for the keyboard backlight, not touchbar
+ * backlight.
+ */
+ if (hdev->collection[0].usage != HID_USAGE_MAGIC_BL)
+ return -ENODEV;
+
+ backlight = devm_kzalloc(&hdev->dev, sizeof(*backlight), GFP_KERNEL);
+ if (!backlight)
+ return -ENOMEM;
+
+ report_enum = &hdev->report_enum[HID_FEATURE_REPORT];
+ backlight->brightness = report_enum->report_id_hash[APPLE_MAGIC_REPORT_ID_BRIGHTNESS];
+ backlight->power = report_enum->report_id_hash[APPLE_MAGIC_REPORT_ID_POWER];
+
+ if (!backlight->brightness || !backlight->power)
+ return -ENODEV;
+
+ backlight->cdev.name = ":white:" LED_FUNCTION_KBD_BACKLIGHT;
+ backlight->cdev.max_brightness = backlight->brightness->field[0]->logical_maximum;
+ backlight->cdev.brightness_set_blocking = apple_magic_backlight_led_set;
+
+ apple_magic_backlight_set(backlight, 0, 0);
+
+ return devm_led_classdev_register(&hdev->dev, &backlight->cdev);
+
+}
+
static int apple_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -860,7 +934,18 @@ static int apple_probe(struct hid_device *hdev,
if (quirks & APPLE_BACKLIGHT_CTL)
apple_backlight_init(hdev);
+ if (quirks & APPLE_MAGIC_BACKLIGHT) {
+ ret = apple_magic_backlight_init(hdev);
+ if (ret)
+ goto out_err;
+ }
+
return 0;
+
+out_err:
+ del_timer_sync(&asc->battery_timer);
+ hid_hw_stop(hdev);
+ return ret;
}
static void apple_remove(struct hid_device *hdev)
@@ -1073,6 +1158,8 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT),
+ .driver_data = APPLE_MAGIC_BACKLIGHT },
{ }
};
@@ -1091,4 +1178,5 @@ static struct hid_driver apple_driver = {
};
module_hid_driver(apple_driver);
+MODULE_DESCRIPTION("Apple USB HID quirks support for Linux");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 02de2bf4f790..37e6d25593c2 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1204,8 +1204,8 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
/* match many more n-key devices */
- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
- for (int i = 0; i < *rsize + 1; i++) {
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) {
+ for (int i = 0; i < *rsize - 15; i++) {
/* offset to the count from 0x5a report part always 14 */
if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
diff --git a/drivers/hid/hid-aureal.c b/drivers/hid/hid-aureal.c
index ac8946f80e22..cf1a562d8523 100644
--- a/drivers/hid/hid-aureal.c
+++ b/drivers/hid/hid-aureal.c
@@ -41,4 +41,5 @@ static struct hid_driver aureal_driver = {
};
module_hid_driver(aureal_driver);
+MODULE_DESCRIPTION("HID driver for Aureal Cy se W-01RN USB_V3.1 devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-belkin.c b/drivers/hid/hid-belkin.c
index fc0b3bb383cc..75aaed35ee9f 100644
--- a/drivers/hid/hid-belkin.c
+++ b/drivers/hid/hid-belkin.c
@@ -85,4 +85,5 @@ static struct hid_driver belkin_driver = {
};
module_hid_driver(belkin_driver);
+MODULE_DESCRIPTION("HID driver for some belkin \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
index 25ed7b9a917e..a6d5f030d023 100644
--- a/drivers/hid/hid-betopff.c
+++ b/drivers/hid/hid-betopff.c
@@ -162,4 +162,5 @@ static struct hid_driver betop_driver = {
};
module_hid_driver(betop_driver);
+MODULE_DESCRIPTION("Force feedback support for Betop based devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index a02cb517b4c4..be17af3d9c0c 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -490,4 +490,5 @@ static struct hid_driver bigben_driver = {
};
module_hid_driver(bigben_driver);
+MODULE_DESCRIPTION("LED & force feedback support for BigBen Interactive");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 6a71187b5cf6..549c73b05b8d 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -68,4 +68,5 @@ static struct hid_driver ch_driver = {
};
module_hid_driver(ch_driver);
+MODULE_DESCRIPTION("HID driver for some cherry \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index f04d2aa23efe..99954c6b3242 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -152,4 +152,5 @@ static struct hid_driver ch_driver = {
};
module_hid_driver(ch_driver);
+MODULE_DESCRIPTION("HID driver for some chicony \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b1fa0378e8f4..254006178426 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -95,9 +95,9 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
return NULL;
}
- field = kzalloc((sizeof(struct hid_field) +
- usages * sizeof(struct hid_usage) +
- 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
+ field = kvzalloc((sizeof(struct hid_field) +
+ usages * sizeof(struct hid_usage) +
+ 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
if (!field)
return NULL;
@@ -661,7 +661,7 @@ static void hid_free_report(struct hid_report *report)
kfree(report->field_entries);
for (n = 0; n < report->maxfield; n++)
- kfree(report->field[n]);
+ kvfree(report->field[n]);
kfree(report);
}
@@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report,
hid_warn(hid,
"%s() called with too large value %d (n: %d)! (%s)\n",
__func__, value, n, current->comm);
- WARN_ON(1);
value &= m;
}
}
@@ -2026,19 +2025,10 @@ out:
}
EXPORT_SYMBOL_GPL(hid_report_raw_event);
-/**
- * hid_input_report - report data from lower layer (usb, bt...)
- *
- * @hid: hid device
- * @type: HID report type (HID_*_REPORT)
- * @data: report contents
- * @size: size of data parameter
- * @interrupt: distinguish between interrupt and control transfers
- *
- * This is data entry for lower layers.
- */
-int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
- int interrupt)
+
+static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
+ bool lock_already_taken)
{
struct hid_report_enum *report_enum;
struct hid_driver *hdrv;
@@ -2048,8 +2038,13 @@ int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data
if (!hid)
return -ENODEV;
- if (down_trylock(&hid->driver_input_lock))
+ ret = down_trylock(&hid->driver_input_lock);
+ if (lock_already_taken && !ret) {
+ up(&hid->driver_input_lock);
+ return -EINVAL;
+ } else if (!lock_already_taken && ret) {
return -EBUSY;
+ }
if (!hid->driver) {
ret = -ENODEV;
@@ -2058,7 +2053,7 @@ int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data
report_enum = hid->report_enum + type;
hdrv = hid->driver;
- data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt);
+ data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
goto unlock;
@@ -2090,9 +2085,29 @@ int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data
ret = hid_report_raw_event(hid, type, data, size, interrupt);
unlock:
- up(&hid->driver_input_lock);
+ if (!lock_already_taken)
+ up(&hid->driver_input_lock);
return ret;
}
+
+/**
+ * hid_input_report - report data from lower layer (usb, bt...)
+ *
+ * @hid: hid device
+ * @type: HID report type (HID_*_REPORT)
+ * @data: report contents
+ * @size: size of data parameter
+ * @interrupt: distinguish between interrupt and control transfers
+ *
+ * This is data entry for lower layers.
+ */
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt)
+{
+ return __hid_input_report(hid, type, data, size, interrupt, 0,
+ false, /* from_bpf */
+ false /* lock_already_taken */);
+}
EXPORT_SYMBOL_GPL(hid_input_report);
bool hid_match_one_id(const struct hid_device *hdev,
@@ -2393,6 +2408,30 @@ void hid_hw_request(struct hid_device *hdev,
}
EXPORT_SYMBOL_GPL(hid_hw_request);
+int __hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf)
+{
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+ int ret;
+
+ if (hdev->ll_driver->max_buffer_size)
+ max_buffer_size = hdev->ll_driver->max_buffer_size;
+
+ if (len < 1 || len > max_buffer_size || !buf)
+ return -EINVAL;
+
+ ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
+ reqtype, source, from_bpf);
+ if (ret)
+ return ret;
+
+ return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
+ rtype, reqtype);
+}
+
/**
* hid_hw_raw_request - send report request to device
*
@@ -2411,7 +2450,15 @@ int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
+ return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
+}
+EXPORT_SYMBOL_GPL(hid_hw_raw_request);
+
+int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
+ bool from_bpf)
+{
unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+ int ret;
if (hdev->ll_driver->max_buffer_size)
max_buffer_size = hdev->ll_driver->max_buffer_size;
@@ -2419,10 +2466,15 @@ int hid_hw_raw_request(struct hid_device *hdev,
if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;
- return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
- rtype, reqtype);
+ ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
+ if (ret)
+ return ret;
+
+ if (hdev->ll_driver->output_report)
+ return hdev->ll_driver->output_report(hdev, buf, len);
+
+ return -ENOSYS;
}
-EXPORT_SYMBOL_GPL(hid_hw_raw_request);
/**
* hid_hw_output_report - send output report to device
@@ -2435,18 +2487,7 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
*/
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
- unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
-
- if (hdev->ll_driver->max_buffer_size)
- max_buffer_size = hdev->ll_driver->max_buffer_size;
-
- if (len < 1 || len > max_buffer_size || !buf)
- return -EINVAL;
-
- if (hdev->ll_driver->output_report)
- return hdev->ll_driver->output_report(hdev, buf, len);
-
- return -ENOSYS;
+ return __hid_hw_output_report(hdev, buf, len, 0, false);
}
EXPORT_SYMBOL_GPL(hid_hw_output_report);
@@ -2855,9 +2896,15 @@ struct hid_device *hid_allocate_device(void)
mutex_init(&hdev->ll_open_lock);
kref_init(&hdev->ref);
- hid_bpf_device_init(hdev);
+ ret = hid_bpf_device_init(hdev);
+ if (ret)
+ goto out_err;
return hdev;
+
+out_err:
+ hid_destroy_device(hdev);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hid_allocate_device);
@@ -2971,11 +3018,11 @@ int hid_check_keys_pressed(struct hid_device *hid)
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
#ifdef CONFIG_HID_BPF
-static struct hid_bpf_ops hid_ops = {
+static struct hid_ops __hid_ops = {
.hid_get_report = hid_get_report,
- .hid_hw_raw_request = hid_hw_raw_request,
- .hid_hw_output_report = hid_hw_output_report,
- .hid_input_report = hid_input_report,
+ .hid_hw_raw_request = __hid_hw_raw_request,
+ .hid_hw_output_report = __hid_hw_output_report,
+ .hid_input_report = __hid_input_report,
.owner = THIS_MODULE,
.bus_type = &hid_bus_type,
};
@@ -2992,7 +3039,7 @@ static int __init hid_init(void)
}
#ifdef CONFIG_HID_BPF
- hid_bpf_ops = &hid_ops;
+ hid_ops = &__hid_ops;
#endif
ret = hidraw_init();
@@ -3011,7 +3058,7 @@ err:
static void __exit hid_exit(void)
{
#ifdef CONFIG_HID_BPF
- hid_bpf_ops = NULL;
+ hid_ops = NULL;
#endif
hid_debug_exit();
hidraw_exit();
@@ -3025,4 +3072,5 @@ module_exit(hid_exit);
MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
+MODULE_DESCRIPTION("HID support for Linux");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index b88f889b3932..b952b235e70a 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -176,4 +176,5 @@ static struct hid_driver cp_driver = {
};
module_hid_driver(cp_driver);
+MODULE_DESCRIPTION("HID driver for some cypress \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 87a961cae775..d5abfe652fb5 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3366,6 +3366,8 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable",
[KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable",
[KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle",
+ [KEY_ACCESSIBILITY] = "Accessibility",
+ [KEY_DO_NOT_DISTURB] = "DoNotDisturb",
[KEY_DICTATE] = "Dictate",
[KEY_MICMUTE] = "MicrophoneMute",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 947f19f8685f..c88224a96e9e 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -316,4 +316,5 @@ static struct hid_driver dr_driver = {
};
module_hid_driver(dr_driver);
+MODULE_DESCRIPTION("Force feedback support for DragonRise Inc. game controllers");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 4fa45ee77503..5973a3bab29f 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -136,4 +136,5 @@ static struct hid_driver elecom_driver = {
};
module_hid_driver(elecom_driver);
+MODULE_DESCRIPTION("HID driver for ELECOM devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 2876cb6a7dca..cf17bdd14d9c 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -313,4 +313,5 @@ static void __exit elo_driver_exit(void)
module_exit(elo_driver_exit);
MODULE_AUTHOR("Jiri Slaby <jslaby@suse.cz>");
+MODULE_DESCRIPTION("HID driver for ELO usb touchscreen 4000/4500");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index c34f2e5a049f..60bfb6a924d7 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -144,5 +144,6 @@ static struct hid_driver ems_driver = {
};
module_hid_driver(ems_driver);
+MODULE_DESCRIPTION("Force feedback support for EMS Trio Linker Plus II");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-evision.c b/drivers/hid/hid-evision.c
index ef6b4b435215..bb5997078491 100644
--- a/drivers/hid/hid-evision.c
+++ b/drivers/hid/hid-evision.c
@@ -50,4 +50,5 @@ static struct hid_driver evision_driver = {
};
module_hid_driver(evision_driver);
+MODULE_DESCRIPTION("HID driver for EVision devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ezkey.c b/drivers/hid/hid-ezkey.c
index d14f91d78c96..0e28bc0b87fa 100644
--- a/drivers/hid/hid-ezkey.c
+++ b/drivers/hid/hid-ezkey.c
@@ -75,4 +75,5 @@ static struct hid_driver ez_driver = {
};
module_hid_driver(ez_driver);
+MODULE_DESCRIPTION("HID driver for some ezkey \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index ecbd3995a4eb..c6db8b6cc8ee 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -169,4 +169,5 @@ static struct hid_driver ga_driver = {
};
module_hid_driver(ga_driver);
+MODULE_DESCRIPTION("Force feedback support for GreenAsia (Product ID 0x12) based devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 25331695ae32..6e4ebc349e45 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -641,4 +641,5 @@ static void __exit hammer_exit(void)
}
module_exit(hammer_exit);
+MODULE_DESCRIPTION("HID driver for Google Hammer device.");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-google-stadiaff.c b/drivers/hid/hid-google-stadiaff.c
index 3731575562ab..6b38d2421d3d 100644
--- a/drivers/hid/hid-google-stadiaff.c
+++ b/drivers/hid/hid-google-stadiaff.c
@@ -155,4 +155,5 @@ static struct hid_driver stadia_driver = {
};
module_hid_driver(stadia_driver);
+MODULE_DESCRIPTION("Google Stadia controller rumble support.");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index b99a611479b3..6606b57abe83 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -87,4 +87,5 @@ static struct hid_driver gyration_driver = {
};
module_hid_driver(gyration_driver);
+MODULE_DESCRIPTION("HID driver for some gyration \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index b346d68a06f5..1f014ac54e14 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -180,4 +180,5 @@ static struct hid_driver holtek_kbd_driver = {
};
module_hid_driver(holtek_kbd_driver);
+MODULE_DESCRIPTION("HID driver for Holtek keyboard");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 7c907939bfae..343730c28e2d 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -110,4 +110,5 @@ static struct hid_driver holtek_mouse_driver = {
};
module_hid_driver(holtek_mouse_driver);
+MODULE_DESCRIPTION("HID driver for Holtek gaming mice");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 61d2a21affa2..72d56ee7ce1b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -423,6 +423,8 @@
#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
#define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82
+#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C
+#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e03d300d2bac..c9094a4f281e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -377,6 +377,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -833,9 +837,18 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ if ((usage->hid & 0xf0) == 0x90) { /* SystemControl */
+ switch (usage->hid & 0xf) {
+ case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
+ default: goto ignore;
+ }
+ break;
+ }
+
if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */
switch (usage->hid & 0xf) {
case 0x9: map_key_clear(KEY_MICMUTE); break;
+ case 0xa: map_key_clear(KEY_ACCESSIBILITY); break;
default: goto ignore;
}
break;
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 75ebfcf31889..6a7281bc27c9 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -141,4 +141,5 @@ static struct hid_driver ite_driver = {
module_hid_driver(ite_driver);
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("HID driver for some ITE \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-kensington.c b/drivers/hid/hid-kensington.c
index b31f7f431a3f..16839027981f 100644
--- a/drivers/hid/hid-kensington.c
+++ b/drivers/hid/hid-kensington.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * HID driver for Kensigton Slimblade Trackball
+ * HID driver for Kensington Slimblade Trackball
*
* Copyright (c) 2009 Jiri Kosina
*/
@@ -46,4 +46,5 @@ static struct hid_driver ks_driver = {
};
module_hid_driver(ks_driver);
+MODULE_DESCRIPTION("HID driver for Kensington Slimblade Trackball");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c
index 73bf8642dfe3..a972943baaea 100644
--- a/drivers/hid/hid-keytouch.c
+++ b/drivers/hid/hid-keytouch.c
@@ -48,5 +48,6 @@ static struct hid_driver keytouch_driver = {
};
module_hid_driver(keytouch_driver);
+MODULE_DESCRIPTION("HID driver for Keytouch devices not fully compliant with HID standard");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jiri Kosina");
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 70ceb9437332..ca2ba3da2458 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -671,4 +671,5 @@ static struct hid_driver kye_driver = {
};
module_hid_driver(kye_driver);
+MODULE_DESCRIPTION("HID driver for Kye/Genius devices not fully compliant with HID standard");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lcpower.c b/drivers/hid/hid-lcpower.c
index 8acd3ee5ada5..58953d11ded7 100644
--- a/drivers/hid/hid-lcpower.c
+++ b/drivers/hid/hid-lcpower.c
@@ -53,4 +53,5 @@ static struct hid_driver ts_driver = {
};
module_hid_driver(ts_driver);
+MODULE_DESCRIPTION("HID driver for LC Power Model RC1000MCE");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index e6b2ae68b8fb..e5e72aa5260a 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -1442,4 +1442,5 @@ static struct hid_driver lenovo_driver = {
};
module_hid_driver(lenovo_driver);
+MODULE_DESCRIPTION("HID driver for IBM/Lenovo");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-letsketch.c b/drivers/hid/hid-letsketch.c
index 97f047f18136..229820fda960 100644
--- a/drivers/hid/hid-letsketch.c
+++ b/drivers/hid/hid-letsketch.c
@@ -319,4 +319,5 @@ static struct hid_driver letsketch_driver = {
module_hid_driver(letsketch_driver);
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Driver for the LetSketch / VSON WP9620N drawing tablet");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index acbec1dcf196..53e7b90f9cc3 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -954,4 +954,5 @@ static struct hid_driver lg_g15_driver = {
module_hid_driver(lg_g15_driver);
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("HID driver for gaming keys on Logitech gaming keyboards");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index fb3f7258009c..cfe2f4f6e93f 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -942,4 +942,5 @@ module_param_named(lg4ff_no_autoswitch, lg4ff_no_autoswitch, int, S_IRUGO);
MODULE_PARM_DESC(lg4ff_no_autoswitch, "Do not switch multimode wheels to their native mode automatically");
#endif
+MODULE_DESCRIPTION("HID driver for some logitech \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 3c3c497b6b91..d9580bc3e19a 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
*/
msleep(50);
- if (retval)
+ if (retval) {
+ kfree(dj_report);
return retval;
+ }
}
/*
@@ -2045,6 +2047,7 @@ static struct hid_driver logi_djreceiver_driver = {
module_hid_driver(logi_djreceiver_driver);
+MODULE_DESCRIPTION("HID driver for Logitech receivers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logitech");
MODULE_AUTHOR("Nestor Lopez Casado");
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index b81d5bcc76a7..400d70e6dbe2 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -27,6 +27,7 @@
#include "usbhid/usbhid.h"
#include "hid-ids.h"
+MODULE_DESCRIPTION("Support for Logitech devices relying on the HID++ specification");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index a46ff4e8b99f..2eb285b97fc0 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -968,4 +968,5 @@ static struct hid_driver magicmouse_driver = {
};
module_hid_driver(magicmouse_driver);
+MODULE_DESCRIPTION("Apple \"Magic\" Wireless Mouse driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-maltron.c b/drivers/hid/hid-maltron.c
index dcd6db6a646e..caba0def938c 100644
--- a/drivers/hid/hid-maltron.c
+++ b/drivers/hid/hid-maltron.c
@@ -162,4 +162,5 @@ static struct hid_driver maltron_driver = {
};
module_hid_driver(maltron_driver);
+MODULE_DESCRIPTION("HID driver for Maltron L90");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index da5ea5a23b08..0f93c22a479f 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -1048,7 +1048,7 @@ static int mcp_iio_channels(struct mcp2221 *mcp)
break;
default:
continue;
- };
+ }
chan->type = IIO_VOLTAGE;
chan->indexed = 1;
diff --git a/drivers/hid/hid-megaworld.c b/drivers/hid/hid-megaworld.c
index 599657863cb9..0476d7d16e7f 100644
--- a/drivers/hid/hid-megaworld.c
+++ b/drivers/hid/hid-megaworld.c
@@ -122,4 +122,5 @@ static struct hid_driver mwctrl_driver = {
};
module_hid_driver(mwctrl_driver);
+MODULE_DESCRIPTION("Vibration support for Mega World controllers");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
index 92d7ecd41a78..49a4052a1496 100644
--- a/drivers/hid/hid-mf.c
+++ b/drivers/hid/hid-mf.c
@@ -166,4 +166,5 @@ static struct hid_driver mf_driver = {
};
module_hid_driver(mf_driver);
+MODULE_DESCRIPTION("Force feedback support for Mayflash game controller adapters.");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 9345e2bfd56e..4cf0fcddb379 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -475,4 +475,5 @@ static struct hid_driver ms_driver = {
};
module_hid_driver(ms_driver);
+MODULE_DESCRIPTION("HID driver for some microsoft \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index c63f9f1e61b8..989681f73d77 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -62,4 +62,5 @@ static struct hid_driver mr_driver = {
};
module_hid_driver(mr_driver);
+MODULE_DESCRIPTION("HID driver for some monterey \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index b4a97803eca3..58cd0506e431 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -658,7 +658,6 @@ struct joycon_ctlr {
(ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR || \
ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO)
-
/*
* Controller device helpers
*
@@ -669,31 +668,11 @@ struct joycon_ctlr {
* These helpers are most useful early during the HID probe or in conjunction
* with the capability helpers below.
*/
-static inline bool joycon_device_is_procon(struct joycon_ctlr *ctlr)
-{
- return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_PROCON;
-}
-
static inline bool joycon_device_is_chrggrip(struct joycon_ctlr *ctlr)
{
return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP;
}
-static inline bool joycon_device_is_snescon(struct joycon_ctlr *ctlr)
-{
- return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_SNESCON;
-}
-
-static inline bool joycon_device_is_gencon(struct joycon_ctlr *ctlr)
-{
- return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_GENCON;
-}
-
-static inline bool joycon_device_is_n64con(struct joycon_ctlr *ctlr)
-{
- return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_N64CON;
-}
-
/*
* Controller type helpers
*
@@ -2725,13 +2704,13 @@ static int nintendo_hid_probe(struct hid_device *hdev,
ret = joycon_power_supply_create(ctlr);
if (ret) {
hid_err(hdev, "Failed to create power_supply; ret=%d\n", ret);
- goto err_close;
+ goto err_ida;
}
ret = joycon_input_create(ctlr);
if (ret) {
hid_err(hdev, "Failed to create input device; ret=%d\n", ret);
- goto err_close;
+ goto err_ida;
}
ctlr->ctlr_state = JOYCON_CTLR_STATE_READ;
@@ -2739,6 +2718,8 @@ static int nintendo_hid_probe(struct hid_device *hdev,
hid_dbg(hdev, "probe - success\n");
return 0;
+err_ida:
+ ida_free(&nintendo_player_id_allocator, ctlr->player_id);
err_close:
hid_hw_close(hdev);
err_stop:
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index b5d26f03fe6b..2738ce947434 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -1029,4 +1029,5 @@ static struct hid_driver ntrig_driver = {
};
module_hid_driver(ntrig_driver);
+MODULE_DESCRIPTION("HID driver for N-Trig touchscreens");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
index 58b15750dbb0..ff9078ad1961 100644
--- a/drivers/hid/hid-nvidia-shield.c
+++ b/drivers/hid/hid-nvidia-shield.c
@@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create(
return haptics;
input_set_capability(haptics, EV_FF, FF_RUMBLE);
- input_ff_create_memless(haptics, NULL, play_effect);
+ ret = input_ff_create_memless(haptics, NULL, play_effect);
+ if (ret)
+ goto err;
ret = input_register_device(haptics);
if (ret)
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 9a4770d79c64..99e3b06a8331 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -51,4 +51,5 @@ static struct hid_driver ortek_driver = {
};
module_hid_driver(ortek_driver);
+MODULE_DESCRIPTION("HID driver for Ortek PKB-1700/WKB-2000/Skycable wireless keyboard and mouse trackpad");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index ea0af9f7ad90..5e47634bb07d 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -102,4 +102,5 @@ static struct hid_driver pl_driver = {
};
module_hid_driver(pl_driver);
+MODULE_DESCRIPTION("HID driver for some petalynx \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 93fb07ec3180..3c8827081dea 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -219,4 +219,5 @@ static struct hid_driver pl_driver = {
};
module_hid_driver(pl_driver);
+MODULE_DESCRIPTION("Force feedback support for PantherLord/GreenAsia based devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-primax.c b/drivers/hid/hid-primax.c
index 1e6413d07cae..e44d79dff8de 100644
--- a/drivers/hid/hid-primax.c
+++ b/drivers/hid/hid-primax.c
@@ -70,4 +70,5 @@ static struct hid_driver px_driver = {
module_hid_driver(px_driver);
MODULE_AUTHOR("Terry Lambert <tlambert@google.com>");
+MODULE_DESCRIPTION("HID driver for primax and similar keyboards with in-band modifiers");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index a593ed62c969..757361593e52 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -862,4 +862,5 @@ static struct hid_driver pk_driver = {
};
module_hid_driver(pk_driver);
+MODULE_DESCRIPTION("HID driver for the Prodikeys PC-MIDI Keyboard");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-razer.c b/drivers/hid/hid-razer.c
index 740df148b0ef..7f48258c61f7 100644
--- a/drivers/hid/hid-razer.c
+++ b/drivers/hid/hid-razer.c
@@ -122,4 +122,5 @@ static struct hid_driver razer_driver = {
module_hid_driver(razer_driver);
MODULE_AUTHOR("Jelle van der Waa <jvanderwaa@redhat.com>");
+MODULE_DESCRIPTION("HID driver for gaming keys on Razer Blackwidow gaming keyboards");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c
index 73c9d4c4fa34..07d803513f27 100644
--- a/drivers/hid/hid-redragon.c
+++ b/drivers/hid/hid-redragon.c
@@ -59,4 +59,5 @@ static struct hid_driver redragon_driver = {
module_hid_driver(redragon_driver);
+MODULE_DESCRIPTION("HID driver for Redragon keyboards");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-retrode.c b/drivers/hid/hid-retrode.c
index 6a08e25aa296..7997627fdccc 100644
--- a/drivers/hid/hid-retrode.c
+++ b/drivers/hid/hid-retrode.c
@@ -94,4 +94,5 @@ static struct hid_driver retrode_driver = {
module_hid_driver(retrode_driver);
+MODULE_DESCRIPTION("HID driver for Retrode 2 controller adapter and plug-in extensions");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index b84e975977c4..85ac8def368f 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -204,4 +204,5 @@ static struct hid_driver saitek_driver = {
};
module_hid_driver(saitek_driver);
+MODULE_DESCRIPTION("HID driver for Saitek devices.");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 08fb25b8459a..d4e27142245c 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -561,4 +561,5 @@ static struct hid_driver samsung_driver = {
};
module_hid_driver(samsung_driver);
+MODULE_DESCRIPTION("HID driver for some samsung \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
index ba6607d5e051..710766f6839d 100644
--- a/drivers/hid/hid-semitek.c
+++ b/drivers/hid/hid-semitek.c
@@ -37,4 +37,5 @@ static struct hid_driver semitek_driver = {
};
module_hid_driver(semitek_driver);
+MODULE_DESCRIPTION("HID driver for Semitek keyboards");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 49971be7c3ff..d3a777f52a3f 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -168,6 +168,7 @@ static struct hid_driver sjoy_driver = {
};
module_hid_driver(sjoy_driver);
+MODULE_DESCRIPTION("Force feedback support for SmartJoy PLUS PS2->USB adapter");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jussi Kivilinna");
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 5a07a91a89ae..eac75f98f08a 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2308,4 +2308,5 @@ static void __exit sony_exit(void)
module_init(sony_init);
module_exit(sony_exit);
+MODULE_DESCRIPTION("HID driver for Sony / PS2 / PS3 / PS4 BD devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
index 9e75f1aae0ca..22ee078c42c6 100644
--- a/drivers/hid/hid-speedlink.c
+++ b/drivers/hid/hid-speedlink.c
@@ -75,4 +75,5 @@ static struct hid_driver speedlink_driver = {
};
module_hid_driver(speedlink_driver);
+MODULE_DESCRIPTION("HID driver for Speedlink Vicious and Divine Cezanne (USB mouse)");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index f166188c21ec..bf8b633114be 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -45,6 +45,7 @@
#include <linux/power_supply.h>
#include "hid-ids.h"
+MODULE_DESCRIPTION("HID driver for Valve Steam Controller");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rodrigo Rivas Costa <rodrigorivascosta@gmail.com>");
@@ -1267,7 +1268,7 @@ static int steam_probe(struct hid_device *hdev,
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {
ret = PTR_ERR(steam->client_hdev);
- goto err_stream_unregister;
+ goto err_steam_unregister;
}
steam->client_hdev->driver_data = steam;
@@ -1279,7 +1280,7 @@ static int steam_probe(struct hid_device *hdev,
err_destroy:
hid_destroy_device(steam->client_hdev);
-err_stream_unregister:
+err_steam_unregister:
if (steam->connected)
steam_unregister(steam);
err_hw_close:
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index b3edadf42d6d..2154e14f55f1 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -662,6 +662,7 @@ static struct hid_driver steelseries_driver = {
};
module_hid_driver(steelseries_driver);
+MODULE_DESCRIPTION("HID driver for Steelseries devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_AUTHOR("Simon Wood <simon@mungewell.org>");
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index aa2855c2ed4e..f32e60d4420f 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -62,4 +62,5 @@ static struct hid_driver sp_driver = {
};
module_hid_driver(sp_driver);
+MODULE_DESCRIPTION("HID driver for some sunplus \"special\" devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index 68eb08b63945..827bf67abeb9 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -73,5 +73,6 @@ static struct hid_driver tivo_driver = {
};
module_hid_driver(tivo_driver);
+MODULE_DESCRIPTION("HID driver for TiVo Slide Bluetooth remote");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 4040cd98dafe..fcd859aa3a8c 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -265,4 +265,5 @@ static struct hid_driver tm_driver = {
};
module_hid_driver(tm_driver);
+MODULE_DESCRIPTION("Force feedback support for various HID compliant devices by ThrustMaster");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 2125327b8de1..645e36cd83a6 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -78,4 +78,5 @@ static struct hid_driver ts_driver = {
};
module_hid_driver(ts_driver);
+MODULE_DESCRIPTION("HID driver for TopSeed Cyberlink remote");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index 14af794146c0..0ef5194085b2 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -131,4 +131,5 @@ static struct hid_driver twinhan_driver = {
};
module_hid_driver(twinhan_driver);
+MODULE_DESCRIPTION("HID driver for TwinHan IR remote control");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index ad74cbc9a0aa..b176f9c0dd52 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -567,7 +567,8 @@ module_hid_driver(uclogic_driver);
MODULE_AUTHOR("Martin Rusko");
MODULE_AUTHOR("Nikolai Kondrashov");
+MODULE_DESCRIPTION("HID driver for UC-Logic devices not fully compliant with HID standard");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HID driver for UC-Logic devices not fully compliant with HID standard");
#ifdef CONFIG_HID_KUNIT_TEST
#include "hid-uclogic-core-test.c"
diff --git a/drivers/hid/hid-uclogic-rdesc-test.c b/drivers/hid/hid-uclogic-rdesc-test.c
index 90bf4e586e01..d6b18213f45f 100644
--- a/drivers/hid/hid-uclogic-rdesc-test.c
+++ b/drivers/hid/hid-uclogic-rdesc-test.c
@@ -9,6 +9,8 @@
#include <kunit/test.h>
#include "./hid-uclogic-rdesc.h"
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
struct uclogic_template_case {
const char *name;
const __u8 *template;
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index b6dfdf6356a6..acfa591ab52f 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -17,6 +17,7 @@
#include "hid-uclogic-rdesc.h"
#include <linux/slab.h>
#include <asm/unaligned.h>
+#include <kunit/visibility.h>
/* Fixed WP4030U report descriptor */
__u8 uclogic_rdesc_wp4030u_fixed_arr[] = {
@@ -689,10 +690,10 @@ const size_t uclogic_rdesc_v2_pen_template_size =
0xA0, /* Collection (Physical), */ \
0x05, 0x09, /* Usage Page (Button), */ \
0x19, 0x01, /* Usage Minimum (01h), */ \
- 0x29, 0x03, /* Usage Maximum (03h), */ \
- 0x95, 0x03, /* Report Count (3), */ \
+ 0x29, 0x0A, /* Usage Maximum (0Ah), */ \
+ 0x95, 0x0A, /* Report Count (10), */ \
0x81, 0x02, /* Input (Variable), */ \
- 0x95, ((_size) * 8 - 45), \
+ 0x95, ((_size) * 8 - 52), \
/* Report Count (padding), */ \
0x81, 0x01, /* Input (Constant), */ \
0xC0, /* End Collection, */ \
@@ -789,7 +790,8 @@ const __u8 uclogic_rdesc_v2_frame_touch_strip_arr[] = {
0x95, 0x01, /* Report Count (1), */
0x81, 0x02, /* Input (Variable), */
0x05, 0x01, /* Usage Page (Desktop), */
- 0x09, 0x38, /* Usage (Wheel), */
+ 0x09, 0x33, /* Usage (Rx), */
+ 0x09, 0x34, /* Usage (Ry), */
0x95, 0x01, /* Report Count (1), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x07, /* Logical Maximum (7), */
@@ -1242,3 +1244,4 @@ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
return rdesc_ptr;
}
+EXPORT_SYMBOL_IF_KUNIT(uclogic_rdesc_template_apply);
diff --git a/drivers/hid/hid-viewsonic.c b/drivers/hid/hid-viewsonic.c
index 8024b1d370e2..668c2adb77b6 100644
--- a/drivers/hid/hid-viewsonic.c
+++ b/drivers/hid/hid-viewsonic.c
@@ -102,4 +102,5 @@ static struct hid_driver viewsonic_driver = {
};
module_hid_driver(viewsonic_driver);
+MODULE_DESCRIPTION("HID driver for ViewSonic devices not fully compliant with HID standard");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-vivaldi-common.c b/drivers/hid/hid-vivaldi-common.c
index b0af2be94895..bf734055d4b6 100644
--- a/drivers/hid/hid-vivaldi-common.c
+++ b/drivers/hid/hid-vivaldi-common.c
@@ -138,4 +138,5 @@ const struct attribute_group *vivaldi_attribute_groups[] = {
};
EXPORT_SYMBOL_GPL(vivaldi_attribute_groups);
+MODULE_DESCRIPTION("Helpers for ChromeOS HID Vivaldi keyboards");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c
index bc355b1a5b30..1e590c61eef5 100644
--- a/drivers/hid/hid-waltop.c
+++ b/drivers/hid/hid-waltop.c
@@ -742,4 +742,5 @@ static struct hid_driver waltop_driver = {
};
module_hid_driver(waltop_driver);
+MODULE_DESCRIPTION("HID driver for Waltop devices not fully compliant with HID standard");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
index 0e224d1a6466..10a5d87ccb96 100644
--- a/drivers/hid/hid-winwing.c
+++ b/drivers/hid/hid-winwing.c
@@ -223,4 +223,5 @@ static struct hid_driver winwing_driver = {
};
module_hid_driver(winwing_driver);
+MODULE_DESCRIPTION("HID driver for WinWing Orion 2 throttle");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
index 5c2860a9d8c9..66b8bfb6e647 100644
--- a/drivers/hid/hid-xinmo.c
+++ b/drivers/hid/hid-xinmo.c
@@ -56,4 +56,5 @@ static struct hid_driver xinmo_driver = {
};
module_hid_driver(xinmo_driver);
+MODULE_DESCRIPTION("HID driver for Xin-Mo devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 3abaca045869..aacf7f137b18 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -138,4 +138,5 @@ static struct hid_driver zp_driver = {
};
module_hid_driver(zp_driver);
+MODULE_DESCRIPTION("Force feedback support for Zeroplus based devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index 0d003caee113..998a3db19c1f 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -205,4 +205,5 @@ static struct hid_driver zc_driver = {
};
module_hid_driver(zc_driver);
+MODULE_DESCRIPTION("HID driver for zydacron remote control");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 2bc762d31ac7..716294e40e8a 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -140,7 +140,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
if ((report_type == HID_OUTPUT_REPORT) &&
!(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) {
- ret = hid_hw_output_report(dev, buf, count);
+ ret = __hid_hw_output_report(dev, buf, count, (u64)(long)file, false);
/*
* compatibility with old implementation of USB-HID and I2C-HID:
* if the device does not support receiving output reports,
@@ -150,8 +150,8 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
goto out_free;
}
- ret = hid_hw_raw_request(dev, buf[0], buf, count, report_type,
- HID_REQ_SET_REPORT);
+ ret = __hid_hw_raw_request(dev, buf[0], buf, count, report_type,
+ HID_REQ_SET_REPORT, (u64)(long)file, false);
out_free:
kfree(buf);
@@ -227,8 +227,8 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
goto out_free;
}
- ret = hid_hw_raw_request(dev, report_number, buf, count, report_type,
- HID_REQ_GET_REPORT);
+ ret = __hid_hw_raw_request(dev, report_number, buf, count, report_type,
+ HID_REQ_GET_REPORT, (u64)(long)file, false);
if (ret < 0)
goto out_free;
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 5b91fb106cfc..091e37933225 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -31,6 +31,7 @@ struct i2c_hid_of_elan {
struct regulator *vcc33;
struct regulator *vccio;
struct gpio_desc *reset_gpio;
+ bool no_reset_on_power_off;
const struct elan_i2c_hid_chip_data *chip_data;
};
@@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
container_of(ops, struct i2c_hid_of_elan, ops);
int ret;
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+
if (ihid_elan->vcc33) {
ret = regulator_enable(ihid_elan->vcc33);
if (ret)
- return ret;
+ goto err_deassert_reset;
}
ret = regulator_enable(ihid_elan->vccio);
- if (ret) {
- regulator_disable(ihid_elan->vcc33);
- return ret;
- }
+ if (ret)
+ goto err_disable_vcc33;
if (ihid_elan->chip_data->post_power_delay_ms)
msleep(ihid_elan->chip_data->post_power_delay_ms);
@@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms);
return 0;
+
+err_disable_vcc33:
+ if (ihid_elan->vcc33)
+ regulator_disable(ihid_elan->vcc33);
+err_deassert_reset:
+ if (ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+
+ return ret;
}
static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
@@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
struct i2c_hid_of_elan *ihid_elan =
container_of(ops, struct i2c_hid_of_elan, ops);
- gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+ /*
+ * Do not assert reset when the hardware allows for it to remain
+ * deasserted regardless of the state of the (shared) power supply to
+ * avoid wasting power when the supply is left on.
+ */
+ if (!ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+
if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms)
msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms);
@@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
static int i2c_hid_of_elan_probe(struct i2c_client *client)
{
struct i2c_hid_of_elan *ihid_elan;
+ int ret;
ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
if (!ihid_elan)
@@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
if (IS_ERR(ihid_elan->reset_gpio))
return PTR_ERR(ihid_elan->reset_gpio);
+ ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node,
+ "no-reset-on-power-off");
+
ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
- if (IS_ERR(ihid_elan->vccio))
- return PTR_ERR(ihid_elan->vccio);
+ if (IS_ERR(ihid_elan->vccio)) {
+ ret = PTR_ERR(ihid_elan->vccio);
+ goto err_deassert_reset;
+ }
ihid_elan->chip_data = device_get_match_data(&client->dev);
if (ihid_elan->chip_data->main_supply_name) {
ihid_elan->vcc33 = devm_regulator_get(&client->dev,
ihid_elan->chip_data->main_supply_name);
- if (IS_ERR(ihid_elan->vcc33))
- return PTR_ERR(ihid_elan->vcc33);
+ if (IS_ERR(ihid_elan->vcc33)) {
+ ret = PTR_ERR(ihid_elan->vcc33);
+ goto err_deassert_reset;
+ }
}
- return i2c_hid_core_probe(client, &ihid_elan->ops,
- ihid_elan->chip_data->hid_descriptor_address, 0);
+ ret = i2c_hid_core_probe(client, &ihid_elan->ops,
+ ihid_elan->chip_data->hid_descriptor_address, 0);
+ if (ret)
+ goto err_deassert_reset;
+
+ return 0;
+
+err_deassert_reset:
+ if (ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+
+ return ret;
}
static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 03d5601ce807..cc76b295b632 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -844,6 +844,7 @@ EXPORT_SYMBOL(ishtp_device);
/**
* ishtp_wait_resume() - Wait for IPC resume
+ * @dev: ishtp device
*
* Wait for IPC resume
*
@@ -931,4 +932,5 @@ static void __exit ishtp_bus_unregister(void)
module_init(ishtp_bus_register);
module_exit(ishtp_bus_unregister);
+MODULE_DESCRIPTION("ISHTP bus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c
index 993f8b390e57..fcca070bdecb 100644
--- a/drivers/hid/intel-ish-hid/ishtp/loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.c
@@ -84,8 +84,8 @@ static int loader_write_message(struct ishtp_device *dev, void *buf, int len)
static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len,
void *resp, int resp_len)
{
- struct loader_msg_header *req_hdr = req;
- struct loader_msg_header *resp_hdr = resp;
+ union loader_msg_header req_hdr;
+ union loader_msg_header resp_hdr;
struct device *devc = dev->devc;
int rv;
@@ -93,34 +93,37 @@ static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len,
dev->fw_loader_rx_size = resp_len;
rv = loader_write_message(dev, req, req_len);
+ req_hdr.val32 = le32_to_cpup(req);
+
if (rv < 0) {
- dev_err(devc, "write cmd %u failed:%d\n", req_hdr->command, rv);
+ dev_err(devc, "write cmd %u failed:%d\n", req_hdr.command, rv);
return rv;
}
/* Wait the ACK */
wait_event_interruptible_timeout(dev->wait_loader_recvd_msg, dev->fw_loader_received,
ISHTP_LOADER_TIMEOUT);
+ resp_hdr.val32 = le32_to_cpup(resp);
dev->fw_loader_rx_size = 0;
dev->fw_loader_rx_buf = NULL;
if (!dev->fw_loader_received) {
- dev_err(devc, "wait response of cmd %u timeout\n", req_hdr->command);
+ dev_err(devc, "wait response of cmd %u timeout\n", req_hdr.command);
return -ETIMEDOUT;
}
- if (!resp_hdr->is_response) {
- dev_err(devc, "not a response for %u\n", req_hdr->command);
+ if (!resp_hdr.is_response) {
+ dev_err(devc, "not a response for %u\n", req_hdr.command);
return -EBADMSG;
}
- if (req_hdr->command != resp_hdr->command) {
- dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr->command,
- resp_hdr->command);
+ if (req_hdr.command != resp_hdr.command) {
+ dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr.command,
+ resp_hdr.command);
return -EBADMSG;
}
- if (resp_hdr->status) {
- dev_err(devc, "cmd %u failed %u\n", req_hdr->command, resp_hdr->status);
+ if (resp_hdr.status) {
+ dev_err(devc, "cmd %u failed %u\n", req_hdr.command, resp_hdr.status);
return -EIO;
}
@@ -138,12 +141,13 @@ static void release_dma_bufs(struct ishtp_device *dev,
struct loader_xfer_dma_fragment *fragment,
void **dma_bufs, u32 fragment_size)
{
+ dma_addr_t dma_addr;
int i;
for (i = 0; i < FRAGMENT_MAX_NUM; i++) {
if (dma_bufs[i]) {
- dma_free_coherent(dev->devc, fragment_size, dma_bufs[i],
- fragment->fragment_tbl[i].ddr_adrs);
+ dma_addr = le64_to_cpu(fragment->fragment_tbl[i].ddr_adrs);
+ dma_free_coherent(dev->devc, fragment_size, dma_bufs[i], dma_addr);
dma_bufs[i] = NULL;
}
}
@@ -156,29 +160,33 @@ static void release_dma_bufs(struct ishtp_device *dev,
* @fragment: The ISHTP firmware fragment descriptor
* @dma_bufs: The array of DMA fragment buffers
* @fragment_size: The size of a single DMA fragment
+ * @fragment_count: Number of fragments
*
* Return: 0 on success, negative error code on failure
*/
static int prepare_dma_bufs(struct ishtp_device *dev,
const struct firmware *ish_fw,
struct loader_xfer_dma_fragment *fragment,
- void **dma_bufs, u32 fragment_size)
+ void **dma_bufs, u32 fragment_size, u32 fragment_count)
{
+ dma_addr_t dma_addr;
u32 offset = 0;
+ u32 length;
int i;
- for (i = 0; i < fragment->fragment_cnt && offset < ish_fw->size; i++) {
- dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size,
- &fragment->fragment_tbl[i].ddr_adrs, GFP_KERNEL);
+ for (i = 0; i < fragment_count && offset < ish_fw->size; i++) {
+ dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size, &dma_addr, GFP_KERNEL);
if (!dma_bufs[i])
return -ENOMEM;
- fragment->fragment_tbl[i].length = clamp(ish_fw->size - offset, 0, fragment_size);
- fragment->fragment_tbl[i].fw_off = offset;
- memcpy(dma_bufs[i], ish_fw->data + offset, fragment->fragment_tbl[i].length);
+ fragment->fragment_tbl[i].ddr_adrs = cpu_to_le64(dma_addr);
+ length = clamp(ish_fw->size - offset, 0, fragment_size);
+ fragment->fragment_tbl[i].length = cpu_to_le32(length);
+ fragment->fragment_tbl[i].fw_off = cpu_to_le32(offset);
+ memcpy(dma_bufs[i], ish_fw->data + offset, length);
clflush_cache_range(dma_bufs[i], fragment_size);
- offset += fragment->fragment_tbl[i].length;
+ offset += length;
}
return 0;
@@ -206,17 +214,17 @@ void ishtp_loader_work(struct work_struct *work)
{
DEFINE_RAW_FLEX(struct loader_xfer_dma_fragment, fragment, fragment_tbl, FRAGMENT_MAX_NUM);
struct ishtp_device *dev = container_of(work, struct ishtp_device, work_fw_loader);
- struct loader_xfer_query query = {
- .header.command = LOADER_CMD_XFER_QUERY,
- };
- struct loader_start start = {
- .header.command = LOADER_CMD_START,
- };
+ union loader_msg_header query_hdr = { .command = LOADER_CMD_XFER_QUERY, };
+ union loader_msg_header start_hdr = { .command = LOADER_CMD_START, };
+ union loader_msg_header fragment_hdr = { .command = LOADER_CMD_XFER_FRAGMENT, };
+ struct loader_xfer_query query = { .header = cpu_to_le32(query_hdr.val32), };
+ struct loader_start start = { .header = cpu_to_le32(start_hdr.val32), };
union loader_recv_message recv_msg;
char *filename = dev->driver_data->fw_filename;
const struct firmware *ish_fw;
void *dma_bufs[FRAGMENT_MAX_NUM] = {};
u32 fragment_size;
+ u32 fragment_count;
int retry = ISHTP_LOADER_RETRY_TIMES;
int rv;
@@ -226,23 +234,24 @@ void ishtp_loader_work(struct work_struct *work)
return;
}
- fragment->fragment.header.command = LOADER_CMD_XFER_FRAGMENT;
- fragment->fragment.xfer_mode = LOADER_XFER_MODE_DMA;
- fragment->fragment.is_last = 1;
- fragment->fragment.size = ish_fw->size;
+ fragment->fragment.header = cpu_to_le32(fragment_hdr.val32);
+ fragment->fragment.xfer_mode = cpu_to_le32(LOADER_XFER_MODE_DMA);
+ fragment->fragment.is_last = cpu_to_le32(1);
+ fragment->fragment.size = cpu_to_le32(ish_fw->size);
/* Calculate the size of a single DMA fragment */
fragment_size = PFN_ALIGN(DIV_ROUND_UP(ish_fw->size, FRAGMENT_MAX_NUM));
/* Calculate the count of DMA fragments */
- fragment->fragment_cnt = DIV_ROUND_UP(ish_fw->size, fragment_size);
+ fragment_count = DIV_ROUND_UP(ish_fw->size, fragment_size);
+ fragment->fragment_cnt = cpu_to_le32(fragment_count);
- rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size);
+ rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size, fragment_count);
if (rv) {
dev_err(dev->devc, "prepare DMA buffer failed.\n");
goto out;
}
do {
- query.image_size = ish_fw->size;
+ query.image_size = cpu_to_le32(ish_fw->size);
rv = loader_xfer_cmd(dev, &query, sizeof(query), recv_msg.raw_data,
sizeof(struct loader_xfer_query_ack));
if (rv)
@@ -255,7 +264,7 @@ void ishtp_loader_work(struct work_struct *work)
recv_msg.query_ack.version_build);
rv = loader_xfer_cmd(dev, fragment,
- struct_size(fragment, fragment_tbl, fragment->fragment_cnt),
+ struct_size(fragment, fragment_tbl, fragment_count),
recv_msg.raw_data, sizeof(struct loader_xfer_fragment_ack));
if (rv)
continue; /* try again if failed */
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.h b/drivers/hid/intel-ish-hid/ishtp/loader.h
index 7aa45ebc3f7b..308b96085a4d 100644
--- a/drivers/hid/intel-ish-hid/ishtp/loader.h
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.h
@@ -30,19 +30,23 @@ struct work_struct;
#define LOADER_XFER_MODE_DMA BIT(0)
/**
- * struct loader_msg_header - ISHTP firmware loader message header
+ * union loader_msg_header - ISHTP firmware loader message header
* @command: Command type
* @is_response: Indicates if the message is a response
* @has_next: Indicates if there is a next message
* @reserved: Reserved for future use
* @status: Status of the message
- */
-struct loader_msg_header {
- __le32 command:7;
- __le32 is_response:1;
- __le32 has_next:1;
- __le32 reserved:15;
- __le32 status:8;
+ * @val32: entire header as a 32-bit value
+ */
+union loader_msg_header {
+ struct {
+ __u32 command:7;
+ __u32 is_response:1;
+ __u32 has_next:1;
+ __u32 reserved:15;
+ __u32 status:8;
+ };
+ __u32 val32;
};
/**
@@ -51,7 +55,7 @@ struct loader_msg_header {
* @image_size: Size of the image
*/
struct loader_xfer_query {
- struct loader_msg_header header;
+ __le32 header;
__le32 image_size;
};
@@ -103,7 +107,7 @@ struct loader_capability {
* @capability: Loader capability
*/
struct loader_xfer_query_ack {
- struct loader_msg_header header;
+ __le32 header;
__le16 version_major;
__le16 version_minor;
__le16 version_hotfix;
@@ -122,7 +126,7 @@ struct loader_xfer_query_ack {
* @is_last: Is last
*/
struct loader_xfer_fragment {
- struct loader_msg_header header;
+ __le32 header;
__le32 xfer_mode;
__le32 offset;
__le32 size;
@@ -134,7 +138,7 @@ struct loader_xfer_fragment {
* @header: Header of the message
*/
struct loader_xfer_fragment_ack {
- struct loader_msg_header header;
+ __le32 header;
};
/**
@@ -170,7 +174,7 @@ struct loader_xfer_dma_fragment {
* @header: Header of the message
*/
struct loader_start {
- struct loader_msg_header header;
+ __le32 header;
};
/**
@@ -178,10 +182,11 @@ struct loader_start {
* @header: Header of the message
*/
struct loader_start_ack {
- struct loader_msg_header header;
+ __le32 header;
};
union loader_recv_message {
+ __le32 header;
struct loader_xfer_query_ack query_ack;
struct loader_xfer_fragment_ack fragment_ack;
struct loader_start_ack start_ack;
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index a90ed2ceae84..cb687ea7325c 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -19,6 +19,7 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
@@ -1374,6 +1375,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->hiddev_report_event = hiddev_report_event;
#endif
hid->dev.parent = &intf->dev;
+ device_set_node(&hid->dev, dev_fwnode(&intf->dev));
hid->bus = BUS_USB;
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index a8ad728354cb..e0d676c74f14 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -45,8 +45,8 @@ int hv_init(void)
* This involves a hypercall.
*/
int hv_post_message(union hv_connection_id connection_id,
- enum hv_message_type message_type,
- void *payload, size_t payload_size)
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size)
{
struct hv_input_post_message *aligned_msg;
unsigned long flags;
@@ -86,7 +86,7 @@ int hv_post_message(union hv_connection_id connection_id,
status = HV_STATUS_INVALID_PARAMETER;
} else {
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
- aligned_msg, NULL);
+ aligned_msg, NULL);
}
local_irq_restore(flags);
@@ -111,7 +111,7 @@ int hv_synic_alloc(void)
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
GFP_KERNEL);
- if (hv_context.hv_numa_map == NULL) {
+ if (!hv_context.hv_numa_map) {
pr_err("Unable to allocate NUMA map\n");
goto err;
}
@@ -120,11 +120,11 @@ int hv_synic_alloc(void)
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
tasklet_init(&hv_cpu->msg_dpc,
- vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+ vmbus_on_msg_dpc, (unsigned long)hv_cpu);
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->post_msg_page == NULL) {
+ if (!hv_cpu->post_msg_page) {
pr_err("Unable to allocate post msg page\n");
goto err;
}
@@ -147,14 +147,14 @@ int hv_synic_alloc(void)
if (!ms_hyperv.paravisor_present && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_message_page == NULL) {
+ if (!hv_cpu->synic_message_page) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
hv_cpu->synic_event_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_event_page == NULL) {
+ if (!hv_cpu->synic_event_page) {
pr_err("Unable to allocate SYNIC event page\n");
free_page((unsigned long)hv_cpu->synic_message_page);
@@ -203,14 +203,13 @@ err:
return ret;
}
-
void hv_synic_free(void)
{
int cpu, ret;
for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
/* It's better to leak the page if the encryption fails. */
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
@@ -262,8 +261,8 @@ void hv_synic_free(void)
*/
void hv_synic_enable_regs(unsigned int cpu)
{
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
@@ -277,8 +276,8 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_message_page
- = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ hv_cpu->synic_message_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_message_page)
pr_err("Fail to map synic message page.\n");
} else {
@@ -296,8 +295,8 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_event_page
- = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ hv_cpu->synic_event_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_event_page)
pr_err("Fail to map synic event page.\n");
} else {
@@ -348,8 +347,8 @@ int hv_synic_init(unsigned int cpu)
*/
void hv_synic_disable_regs(unsigned int cpu)
{
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index e000fa3b9f97..0e7427c2baf5 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -25,6 +25,7 @@
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
#include <linux/page_reporting.h>
+#include <linux/sizes.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
@@ -41,8 +42,6 @@
* Begin protocol definitions.
*/
-
-
/*
* Protocol versions. The low word is the minor version, the high word the major
* version.
@@ -71,8 +70,6 @@ enum {
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
-
-
/*
* Message Types
*/
@@ -101,7 +98,6 @@ enum dm_message_type {
DM_VERSION_1_MAX = 12
};
-
/*
* Structures defining the dynamic memory management
* protocol.
@@ -115,7 +111,6 @@ union dm_version {
__u32 version;
} __packed;
-
union dm_caps {
struct {
__u64 balloon:1;
@@ -148,8 +143,6 @@ union dm_mem_page_range {
__u64 page_range;
} __packed;
-
-
/*
* The header for all dynamic memory messages:
*
@@ -174,7 +167,6 @@ struct dm_message {
__u8 data[]; /* enclosed message */
} __packed;
-
/*
* Specific message types supporting the dynamic memory protocol.
*/
@@ -271,7 +263,6 @@ struct dm_status {
__u32 io_diff;
} __packed;
-
/*
* Message to ask the guest to allocate memory - balloon up message.
* This message is sent from the host to the guest. The guest may not be
@@ -286,14 +277,13 @@ struct dm_balloon {
__u32 reservedz;
} __packed;
-
/*
* Balloon response message; this message is sent from the guest
* to the host in response to the balloon message.
*
* reservedz: Reserved; must be set to zero.
* more_pages: If FALSE, this is the last message of the transaction.
- * if TRUE there will atleast one more message from the guest.
+ * if TRUE there will be at least one more message from the guest.
*
* range_count: The number of ranges in the range array.
*
@@ -314,7 +304,7 @@ struct dm_balloon_response {
* to the guest to give guest more memory.
*
* more_pages: If FALSE, this is the last message of the transaction.
- * if TRUE there will atleast one more message from the guest.
+ * if TRUE there will be at least one more message from the guest.
*
* reservedz: Reserved; must be set to zero.
*
@@ -342,7 +332,6 @@ struct dm_unballoon_response {
struct dm_header hdr;
} __packed;
-
/*
* Hot add request message. Message sent from the host to the guest.
*
@@ -390,7 +379,6 @@ enum dm_info_type {
MAX_INFO_TYPE
};
-
/*
* Header for the information message.
*/
@@ -425,11 +413,11 @@ struct dm_info_msg {
* The range start_pfn : end_pfn specifies the range
* that the host has asked us to hot add. The range
* start_pfn : ha_end_pfn specifies the range that we have
- * currently hot added. We hot add in multiples of 128M
- * chunks; it is possible that we may not be able to bring
- * online all the pages in the region. The range
+ * currently hot added. We hot add in chunks equal to the
+ * memory block size; it is possible that we may not be able
+ * to bring online all the pages in the region. The range
* covered_start_pfn:covered_end_pfn defines the pages that can
- * be brough online.
+ * be brought online.
*/
struct hv_hotadd_state {
@@ -480,10 +468,10 @@ static unsigned long last_post_time;
static int hv_hypercall_multi_failure;
-module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
+module_param(hot_add, bool, 0644);
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
-module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+module_param(pressure_report_delay, uint, 0644);
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
@@ -502,11 +490,13 @@ enum hv_dm_state {
DM_INIT_ERROR
};
-
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
+
+static unsigned long ha_pages_in_chunk;
+#define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT)
+
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
-#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
struct hv_dynmem_device {
struct hv_device *dev;
@@ -595,12 +585,12 @@ static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
struct hv_hotadd_gap *gap;
/* The page is not backed. */
- if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
+ if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
return false;
/* Check for gaps. */
list_for_each_entry(gap, &has->gap_list, list) {
- if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
+ if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
return false;
}
@@ -724,28 +714,21 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
- for (i = 0; i < (size/HA_CHUNK); i++) {
- start_pfn = start + (i * HA_CHUNK);
+ for (i = 0; i < (size/ha_pages_in_chunk); i++) {
+ start_pfn = start + (i * ha_pages_in_chunk);
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
- has->ha_end_pfn += HA_CHUNK;
-
- if (total_pfn > HA_CHUNK) {
- processed_pfn = HA_CHUNK;
- total_pfn -= HA_CHUNK;
- } else {
- processed_pfn = total_pfn;
- total_pfn = 0;
- }
-
- has->covered_end_pfn += processed_pfn;
+ has->ha_end_pfn += ha_pages_in_chunk;
+ processed_pfn = umin(total_pfn, ha_pages_in_chunk);
+ total_pfn -= processed_pfn;
+ has->covered_end_pfn += processed_pfn;
}
reinit_completion(&dm_device.ol_waitevent);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
- (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
+ HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE);
if (ret) {
pr_err("hot_add memory failed error is %d\n", ret);
@@ -760,7 +743,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
do_hot_add = false;
}
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
- has->ha_end_pfn -= HA_CHUNK;
+ has->ha_end_pfn -= ha_pages_in_chunk;
has->covered_end_pfn -= processed_pfn;
}
break;
@@ -787,8 +770,8 @@ static void hv_online_page(struct page *pg, unsigned int order)
guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
- if ((pfn < has->start_pfn) ||
- (pfn + (1UL << order) > has->end_pfn))
+ if (pfn < has->start_pfn ||
+ (pfn + (1UL << order) > has->end_pfn))
continue;
hv_bring_pgs_online(has, pfn, 1UL << order);
@@ -800,7 +783,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct hv_hotadd_state *has;
struct hv_hotadd_gap *gap;
- unsigned long residual, new_inc;
+ unsigned long residual;
int ret = 0;
guard(spinlock_irqsave)(&dm_device.ha_lock);
@@ -836,15 +819,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* our current limit; extend it.
*/
if ((start_pfn + pfn_cnt) > has->end_pfn) {
+ /* Extend the region by multiples of ha_pages_in_chunk */
residual = (start_pfn + pfn_cnt - has->end_pfn);
- /*
- * Extend the region by multiples of HA_CHUNK.
- */
- new_inc = (residual / HA_CHUNK) * HA_CHUNK;
- if (residual % HA_CHUNK)
- new_inc += HA_CHUNK;
-
- has->end_pfn += new_inc;
+ has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
}
ret = 1;
@@ -855,7 +832,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
}
static unsigned long handle_pg_range(unsigned long pg_start,
- unsigned long pg_count)
+ unsigned long pg_count)
{
unsigned long start_pfn = pg_start;
unsigned long pfn_cnt = pg_count;
@@ -866,7 +843,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long res = 0, flags;
pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
- pg_start);
+ pg_start);
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
@@ -902,22 +879,19 @@ static unsigned long handle_pg_range(unsigned long pg_start,
if (start_pfn > has->start_pfn &&
online_section_nr(pfn_to_section_nr(start_pfn)))
hv_bring_pgs_online(has, start_pfn, pgs_ol);
-
}
- if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
+ if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
/*
* We have some residual hot add range
* that needs to be hot added; hot add
* it now. Hot add a multiple of
- * HA_CHUNK that fully covers the pages
+ * ha_pages_in_chunk that fully covers the pages
* we have.
*/
size = (has->end_pfn - has->ha_end_pfn);
if (pfn_cnt <= size) {
- size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
- if (pfn_cnt % HA_CHUNK)
- size += HA_CHUNK;
+ size = ALIGN(pfn_cnt, ha_pages_in_chunk);
} else {
pfn_cnt = size;
}
@@ -1010,10 +984,7 @@ static void hot_add_req(struct work_struct *dummy)
rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
- if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
- unsigned long region_size;
- unsigned long region_start;
-
+ if (rg_start == 0 && !dm->host_specified_ha_region) {
/*
* The host has not specified the hot-add region.
* Based on the hot-add page range being specified,
@@ -1021,19 +992,13 @@ static void hot_add_req(struct work_struct *dummy)
* that need to be hot-added while ensuring the alignment
* and size requirements of Linux as it relates to hot-add.
*/
- region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
- if (pfn_cnt % HA_CHUNK)
- region_size += HA_CHUNK;
-
- region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
-
- rg_start = region_start;
- rg_sz = region_size;
+ rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
+ rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk);
}
if (do_hot_add)
resp.page_count = process_hot_add(pg_start, pfn_cnt,
- rg_start, rg_sz);
+ rg_start, rg_sz);
dm->num_pages_added += resp.page_count;
#endif
@@ -1211,11 +1176,10 @@ static void post_status(struct hv_dynmem_device *dm)
sizeof(struct dm_status),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
-
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
- union dm_mem_page_range *range_array)
+ union dm_mem_page_range *range_array)
{
int num_pages = range_array->finfo.page_cnt;
__u64 start_frame = range_array->finfo.start_page;
@@ -1231,8 +1195,6 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
}
}
-
-
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
unsigned int num_pages,
struct dm_balloon_response *bl_resp,
@@ -1278,7 +1240,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
page_to_pfn(pg);
bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
bl_resp->hdr.size += sizeof(union dm_mem_page_range);
-
}
return i * alloc_unit;
@@ -1332,7 +1293,7 @@ static void balloon_up(struct work_struct *dummy)
if (num_ballooned == 0 || num_ballooned == num_pages) {
pr_debug("Ballooned %u out of %u requested pages.\n",
- num_pages, dm_device.balloon_wrk.num_pages);
+ num_pages, dm_device.balloon_wrk.num_pages);
bl_resp->more_pages = 0;
done = true;
@@ -1366,16 +1327,15 @@ static void balloon_up(struct work_struct *dummy)
for (i = 0; i < bl_resp->range_count; i++)
free_balloon_pages(&dm_device,
- &bl_resp->range_array[i]);
+ &bl_resp->range_array[i]);
done = true;
}
}
-
}
static void balloon_down(struct hv_dynmem_device *dm,
- struct dm_unballoon_request *req)
+ struct dm_unballoon_request *req)
{
union dm_mem_page_range *range_array = req->range_array;
int range_count = req->range_count;
@@ -1389,7 +1349,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
}
pr_debug("Freed %u ballooned pages.\n",
- prev_pages_ballooned - dm->num_pages_ballooned);
+ prev_pages_ballooned - dm->num_pages_ballooned);
if (req->more_pages == 1)
return;
@@ -1414,8 +1374,7 @@ static int dm_thread_func(void *dm_dev)
struct hv_dynmem_device *dm = dm_dev;
while (!kthread_should_stop()) {
- wait_for_completion_interruptible_timeout(
- &dm_device.config_event, 1*HZ);
+ wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
@@ -1439,9 +1398,8 @@ static int dm_thread_func(void *dm_dev)
return 0;
}
-
static void version_resp(struct hv_dynmem_device *dm,
- struct dm_version_response *vresp)
+ struct dm_version_response *vresp)
{
struct dm_version_request version_req;
int ret;
@@ -1502,7 +1460,7 @@ version_error:
}
static void cap_resp(struct hv_dynmem_device *dm,
- struct dm_capabilities_resp_msg *cap_resp)
+ struct dm_capabilities_resp_msg *cap_resp)
{
if (!cap_resp->is_accepted) {
pr_err("Capabilities not accepted by host\n");
@@ -1535,7 +1493,7 @@ static void balloon_onchannelcallback(void *context)
switch (dm_hdr->type) {
case DM_VERSION_RESPONSE:
version_resp(dm,
- (struct dm_version_response *)dm_msg);
+ (struct dm_version_response *)dm_msg);
break;
case DM_CAPABILITIES_RESPONSE:
@@ -1565,7 +1523,7 @@ static void balloon_onchannelcallback(void *context)
dm->state = DM_BALLOON_DOWN;
balloon_down(dm,
- (struct dm_unballoon_request *)recv_buffer);
+ (struct dm_unballoon_request *)recv_buffer);
break;
case DM_MEM_HOT_ADD_REQUEST:
@@ -1603,17 +1561,15 @@ static void balloon_onchannelcallback(void *context)
default:
pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
-
}
}
-
}
#define HV_LARGE_REPORTING_ORDER 9
#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
HV_LARGE_REPORTING_ORDER)
static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
- struct scatterlist *sgl, unsigned int nents)
+ struct scatterlist *sgl, unsigned int nents)
{
unsigned long flags;
struct hv_memory_hint *hint;
@@ -1648,7 +1604,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
*/
/* page reporting for pages 2MB or higher */
- if (order >= HV_LARGE_REPORTING_ORDER ) {
+ if (order >= HV_LARGE_REPORTING_ORDER) {
range->page.largepage = 1;
range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
range->base_large_pfn = page_to_hvpfn(
@@ -1662,23 +1618,21 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
range->page.additional_pages =
(sg->length / HV_HYP_PAGE_SIZE) - 1;
}
-
}
status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
hint, NULL);
local_irq_restore(flags);
if (!hv_result_success(status)) {
-
pr_err("Cold memory discard hypercall failed with status %llx\n",
- status);
+ status);
if (hv_hypercall_multi_failure > 0)
hv_hypercall_multi_failure++;
if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
pr_err("Defaulting to page_reporting_order %d\n",
- pageblock_order);
+ pageblock_order);
page_reporting_order = pageblock_order;
hv_hypercall_multi_failure++;
return -EINVAL;
@@ -1712,7 +1666,7 @@ static void enable_page_reporting(void)
pr_err("Failed to enable cold memory discard: %d\n", ret);
} else {
pr_info("Cold memory discard hint enabled with order %d\n",
- page_reporting_order);
+ page_reporting_order);
}
}
@@ -1795,7 +1749,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1831,10 +1785,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
/*
- * Specify our alignment requirements as it relates
- * memory hot-add. Specify 128MB alignment.
+ * Specify our alignment requirements for memory hot-add. The value is
+ * the log base 2 of the number of megabytes in a chunk. For example,
+ * with 256 MiB chunks, the value is 8. The number of MiB in a chunk
+ * must be a power of 2.
*/
- cap_msg.caps.cap_bits.hot_add_alignment = 7;
+ cap_msg.caps.cap_bits.hot_add_alignment =
+ ilog2(HA_BYTES_IN_CHUNK / SZ_1M);
/*
* Currently the host does not use these
@@ -1850,7 +1807,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1891,8 +1848,8 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
char *sname;
seq_printf(f, "%-22s: %u.%u\n", "host_version",
- DYNMEM_MAJOR_VERSION(dm->version),
- DYNMEM_MINOR_VERSION(dm->version));
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
seq_printf(f, "%-22s:", "capabilities");
if (ballooning_enabled())
@@ -1941,10 +1898,10 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
- get_pages_committed(dm));
+ get_pages_committed(dm));
seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
- dm->max_dynamic_page_count);
+ dm->max_dynamic_page_count);
return 0;
}
@@ -1954,7 +1911,7 @@ DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
{
debugfs_create_file("hv-balloon", 0444, NULL, b,
- &hv_balloon_debug_fops);
+ &hv_balloon_debug_fops);
}
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
@@ -1984,8 +1941,23 @@ static int balloon_probe(struct hv_device *dev,
hot_add = false;
#ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Hot-add must operate in chunks that are of size equal to the
+ * memory block size because that's what the core add_memory()
+ * interface requires. The Hyper-V interface requires that the memory
+ * block size be a power of 2, which is guaranteed by the check in
+ * memory_dev_init().
+ */
+ ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
do_hot_add = hot_add;
#else
+ /*
+ * Without MEMORY_HOTPLUG, the guest returns a failure status for all
+ * hot add requests from Hyper-V, and the chunk size is used only to
+ * specify alignment to Hyper-V as required by the host/guest protocol.
+ * Somewhat arbitrarily, use 128 MiB.
+ */
+ ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
do_hot_add = false;
#endif
dm_device.dev = dev;
@@ -2097,7 +2069,6 @@ static int balloon_suspend(struct hv_device *hv_dev)
tasklet_enable(&hv_dev->channel->callback_event);
return 0;
-
}
static int balloon_resume(struct hv_device *dev)
@@ -2156,7 +2127,6 @@ static struct hv_driver balloon_drv = {
static int __init init_balloon_drv(void)
{
-
return vmbus_driver_register(&balloon_drv);
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e14ae18a973b..b60fe2e58ad6 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -105,18 +105,6 @@ config SENSORS_AD7418
This driver can also be built as a module. If so, the module
will be called ad7418.
-config SENSORS_ADM1021
- tristate "Analog Devices ADM1021 and compatibles"
- depends on I2C
- depends on SENSORS_LM90=n
- help
- If you say yes here you get support for Analog Devices ADM1021
- and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A,
- Genesys Logic GL523SM, National Semiconductor LM84 and TI THMC10.
-
- This driver can also be built as a module. If so, the module
- will be called adm1021.
-
config SENSORS_ADM1025
tristate "Analog Devices ADM1025 and compatibles"
depends on I2C
@@ -506,6 +494,17 @@ config SENSORS_CORSAIR_PSU
This driver can also be built as a module. If so, the module
will be called corsair-psu.
+config SENSORS_CROS_EC
+ tristate "ChromeOS Embedded Controller sensors"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ If you say yes here you get support for ChromeOS Embedded Controller
+ sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called cros_ec_hwmon.
+
config SENSORS_DRIVETEMP
tristate "Hard disk drives with temperature sensors"
depends on SCSI && ATA
@@ -1241,18 +1240,6 @@ config SENSORS_MAX6639
This driver can also be built as a module. If so, the module
will be called max6639.
-config SENSORS_MAX6642
- tristate "Maxim MAX6642 sensor chip"
- depends on I2C
- depends on SENSORS_LM90=n
- help
- If you say yes here you get support for MAX6642 sensor chip.
- MAX6642 is a SMBus-Compatible Remote/Local Temperature Sensor
- with Overtemperature Alarm from Maxim.
-
- This driver can also be built as a module. If so, the module
- will be called max6642.
-
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C
@@ -2127,6 +2114,7 @@ config SENSORS_ADS7871
config SENSORS_AMC6821
tristate "Texas Instruments AMC6821"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the Texas Instruments
AMC6821 hardware monitoring chips.
@@ -2181,6 +2169,37 @@ config SENSORS_INA3221
This driver can also be built as a module. If so, the module
will be called ina3221.
+config SENSORS_SPD5118
+ tristate "SPD5118 Compliant Temperature Sensors"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for SPD5118 (JEDEC JESD300)
+ compliant temperature sensors. Such sensors are found on DDR5 memory
+ modules.
+
+ This driver can also be built as a module. If so, the module
+ will be called spd5118.
+
+config SENSORS_SPD5118_DETECT
+ bool "Enable detect function"
+ depends on SENSORS_SPD5118
+ default (!DMI || !X86)
+ help
+ If enabled, the driver auto-detects if a chip in the SPD address
+ range is compliant to the SPD5118 standard and auto-instantiates
+ if that is the case. If disabled, SPD5118 compliant devices have
+ to be instantiated by other means. On X86 systems with DMI support
+ this will typically be done from DMI DDR detection code in the
+ I2C SMBus subsystem. Devicetree based systems will instantiate
+ attached devices if the DIMMs are listed in the devicetree file.
+
+ Disabling the detect function will speed up boot time and reduce
+ the risk of mis-detecting SPD5118 compliant devices. However, it
+ may result in missed DIMMs under some circumstances.
+
+ If unsure, say Y.
+
config SENSORS_TC74
tristate "Microchip TC74"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e3f25475d1f0..b1c7056c37db 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
obj-$(CONFIG_SENSORS_ADC128D818) += adc128d818.o
obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o
-obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
@@ -64,6 +63,7 @@ obj-$(CONFIG_SENSORS_CHIPCAP2) += chipcap2.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_CORSAIR_CPRO) += corsair-cpro.o
obj-$(CONFIG_SENSORS_CORSAIR_PSU) += corsair-psu.o
+obj-$(CONFIG_SENSORS_CROS_EC) += cros_ec_hwmon.o
obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
@@ -154,7 +154,6 @@ obj-$(CONFIG_SENSORS_MAX31760) += max31760.o
obj-$(CONFIG_SENSORS_MAX6620) += max6620.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
-obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
@@ -207,6 +206,7 @@ obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_SPARX5) += sparx5-temp.o
+obj-$(CONFIG_SENSORS_SPD5118) += spd5118.o
obj-$(CONFIG_SENSORS_STTS751) += stts751.o
obj-$(CONFIG_SENSORS_SURFACE_FAN)+= surface_fan.o
obj-$(CONFIG_SENSORS_SY7636A) += sy7636a-hwmon.o
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index 4829f83ff52e..7a132accdf8a 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -230,8 +230,6 @@ static void ad7418_init_client(struct i2c_client *client)
}
}
-static const struct i2c_device_id ad7418_id[];
-
static int ad7418_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -252,10 +250,7 @@ static int ad7418_probe(struct i2c_client *client)
mutex_init(&data->lock);
data->client = client;
- if (dev->of_node)
- data->type = (uintptr_t)of_device_get_match_data(dev);
- else
- data->type = i2c_match_id(ad7418_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
switch (data->type) {
case ad7416:
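
The same conversion recurs in several hwmon hunks below (adm1031, ads7828, adt7475, aht10): the separate of_device_get_match_data()/i2c_match_id() branches collapse into a single i2c_get_match_data() call, which checks the firmware node first and falls back to the I2C id table. A hedged stand-alone sketch of that pattern follows; the driver name, enum and id strings are invented for illustration, and only the i2c_get_match_data() usage reflects the patch.

#include <linux/i2c.h>
#include <linux/module.h>

enum mydrv_chips { mydrv_chip_a, mydrv_chip_b };

static int mydrv_probe(struct i2c_client *client)
{
	enum mydrv_chips chip;

	/* Works for both devicetree and legacy I2C-id instantiation. */
	chip = (uintptr_t)i2c_get_match_data(client);

	dev_info(&client->dev, "probed variant %d\n", chip);
	return 0;
}

static const struct i2c_device_id mydrv_id[] = {
	{ "mydrv-a", mydrv_chip_a },
	{ "mydrv-b", mydrv_chip_b },
	{ }
};
MODULE_DEVICE_TABLE(i2c, mydrv_id);

static struct i2c_driver mydrv_driver = {
	.driver = { .name = "mydrv" },
	.probe = mydrv_probe,
	.id_table = mydrv_id,
};
module_i2c_driver(mydrv_driver);

MODULE_DESCRIPTION("Illustrative i2c_get_match_data() sketch");
MODULE_LICENSE("GPL");
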
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 8ac6e735ec5c..5e805d4ee76a 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -175,7 +175,7 @@ static ssize_t adc128_in_store(struct device *dev,
mutex_lock(&data->update_lock);
/* 10 mV LSB on limit registers */
- regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
+ regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
data->in[index][nr] = regval << 4;
reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
i2c_smbus_write_byte_data(data->client, reg, regval);
@@ -213,7 +213,7 @@ static ssize_t adc128_temp_store(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
data->temp[index] = regval << 1;
i2c_smbus_write_byte_data(data->client,
index == 1 ? ADC128_REG_TEMP_MAX
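
A short sketch of why the adc128d818 hunks above clamp before dividing (the commit message is not in this excerpt, so the overflow rationale is the editor's reading of the change): DIV_ROUND_CLOSEST(x, d) adds d/2 to x before dividing, so an extreme value written from user space can overflow before the clamp ever runs, whereas clamping to the register range expressed in user units first keeps the intermediate value small. The macros below are simplified local stand-ins (positive-only rounding, for brevity) and the 10 mV LSB mirrors the in-limit hunk.

#include <limits.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))	/* positive x only, simplified */
#define clamp_val(v, lo, hi)    ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	long val = LONG_MAX;	/* e.g. an absurdly large sysfs write */

	/* new order: clamp to 0..2550 mV first, then derive the 10 mV register value */
	long regval = DIV_ROUND_CLOSEST(clamp_val(val, 0L, 2550L), 10L);

	printf("regval = %ld\n", regval);	/* 255, with no overflowing intermediate */

	/* the old order would compute (LONG_MAX + 5) / 10 first -- signed overflow */
	return 0;
}
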
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
deleted file mode 100644
index 7c15398ebb37..000000000000
--- a/drivers/hwmon/adm1021.c
+++ /dev/null
@@ -1,505 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * adm1021.c - Part of lm_sensors, Linux kernel modules for hardware
- * monitoring
- * Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
- * Philip Edelbrock <phil@netroedge.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-
-
-/* Addresses to scan */
-static const unsigned short normal_i2c[] = {
- 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-
-enum chips {
- adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 };
-
-/* adm1021 constants specified below */
-
-/* The adm1021 registers */
-/* Read-only */
-/* For nr in 0-1 */
-#define ADM1021_REG_TEMP(nr) (nr)
-#define ADM1021_REG_STATUS 0x02
-/* 0x41 = AD, 0x49 = TI, 0x4D = Maxim, 0x23 = Genesys , 0x54 = Onsemi */
-#define ADM1021_REG_MAN_ID 0xFE
-/* ADM1021 = 0x0X, ADM1023 = 0x3X */
-#define ADM1021_REG_DEV_ID 0xFF
-/* These use different addresses for reading/writing */
-#define ADM1021_REG_CONFIG_R 0x03
-#define ADM1021_REG_CONFIG_W 0x09
-#define ADM1021_REG_CONV_RATE_R 0x04
-#define ADM1021_REG_CONV_RATE_W 0x0A
-/* These are for the ADM1023's additional precision on the remote temp sensor */
-#define ADM1023_REG_REM_TEMP_PREC 0x10
-#define ADM1023_REG_REM_OFFSET 0x11
-#define ADM1023_REG_REM_OFFSET_PREC 0x12
-#define ADM1023_REG_REM_TOS_PREC 0x13
-#define ADM1023_REG_REM_THYST_PREC 0x14
-/* limits */
-/* For nr in 0-1 */
-#define ADM1021_REG_TOS_R(nr) (0x05 + 2 * (nr))
-#define ADM1021_REG_TOS_W(nr) (0x0B + 2 * (nr))
-#define ADM1021_REG_THYST_R(nr) (0x06 + 2 * (nr))
-#define ADM1021_REG_THYST_W(nr) (0x0C + 2 * (nr))
-/* write-only */
-#define ADM1021_REG_ONESHOT 0x0F
-
-/* Initial values */
-
-/*
- * Note: Even though I left the low and high limits named os and hyst,
- * they don't quite work like a thermostat the way the LM75 does. I.e.,
- * a lower temp than THYST actually triggers an alarm instead of
- * clearing it. Weird, ey? --Phil
- */
-
-/* Each client has this additional data */
-struct adm1021_data {
- struct i2c_client *client;
- enum chips type;
-
- const struct attribute_group *groups[3];
-
- struct mutex update_lock;
- bool valid; /* true if following fields are valid */
- char low_power; /* !=0 if device in low power mode */
- unsigned long last_updated; /* In jiffies */
-
- int temp_max[2]; /* Register values */
- int temp_min[2];
- int temp[2];
- u8 alarms;
- /* Special values for ADM1023 only */
- u8 remote_temp_offset;
- u8 remote_temp_offset_prec;
-};
-
-/* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */
-static bool read_only;
-
-static struct adm1021_data *adm1021_update_device(struct device *dev)
-{
- struct adm1021_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- int i;
-
- dev_dbg(dev, "Starting adm1021 update\n");
-
- for (i = 0; i < 2; i++) {
- data->temp[i] = 1000 *
- (s8) i2c_smbus_read_byte_data(
- client, ADM1021_REG_TEMP(i));
- data->temp_max[i] = 1000 *
- (s8) i2c_smbus_read_byte_data(
- client, ADM1021_REG_TOS_R(i));
- if (data->type != lm84) {
- data->temp_min[i] = 1000 *
- (s8) i2c_smbus_read_byte_data(client,
- ADM1021_REG_THYST_R(i));
- }
- }
- data->alarms = i2c_smbus_read_byte_data(client,
- ADM1021_REG_STATUS) & 0x7c;
- if (data->type == adm1023) {
- /*
- * The ADM1023 provides 3 extra bits of precision for
- * the remote sensor in extra registers.
- */
- data->temp[1] += 125 * (i2c_smbus_read_byte_data(
- client, ADM1023_REG_REM_TEMP_PREC) >> 5);
- data->temp_max[1] += 125 * (i2c_smbus_read_byte_data(
- client, ADM1023_REG_REM_TOS_PREC) >> 5);
- data->temp_min[1] += 125 * (i2c_smbus_read_byte_data(
- client, ADM1023_REG_REM_THYST_PREC) >> 5);
- data->remote_temp_offset =
- i2c_smbus_read_byte_data(client,
- ADM1023_REG_REM_OFFSET);
- data->remote_temp_offset_prec =
- i2c_smbus_read_byte_data(client,
- ADM1023_REG_REM_OFFSET_PREC);
- }
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct adm1021_data *data = adm1021_update_device(dev);
-
- return sprintf(buf, "%d\n", data->temp[index]);
-}
-
-static ssize_t temp_max_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct adm1021_data *data = adm1021_update_device(dev);
-
- return sprintf(buf, "%d\n", data->temp_max[index]);
-}
-
-static ssize_t temp_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct adm1021_data *data = adm1021_update_device(dev);
-
- return sprintf(buf, "%d\n", data->temp_min[index]);
-}
-
-static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int index = to_sensor_dev_attr(attr)->index;
- struct adm1021_data *data = adm1021_update_device(dev);
- return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
-}
-
-static ssize_t alarms_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct adm1021_data *data = adm1021_update_device(dev);
- return sprintf(buf, "%u\n", data->alarms);
-}
-
-static ssize_t temp_max_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct adm1021_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long temp;
- int reg_val, err;
-
- err = kstrtol(buf, 10, &temp);
- if (err)
- return err;
- temp /= 1000;
-
- mutex_lock(&data->update_lock);
- reg_val = clamp_val(temp, -128, 127);
- data->temp_max[index] = reg_val * 1000;
- if (!read_only)
- i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
- reg_val);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t temp_min_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct adm1021_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long temp;
- int reg_val, err;
-
- err = kstrtol(buf, 10, &temp);
- if (err)
- return err;
- temp /= 1000;
-
- mutex_lock(&data->update_lock);
- reg_val = clamp_val(temp, -128, 127);
- data->temp_min[index] = reg_val * 1000;
- if (!read_only)
- i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
- reg_val);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t low_power_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct adm1021_data *data = adm1021_update_device(dev);
- return sprintf(buf, "%d\n", data->low_power);
-}
-
-static ssize_t low_power_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct adm1021_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- char low_power;
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
- low_power = val != 0;
-
- mutex_lock(&data->update_lock);
- if (low_power != data->low_power) {
- int config = i2c_smbus_read_byte_data(
- client, ADM1021_REG_CONFIG_R);
- data->low_power = low_power;
- i2c_smbus_write_byte_data(client, ADM1021_REG_CONFIG_W,
- (config & 0xBF) | (low_power << 6));
- }
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 5);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4);
-static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2);
-
-static DEVICE_ATTR_RO(alarms);
-static DEVICE_ATTR_RW(low_power);
-
-static struct attribute *adm1021_attributes[] = {
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &dev_attr_alarms.attr,
- &dev_attr_low_power.attr,
- NULL
-};
-
-static const struct attribute_group adm1021_group = {
- .attrs = adm1021_attributes,
-};
-
-static struct attribute *adm1021_min_attributes[] = {
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adm1021_min_group = {
- .attrs = adm1021_min_attributes,
-};
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1021_detect(struct i2c_client *client,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- const char *type_name;
- int reg, conv_rate, status, config, man_id, dev_id;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
- pr_debug("detect failed, smbus byte data not supported!\n");
- return -ENODEV;
- }
-
- status = i2c_smbus_read_byte_data(client, ADM1021_REG_STATUS);
- conv_rate = i2c_smbus_read_byte_data(client,
- ADM1021_REG_CONV_RATE_R);
- config = i2c_smbus_read_byte_data(client, ADM1021_REG_CONFIG_R);
-
- /* Check unused bits */
- if ((status & 0x03) || (config & 0x3F) || (conv_rate & 0xF8)) {
- pr_debug("detect failed, chip not detected!\n");
- return -ENODEV;
- }
-
- /* Determine the chip type. */
- man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
- dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
-
- if (man_id < 0 || dev_id < 0)
- return -ENODEV;
-
- if (man_id == 0x4d && dev_id == 0x01) {
- /*
- * dev_id 0x01 matches MAX6680, MAX6695, MAX6696, and possibly
- * others. Read register which is unsupported on MAX1617 but
- * exists on all those chips and compare with the dev_id
- * register. If it matches, it may be a MAX1617A.
- */
- reg = i2c_smbus_read_byte_data(client,
- ADM1023_REG_REM_TEMP_PREC);
- if (reg != dev_id)
- return -ENODEV;
- type_name = "max1617a";
- } else if (man_id == 0x41) {
- if ((dev_id & 0xF0) == 0x30)
- type_name = "adm1023";
- else if ((dev_id & 0xF0) == 0x00)
- type_name = "adm1021";
- else
- return -ENODEV;
- } else if (man_id == 0x49)
- type_name = "thmc10";
- else if (man_id == 0x23)
- type_name = "gl523sm";
- else if (man_id == 0x54)
- type_name = "mc1066";
- else {
- int lte, rte, lhi, rhi, llo, rlo;
-
- /* extra checks for LM84 and MAX1617 to avoid misdetections */
-
- llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
- rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
-
- /* fail if any of the additional register reads failed */
- if (llo < 0 || rlo < 0)
- return -ENODEV;
-
- lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
- rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
- lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
- rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
-
- /*
- * Fail for negative temperatures and negative high limits.
- * This check also catches read errors on the tested registers.
- */
- if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
- return -ENODEV;
-
- /* fail if all registers hold the same value */
- if (lte == rte && lte == lhi && lte == rhi && lte == llo
- && lte == rlo)
- return -ENODEV;
-
- /*
- * LM84 Mfr ID is in a different place,
- * and it has more unused bits. Registers at 0xfe and 0xff
- * are undefined and return the most recently read value,
- * here the value of the configuration register.
- */
- if (conv_rate == 0x00
- && man_id == config && dev_id == config
- && (config & 0x7F) == 0x00
- && (status & 0xAB) == 0x00) {
- type_name = "lm84";
- } else {
- if ((config & 0x3f) || (status & 0x03))
- return -ENODEV;
- /* fail if low limits are larger than high limits */
- if ((s8)llo > lhi || (s8)rlo > rhi)
- return -ENODEV;
- type_name = "max1617";
- }
- }
-
- pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
- type_name, i2c_adapter_id(adapter), client->addr);
- strscpy(info->type, type_name, I2C_NAME_SIZE);
-
- return 0;
-}
-
-static void adm1021_init_client(struct i2c_client *client)
-{
- /* Enable ADC and disable suspend mode */
- i2c_smbus_write_byte_data(client, ADM1021_REG_CONFIG_W,
- i2c_smbus_read_byte_data(client, ADM1021_REG_CONFIG_R) & 0xBF);
- /* Set Conversion rate to 1/sec (this can be tinkered with) */
- i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04);
-}
-
-static const struct i2c_device_id adm1021_id[];
-
-static int adm1021_probe(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- struct adm1021_data *data;
- struct device *hwmon_dev;
-
- data = devm_kzalloc(dev, sizeof(struct adm1021_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->client = client;
- data->type = i2c_match_id(adm1021_id, client)->driver_data;
- mutex_init(&data->update_lock);
-
- /* Initialize the ADM1021 chip */
- if (data->type != lm84 && !read_only)
- adm1021_init_client(client);
-
- data->groups[0] = &adm1021_group;
- if (data->type != lm84)
- data->groups[1] = &adm1021_min_group;
-
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
-
- return PTR_ERR_OR_ZERO(hwmon_dev);
-}
-
-static const struct i2c_device_id adm1021_id[] = {
- { "adm1021", adm1021 },
- { "adm1023", adm1023 },
- { "max1617", max1617 },
- { "max1617a", max1617a },
- { "thmc10", thmc10 },
- { "lm84", lm84 },
- { "gl523sm", gl523sm },
- { "mc1066", mc1066 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, adm1021_id);
-
-static struct i2c_driver adm1021_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "adm1021",
- },
- .probe = adm1021_probe,
- .id_table = adm1021_id,
- .detect = adm1021_detect,
- .address_list = normal_i2c,
-};
-
-module_i2c_driver(adm1021_driver);
-
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
- "Philip Edelbrock <phil@netroedge.com>");
-MODULE_DESCRIPTION("adm1021 driver");
-MODULE_LICENSE("GPL");
-
-module_param(read_only, bool, 0);
-MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 88c7e0d62d08..343118532cdb 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -1021,8 +1021,6 @@ static void adm1031_init_client(struct i2c_client *client)
data->update_interval = update_intervals[i];
}
-static const struct i2c_device_id adm1031_id[];
-
static int adm1031_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1035,7 +1033,7 @@ static int adm1031_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
data->client = client;
- data->chip_type = i2c_match_id(adm1031_id, client)->driver_data;
+ data->chip_type = (uintptr_t)i2c_get_match_data(client);
mutex_init(&data->update_lock);
if (data->chip_type == adm1030)
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 809e830f52a6..436637264056 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -99,8 +99,6 @@ static const struct regmap_config ads2830_regmap_config = {
.val_bits = 8,
};
-static const struct i2c_device_id ads7828_device_ids[];
-
static int ads7828_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -138,10 +136,7 @@ static int ads7828_probe(struct i2c_client *client)
}
}
- if (client->dev.of_node)
- chip = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- chip = i2c_match_id(ads7828_device_ids, client)->driver_data;
+ chip = (uintptr_t)i2c_get_match_data(client);
/* Bound Vref with min/max values */
vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 4224ffb30483..bc186c61a2c0 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1676,7 +1676,6 @@ static int adt7475_probe(struct i2c_client *client)
struct device *hwmon_dev;
int i, ret = 0, revision, group_num = 0;
u8 config3;
- const struct i2c_device_id *id = i2c_match_id(adt7475_id, client);
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
@@ -1686,10 +1685,7 @@ static int adt7475_probe(struct i2c_client *client)
data->client = client;
i2c_set_clientdata(client, data);
- if (client->dev.of_node)
- chip = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- chip = id->driver_data;
+ chip = (uintptr_t)i2c_get_match_data(client);
/* Initialize device-specific values */
switch (chip) {
@@ -1717,7 +1713,7 @@ static int adt7475_probe(struct i2c_client *client)
if (!(config3 & CONFIG3_SMBALERT))
data->has_pwm2 = 1;
/* Meaning of this bit is inverted for the ADT7473-1 */
- if (id->driver_data == adt7473 && revision >= 1)
+ if (chip == adt7473 && revision >= 1)
data->has_pwm2 = !data->has_pwm2;
data->config4 = adt7475_read(REG_CONFIG4);
@@ -1730,12 +1726,12 @@ static int adt7475_probe(struct i2c_client *client)
* because 2 different pins (TACH4 and +2.5 Vin) can be used for
* this function
*/
- if (id->driver_data == adt7490) {
+ if (chip == adt7490) {
if ((data->config4 & CONFIG4_PINFUNC) == 0x1 &&
!(config3 & CONFIG3_THERM))
data->has_fan4 = 1;
}
- if (id->driver_data == adt7476 || id->driver_data == adt7490) {
+ if (chip == adt7476 || chip == adt7490) {
if (!(config3 & CONFIG3_THERM) ||
(data->config4 & CONFIG4_PINFUNC) == 0x1)
data->has_voltage |= (1 << 0); /* in0 */
@@ -1745,7 +1741,7 @@ static int adt7475_probe(struct i2c_client *client)
* On the ADT7476, the +12V input pin may instead be used as VID5,
* and VID pins may alternatively be used as GPIO
*/
- if (id->driver_data == adt7476) {
+ if (chip == adt7476) {
u8 vid = adt7475_read(REG_VID);
if (!(vid & VID_VIDSEL))
data->has_voltage |= (1 << 4); /* in4 */
@@ -1829,7 +1825,7 @@ static int adt7475_probe(struct i2c_client *client)
}
dev_info(&client->dev, "%s device, revision %d\n",
- names[id->driver_data], revision);
+ names[chip], revision);
if ((data->has_voltage & 0x11) || data->has_fan4 || data->has_pwm2)
dev_info(&client->dev, "Optional features:%s%s%s%s%s\n",
(data->has_voltage & (1 << 0)) ? " in0" : "",
@@ -1900,7 +1896,7 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
data->pwm[CONTROL][index] &= ~0xE0;
data->pwm[CONTROL][index] |= (7 << 5);
- i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
+ i2c_smbus_write_byte_data(client, PWM_REG(index),
data->pwm[INPUT][index]);
i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index f136bf3ff40a..312ef3e98754 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -331,8 +331,7 @@ static const struct hwmon_chip_info aht10_chip_info = {
static int aht10_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_match_id(aht10_id, client);
- enum aht10_variant variant = id->driver_data;
+ enum aht10_variant variant = (uintptr_t)i2c_get_match_data(client);
struct device *device = &client->dev;
struct device *hwmon_dev;
struct aht10_data *data;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 9b02b304c2f5..ec94392fcb65 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -6,18 +6,24 @@
*
* Based on max6650.c:
* Copyright (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
+ *
+ * Conversion to regmap and with_info API:
+ * Copyright (C) 2024 Guenter Roeck <linux@roeck-us.net>
*/
-#include <linux/kernel.h> /* Needed for KERN_INFO */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
/*
* Addresses to scan.
@@ -36,519 +42,594 @@ module_param(pwminv, int, 0444);
static int init = 1; /*Power-on initialization.*/
module_param(init, int, 0444);
-enum chips { amc6821 };
-
-#define AMC6821_REG_DEV_ID 0x3D
-#define AMC6821_REG_COMP_ID 0x3E
-#define AMC6821_REG_CONF1 0x00
-#define AMC6821_REG_CONF2 0x01
-#define AMC6821_REG_CONF3 0x3F
-#define AMC6821_REG_CONF4 0x04
-#define AMC6821_REG_STAT1 0x02
-#define AMC6821_REG_STAT2 0x03
-#define AMC6821_REG_TDATA_LOW 0x08
-#define AMC6821_REG_TDATA_HI 0x09
-#define AMC6821_REG_LTEMP_HI 0x0A
-#define AMC6821_REG_RTEMP_HI 0x0B
-#define AMC6821_REG_LTEMP_LIMIT_MIN 0x15
-#define AMC6821_REG_LTEMP_LIMIT_MAX 0x14
-#define AMC6821_REG_RTEMP_LIMIT_MIN 0x19
-#define AMC6821_REG_RTEMP_LIMIT_MAX 0x18
-#define AMC6821_REG_LTEMP_CRIT 0x1B
-#define AMC6821_REG_RTEMP_CRIT 0x1D
-#define AMC6821_REG_PSV_TEMP 0x1C
-#define AMC6821_REG_DCY 0x22
-#define AMC6821_REG_LTEMP_FAN_CTRL 0x24
-#define AMC6821_REG_RTEMP_FAN_CTRL 0x25
-#define AMC6821_REG_DCY_LOW_TEMP 0x21
-
-#define AMC6821_REG_TACH_LLIMITL 0x10
-#define AMC6821_REG_TACH_LLIMITH 0x11
-#define AMC6821_REG_TACH_HLIMITL 0x12
-#define AMC6821_REG_TACH_HLIMITH 0x13
-
-#define AMC6821_CONF1_START 0x01
-#define AMC6821_CONF1_FAN_INT_EN 0x02
-#define AMC6821_CONF1_FANIE 0x04
-#define AMC6821_CONF1_PWMINV 0x08
-#define AMC6821_CONF1_FAN_FAULT_EN 0x10
-#define AMC6821_CONF1_FDRC0 0x20
-#define AMC6821_CONF1_FDRC1 0x40
-#define AMC6821_CONF1_THERMOVIE 0x80
-
-#define AMC6821_CONF2_PWM_EN 0x01
-#define AMC6821_CONF2_TACH_MODE 0x02
-#define AMC6821_CONF2_TACH_EN 0x04
-#define AMC6821_CONF2_RTFIE 0x08
-#define AMC6821_CONF2_LTOIE 0x10
-#define AMC6821_CONF2_RTOIE 0x20
-#define AMC6821_CONF2_PSVIE 0x40
-#define AMC6821_CONF2_RST 0x80
-
-#define AMC6821_CONF3_THERM_FAN_EN 0x80
-#define AMC6821_CONF3_REV_MASK 0x0F
-
-#define AMC6821_CONF4_OVREN 0x10
-#define AMC6821_CONF4_TACH_FAST 0x20
-#define AMC6821_CONF4_PSPR 0x40
-#define AMC6821_CONF4_MODE 0x80
-
-#define AMC6821_STAT1_RPM_ALARM 0x01
-#define AMC6821_STAT1_FANS 0x02
-#define AMC6821_STAT1_RTH 0x04
-#define AMC6821_STAT1_RTL 0x08
-#define AMC6821_STAT1_R_THERM 0x10
-#define AMC6821_STAT1_RTF 0x20
-#define AMC6821_STAT1_LTH 0x40
-#define AMC6821_STAT1_LTL 0x80
-
-#define AMC6821_STAT2_RTC 0x08
-#define AMC6821_STAT2_LTC 0x10
-#define AMC6821_STAT2_LPSV 0x20
-#define AMC6821_STAT2_L_THERM 0x40
-#define AMC6821_STAT2_THERM_IN 0x80
-
-enum {IDX_TEMP1_INPUT = 0, IDX_TEMP1_MIN, IDX_TEMP1_MAX,
- IDX_TEMP1_CRIT, IDX_TEMP2_INPUT, IDX_TEMP2_MIN,
- IDX_TEMP2_MAX, IDX_TEMP2_CRIT,
- TEMP_IDX_LEN, };
-
-static const u8 temp_reg[] = {AMC6821_REG_LTEMP_HI,
- AMC6821_REG_LTEMP_LIMIT_MIN,
- AMC6821_REG_LTEMP_LIMIT_MAX,
- AMC6821_REG_LTEMP_CRIT,
- AMC6821_REG_RTEMP_HI,
- AMC6821_REG_RTEMP_LIMIT_MIN,
- AMC6821_REG_RTEMP_LIMIT_MAX,
- AMC6821_REG_RTEMP_CRIT, };
-
-enum {IDX_FAN1_INPUT = 0, IDX_FAN1_MIN, IDX_FAN1_MAX,
- FAN1_IDX_LEN, };
-
-static const u8 fan_reg_low[] = {AMC6821_REG_TDATA_LOW,
- AMC6821_REG_TACH_LLIMITL,
- AMC6821_REG_TACH_HLIMITL, };
-
-
-static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI,
- AMC6821_REG_TACH_LLIMITH,
- AMC6821_REG_TACH_HLIMITH, };
+#define AMC6821_REG_DEV_ID 0x3D
+#define AMC6821_REG_COMP_ID 0x3E
+#define AMC6821_REG_CONF1 0x00
+#define AMC6821_REG_CONF2 0x01
+#define AMC6821_REG_CONF3 0x3F
+#define AMC6821_REG_CONF4 0x04
+#define AMC6821_REG_STAT1 0x02
+#define AMC6821_REG_STAT2 0x03
+#define AMC6821_REG_TEMP_LO 0x06
+#define AMC6821_REG_TDATA_LOW 0x08
+#define AMC6821_REG_TDATA_HI 0x09
+#define AMC6821_REG_LTEMP_HI 0x0A
+#define AMC6821_REG_RTEMP_HI 0x0B
+#define AMC6821_REG_LTEMP_LIMIT_MIN 0x15
+#define AMC6821_REG_LTEMP_LIMIT_MAX 0x14
+#define AMC6821_REG_RTEMP_LIMIT_MIN 0x19
+#define AMC6821_REG_RTEMP_LIMIT_MAX 0x18
+#define AMC6821_REG_LTEMP_CRIT 0x1B
+#define AMC6821_REG_RTEMP_CRIT 0x1D
+#define AMC6821_REG_PSV_TEMP 0x1C
+#define AMC6821_REG_DCY 0x22
+#define AMC6821_REG_LTEMP_FAN_CTRL 0x24
+#define AMC6821_REG_RTEMP_FAN_CTRL 0x25
+#define AMC6821_REG_DCY_LOW_TEMP 0x21
+
+#define AMC6821_REG_TACH_LLIMITL 0x10
+#define AMC6821_REG_TACH_HLIMITL 0x12
+#define AMC6821_REG_TACH_SETTINGL 0x1e
+
+#define AMC6821_CONF1_START BIT(0)
+#define AMC6821_CONF1_FAN_INT_EN BIT(1)
+#define AMC6821_CONF1_FANIE BIT(2)
+#define AMC6821_CONF1_PWMINV BIT(3)
+#define AMC6821_CONF1_FAN_FAULT_EN BIT(4)
+#define AMC6821_CONF1_FDRC0 BIT(5)
+#define AMC6821_CONF1_FDRC1 BIT(6)
+#define AMC6821_CONF1_THERMOVIE BIT(7)
+
+#define AMC6821_CONF2_PWM_EN BIT(0)
+#define AMC6821_CONF2_TACH_MODE BIT(1)
+#define AMC6821_CONF2_TACH_EN BIT(2)
+#define AMC6821_CONF2_RTFIE BIT(3)
+#define AMC6821_CONF2_LTOIE BIT(4)
+#define AMC6821_CONF2_RTOIE BIT(5)
+#define AMC6821_CONF2_PSVIE BIT(6)
+#define AMC6821_CONF2_RST BIT(7)
+
+#define AMC6821_CONF3_THERM_FAN_EN BIT(7)
+#define AMC6821_CONF3_REV_MASK GENMASK(3, 0)
+
+#define AMC6821_CONF4_OVREN BIT(4)
+#define AMC6821_CONF4_TACH_FAST BIT(5)
+#define AMC6821_CONF4_PSPR BIT(6)
+#define AMC6821_CONF4_MODE BIT(7)
+
+#define AMC6821_STAT1_RPM_ALARM BIT(0)
+#define AMC6821_STAT1_FANS BIT(1)
+#define AMC6821_STAT1_RTH BIT(2)
+#define AMC6821_STAT1_RTL BIT(3)
+#define AMC6821_STAT1_R_THERM BIT(4)
+#define AMC6821_STAT1_RTF BIT(5)
+#define AMC6821_STAT1_LTH BIT(6)
+#define AMC6821_STAT1_LTL BIT(7)
+
+#define AMC6821_STAT2_RTC BIT(3)
+#define AMC6821_STAT2_LTC BIT(4)
+#define AMC6821_STAT2_LPSV BIT(5)
+#define AMC6821_STAT2_L_THERM BIT(6)
+#define AMC6821_STAT2_THERM_IN BIT(7)
+
+#define AMC6821_TEMP_SLOPE_MASK GENMASK(2, 0)
+#define AMC6821_TEMP_LIMIT_MASK GENMASK(7, 3)
/*
* Client data (each client gets its own)
*/
struct amc6821_data {
- struct i2c_client *client;
+ struct regmap *regmap;
struct mutex update_lock;
- bool valid; /* false until following fields are valid */
- unsigned long last_updated; /* in jiffies */
+};
- /* register values */
- int temp[TEMP_IDX_LEN];
+/*
+ * Return 0 on success or negative error code.
+ *
+ * temps returns a set of three temperatures, in °C:
+ * temps[0]: Passive cooling temperature, applies to both channels
+ * temps[1]: Low temperature, start slope calculations
+ * temps[2]: High temperature
+ *
+ * Channel 0: local, channel 1: remote.
+ */
+static int amc6821_get_auto_point_temps(struct regmap *regmap, int channel, u8 *temps)
+{
+ u32 pwm, regval;
+ int err;
- u16 fan[FAN1_IDX_LEN];
- u8 fan1_div;
+ err = regmap_read(regmap, AMC6821_REG_DCY_LOW_TEMP, &pwm);
+ if (err)
+ return err;
- u8 pwm1;
- u8 temp1_auto_point_temp[3];
- u8 temp2_auto_point_temp[3];
- u8 pwm1_auto_point_pwm[3];
- u8 pwm1_enable;
- u8 pwm1_auto_channels_temp;
+ err = regmap_read(regmap, AMC6821_REG_PSV_TEMP, &regval);
+ if (err)
+ return err;
+ temps[0] = regval;
- u8 stat1;
- u8 stat2;
-};
+ err = regmap_read(regmap,
+ channel ? AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL,
+ &regval);
+ if (err)
+ return err;
+ temps[1] = FIELD_GET(AMC6821_TEMP_LIMIT_MASK, regval) * 4;
+
+ /* slope is 32 >> <slope bits> in °C */
+ regval = 32 >> FIELD_GET(AMC6821_TEMP_SLOPE_MASK, regval);
+ if (regval)
+ temps[2] = temps[1] + DIV_ROUND_CLOSEST(255 - pwm, regval);
+ else
+ temps[2] = 255;
+
+ return 0;
+}
-static struct amc6821_data *amc6821_update_device(struct device *dev)
+static int amc6821_temp_read_values(struct regmap *regmap, u32 attr, int channel, long *val)
{
- struct amc6821_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int timeout = HZ;
- u8 reg;
- int i;
+ int reg, err;
+ u32 regval;
- mutex_lock(&data->update_lock);
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = channel ? AMC6821_REG_RTEMP_HI : AMC6821_REG_LTEMP_HI;
+ break;
+ case hwmon_temp_min:
+ reg = channel ? AMC6821_REG_RTEMP_LIMIT_MIN : AMC6821_REG_LTEMP_LIMIT_MIN;
+ break;
+ case hwmon_temp_max:
+ reg = channel ? AMC6821_REG_RTEMP_LIMIT_MAX : AMC6821_REG_LTEMP_LIMIT_MAX;
+ break;
+ case hwmon_temp_crit:
+ reg = channel ? AMC6821_REG_RTEMP_CRIT : AMC6821_REG_LTEMP_CRIT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ err = regmap_read(regmap, reg, &regval);
+ if (err)
+ return err;
+ *val = sign_extend32(regval, 7) * 1000;
+ return 0;
+}
- if (time_after(jiffies, data->last_updated + timeout) ||
- !data->valid) {
-
- for (i = 0; i < TEMP_IDX_LEN; i++)
- data->temp[i] = (int8_t)i2c_smbus_read_byte_data(
- client, temp_reg[i]);
-
- data->stat1 = i2c_smbus_read_byte_data(client,
- AMC6821_REG_STAT1);
- data->stat2 = i2c_smbus_read_byte_data(client,
- AMC6821_REG_STAT2);
-
- data->pwm1 = i2c_smbus_read_byte_data(client,
- AMC6821_REG_DCY);
- for (i = 0; i < FAN1_IDX_LEN; i++) {
- data->fan[i] = i2c_smbus_read_byte_data(
- client,
- fan_reg_low[i]);
- data->fan[i] += i2c_smbus_read_byte_data(
- client,
- fan_reg_hi[i]) << 8;
- }
- data->fan1_div = i2c_smbus_read_byte_data(client,
- AMC6821_REG_CONF4);
- data->fan1_div = data->fan1_div & AMC6821_CONF4_PSPR ? 4 : 2;
-
- data->pwm1_auto_point_pwm[0] = 0;
- data->pwm1_auto_point_pwm[2] = 255;
- data->pwm1_auto_point_pwm[1] = i2c_smbus_read_byte_data(client,
- AMC6821_REG_DCY_LOW_TEMP);
-
- data->temp1_auto_point_temp[0] =
- i2c_smbus_read_byte_data(client,
- AMC6821_REG_PSV_TEMP);
- data->temp2_auto_point_temp[0] =
- data->temp1_auto_point_temp[0];
- reg = i2c_smbus_read_byte_data(client,
- AMC6821_REG_LTEMP_FAN_CTRL);
- data->temp1_auto_point_temp[1] = (reg & 0xF8) >> 1;
- reg &= 0x07;
- reg = 0x20 >> reg;
- if (reg > 0)
- data->temp1_auto_point_temp[2] =
- data->temp1_auto_point_temp[1] +
- (data->pwm1_auto_point_pwm[2] -
- data->pwm1_auto_point_pwm[1]) / reg;
- else
- data->temp1_auto_point_temp[2] = 255;
-
- reg = i2c_smbus_read_byte_data(client,
- AMC6821_REG_RTEMP_FAN_CTRL);
- data->temp2_auto_point_temp[1] = (reg & 0xF8) >> 1;
- reg &= 0x07;
- reg = 0x20 >> reg;
- if (reg > 0)
- data->temp2_auto_point_temp[2] =
- data->temp2_auto_point_temp[1] +
- (data->pwm1_auto_point_pwm[2] -
- data->pwm1_auto_point_pwm[1]) / reg;
- else
- data->temp2_auto_point_temp[2] = 255;
-
- reg = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
- reg = (reg >> 5) & 0x3;
- switch (reg) {
- case 0: /*open loop: software sets pwm1*/
- data->pwm1_auto_channels_temp = 0;
- data->pwm1_enable = 1;
+static int amc6821_read_alarms(struct regmap *regmap, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ int reg, mask, err;
+ u32 regval;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_min_alarm:
+ reg = AMC6821_REG_STAT1;
+ mask = channel ? AMC6821_STAT1_RTL : AMC6821_STAT1_LTL;
break;
- case 2: /*closed loop: remote T (temp2)*/
- data->pwm1_auto_channels_temp = 2;
- data->pwm1_enable = 2;
+ case hwmon_temp_max_alarm:
+ reg = AMC6821_REG_STAT1;
+ mask = channel ? AMC6821_STAT1_RTH : AMC6821_STAT1_LTH;
break;
- case 3: /*closed loop: local and remote T (temp2)*/
- data->pwm1_auto_channels_temp = 3;
- data->pwm1_enable = 3;
+ case hwmon_temp_crit_alarm:
+ reg = AMC6821_REG_STAT2;
+ mask = channel ? AMC6821_STAT2_RTC : AMC6821_STAT2_LTC;
break;
- case 1: /*
- * semi-open loop: software sets rpm, chip controls
- * pwm1, currently not implemented
- */
- data->pwm1_auto_channels_temp = 0;
- data->pwm1_enable = 0;
+ case hwmon_temp_fault:
+ reg = AMC6821_REG_STAT1;
+ mask = AMC6821_STAT1_RTF;
break;
+ default:
+ return -EOPNOTSUPP;
}
-
- data->last_updated = jiffies;
- data->valid = true;
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_fault:
+ reg = AMC6821_REG_STAT1;
+ mask = AMC6821_STAT1_FANS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- mutex_unlock(&data->update_lock);
- return data;
+ err = regmap_read(regmap, reg, &regval);
+ if (err)
+ return err;
+ *val = !!(regval & mask);
+ return 0;
}
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static int amc6821_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
- struct amc6821_data *data = amc6821_update_device(dev);
- int ix = to_sensor_dev_attr(devattr)->index;
+ struct amc6821_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", data->temp[ix] * 1000);
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return amc6821_temp_read_values(data->regmap, attr, channel, val);
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ return amc6821_read_alarms(data->regmap, hwmon_temp, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int amc6821_temp_write(struct device *dev, u32 attr, int channel, long val)
{
struct amc6821_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int ix = to_sensor_dev_attr(attr)->index;
- long val;
+ int reg;
- int ret = kstrtol(buf, 10, &val);
- if (ret)
- return ret;
- val = clamp_val(val / 1000, -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
- mutex_lock(&data->update_lock);
- data->temp[ix] = val;
- if (i2c_smbus_write_byte_data(client, temp_reg[ix], data->temp[ix])) {
- dev_err(&client->dev, "Register write error, aborting.\n");
- count = -EIO;
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = channel ? AMC6821_REG_RTEMP_LIMIT_MIN : AMC6821_REG_LTEMP_LIMIT_MIN;
+ break;
+ case hwmon_temp_max:
+ reg = channel ? AMC6821_REG_RTEMP_LIMIT_MAX : AMC6821_REG_LTEMP_LIMIT_MAX;
+ break;
+ case hwmon_temp_crit:
+ reg = channel ? AMC6821_REG_RTEMP_CRIT : AMC6821_REG_LTEMP_CRIT;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- mutex_unlock(&data->update_lock);
- return count;
+ return regmap_write(data->regmap, reg, val);
}
-static ssize_t temp_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int amc6821_pwm_read(struct device *dev, u32 attr, long *val)
{
- struct amc6821_data *data = amc6821_update_device(dev);
- int ix = to_sensor_dev_attr(devattr)->index;
- u8 flag;
+ struct amc6821_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 regval;
+ int err;
- switch (ix) {
- case IDX_TEMP1_MIN:
- flag = data->stat1 & AMC6821_STAT1_LTL;
- break;
- case IDX_TEMP1_MAX:
- flag = data->stat1 & AMC6821_STAT1_LTH;
+ switch (attr) {
+ case hwmon_pwm_enable:
+ err = regmap_read(regmap, AMC6821_REG_CONF1, &regval);
+ if (err)
+ return err;
+ switch (regval & (AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1)) {
+ case 0:
+ *val = 1; /* manual */
+ break;
+ case AMC6821_CONF1_FDRC0:
+ *val = 4; /* target rpm (fan1_target) controlled */
+ break;
+ case AMC6821_CONF1_FDRC1:
+ *val = 2; /* remote temp controlled */
+ break;
+ default:
+ *val = 3; /* max(local, remote) temp controlled */
+ break;
+ }
+ return 0;
+ case hwmon_pwm_mode:
+ err = regmap_read(regmap, AMC6821_REG_CONF2, &regval);
+ if (err)
+ return err;
+ *val = !!(regval & AMC6821_CONF2_TACH_MODE);
+ return 0;
+ case hwmon_pwm_auto_channels_temp:
+ err = regmap_read(regmap, AMC6821_REG_CONF1, &regval);
+ if (err)
+ return err;
+ switch (regval & (AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1)) {
+ case 0:
+ case AMC6821_CONF1_FDRC0:
+ *val = 0; /* manual or target rpm controlled */
+ break;
+ case AMC6821_CONF1_FDRC1:
+ *val = 2; /* remote temp controlled */
+ break;
+ default:
+ *val = 3; /* max(local, remote) temp controlled */
+ break;
+ }
+ return 0;
+ case hwmon_pwm_input:
+ err = regmap_read(regmap, AMC6821_REG_DCY, &regval);
+ if (err)
+ return err;
+ *val = regval;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int amc6821_pwm_write(struct device *dev, u32 attr, long val)
+{
+ struct amc6821_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 mode;
+
+ switch (attr) {
+ case hwmon_pwm_enable:
+ switch (val) {
+ case 1:
+ mode = 0;
+ break;
+ case 2:
+ mode = AMC6821_CONF1_FDRC1;
+ break;
+ case 3:
+ mode = AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1;
+ break;
+ case 4:
+ mode = AMC6821_CONF1_FDRC0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return regmap_update_bits(regmap, AMC6821_REG_CONF1,
+ AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1,
+ mode);
+ case hwmon_pwm_mode:
+ if (val < 0 || val > 1)
+ return -EINVAL;
+ return regmap_update_bits(regmap, AMC6821_REG_CONF2,
+ AMC6821_CONF2_TACH_MODE,
+ val ? AMC6821_CONF2_TACH_MODE : 0);
break;
- case IDX_TEMP1_CRIT:
- flag = data->stat2 & AMC6821_STAT2_LTC;
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ return regmap_write(regmap, AMC6821_REG_DCY, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
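The pwm1_enable write handling above and the matching read path reduce to one mapping between the sysfs value and the two fan-drive-control bits, summarised here as a comment block (derived from the code above, not added by the patch):

/*
 * pwm1_enable   FDRC1  FDRC0   fan control mode
 *      1          0      0     manual duty cycle
 *      2          1      0     remote temperature controlled
 *      3          1      1     max(local, remote) temperature controlled
 *      4          0      1     fan1_target (RPM) controlled
 */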
+
+static int amc6821_fan_read_rpm(struct regmap *regmap, u32 attr, long *val)
+{
+ int reg, err;
+ u8 regs[2];
+ u32 regval;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ reg = AMC6821_REG_TDATA_LOW;
break;
- case IDX_TEMP2_MIN:
- flag = data->stat1 & AMC6821_STAT1_RTL;
+ case hwmon_fan_min:
+ reg = AMC6821_REG_TACH_LLIMITL;
break;
- case IDX_TEMP2_MAX:
- flag = data->stat1 & AMC6821_STAT1_RTH;
+ case hwmon_fan_max:
+ reg = AMC6821_REG_TACH_HLIMITL;
break;
- case IDX_TEMP2_CRIT:
- flag = data->stat2 & AMC6821_STAT2_RTC;
+ case hwmon_fan_target:
+ reg = AMC6821_REG_TACH_SETTINGL;
break;
default:
- dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
- if (flag)
- return sprintf(buf, "1");
- else
- return sprintf(buf, "0");
-}
-static ssize_t temp2_fault_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct amc6821_data *data = amc6821_update_device(dev);
- if (data->stat1 & AMC6821_STAT1_RTF)
- return sprintf(buf, "1");
- else
- return sprintf(buf, "0");
-}
+ err = regmap_bulk_read(regmap, reg, regs, 2);
+ if (err)
+ return err;
-static ssize_t pwm1_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct amc6821_data *data = amc6821_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm1);
+ regval = (regs[1] << 8) | regs[0];
+ *val = regval ? 6000000 / regval : 0;
+
+ return 0;
}
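The helper above and the write path further down share the same conversion, RPM = 6,000,000 / count, applied in both directions. A small illustrative sketch (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int count = 3000;			/* example 16-bit tach reading */
	long rpm = count ? 6000000 / count : 0;		/* 2000 RPM */
	unsigned int back = rpm ? 6000000 / rpm : 0xffff; /* 3000; the driver re-clamps to 16 bits on write */

	printf("count %u -> %ld RPM -> count %u\n", count, rpm, back);
	return 0;
}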
-static ssize_t pwm1_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static int amc6821_fan_read(struct device *dev, u32 attr, long *val)
{
struct amc6821_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int ret = kstrtol(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&data->update_lock);
- data->pwm1 = clamp_val(val , 0, 255);
- i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
- mutex_unlock(&data->update_lock);
- return count;
-}
+ struct regmap *regmap = data->regmap;
+ u32 regval;
+ int err;
-static ssize_t pwm1_enable_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct amc6821_data *data = amc6821_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm1_enable);
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ case hwmon_fan_target:
+ return amc6821_fan_read_rpm(regmap, attr, val);
+ case hwmon_fan_fault:
+ return amc6821_read_alarms(regmap, hwmon_fan, attr, 0, val);
+ case hwmon_fan_pulses:
+ err = regmap_read(regmap, AMC6821_REG_CONF4, &regval);
+ if (err)
+ return err;
+ *val = (regval & AMC6821_CONF4_PSPR) ? 4 : 2;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t pwm1_enable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int amc6821_fan_write(struct device *dev, u32 attr, long val)
{
struct amc6821_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int config = kstrtol(buf, 10, &val);
- if (config)
- return config;
-
- mutex_lock(&data->update_lock);
- config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
- if (config < 0) {
- dev_err(&client->dev,
- "Error reading configuration register, aborting.\n");
- count = config;
- goto unlock;
+ struct regmap *regmap = data->regmap;
+ u8 regs[2];
+ int reg;
+
+ if (attr == hwmon_fan_pulses) {
+ if (val != 2 && val != 4)
+ return -EINVAL;
+ return regmap_update_bits(regmap, AMC6821_REG_CONF4,
+ AMC6821_CONF4_PSPR,
+ val == 4 ? AMC6821_CONF4_PSPR : 0);
}
- switch (val) {
- case 1:
- config &= ~AMC6821_CONF1_FDRC0;
- config &= ~AMC6821_CONF1_FDRC1;
+ if (val < 0)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_fan_min:
+ if (!val) /* no unlimited minimum speed */
+ return -EINVAL;
+ reg = AMC6821_REG_TACH_LLIMITL;
break;
- case 2:
- config &= ~AMC6821_CONF1_FDRC0;
- config |= AMC6821_CONF1_FDRC1;
+ case hwmon_fan_max:
+ reg = AMC6821_REG_TACH_HLIMITL;
break;
- case 3:
- config |= AMC6821_CONF1_FDRC0;
- config |= AMC6821_CONF1_FDRC1;
+ case hwmon_fan_target:
+ if (!val) /* no unlimited target speed */
+ return -EINVAL;
+ reg = AMC6821_REG_TACH_SETTINGL;
break;
default:
- count = -EINVAL;
- goto unlock;
+ return -EOPNOTSUPP;
}
- if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) {
- dev_err(&client->dev,
- "Configuration register write error, aborting.\n");
- count = -EIO;
- }
-unlock:
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t pwm1_auto_channels_temp_show(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct amc6821_data *data = amc6821_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm1_auto_channels_temp);
+ val = val ? 6000000 / clamp_val(val, 1, 6000000) : 0;
+ val = clamp_val(val, 0, 0xffff);
+
+ regs[0] = val & 0xff;
+ regs[1] = val >> 8;
+
+ return regmap_bulk_write(data->regmap, reg, regs, 2);
}
static ssize_t temp_auto_point_temp_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
+ struct amc6821_data *data = dev_get_drvdata(dev);
int ix = to_sensor_dev_attr_2(devattr)->index;
int nr = to_sensor_dev_attr_2(devattr)->nr;
- struct amc6821_data *data = amc6821_update_device(dev);
- switch (nr) {
- case 1:
- return sprintf(buf, "%d\n",
- data->temp1_auto_point_temp[ix] * 1000);
- case 2:
- return sprintf(buf, "%d\n",
- data->temp2_auto_point_temp[ix] * 1000);
- default:
- dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
- return -EINVAL;
- }
+ u8 temps[3];
+ int err;
+
+ mutex_lock(&data->update_lock);
+ err = amc6821_get_auto_point_temps(data->regmap, nr, temps);
+ mutex_unlock(&data->update_lock);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", temps[ix] * 1000);
}
static ssize_t pwm1_auto_point_pwm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
+ struct amc6821_data *data = dev_get_drvdata(dev);
int ix = to_sensor_dev_attr(devattr)->index;
- struct amc6821_data *data = amc6821_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm1_auto_point_pwm[ix]);
+ u32 val;
+ int err;
+
+ switch (ix) {
+ case 0:
+ val = 0;
+ break;
+ case 1:
+ err = regmap_read(data->regmap, AMC6821_REG_DCY_LOW_TEMP, &val);
+ if (err)
+ return err;
+ break;
+ default:
+ val = 255;
+ break;
+ }
+ return sysfs_emit(buf, "%d\n", val);
}
-static inline ssize_t set_slope_register(struct i2c_client *client,
- u8 reg,
- u8 dpwm,
- u8 *ptemp)
+/*
+ * Set TEMP[0-4] (low temperature) and SLP[0-2] (slope) of local or remote
+ * TEMP-FAN control register.
+ *
+ * Return 0 on success or negative error code.
+ *
+ * Channel 0: local, channel 1: remote
+ */
+static inline int set_slope_register(struct regmap *regmap, int channel, u8 *temps)
{
- int dt;
- u8 tmp;
+ u8 regval = FIELD_PREP(AMC6821_TEMP_LIMIT_MASK, temps[1] / 4);
+ u8 tmp, dpwm;
+ int err, dt;
+ u32 pwm;
+
+ err = regmap_read(regmap, AMC6821_REG_DCY_LOW_TEMP, &pwm);
+ if (err)
+ return err;
- dt = ptemp[2]-ptemp[1];
+ dpwm = 255 - pwm;
+
+ dt = temps[2] - temps[1];
for (tmp = 4; tmp > 0; tmp--) {
- if (dt * (0x20 >> tmp) >= dpwm)
+ if (dt * (32 >> tmp) >= dpwm)
break;
}
- tmp |= (ptemp[1] & 0x7C) << 1;
- if (i2c_smbus_write_byte_data(client,
- reg, tmp)) {
- dev_err(&client->dev, "Register write error, aborting.\n");
- return -EIO;
- }
- return 0;
+ regval |= FIELD_PREP(AMC6821_TEMP_SLOPE_MASK, tmp);
+
+ return regmap_write(regmap,
+ channel ? AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL,
+ regval);
}
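A short worked example of the slope selection (values chosen for illustration, not taken from the patch):

/*
 * With pwm1_auto_point2_pwm = 75 the span is dpwm = 255 - 75 = 180, and for
 * a temperature span dt = temps[2] - temps[1] = 40 degrees C the loop above
 * evaluates
 *
 *   tmp = 4: 40 * (32 >> 4) =  80 < 180
 *   tmp = 3: 40 * (32 >> 3) = 160 < 180
 *   tmp = 2: 40 * (32 >> 2) = 320 >= 180  -> stop
 *
 * so SLP[0-2] is written as 2 for this configuration.
 */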
static ssize_t temp_auto_point_temp_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct amc6821_data *data = amc6821_update_device(dev);
- struct i2c_client *client = data->client;
+ struct amc6821_data *data = dev_get_drvdata(dev);
int ix = to_sensor_dev_attr_2(attr)->index;
int nr = to_sensor_dev_attr_2(attr)->nr;
- u8 *ptemp;
- u8 reg;
- int dpwm;
+ struct regmap *regmap = data->regmap;
+ u8 temps[3], otemps[3];
long val;
- int ret = kstrtol(buf, 10, &val);
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
- switch (nr) {
- case 1:
- ptemp = data->temp1_auto_point_temp;
- reg = AMC6821_REG_LTEMP_FAN_CTRL;
- break;
- case 2:
- ptemp = data->temp2_auto_point_temp;
- reg = AMC6821_REG_RTEMP_FAN_CTRL;
- break;
- default:
- dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
- return -EINVAL;
- }
-
mutex_lock(&data->update_lock);
- data->valid = false;
+
+ ret = amc6821_get_auto_point_temps(data->regmap, nr, temps);
+ if (ret)
+ goto unlock;
switch (ix) {
case 0:
- ptemp[0] = clamp_val(val / 1000, 0,
- data->temp1_auto_point_temp[1]);
- ptemp[0] = clamp_val(ptemp[0], 0,
- data->temp2_auto_point_temp[1]);
- ptemp[0] = clamp_val(ptemp[0], 0, 63);
- if (i2c_smbus_write_byte_data(
- client,
- AMC6821_REG_PSV_TEMP,
- ptemp[0])) {
- dev_err(&client->dev,
- "Register write error, aborting.\n");
- count = -EIO;
- }
- goto EXIT;
+ /*
+ * Passive cooling temperature. Range limit against low limit
+ * of both channels.
+ */
+ ret = amc6821_get_auto_point_temps(data->regmap, 1 - nr, otemps);
+ if (ret)
+ goto unlock;
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 63000), 1000);
+ val = clamp_val(val, 0, min(temps[1], otemps[1]));
+ ret = regmap_write(regmap, AMC6821_REG_PSV_TEMP, val);
+ break;
case 1:
- ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124);
- ptemp[1] &= 0x7C;
- ptemp[2] = clamp_val(ptemp[2], ptemp[1] + 1, 255);
+ /*
+ * Low limit; must be between passive and high limit,
+ * and not exceed 124. Step size is 4 degrees C.
+ */
+ val = clamp_val(val, DIV_ROUND_UP(temps[0], 4) * 4000, 124000);
+ temps[1] = DIV_ROUND_CLOSEST(val, 4000) * 4;
+ val = temps[1] / 4;
+ /* Auto-adjust high limit if necessary */
+ temps[2] = clamp_val(temps[2], temps[1] + 1, 255);
+ ret = set_slope_register(regmap, nr, temps);
break;
case 2:
- ptemp[2] = clamp_val(val / 1000, ptemp[1]+1, 255);
+ /* high limit, must be higher than low limit */
+ val = clamp_val(val, (temps[1] + 1) * 1000, 255000);
+ temps[2] = DIV_ROUND_CLOSEST(val, 1000);
+ ret = set_slope_register(regmap, nr, temps);
break;
default:
- dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
- count = -EINVAL;
- goto EXIT;
+ ret = -EINVAL;
+ break;
}
- dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
- if (set_slope_register(client, reg, dpwm, ptemp))
- count = -EIO;
-
-EXIT:
+unlock:
mutex_unlock(&data->update_lock);
- return count;
+ return ret ? : count;
}
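The 4 degree C step handling for the low limit is easiest to see with concrete numbers (illustrative only, not from the patch):

/*
 * With a passive-cooling point temps[0] = 30, the lower bound above is
 * DIV_ROUND_UP(30, 4) * 4000 = 32000.  A write of 45000 (45 degrees C)
 * passes the clamp unchanged and is then rounded to the register step:
 * temps[1] = DIV_ROUND_CLOSEST(45000, 4000) * 4 = 44, so the stored low
 * limit snaps to 44 degrees C and val becomes 11 (the 4-degree step count).
 */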
static ssize_t pwm1_auto_point_pwm_store(struct device *dev,
@@ -556,204 +637,52 @@ static ssize_t pwm1_auto_point_pwm_store(struct device *dev,
const char *buf, size_t count)
{
struct amc6821_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int dpwm;
- long val;
- int ret = kstrtol(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&data->update_lock);
- data->pwm1_auto_point_pwm[1] = clamp_val(val, 0, 254);
- if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
- data->pwm1_auto_point_pwm[1])) {
- dev_err(&client->dev, "Register write error, aborting.\n");
- count = -EIO;
- goto EXIT;
- }
- dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
- if (set_slope_register(client, AMC6821_REG_LTEMP_FAN_CTRL, dpwm,
- data->temp1_auto_point_temp)) {
- count = -EIO;
- goto EXIT;
- }
- if (set_slope_register(client, AMC6821_REG_RTEMP_FAN_CTRL, dpwm,
- data->temp2_auto_point_temp)) {
- count = -EIO;
- goto EXIT;
- }
-
-EXIT:
- data->valid = false;
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct amc6821_data *data = amc6821_update_device(dev);
- int ix = to_sensor_dev_attr(devattr)->index;
- if (0 == data->fan[ix])
- return sprintf(buf, "0");
- return sprintf(buf, "%d\n", (int)(6000000 / data->fan[ix]));
-}
+ struct regmap *regmap = data->regmap;
+ int i, ret;
+ u8 val;
-static ssize_t fan1_fault_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct amc6821_data *data = amc6821_update_device(dev);
- if (data->stat1 & AMC6821_STAT1_FANS)
- return sprintf(buf, "1");
- else
- return sprintf(buf, "0");
-}
-
-static ssize_t fan_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct amc6821_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int ix = to_sensor_dev_attr(attr)->index;
- int ret = kstrtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
return ret;
- val = 1 > val ? 0xFFFF : 6000000/val;
mutex_lock(&data->update_lock);
- data->fan[ix] = (u16) clamp_val(val, 1, 0xFFFF);
- if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
- data->fan[ix] & 0xFF)) {
- dev_err(&client->dev, "Register write error, aborting.\n");
- count = -EIO;
- goto EXIT;
- }
- if (i2c_smbus_write_byte_data(client,
- fan_reg_hi[ix], data->fan[ix] >> 8)) {
- dev_err(&client->dev, "Register write error, aborting.\n");
- count = -EIO;
- }
-EXIT:
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t fan1_div_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct amc6821_data *data = amc6821_update_device(dev);
- return sprintf(buf, "%d\n", data->fan1_div);
-}
+ ret = regmap_write(regmap, AMC6821_REG_DCY_LOW_TEMP, val);
+ if (ret)
+ goto unlock;
-static ssize_t fan1_div_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct amc6821_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int config = kstrtol(buf, 10, &val);
- if (config)
- return config;
+ for (i = 0; i < 2; i++) {
+ u8 temps[3];
- mutex_lock(&data->update_lock);
- config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
- if (config < 0) {
- dev_err(&client->dev,
- "Error reading configuration register, aborting.\n");
- count = config;
- goto EXIT;
- }
- switch (val) {
- case 2:
- config &= ~AMC6821_CONF4_PSPR;
- data->fan1_div = 2;
- break;
- case 4:
- config |= AMC6821_CONF4_PSPR;
- data->fan1_div = 4;
- break;
- default:
- count = -EINVAL;
- goto EXIT;
- }
- if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) {
- dev_err(&client->dev,
- "Configuration register write error, aborting.\n");
- count = -EIO;
+ ret = amc6821_get_auto_point_temps(regmap, i, temps);
+ if (ret)
+ break;
+ ret = set_slope_register(regmap, i, temps);
+ if (ret)
+ break;
}
-EXIT:
+unlock:
mutex_unlock(&data->update_lock);
- return count;
+ return ret ? : count;
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, IDX_TEMP1_INPUT);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, IDX_TEMP1_MIN);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, IDX_TEMP1_MAX);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, IDX_TEMP1_CRIT);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, temp_alarm, IDX_TEMP1_MIN);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, temp_alarm, IDX_TEMP1_MAX);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, temp_alarm, IDX_TEMP1_CRIT);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, IDX_TEMP2_INPUT);
-static SENSOR_DEVICE_ATTR_RW(temp2_min, temp, IDX_TEMP2_MIN);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, IDX_TEMP2_MAX);
-static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp, IDX_TEMP2_CRIT);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp2_fault, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, temp_alarm, IDX_TEMP2_MIN);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, temp_alarm, IDX_TEMP2_MAX);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, temp_alarm, IDX_TEMP2_CRIT);
-static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, IDX_FAN1_INPUT);
-static SENSOR_DEVICE_ATTR_RW(fan1_min, fan, IDX_FAN1_MIN);
-static SENSOR_DEVICE_ATTR_RW(fan1_max, fan, IDX_FAN1_MAX);
-static SENSOR_DEVICE_ATTR_RO(fan1_fault, fan1_fault, 0);
-static SENSOR_DEVICE_ATTR_RW(fan1_div, fan1_div, 0);
-
-static SENSOR_DEVICE_ATTR_RW(pwm1, pwm1, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm1_enable, 0);
static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm1_auto_point_pwm, 0);
static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point2_pwm, pwm1_auto_point_pwm, 1);
static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point3_pwm, pwm1_auto_point_pwm, 2);
-static SENSOR_DEVICE_ATTR_RO(pwm1_auto_channels_temp, pwm1_auto_channels_temp,
- 0);
static SENSOR_DEVICE_ATTR_2_RO(temp1_auto_point1_temp, temp_auto_point_temp,
- 1, 0);
+ 0, 0);
static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_point2_temp, temp_auto_point_temp,
- 1, 1);
+ 0, 1);
static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_point3_temp, temp_auto_point_temp,
- 1, 2);
+ 0, 2);
static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_point1_temp, temp_auto_point_temp,
- 2, 0);
+ 1, 0);
static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_point2_temp, temp_auto_point_temp,
- 2, 1);
+ 1, 1);
static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_point3_temp, temp_auto_point_temp,
- 2, 2);
+ 1, 2);
static struct attribute *amc6821_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_min.dev_attr.attr,
- &sensor_dev_attr_fan1_max.dev_attr.attr,
- &sensor_dev_attr_fan1_fault.dev_attr.attr,
- &sensor_dev_attr_fan1_div.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
@@ -765,13 +694,118 @@ static struct attribute *amc6821_attrs[] = {
&sensor_dev_attr_temp2_auto_point3_temp.dev_attr.attr,
NULL
};
-
ATTRIBUTE_GROUPS(amc6821);
+static int amc6821_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return amc6821_temp_read(dev, attr, channel, val);
+ case hwmon_fan:
+ return amc6821_fan_read(dev, attr, val);
+ case hwmon_pwm:
+ return amc6821_pwm_read(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int amc6821_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return amc6821_temp_write(dev, attr, channel, val);
+ case hwmon_fan:
+ return amc6821_fan_write(dev, attr, val);
+ case hwmon_pwm:
+ return amc6821_pwm_write(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t amc6821_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ case hwmon_fan_pulses:
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ case hwmon_fan_target:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_mode:
+ case hwmon_pwm_enable:
+ case hwmon_pwm_input:
+ return 0644;
+ case hwmon_pwm_auto_channels_temp:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info * const amc6821_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_FAULT),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET | HWMON_F_PULSES | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE | HWMON_PWM_MODE |
+ HWMON_PWM_AUTO_CHANNELS_TEMP),
+ NULL
+};
+
+static const struct hwmon_ops amc6821_hwmon_ops = {
+ .is_visible = amc6821_is_visible,
+ .read = amc6821_read,
+ .write = amc6821_write,
+};
+
+static const struct hwmon_chip_info amc6821_chip_info = {
+ .ops = &amc6821_hwmon_ops,
+ .info = amc6821_info,
+};
+
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int amc6821_detect(
- struct i2c_client *client,
- struct i2c_board_info *info)
+static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int address = client->addr;
@@ -814,121 +848,90 @@ static int amc6821_detect(
return 0;
}
-static int amc6821_init_client(struct i2c_client *client)
+static int amc6821_init_client(struct amc6821_data *data)
{
- int config;
- int err = -EIO;
+ struct regmap *regmap = data->regmap;
+ int err;
if (init) {
- config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
-
- if (config < 0) {
- dev_err(&client->dev,
- "Error reading configuration register, aborting.\n");
- return err;
- }
-
- config |= AMC6821_CONF4_MODE;
-
- if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4,
- config)) {
- dev_err(&client->dev,
- "Configuration register write error, aborting.\n");
- return err;
- }
-
- config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF3);
-
- if (config < 0) {
- dev_err(&client->dev,
- "Error reading configuration register, aborting.\n");
- return err;
- }
-
- dev_info(&client->dev, "Revision %d\n", config & 0x0f);
-
- config &= ~AMC6821_CONF3_THERM_FAN_EN;
-
- if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF3,
- config)) {
- dev_err(&client->dev,
- "Configuration register write error, aborting.\n");
- return err;
- }
-
- config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF2);
-
- if (config < 0) {
- dev_err(&client->dev,
- "Error reading configuration register, aborting.\n");
+ err = regmap_set_bits(regmap, AMC6821_REG_CONF4, AMC6821_CONF4_MODE);
+ if (err)
return err;
- }
-
- config &= ~AMC6821_CONF2_RTFIE;
- config &= ~AMC6821_CONF2_LTOIE;
- config &= ~AMC6821_CONF2_RTOIE;
- if (i2c_smbus_write_byte_data(client,
- AMC6821_REG_CONF2, config)) {
- dev_err(&client->dev,
- "Configuration register write error, aborting.\n");
+ err = regmap_clear_bits(regmap, AMC6821_REG_CONF3, AMC6821_CONF3_THERM_FAN_EN);
+ if (err)
return err;
- }
-
- config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
-
- if (config < 0) {
- dev_err(&client->dev,
- "Error reading configuration register, aborting.\n");
+ err = regmap_clear_bits(regmap, AMC6821_REG_CONF2,
+ AMC6821_CONF2_RTFIE |
+ AMC6821_CONF2_LTOIE |
+ AMC6821_CONF2_RTOIE);
+ if (err)
return err;
- }
- config &= ~AMC6821_CONF1_THERMOVIE;
- config &= ~AMC6821_CONF1_FANIE;
- config |= AMC6821_CONF1_START;
- if (pwminv)
- config |= AMC6821_CONF1_PWMINV;
- else
- config &= ~AMC6821_CONF1_PWMINV;
-
- if (i2c_smbus_write_byte_data(
- client, AMC6821_REG_CONF1, config)) {
- dev_err(&client->dev,
- "Configuration register write error, aborting.\n");
+ err = regmap_update_bits(regmap, AMC6821_REG_CONF1,
+ AMC6821_CONF1_THERMOVIE | AMC6821_CONF1_FANIE |
+ AMC6821_CONF1_START | AMC6821_CONF1_PWMINV,
+ AMC6821_CONF1_START |
+ (pwminv ? AMC6821_CONF1_PWMINV : 0));
+ if (err)
return err;
- }
}
return 0;
}
+static bool amc6821_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AMC6821_REG_STAT1:
+ case AMC6821_REG_STAT2:
+ case AMC6821_REG_TEMP_LO:
+ case AMC6821_REG_TDATA_LOW:
+ case AMC6821_REG_LTEMP_HI:
+ case AMC6821_REG_RTEMP_HI:
+ case AMC6821_REG_TDATA_HI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config amc6821_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AMC6821_REG_CONF3,
+ .volatile_reg = amc6821_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
+
static int amc6821_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct amc6821_data *data;
struct device *hwmon_dev;
+ struct regmap *regmap;
int err;
data = devm_kzalloc(dev, sizeof(struct amc6821_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = client;
- mutex_init(&data->update_lock);
+ regmap = devm_regmap_init_i2c(client, &amc6821_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize regmap\n");
+ data->regmap = regmap;
- /*
- * Initialize the amc6821 chip
- */
- err = amc6821_init_client(client);
+ err = amc6821_init_client(data);
if (err)
return err;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- amc6821_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &amc6821_chip_info,
+ amc6821_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id amc6821_id[] = {
- { "amc6821", amc6821 },
+ { "amc6821", 0 },
{ }
};
@@ -937,7 +940,6 @@ MODULE_DEVICE_TABLE(i2c, amc6821_id);
static const struct of_device_id __maybe_unused amc6821_of_match[] = {
{
.compatible = "ti,amc6821",
- .data = (void *)amc6821,
},
{ }
};
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 36f9e38000d5..6bb8d7b1d219 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -322,6 +322,14 @@ static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_pro_art_x670E_creator_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_pro_art_b550_creator = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR |
@@ -486,6 +494,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_prime_x570_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
&board_info_pro_art_x570_creator_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X670E-CREATOR WIFI",
+ &board_info_pro_art_x670E_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt B550-CREATOR",
&board_info_pro_art_b550_creator),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index d778a2aaefec..3751c1e3eddd 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1389,4 +1389,5 @@ static void __exit atk0110_exit(void)
module_init(atk0110_init);
module_exit(atk0110_exit);
+MODULE_DESCRIPTION("ASUS ATK0110 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index 3e63666a61bd..e1a7f7aa7f80 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -10,11 +10,13 @@
#include <linux/bitops.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/hid.h>
#include <linux/hwmon.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -28,6 +30,8 @@
#define LABEL_LENGTH 11
#define REQ_TIMEOUT 300
+#define CTL_GET_FW_VER 0x02 /* returns the firmware version in bytes 1-3 */
+#define CTL_GET_BL_VER 0x06 /* returns the bootloader version in bytes 1-2 */
#define CTL_GET_TMP_CNCT 0x10 /*
* returns in bytes 1-4 for each temp sensor:
* 0 not connected
@@ -78,6 +82,7 @@
struct ccp_device {
struct hid_device *hdev;
struct device *hwmon_dev;
+ struct dentry *debugfs;
/* For reinitializing the completion below */
spinlock_t wait_input_report_lock;
struct completion wait_input_report;
@@ -88,6 +93,8 @@ struct ccp_device {
DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
DECLARE_BITMAP(fan_cnct, NUM_FANS);
char fan_label[6][LABEL_LENGTH];
+ u8 firmware_ver[3];
+ u8 bootloader_ver[2];
};
/* converts response error in buffer to errno */
@@ -496,6 +503,83 @@ static int get_temp_cnct(struct ccp_device *ccp)
return 0;
}
+/* read firmware version */
+static int get_fw_version(struct ccp_device *ccp)
+{
+ int ret;
+
+ ret = send_usb_cmd(ccp, CTL_GET_FW_VER, 0, 0, 0);
+ if (ret) {
+ hid_notice(ccp->hdev, "Failed to read firmware version.\n");
+ return ret;
+ }
+ ccp->firmware_ver[0] = ccp->buffer[1];
+ ccp->firmware_ver[1] = ccp->buffer[2];
+ ccp->firmware_ver[2] = ccp->buffer[3];
+
+ return 0;
+}
+
+/* read bootloader version */
+static int get_bl_version(struct ccp_device *ccp)
+{
+ int ret;
+
+ ret = send_usb_cmd(ccp, CTL_GET_BL_VER, 0, 0, 0);
+ if (ret) {
+ hid_notice(ccp->hdev, "Failed to read bootloader version.\n");
+ return ret;
+ }
+ ccp->bootloader_ver[0] = ccp->buffer[1];
+ ccp->bootloader_ver[1] = ccp->buffer[2];
+
+ return 0;
+}
+
+static int firmware_show(struct seq_file *seqf, void *unused)
+{
+ struct ccp_device *ccp = seqf->private;
+
+ seq_printf(seqf, "%d.%d.%d\n",
+ ccp->firmware_ver[0],
+ ccp->firmware_ver[1],
+ ccp->firmware_ver[2]);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(firmware);
+
+static int bootloader_show(struct seq_file *seqf, void *unused)
+{
+ struct ccp_device *ccp = seqf->private;
+
+ seq_printf(seqf, "%d.%d\n",
+ ccp->bootloader_ver[0],
+ ccp->bootloader_ver[1]);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bootloader);
+
+static void ccp_debugfs_init(struct ccp_device *ccp)
+{
+ char name[32];
+ int ret;
+
+ scnprintf(name, sizeof(name), "corsaircpro-%s", dev_name(&ccp->hdev->dev));
+ ccp->debugfs = debugfs_create_dir(name, NULL);
+
+ ret = get_fw_version(ccp);
+ if (!ret)
+ debugfs_create_file("firmware_version", 0444,
+ ccp->debugfs, ccp, &firmware_fops);
+
+ ret = get_bl_version(ccp);
+ if (!ret)
+ debugfs_create_file("bootloader_version", 0444,
+ ccp->debugfs, ccp, &bootloader_fops);
+}
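With debugfs mounted in its usual place, the two read-only files created above appear under the per-device directory (paths shown for illustration):

/*
 * /sys/kernel/debug/corsaircpro-<hid device name>/firmware_version   ("%d.%d.%d")
 * /sys/kernel/debug/corsaircpro-<hid device name>/bootloader_version ("%d.%d")
 */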
+
static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct ccp_device *ccp;
@@ -542,6 +626,9 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = get_fan_cnct(ccp);
if (ret)
goto out_hw_close;
+
+ ccp_debugfs_init(ccp);
+
ccp->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsaircpro",
ccp, &ccp_chip_info, NULL);
if (IS_ERR(ccp->hwmon_dev)) {
@@ -562,6 +649,7 @@ static void ccp_remove(struct hid_device *hdev)
{
struct ccp_device *ccp = hid_get_drvdata(hdev);
+ debugfs_remove_recursive(ccp->debugfs);
hwmon_device_unregister(ccp->hwmon_dev);
hid_hw_close(hdev);
hid_hw_stop(hdev);
@@ -582,6 +670,7 @@ static struct hid_driver ccp_driver = {
};
MODULE_DEVICE_TABLE(hid, ccp_devices);
+MODULE_DESCRIPTION("Corsair Commander Pro controller driver");
MODULE_LICENSE("GPL");
static int __init ccp_init(void)
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 2c7c92272fe3..f8f22b8a67cd 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -875,15 +875,16 @@ static const struct hid_device_id corsairpsu_idtable[] = {
{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
{ HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */
{ HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */
- { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Series 2022 */
- { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Legacy */
+ { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i Legacy */
{ HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */
{ HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */
- { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Series 2022 and 2023 */
+ { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */
+ { HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */
{ },
};
MODULE_DEVICE_TABLE(hid, corsairpsu_idtable);
diff --git a/drivers/hwmon/cros_ec_hwmon.c b/drivers/hwmon/cros_ec_hwmon.c
new file mode 100644
index 000000000000..5514cf780b8b
--- /dev/null
+++ b/drivers/hwmon/cros_ec_hwmon.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ChromeOS EC driver for hwmon
+ *
+ * Copyright (C) 2024 Thomas Weißschuh <linux@weissschuh.net>
+ */
+
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#define DRV_NAME "cros-ec-hwmon"
+
+struct cros_ec_hwmon_priv {
+ struct cros_ec_device *cros_ec;
+ const char *temp_sensor_names[EC_TEMP_SENSOR_ENTRIES + EC_TEMP_SENSOR_B_ENTRIES];
+ u8 usable_fans;
+};
+
+static int cros_ec_hwmon_read_fan_speed(struct cros_ec_device *cros_ec, u8 index, u16 *speed)
+{
+ int ret;
+ __le16 __speed;
+
+ ret = cros_ec_cmd_readmem(cros_ec, EC_MEMMAP_FAN + index * 2, 2, &__speed);
+ if (ret < 0)
+ return ret;
+
+ *speed = le16_to_cpu(__speed);
+ return 0;
+}
+
+static int cros_ec_hwmon_read_temp(struct cros_ec_device *cros_ec, u8 index, u8 *temp)
+{
+ unsigned int offset;
+ int ret;
+
+ if (index < EC_TEMP_SENSOR_ENTRIES)
+ offset = EC_MEMMAP_TEMP_SENSOR + index;
+ else
+ offset = EC_MEMMAP_TEMP_SENSOR_B + index - EC_TEMP_SENSOR_ENTRIES;
+
+ ret = cros_ec_cmd_readmem(cros_ec, offset, 1, temp);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static bool cros_ec_hwmon_is_error_fan(u16 speed)
+{
+ return speed == EC_FAN_SPEED_NOT_PRESENT || speed == EC_FAN_SPEED_STALLED;
+}
+
+static bool cros_ec_hwmon_is_error_temp(u8 temp)
+{
+ return temp == EC_TEMP_SENSOR_NOT_PRESENT ||
+ temp == EC_TEMP_SENSOR_ERROR ||
+ temp == EC_TEMP_SENSOR_NOT_POWERED ||
+ temp == EC_TEMP_SENSOR_NOT_CALIBRATED;
+}
+
+static long cros_ec_hwmon_temp_to_millicelsius(u8 temp)
+{
+ return kelvin_to_millicelsius((((long)temp) + EC_TEMP_SENSOR_OFFSET));
+}
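The EC reports temperatures as an offset byte in Kelvin; assuming the usual EC_TEMP_SENSOR_OFFSET of 200, the conversion above works out as in this small sketch (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned char raw = 100;			/* example memmap byte */
	long kelvin = (long)raw + 200;			/* assumed offset -> 300 K */
	long millicelsius = kelvin * 1000 - 273150;	/* 26850 mC */

	printf("raw %u -> %ld millicelsius\n", raw, millicelsius);
	return 0;
}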
+
+static int cros_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev);
+ int ret = -EOPNOTSUPP;
+ u16 speed;
+ u8 temp;
+
+ if (type == hwmon_fan) {
+ if (attr == hwmon_fan_input) {
+ ret = cros_ec_hwmon_read_fan_speed(priv->cros_ec, channel, &speed);
+ if (ret == 0) {
+ if (cros_ec_hwmon_is_error_fan(speed))
+ ret = -ENODATA;
+ else
+ *val = speed;
+ }
+ } else if (attr == hwmon_fan_fault) {
+ ret = cros_ec_hwmon_read_fan_speed(priv->cros_ec, channel, &speed);
+ if (ret == 0)
+ *val = cros_ec_hwmon_is_error_fan(speed);
+ }
+ } else if (type == hwmon_temp) {
+ if (attr == hwmon_temp_input) {
+ ret = cros_ec_hwmon_read_temp(priv->cros_ec, channel, &temp);
+ if (ret == 0) {
+ if (cros_ec_hwmon_is_error_temp(temp))
+ ret = -ENODATA;
+ else
+ *val = cros_ec_hwmon_temp_to_millicelsius(temp);
+ }
+ } else if (attr == hwmon_temp_fault) {
+ ret = cros_ec_hwmon_read_temp(priv->cros_ec, channel, &temp);
+ if (ret == 0)
+ *val = cros_ec_hwmon_is_error_temp(temp);
+ }
+ }
+
+ return ret;
+}
+
+static int cros_ec_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev);
+
+ if (type == hwmon_temp && attr == hwmon_temp_label) {
+ *str = priv->temp_sensor_names[channel];
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t cros_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct cros_ec_hwmon_priv *priv = data;
+
+ if (type == hwmon_fan) {
+ if (priv->usable_fans & BIT(channel))
+ return 0444;
+ } else if (type == hwmon_temp) {
+ if (priv->temp_sensor_names[channel])
+ return 0444;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_channel_info * const cros_ec_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops cros_ec_hwmon_ops = {
+ .read = cros_ec_hwmon_read,
+ .read_string = cros_ec_hwmon_read_string,
+ .is_visible = cros_ec_hwmon_is_visible,
+};
+
+static const struct hwmon_chip_info cros_ec_hwmon_chip_info = {
+ .ops = &cros_ec_hwmon_ops,
+ .info = cros_ec_hwmon_info,
+};
+
+static void cros_ec_hwmon_probe_temp_sensors(struct device *dev, struct cros_ec_hwmon_priv *priv,
+ u8 thermal_version)
+{
+ struct ec_params_temp_sensor_get_info req = {};
+ struct ec_response_temp_sensor_get_info resp;
+ size_t candidates, i, sensor_name_size;
+ int ret;
+ u8 temp;
+
+ if (thermal_version < 2)
+ candidates = EC_TEMP_SENSOR_ENTRIES;
+ else
+ candidates = ARRAY_SIZE(priv->temp_sensor_names);
+
+ for (i = 0; i < candidates; i++) {
+ if (cros_ec_hwmon_read_temp(priv->cros_ec, i, &temp) < 0)
+ continue;
+
+ if (temp == EC_TEMP_SENSOR_NOT_PRESENT)
+ continue;
+
+ req.id = i;
+ ret = cros_ec_cmd(priv->cros_ec, 0, EC_CMD_TEMP_SENSOR_GET_INFO,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ continue;
+
+ sensor_name_size = strnlen(resp.sensor_name, sizeof(resp.sensor_name));
+ priv->temp_sensor_names[i] = devm_kasprintf(dev, GFP_KERNEL, "%.*s",
+ (int)sensor_name_size,
+ resp.sensor_name);
+ }
+}
+
+static void cros_ec_hwmon_probe_fans(struct cros_ec_hwmon_priv *priv)
+{
+ u16 speed;
+ size_t i;
+ int ret;
+
+ for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) {
+ ret = cros_ec_hwmon_read_fan_speed(priv->cros_ec, i, &speed);
+ if (ret == 0 && speed != EC_FAN_SPEED_NOT_PRESENT)
+ priv->usable_fans |= BIT(i);
+ }
+}
+
+static int cros_ec_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct cros_ec_hwmon_priv *priv;
+ struct device *hwmon_dev;
+ u8 thermal_version;
+ int ret;
+
+ ret = cros_ec_cmd_readmem(cros_ec, EC_MEMMAP_THERMAL_VERSION, 1, &thermal_version);
+ if (ret < 0)
+ return ret;
+
+ /* Covers both fan and temp sensors */
+ if (thermal_version == 0)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->cros_ec = cros_ec;
+
+ cros_ec_hwmon_probe_temp_sensors(dev, priv, thermal_version);
+ cros_ec_hwmon_probe_fans(priv);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "cros_ec", priv,
+ &cros_ec_hwmon_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct platform_device_id cros_ec_hwmon_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+
+static struct platform_driver cros_ec_hwmon_driver = {
+ .driver.name = DRV_NAME,
+ .probe = cros_ec_hwmon_probe,
+ .id_table = cros_ec_hwmon_id,
+};
+module_platform_driver(cros_ec_hwmon_driver);
+
+MODULE_DEVICE_TABLE(platform, cros_ec_hwmon_id);
+MODULE_DESCRIPTION("ChromeOS EC Hardware Monitoring Driver");
+MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 48a81c64f00d..0362a13f6525 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1264,6 +1264,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell OptiPlex 7060",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7060"),
+ },
+ },
+ {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1545,6 +1552,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
},
+ {
+ .ident = "Dell G15 5511",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ },
{ }
};
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 3dcef221041d..1a9b28dc91e6 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2461,8 +2461,6 @@ static int dme1737_i2c_detect(struct i2c_client *client,
return 0;
}
-static const struct i2c_device_id dme1737_id[];
-
static int dme1737_i2c_probe(struct i2c_client *client)
{
struct dme1737_data *data;
@@ -2474,7 +2472,7 @@ static int dme1737_i2c_probe(struct i2c_client *client)
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->type = i2c_match_id(dme1737_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
data->client = client;
data->name = client->name;
mutex_init(&data->update_lock);
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index bffbc8040171..42ec34cb8a5f 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -342,8 +342,6 @@ static const struct attribute_group ds1621_group = {
};
__ATTRIBUTE_GROUPS(ds1621);
-static const struct i2c_device_id ds1621_id[];
-
static int ds1621_probe(struct i2c_client *client)
{
struct ds1621_data *data;
@@ -356,7 +354,7 @@ static int ds1621_probe(struct i2c_client *client)
mutex_init(&data->update_lock);
- data->kind = i2c_match_id(ds1621_id, client)->driver_data;
+ data->kind = (uintptr_t)i2c_get_match_data(client);
data->client = client;
/* Initialize the DS1621 chip */
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 8c572bb64f5d..7e867f132420 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -111,31 +111,6 @@ struct f75375_data {
s8 temp_max_hyst[2];
};
-static int f75375_detect(struct i2c_client *client,
- struct i2c_board_info *info);
-static int f75375_probe(struct i2c_client *client);
-static void f75375_remove(struct i2c_client *client);
-
-static const struct i2c_device_id f75375_id[] = {
- { "f75373", f75373 },
- { "f75375", f75375 },
- { "f75387", f75387 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, f75375_id);
-
-static struct i2c_driver f75375_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "f75375",
- },
- .probe = f75375_probe,
- .remove = f75375_remove,
- .id_table = f75375_id,
- .detect = f75375_detect,
- .address_list = normal_i2c,
-};
-
static inline int f75375_read8(struct i2c_client *client, u8 reg)
{
return i2c_smbus_read_byte_data(client, reg);
@@ -830,7 +805,7 @@ static int f75375_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->kind = i2c_match_id(f75375_id, client)->driver_data;
+ data->kind = (uintptr_t)i2c_get_match_data(client);
err = sysfs_create_group(&client->dev.kobj, &f75375_group);
if (err)
@@ -901,6 +876,25 @@ static int f75375_detect(struct i2c_client *client,
return 0;
}
+static const struct i2c_device_id f75375_id[] = {
+ { "f75373", f75373 },
+ { "f75375", f75375 },
+ { "f75387", f75387 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, f75375_id);
+
+static struct i2c_driver f75375_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "f75375",
+ },
+ .probe = f75375_probe,
+ .remove = f75375_remove,
+ .id_table = f75375_id,
+ .detect = f75375_detect,
+ .address_list = normal_i2c,
+};
module_i2c_driver(f75375_driver);
MODULE_AUTHOR("Riku Voipio");
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index b30512a705a7..1811f84d835e 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -1087,7 +1087,7 @@ static int fschmd_probe(struct i2c_client *client)
"Heracles", "Heimdall", "Hades", "Syleus" };
static const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
int i, err;
- enum chips kind = i2c_match_id(fschmd_id, client)->driver_data;
+ enum chips kind = (uintptr_t)i2c_get_match_data(client);
data = kzalloc(sizeof(struct fschmd_data), GFP_KERNEL);
if (!data)
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index af1228708e25..4fa3aa1271da 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -44,6 +44,7 @@
#define DRVNAME "g762"
static const struct i2c_device_id g762_id[] = {
+ { "g761" },
{ "g762" },
{ "g763" },
{ }
@@ -69,6 +70,7 @@ enum g762_regs {
#define G762_REG_FAN_CMD1_PWM_POLARITY 0x02 /* PWM polarity */
#define G762_REG_FAN_CMD1_PULSE_PER_REV 0x01 /* pulse per fan revolution */
+#define G761_REG_FAN_CMD2_FAN_CLOCK 0x20 /* choose internal clock */
#define G762_REG_FAN_CMD2_GEAR_MODE_1 0x08 /* fan gear mode */
#define G762_REG_FAN_CMD2_GEAR_MODE_0 0x04
#define G762_REG_FAN_CMD2_FAN_STARTV_1 0x02 /* fan startup voltage */
@@ -115,6 +117,7 @@ enum g762_regs {
struct g762_data {
struct i2c_client *client;
+ bool internal_clock;
struct clk *clk;
/* update mutex */
@@ -566,6 +569,7 @@ static int do_set_fan_startv(struct device *dev, unsigned long val)
#ifdef CONFIG_OF
static const struct of_device_id g762_dt_match[] = {
+ { .compatible = "gmt,g761" },
{ .compatible = "gmt,g762" },
{ .compatible = "gmt,g763" },
{ },
@@ -597,6 +601,21 @@ static int g762_of_clock_enable(struct i2c_client *client)
if (!client->dev.of_node)
return 0;
+ data = i2c_get_clientdata(client);
+
+ /*
+ * Skip CLK detection and handling if we use internal clock.
+ * This is only valid for g761.
+ */
+ data->internal_clock = of_device_is_compatible(client->dev.of_node,
+ "gmt,g761") &&
+ !of_property_present(client->dev.of_node,
+ "clocks");
+ if (data->internal_clock) {
+ do_set_clk_freq(&client->dev, 32768);
+ return 0;
+ }
+
clk = of_clk_get(client->dev.of_node, 0);
if (IS_ERR(clk)) {
dev_err(&client->dev, "failed to get clock\n");
@@ -616,7 +635,6 @@ static int g762_of_clock_enable(struct i2c_client *client)
goto clk_unprep;
}
- data = i2c_get_clientdata(client);
data->clk = clk;
ret = devm_add_action(&client->dev, g762_of_clock_disable, data);
@@ -1025,16 +1043,26 @@ ATTRIBUTE_GROUPS(g762);
static inline int g762_fan_init(struct device *dev)
{
struct g762_data *data = g762_update_client(dev);
+ int ret;
if (IS_ERR(data))
return PTR_ERR(data);
+ /* internal_clock can only be set with compatible g761 */
+ if (data->internal_clock)
+ data->fan_cmd2 |= G761_REG_FAN_CMD2_FAN_CLOCK;
+
data->fan_cmd1 |= G762_REG_FAN_CMD1_DET_FAN_FAIL;
data->fan_cmd1 |= G762_REG_FAN_CMD1_DET_FAN_OOC;
data->valid = false;
- return i2c_smbus_write_byte_data(data->client, G762_REG_FAN_CMD1,
- data->fan_cmd1);
+ ret = i2c_smbus_write_byte_data(data->client, G762_REG_FAN_CMD1,
+ data->fan_cmd1);
+ if (ret)
+ return ret;
+
+ return i2c_smbus_write_byte_data(data->client, G762_REG_FAN_CMD2,
+ data->fan_cmd2);
}
static int g762_probe(struct i2c_client *client)
@@ -1056,15 +1084,16 @@ static int g762_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->update_lock);
- /* Enable fan failure detection and fan out of control protection */
- ret = g762_fan_init(dev);
+ /* Get configuration via DT ... */
+ ret = g762_of_clock_enable(client);
if (ret)
return ret;
- /* Get configuration via DT ... */
- ret = g762_of_clock_enable(client);
+ /* Enable fan failure detection and fan out of control protection */
+ ret = g762_fan_init(dev);
if (ret)
return ret;
+
ret = g762_of_prop_import(client);
if (ret)
return ret;
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 1501ceb551e7..cb2f01dc4326 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -39,7 +39,7 @@ struct gsc_hwmon_data {
struct hwmon_chip_info chip;
};
-static struct regmap_bus gsc_hwmon_regmap_bus = {
+static const struct regmap_bus gsc_hwmon_regmap_bus = {
.reg_read = gsc_read,
.reg_write = gsc_write,
};
@@ -249,7 +249,6 @@ gsc_hwmon_get_devtree_pdata(struct device *dev)
{
struct gsc_hwmon_platform_data *pdata;
struct gsc_hwmon_channel *ch;
- struct fwnode_handle *child;
struct device_node *fan;
int nchannels;
@@ -276,25 +275,21 @@ gsc_hwmon_get_devtree_pdata(struct device *dev)
ch = pdata->channels;
/* allocate structures for channels and count instances of each type */
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (fwnode_property_read_string(child, "label", &ch->name)) {
dev_err(dev, "channel without label\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
if (fwnode_property_read_u32(child, "reg", &ch->reg)) {
dev_err(dev, "channel without reg\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
if (fwnode_property_read_u32(child, "gw,mode", &ch->mode)) {
dev_err(dev, "channel without mode\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
if (ch->mode > mode_max) {
dev_err(dev, "invalid channel mode\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3b259c425ab7..a362080d41fa 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/hwmon.h>
+#include <linux/i2c.h>
#include <linux/idr.h>
#include <linux/kstrtox.h>
#include <linux/list.h>
@@ -136,7 +137,7 @@ static void hwmon_dev_release(struct device *dev)
kfree(hwdev);
}
-static struct class hwmon_class = {
+static const struct class hwmon_class = {
.name = "hwmon",
.dev_groups = hwmon_dev_attr_groups,
.dev_release = hwmon_dev_release,
@@ -309,6 +310,114 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
return 1;
}
+#if IS_REACHABLE(CONFIG_I2C)
+
+/*
+ * PEC support
+ *
+ * The 'pec' attribute is attached to I2C client devices. It is only provided
+ * if the i2c controller supports PEC.
+ *
+ * The mutex ensures that PEC configuration between i2c device and the hardware
+ * is consistent. Use a single mutex because attribute writes are supposed to be
+ * rare, and maintaining a separate mutex for each hardware monitoring device
+ * would add substantial complexity to the driver for little if any gain.
+ *
+ * The hardware monitoring device is identified as child of the i2c client
+ * device. This assumes that only a single hardware monitoring device is
+ * attached to an i2c client device.
+ */
+
+static DEFINE_MUTEX(hwmon_pec_mutex);
+
+static int hwmon_match_device(struct device *dev, void *data)
+{
+ return dev->class == &hwmon_class;
+}
+
+static ssize_t pec_show(struct device *dev, struct device_attribute *dummy,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return sysfs_emit(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
+}
+
+static ssize_t pec_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hwmon_device *hwdev;
+ struct device *hdev;
+ bool val;
+ int err;
+
+ err = kstrtobool(buf, &val);
+ if (err < 0)
+ return err;
+
+ hdev = device_find_child(dev, NULL, hwmon_match_device);
+ if (!hdev)
+ return -ENODEV;
+
+ mutex_lock(&hwmon_pec_mutex);
+
+ /*
+ * If there is no write function, we assume that chip specific
+ * handling is not required.
+ */
+ hwdev = to_hwmon_device(hdev);
+ if (hwdev->chip->ops->write) {
+ err = hwdev->chip->ops->write(hdev, hwmon_chip, hwmon_chip_pec, 0, val);
+ if (err && err != -EOPNOTSUPP)
+ goto unlock;
+ }
+
+ if (!val)
+ client->flags &= ~I2C_CLIENT_PEC;
+ else
+ client->flags |= I2C_CLIENT_PEC;
+
+ err = count;
+unlock:
+ mutex_unlock(&hwmon_pec_mutex);
+ put_device(hdev);
+
+ return err;
+}
+
+static DEVICE_ATTR_RW(pec);
+
+static void hwmon_remove_pec(void *dev)
+{
+ device_remove_file(dev, &dev_attr_pec);
+}
+
+static int hwmon_pec_register(struct device *hdev)
+{
+ struct i2c_client *client = i2c_verify_client(hdev->parent);
+ int err;
+
+ if (!client)
+ return -EINVAL;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
+ return 0;
+
+ err = device_create_file(&client->dev, &dev_attr_pec);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(hdev, hwmon_remove_pec, &client->dev);
+}
+
+#else /* CONFIG_I2C */
+static int hwmon_pec_register(struct device *hdev)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_I2C */
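A driver opts in by advertising HWMON_C_PEC in its hwmon_chip entry and, if it needs chip-specific handling, by accepting hwmon_chip_pec in its write callback. A hedged sketch of the client side (the foo_* names are placeholders, not from this patch):

static const struct hwmon_channel_info * const foo_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_PEC),
	/* ... temperature/voltage/... channels ... */
	NULL
};

static int foo_write(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long val)
{
	if (type == hwmon_chip && attr == hwmon_chip_pec)
		return foo_enable_pec(dev, val);	/* hypothetical helper */

	return -EOPNOTSUPP;
}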
+
/* sysfs attribute management */
static ssize_t hwmon_attr_show(struct device *dev,
@@ -397,10 +506,6 @@ static struct attribute *hwmon_genattr(const void *drvdata,
const char *name;
bool is_string = is_string_attr(type, attr);
- /* The attribute is invisible if there is no template string */
- if (!template)
- return ERR_PTR(-ENOENT);
-
mode = ops->is_visible(drvdata, type, attr, index);
if (!mode)
return ERR_PTR(-ENOENT);
@@ -712,8 +817,8 @@ static int hwmon_genattrs(const void *drvdata,
attr = __ffs(attr_mask);
attr_mask &= ~BIT(attr);
- if (attr >= template_size)
- return -EINVAL;
+ if (attr >= template_size || !templates[attr])
+ continue; /* attribute is invisible */
a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
@@ -849,16 +954,26 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
INIT_LIST_HEAD(&hwdev->tzdata);
if (hdev->of_node && chip && chip->ops->read &&
- chip->info[0]->type == hwmon_chip &&
- (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
- err = hwmon_thermal_register_sensors(hdev);
- if (err) {
- device_unregister(hdev);
- /*
- * Don't worry about hwdev; hwmon_dev_release(), called
- * from device_unregister(), will free it.
- */
- goto ida_remove;
+ chip->info[0]->type == hwmon_chip) {
+ u32 config = chip->info[0]->config[0];
+
+ if (config & HWMON_C_REGISTER_TZ) {
+ err = hwmon_thermal_register_sensors(hdev);
+ if (err) {
+ device_unregister(hdev);
+ /*
+ * Don't worry about hwdev; hwmon_dev_release(),
+ * called from device_unregister(), will free it.
+ */
+ goto ida_remove;
+ }
+ }
+ if (config & HWMON_C_PEC) {
+ err = hwmon_pec_register(hdev);
+ if (err) {
+ device_unregister(hdev);
+ goto ida_remove;
+ }
}
}
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 4c8a80847891..fab32e1e15f2 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -49,16 +49,17 @@ static ssize_t iio_hwmon_read_val(struct device *dev,
struct iio_channel *chan = &state->channels[sattr->index];
enum iio_chan_type type;
- ret = iio_read_channel_processed(chan, &result);
- if (ret < 0)
- return ret;
-
ret = iio_get_channel_type(chan, &type);
if (ret < 0)
return ret;
if (type == IIO_POWER)
- result *= 1000; /* mili-Watts to micro-Watts conversion */
+ /* milli-Watts to micro-Watts conversion */
+ ret = iio_read_channel_processed_scale(chan, &result, 1000);
+ else
+ ret = iio_read_channel_processed(chan, &result);
+ if (ret < 0)
+ return ret;
return sprintf(buf, "%d\n", result);
}
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 855626f1bc01..2d9f12f68d50 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -96,7 +96,7 @@
#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
#define INA238_DIE_TEMP_LSB 125 /* 125 mC/lsb */
-static struct regmap_config ina238_regmap_config = {
+static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
.reg_bits = 8,
.val_bits = 16,
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index d8415d1f21fc..9ab4205622e2 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -73,6 +73,11 @@
#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9)
#define INA226_SHIFT_AVG(val) ((val) << 9)
+#define INA226_ALERT_POLARITY_MASK 0x0002
+#define INA226_SHIFT_ALERT_POLARITY(val) ((val) << 1)
+#define INA226_ALERT_POL_LOW 0
+#define INA226_ALERT_POL_HIGH 1
+
/* bit number of alert functions in Mask/Enable Register */
#define INA226_SHUNT_OVER_VOLTAGE_BIT 15
#define INA226_SHUNT_UNDER_VOLTAGE_BIT 14
@@ -178,6 +183,14 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+static int ina2xx_set_alert_polarity(struct ina2xx_data *data,
+ unsigned long val)
+{
+ return regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_POLARITY_MASK,
+ INA226_SHIFT_ALERT_POLARITY(val));
+}
+
/*
* Calibration register is set to the best value, which eliminates
* truncation errors on calculating current register in hardware.
@@ -612,8 +625,6 @@ static const struct attribute_group ina226_group = {
.attrs = ina226_attrs,
};
-static const struct i2c_device_id ina2xx_id[];
-
static int ina2xx_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -623,10 +634,7 @@ static int ina2xx_probe(struct i2c_client *client)
int ret, group = 0;
enum ina2xx_ids chip;
- if (client->dev.of_node)
- chip = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- chip = i2c_match_id(ina2xx_id, client)->driver_data;
+ chip = (uintptr_t)i2c_get_match_data(client);
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -659,6 +667,25 @@ static int ina2xx_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to enable vs regulator\n");
+ if (chip == ina226) {
+ if (of_property_read_bool(dev->of_node, "ti,alert-polarity-active-high")) {
+ ret = ina2xx_set_alert_polarity(data,
+ INA226_ALERT_POL_HIGH);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "failed to set alert polarity active high\n");
+ }
+ } else {
+ /* Set default value, i.e. active low */
+ ret = ina2xx_set_alert_polarity(data,
+ INA226_ALERT_POL_LOW);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "failed to set alert polarity active low\n");
+ }
+ }
+ }
+
ret = ina2xx_init(data);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
index 6500ca548f9c..ca2dff158925 100644
--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
+++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
@@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
};
static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
- { 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
+ { 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
};
static const struct hwmon_channel_info * const n6000bmc_hinfo[] = {
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 7092f8f025b8..a260cff750a5 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -79,20 +79,9 @@ static const unsigned short normal_i2c[] = {
#define AT30TS00_DEVID 0x8201
#define AT30TS00_DEVID_MASK 0xffff
-#define AT30TSE004_DEVID 0x2200
-#define AT30TSE004_DEVID_MASK 0xffff
-
-/* Giantec */
-#define GT30TS00_DEVID 0x2200
-#define GT30TS00_DEVID_MASK 0xff00
-
#define GT34TS02_DEVID 0x3300
#define GT34TS02_DEVID_MASK 0xff00
-/* IDT */
-#define TSE2004_DEVID 0x2200
-#define TSE2004_DEVID_MASK 0xff00
-
#define TS3000_DEVID 0x2900 /* Also matches TSE2002 */
#define TS3000_DEVID_MASK 0xff00
@@ -116,9 +105,6 @@ static const unsigned short normal_i2c[] = {
#define MCP98243_DEVID 0x2100
#define MCP98243_DEVID_MASK 0xfffc
-#define MCP98244_DEVID 0x2200
-#define MCP98244_DEVID_MASK 0xfffc
-
#define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */
#define MCP9843_DEVID_MASK 0xfffe
@@ -136,12 +122,6 @@ static const unsigned short normal_i2c[] = {
#define CAT34TS02C_DEVID 0x0a00
#define CAT34TS02C_DEVID_MASK 0xfff0
-#define CAT34TS04_DEVID 0x2200
-#define CAT34TS04_DEVID_MASK 0xfff0
-
-#define N34TS04_DEVID 0x2230
-#define N34TS04_DEVID_MASK 0xfff0
-
/* ST Microelectronics */
#define STTS424_DEVID 0x0101
#define STTS424_DEVID_MASK 0xffff
@@ -152,15 +132,12 @@ static const unsigned short normal_i2c[] = {
#define STTS2002_DEVID 0x0300
#define STTS2002_DEVID_MASK 0xffff
-#define STTS2004_DEVID 0x2201
-#define STTS2004_DEVID_MASK 0xffff
-
#define STTS3000_DEVID 0x0200
#define STTS3000_DEVID_MASK 0xffff
-/* Seiko Instruments */
-#define S34TS04A_DEVID 0x2221
-#define S34TS04A_DEVID_MASK 0xffff
+/* TSE2004 compliant sensors */
+#define TSE2004_DEVID 0x2200
+#define TSE2004_DEVID_MASK 0xff00
static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 };
@@ -173,8 +150,8 @@ struct jc42_chips {
static struct jc42_chips jc42_chips[] = {
{ ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
{ ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK },
- { ATMEL_MANID2, AT30TSE004_DEVID, AT30TSE004_DEVID_MASK },
- { GT_MANID, GT30TS00_DEVID, GT30TS00_DEVID_MASK },
+ { ATMEL_MANID2, TSE2004_DEVID, TSE2004_DEVID_MASK },
+ { GT_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
{ GT_MANID2, GT34TS02_DEVID, GT34TS02_DEVID_MASK },
{ IDT_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
{ IDT_MANID, TS3000_DEVID, TS3000_DEVID_MASK },
@@ -184,19 +161,19 @@ static struct jc42_chips jc42_chips[] = {
{ MCP_MANID, MCP9808_DEVID, MCP9808_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
- { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
+ { MCP_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
{ MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
{ NXP_MANID, SE97_DEVID, SE97_DEVID_MASK },
{ ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
{ ONS_MANID, CAT34TS02C_DEVID, CAT34TS02C_DEVID_MASK },
- { ONS_MANID, CAT34TS04_DEVID, CAT34TS04_DEVID_MASK },
- { ONS_MANID, N34TS04_DEVID, N34TS04_DEVID_MASK },
+ { ONS_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
+ { ONS_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
{ NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
- { SI_MANID, S34TS04A_DEVID, S34TS04A_DEVID_MASK },
+ { SI_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
{ STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
{ STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
{ STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK },
- { STM_MANID, STTS2004_DEVID, STTS2004_DEVID_MASK },
+ { STM_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
{ STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
};
@@ -436,7 +413,11 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
if (cap < 0 || config < 0 || manid < 0 || devid < 0)
return -ENODEV;
- if ((cap & 0xff00) || (config & 0xf800))
+ if ((cap & 0xff00) || (config & 0xf820))
+ return -ENODEV;
+
+ if ((devid & TSE2004_DEVID_MASK) == TSE2004_DEVID &&
+ (cap & 0x00e7) != 0x00e7)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(jc42_chips); i++) {
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 8092312c0a87..543526bac042 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -101,7 +101,6 @@ struct k10temp_data {
#define TCCD_BIT(x) ((x) + 2)
#define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel))
-#define HAVE_TDIE(d) HAVE_TEMP(d, TDIE_BIT)
struct tctl_offset {
u8 model;
@@ -153,8 +152,16 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
- amd_smn_read(amd_pci_dev_to_node_id(pdev),
- ZEN_REPORTED_TEMP_CTRL_BASE, regval);
+ if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
+ ZEN_REPORTED_TEMP_CTRL_BASE, regval))
+ *regval = 0;
+}
+
+static int read_ccd_temp_reg(struct k10temp_data *data, int ccd, u32 *regval)
+{
+ u16 node_id = amd_pci_dev_to_node_id(data->pdev);
+
+ return amd_smn_read(node_id, ZEN_CCD_TEMP(data->ccd_offset, ccd), regval);
}
static long get_raw_temp(struct k10temp_data *data)
@@ -205,6 +212,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
long *val)
{
struct k10temp_data *data = dev_get_drvdata(dev);
+ int ret = -EOPNOTSUPP;
u32 regval;
switch (attr) {
@@ -221,13 +229,15 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
*val = 0;
break;
case 2 ... 13: /* Tccd{1-12} */
- amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
- ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
- &regval);
+ ret = read_ccd_temp_reg(data, channel - 2, &regval);
+
+ if (ret)
+ return ret;
+
*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
break;
default:
- return -EOPNOTSUPP;
+ return ret;
}
break;
case hwmon_temp_max:
@@ -243,7 +253,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
- ((regval >> 24) & 0xf)) * 500 + 52000;
break;
default:
- return -EOPNOTSUPP;
+ return ret;
}
return 0;
}
@@ -259,11 +269,11 @@ static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
}
}
-static umode_t k10temp_is_visible(const void *_data,
+static umode_t k10temp_is_visible(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
- const struct k10temp_data *data = _data;
+ const struct k10temp_data *data = drvdata;
struct pci_dev *pdev = data->pdev;
u32 reg;
@@ -374,15 +384,25 @@ static const struct hwmon_chip_info k10temp_chip_info = {
.info = k10temp_info,
};
-static void k10temp_get_ccd_support(struct pci_dev *pdev,
- struct k10temp_data *data, int limit)
+static void k10temp_get_ccd_support(struct k10temp_data *data, int limit)
{
u32 regval;
int i;
for (i = 0; i < limit; i++) {
- amd_smn_read(amd_pci_dev_to_node_id(pdev),
- ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
+ /*
+ * Ignore inaccessible CCDs.
+ *
+ * Some systems will return a register value of 0, and the TEMP_VALID
+ * bit check below will naturally fail.
+ *
+ * Other systems will return a PCI_ERROR_RESPONSE (0xFFFFFFFF) for
+ * the register value, which would incorrectly pass the TEMP_VALID
+ * bit check.
+ */
+ if (read_ccd_temp_reg(data, i, &regval))
+ continue;
+
if (regval & ZEN_CCD_TEMP_VALID)
data->show_temp |= BIT(TCCD_BIT(i));
}
@@ -434,18 +454,18 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x11: /* Zen APU */
case 0x18: /* Zen+ APU */
data->ccd_offset = 0x154;
- k10temp_get_ccd_support(pdev, data, 4);
+ k10temp_get_ccd_support(data, 4);
break;
case 0x31: /* Zen2 Threadripper */
case 0x60: /* Renoir */
case 0x68: /* Lucienne */
case 0x71: /* Zen2 */
data->ccd_offset = 0x154;
- k10temp_get_ccd_support(pdev, data, 8);
+ k10temp_get_ccd_support(data, 8);
break;
case 0xa0 ... 0xaf:
data->ccd_offset = 0x300;
- k10temp_get_ccd_support(pdev, data, 8);
+ k10temp_get_ccd_support(data, 8);
break;
}
} else if (boot_cpu_data.x86 == 0x19) {
@@ -459,21 +479,21 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x21: /* Zen3 Ryzen Desktop */
case 0x50 ... 0x5f: /* Green Sardine */
data->ccd_offset = 0x154;
- k10temp_get_ccd_support(pdev, data, 8);
+ k10temp_get_ccd_support(data, 8);
break;
case 0x40 ... 0x4f: /* Yellow Carp */
data->ccd_offset = 0x300;
- k10temp_get_ccd_support(pdev, data, 8);
+ k10temp_get_ccd_support(data, 8);
break;
case 0x60 ... 0x6f:
case 0x70 ... 0x7f:
data->ccd_offset = 0x308;
- k10temp_get_ccd_support(pdev, data, 8);
+ k10temp_get_ccd_support(data, 8);
break;
case 0x10 ... 0x1f:
case 0xa0 ... 0xaf:
data->ccd_offset = 0x300;
- k10temp_get_ccd_support(pdev, data, 12);
+ k10temp_get_ccd_support(data, 12);
break;
}
} else if (boot_cpu_data.x86 == 0x1a) {
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 0878a044dd8e..035176a98ce9 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1104,10 +1104,7 @@ static int lm63_probe(struct i2c_client *client)
mutex_init(&data->update_lock);
/* Set the device type */
- if (client->dev.of_node)
- data->kind = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- data->kind = i2c_match_id(lm63_id, client)->driver_data;
+ data->kind = (uintptr_t)i2c_get_match_data(client);
if (data->kind == lm64)
data->temp2_offset = 16000;
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 481e4e1f8f4f..0d5a250cb672 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -169,11 +169,7 @@ static int lm70_probe(struct spi_device *spi)
struct lm70 *p_lm70;
int chip;
- if (dev_fwnode(&spi->dev))
- chip = (int)(uintptr_t)device_get_match_data(&spi->dev);
- else
- chip = spi_get_device_id(spi)->driver_data;
-
+ chip = (kernel_ulong_t)spi_get_device_match_data(spi);
/* signaling is SPI_MODE_0 */
if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e00750718536..2c2205aec7d4 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -625,20 +625,12 @@ static void lm75_remove(void *data)
i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
}
-static const struct i2c_device_id lm75_ids[];
-
static int lm75_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm75_data *data;
int status, err;
- enum lm75_type kind;
-
- if (client->dev.of_node)
- kind = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- kind = i2c_match_id(lm75_ids, client)->driver_data;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
@@ -649,7 +641,7 @@ static int lm75_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- data->kind = kind;
+ data->kind = (uintptr_t)i2c_get_match_data(client);
data->vs = devm_regulator_get(dev, "vs");
if (IS_ERR(data->vs))
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index b739c354311b..8b53bb312069 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -627,8 +627,6 @@ static int lm78_i2c_detect(struct i2c_client *client,
return -ENODEV;
}
-static const struct i2c_device_id lm78_i2c_id[];
-
static int lm78_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -640,7 +638,7 @@ static int lm78_i2c_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- data->type = i2c_match_id(lm78_i2c_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
/* Initialize the LM78 chip */
lm78_init_device(data);
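
The same conversion recurs throughout this series (lm63, lm70, lm75 and lm78 above; lm83, lm85, lm90, lm95234, max16065, max1668, max6697, mcp3021 and ina2xx below): the forward declaration of the i2c_device_id table and the of_node branch are dropped in favour of a single i2c_get_match_data() call, which returns the firmware match data when the device was matched via OF/ACPI and otherwise falls back to the i2c_device_id .driver_data. A minimal sketch of the pattern follows, using a made-up "bar" driver; note that the enum must not start at 0, as the max31827 comment further down points out, because a NULL .data pointer is indistinguishable from "no match data".

/* Non-zero values so the OF .data pointer can carry the chip type */
enum bar_chips { bar1 = 1, bar2 };

static const struct i2c_device_id bar_id[] = {
	{ "bar1", bar1 },
	{ "bar2", bar2 },
	{ }
};

static const struct of_device_id bar_of_match[] = {
	{ .compatible = "vendor,bar1", .data = (void *)bar1 },
	{ .compatible = "vendor,bar2", .data = (void *)bar2 },
	{ }
};

static int bar_probe(struct i2c_client *client)
{
	/* Works for both OF/ACPI and legacy i2c_device_id matching */
	enum bar_chips type = (uintptr_t)i2c_get_match_data(client);

	return type ? 0 : -ENODEV;
}
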
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index b333c9bde4e6..f800fe2ef18b 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -417,13 +417,6 @@ static int lm83_detect(struct i2c_client *client,
return 0;
}
-static const struct i2c_device_id lm83_id[] = {
- { "lm83", lm83 },
- { "lm82", lm82 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, lm83_id);
-
static int lm83_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -438,7 +431,7 @@ static int lm83_probe(struct i2c_client *client)
if (IS_ERR(data->regmap))
return PTR_ERR(data->regmap);
- data->type = i2c_match_id(lm83_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &lm83_chip_info, NULL);
@@ -449,6 +442,13 @@ static int lm83_probe(struct i2c_client *client)
* Driver data (common to all clients)
*/
+static const struct i2c_device_id lm83_id[] = {
+ { "lm83", lm83 },
+ { "lm82", lm82 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm83_id);
+
static struct i2c_driver lm83_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 68c210002357..1c244ed75122 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1544,8 +1544,6 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static const struct i2c_device_id lm85_id[];
-
static int lm85_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1558,10 +1556,7 @@ static int lm85_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- if (client->dev.of_node)
- data->type = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- data->type = i2c_match_id(lm85_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
mutex_init(&data->update_lock);
/* Fill in the chip specific driver values */
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index e0d7454a301c..ca5c52b38c0f 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1270,42 +1270,6 @@ static int lm90_update_device(struct device *dev)
return 0;
}
-/* pec used for devices with PEC support */
-static ssize_t pec_show(struct device *dev, struct device_attribute *dummy,
- char *buf)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
-}
-
-static ssize_t pec_store(struct device *dev, struct device_attribute *dummy,
- const char *buf, size_t count)
-{
- struct i2c_client *client = to_i2c_client(dev);
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
- switch (val) {
- case 0:
- client->flags &= ~I2C_CLIENT_PEC;
- break;
- case 1:
- client->flags |= I2C_CLIENT_PEC;
- break;
- default:
- return -EINVAL;
- }
-
- return count;
-}
-
-static DEVICE_ATTR_RW(pec);
-
static int lm90_temp_get_resolution(struct lm90_data *data, int index)
{
switch (index) {
@@ -2659,11 +2623,6 @@ static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
return IRQ_NONE;
}
-static void lm90_remove_pec(void *dev)
-{
- device_remove_file(dev, &dev_attr_pec);
-}
-
static int lm90_probe_channel_from_dt(struct i2c_client *client,
struct device_node *child,
struct lm90_data *data)
@@ -2764,10 +2723,7 @@ static int lm90_probe(struct i2c_client *client)
INIT_WORK(&data->report_work, lm90_report_alarms);
/* Set the device type */
- if (client->dev.of_node)
- data->kind = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- data->kind = i2c_match_id(lm90_id, client)->driver_data;
+ data->kind = (uintptr_t)i2c_get_match_data(client);
/*
* Different devices have different alarm bits triggering the
@@ -2802,6 +2758,8 @@ static int lm90_probe(struct i2c_client *client)
data->chip_config[0] |= HWMON_C_UPDATE_INTERVAL;
if (data->flags & LM90_HAVE_FAULTQUEUE)
data->chip_config[0] |= HWMON_C_TEMP_SAMPLES;
+ if (data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC))
+ data->chip_config[0] |= HWMON_C_PEC;
data->info[1] = &data->temp_info;
info = &data->temp_info;
@@ -2878,19 +2836,6 @@ static int lm90_probe(struct i2c_client *client)
return err;
}
- /*
- * The 'pec' attribute is attached to the i2c device and thus created
- * separately.
- */
- if (data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC)) {
- err = device_create_file(dev, &dev_attr_pec);
- if (err)
- return err;
- err = devm_add_action_or_reset(dev, lm90_remove_pec, dev);
- if (err)
- return err;
- }
-
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &data->chip,
NULL);
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 67b9d7636ee4..9a7afdb49895 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
+ 1000);
mutex_lock(&data->update_lock);
data->tcrit2[index] = val;
@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
mutex_lock(&data->update_lock);
data->tcrit1[index] = val;
@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
if (ret < 0)
return ret;
- val = DIV_ROUND_CLOSEST(val, 1000);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
mutex_lock(&data->update_lock);
@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
return ret;
/* Accuracy is 1/2 degrees C */
- val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
mutex_lock(&data->update_lock);
data->toffset[index] = val;
@@ -677,10 +678,9 @@ static int lm95234_init_client(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id lm95234_id[];
-
static int lm95234_probe(struct i2c_client *client)
{
+ enum chips type = (uintptr_t)i2c_get_match_data(client);
struct device *dev = &client->dev;
struct lm95234_data *data;
struct device *hwmon_dev;
@@ -699,7 +699,7 @@ static int lm95234_probe(struct i2c_client *client)
return err;
data->groups[0] = &lm95234_common_group;
- if (i2c_match_id(lm95234_id, client)->driver_data == lm95234)
+ if (type == lm95234)
data->groups[1] = &lm95234_group;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 06750bb93c23..573cd8f5721b 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -225,8 +225,8 @@ static umode_t ltc2991_is_visible(const void *data,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- if (st->temp_en[channel] ||
- channel == LTC2991_T_INT_CH_NR)
+ if (channel == LTC2991_T_INT_CH_NR ||
+ st->temp_en[channel])
return 0444;
break;
}
@@ -284,7 +284,6 @@ static const struct regmap_config ltc2991_regmap_config = {
static int ltc2991_init(struct ltc2991_state *st, struct device *dev)
{
- struct fwnode_handle *child;
int ret;
u32 val, addr;
u8 v5_v8_reg_data = 0, v1_v4_reg_data = 0;
@@ -294,17 +293,13 @@ static int ltc2991_init(struct ltc2991_state *st, struct device *dev)
return dev_err_probe(dev, ret,
"failed to enable regulator\n");
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
ret = fwnode_property_read_u32(child, "reg", &addr);
- if (ret < 0) {
- fwnode_handle_put(child);
+ if (ret < 0)
return ret;
- }
- if (addr > 3) {
- fwnode_handle_put(child);
+ if (addr > 3)
return -EINVAL;
- }
ret = fwnode_property_read_u32(child,
"shunt-resistor-micro-ohms",
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 229aed15d5ca..d4a93223cd3b 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -876,9 +876,11 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
if (!ret) {
- if (!val)
+ if (!val) {
+ fwnode_handle_put(child);
return dev_err_probe(&st->client->dev, -EINVAL,
"shunt resistor value cannot be zero\n");
+ }
st->r_sense_uohm[addr] = val;
}
}
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index aa38c45adc09..7ce9a89f93a0 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -493,8 +493,6 @@ static const struct attribute_group max16065_max_group = {
.is_visible = max16065_secondary_is_visible,
};
-static const struct i2c_device_id max16065_id[];
-
static int max16065_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
@@ -505,7 +503,7 @@ static int max16065_probe(struct i2c_client *client)
bool have_secondary; /* true if chip has secondary limits */
bool secondary_is_max = false; /* secondary limits reflect max */
int groups = 0;
- const struct i2c_device_id *id = i2c_match_id(max16065_id, client);
+ enum chips chip = (uintptr_t)i2c_get_match_data(client);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_READ_WORD_DATA))
@@ -518,9 +516,9 @@ static int max16065_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->update_lock);
- data->num_adc = max16065_num_adc[id->driver_data];
- data->have_current = max16065_have_current[id->driver_data];
- have_secondary = max16065_have_secondary[id->driver_data];
+ data->num_adc = max16065_num_adc[chip];
+ data->have_current = max16065_have_current[chip];
+ have_secondary = max16065_have_secondary[chip];
if (have_secondary) {
val = i2c_smbus_read_byte_data(client, MAX16065_SW_ENABLE);
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index c4a02edefbee..9fc583ebb11b 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -391,8 +391,6 @@ static int max1668_detect(struct i2c_client *client,
return 0;
}
-static const struct i2c_device_id max1668_id[];
-
static int max1668_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
@@ -408,7 +406,7 @@ static int max1668_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- data->type = i2c_match_id(max1668_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
mutex_init(&data->update_lock);
/* sysfs hooks */
diff --git a/drivers/hwmon/max31827.c b/drivers/hwmon/max31827.c
index f8a13b30f100..48e8f8ba4d05 100644
--- a/drivers/hwmon/max31827.c
+++ b/drivers/hwmon/max31827.c
@@ -24,6 +24,7 @@
#define MAX31827_CONFIGURATION_1SHOT_MASK BIT(0)
#define MAX31827_CONFIGURATION_CNV_RATE_MASK GENMASK(3, 1)
+#define MAX31827_CONFIGURATION_PEC_EN_MASK BIT(4)
#define MAX31827_CONFIGURATION_TIMEOUT_MASK BIT(5)
#define MAX31827_CONFIGURATION_RESOLUTION_MASK GENMASK(7, 6)
#define MAX31827_CONFIGURATION_ALRM_POL_MASK BIT(8)
@@ -46,6 +47,11 @@
#define MAX31827_M_DGR_TO_16_BIT(x) (((x) << 4) / 1000)
#define MAX31827_DEVICE_ENABLE(x) ((x) ? 0xA : 0x0)
+/*
+ * The enum passed in the .data pointer of struct of_device_id must
+ * start with a value != 0 since that is a requirement for using
+ * device_get_match_data().
+ */
enum chips { max31827 = 1, max31828, max31829 };
enum max31827_cnv {
@@ -382,7 +388,8 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type,
}
case hwmon_chip:
- if (attr == hwmon_chip_update_interval) {
+ switch (attr) {
+ case hwmon_chip_update_interval:
if (!st->enable)
return -EINVAL;
@@ -410,14 +417,18 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type,
return ret;
st->update_interval = val;
- }
- break;
+ return 0;
+ case hwmon_chip_pec:
+ return regmap_update_bits(st->regmap, MAX31827_CONFIGURATION_REG,
+ MAX31827_CONFIGURATION_PEC_EN_MASK,
+ val ? MAX31827_CONFIGURATION_PEC_EN_MASK : 0);
+ default:
+ return -EOPNOTSUPP;
+ }
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
static ssize_t temp1_resolution_show(struct device *dev,
@@ -583,7 +594,7 @@ static const struct hwmon_channel_info *max31827_info[] = {
HWMON_T_MIN_HYST | HWMON_T_MIN_ALARM |
HWMON_T_MAX | HWMON_T_MAX_HYST |
HWMON_T_MAX_ALARM),
- HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL | HWMON_C_PEC),
NULL,
};
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index cbb595fe47aa..f54720d3d2ce 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -21,6 +21,7 @@
#include <linux/mutex.h>
#include <linux/platform_data/max6639.h>
#include <linux/regmap.h>
+#include <linux/util_macros.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
@@ -55,13 +56,17 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
#define MAX6639_GCONFIG_PWM_FREQ_HI 0x08
#define MAX6639_FAN_CONFIG1_PWM 0x80
-
+#define MAX6639_FAN_CONFIG3_FREQ_MASK 0x03
#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40
#define MAX6639_NUM_CHANNELS 2
static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
+/* Supported PWM frequency */
+static const unsigned int freq_table[] = { 20, 33, 50, 100, 5000, 8333, 12500,
+ 25000 };
+
#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
0 : (rpm_ranges[rpm_range] * 30) / (val))
#define TEMP_LIMIT_TO_REG(val) clamp_val((val) / 1000, 0, 255)
@@ -71,21 +76,19 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
*/
struct max6639_data {
struct regmap *regmap;
+ struct mutex update_lock;
/* Register values initialized only once */
- u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
- u8 rpm_range; /* Index in above rpm_ranges table */
+ u8 ppr[MAX6639_NUM_CHANNELS]; /* Pulses per rotation 0..3 for 1..4 ppr */
+ u8 rpm_range[MAX6639_NUM_CHANNELS]; /* Index in above rpm_ranges table */
/* Optional regulator for FAN supply */
struct regulator *reg;
};
-static ssize_t temp_input_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int max6639_temp_read_input(struct device *dev, int channel, long *temp)
{
- long temp;
struct max6639_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
unsigned int val;
int res;
@@ -93,251 +96,444 @@ static ssize_t temp_input_show(struct device *dev,
* Lock isn't needed as MAX6639_REG_TEMP won't change for at least 250ms after reading
* MAX6639_REG_TEMP_EXT
*/
- res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(attr->index), &val);
+ res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(channel), &val);
if (res < 0)
return res;
- temp = val >> 5;
- res = regmap_read(data->regmap, MAX6639_REG_TEMP(attr->index), &val);
+ *temp = val >> 5;
+ res = regmap_read(data->regmap, MAX6639_REG_TEMP(channel), &val);
if (res < 0)
return res;
- temp |= val << 3;
- temp *= 125;
+ *temp |= val << 3;
+ *temp *= 125;
- return sprintf(buf, "%ld\n", temp);
+ return 0;
}
-static ssize_t temp_fault_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int max6639_temp_read_fault(struct device *dev, int channel, long *fault)
{
struct max6639_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
unsigned int val;
int res;
- res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(attr->index), &val);
+ res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(channel), &val);
if (res < 0)
return res;
- return sprintf(buf, "%d\n", val & 1);
+ *fault = val & 1;
+
+ return 0;
}
-static ssize_t temp_max_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int max6639_temp_read_max(struct device *dev, int channel, long *max)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
unsigned int val;
int res;
- res = regmap_read(data->regmap, MAX6639_REG_THERM_LIMIT(attr->index), &val);
+ res = regmap_read(data->regmap, MAX6639_REG_THERM_LIMIT(channel), &val);
if (res < 0)
return res;
- return sprintf(buf, "%d\n", (val * 1000));
+ *max = (long)val * 1000;
+
+ return 0;
}
-static ssize_t temp_max_store(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static int max6639_temp_read_crit(struct device *dev, int channel, long *crit)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int res;
- res = kstrtoul(buf, 10, &val);
- if (res)
+ res = regmap_read(data->regmap, MAX6639_REG_ALERT_LIMIT(channel), &val);
+ if (res < 0)
return res;
- regmap_write(data->regmap, MAX6639_REG_THERM_LIMIT(attr->index),
- TEMP_LIMIT_TO_REG(val));
- return count;
+ *crit = (long)val * 1000;
+
+ return 0;
}
-static ssize_t temp_crit_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int max6639_temp_read_emergency(struct device *dev, int channel, long *emerg)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
unsigned int val;
int res;
- res = regmap_read(data->regmap, MAX6639_REG_ALERT_LIMIT(attr->index), &val);
+ res = regmap_read(data->regmap, MAX6639_REG_OT_LIMIT(channel), &val);
if (res < 0)
return res;
- return sprintf(buf, "%d\n", (val * 1000));
+ *emerg = (long)val * 1000;
+
+ return 0;
}
-static ssize_t temp_crit_store(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static int max6639_get_status(struct device *dev, unsigned int *status)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int res;
- res = kstrtoul(buf, 10, &val);
- if (res)
+ res = regmap_read(data->regmap, MAX6639_REG_STATUS, &val);
+ if (res < 0)
return res;
- regmap_write(data->regmap, MAX6639_REG_ALERT_LIMIT(attr->index),
- TEMP_LIMIT_TO_REG(val));
- return count;
+ *status = val;
+
+ return 0;
}
-static ssize_t temp_emergency_show(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
+static int max6639_temp_set_max(struct max6639_data *data, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- struct max6639_data *data = dev_get_drvdata(dev);
- unsigned int val;
int res;
- res = regmap_read(data->regmap, MAX6639_REG_OT_LIMIT(attr->index), &val);
- if (res < 0)
- return res;
-
- return sprintf(buf, "%d\n", (val * 1000));
+ res = regmap_write(data->regmap, MAX6639_REG_THERM_LIMIT(channel),
+ TEMP_LIMIT_TO_REG(val));
+ return res;
}
-static ssize_t temp_emergency_store(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static int max6639_temp_set_crit(struct max6639_data *data, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- struct max6639_data *data = dev_get_drvdata(dev);
- unsigned long val;
int res;
- res = kstrtoul(buf, 10, &val);
- if (res)
- return res;
+ res = regmap_write(data->regmap, MAX6639_REG_ALERT_LIMIT(channel), TEMP_LIMIT_TO_REG(val));
- regmap_write(data->regmap, MAX6639_REG_OT_LIMIT(attr->index), TEMP_LIMIT_TO_REG(val));
+ return res;
+}
- return count;
+static int max6639_temp_set_emergency(struct max6639_data *data, int channel, long val)
+{
+ int res;
+
+ res = regmap_write(data->regmap, MAX6639_REG_OT_LIMIT(channel), TEMP_LIMIT_TO_REG(val));
+
+ return res;
}
-static ssize_t pwm_show(struct device *dev, struct device_attribute *dev_attr,
- char *buf)
+static int max6639_read_fan(struct device *dev, u32 attr, int channel,
+ long *fan_val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
unsigned int val;
int res;
- res = regmap_read(data->regmap, MAX6639_REG_TARGTDUTY(attr->index), &val);
- if (res < 0)
- return res;
+ switch (attr) {
+ case hwmon_fan_input:
+ res = regmap_read(data->regmap, MAX6639_REG_FAN_CNT(channel), &val);
+ if (res < 0)
+ return res;
+ *fan_val = FAN_FROM_REG(val, data->rpm_range[channel]);
+ return 0;
+ case hwmon_fan_fault:
+ res = max6639_get_status(dev, &val);
+ if (res < 0)
+ return res;
+ *fan_val = !!(val & BIT(1 - channel));
+ return 0;
+ case hwmon_fan_pulses:
+ *fan_val = data->ppr[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return sprintf(buf, "%d\n", val * 255 / 120);
+static int max6639_set_ppr(struct max6639_data *data, int channel, u8 ppr)
+{
+ /* Decrement the PPR value and shift left by 6 to match the register format */
+ return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), (ppr - 1) << 6);
}
-static ssize_t pwm_store(struct device *dev,
- struct device_attribute *dev_attr, const char *buf,
- size_t count)
+static int max6639_write_fan(struct device *dev, u32 attr, int channel,
+ long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int res;
+ int err;
- res = kstrtoul(buf, 10, &val);
- if (res)
- return res;
+ switch (attr) {
+ case hwmon_fan_pulses:
+ if (val <= 0 || val > 4)
+ return -EINVAL;
- val = clamp_val(val, 0, 255);
+ mutex_lock(&data->update_lock);
+ /* Set Fan pulse per revolution */
+ err = max6639_set_ppr(data, channel, val);
+ if (err < 0) {
+ mutex_unlock(&data->update_lock);
+ return err;
+ }
+ data->ppr[channel] = val;
- regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(attr->index), val * 120 / 255);
+ mutex_unlock(&data->update_lock);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return count;
+static umode_t max6639_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ case hwmon_fan_pulses:
+ return 0644;
+ default:
+ return 0;
+ }
}
-static ssize_t fan_input_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int max6639_read_pwm(struct device *dev, u32 attr, int channel,
+ long *pwm_val)
{
struct max6639_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
unsigned int val;
int res;
+ u8 i;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ res = regmap_read(data->regmap, MAX6639_REG_TARGTDUTY(channel), &val);
+ if (res < 0)
+ return res;
+ *pwm_val = val * 255 / 120;
+ return 0;
+ case hwmon_pwm_freq:
+ mutex_lock(&data->update_lock);
+ res = regmap_read(data->regmap, MAX6639_REG_FAN_CONFIG3(channel), &val);
+ if (res < 0) {
+ mutex_unlock(&data->update_lock);
+ return res;
+ }
+ i = val & MAX6639_FAN_CONFIG3_FREQ_MASK;
- res = regmap_read(data->regmap, MAX6639_REG_FAN_CNT(attr->index), &val);
- if (res < 0)
- return res;
+ res = regmap_read(data->regmap, MAX6639_REG_GCONFIG, &val);
+ if (res < 0) {
+ mutex_unlock(&data->update_lock);
+ return res;
+ }
+
+ if (val & MAX6639_GCONFIG_PWM_FREQ_HI)
+ i |= 0x4;
+ i &= 0x7;
+ *pwm_val = freq_table[i];
- return sprintf(buf, "%d\n", FAN_FROM_REG(val, data->rpm_range));
+ mutex_unlock(&data->update_lock);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t alarm_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static int max6639_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
{
struct max6639_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- unsigned int val;
+ int err;
+ u8 i;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(channel),
+ val * 120 / 255);
+ return err;
+ case hwmon_pwm_freq:
+ val = clamp_val(val, 0, 25000);
+
+ i = find_closest(val, freq_table, ARRAY_SIZE(freq_table));
+
+ mutex_lock(&data->update_lock);
+ err = regmap_update_bits(data->regmap, MAX6639_REG_FAN_CONFIG3(channel),
+ MAX6639_FAN_CONFIG3_FREQ_MASK, i);
+ if (err < 0) {
+ mutex_unlock(&data->update_lock);
+ return err;
+ }
+
+ if (i >> 2)
+ err = regmap_set_bits(data->regmap, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_PWM_FREQ_HI);
+ else
+ err = regmap_clear_bits(data->regmap, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_PWM_FREQ_HI);
+
+ mutex_unlock(&data->update_lock);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t max6639_pwm_is_visible(const void *_data, u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_freq:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int max6639_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ unsigned int status;
int res;
- res = regmap_read(data->regmap, MAX6639_REG_STATUS, &val);
- if (res < 0)
+ switch (attr) {
+ case hwmon_temp_input:
+ res = max6639_temp_read_input(dev, channel, val);
return res;
+ case hwmon_temp_fault:
+ res = max6639_temp_read_fault(dev, channel, val);
+ return res;
+ case hwmon_temp_max:
+ res = max6639_temp_read_max(dev, channel, val);
+ return res;
+ case hwmon_temp_crit:
+ res = max6639_temp_read_crit(dev, channel, val);
+ return res;
+ case hwmon_temp_emergency:
+ res = max6639_temp_read_emergency(dev, channel, val);
+ return res;
+ case hwmon_temp_max_alarm:
+ res = max6639_get_status(dev, &status);
+ if (res < 0)
+ return res;
+ *val = !!(status & BIT(3 - channel));
+ return 0;
+ case hwmon_temp_crit_alarm:
+ res = max6639_get_status(dev, &status);
+ if (res < 0)
+ return res;
+ *val = !!(status & BIT(7 - channel));
+ return 0;
+ case hwmon_temp_emergency_alarm:
+ res = max6639_get_status(dev, &status);
+ if (res < 0)
+ return res;
+ *val = !!(status & BIT(5 - channel));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return sprintf(buf, "%d\n", !!(val & (1 << attr->index)));
+static int max6639_write_temp(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct max6639_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return max6639_temp_set_max(data, channel, val);
+ case hwmon_temp_crit:
+ return max6639_temp_set_crit(data, channel, val);
+ case hwmon_temp_emergency:
+ return max6639_temp_set_emergency(data, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
-static SENSOR_DEVICE_ATTR_RO(temp1_fault, temp_fault, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp_fault, 1);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
-static SENSOR_DEVICE_ATTR_RW(temp1_emergency, temp_emergency, 0);
-static SENSOR_DEVICE_ATTR_RW(temp2_emergency, temp_emergency, 1);
-static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
-static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
-static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
-static SENSOR_DEVICE_ATTR_RO(fan1_fault, alarm, 1);
-static SENSOR_DEVICE_ATTR_RO(fan2_fault, alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 3);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 7);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 6);
-static SENSOR_DEVICE_ATTR_RO(temp1_emergency_alarm, alarm, 5);
-static SENSOR_DEVICE_ATTR_RO(temp2_emergency_alarm, alarm, 4);
-
-
-static struct attribute *max6639_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp1_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_emergency.dev_attr.attr,
- &sensor_dev_attr_temp2_emergency.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm2.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan1_fault.dev_attr.attr,
- &sensor_dev_attr_fan2_fault.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
+static umode_t max6639_temp_is_visible(const void *_data, u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_fault:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_emergency_alarm:
+ return 0444;
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_emergency:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int max6639_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return max6639_read_fan(dev, attr, channel, val);
+ case hwmon_pwm:
+ return max6639_read_pwm(dev, attr, channel, val);
+ case hwmon_temp:
+ return max6639_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max6639_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return max6639_write_fan(dev, attr, channel, val);
+ case hwmon_pwm:
+ return max6639_write_pwm(dev, attr, channel, val);
+ case hwmon_temp:
+ return max6639_write_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t max6639_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return max6639_fan_is_visible(data, attr, channel);
+ case hwmon_pwm:
+ return max6639_pwm_is_visible(data, attr, channel);
+ case hwmon_temp:
+ return max6639_temp_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info * const max6639_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_PULSES,
+ HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_PULSES),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_FREQ,
+ HWMON_PWM_INPUT | HWMON_PWM_FREQ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY |
+ HWMON_T_EMERGENCY_ALARM,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY |
+ HWMON_T_EMERGENCY_ALARM),
NULL
};
-ATTRIBUTE_GROUPS(max6639);
+
+static const struct hwmon_ops max6639_hwmon_ops = {
+ .is_visible = max6639_is_visible,
+ .read = max6639_read,
+ .write = max6639_write,
+};
+
+static const struct hwmon_chip_info max6639_chip_info = {
+ .ops = &max6639_hwmon_ops,
+ .info = max6639_info,
+};
/*
* returns respective index in rpm_ranges table
@@ -355,11 +551,6 @@ static int rpm_range_to_reg(int range)
return 1; /* default: 4000 RPM */
}
-static int max6639_set_ppr(struct max6639_data *data, u8 channel, u8 ppr)
-{
- return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), ppr << 6);
-}
-
static int max6639_init_client(struct i2c_client *client,
struct max6639_data *data)
{
@@ -380,30 +571,34 @@ static int max6639_init_client(struct i2c_client *client,
ppr = max6639_info->ppr;
else
ppr = 2;
- ppr -= 1;
+
+ data->ppr[0] = ppr;
+ data->ppr[1] = ppr;
if (max6639_info)
rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
- data->rpm_range = rpm_range;
+ data->rpm_range[0] = rpm_range;
+ data->rpm_range[1] = rpm_range;
for (i = 0; i < MAX6639_NUM_CHANNELS; i++) {
-
/* Set Fan pulse per revolution */
- err = max6639_set_ppr(data, i, ppr);
+ err = max6639_set_ppr(data, i, data->ppr[i]);
if (err)
return err;
/* Fans config PWM, RPM */
err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG1(i),
- MAX6639_FAN_CONFIG1_PWM | rpm_range);
+ MAX6639_FAN_CONFIG1_PWM | data->rpm_range[i]);
if (err)
return err;
/* Fans PWM polarity high by default */
- if (max6639_info && max6639_info->pwm_polarity == 0)
- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
- else
- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (max6639_info) {
+ if (max6639_info->pwm_polarity == 0)
+ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+ else
+ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ }
if (err)
return err;
@@ -529,14 +724,17 @@ static int max6639_probe(struct i2c_client *client)
}
}
+ mutex_init(&data->update_lock);
+
/* Initialize the max6639 chip */
err = max6639_init_client(client, data);
if (err < 0)
return err;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- max6639_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &max6639_chip_info,
+ NULL);
+
return PTR_ERR_OR_ZERO(hwmon_dev);
}
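
A small worked example of the pwmX_freq encoding introduced above, reusing freq_table and the masks from the hunk; max6639_encode_freq() is a made-up helper for illustration only. find_closest() snaps the requested frequency to the nearest table entry, the low two bits of the index go into the FAN_CONFIG3 register and bit 2 selects MAX6639_GCONFIG_PWM_FREQ_HI, so a request of 6000 Hz, for example, becomes index 4 (5000 Hz): CONFIG3 bits 0, HI bit set.

static unsigned int max6639_encode_freq(long req_hz, u8 *config3_bits,
					bool *freq_hi)
{
	/* Index into freq_table[] = { 20, 33, 50, 100, 5000, 8333, 12500, 25000 } */
	unsigned int i = find_closest(clamp_val(req_hz, 0, 25000), freq_table,
				      ARRAY_SIZE(freq_table));

	*config3_bits = i & MAX6639_FAN_CONFIG3_FREQ_MASK;	/* bits 1:0 */
	*freq_hi = i >> 2;					/* bit 2 */

	return freq_table[i];	/* frequency actually programmed */
}
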
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
deleted file mode 100644
index 9302ab233910..000000000000
--- a/drivers/hwmon/max6642.c
+++ /dev/null
@@ -1,314 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for +/-1 degree C, SMBus-Compatible Remote/Local Temperature Sensor
- * with Overtemperature Alarm
- *
- * Copyright (C) 2011 AppearTV AS
- *
- * Derived from:
- *
- * Based on the max1619 driver.
- * Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- * Jean Delvare <jdelvare@suse.de>
- *
- * The MAX6642 is a sensor chip made by Maxim.
- * It reports up to two temperatures (its own plus up to
- * one external one). Complete datasheet can be
- * obtained from Maxim's website at:
- * http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf
- */
-
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/sysfs.h>
-
-static const unsigned short normal_i2c[] = {
- 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
-
-/*
- * The MAX6642 registers
- */
-
-#define MAX6642_REG_R_MAN_ID 0xFE
-#define MAX6642_REG_R_CONFIG 0x03
-#define MAX6642_REG_W_CONFIG 0x09
-#define MAX6642_REG_R_STATUS 0x02
-#define MAX6642_REG_R_LOCAL_TEMP 0x00
-#define MAX6642_REG_R_LOCAL_TEMPL 0x11
-#define MAX6642_REG_R_LOCAL_HIGH 0x05
-#define MAX6642_REG_W_LOCAL_HIGH 0x0B
-#define MAX6642_REG_R_REMOTE_TEMP 0x01
-#define MAX6642_REG_R_REMOTE_TEMPL 0x10
-#define MAX6642_REG_R_REMOTE_HIGH 0x07
-#define MAX6642_REG_W_REMOTE_HIGH 0x0D
-
-/*
- * Conversions
- */
-
-static int temp_from_reg10(int val)
-{
- return val * 250;
-}
-
-static int temp_from_reg(int val)
-{
- return val * 1000;
-}
-
-static int temp_to_reg(int val)
-{
- return val / 1000;
-}
-
-/*
- * Client data (each client gets its own)
- */
-
-struct max6642_data {
- struct i2c_client *client;
- struct mutex update_lock;
- bool valid; /* zero until following fields are valid */
- unsigned long last_updated; /* in jiffies */
-
- /* registers values */
- u16 temp_input[2]; /* local/remote */
- u16 temp_high[2]; /* local/remote */
- u8 alarms;
-};
-
-/*
- * Real code
- */
-
-static void max6642_init_client(struct max6642_data *data,
- struct i2c_client *client)
-{
- u8 config;
-
- /*
- * Start the conversions.
- */
- config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
- if (config & 0x40)
- i2c_smbus_write_byte_data(client, MAX6642_REG_W_CONFIG,
- config & 0xBF); /* run */
-
- data->temp_high[0] = i2c_smbus_read_byte_data(client,
- MAX6642_REG_R_LOCAL_HIGH);
- data->temp_high[1] = i2c_smbus_read_byte_data(client,
- MAX6642_REG_R_REMOTE_HIGH);
-}
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int max6642_detect(struct i2c_client *client,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- u8 reg_config, reg_status, man_id;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
-
- /* identification */
- man_id = i2c_smbus_read_byte_data(client, MAX6642_REG_R_MAN_ID);
- if (man_id != 0x4D)
- return -ENODEV;
-
- /* sanity check */
- if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
- || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
- || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
- return -ENODEV;
-
- /*
- * We read the config and status register, the 4 lower bits in the
- * config register should be zero and bit 5, 3, 1 and 0 should be
- * zero in the status register.
- */
- reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
- if ((reg_config & 0x0f) != 0x00)
- return -ENODEV;
-
- /* in between, another round of sanity checks */
- if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
- || i2c_smbus_read_byte_data(client, 0x06) != reg_config
- || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
- return -ENODEV;
-
- reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
- if ((reg_status & 0x2b) != 0x00)
- return -ENODEV;
-
- strscpy(info->type, "max6642", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static struct max6642_data *max6642_update_device(struct device *dev)
-{
- struct max6642_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- u16 val, tmp;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- dev_dbg(dev, "Updating max6642 data.\n");
- val = i2c_smbus_read_byte_data(client,
- MAX6642_REG_R_LOCAL_TEMPL);
- tmp = (val >> 6) & 3;
- val = i2c_smbus_read_byte_data(client,
- MAX6642_REG_R_LOCAL_TEMP);
- val = (val << 2) | tmp;
- data->temp_input[0] = val;
- val = i2c_smbus_read_byte_data(client,
- MAX6642_REG_R_REMOTE_TEMPL);
- tmp = (val >> 6) & 3;
- val = i2c_smbus_read_byte_data(client,
- MAX6642_REG_R_REMOTE_TEMP);
- val = (val << 2) | tmp;
- data->temp_input[1] = val;
- data->alarms = i2c_smbus_read_byte_data(client,
- MAX6642_REG_R_STATUS);
-
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
-/*
- * Sysfs stuff
- */
-
-static ssize_t temp_max10_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
- struct max6642_data *data = max6642_update_device(dev);
-
- return sprintf(buf, "%d\n",
- temp_from_reg10(data->temp_input[attr->index]));
-}
-
-static ssize_t temp_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
- struct max6642_data *data = max6642_update_device(dev);
-
- return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr]));
-}
-
-static ssize_t temp_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
- struct max6642_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- mutex_lock(&data->update_lock);
- data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
- i2c_smbus_write_byte_data(data->client, attr2->index,
- data->temp_high[attr2->nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int bitnr = to_sensor_dev_attr(attr)->index;
- struct max6642_data *data = max6642_update_device(dev);
- return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
-}
-
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_max10, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_max10, 1);
-static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp_max, 0,
- MAX6642_REG_W_LOCAL_HIGH);
-static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp_max, 1,
- MAX6642_REG_W_REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4);
-
-static struct attribute *max6642_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
-
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(max6642);
-
-static int max6642_probe(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- struct max6642_data *data;
- struct device *hwmon_dev;
-
- data = devm_kzalloc(dev, sizeof(struct max6642_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->client = client;
- mutex_init(&data->update_lock);
-
- /* Initialize the MAX6642 chip */
- max6642_init_client(data, client);
-
- hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
- client->name, data,
- max6642_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
-}
-
-/*
- * Driver data (common to all clients)
- */
-
-static const struct i2c_device_id max6642_id[] = {
- { "max6642" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, max6642_id);
-
-static struct i2c_driver max6642_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "max6642",
- },
- .probe = max6642_probe,
- .id_table = max6642_id,
- .detect = max6642_detect,
- .address_list = normal_i2c,
-};
-
-module_i2c_driver(max6642_driver);
-
-MODULE_AUTHOR("Per Dalen <per.dalen@appeartv.com>");
-MODULE_DESCRIPTION("MAX6642 sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index d161ba0e7813..20981f9443dd 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -311,6 +311,7 @@ static ssize_t temp_store(struct device *dev,
return ret;
mutex_lock(&data->update_lock);
+ temp = clamp_val(temp, -1000000, 1000000); /* prevent underflow */
temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
data->temp[nr][index] = temp;
@@ -428,14 +429,14 @@ static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20);
static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21);
static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15);
static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9);
static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10);
static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11);
static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12);
static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13);
-static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14);
static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1);
static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
@@ -684,8 +685,6 @@ done:
return 0;
}
-static const struct i2c_device_id max6697_id[];
-
static int max6697_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
@@ -701,10 +700,7 @@ static int max6697_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- if (client->dev.of_node)
- data->type = (uintptr_t)of_device_get_match_data(&client->dev);
- else
- data->type = i2c_match_id(max6697_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
data->chip = &max6697_chip_data[data->type];
data->client = client;
mutex_init(&data->update_lock);
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 9814eaf24564..bcddf6804d3a 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -116,13 +116,12 @@ static const struct hwmon_chip_info mcp3021_chip_info = {
.info = mcp3021_info,
};
-static const struct i2c_device_id mcp3021_id[];
-
static int mcp3021_probe(struct i2c_client *client)
{
struct mcp3021_data *data = NULL;
struct device_node *np = client->dev.of_node;
struct device *hwmon_dev;
+ enum chips type;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -149,7 +148,8 @@ static int mcp3021_probe(struct i2c_client *client)
data->vdd = MCP3021_VDD_REF_DEFAULT;
}
- switch (i2c_match_id(mcp3021_id, client)->driver_data) {
+ type = (uintptr_t)i2c_get_match_data(client);
+ switch (type) {
case mcp3021:
data->sar_shift = MCP3021_SAR_SHIFT;
data->sar_mask = MCP3021_SAR_MASK;
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 50a8b9c3f94d..7848198f8996 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -925,4 +925,5 @@ static struct platform_driver moortec_pvt_driver = {
};
module_platform_driver(moortec_pvt_driver);
+MODULE_DESCRIPTION("Moortec Semiconductor MR75203 PVT Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 0d016fedb9c2..f71615e06a8f 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -1236,6 +1236,8 @@ static int nct6683_probe(struct platform_device *pdev)
default:
if (!force)
return -ENODEV;
+ dev_warn(dev, "Enabling support for unknown customer ID 0x%04x\n", data->customer_id);
+ break;
}
nct6683_init_device(data);
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 9fbab8f02334..934fed3dd586 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -2262,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = val;
diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h
index d31e7a030216..296eff99d003 100644
--- a/drivers/hwmon/nct6775.h
+++ b/drivers/hwmon/nct6775.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-enum kinds { nct6106 = 1, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
+enum kinds { nct6106, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
nct6793, nct6795, nct6796, nct6797, nct6798, nct6799 };
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
index 7aa586eb74be..df6fa72a6b59 100644
--- a/drivers/hwmon/nzxt-smart2.c
+++ b/drivers/hwmon/nzxt-smart2.c
@@ -799,6 +799,7 @@ static const struct hid_device_id nzxt_smart2_hid_id_table[] = {
{ HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */
{ HID_USB_DEVICE(0x1e71, 0x2011) }, /* NZXT RGB & Fan Controller (6 RGB) */
{ HID_USB_DEVICE(0x1e71, 0x2019) }, /* NZXT RGB & Fan Controller (6 RGB) */
+ { HID_USB_DEVICE(0x1e71, 0x2020) }, /* NZXT RGB & Fan Controller (6 RGB) */
{},
};
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 08e82c457356..a4f02cad92fd 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -337,6 +337,15 @@ config SENSORS_MP2888
This driver can also be built as a module. If so, the module will
be called mp2888.
+config SENSORS_MP2891
+ tristate "MPS MP2891"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2891 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2891.
+
config SENSORS_MP2975
tristate "MPS MP2975"
help
@@ -346,6 +355,15 @@ config SENSORS_MP2975
This driver can also be built as a module. If so, the module will
be called mp2975.
+config SENSORS_MP2993
+ tristate "MPS MP2993"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2993 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2993.
+
config SENSORS_MP2975_REGULATOR
depends on SENSORS_MP2975 && REGULATOR
bool "Regulator support for MPS MP2975"
@@ -362,6 +380,15 @@ config SENSORS_MP5023
This driver can also be built as a module. If so, the module will
be called mp5023.
+config SENSORS_MP5920
+ tristate "MPS MP5920"
+ help
+ If you say yes here you get hardware monitoring support for Monolithic
+	  Power Systems (MPS) MP5920.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp5920.
+
config SENSORS_MP5990
tristate "MPS MP5990"
help
@@ -371,6 +398,15 @@ config SENSORS_MP5990
This driver can also be built as a module. If so, the module will
be called mp5990.
+config SENSORS_MP9941
+ tristate "MPS MP9941"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP9941.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp9941.
+
config SENSORS_MPQ7932_REGULATOR
bool "Regulator support for MPQ7932"
depends on SENSORS_MPQ7932 && REGULATOR
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 2279b3327bbf..d00bcc758b97 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -36,9 +36,13 @@ obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_MP2856) += mp2856.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
+obj-$(CONFIG_SENSORS_MP2891) += mp2891.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
+obj-$(CONFIG_SENSORS_MP2993) += mp2993.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
+obj-$(CONFIG_SENSORS_MP5920) += mp5920.o
obj-$(CONFIG_SENSORS_MP5990) += mp5990.o
+obj-$(CONFIG_SENSORS_MP9941) += mp9941.o
obj-$(CONFIG_SENSORS_MPQ7932) += mpq7932.o
obj-$(CONFIG_SENSORS_MPQ8785) += mpq8785.o
obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index cfffa4cdc0df..c36c124d1a2d 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -17,7 +17,7 @@
#include <linux/of.h>
#include "pmbus.h"
-enum chips { lm25056 = 1, lm25066, lm5064, lm5066, lm5066i };
+enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
diff --git a/drivers/hwmon/pmbus/ltc4286.c b/drivers/hwmon/pmbus/ltc4286.c
index 9e7ceeb7e789..aabd0bcdfeee 100644
--- a/drivers/hwmon/pmbus/ltc4286.c
+++ b/drivers/hwmon/pmbus/ltc4286.c
@@ -58,8 +58,8 @@ static struct pmbus_driver_info ltc4286_info = {
};
static const struct i2c_device_id ltc4286_id[] = {
- { "ltc4286", 0 },
- { "ltc4287", 1 },
+ { "ltc4286", },
+ { "ltc4287", },
{}
};
MODULE_DEVICE_TABLE(i2c, ltc4286_id);
diff --git a/drivers/hwmon/pmbus/mp2856.c b/drivers/hwmon/pmbus/mp2856.c
index 6969350f5d7d..41bb86667091 100644
--- a/drivers/hwmon/pmbus/mp2856.c
+++ b/drivers/hwmon/pmbus/mp2856.c
@@ -46,7 +46,7 @@
#define MP2856_PAGE_NUM 2
-enum chips { mp2856 = 1, mp2857 };
+enum chips { mp2856, mp2857 };
static const int mp2856_max_phases[][MP2856_PAGE_NUM] = {
[mp2856] = { MP2856_MAX_PHASE_RAIL1, MP2856_MAX_PHASE_RAIL2 },
@@ -66,7 +66,6 @@ struct mp2856_data {
int vout_format[MP2856_PAGE_NUM];
int curr_sense_gain[MP2856_PAGE_NUM];
int max_phases[MP2856_PAGE_NUM];
- enum chips chip_id;
};
#define to_mp2856_data(x) container_of(x, struct mp2856_data, info)
@@ -397,6 +396,7 @@ static int mp2856_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
struct mp2856_data *data;
+ enum chips chip_id;
int ret;
data = devm_kzalloc(&client->dev, sizeof(struct mp2856_data),
@@ -404,9 +404,9 @@ static int mp2856_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- data->chip_id = (enum chips)(uintptr_t)i2c_get_match_data(client);
+ chip_id = (kernel_ulong_t)i2c_get_match_data(client);
- memcpy(data->max_phases, mp2856_max_phases[data->chip_id],
+ memcpy(data->max_phases, mp2856_max_phases[chip_id],
sizeof(data->max_phases));
memcpy(&data->info, &mp2856_info, sizeof(*info));
diff --git a/drivers/hwmon/pmbus/mp2891.c b/drivers/hwmon/pmbus/mp2891.c
new file mode 100644
index 000000000000..bb28b15a9103
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2891.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP2891)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+/*
+ * Vendor specific registers: the registers MFR_SVI3_IOUT_PRT(0x65),
+ * MFR_VOUT_LOOP_CTRL(0xBD), READ_PIN_EST(0x94) and READ_IIN_EST(0x95)
+ * redefine standard PMBus registers. MFR_SVI3_IOUT_PRT(0x65) is used
+ * to identify the iout scale and MFR_VOUT_LOOP_CTRL(0xBD) is used to
+ * identify the vout scale. READ_PIN_EST(0x94) is used to read the
+ * input power per rail. The MP2891 does not have the standard
+ * READ_IIN register(0x89); the iin telemetry can be obtained through
+ * the vendor redefined register READ_IIN_EST(0x95).
+ */
+#define MFR_VOUT_LOOP_CTRL 0xBD
+#define READ_PIN_EST 0x94
+#define READ_IIN_EST 0x95
+#define MFR_SVI3_IOUT_PRT 0x65
+
+#define MP2891_TEMP_LIMIT_OFFSET 40
+#define MP2891_PIN_LIMIT_UINT 2
+#define MP2891_IOUT_LIMIT_UINT 8
+#define MP2891_IOUT_SCALE_DIV 32
+#define MP2891_VOUT_SCALE_DIV 100
+#define MP2891_OVUV_DELTA_SCALE 50
+#define MP2891_OV_LIMIT_SCALE 20
+#define MP2891_UV_LIMIT_SCALE 5
+
+#define MP2891_PAGE_NUM 2
+
+#define MP2891_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP | \
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_INPUT | \
+ PMBUS_HAVE_STATUS_TEMP)
+
+#define MP2891_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_PIN | PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_INPUT | \
+ PMBUS_HAVE_STATUS_TEMP)
+
+struct mp2891_data {
+ struct pmbus_driver_info info;
+ int vout_scale[MP2891_PAGE_NUM];
+ int iout_scale[MP2891_PAGE_NUM];
+};
+
+#define to_mp2891_data(x) container_of(x, struct mp2891_data, info)
+
+/* Converts a LINEAR11 value to DIRECT format */
+static u16 mp2891_reg2data_linear11(u16 word)
+{
+ s16 exponent;
+ s32 mantissa;
+ s64 val;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ val = mantissa;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return val;
+}
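+
+/*
+ * Illustrative example (register value chosen for illustration, not taken
+ * from the datasheet): a word of 0xf3e8 encodes exponent -2 (upper five
+ * bits) and mantissa 1000 (lower eleven bits), so the function above
+ * returns 1000 >> 2 = 250.
+ */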
+
+static int
+mp2891_identify_vout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2891_data *data = to_mp2891_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VOUT_LOOP_CTRL);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The output voltage is equal to the READ_VOUT(0x8B) register value multiplied
+ * by vout_scale.
+	 * Obtain the vout scale from register MFR_VOUT_LOOP_CTRL, bits 15-14 and bit 13.
+	 * If MFR_VOUT_LOOP_CTRL[13] = 1, the vout scale is:
+ * 2.5mV/LSB
+ * If MFR_VOUT_LOOP_CTRL[13] = 0, the vout scale is decided by
+ * MFR_VOUT_LOOP_CTRL[15:14]:
+	 *	00b - 6.25mV/LSB, 01b - 5mV/LSB, 10b - 2mV/LSB, 11b - 1mV/LSB
+ */
+ if (ret & GENMASK(13, 13)) {
+ data->vout_scale[page] = 250;
+ } else {
+ ret = FIELD_GET(GENMASK(15, 14), ret);
+ if (ret == 0)
+ data->vout_scale[page] = 625;
+ else if (ret == 1)
+ data->vout_scale[page] = 500;
+ else if (ret == 2)
+ data->vout_scale[page] = 200;
+ else
+ data->vout_scale[page] = 100;
+ }
+
+ return 0;
+}
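+
+/*
+ * Illustrative example (register value is an assumption): if
+ * MFR_VOUT_LOOP_CTRL reads 0x2000, bit 13 is set and vout_scale becomes
+ * 250; dividing by MP2891_VOUT_SCALE_DIV (100) in the read path then
+ * yields the 2.5mV/LSB scale described above.
+ */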
+
+static int
+mp2891_identify_iout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2891_data *data = to_mp2891_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SVI3_IOUT_PRT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The output current is equal to the READ_IOUT(0x8C) register value
+ * multiplied by iout_scale.
+ * Obtain iout_scale from the register MFR_SVI3_IOUT_PRT[2:0].
+ * The value is selected as below:
+ * 000b - 1A/LSB, 001b - (1/32)A/LSB, 010b - (1/16)A/LSB,
+ * 011b - (1/8)A/LSB, 100b - (1/4)A/LSB, 101b - (1/2)A/LSB
+ * 110b - 1A/LSB, 111b - 2A/LSB
+ */
+ switch (ret & GENMASK(2, 0)) {
+ case 0:
+ case 6:
+ data->iout_scale[page] = 32;
+ break;
+ case 1:
+ data->iout_scale[page] = 1;
+ break;
+ case 2:
+ data->iout_scale[page] = 2;
+ break;
+ case 3:
+ data->iout_scale[page] = 4;
+ break;
+ case 4:
+ data->iout_scale[page] = 8;
+ break;
+ case 5:
+ data->iout_scale[page] = 16;
+ break;
+ default:
+ data->iout_scale[page] = 64;
+ break;
+ }
+
+ return 0;
+}
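+
+/*
+ * Illustrative example (register value is an assumption): if
+ * MFR_SVI3_IOUT_PRT[2:0] reads 011b, iout_scale becomes 4, and the read
+ * path divides by MP2891_IOUT_SCALE_DIV (32), giving the (1/8)A/LSB
+ * scale from the table above.
+ */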
+
+static int mp2891_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* Identify vout scale for rail 1. */
+ ret = mp2891_identify_vout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify vout scale for rail 2. */
+ ret = mp2891_identify_vout_scale(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale for rail 1. */
+ ret = mp2891_identify_iout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale for rail 2. */
+ return mp2891_identify_iout_scale(client, info, 1);
+}
+
+static int mp2891_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+		 * The MP2891 does not fully follow the standard PMBus protocol: the
+		 * PMBUS_VOUT_MODE(0x20) register is reserved and always returns 0x00
+		 * when read. The vout calculation in this driver is based on the
+		 * direct format, so the vout format is forced to direct.
+ */
+ ret = PB_VOUT_MODE_DIRECT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2891_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2891_data *data = to_mp2891_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_READ_VIN:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = ret & GENMASK(9, 0);
+ break;
+ case PMBUS_READ_IIN:
+ /*
+		 * The MP2891 does not have the standard PMBUS_READ_IIN register(0x89);
+		 * the iin telemetry can be obtained through the vendor redefined
+		 * register READ_IIN_EST(0x95). The READ_IIN_EST register is in
+		 * linear11 format, but the iin scale is set to 1A/Lsb (using the
+		 * r/m/b scale). As a result, the iin read from the MP2891 must be
+		 * converted to amperes before returning the result to the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, READ_IIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = mp2891_reg2data_linear11(ret);
+ break;
+ case PMBUS_READ_PIN:
+ /*
+		 * The MP2891 has the standard PMBUS_READ_PIN register(0x97), but it is
+		 * not used to read the input power per rail. The input power per rail
+		 * is read through the vendor redefined register READ_PIN_EST(0x94),
+		 * which is in linear11 format, but the pin scale is set to 1W/Lsb
+		 * (using the r/m/b scale). As a result, the pin read from the MP2891
+		 * must be converted to watts before returning the result to the
+		 * pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, READ_PIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = mp2891_reg2data_linear11(ret);
+ break;
+ case PMBUS_READ_POUT:
+ /*
+		 * The MP2891 PMBUS_READ_POUT register is in linear11 format and its
+		 * exponent is not a constant value, but the pout scale is set to
+		 * 1W/Lsb (using the r/m/b scale). As a result, the pout read from the
+		 * MP2891 must be converted to watts before returning the result to
+		 * the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = mp2891_reg2data_linear11(ret);
+ break;
+ case PMBUS_READ_VOUT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(ret * data->vout_scale[page], MP2891_VOUT_SCALE_DIV);
+ break;
+ case PMBUS_READ_IOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(10, 0)) * data->iout_scale[page],
+ MP2891_IOUT_SCALE_DIV);
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+		 * The scale of the MP2891 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT
+		 * registers is 1°C/LSB, and both have a 40°C offset.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) - MP2891_TEMP_LIMIT_OFFSET;
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+ * The MP2891 PMBUS_VIN_OV_FAULT_LIMIT scale is 125mV/Lsb.
+ * but the vin scale is set to 31.25mV/Lsb(using r/m/b scale).
+ * As a result, the limit value should be multiplied by 4.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) * 4;
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(11, 8), ret))
+ ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_UV_LIMIT_SCALE -
+ (FIELD_GET(GENMASK(11, 8), ret) + 1) * MP2891_OVUV_DELTA_SCALE;
+ else
+ ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_UV_LIMIT_SCALE;
+
+ ret = ret < 0 ? 0 : ret;
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(11, 8), ret))
+ ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_OV_LIMIT_SCALE +
+ (FIELD_GET(GENMASK(11, 8), ret) + 1) * MP2891_OVUV_DELTA_SCALE;
+ else
+ ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_OV_LIMIT_SCALE;
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * data->iout_scale[page] *
+ MP2891_IOUT_LIMIT_UINT, MP2891_IOUT_SCALE_DIV);
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ /*
+ * The scale of PMBUS_IIN_OC_WARN_LIMIT is 0.5A/Lsb, but the iin scale
+ * is set to 1A/Lsb(using r/m/b scale), so the word data should be
+ * divided by 2.
+ */
+ ret = pmbus_read_word_data(client, 0, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(9, 0)), 2);
+ break;
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ /*
+ * The scale of PMBUS_PIN_OP_WARN_LIMIT is 2W/Lsb, but the pin scale
+ * is set to 1W/Lsb(using r/m/b scale), so the word data should be
+ * multiplied by 2.
+ */
+ ret = pmbus_read_word_data(client, 0, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(9, 0)) * MP2891_PIN_LIMIT_UINT;
+ break;
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2891_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2891_data *data = to_mp2891_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word * MP2891_VOUT_SCALE_DIV,
+ data->vout_scale[page]));
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ /*
+		 * PMBUS_VOUT_UV_FAULT_LIMIT[7:0] holds the limit value; bits 8-15
+		 * must not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(11, 8), ret))
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word +
+ (FIELD_GET(GENMASK(11, 8), ret) + 1) *
+ MP2891_OVUV_DELTA_SCALE,
+ MP2891_UV_LIMIT_SCALE)));
+ else
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2891_UV_LIMIT_SCALE)));
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ /*
+		 * PMBUS_VOUT_OV_FAULT_LIMIT[7:0] holds the limit value; bits 8-15
+		 * must not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(11, 8), ret))
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word -
+ (FIELD_GET(GENMASK(11, 8), ret) + 1) *
+ MP2891_OVUV_DELTA_SCALE,
+ MP2891_OV_LIMIT_SCALE)));
+ else
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2891_OV_LIMIT_SCALE)));
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+		 * PMBUS_VIN_OV_FAULT_LIMIT[7:0] holds the limit value; bits 8-15
+		 * must not be changed. The scale of PMBUS_VIN_OV_FAULT_LIMIT is
+		 * 125mV/Lsb, but the vin scale is set to 31.25mV/Lsb (using the
+		 * r/m/b scale), so the word data must be divided by 4.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word, 4)));
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+		 * The MP2891 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT registers
+		 * have a 40°C offset. Bits 0-7 hold the limit value; bits 8-15
+		 * must not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0), word + MP2891_TEMP_LIMIT_OFFSET));
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word * MP2891_IOUT_SCALE_DIV,
+ MP2891_IOUT_LIMIT_UINT *
+ data->iout_scale[page]));
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ /*
+ * The scale of PMBUS_IIN_OC_WARN_LIMIT is 0.5A/Lsb, but the iin scale
+ * is set to 1A/Lsb(using r/m/b scale), so the word data should be
+ * multiplied by 2.
+ */
+ ret = pmbus_write_word_data(client, page, reg, word * 2);
+ break;
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ /*
+ * The scale of PMBUS_PIN_OP_WARN_LIMIT is 2W/Lsb, but the pin scale
+ * is set to 1W/Lsb(using r/m/b scale), so the word data should be
+ * divided by 2.
+ */
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word, MP2891_PIN_LIMIT_UINT));
+ break;
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
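+
+/*
+ * Illustrative example for the OT limit write path above (requested value
+ * is an assumption): with the driver's 1°C/LSB direct scaling, a requested
+ * limit of 105 is stored as 105 + MP2891_TEMP_LIMIT_OFFSET = 145 in
+ * bits [7:0] of the register.
+ */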
+
+static const struct pmbus_driver_info mp2891_info = {
+ .pages = MP2891_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+
+ /* set vin scale 31.25mV/Lsb */
+ .m[PSC_VOLTAGE_IN] = 32,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ /* set temp scale 1000m°C/Lsb */
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .m[PSC_CURRENT_IN] = 1,
+ .R[PSC_CURRENT_IN] = 0,
+ .b[PSC_CURRENT_IN] = 0,
+
+ .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_OUT] = 0,
+ .b[PSC_CURRENT_OUT] = 0,
+
+ .m[PSC_POWER] = 1,
+ .R[PSC_POWER] = 0,
+ .b[PSC_POWER] = 0,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .func[0] = MP2891_RAIL1_FUNC,
+ .func[1] = MP2891_RAIL2_FUNC,
+ .read_word_data = mp2891_read_word_data,
+ .write_word_data = mp2891_write_word_data,
+ .read_byte_data = mp2891_read_byte_data,
+ .identify = mp2891_identify,
+};
+
+static int mp2891_probe(struct i2c_client *client)
+{
+ struct mp2891_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2891_info, sizeof(mp2891_info));
+
+ return pmbus_do_probe(client, &data->info);
+}
+
+static const struct i2c_device_id mp2891_id[] = {
+ {"mp2891", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp2891_id);
+
+static const struct of_device_id __maybe_unused mp2891_of_match[] = {
+ {.compatible = "mps,mp2891"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2891_of_match);
+
+static struct i2c_driver mp2891_driver = {
+ .driver = {
+ .name = "mp2891",
+ .of_match_table = mp2891_of_match,
+ },
+ .probe = mp2891_probe,
+ .id_table = mp2891_id,
+};
+
+module_i2c_driver(mp2891_driver);
+
+MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2891");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/mp2993.c b/drivers/hwmon/pmbus/mp2993.c
new file mode 100644
index 000000000000..944593e13231
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2993.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP2993)
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+#define MP2993_VOUT_OVUV_UINT 125
+#define MP2993_VOUT_OVUV_DIV 64
+#define MP2993_VIN_LIMIT_UINT 1
+#define MP2993_VIN_LIMIT_DIV 8
+#define MP2993_READ_VIN_UINT 1
+#define MP2993_READ_VIN_DIV 32
+
+#define MP2993_PAGE_NUM 2
+
+#define MP2993_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+#define MP2993_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | \
+ PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+/* Convert the exponent of a linear11 value to a specified exponent */
+static u16 mp2993_linear11_exponent_transfer(u16 word, u16 expect_exponent)
+{
+ s16 exponent, mantissa, target_exponent;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ target_exponent = (s16)((expect_exponent & 0x1f) << 11) >> 11;
+
+ if (exponent > target_exponent)
+ mantissa = mantissa << (exponent - target_exponent);
+ else
+ mantissa = mantissa >> (target_exponent - exponent);
+
+ return (mantissa & 0x7ff) | ((expect_exponent << 11) & 0xf800);
+}
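+
+/*
+ * Illustrative example (word value chosen for illustration): 0xf864
+ * encodes exponent -1 and mantissa 100, i.e. the value 50; converting it
+ * to an expected exponent of 0 shifts the mantissa right by one and
+ * returns 0x0032 (exponent 0, mantissa 50).
+ */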
+
+static int
+mp2993_set_vout_format(struct i2c_client *client, int page, int format)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client, PMBUS_VOUT_MODE, format);
+}
+
+static int mp2993_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* Set vout to direct format for rail1. */
+ ret = mp2993_set_vout_format(client, 0, PB_VOUT_MODE_DIRECT);
+ if (ret < 0)
+ return ret;
+
+ /* Set vout to direct format for rail2. */
+ return mp2993_set_vout_format(client, 1, PB_VOUT_MODE_DIRECT);
+}
+
+static int mp2993_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(ret * MP2993_VOUT_OVUV_UINT, MP2993_VOUT_OVUV_DIV);
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+		 * The MP2993 ot fault limit and ot warn limit are always the same
+		 * for both rails, so only the PMBUS_OT_FAULT_LIMIT and
+		 * PMBUS_OT_WARN_LIMIT registers in page 0 are used to report
+		 * the limit values.
+ */
+ ret = pmbus_read_word_data(client, 0, phase, reg);
+ break;
+ case PMBUS_READ_VIN:
+ /* The MP2993 vin scale is (1/32V)/Lsb */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(9, 0)) * MP2993_READ_VIN_UINT,
+ MP2993_READ_VIN_DIV);
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /* The MP2993 vin limit scale is (1/8V)/Lsb */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * MP2993_VIN_LIMIT_UINT,
+ MP2993_VIN_LIMIT_DIV);
+ break;
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_IIN:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_READ_VOUT:
+ case PMBUS_READ_PIN:
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
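+
+/*
+ * Illustrative example for the vout limit scaling above (raw value is an
+ * assumption): a raw PMBUS_VOUT_OV_FAULT_LIMIT value of 512 is reported as
+ * 512 * MP2993_VOUT_OVUV_UINT / MP2993_VOUT_OVUV_DIV = 512 * 125 / 64 = 1000.
+ */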
+
+static int mp2993_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = DIV_ROUND_CLOSEST(word * MP2993_VOUT_OVUV_DIV, MP2993_VOUT_OVUV_UINT);
+ ret = pmbus_write_word_data(client, 0, reg, ret);
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+		 * The MP2993 ot fault limit and ot warn limit are always the same
+		 * for both rails, so only the PMBUS_OT_FAULT_LIMIT and
+		 * PMBUS_OT_WARN_LIMIT registers in page 0 are used to configure
+		 * the ot limit values.
+ */
+ ret = pmbus_write_word_data(client, 0, reg, word);
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /* The MP2993 vin limit scale is (1/8V)/Lsb */
+ ret = pmbus_write_word_data(client, 0, reg,
+ DIV_ROUND_CLOSEST(word * MP2993_VIN_LIMIT_DIV,
+ MP2993_VIN_LIMIT_UINT));
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ /*
+		 * The PMBUS_IIN_OC_WARN_LIMIT of the MP2993 is in linear11 format,
+		 * and its exponent is a constant value (5'b00000), so the
+		 * exponent of the word parameter must be converted to 5'b00000.
+ */
+ ret = pmbus_write_word_data(client, page, reg,
+ mp2993_linear11_exponent_transfer(word, 0x00));
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ /*
+		 * The PMBUS_IOUT_OC_FAULT_LIMIT and PMBUS_IOUT_OC_WARN_LIMIT
+		 * of the MP2993 can be regarded as linear11 format, with an
+		 * exponent of either 5'b00001 or 5'b00000. To allow a larger
+		 * limit range, the exponent of the word parameter is
+		 * converted to 5'b00001.
+ */
+ ret = pmbus_write_word_data(client, page, reg,
+ mp2993_linear11_exponent_transfer(word, 0x01));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct pmbus_driver_info mp2993_info = {
+ .pages = MP2993_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_POWER] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .m[PSC_VOLTAGE_IN] = 1,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .func[0] = MP2993_RAIL1_FUNC,
+ .func[1] = MP2993_RAIL2_FUNC,
+ .read_word_data = mp2993_read_word_data,
+ .write_word_data = mp2993_write_word_data,
+ .identify = mp2993_identify,
+};
+
+static int mp2993_probe(struct i2c_client *client)
+{
+ return pmbus_do_probe(client, &mp2993_info);
+}
+
+static const struct i2c_device_id mp2993_id[] = {
+ {"mp2993", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp2993_id);
+
+static const struct of_device_id __maybe_unused mp2993_of_match[] = {
+ {.compatible = "mps,mp2993"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2993_of_match);
+
+static struct i2c_driver mp2993_driver = {
+ .driver = {
+ .name = "mp2993",
+ .of_match_table = mp2993_of_match,
+ },
+ .probe = mp2993_probe,
+ .id_table = mp2993_id,
+};
+
+module_i2c_driver(mp2993_driver);
+
+MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2993");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/mp5920.c b/drivers/hwmon/pmbus/mp5920.c
new file mode 100644
index 000000000000..f6d7527ade7d
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp5920.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MP5920 and compatible chips.
+ */
+
+#include <linux/i2c.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info mp5920_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 2266,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 2266,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_CURRENT_OUT] = 546,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -2,
+ .m[PSC_POWER] = 5840,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = -3,
+ .m[PSC_TEMPERATURE] = 1067,
+ .b[PSC_TEMPERATURE] = 20500,
+ .R[PSC_TEMPERATURE] = -2,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP,
+};
+
+static int mp5920_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int ret;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read PMBUS_MFR_MODEL\n");
+
+ if (ret != 6 || strncmp(buf, "MP5920", 6)) {
+ return dev_err_probe(dev, -ENODEV, "Model '%.*s' not supported\n",
+ min_t(int, ret, sizeof(buf)), buf);
+ }
+
+ return pmbus_do_probe(client, &mp5920_info);
+}
+
+static const struct of_device_id mp5920_of_match[] = {
+ { .compatible = "mps,mp5920" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mp5920_of_match);
+
+static const struct i2c_device_id mp5920_id[] = {
+ { "mp5920" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, mp5920_id);
+
+static struct i2c_driver mp5920_driver = {
+ .driver = {
+ .name = "mp5920",
+ .of_match_table = mp5920_of_match,
+ },
+ .probe = mp5920_probe,
+ .id_table = mp5920_id,
+};
+
+module_i2c_driver(mp5920_driver);
+
+MODULE_AUTHOR("Tony Ao <tony_ao@wiwynn.com>");
+MODULE_AUTHOR("Alex Vdovydchenko <xzeol@yahoo.com>");
+MODULE_DESCRIPTION("PMBus driver for MP5920 HSC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/mp9941.c b/drivers/hwmon/pmbus/mp9941.c
new file mode 100644
index 000000000000..543955cfce67
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp9941.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP9941)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+/*
+ * Vendor specific registers. MFR_ICC_MAX(0x02) is used to
+ * configure the iin scale. MFR_RESO_SET(0xC7) is used to
+ * configure the vout format. MFR_VR_MULTI_CONFIG_R1(0x0D) is
+ * used to identify the vout vid step.
+ */
+#define MFR_ICC_MAX 0x02
+#define MFR_RESO_SET 0xC7
+#define MFR_VR_MULTI_CONFIG_R1 0x0D
+
+#define MP9941_VIN_LIMIT_UINT 1
+#define MP9941_VIN_LIMIT_DIV 8
+#define MP9941_READ_VIN_UINT 1
+#define MP9941_READ_VIN_DIV 32
+
+#define MP9941_PAGE_NUM 1
+
+#define MP9941_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+struct mp9941_data {
+ struct pmbus_driver_info info;
+ int vid_resolution;
+};
+
+#define to_mp9941_data(x) container_of(x, struct mp9941_data, info)
+
+static int mp9941_set_vout_format(struct i2c_client *client)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_RESO_SET);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * page = 0, MFR_RESO_SET[7:6] defines the vout format:
+	 * 2'b11 sets the vout format to direct.
+ */
+ ret = (ret & ~GENMASK(7, 6)) | FIELD_PREP(GENMASK(7, 6), 3);
+
+ return i2c_smbus_write_word_data(client, MFR_RESO_SET, ret);
+}
+
+static int
+mp9941_identify_vid_resolution(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ struct mp9941_data *data = to_mp9941_data(info);
+ int ret;
+
+ /*
+	 * page = 2, MFR_VR_MULTI_CONFIG_R1[4:4] defines the rail1 vid step value:
+	 * 1'b0 means the vid step value is 10mV,
+	 * 1'b1 means the vid step value is 5mV.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VR_MULTI_CONFIG_R1);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(4, 4), ret))
+ data->vid_resolution = 5;
+ else
+ data->vid_resolution = 10;
+
+ return 0;
+}
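+
+/*
+ * Illustrative example (raw value is an assumption): if
+ * MFR_VR_MULTI_CONFIG_R1[4] is set, the vid step is 5mV and
+ * vid_resolution is 5, so a raw PMBUS_MFR_VOUT_MAX value of 200 is
+ * reported as 200 * 5 = 1000 by mp9941_read_word_data().
+ */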
+
+static int mp9941_identify_iin_scale(struct i2c_client *client)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_RESO_SET);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(3, 2)) | FIELD_PREP(GENMASK(3, 2), 0);
+
+ ret = i2c_smbus_write_word_data(client, MFR_RESO_SET, ret);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * page = 2, MFR_ICC_MAX[15:13] defines the iin scale:
+	 * 3'b000 sets the iin scale to 0.5A/Lsb.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_ICC_MAX);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(15, 13)) | FIELD_PREP(GENMASK(15, 13), 0);
+
+ return i2c_smbus_write_word_data(client, MFR_ICC_MAX, ret);
+}
+
+static int mp9941_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ ret = mp9941_identify_iin_scale(client);
+ if (ret < 0)
+ return ret;
+
+ ret = mp9941_identify_vid_resolution(client, info);
+ if (ret < 0)
+ return ret;
+
+ return mp9941_set_vout_format(client);
+}
+
+static int mp9941_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp9941_data *data = to_mp9941_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_READ_VIN:
+ /* The MP9941 vin scale is (1/32V)/Lsb */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(9, 0)) * MP9941_READ_VIN_UINT,
+ MP9941_READ_VIN_DIV);
+ break;
+ case PMBUS_READ_IIN:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = ret & GENMASK(10, 0);
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /* The MP9941 vin ov limit scale is (1/8V)/Lsb */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * MP9941_VIN_LIMIT_UINT,
+ MP9941_VIN_LIMIT_DIV);
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = ret & GENMASK(7, 0);
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ case PMBUS_MFR_VOUT_MIN:
+ case PMBUS_MFR_VOUT_MAX:
+ /*
+		 * The vout scale is set to 1mV/Lsb (using the r/m/b scale),
+		 * but the vout uv limit and vout max/min registers use a
+		 * 1VID/Lsb scale, so their values must be multiplied by the
+		 * vid resolution.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = ret * data->vid_resolution;
+ break;
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_READ_VOUT:
+ case PMBUS_READ_PIN:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp9941_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp9941_data *data = to_mp9941_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /* The MP9941 vin ov limit scale is (1/8V)/Lsb */
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word * MP9941_VIN_LIMIT_DIV,
+ MP9941_VIN_LIMIT_UINT));
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ case PMBUS_MFR_VOUT_MIN:
+ case PMBUS_MFR_VOUT_MAX:
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word, data->vid_resolution));
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct pmbus_driver_info mp9941_info = {
+ .pages = MP9941_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .m[PSC_VOLTAGE_IN] = 1,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ .m[PSC_CURRENT_IN] = 2,
+ .R[PSC_CURRENT_IN] = 0,
+ .b[PSC_CURRENT_IN] = 0,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .func[0] = MP9941_RAIL1_FUNC,
+ .read_word_data = mp9941_read_word_data,
+ .write_word_data = mp9941_write_word_data,
+ .identify = mp9941_identify,
+};
+
+static int mp9941_probe(struct i2c_client *client)
+{
+ struct mp9941_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp9941_info, sizeof(mp9941_info));
+
+ return pmbus_do_probe(client, &data->info);
+}
+
+static const struct i2c_device_id mp9941_id[] = {
+ {"mp9941", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp9941_id);
+
+static const struct of_device_id __maybe_unused mp9941_of_match[] = {
+ {.compatible = "mps,mp9941"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp9941_of_match);
+
+static struct i2c_driver mp9941_driver = {
+ .driver = {
+ .name = "mp9941",
+ .of_match_table = mp9941_of_match,
+ },
+ .probe = mp9941_probe,
+ .id_table = mp9941_id,
+};
+
+module_i2c_driver(mp9941_driver);
+
+MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP9941");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index 2388d0565e7e..5f9ca6543530 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -279,12 +279,11 @@ static const struct hwmon_chip_info powr1220_chip_info = {
.info = powr1220_info,
};
-static const struct i2c_device_id powr1220_ids[];
-
static int powr1220_probe(struct i2c_client *client)
{
struct powr1220_data *data;
struct device *hwmon_dev;
+ enum powr1xxx_chips chip;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -293,7 +292,8 @@ static int powr1220_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- switch (i2c_match_id(powr1220_ids, client)->driver_data) {
+ chip = (uintptr_t)i2c_get_match_data(client);
+ switch (chip) {
case powr1014:
data->max_channels = 10;
break;
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index c0d02fbcdb76..650b0bcc2359 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -882,15 +882,6 @@ static const struct hwmon_chip_info sht3x_chip_info = {
.info = sht3x_channel_info,
};
-/* device ID table */
-static const struct i2c_device_id sht3x_ids[] = {
- {"sht3x", sht3x},
- {"sts3x", sts3x},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, sht3x_ids);
-
static int sht3x_probe(struct i2c_client *client)
{
int ret;
@@ -920,7 +911,7 @@ static int sht3x_probe(struct i2c_client *client)
data->mode = 0;
data->last_update = jiffies - msecs_to_jiffies(3000);
data->client = client;
- data->chip_id = i2c_match_id(sht3x_ids, client)->driver_data;
+ data->chip_id = (uintptr_t)i2c_get_match_data(client);
crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL);
sht3x_select_command(data);
@@ -963,6 +954,15 @@ static int sht3x_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
+/* device ID table */
+static const struct i2c_device_id sht3x_ids[] = {
+ {"sht3x", sht3x},
+ {"sts3x", sts3x},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, sht3x_ids);
+
static struct i2c_driver sht3x_i2c_driver = {
.driver.name = "sht3x",
.probe = sht3x_probe,
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
index 1f96e94967ee..2ac906e8e173 100644
--- a/drivers/hwmon/shtc1.c
+++ b/drivers/hwmon/shtc1.c
@@ -186,8 +186,6 @@ static void shtc1_select_command(struct shtc1_data *data)
}
}
-static const struct i2c_device_id shtc1_id[];
-
static int shtc1_probe(struct i2c_client *client)
{
int ret;
@@ -195,7 +193,7 @@ static int shtc1_probe(struct i2c_client *client)
char id_reg_buf[2];
struct shtc1_data *data;
struct device *hwmon_dev;
- enum shtcx_chips chip = i2c_match_id(shtc1_id, client)->driver_data;
+ enum shtcx_chips chip = (uintptr_t)i2c_get_match_data(client);
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
@@ -238,7 +236,7 @@ static int shtc1_probe(struct i2c_client *client)
if (np) {
data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
- data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
+ data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
} else {
if (client->dev.platform_data)
data->setup = *(struct shtc1_platform_data *)dev->platform_data;
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
new file mode 100644
index 000000000000..fcbce5a01e55
--- /dev/null
+++ b/drivers/hwmon/spd5118.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for JEDEC SPD5118 compliant temperature sensors
+ *
+ * Derived from https://github.com/Steve-Tech/SPD5118-DKMS
+ * Originally from T/2 driver at https://t2sde.org/packages/linux
+ * Copyright (c) 2023 René Rebe, ExactCODE GmbH; Germany.
+ *
+ * Copyright (c) 2024 Guenter Roeck
+ *
+ * Inspired by ee1004.c and jc42.c.
+ *
+ * SPD5118 compliant temperature sensors are typically used on DDR5
+ * memory modules.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvmem-provider.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = {
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, I2C_CLIENT_END };
+
+/* SPD5118 registers. */
+#define SPD5118_REG_TYPE 0x00 /* MR0:MR1 */
+#define SPD5118_REG_REVISION 0x02 /* MR2 */
+#define SPD5118_REG_VENDOR 0x03 /* MR3:MR4 */
+#define SPD5118_REG_CAPABILITY 0x05 /* MR5 */
+#define SPD5118_REG_I2C_LEGACY_MODE 0x0B /* MR11 */
+#define SPD5118_REG_TEMP_CLR 0x13 /* MR19 */
+#define SPD5118_REG_ERROR_CLR 0x14 /* MR20 */
+#define SPD5118_REG_TEMP_CONFIG 0x1A /* MR26 */
+#define SPD5118_REG_TEMP_MAX 0x1c /* MR28:MR29 */
+#define SPD5118_REG_TEMP_MIN 0x1e /* MR30:MR31 */
+#define SPD5118_REG_TEMP_CRIT 0x20 /* MR32:MR33 */
+#define SPD5118_REG_TEMP_LCRIT 0x22 /* MR34:MR35 */
+#define SPD5118_REG_TEMP 0x31 /* MR49:MR50 */
+#define SPD5118_REG_TEMP_STATUS 0x33 /* MR51 */
+
+#define SPD5118_TEMP_STATUS_HIGH BIT(0)
+#define SPD5118_TEMP_STATUS_LOW BIT(1)
+#define SPD5118_TEMP_STATUS_CRIT BIT(2)
+#define SPD5118_TEMP_STATUS_LCRIT BIT(3)
+
+#define SPD5118_CAP_TS_SUPPORT BIT(1) /* temperature sensor support */
+
+#define SPD5118_TS_DISABLE BIT(0) /* temperature sensor disable */
+
+#define SPD5118_LEGACY_MODE_ADDR BIT(3)
+#define SPD5118_LEGACY_PAGE_MASK GENMASK(2, 0)
+#define SPD5118_LEGACY_MODE_MASK (SPD5118_LEGACY_MODE_ADDR | SPD5118_LEGACY_PAGE_MASK)
+
+#define SPD5118_NUM_PAGES 8
+#define SPD5118_PAGE_SIZE 128
+#define SPD5118_PAGE_SHIFT 7
+#define SPD5118_PAGE_MASK GENMASK(6, 0)
+#define SPD5118_EEPROM_BASE 0x80
+#define SPD5118_EEPROM_SIZE (SPD5118_PAGE_SIZE * SPD5118_NUM_PAGES)
+
+/* Temperature unit in millicelsius */
+#define SPD5118_TEMP_UNIT (MILLIDEGREE_PER_DEGREE / 4)
+/* Representable temperature range in millicelsius */
+#define SPD5118_TEMP_RANGE_MIN -256000
+#define SPD5118_TEMP_RANGE_MAX 255750
+
+struct spd5118_data {
+ struct regmap *regmap;
+ struct mutex nvmem_lock;
+};
+
+/* hwmon */
+
+static int spd5118_temp_from_reg(u16 reg)
+{
+ int temp = sign_extend32(reg >> 2, 10);
+
+ return temp * SPD5118_TEMP_UNIT;
+}
+
+static u16 spd5118_temp_to_reg(long temp)
+{
+ temp = clamp_val(temp, SPD5118_TEMP_RANGE_MIN, SPD5118_TEMP_RANGE_MAX);
+ return (DIV_ROUND_CLOSEST(temp, SPD5118_TEMP_UNIT) & 0x7ff) << 2;
+}
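+
+/*
+ * Illustrative example: a temperature register value of 0x0190 yields
+ * (0x190 >> 2) * SPD5118_TEMP_UNIT = 100 * 250 = 25000 millidegrees
+ * (25°C), and spd5118_temp_to_reg(25000) maps back to 0x0190.
+ */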
+
+static int spd5118_read_temp(struct regmap *regmap, u32 attr, long *val)
+{
+ int reg, err;
+ u8 regval[2];
+ u16 temp;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = SPD5118_REG_TEMP;
+ break;
+ case hwmon_temp_max:
+ reg = SPD5118_REG_TEMP_MAX;
+ break;
+ case hwmon_temp_min:
+ reg = SPD5118_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_crit:
+ reg = SPD5118_REG_TEMP_CRIT;
+ break;
+ case hwmon_temp_lcrit:
+ reg = SPD5118_REG_TEMP_LCRIT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = regmap_bulk_read(regmap, reg, regval, 2);
+ if (err)
+ return err;
+
+ temp = (regval[1] << 8) | regval[0];
+
+ *val = spd5118_temp_from_reg(temp);
+ return 0;
+}
+
+static int spd5118_read_alarm(struct regmap *regmap, u32 attr, long *val)
+{
+ unsigned int mask, regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_max_alarm:
+ mask = SPD5118_TEMP_STATUS_HIGH;
+ break;
+ case hwmon_temp_min_alarm:
+ mask = SPD5118_TEMP_STATUS_LOW;
+ break;
+ case hwmon_temp_crit_alarm:
+ mask = SPD5118_TEMP_STATUS_CRIT;
+ break;
+ case hwmon_temp_lcrit_alarm:
+ mask = SPD5118_TEMP_STATUS_LCRIT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = regmap_read(regmap, SPD5118_REG_TEMP_STATUS, &regval);
+ if (err < 0)
+ return err;
+ *val = !!(regval & mask);
+ if (*val)
+ return regmap_write(regmap, SPD5118_REG_TEMP_CLR, mask);
+ return 0;
+}
+
+static int spd5118_read_enable(struct regmap *regmap, long *val)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
+ if (err < 0)
+ return err;
+ *val = !(regval & SPD5118_TS_DISABLE);
+ return 0;
+}
+
+static int spd5118_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ case hwmon_temp_crit:
+ case hwmon_temp_lcrit:
+ return spd5118_read_temp(regmap, attr, val);
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_lcrit_alarm:
+ return spd5118_read_alarm(regmap, attr, val);
+ case hwmon_temp_enable:
+ return spd5118_read_enable(regmap, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int spd5118_write_temp(struct regmap *regmap, u32 attr, long val)
+{
+ u8 regval[2];
+ u16 temp;
+ int reg;
+
+ switch (attr) {
+ case hwmon_temp_max:
+ reg = SPD5118_REG_TEMP_MAX;
+ break;
+ case hwmon_temp_min:
+ reg = SPD5118_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_crit:
+ reg = SPD5118_REG_TEMP_CRIT;
+ break;
+ case hwmon_temp_lcrit:
+ reg = SPD5118_REG_TEMP_LCRIT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ temp = spd5118_temp_to_reg(val);
+ regval[0] = temp & 0xff;
+ regval[1] = temp >> 8;
+
+ return regmap_bulk_write(regmap, reg, regval, 2);
+}
+
+static int spd5118_write_enable(struct regmap *regmap, long val)
+{
+ if (val && val != 1)
+ return -EINVAL;
+
+ return regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG,
+ SPD5118_TS_DISABLE,
+ val ? 0 : SPD5118_TS_DISABLE);
+}
+
+static int spd5118_temp_write(struct regmap *regmap, u32 attr, long val)
+{
+ switch (attr) {
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ case hwmon_temp_crit:
+ case hwmon_temp_lcrit:
+ return spd5118_write_temp(regmap, attr, val);
+ case hwmon_temp_enable:
+ return spd5118_write_enable(regmap, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int spd5118_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return spd5118_temp_write(regmap, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t spd5118_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_lcrit:
+ case hwmon_temp_crit:
+ case hwmon_temp_enable:
+ return 0644;
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_lcrit_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static inline bool spd5118_parity8(u8 w)
+{
+ w ^= w >> 4;
+ return (0x6996 >> (w & 0xf)) & 1;
+}
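+
+/*
+ * Illustrative example: spd5118_parity8(0x83) folds the nibbles
+ * (0x8 ^ 0x3 = 0xb) and looks up bit 11 of 0x6996, returning true
+ * because 0x83 has an odd number (three) of bits set.
+ */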
+
+/*
+ * Bank and vendor id are 8-bit fields with seven data bits and odd parity.
+ * Vendor IDs 0 and 0x7f are invalid.
+ * See Jedec standard JEP106BJ for details and a list of assigned vendor IDs.
+ */
+static bool spd5118_vendor_valid(u8 bank, u8 id)
+{
+ if (!spd5118_parity8(bank) || !spd5118_parity8(id))
+ return false;
+
+ id &= 0x7f;
+ return id && id != 0x7f;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
+ if (regval < 0)
+ return -ENODEV;
+ if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
+ if (regval)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
+ if (regval)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
+ if (regval < 0 || (regval & 0xc1))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
+ if (regval < 0)
+ return -ENODEV;
+ if (regval & ~SPD5118_TS_DISABLE)
+ return -ENODEV;
+
+ strscpy(info->type, "spd5118", I2C_NAME_SIZE);
+ return 0;
+}
+
+static const struct hwmon_channel_info *spd5118_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT |
+ HWMON_T_LCRIT | HWMON_T_LCRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_ENABLE),
+ NULL
+};
+
+static const struct hwmon_ops spd5118_hwmon_ops = {
+ .is_visible = spd5118_is_visible,
+ .read = spd5118_read,
+ .write = spd5118_write,
+};
+
+static const struct hwmon_chip_info spd5118_chip_info = {
+ .ops = &spd5118_hwmon_ops,
+ .info = spd5118_info,
+};
+
+/* nvmem */
+
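+/*
+ * Map a linear EEPROM offset into the register address space: each
+ * EEPROM page is accessed through its own 0x100-byte register range
+ * starting at SPD5118_EEPROM_BASE, and a single read never crosses a
+ * page boundary.
+ */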
+static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
+ unsigned int offset, size_t count)
+{
+ int addr = (offset >> SPD5118_PAGE_SHIFT) * 0x100 + SPD5118_EEPROM_BASE;
+ int err;
+
+ offset &= SPD5118_PAGE_MASK;
+
+ /* Can't cross page boundaries */
+ if (offset + count > SPD5118_PAGE_SIZE)
+ count = SPD5118_PAGE_SIZE - offset;
+
+ err = regmap_bulk_read(regmap, addr + offset, buf, count);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static int spd5118_nvmem_read(void *priv, unsigned int off, void *val, size_t count)
+{
+ struct spd5118_data *data = priv;
+ char *buf = val;
+ int ret;
+
+ if (unlikely(!count))
+ return count;
+
+ if (off + count > SPD5118_EEPROM_SIZE)
+ return -EINVAL;
+
+ mutex_lock(&data->nvmem_lock);
+
+ while (count) {
+ ret = spd5118_nvmem_read_page(data->regmap, buf, off, count);
+ if (ret < 0) {
+ mutex_unlock(&data->nvmem_lock);
+ return ret;
+ }
+ buf += ret;
+ off += ret;
+ count -= ret;
+ }
+ mutex_unlock(&data->nvmem_lock);
+ return 0;
+}
+
+static int spd5118_nvmem_init(struct device *dev, struct spd5118_data *data)
+{
+ struct nvmem_config nvmem_config = {
+ .type = NVMEM_TYPE_EEPROM,
+ .name = dev_name(dev),
+ .id = NVMEM_DEVID_NONE,
+ .dev = dev,
+ .base_dev = dev,
+ .read_only = true,
+ .root_only = false,
+ .owner = THIS_MODULE,
+ .compat = true,
+ .reg_read = spd5118_nvmem_read,
+ .priv = data,
+ .stride = 1,
+ .word_size = 1,
+ .size = SPD5118_EEPROM_SIZE,
+ };
+ struct nvmem_device *nvmem;
+
+ nvmem = devm_nvmem_register(dev, &nvmem_config);
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+/* regmap */
+
+static bool spd5118_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SPD5118_REG_I2C_LEGACY_MODE:
+ case SPD5118_REG_TEMP_CLR:
+ case SPD5118_REG_TEMP_CONFIG:
+ case SPD5118_REG_TEMP_MAX:
+ case SPD5118_REG_TEMP_MAX + 1:
+ case SPD5118_REG_TEMP_MIN:
+ case SPD5118_REG_TEMP_MIN + 1:
+ case SPD5118_REG_TEMP_CRIT:
+ case SPD5118_REG_TEMP_CRIT + 1:
+ case SPD5118_REG_TEMP_LCRIT:
+ case SPD5118_REG_TEMP_LCRIT + 1:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool spd5118_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SPD5118_REG_TEMP_CLR:
+ case SPD5118_REG_ERROR_CLR:
+ case SPD5118_REG_TEMP:
+ case SPD5118_REG_TEMP + 1:
+ case SPD5118_REG_TEMP_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
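+/*
+ * The 2 kB register space (0x000..0x7ff) is accessed through a 256-byte
+ * window; the page select bits in the I2C legacy mode register serve as
+ * the regmap range selector.
+ */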
+static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
+ {
+ .selector_reg = SPD5118_REG_I2C_LEGACY_MODE,
+ .selector_mask = SPD5118_LEGACY_PAGE_MASK,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ .range_min = 0,
+ .range_max = 0x7ff,
+ },
+};
+
+static const struct regmap_config spd5118_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x7ff,
+ .writeable_reg = spd5118_writeable_reg,
+ .volatile_reg = spd5118_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
+
+ .ranges = spd5118_regmap_range_cfg,
+ .num_ranges = ARRAY_SIZE(spd5118_regmap_range_cfg),
+};
+
+static int spd5118_init(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int err, regval, mode;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval < 0 || (regval && regval != 0x5118))
+ return -ENODEV;
+
+ /*
+ * If the device type registers return 0, it is possible that the chip
+ * has a non-zero page selected and takes the specification literally,
+ * i.e. disables access to volatile registers besides the page register
+ * if the page is not 0. Try to identify such chips.
+ */
+ if (!regval) {
+ /* Vendor ID registers must also be 0 */
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval)
+ return -ENODEV;
+
+ /* The selected page in MR11 must not be 0 */
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
+ !(mode & SPD5118_LEGACY_PAGE_MASK))
+ return -ENODEV;
+
+ err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
+ mode & SPD5118_LEGACY_MODE_ADDR);
+ if (err)
+ return -ENODEV;
+
+ /*
+ * If the device type registers are still bad after selecting
+ * page 0, this is not a SPD5118 device. Restore original
+ * legacy mode register value and abort.
+ */
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118) {
+ i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
+ return -ENODEV;
+ }
+ }
+
+ /* We are reasonably sure that this is really an SPD5118 hub controller */
+ return 0;
+}
+
+static int spd5118_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ unsigned int regval, revision, vendor, bank;
+ struct spd5118_data *data;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+ int err;
+
+ err = spd5118_init(client);
+ if (err)
+ return err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &spd5118_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
+
+ err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &regval);
+ if (err)
+ return err;
+ if (!(regval & SPD5118_CAP_TS_SUPPORT))
+ return -ENODEV;
+
+ err = regmap_read(regmap, SPD5118_REG_REVISION, &revision);
+ if (err)
+ return err;
+
+ err = regmap_read(regmap, SPD5118_REG_VENDOR, &bank);
+ if (err)
+ return err;
+ err = regmap_read(regmap, SPD5118_REG_VENDOR + 1, &vendor);
+ if (err)
+ return err;
+ if (!spd5118_vendor_valid(bank, vendor))
+ return -ENODEV;
+
+ data->regmap = regmap;
+ mutex_init(&data->nvmem_lock);
+ dev_set_drvdata(dev, data);
+
+ err = spd5118_nvmem_init(dev, data);
+ /* Ignore if NVMEM support is disabled */
+ if (err && err != -EOPNOTSUPP) {
+ dev_err_probe(dev, err, "failed to register nvmem\n");
+ return err;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "spd5118",
+ regmap, &spd5118_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ /*
+ * From JESD300-5B
+ * MR2 bits [5:4]: Major revision, 1..4
+ * MR2 bits [3:1]: Minor revision, 0..8? Probably a typo, assume 1..8
+ */
+ dev_info(dev, "DDR5 temperature sensor: vendor 0x%02x:0x%02x revision %d.%d\n",
+ bank & 0x7f, vendor, ((revision >> 4) & 0x03) + 1, ((revision >> 1) & 0x07) + 1);
+
+ return 0;
+}
+
+static int spd5118_suspend(struct device *dev)
+{
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 regval;
+ int err;
+
+ /*
+ * Make sure the configuration register in the regmap cache is current
+ * before bypassing it.
+ */
+ err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
+ if (err < 0)
+ return err;
+
+ regcache_cache_bypass(regmap, true);
+ regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
+ SPD5118_TS_DISABLE);
+ regcache_cache_bypass(regmap, false);
+
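+ /*
+ * Subsequent accesses only touch the cache; marking it dirty lets
+ * regcache_sync() restore the cached (still enabled) configuration
+ * on resume.
+ */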
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
+
+ return 0;
+}
+
+static int spd5118_resume(struct device *dev)
+{
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+
+ regcache_cache_only(regmap, false);
+ return regcache_sync(regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+
+static const struct i2c_device_id spd5118_id[] = {
+ { "spd5118", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, spd5118_id);
+
+static const struct of_device_id spd5118_of_ids[] = {
+ { .compatible = "jedec,spd5118", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spd5118_of_ids);
+
+static struct i2c_driver spd5118_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "spd5118",
+ .of_match_table = spd5118_of_ids,
+ .pm = pm_sleep_ptr(&spd5118_pm_ops),
+ },
+ .probe = spd5118_probe,
+ .id_table = spd5118_id,
+ .detect = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? spd5118_detect : NULL,
+ .address_list = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? normal_i2c : NULL,
+};
+
+module_i2c_driver(spd5118_driver);
+
+MODULE_AUTHOR("René Rebe <rene@exactcode.de>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("SPD 5118 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 68ba26bc9014..0cbdb91698b1 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -377,8 +377,6 @@ static void thmc50_init_client(struct thmc50_data *data)
i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config);
}
-static const struct i2c_device_id thmc50_id[];
-
static int thmc50_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -391,7 +389,7 @@ static int thmc50_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- data->type = i2c_match_id(thmc50_id, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
mutex_init(&data->update_lock);
thmc50_init_client(data);
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index df1b45a62e80..853dbe708ff5 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -693,7 +693,7 @@ static int tmp401_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->update_lock);
- data->kind = i2c_match_id(tmp401_id, client)->driver_data;
+ data->kind = (uintptr_t)i2c_get_match_data(client);
data->regmap = devm_regmap_init(dev, NULL, data, &tmp401_regmap_config);
if (IS_ERR(data->regmap))
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 10b66c9ce045..7a6f9532e594 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -446,11 +446,7 @@ static int tmp421_probe(struct i2c_client *client)
return -ENOMEM;
mutex_init(&data->update_lock);
- if (client->dev.of_node)
- data->channels = (unsigned long)
- of_device_get_match_data(&client->dev);
- else
- data->channels = i2c_match_id(tmp421_id, client)->driver_data;
+ data->channels = (unsigned long)i2c_get_match_data(client);
data->client = client;
for (i = 0; i < data->channels; i++) {
diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c
index f58ca4c6acb6..3ee1137533d6 100644
--- a/drivers/hwmon/tmp464.c
+++ b/drivers/hwmon/tmp464.c
@@ -666,10 +666,7 @@ static int tmp464_probe(struct i2c_client *client)
mutex_init(&data->update_lock);
- if (dev->of_node)
- data->channels = (int)(unsigned long)of_device_get_match_data(&client->dev);
- else
- data->channels = i2c_match_id(tmp464_id, client)->driver_data;
+ data->channels = (int)(unsigned long)i2c_get_match_data(client);
data->regmap = devm_regmap_init_i2c(client, &tmp464_regmap_config);
if (IS_ERR(data->regmap))
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index ea6f4416c124..926d28cd3fab 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -159,7 +159,7 @@ static const u8 TMP51X_CURR_INPUT[2] = {
TMP51X_BUS_CURRENT_RESULT
};
-static struct regmap_config tmp51x_regmap_config = {
+static const struct regmap_config tmp51x_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = TMP51X_MAX_REGISTER_ADDR,
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index d33ecbac00d6..dfcfb09d9f3c 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -117,7 +117,7 @@ struct tps23861_data {
struct dentry *debugfs_dir;
};
-static struct regmap_config tps23861_regmap_config = {
+static const struct regmap_config tps23861_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x6f,
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index fe960c0a624f..7d7d70afde65 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
@@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
return err;
/* Limit the temp to 0C - 15C */
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index cba5ec432e6d..b7957c84d235 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1192,8 +1192,6 @@ static void w83781d_remove_files(struct device *dev)
sysfs_remove_group(&dev->kobj, &w83781d_group_other);
}
-static const struct i2c_device_id w83781d_ids[];
-
static int w83781d_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1208,7 +1206,7 @@ static int w83781d_probe(struct i2c_client *client)
mutex_init(&data->lock);
mutex_init(&data->update_lock);
- data->type = i2c_match_id(w83781d_ids, client)->driver_data;
+ data->type = (uintptr_t)i2c_get_match_data(client);
data->client = client;
/* attach secondary i2c lm75-like clients */
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index c446e00db658..5174db69db5e 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2134,8 +2134,6 @@ static void w83795_apply_temp_config(struct w83795_data *data, u8 config,
}
}
-static const struct i2c_device_id w83795_id[];
-
static int w83795_probe(struct i2c_client *client)
{
int i;
@@ -2149,7 +2147,7 @@ static int w83795_probe(struct i2c_client *client)
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->chip_type = i2c_match_id(w83795_id, client)->driver_data;
+ data->chip_type = (uintptr_t)i2c_get_match_data(client);
data->bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL);
mutex_init(&data->update_lock);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 9388823bb0bb..44710267d669 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -135,7 +135,7 @@ config I2C_SLAVE_EEPROM
Documentation/i2c/slave-eeprom-backend.rst for further details.
config I2C_SLAVE_TESTUNIT
- tristate "I2C eeprom testunit driver"
+ tristate "I2C testunit driver"
help
This backend can be used to trigger test cases for I2C bus masters
which require a remote device with certain capabilities, e.g.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3d65934f5eb4..78d0561339e5 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -29,8 +29,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
obj-$(CONFIG_I2C_VIA) += i2c-via.o
obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
-i2c-zhaoxin-objs := i2c-viai2c-zhaoxin.o i2c-viai2c-common.o
-obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o
+obj-$(CONFIG_I2C_ZHAOXIN) += i2c-viai2c-zhaoxin.o i2c-viai2c-common.o
# Mac SMBus host controller drivers
obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
@@ -120,8 +119,7 @@ obj-$(CONFIG_I2C_TEGRA_BPMP) += i2c-tegra-bpmp.o
obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o
obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
-i2c-wmt-objs := i2c-viai2c-wmt.o i2c-viai2c-common.o
-obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
+obj-$(CONFIG_I2C_WMT) += i2c-viai2c-wmt.o i2c-viai2c-common.o
i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
index d6eeea5166c0..131a67d9d4a6 100644
--- a/drivers/i2c/busses/i2c-at91-slave.c
+++ b/drivers/i2c/busses/i2c-at91-slave.c
@@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
static u32 at91_twi_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
- | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+ return I2C_FUNC_SLAVE;
}
static const struct i2c_algorithm at91_twi_algorithm_slave = {
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 2e079cf20bb5..78e2c47e3d7d 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
{
- dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
+ dev->functionality = I2C_FUNC_SLAVE;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 56a4dabf5a38..4ad670a80a63 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -431,8 +431,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
/* Init the device */
- oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
+ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index a12525b3186b..f448505d5468 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -15,7 +15,6 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -32,7 +31,6 @@ struct i2c_pnx_mif {
int ret; /* Return value */
int mode; /* Interface mode */
struct completion complete; /* I/O completion */
- struct timer_list timer; /* Timeout */
u8 * buf; /* Data buffer */
int len; /* Length of data buffer */
int order; /* RX Bytes to order via TX */
@@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
return (timeout <= 0);
}
-static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
-{
- struct timer_list *timer = &alg_data->mif.timer;
- unsigned long expires = msecs_to_jiffies(alg_data->timeout);
-
- if (expires <= 1)
- expires = 2;
-
- del_timer_sync(timer);
-
- dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
- jiffies, expires);
-
- timer->expires = jiffies + expires;
-
- add_timer(timer);
-}
-
/**
* i2c_pnx_start - start a device
* @slave_addr: slave address
@@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
I2C_REG_CTL(alg_data));
- del_timer_sync(&alg_data->mif.timer);
-
dev_dbg(&alg_data->adapter.dev,
"%s(): Waking up xfer routine.\n",
__func__);
@@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
I2C_REG_CTL(alg_data));
- /* Stop timer. */
- del_timer_sync(&alg_data->mif.timer);
dev_dbg(&alg_data->adapter.dev,
"%s(): Waking up xfer routine after zero-xfer.\n",
__func__);
@@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
mcntrl_drmie | mcntrl_daie);
iowrite32(ctl, I2C_REG_CTL(alg_data));
- /* Kill timer. */
- del_timer_sync(&alg_data->mif.timer);
complete(&alg_data->mif.complete);
}
}
@@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
mcntrl_drmie);
iowrite32(ctl, I2C_REG_CTL(alg_data));
- /* Stop timer, to prevent timeout. */
- del_timer_sync(&alg_data->mif.timer);
complete(&alg_data->mif.complete);
} else if (stat & mstatus_nai) {
/* Slave did not acknowledge, generate a STOP */
@@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
/* Our return value. */
alg_data->mif.ret = -EIO;
- /* Stop timer, to prevent timeout. */
- del_timer_sync(&alg_data->mif.timer);
complete(&alg_data->mif.complete);
} else {
/*
@@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void i2c_pnx_timeout(struct timer_list *t)
+static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
{
- struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
u32 ctl;
dev_err(&alg_data->adapter.dev,
@@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t)
iowrite32(ctl, I2C_REG_CTL(alg_data));
wait_reset(alg_data);
alg_data->mif.ret = -EIO;
- complete(&alg_data->mif.complete);
}
static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
@@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct i2c_msg *pmsg;
int rc = 0, completed = 0, i;
struct i2c_pnx_algo_data *alg_data = adap->algo_data;
+ unsigned long time_left;
u32 stat;
dev_dbg(&alg_data->adapter.dev,
@@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
__func__, alg_data->mif.mode, alg_data->mif.len);
- i2c_pnx_arm_timer(alg_data);
/* initialize the completion var */
init_completion(&alg_data->mif.complete);
@@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
break;
/* Wait for completion */
- wait_for_completion(&alg_data->mif.complete);
+ time_left = wait_for_completion_timeout(&alg_data->mif.complete,
+ alg_data->timeout);
+ if (time_left == 0)
+ i2c_pnx_timeout(alg_data);
if (!(rc = alg_data->mif.ret))
completed++;
@@ -653,7 +624,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
alg_data->adapter.algo_data = alg_data;
alg_data->adapter.nr = pdev->id;
- alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
+ alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
+ if (alg_data->timeout <= 1)
+ alg_data->timeout = 2;
+
#ifdef CONFIG_OF
alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
if (pdev->dev.of_node) {
@@ -673,8 +647,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
if (IS_ERR(alg_data->clk))
return PTR_ERR(alg_data->clk);
- timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
-
snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
"%s", pdev->name);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 828aa2ea0fe4..185a5d60f101 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -257,6 +257,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
}
}
+static void rcar_i2c_reset_slave(struct rcar_i2c_priv *priv)
+{
+ rcar_i2c_write(priv, ICSIER, 0);
+ rcar_i2c_write(priv, ICSSR, 0);
+ rcar_i2c_write(priv, ICSCR, SDBS);
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+}
+
static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
{
int ret;
@@ -875,6 +883,10 @@ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
{
int ret;
+ /* Don't reset if a slave instance is currently running */
+ if (priv->slave)
+ return -EISCONN;
+
ret = reset_control_reset(priv->rstc);
if (ret)
return ret;
@@ -903,10 +915,10 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
/* Gen3+ needs a reset. That also allows RXDMA once */
if (priv->devtype >= I2C_RCAR_GEN3) {
- priv->flags &= ~ID_P_NO_RXDMA;
ret = rcar_i2c_do_reset(priv);
if (ret)
goto out;
+ priv->flags &= ~ID_P_NO_RXDMA;
}
rcar_i2c_init(priv);
@@ -1033,11 +1045,8 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* ensure no irq is running before clearing ptr */
disable_irq(priv->irq);
- rcar_i2c_write(priv, ICSIER, 0);
- rcar_i2c_write(priv, ICSSR, 0);
+ rcar_i2c_reset_slave(priv);
enable_irq(priv->irq);
- rcar_i2c_write(priv, ICSCR, SDBS);
- rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
priv->slave = NULL;
@@ -1152,7 +1161,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
goto out_pm_disable;
}
- rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+ /* Bring hardware to known state */
+ rcar_i2c_init(priv);
+ rcar_i2c_reset_slave(priv);
if (priv->devtype < I2C_RCAR_GEN3) {
irqflags |= IRQF_NO_THREAD;
@@ -1168,6 +1179,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (of_property_read_bool(dev->of_node, "smbus"))
priv->flags |= ID_P_HOST_NOTIFY;
+ /* R-Car Gen3+ needs a reset before every transfer */
if (priv->devtype >= I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc)) {
@@ -1178,6 +1190,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
ret = reset_control_status(priv->rstc);
if (ret < 0)
goto out_pm_put;
+
+ /* hard reset disturbs HostNotify local target, so disable it */
+ priv->flags &= ~ID_P_HOST_NOTIFY;
}
ret = platform_get_irq(pdev, 0);
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 31ecb2c7e978..4eccbcd0fbfc 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -138,7 +138,6 @@ struct synquacer_i2c {
int irq;
struct device *dev;
void __iomem *base;
- struct clk *pclk;
u32 pclkrate;
u32 speed_khz;
u32 timeout_ms;
@@ -535,6 +534,7 @@ static const struct i2c_adapter synquacer_i2c_ops = {
static int synquacer_i2c_probe(struct platform_device *pdev)
{
struct synquacer_i2c *i2c;
+ struct clk *pclk;
u32 bus_speed;
int ret;
@@ -550,13 +550,12 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
&i2c->pclkrate);
- i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
- if (IS_ERR(i2c->pclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+ pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
"failed to get and enable clock\n");
- dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
- i2c->pclkrate = clk_get_rate(i2c->pclk);
+ i2c->pclkrate = clk_get_rate(pclk);
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
diff --git a/drivers/i2c/busses/i2c-viai2c-common.c b/drivers/i2c/busses/i2c-viai2c-common.c
index 1844d13f1f79..162b31306cba 100644
--- a/drivers/i2c/busses/i2c-viai2c-common.c
+++ b/drivers/i2c/busses/i2c-viai2c-common.c
@@ -17,6 +17,7 @@ int viai2c_wait_bus_not_busy(struct viai2c *i2c)
return 0;
}
+EXPORT_SYMBOL_GPL(viai2c_wait_bus_not_busy);
static int viai2c_write(struct viai2c *i2c, struct i2c_msg *pmsg, int last)
{
@@ -121,6 +122,7 @@ int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
return (ret < 0) ? ret : i;
}
+EXPORT_SYMBOL_GPL(viai2c_xfer);
/*
* Main process of the byte mode xfer
@@ -130,7 +132,7 @@ int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
* 0: there is still data that needs to be transferred
* -EIO: error occurred
*/
-static int viai2c_irq_xfer(struct viai2c *i2c)
+int viai2c_irq_xfer(struct viai2c *i2c)
{
u16 val;
struct i2c_msg *msg = i2c->msg;
@@ -171,51 +173,11 @@ static int viai2c_irq_xfer(struct viai2c *i2c)
return i2c->xfered_len == msg->len;
}
-
-int __weak viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
-{
- return 0;
-}
-
-static irqreturn_t viai2c_isr(int irq, void *data)
-{
- struct viai2c *i2c = data;
- u8 status;
-
- /* save the status and write-clear it */
- status = readw(i2c->base + VIAI2C_REG_ISR);
- if (!status && i2c->platform == VIAI2C_PLAT_ZHAOXIN)
- return IRQ_NONE;
-
- writew(status, i2c->base + VIAI2C_REG_ISR);
-
- i2c->ret = 0;
- if (status & VIAI2C_ISR_NACK_ADDR)
- i2c->ret = -EIO;
-
- if (i2c->platform == VIAI2C_PLAT_WMT && (status & VIAI2C_ISR_SCL_TIMEOUT))
- i2c->ret = -ETIMEDOUT;
-
- if (!i2c->ret) {
- if (i2c->mode == VIAI2C_BYTE_MODE)
- i2c->ret = viai2c_irq_xfer(i2c);
- else
- i2c->ret = viai2c_fifo_irq_xfer(i2c, true);
- }
-
- /* All the data has been successfully transferred or error occurred */
- if (i2c->ret)
- complete(&i2c->complete);
-
- return IRQ_HANDLED;
-}
+EXPORT_SYMBOL_GPL(viai2c_irq_xfer);
int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
{
- int err;
- int irq_flags;
struct viai2c *i2c;
- struct device_node *np = pdev->dev.of_node;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -225,28 +187,8 @@ int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
- if (plat == VIAI2C_PLAT_WMT) {
- irq_flags = 0;
- i2c->irq = irq_of_parse_and_map(np, 0);
- if (!i2c->irq)
- return -EINVAL;
- } else if (plat == VIAI2C_PLAT_ZHAOXIN) {
- irq_flags = IRQF_SHARED;
- i2c->irq = platform_get_irq(pdev, 0);
- if (i2c->irq < 0)
- return i2c->irq;
- } else {
- return dev_err_probe(&pdev->dev, -EINVAL, "wrong platform type\n");
- }
-
i2c->platform = plat;
- err = devm_request_irq(&pdev->dev, i2c->irq, viai2c_isr,
- irq_flags, pdev->name, i2c);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "failed to request irq %i\n", i2c->irq);
-
i2c->dev = &pdev->dev;
init_completion(&i2c->complete);
platform_set_drvdata(pdev, i2c);
@@ -254,3 +196,8 @@ int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
*pi2c = i2c;
return 0;
}
+EXPORT_SYMBOL_GPL(viai2c_init);
+
+MODULE_DESCRIPTION("Via/Wondermedia/Zhaoxin I2C master-mode bus adapter");
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-viai2c-common.h b/drivers/i2c/busses/i2c-viai2c-common.h
index 81e827c54434..00f17733223c 100644
--- a/drivers/i2c/busses/i2c-viai2c-common.h
+++ b/drivers/i2c/busses/i2c-viai2c-common.h
@@ -80,6 +80,6 @@ struct viai2c {
int viai2c_wait_bus_not_busy(struct viai2c *i2c);
int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num);
int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat);
-int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq);
+int viai2c_irq_xfer(struct viai2c *i2c);
#endif
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
index e1988f946026..420fd10fe3aa 100644
--- a/drivers/i2c/busses/i2c-viai2c-wmt.c
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -72,6 +72,32 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c)
return 0;
}
+static irqreturn_t wmt_i2c_isr(int irq, void *data)
+{
+ struct viai2c *i2c = data;
+ u8 status;
+
+ /* save the status and write-clear it */
+ status = readw(i2c->base + VIAI2C_REG_ISR);
+ writew(status, i2c->base + VIAI2C_REG_ISR);
+
+ i2c->ret = 0;
+ if (status & VIAI2C_ISR_NACK_ADDR)
+ i2c->ret = -EIO;
+
+ if (status & VIAI2C_ISR_SCL_TIMEOUT)
+ i2c->ret = -ETIMEDOUT;
+
+ if (!i2c->ret)
+ i2c->ret = viai2c_irq_xfer(i2c);
+
+ /* All the data has been successfully transferred or an error occurred */
+ if (i2c->ret)
+ complete(&i2c->complete);
+
+ return IRQ_HANDLED;
+}
+
static int wmt_i2c_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -84,6 +110,16 @@ static int wmt_i2c_probe(struct platform_device *pdev)
if (err)
return err;
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ err = devm_request_irq(&pdev->dev, i2c->irq, wmt_i2c_isr,
+ 0, pdev->name, i2c);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request irq %i\n", i2c->irq);
+
i2c->clk = of_clk_get(np, 0);
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "unable to request clock\n");
diff --git a/drivers/i2c/busses/i2c-viai2c-zhaoxin.c b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
index 7e3ac2a3e1fd..ab3e44e147e9 100644
--- a/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
+++ b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
@@ -49,8 +49,7 @@ struct viai2c_zhaoxin {
u16 xfer_len;
};
-/* 'irq == true' means in interrupt context */
-int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
+static int viai2c_fifo_xfer(struct viai2c *i2c)
{
u16 i;
u8 tmp;
@@ -59,17 +58,6 @@ int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
bool read = !!(msg->flags & I2C_M_RD);
struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
- if (irq) {
- /* get the received data */
- if (read)
- for (i = 0; i < priv->xfer_len; i++)
- msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
-
- i2c->xfered_len += priv->xfer_len;
- if (i2c->xfered_len == msg->len)
- return 1;
- }
-
/* reset fifo buffer */
tmp = ioread8(base + ZXI2C_REG_HCR);
iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
@@ -92,18 +80,59 @@ int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
iowrite8(tmp, base + VIAI2C_REG_CR);
}
- if (irq) {
- /* continue transmission */
- tmp = ioread8(base + VIAI2C_REG_CR);
- iowrite8(tmp |= VIAI2C_CR_CPU_RDY, base + VIAI2C_REG_CR);
+ u16 tcr_val = i2c->tcr;
+
+ /* start transmission */
+ tcr_val |= read ? VIAI2C_TCR_READ : 0;
+ writew(tcr_val | msg->addr, base + VIAI2C_REG_TCR);
+
+ return 0;
+}
+
+static int viai2c_fifo_irq_xfer(struct viai2c *i2c)
+{
+ u16 i;
+ u8 tmp;
+ struct i2c_msg *msg = i2c->msg;
+ void __iomem *base = i2c->base;
+ bool read = !!(msg->flags & I2C_M_RD);
+ struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
+
+ /* get the received data */
+ if (read)
+ for (i = 0; i < priv->xfer_len; i++)
+ msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
+
+ i2c->xfered_len += priv->xfer_len;
+ if (i2c->xfered_len == msg->len)
+ return 1;
+
+ /* reset fifo buffer */
+ tmp = ioread8(base + ZXI2C_REG_HCR);
+ iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
+
+ /* set xfer len */
+ priv->xfer_len = min_t(u16, msg->len - i2c->xfered_len, ZXI2C_FIFO_SIZE);
+ if (read) {
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HRLR);
} else {
- u16 tcr_val = i2c->tcr;
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HTLR);
+ /* set write data */
+ for (i = 0; i < priv->xfer_len; i++)
+ iowrite8(msg->buf[i2c->xfered_len + i], base + ZXI2C_REG_HTDR);
+ }
- /* start transmission */
- tcr_val |= read ? VIAI2C_TCR_READ : 0;
- writew(tcr_val | msg->addr, base + VIAI2C_REG_TCR);
+ /* prepare to stop transmission */
+ if (priv->hrv && msg->len == (i2c->xfered_len + priv->xfer_len)) {
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ tmp |= read ? VIAI2C_CR_RX_END : VIAI2C_CR_TX_END;
+ iowrite8(tmp, base + VIAI2C_REG_CR);
}
+ /* continue transmission */
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ iowrite8(tmp |= VIAI2C_CR_CPU_RDY, base + VIAI2C_REG_CR);
+
return 0;
}
@@ -135,7 +164,7 @@ static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int
priv->xfer_len = 0;
i2c->xfered_len = 0;
- viai2c_fifo_irq_xfer(i2c, 0);
+ viai2c_fifo_xfer(i2c);
if (!wait_for_completion_timeout(&i2c->complete, VIAI2C_TIMEOUT))
return -ETIMEDOUT;
@@ -228,6 +257,36 @@ static void zxi2c_get_bus_speed(struct viai2c *i2c)
dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0]));
}
+static irqreturn_t zxi2c_isr(int irq, void *data)
+{
+ struct viai2c *i2c = data;
+ u8 status;
+
+ /* save the status and write-clear it */
+ status = readw(i2c->base + VIAI2C_REG_ISR);
+ if (!status)
+ return IRQ_NONE;
+
+ writew(status, i2c->base + VIAI2C_REG_ISR);
+
+ i2c->ret = 0;
+ if (status & VIAI2C_ISR_NACK_ADDR)
+ i2c->ret = -EIO;
+
+ if (!i2c->ret) {
+ if (i2c->mode == VIAI2C_BYTE_MODE)
+ i2c->ret = viai2c_irq_xfer(i2c);
+ else
+ i2c->ret = viai2c_fifo_irq_xfer(i2c);
+ }
+
+ /* All the data has been successfully transferred or an error occurred */
+ if (i2c->ret)
+ complete(&i2c->complete);
+
+ return IRQ_HANDLED;
+}
+
static int zxi2c_probe(struct platform_device *pdev)
{
int error;
@@ -239,6 +298,16 @@ static int zxi2c_probe(struct platform_device *pdev)
if (error)
return error;
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ error = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr,
+ IRQF_SHARED, pdev->name, i2c);
+ if (error)
+ return dev_err_probe(&pdev->dev, error,
+ "failed to request irq %i\n", i2c->irq);
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index db0d1ac82910..7e7b15440832 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1067,6 +1067,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode);
static const struct i2c_device_id dummy_id[] = {
{ "dummy", 0 },
+ { "smbus_host_notify", 0 },
{ },
};
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index a49642bbae4b..23a11e4e9256 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -118,9 +118,19 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
queue_delayed_work(system_long_wq, &tu->worker,
msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
}
- fallthrough;
+
+ /*
+ * Reset reg_idx to prevent the work from being queued again if a
+ * STOP follows a subsequent read message. But do not clear the TU
+ * regs here because we still need them in the workqueue!
+ */
+ tu->reg_idx = 0;
+ break;
case I2C_SLAVE_WRITE_REQUESTED:
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+ return -EBUSY;
+
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
break;
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 97f338b123b1..f809f0ef2004 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
* target systems are the same.
* Restrictions to automatic SPD instantiation:
* - Only works if all filled slots have the same memory type
- * - Only works for DDR, DDR2, DDR3 and DDR4 for now
+ * - Only works for (LP)DDR memory types up to DDR5
* - Only works on systems with 1 to 8 memory slots
*/
#if IS_ENABLED(CONFIG_DMI)
@@ -382,6 +382,10 @@ void i2c_register_spd(struct i2c_adapter *adap)
case 0x1E: /* LPDDR4 */
name = "ee1004";
break;
+ case 0x22: /* DDR5 */
+ case 0x23: /* LPDDR5 */
+ name = "spd5118";
+ break;
default:
dev_info(&adap->dev,
"Memory type 0x%02x not supported yet, not instantiating SPD\n",
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index e486027f8b07..9aab7abc2ae9 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1494,53 +1494,53 @@ static const struct idle_cpu idle_cpu_srf __initconst = {
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G, &idle_cpu_nehalem),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &idle_cpu_nehalem),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL, &idle_cpu_atom),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID, &idle_cpu_lincroft),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &idle_cpu_snb),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &idle_cpu_snx),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL, &idle_cpu_atom),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &idle_cpu_byt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &idle_cpu_tangier),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &idle_cpu_cht),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &idle_cpu_ivb),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &idle_cpu_ivt),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &idle_cpu_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &idle_cpu_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &idle_cpu_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &idle_cpu_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &idle_cpu_avn),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &idle_cpu_bdw),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &idle_cpu_bdw),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &idle_cpu_bdx),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &idle_cpu_bdx),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf),
+ X86_MATCH_VFM(INTEL_NEHALEM_EP, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_NEHALEM, &idle_cpu_nehalem),
+ X86_MATCH_VFM(INTEL_NEHALEM_G, &idle_cpu_nehalem),
+ X86_MATCH_VFM(INTEL_WESTMERE, &idle_cpu_nehalem),
+ X86_MATCH_VFM(INTEL_WESTMERE_EP, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_NEHALEM_EX, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_ATOM_BONNELL, &idle_cpu_atom),
+ X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID, &idle_cpu_lincroft),
+ X86_MATCH_VFM(INTEL_WESTMERE_EX, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &idle_cpu_snb),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &idle_cpu_snx),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL, &idle_cpu_atom),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &idle_cpu_byt),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &idle_cpu_tangier),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &idle_cpu_cht),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &idle_cpu_ivb),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &idle_cpu_ivt),
+ X86_MATCH_VFM(INTEL_HASWELL, &idle_cpu_hsw),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &idle_cpu_hsx),
+ X86_MATCH_VFM(INTEL_HASWELL_L, &idle_cpu_hsw),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &idle_cpu_hsw),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &idle_cpu_avn),
+ X86_MATCH_VFM(INTEL_BROADWELL, &idle_cpu_bdw),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &idle_cpu_bdw),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &idle_cpu_bdx),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &idle_cpu_bdx),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &idle_cpu_skx),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &idle_cpu_icx),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &idle_cpu_icx),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &idle_cpu_adl),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &idle_cpu_adl_l),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &idle_cpu_mtl_l),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &idle_cpu_gmt),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &idle_cpu_dnv),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
{}
};
@@ -1990,27 +1990,27 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
int cstate;
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_IVYBRIDGE_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
- case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_ATOM_GOLDMONT:
+ case INTEL_ATOM_GOLDMONT_PLUS:
bxt_idle_state_table_update();
break;
- case INTEL_FAM6_SKYLAKE:
+ case INTEL_SKYLAKE:
sklh_idle_state_table_update();
break;
- case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_SKYLAKE_X:
skx_idle_state_table_update();
break;
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
- case INTEL_FAM6_EMERALDRAPIDS_X:
+ case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
spr_idle_state_table_update();
break;
- case INTEL_FAM6_ALDERLAKE:
- case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ATOM_GRACEMONT:
+ case INTEL_ALDERLAKE:
+ case INTEL_ALDERLAKE_L:
+ case INTEL_ATOM_GRACEMONT:
adl_idle_state_table_update();
break;
}
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index c2da5066e9a7..80b57d3ee3a7 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -330,6 +330,8 @@ config DMARD10
config FXLS8962AF
tristate
depends on I2C || !I2C # cannot be built-in for modular I2C
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
config FXLS8962AF_I2C
tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index a7826bba0852..b26d4575e256 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -145,6 +145,7 @@ struct ad7173_device_info {
unsigned int id;
char *name;
bool has_temp;
+ bool has_input_buf;
bool has_int_ref;
bool has_ref2;
u8 num_gpios;
@@ -212,18 +213,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 4,
.num_gpios = 2,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
},
[ID_AD7172_4] = {
+ .name = "ad7172-4",
.id = AD7172_4_ID,
.num_inputs = 9,
.num_channels = 8,
.num_configs = 8,
.num_gpios = 4,
.has_temp = false,
+ .has_input_buf = true,
.has_ref2 = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
@@ -237,6 +241,7 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 8,
.num_gpios = 4,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.has_ref2 = true,
.clock = 2 * HZ_PER_MHZ,
@@ -251,18 +256,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 4,
.num_gpios = 2,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.clock = 16 * HZ_PER_MHZ,
.sinc5_data_rates = ad7175_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
},
[ID_AD7175_8] = {
+ .name = "ad7175-8",
.id = AD7175_8_ID,
.num_inputs = 17,
.num_channels = 16,
.num_configs = 8,
.num_gpios = 4,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.has_ref2 = true,
.clock = 16 * HZ_PER_MHZ,
@@ -277,18 +285,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 4,
.num_gpios = 2,
.has_temp = false,
+ .has_input_buf = false,
.has_int_ref = true,
.clock = 16 * HZ_PER_MHZ,
.sinc5_data_rates = ad7175_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
},
[ID_AD7177_2] = {
+ .name = "ad7177-2",
.id = AD7177_ID,
.num_inputs = 5,
.num_channels = 4,
.num_configs = 4,
.num_gpios = 2,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.clock = 16 * HZ_PER_MHZ,
.odr_start_value = AD7177_ODR_START_VALUE,
@@ -532,6 +543,7 @@ static int ad7173_append_status(struct ad_sigma_delta *sd, bool append)
unsigned int interface_mode = st->interface_mode;
int ret;
+ interface_mode &= ~AD7173_INTERFACE_DATA_STAT;
interface_mode |= AD7173_INTERFACE_DATA_STAT_EN(append);
ret = ad_sd_write_reg(&st->sd, AD7173_REG_INTERFACE_MODE, 2, interface_mode);
if (ret)
@@ -705,7 +717,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
{
struct ad7173_state *st = iio_priv(indio_dev);
struct ad7173_channel_config *cfg;
- unsigned int freq, i, reg;
+ unsigned int freq, i;
int ret;
ret = iio_device_claim_direct_mode(indio_dev);
@@ -721,16 +733,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
cfg = &st->channels[chan->address].cfg;
cfg->odr = i;
-
- if (!cfg->live)
- break;
-
- ret = ad_sd_read_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, &reg);
- if (ret)
- break;
- reg &= ~AD7173_FILTER_ODR0_MASK;
- reg |= FIELD_PREP(AD7173_FILTER_ODR0_MASK, i);
- ret = ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, reg);
+ cfg->live = false;
break;
default:
@@ -792,8 +795,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
.type = IIO_VOLTAGE,
.indexed = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -804,12 +806,11 @@ static const struct iio_chan_spec ad7173_channel_template = {
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
.type = IIO_TEMP,
- .indexed = 1,
.channel = AD7173_AIN_TEMP_POS,
.channel2 = AD7173_AIN_TEMP_NEG,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -932,7 +933,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
AD7173_CH_ADDRESS(chan_arr[chan_index].channel,
chan_arr[chan_index].channel2);
chan_st_priv->cfg.bipolar = false;
- chan_st_priv->cfg.input_buf = true;
+ chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
@@ -989,7 +990,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]);
chan_st_priv->chan_reg = chan_index;
- chan_st_priv->cfg.input_buf = true;
+ chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.odr = 0;
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 353a97f9c086..13ea8a1073d2 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -157,6 +157,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
ret = ad7266_read_single(st, val, chan->address);
iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
*val = sign_extend32(*val,
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index e85b763b9ffc..8f5b9c3f6e3d 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -243,11 +243,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
}
static const struct iio_chan_spec ad9434_channels[] = {
- AD9467_CHAN(0, 0, 12, 'S'),
+ AD9467_CHAN(0, 0, 12, 's'),
};
static const struct iio_chan_spec ad9467_channels[] = {
- AD9467_CHAN(0, 0, 16, 'S'),
+ AD9467_CHAN(0, 0, 16, 's'),
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index f0b71a1220e0..f52abf759260 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -414,8 +414,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
/* Run calibration of PS & PL as part of the sequence */
scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
- for (i = 0; i < indio_dev->num_channels; i++)
- scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ const struct iio_chan_spec *chan = &indio_dev->channels[i];
+
+ if (chan->scan_index < AMS_CTRL_SEQ_BASE)
+ scan_mask |= BIT_ULL(chan->scan_index);
+ }
if (ams->ps_base) {
/* put sysmon in a soft reset to change the sequence */
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index 4edc5d21cb9f..f959252a4fe6 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -54,7 +54,9 @@
#define BME680_NB_CONV_MASK GENMASK(3, 0)
#define BME680_REG_MEAS_STAT_0 0x1D
+#define BME680_NEW_DATA_BIT BIT(7)
#define BME680_GAS_MEAS_BIT BIT(6)
+#define BME680_MEAS_BIT BIT(5)
/* Calibration Parameters */
#define BME680_T2_LSB_REG 0x8A
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index ef5e0e46fd34..500f56834b01 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -10,6 +10,7 @@
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/log2.h>
@@ -38,7 +39,7 @@ struct bme680_calib {
s8 par_h3;
s8 par_h4;
s8 par_h5;
- s8 par_h6;
+ u8 par_h6;
s8 par_h7;
s8 par_gh1;
s16 par_gh2;
@@ -342,10 +343,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
if (!calib->par_t2)
bme680_read_calib(data, calib);
- var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
+ var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
- var3 = (var3 * (calib->par_t3 << 4)) >> 14;
+ var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
data->t_fine = var2 + var3;
calc_temp = (data->t_fine * 5 + 128) >> 8;
@@ -368,9 +369,9 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var1 = (data->t_fine >> 1) - 64000;
var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
var2 = var2 + (var1 * calib->par_p5 << 1);
- var2 = (var2 >> 2) + (calib->par_p4 << 16);
+ var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
- (calib->par_p3 << 5)) >> 3) +
+ ((s32)calib->par_p3 << 5)) >> 3) +
((calib->par_p2 * var1) >> 1);
var1 = var1 >> 18;
var1 = ((32768 + var1) * calib->par_p1) >> 15;
@@ -388,7 +389,7 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var3 = ((press_comp >> 8) * (press_comp >> 8) *
(press_comp >> 8) * calib->par_p10) >> 17;
- press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
+ press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4;
return press_comp;
}
@@ -414,7 +415,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data,
(((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
>> 6) / 100) + (1 << 14))) >> 10;
var3 = var1 * var2;
- var4 = calib->par_h6 << 7;
+ var4 = (s32)calib->par_h6 << 7;
var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
var6 = (var4 * var5) >> 1;
@@ -532,6 +533,43 @@ static u8 bme680_oversampling_to_reg(u8 val)
return ilog2(val) + 1;
}
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490
+ */
+static int bme680_wait_for_eoc(struct bme680_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int check;
+ int ret;
+ /*
+ * (Sum of oversampling ratios * time per oversampling) +
+ * TPH measurement + gas measurement + wait transition from forced mode
+ * + heater duration
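+ * E.g. with 1x oversampling on all three channels and a 100 ms heater
+ * duration this evaluates to 3 * 1936 + 477 * 4 + 477 * 5 + 1000 +
+ * 100000 = 111101 us.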
+ */
+ int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
+ data->oversampling_humid) * 1936) + (477 * 4) +
+ (477 * 5) + 1000 + (data->heater_dur * 1000);
+
+ usleep_range(wait_eoc_us, wait_eoc_us + 100);
+
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ if (ret) {
+ dev_err(dev, "failed to read measurement status register.\n");
+ return ret;
+ }
+ if (check & BME680_MEAS_BIT) {
+ dev_err(dev, "Device measurement cycle incomplete.\n");
+ return -EBUSY;
+ }
+ if (!(check & BME680_NEW_DATA_BIT)) {
+ dev_err(dev, "No new data available from the device.\n");
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
static int bme680_chip_config(struct bme680_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
@@ -622,6 +660,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
if (ret < 0)
return ret;
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
+
ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
&tmp, 3);
if (ret < 0) {
@@ -678,7 +720,7 @@ static int bme680_read_press(struct bme680_data *data,
}
*val = bme680_compensate_press(data, adc_press);
- *val2 = 100;
+ *val2 = 1000;
return IIO_VAL_FRACTIONAL;
}
@@ -738,6 +780,10 @@ static int bme680_read_gas(struct bme680_data *data,
if (ret < 0)
return ret;
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
+
ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
if (check & BME680_GAS_MEAS_BIT) {
dev_err(dev, "gas measurement incomplete\n");
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index fa205f17bd90..f44458c380d9 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
uint32_t period, bool fifo)
{
+ uint32_t mult;
+
/* when FIFO is on, prevent odr change if one is already pending */
if (fifo && ts->new_mult != 0)
return -EAGAIN;
- ts->new_mult = period / ts->chip.clock_period;
+ mult = period / ts->chip.clock_period;
+ if (mult != ts->mult)
+ ts->new_mult = mult;
return 0;
}
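The guard above only queues a new decimation multiplier when it differs from the one already in use, so requesting the ODR that is already active no longer schedules a spurious update. A minimal sketch of the arithmetic, with an assumed (hypothetical) chip clock period:

	/* Sketch of the update_odr logic; the clock period value is assumed. */
	unsigned int clock_period = 31250;          /* ns, illustrative only */
	unsigned int period = 10000000;             /* ns, i.e. a 100 Hz ODR */
	unsigned int cur_mult = 320, new_mult = 0;
	unsigned int mult = period / clock_period;  /* 320 */

	if (mult != cur_mult)   /* unchanged ODR: nothing gets queued */
		new_mult = mult;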
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 3c2bf620f00f..ee0d9798d8b4 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -133,7 +133,7 @@ config AD5624R_SPI
config AD9739A
tristate "Analog Devices AD9739A RF DAC spi driver"
- depends on SPI || COMPILE_TEST
+ depends on SPI
select REGMAP_SPI
select IIO_BACKEND
help
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 076bc9ecfb49..4763402dbcd6 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
s64 tmp = *val * (3767897513LL / 25LL);
*val = div_s64_rem(tmp, 1000000000LL, val2);
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
mutex_lock(&st->lock);
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index cdc4789213ba..a82dcc3da421 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -66,8 +67,10 @@
#define HDC3020_CRC8_POLYNOMIAL 0x31
-#define HDC3020_MIN_TEMP -40
-#define HDC3020_MAX_TEMP 125
+#define HDC3020_MIN_TEMP_MICRO -39872968
+#define HDC3020_MAX_TEMP_MICRO 124875639
+#define HDC3020_MAX_TEMP_HYST_MICRO 164748607
+#define HDC3020_MAX_HUM_MICRO 99220264
struct hdc3020_data {
struct i2c_client *client;
@@ -368,6 +371,105 @@ static int hdc3020_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int hdc3020_thresh_get_temp(u16 thresh)
+{
+ int temp;
+
+ /*
+ * Get the temperature threshold from 9 LSBs, shift them to get
+ * the truncated temperature threshold representation and
+ * calculate the threshold according to the formula in the
+ * datasheet. Result is degrees Celsius scaled by 65535.
+ */
+ temp = FIELD_GET(HDC3020_THRESH_TEMP_MASK, thresh) <<
+ HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+
+ return -2949075 + (175 * temp);
+}
+
+static int hdc3020_thresh_get_hum(u16 thresh)
+{
+ int hum;
+
+ /*
+ * Get the humidity threshold from 7 MSBs, shift them to get the
+ * truncated humidity threshold representation and calculate the
+ * threshold according to the formula in the datasheet. Result is
+ * in percent scaled by 65535.
+ */
+ hum = FIELD_GET(HDC3020_THRESH_HUM_MASK, thresh) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT;
+
+ return hum * 100;
+}
+
+static u16 hdc3020_thresh_set_temp(int s_temp, u16 curr_thresh)
+{
+ u64 temp;
+ u16 thresh;
+
+ /*
+ * Calculate temperature threshold, shift it down to get the
+ * truncated threshold representation in the 9 LSBs while keeping
+ * the current humidity threshold in the 7 MSBs.
+ */
+ temp = (u64)(s_temp + 45000000) * 65535ULL;
+ temp = div_u64(temp, 1000000 * 175) >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+ thresh = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, temp);
+ thresh |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, curr_thresh) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT);
+
+ return thresh;
+}
+
+static u16 hdc3020_thresh_set_hum(int s_hum, u16 curr_thresh)
+{
+ u64 hum;
+ u16 thresh;
+
+ /*
+ * Calculate humidity threshold, shift it down and up to get the
+ * truncated threshold representation in the 7 MSBs while keeping
+ * the current temperature threshold in the 9 LSBs.
+ */
+ hum = (u64)(s_hum) * 65535ULL;
+ hum = div_u64(hum, 1000000 * 100) >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
+ thresh = FIELD_PREP(HDC3020_THRESH_HUM_MASK, hum);
+ thresh |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, curr_thresh);
+
+ return thresh;
+}
+
+static
+int hdc3020_thresh_clr(s64 s_thresh, s64 s_hyst, enum iio_event_direction dir)
+{
+ s64 s_clr;
+
+ /*
+ * Include directions when calculation the clear value,
+ * since hysteresis is unsigned by definition and the
+ * clear value is an absolute value which is signed.
+ */
+ if (dir == IIO_EV_DIR_RISING)
+ s_clr = s_thresh - s_hyst;
+ else
+ s_clr = s_thresh + s_hyst;
+
+ /* Divide by 65535 to get units of micro */
+ return div_s64(s_clr, 65535);
+}
+
+static int _hdc3020_write_thresh(struct hdc3020_data *data, u16 reg, u16 val)
+{
+ u8 buf[5];
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+
+ return hdc3020_write_bytes(data, buf, 5);
+}
+
static int hdc3020_write_thresh(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -376,67 +478,126 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
int val, int val2)
{
struct hdc3020_data *data = iio_priv(indio_dev);
- u8 buf[5];
- u64 tmp;
- u16 reg;
- int ret;
-
- /* Supported temperature range is from –40 to 125 degree celsius */
- if (val < HDC3020_MIN_TEMP || val > HDC3020_MAX_TEMP)
- return -EINVAL;
-
- /* Select threshold register */
- if (info == IIO_EV_INFO_VALUE) {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_S_T_RH_THRESH_HIGH;
- else
- reg = HDC3020_S_T_RH_THRESH_LOW;
+ u16 reg, reg_val, reg_thresh_rd, reg_clr_rd, reg_thresh_wr, reg_clr_wr;
+ s64 s_thresh, s_hyst, s_clr;
+ int s_val, thresh, clr, ret;
+
+ /* Select threshold registers */
+ if (dir == IIO_EV_DIR_RISING) {
+ reg_thresh_rd = HDC3020_R_T_RH_THRESH_HIGH;
+ reg_thresh_wr = HDC3020_S_T_RH_THRESH_HIGH;
+ reg_clr_rd = HDC3020_R_T_RH_THRESH_HIGH_CLR;
+ reg_clr_wr = HDC3020_S_T_RH_THRESH_HIGH_CLR;
} else {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_S_T_RH_THRESH_HIGH_CLR;
- else
- reg = HDC3020_S_T_RH_THRESH_LOW_CLR;
+ reg_thresh_rd = HDC3020_R_T_RH_THRESH_LOW;
+ reg_thresh_wr = HDC3020_S_T_RH_THRESH_LOW;
+ reg_clr_rd = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ reg_clr_wr = HDC3020_S_T_RH_THRESH_LOW_CLR;
}
guard(mutex)(&data->lock);
- ret = hdc3020_read_be16(data, reg);
+ ret = hdc3020_read_be16(data, reg_thresh_rd);
+ if (ret < 0)
+ return ret;
+
+ thresh = ret;
+ ret = hdc3020_read_be16(data, reg_clr_rd);
if (ret < 0)
return ret;
+ clr = ret;
+ /* Scale the value to include the decimal part in the calculation */
+ s_val = (val < 0) ? (val * 1000000 - val2) : (val * 1000000 + val2);
switch (chan->type) {
case IIO_TEMP:
- /*
- * Calculate temperature threshold, shift it down to get the
- * truncated threshold representation in the 9LSBs while keeping
- * the current humidity threshold in the 7 MSBs.
- */
- tmp = ((u64)(((val + 45) * MICRO) + val2)) * 65535ULL;
- tmp = div_u64(tmp, MICRO * 175);
- val = tmp >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
- val = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, val);
- val |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, ret) <<
- HDC3020_THRESH_HUM_TRUNC_SHIFT);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ s_val = max(s_val, HDC3020_MIN_TEMP_MICRO);
+ s_val = min(s_val, HDC3020_MAX_TEMP_MICRO);
+ reg = reg_thresh_wr;
+ reg_val = hdc3020_thresh_set_temp(s_val, thresh);
+ ret = _hdc3020_write_thresh(data, reg, reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_temp(clr) * 1000000;
+ s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Set old hysteresis */
+ s_val = s_hyst;
+ fallthrough;
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_temp returns temperature
+ * in degrees Celsius scaled by 65535. Scale by 1000000
+ * to be able to subtract scaled hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ /*
+ * Units of s_val are in microdegrees Celsius, scale by
+ * 65535 to get same units as s_thresh.
+ */
+ s_val = min(abs(s_val), HDC3020_MAX_TEMP_HYST_MICRO);
+ s_hyst = (s64)s_val * 65535;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, HDC3020_MIN_TEMP_MICRO);
+ s_clr = min(s_clr, HDC3020_MAX_TEMP_MICRO);
+ reg = reg_clr_wr;
+ reg_val = hdc3020_thresh_set_temp(s_clr, clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
break;
case IIO_HUMIDITYRELATIVE:
- /*
- * Calculate humidity threshold, shift it down and up to get the
- * truncated threshold representation in the 7MSBs while keeping
- * the current temperature threshold in the 9 LSBs.
- */
- tmp = ((u64)((val * MICRO) + val2)) * 65535ULL;
- tmp = div_u64(tmp, MICRO * 100);
- val = tmp >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
- val = FIELD_PREP(HDC3020_THRESH_HUM_MASK, val);
- val |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+ s_val = (s_val < 0) ? 0 : min(s_val, HDC3020_MAX_HUM_MICRO);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ reg = reg_thresh_wr;
+ reg_val = hdc3020_thresh_set_hum(s_val, thresh);
+ ret = _hdc3020_write_thresh(data, reg, reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_hum(clr) * 1000000;
+ s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Try to set old hysteresis */
+ s_val = min(abs(s_hyst), HDC3020_MAX_HUM_MICRO);
+ fallthrough;
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_hum returns relative
+ * humidity in percent scaled by 65535. Scale by 1000000
+ * to be able to subtract scaled hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ /*
+ * Units of s_val are in micro percent, scale by 65535
+ * to get same units as s_thresh.
+ */
+ s_hyst = (s64)s_val * 65535;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, 0);
+ s_clr = min(s_clr, HDC3020_MAX_HUM_MICRO);
+ reg = reg_clr_wr;
+ reg_val = hdc3020_thresh_set_hum(s_clr, clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
break;
default:
return -EOPNOTSUPP;
}
- put_unaligned_be16(reg, buf);
- put_unaligned_be16(val, buf + 2);
- buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
- return hdc3020_write_bytes(data, buf, 5);
+ return _hdc3020_write_thresh(data, reg, reg_val);
}
static int hdc3020_read_thresh(struct iio_dev *indio_dev,
@@ -447,48 +608,60 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct hdc3020_data *data = iio_priv(indio_dev);
- u16 reg;
- int ret;
+ u16 reg_thresh, reg_clr;
+ int thresh, clr, ret;
- /* Select threshold register */
- if (info == IIO_EV_INFO_VALUE) {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_R_T_RH_THRESH_HIGH;
- else
- reg = HDC3020_R_T_RH_THRESH_LOW;
+ /* Select threshold registers */
+ if (dir == IIO_EV_DIR_RISING) {
+ reg_thresh = HDC3020_R_T_RH_THRESH_HIGH;
+ reg_clr = HDC3020_R_T_RH_THRESH_HIGH_CLR;
} else {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_R_T_RH_THRESH_HIGH_CLR;
- else
- reg = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ reg_thresh = HDC3020_R_T_RH_THRESH_LOW;
+ reg_clr = HDC3020_R_T_RH_THRESH_LOW_CLR;
}
guard(mutex)(&data->lock);
- ret = hdc3020_read_be16(data, reg);
+ ret = hdc3020_read_be16(data, reg_thresh);
if (ret < 0)
return ret;
switch (chan->type) {
case IIO_TEMP:
- /*
- * Get the temperature threshold from 9 LSBs, shift them to get
- * the truncated temperature threshold representation and
- * calculate the threshold according to the formula in the
- * datasheet.
- */
- *val = FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
- *val = *val << HDC3020_THRESH_TEMP_TRUNC_SHIFT;
- *val = -2949075 + (175 * (*val));
+ thresh = hdc3020_thresh_get_temp(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = thresh;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+ if (ret < 0)
+ return ret;
+
+ clr = hdc3020_thresh_get_temp(ret);
+ *val = abs(thresh - clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
*val2 = 65535;
return IIO_VAL_FRACTIONAL;
case IIO_HUMIDITYRELATIVE:
- /*
- * Get the humidity threshold from 7 MSBs, shift them to get the
- * truncated humidity threshold representation and calculate the
- * threshold according to the formula in the datasheet.
- */
- *val = FIELD_GET(HDC3020_THRESH_HUM_MASK, ret);
- *val = (*val << HDC3020_THRESH_HUM_TRUNC_SHIFT) * 100;
+ thresh = hdc3020_thresh_get_hum(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = thresh;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+ if (ret < 0)
+ return ret;
+
+ clr = hdc3020_thresh_get_hum(ret);
+ *val = abs(thresh - clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
*val2 = 65535;
return IIO_VAL_FRACTIONAL;
default:
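For reference, the hdc3020 threshold helpers implement the datasheet conversion in fixed point: the register keeps a truncated 9-bit temperature field and 7-bit humidity field, and the helpers return values scaled by 65535 so the final division happens once in read_thresh/write_thresh. A hedged worked example, assuming truncation shifts of 7 for temperature and 9 for humidity (the shift constants are not shown in this hunk):

	/* Temperature: 9-bit field value 200 -> degrees Celsius. */
	int raw9 = 200;
	int temp_scaled = -2949075 + 175 * (raw9 << 7);  /* 1530925 */
	/* temp_scaled / 65535 ~= 23.36 degC, reported as IIO_VAL_FRACTIONAL. */

	/* Humidity: 7-bit field value 70 -> percent RH. */
	int raw7 = 70;
	int hum_scaled = (raw7 << 9) * 100;              /* 3584000 */
	/* hum_scaled / 65535 ~= 54.7 %RH. */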
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 5d42ab9b176a..67d74a1a1b26 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels,
ARRAY_SIZE(data->buffer.channels));
if (ret)
- return IRQ_NONE;
+ goto out;
} else {
for_each_set_bit(bit, indio_dev->active_scan_mask,
BMI323_CHAN_MAX) {
@@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels[index++],
BMI323_BYTES_PER_SAMPLE);
if (ret)
- return IRQ_NONE;
+ goto out;
}
}
iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
+out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 83d8504ebfff..4b2566693614 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -130,10 +130,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 63b85ec88c13..a8cf74c84c3c 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -222,10 +222,15 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
latency_accel = period_accel * wm_accel;
/* 0 value for watermark means that the sensor is turned off */
+ if (wm_gyro == 0 && wm_accel == 0)
+ return 0;
+
if (latency_gyro == 0) {
watermark = wm_accel;
+ st->fifo.watermark.eff_accel = wm_accel;
} else if (latency_accel == 0) {
watermark = wm_gyro;
+ st->fifo.watermark.eff_gyro = wm_gyro;
} else {
/* compute the smallest latency that is a multiple of both */
if (latency_gyro <= latency_accel)
@@ -241,6 +246,13 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
watermark = latency / period;
if (watermark < 1)
watermark = 1;
+ /* update effective watermark */
+ st->fifo.watermark.eff_gyro = latency / period_gyro;
+ if (st->fifo.watermark.eff_gyro < 1)
+ st->fifo.watermark.eff_gyro = 1;
+ st->fifo.watermark.eff_accel = latency / period_accel;
+ if (st->fifo.watermark.eff_accel < 1)
+ st->fifo.watermark.eff_accel = 1;
}
/* compute watermark value in bytes */
@@ -514,7 +526,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle gyroscope timestamp and FIFO data parsing */
if (st->fifo.nb.gyro > 0) {
ts = &gyro_st->ts;
- inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro,
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_gyro,
st->timestamp.gyro);
ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro);
if (ret)
@@ -524,7 +536,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle accelerometer timestamp and FIFO data parsing */
if (st->fifo.nb.accel > 0) {
ts = &accel_st->ts;
- inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel,
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_accel,
st->timestamp.accel);
ret = inv_icm42600_accel_parse_fifo(st->indio_accel);
if (ret)
@@ -577,6 +589,9 @@ int inv_icm42600_buffer_init(struct inv_icm42600_state *st)
unsigned int val;
int ret;
+ st->fifo.watermark.eff_gyro = 1;
+ st->fifo.watermark.eff_accel = 1;
+
/*
* Default FIFO configuration (bits 7 to 5)
* - use invalid value
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
index 8b85ee333bf8..f6c85daf42b0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
@@ -32,6 +32,8 @@ struct inv_icm42600_fifo {
struct {
unsigned int gyro;
unsigned int accel;
+ unsigned int eff_gyro;
+ unsigned int eff_accel;
} watermark;
size_t count;
struct {
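The new eff_gyro/eff_accel fields record how many samples of each sensor actually fit in the shared FIFO latency, and inv_icm42600_buffer_fifo_parse now feeds these effective batch sizes to the timestamp core instead of the raw FIFO counts. A rough example with assumed rates (the shared-latency computation itself is outside the hunks shown):

	/* Assumed: gyro at 100 Hz (10 ms period), accel at 50 Hz (20 ms
	 * period), and a shared FIFO latency of 80 ms.
	 */
	unsigned int period_gyro = 10, period_accel = 20, latency = 80; /* ms */
	unsigned int eff_gyro  = latency / period_gyro;   /* 8 samples  */
	unsigned int eff_accel = latency / period_accel;  /* 4 samples  */

	if (eff_gyro < 1)
		eff_gyro = 1;
	if (eff_accel < 1)
		eff_accel = 1;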
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 96116a68ab29..62fdae530334 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -537,6 +537,7 @@ static int inv_icm42600_irq_init(struct inv_icm42600_state *st, int irq,
if (ret)
return ret;
+ irq_type |= IRQF_ONESHOT;
return devm_request_threaded_irq(dev, irq, inv_icm42600_irq_timestamp,
inv_icm42600_irq_handler, irq_type,
"inv_icm42600", st);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index e6f8de80128c..938af5b640b0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -130,10 +130,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 0dc0f22a5582..3d3b27f28c9d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -100,8 +100,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
goto end_session;
/* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
- inv_sensors_timestamp_interrupt(&st->timestamp, nb, pf->timestamp);
- inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0);
+ inv_sensors_timestamp_interrupt(&st->timestamp, 1, pf->timestamp);
+ inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, 1, 0);
/* clear internal data buffer for avoiding kernel data leak */
memset(data, 0, sizeof(data));
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 1b603567ccc8..84273660ca2e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -300,6 +300,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type)
if (!st->trig)
return -ENOMEM;
+ irq_type |= IRQF_ONESHOT;
ret = devm_request_threaded_irq(&indio_dev->dev, st->irq,
&inv_mpu6050_interrupt_timestamp,
&inv_mpu6050_interrupt_handle,
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 16de57846bd9..2e84776f4fbd 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -315,7 +315,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig,
* this is the case if the IIO device and the trigger device share the
* same parent device.
*/
- if (iio_validate_own_trigger(pf->indio_dev, trig))
+ if (!iio_validate_own_trigger(pf->indio_dev, trig))
trig->attached_own_device = true;
return ret;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 52d773261828..485e6fc44a04 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -721,7 +721,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
return ret;
*val *= scale;
- return 0;
+ return ret;
} else {
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
if (ret < 0)
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index d6627b3e6000..66a063ea3db4 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -583,8 +583,8 @@ static int apds9306_intg_time_set(struct apds9306_data *data, int val2)
return ret;
intg_old = iio_gts_find_int_time_by_sel(&data->gts, intg_time_idx);
- if (ret < 0)
- return ret;
+ if (intg_old < 0)
+ return intg_old;
if (intg_old == val2)
return 0;
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 09f53d987c7d..221fa2c552ae 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
/*
* Temperature is returned in Celsius degrees in fractional
- * form down 2^16. We rescale by x1000 to return milli Celsius
- * to respect IIO ABI.
+ * form down 2^16. We rescale by x1000 to return millidegrees
+ * Celsius to respect IIO ABI.
*/
- *val = raw_temp * 1000;
- *val2 = 16;
- return IIO_VAL_FRACTIONAL_LOG2;
+ raw_temp = sign_extend32(raw_temp, 23);
+ *val = ((s64)raw_temp * 1000) / (1 << 16);
+ return IIO_VAL_INT;
}
static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
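The new bmp580 conversion sign-extends the 24-bit raw sample and divides out the 2^16 fixed-point fraction directly, so the channel returns an integer in millidegrees Celsius rather than a FRACTIONAL_LOG2 pair. A quick worked number:

	/* 24-bit raw value 0x180000: bit 23 is clear, so sign extension
	 * is a no-op and the value stays 1572864.
	 */
	long long raw_temp = 0x180000;
	int milli_c = (int)((raw_temp * 1000) / (1 << 16));  /* 24000 m degC */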
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
index 1f5c962c1818..f7f88498ba0e 100644
--- a/drivers/iio/temperature/mlx90635.c
+++ b/drivers/iio/temperature/mlx90635.c
@@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client)
"failed to allocate regmap\n");
regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
- if (IS_ERR(regmap))
- return dev_err_probe(&client->dev, PTR_ERR(regmap),
- "failed to allocate regmap\n");
+ if (IS_ERR(regmap_ee))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
+ "failed to allocate EEPROM regmap\n");
mlx90635 = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9dca451ed522..6974922e5609 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
struct bnxt_re_sqp_entries *sqp_tbl;
};
-#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -168,7 +166,7 @@ struct bnxt_re_dev {
struct bnxt_qplib_rcfw rcfw;
/* NQ */
- struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 4f13423ecdbd..887b09dd86e7 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
start, iova, length, access_flags);
+ access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~VALID_MR_FLAGS)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index ba13c5abf8ef..2d411a16a127 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -21,7 +21,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
gc = mdev_to_gc(dev);
- req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_SIZE);
+ req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
req = kzalloc(req_buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -41,18 +41,18 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
if (log_ind_tbl_size)
req->rss_enable = true;
- req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
+ req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
indir_tab);
req->update_indir_tab = true;
req->cqe_coalescing_enable = 1;
/* The ind table passed to the hardware must have
- * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
+ * MANA_INDIRECT_TABLE_DEF_SIZE entries. Adjust the verb
* ind_table to MANA_INDIRECT_TABLE_SIZE if required
*/
ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) {
req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
req->indir_tab[i]);
@@ -137,7 +137,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
}
ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
- if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
+ if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) {
ibdev_dbg(&mdev->ib_dev,
"Indirect table size %d exceeding limit\n",
ind_tbl_size);
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 8300ce622835..4f6c1968a2ee 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -83,6 +83,8 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
INIT_Q_COUNTER(resp_remote_access_errors),
INIT_Q_COUNTER(resp_cqe_flush_error),
INIT_Q_COUNTER(req_cqe_flush_error),
+ INIT_Q_COUNTER(req_transport_retries_exceeded),
+ INIT_Q_COUNTER(req_rnr_retries_exceeded),
};
static const struct mlx5_ib_counter roce_accl_cnts[] = {
@@ -102,6 +104,8 @@ static const struct mlx5_ib_counter vport_extended_err_cnts[] = {
INIT_VPORT_Q_COUNTER(resp_remote_access_errors),
INIT_VPORT_Q_COUNTER(resp_cqe_flush_error),
INIT_VPORT_Q_COUNTER(req_cqe_flush_error),
+ INIT_VPORT_Q_COUNTER(req_transport_retries_exceeded),
+ INIT_VPORT_Q_COUNTER(req_rnr_retries_exceeded),
};
static const struct mlx5_ib_counter vport_roce_accl_cnts[] = {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 2366c46eebc8..086de6a022f9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1810,7 +1810,7 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
}
resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- if (dev->wc_support)
+ if (mlx5_wc_support_get(dev->mdev))
resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
log_bf_reg_size);
resp->cache_line_size = cache_line_size();
@@ -2337,7 +2337,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
switch (command) {
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_ALLOC_WC:
- if (!dev->wc_support)
+ if (!mlx5_wc_support_get(dev->mdev))
return -EPERM;
fallthrough;
case MLX5_IB_MMAP_NC_PAGE:
@@ -3612,7 +3612,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
return -EOPNOTSUPP;
- if (!to_mdev(c->ibucontext.device)->wc_support &&
+ if (!mlx5_wc_support_get(to_mdev(c->ibucontext.device)->mdev) &&
alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
return -EOPNOTSUPP;
@@ -3759,25 +3759,13 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
return 0;
-err:
- mlx5r_macsec_dealloc_gids(dev);
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
+err:
+ mlx5r_macsec_dealloc_gids(dev);
return err;
}
-static int mlx5_ib_enable_driver(struct ib_device *dev)
-{
- struct mlx5_ib_dev *mdev = to_mdev(dev);
- int ret;
-
- ret = mlx5_ib_test_wc(mdev);
- mlx5_ib_dbg(mdev, "Write-Combining %s",
- mdev->wc_support ? "supported" : "not supported");
-
- return ret;
-}
-
static const struct ib_device_ops mlx5_ib_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_MLX5,
@@ -3808,7 +3796,6 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.drain_rq = mlx5_ib_drain_rq,
.drain_sq = mlx5_ib_drain_sq,
.device_group = &mlx5_attr_group,
- .enable_driver = mlx5_ib_enable_driver,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mlx5_ib_get_dma_mr,
.get_link_layer = mlx5_ib_port_link_layer,
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 5a22be14d958..af321f6ef7f5 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,10 +30,8 @@
* SOFTWARE.
*/
-#include <linux/io.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
-#include <linux/jiffies.h>
/*
* Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
@@ -95,199 +93,3 @@ unsigned long __mlx5_umem_find_best_quantized_pgoff(
return 0;
return page_size;
}
-
-#define WR_ID_BF 0xBF
-#define WR_ID_END 0xBAD
-#define TEST_WC_NUM_WQES 255
-#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
-static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
- bool signaled)
-{
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
- struct mlx5_wqe_ctrl_seg *ctrl;
- struct mlx5_bf *bf = &qp->bf;
- __be32 mmio_wqe[16] = {};
- unsigned long flags;
- unsigned int idx;
-
- if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
- return -EIO;
-
- spin_lock_irqsave(&qp->sq.lock, flags);
-
- idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
- ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
-
- memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
- ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
- ctrl->opmod_idx_opcode =
- cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
- ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
- (qp->trans_qp.base.mqp.qpn << 8));
-
- qp->sq.wrid[idx] = wr_id;
- qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
- qp->sq.wqe_head[idx] = qp->sq.head + 1;
- qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
- MLX5_SEND_WQE_BB);
- qp->sq.w_list[idx].next = qp->sq.cur_post;
- qp->sq.head++;
-
- memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
- ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
- MLX5_WQE_CTRL_CQ_UPDATE;
-
- /* Make sure that descriptors are written before
- * updating doorbell record and ringing the doorbell
- */
- wmb();
-
- qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
-
- /* Make sure doorbell record is visible to the HCA before
- * we hit doorbell
- */
- wmb();
- __iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
- sizeof(mmio_wqe) / 8);
-
- bf->offset ^= bf->buf_size;
-
- spin_unlock_irqrestore(&qp->sq.lock, flags);
-
- return 0;
-}
-
-static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
-{
- int ret;
- struct ib_wc wc = {};
- unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
-
- do {
- ret = ib_poll_cq(cq, 1, &wc);
- if (ret < 0 || wc.status)
- return ret < 0 ? ret : -EINVAL;
- if (ret)
- break;
- } while (!time_after(jiffies, end));
-
- if (!ret)
- return -ETIMEDOUT;
-
- if (wc.wr_id != WR_ID_BF)
- ret = 0;
-
- return ret;
-}
-
-static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
-{
- int err, i;
-
- for (i = 0; i < TEST_WC_NUM_WQES; i++) {
- err = post_send_nop(dev, qp, WR_ID_BF, false);
- if (err)
- return err;
- }
-
- return post_send_nop(dev, qp, WR_ID_END, true);
-}
-
-int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
-{
- struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
- int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
- struct ib_qp_init_attr qp_init_attr = {
- .cap = { .max_send_wr = TEST_WC_NUM_WQES },
- .qp_type = IB_QPT_UD,
- .sq_sig_type = IB_SIGNAL_REQ_WR,
- .create_flags = MLX5_IB_QP_CREATE_WC_TEST,
- };
- struct ib_qp_attr qp_attr = { .port_num = 1 };
- struct ib_device *ibdev = &dev->ib_dev;
- struct ib_qp *qp;
- struct ib_cq *cq;
- struct ib_pd *pd;
- int ret;
-
- if (!MLX5_CAP_GEN(dev->mdev, bf))
- return 0;
-
- if (!dev->mdev->roce.roce_en &&
- port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
- if (mlx5_core_is_pf(dev->mdev))
- dev->wc_support = arch_can_pci_mmap_wc();
- return 0;
- }
-
- ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
- if (ret)
- goto print_err;
-
- if (!dev->wc_bfreg.wc)
- goto out1;
-
- pd = ib_alloc_pd(ibdev, 0);
- if (IS_ERR(pd)) {
- ret = PTR_ERR(pd);
- goto out1;
- }
-
- cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
- if (IS_ERR(cq)) {
- ret = PTR_ERR(cq);
- goto out2;
- }
-
- qp_init_attr.recv_cq = cq;
- qp_init_attr.send_cq = cq;
- qp = ib_create_qp(pd, &qp_init_attr);
- if (IS_ERR(qp)) {
- ret = PTR_ERR(qp);
- goto out3;
- }
-
- qp_attr.qp_state = IB_QPS_INIT;
- ret = ib_modify_qp(qp, &qp_attr,
- IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
- IB_QP_QKEY);
- if (ret)
- goto out4;
-
- qp_attr.qp_state = IB_QPS_RTR;
- ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
- if (ret)
- goto out4;
-
- qp_attr.qp_state = IB_QPS_RTS;
- ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
- if (ret)
- goto out4;
-
- ret = test_wc_do_send(dev, qp);
- if (ret < 0)
- goto out4;
-
- ret = test_wc_poll_cq_result(dev, cq);
- if (ret > 0) {
- dev->wc_support = true;
- ret = 0;
- }
-
-out4:
- ib_destroy_qp(qp);
-out3:
- ib_destroy_cq(cq);
-out2:
- ib_dealloc_pd(pd);
-out1:
- mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
-print_err:
- if (ret)
- mlx5_ib_err(
- dev,
- "Error %d while trying to test write-combining support\n",
- ret);
- return ret;
-}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f255a12e26a0..b68779e9d86c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -341,7 +341,6 @@ struct mlx5_ib_flow_db {
* rely on the range reserved for that use in the ib_qp_create_flags enum.
*/
#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
-#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
struct wr_list {
u16 opcode;
@@ -1123,7 +1122,6 @@ struct mlx5_ib_dev {
u8 ib_active:1;
u8 is_rep:1;
u8 lag_active:1;
- u8 wc_support:1;
u8 fill_delay;
struct umr_common umrc;
/* sync used page count stats
@@ -1149,7 +1147,6 @@ struct mlx5_ib_dev {
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
- struct mlx5_sq_bfreg wc_bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ecc111ed5d86..d3c1f63791a2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2,
(ent->rb_key.access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
MLX5_SET(mkc, mkc, translations_octword_size,
get_mkc_octo_size(ent->rb_key.access_mode,
@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
new = &((*new)->rb_left);
if (cmp < 0)
new = &((*new)->rb_right);
- if (cmp == 0) {
- mutex_unlock(&cache->rb_lock);
+ if (cmp == 0)
return -EEXIST;
- }
}
/* Add new node and rebalance tree. */
@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
}
mr->mmkey.cache_ent = ent;
mr->mmkey.type = MLX5_MKEY_MR;
+ mr->mmkey.rb_key = ent->rb_key;
+ mr->mmkey.cacheable = true;
init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
@@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
mr->page_shift = order_base_2(page_size);
- mr->mmkey.cacheable = true;
set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e2164f813607..e8c0fead4062 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1107,8 +1107,6 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
- else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
- qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -2959,14 +2957,6 @@ static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
return;
}
- if (flag == MLX5_IB_QP_CREATE_WC_TEST) {
- /*
- * Special case, if condition didn't meet, it won't be error,
- * just different in-kernel flow.
- */
- *flags &= ~MLX5_IB_QP_CREATE_WC_TEST;
- return;
- }
mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
}
@@ -3027,8 +3017,6 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
IB_QP_CREATE_PCI_WRITE_END_PADDING,
MLX5_CAP_GEN(mdev, end_pad), qp);
- process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST,
- qp_type != MLX5_IB_QPT_REG_UMR, qp);
process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
true, qp);
@@ -4609,10 +4597,6 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
return true;
- /* Internal QP used for wc testing, with NOPs in wq */
- if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
- return true;
-
return false;
}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index a056ea835da5..84be0c3d5699 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
int err;
struct mlx5_srq_attr in = {};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
if (init_attr->srq_type != IB_SRQT_BASIC &&
init_attr->srq_type != IB_SRQT_XRC &&
init_attr->srq_type != IB_SRQT_TM)
return -EOPNOTSUPP;
- /* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= max_srq_wqes) {
- mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
- init_attr->attr.max_wr,
- max_srq_wqes);
+ /* Sanity check SRQ and sge size before proceeding */
+ if (init_attr->attr.max_wr >= max_srq_wqes ||
+ init_attr->attr.max_sge > max_sge_sz) {
+ mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
+ init_attr->attr.max_wr, max_srq_wqes,
+ init_attr->attr.max_sge, max_sge_sz);
return -EINVAL;
}
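The added check bounds max_sge by how many scatter entries fit in one receive WQE. Assuming the usual 16-byte mlx5_wqe_data_seg (byte count, lkey, 64-bit address) and an illustrative device cap, the arithmetic looks like this:

	/* Illustrative: a 512-byte max receive WQE allows 32 data segments. */
	unsigned int max_wqe_sz_rq = 512;           /* assumed device cap */
	unsigned int seg_sz = 16;                   /* data segment size  */
	unsigned int max_sge_sz = max_wqe_sz_rq / seg_sz;  /* 32 */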
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c6a7fa3054fa..6596a85723c9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -344,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
* receive buffer later. For rmda operations additional
* length checks are performed in check_rkey.
*/
+ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+ unsigned int payload = payload_size(pkt);
+ unsigned int recv_buffer_len = 0;
+ int i;
+
+ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+ if (payload + 40 > recv_buffer_len) {
+ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
(qp_type(qp) == IB_QPT_UC))) {
unsigned int mtu = qp->mtu;
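The extra 40 bytes in the new UD/GSI check account for the Global Routing Header slot that UD receive buffers reserve in front of the payload, so the sum of the posted SGE lengths must cover GRH plus data. A sketch with made-up buffer sizes:

	/* Two posted SGEs of 256 bytes each -> 512 bytes of receive space. */
	unsigned int sge_len[2] = { 256, 256 };
	unsigned int recv_buffer_len = 0, payload = 480;
	int i, too_small;

	for (i = 0; i < 2; i++)
		recv_buffer_len += sge_len[i];

	/* 480 + 40 > 512, so this packet would be dropped with ERR_LENGTH. */
	too_small = (payload + 40 > recv_buffer_len);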
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index c7d4d8ab5a09..de6238ee4379 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
int i;
for (i = 0; i < ibwr->num_sge; i++, sge++) {
- memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
+ memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
p += sge->length;
}
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 70f0654c58b6..2b8370ecf42a 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -209,6 +209,7 @@ static const struct xpad_device {
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
+ { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
diff --git a/drivers/input/misc/88pm886-onkey.c b/drivers/input/misc/88pm886-onkey.c
new file mode 100644
index 000000000000..284ff5190b6e
--- /dev/null
+++ b/drivers/input/misc/88pm886-onkey.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/88pm886.h>
+
+struct pm886_onkey {
+ struct input_dev *idev;
+ struct pm886_chip *chip;
+};
+
+static irqreturn_t pm886_onkey_irq_handler(int irq, void *data)
+{
+ struct pm886_onkey *onkey = data;
+ struct regmap *regmap = onkey->chip->regmap;
+ struct input_dev *idev = onkey->idev;
+ struct device *parent = idev->dev.parent;
+ unsigned int val;
+ int err;
+
+ err = regmap_read(regmap, PM886_REG_STATUS1, &val);
+ if (err) {
+ dev_err(parent, "Failed to read status: %d\n", err);
+ return IRQ_NONE;
+ }
+ val &= PM886_ONKEY_STS1;
+
+ input_report_key(idev, KEY_POWER, val);
+ input_sync(idev);
+
+ return IRQ_HANDLED;
+}
+
+static int pm886_onkey_probe(struct platform_device *pdev)
+{
+ struct pm886_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct pm886_onkey *onkey;
+ struct input_dev *idev;
+ int irq, err;
+
+ onkey = devm_kzalloc(dev, sizeof(*onkey), GFP_KERNEL);
+ if (!onkey)
+ return -ENOMEM;
+
+ onkey->chip = chip;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev) {
+ dev_err(dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+ onkey->idev = idev;
+
+ idev->name = "88pm886-onkey";
+ idev->phys = "88pm886-onkey/input0";
+ idev->id.bustype = BUS_I2C;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+
+ err = devm_request_threaded_irq(dev, irq, NULL, pm886_onkey_irq_handler,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND, "onkey",
+ onkey);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to request IRQ\n");
+
+ err = input_register_device(idev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register input device\n");
+
+ return 0;
+}
+
+static const struct platform_device_id pm886_onkey_id_table[] = {
+ { "88pm886-onkey", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, pm886_onkey_id_table);
+
+static struct platform_driver pm886_onkey_driver = {
+ .driver = {
+ .name = "88pm886-onkey",
+ },
+ .probe = pm886_onkey_probe,
+ .id_table = pm886_onkey_id_table,
+};
+module_platform_driver(pm886_onkey_driver);
+
+MODULE_DESCRIPTION("Marvell 88PM886 onkey driver");
+MODULE_AUTHOR("Karel Balej <balejk@matfyz.cz>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 6ba984d7f0b1..6a852c76331b 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -33,6 +33,13 @@ config INPUT_88PM80X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm80x_onkey.
+config INPUT_88PM886_ONKEY
+ tristate "Marvell 88PM886 onkey support"
+ depends on MFD_88PM886_PMIC
+ help
+ Support the onkey of Marvell 88PM886 PMIC as an input device
+ reporting power button status.
+
config INPUT_AB8500_PONKEY
tristate "AB8500 Pon (PowerOn) Key"
depends on AB8500_CORE
@@ -140,6 +147,16 @@ config INPUT_BMA150
To compile this driver as a module, choose M here: the
module will be called bma150.
+config INPUT_CS40L50_VIBRA
+ tristate "CS40L50 Haptic Driver support"
+ depends on MFD_CS40L50_CORE
+ help
+ Say Y here to enable support for Cirrus Logic's CS40L50
+ haptic driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cs40l50-vibra.
+
config INPUT_E3X0_BUTTON
tristate "NI Ettus Research USRP E3xx Button support."
default n
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 04296a4abe8e..4f7f736831ba 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
obj-$(CONFIG_INPUT_88PM80X_ONKEY) += 88pm80x_onkey.o
+obj-$(CONFIG_INPUT_88PM886_ONKEY) += 88pm886-onkey.o
obj-$(CONFIG_INPUT_AB8500_PONKEY) += ab8500-ponkey.o
obj-$(CONFIG_INPUT_AD714X) += ad714x.o
obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_CPCAP_PWRBUTTON) += cpcap-pwrbutton.o
+obj-$(CONFIG_INPUT_CS40L50_VIBRA) += cs40l50-vibra.o
obj-$(CONFIG_INPUT_DA7280_HAPTICS) += da7280.o
obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c
new file mode 100644
index 000000000000..03bdb7c26ec0
--- /dev/null
+++ b/drivers/input/misc/cs40l50-vibra.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/input.h>
+#include <linux/mfd/cs40l50.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/* Wavetables */
+#define CS40L50_RAM_INDEX_START 0x1000000
+#define CS40L50_RAM_INDEX_END 0x100007F
+#define CS40L50_RTH_INDEX_START 0x1400000
+#define CS40L50_RTH_INDEX_END 0x1400001
+#define CS40L50_ROM_INDEX_START 0x1800000
+#define CS40L50_ROM_INDEX_END 0x180001A
+#define CS40L50_TYPE_PCM 8
+#define CS40L50_TYPE_PWLE 12
+#define CS40L50_PCM_ID 0x0
+#define CS40L50_OWT_CUSTOM_DATA_SIZE 2
+#define CS40L50_CUSTOM_DATA_MASK 0xFFFFU
+
+/* DSP */
+#define CS40L50_GPIO_BASE 0x2804140
+#define CS40L50_OWT_BASE 0x2805C34
+#define CS40L50_OWT_SIZE 0x2805C38
+#define CS40L50_OWT_NEXT 0x2805C3C
+#define CS40L50_EFFECTS_MAX 1
+
+/* GPIO */
+#define CS40L50_GPIO_NUM_MASK GENMASK(14, 12)
+#define CS40L50_GPIO_EDGE_MASK BIT(15)
+#define CS40L50_GPIO_MAPPING_NONE 0
+#define CS40L50_GPIO_DISABLE 0x1FF
+
+enum cs40l50_bank_type {
+ CS40L50_WVFRM_BANK_RAM,
+ CS40L50_WVFRM_BANK_ROM,
+ CS40L50_WVFRM_BANK_OWT,
+ CS40L50_WVFRM_BANK_NUM,
+};
+
+/* Describes an area in DSP memory populated by effects */
+struct cs40l50_bank {
+ enum cs40l50_bank_type type;
+ u32 base_index;
+ u32 max_index;
+};
+
+struct cs40l50_effect {
+ enum cs40l50_bank_type type;
+ struct list_head list;
+ u32 gpio_reg;
+ u32 index;
+ int id;
+};
+
+/* Describes haptic interface of loaded DSP firmware */
+struct cs40l50_vibra_dsp {
+ struct cs40l50_bank *banks;
+ u32 gpio_base_reg;
+ u32 owt_offset_reg;
+ u32 owt_size_reg;
+ u32 owt_base_reg;
+ u32 push_owt_cmd;
+ u32 delete_owt_cmd;
+ u32 stop_cmd;
+ int (*write)(struct device *dev, struct regmap *regmap, u32 val);
+};
+
+/* Describes configuration and state of haptic operations */
+struct cs40l50_vibra {
+ struct device *dev;
+ struct regmap *regmap;
+ struct input_dev *input;
+ struct workqueue_struct *vib_wq;
+ struct list_head effect_head;
+ struct cs40l50_vibra_dsp dsp;
+};
+
+struct cs40l50_work {
+ struct cs40l50_vibra *vib;
+ struct ff_effect *effect;
+ struct work_struct work;
+ s16 *custom_data;
+ int custom_len;
+ int count;
+ int error;
+};
+
+static struct cs40l50_bank cs40l50_banks[] = {
+ {
+ .type = CS40L50_WVFRM_BANK_RAM,
+ .base_index = CS40L50_RAM_INDEX_START,
+ .max_index = CS40L50_RAM_INDEX_END,
+ },
+ {
+ .type = CS40L50_WVFRM_BANK_ROM,
+ .base_index = CS40L50_ROM_INDEX_START,
+ .max_index = CS40L50_ROM_INDEX_END,
+ },
+ {
+ .type = CS40L50_WVFRM_BANK_OWT,
+ .base_index = CS40L50_RTH_INDEX_START,
+ .max_index = CS40L50_RTH_INDEX_END,
+ },
+};
+
+static struct cs40l50_vibra_dsp cs40l50_dsp = {
+ .banks = cs40l50_banks,
+ .gpio_base_reg = CS40L50_GPIO_BASE,
+ .owt_base_reg = CS40L50_OWT_BASE,
+ .owt_offset_reg = CS40L50_OWT_NEXT,
+ .owt_size_reg = CS40L50_OWT_SIZE,
+ .push_owt_cmd = CS40L50_OWT_PUSH,
+ .delete_owt_cmd = CS40L50_OWT_DELETE,
+ .stop_cmd = CS40L50_STOP_PLAYBACK,
+ .write = cs40l50_dsp_write,
+};
+
+static struct cs40l50_effect *cs40l50_find_effect(int id, struct list_head *effect_head)
+{
+ struct cs40l50_effect *effect;
+
+ list_for_each_entry(effect, effect_head, list)
+ if (effect->id == id)
+ return effect;
+
+ return NULL;
+}
+
+static int cs40l50_effect_bank_set(struct cs40l50_work *work_data,
+ struct cs40l50_effect *effect)
+{
+ s16 bank_type = work_data->custom_data[0] & CS40L50_CUSTOM_DATA_MASK;
+
+ if (bank_type >= CS40L50_WVFRM_BANK_NUM) {
+ dev_err(work_data->vib->dev, "Invalid bank (%d)\n", bank_type);
+ return -EINVAL;
+ }
+
+ if (work_data->custom_len > CS40L50_OWT_CUSTOM_DATA_SIZE)
+ effect->type = CS40L50_WVFRM_BANK_OWT;
+ else
+ effect->type = bank_type;
+
+ return 0;
+}
+
+static int cs40l50_effect_index_set(struct cs40l50_work *work_data,
+ struct cs40l50_effect *effect)
+{
+ struct cs40l50_vibra *vib = work_data->vib;
+ struct cs40l50_effect *owt_effect;
+ u32 base_index, max_index;
+
+ base_index = vib->dsp.banks[effect->type].base_index;
+ max_index = vib->dsp.banks[effect->type].max_index;
+
+ effect->index = base_index;
+
+ switch (effect->type) {
+ case CS40L50_WVFRM_BANK_OWT:
+ list_for_each_entry(owt_effect, &vib->effect_head, list)
+ if (owt_effect->type == CS40L50_WVFRM_BANK_OWT)
+ effect->index++;
+ break;
+ case CS40L50_WVFRM_BANK_ROM:
+ case CS40L50_WVFRM_BANK_RAM:
+ effect->index += work_data->custom_data[1] & CS40L50_CUSTOM_DATA_MASK;
+ break;
+ default:
+ dev_err(vib->dev, "Bank type %d not supported\n", effect->type);
+ return -EINVAL;
+ }
+
+ if (effect->index > max_index || effect->index < base_index) {
+ dev_err(vib->dev, "Index out of bounds: %u\n", effect->index);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int cs40l50_effect_gpio_mapping_set(struct cs40l50_work *work_data,
+ struct cs40l50_effect *effect)
+{
+ u16 gpio_edge, gpio_num, button = work_data->effect->trigger.button;
+ struct cs40l50_vibra *vib = work_data->vib;
+
+ if (button) {
+ gpio_num = FIELD_GET(CS40L50_GPIO_NUM_MASK, button);
+ gpio_edge = FIELD_GET(CS40L50_GPIO_EDGE_MASK, button);
+ effect->gpio_reg = vib->dsp.gpio_base_reg + (gpio_num * 8) - gpio_edge;
+
+ return regmap_write(vib->regmap, effect->gpio_reg, button);
+ }
+
+ effect->gpio_reg = CS40L50_GPIO_MAPPING_NONE;
+
+ return 0;
+}
+
+struct cs40l50_owt_header {
+ u32 type;
+ u32 data_words;
+ u32 offset;
+} __packed;
+
+static int cs40l50_upload_owt(struct cs40l50_work *work_data)
+{
+ u8 *new_owt_effect_data __free(kfree) = NULL;
+ struct cs40l50_vibra *vib = work_data->vib;
+ size_t len = work_data->custom_len * 2;
+ struct cs40l50_owt_header header;
+ u32 offset, size;
+ int error;
+
+ error = regmap_read(vib->regmap, vib->dsp.owt_size_reg, &size);
+ if (error)
+ return error;
+
+ if ((size * sizeof(u32)) < sizeof(header) + len) {
+ dev_err(vib->dev, "No space in open wavetable for effect\n");
+ return -ENOSPC;
+ }
+
+ header.type = work_data->custom_data[0] == CS40L50_PCM_ID ? CS40L50_TYPE_PCM :
+ CS40L50_TYPE_PWLE;
+ header.offset = sizeof(header) / sizeof(u32);
+ header.data_words = len / sizeof(u32);
+
+ new_owt_effect_data = kmalloc(sizeof(header) + len, GFP_KERNEL);
+ if (!new_owt_effect_data)
+ return -ENOMEM;
+
+ memcpy(new_owt_effect_data, &header, sizeof(header));
+ memcpy(new_owt_effect_data + sizeof(header), work_data->custom_data, len);
+
+ error = regmap_read(vib->regmap, vib->dsp.owt_offset_reg, &offset);
+ if (error)
+ return error;
+
+ error = regmap_bulk_write(vib->regmap, vib->dsp.owt_base_reg +
+ (offset * sizeof(u32)), new_owt_effect_data,
+ sizeof(header) + len);
+ if (error)
+ return error;
+
+ error = vib->dsp.write(vib->dev, vib->regmap, vib->dsp.push_owt_cmd);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static void cs40l50_add_worker(struct work_struct *work)
+{
+ struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
+ struct cs40l50_vibra *vib = work_data->vib;
+ struct cs40l50_effect *effect;
+ bool is_new = false;
+ int error;
+
+ error = pm_runtime_resume_and_get(vib->dev);
+ if (error)
+ goto err_exit;
+
+ /* Update effect if already uploaded, otherwise create new effect */
+ effect = cs40l50_find_effect(work_data->effect->id, &vib->effect_head);
+ if (!effect) {
+ effect = kzalloc(sizeof(*effect), GFP_KERNEL);
+ if (!effect) {
+ error = -ENOMEM;
+ goto err_pm;
+ }
+
+ effect->id = work_data->effect->id;
+ is_new = true;
+ }
+
+ error = cs40l50_effect_bank_set(work_data, effect);
+ if (error)
+ goto err_free;
+
+ error = cs40l50_effect_index_set(work_data, effect);
+ if (error)
+ goto err_free;
+
+ error = cs40l50_effect_gpio_mapping_set(work_data, effect);
+ if (error)
+ goto err_free;
+
+ if (effect->type == CS40L50_WVFRM_BANK_OWT)
+ error = cs40l50_upload_owt(work_data);
+err_free:
+ if (is_new) {
+ if (error)
+ kfree(effect);
+ else
+ list_add(&effect->list, &vib->effect_head);
+ }
+err_pm:
+ pm_runtime_mark_last_busy(vib->dev);
+ pm_runtime_put_autosuspend(vib->dev);
+err_exit:
+ work_data->error = error;
+}
+
+static int cs40l50_add(struct input_dev *dev, struct ff_effect *effect,
+ struct ff_effect *old)
+{
+ struct ff_periodic_effect *periodic = &effect->u.periodic;
+ struct cs40l50_vibra *vib = input_get_drvdata(dev);
+ struct cs40l50_work work_data;
+
+ if (effect->type != FF_PERIODIC || periodic->waveform != FF_CUSTOM) {
+ dev_err(vib->dev, "Type (%#X) or waveform (%#X) unsupported\n",
+ effect->type, periodic->waveform);
+ return -EINVAL;
+ }
+
+ work_data.custom_data = memdup_array_user(effect->u.periodic.custom_data,
+ effect->u.periodic.custom_len,
+ sizeof(s16));
+ if (IS_ERR(work_data.custom_data))
+ return PTR_ERR(work_data.custom_data);
+
+ work_data.custom_len = effect->u.periodic.custom_len;
+ work_data.vib = vib;
+ work_data.effect = effect;
+ INIT_WORK(&work_data.work, cs40l50_add_worker);
+
+ /* Push to the workqueue to serialize with playbacks */
+ queue_work(vib->vib_wq, &work_data.work);
+ flush_work(&work_data.work);
+
+ kfree(work_data.custom_data);
+
+ return work_data.error;
+}
+
+static void cs40l50_start_worker(struct work_struct *work)
+{
+ struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
+ struct cs40l50_vibra *vib = work_data->vib;
+ struct cs40l50_effect *start_effect;
+
+ if (pm_runtime_resume_and_get(vib->dev) < 0)
+ goto err_free;
+
+ start_effect = cs40l50_find_effect(work_data->effect->id, &vib->effect_head);
+ if (start_effect) {
+ while (--work_data->count >= 0) {
+ vib->dsp.write(vib->dev, vib->regmap, start_effect->index);
+ usleep_range(work_data->effect->replay.length,
+ work_data->effect->replay.length + 100);
+ }
+ } else {
+ dev_err(vib->dev, "Effect to play not found\n");
+ }
+
+ pm_runtime_mark_last_busy(vib->dev);
+ pm_runtime_put_autosuspend(vib->dev);
+err_free:
+ kfree(work_data);
+}
+
+static void cs40l50_stop_worker(struct work_struct *work)
+{
+ struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
+ struct cs40l50_vibra *vib = work_data->vib;
+
+ if (pm_runtime_resume_and_get(vib->dev) < 0)
+ return;
+
+ vib->dsp.write(vib->dev, vib->regmap, vib->dsp.stop_cmd);
+
+ pm_runtime_mark_last_busy(vib->dev);
+ pm_runtime_put_autosuspend(vib->dev);
+
+ kfree(work_data);
+}
+
+static int cs40l50_playback(struct input_dev *dev, int effect_id, int val)
+{
+ struct cs40l50_vibra *vib = input_get_drvdata(dev);
+ struct cs40l50_work *work_data;
+
+ work_data = kzalloc(sizeof(*work_data), GFP_ATOMIC);
+ if (!work_data)
+ return -ENOMEM;
+
+ work_data->vib = vib;
+
+ if (val > 0) {
+ work_data->effect = &dev->ff->effects[effect_id];
+ work_data->count = val;
+ INIT_WORK(&work_data->work, cs40l50_start_worker);
+ } else {
+ /* Stop the amplifier as device drives only one effect */
+ INIT_WORK(&work_data->work, cs40l50_stop_worker);
+ }
+
+ queue_work(vib->vib_wq, &work_data->work);
+
+ return 0;
+}
+
+static void cs40l50_erase_worker(struct work_struct *work)
+{
+ struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
+ struct cs40l50_effect *erase_effect, *owt_effect;
+ struct cs40l50_vibra *vib = work_data->vib;
+ int error;
+
+ error = pm_runtime_resume_and_get(vib->dev);
+ if (error)
+ goto err_exit;
+
+ erase_effect = cs40l50_find_effect(work_data->effect->id, &vib->effect_head);
+ if (!erase_effect) {
+ dev_err(vib->dev, "Effect to erase not found\n");
+ error = -EINVAL;
+ goto err_pm;
+ }
+
+ if (erase_effect->gpio_reg != CS40L50_GPIO_MAPPING_NONE) {
+ error = regmap_write(vib->regmap, erase_effect->gpio_reg,
+ CS40L50_GPIO_DISABLE);
+ if (error)
+ goto err_pm;
+ }
+
+ if (erase_effect->type == CS40L50_WVFRM_BANK_OWT) {
+ error = vib->dsp.write(vib->dev, vib->regmap,
+ vib->dsp.delete_owt_cmd |
+ (erase_effect->index & 0xFF));
+ if (error)
+ goto err_pm;
+
+ list_for_each_entry(owt_effect, &vib->effect_head, list)
+ if (owt_effect->type == CS40L50_WVFRM_BANK_OWT &&
+ owt_effect->index > erase_effect->index)
+ owt_effect->index--;
+ }
+
+ list_del(&erase_effect->list);
+ kfree(erase_effect);
+err_pm:
+ pm_runtime_mark_last_busy(vib->dev);
+ pm_runtime_put_autosuspend(vib->dev);
+err_exit:
+ work_data->error = error;
+}
+
+static int cs40l50_erase(struct input_dev *dev, int effect_id)
+{
+ struct cs40l50_vibra *vib = input_get_drvdata(dev);
+ struct cs40l50_work work_data;
+
+ work_data.vib = vib;
+ work_data.effect = &dev->ff->effects[effect_id];
+
+ INIT_WORK(&work_data.work, cs40l50_erase_worker);
+
+ /* Push to the workqueue to serialize with playbacks */
+ queue_work(vib->vib_wq, &work_data.work);
+ flush_work(&work_data.work);
+
+ return work_data.error;
+}
+
+static void cs40l50_remove_wq(void *data)
+{
+ flush_workqueue(data);
+ destroy_workqueue(data);
+}
+
+static int cs40l50_vibra_probe(struct platform_device *pdev)
+{
+ struct cs40l50 *cs40l50 = dev_get_drvdata(pdev->dev.parent);
+ struct cs40l50_vibra *vib;
+ int error;
+
+ vib = devm_kzalloc(pdev->dev.parent, sizeof(*vib), GFP_KERNEL);
+ if (!vib)
+ return -ENOMEM;
+
+ vib->dev = cs40l50->dev;
+ vib->regmap = cs40l50->regmap;
+ vib->dsp = cs40l50_dsp;
+
+ vib->input = devm_input_allocate_device(vib->dev);
+ if (!vib->input)
+ return -ENOMEM;
+
+ vib->input->id.product = cs40l50->devid;
+ vib->input->id.version = cs40l50->revid;
+ vib->input->name = "cs40l50_vibra";
+
+ input_set_drvdata(vib->input, vib);
+ input_set_capability(vib->input, EV_FF, FF_PERIODIC);
+ input_set_capability(vib->input, EV_FF, FF_CUSTOM);
+
+ error = input_ff_create(vib->input, CS40L50_EFFECTS_MAX);
+ if (error) {
+ dev_err(vib->dev, "Failed to create input device\n");
+ return error;
+ }
+
+ vib->input->ff->upload = cs40l50_add;
+ vib->input->ff->playback = cs40l50_playback;
+ vib->input->ff->erase = cs40l50_erase;
+
+ INIT_LIST_HEAD(&vib->effect_head);
+
+ vib->vib_wq = alloc_ordered_workqueue("vib_wq", WQ_HIGHPRI);
+ if (!vib->vib_wq)
+ return -ENOMEM;
+
+ error = devm_add_action_or_reset(vib->dev, cs40l50_remove_wq, vib->vib_wq);
+ if (error)
+ return error;
+
+ error = input_register_device(vib->input);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct platform_device_id cs40l50_vibra_id_match[] = {
+ { "cs40l50-vibra", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cs40l50_vibra_id_match);
+
+static struct platform_driver cs40l50_vibra_driver = {
+ .probe = cs40l50_vibra_probe,
+ .id_table = cs40l50_vibra_id_match,
+ .driver = {
+ .name = "cs40l50-vibra",
+ },
+};
+module_platform_driver(cs40l50_vibra_driver);
+
+MODULE_DESCRIPTION("CS40L50 Advanced Haptic Driver");
+MODULE_AUTHOR("James Ogletree, Cirrus Logic Inc. <james.ogletree@cirrus.com>");
+MODULE_LICENSE("GPL");
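/*
 * Illustrative sketch, not part of the cs40l50 patch above: the driver
 * serializes upload/erase against playback by queueing a stack-allocated
 * work item on its ordered workqueue and flushing it before returning to
 * the caller. A minimal, hypothetical example of that pattern follows;
 * demo_work, demo_run() and demo_sync() are invented names, and the driver
 * itself uses plain INIT_WORK() on its stack item.
 */
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_work {
	struct work_struct work;
	int error;
};

static void demo_run(struct work_struct *work)
{
	struct demo_work *dw = container_of(work, struct demo_work, work);

	/* Runs on the ordered workqueue, after any already-queued playback. */
	dw->error = 0;
}

static int demo_sync(struct workqueue_struct *wq)
{
	struct demo_work dw = { .error = -EINVAL };

	INIT_WORK_ONSTACK(&dw.work, demo_run);
	queue_work(wq, &dw.work);
	flush_work(&dw.work);	/* Safe: the work item lives on our stack. */
	destroy_work_on_stack(&dw.work);

	return dw.error;
}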
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 4e38229404b4..b4723ea395eb 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1477,15 +1477,46 @@ static void elantech_disconnect(struct psmouse *psmouse)
}
/*
+ * Some hw_version 4 models fail to properly activate absolute mode on
+ * resume without going through a disable/enable cycle.
+ */
+static const struct dmi_system_id elantech_needs_reenable[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Lenovo N24 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81AF"),
+ },
+ },
+#endif
+ { }
+};
+
+/*
* Put the touchpad back into absolute mode when reconnecting
*/
static int elantech_reconnect(struct psmouse *psmouse)
{
+ int err;
+
psmouse_reset(psmouse);
if (elantech_detect(psmouse, 0))
return -1;
+ if (dmi_check_system(elantech_needs_reenable)) {
+ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE);
+ if (err)
+ psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n",
+ psmouse->ps2dev.serio->phys, err);
+
+ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+ if (err)
+ psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n",
+ psmouse->ps2dev.serio->phys, err);
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad back into absolute mode.\n");
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index ea9eff7c8099..fb1d986a6895 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -21,19 +21,16 @@
#include "psmouse.h"
#include "vmmouse.h"
-#define VMMOUSE_PROTO_MAGIC 0x564D5868U
-
/*
* Main commands supported by the vmmouse hypervisor port.
*/
-#define VMMOUSE_PROTO_CMD_GETVERSION 10
-#define VMMOUSE_PROTO_CMD_ABSPOINTER_DATA 39
-#define VMMOUSE_PROTO_CMD_ABSPOINTER_STATUS 40
-#define VMMOUSE_PROTO_CMD_ABSPOINTER_COMMAND 41
-#define VMMOUSE_PROTO_CMD_ABSPOINTER_RESTRICT 86
+#define VMWARE_CMD_ABSPOINTER_DATA 39
+#define VMWARE_CMD_ABSPOINTER_STATUS 40
+#define VMWARE_CMD_ABSPOINTER_COMMAND 41
+#define VMWARE_CMD_ABSPOINTER_RESTRICT 86
/*
- * Subcommands for VMMOUSE_PROTO_CMD_ABSPOINTER_COMMAND
+ * Subcommands for VMWARE_CMD_ABSPOINTER_COMMAND
*/
#define VMMOUSE_CMD_ENABLE 0x45414552U
#define VMMOUSE_CMD_DISABLE 0x000000f5U
@@ -76,28 +73,6 @@ struct vmmouse_data {
char dev_name[128];
};
-/*
- * Hypervisor-specific bi-directional communication channel
- * implementing the vmmouse protocol. Should never execute on
- * bare metal hardware.
- */
-#define VMMOUSE_CMD(cmd, in1, out1, out2, out3, out4) \
-({ \
- unsigned long __dummy1, __dummy2; \
- __asm__ __volatile__ (VMWARE_HYPERCALL : \
- "=a"(out1), \
- "=b"(out2), \
- "=c"(out3), \
- "=d"(out4), \
- "=S"(__dummy1), \
- "=D"(__dummy2) : \
- "a"(VMMOUSE_PROTO_MAGIC), \
- "b"(in1), \
- "c"(VMMOUSE_PROTO_CMD_##cmd), \
- "d"(0) : \
- "memory"); \
-})
-
/**
* vmmouse_report_button - report button state on the correct input device
*
@@ -145,14 +120,12 @@ static psmouse_ret_t vmmouse_report_events(struct psmouse *psmouse)
struct input_dev *abs_dev = priv->abs_dev;
struct input_dev *pref_dev;
u32 status, x, y, z;
- u32 dummy1, dummy2, dummy3;
unsigned int queue_length;
unsigned int count = 255;
while (count--) {
/* See if we have motion data. */
- VMMOUSE_CMD(ABSPOINTER_STATUS, 0,
- status, dummy1, dummy2, dummy3);
+ status = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_STATUS, 0);
if ((status & VMMOUSE_ERROR) == VMMOUSE_ERROR) {
psmouse_err(psmouse, "failed to fetch status data\n");
/*
@@ -172,7 +145,8 @@ static psmouse_ret_t vmmouse_report_events(struct psmouse *psmouse)
}
/* Now get it */
- VMMOUSE_CMD(ABSPOINTER_DATA, 4, status, x, y, z);
+ status = vmware_hypercall4(VMWARE_CMD_ABSPOINTER_DATA, 4,
+ &x, &y, &z);
/*
* And report what we've got. Prefer to report button
@@ -247,14 +221,10 @@ static psmouse_ret_t vmmouse_process_byte(struct psmouse *psmouse)
static void vmmouse_disable(struct psmouse *psmouse)
{
u32 status;
- u32 dummy1, dummy2, dummy3, dummy4;
-
- VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_DISABLE,
- dummy1, dummy2, dummy3, dummy4);
- VMMOUSE_CMD(ABSPOINTER_STATUS, 0,
- status, dummy1, dummy2, dummy3);
+ vmware_hypercall1(VMWARE_CMD_ABSPOINTER_COMMAND, VMMOUSE_CMD_DISABLE);
+ status = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_STATUS, 0);
if ((status & VMMOUSE_ERROR) != VMMOUSE_ERROR)
psmouse_warn(psmouse, "failed to disable vmmouse device\n");
}
@@ -271,26 +241,24 @@ static void vmmouse_disable(struct psmouse *psmouse)
static int vmmouse_enable(struct psmouse *psmouse)
{
u32 status, version;
- u32 dummy1, dummy2, dummy3, dummy4;
/*
* Try enabling the device. If successful, we should be able to
* read valid version ID back from it.
*/
- VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_ENABLE,
- dummy1, dummy2, dummy3, dummy4);
+ vmware_hypercall1(VMWARE_CMD_ABSPOINTER_COMMAND, VMMOUSE_CMD_ENABLE);
/*
* See if version ID can be retrieved.
*/
- VMMOUSE_CMD(ABSPOINTER_STATUS, 0, status, dummy1, dummy2, dummy3);
+ status = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_STATUS, 0);
if ((status & 0x0000ffff) == 0) {
psmouse_dbg(psmouse, "empty flags - assuming no device\n");
return -ENXIO;
}
- VMMOUSE_CMD(ABSPOINTER_DATA, 1 /* single item */,
- version, dummy1, dummy2, dummy3);
+ version = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_DATA,
+ 1 /* single item */);
if (version != VMMOUSE_VERSION_ID) {
psmouse_dbg(psmouse, "Unexpected version value: %u vs %u\n",
(unsigned) version, VMMOUSE_VERSION_ID);
@@ -301,11 +269,11 @@ static int vmmouse_enable(struct psmouse *psmouse)
/*
* Restrict ioport access, if possible.
*/
- VMMOUSE_CMD(ABSPOINTER_RESTRICT, VMMOUSE_RESTRICT_CPL0,
- dummy1, dummy2, dummy3, dummy4);
+ vmware_hypercall1(VMWARE_CMD_ABSPOINTER_RESTRICT,
+ VMMOUSE_RESTRICT_CPL0);
- VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_REQUEST_ABSOLUTE,
- dummy1, dummy2, dummy3, dummy4);
+ vmware_hypercall1(VMWARE_CMD_ABSPOINTER_COMMAND,
+ VMMOUSE_CMD_REQUEST_ABSOLUTE);
return 0;
}
@@ -342,7 +310,7 @@ static bool vmmouse_check_hypervisor(void)
*/
int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
{
- u32 response, version, dummy1, dummy2;
+ u32 response, version, type;
if (!vmmouse_check_hypervisor()) {
psmouse_dbg(psmouse,
@@ -351,9 +319,9 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
}
/* Check if the device is present */
- response = ~VMMOUSE_PROTO_MAGIC;
- VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
+ response = ~VMWARE_HYPERVISOR_MAGIC;
+ version = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &response, &type);
+ if (response != VMWARE_HYPERVISOR_MAGIC || version == 0xffffffffU)
return -ENXIO;
if (set_properties) {
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index dfc6c581873b..5b50475ec414 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -76,7 +76,7 @@ static inline void i8042_write_command(int val)
#define SERIO_QUIRK_PROBE_DEFER BIT(5)
#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
#define SERIO_QUIRK_RESET_NEVER BIT(7)
-#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DIRECT BIT(8)
#define SERIO_QUIRK_DUMBKBD BIT(9)
#define SERIO_QUIRK_NOLOOP BIT(10)
#define SERIO_QUIRK_NOTIMEOUT BIT(11)
@@ -1332,6 +1332,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ {
+ /*
+ * The Ayaneo Kun is a handheld device where some of the buttons
+ * are handled by an AT keyboard. The keyboard is usually
+ * detected as raw, but sometimes, usually after a cold boot,
+ * it is detected as translated. Make sure that the keyboard
+ * is always in raw mode.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_DIRECT)
+ },
{ }
};
@@ -1655,7 +1669,7 @@ static void __init i8042_check_quirks(void)
if (quirks & SERIO_QUIRK_RESET_NEVER)
i8042_reset = I8042_RESET_NEVER;
}
- if (quirks & SERIO_QUIRK_DIECT)
+ if (quirks & SERIO_QUIRK_DIRECT)
i8042_direct = true;
if (quirks & SERIO_QUIRK_DUMBKBD)
i8042_dumbkbd = true;
diff --git a/drivers/input/tests/input_test.c b/drivers/input/tests/input_test.c
index 2fa5b725ae0a..9711ced180b8 100644
--- a/drivers/input/tests/input_test.c
+++ b/drivers/input/tests/input_test.c
@@ -31,7 +31,7 @@ static int input_test_init(struct kunit *test)
ret = input_register_device(input_dev);
if (ret) {
input_free_device(input_dev);
- KUNIT_ASSERT_FAILURE(test, "Register device failed: %d", ret);
+ KUNIT_FAIL_AND_ABORT(test, "Register device failed: %d", ret);
}
test->priv = input_dev;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index d2bbb436a77d..4d13db13b9e5 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1111,6 +1111,16 @@ static const struct of_device_id ads7846_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ads7846_dt_ids);
+static const struct spi_device_id ads7846_spi_ids[] = {
+ { "tsc2046", 7846 },
+ { "ads7843", 7843 },
+ { "ads7845", 7845 },
+ { "ads7846", 7846 },
+ { "ads7873", 7873 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ads7846_spi_ids);
+
static const struct ads7846_platform_data *ads7846_get_props(struct device *dev)
{
struct ads7846_platform_data *pdata;
@@ -1386,10 +1396,10 @@ static struct spi_driver ads7846_driver = {
},
.probe = ads7846_probe,
.remove = ads7846_remove,
+ .id_table = ads7846_spi_ids,
};
module_spi_driver(ads7846_driver);
MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:ads7846");
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 31ffdc2a93f3..79bdb2b10949 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
if (!error && data[0] == 2) {
error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
ILI251X_DATA_SIZE2);
- if (error >= 0 && error != ILI251X_DATA_SIZE2)
- error = -EIO;
+ if (error >= 0)
+ error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
}
return error;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index bbd366dcb69a..6a42b27c4599 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -71,7 +71,6 @@ struct silead_ts_data {
struct regulator_bulk_data regulators[2];
char fw_name[64];
struct touchscreen_properties prop;
- u32 max_fingers;
u32 chip_id;
struct input_mt_pos pos[SILEAD_MAX_FINGERS];
int slots[SILEAD_MAX_FINGERS];
@@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
touchscreen_parse_properties(data->input, true, &data->prop);
silead_apply_efi_fw_min_max(data);
- input_mt_init_slots(data->input, data->max_fingers,
+ input_mt_init_slots(data->input, SILEAD_MAX_FINGERS,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
INPUT_MT_TRACK);
@@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client)
return;
}
- if (buf[0] > data->max_fingers) {
+ if (buf[0] > SILEAD_MAX_FINGERS) {
dev_warn(dev, "More touches reported then supported %d > %d\n",
- buf[0], data->max_fingers);
- buf[0] = data->max_fingers;
+ buf[0], SILEAD_MAX_FINGERS);
+ buf[0] = SILEAD_MAX_FINGERS;
}
if (silead_ts_handle_pen_data(data, buf))
@@ -315,7 +314,6 @@ sync:
static int silead_ts_init(struct i2c_client *client)
{
- struct silead_ts_data *data = i2c_get_clientdata(client);
int error;
error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
@@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client)
usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
- data->max_fingers);
+ SILEAD_MAX_FINGERS);
if (error)
goto i2c_write_err;
usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
@@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client)
const char *str;
int error;
- error = device_property_read_u32(dev, "silead,max-fingers",
- &data->max_fingers);
- if (error) {
- dev_dbg(dev, "Max fingers read error %d\n", error);
- data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
- }
-
error = device_property_read_string(dev, "firmware-name", &str);
if (!error)
snprintf(data->fw_name, sizeof(data->fw_name),
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 2fde1302a584..2d5945c982bd 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -129,7 +129,8 @@ static inline int check_feature_gpt_level(void)
static inline bool amd_iommu_gt_ppr_supported(void)
{
return (check_feature(FEATURE_GT) &&
- check_feature(FEATURE_PPR));
+ check_feature(FEATURE_PPR) &&
+ check_feature(FEATURE_EPHSUP));
}
static inline u64 iommu_virt_to_phys(void *vaddr)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index a18e74878f68..c89d85b54a1a 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1626,8 +1626,17 @@ static void __init free_pci_segments(void)
}
}
+static void __init free_sysfs(struct amd_iommu *iommu)
+{
+ if (iommu->iommu.dev) {
+ iommu_device_unregister(&iommu->iommu);
+ iommu_device_sysfs_remove(&iommu->iommu);
+ }
+}
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
+ free_sysfs(iommu);
free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
@@ -2734,6 +2743,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
+ iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -3353,7 +3363,7 @@ int amd_iommu_reenable(int mode)
return 0;
}
-int __init amd_iommu_enable_faulting(unsigned int cpu)
+int amd_iommu_enable_faulting(unsigned int cpu)
{
/* We enable MSI later when PCI is initialized */
return 0;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 52d83730a22a..b19e8c0f48fa 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2032,7 +2032,6 @@ static int do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
- struct pci_dev *pdev;
int ret = 0;
/* Update data structures */
@@ -2047,30 +2046,13 @@ static int do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
- pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+ /* Setup GCR3 table */
if (pdom_is_sva_capable(domain)) {
ret = init_gcr3_table(dev_data, domain);
if (ret)
return ret;
-
- if (pdev) {
- pdev_enable_caps(pdev);
-
- /*
- * Device can continue to function even if IOPF
- * enablement failed. Hence in error path just
- * disable device PRI support.
- */
- if (amd_iommu_iopf_add_device(iommu, dev_data))
- pdev_disable_cap_pri(pdev);
- }
- } else if (pdev) {
- pdev_enable_cap_ats(pdev);
}
- /* Update device table */
- amd_iommu_dev_update_dte(dev_data, true);
-
return ret;
}
@@ -2079,6 +2061,12 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ /* Clear DTE and flush the entry */
+ amd_iommu_dev_update_dte(dev_data, false);
+
+ /* Flush IOTLB and wait for the flushes to finish */
+ amd_iommu_domain_flush_all(domain);
+
/* Clear GCR3 table */
if (pdom_is_sva_capable(domain))
destroy_gcr3_table(dev_data, domain);
@@ -2087,12 +2075,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
dev_data->domain = NULL;
list_del(&dev_data->list);
- /* Clear DTE and flush the entry */
- amd_iommu_dev_update_dte(dev_data, false);
-
- /* Flush IOTLB and wait for the flushes to finish */
- amd_iommu_domain_flush_all(domain);
-
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
domain->dev_cnt -= 1;
@@ -2163,6 +2145,11 @@ static void detach_device(struct device *dev)
do_detach(dev_data);
+out:
+ spin_unlock(&dev_data->lock);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+
/* Remove IOPF handler */
if (ppr)
amd_iommu_iopf_remove_device(iommu, dev_data);
@@ -2170,10 +2157,6 @@ static void detach_device(struct device *dev)
if (dev_is_pci(dev))
pdev_disable_caps(to_pci_dev(dev));
-out:
- spin_unlock(&dev_data->lock);
-
- spin_unlock_irqrestore(&domain->lock, flags);
}
static struct iommu_device *amd_iommu_probe_device(struct device *dev)
@@ -2485,6 +2468,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+ struct pci_dev *pdev;
int ret;
/*
@@ -2517,7 +2501,23 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
#endif
- iommu_completion_wait(iommu);
+ pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+ if (pdev && pdom_is_sva_capable(domain)) {
+ pdev_enable_caps(pdev);
+
+ /*
+ * Device can continue to function even if IOPF
+ * enablement failed. Hence in error path just
+ * disable device PRI support.
+ */
+ if (amd_iommu_iopf_add_device(iommu, dev_data))
+ pdev_disable_cap_pri(pdev);
+ } else if (pdev) {
+ pdev_enable_cap_ats(pdev);
+ }
+
+ /* Update device table */
+ amd_iommu_dev_update_dte(dev_data, true);
return ret;
}
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 091423bb8aac..7c67d69f0b8c 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -222,8 +222,7 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu)
if (iommu->iopf_queue)
return ret;
- snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
- "amdiommu-%#x-iopfq",
+ snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x",
PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
@@ -249,40 +248,26 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data)
{
- unsigned long flags;
int ret = 0;
if (!dev_data->pri_enabled)
return ret;
- raw_spin_lock_irqsave(&iommu->lock, flags);
-
- if (!iommu->iopf_queue) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!iommu->iopf_queue)
+ return -EINVAL;
ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
if (ret)
- goto out_unlock;
+ return ret;
dev_data->ppr = true;
-
-out_unlock:
- raw_spin_unlock_irqrestore(&iommu->lock, flags);
- return ret;
+ return 0;
}
/* Its assumed that caller has verified that device was added to iopf queue */
void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&iommu->lock, flags);
-
iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
dev_data->ppr = false;
-
- raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f731e4b2a417..43520e7275cc 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -686,15 +686,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
/* Check the domain allows at least some access to the device... */
if (map) {
- dma_addr_t base = dma_range_map_min(map);
- if (base > domain->geometry.aperture_end ||
+ if (dma_range_map_min(map) > domain->geometry.aperture_end ||
dma_range_map_max(map) < domain->geometry.aperture_start) {
pr_warn("specified DMA range outside IOMMU capability\n");
return -EFAULT;
}
- /* ...then finally give it a kicking to make sure it fits */
- base_pfn = max(base, domain->geometry.aperture_start) >> order;
}
+ /* ...then finally give it a kicking to make sure it fits */
+ base_pfn = max_t(unsigned long, base_pfn,
+ domain->geometry.aperture_start >> order);
/* start_pfn is always nonzero for an already-initialised domain */
mutex_lock(&cookie->mutex);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2e9811bf2a4e..fd11a080380c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2114,12 +2114,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (ret)
return ret;
- ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
- if (ret) {
- domain_detach_iommu(domain, iommu);
- return ret;
- }
-
info->domain = domain;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
@@ -2137,15 +2131,21 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
else
ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
- if (ret) {
- device_block_translation(dev);
- return ret;
- }
+ if (ret)
+ goto out_block_translation;
if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
iommu_enable_pci_caps(info);
+ ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
+ if (ret)
+ goto out_block_translation;
+
return 0;
+
+out_block_translation:
+ device_block_translation(dev);
+ return ret;
}
/**
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index afd6a1841715..c776f9142610 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/kernel.h>
#include "irq-gic-common.h"
@@ -45,7 +46,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
}
int gic_configure_irq(unsigned int irq, unsigned int type,
- void __iomem *base, void (*sync_access)(void))
+ void __iomem *base)
{
u32 confmask = 0x2 << ((irq % 16) * 2);
u32 confoff = (irq / 16) * 4;
@@ -84,14 +85,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
- if (sync_access)
- sync_access();
-
return ret;
}
-void gic_dist_config(void __iomem *base, int gic_irqs,
- void (*sync_access)(void))
+void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority)
{
unsigned int i;
@@ -106,7 +103,8 @@ void gic_dist_config(void __iomem *base, int gic_irqs,
* Set priority on all global interrupts.
*/
for (i = 32; i < gic_irqs; i += 4)
- writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
+ writel_relaxed(REPEAT_BYTE_U32(priority),
+ base + GIC_DIST_PRI + i);
/*
* Deactivate and disable all SPIs. Leave the PPI and SGIs
@@ -118,12 +116,9 @@ void gic_dist_config(void __iomem *base, int gic_irqs,
writel_relaxed(GICD_INT_EN_CLR_X32,
base + GIC_DIST_ENABLE_CLEAR + i / 8);
}
-
- if (sync_access)
- sync_access();
}
-void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
+void gic_cpu_config(void __iomem *base, int nr, u8 priority)
{
int i;
@@ -142,9 +137,6 @@ void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
* Set priority on PPI and SGI interrupts
*/
for (i = 0; i < nr; i += 4)
- writel_relaxed(GICD_INT_DEF_PRI_X4,
+ writel_relaxed(REPEAT_BYTE_U32(priority),
base + GIC_DIST_PRI + i * 4 / 4);
-
- if (sync_access)
- sync_access();
}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index f407cce9ecaa..e8eab72ef195 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -20,10 +20,9 @@ struct gic_quirk {
};
int gic_configure_irq(unsigned int irq, unsigned int type,
- void __iomem *base, void (*sync_access)(void));
-void gic_dist_config(void __iomem *base, int gic_irqs,
- void (*sync_access)(void));
-void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void));
+ void __iomem *base);
+void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority);
+void gic_cpu_config(void __iomem *base, int nr, u8 priority);
void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data);
void gic_enable_of_quirks(const struct device_node *np,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 40ebf1726393..42e63272154e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -59,7 +59,7 @@ static u32 lpi_id_bits;
#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
-#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
+static u8 __ro_after_init lpi_prop_prio;
/*
* Collection structure - just an ID, and a redistributor address to
@@ -1846,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
if (!info->map)
return -EINVAL;
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
-
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
GFP_ATOMIC);
- if (!maps) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!maps)
+ return -ENOMEM;
its_dev->event_map.vm = info->map->vm;
its_dev->event_map.vlpi_maps = maps;
} else if (its_dev->event_map.vm != info->map->vm) {
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* Get our private copy of the mapping information */
@@ -1899,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
its_dev->event_map.nr_vlpis++;
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_vlpi_map *map;
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
map = get_vlpi_map(d);
- if (!its_dev->event_map.vm || !map) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !map)
+ return -EINVAL;
/* Copy our mapping information to the incoming request */
*info->map = *map;
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_unmap(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
- if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
/* Drop the virtual mapping */
its_send_discard(its_dev, event);
@@ -1946,7 +1926,7 @@ static int its_vlpi_unmap(struct irq_data *d)
/* and restore the physical one */
irqd_clr_forwarded_to_vcpu(d);
its_send_mapti(its_dev, d->hwirq, event);
- lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+ lpi_update_config(d, 0xff, (lpi_prop_prio |
LPI_PROP_ENABLED |
LPI_PROP_GROUP1));
@@ -1962,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d)
kfree(its_dev->event_map.vlpi_maps);
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
@@ -1992,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
+ guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
+
/* Unmap request? */
if (!info)
return its_vlpi_unmap(d);
@@ -2201,8 +2181,8 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
static void gic_reset_prop_table(void *va)
{
- /* Priority 0xa0, Group-1, disabled */
- memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
+ /* Regular IRQ priority, Group-1, disabled */
+ memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
/* Make sure the GIC will observe the written configuration */
gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
@@ -5670,7 +5650,7 @@ int __init its_lpi_memreserve_init(void)
}
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
- struct irq_domain *parent_domain)
+ struct irq_domain *parent_domain, u8 irq_prio)
{
struct device_node *of_node;
struct its_node *its;
@@ -5680,6 +5660,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
gic_rdists = rdists;
+ lpi_prop_prio = irq_prio;
its_parent = parent_domain;
of_node = to_of_node(handle);
if (of_node)
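/*
 * Illustrative sketch, not part of the ITS patch above: the its.c change
 * replaces open-coded raw_spin_lock()/raw_spin_unlock() pairs in the vLPI
 * helpers with a single scoped guard taken in its_irq_set_vcpu_affinity(),
 * so each helper can simply return on error. A minimal, hypothetical
 * example of the guard() pattern (demo_lock, demo_state and demo_update()
 * are invented names):
 */
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static int demo_state;

static int demo_update(int val)
{
	/* The lock is dropped automatically on every return path. */
	guard(raw_spinlock_irq)(&demo_lock);

	if (val < 0)
		return -EINVAL;

	demo_state = val;
	return 0;
}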
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6fb276504bcc..6393f3d780e9 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
+#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -24,6 +25,7 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v3-prio.h>
#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -36,7 +38,8 @@
#include "irq-gic-common.h"
-#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ;
+static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
@@ -44,6 +47,8 @@
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
+static struct cpumask broken_rdists __read_mostly __maybe_unused;
+
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
@@ -108,29 +113,96 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
*/
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
-DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
-EXPORT_SYMBOL(gic_nonsecure_priorities);
+static u32 gic_get_pribits(void)
+{
+ u32 pribits;
-/*
- * When the Non-secure world has access to group 0 interrupts (as a
- * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
- * return the Distributor's view of the interrupt priority.
- *
- * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
- * written by software is moved to the Non-secure range by the Distributor.
- *
- * If both are true (which is when gic_nonsecure_priorities gets enabled),
- * we need to shift down the priority programmed by software to match it
- * against the value returned by ICC_RPR_EL1.
- */
-#define GICD_INT_RPR_PRI(priority) \
- ({ \
- u32 __priority = (priority); \
- if (static_branch_unlikely(&gic_nonsecure_priorities)) \
- __priority = 0x80 | (__priority >> 1); \
- \
- __priority; \
- })
+ pribits = gic_read_ctlr();
+ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ pribits++;
+
+ return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+ u32 val;
+ u32 old_pmr;
+
+ old_pmr = gic_read_pmr();
+
+ /*
+ * Let's find out if Group0 is under control of EL3 or not by
+ * setting the highest possible, non-zero priority in PMR.
+ *
+ * If SCR_EL3.FIQ is set, the priority gets shifted down in
+ * order for the CPU interface to set bit 7, and keep the
+ * actual priority in the non-secure range. In the process, it
+ * loses the least significant bit and the actual priority
+ * becomes 0x80. Reading it back returns 0, indicating that
+ * we don't have access to Group0.
+ */
+ gic_write_pmr(BIT(8 - gic_get_pribits()));
+ val = gic_read_pmr();
+
+ gic_write_pmr(old_pmr);
+
+ return val != 0;
+}
+
+static inline bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
+static bool cpus_have_security_disabled __ro_after_init;
+static bool cpus_have_group0 __ro_after_init;
+
+static void __init gic_prio_init(void)
+{
+ cpus_have_security_disabled = gic_dist_security_disabled();
+ cpus_have_group0 = gic_has_group0();
+
+ /*
+ * How priority values are used by the GIC depends on two things:
+ * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
+ * and if Group 0 interrupts can be delivered to Linux in the non-secure
+ * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
+ * way priorities are presented in ICC_PMR_EL1 and in the distributor:
+ *
+ * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
+ * -------------------------------------------------------
+ * 1 | - | unchanged | unchanged
+ * -------------------------------------------------------
+ * 0 | 1 | non-secure | non-secure
+ * -------------------------------------------------------
+ * 0 | 0 | unchanged | non-secure
+ *
+ * In the non-secure view reads and writes are modified:
+ *
+ * - A value written is right-shifted by one and the MSB is set,
+ * forcing the priority into the non-secure range.
+ *
+ * - A value read is left-shifted by one.
+ *
+ * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
+ * are both either modified or unchanged, we can use the same set of
+ * priorities.
+ *
+ * In the last case, where only the interrupt priorities are modified to
+ * be in the non-secure range, we program the non-secure values into
+ * the distributor to match the PMR values we want.
+ */
+ if (cpus_have_group0 && !cpus_have_security_disabled) {
+ dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
+ dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
+ }
+
+ pr_info("GICD_CTRL.DS=%d, SCR_EL3.FIQ=%d\n",
+ cpus_have_security_disabled,
+ !cpus_have_group0);
+}
/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
static refcount_t *rdist_nmi_refs;
@@ -556,7 +628,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
desc->handle_irq = handle_fasteoi_nmi;
}
- gic_irq_set_prio(d, GICD_INT_NMI_PRI);
+ gic_irq_set_prio(d, dist_prio_nmi);
return 0;
}
@@ -591,7 +663,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
desc->handle_irq = handle_fasteoi_irq;
}
- gic_irq_set_prio(d, GICD_INT_DEF_PRI);
+ gic_irq_set_prio(d, dist_prio_irq);
}
static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
@@ -670,7 +742,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
offset = convert_offset_index(d, GICD_ICFGR, &index);
- ret = gic_configure_irq(index, type, base + offset, NULL);
+ ret = gic_configure_irq(index, type, base + offset);
if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
/* Misconfigured PPIs are usually not fatal */
pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
@@ -753,7 +825,7 @@ static bool gic_rpr_is_nmi_prio(void)
if (!gic_supports_nmi())
return false;
- return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
+ return unlikely(gic_read_rpr() == GICV3_PRIO_NMI);
}
static bool gic_irqnr_is_special(u32 irqnr)
@@ -866,44 +938,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
__gic_handle_irq_from_irqson(regs);
}
-static u32 gic_get_pribits(void)
-{
- u32 pribits;
-
- pribits = gic_read_ctlr();
- pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
- pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
- pribits++;
-
- return pribits;
-}
-
-static bool gic_has_group0(void)
-{
- u32 val;
- u32 old_pmr;
-
- old_pmr = gic_read_pmr();
-
- /*
- * Let's find out if Group0 is under control of EL3 or not by
- * setting the highest possible, non-zero priority in PMR.
- *
- * If SCR_EL3.FIQ is set, the priority gets shifted down in
- * order for the CPU interface to set bit 7, and keep the
- * actual priority in the non-secure range. In the process, it
- * looses the least significant bit and the actual priority
- * becomes 0x80. Reading it back returns 0, indicating that
- * we're don't have access to Group0.
- */
- gic_write_pmr(BIT(8 - gic_get_pribits()));
- val = gic_read_pmr();
-
- gic_write_pmr(old_pmr);
-
- return val != 0;
-}
-
static void __init gic_dist_init(void)
{
unsigned int i;
@@ -937,10 +971,11 @@ static void __init gic_dist_init(void)
writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
for (i = 0; i < GIC_ESPI_NR; i += 4)
- writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
+ writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq),
+ base + GICD_IPRIORITYRnE + i);
/* Now do the common stuff */
- gic_dist_config(base, GIC_LINE_NR, NULL);
+ gic_dist_config(base, GIC_LINE_NR, dist_prio_irq);
val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
@@ -1119,12 +1154,6 @@ static void gic_update_rdist_properties(void)
gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
-/* Check whether it's single security state view */
-static inline bool gic_dist_security_disabled(void)
-{
- return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
-}
-
static void gic_cpu_sys_reg_init(void)
{
int i, cpu = smp_processor_id();
@@ -1152,18 +1181,14 @@ static void gic_cpu_sys_reg_init(void)
write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
} else if (gic_supports_nmi()) {
/*
- * Mismatch configuration with boot CPU, the system is likely
- * to die as interrupt masking will not work properly on all
- * CPUs
+ * Check that all CPUs use the same priority space.
*
- * The boot CPU calls this function before enabling NMI support,
- * and as a result we'll never see this warning in the boot path
- * for that CPU.
+ * If there's a mismatch with the boot CPU, the system is
+ * likely to die as interrupt masking will not work properly on
+ * all CPUs.
*/
- if (static_branch_unlikely(&gic_nonsecure_priorities))
- WARN_ON(!group0 || gic_dist_security_disabled());
- else
- WARN_ON(group0 && !gic_dist_security_disabled());
+ WARN_ON(group0 != cpus_have_group0);
+ WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
}
/*
@@ -1282,7 +1307,8 @@ static void gic_cpu_init(void)
for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
- gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, gic_redist_wait_for_rwp);
+ gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq);
+ gic_redist_wait_for_rwp();
/* initialise system registers */
gic_cpu_sys_reg_init();
@@ -1293,6 +1319,18 @@ static void gic_cpu_init(void)
#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
+/*
+ * gic_starting_cpu() is called after the last point where cpuhp is allowed
+ * to fail. So pre-check for problems earlier.
+ */
+static int gic_check_rdist(unsigned int cpu)
+{
+ if (cpumask_test_cpu(cpu, &broken_rdists))
+ return -EINVAL;
+
+ return 0;
+}
+
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
@@ -1384,6 +1422,10 @@ static void __init gic_smp_init(void)
};
int base_sgi;
+ cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
+ "irqchip/arm/gicv3:checkrdist",
+ gic_check_rdist, NULL);
+
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
"irqchip/arm/gicv3:starting",
gic_starting_cpu, NULL);
@@ -1948,36 +1990,6 @@ static void gic_enable_nmi_support(void)
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
- /*
- * How priority values are used by the GIC depends on two things:
- * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
- * and if Group 0 interrupts can be delivered to Linux in the non-secure
- * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
- * ICC_PMR_EL1 register and the priority that software assigns to
- * interrupts:
- *
- * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
- * -----------------------------------------------------------
- * 1 | - | unchanged | unchanged
- * -----------------------------------------------------------
- * 0 | 1 | non-secure | non-secure
- * -----------------------------------------------------------
- * 0 | 0 | unchanged | non-secure
- *
- * where non-secure means that the value is right-shifted by one and the
- * MSB bit set, to make it fit in the non-secure priority range.
- *
- * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
- * are both either modified or unchanged, we can use the same set of
- * priorities.
- *
- * In the last case, where only the interrupt priorities are modified to
- * be in the non-secure range, we use a different PMR value to mask IRQs
- * and the rest of the values that we use remain unchanged.
- */
- if (gic_has_group0() && !gic_dist_security_disabled())
- static_branch_enable(&gic_nonsecure_priorities);
-
static_branch_enable(&supports_pseudo_nmis);
if (static_branch_likely(&supports_deactivate_key))
@@ -2058,6 +2070,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
gic_update_rdist_properties();
+ gic_prio_init();
gic_dist_init();
gic_cpu_init();
gic_enable_nmi_support();
@@ -2065,7 +2078,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
gic_cpu_pm_init();
if (gic_dist_supports_lpis()) {
- its_init(handle, &gic_data.rdists, gic_data.domain);
+ its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq);
its_cpu_init();
its_lpi_memreserve_init();
} else {
@@ -2365,8 +2378,24 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
- if (!acpi_gicc_is_usable(gicc))
+ /* Neither enabled nor online capable means it doesn't exist; skip it */
+ if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
+ return 0;
+
+ /*
+ * Capable but disabled CPUs can be brought online later. What about
+ * the redistributor? ACPI doesn't want to say!
+ * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
+ * Otherwise, prevent such CPUs from being brought online.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED)) {
+ int cpu = get_cpu_for_acpi_id(gicc->uid);
+
+ pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
+ if (cpu >= 0)
+ cpumask_set_cpu(cpu, &broken_rdists);
return 0;
+ }
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
@@ -2413,21 +2442,15 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
/*
* If GICC is enabled and has valid gicr base address, then it means
- * GICR base is presented via GICC
+ * GICR base is presented via GICC. The redistributor is only known to
+ * be accessible if the GICC is marked as enabled. If this bit is not
+ * set, we'd need to add the redistributor at runtime, which isn't
+ * supported.
*/
- if (acpi_gicc_is_usable(gicc) && gicc->gicr_base_address) {
+ if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
acpi_data.enabled_rdists++;
- return 0;
- }
- /*
- * It's perfectly valid firmware can pass disabled GICC entry, driver
- * should not treat as errors, skip the entry instead of probe fail.
- */
- if (!acpi_gicc_is_usable(gicc))
- return 0;
-
- return -ENODEV;
+ return 0;
}
static int __init gic_acpi_count_gicr_regions(void)
@@ -2483,7 +2506,8 @@ static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *hea
int maint_irq_mode;
static int first_madt = true;
- if (!acpi_gicc_is_usable(gicc))
+ if (!(gicc->flags &
+ (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
return 0;
maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
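/*
 * Illustrative sketch, not part of the GICv3 patch above: the table comment
 * added in gic_prio_init() describes the "non-secure view" the distributor
 * applies when GICD_CTRL.DS == 0: a priority written from the non-secure
 * side is right-shifted by one with the most significant bit forced on.
 * Expressed as code (demo_prio_to_ns() is an invented name; the helper the
 * patch actually uses, __gicv3_prio_to_ns(), lives in
 * <linux/irqchip/arm-gic-v3-prio.h>, which is not shown here):
 */
#include <linux/types.h>

static inline u8 demo_prio_to_ns(u8 prio)
{
	/* e.g. 0xa0 (GICV3_PRIO_IRQ) maps to 0xd0 in the non-secure range. */
	return 0x80 | (prio >> 1);
}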
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 98aa383e39db..3be7bd8cd8cd 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -303,7 +303,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
+ ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG);
if (ret && gicirq < 32) {
/* Misconfigured PPIs are usually not fatal */
pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16);
@@ -479,7 +479,7 @@ static void gic_dist_init(struct gic_chip_data *gic)
for (i = 32; i < gic_irqs; i += 4)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
- gic_dist_config(base, gic_irqs, NULL);
+ gic_dist_config(base, gic_irqs, GICD_INT_DEF_PRI);
writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
@@ -516,7 +516,7 @@ static int gic_cpu_init(struct gic_chip_data *gic)
gic_cpu_map[i] &= ~cpu_mask;
}
- gic_cpu_config(dist_base, 32, NULL);
+ gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI);
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
gic_cpu_if_up(gic);
@@ -608,7 +608,7 @@ void gic_dist_restore(struct gic_chip_data *gic)
dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
- writel_relaxed(GICD_INT_DEF_PRI_X4,
+ writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI),
dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -697,7 +697,7 @@ void gic_cpu_restore(struct gic_chip_data *gic)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
- writel_relaxed(GICD_INT_DEF_PRI_X4,
+ writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI),
dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 46161f6ff289..31c3f70a5d5e 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -130,7 +130,7 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&irq_controller_lock);
- ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
+ ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG);
if (ret && irq < 32) {
/* Misconfigured PPIs are usually not fatal */
pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
@@ -260,7 +260,7 @@ static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
for (i = 32; i < nr_irqs; i += 2)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));
- gic_dist_config(base, nr_irqs, NULL);
+ gic_dist_config(base, nr_irqs, GICD_INT_DEF_PRI);
writel_relaxed(1, base + GIC_DIST_CTRL);
}
@@ -287,7 +287,7 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
if (i != cpu)
hip04_cpu_map[i] &= ~cpu_mask;
- gic_cpu_config(dist_base, 32, NULL);
+ gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI);
writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
writel_relaxed(1, base + GIC_CPU_CTRL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index c7ddebf312ad..b1f2080be2be 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
+#include <asm/numa.h>
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
@@ -339,7 +340,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
int node;
if (cpu_has_flatmode)
- node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
else
node = eiointc_priv[nr_pics - 1]->node;
@@ -431,7 +432,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
goto out_free_handle;
if (cpu_has_flatmode)
- node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
else
node = acpi_eiointc->node;
acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index e4b33aed1c97..7c4fe7ab4b83 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -28,7 +28,7 @@
#define LIOINTC_INTC_CHIP_START 0x20
-#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
@@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
goto out_free_priv;
for (i = 0; i < LIOINTC_NUM_CORES; i++)
- priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
+ priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
for (i = 0; i < LIOINTC_NUM_PARENT; i++)
priv->handler[i].parent_int_map = parent_int_map[i];
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 9e71c4428814..4f3a12383a1e 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -253,8 +253,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
- struct fwnode_handle *fn;
struct acpi_madt_rintc *rintc;
+ struct fwnode_handle *fn;
+ int rc;
rintc = (struct acpi_madt_rintc *)header;
@@ -273,7 +274,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
return -ENOMEM;
}
- return riscv_intc_init_common(fn, &riscv_intc_chip);
+ rc = riscv_intc_init_common(fn, &riscv_intc_chip);
+ if (rc)
+ irq_domain_free_fwnode(fn);
+
+ return rc;
}
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 8fb183ced1e7..9e22f7e378f5 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -85,7 +85,7 @@ struct plic_handler {
struct plic_priv *priv;
};
static int plic_parent_irq __ro_after_init;
-static bool plic_cpuhp_setup_done __ro_after_init;
+static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static int plic_irq_set_type(struct irq_data *d, unsigned int type);
@@ -487,10 +487,8 @@ static int plic_probe(struct platform_device *pdev)
unsigned long plic_quirks = 0;
struct plic_handler *handler;
u32 nr_irqs, parent_hwirq;
- struct irq_domain *domain;
struct plic_priv *priv;
irq_hw_number_t hwirq;
- bool cpuhp_setup;
if (is_of_node(dev->fwnode)) {
const struct of_device_id *id;
@@ -549,14 +547,6 @@ static int plic_probe(struct platform_device *pdev)
continue;
}
- /* Find parent domain and register chained handler */
- domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
- if (!plic_parent_irq && domain) {
- plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
- if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
- }
-
/*
* When running in M-mode we need to ignore the S-mode handler.
* Here we assume it always comes later, but that might be a
@@ -597,25 +587,35 @@ done:
goto fail_cleanup_contexts;
/*
- * We can have multiple PLIC instances so setup cpuhp state
+ * We can have multiple PLIC instances, so set up global state
* and register syscore operations only once after context
* handlers of all online CPUs are initialized.
*/
- if (!plic_cpuhp_setup_done) {
- cpuhp_setup = true;
+ if (!plic_global_setup_done) {
+ struct irq_domain *domain;
+ bool global_setup = true;
+
for_each_online_cpu(cpu) {
handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present) {
- cpuhp_setup = false;
+ global_setup = false;
break;
}
}
- if (cpuhp_setup) {
+
+ if (global_setup) {
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (domain)
+ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
register_syscore_ops(&plic_irq_syscore_ops);
- plic_cpuhp_setup_done = true;
+ plic_global_setup_done = true;
}
}
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index f68569bfef7a..509b362d6465 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -159,6 +159,7 @@ set_debug(const char *val, const struct kernel_param *kp)
}
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("mISDN driver for AVM FRITZ!CARD PCI ISDN cards");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AVMFRITZ_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 2e5cb9dde3ec..0d2928d8aeae 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -221,6 +221,7 @@ static uint hwid = HWID_NONE;
static int HFC_cnt, E1_cnt, bmask_cnt, Port_cnt, PCM_cnt = 99;
MODULE_AUTHOR("Andreas Eversberg");
+MODULE_DESCRIPTION("mISDN driver for hfc-4s/hfc-8s/hfc-e1 based cards");
MODULE_LICENSE("GPL");
MODULE_VERSION(HFC_MULTI_VERSION);
module_param(debug, uint, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index fe391de1aba3..ce7bccc9faa3 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -48,6 +48,7 @@ static struct timer_list hfc_tl;
static unsigned long hfc_jiffies;
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("mISDN driver for CCD's hfc-pci based cards");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, uint, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index b82b89888a5e..e54419a4e731 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -31,6 +31,7 @@ static DEFINE_RWLOCK(HFClock);
MODULE_AUTHOR("Martin Bachem");
+MODULE_DESCRIPTION("mISDN driver for Colognechip HFC-S USB chip");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, int, 0);
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 88d592bafdb0..30876a012711 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -245,6 +245,7 @@ set_debug(const char *val, const struct kernel_param *kp)
}
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("mISDN driver for cards based on Infineon ISDN chipsets");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INFINEON_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 4f8d85bb3ce1..d0b7271fbda1 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -21,6 +21,7 @@
MODULE_AUTHOR("Karsten Keil");
MODULE_VERSION(ISAC_REV);
+MODULE_DESCRIPTION("mISDN driver for ISAC specific functions");
MODULE_LICENSE("GPL v2");
#define ReadISAC(is, o) (is->read_reg(is->dch.hw, o + is->off))
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 48b3d43e2502..b3e03c410544 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -22,6 +22,7 @@
#define ISAR_REV "2.1"
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("mISDN driver for ISAR (Siemens PSB 7110) specific functions");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(ISAR_REV);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 566c790a9481..d163850c295e 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -114,6 +114,7 @@ set_debug(const char *val, const struct kernel_param *kp)
}
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("mISDN driver for NETJet cards");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index b530c78eca8e..0c405261d940 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -97,6 +97,7 @@ set_debug(const char *val, const struct kernel_param *kp)
}
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("mISDN driver for Sedlbauer Speedfax+ cards");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(SPEEDFAX_REV);
MODULE_FIRMWARE("isdn/ISAR.BIN");
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index f3b8db7b48fe..ee69212ac351 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -101,6 +101,7 @@ set_debug(const char *val, const struct kernel_param *kp)
}
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("mISDN driver for Winbond w6692 based cards");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(W6692_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index ab8513a7acd5..e34a7a46754e 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -14,6 +14,7 @@
static u_int debug;
MODULE_AUTHOR("Karsten Keil");
+MODULE_DESCRIPTION("Modular ISDN core driver");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/mISDN/dsp_blowfish.c b/drivers/isdn/mISDN/dsp_blowfish.c
index 0aa572f3858d..0e77c282c862 100644
--- a/drivers/isdn/mISDN/dsp_blowfish.c
+++ b/drivers/isdn/mISDN/dsp_blowfish.c
@@ -73,11 +73,6 @@
* crypto-api for faster implementation
*/
-struct bf_ctx {
- u32 p[18];
- u32 s[1024];
-};
-
static const u32 bf_pbox[16 + 2] = {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index fae95f166688..753232e9fc36 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -172,6 +172,7 @@ module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(options, uint, S_IRUGO | S_IWUSR);
module_param(poll, uint, S_IRUGO | S_IWUSR);
module_param(dtmfthreshold, uint, S_IRUGO | S_IWUSR);
+MODULE_DESCRIPTION("mISDN driver for Digital Audio Processing of transparent data");
MODULE_LICENSE("GPL");
/*int spinnest = 0;*/
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index f010b35a0531..a5ad88a960d0 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -245,6 +245,7 @@ static int debug;
static int ulaw;
MODULE_AUTHOR("Andreas Eversberg");
+MODULE_DESCRIPTION("mISDN driver for tunneling layer 1 over IP");
MODULE_LICENSE("GPL");
module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 05e6af88b88c..8d9d8da376e4 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -179,6 +179,21 @@ config LEDS_CR0014114
To compile this driver as a module, choose M here: the module
will be called leds-cr0014114.
+config LEDS_CROS_EC
+ tristate "LED Support for ChromeOS EC"
+ depends on MFD_CROS_EC_DEV
+ depends on LEDS_CLASS_MULTICOLOR
+ select LEDS_TRIGGERS
+ default MFD_CROS_EC_DEV
+ help
+ This option enables support for LEDs managed by ChromeOS ECs.
+ All LEDs exposed by the EC are supported in multicolor mode.
+ A hardware trigger to switch back to the automatic behaviour is
+ provided.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-cros_ec.
+
config LEDS_EL15203000
tristate "LED Support for Crane EL15203000"
depends on LEDS_CLASS
@@ -414,7 +429,7 @@ config LEDS_LP50XX
module will be called leds-lp50xx.
config LEDS_LP55XX_COMMON
- tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
+ tristate "Common Driver for TI/National LP5521/5523/55231/5562/5569/8501"
depends on LEDS_CLASS
depends on LEDS_CLASS_MULTICOLOR
depends on OF
@@ -422,8 +437,8 @@ config LEDS_LP55XX_COMMON
select FW_LOADER
select FW_LOADER_USER_HELPER
help
- This option supports common operations for LP5521/5523/55231/5562/8501
- devices.
+ This option supports common operations for LP5521/5523/55231/5562/5569/
+ 8501 devices.
config LEDS_LP5521
tristate "LED Support for N.S. LP5521 LED driver chip"
@@ -456,6 +471,16 @@ config LEDS_LP5562
Driver provides direct control via LED class and interface for
programming the engines.
+config LEDS_LP5569
+ tristate "LED Support for TI LP5569 LED driver chip"
+ depends on LEDS_CLASS && I2C
+ depends on LEDS_LP55XX_COMMON
+ help
+ If you say yes here you get support for TI LP5569 LED driver.
+ It is a 9-channel chip with programmable engines.
+ Driver provides direct control via LED class and interface for
+ programming the engines.
+
config LEDS_LP8501
tristate "LED Support for TI LP8501 LED driver chip"
depends on LEDS_CLASS && I2C
@@ -869,7 +894,6 @@ config LEDS_SPI_BYTE
tristate "LED support for SPI LED controller with a single byte"
depends on LEDS_CLASS
depends on SPI
- depends on OF
help
This option enables support for LED controller which use a single byte
for controlling the brightness. Currently the following controller is
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index effdfc6f1e95..18afbb5a23ee 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_CPCAP) += leds-cpcap.o
+obj-$(CONFIG_LEDS_CROS_EC) += leds-cros_ec.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_LEDS_LP50XX) += leds-lp50xx.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
+obj-$(CONFIG_LEDS_LP5569) += leds-lp5569.o
obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
diff --git a/drivers/leds/blink/leds-bcm63138.c b/drivers/leds/blink/leds-bcm63138.c
index 2cf2761e4914..3a5e0b98bfbc 100644
--- a/drivers/leds/blink/leds-bcm63138.c
+++ b/drivers/leds/blink/leds-bcm63138.c
@@ -303,5 +303,6 @@ static struct platform_driver bcm63138_leds_driver = {
module_platform_driver(bcm63138_leds_driver);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom BCM63138 SoC LED driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, bcm63138_leds_of_match_table);
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index 809b6d98bb3e..f39f0bfe6eef 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -121,4 +121,15 @@ config LEDS_SGM3140
This option enables support for the SGM3140 500mA Buck/Boost Charge
Pump LED Driver.
+config LEDS_SY7802
+ tristate "LED support for the Silergy SY7802"
+ depends on I2C && OF
+ depends on GPIOLIB
+ select REGMAP_I2C
+ help
+ This option enables support for the SY7802 flash LED controller.
+ SY7802 includes torch and flash functions with programmable current.
+
+ This driver can be built as a module; if so, the module will be called "leds-sy7802".
+
endif # LEDS_CLASS_FLASH
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index 91d60a4b7952..48860eeced79 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_LEDS_QCOM_FLASH) += leds-qcom-flash.o
obj-$(CONFIG_LEDS_RT4505) += leds-rt4505.o
obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
obj-$(CONFIG_LEDS_SGM3140) += leds-sgm3140.o
+obj-$(CONFIG_LEDS_SY7802) += leds-sy7802.o
diff --git a/drivers/leds/flash/leds-as3645a.c b/drivers/leds/flash/leds-as3645a.c
index 12c2609c1137..2c6ef321b7c8 100644
--- a/drivers/leds/flash/leds-as3645a.c
+++ b/drivers/leds/flash/leds-as3645a.c
@@ -743,8 +743,8 @@ static void as3645a_remove(struct i2c_client *client)
}
static const struct i2c_device_id as3645a_id_table[] = {
- { AS_NAME, 0 },
- { },
+ { AS_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, as3645a_id_table);
diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c
index 1b75b4d36834..4c74f1cf01f0 100644
--- a/drivers/leds/flash/leds-mt6360.c
+++ b/drivers/leds/flash/leds-mt6360.c
@@ -643,14 +643,17 @@ static int mt6360_init_isnk_properties(struct mt6360_led *led,
ret = fwnode_property_read_u32(child, "reg", &reg);
if (ret || reg > MT6360_LED_ISNK3 ||
- priv->leds_active & BIT(reg))
+ priv->leds_active & BIT(reg)) {
+ fwnode_handle_put(child);
return -EINVAL;
+ }
ret = fwnode_property_read_u32(child, "color", &color);
if (ret) {
dev_err(priv->dev,
"led %d, no color specified\n",
led->led_no);
+ fwnode_handle_put(child);
return ret;
}
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index 7c99a3039171..bf70bf6fb0d5 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -505,6 +505,7 @@ qcom_flash_v4l2_init(struct device *dev, struct qcom_flash_led *led, struct fwno
struct qcom_flash_data *flash_data = led->flash_data;
struct v4l2_flash_config v4l2_cfg = { 0 };
struct led_flash_setting *intensity = &v4l2_cfg.intensity;
+ struct v4l2_flash *v4l2_flash;
if (!(led->flash.led_cdev.flags & LED_DEV_CAP_FLASH))
return 0;
@@ -523,9 +524,12 @@ qcom_flash_v4l2_init(struct device *dev, struct qcom_flash_led *led, struct fwno
LED_FAULT_OVER_TEMPERATURE |
LED_FAULT_TIMEOUT;
- flash_data->v4l2_flash[flash_data->leds_count] =
- v4l2_flash_init(dev, fwnode, &led->flash, &qcom_v4l2_flash_ops, &v4l2_cfg);
- return PTR_ERR_OR_ZERO(flash_data->v4l2_flash);
+ v4l2_flash = v4l2_flash_init(dev, fwnode, &led->flash, &qcom_v4l2_flash_ops, &v4l2_cfg);
+ if (IS_ERR(v4l2_flash))
+ return PTR_ERR(v4l2_flash);
+
+ flash_data->v4l2_flash[flash_data->leds_count] = v4l2_flash;
+ return 0;
}
# else
static int
diff --git a/drivers/leds/flash/leds-rt4505.c b/drivers/leds/flash/leds-rt4505.c
index 1ae5b387f4a5..f16358b8dfc1 100644
--- a/drivers/leds/flash/leds-rt4505.c
+++ b/drivers/leds/flash/leds-rt4505.c
@@ -426,4 +426,5 @@ static struct i2c_driver rt4505_driver = {
module_i2c_driver(rt4505_driver);
MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT4505 LED driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/flash/leds-sy7802.c b/drivers/leds/flash/leds-sy7802.c
new file mode 100644
index 000000000000..ddac836762af
--- /dev/null
+++ b/drivers/leds/flash/leds-sy7802.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Silergy SY7802 flash LED driver with an I2C interface
+ *
+ * Copyright 2024 André Apitzsch <git@apitzsch.eu>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define SY7802_MAX_LEDS 2
+#define SY7802_LED_JOINT 2
+
+#define SY7802_REG_ENABLE 0x10
+#define SY7802_REG_TORCH_BRIGHTNESS 0xa0
+#define SY7802_REG_FLASH_BRIGHTNESS 0xb0
+#define SY7802_REG_FLASH_DURATION 0xc0
+#define SY7802_REG_FLAGS 0xd0
+#define SY7802_REG_CONFIG_1 0xe0
+#define SY7802_REG_CONFIG_2 0xf0
+#define SY7802_REG_VIN_MONITOR 0x80
+#define SY7802_REG_LAST_FLASH 0x81
+#define SY7802_REG_VLED_MONITOR 0x30
+#define SY7802_REG_ADC_DELAY 0x31
+#define SY7802_REG_DEV_ID 0xff
+
+#define SY7802_MODE_OFF 0
+#define SY7802_MODE_TORCH 2
+#define SY7802_MODE_FLASH 3
+#define SY7802_MODE_MASK GENMASK(1, 0)
+
+#define SY7802_LEDS_SHIFT 3
+#define SY7802_LEDS_MASK(_id) (BIT(_id) << SY7802_LEDS_SHIFT)
+#define SY7802_LEDS_MASK_ALL (SY7802_LEDS_MASK(0) | SY7802_LEDS_MASK(1))
+
+#define SY7802_TORCH_CURRENT_SHIFT 3
+#define SY7802_TORCH_CURRENT_MASK(_id) \
+ (GENMASK(2, 0) << (SY7802_TORCH_CURRENT_SHIFT * (_id)))
+#define SY7802_TORCH_CURRENT_MASK_ALL \
+ (SY7802_TORCH_CURRENT_MASK(0) | SY7802_TORCH_CURRENT_MASK(1))
+
+#define SY7802_FLASH_CURRENT_SHIFT 4
+#define SY7802_FLASH_CURRENT_MASK(_id) \
+ (GENMASK(3, 0) << (SY7802_FLASH_CURRENT_SHIFT * (_id)))
+#define SY7802_FLASH_CURRENT_MASK_ALL \
+ (SY7802_FLASH_CURRENT_MASK(0) | SY7802_FLASH_CURRENT_MASK(1))
+
+#define SY7802_TIMEOUT_DEFAULT_US 512000U
+#define SY7802_TIMEOUT_MIN_US 32000U
+#define SY7802_TIMEOUT_MAX_US 1024000U
+#define SY7802_TIMEOUT_STEPSIZE_US 32000U
+
+#define SY7802_TORCH_BRIGHTNESS_MAX 8
+
+#define SY7802_FLASH_BRIGHTNESS_DEFAULT 14
+#define SY7802_FLASH_BRIGHTNESS_MIN 0
+#define SY7802_FLASH_BRIGHTNESS_MAX 15
+#define SY7802_FLASH_BRIGHTNESS_STEP 1
+
+#define SY7802_FLAG_TIMEOUT BIT(0)
+#define SY7802_FLAG_THERMAL_SHUTDOWN BIT(1)
+#define SY7802_FLAG_LED_FAULT BIT(2)
+#define SY7802_FLAG_TX1_INTERRUPT BIT(3)
+#define SY7802_FLAG_TX2_INTERRUPT BIT(4)
+#define SY7802_FLAG_LED_THERMAL_FAULT BIT(5)
+#define SY7802_FLAG_FLASH_INPUT_VOLTAGE_LOW BIT(6)
+#define SY7802_FLAG_INPUT_VOLTAGE_LOW BIT(7)
+
+#define SY7802_CHIP_ID 0x51
+
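+/* Power-on register defaults used to seed the regmap cache */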
+static const struct reg_default sy7802_regmap_defs[] = {
+ { SY7802_REG_ENABLE, SY7802_LEDS_MASK_ALL },
+ { SY7802_REG_TORCH_BRIGHTNESS, 0x92 },
+ { SY7802_REG_FLASH_BRIGHTNESS, SY7802_FLASH_BRIGHTNESS_DEFAULT |
+ SY7802_FLASH_BRIGHTNESS_DEFAULT << SY7802_FLASH_CURRENT_SHIFT },
+ { SY7802_REG_FLASH_DURATION, 0x6f },
+ { SY7802_REG_FLAGS, 0x0 },
+ { SY7802_REG_CONFIG_1, 0x68 },
+ { SY7802_REG_CONFIG_2, 0xf0 },
+};
+
+struct sy7802_led {
+ struct led_classdev_flash flash;
+ struct sy7802 *chip;
+ u8 led_id;
+};
+
+struct sy7802 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex mutex;
+
+ struct gpio_desc *enable_gpio;
+ struct regulator *vin_regulator;
+
+ unsigned int fled_strobe_used;
+ unsigned int fled_torch_used;
+ unsigned int leds_active;
+ int num_leds;
+ struct sy7802_led leds[] __counted_by(num_leds);
+};
+
+static int sy7802_torch_brightness_set(struct led_classdev *lcdev, enum led_brightness brightness)
+{
+ struct sy7802_led *led = container_of(lcdev, struct sy7802_led, flash.led_cdev);
+ struct sy7802 *chip = led->chip;
+ u32 fled_torch_used_tmp;
+ u32 led_enable_mask;
+ u32 enable_mask;
+ u32 torch_mask;
+ u32 val;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->fled_strobe_used) {
+ dev_warn(chip->dev, "Cannot set torch brightness whilst strobe is enabled\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (brightness)
+ fled_torch_used_tmp = chip->fled_torch_used | BIT(led->led_id);
+ else
+ fled_torch_used_tmp = chip->fled_torch_used & ~BIT(led->led_id);
+
+ led_enable_mask = led->led_id == SY7802_LED_JOINT ?
+ SY7802_LEDS_MASK_ALL :
+ SY7802_LEDS_MASK(led->led_id);
+
+ val = brightness ? led_enable_mask : SY7802_MODE_OFF;
+ if (fled_torch_used_tmp)
+ val |= SY7802_MODE_TORCH;
+
+ /* Disable torch to apply brightness */
+ ret = regmap_update_bits(chip->regmap, SY7802_REG_ENABLE, SY7802_MODE_MASK,
+ SY7802_MODE_OFF);
+ if (ret)
+ goto unlock;
+
+ torch_mask = led->led_id == SY7802_LED_JOINT ?
+ SY7802_TORCH_CURRENT_MASK_ALL :
+ SY7802_TORCH_CURRENT_MASK(led->led_id);
+
+ /* Register expects brightness between 0 and MAX_BRIGHTNESS - 1 */
+ if (brightness)
+ brightness -= 1;
+
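+ /* Mirror the level into both per-LED current fields; torch_mask limits the write to the selected LED(s) */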
+ brightness |= (brightness << SY7802_TORCH_CURRENT_SHIFT);
+
+ ret = regmap_update_bits(chip->regmap, SY7802_REG_TORCH_BRIGHTNESS, torch_mask, brightness);
+ if (ret)
+ goto unlock;
+
+ enable_mask = SY7802_MODE_MASK | led_enable_mask;
+ ret = regmap_update_bits(chip->regmap, SY7802_REG_ENABLE, enable_mask, val);
+ if (ret)
+ goto unlock;
+
+ chip->fled_torch_used = fled_torch_used_tmp;
+
+unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int sy7802_flash_brightness_set(struct led_classdev_flash *fl_cdev, u32 brightness)
+{
+ struct sy7802_led *led = container_of(fl_cdev, struct sy7802_led, flash);
+ struct led_flash_setting *s = &fl_cdev->brightness;
+ u32 val = (brightness - s->min) / s->step;
+ struct sy7802 *chip = led->chip;
+ u32 flash_mask;
+ int ret;
+
+ val |= (val << SY7802_FLASH_CURRENT_SHIFT);
+ flash_mask = led->led_id == SY7802_LED_JOINT ?
+ SY7802_FLASH_CURRENT_MASK_ALL :
+ SY7802_FLASH_CURRENT_MASK(led->led_id);
+
+ mutex_lock(&chip->mutex);
+ ret = regmap_update_bits(chip->regmap, SY7802_REG_FLASH_BRIGHTNESS, flash_mask, val);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int sy7802_strobe_set(struct led_classdev_flash *fl_cdev, bool state)
+{
+ struct sy7802_led *led = container_of(fl_cdev, struct sy7802_led, flash);
+ struct sy7802 *chip = led->chip;
+ u32 fled_strobe_used_tmp;
+ u32 led_enable_mask;
+ u32 enable_mask;
+ u32 val;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->fled_torch_used) {
+ dev_warn(chip->dev, "Cannot set strobe brightness whilst torch is enabled\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (state)
+ fled_strobe_used_tmp = chip->fled_strobe_used | BIT(led->led_id);
+ else
+ fled_strobe_used_tmp = chip->fled_strobe_used & ~BIT(led->led_id);
+
+ led_enable_mask = led->led_id == SY7802_LED_JOINT ?
+ SY7802_LEDS_MASK_ALL :
+ SY7802_LEDS_MASK(led->led_id);
+
+ val = state ? led_enable_mask : SY7802_MODE_OFF;
+ if (fled_strobe_used_tmp)
+ val |= SY7802_MODE_FLASH;
+
+ enable_mask = SY7802_MODE_MASK | led_enable_mask;
+ ret = regmap_update_bits(chip->regmap, SY7802_REG_ENABLE, enable_mask, val);
+
+ if (ret)
+ goto unlock;
+
+ chip->fled_strobe_used = fled_strobe_used_tmp;
+
+unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int sy7802_strobe_get(struct led_classdev_flash *fl_cdev, bool *state)
+{
+ struct sy7802_led *led = container_of(fl_cdev, struct sy7802_led, flash);
+ struct sy7802 *chip = led->chip;
+
+ mutex_lock(&chip->mutex);
+ *state = !!(chip->fled_strobe_used & BIT(led->led_id));
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+}
+
+static int sy7802_timeout_set(struct led_classdev_flash *fl_cdev, u32 timeout)
+{
+ struct sy7802_led *led = container_of(fl_cdev, struct sy7802_led, flash);
+ struct led_flash_setting *s = &fl_cdev->timeout;
+ u32 val = (timeout - s->min) / s->step;
+ struct sy7802 *chip = led->chip;
+
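+ /* Timeout is programmed in 32 ms steps above the 32 ms minimum */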
+ return regmap_write(chip->regmap, SY7802_REG_FLASH_DURATION, val);
+}
+
+static int sy7802_fault_get(struct led_classdev_flash *fl_cdev, u32 *fault)
+{
+ struct sy7802_led *led = container_of(fl_cdev, struct sy7802_led, flash);
+ struct sy7802 *chip = led->chip;
+ u32 val, led_faults = 0;
+ int ret;
+
+ /* NOTE: reading register clears fault status */
+ ret = regmap_read(chip->regmap, SY7802_REG_FLAGS, &val);
+ if (ret)
+ return ret;
+
+ if (val & (SY7802_FLAG_FLASH_INPUT_VOLTAGE_LOW | SY7802_FLAG_INPUT_VOLTAGE_LOW))
+ led_faults |= LED_FAULT_INPUT_VOLTAGE;
+
+ if (val & SY7802_FLAG_THERMAL_SHUTDOWN)
+ led_faults |= LED_FAULT_OVER_TEMPERATURE;
+
+ if (val & SY7802_FLAG_TIMEOUT)
+ led_faults |= LED_FAULT_TIMEOUT;
+
+ *fault = led_faults;
+ return 0;
+}
+
+static const struct led_flash_ops sy7802_flash_ops = {
+ .flash_brightness_set = sy7802_flash_brightness_set,
+ .strobe_set = sy7802_strobe_set,
+ .strobe_get = sy7802_strobe_get,
+ .timeout_set = sy7802_timeout_set,
+ .fault_get = sy7802_fault_get,
+};
+
+static void sy7802_init_flash_brightness(struct led_classdev_flash *fl_cdev)
+{
+ struct led_flash_setting *s;
+
+ /* Init flash brightness setting */
+ s = &fl_cdev->brightness;
+ s->min = SY7802_FLASH_BRIGHTNESS_MIN;
+ s->max = SY7802_FLASH_BRIGHTNESS_MAX;
+ s->step = SY7802_FLASH_BRIGHTNESS_STEP;
+ s->val = SY7802_FLASH_BRIGHTNESS_DEFAULT;
+}
+
+static void sy7802_init_flash_timeout(struct led_classdev_flash *fl_cdev)
+{
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fl_cdev->timeout;
+ s->min = SY7802_TIMEOUT_MIN_US;
+ s->max = SY7802_TIMEOUT_MAX_US;
+ s->step = SY7802_TIMEOUT_STEPSIZE_US;
+ s->val = SY7802_TIMEOUT_DEFAULT_US;
+}
+
+static int sy7802_led_register(struct device *dev, struct sy7802_led *led,
+ struct device_node *np)
+{
+ struct led_init_data init_data = {};
+ int ret;
+
+ init_data.fwnode = of_fwnode_handle(np);
+
+ ret = devm_led_classdev_flash_register_ext(dev, &led->flash, &init_data);
+ if (ret) {
+ dev_err(dev, "Couldn't register flash %d\n", led->led_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sy7802_init_flash_properties(struct device *dev, struct sy7802_led *led,
+ struct device_node *np)
+{
+ struct led_classdev_flash *flash = &led->flash;
+ struct led_classdev *lcdev = &flash->led_cdev;
+ u32 sources[SY7802_MAX_LEDS];
+ int i, num, ret;
+
+ num = of_property_count_u32_elems(np, "led-sources");
+ if (num < 1) {
+ dev_err(dev, "Not specified or wrong number of led-sources\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(np, "led-sources", sources, num);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num; i++) {
+ if (sources[i] >= SY7802_MAX_LEDS)
+ return -EINVAL;
+ if (led->chip->leds_active & BIT(sources[i]))
+ return -EINVAL;
+ led->chip->leds_active |= BIT(sources[i]);
+ }
+
+ /* If both channels are specified in 'led-sources', joint flash output mode is used */
+ led->led_id = num == 2 ? SY7802_LED_JOINT : sources[0];
+
+ lcdev->max_brightness = SY7802_TORCH_BRIGHTNESS_MAX;
+ lcdev->brightness_set_blocking = sy7802_torch_brightness_set;
+ lcdev->flags |= LED_DEV_CAP_FLASH;
+
+ flash->ops = &sy7802_flash_ops;
+
+ sy7802_init_flash_brightness(flash);
+ sy7802_init_flash_timeout(flash);
+
+ return 0;
+}
+
+static int sy7802_chip_check(struct sy7802 *chip)
+{
+ struct device *dev = chip->dev;
+ u32 chipid;
+ int ret;
+
+ ret = regmap_read(chip->regmap, SY7802_REG_DEV_ID, &chipid);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read chip ID\n");
+
+ if (chipid != SY7802_CHIP_ID)
+ return dev_err_probe(dev, -ENODEV, "Unsupported chip detected: %x\n", chipid);
+
+ return 0;
+}
+
+static void sy7802_enable(struct sy7802 *chip)
+{
+ gpiod_set_value_cansleep(chip->enable_gpio, 1);
+ usleep_range(200, 300);
+}
+
+static void sy7802_disable(struct sy7802 *chip)
+{
+ gpiod_set_value_cansleep(chip->enable_gpio, 0);
+}
+
+static int sy7802_probe_dt(struct sy7802 *chip)
+{
+ struct device_node *np = dev_of_node(chip->dev);
+ int child_num;
+ int ret;
+
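+ /* Start from a known state: outputs disabled and torch current zeroed */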
+ regmap_write(chip->regmap, SY7802_REG_ENABLE, SY7802_MODE_OFF);
+ regmap_write(chip->regmap, SY7802_REG_TORCH_BRIGHTNESS, LED_OFF);
+
+ child_num = 0;
+ for_each_available_child_of_node_scoped(np, child) {
+ struct sy7802_led *led = chip->leds + child_num;
+
+ led->chip = chip;
+ led->led_id = child_num;
+
+ ret = sy7802_init_flash_properties(chip->dev, led, child);
+ if (ret)
+ return ret;
+
+ ret = sy7802_led_register(chip->dev, led, child);
+ if (ret)
+ return ret;
+
+ child_num++;
+ }
+ return 0;
+}
+
+static void sy7802_chip_disable_action(void *data)
+{
+ struct sy7802 *chip = data;
+
+ sy7802_disable(chip);
+}
+
+static void sy7802_regulator_disable_action(void *data)
+{
+ struct sy7802 *chip = data;
+
+ regulator_disable(chip->vin_regulator);
+}
+
+static const struct regmap_config sy7802_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .cache_type = REGCACHE_MAPLE,
+ .reg_defaults = sy7802_regmap_defs,
+ .num_reg_defaults = ARRAY_SIZE(sy7802_regmap_defs),
+};
+
+static int sy7802_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct sy7802 *chip;
+ size_t count;
+ int ret;
+
+ count = device_get_child_node_count(dev);
+ if (!count || count > SY7802_MAX_LEDS)
+ return dev_err_probe(dev, -EINVAL, "Invalid amount of LED nodes %zu\n", count);
+
+ chip = devm_kzalloc(dev, struct_size(chip, leds, count), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->num_leds = count;
+
+ chip->dev = dev;
+ i2c_set_clientdata(client, chip);
+
+ chip->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(chip->enable_gpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request enable gpio\n");
+
+ chip->vin_regulator = devm_regulator_get(dev, "vin");
+ ret = PTR_ERR_OR_ZERO(chip->vin_regulator);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request regulator\n");
+
+ ret = regulator_enable(chip->vin_regulator);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulator\n");
+
+ ret = devm_add_action_or_reset(dev, sy7802_regulator_disable_action, chip);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &chip->mutex);
+ if (ret)
+ return ret;
+
+ mutex_lock(&chip->mutex);
+
+ chip->regmap = devm_regmap_init_i2c(client, &sy7802_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err_probe(dev, ret, "Failed to allocate register map\n");
+ goto error;
+ }
+
+ ret = sy7802_probe_dt(chip);
+ if (ret < 0)
+ goto error;
+
+ sy7802_enable(chip);
+
+ ret = devm_add_action_or_reset(dev, sy7802_chip_disable_action, chip);
+ if (ret)
+ goto error;
+
+ ret = sy7802_chip_check(chip);
+
+error:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static const struct of_device_id __maybe_unused sy7802_leds_match[] = {
+ { .compatible = "silergy,sy7802", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sy7802_leds_match);
+
+static struct i2c_driver sy7802_driver = {
+ .driver = {
+ .name = "sy7802",
+ .of_match_table = of_match_ptr(sy7802_leds_match),
+ },
+ .probe = sy7802_probe,
+};
+module_i2c_driver(sy7802_driver);
+
+MODULE_AUTHOR("André Apitzsch <git@apitzsch.eu>");
+MODULE_DESCRIPTION("Silergy SY7802 flash LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
index ec62a4811613..30c1ecb5f361 100644
--- a/drivers/leds/led-class-multicolor.c
+++ b/drivers/leds/led-class-multicolor.c
@@ -101,7 +101,7 @@ static ssize_t multi_index_show(struct device *dev,
for (i = 0; i < mcled_cdev->num_colors; i++) {
index = mcled_cdev->subled_info[i].color_index;
- len += sprintf(buf + len, "%s", led_colors[index]);
+ len += sprintf(buf + len, "%s", led_get_color_name(index));
if (i < mcled_cdev->num_colors - 1)
len += sprintf(buf + len, " ");
}
@@ -134,6 +134,7 @@ int led_classdev_multicolor_register_ext(struct device *parent,
return -EINVAL;
led_cdev = &mcled_cdev->led_cdev;
+ led_cdev->flags |= LED_MULTI_COLOR;
mcled_cdev->led_cdev.groups = led_multicolor_groups;
return led_classdev_register_ext(parent, led_cdev, init_data);
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 24fcff682b24..06b97fd49ad9 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -258,7 +258,6 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
led_dev = class_find_device_by_of_node(&leds_class, led_node);
of_node_put(led_node);
- put_device(led_dev);
return led_module_get(led_dev);
}
@@ -503,6 +502,11 @@ int led_classdev_register_ext(struct device *parent,
ret = led_classdev_next_name(proposed_name, final_name, sizeof(final_name));
if (ret < 0)
return ret;
+ else if (ret && led_cdev->flags & LED_REJECT_NAME_CONFLICT)
+ return -EEXIST;
+ else if (ret)
+ dev_warn(parent, "Led %s renamed to %s due to name collision\n",
+ proposed_name, final_name);
if (led_cdev->color >= LED_COLOR_ID_MAX)
dev_warn(parent, "LED %s color identifier out of range\n", final_name);
@@ -518,10 +522,6 @@ int led_classdev_register_ext(struct device *parent,
if (init_data && init_data->fwnode)
device_set_node(led_cdev->dev, init_data->fwnode);
- if (ret)
- dev_warn(parent, "Led %s renamed to %s due to name collision",
- proposed_name, dev_name(led_cdev->dev));
-
if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
ret = led_add_brightness_hw_changed(led_cdev);
if (ret) {
@@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent,
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
- /*
- * If no default trigger was given and hw_control_trigger is set,
- * make it the default trigger.
- */
- if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
- led_cdev->default_trigger = led_cdev->hw_control_trigger;
led_trigger_set_default(led_cdev);
#endif
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 89c9806cc97f..001c290bc07b 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/led-class-multicolor.h>
#include <linux/leds.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -25,7 +26,7 @@ EXPORT_SYMBOL_GPL(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
-const char * const led_colors[LED_COLOR_ID_MAX] = {
+static const char * const led_colors[LED_COLOR_ID_MAX] = {
[LED_COLOR_ID_WHITE] = "white",
[LED_COLOR_ID_RED] = "red",
[LED_COLOR_ID_GREEN] = "green",
@@ -42,7 +43,6 @@ const char * const led_colors[LED_COLOR_ID_MAX] = {
[LED_COLOR_ID_CYAN] = "cyan",
[LED_COLOR_ID_LIME] = "lime",
};
-EXPORT_SYMBOL_GPL(led_colors);
static int __led_set_brightness(struct led_classdev *led_cdev, unsigned int value)
{
@@ -122,15 +122,22 @@ static void led_timer_function(struct timer_list *t)
static void set_brightness_delayed_set_brightness(struct led_classdev *led_cdev,
unsigned int value)
{
- int ret = 0;
+ int ret;
ret = __led_set_brightness(led_cdev, value);
- if (ret == -ENOTSUPP)
+ if (ret == -ENOTSUPP) {
ret = __led_set_brightness_blocking(led_cdev, value);
- if (ret < 0 &&
- /* LED HW might have been unplugged, therefore don't warn */
- !(ret == -ENODEV && (led_cdev->flags & LED_UNREGISTERING) &&
- (led_cdev->flags & LED_HW_PLUGGABLE)))
+ if (ret == -ENOTSUPP)
+ /* No back-end support to set a fixed brightness value */
+ return;
+ }
+
+ /* LED HW might have been unplugged, therefore don't warn */
+ if (ret == -ENODEV && led_cdev->flags & LED_UNREGISTERING &&
+ led_cdev->flags & LED_HW_PLUGGABLE)
+ return;
+
+ if (ret < 0)
dev_err(led_cdev->dev,
"Setting an LED's brightness failed (%d)\n", ret);
}
@@ -362,6 +369,36 @@ int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value)
}
EXPORT_SYMBOL_GPL(led_set_brightness_sync);
+/*
+ * This is a led-core function because just like led_set_brightness()
+ * it is used in the kernel by e.g. triggers.
+ */
+void led_mc_set_brightness(struct led_classdev *led_cdev,
+ unsigned int *intensity_value, unsigned int num_colors,
+ unsigned int brightness)
+{
+ struct led_classdev_mc *mcled_cdev;
+ unsigned int i;
+
+ if (!(led_cdev->flags & LED_MULTI_COLOR)) {
+ dev_err_once(led_cdev->dev, "error not a multi-color LED\n");
+ return;
+ }
+
+ mcled_cdev = lcdev_to_mccdev(led_cdev);
+ if (num_colors != mcled_cdev->num_colors) {
+ dev_err_once(led_cdev->dev, "error num_colors mismatch %u != %u\n",
+ num_colors, mcled_cdev->num_colors);
+ return;
+ }
+
+ for (i = 0; i < mcled_cdev->num_colors; i++)
+ mcled_cdev->subled_info[i].intensity = intensity_value[i];
+
+ led_set_brightness(led_cdev, brightness);
+}
+EXPORT_SYMBOL_GPL(led_mc_set_brightness);
+
int led_update_brightness(struct led_classdev *led_cdev)
{
int ret;
@@ -534,6 +571,15 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
}
EXPORT_SYMBOL_GPL(led_compose_name);
+const char *led_get_color_name(u8 color_id)
+{
+ if (color_id >= ARRAY_SIZE(led_colors))
+ return NULL;
+
+ return led_colors[color_id];
+}
+EXPORT_SYMBOL_GPL(led_get_color_name);
+
enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode)
{
const char *state = NULL;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index b1b323b19301..78eb20093b2c 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -179,9 +179,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
cancel_work_sync(&led_cdev->set_brightness_work);
led_stop_software_blink(led_cdev);
+ device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev);
- device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL;
led_cdev->activated = false;
@@ -194,6 +194,19 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
spin_unlock(&trig->leddev_list_lock);
led_cdev->trigger = trig;
+ /*
+ * Some activate() calls use led_trigger_event() to initialize
+ * the brightness of the LED for which the trigger is being set.
+ * Ensure the led_cdev is visible on trig->led_cdevs for this.
+ */
+ synchronize_rcu();
+
+ /*
+ * If "set brightness to 0" is pending in workqueue,
+ * we don't want that to be reordered after ->activate()
+ */
+ flush_work(&led_cdev->set_brightness_work);
+
ret = 0;
if (trig->activate)
ret = trig->activate(led_cdev);
@@ -396,6 +409,26 @@ void led_trigger_event(struct led_trigger *trig,
}
EXPORT_SYMBOL_GPL(led_trigger_event);
+void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness)
+{
+ struct led_classdev *led_cdev;
+
+ if (!trig)
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) {
+ if (!(led_cdev->flags & LED_MULTI_COLOR))
+ continue;
+
+ led_mc_set_brightness(led_cdev, intensity_value, num_colors, brightness);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(led_mc_trigger_event);
+
static void led_trigger_blink_setup(struct led_trigger *trig,
unsigned long delay_on,
unsigned long delay_off,
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index decfca447d8a..a42cc4bc6917 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -331,8 +331,8 @@ static const struct of_device_id an30259a_match_table[] = {
MODULE_DEVICE_TABLE(of, an30259a_match_table);
static const struct i2c_device_id an30259a_id[] = {
- { "an30259a", 0 },
- { /* sentinel */ },
+ { "an30259a" },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, an30259a_id);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 0792ea126cea..2a08c5f27608 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -776,7 +776,7 @@ static int bd2802_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(bd2802_pm, bd2802_suspend, bd2802_resume);
static const struct i2c_device_id bd2802_id[] = {
- { "BD2802", 0 },
+ { "BD2802" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bd2802_id);
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index 2782da1a1930..e40b87aead2d 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -718,7 +718,7 @@ static void blinkm_remove(struct i2c_client *client)
}
static const struct i2c_device_id blinkm_id[] = {
- {"blinkm", 0},
+ { "blinkm" },
{}
};
diff --git a/drivers/leds/leds-cros_ec.c b/drivers/leds/leds-cros_ec.c
new file mode 100644
index 000000000000..275522b81ea5
--- /dev/null
+++ b/drivers/leds/leds-cros_ec.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ChromeOS EC LED Driver
+ *
+ * Copyright (C) 2024 Thomas Weißschuh <linux@weissschuh.net>
+ */
+
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+static const char * const cros_ec_led_functions[] = {
+ [EC_LED_ID_BATTERY_LED] = LED_FUNCTION_CHARGING,
+ [EC_LED_ID_POWER_LED] = LED_FUNCTION_POWER,
+ [EC_LED_ID_ADAPTER_LED] = "adapter",
+ [EC_LED_ID_LEFT_LED] = "left",
+ [EC_LED_ID_RIGHT_LED] = "right",
+ [EC_LED_ID_RECOVERY_HW_REINIT_LED] = "recovery-hw-reinit",
+ [EC_LED_ID_SYSRQ_DEBUG_LED] = "sysrq-debug",
+};
+
+static_assert(ARRAY_SIZE(cros_ec_led_functions) == EC_LED_ID_COUNT);
+
+static const int cros_ec_led_to_linux_id[] = {
+ [EC_LED_COLOR_RED] = LED_COLOR_ID_RED,
+ [EC_LED_COLOR_GREEN] = LED_COLOR_ID_GREEN,
+ [EC_LED_COLOR_BLUE] = LED_COLOR_ID_BLUE,
+ [EC_LED_COLOR_YELLOW] = LED_COLOR_ID_YELLOW,
+ [EC_LED_COLOR_WHITE] = LED_COLOR_ID_WHITE,
+ [EC_LED_COLOR_AMBER] = LED_COLOR_ID_AMBER,
+};
+
+static_assert(ARRAY_SIZE(cros_ec_led_to_linux_id) == EC_LED_COLOR_COUNT);
+
+static const int cros_ec_linux_to_ec_id[] = {
+ [LED_COLOR_ID_RED] = EC_LED_COLOR_RED,
+ [LED_COLOR_ID_GREEN] = EC_LED_COLOR_GREEN,
+ [LED_COLOR_ID_BLUE] = EC_LED_COLOR_BLUE,
+ [LED_COLOR_ID_YELLOW] = EC_LED_COLOR_YELLOW,
+ [LED_COLOR_ID_WHITE] = EC_LED_COLOR_WHITE,
+ [LED_COLOR_ID_AMBER] = EC_LED_COLOR_AMBER,
+};
+
+struct cros_ec_led_priv {
+ struct led_classdev_mc led_mc_cdev;
+ struct cros_ec_device *cros_ec;
+ enum ec_led_id led_id;
+};
+
+static inline struct cros_ec_led_priv *cros_ec_led_cdev_to_priv(struct led_classdev *led_cdev)
+{
+ return container_of(lcdev_to_mccdev(led_cdev), struct cros_ec_led_priv, led_mc_cdev);
+}
+
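+/* Request and response of EC_CMD_LED_CONTROL share one buffer in cros_ec_led_send_cmd() */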
+union cros_ec_led_cmd_data {
+ struct ec_params_led_control req;
+ struct ec_response_led_control resp;
+} __packed;
+
+static int cros_ec_led_send_cmd(struct cros_ec_device *cros_ec,
+ union cros_ec_led_cmd_data *arg)
+{
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ union cros_ec_led_cmd_data data;
+ } __packed buf = {
+ .msg = {
+ .version = 1,
+ .command = EC_CMD_LED_CONTROL,
+ .insize = sizeof(arg->resp),
+ .outsize = sizeof(arg->req),
+ },
+ .data.req = arg->req
+ };
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+ if (ret < 0)
+ return ret;
+
+ arg->resp = buf.data.resp;
+
+ return 0;
+}
+
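+/* "chromeos-auto" trigger: hand the LED back to the EC's automatic control */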
+static int cros_ec_led_trigger_activate(struct led_classdev *led_cdev)
+{
+ struct cros_ec_led_priv *priv = cros_ec_led_cdev_to_priv(led_cdev);
+ union cros_ec_led_cmd_data arg = {};
+
+ arg.req.led_id = priv->led_id;
+ arg.req.flags = EC_LED_FLAGS_AUTO;
+
+ return cros_ec_led_send_cmd(priv->cros_ec, &arg);
+}
+
+static struct led_hw_trigger_type cros_ec_led_trigger_type;
+
+static struct led_trigger cros_ec_led_trigger = {
+ .name = "chromeos-auto",
+ .trigger_type = &cros_ec_led_trigger_type,
+ .activate = cros_ec_led_trigger_activate,
+};
+
+static int cros_ec_led_brightness_set_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct cros_ec_led_priv *priv = cros_ec_led_cdev_to_priv(led_cdev);
+ union cros_ec_led_cmd_data arg = {};
+ enum ec_led_colors led_color;
+ struct mc_subled *subled;
+ size_t i;
+
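+ /* Scale each sub-LED intensity by the requested overall brightness */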
+ led_mc_calc_color_components(&priv->led_mc_cdev, brightness);
+
+ arg.req.led_id = priv->led_id;
+
+ for (i = 0; i < priv->led_mc_cdev.num_colors; i++) {
+ subled = &priv->led_mc_cdev.subled_info[i];
+ led_color = cros_ec_linux_to_ec_id[subled->color_index];
+ arg.req.brightness[led_color] = subled->brightness;
+ }
+
+ return cros_ec_led_send_cmd(priv->cros_ec, &arg);
+}
+
+static int cros_ec_led_count_subleds(struct device *dev,
+ struct ec_response_led_control *resp,
+ unsigned int *max_brightness)
+{
+ unsigned int range, common_range = 0;
+ int num_subleds = 0;
+ size_t i;
+
+ for (i = 0; i < EC_LED_COLOR_COUNT; i++) {
+ range = resp->brightness_range[i];
+
+ if (!range)
+ continue;
+
+ num_subleds++;
+
+ if (!common_range)
+ common_range = range;
+
+ if (common_range != range) {
+ /* The multicolor LED API expects a uniform max_brightness */
+ dev_err(dev, "Inconsistent LED brightness values\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!num_subleds)
+ return -EINVAL;
+
+ *max_brightness = common_range;
+ return num_subleds;
+}
+
+static const char *cros_ec_led_get_color_name(struct led_classdev_mc *led_mc_cdev)
+{
+ int color;
+
+ if (led_mc_cdev->num_colors == 1)
+ color = led_mc_cdev->subled_info[0].color_index;
+ else
+ color = LED_COLOR_ID_MULTI;
+
+ return led_get_color_name(color);
+}
+
+static int cros_ec_led_probe_one(struct device *dev, struct cros_ec_device *cros_ec,
+ enum ec_led_id id)
+{
+ union cros_ec_led_cmd_data arg = {};
+ struct cros_ec_led_priv *priv;
+ struct led_classdev *led_cdev;
+ struct mc_subled *subleds;
+ int i, ret, num_subleds;
+ size_t subled;
+
+ arg.req.led_id = id;
+ arg.req.flags = EC_LED_FLAGS_QUERY;
+ ret = cros_ec_led_send_cmd(cros_ec, &arg);
+ if (ret == -EINVAL)
+ return 0; /* Unknown LED, skip */
+ if (ret == -EOPNOTSUPP)
+ return -ENODEV;
+ if (ret < 0)
+ return ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ num_subleds = cros_ec_led_count_subleds(dev, &arg.resp,
+ &priv->led_mc_cdev.led_cdev.max_brightness);
+ if (num_subleds < 0)
+ return num_subleds;
+
+ priv->cros_ec = cros_ec;
+ priv->led_id = id;
+
+ subleds = devm_kcalloc(dev, num_subleds, sizeof(*subleds), GFP_KERNEL);
+ if (!subleds)
+ return -ENOMEM;
+
+ subled = 0;
+ for (i = 0; i < EC_LED_COLOR_COUNT; i++) {
+ if (!arg.resp.brightness_range[i])
+ continue;
+
+ subleds[subled].color_index = cros_ec_led_to_linux_id[i];
+ if (subled == 0)
+ subleds[subled].intensity = 100;
+ subled++;
+ }
+
+ priv->led_mc_cdev.subled_info = subleds;
+ priv->led_mc_cdev.num_colors = num_subleds;
+
+ led_cdev = &priv->led_mc_cdev.led_cdev;
+ led_cdev->brightness_set_blocking = cros_ec_led_brightness_set_blocking;
+ led_cdev->trigger_type = &cros_ec_led_trigger_type;
+ led_cdev->default_trigger = cros_ec_led_trigger.name;
+ led_cdev->hw_control_trigger = cros_ec_led_trigger.name;
+
+ led_cdev->name = devm_kasprintf(dev, GFP_KERNEL, "chromeos:%s:%s",
+ cros_ec_led_get_color_name(&priv->led_mc_cdev),
+ cros_ec_led_functions[id]);
+ if (!led_cdev->name)
+ return -ENOMEM;
+
+ return devm_led_classdev_multicolor_register(dev, &priv->led_mc_cdev);
+}
+
+static int cros_ec_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ int i, ret = 0;
+
+ ret = devm_led_trigger_register(dev, &cros_ec_led_trigger);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < EC_LED_ID_COUNT; i++) {
+ ret = cros_ec_led_probe_one(dev, cros_ec, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct platform_device_id cros_ec_led_id[] = {
+ { "cros-ec-led", 0 },
+ {}
+};
+
+static struct platform_driver cros_ec_led_driver = {
+ .driver.name = "cros-ec-led",
+ .probe = cros_ec_led_probe,
+ .id_table = cros_ec_led_id,
+};
+module_platform_driver(cros_ec_led_driver);
+
+MODULE_DEVICE_TABLE(platform, cros_ec_led_id);
+MODULE_DESCRIPTION("ChromeOS EC LED Driver");
+MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 66c65741202e..5e1a4d39a107 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -140,7 +140,7 @@ static const struct reg_default is31fl3190_reg_defaults[] = {
{ IS31FL3190_PWM(2), 0x00 },
};
-static struct regmap_config is31fl3190_regmap_config = {
+static const struct regmap_config is31fl3190_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = IS31FL3190_RESET,
@@ -178,7 +178,7 @@ static const struct reg_default is31fl3196_reg_defaults[] = {
{ IS31FL3196_PWM(8), 0x00 },
};
-static struct regmap_config is31fl3196_regmap_config = {
+static const struct regmap_config is31fl3196_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = IS31FL3196_REG_CNT,
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index a2feef8e4ac5..e44a3db106c3 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -478,7 +478,7 @@ static void lm3530_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3530_id[] = {
- {LM3530_NAME, 0},
+ { LM3530_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, lm3530_id);
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 8c90701dc50d..54b5650877f7 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -726,7 +726,7 @@ static const struct of_device_id of_lm3532_leds_match[] = {
MODULE_DEVICE_TABLE(of, of_lm3532_leds_match);
static const struct i2c_device_id lm3532_id[] = {
- {LM3532_NAME, 0},
+ { LM3532_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, lm3532_id);
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 6eee52e211be..61629d5d6703 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -390,7 +390,7 @@ static void lm3642_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3642_id[] = {
- {LM3642_NAME, 0},
+ { LM3642_NAME },
{}
};
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index 380d17a58fe9..99de2a331727 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -360,7 +360,7 @@ static void lm3697_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3697_id[] = {
- { "lm3697", 0 },
+ { "lm3697" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm3697_id);
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 8ea746c499d1..ccfeee49ea78 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -417,7 +417,7 @@ static void lp3944_remove(struct i2c_client *client)
/* lp3944 i2c driver struct */
static const struct i2c_device_id lp3944_id[] = {
- {"lp3944", 0},
+ { "lp3944" },
{}
};
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index ff7bae2447dd..17219a582704 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -266,7 +266,7 @@ static int lp3952_probe(struct i2c_client *client)
}
static const struct i2c_device_id lp3952_id[] = {
- {LP3952_NAME, 0},
+ { LP3952_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, lp3952_id);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index f9c8b568b652..7564b9953408 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -9,6 +9,7 @@
* Milo(Woogyom) Kim <milo.kim@ti.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
@@ -21,7 +22,6 @@
#include "leds-lp55xx-common.h"
-#define LP5521_PROGRAM_LENGTH 32
#define LP5521_MAX_LEDS 3
#define LP5521_CMD_DIRECT 0x3F
@@ -73,29 +73,6 @@
/* Reset register value */
#define LP5521_RESET 0xFF
-/* Program Memory Operations */
-#define LP5521_MODE_R_M 0x30 /* Operation Mode Register */
-#define LP5521_MODE_G_M 0x0C
-#define LP5521_MODE_B_M 0x03
-#define LP5521_LOAD_R 0x10
-#define LP5521_LOAD_G 0x04
-#define LP5521_LOAD_B 0x01
-
-#define LP5521_R_IS_LOADING(mode) \
- ((mode & LP5521_MODE_R_M) == LP5521_LOAD_R)
-#define LP5521_G_IS_LOADING(mode) \
- ((mode & LP5521_MODE_G_M) == LP5521_LOAD_G)
-#define LP5521_B_IS_LOADING(mode) \
- ((mode & LP5521_MODE_B_M) == LP5521_LOAD_B)
-
-#define LP5521_EXEC_R_M 0x30 /* Enable Register */
-#define LP5521_EXEC_G_M 0x0C
-#define LP5521_EXEC_B_M 0x03
-#define LP5521_EXEC_M 0x3F
-#define LP5521_RUN_R 0x20
-#define LP5521_RUN_G 0x08
-#define LP5521_RUN_B 0x02
-
static inline void lp5521_wait_opmode_done(void)
{
/* operation mode change needs to be longer than 153 us */
@@ -108,170 +85,21 @@ static inline void lp5521_wait_enable_done(void)
usleep_range(500, 600);
}
-static void lp5521_set_led_current(struct lp55xx_led *led, u8 led_current)
-{
- led->led_current = led_current;
- lp55xx_write(led->chip, LP5521_REG_LED_CURRENT_BASE + led->chan_nr,
- led_current);
-}
-
-static void lp5521_load_engine(struct lp55xx_chip *chip)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- static const u8 mask[] = {
- [LP55XX_ENGINE_1] = LP5521_MODE_R_M,
- [LP55XX_ENGINE_2] = LP5521_MODE_G_M,
- [LP55XX_ENGINE_3] = LP5521_MODE_B_M,
- };
-
- static const u8 val[] = {
- [LP55XX_ENGINE_1] = LP5521_LOAD_R,
- [LP55XX_ENGINE_2] = LP5521_LOAD_G,
- [LP55XX_ENGINE_3] = LP5521_LOAD_B,
- };
-
- lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], val[idx]);
-
- lp5521_wait_opmode_done();
-}
-
-static void lp5521_stop_all_engines(struct lp55xx_chip *chip)
-{
- lp55xx_write(chip, LP5521_REG_OP_MODE, 0);
- lp5521_wait_opmode_done();
-}
-
-static void lp5521_stop_engine(struct lp55xx_chip *chip)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- static const u8 mask[] = {
- [LP55XX_ENGINE_1] = LP5521_MODE_R_M,
- [LP55XX_ENGINE_2] = LP5521_MODE_G_M,
- [LP55XX_ENGINE_3] = LP5521_MODE_B_M,
- };
-
- lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], 0);
-
- lp5521_wait_opmode_done();
-}
-
static void lp5521_run_engine(struct lp55xx_chip *chip, bool start)
{
int ret;
- u8 mode;
- u8 exec;
/* stop engine */
if (!start) {
- lp5521_stop_engine(chip);
+ lp55xx_stop_engine(chip);
lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
lp5521_wait_opmode_done();
return;
}
- /*
- * To run the engine,
- * operation mode and enable register should updated at the same time
- */
-
- ret = lp55xx_read(chip, LP5521_REG_OP_MODE, &mode);
- if (ret)
- return;
-
- ret = lp55xx_read(chip, LP5521_REG_ENABLE, &exec);
- if (ret)
- return;
-
- /* change operation mode to RUN only when each engine is loading */
- if (LP5521_R_IS_LOADING(mode)) {
- mode = (mode & ~LP5521_MODE_R_M) | LP5521_RUN_R;
- exec = (exec & ~LP5521_EXEC_R_M) | LP5521_RUN_R;
- }
-
- if (LP5521_G_IS_LOADING(mode)) {
- mode = (mode & ~LP5521_MODE_G_M) | LP5521_RUN_G;
- exec = (exec & ~LP5521_EXEC_G_M) | LP5521_RUN_G;
- }
-
- if (LP5521_B_IS_LOADING(mode)) {
- mode = (mode & ~LP5521_MODE_B_M) | LP5521_RUN_B;
- exec = (exec & ~LP5521_EXEC_B_M) | LP5521_RUN_B;
- }
-
- lp55xx_write(chip, LP5521_REG_OP_MODE, mode);
- lp5521_wait_opmode_done();
-
- lp55xx_update_bits(chip, LP5521_REG_ENABLE, LP5521_EXEC_M, exec);
- lp5521_wait_enable_done();
-}
-
-static int lp5521_update_program_memory(struct lp55xx_chip *chip,
- const u8 *data, size_t size)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
- static const u8 addr[] = {
- [LP55XX_ENGINE_1] = LP5521_REG_R_PROG_MEM,
- [LP55XX_ENGINE_2] = LP5521_REG_G_PROG_MEM,
- [LP55XX_ENGINE_3] = LP5521_REG_B_PROG_MEM,
- };
- unsigned cmd;
- char c[3];
- int nrchars;
- int ret;
- int offset = 0;
- int i = 0;
-
- while ((offset < size - 1) && (i < LP5521_PROGRAM_LENGTH)) {
- /* separate sscanfs because length is working only for %s */
- ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
- if (ret != 1)
- goto err;
-
- ret = sscanf(c, "%2x", &cmd);
- if (ret != 1)
- goto err;
-
- pattern[i] = (u8)cmd;
- offset += nrchars;
- i++;
- }
-
- /* Each instruction is 16bit long. Check that length is even */
- if (i % 2)
- goto err;
-
- for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
- ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
- if (ret)
- return -EINVAL;
- }
-
- return size;
-
-err:
- dev_err(&chip->cl->dev, "wrong pattern format\n");
- return -EINVAL;
-}
-
-static void lp5521_firmware_loaded(struct lp55xx_chip *chip)
-{
- const struct firmware *fw = chip->fw;
-
- if (fw->size > LP5521_PROGRAM_LENGTH) {
- dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
- fw->size);
- return;
- }
-
- /*
- * Program memory sequence
- * 1) set engine mode to "LOAD"
- * 2) write firmware data into program memory
- */
-
- lp5521_load_engine(chip);
- lp5521_update_program_memory(chip, fw->data, fw->size);
+ ret = lp55xx_run_engine_common(chip);
+ if (!ret)
+ lp5521_wait_enable_done();
}
static int lp5521_post_init_device(struct lp55xx_chip *chip)
@@ -350,114 +178,6 @@ static int lp5521_run_selftest(struct lp55xx_chip *chip, char *buf)
return 0;
}
-static int lp5521_multicolor_brightness(struct lp55xx_led *led)
-{
- struct lp55xx_chip *chip = led->chip;
- int ret;
- int i;
-
- mutex_lock(&chip->lock);
- for (i = 0; i < led->mc_cdev.num_colors; i++) {
- ret = lp55xx_write(chip,
- LP5521_REG_LED_PWM_BASE +
- led->mc_cdev.subled_info[i].channel,
- led->mc_cdev.subled_info[i].brightness);
- if (ret)
- break;
- }
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static int lp5521_led_brightness(struct lp55xx_led *led)
-{
- struct lp55xx_chip *chip = led->chip;
- int ret;
-
- mutex_lock(&chip->lock);
- ret = lp55xx_write(chip, LP5521_REG_LED_PWM_BASE + led->chan_nr,
- led->brightness);
- mutex_unlock(&chip->lock);
-
- return ret;
-}
-
-static ssize_t show_engine_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- enum lp55xx_engine_mode mode = chip->engines[nr - 1].mode;
-
- switch (mode) {
- case LP55XX_ENGINE_RUN:
- return sprintf(buf, "run\n");
- case LP55XX_ENGINE_LOAD:
- return sprintf(buf, "load\n");
- case LP55XX_ENGINE_DISABLED:
- default:
- return sprintf(buf, "disabled\n");
- }
-}
-show_mode(1)
-show_mode(2)
-show_mode(3)
-
-static ssize_t store_engine_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- struct lp55xx_engine *engine = &chip->engines[nr - 1];
-
- mutex_lock(&chip->lock);
-
- chip->engine_idx = nr;
-
- if (!strncmp(buf, "run", 3)) {
- lp5521_run_engine(chip, true);
- engine->mode = LP55XX_ENGINE_RUN;
- } else if (!strncmp(buf, "load", 4)) {
- lp5521_stop_engine(chip);
- lp5521_load_engine(chip);
- engine->mode = LP55XX_ENGINE_LOAD;
- } else if (!strncmp(buf, "disabled", 8)) {
- lp5521_stop_engine(chip);
- engine->mode = LP55XX_ENGINE_DISABLED;
- }
-
- mutex_unlock(&chip->lock);
-
- return len;
-}
-store_mode(1)
-store_mode(2)
-store_mode(3)
-
-static ssize_t store_engine_load(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- int ret;
-
- mutex_lock(&chip->lock);
-
- chip->engine_idx = nr;
- lp5521_load_engine(chip);
- ret = lp5521_update_program_memory(chip, buf, len);
-
- mutex_unlock(&chip->lock);
-
- return ret;
-}
-store_load(1)
-store_load(2)
-store_load(3)
-
static ssize_t lp5521_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -466,20 +186,20 @@ static ssize_t lp5521_selftest(struct device *dev,
struct lp55xx_chip *chip = led->chip;
int ret;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
ret = lp5521_run_selftest(chip, buf);
- mutex_unlock(&chip->lock);
return sysfs_emit(buf, "%s\n", ret ? "FAIL" : "OK");
}
/* device attributes */
-static LP55XX_DEV_ATTR_RW(engine1_mode, show_engine1_mode, store_engine1_mode);
-static LP55XX_DEV_ATTR_RW(engine2_mode, show_engine2_mode, store_engine2_mode);
-static LP55XX_DEV_ATTR_RW(engine3_mode, show_engine3_mode, store_engine3_mode);
-static LP55XX_DEV_ATTR_WO(engine1_load, store_engine1_load);
-static LP55XX_DEV_ATTR_WO(engine2_load, store_engine2_load);
-static LP55XX_DEV_ATTR_WO(engine3_load, store_engine3_load);
+LP55XX_DEV_ATTR_ENGINE_MODE(1);
+LP55XX_DEV_ATTR_ENGINE_MODE(2);
+LP55XX_DEV_ATTR_ENGINE_MODE(3);
+LP55XX_DEV_ATTR_ENGINE_LOAD(1);
+LP55XX_DEV_ATTR_ENGINE_LOAD(2);
+LP55XX_DEV_ATTR_ENGINE_LOAD(3);
static LP55XX_DEV_ATTR_RO(selftest, lp5521_selftest);
static struct attribute *lp5521_attributes[] = {
@@ -499,6 +219,12 @@ static const struct attribute_group lp5521_group = {
/* Chip specific configurations */
static struct lp55xx_device_config lp5521_cfg = {
+ .reg_op_mode = {
+ .addr = LP5521_REG_OP_MODE,
+ },
+ .reg_exec = {
+ .addr = LP5521_REG_ENABLE,
+ },
.reset = {
.addr = LP5521_REG_RESET,
.val = LP5521_RESET,
@@ -507,97 +233,33 @@ static struct lp55xx_device_config lp5521_cfg = {
.addr = LP5521_REG_ENABLE,
.val = LP5521_ENABLE_DEFAULT,
},
+ .prog_mem_base = {
+ .addr = LP5521_REG_R_PROG_MEM,
+ },
+ .reg_led_pwm_base = {
+ .addr = LP5521_REG_LED_PWM_BASE,
+ },
+ .reg_led_current_base = {
+ .addr = LP5521_REG_LED_CURRENT_BASE,
+ },
.max_channel = LP5521_MAX_LEDS,
.post_init_device = lp5521_post_init_device,
- .brightness_fn = lp5521_led_brightness,
- .multicolor_brightness_fn = lp5521_multicolor_brightness,
- .set_led_current = lp5521_set_led_current,
- .firmware_cb = lp5521_firmware_loaded,
+ .brightness_fn = lp55xx_led_brightness,
+ .multicolor_brightness_fn = lp55xx_multicolor_brightness,
+ .set_led_current = lp55xx_set_led_current,
+ .firmware_cb = lp55xx_firmware_loaded_cb,
.run_engine = lp5521_run_engine,
.dev_attr_group = &lp5521_group,
};
-static int lp5521_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- int ret;
- struct lp55xx_chip *chip;
- struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = dev_of_node(&client->dev);
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->cfg = &lp5521_cfg;
-
- if (!pdata) {
- if (np) {
- pdata = lp55xx_of_populate_pdata(&client->dev, np,
- chip);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else {
- dev_err(&client->dev, "no platform data\n");
- return -EINVAL;
- }
- }
-
- led = devm_kcalloc(&client->dev,
- pdata->num_channels, sizeof(*led), GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- chip->cl = client;
- chip->pdata = pdata;
-
- mutex_init(&chip->lock);
-
- i2c_set_clientdata(client, led);
-
- ret = lp55xx_init_device(chip);
- if (ret)
- goto err_init;
-
- dev_info(&client->dev, "%s programmable led chip found\n", id->name);
-
- ret = lp55xx_register_leds(led, chip);
- if (ret)
- goto err_out;
-
- ret = lp55xx_register_sysfs(chip);
- if (ret) {
- dev_err(&client->dev, "registering sysfs failed\n");
- goto err_out;
- }
-
- return 0;
-
-err_out:
- lp55xx_deinit_device(chip);
-err_init:
- return ret;
-}
-
-static void lp5521_remove(struct i2c_client *client)
-{
- struct lp55xx_led *led = i2c_get_clientdata(client);
- struct lp55xx_chip *chip = led->chip;
-
- lp5521_stop_all_engines(chip);
- lp55xx_unregister_sysfs(chip);
- lp55xx_deinit_device(chip);
-}
-
static const struct i2c_device_id lp5521_id[] = {
- { "lp5521", 0 }, /* Three channel chip */
+ { "lp5521", .driver_data = (kernel_ulong_t)&lp5521_cfg, }, /* Three channel chip */
{ }
};
MODULE_DEVICE_TABLE(i2c, lp5521_id);
static const struct of_device_id of_lp5521_leds_match[] = {
- { .compatible = "national,lp5521", },
+ { .compatible = "national,lp5521", .data = &lp5521_cfg, },
{},
};
@@ -608,8 +270,8 @@ static struct i2c_driver lp5521_driver = {
.name = "lp5521",
.of_match_table = of_lp5521_leds_match,
},
- .probe = lp5521_probe,
- .remove = lp5521_remove,
+ .probe = lp55xx_probe,
+ .remove = lp55xx_remove,
.id_table = lp5521_id,
};
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 38de853f9939..4ed3e735260c 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -9,6 +9,7 @@
* Milo(Woogyom) Kim <milo.kim@ti.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
@@ -21,7 +22,6 @@
#include "leds-lp55xx-common.h"
-#define LP5523_PROGRAM_LENGTH 32 /* bytes */
/* Memory is used like this:
* 0x00 engine 1 program
* 0x10 engine 2 program
@@ -30,6 +30,7 @@
* 0x40 engine 2 muxing info
* 0x50 engine 3 muxing info
*/
+#define LP5523_PAGES_PER_ENGINE 1
#define LP5523_MAX_LEDS 9
/* Registers */
@@ -41,7 +42,10 @@
#define LP5523_REG_LED_PWM_BASE 0x16
#define LP5523_REG_LED_CURRENT_BASE 0x26
#define LP5523_REG_CONFIG 0x36
+
#define LP5523_REG_STATUS 0x3A
+#define LP5523_ENGINE_BUSY BIT(4)
+
#define LP5523_REG_RESET 0x3D
#define LP5523_REG_LED_TEST_CTRL 0x41
#define LP5523_REG_LED_TEST_ADC 0x42
@@ -70,61 +74,8 @@
#define LP5523_EXT_CLK_USED 0x08
#define LP5523_ENG_STATUS_MASK 0x07
-#define LP5523_FADER_MAPPING_MASK 0xC0
-#define LP5523_FADER_MAPPING_SHIFT 6
-
-/* Memory Page Selection */
-#define LP5523_PAGE_ENG1 0
-#define LP5523_PAGE_ENG2 1
-#define LP5523_PAGE_ENG3 2
-#define LP5523_PAGE_MUX1 3
-#define LP5523_PAGE_MUX2 4
-#define LP5523_PAGE_MUX3 5
-
-/* Program Memory Operations */
-#define LP5523_MODE_ENG1_M 0x30 /* Operation Mode Register */
-#define LP5523_MODE_ENG2_M 0x0C
-#define LP5523_MODE_ENG3_M 0x03
-#define LP5523_LOAD_ENG1 0x10
-#define LP5523_LOAD_ENG2 0x04
-#define LP5523_LOAD_ENG3 0x01
-
-#define LP5523_ENG1_IS_LOADING(mode) \
- ((mode & LP5523_MODE_ENG1_M) == LP5523_LOAD_ENG1)
-#define LP5523_ENG2_IS_LOADING(mode) \
- ((mode & LP5523_MODE_ENG2_M) == LP5523_LOAD_ENG2)
-#define LP5523_ENG3_IS_LOADING(mode) \
- ((mode & LP5523_MODE_ENG3_M) == LP5523_LOAD_ENG3)
-
-#define LP5523_EXEC_ENG1_M 0x30 /* Enable Register */
-#define LP5523_EXEC_ENG2_M 0x0C
-#define LP5523_EXEC_ENG3_M 0x03
-#define LP5523_EXEC_M 0x3F
-#define LP5523_RUN_ENG1 0x20
-#define LP5523_RUN_ENG2 0x08
-#define LP5523_RUN_ENG3 0x02
-
-#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
-
-enum lp5523_chip_id {
- LP5523,
- LP55231,
-};
-
static int lp5523_init_program_engine(struct lp55xx_chip *chip);
-static inline void lp5523_wait_opmode_done(void)
-{
- usleep_range(1000, 2000);
-}
-
-static void lp5523_set_led_current(struct lp55xx_led *led, u8 led_current)
-{
- led->led_current = led_current;
- lp55xx_write(led->chip, LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
- led_current);
-}
-
static int lp5523_post_init_device(struct lp55xx_chip *chip)
{
int ret;
@@ -156,114 +107,16 @@ static int lp5523_post_init_device(struct lp55xx_chip *chip)
return lp5523_init_program_engine(chip);
}
-static void lp5523_load_engine(struct lp55xx_chip *chip)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- static const u8 mask[] = {
- [LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
- [LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
- [LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
- };
-
- static const u8 val[] = {
- [LP55XX_ENGINE_1] = LP5523_LOAD_ENG1,
- [LP55XX_ENGINE_2] = LP5523_LOAD_ENG2,
- [LP55XX_ENGINE_3] = LP5523_LOAD_ENG3,
- };
-
- lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], val[idx]);
-
- lp5523_wait_opmode_done();
-}
-
-static void lp5523_load_engine_and_select_page(struct lp55xx_chip *chip)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- static const u8 page_sel[] = {
- [LP55XX_ENGINE_1] = LP5523_PAGE_ENG1,
- [LP55XX_ENGINE_2] = LP5523_PAGE_ENG2,
- [LP55XX_ENGINE_3] = LP5523_PAGE_ENG3,
- };
-
- lp5523_load_engine(chip);
-
- lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, page_sel[idx]);
-}
-
-static void lp5523_stop_all_engines(struct lp55xx_chip *chip)
-{
- lp55xx_write(chip, LP5523_REG_OP_MODE, 0);
- lp5523_wait_opmode_done();
-}
-
-static void lp5523_stop_engine(struct lp55xx_chip *chip)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- static const u8 mask[] = {
- [LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
- [LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
- [LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
- };
-
- lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], 0);
-
- lp5523_wait_opmode_done();
-}
-
-static void lp5523_turn_off_channels(struct lp55xx_chip *chip)
-{
- int i;
-
- for (i = 0; i < LP5523_MAX_LEDS; i++)
- lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0);
-}
-
static void lp5523_run_engine(struct lp55xx_chip *chip, bool start)
{
- int ret;
- u8 mode;
- u8 exec;
-
/* stop engine */
if (!start) {
- lp5523_stop_engine(chip);
- lp5523_turn_off_channels(chip);
+ lp55xx_stop_engine(chip);
+ lp55xx_turn_off_channels(chip);
return;
}
- /*
- * To run the engine,
- * operation mode and enable register should updated at the same time
- */
-
- ret = lp55xx_read(chip, LP5523_REG_OP_MODE, &mode);
- if (ret)
- return;
-
- ret = lp55xx_read(chip, LP5523_REG_ENABLE, &exec);
- if (ret)
- return;
-
- /* change operation mode to RUN only when each engine is loading */
- if (LP5523_ENG1_IS_LOADING(mode)) {
- mode = (mode & ~LP5523_MODE_ENG1_M) | LP5523_RUN_ENG1;
- exec = (exec & ~LP5523_EXEC_ENG1_M) | LP5523_RUN_ENG1;
- }
-
- if (LP5523_ENG2_IS_LOADING(mode)) {
- mode = (mode & ~LP5523_MODE_ENG2_M) | LP5523_RUN_ENG2;
- exec = (exec & ~LP5523_EXEC_ENG2_M) | LP5523_RUN_ENG2;
- }
-
- if (LP5523_ENG3_IS_LOADING(mode)) {
- mode = (mode & ~LP5523_MODE_ENG3_M) | LP5523_RUN_ENG3;
- exec = (exec & ~LP5523_EXEC_ENG3_M) | LP5523_RUN_ENG3;
- }
-
- lp55xx_write(chip, LP5523_REG_OP_MODE, mode);
- lp5523_wait_opmode_done();
-
- lp55xx_update_bits(chip, LP5523_REG_ENABLE, LP5523_EXEC_M, exec);
+ lp55xx_run_engine_common(chip);
}
static int lp5523_init_program_engine(struct lp55xx_chip *chip)
@@ -273,7 +126,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
int ret;
u8 status;
/* one pattern per engine setting LED MUX start and stop addresses */
- static const u8 pattern[][LP5523_PROGRAM_LENGTH] = {
+ static const u8 pattern[][LP55xx_BYTES_PER_PAGE] = {
{ 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
{ 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
{ 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
@@ -295,9 +148,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
/* write LED MUX address space for each engine */
for (i = LP55XX_ENGINE_1; i <= LP55XX_ENGINE_3; i++) {
chip->engine_idx = i;
- lp5523_load_engine_and_select_page(chip);
+ lp55xx_load_engine(chip);
- for (j = 0; j < LP5523_PROGRAM_LENGTH; j++) {
+ for (j = 0; j < LP55xx_BYTES_PER_PAGE; j++) {
ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + j,
pattern[i - 1][j]);
if (ret)
@@ -322,261 +175,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
}
out:
- lp5523_stop_all_engines(chip);
- return ret;
-}
-
-static int lp5523_update_program_memory(struct lp55xx_chip *chip,
- const u8 *data, size_t size)
-{
- u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
- unsigned int cmd;
- char c[3];
- int nrchars;
- int ret;
- int offset = 0;
- int i = 0;
-
- while ((offset < size - 1) && (i < LP5523_PROGRAM_LENGTH)) {
- /* separate sscanfs because length is working only for %s */
- ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
- if (ret != 1)
- goto err;
-
- ret = sscanf(c, "%2x", &cmd);
- if (ret != 1)
- goto err;
-
- pattern[i] = (u8)cmd;
- offset += nrchars;
- i++;
- }
-
- /* Each instruction is 16bit long. Check that length is even */
- if (i % 2)
- goto err;
-
- for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
- ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
- if (ret)
- return -EINVAL;
- }
-
- return size;
-
-err:
- dev_err(&chip->cl->dev, "wrong pattern format\n");
- return -EINVAL;
-}
-
-static void lp5523_firmware_loaded(struct lp55xx_chip *chip)
-{
- const struct firmware *fw = chip->fw;
-
- if (fw->size > LP5523_PROGRAM_LENGTH) {
- dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
- fw->size);
- return;
- }
-
- /*
- * Program memory sequence
- * 1) set engine mode to "LOAD"
- * 2) write firmware data into program memory
- */
-
- lp5523_load_engine_and_select_page(chip);
- lp5523_update_program_memory(chip, fw->data, fw->size);
-}
-
-static ssize_t show_engine_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- enum lp55xx_engine_mode mode = chip->engines[nr - 1].mode;
-
- switch (mode) {
- case LP55XX_ENGINE_RUN:
- return sprintf(buf, "run\n");
- case LP55XX_ENGINE_LOAD:
- return sprintf(buf, "load\n");
- case LP55XX_ENGINE_DISABLED:
- default:
- return sprintf(buf, "disabled\n");
- }
-}
-show_mode(1)
-show_mode(2)
-show_mode(3)
-
-static ssize_t store_engine_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- struct lp55xx_engine *engine = &chip->engines[nr - 1];
-
- mutex_lock(&chip->lock);
-
- chip->engine_idx = nr;
-
- if (!strncmp(buf, "run", 3)) {
- lp5523_run_engine(chip, true);
- engine->mode = LP55XX_ENGINE_RUN;
- } else if (!strncmp(buf, "load", 4)) {
- lp5523_stop_engine(chip);
- lp5523_load_engine(chip);
- engine->mode = LP55XX_ENGINE_LOAD;
- } else if (!strncmp(buf, "disabled", 8)) {
- lp5523_stop_engine(chip);
- engine->mode = LP55XX_ENGINE_DISABLED;
- }
-
- mutex_unlock(&chip->lock);
-
- return len;
-}
-store_mode(1)
-store_mode(2)
-store_mode(3)
-
-static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
-{
- u16 tmp_mux = 0;
- int i;
-
- len = min_t(int, len, LP5523_MAX_LEDS);
-
- for (i = 0; i < len; i++) {
- switch (buf[i]) {
- case '1':
- tmp_mux |= (1 << i);
- break;
- case '0':
- break;
- case '\n':
- i = len;
- break;
- default:
- return -1;
- }
- }
- *mux = tmp_mux;
-
- return 0;
-}
-
-static void lp5523_mux_to_array(u16 led_mux, char *array)
-{
- int i, pos = 0;
-
- for (i = 0; i < LP5523_MAX_LEDS; i++)
- pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
-
- array[pos] = '\0';
-}
-
-static ssize_t show_engine_leds(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- char mux[LP5523_MAX_LEDS + 1];
-
- lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
-
- return sprintf(buf, "%s\n", mux);
-}
-show_leds(1)
-show_leds(2)
-show_leds(3)
-
-static int lp5523_load_mux(struct lp55xx_chip *chip, u16 mux, int nr)
-{
- struct lp55xx_engine *engine = &chip->engines[nr - 1];
- int ret;
- static const u8 mux_page[] = {
- [LP55XX_ENGINE_1] = LP5523_PAGE_MUX1,
- [LP55XX_ENGINE_2] = LP5523_PAGE_MUX2,
- [LP55XX_ENGINE_3] = LP5523_PAGE_MUX3,
- };
-
- lp5523_load_engine(chip);
-
- ret = lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, mux_page[nr]);
- if (ret)
- return ret;
-
- ret = lp55xx_write(chip, LP5523_REG_PROG_MEM, (u8)(mux >> 8));
- if (ret)
- return ret;
-
- ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + 1, (u8)(mux));
- if (ret)
- return ret;
-
- engine->led_mux = mux;
- return 0;
-}
-
-static ssize_t store_engine_leds(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- struct lp55xx_engine *engine = &chip->engines[nr - 1];
- u16 mux = 0;
- ssize_t ret;
-
- if (lp5523_mux_parse(buf, &mux, len))
- return -EINVAL;
-
- mutex_lock(&chip->lock);
-
- chip->engine_idx = nr;
- ret = -EINVAL;
-
- if (engine->mode != LP55XX_ENGINE_LOAD)
- goto leave;
-
- if (lp5523_load_mux(chip, mux, nr))
- goto leave;
-
- ret = len;
-leave:
- mutex_unlock(&chip->lock);
+ lp55xx_stop_all_engine(chip);
return ret;
}
-store_leds(1)
-store_leds(2)
-store_leds(3)
-
-static ssize_t store_engine_load(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- int ret;
-
- mutex_lock(&chip->lock);
-
- chip->engine_idx = nr;
- lp5523_load_engine_and_select_page(chip);
- ret = lp5523_update_program_memory(chip, buf, len);
-
- mutex_unlock(&chip->lock);
-
- return ret;
-}
-store_load(1)
-store_load(2)
-store_load(3)
static ssize_t lp5523_selftest(struct device *dev,
struct device_attribute *attr,
@@ -588,16 +189,16 @@ static ssize_t lp5523_selftest(struct device *dev,
int ret, pos = 0;
u8 status, adc, vdd, i;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
- goto fail;
+ return sysfs_emit(buf, "FAIL\n");
/* Check that ext clock is really in use if requested */
if (pdata->clock_mode == LP55XX_CLOCK_EXT) {
if ((status & LP5523_EXT_CLK_USED) == 0)
- goto fail;
+ return sysfs_emit(buf, "FAIL\n");
}
/* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
@@ -605,14 +206,14 @@ static ssize_t lp5523_selftest(struct device *dev,
usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
- goto fail;
+ return sysfs_emit(buf, "FAIL\n");
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000); /* Was not ready. Wait little bit */
ret = lp55xx_read(chip, LP5523_REG_LED_TEST_ADC, &vdd);
if (ret < 0)
- goto fail;
+ return sysfs_emit(buf, "FAIL\n");
vdd--; /* There may be some fluctuation in measurement */
@@ -635,18 +236,18 @@ static ssize_t lp5523_selftest(struct device *dev,
usleep_range(3000, 6000);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
- goto fail;
+ return sysfs_emit(buf, "FAIL\n");
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000); /* Was not ready. Wait. */
ret = lp55xx_read(chip, LP5523_REG_LED_TEST_ADC, &adc);
if (ret < 0)
- goto fail;
+ return sysfs_emit(buf, "FAIL\n");
if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
- pos += sprintf(buf + pos, "LED %d FAIL\n",
- led->chan_nr);
+ pos += sysfs_emit_at(buf, pos, "LED %d FAIL\n",
+ led->chan_nr);
lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr,
0x00);
@@ -656,198 +257,25 @@ static ssize_t lp5523_selftest(struct device *dev,
led->led_current);
led++;
}
- if (pos == 0)
- pos = sprintf(buf, "OK\n");
- goto release_lock;
-fail:
- pos = sprintf(buf, "FAIL\n");
-
-release_lock:
- mutex_unlock(&chip->lock);
-
- return pos;
-}
-
-#define show_fader(nr) \
-static ssize_t show_master_fader##nr(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_master_fader(dev, attr, buf, nr); \
-}
-
-#define store_fader(nr) \
-static ssize_t store_master_fader##nr(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_master_fader(dev, attr, buf, len, nr); \
-}
-
-static ssize_t show_master_fader(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- int ret;
- u8 val;
- mutex_lock(&chip->lock);
- ret = lp55xx_read(chip, LP5523_REG_MASTER_FADER_BASE + nr - 1, &val);
- mutex_unlock(&chip->lock);
-
- if (ret == 0)
- ret = sprintf(buf, "%u\n", val);
-
- return ret;
+ return pos == 0 ? sysfs_emit(buf, "OK\n") : pos;
}
-show_fader(1)
-show_fader(2)
-show_fader(3)
-
-static ssize_t store_master_fader(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- int ret;
- unsigned long val;
-
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
-
- if (val > 0xff)
- return -EINVAL;
-
- mutex_lock(&chip->lock);
- ret = lp55xx_write(chip, LP5523_REG_MASTER_FADER_BASE + nr - 1,
- (u8)val);
- mutex_unlock(&chip->lock);
-
- if (ret == 0)
- ret = len;
- return ret;
-}
-store_fader(1)
-store_fader(2)
-store_fader(3)
-
-static ssize_t show_master_fader_leds(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- int i, ret, pos = 0;
- u8 val;
-
- mutex_lock(&chip->lock);
-
- for (i = 0; i < LP5523_MAX_LEDS; i++) {
- ret = lp55xx_read(chip, LP5523_REG_LED_CTRL_BASE + i, &val);
- if (ret)
- goto leave;
-
- val = (val & LP5523_FADER_MAPPING_MASK)
- >> LP5523_FADER_MAPPING_SHIFT;
- if (val > 3) {
- ret = -EINVAL;
- goto leave;
- }
- buf[pos++] = val + '0';
- }
- buf[pos++] = '\n';
- ret = pos;
-leave:
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static ssize_t store_master_fader_leds(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
- struct lp55xx_chip *chip = led->chip;
- int i, n, ret;
- u8 val;
-
- n = min_t(int, len, LP5523_MAX_LEDS);
-
- mutex_lock(&chip->lock);
-
- for (i = 0; i < n; i++) {
- if (buf[i] >= '0' && buf[i] <= '3') {
- val = (buf[i] - '0') << LP5523_FADER_MAPPING_SHIFT;
- ret = lp55xx_update_bits(chip,
- LP5523_REG_LED_CTRL_BASE + i,
- LP5523_FADER_MAPPING_MASK,
- val);
- if (ret)
- goto leave;
- } else {
- ret = -EINVAL;
- goto leave;
- }
- }
- ret = len;
-leave:
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static int lp5523_multicolor_brightness(struct lp55xx_led *led)
-{
- struct lp55xx_chip *chip = led->chip;
- int ret;
- int i;
-
- mutex_lock(&chip->lock);
- for (i = 0; i < led->mc_cdev.num_colors; i++) {
- ret = lp55xx_write(chip,
- LP5523_REG_LED_PWM_BASE +
- led->mc_cdev.subled_info[i].channel,
- led->mc_cdev.subled_info[i].brightness);
- if (ret)
- break;
- }
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static int lp5523_led_brightness(struct lp55xx_led *led)
-{
- struct lp55xx_chip *chip = led->chip;
- int ret;
-
- mutex_lock(&chip->lock);
- ret = lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr,
- led->brightness);
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static LP55XX_DEV_ATTR_RW(engine1_mode, show_engine1_mode, store_engine1_mode);
-static LP55XX_DEV_ATTR_RW(engine2_mode, show_engine2_mode, store_engine2_mode);
-static LP55XX_DEV_ATTR_RW(engine3_mode, show_engine3_mode, store_engine3_mode);
-static LP55XX_DEV_ATTR_RW(engine1_leds, show_engine1_leds, store_engine1_leds);
-static LP55XX_DEV_ATTR_RW(engine2_leds, show_engine2_leds, store_engine2_leds);
-static LP55XX_DEV_ATTR_RW(engine3_leds, show_engine3_leds, store_engine3_leds);
-static LP55XX_DEV_ATTR_WO(engine1_load, store_engine1_load);
-static LP55XX_DEV_ATTR_WO(engine2_load, store_engine2_load);
-static LP55XX_DEV_ATTR_WO(engine3_load, store_engine3_load);
+LP55XX_DEV_ATTR_ENGINE_MODE(1);
+LP55XX_DEV_ATTR_ENGINE_MODE(2);
+LP55XX_DEV_ATTR_ENGINE_MODE(3);
+LP55XX_DEV_ATTR_ENGINE_LEDS(1);
+LP55XX_DEV_ATTR_ENGINE_LEDS(2);
+LP55XX_DEV_ATTR_ENGINE_LEDS(3);
+LP55XX_DEV_ATTR_ENGINE_LOAD(1);
+LP55XX_DEV_ATTR_ENGINE_LOAD(2);
+LP55XX_DEV_ATTR_ENGINE_LOAD(3);
static LP55XX_DEV_ATTR_RO(selftest, lp5523_selftest);
-static LP55XX_DEV_ATTR_RW(master_fader1, show_master_fader1,
- store_master_fader1);
-static LP55XX_DEV_ATTR_RW(master_fader2, show_master_fader2,
- store_master_fader2);
-static LP55XX_DEV_ATTR_RW(master_fader3, show_master_fader3,
- store_master_fader3);
-static LP55XX_DEV_ATTR_RW(master_fader_leds, show_master_fader_leds,
- store_master_fader_leds);
+LP55XX_DEV_ATTR_MASTER_FADER(1);
+LP55XX_DEV_ATTR_MASTER_FADER(2);
+LP55XX_DEV_ATTR_MASTER_FADER(3);
+static LP55XX_DEV_ATTR_RW(master_fader_leds, lp55xx_show_master_fader_leds,
+ lp55xx_store_master_fader_leds);
static struct attribute *lp5523_attributes[] = {
&dev_attr_engine1_mode.attr,
@@ -873,6 +301,16 @@ static const struct attribute_group lp5523_group = {
/* Chip specific configurations */
static struct lp55xx_device_config lp5523_cfg = {
+ .reg_op_mode = {
+ .addr = LP5523_REG_OP_MODE,
+ },
+ .reg_exec = {
+ .addr = LP5523_REG_ENABLE,
+ },
+ .engine_busy = {
+ .addr = LP5523_REG_STATUS,
+ .mask = LP5523_ENGINE_BUSY,
+ },
.reset = {
.addr = LP5523_REG_RESET,
.val = LP5523_RESET,
@@ -881,100 +319,43 @@ static struct lp55xx_device_config lp5523_cfg = {
.addr = LP5523_REG_ENABLE,
.val = LP5523_ENABLE,
},
+ .prog_mem_base = {
+ .addr = LP5523_REG_PROG_MEM,
+ },
+ .reg_led_pwm_base = {
+ .addr = LP5523_REG_LED_PWM_BASE,
+ },
+ .reg_led_current_base = {
+ .addr = LP5523_REG_LED_CURRENT_BASE,
+ },
+ .reg_master_fader_base = {
+ .addr = LP5523_REG_MASTER_FADER_BASE,
+ },
+ .reg_led_ctrl_base = {
+ .addr = LP5523_REG_LED_CTRL_BASE,
+ },
+ .pages_per_engine = LP5523_PAGES_PER_ENGINE,
.max_channel = LP5523_MAX_LEDS,
.post_init_device = lp5523_post_init_device,
- .brightness_fn = lp5523_led_brightness,
- .multicolor_brightness_fn = lp5523_multicolor_brightness,
- .set_led_current = lp5523_set_led_current,
- .firmware_cb = lp5523_firmware_loaded,
+ .brightness_fn = lp55xx_led_brightness,
+ .multicolor_brightness_fn = lp55xx_multicolor_brightness,
+ .set_led_current = lp55xx_set_led_current,
+ .firmware_cb = lp55xx_firmware_loaded_cb,
.run_engine = lp5523_run_engine,
.dev_attr_group = &lp5523_group,
};
-static int lp5523_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- int ret;
- struct lp55xx_chip *chip;
- struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = dev_of_node(&client->dev);
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->cfg = &lp5523_cfg;
-
- if (!pdata) {
- if (np) {
- pdata = lp55xx_of_populate_pdata(&client->dev, np,
- chip);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else {
- dev_err(&client->dev, "no platform data\n");
- return -EINVAL;
- }
- }
-
- led = devm_kcalloc(&client->dev,
- pdata->num_channels, sizeof(*led), GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- chip->cl = client;
- chip->pdata = pdata;
-
- mutex_init(&chip->lock);
-
- i2c_set_clientdata(client, led);
-
- ret = lp55xx_init_device(chip);
- if (ret)
- goto err_init;
-
- dev_info(&client->dev, "%s Programmable led chip found\n", id->name);
-
- ret = lp55xx_register_leds(led, chip);
- if (ret)
- goto err_out;
-
- ret = lp55xx_register_sysfs(chip);
- if (ret) {
- dev_err(&client->dev, "registering sysfs failed\n");
- goto err_out;
- }
-
- return 0;
-
-err_out:
- lp55xx_deinit_device(chip);
-err_init:
- return ret;
-}
-
-static void lp5523_remove(struct i2c_client *client)
-{
- struct lp55xx_led *led = i2c_get_clientdata(client);
- struct lp55xx_chip *chip = led->chip;
-
- lp5523_stop_all_engines(chip);
- lp55xx_unregister_sysfs(chip);
- lp55xx_deinit_device(chip);
-}
-
static const struct i2c_device_id lp5523_id[] = {
- { "lp5523", LP5523 },
- { "lp55231", LP55231 },
+ { "lp5523", .driver_data = (kernel_ulong_t)&lp5523_cfg, },
+ { "lp55231", .driver_data = (kernel_ulong_t)&lp5523_cfg, },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp5523_id);
static const struct of_device_id of_lp5523_leds_match[] = {
- { .compatible = "national,lp5523", },
- { .compatible = "ti,lp55231", },
+ { .compatible = "national,lp5523", .data = &lp5523_cfg, },
+ { .compatible = "ti,lp55231", .data = &lp5523_cfg, },
{},
};
@@ -985,8 +366,8 @@ static struct i2c_driver lp5523_driver = {
.name = "lp5523x",
.of_match_table = of_lp5523_leds_match,
},
- .probe = lp5523_probe,
- .remove = lp5523_remove,
+ .probe = lp55xx_probe,
+ .remove = lp55xx_remove,
.id_table = lp5523_id,
};
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 39db9aeb67c5..b26bcc81d079 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -7,6 +7,7 @@
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
@@ -19,7 +20,6 @@
#include "leds-lp55xx-common.h"
-#define LP5562_PROGRAM_LENGTH 32
#define LP5562_MAX_LEDS 4
/* ENABLE Register 00h */
@@ -38,21 +38,6 @@
/* OPMODE Register 01h */
#define LP5562_REG_OP_MODE 0x01
-#define LP5562_MODE_ENG1_M 0x30
-#define LP5562_MODE_ENG2_M 0x0C
-#define LP5562_MODE_ENG3_M 0x03
-#define LP5562_LOAD_ENG1 0x10
-#define LP5562_LOAD_ENG2 0x04
-#define LP5562_LOAD_ENG3 0x01
-#define LP5562_RUN_ENG1 0x20
-#define LP5562_RUN_ENG2 0x08
-#define LP5562_RUN_ENG3 0x02
-#define LP5562_ENG1_IS_LOADING(mode) \
- ((mode & LP5562_MODE_ENG1_M) == LP5562_LOAD_ENG1)
-#define LP5562_ENG2_IS_LOADING(mode) \
- ((mode & LP5562_MODE_ENG2_M) == LP5562_LOAD_ENG2)
-#define LP5562_ENG3_IS_LOADING(mode) \
- ((mode & LP5562_MODE_ENG3_M) == LP5562_LOAD_ENG3)
/* BRIGHTNESS Registers */
#define LP5562_REG_R_PWM 0x04
@@ -124,160 +109,24 @@ static void lp5562_set_led_current(struct lp55xx_led *led, u8 led_current)
lp55xx_write(led->chip, addr[led->chan_nr], led_current);
}
-static void lp5562_load_engine(struct lp55xx_chip *chip)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- static const u8 mask[] = {
- [LP55XX_ENGINE_1] = LP5562_MODE_ENG1_M,
- [LP55XX_ENGINE_2] = LP5562_MODE_ENG2_M,
- [LP55XX_ENGINE_3] = LP5562_MODE_ENG3_M,
- };
-
- static const u8 val[] = {
- [LP55XX_ENGINE_1] = LP5562_LOAD_ENG1,
- [LP55XX_ENGINE_2] = LP5562_LOAD_ENG2,
- [LP55XX_ENGINE_3] = LP5562_LOAD_ENG3,
- };
-
- lp55xx_update_bits(chip, LP5562_REG_OP_MODE, mask[idx], val[idx]);
-
- lp5562_wait_opmode_done();
-}
-
-static void lp5562_stop_engine(struct lp55xx_chip *chip)
-{
- lp55xx_write(chip, LP5562_REG_OP_MODE, LP5562_CMD_DISABLE);
- lp5562_wait_opmode_done();
-}
-
static void lp5562_run_engine(struct lp55xx_chip *chip, bool start)
{
int ret;
- u8 mode;
- u8 exec;
/* stop engine */
if (!start) {
lp55xx_write(chip, LP5562_REG_ENABLE, LP5562_ENABLE_DEFAULT);
lp5562_wait_enable_done();
- lp5562_stop_engine(chip);
+ lp55xx_stop_all_engine(chip);
lp55xx_write(chip, LP5562_REG_ENG_SEL, LP5562_ENG_SEL_PWM);
lp55xx_write(chip, LP5562_REG_OP_MODE, LP5562_CMD_DIRECT);
lp5562_wait_opmode_done();
return;
}
- /*
- * To run the engine,
- * operation mode and enable register should updated at the same time
- */
-
- ret = lp55xx_read(chip, LP5562_REG_OP_MODE, &mode);
- if (ret)
- return;
-
- ret = lp55xx_read(chip, LP5562_REG_ENABLE, &exec);
- if (ret)
- return;
-
- /* change operation mode to RUN only when each engine is loading */
- if (LP5562_ENG1_IS_LOADING(mode)) {
- mode = (mode & ~LP5562_MODE_ENG1_M) | LP5562_RUN_ENG1;
- exec = (exec & ~LP5562_EXEC_ENG1_M) | LP5562_RUN_ENG1;
- }
-
- if (LP5562_ENG2_IS_LOADING(mode)) {
- mode = (mode & ~LP5562_MODE_ENG2_M) | LP5562_RUN_ENG2;
- exec = (exec & ~LP5562_EXEC_ENG2_M) | LP5562_RUN_ENG2;
- }
-
- if (LP5562_ENG3_IS_LOADING(mode)) {
- mode = (mode & ~LP5562_MODE_ENG3_M) | LP5562_RUN_ENG3;
- exec = (exec & ~LP5562_EXEC_ENG3_M) | LP5562_RUN_ENG3;
- }
-
- lp55xx_write(chip, LP5562_REG_OP_MODE, mode);
- lp5562_wait_opmode_done();
-
- lp55xx_update_bits(chip, LP5562_REG_ENABLE, LP5562_EXEC_M, exec);
- lp5562_wait_enable_done();
-}
-
-static int lp5562_update_firmware(struct lp55xx_chip *chip,
- const u8 *data, size_t size)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- u8 pattern[LP5562_PROGRAM_LENGTH] = {0};
- static const u8 addr[] = {
- [LP55XX_ENGINE_1] = LP5562_REG_PROG_MEM_ENG1,
- [LP55XX_ENGINE_2] = LP5562_REG_PROG_MEM_ENG2,
- [LP55XX_ENGINE_3] = LP5562_REG_PROG_MEM_ENG3,
- };
- unsigned cmd;
- char c[3];
- int program_size;
- int nrchars;
- int offset = 0;
- int ret;
- int i;
-
- /* clear program memory before updating */
- for (i = 0; i < LP5562_PROGRAM_LENGTH; i++)
- lp55xx_write(chip, addr[idx] + i, 0);
-
- i = 0;
- while ((offset < size - 1) && (i < LP5562_PROGRAM_LENGTH)) {
- /* separate sscanfs because length is working only for %s */
- ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
- if (ret != 1)
- goto err;
-
- ret = sscanf(c, "%2x", &cmd);
- if (ret != 1)
- goto err;
-
- pattern[i] = (u8)cmd;
- offset += nrchars;
- i++;
- }
-
- /* Each instruction is 16bit long. Check that length is even */
- if (i % 2)
- goto err;
-
- program_size = i;
- for (i = 0; i < program_size; i++)
- lp55xx_write(chip, addr[idx] + i, pattern[i]);
-
- return 0;
-
-err:
- dev_err(&chip->cl->dev, "wrong pattern format\n");
- return -EINVAL;
-}
-
-static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
-{
- const struct firmware *fw = chip->fw;
-
- /*
- * the firmware is encoded in ascii hex character, with 2 chars
- * per byte
- */
- if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
- dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
- fw->size);
- return;
- }
-
- /*
- * Program memory sequence
- * 1) set engine mode to "LOAD"
- * 2) write firmware data into program memory
- */
-
- lp5562_load_engine(chip);
- lp5562_update_firmware(chip, fw->data, fw->size);
+ ret = lp55xx_run_engine_common(chip);
+ if (!ret)
+ lp5562_wait_enable_done();
}
static int lp5562_post_init_device(struct lp55xx_chip *chip)
@@ -323,9 +172,9 @@ static int lp5562_led_brightness(struct lp55xx_led *led)
};
int ret;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
ret = lp55xx_write(chip, addr[led->chan_nr], led->brightness);
- mutex_unlock(&chip->lock);
return ret;
}
@@ -348,9 +197,9 @@ static void lp5562_write_program_memory(struct lp55xx_chip *chip,
/* check the size of program count */
static inline bool _is_pc_overflow(struct lp55xx_predef_pattern *ptn)
{
- return ptn->size_r >= LP5562_PROGRAM_LENGTH ||
- ptn->size_g >= LP5562_PROGRAM_LENGTH ||
- ptn->size_b >= LP5562_PROGRAM_LENGTH;
+ return ptn->size_r >= LP55xx_BYTES_PER_PAGE ||
+ ptn->size_g >= LP55xx_BYTES_PER_PAGE ||
+ ptn->size_b >= LP55xx_BYTES_PER_PAGE;
}
static int lp5562_run_predef_led_pattern(struct lp55xx_chip *chip, int mode)
@@ -369,7 +218,7 @@ static int lp5562_run_predef_led_pattern(struct lp55xx_chip *chip, int mode)
return -EINVAL;
}
- lp5562_stop_engine(chip);
+ lp55xx_stop_all_engine(chip);
/* Set LED map as RGB */
lp55xx_write(chip, LP5562_REG_ENG_SEL, LP5562_ENG_SEL_RGB);
@@ -377,7 +226,7 @@ static int lp5562_run_predef_led_pattern(struct lp55xx_chip *chip, int mode)
/* Load engines */
for (i = LP55XX_ENGINE_1; i <= LP55XX_ENGINE_3; i++) {
chip->engine_idx = i;
- lp5562_load_engine(chip);
+ lp55xx_load_engine(chip);
}
/* Clear program registers */
@@ -420,9 +269,9 @@ static ssize_t lp5562_store_pattern(struct device *dev,
if (mode > num_patterns || !ptn)
return -EINVAL;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
ret = lp5562_run_predef_led_pattern(chip, mode);
- mutex_unlock(&chip->lock);
if (ret)
return ret;
@@ -472,9 +321,9 @@ static ssize_t lp5562_store_engine_mux(struct device *dev,
return -EINVAL;
}
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
lp55xx_update_bits(chip, LP5562_REG_ENG_SEL, mask, val);
- mutex_unlock(&chip->lock);
return len;
}
@@ -495,6 +344,12 @@ static const struct attribute_group lp5562_group = {
/* Chip specific configurations */
static struct lp55xx_device_config lp5562_cfg = {
.max_channel = LP5562_MAX_LEDS,
+ .reg_op_mode = {
+ .addr = LP5562_REG_OP_MODE,
+ },
+ .reg_exec = {
+ .addr = LP5562_REG_ENABLE,
+ },
.reset = {
.addr = LP5562_REG_RESET,
.val = LP5562_RESET,
@@ -503,94 +358,25 @@ static struct lp55xx_device_config lp5562_cfg = {
.addr = LP5562_REG_ENABLE,
.val = LP5562_ENABLE_DEFAULT,
},
+ .prog_mem_base = {
+ .addr = LP5562_REG_PROG_MEM_ENG1,
+ },
.post_init_device = lp5562_post_init_device,
.set_led_current = lp5562_set_led_current,
.brightness_fn = lp5562_led_brightness,
.run_engine = lp5562_run_engine,
- .firmware_cb = lp5562_firmware_loaded,
+ .firmware_cb = lp55xx_firmware_loaded_cb,
.dev_attr_group = &lp5562_group,
};
-static int lp5562_probe(struct i2c_client *client)
-{
- int ret;
- struct lp55xx_chip *chip;
- struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = dev_of_node(&client->dev);
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->cfg = &lp5562_cfg;
-
- if (!pdata) {
- if (np) {
- pdata = lp55xx_of_populate_pdata(&client->dev, np,
- chip);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else {
- dev_err(&client->dev, "no platform data\n");
- return -EINVAL;
- }
- }
-
-
- led = devm_kcalloc(&client->dev,
- pdata->num_channels, sizeof(*led), GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- chip->cl = client;
- chip->pdata = pdata;
-
- mutex_init(&chip->lock);
-
- i2c_set_clientdata(client, led);
-
- ret = lp55xx_init_device(chip);
- if (ret)
- goto err_init;
-
- ret = lp55xx_register_leds(led, chip);
- if (ret)
- goto err_out;
-
- ret = lp55xx_register_sysfs(chip);
- if (ret) {
- dev_err(&client->dev, "registering sysfs failed\n");
- goto err_out;
- }
-
- return 0;
-
-err_out:
- lp55xx_deinit_device(chip);
-err_init:
- return ret;
-}
-
-static void lp5562_remove(struct i2c_client *client)
-{
- struct lp55xx_led *led = i2c_get_clientdata(client);
- struct lp55xx_chip *chip = led->chip;
-
- lp5562_stop_engine(chip);
-
- lp55xx_unregister_sysfs(chip);
- lp55xx_deinit_device(chip);
-}
-
static const struct i2c_device_id lp5562_id[] = {
- { "lp5562", 0 },
+ { "lp5562", .driver_data = (kernel_ulong_t)&lp5562_cfg, },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp5562_id);
static const struct of_device_id of_lp5562_leds_match[] = {
- { .compatible = "ti,lp5562", },
+ { .compatible = "ti,lp5562", .data = &lp5562_cfg, },
{},
};
@@ -601,8 +387,8 @@ static struct i2c_driver lp5562_driver = {
.name = "lp5562",
.of_match_table = of_lp5562_leds_match,
},
- .probe = lp5562_probe,
- .remove = lp5562_remove,
+ .probe = lp55xx_probe,
+ .remove = lp55xx_remove,
.id_table = lp5562_id,
};
diff --git a/drivers/leds/leds-lp5569.c b/drivers/leds/leds-lp5569.c
new file mode 100644
index 000000000000..786f2aa35319
--- /dev/null
+++ b/drivers/leds/leds-lp5569.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_data/leds-lp55xx.h>
+#include <linux/slab.h>
+#include <dt-bindings/leds/leds-lp55xx.h>
+
+#include "leds-lp55xx-common.h"
+
+#define LP5569_MAX_LEDS 9
+
+/* Memory is used like this:
+ * 0x00 engine 1 program (4 pages)
+ * 0x40 engine 2 program (4 pages)
+ * 0x80 engine 3 program (4 pages)
+ * 0xc0 engine 1 muxing info (1 page)
+ * 0xd0 engine 2 muxing info (1 page)
+ * 0xe0 engine 3 muxing info (1 page)
+ */
+#define LP5569_PAGES_PER_ENGINE 4
+
+#define LP5569_REG_ENABLE 0x00
+#define LP5569_ENABLE BIT(6)
+
+#define LP5569_REG_EXEC_CTRL 0x01
+#define LP5569_MODE_ENG_SHIFT 2
+
+#define LP5569_REG_OP_MODE 0x02
+#define LP5569_EXEC_ENG_SHIFT 2
+
+#define LP5569_REG_ENABLE_LEDS_MSB 0x04
+#define LP5569_REG_ENABLE_LEDS_LSB 0x05
+#define LP5569_REG_LED_CTRL_BASE 0x07
+#define LP5569_FADER_MAPPING_MASK GENMASK(7, 5)
+#define LP5569_REG_LED_PWM_BASE 0x16
+#define LP5569_REG_LED_CURRENT_BASE 0x22
+#define LP5569_REG_MISC 0x2F
+#define LP5569_AUTO_INC BIT(6)
+#define LP5569_PWR_SAVE BIT(5)
+#define LP5569_CP_MODE_MASK GENMASK(4, 3)
+#define LP5569_PWM_PWR_SAVE BIT(2)
+#define LP5569_INTERNAL_CLK BIT(0)
+#define LP5569_REG_MISC2 0x33
+#define LP5569_LED_SHORT_TEST BIT(4)
+#define LP5569_LED_OPEN_TEST BIT(3)
+#define LP5569_REG_STATUS 0x3C
+#define LP5569_MASK_BUSY BIT(7)
+#define LP5569_STARTUP_BUSY BIT(6)
+#define LP5569_ENGINE_BUSY BIT(5)
+#define LP5569_ENGINE1_INT BIT(2)
+#define LP5569_ENGINE2_INT BIT(1)
+#define LP5569_ENGINE3_INT BIT(0)
+#define LP5569_ENG_STATUS_MASK (LP5569_ENGINE1_INT | LP5569_ENGINE2_INT | \
+ LP5569_ENGINE3_INT)
+#define LP5569_REG_IO_CONTROL 0x3D
+#define LP5569_CLK_OUTPUT BIT(3)
+#define LP5569_REG_RESET 0x3F
+#define LP5569_RESET 0xFF
+#define LP5569_REG_MASTER_FADER_BASE 0x46
+#define LP5569_REG_CH1_PROG_START 0x4B
+#define LP5569_REG_CH2_PROG_START 0x4C
+#define LP5569_REG_CH3_PROG_START 0x4D
+#define LP5569_REG_PROG_PAGE_SEL 0x4F
+#define LP5569_REG_PROG_MEM 0x50
+#define LP5569_REG_LED_FAULT1 0x81
+#define LP5569_LED_FAULT8 BIT(0)
+#define LP5569_REG_LED_FAULT2 0x82
+#define LP5569_LED_FAULT7 BIT(7)
+#define LP5569_LED_FAULT6 BIT(6)
+#define LP5569_LED_FAULT5 BIT(5)
+#define LP5569_LED_FAULT4 BIT(4)
+#define LP5569_LED_FAULT3 BIT(3)
+#define LP5569_LED_FAULT2 BIT(2)
+#define LP5569_LED_FAULT1 BIT(1)
+#define LP5569_LED_FAULT0 BIT(0)
+
+#define LP5569_ENG1_PROG_ADDR 0x0
+#define LP5569_ENG2_PROG_ADDR 0x40
+#define LP5569_ENG3_PROG_ADDR 0x80
+#define LP5569_ENG1_MUX_ADDR 0xc0
+#define LP5569_ENG2_MUX_ADDR 0xd0
+#define LP5569_ENG3_MUX_ADDR 0xe0
+
+#define LP5569_STARTUP_SLEEP 500
+
+#define LEDn_STATUS_FAULT(n, status) ((status) >> (n) & BIT(0))
+
+#define LP5569_DEFAULT_CONFIG \
+ (LP5569_AUTO_INC | LP5569_PWR_SAVE | LP5569_PWM_PWR_SAVE)
+
+static void lp5569_run_engine(struct lp55xx_chip *chip, bool start)
+{
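+	/* stop engine */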
+ if (!start) {
+ lp55xx_stop_engine(chip);
+ lp55xx_turn_off_channels(chip);
+ return;
+ }
+
+ lp55xx_run_engine_common(chip);
+}
+
+static int lp5569_init_program_engine(struct lp55xx_chip *chip)
+{
+ int i;
+ int j;
+ int ret;
+ u8 status;
+ /* Precompiled pattern per ENGINE setting LED MUX start and stop addresses */
+ static const u8 pattern[][LP55xx_BYTES_PER_PAGE] = {
+ { 0x9c, LP5569_ENG1_MUX_ADDR, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ { 0x9c, LP5569_ENG2_MUX_ADDR, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ { 0x9c, LP5569_ENG3_MUX_ADDR, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ };
+
+ /* Setup each ENGINE program start address */
+ ret = lp55xx_write(chip, LP5569_REG_CH1_PROG_START, LP5569_ENG1_PROG_ADDR);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, LP5569_REG_CH2_PROG_START, LP5569_ENG2_PROG_ADDR);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, LP5569_REG_CH3_PROG_START, LP5569_ENG3_PROG_ADDR);
+ if (ret)
+ return ret;
+
+ /* Write precompiled pattern for LED MUX address space for each ENGINE */
+ for (i = LP55XX_ENGINE_1; i <= LP55XX_ENGINE_3; i++) {
+ chip->engine_idx = i;
+ lp55xx_load_engine(chip);
+
+ for (j = 0; j < LP55xx_BYTES_PER_PAGE; j++) {
+ ret = lp55xx_write(chip, LP5569_REG_PROG_MEM + j,
+ pattern[i - 1][j]);
+ if (ret)
+ goto out;
+ }
+ }
+
+ lp5569_run_engine(chip, true);
+
+	/* Let the programs run for a couple of ms and check the engine status */
+ usleep_range(3000, 6000);
+ lp55xx_read(chip, LP5569_REG_STATUS, &status);
+ status = FIELD_GET(LP5569_ENG_STATUS_MASK, status);
+
+ if (status != LP5569_ENG_STATUS_MASK) {
+ dev_err(&chip->cl->dev,
+ "could not configure LED engine, status = 0x%.2x\n",
+ status);
+ ret = -EINVAL;
+ }
+
+out:
+ lp55xx_stop_all_engine(chip);
+ return ret;
+}
+
+static int lp5569_post_init_device(struct lp55xx_chip *chip)
+{
+ int ret;
+ u8 val;
+
+ val = LP5569_DEFAULT_CONFIG;
+ val |= FIELD_PREP(LP5569_CP_MODE_MASK, chip->pdata->charge_pump_mode);
+ ret = lp55xx_write(chip, LP5569_REG_MISC, val);
+ if (ret)
+ return ret;
+
+ if (chip->pdata->clock_mode == LP55XX_CLOCK_INT) {
+ /* Internal clock MUST be configured before CLK output */
+ ret = lp55xx_update_bits(chip, LP5569_REG_MISC,
+ LP5569_INTERNAL_CLK,
+ LP5569_INTERNAL_CLK);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_update_bits(chip, LP5569_REG_IO_CONTROL,
+ LP5569_CLK_OUTPUT,
+ LP5569_CLK_OUTPUT);
+ if (ret)
+ return ret;
+ }
+
+ ret = lp55xx_write(chip, LP5569_REG_ENABLE, LP5569_ENABLE);
+ if (ret)
+ return ret;
+
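+	/* Wait for the chip to clear its STARTUP_BUSY flag before loading the engines */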
+ read_poll_timeout(lp55xx_read, ret, !(val & LP5569_STARTUP_BUSY),
+ LP5569_STARTUP_SLEEP, LP5569_STARTUP_SLEEP * 10, false,
+ chip, LP5569_REG_STATUS, &val);
+
+ return lp5569_init_program_engine(chip);
+}
+
+static ssize_t lp5569_led_open_test(struct lp55xx_led *led, char *buf)
+{
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ bool leds_fault[LP5569_MAX_LEDS];
+ struct lp55xx_led *led_tmp = led;
+ int i, ret, pos = 0;
+ u8 status;
+
+ /* Set in STANDBY state */
+ ret = lp55xx_write(chip, LP5569_REG_ENABLE, 0);
+ if (ret)
+ goto exit;
+
+ /* Wait 1ms for device to enter STANDBY state */
+ usleep_range(1000, 2000);
+
+ /* Set Charge Pump to 1.5x */
+	ret = lp55xx_update_bits(chip, LP5569_REG_MISC, LP5569_CP_MODE_MASK,
+				 FIELD_PREP(LP5569_CP_MODE_MASK, LP55XX_CP_BOOST));
+ if (ret)
+ goto exit;
+
+ /* Enable LED Open Test */
+ ret = lp55xx_update_bits(chip, LP5569_REG_MISC2, LP5569_LED_OPEN_TEST,
+ LP5569_LED_OPEN_TEST);
+ if (ret)
+ goto exit;
+
+ /* Put Device in NORMAL state */
+ ret = lp55xx_write(chip, LP5569_REG_ENABLE, LP5569_ENABLE);
+ if (ret)
+ goto exit;
+
+ /* Wait 500 us for device to enter NORMAL state */
+ usleep_range(500, 750);
+
+ /* Enable LED and set to 100% brightness */
+ for (i = 0; i < pdata->num_channels; i++) {
+ ret = lp55xx_write(chip, LP5569_REG_LED_PWM_BASE + led_tmp->chan_nr,
+ LED_FULL);
+ if (ret)
+ goto exit;
+
+ led_tmp++;
+ }
+
+ /* Wait 500 us for device to fill status regs */
+ usleep_range(500, 750);
+
+ /* Parse status led fault 1 regs */
+ ret = lp55xx_read(chip, LP5569_REG_LED_FAULT1, &status);
+ if (ret < 0)
+ goto exit;
+
+ for (i = 0; i < 8; i++)
+		leds_fault[i] = !!LEDn_STATUS_FAULT(i, status);
+
+ /* Parse status led fault 2 regs */
+ ret = lp55xx_read(chip, LP5569_REG_LED_FAULT2, &status);
+ if (ret < 0)
+ goto exit;
+
+ for (i = 0; i < 1; i++)
+		leds_fault[i + 8] = !!LEDn_STATUS_FAULT(i, status);
+
+ /* Report LED fault */
+ led_tmp = led;
+ for (i = 0; i < pdata->num_channels; i++) {
+ if (leds_fault[led_tmp->chan_nr])
+ pos += sysfs_emit_at(buf, pos, "LED %d OPEN FAIL\n",
+ led_tmp->chan_nr);
+
+ led_tmp++;
+ }
+
+ ret = pos;
+
+exit:
+ /* Disable LED Open Test */
+ lp55xx_update_bits(chip, LP5569_REG_MISC2, LP5569_LED_OPEN_TEST, 0);
+
+ led_tmp = led;
+ for (i = 0; i < pdata->num_channels; i++) {
+ lp55xx_write(chip, LP5569_REG_LED_PWM_BASE + led_tmp->chan_nr, 0);
+
+ led_tmp++;
+ }
+
+ return ret;
+}
+
+static ssize_t lp5569_led_short_test(struct lp55xx_led *led, char *buf)
+{
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ bool leds_fault[LP5569_MAX_LEDS];
+ struct lp55xx_led *led_tmp = led;
+ int i, ret, pos = 0;
+ u8 status;
+
+ /* Set in STANDBY state */
+ ret = lp55xx_write(chip, LP5569_REG_ENABLE, 0);
+ if (ret)
+ goto exit;
+
+ /* Wait 1ms for device to enter STANDBY state */
+ usleep_range(1000, 2000);
+
+ /* Set Charge Pump to 1x */
+	ret = lp55xx_update_bits(chip, LP5569_REG_MISC, LP5569_CP_MODE_MASK,
+				 FIELD_PREP(LP5569_CP_MODE_MASK, LP55XX_CP_BYPASS));
+ if (ret)
+ goto exit;
+
+ /* Enable LED and set to 100% brightness and current to 100% (25.5mA) */
+ for (i = 0; i < pdata->num_channels; i++) {
+ ret = lp55xx_write(chip, LP5569_REG_LED_PWM_BASE + led_tmp->chan_nr,
+ LED_FULL);
+ if (ret)
+ goto exit;
+
+ ret = lp55xx_write(chip, LP5569_REG_LED_CURRENT_BASE + led_tmp->chan_nr,
+ LED_FULL);
+ if (ret)
+ goto exit;
+
+ led_tmp++;
+ }
+
+ /* Put Device in NORMAL state */
+ ret = lp55xx_write(chip, LP5569_REG_ENABLE, LP5569_ENABLE);
+ if (ret)
+ goto exit;
+
+ /* Wait 500 us for device to enter NORMAL state */
+ usleep_range(500, 750);
+
+ /* Enable LED Shorted Test */
+	ret = lp55xx_update_bits(chip, LP5569_REG_MISC2, LP5569_LED_SHORT_TEST,
+				 LP5569_LED_SHORT_TEST);
+ if (ret)
+ goto exit;
+
+ /* Wait 500 us for device to fill status regs */
+ usleep_range(500, 750);
+
+ /* Parse status led fault 1 regs */
+ ret = lp55xx_read(chip, LP5569_REG_LED_FAULT1, &status);
+ if (ret < 0)
+ goto exit;
+
+ for (i = 0; i < 8; i++)
+ leds_fault[i] = !!LEDn_STATUS_FAULT(i, status);
+
+ /* Parse status led fault 2 regs */
+ ret = lp55xx_read(chip, LP5569_REG_LED_FAULT2, &status);
+ if (ret < 0)
+ goto exit;
+
+ for (i = 0; i < 1; i++)
+ leds_fault[i + 8] = !!LEDn_STATUS_FAULT(i, status);
+
+ /* Report LED fault */
+ led_tmp = led;
+ for (i = 0; i < pdata->num_channels; i++) {
+ if (leds_fault[led_tmp->chan_nr])
+ pos += sysfs_emit_at(buf, pos, "LED %d SHORTED FAIL\n",
+ led_tmp->chan_nr);
+
+ led_tmp++;
+ }
+
+ ret = pos;
+
+exit:
+ /* Disable LED Shorted Test */
+ lp55xx_update_bits(chip, LP5569_REG_MISC2, LP5569_LED_SHORT_TEST, 0);
+
+ led_tmp = led;
+ for (i = 0; i < pdata->num_channels; i++) {
+ lp55xx_write(chip, LP5569_REG_LED_PWM_BASE + led_tmp->chan_nr, 0);
+
+ led_tmp++;
+ }
+
+ return ret;
+}
+
+static ssize_t lp5569_selftest(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ int i, pos = 0;
+
+ guard(mutex)(&chip->lock);
+
+ /* Test LED Open */
+ pos = lp5569_led_open_test(led, buf);
+ if (pos < 0)
+		return sysfs_emit(buf, "FAIL\n");
+
+ /* Test LED Shorted */
+ pos += lp5569_led_short_test(led, buf);
+ if (pos < 0)
+		return sysfs_emit(buf, "FAIL\n");
+
+ for (i = 0; i < chip->pdata->num_channels; i++) {
+ /* Restore current */
+ lp55xx_write(chip, LP5569_REG_LED_CURRENT_BASE + led->chan_nr,
+ led->led_current);
+
+ /* Restore brightness */
+ lp55xx_write(chip, LP5569_REG_LED_PWM_BASE + led->chan_nr,
+ led->brightness);
+ led++;
+ }
+
+ return pos == 0 ? sysfs_emit(buf, "OK\n") : pos;
+}
+
+LP55XX_DEV_ATTR_ENGINE_MODE(1);
+LP55XX_DEV_ATTR_ENGINE_MODE(2);
+LP55XX_DEV_ATTR_ENGINE_MODE(3);
+LP55XX_DEV_ATTR_ENGINE_LEDS(1);
+LP55XX_DEV_ATTR_ENGINE_LEDS(2);
+LP55XX_DEV_ATTR_ENGINE_LEDS(3);
+LP55XX_DEV_ATTR_ENGINE_LOAD(1);
+LP55XX_DEV_ATTR_ENGINE_LOAD(2);
+LP55XX_DEV_ATTR_ENGINE_LOAD(3);
+static LP55XX_DEV_ATTR_RO(selftest, lp5569_selftest);
+LP55XX_DEV_ATTR_MASTER_FADER(1);
+LP55XX_DEV_ATTR_MASTER_FADER(2);
+LP55XX_DEV_ATTR_MASTER_FADER(3);
+static LP55XX_DEV_ATTR_RW(master_fader_leds, lp55xx_show_master_fader_leds,
+ lp55xx_store_master_fader_leds);
+
+static struct attribute *lp5569_attributes[] = {
+ &dev_attr_engine1_mode.attr,
+ &dev_attr_engine2_mode.attr,
+ &dev_attr_engine3_mode.attr,
+ &dev_attr_engine1_load.attr,
+ &dev_attr_engine2_load.attr,
+ &dev_attr_engine3_load.attr,
+ &dev_attr_engine1_leds.attr,
+ &dev_attr_engine2_leds.attr,
+ &dev_attr_engine3_leds.attr,
+ &dev_attr_selftest.attr,
+ &dev_attr_master_fader1.attr,
+ &dev_attr_master_fader2.attr,
+ &dev_attr_master_fader3.attr,
+ &dev_attr_master_fader_leds.attr,
+ NULL,
+};
+
+static const struct attribute_group lp5569_group = {
+ .attrs = lp5569_attributes,
+};
+
+/* Chip specific configurations */
+static struct lp55xx_device_config lp5569_cfg = {
+ .reg_op_mode = {
+ .addr = LP5569_REG_OP_MODE,
+ .shift = LP5569_MODE_ENG_SHIFT,
+ },
+ .reg_exec = {
+ .addr = LP5569_REG_EXEC_CTRL,
+ .shift = LP5569_EXEC_ENG_SHIFT,
+ },
+ .reset = {
+ .addr = LP5569_REG_RESET,
+ .val = LP5569_RESET,
+ },
+ .enable = {
+ .addr = LP5569_REG_ENABLE,
+ .val = LP5569_ENABLE,
+ },
+ .prog_mem_base = {
+ .addr = LP5569_REG_PROG_MEM,
+ },
+ .reg_led_pwm_base = {
+ .addr = LP5569_REG_LED_PWM_BASE,
+ },
+ .reg_led_current_base = {
+ .addr = LP5569_REG_LED_CURRENT_BASE,
+ },
+ .reg_master_fader_base = {
+ .addr = LP5569_REG_MASTER_FADER_BASE,
+ },
+ .reg_led_ctrl_base = {
+ .addr = LP5569_REG_LED_CTRL_BASE,
+ },
+ .pages_per_engine = LP5569_PAGES_PER_ENGINE,
+ .max_channel = LP5569_MAX_LEDS,
+ .post_init_device = lp5569_post_init_device,
+ .brightness_fn = lp55xx_led_brightness,
+ .multicolor_brightness_fn = lp55xx_multicolor_brightness,
+ .set_led_current = lp55xx_set_led_current,
+ .firmware_cb = lp55xx_firmware_loaded_cb,
+ .run_engine = lp5569_run_engine,
+ .dev_attr_group = &lp5569_group,
+};
+
+static const struct i2c_device_id lp5569_id[] = {
+ { "lp5569", .driver_data = (kernel_ulong_t)&lp5569_cfg, },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, lp5569_id);
+
+static const struct of_device_id of_lp5569_leds_match[] = {
+ { .compatible = "ti,lp5569", .data = &lp5569_cfg, },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_lp5569_leds_match);
+
+static struct i2c_driver lp5569_driver = {
+ .driver = {
+ .name = "lp5569",
+ .of_match_table = of_lp5569_leds_match,
+ },
+ .probe = lp55xx_probe,
+ .remove = lp55xx_remove,
+ .id_table = lp5569_id,
+};
+
+module_i2c_driver(lp5569_driver);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("LP5569 LED engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 8e7074f0fee0..29e7142dca72 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -9,10 +9,13 @@
* Derived from leds-lp5521.c, leds-lp5523.c
*/
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_data/leds-lp55xx.h>
@@ -22,6 +25,50 @@
#include "leds-lp55xx-common.h"
+/* An OP MODE change requires at least 153 us to clear the regs */
+#define LP55XX_CMD_SLEEP 200
+
+#define LP55xx_PROGRAM_PAGES 16
+#define LP55xx_MAX_PROGRAM_LENGTH (LP55xx_BYTES_PER_PAGE * 4) /* 128 bytes (4 pages) */
+
+/*
+ * Program Memory Operations
+ * Each engine uses the same 2-bit field layout in both the mode and exec
+ * registers. With the default shift of 0:
+ * ENG1 GENMASK(5, 4)
+ * ENG2 GENMASK(3, 2)
+ * ENG3 GENMASK(1, 0)
+ * Chips with a non-zero per-chip shift (e.g. LP5569) move these fields up.
+ */
+#define LP55xx_MODE_DISABLE_ALL_ENG 0x0
+#define LP55xx_MODE_ENG_MASK GENMASK(1, 0)
+#define LP55xx_MODE_DISABLE_ENG FIELD_PREP_CONST(LP55xx_MODE_ENG_MASK, 0x0)
+#define LP55xx_MODE_LOAD_ENG FIELD_PREP_CONST(LP55xx_MODE_ENG_MASK, 0x1)
+#define LP55xx_MODE_RUN_ENG FIELD_PREP_CONST(LP55xx_MODE_ENG_MASK, 0x2)
+#define LP55xx_MODE_HALT_ENG FIELD_PREP_CONST(LP55xx_MODE_ENG_MASK, 0x3)
+
+#define LP55xx_MODE_ENGn_SHIFT(n, shift) ((shift) + (2 * (3 - (n))))
+#define LP55xx_MODE_ENGn_MASK(n, shift) (LP55xx_MODE_ENG_MASK << LP55xx_MODE_ENGn_SHIFT(n, shift))
+#define LP55xx_MODE_ENGn_GET(n, mode, shift) \
+ (((mode) >> LP55xx_MODE_ENGn_SHIFT(n, shift)) & LP55xx_MODE_ENG_MASK)
+
+#define LP55xx_EXEC_ENG_MASK GENMASK(1, 0)
+#define LP55xx_EXEC_HOLD_ENG FIELD_PREP_CONST(LP55xx_EXEC_ENG_MASK, 0x0)
+#define LP55xx_EXEC_STEP_ENG FIELD_PREP_CONST(LP55xx_EXEC_ENG_MASK, 0x1)
+#define LP55xx_EXEC_RUN_ENG FIELD_PREP_CONST(LP55xx_EXEC_ENG_MASK, 0x2)
+#define LP55xx_EXEC_ONCE_ENG FIELD_PREP_CONST(LP55xx_EXEC_ENG_MASK, 0x3)
+
+#define LP55xx_EXEC_ENGn_SHIFT(n, shift) ((shift) + (2 * (3 - (n))))
+#define LP55xx_EXEC_ENGn_MASK(n, shift) (LP55xx_EXEC_ENG_MASK << LP55xx_EXEC_ENGn_SHIFT(n, shift))
+
+/* Memory Page Selection */
+#define LP55xx_REG_PROG_PAGE_SEL 0x4f
+/* If supported, each ENGINE has an equal number of pages, offset from page 0 */
+#define LP55xx_PAGE_OFFSET(n, pages) (((n) - 1) * (pages))
+
+#define LED_ACTIVE(mux, led) (!!((mux) & (0x0001 << (led))))
+
+/* MASTER FADER common property */
+#define LP55xx_FADER_MAPPING_MASK GENMASK(7, 6)
+
/* External clock rate */
#define LP55XX_CLK_32K 32768
@@ -40,9 +87,259 @@ static struct lp55xx_led *mcled_cdev_to_led(struct led_classdev_mc *mc_cdev)
return container_of(mc_cdev, struct lp55xx_led, mc_cdev);
}
+static void lp55xx_wait_opmode_done(struct lp55xx_chip *chip)
+{
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int __always_unused ret;
+ u8 val;
+
+ /*
+	 * Recent chips support a BUSY bit for the engines.
+	 * Check for support by checking that engine_busy is configured (non-zero).
+	 * For legacy devices, sleep for at least 153 us instead.
+ */
+ if (cfg->engine_busy.val) {
+ read_poll_timeout(lp55xx_read, ret, !(val & cfg->engine_busy.mask),
+ LP55XX_CMD_SLEEP, LP55XX_CMD_SLEEP * 10, false,
+ chip, cfg->engine_busy.addr, &val);
+ } else {
+ usleep_range(LP55XX_CMD_SLEEP, LP55XX_CMD_SLEEP * 2);
+ }
+}
+
+void lp55xx_stop_all_engine(struct lp55xx_chip *chip)
+{
+ const struct lp55xx_device_config *cfg = chip->cfg;
+
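+	/* Writing 0 puts every engine's OP MODE field into DISABLED */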
+ lp55xx_write(chip, cfg->reg_op_mode.addr, LP55xx_MODE_DISABLE_ALL_ENG);
+ lp55xx_wait_opmode_done(chip);
+}
+EXPORT_SYMBOL_GPL(lp55xx_stop_all_engine);
+
+void lp55xx_load_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ u8 mask, val;
+
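+	/* Put only the selected engine's 2-bit OP MODE field into LOAD */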
+ mask = LP55xx_MODE_ENGn_MASK(idx, cfg->reg_op_mode.shift);
+ val = LP55xx_MODE_LOAD_ENG << LP55xx_MODE_ENGn_SHIFT(idx, cfg->reg_op_mode.shift);
+
+ lp55xx_update_bits(chip, cfg->reg_op_mode.addr, mask, val);
+ lp55xx_wait_opmode_done(chip);
+
+	/* Set up the PAGE if supported (pages_per_engine not 0) */
+ if (cfg->pages_per_engine)
+ lp55xx_write(chip, LP55xx_REG_PROG_PAGE_SEL,
+ LP55xx_PAGE_OFFSET(idx, cfg->pages_per_engine));
+}
+EXPORT_SYMBOL_GPL(lp55xx_load_engine);
+
+int lp55xx_run_engine_common(struct lp55xx_chip *chip)
+{
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ u8 mode, exec;
+ int i, ret;
+
+	/* To run the engine, both OP MODE and EXEC need to be put in RUN mode */
+ ret = lp55xx_read(chip, cfg->reg_op_mode.addr, &mode);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_read(chip, cfg->reg_exec.addr, &exec);
+ if (ret)
+ return ret;
+
+	/* Switch to RUN only for engines that were previously put in LOAD */
+ for (i = LP55XX_ENGINE_1; i <= LP55XX_ENGINE_3; i++) {
+ if (LP55xx_MODE_ENGn_GET(i, mode, cfg->reg_op_mode.shift) != LP55xx_MODE_LOAD_ENG)
+ continue;
+
+ mode &= ~LP55xx_MODE_ENGn_MASK(i, cfg->reg_op_mode.shift);
+ mode |= LP55xx_MODE_RUN_ENG << LP55xx_MODE_ENGn_SHIFT(i, cfg->reg_op_mode.shift);
+ exec &= ~LP55xx_EXEC_ENGn_MASK(i, cfg->reg_exec.shift);
+ exec |= LP55xx_EXEC_RUN_ENG << LP55xx_EXEC_ENGn_SHIFT(i, cfg->reg_exec.shift);
+ }
+
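+	/* Commit OP MODE first, wait for it to latch, then start execution via EXEC */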
+ lp55xx_write(chip, cfg->reg_op_mode.addr, mode);
+ lp55xx_wait_opmode_done(chip);
+ lp55xx_write(chip, cfg->reg_exec.addr, exec);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lp55xx_run_engine_common);
+
+int lp55xx_update_program_memory(struct lp55xx_chip *chip,
+ const u8 *data, size_t size)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ u8 pattern[LP55xx_MAX_PROGRAM_LENGTH] = { };
+ u8 start_addr = cfg->prog_mem_base.addr;
+ int page, i = 0, offset = 0;
+ int program_length, ret;
+
+ program_length = LP55xx_BYTES_PER_PAGE;
+ if (cfg->pages_per_engine)
+ program_length *= cfg->pages_per_engine;
+
+ while ((offset < size - 1) && (i < program_length)) {
+ unsigned int cmd;
+ int nrchars;
+ char c[3];
+
+		/* separate sscanf calls because the length specifier works only for %s */
+ ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
+ if (ret != 1)
+ goto err;
+
+ ret = sscanf(c, "%2x", &cmd);
+ if (ret != 1)
+ goto err;
+
+ pattern[i] = (u8)cmd;
+ offset += nrchars;
+ i++;
+ }
+
+	/* Each instruction is 16 bits long. Check that the length is even */
+ if (i % 2)
+ goto err;
+
+ /*
+	 * For legacy LED chips with no page support, the engine base addresses
+	 * follow one another at an offset of 32 bytes.
+	 * For LED chips that support pages, the PAGE is already set in load_engine.
+ */
+ if (!cfg->pages_per_engine)
+ start_addr += LP55xx_BYTES_PER_PAGE * idx;
+
+ for (page = 0; page < program_length / LP55xx_BYTES_PER_PAGE; page++) {
+		/* Advance to the next page every 32 bytes (if supported) */
+ if (cfg->pages_per_engine)
+ lp55xx_write(chip, LP55xx_REG_PROG_PAGE_SEL,
+ LP55xx_PAGE_OFFSET(idx, cfg->pages_per_engine) + page);
+
+ for (i = 0; i < LP55xx_BYTES_PER_PAGE; i++) {
+ ret = lp55xx_write(chip, start_addr + i,
+ pattern[i + (page * LP55xx_BYTES_PER_PAGE)]);
+ if (ret)
+ return -EINVAL;
+ }
+ }
+
+ return size;
+
+err:
+ dev_err(&chip->cl->dev, "wrong pattern format\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(lp55xx_update_program_memory);
+
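
As a hedged example of the text format lp55xx_update_program_memory() accepts
(the opcodes below are placeholders, not taken from a datasheet):

	/*
	 * "9d80 40ff 7e00 a001" parses into the bytes
	 * 0x9d 0x80 0x40 0xff 0x7e 0x00 0xa0 0x01,
	 * i.e. four 16-bit instructions, so the even-length check passes and
	 * the eight bytes are written starting at prog_mem_base (page offset 0
	 * for ENGINE 1 on paged chips).
	 */
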
+void lp55xx_firmware_loaded_cb(struct lp55xx_chip *chip)
+{
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ const struct firmware *fw = chip->fw;
+ int program_length;
+
+ program_length = LP55xx_BYTES_PER_PAGE;
+ if (cfg->pages_per_engine)
+ program_length *= cfg->pages_per_engine;
+
+ /*
+	 * The firmware is encoded as ASCII hex characters, with 2 chars
+	 * per byte.
+ */
+ if (fw->size > program_length * 2) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
+ }
+
+ /*
+ * Program memory sequence
+ * 1) set engine mode to "LOAD"
+ * 2) write firmware data into program memory
+ */
+
+ lp55xx_load_engine(chip);
+ lp55xx_update_program_memory(chip, fw->data, fw->size);
+}
+EXPORT_SYMBOL_GPL(lp55xx_firmware_loaded_cb);
+
+int lp55xx_led_brightness(struct lp55xx_led *led)
+{
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ ret = lp55xx_write(chip, cfg->reg_led_pwm_base.addr + led->chan_nr,
+ led->brightness);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_led_brightness);
+
+int lp55xx_multicolor_brightness(struct lp55xx_led *led)
+{
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int ret;
+ int i;
+
+ guard(mutex)(&chip->lock);
+
+ for (i = 0; i < led->mc_cdev.num_colors; i++) {
+ ret = lp55xx_write(chip,
+ cfg->reg_led_pwm_base.addr +
+ led->mc_cdev.subled_info[i].channel,
+ led->mc_cdev.subled_info[i].brightness);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_multicolor_brightness);
+
+void lp55xx_set_led_current(struct lp55xx_led *led, u8 led_current)
+{
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+
+ led->led_current = led_current;
+ lp55xx_write(led->chip, cfg->reg_led_current_base.addr + led->chan_nr,
+ led_current);
+}
+EXPORT_SYMBOL_GPL(lp55xx_set_led_current);
+
+void lp55xx_turn_off_channels(struct lp55xx_chip *chip)
+{
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int i;
+
+ for (i = 0; i < cfg->max_channel; i++)
+ lp55xx_write(chip, cfg->reg_led_pwm_base.addr + i, 0);
+}
+EXPORT_SYMBOL_GPL(lp55xx_turn_off_channels);
+
+void lp55xx_stop_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ u8 mask;
+
+ mask = LP55xx_MODE_ENGn_MASK(idx, cfg->reg_op_mode.shift);
+ lp55xx_update_bits(chip, cfg->reg_op_mode.addr, mask, 0);
+
+ lp55xx_wait_opmode_done(chip);
+}
+EXPORT_SYMBOL_GPL(lp55xx_stop_engine);
+
static void lp55xx_reset_device(struct lp55xx_chip *chip)
{
- struct lp55xx_device_config *cfg = chip->cfg;
+ const struct lp55xx_device_config *cfg = chip->cfg;
u8 addr = cfg->reset.addr;
u8 val = cfg->reset.val;
@@ -52,7 +349,7 @@ static void lp55xx_reset_device(struct lp55xx_chip *chip)
static int lp55xx_detect_device(struct lp55xx_chip *chip)
{
- struct lp55xx_device_config *cfg = chip->cfg;
+ const struct lp55xx_device_config *cfg = chip->cfg;
u8 addr = cfg->enable.addr;
u8 val = cfg->enable.val;
int ret;
@@ -75,7 +372,7 @@ static int lp55xx_detect_device(struct lp55xx_chip *chip)
static int lp55xx_post_init_device(struct lp55xx_chip *chip)
{
- struct lp55xx_device_config *cfg = chip->cfg;
+ const struct lp55xx_device_config *cfg = chip->cfg;
if (!cfg->post_init_device)
return 0;
@@ -109,9 +406,9 @@ static ssize_t led_current_store(struct device *dev,
if (!chip->cfg->set_led_current)
return len;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
chip->cfg->set_led_current(led, (u8)curr);
- mutex_unlock(&chip->lock);
return len;
}
@@ -140,7 +437,7 @@ static int lp55xx_set_mc_brightness(struct led_classdev *cdev,
{
struct led_classdev_mc *mc_dev = lcdev_to_mccdev(cdev);
struct lp55xx_led *led = mcled_cdev_to_led(mc_dev);
- struct lp55xx_device_config *cfg = led->chip->cfg;
+ const struct lp55xx_device_config *cfg = led->chip->cfg;
led_mc_calc_color_components(&led->mc_cdev, brightness);
return cfg->multicolor_brightness_fn(led);
@@ -151,7 +448,7 @@ static int lp55xx_set_brightness(struct led_classdev *cdev,
enum led_brightness brightness)
{
struct lp55xx_led *led = cdev_to_lp55xx_led(cdev);
- struct lp55xx_device_config *cfg = led->chip->cfg;
+ const struct lp55xx_device_config *cfg = led->chip->cfg;
led->brightness = (u8)brightness;
return cfg->brightness_fn(led);
@@ -161,7 +458,7 @@ static int lp55xx_init_led(struct lp55xx_led *led,
struct lp55xx_chip *chip, int chan)
{
struct lp55xx_platform_data *pdata = chip->pdata;
- struct lp55xx_device_config *cfg = chip->cfg;
+ const struct lp55xx_device_config *cfg = chip->cfg;
struct device *dev = &chip->cl->dev;
int max_channel = cfg->max_channel;
struct mc_subled *mc_led_info;
@@ -246,14 +543,12 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
}
/* handling firmware data is chip dependent */
- mutex_lock(&chip->lock);
-
- chip->engines[idx - 1].mode = LP55XX_ENGINE_LOAD;
- chip->fw = fw;
- if (chip->cfg->firmware_cb)
- chip->cfg->firmware_cb(chip);
-
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock) {
+ chip->engines[idx - 1].mode = LP55XX_ENGINE_LOAD;
+ chip->fw = fw;
+ if (chip->cfg->firmware_cb)
+ chip->cfg->firmware_cb(chip);
+ }
/* firmware should be released for other channel use */
release_firmware(chip->fw);
@@ -270,8 +565,8 @@ static int lp55xx_request_firmware(struct lp55xx_chip *chip)
}
static ssize_t select_engine_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
@@ -280,8 +575,8 @@ static ssize_t select_engine_show(struct device *dev,
}
static ssize_t select_engine_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
@@ -297,10 +592,10 @@ static ssize_t select_engine_store(struct device *dev,
case LP55XX_ENGINE_1:
case LP55XX_ENGINE_2:
case LP55XX_ENGINE_3:
- mutex_lock(&chip->lock);
- chip->engine_idx = val;
- ret = lp55xx_request_firmware(chip);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock) {
+ chip->engine_idx = val;
+ ret = lp55xx_request_firmware(chip);
+ }
break;
default:
dev_err(dev, "%lu: invalid engine index. (1, 2, 3)\n", val);
@@ -322,8 +617,8 @@ static inline void lp55xx_run_engine(struct lp55xx_chip *chip, bool start)
}
static ssize_t run_engine_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
@@ -339,9 +634,9 @@ static ssize_t run_engine_store(struct device *dev,
return len;
}
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
lp55xx_run_engine(chip, true);
- mutex_unlock(&chip->lock);
return len;
}
@@ -349,6 +644,279 @@ static ssize_t run_engine_store(struct device *dev,
static DEVICE_ATTR_RW(select_engine);
static DEVICE_ATTR_WO(run_engine);
+ssize_t lp55xx_show_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ enum lp55xx_engine_mode mode = chip->engines[nr - 1].mode;
+
+ switch (mode) {
+ case LP55XX_ENGINE_RUN:
+ return sysfs_emit(buf, "run\n");
+ case LP55XX_ENGINE_LOAD:
+ return sysfs_emit(buf, "load\n");
+ case LP55XX_ENGINE_DISABLED:
+ default:
+ return sysfs_emit(buf, "disabled\n");
+ }
+}
+EXPORT_SYMBOL_GPL(lp55xx_show_engine_mode);
+
+ssize_t lp55xx_store_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ struct lp55xx_engine *engine = &chip->engines[nr - 1];
+
+ guard(mutex)(&chip->lock);
+
+ chip->engine_idx = nr;
+
+ if (!strncmp(buf, "run", 3)) {
+ cfg->run_engine(chip, true);
+ engine->mode = LP55XX_ENGINE_RUN;
+ } else if (!strncmp(buf, "load", 4)) {
+ lp55xx_stop_engine(chip);
+ lp55xx_load_engine(chip);
+ engine->mode = LP55XX_ENGINE_LOAD;
+ } else if (!strncmp(buf, "disabled", 8)) {
+ lp55xx_stop_engine(chip);
+ engine->mode = LP55XX_ENGINE_DISABLED;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(lp55xx_store_engine_mode);
+
+ssize_t lp55xx_store_engine_load(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ int ret;
+
+ guard(mutex)(&chip->lock);
+
+ chip->engine_idx = nr;
+ lp55xx_load_engine(chip);
+ ret = lp55xx_update_program_memory(chip, buf, len);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_store_engine_load);
+
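
A rough user-space sketch of the sysfs flow these two handlers implement, for
chips whose driver exposes the per-engine attributes created by the
LP55XX_DEV_ATTR_ENGINE_* macros; the device path and pattern bytes are
hypothetical and error handling is omitted:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static void demo_load_and_run(const char *sysfs)	/* e.g. "/sys/bus/i2c/devices/1-0032" */
	{
		char path[256];
		int fd;

		/* 1) put engine 1 into LOAD mode */
		snprintf(path, sizeof(path), "%s/engine1_mode", sysfs);
		fd = open(path, O_WRONLY);
		write(fd, "load", 4);
		close(fd);

		/* 2) write an ASCII-hex pattern into program memory */
		snprintf(path, sizeof(path), "%s/engine1_load", sysfs);
		fd = open(path, O_WRONLY);
		write(fd, "9d80 40ff", 9);
		close(fd);

		/* 3) start the engine */
		snprintf(path, sizeof(path), "%s/engine1_mode", sysfs);
		fd = open(path, O_WRONLY);
		write(fd, "run", 3);
		close(fd);
	}
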
+static int lp55xx_mux_parse(struct lp55xx_chip *chip, const char *buf,
+ u16 *mux, size_t len)
+{
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ u16 tmp_mux = 0;
+ int i;
+
+ len = min_t(int, len, cfg->max_channel);
+
+ for (i = 0; i < len; i++) {
+ switch (buf[i]) {
+ case '1':
+ tmp_mux |= (1 << i);
+ break;
+ case '0':
+ break;
+ case '\n':
+ i = len;
+ break;
+ default:
+ return -1;
+ }
+ }
+ *mux = tmp_mux;
+
+ return 0;
+}
+
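
A small sketch of the string-to-mux mapping implemented above; channel 0 is the
leftmost character, and the values assume a 9-channel chip such as the LP8501:

	/*
	 * "101000000" -> mux = BIT(0) | BIT(2) = 0x0005
	 * "000000011" -> mux = BIT(7) | BIT(8) = 0x0180
	 * Anything other than '0', '1' or a terminating '\n' is rejected.
	 */
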
+ssize_t lp55xx_show_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ unsigned int led_active;
+ int i, pos = 0;
+
+ for (i = 0; i < cfg->max_channel; i++) {
+ led_active = LED_ACTIVE(chip->engines[nr - 1].led_mux, i);
+ pos += sysfs_emit_at(buf, pos, "%x", led_active);
+ }
+
+ pos += sysfs_emit_at(buf, pos, "\n");
+
+ return pos;
+}
+EXPORT_SYMBOL_GPL(lp55xx_show_engine_leds);
+
+static int lp55xx_load_mux(struct lp55xx_chip *chip, u16 mux, int nr)
+{
+ struct lp55xx_engine *engine = &chip->engines[nr - 1];
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ u8 mux_page;
+ int ret;
+
+ lp55xx_load_engine(chip);
+
+ /* Derive the MUX page offset by starting at the end of the ENGINE pages */
+ mux_page = cfg->pages_per_engine * LP55XX_ENGINE_MAX + (nr - 1);
+ ret = lp55xx_write(chip, LP55xx_REG_PROG_PAGE_SEL, mux_page);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, cfg->prog_mem_base.addr, (u8)(mux >> 8));
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, cfg->prog_mem_base.addr + 1, (u8)(mux));
+ if (ret)
+ return ret;
+
+ engine->led_mux = mux;
+ return 0;
+}
+
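
To make the MUX page arithmetic concrete, a hedged example assuming two pages
per engine and LP55XX_ENGINE_MAX == 3:

	/*
	 * Engine program memory occupies pages 0..5, so the per-engine MUX
	 * tables follow immediately after:
	 *   mux_page(ENGINE_1) = 2 * 3 + 0 = 6
	 *   mux_page(ENGINE_2) = 2 * 3 + 1 = 7
	 *   mux_page(ENGINE_3) = 2 * 3 + 2 = 8
	 */
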
+ssize_t lp55xx_store_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_engine *engine = &chip->engines[nr - 1];
+ u16 mux = 0;
+
+ if (lp55xx_mux_parse(chip, buf, &mux, len))
+ return -EINVAL;
+
+ guard(mutex)(&chip->lock);
+
+ chip->engine_idx = nr;
+
+ if (engine->mode != LP55XX_ENGINE_LOAD)
+ return -EINVAL;
+
+ if (lp55xx_load_mux(chip, mux, nr))
+ return -EINVAL;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(lp55xx_store_engine_leds);
+
+ssize_t lp55xx_show_master_fader(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int ret;
+ u8 val;
+
+ guard(mutex)(&chip->lock);
+
+ ret = lp55xx_read(chip, cfg->reg_master_fader_base.addr + nr - 1, &val);
+
+ return ret ? ret : sysfs_emit(buf, "%u\n", val);
+}
+EXPORT_SYMBOL_GPL(lp55xx_show_master_fader);
+
+ssize_t lp55xx_store_master_fader(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int ret;
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 0xff)
+ return -EINVAL;
+
+ guard(mutex)(&chip->lock);
+
+ ret = lp55xx_write(chip, cfg->reg_master_fader_base.addr + nr - 1,
+ (u8)val);
+
+ return ret ? ret : len;
+}
+EXPORT_SYMBOL_GPL(lp55xx_store_master_fader);
+
+ssize_t lp55xx_show_master_fader_leds(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int i, ret, pos = 0;
+ u8 val;
+
+ guard(mutex)(&chip->lock);
+
+ for (i = 0; i < cfg->max_channel; i++) {
+ ret = lp55xx_read(chip, cfg->reg_led_ctrl_base.addr + i, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(LP55xx_FADER_MAPPING_MASK, val);
+ if (val > FIELD_MAX(LP55xx_FADER_MAPPING_MASK)) {
+ return -EINVAL;
+ }
+ buf[pos++] = val + '0';
+ }
+ buf[pos++] = '\n';
+
+ return pos;
+}
+EXPORT_SYMBOL_GPL(lp55xx_show_master_fader_leds);
+
+ssize_t lp55xx_store_master_fader_leds(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ const struct lp55xx_device_config *cfg = chip->cfg;
+ int i, n, ret;
+ u8 val;
+
+ n = min_t(int, len, cfg->max_channel);
+
+ guard(mutex)(&chip->lock);
+
+ for (i = 0; i < n; i++) {
+ if (buf[i] >= '0' && buf[i] <= '3') {
+ val = (buf[i] - '0') << __bf_shf(LP55xx_FADER_MAPPING_MASK);
+ ret = lp55xx_update_bits(chip,
+ cfg->reg_led_ctrl_base.addr + i,
+ LP55xx_FADER_MAPPING_MASK,
+ val);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(lp55xx_store_master_fader_leds);
+
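
A short sketch of the fader-mapping encoding these handlers read and write:
bits 7:6 of each LED control register select the master fader, one digit per
channel in the sysfs string:

	u8 ctrl = FIELD_PREP(LP55xx_FADER_MAPPING_MASK, 2);			/* digit '2' -> 0x80 */
	unsigned int fader = FIELD_GET(LP55xx_FADER_MAPPING_MASK, ctrl);	/* back to 2 */
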
static struct attribute *lp55xx_engine_attributes[] = {
&dev_attr_select_engine.attr,
&dev_attr_run_engine.attr,
@@ -423,10 +991,21 @@ use_internal_clk:
}
EXPORT_SYMBOL_GPL(lp55xx_is_extclk_used);
-int lp55xx_init_device(struct lp55xx_chip *chip)
+static void lp55xx_deinit_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_platform_data *pdata = chip->pdata;
+
+ if (chip->clk)
+ clk_disable_unprepare(chip->clk);
+
+ if (pdata->enable_gpiod)
+ gpiod_set_value(pdata->enable_gpiod, 0);
+}
+
+static int lp55xx_init_device(struct lp55xx_chip *chip)
{
struct lp55xx_platform_data *pdata;
- struct lp55xx_device_config *cfg;
+ const struct lp55xx_device_config *cfg;
struct device *dev = &chip->cl->dev;
int ret = 0;
@@ -476,24 +1055,11 @@ err_post_init:
err:
return ret;
}
-EXPORT_SYMBOL_GPL(lp55xx_init_device);
-void lp55xx_deinit_device(struct lp55xx_chip *chip)
+static int lp55xx_register_leds(struct lp55xx_led *led, struct lp55xx_chip *chip)
{
struct lp55xx_platform_data *pdata = chip->pdata;
-
- if (chip->clk)
- clk_disable_unprepare(chip->clk);
-
- if (pdata->enable_gpiod)
- gpiod_set_value(pdata->enable_gpiod, 0);
-}
-EXPORT_SYMBOL_GPL(lp55xx_deinit_device);
-
-int lp55xx_register_leds(struct lp55xx_led *led, struct lp55xx_chip *chip)
-{
- struct lp55xx_platform_data *pdata = chip->pdata;
- struct lp55xx_device_config *cfg = chip->cfg;
+ const struct lp55xx_device_config *cfg = chip->cfg;
int num_channels = pdata->num_channels;
struct lp55xx_led *each;
u8 led_current;
@@ -530,12 +1096,11 @@ int lp55xx_register_leds(struct lp55xx_led *led, struct lp55xx_chip *chip)
err_init_led:
return ret;
}
-EXPORT_SYMBOL_GPL(lp55xx_register_leds);
-int lp55xx_register_sysfs(struct lp55xx_chip *chip)
+static int lp55xx_register_sysfs(struct lp55xx_chip *chip)
{
struct device *dev = &chip->cl->dev;
- struct lp55xx_device_config *cfg = chip->cfg;
+ const struct lp55xx_device_config *cfg = chip->cfg;
int ret;
if (!cfg->run_engine || !cfg->firmware_cb)
@@ -549,19 +1114,17 @@ dev_specific_attrs:
return cfg->dev_attr_group ?
sysfs_create_group(&dev->kobj, cfg->dev_attr_group) : 0;
}
-EXPORT_SYMBOL_GPL(lp55xx_register_sysfs);
-void lp55xx_unregister_sysfs(struct lp55xx_chip *chip)
+static void lp55xx_unregister_sysfs(struct lp55xx_chip *chip)
{
struct device *dev = &chip->cl->dev;
- struct lp55xx_device_config *cfg = chip->cfg;
+ const struct lp55xx_device_config *cfg = chip->cfg;
if (cfg->dev_attr_group)
sysfs_remove_group(&dev->kobj, cfg->dev_attr_group);
sysfs_remove_group(&dev->kobj, &lp55xx_engine_attr_group);
}
-EXPORT_SYMBOL_GPL(lp55xx_unregister_sysfs);
static int lp55xx_parse_common_child(struct device_node *np,
struct lp55xx_led_config *cfg,
@@ -654,9 +1217,9 @@ static int lp55xx_parse_logical_led(struct device_node *np,
return ret;
}
-struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
- struct device_node *np,
- struct lp55xx_chip *chip)
+static struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
+ struct device_node *np,
+ struct lp55xx_chip *chip)
{
struct device_node *child;
struct lp55xx_platform_data *pdata;
@@ -713,7 +1276,92 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
return pdata;
}
-EXPORT_SYMBOL_GPL(lp55xx_of_populate_pdata);
+
+int lp55xx_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ int program_length, ret;
+ struct lp55xx_chip *chip;
+ struct lp55xx_led *led;
+ struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device_node *np = dev_of_node(&client->dev);
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->cfg = i2c_get_match_data(client);
+
+ if (!pdata) {
+ if (np) {
+ pdata = lp55xx_of_populate_pdata(&client->dev, np,
+ chip);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ } else {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Validate max program page */
+ program_length = LP55xx_BYTES_PER_PAGE;
+ if (chip->cfg->pages_per_engine)
+ program_length *= chip->cfg->pages_per_engine;
+
+	/* support a maximum of 128 bytes */
+ if (program_length > LP55xx_MAX_PROGRAM_LENGTH) {
+ dev_err(&client->dev, "invalid pages_per_engine configured\n");
+ return -EINVAL;
+ }
+
+ led = devm_kcalloc(&client->dev,
+ pdata->num_channels, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ chip->cl = client;
+ chip->pdata = pdata;
+
+ mutex_init(&chip->lock);
+
+ i2c_set_clientdata(client, led);
+
+ ret = lp55xx_init_device(chip);
+ if (ret)
+ goto err_init;
+
+ dev_info(&client->dev, "%s Programmable led chip found\n", id->name);
+
+ ret = lp55xx_register_leds(led, chip);
+ if (ret)
+ goto err_out;
+
+ ret = lp55xx_register_sysfs(chip);
+ if (ret) {
+ dev_err(&client->dev, "registering sysfs failed\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ lp55xx_deinit_device(chip);
+err_init:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_probe);
+
+void lp55xx_remove(struct i2c_client *client)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(client);
+ struct lp55xx_chip *chip = led->chip;
+
+ lp55xx_stop_all_engine(chip);
+ lp55xx_unregister_sysfs(chip);
+ lp55xx_deinit_device(chip);
+}
+EXPORT_SYMBOL_GPL(lp55xx_remove);
MODULE_AUTHOR("Milo Kim <milo.kim@ti.com>");
MODULE_DESCRIPTION("LP55xx Common Driver");
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
index 2f38c5b33830..1bb7c559662c 100644
--- a/drivers/leds/leds-lp55xx-common.h
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -14,6 +14,8 @@
#include <linux/led-class-multicolor.h>
+#define LP55xx_BYTES_PER_PAGE 32 /* bytes */
+
enum lp55xx_engine_index {
LP55XX_ENGINE_INVALID,
LP55XX_ENGINE_1,
@@ -35,45 +37,62 @@ enum lp55xx_engine_mode {
#define LP55XX_DEV_ATTR_WO(name, store) \
DEVICE_ATTR(name, S_IWUSR, NULL, store)
-#define show_mode(nr) \
+#define LP55XX_DEV_ATTR_ENGINE_MODE(nr) \
static ssize_t show_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
+ struct device_attribute *attr, \
+ char *buf) \
{ \
- return show_engine_mode(dev, attr, buf, nr); \
-}
-
-#define store_mode(nr) \
+ return lp55xx_show_engine_mode(dev, attr, buf, nr); \
+} \
static ssize_t store_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
{ \
- return store_engine_mode(dev, attr, buf, len, nr); \
-}
+ return lp55xx_store_engine_mode(dev, attr, buf, len, nr); \
+} \
+static LP55XX_DEV_ATTR_RW(engine##nr##_mode, show_engine##nr##_mode, \
+ store_engine##nr##_mode)
-#define show_leds(nr) \
+#define LP55XX_DEV_ATTR_ENGINE_LEDS(nr) \
static ssize_t show_engine##nr##_leds(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
+ struct device_attribute *attr, \
+ char *buf) \
{ \
- return show_engine_leds(dev, attr, buf, nr); \
-}
-
-#define store_leds(nr) \
-static ssize_t store_engine##nr##_leds(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_leds(dev, attr, buf, len, nr); \
-}
-
-#define store_load(nr) \
+ return lp55xx_show_engine_leds(dev, attr, buf, nr); \
+} \
+static ssize_t store_engine##nr##_leds(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return lp55xx_store_engine_leds(dev, attr, buf, len, nr); \
+} \
+static LP55XX_DEV_ATTR_RW(engine##nr##_leds, show_engine##nr##_leds, \
+ store_engine##nr##_leds)
+
+#define LP55XX_DEV_ATTR_ENGINE_LOAD(nr) \
static ssize_t store_engine##nr##_load(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return lp55xx_store_engine_load(dev, attr, buf, len, nr); \
+} \
+static LP55XX_DEV_ATTR_WO(engine##nr##_load, store_engine##nr##_load)
+
+#define LP55XX_DEV_ATTR_MASTER_FADER(nr) \
+static ssize_t show_master_fader##nr(struct device *dev, \
struct device_attribute *attr, \
- const char *buf, size_t len) \
+ char *buf) \
{ \
- return store_engine_load(dev, attr, buf, len, nr); \
-}
+ return lp55xx_show_master_fader(dev, attr, buf, nr); \
+} \
+static ssize_t store_master_fader##nr(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return lp55xx_store_master_fader(dev, attr, buf, len, nr); \
+} \
+static LP55XX_DEV_ATTR_RW(master_fader##nr, show_master_fader##nr, \
+ store_master_fader##nr)
struct lp55xx_led;
struct lp55xx_chip;
@@ -81,17 +100,31 @@ struct lp55xx_chip;
/*
* struct lp55xx_reg
* @addr : Register address
- * @val : Register value
+ * @val : Register value (can also be used as a mask or shift)
*/
struct lp55xx_reg {
u8 addr;
- u8 val;
+ union {
+ u8 val;
+ u8 mask;
+ u8 shift;
+ };
};
/*
* struct lp55xx_device_config
+ * @reg_op_mode : Chip specific OP MODE reg addr
+ * @engine_busy : Chip specific engine busy register
+ * (if not supported, sleep for 153 us)
* @reset : Chip specific reset command
* @enable : Chip specific enable command
+ * @prog_mem_base : Chip specific base reg address for chip SMEM programming
+ * @reg_led_pwm_base : Chip specific base reg address for LED PWM conf
+ * @reg_led_current_base : Chip specific base reg address for LED current conf
+ * @reg_master_fader_base : Chip specific base reg address for the master fader
+ * @reg_led_ctrl_base : Chip specific base reg address for LED control
+ * @pages_per_engine : Pages assigned to each engine
+ * (if not set, the chip doesn't support pages)
* @max_channel : Maximum number of channels
* @post_init_device : Chip specific initialization code
* @brightness_fn : Brightness function
@@ -102,8 +135,17 @@ struct lp55xx_reg {
* @dev_attr_group : Device specific attributes
*/
struct lp55xx_device_config {
+ const struct lp55xx_reg reg_op_mode; /* addr, shift */
+ const struct lp55xx_reg reg_exec; /* addr, shift */
+ const struct lp55xx_reg engine_busy; /* addr, mask */
const struct lp55xx_reg reset;
const struct lp55xx_reg enable;
+ const struct lp55xx_reg prog_mem_base;
+ const struct lp55xx_reg reg_led_pwm_base;
+ const struct lp55xx_reg reg_led_current_base;
+ const struct lp55xx_reg reg_master_fader_base;
+ const struct lp55xx_reg reg_led_ctrl_base;
+ const int pages_per_engine;
const int max_channel;
/* define if the device has specific initialization process */
@@ -155,7 +197,7 @@ struct lp55xx_chip {
struct lp55xx_platform_data *pdata;
struct mutex lock; /* lock for user-space interface */
int num_leds;
- struct lp55xx_device_config *cfg;
+ const struct lp55xx_device_config *cfg;
enum lp55xx_engine_index engine_idx;
struct lp55xx_engine engines[LP55XX_ENGINE_MAX];
const struct firmware *fw;
@@ -191,21 +233,50 @@ extern int lp55xx_update_bits(struct lp55xx_chip *chip, u8 reg,
/* external clock detection */
extern bool lp55xx_is_extclk_used(struct lp55xx_chip *chip);
-/* common device init/deinit functions */
-extern int lp55xx_init_device(struct lp55xx_chip *chip);
-extern void lp55xx_deinit_device(struct lp55xx_chip *chip);
-
-/* common LED class device functions */
-extern int lp55xx_register_leds(struct lp55xx_led *led,
- struct lp55xx_chip *chip);
+/* common chip functions */
+extern void lp55xx_stop_all_engine(struct lp55xx_chip *chip);
+extern void lp55xx_load_engine(struct lp55xx_chip *chip);
+extern int lp55xx_run_engine_common(struct lp55xx_chip *chip);
+extern int lp55xx_update_program_memory(struct lp55xx_chip *chip,
+ const u8 *data, size_t size);
+extern void lp55xx_firmware_loaded_cb(struct lp55xx_chip *chip);
+extern int lp55xx_led_brightness(struct lp55xx_led *led);
+extern int lp55xx_multicolor_brightness(struct lp55xx_led *led);
+extern void lp55xx_set_led_current(struct lp55xx_led *led, u8 led_current);
+extern void lp55xx_turn_off_channels(struct lp55xx_chip *chip);
+extern void lp55xx_stop_engine(struct lp55xx_chip *chip);
-/* common device attributes functions */
-extern int lp55xx_register_sysfs(struct lp55xx_chip *chip);
-extern void lp55xx_unregister_sysfs(struct lp55xx_chip *chip);
+/* common probe/remove function */
+extern int lp55xx_probe(struct i2c_client *client);
+extern void lp55xx_remove(struct i2c_client *client);
-/* common device tree population function */
-extern struct lp55xx_platform_data
-*lp55xx_of_populate_pdata(struct device *dev, struct device_node *np,
- struct lp55xx_chip *chip);
+/* common sysfs function */
+extern ssize_t lp55xx_show_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr);
+extern ssize_t lp55xx_store_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr);
+extern ssize_t lp55xx_store_engine_load(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr);
+extern ssize_t lp55xx_show_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr);
+extern ssize_t lp55xx_store_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr);
+extern ssize_t lp55xx_show_master_fader(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr);
+extern ssize_t lp55xx_store_master_fader(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr);
+extern ssize_t lp55xx_show_master_fader_leds(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t lp55xx_store_master_fader_leds(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len);
#endif /* _LEDS_LP55XX_COMMON_H */
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index ac50aa88939a..ee4ff4586bc0 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -20,27 +20,14 @@
#include "leds-lp55xx-common.h"
-#define LP8501_PROGRAM_LENGTH 32
+#define LP8501_PAGES_PER_ENGINE 1
#define LP8501_MAX_LEDS 9
/* Registers */
#define LP8501_REG_ENABLE 0x00
#define LP8501_ENABLE BIT(6)
-#define LP8501_EXEC_M 0x3F
-#define LP8501_EXEC_ENG1_M 0x30
-#define LP8501_EXEC_ENG2_M 0x0C
-#define LP8501_EXEC_ENG3_M 0x03
-#define LP8501_RUN_ENG1 0x20
-#define LP8501_RUN_ENG2 0x08
-#define LP8501_RUN_ENG3 0x02
#define LP8501_REG_OP_MODE 0x01
-#define LP8501_MODE_ENG1_M 0x30
-#define LP8501_MODE_ENG2_M 0x0C
-#define LP8501_MODE_ENG3_M 0x03
-#define LP8501_LOAD_ENG1 0x10
-#define LP8501_LOAD_ENG2 0x04
-#define LP8501_LOAD_ENG3 0x01
#define LP8501_REG_PWR_CONFIG 0x05
#define LP8501_PWR_CONFIG_M 0x03
@@ -58,35 +45,14 @@
#define LP8501_INT_CLK BIT(0)
#define LP8501_DEFAULT_CFG (LP8501_PWM_PSAVE | LP8501_AUTO_INC | LP8501_PWR_SAVE)
+#define LP8501_REG_STATUS 0x3A
+#define LP8501_ENGINE_BUSY BIT(4)
+
#define LP8501_REG_RESET 0x3D
#define LP8501_RESET 0xFF
-#define LP8501_REG_PROG_PAGE_SEL 0x4F
-#define LP8501_PAGE_ENG1 0
-#define LP8501_PAGE_ENG2 1
-#define LP8501_PAGE_ENG3 2
-
#define LP8501_REG_PROG_MEM 0x50
-#define LP8501_ENG1_IS_LOADING(mode) \
- ((mode & LP8501_MODE_ENG1_M) == LP8501_LOAD_ENG1)
-#define LP8501_ENG2_IS_LOADING(mode) \
- ((mode & LP8501_MODE_ENG2_M) == LP8501_LOAD_ENG2)
-#define LP8501_ENG3_IS_LOADING(mode) \
- ((mode & LP8501_MODE_ENG3_M) == LP8501_LOAD_ENG3)
-
-static inline void lp8501_wait_opmode_done(void)
-{
- usleep_range(1000, 2000);
-}
-
-static void lp8501_set_led_current(struct lp55xx_led *led, u8 led_current)
-{
- led->led_current = led_current;
- lp55xx_write(led->chip, LP8501_REG_LED_CURRENT_BASE + led->chan_nr,
- led_current);
-}
-
static int lp8501_post_init_device(struct lp55xx_chip *chip)
{
int ret;
@@ -113,178 +79,30 @@ static int lp8501_post_init_device(struct lp55xx_chip *chip)
LP8501_PWR_CONFIG_M, chip->pdata->pwr_sel);
}
-static void lp8501_load_engine(struct lp55xx_chip *chip)
-{
- enum lp55xx_engine_index idx = chip->engine_idx;
- static const u8 mask[] = {
- [LP55XX_ENGINE_1] = LP8501_MODE_ENG1_M,
- [LP55XX_ENGINE_2] = LP8501_MODE_ENG2_M,
- [LP55XX_ENGINE_3] = LP8501_MODE_ENG3_M,
- };
-
- static const u8 val[] = {
- [LP55XX_ENGINE_1] = LP8501_LOAD_ENG1,
- [LP55XX_ENGINE_2] = LP8501_LOAD_ENG2,
- [LP55XX_ENGINE_3] = LP8501_LOAD_ENG3,
- };
-
- static const u8 page_sel[] = {
- [LP55XX_ENGINE_1] = LP8501_PAGE_ENG1,
- [LP55XX_ENGINE_2] = LP8501_PAGE_ENG2,
- [LP55XX_ENGINE_3] = LP8501_PAGE_ENG3,
- };
-
- lp55xx_update_bits(chip, LP8501_REG_OP_MODE, mask[idx], val[idx]);
-
- lp8501_wait_opmode_done();
-
- lp55xx_write(chip, LP8501_REG_PROG_PAGE_SEL, page_sel[idx]);
-}
-
-static void lp8501_stop_engine(struct lp55xx_chip *chip)
-{
- lp55xx_write(chip, LP8501_REG_OP_MODE, 0);
- lp8501_wait_opmode_done();
-}
-
-static void lp8501_turn_off_channels(struct lp55xx_chip *chip)
-{
- int i;
-
- for (i = 0; i < LP8501_MAX_LEDS; i++)
- lp55xx_write(chip, LP8501_REG_LED_PWM_BASE + i, 0);
-}
-
static void lp8501_run_engine(struct lp55xx_chip *chip, bool start)
{
- int ret;
- u8 mode;
- u8 exec;
-
/* stop engine */
if (!start) {
- lp8501_stop_engine(chip);
- lp8501_turn_off_channels(chip);
- return;
- }
-
- /*
- * To run the engine,
- * operation mode and enable register should updated at the same time
- */
-
- ret = lp55xx_read(chip, LP8501_REG_OP_MODE, &mode);
- if (ret)
- return;
-
- ret = lp55xx_read(chip, LP8501_REG_ENABLE, &exec);
- if (ret)
- return;
-
- /* change operation mode to RUN only when each engine is loading */
- if (LP8501_ENG1_IS_LOADING(mode)) {
- mode = (mode & ~LP8501_MODE_ENG1_M) | LP8501_RUN_ENG1;
- exec = (exec & ~LP8501_EXEC_ENG1_M) | LP8501_RUN_ENG1;
- }
-
- if (LP8501_ENG2_IS_LOADING(mode)) {
- mode = (mode & ~LP8501_MODE_ENG2_M) | LP8501_RUN_ENG2;
- exec = (exec & ~LP8501_EXEC_ENG2_M) | LP8501_RUN_ENG2;
- }
-
- if (LP8501_ENG3_IS_LOADING(mode)) {
- mode = (mode & ~LP8501_MODE_ENG3_M) | LP8501_RUN_ENG3;
- exec = (exec & ~LP8501_EXEC_ENG3_M) | LP8501_RUN_ENG3;
- }
-
- lp55xx_write(chip, LP8501_REG_OP_MODE, mode);
- lp8501_wait_opmode_done();
-
- lp55xx_update_bits(chip, LP8501_REG_ENABLE, LP8501_EXEC_M, exec);
-}
-
-static int lp8501_update_program_memory(struct lp55xx_chip *chip,
- const u8 *data, size_t size)
-{
- u8 pattern[LP8501_PROGRAM_LENGTH] = {0};
- unsigned cmd;
- char c[3];
- int update_size;
- int nrchars;
- int offset = 0;
- int ret;
- int i;
-
- /* clear program memory before updating */
- for (i = 0; i < LP8501_PROGRAM_LENGTH; i++)
- lp55xx_write(chip, LP8501_REG_PROG_MEM + i, 0);
-
- i = 0;
- while ((offset < size - 1) && (i < LP8501_PROGRAM_LENGTH)) {
- /* separate sscanfs because length is working only for %s */
- ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
- if (ret != 1)
- goto err;
-
- ret = sscanf(c, "%2x", &cmd);
- if (ret != 1)
- goto err;
-
- pattern[i] = (u8)cmd;
- offset += nrchars;
- i++;
- }
-
- /* Each instruction is 16bit long. Check that length is even */
- if (i % 2)
- goto err;
-
- update_size = i;
- for (i = 0; i < update_size; i++)
- lp55xx_write(chip, LP8501_REG_PROG_MEM + i, pattern[i]);
-
- return 0;
-
-err:
- dev_err(&chip->cl->dev, "wrong pattern format\n");
- return -EINVAL;
-}
-
-static void lp8501_firmware_loaded(struct lp55xx_chip *chip)
-{
- const struct firmware *fw = chip->fw;
-
- if (fw->size > LP8501_PROGRAM_LENGTH) {
- dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
- fw->size);
+ lp55xx_stop_all_engine(chip);
+ lp55xx_turn_off_channels(chip);
return;
}
- /*
- * Program memory sequence
- * 1) set engine mode to "LOAD"
- * 2) write firmware data into program memory
- */
-
- lp8501_load_engine(chip);
- lp8501_update_program_memory(chip, fw->data, fw->size);
-}
-
-static int lp8501_led_brightness(struct lp55xx_led *led)
-{
- struct lp55xx_chip *chip = led->chip;
- int ret;
-
- mutex_lock(&chip->lock);
- ret = lp55xx_write(chip, LP8501_REG_LED_PWM_BASE + led->chan_nr,
- led->brightness);
- mutex_unlock(&chip->lock);
-
- return ret;
+ lp55xx_run_engine_common(chip);
}
/* Chip specific configurations */
static struct lp55xx_device_config lp8501_cfg = {
+ .reg_op_mode = {
+ .addr = LP8501_REG_OP_MODE,
+ },
+ .reg_exec = {
+ .addr = LP8501_REG_ENABLE,
+ },
+ .engine_busy = {
+ .addr = LP8501_REG_STATUS,
+ .mask = LP8501_ENGINE_BUSY,
+ },
.reset = {
.addr = LP8501_REG_RESET,
.val = LP8501_RESET,
@@ -293,95 +111,32 @@ static struct lp55xx_device_config lp8501_cfg = {
.addr = LP8501_REG_ENABLE,
.val = LP8501_ENABLE,
},
+ .prog_mem_base = {
+ .addr = LP8501_REG_PROG_MEM,
+ },
+ .reg_led_pwm_base = {
+ .addr = LP8501_REG_LED_PWM_BASE,
+ },
+ .reg_led_current_base = {
+ .addr = LP8501_REG_LED_CURRENT_BASE,
+ },
+ .pages_per_engine = LP8501_PAGES_PER_ENGINE,
.max_channel = LP8501_MAX_LEDS,
.post_init_device = lp8501_post_init_device,
- .brightness_fn = lp8501_led_brightness,
- .set_led_current = lp8501_set_led_current,
- .firmware_cb = lp8501_firmware_loaded,
+ .brightness_fn = lp55xx_led_brightness,
+ .set_led_current = lp55xx_set_led_current,
+ .firmware_cb = lp55xx_firmware_loaded_cb,
.run_engine = lp8501_run_engine,
};
-static int lp8501_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- int ret;
- struct lp55xx_chip *chip;
- struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = dev_of_node(&client->dev);
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->cfg = &lp8501_cfg;
-
- if (!pdata) {
- if (np) {
- pdata = lp55xx_of_populate_pdata(&client->dev, np,
- chip);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else {
- dev_err(&client->dev, "no platform data\n");
- return -EINVAL;
- }
- }
-
- led = devm_kcalloc(&client->dev,
- pdata->num_channels, sizeof(*led), GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- chip->cl = client;
- chip->pdata = pdata;
-
- mutex_init(&chip->lock);
-
- i2c_set_clientdata(client, led);
-
- ret = lp55xx_init_device(chip);
- if (ret)
- goto err_init;
-
- dev_info(&client->dev, "%s Programmable led chip found\n", id->name);
-
- ret = lp55xx_register_leds(led, chip);
- if (ret)
- goto err_out;
-
- ret = lp55xx_register_sysfs(chip);
- if (ret) {
- dev_err(&client->dev, "registering sysfs failed\n");
- goto err_out;
- }
-
- return 0;
-
-err_out:
- lp55xx_deinit_device(chip);
-err_init:
- return ret;
-}
-
-static void lp8501_remove(struct i2c_client *client)
-{
- struct lp55xx_led *led = i2c_get_clientdata(client);
- struct lp55xx_chip *chip = led->chip;
-
- lp8501_stop_engine(chip);
- lp55xx_unregister_sysfs(chip);
- lp55xx_deinit_device(chip);
-}
-
static const struct i2c_device_id lp8501_id[] = {
- { "lp8501", 0 },
+ { "lp8501", .driver_data = (kernel_ulong_t)&lp8501_cfg, },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp8501_id);
static const struct of_device_id of_lp8501_leds_match[] = {
- { .compatible = "ti,lp8501", },
+ { .compatible = "ti,lp8501", .data = &lp8501_cfg, },
{},
};
@@ -392,8 +147,8 @@ static struct i2c_driver lp8501_driver = {
.name = "lp8501",
.of_match_table = of_lp8501_leds_match,
},
- .probe = lp8501_probe,
- .remove = lp8501_remove,
+ .probe = lp55xx_probe,
+ .remove = lp55xx_remove,
.id_table = lp8501_id,
};
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 19b621012e58..7a136fd81720 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -459,7 +459,7 @@ static void lp8860_remove(struct i2c_client *client)
}
static const struct i2c_device_id lp8860_id[] = {
- { "lp8860", 0 },
+ { "lp8860" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp8860_id);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index bf8bb8fc007c..9f3fac66a11c 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -29,6 +29,9 @@
#define LED_SHIFT(led) (LED_NUM(led) * 2)
#define LED_MASK(led) (0x3 << LED_SHIFT(led))
+#define PCA9532_PWM_PERIOD_DIV 152
+#define PCA9532_PWM_DUTY_DIV 256
+
#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev)
struct pca9532_chip_info {
@@ -45,8 +48,12 @@ struct pca9532_data {
struct gpio_chip gpio;
#endif
const struct pca9532_chip_info *chip_info;
+
+#define PCA9532_PWM_ID_0 0
+#define PCA9532_PWM_ID_1 1
u8 pwm[2];
u8 psc[2];
+ bool hw_blink;
};
static int pca9532_probe(struct i2c_client *client);
@@ -181,39 +188,74 @@ static int pca9532_set_brightness(struct led_classdev *led_cdev,
led->state = PCA9532_ON;
else {
led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
- err = pca9532_calcpwm(led->client, 0, 0, value);
+ err = pca9532_calcpwm(led->client, PCA9532_PWM_ID_0, 0, value);
if (err)
return err;
}
if (led->state == PCA9532_PWM0)
- pca9532_setpwm(led->client, 0);
+ pca9532_setpwm(led->client, PCA9532_PWM_ID_0);
pca9532_setled(led);
return err;
}
+static int pca9532_update_hw_blink(struct pca9532_led *led,
+ unsigned long delay_on, unsigned long delay_off)
+{
+ struct pca9532_data *data = i2c_get_clientdata(led->client);
+ unsigned int psc;
+ int i;
+
+	/* Look for other LEDs that already use PWM1 */
+ for (i = 0; i < data->chip_info->num_leds; i++) {
+ struct pca9532_led *other = &data->leds[i];
+
+ if (other == led)
+ continue;
+
+ if (other->state == PCA9532_PWM1) {
+ if (other->ldev.blink_delay_on != delay_on ||
+ other->ldev.blink_delay_off != delay_off) {
+ dev_err(&led->client->dev,
+ "HW can handle only one blink configuration at a time\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ psc = ((delay_on + delay_off) * PCA9532_PWM_PERIOD_DIV - 1) / 1000;
+ if (psc > U8_MAX) {
+ dev_err(&led->client->dev, "Blink period too long to be handled by hardware\n");
+ return -EINVAL;
+ }
+
+ led->state = PCA9532_PWM1;
+ data->psc[PCA9532_PWM_ID_1] = psc;
+ data->pwm[PCA9532_PWM_ID_1] = (delay_on * PCA9532_PWM_DUTY_DIV) / (delay_on + delay_off);
+
+ return pca9532_setpwm(data->client, PCA9532_PWM_ID_1);
+}
+
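
A worked example of the conversion above, using the 500 ms defaults set in
pca9532_set_blink below (the 152 and 256 divisors reflect the PCA9532 PSC and
PWM register scaling):

	/*
	 * delay_on = delay_off = 500 ms  ->  period = 1000 ms
	 *   psc = (1000 * 152 - 1) / 1000 = 151  =>  blink period (psc + 1) / 152 = 1 s
	 *   pwm = 500 * 256 / 1000        = 128  =>  ~50% duty cycle
	 */
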
static int pca9532_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off)
{
struct pca9532_led *led = ldev_to_led(led_cdev);
struct i2c_client *client = led->client;
- int psc;
- int err = 0;
+ struct pca9532_data *data = i2c_get_clientdata(client);
+ int err;
+
+ if (!data->hw_blink)
+ return -EINVAL;
if (*delay_on == 0 && *delay_off == 0) {
/* led subsystem ask us for a blink rate */
- *delay_on = 1000;
- *delay_off = 1000;
+ *delay_on = 500;
+ *delay_off = 500;
}
- if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6)
- return -EINVAL;
- /* Thecus specific: only use PSC/PWM 0 */
- psc = (*delay_on * 152-1)/1000;
- err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+ err = pca9532_update_hw_blink(led, *delay_on, *delay_off);
if (err)
return err;
- if (led->state == PCA9532_PWM0)
- pca9532_setpwm(led->client, 0);
+
pca9532_setled(led);
return 0;
@@ -229,9 +271,9 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
/* XXX: allow different kind of beeps with psc/pwm modifications */
if (value > 1 && value < 32767)
- data->pwm[1] = 127;
+ data->pwm[PCA9532_PWM_ID_1] = 127;
else
- data->pwm[1] = 0;
+ data->pwm[PCA9532_PWM_ID_1] = 0;
schedule_work(&data->work);
@@ -246,7 +288,7 @@ static void pca9532_input_work(struct work_struct *work)
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(maxleds, 1),
- data->pwm[1]);
+ data->pwm[PCA9532_PWM_ID_1]);
mutex_unlock(&data->update_lock);
}
@@ -359,6 +401,7 @@ static int pca9532_configure(struct i2c_client *client,
data->psc[i]);
}
+ data->hw_blink = true;
for (i = 0; i < data->chip_info->num_leds; i++) {
struct pca9532_led *led = &data->leds[i];
struct pca9532_led *pled = &pdata->leds[i];
@@ -393,6 +436,8 @@ static int pca9532_configure(struct i2c_client *client,
pca9532_setled(led);
break;
case PCA9532_TYPE_N2100_BEEP:
+		/* PWM1 is reserved for the beeper, so blinking will not use hardware */
+ data->hw_blink = false;
BUG_ON(data->idev);
led->state = PCA9532_PWM1;
pca9532_setled(led);
@@ -475,9 +520,9 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
pdata->gpio_base = -1;
- of_property_read_u8_array(np, "nxp,pwm", &pdata->pwm[0],
+ of_property_read_u8_array(np, "nxp,pwm", &pdata->pwm[PCA9532_PWM_ID_0],
ARRAY_SIZE(pdata->pwm));
- of_property_read_u8_array(np, "nxp,psc", &pdata->psc[0],
+ of_property_read_u8_array(np, "nxp,psc", &pdata->psc[PCA9532_PWM_ID_0],
ARRAY_SIZE(pdata->psc));
for_each_available_child_of_node(np, child) {
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
index 4f01acb75727..49ab8c9a3f29 100644
--- a/drivers/leds/leds-powernv.c
+++ b/drivers/leds/leds-powernv.c
@@ -246,29 +246,25 @@ static int powernv_led_classdev(struct platform_device *pdev,
const char *cur = NULL;
int rc = -1;
struct property *p;
- struct device_node *np;
struct powernv_led_data *powernv_led;
struct device *dev = &pdev->dev;
- for_each_available_child_of_node(led_node, np) {
+ for_each_available_child_of_node_scoped(led_node, np) {
p = of_find_property(np, "led-types", NULL);
while ((cur = of_prop_next_string(p, cur)) != NULL) {
powernv_led = devm_kzalloc(dev, sizeof(*powernv_led),
GFP_KERNEL);
- if (!powernv_led) {
- of_node_put(np);
+ if (!powernv_led)
return -ENOMEM;
- }
powernv_led->common = powernv_led_common;
powernv_led->loc_code = (char *)np->name;
rc = powernv_led_create(dev, powernv_led, cur);
- if (rc) {
- of_node_put(np);
+ if (rc)
return rc;
- }
+
} /* while end */
}
@@ -278,12 +274,11 @@ static int powernv_led_classdev(struct platform_device *pdev,
/* Platform driver probe */
static int powernv_led_probe(struct platform_device *pdev)
{
- struct device_node *led_node;
struct powernv_led_common *powernv_led_common;
struct device *dev = &pdev->dev;
- int rc;
+ struct device_node *led_node
+ __free(device_node) = of_find_node_by_path("/ibm,opal/leds");
- led_node = of_find_node_by_path("/ibm,opal/leds");
if (!led_node) {
dev_err(dev, "%s: LED parent device node not found\n",
__func__);
@@ -292,20 +287,15 @@ static int powernv_led_probe(struct platform_device *pdev)
powernv_led_common = devm_kzalloc(dev, sizeof(*powernv_led_common),
GFP_KERNEL);
- if (!powernv_led_common) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!powernv_led_common)
+ return -ENOMEM;
mutex_init(&powernv_led_common->lock);
powernv_led_common->max_led_type = cpu_to_be64(OPAL_SLOT_LED_TYPE_MAX);
platform_set_drvdata(pdev, powernv_led_common);
- rc = powernv_led_classdev(pdev, led_node, powernv_led_common);
-out:
- of_node_put(led_node);
- return rc;
+ return powernv_led_classdev(pdev, led_node, powernv_led_common);
}
/* Platform driver remove */
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index 96296db5f410..d24d0ddf347c 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -29,10 +29,11 @@
*/
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/spi/spi.h>
#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
#include <uapi/linux/uleds.h>
struct spi_byte_chipdef {
@@ -55,13 +56,6 @@ static const struct spi_byte_chipdef ubnt_acb_spi_led_cdef = {
.max_value = 0x3F,
};
-static const struct of_device_id spi_byte_dt_ids[] = {
- { .compatible = "ubnt,acb-spi-led", .data = &ubnt_acb_spi_led_cdef },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, spi_byte_dt_ids);
-
static int spi_byte_brightness_set_blocking(struct led_classdev *dev,
enum led_brightness brightness)
{
@@ -80,73 +74,60 @@ static int spi_byte_brightness_set_blocking(struct led_classdev *dev,
static int spi_byte_probe(struct spi_device *spi)
{
- struct device_node *child;
+ struct fwnode_handle *child __free(fwnode_handle) = NULL;
struct device *dev = &spi->dev;
struct spi_byte_led *led;
struct led_init_data init_data = {};
- const char *state;
+ enum led_default_state state;
int ret;
- if (of_get_available_child_count(dev_of_node(dev)) != 1) {
+ if (device_get_child_node_count(dev) != 1) {
dev_err(dev, "Device must have exactly one LED sub-node.");
return -EINVAL;
}
- child = of_get_next_available_child(dev_of_node(dev), NULL);
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
+ ret = devm_mutex_init(dev, &led->mutex);
+ if (ret)
+ return ret;
+
led->spi = spi;
- mutex_init(&led->mutex);
led->cdef = device_get_match_data(dev);
led->ldev.brightness = LED_OFF;
led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
- state = of_get_property(child, "default-state", NULL);
- if (state) {
- if (!strcmp(state, "on")) {
- led->ldev.brightness = led->ldev.max_brightness;
- } else if (strcmp(state, "off")) {
- /* all other cases except "off" */
- dev_err(dev, "default-state can only be 'on' or 'off'");
- return -EINVAL;
- }
- }
+ child = device_get_next_child_node(dev, NULL);
+
+ state = led_init_default_state_get(child);
+ if (state == LEDS_DEFSTATE_ON)
+ led->ldev.brightness = led->ldev.max_brightness;
spi_byte_brightness_set_blocking(&led->ldev,
led->ldev.brightness);
- init_data.fwnode = of_fwnode_handle(child);
+ init_data.fwnode = child;
init_data.devicename = "leds-spi-byte";
init_data.default_label = ":";
- ret = devm_led_classdev_register_ext(&spi->dev, &led->ldev, &init_data);
- if (ret) {
- mutex_destroy(&led->mutex);
- return ret;
- }
- spi_set_drvdata(spi, led);
-
- return 0;
+ return devm_led_classdev_register_ext(dev, &led->ldev, &init_data);
}
-static void spi_byte_remove(struct spi_device *spi)
-{
- struct spi_byte_led *led = spi_get_drvdata(spi);
-
- mutex_destroy(&led->mutex);
-}
+static const struct of_device_id spi_byte_dt_ids[] = {
+ { .compatible = "ubnt,acb-spi-led", .data = &ubnt_acb_spi_led_cdef },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_byte_dt_ids);
static struct spi_driver spi_byte_driver = {
.probe = spi_byte_probe,
- .remove = spi_byte_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = spi_byte_dt_ids,
},
};
-
module_spi_driver(spi_byte_driver);
MODULE_AUTHOR("Christian Mauderer <oss@c-mauderer.de>");
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index fcaa34706b6c..2ef9fc7371bd 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -356,8 +356,10 @@ static int ich7_lpc_probe(struct pci_dev *dev,
nas_gpio_pci_dev = dev;
status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
- if (status)
+ if (status) {
+ status = pcibios_err_to_errno(status);
goto out;
+ }
g_pm_io_base &= 0x00000ff80;
status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
@@ -369,8 +371,9 @@ static int ich7_lpc_probe(struct pci_dev *dev,
}
status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
- if (0 > status) {
+ if (status) {
dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
+ status = pcibios_err_to_errno(status);
goto out;
}
dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index 945e831ef4ac..6605e08a042a 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -146,7 +146,7 @@ MODULE_DEVICE_TABLE(of, of_tlc591xx_leds_match);
static int
tlc591xx_probe(struct i2c_client *client)
{
- struct device_node *np, *child;
+ struct device_node *np;
struct device *dev = &client->dev;
const struct tlc591xx *tlc591xx;
struct tlc591xx_priv *priv;
@@ -182,22 +182,20 @@ tlc591xx_probe(struct i2c_client *client)
if (err < 0)
return err;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
struct tlc591xx_led *led;
struct led_init_data init_data = {};
init_data.fwnode = of_fwnode_handle(child);
err = of_property_read_u32(child, "reg", &reg);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
+
if (reg < 0 || reg >= tlc591xx->max_leds ||
- priv->leds[reg].active) {
- of_node_put(child);
+ priv->leds[reg].active)
return -EINVAL;
- }
+
led = &priv->leds[reg];
led->active = true;
@@ -207,12 +205,10 @@ tlc591xx_probe(struct i2c_client *client)
led->ldev.max_brightness = TLC591XX_MAX_BRIGHTNESS;
err = devm_led_classdev_register_ext(dev, &led->ldev,
&init_data);
- if (err < 0) {
- of_node_put(child);
+ if (err < 0)
return dev_err_probe(dev, err,
"couldn't register LED %s\n",
led->ldev.name);
- }
}
return 0;
}
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index b443f8c989fa..39f740be058f 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -534,7 +534,7 @@ static const struct of_device_id of_omnia_leds_match[] = {
};
static const struct i2c_device_id omnia_id[] = {
- { "omnia", 0 },
+ { "omnia" },
{ }
};
MODULE_DEVICE_TABLE(i2c, omnia_id);
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 1138e2ab82e5..d7999e7372a4 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -30,6 +30,5 @@ ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
-extern const char * const led_colors[LED_COLOR_ID_MAX];
#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
index 8fc12d6a2958..222d943d826a 100644
--- a/drivers/leds/rgb/Kconfig
+++ b/drivers/leds/rgb/Kconfig
@@ -17,7 +17,6 @@ config LEDS_GROUP_MULTICOLOR
config LEDS_KTD202X
tristate "LED support for KTD202x Chips"
depends on I2C
- depends on OF
select REGMAP_I2C
help
This option enables support for the Kinetic KTD2026/KTD2027
diff --git a/drivers/leds/rgb/leds-ktd202x.c b/drivers/leds/rgb/leds-ktd202x.c
index 514965795a10..d5c442163c46 100644
--- a/drivers/leds/rgb/leds-ktd202x.c
+++ b/drivers/leds/rgb/leds-ktd202x.c
@@ -99,7 +99,7 @@ struct ktd202x {
struct device *dev;
struct regmap *regmap;
bool enabled;
- int num_leds;
+ unsigned long num_leds;
struct ktd202x_led leds[] __counted_by(num_leds);
};
@@ -381,16 +381,19 @@ static int ktd202x_blink_mc_set(struct led_classdev *cdev,
mc->num_colors);
}
-static int ktd202x_setup_led_rgb(struct ktd202x *chip, struct device_node *np,
+static int ktd202x_setup_led_rgb(struct ktd202x *chip, struct fwnode_handle *fwnode,
struct ktd202x_led *led, struct led_init_data *init_data)
{
+ struct fwnode_handle *child;
struct led_classdev *cdev;
- struct device_node *child;
struct mc_subled *info;
int num_channels;
int i = 0;
- num_channels = of_get_available_child_count(np);
+ num_channels = 0;
+ fwnode_for_each_available_child_node(fwnode, child)
+ num_channels++;
+
if (!num_channels || num_channels > chip->num_leds)
return -EINVAL;
@@ -398,22 +401,22 @@ static int ktd202x_setup_led_rgb(struct ktd202x *chip, struct device_node *np,
if (!info)
return -ENOMEM;
- for_each_available_child_of_node(np, child) {
+ fwnode_for_each_available_child_node(fwnode, child) {
u32 mono_color;
u32 reg;
int ret;
- ret = of_property_read_u32(child, "reg", &reg);
+ ret = fwnode_property_read_u32(child, "reg", &reg);
if (ret != 0 || reg >= chip->num_leds) {
- dev_err(chip->dev, "invalid 'reg' of %pOFn\n", child);
- of_node_put(child);
- return -EINVAL;
+ dev_err(chip->dev, "invalid 'reg' of %pfw\n", child);
+ fwnode_handle_put(child);
+ return ret;
}
- ret = of_property_read_u32(child, "color", &mono_color);
+ ret = fwnode_property_read_u32(child, "color", &mono_color);
if (ret < 0 && ret != -EINVAL) {
- dev_err(chip->dev, "failed to parse 'color' of %pOF\n", child);
- of_node_put(child);
+ dev_err(chip->dev, "failed to parse 'color' of %pfw\n", child);
+ fwnode_handle_put(child);
return ret;
}
@@ -433,16 +436,16 @@ static int ktd202x_setup_led_rgb(struct ktd202x *chip, struct device_node *np,
return devm_led_classdev_multicolor_register_ext(chip->dev, &led->mcdev, init_data);
}
-static int ktd202x_setup_led_single(struct ktd202x *chip, struct device_node *np,
+static int ktd202x_setup_led_single(struct ktd202x *chip, struct fwnode_handle *fwnode,
struct ktd202x_led *led, struct led_init_data *init_data)
{
struct led_classdev *cdev;
u32 reg;
int ret;
- ret = of_property_read_u32(np, "reg", &reg);
+ ret = fwnode_property_read_u32(fwnode, "reg", &reg);
if (ret != 0 || reg >= chip->num_leds) {
- dev_err(chip->dev, "invalid 'reg' of %pOFn\n", np);
+ dev_err(chip->dev, "invalid 'reg' of %pfw\n", fwnode);
return -EINVAL;
}
led->index = reg;
@@ -454,7 +457,7 @@ static int ktd202x_setup_led_single(struct ktd202x *chip, struct device_node *np
return devm_led_classdev_register_ext(chip->dev, &led->cdev, init_data);
}
-static int ktd202x_add_led(struct ktd202x *chip, struct device_node *np, unsigned int index)
+static int ktd202x_add_led(struct ktd202x *chip, struct fwnode_handle *fwnode, unsigned int index)
{
struct ktd202x_led *led = &chip->leds[index];
struct led_init_data init_data = {};
@@ -463,21 +466,21 @@ static int ktd202x_add_led(struct ktd202x *chip, struct device_node *np, unsigne
int ret;
/* Color property is optional in single color case */
- ret = of_property_read_u32(np, "color", &color);
+ ret = fwnode_property_read_u32(fwnode, "color", &color);
if (ret < 0 && ret != -EINVAL) {
- dev_err(chip->dev, "failed to parse 'color' of %pOF\n", np);
+ dev_err(chip->dev, "failed to parse 'color' of %pfw\n", fwnode);
return ret;
}
led->chip = chip;
- init_data.fwnode = of_fwnode_handle(np);
+ init_data.fwnode = fwnode;
if (color == LED_COLOR_ID_RGB) {
cdev = &led->mcdev.led_cdev;
- ret = ktd202x_setup_led_rgb(chip, np, led, &init_data);
+ ret = ktd202x_setup_led_rgb(chip, fwnode, led, &init_data);
} else {
cdev = &led->cdev;
- ret = ktd202x_setup_led_single(chip, np, led, &init_data);
+ ret = ktd202x_setup_led_single(chip, fwnode, led, &init_data);
}
if (ret) {
@@ -490,15 +493,14 @@ static int ktd202x_add_led(struct ktd202x *chip, struct device_node *np, unsigne
return 0;
}
-static int ktd202x_probe_dt(struct ktd202x *chip)
+static int ktd202x_probe_fw(struct ktd202x *chip)
{
- struct device_node *np = dev_of_node(chip->dev), *child;
+ struct fwnode_handle *child;
+ struct device *dev = chip->dev;
int count;
int i = 0;
- chip->num_leds = (int)(unsigned long)of_device_get_match_data(chip->dev);
-
- count = of_get_available_child_count(np);
+ count = device_get_child_node_count(dev);
if (!count || count > chip->num_leds)
return -EINVAL;
@@ -507,11 +509,11 @@ static int ktd202x_probe_dt(struct ktd202x *chip)
/* Allow the device to execute the complete reset */
usleep_range(200, 300);
- for_each_available_child_of_node(np, child) {
+ device_for_each_child_node(dev, child) {
int ret = ktd202x_add_led(chip, child, i);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
i++;
@@ -554,6 +556,12 @@ static int ktd202x_probe(struct i2c_client *client)
return ret;
}
+ ret = devm_mutex_init(dev, &chip->mutex);
+ if (ret)
+ return ret;
+
+ chip->num_leds = (unsigned long)i2c_get_match_data(client);
+
chip->regulators[0].supply = "vin";
chip->regulators[1].supply = "vio";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(chip->regulators), chip->regulators);
@@ -568,7 +576,7 @@ static int ktd202x_probe(struct i2c_client *client)
return ret;
}
- ret = ktd202x_probe_dt(chip);
+ ret = ktd202x_probe_fw(chip);
if (ret < 0) {
regulator_bulk_disable(ARRAY_SIZE(chip->regulators), chip->regulators);
return ret;
@@ -580,8 +588,6 @@ static int ktd202x_probe(struct i2c_client *client)
return ret;
}
- mutex_init(&chip->mutex);
-
return 0;
}
@@ -590,8 +596,6 @@ static void ktd202x_remove(struct i2c_client *client)
struct ktd202x *chip = i2c_get_clientdata(client);
ktd202x_chip_disable(chip);
-
- mutex_destroy(&chip->mutex);
}
static void ktd202x_shutdown(struct i2c_client *client)
@@ -602,10 +606,17 @@ static void ktd202x_shutdown(struct i2c_client *client)
regmap_write(chip->regmap, KTD202X_REG_RESET_CONTROL, KTD202X_RSTR_RESET);
}
+static const struct i2c_device_id ktd202x_id[] = {
+ {"ktd2026", KTD2026_NUM_LEDS},
+ {"ktd2027", KTD2027_NUM_LEDS},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ktd202x_id);
+
static const struct of_device_id ktd202x_match_table[] = {
{ .compatible = "kinetic,ktd2026", .data = (void *)KTD2026_NUM_LEDS },
{ .compatible = "kinetic,ktd2027", .data = (void *)KTD2027_NUM_LEDS },
- {},
+ {}
};
MODULE_DEVICE_TABLE(of, ktd202x_match_table);
@@ -617,6 +628,7 @@ static struct i2c_driver ktd202x_driver = {
.probe = ktd202x_probe,
.remove = ktd202x_remove,
.shutdown = ktd202x_shutdown,
+ .id_table = ktd202x_id,
};
module_i2c_driver(ktd202x_driver);
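The ktd202x hunks above pair a new I2C id table with the existing OF table and fetch the per-variant LED count through i2c_get_match_data(), which returns the match data for either match path. A minimal sketch of that pattern, using a hypothetical "foo" driver (names and values are illustrative, not from this patch):

	#include <linux/i2c.h>
	#include <linux/mod_devicetable.h>

	#define FOO_NUM_LEDS_V1	2
	#define FOO_NUM_LEDS_V2	3

	static const struct i2c_device_id foo_id[] = {
		{ "foo-v1", FOO_NUM_LEDS_V1 },
		{ "foo-v2", FOO_NUM_LEDS_V2 },
		{ }
	};

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-v1", .data = (void *)FOO_NUM_LEDS_V1 },
		{ .compatible = "vendor,foo-v2", .data = (void *)FOO_NUM_LEDS_V2 },
		{ }
	};

	static int foo_probe(struct i2c_client *client)
	{
		/* Valid for both OF and id-table matches, unlike of_device_get_match_data() */
		unsigned long num_leds = (unsigned long)i2c_get_match_data(client);

		return num_leds ? 0 : -ENODEV;
	}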
diff --git a/drivers/leds/rgb/leds-ncp5623.c b/drivers/leds/rgb/leds-ncp5623.c
index 2be4ff918516..f18156683375 100644
--- a/drivers/leds/rgb/leds-ncp5623.c
+++ b/drivers/leds/rgb/leds-ncp5623.c
@@ -183,16 +183,12 @@ static int ncp5623_probe(struct i2c_client *client)
fwnode_for_each_available_child_node(mc_node, led_node) {
ret = fwnode_property_read_u32(led_node, "color", &color_index);
- if (ret) {
- fwnode_handle_put(led_node);
- goto release_mc_node;
- }
+ if (ret)
+ goto release_led_node;
ret = fwnode_property_read_u32(led_node, "reg", &reg);
- if (ret) {
- fwnode_handle_put(led_node);
- goto release_mc_node;
- }
+ if (ret)
+ goto release_led_node;
subled_info[ncp->mc_dev.num_colors].channel = reg;
subled_info[ncp->mc_dev.num_colors++].color_index = color_index;
@@ -223,6 +219,10 @@ release_mc_node:
fwnode_handle_put(mc_node);
return ret;
+
+release_led_node:
+ fwnode_handle_put(led_node);
+ goto release_mc_node;
}
static void ncp5623_remove(struct i2c_client *client)
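The ncp5623 rework above keeps a single unwind path by placing the release_led_node label after the function's final return: it drops the child reference taken by the iterator, then chains to the shared release_mc_node cleanup. A rough sketch of that unwind shape with hypothetical names (not the driver's actual code):

	#include <linux/property.h>

	static int example_parse_children(struct fwnode_handle *mc_node)
	{
		struct fwnode_handle *led_node;
		int ret = 0;

		fwnode_for_each_available_child_node(mc_node, led_node) {
			if (!fwnode_property_present(led_node, "reg")) {
				ret = -EINVAL;
				goto release_led_node;
			}
		}

	release_mc_node:
		fwnode_handle_put(mc_node);
		return ret;

	release_led_node:
		/* The iterator holds a reference on led_node; drop it before unwinding. */
		fwnode_handle_put(led_node);
		goto release_mc_node;
	}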
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 9467c796bd04..e74b2ceed1c2 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2017-2022 Linaro Ltd
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bits.h>
#include <linux/bitfield.h>
@@ -254,6 +254,9 @@ static int lpg_clear_pbs_trigger(struct lpg *lpg, unsigned int lut_mask)
u8 val = 0;
int rc;
+ if (!lpg->lpg_chan_sdam)
+ return 0;
+
lpg->pbs_en_bitmap &= (~lut_mask);
if (!lpg->pbs_en_bitmap) {
rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val);
@@ -276,6 +279,9 @@ static int lpg_set_pbs_trigger(struct lpg *lpg, unsigned int lut_mask)
u8 val = PBS_SW_TRIG_BIT;
int rc;
+ if (!lpg->lpg_chan_sdam)
+ return 0;
+
if (!lpg->pbs_en_bitmap) {
rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val);
if (rc < 0)
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c b/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c
index 4183ee71fcce..726c186391af 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c
@@ -60,6 +60,7 @@ static struct platform_driver simatic_ipc_led_gpio_apollolake_driver = {
};
module_platform_driver(simatic_ipc_led_gpio_apollolake_driver);
+MODULE_DESCRIPTION("LED driver for Siemens Simatic IPCs based on Intel Apollo Lake GPIO");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-leds-gpio-core platform:apollolake-pinctrl");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
index 85003fd7f1aa..9bc5f361a06b 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
@@ -102,6 +102,7 @@ out:
}
EXPORT_SYMBOL_GPL(simatic_ipc_leds_gpio_probe);
+MODULE_DESCRIPTION("Siemens SIMATIC IPC core driver for GPIO based LEDs");
MODULE_LICENSE("GPL v2");
MODULE_SOFTDEP("pre: platform:leds-gpio");
MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
index 4a53d4dbf52f..3fec96c549c1 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
@@ -50,6 +50,7 @@ static struct platform_driver simatic_ipc_led_gpio_elkhartlake_driver = {
};
module_platform_driver(simatic_ipc_led_gpio_elkhartlake_driver);
+MODULE_DESCRIPTION("LED driver for Siemens Simatic IPCs based on Intel Elkhart Lake GPIO");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-leds-gpio-core platform:elkhartlake-pinctrl");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c b/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c
index 7a5018639aaf..a1f10952513c 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c
@@ -100,6 +100,7 @@ static struct platform_driver simatic_ipc_led_gpio_driver = {
};
module_platform_driver(simatic_ipc_led_gpio_driver);
+MODULE_DESCRIPTION("LED driver for Siemens Simatic IPCs based on Nuvoton GPIO");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-leds-gpio-core gpio_f7188x");
diff --git a/drivers/leds/simple/simatic-ipc-leds.c b/drivers/leds/simple/simatic-ipc-leds.c
index 2124f6d09930..348679f0d1b7 100644
--- a/drivers/leds/simple/simatic-ipc-leds.c
+++ b/drivers/leds/simple/simatic-ipc-leds.c
@@ -128,6 +128,7 @@ static struct platform_driver simatic_ipc_led_driver = {
};
module_platform_driver(simatic_ipc_led_driver);
+MODULE_DESCRIPTION("LED driver for Siemens Simatic IPCs");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 31576952e181..c11282a74b5a 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -145,4 +145,20 @@ config LEDS_TRIGGER_TTY
 When built as a module this driver will be called ledtrig-tty.
+config LEDS_TRIGGER_INPUT_EVENTS
+ tristate "LED Input events trigger"
+ depends on INPUT
+ help
+ Turn LEDs on when there is input (/dev/input/event*) activity and turn
+ them back off again after there has been no activity for 5 seconds.
+
+ This is primarily intended to control LEDs which are a backlight for
+ capacitive touch-buttons, such as the menu / home / back buttons
+ found on the bottom bezel of many older smartphones and tablets.
+
+ This can also be used to turn on the keyboard backlight LED on
+ input events and turn the keyboard backlight off again when idle.
+
+ When built as a module this driver will be called ledtrig-input-events.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 242f6c4e3453..3b3628889f68 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
obj-$(CONFIG_LEDS_TRIGGER_PATTERN) += ledtrig-pattern.o
obj-$(CONFIG_LEDS_TRIGGER_TTY) += ledtrig-tty.o
+obj-$(CONFIG_LEDS_TRIGGER_INPUT_EVENTS) += ledtrig-input-events.o
diff --git a/drivers/leds/trigger/ledtrig-input-events.c b/drivers/leds/trigger/ledtrig-input-events.c
new file mode 100644
index 000000000000..1c79731562c2
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-input-events.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Input Events LED trigger
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/input.h>
+#include <linux/jiffies.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include "../leds.h"
+
+static unsigned long led_off_delay_ms = 5000;
+module_param(led_off_delay_ms, ulong, 0644);
+MODULE_PARM_DESC(led_off_delay_ms,
+ "Specify delay in ms for turning LEDs off after last input event");
+
+static struct input_events_data {
+ struct delayed_work work;
+ spinlock_t lock;
+ /* To avoid repeatedly setting the brightness while there are events */
+ bool led_on;
+ unsigned long led_off_time;
+} input_events_data;
+
+static struct led_trigger *input_events_led_trigger;
+
+static void led_input_events_work(struct work_struct *work)
+{
+ struct input_events_data *data =
+ container_of(work, struct input_events_data, work.work);
+
+ spin_lock_irq(&data->lock);
+
+ /*
+ * This time_after_eq() check avoids a race where this work starts
+ * running before a new event pushed led_off_time back.
+ */
+ if (time_after_eq(jiffies, data->led_off_time)) {
+ led_trigger_event(input_events_led_trigger, LED_OFF);
+ data->led_on = false;
+ }
+
+ spin_unlock_irq(&data->lock);
+}
+
+static void input_events_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int val)
+{
+ struct input_events_data *data = &input_events_data;
+ unsigned long led_off_delay = msecs_to_jiffies(led_off_delay_ms);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ if (!data->led_on) {
+ led_trigger_event(input_events_led_trigger, LED_FULL);
+ data->led_on = true;
+ }
+ data->led_off_time = jiffies + led_off_delay;
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ mod_delayed_work(system_wq, &data->work, led_off_delay);
+}
+
+static int input_events_connect(struct input_handler *handler, struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int ret;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = KBUILD_MODNAME;
+
+ ret = input_register_handle(handle);
+ if (ret)
+ goto err_free_handle;
+
+ ret = input_open_device(handle);
+ if (ret)
+ goto err_unregister_handle;
+
+ return 0;
+
+err_unregister_handle:
+ input_unregister_handle(handle);
+err_free_handle:
+ kfree(handle);
+ return ret;
+}
+
+static void input_events_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id input_events_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_REL) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_ABS) },
+ },
+ { }
+};
+
+static struct input_handler input_events_handler = {
+ .name = KBUILD_MODNAME,
+ .event = input_events_event,
+ .connect = input_events_connect,
+ .disconnect = input_events_disconnect,
+ .id_table = input_events_ids,
+};
+
+static int __init input_events_init(void)
+{
+ int ret;
+
+ INIT_DELAYED_WORK(&input_events_data.work, led_input_events_work);
+ spin_lock_init(&input_events_data.lock);
+
+ led_trigger_register_simple("input-events", &input_events_led_trigger);
+
+ ret = input_register_handler(&input_events_handler);
+ if (ret) {
+ led_trigger_unregister_simple(input_events_led_trigger);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit input_events_exit(void)
+{
+ input_unregister_handler(&input_events_handler);
+ cancel_delayed_work_sync(&input_events_data.work);
+ led_trigger_unregister_simple(input_events_led_trigger);
+}
+
+module_init(input_events_init);
+module_exit(input_events_exit);
+
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("Input Events LED trigger");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ledtrig:input-events");
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index b4688d1d9d2b..1d213c999d40 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -110,11 +110,6 @@ static int timer_trig_activate(struct led_classdev *led_cdev)
led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
}
- /*
- * If "set brightness to 0" is pending in workqueue, we don't
- * want that to be reordered after blink_set()
- */
- flush_work(&led_cdev->set_brightness_work);
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 7c90bac3de21..4acf5612487c 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -850,7 +850,6 @@ static int xlnx_mbox_init_sgi(struct platform_device *pdev,
return ret;
}
- irq_to_desc(pdata->virq_sgi);
irq_set_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
/* Setup function for the CPU hot-plug cases */
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index ce13c272c387..48ce750bf70a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -129,12 +129,9 @@ static inline bool can_inc_bucket_gen(struct bucket *b)
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
- BUG_ON(!ca->set->gc_mark_valid);
-
- return (!GC_MARK(b) ||
- GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
- !atomic_read(&b->pin) &&
- can_inc_bucket_gen(b);
+ return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
+ ((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
+ !atomic_read(&b->pin) && can_inc_bucket_gen(b));
}
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -148,6 +145,7 @@ void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
bch_inc_gen(ca, b);
b->prio = INITIAL_PRIO;
atomic_inc(&b->pin);
+ b->reclaimable_in_gc = 0;
}
static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -352,8 +350,7 @@ static int bch_allocator_thread(void *arg)
*/
retry_invalidate:
- allocator_wait(ca, ca->set->gc_mark_valid &&
- !ca->invalidate_needs_gc);
+ allocator_wait(ca, !ca->invalidate_needs_gc);
invalidate_buckets(ca);
/*
@@ -501,8 +498,8 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
ca = c->cache;
b = bch_bucket_alloc(ca, reserve, wait);
- if (b == -1)
- goto err;
+ if (b < 0)
+ return -1;
k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
bucket_to_sector(c, b),
@@ -511,10 +508,6 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
SET_KEY_PTRS(k, 1);
return 0;
-err:
- bch_bucket_free(c, k);
- bkey_put(c, k);
- return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4e6afa89921f..1d33e40d26ea 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -200,6 +200,7 @@ struct bucket {
uint8_t gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint16_t gc_mark; /* Bitfield used by GC. See below for field */
+ uint16_t reclaimable_in_gc:1;
};
/*
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index d011a7154d33..4e6ccf2c8a0b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1741,18 +1741,20 @@ static void btree_gc_start(struct cache_set *c)
mutex_lock(&c->bucket_lock);
- c->gc_mark_valid = 0;
c->gc_done = ZERO_KEY;
ca = c->cache;
for_each_bucket(b, ca) {
b->last_gc = b->gen;
+ if (bch_can_invalidate_bucket(ca, b))
+ b->reclaimable_in_gc = 1;
if (!atomic_read(&b->pin)) {
SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
}
}
+ c->gc_mark_valid = 0;
mutex_unlock(&c->bucket_lock);
}
@@ -1809,6 +1811,9 @@ static void bch_btree_gc_finish(struct cache_set *c)
for_each_bucket(b, ca) {
c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+ if (b->reclaimable_in_gc)
+ b->reclaimable_in_gc = 0;
+
if (atomic_read(&b->pin))
continue;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 83d112bd2b1c..af345dc6fde1 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -369,10 +369,24 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
struct io *i;
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
- c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
(bio_op(bio) == REQ_OP_DISCARD))
goto skip;
+ if (c->gc_stats.in_use > CUTOFF_CACHE_ADD) {
+ /*
+ * If all cached buckets are clean now, 'true' will be
+ * returned and all requests will bypass the cache device,
+ * so c->sectors_to_gc never gets a chance to go negative,
+ * the gc thread is never woken up and caching stops working
+ * for good. Call force_wake_up_gc() here to avoid that.
+ */
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_CLEAN &&
+ c->gc_mark_valid)
+ force_wake_up_gc(c);
+
+ goto skip;
+ }
+
if (mode == CACHE_MODE_NONE ||
(mode == CACHE_MODE_WRITEAROUND &&
op_is_write(bio_op(bio))))
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4d11fc664cb0..b5d6ef430b86 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -897,7 +897,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
sector_t sectors, struct block_device *cached_bdev,
const struct block_device_operations *ops)
{
- struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
SIZE_MAX / sizeof(atomic_t));
struct queue_limits lim = {
@@ -909,6 +908,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
.io_min = block_size,
.logical_block_size = block_size,
.physical_block_size = block_size,
+ .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
};
uint64_t n;
int idx;
@@ -974,13 +974,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->minors = BCACHE_MINORS;
d->disk->fops = ops;
d->disk->private_data = d;
-
- q = d->disk->queue;
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
-
- blk_queue_write_cache(q, true, true);
-
return 0;
out_bioset_exit:
@@ -1423,8 +1416,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
}
if (bdev_io_opt(dc->bdev))
- dc->partial_stripes_expensive =
- q->limits.raid_partial_stripes_expensive;
+ dc->partial_stripes_expensive = !!(q->limits.features &
+ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE);
ret = bcache_device_init(&dc->disk, block_size,
bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 16884b585053..2d8dd9283ff4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3403,7 +3403,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
limits->discard_granularity = origin_limits->discard_granularity;
limits->discard_alignment = origin_limits->discard_alignment;
- limits->discard_misaligned = origin_limits->discard_misaligned;
}
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index ad79b52ffc14..b4384a8b13e3 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2059,7 +2059,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
limits->discard_granularity = dest_limits->discard_granularity;
limits->discard_alignment = dest_limits->discard_alignment;
- limits->discard_misaligned = dest_limits->discard_misaligned;
limits->max_discard_segments = dest_limits->max_discard_segments;
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 08700bfc3e23..14a44c0f8286 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -206,7 +206,6 @@ struct dm_table {
bool integrity_supported:1;
bool singleton:1;
- unsigned integrity_added:1;
/*
* Indicates the rw permissions for the new logical device. This
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1b7a97cc3779..6c013ceb0e5f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1176,8 +1176,8 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
struct mapped_device *md = dm_table_get_md(ti->table);
- /* From now we require underlying device with our integrity profile */
- if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
+ /* We require an underlying device with non-PI metadata */
+ if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) {
ti->error = "Integrity profile not supported.";
return -EINVAL;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 417fddebe367..2a89f8eb4713 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -350,25 +350,6 @@ static struct kmem_cache *journal_io_cache;
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif
-static void dm_integrity_prepare(struct request *rq)
-{
-}
-
-static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
-{
-}
-
-/*
- * DM Integrity profile, protection is performed layer above (dm-crypt)
- */
-static const struct blk_integrity_profile dm_integrity_profile = {
- .name = "DM-DIF-EXT-TAG",
- .generate_fn = NULL,
- .verify_fn = NULL,
- .prepare_fn = dm_integrity_prepare,
- .complete_fn = dm_integrity_complete,
-};
-
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
@@ -3494,6 +3475,17 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
limits->dma_alignment = limits->logical_block_size - 1;
limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
}
+
+ if (!ic->internal_hash) {
+ struct blk_integrity *bi = &limits->integrity;
+
+ memset(bi, 0, sizeof(*bi));
+ bi->tuple_size = ic->tag_size;
+ bi->tag_size = bi->tuple_size;
+ bi->interval_exp =
+ ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
+ }
+
limits->max_integrity_segments = USHRT_MAX;
}
@@ -3650,20 +3642,6 @@ try_smaller_buffer:
return 0;
}
-static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
-{
- struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
- struct blk_integrity bi;
-
- memset(&bi, 0, sizeof(bi));
- bi.profile = &dm_integrity_profile;
- bi.tuple_size = ic->tag_size;
- bi.tag_size = bi.tuple_size;
- bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
-
- blk_integrity_register(disk, &bi);
-}
-
static void dm_integrity_free_page_list(struct page_list *pl)
{
unsigned int i;
@@ -4649,9 +4627,6 @@ try_smaller_buffer:
}
}
- if (!ic->internal_hash)
- dm_integrity_set(ti, ic);
-
ti->num_flush_bios = 1;
ti->flush_supported = true;
if (ic->discard)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index abe88d1e6735..052c00c1eb15 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3542,7 +3542,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
recovery = rs->md.recovery;
state = decipher_sync_action(mddev, recovery);
progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
- resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
+ resync_mismatches = mddev->last_sync_action == ACTION_CHECK ?
atomic64_read(&mddev->resync_mismatches) : 0;
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cc66a27c363a..92d18dcdb3e9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,6 +425,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.logical_block_size,
q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT);
+
+ /*
+ * Only stack the integrity profile if the target doesn't have native
+ * integrity support.
+ */
+ if (!dm_target_has_integrity(ti->type))
+ queue_limits_stack_integrity_bdev(limits, bdev);
return 0;
}
@@ -572,6 +579,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
return 0;
}
+static void dm_set_stacking_limits(struct queue_limits *limits)
+{
+ blk_set_stacking_limits(limits);
+ limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
+}
+
/*
 * Impose necessary and sufficient conditions on a device's table such
* that any incoming bio which respects its logical_block_size can be
@@ -610,7 +623,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
for (i = 0; i < t->num_targets; i++) {
ti = dm_table_get_target(t, i);
- blk_set_stacking_limits(&ti_limits);
+ dm_set_stacking_limits(&ti_limits);
/* combine all target devices' limits */
if (ti->type->iterate_devices)
@@ -702,9 +715,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->immutable_target_type = ti->type;
}
- if (dm_target_has_integrity(ti->type))
- t->integrity_added = 1;
-
ti->table = t;
ti->begin = start;
ti->len = len;
@@ -1014,14 +1024,13 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
-static bool dm_table_supports_poll(struct dm_table *t);
-
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
unsigned int per_io_data_size = 0, front_pad, io_front_pad;
unsigned int min_pool_size = 0, pool_size;
struct dm_md_mempools *pools;
+ unsigned int bioset_flags = 0;
if (unlikely(type == DM_TYPE_NONE)) {
DMERR("no table type is set, can't allocate mempools");
@@ -1038,6 +1047,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
goto init_bs;
}
+ if (md->queue->limits.features & BLK_FEAT_POLL)
+ bioset_flags |= BIOSET_PERCPU_CACHE;
+
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@@ -1050,8 +1062,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
io_front_pad = roundup(per_io_data_size,
__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
- if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
- dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
+ if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
goto out_free_pools;
if (t->integrity_supported &&
bioset_integrity_create(&pools->io_bs, pool_size))
@@ -1119,99 +1130,6 @@ static int dm_table_build_index(struct dm_table *t)
return r;
}
-static bool integrity_profile_exists(struct gendisk *disk)
-{
- return !!blk_get_integrity(disk);
-}
-
-/*
- * Get a disk whose integrity profile reflects the table's profile.
- * Returns NULL if integrity support was inconsistent or unavailable.
- */
-static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
-{
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_dev_internal *dd = NULL;
- struct gendisk *prev_disk = NULL, *template_disk = NULL;
-
- for (unsigned int i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = dm_table_get_target(t, i);
-
- if (!dm_target_passes_integrity(ti->type))
- goto no_integrity;
- }
-
- list_for_each_entry(dd, devices, list) {
- template_disk = dd->dm_dev->bdev->bd_disk;
- if (!integrity_profile_exists(template_disk))
- goto no_integrity;
- else if (prev_disk &&
- blk_integrity_compare(prev_disk, template_disk) < 0)
- goto no_integrity;
- prev_disk = template_disk;
- }
-
- return template_disk;
-
-no_integrity:
- if (prev_disk)
- DMWARN("%s: integrity not set: %s and %s profile mismatch",
- dm_device_name(t->md),
- prev_disk->disk_name,
- template_disk->disk_name);
- return NULL;
-}
-
-/*
- * Register the mapped device for blk_integrity support if the
- * underlying devices have an integrity profile. But all devices may
- * not have matching profiles (checking all devices isn't reliable
- * during table load because this table may use other DM device(s) which
- * must be resumed before they will have an initialized integity
- * profile). Consequently, stacked DM devices force a 2 stage integrity
- * profile validation: First pass during table load, final pass during
- * resume.
- */
-static int dm_table_register_integrity(struct dm_table *t)
-{
- struct mapped_device *md = t->md;
- struct gendisk *template_disk = NULL;
-
- /* If target handles integrity itself do not register it here. */
- if (t->integrity_added)
- return 0;
-
- template_disk = dm_table_get_integrity_disk(t);
- if (!template_disk)
- return 0;
-
- if (!integrity_profile_exists(dm_disk(md))) {
- t->integrity_supported = true;
- /*
- * Register integrity profile during table load; we can do
- * this because the final profile must match during resume.
- */
- blk_integrity_register(dm_disk(md),
- blk_get_integrity(template_disk));
- return 0;
- }
-
- /*
- * If DM device already has an initialized integrity
- * profile the new profile should not conflict.
- */
- if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
- DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
- dm_device_name(t->md),
- template_disk->disk_name);
- return 1;
- }
-
- /* Preserve existing integrity profile */
- t->integrity_supported = true;
- return 0;
-}
-
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct dm_crypto_profile {
@@ -1423,12 +1341,6 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_register_integrity(t);
- if (r) {
- DMERR("could not register integrity profile.");
- return r;
- }
-
r = dm_table_construct_crypto_profile(t);
if (r) {
DMERR("could not construct crypto profile.");
@@ -1493,14 +1405,6 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
-static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
-}
-
/*
* type->iterate_devices() should be called when the sanity check needs to
* iterate and check all underlying data devices. iterate_devices() will
@@ -1548,19 +1452,6 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
-static bool dm_table_supports_poll(struct dm_table *t)
-{
- for (unsigned int i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = dm_table_get_target(t, i);
-
- if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
- return false;
- }
-
- return true;
-}
-
/*
* Check whether a table has no data devices attached using each
* target's iterate_devices method.
@@ -1686,12 +1577,20 @@ int dm_calculate_queue_limits(struct dm_table *t,
unsigned int zone_sectors = 0;
bool zoned = false;
- blk_set_stacking_limits(limits);
+ dm_set_stacking_limits(limits);
+
+ t->integrity_supported = true;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (!dm_target_passes_integrity(ti->type))
+ t->integrity_supported = false;
+ }
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
- blk_set_stacking_limits(&ti_limits);
+ dm_set_stacking_limits(&ti_limits);
if (!ti->type->iterate_devices) {
/* Set I/O hints portion of queue limits */
@@ -1706,12 +1605,12 @@ int dm_calculate_queue_limits(struct dm_table *t,
ti->type->iterate_devices(ti, dm_set_device_limits,
&ti_limits);
- if (!zoned && ti_limits.zoned) {
+ if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) {
/*
* After stacking all limits, validate all devices
* in table support this zoned model and zone sectors.
*/
- zoned = ti_limits.zoned;
+ zoned = (ti_limits.features & BLK_FEAT_ZONED);
zone_sectors = ti_limits.chunk_sectors;
}
@@ -1738,6 +1637,18 @@ combine_limits:
dm_device_name(t->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
+
+ if (t->integrity_supported ||
+ dm_target_has_integrity(ti->type)) {
+ if (!queue_limits_stack_integrity(limits, &ti_limits)) {
+ DMWARN("%s: adding target device (start sect %llu len %llu) "
+ "disabled integrity support due to incompatibility",
+ dm_device_name(t->md),
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len);
+ t->integrity_supported = false;
+ }
+ }
}
/*
@@ -1747,12 +1658,12 @@ combine_limits:
* zoned model on host-managed zoned block devices.
* BUT...
*/
- if (limits->zoned) {
+ if (limits->features & BLK_FEAT_ZONED) {
/*
* ...IF the above limits stacking determined a zoned model
* validate that all of the table's devices conform to it.
*/
- zoned = limits->zoned;
+ zoned = limits->features & BLK_FEAT_ZONED;
zone_sectors = limits->chunk_sectors;
}
if (validate_hardware_zoned(t, zoned, zone_sectors))
@@ -1762,63 +1673,15 @@ combine_limits:
}
/*
- * Verify that all devices have an integrity profile that matches the
- * DM device's registered integrity profile. If the profiles don't
- * match then unregister the DM device's integrity profile.
+ * Check if a target requires flush support even if none of the underlying
+ * devices need it (e.g. to persist target-specific metadata).
*/
-static void dm_table_verify_integrity(struct dm_table *t)
-{
- struct gendisk *template_disk = NULL;
-
- if (t->integrity_added)
- return;
-
- if (t->integrity_supported) {
- /*
- * Verify that the original integrity profile
- * matches all the devices in this table.
- */
- template_disk = dm_table_get_integrity_disk(t);
- if (template_disk &&
- blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
- return;
- }
-
- if (integrity_profile_exists(dm_disk(t->md))) {
- DMWARN("%s: unable to establish an integrity profile",
- dm_device_name(t->md));
- blk_integrity_unregister(dm_disk(t->md));
- }
-}
-
-static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- unsigned long flush = (unsigned long) data;
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return (q->queue_flags & flush);
-}
-
-static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
+static bool dm_table_supports_flush(struct dm_table *t)
{
- /*
- * Require at least one underlying device to support flushes.
- * t->devices includes internal dm devices such as mirror logs
- * so we need to use iterate_devices here, which targets
- * supporting flushes must provide.
- */
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
- if (!ti->num_flush_bios)
- continue;
-
- if (ti->flush_supported)
- return true;
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
+ if (ti->num_flush_bios && ti->flush_supported)
return true;
}
@@ -1839,20 +1702,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
return false;
}
-static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- return !bdev_nonrot(dev->bdev);
-}
-
-static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_add_random(q);
-}
-
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1877,12 +1726,6 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
return true;
}
-static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- return !bdev_nowait(dev->bdev);
-}
-
static bool dm_table_supports_nowait(struct dm_table *t)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
@@ -1890,10 +1733,6 @@ static bool dm_table_supports_nowait(struct dm_table *t)
if (!dm_target_supports_nowait(ti->type))
return false;
-
- if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
- return false;
}
return true;
@@ -1950,29 +1789,25 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
-static int device_requires_stable_pages(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
-{
- return bdev_stable_writes(dev->bdev);
-}
-
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
- bool wc = false, fua = false;
int r;
- if (dm_table_supports_nowait(t))
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
+ if (!dm_table_supports_nowait(t))
+ limits->features &= ~BLK_FEAT_NOWAIT;
+
+ /*
+ * The current polling implementation does not support request based
+ * stacking.
+ */
+ if (!__table_type_bio_based(t->type))
+ limits->features &= ~BLK_FEAT_POLL;
if (!dm_table_supports_discards(t)) {
limits->max_hw_discard_sectors = 0;
limits->discard_granularity = 0;
limits->discard_alignment = 0;
- limits->discard_misaligned = 0;
}
if (!dm_table_supports_write_zeroes(t))
@@ -1981,86 +1816,43 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_supports_secure_erase(t))
limits->max_secure_erase_sectors = 0;
- r = queue_limits_set(q, limits);
- if (r)
- return r;
-
- if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
- wc = true;
- if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
- fua = true;
- }
- blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_flush(t))
+ limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
if (dm_table_supports_dax(t, device_not_dax_capable)) {
- blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ limits->features |= BLK_FEAT_DAX;
if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
set_dax_synchronous(t->md->dax_dev);
} else
- blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+ limits->features &= ~BLK_FEAT_DAX;
if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
dax_write_cache(t->md->dax_dev, true);
- /* Ensure that all underlying devices are non-rotational. */
- if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-
- dm_table_verify_integrity(t);
-
- /*
- * Some devices don't use blk_integrity but still want stable pages
- * because they do their own checksumming.
- * If any underlying device requires stable pages, a table must require
- * them as well. Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
- if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
-
- /*
- * Determine whether or not this queue's I/O timings contribute
- * to the entropy pool, Only request-based targets use this.
- * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
- * have it set.
- */
- if (blk_queue_add_random(q) &&
- dm_table_any_dev_attr(t, device_is_not_random, NULL))
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-
- /*
- * For a zoned target, setup the zones related queue attributes
- * and resources necessary for zone append emulation if necessary.
- */
- if (blk_queue_is_zoned(q)) {
- r = dm_set_zones_restrictions(t, q);
+ /* For a zoned table, setup the zone related queue attributes. */
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (limits->features & BLK_FEAT_ZONED)) {
+ r = dm_set_zones_restrictions(t, q, limits);
if (r)
return r;
- if (blk_queue_is_zoned(q) &&
- !static_key_enabled(&zoned_enabled.key))
- static_branch_enable(&zoned_enabled);
}
- dm_update_crypto_profile(q, t);
+ r = queue_limits_set(q, limits);
+ if (r)
+ return r;
/*
- * Check for request-based device is left to
- * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
- *
- * For bio-based device, only set QUEUE_FLAG_POLL when all
- * underlying devices supporting polling.
+ * Now that the limits are set, check the zones mapped by the table
+ * and setup the resources for zone append emulation if necessary.
*/
- if (__table_type_bio_based(t->type)) {
- if (dm_table_supports_poll(t))
- blk_queue_flag_set(QUEUE_FLAG_POLL, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (limits->features & BLK_FEAT_ZONED)) {
+ r = dm_revalidate_zones(t, q);
+ if (r)
+ return r;
}
+ dm_update_crypto_profile(q, t);
return 0;
}
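The dm-table.c changes above replace scattered blk_queue_flag_set()/clear() calls with feature bits carried in the queue limits: dm_set_stacking_limits() starts with BLK_FEAT_IO_STAT, BLK_FEAT_NOWAIT and BLK_FEAT_POLL set, the table-wide checks clear whatever the targets cannot honour, and the result is committed once with queue_limits_set(). A condensed sketch of that flow (the wrapper function name is illustrative only):

	static int example_commit_limits(struct dm_table *t, struct request_queue *q,
					 struct queue_limits *limits)
	{
		/* Optimistic defaults were set earlier by dm_set_stacking_limits() */
		if (!dm_table_supports_nowait(t))
			limits->features &= ~BLK_FEAT_NOWAIT;

		if (!__table_type_bio_based(t->type))
			limits->features &= ~BLK_FEAT_POLL;

		if (dm_table_supports_flush(t))
			limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;

		/* One atomic commit instead of per-flag queue updates */
		return queue_limits_set(q, limits);
	}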
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c
index b423bec6458b..9d51f72a9d66 100644
--- a/drivers/md/dm-vdo/dm-vdo-target.c
+++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -945,7 +945,7 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits)
* The value is used by dm-thin to determine whether to pass down discards. The block layer
* splits large discards on this boundary when this is set.
*/
- limits->max_discard_sectors =
+ limits->max_hw_discard_sectors =
(vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK);
/*
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 8e6bcb0d786a..c0d41c36e06e 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -13,8 +13,6 @@
#define DM_MSG_PREFIX "zone"
-#define DM_ZONE_INVALID_WP_OFST UINT_MAX
-
/*
* For internal zone reports bypassing the top BIO submission path.
*/
@@ -146,65 +144,27 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
}
/*
- * Count conventional zones of a mapped zoned device. If the device
- * only has conventional zones, do not expose it as zoned.
- */
-static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
-{
- unsigned int *nr_conv_zones = data;
-
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- (*nr_conv_zones)++;
-
- return 0;
-}
-
-static int dm_check_zoned(struct mapped_device *md, struct dm_table *t)
-{
- struct gendisk *disk = md->disk;
- unsigned int nr_conv_zones = 0;
- int ret;
-
- /* Count conventional zones */
- md->zone_revalidate_map = t;
- ret = dm_blk_report_zones(disk, 0, UINT_MAX,
- dm_check_zoned_cb, &nr_conv_zones);
- md->zone_revalidate_map = NULL;
- if (ret < 0) {
- DMERR("Check zoned failed %d", ret);
- return ret;
- }
-
- /*
- * If we only have conventional zones, expose the mapped device as
- * a regular device.
- */
- if (nr_conv_zones >= ret) {
- disk->queue->limits.max_open_zones = 0;
- disk->queue->limits.max_active_zones = 0;
- disk->queue->limits.zoned = false;
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- disk->nr_zones = 0;
- }
-
- return 0;
-}
-
-/*
* Revalidate the zones of a mapped device to initialize resource necessary
* for zone append emulation. Note that we cannot simply use the block layer
* blk_revalidate_disk_zones() function here as the mapped device is suspended
* (this is called from __bind() context).
*/
-static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
+int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
{
+ struct mapped_device *md = t->md;
struct gendisk *disk = md->disk;
int ret;
+ if (!get_capacity(disk))
+ return 0;
+
/* Revalidate only if something changed. */
- if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
+ if (!disk->nr_zones || disk->nr_zones != md->nr_zones) {
+ DMINFO("%s using %s zone append",
+ disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
md->nr_zones = 0;
+ }
if (md->nr_zones)
return 0;
@@ -251,11 +211,130 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
return true;
}
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
+struct dm_device_zone_count {
+ sector_t start;
+ sector_t len;
+ unsigned int total_nr_seq_zones;
+ unsigned int target_nr_seq_zones;
+};
+
+/*
+ * Count the total number of and the number of mapped sequential zones of a
+ * target zoned device.
+ */
+static int dm_device_count_zones_cb(struct blk_zone *zone,
+ unsigned int idx, void *data)
+{
+ struct dm_device_zone_count *zc = data;
+
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
+ zc->total_nr_seq_zones++;
+ if (zone->start >= zc->start &&
+ zone->start < zc->start + zc->len)
+ zc->target_nr_seq_zones++;
+ }
+
+ return 0;
+}
+
+static int dm_device_count_zones(struct dm_dev *dev,
+ struct dm_device_zone_count *zc)
{
- struct mapped_device *md = t->md;
int ret;
+ ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES,
+ dm_device_count_zones_cb, zc);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -EIO;
+ return 0;
+}
+
+struct dm_zone_resource_limits {
+ unsigned int mapped_nr_seq_zones;
+ struct queue_limits *lim;
+ bool reliable_limits;
+};
+
+static int device_get_zone_resource_limits(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct dm_zone_resource_limits *zlim = data;
+ struct gendisk *disk = dev->bdev->bd_disk;
+ unsigned int max_open_zones, max_active_zones;
+ int ret;
+ struct dm_device_zone_count zc = {
+ .start = start,
+ .len = len,
+ };
+
+ /*
+ * If the target is not the whole device, the device zone resources may
+ * be shared between different targets. Check this by counting the
+ * number of mapped sequential zones: if this number is smaller than the
+ * total number of sequential zones of the target device, then resource
+ * sharing may happen and the zone limits will not be reliable.
+ */
+ ret = dm_device_count_zones(dev, &zc);
+ if (ret) {
+ DMERR("Count %s zones failed %d", disk->disk_name, ret);
+ return ret;
+ }
+
+ /*
+ * If the target does not map any sequential zones, then we do not need
+ * any zone resource limits.
+ */
+ if (!zc.target_nr_seq_zones)
+ return 0;
+
+ /*
+ * If the target does not map all sequential zones, the limits
+ * will not be reliable and we cannot use REQ_OP_ZONE_RESET_ALL.
+ */
+ if (zc.target_nr_seq_zones < zc.total_nr_seq_zones) {
+ zlim->reliable_limits = false;
+ ti->zone_reset_all_supported = false;
+ }
+
+ /*
+ * If the target maps fewer sequential zones than the limit values, then
+ * we do not have limits for this target.
+ */
+ max_active_zones = disk->queue->limits.max_active_zones;
+ if (max_active_zones >= zc.target_nr_seq_zones)
+ max_active_zones = 0;
+ zlim->lim->max_active_zones =
+ min_not_zero(max_active_zones, zlim->lim->max_active_zones);
+
+ max_open_zones = disk->queue->limits.max_open_zones;
+ if (max_open_zones >= zc.target_nr_seq_zones)
+ max_open_zones = 0;
+ zlim->lim->max_open_zones =
+ min_not_zero(max_open_zones, zlim->lim->max_open_zones);
+
+ /*
+ * Also count the total number of sequential zones for the mapped
+ * device so that when we are done inspecting all its targets, we are
+ * able to check if the mapped device actually has any sequential zones.
+ */
+ zlim->mapped_nr_seq_zones += zc.target_nr_seq_zones;
+
+ return 0;
+}
+
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *lim)
+{
+ struct mapped_device *md = t->md;
+ struct gendisk *disk = md->disk;
+ struct dm_zone_resource_limits zlim = {
+ .reliable_limits = true,
+ .lim = lim,
+ };
+
/*
* Check if zone append is natively supported, and if not, set the
* mapped device queue as needing zone append emulation.
@@ -265,29 +344,68 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
} else {
set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- blk_queue_max_zone_append_sectors(q, 0);
+ lim->max_zone_append_sectors = 0;
}
- if (!get_capacity(md->disk))
- return 0;
+ /*
+ * Determine the max open and max active zone limits for the mapped
+ * device by inspecting the zone resource limits and the zones mapped
+ * by each target.
+ */
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ /*
+ * Assume that the target can accept REQ_OP_ZONE_RESET_ALL.
+ * device_get_zone_resource_limits() may adjust this if one of
+ * the devices used by the target does not have all its
+ * sequential write required zones mapped.
+ */
+ ti->zone_reset_all_supported = true;
+
+ if (!ti->type->iterate_devices ||
+ ti->type->iterate_devices(ti,
+ device_get_zone_resource_limits, &zlim)) {
+ DMERR("Could not determine %s zone resource limits",
+ disk->disk_name);
+ return -ENODEV;
+ }
+ }
/*
- * Check that the mapped device will indeed be zoned, that is, that it
- * has sequential write required zones.
+ * If we only have conventional zones mapped, expose the mapped device
+ * as a regular device.
*/
- ret = dm_check_zoned(md, t);
- if (ret)
- return ret;
- if (!blk_queue_is_zoned(q))
+ if (!zlim.mapped_nr_seq_zones) {
+ lim->max_open_zones = 0;
+ lim->max_active_zones = 0;
+ lim->max_zone_append_sectors = 0;
+ lim->zone_write_granularity = 0;
+ lim->chunk_sectors = 0;
+ lim->features &= ~BLK_FEAT_ZONED;
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ md->nr_zones = 0;
+ disk->nr_zones = 0;
return 0;
-
- if (!md->disk->nr_zones) {
- DMINFO("%s using %s zone append",
- md->disk->disk_name,
- queue_emulates_zone_append(q) ? "emulated" : "native");
}
- return dm_revalidate_zones(md, t);
+ /*
+ * Warn once (when the capacity is not yet set) if the mapped device is
+ * partially using zone resources of the target devices as that leads to
+ * unreliable limits, i.e. if another mapped device uses the same
+ * underlying devices, we cannot enforce zone limits to guarantee that
+ * writing will not lead to errors. Note that we really should return
+ * an error in such a case, but there is no easy way to find out if
+ * another mapped device uses the same underlying zoned devices.
+ */
+ if (!get_capacity(disk) && !zlim.reliable_limits)
+ DMWARN("%s zone resource limits may be unreliable",
+ disk->disk_name);
+
+ if (lim->features & BLK_FEAT_ZONED &&
+ !static_key_enabled(&zoned_enabled.key))
+ static_branch_enable(&zoned_enabled);
+ return 0;
}
/*
@@ -312,3 +430,39 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
return;
}
+
+static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ /*
+ * For an all-zones reset, ignore conventional, empty, read-only
+ * and offline zones.
+ */
+ switch (zone->cond) {
+ case BLK_ZONE_COND_NOT_WP:
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_READONLY:
+ case BLK_ZONE_COND_OFFLINE:
+ return 0;
+ default:
+ set_bit(idx, (unsigned long *)data);
+ return 0;
+ }
+}
+
+int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
+ sector_t sector, unsigned int nr_zones,
+ unsigned long *need_reset)
+{
+ int ret;
+
+ ret = dm_blk_do_report_zones(md, t, sector, nr_zones,
+ dm_zone_need_reset_cb, need_reset);
+ if (ret != nr_zones) {
+ DMERR("Get %s zone reset bitmap failed\n",
+ md->disk->disk_name);
+ return -EIO;
+ }
+
+ return 0;
+}
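device_get_zone_resource_limits() above folds each target's zone limits into the table-wide ones with min_not_zero(), where 0 stands for "no limit" and therefore must never win the comparison. A small worked example with made-up numbers:

	#include <linux/minmax.h>

	static unsigned int example_stack_max_open_zones(void)
	{
		unsigned int stacked = 0;	/* no limit known yet */
		unsigned int dev_a = 128;	/* device A: at most 128 open zones */
		unsigned int dev_b = 0;		/* device B: limit >= its mapped zones, treated as unlimited */

		stacked = min_not_zero(dev_a, stacked);	/* -> 128 */
		stacked = min_not_zero(dev_b, stacked);	/* -> still 128, the 0 never wins */

		return stacked;
	}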
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 12236e6f46f3..cd0ee144973f 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1009,7 +1009,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_sectors = chunk_sectors;
/* We are exposing a drive-managed zoned block device */
- limits->zoned = false;
+ limits->features &= ~BLK_FEAT_ZONED;
}
/*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 13037d6a6f62..4b1b69e576a5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1188,7 +1188,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
return len;
return min_t(sector_t, len,
min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
- blk_chunk_sectors_left(target_offset, max_granularity)));
+ blk_boundary_sectors_left(target_offset, max_granularity)));
}
static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
@@ -1598,20 +1598,19 @@ static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
static bool is_abnormal_io(struct bio *bio)
{
- enum req_op op = bio_op(bio);
-
- if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
- switch (op) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- case REQ_OP_WRITE_ZEROES:
- return true;
- default:
- break;
- }
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ case REQ_OP_FLUSH:
+ return false;
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_ZONE_RESET_ALL:
+ return true;
+ default:
+ return false;
}
-
- return false;
}
static blk_status_t __process_abnormal_io(struct clone_info *ci,
@@ -1776,6 +1775,119 @@ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{
return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
}
+
+static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
+ struct dm_target *ti)
+{
+ struct bio_list blist = BIO_EMPTY_LIST;
+ struct mapped_device *md = ci->io->md;
+ unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors;
+ unsigned long *need_reset;
+ unsigned int i, nr_zones, nr_reset;
+ unsigned int num_bios = 0;
+ blk_status_t sts = BLK_STS_OK;
+ sector_t sector = ti->begin;
+ struct bio *clone;
+ int ret;
+
+ nr_zones = ti->len >> ilog2(zone_sectors);
+ need_reset = bitmap_zalloc(nr_zones, GFP_NOIO);
+ if (!need_reset)
+ return BLK_STS_RESOURCE;
+
+ ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin,
+ nr_zones, need_reset);
+ if (ret) {
+ sts = BLK_STS_IOERR;
+ goto free_bitmap;
+ }
+
+ /* If we have no zone to reset, we are done. */
+ nr_reset = bitmap_weight(need_reset, nr_zones);
+ if (!nr_reset)
+ goto free_bitmap;
+
+ atomic_add(nr_zones, &ci->io->io_count);
+
+ for (i = 0; i < nr_zones; i++) {
+
+ if (!test_bit(i, need_reset)) {
+ sector += zone_sectors;
+ continue;
+ }
+
+ if (bio_list_empty(&blist)) {
+ /* This may take a while, so be nice to others */
+ if (num_bios)
+ cond_resched();
+
+ /*
+ * We may need to reset thousands of zones, so let's
+ * not go crazy with the clone allocation.
+ */
+ alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32),
+ NULL, GFP_NOIO);
+ }
+
+ /* Get a clone and change it to a regular reset operation. */
+ clone = bio_list_pop(&blist);
+ clone->bi_opf &= ~REQ_OP_MASK;
+ clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC;
+ clone->bi_iter.bi_sector = sector;
+ clone->bi_iter.bi_size = 0;
+ __map_bio(clone);
+
+ sector += zone_sectors;
+ num_bios++;
+ nr_reset--;
+ }
+
+ WARN_ON_ONCE(!bio_list_empty(&blist));
+ atomic_sub(nr_zones - num_bios, &ci->io->io_count);
+ ci->sector_count = 0;
+
+free_bitmap:
+ bitmap_free(need_reset);
+
+ return sts;
+}
+
+static void __send_zone_reset_all_native(struct clone_info *ci,
+ struct dm_target *ti)
+{
+ unsigned int bios;
+
+ atomic_add(1, &ci->io->io_count);
+ bios = __send_duplicate_bios(ci, ti, 1, NULL, GFP_NOIO);
+ atomic_sub(1 - bios, &ci->io->io_count);
+
+ ci->sector_count = 0;
+}
+
+static blk_status_t __send_zone_reset_all(struct clone_info *ci)
+{
+ struct dm_table *t = ci->map;
+ blk_status_t sts = BLK_STS_OK;
+
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (ti->zone_reset_all_supported) {
+ __send_zone_reset_all_native(ci, ti);
+ continue;
+ }
+
+ sts = __send_zone_reset_all_emulated(ci, ti);
+ if (sts != BLK_STS_OK)
+ break;
+ }
+
+ /* Release the reference that alloc_io() took for submission. */
+ atomic_sub(1, &ci->io->io_count);
+
+ return sts;
+}
+
#else
static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
struct bio *bio)
@@ -1786,6 +1898,10 @@ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{
return false;
}
+static blk_status_t __send_zone_reset_all(struct clone_info *ci)
+{
+ return BLK_STS_NOTSUPP;
+}
#endif
/*
@@ -1799,9 +1915,14 @@ static void dm_split_and_process_bio(struct mapped_device *md,
blk_status_t error = BLK_STS_OK;
bool is_abnormal, need_split;
- need_split = is_abnormal = is_abnormal_io(bio);
- if (static_branch_unlikely(&zoned_enabled))
- need_split = is_abnormal || dm_zone_bio_needs_split(md, bio);
+ is_abnormal = is_abnormal_io(bio);
+ if (static_branch_unlikely(&zoned_enabled)) {
+ /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */
+ need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) &&
+ (is_abnormal || dm_zone_bio_needs_split(md, bio));
+ } else {
+ need_split = is_abnormal;
+ }
if (unlikely(need_split)) {
/*
@@ -1842,6 +1963,12 @@ static void dm_split_and_process_bio(struct mapped_device *md,
goto out;
}
+ if (static_branch_unlikely(&zoned_enabled) &&
+ (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
+ error = __send_zone_reset_all(&ci);
+ goto out;
+ }
+
error = __split_and_process_bio(&ci);
if (error || !ci.sector_count)
goto out;
@@ -2386,22 +2513,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
struct table_device *td;
int r;
- switch (type) {
- case DM_TYPE_REQUEST_BASED:
+ WARN_ON_ONCE(type == DM_TYPE_NONE);
+
+ if (type == DM_TYPE_REQUEST_BASED) {
md->disk->fops = &dm_rq_blk_dops;
r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm mapped device");
return r;
}
- break;
- case DM_TYPE_BIO_BASED:
- case DM_TYPE_DAX_BIO_BASED:
- blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
- break;
- case DM_TYPE_NONE:
- WARN_ON_ONCE(true);
- break;
}
r = dm_calculate_queue_limits(t, &limits);
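For readers following the REQ_OP_ZONE_RESET_ALL handling added to dm.c above: __send_zone_reset_all_emulated() walks a per-target zone bitmap and turns each set bit into an individual REQ_OP_ZONE_RESET clone, allocating clones in batches of at most 32 so that a huge reset-all does not allocate thousands of bios at once. The standalone sketch below is a userspace model of just that bitmap walk and batching arithmetic; the zone count, zone size and batch cap are invented for illustration and none of this is part of the patch.

    /*
     * Minimal userspace model of the batched per-zone reset walk used by
     * the emulation path above. Zone and batch sizes are illustrative.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_ZONES   10
    #define BATCH_MAX  4    /* the patch uses min(nr_reset, 32) */

    int main(void)
    {
        /* which zones would need a reset (stand-in for the bitmap) */
        bool need_reset[NR_ZONES] = {
            true, false, true, true, false,
            false, true, false, true, true,
        };
        unsigned long long zone_sectors = 1ULL << 19;   /* 256 MiB zones */
        unsigned long long sector = 0;                  /* ti->begin equivalent */
        unsigned int batch_left = 0, nr_reset = 0, issued = 0;

        for (unsigned int i = 0; i < NR_ZONES; i++)
            nr_reset += need_reset[i];

        for (unsigned int i = 0; i < NR_ZONES; i++, sector += zone_sectors) {
            if (!need_reset[i])
                continue;
            if (!batch_left) {
                /* allocate a new batch of clones, capped like the patch */
                batch_left = nr_reset < BATCH_MAX ? nr_reset : BATCH_MAX;
                printf("allocate batch of %u clones\n", batch_left);
            }
            printf("reset zone %u at sector %llu\n", i, sector);
            batch_left--;
            nr_reset--;
            issued++;
        }
        printf("issued %u zone resets\n", issued);
        return 0;
    }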
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index e0c57f19839b..cc466ad5cb1d 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -101,13 +101,18 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
* Zoned targets related functions.
*/
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *lim);
+int dm_revalidate_zones(struct dm_table *t, struct request_queue *q);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_map_bio(struct dm_target_io *io);
+int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
+ sector_t sector, unsigned int nr_zones,
+ unsigned long *need_reset);
#else
#define dm_blk_report_zones NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 0a2d37eb38ef..08232d8dc815 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -227,6 +227,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+ unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
+ PAGE_SHIFT;
loff_t sboff, offset = mddev->bitmap_info.offset;
sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
unsigned int size = PAGE_SIZE;
@@ -269,11 +271,9 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
if (size == 0)
/* bitmap runs in to data */
return -EINVAL;
- } else {
- /* DATA METADATA BITMAP - no problems */
}
- md_super_write(mddev, rdev, sboff + ps, (int) size, page);
+ md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
return 0;
}
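The md-bitmap hunk above clamps each superblock-page write so it cannot run past the pages that actually belong to the bitmap file: the limit is (file_pages - pg_index) << PAGE_SHIFT and the write size becomes min(size, bitmap_limit). A minimal userspace model of that arithmetic, with an invented PAGE_SHIFT and page counts:

    /*
     * Userspace model of the size clamp added to __write_sb_page() above:
     * never write past the last page that belongs to the bitmap.
     * Constants are illustrative, not taken from md.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    static unsigned int clamp_write(unsigned int size, unsigned int file_pages,
                                    unsigned int pg_index)
    {
        unsigned int bitmap_limit = (file_pages - pg_index) << PAGE_SHIFT;

        return size < bitmap_limit ? size : bitmap_limit;
    }

    int main(void)
    {
        /* 3 bitmap pages, writing from the last one with an oversized request */
        printf("%u\n", clamp_write(2 * PAGE_SIZE, 3, 2));  /* -> 4096 */
        printf("%u\n", clamp_write(PAGE_SIZE, 3, 0));      /* -> 4096 */
        return 0;
    }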
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 8e36a0feec09..c1ea214bfc91 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -887,7 +887,7 @@ static int join(struct mddev *mddev, int nodes)
memset(str, 0, 64);
sprintf(str, "%pU", mddev->uuid);
ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
- 0, LVB_SIZE, &md_ls_ops, mddev,
+ DLM_LSFL_SOFTIRQ, LVB_SIZE, &md_ls_ops, mddev,
&ops_rv, &cinfo->lockspace);
if (ret)
goto err;
@@ -1570,7 +1570,7 @@ out:
return err;
}
-static struct md_cluster_operations cluster_ops = {
+static const struct md_cluster_operations cluster_ops = {
.join = join,
.leave = leave,
.slot_number = slot_number,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aff9118ff697..64693913ed18 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -69,13 +69,23 @@
#include "md-bitmap.h"
#include "md-cluster.h"
+static const char *action_name[NR_SYNC_ACTIONS] = {
+ [ACTION_RESYNC] = "resync",
+ [ACTION_RECOVER] = "recover",
+ [ACTION_CHECK] = "check",
+ [ACTION_REPAIR] = "repair",
+ [ACTION_RESHAPE] = "reshape",
+ [ACTION_FROZEN] = "frozen",
+ [ACTION_IDLE] = "idle",
+};
+
/* pers_list is a list of registered personalities protected by pers_lock. */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
static const struct kobj_type md_ktype;
-struct md_cluster_operations *md_cluster_ops;
+const struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;
@@ -479,7 +489,6 @@ int mddev_suspend(struct mddev *mddev, bool interruptible)
*/
WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
- del_timer_sync(&mddev->safemode_timer);
/* restrict memory reclaim I/O during raid array is suspend */
mddev->noio_flag = memalloc_noio_save();
@@ -550,13 +559,9 @@ static void md_end_flush(struct bio *bio)
rdev_dec_pending(rdev, mddev);
- if (atomic_dec_and_test(&mddev->flush_pending)) {
- /* The pair is percpu_ref_get() from md_flush_request() */
- percpu_ref_put(&mddev->active_io);
-
+ if (atomic_dec_and_test(&mddev->flush_pending))
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
- }
}
static void md_submit_flush_data(struct work_struct *ws);
@@ -587,12 +592,8 @@ static void submit_flushes(struct work_struct *ws)
rcu_read_lock();
}
rcu_read_unlock();
- if (atomic_dec_and_test(&mddev->flush_pending)) {
- /* The pair is percpu_ref_get() from md_flush_request() */
- percpu_ref_put(&mddev->active_io);
-
+ if (atomic_dec_and_test(&mddev->flush_pending))
queue_work(md_wq, &mddev->flush_work);
- }
}
static void md_submit_flush_data(struct work_struct *ws)
@@ -617,8 +618,20 @@ static void md_submit_flush_data(struct work_struct *ws)
bio_endio(bio);
} else {
bio->bi_opf &= ~REQ_PREFLUSH;
- md_handle_request(mddev, bio);
+
+ /*
+ * make_request() will never return an error here; the only
+ * implementation that can fail is raid5_make_request() when
+ * called through dm-raid. Since dm always splits data and
+ * flush operations into two separate bios, a flush submitted
+ * by dm always has an I/O size of 0, so make_request() is not
+ * called here in that case.
+ */
+ if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio)))
+ bio_io_error(bio);
}
+
+ /* The pair is percpu_ref_get() from md_flush_request() */
+ percpu_ref_put(&mddev->active_io);
}
/*
@@ -654,24 +667,22 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
WARN_ON(percpu_ref_is_zero(&mddev->active_io));
percpu_ref_get(&mddev->active_io);
mddev->flush_bio = bio;
- bio = NULL;
- }
- spin_unlock_irq(&mddev->lock);
-
- if (!bio) {
+ spin_unlock_irq(&mddev->lock);
INIT_WORK(&mddev->flush_work, submit_flushes);
queue_work(md_wq, &mddev->flush_work);
- } else {
- /* flush was performed for some other bio while we waited. */
- if (bio->bi_iter.bi_size == 0)
- /* an empty barrier - all done */
- bio_endio(bio);
- else {
- bio->bi_opf &= ~REQ_PREFLUSH;
- return false;
- }
+ return true;
}
- return true;
+
+ /* flush was performed for some other bio while we waited. */
+ spin_unlock_irq(&mddev->lock);
+ if (bio->bi_iter.bi_size == 0) {
+ /* pure flush without data - all done */
+ bio_endio(bio);
+ return true;
+ }
+
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ return false;
}
EXPORT_SYMBOL(md_flush_request);
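The rewritten md_flush_request() above has three outcomes: become the flush owner and queue the flush work, complete an empty flush bio that is already covered by a recent flush, or strip REQ_PREFLUSH and hand the data part back to the caller. The sketch below is a rough userspace model of that control flow; the struct and helpers are stand-ins, and the ktime check and wait_event details are deliberately omitted.

    /*
     * Standalone model of the md_flush_request() control flow after the
     * rewrite above (not kernel code). Returns true when the bio was
     * consumed by the flush machinery, false when the caller should
     * continue processing the data part.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct flush_state {
        bool flush_in_flight;   /* mddev->flush_bio != NULL */
        bool last_flush_stale;  /* data written since the last flush */
    };

    static bool handle_flush(struct flush_state *s, unsigned int bio_size)
    {
        if (!s->flush_in_flight && s->last_flush_stale) {
            /* become the flush owner and kick the flush work */
            s->flush_in_flight = true;
            printf("submit pre-flush to members\n");
            return true;
        }
        if (bio_size == 0) {
            /* pure flush, already covered by a recent flush: complete it */
            printf("end empty flush bio\n");
            return true;
        }
        /* data bio: drop PREFLUSH and let the caller map the data */
        printf("strip REQ_PREFLUSH, continue with data\n");
        return false;
    }

    int main(void)
    {
        struct flush_state s = { .flush_in_flight = false, .last_flush_stale = true };

        handle_flush(&s, 4096); /* becomes flush owner */
        handle_flush(&s, 0);    /* empty flush while one is in flight */
        handle_flush(&s, 4096); /* data bio, caller keeps going */
        return 0;
    }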
@@ -742,7 +753,6 @@ int mddev_init(struct mddev *mddev)
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
- mutex_init(&mddev->sync_mutex);
mutex_init(&mddev->suspend_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
@@ -758,7 +768,7 @@ int mddev_init(struct mddev *mddev)
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
mddev->reshape_backwards = 0;
- mddev->last_sync_action = "none";
+ mddev->last_sync_action = ACTION_IDLE;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
@@ -2410,36 +2420,10 @@ static LIST_HEAD(pending_raid_disks);
*/
int md_integrity_register(struct mddev *mddev)
{
- struct md_rdev *rdev, *reference = NULL;
-
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
- if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
- return 0; /* shouldn't register, or already is */
- rdev_for_each(rdev, mddev) {
- /* skip spares and non-functional disks */
- if (test_bit(Faulty, &rdev->flags))
- continue;
- if (rdev->raid_disk < 0)
- continue;
- if (!reference) {
- /* Use the first rdev as the reference */
- reference = rdev;
- continue;
- }
- /* does this rdev's profile match the reference profile? */
- if (blk_integrity_compare(reference->bdev->bd_disk,
- rdev->bdev->bd_disk) < 0)
- return -EINVAL;
- }
- if (!reference || !bdev_get_integrity(reference->bdev))
- return 0;
- /*
- * All component devices are integrity capable and have matching
- * profiles, register the common profile for the md device.
- */
- blk_integrity_register(mddev->gendisk,
- bdev_get_integrity(reference->bdev));
+ if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk))
+ return 0; /* shouldn't register */
pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
@@ -2459,32 +2443,6 @@ int md_integrity_register(struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_register);
-/*
- * Attempt to add an rdev, but only if it is consistent with the current
- * integrity profile
- */
-int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
-{
- struct blk_integrity *bi_mddev;
-
- if (mddev_is_dm(mddev))
- return 0;
-
- bi_mddev = blk_get_integrity(mddev->gendisk);
-
- if (!bi_mddev) /* nothing to do */
- return 0;
-
- if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
- pr_err("%s: incompatible integrity profile for %pg\n",
- mdname(mddev), rdev->bdev);
- return -ENXIO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(md_integrity_add_rdev);
-
static bool rdev_read_only(struct md_rdev *rdev)
{
return bdev_read_only(rdev->bdev) ||
@@ -4867,30 +4825,81 @@ out_unlock:
static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
-static ssize_t
-action_show(struct mddev *mddev, char *page)
+enum sync_action md_sync_action(struct mddev *mddev)
{
- char *type = "idle";
unsigned long recovery = mddev->recovery;
+
+ /*
+ * frozen has the highest priority: the running sync_thread will be
+ * stopped immediately, and no new sync_thread can start.
+ */
if (test_bit(MD_RECOVERY_FROZEN, &recovery))
- type = "frozen";
- else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
- (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
- if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
- type = "reshape";
- else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
- if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
- type = "resync";
- else if (test_bit(MD_RECOVERY_CHECK, &recovery))
- type = "check";
- else
- type = "repair";
- } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
- type = "recover";
- else if (mddev->reshape_position != MaxSector)
- type = "reshape";
+ return ACTION_FROZEN;
+
+ /*
+ * a read-only array can't register a sync_thread, and can only
+ * add/remove spares.
+ */
+ if (!md_is_rdwr(mddev))
+ return ACTION_IDLE;
+
+ /*
+ * idle means no sync_thread is running, and no new sync_thread is
+ * requested.
+ */
+ if (!test_bit(MD_RECOVERY_RUNNING, &recovery) &&
+ !test_bit(MD_RECOVERY_NEEDED, &recovery))
+ return ACTION_IDLE;
+
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+ mddev->reshape_position != MaxSector)
+ return ACTION_RESHAPE;
+
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+ return ACTION_RECOVER;
+
+ if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+ /*
+ * MD_RECOVERY_CHECK must be paired with
+ * MD_RECOVERY_REQUESTED.
+ */
+ if (test_bit(MD_RECOVERY_CHECK, &recovery))
+ return ACTION_CHECK;
+ if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
+ return ACTION_REPAIR;
+ return ACTION_RESYNC;
}
- return sprintf(page, "%s\n", type);
+
+ /*
+ * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, but no
+ * sync_action is specified.
+ */
+ return ACTION_IDLE;
+}
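md_sync_action() above maps the recovery flag combinations onto a single enum with a fixed priority: frozen first, then read-only and idle, then reshape, recover, and finally the sync variants (check, repair, resync). The following self-contained program mirrors that decision order; the flag bits and helper are local stand-ins, not the kernel's MD_RECOVERY_* definitions.

    /*
     * Userspace model of the md_sync_action() priority order introduced
     * above. Flag values are local stand-ins.
     */
    #include <stdio.h>

    enum action { IDLE, FROZEN, RESHAPE, RECOVER, CHECK, REPAIR, RESYNC };

    #define F_FROZEN    (1u << 0)
    #define F_RUNNING   (1u << 1)
    #define F_NEEDED    (1u << 2)
    #define F_RESHAPE   (1u << 3)
    #define F_RECOVER   (1u << 4)
    #define F_SYNC      (1u << 5)
    #define F_REQUESTED (1u << 6)
    #define F_CHECK     (1u << 7)

    static enum action sync_action(unsigned int rec, int rdwr, int reshaping)
    {
        if (rec & F_FROZEN)
            return FROZEN;                  /* highest priority */
        if (!rdwr)
            return IDLE;                    /* read-only: spares only */
        if (!(rec & (F_RUNNING | F_NEEDED)))
            return IDLE;                    /* nothing running or requested */
        if ((rec & F_RESHAPE) || reshaping)
            return RESHAPE;
        if (rec & F_RECOVER)
            return RECOVER;
        if (rec & F_SYNC) {
            if (rec & F_CHECK)
                return CHECK;               /* CHECK implies REQUESTED */
            if (rec & F_REQUESTED)
                return REPAIR;
            return RESYNC;
        }
        return IDLE;
    }

    int main(void)
    {
        printf("%d\n", sync_action(F_RUNNING | F_SYNC | F_CHECK, 1, 0));  /* CHECK */
        printf("%d\n", sync_action(F_NEEDED | F_RECOVER, 1, 0));          /* RECOVER */
        printf("%d\n", sync_action(F_FROZEN | F_RUNNING | F_SYNC, 1, 0)); /* FROZEN */
        return 0;
    }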
+
+enum sync_action md_sync_action_by_name(const char *page)
+{
+ enum sync_action action;
+
+ for (action = 0; action < NR_SYNC_ACTIONS; ++action) {
+ if (cmd_match(page, action_name[action]))
+ return action;
+ }
+
+ return NR_SYNC_ACTIONS;
+}
+
+const char *md_sync_action_name(enum sync_action action)
+{
+ return action_name[action];
+}
+
+static ssize_t
+action_show(struct mddev *mddev, char *page)
+{
+ enum sync_action action = md_sync_action(mddev);
+
+ return sprintf(page, "%s\n", md_sync_action_name(action));
}
/**
@@ -4899,15 +4908,10 @@ action_show(struct mddev *mddev, char *page)
* @locked: if set, reconfig_mutex will still be held after this function
* return; if not set, reconfig_mutex will be released after this
* function return.
- * @check_seq: if set, only wait for curent running sync_thread to stop, noted
- * that new sync_thread can still start.
*/
-static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
+static void stop_sync_thread(struct mddev *mddev, bool locked)
{
- int sync_seq;
-
- if (check_seq)
- sync_seq = atomic_read(&mddev->sync_seq);
+ int sync_seq = atomic_read(&mddev->sync_seq);
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
if (!locked)
@@ -4928,7 +4932,8 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
wait_event(resync_wait,
!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+ (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
+ sync_seq != atomic_read(&mddev->sync_seq)));
if (locked)
mddev_lock_nointr(mddev);
@@ -4939,7 +4944,7 @@ void md_idle_sync_thread(struct mddev *mddev)
lockdep_assert_held(&mddev->reconfig_mutex);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- stop_sync_thread(mddev, true, true);
+ stop_sync_thread(mddev, true);
}
EXPORT_SYMBOL_GPL(md_idle_sync_thread);
@@ -4948,7 +4953,7 @@ void md_frozen_sync_thread(struct mddev *mddev)
lockdep_assert_held(&mddev->reconfig_mutex);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- stop_sync_thread(mddev, true, false);
+ stop_sync_thread(mddev, true);
}
EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
@@ -4963,100 +4968,127 @@ void md_unfrozen_sync_thread(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
-static void idle_sync_thread(struct mddev *mddev)
+static int mddev_start_reshape(struct mddev *mddev)
{
- mutex_lock(&mddev->sync_mutex);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
- if (mddev_lock(mddev)) {
- mutex_unlock(&mddev->sync_mutex);
- return;
- }
-
- stop_sync_thread(mddev, false, true);
- mutex_unlock(&mddev->sync_mutex);
-}
+ int ret;
-static void frozen_sync_thread(struct mddev *mddev)
-{
- mutex_lock(&mddev->sync_mutex);
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (mddev->pers->start_reshape == NULL)
+ return -EINVAL;
- if (mddev_lock(mddev)) {
- mutex_unlock(&mddev->sync_mutex);
- return;
+ if (mddev->reshape_position == MaxSector ||
+ mddev->pers->check_reshape == NULL ||
+ mddev->pers->check_reshape(mddev)) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ ret = mddev->pers->start_reshape(mddev);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If reshape is still in progress, and md_check_recovery() can
+ * continue to reshape, don't restart reshape because data can
+ * be corrupted for raid456.
+ */
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
- stop_sync_thread(mddev, false, false);
- mutex_unlock(&mddev->sync_mutex);
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
+ return 0;
}
static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
+ int ret;
+ enum sync_action action;
+
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
+retry:
+ if (work_busy(&mddev->sync_work))
+ flush_work(&mddev->sync_work);
- if (cmd_match(page, "idle"))
- idle_sync_thread(mddev);
- else if (cmd_match(page, "frozen"))
- frozen_sync_thread(mddev);
- else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- return -EBUSY;
- else if (cmd_match(page, "resync"))
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- else if (cmd_match(page, "recover")) {
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- } else if (cmd_match(page, "reshape")) {
- int err;
- if (mddev->pers->start_reshape == NULL)
- return -EINVAL;
- err = mddev_lock(mddev);
- if (!err) {
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- err = -EBUSY;
- } else if (mddev->reshape_position == MaxSector ||
- mddev->pers->check_reshape == NULL ||
- mddev->pers->check_reshape(mddev)) {
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- err = mddev->pers->start_reshape(mddev);
- } else {
- /*
- * If reshape is still in progress, and
- * md_check_recovery() can continue to reshape,
- * don't restart reshape because data can be
- * corrupted for raid456.
- */
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- }
- mddev_unlock(mddev);
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ if (work_busy(&mddev->sync_work)) {
+ mddev_unlock(mddev);
+ goto retry;
+ }
+
+ action = md_sync_action_by_name(page);
+
+ /* TODO: mdadm relies on "idle" to start the sync_thread. */
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ switch (action) {
+ case ACTION_FROZEN:
+ md_frozen_sync_thread(mddev);
+ ret = len;
+ goto out;
+ case ACTION_IDLE:
+ md_idle_sync_thread(mddev);
+ break;
+ case ACTION_RESHAPE:
+ case ACTION_RECOVER:
+ case ACTION_CHECK:
+ case ACTION_REPAIR:
+ case ACTION_RESYNC:
+ ret = -EBUSY;
+ goto out;
+ default:
+ ret = -EINVAL;
+ goto out;
}
- if (err)
- return err;
- sysfs_notify_dirent_safe(mddev->sysfs_degraded);
} else {
- if (cmd_match(page, "check"))
+ switch (action) {
+ case ACTION_FROZEN:
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ ret = len;
+ goto out;
+ case ACTION_RESHAPE:
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ ret = mddev_start_reshape(mddev);
+ if (ret)
+ goto out;
+ break;
+ case ACTION_RECOVER:
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ break;
+ case ACTION_CHECK:
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
- else if (!cmd_match(page, "repair"))
- return -EINVAL;
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ fallthrough;
+ case ACTION_REPAIR:
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ fallthrough;
+ case ACTION_RESYNC:
+ case ACTION_IDLE:
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
}
+
if (mddev->ro == MD_AUTO_READ) {
/* A write to sync_action is enough to justify
* canceling read-auto mode
*/
- flush_work(&mddev->sync_work);
mddev->ro = MD_RDWR;
md_wakeup_thread(mddev->sync_thread);
}
+
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- return len;
+ ret = len;
+
+out:
+ mddev_unlock(mddev);
+ return ret;
}
static struct md_sysfs_entry md_scan_mode =
@@ -5065,7 +5097,8 @@ __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
- return sprintf(page, "%s\n", mddev->last_sync_action);
+ return sprintf(page, "%s\n",
+ md_sync_action_name(mddev->last_sync_action));
}
static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
@@ -5755,14 +5788,20 @@ static const struct kobj_type md_ktype = {
int mdp_major = 0;
/* stack the limit for all rdevs into lim */
-void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim)
+int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
+ unsigned int flags)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
mddev->gendisk->disk_name);
+ if ((flags & MDDEV_STACK_INTEGRITY) &&
+ !queue_limits_stack_integrity_bdev(lim, rdev->bdev))
+ return -EINVAL;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
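With MDDEV_STACK_INTEGRITY set, mddev_stack_rdev_limits() above now fails the whole limit stack as soon as one member's integrity profile cannot be stacked. The toy model below (invented profile ids, userspace only, not the block layer's integrity code) captures the "all members must agree or the array gets no integrity profile" behaviour:

    /*
     * Userspace model of the MDDEV_STACK_INTEGRITY behaviour above:
     * stacking succeeds only while every member reports a compatible
     * integrity profile; any mismatch fails the whole array.
     */
    #include <stdio.h>

    struct member { const char *name; int integrity_profile; /* 0 = none */ };

    static int stack_integrity(const struct member *m, int n)
    {
        int profile = -1;       /* unset */

        for (int i = 0; i < n; i++) {
            if (profile == -1) {
                profile = m[i].integrity_profile;
                continue;
            }
            if (m[i].integrity_profile != profile)
                return -1;      /* -EINVAL in the patch */
        }
        return profile > 0 ? profile : 0;
    }

    int main(void)
    {
        struct member ok[]  = { { "sda", 2 }, { "sdb", 2 } };
        struct member bad[] = { { "sda", 2 }, { "sdb", 0 } };

        printf("ok: %d\n", stack_integrity(ok, 2));   /* 2 */
        printf("bad: %d\n", stack_integrity(bad, 2)); /* -1 */
        return 0;
    }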
@@ -5777,6 +5816,14 @@ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
lim = queue_limits_start_update(mddev->gendisk->queue);
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
mddev->gendisk->disk_name);
+
+ if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) {
+ pr_err("%s: incompatible integrity profile for %pg\n",
+ mdname(mddev), rdev->bdev);
+ queue_limits_cancel_update(mddev->gendisk->queue);
+ return -ENXIO;
+ }
+
return queue_limits_commit_update(mddev->gendisk->queue, &lim);
}
EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
@@ -5806,6 +5853,14 @@ static void mddev_delayed_delete(struct work_struct *ws)
kobject_put(&mddev->kobj);
}
+void md_init_stacking_limits(struct queue_limits *lim)
+{
+ blk_set_stacking_limits(lim);
+ lim->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
+ BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
+}
+EXPORT_SYMBOL_GPL(md_init_stacking_limits);
+
struct mddev *md_alloc(dev_t dev, char *name)
{
/*
@@ -5823,7 +5878,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
int partitioned;
int shift;
int unit;
- int error ;
+ int error;
/*
* Wait for any previous instance of this device to be completely
@@ -5881,7 +5936,6 @@ struct mddev *md_alloc(dev_t dev, char *name)
disk->fops = &md_fops;
disk->private_data = mddev;
- blk_queue_write_cache(disk->queue, true, true);
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
error = add_disk(disk);
@@ -6185,28 +6239,6 @@ int md_run(struct mddev *mddev)
}
}
- if (!mddev_is_dm(mddev)) {
- struct request_queue *q = mddev->gendisk->queue;
- bool nonrot = true;
-
- rdev_for_each(rdev, mddev) {
- if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
- nonrot = false;
- break;
- }
- }
- if (mddev->degraded)
- nonrot = false;
- if (nonrot)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- else
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
-
- /* Set the NOWAIT flags if all underlying devices support it */
- if (nowait)
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
- }
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
@@ -6437,7 +6469,7 @@ void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- stop_sync_thread(mddev, true, false);
+ stop_sync_thread(mddev, true);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
@@ -6505,7 +6537,7 @@ static int md_set_readonly(struct mddev *mddev)
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
- stop_sync_thread(mddev, false, false);
+ stop_sync_thread(mddev, false);
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
@@ -6551,7 +6583,7 @@ static int do_md_stop(struct mddev *mddev, int mode)
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
- stop_sync_thread(mddev, true, false);
+ stop_sync_thread(mddev, true);
if (mddev->sysfs_active ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
@@ -7166,15 +7198,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (!mddev->thread)
md_update_sb(mddev, 1);
/*
- * If the new disk does not support REQ_NOWAIT,
- * disable on the whole MD.
- */
- if (!bdev_nowait(rdev->bdev)) {
- pr_info("%s: Disabling nowait because %pg does not support nowait\n",
- mdname(mddev), rdev->bdev);
- blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue);
- }
- /*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
@@ -7742,12 +7765,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
return get_bitmap_file(mddev, argp);
}
- if (cmd == HOT_REMOVE_DISK)
- /* need to ensure recovery thread has run */
- wait_event_interruptible_timeout(mddev->sb_wait,
- !test_bit(MD_RECOVERY_NEEDED,
- &mddev->recovery),
- msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
* and writes
@@ -8520,7 +8537,7 @@ int unregister_md_personality(struct md_personality *p)
}
EXPORT_SYMBOL(unregister_md_personality);
-int register_md_cluster_operations(struct md_cluster_operations *ops,
+int register_md_cluster_operations(const struct md_cluster_operations *ops,
struct module *module)
{
int ret = 0;
@@ -8641,12 +8658,12 @@ EXPORT_SYMBOL(md_done_sync);
* A return value of 'false' means that the write wasn't recorded
* and cannot proceed as the array is being suspend.
*/
-bool md_write_start(struct mddev *mddev, struct bio *bi)
+void md_write_start(struct mddev *mddev, struct bio *bi)
{
int did_change = 0;
if (bio_data_dir(bi) != WRITE)
- return true;
+ return;
BUG_ON(mddev->ro == MD_RDONLY);
if (mddev->ro == MD_AUTO_READ) {
@@ -8679,15 +8696,9 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
if (!mddev->has_superblocks)
- return true;
+ return;
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
- is_md_suspended(mddev));
- if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
- percpu_ref_put(&mddev->writes_pending);
- return false;
- }
- return true;
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
}
EXPORT_SYMBOL(md_write_start);
@@ -8835,6 +8846,77 @@ void md_allow_write(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(md_allow_write);
+static sector_t md_sync_max_sectors(struct mddev *mddev,
+ enum sync_action action)
+{
+ switch (action) {
+ case ACTION_RESYNC:
+ case ACTION_CHECK:
+ case ACTION_REPAIR:
+ atomic64_set(&mddev->resync_mismatches, 0);
+ fallthrough;
+ case ACTION_RESHAPE:
+ return mddev->resync_max_sectors;
+ case ACTION_RECOVER:
+ return mddev->dev_sectors;
+ default:
+ return 0;
+ }
+}
+
+static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
+{
+ sector_t start = 0;
+ struct md_rdev *rdev;
+
+ switch (action) {
+ case ACTION_CHECK:
+ case ACTION_REPAIR:
+ return mddev->resync_min;
+ case ACTION_RESYNC:
+ if (!mddev->bitmap)
+ return mddev->recovery_cp;
+ return 0;
+ case ACTION_RESHAPE:
+ /*
+ * If the original node aborts reshaping then we continue the
+ * reshaping, so set the position again to avoid restarting the
+ * reshape from the very beginning
+ */
+ if (mddev_is_clustered(mddev) &&
+ mddev->reshape_position != MaxSector)
+ return mddev->reshape_position;
+ return 0;
+ case ACTION_RECOVER:
+ start = MaxSector;
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < start)
+ start = rdev->recovery_offset;
+ rcu_read_unlock();
+
+ /* If there is a bitmap, we need to make sure all
+ * writes that started before we added a spare
+ * complete before we start doing a recovery.
+ * Otherwise the write might complete and (via
+ * bitmap_endwrite) set a bit in the bitmap after the
+ * recovery has checked that bit and skipped that
+ * region.
+ */
+ if (mddev->bitmap) {
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
+ return start;
+ default:
+ return MaxSector;
+ }
+}
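md_sync_position() above picks the starting sector per action; the least obvious branch is recovery, which resumes at the lowest recovery_offset among member disks that are neither faulty nor in sync. A small userspace model of that minimum search follows; the disk table and values are made up for illustration.

    /*
     * Model of the "where does recovery start" part of md_sync_position():
     * recovery resumes at the lowest recovery_offset among member disks
     * that are not yet in sync.
     */
    #include <stdio.h>

    struct disk {
        int in_sync;
        int faulty;
        unsigned long long recovery_offset;
    };

    int main(void)
    {
        struct disk disks[] = {
            { .in_sync = 1, .faulty = 0, .recovery_offset = 0 },
            { .in_sync = 0, .faulty = 0, .recovery_offset = 1048576 },
            { .in_sync = 0, .faulty = 0, .recovery_offset = 262144 },
            { .in_sync = 0, .faulty = 1, .recovery_offset = 0 },
        };
        unsigned long long start = ~0ULL;   /* MaxSector equivalent */

        for (unsigned int i = 0; i < sizeof(disks) / sizeof(disks[0]); i++) {
            if (disks[i].faulty || disks[i].in_sync)
                continue;
            if (disks[i].recovery_offset < start)
                start = disks[i].recovery_offset;
        }
        printf("recovery starts at sector %llu\n", start);  /* 262144 */
        return 0;
    }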
+
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
@@ -8851,7 +8933,8 @@ void md_do_sync(struct md_thread *thread)
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
- char *desc, *action = NULL;
+ enum sync_action action;
+ const char *desc;
struct blk_plug plug;
int ret;
@@ -8882,21 +8965,9 @@ void md_do_sync(struct md_thread *thread)
goto skip;
}
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
- desc = "data-check";
- action = "check";
- } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
- desc = "requested-resync";
- action = "repair";
- } else
- desc = "resync";
- } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
- desc = "reshape";
- else
- desc = "recovery";
-
- mddev->last_sync_action = action ?: desc;
+ action = md_sync_action(mddev);
+ desc = md_sync_action_name(action);
+ mddev->last_sync_action = action;
/*
* Before starting a resync we must have set curr_resync to
@@ -8964,56 +9035,8 @@ void md_do_sync(struct md_thread *thread)
spin_unlock(&all_mddevs_lock);
} while (mddev->curr_resync < MD_RESYNC_DELAYED);
- j = 0;
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- /* resync follows the size requested by the personality,
- * which defaults to physical size, but can be virtual size
- */
- max_sectors = mddev->resync_max_sectors;
- atomic64_set(&mddev->resync_mismatches, 0);
- /* we don't use the checkpoint if there's a bitmap */
- if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
- j = mddev->resync_min;
- else if (!mddev->bitmap)
- j = mddev->recovery_cp;
-
- } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
- max_sectors = mddev->resync_max_sectors;
- /*
- * If the original node aborts reshaping then we continue the
- * reshaping, so set j again to avoid restart reshape from the
- * first beginning
- */
- if (mddev_is_clustered(mddev) &&
- mddev->reshape_position != MaxSector)
- j = mddev->reshape_position;
- } else {
- /* recovery follows the physical size of devices */
- max_sectors = mddev->dev_sectors;
- j = MaxSector;
- rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- !test_bit(Journal, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < j)
- j = rdev->recovery_offset;
- rcu_read_unlock();
-
- /* If there is a bitmap, we need to make sure all
- * writes that started before we added a spare
- * complete before we start doing a recovery.
- * Otherwise the write might complete and (via
- * bitmap_endwrite) set a bit in the bitmap after the
- * recovery has checked that bit and skipped that
- * region.
- */
- if (mddev->bitmap) {
- mddev->pers->quiesce(mddev, 1);
- mddev->pers->quiesce(mddev, 0);
- }
- }
+ max_sectors = md_sync_max_sectors(mddev, action);
+ j = md_sync_position(mddev, action);
pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
@@ -9095,7 +9118,8 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
- sectors = mddev->pers->sync_request(mddev, j, &skipped);
+ sectors = mddev->pers->sync_request(mddev, j, max_sectors,
+ &skipped);
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
@@ -9185,7 +9209,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
- mddev->pers->sync_request(mddev, max_sectors, &skipped);
+ mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > MD_RESYNC_ACTIVE) {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index ca085ecad504..a0d6827dced9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -34,6 +34,61 @@
*/
#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+/* Status of sync thread. */
+enum sync_action {
+ /*
+ * Represented by MD_RECOVERY_SYNC, started when:
+ * 1) after assembly, syncing data from the first rdev to the other
+ * copies; this must be done before any other sync action and will
+ * only execute once;
+ * 2) the array is resized (note that this is not a reshape), syncing
+ * data for the new range;
+ */
+ ACTION_RESYNC,
+ /*
+ * Represented by MD_RECOVERY_RECOVER, started when:
+ * 1) a new replacement is added: sync data from the rdev being
+ * replaced or from available copies on other rdevs;
+ * 2) a new member disk is added while the array is degraded: sync
+ * data from the other rdevs;
+ * 3) the array is reassembled after a power failure, or a hot-removed
+ * rdev is re-added: sync data from the first rdev to the other copies
+ * based on the bitmap;
+ */
+ ACTION_RECOVER,
+ /*
+ * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
+ * MD_RECOVERY_CHECK, started when the user echoes "check" to the
+ * sysfs api sync_action; used to check whether the data copies on
+ * different rdevs are the same. The number of mismatched sectors is
+ * exported to the user through the sysfs api mismatch_cnt;
+ */
+ ACTION_CHECK,
+ /*
+ * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED, started
+ * when the user echoes "repair" to the sysfs api sync_action; usually
+ * paired with ACTION_CHECK, and used to force a data sync once the
+ * user has found inconsistent data;
+ */
+ ACTION_REPAIR,
+ /*
+ * Represented by MD_RECOVERY_RESHAPE, started when a new member disk
+ * is added to the conf; note that this is different from spares or
+ * replacements;
+ */
+ ACTION_RESHAPE,
+ /*
+ * Represented by MD_RECOVERY_FROZEN; can be set through the sysfs api
+ * sync_action or internally (e.g. when setting the array read-only),
+ * and forbids all of the above actions.
+ */
+ ACTION_FROZEN,
+ /*
+ * None of the above actions match.
+ */
+ ACTION_IDLE,
+ NR_SYNC_ACTIONS,
+};
+
/*
* The struct embedded in rdev is used to serialize IO.
*/
@@ -371,13 +426,12 @@ struct mddev {
struct md_thread __rcu *thread; /* management thread */
struct md_thread __rcu *sync_thread; /* doing resync or reconstruct */
- /* 'last_sync_action' is initialized to "none". It is set when a
- * sync operation (i.e "data-check", "requested-resync", "resync",
- * "recovery", or "reshape") is started. It holds this value even
+ /*
+ * Set when a sync operation is started. It holds this value even
* when the sync thread is "frozen" (interrupted) or "idle" (stopped
- * or finished). It is overwritten when a new sync operation is begun.
+ * or finished). It is overwritten when a new sync operation is begun.
*/
- char *last_sync_action;
+ enum sync_action last_sync_action;
sector_t curr_resync; /* last block scheduled */
/* As resync requests can complete out of order, we cannot easily track
* how much resync has been completed. So we occasionally pause until
@@ -540,8 +594,6 @@ struct mddev {
*/
struct list_head deleting;
- /* Used to synchronize idle and frozen for action_store() */
- struct mutex sync_mutex;
/* The sequence number for sync thread */
atomic_t sync_seq;
@@ -551,22 +603,46 @@ struct mddev {
};
enum recovery_flags {
+ /* flags for sync thread running status */
+
+ /*
+ * set when one of the sync actions is set and a new sync thread needs
+ * to be registered, or just to add/remove spares from the conf.
+ */
+ MD_RECOVERY_NEEDED,
+ /* sync thread is running, or about to be started */
+ MD_RECOVERY_RUNNING,
+ /* sync thread needs to be aborted for some reason */
+ MD_RECOVERY_INTR,
+ /* sync thread is done and is waiting to be unregistered */
+ MD_RECOVERY_DONE,
+ /* running sync thread must abort immediately, and not restart */
+ MD_RECOVERY_FROZEN,
+ /* waiting for pers->start() to finish */
+ MD_RECOVERY_WAIT,
+ /* sync action was interrupted because of an io-error */
+ MD_RECOVERY_ERROR,
+
+ /* flags that determine the sync action, see details in enum sync_action */
+
+ /* if just this flag is set, action is resync. */
+ MD_RECOVERY_SYNC,
+ /*
+ * paired with MD_RECOVERY_SYNC; if MD_RECOVERY_CHECK is not set, the
+ * action is repair, meaning the user requested a resync.
+ */
+ MD_RECOVERY_REQUESTED,
/*
- * If neither SYNC or RESHAPE are set, then it is a recovery.
+ * paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is
+ * check.
*/
- MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */
- MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */
- MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. */
- MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */
- MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */
- MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */
- MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */
- MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */
- MD_RECOVERY_RESHAPE, /* A reshape is happening */
- MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
- MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
- MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
- MD_RESYNCING_REMOTE, /* remote node is running resync thread */
+ MD_RECOVERY_CHECK,
+ /* recovery, or need to try it */
+ MD_RECOVERY_RECOVER,
+ /* reshape */
+ MD_RECOVERY_RESHAPE,
+ /* remote node is running resync thread */
+ MD_RESYNCING_REMOTE,
};
enum md_ro_state {
@@ -653,7 +729,8 @@ struct md_personality
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
- sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
+ sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
+ sector_t max_sector, int *skipped);
int (*resize) (struct mddev *mddev, sector_t sectors);
sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
int (*check_reshape) (struct mddev *mddev);
@@ -772,7 +849,7 @@ static inline void safe_put_page(struct page *p)
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
-extern int register_md_cluster_operations(struct md_cluster_operations *ops,
+extern int register_md_cluster_operations(const struct md_cluster_operations *ops,
struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
@@ -785,7 +862,10 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t
extern void md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
-extern bool md_write_start(struct mddev *mddev, struct bio *bi);
+extern enum sync_action md_sync_action(struct mddev *mddev);
+extern enum sync_action md_sync_action_by_name(const char *page);
+extern const char *md_sync_action_name(enum sync_action action);
+extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
@@ -809,11 +889,11 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
-extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern int mddev_init(struct mddev *mddev);
extern void mddev_destroy(struct mddev *mddev);
+void md_init_stacking_limits(struct queue_limits *lim);
struct mddev *md_alloc(dev_t dev, char *name);
void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
@@ -852,7 +932,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
}
}
-extern struct md_cluster_operations *md_cluster_ops;
+extern const struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
@@ -908,7 +988,9 @@ void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);
-void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim);
+#define MDDEV_STACK_INTEGRITY (1u << 0)
+int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
+ unsigned int flags);
int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev);
void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c5d4aeb68404..32d587524778 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -365,30 +365,30 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
return array_sectors;
}
-static void free_conf(struct mddev *mddev, struct r0conf *conf)
-{
- kfree(conf->strip_zone);
- kfree(conf->devlist);
- kfree(conf);
-}
-
static void raid0_free(struct mddev *mddev, void *priv)
{
struct r0conf *conf = priv;
- free_conf(mddev, conf);
+ kfree(conf->strip_zone);
+ kfree(conf->devlist);
+ kfree(conf);
}
static int raid0_set_limits(struct mddev *mddev)
{
struct queue_limits lim;
+ int err;
- blk_set_stacking_limits(&lim);
+ md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
- mddev_stack_rdev_limits(mddev, &lim);
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ if (err) {
+ queue_limits_cancel_update(mddev->gendisk->queue);
+ return err;
+ }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
@@ -415,7 +415,7 @@ static int raid0_run(struct mddev *mddev)
if (!mddev_is_dm(mddev)) {
ret = raid0_set_limits(mddev);
if (ret)
- goto out_free_conf;
+ return ret;
}
/* calculate array device size */
@@ -427,13 +427,7 @@ static int raid0_run(struct mddev *mddev)
dump_zones(mddev);
- ret = md_integrity_register(mddev);
- if (ret)
- goto out_free_conf;
- return 0;
-out_free_conf:
- free_conf(mddev, conf);
- return ret;
+ return md_integrity_register(mddev);
}
/*
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b8a71ca66dd..04a0c2ca1732 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1687,8 +1687,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
if (bio_data_dir(bio) == READ)
raid1_read_request(mddev, bio, sectors, NULL);
else {
- if (!md_write_start(mddev,bio))
- return false;
+ md_write_start(mddev,bio);
raid1_write_request(mddev, bio, sectors);
}
return true;
@@ -1907,9 +1906,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
- if (md_integrity_add_rdev(rdev, mddev))
- return -ENXIO;
-
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@@ -2757,12 +2753,12 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
*/
static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
- int *skipped)
+ sector_t max_sector, int *skipped)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
struct bio *bio;
- sector_t max_sector, nr_sectors;
+ sector_t nr_sectors;
int disk = -1;
int i;
int wonly = -1;
@@ -2778,7 +2774,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (init_resync(conf))
return 0;
- max_sector = mddev->dev_sectors;
if (sector_nr >= max_sector) {
/* If we aborted, we need to abort the
* sync on the 'current' bitmap chunk (there will
@@ -3197,14 +3192,18 @@ static struct r1conf *setup_conf(struct mddev *mddev)
static int raid1_set_limits(struct mddev *mddev)
{
struct queue_limits lim;
+ int err;
- blk_set_stacking_limits(&lim);
+ md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
- mddev_stack_rdev_limits(mddev, &lim);
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ if (err) {
+ queue_limits_cancel_update(mddev->gendisk->queue);
+ return err;
+ }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
-static void raid1_free(struct mddev *mddev, void *priv);
static int raid1_run(struct mddev *mddev)
{
struct r1conf *conf;
@@ -3238,7 +3237,7 @@ static int raid1_run(struct mddev *mddev)
if (!mddev_is_dm(mddev)) {
ret = raid1_set_limits(mddev);
if (ret)
- goto abort;
+ return ret;
}
mddev->degraded = 0;
@@ -3252,8 +3251,7 @@ static int raid1_run(struct mddev *mddev)
*/
if (conf->raid_disks - mddev->degraded < 1) {
md_unregister_thread(mddev, &conf->thread);
- ret = -EINVAL;
- goto abort;
+ return -EINVAL;
}
if (conf->raid_disks - mddev->degraded == 1)
@@ -3277,14 +3275,8 @@ static int raid1_run(struct mddev *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
ret = md_integrity_register(mddev);
- if (ret) {
+ if (ret)
md_unregister_thread(mddev, &mddev->thread);
- goto abort;
- }
- return 0;
-
-abort:
- raid1_free(mddev, conf);
return ret;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a4556d2e46bf..2a9c4ee982e0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1836,8 +1836,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
&& md_flush_request(mddev, bio))
return true;
- if (!md_write_start(mddev, bio))
- return false;
+ md_write_start(mddev, bio);
if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
if (!raid10_handle_discard(mddev, bio))
@@ -2083,9 +2082,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
return -EINVAL;
- if (md_integrity_add_rdev(rdev, mddev))
- return -ENXIO;
-
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@@ -3140,12 +3136,12 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
*/
static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
- int *skipped)
+ sector_t max_sector, int *skipped)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
struct bio *biolist = NULL, *bio;
- sector_t max_sector, nr_sectors;
+ sector_t nr_sectors;
int i;
int max_sync;
sector_t sync_blocks;
@@ -3175,10 +3171,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
return 0;
skipped:
- max_sector = mddev->dev_sectors;
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
- test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
- max_sector = mddev->resync_max_sectors;
if (sector_nr >= max_sector) {
conf->cluster_sync_low = 0;
conf->cluster_sync_high = 0;
@@ -3980,12 +3972,17 @@ static int raid10_set_queue_limits(struct mddev *mddev)
{
struct r10conf *conf = mddev->private;
struct queue_limits lim;
+ int err;
- blk_set_stacking_limits(&lim);
+ md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
- mddev_stack_rdev_limits(mddev, &lim);
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ if (err) {
+ queue_limits_cancel_update(mddev->gendisk->queue);
+ return err;
+ }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2bd1ce9b3922..c14cf2410365 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -155,7 +155,7 @@ static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
return slot;
}
-static void print_raid5_conf (struct r5conf *conf);
+static void print_raid5_conf(struct r5conf *conf);
static int stripe_operations_active(struct stripe_head *sh)
{
@@ -5899,6 +5899,39 @@ out:
return ret;
}
+enum reshape_loc {
+ LOC_NO_RESHAPE,
+ LOC_AHEAD_OF_RESHAPE,
+ LOC_INSIDE_RESHAPE,
+ LOC_BEHIND_RESHAPE,
+};
+
+static enum reshape_loc get_reshape_loc(struct mddev *mddev,
+ struct r5conf *conf, sector_t logical_sector)
+{
+ sector_t reshape_progress, reshape_safe;
+ /*
+ * Spinlock is needed as reshape_progress may be
+ * 64bit on a 32bit platform, and so it might be
+ * possible to see a half-updated value
+ * Of course reshape_progress could change after
+ * the lock is dropped, so once we get a reference
+ * to the stripe that we think it is, we will have
+ * to check again.
+ */
+ spin_lock_irq(&conf->device_lock);
+ reshape_progress = conf->reshape_progress;
+ reshape_safe = conf->reshape_safe;
+ spin_unlock_irq(&conf->device_lock);
+ if (reshape_progress == MaxSector)
+ return LOC_NO_RESHAPE;
+ if (ahead_of_reshape(mddev, logical_sector, reshape_progress))
+ return LOC_AHEAD_OF_RESHAPE;
+ if (ahead_of_reshape(mddev, logical_sector, reshape_safe))
+ return LOC_INSIDE_RESHAPE;
+ return LOC_BEHIND_RESHAPE;
+}
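get_reshape_loc() above classifies a logical sector against the reshape frontier: not yet reached, currently being relocated, or already in the new layout. The sketch below reproduces that classification for a forward-growing reshape (the backwards case flips the comparisons) with stand-in types and invented sector values.

    /*
     * Userspace model of the reshape-position classification added above
     * (get_reshape_loc). Forward-growing reshape is assumed.
     */
    #include <stdio.h>

    enum loc { NO_RESHAPE, AHEAD_OF_RESHAPE, INSIDE_RESHAPE, BEHIND_RESHAPE };

    static enum loc reshape_loc(unsigned long long sector,
                                unsigned long long progress,
                                unsigned long long safe,
                                unsigned long long max_sector)
    {
        if (progress == max_sector)
            return NO_RESHAPE;
        if (sector >= progress)     /* not reached by reshape yet */
            return AHEAD_OF_RESHAPE;
        if (sector >= safe)         /* being relocated right now */
            return INSIDE_RESHAPE;
        return BEHIND_RESHAPE;      /* already in the new layout */
    }

    int main(void)
    {
        unsigned long long max = ~0ULL;

        printf("%d\n", reshape_loc(5000, max, max, max));   /* NO_RESHAPE */
        printf("%d\n", reshape_loc(9000, 8000, 6000, max)); /* AHEAD */
        printf("%d\n", reshape_loc(7000, 8000, 6000, max)); /* INSIDE */
        printf("%d\n", reshape_loc(1000, 8000, 6000, max)); /* BEHIND */
        return 0;
    }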
+
static enum stripe_result make_stripe_request(struct mddev *mddev,
struct r5conf *conf, struct stripe_request_ctx *ctx,
sector_t logical_sector, struct bio *bi)
@@ -5913,28 +5946,14 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
seq = read_seqcount_begin(&conf->gen_lock);
if (unlikely(conf->reshape_progress != MaxSector)) {
- /*
- * Spinlock is needed as reshape_progress may be
- * 64bit on a 32bit platform, and so it might be
- * possible to see a half-updated value
- * Of course reshape_progress could change after
- * the lock is dropped, so once we get a reference
- * to the stripe that we think it is, we will have
- * to check again.
- */
- spin_lock_irq(&conf->device_lock);
- if (ahead_of_reshape(mddev, logical_sector,
- conf->reshape_progress)) {
- previous = 1;
- } else {
- if (ahead_of_reshape(mddev, logical_sector,
- conf->reshape_safe)) {
- spin_unlock_irq(&conf->device_lock);
- ret = STRIPE_SCHEDULE_AND_RETRY;
- goto out;
- }
+ enum reshape_loc loc = get_reshape_loc(mddev, conf,
+ logical_sector);
+ if (loc == LOC_INSIDE_RESHAPE) {
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out;
}
- spin_unlock_irq(&conf->device_lock);
+ if (loc == LOC_AHEAD_OF_RESHAPE)
+ previous = 1;
}
new_sector = raid5_compute_sector(conf, logical_sector, previous,
@@ -6078,8 +6097,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
}
- if (!md_write_start(mddev, bi))
- return false;
+ md_write_start(mddev, bi);
/*
* If array is degraded, better not do chunk aligned read because
* later we might have to read it again in order to reconstruct
@@ -6113,8 +6131,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
if ((bi->bi_opf & REQ_NOWAIT) &&
(conf->reshape_progress != MaxSector) &&
- !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) &&
- ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) {
+ get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) {
bio_wouldblock_error(bi);
if (rw == WRITE)
md_write_end(mddev);
@@ -6255,7 +6272,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
safepos = conf->reshape_safe;
sector_div(safepos, data_disks);
if (mddev->reshape_backwards) {
- BUG_ON(writepos < reshape_sectors);
+ if (WARN_ON(writepos < reshape_sectors))
+ return MaxSector;
+
writepos -= reshape_sectors;
readpos += reshape_sectors;
safepos += reshape_sectors;
@@ -6273,14 +6292,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* to set 'stripe_addr' which is where we will write to.
*/
if (mddev->reshape_backwards) {
- BUG_ON(conf->reshape_progress == 0);
+ if (WARN_ON(conf->reshape_progress == 0))
+ return MaxSector;
+
stripe_addr = writepos;
- BUG_ON((mddev->dev_sectors &
- ~((sector_t)reshape_sectors - 1))
- - reshape_sectors - stripe_addr
- != sector_nr);
+ if (WARN_ON((mddev->dev_sectors &
+ ~((sector_t)reshape_sectors - 1)) -
+ reshape_sectors - stripe_addr != sector_nr))
+ return MaxSector;
} else {
- BUG_ON(writepos != sector_nr + reshape_sectors);
+ if (WARN_ON(writepos != sector_nr + reshape_sectors))
+ return MaxSector;
+
stripe_addr = sector_nr;
}
@@ -6458,11 +6481,10 @@ ret:
}
static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
- int *skipped)
+ sector_t max_sector, int *skipped)
{
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
- sector_t max_sector = mddev->dev_sectors;
sector_t sync_blocks;
int still_degraded = 0;
int i;
@@ -7082,12 +7104,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
err = -ENODEV;
else if (new != conf->skip_copy) {
struct request_queue *q = mddev->gendisk->queue;
+ struct queue_limits lim = queue_limits_start_update(q);
conf->skip_copy = new;
if (new)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
+ lim.features |= BLK_FEAT_STABLE_WRITES;
else
- blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
+ lim.features &= ~BLK_FEAT_STABLE_WRITES;
+ err = queue_limits_commit_update(q, &lim);
}
mddev_unlock_and_resume(mddev);
return err ?: len;
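The skip_copy hunk above switches from poking a queue flag directly to the start-update/commit pattern on a private copy of the limits. The snippet below models only the shape of that flow with a plain struct and a made-up feature bit; the real block-layer queue_limits_* helpers behave differently in detail.

    /*
     * Plain C model of the start-update / toggle-bit / commit pattern the
     * skip_copy hunk switches to. Shape-of-the-flow illustration only.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define FEAT_STABLE_WRITES (1u << 0)

    struct limits { unsigned int features; };

    static struct limits start_update(const struct limits *q)
    {
        return *q;          /* work on a private copy */
    }

    static int commit_update(struct limits *q, const struct limits *lim)
    {
        *q = *lim;          /* apply atomically (modelled) */
        return 0;
    }

    int main(void)
    {
        struct limits q = { .features = 0 };
        struct limits lim = start_update(&q);
        bool skip_copy = true;

        if (skip_copy)
            lim.features |= FEAT_STABLE_WRITES;
        else
            lim.features &= ~FEAT_STABLE_WRITES;

        printf("commit=%d stable_writes=%d\n", commit_update(&q, &lim),
               !!(q.features & FEAT_STABLE_WRITES));
        return 0;
    }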
@@ -7562,11 +7586,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (test_bit(Replacement, &rdev->flags)) {
if (disk->replacement)
goto abort;
- RCU_INIT_POINTER(disk->replacement, rdev);
+ disk->replacement = rdev;
} else {
if (disk->rdev)
goto abort;
- RCU_INIT_POINTER(disk->rdev, rdev);
+ disk->rdev = rdev;
}
if (test_bit(In_sync, &rdev->flags)) {
@@ -7702,13 +7726,13 @@ static int raid5_set_limits(struct mddev *mddev)
*/
stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
- blk_set_stacking_limits(&lim);
+ md_init_stacking_limits(&lim);
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
- lim.raid_partial_stripes_expensive = 1;
+ lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
lim.discard_granularity = stripe;
lim.max_write_zeroes_sectors = 0;
- mddev_stack_rdev_limits(mddev, &lim);
+ mddev_stack_rdev_limits(mddev, &lim, 0);
rdev_for_each(rdev, mddev)
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
mddev->gendisk->disk_name);
@@ -8048,7 +8072,7 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
seq_printf (seq, "]");
}
-static void print_raid5_conf (struct r5conf *conf)
+static void print_raid5_conf(struct r5conf *conf)
{
struct md_rdev *rdev;
int i;
@@ -8062,15 +8086,13 @@ static void print_raid5_conf (struct r5conf *conf)
conf->raid_disks,
conf->raid_disks - conf->mddev->degraded);
- rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- rdev = rcu_dereference(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
if (rdev)
pr_debug(" disk %d, o:%d, dev:%pg\n",
i, !test_bit(Faulty, &rdev->flags),
rdev->bdev);
}
- rcu_read_unlock();
}
static int raid5_spare_active(struct mddev *mddev)
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
index 40a8ebfcfce2..4bd4e324abc9 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -301,10 +301,10 @@ static int ipu6_isys_stream_start(struct ipu6_isys_video *av,
out_requeue:
if (bl && bl->nbufs)
ipu6_isys_buffer_list_queue(bl,
- (IPU6_ISYS_BUFFER_LIST_FL_INCOMING |
- error) ?
+ IPU6_ISYS_BUFFER_LIST_FL_INCOMING |
+ (error ?
IPU6_ISYS_BUFFER_LIST_FL_SET_STATE :
- 0, error ? VB2_BUF_STATE_ERROR :
+ 0), error ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_QUEUED);
flush_firmware_streamon_fail(stream);
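The ipu6 queue hunk above is a parenthesisation fix: the INCOMING flag must always be passed and only SET_STATE depends on 'error', but the old grouping made the whole expression collapse to SET_STATE regardless of the error state. A tiny program with made-up flag values shows the difference:

    /*
     * Illustration of the flag-grouping bug fixed above. Flag values are
     * invented; only the grouping matters.
     */
    #include <stdio.h>

    #define FL_INCOMING  (1u << 0)
    #define FL_SET_STATE (1u << 1)

    int main(void)
    {
        int error = 0;

        /* old grouping: INCOMING is never passed, SET_STATE always is */
        unsigned int old_flags = (FL_INCOMING | error) ? FL_SET_STATE : 0;
        /* new grouping: INCOMING always set, SET_STATE only on error */
        unsigned int new_flags = FL_INCOMING | (error ? FL_SET_STATE : 0);

        printf("old=%#x new=%#x\n", old_flags, new_flags);  /* old=0x2 new=0x1 */
        return 0;
    }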
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index c8a33e1e910c..06090cc0a476 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -943,7 +943,7 @@ ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc)
return NULL;
if (source < 0) {
- dev_err(&stream->isys->adev->auxdev.dev,
+ dev_err(&isys->adev->auxdev.dev,
"query stream with invalid port number\n");
return NULL;
}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
index 5992138c7290..c4aff2e2009b 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -678,6 +678,12 @@ static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
container_of(asc, struct sensor_async_sd, asc);
int ret;
+ if (s_asd->csi2.port >= isys->pdata->ipdata->csi2.nports) {
+ dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n",
+ s_asd->csi2.port);
+ return -EINVAL;
+ }
+
ret = ipu_bridge_instantiate_vcm(sd->dev);
if (ret) {
dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n");
@@ -793,7 +799,7 @@ static int isys_register_devices(struct ipu6_isys *isys)
isys->v4l2_dev.mdev = &isys->media_dev;
isys->v4l2_dev.ctrl_handler = NULL;
- ret = v4l2_device_register(&pdev->dev, &isys->v4l2_dev);
+ ret = v4l2_device_register(dev, &isys->v4l2_dev);
if (ret < 0)
goto out_media_device_unregister;
@@ -925,39 +931,18 @@ static const struct dev_pm_ops isys_pm_ops = {
.resume = isys_resume,
};
-static void isys_remove(struct auxiliary_device *auxdev)
+static void free_fw_msg_bufs(struct ipu6_isys *isys)
{
- struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
- struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev);
- struct ipu6_device *isp = adev->isp;
+ struct device *dev = &isys->adev->auxdev.dev;
struct isys_fw_msgs *fwmsg, *safe;
- unsigned int i;
list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
- dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
- fwmsg, fwmsg->dma_addr, 0);
+ dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
+ fwmsg->dma_addr, 0);
list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
- dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
- fwmsg, fwmsg->dma_addr, 0);
-
- isys_unregister_devices(isys);
- isys_notifier_cleanup(isys);
-
- cpu_latency_qos_remove_request(&isys->pm_qos);
-
- if (!isp->secure_mode) {
- ipu6_cpd_free_pkg_dir(adev);
- ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
- release_firmware(adev->fw);
- }
-
- for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
- mutex_destroy(&isys->streams[i].mutex);
-
- isys_iwake_watermark_cleanup(isys);
- mutex_destroy(&isys->stream_mutex);
- mutex_destroy(&isys->mutex);
+ dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
+ fwmsg->dma_addr, 0);
}
static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
@@ -1140,12 +1125,14 @@ static int isys_probe(struct auxiliary_device *auxdev,
ret = isys_register_devices(isys);
if (ret)
- goto out_remove_pkg_dir_shared_buffer;
+ goto free_fw_msg_bufs;
ipu6_mmu_hw_cleanup(adev->mmu);
return 0;
+free_fw_msg_bufs:
+ free_fw_msg_bufs(isys);
out_remove_pkg_dir_shared_buffer:
if (!isp->secure_mode)
ipu6_cpd_free_pkg_dir(adev);
@@ -1167,6 +1154,34 @@ release_firmware:
return ret;
}
+static void isys_remove(struct auxiliary_device *auxdev)
+{
+ struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
+ struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev);
+ struct ipu6_device *isp = adev->isp;
+ unsigned int i;
+
+ free_fw_msg_bufs(isys);
+
+ isys_unregister_devices(isys);
+ isys_notifier_cleanup(isys);
+
+ cpu_latency_qos_remove_request(&isys->pm_qos);
+
+ if (!isp->secure_mode) {
+ ipu6_cpd_free_pkg_dir(adev);
+ ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
+ release_firmware(adev->fw);
+ }
+
+ for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
+ mutex_destroy(&isys->streams[i].mutex);
+
+ isys_iwake_watermark_cleanup(isys);
+ mutex_destroy(&isys->stream_mutex);
+ mutex_destroy(&isys->mutex);
+}
+
struct fwmsg {
int type;
char *msg;
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
index d2bebd208461..bbd646378ab3 100644
--- a/drivers/media/pci/intel/ipu6/ipu6.c
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -285,7 +285,7 @@ EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6);
#define IPU6_ISYS_CSI2_NPORTS 4
#define IPU6SE_ISYS_CSI2_NPORTS 4
#define IPU6_TGL_ISYS_CSI2_NPORTS 8
-#define IPU6EP_MTL_ISYS_CSI2_NPORTS 4
+#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6
static void ipu6_internal_pdata_init(struct ipu6_device *isp)
{
@@ -727,9 +727,6 @@ static void ipu6_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
release_firmware(isp->cpd_fw);
ipu6_mmu_cleanup(psys_mmu);
diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig
index 407a800c81bc..a7d9607ecdc6 100644
--- a/drivers/media/pci/intel/ivsc/Kconfig
+++ b/drivers/media/pci/intel/ivsc/Kconfig
@@ -4,6 +4,7 @@
config INTEL_VSC
tristate "Intel Visual Sensing Controller"
depends on INTEL_MEI && ACPI && VIDEO_DEV
+ depends on IPU_BRIDGE || !IPU_BRIDGE
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 89b582a221ab..f04a89584334 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -677,10 +677,13 @@ static int mei_csi_probe(struct mei_cl_device *cldev,
return -ENODEV;
ret = ipu_bridge_init(&ipu->dev, ipu_bridge_parse_ssdb);
+ put_device(&ipu->dev);
if (ret < 0)
return ret;
- if (WARN_ON(!dev_fwnode(dev)))
+ if (!dev_fwnode(dev)) {
+ dev_err(dev, "mei-csi probed without device fwnode!\n");
return -ENXIO;
+ }
csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL);
if (!csi)
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index 60498a5abebf..ab4f07e2e560 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -642,9 +642,6 @@ static void mgb4_remove(struct pci_dev *pdev)
struct mgb4_dev *mgbdev = pci_get_drvdata(pdev);
int i;
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(mgbdev->debugfs);
-#endif
#if IS_REACHABLE(CONFIG_HWMON)
hwmon_device_unregister(mgbdev->hwmon_dev);
#endif
@@ -659,6 +656,10 @@ static void mgb4_remove(struct pci_dev *pdev)
if (mgbdev->vin[i])
mgb4_vin_free(mgbdev->vin[i]);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(mgbdev->debugfs);
+#endif
+
device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups);
free_spi(mgbdev);
free_i2c(mgbdev);
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 1280696f65f2..e80fb4ebfda6 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5152,7 +5152,7 @@ struct saa7134_board saa7134_boards[] = {
},
},
[SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = {
- /* Andy Shevchenko <andy@smile.org.ua> */
+ /* Andy Shevchenko <andy@kernel.org> */
.name = "Avermedia AVerTV Studio 507UA",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 502822059498..4ce76ce6dd4d 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -412,10 +412,9 @@ static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable)
u32 val;
int ret;
- if (IS_V6(core)) {
- ctrl = core->wrapper_base + WRAPPER_CORE_POWER_CONTROL_V6;
- stat = core->wrapper_base + WRAPPER_CORE_POWER_STATUS_V6;
- } else if (coreid == VIDC_CORE_ID_1) {
+ if (IS_V6(core))
+ return dev_pm_genpd_set_hwmode(core->pmdomains->pd_devs[coreid], !enable);
+ else if (coreid == VIDC_CORE_ID_1) {
ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
} else {
@@ -451,9 +450,11 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
vcodec_clks_disable(core, core->vcodec0_clks);
- ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
- if (ret)
- return ret;
+ if (!IS_V6(core)) {
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
+ if (ret)
+ return ret;
+ }
ret = pm_runtime_put_sync(core->pmdomains->pd_devs[1]);
if (ret < 0)
@@ -467,9 +468,11 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
vcodec_clks_disable(core, core->vcodec1_clks);
- ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
- if (ret)
- return ret;
+ if (!IS_V6(core)) {
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
+ if (ret)
+ return ret;
+ }
ret = pm_runtime_put_sync(core->pmdomains->pd_devs[2]);
if (ret < 0)
@@ -488,9 +491,11 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret < 0)
return ret;
- ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
- if (ret)
- return ret;
+ if (!IS_V6(core)) {
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
+ if (ret)
+ return ret;
+ }
ret = vcodec_clks_enable(core, core->vcodec0_clks);
if (ret)
@@ -506,9 +511,11 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret < 0)
return ret;
- ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
- if (ret)
- return ret;
+ if (!IS_V6(core)) {
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
+ if (ret)
+ return ret;
+ }
ret = vcodec_clks_enable(core, core->vcodec1_clks);
if (ret)
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 8efdd1f97139..c82d8d8a16ea 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -167,7 +167,7 @@ config FSL_CORENET_CF
represents a coherency violation.
config FSL_IFC
- bool "Freescale IFC driver" if COMPILE_TEST
+ bool "Freescale IFC driver"
depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
depends on HAS_IOMEM
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 4113343da056..fcd2c2cc3cb4 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -44,16 +44,6 @@ config MEMSTICK_R592
To compile this driver as a module, choose M here: the module will
be called r592.
-config MEMSTICK_REALTEK_PCI
- tristate "Realtek PCI-E Memstick Card Interface Driver"
- depends on MISC_RTSX_PCI
- help
- Say Y here to include driver code to support Memstick card interface
- of Realtek PCI-E card reader
-
- To compile this driver as a module, choose M here: the module will
- be called rtsx_pci_ms.
-
config MEMSTICK_REALTEK_USB
tristate "Realtek USB Memstick Card Interface Driver"
depends on MISC_RTSX_USB
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile
index 1abaa03ee68c..0c90df33165d 100644
--- a/drivers/memstick/host/Makefile
+++ b/drivers/memstick/host/Makefile
@@ -6,5 +6,4 @@
obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o
obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o
obj-$(CONFIG_MEMSTICK_R592) += r592.o
-obj-$(CONFIG_MEMSTICK_REALTEK_PCI) += rtsx_pci_ms.o
obj-$(CONFIG_MEMSTICK_REALTEK_USB) += rtsx_usb_ms.o
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
deleted file mode 100644
index 980a54513e6c..000000000000
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ /dev/null
@@ -1,638 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Realtek PCI-Express Memstick Card Interface driver
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * Author:
- * Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#include <linux/module.h>
-#include <linux/highmem.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/memstick.h>
-#include <linux/rtsx_pci.h>
-#include <asm/unaligned.h>
-
-struct realtek_pci_ms {
- struct platform_device *pdev;
- struct rtsx_pcr *pcr;
- struct memstick_host *msh;
- struct memstick_request *req;
-
- struct mutex host_mutex;
- struct work_struct handle_req;
-
- u8 ssc_depth;
- unsigned int clock;
- unsigned char ifmode;
- bool eject;
-};
-
-static inline struct device *ms_dev(struct realtek_pci_ms *host)
-{
- return &(host->pdev->dev);
-}
-
-static inline void ms_clear_error(struct realtek_pci_ms *host)
-{
- rtsx_pci_write_register(host->pcr, CARD_STOP,
- MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR);
-}
-
-#ifdef DEBUG
-
-static void ms_print_debug_regs(struct realtek_pci_ms *host)
-{
- struct rtsx_pcr *pcr = host->pcr;
- u16 i;
- u8 *ptr;
-
- /* Print MS host internal registers */
- rtsx_pci_init_cmd(pcr);
- for (i = 0xFD40; i <= 0xFD44; i++)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
- for (i = 0xFD52; i <= 0xFD69; i++)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
- rtsx_pci_send_cmd(pcr, 100);
-
- ptr = rtsx_pci_get_cmd_data(pcr);
- for (i = 0xFD40; i <= 0xFD44; i++)
- dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
- for (i = 0xFD52; i <= 0xFD69; i++)
- dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
-}
-
-#else
-
-#define ms_print_debug_regs(host)
-
-#endif
-
-static int ms_power_on(struct realtek_pci_ms *host)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err;
-
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, MS_MOD_SEL);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
- CARD_SHARE_MASK, CARD_SHARE_48_MS);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN,
- MS_CLK_EN, MS_CLK_EN);
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
-
- err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_MS_CARD);
- if (err < 0)
- return err;
-
- err = rtsx_pci_card_power_on(pcr, RTSX_MS_CARD);
- if (err < 0)
- return err;
-
- /* Wait ms power stable */
- msleep(150);
-
- err = rtsx_pci_write_register(pcr, CARD_OE,
- MS_OUTPUT_EN, MS_OUTPUT_EN);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int ms_power_off(struct realtek_pci_ms *host)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err;
-
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
-
- err = rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
- if (err < 0)
- return err;
-
- return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
-}
-
-static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
- u8 tpc, u8 cfg, struct scatterlist *sg)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err;
- unsigned int length = sg->length;
- u16 sec_cnt = (u16)(length / 512);
- u8 val, trans_mode, dma_dir;
- struct memstick_dev *card = host->msh->card;
- bool pro_card = card->id.type == MEMSTICK_TYPE_PRO;
-
- dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n",
- __func__, tpc, (data_dir == READ) ? "READ" : "WRITE",
- length);
-
- if (data_dir == READ) {
- dma_dir = DMA_DIR_FROM_CARD;
- trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ;
- } else {
- dma_dir = DMA_DIR_TO_CARD;
- trans_mode = pro_card ? MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE;
- }
-
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
- if (pro_card) {
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
- 0xFF, (u8)(sec_cnt >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
- 0xFF, (u8)sec_cnt);
- }
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
- DMA_DONE_INT, DMA_DONE_INT);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(length >> 24));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(length >> 16));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(length >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)length);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
- 0x03 | DMA_PACK_SIZE_MASK, dma_dir | DMA_EN | DMA_512);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, RING_BUFFER);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
- 0xFF, MS_TRANSFER_START | trans_mode);
- rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
-
- rtsx_pci_send_cmd_no_wait(pcr);
-
- err = rtsx_pci_transfer_data(pcr, sg, 1, data_dir == READ, 10000);
- if (err < 0) {
- ms_clear_error(host);
- return err;
- }
-
- rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
- if (pro_card) {
- if (val & (MS_INT_CMDNK | MS_INT_ERR |
- MS_CRC16_ERR | MS_RDY_TIMEOUT))
- return -EIO;
- } else {
- if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT))
- return -EIO;
- }
-
- return 0;
-}
-
-static int ms_write_bytes(struct realtek_pci_ms *host, u8 tpc,
- u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err, i;
-
- dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
-
- if (!data)
- return -EINVAL;
-
- rtsx_pci_init_cmd(pcr);
-
- for (i = 0; i < cnt; i++)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, data[i]);
- if (cnt % 2)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, 0xFF);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
- 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
- rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
- if (int_reg)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
-
- err = rtsx_pci_send_cmd(pcr, 5000);
- if (err < 0) {
- u8 val;
-
- rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
- dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
-
- if (int_reg)
- *int_reg = val & 0x0F;
-
- ms_print_debug_regs(host);
-
- ms_clear_error(host);
-
- if (!(tpc & 0x08)) {
- if (val & MS_CRC16_ERR)
- return -EIO;
- } else {
- if (!(val & 0x80)) {
- if (val & (MS_INT_ERR | MS_INT_CMDNK))
- return -EIO;
- }
- }
-
- return -ETIMEDOUT;
- }
-
- if (int_reg) {
- u8 *ptr = rtsx_pci_get_cmd_data(pcr) + 1;
- *int_reg = *ptr & 0x0F;
- }
-
- return 0;
-}
-
-static int ms_read_bytes(struct realtek_pci_ms *host, u8 tpc,
- u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err, i;
- u8 *ptr;
-
- dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
-
- if (!data)
- return -EINVAL;
-
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
- 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES);
- rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
- for (i = 0; i < cnt - 1; i++)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
- if (cnt % 2)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + cnt, 0, 0);
- else
- rtsx_pci_add_cmd(pcr, READ_REG_CMD,
- PPBUF_BASE2 + cnt - 1, 0, 0);
- if (int_reg)
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
-
- err = rtsx_pci_send_cmd(pcr, 5000);
- if (err < 0) {
- u8 val;
-
- rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
- dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
-
- if (int_reg)
- *int_reg = val & 0x0F;
-
- ms_print_debug_regs(host);
-
- ms_clear_error(host);
-
- if (!(tpc & 0x08)) {
- if (val & MS_CRC16_ERR)
- return -EIO;
- } else {
- if (!(val & 0x80)) {
- if (val & (MS_INT_ERR | MS_INT_CMDNK))
- return -EIO;
- }
- }
-
- return -ETIMEDOUT;
- }
-
- ptr = rtsx_pci_get_cmd_data(pcr) + 1;
- for (i = 0; i < cnt; i++)
- data[i] = *ptr++;
-
- if (int_reg)
- *int_reg = *ptr & 0x0F;
-
- return 0;
-}
-
-static int rtsx_pci_ms_issue_cmd(struct realtek_pci_ms *host)
-{
- struct memstick_request *req = host->req;
- int err = 0;
- u8 cfg = 0, int_reg;
-
- dev_dbg(ms_dev(host), "%s\n", __func__);
-
- if (req->need_card_int) {
- if (host->ifmode != MEMSTICK_SERIAL)
- cfg = WAIT_INT;
- }
-
- if (req->long_data) {
- err = ms_transfer_data(host, req->data_dir,
- req->tpc, cfg, &(req->sg));
- } else {
- if (req->data_dir == READ) {
- err = ms_read_bytes(host, req->tpc, cfg,
- req->data_len, req->data, &int_reg);
- } else {
- err = ms_write_bytes(host, req->tpc, cfg,
- req->data_len, req->data, &int_reg);
- }
- }
- if (err < 0)
- return err;
-
- if (req->need_card_int && (host->ifmode == MEMSTICK_SERIAL)) {
- err = ms_read_bytes(host, MS_TPC_GET_INT,
- NO_WAIT_INT, 1, &int_reg, NULL);
- if (err < 0)
- return err;
- }
-
- if (req->need_card_int) {
- dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", int_reg);
-
- if (int_reg & MS_INT_CMDNK)
- req->int_reg |= MEMSTICK_INT_CMDNAK;
- if (int_reg & MS_INT_BREQ)
- req->int_reg |= MEMSTICK_INT_BREQ;
- if (int_reg & MS_INT_ERR)
- req->int_reg |= MEMSTICK_INT_ERR;
- if (int_reg & MS_INT_CED)
- req->int_reg |= MEMSTICK_INT_CED;
- }
-
- return 0;
-}
-
-static void rtsx_pci_ms_handle_req(struct work_struct *work)
-{
- struct realtek_pci_ms *host = container_of(work,
- struct realtek_pci_ms, handle_req);
- struct rtsx_pcr *pcr = host->pcr;
- struct memstick_host *msh = host->msh;
- int rc;
-
- mutex_lock(&pcr->pcr_mutex);
-
- rtsx_pci_start_run(pcr);
-
- rtsx_pci_switch_clock(host->pcr, host->clock, host->ssc_depth,
- false, true, false);
- rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, MS_MOD_SEL);
- rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
- CARD_SHARE_MASK, CARD_SHARE_48_MS);
-
- if (!host->req) {
- do {
- rc = memstick_next_req(msh, &host->req);
- dev_dbg(ms_dev(host), "next req %d\n", rc);
-
- if (!rc)
- host->req->error = rtsx_pci_ms_issue_cmd(host);
- } while (!rc);
- }
-
- mutex_unlock(&pcr->pcr_mutex);
-}
-
-static void rtsx_pci_ms_request(struct memstick_host *msh)
-{
- struct realtek_pci_ms *host = memstick_priv(msh);
-
- dev_dbg(ms_dev(host), "--> %s\n", __func__);
-
- if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD))
- return;
-
- schedule_work(&host->handle_req);
-}
-
-static int rtsx_pci_ms_set_param(struct memstick_host *msh,
- enum memstick_param param, int value)
-{
- struct realtek_pci_ms *host = memstick_priv(msh);
- struct rtsx_pcr *pcr = host->pcr;
- unsigned int clock = 0;
- u8 ssc_depth = 0;
- int err;
-
- dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
- __func__, param, value);
-
- err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD);
- if (err)
- return err;
-
- switch (param) {
- case MEMSTICK_POWER:
- if (value == MEMSTICK_POWER_ON)
- err = ms_power_on(host);
- else if (value == MEMSTICK_POWER_OFF)
- err = ms_power_off(host);
- else
- return -EINVAL;
- break;
-
- case MEMSTICK_INTERFACE:
- if (value == MEMSTICK_SERIAL) {
- clock = 19000000;
- ssc_depth = RTSX_SSC_DEPTH_500K;
-
- err = rtsx_pci_write_register(pcr, MS_CFG, 0x58,
- MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT);
- if (err < 0)
- return err;
- } else if (value == MEMSTICK_PAR4) {
- clock = 39000000;
- ssc_depth = RTSX_SSC_DEPTH_1M;
-
- err = rtsx_pci_write_register(pcr, MS_CFG,
- 0x58, MS_BUS_WIDTH_4 | PUSH_TIME_ODD);
- if (err < 0)
- return err;
- } else {
- return -EINVAL;
- }
-
- err = rtsx_pci_switch_clock(pcr, clock,
- ssc_depth, false, true, false);
- if (err < 0)
- return err;
-
- host->ssc_depth = ssc_depth;
- host->clock = clock;
- host->ifmode = value;
- break;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int rtsx_pci_ms_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct realtek_pci_ms *host = platform_get_drvdata(pdev);
- struct memstick_host *msh = host->msh;
-
- dev_dbg(ms_dev(host), "--> %s\n", __func__);
-
- memstick_suspend_host(msh);
- return 0;
-}
-
-static int rtsx_pci_ms_resume(struct platform_device *pdev)
-{
- struct realtek_pci_ms *host = platform_get_drvdata(pdev);
- struct memstick_host *msh = host->msh;
-
- dev_dbg(ms_dev(host), "--> %s\n", __func__);
-
- memstick_resume_host(msh);
- return 0;
-}
-
-#else /* CONFIG_PM */
-
-#define rtsx_pci_ms_suspend NULL
-#define rtsx_pci_ms_resume NULL
-
-#endif /* CONFIG_PM */
-
-static void rtsx_pci_ms_card_event(struct platform_device *pdev)
-{
- struct realtek_pci_ms *host = platform_get_drvdata(pdev);
-
- memstick_detect_change(host->msh);
-}
-
-static int rtsx_pci_ms_drv_probe(struct platform_device *pdev)
-{
- struct memstick_host *msh;
- struct realtek_pci_ms *host;
- struct rtsx_pcr *pcr;
- struct pcr_handle *handle = pdev->dev.platform_data;
- int rc;
-
- if (!handle)
- return -ENXIO;
-
- pcr = handle->pcr;
- if (!pcr)
- return -ENXIO;
-
- dev_dbg(&(pdev->dev),
- ": Realtek PCI-E Memstick controller found\n");
-
- msh = memstick_alloc_host(sizeof(*host), &pdev->dev);
- if (!msh)
- return -ENOMEM;
-
- host = memstick_priv(msh);
- host->pcr = pcr;
- host->msh = msh;
- host->pdev = pdev;
- platform_set_drvdata(pdev, host);
- pcr->slots[RTSX_MS_CARD].p_dev = pdev;
- pcr->slots[RTSX_MS_CARD].card_event = rtsx_pci_ms_card_event;
-
- mutex_init(&host->host_mutex);
-
- INIT_WORK(&host->handle_req, rtsx_pci_ms_handle_req);
- msh->request = rtsx_pci_ms_request;
- msh->set_param = rtsx_pci_ms_set_param;
- msh->caps = MEMSTICK_CAP_PAR4;
-
- rc = memstick_add_host(msh);
- if (rc) {
- memstick_free_host(msh);
- return rc;
- }
-
- return 0;
-}
-
-static void rtsx_pci_ms_drv_remove(struct platform_device *pdev)
-{
- struct realtek_pci_ms *host = platform_get_drvdata(pdev);
- struct rtsx_pcr *pcr;
- struct memstick_host *msh;
- int rc;
-
- pcr = host->pcr;
- pcr->slots[RTSX_MS_CARD].p_dev = NULL;
- pcr->slots[RTSX_MS_CARD].card_event = NULL;
- msh = host->msh;
- host->eject = true;
- cancel_work_sync(&host->handle_req);
-
- mutex_lock(&host->host_mutex);
- if (host->req) {
- dev_dbg(&(pdev->dev),
- "%s: Controller removed during transfer\n",
- dev_name(&msh->dev));
-
- rtsx_pci_complete_unfinished_transfer(pcr);
-
- host->req->error = -ENOMEDIUM;
- do {
- rc = memstick_next_req(msh, &host->req);
- if (!rc)
- host->req->error = -ENOMEDIUM;
- } while (!rc);
- }
- mutex_unlock(&host->host_mutex);
-
- memstick_remove_host(msh);
- memstick_free_host(msh);
-
- dev_dbg(&(pdev->dev),
- ": Realtek PCI-E Memstick controller has been removed\n");
-}
-
-static struct platform_device_id rtsx_pci_ms_ids[] = {
- {
- .name = DRV_NAME_RTSX_PCI_MS,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, rtsx_pci_ms_ids);
-
-static struct platform_driver rtsx_pci_ms_driver = {
- .probe = rtsx_pci_ms_drv_probe,
- .remove_new = rtsx_pci_ms_drv_remove,
- .id_table = rtsx_pci_ms_ids,
- .suspend = rtsx_pci_ms_suspend,
- .resume = rtsx_pci_ms_resume,
- .driver = {
- .name = DRV_NAME_RTSX_PCI_MS,
- },
-};
-module_platform_driver(rtsx_pci_ms_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
-MODULE_DESCRIPTION("Realtek PCI-E Memstick Card Host Driver");
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 300caa067335..384ecf5301d2 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -116,7 +116,7 @@ enum {
#define PM800_CHIP_GEN_ID_NUM 0x3
static const struct i2c_device_id pm80x_id_table[] = {
- {"88PM800", 0},
+ { "88PM800" },
{} /* NULL terminated */
};
MODULE_DEVICE_TABLE(i2c, pm80x_id_table);
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 68417c3c4f5a..205f0762a928 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -30,7 +30,7 @@
#include <linux/delay.h>
static const struct i2c_device_id pm80x_id_table[] = {
- {"88PM805", 0},
+ { "88PM805" },
{} /* NULL terminated */
};
MODULE_DEVICE_TABLE(i2c, pm80x_id_table);
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 151bf03e772d..7f003f71e1af 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1233,7 +1233,7 @@ static int pm860x_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(pm860x_pm_ops, pm860x_suspend, pm860x_resume);
static const struct i2c_device_id pm860x_id_table[] = {
- { "88PM860x", 0 },
+ { "88PM860x" },
{}
};
MODULE_DEVICE_TABLE(i2c, pm860x_id_table);
diff --git a/drivers/mfd/88pm886.c b/drivers/mfd/88pm886.c
new file mode 100644
index 000000000000..dbe9efc027d2
--- /dev/null
+++ b/drivers/mfd/88pm886.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/88pm886.h>
+
+static const struct regmap_config pm886_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PM886_REG_RTC_SPARE6,
+};
+
+static struct regmap_irq pm886_regmap_irqs[] = {
+ REGMAP_IRQ_REG(PM886_IRQ_ONKEY, 0, PM886_INT_ENA1_ONKEY),
+};
+
+static struct regmap_irq_chip pm886_regmap_irq_chip = {
+ .name = "88pm886",
+ .irqs = pm886_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(pm886_regmap_irqs),
+ .num_regs = 4,
+ .status_base = PM886_REG_INT_STATUS1,
+ .ack_base = PM886_REG_INT_STATUS1,
+ .unmask_base = PM886_REG_INT_ENA_1,
+};
+
+static struct resource pm886_onkey_resources[] = {
+ DEFINE_RES_IRQ_NAMED(PM886_IRQ_ONKEY, "88pm886-onkey"),
+};
+
+static struct mfd_cell pm886_devs[] = {
+ MFD_CELL_RES("88pm886-onkey", pm886_onkey_resources),
+ MFD_CELL_NAME("88pm886-regulator"),
+};
+
+static int pm886_power_off_handler(struct sys_off_data *sys_off_data)
+{
+ struct pm886_chip *chip = sys_off_data->cb_data;
+ struct regmap *regmap = chip->regmap;
+ struct device *dev = &chip->client->dev;
+ int err;
+
+ err = regmap_update_bits(regmap, PM886_REG_MISC_CONFIG1, PM886_SW_PDOWN, PM886_SW_PDOWN);
+ if (err) {
+ dev_err(dev, "Failed to power off the device: %d\n", err);
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static int pm886_setup_irq(struct pm886_chip *chip,
+ struct regmap_irq_chip_data **irq_data)
+{
+ struct regmap *regmap = chip->regmap;
+ struct device *dev = &chip->client->dev;
+ int err;
+
+ /* Set interrupt clearing mode to clear on write. */
+ err = regmap_update_bits(regmap, PM886_REG_MISC_CONFIG2,
+ PM886_INT_INV | PM886_INT_CLEAR | PM886_INT_MASK_MODE,
+ PM886_INT_WC);
+ if (err) {
+ dev_err(dev, "Failed to set interrupt clearing mode: %d\n", err);
+ return err;
+ }
+
+ err = devm_regmap_add_irq_chip(dev, regmap, chip->client->irq,
+ IRQF_ONESHOT, 0, &pm886_regmap_irq_chip,
+ irq_data);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int pm886_probe(struct i2c_client *client)
+{
+ struct regmap_irq_chip_data *irq_data;
+ struct device *dev = &client->dev;
+ struct pm886_chip *chip;
+ struct regmap *regmap;
+ unsigned int chip_id;
+ int err;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->chip_id = (uintptr_t)device_get_match_data(dev);
+ i2c_set_clientdata(client, chip);
+
+ regmap = devm_regmap_init_i2c(client, &pm886_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Failed to initialize regmap\n");
+ chip->regmap = regmap;
+
+ err = regmap_read(regmap, PM886_REG_ID, &chip_id);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to read chip ID\n");
+
+ if (chip->chip_id != chip_id)
+ return dev_err_probe(dev, -EINVAL, "Unsupported chip: 0x%x\n", chip_id);
+
+ err = pm886_setup_irq(chip, &irq_data);
+ if (err)
+ return err;
+
+ err = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, pm886_devs, ARRAY_SIZE(pm886_devs),
+ NULL, 0, regmap_irq_get_domain(irq_data));
+ if (err)
+ return dev_err_probe(dev, err, "Failed to add devices\n");
+
+ err = devm_register_power_off_handler(dev, pm886_power_off_handler, chip);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register power off handler\n");
+
+ device_init_wakeup(dev, device_property_read_bool(dev, "wakeup-source"));
+
+ return 0;
+}
+
+static const struct of_device_id pm886_of_match[] = {
+ { .compatible = "marvell,88pm886-a1", .data = (void *)PM886_A1_CHIP_ID },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pm886_of_match);
+
+static struct i2c_driver pm886_i2c_driver = {
+ .driver = {
+ .name = "88pm886",
+ .of_match_table = pm886_of_match,
+ },
+ .probe = pm886_probe,
+};
+module_i2c_driver(pm886_i2c_driver);
+
+MODULE_DESCRIPTION("Marvell 88PM886 PMIC driver");
+MODULE_AUTHOR("Karel Balej <balejk@matfyz.cz>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 266b4f54af60..bc8be2e593b6 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -794,6 +794,18 @@ config MFD_88PM860X
select individual components like voltage regulators, RTC and
battery-charger under the corresponding menus.
+config MFD_88PM886_PMIC
+ bool "Marvell 88PM886 PMIC"
+ depends on I2C=y
+ depends on OF
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select MFD_CORE
+ help
+ This enables support for Marvell 88PM886 Power Management IC.
+ This includes the I2C driver and the core APIs _only_; you have to
+ select individual components like onkey under the corresponding menus.
+
config MFD_MAX14577
tristate "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support"
depends on I2C
@@ -2089,6 +2101,19 @@ config MFD_ROHM_BD957XMUF
BD9573MUF Power Management ICs. BD9576 and BD9573 are primarily
designed to be used to power R-Car series processors.
+config MFD_ROHM_BD96801
+ tristate "ROHM BD96801 Power Management IC"
+ depends on I2C=y
+ depends on OF
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select MFD_CORE
+ help
+ Select this option to get support for the ROHM BD96801 Power
+ Management IC. The ROHM BD96801 is a highly scalable Power Management
+ IC for industrial and automotive use. The BD96801 can be used as a
+ master PMIC in a chained PMIC solution with suitable companion PMICs.
+
config MFD_STM32_LPTIMER
tristate "Support for STM32 Low-Power Timer"
depends on (ARCH_STM32 && OF) || COMPILE_TEST
@@ -2208,6 +2233,7 @@ config MFD_ACER_A500_EC
config MFD_QCOM_PM8008
tristate "QCOM PM8008 Power Management IC"
depends on I2C && OF
+ select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
help
@@ -2243,6 +2269,36 @@ config MCP_UCB1200_TS
endmenu
+config MFD_CS40L50_CORE
+ tristate
+ select MFD_CORE
+ select FW_CS_DSP
+ select REGMAP_IRQ
+
+config MFD_CS40L50_I2C
+ tristate "Cirrus Logic CS40L50 (I2C)"
+ select REGMAP_I2C
+ select MFD_CS40L50_CORE
+ depends on I2C
+ help
+ Select this to support the Cirrus Logic CS40L50 Haptic
+ Driver over I2C.
+
+ This driver can be built as a module. If built as a module it will be
+ called "cs40l50-i2c".
+
+config MFD_CS40L50_SPI
+ tristate "Cirrus Logic CS40L50 (SPI)"
+ select REGMAP_SPI
+ select MFD_CS40L50_CORE
+ depends on SPI
+ help
+ Select this to support the Cirrus Logic CS40L50 Haptic
+ Driver over SPI.
+
+ This driver can be built as a module. If built as a module it will be
+ called "cs40l50-spi".
+
config MFD_VEXPRESS_SYSREG
tristate "Versatile Express System Registers"
depends on VEXPRESS_CONFIG && GPIOLIB
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c66f07edcd0e..02b651cd7535 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_88PM800) += 88pm800.o 88pm80x.o
obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
+obj-$(CONFIG_MFD_88PM886_PMIC) += 88pm886.o
obj-$(CONFIG_MFD_ACT8945A) += act8945a.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
@@ -88,6 +89,10 @@ obj-$(CONFIG_MFD_MADERA) += madera.o
obj-$(CONFIG_MFD_MADERA_I2C) += madera-i2c.o
obj-$(CONFIG_MFD_MADERA_SPI) += madera-spi.o
+obj-$(CONFIG_MFD_CS40L50_CORE) += cs40l50-core.o
+obj-$(CONFIG_MFD_CS40L50_I2C) += cs40l50-i2c.o
+obj-$(CONFIG_MFD_CS40L50_SPI) += cs40l50-spi.o
+
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
@@ -264,6 +269,7 @@ obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
obj-$(CONFIG_MFD_ROHM_BD957XMUF) += rohm-bd9576.o
+obj-$(CONFIG_MFD_ROHM_BD96801) += rohm-bd96801.o
obj-$(CONFIG_MFD_STMFX) += stmfx.o
obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o
obj-$(CONFIG_MFD_ACER_A500_EC) += acer-ec-a500.o
@@ -280,7 +286,5 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
-rsmu-i2c-objs := rsmu_core.o rsmu_i2c.o
-rsmu-spi-objs := rsmu_core.o rsmu_spi.o
-obj-$(CONFIG_MFD_RSMU_I2C) += rsmu-i2c.o
-obj-$(CONFIG_MFD_RSMU_SPI) += rsmu-spi.o
+obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o
+obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 2fee62f1016c..8ef510e84688 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -439,7 +439,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(aat2870_pm_ops, aat2870_i2c_suspend,
aat2870_i2c_resume);
static const struct i2c_device_id aat2870_i2c_id_table[] = {
- { "aat2870", 0 },
+ { "aat2870" },
{ }
};
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
index 4e32ac3d573e..cafefb4451cb 100644
--- a/drivers/mfd/act8945a.c
+++ b/drivers/mfd/act8945a.c
@@ -54,7 +54,7 @@ static int act8945a_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id act8945a_i2c_id[] = {
- { "act8945a", 0 },
+ { "act8945a" },
{}
};
MODULE_DEVICE_TABLE(i2c, act8945a_i2c_id);
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 19a0adf8ce3d..85ff8717d850 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -1429,4 +1429,5 @@ int arizona_dev_exit(struct arizona *arizona)
}
EXPORT_SYMBOL_GPL(arizona_dev_exit);
+MODULE_DESCRIPTION("Wolfson Arizona core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index de5d894ac04a..eaa2b2bc5dd0 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -190,19 +190,12 @@ static int arizona_spi_acpi_probe(struct arizona *arizona)
static int arizona_spi_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
- const void *match_data;
struct arizona *arizona;
const struct regmap_config *regmap_config = NULL;
unsigned long type = 0;
int ret;
- match_data = device_get_match_data(&spi->dev);
- if (match_data)
- type = (unsigned long)match_data;
- else if (id)
- type = id->driver_data;
-
+ type = (unsigned long)spi_get_device_match_data(spi);
switch (type) {
case WM5102:
if (IS_ENABLED(CONFIG_MFD_WM5102))
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index bec047bdd088..6c0d89b0c7e3 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -430,8 +430,8 @@ static const struct of_device_id as3722_of_match[] = {
MODULE_DEVICE_TABLE(of, as3722_of_match);
static const struct i2c_device_id as3722_i2c_id[] = {
- { "as3722", 0 },
- {},
+ { "as3722" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, as3722_i2c_id);
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index b8e7ac89f697..791a0b4cb64b 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -75,18 +75,18 @@ MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
#endif
static const struct i2c_device_id axp20x_i2c_id[] = {
- { "axp152", 0 },
- { "axp192", 0 },
- { "axp202", 0 },
- { "axp209", 0 },
- { "axp221", 0 },
- { "axp223", 0 },
- { "axp313a", 0 },
- { "axp717", 0 },
- { "axp803", 0 },
- { "axp806", 0 },
- { "axp15060", 0 },
- { },
+ { "axp152" },
+ { "axp192" },
+ { "axp202" },
+ { "axp209" },
+ { "axp221" },
+ { "axp223" },
+ { "axp313a" },
+ { "axp717" },
+ { "axp803" },
+ { "axp806" },
+ { "axp15060" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index f2c0f144c0fc..dacd3c96c9f5 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -210,6 +210,7 @@ static const struct regmap_access_table axp313a_volatile_table = {
static const struct regmap_range axp717_writeable_ranges[] = {
regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
+ regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
};
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 0a955178d469..e7c2ac74d998 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -268,7 +268,7 @@ static const struct of_device_id bd9571mwv_of_match_table[] = {
MODULE_DEVICE_TABLE(of, bd9571mwv_of_match_table);
static const struct i2c_device_id bd9571mwv_id_table[] = {
- { "bd9571mwv", 0 },
+ { "bd9571mwv" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, bd9571mwv_id_table);
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index a52d59cc2b1e..e2aae8918679 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -87,6 +87,7 @@ static const struct mfd_cell cros_ec_sensorhub_cells[] = {
};
static const struct mfd_cell cros_usbpd_charger_cells[] = {
+ { .name = "cros-charge-control", },
{ .name = "cros-usbpd-charger", },
{ .name = "cros-usbpd-logger", },
};
@@ -99,6 +100,14 @@ static const struct mfd_cell cros_ec_wdt_cells[] = {
{ .name = "cros-ec-wdt", }
};
+static const struct mfd_cell cros_ec_led_cells[] = {
+ { .name = "cros-ec-led", },
+};
+
+static const struct mfd_cell cros_ec_keyboard_leds_cells[] = {
+ { .name = "cros-keyboard-leds", },
+};
+
static const struct cros_feature_to_cells cros_subdevices[] = {
{
.id = EC_FEATURE_CEC,
@@ -125,11 +134,22 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
.mfd_cells = cros_ec_wdt_cells,
.num_cells = ARRAY_SIZE(cros_ec_wdt_cells),
},
+ {
+ .id = EC_FEATURE_LED,
+ .mfd_cells = cros_ec_led_cells,
+ .num_cells = ARRAY_SIZE(cros_ec_led_cells),
+ },
+ {
+ .id = EC_FEATURE_PWM_KEYB,
+ .mfd_cells = cros_ec_keyboard_leds_cells,
+ .num_cells = ARRAY_SIZE(cros_ec_keyboard_leds_cells),
+ },
};
static const struct mfd_cell cros_ec_platform_cells[] = {
{ .name = "cros-ec-chardev", },
{ .name = "cros-ec-debugfs", },
+ { .name = "cros-ec-hwmon", },
{ .name = "cros-ec-sysfs", },
};
diff --git a/drivers/mfd/cs40l50-core.c b/drivers/mfd/cs40l50-core.c
new file mode 100644
index 000000000000..26e7a769eb14
--- /dev/null
+++ b/drivers/mfd/cs40l50-core.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/cs40l50.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+static const struct mfd_cell cs40l50_devs[] = {
+ { .name = "cs40l50-codec", },
+ { .name = "cs40l50-vibra", },
+};
+
+const struct regmap_config cs40l50_regmap = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+EXPORT_SYMBOL_GPL(cs40l50_regmap);
+
+static const char * const cs40l50_supplies[] = {
+ "vdd-io",
+};
+
+static const struct regmap_irq cs40l50_reg_irqs[] = {
+ REGMAP_IRQ_REG(CS40L50_DSP_QUEUE_IRQ, CS40L50_IRQ1_INT_2_OFFSET,
+ CS40L50_DSP_QUEUE_MASK),
+ REGMAP_IRQ_REG(CS40L50_AMP_SHORT_IRQ, CS40L50_IRQ1_INT_1_OFFSET,
+ CS40L50_AMP_SHORT_MASK),
+ REGMAP_IRQ_REG(CS40L50_TEMP_ERR_IRQ, CS40L50_IRQ1_INT_8_OFFSET,
+ CS40L50_TEMP_ERR_MASK),
+ REGMAP_IRQ_REG(CS40L50_BST_UVP_IRQ, CS40L50_IRQ1_INT_9_OFFSET,
+ CS40L50_BST_UVP_MASK),
+ REGMAP_IRQ_REG(CS40L50_BST_SHORT_IRQ, CS40L50_IRQ1_INT_9_OFFSET,
+ CS40L50_BST_SHORT_MASK),
+ REGMAP_IRQ_REG(CS40L50_BST_ILIMIT_IRQ, CS40L50_IRQ1_INT_9_OFFSET,
+ CS40L50_BST_ILIMIT_MASK),
+ REGMAP_IRQ_REG(CS40L50_UVLO_VDDBATT_IRQ, CS40L50_IRQ1_INT_10_OFFSET,
+ CS40L50_UVLO_VDDBATT_MASK),
+ REGMAP_IRQ_REG(CS40L50_GLOBAL_ERROR_IRQ, CS40L50_IRQ1_INT_18_OFFSET,
+ CS40L50_GLOBAL_ERROR_MASK),
+};
+
+static struct regmap_irq_chip cs40l50_irq_chip = {
+ .name = "cs40l50",
+ .status_base = CS40L50_IRQ1_INT_1,
+ .mask_base = CS40L50_IRQ1_MASK_1,
+ .ack_base = CS40L50_IRQ1_INT_1,
+ .num_regs = 22,
+ .irqs = cs40l50_reg_irqs,
+ .num_irqs = ARRAY_SIZE(cs40l50_reg_irqs),
+ .runtime_pm = true,
+};
+
+int cs40l50_dsp_write(struct device *dev, struct regmap *regmap, u32 val)
+{
+ int i, ret;
+ u32 ack;
+
+ /* Device NAKs if hibernating, so optionally retry */
+ for (i = 0; i < CS40L50_DSP_TIMEOUT_COUNT; i++) {
+ ret = regmap_write(regmap, CS40L50_DSP_QUEUE, val);
+ if (!ret)
+ break;
+
+ usleep_range(CS40L50_DSP_POLL_US, CS40L50_DSP_POLL_US + 100);
+ }
+
+ /* If the write never took place, no need to check for the ACK */
+ if (i == CS40L50_DSP_TIMEOUT_COUNT) {
+ dev_err(dev, "Timed out writing %#X to DSP: %d\n", val, ret);
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(regmap, CS40L50_DSP_QUEUE, ack, !ack,
+ CS40L50_DSP_POLL_US,
+ CS40L50_DSP_POLL_US * CS40L50_DSP_TIMEOUT_COUNT);
+ if (ret)
+ dev_err(dev, "DSP failed to ACK %#X: %d\n", val, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs40l50_dsp_write);
+
+static const struct cs_dsp_region cs40l50_dsp_regions[] = {
+ { .type = WMFW_HALO_PM_PACKED, .base = CS40L50_PMEM_0 },
+ { .type = WMFW_HALO_XM_PACKED, .base = CS40L50_XMEM_PACKED_0 },
+ { .type = WMFW_HALO_YM_PACKED, .base = CS40L50_YMEM_PACKED_0 },
+ { .type = WMFW_ADSP2_XM, .base = CS40L50_XMEM_UNPACKED24_0 },
+ { .type = WMFW_ADSP2_YM, .base = CS40L50_YMEM_UNPACKED24_0 },
+};
+
+static const struct reg_sequence cs40l50_internal_vamp_config[] = {
+ { CS40L50_BST_LPMODE_SEL, CS40L50_DCM_LOW_POWER },
+ { CS40L50_BLOCK_ENABLES2, CS40L50_OVERTEMP_WARN },
+};
+
+static const struct reg_sequence cs40l50_irq_mask_override[] = {
+ { CS40L50_IRQ1_MASK_2, CS40L50_IRQ_MASK_2_OVERRIDE },
+ { CS40L50_IRQ1_MASK_20, CS40L50_IRQ_MASK_20_OVERRIDE },
+};
+
+static int cs40l50_wseq_init(struct cs40l50 *cs40l50)
+{
+ struct cs_dsp *dsp = &cs40l50->dsp;
+
+ cs40l50->wseqs[CS40L50_STANDBY].ctl = cs_dsp_get_ctl(dsp, "STANDBY_SEQUENCE",
+ WMFW_ADSP2_XM,
+ CS40L50_PM_ALGO);
+ if (!cs40l50->wseqs[CS40L50_STANDBY].ctl) {
+ dev_err(cs40l50->dev, "Control not found for standby sequence\n");
+ return -ENOENT;
+ }
+
+ cs40l50->wseqs[CS40L50_ACTIVE].ctl = cs_dsp_get_ctl(dsp, "ACTIVE_SEQUENCE",
+ WMFW_ADSP2_XM,
+ CS40L50_PM_ALGO);
+ if (!cs40l50->wseqs[CS40L50_ACTIVE].ctl) {
+ dev_err(cs40l50->dev, "Control not found for active sequence\n");
+ return -ENOENT;
+ }
+
+ cs40l50->wseqs[CS40L50_PWR_ON].ctl = cs_dsp_get_ctl(dsp, "PM_PWR_ON_SEQ",
+ WMFW_ADSP2_XM,
+ CS40L50_PM_ALGO);
+ if (!cs40l50->wseqs[CS40L50_PWR_ON].ctl) {
+ dev_err(cs40l50->dev, "Control not found for power-on sequence\n");
+ return -ENOENT;
+ }
+
+ return cs_dsp_wseq_init(&cs40l50->dsp, cs40l50->wseqs, ARRAY_SIZE(cs40l50->wseqs));
+}
+
+static int cs40l50_dsp_config(struct cs40l50 *cs40l50)
+{
+ int ret;
+
+ /* Configure internal V_AMP supply */
+ ret = regmap_multi_reg_write(cs40l50->regmap, cs40l50_internal_vamp_config,
+ ARRAY_SIZE(cs40l50_internal_vamp_config));
+ if (ret)
+ return ret;
+
+ ret = cs_dsp_wseq_multi_write(&cs40l50->dsp, &cs40l50->wseqs[CS40L50_PWR_ON],
+ cs40l50_internal_vamp_config, CS_DSP_WSEQ_FULL,
+ ARRAY_SIZE(cs40l50_internal_vamp_config), false);
+ if (ret)
+ return ret;
+
+ /* Override firmware defaults for IRQ masks */
+ ret = regmap_multi_reg_write(cs40l50->regmap, cs40l50_irq_mask_override,
+ ARRAY_SIZE(cs40l50_irq_mask_override));
+ if (ret)
+ return ret;
+
+ return cs_dsp_wseq_multi_write(&cs40l50->dsp, &cs40l50->wseqs[CS40L50_PWR_ON],
+ cs40l50_irq_mask_override, CS_DSP_WSEQ_FULL,
+ ARRAY_SIZE(cs40l50_irq_mask_override), false);
+}
+
+static int cs40l50_dsp_post_run(struct cs_dsp *dsp)
+{
+ struct cs40l50 *cs40l50 = container_of(dsp, struct cs40l50, dsp);
+ int ret;
+
+ ret = cs40l50_wseq_init(cs40l50);
+ if (ret)
+ return ret;
+
+ ret = cs40l50_dsp_config(cs40l50);
+ if (ret) {
+ dev_err(cs40l50->dev, "Failed to configure DSP: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(cs40l50->dev, PLATFORM_DEVID_NONE, cs40l50_devs,
+ ARRAY_SIZE(cs40l50_devs), NULL, 0, NULL);
+ if (ret)
+ dev_err(cs40l50->dev, "Failed to add child devices: %d\n", ret);
+
+ return ret;
+}
+
+static const struct cs_dsp_client_ops client_ops = {
+ .post_run = cs40l50_dsp_post_run,
+};
+
+static void cs40l50_dsp_remove(void *data)
+{
+ cs_dsp_remove(data);
+}
+
+static int cs40l50_dsp_init(struct cs40l50 *cs40l50)
+{
+ int ret;
+
+ cs40l50->dsp.num = 1;
+ cs40l50->dsp.type = WMFW_HALO;
+ cs40l50->dsp.dev = cs40l50->dev;
+ cs40l50->dsp.regmap = cs40l50->regmap;
+ cs40l50->dsp.base = CS40L50_CORE_BASE;
+ cs40l50->dsp.base_sysinfo = CS40L50_SYS_INFO_ID;
+ cs40l50->dsp.mem = cs40l50_dsp_regions;
+ cs40l50->dsp.num_mems = ARRAY_SIZE(cs40l50_dsp_regions);
+ cs40l50->dsp.no_core_startstop = true;
+ cs40l50->dsp.client_ops = &client_ops;
+
+ ret = cs_dsp_halo_init(&cs40l50->dsp);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(cs40l50->dev, cs40l50_dsp_remove,
+ &cs40l50->dsp);
+}
+
+static int cs40l50_reset_dsp(struct cs40l50 *cs40l50)
+{
+ int ret;
+
+ mutex_lock(&cs40l50->lock);
+
+ if (cs40l50->dsp.running)
+ cs_dsp_stop(&cs40l50->dsp);
+
+ if (cs40l50->dsp.booted)
+ cs_dsp_power_down(&cs40l50->dsp);
+
+ ret = cs40l50_dsp_write(cs40l50->dev, cs40l50->regmap, CS40L50_SHUTDOWN);
+ if (ret)
+ goto err_mutex;
+
+ ret = cs_dsp_power_up(&cs40l50->dsp, cs40l50->fw, "cs40l50.wmfw",
+ cs40l50->bin, "cs40l50.bin", "cs40l50");
+ if (ret)
+ goto err_mutex;
+
+ ret = cs40l50_dsp_write(cs40l50->dev, cs40l50->regmap, CS40L50_SYSTEM_RESET);
+ if (ret)
+ goto err_mutex;
+
+ ret = cs40l50_dsp_write(cs40l50->dev, cs40l50->regmap, CS40L50_PREVENT_HIBER);
+ if (ret)
+ goto err_mutex;
+
+ ret = cs_dsp_run(&cs40l50->dsp);
+err_mutex:
+ mutex_unlock(&cs40l50->lock);
+
+ return ret;
+}
+
+static void cs40l50_dsp_power_down(void *data)
+{
+ cs_dsp_power_down(data);
+}
+
+static void cs40l50_dsp_stop(void *data)
+{
+ cs_dsp_stop(data);
+}
+
+static void cs40l50_dsp_bringup(const struct firmware *bin, void *context)
+{
+ struct cs40l50 *cs40l50 = context;
+ u32 nwaves;
+ int ret;
+
+ /* Wavetable is optional; bringup DSP regardless */
+ cs40l50->bin = bin;
+
+ ret = cs40l50_reset_dsp(cs40l50);
+ if (ret) {
+ dev_err(cs40l50->dev, "Failed to reset DSP: %d\n", ret);
+ goto err_fw;
+ }
+
+ ret = regmap_read(cs40l50->regmap, CS40L50_NUM_WAVES, &nwaves);
+ if (ret)
+ goto err_fw;
+
+ dev_info(cs40l50->dev, "%u RAM effects loaded\n", nwaves);
+
+ /* Add teardown actions for first-time bringup */
+ ret = devm_add_action_or_reset(cs40l50->dev, cs40l50_dsp_power_down,
+ &cs40l50->dsp);
+ if (ret) {
+ dev_err(cs40l50->dev, "Failed to add power down action: %d\n", ret);
+ goto err_fw;
+ }
+
+ ret = devm_add_action_or_reset(cs40l50->dev, cs40l50_dsp_stop, &cs40l50->dsp);
+ if (ret)
+ dev_err(cs40l50->dev, "Failed to add stop action: %d\n", ret);
+err_fw:
+ release_firmware(cs40l50->bin);
+ release_firmware(cs40l50->fw);
+}
+
+static void cs40l50_request_firmware(const struct firmware *fw, void *context)
+{
+ struct cs40l50 *cs40l50 = context;
+ int ret;
+
+ if (!fw) {
+ dev_err(cs40l50->dev, "No firmware file found\n");
+ return;
+ }
+
+ cs40l50->fw = fw;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, CS40L50_WT,
+ cs40l50->dev, GFP_KERNEL, cs40l50,
+ cs40l50_dsp_bringup);
+ if (ret) {
+ dev_err(cs40l50->dev, "Failed to request %s: %d\n", CS40L50_WT, ret);
+ release_firmware(cs40l50->fw);
+ }
+}
+
+struct cs40l50_irq {
+ const char *name;
+ int virq;
+};
+
+static struct cs40l50_irq cs40l50_irqs[] = {
+ { "DSP", },
+ { "Global", },
+ { "Boost UVLO", },
+ { "Boost current limit", },
+ { "Boost short", },
+ { "Boost undervolt", },
+ { "Overtemp", },
+ { "Amp short", },
+};
+
+static const struct reg_sequence cs40l50_err_rls[] = {
+ { CS40L50_ERR_RLS, CS40L50_GLOBAL_ERR_RLS_SET },
+ { CS40L50_ERR_RLS, CS40L50_GLOBAL_ERR_RLS_CLEAR },
+};
+
+static irqreturn_t cs40l50_hw_err(int irq, void *data)
+{
+ struct cs40l50 *cs40l50 = data;
+ int ret = 0, i;
+
+ mutex_lock(&cs40l50->lock);
+
+ /* Log hardware interrupt and execute error release sequence */
+ for (i = 1; i < ARRAY_SIZE(cs40l50_irqs); i++) {
+ if (cs40l50_irqs[i].virq == irq) {
+ dev_err(cs40l50->dev, "%s error\n", cs40l50_irqs[i].name);
+ ret = regmap_multi_reg_write(cs40l50->regmap, cs40l50_err_rls,
+ ARRAY_SIZE(cs40l50_err_rls));
+ break;
+ }
+ }
+
+ mutex_unlock(&cs40l50->lock);
+ return IRQ_RETVAL(!ret);
+}
+
+static irqreturn_t cs40l50_dsp_queue(int irq, void *data)
+{
+ struct cs40l50 *cs40l50 = data;
+ u32 rd_ptr, val, wt_ptr;
+ int ret = 0;
+
+ mutex_lock(&cs40l50->lock);
+
+ /* Read from DSP queue, log, and update read pointer */
+ while (!ret) {
+ ret = regmap_read(cs40l50->regmap, CS40L50_DSP_QUEUE_WT, &wt_ptr);
+ if (ret)
+ break;
+
+ ret = regmap_read(cs40l50->regmap, CS40L50_DSP_QUEUE_RD, &rd_ptr);
+ if (ret)
+ break;
+
+ /* Check if queue is empty */
+ if (wt_ptr == rd_ptr)
+ break;
+
+ ret = regmap_read(cs40l50->regmap, rd_ptr, &val);
+ if (ret)
+ break;
+
+ dev_dbg(cs40l50->dev, "DSP payload: %#X", val);
+
+ rd_ptr += sizeof(u32);
+
+ if (rd_ptr > CS40L50_DSP_QUEUE_END)
+ rd_ptr = CS40L50_DSP_QUEUE_BASE;
+
+ ret = regmap_write(cs40l50->regmap, CS40L50_DSP_QUEUE_RD, rd_ptr);
+ }
+
+ mutex_unlock(&cs40l50->lock);
+
+ return IRQ_RETVAL(!ret);
+}
+
+static int cs40l50_irq_init(struct cs40l50 *cs40l50)
+{
+ int ret, i, virq;
+
+ ret = devm_regmap_add_irq_chip(cs40l50->dev, cs40l50->regmap, cs40l50->irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &cs40l50_irq_chip, &cs40l50->irq_data);
+ if (ret) {
+ dev_err(cs40l50->dev, "Failed adding IRQ chip\n");
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cs40l50_irqs); i++) {
+ virq = regmap_irq_get_virq(cs40l50->irq_data, i);
+ if (virq < 0) {
+ dev_err(cs40l50->dev, "Failed getting virq for %s\n",
+ cs40l50_irqs[i].name);
+ return virq;
+ }
+
+ cs40l50_irqs[i].virq = virq;
+
+ /* Handle DSP and hardware interrupts separately */
+ ret = devm_request_threaded_irq(cs40l50->dev, virq, NULL,
+ i ? cs40l50_hw_err : cs40l50_dsp_queue,
+ IRQF_ONESHOT | IRQF_SHARED,
+ cs40l50_irqs[i].name, cs40l50);
+ if (ret) {
+ return dev_err_probe(cs40l50->dev, ret,
+ "Failed requesting %s IRQ\n",
+ cs40l50_irqs[i].name);
+ }
+ }
+
+ return 0;
+}
+
+static int cs40l50_get_model(struct cs40l50 *cs40l50)
+{
+ int ret;
+
+ ret = regmap_read(cs40l50->regmap, CS40L50_DEVID, &cs40l50->devid);
+ if (ret)
+ return ret;
+
+ if (cs40l50->devid != CS40L50_DEVID_A)
+ return -EINVAL;
+
+ ret = regmap_read(cs40l50->regmap, CS40L50_REVID, &cs40l50->revid);
+ if (ret)
+ return ret;
+
+ if (cs40l50->revid < CS40L50_REVID_B0)
+ return -EINVAL;
+
+ dev_dbg(cs40l50->dev, "Cirrus Logic CS40L50 rev. %02X\n", cs40l50->revid);
+
+ return 0;
+}
+
+static int cs40l50_pm_runtime_setup(struct device *dev)
+{
+ int ret;
+
+ pm_runtime_set_autosuspend_delay(dev, CS40L50_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ return ret;
+
+ return devm_pm_runtime_enable(dev);
+}
+
+int cs40l50_probe(struct cs40l50 *cs40l50)
+{
+ struct device *dev = cs40l50->dev;
+ int ret;
+
+ mutex_init(&cs40l50->lock);
+
+ cs40l50->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cs40l50->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(cs40l50->reset_gpio),
+ "Failed getting reset GPIO\n");
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(cs40l50_supplies),
+ cs40l50_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed getting supplies\n");
+
+ /* Ensure minimum reset pulse width */
+ usleep_range(CS40L50_RESET_PULSE_US, CS40L50_RESET_PULSE_US + 100);
+
+ gpiod_set_value_cansleep(cs40l50->reset_gpio, 0);
+
+ /* Wait for control port to be ready */
+ usleep_range(CS40L50_CP_READY_US, CS40L50_CP_READY_US + 100);
+
+ ret = cs40l50_get_model(cs40l50);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get part number\n");
+
+ ret = cs40l50_dsp_init(cs40l50);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize DSP\n");
+
+ ret = cs40l50_pm_runtime_setup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize runtime PM\n");
+
+ ret = cs40l50_irq_init(cs40l50);
+ if (ret)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, CS40L50_FW,
+ dev, GFP_KERNEL, cs40l50, cs40l50_request_firmware);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request %s\n", CS40L50_FW);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs40l50_probe);
+
+int cs40l50_remove(struct cs40l50 *cs40l50)
+{
+ gpiod_set_value_cansleep(cs40l50->reset_gpio, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs40l50_remove);
+
+static int cs40l50_runtime_suspend(struct device *dev)
+{
+ struct cs40l50 *cs40l50 = dev_get_drvdata(dev);
+
+ return regmap_write(cs40l50->regmap, CS40L50_DSP_QUEUE, CS40L50_ALLOW_HIBER);
+}
+
+static int cs40l50_runtime_resume(struct device *dev)
+{
+ struct cs40l50 *cs40l50 = dev_get_drvdata(dev);
+
+ return cs40l50_dsp_write(dev, cs40l50->regmap, CS40L50_PREVENT_HIBER);
+}
+
+EXPORT_GPL_DEV_PM_OPS(cs40l50_pm_ops) = {
+ RUNTIME_PM_OPS(cs40l50_runtime_suspend, cs40l50_runtime_resume, NULL)
+};
+
+MODULE_DESCRIPTION("CS40L50 Advanced Haptic Driver");
+MODULE_AUTHOR("James Ogletree, Cirrus Logic Inc. <james.ogletree@cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(FW_CS_DSP);
diff --git a/drivers/mfd/cs40l50-i2c.c b/drivers/mfd/cs40l50-i2c.c
new file mode 100644
index 000000000000..639be743d956
--- /dev/null
+++ b/drivers/mfd/cs40l50-i2c.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mfd/cs40l50.h>
+
+static int cs40l50_i2c_probe(struct i2c_client *i2c)
+{
+ struct cs40l50 *cs40l50;
+
+ cs40l50 = devm_kzalloc(&i2c->dev, sizeof(*cs40l50), GFP_KERNEL);
+ if (!cs40l50)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, cs40l50);
+
+ cs40l50->dev = &i2c->dev;
+ cs40l50->irq = i2c->irq;
+
+ cs40l50->regmap = devm_regmap_init_i2c(i2c, &cs40l50_regmap);
+ if (IS_ERR(cs40l50->regmap))
+ return dev_err_probe(cs40l50->dev, PTR_ERR(cs40l50->regmap),
+ "Failed to initialize register map\n");
+
+ return cs40l50_probe(cs40l50);
+}
+
+static void cs40l50_i2c_remove(struct i2c_client *i2c)
+{
+ struct cs40l50 *cs40l50 = i2c_get_clientdata(i2c);
+
+ cs40l50_remove(cs40l50);
+}
+
+static const struct i2c_device_id cs40l50_id_i2c[] = {
+ { "cs40l50" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cs40l50_id_i2c);
+
+static const struct of_device_id cs40l50_of_match[] = {
+ { .compatible = "cirrus,cs40l50" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cs40l50_of_match);
+
+static struct i2c_driver cs40l50_i2c_driver = {
+ .driver = {
+ .name = "cs40l50",
+ .of_match_table = cs40l50_of_match,
+ .pm = pm_ptr(&cs40l50_pm_ops),
+ },
+ .id_table = cs40l50_id_i2c,
+ .probe = cs40l50_i2c_probe,
+ .remove = cs40l50_i2c_remove,
+};
+module_i2c_driver(cs40l50_i2c_driver);
+
+MODULE_DESCRIPTION("CS40L50 I2C Driver");
+MODULE_AUTHOR("James Ogletree, Cirrus Logic Inc. <james.ogletree@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/cs40l50-spi.c b/drivers/mfd/cs40l50-spi.c
new file mode 100644
index 000000000000..53526b595a0d
--- /dev/null
+++ b/drivers/mfd/cs40l50-spi.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#include <linux/mfd/cs40l50.h>
+#include <linux/spi/spi.h>
+
+static int cs40l50_spi_probe(struct spi_device *spi)
+{
+ struct cs40l50 *cs40l50;
+
+ cs40l50 = devm_kzalloc(&spi->dev, sizeof(*cs40l50), GFP_KERNEL);
+ if (!cs40l50)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, cs40l50);
+
+ cs40l50->dev = &spi->dev;
+ cs40l50->irq = spi->irq;
+
+ cs40l50->regmap = devm_regmap_init_spi(spi, &cs40l50_regmap);
+ if (IS_ERR(cs40l50->regmap))
+ return dev_err_probe(cs40l50->dev, PTR_ERR(cs40l50->regmap),
+ "Failed to initialize register map\n");
+
+ return cs40l50_probe(cs40l50);
+}
+
+static void cs40l50_spi_remove(struct spi_device *spi)
+{
+ struct cs40l50 *cs40l50 = spi_get_drvdata(spi);
+
+ cs40l50_remove(cs40l50);
+}
+
+static const struct spi_device_id cs40l50_id_spi[] = {
+ { "cs40l50" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, cs40l50_id_spi);
+
+static const struct of_device_id cs40l50_of_match[] = {
+ { .compatible = "cirrus,cs40l50" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cs40l50_of_match);
+
+static struct spi_driver cs40l50_spi_driver = {
+ .driver = {
+ .name = "cs40l50",
+ .of_match_table = cs40l50_of_match,
+ .pm = pm_ptr(&cs40l50_pm_ops),
+ },
+ .id_table = cs40l50_id_spi,
+ .probe = cs40l50_spi_probe,
+ .remove = cs40l50_spi_remove,
+};
+module_spi_driver(cs40l50_spi_driver);
+
+MODULE_DESCRIPTION("CS40L50 SPI Driver");
+MODULE_AUTHOR("James Ogletree, Cirrus Logic Inc. <james.ogletree@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 9a5f51b60bad..6c1981832aaf 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -54,7 +54,7 @@ static void da9055_i2c_remove(struct i2c_client *i2c)
* and CODEC, which must be different to operate together.
*/
static const struct i2c_device_id da9055_i2c_id[] = {
- {"da9055-pmic", 0},
+ { "da9055-pmic" },
{ }
};
MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index c36a101df7be..1362b3f64ade 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -103,7 +103,7 @@ static const struct software_node spt_spi_node = {
.properties = spt_spi_properties,
};
-static const struct intel_lpss_platform_info spt_info = {
+static const struct intel_lpss_platform_info spt_spi_info = {
.clk_rate = 120000000,
.swnode = &spt_spi_node,
};
@@ -148,7 +148,7 @@ static const struct software_node bxt_spi_node = {
.properties = bxt_spi_properties,
};
-static const struct intel_lpss_platform_info bxt_info = {
+static const struct intel_lpss_platform_info bxt_spi_info = {
.clk_rate = 100000000,
.swnode = &bxt_spi_node,
};
@@ -216,7 +216,7 @@ static const struct software_node cnl_spi_node = {
.properties = cnl_spi_properties,
};
-static const struct intel_lpss_platform_info cnl_info = {
+static const struct intel_lpss_platform_info cnl_spi_info = {
.clk_rate = 120000000,
.swnode = &cnl_spi_node,
};
@@ -240,7 +240,7 @@ static const struct software_node tgl_spi_node = {
.properties = tgl_spi_properties,
};
-static const struct intel_lpss_platform_info tgl_info = {
+static const struct intel_lpss_platform_info tgl_spi_info = {
.clk_rate = 100000000,
.swnode = &tgl_spi_node,
};
@@ -249,8 +249,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0x02c5), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02c6), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02c7), (kernel_ulong_t)&spt_uart_info },
@@ -258,18 +258,18 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02e9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&cnl_spi_info },
/* CML-H */
{ PCI_VDEVICE(INTEL, 0x06a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x06a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0x06c7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x06e8), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06e9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06eb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&cnl_spi_info },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
@@ -282,9 +282,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0abc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x0abe), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x0ac0), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x0ac2), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x0ac4), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x0ac6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x0ac2), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x0ac4), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x0ac6), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info },
/* BXT B-Step */
{ PCI_VDEVICE(INTEL, 0x1aac), (kernel_ulong_t)&bxt_i2c_info },
@@ -298,9 +298,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x1ac0), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x1ac2), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1ac2), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
/* EBG */
{ PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info },
@@ -317,15 +317,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x31c2), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x31c4), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x31c6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x31c2), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x31c4), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x31c6), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x31ee), (kernel_ulong_t)&bxt_uart_info },
/* ICL-LP */
{ PCI_VDEVICE(INTEL, 0x34a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x34a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x34aa), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x34ab), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x34aa), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x34ab), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0x34c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34c7), (kernel_ulong_t)&spt_uart_info },
@@ -333,15 +333,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x34e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&cnl_spi_info },
/* ICL-N */
{ PCI_VDEVICE(INTEL, 0x38a8), (kernel_ulong_t)&spt_uart_info },
/* TGL-H */
{ PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
@@ -350,14 +350,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&tgl_spi_info },
/* EHL */
{ PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x4b2a), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x4b2b), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x4b37), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x4b2a), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x4b2b), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x4b37), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x4b44), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b45), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b4b), (kernel_ulong_t)&ehl_i2c_info },
@@ -370,8 +370,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* JSL */
{ PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0x4dc5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc7), (kernel_ulong_t)&spt_uart_info },
@@ -379,12 +379,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4de9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4deb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&cnl_spi_info },
/* ADL-P */
{ PCI_VDEVICE(INTEL, 0x51a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x51a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x51aa), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x51ab), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x51aa), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x51ab), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x51c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c7), (kernel_ulong_t)&bxt_uart_info },
@@ -394,12 +394,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x51e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&tgl_spi_info },
/* ADL-M */
{ PCI_VDEVICE(INTEL, 0x54a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x54a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x54aa), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x54ab), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x54aa), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x54ab), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x54c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54c7), (kernel_ulong_t)&bxt_uart_info },
@@ -407,7 +407,7 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x54e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x54fb), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x54fb), (kernel_ulong_t)&tgl_spi_info },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
@@ -420,46 +420,46 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x5abc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x5abe), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x5ac0), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x5ac2), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x5ac2), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info },
/* RPL-S */
{ PCI_VDEVICE(INTEL, 0x7a28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7a29), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7a2a), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7a2b), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7a2a), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7a2b), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x7a4c), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4d), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4e), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4f), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a5c), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7a79), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7a7b), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7a79), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7a7b), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x7a7c), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a7d), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a7e), (kernel_ulong_t)&bxt_uart_info },
/* ADL-S */
{ PCI_VDEVICE(INTEL, 0x7aa8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7aa9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7aaa), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7aab), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7aaa), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7aab), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x7acc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7acd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7ace), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7acf), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7adc), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7af9), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7afb), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7af9), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7afb), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
/* MTL-P */
{ PCI_VDEVICE(INTEL, 0x7e25), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7e26), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x7e50), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7e51), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7e52), (kernel_ulong_t)&bxt_uart_info },
@@ -470,22 +470,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* MTP-S */
{ PCI_VDEVICE(INTEL, 0x7f28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7f29), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7f2a), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7f2b), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7f2a), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7f2b), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x7f4c), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7f4d), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7f4e), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7f4f), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7f5c), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7f5d), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7f5e), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0x7f5f), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7f5e), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7f5f), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0x7f7a), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7f7b), (kernel_ulong_t)&bxt_i2c_info },
/* LKF */
{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x98aa), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x98aa), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x98c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x98c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x98c7), (kernel_ulong_t)&bxt_uart_info },
@@ -496,8 +496,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* SPT-LP */
{ PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_spi_info },
+ { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_spi_info },
{ PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_i2c_info },
@@ -508,8 +508,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CNL-LP */
{ PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
@@ -517,12 +517,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&cnl_spi_info },
/* TGL-LP */
{ PCI_VDEVICE(INTEL, 0xa0a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa0aa), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa0ab), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0aa), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa0ab), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0xa0c5), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0c6), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0c7), (kernel_ulong_t)&bxt_uart_info },
@@ -532,20 +532,20 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa0db), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0dc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0dd), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa0de), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa0df), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0de), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa0df), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0xa0e8), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0e9), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0ea), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0eb), (kernel_ulong_t)&spt_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa0fb), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa0fd), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa0fe), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0fb), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa0fd), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa0fe), (kernel_ulong_t)&cnl_spi_info },
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_spi_info },
{ PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa162), (kernel_ulong_t)&spt_i2c_info },
@@ -553,8 +553,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* KBL-H */
{ PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_spi_info },
{ PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info },
@@ -563,19 +563,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CNL-H */
{ PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&cnl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&cnl_spi_info },
{ PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&cnl_spi_info },
/* CML-V */
{ PCI_VDEVICE(INTEL, 0xa3a7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa3a8), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa3a9), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa3aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa3a9), (kernel_ulong_t)&spt_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa3aa), (kernel_ulong_t)&spt_spi_info },
{ PCI_VDEVICE(INTEL, 0xa3e0), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa3e1), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
@@ -584,9 +584,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* LNL-M */
{ PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info },
- { PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_spi_info },
{ PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 8dac0d41f64f..ba32cacfc499 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -581,5 +581,6 @@ static struct platform_driver bxtwc_driver = {
module_platform_driver(bxtwc_driver);
+MODULE_DESCRIPTION("Intel Broxton Whiskey Cove PMIC MFD core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qipeng Zha <qipeng.zha@intel.com>");
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 581f81cbaa24..876d017f74fe 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -137,7 +137,9 @@ static const struct regmap_irq_chip crystal_cove_irq_chip = {
/* PWM consumed by the Intel GFX */
static struct pwm_lookup crc_pwm_lookup[] = {
- PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
+ PWM_LOOKUP_WITH_MODULE("crystal_cove_pwm", 0, "0000:00:02.0",
+ "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL,
+ "pwm-crc"),
};
struct crystal_cove_config {
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index c211183cecb2..0a2409d00b2e 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
@@ -225,14 +225,12 @@ static int lm3533_set_lvled_config(struct lm3533 *lm3533, u8 lvled, u8 led)
static void lm3533_enable(struct lm3533 *lm3533)
{
- if (gpio_is_valid(lm3533->gpio_hwen))
- gpio_set_value(lm3533->gpio_hwen, 1);
+ gpiod_set_value(lm3533->hwen, 1);
}
static void lm3533_disable(struct lm3533 *lm3533)
{
- if (gpio_is_valid(lm3533->gpio_hwen))
- gpio_set_value(lm3533->gpio_hwen, 0);
+ gpiod_set_value(lm3533->hwen, 0);
}
enum lm3533_attribute_type {
@@ -483,18 +481,10 @@ static int lm3533_device_init(struct lm3533 *lm3533)
return -EINVAL;
}
- lm3533->gpio_hwen = pdata->gpio_hwen;
-
- if (gpio_is_valid(lm3533->gpio_hwen)) {
- ret = devm_gpio_request_one(lm3533->dev, lm3533->gpio_hwen,
- GPIOF_OUT_INIT_LOW, "lm3533-hwen");
- if (ret < 0) {
- dev_err(lm3533->dev,
- "failed to request HWEN GPIO %d\n",
- lm3533->gpio_hwen);
- return ret;
- }
- }
+ lm3533->hwen = devm_gpiod_get(lm3533->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(lm3533->hwen))
+ return dev_err_probe(lm3533->dev, PTR_ERR(lm3533->hwen), "failed to request HWEN GPIO\n");
+ gpiod_set_consumer_name(lm3533->hwen, "lm3533-hwen");
lm3533_enable(lm3533);
@@ -614,8 +604,8 @@ static void lm3533_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id lm3533_i2c_ids[] = {
- { "lm3533", 0 },
- { },
+ { "lm3533" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
index 7f749a23dca8..6764553147e4 100644
--- a/drivers/mfd/lp3943.c
+++ b/drivers/mfd/lp3943.c
@@ -126,7 +126,7 @@ static int lp3943_probe(struct i2c_client *cl)
}
static const struct i2c_device_id lp3943_ids[] = {
- { "lp3943", 0 },
+ { "lp3943" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp3943_ids);
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
index de7ab7aed3c6..e8c5c89c2a76 100644
--- a/drivers/mfd/lp873x.c
+++ b/drivers/mfd/lp873x.c
@@ -68,8 +68,8 @@ static const struct of_device_id of_lp873x_match_table[] = {
MODULE_DEVICE_TABLE(of, of_lp873x_match_table);
static const struct i2c_device_id lp873x_id_table[] = {
- { "lp873x", 0 },
- { },
+ { "lp873x" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, lp873x_id_table);
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
index 08c62ddfb4f5..9488d3793c10 100644
--- a/drivers/mfd/lp87565.c
+++ b/drivers/mfd/lp87565.c
@@ -106,8 +106,8 @@ static void lp87565_shutdown(struct i2c_client *client)
}
static const struct i2c_device_id lp87565_id_table[] = {
- { "lp87565-q1", 0 },
- { },
+ { "lp87565-q1" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, lp87565_id_table);
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index f371eeb042e0..32f255378f5a 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -216,7 +216,7 @@ static void lp8788_remove(struct i2c_client *cl)
}
static const struct i2c_device_id lp8788_ids[] = {
- {"lp8788", 0},
+ { "lp8788" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp8788_ids);
diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c
index ad07ebe29e59..ce9e90322c9c 100644
--- a/drivers/mfd/madera-spi.c
+++ b/drivers/mfd/madera-spi.c
@@ -18,21 +18,14 @@
static int madera_spi_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
struct madera *madera;
const struct regmap_config *regmap_16bit_config = NULL;
const struct regmap_config *regmap_32bit_config = NULL;
- const void *of_data;
unsigned long type;
const char *name;
int ret;
- of_data = of_device_get_match_data(&spi->dev);
- if (of_data)
- type = (unsigned long)of_data;
- else
- type = id->driver_data;
-
+ type = (unsigned long)spi_get_device_match_data(spi);
switch (type) {
case CS47L15:
if (IS_ENABLED(CONFIG_MFD_CS47L15)) {
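The madera-spi.c change works because spi_get_device_match_data() first looks for OF/ACPI match data and only then falls back to the legacy spi_device_id driver_data, so both tables must carry the same per-chip value. A hedged, generic sketch of that prerequisite (the device names and CHIP_A value are made up, not Madera's actual tables):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

enum { CHIP_A = 1 };	/* illustrative chip-type value */

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)CHIP_A },
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

/* Legacy table: driver_data mirrors the OF match data. */
static const struct spi_device_id example_spi_ids[] = {
	{ "chip-a", CHIP_A },
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);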
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 8f7472c76009..67bf4de4c0c1 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -397,7 +397,7 @@ static int max14577_i2c_probe(struct i2c_client *i2c)
return ret;
}
- max14577->dev_type = (enum maxim_device_type)i2c_get_match_data(i2c);
+ max14577->dev_type = (kernel_ulong_t)i2c_get_match_data(i2c);
max14577_print_dev_type(max14577);
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index accf426234b6..7bac1d651771 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -300,7 +300,7 @@ MODULE_DEVICE_TABLE(of, max8907_of_match);
#endif
static const struct i2c_device_id max8907_i2c_id[] = {
- {"max8907", 0},
+ { "max8907" },
{}
};
MODULE_DEVICE_TABLE(i2c, max8907_i2c_id);
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 7608000488f9..556aea7ec0a0 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -127,8 +127,8 @@ EXPORT_SYMBOL(max8925_set_bits);
static const struct i2c_device_id max8925_id_table[] = {
- { "max8925", 0 },
- { },
+ { "max8925" },
+ { }
};
static int max8925_dt_init(struct device_node *np, struct device *dev,
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 662604ea97f2..a125d40fa121 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -29,7 +29,6 @@
#include <linux/bcd.h>
#include <linux/slab.h>
#include <linux/mfd/menelaus.h>
-#include <linux/gpio.h>
#include <asm/mach/irq.h>
@@ -1231,7 +1230,7 @@ static void menelaus_remove(struct i2c_client *client)
}
static const struct i2c_device_id menelaus_id[] = {
- { "menelaus", 0 },
+ { "menelaus" },
{ }
};
MODULE_DEVICE_TABLE(i2c, menelaus_id);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 6ad5c93027af..76bd316a50af 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -87,7 +87,7 @@ static void mfd_acpi_add_device(const struct mfd_cell *cell,
}
}
- ACPI_COMPANION_SET(&pdev->dev, adev ?: parent);
+ device_set_node(&pdev->dev, acpi_fwnode_handle(adev ?: parent));
}
#else
static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
@@ -131,8 +131,7 @@ allocate_of_node:
of_entry->np = np;
list_add_tail(&of_entry->list, &mfd_of_node_list);
- pdev->dev.of_node = np;
- pdev->dev.fwnode = &np->fwnode;
+ device_set_node(&pdev->dev, of_fwnode_handle(np));
#endif
return 0;
}
@@ -437,5 +436,6 @@ int devm_mfd_add_devices(struct device *dev, int id,
}
EXPORT_SYMBOL(devm_mfd_add_devices);
+MODULE_DESCRIPTION("Core MFD support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 4fd4a2da5ad7..c2939e785818 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -135,6 +135,9 @@ static const struct mfd_cell mt6323_devs[] = {
static const struct mfd_cell mt6357_devs[] = {
{
+ .name = "mt6359-auxadc",
+ .of_compatible = "mediatek,mt6357-auxadc"
+ }, {
.name = "mt6357-regulator",
}, {
.name = "mt6357-rtc",
@@ -175,6 +178,9 @@ static const struct mfd_cell mt6331_mt6332_devs[] = {
static const struct mfd_cell mt6358_devs[] = {
{
+ .name = "mt6359-auxadc",
+ .of_compatible = "mediatek,mt6358-auxadc"
+ }, {
.name = "mt6358-regulator",
.of_compatible = "mediatek,mt6358-regulator"
}, {
@@ -194,6 +200,10 @@ static const struct mfd_cell mt6358_devs[] = {
};
static const struct mfd_cell mt6359_devs[] = {
+ {
+ .name = "mt6359-auxadc",
+ .of_compatible = "mediatek,mt6359-auxadc"
+ },
{ .name = "mt6359-regulator", },
{
.name = "mt6359-rtc",
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index 73893890b50a..b2ebb5433121 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -137,7 +137,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
if (!lradc)
return -ENOMEM;
- lradc->soc = (enum mxs_lradc_id)device_get_match_data(&pdev->dev);
+ lradc->soc = (kernel_ulong_t)device_get_match_data(&pdev->dev);
lradc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(lradc->clk)) {
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 949feb03d4f8..6de7ba752345 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index b6303ddb013b..5f25ac514ff2 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -98,8 +98,8 @@
struct usbtll_omap {
void __iomem *base;
- int nch; /* num. of channels */
- struct clk *ch_clk[]; /* must be the last member */
+ int nch;
+ struct clk *ch_clk[] __counted_by(nch);
};
/*-------------------------------------------------------------------------*/
@@ -230,8 +230,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
break;
}
- tll = devm_kzalloc(dev, sizeof(*tll) + sizeof(tll->ch_clk[nch]),
- GFP_KERNEL);
+ tll = devm_kzalloc(dev, struct_size(tll, ch_clk, nch), GFP_KERNEL);
if (!tll) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
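The usbtll_omap conversion above is the standard flexible-array pattern: struct_size() computes the allocation size without open-coded arithmetic, and __counted_by(nch) lets fortified accessors bounds-check ch_clk[]. A small stand-alone sketch of the same pattern, with illustrative names rather than the driver's own code:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_tll {
	int nch;
	struct clk *ch_clk[] __counted_by(nch);
};

static struct example_tll *example_tll_alloc(struct device *dev, int nch)
{
	struct example_tll *tll;

	tll = devm_kzalloc(dev, struct_size(tll, ch_clk, nch), GFP_KERNEL);
	if (!tll)
		return NULL;

	/* Set the counter before ch_clk[] is indexed so bounds checks hold. */
	tll->nch = nch;

	return tll;
}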
diff --git a/drivers/mfd/pcf50633-gpio.c b/drivers/mfd/pcf50633-gpio.c
index 4d2b53b12eeb..3e368219479a 100644
--- a/drivers/mfd/pcf50633-gpio.c
+++ b/drivers/mfd/pcf50633-gpio.c
@@ -88,4 +88,5 @@ int pcf50633_gpio_power_supply_set(struct pcf50633 *pcf,
}
EXPORT_SYMBOL_GPL(pcf50633_gpio_power_supply_set);
+MODULE_DESCRIPTION("NXP PCF50633 GPIO Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index 3ac3742f438b..60204cc9a2dc 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -4,10 +4,13 @@
*/
#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -15,8 +18,6 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <dt-bindings/mfd/qcom-pm8008.h>
-
#define I2C_INTR_STATUS_BASE 0x0550
#define INT_RT_STS_OFFSET 0x10
#define INT_SET_TYPE_OFFSET 0x11
@@ -37,34 +38,54 @@ enum {
#define PM8008_PERIPH_0_BASE 0x900
#define PM8008_PERIPH_1_BASE 0x2400
-#define PM8008_PERIPH_2_BASE 0xC000
-#define PM8008_PERIPH_3_BASE 0xC100
+#define PM8008_PERIPH_2_BASE 0xc000
+#define PM8008_PERIPH_3_BASE 0xc100
#define PM8008_TEMP_ALARM_ADDR PM8008_PERIPH_1_BASE
#define PM8008_GPIO1_ADDR PM8008_PERIPH_2_BASE
#define PM8008_GPIO2_ADDR PM8008_PERIPH_3_BASE
+/* PM8008 IRQ numbers */
+#define PM8008_IRQ_MISC_UVLO 0
+#define PM8008_IRQ_MISC_OVLO 1
+#define PM8008_IRQ_MISC_OTST2 2
+#define PM8008_IRQ_MISC_OTST3 3
+#define PM8008_IRQ_MISC_LDO_OCP 4
+#define PM8008_IRQ_TEMP_ALARM 5
+#define PM8008_IRQ_GPIO1 6
+#define PM8008_IRQ_GPIO2 7
+
enum {
SET_TYPE_INDEX,
POLARITY_HI_INDEX,
POLARITY_LO_INDEX,
};
-static unsigned int pm8008_config_regs[] = {
+static const unsigned int pm8008_config_regs[] = {
INT_SET_TYPE_OFFSET,
INT_POL_HIGH_OFFSET,
INT_POL_LOW_OFFSET,
};
-static struct regmap_irq pm8008_irqs[] = {
- REGMAP_IRQ_REG(PM8008_IRQ_MISC_UVLO, PM8008_MISC, BIT(0)),
- REGMAP_IRQ_REG(PM8008_IRQ_MISC_OVLO, PM8008_MISC, BIT(1)),
- REGMAP_IRQ_REG(PM8008_IRQ_MISC_OTST2, PM8008_MISC, BIT(2)),
- REGMAP_IRQ_REG(PM8008_IRQ_MISC_OTST3, PM8008_MISC, BIT(3)),
- REGMAP_IRQ_REG(PM8008_IRQ_MISC_LDO_OCP, PM8008_MISC, BIT(4)),
- REGMAP_IRQ_REG(PM8008_IRQ_TEMP_ALARM, PM8008_TEMP_ALARM, BIT(0)),
- REGMAP_IRQ_REG(PM8008_IRQ_GPIO1, PM8008_GPIO1, BIT(0)),
- REGMAP_IRQ_REG(PM8008_IRQ_GPIO2, PM8008_GPIO2, BIT(0)),
+#define _IRQ(_irq, _off, _mask, _types) \
+ [_irq] = { \
+ .reg_offset = (_off), \
+ .mask = (_mask), \
+ .type = { \
+ .type_reg_offset = (_off), \
+ .types_supported = (_types), \
+ }, \
+ }
+
+static const struct regmap_irq pm8008_irqs[] = {
+ _IRQ(PM8008_IRQ_MISC_UVLO, PM8008_MISC, BIT(0), IRQ_TYPE_EDGE_RISING),
+ _IRQ(PM8008_IRQ_MISC_OVLO, PM8008_MISC, BIT(1), IRQ_TYPE_EDGE_RISING),
+ _IRQ(PM8008_IRQ_MISC_OTST2, PM8008_MISC, BIT(2), IRQ_TYPE_EDGE_RISING),
+ _IRQ(PM8008_IRQ_MISC_OTST3, PM8008_MISC, BIT(3), IRQ_TYPE_EDGE_RISING),
+ _IRQ(PM8008_IRQ_MISC_LDO_OCP, PM8008_MISC, BIT(4), IRQ_TYPE_EDGE_RISING),
+ _IRQ(PM8008_IRQ_TEMP_ALARM, PM8008_TEMP_ALARM, BIT(0), IRQ_TYPE_SENSE_MASK),
+ _IRQ(PM8008_IRQ_GPIO1, PM8008_GPIO1, BIT(0), IRQ_TYPE_SENSE_MASK),
+ _IRQ(PM8008_IRQ_GPIO2, PM8008_GPIO2, BIT(0), IRQ_TYPE_SENSE_MASK),
};
static const unsigned int pm8008_periph_base[] = {
@@ -118,8 +139,8 @@ static int pm8008_set_type_config(unsigned int **buf, unsigned int type,
return 0;
}
-static struct regmap_irq_chip pm8008_irq_chip = {
- .name = "pm8008_irq",
+static const struct regmap_irq_chip pm8008_irq_chip = {
+ .name = "pm8008",
.main_status = I2C_INTR_STATUS_BASE,
.num_main_regs = 1,
.irqs = pm8008_irqs,
@@ -137,62 +158,106 @@ static struct regmap_irq_chip pm8008_irq_chip = {
.get_irq_reg = pm8008_get_irq_reg,
};
-static struct regmap_config qcom_mfd_regmap_cfg = {
+static const struct regmap_config qcom_mfd_regmap_cfg = {
+ .name = "primary",
.reg_bits = 16,
.val_bits = 8,
- .max_register = 0xFFFF,
+ .max_register = 0xffff,
};
-static int pm8008_probe_irq_peripherals(struct device *dev,
- struct regmap *regmap,
- int client_irq)
-{
- int rc, i;
- struct regmap_irq_type *type;
- struct regmap_irq_chip_data *irq_data;
-
- for (i = 0; i < ARRAY_SIZE(pm8008_irqs); i++) {
- type = &pm8008_irqs[i].type;
+static const struct regmap_config pm8008_regmap_cfg_2 = {
+ .name = "secondary",
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0xffff,
+};
- type->type_reg_offset = pm8008_irqs[i].reg_offset;
+static const struct resource pm8008_temp_res[] = {
+ DEFINE_RES_MEM(PM8008_TEMP_ALARM_ADDR, 0x100),
+ DEFINE_RES_IRQ(PM8008_IRQ_TEMP_ALARM),
+};
- if (type->type_reg_offset == PM8008_MISC)
- type->types_supported = IRQ_TYPE_EDGE_RISING;
- else
- type->types_supported = (IRQ_TYPE_EDGE_BOTH |
- IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW);
- }
+static const struct mfd_cell pm8008_cells[] = {
+ MFD_CELL_NAME("pm8008-regulator"),
+ MFD_CELL_RES("qpnp-temp-alarm", pm8008_temp_res),
+ MFD_CELL_NAME("pm8008-gpio"),
+};
- rc = devm_regmap_add_irq_chip(dev, regmap, client_irq,
- IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data);
- if (rc) {
- dev_err(dev, "Failed to add IRQ chip: %d\n", rc);
- return rc;
- }
+static void devm_irq_domain_fwnode_release(void *data)
+{
+ struct fwnode_handle *fwnode = data;
- return 0;
+ irq_domain_free_fwnode(fwnode);
}
static int pm8008_probe(struct i2c_client *client)
{
- int rc;
- struct device *dev;
- struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ struct device *dev = &client->dev;
+ struct regmap *regmap, *regmap2;
+ struct fwnode_handle *fwnode;
+ struct i2c_client *dummy;
+ struct gpio_desc *reset;
+ char *name;
+ int ret;
+
+ dummy = devm_i2c_new_dummy_device(dev, client->adapter, client->addr + 1);
+ if (IS_ERR(dummy)) {
+ ret = PTR_ERR(dummy);
+ dev_err(dev, "failed to claim second address: %d\n", ret);
+ return ret;
+ }
+
+ regmap2 = devm_regmap_init_i2c(dummy, &qcom_mfd_regmap_cfg);
+ if (IS_ERR(regmap2))
+ return PTR_ERR(regmap2);
+
+ ret = regmap_attach_dev(dev, regmap2, &pm8008_regmap_cfg_2);
+ if (ret)
+ return ret;
- dev = &client->dev;
+ /* Default regmap must be attached last. */
regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- i2c_set_clientdata(client, regmap);
+ reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
- if (of_property_read_bool(dev->of_node, "interrupt-controller")) {
- rc = pm8008_probe_irq_peripherals(dev, regmap, client->irq);
- if (rc)
- dev_err(dev, "Failed to probe irq periphs: %d\n", rc);
+ /*
+ * The PMIC does not appear to require a post-reset delay, but wait
+ * for a millisecond for now anyway.
+ */
+ usleep_range(1000, 2000);
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOF-internal", dev->of_node);
+ if (!name)
+ return -ENOMEM;
+
+ name = strreplace(name, '/', ':');
+
+ fwnode = irq_domain_alloc_named_fwnode(name);
+ if (!fwnode)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, devm_irq_domain_fwnode_release, fwnode);
+ if (ret)
+ return ret;
+
+ ret = devm_regmap_add_irq_chip_fwnode(dev, fwnode, regmap, client->irq,
+ IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data);
+ if (ret) {
+ dev_err(dev, "failed to add IRQ chip: %d\n", ret);
+ return ret;
}
- return devm_of_platform_populate(dev);
+ /* Needed by GPIO driver. */
+ dev_set_drvdata(dev, regmap_irq_get_domain(irq_data));
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, pm8008_cells,
+ ARRAY_SIZE(pm8008_cells), NULL, 0,
+ regmap_irq_get_domain(irq_data));
}
static const struct of_device_id pm8008_match[] = {
@@ -210,4 +275,5 @@ static struct i2c_driver pm8008_mfd_driver = {
};
module_i2c_driver(pm8008_mfd_driver);
+MODULE_DESCRIPTION("QCOM PM8008 Power Management IC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index b50cfa7f4b8f..9184e553fafd 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -300,8 +300,8 @@ static void retu_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id retu_id[] = {
- { "retu", 0 },
- { "tahvo", 0 },
+ { "retu" },
+ { "tahvo" },
{ }
};
MODULE_DEVICE_TABLE(i2c, retu_id);
diff --git a/drivers/mfd/rohm-bd96801.c b/drivers/mfd/rohm-bd96801.c
new file mode 100644
index 000000000000..714f08ed544b
--- /dev/null
+++ b/drivers/mfd/rohm-bd96801.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 ROHM Semiconductors
+ *
+ * ROHM BD96801 PMIC driver
+ *
+ * This version of the "BD96801 scalable PMIC" driver supports only a very
+ * basic set of the PMIC features. Most notably, there is no support for
+ * the ERRB interrupt and the configurations which should be done when the
+ * PMIC is in STBY mode.
+ *
+ * Supporting the ERRB interrupt would require dropping the regmap-IRQ
+ * usage or working around (or accepting the presence of) a naming conflict
+ * in debugFS IRQs.
+ *
+ * Being able to reliably do the configurations like changing the
+ * regulator safety limits (limits for over-/under-voltage, overcurrent and
+ * thermal protection) would require the configuring driver to be
+ * synchronized with the entity causing the PMIC state transitions. E.g., one
+ * should be able to ensure the PMIC is in STBY state when the
+ * configurations are applied to the hardware. How and when the PMIC state
+ * transitions are to be done is likely to be very system specific, as will
+ * be the need to configure these safety limits. Hence it's not simple to
+ * come up with a generic solution.
+ *
+ * Users who require the ERRB handling and STBY state configurations can
+ * have a look at the original RFC:
+ * https://lore.kernel.org/all/cover.1712920132.git.mazziesaccount@gmail.com/
+ * which implements a workaround to debugFS naming conflict and some of
+ * the safety limit configurations - but leaves the state change handling
+ * and synchronization to be implemented.
+ *
+ * It would be great to hear (and receive a patch!) if you implement the
+ * STBY configuration support or a proper fix to the debugFS naming
+ * conflict in your downstream driver ;)
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <linux/mfd/rohm-bd96801.h>
+#include <linux/mfd/rohm-generic.h>
+
+static const struct resource regulator_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "bd96801-core-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPH_STAT, "bd96801-buck1-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPL_STAT, "bd96801-buck1-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPN_STAT, "bd96801-buck1-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVD_STAT, "bd96801-buck1-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVD_STAT, "bd96801-buck1-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_TW_CH_STAT, "bd96801-buck1-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPH_STAT, "bd96801-buck2-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPL_STAT, "bd96801-buck2-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPN_STAT, "bd96801-buck2-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVD_STAT, "bd96801-buck2-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVD_STAT, "bd96801-buck2-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_TW_CH_STAT, "bd96801-buck2-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPH_STAT, "bd96801-buck3-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPL_STAT, "bd96801-buck3-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPN_STAT, "bd96801-buck3-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVD_STAT, "bd96801-buck3-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVD_STAT, "bd96801-buck3-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_TW_CH_STAT, "bd96801-buck3-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPH_STAT, "bd96801-buck4-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPL_STAT, "bd96801-buck4-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPN_STAT, "bd96801-buck4-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVD_STAT, "bd96801-buck4-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVD_STAT, "bd96801-buck4-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_TW_CH_STAT, "bd96801-buck4-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OCPH_STAT, "bd96801-ldo5-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVD_STAT, "bd96801-ldo5-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVD_STAT, "bd96801-ldo5-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OCPH_STAT, "bd96801-ldo6-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVD_STAT, "bd96801-ldo6-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVD_STAT, "bd96801-ldo6-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OCPH_STAT, "bd96801-ldo7-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVD_STAT, "bd96801-ldo7-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "bd96801-ldo7-undervolt"),
+};
+
+static const struct resource wdg_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_WDT_ERR_STAT, "bd96801-wdg"),
+};
+
+static struct mfd_cell bd96801_cells[] = {
+ {
+ .name = "bd96801-wdt",
+ .resources = wdg_intb_irqs,
+ .num_resources = ARRAY_SIZE(wdg_intb_irqs),
+ }, {
+ .name = "bd96801-regulator",
+ .resources = regulator_intb_irqs,
+ .num_resources = ARRAY_SIZE(regulator_intb_irqs),
+ },
+};
+
+static const struct regmap_range bd96801_volatile_ranges[] = {
+ /* Status registers */
+ regmap_reg_range(BD96801_REG_WD_FEED, BD96801_REG_WD_FAILCOUNT),
+ regmap_reg_range(BD96801_REG_WD_ASK, BD96801_REG_WD_ASK),
+ regmap_reg_range(BD96801_REG_WD_STATUS, BD96801_REG_WD_STATUS),
+ regmap_reg_range(BD96801_REG_PMIC_STATE, BD96801_REG_INT_LDO7_INTB),
+ /* Registers which do not update value unless PMIC is in STBY */
+ regmap_reg_range(BD96801_REG_SSCG_CTRL, BD96801_REG_SHD_INTB),
+ regmap_reg_range(BD96801_REG_BUCK_OVP, BD96801_REG_BOOT_OVERTIME),
+ /*
+ * LDO control registers have a single bit (LDO MODE) which does not
+ * change when we write it unless the PMIC is in STBY. It's safer not
+ * to cache it.
+ */
+ regmap_reg_range(BD96801_LDO5_VOL_LVL_REG, BD96801_LDO7_VOL_LVL_REG),
+};
+
+static const struct regmap_access_table volatile_regs = {
+ .yes_ranges = bd96801_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd96801_volatile_ranges),
+};
+
+static const struct regmap_irq bd96801_intb_irqs[] = {
+ /* STATUS SYSTEM INTB */
+ REGMAP_IRQ_REG(BD96801_TW_STAT, 0, BD96801_TW_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_WDT_ERR_STAT, 0, BD96801_WDT_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_I2C_ERR_STAT, 0, BD96801_I2C_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_CHIP_IF_ERR_STAT, 0, BD96801_CHIP_IF_ERR_STAT_MASK),
+ /* STATUS BUCK1 INTB */
+ REGMAP_IRQ_REG(BD96801_BUCK1_OCPH_STAT, 1, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK1_OCPL_STAT, 1, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK1_OCPN_STAT, 1, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK1_OVD_STAT, 1, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK1_UVD_STAT, 1, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK1_TW_CH_STAT, 1, BD96801_BUCK_TW_CH_STAT_MASK),
+ /* BUCK 2 INTB */
+ REGMAP_IRQ_REG(BD96801_BUCK2_OCPH_STAT, 2, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK2_OCPL_STAT, 2, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK2_OCPN_STAT, 2, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK2_OVD_STAT, 2, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK2_UVD_STAT, 2, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK2_TW_CH_STAT, 2, BD96801_BUCK_TW_CH_STAT_MASK),
+ /* BUCK 3 INTB */
+ REGMAP_IRQ_REG(BD96801_BUCK3_OCPH_STAT, 3, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK3_OCPL_STAT, 3, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK3_OCPN_STAT, 3, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK3_OVD_STAT, 3, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK3_UVD_STAT, 3, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK3_TW_CH_STAT, 3, BD96801_BUCK_TW_CH_STAT_MASK),
+ /* BUCK 4 INTB */
+ REGMAP_IRQ_REG(BD96801_BUCK4_OCPH_STAT, 4, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK4_OCPL_STAT, 4, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK4_OCPN_STAT, 4, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK4_OVD_STAT, 4, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK4_UVD_STAT, 4, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_BUCK4_TW_CH_STAT, 4, BD96801_BUCK_TW_CH_STAT_MASK),
+ /* LDO5 INTB */
+ REGMAP_IRQ_REG(BD96801_LDO5_OCPH_STAT, 5, BD96801_LDO_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_LDO5_OVD_STAT, 5, BD96801_LDO_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_LDO5_UVD_STAT, 5, BD96801_LDO_UVD_STAT_MASK),
+ /* LDO6 INTB */
+ REGMAP_IRQ_REG(BD96801_LDO6_OCPH_STAT, 6, BD96801_LDO_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_LDO6_OVD_STAT, 6, BD96801_LDO_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_LDO6_UVD_STAT, 6, BD96801_LDO_UVD_STAT_MASK),
+ /* LDO7 INTB */
+ REGMAP_IRQ_REG(BD96801_LDO7_OCPH_STAT, 7, BD96801_LDO_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_LDO7_OVD_STAT, 7, BD96801_LDO_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96801_LDO7_UVD_STAT, 7, BD96801_LDO_UVD_STAT_MASK),
+};
+
+static struct regmap_irq_chip bd96801_irq_chip_intb = {
+ .name = "bd96801-irq-intb",
+ .main_status = BD96801_REG_INT_MAIN,
+ .num_main_regs = 1,
+ .irqs = &bd96801_intb_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd96801_intb_irqs),
+ .status_base = BD96801_REG_INT_SYS_INTB,
+ .mask_base = BD96801_REG_MASK_SYS_INTB,
+ .ack_base = BD96801_REG_INT_SYS_INTB,
+ .init_ack_masked = true,
+ .num_regs = 8,
+ .irq_reg_stride = 1,
+};
+
+static const struct regmap_config bd96801_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &volatile_regs,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int bd96801_i2c_probe(struct i2c_client *i2c)
+{
+ struct regmap_irq_chip_data *intb_irq_data;
+ const struct fwnode_handle *fwnode;
+ struct irq_domain *intb_domain;
+ struct regmap *regmap;
+ int ret, intb_irq;
+
+ fwnode = dev_fwnode(&i2c->dev);
+ if (!fwnode)
+ return dev_err_probe(&i2c->dev, -EINVAL, "Failed to find fwnode\n");
+
+ intb_irq = fwnode_irq_get_byname(fwnode, "intb");
+ if (intb_irq < 0)
+ return dev_err_probe(&i2c->dev, intb_irq, "INTB IRQ not configured\n");
+
+ regmap = devm_regmap_init_i2c(i2c, &bd96801_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&i2c->dev, PTR_ERR(regmap),
+ "Regmap initialization failed\n");
+
+ ret = regmap_write(regmap, BD96801_LOCK_REG, BD96801_UNLOCK);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to unlock PMIC\n");
+
+ ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, intb_irq,
+ IRQF_ONESHOT, 0, &bd96801_irq_chip_intb,
+ &intb_irq_data);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to add INTB IRQ chip\n");
+
+ intb_domain = regmap_irq_get_domain(intb_irq_data);
+
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO,
+ bd96801_cells,
+ ARRAY_SIZE(bd96801_cells), NULL, 0,
+ intb_domain);
+ if (ret)
+ dev_err(&i2c->dev, "Failed to create subdevices\n");
+
+ return ret;
+}
+
+static const struct of_device_id bd96801_of_match[] = {
+ { .compatible = "rohm,bd96801", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd96801_of_match);
+
+static struct i2c_driver bd96801_i2c_driver = {
+ .driver = {
+ .name = "rohm-bd96801",
+ .of_match_table = bd96801_of_match,
+ },
+ .probe = bd96801_i2c_probe,
+};
+
+static int __init bd96801_i2c_init(void)
+{
+ return i2c_add_driver(&bd96801_i2c_driver);
+}
+
+/* Initialise early so consumer devices can complete system boot */
+subsys_initcall(bd96801_i2c_init);
+
+static void __exit bd96801_i2c_exit(void)
+{
+ i2c_del_driver(&bd96801_i2c_driver);
+}
+module_exit(bd96801_i2c_exit);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("ROHM BD96801 Power Management IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rsmu_core.c b/drivers/mfd/rsmu_core.c
index 29437fd0bd5b..fd04a6e5dfa3 100644
--- a/drivers/mfd/rsmu_core.c
+++ b/drivers/mfd/rsmu_core.c
@@ -78,11 +78,13 @@ int rsmu_core_init(struct rsmu_ddata *rsmu)
return ret;
}
+EXPORT_SYMBOL_GPL(rsmu_core_init);
void rsmu_core_exit(struct rsmu_ddata *rsmu)
{
mutex_destroy(&rsmu->lock);
}
+EXPORT_SYMBOL_GPL(rsmu_core_exit);
MODULE_DESCRIPTION("Renesas SMU core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rt4831.c b/drivers/mfd/rt4831.c
index f8d6dc55b558..1ab8870e4ebf 100644
--- a/drivers/mfd/rt4831.c
+++ b/drivers/mfd/rt4831.c
@@ -115,4 +115,5 @@ static struct i2c_driver rt4831_driver = {
module_i2c_driver(rt4831_driver);
MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT4831 core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index f849f2d34ec7..6e7aff6e2746 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -319,6 +319,7 @@ static struct platform_driver ssbi_driver = {
};
module_platform_driver(ssbi_driver);
+MODULE_DESCRIPTION("Qualcomm Single-wire Serial Bus Interface (SSBI) driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:ssbi");
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index f35c3af680dd..5ed64d53c23d 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -222,8 +222,8 @@ static int stw481x_probe(struct i2c_client *client)
* the structure of the I2C core.
*/
static const struct i2c_device_id stw481x_id[] = {
- { "stw481x", 0 },
- { },
+ { "stw481x" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, stw481x_id);
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 7d0e91164cba..33f1e07ab24d 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -192,6 +192,54 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
return syscon->regmap;
}
+/**
+ * of_syscon_register_regmap() - Register regmap for specified device node
+ * @np: Device tree node
+ * @regmap: Pointer to regmap object
+ *
+ * Register an externally created regmap object with syscon for the specified
+ * device tree node. This regmap will then be returned to client drivers using
+ * the syscon_regmap_lookup_by_phandle() API.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
+{
+ struct syscon *entry, *syscon = NULL;
+ int ret;
+
+ if (!np || !regmap)
+ return -EINVAL;
+
+ syscon = kzalloc(sizeof(*syscon), GFP_KERNEL);
+ if (!syscon)
+ return -ENOMEM;
+
+ /* check if syscon entry already exists */
+ spin_lock(&syscon_list_slock);
+
+ list_for_each_entry(entry, &syscon_list, list)
+ if (entry->np == np) {
+ ret = -EEXIST;
+ goto err_unlock;
+ }
+
+ syscon->regmap = regmap;
+ syscon->np = np;
+
+ /* register the regmap in syscon list */
+ list_add_tail(&syscon->list, &syscon_list);
+ spin_unlock(&syscon_list_slock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock(&syscon_list_slock);
+ kfree(syscon);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_syscon_register_regmap);
+
struct regmap *device_node_to_regmap(struct device_node *np)
{
return device_node_get_regmap(np, false);
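
of_syscon_register_regmap() lets a driver that builds its own regmap publish it through syscon, so consumers keep using syscon_regmap_lookup_by_phandle() unchanged. A minimal provider-side sketch, assuming placeholder foo_* names and an MMIO regmap:

/* Sketch only: foo_* identifiers are placeholders, not symbols from this patch. */
static int foo_probe(struct platform_device *pdev)
{
	struct regmap *map;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	map = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Publish the regmap; clients resolve it via a phandle as before. */
	return of_syscon_register_regmap(pdev->dev.of_node, map);
}

A consumer then calls syscon_regmap_lookup_by_phandle() on its own node with whatever phandle property its binding defines, exactly as with a plain syscon node.
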
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index a41e9a3e2064..b059713db875 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mfd/core.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/timb_gpio.h>
@@ -25,7 +26,6 @@
#include <linux/spi/max7301.h>
#include <linux/spi/mc33880.h>
-#include <linux/platform_data/tsc2007.h>
#include <linux/platform_data/media/timb_radio.h>
#include <linux/platform_data/media/timb_video.h>
@@ -49,16 +49,21 @@ struct timberdale_device {
/*--------------------------------------------------------------------------*/
-static struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
- .model = 2003,
- .x_plate_ohms = 100
+static const struct property_entry timberdale_tsc2007_properties[] = {
+ PROPERTY_ENTRY_U32("ti,x-plate-ohms", 100),
+ { }
+};
+
+static const struct software_node timberdale_tsc2007_node = {
+ .name = "tsc2007",
+ .properties = timberdale_tsc2007_properties,
};
static struct i2c_board_info timberdale_i2c_board_info[] = {
{
I2C_BOARD_INFO("tsc2007", 0x48),
- .platform_data = &timberdale_tsc2007_platform_data,
- .irq = IRQ_TIMBERDALE_TSC_INT
+ .irq = IRQ_TIMBERDALE_TSC_INT,
+ .swnode = &timberdale_tsc2007_node,
},
};
@@ -853,4 +858,5 @@ module_pci_driver(timberdale_pci_driver);
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Timberdale FPGA MFD driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 5601f6d0d874..0da1cecb5af6 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -191,8 +191,8 @@ static void tps6105x_remove(struct i2c_client *client)
}
static const struct i2c_device_id tps6105x_id[] = {
- { "tps61050", 0 },
- { "tps61052", 0 },
+ { "tps61050" },
+ { "tps61052" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tps6105x_id);
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 95dafb0e9f00..9865512dc7cc 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -103,7 +103,7 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id tps6507x_i2c_id[] = {
- { "tps6507x", 0 },
+ { "tps6507x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index fdce81b33f60..5ef0a7e0d61d 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -127,7 +127,7 @@ static void tps65086_remove(struct i2c_client *client)
}
static const struct i2c_device_id tps65086_id_table[] = {
- { "tps65086", 0 },
+ { "tps65086" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, tps65086_id_table);
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index b764badaa62a..b82cd484ac85 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -225,8 +225,8 @@ err_irq_exit:
static const struct i2c_device_id tps65090_id_table[] = {
- { "tps65090", 0 },
- { },
+ { "tps65090" },
+ { }
};
static struct i2c_driver tps65090_driver = {
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 03c65bbf2143..82714899efb2 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -642,8 +642,8 @@ static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
tps6586x_i2c_resume);
static const struct i2c_device_id tps6586x_id_table[] = {
- { "tps6586x", 0 },
- { },
+ { "tps6586x" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, tps6586x_id_table);
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 2305ea60367a..87ee6aac3763 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -90,29 +90,22 @@ int tps65912_device_init(struct tps65912 *tps)
{
int ret;
- ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
- &tps65912_irq_chip, &tps->irq_data);
+ ret = devm_regmap_add_irq_chip(tps->dev, tps->regmap, tps->irq,
+ IRQF_ONESHOT, 0, &tps65912_irq_chip,
+ &tps->irq_data);
if (ret)
return ret;
- ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65912_cells,
- ARRAY_SIZE(tps65912_cells), NULL, 0,
- regmap_irq_get_domain(tps->irq_data));
- if (ret) {
- regmap_del_irq_chip(tps->irq, tps->irq_data);
+ ret = devm_mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65912_cells,
+ ARRAY_SIZE(tps65912_cells), NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+ if (ret)
return ret;
- }
return 0;
}
EXPORT_SYMBOL_GPL(tps65912_device_init);
-void tps65912_device_exit(struct tps65912 *tps)
-{
- regmap_del_irq_chip(tps->irq, tps->irq_data);
-}
-EXPORT_SYMBOL_GPL(tps65912_device_exit);
-
MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
MODULE_DESCRIPTION("TPS65912x MFD Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 3c5ac781b6c1..138e50497b51 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -42,15 +42,8 @@ static int tps65912_i2c_probe(struct i2c_client *client)
return tps65912_device_init(tps);
}
-static void tps65912_i2c_remove(struct i2c_client *client)
-{
- struct tps65912 *tps = i2c_get_clientdata(client);
-
- tps65912_device_exit(tps);
-}
-
static const struct i2c_device_id tps65912_i2c_id_table[] = {
- { "tps65912", 0 },
+ { "tps65912" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id_table);
@@ -61,7 +54,6 @@ static struct i2c_driver tps65912_i2c_driver = {
.of_match_table = tps65912_i2c_of_match_table,
},
.probe = tps65912_i2c_probe,
- .remove = tps65912_i2c_remove,
.id_table = tps65912_i2c_id_table,
};
module_i2c_driver(tps65912_i2c_driver);
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 9e976f9c6bbe..2a77dccd6059 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -42,13 +42,6 @@ static int tps65912_spi_probe(struct spi_device *spi)
return tps65912_device_init(tps);
}
-static void tps65912_spi_remove(struct spi_device *spi)
-{
- struct tps65912 *tps = spi_get_drvdata(spi);
-
- tps65912_device_exit(tps);
-}
-
static const struct spi_device_id tps65912_spi_id_table[] = {
{ "tps65912", 0 },
{ /* sentinel */ }
@@ -61,7 +54,6 @@ static struct spi_driver tps65912_spi_driver = {
.of_match_table = tps65912_spi_of_match_table,
},
.probe = tps65912_spi_probe,
- .remove = tps65912_spi_remove,
.id_table = tps65912_spi_id_table,
};
module_spi_driver(tps65912_spi_driver);
diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c
index c59f3d7e32b0..a7223e873cd1 100644
--- a/drivers/mfd/tps6594-core.c
+++ b/drivers/mfd/tps6594-core.c
@@ -513,7 +513,7 @@ static int tps6594_check_crc_mode(struct tps6594 *tps, bool primary_pmic)
} else {
regmap_reg = TPS6594_REG_SERIAL_IF_CONFIG;
mask_val = TPS6594_BIT_I2C1_SPI_CRC_EN;
- };
+ }
/*
* Check if CRC is enabled.
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 9ce34dfd99b3..c184e8bfab7c 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -817,9 +817,9 @@ static void twl6040_remove(struct i2c_client *client)
}
static const struct i2c_device_id twl6040_i2c_id[] = {
- { "twl6040", 0, },
- { "twl6041", 0, },
- { },
+ { "twl6040" },
+ { "twl6041" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id);
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index eab82619ec31..d34d58ce46db 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -132,4 +132,5 @@ static struct platform_driver vexpress_sysreg_driver = {
};
module_platform_driver(vexpress_sysreg_driver);
+MODULE_DESCRIPTION("Versatile Express system registers driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index e2a7fccaed01..2f185e93318e 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -13,7 +13,7 @@
#define DRIVER_DESC "WL1273 FM Radio Core"
static const struct i2c_device_id wl1273_driver_id_table[] = {
- { WL1273_FM_DRIVER_NAME, 0 },
+ { WL1273_FM_DRIVER_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, wl1273_driver_id_table);
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index c2a7d7069975..767c176b12a7 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -41,9 +41,9 @@ static int wm8350_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id wm8350_i2c_id[] = {
- { "wm8350", 0 },
- { "wm8351", 0 },
- { "wm8352", 0 },
+ { "wm8350" },
+ { "wm8351" },
+ { "wm8352" },
{ }
};
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index ddfb234849dd..8ecfe878a5ba 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -135,7 +135,7 @@ static int wm8400_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id wm8400_i2c_id[] = {
- { "wm8400", 0 },
+ { "wm8400" },
{ }
};
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index d5ac066f9db4..094c0b3dbd97 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -622,7 +622,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c)
wm8994->dev = &i2c->dev;
wm8994->irq = i2c->irq;
- wm8994->type = (enum wm8994_type)i2c_get_match_data(i2c);
+ wm8994->type = (kernel_ulong_t)i2c_get_match_data(i2c);
wm8994->regmap = devm_regmap_init_i2c(i2c, &wm8994_base_regmap_config);
if (IS_ERR(wm8994->regmap)) {
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 4c67e2c5a82e..a7a2bcedb37e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1238,6 +1238,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
struct fastrpc_phy_page pages[1];
char *name;
int err;
+ bool scm_done = false;
struct {
int pgid;
u32 namelen;
@@ -1289,6 +1290,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
goto err_map;
}
+ scm_done = true;
}
}
@@ -1320,10 +1322,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
goto err_invoke;
kfree(args);
+ kfree(name);
return 0;
err_invoke:
- if (fl->cctx->vmcount) {
+ if (fl->cctx->vmcount && scm_done) {
u64 src_perms = 0;
struct qcom_scm_vmperm dst_perms;
u32 i;
@@ -1693,16 +1696,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
{
struct fastrpc_invoke_args args[2] = { 0 };
- /* Capability filled in userspace */
+ /*
+ * Capability filled in userspace. This carries the information
+ * about the remoteproc support which is fetched from the remoteproc
+ * sysfs node by userspace.
+ */
dsp_attr_buf[0] = 0;
+ dsp_attr_buf_len -= 1;
args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
args[0].length = sizeof(dsp_attr_buf_len);
args[0].fd = -1;
args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
- args[1].length = dsp_attr_buf_len;
+ args[1].length = dsp_attr_buf_len * sizeof(u32);
args[1].fd = -1;
- fl->pd = USER_PD;
return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
FASTRPC_SCALARS(0, 1, 1), args);
@@ -1730,7 +1737,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
if (!dsp_attributes)
return -ENOMEM;
- err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
+ err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
if (err == DSP_UNSUPPORTED_API) {
dev_info(&cctx->rpdev->dev,
"Warning: DSP capabilities not supported on domain: %d\n", domain);
@@ -1783,7 +1790,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
if (err)
return err;
- if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
+ if (copy_to_user(argp, &cap, sizeof(cap)))
return -EFAULT;
return 0;
@@ -2080,6 +2087,16 @@ err_invoke:
return err;
}
+static int is_attach_rejected(struct fastrpc_user *fl)
+{
+ /* Check if the device node is non-secure */
+ if (!fl->is_secure_dev) {
+ dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n");
+ return -EACCES;
+ }
+ return 0;
+}
+
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -2092,13 +2109,19 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
err = fastrpc_invoke(fl, argp);
break;
case FASTRPC_IOCTL_INIT_ATTACH:
- err = fastrpc_init_attach(fl, ROOT_PD);
+ err = is_attach_rejected(fl);
+ if (!err)
+ err = fastrpc_init_attach(fl, ROOT_PD);
break;
case FASTRPC_IOCTL_INIT_ATTACH_SNS:
- err = fastrpc_init_attach(fl, SENSORS_PD);
+ err = is_attach_rejected(fl);
+ if (!err)
+ err = fastrpc_init_attach(fl, SENSORS_PD);
break;
case FASTRPC_IOCTL_INIT_CREATE_STATIC:
- err = fastrpc_init_create_static_process(fl, argp);
+ err = is_attach_rejected(fl);
+ if (!err)
+ err = fastrpc_init_create_static_process(fl, argp);
break;
case FASTRPC_IOCTL_INIT_CREATE:
err = fastrpc_init_create_process(fl, argp);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 5178c02b21eb..62ba01525479 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -286,6 +286,35 @@ static void lkdtm_HARDLOCKUP(void)
cpu_relax();
}
+static void __lkdtm_SMP_CALL_LOCKUP(void *unused)
+{
+ for (;;)
+ cpu_relax();
+}
+
+static void lkdtm_SMP_CALL_LOCKUP(void)
+{
+ unsigned int cpu, target;
+
+ cpus_read_lock();
+
+ cpu = get_cpu();
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids) {
+ pr_err("FAIL: no other online CPUs\n");
+ goto out_put_cpus;
+ }
+
+ smp_call_function_single(target, __lkdtm_SMP_CALL_LOCKUP, NULL, 1);
+
+ pr_err("FAIL: did not hang\n");
+
+out_put_cpus:
+ put_cpu();
+ cpus_read_unlock();
+}
+
static void lkdtm_SPINLOCKUP(void)
{
/* Must be called twice to trigger. */
@@ -680,6 +709,7 @@ static struct crashtype crashtypes[] = {
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(SOFTLOCKUP),
CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SMP_CALL_LOCKUP),
CRASHTYPE(SPINLOCKUP),
CRASHTYPE(HUNG_TASK),
CRASHTYPE(OVERFLOW_SIGNED),
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
index 32af2b14ff34..34c9be437432 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
@@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
GFP_KERNEL);
- if (!aux_bus->aux_device_wrapper[1])
- return -ENOMEM;
+ if (!aux_bus->aux_device_wrapper[1]) {
+ retval = -ENOMEM;
+ goto err_aux_dev_add_0;
+ }
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
if (retval < 0)
@@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
err_aux_dev_add_1:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ goto err_aux_dev_add_0;
err_aux_dev_init_1:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
@@ -120,6 +123,7 @@ err_ida_alloc_1:
err_aux_dev_add_0:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ goto err_ret;
err_aux_dev_init_0:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
@@ -127,6 +131,7 @@ err_aux_dev_init_0:
err_ida_alloc_0:
kfree(aux_bus->aux_device_wrapper[0]);
+err_ret:
return retval;
}
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
index 16695cb5e69c..7c3d8bedf90b 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
@@ -153,7 +153,6 @@ static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off,
buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
}
- ret = byte;
error:
release_sys_lock(priv);
return ret;
@@ -197,7 +196,6 @@ static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off,
goto error;
}
}
- ret = byte;
error:
release_sys_lock(priv);
return ret;
@@ -258,7 +256,6 @@ static int pci1xxxx_otp_read(void *priv_t, unsigned int off,
buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET));
}
- ret = byte;
error:
release_sys_lock(priv);
return ret;
@@ -315,7 +312,6 @@ static int pci1xxxx_otp_write(void *priv_t, unsigned int off,
goto error;
}
}
- ret = byte;
error:
release_sys_lock(priv);
return ret;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 79e6f3c1341f..40c3fe26f76d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
}
if (!mei_cl_is_connected(cl)) {
- cl_err(dev, cl, "is not connected");
+ cl_dbg(dev, cl, "is not connected");
rets = -ENODEV;
goto out;
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 7f59dd38c32f..6589635f8ba3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device)
}
err = mei_restart(dev);
- if (err)
+ if (err) {
+ free_irq(pdev->irq, dev);
return err;
+ }
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index b543e6b9f3cf..d02f6e881139 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -28,8 +28,8 @@
#define MEI_VSC_MAX_MSG_SIZE 512
-#define MEI_VSC_POLL_DELAY_US (50 * USEC_PER_MSEC)
-#define MEI_VSC_POLL_TIMEOUT_US (200 * USEC_PER_MSEC)
+#define MEI_VSC_POLL_DELAY_US (100 * USEC_PER_MSEC)
+#define MEI_VSC_POLL_TIMEOUT_US (400 * USEC_PER_MSEC)
#define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw))
@@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct mei_device *mei_dev;
+ int ret = 0;
- mei_stop(mei_dev);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
- mei_disable_interrupts(mei_dev);
+ mutex_lock(&mei_dev->device_lock);
- vsc_tp_free_irq(hw->tp);
+ if (!mei_write_is_idle(mei_dev))
+ ret = -EAGAIN;
- return 0;
+ mutex_unlock(&mei_dev->device_lock);
+
+ return ret;
}
static int mei_vsc_resume(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
- int ret;
-
- ret = vsc_tp_request_irq(hw->tp);
- if (ret)
- return ret;
-
- ret = mei_restart(mei_dev);
- if (ret)
- goto err_free;
+ struct mei_device *mei_dev;
- /* start timer if stopped in suspend */
- schedule_delayed_work(&mei_dev->timer_work, HZ);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
return 0;
-
-err_free:
- vsc_tp_free_irq(hw->tp);
-
- return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
index ffa4ccd96a10..084d0205f97d 100644
--- a/drivers/misc/mei/vsc-fw-loader.c
+++ b/drivers/misc/mei/vsc-fw-loader.c
@@ -204,7 +204,7 @@ struct vsc_img_frag {
/**
* struct vsc_fw_loader - represent vsc firmware loader
- * @dev: device used to request fimware
+ * @dev: device used to request firmware
* @tp: transport layer used with the firmware loader
* @csi: CSI image
* @ace: ACE image
@@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
union acpi_object obj = {
- .type = ACPI_TYPE_INTEGER,
+ .integer.type = ACPI_TYPE_INTEGER,
.integer.value = 1,
};
struct acpi_object_list arg_list = {
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index e6a98dba8a73..1618cca9a731 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -331,12 +331,12 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
return ret;
}
- ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
+ ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len);
if (ret)
return ret;
if (ibuf)
- cpu_to_be32_array(ibuf, tp->rx_buf, words);
+ be32_to_cpu_array(ibuf, tp->rx_buf, words);
return ret;
}
@@ -568,6 +568,19 @@ static void vsc_tp_remove(struct spi_device *spi)
free_irq(spi->irq, tp);
}
+static void vsc_tp_shutdown(struct spi_device *spi)
+{
+ struct vsc_tp *tp = spi_get_drvdata(spi);
+
+ platform_device_unregister(tp->pdev);
+
+ mutex_destroy(&tp->mutex);
+
+ vsc_tp_reset(tp);
+
+ free_irq(spi->irq, tp);
+}
+
static const struct acpi_device_id vsc_tp_acpi_ids[] = {
{ "INTC1009" }, /* Raptor Lake */
{ "INTC1058" }, /* Tiger Lake */
@@ -580,6 +593,7 @@ MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
static struct spi_driver vsc_tp_driver = {
.probe = vsc_tp_probe,
.remove = vsc_tp_remove,
+ .shutdown = vsc_tp_shutdown,
.driver = {
.name = "vsc-tp",
.acpi_match_table = vsc_tp_acpi_ids,
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 367509b5b646..2c9963248fcb 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2466,8 +2466,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
- bool cache_enabled = false;
- bool fua_enabled = false;
+ unsigned int features = 0;
devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0) {
@@ -2499,7 +2498,24 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
md->read_only = mmc_blk_readonly(card);
- md->disk = mmc_init_queue(&md->queue, card);
+ if (mmc_host_cmd23(card->host)) {
+ if ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
+ (mmc_card_sd(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ md->flags |= MMC_BLK_CMD23;
+ }
+
+ if (md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
+ features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+ } else if (mmc_cache_enabled(card->host)) {
+ features |= BLK_FEAT_WRITE_CACHE;
+ }
+
+ md->disk = mmc_init_queue(&md->queue, card, features);
if (IS_ERR(md->disk)) {
ret = PTR_ERR(md->disk);
goto err_kfree;
@@ -2539,26 +2555,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
set_capacity(md->disk, size);
- if (mmc_host_cmd23(card->host)) {
- if ((mmc_card_mmc(card) &&
- card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
- (mmc_card_sd(card) &&
- card->scr.cmds & SD_SCR_CMD23_SUPPORT))
- md->flags |= MMC_BLK_CMD23;
- }
-
- if (md->flags & MMC_BLK_CMD23 &&
- ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
- card->ext_csd.rel_sectors)) {
- md->flags |= MMC_BLK_REL_WR;
- fua_enabled = true;
- cache_enabled = true;
- }
- if (mmc_cache_enabled(card->host))
- cache_enabled = true;
-
- blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
-
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s%s\n",
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a8c17b4cd737..d6c819dd68ed 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2362,4 +2362,5 @@ static void __exit mmc_exit(void)
subsys_initcall(mmc_init);
module_exit(mmc_exit);
+MODULE_DESCRIPTION("MMC core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 3b6d69cefb4e..96fa4c508900 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -115,4 +115,5 @@ static struct platform_driver mmc_pwrseq_emmc_driver = {
};
module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_DESCRIPTION("Hardware reset support for eMMC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
index 0c5808fc3206..f24bbd68e251 100644
--- a/drivers/mmc/core/pwrseq_sd8787.c
+++ b/drivers/mmc/core/pwrseq_sd8787.c
@@ -130,4 +130,5 @@ static struct platform_driver mmc_pwrseq_sd8787_driver = {
};
module_platform_driver(mmc_pwrseq_sd8787_driver);
+MODULE_DESCRIPTION("Power sequence support for Marvell SD8787 BT + Wifi chip");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index df9588503ad0..154a8921ae75 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -159,4 +159,5 @@ static struct platform_driver mmc_pwrseq_simple_driver = {
};
module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_DESCRIPTION("Simple power sequence management for MMC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 241cdc2b2a2a..d0b3ca8a11f0 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -344,10 +344,12 @@ static const struct blk_mq_ops mmc_mq_ops = {
};
static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
- struct mmc_card *card)
+ struct mmc_card *card, unsigned int features)
{
struct mmc_host *host = card->host;
- struct queue_limits lim = { };
+ struct queue_limits lim = {
+ .features = features,
+ };
struct gendisk *disk;
if (mmc_can_erase(card))
@@ -376,18 +378,16 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
lim.max_segments = host->max_segs;
}
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ lim.features |= BLK_FEAT_STABLE_WRITES;
+
disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
if (IS_ERR(disk))
return disk;
mq->queue = disk->queue;
- if (mmc_host_is_spi(host) && host->use_spi_crc)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
blk_queue_rq_timeout(mq->queue, 60 * HZ);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
-
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
@@ -413,10 +413,12 @@ static inline bool mmc_merge_capable(struct mmc_host *host)
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
+ * @features: block layer features (BLK_FEAT_*)
*
* Initialise a MMC card request queue.
*/
-struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ unsigned int features)
{
struct mmc_host *host = card->host;
struct gendisk *disk;
@@ -460,7 +462,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
return ERR_PTR(ret);
- disk = mmc_alloc_disk(mq, card);
+ disk = mmc_alloc_disk(mq, card, features);
if (IS_ERR(disk))
blk_mq_free_tag_set(&mq->tag_set);
return disk;
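
The new @features argument lets callers express write-cache, FUA and stable-writes behaviour as queue-limit flags at allocation time instead of toggling queue flags afterwards. A condensed, driver-agnostic sketch of the pattern, with foo_* names as placeholders:

/* Driver-agnostic sketch; foo_tag_set and foo_drvdata are placeholders. */
struct queue_limits lim = {
	.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
};
struct gendisk *disk;

disk = blk_mq_alloc_disk(&foo_tag_set, &lim, foo_drvdata);
if (IS_ERR(disk))
	return PTR_ERR(disk);
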
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 9ade3bcbb714..1498840a4ea0 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -94,7 +94,8 @@ struct mmc_queue {
struct work_struct complete_work;
};
-struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card);
+struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ unsigned int features);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 575ebbce378e..6b7471dba3bf 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -1162,4 +1162,5 @@ module_init(sdio_uart_init);
module_exit(sdio_uart_exit);
MODULE_AUTHOR("Nicolas Pitre");
+MODULE_DESCRIPTION("SDIO UART/GPS driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index bb0d4fb0892a..eb3ecfe05591 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -1016,7 +1016,7 @@ config MMC_SDHCI_MICROCHIP_PIC32
config MMC_SDHCI_BRCMSTB
tristate "Broadcom SDIO/SD/MMC support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || BMIPS_GENERIC || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_CQHCI
default ARCH_BRCMSTB || BMIPS_GENERIC
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 8199d9620075..6490df54a6f5 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -33,6 +33,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/workqueue.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
@@ -272,12 +273,12 @@ struct atmel_mci_dma {
* EVENT_DATA_ERROR is pending.
* @stop_cmdr: Value to be loaded into CMDR when the stop command is
* to be sent.
- * @tasklet: Tasklet running the request state machine.
+ * @bh_work: Work running the request state machine.
* @pending_events: Bitmask of events flagged by the interrupt handler
- * to be processed by the tasklet.
+ * to be processed by the work.
* @completed_events: Bitmask of events which the state machine has
* processed.
- * @state: Tasklet state.
+ * @state: Work state.
* @queue: List of slots waiting for access to the controller.
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
@@ -352,7 +353,7 @@ struct atmel_mci {
u32 data_status;
u32 stop_cmdr;
- struct tasklet_struct tasklet;
+ struct work_struct bh_work;
unsigned long pending_events;
unsigned long completed_events;
enum atmel_mci_state state;
@@ -735,7 +736,7 @@ static void atmci_timeout_timer(struct timer_list *t)
host->need_reset = 1;
host->state = STATE_END_REQUEST;
smp_wmb();
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
@@ -958,7 +959,7 @@ static void atmci_pdc_complete(struct atmel_mci *host)
dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
static void atmci_dma_cleanup(struct atmel_mci *host)
@@ -972,7 +973,7 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
}
/*
- * This function is called by the DMA driver from tasklet context.
+ * This function is called by the DMA driver from bh context.
*/
static void atmci_dma_complete(void *arg)
{
@@ -995,7 +996,7 @@ static void atmci_dma_complete(void *arg)
if (data) {
dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
/*
* Regardless of what the documentation says, we have
@@ -1008,7 +1009,7 @@ static void atmci_dma_complete(void *arg)
* haven't seen all the potential error bits yet.
*
* The interrupt handler will schedule a different
- * tasklet to finish things up when the data transfer
+ * bh work to finish things up when the data transfer
* is completely done.
*
* We may not complete the mmc request here anyway
@@ -1745,9 +1746,9 @@ static void atmci_detect_change(struct timer_list *t)
}
}
-static void atmci_tasklet_func(struct tasklet_struct *t)
+static void atmci_work_func(struct work_struct *t)
{
- struct atmel_mci *host = from_tasklet(host, t, tasklet);
+ struct atmel_mci *host = from_work(host, t, bh_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
struct device *dev = host->dev;
@@ -1759,7 +1760,7 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
state = host->state;
- dev_vdbg(dev, "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
+ dev_vdbg(dev, "bh_work: state %u pending/completed/mask %lx/%lx/%x\n",
state, host->pending_events, host->completed_events,
atmci_readl(host, ATMCI_IMR));
@@ -2118,7 +2119,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
dev_dbg(dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & ATMCI_TXBUFE) {
@@ -2187,7 +2188,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
smp_wmb();
dev_dbg(dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & ATMCI_NOTBUSY) {
@@ -2196,7 +2197,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
smp_wmb();
dev_dbg(dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & ATMCI_RXRDY)
@@ -2211,7 +2212,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
smp_wmb();
dev_dbg(dev, "set pending cmd rdy\n");
atmci_set_pending(host, EVENT_CMD_RDY);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
@@ -2487,7 +2488,7 @@ static int atmci_probe(struct platform_device *pdev)
host->mapbase = regs->start;
- tasklet_setup(&host->tasklet, atmci_tasklet_func);
+ INIT_WORK(&host->bh_work, atmci_work_func);
ret = request_irq(irq, atmci_interrupt, 0, dev_name(dev), host);
if (ret) {
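
The tasklet-to-BH-workqueue conversion in atmel-mci above, and in the host drivers that follow, is the same mechanical pattern throughout; a condensed sketch with placeholder foo names:

/* Condensed conversion pattern; struct foo_host and foo_bh_work are placeholders. */
#include <linux/workqueue.h>

struct foo_host {
	struct work_struct bh_work;	/* was: struct tasklet_struct tasklet */
};

static void foo_bh_work(struct work_struct *t)
{
	struct foo_host *host = from_work(host, t, bh_work);

	/* ...exactly the body the old tasklet callback ran... */
}

/* setup:    INIT_WORK(&host->bh_work, foo_bh_work);    replaces tasklet_setup()    */
/* schedule: queue_work(system_bh_wq, &host->bh_work);  replaces tasklet_schedule() */
/* teardown: cancel_work_sync(&host->bh_work);          replaces tasklet_kill()     */
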
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index b5a5c6a2fe8b..6e80bcb668ec 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -42,6 +42,7 @@
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
@@ -113,8 +114,8 @@ struct au1xmmc_host {
int irq;
- struct tasklet_struct finish_task;
- struct tasklet_struct data_task;
+ struct work_struct finish_bh_work;
+ struct work_struct data_bh_work;
struct au1xmmc_platform_data *platdata;
struct platform_device *pdev;
struct resource *ioarea;
@@ -253,9 +254,9 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host)
mmc_request_done(host->mmc, mrq);
}
-static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
+static void au1xmmc_finish_bh_work(struct work_struct *t)
{
- struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
+ struct au1xmmc_host *host = from_work(host, t, finish_bh_work);
au1xmmc_finish_request(host);
}
@@ -363,9 +364,9 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
au1xmmc_finish_request(host);
}
-static void au1xmmc_tasklet_data(struct tasklet_struct *t)
+static void au1xmmc_data_bh_work(struct work_struct *t)
{
- struct au1xmmc_host *host = from_tasklet(host, t, data_task);
+ struct au1xmmc_host *host = from_work(host, t, data_bh_work);
u32 status = __raw_readl(HOST_STATUS(host));
au1xmmc_data_complete(host, status);
@@ -425,7 +426,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
- tasklet_schedule(&host->data_task);
+ queue_work(system_bh_wq, &host->data_bh_work);
}
}
@@ -505,7 +506,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
- tasklet_schedule(&host->data_task);
+ queue_work(system_bh_wq, &host->data_bh_work);
}
}
@@ -561,7 +562,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
if (!trans || cmd->error) {
IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
- tasklet_schedule(&host->finish_task);
+ queue_work(system_bh_wq, &host->finish_bh_work);
return;
}
@@ -797,7 +798,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
- tasklet_schedule(&host->finish_task);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
#if 0
else if (status & SD_STATUS_DD) {
@@ -806,7 +807,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
au1xmmc_receive_pio(host);
else {
au1xmmc_data_complete(host, status);
- /* tasklet_schedule(&host->data_task); */
+ /* queue_work(system_bh_wq, &host->data_bh_work); */
}
}
#endif
@@ -854,7 +855,7 @@ static void au1xmmc_dbdma_callback(int irq, void *dev_id)
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
- tasklet_schedule(&host->data_task);
+ queue_work(system_bh_wq, &host->data_bh_work);
}
static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
@@ -1039,9 +1040,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
if (host->platdata)
mmc->caps &= ~(host->platdata->mask_host_caps);
- tasklet_setup(&host->data_task, au1xmmc_tasklet_data);
+ INIT_WORK(&host->data_bh_work, au1xmmc_data_bh_work);
- tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);
+ INIT_WORK(&host->finish_bh_work, au1xmmc_finish_bh_work);
if (has_dbdma()) {
ret = au1xmmc_dbdma_init(host);
@@ -1091,8 +1092,8 @@ out5:
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
- tasklet_kill(&host->data_task);
- tasklet_kill(&host->finish_task);
+ cancel_work_sync(&host->data_bh_work);
+ cancel_work_sync(&host->finish_bh_work);
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
@@ -1135,8 +1136,8 @@ static void au1xmmc_remove(struct platform_device *pdev)
__raw_writel(0, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
- tasklet_kill(&host->data_task);
- tasklet_kill(&host->finish_task);
+ cancel_work_sync(&host->data_bh_work);
+ cancel_work_sync(&host->finish_bh_work);
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 0aec33b88bef..902f7f20abaa 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -493,7 +493,7 @@ static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
cb710_mmc_command(mmc, mrq->stop);
- tasklet_schedule(&reader->finish_req_tasklet);
+ queue_work(system_bh_wq, &reader->finish_req_bh_work);
}
static int cb710_mmc_powerup(struct cb710_slot *slot)
@@ -646,10 +646,10 @@ static int cb710_mmc_irq_handler(struct cb710_slot *slot)
return 1;
}
-static void cb710_mmc_finish_request_tasklet(struct tasklet_struct *t)
+static void cb710_mmc_finish_request_bh_work(struct work_struct *t)
{
- struct cb710_mmc_reader *reader = from_tasklet(reader, t,
- finish_req_tasklet);
+ struct cb710_mmc_reader *reader = from_work(reader, t,
+ finish_req_bh_work);
struct mmc_request *mrq = reader->mrq;
reader->mrq = NULL;
@@ -718,8 +718,8 @@ static int cb710_mmc_init(struct platform_device *pdev)
reader = mmc_priv(mmc);
- tasklet_setup(&reader->finish_req_tasklet,
- cb710_mmc_finish_request_tasklet);
+ INIT_WORK(&reader->finish_req_bh_work,
+ cb710_mmc_finish_request_bh_work);
spin_lock_init(&reader->irq_lock);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
@@ -763,7 +763,7 @@ static void cb710_mmc_exit(struct platform_device *pdev)
cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0);
cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0);
- tasklet_kill(&reader->finish_req_tasklet);
+ cancel_work_sync(&reader->finish_req_bh_work);
mmc_free_host(mmc);
}
diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h
index 5e053077dbed..59abaccaad10 100644
--- a/drivers/mmc/host/cb710-mmc.h
+++ b/drivers/mmc/host/cb710-mmc.h
@@ -8,10 +8,11 @@
#define LINUX_CB710_MMC_H
#include <linux/cb710.h>
+#include <linux/workqueue.h>
/* per-MMC-reader structure */
struct cb710_mmc_reader {
- struct tasklet_struct finish_req_tasklet;
+ struct work_struct finish_req_bh_work;
struct mmc_request *mrq;
spinlock_t irq_lock;
unsigned char last_power_mode;
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index d7427894e0bc..9cbde800685d 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -224,6 +224,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
}
p = sgm->addr;
+ if (n > sgm->length)
+ n = sgm->length;
+
/* NOTE: we never transfer more than rw_threshold bytes
* to/from the fifo here; there's no I/O overlap.
* This also assumes that access width( i.e. ACCWD) is 4 bytes
@@ -1184,7 +1187,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
- int ret, irq;
+ int ret, irq, bus_width;
size_t mem_size;
const struct platform_device_id *id_entry;
@@ -1314,9 +1317,14 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
rename_region(mem, mmc_hostname(mmc));
+ if (mmc->caps & MMC_CAP_8_BIT_DATA)
+ bus_width = 8;
+ else if (mmc->caps & MMC_CAP_4_BIT_DATA)
+ bus_width = 4;
+ else
+ bus_width = 1;
dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
- host->use_dma ? "DMA" : "PIO",
- (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+ host->use_dma ? "DMA" : "PIO", bus_width);
return 0;
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index 4747e5698f48..24e0b604b405 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -3,6 +3,7 @@
* Copyright (C) 2018 Mellanox Technologies.
*/
+#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/mmc/host.h>
@@ -20,6 +21,9 @@
#define BLUEFIELD_UHS_REG_EXT_SAMPLE 2
#define BLUEFIELD_UHS_REG_EXT_DRIVE 4
+/* SMC call for RST_N */
+#define BLUEFIELD_SMC_SET_EMMC_RST_N 0x82000007
+
static void dw_mci_bluefield_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
u32 reg;
@@ -34,8 +38,20 @@ static void dw_mci_bluefield_set_ios(struct dw_mci *host, struct mmc_ios *ios)
mci_writel(host, UHS_REG_EXT, reg);
}
+static void dw_mci_bluefield_hw_reset(struct dw_mci *host)
+{
+ struct arm_smccc_res res = { 0 };
+
+ arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0,
+ &res);
+
+ if (res.a0)
+ pr_err("RST_N failed.\n");
+}
+
static const struct dw_mci_drv_data bluefield_drv_data = {
- .set_ios = dw_mci_bluefield_set_ios
+ .set_ios = dw_mci_bluefield_set_ios,
+ .hw_reset = dw_mci_bluefield_hw_reset
};
static const struct of_device_id dw_mci_bluefield_match[] = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 8e2d676b9239..2333ef4893ee 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -493,7 +493,7 @@ static void dw_mci_dmac_complete_dma(void *arg)
*/
if (data) {
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
}
@@ -1617,6 +1617,7 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
int reset;
if (host->use_dma == TRANS_MODE_IDMAC)
@@ -1626,6 +1627,11 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
SDMMC_CTRL_FIFO_RESET))
return;
+ if (drv_data && drv_data->hw_reset) {
+ drv_data->hw_reset(host);
+ return;
+ }
+
/*
* According to eMMC spec, card reset procedure:
* tRstW >= 1us: RST_n pulse width
@@ -1834,7 +1840,7 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
if (!host->data_status) {
host->data_status = SDMMC_INT_DCRC;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
spin_unlock_irqrestore(&host->irq_lock, flags);
@@ -2056,9 +2062,9 @@ static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
return true;
}
-static void dw_mci_tasklet_func(struct tasklet_struct *t)
+static void dw_mci_work_func(struct work_struct *t)
{
- struct dw_mci *host = from_tasklet(host, t, tasklet);
+ struct dw_mci *host = from_work(host, t, bh_work);
struct mmc_data *data;
struct mmc_command *cmd;
struct mmc_request *mrq;
@@ -2113,7 +2119,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
* will waste a bit of time (we already know
* the command was bad), it can't cause any
* errors since it's possible it would have
- * taken place anyway if this tasklet got
+ * taken place anyway if this bh work got
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
@@ -2706,7 +2712,7 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
dw_mci_start_fault_timer(host);
}
@@ -2774,7 +2780,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
set_bit(EVENT_DATA_COMPLETE,
&host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
spin_unlock(&host->irq_lock);
}
@@ -2793,7 +2799,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
dw_mci_read_data_pio(host, true);
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
spin_unlock(&host->irq_lock);
}
@@ -3098,7 +3104,7 @@ static void dw_mci_cmd11_timer(struct timer_list *t)
host->cmd_status = SDMMC_INT_RTO;
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
}
static void dw_mci_cto_timer(struct timer_list *t)
@@ -3144,7 +3150,7 @@ static void dw_mci_cto_timer(struct timer_list *t)
*/
host->cmd_status = SDMMC_INT_RTO;
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
break;
default:
dev_warn(host->dev, "Unexpected command timeout, state %d\n",
@@ -3195,7 +3201,7 @@ static void dw_mci_dto_timer(struct timer_list *t)
host->data_status = SDMMC_INT_DRTO;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ queue_work(system_bh_wq, &host->bh_work);
break;
default:
dev_warn(host->dev, "Unexpected data timeout, state %d\n",
@@ -3435,7 +3441,7 @@ int dw_mci_probe(struct dw_mci *host)
else
host->fifo_reg = host->regs + DATA_240A_OFFSET;
- tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
+ INIT_WORK(&host->bh_work, dw_mci_work_func);
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
host->irq_flags, "dw-mci", host);
if (ret)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 4ed81f94f7ca..6447b916990d 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -17,6 +17,7 @@
#include <linux/fault-inject.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
enum dw_mci_state {
STATE_IDLE = 0,
@@ -89,12 +90,12 @@ struct dw_mci_dma_slave {
* @stop_cmdr: Value to be loaded into CMDR when the stop command is
* to be sent.
* @dir_status: Direction of current transfer.
- * @tasklet: Tasklet running the request state machine.
+ * @bh_work: Work running the request state machine.
* @pending_events: Bitmask of events flagged by the interrupt handler
- * to be processed by the tasklet.
+ * to be processed by bh work.
* @completed_events: Bitmask of events which the state machine has
* processed.
- * @state: Tasklet state.
+ * @state: BH work state.
* @queue: List of slots waiting for access to the controller.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
@@ -194,7 +195,7 @@ struct dw_mci {
u32 data_status;
u32 stop_cmdr;
u32 dir_status;
- struct tasklet_struct tasklet;
+ struct work_struct bh_work;
unsigned long pending_events;
unsigned long completed_events;
enum dw_mci_state state;
@@ -565,6 +566,7 @@ struct dw_mci_slot {
* @execute_tuning: implementation specific tuning procedure.
* @set_data_timeout: implementation specific timeout.
* @get_drto_clks: implementation specific cycle count for data read timeout.
+ * @hw_reset: implementation specific HW reset.
*
* Provide controller implementation specific extensions. The usage of this
* data structure is fully optional and usage of each member in this structure
@@ -585,5 +587,6 @@ struct dw_mci_drv_data {
void (*set_data_timeout)(struct dw_mci *host,
unsigned int timeout_ns);
u32 (*get_drto_clks)(struct dw_mci *host);
+ void (*hw_reset)(struct dw_mci *host);
};
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 09d7a6a0dc1a..c9caa1ece7ef 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1208,7 +1208,10 @@ static int mmc_spi_probe(struct spi_device *spi)
* that's the only reason not to use a few MHz for f_min (until
* the upper layer reads the target frequency from the CSD).
*/
- mmc->f_min = 400000;
+ if (spi->controller->min_speed_hz > 400000)
+ dev_warn(&spi->dev,"Controller unable to reduce bus clock to 400 KHz\n");
+
+ mmc->f_min = max(spi->controller->min_speed_hz, 400000);
mmc->f_max = spi->max_speed_hz;
host = mmc_priv(mmc);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 9a5f75163aca..8ede4ce93271 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -131,10 +131,12 @@ struct moxart_host {
struct dma_async_tx_descriptor *tx_desc;
struct mmc_host *mmc;
struct mmc_request *mrq;
+ struct scatterlist *cur_sg;
struct completion dma_complete;
struct completion pio_complete;
- struct sg_mapping_iter sg_miter;
+ u32 num_sg;
+ u32 data_remain;
u32 data_len;
u32 fifo_width;
u32 timeout;
@@ -146,6 +148,35 @@ struct moxart_host {
bool is_removed;
};
+static inline void moxart_init_sg(struct moxart_host *host,
+ struct mmc_data *data)
+{
+ host->cur_sg = data->sg;
+ host->num_sg = data->sg_len;
+ host->data_remain = host->cur_sg->length;
+
+ if (host->data_remain > host->data_len)
+ host->data_remain = host->data_len;
+}
+
+static inline int moxart_next_sg(struct moxart_host *host)
+{
+ int remain;
+ struct mmc_data *data = host->mrq->cmd->data;
+
+ host->cur_sg++;
+ host->num_sg--;
+
+ if (host->num_sg > 0) {
+ host->data_remain = host->cur_sg->length;
+ remain = host->data_len - data->bytes_xfered;
+ if (remain > 0 && remain < host->data_remain)
+ host->data_remain = remain;
+ }
+
+ return host->num_sg;
+}
+
static int moxart_wait_for_status(struct moxart_host *host,
u32 mask, u32 *status)
{
@@ -278,29 +309,14 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
static void moxart_transfer_pio(struct moxart_host *host)
{
- struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = host->mrq->cmd->data;
u32 *sgp, len = 0, remain, status;
if (host->data_len == data->bytes_xfered)
return;
- /*
- * By updating sgm->consumes this will get a proper pointer into the
- * buffer at any time.
- */
- if (!sg_miter_next(sgm)) {
- /* This shold not happen */
- dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
- data->error = -EINVAL;
- complete(&host->pio_complete);
- return;
- }
- sgp = sgm->addr;
- remain = sgm->length;
- if (remain > host->data_len)
- remain = host->data_len;
- sgm->consumed = 0;
+ sgp = sg_virt(host->cur_sg);
+ remain = host->data_remain;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
@@ -315,7 +331,6 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
- sgm->consumed += len;
remain -= len;
}
@@ -332,22 +347,22 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
- sgm->consumed += len;
remain -= len;
}
}
- data->bytes_xfered += sgm->consumed;
- if (host->data_len == data->bytes_xfered) {
+ data->bytes_xfered += host->data_remain - remain;
+ host->data_remain = remain;
+
+ if (host->data_len != data->bytes_xfered)
+ moxart_next_sg(host);
+ else
complete(&host->pio_complete);
- return;
- }
}
static void moxart_prepare_data(struct moxart_host *host)
{
struct mmc_data *data = host->mrq->cmd->data;
- unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
u32 datactrl;
int blksz_bits;
@@ -358,19 +373,15 @@ static void moxart_prepare_data(struct moxart_host *host)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
+ moxart_init_sg(host, data);
+
datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
- if (data->flags & MMC_DATA_WRITE) {
- flags |= SG_MITER_FROM_SG;
+ if (data->flags & MMC_DATA_WRITE)
datactrl |= DCR_DATA_WRITE;
- } else {
- flags |= SG_MITER_TO_SG;
- }
if (moxart_use_dma(host))
datactrl |= DCR_DMA_EN;
- else
- sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
@@ -443,9 +454,6 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
request_done:
- if (!moxart_use_dma(host))
- sg_miter_stop(&host->sg_miter);
-
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
}
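
The moxart hunks above drop the sg_miter iterator in favour of open-coded cur_sg/num_sg/data_remain bookkeeping. The following is a minimal userspace model of that windowed walk, not the driver itself: struct seg and struct xfer are stand-ins for struct scatterlist and struct moxart_host, and the FIFO transfer is faked.

#include <stdio.h>

struct seg { unsigned int len; };            /* stand-in for struct scatterlist */

struct xfer {
	const struct seg *cur;               /* mirrors host->cur_sg       */
	unsigned int num;                    /* mirrors host->num_sg       */
	unsigned int remain;                 /* mirrors host->data_remain  */
	unsigned int total;                  /* mirrors host->data_len     */
	unsigned int xfered;                 /* mirrors data->bytes_xfered */
};

static void xfer_init(struct xfer *x, const struct seg *sg, unsigned int n,
		      unsigned int total)
{
	x->cur = sg;
	x->num = n;
	x->total = total;
	x->xfered = 0;
	x->remain = sg->len < total ? sg->len : total;
}

static void xfer_next(struct xfer *x)
{
	unsigned int left;

	x->cur++;
	x->num--;
	if (!x->num)
		return;
	x->remain = x->cur->len;
	left = x->total - x->xfered;
	if (left < x->remain)
		x->remain = left;            /* clamp the last window to the request */
}

int main(void)
{
	const struct seg sg[] = { { 512 }, { 512 }, { 512 } };
	struct xfer x;

	xfer_init(&x, sg, 3, 1024);          /* 1 KiB request over three segments */
	while (x.xfered < x.total) {
		x.xfered += x.remain;        /* pretend the FIFO consumed the window */
		x.remain = 0;
		if (x.xfered < x.total)
			xfer_next(&x);
	}
	printf("transferred %u bytes, num_sg is now %u\n", x.xfered, x.num);
	return 0;
}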
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index bf54776fb26c..05939f30a5ae 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -19,6 +19,7 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+MODULE_DESCRIPTION("OpenFirmware bindings for the MMC-over-SPI driver");
MODULE_LICENSE("GPL");
struct of_mmc_spi {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index a8ee0df47148..335350a4e99a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_data/mmc-omap.h>
+#include <linux/workqueue.h>
#define OMAP_MMC_REG_CMD 0x00
@@ -105,7 +106,7 @@ struct mmc_omap_slot {
u16 power_mode;
unsigned int fclk_freq;
- struct tasklet_struct cover_tasklet;
+ struct work_struct cover_bh_work;
struct timer_list cover_timer;
unsigned cover_open;
@@ -873,18 +874,18 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
}
- tasklet_hi_schedule(&slot->cover_tasklet);
+ queue_work(system_bh_highpri_wq, &slot->cover_bh_work);
}
static void mmc_omap_cover_timer(struct timer_list *t)
{
struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
- tasklet_schedule(&slot->cover_tasklet);
+ queue_work(system_bh_wq, &slot->cover_bh_work);
}
-static void mmc_omap_cover_handler(struct tasklet_struct *t)
+static void mmc_omap_cover_bh_handler(struct work_struct *t)
{
- struct mmc_omap_slot *slot = from_tasklet(slot, t, cover_tasklet);
+ struct mmc_omap_slot *slot = from_work(slot, t, cover_bh_work);
int cover_open = mmc_omap_cover_is_open(slot);
mmc_detect_change(slot->mmc, 0);
@@ -1314,7 +1315,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
if (slot->pdata->get_cover_state != NULL) {
timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
- tasklet_setup(&slot->cover_tasklet, mmc_omap_cover_handler);
+ INIT_WORK(&slot->cover_bh_work, mmc_omap_cover_bh_handler);
}
r = mmc_add_host(mmc);
@@ -1333,7 +1334,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
&dev_attr_cover_switch);
if (r < 0)
goto err_remove_slot_name;
- tasklet_schedule(&slot->cover_tasklet);
+ queue_work(system_bh_wq, &slot->cover_bh_work);
}
return 0;
@@ -1356,7 +1357,7 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
if (slot->pdata->get_cover_state != NULL)
device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
- tasklet_kill(&slot->cover_tasklet);
+ cancel_work_sync(&slot->cover_bh_work);
del_timer_sync(&slot->cover_timer);
flush_workqueue(slot->host->mmc_omap_wq);
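
The omap.c hunks above follow the same mechanical conversion used throughout this series: a tasklet becomes a work_struct queued on system_bh_wq (or system_bh_highpri_wq for former tasklet_hi_schedule() users). Below is a hedged kernel-style sketch of that pattern with made-up demo_* names; it assumes a kernel recent enough to provide system_bh_wq and from_work() and is not tied to any driver in this pull.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_host {
	unsigned long events;
	struct work_struct event_bh_work;    /* was: struct tasklet_struct */
};

static void demo_event_bh_work(struct work_struct *t)
{
	/* from_work() is the work_struct counterpart of from_tasklet() */
	struct demo_host *host = from_work(host, t, event_bh_work);

	host->events = 0;                    /* handle the deferred event here */
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_host *host = dev_id;

	host->events++;
	/* was: tasklet_schedule(&host->event_tasklet) */
	queue_work(system_bh_wq, &host->event_bh_work);
	return IRQ_HANDLED;
}

static void demo_setup(struct demo_host *host)
{
	/* was: tasklet_setup(&host->event_tasklet, ...) */
	INIT_WORK(&host->event_bh_work, demo_event_bh_work);
}

static void demo_teardown(struct demo_host *host)
{
	/* was: tasklet_kill(&host->event_tasklet) */
	cancel_work_sync(&host->event_bh_work);
}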
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 586f94d4dbfd..f12a87442338 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -11,6 +11,7 @@
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include "tmio_mmc.h"
struct renesas_sdhi_scc {
@@ -67,7 +68,7 @@ struct renesas_sdhi_dma {
dma_filter_fn filter;
void (*enable)(struct tmio_mmc_host *host, bool enable);
struct completion dma_dataend;
- struct tasklet_struct dma_complete;
+ struct work_struct dma_complete;
};
struct renesas_sdhi {
@@ -93,6 +94,7 @@ struct renesas_sdhi {
unsigned int tap_set;
struct reset_control *rstc;
+ struct tmio_mmc_host *host;
};
#define host_to_priv(host) \
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 12f4faaaf4ee..f73b84bae0c4 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -22,13 +22,13 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl-state.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
@@ -970,6 +970,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(host))
return PTR_ERR(host);
+ priv->host = host;
+
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
mmc_data->ocr_mask = of_data->tmio_ocr_mask;
@@ -1162,4 +1164,5 @@ void renesas_sdhi_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+MODULE_DESCRIPTION("Renesas SDHI core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 422fa63a2e99..caf1d2e23343 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -11,13 +11,14 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-hi-lo.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/pagemap.h>
+#include <linux/platform_data/tmio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
@@ -337,7 +338,7 @@ static bool renesas_sdhi_internal_dmac_dma_irq(struct tmio_mmc_host *host)
writel(status ^ dma_irqs, host->ctl + DM_CM_INFO1);
set_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags);
if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags))
- tasklet_schedule(&dma_priv->dma_complete);
+ queue_work(system_bh_wq, &dma_priv->dma_complete);
}
return status & dma_irqs;
@@ -352,7 +353,7 @@ renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
set_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags);
if (test_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags) ||
host->data->error)
- tasklet_schedule(&dma_priv->dma_complete);
+ queue_work(system_bh_wq, &dma_priv->dma_complete);
}
/*
@@ -440,9 +441,9 @@ force_pio:
renesas_sdhi_internal_dmac_enable_dma(host, false);
}
-static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
+static void renesas_sdhi_internal_dmac_issue_work_fn(struct work_struct *work)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+ struct tmio_mmc_host *host = from_work(host, work, dma_issue);
struct renesas_sdhi *priv = host_to_priv(host);
tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
@@ -454,7 +455,7 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
/* on CMD errors, simulate DMA end immediately */
set_bit(SDHI_DMA_END_FLAG_DMA, &priv->dma_priv.end_flags);
if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &priv->dma_priv.end_flags))
- tasklet_schedule(&priv->dma_priv.dma_complete);
+ queue_work(system_bh_wq, &priv->dma_priv.dma_complete);
}
}
@@ -484,9 +485,11 @@ static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
return true;
}
-static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+static void renesas_sdhi_internal_dmac_complete_work_fn(struct work_struct *work)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+ struct renesas_sdhi_dma *dma_priv = from_work(dma_priv, work, dma_complete);
+ struct renesas_sdhi *priv = container_of(dma_priv, typeof(*priv), dma_priv);
+ struct tmio_mmc_host *host = priv->host;
spin_lock_irq(&host->lock);
if (!renesas_sdhi_internal_dmac_complete(host))
@@ -544,12 +547,10 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
/* Each value is set to non-zero to assume "enabling" each DMA */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&priv->dma_priv.dma_complete,
- renesas_sdhi_internal_dmac_complete_tasklet_fn,
- (unsigned long)host);
- tasklet_init(&host->dma_issue,
- renesas_sdhi_internal_dmac_issue_tasklet_fn,
- (unsigned long)host);
+ INIT_WORK(&priv->dma_priv.dma_complete,
+ renesas_sdhi_internal_dmac_complete_work_fn);
+ INIT_WORK(&host->dma_issue,
+ renesas_sdhi_internal_dmac_issue_work_fn);
/* Add pre_req and post_req */
host->ops.pre_req = renesas_sdhi_internal_dmac_pre_req;
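
Because a work handler only receives its work_struct, renesas_sdhi_internal_dmac_complete_work_fn() above recovers its context in two hops: from_work() to reach the DMA state, container_of() to reach the renesas_sdhi, and the new priv->host back-pointer to reach the tmio host. A small userspace model of that recovery (illustrative type names, container_of re-implemented with offsetof):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int dummy; };

struct dma_ctx {                       /* stands in for renesas_sdhi_dma */
	struct work_struct dma_complete;
};

struct sdhi_priv {                     /* stands in for renesas_sdhi */
	int id;
	struct dma_ctx dma_priv;
	void *host;                    /* back-pointer set at probe time */
};

static void complete_work_fn(struct work_struct *work)
{
	struct dma_ctx *dma = container_of(work, struct dma_ctx, dma_complete);
	struct sdhi_priv *priv = container_of(dma, struct sdhi_priv, dma_priv);

	printf("resolved priv %d, host %p\n", priv->id, priv->host);
}

int main(void)
{
	struct sdhi_priv priv = { .id = 42, .host = (void *)&priv };

	complete_work_fn(&priv.dma_priv.dma_complete);
	return 0;
}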
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 9cf7f9feab72..0ba3f62a9b49 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -11,13 +11,14 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/pagemap.h>
+#include <linux/platform_data/tmio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
@@ -312,9 +313,9 @@ static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
}
}
-static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
+static void renesas_sdhi_sys_dmac_issue_work_fn(struct work_struct *work)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+ struct tmio_mmc_host *host = from_work(host, work, dma_issue);
struct dma_chan *chan = NULL;
spin_lock_irq(&host->lock);
@@ -401,9 +402,8 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
goto ebouncebuf;
init_completion(&priv->dma_priv.dma_dataend);
- tasklet_init(&host->dma_issue,
- renesas_sdhi_sys_dmac_issue_tasklet_fn,
- (unsigned long)host);
+ INIT_WORK(&host->dma_issue,
+ renesas_sdhi_sys_dmac_issue_work_fn);
}
renesas_sdhi_sys_dmac_enable_dma(host, true);
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index cb9152c6a65d..e067c7f5c537 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -107,7 +107,7 @@ static void sdhci_bcm_kona_sd_init(struct sdhci_host *host)
* Software emulation of the SD card insertion/removal. Set insert=1 for insert
* and insert=0 for removal. The card detection is done by GPIO. For Broadcom
* IP to function properly the bit 0 of CORESTAT register needs to be set/reset
- * to generate the CD IRQ handled in sdhci.c which schedules card_tasklet.
+ * to generate the CD IRQ handled in sdhci.c
*/
static int sdhci_bcm_kona_sd_card_emulate(struct sdhci_host *host, int insert)
{
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 9053526fa212..031a4b514d16 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -24,12 +24,28 @@
#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
+#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY BIT(4)
#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
+#define SDIO_CFG_CQ_CAPABILITY 0x4c
+#define SDIO_CFG_CQ_CAPABILITY_FMUL GENMASK(13, 12)
+
+#define SDIO_CFG_CTRL 0x0
+#define SDIO_CFG_CTRL_SDCD_N_TEST_EN BIT(31)
+#define SDIO_CFG_CTRL_SDCD_N_TEST_LEV BIT(30)
+
+#define SDIO_CFG_MAX_50MHZ_MODE 0x1ac
+#define SDIO_CFG_MAX_50MHZ_MODE_STRAP_OVERRIDE BIT(31)
+#define SDIO_CFG_MAX_50MHZ_MODE_ENABLE BIT(0)
+
+#define MMC_CAP_HSE_MASK (MMC_CAP2_HSX00_1_2V | MMC_CAP2_HSX00_1_8V)
+/* Select all SD UHS type I SDR speeds above 50 MB/s */
+#define MMC_CAP_UHS_I_SDR_MASK (MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104)
+
struct sdhci_brcmstb_priv {
void __iomem *cfg_regs;
unsigned int flags;
@@ -38,6 +54,7 @@ struct sdhci_brcmstb_priv {
};
struct brcmstb_match_priv {
+ void (*cfginit)(struct sdhci_host *host);
void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
struct sdhci_ops *ops;
const unsigned int flags;
@@ -168,6 +185,33 @@ static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+static void sdhci_brcmstb_cfginit_2712(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *brcmstb_priv = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /*
+ * If we support a speed that requires tuning,
+ * then select the delay line PHY as the clock source.
+ */
+ if ((host->mmc->caps & MMC_CAP_UHS_I_SDR_MASK) || (host->mmc->caps2 & MMC_CAP_HSE_MASK)) {
+ reg = readl(brcmstb_priv->cfg_regs + SDIO_CFG_MAX_50MHZ_MODE);
+ reg &= ~SDIO_CFG_MAX_50MHZ_MODE_ENABLE;
+ reg |= SDIO_CFG_MAX_50MHZ_MODE_STRAP_OVERRIDE;
+ writel(reg, brcmstb_priv->cfg_regs + SDIO_CFG_MAX_50MHZ_MODE);
+ }
+
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
+ (host->mmc->caps & MMC_CAP_NEEDS_POLL)) {
+ /* Force presence */
+ reg = readl(brcmstb_priv->cfg_regs + SDIO_CFG_CTRL);
+ reg &= ~SDIO_CFG_CTRL_SDCD_N_TEST_LEV;
+ reg |= SDIO_CFG_CTRL_SDCD_N_TEST_EN;
+ writel(reg, brcmstb_priv->cfg_regs + SDIO_CFG_CTRL);
+ }
+}
+
static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
{
sdhci_dumpregs(mmc_priv(mmc));
@@ -200,6 +244,14 @@ static struct sdhci_ops sdhci_brcmstb_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static struct sdhci_ops sdhci_brcmstb_ops_2712 = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_set_power_and_bus_voltage,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
.set_clock = sdhci_brcmstb_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -214,6 +266,11 @@ static struct sdhci_ops sdhci_brcmstb_ops_74165b0 = {
.set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
};
+static const struct brcmstb_match_priv match_priv_2712 = {
+ .cfginit = sdhci_brcmstb_cfginit_2712,
+ .ops = &sdhci_brcmstb_ops_2712,
+};
+
static struct brcmstb_match_priv match_priv_7425 = {
.flags = BRCMSTB_MATCH_FLAGS_NO_64BIT |
BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
@@ -238,6 +295,7 @@ static struct brcmstb_match_priv match_priv_74165b0 = {
};
static const struct of_device_id __maybe_unused sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm2712-sdhci", .data = &match_priv_2712 },
{ .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
{ .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
{ .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
@@ -370,6 +428,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
(host->mmc->caps2 & MMC_CAP2_HS400_ES))
host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+ if (match_priv->cfginit)
+ match_priv->cfginit(host);
+
/*
* Supply the existing CAPS, but clear the UHS modes. This
* will allow these modes to be specified by device tree
@@ -384,6 +445,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
+ host->mmc_host_ops.card_busy = NULL;
+
/* Change the base clock frequency if the DT property exists */
if (device_property_read_u32(&pdev->dev, "clock-frequency",
&priv->base_freq_hz) != 0)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 40a6e2f8145a..8f0bc6dca2b0 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -201,6 +201,9 @@
/* ERR004536 is not applicable for the IP */
#define ESDHC_FLAG_SKIP_ERR004536 BIT(17)
+/* The IP does not have GPIO CD wake capabilities */
+#define ESDHC_FLAG_SKIP_CD_WAKE BIT(18)
+
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -298,7 +301,7 @@ static struct esdhc_soc_data usdhc_s32g2_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_SKIP_ERR004536,
+ | ESDHC_FLAG_SKIP_ERR004536 | ESDHC_FLAG_SKIP_CD_WAKE,
};
static struct esdhc_soc_data usdhc_imx7ulp_data = {
@@ -1706,7 +1709,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
}
pltfm_host->clk = imx_data->clk_per;
- pltfm_host->clock = clk_get_rate(pltfm_host->clk);
err = clk_prepare_enable(imx_data->clk_per);
if (err)
goto free_sdhci;
@@ -1717,6 +1719,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ipg_clk;
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
+ if (!pltfm_host->clock) {
+ dev_err(mmc_dev(host->mmc), "could not get clk rate\n");
+ err = -EINVAL;
+ goto disable_ahb_clk;
+ }
+
imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(imx_data->pinctrl))
dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n");
@@ -1726,7 +1735,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
/* GPIO CD can be set as a wakeup source */
- host->mmc->caps |= MMC_CAP_CD_WAKE;
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_CD_WAKE))
+ host->mmc->caps |= MMC_CAP_CD_WAKE;
if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
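
The esdhc-imx hunk above moves clk_get_rate() until after clk_prepare_enable() and fails the probe when the rate reads back as zero. A hedged sketch of that ordering with hypothetical demo_* naming and simplified error unwinding:

#include <linux/clk.h>
#include <linux/device.h>

static int demo_enable_and_get_rate(struct device *dev, struct clk *clk,
				    unsigned long *rate)
{
	int err;

	err = clk_prepare_enable(clk);
	if (err)
		return err;

	*rate = clk_get_rate(clk);       /* may read 0 if queried too early */
	if (!*rate) {
		dev_err(dev, "could not get clk rate\n");
		clk_disable_unprepare(clk);
		return -EINVAL;
	}
	return 0;
}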
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 39edf04fedcf..e79aa4b3b6c3 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -908,6 +908,7 @@ static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
.get_max_clock = rk35xx_get_max_clock,
.reset = rk35xx_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
+ .irq = dwcmshc_cqe_irq_handler,
};
static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index ef89ec382bfe..ed45ed0bdafd 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1319,6 +1319,23 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.probe_slot = intel_mrfld_mmc_probe_slot,
};
+#define JMB388_SAMPLE_COUNT 5
+
+static int jmicron_jmb388_get_ro(struct mmc_host *mmc)
+{
+ int i, ro_count;
+
+ ro_count = 0;
+ for (i = 0; i < JMB388_SAMPLE_COUNT; i++) {
+ if (sdhci_get_ro(mmc) > 0) {
+ if (++ro_count > JMB388_SAMPLE_COUNT / 2)
+ return 1;
+ }
+ msleep(30);
+ }
+ return 0;
+}
+
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -1326,7 +1343,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
if (ret)
- return ret;
+ goto fail;
/*
* Turn PMOS on [bit 0], set over current detection to 2.4 V
@@ -1337,7 +1354,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
else
scratch &= ~0x47;
- return pci_write_config_byte(chip->pdev, 0xAE, scratch);
+ ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
+
+fail:
+ return pcibios_err_to_errno(ret);
}
static int jmicron_probe(struct sdhci_pci_chip *chip)
@@ -1400,11 +1420,6 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
return ret;
}
- /* quirk for unsable RO-detection on JM388 chips */
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
- chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
- chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
-
return 0;
}
@@ -1459,6 +1474,11 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+ /* Handle unstable RO-detection on JM388 chips */
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
+ slot->host->mmc_host_ops.get_ro = jmicron_jmb388_get_ro;
+
return 0;
}
@@ -2202,7 +2222,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
@@ -2211,7 +2231,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
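
jmicron_jmb388_get_ro() above replaces the removed SDHCI_QUIRK_UNSTABLE_RO_DETECT handling with a per-slot .get_ro that samples the write-protect state several times and only reports read-only on a majority. A userspace model of that vote (the sample source is faked; the real code calls sdhci_get_ro() and sleeps 30 ms between samples):

#include <stdio.h>

#define SAMPLE_COUNT 5

static int fake_ro_readout(int call)
{
	/* pretend the pin glitches: reads 1, 0, 1, 1, 0, ... */
	static const int pattern[] = { 1, 0, 1, 1, 0 };
	return pattern[call % 5];
}

static int sampled_get_ro(void)
{
	int i, ro_count = 0;

	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (fake_ro_readout(i) > 0) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		/* the driver sleeps 30 ms between samples; omitted here */
	}
	return 0;
}

int main(void)
{
	printf("card reported %s\n", sampled_get_ro() ? "read-only" : "writable");
	return 0;
}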
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index d4a02184784a..058bef1c7e41 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -823,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -834,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_CLKREQ, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x20;
pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
@@ -843,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
*/
ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x01;
pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
@@ -856,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_INF_MOD, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x08;
pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
@@ -864,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -875,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -886,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG0,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
/* Check Whether subId is 0x11 or 0x12 */
@@ -898,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
/* Enable Base Clk setting change */
scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
@@ -921,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLK_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xFF00);
scratch_32 |= 0x07E0C800;
@@ -931,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLKREQ, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= 0x3;
pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0x1F3F070E);
scratch_32 |= 0x18270106;
@@ -949,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CAP_REG2, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xE0);
pci_write_config_dword(chip->pdev,
O2_SD_CAP_REG2, scratch_32);
@@ -961,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -971,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -979,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
if ((scratch_32 & 0xff000000) == 0x01000000) {
scratch_32 &= 0x0000FFFF;
@@ -998,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= (1 << 22);
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG4, scratch_32);
@@ -1017,7 +1017,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -1028,7 +1028,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -1057,13 +1057,16 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Lock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
}
return 0;
+
+read_fail:
+ return pcibios_err_to_errno(ret);
}
#ifdef CONFIG_PM_SLEEP
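
The read_fail conversions above exist because pci_read_config_*() / pci_write_config_*() return PCIBIOS_* codes rather than negative errnos; funnelling every failure through one label converts them exactly once. A hedged kernel-style sketch of that pattern, using a made-up register offset and function name:

#include <linux/pci.h>

#define DEMO_CFG_REG 0xae   /* hypothetical vendor config register */

static int demo_tune(struct pci_dev *pdev)
{
	u8 val;
	int ret;

	ret = pci_read_config_byte(pdev, DEMO_CFG_REG, &val);
	if (ret)
		goto read_fail;

	val |= 0x01;
	ret = pci_write_config_byte(pdev, DEMO_CFG_REG, val);

read_fail:
	/* pci_*_config_*() return PCIBIOS_* codes; callers expect -Exxx or 0 */
	return pcibios_err_to_errno(ret);
}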
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 746f4cf7ab03..4b91c9e96635 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2513,50 +2513,34 @@ out:
}
EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
-static int sdhci_check_ro(struct sdhci_host *host)
+int sdhci_get_ro(struct mmc_host *mmc)
{
- unsigned long flags;
+ struct sdhci_host *host = mmc_priv(mmc);
+ bool allow_invert = false;
int is_readonly;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
+ if (host->flags & SDHCI_DEVICE_DEAD) {
is_readonly = 0;
- else if (host->ops->get_ro)
+ } else if (host->ops->get_ro) {
is_readonly = host->ops->get_ro(host);
- else if (mmc_can_gpio_ro(host->mmc))
- is_readonly = mmc_gpio_get_ro(host->mmc);
- else
+ } else if (mmc_can_gpio_ro(mmc)) {
+ is_readonly = mmc_gpio_get_ro(mmc);
+ /* Do not invert twice */
+ allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+ } else {
is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
& SDHCI_WRITE_PROTECT);
+ allow_invert = true;
+ }
- spin_unlock_irqrestore(&host->lock, flags);
-
- /* This quirk needs to be replaced by a callback-function later */
- return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
- !is_readonly : is_readonly;
-}
-
-#define SAMPLE_COUNT 5
-
-static int sdhci_get_ro(struct mmc_host *mmc)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- int i, ro_count;
-
- if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
- return sdhci_check_ro(host);
+ if (is_readonly >= 0 &&
+ allow_invert &&
+ (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
+ is_readonly = !is_readonly;
- ro_count = 0;
- for (i = 0; i < SAMPLE_COUNT; i++) {
- if (sdhci_check_ro(host)) {
- if (++ro_count > SAMPLE_COUNT / 2)
- return 1;
- }
- msleep(30);
- }
- return 0;
+ return is_readonly;
}
+EXPORT_SYMBOL_GPL(sdhci_get_ro);
static void sdhci_hw_reset(struct mmc_host *mmc)
{
@@ -4724,6 +4708,21 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
host->max_adma = 65532; /* 32-bit alignment */
mmc->max_seg_size = 65535;
+ /*
+ * sdhci_adma_table_pre() expects to define 1 DMA
+ * descriptor per segment, so the maximum segment size
+ * is set accordingly. SDHCI allows up to 64KiB per DMA
+ * descriptor (16-bit field), but some controllers do
+ * not support "zero means 65536" reducing the maximum
+ * for them to 65535. That is a problem if PAGE_SIZE is
+ * 64KiB because the block layer does not support
+ * max_seg_size < PAGE_SIZE, however
+ * sdhci_adma_table_pre() has a workaround to handle
+ * that case, and split the descriptor. Refer also
+ * comment in sdhci_adma_table_pre().
+ */
+ if (mmc->max_seg_size < PAGE_SIZE)
+ mmc->max_seg_size = PAGE_SIZE;
} else {
mmc->max_seg_size = 65536;
}
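
The sdhci_get_ro() rework above hinges on the allow_invert flag: SDHCI_QUIRK_INVERTED_WRITE_PROTECT may only flip values coming from sources that have not already handled polarity themselves (the GPIO path honours MMC_CAP2_RO_ACTIVE_HIGH on its own). A userspace model of that decision, with the register and GPIO reads reduced to a raw parameter:

#include <stdbool.h>
#include <stdio.h>

enum ro_source { SRC_DEAD, SRC_HOST_OP, SRC_GPIO, SRC_PRESENT_STATE };

static int model_get_ro(enum ro_source src, int raw, bool gpio_active_high,
			bool inverted_wp_quirk)
{
	bool allow_invert = false;
	int is_readonly;

	switch (src) {
	case SRC_DEAD:
		is_readonly = 0;
		break;
	case SRC_HOST_OP:
		is_readonly = raw;          /* host op handles polarity itself */
		break;
	case SRC_GPIO:
		is_readonly = raw;
		allow_invert = !gpio_active_high;   /* do not invert twice */
		break;
	default:
		is_readonly = raw;          /* raw SDHCI_WRITE_PROTECT bit */
		allow_invert = true;
		break;
	}

	if (is_readonly >= 0 && allow_invert && inverted_wp_quirk)
		is_readonly = !is_readonly;
	return is_readonly;
}

int main(void)
{
	printf("%d\n", model_get_ro(SRC_PRESENT_STATE, 1, false, true)); /* 0 */
	printf("%d\n", model_get_ro(SRC_GPIO, 1, true, true));           /* 1 */
	return 0;
}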
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 957c7a917ffb..f531b617f28d 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -437,8 +437,6 @@ struct sdhci_host {
#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
/* Controller treats ADMA descriptors with length 0000h incorrectly */
#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
-/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
-#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31)
unsigned int quirks2; /* More deviations from spec. */
@@ -788,6 +786,7 @@ void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
int sdhci_get_cd_nogpio(struct mmc_host *mmc);
+int sdhci_get_ro(struct mmc_host *mmc);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 17ad32cfc0c3..64e10f7c9faa 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -90,7 +90,7 @@
/* Command Queue Host Controller Interface Base address */
#define SDHCI_AM654_CQE_BASE_ADDR 0x200
-static struct regmap_config sdhci_am654_regmap_config = {
+static const struct regmap_config sdhci_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index b5a2f2f25ad9..aea14bf3e2e8 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -13,6 +13,7 @@
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
#define DRIVER_NAME "tifm_sd"
@@ -97,7 +98,7 @@ struct tifm_sd {
unsigned int clk_div;
unsigned long timeout_jiffies;
- struct tasklet_struct finish_tasklet;
+ struct work_struct finish_bh_work;
struct timer_list timer;
struct mmc_request *req;
@@ -463,7 +464,7 @@ static void tifm_sd_check_status(struct tifm_sd *host)
}
}
finish_request:
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
/* Called from interrupt handler */
@@ -723,9 +724,9 @@ err_out:
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_end_cmd(struct tasklet_struct *t)
+static void tifm_sd_end_cmd(struct work_struct *t)
{
- struct tifm_sd *host = from_tasklet(host, t, finish_tasklet);
+ struct tifm_sd *host = from_work(host, t, finish_bh_work);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
@@ -960,7 +961,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
*/
mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS;
- tasklet_setup(&host->finish_tasklet, tifm_sd_end_cmd);
+ INIT_WORK(&host->finish_bh_work, tifm_sd_end_cmd);
timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
@@ -999,7 +1000,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
spin_unlock_irqrestore(&sock->lock, flags);
- tasklet_kill(&host->finish_tasklet);
+ cancel_work_sync(&host->finish_bh_work);
spin_lock_irqsave(&sock->lock, flags);
if (host->req) {
@@ -1009,7 +1010,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
host->req->cmd->error = -ENOMEDIUM;
if (host->req->stop)
host->req->stop->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
spin_unlock_irqrestore(&sock->lock, flags);
mmc_remove_host(mmc);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index de56e6534aea..a75755f31d31 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -21,6 +21,7 @@
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
@@ -139,9 +140,6 @@ struct tmio_mmc_host {
struct mmc_host *mmc;
struct mmc_host_ops ops;
- /* Callbacks for clock / power control */
- void (*set_pwr)(struct platform_device *host, int state);
-
/* pio related stuff */
struct scatterlist *sg_ptr;
struct scatterlist *sg_orig;
@@ -156,7 +154,7 @@ struct tmio_mmc_host {
bool dma_on;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
- struct tasklet_struct dma_issue;
+ struct work_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 93e912afd3ae..b359876cc33d 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -31,13 +31,14 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pagemap.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
@@ -608,7 +609,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
} else {
tmio_mmc_disable_mmc_irqs(host,
TMIO_MASK_READOP);
- tasklet_schedule(&host->dma_issue);
+ queue_work(system_bh_wq, &host->dma_issue);
}
} else {
if (!host->dma_on) {
@@ -616,7 +617,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
} else {
tmio_mmc_disable_mmc_irqs(host,
TMIO_MASK_WRITEOP);
- tasklet_schedule(&host->dma_issue);
+ queue_work(system_bh_wq, &host->dma_issue);
}
}
} else {
@@ -880,9 +881,6 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
/* .set_ios() is returning void, so, no chance to report an error */
- if (host->set_pwr)
- host->set_pwr(host->pdev, 1);
-
if (!IS_ERR(mmc->supply.vmmc)) {
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
/*
@@ -916,9 +914,6 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
-
- if (host->set_pwr)
- host->set_pwr(host->pdev, 0);
}
static unsigned int tmio_mmc_get_timeout_cycles(struct tmio_mmc_host *host)
@@ -1160,8 +1155,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (pdata->flags & TMIO_MMC_USE_BUSY_TIMEOUT && !_host->get_timeout_cycles)
_host->get_timeout_cycles = tmio_mmc_get_timeout_cycles;
- _host->set_pwr = pdata->set_pwr;
-
ret = tmio_mmc_init_ocr(_host);
if (ret < 0)
return ret;
@@ -1319,4 +1312,5 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif
+MODULE_DESCRIPTION("TMIO MMC core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 1404989e6151..46ee8a0b2b85 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -9,11 +9,11 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/mfd/syscon.h>
-#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -90,9 +90,9 @@ static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
}
/* external DMA engine */
-static void uniphier_sd_external_dma_issue(struct tasklet_struct *t)
+static void uniphier_sd_external_dma_issue(struct work_struct *t)
{
- struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
+ struct tmio_mmc_host *host = from_work(host, t, dma_issue);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
uniphier_sd_dma_endisable(host, 1);
@@ -199,7 +199,7 @@ static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
host->chan_rx = chan;
host->chan_tx = chan;
- tasklet_setup(&host->dma_issue, uniphier_sd_external_dma_issue);
+ INIT_WORK(&host->dma_issue, uniphier_sd_external_dma_issue);
}
static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
@@ -236,9 +236,9 @@ static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
.dataend = uniphier_sd_external_dma_dataend,
};
-static void uniphier_sd_internal_dma_issue(struct tasklet_struct *t)
+static void uniphier_sd_internal_dma_issue(struct work_struct *t)
{
- struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
+ struct tmio_mmc_host *host = from_work(host, t, dma_issue);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
@@ -317,7 +317,7 @@ static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
host->chan_tx = (void *)0xdeadbeaf;
- tasklet_setup(&host->dma_issue, uniphier_sd_internal_dma_issue);
+ INIT_WORK(&host->dma_issue, uniphier_sd_internal_dma_issue);
}
static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index ba6044b16e07..f77457105ec3 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
+#include <linux/workqueue.h>
#define DRV_NAME "via_sdmmc"
@@ -307,7 +308,7 @@ struct via_crdr_mmc_host {
struct sdhcreg pm_sdhc_reg;
struct work_struct carddet_work;
- struct tasklet_struct finish_tasklet;
+ struct work_struct finish_bh_work;
struct timer_list timer;
spinlock_t lock;
@@ -643,7 +644,7 @@ static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
if (data->stop)
via_sdc_send_command(host, data->stop);
else
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
@@ -653,7 +654,7 @@ static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
host->cmd->error = 0;
if (!host->cmd->data)
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
host->cmd = NULL;
}
@@ -682,7 +683,7 @@ static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
} else {
via_sdc_send_command(host, mrq->cmd);
}
@@ -848,7 +849,7 @@ static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
host->cmd->error = -EILSEQ;
if (host->cmd->error)
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
else if (intmask & VIA_CRDR_SDSTS_CRD)
via_sdc_finish_command(host);
}
@@ -955,16 +956,16 @@ static void via_sdc_timeout(struct timer_list *t)
sdhost->cmd->error = -ETIMEDOUT;
else
sdhost->mrq->cmd->error = -ETIMEDOUT;
- tasklet_schedule(&sdhost->finish_tasklet);
+ queue_work(system_bh_wq, &sdhost->finish_bh_work);
}
}
spin_unlock_irqrestore(&sdhost->lock, flags);
}
-static void via_sdc_tasklet_finish(struct tasklet_struct *t)
+static void via_sdc_finish_bh_work(struct work_struct *t)
{
- struct via_crdr_mmc_host *host = from_tasklet(host, t, finish_tasklet);
+ struct via_crdr_mmc_host *host = from_work(host, t, finish_bh_work);
unsigned long flags;
struct mmc_request *mrq;
@@ -1005,7 +1006,7 @@ static void via_sdc_card_detect(struct work_struct *work)
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1051,7 +1052,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
INIT_WORK(&host->carddet_work, via_sdc_card_detect);
- tasklet_setup(&host->finish_tasklet, via_sdc_tasklet_finish);
+ INIT_WORK(&host->finish_bh_work, via_sdc_finish_bh_work);
addrbase = host->sdhc_mmiobase;
writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
@@ -1193,7 +1194,7 @@ static void via_sd_remove(struct pci_dev *pcidev)
sdhost->mrq->cmd->error = -ENOMEDIUM;
if (sdhost->mrq->stop)
sdhost->mrq->stop->error = -ENOMEDIUM;
- tasklet_schedule(&sdhost->finish_tasklet);
+ queue_work(system_bh_wq, &sdhost->finish_bh_work);
}
spin_unlock_irqrestore(&sdhost->lock, flags);
@@ -1203,7 +1204,7 @@ static void via_sd_remove(struct pci_dev *pcidev)
del_timer_sync(&sdhost->timer);
- tasklet_kill(&sdhost->finish_tasklet);
+ cancel_work_sync(&sdhost->finish_bh_work);
/* switch off power */
gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index f0562f712d98..6e20405d0430 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -459,7 +459,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
* FIFO threshold interrupts properly.
*/
if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
}
static void wbsd_fill_fifo(struct wbsd_host *host)
@@ -524,7 +524,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
* 'FIFO empty' under certain conditions. So we
* need to be a bit more pro-active.
*/
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
}
static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
@@ -746,7 +746,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_command *cmd;
/*
- * Disable tasklets to avoid a deadlock.
+ * Disable bh works to avoid a deadlock.
*/
spin_lock_bh(&host->lock);
@@ -821,7 +821,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
* Dirty fix for hardware bug.
*/
if (host->dma == -1)
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
spin_unlock_bh(&host->lock);
@@ -961,13 +961,13 @@ static void wbsd_reset_ignore(struct timer_list *t)
* Card status might have changed during the
* blackout.
*/
- tasklet_schedule(&host->card_tasklet);
+ queue_work(system_bh_wq, &host->card_bh_work);
spin_unlock_bh(&host->lock);
}
/*
- * Tasklets
+ * BH Works
*/
static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
@@ -987,9 +987,9 @@ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
return host->mrq->cmd->data;
}
-static void wbsd_tasklet_card(struct tasklet_struct *t)
+static void wbsd_card_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, card_tasklet);
+ struct wbsd_host *host = from_work(host, t, card_bh_work);
u8 csr;
int delay = -1;
@@ -1020,7 +1020,7 @@ static void wbsd_tasklet_card(struct tasklet_struct *t)
wbsd_reset(host);
host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
delay = 0;
@@ -1036,9 +1036,9 @@ static void wbsd_tasklet_card(struct tasklet_struct *t)
mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
-static void wbsd_tasklet_fifo(struct tasklet_struct *t)
+static void wbsd_fifo_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, fifo_tasklet);
+ struct wbsd_host *host = from_work(host, t, fifo_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1060,16 +1060,16 @@ static void wbsd_tasklet_fifo(struct tasklet_struct *t)
*/
if (host->num_sg == 0) {
wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
}
end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_crc(struct tasklet_struct *t)
+static void wbsd_crc_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, crc_tasklet);
+ struct wbsd_host *host = from_work(host, t, crc_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1085,15 +1085,15 @@ static void wbsd_tasklet_crc(struct tasklet_struct *t)
data->error = -EILSEQ;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_timeout(struct tasklet_struct *t)
+static void wbsd_timeout_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, timeout_tasklet);
+ struct wbsd_host *host = from_work(host, t, timeout_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1109,15 +1109,15 @@ static void wbsd_tasklet_timeout(struct tasklet_struct *t)
data->error = -ETIMEDOUT;
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
end:
spin_unlock(&host->lock);
}
-static void wbsd_tasklet_finish(struct tasklet_struct *t)
+static void wbsd_finish_bh_work(struct work_struct *t)
{
- struct wbsd_host *host = from_tasklet(host, t, finish_tasklet);
+ struct wbsd_host *host = from_work(host, t, finish_bh_work);
struct mmc_data *data;
spin_lock(&host->lock);
@@ -1156,18 +1156,18 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id)
host->isr |= isr;
/*
- * Schedule tasklets as needed.
+ * Schedule bh work as needed.
*/
if (isr & WBSD_INT_CARD)
- tasklet_schedule(&host->card_tasklet);
+ queue_work(system_bh_wq, &host->card_bh_work);
if (isr & WBSD_INT_FIFO_THRE)
- tasklet_schedule(&host->fifo_tasklet);
+ queue_work(system_bh_wq, &host->fifo_bh_work);
if (isr & WBSD_INT_CRC)
- tasklet_hi_schedule(&host->crc_tasklet);
+ queue_work(system_bh_highpri_wq, &host->crc_bh_work);
if (isr & WBSD_INT_TIMEOUT)
- tasklet_hi_schedule(&host->timeout_tasklet);
+ queue_work(system_bh_highpri_wq, &host->timeout_bh_work);
if (isr & WBSD_INT_TC)
- tasklet_schedule(&host->finish_tasklet);
+ queue_work(system_bh_wq, &host->finish_bh_work);
return IRQ_HANDLED;
}
@@ -1443,13 +1443,13 @@ static int wbsd_request_irq(struct wbsd_host *host, int irq)
int ret;
/*
- * Set up tasklets. Must be done before requesting interrupt.
+ * Set up bh works. Must be done before requesting interrupt.
*/
- tasklet_setup(&host->card_tasklet, wbsd_tasklet_card);
- tasklet_setup(&host->fifo_tasklet, wbsd_tasklet_fifo);
- tasklet_setup(&host->crc_tasklet, wbsd_tasklet_crc);
- tasklet_setup(&host->timeout_tasklet, wbsd_tasklet_timeout);
- tasklet_setup(&host->finish_tasklet, wbsd_tasklet_finish);
+ INIT_WORK(&host->card_bh_work, wbsd_card_bh_work);
+ INIT_WORK(&host->fifo_bh_work, wbsd_fifo_bh_work);
+ INIT_WORK(&host->crc_bh_work, wbsd_crc_bh_work);
+ INIT_WORK(&host->timeout_bh_work, wbsd_timeout_bh_work);
+ INIT_WORK(&host->finish_bh_work, wbsd_finish_bh_work);
/*
* Allocate interrupt.
@@ -1472,11 +1472,11 @@ static void wbsd_release_irq(struct wbsd_host *host)
host->irq = 0;
- tasklet_kill(&host->card_tasklet);
- tasklet_kill(&host->fifo_tasklet);
- tasklet_kill(&host->crc_tasklet);
- tasklet_kill(&host->timeout_tasklet);
- tasklet_kill(&host->finish_tasklet);
+ cancel_work_sync(&host->card_bh_work);
+ cancel_work_sync(&host->fifo_bh_work);
+ cancel_work_sync(&host->crc_bh_work);
+ cancel_work_sync(&host->timeout_bh_work);
+ cancel_work_sync(&host->finish_bh_work);
}
/*
diff --git a/drivers/mmc/host/wbsd.h b/drivers/mmc/host/wbsd.h
index be30b4d8ce4c..970886831305 100644
--- a/drivers/mmc/host/wbsd.h
+++ b/drivers/mmc/host/wbsd.h
@@ -171,11 +171,11 @@ struct wbsd_host
int irq; /* Interrupt */
int dma; /* DMA channel */
- struct tasklet_struct card_tasklet; /* Tasklet structures */
- struct tasklet_struct fifo_tasklet;
- struct tasklet_struct crc_tasklet;
- struct tasklet_struct timeout_tasklet;
- struct tasklet_struct finish_tasklet;
+ struct work_struct card_bh_work; /* Work structures */
+ struct work_struct fifo_bh_work;
+ struct work_struct crc_bh_work;
+ struct work_struct timeout_bh_work;
+ struct work_struct finish_bh_work;
struct timer_list ignore_timer; /* Ignore detection timer */
};
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 3caa0717d46c..47ead84407cd 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -336,6 +336,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
lim.logical_block_size = tr->blksize;
if (tr->discard)
lim.max_hw_discard_sectors = UINT_MAX;
+ if (tr->flush)
+ lim.features |= BLK_FEAT_WRITE_CACHE;
/* Create gendisk */
gd = blk_mq_alloc_disk(new->tag_set, &lim, new);
@@ -372,13 +374,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
/* Create the request queue */
spin_lock_init(&new->queue_lock);
INIT_LIST_HEAD(&new->rq_list);
-
- if (tr->flush)
- blk_queue_write_cache(new->rq, true, false);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
-
gd->queue = new->rq;
if (new->readonly)
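
The mtd_blkdevs hunks above move the write-cache advertisement from a blk_queue_write_cache() call made after disk allocation into the queue_limits handed to blk_mq_alloc_disk(). A hedged kernel-style sketch of that shape (demo helper only; real transports fill in many more limits):

#include <linux/blk-mq.h>

static struct gendisk *demo_alloc_disk(struct blk_mq_tag_set *set,
				       void *queuedata, bool has_flush)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,
	};

	/* replaces a later blk_queue_write_cache(q, true, false) call */
	if (has_flush)
		lim.features |= BLK_FEAT_WRITE_CACHE;

	return blk_mq_alloc_disk(set, &lim, queuedata);
}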
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index cbf8ae85e1ae..614257308516 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -234,8 +234,7 @@ config MTD_NAND_FSL_IFC
tristate "Freescale IFC NAND controller"
depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
depends on HAS_IOMEM
- select FSL_IFC
- select MEMORY
+ depends on FSL_IFC
help
Various Freescale chips e.g P1010, include a NAND Flash machine
with built-in hardware ECC capabilities.
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index d7dbbd469b89..53e16d39af4b 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1093,28 +1093,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
unsigned int offset_in_page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ bool ident_stage = !mtd->writesize;
- /* Make sure the offset is less than the actual page size. */
- if (offset_in_page > mtd->writesize + mtd->oobsize)
- return -EINVAL;
+ /* Bypass all checks during NAND identification */
+ if (likely(!ident_stage)) {
+ /* Make sure the offset is less than the actual page size. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- /*
- * On small page NANDs, there's a dedicated command to access the OOB
- * area, and the column address is relative to the start of the OOB
- * area, not the start of the page. Asjust the address accordingly.
- */
- if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
- offset_in_page -= mtd->writesize;
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
- /*
- * The offset in page is expressed in bytes, if the NAND bus is 16-bit
- * wide, then it must be divided by 2.
- */
- if (chip->options & NAND_BUSWIDTH_16) {
- if (WARN_ON(offset_in_page % 2))
- return -EINVAL;
+ /*
+ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
- offset_in_page /= 2;
+ offset_in_page /= 2;
+ }
}
addrs[0] = offset_in_page;
@@ -1123,7 +1127,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
* Small page NANDs use 1 cycle for the columns, while large page NANDs
* need 2
*/
- if (mtd->writesize <= 512)
+ if (!ident_stage && mtd->writesize <= 512)
return 1;
addrs[1] = offset_in_page >> 8;
@@ -1436,16 +1440,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ bool ident_stage = !mtd->writesize;
if (len && !buf)
return -EINVAL;
- if (offset_in_page + len > mtd->writesize + mtd->oobsize)
- return -EINVAL;
+ if (!ident_stage) {
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- /* Small page NANDs do not support column change. */
- if (mtd->writesize <= 512)
- return -ENOTSUPP;
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+ }
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
@@ -2173,7 +2180,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit, bool check_only)
{
- if (!len || !buf)
+ if (!len || (!check_only && !buf))
return -EINVAL;
if (nand_has_exec_op(chip)) {
@@ -6301,6 +6308,7 @@ static const struct nand_ops rawnand_ops = {
static int nand_scan_tail(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *base = &chip->base;
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i;
@@ -6445,9 +6453,13 @@ static int nand_scan_tail(struct nand_chip *chip)
if (!ecc->write_oob_raw)
ecc->write_oob_raw = ecc->write_oob;
- /* propagate ecc info to mtd_info */
+ /* Propagate ECC info to the generic NAND and MTD layers */
mtd->ecc_strength = ecc->strength;
+ if (!base->ecc.ctx.conf.strength)
+ base->ecc.ctx.conf.strength = ecc->strength;
mtd->ecc_step_size = ecc->size;
+ if (!base->ecc.ctx.conf.step_size)
+ base->ecc.ctx.conf.step_size = ecc->size;
/*
* Set the number of read / write steps for one page depending on ECC
@@ -6455,6 +6467,8 @@ static int nand_scan_tail(struct nand_chip *chip)
*/
if (!ecc->steps)
ecc->steps = mtd->writesize / ecc->size;
+ if (!base->ecc.ctx.nsteps)
+ base->ecc.ctx.nsteps = ecc->steps;
if (ecc->steps * ecc->size != mtd->writesize) {
WARN(1, "Invalid ECC parameters\n");
ret = -EINVAL;
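
nand_fill_column_cycles() above now skips all page-geometry checks while mtd->writesize is still zero, i.e. during identification. A userspace model of the column computation it performs once the geometry is known (simplified error handling, illustrative values):

#include <stdint.h>
#include <stdio.h>

static int fill_column_cycles(unsigned int writesize, unsigned int oobsize,
			      int buswidth_16, unsigned int offset,
			      uint8_t addrs[2])
{
	int ident_stage = (writesize == 0);

	if (!ident_stage) {
		if (offset > writesize + oobsize)
			return -1;
		/* small-page OOB commands take a column relative to the OOB */
		if (writesize <= 512 && offset >= writesize)
			offset -= writesize;
		if (buswidth_16) {
			if (offset % 2)
				return -1;
			offset /= 2;   /* column is expressed in 16-bit words */
		}
	}

	addrs[0] = offset & 0xff;
	if (!ident_stage && writesize <= 512)
		return 1;             /* small pages use a single column cycle */
	addrs[1] = offset >> 8;
	return 2;
}

int main(void)
{
	uint8_t addrs[2] = { 0 };
	int n = fill_column_cycles(2048, 64, 0, 2050, addrs);

	printf("%d cycles: %02x %02x\n", n, addrs[0], addrs[1]);
	return 0;
}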
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 7baaef69d70a..55580447633b 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -420,13 +420,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
u32 rate, tc2rw, trwpw, trw2c;
u32 temp;
- if (target < 0)
- return 0;
-
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -EOPNOTSUPP;
+ if (target < 0)
+ return 0;
+
if (IS_ERR(nfc->nfc_clk))
rate = clk_get_rate(nfc->ahb_clk);
else
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 293a621e654c..fef2ac2852a8 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -137,6 +137,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset ISA driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3c3fcce4acd4..af9ddd3902cc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5755,10 +5755,10 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
}
static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct ethtool_ts_info ts_info;
+ struct kernel_ethtool_ts_info ts_info;
struct net_device *real_dev;
bool sw_tx_support = false;
struct list_head *iter;
@@ -5773,6 +5773,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
if (real_dev) {
ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
+ info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
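
When no active slave provides a PHC, the bonding hunk above now pre-fills the timestamping info with a software-only baseline before probing the slaves for TX support. A hedged kernel-style sketch of that fallback for an aggregating netdev (demo op name, no slave walk):

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

static int demo_get_ts_info(struct net_device *dev,
			    struct kernel_ethtool_ts_info *info)
{
	/* no PHC: report the software-only baseline the real code starts from */
	info->phc_index = -1;
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	return 0;
}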
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 0cacd7027e35..bc80fb6397dc 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1214,9 +1214,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
__be32 target;
if (newval->string) {
- if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
- netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
- &target);
+ if (strlen(newval->string) < 1 ||
+ !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
+ netdev_err(bond->dev, "invalid ARP target specified\n");
return ret;
}
if (newval->string[0] == '+')
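
The bond_options.c change above hardens the ARP-target parser: the first character of the option string is a '+'/'-' modifier, so an empty string must be rejected before the address parser is pointed at string + 1. A userspace model of that check, with inet_pton standing in for the kernel's in4_pton:

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static int parse_arp_target(const char *string, struct in_addr *target)
{
	if (strlen(string) < 1 ||
	    !inet_pton(AF_INET, string + 1, target)) {
		fprintf(stderr, "invalid ARP target specified\n");
		return -1;
	}
	return string[0] == '+';   /* 1: add target, 0: other modifier */
}

int main(void)
{
	struct in_addr addr;

	printf("%d\n", parse_arp_target("+192.0.2.1", &addr));  /* 1 */
	printf("%d\n", parse_arp_target("", &addr));            /* -1 */
	return 0;
}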
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2e31db55d927..7f9b60a42d29 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -187,9 +187,8 @@ config CAN_SLCAN
slcand) can be found in the can-utils at the linux-can project, see
https://github.com/linux-can/can-utils for details.
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
+ This driver can also be built as a module. If so, the module
+ will be called slcan.
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 83e724e0ab87..87828f953073 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -376,7 +376,7 @@ EXPORT_SYMBOL(can_eth_ioctl_hwts);
* supporting hardware timestamps
*/
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 7b5028b67cd5..a60d9efd5f8d 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -29,10 +29,10 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
-
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
#define KVASER_PCIEFD_VENDOR 0x1a07
+
/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
@@ -550,7 +550,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
spin_unlock_irqrestore(&can->lock, irq);
}
-static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
u32 msk;
@@ -711,17 +711,17 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
static int kvaser_pciefd_open(struct net_device *netdev)
{
- int err;
+ int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- err = open_candev(netdev);
- if (err)
- return err;
+ ret = open_candev(netdev);
+ if (ret)
+ return ret;
- err = kvaser_pciefd_bus_on(can);
- if (err) {
+ ret = kvaser_pciefd_bus_on(can);
+ if (ret) {
close_candev(netdev);
- return err;
+ return ret;
}
return 0;
@@ -1032,15 +1032,15 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
int i;
for (i = 0; i < pcie->nr_channels; i++) {
- int err = register_candev(pcie->can[i]->can.dev);
+ int ret = register_candev(pcie->can[i]->can.dev);
- if (err) {
+ if (ret) {
int j;
/* Unregister all successfully registered devices. */
for (j = 0; j < i; j++)
unregister_candev(pcie->can[j]->can.dev);
- return err;
+ return ret;
}
}
@@ -1619,7 +1619,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
/* Position does not point to the end of the packet,
* corrupted packet size?
*/
- if ((*start_pos + size) != pos)
+ if (unlikely((*start_pos + size) != pos))
return -EIO;
/* Point to the next packet header, if any */
@@ -1640,31 +1640,24 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
return res;
}
-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
kvaser_pciefd_read_buffer(pcie, 0);
- /* Reset DMA buffer 0 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
kvaser_pciefd_read_buffer(pcie, 1);
- /* Reset DMA buffer 1 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
+ if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+ return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1691,27 +1684,33 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
- u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 srb_irq = 0;
int i;
- if (!(board_irq & irq_mask->all))
+ if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
- if (board_irq & irq_mask->kcan_rx0)
- kvaser_pciefd_receive_irq(pcie);
+ if (pci_irq & irq_mask->kcan_rx0)
+ srb_irq = kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
- if (!pcie->can[i]) {
- dev_err(&pcie->pci->dev,
- "IRQ mask points to unallocated controller\n");
- break;
- }
-
- /* Check that mask matches channel (i) IRQ mask */
- if (board_irq & irq_mask->kcan_tx[i])
+ if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ /* Reset DMA buffer 0, may trigger new interrupt */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ /* Reset DMA buffer 1, may trigger new interrupt */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
return IRQ_HANDLED;
}
@@ -1733,7 +1732,7 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err;
+ int ret;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
void __iomem *irq_en_base;
@@ -1747,39 +1746,52 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
irq_mask = pcie->driver_data->irq_mask;
- err = pci_enable_device(pdev);
- if (err)
- return err;
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
- err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
- if (err)
+ ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
+ if (ret)
goto err_disable_pci;
pcie->reg_base = pci_iomap(pdev, 0, 0);
if (!pcie->reg_base) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_release_regions;
}
- err = kvaser_pciefd_setup_board(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_board(pcie);
+ if (ret)
goto err_pci_iounmap;
- err = kvaser_pciefd_setup_dma(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_dma(pcie);
+ if (ret)
goto err_pci_iounmap;
pci_set_master(pdev);
- err = kvaser_pciefd_setup_can_ctrls(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_can_ctrls(pcie);
+ if (ret)
goto err_teardown_can_ctrls;
- err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
- IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
- if (err)
+ ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n");
goto err_teardown_can_ctrls;
+ }
+
+ ret = pci_irq_vector(pcie->pci, 0);
+ if (ret < 0)
+ goto err_pci_free_irq_vectors;
+
+ pcie->pci->irq = ret;
+ ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (ret) {
+ dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq);
+ goto err_pci_free_irq_vectors;
+ }
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
@@ -1797,8 +1809,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- err = kvaser_pciefd_reg_candev(pcie);
- if (err)
+ ret = kvaser_pciefd_reg_candev(pcie);
+ if (ret)
goto err_free_irq;
return 0;
@@ -1808,6 +1820,9 @@ err_free_irq:
iowrite32(0, irq_en_base);
free_irq(pcie->pci->irq, pcie);
+err_pci_free_irq_vectors:
+ pci_free_irq_vectors(pcie->pci);
+
err_teardown_can_ctrls:
kvaser_pciefd_teardown_can_ctrls(pcie);
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
@@ -1822,7 +1837,7 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
- return err;
+ return ret;
}
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
@@ -1853,7 +1868,7 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
free_irq(pcie->pci->irq, pcie);
-
+ pci_free_irq_vectors(pcie->pci);
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
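The kvaser_pciefd probe path above now allocates an IRQ vector (INTx or MSI) before request_irq() and releases it through the new err_pci_free_irq_vectors label, so teardown runs in exact reverse order of setup. The generic shape of that goto-unwind idiom is sketched below; the acquire_*()/release_*() stubs are hypothetical placeholders, not real PCI API.

/* Sketch of the kernel's goto-unwind pattern: one error label per
 * failure point, resources released in reverse order of acquisition.
 */
#include <stdio.h>

static int acquire_device(void)   { puts("enable device");     return 0; }
static int acquire_regions(void)  { puts("request regions");   return 0; }
static int acquire_vectors(void)  { puts("alloc irq vectors");  return 0; }
static int acquire_irq(void)      { puts("request irq");       return -1; /* simulate failure */ }

static void release_vectors(void) { puts("free irq vectors"); }
static void release_regions(void) { puts("release regions"); }
static void release_device(void)  { puts("disable device"); }

static int probe(void)
{
	int ret;

	ret = acquire_device();
	if (ret)
		return ret;

	ret = acquire_regions();
	if (ret)
		goto err_disable_device;

	ret = acquire_vectors();
	if (ret)
		goto err_release_regions;

	ret = acquire_irq();
	if (ret)
		goto err_free_vectors;

	return 0;

err_free_vectors:
	release_vectors();
err_release_regions:
	release_regions();
err_disable_device:
	release_device();
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}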
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 14b231c4d7ec..7f63f866083e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -379,38 +379,72 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
-{
- u32 cccr = m_can_read(cdev, M_CAN_CCCR);
- u32 timeout = 10;
- u32 val = 0;
-
- /* Clear the Clock stop request if it was set */
- if (cccr & CCCR_CSR)
- cccr &= ~CCCR_CSR;
-
- if (enable) {
- /* enable m_can configuration */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
- udelay(5);
- /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
- } else {
- m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
+static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val)
+{
+ u32 val_before = m_can_read(cdev, M_CAN_CCCR);
+ u32 val_after = (val_before & ~mask) | val;
+ size_t tries = 10;
+
+ if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) {
+ dev_err(cdev->dev,
+ "refusing to configure device when in normal mode\n");
+ return -EBUSY;
}
- /* there's a delay for module initialization */
- if (enable)
- val = CCCR_INIT | CCCR_CCE;
+ /* The chip should be in standby mode when changing the CCCR register,
+ * and some chips set the CSR and CSA bits when in standby. Furthermore,
+ * the CSR and CSA bits should be written as zeros, even when they read
+ * ones.
+ */
+ val_after &= ~(CCCR_CSR | CCCR_CSA);
- while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
- if (timeout == 0) {
- netdev_warn(cdev->net, "Failed to init module\n");
- return;
- }
- timeout--;
- udelay(1);
+ while (tries--) {
+ u32 val_read;
+
+ /* Write the desired value in each try, as setting some bits in
+ * the CCCR register requires other bits to be set first. E.g.
+ * setting the NISO bit requires setting the CCE bit first.
+ */
+ m_can_write(cdev, M_CAN_CCCR, val_after);
+
+ val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA);
+
+ if (val_read == val_after)
+ return 0;
+
+ usleep_range(1, 5);
}
+
+ return -ETIMEDOUT;
+}
+
+static int m_can_config_enable(struct m_can_classdev *cdev)
+{
+ int err;
+
+ /* CCCR_INIT must be set in order to set CCCR_CCE, but access to
+ * configuration registers should only be enabled when in standby mode,
+ * where CCCR_INIT is always set.
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE);
+ if (err)
+ netdev_err(cdev->net, "failed to enable configuration mode\n");
+
+ return err;
+}
+
+static int m_can_config_disable(struct m_can_classdev *cdev)
+{
+ int err;
+
+ /* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in
+ * standby mode
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0);
+ if (err)
+ netdev_err(cdev->net, "failed to disable configuration registers\n");
+
+ return err;
}
static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
@@ -1403,7 +1437,9 @@ static int m_can_chip_config(struct net_device *dev)
interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
- m_can_config_endisable(cdev, true);
+ err = m_can_config_enable(cdev);
+ if (err)
+ return err;
/* RX Buffer/FIFO Element Size 64 bytes data field */
m_can_write(cdev, M_CAN_RXESC,
@@ -1521,7 +1557,9 @@ static int m_can_chip_config(struct net_device *dev)
FIELD_PREP(TSCC_TCP_MASK, 0xf) |
FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
- m_can_config_endisable(cdev, false);
+ err = m_can_config_disable(cdev);
+ if (err)
+ return err;
if (cdev->ops->init)
cdev->ops->init(cdev);
@@ -1550,7 +1588,11 @@ static int m_can_start(struct net_device *dev)
cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
m_can_read(cdev, M_CAN_TXFQS));
- return 0;
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0);
+ if (ret)
+ netdev_err(dev, "failed to enter normal mode\n");
+
+ return ret;
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
@@ -1599,43 +1641,37 @@ static int m_can_check_core_release(struct m_can_classdev *cdev)
}
/* Selectable Non ISO support only in version 3.2.x
- * This function checks if the bit is writable.
+ * Return 1 if the bit is writable, 0 if it is not, or negative on error.
*/
-static bool m_can_niso_supported(struct m_can_classdev *cdev)
+static int m_can_niso_supported(struct m_can_classdev *cdev)
{
- u32 cccr_reg, cccr_poll = 0;
- int niso_timeout = -ETIMEDOUT;
- int i;
+ int ret, niso;
- m_can_config_endisable(cdev, true);
- cccr_reg = m_can_read(cdev, M_CAN_CCCR);
- cccr_reg |= CCCR_NISO;
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
+ ret = m_can_config_enable(cdev);
+ if (ret)
+ return ret;
- for (i = 0; i <= 10; i++) {
- cccr_poll = m_can_read(cdev, M_CAN_CCCR);
- if (cccr_poll == cccr_reg) {
- niso_timeout = 0;
- break;
- }
+ /* First try to set the NISO bit. */
+ niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO);
- usleep_range(1, 5);
+ /* Then clear it again. */
+ ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0);
+ if (ret) {
+ dev_err(cdev->dev, "failed to revert the NON-ISO bit in CCCR\n");
+ return ret;
}
- /* Clear NISO */
- cccr_reg &= ~(CCCR_NISO);
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
-
- m_can_config_endisable(cdev, false);
+ ret = m_can_config_disable(cdev);
+ if (ret)
+ return ret;
- /* return false if time out (-ETIMEDOUT), else return true */
- return !niso_timeout;
+ return niso == 0;
}
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- int m_can_version, err;
+ int m_can_version, err, niso;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
@@ -1684,9 +1720,11 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
- cdev->can.ctrlmode_supported |=
- (m_can_niso_supported(cdev) ?
- CAN_CTRLMODE_FD_NON_ISO : 0);
+ niso = m_can_niso_supported(cdev);
+ if (niso < 0)
+ return niso;
+ if (niso)
+ cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
break;
default:
dev_err(cdev->dev, "Unsupported version number: %2d",
@@ -1694,21 +1732,26 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
return -EINVAL;
}
- if (cdev->ops->init)
- cdev->ops->init(cdev);
-
- return 0;
+ /* Forcing standby mode should be redundant, as the chip should be in
+ * standby after a reset. Write the INIT bit anyway, in case the chip
+ * was configured by a previous stage.
+ */
+ return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
}
static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
/* Set init mode to disengage from the network */
- m_can_config_endisable(cdev, true);
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (ret)
+ netdev_err(dev, "failed to enter standby mode: %pe\n",
+ ERR_PTR(ret));
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
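The m_can.c changes above replace m_can_config_endisable() with m_can_cccr_update_bits(), a masked read-modify-write that re-writes the desired CCCR value on every poll iteration, masks out the sticky CSR/CSA bits before comparing, and gives up after a bounded number of tries. A self-contained sketch of the same update-and-verify loop against a simulated 32-bit register is shown below; the simulated register and its sticky read-only bit are assumptions for illustration, not M_CAN behaviour.

/* Sketch: masked register update with readback verification and a
 * bounded retry loop. reg_read()/reg_write() emulate hardware;
 * STICKY_BIT models a bit that must be ignored when comparing
 * (analogous to CCCR_CSR/CCCR_CSA).
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define STICKY_BIT 0x80000000u

static uint32_t hw_reg = STICKY_BIT | 0x1; /* simulated register */

static uint32_t reg_read(void) { return hw_reg; }

static void reg_write(uint32_t val)
{
	/* The hardware keeps the sticky bit set no matter what is written. */
	hw_reg = (val & ~STICKY_BIT) | STICKY_BIT;
}

static int reg_update_bits(uint32_t mask, uint32_t val)
{
	uint32_t want = (reg_read() & ~mask) | (val & mask);
	int tries = 10;

	/* Ignore the sticky bit both when writing and when comparing. */
	want &= ~STICKY_BIT;

	while (tries--) {
		reg_write(want);
		if ((reg_read() & ~STICKY_BIT) == want)
			return 0;
	}

	return -ETIMEDOUT;
}

int main(void)
{
	int ret = reg_update_bits(0x6, 0x2); /* set bit 1, clear bit 2 */

	printf("ret=%d reg=0x%08x\n", ret, reg_read());
	return 0;
}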
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 3a9edc292593..92b2bd8628e6 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -91,7 +91,7 @@ struct m_can_classdev {
ktime_t irq_timer_wait;
- struct m_can_ops *ops;
+ const struct m_can_ops *ops;
int version;
u32 irqstatus;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 45400de4163d..d72fe771dfc7 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -77,7 +77,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
return 0;
}
-static struct m_can_ops m_can_pci_ops = {
+static const struct m_can_ops m_can_pci_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index df0367124b4c..983ab80260dd 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -68,7 +68,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
return 0;
}
-static struct m_can_ops m_can_plat_ops = {
+static const struct m_can_ops m_can_plat_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index a42600dac70d..2f73bf3abad8 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -357,7 +357,7 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
return 0;
}
-static struct m_can_ops tcan4x5x_ops = {
+static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
@@ -453,10 +453,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
- ret = tcan4x5x_init(mcan_class);
+ tcan4x5x_check_wake(priv);
+
+ ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
if (ret) {
- dev_err(&spi->dev, "tcan initialization failed %pe\n",
- ERR_PTR(ret));
+ dev_err(&spi->dev, "Disabling interrupts failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
+ ret = tcan4x5x_clear_interrupts(mcan_class);
+ if (ret) {
+ dev_err(&spi->dev, "Clearing interrupts failed %pe\n", ERR_PTR(ret));
goto out_power;
}
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index a6829cdc0e81..8c2a7bc64d3d 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -34,12 +34,6 @@ static const struct can_bittiming_const mscan_bittiming_const = {
.brp_inc = 1,
};
-struct mscan_state {
- u8 mode;
- u8 canrier;
- u8 cantier;
-};
-
static enum can_state state_map[] = {
CAN_STATE_ERROR_ACTIVE,
CAN_STATE_ERROR_WARNING,
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 31c9c127e24b..b50005397463 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -777,7 +777,7 @@ static const struct net_device_ops peak_canfd_netdev_ops = {
};
static int peak_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b82842718735..c919668bbe7a 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -508,12 +508,6 @@
*/
#define RCANFD_CFFIFO_IDX 0
-/* fCAN clock select register settings */
-enum rcar_canfd_fcanclk {
- RCANFD_CANFDCLK = 0, /* CANFD clock */
- RCANFD_EXTCLK, /* Externally input clock */
-};
-
struct rcar_canfd_global;
struct rcar_canfd_hw_info {
@@ -545,8 +539,8 @@ struct rcar_canfd_global {
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
- enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
unsigned long channels_mask; /* Enabled channels mask */
+ bool extclk; /* CANFD or Ext clock */
bool fdmode; /* CAN FD or Classical CAN only mode */
struct reset_control *rstc1;
struct reset_control *rstc2;
@@ -633,28 +627,28 @@ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
static inline u32 rcar_canfd_read(void __iomem *base, u32 offset)
{
- return readl(base + (offset));
+ return readl(base + offset);
}
static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val)
{
- writel(val, base + (offset));
+ writel(val, base + offset);
}
static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, val, base + (reg));
+ rcar_canfd_update(val, val, base + reg);
}
static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, 0, base + (reg));
+ rcar_canfd_update(val, 0, base + reg);
}
static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
u32 mask, u32 val)
{
- rcar_canfd_update(mask, val, base + (reg));
+ rcar_canfd_update(mask, val, base + reg);
}
static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
@@ -665,7 +659,7 @@ static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
*((u32 *)cf->data + i) =
- rcar_canfd_read(priv->base, off + (i * sizeof(u32)));
+ rcar_canfd_read(priv->base, off + i * sizeof(u32));
}
static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
@@ -675,7 +669,7 @@ static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- rcar_canfd_write(priv->base, off + (i * sizeof(u32)),
+ rcar_canfd_write(priv->base, off + i * sizeof(u32),
*((u32 *)cf->data + i));
}
@@ -777,7 +771,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
cfg |= RCANFD_GCFG_CMPOC;
/* Set External Clock if selected */
- if (gpriv->fcan != RCANFD_CANFDCLK)
+ if (gpriv->extclk)
cfg |= RCANFD_GCFG_DCS;
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
@@ -1941,16 +1935,12 @@ static int rcar_canfd_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
"cannot get canfd clock\n");
- gpriv->fcan = RCANFD_CANFDCLK;
-
+ /* CANFD clock may be further divided within the IP */
+ fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv;
} else {
- gpriv->fcan = RCANFD_EXTCLK;
+ fcan_freq = clk_get_rate(gpriv->can_clk);
+ gpriv->extclk = true;
}
- fcan_freq = clk_get_rate(gpriv->can_clk);
-
- if (gpriv->fcan == RCANFD_CANFDCLK)
- /* CANFD clock is further divided by (1/2) within the IP */
- fcan_freq /= info->postdiv;
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
@@ -2059,8 +2049,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, gpriv);
- dev_info(dev, "global operational state (clk %d, fdmode %d)\n",
- gpriv->fcan, gpriv->fdmode);
+ dev_info(dev, "global operational state (%s clk, %s mode)\n",
+ gpriv->extclk ? "ext" : "canfd",
+ gpriv->fdmode ? "fd" : "classical");
return 0;
fail_channel:
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 5de1ebb0c6f0..67e5316c6372 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -122,7 +122,6 @@ struct plx_pci_card {
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
-#define CTI_PCI_VENDOR_ID 0x12c4
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
#define MOXA_PCI_VENDOR_ID 0x1393
@@ -358,7 +357,7 @@ static const struct pci_device_id plx_pci_tbl[] = {
{
/* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
- CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
+ PCI_SUBVENDOR_ID_CONNECT_TECH, CTI_PCI_DEVICE_ID_CRG001,
0, 0,
(kernel_ulong_t)&plx_pci_card_info_cti
},
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index e1b8533a602e..148d974ebb21 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -830,7 +830,6 @@ static int hi3110_can_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct net_device *net;
struct hi3110_priv *priv;
- const void *match;
struct clk *clk;
u32 freq;
int ret;
@@ -874,11 +873,7 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
- match = device_get_match_data(dev);
- if (match)
- priv->model = (enum hi3110_model)(uintptr_t)match;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum hi3110_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 79c4bab5f724..3b8736ff0345 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -28,7 +28,6 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/freezer.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -482,9 +481,9 @@ static int mcp251x_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (mcp251x_gpio_is_input(offset))
- return GPIOF_DIR_IN;
+ return GPIO_LINE_DIRECTION_IN;
- return GPIOF_DIR_OUT;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -1301,7 +1300,6 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static int mcp251x_can_probe(struct spi_device *spi)
{
- const void *match = device_get_match_data(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
struct clk *clk;
@@ -1339,10 +1337,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->can.clock.freq = freq / 2;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
- if (match)
- priv->model = (enum mcp251x_model)(uintptr_t)match;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum mcp251x_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 1d9057dc44f2..3e7526274e34 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -744,6 +744,7 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
+ mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_sleep(priv);
}
@@ -763,6 +764,8 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
+ mcp251xfd_timestamp_start(priv);
+
err = mcp251xfd_set_bittiming(priv);
if (err)
goto out_chip_stop;
@@ -791,7 +794,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
return 0;
- out_chip_stop:
+out_chip_stop:
mcp251xfd_dump(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
@@ -867,18 +870,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
- struct can_frame **cf, u32 *timestamp)
+ struct can_frame **cf, u32 *ts_raw)
{
struct sk_buff *skb;
int err;
- err = mcp251xfd_get_timestamp(priv, timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, ts_raw);
if (err)
return NULL;
skb = alloc_can_err_skb(priv->ndev, cf);
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw);
return skb;
}
@@ -889,7 +892,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
struct mcp251xfd_rx_ring *ring;
struct sk_buff *skb;
struct can_frame *cf;
- u32 timestamp, rxovif;
+ u32 ts_raw, rxovif;
int err, i;
stats->rx_over_errors++;
@@ -924,14 +927,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
return err;
}
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
if (!skb)
return 0;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -948,12 +951,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
- u32 bdiag1, timestamp;
+ u32 bdiag1, ts_raw;
struct sk_buff *skb;
struct can_frame *cf = NULL;
int err;
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
return err;
@@ -1035,8 +1038,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
if (!cf)
return 0;
- mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1049,7 +1052,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
struct sk_buff *skb;
struct can_frame *cf = NULL;
enum can_state new_state, rx_state, tx_state;
- u32 trec, timestamp;
+ u32 trec, ts_raw;
int err;
err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
@@ -1079,7 +1082,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
/* The skb allocation might fail, but can_change_state()
* handles cf == NULL.
*/
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1110,7 +1113,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1135,7 +1138,7 @@ mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
return 0;
}
- /* According to MCP2517FD errata DS80000792B 1., during a TX
+ /* According to MCP2517FD errata DS80000792C 1., during a TX
* MAB underflow, the controller will transition to Restricted
* Operation Mode or Listen Only Mode (depending on SERR2LOM).
*
@@ -1180,7 +1183,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* TX MAB underflow
*
- * According to MCP2517FD Errata DS80000792B 1. a TX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a TX MAB
* underflow is indicated by SERRIF and MODIF.
*
* In addition to the effects mentioned in the Errata, there
@@ -1224,7 +1227,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* RX MAB overflow
*
- * According to MCP2517FD Errata DS80000792B 1. a RX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a RX MAB
* overflow is indicated by SERRIF.
*
* In addition to the effects mentioned in the Errata, (most
@@ -1331,7 +1334,8 @@ mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
return err;
/* Errata Reference:
- * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
+ * mcp2517fd: DS80000789C 3., mcp2518fd: DS80000792E 2.,
+ * mcp251863: DS80000984A 2.
*
* ECC single error correction does not work in all cases:
*
@@ -1576,7 +1580,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
handled = IRQ_HANDLED;
} while (1);
- out_fail:
+out_fail:
can_rx_offload_threaded_irq_finish(&priv->offload);
netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
@@ -1610,19 +1614,29 @@ static int mcp251xfd_open(struct net_device *ndev)
if (err)
goto out_mcp251xfd_ring_free;
+ mcp251xfd_timestamp_init(priv);
+
err = mcp251xfd_chip_start(priv);
if (err)
goto out_transceiver_disable;
- mcp251xfd_timestamp_init(priv);
clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
+ priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ dev_name(&spi->dev));
+ if (!priv->wq) {
+ err = -ENOMEM;
+ goto out_can_rx_offload_disable;
+ }
+ INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
+
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&spi->dev), priv);
if (err)
- goto out_can_rx_offload_disable;
+ goto out_destroy_workqueue;
err = mcp251xfd_chip_interrupts_enable(priv);
if (err)
@@ -1632,20 +1646,21 @@ static int mcp251xfd_open(struct net_device *ndev)
return 0;
- out_free_irq:
+out_free_irq:
free_irq(spi->irq, priv);
- out_can_rx_offload_disable:
+out_destroy_workqueue:
+ destroy_workqueue(priv->wq);
+out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
- mcp251xfd_timestamp_stop(priv);
- out_transceiver_disable:
+out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
- out_mcp251xfd_ring_free:
+out_mcp251xfd_ring_free:
mcp251xfd_ring_free(priv);
- out_pm_runtime_put:
+out_pm_runtime_put:
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
pm_runtime_put(ndev->dev.parent);
- out_close_candev:
+out_close_candev:
close_candev(ndev);
return err;
@@ -1661,8 +1676,8 @@ static int mcp251xfd_stop(struct net_device *ndev)
hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
+ destroy_workqueue(priv->wq);
can_rx_offload_disable(&priv->offload);
- mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
mcp251xfd_transceiver_disable(priv);
mcp251xfd_ring_free(priv);
@@ -1808,9 +1823,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
*effective_speed_hz_slow = xfer[0].effective_speed_hz;
*effective_speed_hz_fast = xfer[1].effective_speed_hz;
- out_kfree_buf_tx:
+out_kfree_buf_tx:
kfree(buf_tx);
- out_kfree_buf_rx:
+out_kfree_buf_rx:
kfree(buf_rx);
return err;
@@ -1924,13 +1939,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
return 0;
- out_unregister_candev:
+out_unregister_candev:
unregister_candev(ndev);
- out_chip_sleep:
+out_chip_sleep:
mcp251xfd_chip_sleep(priv);
- out_runtime_disable:
+out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
- out_runtime_put_noidle:
+out_runtime_put_noidle:
pm_runtime_put_noidle(ndev->dev.parent);
mcp251xfd_clks_and_vdd_disable(priv);
@@ -1989,7 +2004,6 @@ MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
static int mcp251xfd_probe(struct spi_device *spi)
{
- const void *match;
struct net_device *ndev;
struct mcp251xfd_priv *priv;
struct gpio_desc *rx_int;
@@ -2081,16 +2095,11 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
-
- match = device_get_match_data(&spi->dev);
- if (match)
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
- else
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)
- spi_get_device_id(spi)->driver_data;
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)spi_get_device_match_data(spi);
/* Errata Reference:
- * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
+ * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789E 4.,
+ * mcp251863: DS80000984A 4.
*
* The SPI can write corrupted data to the RAM at fast SPI
* speeds:
@@ -2150,9 +2159,9 @@ static int mcp251xfd_probe(struct spi_device *spi)
return 0;
- out_can_rx_offload_del:
+out_can_rx_offload_del:
can_rx_offload_del(&priv->offload);
- out_free_candev:
+out_free_candev:
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index 004eaf96262b..050321345304 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
kfree(buf);
}
- out:
+out:
mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 92b7bc7f14b9..65150e762007 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -397,7 +397,7 @@ mcp251xfd_regmap_crc_read(void *context,
return err;
}
- out:
+out:
memcpy(val_buf, buf_rx->data, val_len);
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index bfe4caa0c99d..7bd2bcb5cf87 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
int i, j;
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->last_valid = timecounter_read(&priv->tc);
rx_ring->head = 0;
rx_ring->tail = 0;
rx_ring->base = *base;
@@ -485,6 +486,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}
+ tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
+ ilog2(tx_ring->obj_num);
tx_ring->obj_size = tx_obj_size;
rem = priv->rx_obj_num;
@@ -507,6 +510,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
+ ilog2(rx_obj_num);
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index ced8d9c81f8c..fe897f3e4c12 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -16,23 +16,14 @@
#include "mcp251xfd.h"
-static inline int
-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head, bool *fifo_empty)
+static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta)
{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
- &fifo_sta);
- if (err)
- return err;
-
- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
- *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+}
- return 0;
+static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
}
static inline int
@@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
}
static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *len_p)
{
- u32 new_head;
- u8 chip_rx_head;
- bool fifo_empty;
+ const u8 shift = ring->obj_num_shift_to_u8;
+ u8 chip_head, tail, len;
+ u32 fifo_sta;
int err;
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
- &fifo_empty);
- if (err || fifo_empty)
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) {
+ *len_p = 0;
+ return 0;
+ }
+
+ if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) {
+ *len_p = ring->obj_num;
+ return 0;
+ }
+
+ chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_rx_tail(priv, ring);
+ if (err)
return err;
+ tail = mcp251xfd_get_rx_tail(ring);
- /* chip_rx_head, is the next RX-Object filled by the HW.
- * The new RX head must be >= the old head.
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference steady around the u8
+ * overflow. The right shift acts on len, which is a u8.
*/
- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
- if (new_head <= ring->head)
- new_head += ring->obj_num;
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len));
- ring->head = new_head;
+ len = (chip_head << shift) - (tail << shift);
+ *len_p = len >> shift;
- return mcp251xfd_check_rx_tail(priv, ring);
+ return 0;
}
static void
@@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
memcpy(cfd->data, hw_rx_obj->data, cfd->len);
-
- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}
static int
@@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
struct net_device_stats *stats = &priv->ndev->stats;
struct sk_buff *skb;
struct canfd_frame *cfd;
+ u64 timestamp;
int err;
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of the FIFOSTA register (here: the RX FIFO head index)
+ * might be corrupted, so we might process past the RX FIFO's
+ * head into old CAN frames.
+ *
+ * Compare the timestamp of currently processed CAN frame with
+ * last valid frame received. Abort with -EBADMSG if an old
+ * CAN frame is detected.
+ */
+ timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
+ if (timestamp <= ring->last_valid) {
+ stats->rx_fifo_errors++;
+
+ return -EBADMSG;
+ }
+ ring->last_valid = timestamp;
+
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
skb = alloc_canfd_skb(priv->ndev, &cfd);
else
@@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
return 0;
}
+ mcp251xfd_skb_set_timestamp(skb, timestamp);
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
@@ -198,51 +226,80 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
}
static int
+mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ u8 len)
+{
+ int offset;
+ int err;
+
+ if (!len)
+ return 0;
+
+ ring->head += len;
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+
+ return 0;
+}
+
+static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
struct mcp251xfd_rx_ring *ring)
{
struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
- u8 rx_tail, len;
+ u8 rx_tail, len, l;
int err, i;
- err = mcp251xfd_rx_ring_update(priv, ring);
+ err = mcp251xfd_get_rx_len(priv, ring, &len);
if (err)
return err;
- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- int offset;
-
+ while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
rx_tail = mcp251xfd_get_rx_tail(ring);
err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
- rx_tail, len);
+ rx_tail, l);
if (err)
return err;
- for (i = 0; i < len; i++) {
+ for (i = 0; i < l; i++) {
err = mcp251xfd_handle_rxif_one(priv, ring,
(void *)hw_rx_obj +
i * ring->obj_size);
- if (err)
+
+ /* -EBADMSG means we're affected by mcp2518fd
+ * erratum DS80000789E 6., i.e. the timestamp
+ * in the RX object is older than the last
+ * valid received CAN frame. Don't process any
+ * further and mark processed frames as good.
+ */
+ if (err == -EBADMSG)
+ return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
+ else if (err)
return err;
}
- /* Increment the RX FIFO tail pointer 'len' times in a
- * single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
+ err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
if (err)
return err;
- ring->tail += len;
+ len -= l;
}
return 0;
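Both the new mcp251xfd_get_rx_len() above and mcp251xfd_get_tef_len() further down derive the number of pending objects from two free-running u8 indices by left-shifting them to the top of a u8, subtracting, and shifting back; the result stays correct across counter wraparound as long as the FIFO depth is a power of two (shift = BITS_PER_TYPE(u8) - ilog2(obj_num)). A standalone worked example of the arithmetic, assuming an example FIFO depth of 16 (so shift = 8 - log2(16) = 4), is given below.

/* Worked example of the shift-to-u8 length calculation used by
 * mcp251xfd_get_rx_len()/mcp251xfd_get_tef_len(): head and tail are
 * free-running u8 indices, the FIFO holds 16 objects, shift = 4.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t fifo_len(uint8_t chip_head, uint8_t tail, unsigned int shift)
{
	/* The subtraction wraps modulo 256 after the shift, so the
	 * difference stays correct even when chip_head has already
	 * wrapped around while tail has not.
	 */
	uint8_t len = (uint8_t)(chip_head << shift) - (uint8_t)(tail << shift);

	return len >> shift;
}

int main(void)
{
	const unsigned int shift = 4; /* 8 - log2(16) */

	/* Plain case: head 9, tail 5 -> 4 pending objects. */
	printf("len = %u\n", (unsigned int)fifo_len(9, 5, shift));

	/* Wrapped case: hardware head restarted at 1, tail still at 14
	 * of 16: (1 - 14) mod 16 = 3 pending objects.
	 */
	printf("len = %u\n", (unsigned int)fifo_len(1, 14, shift));

	return 0;
}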
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index e5bd57b65aaf..f732556d233a 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -16,6 +16,11 @@
#include "mcp251xfd.h"
+static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
+{
+ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+}
+
static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
u8 *tef_tail)
@@ -56,60 +61,43 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
}
static int
-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- u32 tef_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
- if (err)
- return err;
-
- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
- netdev_err(priv->ndev,
- "Transmit Event FIFO buffer overflow.\n");
- return -ENOBUFS;
- }
-
- netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty",
- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
-
- /* The Sequence Number in the TEF doesn't match our tef_tail. */
- return -EAGAIN;
-}
-
-static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
unsigned int *frame_len_ptr)
{
struct net_device_stats *stats = &priv->ndev->stats;
+ u32 seq, tef_tail_masked, tef_tail;
struct sk_buff *skb;
- u32 seq, seq_masked, tef_tail_masked, tef_tail;
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits, which is enough to detect old TEF objects.
+ */
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK,
hw_tef_obj->flags);
-
- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
- * compare 7 bits, this should be enough to detect
- * net-yet-completed, i.e. old TEF objects.
- */
- seq_masked = seq &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
tef_tail_masked = priv->tef->tail &
field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- if (seq_masked != tef_tail_masked)
- return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of the FIFOSTA register (here: the TX FIFO tail index)
+ * might be corrupted, so we might process past the TEF FIFO's
+ * head into old CAN frames.
+ *
+ * Compare the sequence number of the currently processed CAN
+ * frame with the expected sequence number. Abort with
+ * -EBADMSG if an old CAN frame is detected.
+ */
+ if (seq != tef_tail_masked) {
+ netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__,
+ seq, tef_tail_masked);
+ stats->tx_fifo_errors++;
+
+ return -EBADMSG;
+ }
tef_tail = mcp251xfd_get_tef_tail(priv);
skb = priv->can.echo_skb[tef_tail];
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
tef_tail, hw_tef_obj->ts,
@@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
return 0;
}
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+static int
+mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
{
const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- unsigned int new_head;
- u8 chip_tx_tail;
+ const u8 shift = tx_ring->obj_num_shift_to_u8;
+ u8 chip_tx_tail, tail, len;
+ u32 fifo_sta;
int err;
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
+ &fifo_sta);
if (err)
return err;
- /* chip_tx_tail, is the next TX-Object send by the HW.
- * The new TEF head must be >= the old head, ...
+ if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
+ *len_p = tx_ring->obj_num;
+ return 0;
+ }
+
+ chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ tail = mcp251xfd_get_tef_tail(priv);
+
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference steady around the u8
+ * overflow. The right shift acts on len, which is a u8.
*/
- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef->head)
- new_head += tx_ring->obj_num;
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
- /* ... but it cannot exceed the TX head. */
- priv->tef->head = min(new_head, tx_ring->head);
+ len = (chip_tx_tail << shift) - (tail << shift);
+ *len_p = len >> shift;
- return mcp251xfd_check_tef_tail(priv);
+ return 0;
}
static inline int
@@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
u8 tef_tail, len, l;
int err, i;
- err = mcp251xfd_tef_ring_update(priv);
+ err = mcp251xfd_get_tef_len(priv, &len);
if (err)
return err;
tef_tail = mcp251xfd_get_tef_tail(priv);
- len = mcp251xfd_get_tef_len(priv);
- l = mcp251xfd_get_tef_linear_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv, len);
err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
if (err)
return err;
@@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
unsigned int frame_len = 0;
err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
- /* -EAGAIN means the Sequence Number in the TEF
- * doesn't match our tef_tail. This can happen if we
- * read the TEF objects too early. Leave loop let the
- * interrupt handler call us again.
+ /* -EBADMSG means we're affected by mcp2518fd erratum
+ * DS80000789E 6., i.e. the Sequence Number in the TEF
+ * doesn't match our tef_tail. Don't process any
+ * further and mark processed frames as good.
*/
- if (err == -EAGAIN)
+ if (err == -EBADMSG)
goto out_netif_wake_queue;
if (err)
return err;
@@ -216,13 +219,15 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
total_frame_len += frame_len;
}
- out_netif_wake_queue:
+out_netif_wake_queue:
len = i; /* number of handled good TEFs */
if (len) {
struct mcp251xfd_tef_ring *ring = priv->tef;
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
int offset;
+ ring->head += len;
+
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
*
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
index 712e09186987..202ca0d24d03 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2021 Pengutronix,
+// Copyright (c) 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
@@ -11,20 +11,20 @@
#include "mcp251xfd.h"
-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
+static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc)
{
const struct mcp251xfd_priv *priv;
- u32 timestamp = 0;
+ u32 ts_raw = 0;
int err;
priv = container_of(cc, struct mcp251xfd_priv, cc);
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
netdev_err(priv->ndev,
"Error %d while reading timestamp. HW timestamps may be inaccurate.",
err);
- return timestamp;
+ return ts_raw;
}
static void mcp251xfd_timestamp_work(struct work_struct *work)
@@ -39,28 +39,21 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp)
-{
- struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
- u64 ns;
-
- ns = timecounter_cyc2time(&priv->tc, timestamp);
- hwtstamps->hwtstamp = ns_to_ktime(ns);
-}
-
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
{
struct cyclecounter *cc = &priv->cc;
- cc->read = mcp251xfd_timestamp_read;
+ cc->read = mcp251xfd_timestamp_raw_read;
cc->mask = CYCLECOUNTER_MASK(32);
cc->shift = 1;
cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
- timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
-
INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
+}
+
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
schedule_delayed_work(&priv->timestamp,
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
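The timestamp rework above splits cyclecounter setup (mcp251xfd_timestamp_init) from starting the timecounter (mcp251xfd_timestamp_start); the raw-to-nanoseconds conversion itself remains the usual ns = (cycles * mult) >> shift with mult derived from the controller clock. The arithmetic can be checked in isolation with the sketch below; the 40 MHz clock is an assumed example value, and the mult computation is a local reimplementation of what clocksource_hz2mult() does rather than the kernel helper itself.

/* Sketch: cyclecounter-style raw-timestamp-to-nanoseconds conversion,
 * ns = (cycles * mult) >> shift, with mult = (NSEC_PER_SEC << shift) / hz.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2; /* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	const uint32_t hz = 40 * 1000 * 1000; /* assumed CAN clock */
	const uint32_t shift = 1;
	const uint32_t mult = hz2mult(hz, shift);
	const uint32_t raw = 400; /* example raw tick count from TBC */

	uint64_t ns = ((uint64_t)raw * mult) >> shift;

	/* 40 MHz -> 25 ns per tick, so 400 ticks = 10000 ns */
	printf("mult=%u shift=%u -> %llu ns\n",
	       mult, shift, (unsigned long long)ns);
	return 0;
}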
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index 160528d3cc26..b1de8052a45c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
tx_obj->xfer[0].len = len;
}
+static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring,
+ int err)
+{
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int frame_len = 0;
+ u8 tx_head;
+
+ tx_ring->head--;
+ stats->tx_dropped++;
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ can_free_echo_skb(ndev, tx_head, &frame_len);
+ netdev_completed_queue(ndev, 1, frame_len);
+ netif_wake_queue(ndev);
+
+ if (net_ratelimit())
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+}
+
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
+{
+ struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
+ tx_work);
+ struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int err;
+
+ err = spi_sync(priv->spi, &tx_obj->msg);
+ if (err)
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+}
+
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
struct mcp251xfd_tx_obj *tx_obj)
{
@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
return false;
}
+static bool mcp251xfd_work_busy(struct work_struct *work)
+{
+ return work_busy(work);
+}
+
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
- if (mcp251xfd_tx_busy(priv, tx_ring))
+ if (mcp251xfd_tx_busy(priv, tx_ring) ||
+ mcp251xfd_work_busy(&priv->tx_work))
return NETDEV_TX_BUSY;
tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+ if (err == -EBUSY) {
+ netif_stop_queue(ndev);
+ priv->tx_work_obj = tx_obj;
+ queue_work(priv->wq, &priv->tx_work);
+ } else if (err) {
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+ }
return NETDEV_TX_OK;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 24510b3b8020..dcbbd2b2fae8 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,7 +2,7 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
* Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
@@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */
union mcp251xfd_write_reg_buf irq_enable_buf;
struct spi_transfer irq_enable_xfer;
@@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring {
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
@@ -552,10 +554,14 @@ struct mcp251xfd_rx_ring {
unsigned int head;
unsigned int tail;
+ /* timestamp of the last valid received CAN frame */
+ u64 last_valid;
+
u16 base;
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
union mcp251xfd_write_reg_buf irq_enable_buf;
@@ -633,6 +639,10 @@ struct mcp251xfd_priv {
struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct mcp251xfd_tx_obj *tx_work_obj;
+
DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
@@ -805,10 +815,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
return data;
}
-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
- u32 *timestamp)
+static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv,
+ u32 *ts_raw)
+{
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw);
+}
+
+static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns)
{
- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static inline
+void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv,
+ struct sk_buff *skb, u32 ts_raw)
+{
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, ts_raw);
+ mcp251xfd_skb_set_timestamp(skb, ns);
}
static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
@@ -857,17 +884,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
return priv->tef->tail & (priv->tx->obj_num - 1);
}
-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
-{
- return priv->tef->head - priv->tef->tail;
-}
-
-static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
+static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_tef_len(priv);
-
return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
}
@@ -910,18 +928,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
-{
- return ring->head - ring->tail;
-}
-
static inline u8
-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_rx_len(ring);
-
return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
}
@@ -947,11 +956,11 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev);
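
The ring helpers above rely on free-running head/tail counters and power-of-two object counts: the fill level is a plain subtraction that survives counter wrap, the tail index is obtained by masking with obj_num - 1, and the "linear" length is the contiguous chunk up to the end of the ring. A minimal user-space sketch of that bookkeeping, assuming a fixed 8-entry ring (the real obj_num comes from the chip's FIFO configuration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define OBJ_NUM 8u                      /* must be a power of two */

struct ring {
	unsigned int head;              /* free-running producer counter */
	unsigned int tail;              /* free-running consumer counter */
};

static uint8_t ring_fill(const struct ring *r)
{
	return r->head - r->tail;       /* wraps correctly on overflow */
}

static uint8_t ring_tail_index(const struct ring *r)
{
	return r->tail & (OBJ_NUM - 1); /* same masking as get_tef_tail() */
}

static uint8_t ring_linear_len(const struct ring *r)
{
	uint8_t len = ring_fill(r);
	uint8_t to_end = OBJ_NUM - ring_tail_index(r);

	return len < to_end ? len : to_end; /* contiguous, no wrap-around */
}

int main(void)
{
	struct ring r = { .head = 13, .tail = 9 };

	printf("fill=%u tail_idx=%u linear=%u\n",
	       ring_fill(&r), ring_tail_index(&r), ring_linear_len(&r));
	assert(ring_linear_len(&r) <= ring_fill(&r));
	return 0;
}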
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bd58c636d465..3e1fba12c0c3 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -91,6 +91,7 @@ config CAN_KVASER_USB
- Kvaser Leaf Light R v2
- Kvaser Mini PCI Express HS
- Kvaser Mini PCI Express 2xHS
+ - Kvaser Mini PCIe 1xCAN
- Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
@@ -111,12 +112,14 @@ config CAN_KVASER_USB
- Kvaser USBcan Light 4xHS
- Kvaser USBcan Pro 2xHS v2
- Kvaser USBcan Pro 4xHS
+ - Kvaser USBcan Pro 5xCAN
- Kvaser USBcan Pro 5xHS
- Kvaser U100
- Kvaser U100P
- Kvaser U100S
- ATI Memorator Pro 2xHS v2
- ATI USBcan Pro 2xHS v2
+ - Vining 800
If unsure, say N.
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 65c962f76898..bc86e9b329fd 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -40,6 +40,9 @@
#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
+#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
+
#define GS_USB_ENDPOINT_IN 1
#define GS_USB_ENDPOINT_OUT 2
@@ -1145,7 +1148,7 @@ static int gs_usb_set_phys_id(struct net_device *netdev,
}
static int gs_usb_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct gs_can *dev = netdev_priv(netdev);
@@ -1530,6 +1533,8 @@ static const struct usb_device_id gs_usb_table[] = {
USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID,
+ USB_XYLANTA_SAINT3_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 8faf8a462c05..daa34b532aa8 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -89,6 +89,9 @@
#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
#define USB_LEAF_V3_PRODUCT_ID 0x0117
+#define USB_VINING_800_PRODUCT_ID 0x0119
+#define USB_USBCAN_PRO_5XCAN_PRODUCT_ID 0x011A
+#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
.quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
@@ -125,6 +128,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
.quirks = 0,
+ .family = KVASER_LEAF,
.ops = &kvaser_usb_leaf_dev_ops,
};
@@ -238,6 +242,12 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VINING_800_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_1XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -294,7 +304,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
}
usb_free_urb(urb);
- return 0;
+ return err;
}
int kvaser_usb_can_rx_over_error(struct net_device *netdev)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1efa39e134f4..3d68fef46ded 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -897,7 +897,7 @@ int peak_usb_set_eeprom(struct net_device *netdev,
return 0;
}
-int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f6cf84bb718f..abab00930b9d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -145,7 +145,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
-int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
/* common 32-bit CAN channel ID ethtool management */
int peak_usb_get_eeprom_len(struct net_device *netdev);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index fae0120473f8..d944911d7f05 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -6,7 +6,7 @@
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
* Description:
- * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
+ * This driver is developed for AXI CAN IP, AXI CANFD IP, CANPS and CANFD PS Controller.
*/
#include <linux/bitfield.h>
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 3092b391031a..2d10b4d6cfbb 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -102,6 +102,7 @@ config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
select REGMAP
+ imply SMSC_PHY
help
This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
@@ -126,7 +127,7 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
- select NET_DSA_TAG_NONE
+ select NET_DSA_TAG_VSC73XX_8021Q
select FIXED_PHY
select VITESSE_PHY
select GPIOLIB
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 6874cb9dc361..9c2ed2ba79da 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -12,14 +12,16 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/mutex.h>
#include <linux/platform_data/hirschmann-hellcreek.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <net/dsa.h>
#include <net/pkt_sched.h>
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index bd7aacc71a63..ca2500aba96f 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -16,7 +16,7 @@
#include "hellcreek_ptp.h"
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hellcreek *hellcreek = ds->priv;
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index 71af77efb28b..7d88da2134f2 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -48,7 +48,7 @@ void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 02f07b870f10..268949939636 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1047,31 +1047,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(lan9303_mib);
}
-static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
+static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_read(chip, regnum);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_read(chip, phy, regnum);
+ return chip->ops->phy_read(chip, phy_base + port, regnum);
}
-static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
+static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_write(chip, regnum, val);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_write(chip, phy, regnum, val);
+ return chip->ops->phy_write(chip, phy_base + port, regnum, val);
}
static int lan9303_port_enable(struct dsa_switch *ds, int port,
@@ -1099,7 +1099,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
- lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
+ lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1374,8 +1374,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
static int lan9303_register_switch(struct lan9303 *chip)
{
- int base;
-
chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
@@ -1385,8 +1383,7 @@ static int lan9303_register_switch(struct lan9303 *chip)
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops;
- base = chip->phy_addr_base;
- chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
+ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0);
return dsa_register_switch(chip->ds);
}
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index bbbec322bc4f..c62d27cdc117 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -89,7 +89,7 @@ static void lan9303_i2c_shutdown(struct i2c_client *client)
/*-------------------------------------------------------------------------*/
static const struct i2c_device_id lan9303_i2c_id[] = {
- { "lan9303", 0 },
+ { "lan9303" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index 167a86f39f27..0ac4857e5ee8 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -58,19 +58,19 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
return 0;
}
-static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg,
+static int lan9303_mdio_phy_write(struct lan9303 *chip, int addr, int reg,
u16 val)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val);
+ return mdiobus_write_nested(sw_dev->device->bus, addr, reg, val);
}
-static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
+static int lan9303_mdio_phy_read(struct lan9303 *chip, int addr, int reg)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_read_nested(sw_dev->device->bus, phy, reg);
+ return mdiobus_read_nested(sw_dev->device->bus, addr, reg);
}
static const struct lan9303_phy_ops lan9303_mdio_phy_ops = {
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index a557049e34f5..fcd4505f4925 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -236,7 +236,9 @@
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
#define GSWIP_TABLE_MAC_BRIDGE 0x0b
-#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static not, aging entry */
+#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
@@ -653,14 +655,8 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
struct gswip_pce_table_entry vlan_active = {0,};
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int cpu_port = priv->hw_info->cpu_port;
- unsigned int max_ports = priv->hw_info->max_ports;
int err;
- if (port >= max_ports) {
- dev_err(priv->dev, "single port for %i supported\n", port);
- return -EIO;
- }
-
vlan_active.index = port + 1;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.key[0] = 0; /* vid */
@@ -695,13 +691,18 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
int err;
- if (!dsa_is_user_port(ds, port))
- return 0;
-
if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
+
err = gswip_add_single_port_br(priv, port, true);
if (err)
return err;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
}
/* RMON Counter Enable for port */
@@ -714,16 +715,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
- if (!dsa_is_cpu_port(ds, port)) {
- u32 mdio_phy = 0;
-
- if (phydev)
- mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- }
-
return 0;
}
@@ -731,9 +722,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return;
-
gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -792,7 +780,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
}
if (vlan_filtering) {
- /* Use port based VLAN tag */
+ /* Use tag based VLAN */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_VSR,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
@@ -801,7 +789,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
GSWIP_PCE_PCTRL_0p(port));
} else {
- /* Use port based VLAN tag */
+ /* Use port based VLAN */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR,
@@ -836,7 +824,7 @@ static int gswip_setup(struct dsa_switch *ds)
err = gswip_pce_load_microcode(priv);
if (err) {
- dev_err(priv->dev, "writing PCE microcode failed, %i", err);
+ dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
return err;
}
@@ -898,8 +886,6 @@ static int gswip_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
- gswip_port_enable(ds, cpu_port, NULL);
-
ds->configure_vlan_while_not_filtering = false;
return 0;
@@ -1314,10 +1300,11 @@ static void gswip_port_fast_age(struct dsa_switch *ds, int port)
if (!mac_bridge.valid)
continue;
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
continue;
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
+ if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0]))
continue;
mac_bridge.valid = false;
@@ -1383,7 +1370,8 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
}
if (fid == -1) {
- dev_err(priv->dev, "Port not part of a bridge\n");
+ dev_err(priv->dev, "no FID found for bridge %s\n",
+ bridge->name);
return -EINVAL;
}
@@ -1392,9 +1380,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
mac_bridge.key[0] = addr[5] | (addr[4] << 8);
mac_bridge.key[1] = addr[3] | (addr[2] << 8);
mac_bridge.key[2] = addr[1] | (addr[0] << 8);
- mac_bridge.key[3] = fid;
+ mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
- mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
mac_bridge.valid = add;
err = gswip_pce_table_entry_write(priv, &mac_bridge);
@@ -1423,7 +1411,7 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned char addr[6];
+ unsigned char addr[ETH_ALEN];
int i;
int err;
@@ -1448,14 +1436,15 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
addr[1] = mac_bridge.key[2] & 0xff;
addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
if (mac_bridge.val[0] & BIT(port)) {
err = cb(addr, 0, true, data);
if (err)
return err;
}
} else {
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+ if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0])) {
err = cb(addr, 0, false, data);
if (err)
return err;
@@ -1474,12 +1463,11 @@ static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct gswip_priv *priv = ds->priv;
- int cpu_port = priv->hw_info->cpu_port;
/* CPU port always has maximum mtu of user ports, so use it to set
* switch frame size, including 8 byte special header.
*/
- if (port == cpu_port) {
+ if (dsa_is_cpu_port(ds, port)) {
new_mtu += 8;
gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
GSWIP_MAC_FLEN);
@@ -1516,6 +1504,7 @@ static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
case 2:
case 3:
case 4:
+ case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
@@ -1547,6 +1536,7 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
case 2:
case 3:
case 4:
+ case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
@@ -1790,7 +1780,7 @@ static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
GSWIP_BM_RAM_CTRL_BAS);
if (err) {
- dev_err(priv->dev, "timeout while reading table: %u, index: %u",
+ dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
table, index);
return 0;
}
@@ -1929,11 +1919,9 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
msleep(200);
ret = request_firmware(&fw, gphy_fw->fw_name, dev);
- if (ret) {
- dev_err(dev, "failed to load firmware: %s, error: %i\n",
- gphy_fw->fw_name, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
+ gphy_fw->fw_name);
/* GPHY cores need the firmware code in a persistent and contiguous
* memory area with a 16 kB boundary aligned start address.
@@ -1946,9 +1934,9 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
memcpy(fw_addr, fw->data, fw->size);
} else {
- dev_err(dev, "failed to alloc firmware memory\n");
release_firmware(fw);
- return -ENOMEM;
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to alloc firmware memory\n");
}
release_firmware(fw);
@@ -1975,8 +1963,8 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
if (IS_ERR(gphy_fw->clk_gate)) {
- dev_err(dev, "Failed to lookup gate clock\n");
- return PTR_ERR(gphy_fw->clk_gate);
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
+ "Failed to lookup gate clock\n");
}
ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
@@ -1996,8 +1984,8 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
break;
default:
- dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
+ gphy_mode);
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
@@ -2019,7 +2007,7 @@ static void gswip_gphy_fw_remove(struct gswip_priv *priv,
ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
if (ret)
- dev_err(priv->dev, "can not reset GPHY FW pointer");
+ dev_err(priv->dev, "can not reset GPHY FW pointer\n");
clk_disable_unprepare(gphy_fw->clk_gate);
@@ -2048,8 +2036,9 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
break;
default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n",
+ version);
}
}
@@ -2057,10 +2046,9 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
if (match && match->data)
priv->gphy_fw_name_cfg = match->data;
- if (!priv->gphy_fw_name_cfg) {
- dev_err(dev, "GPHY compatible type not supported");
- return -ENOENT;
- }
+ if (!priv->gphy_fw_name_cfg)
+ return dev_err_probe(dev, -ENOENT,
+ "GPHY compatible type not supported\n");
priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
if (!priv->num_gphy_fw)
@@ -2161,8 +2149,8 @@ static int gswip_probe(struct platform_device *pdev)
return -EINVAL;
break;
default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n", version);
}
/* bring up the mdio bus */
@@ -2170,28 +2158,27 @@ static int gswip_probe(struct platform_device *pdev)
if (gphy_fw_np) {
err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
of_node_put(gphy_fw_np);
- if (err) {
- dev_err(dev, "gphy fw probe failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "gphy fw probe failed\n");
}
/* bring up the mdio bus */
err = gswip_mdio(priv);
if (err) {
- dev_err(dev, "mdio probe failed\n");
+ dev_err_probe(dev, err, "mdio probe failed\n");
goto gphy_fw_remove;
}
err = dsa_register_switch(priv->ds);
if (err) {
- dev_err(dev, "dsa switch register failed: %i\n", err);
+ dev_err_probe(dev, err, "dsa switch registration failed\n");
goto gphy_fw_remove;
}
if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
- dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
- priv->hw_info->cpu_port);
- err = -EINVAL;
+ err = dev_err_probe(dev, -EINVAL,
+ "wrong CPU port defined, HW only supports port: %i\n",
+ priv->hw_info->cpu_port);
goto disable_switch;
}
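
Several hunks above replace open-coded shifts such as (mac_bridge.val[0] & GENMASK(7, 4)) >> 4 with named field masks and FIELD_GET()/FIELD_PREP(). The snippet below shows the equivalence with simplified stand-in macros (GENMASK_U32, FIELD_GET_U32, FIELD_PREP_U32 are not the kernel macros, which live in <linux/bits.h> and <linux/bitfield.h> and add compile-time checks); it also assumes a GCC/Clang __builtin_ctz().

#include <assert.h>
#include <stdint.h>

#define GENMASK_U32(h, l)  (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))
#define FIELD_SHIFT(mask)  (__builtin_ctz(mask))
#define FIELD_GET_U32(mask, reg)   (((reg) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP_U32(mask, val)  (((val) << FIELD_SHIFT(mask)) & (mask))

#define BRIDGE_VAL0_PORT  GENMASK_U32(7, 4)   /* port on learned entries */

int main(void)
{
	uint32_t val0 = FIELD_PREP_U32(BRIDGE_VAL0_PORT, 5); /* encode port 5 */

	assert(val0 == 0x50);
	assert(FIELD_GET_U32(BRIDGE_VAL0_PORT, val0) == 5);
	/* Equivalent to the open-coded form the patch removes: */
	assert(((val0 & GENMASK_U32(7, 4)) >> 4) == 5);
	return 0;
}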
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index f8ad7833f5d9..425e20daf1e9 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -355,10 +355,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ ksz_write8(dev, REG_SW_LUE_CTRL_1,
+ SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
@@ -429,6 +427,57 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u32 pmavbc;
+ u8 status;
+ u16 pqm;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+ if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) &&
+ !(status & PORT_INTF_FULL_DUPLEX)) {
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states.
+ * If you see this message, please read the errata-sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To work around this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
+ if (ret)
+ return ret;
+ if (status & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
+ if (ret)
+ return ret;
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
+ }
+ }
+ }
+ return ret;
+}
+
void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -1299,6 +1348,10 @@ int ksz9477_setup(struct dsa_switch *ds)
/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+ /* Use collision based back pressure mode. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
+ SW_BACK_PRESSURE_COLLISION);
+
/* Now we can configure default MTU value */
ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index ce1e656b800b..239a281da10b 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -36,6 +36,8 @@ int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
bool ingress, struct netlink_ext_ack *extack);
void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col);
void ksz9477_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 82bebee4615c..7d7560f23a73 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -72,8 +72,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
- { "ksz9477-switch", 0 },
- {},
+ { "ksz9477-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index f3a205ee483f..d5354c600ea1 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -247,6 +247,7 @@
#define REG_SW_MAC_CTRL_1 0x0331
#define SW_BACK_PRESSURE BIT(5)
+#define SW_BACK_PRESSURE_COLLISION 0
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_JUMBO_PACKET BIT(2)
@@ -842,8 +843,8 @@
#define REG_PORT_STATUS_0 0x0030
-#define PORT_INTF_SPEED_M 0x3
-#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_SPEED_MASK GENMASK(4, 3)
+#define PORT_INTF_SPEED_NONE GENMASK(1, 0)
#define PORT_INTF_FULL_DUPLEX BIT(2)
#define PORT_TX_FLOW_CTRL BIT(1)
#define PORT_RX_FLOW_CTRL BIT(0)
@@ -1167,6 +1168,11 @@
#define PORT_RMII_CLK_SEL BIT(7)
#define PORT_MII_SEL_EDGE BIT(5)
+#define REG_PMAVBC 0x03AC
+
+#define PMAVBC_MASK GENMASK(26, 16)
+#define PMAVBC_MIN 0x580
+
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1494,6 +1500,7 @@
#define PORT_QM_TX_CNT_USED_S 0
#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+#define PORT_QM_TX_CNT_MAX 0x200
#define REG_PORT_QM_TX_CNT_1__4 0x0A14
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 1e0085cd9a9a..b074b4bb0629 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1382,6 +1382,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1416,6 +1417,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_ipms = 8,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1450,6 +1452,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_ipms = 8,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1540,6 +1543,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1820,6 +1824,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
+ int ret;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
@@ -1861,6 +1866,12 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
pstats->rx_pause_frames = raw->rx_pause;
spin_unlock(&mib->stats64_lock);
+
+ if (dev->info->phy_errata_9477) {
+ ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
+ if (ret)
+ dev_err(dev->dev, "Failed to monitor transmission halt\n");
+ }
}
void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
@@ -2185,7 +2196,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
struct ksz_device *dev = kirq->dev;
int ret;
- ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");
@@ -3105,7 +3116,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port,
/* On KSZ9893, disable RGMII in-band status support */
if (dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
- dev->chip_id == KSZ9563_CHIP_ID)
+ dev->chip_id == KSZ9563_CHIP_ID ||
+ is_lan937x(dev))
data8 &= ~P_MII_MAC_MODE;
break;
default:
@@ -3142,7 +3154,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
else
interface = PHY_INTERFACE_MODE_MII;
} else if (val == bitval[P_RMII_SEL]) {
- interface = PHY_INTERFACE_MODE_RGMII;
+ interface = PHY_INTERFACE_MODE_RMII;
} else {
interface = PHY_INTERFACE_MODE_RGMII;
if (data8 & P_RGMII_ID_EG_ENABLE)
@@ -3906,6 +3918,13 @@ static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr,
return -EOPNOTSUPP;
}
+ /* KSZ9477 can only perform HSR offloading for up to two ports */
+ if (hweight8(dev->hsr_ports) >= 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than two ports - using software HSR");
+ return -EOPNOTSUPP;
+ }
+
/* Self MAC address filtering, to avoid frames traversing
* the HSR ring more than once.
*/
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index c784fd23a993..5f0a628b9849 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -22,6 +22,7 @@
/* all KSZ switches count ports from 1 */
#define KSZ_PORT_1 0
#define KSZ_PORT_2 1
+#define KSZ_PORT_4 3
struct ksz_device;
struct ksz_port;
@@ -66,6 +67,7 @@ struct ksz_chip_data {
bool tc_cbs_supported;
const struct ksz_dev_ops *ops;
const struct phylink_mac_ops *phylink_mac_ops;
+ bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
@@ -636,6 +638,12 @@ static inline int is_lan937x(struct ksz_device *dev)
dev->chip_id == LAN9374_CHIP_ID;
}
+static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
+{
+ return (dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4;
+}
+
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 1fe105913c75..f0bd46e5d4ec 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -293,7 +293,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
/* The function returns the capability of the timestamping feature when
* requested through the ethtool -T <interface> utility
*/
-int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
+int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
{
struct ksz_device *dev = ds->priv;
struct ksz_ptp_data *ptp_data;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
index 0ca8ca4f804e..2f1783c0d723 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.h
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -38,7 +38,7 @@ int ksz_ptp_clock_register(struct dsa_switch *ds);
void ksz_ptp_clock_unregister(struct dsa_switch *ds);
int ksz_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index b479a628b1ae..824d9309a3d3 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -55,6 +55,9 @@ static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
u16 temp;
+ if (is_lan937x_tx_phy(dev, addr))
+ addr_base = REG_PORT_TX_PHY_CTRL_BASE;
+
/* get register address based on the logical port */
temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
@@ -320,6 +323,9 @@ void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
/* MII/RMII/RGMII ports */
config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_100HD | MAC_10 | MAC_1000FD;
+ } else if (is_lan937x_tx_phy(dev, port)) {
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10;
}
}
@@ -370,23 +376,33 @@ int lan937x_setup(struct dsa_switch *ds)
ds->vlan_filtering_is_global = true;
/* Enable aggressive back off for half duplex & UNH mode */
- lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
- (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
- true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_0, (SW_PAUSE_UNH_MODE |
+ SW_NEW_BACKOFF |
+ SW_AGGR_BACKOFF), true);
+ if (ret < 0)
+ return ret;
/* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
* packets when 16 or more collisions occur
*/
- lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+ if (ret < 0)
+ return ret;
/* enable global MIB counter freeze function */
- lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+ if (ret < 0)
+ return ret;
/* disable CLK125 & CLK25, 1: disable, 0: enable */
- lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- (SW_CLK125_ENB | SW_CLK25_ENB), true);
+ ret = lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+ if (ret < 0)
+ return ret;
- return 0;
+ /* Disable global VPHY support. Related to CPU interface only? */
+ return ksz_rmw32(dev, REG_SW_CFG_STRAP_OVR, SW_VPHY_DISABLE,
+ SW_VPHY_DISABLE);
}
void lan937x_teardown(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 45b606b6429f..2f22a9d01de3 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -37,6 +37,10 @@
#define SW_CLK125_ENB BIT(1)
#define SW_CLK25_ENB BIT(0)
+/* 2 - PHY Control */
+#define REG_SW_CFG_STRAP_OVR 0x0214
+#define SW_VPHY_DISABLE BIT(31)
+
/* 3 - Operation Control */
#define REG_SW_OPERATION 0x0300
@@ -147,6 +151,7 @@
/* 1 - Phy */
#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+#define REG_PORT_TX_PHY_CTRL_BASE 0x0280
/* 3 - xMII */
#define PORT_SGMII_SEL BIT(7)
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 598434d8d6e4..ec18e68bf3a8 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1302,13 +1302,62 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
FID_PST(FID_BRIDGED, stp_state));
}
+static void mt7530_update_port_member(struct mt7530_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join) __must_hold(&priv->reg_mutex)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ struct mt7530_port *p = &priv->ports[port], *other_p;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
+ int other_port;
+ bool isolated;
+
+ dsa_switch_for_each_user_port(other_dp, priv->ds) {
+ other_port = other_dp->index;
+ other_p = &priv->ports[other_port];
+
+ if (dp == other_dp)
+ continue;
+
+ /* Add/remove this port to/from the port matrix of the other
+ * ports in the same bridge. If the port is disabled, port
+ * matrix is kept and not set up until the port becomes
+ * enabled.
+ */
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
+ isolated = p->isolated && other_p->isolated;
+
+ if (join && !isolated) {
+ other_p->pm |= PCR_MATRIX(BIT(port));
+ port_bitmap |= BIT(other_port);
+ } else {
+ other_p->pm &= ~PCR_MATRIX(BIT(port));
+ }
+
+ if (other_p->enable)
+ mt7530_rmw(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX_MASK, other_p->pm);
+ }
+
+ /* Add/remove all the other ports to/from this port matrix. For !join
+ * (leaving the bridge), only the CPU port will remain in the port matrix
+ * of this port.
+ */
+ p->pm = PCR_MATRIX(port_bitmap);
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, p->pm);
+}
+
static int
mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -1337,6 +1386,17 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)),
flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ priv->ports[port].isolated = !!(flags.val & BR_ISOLATED);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_update_port_member(priv, port, bridge_dev, true);
+ mutex_unlock(&priv->reg_mutex);
+ }
+
return 0;
}
@@ -1345,39 +1405,11 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- struct dsa_port *cpu_dp = dp->cpu_dp;
- u32 port_bitmap = BIT(cpu_dp->index);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
- dsa_switch_for_each_user_port(other_dp, ds) {
- int other_port = other_dp->index;
-
- if (dp == other_dp)
- continue;
-
- /* Add this port to the port matrix of the other ports in the
- * same bridge. If the port is disabled, port matrix is kept
- * and not being setup until the port becomes enabled.
- */
- if (!dsa_port_offloads_bridge(other_dp, &bridge))
- continue;
-
- if (priv->ports[other_port].enable)
- mt7530_set(priv, MT7530_PCR_P(other_port),
- PCR_MATRIX(BIT(port)));
- priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
-
- port_bitmap |= BIT(other_port);
- }
-
- /* Add the all other ports to this port matrix. */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
- priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+ mt7530_update_port_member(priv, port, bridge.dev, true);
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
@@ -1478,38 +1510,11 @@ static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- struct dsa_port *cpu_dp = dp->cpu_dp;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
- dsa_switch_for_each_user_port(other_dp, ds) {
- int other_port = other_dp->index;
-
- if (dp == other_dp)
- continue;
-
- /* Remove this port from the port matrix of the other ports
- * in the same bridge. If the port is disabled, port matrix
- * is kept and not being setup until the port becomes enabled.
- */
- if (!dsa_port_offloads_bridge(other_dp, &bridge))
- continue;
-
- if (priv->ports[other_port].enable)
- mt7530_clear(priv, MT7530_PCR_P(other_port),
- PCR_MATRIX(BIT(port)));
- priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
- }
-
- /* Set the cpu port to be the only one in the port matrix of
- * this port.
- */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
- PCR_MATRIX(BIT(cpu_dp->index)));
- priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
+ mt7530_update_port_member(priv, port, bridge.dev, false);
/* When a port is removed from the bridge, the port would be set up
* back to the default as is at initial boot which is a VLAN-unaware
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 2ea4e24628c6..28592123070b 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -721,6 +721,7 @@ struct mt7530_fdb {
*/
struct mt7530_port {
bool enable;
+ bool isolated;
u32 pm;
u16 pvid;
struct phylink_pcs *sgmii_pcs;
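
The mt7530 changes above consolidate the bridge join/leave logic into mt7530_update_port_member() and add BR_ISOLATED support: two ports that are both marked isolated are not entered into each other's port matrix, while the CPU port always stays reachable. A toy model of that bookkeeping (struct port, update_member() and the fixed CPU_PORT value are illustrative simplifications, not the driver's data structures):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CPU_PORT 6

struct port {
	bool in_bridge;
	bool isolated;
	uint32_t pm;            /* forwarding bitmap, analogous to p->pm */
};

static void update_member(struct port *ports, int nports, int port, bool join)
{
	uint32_t bitmap = 1u << CPU_PORT;   /* CPU port always reachable */

	for (int other = 0; other < nports; other++) {
		if (other == port || !ports[other].in_bridge)
			continue;

		bool isolated = ports[port].isolated && ports[other].isolated;

		if (join && !isolated) {
			ports[other].pm |= 1u << port;
			bitmap |= 1u << other;
		} else {
			ports[other].pm &= ~(1u << port);
		}
	}
	ports[port].pm = bitmap;
}

int main(void)
{
	struct port ports[7] = { 0 };

	ports[1].in_bridge = ports[2].in_bridge = true;
	update_member(ports, 7, 1, true);   /* port 1 joins the bridge */
	assert(ports[1].pm & (1u << 2));    /* can reach port 2 */
	assert(ports[2].pm & (1u << 1));    /* and vice versa */

	update_member(ports, 7, 1, false);  /* port 1 leaves the bridge */
	assert(ports[1].pm == (1u << CPU_PORT));
	assert(!(ports[2].pm & (1u << 1)));
	return 0;
}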
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 331b4ca089ff..49e6e1355142 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -64,7 +64,7 @@ static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
const struct mv88e6xxx_ptp_ops *ptp_ops;
struct mv88e6xxx_chip *chip;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index cf7fb6d660b1..85acc758e3eb 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -121,7 +121,7 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
@@ -157,7 +157,7 @@ static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
}
static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 61e95487732d..e554699f06d4 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1050,24 +1050,32 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
}
-static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
if (felix->info->phylink_mac_config)
felix->info->phylink_mac_config(ocelot, port, mode, state);
}
-static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
- int port,
- phy_interface_t iface)
+static struct phylink_pcs *
+felix_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
if (felix->pcs && felix->pcs[port])
pcs = felix->pcs[port];
@@ -1075,11 +1083,13 @@ static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
return pcs;
}
-static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_down(struct phylink_config *config,
unsigned int link_an_mode,
phy_interface_t interface)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
struct felix *felix;
felix = ocelot_to_felix(ocelot);
@@ -1088,15 +1098,19 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
felix->info->quirks);
}
-static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int link_an_mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
@@ -1220,7 +1234,7 @@ static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
static int felix_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ocelot *ocelot = ds->priv;
@@ -1583,6 +1597,15 @@ static int felix_setup(struct dsa_switch *ds)
felix_port_qos_map_init(ocelot, dp->index);
}
+ if (felix->info->request_irq) {
+ err = felix->info->request_irq(ocelot);
+ if (err) {
+ dev_err(ocelot->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(err));
+ goto out_deinit_ports;
+ }
+ }
+
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
@@ -2083,7 +2106,14 @@ static void felix_get_mm_stats(struct dsa_switch *ds, int port,
ocelot_port_get_mm_stats(ocelot, port, stats);
}
-const struct dsa_switch_ops felix_switch_ops = {
+static const struct phylink_mac_ops felix_phylink_mac_ops = {
+ .mac_select_pcs = felix_phylink_mac_select_pcs,
+ .mac_config = felix_phylink_mac_config,
+ .mac_link_down = felix_phylink_mac_link_down,
+ .mac_link_up = felix_phylink_mac_link_up,
+};
+
+static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
.connect_tag_protocol = felix_connect_tag_protocol,
@@ -2104,10 +2134,6 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_get_caps = felix_phylink_get_caps,
- .phylink_mac_config = felix_phylink_mac_config,
- .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
- .phylink_mac_link_down = felix_phylink_mac_link_down,
- .phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
@@ -2166,7 +2192,53 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_set_host_flood = felix_port_set_host_flood,
.port_change_conduit = felix_port_change_conduit,
};
-EXPORT_SYMBOL_GPL(felix_switch_ops);
+
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info)
+{
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+ ocelot->num_flooding_pgids = num_flooding_pgids;
+ ocelot->ptp = ptp;
+ ocelot->mm_supported = mm_supported;
+
+ felix->info = info;
+ felix->switch_base = switch_base;
+ felix->ds = ds;
+ felix->tag_proto = init_tag_proto;
+
+ ds->dev = dev;
+ ds->num_ports = info->num_ports;
+ ds->num_tx_queues = OCELOT_NUM_TC;
+ ds->ops = &felix_switch_ops;
+ ds->phylink_mac_ops = &felix_phylink_mac_ops;
+ ds->priv = ocelot;
+
+ err = dsa_register_switch(ds);
+ if (err)
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(felix_register_switch);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index dbf5872fe367..211991f494e3 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -32,7 +32,6 @@ struct felix_info {
const u32 *port_modes;
int num_mact_rows;
int num_ports;
- int num_tx_queues;
struct vcap_props *vcap;
u16 vcap_pol_base;
u16 vcap_pol_max;
@@ -64,6 +63,7 @@ struct felix_info {
const struct phylink_link_state *state);
int (*configure_serdes)(struct ocelot *ocelot, int port,
struct device_node *portnp);
+ int (*request_irq)(struct ocelot *ocelot);
};
/* Methods for initializing the hardware resources specific to a tagging
@@ -82,8 +82,6 @@ struct felix_tag_proto_ops {
struct netlink_ext_ack *extack);
};
-extern const struct dsa_switch_ops felix_switch_ops;
-
/* DSA glue / front-end for struct ocelot */
struct felix {
struct dsa_switch *ds;
@@ -99,6 +97,11 @@ struct felix {
unsigned long host_flood_mc_mask;
};
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
int felix_netdev_to_port(struct net_device *dev);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 85952d841f28..ba37a566da39 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2605,6 +2605,28 @@ set:
}
}
+/* The INTB interrupt is shared between PTP TX timestamp availability
+ * notification and MAC Merge status change on each port.
+ */
+static irqreturn_t vsc9959_irq_handler(int irq, void *data)
+{
+ struct ocelot *ocelot = data;
+
+ ocelot_get_txtstamp(ocelot);
+ ocelot_mm_irq(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static int vsc9959_request_irq(struct ocelot *ocelot)
+{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
+
+ return devm_request_threaded_irq(ocelot->dev, pdev->irq, NULL,
+ &vsc9959_irq_handler, IRQF_ONESHOT,
+ "felix-intb", ocelot);
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
@@ -2636,7 +2658,6 @@ static const struct felix_info felix_info_vsc9959 = {
.vcap_pol_max2 = 0,
.num_mact_rows = 2048,
.num_ports = VSC9959_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
@@ -2645,98 +2666,36 @@ static const struct felix_info felix_info_vsc9959 = {
.port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .request_irq = vsc9959_request_irq,
};
-/* The INTB interrupt is shared between for PTP TX timestamp availability
- * notification and MAC Merge status change on each port.
- */
-static irqreturn_t felix_irq_handler(int irq, void *data)
-{
- struct ocelot *ocelot = (struct ocelot *)data;
-
- ocelot_get_txtstamp(ocelot);
- ocelot_mm_irq(ocelot);
-
- return IRQ_HANDLED;
-}
-
static int felix_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
- struct felix *felix;
+ struct device *dev = &pdev->dev;
+ resource_size_t switch_base;
int err;
- if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
- dev_info(&pdev->dev, "device is disabled, skipping\n");
- return -ENODEV;
- }
-
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- goto err_pci_enable;
- }
-
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
+ dev_err(dev, "device enable failed: %pe\n", ERR_PTR(err));
+ return err;
}
- pci_set_drvdata(pdev, felix);
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = OCELOT_NUM_TC;
- felix->info = &felix_info_vsc9959;
- felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
-
pci_set_master(pdev);
- err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
- &felix_irq_handler, IRQF_ONESHOT,
- "felix-intb", ocelot);
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq\n");
- goto err_alloc_irq;
- }
-
- ocelot->ptp = 1;
- ocelot->mm_supported = true;
-
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
- }
+ switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->num_tx_queues = felix->info->num_tx_queues;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_OCELOT;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
- goto err_register_ds;
- }
+ err = felix_register_switch(dev, switch_base, OCELOT_NUM_TC,
+ true, true, DSA_TAG_PROTO_OCELOT,
+ &felix_info_vsc9959);
+ if (err)
+ goto out_disable;
return 0;
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_irq:
- kfree(felix);
-err_alloc_felix:
+out_disable:
pci_disable_device(pdev);
-err_pci_enable:
return err;
}
@@ -2749,9 +2708,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
dsa_unregister_switch(felix->ds);
- kfree(felix->ds);
- kfree(felix);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
index 22187d831c4b..5632a7248cd4 100644
--- a/drivers/net/dsa/ocelot/ocelot_ext.c
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -57,7 +57,6 @@ static const struct felix_info vsc7512_info = {
.vcap = vsc7514_vcap_props,
.num_mact_rows = 1024,
.num_ports = VSC7514_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.port_modes = vsc7512_port_modes,
.phylink_mac_config = ocelot_phylink_mac_config,
.configure_serdes = ocelot_port_configure_serdes,
@@ -65,54 +64,8 @@ static const struct felix_info vsc7512_info = {
static int ocelot_ext_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct dsa_switch *ds;
- struct ocelot *ocelot;
- struct felix *felix;
- int err;
-
- felix = kzalloc(sizeof(*felix), GFP_KERNEL);
- if (!felix)
- return -ENOMEM;
-
- dev_set_drvdata(dev, felix);
-
- ocelot = &felix->ocelot;
- ocelot->dev = dev;
-
- ocelot->num_flooding_pgids = 1;
-
- felix->info = &vsc7512_info;
-
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
- goto err_free_felix;
- }
-
- ds->dev = dev;
- ds->num_ports = felix->info->num_ports;
- ds->num_tx_queues = felix->info->num_tx_queues;
-
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_OCELOT;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err_probe(dev, err, "Failed to register DSA switch\n");
- goto err_free_ds;
- }
-
- return 0;
-
-err_free_ds:
- kfree(ds);
-err_free_felix:
- kfree(felix);
- return err;
+ return felix_register_switch(&pdev->dev, 0, 1, false, false,
+ DSA_TAG_PROTO_OCELOT, &vsc7512_info);
}
static void ocelot_ext_remove(struct platform_device *pdev)
@@ -123,9 +76,6 @@ static void ocelot_ext_remove(struct platform_device *pdev)
return;
dsa_unregister_switch(felix->ds);
-
- kfree(felix->ds);
- kfree(felix);
}
static void ocelot_ext_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 049930da0521..70782649c395 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -963,7 +963,6 @@ static const struct felix_info seville_info_vsc9953 = {
.quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
.num_ports = VSC9953_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.port_modes = vsc9953_port_modes,
@@ -971,62 +970,18 @@ static const struct felix_info seville_info_vsc9953 = {
static int seville_probe(struct platform_device *pdev)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
+ struct device *dev = &pdev->dev;
struct resource *res;
- struct felix *felix;
- int err;
-
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
- }
-
- platform_set_drvdata(pdev, felix);
-
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = 1;
- felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Invalid resource\n");
- goto err_alloc_felix;
- }
- felix->switch_base = res->start;
-
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
- }
-
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_SEVILLE;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
- goto err_register_ds;
+ dev_err(dev, "Invalid resource\n");
+ return -EINVAL;
}
- return 0;
-
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_felix:
- kfree(felix);
- return err;
+ return felix_register_switch(dev, res->start, 1, false, false,
+ DSA_TAG_PROTO_SEVILLE,
+ &seville_info_vsc9953);
}
static void seville_remove(struct platform_device *pdev)
@@ -1037,9 +992,6 @@ static void seville_remove(struct platform_device *pdev)
return;
dsa_unregister_switch(felix->ds);
-
- kfree(felix->ds);
- kfree(felix);
}
static void seville_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 968cb81088bf..e9f2c67bc15f 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1021,7 +1021,7 @@ static const struct regmap_config ar9331_mdio_regmap_config = {
.cache_type = REGCACHE_MAPLE,
};
-static struct regmap_bus ar9331_sw_bus = {
+static const struct regmap_bus ar9331_sw_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.read = ar9331_mdio_read,
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index b3c27cf538e8..f8d8c70642c4 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -565,7 +565,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
}
-static struct regmap_config qca8k_regmap_config = {
+static const struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 7f80035c5441..560c74c4ac3d 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -614,11 +614,57 @@ void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
qca8k_port_configure_learning(ds, port, learning);
}
+static int qca8k_update_port_member(struct qca8k_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join)
+{
+ bool isolated = !!(priv->port_isolated_map & BIT(port)), other_isolated;
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ u32 port_mask = BIT(dp->cpu_dp->index);
+ int i, ret;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (i == port)
+ continue;
+ if (dsa_is_cpu_port(priv->ds, i))
+ continue;
+
+ other_dp = dsa_to_port(priv->ds, i);
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
+ other_isolated = !!(priv->port_isolated_map & BIT(i));
+
+ /* Add/remove this port to/from the portvlan mask of the other
+ * ports in the bridge
+ */
+ if (join && !(isolated && other_isolated)) {
+ port_mask |= BIT(i);
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ } else {
+ ret = regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ /* Add/remove all other ports to/from this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~BR_LEARNING)
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -628,6 +674,7 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
+ struct qca8k_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
@@ -637,6 +684,20 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
return ret;
}
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ if (flags.val & BR_ISOLATED)
+ priv->port_isolated_map |= BIT(port);
+ else
+ priv->port_isolated_map &= ~BIT(port);
+
+ ret = qca8k_update_port_member(priv, port, bridge_dev, true);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -646,62 +707,21 @@ int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = regmap_set_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
- return ret;
+ return qca8k_update_port_member(priv, port, bridge.dev, true);
}
void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct qca8k_priv *priv = ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
- regmap_clear_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
+ int err;
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+ err = qca8k_update_port_member(priv, port, bridge.dev, false);
+ if (err)
+ dev_err(priv->dev,
+ "Failed to update switch config for bridge leave: %d\n",
+ err);
}
void qca8k_port_fast_age(struct dsa_switch *ds, int port)
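/* Editor's note: illustrative sketch only, not part of this patch. It restates
 * the forwarding rule encoded by qca8k_update_port_member() above: two user
 * ports exchange traffic only when they offload the same bridge and are not
 * both marked isolated in port_isolated_map.
 */
static bool qca8k_ports_may_forward(struct qca8k_priv *priv,
				    struct dsa_port *dp,
				    struct dsa_port *other_dp,
				    const struct net_device *bridge_dev)
{
	bool isolated = !!(priv->port_isolated_map & BIT(dp->index));
	bool other_isolated = !!(priv->port_isolated_map & BIT(other_dp->index));

	if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
		return false;

	return !(isolated && other_isolated);
}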
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 811ebeeff4ed..43ac68052baf 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
priv->internal_mdio_bus->id,
port_num);
- if (!init_data.devicename)
+ if (!init_data.devicename) {
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
return -ENOMEM;
+ }
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
kfree(init_data.devicename);
}
+ fwnode_handle_put(leds);
return 0;
}
@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
* the correct port for LED setup.
*/
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
- if (ret)
+ if (ret) {
+ fwnode_handle_put(port);
+ fwnode_handle_put(ports);
return ret;
+ }
}
+ fwnode_handle_put(ports);
return 0;
}
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 2184d8d2d5a9..3664a2e2f1f6 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -451,6 +451,7 @@ struct qca8k_priv {
* Bit 1: port enabled. Bit 0: port disabled.
*/
u8 port_enabled_map;
+ u8 port_isolated_map;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index ee0fb1c343f1..c7282ce3d11c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2133,14 +2133,13 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
if (rc)
return rc;
- rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload,
+ extack);
if (rc) {
sja1105_bridge_member(ds, port, bridge, false);
return rc;
}
- *tx_fwd_offload = true;
-
return 0;
}
@@ -3167,8 +3166,7 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
ds->fdb_isolation = true;
- /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
- ds->max_num_bridges = 7;
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a7d41e781398..a1f4ca6ad888 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -111,7 +111,7 @@ int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 416461ee95d2..8add2bd5f728 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -101,7 +101,7 @@ void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
struct sk_buff *clone);
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 4b031fefcec6..d9d3e30fd47a 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -22,9 +22,11 @@
#include <linux/of_mdio.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/dsa/8021q.h>
#include <linux/random.h>
#include <net/dsa.h>
@@ -62,6 +64,8 @@
#define VSC73XX_CAT_DROP 0x6e
#define VSC73XX_CAT_PR_MISC_L2 0x6f
#define VSC73XX_CAT_PR_USR_PRIO 0x75
+#define VSC73XX_CAT_VLAN_MISC 0x79
+#define VSC73XX_CAT_PORT_VLAN 0x7a
#define VSC73XX_Q_MISC_CONF 0xdf
/* MAC_CFG register bits */
@@ -122,6 +126,17 @@
#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+/* TXUPDCFG transmit modify setup bits */
+#define VSC73XX_TXUPDCFG_DSCP_REWR_MODE GENMASK(20, 19)
+#define VSC73XX_TXUPDCFG_DSCP_REWR_ENA BIT(18)
+#define VSC73XX_TXUPDCFG_TX_INT_TO_USRPRIO_ENA BIT(17)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID GENMASK(15, 4)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA BIT(3)
+#define VSC73XX_TXUPDCFG_TX_UPDATE_CRC_CPU_ENA BIT(1)
+#define VSC73XX_TXUPDCFG_TX_INSERT_TAG BIT(0)
+
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT 4
+
/* CAT_DROP categorizer frame dropping register bits */
#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
@@ -135,6 +150,15 @@
#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
+/* CAT_VLAN_MISC categorizer VLAN miscellaneous bits */
+#define VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA BIT(8)
+#define VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA BIT(7)
+
+/* CAT_PORT_VLAN categorizer port VLAN */
+#define VSC73XX_CAT_PORT_VLAN_VLAN_CFI BIT(15)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_USR_PRIO GENMASK(14, 12)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_VID GENMASK(11, 0)
+
/* Frame analyzer block 2 registers */
#define VSC73XX_STORMLIMIT 0x02
#define VSC73XX_ADVLEARN 0x03
@@ -164,6 +188,10 @@
#define VSC73XX_AGENCTRL 0xf0
#define VSC73XX_CAPRST 0xff
+#define VSC73XX_SRCMASKS_CPU_COPY BIT(27)
+#define VSC73XX_SRCMASKS_MIRROR BIT(26)
+#define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0)
+
#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
@@ -185,7 +213,8 @@
#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
-#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT 2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(1, 0)
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
@@ -343,6 +372,17 @@ static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
{ 29, "TxQoSClass3" }, /* non-standard counter */
};
+struct vsc73xx_vlan_summary {
+ size_t num_tagged;
+ size_t num_untagged;
+};
+
+enum vsc73xx_port_vlan_conf {
+ VSC73XX_VLAN_FILTER,
+ VSC73XX_VLAN_FILTER_UNTAG_ALL,
+ VSC73XX_VLAN_IGNORE,
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock)
{
switch (block) {
@@ -557,16 +597,103 @@ static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
* cannot access the tag. (See "Internal frame header" section
* 3.9.1 in the manual.)
*/
- return DSA_TAG_PROTO_NONE;
+ return DSA_TAG_PROTO_VSC73XX_8021Q;
+}
+
+static int vsc73xx_wait_for_vlan_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK) ==
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_VLANACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int
+vsc73xx_read_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 *portmap)
+{
+ u32 val;
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, &val);
+ *portmap = (val & VSC73XX_VLANACCESS_VLAN_PORT_MASK) >>
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT;
+
+ return 0;
+}
+
+static int
+vsc73xx_write_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 portmap)
+{
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ (portmap << VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT));
+
+ return vsc73xx_wait_for_vlan_table_cmd(vsc);
+}
+
+static int
+vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u8 portmap;
+ int ret;
+
+ ret = vsc73xx_read_vlan_table_entry(vsc, vid, &portmap);
+ if (ret)
+ return ret;
+
+ if (set)
+ portmap |= BIT(port);
+ else
+ portmap &= ~BIT(port);
+
+ return vsc73xx_write_vlan_table_entry(vsc, vid, portmap);
}
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
- int i;
+ int i, ret;
dev_info(vsc->dev, "set up the switch\n");
+ ds->untag_bridge_pvid = true;
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
+
/* Issue RESET */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_MASTER_RESET);
@@ -594,7 +721,7 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MACACCESS,
VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
- /* Clear VLAN table */
+ /* Set VLAN table to default values */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_VLANACCESS,
VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
@@ -623,9 +750,9 @@ static int vsc73xx_setup(struct dsa_switch *ds)
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
- /* Enable reception of frames on all ports */
- vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
- 0x5f);
+ /* Ingress VLAN reception mask (table 145) */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK,
+ 0xff);
/* IP multicast flood mask (table 144) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
0xff);
@@ -638,7 +765,24 @@ static int vsc73xx_setup(struct dsa_switch *ds)
udelay(4);
- return 0;
+ /* Clear VLAN table */
+ for (i = 0; i < VLAN_N_VID; i++)
+ vsc73xx_write_vlan_table_entry(vsc, i, 0);
+
+ INIT_LIST_HEAD(&vsc->vlans);
+
+ rtnl_lock();
+ ret = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
+ rtnl_unlock();
+
+ return ret;
+}
+
+static void vsc73xx_teardown(struct dsa_switch *ds)
+{
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
}
static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
@@ -788,10 +932,6 @@ static void vsc73xx_mac_link_down(struct phylink_config *config,
/* Allow backward dropping of frames from this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_SBACKWDROP, BIT(port), BIT(port));
-
- /* Receive mask (disable forwarding) */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), 0);
}
static void vsc73xx_mac_link_up(struct phylink_config *config,
@@ -828,6 +968,12 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
val |= VSC73XX_MAC_CFG_WEXC_DIS;
+
+ /* Those bits are responsible for MTU only. The kernel takes care of the MTU,
+ * so let's enable the +8 byte frame length unconditionally.
+ */
+ val |= VSC73XX_MAC_CFG_VLAN_AWR | VSC73XX_MAC_CFG_VLAN_DBLAWR;
+
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
/* Flow control for the PHY facing ports:
@@ -844,10 +990,6 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_ARBDISC, BIT(port), 0);
- /* Enable port (forwarding) in the receive mask */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), BIT(port));
-
/* Disallow backward dropping of frames from this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_SBACKWDROP, BIT(port), 0);
@@ -860,6 +1002,257 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
}
+static bool vsc73xx_tag_8021q_active(struct dsa_port *dp)
+{
+ return !dsa_port_is_vlan_filtering(dp);
+}
+
+static struct vsc73xx_bridge_vlan *
+vsc73xx_bridge_vlan_find(struct vsc73xx *vsc, u16 vid)
+{
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static void
+vsc73xx_bridge_vlan_remove_port(struct vsc73xx_bridge_vlan *vsc73xx_vlan,
+ int port)
+{
+ vsc73xx_vlan->portmask &= ~BIT(port);
+
+ if (vsc73xx_vlan->portmask)
+ return;
+
+ list_del(&vsc73xx_vlan->list);
+ kfree(vsc73xx_vlan);
+}
+
+static void vsc73xx_bridge_vlan_summary(struct vsc73xx *vsc, int port,
+ struct vsc73xx_vlan_summary *summary,
+ u16 ignored_vid)
+{
+ size_t num_tagged = 0, num_untagged = 0;
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list) {
+ if (!(vlan->portmask & BIT(port)) || vlan->vid == ignored_vid)
+ continue;
+
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ else
+ num_tagged++;
+ }
+
+ summary->num_untagged = num_untagged;
+ summary->num_tagged = num_tagged;
+}
+
+static u16 vsc73xx_find_first_vlan_untagged(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if ((vlan->portmask & BIT(port)) &&
+ (vlan->untagged & BIT(port)))
+ return vlan->vid;
+
+ return VLAN_N_VID;
+}
+
+static int vsc73xx_set_vlan_conf(struct vsc73xx *vsc, int port,
+ enum vsc73xx_port_vlan_conf port_vlan_conf)
+{
+ u32 val = 0;
+ int ret;
+
+ if (port_vlan_conf == VSC73XX_VLAN_IGNORE)
+ val = VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_VLAN_MISC,
+ VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA, val);
+ if (ret)
+ return ret;
+
+ val = (port_vlan_conf == VSC73XX_VLAN_FILTER) ?
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG : 0;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_conf - Update VLAN configuration of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the VLAN behavior of a port to make sure that when it is under
+ * a VLAN filtering bridge, the port is either filtering with tag
+ * preservation, or filtering with all VLANs egress-untagged. Otherwise,
+ * the port ignores VLAN tags from packets and applies the port-based
+ * VID.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_conf(struct vsc73xx *vsc, int port)
+{
+ enum vsc73xx_port_vlan_conf port_vlan_conf = VSC73XX_VLAN_IGNORE;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+
+ if (port == CPU_PORT) {
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+ } else if (dsa_port_is_vlan_filtering(dp)) {
+ struct vsc73xx_vlan_summary summary;
+
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+ if (summary.num_tagged == 0)
+ port_vlan_conf = VSC73XX_VLAN_FILTER_UNTAG_ALL;
+ }
+
+ return vsc73xx_set_vlan_conf(vsc, port, port_vlan_conf);
+}
+
+static int
+vsc73xx_vlan_change_untagged(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
+
+ if (set)
+ val = VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ ((vid << VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT) &
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID);
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_untagged - Update native VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the native VLAN of a port (the one VLAN which is transmitted
+ * as egress-untagged on a trunk port) when the port is in VLAN filtering mode
+ * and only one untagged VID is configured.
+ * In other cases there is no need to configure it, because the switch can
+ * untag all VLANs on the port.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_untagged(struct vsc73xx *vsc, int port)
+{
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ struct vsc73xx_vlan_summary summary;
+ u16 vid = 0;
+ bool valid;
+
+ if (!dsa_port_is_vlan_filtering(dp))
+ /* Port is configured to untag all vlans in that case.
+ * No need to commit untagged config change.
+ */
+ return 0;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+
+ if (summary.num_untagged > 1)
+ /* Port must untag all vlans in that case.
+ * No need to commit untagged config change.
+ */
+ return 0;
+
+ valid = (summary.num_untagged == 1);
+ if (valid)
+ vid = vsc73xx_find_first_vlan_untagged(vsc, port);
+
+ return vsc73xx_vlan_change_untagged(vsc, port, vid, valid);
+}
+
+static int
+vsc73xx_vlan_change_pvid(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
+ int ret;
+
+ val = set ? 0 : VSC73XX_CAT_DROP_UNTAGGED_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_DROP,
+ VSC73XX_CAT_DROP_UNTAGGED_ENA, val);
+ if (!set || ret)
+ return ret;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_PORT_VLAN,
+ VSC73XX_CAT_PORT_VLAN_VLAN_VID,
+ vid & VSC73XX_CAT_PORT_VLAN_VLAN_VID);
+}
+
+/**
+ * vsc73xx_vlan_commit_pvid - Update port-based default VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the PVID of a port so that it follows either the bridge PVID
+ * configuration, when the bridge is currently VLAN-aware, or the PVID
+ * from tag_8021q, when the port is standalone or under a VLAN-unaware
+ * bridge. A port with no PVID drops all untagged and VID 0 tagged
+ * traffic.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_pvid(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_portinfo *portinfo = &vsc->portinfo[port];
+ bool valid = portinfo->pvid_tag_8021q_configured;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ u16 vid = portinfo->pvid_tag_8021q;
+
+ if (dsa_port_is_vlan_filtering(dp)) {
+ vid = portinfo->pvid_vlan_filtering;
+ valid = portinfo->pvid_vlan_filtering_configured;
+ }
+
+ return vsc73xx_vlan_change_pvid(vsc, port, vid, valid);
+}
+
+static int vsc73xx_vlan_commit_settings(struct vsc73xx *vsc, int port)
+{
+ int ret;
+
+ ret = vsc73xx_vlan_commit_untagged(vsc, port);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_vlan_commit_pvid(vsc, port);
+ if (ret)
+ return ret;
+
+ return vsc73xx_vlan_commit_conf(vsc, port);
+}
+
static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
@@ -868,7 +1261,7 @@ static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
dev_info(vsc->dev, "enable port %d\n", port);
vsc73xx_init_port(vsc, port);
- return 0;
+ return vsc73xx_vlan_commit_settings(vsc, port);
}
static void vsc73xx_port_disable(struct dsa_switch *ds, int port)
@@ -1039,6 +1432,303 @@ static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;
}
+static int
+vsc73xx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering, struct netlink_ext_ack *extack)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ /* The commit to hardware below is required because vsc73xx uses
+ * tag_8021q. When vlan_filtering is disabled, tag_8021q uses
+ * pvid/untagged vlans for port recognition. The values configured for
+ * vlans and pvid/untagged states are stored in the portinfo structure.
+ * When vlan_filtering is enabled, the pvid/untagged state is restored
+ * from the portinfo structure. The analogous routine runs when
+ * vlan_filtering is disabled, but then the values used for tag_8021q
+ * are restored.
+ */
+
+ return vsc73xx_vlan_commit_settings(vsc, port);
+}
+
+static int vsc73xx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_vlan_summary summary;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret = 0;
+
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
+ */
+ if (vid_is_dsa_8021q(vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range 3072-4095 reserved for dsa_8021q operation");
+ return -EBUSY;
+ }
+
+ /* The processed vlan->vid is excluded from the search because the VLAN
+ * can be re-added with a different set of flags, so it's easiest to
+ * ignore its old flags from the VLAN database software copy.
+ */
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, vlan->vid);
+
+ /* VSC73XX allows only three untagged states: none, one or all */
+ if ((untagged && summary.num_tagged > 0 && summary.num_untagged > 0) ||
+ (!untagged && summary.num_untagged > 1)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port can have only none, one or all untagged vlan");
+ return -EBUSY;
+ }
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (!vsc73xx_vlan) {
+ vsc73xx_vlan = kzalloc(sizeof(*vsc73xx_vlan), GFP_KERNEL);
+ if (!vsc73xx_vlan)
+ return -ENOMEM;
+
+ vsc73xx_vlan->vid = vlan->vid;
+
+ list_add_tail(&vsc73xx_vlan->list, &vsc->vlans);
+ }
+
+ vsc73xx_vlan->portmask |= BIT(port);
+
+ /* The CPU port must always be tagged because source port identification is
+ * based on tag_8021q.
+ */
+ if (port == CPU_PORT)
+ goto update_vlan_table;
+
+ if (untagged)
+ vsc73xx_vlan->untagged |= BIT(port);
+ else
+ vsc73xx_vlan->untagged &= ~BIT(port);
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_vlan_filtering_configured = true;
+ portinfo->pvid_vlan_filtering = vlan->vid;
+ } else if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid) {
+ portinfo->pvid_vlan_filtering_configured = false;
+ }
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ goto err;
+ }
+
+update_vlan_table:
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, true);
+ if (!ret)
+ return 0;
+err:
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+ return ret;
+}
+
+static int vsc73xx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, false);
+ if (ret)
+ return ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid)
+ portinfo->pvid_vlan_filtering_configured = false;
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (vsc73xx_vlan)
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+
+ if (commit_to_hardware)
+ return vsc73xx_vlan_commit_settings(vsc, port);
+
+ return 0;
+}
+
+static int vsc73xx_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_tag_8021q_configured = true;
+ portinfo->pvid_tag_8021q = vid;
+ }
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ return ret;
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, true);
+}
+
+static int vsc73xx_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_tag_8021q_configured &&
+ portinfo->pvid_tag_8021q == vid) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ bool commit_to_hardware;
+ int err;
+
+ portinfo->pvid_tag_8021q_configured = false;
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ err = vsc73xx_vlan_commit_settings(vsc, port);
+ if (err)
+ return err;
+ }
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, false);
+}
+
+static int vsc73xx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vsc73xx_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & BR_LEARNING) {
+ u32 val = flags.val & BR_LEARNING ? BIT(port) : 0;
+ struct vsc73xx *vsc = ds->priv;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+ }
+
+ return 0;
+}
+
+static void vsc73xx_refresh_fwd_map(struct dsa_switch *ds, int port, u8 state)
+{
+ struct dsa_port *other_dp, *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u16 mask;
+
+ if (state != BR_STATE_FORWARDING) {
+ /* Ports that aren't in the forwarding state must not
+ * forward packets anywhere.
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, 0);
+
+ dsa_switch_for_each_available_port(other_dp, ds) {
+ if (other_dp == dp)
+ continue;
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_dp->index,
+ BIT(port), 0);
+ }
+
+ return;
+ }
+
+ /* Forwarding ports must forward to the CPU and to other ports
+ * in the same bridge
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + CPU_PORT, BIT(port), BIT(port));
+
+ mask = BIT(CPU_PORT);
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (port == other_port || !dsa_port_bridge_same(dp, other_dp) ||
+ other_dp->stp_state != BR_STATE_FORWARDING)
+ continue;
+
+ mask |= BIT(other_port);
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_port,
+ BIT(port), BIT(port));
+ }
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, mask);
+}
+
+/* FIXME: STP frames aren't forwarded at the moment. BPDU frames are
+ * forwarded only from and to the PI/SI interface. For more info see chapter
+ * 2.7.1 (CPU Forwarding) in the datasheet.
+ * This function is required for tag_8021q operations.
+ */
+static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u32 val = 0;
+
+ if (state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING)
+ val = dp->learning ? BIT(port) : 0;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+
+ val = (state == BR_STATE_BLOCKING || state == BR_STATE_DISABLED) ?
+ 0 : BIT(port);
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), val);
+
+ /* The CPU port should always forward packets when user ports are forwarding,
+ * so let's configure it from the other ports only.
+ */
+ if (port != CPU_PORT)
+ vsc73xx_refresh_fwd_map(ds, port, state);
+}
+
static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
.mac_config = vsc73xx_mac_config,
.mac_link_down = vsc73xx_mac_link_down,
@@ -1048,6 +1738,7 @@ static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
+ .teardown = vsc73xx_teardown,
.phy_read = vsc73xx_phy_read,
.phy_write = vsc73xx_phy_write,
.get_strings = vsc73xx_get_strings,
@@ -1055,9 +1746,19 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_sset_count = vsc73xx_get_sset_count,
.port_enable = vsc73xx_port_enable,
.port_disable = vsc73xx_port_disable,
+ .port_pre_bridge_flags = vsc73xx_port_pre_bridge_flags,
+ .port_bridge_flags = vsc73xx_port_bridge_flags,
+ .port_bridge_join = dsa_tag_8021q_bridge_join,
+ .port_bridge_leave = dsa_tag_8021q_bridge_leave,
.port_change_mtu = vsc73xx_change_mtu,
.port_max_mtu = vsc73xx_get_max_mtu,
+ .port_stp_state_set = vsc73xx_port_stp_state_set,
+ .port_vlan_filtering = vsc73xx_port_vlan_filtering,
+ .port_vlan_add = vsc73xx_port_vlan_add,
+ .port_vlan_del = vsc73xx_port_vlan_del,
.phylink_get_caps = vsc73xx_phylink_get_caps,
+ .tag_8021q_vlan_add = vsc73xx_tag_8021q_vlan_add,
+ .tag_8021q_vlan_del = vsc73xx_tag_8021q_vlan_del,
};
static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
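/* Editor's note: illustrative sketch only, not part of this patch. It condenses
 * the per-port decision made by vsc73xx_vlan_commit_conf() above: the CPU port
 * always filters, a VLAN-aware user port untags everything when none of its
 * bridge VLANs is egress-tagged, and a VLAN-unaware port ignores tags entirely.
 */
static enum vsc73xx_port_vlan_conf
vsc73xx_example_pick_conf(bool is_cpu_port, bool vlan_filtering,
			  const struct vsc73xx_vlan_summary *summary)
{
	if (is_cpu_port)
		return VSC73XX_VLAN_FILTER;

	if (!vlan_filtering)
		return VSC73XX_VLAN_IGNORE;

	return summary->num_tagged ? VSC73XX_VLAN_FILTER :
				     VSC73XX_VLAN_FILTER_UNTAG_ALL;
}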
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 2997f7e108b1..3ca579acc798 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -15,6 +15,22 @@
#define VSC73XX_MAX_NUM_PORTS 8
/**
+ * struct vsc73xx_portinfo - port data structure: contains storage data
+ * @pvid_vlan_filtering: pvid vlan number used in vlan filtering mode
+ * @pvid_tag_8021q: pvid vlan number used in tag_8021q mode
+ * @pvid_vlan_filtering_configured: informs if port has configured pvid in vlan
+ * filtering mode
+ * @pvid_tag_8021q_configured: informs if the port has a configured pvid in
+ * tag_8021q mode
+ */
+struct vsc73xx_portinfo {
+ u16 pvid_vlan_filtering;
+ u16 pvid_tag_8021q;
+ bool pvid_vlan_filtering_configured;
+ bool pvid_tag_8021q_configured;
+};
+
+/**
* struct vsc73xx - VSC73xx state container: main data structure
* @dev: The device pointer
* @reset: The descriptor for the GPIO line tied to the reset pin
@@ -25,6 +41,10 @@
* @addr: MAC address used in flow control frames
* @ops: Structure with hardware-dependent operations
* @priv: Pointer to the configuration interface structure
+ * @portinfo: Storage table of portinfo structures
+ * @vlans: List of configured vlans. Contains port mask and untagged status of
+ * every vlan configured in port vlan operation. It doesn't cover tag_8021q
+ * vlans.
*/
struct vsc73xx {
struct device *dev;
@@ -35,6 +55,8 @@ struct vsc73xx {
u8 addr[ETH_ALEN];
const struct vsc73xx_ops *ops;
void *priv;
+ struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS];
+ struct list_head vlans;
};
/**
@@ -49,6 +71,21 @@ struct vsc73xx_ops {
u32 val);
};
+/**
+ * struct vsc73xx_bridge_vlan - VSC73xx driver structure which keeps vlan
+ * database copy
+ * @vid: VLAN number
+ * @portmask: each bit represents one port
+ * @untagged: each bit represents one port configured with @vid untagged
+ * @list: list structure
+ */
+struct vsc73xx_bridge_vlan {
+ u16 vid;
+ u8 portmask;
+ u8 untagged;
+ struct list_head list;
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
void vsc73xx_remove(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index c1179d7311f7..9b731dea78c1 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -127,8 +127,8 @@ static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
}
static const struct i2c_device_id xrs700x_i2c_id[] = {
- { "xrs700x-switch", 0 },
- {},
+ { "xrs700x-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 65f56a98c0a0..1a34da07c0db 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -186,17 +186,6 @@ static void ne2k_pci_block_output(struct net_device *dev, const int count,
static const struct ethtool_ops ne2k_pci_ethtool_ops;
-
-/* There is no room in the standard 8390 structure for extra info we need,
- * so we build a meta/outer-wrapper structure..
- */
-struct ne2k_pci_card {
- struct net_device *dev;
- struct pci_dev *pci_dev;
-};
-
-
-
/* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
* buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
* 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6a19b5393ed1..0baac25db4f8 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -122,6 +122,7 @@ source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
+source "drivers/net/ethernet/meta/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 0d872d4efcd1..c03203439c0e 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_LITEX) += litex/
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+obj-$(CONFIG_NET_VENDOR_META) += meta/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 857361c74f5d..e1b8794b14c9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -441,14 +441,6 @@ enum rx_desc_bits {
};
/* Completion queue entry. */
-struct short_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
-};
-struct basic_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
- __le16 vlanid;
- __le16 status2;
-};
struct csum_rx_done_desc {
__le32 status; /* Low 16 bits is length. */
__le16 csum; /* Partial checksum */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 2d8a66ea82fa..713a595370bf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
struct ena_com_io_sq *io_sq)
{
size_t size;
- int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = io_sq->desc_entry_size * io_sq->q_depth;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq)
{
size_t size;
- int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- prev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index ef512cf89abf..27792a52b6cf 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -667,4 +667,5 @@ void lance_poll(struct net_device *dev)
EXPORT_SYMBOL_GPL(lance_poll);
#endif
+MODULE_DESCRIPTION("LANCE Ethernet IC generic routines");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 68983b717145..1ca26a8c40eb 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -781,4 +781,5 @@ static void __exit a2065_cleanup_module(void)
module_init(a2065_init_module);
module_exit(a2065_cleanup_module);
+MODULE_DESCRIPTION("Commodore A2065 Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 38153e633231..fa201da567ed 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -790,4 +790,5 @@ static void __exit ariadne_cleanup_module(void)
module_init(ariadne_init_module);
module_exit(ariadne_cleanup_module);
+MODULE_DESCRIPTION("Ariadne Ethernet Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 751454d305c6..8c8cc7d0f42d 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -79,6 +79,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_DESCRIPTION("Atari LANCE Ethernet driver");
MODULE_LICENSE("GPL");
/* Print debug messages on probing? */
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 055fda11c572..df42294530cb 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -234,4 +234,5 @@ static void __exit hplance_cleanup_module(void)
module_init(hplance_init_module);
module_exit(hplance_cleanup_module);
+MODULE_DESCRIPTION("HP300 on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 6cf38180cc01..b1e6620ad41d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -385,6 +385,7 @@ static void __exit lance_cleanup_module(void)
}
module_exit(lance_cleanup_module);
#endif /* MODULE */
+MODULE_DESCRIPTION("AMD LANCE/PCnet Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 410c7b67eba4..c156566c0906 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -178,6 +178,7 @@ static int m147lance_close(struct net_device *dev)
return 0;
}
+MODULE_DESCRIPTION("MVME147 LANCE Ethernet driver");
MODULE_LICENSE("GPL");
static struct net_device *dev_mvme147_lance;
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 246f34c43765..c60df4a21158 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -74,6 +74,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_DESCRIPTION("Sun3/Sun3x on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
#define DPRINTK(n,a) \
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 58e7e88aae5b..21407a26f806 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -577,7 +577,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
}
static int xgbe_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a2606ee3b0a5..d0aecd1d7357 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -652,7 +652,7 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
}
static int aq_ethtool_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 0a67612af228..0d400a7d8d91 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -23,16 +23,6 @@ config ARC_EMAC_CORE
select PHYLIB
select CRC32
-config ARC_EMAC
- tristate "ARC EMAC support"
- select ARC_EMAC_CORE
- depends on OF_IRQ
- depends on ARC || COMPILE_TEST
- help
- On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
- non-standard on-chip ethernet device ARC EMAC 10/100 is used.
- Say Y here if you have such a board. If unsure, say N.
-
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
index d63ada577c8e..23586eefec44 100644
--- a/drivers/net/ethernet/arc/Makefile
+++ b/drivers/net/ethernet/arc/Makefile
@@ -5,5 +5,4 @@
arc_emac-objs := emac_main.o emac_mdio.o
obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o
-obj-$(CONFIG_ARC_EMAC) += emac_arc.o
obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
deleted file mode 100644
index a3afddb23ee8..000000000000
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/**
- * DOC: emac_arc.c - ARC EMAC specific glue layer
- *
- * Copyright (C) 2014 Romain Perier
- *
- * Romain Perier <romain.perier@gmail.com>
- */
-
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/of_net.h>
-#include <linux/platform_device.h>
-
-#include "emac.h"
-
-#define DRV_NAME "emac_arc"
-
-static int emac_arc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct arc_emac_priv *priv;
- phy_interface_t interface;
- struct net_device *ndev;
- int err;
-
- if (!dev->of_node)
- return -ENODEV;
-
- ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
- if (!ndev)
- return -ENOMEM;
- platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, dev);
-
- priv = netdev_priv(ndev);
- priv->drv_name = DRV_NAME;
-
- err = of_get_phy_mode(dev->of_node, &interface);
- if (err) {
- if (err == -ENODEV)
- interface = PHY_INTERFACE_MODE_MII;
- else
- goto out_netdev;
- }
-
- priv->clk = devm_clk_get(dev, "hclk");
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to retrieve host clock from device tree\n");
- err = -EINVAL;
- goto out_netdev;
- }
-
- err = arc_emac_probe(ndev, interface);
-out_netdev:
- if (err)
- free_netdev(ndev);
- return err;
-}
-
-static void emac_arc_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
-
- arc_emac_remove(ndev);
- free_netdev(ndev);
-}
-
-static const struct of_device_id emac_arc_dt_ids[] = {
- { .compatible = "snps,arc-emac" },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
-
-static struct platform_driver emac_arc_driver = {
- .probe = emac_arc_probe,
- .remove_new = emac_arc_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = emac_arc_dt_ids,
- },
-};
-
-module_platform_driver(emac_arc_driver);
-
-MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
-MODULE_DESCRIPTION("ARC EMAC platform driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index a806dadc4196..20c6529ec135 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1380,6 +1380,7 @@ static int bcmasp_probe(struct platform_device *pdev)
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
of_node_put(intf_node);
+ ret = -ENOMEM;
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e2a4e1088b7f..9580ab83d387 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1262,7 +1262,7 @@ enum {
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[FP_SB_MAX_E1x+
+ struct stats_query_entry query[FP_SB_MAX_E2 +
BNX2X_FIRST_QUEUE_QUERY_IDX];
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 58956ed8f531..c7b56a5e5425 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3634,7 +3634,7 @@ static int bnx2x_set_channels(struct net_device *dev,
}
static int bnx2x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c437ca1c0fd3..bb3be33c1bbd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -456,8 +456,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping;
unsigned int length, pad = 0;
u32 len, free_size, vlan_tag_flags, cfa_action, flags;
- u16 prod, last_frag;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct pci_dev *pdev = bp->pdev;
+ u16 prod, last_frag, txts_prod;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
@@ -509,23 +510,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
+ ptp->tx_tstamp_en) {
+ if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else if (!skb_is_gso(skb)) {
+ u16 seq_id, hdr_off;
- if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
- atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
- if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
- &ptp->tx_hdr_off)) {
+ if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
+ !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
if (vlan_tag_flags)
- ptp->tx_hdr_off += VLAN_HLEN;
+ hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- atomic_inc(&bp->ptp_cfg->tx_avail);
+
+ ptp->txts_req[txts_prod].tx_seqid = seq_id;
+ ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
+ tx_buf->txts_prod = txts_prod;
}
}
}
-
if (unlikely(skb->no_fcs))
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
@@ -732,9 +739,6 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
@@ -756,6 +760,13 @@ tx_dma_error:
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags)) {
+ txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+ atomic64_inc(&bp->ptp_cfg->stats.ts_err);
+ if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ /* set SKB to err so PTP worker will clean up */
+ ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
+ }
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
@@ -763,7 +774,8 @@ tx_kick_pending:
return NETDEV_TX_OK;
}
-static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+/* Returns true if some remaining TX packets not processed. */
+static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
int budget)
{
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
@@ -772,24 +784,33 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
int tx_pkts = 0;
+ bool rc = false;
while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
+ bool is_ts_pkt;
int j, last;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- cons = NEXT_TX(cons);
skb = tx_buf->skb;
- tx_buf->skb = NULL;
if (unlikely(!skb)) {
bnxt_sched_reset_txr(bp, txr, cons);
- return;
+ return rc;
+ }
+
+ is_ts_pkt = tx_buf->is_ts_pkt;
+ if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
+ rc = true;
+ break;
}
+ cons = NEXT_TX(cons);
tx_pkts++;
tx_bytes += skb->len;
+ tx_buf->skb = NULL;
+ tx_buf->is_ts_pkt = 0;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
@@ -809,13 +830,11 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
skb_frag_size(&skb_shinfo(skb)->frags[j]),
DMA_TO_DEVICE);
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
- if (!bnxt_get_tx_ts_p5(bp, skb))
- skb = NULL;
- else
- atomic_inc(&bp->ptp_cfg->tx_avail);
+ bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
+ skb = NULL;
}
}
@@ -830,18 +849,22 @@ next_tx_int:
__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
+
+ return rc;
}
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_tx_ring_info *txr;
+ bool more = false;
int i;
bnxt_for_each_napi_tx(i, bnapi, txr) {
if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
- __bnxt_tx_int(bp, txr, budget);
+ more |= __bnxt_tx_int(bp, txr, budget);
}
- bnapi->events &= ~BNXT_TX_CMP_EVENT;
+ if (!more)
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -2907,6 +2930,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 1;
break;
}
+ } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
+ bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
@@ -2938,8 +2963,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT)
+ if (event & BNXT_REDIRECT_EVENT) {
xdp_do_flush();
+ event &= ~BNXT_REDIRECT_EVENT;
+ }
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
@@ -2949,6 +2976,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
wmb();
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
+ event &= ~BNXT_TX_EVENT;
}
cpr->cp_raw_cons = raw_cons;
@@ -2966,13 +2994,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bnapi->events &= ~BNXT_RX_EVENT;
}
if (bnapi->events & BNXT_AGG_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNXT_AGG_EVENT;
}
- bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -3309,37 +3338,12 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
}
}
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct pci_dev *pdev = bp->pdev;
- struct bnxt_tpa_idx_map *map;
- int i, max_idx, max_agg_idx;
+ int i, max_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
- max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
-
- for (i = 0; i < bp->max_tpa; i++) {
- struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
- u8 *data = tpa_info->data;
-
- if (!data)
- continue;
-
- dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- tpa_info->data = NULL;
-
- skb_free_frag(data);
- }
-
-skip_rx_tpa_free:
- if (!rxr->rx_buf_ring)
- goto skip_rx_buf_free;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
@@ -3359,12 +3363,15 @@ skip_rx_tpa_free:
skb_free_frag(data);
}
}
+}
-skip_rx_buf_free:
- if (!rxr->rx_agg_ring)
- goto skip_rx_agg_free;
+static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < max_agg_idx; i++) {
+ for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@@ -3376,6 +3383,45 @@ skip_rx_buf_free:
page_pool_recycle_direct(rxr->page_pool, page);
}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_tpa_idx_map *map;
+ int i;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+
+ tpa_info->data = NULL;
+
+ skb_free_frag(data);
+ }
+
+skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
+ bnxt_free_one_rx_agg_ring(bp, rxr);
skip_rx_agg_free:
map = rxr->rx_tpa_idx_map;
@@ -3972,6 +4018,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
return 0;
}
+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ int i;
+
+ rxr->page_pool->p.napi = NULL;
+ rxr->page_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+}
+
static void bnxt_init_ring_struct(struct bnxt *bp)
{
int i, j;
@@ -4054,37 +4156,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct net_device *dev = bp->dev;
u32 prod;
int i;
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
+}
- if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
- return 0;
+static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod;
+ int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+}
+
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ int i;
+
+ bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
dma_addr_t mapping;
@@ -4103,9 +4223,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
return 0;
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr;
struct bnxt_ring_struct *ring;
u32 type;
@@ -4115,28 +4235,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (NET_IP_ALIGN == 2)
type |= RX_BD_FLAGS_SOP;
- rxr = &bp->rx_ring[ring_nr];
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
-
- netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
- &rxr->bnapi->napi);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring;
+ u32 type;
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
-
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+
+ rxr = &bp->rx_ring[ring_nr];
+ bnxt_init_one_rx_ring_rxbd(bp, rxr);
+
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+
+ bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
return bnxt_alloc_one_rx_ring(bp, ring_nr);
}
@@ -5835,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_input *req,
struct bnxt_ntuple_filter *fltr)
{
- struct bnxt_rss_ctx *rss_ctx, *tmp;
u16 rxq = fltr->base.rxq;
if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
- list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
- if (rss_ctx->index == fltr->base.fw_vnic_id) {
- struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+ struct ethtool_rxfh_context *ctx;
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
- req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
- break;
- }
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+ fltr->base.fw_vnic_id);
+ if (ctx) {
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
return;
}
@@ -6084,10 +6222,9 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
}
-int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
int entries;
- u16 *tbl;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
@@ -6095,22 +6232,19 @@ int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
- tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
- if (!tbl)
+ bp->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
+ if (!bp->rss_indir_tbl)
return -ENOMEM;
- if (rss_ctx)
- rss_ctx->rss_indir_tbl = tbl;
- else
- bp->rss_indir_tbl = tbl;
-
return 0;
}
-void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
- u16 *rss_indir_tbl;
+ u32 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -6122,7 +6256,7 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
if (rss_ctx)
- rss_indir_tbl = &rss_ctx->rss_indir_tbl[0];
+ rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
else
rss_indir_tbl = &bp->rss_indir_tbl[0];
@@ -6131,12 +6265,12 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
- memset(&rss_indir_tbl[i], 0, pad * sizeof(u16));
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
{
- u16 i, tbl_size, max_ring = 0;
+ u32 i, tbl_size, max_ring = 0;
if (!bp->rss_indir_tbl)
return 0;
@@ -6188,7 +6322,7 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
- j = vnic->rss_ctx->rss_indir_tbl[i];
+ j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
else
j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
@@ -6675,6 +6809,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_TX: {
struct bnxt_tx_ring_info *txr;
+ u16 flags = 0;
txr = container_of(ring, struct bnxt_tx_ring_info,
tx_ring_struct);
@@ -6688,6 +6823,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
req->cmpl_coal_cnt =
RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+ if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
+ flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
+ req->flags = cpu_to_le16(flags);
break;
}
case HWRM_RING_ALLOC_RX:
@@ -6861,6 +6999,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
bnxt_set_db_mask(bp, db, ring_type);
}
+static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bp->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -6926,24 +7106,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
- type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
/* If we have agg rings, post agg buffers first. */
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
+ struct bnxt_napi *bnapi = rxr->bnapi;
u32 type2 = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
ring = &cpr2->cp_ring_struct;
ring->handle = BNXT_SET_NQ_HDL(cpr2);
@@ -6957,23 +7134,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
if (agg_rings) {
- type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring =
- &rxr->rx_agg_ring_struct;
- u32 grp_idx = ring->grp_idx;
- u32 map_idx = grp_idx + bp->rx_nr_rings;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
if (rc)
goto err_out;
-
- bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
- ring->fw_ring_id);
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
err_out:
@@ -7013,6 +7177,50 @@ exit:
return 0;
}
+static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 type, cmpl_ring_id;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+ else
+ type = RING_FREE_REQ_RING_TYPE_RX;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring, type,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
@@ -7037,42 +7245,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].rx_fw_ring_id =
- INVALID_HW_RING_ID;
- }
- }
-
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- type = RING_FREE_REQ_RING_TYPE_RX_AGG;
- else
- type = RING_FREE_REQ_RING_TYPE_RX;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
- hwrm_ring_free_send_msg(bp, ring, type,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].agg_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+ bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
}
/* The completion rings are about to be freed. After that the
@@ -8832,7 +9006,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -8848,7 +9022,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
goto exit;
flags = resp->flags;
- if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+ if (BNXT_CHIP_P5_AND_MINUS(bp) &&
+ !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
rc = -ENODEV;
goto exit;
}
@@ -8861,10 +9036,13 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
ptp->bp = bp;
bp->ptp_cfg = ptp;
}
- if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+
+ if (flags &
+ (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
+ PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ } else if (BNXT_CHIP_P5(bp)) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
@@ -8946,6 +9124,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -8996,6 +9176,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
+ bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
hwrm_func_qcaps_exit:
hwrm_req_drop(bp, req);
@@ -10013,10 +10194,12 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
struct bnxt_ntuple_filter *ntp_fltr;
int i;
- bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
- for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
- if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+ if (netif_running(bp->dev)) {
+ bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+ if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+ }
}
if (!all)
return;
@@ -10037,19 +10220,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
vnic->rss_table,
vnic->rss_table_dma_addr);
- kfree(rss_ctx->rss_indir_tbl);
- list_del(&rss_ctx->list);
bp->num_rss_ctx--;
- clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
- kfree(rss_ctx);
}
static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
{
bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
- struct bnxt_rss_ctx *rss_ctx, *tmp;
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
- list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
@@ -10058,42 +10239,20 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
rss_ctx->index);
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
}
}
}
-struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
-{
- struct bnxt_rss_ctx *rss_ctx = NULL;
-
- rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL);
- if (rss_ctx) {
- rss_ctx->vnic.rss_ctx = rss_ctx;
- list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
- bp->num_rss_ctx++;
- }
- return rss_ctx;
-}
-
-void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all)
+void bnxt_clear_rss_ctxs(struct bnxt *bp)
{
- struct bnxt_rss_ctx *rss_ctx, *tmp;
-
- list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
- bnxt_del_one_rss_ctx(bp, rss_ctx, all);
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
- if (all)
- bitmap_free(bp->rss_ctx_bmap);
-}
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
-static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
-{
- bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL);
- if (bp->rss_ctx_bmap) {
- /* burn index 0 since we cannot have context 0 */
- __set_bit(0, bp->rss_ctx_bmap);
- INIT_LIST_HEAD(&bp->rss_ctx_list);
- bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ bnxt_del_one_rss_ctx(bp, rss_ctx, false);
}
}
@@ -11986,8 +12145,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
- if (bp->ptp_cfg)
- atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -12140,7 +12299,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
msleep(20);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
- bnxt_clear_rss_ctxs(bp, false);
+ bnxt_clear_rss_ctxs(bp);
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -12669,7 +12828,11 @@ bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
if (!BNXT_NEW_RM(bp))
return true;
- if (hwr.vnic == bp->hw_resc.resv_vnics &&
+ /* Do not reduce VNIC and RSS ctx reservations. There is a FW
+ * issue that will mess up the default VNIC if we reduce the
+ * reservations.
+ */
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
@@ -14820,6 +14983,220 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+ int rc;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+ memcpy(clone, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, clone);
+ bnxt_reset_rx_ring_struct(bp, clone);
+
+ clone->rx_prod = 0;
+ clone->rx_agg_prod = 0;
+ clone->rx_sw_agg_prod = 0;
+ clone->rx_next_cons = 0;
+
+ rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+ if (rc)
+ return rc;
+
+ ring = &clone->rx_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_ring;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ring = &clone->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_agg_ring;
+
+ rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_ring;
+ }
+
+ bnxt_init_one_rx_ring_rxbd(bp, clone);
+ bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+ bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+
+ return 0;
+
+err_free_rx_agg_ring:
+ bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+ bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+ clone->page_pool->p.napi = NULL;
+ page_pool_destroy(clone->page_pool);
+ clone->page_pool = NULL;
+ return rc;
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct bnxt_rx_ring_info *rxr = qmem;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+ bnxt_free_one_rx_agg_ring(bp, rxr);
+
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+}
+
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+ struct bnxt_rx_ring_info *dst,
+ struct bnxt_rx_ring_info *src)
+{
+ struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+ struct bnxt_ring_struct *dst_ring, *src_ring;
+ int i;
+
+ dst_ring = &dst->rx_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return;
+
+ dst_ring = &dst->rx_agg_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_agg_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+ WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+
+ rxr->rx_prod = clone->rx_prod;
+ rxr->rx_agg_prod = clone->rx_agg_prod;
+ rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+ rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->page_pool = clone->page_pool;
+
+ bnxt_copy_rx_ring(bp, rxr, clone);
+
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_free_hwrm_rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+ napi_enable(&rxr->bnapi->napi);
+
+ cpr = &rxr->bnapi->cp_ring;
+ cpr->sw_stats->rx.rx_resets++;
+
+ return 0;
+
+err_free_hwrm_rx_ring:
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ return rc;
+}
+
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr;
+
+ rxr = &bp->rx_ring[idx];
+ napi_disable(&rxr->bnapi->napi);
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ rxr->rx_next_cons = 0;
+ page_pool_disable_direct_recycling(rxr->page_pool);
+
+ memcpy(qmem, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, qmem);
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
+ .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
+ .ndo_queue_mem_free = bnxt_queue_mem_free,
+ .ndo_queue_start = bnxt_queue_start,
+ .ndo_queue_stop = bnxt_queue_stop,
+};
+
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -14837,8 +15214,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
- if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
- bnxt_clear_rss_ctxs(bp, true);
+ WARN_ON(bp->num_rss_ctx);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -15285,6 +15661,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
@@ -15307,7 +15684,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->flags |= BNXT_FLAG_CHIP_P7;
}
- rc = bnxt_alloc_rss_indir_tbl(bp, NULL);
+ rc = bnxt_alloc_rss_indir_tbl(bp);
if (rc)
goto init_err_pci_clean;
@@ -15363,6 +15740,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ if (bp->tso_max_segs)
+ netif_set_tso_max_segs(dev, bp->tso_max_segs);
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG;
@@ -15462,8 +15841,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&bp->usr_fltr_list);
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
- bnxt_init_multi_rss_ctx(bp);
-
+ bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
rc = register_netdev(dev);
if (rc)
@@ -15486,8 +15864,6 @@ init_err_dl:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
- if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
- bnxt_clear_rss_ctxs(bp, true);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
bnxt_hwmon_uninit(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 656ab81c0272..6bbdc718c3a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -181,6 +181,32 @@ struct tx_cmp {
#define TX_CMP_SQ_CONS_IDX(txcmp) \
(le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+struct tx_ts_cmp {
+ __le32 tx_ts_cmp_flags_type;
+ #define TX_TS_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8)
+ #define TX_TS_CMP_TS_SUB_NS (0xf << 12)
+ #define TX_TS_CMP_TS_NS_MID (0xffff << 16)
+ #define TX_TS_CMP_TS_NS_MID_SFT 16
+ u32 tx_ts_cmp_opaque;
+ __le32 tx_ts_cmp_errors_v;
+ #define TX_TS_CMP_V (1 << 0)
+ #define TX_TS_CMP_TS_INVALID_ERR (1 << 10)
+ __le32 tx_ts_cmp_ts_ns_lo;
+};
+
+#define BNXT_GET_TX_TS_48B_NS(tscmp) \
+ (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \
+ ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \
+ TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT))
+
+#define BNXT_TX_TS_ERR(tscmp) \
+ (((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\
+ ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR)))
+
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -848,11 +874,14 @@ struct bnxt_sw_tx_bd {
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
struct page *page;
- u8 is_gso;
+ u8 is_ts_pkt;
u8 is_push;
u8 action;
unsigned short nr_frags;
- u16 rx_prod;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
};
struct bnxt_sw_rx_bd {
@@ -1257,19 +1286,16 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
#define BNXT_VNIC_NTUPLE_FLAG 0x20
#define BNXT_VNIC_RSSCTX_FLAG 0x40
- struct bnxt_rss_ctx *rss_ctx;
+ struct ethtool_rxfh_context *rss_ctx;
u32 vnic_id;
};
struct bnxt_rss_ctx {
- struct list_head list;
struct bnxt_vnic_info vnic;
- u16 *rss_indir_tbl;
u8 index;
};
#define BNXT_MAX_ETH_RSS_CTX 32
-#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1)
#define BNXT_VNIC_ID_INVALID 0xffffffff
struct bnxt_hw_rings {
@@ -1434,6 +1460,57 @@ struct bnxt_l2_filter {
atomic_t refcnt;
};
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -2186,9 +2263,17 @@ struct bnxt {
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+/* Chip class phase 3.x */
+#define BNXT_CHIP_P3(bp) \
+ (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \
+ BNXT_CHIP_TYPE_NITRO_A0(bp))
+
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
+#define BNXT_CHIP_P5_AND_MINUS(bp) \
+ (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -2243,11 +2328,9 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
- struct list_head rss_ctx_list;
- unsigned long *rss_ctx_bmap;
u32 num_rss_ctx;
int nr_vnics;
- u16 *rss_indir_tbl;
+ u32 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
@@ -2267,6 +2350,7 @@ struct bnxt {
u8 rss_hash_key_updated:1;
u16 max_mtu;
+ u16 tso_max_segs;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
@@ -2332,6 +2416,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
@@ -2722,8 +2807,8 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
u32 tpa_flags);
void bnxt_fill_ipv6_mask(__be32 mask[4]);
-int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
-void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -2757,8 +2842,7 @@ int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bool all);
-struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp);
-void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all);
+void bnxt_clear_rss_ctxs(struct bnxt *bp);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8763f8a01457..d00ef0063820 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -970,7 +970,7 @@ static int bnxt_set_channels(struct net_device *dev,
bnxt_clear_usr_fltrs(bp, true);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
- bnxt_clear_rss_ctxs(bp, false);
+ bnxt_clear_rss_ctxs(bp);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1210,19 +1210,18 @@ fltr_err:
static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
u32 index)
{
- struct bnxt_rss_ctx *rss_ctx, *tmp;
+ struct ethtool_rxfh_context *ctx;
- list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
- if (rss_ctx->index == index)
- return rss_ctx;
- return NULL;
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+ if (!ctx)
+ return NULL;
+ return ethtool_rxfh_context_priv(ctx);
}
-static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
- struct bnxt_rss_ctx *rss_ctx)
+static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
- struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
@@ -1801,10 +1800,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
- u32 rss_context = rxfh->rss_context;
struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
- u16 *indir_tbl = bp->rss_indir_tbl;
+ u32 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
@@ -1815,10 +1813,13 @@ static int bnxt_get_rxfh(struct net_device *dev,
vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (rxfh->rss_context) {
- rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context);
- if (!rss_ctx)
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
+ if (!ctx)
return -EINVAL;
- indir_tbl = rss_ctx->rss_indir_tbl;
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
vnic = &rss_ctx->vnic;
}
@@ -1834,8 +1835,9 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
}
-static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
- struct ethtool_rxfh_param *rxfh)
+static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
+ struct bnxt_rss_ctx *rss_ctx,
+ const struct ethtool_rxfh_param *rxfh)
{
if (rxfh->key) {
if (rss_ctx) {
@@ -1848,29 +1850,21 @@ static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
}
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
- u16 *indir_tbl = bp->rss_indir_tbl;
+ u32 *indir_tbl = bp->rss_indir_tbl;
if (rss_ctx)
- indir_tbl = rss_ctx->rss_indir_tbl;
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
for (i = 0; i < tbl_size; i++)
indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
}
}
-static int bnxt_set_rxfh_context(struct bnxt *bp,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static int bnxt_rxfh_context_check(struct bnxt *bp,
+ struct netlink_ext_ack *extack)
{
- u32 *rss_context = &rxfh->rss_context;
- struct bnxt_rss_ctx *rss_ctx;
- struct bnxt_vnic_info *vnic;
- bool modify = false;
- int bit_id;
- int rc;
-
if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
return -EOPNOTSUPP;
@@ -1881,21 +1875,22 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
return -EAGAIN;
}
- if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
- rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
- if (!rss_ctx) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found",
- *rss_context);
- return -EINVAL;
- }
- if (*rss_context && rxfh->rss_delete) {
- bnxt_del_one_rss_ctx(bp, rss_ctx, true);
- return 0;
- }
- modify = true;
- vnic = &rss_ctx->vnic;
- goto modify_context;
- }
+ return 0;
+}
+
+static int bnxt_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, extack);
+ if (rc)
+ return rc;
if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
@@ -1908,22 +1903,19 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
return -ENOMEM;
}
- rss_ctx = bnxt_alloc_rss_ctx(bp);
- if (!rss_ctx)
- return -ENOMEM;
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bp->num_rss_ctx++;
vnic = &rss_ctx->vnic;
+ vnic->rss_ctx = ctx;
vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
vnic->vnic_id = BNXT_VNIC_ID_INVALID;
- rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx);
- if (rc)
- goto out;
-
- rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx);
+ rc = bnxt_alloc_vnic_rss_table(bp, vnic);
if (rc)
goto out;
- bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx);
+ bnxt_set_dflt_rss_indir_tbl(bp, ctx);
memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
@@ -1937,11 +1929,7 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
goto out;
}
-modify_context:
- bnxt_modify_rss(bp, rss_ctx, rxfh);
-
- if (modify)
- return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
rc = __bnxt_setup_vnic_p5(bp, vnic);
if (rc) {
@@ -1949,21 +1937,47 @@ modify_context:
goto out;
}
- bit_id = bitmap_find_free_region(bp->rss_ctx_bmap,
- BNXT_RSS_CTX_BMAP_LEN, 0);
- if (bit_id < 0) {
- rc = -ENOMEM;
- goto out;
- }
- rss_ctx->index = (u16)bit_id;
- *rss_context = rss_ctx->index;
-
+ rss_ctx->index = rxfh->rss_context;
return 0;
out:
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return rc;
}
+static int bnxt_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, extack);
+ if (rc)
+ return rc;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
+}
+
+static int bnxt_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return 0;
+}
+
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1974,10 +1988,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return bnxt_set_rxfh_context(bp, rxfh, extack);
-
- bnxt_modify_rss(bp, NULL, rxfh);
+ bnxt_modify_rss(bp, NULL, NULL, rxfh);
bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
@@ -5013,7 +5024,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
}
static int bnxt_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ptp_cfg *ptp;
@@ -5233,6 +5244,19 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
*ranges = bnxt_rmon_ranges;
}
+static void bnxt_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp) {
+ ts_stats->pkts = ptp->stats.ts_pkts;
+ ts_stats->lost = ptp->stats.ts_lost;
+ ts_stats->err = atomic64_read(&ptp->stats.ts_err);
+ }
+}
+
static void bnxt_get_link_ext_stats(struct net_device *dev,
struct ethtool_link_ext_stats *stats)
{
@@ -5256,6 +5280,9 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
.cap_rss_ctx_supported = 1,
+ .rxfh_max_context_id = BNXT_MAX_ETH_RSS_CTX,
+ .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
+ .rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
@@ -5293,6 +5320,9 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .create_rxfh_context = bnxt_create_rxfh_context,
+ .modify_rxfh_context = bnxt_modify_rxfh_context,
+ .remove_rxfh_context = bnxt_remove_rxfh_context,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
@@ -5316,4 +5346,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eth_mac_stats = bnxt_get_eth_mac_stats,
.get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
.get_rmon_stats = bnxt_get_rmon_stats,
+ .get_ts_stats = bnxt_get_ptp_stats,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 06ea86c80be1..f219709f9563 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2023 Broadcom Inc.
+ * Copyright (c) 2018-2024 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -500,7 +500,11 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -609,8 +613,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 39
-#define HWRM_VERSION_STR "1.10.3.39"
+#define HWRM_VERSION_RSVD 44
+#define HWRM_VERSION_STR "1.10.3.44"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -664,6 +668,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -843,7 +848,9 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1326,13 +1333,13 @@ struct hwrm_async_event_cmpl_error_report_base {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
@@ -1814,6 +1821,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1828,7 +1838,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[2];
+ __le16 max_tso_segs;
__le32 roce_vf_max_av;
__le32 roce_vf_max_cq;
__le32 roce_vf_max_mrw;
@@ -2449,6 +2459,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
#define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -3660,22 +3671,24 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
@@ -3772,18 +3785,20 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
@@ -3876,22 +3891,24 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
@@ -3911,22 +3928,24 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
@@ -4202,7 +4221,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
__le16 auto_link_speeds2_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
@@ -4217,6 +4237,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
u8 unused_2[6];
};
@@ -4292,6 +4313,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4451,7 +4473,13 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -5049,33 +5077,43 @@ struct hwrm_port_qstats_ext_output {
u8 valid;
};
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
struct hwrm_port_lpbk_qstats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
};
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
struct hwrm_port_lpbk_qstats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
__le64 lpbk_ucast_frames;
__le64 lpbk_mcast_frames;
__le64 lpbk_bcast_frames;
__le64 lpbk_ucast_bytes;
__le64 lpbk_mcast_bytes;
__le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- u8 unused_0[7];
- u8 valid;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
};
/* hwrm_port_ecn_qstats_input (size:256b/32B) */
@@ -5140,13 +5178,15 @@ struct hwrm_port_clr_stats_output {
u8 valid;
};
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
struct hwrm_port_lpbk_clr_stats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
@@ -5287,10 +5327,11 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
u8 internal_port_cnt;
u8 unused_0;
__le16 supported_speeds2_force_mode;
@@ -7443,17 +7484,17 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
@@ -8520,17 +8561,17 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -8576,17 +8617,17 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -8635,17 +8676,17 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -9109,6 +9150,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
@@ -9758,6 +9800,9 @@ struct hwrm_dbg_coredump_initiate_input {
__le16 instance;
__le16 unused_0;
u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
u8 unused_1[7];
};
@@ -10433,13 +10478,13 @@ struct hwrm_selftest_irq_output {
/* dbc_dbc (size:64b/8B) */
struct dbc_dbc {
- u32 index;
+ __le32 index;
#define DBC_DBC_INDEX_MASK 0xffffffUL
#define DBC_DBC_INDEX_SFT 0
#define DBC_DBC_EPOCH 0x1000000UL
#define DBC_DBC_TOGGLE_MASK 0x6000000UL
#define DBC_DBC_TOGGLE_SFT 25
- u32 type_path_xid;
+ __le32 type_path_xid;
#define DBC_DBC_XID_MASK 0xfffffUL
#define DBC_DBC_XID_SFT 0
#define DBC_DBC_PATH_MASK 0x3000000UL
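Illustrative sketch (not part of the patch): with both doorbell words now typed __le32, a host-memory doorbell image has to be composed with explicit cpu_to_le32() conversions. A minimal example under that assumption; example_fill_dbc(), ring_index and xid are hypothetical names, while the struct and mask names come from the definitions above.

static void example_fill_dbc(struct dbc_dbc *db, u32 ring_index, u32 xid)
{
	/* Only the fields shown above are touched; a real driver also
	 * encodes the doorbell type and path bits in type_path_xid.
	 */
	db->index = cpu_to_le32(ring_index & DBC_DBC_INDEX_MASK);
	db->type_path_xid = cpu_to_le32(xid & DBC_DBC_XID_MASK);
}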
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 1df3d56cc4b5..d2fd2d04ed47 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- req_type, token->seq_id, rc);
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index e661ab154d6b..37d42423459c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -110,7 +110,7 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
}
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
- u32 txts_tmo)
+ u32 txts_tmo, int slot)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -123,11 +123,12 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot];
u32 tmo_us = txts_tmo * 1000;
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
- req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
- req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
+ req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid);
+ req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off);
if (!tmo_us)
tmo_us = BNXT_PTP_QTS_TIMEOUT;
tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
@@ -656,6 +657,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
(ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
return 0;
}
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ for (i = 0; i < 2; i++) {
+ if (reg_arr[i] & BNXT_GRC_BASE_MASK)
+ return -EINVAL;
+ ptp->refclk_mapped_regs[i] = reg_arr[i];
+ }
+ return 0;
+ }
return -ENODEV;
}
@@ -674,41 +683,44 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc)
return ns;
}
-static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
+ struct bnxt_ptp_tx_req *txts_req;
unsigned long now = jiffies;
u64 ts = 0, ns = 0;
u32 tmo = 0;
int rc;
- if (!ptp->txts_pending)
- ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo);
- if (!time_after_eq(now, ptp->abs_txts_tmo))
- tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now);
+ txts_req = &ptp->txts_req[slot];
+ /* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */
+ smp_rmb();
+ if (!time_after_eq(now, txts_req->abs_txts_tmo))
+ tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now);
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
- tmo);
+ tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
spin_lock_bh(&ptp->ptp_lock);
ns = timecounter_cyc2time(&ptp->tc, ts);
spin_unlock_bh(&ptp->ptp_lock);
timestamp.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(ptp->tx_skb, &timestamp);
+ skb_tstamp_tx(txts_req->tx_skb, &timestamp);
+ ptp->stats.ts_pkts++;
} else {
- if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) {
- ptp->txts_pending = true;
- return;
- }
+ if (!time_after_eq(jiffies, txts_req->abs_txts_tmo))
+ return -EAGAIN;
+
+ ptp->stats.ts_lost++;
netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
- atomic_inc(&ptp->tx_avail);
- ptp->txts_pending = false;
+ dev_kfree_skb_any(txts_req->tx_skb);
+ txts_req->tx_skb = NULL;
+
+ return 0;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -717,12 +729,30 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
ptp_info);
unsigned long now = jiffies;
struct bnxt *bp = ptp->bp;
+ u16 cons = ptp->txts_cons;
+ u32 num_requests;
+ int rc = 0;
+
+ num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail);
+ while (num_requests--) {
+ if (IS_ERR(ptp->txts_req[cons].tx_skb))
+ goto next_slot;
+ if (!ptp->txts_req[cons].tx_skb)
+ break;
+ rc = bnxt_stamp_tx_skb(bp, cons);
+ if (rc == -EAGAIN)
+ break;
+next_slot:
+ BNXT_PTP_INC_TX_AVAIL(ptp);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
- if (ptp->tx_skb)
- bnxt_stamp_tx_skb(bp, ptp->tx_skb);
-
- if (!time_after_eq(now, ptp->next_period))
+ if (!time_after_eq(now, ptp->next_period)) {
+ if (rc == -EAGAIN)
+ return 0;
return ptp->next_period - now;
+ }
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
@@ -732,22 +762,37 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
spin_unlock_bh(&ptp->ptp_lock);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
- if (ptp->txts_pending)
+ if (rc == -EAGAIN)
return 0;
return HZ;
}
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
+{
+ spin_lock_bh(&ptp->ptp_tx_lock);
+ if (ptp->tx_avail) {
+ *prod = ptp->txts_prod;
+ ptp->txts_prod = NEXT_TXTS(*prod);
+ ptp->tx_avail--;
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ return 0;
+ }
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ atomic64_inc(&ptp->stats.ts_err);
+ return -ENOSPC;
+}
+
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_ptp_tx_req *txts_req;
- if (ptp->tx_skb) {
- netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
- return -EBUSY;
- }
- ptp->tx_skb = skb;
+ txts_req = &ptp->txts_req[prod];
+ txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo);
+ /* make sure abs_txts_tmo is written first */
+ smp_wmb();
+ txts_req->tx_skb = skb;
ptp_schedule_worker(ptp->ptp_clock, 0);
- return 0;
}
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
@@ -766,6 +811,38 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
return 0;
}
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp)
+{
+ struct skb_shared_hwtstamps timestamp = {};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 opaque = tscmp->tx_ts_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ u64 ts, ns;
+ u16 cons;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ ts = BNXT_GET_TX_TS_48B_NS(tscmp);
+ cons = TX_OPAQUE_IDX(opaque);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
+ if (tx_buf->is_ts_pkt) {
+ if (BNXT_TX_TS_ERR(tscmp)) {
+ netdev_err(bp->dev,
+ "timestamp completion error 0x%x 0x%x\n",
+ le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
+ le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
+ } else {
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+ timestamp.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(tx_buf->skb, &timestamp);
+ }
+ tx_buf->is_ts_pkt = 0;
+ }
+}
+
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -912,7 +989,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
return rc;
} else {
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
- &ns, 0);
+ &ns, 0, 0);
if (rc)
return rc;
}
@@ -952,8 +1029,9 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_free(bp);
- atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+ WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
+ spin_lock_init(&ptp->ptp_tx_lock);
if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
@@ -979,7 +1057,12 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err;
goto out;
}
- if (BNXT_CHIP_P5(bp)) {
+
+ ptp->stats.ts_pkts = 0;
+ ptp->stats.ts_lost = 0;
+ atomic64_set(&ptp->stats.ts_err, 0);
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
spin_lock_bh(&ptp->ptp_lock);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
@@ -998,6 +1081,7 @@ out:
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ int i;
if (!ptp)
return;
@@ -1009,9 +1093,12 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- if (ptp->tx_skb) {
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
+ for (i = 0; i < BNXT_MAX_TX_TS; i++) {
+ if (ptp->txts_req[i].tx_skb) {
+ dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
+ ptp->txts_req[i].tx_skb = NULL;
+ }
}
+
bnxt_unmap_ptp_regs(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 2c3415c8fc03..a9a2f9a18c9c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -79,6 +79,22 @@ struct bnxt_pps {
struct pps_pin pins[BNXT_MAX_TSIO_PINS];
};
+struct bnxt_ptp_stats {
+ u64 ts_pkts;
+ u64 ts_lost;
+ atomic64_t ts_err;
+};
+
+#define BNXT_MAX_TX_TS 4
+#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
+
+struct bnxt_ptp_tx_req {
+ struct sk_buff *tx_skb;
+ u16 tx_seqid;
+ u16 tx_hdr_off;
+ unsigned long abs_txts_tmo;
+};
+
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -87,7 +103,8 @@ struct bnxt_ptp_cfg {
struct bnxt_pps pps_info;
/* serialize timecounter access */
spinlock_t ptp_lock;
- struct sk_buff *tx_skb;
+ /* serialize ts tx request queuing */
+ spinlock_t ptp_tx_lock;
u64 current_time;
u64 old_time;
unsigned long next_period;
@@ -96,11 +113,10 @@ struct bnxt_ptp_cfg {
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
- u16 tx_seqid;
- u16 tx_hdr_off;
+ struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS];
+
struct bnxt *bp;
- atomic_t tx_avail;
-#define BNXT_MAX_TX_TS 1
+ u32 tx_avail;
u16 rxctl;
#define BNXT_PTP_MSG_SYNC (1 << 0)
#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
@@ -117,14 +133,16 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
- u8 txts_pending:1;
int rx_filter;
u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
u32 txts_tmo;
- unsigned long abs_txts_tmo;
+ u16 txts_prod;
+ u16 txts_cons;
+
+ struct bnxt_ptp_stats stats;
};
#if BITS_PER_LONG == 32
@@ -139,6 +157,13 @@ do { \
((dst) = READ_ONCE(src))
#endif
+#define BNXT_PTP_INC_TX_AVAIL(ptp) \
+do { \
+ spin_lock_bh(&(ptp)->ptp_tx_lock); \
+ (ptp)->tx_avail++; \
+ spin_unlock_bh(&(ptp)->ptp_tx_lock); \
+} while (0)
+
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
@@ -146,8 +171,11 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp);
void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
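Illustrative sketch (not part of the patch): how the new four-slot TX timestamp ring declared above is meant to be driven from the transmit path — reserve a slot, then arm it with the skb. The sequence-id/header-offset bookkeeping and the completion path are omitted; example_queue_tx_timestamp() and its caller-supplied bp/skb are hypothetical, while bnxt_ptp_get_txts_prod() and bnxt_get_tx_ts_p5() are the helpers declared above.

static int example_queue_tx_timestamp(struct bnxt *bp, struct sk_buff *skb)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	u16 prod;

	/* Reserve one of the BNXT_MAX_TX_TS slots; on failure the
	 * helper has already bumped the ts_err counter.
	 */
	if (bnxt_ptp_get_txts_prod(ptp, &prod))
		return -ENOSPC;

	/* Stores the skb in txts_req[prod] and kicks the PTP aux worker,
	 * which queries the timestamp and calls skb_tstamp_tx().
	 */
	bnxt_get_tx_ts_p5(bp, skb, prod);
	return 0;
}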
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 175192ebaa77..22898d3d088b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1589a49b876c..0ec5f01551f9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6141,7 +6141,7 @@ static void tg3_refclk_write(struct tg3 *tp, u64 newval)
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
-static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct tg3 *tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index a5ebd7110e07..986f43d27711 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -416,7 +416,7 @@ struct bna_ib {
/* Tx object */
/* Tx datapath control structure */
-#define BNA_Q_NAME_SIZE 16
+#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6)
struct bna_tcb {
/* Fast path */
void **sw_qpt;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index fe121d36112d..ece6f3b48327 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1534,8 +1534,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
for (i = 0; i < num_txqs; i++) {
vector_num = tx_info->tcb[i]->intr_vector;
- sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
- tx_id + tx_info->tcb[i]->id);
+ snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
+ bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_tx, 0,
tx_info->tcb[i]->name,
@@ -1585,9 +1586,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
for (i = 0; i < num_rxps; i++) {
vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
- sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
- bnad->netdev->name,
- rx_id + rx_info->rx_ctrl[i].ccb->id);
+ snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
+ "%s CQ %d", bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_rx, 0,
rx_info->rx_ctrl[i].ccb->name,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index aa5700ac9c00..ea71612f6b36 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -645,6 +645,10 @@
#define GEM_T2OFST_OFFSET 0 /* offset value */
#define GEM_T2OFST_SIZE 7
+/* Bitfields in queue pointer registers */
+#define MACB_QUEUE_DISABLE_OFFSET 0 /* disable queue */
+#define MACB_QUEUE_DISABLE_SIZE 1
+
/* Offset for screener type 2 compare values (T2CMPOFST).
* Note the offset is applied after the specified point,
* e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
@@ -733,6 +737,7 @@
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_MIIONRGMII 0x00000200
#define MACB_CAPS_NEED_TSUCLK 0x00000400
+#define MACB_CAPS_QUEUE_DISABLE 0x00000800
#define MACB_CAPS_PCS 0x01000000
#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
@@ -1163,7 +1168,7 @@ struct macb_ptp_info {
s32 (*get_ptp_max_adj)(void);
unsigned int (*get_tsu_rate)(struct macb *bp);
int (*get_ts_info)(struct net_device *dev,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_hwtst)(struct net_device *netdev,
struct kernel_hwtstamp_config *tstamp_config);
int (*set_hwtst)(struct net_device *netdev,
@@ -1254,6 +1259,8 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
+ struct macb_dma_desc *rx_ring_tieoff;
+ dma_addr_t rx_ring_tieoff_dma;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1299,6 +1306,7 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
+ u32 wolopts;
/* holds value of rx watermark value for pbuf_rxcutthru register */
u32 rx_watermark;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 241ce9a2fa99..11665be3a22c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -38,6 +38,7 @@
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/inetdevice.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -84,8 +85,7 @@ struct sifive_fu540_macb_mgmt {
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
+#define MACB_WOL_ENABLED BIT(0)
#define HS_SPEED_10000M 4
#define MACB_SERDES_RATE_10G 1
@@ -2477,6 +2477,12 @@ static void macb_free_consistent(struct macb *bp)
unsigned int q;
int size;
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
+
bp->macbgem_ops.mog_free_rx_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2568,6 +2574,16 @@ static int macb_alloc_consistent(struct macb *bp)
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ /* Required for tie off descriptor for PM cases */
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
return 0;
out_err:
@@ -2575,6 +2591,19 @@ out_err:
return -ENOMEM;
}
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *desc = bp->rx_ring_tieoff;
+
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
+ return;
+ /* Setup a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ desc->ctrl = 0;
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2598,6 +2627,7 @@ static void gem_init_rings(struct macb *bp)
gem_rx_refill(queue);
}
+ macb_init_tieoff(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -2615,6 +2645,8 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
@@ -3246,13 +3278,11 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- phylink_ethtool_get_wol(bp->phylink, wol);
- wol->supported |= WAKE_MAGIC;
+ phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= (WAKE_MAGIC | WAKE_ARP);
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
+ /* Add macb wolopts to phy wolopts */
+ wol->wolopts |= bp->wolopts;
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -3262,22 +3292,15 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* Pass the order to phylink layer */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- /* Don't manage WoL on MAC if handled by the PHY
- * or if there's a failure in talking to the PHY
- */
- if (!ret || ret != -EOPNOTSUPP)
+ /* Don't manage WoL on MAC, if PHY set_wol() fails */
+ if (ret && ret != -EOPNOTSUPP)
return ret;
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
+ bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0;
+ bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0;
+ bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0;
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
return 0;
}
@@ -3376,7 +3399,7 @@ static s32 gem_get_ptp_max_adj(void)
}
static int gem_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(dev);
@@ -3417,7 +3440,7 @@ static struct macb_ptp_info gem_ptp_info = {
#endif
static int macb_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(netdev);
@@ -4917,7 +4940,8 @@ static const struct macb_config sama7g5_emac_config = {
static const struct macb_config versal_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK |
+ MACB_CAPS_QUEUE_DISABLE,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = init_reset_optional,
@@ -5053,9 +5077,7 @@ static int macb_probe(struct platform_device *pdev)
bp->max_tx_length = GEM_MAX_TX_LEN;
bp->wol = 0;
- if (of_property_read_bool(np, "magic-packet"))
- bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, 1);
bp->usrio = macb_config->usrio;
@@ -5211,10 +5233,13 @@ static int __maybe_unused macb_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
+ struct in_ifaddr *ifa = NULL;
struct macb_queue *queue;
+ struct in_device *idev;
unsigned long flags;
unsigned int q;
int err;
+ u32 tmp;
if (!device_may_wakeup(&bp->dev->dev))
phy_exit(bp->sgmii_phy);
@@ -5223,18 +5248,54 @@ static int __maybe_unused macb_suspend(struct device *dev)
return 0;
if (bp->wol & MACB_WOL_ENABLED) {
+ /* Check for IP address in WOL ARP mode */
+ idev = __in_dev_get_rcu(bp->dev);
+ if (idev && idev->ifa_list)
+ ifa = rcu_access_pointer(idev->ifa_list);
+ if ((bp->wolopts & WAKE_ARP) && !ifa) {
+ netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
+ return -EOPNOTSUPP;
+ }
spin_lock_irqsave(&bp->lock, flags);
- /* Flush all status bits */
- macb_writel(bp, TSR, -1);
- macb_writel(bp, RSR, -1);
+
+ /* Disable Tx and Rx engines before disabling the queues,
+ * this is mandatory as per the IP spec sheet
+ */
+ tmp = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
+ /* Disable RX queues */
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE) {
+ queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE));
+ } else {
+ /* Tie off RX queues */
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue_writel(queue, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
+ }
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
queue_readl(queue, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, -1);
}
+ /* Enable Receive engine */
+ macb_writel(bp, NCR, tmp | MACB_BIT(RE));
+ /* Flush all status bits */
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+
+ tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0;
+ if (bp->wolopts & WAKE_ARP) {
+ tmp |= MACB_BIT(ARP);
+ /* write IP address into register */
+ tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
+ }
+
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
@@ -5250,7 +5311,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, GEM_BIT(WOL));
- gem_writel(bp, WOL, MACB_BIT(MAG));
+ gem_writel(bp, WOL, tmp);
} else {
err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
@@ -5262,7 +5323,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
+ macb_writel(bp, WOL, tmp);
}
spin_unlock_irqrestore(&bp->lock, flags);
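Usage note (not part of the patch): with the reworked wolopts handling above, wake-on-ARP can be requested alongside magic-packet wake through the standard ethtool interface, e.g. "ethtool -s eth0 wol ag"; the suspend path then rejects WAKE_ARP with -EOPNOTSUPP if the interface has no IPv4 address to program into the WOL register.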
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index d3e07b6ed5e1..5835965dbc32 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -2497,7 +2497,7 @@ ret_intrmod:
}
static int lio_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct lio *lio = GET_LIO(netdev);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 34f02a8ec2ca..1d79f6eaa41f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,12 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct oct_link_status_resp {
- u64 rh;
- struct oct_link_info link_info;
- u64 status;
-};
-
struct oct_timestamp_resp {
u64 rh;
u64 timestamp;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index 96c6ea12279f..989b4ddae342 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
}
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset + MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0d6ee30affb9..eef12fdd246d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -30,11 +30,6 @@
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
-struct niclist {
- struct list_head list;
- void *ptr;
-};
-
struct __dispatch {
struct list_head list;
struct octeon_recv_info *rinfo;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 34125b8cd935..6a04d2530176 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -836,7 +836,7 @@ static int nicvf_set_pauseparam(struct net_device *dev,
}
static int nicvf_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct nicvf *nic = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a317feb8decb..a40c266c37f2 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -54,7 +54,7 @@ struct lmac {
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
- struct net_device netdev;
+ struct net_device *netdev;
struct phy_device *phydev;
unsigned int last_duplex;
unsigned int last_link;
@@ -590,10 +590,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
- struct lmac *lmac = container_of(netdev, struct lmac, netdev);
struct phy_device *phydev;
+ struct lmac *lmac, **priv;
int link_changed = 0;
+ priv = netdev_priv(netdev);
+ lmac = *priv;
phydev = lmac->phydev;
if (!phydev->link && lmac->last_link)
@@ -1052,12 +1054,18 @@ static int phy_interface_mode(u8 lmac_type)
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
- struct lmac *lmac;
+ struct lmac *lmac, **priv;
u64 cfg;
lmac = &bgx->lmac[lmacid];
lmac->bgx = bgx;
+ lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
+ if (!lmac->netdev)
+ return -ENOMEM;
+ priv = netdev_priv(lmac->netdev);
+ *priv = lmac;
+
if ((lmac->lmac_type == BGX_MODE_SGMII) ||
(lmac->lmac_type == BGX_MODE_QSGMII) ||
(lmac->lmac_type == BGX_MODE_RGMII)) {
@@ -1116,7 +1124,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
lmac->phydev->dev_flags = 0;
- if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ if (phy_connect_direct(lmac->netdev, lmac->phydev,
bgx_lmac_handler,
phy_interface_mode(lmac->lmac_type)))
return -ENODEV;
@@ -1183,6 +1191,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
(lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
+ free_netdev(lmac->netdev);
lmac->phydev = NULL;
}
@@ -1414,7 +1423,7 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
+ SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
bgx->acpi_lmac_idx++; /* move to next LMAC */
@@ -1483,7 +1492,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
of_get_mac_address(node, bgx->lmac[lmac].mac);
- SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
phy_np = of_parse_phandle(node, "phy-handle", 0);
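Illustrative sketch (not part of the patch): the dummy-netdev idiom the change above adopts — allocate a minimal net_device whose private area holds nothing but a back-pointer, so callbacks handed a netdev can recover the owning context with netdev_priv(). struct my_ctx and example_attach_dummy() are placeholders for a driver context such as struct lmac.

struct my_ctx;	/* stand-in for the owning driver structure */

static struct net_device *example_attach_dummy(struct my_ctx *ctx)
{
	struct net_device *nd = alloc_netdev_dummy(sizeof(struct my_ctx *));
	struct my_ctx **priv;

	if (!nd)
		return NULL;
	priv = netdev_priv(nd);
	*priv = ctx;		/* recovered later via netdev_priv() */
	return nd;		/* released with free_netdev() on teardown */
}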
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 47eecde36285..3d091947ae00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1550,7 +1550,7 @@ out_free_fw:
return ret;
}
-static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts_info)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 887876f35f10..84b300fee2bb 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -554,6 +554,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
return 0;
}
+MODULE_DESCRIPTION("Macintosh CS89x0-based Ethernet driver");
MODULE_LICENSE("GPL");
static void mac89x0_device_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 241906697019..f2f1055880b2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -599,7 +599,7 @@ static int enic_set_rxfh(struct net_device *netdev,
}
static int enic_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
@@ -608,6 +608,28 @@ static int enic_get_ts_info(struct net_device *netdev,
return 0;
}
+static void enic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ channels->max_rx = ENIC_RQ_MAX;
+ channels->max_tx = ENIC_WQ_MAX;
+ channels->rx_count = enic->rq_count;
+ channels->tx_count = enic->wq_count;
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ case VNIC_DEV_INTR_MODE_INTX:
+ channels->max_combined = 1;
+ channels->combined_count = 1;
+ break;
+ default:
+ break;
+ }
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
@@ -632,6 +654,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.set_rxfh = enic_set_rxfh,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
+ .get_channels = enic_get_channels,
};
void enic_set_ethtool_ops(struct net_device *netdev)
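Usage note (not part of the patch): the new get_channels hook exposes the queue counts through the standard channels query, e.g. "ethtool -l eth0"; in MSI/INTx mode the device is reported as a single combined channel, as the switch statement above shows.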
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f604119efc80..5f26fc3ad655 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
+ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_NAME;
memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
+ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_INSTANCE;
memcpy(pp->instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
+ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_HOST;
memcpy(pp->host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5f0c9e1771db..73e1c71c5092 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -79,7 +79,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM)
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
/**
* struct gmac_queue_page - page buffer per-page info
@@ -287,13 +288,13 @@ static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
spin_unlock_irqrestore(&port->config_lock, flags);
}
-static void gmac_speed_set(struct net_device *netdev)
+static void gmac_adjust_link(struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
union gmac_status status, old_status;
- int pause_tx = 0;
- int pause_rx = 0;
+ bool pause_tx = false;
+ bool pause_rx = false;
status.bits32 = readl(port->gmac_base + GMAC_STATUS);
old_status.bits32 = status.bits32;
@@ -328,14 +329,9 @@ static void gmac_speed_set(struct net_device *netdev)
}
if (phydev->duplex == DUPLEX_FULL) {
- u16 lcladv = phy_read(phydev, MII_ADVERTISE);
- u16 rmtadv = phy_read(phydev, MII_LPA);
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
-
- if (cap & FLOW_CTRL_RX)
- pause_rx = 1;
- if (cap & FLOW_CTRL_TX)
- pause_tx = 1;
+ phy_get_pause(phydev, &pause_tx, &pause_rx);
+ netdev_dbg(netdev, "set negotiated pause params pause TX = %s, pause RX = %s\n",
+ pause_tx ? "ON" : "OFF", pause_rx ? "ON" : "OFF");
}
gmac_set_flow_control(netdev, pause_tx, pause_rx);
@@ -366,7 +362,7 @@ static int gmac_setup_phy(struct net_device *netdev)
phy = of_phy_get_and_connect(netdev,
dev->of_node,
- gmac_speed_set);
+ gmac_adjust_link);
if (!phy)
return -ENODEV;
netdev->phydev = phy;
@@ -1148,13 +1144,25 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
skb_frag_t *skb_frag;
dma_addr_t mapping;
void *buffer;
+ u16 mss;
int ret;
- /* TODO: implement proper TSO using MTU in word3 */
word1 = skb->len;
word3 = SOF_BIT;
- if (skb->len >= ETH_FRAME_LEN) {
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ /* This means we are dealing with TCP and skb->len is the
+ * sum total of all the segments. The TSO will deal with
+ * chopping this up for us.
+ */
+ /* The accelerator needs the full frame size here */
+ mss += skb_tcp_all_headers(skb);
+ netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n",
+ mss, skb->len);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
+ } else if (skb->len >= ETH_FRAME_LEN) {
/* Hardware offloaded checksumming isn't working on frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
@@ -1169,7 +1177,9 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
return ret;
}
word1 |= TSS_BYPASS_BIT;
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
int tcp = 0;
/* We do not switch off the checksumming on non TCP/UDP
@@ -2116,6 +2126,19 @@ static void gmac_get_pauseparam(struct net_device *netdev,
pparam->autoneg = true;
}
+static int gmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!pparam->autoneg)
+ return -EOPNOTSUPP;
+
+ phy_set_asym_pause(phydev, pparam->rx_pause, pparam->tx_pause);
+
+ return 0;
+}
+
static void gmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
@@ -2236,6 +2259,7 @@ static const struct ethtool_ops gmac_351x_ethtool_ops = {
.set_link_ksettings = gmac_set_ksettings,
.nway_reset = gmac_nway_reset,
.get_pauseparam = gmac_get_pauseparam,
+ .set_pauseparam = gmac_set_pauseparam,
.get_ringparam = gmac_get_ringparam,
.set_ringparam = gmac_set_ringparam,
.get_coalesce = gmac_get_coalesce,
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 65ec1abc9442..9aa286ba1f00 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -305,7 +305,7 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev,
}
static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index baa0b3c2ce6f..cfe6b57b1da0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -371,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
+ int num_txqs_per_tc = dpaa_num_txqs_per_tc();
struct tc_mqprio_qopt *mqprio = type_data;
u8 num_tc;
int i;
@@ -398,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
netdev_set_num_tc(net_dev, num_tc);
for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
- i * DPAA_TC_TXQ_NUM);
+ netdev_set_tc_queue(net_dev, i, num_txqs_per_tc,
+ i * num_txqs_per_tc);
out:
priv->num_tc = num_tc ? : 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc);
return 0;
}
@@ -649,7 +650,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 6;
break;
case FQ_TYPE_TX:
- switch (idx / DPAA_TC_TXQ_NUM) {
+ switch (idx / dpaa_num_txqs_per_tc()) {
case 0:
/* Low priority (best effort) */
fq->wq = 6;
@@ -667,8 +668,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 0;
break;
default:
- WARN(1, "Too many TX FQs: more than %d!\n",
- DPAA_ETH_TXQ_NUM);
+ WARN(1, "Too many TX FQs: more than %zu!\n",
+ dpaa_max_num_txqs());
}
break;
default:
@@ -740,7 +741,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->rx_pcdq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list,
+ FQ_TYPE_TX_CONF_MQ))
goto fq_alloc_failed;
dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
@@ -755,7 +757,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->tx_defq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX))
goto fq_alloc_failed;
return 0;
@@ -931,14 +933,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
}
}
-static void dpaa_fq_setup(struct dpaa_priv *priv,
- const struct dpaa_fq_cbs *fq_cbs,
- struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
{
int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
const cpumask_t *affine_cpus = qman_affine_cpus();
- u16 channels[NR_CPUS];
struct dpaa_fq *fq;
+ u16 *channels;
+
+ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
channels[num_portals++] = qman_affine_channel(cpu);
@@ -965,11 +971,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
case FQ_TYPE_TX:
dpaa_setup_egress(priv, fq, tx_port,
&fq_cbs->egress_ern);
- /* If we have more Tx queues than the number of cores,
- * just ignore the extra ones.
- */
- if (egress_cnt < DPAA_ETH_TXQ_NUM)
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
break;
case FQ_TYPE_TX_CONF_MQ:
priv->conf_fqs[conf_cnt++] = &fq->fq_base;
@@ -987,16 +989,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
}
}
- /* Make sure all CPUs receive a corresponding Tx queue. */
- while (egress_cnt < DPAA_ETH_TXQ_NUM) {
- list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
- if (fq->fq_type != FQ_TYPE_TX)
- continue;
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- if (egress_cnt == DPAA_ETH_TXQ_NUM)
- break;
- }
- }
+ kfree(channels);
+
+ return 0;
}
static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -1004,7 +999,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
{
int i;
- for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ for (i = 0; i < dpaa_max_num_txqs(); i++)
if (priv->egress_fqs[i] == tx_fq)
return i;
@@ -3324,7 +3319,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Allocate this early, so we can store relevant information in
* the private area
*/
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs());
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
return -ENOMEM;
@@ -3339,6 +3334,22 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
+ priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->egress_fqs),
+ GFP_KERNEL);
+ if (!priv->egress_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
+ priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->conf_fqs),
+ GFP_KERNEL);
+ if (!priv->conf_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
@@ -3416,7 +3427,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
*/
dpaa_eth_add_channel(priv->channel, &pdev->dev);
- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ if (err)
+ goto free_dpaa_bps;
/* Create a congestion group for this netdev, with
* dynamically-allocated CGR ID.
@@ -3462,7 +3475,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
}
priv->num_tc = 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev,
+ priv->num_tc * dpaa_num_txqs_per_tc());
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index ac3c8ed57bbe..7ed659eb08de 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -18,10 +18,6 @@
/* Number of prioritised traffic classes */
#define DPAA_TC_NUM 4
-/* Number of Tx queues per traffic class */
-#define DPAA_TC_TXQ_NUM NR_CPUS
-/* Total number of Tx queues */
-#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
@@ -142,8 +138,8 @@ struct dpaa_priv {
struct mac_device *mac_dev;
struct device *rx_dma_dev;
struct device *tx_dma_dev;
- struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
- struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq **egress_fqs;
+ struct qman_fq **conf_fqs;
u16 channel;
struct list_head dpaa_fq_list;
@@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops;
/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
+
+static inline size_t dpaa_num_txqs_per_tc(void)
+{
+ return num_possible_cpus();
+}
+
+/* Total number of Tx queues */
+static inline size_t dpaa_max_num_txqs(void)
+{
+ return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
+}
+
#endif /* __DPAA_H */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 4fee74c024bd..aad470e9caea 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
u32 last_fqid = 0;
ssize_t bytes = 0;
char *str;
- int i = 0;
list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
switch (fq->fq_type) {
@@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
prev = fq;
prevstr = str;
- i++;
}
if (prev) {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 5bd0b36d1feb..b0060cf96090 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -394,7 +394,7 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
static int dpaa_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct device *dev = net_dev->dev.parent;
struct device_node *mac_node = dev->of_node;
@@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
struct netlink_ext_ack *extack)
{
const cpumask_t *cpus = qman_affine_cpus();
- bool needs_revert[NR_CPUS] = {false};
struct qman_portal *portal;
u32 period, prev_period;
u8 thresh, prev_thresh;
+ bool *needs_revert;
int cpu, res;
+ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+ if (!needs_revert)
+ return -ENOMEM;
+
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
needs_revert[cpu] = true;
}
+ kfree(needs_revert);
+
return 0;
revert_values:
@@ -498,6 +504,8 @@ revert_values:
qman_dqrr_set_ithresh(portal, prev_thresh);
}
+ kfree(needs_revert);
+
return res;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index e80e9388c71f..7f476519b7ad 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -794,7 +794,7 @@ int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
if (!dpaa2_ptp)
return ethtool_op_get_ts_info(dev, info);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index f7753ea5b57e..5e684b23c5f5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -841,7 +841,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
}
static int enetc_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
int *phc_idx;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a72d8a2eb0b3..a923cb95cdc6 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1361,6 +1361,12 @@ fec_stop(struct net_device *ndev)
writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
+
+ if (fep->bufdesc_ex) {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= FEC_ECR_EN1588;
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
}
static void
@@ -2762,7 +2768,7 @@ static void fec_enet_get_regs(struct net_device *ndev,
}
static int fec_enet_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4130,6 +4136,14 @@ free_queue_mem:
return ret;
}
+static void fec_enet_deinit(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ netif_napi_del(&fep->napi);
+ fec_enet_free_queue(ndev);
+}
+
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
@@ -4524,6 +4538,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
+ fec_enet_deinit(ndev);
failed_init:
fec_ptp_stop(pdev);
failed_reset:
@@ -4587,6 +4602,7 @@ fec_drv_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ fec_enet_deinit(ndev);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 92b8f4ab26f1..796e6f4e583d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1066,7 +1066,6 @@ int memac_initialization(struct mac_device *mac_dev,
struct fman_mac_params *params)
{
int err;
- struct device_node *fixed;
struct phylink_pcs *pcs;
struct fman_mac *memac;
unsigned long capabilities;
@@ -1222,18 +1221,15 @@ int memac_initialization(struct mac_device *mac_dev,
memac->rgmii_no_half_duplex = true;
/* Most boards should use MLO_AN_INBAND, but existing boards don't have
- * a managed property. Default to MLO_AN_INBAND if nothing else is
- * specified. We need to be careful and not enable this if we have a
- * fixed link or if we are using MII or RGMII, since those
- * configurations modes don't use in-band autonegotiation.
+ * a managed property. Default to MLO_AN_INBAND rather than MLO_AN_PHY.
+ * Phylink will allow this to be overridden by a fixed link. We need to
+ * be careful and not enable this if we are using MII or RGMII, since
+ * those configuration modes don't use in-band autonegotiation.
*/
- fixed = of_get_child_by_name(mac_node, "fixed-link");
- if (!fixed && !of_property_read_bool(mac_node, "fixed-link") &&
- !of_property_read_bool(mac_node, "managed") &&
+ if (!of_property_read_bool(mac_node, "managed") &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
- mac_dev->phylink_config.ovr_an_inband = true;
- of_node_put(fixed);
+ mac_dev->phylink_config.default_an_inband = true;
err = memac_init(mac_dev->fman_mac);
if (err < 0)
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 7a15b9245698..f581402ad740 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1448,7 +1448,7 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
}
static int gfar_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct gfar_private *priv = netdev_priv(dev);
struct platform_device *ptp_dev;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 4edd0adfc6c7..7f081e6e8c87 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -1040,7 +1040,7 @@ static int fun_set_rxfh(struct net_device *netdev,
}
static int fun_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index b9a6be76531b..9ed07080b38a 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,4 +1,4 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o
+gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index ae1e21c9b0a5..84ac004d3953 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#ifndef _GVE_H_
@@ -60,6 +60,11 @@
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
+#define GVE_FLOW_RULES_CACHE_SIZE \
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
+#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -678,6 +683,39 @@ enum gve_queue_format {
GVE_DQO_QPL_FORMAT = 0x4,
};
+struct gve_flow_spec {
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ union {
+ struct {
+ __be16 src_port;
+ __be16 dst_port;
+ };
+ __be32 spi;
+ };
+ union {
+ u8 tos;
+ u8 tclass;
+ };
+};
+
+struct gve_flow_rule {
+ u32 location;
+ u16 flow_type;
+ u16 action;
+ struct gve_flow_spec key;
+ struct gve_flow_spec mask;
+};
+
+struct gve_flow_rules_cache {
+ bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
+ struct gve_adminq_queried_flow_rule *rules_cache;
+ __be32 *rule_ids_cache;
+ /* The total number of queried rules that are stored in the caches */
+ u32 rules_cache_num;
+ u32 rule_ids_cache_num;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -724,6 +762,7 @@ struct gve_priv {
union gve_adminq_command *adminq;
dma_addr_t adminq_bus_addr;
struct dma_pool *adminq_pool;
+ struct mutex adminq_lock; /* Protects adminq command execution */
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
@@ -743,6 +782,8 @@ struct gve_priv {
u32 adminq_report_link_speed_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
+ u32 adminq_query_flow_rules_cnt;
+ u32 adminq_cfg_flow_rule_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -785,6 +826,11 @@ struct gve_priv {
u16 header_buf_size; /* device configured, header-split supported if non-zero */
bool header_split_enabled; /* True if the header split is enabled by the user */
+
+ u32 max_flow_rules;
+ u32 num_flow_rules;
+
+ struct gve_flow_rules_cache flow_rules_cache;
};
enum gve_service_task_flags_bit {
@@ -1124,6 +1170,12 @@ int gve_adjust_config(struct gve_priv *priv,
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
+/* flow steering rule */
+int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
+int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_flow_rules_reset(struct gve_priv *priv);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 8ca0def176ef..c5bbc1b7524e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -189,6 +190,23 @@ void gve_parse_device_option(struct gve_priv *priv,
if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
priv->default_min_ring_size = true;
break;
+ case GVE_DEV_OPT_ID_FLOW_STEERING:
+ if (option_length < sizeof(**dev_op_flow_steering) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Flow Steering",
+ (int)sizeof(**dev_op_flow_steering),
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_flow_steering))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Flow Steering");
+ *dev_op_flow_steering = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -208,6 +226,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -230,7 +249,7 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
- dev_op_modify_ring);
+ dev_op_flow_steering, dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -268,6 +287,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
priv->adminq_get_ptype_map_cnt = 0;
+ priv->adminq_query_flow_rules_cnt = 0;
+ priv->adminq_cfg_flow_rule_cnt = 0;
/* Setup Admin queue with the device */
if (priv->pdev->revision < 0x1) {
@@ -284,6 +305,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
&priv->reg_bar0->adminq_base_address_lo);
iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
}
+ mutex_init(&priv->adminq_lock);
gve_set_admin_queue_ok(priv);
return 0;
}
@@ -460,6 +482,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
+ if (opcode == GVE_ADMINQ_EXTENDED_COMMAND)
+ opcode = be32_to_cpu(cmd->extended_command.inner_opcode);
switch (opcode) {
case GVE_ADMINQ_DESCRIBE_DEVICE:
@@ -504,6 +528,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
priv->adminq_verify_driver_compatibility_cnt++;
break;
+ case GVE_ADMINQ_QUERY_FLOW_RULES:
+ priv->adminq_query_flow_rules_cnt++;
+ break;
+ case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
+ priv->adminq_cfg_flow_rule_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -511,28 +541,58 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- * The caller is also responsible for making sure there are no commands
- * waiting to be executed.
- */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
u32 tail, head;
int err;
+ mutex_lock(&priv->adminq_lock);
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
- if (tail != head)
- // This is not a valid path
- return -EINVAL;
+ if (tail != head) {
+ err = -EINVAL;
+ goto out;
+ }
err = gve_adminq_issue_cmd(priv, cmd_orig);
if (err)
- return err;
+ goto out;
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
+}
+
+static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
+ size_t cmd_size, void *cmd_orig)
+{
+ union gve_adminq_command cmd;
+ dma_addr_t inner_cmd_bus;
+ void *inner_cmd;
+ int err;
+
+ inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size,
+ &inner_cmd_bus, GFP_KERNEL);
+ if (!inner_cmd)
+ return -ENOMEM;
+
+ memcpy(inner_cmd, cmd_orig, cmd_size);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND);
+ cmd.extended_command = (struct gve_adminq_extended_command) {
+ .inner_opcode = cpu_to_be32(opcode),
+ .inner_length = cpu_to_be32(cmd_size),
+ .inner_command_addr = cpu_to_be64(inner_cmd_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+
+ dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus);
+ return err;
}
/* The device specifies that the management vector can either be the first irq
@@ -805,6 +865,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
*dev_op_buffer_sizes,
+ const struct gve_device_option_flow_steering
+ *dev_op_flow_steering,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -857,10 +919,23 @@ static void gve_enable_supported_features(struct gve_priv *priv,
priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
}
}
+
+ if (dev_op_flow_steering &&
+ (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
+ if (dev_op_flow_steering->max_flow_rules) {
+ priv->max_flow_rules =
+ be32_to_cpu(dev_op_flow_steering->max_flow_rules);
+ priv->dev->hw_features |= NETIF_F_NTUPLE;
+ dev_info(&priv->pdev->dev,
+ "FLOW STEERING device option enabled with max rule limit of %u.\n",
+ priv->max_flow_rules);
+ }
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
@@ -897,6 +972,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_gqi_qpl, &dev_op_dqo_rda,
&dev_op_jumbo_frames, &dev_op_dqo_qpl,
&dev_op_buffer_sizes,
+ &dev_op_flow_steering,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -958,7 +1034,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
- dev_op_buffer_sizes, dev_op_modify_ring);
+ dev_op_buffer_sizes, dev_op_flow_steering,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1121,3 +1198,130 @@ err:
ptype_map_bus);
return err;
}
+
+static int
+gve_adminq_configure_flow_rule(struct gve_priv *priv,
+ struct gve_adminq_configure_flow_rule *flow_rule_cmd)
+{
+ int err = gve_adminq_execute_extended_cmd(priv,
+ GVE_ADMINQ_CONFIGURE_FLOW_RULE,
+ sizeof(struct gve_adminq_configure_flow_rule),
+ flow_rule_cmd);
+
+ if (err) {
+ dev_err(&priv->pdev->dev, "Timeout to configure the flow rule, trigger reset");
+ gve_reset(priv, true);
+ } else {
+ priv->flow_rules_cache.rules_cache_synced = false;
+ }
+
+ return err;
+}
+
+int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD),
+ .location = cpu_to_be32(loc),
+ .rule = *rule,
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_DEL),
+ .location = cpu_to_be32(loc),
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+int gve_adminq_reset_flow_rules(struct gve_priv *priv)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_RESET),
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+/* In the DMA memory that the driver allocated for the device to query the flow rules, the device
+ * first writes a struct gve_query_flow_rules_descriptor. Immediately after it, the device
+ * writes an array of rules or rule ids, with the count specified in the descriptor.
+ * For GVE_FLOW_RULE_QUERY_STATS, the device writes only the descriptor.
+ */
+static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode,
+ struct gve_query_flow_rules_descriptor *descriptor)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+ u32 num_queried_rules, total_memory_len, rule_info_len;
+ void *rule_info;
+
+ total_memory_len = be32_to_cpu(descriptor->total_length);
+ num_queried_rules = be32_to_cpu(descriptor->num_queried_rules);
+ rule_info = (void *)(descriptor + 1);
+
+ switch (query_opcode) {
+ case GVE_FLOW_RULE_QUERY_RULES:
+ rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rules_cache);
+ if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
+ dev_err(&priv->dev->dev, "flow rules query is out of memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy(flow_rules_cache->rules_cache, rule_info, rule_info_len);
+ flow_rules_cache->rules_cache_num = num_queried_rules;
+ break;
+ case GVE_FLOW_RULE_QUERY_IDS:
+ rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rule_ids_cache);
+ if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
+ dev_err(&priv->dev->dev, "flow rule ids query is out of memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy(flow_rules_cache->rule_ids_cache, rule_info, rule_info_len);
+ flow_rules_cache->rule_ids_cache_num = num_queried_rules;
+ break;
+ case GVE_FLOW_RULE_QUERY_STATS:
+ priv->num_flow_rules = be32_to_cpu(descriptor->num_flow_rules);
+ priv->max_flow_rules = be32_to_cpu(descriptor->max_flow_rules);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc)
+{
+ struct gve_query_flow_rules_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
+ if (!descriptor)
+ return -ENOMEM;
+
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES);
+ cmd.query_flow_rules = (struct gve_adminq_query_flow_rules) {
+ .opcode = cpu_to_be16(query_opcode),
+ .starting_rule_id = cpu_to_be32(starting_loc),
+ .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
+ .rule_descriptor_addr = cpu_to_be64(descriptor_bus),
+ };
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto out;
+
+ err = gve_adminq_process_flow_rules_query(priv, query_opcode, descriptor);
+
+out:
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
+ return err;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index e64f0dbe744d..ed1370c9b197 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -25,6 +25,19 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
+ GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+
+ /* For commands that are larger than 56 bytes */
+ GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
+};
+
+/* A normal adminq command is restricted to 56 bytes at maximum. A longer
+ * adminq command is wrapped in GVE_ADMINQ_EXTENDED_COMMAND, with an inner
+ * opcode from gve_adminq_extended_cmd_opcodes. The inner command itself is
+ * written to separately allocated DMA memory that the extended command
+ * points to.
+ */
+enum gve_adminq_extended_cmd_opcodes {
+ GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101,
};
/* Admin queue status codes */
@@ -143,6 +156,14 @@ struct gve_device_option_modify_ring {
static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+struct gve_device_option_flow_steering {
+ __be32 supported_features_mask;
+ __be32 reserved;
+ __be32 max_flow_rules;
+};
+
+static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -160,6 +181,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
};
enum gve_dev_opt_req_feat_mask {
@@ -171,12 +193,14 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_MODIFY_RING_MASK = 1 << 0,
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -208,6 +232,14 @@ enum gve_driver_capbility {
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
+struct gve_adminq_extended_command {
+ __be32 inner_opcode;
+ __be32 inner_length;
+ __be64 inner_command_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_extended_command) == 16);
+
struct gve_driver_info {
u8 os_type; /* 0x01 = Linux */
u8 driver_major;
@@ -412,6 +444,71 @@ struct gve_adminq_get_ptype_map {
__be64 ptype_map_addr;
};
+/* Flow-steering related definitions */
+enum gve_adminq_flow_rule_cfg_opcode {
+ GVE_FLOW_RULE_CFG_ADD = 0,
+ GVE_FLOW_RULE_CFG_DEL = 1,
+ GVE_FLOW_RULE_CFG_RESET = 2,
+};
+
+enum gve_adminq_flow_rule_query_opcode {
+ GVE_FLOW_RULE_QUERY_RULES = 0,
+ GVE_FLOW_RULE_QUERY_IDS = 1,
+ GVE_FLOW_RULE_QUERY_STATS = 2,
+};
+
+enum gve_adminq_flow_type {
+ GVE_FLOW_TYPE_TCPV4,
+ GVE_FLOW_TYPE_UDPV4,
+ GVE_FLOW_TYPE_SCTPV4,
+ GVE_FLOW_TYPE_AHV4,
+ GVE_FLOW_TYPE_ESPV4,
+ GVE_FLOW_TYPE_TCPV6,
+ GVE_FLOW_TYPE_UDPV6,
+ GVE_FLOW_TYPE_SCTPV6,
+ GVE_FLOW_TYPE_AHV6,
+ GVE_FLOW_TYPE_ESPV6,
+};
+
+/* Flow-steering command */
+struct gve_adminq_flow_rule {
+ __be16 flow_type;
+ __be16 action; /* RX queue id */
+ struct gve_flow_spec key;
+ struct gve_flow_spec mask;
+};
+
+struct gve_adminq_configure_flow_rule {
+ __be16 opcode;
+ u8 padding[2];
+ struct gve_adminq_flow_rule rule;
+ __be32 location;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92);
+
+struct gve_query_flow_rules_descriptor {
+ __be32 num_flow_rules;
+ __be32 max_flow_rules;
+ __be32 num_queried_rules;
+ __be32 total_length;
+};
+
+struct gve_adminq_queried_flow_rule {
+ __be32 location;
+ struct gve_adminq_flow_rule flow_rule;
+};
+
+struct gve_adminq_query_flow_rules {
+ __be16 opcode;
+ u8 padding[2];
+ __be32 starting_rule_id;
+ __be64 available_length; /* The dma memory length that the driver allocated */
+ __be64 rule_descriptor_addr; /* The dma memory address */
+};
+
+static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -432,6 +529,8 @@ union gve_adminq_command {
struct gve_adminq_get_ptype_map get_ptype_map;
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
+ struct gve_adminq_query_flow_rules query_flow_rules;
+ struct gve_adminq_extended_command extended_command;
};
};
u8 reserved[64];
@@ -465,6 +564,10 @@ int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
u64 driver_info_len,
dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);
+int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc);
+int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
+int gve_adminq_reset_flow_rules(struct gve_priv *priv);
+int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index fe1741d482b4..3480ff5c7ed6 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#include <linux/rtnetlink.h>
@@ -74,7 +74,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
- "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt"
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
+ "adminq_query_flow_rules", "adminq_cfg_flow_rule",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -450,6 +451,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_report_stats_cnt;
data[i++] = priv->adminq_report_link_speed_cnt;
data[i++] = priv->adminq_get_ptype_map_cnt;
+ data[i++] = priv->adminq_query_flow_rules_cnt;
+ data[i++] = priv->adminq_cfg_flow_rule_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -772,6 +775,69 @@ static int gve_set_coalesce(struct net_device *netdev,
return 0;
}
+static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!(netdev->features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = gve_add_flow_rule(priv, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = gve_del_flow_rule(priv, cmd);
+ break;
+ case ETHTOOL_SRXFH:
+ err = -EOPNOTSUPP;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->rx_cfg.num_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
+ if (err)
+ return err;
+
+ cmd->rule_cnt = priv->num_flow_rules;
+ cmd->data = priv->max_flow_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = gve_get_flow_rule_entry(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
+ break;
+ case ETHTOOL_GRXFH:
+ err = -EOPNOTSUPP;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -783,6 +849,8 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_msglevel = gve_get_msglevel,
.set_channels = gve_set_channels,
.get_channels = gve_get_channels,
+ .set_rxnfc = gve_set_rxnfc,
+ .get_rxnfc = gve_get_rxnfc,
.get_link = ethtool_op_get_link,
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
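The set_rxnfc/get_rxnfc hooks registered above are reached through the standard ethtool ntuple UAPI rather than any gve-specific interface. A minimal userspace sketch follows; the interface name eth0, destination port 5201, RX queue 2 and rule slot 0 are illustrative assumptions only, and error handling is kept to a minimum.
/* Hedged sketch: steer TCP/IPv4 traffic with destination port 5201 to RX
 * queue 2 of eth0 via ETHTOOL_SRXCLSRLINS. All values are examples.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = { 0 };
	int fd;

	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(5201);	/* match this port ... */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* ... exactly */
	nfc.fs.ring_cookie = 2;				/* target RX queue */
	nfc.fs.location = 0;				/* rule slot */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SRXCLSRLINS");
		return 1;
	}
	return 0;
}
In the driver, gve_set_rxnfc() forwards ETHTOOL_SRXCLSRLINS to gve_add_flow_rule(), which translates the spec into a gve_adminq_flow_rule and programs it through the extended admin queue command.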
diff --git a/drivers/net/ethernet/google/gve/gve_flow_rule.c b/drivers/net/ethernet/google/gve/gve_flow_rule.c
new file mode 100644
index 000000000000..0bb8cd1876a3
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_flow_rule.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2024 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+static
+int gve_fill_ethtool_flow_spec(struct ethtool_rx_flow_spec *fsp,
+ struct gve_adminq_queried_flow_rule *rule)
+{
+ struct gve_adminq_flow_rule *flow_rule = &rule->flow_rule;
+ static const u16 flow_type_lut[] = {
+ [GVE_FLOW_TYPE_TCPV4] = TCP_V4_FLOW,
+ [GVE_FLOW_TYPE_UDPV4] = UDP_V4_FLOW,
+ [GVE_FLOW_TYPE_SCTPV4] = SCTP_V4_FLOW,
+ [GVE_FLOW_TYPE_AHV4] = AH_V4_FLOW,
+ [GVE_FLOW_TYPE_ESPV4] = ESP_V4_FLOW,
+ [GVE_FLOW_TYPE_TCPV6] = TCP_V6_FLOW,
+ [GVE_FLOW_TYPE_UDPV6] = UDP_V6_FLOW,
+ [GVE_FLOW_TYPE_SCTPV6] = SCTP_V6_FLOW,
+ [GVE_FLOW_TYPE_AHV6] = AH_V6_FLOW,
+ [GVE_FLOW_TYPE_ESPV6] = ESP_V6_FLOW,
+ };
+
+ if (be16_to_cpu(flow_rule->flow_type) >= ARRAY_SIZE(flow_type_lut))
+ return -EINVAL;
+
+ fsp->flow_type = flow_type_lut[be16_to_cpu(flow_rule->flow_type)];
+
+ memset(&fsp->h_u, 0, sizeof(fsp->h_u));
+ memset(&fsp->h_ext, 0, sizeof(fsp->h_ext));
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.ip4src = flow_rule->key.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
+ fsp->h_u.tcp_ip4_spec.psrc = flow_rule->key.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = flow_rule->key.dst_port;
+ fsp->h_u.tcp_ip4_spec.tos = flow_rule->key.tos;
+ fsp->m_u.tcp_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.psrc = flow_rule->mask.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = flow_rule->mask.dst_port;
+ fsp->m_u.tcp_ip4_spec.tos = flow_rule->mask.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.ip4src = flow_rule->key.src_ip[0];
+ fsp->h_u.ah_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
+ fsp->h_u.ah_ip4_spec.spi = flow_rule->key.spi;
+ fsp->h_u.ah_ip4_spec.tos = flow_rule->key.tos;
+ fsp->m_u.ah_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
+ fsp->m_u.ah_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
+ fsp->m_u.ah_ip4_spec.spi = flow_rule->mask.spi;
+ fsp->m_u.ah_ip4_spec.tos = flow_rule->mask.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, &flow_rule->key.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = flow_rule->key.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = flow_rule->key.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = flow_rule->key.tclass;
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src, &flow_rule->mask.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = flow_rule->mask.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = flow_rule->mask.dst_port;
+ fsp->m_u.tcp_ip6_spec.tclass = flow_rule->mask.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(fsp->h_u.ah_ip6_spec.ip6src, &flow_rule->key.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.ah_ip6_spec.spi = flow_rule->key.spi;
+ fsp->h_u.ah_ip6_spec.tclass = flow_rule->key.tclass;
+ memcpy(fsp->m_u.ah_ip6_spec.ip6src, &flow_rule->mask.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.ah_ip6_spec.spi = flow_rule->mask.spi;
+ fsp->m_u.ah_ip6_spec.tclass = flow_rule->mask.tclass;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsp->ring_cookie = be16_to_cpu(flow_rule->action);
+
+ return 0;
+}
+
+static int gve_generate_flow_rule(struct gve_priv *priv, struct ethtool_rx_flow_spec *fsp,
+ struct gve_adminq_flow_rule *rule)
+{
+ static const u16 flow_type_lut[] = {
+ [TCP_V4_FLOW] = GVE_FLOW_TYPE_TCPV4,
+ [UDP_V4_FLOW] = GVE_FLOW_TYPE_UDPV4,
+ [SCTP_V4_FLOW] = GVE_FLOW_TYPE_SCTPV4,
+ [AH_V4_FLOW] = GVE_FLOW_TYPE_AHV4,
+ [ESP_V4_FLOW] = GVE_FLOW_TYPE_ESPV4,
+ [TCP_V6_FLOW] = GVE_FLOW_TYPE_TCPV6,
+ [UDP_V6_FLOW] = GVE_FLOW_TYPE_UDPV6,
+ [SCTP_V6_FLOW] = GVE_FLOW_TYPE_SCTPV6,
+ [AH_V6_FLOW] = GVE_FLOW_TYPE_AHV6,
+ [ESP_V6_FLOW] = GVE_FLOW_TYPE_ESPV6,
+ };
+ u32 flow_type;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ return -EOPNOTSUPP;
+
+ if (fsp->ring_cookie >= priv->rx_cfg.num_queues)
+ return -EINVAL;
+
+ rule->action = cpu_to_be16(fsp->ring_cookie);
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ if (!flow_type || flow_type >= ARRAY_SIZE(flow_type_lut))
+ return -EINVAL;
+
+ rule->flow_type = cpu_to_be16(flow_type_lut[flow_type]);
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ rule->key.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ rule->key.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ rule->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ rule->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ rule->key.spi = fsp->h_u.ah_ip4_spec.spi;
+ rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ rule->mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(&rule->key.src_ip, fsp->h_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->key.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->key.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ rule->key.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ memcpy(&rule->mask.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->mask.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ rule->mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(&rule->key.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->key.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->key.spi = fsp->h_u.ah_ip6_spec.spi;
+ memcpy(&rule->mask.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->mask.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct gve_adminq_queried_flow_rule *rules_cache = priv->flow_rules_cache.rules_cache;
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ u32 *cache_num = &priv->flow_rules_cache.rules_cache_num;
+ struct gve_adminq_queried_flow_rule *rule = NULL;
+ int err = 0;
+ u32 i;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ if (!priv->flow_rules_cache.rules_cache_synced ||
+ fsp->location < be32_to_cpu(rules_cache[0].location) ||
+ fsp->location > be32_to_cpu(rules_cache[*cache_num - 1].location)) {
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_RULES, fsp->location);
+ if (err)
+ return err;
+
+ priv->flow_rules_cache.rules_cache_synced = true;
+ }
+
+ for (i = 0; i < *cache_num; i++) {
+ if (fsp->location == be32_to_cpu(rules_cache[i].location)) {
+ rule = &rules_cache[i];
+ break;
+ }
+ }
+
+ if (!rule)
+ return -EINVAL;
+
+ err = gve_fill_ethtool_flow_spec(fsp, rule);
+
+ return err;
+}
+
+int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ __be32 *rule_ids_cache = priv->flow_rules_cache.rule_ids_cache;
+ u32 *cache_num = &priv->flow_rules_cache.rule_ids_cache_num;
+ u32 starting_rule_id = 0;
+ u32 i = 0, j = 0;
+ int err = 0;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ do {
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_IDS,
+ starting_rule_id);
+ if (err)
+ return err;
+
+ for (i = 0; i < *cache_num; i++) {
+ if (j >= cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[j++] = be32_to_cpu(rule_ids_cache[i]);
+ starting_rule_id = be32_to_cpu(rule_ids_cache[i]) + 1;
+ }
+ } while (*cache_num != 0);
+ cmd->data = priv->max_flow_rules;
+
+ return err;
+}
+
+int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct gve_adminq_flow_rule *rule = NULL;
+ int err;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ rule = kvzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ err = gve_generate_flow_rule(priv, fsp, rule);
+ if (err)
+ goto out;
+
+ err = gve_adminq_add_flow_rule(priv, rule, fsp->location);
+
+out:
+ kvfree(rule);
+ if (err)
+ dev_err(&priv->pdev->dev, "Failed to add the flow rule: %u", fsp->location);
+
+ return err;
+}
+
+int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_del_flow_rule(priv, fsp->location);
+}
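gve_get_flow_rule_ids() above serves the usual two-step ethtool dump: userspace first reads the installed rule count with ETHTOOL_GRXCLSRLCNT and then fetches the rule locations with ETHTOOL_GRXCLSRLALL. A hedged userspace sketch, again assuming an interface named eth0 and with minimal error handling:
/* Hedged sketch: list the locations of installed ntuple rules on eth0. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc cnt = { .cmd = ETHTOOL_GRXCLSRLCNT };
	struct ethtool_rxnfc *all;
	struct ifreq ifr = { 0 };
	unsigned int i;
	int fd;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	fd = socket(AF_INET, SOCK_DGRAM, 0);

	ifr.ifr_data = (void *)&cnt;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* one __u32 location per installed rule */
	all = calloc(1, sizeof(*all) + cnt.rule_cnt * sizeof(__u32));
	if (!all)
		return 1;
	all->cmd = ETHTOOL_GRXCLSRLALL;
	all->rule_cnt = cnt.rule_cnt;

	ifr.ifr_data = (void *)all;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < all->rule_cnt; i++)
		printf("rule at location %u\n", all->rule_locs[i]);

	free(all);
	return 0;
}
Note that, as the gve_main.c hunk further below shows, turning NETIF_F_NTUPLE off resets every installed rule through gve_flow_rules_reset().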
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index cabf7d4bcecb..9744b426940e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#include <linux/bpf.h>
@@ -141,6 +141,49 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
}
}
+static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+ int err = 0;
+
+ if (!priv->max_flow_rules)
+ return 0;
+
+ flow_rules_cache->rules_cache =
+ kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache),
+ GFP_KERNEL);
+ if (!flow_rules_cache->rules_cache) {
+ dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
+ return -ENOMEM;
+ }
+
+ flow_rules_cache->rule_ids_cache =
+ kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache),
+ GFP_KERNEL);
+ if (!flow_rules_cache->rule_ids_cache) {
+ dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
+ err = -ENOMEM;
+ goto free_rules_cache;
+ }
+
+ return 0;
+
+free_rules_cache:
+ kvfree(flow_rules_cache->rules_cache);
+ flow_rules_cache->rules_cache = NULL;
+ return err;
+}
+
+static void gve_free_flow_rule_caches(struct gve_priv *priv)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+
+ kvfree(flow_rules_cache->rule_ids_cache);
+ flow_rules_cache->rule_ids_cache = NULL;
+ kvfree(flow_rules_cache->rules_cache);
+ flow_rules_cache->rules_cache = NULL;
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -521,9 +564,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
{
int err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_flow_rule_caches;
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
@@ -575,6 +621,8 @@ abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_flow_rule_caches:
+ gve_free_flow_rule_caches(priv);
return err;
}
@@ -587,6 +635,12 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
/* Tell device its resources are being freed */
if (gve_get_device_resources_ok(priv)) {
+ err = gve_flow_rules_reset(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to reset flow rules: err=%d\n", err);
+ gve_trigger_reset(priv);
+ }
/* detach the stats report */
err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
if (err) {
@@ -606,6 +660,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
kvfree(priv->ptype_lut_dqo);
priv->ptype_lut_dqo = NULL;
+ gve_free_flow_rule_caches(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
@@ -1730,6 +1785,14 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+int gve_flow_rules_reset(struct gve_priv *priv)
+{
+ if (!priv->max_flow_rules)
+ return 0;
+
+ return gve_adminq_reset_flow_rules(priv);
+}
+
int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -2003,15 +2066,21 @@ static int gve_set_features(struct net_device *netdev,
netdev->features ^= NETIF_F_LRO;
if (netif_carrier_ok(netdev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- if (err) {
- /* Revert the change on error. */
- netdev->features = orig_features;
- return err;
- }
+ if (err)
+ goto revert_features;
}
}
+ if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) {
+ err = gve_flow_rules_reset(priv);
+ if (err)
+ goto revert_features;
+ }
return 0;
+
+revert_features:
+ netdev->features = orig_features;
+ return err;
}
static const struct net_device_ops gve_netdev_ops = {
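The gve changes above introduce a paired allocate/free helper (gve_alloc_flow_rule_caches()/gve_free_flow_rule_caches()) and thread it into gve_setup_device_resources() with a matching abort label. A minimal standalone sketch of that kvcalloc()/goto-unwind pattern follows; the structure and its fields are made up for illustration and are not the driver's real definitions.

/* Illustrative sketch of the allocate-or-unwind pattern above. */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>   /* kvcalloc(), kvfree() */
#include <linux/types.h>

struct demo_caches {
    void **rules;
    u32 *rule_ids;
};

static int demo_alloc_caches(struct demo_caches *c, size_t n)
{
    c->rules = kvcalloc(n, sizeof(*c->rules), GFP_KERNEL);
    if (!c->rules)
        return -ENOMEM;

    c->rule_ids = kvcalloc(n, sizeof(*c->rule_ids), GFP_KERNEL);
    if (!c->rule_ids)
        goto free_rules;    /* unwind in reverse allocation order */

    return 0;

free_rules:
    kvfree(c->rules);
    c->rules = NULL;
    return -ENOMEM;
}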
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index c1c912de59c7..1154c1d8f66f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -647,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
+ if (rx->ctx.skb_head == napi->skb)
+ napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
@@ -950,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@@ -993,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index fe1b26a4d736..0b3cca3fc792 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -EINVAL;
+
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
-
- /* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
-
- switch (skb_shinfo(skb)->gso_type) {
- case SKB_GSO_TCPV4:
- case SKB_GSO_TCPV6:
- csum_replace_by_diff(&tcp->check,
- (__force __wsum)htonl(paylen));
-
- /* Compute length of segmentation header. */
- header_len = skb_tcp_all_headers(skb);
- break;
- default:
- return -EINVAL;
- }
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+ header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;
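The gve_prep_tso() rework above replaces the switch on gso_type with a single bitmask test and performs it before skb_cow_head(), so unsupported packets are rejected before any header copy. A minimal sketch of that cheap-check-first ordering (function name is illustrative):

#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_prep_tso(struct sk_buff *skb)
{
    /* Cheap capability test first: bail out before doing any work. */
    if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
        return -EINVAL;

    /* Only now pay for skb_cow_head(), which may copy the header. */
    return skb_cow_head(skb, 0);
}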
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 8e9293e57bfd..e8af26da1fc1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -15,15 +15,14 @@ hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
-obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclge-common.o
-hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \
- hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+hclge-common-objs += hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
-obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-common.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
- hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
-
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 7cebb08bd320..27dbe367f3d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -786,7 +786,7 @@ struct hnae3_ae_ops {
void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
int (*get_ts_info)(struct hnae3_handle *handle,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index ea40b594dbac..4ad4e8ab2f1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -48,6 +48,7 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
else
desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_reuse_desc);
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
@@ -72,6 +73,7 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_setup_basic_desc);
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, bool en)
@@ -517,6 +519,7 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_send);
static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
@@ -553,6 +556,7 @@ void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
hclge_comm_free_cmd_desc(&cmdq->csq);
hclge_comm_free_cmd_desc(&cmdq->crq);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_uninit);
int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
@@ -591,6 +595,7 @@ err_csq:
hclge_comm_free_cmd_desc(&hw->cmq.csq);
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_queue_init);
void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
const struct hclge_comm_cmq_ops *ops)
@@ -602,6 +607,7 @@ void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
cmdq->ops.trace_cmd_get = ops->trace_cmd_get;
}
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_init_ops);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
@@ -672,3 +678,8 @@ err_cmd_init:
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet PF/VF Common Library");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index b4ae2160aff4..4e2bb6556b1c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -62,6 +62,7 @@ int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_rss_init_cfg);
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
@@ -78,6 +79,7 @@ void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tc_info);
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
@@ -113,6 +115,7 @@ int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tc_mode);
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
struct hclge_comm_hw *hw, const u8 *key,
@@ -143,6 +146,7 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
@@ -185,11 +189,13 @@ int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tuple);
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGE_COMM_RSS_KEY_SIZE;
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_key_size);
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
const u8 hfunc, u8 *hash_algo)
@@ -217,6 +223,7 @@ void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}
+EXPORT_SYMBOL_GPL(hclge_comm_rss_indir_init_cfg);
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
u8 *tuple_sets)
@@ -250,6 +257,7 @@ int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tuple);
static void
hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
@@ -304,6 +312,7 @@ int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
}
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_indir_table);
int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg)
@@ -332,6 +341,7 @@ int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
"failed to configure rss input, ret = %d.\n", ret);
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_input_tuple);
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
u8 *hfunc)
@@ -355,6 +365,7 @@ void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
if (key)
memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_hash_info);
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
u32 *indir, u16 rss_ind_tbl_size)
@@ -367,6 +378,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
for (i = 0; i < rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_indir_tbl);
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key)
@@ -408,6 +420,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
@@ -502,3 +515,4 @@ u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
return tuple_data;
}
+EXPORT_SYMBOL_GPL(hclge_comm_convert_rss_tuple);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 618f66d9586b..2b31188ff555 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -26,6 +26,7 @@ u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
return buff;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_stats);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
{
@@ -33,6 +34,7 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count);
u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
@@ -56,6 +58,7 @@ u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
return buff;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw)
@@ -99,6 +102,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_update_stats);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
{
@@ -113,3 +117,4 @@ void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
+EXPORT_SYMBOL_GPL(hclge_comm_reset_tqp_stats);
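The EXPORT_SYMBOL_GPL() markers above follow from the Makefile change that splits the shared hns3_common/ objects into their own hclge-common.ko: once the helpers live in a separate module, each one must be exported and the object needs its own MODULE_* tags. A minimal sketch of such a shared helper module, with illustrative names:

#include <linux/module.h>

int demo_shared_helper(int x)
{
    return x + 1;
}
EXPORT_SYMBOL_GPL(demo_shared_helper);  /* resolvable from other GPL modules */

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example shared helper library");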
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ff71fb1eced9..a5fc0209d628 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3535,6 +3535,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
+
+ if (!(i % HNS3_RESCHED_BD_NUM))
+ cond_resched();
}
return 0;
@@ -5107,6 +5110,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
+ cond_resched();
}
return 0;
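The cond_resched() calls above keep long ring-buffer preallocation loops from monopolizing the CPU; HNS3_RESCHED_BD_NUM bounds how many buffers are attached between yields. A small sketch of the same periodic-yield pattern, with an illustrative loop body:

#include <linux/sched.h>

#define DEMO_RESCHED_EVERY 1024   /* mirrors HNS3_RESCHED_BD_NUM */

static void demo_fill_buffers(unsigned int count)
{
    unsigned int i;

    for (i = 0; i < count; i++) {
        /* ... allocate and attach buffer i ... */

        if (!(i % DEMO_RESCHED_EVERY))
            cond_resched();   /* give other tasks a chance to run */
    }
}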
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index acd756b0c7c9..d36c4ed16d8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -214,6 +214,8 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U
+#define HNS3_RESCHED_BD_NUM 1024
+
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 941cb529d671..b1e988347347 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -2009,7 +2009,7 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_RING_USE_TX_PUSH)
static int hns3_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 43cc6ee4d87d..82574ce0194f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3086,9 +3086,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@@ -3112,8 +3110,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle, state);
+
+ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_client *rclient = hdev->roce_client;
+
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
hclge_push_link_status(hdev);
}
@@ -11319,6 +11324,12 @@ clear_roce:
return ret;
}
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -11327,7 +11338,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 507d7ce26d83..5fff8ed388f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -378,7 +378,7 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
}
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index bbee74cd8404..63483636c074 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -138,6 +138,6 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
int hclge_ptp_init(struct hclge_dev *hdev);
void hclge_ptp_uninit(struct hclge_dev *hdev);
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
#endif
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5e9a93bdb518..23ebeb143987 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2482,6 +2482,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
tx_buff = &tx_pool->tx_buff[bufidx];
+
+ /* Sanity-check our free map to make sure it points to an index
+ * that is not occupied by another skb. If skb memory is not
+ * freed, congestion control kicks in and halts tx.
+ */
+ if (unlikely(tx_buff->skb)) {
+ dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
+ skb_is_gso(skb) ? "tso_pool" : "tx_pool",
+ queue_num, bufidx);
+ dev_kfree_skb_any(tx_buff->skb);
+ }
+
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
@@ -4061,6 +4073,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
adapter->num_active_tx_scrqs = 0;
}
+ /* Clean up any remaining outstanding SKBs;
+ * we freed the IRQ, so we won't be hearing
+ * from them.
+ */
+ clean_tx_pools(adapter);
+
if (adapter->rx_scrq) {
for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
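The ibmvnic hunk above adds a defensive check in the transmit path: if the free map hands back a slot whose skb was never released, the driver warns (rate-limited) and frees the stale skb before reusing the slot. A minimal sketch of that check, with illustrative structure and function names:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_tx_buff {
    struct sk_buff *skb;
};

static void demo_claim_slot(struct device *dev, struct demo_tx_buff *buf,
                            struct sk_buff *new_skb, int idx)
{
    if (unlikely(buf->skb)) {
        dev_warn_ratelimited(dev, "free map points to busy slot %d\n", idx);
        dev_kfree_skb_any(buf->skb);   /* drop the stale skb */
    }
    buf->skb = new_skb;
}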
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index e0287fbd501d..0375c7448a57 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -384,17 +384,6 @@ config IGC_LEDS
Optional support for controlling the NIC LED's with the netdev
LED trigger.
-config IDPF
- tristate "Intel(R) Infrastructure Data Path Function Support"
- depends on PCI_MSI
- select DIMLIB
- select PAGE_POOL
- select PAGE_POOL_STATS
- help
- This driver supports Intel(R) Infrastructure Data Path Function
- devices.
-
- To compile this driver as a module, choose M here. The module
- will be called idpf.
+source "drivers/net/ethernet/intel/idpf/Kconfig"
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 9b068d40778d..aa139b67a55b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -161,7 +161,6 @@
#define FIRMWARE_D102E "e100/d102e_ucode.bin"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index 314c52d44b7c..79491dec47e1 100644
--- a/drivers/net/ethernet/intel/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_E1000) += e1000.o
-e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
+e1000-y := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 60fff9a6c53e..ab7ae418d294 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -187,7 +187,6 @@ static struct pci_driver e1000_driver = {
.err_handler = &e1000_err_handler
};
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 0baa15503c38..18f22b6374d5 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -10,7 +10,6 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_E1000E) += e1000e.o
-e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
- mac.o manage.o nvm.o phy.o \
- param.o ethtool.o netdev.o ptp.o
-
+e1000e-y := 82571.o ich8lan.o 80003es2lan.o \
+ mac.o manage.o nvm.o phy.o \
+ param.o ethtool.o netdev.o ptp.o
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 85da20778e0f..9364bc2b4eb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2263,7 +2263,7 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
}
static int e1000e_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f9e94be36e97..ce227b56cf72 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1109,6 +1109,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
}
/**
+ * e1000e_force_smbus - Force interfaces to transition to SMBUS mode.
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already
+ * acquired.
+ *
+ * Return: 0 on success, negative errno on failure.
+ **/
+static s32 e1000e_force_smbus(struct e1000_hw *hw)
+{
+ u16 smb_ctrl = 0;
+ u32 ctrl_ext;
+ s32 ret_val;
+
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ /* Force SMBus mode in the PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl);
+ if (ret_val) {
+ e1000e_enable_phy_retry(hw);
+ return ret_val;
+ }
+
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in the MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
+
+ return 0;
+}
+
+/**
* e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
* @hw: pointer to the HW structure
* @to_sx: boolean indicating a system power state transition to Sx
@@ -1165,6 +1205,14 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
+ if (hw->mac.type != e1000_pch_mtp) {
+ ret_val = e1000e_force_smbus(hw);
+ if (ret_val) {
+ e_dbg("Failed to force SMBUS: %d\n", ret_val);
+ goto release;
+ }
+ }
+
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -1225,6 +1273,13 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
}
release:
+ if (hw->mac.type == e1000_pch_mtp) {
+ ret_val = e1000e_force_smbus(hw);
+ if (ret_val)
+ e_dbg("Failed to force SMBUS over MTL system: %d\n",
+ ret_val);
+ }
+
hw->phy.ops.release(hw);
out:
if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 220d62fca55d..360ee26557f7 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6363,49 +6363,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
ew32(EXTCNF_CTRL, mac_data);
- /* Enable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(22);
- ew32(FEXTNVM7, mac_data);
-
/* Disable disconnected cable conditioning for Power Gating */
mac_data = er32(DPGFR);
mac_data |= BIT(2);
ew32(DPGFR, mac_data);
- /* Don't wake from dynamic Power Gating with clock request */
- mac_data = er32(FEXTNVM12);
- mac_data |= BIT(12);
- ew32(FEXTNVM12, mac_data);
-
- /* Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Enable K1 off to enable mPHY Power Gating */
- mac_data = er32(FEXTNVM6);
- mac_data |= BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Enable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data |= BIT(9);
- ew32(FEXTNVM8, mac_data);
-
/* Enable the Dynamic Clock Gating in the DMA and MAC */
mac_data = er32(CTRL_EXT);
mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
ew32(CTRL_EXT, mac_data);
-
- /* No MAC DPG gating SLP_S0 in modern standby
- * Switch the logic of the lanphypc to use PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data |= BIT(7);
- ew32(FEXTNVM5, mac_data);
}
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* No MAC DPG gating SLP_S0 in modern standby
+ * Switch the logic of the lanphypc to use PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+
/* Disable the time synchronization clock */
mac_data = er32(FEXTNVM7);
mac_data |= BIT(31);
@@ -6498,33 +6498,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
} else {
/* Request driver unconfigure the device from S0ix */
- /* Disable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data &= 0xFFBFFFFF;
- ew32(FEXTNVM7, mac_data);
-
- /* Disable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data &= ~BIT(9);
- ew32(FEXTNVM8, mac_data);
-
- /* Disable K1 off */
- mac_data = er32(FEXTNVM6);
- mac_data &= ~BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Disable Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Cancel not waking from dynamic
- * Power Gating with clock request
- */
- mac_data = er32(FEXTNVM12);
- mac_data &= ~BIT(12);
- ew32(FEXTNVM12, mac_data);
-
/* Cancel disable disconnected cable conditioning
* for Power Gating
*/
@@ -6537,13 +6510,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= 0xFFF7FFFF;
ew32(CTRL_EXT, mac_data);
- /* Revert the lanphypc logic to use the internal Gbe counter
- * and not the PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data &= 0xFFFFFF7F;
- ew32(FEXTNVM5, mac_data);
-
/* Enable the periodic inband message,
* Request PCIe clock in K1 page770_17[10:9] =01b
*/
@@ -6581,6 +6547,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= ~BIT(31);
mac_data |= BIT(0);
ew32(FEXTNVM7, mac_data);
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Disable Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Cancel not waking from dynamic
+ * Power Gating with clock request
+ */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
}
static int e1000e_pm_freeze(struct device *dev)
@@ -6623,7 +6623,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
- u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6697,23 +6696,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (retval)
return retval;
}
-
- /* Force SMBUS to allow WOL */
- /* Switching PHY interface always returns MDI error
- * so disable retry mechanism to avoid wasting time
- */
- e1000e_disable_phy_retry(hw);
-
- e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
- smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
-
- e1000e_enable_phy_retry(hw);
-
- /* Force SMBus mode in MAC */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
@@ -7987,7 +7969,6 @@ static void __exit e1000_exit_module(void)
}
module_exit(e1000_exit_module);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index bbcfd529399b..89d57dd911dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -124,7 +124,8 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
sys_cycles = er32(PLTSTMPH);
sys_cycles <<= 32;
sys_cycles |= er32(PLTSTMPL);
- *system = convert_art_to_tsc(sys_cycles);
+ system->cycles = sys_cycles;
+ system->cs_id = CSID_X86_ART;
return 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index fc373472e4e1..142f07ca8bc0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -17,7 +17,6 @@ static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
"Copyright(c) 2013 - 2019 Intel Corporation.";
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index cad93f323bd5..9faa4339a76c 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -10,7 +10,7 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_I40E) += i40e.o
-i40e-objs := i40e_main.o \
+i40e-y := i40e_main.o \
i40e_ethtool.o \
i40e_adminq.o \
i40e_common.o \
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index bca2084cc54b..d546567e0286 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -735,7 +735,7 @@ __i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
_i++, _veb = __i40e_pf_next_veb(_pf, &_i))
/**
- * i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
* Simply copies the address and returns it as a u64 for hashing
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index ee86d2c53079..55b5bb884d73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -109,10 +109,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
- /* aq_rc is invalid if AQ timed out */
- if (aq_ret == -EIO)
- return -EAGAIN;
-
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4e28785c9fb2..1d0d2e526adb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2546,7 +2546,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
static int i40e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1f188c052828..cbcfada7b357 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -98,7 +98,6 @@ static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
@@ -11171,6 +11170,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
ret = i40e_reset(pf);
if (!ret)
i40e_rebuild(pf, reinit, lock_acquired);
+ else
+ dev_err(&pf->pdev->dev, "%s: i40e_reset() FAILED", __func__);
}
/**
@@ -13291,6 +13292,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
bool need_reset;
int i;
+ /* VSI shall be deleted in a moment, block loading new programs */
+ if (prog && test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
/* Don't allow frames that span over multiple buffers */
if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
@@ -13299,14 +13304,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
/* When turning XDP on->off/off->on we reset and rebuild the rings. */
need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
-
if (need_reset)
i40e_prep_for_reset(pf);
- /* VSI shall be deleted in a moment, just return EINVAL */
- if (test_bit(__I40E_IN_REMOVE, pf->state))
- return -EINVAL;
-
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@@ -16335,6 +16335,139 @@ unmap:
}
/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_hw *hw = &pf->hw;
+ u8 mac_addr[6];
+ u16 flags = 0;
+ int ret;
+
+ /* Get current MAC address in case it's an LAA */
+ if (main_vsi && main_vsi->netdev) {
+ ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the mac address write cmd to first be called with
+ * one of these flags before calling it again with the multicast
+ * enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
+}
+
+/**
+ * i40e_io_suspend - suspend all IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_suspend(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_DOWN, pf->state);
+
+ /* Ensure service task will not be running */
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf, false);
+
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
+ i40e_enable_mc_magic_wake(pf);
+
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
+
+ i40e_prep_for_reset(pf);
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ /* Clear the interrupt scheme and release our IRQs so that the system
+ * can safely hibernate even when there are a large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ i40e_clear_interrupt_scheme(pf);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+/**
+ * i40e_io_resume - resume IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_resume(struct i40e_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
+ * since we're going to be restoring queues
+ */
+ rtnl_lock();
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ err = i40e_restore_interrupt_scheme(pf);
+ if (err) {
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
+ err);
+ }
+
+ clear_bit(__I40E_DOWN, pf->state);
+ i40e_reset_and_rebuild(pf, false, true);
+
+ rtnl_unlock();
+
+ /* Clear suspended state last after everything is recovered */
+ clear_bit(__I40E_SUSPENDED, pf->state);
+
+ /* Restart the service task */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+}
+
+/**
* i40e_pci_error_detected - warning that something funky happened in PCI land
* @pdev: PCI device information struct
* @error: the type of PCI error
@@ -16358,7 +16491,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, pf->state))
- i40e_prep_for_reset(pf);
+ i40e_io_suspend(pf);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16380,7 +16513,8 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (pci_enable_device_mem(pdev)) {
+ /* enable I/O and memory of the device */
+ if (pci_enable_device(pdev)) {
dev_info(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
@@ -16443,54 +16577,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- i40e_handle_reset_warning(pf, false);
-}
-
-/**
- * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
- * using the mac_address_write admin q function
- * @pf: pointer to i40e_pf struct
- **/
-static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
-{
- struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
- struct i40e_hw *hw = &pf->hw;
- u8 mac_addr[6];
- u16 flags = 0;
- int ret;
-
- /* Get current MAC address in case it's an LAA */
- if (main_vsi && main_vsi->netdev) {
- ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
- } else {
- dev_err(&pf->pdev->dev,
- "Failed to retrieve MAC address; using default\n");
- ether_addr_copy(mac_addr, hw->mac.addr);
- }
-
- /* The FW expects the mac address write cmd to first be called with
- * one of these flags before calling it again with the multicast
- * enable flags.
- */
- flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
-
- if (hw->func_caps.flex10_enable && hw->partition_id != 1)
- flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
-
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
- return;
- }
-
- flags = I40E_AQC_MC_MAG_EN
- | I40E_AQC_WOL_PRESERVE_ON_PFR
- | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret)
- dev_err(&pf->pdev->dev,
- "Failed to enable Multicast Magic Packet wake up\n");
+ i40e_io_resume(pf);
}
/**
@@ -16552,48 +16639,11 @@ static void i40e_shutdown(struct pci_dev *pdev)
static int i40e_suspend(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- set_bit(__I40E_DOWN, pf->state);
-
- /* Ensure service task will not be running */
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
-
- /* Client close must be called explicitly here because the timer
- * has been stopped.
- */
- i40e_notify_client_of_netdev_close(pf, false);
-
- if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
- pf->wol_en)
- i40e_enable_mc_magic_wake(pf);
-
- /* Since we're going to destroy queues during the
- * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
- * whole section
- */
- rtnl_lock();
-
- i40e_prep_for_reset(pf);
-
- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
-
- /* Clear the interrupt scheme and release our IRQs so that the system
- * can safely hibernate even when there are a large number of CPUs.
- * Otherwise hibernation might fail when mapping all the vectors back
- * to CPU0.
- */
- i40e_clear_interrupt_scheme(pf);
-
- rtnl_unlock();
-
- return 0;
+ return i40e_io_suspend(pf);
}
/**
@@ -16603,39 +16653,11 @@ static int i40e_suspend(struct device *dev)
static int i40e_resume(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- int err;
/* If we're not suspended, then there is nothing to do */
if (!test_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- /* We need to hold the RTNL lock prior to restoring interrupt schemes,
- * since we're going to be restoring queues
- */
- rtnl_lock();
-
- /* We cleared the interrupt scheme when we suspended, so we need to
- * restore it now to resume device functionality.
- */
- err = i40e_restore_interrupt_scheme(pf);
- if (err) {
- dev_err(dev, "Cannot restore interrupt scheme: %d\n",
- err);
- }
-
- clear_bit(__I40E_DOWN, pf->state);
- i40e_reset_and_rebuild(pf, false, true);
-
- rtnl_unlock();
-
- /* Clear suspended state last after everything is recovered */
- clear_bit(__I40E_SUSPENDED, pf->state);
-
- /* Restart the service task */
- mod_timer(&pf->service_timer,
- round_jiffies(jiffies + pf->service_timer_period));
-
- return 0;
+ return i40e_io_resume(pf);
}
static const struct pci_error_handlers i40e_err_handler = {
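The i40e rework above hoists the bodies of the suspend and resume callbacks into i40e_io_suspend()/i40e_io_resume() so the PCI error handlers can share the same quiesce/rebuild path, with the __I40E_SUSPENDED bit guarding against double entry. A simplified sketch of that shape, with illustrative names:

#include <linux/bitops.h>

enum { DEMO_SUSPENDED };

static int demo_io_suspend(unsigned long *state) { /* quiesce hardware */ return 0; }
static int demo_io_resume(unsigned long *state)  { /* rebuild and restart */ return 0; }

static int demo_suspend(unsigned long *state)
{
    if (test_and_set_bit(DEMO_SUSPENDED, state))
        return 0;              /* already suspended, nothing to do */
    return demo_io_suspend(state);
}

static int demo_resume(unsigned long *state)
{
    if (!test_bit(DEMO_SUSPENDED, state))
        return 0;              /* not suspended */
    return demo_io_resume(state);
}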
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 2d154a4e2fd7..356ac9faa5bf 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,6 +11,5 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
- iavf_adv_rss.o \
- iavf_txrx.o iavf_common.o iavf_adminq.o
+iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
+ iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c6dff0963053..ff11bafb3b4f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -45,7 +45,6 @@ static const struct pci_device_id iavf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_IMPORT_NS(LIBETH);
MODULE_IMPORT_NS(LIBIE);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index c4b69655cdf5..810a901d7afd 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -794,10 +794,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
tc_node = pi->root->children[0];
mutex_lock(&pi->sched_lock);
- devl_lock(devlink);
for (i = 0; i < tc_node->num_children; i++)
ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
- devl_unlock(devlink);
mutex_unlock(&pi->sched_lock);
return 0;
@@ -1383,12 +1381,132 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
return 0;
}
+#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled"
+#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled"
+#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized"
+
+/**
+ * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
+ * @mode: local forwarding for mode used in port_info struct.
+ *
+ * Return: Mode respective string or "Invalid".
+ */
+static const char *
+ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
+{
+ switch (mode) {
+ case ICE_LOCAL_FWD_MODE_ENABLED:
+ return DEVLINK_LOCAL_FWD_ENABLED_STR;
+ case ICE_LOCAL_FWD_MODE_PRIORITIZED:
+ return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
+ case ICE_LOCAL_FWD_MODE_DISABLED:
+ return DEVLINK_LOCAL_FWD_DISABLED_STR;
+ }
+
+ return "Invalid";
+}
+
+/**
+ * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name.
+ * @mode_str: local forwarding mode string.
+ *
+ * Return: Mode value or negative number if invalid.
+ */
+static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
+{
+ if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR))
+ return ICE_LOCAL_FWD_MODE_ENABLED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR))
+ return ICE_LOCAL_FWD_MODE_PRIORITIZED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR))
+ return ICE_LOCAL_FWD_MODE_DISABLED;
+
+ return -EINVAL;
+}
+
+/**
+ * ice_devlink_local_fwd_get - Get local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to get.
+ * @ctx: Context to store the parameter value.
+ *
+ * Return: Zero.
+ */
+static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct ice_port_info *pi;
+ const char *mode_str;
+
+ pi = pf->hw.port_info;
+ mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_local_fwd_set - Set local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to set.
+ * @ctx: Context to get the parameter value.
+ * @extack: Netlink extended ACK structure.
+ *
+ * Return: Zero.
+ */
+static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_port_info *pi;
+
+ pi = pf->hw.port_info;
+ if (pi->local_fwd_mode != new_local_fwd_mode) {
+ pi->local_fwd_mode = new_local_fwd_mode;
+ dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
+ ice_schedule_reset(pf, ICE_RESET_CORER);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value.
+ * @devlink: Unused pointer to devlink instance.
+ * @id: The parameter ID to validate.
+ * @val: Value to validate.
+ * @extack: Netlink extended ACK structure.
+ *
+ * Supported values are:
+ * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled
+ * "prioritized" - local_fwd traffic is prioritized in scheduling.
+ *
+ * Return: Zero when passed parameter value is supported. Negative value on
+ * error.
+ */
+static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
};
-static const struct devlink_param ice_devlink_params[] = {
+static const struct devlink_param ice_dvl_rdma_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ice_devlink_enable_roce_get,
ice_devlink_enable_roce_set,
@@ -1397,6 +1515,9 @@ static const struct devlink_param ice_devlink_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+};
+
+static const struct devlink_param ice_dvl_sched_params[] = {
DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
"tx_scheduling_layers",
DEVLINK_PARAM_TYPE_U8,
@@ -1404,6 +1525,12 @@ static const struct devlink_param ice_devlink_params[] = {
ice_devlink_tx_sched_layers_get,
ice_devlink_tx_sched_layers_set,
ice_devlink_tx_sched_layers_validate),
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
+ "local_forwarding", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_local_fwd_get,
+ ice_devlink_local_fwd_set,
+ ice_devlink_local_fwd_validate),
};
static void ice_devlink_free(void *devlink_ptr)
@@ -1464,21 +1591,31 @@ int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct ice_hw *hw = &pf->hw;
- size_t params_size;
+ int status;
- params_size = ARRAY_SIZE(ice_devlink_params);
+ status = devl_params_register(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+ if (status)
+ return status;
- if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
- params_size--;
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ status = devl_params_register(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
- return devl_params_register(devlink, ice_devlink_params,
- params_size);
+ return status;
}
void ice_devlink_unregister_params(struct ice_pf *pf)
{
- devl_params_unregister(priv_to_devlink(pf), ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_hw *hw = &pf->hw;
+
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ devl_params_unregister(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
}
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 13e6790d3cae..00fed5a61d62 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -373,6 +373,62 @@ void ice_devlink_destroy_pf_port(struct ice_pf *pf)
}
/**
+ * ice_devlink_port_get_vf_fn_mac - .port_fn_hw_addr_get devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_get operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_get_vf_fn_mac(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_vf *vf = container_of(port, struct ice_vf, devlink_port);
+
+ ether_addr_copy(hw_addr, vf->dev_lan_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_set_vf_fn_mac - .port_fn_hw_addr_set devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_set operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_set_vf_fn_mac(struct devlink_port *port,
+ const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+
+{
+ struct devlink_port_attrs *attrs = &port->attrs;
+ struct devlink_port_pci_vf_attrs *pci_vf;
+ struct devlink *devlink = port->devlink;
+ struct ice_pf *pf;
+ u16 vf_id;
+
+ pf = devlink_priv(devlink);
+ pci_vf = &attrs->pci_vf;
+ vf_id = pci_vf->vf;
+
+ return __ice_set_vf_mac(pf, vf_id, hw_addr);
+}
+
+static const struct devlink_port_ops ice_devlink_vf_port_ops = {
+ .port_fn_hw_addr_get = ice_devlink_port_get_vf_fn_mac,
+ .port_fn_hw_addr_set = ice_devlink_port_set_vf_fn_mac,
+};
+
+/**
* ice_devlink_create_vf_port - Create a devlink port for this VF
* @vf: the VF to create a port for
*
@@ -407,7 +463,8 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
- err = devlink_port_register(devlink, devlink_port, vsi->idx);
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_vf_port_ops);
if (err) {
dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
vf->vf_id, err);
@@ -426,5 +483,5 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
devl_rate_leaf_destroy(&vf->devlink_port);
- devlink_port_unregister(&vf->devlink_port);
+ devl_port_unregister(&vf->devlink_port);
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 6ad8002b22e1..99a75a59078e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -409,7 +409,6 @@ struct ice_vsi {
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
- unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -747,6 +746,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+ u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
-
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ return ice_get_xp_from_qid(vsi, qid);
}
/**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
- ring->xsk_pool = NULL;
- return;
- }
-
- ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
}
/**
@@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+ ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
+ ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
+};
+
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 52d15ef7f4b1..ad84d8ad49a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -11,6 +11,7 @@
#include "ice_adapter.h"
static DEFINE_XARRAY(ice_adapters);
+static DEFINE_MUTEX(ice_adapters_mutex);
/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
@@ -47,8 +48,6 @@ static void ice_adapter_free(struct ice_adapter *adapter)
kfree(adapter);
}
-DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T))
-
/**
* ice_adapter_get - Get a shared ice_adapter structure.
* @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter.
@@ -64,27 +63,26 @@ DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T))
*/
struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
{
- struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL;
unsigned long index = ice_adapter_index(pdev);
-
- adapter = ice_adapter_new();
- if (!adapter)
- return ERR_PTR(-ENOMEM);
-
- xa_lock(&ice_adapters);
- ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL);
- if (xa_is_err(ret)) {
- ret = ERR_PTR(xa_err(ret));
- goto unlock;
- }
- if (ret) {
- refcount_inc(&ret->refcount);
- goto unlock;
+ struct ice_adapter *adapter;
+ int err;
+
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
+ if (err == -EBUSY) {
+ adapter = xa_load(&ice_adapters, index);
+ refcount_inc(&adapter->refcount);
+ return adapter;
+ }
+ if (err)
+ return ERR_PTR(err);
+
+ adapter = ice_adapter_new();
+ if (!adapter)
+ return ERR_PTR(-ENOMEM);
+ xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
}
- ret = no_free_ptr(adapter);
-unlock:
- xa_unlock(&ice_adapters);
- return ret;
+ return adapter;
}
/**
@@ -94,23 +92,21 @@ unlock:
* Releases the reference to ice_adapter previously obtained with
* ice_adapter_get.
*
- * Context: Any.
+ * Context: Process, may sleep.
*/
void ice_adapter_put(const struct pci_dev *pdev)
{
unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
- xa_lock(&ice_adapters);
- adapter = xa_load(&ice_adapters, index);
- if (WARN_ON(!adapter))
- goto unlock;
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ adapter = xa_load(&ice_adapters, index);
+ if (WARN_ON(!adapter))
+ return;
+ if (!refcount_dec_and_test(&adapter->refcount))
+ return;
- if (!refcount_dec_and_test(&adapter->refcount))
- goto unlock;
-
- WARN_ON(__xa_erase(&ice_adapters, index) != adapter);
+ WARN_ON(xa_erase(&ice_adapters, index) != adapter);
+ }
ice_adapter_free(adapter);
-unlock:
- xa_unlock(&ice_adapters);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index e76c388b9905..66f02988d549 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -122,6 +122,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
@@ -231,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Loopback port parameter mode values. */
+enum ice_local_fwd_mode {
+ ICE_LOCAL_FWD_MODE_ENABLED = 0,
+ ICE_LOCAL_FWD_MODE_DISABLED = 1,
+ ICE_LOCAL_FWD_MODE_PRIORITIZED = 2,
+};
+
/* Set Port parameters, (direct, 0x0203) */
struct ice_aqc_set_port_params {
__le16 cmd_flags;
@@ -239,7 +247,9 @@ struct ice_aqc_set_port_params {
__le16 swid;
#define ICE_AQC_PORT_SWID_VALID BIT(15)
#define ICE_AQC_PORT_SWID_M 0xFF
- u8 reserved[10];
+ u8 local_fwd_mode;
+#define ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID BIT(2)
+ u8 reserved[9];
};
/* These resource type defines are used for all switch resource
@@ -1460,6 +1470,55 @@ struct ice_aqc_get_sensor_reading_resp {
} data;
};
+/* DNL call command (indirect 0x0682)
+ * Struct is used for both command and response
+ */
+struct ice_aqc_dnl_call_command {
+ u8 ctx; /* Used in command, reserved in response */
+ u8 reserved;
+ __le16 activity_id;
+#define ICE_AQC_ACT_ID_DNL 0x1129
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_dnl_equa_param {
+ __le16 data_in;
+#define ICE_AQC_RX_EQU_SHIFT 8
+#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_TX_EQU_PRE1 0x0
+#define ICE_AQC_TX_EQU_PRE3 0x3
+#define ICE_AQC_TX_EQU_ATTEN 0x4
+#define ICE_AQC_TX_EQU_POST1 0x8
+#define ICE_AQC_TX_EQU_PRE2 0xC
+ __le16 op_code_serdes_sel;
+#define ICE_AQC_OP_CODE_SHIFT 4
+#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT)
+#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT)
+ __le32 reserved[3];
+};
+
+struct ice_aqc_dnl_equa_respon {
+ /* Equalization value can be negative */
+ int val;
+ __le32 reserved[3];
+};
+
+/* DNL call command/response buffer (indirect 0x0682) */
+struct ice_aqc_dnl_call {
+ union {
+ struct ice_aqc_dnl_equa_param txrx_equa_reqs;
+ __le32 stores[4];
+ struct ice_aqc_dnl_equa_respon txrx_equa_resp;
+ } sto;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -2563,6 +2622,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_dnl_call_command dnl_call;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
struct ice_aqc_get_set_tx_topo get_set_tx_topo;
@@ -2687,6 +2747,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
ice_aqc_opc_get_sensor_reading = 0x0632,
+ ice_aqc_opc_dnl_call = 0x0682,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
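The DNL call (0x0682) is used here as a transport for serdes equalization reads: data_in selects which parameter to fetch (one of the ICE_AQC_RX_EQU_*/ICE_AQC_TX_EQU_* selectors) and op_code_serdes_sel carries the Rx/Tx opcode in the upper bits with the serdes lane number in the low nibble. A request-buffer sketch mirroring ice_aq_get_phy_equalization() further down in this diff, using Rx pre1 on serdes lane 2 purely as an example:

	struct ice_aqc_dnl_call buf = {};

	buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(ICE_AQC_RX_EQU_PRE1);
	buf.sto.txrx_equa_reqs.op_code_serdes_sel =
		cpu_to_le16(ICE_AQC_OP_CODE_RX_EQU | (2 & 0xF)); /* lane 2: example only */
	/* firmware answers in buf.sto.txrx_equa_resp.val, which may be negative */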
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 687f6cb2b917..5d396c1a7731 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
}
rx_rings_rem -= rx_rings_per_v;
}
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_map_xdp_rings(vsi);
}
/**
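ice_vsi_map_rings_to_vectors() now finishes by mapping the XDP Tx rings too, which means the XDP rings must already exist when it runs. That is the reason the ice_lib.c hunk further down in this diff moves ice_prepare_xdp_rings() ahead of the ring-to-vector mapping during VSI (re)build. The intended ordering, roughly:

	/* VSI (re)configuration, simplified sketch */
	ice_vsi_determine_xdp_res(vsi);
	ice_prepare_xdp_rings(vsi, vsi->xdp_prog, ICE_XDP_CFG_PART); /* allocate XDP rings */
	ice_vsi_map_rings_to_vectors(vsi); /* maps LAN rings and, via this hunk, XDP rings */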
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
index 57abd52386d0..10d9d74f3545 100644
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
@@ -23,7 +23,18 @@ union nac_cgu_dword9 {
u32 clk_synce0_amp : 2;
u32 one_pps_out_amp : 2;
u32 misc24 : 12;
- } field;
+ };
+ u32 val;
+};
+
+#define NAC_CGU_DWORD16_E825C 0x40
+union nac_cgu_dword16_e825c {
+ struct {
+ u32 synce_remndr : 6;
+ u32 synce_phlmt_en : 1;
+ u32 misc13 : 17;
+ u32 tspll_ck_refclkfreq : 8;
+ };
u32 val;
};
@@ -39,7 +50,7 @@ union nac_cgu_dword19 {
u32 japll_ndivratio : 4;
u32 japll_iref_ndivratio : 3;
u32 misc27 : 1;
- } field;
+ };
u32 val;
};
@@ -63,7 +74,23 @@ union nac_cgu_dword22 {
u32 fdpllclk_sel_div2 : 1;
u32 time1588clk_sel_div2 : 1;
u32 misc3 : 1;
- } field;
+ };
+ u32 val;
+};
+
+#define NAC_CGU_DWORD23_E825C 0x5C
+union nac_cgu_dword23_e825c {
+ struct {
+ u32 cgupll_fbdiv_intgr : 10;
+ u32 ux56pll_fbdiv_intgr : 10;
+ u32 misc20 : 4;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+
+ };
u32 val;
};
@@ -77,7 +104,7 @@ union nac_cgu_dword24 {
u32 ext_synce_sel : 1;
u32 ref1588_ck_div : 4;
u32 time_ref_sel : 1;
- } field;
+ };
u32 val;
};
@@ -92,7 +119,7 @@ union tspll_cntr_bist_settings {
u32 i_plllock_cnt_6_0 : 7;
u32 i_plllock_cnt_10_7 : 4;
u32 reserved200 : 4;
- } field;
+ };
u32 val;
};
@@ -109,7 +136,45 @@ union tspll_ro_bwm_lf {
u32 afcdone_cri : 1;
u32 feedfwrdgain_cal_cri_7_0 : 8;
u32 m2fbdivmod_cri_7_0 : 8;
- } field;
+ };
+ u32 val;
+};
+
+#define TSPLL_RO_LOCK_E825C 0x3f0
+union tspll_ro_lock_e825c {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 reserved455 : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 reserved462 : 8;
+ };
+ u32 val;
+};
+
+#define TSPLL_BW_TDC_E825C 0x31c
+union tspll_bw_tdc_e825c {
+ struct {
+ u32 i_tdc_offset_lock_1_0 : 2;
+ u32 i_bbthresh1_2_0 : 3;
+ u32 i_bbthresh2_2_0 : 3;
+ u32 i_tdcsel_1_0 : 2;
+ u32 i_tdcovccorr_en_h : 1;
+ u32 i_divretimeren : 1;
+ u32 i_bw_ampmeas_window : 1;
+ u32 i_bw_lowerbound_2_0 : 3;
+ u32 i_bw_upperbound_2_0 : 3;
+ u32 i_bw_mode_1_0 : 2;
+ u32 i_ft_mode_sel_2_0 : 3;
+ u32 i_bwphase_4_0 : 5;
+ u32 i_plllock_sel_1_0 : 2;
+ u32 i_afc_divratio : 1;
+ };
u32 val;
};
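Dropping the named "field" member makes the bitfield struct an anonymous member of each union, so the bits are addressed directly on the union variable; the layout and the .val overlay are unchanged, only the access syntax shortens. Illustrative only (raw stands in for a previously read register value):

	union nac_cgu_dword9 dw9 = { .val = raw };

	/* before this change: dw9.field.one_pps_out_amp */
	u32 amp = dw9.one_pps_out_amp;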
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 5649b257e631..009716a12a26 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -240,6 +240,30 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e822 - Check if a device is E822 family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E822 based, false if not.
+ */
+bool ice_is_e822(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_is_e823
* @hw: pointer to the hardware structure
*
@@ -910,6 +934,9 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
+ /* Initialize recipe count with default recipes read from NVM */
+ sw->recp_cnt = ICE_SW_LKUP_LAST;
+
status = ice_init_def_sw_recp(hw);
if (status) {
devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
@@ -937,14 +964,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
recps = sw->recp_list;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
- struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
-
recps[i].root_rid = i;
- list_for_each_entry_safe(rg_entry, tmprg_entry,
- &recps[i].rg_list, l_entry) {
- list_del(&rg_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), rg_entry);
- }
if (recps[i].adv_rule) {
struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
@@ -969,7 +989,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), lst_itr);
}
}
- devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -1062,6 +1081,7 @@ int ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
+ hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
/* set the back pointer to HW */
hw->port_info->hw = hw;
@@ -1473,8 +1493,9 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
* ice_sbq_rw_reg - Fill Sideband Queue command
* @hw: pointer to the HW struct
* @in: message info to be filled in descriptor
+ * @flags: control queue descriptor flags
*/
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
struct ice_sbq_cmd_desc desc = {0};
struct ice_sbq_msg_req msg = {0};
@@ -1498,7 +1519,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
*/
msg_len -= sizeof(msg.data);
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags = cpu_to_le16(flags);
desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
desc.param0.cmd_len = cpu_to_le16(msg_len);
status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
@@ -2290,8 +2311,13 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
- info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (!ice_is_e825c(hw)) {
+ info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
+ info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ } else {
+ info->clk_freq = ICE_TIME_REF_FREQ_156_250;
+ info->clk_src = ICE_CLK_SRC_TCXO;
+ }
if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
@@ -2565,6 +2591,34 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
+ */
+static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
+ struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->nac_topo.mode = le32_to_cpu(cap->number);
+ dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
+
+ dev_info(ice_hw_to_dev(hw),
+ "PF is configured in %s mode with IP instance ID %d\n",
+ (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
+ "primary" : "secondary", dev_p->nac_topo.id);
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
+ dev_p->nac_topo.id);
+}
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2615,6 +2669,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_NAC_TOPOLOGY:
+ ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -3010,6 +3067,9 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
cmd->cmd_flags = cpu_to_le16(cmd_flags);
+ cmd->local_fwd_mode = pi->local_fwd_mode |
+ ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -3043,11 +3103,13 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
- * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ *
+ * Return:
+ * * PHY speed for recognized PHY type
+ * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
@@ -3148,6 +3210,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+ break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
@@ -3299,6 +3371,100 @@ int ice_update_link_info(struct ice_port_info *pi)
}
/**
+ * ice_aq_get_phy_equalization - function to read serdes equaliser
+ * value from firmware using admin queue command.
+ * @hw: pointer to the HW struct
+ * @data_in: represents the serdes equalization parameter requested
+ * @op_code: selects the Tx or Rx equalization opcode (ICE_AQC_OP_CODE_*_EQU)

+ * @serdes_num: represents the serdes number
+ * @output: pointer to the caller-supplied buffer to return serdes equaliser
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output)
+{
+ struct ice_aqc_dnl_call_command *cmd;
+ struct ice_aqc_dnl_call buf = {};
+ struct ice_aq_desc desc;
+ int err;
+
+ buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
+ buf.sto.txrx_equa_reqs.op_code_serdes_sel =
+ cpu_to_le16(op_code | (serdes_num & 0xF));
+ cmd = &desc.params.dnl_call;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF |
+ ICE_AQ_FLAG_RD |
+ ICE_AQ_FLAG_SI);
+ desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
+ cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
+
+ err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call),
+ NULL);
+ *output = err ? 0 : buf.sto.txrx_equa_resp.val;
+
+ return err;
+}
+
+#define FEC_REG_PORT(port) { \
+ FEC_CORR_LOW_REG_PORT##port, \
+ FEC_CORR_HIGH_REG_PORT##port, \
+ FEC_UNCORR_LOW_REG_PORT##port, \
+ FEC_UNCORR_HIGH_REG_PORT##port, \
+}
+
+static const u32 fec_reg[][ICE_FEC_MAX] = {
+ FEC_REG_PORT(0),
+ FEC_REG_PORT(1),
+ FEC_REG_PORT(2),
+ FEC_REG_PORT(3)
+};
+
+/**
+ * ice_aq_get_fec_stats - reads fec stats from phy
+ * @hw: pointer to the HW struct
+ * @pcs_quad: represents pcsquad of user input serdes
+ * @pcs_port: represents the pcs port number part of above pcs quad
+ * @fec_type: represents FEC stats type
+ * @output: pointer to the caller-supplied buffer to return requested fec stats
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output)
+{
+ u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ struct ice_sbq_msg_input msg = {};
+ u32 receiver_id, reg_offset;
+ int err;
+
+ if (pcs_port > 3)
+ return -EINVAL;
+
+ reg_offset = fec_reg[pcs_port][fec_type];
+
+ if (pcs_quad == 0)
+ receiver_id = FEC_RECEIVER_ID_PCS0;
+ else if (pcs_quad == 1)
+ receiver_id = FEC_RECEIVER_ID_PCS1;
+ else
+ return -EINVAL;
+
+ msg.msg_addr_low = lower_16_bits(reg_offset);
+ msg.msg_addr_high = receiver_id;
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = rmn_0;
+
+ err = ice_sbq_rw_reg(hw, &msg, flag);
+ if (err)
+ return err;
+
+ *output = msg.data;
+ return 0;
+}
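Each PCS port exposes its FEC counters as 16-bit low/high register pairs, which is why fec_reg[] carries four offsets per port and why a caller has to read both halves and stitch them together. Usage sketch, with quad 0 / PCS port 1 chosen only as an example and error handling omitted:

	u32 lo = 0, hi = 0, corrected;

	ice_aq_get_fec_stats(hw, 0, 1, ICE_FEC_CORR_LOW, &lo);
	ice_aq_get_fec_stats(hw, 0, 1, ICE_FEC_CORR_HIGH, &hi);
	corrected = (hi << 16) + lo; /* full 32-bit corrected-codeword count */

The ice_fec_stats_types enum providing ICE_FEC_CORR_LOW and friends lives in a header outside this hunk; its ordering is assumed to match the FEC_REG_PORT() initializer order.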
+
+/**
* ice_cache_phy_user_req
* @pi: port information structure
* @cache_data: PHY logging data
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index ffb22c7ce28b..66f29bac783a 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -17,13 +17,34 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
+#define FEC_REG_SHIFT 2
+#define FEC_RECV_ID_SHIFT 4
+#define FEC_CORR_LOW_REG_PORT0 (0x02 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT0 (0x03 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT0 (0x04 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT0 (0x05 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT1 (0x42 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT1 (0x43 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT1 (0x44 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT1 (0x45 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT2 (0x4A << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT2 (0x4B << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT2 (0x4C << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT2 (0x4D << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT3 (0x52 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT3 (0x53 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT3 (0x54 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT3 (0x55 << FEC_REG_SHIFT)
+#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
+#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
int ice_create_all_ctrlq(struct ice_hw *hw);
int ice_init_all_ctrlq(struct ice_hw *hw);
-void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -121,6 +142,11 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output);
+int
+ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
@@ -201,7 +227,7 @@ int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -249,6 +275,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e822(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
bool ice_is_e825c(struct ice_hw *hw);
int
@@ -261,6 +288,7 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
bool ice_is_100m_speed_supported(struct ice_hw *hw);
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index ffe660f34992..ffaa6511c455 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -510,16 +510,19 @@ shutdown_sq_out:
*/
static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
+ u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);
+
+ if (hw->api_maj_ver > exp_fw_api_ver_major) {
/* Major API version is newer than expected, don't load */
dev_warn(ice_hw_to_dev(hw),
"The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
- } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
- if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ } else if (hw->api_maj_ver == exp_fw_api_ver_major) {
+ if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
dev_info(ice_hw_to_dev(hw),
"The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
- else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
dev_info(ice_hw_to_dev(hw),
"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
} else {
@@ -684,10 +687,12 @@ struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks.
*/
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
+ bool unloading)
{
struct ice_ctl_q_info *cq;
@@ -695,7 +700,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
+ ice_aq_q_shutdown(hw, unloading);
break;
case ICE_CTL_Q_SB:
cq = &hw->sbq;
@@ -714,20 +719,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks. The driver
* may call this at runtime to shutdown and later restart control queues, such
* as in response to a reset event.
*/
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
/* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
/* Shutdown PHY Sideband */
if (ice_is_sbq_supported(hw))
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
/* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
/**
@@ -759,7 +765,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -840,7 +846,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
/* shut down all the control queues first */
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 8f2fd1613a95..1d54b1cdb1c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -21,9 +21,18 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*/
-#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x05
+#define EXP_FW_API_VER_MAJOR_E810 0x01
+#define EXP_FW_API_VER_MINOR_E810 0x05
+
+#define EXP_FW_API_VER_MAJOR_E830 0x01
+#define EXP_FW_API_VER_MINOR_E830 0x07
+
+#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MAJOR_E830 : \
+ EXP_FW_API_VER_MAJOR_E810)
+#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MINOR_E830 : \
+ EXP_FW_API_VER_MINOR_E810)
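ice_aq_ver_check() keeps its plus/minus two tolerance on the minor version; the only change is that the expected baseline is now picked per MAC via these macros. Worked example for E830 (expected 1.07): firmware reporting API 1.05 through 1.09 loads silently, 1.10 or newer triggers the "newer than expected" info message, 1.04 or older triggers the "older than expected" message, and a major version above 1 still refuses to load. The selection itself is just a ternary on hw->mac_type, e.g.:

	u8 exp_minor = (hw->mac_type == ICE_MAC_E830) ?
		       EXP_FW_API_VER_MINOR_E830 : EXP_FW_API_VER_MINOR_E810;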
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index ce5034ed2b24..f182179529b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1339,6 +1339,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
for (i = 0; i < count; i++) {
bool last = false;
+ int try_cnt = 0;
int status;
bh = (struct ice_buf_hdr *)(bufs + start + i);
@@ -1346,8 +1347,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
if (indicate_last)
last = ice_is_last_download_buffer(bh, i, count);
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
+ while (1) {
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
+ last, &offset, &info,
+ NULL);
+ if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
+ hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
+ break;
+
+ try_cnt++;
+
+ if (try_cnt == 5)
+ break;
+
+ msleep(20);
+ }
+
+ if (try_cnt)
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n",
+ try_cnt);
/* Save AQ status from download package */
if (status) {
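Package download can fail transiently while firmware is still completing a signature or security check, which shows up as ICE_AQ_RC_ENOSEC or ICE_AQ_RC_EBADSIG on the admin queue. The loop above retries only those two codes, at most five attempts with a 20 ms sleep in between, and the dev_dbg() records how many retries were needed. The retryability test in isolation:

	/* only these AQ return codes make another download attempt worthwhile */
	bool retryable = hw->adminq.sq_last_status == ICE_AQ_RC_ENOSEC ||
			 hw->adminq.sq_last_status == ICE_AQ_RC_EBADSIG;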
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index b102db8b829a..3cfa071e3718 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -117,17 +117,10 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!repr->dst)
- goto err_add_mac_fltr;
-
- if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
- goto err_dst_free;
-
- if (ice_vsi_add_vlan_zero(vsi))
- goto err_update_security;
+ return -ENOMEM;
netif_keep_dst(uplink_vsi->netdev);
@@ -136,16 +129,48 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
dst->u.port_info.lower_dev = uplink_vsi->netdev;
return 0;
+}
-err_update_security:
+/**
+ * ice_eswitch_cfg_vsi - configure VSI to work in slow-path
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ int err;
+
+ ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+
+ err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (err)
+ goto err_update_security;
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err)
+ goto err_vlan_zero;
+
+ return 0;
+
+err_vlan_zero:
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-err_dst_free:
- metadata_dst_free(repr->dst);
- repr->dst = NULL;
-err_add_mac_fltr:
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
+err_update_security:
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
- return -ENODEV;
+ return err;
+}
+
+/**
+ * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ */
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
}
/**
@@ -153,16 +178,16 @@ err_add_mac_fltr:
* @repr_id: representor ID
* @vsi: VSI for which port representor is configured
*/
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_repr *repr;
- int ret;
+ int err;
if (!ice_is_switchdev_running(pf))
return;
- repr = xa_load(&pf->eswitch.reprs, repr_id);
+ repr = xa_load(&pf->eswitch.reprs, *repr_id);
if (!repr)
return;
@@ -172,12 +197,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
if (repr->br_port)
repr->br_port->vsi = vsi;
- ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
- if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
- ICE_FWD_TO_VSI);
+ err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac);
+ if (err)
dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
repr->id);
+
+ /* The VSI number is different, reload the PR with new id */
+ if (repr->id != vsi->vsi_num) {
+ xa_erase(&pf->eswitch.reprs, repr->id);
+ repr->id = vsi->vsi_num;
+ if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL))
+ dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d",
+ repr->id);
+ *repr_id = repr->id;
}
}
@@ -423,6 +455,7 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct ice_repr *repr;
int err;
@@ -437,7 +470,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_stop_reprs(pf);
+ devl_lock(devlink);
repr = ice_repr_add_vf(vf);
+ devl_unlock(devlink);
if (IS_ERR(repr)) {
err = PTR_ERR(repr);
goto err_create_repr;
@@ -460,7 +495,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
+ devl_lock(devlink);
ice_repr_rem_vf(repr);
+ devl_unlock(devlink);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -484,6 +521,7 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
+ devl_lock(devlink);
ice_repr_rem_vf(repr);
if (xa_empty(&pf->eswitch.reprs)) {
@@ -491,28 +529,11 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
* no point in keeping the nodes
*/
ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
- devl_lock(devlink);
devl_rate_nodes_destroy(devlink);
- devl_unlock(devlink);
} else {
ice_eswitch_start_reprs(pf);
}
-}
-
-/**
- * ice_eswitch_rebuild - rebuild eswitch
- * @pf: pointer to PF structure
- */
-void ice_eswitch_rebuild(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- if (!ice_is_switchdev_running(pf))
- return;
-
- xa_for_each(&pf->eswitch.reprs, id, repr)
- ice_eswitch_detach(pf, repr->vf);
+ devl_unlock(devlink);
}
/**
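ice_eswitch_update_repr() now takes a pointer to the caller's stored representor key because the representor can be re-keyed: when the rebuilt VSI comes back with a different vsi_num, the entry is erased and re-inserted into pf->eswitch.reprs under the new number, and the caller's copy has to follow or later lookups would miss it. Sketch of the calling side, with repr_id as a hypothetical local standing in for wherever the caller keeps the key:

	unsigned long repr_id; /* key stored when the representor was created */

	/* ... representee VSI has just been rebuilt ... */
	ice_eswitch_update_repr(&repr_id, vsi);
	/* repr_id now tracks the new xarray key if vsi->vsi_num changed */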
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index e2e5c0c75e7d..78fd39a6935d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -10,7 +10,6 @@
void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
-void ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -18,7 +17,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -28,6 +27,9 @@ netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc);
+
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
@@ -44,18 +46,13 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off) { }
static inline void
-ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
+ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
return 0;
}
-static inline int ice_eswitch_rebuild(struct ice_pf *pf)
-{
- return -EOPNOTSUPP;
-}
-
static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
return DEVLINK_ESWITCH_MODE_LEGACY;
@@ -85,5 +82,12 @@ ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
{
return rx_ring->netdev;
}
+
+static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { }
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index ac5beecd028b..f5aceb32bf4d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -896,7 +896,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
vsi->back->br_port = NULL;
} else {
- struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+ struct ice_repr *repr =
+ ice_repr_get(vsi->back, br_port->repr_id);
if (repr)
repr->br_port = NULL;
@@ -937,6 +938,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
br_port->vsi = repr->src_vsi;
br_port->vsi_idx = br_port->vsi->idx;
br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+ br_port->repr_id = repr->id;
repr->br_port = br_port;
err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
index 85a8fadb2928..c15c7344d7f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -46,6 +46,7 @@ struct ice_esw_br_port {
enum ice_esw_br_port_type type;
u16 vsi_idx;
u16 pvid;
+ u32 repr_id;
struct xarray vlans;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 62c8205fceba..8c990c976132 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -463,7 +463,354 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
- return sizeof(ice_regs_dump_list);
+ return (sizeof(ice_regs_dump_list) +
+ sizeof(struct ice_regdump_to_ethtool));
+}
+
+/**
+ * ice_ethtool_get_maxspeed - Get the max speed for given lport
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which max speed is requested
+ * @max_speed: return max speed for input lport
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
+ bool active_valid = false, pending_valid = true;
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ u8 active_idx = 0, pending_idx = 0;
+ int status;
+
+ status = ice_aq_get_port_options(hw, options, &option_count, lport,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status)
+ return -EIO;
+ if (!active_valid)
+ return -EINVAL;
+
+ *max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
+ return 0;
+}
+
+/**
+ * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
+ * @hw: pointer to the HW struct
+ *
+ * Return: true when serdes is muxed, false when serdes is not muxed.
+ */
+static bool ice_is_serdes_muxed(struct ice_hw *hw)
+{
+ u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+
+ return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
+}
+
+static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_port_topology - returns physical topology like pcsquad, pcsport,
+ * serdes number
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which physical info requested
+ * @port_topology: buffer to hold port topology
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u16 node_handle = 0;
+ u8 cage_type = 0;
+ bool is_muxed;
+ int err;
+ u8 ctx;
+
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+
+ err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
+ if (err)
+ return -EINVAL;
+
+ is_muxed = ice_is_serdes_muxed(hw);
+
+ if (cage_type == 0x11 || /* SFP+ */
+ cage_type == 0x12) { /* SFP28 */
+ port_topology->serdes_lane_count = 1;
+ err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else if (cage_type == 0x13 || /* QSFP */
+ cage_type == 0x14) { /* QSFP28 */
+ u8 max_speed = 0;
+
+ err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
+ if (err)
+ return err;
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
+ port_topology->serdes_lane_count = 4;
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ port_topology->serdes_lane_count = 2;
+ else
+ port_topology->serdes_lane_count = 1;
+
+ err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_tx_rx_equa - read serdes tx rx equaliser param
+ * @hw: pointer to the HW struct
+ * @serdes_num: represents the serdes number
+ * @ptr: structure to hold all equalization parameters read for the serdes
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization_to_ethtool *ptr)
+{
+ int err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre3);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_atten);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_bflf);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_bfhf);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_drate);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_get_extended_regs - dump serdes Tx/Rx equalization parameters for the
+ * port's serdes lanes
+ * @netdev: pointer to net device structure
+ * @p: output buffer to fill requested register dump
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_extended_regs(struct net_device *netdev, void *p)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_regdump_to_ethtool *ice_prv_regs_buf;
+ struct ice_port_topology port_topology = {};
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ unsigned int i;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are not supported if not the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return -EINVAL;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err)
+ return -EINVAL;
+ if (port_topology.serdes_lane_count > 4)
+ return -EINVAL;
+
+ ice_prv_regs_buf = p;
+
+ /* Get serdes equalization parameter for available serdes */
+ for (i = 0; i < port_topology.serdes_lane_count; i++) {
+ u8 serdes_num = 0;
+
+ serdes_num = port_topology.primary_serdes_lane + i;
+ err = ice_get_tx_rx_equa(hw, serdes_num,
+ &ice_prv_regs_buf->equalization[i]);
+ if (err)
+ return -EINVAL;
+ }
+
+ return 0;
}
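With regs version 2 the ethtool register dump gains a fixed-size tail: the original ice_regs_dump_list words are followed by one struct ice_regdump_to_ethtool, i.e. up to four serdes lanes of eleven equalization values each (lanes beyond serdes_lane_count are left untouched in the buffer, which the ethtool core hands over zeroed). A consumer that knows this layout finds the tail from the base list size alone; regs_data below is a hypothetical pointer to the ETHTOOL_GREGS payload:

	size_t ext_off = sizeof(ice_regs_dump_list);
	struct ice_regdump_to_ethtool *ext =
		(struct ice_regdump_to_ethtool *)((u8 *)regs_data + ext_off);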
static void
@@ -475,10 +822,12 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
u32 *regs_buf = (u32 *)p;
unsigned int i;
- regs->version = 1;
+ regs->version = 2;
for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
+
+ ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}
static u32 ice_get_msglevel(struct net_device *netdev)
@@ -3434,7 +3783,7 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
}
static int
-ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct ice_pf *pf = ice_netdev_to_pf(dev);
@@ -4282,6 +4631,94 @@ ice_get_module_eeprom(struct net_device *netdev,
return 0;
}
+/**
+ * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
+ * pcsquad, pcsport
+ * @hw: pointer to the HW struct
+ * @pcs_quad: pcsquad for input port
+ * @pcs_port: pcsport for input port
+ * @fec_stats: buffer to hold FEC statistics for given port
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
+ u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
+ int err;
+
+ if (pcs_quad > 1 || pcs_port > 3)
+ return -EINVAL;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
+ &fec_corr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
+ &fec_corr_high_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_LOW,
+ &fec_uncorr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_HIGH,
+ &fec_uncorr_high_val);
+ if (err)
+ return err;
+
+ fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
+ fec_corr_low_val;
+ fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
+ fec_uncorr_low_val;
+ return 0;
+}
+
+/**
+ * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
+ * @netdev: network interface device structure
+ * @fec_stats: buffer to hold FEC statistics for given port
+ *
+ */
+static void ice_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_topology port_topology;
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are not supported if not the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err) {
+ netdev_info(netdev, "Extended register dump failed Lport %d\n",
+ pi->lport);
+ return;
+ }
+
+ /* Get FEC correctable, uncorrectable counter */
+ err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
+ port_topology.pcs_port, fec_stats);
+ if (err)
+ netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
+ pi->lport, err);
+}
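Hooking .get_fec_stats into ice_ethtool_ops (next hunk) is all that is needed to expose these counters through the standard ethtool FEC statistics API; no private flags or extra stats strings are involved. With a new enough userspace the counters should be visible with something like `ethtool -I --show-fec <ifname>` (flag spelling per current ethtool userspace; treat the exact invocation as an assumption), reported as corrected_blocks and uncorrectable_blocks.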
+
static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -4290,6 +4727,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
+ .get_fec_stats = ice_get_fec_stats,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index b88e3da06f13..9acccae38625 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -9,6 +9,35 @@ struct ice_phy_type_to_ethtool {
u8 link_mode;
};
+struct ice_serdes_equalization_to_ethtool {
+ int rx_equalization_pre2;
+ int rx_equalization_pre1;
+ int rx_equalization_post1;
+ int rx_equalization_bflf;
+ int rx_equalization_bfhf;
+ int rx_equalization_drate;
+ int tx_equalization_pre1;
+ int tx_equalization_pre3;
+ int tx_equalization_atten;
+ int tx_equalization_post1;
+ int tx_equalization_pre2;
+};
+
+struct ice_regdump_to_ethtool {
+ /* A multilane port can have max 4 serdes */
+ struct ice_serdes_equalization_to_ethtool equalization[4];
+};
+
+/* Port topology from lport i.e.
+ * serdes mapping, pcsquad, macport, cage etc...
+ */
+struct ice_port_topology {
+ u16 pcs_port;
+ u16 primary_serdes_lane;
+ u16 serdes_lane_count;
+ u16 pcs_quad_select;
+};
+
/* Macro to make PHY type to Ethtool link mode table entry.
* The index is the PHY type.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index cfac1d432c15..91cbae1eec89 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -157,6 +157,8 @@
#define GLGEN_RTRIG_CORER_M BIT(0)
#define GLGEN_RTRIG_GLOBR_M BIT(1)
#define GLGEN_STAT 0x000B612C
+#define GLGEN_SWITCH_MODE_CONFIG 0x000B81E0
+#define GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M BIT(2)
#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_M BIT(0)
@@ -177,6 +179,8 @@
#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
#define GLINT_CTL_ITR_GRAN_25_S 28
#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
+#define GLGEN_MAC_LINK_TOPO 0x000B81DC
+#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M GENMASK(1, 0)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
index e4c2c1bff6c0..b7aa6812510a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hwmon.c
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
@@ -96,7 +96,7 @@ static bool ice_is_internal_reading_supported(struct ice_pf *pf)
unsigned long sensors = pf->hw.dev_caps.supported_sensors;
- return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+ return test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
};
void ice_hwmon_init(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5371e91f6bbb..f559e60992fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
- vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
- if (!vsi->af_xdp_zc_qps)
- goto err_zc_qps;
-
return 0;
-err_zc_qps:
- devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
- bitmap_free(vsi->af_xdp_zc_qps);
- vsi->af_xdp_zc_qps = NULL;
/* free the ring and vector containers */
devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
@@ -2282,22 +2274,23 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
if (ret)
goto unroll_vector_base;
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi);
-
- vsi->stat_offsets_loaded = false;
-
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
goto unroll_vector_base;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+ ICE_XDP_CFG_PART);
if (ret)
goto unroll_vector_base;
}
+ ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi);
+
+ vsi->stat_offsets_loaded = false;
+
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
/* Do not exit if configuring RSS had an issue, at
@@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
- ice_destroy_xdp_rings(vsi);
+ ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
ice_vsi_clear_rings(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -2587,8 +2580,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
irq_set_affinity_notifier(irq_num, NULL);
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
+ /* clear the affinity_hint in the IRQ descriptor */
+ irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f60c022f7960..ec636be4d17d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -35,7 +35,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
@@ -623,7 +622,7 @@ skip:
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
@@ -805,6 +804,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ speed = "200 G";
+ break;
case ICE_AQ_LINK_SPEED_100GB:
speed = "100 G";
break;
@@ -2607,7 +2609,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
}
/* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2625,7 +2627,7 @@ free_q_irqs:
irq_num = vsi->q_vectors[vector]->irq.virq;
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -2707,17 +2709,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
bpf_prog_put(old_prog);
}
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *ring;
+
+ if (static_key_enabled(&ice_xdp_locking_key))
+ return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+ q_vector = vsi->rx_rings[qid]->q_vector;
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (ice_ring_is_xdp(ring))
+ return ring;
+
+ return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ int v_idx, q_idx;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ ice_for_each_rxq(vsi, q_idx) {
+ vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+ q_idx);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+}
+
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
*
* Return 0 on success and negative value on error
*/
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int xdp_rings_rem = vsi->num_xdp_txq;
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg xdp_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -2730,8 +2787,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct device *dev;
- int i, v_idx;
- int status;
+ int status, i;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2750,49 +2806,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
- /* follow the logic from ice_vsi_map_rings_to_vectors */
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- int xdp_rings_per_v, q_id, q_base;
-
- xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
- vsi->num_q_vectors - v_idx);
- q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
- xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.tx_ring;
- q_vector->tx.tx_ring = xdp_ring;
- }
- xdp_rings_rem -= xdp_rings_per_v;
- }
-
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key)) {
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- } else {
- struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx) {
- if (ice_ring_is_xdp(ring)) {
- vsi->rx_rings[i]->xdp_ring = ring;
- break;
- }
- }
- }
- ice_tx_xsk_pool(vsi, i);
- }
-
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
*/
- if (ice_is_reset_in_progress(pf->state))
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
+ ice_map_xdp_rings(vsi);
+
/* tell the Tx scheduler that right now we have
* additional queues
*/
@@ -2842,22 +2864,21 @@ err_map_xdp:
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
*
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
* resources
*/
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset bits
- * in pf->state won't be set, so additionally check first q_vector
- * against NULL
+ * rings
*/
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2919,7 @@ free_qmap:
if (static_key_enabled(&ice_xdp_locking_key))
static_branch_dec(&ice_xdp_locking_key);
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3030,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
} else {
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+ ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
@@ -3020,7 +3042,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
- xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
/* reallocate Rx queues that were used for zero-copy */
@@ -4116,7 +4138,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
- int err = 0, timeout = 50;
+ int i, err = 0, timeout = 50;
if (!new_rx && !new_tx)
return -EINVAL;
@@ -4135,15 +4157,32 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
+
+ ice_for_each_traffic_class(i) {
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(vsi->netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ }
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
+ goto done;
+
+rebuild_err:
+ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
+ err);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
return err;
@@ -5459,7 +5498,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf)
if (pf->vsi[v])
pf->vsi[v]->vsi_num = 0;
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
}
/**
@@ -5544,7 +5583,7 @@ static int ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
- ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5624,6 +5663,11 @@ static int ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ ret = ice_init_rdma(pf);
+ if (ret)
+ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+ ret);
+
clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
@@ -7658,8 +7702,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- ice_eswitch_rebuild(pf);
-
if (reset_type == ICE_RESET_PFR) {
err = ice_rebuild_channels(pf);
if (err) {
@@ -7714,7 +7756,7 @@ err_vsi_rebuild:
err_sched_init_port:
ice_sched_cleanup_all(hw);
err_init_ctrlq:
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
/* set this bit in PF state to control service task scheduling */
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 84eab92dc03c..59e8879ac059 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -374,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
*
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
*/
static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+ u32 sr_copy;
+
+ switch (bank) {
+ case ICE_ACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+ break;
+ case ICE_INACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+ break;
+ }
+
+ return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
}
/**
@@ -440,8 +454,7 @@ int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
+ u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -454,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
+
+ /* The Preserved Fields Area contains a sequence of Type-Length-Value
+ * structures which define its contents. The PFA length includes all
+ * of the TLVs, plus the initial length word itself, *and* one final
+ * word at the end after all of the TLVs.
+ */
+ if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+ pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
+
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
+ while (next_tlv < max_tlv) {
u16 tlv_sub_module_type;
u16 tlv_len;
@@ -482,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
}
return -EINVAL;
}
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
+
+ if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+ check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
}
/* Module does not exist */
return -ENOENT;
@@ -1010,6 +1038,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header, convert it to words, and
+ * add the Authentication header size.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ int status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+ &banks->active_css_hdr_len);
+ if (status)
+ return status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+ &banks->inactive_css_hdr_len);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -1055,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw)
return status;
}
+ status = ice_determine_css_hdr_len(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+ return status;
+ }
+
status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
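The ice_get_pfa_module_tlv() hunk above bounds the TLV walk with check_add_overflow() from <linux/overflow.h>, so a corrupt PFA length or TLV length can no longer wrap the 16-bit offsets. A self-contained sketch of the same bounded walk over a hypothetical in-memory word array follows; find_tlv() and its parameters are invented, whereas the driver reads each word from flash instead.

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

/* sr[pfa_ptr] holds the PFA length, which covers the length word itself,
 * every TLV ({ type, length, value[length] } in words) and one final word.
 */
static int find_tlv(const u16 *sr, u16 pfa_ptr, u16 want_type, u16 *tlv_off)
{
	u16 pfa_len = sr[pfa_ptr];
	u16 next, max;

	if (check_add_overflow(pfa_ptr, (u16)(pfa_len - 1), &max))
		return -EINVAL;	/* PFA claims to extend past 16-bit space */

	next = pfa_ptr + 1;
	while (next < max) {
		u16 type = sr[next];
		u16 len = sr[next + 1];

		if (type == want_type) {
			*tlv_off = next;
			return 0;
		}
		/* skip the type and length words plus the value words */
		if (check_add_overflow(next, (u16)2, &next) ||
		    check_add_overflow(next, len, &next))
			return -EINVAL;	/* a bogus length would wrap */
	}
	return -ENOENT;
}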
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 755a9c55267c..7c09ea0f03ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -7,18 +7,24 @@
/* Each recipe can match up to 5 different fields. Fields to match can be meta-
* data, values extracted from packet headers, or results from other recipes.
- * One of the 5 fields is reserved for matching the switch ID. So, up to 4
- * recipes can provide intermediate results to another one through chaining,
- * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ * Therefore, up to 5 recipes can provide intermediate results to another one
+ * through chaining, e.g. recipes 0, 1, 2, 3 and 4 can provide intermediate
+ * results to recipe 5. Note that one of the fields in one of the recipes must
+ * always be reserved for matching the switch ID.
*/
-#define ICE_NUM_WORDS_RECIPE 4
+#define ICE_NUM_WORDS_RECIPE 5
-/* Max recipes that can be chained */
+/* Max recipes that can be chained, not including the last one, which combines
+ * intermediate results.
+ */
#define ICE_MAX_CHAIN_RECIPE 5
-/* 1 word reserved for switch ID from allowed 5 words.
- * So a recipe can have max 4 words. And you can chain 5 such recipes
- * together. So maximum words that can be programmed for look up is 5 * 4.
+/* Total max recipes in chain recipe (including intermediate results) */
+#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1)
+
+/* A recipe can have max 5 words, and 5 recipes can be chained together (using
+ * the 6th one, which would contain only result indexes). So maximum words that
+ * can be programmed for lookup is 5 * 5 (not including intermediate results).
*/
#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
@@ -449,32 +455,11 @@ struct ice_prot_ext_tbl_entry {
/* Extractions to be looked up for a given recipe */
struct ice_prot_lkup_ext {
- u16 prot_type;
u8 n_val_words;
/* create a buffer to hold max words per recipe */
- u16 field_off[ICE_MAX_CHAIN_WORDS];
u16 field_mask[ICE_MAX_CHAIN_WORDS];
struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
-
- /* Indicate field offsets that have field vector indices assigned */
- DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
};
-struct ice_pref_recipe_group {
- u8 n_val_pairs; /* Number of valid pairs */
- struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
- u16 mask[ICE_NUM_WORDS_RECIPE];
-};
-
-struct ice_recp_grp_entry {
- struct list_head l_entry;
-
-#define ICE_INVAL_CHAIN_IND 0xFF
- u16 rid;
- u8 chain_idx;
- u16 fv_idx[ICE_NUM_WORDS_RECIPE];
- u16 fv_mask[ICE_NUM_WORDS_RECIPE];
- struct ice_pref_recipe_group r_group;
-};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
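The comment block above carries the sizing arithmetic: 5 lookup words per recipe, 5 chained recipes feeding a sixth result-only recipe, hence 5 * 5 = 25 programmable lookup words. A trivial compile-time restatement of that arithmetic, using hypothetical EX_* names rather than the driver's macros:

#include <linux/build_bug.h>

#define EX_NUM_WORDS_RECIPE	5	/* lookup words per recipe */
#define EX_MAX_CHAIN_RECIPE	5	/* chained recipes feeding the result recipe */
#define EX_MAX_CHAIN_WORDS	(EX_NUM_WORDS_RECIPE * EX_MAX_CHAIN_RECIPE)

static_assert(EX_MAX_CHAIN_WORDS == 25, "5 words x 5 chained recipes");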
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 0f17fc1181d2..51fac8f18cb0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -7,8 +7,6 @@
#define E810_OUT_PROP_DELAY_NS 1
-#define UNKNOWN_INCVAL_E82X 0x100000000ULL
-
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
@@ -813,7 +811,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
}
mutex_unlock(&pf->ptp.ports_owner.lock);
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -1014,6 +1012,28 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
+ * have independent memory blocks for all ports.
+ *
+ * Return: 0 on success, -ENOMEM if allocating the Tx tracker fails
+ */
+static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
+{
+ tx->block = port;
+ tx->offset = 0;
+ tx->len = INDEX_PER_PORT_ETH56G;
+ tx->has_ready_bitmap = 1;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1027,7 +1047,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
static int
ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = port / ICE_PORTS_PER_QUAD;
+ tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
tx->has_ready_bitmap = 1;
@@ -1210,12 +1230,7 @@ static u64 ice_base_incval(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u64 incval;
- if (ice_is_e810(hw))
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
- else
- incval = UNKNOWN_INCVAL_E82X;
+ incval = ice_get_base_incval(hw);
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1229,8 +1244,8 @@ static u64 ice_base_incval(struct ice_pf *pf)
*/
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
- int quad = port->port_num / ICE_PORTS_PER_QUAD;
int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ int quad = ICE_GET_QUAD_NUM(port->port_num);
struct ice_pf *pf;
struct ice_hw *hw;
u32 val, phy_sts;
@@ -1348,10 +1363,19 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
+ case ICE_PHY_E82X:
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e82x(hw, port, true);
- if (err)
+ err = ice_stop_phy_timer_e82x(hw, port, true);
+ break;
+ default:
+ err = -ENODEV;
+ }
+ if (err && err != -EBUSY)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1385,27 +1409,39 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
+ case ICE_PHY_E82X:
+ /* Start the PHY timer in Vernier mode */
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- /* temporarily disable Tx timestamps while calibrating PHY offset */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = true;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- ptp_port->tx_fifo_busy_cnt = 0;
+ /* temporarily disable Tx timestamps while calibrating
+ * PHY offset
+ */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = true;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ ptp_port->tx_fifo_busy_cnt = 0;
- /* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e82x(hw, port);
- if (err)
- goto out_unlock;
+ /* Start the PHY timer in Vernier mode */
+ err = ice_start_phy_timer_e82x(hw, port);
+ if (err)
+ break;
- /* Enable Tx timestamps right away */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = false;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ /* Enable Tx timestamps right away */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = false;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
+ 0);
+ break;
+ default:
+ err = -ENODEV;
+ }
-out_unlock:
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
port, err);
@@ -1429,20 +1465,23 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
+ if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
return;
ptp_port = &pf->ptp.port;
+ if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+ port *= 2;
if (WARN_ON_ONCE(ptp_port->port_num != port))
return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
+ case ICE_PHY_ETH56G:
case ICE_PHY_E82X:
ice_ptp_port_phy_restart(ptp_port);
return;
@@ -1457,42 +1496,62 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
- * Utility function to enable or disable Tx timestamp interrupt and threshold
+ * Utility function to configure all the PHY interrupt settings, including
+ * whether the PHY interrupt is enabled, and what threshold to use. Also
+ * configures the E82X timestamp owner to react to interrupts from all PHYs.
+ *
+ * Return: 0 on success, -EOPNOTSUPP when the PHY model is not recognized, or
+ * another error code if configuring the PHY interrupt fails on E82X
*/
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int err = 0;
- int quad;
- u32 val;
ice_ptp_reset_ts_memory(hw);
- for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- &val);
- if (err)
- break;
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G: {
+ int port;
- if (ena) {
- val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
- val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
- threshold);
- } else {
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
+ return err;
+ }
}
- err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- val);
- if (err)
- break;
+ return 0;
}
+ case ICE_PHY_E82X: {
+ int quad;
- if (err)
- dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
- err);
- return err;
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
+ int err;
+
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
+ return err;
+ }
+ }
+
+ return 0;
+ }
+ case ICE_PHY_E810:
+ return 0;
+ case ICE_PHY_UNSUP:
+ default:
+ dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
+ hw->ptp.phy_model);
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -1559,6 +1618,10 @@ void ice_ptp_extts_event(struct ice_pf *pf)
u8 chan, tmr_idx;
u32 hi, lo;
+ /* Don't process timestamp events if PTP is not ready */
+ if (pf->ptp.state != ICE_PTP_READY)
+ return;
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Event time is captured by one of the two matched registers
* GLTSYN_EVNT_L: 32 LSB of sampled time event
@@ -1584,27 +1647,33 @@ void ice_ptp_extts_event(struct ice_pf *pf)
/**
* ice_ptp_cfg_extts - Configure EXTTS pin and channel
* @pf: Board private structure
- * @ena: true to enable; false to disable
* @chan: GPIO channel (0-3)
- * @gpio_pin: GPIO pin
- * @extts_flags: request flags from the ptp_extts_request.flags
+ * @config: desired EXTTS configuration.
+ * @store: If set to true, the values will be stored
+ *
+ * Configure an external timestamp event on the requested channel.
+ *
+ * Return: 0 on success, -EOPNOTSUPP on unsupported flags
*/
-static int
-ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
- unsigned int extts_flags)
+static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
+ struct ice_extts_channel *config, bool store)
{
u32 func, aux_reg, gpio_reg, irq_reg;
struct ice_hw *hw = &pf->hw;
u8 tmr_idx;
- if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
- return -EINVAL;
+ /* Reject requests with unsupported flags */
+ if (config->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
irq_reg = rd32(hw, PFINT_OICR_ENA);
- if (ena) {
+ if (config->ena) {
/* Enable the interrupt */
irq_reg |= PFINT_OICR_TSYN_EVNT_M;
aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
@@ -1613,9 +1682,9 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
/* set event level to requested edge */
- if (extts_flags & PTP_FALLING_EDGE)
+ if (config->flags & PTP_FALLING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
- if (extts_flags & PTP_RISING_EDGE)
+ if (config->flags & PTP_RISING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
/* Write GPIO CTL reg.
@@ -1636,12 +1705,52 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
wr32(hw, PFINT_OICR_ENA, irq_reg);
wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
- wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
+ wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
+
+ if (store)
+ memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
return 0;
}
/**
+ * ice_ptp_disable_all_extts - Disable all EXTTS channels
+ * @pf: Board private structure
+ */
+static void ice_ptp_disable_all_extts(struct ice_pf *pf)
+{
+ struct ice_extts_channel extts_cfg = {};
+ int i;
+
+ for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
+ if (pf->ptp.extts_channels[i].ena) {
+ extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
+ extts_cfg.ena = false;
+ ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
+ }
+ }
+
+ synchronize_irq(pf->oicr_irq.virq);
+}
+
+/**
+ * ice_ptp_enable_all_extts - Enable all EXTTS channels
+ * @pf: Board private structure
+ *
+ * Called during reset to restore user configuration.
+ */
+static void ice_ptp_enable_all_extts(struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
+ if (pf->ptp.extts_channels[i].ena)
+ ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
+ false);
+ }
+}
+
+/**
* ice_ptp_cfg_clkout - Configure clock to generate periodic wave
* @pf: Board private structure
* @chan: GPIO channel (0-3)
@@ -1659,6 +1768,9 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
u32 func, val, gpio_pin;
u8 tmr_idx;
+ if (config && config->flags & ~PTP_PEROUT_PHASE)
+ return -EOPNOTSUPP;
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* 0. Reset mode & out_en in AUX_OUT */
@@ -1714,8 +1826,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
* maintaining phase
*/
if (start_time < current_time)
- start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
- NSEC_PER_SEC) * NSEC_PER_SEC + phase;
+ start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase;
if (ice_is_e810(hw))
start_time -= E810_OUT_PROP_DELAY_NS;
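The clkout hunk above replaces the open-coded ceil-divide-and-multiply with roundup_u64(). Assuming roundup_u64(x, y) from <linux/math64.h> rounds x up to the next multiple of y, the two forms compute the same whole-second boundary; the phase offset is still added afterwards. A hypothetical helper contrasting them:

#include <linux/bug.h>
#include <linux/math64.h>
#include <linux/time64.h>

static u64 next_second_boundary(u64 current_time)
{
	u64 old_way = div64_u64(current_time + NSEC_PER_SEC - 1, NSEC_PER_SEC) *
		      NSEC_PER_SEC;
	u64 new_way = roundup_u64(current_time, NSEC_PER_SEC);

	WARN_ON_ONCE(old_way != new_way);	/* identical rounding behaviour */
	return new_way;
}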
@@ -1795,17 +1906,18 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
struct ptp_clock_request *rq, int on)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_perout_channel clk_cfg = {0};
bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
- int err;
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
sma_pres = true;
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ {
+ struct ice_perout_channel clk_cfg = {};
+
chan = rq->perout.index;
if (sma_pres) {
if (chan == ice_pin_desc_e810t[SMA1].chan)
@@ -1825,15 +1937,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
clk_cfg.gpio_pin = chan;
}
+ clk_cfg.flags = rq->perout.flags;
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
rq->perout.start.nsec);
clk_cfg.ena = !!on;
- err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
- break;
+ return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+ }
case PTP_CLK_REQ_EXTTS:
+ {
+ struct ice_extts_channel extts_cfg = {};
+
chan = rq->extts.index;
if (sma_pres) {
if (chan < ice_pin_desc_e810t[SMA2].chan)
@@ -1849,14 +1965,15 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
gpio_pin = chan;
}
- err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
- rq->extts.flags);
- break;
+ extts_cfg.flags = rq->extts.flags;
+ extts_cfg.gpio_pin = gpio_pin;
+ extts_cfg.ena = !!on;
+
+ return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
+ }
default:
return -EOPNOTSUPP;
}
-
- return err;
}
/**
@@ -1869,26 +1986,32 @@ static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
struct ptp_clock_request *rq, int on)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_perout_channel clk_cfg = {0};
- int err;
switch (rq->type) {
case PTP_CLK_REQ_PPS:
+ {
+ struct ice_perout_channel clk_cfg = {};
+
+ clk_cfg.flags = rq->perout.flags;
clk_cfg.gpio_pin = PPS_PIN_INDEX;
clk_cfg.period = NSEC_PER_SEC;
clk_cfg.ena = !!on;
- err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
- break;
+ return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+ }
case PTP_CLK_REQ_EXTTS:
- err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
- TIME_SYNC_PIN_INDEX, rq->extts.flags);
- break;
+ {
+ struct ice_extts_channel extts_cfg = {};
+
+ extts_cfg.flags = rq->extts.flags;
+ extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
+ extts_cfg.ena = !!on;
+
+ return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
+ }
default:
return -EOPNOTSUPP;
}
-
- return err;
}
/**
@@ -1929,11 +2052,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
- /* For Vernier mode, we need to recalibrate after new settime
- * Start with disabling timestamp block
+ /* For Vernier mode on E82X, we need to recalibrate after new settime.
+ * Start with marking timestamps as invalid.
*/
- if (pf->ptp.port.link_up)
- ice_ptp_port_phy_stop(&pf->ptp.port);
+ if (hw->ptp.phy_model == ICE_PHY_E82X) {
+ err = ice_ptp_clear_phy_offset_ready_e82x(hw);
+ if (err)
+ dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
+ }
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -1953,7 +2079,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_clkout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E82X)
+ if (hw->ptp.phy_model == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2091,7 +2217,8 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *system = convert_art_ns_to_tsc(hh_ts);
+ system->cycles = hh_ts;
+ system->cs_id = CSID_X86_ART;
/* Read Device source clock time */
hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
@@ -2578,7 +2705,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
if (!ice_pf_src_tmr_owned(pf))
return;
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -2720,6 +2847,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
ice_ptp_restart_all_phy(pf);
}
+ /* Re-enable all periodic outputs and external timestamp events */
+ ice_ptp_enable_all_clkout(pf);
+ ice_ptp_enable_all_extts(pf);
+
return 0;
}
@@ -3010,12 +3141,10 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- goto err_exit;
- }
+ /* Configure PHY interrupt settings */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ goto err_exit;
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
@@ -3076,7 +3205,10 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
+ ptp_port->port_num);
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
case ICE_PHY_E82X:
@@ -3171,7 +3303,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (pf->hw.phy_model) {
+ switch (pf->hw.ptp.phy_model) {
case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
@@ -3207,7 +3339,7 @@ void ice_ptp_init(struct ice_pf *pf)
ptp->state = ICE_PTP_INITIALIZING;
- ice_ptp_init_phy_model(hw);
+ ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3221,6 +3353,9 @@ void ice_ptp_init(struct ice_pf *pf)
}
ptp->port.port_num = hw->pf_id;
+ if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+ ptp->port.port_num = hw->pf_id * 2;
+
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err;
@@ -3275,6 +3410,8 @@ void ice_ptp_release(struct ice_pf *pf)
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+ ice_ptp_disable_all_extts(pf);
+
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 3af20025043a..2db2257a0fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -29,10 +29,17 @@ enum ice_ptp_pin_e810t {
struct ice_perout_channel {
bool ena;
u32 gpio_pin;
+ u32 flags;
u64 period;
u64 start_time;
};
+struct ice_extts_channel {
+ bool ena;
+ u32 gpio_pin;
+ u32 flags;
+};
+
/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
* is stored in a buffer of registers. Depending on the specific hardware,
* this buffer might be shared across multiple PHY ports.
@@ -153,6 +160,7 @@ struct ice_ptp_tx {
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
+#define INDEX_PER_PORT_ETH56G 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -226,6 +234,7 @@ enum ice_ptp_state {
* @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
* @perout_channels: periodic output data
+ * @extts_channels: channels for external timestamps
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
@@ -249,6 +258,7 @@ struct ice_ptp {
u8 ext_ts_irq;
struct kthread_worker *kworker;
struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
+ struct ice_extts_channel extts_channels[GLTSYN_TGT_H_IDX_MAX];
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 2c4dab0c48ab..e6980b94a6c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,6 +9,321 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
+const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
+ /* ETH56G_PHY_REG_PTP */
+ {
+ /* base_addr */
+ {
+ 0x092000,
+ 0x126000,
+ 0x1BA000,
+ 0x24E000,
+ 0x2E2000,
+ },
+ /* step */
+ 0x98,
+ },
+ /* ETH56G_PHY_MEM_PTP */
+ {
+ /* base_addr */
+ {
+ 0x093000,
+ 0x127000,
+ 0x1BB000,
+ 0x24F000,
+ 0x2E3000,
+ },
+ /* step */
+ 0x200,
+ },
+ /* ETH56G_PHY_REG_XPCS */
+ {
+ /* base_addr */
+ {
+ 0x000000,
+ 0x009400,
+ 0x128000,
+ 0x1BC000,
+ 0x250000,
+ },
+ /* step */
+ 0x21000,
+ },
+ /* ETH56G_PHY_REG_MAC */
+ {
+ /* base_addr */
+ {
+ 0x085000,
+ 0x119000,
+ 0x1AD000,
+ 0x241000,
+ 0x2D5000,
+ },
+ /* step */
+ 0x1000,
+ },
+ /* ETH56G_PHY_REG_GPCS */
+ {
+ /* base_addr */
+ {
+ 0x084000,
+ 0x118000,
+ 0x1AC000,
+ 0x240000,
+ 0x2D4000,
+ },
+ /* step */
+ 0x400,
+ },
+};
+
+const
+struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
+ [ICE_ETH56G_LNK_SPD_1G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x4000, /* 32 */
+ .tx_offset = {
+ .serdes = 0x6666, /* 51.2 */
+ .no_fec = 0xd066, /* 104.2 */
+ .sfd = 0x3000, /* 24 */
+ .onestep = 0x30000 /* 384 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffc59a, /* -29.2 */
+ .no_fec = 0xffff0a80, /* -122.75 */
+ .sfd = 0x2c00, /* 22 */
+ .bs_ds = 0x19a /* 0.8 */
+ /* Dynamic bitslip 0 equals to 10 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_2_5G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x28f6, /* 20.48 */
+ .no_fec = 0x53b8, /* 41.86 */
+ .sfd = 0x1333, /* 9.6 */
+ .onestep = 0x13333 /* 153.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffe8a4, /* -11.68 */
+ .no_fec = 0xffff9a76, /* -50.77 */
+ .sfd = 0xf33, /* 7.6 */
+ .bs_ds = 0xa4 /* 0.32 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_10G] = {
+ .tx_mode = { .def = 1, },
+ .rx_mode = { .def = 1, },
+ .blks_per_clk = 1,
+ .blktime = 0x666, /* 3.2 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x8e80, /* 71.25 */
+ .fc = 0xb4a4, /* 90.32 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x4ccd /* 38.4 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xffffcccd, /* -25.6 */
+ .fc = 0xfffe0014, /* -255.96 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0x32 /* 0.0969697 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_25G] = {
+ .tx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .rx_mk_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .rx_cw_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .blks_per_clk = 1,
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0x147b, /* 10.24, only if RS-FEC enabled */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3857, /* 28.17 */
+ .fc = 0x48c3, /* 36.38 */
+ .rs = 0x8100, /* 64.5 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0x1eb8 /* 15.36 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xffffe71a, /* -12.45 */
+ .fc = 0xfffe894d, /* -187.35 */
+ .rs = 0xfffff8cd, /* -3.6 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_40G] = {
+ .tx_mode = { .def = 3 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 4 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x333, /* 1.6 */
+ .mktime = 0xccd, /* 6.4 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x5a8a, /* 45.27 */
+ .fc = 0x81b8, /* 64.86 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x1333 /* 9.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xfffff594, /* -5.21 */
+ .fc = 0xfffe3080, /* -231.75 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0xccd /* 6.4 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x5400, /* 42 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff994, /* -3.21 */
+ .sfd = 0xe6 /* 0.45 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G2] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3d33, /* 30.6 */
+ .rs = 0x5057, /* 40.17 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff8cd, /* -3.6 */
+ .rs = 0xfffff21a, /* -6.95 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0xa3d /* 5.12, RS-FEC 0x633 (3.1) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x67ec, /* 51.96 */
+ .rs = 0x44fb, /* 34.49 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff5a9, /* -5.17 */
+ .rs = 0xfffff6e6, /* -4.55 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x199a /* 12.8, RS-FEC 0x31b (1.552) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G2] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x460a, /* 35.02 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff548, /* -5.36 */
+ .sfd = 0xe6, /* 0.45 */
+ .bs_ds = 0x303 /* 1.506 */
+ }
+ }
+};
+
/* struct ice_time_ref_info_e82x
*
* E822 hardware can use different sources as the reference for the PTP
@@ -155,6 +470,93 @@ const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
},
};
+const
+struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x19,
+ /* tspll_ndivratio */
+ 1,
+ /* tspll_fbdiv_intgr */
+ 320,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x29,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 195,
+ /* tspll_fbdiv_frac */
+ 1342177280UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x3E,
+ /* tspll_ndivratio */
+ 2,
+ /* tspll_fbdiv_intgr */
+ 128,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x33,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 156,
+ /* tspll_fbdiv_frac */
+ 1073741824UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x1F,
+ /* tspll_ndivratio */
+ 5,
+ /* tspll_fbdiv_intgr */
+ 256,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x52,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 97,
+ /* tspll_fbdiv_frac */
+ 2818572288UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+};
+
/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 2b9423a173bb..3a33e6b9b313 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021, Intel Corporation. */
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
@@ -227,40 +228,632 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
+ * ice_read_cgu_reg_e82x - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ *
+ * Return: 0 on success, or another error code if reading from the CGU fails
+ */
+static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_rd,
+ .dest_dev = cgu,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg_e82x - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ *
+ * Return: 0 on success, or another error code if writing to the CGU fails
+ */
+static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_wr,
+ .dest_dev = cgu,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string
+ */
+static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name
+ */
+static const char *ice_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TS PLL
+ * * %other - CGU read/write failure
+ */
+static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
+ enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCXO &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCXO only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.ts_pll_enable) {
+ dw24.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.tspll_ndivratio = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TS PLL
+ * * %other - CGU read/write failure
+ */
+static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
+ enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_lock_e825c ro_lock;
+ union nac_cgu_dword16_e825c dw16;
+ union nac_cgu_dword23_e825c dw23;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCXO &&
+ clk_freq != ICE_TIME_REF_FREQ_156_250) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCXO only supports 156.25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw23.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw23.ts_pll_enable) {
+ dw23.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
+ dw23.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+
+ /* Enable the correct receiver */
+ if (clk_src == ICE_CLK_SRC_TCXO) {
+ dw9.time_ref_en = 0;
+ dw9.clk_eref0_en = 1;
+ } else {
+ dw9.time_ref_en = 1;
+ dw9.clk_eref0_en = 0;
+ }
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Choose the referenced frequency */
+ dw16.tspll_ck_refclkfreq =
+ e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.tspll_fbdiv_intgr =
+ e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
+ dw19.tspll_ndivratio =
+ e825c_cgu_params[clk_freq].tspll_ndivratio;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ /* These two are constant for E825C */
+ dw22.time1588clk_div = 5;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
+ if (err)
+ return err;
+
+ dw23.ref1588_ck_div =
+ e825c_cgu_params[clk_freq].ref1588_ck_div;
+ dw23.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
+ if (err)
+ return err;
+
+ dw24.tspll_fbdiv_frac =
+ e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw23.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ if (!ro_lock.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw23.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
+ * @hw: pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
+ * losing TS PLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU
+ */
+static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ cntr_bist.i_plllock_sel_0 = 0;
+ cntr_bist.i_plllock_sel_1 = 0;
+
+ return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+}
+
+/**
+ * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
+ * @hw: pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
+ * losing TS PLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU
+ */
+static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ union tspll_bw_tdc_e825c bw_tdc;
+ int err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
+ if (err)
+ return err;
+
+ bw_tdc.i_plllock_sel_1_0 = 0;
+
+ return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
+}
+
+/**
+ * ice_init_cgu_e82x - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ *
+ * Return: 0 on success, other error codes when failed to read/write/cfg CGU
+ */
+static int ice_init_cgu_e82x(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ int err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ if (ice_is_e825c(hw))
+ err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
+ else
+ err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
+ if (err)
+ return err;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ if (ice_is_e825c(hw))
+ err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ else
+ err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+
+ return err;
+}
+
+/**
+ * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
+ * @hw: pointer to HW struct
* @cmd: Timer command
*
- * Prepare the source timer for an upcoming timer sync command.
+ * Return: the source timer command register value for the given PTP timer
+ * command.
*/
-void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val;
- u8 tmr_idx;
+ u32 cmd_val, tmr_idx;
+
+ switch (cmd) {
+ case ICE_PTP_INIT_TIME:
+ cmd_val = GLTSYN_CMD_INIT_TIME;
+ break;
+ case ICE_PTP_INIT_INCVAL:
+ cmd_val = GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ICE_PTP_ADJ_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ICE_PTP_ADJ_TIME_AT_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case ICE_PTP_NOP:
+ case ICE_PTP_READ_TIME:
+ cmd_val = GLTSYN_CMD_READ_TIME;
+ break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
+ }
tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ return tmr_idx << SEL_CPK_SRC | cmd_val;
+}
+
+/**
+ * ice_ptp_tmr_cmd_to_port_reg - Convert to port timer command value
+ * @hw: pointer to HW struct
+ * @cmd: Timer command
+ *
+ * Note that some hardware families use a different command register value for
+ * the PHY ports, while other hardware families use the same register values
+ * as the source timer.
+ *
+ * Return: the PHY port timer command register value for the given PTP timer
+ * command.
+ */
+static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, tmr_idx;
+
+ /* Certain hardware families share the same register values for the
+ * port register and source timer register.
+ */
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_E810:
+ return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
+ default:
+ break;
+ }
switch (cmd) {
case ICE_PTP_INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
+ cmd_val = PHY_CMD_INIT_TIME;
break;
case ICE_PTP_INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ cmd_val = PHY_CMD_INIT_INCVAL;
break;
case ICE_PTP_ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME;
break;
case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME_AT_TIME;
break;
case ICE_PTP_READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
+ cmd_val = PHY_CMD_READ_TIME;
break;
case ICE_PTP_NOP:
+ cmd_val = 0;
break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
}
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ return tmr_idx << SEL_PHY_SRC | cmd_val;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -281,6 +874,1832 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
ice_flush(hw);
}
+/* 56G PHY device functions
+ *
+ * The following functions operate on devices with the ETH 56G PHY.
+ */
+
+/**
+ * ice_write_phy_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @phy_idx: PHY index
+ * @addr: PHY register address
+ * @val: Value to write
+ *
+ * Return: 0 on success, other error codes when failed to write to PHY
+ */
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+ u32 val)
+{
+ struct ice_sbq_msg_input phy_msg;
+ int err;
+
+ phy_msg.opcode = ice_sbq_msg_wr;
+
+ phy_msg.msg_addr_low = lower_16_bits(addr);
+ phy_msg.msg_addr_high = upper_16_bits(addr);
+
+ phy_msg.data = val;
+ phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+
+ err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * ice_read_phy_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @phy_idx: PHY index
+ * @addr: PHY register address
+ * @val: on return, the value read from the PHY
+ *
+ * Return: 0 on success, other error codes when failed to read from PHY
+ */
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+ u32 *val)
+{
+ struct ice_sbq_msg_input phy_msg;
+ int err;
+
+ phy_msg.opcode = ice_sbq_msg_rd;
+
+ phy_msg.msg_addr_low = lower_16_bits(addr);
+ phy_msg.msg_addr_high = upper_16_bits(addr);
+
+ phy_msg.data = 0;
+ phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+
+ err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+ return err;
+ }
+
+ *val = phy_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_phy_res_address_eth56g - Calculate a PHY port register address
+ * @port: Port number to be written
+ * @res_type: resource type (register/memory)
+ * @offset: Offset from PHY port register base
+ * @addr: The result address
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ */
+static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
+ u32 offset, u32 *addr)
+{
+ u8 lane = port % ICE_PORTS_PER_QUAD;
+ u8 phy = ICE_GET_QUAD_NUM(port);
+
+ if (res_type >= NUM_ETH56G_PHY_RES)
+ return -EINVAL;
+
+ *addr = eth56g_phy_res[res_type].base[phy] +
+ lane * eth56g_phy_res[res_type].step + offset;
+ return 0;
+}
+
+/**
+ * ice_write_port_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val, enum eth56g_res_type res_type)
+{
+ u8 phy_port = port % hw->ptp.ports_per_phy;
+ u8 phy_idx = port / hw->ptp.ports_per_phy;
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+}
+
+/**
+ * ice_read_port_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 *val, enum eth56g_res_type res_type)
+{
+ u8 phy_port = port % hw->ptp.ports_per_phy;
+ u8 phy_idx = port / hw->ptp.ports_per_phy;
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+}
+
+/**
+ * ice_write_ptp_reg_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_mac_reg_eth56g - Write a MAC PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_write_xpcs_reg_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val,
+ ETH56G_PHY_REG_XPCS);
+}
+
+/**
+ * ice_read_ptp_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_mac_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_read_gpcs_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS);
+}
+
+/**
+ * ice_read_port_mem_eth56g - Read a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_write_port_mem_eth56g - Write a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * Write the appropriate high register offset to use.
+ *
+ * Return: true if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers, false otherwise.
+ */
+static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_TX_CAPTURE_L:
+ *high_addr = PHY_REG_TX_CAPTURE_U;
+ return true;
+ case PHY_REG_RX_CAPTURE_L:
+ *high_addr = PHY_REG_RX_CAPTURE_U;
+ return true;
+ case PHY_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case PHY_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case PHY_REG_TX_MEMORY_STATUS_L:
+ *high_addr = PHY_REG_TX_MEMORY_STATUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * Write the appropriate high register offset to use.
+ *
+ * Return: true if the provided low address is one of the known 40bit PHY
+ * values split into two registers with the lower 8 bits in the low register and
+ * the upper 32 bits in the high register, false otherwise.
+ */
+static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TIMETUS_L:
+ *high_addr = PHY_REG_TIMETUS_U;
+ return true;
+ case PHY_PCS_REF_TUS_L:
+ *high_addr = PHY_PCS_REF_TUS_U;
+ return true;
+ case PHY_PCS_REF_INC_L:
+ *high_addr = PHY_PCS_REF_INC_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset and read
+ * the two registers associated with a 64bit value, returning it in the val
+ * pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val, enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Check if the caller has specified a known 64 bit register offset and read
+ * the two registers associated with a 64bit value, returning it in the val
+ * pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val)
+{
+ return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = FIELD_GET(P_REG_40B_LOW_M, val);
+ hi = (u32)(val >> P_REG_40B_HIGH_S);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
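The 8/32 split described in the comment can be checked in isolation. The sketch below assumes P_REG_40B_LOW_M is 0xFF and P_REG_40B_HIGH_S is 8, which is what the "lower 8 bits / upper 32 bits" wording implies; these are not values copied from the driver headers:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t val = 0x123456789AULL;		/* an arbitrary 40-bit value */
		uint32_t lo = (uint32_t)(val & 0xFF);	/* lower 8 bits -> low register */
		uint32_t hi = (uint32_t)(val >> 8);	/* upper 32 bits -> high register */

		/* Recombining the two register images reproduces the value */
		assert((((uint64_t)hi << 8) | lo) == val);
		return 0;
	}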
+
+/**
+ * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @low_addr: offset of the lower register to write to
+ * @val: the contents of the 64bit value to write to PHY
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = lower_32_bits(val);
+ hi = upper_32_bits(val);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @low_addr: offset of the lower register to write to
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory
+ * @hw: pointer to the HW struct
+ * @port: the port to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated entries in the
+ * port memory block of the internal PHYs of the 56G devices.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
+ u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+ hi_addr = (u16)PHY_TSTAMP_U(idx);
+
+ err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For 56G based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return 0;
+}
+
+/**
+ * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @port: the quad to read from
+ * @idx: the timestamp index to reset
+ *
+ * Read and then forcibly clear the timestamp index to ensure the valid bit is
+ * cleared and the timestamp status bit is reset in the PHY port memory of
+ * internal PHYs of the 56G devices.
+ *
+ * To directly clear the contents of the timestamp block entirely, discarding
+ * all timestamp data at once, software should instead use
+ * ice_ptp_reset_ts_memory_quad_eth56g().
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx)
+{
+ u64 unused_tstamp;
+ u16 lo_addr;
+ int err;
+
+ /* Read the timestamp register to ensure the timestamp status bit is
+ * cleared.
+ */
+ err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ }
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+
+ err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw)
+{
+ unsigned int port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ 0);
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U,
+ 0);
+ }
+}
+
+/**
+ * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @port: port number
+ * @time: time to initialize the PHY port clocks to
+ *
+ * Write a new initial time value into registers of a specific PHY port.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 time)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ time);
+ if (err)
+ return err;
+
+ /* Rx case */
+ return ice_write_64b_ptp_reg_eth56g(hw, port,
+ PHY_REG_RX_TIMER_INC_PRE_L, time);
+}
+
+/**
+ * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ICE_PTP_ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock (lowest 32 bits). We shift the provided adjustment in
+ * nanoseconds by 32 to calculate the appropriate adjustment to program
+ * into the PHY ports.
+ */
+ cycles = (s64)adj << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
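Both the scaling into the sub-nanosecond domain and the 2s complement handling of negative adjustments can be verified standalone. This sketch multiplies instead of left-shifting a negative value only to stay within strictly defined C behaviour; the result is the same as the driver's (s64)adj << 32:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		int32_t adj = -10;				/* step back by 10 ns */
		/* Equivalent to the driver's (s64)adj << 32 */
		int64_t cycles = (int64_t)adj * (1LL << 32);
		uint64_t timer = 100ULL << 32;			/* port timer at 100 ns */

		/* Adding the 2s complement value modulo 2^64 subtracts 10 ns */
		uint64_t adjusted = timer + (uint64_t)cycles;

		assert(adjusted == 90ULL << 32);
		return 0;
	}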
+
+/**
+ * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for time adjustment
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an ICE_PTP_INIT_INCVAL command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L,
+ incval);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_read_port_capture_eth56g - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L,
+ tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L,
+ rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+ int err;
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_get_speed_eth56g - Get link speed based on PHY link type
+ * @li: pointer to link information struct
+ *
+ * Return: simplified ETH56G PHY speed
+ */
+static enum ice_eth56g_link_spd
+ice_phy_get_speed_eth56g(struct ice_link_status *li)
+{
+ u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low,
+ li->phy_type_high);
+
+ switch (speed) {
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return ICE_ETH56G_LNK_SPD_1G;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return ICE_ETH56G_LNK_SPD_2_5G;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return ICE_ETH56G_LNK_SPD_10G;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return ICE_ETH56G_LNK_SPD_25G;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return ICE_ETH56G_LNK_SPD_40G;
+ case ICE_AQ_LINK_SPEED_50GB:
+ switch (li->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ return ICE_ETH56G_LNK_SPD_50G;
+ default:
+ return ICE_ETH56G_LNK_SPD_50G2;
+ }
+ case ICE_AQ_LINK_SPEED_100GB:
+ if (li->phy_type_high ||
+ li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2)
+ return ICE_ETH56G_LNK_SPD_100G2;
+ else
+ return ICE_ETH56G_LNK_SPD_100G;
+ default:
+ return ICE_ETH56G_LNK_SPD_1G;
+ }
+}
+
+/**
+ * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+{
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u32 val;
+ int err;
+
+ err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH,
+ ICE_ETH56G_NOMINAL_THRESH4);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read VENDOR_TXLANE_THRESH, status: %d",
+ err);
+ return err;
+ }
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ err = ice_read_ptp_reg_eth56g(hw, port_blk,
+ PHY_GPCS_CONFIG_REG0, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+
+ val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M;
+ val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
+ ICE_ETH56G_NOMINAL_TX_THRESH);
+
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_GPCS_CONFIG_REG0, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_TUS);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d",
+ err);
+ return err;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_INC);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
+{
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ bool enable, sfd_ena;
+ u32 val, peer_delay;
+ int err;
+
+ enable = hw->ptp.phy.eth56g.onestep_ena;
+ peer_delay = hw->ptp.phy.eth56g.peer_delay;
+ sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
+
+ /* PHY_PTP_1STEP_CONFIG */
+ err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ if (err)
+ return err;
+
+ if (enable)
+ val |= blk_port;
+ else
+ val &= ~blk_port;
+
+ val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
+
+ err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ if (err)
+ return err;
+
+ /* PHY_PTP_1STEP_PEER_DELAY */
+ val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
+ if (peer_delay)
+ val |= PHY_PTP_1STEP_PD_ADD_PD_M;
+ val |= PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ if (err)
+ return err;
+
+ val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ if (err)
+ return err;
+
+ /* PHY_MAC_XIF_MODE */
+ err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ if (err)
+ return err;
+
+ val &= ~(PHY_MAC_XIF_1STEP_ENA_M | PHY_MAC_XIF_TS_BIN_MODE_M |
+ PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M);
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ val |= PHY_MAC_XIF_GMII_TS_SEL_M;
+ break;
+ default:
+ break;
+ }
+
+ val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
+
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+}
+
+/**
+ * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values
+ * @a: multiplier value
+ * @b: multiplicand value
+ *
+ * Return: result of multiplication
+ */
+static u32 mul_u32_u32_fx_q9(u32 a, u32 b)
+{
+ return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W);
+}
+
+/**
+ * add_u32_u32_fx - Add two u32 fixed point values and discard overflow
+ * @a: first value
+ * @b: second value
+ *
+ * Return: result of addition
+ */
+static u32 add_u32_u32_fx(u32 a, u32 b)
+{
+ return lower_32_bits(((u64)a + b));
+}
+
+/**
+ * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @bs: bitslip multiplier
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated bitslip value
+ */
+static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
+ bool fc, bool rs,
+ enum ice_eth56g_link_spd spd)
+{
+ u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u32 bitslip;
+ int err;
+
+ if (!bs || rs)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
+ &bitslip);
+ else
+ err = ice_read_ptp_reg_eth56g(hw, port_blk,
+ PHY_REG_SD_BIT_SLIP(port_offset),
+ &bitslip);
+ if (err)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) {
+ /* Bitslip register value of 0 corresponds to 10 so substitute
+ * it for calculations
+ */
+ bitslip = 10;
+ } else if (spd == ICE_ETH56G_LNK_SPD_10G ||
+ spd == ICE_ETH56G_LNK_SPD_25G) {
+ if (fc)
+ bitslip = bitslip * 2 + 32;
+ else
+ bitslip = (u32)((s32)bitslip * -1 + 20);
+ }
+
+ bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W;
+ return mul_u32_u32_fx_q9(bitslip, bs);
+}
+
+/**
+ * ice_ptp_calc_deskew_eth56g - Calculate deskew value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @ds: deskew multiplier
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated deskew value
+ */
+static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds,
+ bool rs, enum ice_eth56g_link_spd spd)
+{
+ u32 deskew_i, deskew_f;
+ int err;
+
+ if (!ds)
+ return 0;
+
+ read_poll_timeout(ice_read_ptp_reg_eth56g, err,
+ FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500,
+ 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0,
+ &deskew_i);
+ if (err)
+ return err;
+
+ deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i);
+ deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i);
+
+ if (rs && spd == ICE_ETH56G_LNK_SPD_50G2)
+ ds = 0x633; /* 3.1 */
+ else if (rs && spd == ICE_ETH56G_LNK_SPD_100G)
+ ds = 0x31b; /* 1.552 */
+
+ deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i);
+ /* Shift 3 fractional bits to the end of the integer part */
+ deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W;
+ return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds);
+}
+
+/**
+ * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @spd: link speed
+ * @cfg: structure to store output values
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_eth56g_link_spd spd,
+ const struct ice_eth56g_mac_reg_cfg *cfg,
+ bool fc, bool rs)
+{
+ u32 rx_offset, tx_offset, bs_ds;
+ bool onestep, sfd;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ sfd = hw->ptp.phy.eth56g.sfd_ena;
+ bs_ds = cfg->rx_offset.bs_ds;
+
+ if (fc)
+ rx_offset = cfg->rx_offset.fc;
+ else if (rs)
+ rx_offset = cfg->rx_offset.rs;
+ else
+ rx_offset = cfg->rx_offset.no_fec;
+
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes);
+ if (sfd)
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd);
+
+ if (spd < ICE_ETH56G_LNK_SPD_40G)
+ bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs,
+ spd);
+ else
+ bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd);
+ rx_offset = add_u32_u32_fx(rx_offset, bs_ds);
+ rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT |
+ ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC;
+
+ if (fc)
+ tx_offset = cfg->tx_offset.fc;
+ else if (rs)
+ tx_offset = cfg->tx_offset.rs;
+ else
+ tx_offset = cfg->tx_offset.no_fec;
+ tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd +
+ cfg->tx_offset.onestep * onestep;
+
+ ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset);
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset);
+}
+
+/**
+ * ice_phy_cfg_mac_eth56g - Configure MAC for PTP
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port)
+{
+ const struct ice_eth56g_mac_reg_cfg *cfg;
+ enum ice_eth56g_link_spd spd;
+ struct ice_link_status *li;
+ bool fc = false;
+ bool rs = false;
+ bool onestep;
+ u32 val;
+ int err;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ li = &hw->port_info->phy.link_info;
+ spd = ice_phy_get_speed_eth56g(li);
+ if (!!(li->an_info & ICE_AQ_FEC_EN)) {
+ if (spd == ICE_ETH56G_LNK_SPD_10G) {
+ fc = true;
+ } else {
+ fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN);
+ rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN);
+ }
+ }
+ cfg = &eth56g_mac_cfg[spd];
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0);
+ if (err)
+ return err;
+
+ val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M,
+ cfg->tx_mode.def + rs * cfg->tx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M,
+ cfg->tx_cw_dly.def +
+ onestep * cfg->tx_cw_dly.onestep) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M,
+ cfg->rx_mode.def + rs * cfg->rx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M,
+ cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M,
+ cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk);
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME,
+ cfg->blktime);
+ if (err)
+ return err;
+
+ err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs);
+ if (err)
+ return err;
+
+ if (spd == ICE_ETH56G_LNK_SPD_25G && !rs)
+ val = 0;
+ else
+ val = cfg->mktime;
+
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_MARKERTIME, val);
+}
+
+/**
+ * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @port: the timestamp port
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified port
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val);
+ if (err)
+ return err;
+
+ if (ena) {
+ val |= PHY_TS_INT_CONFIG_ENA_M;
+ val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M;
+ val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold);
+ } else {
+ val &= ~PHY_TS_INT_CONFIG_ENA_M;
+ }
+
+ return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val);
+}
+
+/**
+ * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64bits of PHC time
+ *
+ * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 *phy_time, u64 *phc_time)
+{
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
+ /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, tx_time, rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing a ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EBUSY - failed to acquire PTP semaphore
+ * * %other - PHY read/write failed
+ */
+static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
+ */
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+ difference = phc_time - phy_time;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, phy_time, phc_time);
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
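The "convenient" property mentioned in the comment, that an unsigned subtraction yields the right 2s complement adjustment even when the PHY timer is ahead of the PHC, is easy to check in isolation:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t phc_time = 1000, phy_time = 1500;	/* PHY runs 500 ahead */
		uint64_t difference = phc_time - phy_time;	/* wraps to 2^64 - 500 */

		/* Applying the wrapped difference modulo 2^64 lands on PHC time */
		assert(phy_time + difference == phc_time);
		return 0;
	}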
+
+/**
+ * ice_stop_phy_timer_eth56g - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_eth56g - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u32 lo, hi;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_eth56g(hw, port, false);
+ if (err)
+ return err;
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
+ err = ice_phy_cfg_parpcs_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_ptp_1step_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_mac_eth56g(hw, port);
+ if (err)
+ return err;
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
+ * @hw: pointer to HW struct
+ * @enable: Enable or disable access
+ *
+ * Enable or disable sideband device (PHY and others) access.
+ */
+static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
+{
+ u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
+
+ if (enable)
+ val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
+ else
+ val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
+
+ wr32(hw, PF_SB_REM_DEV_CTL, val);
+}
+
+/**
+ * ice_ptp_init_phc_eth56g - Perform E825C specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to E825C (ETH56G PHY) devices.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to initialize CGU
+ */
+static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+{
+ ice_sb_access_ena_eth56g(hw, true);
+ /* Initialize the Clock Generation Unit */
+ return ice_init_cgu_e82x(hw);
+}
+
+/**
+ * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
+ * @hw: pointer to the HW struct
+ * @ts_status: the timestamp mask pointer
+ *
+ * Read the PHY Tx timestamp status mask indicating which ports have Tx
+ * timestamps available.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
+{
+ const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g;
+ u8 phy, mask;
+ u32 status;
+
+ mask = (1 << hw->ptp.ports_per_phy) - 1;
+ *ts_status = 0;
+
+ for (phy = 0; phy < params->num_phys; phy++) {
+ int err;
+
+ err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status);
+ if (err)
+ return err;
+
+ *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy);
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "PHY interrupt err: %x\n", *ts_status);
+
+ return 0;
+}
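The loop above packs each PHY's per-port ready bits into a single bitmap, shifted by that PHY's position. A standalone sketch with two 4-port PHYs and made-up status words:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint8_t ports_per_phy = 4;
		const uint32_t mask = (1u << ports_per_phy) - 1;	/* 0xF */
		uint32_t phy_status[2] = { 0x5, 0x2 };	/* made-up per-PHY ready bits */
		uint32_t ts_status = 0;

		for (uint8_t phy = 0; phy < 2; phy++)
			ts_status |= (phy_status[phy] & mask) << (phy * ports_per_phy);

		/* Ports 0 and 2 come from PHY 0, port 5 from PHY 1 */
		assert(ts_status == 0x25);
		return 0;
	}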
+
+/**
+ * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read from
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in
+ * the PHY are ready. A set bit means the corresponding timestamp is valid and
+ * ready to be captured from the PHY timestamp block.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ int err;
+
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ tstamp_ready);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
+ * @hw: pointer to the HW struct
+ *
+ * Return: true if it's 2x50 breakout topology, false otherwise
+ */
+static bool ice_is_muxed_topo(struct ice_hw *hw)
+{
+ u8 link_topo;
+ bool mux;
+ u32 val;
+
+ val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+ mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
+ val = rd32(hw, GLGEN_MAC_LINK_TOPO);
+ link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
+
+ return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
+}
+
+/**
+ * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+{
+ struct ice_ptp_hw *ptp = &hw->ptp;
+ struct ice_eth56g_params *params;
+ u8 phy;
+
+ ptp->phy_model = ICE_PHY_ETH56G;
+ params = &ptp->phy.eth56g;
+ params->onestep_ena = false;
+ params->peer_delay = 0;
+ params->sfd_ena = false;
+ params->phy_addr[0] = eth56g_phy_0;
+ params->phy_addr[1] = eth56g_phy_1;
+ params->num_phys = 2;
+ ptp->ports_per_phy = 4;
+ ptp->num_lports = params->num_phys * ptp->ports_per_phy;
+
+ ice_sb_access_ena_eth56g(hw, true);
+ for (phy = 0; phy < params->num_phys; phy++) {
+ u32 phy_rev;
+ int err;
+
+ err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G) {
+ ptp->phy_model = ICE_PHY_UNSUP;
+ return;
+ }
+ }
+
+ ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+}
+
/* E822 family functions
*
* The following functions operate on the E822 family of devices.
@@ -288,18 +2707,21 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
/**
* ice_fill_phy_msg_e82x - Fill message data for a PHY register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
-static void
-ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 port,
+ u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E82X;
- phy = port / ICE_PORTS_PER_PHY_E82X;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
+ phy_port = port % hw->ptp.ports_per_phy;
+ phy = port / hw->ptp.ports_per_phy;
+ quadtype = ICE_GET_QUAD_NUM(port) %
+ ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -430,10 +2852,10 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -507,11 +2929,11 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -546,8 +2968,7 @@ ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low_addr);
return -EINVAL;
}
-
- low = (u32)(val & P_REG_40B_LOW_M);
+ low = FIELD_GET(P_REG_40B_LOW_M, val);
high = (u32)(val >> P_REG_40B_HIGH_S);
err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
@@ -617,24 +3038,30 @@ ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/**
* ice_fill_quad_msg_e82x - Fill message data for quad register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
*
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
+ *
+ * Return:
+ * * %0 - OK
+ * * %-EINVAL - invalid quad number
*/
-static int
-ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 quad,
+ u16 offset)
{
u32 addr;
- if (quad >= ICE_MAX_QUAD)
+ if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
msg->dest_dev = rmn_0;
- if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
+ if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -661,13 +3088,13 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -695,14 +3122,14 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -751,7 +3178,7 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+ *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
return 0;
}
@@ -816,294 +3243,11 @@ static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
- for (quad = 0; quad < ICE_MAX_QUAD; quad++)
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++)
ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_rd;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return err;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_wr;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
- cgu_msg.data = val;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Convert the specified TIME_REF clock frequency to a string.
- */
-static const char *ice_clk_freq_str(u8 clk_freq)
-{
- switch ((enum ice_time_ref_freq)clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Convert the specified clock source to its string name.
- */
-static const char *ice_clk_src_str(u8 clk_src)
-{
- switch ((enum ice_clk_src)clk_src) {
- case ICE_CLK_SRC_TCX0:
- return "TCX0";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCX0)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- */
-static int
-ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCX0 &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCX0 only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.field.ts_pll_enable) {
- dw24.field.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.field.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.field.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.field.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.field.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.field.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.field.i_plllock_sel_0 = 0;
- cntr_bist.field.i_plllock_sel_1 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- if (err)
- return err;
-
- return 0;
-}
-
-/**
* ice_ptp_set_vernier_wl - Set the window length for vernier calibration
* @hw: pointer to the HW struct
*
@@ -1113,7 +3257,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
{
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
@@ -1137,15 +3281,14 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
int err;
- u32 regval;
+ u32 val;
/* Enable reading switch and PHY registers over the sideband queue */
#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
- regval = rd32(hw, PF_SB_REM_DEV_CTL);
- regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
- PF_SB_REM_DEV_CTL_PHY0);
- wr32(hw, PF_SB_REM_DEV_CTL, regval);
+ val = rd32(hw, PF_SB_REM_DEV_CTL);
+ val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, val);
/* Initialize the Clock Generation Unit */
err = ice_init_cgu_e82x(hw);
@@ -1178,7 +3321,7 @@ ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
*/
phy_time = (u64)time << 32;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
/* Tx case */
err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
@@ -1281,7 +3424,7 @@ ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
else
cycles = -(((s64)-adj) << 32);
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
@@ -1307,7 +3450,7 @@ ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
int err;
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
@@ -1372,51 +3515,20 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
*
* Prepare the requested port for an upcoming timer sync command.
*
- * Do not use this function directly. If you want to configure exactly one
- * port, use ice_ptp_one_port_cmd() instead.
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
*/
static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- u8 tmr_idx;
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
int err;
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_PHY_SRC;
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val |= PHY_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val |= PHY_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val |= PHY_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
- break;
- case ICE_PTP_NOP:
- break;
- }
-
/* Tx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
@@ -1425,19 +3537,8 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
}
/* Rx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD,
+ val | TS_CMD_RX_TYPE);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
@@ -1447,63 +3548,6 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
return 0;
}
-/**
- * ice_ptp_one_port_cmd - Prepare one port for a timer command
- * @hw: pointer to the HW struct
- * @configured_port: the port to configure with configured_cmd
- * @configured_cmd: timer command to prepare on the configured_port
- *
- * Prepare the configured_port for the configured_cmd, and prepare all other
- * ports for ICE_PTP_NOP. This causes the configured_port to execute the
- * desired command while all other ports perform no operation.
- */
-static int
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
- enum ice_ptp_tmr_cmd configured_cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- enum ice_ptp_tmr_cmd cmd;
- int err;
-
- if (port == configured_port)
- cmd = configured_cmd;
- else
- cmd = ICE_PTP_NOP;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
- * @hw: pointer to the HW struct
- * @cmd: timer command to prepare
- *
- * Prepare all ports connected to this device for an upcoming timer sync
- * command.
- */
-static int
-ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- int err;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
/* E822 Vernier calibration functions
*
* The following functions are used as part of the vernier calibration of
@@ -1606,7 +3650,7 @@ static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
return;
}
- quad = port / ICE_PORTS_PER_QUAD;
+ quad = ICE_GET_QUAD_NUM(port);
err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
@@ -2327,6 +4371,40 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
}
/**
+ * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers
+ * @hw: pointer to the HW struct
+ *
+ * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted
+ * and received timestamps as invalid.
+ *
+ * Return: 0 on success, other error codes on failure to write to the PHY
+ */
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY TX_OFFSET_READY register\n");
+ return err;
+ }
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY RX_OFFSET_READY register\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
@@ -2636,6 +4714,48 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
return 0;
}
+/**
+ * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @quad: the timestamp quad
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified quad
+ *
+ * Return: 0 on success, other error codes on failure to read or write the quad
+ */
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err)
+ return err;
+
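+ /* Clear the interrupt enable bit; when enabling, also program the threshold */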
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold);
+ }
+
+ return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+}
+
+/**
+ * ice_ptp_init_phy_e82x - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
+{
+ ptp->phy_model = ICE_PHY_E82X;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 8;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -2660,7 +4780,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = rmn_0;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2691,7 +4811,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = rmn_0;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2863,17 +4983,21 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
}
/**
- * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
* @hw: pointer to HW struct
*
- * Enable the timesync PTP functionality for the external PHY connected to
- * this function.
+ * Perform E810-specific PTP hardware clock initialization steps.
+ *
+ * Return: 0 on success, other error codes on failure to initialize TimeSync
*/
-int ice_ptp_init_phy_e810(struct ice_hw *hw)
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
{
u8 tmr_idx;
int err;
+ /* Ensure synchronization delay is zero */
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
GLTSYN_ENA_TSYN_ENA_M);
@@ -2885,21 +5009,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform E810-specific PTP hardware clock initialization steps.
- */
-static int ice_ptp_init_phc_e810(struct ice_hw *hw)
-{
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
-
- /* Initialize the PHY */
- return ice_ptp_init_phy_e810(hw);
-}
-
-/**
* ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
* @hw: Board private structure
* @time: Time to initialize the PHY port clock to
@@ -3020,47 +5129,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
*/
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- int err;
-
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val = GLTSYN_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val = GLTSYN_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val = GLTSYN_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val = GLTSYN_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case ICE_PTP_NOP:
- return 0;
- }
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
- /* Read, modify, write */
- err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK_E810;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- return 0;
+ return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val);
}
/**
@@ -3242,6 +5313,17 @@ int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
}
+/**
+ * ice_ptp_init_phy_e810 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
+{
+ ptp->phy_model = ICE_PHY_E810;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+}
+
/* Device agnostic functions
*
* The following functions implement shared behavior common to both E822 and
@@ -3299,18 +5381,126 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
+ * ice_ptp_init_hw - Initialize hw based on device type
* @hw: pointer to the HW structure
*
- * Determine the PHY model for the device, and initialize hw->phy_model
+ * Determine the PHY model for the device, and initialize hw
* for use by other functions.
*/
-void ice_ptp_init_phy_model(struct ice_hw *hw)
+void ice_ptp_init_hw(struct ice_hw *hw)
{
- if (ice_is_e810(hw))
- hw->phy_model = ICE_PHY_E810;
+ struct ice_ptp_hw *ptp = &hw->ptp;
+
+ if (ice_is_e822(hw) || ice_is_e823(hw))
+ ice_ptp_init_phy_e82x(ptp);
+ else if (ice_is_e810(hw))
+ ice_ptp_init_phy_e810(ptp);
+ else if (ice_is_e825c(hw))
+ ice_ptp_init_phy_e825c(hw);
else
- hw->phy_model = ICE_PHY_E82X;
+ ptp->phy_model = ICE_PHY_UNSUP;
+}
+
+/**
+ * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare one port for the upcoming timer sync command. Do not use this
+ * function when programming only a single port; use ice_ptp_one_port_cmd()
+ * instead to ensure the non-modified ports are properly initialized to
+ * ICE_PTP_NOP.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EOPNOTSUPP - PHY model not supported
+ * * %other - failed to write port command
+ */
+static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
+ case ICE_PHY_E82X:
+ return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_ptp_one_port_cmd - Program one PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @configured_port: the port that should execute the command
+ * @configured_cmd: the command to be executed on the configured port
+ *
+ * Prepare one port for executing a timer command, while preparing all other
+ * ports to ICE_PTP_NOP. This allows executing a command on a single port
+ * while ensuring all other ports do not execute stale commands.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd)
+{
+ u32 port;
+
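+ /* Fold each PHY's status bits into one bitmap, offset by its first port */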
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ /* Program the configured port with the configured command,
+ * program all other ports with ICE_PTP_NOP.
+ */
+ if (port == configured_port)
+ err = ice_ptp_write_port_cmd(hw, port, configured_cmd);
+ else
+ err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command
+ * @hw: pointer to HW struct
+ * @cmd: the timer command to setup
+ *
+ * Prepare all PHY ports on this device for the requested timer command. For
+ * some families this can be done in one shot, but for other families each
+ * port must be configured individually.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 port;
+
+ /* PHY models which can program all ports simultaneously */
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_E810:
+ return ice_ptp_port_cmd_e810(hw, cmd);
+ default:
+ break;
+ }
+
+ /* PHY models which require programming each port separately */
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_write_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
@@ -3331,17 +5521,7 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- switch (hw->phy_model) {
- case ICE_PHY_E810:
- err = ice_ptp_port_cmd_e810(hw, cmd);
- break;
- case ICE_PHY_E82X:
- err = ice_ptp_port_cmd_e82x(hw, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
+ err = ice_ptp_port_cmd(hw, cmd);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
cmd, err);
@@ -3383,7 +5563,11 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
@@ -3425,7 +5609,10 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
@@ -3491,7 +5678,10 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
@@ -3521,7 +5711,9 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
case ICE_PHY_E82X:
@@ -3549,7 +5741,9 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
case ICE_PHY_E82X:
@@ -3610,7 +5804,10 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
case ICE_PHY_E82X:
ice_ptp_reset_ts_memory_e82x(hw);
break;
@@ -3636,7 +5833,9 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_init_phc_eth56g(hw);
case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
case ICE_PHY_E82X:
@@ -3659,7 +5858,10 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1f3e03124430..0852a34ade91 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -41,6 +41,41 @@ enum ice_ptp_fec_mode {
ICE_PTP_FEC_MODE_RS_FEC
};
+enum eth56g_res_type {
+ ETH56G_PHY_REG_PTP,
+ ETH56G_PHY_MEM_PTP,
+ ETH56G_PHY_REG_XPCS,
+ ETH56G_PHY_REG_MAC,
+ ETH56G_PHY_REG_GPCS,
+ NUM_ETH56G_PHY_RES
+};
+
+enum ice_eth56g_link_spd {
+ ICE_ETH56G_LNK_SPD_1G,
+ ICE_ETH56G_LNK_SPD_2_5G,
+ ICE_ETH56G_LNK_SPD_10G,
+ ICE_ETH56G_LNK_SPD_25G,
+ ICE_ETH56G_LNK_SPD_40G,
+ ICE_ETH56G_LNK_SPD_50G,
+ ICE_ETH56G_LNK_SPD_50G2,
+ ICE_ETH56G_LNK_SPD_100G,
+ ICE_ETH56G_LNK_SPD_100G2,
+ NUM_ICE_ETH56G_LNK_SPD /* Must be last */
+};
+
+/**
+ * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
+ * @base: base address for each PHY block
+ * @step: step between PHY lanes
+ *
+ * Characteristic information for the various PHY register parameters in the
+ * ETH56G devices
+ */
+struct ice_phy_reg_info_eth56g {
+ u32 base[NUM_ETH56G_PHY_RES];
+ u32 step;
+};
+
/**
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
@@ -94,8 +129,75 @@ struct ice_vernier_info_e82x {
u32 rx_fixed_delay;
};
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_INT GENMASK(19, 9)
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC GENMASK(8, 0)
+#define ICE_ETH56G_MAC_CFG_FRAC_W 9
/**
- * struct ice_cgu_pll_params_e82x
+ * struct ice_eth56g_mac_reg_cfg - MAC config values for specific PTP registers
+ * @tx_mode: Tx timestamp compensation mode
+ * @tx_mk_dly: Tx timestamp marker start strobe delay
+ * @tx_cw_dly: Tx timestamp codeword start strobe delay
+ * @rx_mode: Rx timestamp compensation mode
+ * @rx_mk_dly: Rx timestamp marker start strobe delay
+ * @rx_cw_dly: Rx timestamp codeword start strobe delay
+ * @blks_per_clk: number of blocks transferred per clock cycle
+ * @blktime: block time, fixed point
+ * @mktime: marker time, fixed point
+ * @tx_offset: total Tx offset, fixed point
+ * @rx_offset: total Rx offset, contains value for bitslip/deskew, fixed point
+ *
+ * All fixed-point registers except the Rx offset are 23-bit unsigned values
+ * with a 9-bit fractional part.
+ * The Rx offset is an 11-bit unsigned value with a 9-bit fractional part.
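+ * For example, with a 9-bit fractional part, a raw value of 0x200 encodes 1.0.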
+ */
+struct ice_eth56g_mac_reg_cfg {
+ struct {
+ u8 def;
+ u8 rs;
+ } tx_mode;
+ u8 tx_mk_dly;
+ struct {
+ u8 def;
+ u8 onestep;
+ } tx_cw_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mode;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mk_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_cw_dly;
+ u8 blks_per_clk;
+ u16 blktime;
+ u16 mktime;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 onestep;
+ } tx_offset;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 bs_ds;
+ } rx_offset;
+};
+
+extern
+const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
+
+/**
+ * struct ice_cgu_pll_params_e82x - E82X CGU parameters
* @refclk_pre_div: Reference clock pre-divisor
* @feedback_div: Feedback divisor
* @frac_n_div: Fractional divisor
@@ -185,9 +287,34 @@ struct ice_cgu_pin_desc {
extern const struct
ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+/**
+ * struct ice_cgu_pll_params_e825c - E825C CGU parameters
+ * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection
+ * @tspll_ndivratio: ndiv ratio that goes directly to the pll
+ * @tspll_fbdiv_intgr: TS PLL integer feedback divide
+ * @tspll_fbdiv_frac: TS PLL fractional feedback divide
+ * @ref1588_ck_div: clock divider for tspll ref
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_cgu_pll_params_e825c {
+ u32 tspll_ck_refclkfreq;
+ u32 tspll_ndivratio;
+ u32 tspll_fbdiv_intgr;
+ u32 tspll_fbdiv_frac;
+ u32 ref1588_ck_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
+/* Table of constants related to possible ETH56G PHY resources */
+extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
+
/* Table of constants related to possible TIME_REF sources */
extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
@@ -197,7 +324,9 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
*/
-#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_PLL_FREQ 812500000
+#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define E810_OUT_PROP_DELAY_NS 1
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -208,11 +337,15 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
void ice_ptp_reset_ts_memory(struct ice_hw *hw);
int ice_ptp_init_phc(struct ice_hw *hw);
+void ice_ptp_init_hw(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd);
/* E822 family functions */
int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
@@ -264,9 +397,9 @@ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
-int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
@@ -280,11 +413,44 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
enum dpll_lock_status *dpll_state);
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
-
-void ice_ptp_init_phy_model(struct ice_hw *hw);
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
unsigned long *caps);
+/* ETH56G family functions */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status);
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold);
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
+
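+/* 1.25 ns per tick, assuming the same 32-bit fractional ns format as the E810 incval */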
+#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_INC 0x300000000ULL
+#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
+#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
+
+/**
+ * ice_get_base_incval - Get base clock increment value
+ * @hw: pointer to the HW struct
+ *
+ * Return: base clock increment value for supported PHYs, 0 otherwise
+ */
+static inline u64 ice_get_base_incval(struct ice_hw *hw)
+{
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ICE_ETH56G_NOMINAL_INCVAL;
+ case ICE_PHY_E810:
+ return ICE_PTP_NOMINAL_INCVAL_E810;
+ case ICE_PHY_E82X:
+ return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ default:
+ return 0;
+ }
+}
+
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -312,6 +478,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define TS_CMD_MASK_E810 0xFF
#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+#define TS_CMD_RX_TYPE ICE_M(0x18, 0x4)
/* Macros to derive port low and high addresses on both quads */
#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
@@ -344,11 +511,8 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define Q_REG_TX_MEM_GBL_CFG 0xC08
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
-#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
/* Tx Timestamp data registers */
@@ -380,7 +544,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define P_REG_TIMETUS_L 0x410
#define P_REG_TIMETUS_U 0x414
-#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_LOW_M GENMASK(7, 0)
#define P_REG_40B_HIGH_S 8
/* PHY window length registers */
@@ -487,7 +651,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32))
/* E810 timer command register */
-#define ETH_GLTSYN_CMD 0x03000344
+#define E810_ETH_GLTSYN_CMD 0x03000344
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
@@ -549,4 +713,115 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
/* E810T PCA9575 IO controller pin control */
#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+/* ETH56G PHY register addresses */
+/* Timestamp PHY incval registers */
+#define PHY_REG_TIMETUS_L 0x8
+#define PHY_REG_TIMETUS_U 0xC
+
+/* Timestamp PCS registers */
+#define PHY_PCS_REF_TUS_L 0x18
+#define PHY_PCS_REF_TUS_U 0x1C
+
+/* Timestamp PCS ref incval registers */
+#define PHY_PCS_REF_INC_L 0x20
+#define PHY_PCS_REF_INC_U 0x24
+
+/* Timestamp init registers */
+#define PHY_REG_RX_TIMER_INC_PRE_L 0x64
+#define PHY_REG_RX_TIMER_INC_PRE_U 0x68
+#define PHY_REG_TX_TIMER_INC_PRE_L 0x44
+#define PHY_REG_TX_TIMER_INC_PRE_U 0x48
+
+/* Timestamp match and adjust target registers */
+#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C
+#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70
+#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C
+#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50
+
+/* Timestamp command registers */
+#define PHY_REG_TX_TMR_CMD 0x40
+#define PHY_REG_RX_TMR_CMD 0x60
+
+/* Phy offset ready registers */
+#define PHY_REG_TX_OFFSET_READY 0x54
+#define PHY_REG_RX_OFFSET_READY 0x74
+
+/* Phy total offset registers */
+#define PHY_REG_TOTAL_TX_OFFSET_L 0x38
+#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C
+#define PHY_REG_TOTAL_RX_OFFSET_L 0x58
+#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C
+
+/* Timestamp capture registers */
+#define PHY_REG_TX_CAPTURE_L 0x78
+#define PHY_REG_TX_CAPTURE_U 0x7C
+#define PHY_REG_RX_CAPTURE_L 0x8C
+#define PHY_REG_RX_CAPTURE_U 0x90
+
+/* Memory status registers */
+#define PHY_REG_TX_MEMORY_STATUS_L 0x80
+#define PHY_REG_TX_MEMORY_STATUS_U 0x84
+
+/* Interrupt config register */
+#define PHY_REG_TS_INT_CONFIG 0x88
+
+/* XIF mode config register */
+#define PHY_MAC_XIF_MODE 0x24
+#define PHY_MAC_XIF_1STEP_ENA_M ICE_M(0x1, 5)
+#define PHY_MAC_XIF_TS_BIN_MODE_M ICE_M(0x1, 11)
+#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
+#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
+
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
+#define PHY_GPCS_BITSLIP 0x5C
+
+#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
+#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
+
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
+#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
+#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
+
+/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
+#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
+#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
+
+#define PHY_REG_REVISION 0x85000
+
+#define PHY_REG_DESKEW_0 0x94
+#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
+#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
+
+#define PHY_REG_GPCS_BITSLIP 0x5C
+#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
+#define PHY_REVISION_ETH56G 0x10200
+#define PHY_VENDOR_TXLANE_THRESH 0x2000C
+
+#define PHY_MAC_TSU_CONFIG 0x40
+#define PHY_MAC_TSU_CFG_RX_MODE_M ICE_M(0x7, 0)
+#define PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M ICE_M(0x7, 4)
+#define PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M ICE_M(0x7, 8)
+#define PHY_MAC_TSU_CFG_TX_MODE_M ICE_M(0x7, 12)
+#define PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M ICE_M(0x1F, 16)
+#define PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M ICE_M(0x1F, 21)
+#define PHY_MAC_TSU_CFG_BLKS_PER_CLK_M ICE_M(0x1, 28)
+#define PHY_MAC_RX_MODULO 0x44
+#define PHY_MAC_RX_OFFSET 0x48
+#define PHY_MAC_RX_OFFSET_M ICE_M(0xFFFFFF, 0)
+#define PHY_MAC_TX_MODULO 0x4C
+#define PHY_MAC_BLOCKTIME 0x50
+#define PHY_MAC_MARKERTIME 0x54
+#define PHY_MAC_TX_OFFSET 0x58
+
+#define PHY_PTP_INT_STATUS 0x7FD140
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index d367f4c66dcd..bdda3401e343 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -285,9 +285,7 @@ ice_repr_reg_netdev(struct net_device *netdev)
static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
- devl_lock(devlink_port->devlink);
devl_rate_leaf_destroy(devlink_port);
- devl_unlock(devlink_port->devlink);
}
/**
@@ -308,6 +306,7 @@ static void ice_repr_rem(struct ice_repr *repr)
void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_repr_remove_node(&repr->vf->devlink_port);
+ ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -403,11 +402,17 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
if (err)
goto err_netdev;
+ err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
+ if (err)
+ goto err_cfg_vsi;
+
ice_virtchnl_set_repr_ops(vf);
ice_repr_set_tx_topology(vf->pf);
return repr;
+err_cfg_vsi:
+ unregister_netdev(repr->netdev);
err_netdev:
ice_repr_rem(repr);
err_repr_add:
@@ -415,12 +420,9 @@ err_repr_add:
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
- if (!vsi->vf)
- return NULL;
-
- return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
+ return xa_load(&pf->eswitch.reprs, id);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index cff730b15ca0..488661b2900b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -35,9 +35,8 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr);
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
-
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
int xmit_status);
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index ead75fe2bcda..3b0054faf70c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -47,10 +47,12 @@ struct ice_sbq_evt_desc {
};
enum ice_sbq_msg_dev {
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06
+ eth56g_phy_0 = 0x02,
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06,
+ eth56g_phy_1 = 0x0D,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 067712f4923f..55ef33208456 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1416,21 +1416,23 @@ out_put_vf:
}
/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
+ * __ice_set_vf_mac - program VF MAC address
+ * @pf: PF to be configured
* @vf_id: VF identifier
* @mac: MAC address
*
* program VF MAC address
+ * Return: zero on success or an error code on failure
*/
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct device *dev;
struct ice_vf *vf;
int ret;
+ dev = ice_pf_to_dev(pf);
if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ dev_err(dev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
@@ -1459,13 +1461,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
+ dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
} else {
/* PF will add MAC rule for the VF */
vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
+ dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
}
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
@@ -1477,6 +1479,20 @@ out_put_vf:
}
/**
+ * ice_set_vf_mac - .ndo_set_vf_mac handler
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ * Return: zero on success or an error code on failure
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
+}
+
+/**
* ice_set_vf_trust
* @netdev: network interface device structure
* @vf_id: VF identifier
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 8f22313474d6..96549ca5c52c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -28,6 +28,7 @@
#ifdef CONFIG_PCI_IOV
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
@@ -81,6 +82,13 @@ ice_sriov_configure(struct pci_dev __always_unused *pdev,
}
static inline int
+__ice_set_vf_mac(struct ice_pf __always_unused *pf,
+ u16 __always_unused vf_id, const u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_set_vf_mac(struct net_device __always_unused *netdev,
int __always_unused vf_id, u8 __always_unused *mac)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 94d6670d0901..3caafcdc301f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3,6 +3,7 @@
#include "ice_lib.h"
#include "ice_switch.h"
+#include "ice_trace.h"
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
@@ -1471,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw)
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
- INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -1899,7 +1899,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT) {
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
if (opc == ice_aqc_opc_alloc_res)
@@ -1961,6 +1962,15 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
status = -ENOENT;
+ if (!status) {
+ if (opc == ice_aqc_opc_add_sw_rules)
+ hw->switch_info->rule_cnt += num_rules;
+ else if (opc == ice_aqc_opc_remove_sw_rules)
+ hw->switch_info->rule_cnt -= num_rules;
+ }
+
+ trace_ice_aq_sw_rules(hw->switch_info);
+
return status;
}
@@ -2181,8 +2191,10 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
sw_buf->res_type = cpu_to_le16(res_type);
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
ice_aqc_opc_alloc_res);
- if (!status)
+ if (!status) {
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ hw->switch_info->recp_cnt++;
+ }
return status;
}
@@ -2196,7 +2208,13 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
*/
static int ice_free_recipe_res(struct ice_hw *hw, u16 rid)
{
- return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+ int status;
+
+ status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+ if (!status)
+ hw->switch_info->recp_cnt--;
+
+ return status;
}
/**
@@ -2281,20 +2299,6 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
}
/**
- * ice_collect_result_idx - copy result index values
- * @buf: buffer that contains the result index
- * @recp: the recipe struct to copy data into
- */
-static void
-ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
- struct ice_sw_recipe *recp)
-{
- if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
- set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
- recp->res_idxs);
-}
-
-/**
* ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
* @hw: pointer to hardware structure
* @recps: struct that we need to populate
@@ -2352,18 +2356,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
- struct ice_recp_grp_entry *rg_entry;
u8 i, prof, idx, prot = 0;
bool is_root;
u16 off = 0;
- rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
- GFP_KERNEL);
- if (!rg_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
-
idx = root_bufs.recipe_indx;
is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
@@ -2376,11 +2372,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
prof = find_first_bit(recipe_to_profile[idx],
ICE_MAX_NUM_PROFILES);
for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
- u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
-
- rg_entry->fv_idx[i] = lkup_indx;
- rg_entry->fv_mask[i] =
- le16_to_cpu(root_bufs.content.mask[i + 1]);
+ u8 lkup_indx = root_bufs.content.lkup_indx[i];
+ u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]);
/* If the recipe is a chained recipe then all its
* child recipe's result will have a result index.
@@ -2391,26 +2384,21 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
* has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
* valid offset value.
*/
- if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
- rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
- rg_entry->fv_idx[i] == 0)
+ if (!lkup_indx ||
+ (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) ||
+ test_bit(lkup_indx,
+ hw->switch_info->prof_res_bm[prof]))
continue;
- ice_find_prot_off(hw, ICE_BLK_SW, prof,
- rg_entry->fv_idx[i], &prot, &off);
+ ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx,
+ &prot, &off);
lkup_exts->fv_words[fv_word_idx].prot_id = prot;
lkup_exts->fv_words[fv_word_idx].off = off;
- lkup_exts->field_mask[fv_word_idx] =
- rg_entry->fv_mask[i];
+ lkup_exts->field_mask[fv_word_idx] = lkup_mask;
fv_word_idx++;
}
- /* populate rg_list with the data from the child entry of this
- * recipe
- */
- list_add(&rg_entry->l_entry, &recps[rid].rg_list);
/* Propagate some data to the recipe database */
- recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
@@ -2418,11 +2406,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
- recps[idx].chain_idx = root_bufs.content.result_indx &
- ~ICE_AQ_RECIPE_RESULT_EN;
- set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
- } else {
- recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ set_bit(root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs);
}
if (!is_root) {
@@ -2442,15 +2427,6 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Complete initialization of the root recipe entry */
lkup_exts->n_val_words = fv_word_idx;
- recps[rid].big_recp = (num_recps > 1);
- recps[rid].n_grp_count = (u8)num_recps;
- recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
- recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
- GFP_KERNEL);
- if (!recps[rid].root_buf) {
- status = -ENOMEM;
- goto err_unroll;
- }
/* Copy result indexes */
bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
@@ -2922,7 +2898,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT)
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
@@ -4766,11 +4743,6 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
continue;
}
- /* Skip inverse action recipes */
- if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
- ICE_AQ_RECIPE_ACT_INV_ACT)
- continue;
-
/* if number of words we are looking for match */
if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
@@ -4895,110 +4867,55 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
}
/**
- * ice_create_first_fit_recp_def - Create a recipe grouping
- * @hw: pointer to the hardware structure
- * @lkup_exts: an array of protocol header extractions
- * @rg_list: pointer to a list that stores new recipe groups
- * @recp_cnt: pointer to a variable that stores returned number of recipe groups
- *
- * Using first fit algorithm, take all the words that are still not done
- * and start grouping them in 4-word groups. Each group makes up one
- * recipe.
- */
-static int
-ice_create_first_fit_recp_def(struct ice_hw *hw,
- struct ice_prot_lkup_ext *lkup_exts,
- struct list_head *rg_list,
- u8 *recp_cnt)
-{
- struct ice_pref_recipe_group *grp = NULL;
- u8 j;
-
- *recp_cnt = 0;
-
- /* Walk through every word in the rule to check if it is not done. If so
- * then this word needs to be part of a new recipe.
- */
- for (j = 0; j < lkup_exts->n_val_words; j++)
- if (!test_bit(j, lkup_exts->done)) {
- if (!grp ||
- grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
- struct ice_recp_grp_entry *entry;
-
- entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*entry),
- GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- list_add(&entry->l_entry, rg_list);
- grp = &entry->r_group;
- (*recp_cnt)++;
- }
-
- grp->pairs[grp->n_val_pairs].prot_id =
- lkup_exts->fv_words[j].prot_id;
- grp->pairs[grp->n_val_pairs].off =
- lkup_exts->fv_words[j].off;
- grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
- grp->n_val_pairs++;
- }
-
- return 0;
-}
-
-/**
* ice_fill_fv_word_index - fill in the field vector indices for a recipe group
* @hw: pointer to the hardware structure
- * @fv_list: field vector with the extraction sequence information
- * @rg_list: recipe groupings with protocol-offset pairs
+ * @rm: recipe management list entry
*
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
static int
-ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
- struct list_head *rg_list)
+ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm)
{
struct ice_sw_fv_list_entry *fv;
- struct ice_recp_grp_entry *rg;
struct ice_fv_word *fv_ext;
+ u8 i;
- if (list_empty(fv_list))
- return 0;
+ if (list_empty(&rm->fv_list))
+ return -EINVAL;
- fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry,
list_entry);
fv_ext = fv->fv_ptr->ew;
- list_for_each_entry(rg, rg_list, l_entry) {
- u8 i;
-
- for (i = 0; i < rg->r_group.n_val_pairs; i++) {
- struct ice_fv_word *pr;
- bool found = false;
- u16 mask;
- u8 j;
+ /* Add the switch ID as the first word. */
+ rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK;
+ rm->n_ext_words++;
- pr = &rg->r_group.pairs[i];
- mask = rg->r_group.mask[i];
-
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv_ext[j].prot_id == pr->prot_id &&
- fv_ext[j].off == pr->off) {
- found = true;
+ for (i = 1; i < rm->n_ext_words; i++) {
+ struct ice_fv_word *fv_word = &rm->ext_words[i - 1];
+ u16 fv_mask = rm->word_masks[i - 1];
+ bool found = false;
+ u8 j;
- /* Store index of field vector */
- rg->fv_idx[i] = j;
- rg->fv_mask[i] = mask;
- break;
- }
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) {
+ if (fv_ext[j].prot_id == fv_word->prot_id &&
+ fv_ext[j].off == fv_word->off) {
+ found = true;
- /* Protocol/offset could not be found, caller gave an
- * invalid pair
- */
- if (!found)
- return -EINVAL;
+ /* Store index of field vector */
+ rm->fv_idx[i] = j;
+ rm->fv_mask[i] = fv_mask;
+ break;
+ }
}
+
+ /* Protocol/offset could not be found, caller gave an invalid
+ * pair.
+ */
+ if (!found)
+ return -EINVAL;
}
return 0;
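The rewritten ice_fill_fv_word_index() above reserves slot 0 for the switch ID and then, for every remaining extraction word, performs a linear search of the profile's field vector for the matching (protocol ID, offset) pair, recording the slot index and mask. A standalone sketch of that search follows (editor's illustration only, not driver code; the struct layout and the sample values are assumptions):

#include <stdio.h>

struct fv_word {
	unsigned char prot_id;
	unsigned short off;
};

/* Return the field-vector slot holding (prot_id, off), or -1 if absent. */
static int find_fv_index(const struct fv_word *fv, int fvw,
			 unsigned char prot_id, unsigned short off)
{
	for (int j = 0; j < fvw; j++)
		if (fv[j].prot_id == prot_id && fv[j].off == off)
			return j;
	return -1;
}

int main(void)
{
	/* Hypothetical extraction sequence: three (protocol, offset) words. */
	const struct fv_word fv[] = { { 16, 0 }, { 32, 12 }, { 32, 14 } };
	int idx = find_fv_index(fv, 3, 32, 14);

	printf("lookup (prot 32, off 14) -> fv index %d\n", idx);	/* prints 2 */
	return 0;
}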
@@ -5072,335 +4989,223 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
}
/**
- * ice_add_sw_recipe - function to call AQ calls to create switch recipe
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @profiles: bitmap of profiles that will be associated.
+ * ice_calc_recp_cnt - calculate number of recipes based on word count
+ * @word_cnt: number of lookup words
+ *
+ * Word count should include the switch ID word and the regular lookup words.
+ * Returns: number of recipes required to fit @word_cnt, including any extra
+ * recipes needed for recipe chaining.
*/
-static int
-ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
- unsigned long *profiles)
+static int ice_calc_recp_cnt(u8 word_cnt)
{
- DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
- struct ice_aqc_recipe_content *content;
- struct ice_aqc_recipe_data_elem *tmp;
- struct ice_aqc_recipe_data_elem *buf;
- struct ice_recp_grp_entry *entry;
- u16 free_res_idx;
- u16 recipe_count;
- u8 chain_idx;
- u8 recps = 0;
- int status;
+ /* All words fit in a single recipe, no need for chaining. */
+ if (word_cnt <= ICE_NUM_WORDS_RECIPE)
+ return 1;
- /* When more than one recipe are required, another recipe is needed to
- * chain them together. Matching a tunnel metadata ID takes up one of
- * the match fields in the chaining recipe reducing the number of
- * chained recipes by one.
+ /* Recipe chaining required. Result indexes are fitted right after the
+ * regular lookup words. In some cases a new recipe must be added in
+ * order to fit the result indexes.
+ *
+ * As the word count increases, an extra recipe is needed for every 5
+ * words. However, adding a recipe also consumes one word for its
+ * result index, therefore the recipe count increases by 1 for every 4
+ * words. This calculation does not apply to word count == 1, which is
+ * handled above.
+ */
- /* check number of free result indices */
- bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
- free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+ return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1);
+}
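To make the arithmetic above concrete, here is a worked, standalone transcription of ice_calc_recp_cnt() (editor's illustration, assuming ICE_NUM_WORDS_RECIPE is 5, which is what the divisor of 4 implies). With 5 lookup slots per recipe, a chain of n recipes holds (n - 1) * 5 words in the subrecipes plus 5 - (n - 1) words in the root, i.e. 4n + 1 words in total, so the integer division below is exactly the ceiling of (word_cnt - 1) / 4:

#include <stdio.h>

#define ICE_NUM_WORDS_RECIPE 5	/* assumption: 5 lookup slots per recipe */

static int ice_calc_recp_cnt(int word_cnt)
{
	/* All words fit in a single recipe, no need for chaining. */
	if (word_cnt <= ICE_NUM_WORDS_RECIPE)
		return 1;

	/* Ceiling of (word_cnt - 1) / 4: each extra recipe adds 4 net slots. */
	return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1);
}

int main(void)
{
	for (int words = 1; words <= 14; words++)
		printf("%2d lookup words -> %d recipe(s)\n",
		       words, ice_calc_recp_cnt(words));
	return 0;
}

Running it prints 1 recipe for up to 5 words, 2 for 6..9 words, 3 for 10..13, and 4 for 14, matching the 4n + 1 capacity argument.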
- ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
- free_res_idx, rm->n_grp_count);
+static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid,
+ const struct ice_sw_recipe *rm)
+{
+ int i;
- if (rm->n_grp_count > 1) {
- if (rm->n_grp_count > free_res_idx)
- return -ENOSPC;
+ recp->recipe_indx = rid;
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M;
- rm->n_grp_count++;
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+ recp->content.mask[i] = cpu_to_le16(0);
}
- if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return -ENOSPC;
+ set_bit(rid, (unsigned long *)recp->recipe_bitmap);
+ recp->content.act_ctrl_fwd_priority = rm->priority;
- tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
- GFP_KERNEL);
- if (!buf) {
- status = -ENOMEM;
- goto err_mem;
- }
+ if (rm->need_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
- bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- recipe_count = ICE_MAX_NUM_RECIPES;
- status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
- NULL);
- if (status || recipe_count == 0)
- goto err_unroll;
+ if (rm->allow_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
+}
- /* Allocate the recipe resources, and configure them according to the
- * match fields from protocol headers and extracted field vectors.
- */
- chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- u8 i;
+static void bookkeep_recipe(struct ice_sw_recipe *recipe,
+ struct ice_aqc_recipe_data_elem *r,
+ const struct ice_sw_recipe *rm)
+{
+ memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap));
- status = ice_alloc_recipe(hw, &entry->rid);
- if (status)
- goto err_unroll;
+ recipe->priority = r->content.act_ctrl_fwd_priority;
+ recipe->tun_type = rm->tun_type;
+ recipe->need_pass_l2 = rm->need_pass_l2;
+ recipe->allow_pass_l2 = rm->allow_pass_l2;
+ recipe->recp_created = true;
+}
- content = &buf[recps].content;
+/* For memcpy in ice_add_sw_recipe. */
+static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) ==
+ sizeof_field(struct ice_sw_recipe, r_bitmap));
- /* Clear the result index of the located recipe, as this will be
- * updated, if needed, later in the recipe creation process.
- */
- tmp[0].content.result_indx = 0;
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @profiles: bitmap of profiles that will be associated.
+ */
+static int
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ unsigned long *profiles)
+{
+ struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL;
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *root;
+ struct ice_sw_recipe *recipe;
+ u16 free_res_idx, rid;
+ int lookup = 0;
+ int recp_cnt;
+ int status;
+ int word;
+ int i;
- buf[recps] = tmp[0];
- buf[recps].recipe_indx = (u8)entry->rid;
- /* if the recipe is a non-root recipe RID should be programmed
- * as 0 for the rules to be applied correctly.
- */
- content->rid = 0;
- memset(&content->lkup_indx, 0,
- sizeof(content->lkup_indx));
-
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
- * to be 0
- */
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = 0x80;
- content->mask[i] = 0;
- }
+ recp_cnt = ice_calc_recp_cnt(rm->n_ext_words);
- for (i = 0; i < entry->r_group.n_val_pairs; i++) {
- content->lkup_indx[i + 1] = entry->fv_idx[i];
- content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
- }
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- if (rm->n_grp_count > 1) {
- /* Checks to see if there really is a valid result index
- * that can be used.
- */
- if (chain_idx >= ICE_MAX_FV_WORDS) {
- ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = -ENOSPC;
- goto err_unroll;
- }
+ /* Check number of free result indices */
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
- entry->chain_idx = chain_idx;
- content->result_indx =
- ICE_AQ_RECIPE_RESULT_EN |
- FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
- chain_idx);
- clear_bit(chain_idx, result_idx_bm);
- chain_idx = find_first_bit(result_idx_bm,
- ICE_MAX_FV_WORDS);
- }
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, recp_cnt);
- /* fill recipe dependencies */
- bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
- ICE_MAX_NUM_RECIPES);
- set_bit(buf[recps].recipe_indx,
- (unsigned long *)buf[recps].recipe_bitmap);
- content->act_ctrl_fwd_priority = rm->priority;
+ /* Last recipe doesn't need result index */
+ if (recp_cnt - 1 > free_res_idx)
+ return -ENOSPC;
- if (rm->need_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+ if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES)
+ return -E2BIG;
- if (rm->allow_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
- recps++;
- }
+ buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (rm->n_grp_count == 1) {
- rm->root_rid = buf[0].recipe_indx;
- set_bit(buf[0].recipe_indx, rm->r_bitmap);
- buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
- memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[0].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
- }
- /* Applicable only for ROOT_RECIPE, set the fwd_priority for
- * the recipe which is getting created if specified
- * by user. Usually any advanced switch filter, which results
- * into new extraction sequence, ended up creating a new recipe
- * of type ROOT and usually recipes are associated with profiles
- * Switch rule referreing newly created recipe, needs to have
- * either/or 'fwd' or 'join' priority, otherwise switch rule
- * evaluation will not happen correctly. In other words, if
- * switch rule to be evaluated on priority basis, then recipe
- * needs to have priority, otherwise it will be evaluated last.
- */
- buf[0].content.act_ctrl_fwd_priority = rm->priority;
- } else {
- struct ice_recp_grp_entry *last_chain_entry;
- u16 rid, i;
+ /* Set up the non-root subrecipes. These do not contain lookups for the
+ * results of other subrecipes. Their recipe association is set only to
+ * their own recipe index. Each non-root subrecipe needs a free result
+ * index from the FV.
+ *
+ * Note: this is only done if there is more than one recipe.
+ */
+ for (i = 0; i < recp_cnt - 1; i++) {
+ struct ice_aqc_recipe_content *content;
+ u8 result_idx;
- /* Allocate the last recipe that will chain the outcomes of the
- * other recipes together
- */
status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
+ return status;
- content = &buf[recps].content;
+ fill_recipe_template(&buf[i], rid, rm);
- buf[recps].recipe_indx = (u8)rid;
- content->rid = (u8)rid;
- content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
- /* the new entry created should also be part of rg_list to
- * make sure we have complete recipe
+ result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ /* Check if there really is a valid result index that can be
+ * used.
*/
- last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*last_chain_entry),
- GFP_KERNEL);
- if (!last_chain_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
- last_chain_entry->rid = rid;
- memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
- content->mask[i] = 0;
+ if (result_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ return -ENOSPC;
}
+ clear_bit(result_idx, result_idx_bm);
- i = 1;
- /* update r_bitmap with the recp that is used for chaining */
+ content = &buf[i].content;
+ content->result_indx = ICE_AQ_RECIPE_RESULT_EN |
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ result_idx);
+
+ /* Set recipe association to be used for root recipe */
set_bit(rid, rm->r_bitmap);
- /* this is the recipe that chains all the other recipes so it
- * should not have a chaining ID to indicate the same
- */
- last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- last_chain_entry->fv_idx[i] = entry->chain_idx;
- content->lkup_indx[i] = entry->chain_idx;
- content->mask[i++] = cpu_to_le16(0xFFFF);
- set_bit(entry->rid, rm->r_bitmap);
- }
- list_add(&last_chain_entry->l_entry, &rm->rg_list);
- if (sizeof(buf[recps].recipe_bitmap) >=
- sizeof(rm->r_bitmap)) {
- memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[recps].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
+
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE) {
+ content->lkup_indx[word] = rm->fv_idx[lookup];
+ content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
+
+ lookup++;
+ word++;
}
- content->act_ctrl_fwd_priority = rm->priority;
- recps++;
- rm->root_rid = (u8)rid;
+ recipe = &hw->switch_info->recp_list[rid];
+ set_bit(result_idx, recipe->res_idxs);
+ bookkeep_recipe(recipe, &buf[i], rm);
}
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- goto err_unroll;
- status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
- ice_release_change_lock(hw);
+ /* Setup the root recipe */
+ status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
-
- /* Every recipe that just got created add it to the recipe
- * book keeping list
- */
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- struct ice_switch_info *sw = hw->switch_info;
- bool is_root, idx_found = false;
- struct ice_sw_recipe *recp;
- u16 idx, buf_idx = 0;
-
- /* find buffer index for copying some data */
- for (idx = 0; idx < rm->n_grp_count; idx++)
- if (buf[idx].recipe_indx == entry->rid) {
- buf_idx = idx;
- idx_found = true;
- }
+ return status;
- if (!idx_found) {
- status = -EIO;
- goto err_unroll;
- }
+ recipe = &hw->switch_info->recp_list[rid];
+ root = &buf[recp_cnt - 1];
+ fill_recipe_template(root, rid, rm);
- recp = &sw->recp_list[entry->rid];
- is_root = (rm->root_rid == entry->rid);
- recp->is_root = is_root;
+ /* Set recipe association, use previously set bitmap and own rid */
+ set_bit(rid, rm->r_bitmap);
+ memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap));
- recp->root_rid = entry->rid;
- recp->big_recp = (is_root && rm->n_grp_count > 1);
+ /* For non-root recipes the RID should be 0; for the root recipe it
+ * should be the proper RID value ORed with 0x80 (the is-root bit).
+ */
+ root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- memcpy(&recp->ext_words, entry->r_group.pairs,
- entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+ /* Fill remaining lookups in root recipe */
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = rm->fv_idx[lookup];
+ root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
- memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
- sizeof(recp->r_bitmap));
+ lookup++;
+ word++;
+ }
- /* Copy non-result fv index values and masks to recipe. This
- * call will also update the result recipe bitmask.
+ /* Fill result indexes as lookups */
+ i = 0;
+ while (i < recp_cnt - 1 &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = buf[i].content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ root->content.mask[word] = cpu_to_le16(0xffff);
+ /* For bookkeeping, the FV index must be marked as used for the
+ * intermediate result.
+ */
- ice_collect_result_idx(&buf[buf_idx], recp);
+ set_bit(root->content.lkup_indx[word], recipe->res_idxs);
- /* for non-root recipes, also copy to the root, this allows
- * easier matching of a complete chained recipe
- */
- if (!is_root)
- ice_collect_result_idx(&buf[buf_idx],
- &sw->recp_list[rm->root_rid]);
-
- recp->n_ext_words = entry->r_group.n_val_pairs;
- recp->chain_idx = entry->chain_idx;
- recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
- recp->n_grp_count = rm->n_grp_count;
- recp->tun_type = rm->tun_type;
- recp->need_pass_l2 = rm->need_pass_l2;
- recp->allow_pass_l2 = rm->allow_pass_l2;
- recp->recp_created = true;
+ i++;
+ word++;
}
- rm->root_buf = buf;
- kfree(tmp);
- return status;
-err_unroll:
-err_mem:
- kfree(tmp);
- devm_kfree(ice_hw_to_dev(hw), buf);
- return status;
-}
+ rm->root_rid = rid;
+ bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm);
-/**
- * ice_create_recipe_group - creates recipe group
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @lkup_exts: lookup elements
- */
-static int
-ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
- struct ice_prot_lkup_ext *lkup_exts)
-{
- u8 recp_count = 0;
- int status;
-
- rm->n_grp_count = 0;
+ /* Program the recipe */
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
- /* Create recipes for words that are marked not done by packing them
- * as best fit.
- */
- status = ice_create_first_fit_recp_def(hw, lkup_exts,
- &rm->rg_list, &recp_count);
- if (!status) {
- rm->n_grp_count += recp_count;
- rm->n_ext_words = lkup_exts->n_val_words;
- memcpy(&rm->ext_words, lkup_exts->fv_words,
- sizeof(rm->ext_words));
- memcpy(rm->word_masks, lkup_exts->field_mask,
- sizeof(rm->word_masks));
- }
+ status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ return status;
- return status;
+ return 0;
}
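A compact standalone sketch of the layout ice_add_sw_recipe() produces (editor's illustration; the 5-slot recipe width is an assumption carried over from the chaining math above): non-root subrecipes are filled with lookup words first, and the root recipe takes the leftover lookup words followed by one slot per subrecipe result index.

#include <stdio.h>

#define WORDS_PER_RECIPE 5	/* assumed recipe width, see note above */

int main(void)
{
	int n_words = 9;	/* example rule: 9 lookup words -> 2 recipes */
	int recp_cnt = n_words <= WORDS_PER_RECIPE ?
		       1 : (n_words + 2) / (WORDS_PER_RECIPE - 1);
	int lookup = 0;

	/* Non-root subrecipes: plain lookup words only. */
	for (int i = 0; i < recp_cnt - 1; i++) {
		printf("subrecipe %d:", i);
		for (int w = 0; w < WORDS_PER_RECIPE && lookup < n_words; w++)
			printf(" word%d", lookup++);
		printf("  (result index reserved in the FV)\n");
	}

	/* Root recipe: leftover lookup words, then the subrecipe results. */
	printf("root recipe:");
	for (int w = 0; lookup < n_words && w < WORDS_PER_RECIPE; w++)
		printf(" word%d", lookup++);
	for (int i = 0; i < recp_cnt - 1; i++)
		printf(" result[%d]", i);
	printf("\n");
	return 0;
}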
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
@@ -5507,9 +5312,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
struct ice_prot_lkup_ext *lkup_exts;
- struct ice_recp_grp_entry *r_entry;
struct ice_sw_fv_list_entry *fvit;
- struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
struct ice_sw_recipe *rm;
int status = 0;
@@ -5551,7 +5354,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* headers being programmed.
*/
INIT_LIST_HEAD(&rm->fv_list);
- INIT_LIST_HEAD(&rm->rg_list);
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
@@ -5563,12 +5365,10 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_unroll;
- /* Group match words into recipes using preferred recipe grouping
- * criteria.
- */
- status = ice_create_recipe_group(hw, rm, lkup_exts);
- if (status)
- goto err_unroll;
+ /* Copy FV words and masks from lkup_exts to recipe struct. */
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks));
/* set the recipe priority if specified */
rm->priority = (u8)rinfo->priority;
@@ -5579,7 +5379,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Find offsets from the field vector. Pick the first one for all the
* recipes.
*/
- status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ status = ice_fill_fv_word_index(hw, rm);
if (status)
goto err_unroll;
@@ -5657,17 +5457,11 @@ err_free_recipe:
}
err_unroll:
- list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
- list_del(&r_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), r_entry);
- }
-
list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
list_del(&fvit->list_entry);
devm_kfree(ice_hw_to_dev(hw), fvit);
}
- devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
kfree(rm);
err_free_lkup_exts:
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index ad98e98c812d..671d7a5f359f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -216,7 +216,6 @@ struct ice_sw_recipe {
/* For a chained recipe the root recipe is what should be used for
* programming rules
*/
- u8 is_root;
u8 root_rid;
u8 recp_created;
@@ -227,19 +226,8 @@ struct ice_sw_recipe {
*/
struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
u16 word_masks[ICE_MAX_CHAIN_WORDS];
-
- /* if this recipe is a collection of other recipe */
- u8 big_recp;
-
- /* if this recipe is part of another bigger recipe then chain index
- * corresponding to this recipe
- */
- u8 chain_idx;
-
- /* if this recipe is a collection of other recipe then count of other
- * recipes and recipe IDs of those recipes
- */
- u8 n_grp_count;
+ u8 fv_idx[ICE_MAX_CHAIN_WORDS];
+ u16 fv_mask[ICE_MAX_CHAIN_WORDS];
/* Bit map specifying the IDs associated with this group of recipe */
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
@@ -272,10 +260,6 @@ struct ice_sw_recipe {
u8 need_pass_l2:1;
u8 allow_pass_l2:1;
- struct list_head rg_list;
-
- /* AQ buffer associated with this recipe */
- struct ice_aqc_recipe_data_elem *root_buf;
/* This struct saves the fv_words for a given lookup */
struct ice_prot_lkup_ext lkup_exts;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 8bd24b33f3a6..e6923f8121a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -1353,6 +1353,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct netlink_ext_ack *extack = fltr->extack;
struct flow_match_control enc_control;
fltr->tunnel_type = ice_tc_tun_get_type(dev);
@@ -1373,6 +1374,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
flow_rule_match_enc_control(rule, &enc_control);
+ if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack))
+ return -EOPNOTSUPP;
+
if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 244cddd2a9ea..07aab6e130cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -330,6 +330,24 @@ DEFINE_EVENT(ice_esw_br_port_template,
TP_ARGS(port)
);
+DECLARE_EVENT_CLASS(ice_switch_stats_template,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info),
+ TP_STRUCT__entry(__field(u16, rule_cnt)
+ __field(u8, recp_cnt)),
+ TP_fast_assign(__entry->rule_cnt = sw_info->rule_cnt;
+ __entry->recp_cnt = sw_info->recp_cnt;),
+ TP_printk("rules=%u recipes=%u",
+ __entry->rule_cnt,
+ __entry->recp_cnt)
+);
+
+DEFINE_EVENT(ice_switch_stats_template,
+ ice_aq_sw_rules,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info)
+);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f0796a93f428..96037bef3e78 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -71,6 +71,14 @@ enum ice_aq_res_ids {
ICE_GLOBAL_CFG_LOCK_RES_ID
};
+enum ice_fec_stats_types {
+ ICE_FEC_CORR_LOW,
+ ICE_FEC_CORR_HIGH,
+ ICE_FEC_UNCORR_LOW,
+ ICE_FEC_UNCORR_HIGH,
+ ICE_FEC_MAX
+};
+
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
@@ -322,12 +330,14 @@ enum ice_time_ref_freq {
ICE_TIME_REF_FREQ_156_250 = 4,
ICE_TIME_REF_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ
+ NUM_ICE_TIME_REF_FREQ,
+
+ ICE_TIME_REF_FREQ_INVALID = -1,
};
/* Clock source specification */
enum ice_clk_src {
- ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TCXO = 0, /* Temperature compensated oscillator */
ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
NUM_ICE_CLK_SRC
@@ -372,6 +382,15 @@ struct ice_ts_dev_info {
u8 ts_ll_int_read;
};
+#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
+#define ICE_NAC_TOPO_DUAL_M BIT(1)
+#define ICE_NAC_TOPO_ID_M GENMASK(0xF, 0)
+
+struct ice_nac_topology {
+ u32 mode;
+ u8 id;
+};
+
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
@@ -393,6 +412,7 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ struct ice_nac_topology nac_topo;
/* bitmap of supported sensors
* bit 0 - internal temperature sensor
* bit 31:1 - Reserved
@@ -482,6 +502,8 @@ struct ice_bank_info {
u32 orom_size; /* Size of OROM bank */
u32 netlist_ptr; /* Pointer to 1st Netlist bank */
u32 netlist_size; /* Size of Netlist bank */
+ u32 active_css_hdr_len; /* Active CSS header length */
+ u32 inactive_css_hdr_len; /* Inactive CSS header length */
enum ice_flash_bank nvm_bank; /* Active NVM bank */
enum ice_flash_bank orom_bank; /* Active OROM bank */
enum ice_flash_bank netlist_bank; /* Active Netlist bank */
@@ -716,6 +738,7 @@ struct ice_port_info {
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
+ u8 local_fwd_mode;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
@@ -739,6 +762,8 @@ struct ice_switch_info {
struct ice_sw_recipe *recp_list;
u16 prof_res_bm_init;
u16 max_used_prof_index;
+ u16 rule_cnt;
+ u8 recp_cnt;
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
@@ -818,11 +843,43 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+
+struct ice_eth56g_params {
+ u8 num_phys;
+ u8 phy_addr[2];
+ bool onestep_ena;
+ bool sfd_ena;
+ u32 peer_delay;
+};
+
+union ice_phy_params {
+ struct ice_eth56g_params eth56g;
+};
+
/* PHY model */
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
+ ICE_PHY_E810 = 1,
ICE_PHY_E82X,
+ ICE_PHY_ETH56G,
+};
+
+/* Global Link Topology */
+enum ice_global_link_topo {
+ ICE_LINK_TOPO_UP_TO_2_LINKS,
+ ICE_LINK_TOPO_UP_TO_4_LINKS,
+ ICE_LINK_TOPO_UP_TO_8_LINKS,
+ ICE_LINK_TOPO_RESERVED,
+};
+
+struct ice_ptp_hw {
+ enum ice_phy_model phy_model;
+ union ice_phy_params phy;
+ u8 num_lports;
+ u8 ports_per_phy;
+ bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -846,7 +903,6 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
- enum ice_phy_model phy_model;
u16 max_burst_size; /* driver sets this value */
@@ -909,12 +965,7 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E82X 2
-#define ICE_PORTS_PER_PHY_E82X 8
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PORTS_PER_PHY_E810 4
-#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
+ struct ice_ptp_hw ptp;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
@@ -1087,17 +1138,13 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
/* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L 0x02
+#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH 330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN 0x08
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 48a8d462d76a..5635e9da2212 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -948,7 +948,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
goto out_unlock;
}
- ice_eswitch_update_repr(vf->repr_id, vsi);
+ ice_eswitch_update_repr(&vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 2e9ad27cb9d1..6e8f2aab6080 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return -EINVAL;
err = ice_fltr_add_vlan(vsi, vlan);
- if (err && err != -EEXIST) {
+ if (!err)
+ vsi->num_vlan++;
+ else if (err == -EEXIST)
+ err = 0;
+ else
dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
vlan->vid, vsi->vsi_num, err);
- return err;
- }
- vsi->num_vlan++;
- return 0;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 7541f223bf4f..a65955eb23c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
- clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
- set_bit(qid, vsi->af_xdp_zc_qps);
-
return 0;
}
@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
struct ice_rx_ring *rx_ring;
- unsigned long q;
+ uint i;
+
+ ice_for_each_rxq(vsi, i) {
+ rx_ring = vsi->rx_rings[i];
+ if (!rx_ring->xsk_pool)
+ continue;
- for_each_set_bit(q, vsi->af_xdp_zc_qps,
- max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
- rx_ring = vsi->rx_rings[q];
if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
new file mode 100644
index 000000000000..1addd663acad
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config IDPF
+ tristate "Intel(R) Infrastructure Data Path Function Support"
+ depends on PCI_MSI
+ select DIMLIB
+ select LIBETH
+ help
+ This driver supports Intel(R) Infrastructure Data Path Function
+ devices.
+
+ To compile this driver as a module, choose M here. The module
+ will be called idpf.
+
+if IDPF
+
+config IDPF_SINGLEQ
+ bool "idpf singleq support"
+ help
+ This option enables support for legacy single Rx/Tx queues with no
+ completion and fill queues. Only enable it if you have hardware which
+ wants to work in this mode, as it increases the driver size and adds
+ runtime checks on the hotpath.
+
+endif # IDPF
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 6844ead2f3ac..2ce01a0b5898 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -12,7 +12,8 @@ idpf-y := \
idpf_ethtool.o \
idpf_lib.o \
idpf_main.o \
- idpf_singleq_txrx.o \
idpf_txrx.o \
idpf_virtchnl.o \
idpf_vf_dev.o
+
+idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index e7a036538246..2c31ad87587a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -17,10 +17,8 @@ struct idpf_vport_max_q;
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>
-#include <linux/dim.h>
#include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
@@ -266,7 +264,6 @@ struct idpf_port_stats {
* the worst case.
* @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
* @bufq_desc_count: Buffer queue descriptor count
- * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
* @num_rxq_grp: Number of RX queues in a group
* @rxq_grps: Total number of RX groups. Number of groups * number of RX per
* group will yield total number of RX queues.
@@ -302,7 +299,7 @@ struct idpf_vport {
u16 num_txq_grp;
struct idpf_txq_group *txq_grps;
u32 txq_model;
- struct idpf_queue **txqs;
+ struct idpf_tx_queue **txqs;
bool crc_enable;
u16 num_rxq;
@@ -310,11 +307,10 @@ struct idpf_vport {
u32 rxq_desc_count;
u8 num_bufqs_per_qgrp;
u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
u16 num_rxq_grp;
struct idpf_rxq_group *rxq_grps;
u32 rxq_model;
- struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];
+ struct libeth_rx_pt *rx_ptype_lkup;
struct idpf_adapter *adapter;
struct net_device *netdev;
@@ -601,7 +597,8 @@ struct idpf_adapter {
*/
static inline int idpf_is_queue_model_split(u16 q_model)
{
- return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
+ q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}
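The Kconfig/Makefile changes earlier and this helper work together: with CONFIG_IDPF_SINGLEQ disabled, idpf_is_queue_model_split() folds to a constant true, so the singleq branches become dead code and the singleq object file is not built at all. A minimal standalone sketch of that folding (editor's illustration; the macro below merely stands in for the kernel's IS_ENABLED(), and the queue-model value is made up):

#include <stdio.h>

#define IDPF_SINGLEQ_ENABLED 0		/* assumption: option compiled out */
#define QUEUE_MODEL_SPLIT 1		/* illustrative value only */

static inline int is_queue_model_split(unsigned short q_model)
{
	/* Folds to 1 at compile time when singleq support is compiled out. */
	return !IDPF_SINGLEQ_ENABLED || q_model == QUEUE_MODEL_SPLIT;
}

int main(void)
{
	printf("singleq model treated as split? %d\n", is_queue_model_split(0));
	return 0;
}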
#define idpf_is_cap_ena(adapter, field, flag) \
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 1885ba618981..3806ddd3ce4a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -437,22 +437,24 @@ struct idpf_stats {
.stat_offset = offsetof(_type, _stat) \
}
-/* Helper macro for defining some statistics related to queues */
-#define IDPF_QUEUE_STAT(_name, _stat) \
- IDPF_STAT(struct idpf_queue, _name, _stat)
+/* Helper macros for defining some statistics related to queues */
+#define IDPF_RX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_rx_queue, _name, _stat)
+#define IDPF_TX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_tx_queue, _name, _stat)
/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
- IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
+ IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};
/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
- IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
+ IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};
#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
@@ -563,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < vport_config->max_q.max_rxq; i++)
idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
"rx", i);
-
- page_pool_ethtool_stats_get_strings(data);
}
/**
@@ -598,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
u16 max_txq, max_rxq;
- unsigned int size;
if (sset != ETH_SS_STATS)
return -EINVAL;
@@ -617,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
max_txq = vport_config->max_q.max_txq;
max_rxq = vport_config->max_q.max_rxq;
- size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+ return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
(IDPF_RX_QUEUE_STATS_LEN * max_rxq);
- size += page_pool_ethtool_stats_get_count();
-
- return size;
}
/**
@@ -633,7 +629,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
* Copies the stat data defined by the pointer and stat structure pair into
* the memory supplied as data. If the pointer is null, data will be zero'd.
*/
-static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
+static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
const struct idpf_stats *stat)
{
char *p;
@@ -671,6 +667,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
* idpf_add_queue_stats - copy queue statistics into supplied buffer
* @data: ethtool stats buffer
* @q: the queue to copy
+ * @type: type of the queue
*
* Queue statistics must be copied while protected by u64_stats_fetch_begin,
* so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
@@ -681,19 +678,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
*
* This function expects to be called while under rcu_read_lock().
*/
-static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
+static void idpf_add_queue_stats(u64 **data, const void *q,
+ enum virtchnl2_queue_type type)
{
+ const struct u64_stats_sync *stats_sync;
const struct idpf_stats *stats;
unsigned int start;
unsigned int size;
unsigned int i;
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
size = IDPF_RX_QUEUE_STATS_LEN;
stats = idpf_gstrings_rx_queue_stats;
+ stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
} else {
size = IDPF_TX_QUEUE_STATS_LEN;
stats = idpf_gstrings_tx_queue_stats;
+ stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
}
/* To avoid invalid statistics values, ensure that we keep retrying
@@ -701,10 +702,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
* u64_stats_fetch_retry.
*/
do {
- start = u64_stats_fetch_begin(&q->stats_sync);
+ start = u64_stats_fetch_begin(stats_sync);
for (i = 0; i < size; i++)
idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
- } while (u64_stats_fetch_retry(&q->stats_sync, start));
+ } while (u64_stats_fetch_retry(stats_sync, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
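For reference, the begin/retry idiom used by idpf_add_queue_stats() above re-copies the counters until the sequence count observed before the copy is still unchanged afterwards, which yields a consistent snapshot without blocking the writer. A single-threaded toy analogue (editor's illustration, not the kernel's u64_stats implementation; the injected "writer" exists only to force one retry):

#include <stdio.h>

static unsigned int seq;
static unsigned long long packets, bytes;

static unsigned int fetch_begin(void) { return seq; }
static int fetch_retry(unsigned int start) { return seq != start; }

static void writer_update(void)
{
	seq++;			/* odd: write in progress (simplified) */
	packets += 1;
	bytes += 1500;
	seq++;			/* even again: write done */
}

int main(void)
{
	unsigned long long p, b;
	unsigned int start;
	int injected = 0;

	do {
		start = fetch_begin();
		p = packets;
		b = bytes;
		if (!injected) {	/* simulate a concurrent writer */
			writer_update();
			injected = 1;
		}
	} while (fetch_retry(start));	/* first pass retries, second sticks */

	printf("consistent snapshot: %llu pkts, %llu bytes\n", p, b);
	return 0;
}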
@@ -793,7 +794,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
unsigned int start;
if (idpf_is_queue_model_split(vport->rxq_model))
@@ -807,7 +808,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- stats = &rxq->q_stats.rx;
+ stats = &rxq->q_stats;
hw_csum_err = u64_stats_read(&stats->hw_csum_err);
hsplit = u64_stats_read(&stats->hsplit_pkts);
hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
@@ -828,7 +829,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
unsigned int start;
@@ -838,7 +839,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- stats = &txq->q_stats.tx;
+ stats = &txq->q_stats;
linearize = u64_stats_read(&stats->linearize);
qbusy = u64_stats_read(&stats->q_busy);
skb_drops = u64_stats_read(&stats->skb_drops);
@@ -869,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
- struct page_pool_stats pp_stats = { };
struct idpf_vport *vport;
unsigned int total = 0;
unsigned int i, j;
@@ -896,12 +896,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
for (j = 0; j < txq_grp->num_txq; j++, total++) {
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, txq);
+ idpf_add_queue_stats(&data, txq, qtype);
}
}
@@ -929,7 +929,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
num_rxq = rxq_grp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, total++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (is_splitq)
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
@@ -938,93 +938,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
if (!rxq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, rxq);
-
- /* In splitq mode, don't get page pool stats here since
- * the pools are attached to the buffer queues
- */
- if (is_splitq)
- continue;
-
- if (rxq)
- page_pool_get_stats(rxq->pp, &pp_stats);
- }
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *rxbufq =
- &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
-
- page_pool_get_stats(rxbufq->pp, &pp_stats);
+ idpf_add_queue_stats(&data, rxq, qtype);
}
}
for (; total < vport_config->max_q.max_rxq; total++)
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
- page_pool_ethtool_stats_get(data, &pp_stats);
-
rcu_read_unlock();
idpf_vport_ctrl_unlock(netdev);
}
/**
- * idpf_find_rxq - find rxq from q index
+ * idpf_find_rxq_vec - find rxq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to rx queue
+ * returns pointer to rx vector
*/
-static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ int q_num)
{
int q_grp, q_idx;
if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num];
+ return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
+ return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
/**
- * idpf_find_txq - find txq from q index
+ * idpf_find_txq_vec - find txq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to tx queue
+ * returns pointer to tx vector
*/
-static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ int q_num)
{
int q_grp;
if (!idpf_is_queue_model_split(vport->txq_model))
- return vport->txqs[q_num];
+ return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq;
+ return vport->txq_grps[q_grp].complq->q_vector;
}
/**
* __idpf_get_q_coalesce - get ITR values for specific queue
* @ec: ethtool structure to fill with driver's coalesce settings
- * @q: quuee of Rx or Tx
+ * @q_vector: queue vector corresponding to this queue
+ * @type: queue type
*/
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q)
+ const struct idpf_q_vector *q_vector,
+ enum virtchnl2_queue_type type)
{
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
ec->use_adaptive_rx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
- ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
+ ec->rx_coalesce_usecs = q_vector->rx_itr_value;
} else {
ec->use_adaptive_tx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
- ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
+ ec->tx_coalesce_usecs = q_vector->tx_itr_value;
}
}
@@ -1040,8 +1024,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
u32 q_num)
{
- struct idpf_netdev_priv *np = netdev_priv(netdev);
- struct idpf_vport *vport;
+ const struct idpf_netdev_priv *np = netdev_priv(netdev);
+ const struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
@@ -1056,10 +1040,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
}
if (q_num < vport->num_rxq)
- __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_RX);
if (q_num < vport->num_txq)
- __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_TX);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
@@ -1103,16 +1089,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
- * @q: queue for which itr values has to be set
+ * @qv: queue vector for which itr values has to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
-static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q, bool is_rxq)
+static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
- struct idpf_q_vector *qv = q->q_vector;
bool is_dim_ena = false;
u16 itr_val;
@@ -1128,7 +1113,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
itr_val = qv->tx_itr_value;
}
if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
- netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
+ netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
return -EINVAL;
}
@@ -1137,7 +1122,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
return 0;
if (coalesce_usecs > IDPF_ITR_MAX) {
- netdev_err(q->vport->netdev,
+ netdev_err(qv->vport->netdev,
"Invalid value, %d-usecs range is 0-%d\n",
coalesce_usecs, IDPF_ITR_MAX);
@@ -1146,7 +1131,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
if (coalesce_usecs % 2) {
coalesce_usecs--;
- netdev_info(q->vport->netdev,
+ netdev_info(qv->vport->netdev,
"HW only supports even ITR values, ITR rounded to %d\n",
coalesce_usecs);
}
@@ -1185,15 +1170,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
*
* Return 0 on success, and negative on failure
*/
-static int idpf_set_q_coalesce(struct idpf_vport *vport,
- struct ethtool_coalesce *ec,
+static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
- struct idpf_queue *q;
+ struct idpf_q_vector *qv;
- q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
+ qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
+ idpf_find_txq_vec(vport, q_num);
- if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
+ if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index a5752dcab888..8c7f8ef8f1a1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IDPF_LAN_TXRX_H_
#define _IDPF_LAN_TXRX_H_
+#include <linux/bits.h>
+
enum idpf_rss_hash {
IDPF_HASH_INVALID = 0,
/* Values 1 - 28 are reserved for future use */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 52ceda6306a3..5dbf2b4ba1b0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -4,8 +4,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
-static const struct net_device_ops idpf_netdev_ops_splitq;
-static const struct net_device_ops idpf_netdev_ops_singleq;
+static const struct net_device_ops idpf_netdev_ops;
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
@@ -69,7 +68,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
- free_irq(adapter->msix_entries[0].vector, adapter);
+ kfree(free_irq(adapter->msix_entries[0].vector, adapter));
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}
@@ -124,15 +123,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
*/
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
- struct idpf_q_vector *mb_vector = &adapter->mb_vector;
int irq_num, mb_vidx = 0, err;
+ char *name;
irq_num = adapter->msix_entries[mb_vidx].vector;
- mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- dev_driver_string(&adapter->pdev->dev),
- "Mailbox", mb_vidx);
- err = request_irq(irq_num, adapter->irq_mb_handler, 0,
- mb_vector->name, adapter);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ dev_driver_string(&adapter->pdev->dev),
+ "Mailbox", mb_vidx);
+ err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err);
@@ -765,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
}
/* assign netdev_ops */
- if (idpf_is_queue_model_split(vport->txq_model))
- netdev->netdev_ops = &idpf_netdev_ops_splitq;
- else
- netdev->netdev_ops = &idpf_netdev_ops_singleq;
+ netdev->netdev_ops = &idpf_netdev_ops;
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
@@ -946,6 +941,9 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
+
unregister_netdev(vport->netdev);
free_netdev(vport->netdev);
vport->netdev = NULL;
@@ -1318,14 +1316,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
if (idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *q =
+ const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
for (j = 0; j < grp->singleq.num_rxq; j++) {
- struct idpf_queue *q =
+ const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
writel(q->next_to_alloc, q->tail);
@@ -1394,6 +1392,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
}
idpf_rx_init_buf_tail(vport);
+ idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
@@ -1854,7 +1853,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_state current_state = np->state;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport;
- int err, i;
+ int err;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
@@ -1928,46 +1927,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
- /* Since idpf_vport_queues_alloc was called with new_port, the queue
- * back pointers are currently pointing to the local new_vport. Reset
- * the backpointers to the original vport here
- */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j;
-
- tx_qgrp->vport = vport;
- for (j = 0; j < tx_qgrp->num_txq; j++)
- tx_qgrp->txqs[j]->vport = vport;
-
- if (idpf_is_queue_model_split(vport->txq_model))
- tx_qgrp->complq->vport = vport;
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- struct idpf_queue *q;
- u16 num_rxq;
- int j;
-
- rx_qgrp->vport = vport;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
- rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
- num_rxq = rx_qgrp->singleq.num_rxq;
-
- for (j = 0; j < num_rxq; j++) {
- if (idpf_is_queue_model_split(vport->rxq_model))
- q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- else
- q = rx_qgrp->singleq.rxqs[j];
- q->vport = vport;
- }
- }
-
if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport);
@@ -2392,24 +2351,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
mem->pa = 0;
}
-static const struct net_device_ops idpf_netdev_ops_splitq = {
- .ndo_open = idpf_open,
- .ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_splitq_start,
- .ndo_features_check = idpf_features_check,
- .ndo_set_rx_mode = idpf_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = idpf_set_mac,
- .ndo_change_mtu = idpf_change_mtu,
- .ndo_get_stats64 = idpf_get_stats64,
- .ndo_set_features = idpf_set_features,
- .ndo_tx_timeout = idpf_tx_timeout,
-};
-
-static const struct net_device_ops idpf_netdev_ops_singleq = {
+static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_singleq_start,
+ .ndo_start_xmit = idpf_tx_start,
.ndo_features_check = idpf_features_check,
.ndo_set_rx_mode = idpf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index f784eea044bd..db476b3314c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -8,6 +8,7 @@
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBETH);
MODULE_LICENSE("GPL");
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 27b93592c4ba..fe64febf7436 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
/**
@@ -186,7 +188,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
* and gets a physical address for each memory location and programs
* it and the length into the transmit base mode descriptor.
*/
-static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
+static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *first,
struct idpf_tx_offload_params *offloads)
{
@@ -205,12 +207,12 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->base_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
/* write each descriptor with CRC bit */
- if (tx_q->vport->crc_enable)
+ if (idpf_queue_has(CRC_EN, tx_q))
td_cmd |= IDPF_TX_DESC_CMD_ICRC;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@@ -239,7 +241,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
i = 0;
}
@@ -259,7 +261,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
i = 0;
}
@@ -285,7 +287,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
@@ -299,7 +301,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_base_tx_ctx_desc *
-idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
+idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
{
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
@@ -307,7 +309,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
txq->tx_buf[ntu].ctx_entry = true;
- ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+ ctx_desc = &txq->base_ctx[ntu];
IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
txq->next_to_use = ntu;
@@ -320,7 +322,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
* @txq: queue to send buffer on
* @offload: offload parameter structure
**/
-static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
+static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq,
struct idpf_tx_offload_params *offload)
{
struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq);
@@ -333,7 +335,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.lso_pkts);
+ u64_stats_inc(&txq->q_stats.lso_pkts);
u64_stats_update_end(&txq->stats_sync);
}
@@ -351,8 +353,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q)
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
@@ -409,53 +411,25 @@ out_drop:
}
/**
- * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
- * @skb: send buffer
- * @netdev: network interface device structure
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- */
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
-
- tx_q = vport->txqs[skb_get_queue_mapping(skb)];
-
- /* hardware can't handle really short frames, hardware padding works
- * beyond this point
- */
- if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
- idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
-
- return NETDEV_TX_OK;
- }
-
- return idpf_tx_singleq_frame(skb, tx_q);
-}
-
-/**
* idpf_tx_singleq_clean - Reclaim resources from queue
* @tx_q: Tx queue to clean
* @napi_budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
*/
-static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
+static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int budget = tx_q->vport->compln_clean_budget;
unsigned int total_bytes = 0, total_pkts = 0;
struct idpf_base_tx_desc *tx_desc;
+ u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
- struct idpf_vport *vport;
struct netdev_queue *nq;
bool dont_wake;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+ tx_desc = &tx_q->base_tx[ntc];
tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count;
@@ -517,7 +491,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
/* unmap any remaining paged data */
@@ -540,7 +514,7 @@ fetch_next_txq_desc:
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
} while (likely(budget));
@@ -550,16 +524,15 @@ fetch_next_txq_desc:
*cleaned += total_pkts;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, total_pkts);
+ u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
u64_stats_update_end(&tx_q->stats_sync);
- vport = tx_q->vport;
- np = netdev_priv(vport->netdev);
- nq = netdev_get_tx_queue(vport->netdev, tx_q->idx);
+ np = netdev_priv(tx_q->netdev);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
dont_wake = np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(vport->netdev);
+ !netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, total_pkts, total_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
@@ -584,7 +557,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
budget_per_q = num_txq ? max(budget / num_txq, 1) : 0;
for (i = 0; i < num_txq; i++) {
- struct idpf_queue *q;
+ struct idpf_tx_queue *q;
q = q_vec->tx[i];
clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
@@ -614,14 +587,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc,
/**
* idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers
- * @rxq: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
- * @ntc: next to clean
*/
-static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
- union virtchnl2_rx_desc *rx_desc,
- struct sk_buff *skb, u16 ntc)
+static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ)))
@@ -635,98 +603,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
* @rxq: Rx ring being processed
* @skb: skb currently being received and modified
* @csum_bits: checksum bits from descriptor
- * @ptype: the packet type decoded by hardware
+ * @decoded: the packet type decoded by hardware
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- u16 ptype)
+static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_pt decoded)
{
- struct idpf_rx_ptype_decoded decoded;
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (unlikely(!(csum_bits->l3l4p)))
- return;
-
- decoded = rxq->vport->rx_ptype_lkup[ptype];
- if (unlikely(!(decoded.known && decoded.outer_ip)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
/* Check if there were any checksum errors */
- if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe)))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* Device could not do any checksum offload for certain extension
* headers as indicated by setting IPV6EXADD bit
*/
- if (unlikely(ipv6 && csum_bits->ipv6exadd))
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
- if (unlikely(csum_bits->l4e))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (unlikely(csum_bits->nat && csum_bits->eudpe))
+ if (unlikely(csum_bits.nat && csum_bits.eudpe))
goto checksum_fail;
/* Handle packets that were not able to be checksummed due to arrival
* speed, in this case the stack can compute the csum.
*/
- if (unlikely(csum_bits->pprs))
+ if (unlikely(csum_bits.pprs))
return;
/* If there is an outer header present that might contain a checksum
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- default:
- return;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
/**
* idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct idpf_rx_csum_decoded
+idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct idpf_rx_csum_decoded csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -745,28 +697,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
rx_status);
csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M,
rx_status);
- csum_bits.nat = 0;
- csum_bits.eudpe = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
* idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct idpf_rx_csum_decoded
+idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct idpf_rx_csum_decoded csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -786,9 +733,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
rx_status0);
csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M,
rx_status1);
- csum_bits.pprs = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
@@ -801,14 +747,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
**/
-static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
u64 mask, qw1;
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -817,7 +763,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
if (FIELD_GET(mask, qw1) == mask) {
u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -831,18 +777,20 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
**/
-static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
- le16_to_cpu(rx_desc->flex_nic_wb.status_error0)))
- skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash),
- idpf_ptype_to_htype(decoded));
+ le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) {
+ u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash);
+
+ libeth_rx_pt_set_hash(skb, hash, decoded);
+ }
}
/**
@@ -857,25 +805,45 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static void
+idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+ struct sk_buff *skb,
+ const union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
{
- struct idpf_rx_ptype_decoded decoded =
- rx_q->vport->rx_ptype_lkup[ptype];
+ struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
+ struct idpf_rx_csum_decoded csum_bits;
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);
+ skb->protocol = eth_type_trans(skb, rx_q->netdev);
/* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
- idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_base_csum(rx_desc);
} else {
- idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_flex_csum(rx_desc);
}
+
+ idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
+ skb_record_rx_queue(skb, rx_q->idx);
+}
+
+/**
+ * idpf_rx_buf_hw_update - Store the new tail and head values
+ * @rxq: queue to bump
+ * @val: new head index
+ */
+static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val)
+{
+ rxq->next_to_use = val;
+
+ if (unlikely(!rxq->tail))
+ return;
+
+ /* writel has an implicit memory barrier */
+ writel(val, rxq->tail);
}
/**
@@ -885,24 +853,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
*
* Returns false if all allocations were successful, true if any fail
*/
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
u16 cleaned_count)
{
struct virtchnl2_singleq_rx_buf_desc *desc;
+ const struct libeth_fq_fp fq = {
+ .pp = rx_q->pp,
+ .fqes = rx_q->rx_buf,
+ .truesize = rx_q->truesize,
+ .count = rx_q->desc_count,
+ };
u16 nta = rx_q->next_to_alloc;
- struct idpf_rx_buf *buf;
if (!cleaned_count)
return false;
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
- buf = &rx_q->rx_buf.buf[nta];
+ desc = &rx_q->single_buf[nta];
do {
dma_addr_t addr;
- addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, nta);
+ if (addr == DMA_MAPPING_ERROR)
break;
/* Refresh the desc even if buffer_addrs didn't change
@@ -912,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
desc->hdr_addr = 0;
desc++;
- buf++;
nta++;
if (unlikely(nta == rx_q->desc_count)) {
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
- buf = rx_q->rx_buf.buf;
+ desc = &rx_q->single_buf[0];
nta = 0;
}
@@ -933,7 +903,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
/**
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -943,9 +912,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
u64 qword;
@@ -957,7 +926,6 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -967,9 +935,9 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
@@ -984,14 +952,15 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
* @fields: storage for extracted values
*
*/
-static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
+ const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else
- idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
/**
@@ -1001,7 +970,7 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
+static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
struct sk_buff *skb = rx_q->skb;
@@ -1016,7 +985,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
struct idpf_rx_buf *rx_buf;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- rx_desc = IDPF_RX_DESC(rx_q, ntc);
+ rx_desc = &rx_q->rx[ntc];
/* status_error_ptype_len will always be zero for unused
* descriptors because it's cleared in cleanup, and overlaps
@@ -1036,29 +1005,27 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
- rx_buf = &rx_q->rx_buf.buf[ntc];
- if (!fields.size) {
- idpf_rx_put_page(rx_buf);
+ rx_buf = &rx_q->rx_buf[ntc];
+ if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
goto skip_data;
- }
- idpf_rx_sync_for_cpu(rx_buf, fields.size);
if (skb)
idpf_rx_add_frag(rx_buf, skb, fields.size);
else
- skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
+ skb = idpf_rx_build_skb(rx_buf, fields.size);
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
skip_data:
- IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
+ rx_buf->page = NULL;
+ IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
/* skip if it is non EOP desc */
- if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
+ if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
@@ -1084,7 +1051,7 @@ skip_data:
rx_desc, fields.rx_ptype);
/* send completed skb up the stack */
- napi_gro_receive(&rx_q->q_vector->napi, skb);
+ napi_gro_receive(rx_q->pp->p.napi, skb);
skb = NULL;
/* update budget accounting */
@@ -1095,12 +1062,13 @@ skip_data:
rx_q->next_to_clean = ntc;
+ page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count)
failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync);
- u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
+ u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
u64_stats_update_end(&rx_q->stats_sync);
/* guarantee a trip back through this routine if there was a failure */
@@ -1127,7 +1095,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);
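
Both files in this patch replace open-coded test_bit()/set_bit() calls on q->flags with idpf_queue_has()/idpf_queue_set()/idpf_queue_clear()/idpf_queue_change()/idpf_queue_assign(). Their definitions live outside these hunks; a minimal sketch of what such token-pasting wrappers over a per-queue flags bitmap look like, assumed from the bit names visible above (__IDPF_Q_GEN_CHK, __IDPF_Q_FLOW_SCH_EN, ...):

/* sketch only -- assumes each queue struct keeps a flags bitmap and
 * enumerators named __IDPF_Q_<FLAG>
 */
#define idpf_queue_set(f, q)		__set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q)		__clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q)		__change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q)		test_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v)	__assign_bit(__IDPF_Q_##f, (q)->flags, v)

With helpers of this shape, idpf_queue_assign(HSPLIT_EN, q, hs) in the idpf_txrx.c hunks below replaces the removed per-queue rx_hsplit_en bookkeeping.
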
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 285da2177ee4..af2879f03b8d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -1,9 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count);
+
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
* @stack: pointer to stack struct
@@ -60,7 +65,8 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @tx_q: the queue that owns the buffer
* @tx_buf: the buffer to free
*/
-static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
+static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
+ struct idpf_tx_buf *tx_buf)
{
if (tx_buf->skb) {
if (dma_unmap_len(tx_buf, len))
@@ -86,8 +92,9 @@ static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
-static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
+static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct idpf_buf_lifo *buf_stack;
u16 i;
/* Buffers already cleared, nothing to do */
@@ -101,39 +108,58 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
kfree(txq->tx_buf);
txq->tx_buf = NULL;
- if (!txq->buf_stack.bufs)
+ if (!idpf_queue_has(FLOW_SCH_EN, txq))
return;
- for (i = 0; i < txq->buf_stack.size; i++)
- kfree(txq->buf_stack.bufs[i]);
+ buf_stack = &txq->stash->buf_stack;
+ if (!buf_stack->bufs)
+ return;
- kfree(txq->buf_stack.bufs);
- txq->buf_stack.bufs = NULL;
+ for (i = 0; i < buf_stack->size; i++)
+ kfree(buf_stack->bufs[i]);
+
+ kfree(buf_stack->bufs);
+ buf_stack->bufs = NULL;
}
/**
* idpf_tx_desc_rel - Free Tx resources per queue
* @txq: Tx descriptor ring for a specific queue
- * @bufq: buffer q or completion q
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
+static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
- if (bufq)
- idpf_tx_buf_rel_all(txq);
+ idpf_tx_buf_rel_all(txq);
if (!txq->desc_ring)
return;
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
- txq->next_to_alloc = 0;
txq->next_to_use = 0;
txq->next_to_clean = 0;
}
/**
+ * idpf_compl_desc_rel - Free completion resources per queue
+ * @complq: completion queue
+ *
+ * Free all completion software resources.
+ */
+static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
+{
+ if (!complq->comp)
+ return;
+
+ dma_free_coherent(complq->netdev->dev.parent, complq->size,
+ complq->comp, complq->dma);
+ complq->comp = NULL;
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+}
+
+/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
* @vport: virtual port structure
*
@@ -150,10 +176,10 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++)
- idpf_tx_desc_rel(txq_grp->txqs[j], true);
+ idpf_tx_desc_rel(txq_grp->txqs[j]);
if (idpf_is_queue_model_split(vport->txq_model))
- idpf_tx_desc_rel(txq_grp->complq, false);
+ idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -163,8 +189,9 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
+static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
+ struct idpf_buf_lifo *buf_stack;
int buf_size;
int i;
@@ -180,22 +207,26 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
for (i = 0; i < tx_q->desc_count; i++)
tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ buf_stack = &tx_q->stash->buf_stack;
+
/* Initialize tx buf stack for out-of-order completions if
* flow scheduling offload is enabled
*/
- tx_q->buf_stack.bufs =
- kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs)
+ buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
+ GFP_KERNEL);
+ if (!buf_stack->bufs)
return -ENOMEM;
- tx_q->buf_stack.size = tx_q->desc_count;
- tx_q->buf_stack.top = tx_q->desc_count;
+ buf_stack->size = tx_q->desc_count;
+ buf_stack->top = tx_q->desc_count;
for (i = 0; i < tx_q->desc_count; i++) {
- tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs[i])
+ buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
+ GFP_KERNEL);
+ if (!buf_stack->bufs[i])
return -ENOMEM;
}
@@ -204,28 +235,22 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
/**
* idpf_tx_desc_alloc - Allocate the Tx descriptors
+ * @vport: vport to allocate resources for
* @tx_q: the tx ring to set up
- * @bufq: buffer or completion queue
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
+static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
- u32 desc_sz;
int err;
- if (bufq) {
- err = idpf_tx_buf_alloc_all(tx_q);
- if (err)
- goto err_alloc;
-
- desc_sz = sizeof(struct idpf_base_tx_desc);
- } else {
- desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
- }
+ err = idpf_tx_buf_alloc_all(tx_q);
+ if (err)
+ goto err_alloc;
- tx_q->size = tx_q->desc_count * desc_sz;
+ tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
/* Allocate descriptors also round up to nearest 4K */
tx_q->size = ALIGN(tx_q->size, 4096);
@@ -238,20 +263,44 @@ static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
goto err_alloc;
}
- tx_q->next_to_alloc = 0;
tx_q->next_to_use = 0;
tx_q->next_to_clean = 0;
- set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
+ idpf_queue_set(GEN_CHK, tx_q);
return 0;
err_alloc:
- idpf_tx_desc_rel(tx_q, bufq);
+ idpf_tx_desc_rel(tx_q);
return err;
}
/**
+ * idpf_compl_desc_alloc - allocate completion descriptors
+ * @vport: vport to allocate resources for
+ * @complq: completion queue to set up
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
+
+ complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
+ complq->size, &complq->dma,
+ GFP_KERNEL);
+ if (!complq->comp)
+ return -ENOMEM;
+
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+ idpf_queue_set(GEN_CHK, complq);
+
+ return 0;
+}
+
+/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
*
@@ -259,7 +308,6 @@ err_alloc:
*/
static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
int err = 0;
int i, j;
@@ -268,13 +316,14 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
*/
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
+ struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
u8 gen_bits = 0;
u16 bufidx_mask;
- err = idpf_tx_desc_alloc(txq, true);
+ err = idpf_tx_desc_alloc(vport, txq);
if (err) {
- dev_err(dev, "Allocation for Tx Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Queue %u failed\n",
i);
goto err_out;
}
@@ -312,9 +361,10 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
continue;
/* Setup completion queues */
- err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
+ err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
if (err) {
- dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Completion Queue %u failed\n",
i);
goto err_out;
}
@@ -329,70 +379,97 @@ err_out:
/**
* idpf_rx_page_rel - Release an rx buffer page
- * @rxq: the queue that owns the buffer
* @rx_buf: the buffer to free
*/
-static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
+static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
if (unlikely(!rx_buf->page))
return;
- page_pool_put_full_page(rxq->pp, rx_buf->page, false);
+ page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ rx_buf->offset = 0;
}
/**
* idpf_rx_hdr_buf_rel_all - Release header buffer memory
- * @rxq: queue to use
+ * @bufq: queue to use
*/
-static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
+ struct libeth_fq fq = {
+ .fqes = bufq->hdr_buf,
+ .pp = bufq->hdr_pp,
+ };
- dma_free_coherent(&adapter->pdev->dev,
- rxq->desc_count * IDPF_HDR_BUF_SIZE,
- rxq->rx_buf.hdr_buf_va,
- rxq->rx_buf.hdr_buf_pa);
- rxq->rx_buf.hdr_buf_va = NULL;
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->hdr_buf[i]);
+
+ libeth_rx_fq_destroy(&fq);
+ bufq->hdr_buf = NULL;
+ bufq->hdr_pp = NULL;
}
/**
- * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
- * @rxq: queue to be cleaned
+ * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
+ * @bufq: queue to be cleaned
*/
-static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
{
- u16 i;
+ struct libeth_fq fq = {
+ .fqes = bufq->buf,
+ .pp = bufq->pp,
+ };
/* queue already cleared, nothing to do */
- if (!rxq->rx_buf.buf)
+ if (!bufq->buf)
return;
/* Free all the bufs allocated and given to hw on Rx queue */
- for (i = 0; i < rxq->desc_count; i++)
- idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->buf[i]);
- if (rxq->rx_hsplit_en)
- idpf_rx_hdr_buf_rel_all(rxq);
+ if (idpf_queue_has(HSPLIT_EN, bufq))
+ idpf_rx_hdr_buf_rel_all(bufq);
- page_pool_destroy(rxq->pp);
- rxq->pp = NULL;
+ libeth_rx_fq_destroy(&fq);
+ bufq->buf = NULL;
+ bufq->pp = NULL;
+}
+
+/**
+ * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
+ * @rxq: queue to be cleaned
+ */
+static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
+{
+ struct libeth_fq fq = {
+ .fqes = rxq->rx_buf,
+ .pp = rxq->pp,
+ };
- kfree(rxq->rx_buf.buf);
- rxq->rx_buf.buf = NULL;
+ if (!rxq->rx_buf)
+ return;
+
+ for (u32 i = 0; i < rxq->desc_count; i++)
+ idpf_rx_page_rel(&rxq->rx_buf[i]);
+
+ libeth_rx_fq_destroy(&fq);
+ rxq->rx_buf = NULL;
+ rxq->pp = NULL;
}
/**
* idpf_rx_desc_rel - Free a specific Rx q resources
* @rxq: queue to clean the resources from
- * @bufq: buffer q or completion q
- * @q_model: single or split q model
+ * @dev: device to free DMA memory
+ * @model: single or split queue model
*
* Free a specific rx queue resources
*/
-static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
+ u32 model)
{
if (!rxq)
return;
@@ -402,7 +479,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
rxq->skb = NULL;
}
- if (bufq || !idpf_is_queue_model_split(q_model))
+ if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
rxq->next_to_alloc = 0;
@@ -411,11 +488,35 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
if (!rxq->desc_ring)
return;
- dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
+ dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
rxq->desc_ring = NULL;
}
/**
+ * idpf_rx_desc_rel_bufq - free buffer queue resources
+ * @bufq: buffer queue to clean the resources from
+ * @dev: device to free DMA memory
+ */
+static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
+ struct device *dev)
+{
+ if (!bufq)
+ return;
+
+ idpf_rx_buf_rel_bufq(bufq);
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+
+ if (!bufq->split_buf)
+ return;
+
+ dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
+ bufq->split_buf = NULL;
+}
+
+/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
*
@@ -423,6 +524,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
{
+ struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
int i, j;
@@ -435,15 +537,15 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
if (!idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
- idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
- false, vport->rxq_model);
+ idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
+ VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
for (j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
- false, vport->rxq_model);
+ dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
@@ -452,45 +554,50 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
- idpf_rx_desc_rel(&bufq_set->bufq, true,
- vport->rxq_model);
+ idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
}
}
}
/**
* idpf_rx_buf_hw_update - Store the new tail and head values
- * @rxq: queue to bump
+ * @bufq: queue to bump
* @val: new head index
*/
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
+static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
{
- rxq->next_to_use = val;
+ bufq->next_to_use = val;
- if (unlikely(!rxq->tail))
+ if (unlikely(!bufq->tail))
return;
/* writel has an implicit memory barrier */
- writel(val, rxq->tail);
+ writel(val, bufq->tail);
}
/**
* idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
- * @rxq: ring to use
+ * @bufq: ring to use
*
* Returns 0 on success, negative on failure.
*/
-static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
+static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
-
- rxq->rx_buf.hdr_buf_va =
- dma_alloc_coherent(&adapter->pdev->dev,
- IDPF_HDR_BUF_SIZE * rxq->desc_count,
- &rxq->rx_buf.hdr_buf_pa,
- GFP_KERNEL);
- if (!rxq->rx_buf.hdr_buf_va)
- return -ENOMEM;
+ struct libeth_fq fq = {
+ .count = bufq->desc_count,
+ .type = LIBETH_FQE_HDR,
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
+
+ bufq->hdr_pp = fq.pp;
+ bufq->hdr_buf = fq.fqes;
+ bufq->hdr_truesize = fq.truesize;
+ bufq->rx_hbuf_size = fq.buf_len;
return 0;
}
@@ -502,19 +609,20 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
*/
static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
- u16 nta = refillq->next_to_alloc;
+ u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
FIELD_PREP(IDPF_RX_BI_GEN_M,
- test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
+ idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
- change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ idpf_queue_change(GEN_CHK, refillq);
}
- refillq->next_to_alloc = nta;
+
+ refillq->next_to_use = nta;
}
/**
@@ -524,24 +632,35 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
*
* Returns false if buffer could not be allocated, true otherwise.
*/
-static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
+static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{
struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
+ struct libeth_fq_fp fq = {
+ .count = bufq->desc_count,
+ };
u16 nta = bufq->next_to_alloc;
- struct idpf_rx_buf *buf;
dma_addr_t addr;
- splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
- buf = &bufq->rx_buf.buf[buf_id];
+ splitq_rx_desc = &bufq->split_buf[nta];
+
+ if (idpf_queue_has(HSPLIT_EN, bufq)) {
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
- if (bufq->rx_hsplit_en) {
- splitq_rx_desc->hdr_addr =
- cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return false;
+
+ splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
}
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ fq.pp = bufq->pp;
+ fq.fqes = bufq->buf;
+ fq.truesize = bufq->truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return false;
splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
@@ -562,7 +681,8 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
*
* Returns true if @working_set bufs were posted successfully, false otherwise.
*/
-static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
+static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
+ u16 working_set)
{
int i;
@@ -571,95 +691,114 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
return false;
}
- idpf_rx_buf_hw_update(bufq,
- bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
+ idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
+ IDPF_RX_BUF_STRIDE));
return true;
}
/**
- * idpf_rx_create_page_pool - Create a page pool
- * @rxbufq: RX queue to create page pool for
+ * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
+ * @rxq: queue for which the buffers are allocated
*
- * Returns &page_pool on success, casted -errno on failure
+ * Return: 0 on success, -ENOMEM on failure.
*/
-static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
+static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
{
- struct page_pool_params pp = {
- .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .order = 0,
- .pool_size = rxbufq->desc_count,
- .nid = NUMA_NO_NODE,
- .dev = rxbufq->vport->netdev->dev.parent,
- .max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
- .offset = 0,
+ if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
+ goto err;
+
+ return 0;
+
+err:
+ idpf_rx_buf_rel_all(rxq);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
+ * @rxq: buffer queue to create page pool for
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
+{
+ struct libeth_fq fq = {
+ .count = rxq->desc_count,
+ .type = LIBETH_FQE_MTU,
+ .nid = idpf_q_vector_to_mem(rxq->q_vector),
};
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
+ if (ret)
+ return ret;
- return page_pool_create(&pp);
+ rxq->pp = fq.pp;
+ rxq->rx_buf = fq.fqes;
+ rxq->truesize = fq.truesize;
+ rxq->rx_buf_size = fq.buf_len;
+
+ return idpf_rx_buf_alloc_singleq(rxq);
}
/**
* idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
- * @rxbufq: queue for which the buffers are allocated; equivalent to
- * rxq when operating in singleq mode
+ * @rxbufq: queue for which the buffers are allocated
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
+static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
int err = 0;
- /* Allocate book keeping buffers */
- rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
- sizeof(struct idpf_rx_buf), GFP_KERNEL);
- if (!rxbufq->rx_buf.buf) {
- err = -ENOMEM;
- goto rx_buf_alloc_all_out;
- }
-
- if (rxbufq->rx_hsplit_en) {
+ if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
err = idpf_rx_hdr_buf_alloc_all(rxbufq);
if (err)
goto rx_buf_alloc_all_out;
}
/* Allocate buffers to be given to HW. */
- if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
- int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
-
- if (!idpf_rx_post_init_bufs(rxbufq, working_set))
- err = -ENOMEM;
- } else {
- if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
- rxbufq->desc_count - 1))
- err = -ENOMEM;
- }
+ if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
+ err = -ENOMEM;
rx_buf_alloc_all_out:
if (err)
- idpf_rx_buf_rel_all(rxbufq);
+ idpf_rx_buf_rel_bufq(rxbufq);
return err;
}
/**
* idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
- * @rxbufq: RX queue to create page pool for
+ * @bufq: buffer queue to create page pool for
+ * @type: type of Rx buffers to allocate
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
+static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
+ enum libeth_fqe_type type)
{
- struct page_pool *pool;
+ struct libeth_fq fq = {
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ .type = type,
+ .hsplit = idpf_queue_has(HSPLIT_EN, bufq),
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
- pool = idpf_rx_create_page_pool(rxbufq);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
- rxbufq->pp = pool;
+ bufq->pp = fq.pp;
+ bufq->buf = fq.fqes;
+ bufq->truesize = fq.truesize;
+ bufq->rx_buf_size = fq.buf_len;
- return idpf_rx_buf_alloc_all(rxbufq);
+ return idpf_rx_buf_alloc_all(bufq);
}
/**
@@ -670,20 +809,22 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
*/
int idpf_rx_bufs_init_all(struct idpf_vport *vport)
{
- struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_bufs_init(q);
+ err = idpf_rx_bufs_init_singleq(q);
if (err)
return err;
}
@@ -693,10 +834,19 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/* Otherwise, allocate bufs for the buffer queues */
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ enum libeth_fqe_type type;
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_bufs_init(q);
+ q->truesize = truesize;
+
+ type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
+
+ err = idpf_rx_bufs_init(q, type);
if (err)
return err;
+
+ truesize = q->truesize >> 1;
}
}
@@ -705,22 +855,17 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/**
* idpf_rx_desc_alloc - Allocate queue Rx resources
+ * @vport: vport to allocate resources for
* @rxq: Rx queue for which the resources are setup
- * @bufq: buffer or completion queue
- * @q_model: single or split queue model
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
{
- struct device *dev = rxq->dev;
+ struct device *dev = &vport->adapter->pdev->dev;
- if (bufq)
- rxq->size = rxq->desc_count *
- sizeof(struct virtchnl2_splitq_rx_buf_desc);
- else
- rxq->size = rxq->desc_count *
- sizeof(union virtchnl2_rx_desc);
+ rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
/* Allocate descriptors and also round up to nearest 4K */
rxq->size = ALIGN(rxq->size, 4096);
@@ -735,7 +880,35 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
- set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
+ idpf_queue_set(GEN_CHK, rxq);
+
+ return 0;
+}
+
+/**
+ * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
+ * @vport: vport to allocate resources for
+ * @bufq: buffer queue for which the resources are set up
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+
+ bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
+
+ bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
+ GFP_KERNEL);
+ if (!bufq->split_buf)
+ return -ENOMEM;
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+
+ idpf_queue_set(GEN_CHK, bufq);
return 0;
}
@@ -748,9 +921,7 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
int i, j, err;
u16 num_rxq;
@@ -762,13 +933,17 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
+
+ err = idpf_rx_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx Queue %u failed\n",
i);
goto err_out;
}
@@ -778,10 +953,14 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
continue;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
+
+ err = idpf_bufq_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx Buffer Queue %u failed\n",
i);
goto err_out;
}
@@ -802,11 +981,16 @@ err_out:
*/
static void idpf_txq_group_rel(struct idpf_vport *vport)
{
+ bool split, flow_sch_en;
int i, j;
if (!vport->txq_grps)
return;
+ split = idpf_is_queue_model_split(vport->txq_model);
+ flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
@@ -814,8 +998,15 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
+
+ if (!split)
+ continue;
+
kfree(txq_grp->complq);
txq_grp->complq = NULL;
+
+ if (flow_sch_en)
+ kfree(txq_grp->stashes);
}
kfree(vport->txq_grps);
vport->txq_grps = NULL;
@@ -919,7 +1110,7 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
int i, j, k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
+ vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
if (!vport->txqs)
@@ -967,17 +1158,11 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
- vport->bufq_size[0] = IDPF_RX_BUF_2048;
return;
}
vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
- /* Bufq[0] default buffer size is 4K
- * Bufq[1] default buffer size is 2K
- */
- vport->bufq_size[0] = IDPF_RX_BUF_4096;
- vport->bufq_size[1] = IDPF_RX_BUF_2048;
}
/**
@@ -1137,9 +1322,10 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
+static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q)
{
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
} else {
if (vport->base_rxd)
@@ -1158,20 +1344,22 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
*/
static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
{
- bool flow_sch_en;
- int err, i;
+ bool split, flow_sch_en;
+ int i;
vport->txq_grps = kcalloc(vport->num_txq_grp,
sizeof(*vport->txq_grps), GFP_KERNEL);
if (!vport->txq_grps)
return -ENOMEM;
+ split = idpf_is_queue_model_split(vport->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_txq_stash *stashes;
int j;
tx_qgrp->vport = vport;
@@ -1180,45 +1368,62 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
- if (!tx_qgrp->txqs[j]) {
- err = -ENOMEM;
+ if (!tx_qgrp->txqs[j])
goto err_alloc;
- }
+ }
+
+ if (split && flow_sch_en) {
+ stashes = kcalloc(num_txq, sizeof(*stashes),
+ GFP_KERNEL);
+ if (!stashes)
+ goto err_alloc;
+
+ tx_qgrp->stashes = stashes;
}
for (j = 0; j < tx_qgrp->num_txq; j++) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
q->desc_count = vport->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
- q->vport = vport;
+ q->netdev = vport->netdev;
q->txq_grp = tx_qgrp;
- hash_init(q->sched_buf_hash);
- if (flow_sch_en)
- set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+ if (!split) {
+ q->clean_budget = vport->compln_clean_budget;
+ idpf_queue_assign(CRC_EN, q,
+ vport->crc_enable);
+ }
+
+ if (!flow_sch_en)
+ continue;
+
+ if (split) {
+ q->stash = &stashes[j];
+ hash_init(q->stash->sched_buf_hash);
+ }
+
+ idpf_queue_set(FLOW_SCH_EN, q);
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!split)
continue;
tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
sizeof(*tx_qgrp->complq),
GFP_KERNEL);
- if (!tx_qgrp->complq) {
- err = -ENOMEM;
+ if (!tx_qgrp->complq)
goto err_alloc;
- }
- tx_qgrp->complq->dev = &adapter->pdev->dev;
tx_qgrp->complq->desc_count = vport->complq_desc_count;
- tx_qgrp->complq->vport = vport;
tx_qgrp->complq->txq_grp = tx_qgrp;
+ tx_qgrp->complq->netdev = vport->netdev;
+ tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
if (flow_sch_en)
- __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
+ idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
}
return 0;
@@ -1226,7 +1431,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
err_alloc:
idpf_txq_group_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
@@ -1238,8 +1443,6 @@ err_alloc:
*/
static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, k, err = 0;
bool hs;
@@ -1292,21 +1495,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
+ struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->bufq_desc_count[j];
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
- q->idx = j;
- q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
- q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1319,13 +1514,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_sw_queue *refillq =
&bufq_set->refillqs[k];
- refillq->dev = &vport->adapter->pdev->dev;
refillq->desc_count =
vport->bufq_desc_count[j];
- set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
- set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_set(GEN_CHK, refillq);
+ idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
- sizeof(u16),
+ sizeof(*refillq->ring),
GFP_KERNEL);
if (!refillq->ring) {
err = -ENOMEM;
@@ -1336,36 +1530,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
skip_splitq_rx_init:
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (!idpf_is_queue_model_split(vport->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- rx_qgrp->splitq.rxq_sets[j]->refillq0 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
- rx_qgrp->splitq.rxq_sets[j]->refillq1 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
setup_rxq:
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->rxq_desc_count;
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
+ q->rx_ptype_lkup = vport->rx_ptype_lkup;
+ q->netdev = vport->netdev;
+ q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
- /* In splitq mode, RXQ buffer size should be
- * set to that of the first buffer queue
- * associated with this RXQ
- */
- q->rx_buf_size = vport->bufq_size[0];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
- IDPF_PACKET_HDR_PAD;
+ LIBETH_RX_LL_LEN;
idpf_rxq_set_descids(vport, q);
}
}
@@ -1445,12 +1633,13 @@ err_out:
* idpf_tx_handle_sw_marker - Handle queue marker packet
* @tx_q: tx queue to handle software marker
*/
-static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
+static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
{
- struct idpf_vport *vport = tx_q->vport;
+ struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
+ struct idpf_vport *vport = priv->vport;
int i;
- clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
+ idpf_queue_clear(SW_MARKER, tx_q);
/* Hardware must write marker packets to all queues associated with
* completion queues. So check if all queues received marker packets
*/
@@ -1458,7 +1647,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
/* If we're still waiting on any other TXQ marker completions,
* just return now since we cannot wake up the marker_wq yet.
*/
- if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
+ if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
return;
/* Drain complete */
@@ -1474,7 +1663,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @napi_budget: Used to determine if we are in netpoll
*/
-static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
+static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *tx_buf,
struct idpf_cleaned_stats *cleaned,
int napi_budget)
@@ -1505,7 +1694,8 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*/
-static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
+static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
+ u16 compl_tag,
struct idpf_cleaned_stats *cleaned,
int budget)
{
@@ -1513,7 +1703,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
struct hlist_node *tmp_buf;
/* Buffer completion */
- hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
+ hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
if (unlikely(stash->buf.compl_tag != (int)compl_tag))
continue;
@@ -1530,7 +1720,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
}
/* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->buf_stack, stash);
+ idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
hash_del(&stash->hlist);
}
@@ -1542,7 +1732,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
* @txq: Tx queue to clean
* @tx_buf: buffer to store
*/
-static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
+static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
struct idpf_tx_buf *tx_buf)
{
struct idpf_tx_stash *stash;
@@ -1551,10 +1741,10 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
!dma_unmap_len(tx_buf, len)))
return 0;
- stash = idpf_buf_lifo_pop(&txq->buf_stack);
+ stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
if (unlikely(!stash)) {
net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- txq->vport->netdev->name);
+ netdev_name(txq->netdev));
return -ENOMEM;
}
@@ -1568,7 +1758,8 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
stash->buf.compl_tag = tx_buf->compl_tag;
/* Add buffer to buf_hash table to be freed later */
- hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
+ hash_add(txq->stash->sched_buf_hash, &stash->hlist,
+ stash->buf.compl_tag);
memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
@@ -1584,7 +1775,7 @@ do { \
if (unlikely(!(ntc))) { \
ntc -= (txq)->desc_count; \
buf = (txq)->tx_buf; \
- desc = IDPF_FLEX_TX_DESC(txq, 0); \
+ desc = &(txq)->flex_tx[0]; \
} else { \
(buf)++; \
(desc)++; \
@@ -1607,7 +1798,7 @@ do { \
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
*/
-static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
struct idpf_cleaned_stats *cleaned,
bool descs_only)
@@ -1617,8 +1808,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
s16 ntc = tx_q->next_to_clean;
struct idpf_tx_buf *tx_buf;
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
- next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+ tx_desc = &tx_q->flex_tx[ntc];
+ next_pending_desc = &tx_q->flex_tx[end];
tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count;
@@ -1703,7 +1894,7 @@ do { \
* stashed. Returns the byte/segment count for the cleaned packet associated
* with this completion tag.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
+static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
struct idpf_cleaned_stats *cleaned,
int budget)
{
@@ -1772,14 +1963,14 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
*
* Returns bytes/packets cleaned
*/
-static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
+static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
struct idpf_cleaned_stats *cleaned,
int budget)
{
u16 compl_tag;
- if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
+ if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
@@ -1802,24 +1993,23 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
+static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
int *cleaned)
{
struct idpf_splitq_tx_compl_desc *tx_desc;
- struct idpf_vport *vport = complq->vport;
s16 ntc = complq->next_to_clean;
struct idpf_netdev_priv *np;
unsigned int complq_budget;
bool complq_ok = true;
int i;
- complq_budget = vport->compln_clean_budget;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+ complq_budget = complq->clean_budget;
+ tx_desc = &complq->comp[ntc];
ntc -= complq->desc_count;
do {
struct idpf_cleaned_stats cleaned_stats = { };
- struct idpf_queue *tx_q;
+ struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
u8 ctype; /* completion type */
@@ -1828,7 +2018,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
/* if the descriptor isn't done, no work yet to do */
gen = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
- if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
+ if (idpf_queue_has(GEN_CHK, complq) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
@@ -1836,8 +2026,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
- dev_err(&complq->vport->adapter->pdev->dev,
- "TxQ not found\n");
+ netdev_err(complq->netdev, "TxQ not found\n");
goto fetch_next_desc;
}
tx_q = complq->txq_grp->txqs[rel_tx_qid];
@@ -1860,15 +2049,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
idpf_tx_handle_sw_marker(tx_q);
break;
default:
- dev_err(&tx_q->vport->adapter->pdev->dev,
- "Unknown TX completion type: %d\n",
- ctype);
+ netdev_err(tx_q->netdev,
+ "Unknown TX completion type: %d\n", ctype);
goto fetch_next_desc;
}
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
- u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
+ u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
tx_q->cleaned_pkts += cleaned_stats.packets;
tx_q->cleaned_bytes += cleaned_stats.bytes;
complq->num_completions++;
@@ -1879,8 +2067,8 @@ fetch_next_desc:
ntc++;
if (unlikely(!ntc)) {
ntc -= complq->desc_count;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
- change_bit(__IDPF_Q_GEN_CHK, complq->flags);
+ tx_desc = &complq->comp[0];
+ idpf_queue_change(GEN_CHK, complq);
}
prefetch(tx_desc);
@@ -1896,9 +2084,9 @@ fetch_next_desc:
IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
complq_ok = false;
- np = netdev_priv(complq->vport->netdev);
+ np = netdev_priv(complq->netdev);
for (i = 0; i < complq->txq_grp->num_txq; ++i) {
- struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
+ struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
struct netdev_queue *nq;
bool dont_wake;
@@ -1909,11 +2097,11 @@ fetch_next_desc:
*cleaned += tx_q->cleaned_pkts;
/* Update BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(tx_q->vport->netdev);
+ !netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
@@ -1976,7 +2164,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
*
* Returns 0 if stop is not needed
*/
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
+int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
{
struct netdev_queue *nq;
@@ -1984,10 +2172,10 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
return 0;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
}
@@ -1999,7 +2187,7 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
+static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
@@ -2023,9 +2211,9 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
splitq_stop:
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
+ netif_stop_subqueue(tx_q->netdev, tx_q->idx);
return -EBUSY;
}
@@ -2040,12 +2228,12 @@ splitq_stop:
* to do a register write to update our queue status. We know this can only
* mean tail here as HW should be owning head for TX.
*/
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more)
{
struct netdev_queue *nq;
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
@@ -2069,7 +2257,7 @@ void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
*
* Returns number of data descriptors needed for this skb.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb)
{
const struct skb_shared_info *shinfo;
@@ -2102,7 +2290,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
count = idpf_size_to_txd_count(skb->len);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.linearize);
+ u64_stats_inc(&txq->q_stats.linearize);
u64_stats_update_end(&txq->stats_sync);
}
@@ -2116,11 +2304,11 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
* @first: original first buffer info buffer for packet
* @idx: starting point on ring to unwind
*/
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
/* clear dma mappings for failed tx_buf map */
@@ -2143,7 +2331,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
* used one additional descriptor for a context
* descriptor. Reset that here.
*/
- tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+ tx_desc = &txq->flex_tx[idx];
memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
if (idx == 0)
idx = txq->desc_count;
@@ -2159,7 +2347,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
* @txq: the tx ring to wrap
* @ntu: ring index to bump
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
ntu++;
@@ -2181,7 +2369,7 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
* and gets a physical address for each memory location and programs
* it and the length into the transmit flex descriptor.
*/
-static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
+static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_splitq_params *params,
struct idpf_tx_buf *first)
{
@@ -2202,7 +2390,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->flex_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2275,7 +2463,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen =
IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
@@ -2320,7 +2508,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
}
@@ -2348,7 +2536,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
@@ -2525,8 +2713,8 @@ static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
* E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
* header, 1 for segment payload, and then 7 for the fragments.
*/
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count)
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count)
{
if (likely(count < max_bufs))
return false;
@@ -2544,7 +2732,7 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_flex_tx_ctx_desc *
-idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
+idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
@@ -2553,7 +2741,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
/* grab the next descriptor */
- desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+ desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
return desc;
@@ -2564,10 +2752,10 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
* @tx_q: queue to send buffer on
* @skb: pointer to skb
*/
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
+ u64_stats_inc(&tx_q->q_stats.skb_drops);
u64_stats_update_end(&tx_q->stats_sync);
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
@@ -2585,7 +2773,7 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+ struct idpf_tx_queue *tx_q)
{
struct idpf_tx_splitq_params tx_params = { };
struct idpf_tx_buf *first;
@@ -2625,7 +2813,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
+ u64_stats_inc(&tx_q->q_stats.lso_pkts);
u64_stats_update_end(&tx_q->stats_sync);
}
@@ -2642,7 +2830,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
}
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
/* Set the RE bit to catch any packets that may have not been
@@ -2672,17 +2860,16 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
}
/**
- * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * idpf_tx_start - Selects the right Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev)
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
+ struct idpf_tx_queue *tx_q;
if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
dev_kfree_skb_any(skb);
@@ -2701,31 +2888,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- return idpf_tx_splitq_frame(skb, tx_q);
-}
-
-/**
- * idpf_ptype_to_htype - get a hash type
- * @decoded: Decoded Rx packet type related fields
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
-{
- if (!decoded->known)
- return PKT_HASH_TYPE_NONE;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->inner_prot)
- return PKT_HASH_TYPE_L4;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->outer_ip)
- return PKT_HASH_TYPE_L3;
- if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
+ if (idpf_is_queue_model_split(vport->txq_model))
+ return idpf_tx_splitq_frame(skb, tx_q);
+ else
+ return idpf_tx_singleq_frame(skb, tx_q);
}
/**
@@ -2735,20 +2901,21 @@ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *deco
* @rx_desc: Receive descriptor
* @decoded: Decoded Rx packet type related fields
*/
-static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static void
+idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u32 hash;
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
return;
hash = le16_to_cpu(rx_desc->hash1) |
(rx_desc->ff2_mirrid_hash2.hash2 << 16) |
(rx_desc->hash3 << 24);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
/**
@@ -2760,92 +2927,83 @@ static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- struct idpf_rx_ptype_decoded *decoded)
+static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (!(csum_bits->l3l4p))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
- if (ipv6 && csum_bits->ipv6exadd)
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed
*/
- if (csum_bits->l4e)
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded->inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- if (!csum_bits->raw_csum_inv) {
- u16 csum = csum_bits->raw_csum;
-
- skb->csum = csum_unfold((__force __sum16)~swab16(csum));
- skb->ip_summed = CHECKSUM_COMPLETE;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- break;
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ if (csum_bits.raw_csum_inv ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
+ return;
}
+ skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
/**
* idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
* @rx_desc: receive descriptor
- * @csum: structure to extract checksum fields
*
+ * Return: parsed checksum status.
**/
-static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_csum_decoded *csum)
+static struct idpf_rx_csum_decoded
+idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
+ struct idpf_rx_csum_decoded csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
qword1 = rx_desc->status_err0_qw1;
- csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ qword1);
+ csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
qword1);
- csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
+ csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
+ qword1);
+ csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
qword1);
- csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
- qword1);
- csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
- qword1);
- csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
- qword0);
- csum->raw_csum_inv =
+ csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
+ qword0);
+ csum.raw_csum_inv =
le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
- csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+ csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+
+ return csum;
}
/**
@@ -2860,23 +3018,24 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
* Populate the skb fields with the total number of RSC segments, RSC payload
* length and packet type.
*/
-static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u16 rsc_segments, rsc_seg_len;
bool ipv4, ipv6;
int len;
- if (unlikely(!decoded->outer_ip))
+ if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
+ LIBETH_RX_PT_OUTER_L2))
return -EINVAL;
rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
if (unlikely(!rsc_seg_len))
return -EINVAL;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (unlikely(!(ipv4 ^ ipv6)))
return -EINVAL;
@@ -2914,7 +3073,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
tcp_gro_complete(skb);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
+ u64_stats_inc(&rxq->q_stats.rsc_pkts);
u64_stats_update_end(&rxq->stats_sync);
return 0;
@@ -2930,35 +3089,31 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
* order to populate the hash, checksum, protocol, and
* other fields within the skb.
*/
-static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
- struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+static int
+idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
- struct idpf_rx_ptype_decoded decoded;
+ struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_pt decoded;
u16 rx_ptype;
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
-
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
- decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
- /* If we don't know the ptype we can't do anything else with it. Just
- * pass it up the stack as-is.
- */
- if (!decoded.known)
- return 0;
+ decoded = rxq->rx_ptype_lkup[rx_ptype];
/* process RSS/hash */
- idpf_rx_hash(rxq, skb, rx_desc, &decoded);
+ idpf_rx_hash(rxq, skb, rx_desc, decoded);
+
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
- return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
+ return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
+
+ csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
+ idpf_rx_csum(rxq, skb, csum_bits, decoded);
- idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
- idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
+ skb_record_rx_queue(skb, rxq->idx);
return 0;
}
@@ -2976,103 +3131,73 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, rx_buf->truesize);
+ u32 hr = rx_buf->page->pp->p.offset;
- rx_buf->page = NULL;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->offset + hr, size, rx_buf->truesize);
}
/**
- * idpf_rx_construct_skb - Allocate skb and populate it
- * @rxq: Rx descriptor queue
- * @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
+ * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
+ * @hdr: Rx buffer for the headers
+ * @buf: Rx buffer for the payload
+ * @data_len: number of bytes received to the payload buffer
*
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
+ * When a header buffer overflow occurs or the HW was unable to parse the
+ * packet type to perform header split, the whole frame gets placed in the
+ * payload buffer. We can't build a valid skb around a payload buffer when
+ * the header split is active since it doesn't reserve any head- or tailroom.
+ * In that case, copy either the whole frame when it's short or just the
+ * Ethernet header to the header buffer to be able to build an skb and adjust
+ * the data offset in the payload buffer, IOW emulate the header split.
+ *
+ * Return: number of bytes copied to the header buffer.
*/
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size)
+static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
+ struct libeth_fqe *buf, u32 data_len)
{
- unsigned int headlen;
- struct sk_buff *skb;
- void *va;
-
- va = page_address(rx_buf->page) + rx_buf->page_offset;
-
- /* prefetch first cache line of first page */
- net_prefetch(va);
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- idpf_rx_put_page(rx_buf);
-
- return NULL;
- }
-
- skb_record_rx_queue(skb, rxq->idx);
- skb_mark_for_recycle(skb);
-
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IDPF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (!size) {
- idpf_rx_put_page(rx_buf);
+ u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ const void *src;
+ void *dst;
- return skb;
- }
+ if (!libeth_rx_sync_for_cpu(buf, copy))
+ return 0;
- skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
- size, rx_buf->truesize);
+ dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
+ src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
+ memcpy(dst, src, LARGEST_ALIGN(copy));
- /* Since we're giving the page to the stack, clear our reference to it.
- * We'll get a new one during buffer posting.
- */
- rx_buf->page = NULL;
+ buf->offset += copy;
- return skb;
+ return copy;
}
/**
- * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
- * @rxq: Rx descriptor queue
- * @va: Rx buffer to pull data from
+ * idpf_rx_build_skb - Allocate skb and populate it from header buffer
+ * @buf: Rx buffer to pull data from
* @size: the length of the packet
*
* This function allocates an skb. It then populates it with the page data from
* the current receive descriptor, taking care to set up the skb correctly.
- * This specifically uses a header buffer to start building the skb.
*/
-static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
- const void *va,
- unsigned int size)
+struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
+ u32 hr = buf->page->pp->p.offset;
struct sk_buff *skb;
+ void *va;
+
+ va = page_address(buf->page) + buf->offset;
+ prefetch(va + hr);
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rxq->q_vector->napi, size);
+ skb = napi_build_skb(va, buf->truesize);
if (unlikely(!skb))
return NULL;
- skb_record_rx_queue(skb, rxq->idx);
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* More than likely, a payload fragment, which will use a page from
- * page_pool will be added to the SKB so mark it for recycle
- * preemptively. And if not, it's inconsequential.
- */
skb_mark_for_recycle(skb);
+ skb_reserve(skb, hr);
+ __skb_put(skb, size);
+
return skb;
}
@@ -3115,31 +3240,27 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
*
* Returns amount of work completed
*/
-static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
+static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
int total_rx_bytes = 0, total_rx_pkts = 0;
- struct idpf_queue *rx_bufq = NULL;
+ struct idpf_buf_queue *rx_bufq = NULL;
struct sk_buff *skb = rxq->skb;
u16 ntc = rxq->next_to_clean;
/* Process Rx packets bounded by budget */
while (likely(total_rx_pkts < budget)) {
struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct libeth_fqe *hdr, *rx_buf = NULL;
struct idpf_sw_queue *refillq = NULL;
struct idpf_rxq_set *rxq_set = NULL;
- struct idpf_rx_buf *rx_buf = NULL;
- union virtchnl2_rx_desc *desc;
unsigned int pkt_len = 0;
unsigned int hdr_len = 0;
u16 gen_id, buf_id = 0;
- /* Header buffer overflow only valid for header split */
- bool hbo = false;
int bufq_id;
u8 rxdid;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- desc = IDPF_RX_DESC(rxq, ntc);
- rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc
@@ -3150,7 +3271,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
- if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
+ if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
@@ -3158,7 +3279,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
IDPF_RX_BUMP_NTC(rxq, ntc);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.bad_descs);
+ u64_stats_inc(&rxq->q_stats.bad_descs);
u64_stats_update_end(&rxq->stats_sync);
continue;
}
@@ -3166,71 +3287,79 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
- hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
- rx_desc->status_err0_qw1);
-
- if (unlikely(hbo)) {
- /* If a header buffer overflow, occurs, i.e. header is
- * too large to fit in the header split buffer, HW will
- * put the entire packet, including headers, in the
- * data/payload buffer.
- */
- u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
- u64_stats_update_end(&rxq->stats_sync);
- goto bypass_hsplit;
- }
-
- hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
- VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
-
-bypass_hsplit:
bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
- if (!bufq_id)
- refillq = rxq_set->refillq0;
- else
- refillq = rxq_set->refillq1;
+ refillq = rxq_set->refillq[bufq_id];
/* retrieve buffer from the rxq */
- rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
+ rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
buf_id = le16_to_cpu(rx_desc->buf_id);
- rx_buf = &rx_bufq->rx_buf.buf[buf_id];
+ rx_buf = &rx_bufq->buf[buf_id];
- if (hdr_len) {
- const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
- (u32)buf_id * IDPF_HDR_BUF_SIZE;
+ if (!rx_bufq->hdr_pp)
+ goto payload;
+
+#define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
+#define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
+ if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
+ /* If a header buffer overflow occurs, i.e. header is
+ * too large to fit in the header split buffer, HW will
+ * put the entire packet, including headers, in the
+ * data/payload buffer.
+ */
+ hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
+ __HDR_LEN_MASK);
+#undef __HDR_LEN_MASK
+#undef __HBO_BIT
+
+ hdr = &rx_bufq->hdr_buf[buf_id];
+
+ if (unlikely(!hdr_len && !skb)) {
+ hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
+ pkt_len -= hdr_len;
- skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
+ u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
u64_stats_update_end(&rxq->stats_sync);
}
- if (pkt_len) {
- idpf_rx_sync_for_cpu(rx_buf, pkt_len);
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, pkt_len);
- else
- skb = idpf_rx_construct_skb(rxq, rx_buf,
- pkt_len);
- } else {
- idpf_rx_put_page(rx_buf);
+ if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
+ skb = idpf_rx_build_skb(hdr, hdr_len);
+ if (!skb)
+ break;
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.hsplit_pkts);
+ u64_stats_update_end(&rxq->stats_sync);
}
+ hdr->page = NULL;
+
+payload:
+ if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
+ goto skip_data;
+
+ if (skb)
+ idpf_rx_add_frag(rx_buf, skb, pkt_len);
+ else
+ skb = idpf_rx_build_skb(rx_buf, pkt_len);
+
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
- idpf_rx_post_buf_refill(refillq, buf_id);
+skip_data:
+ rx_buf->page = NULL;
+ idpf_rx_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
+
/* skip if it is non EOP desc */
- if (!idpf_rx_splitq_is_eop(rx_desc))
+ if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
continue;
/* pad skb if needed (to make valid ethernet frame) */
@@ -3250,7 +3379,7 @@ bypass_hsplit:
}
/* send completed skb up the stack */
- napi_gro_receive(&rxq->q_vector->napi, skb);
+ napi_gro_receive(rxq->napi, skb);
skb = NULL;
/* update budget accounting */
@@ -3261,8 +3390,8 @@ bypass_hsplit:
rxq->skb = skb;
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
+ u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
u64_stats_update_end(&rxq->stats_sync);
/* guarantee a trip back through this routine if there was a failure */
@@ -3272,34 +3401,41 @@ bypass_hsplit:
/**
* idpf_rx_update_bufq_desc - Update buffer queue descriptor
* @bufq: Pointer to the buffer queue
- * @refill_desc: SW Refill queue descriptor containing buffer ID
+ * @buf_id: buffer ID
* @buf_desc: Buffer queue descriptor
*
* Return 0 on success and negative on failure.
*/
-static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
+static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
struct virtchnl2_splitq_rx_buf_desc *buf_desc)
{
- struct idpf_rx_buf *buf;
+ struct libeth_fq_fp fq = {
+ .pp = bufq->pp,
+ .fqes = bufq->buf,
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ };
dma_addr_t addr;
- u16 buf_id;
-
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
- buf = &bufq->rx_buf.buf[buf_id];
-
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return -ENOMEM;
buf_desc->pkt_addr = cpu_to_le64(addr);
buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
- if (!bufq->rx_hsplit_en)
+ if (!idpf_queue_has(HSPLIT_EN, bufq))
return 0;
- buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ buf_desc->hdr_addr = cpu_to_le64(addr);
return 0;
}
@@ -3311,38 +3447,37 @@ static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
*
* This function takes care of the buffer refill management
*/
-static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
+static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
struct idpf_sw_queue *refillq)
{
struct virtchnl2_splitq_rx_buf_desc *buf_desc;
u16 bufq_nta = bufq->next_to_alloc;
u16 ntc = refillq->next_to_clean;
int cleaned = 0;
- u16 gen;
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+ buf_desc = &bufq->split_buf[bufq_nta];
/* make sure we stop at ring wrap in the unlikely case ring is full */
while (likely(cleaned < refillq->desc_count)) {
- u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+ u32 buf_id, refill_desc = refillq->ring[ntc];
bool failure;
- gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
- if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
+ if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RX_BI_GEN_M))
break;
- failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
- buf_desc);
+ buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
if (unlikely(++ntc == refillq->desc_count)) {
- change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_change(RFL_GEN_CHK, refillq);
ntc = 0;
}
if (unlikely(++bufq_nta == bufq->desc_count)) {
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+ buf_desc = &bufq->split_buf[0];
bufq_nta = 0;
} else {
buf_desc++;
@@ -3371,16 +3506,21 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
/**
* idpf_rx_clean_refillq_all - Clean all refill queues
* @bufq: buffer queue with refill queues
+ * @nid: ID of the closest NUMA node with memory
*
* Iterates through all refill queues assigned to the buffer queue assigned to
* this vector. Returns true if clean is complete within budget, false
* otherwise.
*/
-static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
+static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
{
struct idpf_bufq_set *bufq_set;
int i;
+ page_pool_nid_changed(bufq->pp, nid);
+ if (bufq->hdr_pp)
+ page_pool_nid_changed(bufq->hdr_pp, nid);
+
bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
for (i = 0; i < bufq_set->num_refillqs; i++)
idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
@@ -3441,12 +3581,16 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ kfree(q_vector->complq);
+ q_vector->complq = NULL;
kfree(q_vector->bufq);
q_vector->bufq = NULL;
kfree(q_vector->tx);
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
+
+ free_cpumask_var(q_vector->affinity_mask);
}
/* Clean up the mapping of queues to vectors */
@@ -3495,7 +3639,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
- free_irq(irq_num, q_vector);
+ kfree(free_irq(irq_num, q_vector));
}
}
@@ -3579,13 +3723,13 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
goto check_rx_itr;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
- struct idpf_queue *txq = q_vector->tx[i];
+ struct idpf_tx_queue *txq = q_vector->tx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- packets += u64_stats_read(&txq->q_stats.tx.packets);
- bytes += u64_stats_read(&txq->q_stats.tx.bytes);
+ packets += u64_stats_read(&txq->q_stats.packets);
+ bytes += u64_stats_read(&txq->q_stats.bytes);
} while (u64_stats_fetch_retry(&txq->stats_sync, start));
}
@@ -3598,13 +3742,13 @@ check_rx_itr:
return;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
- struct idpf_queue *rxq = q_vector->rx[i];
+ struct idpf_rx_queue *rxq = q_vector->rx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- packets += u64_stats_read(&rxq->q_stats.rx.packets);
- bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
+ packets += u64_stats_read(&rxq->q_stats.packets);
+ bytes += u64_stats_read(&rxq->q_stats.bytes);
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
}
@@ -3646,6 +3790,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
for (vector = 0; vector < vport->num_q_vectors; vector++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ char *name;
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
@@ -3659,18 +3804,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
else
continue;
- q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- basename, vec_name, vidx);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%d", basename, vec_name,
+ vidx);
err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
- q_vector->name, q_vector);
+ name, q_vector);
if (err) {
netdev_err(vport->netdev,
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
/* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -3679,7 +3824,7 @@ free_q_irqs:
while (--vector >= 0) {
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- free_irq(irq_num, &vport->q_vectors[vector]);
+ kfree(free_irq(irq_num, &vport->q_vectors[vector]));
}
return err;
@@ -3746,9 +3891,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_deinit(struct idpf_vport *vport)
{
+ idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_rel_irq(vport);
}
@@ -3846,16 +3991,17 @@ static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
int budget, int *cleaned)
{
- u16 num_txq = q_vec->num_txq;
+ u16 num_complq = q_vec->num_complq;
bool clean_complete = true;
int i, budget_per_q;
- if (unlikely(!num_txq))
+ if (unlikely(!num_complq))
return true;
- budget_per_q = DIV_ROUND_UP(budget, num_txq);
- for (i = 0; i < num_txq; i++)
- clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
+ budget_per_q = DIV_ROUND_UP(budget, num_complq);
+
+ for (i = 0; i < num_complq; i++)
+ clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
budget_per_q, cleaned);
return clean_complete;
@@ -3876,13 +4022,14 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
bool clean_complete = true;
int pkts_cleaned = 0;
int i, budget_per_q;
+ int nid;
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
@@ -3893,8 +4040,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
}
*cleaned = pkts_cleaned;
+ nid = numa_mem_id();
+
for (i = 0; i < q_vec->num_bufq; i++)
- idpf_rx_clean_refillq_all(q_vec->bufq[i]);
+ idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
return clean_complete;
}
@@ -3937,8 +4086,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
* queues virtchnl message, as the interrupts will be disabled after
* that
*/
- if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
- q_vector->tx[0]->flags)))
+ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+ q_vector->tx[0])))
return budget;
else
return work_done;
@@ -3952,27 +4101,28 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
u16 num_txq_grp = vport->num_txq_grp;
- int i, j, qv_idx, bufq_vidx = 0;
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
- struct idpf_queue *q, *bufq;
- u16 q_index;
+ u32 i, qv_idx, q_index;
for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
u16 num_rxq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3980,52 +4130,53 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
- qv_idx++;
+
+ if (split)
+ q->napi = &q->q_vector->napi;
}
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (split) {
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *bufq;
+
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[bufq_vidx];
+ bufq->q_vector = &vport->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
}
- if (++bufq_vidx >= vport->num_q_vectors)
- bufq_vidx = 0;
}
+
+ qv_idx++;
}
+ split = idpf_is_queue_model_split(vport->txq_model);
+
for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
tx_qgrp = &vport->txq_grps[i];
num_txq = tx_qgrp->num_txq;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_txq; j++) {
+ struct idpf_tx_queue *q;
- q = tx_qgrp->complq;
+ q = tx_qgrp->txqs[j];
q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
- qv_idx++;
- } else {
- for (j = 0; j < num_txq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ q->q_vector->tx[q->q_vector->num_txq++] = q;
+ }
- q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
+ if (split) {
+ struct idpf_compl_queue *q = tx_qgrp->complq;
- qv_idx++;
- }
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector->complq[q->q_vector->num_complq++] = q;
}
+
+ qv_idx++;
}
}
@@ -4086,7 +4237,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(v_idx, q_vector->affinity_mask);
}
}
@@ -4101,18 +4252,22 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_q_vector *q_vector;
- int v_idx, err;
+ u32 complqs_per_vector, v_idx;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
+ txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors);
bufqs_per_vector = vport->num_bufqs_per_qgrp *
DIV_ROUND_UP(vport->num_rxq_grp,
vport->num_q_vectors);
+ complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
@@ -4126,32 +4281,33 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- q_vector->tx = kcalloc(txqs_per_vector,
- sizeof(struct idpf_queue *),
+ if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+ goto error;
+
+ q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
- if (!q_vector->tx) {
- err = -ENOMEM;
+ if (!q_vector->tx)
goto error;
- }
- q_vector->rx = kcalloc(rxqs_per_vector,
- sizeof(struct idpf_queue *),
+ q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
GFP_KERNEL);
- if (!q_vector->rx) {
- err = -ENOMEM;
+ if (!q_vector->rx)
goto error;
- }
if (!idpf_is_queue_model_split(vport->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
- sizeof(struct idpf_queue *),
+ sizeof(*q_vector->bufq),
GFP_KERNEL);
- if (!q_vector->bufq) {
- err = -ENOMEM;
+ if (!q_vector->bufq)
+ goto error;
+
+ q_vector->complq = kcalloc(complqs_per_vector,
+ sizeof(*q_vector->complq),
+ GFP_KERNEL);
+ if (!q_vector->complq)
goto error;
- }
}
return 0;
@@ -4159,7 +4315,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
error:
idpf_vport_intr_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
@@ -4179,7 +4335,6 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
idpf_vport_intr_map_vector_to_qs(vport);
idpf_vport_intr_napi_add_all(vport);
- idpf_vport_intr_napi_ena_all(vport);
err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
if (err)
@@ -4193,17 +4348,20 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
if (err)
goto unroll_vectors_alloc;
- idpf_vport_intr_ena_irq_all(vport);
-
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
return err;
}
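+/**
+ * idpf_vport_intr_ena - Enable NAPI and interrupts for all vectors
+ * @vport: virtual port whose queue vectors to enable
+ */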
+void idpf_vport_intr_ena(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_ena_all(vport);
+ idpf_vport_intr_ena_irq_all(vport);
+}
+
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 3d046b81e507..6215dbee5546 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -4,10 +4,13 @@
#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_
-#include <net/page_pool/helpers.h>
+#include <linux/dim.h>
+
+#include <net/libeth/cache.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>
+#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"
#define IDPF_LARGE_MAX_Q 256
@@ -83,7 +86,7 @@
do { \
if (unlikely(++(ntc) == (rxq)->desc_count)) { \
ntc = 0; \
- change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \
+ idpf_queue_change(GEN_CHK, rxq); \
} \
} while (0)
@@ -93,16 +96,10 @@ do { \
idx = 0; \
} while (0)
-#define IDPF_RX_HDR_SIZE 256
-#define IDPF_RX_BUF_2048 2048
-#define IDPF_RX_BUF_4096 4096
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
-/* Size of header buffer specifically for header split */
-#define IDPF_HDR_BUF_SIZE 256
-#define IDPF_PACKET_HDR_PAD \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
+
#define IDPF_TX_TSO_MIN_MSS 88
/* Minimum number of descriptors between 2 descriptors with the RE bit set;
@@ -110,36 +107,17 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_BUFID_S 0
-#define IDPF_RX_BI_BUFID_M GENMASK(14, 0)
-#define IDPF_RX_BI_GEN_S 15
-#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S)
+#define IDPF_RX_BI_GEN_M BIT(16)
+#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+
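With the new layout, a refill-queue entry is a single u32 word: bits 15:0 carry the buffer ID and bit 16 carries the software generation flag. A minimal decode sketch (not patch content) mirroring the consumer side in idpf_rx_clean_refillq() earlier in this diff; the function name is hypothetical and U32_MAX merely marks "nothing to pop" for the example:

static u32 example_refill_pop(const struct idpf_sw_queue *refillq, u16 ntc)
{
	u32 refill_desc = refillq->ring[ntc];

	/* entry is valid only while its gen bit matches the queue's RFL_GEN_CHK state */
	if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
	    !!(refill_desc & IDPF_RX_BI_GEN_M))
		return U32_MAX;

	/* the low 16 bits hold the index into the buffer queue's buffer array */
	return FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
}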
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
-
-#define IDPF_BASE_TX_DESC(txq, i) \
- (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
- (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-
-#define IDPF_FLEX_TX_DESC(txq, i) \
- (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
-
#define IDPF_DESC_UNUSED(txq) \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
+#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
(txq)->desc_count >> 2)
@@ -315,16 +293,7 @@ struct idpf_rx_extracted {
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
-#define IDPF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-#define IDPF_RX_DESC(rxq, i) \
- (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
-
-struct idpf_rx_buf {
- struct page *page;
- unsigned int page_offset;
- u16 truesize;
-};
+#define idpf_rx_buf libeth_fqe
#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
@@ -348,72 +317,6 @@ struct idpf_rx_buf {
#define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF
-/* Packet type non-ip values */
-enum idpf_rx_ptype_l2 {
- IDPF_RX_PTYPE_L2_RESERVED = 0,
- IDPF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IDPF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IDPF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IDPF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IDPF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IDPF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IDPF_RX_PTYPE_L2_ARP = 11,
-};
-
-enum idpf_rx_ptype_outer_ip {
- IDPF_RX_PTYPE_OUTER_L2 = 0,
- IDPF_RX_PTYPE_OUTER_IP = 1,
-};
-
-#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \
- (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
- ((ptype)->outer_ip_ver == (ipv)))
-
-enum idpf_rx_ptype_outer_ip_ver {
- IDPF_RX_PTYPE_OUTER_NONE = 0,
- IDPF_RX_PTYPE_OUTER_IPV4 = 1,
- IDPF_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_outer_fragmented {
- IDPF_RX_PTYPE_NOT_FRAG = 0,
- IDPF_RX_PTYPE_FRAG = 1,
-};
-
-enum idpf_rx_ptype_tunnel_type {
- IDPF_RX_PTYPE_TUNNEL_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum idpf_rx_ptype_tunnel_end_prot {
- IDPF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_inner_prot {
- IDPF_RX_PTYPE_INNER_PROT_NONE = 0,
- IDPF_RX_PTYPE_INNER_PROT_UDP = 1,
- IDPF_RX_PTYPE_INNER_PROT_TCP = 2,
- IDPF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IDPF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum idpf_rx_ptype_payload_layer {
- IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
enum idpf_tunnel_state {
IDPF_PTYPE_TUNNEL_IP = BIT(0),
IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
@@ -421,22 +324,9 @@ enum idpf_tunnel_state {
};
struct idpf_ptype_state {
- bool outer_ip;
- bool outer_frag;
- u8 tunnel_state;
-};
-
-struct idpf_rx_ptype_decoded {
- u32 ptype:10;
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
+ bool outer_ip:1;
+ bool outer_frag:1;
+ u8 tunnel_state:6;
};
/**
@@ -452,23 +342,37 @@ struct idpf_rx_ptype_decoded {
* to 1 and knows that reading a gen bit of 1 in any
* descriptor on the initial pass of the ring indicates a
* writeback. It also flips on every ring wrap.
- * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
- * and RFLGQ_GEN is the SW bit.
+ * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
+ * bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_POLL_MODE: Enable poll mode
+ * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
__IDPF_Q_GEN_CHK,
- __IDPF_RFLQ_GEN_CHK,
+ __IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_POLL_MODE,
+ __IDPF_Q_CRC_EN,
+ __IDPF_Q_HSPLIT_EN,
__IDPF_Q_FLAGS_NBITS,
};
+#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
+
+#define idpf_queue_has_clear(f, q) \
+ __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_assign(f, q, v) \
+ __assign_bit(__IDPF_Q_##f, (q)->flags, v)
+
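A minimal usage sketch of the new flag helpers, combining two call sites that appear later in this patch; the macros paste the __IDPF_Q_ prefix, so callers pass only the flag suffix:

	if (idpf_queue_has(FLOW_SCH_EN, txq))	/* test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags) */
		idpf_queue_set(SW_MARKER, txq);	/* __set_bit(__IDPF_Q_SW_MARKER, txq->flags) */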
/**
* struct idpf_vec_regs
* @dyn_ctl_reg: Dynamic control interrupt register offset
@@ -509,54 +413,68 @@ struct idpf_intr_reg {
/**
* struct idpf_q_vector
* @vport: Vport back pointer
- * @affinity_mask: CPU affinity mask
- * @napi: napi handler
- * @v_idx: Vector index
- * @intr_reg: See struct idpf_intr_reg
+ * @num_rxq: Number of RX queues
* @num_txq: Number of TX queues
+ * @num_bufq: Number of buffer queues
+ * @num_complq: number of completion queues
+ * @rx: Array of RX queues to service
* @tx: Array of TX queues to service
+ * @bufq: Array of buffer queues to service
+ * @complq: array of completion queues
+ * @intr_reg: See struct idpf_intr_reg
+ * @napi: napi handler
+ * @total_events: Number of interrupts processed
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
* @tx_itr_idx: TX ITR index
- * @num_rxq: Number of RX queues
- * @rx: Array of RX queues to service
* @rx_dim: Data for RX net_dim algorithm
* @rx_itr_value: RX interrupt throttling rate
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
- * @num_bufq: Number of buffer queues
- * @bufq: Array of buffer queues to service
- * @total_events: Number of interrupts processed
- * @name: Queue vector name
+ * @v_idx: Vector index
+ * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
+ __cacheline_group_begin_aligned(read_mostly);
struct idpf_vport *vport;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- u16 v_idx;
- struct idpf_intr_reg intr_reg;
+ u16 num_rxq;
u16 num_txq;
- struct idpf_queue **tx;
+ u16 num_bufq;
+ u16 num_complq;
+ struct idpf_rx_queue **rx;
+ struct idpf_tx_queue **tx;
+ struct idpf_buf_queue **bufq;
+ struct idpf_compl_queue **complq;
+
+ struct idpf_intr_reg intr_reg;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ struct napi_struct napi;
+ u16 total_events;
+
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
u32 tx_itr_idx;
- u16 num_rxq;
- struct idpf_queue **rx;
struct dim rx_dim;
u16 rx_itr_value;
bool rx_intr_mode;
u32 rx_itr_idx;
+ __cacheline_group_end_aligned(read_write);
- u16 num_bufq;
- struct idpf_queue **bufq;
+ __cacheline_group_begin_aligned(cold);
+ u16 v_idx;
- u16 total_events;
- char *name;
+ cpumask_var_t affinity_mask;
+ __cacheline_group_end_aligned(cold);
};
+libeth_cacheline_set_assert(struct idpf_q_vector, 104,
+ 424 + 2 * sizeof(struct dim),
+ 8 + sizeof(cpumask_var_t));
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -583,11 +501,6 @@ struct idpf_cleaned_stats {
u32 bytes;
};
-union idpf_queue_stats {
- struct idpf_rx_queue_stats rx;
- struct idpf_tx_queue_stats tx;
-};
-
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
@@ -603,68 +516,123 @@ union idpf_queue_stats {
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_queue
- * @dev: Device back pointer for DMA mapping
- * @vport: Back pointer to associated vport
- * @txq_grp: See struct idpf_txq_group
- * @rxq_grp: See struct idpf_rxq_group
- * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
- * buffer queue uses this index to determine which group of refill queues
- * to clean.
- * For TX queue, it is used as index to map between TX queue group and
- * hot path TX pointers stored in vport. Used in both singleq/splitq.
- * For RX queue, it is used to index to total RX queue across groups and
+ * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
+ * @buf_stack: Stack of empty buffers to store buffer info for out of order
+ * buffer completions. See struct idpf_buf_lifo
+ * @sched_buf_hash: Hash table to store buffers
+ */
+struct idpf_txq_stash {
+ struct idpf_buf_lifo buf_stack;
+ DECLARE_HASHTABLE(sched_buf_hash, 12);
+} ____cacheline_aligned;
+
+/**
+ * struct idpf_rx_queue - software structure representing a receive queue
+ * @rx: universal receive descriptor array
+ * @single_buf: buffer descriptor array in singleq
+ * @desc_ring: virtual descriptor ring address
+ * @bufq_sets: Pointer to the array of buffer queues in splitq mode
+ * @napi: NAPI instance corresponding to this queue (splitq)
+ * @rx_buf: See struct &libeth_fqe
+ * @pp: Page pool pointer in singleq mode
+ * @netdev: &net_device corresponding to this queue
+ * @tail: Tail offset. Used for both queue models single and split.
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For RX queue, it is used to index to total RX queue across groups and
* used for skb reporting.
- * @tail: Tail offset. Used for both queue models single and split. In splitq
- * model relevant only for TX queue and RX queue.
- * @tx_buf: See struct idpf_tx_buf
- * @rx_buf: Struct with RX buffer related members
- * @rx_buf.buf: See struct idpf_rx_buf
- * @rx_buf.hdr_buf_pa: DMA handle
- * @rx_buf.hdr_buf_va: Virtual address
- * @pp: Page pool pointer
- * @skb: Pointer to the skb
- * @q_type: Queue type (TX, RX, TX completion, RX buffer)
- * @q_id: Queue id
* @desc_count: Number of descriptors
- * @next_to_use: Next descriptor to use. Relevant in both split & single txq
- * and bufq.
- * @next_to_clean: Next descriptor to clean. In split queue model, only
- * relevant to TX completion queue and RX queue.
- * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
- * only relevant to RX queue.
- * @flags: See enum idpf_queue_flags_t
- * @q_stats: See union idpf_queue_stats
+ * @rxdids: Supported RX descriptor ids
+ * @rx_ptype_lkup: LUT of Rx ptypes
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @skb: Pointer to the skb
+ * @truesize: data buffer truesize in singleq
* @stats_sync: See struct u64_stats_sync
- * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
- * the TX completion queue, it can be for any TXQ associated
- * with that completion queue. This means we can clean up to
- * N TXQs during a single call to clean the completion queue.
- * cleaned_bytes|pkts tracks the clean stats per TXQ during
- * that single call to clean the completion queue. By doing so,
- * we can update BQL with aggregate cleaned stats for each TXQ
- * only once at the end of the cleaning routine.
- * @cleaned_pkts: Number of packets cleaned for the above said case
- * @rx_hsplit_en: RX headsplit enable
+ * @q_stats: See struct idpf_rx_queue_stats
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
* @rx_hbuf_size: Header buffer size
* @rx_buf_size: Buffer size
* @rx_max_pkt_size: RX max packet size
- * @rx_buf_stride: RX buffer stride
- * @rx_buffer_low_watermark: RX buffer low watermark
- * @rxdids: Supported RX descriptor ids
- * @q_vector: Backreference to associated vector
- * @size: Length of descriptor ring in bytes
- * @dma: Physical address of ring
- * @desc_ring: Descriptor ring memory
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ */
+struct idpf_rx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ union {
+ union virtchnl2_rx_desc *rx;
+ struct virtchnl2_singleq_rx_buf_desc *single_buf;
+
+ void *desc_ring;
+ };
+ union {
+ struct {
+ struct idpf_bufq_set *bufq_sets;
+ struct napi_struct *napi;
+ };
+ struct {
+ struct libeth_fqe *rx_buf;
+ struct page_pool *pp;
+ };
+ };
+ struct net_device *netdev;
+ void __iomem *tail;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
+ u16 desc_count;
+
+ u32 rxdids;
+ const struct libeth_rx_pt *rx_ptype_lkup;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+
+ struct sk_buff *skb;
+ u32 truesize;
+
+ struct u64_stats_sync stats_sync;
+ struct idpf_rx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+
+ u16 rx_buffer_low_watermark;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ u16 rx_max_pkt_size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
+ 80 + sizeof(struct u64_stats_sync),
+ 32);
+
+/**
+ * struct idpf_tx_queue - software structure representing a transmit queue
+ * @base_tx: base Tx descriptor array
+ * @base_ctx: base Tx context descriptor array
+ * @flex_tx: flex Tx descriptor array
+ * @flex_ctx: flex Tx context descriptor array
+ * @desc_ring: virtual descriptor ring address
+ * @tx_buf: See struct idpf_tx_buf
+ * @txq_grp: See struct idpf_txq_group
+ * @dev: Device back pointer for DMA mapping
+ * @tail: Tail offset. Used for both queue models single and split
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For TX queue, it is used as index to map between TX queue group and
+ * hot path TX pointers stored in vport. Used in both singleq/splitq.
+ * @desc_count: Number of descriptors
* @tx_min_pkt_len: Min supported packet length
- * @num_completions: Only relevant for TX completion queue. It tracks the
- * number of completions received to compare against the
- * number of completions pending, as accumulated by the
- * TX queues.
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo.
- * @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_gen_s: Completion tag generation bit
* The format of the completion tag will change based on the TXQ
* descriptor ring size so that we can maintain roughly the same level
@@ -685,108 +653,238 @@ union idpf_queue_stats {
* --------------------------------
*
* This gives us 8*8160 = 65280 possible unique values.
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
+ * the TX completion queue, it can be for any TXQ associated
+ * with that completion queue. This means we can clean up to
+ * N TXQs during a single call to clean the completion queue.
+ * cleaned_bytes|pkts tracks the clean stats per TXQ during
+ * that single call to clean the completion queue. By doing so,
+ * we can update BQL with aggregate cleaned stats for each TXQ
+ * only once at the end of the cleaning routine.
+ * @clean_budget: singleq only, queue cleaning budget
+ * @cleaned_pkts: Number of packets cleaned for the above said case
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ * @stash: Tx buffer stash for Flow-based scheduling mode
+ * @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
- * @sched_buf_hash: Hash table to stores buffers
+ * @stats_sync: See struct u64_stats_sync
+ * @q_stats: See struct idpf_tx_queue_stats
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
*/
-struct idpf_queue {
- struct device *dev;
- struct idpf_vport *vport;
+struct idpf_tx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
union {
- struct idpf_txq_group *txq_grp;
- struct idpf_rxq_group *rxq_grp;
+ struct idpf_base_tx_desc *base_tx;
+ struct idpf_base_tx_ctx_desc *base_ctx;
+ union idpf_tx_flex_desc *flex_tx;
+ struct idpf_flex_tx_ctx_desc *flex_ctx;
+
+ void *desc_ring;
};
- u16 idx;
+ struct idpf_tx_buf *tx_buf;
+ struct idpf_txq_group *txq_grp;
+ struct device *dev;
void __iomem *tail;
- union {
- struct idpf_tx_buf *tx_buf;
- struct {
- struct idpf_rx_buf *buf;
- dma_addr_t hdr_buf_pa;
- void *hdr_buf_va;
- } rx_buf;
- };
- struct page_pool *pp;
- struct sk_buff *skb;
- u16 q_type;
- u32 q_id;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
u16 desc_count;
+ u16 tx_min_pkt_len;
+ u16 compl_tag_gen_s;
+
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
- DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
- union idpf_queue_stats q_stats;
+ union {
+ u32 cleaned_bytes;
+ u32 clean_budget;
+ };
+ u16 cleaned_pkts;
+
+ u16 tx_max_bufs;
+ struct idpf_txq_stash *stash;
+
+ u16 compl_tag_bufid_m;
+ u16 compl_tag_cur_gen;
+ u16 compl_tag_gen_max;
+
struct u64_stats_sync stats_sync;
+ struct idpf_tx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
- u32 cleaned_bytes;
- u16 cleaned_pkts;
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
- bool rx_hsplit_en;
- u16 rx_hbuf_size;
- u16 rx_buf_size;
- u16 rx_max_pkt_size;
- u16 rx_buf_stride;
- u8 rx_buffer_low_watermark;
- u64 rxdids;
struct idpf_q_vector *q_vector;
- unsigned int size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
+ 88 + sizeof(struct u64_stats_sync),
+ 24);
+
+/**
+ * struct idpf_buf_queue - software structure representing a buffer queue
+ * @split_buf: buffer descriptor array
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
+ * @buf: &libeth_fqe for data buffers
+ * @pp: &page_pool for data buffers
+ * @tail: Tail offset
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @hdr_truesize: truesize for header buffers
+ * @truesize: truesize for data buffers
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
+ * @rx_hbuf_size: Header buffer size
+ * @rx_buf_size: Buffer size
+ */
+struct idpf_buf_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ struct virtchnl2_splitq_rx_buf_desc *split_buf;
+ struct libeth_fqe *hdr_buf;
+ struct page_pool *hdr_pp;
+ struct libeth_fqe *buf;
+ struct page_pool *pp;
+ void __iomem *tail;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
+
+ u32 hdr_truesize;
+ u32 truesize;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
dma_addr_t dma;
- void *desc_ring;
- u16 tx_max_bufs;
- u8 tx_min_pkt_len;
+ struct idpf_q_vector *q_vector;
- u32 num_completions;
+ u16 rx_buffer_low_watermark;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
- struct idpf_buf_lifo buf_stack;
+/**
+ * struct idpf_compl_queue - software structure representing a completion queue
+ * @comp: completion descriptor array
+ * @txq_grp: See struct idpf_txq_group
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @clean_budget: queue cleaning budget
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use. Relevant in both split & single txq
+ * and bufq.
+ * @next_to_clean: Next descriptor to clean
+ * @num_completions: Only relevant for TX completion queue. It tracks the
+ * number of completions received to compare against the
+ * number of completions pending, as accumulated by the
+ * TX queues.
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ */
+struct idpf_compl_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ struct idpf_splitq_tx_compl_desc *comp;
+ struct idpf_txq_group *txq_grp;
- u16 compl_tag_bufid_m;
- u16 compl_tag_gen_s;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ u32 clean_budget;
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_internodealigned_in_smp;
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+
+ u32 num_completions;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
/**
* struct idpf_sw_queue
- * @next_to_clean: Next descriptor to clean
- * @next_to_alloc: Buffer to allocate at
- * @flags: See enum idpf_queue_flags_t
* @ring: Pointer to the ring
+ * @flags: See enum idpf_queue_flags_t
* @desc_count: Descriptor count
- * @dev: Device back pointer for DMA mapping
+ * @next_to_use: Buffer to allocate at
+ * @next_to_clean: Next descriptor to clean
*
* Software queues are used in splitq mode to manage buffers between rxq
* producer and the bufq consumer. These are required in order to maintain a
* lockless buffer management system and are strictly software only constructs.
*/
struct idpf_sw_queue {
- u16 next_to_clean;
- u16 next_to_alloc;
+ __cacheline_group_begin_aligned(read_mostly);
+ u32 *ring;
+
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
- u16 *ring;
- u16 desc_count;
- struct device *dev;
-} ____cacheline_internodealigned_in_smp;
+ u32 desc_count;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ __cacheline_group_end_aligned(read_write);
+};
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
+libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
/**
* struct idpf_rxq_set
* @rxq: RX queue
- * @refillq0: Pointer to refill queue 0
- * @refillq1: Pointer to refill queue 1
+ * @refillq: pointers to refill queues
*
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
* Each rxq needs a refillq to return used buffers back to the respective bufq.
* Bufqs then clean these refillqs for buffers to give to hardware.
*/
struct idpf_rxq_set {
- struct idpf_queue rxq;
- struct idpf_sw_queue *refillq0;
- struct idpf_sw_queue *refillq1;
+ struct idpf_rx_queue rxq;
+ struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};
/**
@@ -805,7 +903,7 @@ struct idpf_rxq_set {
* managed by at most two bufqs (depending on performance configuration).
*/
struct idpf_bufq_set {
- struct idpf_queue bufq;
+ struct idpf_buf_queue bufq;
int num_refillqs;
struct idpf_sw_queue *refillqs;
};
@@ -831,7 +929,7 @@ struct idpf_rxq_group {
union {
struct {
u16 num_rxq;
- struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
+ struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
} singleq;
struct {
u16 num_rxq_sets;
@@ -846,6 +944,7 @@ struct idpf_rxq_group {
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
+ * @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
 * completion queue, accumulated for all TX queues
@@ -859,13 +958,26 @@ struct idpf_txq_group {
struct idpf_vport *vport;
u16 num_txq;
- struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
+ struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
+ struct idpf_txq_stash *stashes;
- struct idpf_queue *complq;
+ struct idpf_compl_queue *complq;
u32 num_completions_pending;
};
+static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
+{
+ u32 cpu;
+
+ if (!q_vector)
+ return NUMA_NO_NODE;
+
+ cpu = cpumask_first(q_vector->affinity_mask);
+
+ return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
+}
+
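A hedged sketch of how the helper above is meant to be used: allocating per-queue state on the memory node backing the vector's first CPU. kzalloc_node() is standard kernel API; the surrounding variables are hypothetical:

	struct idpf_rx_queue *rxq;

	/* place the queue's state on the vector's NUMA node when known */
	rxq = kzalloc_node(sizeof(*rxq), GFP_KERNEL,
			   idpf_q_vector_to_mem(q_vector));
	if (!rxq)
		return -ENOMEM;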
/**
* idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
* @size: transmit request size in bytes
@@ -921,60 +1033,6 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}
-/**
- * idpf_alloc_page - Allocate a new RX buffer from the page pool
- * @pool: page_pool to allocate from
- * @buf: metadata struct to populate with page info
- * @buf_size: 2K or 4K
- *
- * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
- */
-static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
- struct idpf_rx_buf *buf,
- unsigned int buf_size)
-{
- if (buf_size == IDPF_RX_BUF_2048)
- buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
- buf_size);
- else
- buf->page = page_pool_dev_alloc_pages(pool);
-
- if (!buf->page)
- return DMA_MAPPING_ERROR;
-
- buf->truesize = buf_size;
-
- return page_pool_get_dma_addr(buf->page) + buf->page_offset +
- pool->p.offset;
-}
-
-/**
- * idpf_rx_put_page - Return RX buffer page to pool
- * @rx_buf: RX buffer metadata struct
- */
-static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
-{
- page_pool_put_page(rx_buf->page->pp, rx_buf->page,
- rx_buf->truesize, true);
- rx_buf->page = NULL;
-}
-
-/**
- * idpf_rx_sync_for_cpu - Synchronize DMA buffer
- * @rx_buf: RX buffer metadata struct
- * @len: frame length from descriptor
- */
-static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
-{
- struct page *page = rx_buf->page;
- struct page_pool *pp = page->pp;
-
- dma_sync_single_range_for_cpu(pp->p.dev,
- page_pool_get_dma_addr(page),
- rx_buf->page_offset + pp->p.offset, len,
- page_pool_get_dma_dir(pp));
-}
-
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -990,35 +1048,28 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
+void idpf_vport_intr_ena(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size);
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size);
-bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
+void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb);
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count);
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
+int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev);
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev);
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q);
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a5f9b7a5effe..70986e12da28 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
@@ -750,7 +752,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
int i;
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
+ idpf_queue_set(SW_MARKER, vport->txqs[i]);
event = wait_event_timeout(vport->sw_marker_wq,
test_and_clear_bit(IDPF_VPORT_SW_MARKER,
@@ -758,7 +760,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
msecs_to_jiffies(500));
for (i = 0; i < vport->num_txq; i++)
- clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_clear(POLL_MODE, vport->txqs[i]);
if (event)
return 0;
@@ -1092,7 +1094,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -1111,6 +1112,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1123,6 +1126,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1253,12 +1258,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
vport_msg->vport_index = cpu_to_le16(idx);
- if (adapter->req_tx_splitq)
+ if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
- if (adapter->req_rx_splitq)
+ if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
@@ -1320,10 +1325,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport_msg = adapter->vport_params_recvd[vport->idx];
+ if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
+ (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
+ vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
+ pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
+ return -EOPNOTSUPP;
+ }
+
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
@@ -1333,7 +1345,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport->base_rxd = true;
}
- if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ if (!idpf_is_queue_model_split(vport->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1449,19 +1461,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].model =
cpu_to_le16(vport->txq_model);
qi[k].type =
- cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi[k].ring_len =
cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
qi[k].dma_ring_addr =
cpu_to_le64(tx_qgrp->txqs[j]->dma);
if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
qi[k].tx_compl_queue_id =
cpu_to_le16(tx_qgrp->complq->q_id);
qi[k].relative_queue_id = cpu_to_le16(j);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, q))
qi[k].sched_mode =
cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
else
@@ -1478,11 +1490,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
else
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
@@ -1567,17 +1579,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto setup_rxqs;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *bufq =
+ struct idpf_buf_queue *bufq =
&rx_qgrp->splitq.bufq_sets[j].bufq;
qi[k].queue_id = cpu_to_le32(bufq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(bufq->q_type);
+ qi[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi[k].ring_len = cpu_to_le16(bufq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = bufq->rx_buf_stride;
+ qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
qi[k].rx_buffer_low_watermark =
cpu_to_le16(bufq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
@@ -1591,35 +1604,47 @@ setup_rxqs:
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ const struct idpf_bufq_set *sets;
+ struct idpf_rx_queue *rxq;
if (!idpf_is_queue_model_split(vport->rxq_model)) {
rxq = rx_qgrp->singleq.rxqs[j];
goto common_qi_fields;
}
+
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- qi[k].rx_bufq1_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
+ sets = rxq->bufq_sets;
+
+ /* In splitq mode, RXQ buffer size should be
+ * set to that of the first buffer queue
+ * associated with this RXQ.
+ */
+ rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
+
+ qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
qi[k].rx_bufq2_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
+ cpu_to_le16(sets[1].bufq.q_id);
}
qi[k].rx_buffer_low_watermark =
cpu_to_le16(rxq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-common_qi_fields:
- if (rxq->rx_hsplit_en) {
+ rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, rxq)) {
qi[k].qflags |=
cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
qi[k].hdr_buffer_size =
cpu_to_le16(rxq->rx_hbuf_size);
}
+
+common_qi_fields:
qi[k].queue_id = cpu_to_le32(rxq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(rxq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi[k].ring_len = cpu_to_le16(rxq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
@@ -1706,7 +1731,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1720,7 +1745,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
for (i = 0; i < vport->num_txq_grp; i++, k++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1741,12 +1766,12 @@ setup_rx:
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
} else {
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
}
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1761,10 +1786,11 @@ setup_rx:
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *q;
+ const struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type = cpu_to_le32(q->q_type);
+ qc[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qc[k].start_queue_id = cpu_to_le32(q->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1849,7 +1875,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
if (idpf_is_queue_model_split(vport->txq_model)) {
@@ -1879,14 +1906,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
rxq = rx_qgrp->singleq.rxqs[j];
- vqv[k].queue_type = cpu_to_le32(rxq->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
vqv[k].queue_id = cpu_to_le32(rxq->q_id);
vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
@@ -1975,7 +2003,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* queues virtchnl message is sent
*/
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_set(POLL_MODE, vport->txqs[i]);
/* schedule the napi to receive all the marker packets */
local_bh_disable();
@@ -2469,39 +2497,52 @@ do_memcpy:
* @frag: fragmentation allowed
*
*/
-static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
struct idpf_ptype_state *pstate,
bool ipv4, bool frag)
{
if (!pstate->outer_ip || !pstate->outer_frag) {
- ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
pstate->outer_ip = true;
if (ipv4)
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
else
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
if (frag) {
- ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->outer_frag = LIBETH_RX_PT_FRAG;
pstate->outer_frag = true;
}
} else {
- ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
if (ipv4)
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
else
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
if (frag)
- ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
}
}
+static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
+{
+ if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->inner_prot)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
+ else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->outer_ip)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
+ else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ else
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
+
+ libeth_rx_pt_gen_hash_type(ptype);
+}
+
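To make the promotion order in idpf_finalize_ptype_lookup() above concrete, consider a TCP ptype as filled in by the switch statement later in this hunk:

	/* VIRTCHNL2_PROTO_HDR_PAY sets payload_layer = LIBETH_RX_PT_PAYLOAD_L2 and
	 * VIRTCHNL2_PROTO_HDR_TCP sets inner_prot = LIBETH_RX_PT_INNER_TCP, so the
	 * first branch upgrades the payload layer to LIBETH_RX_PT_PAYLOAD_L4, which
	 * libeth_rx_pt_gen_hash_type() then uses to derive the hash type.
	 */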
/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
* @vport: virtual port data structure
@@ -2512,7 +2553,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vc_xn_params xn_params = {};
@@ -2520,12 +2561,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ssize_t reply_sz;
int i, j, k;
+ if (vport->rx_ptype_lkup)
+ return 0;
+
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
else
max_ptype = IDPF_RX_MAX_BASE_PTYPE;
- memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
+ if (!ptype_lkup)
+ return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
if (!get_ptype_info)
@@ -2583,16 +2629,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
IDPF_INVALID_PTYPE_ID)
- return 0;
+ goto out;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
else
k = ptype->ptype_id_8;
- if (ptype->proto_id_count)
- ptype_lkup[k].known = 1;
-
for (j = 0; j < ptype->proto_id_count; j++) {
id = le16_to_cpu(ptype->proto_id[j]);
switch (id) {
@@ -2600,18 +2643,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (pstate.tunnel_state ==
IDPF_PTYPE_TUNNEL_IP) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT;
}
break;
case VIRTCHNL2_PROTO_HDR_MAC:
ptype_lkup[k].outer_ip =
- IDPF_RX_PTYPE_OUTER_L2;
+ LIBETH_RX_PT_OUTER_L2;
if (pstate.tunnel_state ==
IDPF_TUN_IP_GRE) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
}
@@ -2638,23 +2681,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
case VIRTCHNL2_PROTO_HDR_UDP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_UDP;
+ LIBETH_RX_PT_INNER_UDP;
break;
case VIRTCHNL2_PROTO_HDR_TCP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_TCP;
+ LIBETH_RX_PT_INNER_TCP;
break;
case VIRTCHNL2_PROTO_HDR_SCTP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ LIBETH_RX_PT_INNER_SCTP;
break;
case VIRTCHNL2_PROTO_HDR_ICMP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ LIBETH_RX_PT_INNER_ICMP;
break;
case VIRTCHNL2_PROTO_HDR_PAY:
ptype_lkup[k].payload_layer =
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ LIBETH_RX_PT_PAYLOAD_L2;
break;
case VIRTCHNL2_PROTO_HDR_ICMPV6:
case VIRTCHNL2_PROTO_HDR_IPV6_EH:
@@ -2708,9 +2751,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
}
}
+
+ idpf_finalize_ptype_lookup(&ptype_lkup[k]);
}
}
+out:
+ vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+
return 0;
}
@@ -3125,7 +3173,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
- vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+ vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
@@ -3242,7 +3290,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
int num_qids,
u32 q_type)
{
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -3250,11 +3297,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+ for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
- tx_qgrp->txqs[j]->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX;
- }
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
@@ -3268,12 +3312,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
}
}
break;
@@ -3282,8 +3327,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
- tx_qgrp->complq->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
@@ -3292,9 +3335,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
}
}
break;
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 394c1e0656b9..463c0d26b9d4 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -6,6 +6,6 @@
obj-$(CONFIG_IGB) += igb.o
-igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
+ e1000_i210.o igb_ptp.o igb_hwmon.o
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 61d72250c0ed..06b9970dffad 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2381,7 +2381,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static int igb_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igb_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fce2930ae6af..11be39f435f3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -203,7 +203,6 @@ static const struct pci_error_handlers igb_err_handler = {
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
@@ -9139,6 +9138,10 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -EIO;
break;
case SIOCSMIIREG:
+ if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ data->val_in))
+ return -EIO;
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index afd3e36eae75..902711d5e691 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -6,8 +6,4 @@
obj-$(CONFIG_IGBVF) += igbvf.o
-igbvf-objs := vf.o \
- mbx.o \
- ethtool.o \
- netdev.o
-
+igbvf-y := vf.o mbx.o ethtool.o netdev.o
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 7661edd7d0f2..925d7286a8ee 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -3001,7 +3001,6 @@ static void __exit igbvf_exit_module(void)
}
module_exit(igbvf_exit_module);
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index ebffd3054285..efc5e7983dad 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,7 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
-igc-$(CONFIG_IGC_LEDS) += igc_leds.o
-igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-y := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
+ igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 8b14c029eda1..c38b4d0f00ce 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -202,7 +202,6 @@ struct igc_adapter {
struct net_device *netdev;
struct ethtool_keee eee;
- u16 eee_advert;
unsigned long state;
unsigned int flags;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f2c4f1966bb0..3d3ef4e1547c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1559,7 +1559,7 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
}
static int igc_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igc_adapter *adapter = netdev_priv(dev);
@@ -1629,11 +1629,12 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
struct igc_hw *hw = &adapter->hw;
u32 eeer;
- if (hw->dev_spec._base.eee_enable)
- mii_eee_cap1_mod_linkmode_t(edata->advertised,
- adapter->eee_advert);
-
- *edata = adapter->eee;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
eeer = rd32(IGC_EEER);
@@ -1695,8 +1696,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
-
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 12f004f46082..cb5c7b09e8a0 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>
+#include <linux/mdio.h>
#include <net/ipv6.h>
@@ -31,7 +32,6 @@
static int debug = -1;
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
@@ -7028,6 +7028,8 @@ static int igc_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
+ igc_ptp_init(adapter);
+
igc_tsn_clear_schedule(adapter);
/* reset the hardware with the new settings */
@@ -7049,9 +7051,6 @@ static int igc_probe(struct pci_dev *pdev,
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
- /* do hw tstamp init after resetting */
- igc_ptp_init(adapter);
-
/* print pcie link status and MAC address */
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 1bb026232efc..946edbad4302 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -938,7 +938,11 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
{
#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML)
- return convert_art_ns_to_tsc(tstamp);
+ return (struct system_counterval_t) {
+ .cs_id = CSID_X86_ART,
+ .cycles = tstamp,
+ .use_nsecs = true,
+ };
#else
return (struct system_counterval_t) { };
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 4fb0d9e3f2da..965e5ce1b326 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -6,10 +6,10 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+ ixgbe_xsk.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6e6e6f1847b6..4cac76254966 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3170,7 +3170,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
}
static int ixgbe_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 094653e81b97..8057cef61f39 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -162,7 +162,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 186a4bb24fde..01d3e892f3fa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -6,9 +6,5 @@
obj-$(CONFIG_IXGBEVF) += ixgbevf.o
-ixgbevf-objs := vf.o \
- mbx.o \
- ethtool.o \
- ixgbevf_main.o
+ixgbevf-y := vf.o mbx.o ethtool.o ixgbevf_main.o
ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o
-
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b938dc06045d..149911e3002a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,7 +76,6 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
index cb99203d1dd2..52492b081132 100644
--- a/drivers/net/ethernet/intel/libeth/Makefile
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -3,4 +3,4 @@
obj-$(CONFIG_LIBETH) += libeth.o
-libeth-objs += rx.o
+libeth-y := rx.o
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 6221b88c34ac..f20926669318 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -6,7 +6,7 @@
/* Rx buffer management */
/**
- * libeth_rx_hw_len - get the actual buffer size to be passed to HW
+ * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
* @pp: &page_pool_params of the netdev to calculate the size for
* @max_len: maximum buffer size for a single descriptor
*
@@ -14,7 +14,7 @@
* MTU the @dev has, HW required alignment, minimum and maximum allowed values,
* and system's page size.
*/
-static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
+static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
{
u32 len;
@@ -27,6 +27,118 @@ static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
}
/**
+ * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ * @truesize: desired truesize for the buffers
+ *
+ * Return: HW-writeable length per one buffer to pass it to the HW ignoring the
+ * MTU and closest to the passed truesize. Can be used for "short" buffer
+ * queues to fragment pages more efficiently.
+ */
+static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
+ u32 max_len, u32 truesize)
+{
+ u32 min, len;
+
+ min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE);
+ truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min),
+ PAGE_SIZE << LIBETH_RX_PAGE_ORDER);
+
+ len = SKB_WITH_OVERHEAD(truesize - pp->offset);
+ len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
+/**
+ * libeth_rx_page_pool_params - calculate params with the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to fit all needed stack overhead (headroom, tailroom) and
+ * both the HW buffer length and the truesize for all types of buffers. For
+ * "short" buffers, truesize never exceeds the "wanted" one; for the rest,
+ * it can be up to the page size.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ pp->offset = LIBETH_SKB_HEADROOM;
+ /* HW-writeable / syncable length per one page */
+ pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
+
+ /* HW-writeable length per buffer */
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
+ break;
+ case LIBETH_FQE_SHORT:
+ fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
+ fq->truesize);
+ break;
+ case LIBETH_FQE_HDR:
+ fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
+ break;
+ default:
+ return false;
+ }
+
+ /* Buffer size to allocate */
+ fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
+ fq->buf_len));
+
+ return true;
+}
+
+/**
+ * libeth_rx_page_pool_params_zc - calculate params without the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to exclude the stack overhead and both the buffer length
+ * and the truesize, which are equal for the data buffers. Note that this
+ * requires separate header buffers to always be active and account for the
+ * overhead.
+ * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy
+ * mode.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ u32 mtu, max;
+
+ pp->offset = 0;
+ pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;
+
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ mtu = READ_ONCE(pp->netdev->mtu);
+ break;
+ case LIBETH_FQE_SHORT:
+ mtu = fq->truesize;
+ break;
+ default:
+ return false;
+ }
+
+ mtu = roundup_pow_of_two(mtu);
+ max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
+ pp->max_len);
+
+ fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
+ fq->truesize = fq->buf_len;
+
+ return true;
+}
+
+/**
* libeth_rx_fq_create - create a PP with the default libeth settings
* @fq: buffer queue struct to fill
* @napi: &napi_struct covering this PP (no usage outside its poll loops)
@@ -44,19 +156,17 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
.netdev = napi->dev,
.napi = napi,
.dma_dir = DMA_FROM_DEVICE,
- .offset = LIBETH_SKB_HEADROOM,
};
struct libeth_fqe *fqes;
struct page_pool *pool;
+ bool ret;
- /* HW-writeable / syncable length per one page */
- pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);
-
- /* HW-writeable length per buffer */
- fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
- /* Buffer size to allocate */
- fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
- fq->buf_len));
+ if (!fq->hsplit)
+ ret = libeth_rx_page_pool_params(fq, &pp);
+ else
+ ret = libeth_rx_page_pool_params_zc(fq, &pp);
+ if (!ret)
+ return -EINVAL;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
@@ -145,6 +255,5 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
/* Module */
-MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Common Ethernet library");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
index bf42c5aeeedd..ffd27fab916a 100644
--- a/drivers/net/ethernet/intel/libie/Makefile
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -3,4 +3,4 @@
obj-$(CONFIG_LIBIE) += libie.o
-libie-objs += rx.o
+libie-y := rx.o
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
index 38201ee1e891..aceb8d8813c4 100644
--- a/drivers/net/ethernet/intel/libie/rx.c
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -118,7 +118,6 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
};
EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
-MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
MODULE_IMPORT_NS(LIBETH);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 5352fee62d2b..9e6984815386 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
if (ch->dma.irq)
free_irq(ch->dma.irq, priv);
if (IS_RX(ch->idx)) {
- int desc;
+ struct ltq_dma_channel *dma = &ch->dma;
- for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+ for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
}
}
@@ -675,7 +675,6 @@ ltq_etop_probe(struct platform_device *pdev)
err = -ENOMEM;
goto err_out;
}
- strcpy(dev->name, "eth%d");
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
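
The ltq_etop_free_channel() hunk above fixes a loop that advanced a local desc counter but always freed ch->skb[ch->dma.desc], i.e. it released whatever slot dma.desc happened to point at NUM_DESC times and leaked the rest. A toy user-space model (made-up names and sizes, not the driver's structures) shows the before/after behaviour:

/* Toy model of the descriptor-free loop fix; names and sizes are made up. */
#include <stdio.h>

#define NUM_DESC 4

struct toy_chan {
	struct { int desc; } dma;	/* stands in for the ltq_dma_channel */
	int freed[NUM_DESC];		/* per-slot "dev_kfree_skb_any" count */
};

static void old_loop(struct toy_chan *ch)
{
	int desc;

	/* old code: the local counter advances, the index used never does */
	for (desc = 0; desc < NUM_DESC; desc++)
		ch->freed[ch->dma.desc]++;
}

static void new_loop(struct toy_chan *ch)
{
	/* new code: iterate ch->dma.desc itself, so every slot is freed once */
	for (ch->dma.desc = 0; ch->dma.desc < NUM_DESC; ch->dma.desc++)
		ch->freed[ch->dma.desc]++;
}

int main(void)
{
	struct toy_chan a = { { 0 }, { 0 } }, b = { { 0 }, { 0 } };
	int i;

	old_loop(&a);
	new_loop(&b);
	for (i = 0; i < NUM_DESC; i++)
		printf("slot %d: old loop freed it %d times, new loop %d time(s)\n",
		       i, a.freed[i], b.freed[i]);
	return 0;
}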
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e91486c48de3..8c45ad983abc 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4014,7 +4014,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
@@ -5256,7 +5259,7 @@ static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
}
static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -6904,6 +6907,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
device_set_node(&dev->dev, port_fwnode);
+ dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4a77f6fe2622..ed2160cc5acb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -139,6 +139,7 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
@@ -1716,6 +1717,13 @@ struct lmtst_tbl_setup_req {
u64 rsvd[4];
};
+struct ndc_sync_op {
+ struct mbox_msghdr hdr;
+ u8 nix_lf_tx_sync;
+ u8 nix_lf_rx_sync;
+ u8 npa_lf_sync;
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
@@ -1745,7 +1753,7 @@ struct cpt_lf_alloc_req_msg {
u16 nix_pf_func;
u16 sso_pf_func;
u16 eng_grpmsk;
- int blkaddr;
+ u8 blkaddr;
u8 ctx_ilen_valid : 1;
u8 ctx_ilen : 7;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index d883157393ea..6c3aca6f278d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CUSTOM1 = 0xF,
};
+/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
+ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
+ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
+ */
enum npc_kpu_lc_ltype {
- NPC_LT_LC_IP = 1,
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
@@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
- NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
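
The ordering rule in the comment above is what enables the single-mask match used later in this series (rvu_nix.c): once the plain and extended ltypes differ only in bit 0, masking that bit off matches both forms while still separating IPv4 from IPv6. A stand-alone check of that arithmetic, using the enum values as reordered above (local names are simplified copies, not the kernel enum):

/* Verifies the "differ only at bit 0" ltype trick from npc.h / rvu_nix.c. */
#include <assert.h>
#include <stdio.h>

enum {				/* values as reordered by the patch */
	LT_LC_PTP = 1,
	LT_LC_IP,		/* 2 */
	LT_LC_IP_OPT,		/* 3 */
	LT_LC_IP6,		/* 4 */
	LT_LC_IP6_EXT,		/* 5 */
};

/* same construction as NPC_LT_LC_IP_MATCH_MSK / NPC_LT_LC_IP6_MATCH_MSK */
#define MATCH_MSK(a, b)	((~((a) ^ (b))) & 0xf)

int main(void)
{
	unsigned int ip_msk  = MATCH_MSK(LT_LC_IP, LT_LC_IP_OPT);
	unsigned int ip6_msk = MATCH_MSK(LT_LC_IP6, LT_LC_IP6_EXT);

	printf("ipv4 mask = 0x%x, ipv6 mask = 0x%x\n", ip_msk, ip6_msk);

	/* both masks collapse to 0xE: bit 0 is ignored during the compare */
	assert(ip_msk == 0xE && ip6_msk == 0xE);

	/* plain and extended ltypes compare equal under the mask ... */
	assert((LT_LC_IP & ip_msk) == (LT_LC_IP_OPT & ip_msk));
	assert((LT_LC_IP6 & ip6_msk) == (LT_LC_IP6_EXT & ip6_msk));

	/* ... while IPv4 and IPv6 ltypes still do not */
	assert((LT_LC_IP & ip_msk) != (LT_LC_IP6 & ip_msk));

	return 0;
}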
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ff78251f92d4..ac7ee3f3598c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1643,7 +1643,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
if (req->ssow > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid SSOW req, %d > max %d\n",
- pcifunc, req->sso, block->lf.max);
+ pcifunc, req->ssow, block->lf.max);
return -EINVAL;
}
mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
@@ -2014,6 +2014,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
+{
+ /* Sync cached info for this LF in NDC to LLC/DRAM */
+ rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
+ return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
+}
+
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
@@ -2068,6 +2075,65 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
return 0;
}
+int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
+ struct ndc_sync_op *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int err, lfidx, lfblkaddr;
+
+ if (req->npa_lf_sync) {
+ /* Get NPA LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (lfblkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Sync NPA NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NPA_AF_NDC_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NPA sync failed for LF %u\n", lfidx);
+ }
+
+ if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
+ return 0;
+
+ /* Get NIX LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (lfblkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->nix_lf_tx_sync) {
+ /* Sync NIX TX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_TX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-TX sync fail for LF %u\n", lfidx);
+ }
+
+ if (req->nix_lf_rx_sync) {
+ /* Sync NIX RX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_RX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-RX sync failed for LF %u\n", lfidx);
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
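
The new rvu_ndc_sync() helper is the usual trigger-and-poll idiom: write the LF index with bit 12 set into the relevant *_NDC_*_SYNC register, then poll until hardware clears bit 12. A minimal user-space model of that idiom is sketched below; the fake register, the retry budget and the point at which the "hardware" completes are invented purely for illustration.

/* Toy model of the "set trigger bit, poll until it clears" idiom. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_sync_reg;		/* stands in for NPA/NIX *_NDC_*_SYNC */
static int hw_busy_reads = 3;		/* pretend HW needs a few polls */

static uint64_t reg_read(void)
{
	/* a real read would be MMIO; here HW "finishes" after a few reads */
	if (hw_busy_reads && --hw_busy_reads == 0)
		fake_sync_reg &= ~(1ULL << 12);
	return fake_sync_reg;
}

static int ndc_sync(unsigned int lfidx)
{
	int retries = 100;

	/* kick off the sync for this LF: bit 12 is the trigger/busy bit */
	fake_sync_reg = (1ULL << 12) | lfidx;

	/* poll until the trigger bit reads back as zero, or give up */
	while (retries--) {
		if (!(reg_read() & (1ULL << 12)))
			return 0;
	}
	return -1;	/* the driver would report an error here */
}

int main(void)
{
	printf("sync for LF 5: %s\n", ndc_sync(5) ? "timed out" : "done");
	return 0;
}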
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 35834687e40f..03ee93fd9e94 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -76,6 +76,7 @@ struct rvu_debugfs {
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct dump_ctx nix_tm_ctx;
struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
int npa_qsize_id;
int nix_qsize_id;
@@ -799,6 +800,7 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
+int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset);
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
u16 global_slot, u16 *slot_in_block);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index f047185f38e0..3e09d2285814 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -696,7 +696,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *req,
struct cpt_rd_wr_reg_msg *rsp)
{
- int blkaddr;
+ u64 offset = req->reg_offset;
+ int blkaddr, lf;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
@@ -707,17 +708,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
- rsp->reg_offset = req->reg_offset;
- rsp->ret_val = req->ret_val;
- rsp->is_write = req->is_write;
-
if (!is_valid_offset(rvu, req))
return CPT_AF_ERR_ACCESS_DENIED;
+ /* Translate local LF used by VFs to global CPT LF */
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc,
+ (offset & 0xFFF) >> 3);
+
+ /* Translate local LF's offset to global CPT LF's offset */
+ offset &= 0xFF000;
+ offset += lf << 3;
+
+ rsp->reg_offset = offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
if (req->is_write)
- rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ rvu_write64(rvu, blkaddr, offset, req->val);
else
- rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+ rsp->val = rvu_read64(rvu, blkaddr, offset);
return 0;
}
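
The offset rewrite above keeps the register-selector bits and re-bases the LF slot: bits [11:3] of the requested offset carry the VF's local LF slot, bits [19:12] select the register, and the slot field is replaced with the global LF number returned by rvu_get_lf(). A worked example of just that bit arithmetic follows; the sample offset and the global LF value are made up.

/* Worked example of the local->global CPT LF offset translation. */
#include <stdint.h>
#include <stdio.h>

static uint64_t translate(uint64_t offset, int global_lf)
{
	/* local LF slot encoded by the requester, as in the patch */
	int slot = (offset & 0xFFF) >> 3;

	printf("requested offset 0x%llx -> local slot %d\n",
	       (unsigned long long)offset, slot);

	/* keep the register selector, re-base onto the global LF number */
	offset &= 0xFF000;
	offset += (uint64_t)global_lf << 3;

	return offset;
}

int main(void)
{
	/* e.g. a VF touching register group 0x10000, its local slot 0 */
	uint64_t local = 0x10000;
	int global_lf = 12;	/* hypothetical value from rvu_get_lf() */

	printf("global offset 0x%llx\n",
	       (unsigned long long)translate(local, global_lf));
	return 0;
}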
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 881d704644fb..4a4ef5bd9e0b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1603,6 +1603,367 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
(u64)sq_ctx->dropped_pkts);
}
+static void print_tm_tree(struct seq_file *m,
+ struct nix_aq_enq_rsp *rsp, u64 sq)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ u16 p1, p2, p3, p4, schq;
+ int blkaddr;
+ u64 cfg;
+
+ blkaddr = nix_hw->blkaddr;
+ schq = sq_ctx->smq;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
+ p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
+ p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
+ p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
+ p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
+ seq_printf(m,
+ "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
+ sq, schq, p1, p2, p3, p4);
+}
+
+/* Dumps given tm_tree registers */
+static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
+{
+ int qidx, nixlf, rc, id, max_id = 0;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+
+ nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
+ id = rvu->rvu_dbg.nix_tm_ctx.id;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ max_id = pfvf->sq_ctx->qsize;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_SQ;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+
+ /* Skip SQs that are not initialized */
+ if (!test_bit(qidx, pfvf->sq_bmap))
+ continue;
+
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+
+ if (rc) {
+ seq_printf(m, "Failed to read SQ(%d) context\n",
+ aq_req.qidx);
+ continue;
+ }
+ print_tm_tree(m, &rsp, aq_req.qidx);
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 nixlf;
+ int ret;
+
+ ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
+ if (ret)
+ return ret;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
+ return count;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
+
+static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ int blkaddr, link, link_level;
+ struct rvu_hwinfo *hw;
+
+ hw = rvu->hw;
+ blkaddr = nix_hw->blkaddr;
+ if (lvl == NIX_TXSCH_LVL_MDQ) {
+ seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
+ seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_MDQX_OUT_MD_COUNT(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_MDQX_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL4) {
+ seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_SDP_LINK_CFG(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_MD_DEBUG1(schq)));
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL3) {
+ seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_MD_DEBUG1(schq)));
+
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
+ & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl == link_level) {
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
+ schq, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_BP_STATUS(schq)));
+ for (link = 0; link < hw->cgx_links; link++)
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
+ schq, link,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
+ }
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL2) {
+ seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_MD_DEBUG1(schq)));
+
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
+ & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl == link_level) {
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
+ schq, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_BP_STATUS(schq)));
+ for (link = 0; link < hw->cgx_links; link++)
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
+ schq, link, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
+ }
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
+ schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
+ seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_HW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_MD_DEBUG1(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
+ schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_DROPPED_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_DROPPED_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_RED_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_RED_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_YELLOW_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_YELLOW_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_GREEN_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_GREEN_BYTES(schq)));
+ seq_puts(m, "\n");
+ }
+}
+
+/* Dumps given tm_topo registers */
+static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_txsch *txsch;
+ int nixlf, lvl, schq;
+ u16 pcifunc;
+
+ nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_SQ;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
+ print_tm_topo(m, schq, lvl);
+ }
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 nixlf;
+ int ret;
+
+ ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
+ if (ret)
+ return ret;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
+ return count;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
@@ -2349,6 +2710,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
nix_hw = &rvu->hw->nix[1];
}
+ debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tm_tree_fops);
+ debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tm_topo_fops);
debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_sq_ctx_fops);
debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 00af8888e329..222f9e00b836 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2497,9 +2497,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
}
mutex_unlock(&rvu->rsrc_lock);
- /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
- rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
- err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
if (err)
dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
@@ -3864,6 +3862,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
return -ERANGE;
}
+/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
+#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
+/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
+#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
+
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
int idx, nr_field, key_off, field_marker, keyoff_marker;
@@ -3933,7 +3936,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->hdr_offset = 9; /* offset */
field->bytesm1 = 0; /* 1 byte */
field->ltype_match = NPC_LT_LC_IP;
- field->ltype_mask = 0xF;
+ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
break;
case NIX_FLOW_KEY_TYPE_IPV4:
case NIX_FLOW_KEY_TYPE_INNR_IPV4:
@@ -3960,8 +3963,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->bytesm1 = 3; /* DIP, 4 bytes */
}
}
-
- field->ltype_mask = 0xF; /* Match only IPv4 */
+ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
keyoff_marker = false;
break;
case NIX_FLOW_KEY_TYPE_IPV6:
@@ -3990,7 +3992,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->bytesm1 = 15; /* DIP,16 bytes */
}
}
- field->ltype_mask = 0xF; /* Match only IPv6 */
+ field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
break;
case NIX_FLOW_KEY_TYPE_TCP:
case NIX_FLOW_KEY_TYPE_UDP:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e8b73b9d75e3..97722ce8c4cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are less.
* Lower priority ones out of available free entries are always
* chosen when 'high vs low' question arises.
+ *
+ * For a VF, the base MCAM match rule is set by its PF, and all further
+ * MCAM rules the VF installs on its own are concatenated with that base
+ * rule. Hence PF entries should be at lower priority than VF entries;
+ * otherwise the base rule always hits and the VF's own rules are never
+ * used. So if the request is from a PF, allocate low priority entries.
*/
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
- /* For a VF base MCAM match rule is set by its PF. And all the
- * further MCAM rules installed by VF on its own are
- * concatenated with the base rule set by its PF. Hence PF entries
- * should be at lower priority compared to VF entries. Otherwise
- * base rule is hit always and rules installed by VF will be of
- * no use. Hence if the request is from PF and NOT a priority
- * allocation request then allocate low priority entries.
- */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK))
- goto lprio_alloc;
-
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
+ /* Ensure PF requests always land at the bottom. If the PF asks for a
+ * higher/lower priority entry w.r.t. a reference entry, honour that
+ * constraint but still search for entries from the bottom rather than
+ * the mid zone.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_HIGHER_PRIO)
+ end = req->ref_entry;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_LOWER_PRIO)
+ start = req->ref_entry;
}
alloc:
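
Taken together, the two hunks above mean a PF allocation now always starts from the low-priority (bottom) end of the MCAM, and a PF's HIGHER/LOWER priority request merely narrows that bottom-up search around the reference entry instead of using the mid zone. The stand-alone sketch below models only the PF path as patched; VF priority requests still go through the driver's pre-existing range selection, which is not shown here.

/* Sketch of the PF-side MCAM search-range selection after the patch. */
#include <stdbool.h>
#include <stdio.h>

enum prio { PRIO_NONE, PRIO_HIGHER, PRIO_LOWER };

struct range { int start, end; bool reverse; };

static struct range pf_pick_range(enum prio prio, int ref_entry,
				  int bmap_entries)
{
	/* "lprio_alloc": PFs always search the table bottom-up */
	struct range r = { 0, bmap_entries, true };

	/* a higher-priority request caps the search below the reference */
	if (prio == PRIO_HIGHER)
		r.end = ref_entry;
	/* a lower-priority request starts the search at the reference */
	if (prio == PRIO_LOWER)
		r.start = ref_entry;

	return r;
}

int main(void)
{
	struct range r = pf_pick_range(PRIO_HIGHER, 100, 512);

	printf("PF, higher prio than entry 100: scan [%d, %d) bottom-up\n",
	       r.start, r.end);
	return 0;
}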
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 086f05c0376f..d56be5fb7eb4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -121,6 +121,7 @@
#define NPA_AF_LF_RST (0x0020)
#define NPA_AF_GEN_CFG (0x0030)
#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_NDC_SYNC (0x0050)
#define NPA_AF_INP_CTL (0x00D0)
#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
#define NPA_AF_AVG_DELAY (0x0100)
@@ -239,6 +240,7 @@
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x03E0)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -429,6 +431,8 @@
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
+#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@@ -442,6 +446,11 @@
#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16)
+#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16)
+#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16)
+#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
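
The new *_PARENT_MASK definitions are what print_tm_tree() in the debugfs hunk earlier in this series uses to climb the transmit scheduler: each level's PARENT register carries the parent index starting at bit 16, with a level-specific width. The user-space sketch below mimics that FIELD_GET() walk; the register values and queue numbers are made up, and the MASK()/GET() helpers only approximate GENMASK_ULL()/FIELD_GET().

/* Stand-alone mock of the SQ -> SMQ -> TL4 -> TL3 -> TL2 -> TL1 walk. */
#include <stdint.h>
#include <stdio.h>

/* GENMASK_ULL(h, l) and a FIELD_GET()-style extractor, userspace versions */
#define MASK(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define GET(m, v)	(((v) & (m)) >> __builtin_ctzll(m))

#define MDQ_PARENT	MASK(24, 16)	/* NIX_AF_MDQ_PARENT_MASK */
#define TL4_PARENT	MASK(23, 16)	/* NIX_AF_TL4_PARENT_MASK */
#define TL3_PARENT	MASK(23, 16)	/* NIX_AF_TL3_PARENT_MASK */
#define TL2_PARENT	MASK(20, 16)	/* NIX_AF_TL2_PARENT_MASK */

int main(void)
{
	/* pretend register reads; only the parent field matters here */
	uint64_t mdq_parent_reg = 7ULL << 16;	/* MDQ's parent TL4 = 7 */
	uint64_t tl4_parent_reg = 5ULL << 16;	/* TL4's parent TL3 = 5 */
	uint64_t tl3_parent_reg = 3ULL << 16;	/* TL3's parent TL2 = 3 */
	uint64_t tl2_parent_reg = 1ULL << 16;	/* TL2's parent TL1 = 1 */

	unsigned int smq = 9;	/* would come from the SQ context (sq_ctx->smq) */
	unsigned int tl4 = GET(MDQ_PARENT, mdq_parent_reg);
	unsigned int tl3 = GET(TL4_PARENT, tl4_parent_reg);
	unsigned int tl2 = GET(TL3_PARENT, tl3_parent_reg);
	unsigned int tl1 = GET(TL2_PARENT, tl2_parent_reg);

	printf("SQ -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
	       smq, tl4, tl3, tl2, tl1);
	return 0;
}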
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768cb0c..64a97a0a10ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o
-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index a85ac039d779..87d5776e3b88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
@@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
+ req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
@@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
- req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
req->num_regs++;
req->reg[2] = NIX_AF_TL1X_CIR(schq);
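
The (u64) casts added above matter because the shifts would otherwise be evaluated in 32-bit int arithmetic: a 16-bit scheduler index promotes to int, << 16 can land in the sign bit, and the negative intermediate then sign-extends when stored into the 64-bit regval. The same reasoning is behind the (u64)(a) casts added to the otx2_reg.h macros further down. A quick demonstration follows; the printed results are what common compilers produce, though shifting into the sign bit is strictly undefined behaviour in C.

/* Shows why (u64)parent << 16 differs from parent << 16 for 16-bit values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t parent = 0x8000;	/* any index with bit 15 set */

	/* promoted to 32-bit int, shifted into the sign bit, sign-extended */
	uint64_t without_cast = parent << 16;

	/* widened to 64 bits first, so the shift stays well-defined */
	uint64_t with_cast = (uint64_t)parent << 16;

	printf("parent << 16      = 0x%016llx\n",
	       (unsigned long long)without_cast);
	printf("(u64)parent << 16 = 0x%016llx\n",
	       (unsigned long long)with_cast);
	/* typical output:
	 * parent << 16      = 0xffffffff80000000
	 * (u64)parent << 16 = 0x0000000080000000
	 */
	return 0;
}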
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 24fbbef265a6..f27a3456ae64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -346,12 +346,9 @@ struct otx2_flow_config {
u16 *def_ent;
u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT 16
-#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_DEFAULT_UNICAST_FLOWS 4
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
-#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
- OTX2_MAX_UNICAST_FLOWS + \
- OTX2_MAX_VLAN_FLOWS)
u16 unicast_offset;
u16 rx_vlan_offset;
u16 vf_vlan_offset;
@@ -365,6 +362,7 @@ struct otx2_flow_config {
u16 max_flows;
refcount_t mark_flows;
struct list_head flow_list_tc;
+ u8 ucast_flt_cnt;
bool ntuple;
};
@@ -1067,6 +1065,7 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev,
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
u64 iova, int size);
+int otx2_mcam_entry_init(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 28fb643d2917..aa01110f04a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -260,6 +262,7 @@ update_sq_smq_map:
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@@ -321,6 +325,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@@ -385,6 +390,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
+EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(otx2_dcbnl_set_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 99ddf31269d9..53f14aa944bd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -64,9 +64,68 @@ static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
return 0;
}
+static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ int err;
+
+ pfvf->flow_cfg->ucast_flt_cnt = ctx->val.vu8;
+
+ otx2_mcam_flow_del(pfvf);
+ err = otx2_mcam_entry_init(pfvf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vu8 = pfvf->flow_cfg ? pfvf->flow_cfg->ucast_flt_cnt : 0;
+
+ return 0;
+}
+
+static int otx2_dl_ucast_flt_cnt_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ /* Check for UNICAST filter support */
+ if (!(pfvf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unicast filter not enabled");
+ return -EINVAL;
+ }
+
+ if (!pfvf->flow_cfg) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "pfvf->flow_cfg not initialized");
+ return -EINVAL;
+ }
+
+ if (pfvf->flow_cfg->nr_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify count when there are active rules");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
enum otx2_dl_param_id {
OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT,
};
static const struct devlink_param otx2_dl_params[] = {
@@ -75,6 +134,11 @@ static const struct devlink_param otx2_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
otx2_dl_mcam_count_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT,
+ "unicast_filter_count", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_ucast_flt_cnt_get, otx2_dl_ucast_flt_cnt_set,
+ otx2_dl_ucast_flt_cnt_validate),
};
static const struct devlink_ops otx2_devlink_ops = {
@@ -113,6 +177,7 @@ err_dl:
devlink_free(dl);
return err;
}
+EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@@ -124,3 +189,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
+EXPORT_SYMBOL(otx2_unregister_dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 7f786de61014..0db62eb0dab3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -954,7 +954,7 @@ static u32 otx2_get_link(struct net_device *netdev)
}
static int otx2_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index bc5819237ed7..98c31a16c70b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -12,8 +12,6 @@
#define OTX2_DEFAULT_ACTION 0x1
-static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
-
struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
@@ -161,7 +159,7 @@ exit:
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);
-static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_get_field_status_req *freq;
@@ -172,7 +170,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
int ent, count;
vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
- count = OTX2_MAX_UNICAST_FLOWS +
+ count = flow_cfg->ucast_flt_cnt +
OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
@@ -214,7 +212,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
flow_cfg->vf_vlan_offset = 0;
flow_cfg->unicast_offset = vf_vlan_max_flows;
flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
- OTX2_MAX_UNICAST_FLOWS;
+ flow_cfg->ucast_flt_cnt;
pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
/* Check if NPC_DMAC field is supported
@@ -255,6 +253,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
refcount_set(&flow_cfg->mark_flows, 1);
return 0;
}
+EXPORT_SYMBOL(otx2_mcam_entry_init);
/* TODO : revisit on size */
#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
@@ -302,6 +301,8 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
+ pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
*/
@@ -314,7 +315,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return 0;
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
- * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+ * pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL);
if (!pf->mac_table)
return -ENOMEM;
@@ -356,7 +357,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
return -ENOMEM;
/* don't have free mcam entries or uc list is greater than allotted */
- if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt)
return -ENOMEM;
mutex_lock(&pf->mbox.lock);
@@ -367,7 +368,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
}
/* unicast offset starts with 32 0..31 for ntuple */
- for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
if (pf->mac_table[i].inuse)
continue;
ether_addr_copy(pf->mac_table[i].addr, mac);
@@ -410,7 +411,7 @@ static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
{
int i;
- for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
if (!pf->mac_table[i].inuse)
continue;
@@ -1394,6 +1395,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
}
pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ flow_cfg->max_flows = 0;
mutex_unlock(&pfvf->mbox.lock);
return 0;
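
With the unicast filter count now read from flow_cfg->ucast_flt_cnt, the default-entry layout built by otx2_mcam_entry_init() shifts with it: VF-VLAN entries first, then ucast_flt_cnt unicast entries, then the RX VLAN entry. The small recalculation below mirrors those offset formulas; the per-VF VLAN flow count and the number of VFs are assumed example values, not the driver's constants.

/* Recomputes the default MCAM entry layout from otx2_mcam_entry_init(). */
#include <stdio.h>

#define MAX_VLAN_FLOWS		1	/* OTX2_MAX_VLAN_FLOWS */

struct layout {
	unsigned int count;		/* default entries requested */
	unsigned int vf_vlan_offset;
	unsigned int unicast_offset;
	unsigned int rx_vlan_offset;
};

static struct layout compute(unsigned int total_vfs,
			     unsigned int per_vf_vlan_flows,
			     unsigned int ucast_flt_cnt)
{
	unsigned int vf_vlan_max = total_vfs * per_vf_vlan_flows;
	struct layout l;

	l.count = ucast_flt_cnt + MAX_VLAN_FLOWS + vf_vlan_max;
	l.vf_vlan_offset = 0;
	l.unicast_offset = vf_vlan_max;
	l.rx_vlan_offset = l.unicast_offset + ucast_flt_cnt;
	return l;
}

int main(void)
{
	/* e.g. 8 VFs, 2 VLAN flows per VF (assumed), 4 unicast filters */
	struct layout l = compute(8, 2, 4);

	printf("default entries: %u (vf-vlan @%u, unicast @%u, rx-vlan @%u)\n",
	       l.count, l.vf_vlan_offset, l.unicast_offset, l.rx_vlan_offset);

	/* raising unicast_filter_count via devlink grows the middle block */
	l = compute(8, 2, 8);
	printf("with 8 unicast filters: %u entries, rx-vlan moves to @%u\n",
	       l.count, l.rx_vlan_offset);
	return 0;
}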
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index f5bce3e326cc..5492dea547a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1714,7 +1714,7 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
return;
if ((netdev->flags & IFF_PROMISC) ||
- (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) {
promisc = true;
}
@@ -3245,6 +3245,29 @@ static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
return otx2_sriov_enable(pdev, numvfs);
}
+static void otx2_ndc_sync(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ struct ndc_sync_op *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+
+ req->nix_lf_tx_sync = 1;
+ req->nix_lf_rx_sync = 1;
+ req->npa_lf_sync = 1;
+
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "NDC sync operation failed\n");
+
+ mutex_unlock(&mbox->lock);
+}
+
static void otx2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3293,6 +3316,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_shutdown_qos(pf);
+ otx2_ndc_sync(pf);
otx2_detach_resources(&pf->mbox);
if (pf->hw.lmt_info)
free_percpu(pf->hw.lmt_info);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 45a32e4b49d1..e3aee6e36215 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -139,33 +139,34 @@
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index a16e9f244117..3eb85949677a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -513,7 +513,7 @@ process_cqe:
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = { 0 };
u64 rx_frames, rx_bytes;
u64 tx_frames, tx_bytes;
@@ -1174,8 +1174,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ return true;
+ }
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
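
The VLAN hunk above leans on the documented contract of __vlan_hwaccel_push_inside(): on failure it frees the skb it was handed and returns NULL, so the caller must stop using the old pointer and, in this path, simply report the packet as handled. The toy sketch below (invented names, user-space allocation) shows that consume-on-failure ownership pattern in isolation.

/* Toy model of a "consumes its argument on failure" helper. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; };

/* like __vlan_hwaccel_push_inside(): returns a (possibly new) pkt, or
 * frees the input and returns NULL on failure.
 */
static struct pkt *push_tag(struct pkt *p, bool fail)
{
	if (fail) {
		free(p);
		return NULL;
	}
	p->len += 4;
	return p;
}

static bool xmit(struct pkt *p, bool fail)
{
	p = push_tag(p, fail);
	if (!p)
		return true;	/* already freed; handled, never touch p again */

	printf("sending %d bytes\n", p->len);
	free(p);
	return true;
}

int main(void)
{
	struct pkt *a = calloc(1, sizeof(*a));
	struct pkt *b = calloc(1, sizeof(*b));

	a->len = 60;
	b->len = 60;
	xmit(a, false);		/* normal path */
	xmit(b, true);		/* failure path: no use-after-free, no double free */
	return 0;
}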
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 070711df612e..0f844c14485a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
num_regs++;
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
-
} else if (level == NIX_TXSCH_LVL_TL4) {
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL3) {
@@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
/* check if node is root */
if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
- cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
+ cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
mtu_to_dwrr_weight(pfvf,
pfvf->tx_max_pktlen);
num_regs++;
@@ -1422,7 +1421,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
/* delete the txschq nodes allocated for this node */
+ otx2_qos_disable_sq(pfvf, qid);
+ otx2_qos_free_hw_node_schq(pfvf, node);
otx2_qos_free_sw_node_schq(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
/* mark this node as htb inner node */
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
@@ -1632,6 +1634,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
dwrr_del_node = true;
/* destroy the leaf node */
+ otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index da0db417ab69..95c4405b7d7b 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,12 +1,20 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek devices"
- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
+ depends on ARCH_MEDIATEK || ARCH_AIROHA || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
help
If you have a Mediatek SoC with ethernet, say Y.
if NET_VENDOR_MEDIATEK
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 03e008fbc859..ddbb7f4a516c 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,3 +11,4 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
+obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
new file mode 100644
index 000000000000..7967a92803c2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -0,0 +1,2730 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+#include <net/dsa.h>
+#include <net/page_pool/helpers.h>
+#include <uapi/linux/ppp_defs.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 1
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_NUM_XSI_RSTS 5
+#define AIROHA_MAX_MTU 2000
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_LAN_MAC_H 0x0040
+#define REG_FE_LAN_MAC_LMIN 0x0044
+#define REG_FE_LAN_MAC_LMAX 0x0048
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM1_VLAN_CTRL CDM1_BASE
+#define CDM1_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
+#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
+#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
+#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
+#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_DROP_CRC_ERR BIT(23)
+#define GDM_IP4_CKSUM BIT(22)
+#define GDM_TCP_CKSUM BIT(21)
+#define GDM_UDP_CKSUM BIT(20)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
+#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
+#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM3_FWD_CFG GDM3_BASE
+#define GDM3_PAD_EN_MASK BIT(28)
+
+#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100)
+#define GDM4_PAD_EN_MASK BIT(28)
+#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
+
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c)
+#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_n) \
+ (((_n) == 4) ? 0x0750 : \
+ ((_n) == 3) ? 0x0744 : \
+ ((_n) == 2) ? 0x0740 : \
+ ((_n) == 1) ? 0x002c : 0x0028)
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_IDX0_MASK \
+ (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
+ TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
+ TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
+ TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
+ RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
+ RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
+ RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
+ RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
+ RX15_COHERENT_INT_MASK | INT_TX_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_DONE_INT_MASK \
+ (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
+ RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
+ RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
+ RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
+ RX15_DONE_INT_MASK)
+#define INT_IDX1_MASK \
+ (RX_DONE_INT_MASK | \
+ RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
+ RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
+ RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
+ RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
+ RX15_NO_CPU_DSCP_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define INT_IDX4_MASK \
+ (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
+ TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
+ TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
+ TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
+ TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
+ TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
+ TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
+ TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
+ TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
+ TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
+ TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
+ TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(29)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
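+/* QDMA DMA descriptor layout shared by the TX and RX rings; the ctrl,
+ * data and msg words are encoded/decoded with the masks defined above.
+ */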
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ XSI_PCIE0_PORT,
+ XSI_PCIE1_PORT,
+ XSI_USB_PORT,
+ XSI_AE_PORT,
+ XSI_ETH_PORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_eth *eth;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_eth *eth;
+
+ struct napi_struct napi;
+ u32 *q;
+
+ int size;
+ int queued;
+ u16 head;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+struct airoha_gdm_port {
+ struct net_device *dev;
+ struct airoha_eth *eth;
+ int id;
+
+ struct airoha_hw_stats stats;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ unsigned long state;
+
+ void __iomem *qdma_regs;
+ void __iomem *fe_regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+
+ struct net_device *napi_dev;
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
+static u32 airoha_rr(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= (airoha_rr(base, offset) & ~mask);
+ airoha_wr(base, offset, val);
+
+ return val;
+}
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(eth, offset) \
+ airoha_rr((eth)->qdma_regs, (offset))
+#define airoha_qdma_wr(eth, offset, val) \
+ airoha_wr((eth)->qdma_regs, (offset), (val))
+#define airoha_qdma_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
+#define airoha_qdma_set(eth, offset, val) \
+ airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
+#define airoha_qdma_clear(eth, offset, val) \
+ airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
+
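+/* Update the shadow copy of the QDMA interrupt enable register @index:
+ * bits in @clear are masked off, bits in @set are enabled and the result
+ * is written back to the corresponding REG_INT_ENABLE register.
+ */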
+static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+ u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
+ return;
+
+ spin_lock_irqsave(&eth->irq_lock, flags);
+
+ eth->irqmask[index] &= ~clear;
+ eth->irqmask[index] |= set;
+ airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
+ /* Read irq_enable register in order to guarantee the update above
+ * completes in the spinlock critical section.
+ */
+ airoha_qdma_rr(eth, REG_INT_ENABLE(index));
+
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
+}
+
+static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
+ u32 mask)
+{
+ airoha_qdma_set_irqmask(eth, index, 0, mask);
+}
+
+static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
+ u32 mask)
+{
+ airoha_qdma_set_irqmask(eth, index, mask, 0);
+}
+
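+/* The LAN MAC address is split across three registers: the three most
+ * significant bytes go in REG_FE_LAN_MAC_H, while the three least
+ * significant ones are programmed in both REG_FE_LAN_MAC_LMIN and
+ * REG_FE_LAN_MAC_LMAX.
+ */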
+static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
+{
+ u32 val;
+
+ val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
+ airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
+
+ val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
+ airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
+}
+
+static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+ u32 val)
+{
+ airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
+ FIELD_PREP(GDM_OCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
+ FIELD_PREP(GDM_MCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
+ FIELD_PREP(GDM_BCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
+ FIELD_PREP(GDM_UCFQ_MASK, val));
+}
+
+static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+{
+ u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
+ u32 vip_port, cfg_addr;
+
+ switch (port) {
+ case XSI_PCIE0_PORT:
+ vip_port = XSI_PCIE0_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(3);
+ break;
+ case XSI_PCIE1_PORT:
+ vip_port = XSI_PCIE1_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(3);
+ break;
+ case XSI_USB_PORT:
+ vip_port = XSI_USB_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(4);
+ break;
+ case XSI_ETH_PORT:
+ vip_port = XSI_ETH_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (enable) {
+ airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
+ } else {
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
+
+ return 0;
+}
+
+static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
+{
+ const int port_list[] = {
+ XSI_PCIE0_PORT,
+ XSI_PCIE1_PORT,
+ XSI_USB_PORT,
+ XSI_ETH_PORT
+ };
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(port_list); i++) {
+ err = airoha_set_gdm_port(eth, port_list[i], enable);
+ if (err)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i--; i >= 0; i--)
+ airoha_set_gdm_port(eth, port_list[i], false);
+
+ return err;
+}
+
+static void airoha_fe_maccr_init(struct airoha_eth *eth)
+{
+ int p;
+
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
+ GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
+ GDM_DROP_CRC_ERR);
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
+ FE_PSE_PORT_CDM1);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
+ }
+
+ airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
+ FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
+
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
+}
+
+static void airoha_fe_vip_setup(struct airoha_eth *eth)
+{
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(4),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(6),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(7),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* BOOTP (0x43) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(8),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* BOOTP (0x44) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(9),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* ISAKMP */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(10),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(11),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* DHCPv6 */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(12),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(19),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* ETH->ETH_P_1905 (0x893a) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(20),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(21),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+}
+
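+/* PSE output-queue reserved pages are accessed indirectly: the port and
+ * queue ids are programmed in REG_FE_PSE_QUEUE_CFG_WR, while the value
+ * itself is read from (or written to) REG_FE_PSE_QUEUE_CFG_VAL.
+ */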
+static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue)
+{
+ u32 val;
+
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
+ val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
+
+ return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
+}
+
+static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
+ FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
+}
+
+static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ u32 orig_val, tmp, all_rsv, fq_limit;
+
+ airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
+
+ /* modify all rsv */
+ orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+ tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+ all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
+ all_rsv += (val - orig_val);
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
+ FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
+
+ /* modify hthd */
+ tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
+ fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
+ tmp = fq_limit - all_rsv - 0x20;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_HTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
+
+ tmp = fq_limit - all_rsv - 0x100;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_MTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
+ tmp = (3 * tmp) >> 2;
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
+ PSE_SHARE_USED_LTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
+
+ return 0;
+}
+
+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
+{
+ const u32 pse_port_num_queues[] = {
+ [FE_PSE_PORT_CDM1] = 6,
+ [FE_PSE_PORT_GDM1] = 6,
+ [FE_PSE_PORT_GDM2] = 32,
+ [FE_PSE_PORT_GDM3] = 6,
+ [FE_PSE_PORT_PPE1] = 4,
+ [FE_PSE_PORT_CDM2] = 6,
+ [FE_PSE_PORT_CDM3] = 8,
+ [FE_PSE_PORT_CDM4] = 10,
+ [FE_PSE_PORT_PPE2] = 4,
+ [FE_PSE_PORT_GDM4] = 2,
+ [FE_PSE_PORT_CDM5] = 2,
+ };
+ int q;
+
+ /* the hw does not account for the PPE2 oq rsv pages */
+ airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
+ PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
+
+ /* CDM1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* GDM1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* GDM2 */
+ for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
+ /* GDM3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* PPE1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
+ }
+ /* CDM2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
+ /* CDM4 */
+ for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* PPE2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
+ }
+ /* GDM4 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM5 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
+ PSE_QUEUE_RSV_PAGES);
+}
+
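+/* Zero out the whole multicast VLAN table, polling for command
+ * completion after each write issued for every table/port entry.
+ */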
+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
+ int err, j;
+ u32 val;
+
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC, 5 * USEC_PER_MSEC,
+ false, eth, REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+
+ for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
+ MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC,
+ 5 * USEC_PER_MSEC, false, eth,
+ REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
+{
+ /* CDM1_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+ /* CDM2_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+}
+
+static int airoha_fe_init(struct airoha_eth *eth)
+{
+ airoha_fe_maccr_init(eth);
+
+ /* PSE IQ reserve */
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
+ FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
+ PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
+ FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
+ FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
+
+ /* enable FE copy engine for MC/KA/DPI */
+ airoha_fe_wr(eth, REG_FE_PCE_CFG,
+ PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
+ /* set vip queue selection to ring 1 */
+ airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
+ FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
+ FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
+ /* set GDM4 source interface offset to 8 */
+ airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
+ GDM4_SPORT_OFF2_MASK |
+ GDM4_SPORT_OFF1_MASK |
+ GDM4_SPORT_OFF0_MASK,
+ FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
+ FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
+ FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
+
+ /* set PSE Page as 128B */
+ airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
+ FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
+ FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
+ FE_DMA_GLO_PG_SZ_MASK);
+ airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
+ FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
+ FE_RST_GDM4_MBI_ARB_MASK);
+ usleep_range(1000, 2000);
+
+ /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
+ * connect other rings to PSE Port0 OQ-0
+ */
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
+
+ airoha_fe_vip_setup(eth);
+ airoha_fe_pse_ports_init(eth);
+
+ airoha_fe_set(eth, REG_GDM_MISC_CFG,
+ GDM2_RDM_ACK_WAIT_PREF_MASK |
+ GDM2_CHN_VLD_MODE_MASK);
+ airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15);
+
+ /* init fragment and assemble Force Port */
+ /* NPU Core-3, NPU Bridge Channel-3 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
+ FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
+ FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
+ /* QDMA LAN, RX Ring-22 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
+ FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
+ FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
+
+ airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
+
+ airoha_fe_crsn_qsel_init(eth);
+
+ airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
+
+ /* default aging mode for mbi unlock issue */
+ airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
+ MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
+ FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
+ FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
+
+ /* disable IFC by default */
+ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+
+ /* enable 1:N vlan action, init vlan table */
+ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
+
+ return airoha_fe_mc_vlan_clear(eth);
+}
+
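+/* Refill the RX ring with page_pool fragments until only one descriptor
+ * is left unused, advancing the ring CPU index register as new buffers
+ * are made available to the hardware. Returns the number of refilled
+ * descriptors.
+ */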
+static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+{
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_eth *eth = q->eth;
+ int qid = q - &eth->q_rx[0];
+ int nframes = 0;
+
+ while (q->queued < q->ndesc - 1) {
+ struct airoha_queue_entry *e = &q->entry[q->head];
+ struct airoha_qdma_desc *desc = &q->desc[q->head];
+ struct page *page;
+ int offset;
+ u32 val;
+
+ page = page_pool_dev_alloc_frag(q->page_pool, &offset,
+ q->buf_size);
+ if (!page)
+ break;
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued++;
+ nframes++;
+
+ e->buf = page_address(page) + offset;
+ e->dma_addr = page_pool_get_dma_addr(page) + offset;
+ e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
+ dir);
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ WRITE_ONCE(desc->msg2, 0);
+ WRITE_ONCE(desc->msg3, 0);
+
+ airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+ }
+
+ return nframes;
+}
+
+static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
+ struct airoha_qdma_desc *desc)
+{
+ u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
+
+ sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
+ switch (sport) {
+ case 0x10 ... 0x13:
+ port = 0;
+ break;
+ case 0x2 ... 0x4:
+ port = sport - 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
+}
+
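+/* Reap up to @budget completed RX descriptors: map the source port
+ * reported in msg1 to a GDM port, build an skb around the page_pool
+ * buffer and hand it to GRO. The ring is refilled before returning.
+ */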
+static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+{
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_eth *eth = q->eth;
+ int qid = q - &eth->q_rx[0];
+ int done = 0;
+
+ while (done < budget) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct sk_buff *skb;
+ int len, p;
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+ break;
+
+ if (!dma_addr)
+ break;
+
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+ if (!len)
+ break;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ dma_sync_single_for_cpu(eth->dev, dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size), dir);
+
+ p = airoha_qdma_get_gdm_port(eth, desc);
+ if (p < 0 || !eth->ports[p]) {
+ page_pool_put_full_page(q->page_pool,
+ virt_to_head_page(e->buf),
+ true);
+ continue;
+ }
+
+ skb = napi_build_skb(e->buf, q->buf_size);
+ if (!skb) {
+ page_pool_put_full_page(q->page_pool,
+ virt_to_head_page(e->buf),
+ true);
+ break;
+ }
+
+ skb_reserve(skb, 2);
+ __skb_put(skb, len);
+ skb_mark_for_recycle(skb);
+ skb->dev = eth->ports[p]->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, qid);
+ napi_gro_receive(&q->napi, skb);
+
+ done++;
+ }
+ airoha_qdma_fill_rx_queue(q);
+
+ return done;
+}
+
+static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+ struct airoha_eth *eth = q->eth;
+ int cur, done = 0;
+
+ do {
+ cur = airoha_qdma_rx_process(q, budget - done);
+ done += cur;
+ } while (cur && done < budget);
+
+ if (done < budget && napi_complete(napi))
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ return done;
+}
+
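+/* Allocate the descriptor ring and the page_pool (half-page buffers)
+ * backing a RX queue, then program the ring base, size, threshold and
+ * DMA index registers and perform the initial refill.
+ */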
+static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
+ struct airoha_queue *q, int ndesc)
+{
+ const struct page_pool_params pp_params = {
+ .order = 0,
+ .pool_size = 256,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dev,
+ .napi = &q->napi,
+ };
+ int qid = q - &eth->q_rx[0], thr;
+ dma_addr_t dma_addr;
+
+ q->buf_size = PAGE_SIZE / 2;
+ q->ndesc = ndesc;
+ q->eth = eth;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(q->page_pool)) {
+ int err = PTR_ERR(q->page_pool);
+
+ q->page_pool = NULL;
+ return err;
+ }
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
+
+ airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
+ FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
+
+ thr = clamp(ndesc >> 3, 1, 32);
+ airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ FIELD_PREP(RX_RING_THR_MASK, thr));
+ airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+
+ airoha_qdma_fill_rx_queue(q);
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+{
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_eth *eth = q->eth;
+
+ while (q->queued) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct page *page = virt_to_head_page(e->buf);
+
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
+ dir);
+ page_pool_put_full_page(q->page_pool, page, false);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+}
+
+static int airoha_qdma_init_rx(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ int err;
+
+ if (!(RX_DONE_INT_MASK & BIT(i))) {
+ /* rx queue is not bound to an irq */
+ continue;
+ }
+
+ err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+ RX_DSCP_NUM(i));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
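+/* TX completion path: the hardware reports completed descriptors through
+ * the per-IRQ completion queue as (ring index, descriptor index) pairs.
+ * Walk the queue, unmap and free the transmitted skbs up to the reported
+ * index, wake any stopped netdev queue with enough room again and
+ * acknowledge the consumed entries via REG_IRQ_CLEAR_LEN in chunks of at
+ * most 128 entries.
+ */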
+static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_tx_irq_queue *irq_q;
+ struct airoha_eth *eth;
+ int id, done = 0;
+
+ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+ eth = irq_q->eth;
+ id = irq_q - &eth->q_tx_irq[0];
+
+ while (irq_q->queued > 0 && done < budget) {
+ u32 qid, last, val = irq_q->q[irq_q->head];
+ struct airoha_queue *q;
+
+ if (val == 0xff)
+ break;
+
+ irq_q->q[irq_q->head] = 0xff; /* mark as done */
+ irq_q->head = (irq_q->head + 1) % irq_q->size;
+ irq_q->queued--;
+ done++;
+
+ last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+ qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
+
+ if (qid >= ARRAY_SIZE(eth->q_tx))
+ continue;
+
+ q = &eth->q_tx[qid];
+ if (!q->ndesc)
+ continue;
+
+ spin_lock_bh(&q->lock);
+
+ while (q->queued > 0) {
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct sk_buff *skb = e->skb;
+ u16 index = q->tail;
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
+ !(desc_ctrl & QDMA_DESC_DROP_MASK))
+ break;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+
+ if (skb) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(skb->dev, qid);
+ if (netif_tx_queue_stopped(txq) &&
+ q->ndesc - q->queued >= q->free_thr)
+ netif_tx_wake_queue(txq);
+
+ dev_kfree_skb_any(skb);
+ e->skb = NULL;
+ }
+
+ if (index == last)
+ break;
+ }
+
+ spin_unlock_bh(&q->lock);
+ }
+
+ if (done) {
+ int i, len = done >> 7;
+
+ for (i = 0; i < len; i++)
+ airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, 0x80);
+ airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, (done & 0x7f));
+ }
+
+ if (done < budget && napi_complete(napi))
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(id));
+
+ return done;
+}
+
+static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
+ struct airoha_queue *q, int size)
+{
+ int i, qid = q - &eth->q_tx[0];
+ dma_addr_t dma_addr;
+
+ spin_lock_init(&q->lock);
+ q->ndesc = size;
+ q->eth = eth;
+ q->free_thr = 1 + MAX_SKB_FRAGS;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ for (i = 0; i < q->ndesc; i++) {
+ u32 val;
+
+ val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
+ WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
+ }
+
+ airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+
+ return 0;
+}
+
+static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
+ struct airoha_tx_irq_queue *irq_q,
+ int size)
+{
+ int id = irq_q - &eth->q_tx_irq[0];
+ dma_addr_t dma_addr;
+
+ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+ airoha_qdma_tx_napi_poll);
+ irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
+ &dma_addr, GFP_KERNEL);
+ if (!irq_q->q)
+ return -ENOMEM;
+
+ memset(irq_q->q, 0xff, size * sizeof(u32));
+ irq_q->size = size;
+ irq_q->eth = eth;
+
+ airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
+ airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ FIELD_PREP(TX_IRQ_THR_MASK, 1));
+
+ return 0;
+}
+
+static int airoha_qdma_init_tx(struct airoha_eth *eth)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+ IRQ_QUEUE_LEN(i));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+ TX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
+{
+ struct airoha_eth *eth = q->eth;
+
+ spin_lock_bh(&q->lock);
+ while (q->queued) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(e->skb);
+ e->skb = NULL;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_bh(&q->lock);
+}
+
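+/* Allocate the descriptor and packet buffer regions used by the QDMA
+ * hw forwarding engine (HW_DSCP_NUM descriptors of AIROHA_MAX_PACKET_SIZE
+ * bytes each), then start the buffer manager and poll until
+ * LMGR_INIT_START clears.
+ */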
+static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
+{
+ dma_addr_t dma_addr;
+ u32 status;
+ int size;
+
+ size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
+ eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!eth->hfwd.desc)
+ return -ENOMEM;
+
+ airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
+
+ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+ eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!eth->hfwd.q)
+ return -ENOMEM;
+
+ airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
+
+ airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+ airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
+ airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+ LMGR_INIT_START);
+
+ return read_poll_timeout(airoha_qdma_rr, status,
+ !(status & LMGR_INIT_START), USEC_PER_MSEC,
+ 30 * USEC_PER_MSEC, true, eth,
+ REG_LMGR_INIT_CFG);
+}
+
+static void airoha_qdma_init_qos(struct airoha_eth *eth)
+{
+ airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+
+ airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
+ PSE_BUF_ESTIMATE_EN_MASK);
+
+ airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_EN_MASK |
+ EGRESS_RATE_METER_EQ_RATE_EN_MASK);
+ /* 2047us x 31 = 63.457ms */
+ airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_WINDOW_SZ_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
+ airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_TIMESLICE_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
+
+ /* ratelimit init */
+ airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ /* fast-tick 25us */
+ airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ FIELD_PREP(GLB_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+ airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
+ EGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+ airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
+ INGRESS_TRTCM_MODE_MASK);
+ airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
+ airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
+ INGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
+
+ airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+ airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ FIELD_PREP(SLA_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
+}
+
+static int airoha_qdma_hw_init(struct airoha_eth *eth)
+{
+ int i;
+
+ /* clear pending irqs */
+ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
+ airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
+
+ /* setup irqs */
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+
+ /* setup irq binding */
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+ if (!eth->q_tx[i].ndesc)
+ continue;
+
+ if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
+ airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ else
+ airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ }
+
+ airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_RX_2B_OFFSET_MASK |
+ FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
+ GLOBAL_CFG_CPU_TXR_RR_MASK |
+ GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
+ GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
+ GLOBAL_CFG_MULTICAST_EN_MASK |
+ GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
+ GLOBAL_CFG_TX_WB_DONE_MASK |
+ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
+
+ airoha_qdma_init_qos(eth);
+
+ /* disable qdma rx delay interrupt */
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
+ RX_DELAY_INT_MASK);
+ }
+
+ airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
+ TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+
+ return 0;
+}
+
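+/* Hard irq handler: latch and ack the pending QDMA interrupt status,
+ * mask the RX/TX done interrupts and schedule the related NAPI contexts;
+ * the interrupts are re-enabled from the NAPI poll callbacks once the
+ * budget is not exhausted.
+ */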
+static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
+{
+ struct airoha_eth *eth = dev_instance;
+ u32 intr[ARRAY_SIZE(eth->irqmask)];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
+ intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
+ intr[i] &= eth->irqmask[i];
+ airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
+ }
+
+ if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
+ return IRQ_NONE;
+
+ if (intr[1] & RX_DONE_INT_MASK) {
+ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ if (intr[1] & BIT(i))
+ napi_schedule(&eth->q_rx[i].napi);
+ }
+ }
+
+ if (intr[0] & INT_TX_MASK) {
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+ struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+ u32 status, head;
+
+ if (!(intr[0] & TX_DONE_INT_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(i));
+
+ status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ irq_q->head = head % irq_q->size;
+ irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
+ napi_schedule(&eth->q_tx_irq[i].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_qdma_init(struct airoha_eth *eth)
+{
+ int err;
+
+ err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_rx(eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_tx(eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_hfwd_queues(eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_hw_init(eth);
+ if (err)
+ return err;
+
+ set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+ return 0;
+}
+
+static int airoha_hw_init(struct airoha_eth *eth)
+{
+ int err;
+
+ /* disable xsi */
+ reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
+
+ reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ msleep(20);
+ reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ msleep(20);
+
+ err = airoha_fe_init(eth);
+ if (err)
+ return err;
+
+ return airoha_qdma_init(eth);
+}
+
+static void airoha_hw_cleanup(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&eth->q_rx[i].napi);
+ netif_napi_del(&eth->q_rx[i].napi);
+ airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
+ if (eth->q_rx[i].page_pool)
+ page_pool_destroy(eth->q_rx[i].page_pool);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+ napi_disable(&eth->q_tx_irq[i].napi);
+ netif_napi_del(&eth->q_tx_irq[i].napi);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+ if (!eth->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+ }
+}
+
+static void airoha_qdma_start_napi(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
+ napi_enable(&eth->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ napi_enable(&eth->q_rx[i].napi);
+ }
+}
+
+static void airoha_update_hw_stats(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->eth;
+ u32 val, i = 0;
+
+ spin_lock(&port->stats.lock);
+ u64_stats_update_begin(&port->stats.syncp);
+
+ /* TX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
+ port->stats.tx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
+ port->stats.tx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
+ port->stats.tx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
+ port->stats.tx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
+ port->stats.tx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
+ port->stats.tx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
+ port->stats.tx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
+ port->stats.tx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
+ port->stats.tx_len[i++] += val;
+
+ /* RX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
+ port->stats.rx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
+ port->stats.rx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
+ port->stats.rx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
+ port->stats.rx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
+ port->stats.rx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
+ port->stats.rx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
+ port->stats.rx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
+ port->stats.rx_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
+ port->stats.rx_crc_error += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
+ port->stats.rx_over_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
+ port->stats.rx_fragment += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
+ port->stats.rx_jabber += val;
+
+ i = 0;
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
+ port->stats.rx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
+ port->stats.rx_len[i++] += val;
+
+ /* reset mib counters */
+ airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
+ FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
+
+ u64_stats_update_end(&port->stats.syncp);
+ spin_unlock(&port->stats.lock);
+}
+
+static int airoha_dev_open(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->eth;
+ int err;
+
+ netif_tx_start_all_queues(dev);
+ err = airoha_set_gdm_ports(eth, true);
+ if (err)
+ return err;
+
+ if (netdev_uses_dsa(dev))
+ airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+ else
+ airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+
+ airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
+ airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ return 0;
+}
+
+static int airoha_dev_stop(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->eth;
+ int err;
+
+ netif_tx_disable(dev);
+ err = airoha_set_gdm_ports(eth, false);
+ if (err)
+ return err;
+
+ airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
+ airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ return 0;
+}
+
+static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int err;
+
+ err = eth_mac_addr(dev, p);
+ if (err)
+ return err;
+
+ airoha_set_macaddr(port->eth, dev->dev_addr);
+
+ return 0;
+}
+
+static int airoha_dev_init(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ airoha_set_macaddr(port->eth, dev->dev_addr);
+
+ return 0;
+}
+
+static void airoha_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ storage->rx_packets = port->stats.rx_ok_pkts;
+ storage->tx_packets = port->stats.tx_ok_pkts;
+ storage->rx_bytes = port->stats.rx_ok_bytes;
+ storage->tx_bytes = port->stats.tx_ok_bytes;
+ storage->multicast = port->stats.rx_multicast;
+ storage->rx_errors = port->stats.rx_errors;
+ storage->rx_dropped = port->stats.rx_drops;
+ storage->tx_dropped = port->stats.tx_drops;
+ storage->rx_crc_errors = port->stats.rx_crc_error;
+ storage->rx_over_errors = port->stats.rx_over_errors;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 msg0 = 0, msg1, len = skb_headlen(skb);
+ int i, qid = skb_get_queue_mapping(skb);
+ struct airoha_eth *eth = port->eth;
+ u32 nr_frags = 1 + sinfo->nr_frags;
+ struct netdev_queue *txq;
+ struct airoha_queue *q;
+ void *data = skb->data;
+ u16 index;
+ u8 fport;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
+
+ /* TSO: fill MSS info in tcp checksum field */
+ if (skb_is_gso(skb)) {
+ if (skb_cow_head(skb, 0))
+ goto error;
+
+ if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(sinfo->gso_size);
+
+ tcp_hdr(skb)->check = (__force __sum16)csum;
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
+ }
+ }
+
+ fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
+ q = &eth->q_tx[qid];
+ if (WARN_ON_ONCE(!q->ndesc))
+ goto error;
+
+ spin_lock_bh(&q->lock);
+
+ txq = netdev_get_tx_queue(dev, qid);
+ if (q->queued + nr_frags > q->ndesc) {
+ /* not enough space in the queue */
+ netif_tx_stop_queue(txq);
+ spin_unlock_bh(&q->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ index = q->head;
+ for (i = 0; i < nr_frags; i++) {
+ struct airoha_qdma_desc *desc = &q->desc[index];
+ struct airoha_queue_entry *e = &q->entry[index];
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+ u32 val;
+
+ addr = dma_map_single(dev->dev.parent, data, len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
+ goto error_unmap;
+
+ index = (index + 1) % q->ndesc;
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
+ if (i < nr_frags - 1)
+ val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
+ WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
+ WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
+
+ e->skb = i ? NULL : skb;
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+ airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+ }
+
+ q->head = index;
+ q->queued += i;
+
+ skb_tx_timestamp(skb);
+ if (q->ndesc - q->queued < q->free_thr)
+ netif_tx_stop_queue(txq);
+
+ spin_unlock_bh(&q->lock);
+
+ return NETDEV_TX_OK;
+
+error_unmap:
+ for (i--; i >= 0; i++)
+ dma_unmap_single(dev->dev.parent, q->entry[i].dma_addr,
+ q->entry[i].dma_len, DMA_TO_DEVICE);
+
+ spin_unlock_bh(&q->lock);
+error:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static void airoha_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->eth;
+
+ strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
+}
+
+static void airoha_ethtool_get_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
+ stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
+ stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {},
+};
+
+static void
+airoha_ethtool_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_hw_stats *hw_stats = &port->stats;
+ unsigned int start;
+
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->tx_len) + 1);
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->rx_len) + 1);
+
+ *ranges = airoha_ethtool_rmon_ranges;
+ airoha_update_hw_stats(port);
+ do {
+ int i;
+
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->fragments = hw_stats->rx_fragment;
+ stats->jabbers = hw_stats->rx_jabber;
+ for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
+ i++) {
+ stats->hist[i] = hw_stats->rx_len[i];
+ stats->hist_tx[i] = hw_stats->tx_len[i];
+ }
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static const struct net_device_ops airoha_netdev_ops = {
+ .ndo_init = airoha_dev_init,
+ .ndo_open = airoha_dev_open,
+ .ndo_stop = airoha_dev_stop,
+ .ndo_start_xmit = airoha_dev_xmit,
+ .ndo_get_stats64 = airoha_dev_get_stats64,
+ .ndo_set_mac_address = airoha_dev_set_macaddr,
+};
+
+static const struct ethtool_ops airoha_ethtool_ops = {
+ .get_drvinfo = airoha_ethtool_get_drvinfo,
+ .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
+ .get_rmon_stats = airoha_ethtool_get_rmon_stats,
+};
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+{
+ const __be32 *id_ptr = of_get_property(np, "reg", NULL);
+ struct airoha_gdm_port *port;
+ struct net_device *dev;
+ int err, index;
+ u32 id;
+
+ if (!id_ptr) {
+ dev_err(eth->dev, "missing gdm port id\n");
+ return -EINVAL;
+ }
+
+ id = be32_to_cpup(id_ptr);
+ index = id - 1;
+
+ if (!id || id > ARRAY_SIZE(eth->ports)) {
+ dev_err(eth->dev, "invalid gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ if (eth->ports[index]) {
+ dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
+ AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
+ if (!dev) {
+ dev_err(eth->dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+
+ dev->netdev_ops = &airoha_netdev_ops;
+ dev->ethtool_ops = &airoha_ethtool_ops;
+ dev->max_mtu = AIROHA_MAX_MTU;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO;
+ dev->features |= dev->hw_features;
+ dev->dev.of_node = np;
+ SET_NETDEV_DEV(dev, eth->dev);
+
+ err = of_get_ethdev_address(np, dev);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ eth_hw_addr_random(dev);
+ dev_info(eth->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ port = netdev_priv(dev);
+ u64_stats_init(&port->stats.syncp);
+ spin_lock_init(&port->stats.lock);
+ port->dev = dev;
+ port->eth = eth;
+ port->id = id;
+ eth->ports[index] = port;
+
+ return register_netdev(dev);
+}
+
+static int airoha_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct airoha_eth *eth;
+ int i, err;
+
+ eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+ if (!eth)
+ return -ENOMEM;
+
+ eth->dev = &pdev->dev;
+
+ err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(eth->dev, "failed configuring DMA mask\n");
+ return err;
+ }
+
+ eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(eth->fe_regs))
+ return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
+ "failed to iomap fe regs\n");
+
+ eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
+ if (IS_ERR(eth->qdma_regs))
+ return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
+ "failed to iomap qdma regs\n");
+
+ eth->rsts[0].id = "fe";
+ eth->rsts[1].id = "pdma";
+ eth->rsts[2].id = "qdma";
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ ARRAY_SIZE(eth->rsts),
+ eth->rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk reset lines\n");
+ return err;
+ }
+
+ eth->xsi_rsts[0].id = "xsi-mac";
+ eth->xsi_rsts[1].id = "hsi0-mac";
+ eth->xsi_rsts[2].id = "hsi1-mac";
+ eth->xsi_rsts[3].id = "hsi-mac";
+ eth->xsi_rsts[4].id = "xfp-mac";
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ ARRAY_SIZE(eth->xsi_rsts),
+ eth->xsi_rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
+ return err;
+ }
+
+ spin_lock_init(&eth->irq_lock);
+ eth->irq = platform_get_irq(pdev, 0);
+ if (eth->irq < 0)
+ return eth->irq;
+
+ eth->napi_dev = alloc_netdev_dummy(0);
+ if (!eth->napi_dev)
+ return -ENOMEM;
+
+ /* Enable threaded NAPI by default */
+ eth->napi_dev->threaded = true;
+ strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
+ platform_set_drvdata(pdev, eth);
+
+ err = airoha_hw_init(eth);
+ if (err)
+ goto error;
+
+ airoha_qdma_start_napi(eth);
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_compatible(np, "airoha,eth-mac"))
+ continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ err = airoha_alloc_gdm_port(eth, np);
+ if (err) {
+ of_node_put(np);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (port && port->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(port->dev);
+ }
+ free_netdev(eth->napi_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void airoha_remove(struct platform_device *pdev)
+{
+ struct airoha_eth *eth = platform_get_drvdata(pdev);
+ int i;
+
+ airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (!port)
+ continue;
+
+ airoha_dev_stop(port->dev);
+ unregister_netdev(port->dev);
+ }
+ free_netdev(eth->napi_dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id of_airoha_match[] = {
+ { .compatible = "airoha,en7581-eth" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver airoha_driver = {
+ .probe = airoha_probe,
+ .remove_new = airoha_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_airoha_match,
+ },
+};
+module_platform_driver(airoha_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
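(Editorial aside, not part of the patch: the MIB accumulation in airoha_update_hw_stats() above reads each 64-bit hardware counter as a high/low 32-bit register pair and folds it into a running software total. The standalone sketch below only illustrates that high-then-low composition; fake_rd32() and the register offsets are made-up stand-ins, not the driver's helpers.)

    /* Illustrative sketch (not the driver's API): compose a 64-bit MIB
     * counter from a high/low 32-bit register pair and accumulate it,
     * mirroring the pattern used in airoha_update_hw_stats().
     */
    #include <stdint.h>
    #include <stdio.h>

    #define REG_CNT_H 0x0 /* hypothetical register offsets */
    #define REG_CNT_L 0x4

    static uint32_t fake_rd32(uint32_t reg)
    {
        /* Pretend the hardware counter currently holds 0x1_0000_0002. */
        return reg == REG_CNT_H ? 0x1 : 0x2;
    }

    int main(void)
    {
        uint64_t total = 0;

        /* High word first, shifted into the upper 32 bits, then the low word. */
        total += (uint64_t)fake_rd32(REG_CNT_H) << 32;
        total += fake_rd32(REG_CNT_L);

        printf("accumulated counter: 0x%llx\n", (unsigned long long)total);
        return 0;
    }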
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index cae46290a7ae..0cc2dd85652f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_map = {
.fq_blen = 0x1b2c,
},
.gdm1_cnt = 0x2400,
- .gdma_to_ppe = 0x4444,
+ .gdma_to_ppe = {
+ [0] = 0x4444,
+ },
.ppe_base = 0x0c00,
.wdma_base = {
[0] = 0x2800,
@@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ },
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
@@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_reg_map = {
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ [2] = 0xcccc,
+ },
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
@@ -1131,9 +1140,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_QDMA_RING_SIZE;
+ int cnt = soc->tx.fq_dma_size;
dma_addr_t dma_addr;
- int i;
+ int i, j, len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
eth->scratch_ring = eth->sram_base;
@@ -1142,40 +1151,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
+
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- if (unlikely(!eth->scratch_head))
- return -ENOMEM;
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
- dma_addr = dma_map_single(eth->dma_dev,
- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+ if (unlikely(!eth->scratch_head[j]))
+ return -ENOMEM;
- for (i = 0; i < cnt; i++) {
- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
- struct mtk_tx_dma_v2 *txd;
+ dma_addr = dma_map_single(eth->dma_dev,
+ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
- txd = eth->scratch_ring + i * soc->tx.desc_size;
- txd->txd1 = addr;
- if (i < cnt - 1)
- txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->tx.desc_size;
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
- txd->txd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
- txd->txd5 = 0;
- txd->txd6 = 0;
- txd->txd7 = 0;
- txd->txd8 = 0;
+ for (i = 0; i < len; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (j * MTK_FQ_DMA_LENGTH + i < cnt - 1)
+ txd->txd2 = eth->phy_scratch_ring +
+ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+ txd->txd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
}
@@ -2009,6 +2024,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
dma_addr_t dma_addr = DMA_MAPPING_ERROR;
+ int ppe_idx = 0;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
@@ -2052,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
netdev = eth->netdev[mac];
+ ppe_idx = eth->mac[mac]->ppe_idx;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
@@ -2175,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
}
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+ mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -2457,7 +2474,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
else
- ring_size = MTK_DMA_SIZE;
+ ring_size = soc->tx.dma_size;
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
@@ -2465,8 +2482,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
goto no_tx_mem;
if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
- ring->dma = eth->sram_base + ring_size * sz;
- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+ ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
} else {
ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL);
@@ -2588,6 +2605,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size, tx_ring_size;
int i;
@@ -2595,7 +2613,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
tx_ring_size = MTK_QDMA_RING_SIZE;
else
- tx_ring_size = MTK_DMA_SIZE;
+ tx_ring_size = soc->tx.dma_size;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
@@ -2610,7 +2628,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
} else {
rx_data_len = ETH_DATA_LEN;
- rx_dma_size = MTK_DMA_SIZE;
+ rx_dma_size = soc->rx.dma_size;
}
ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -3139,7 +3157,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
mtk_rx_clean(eth, &eth->rx_ring[i], false);
}
- kfree(eth->scratch_head);
+ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+ kfree(eth->scratch_head[i]);
+ eth->scratch_head[i] = NULL;
+ }
}
static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -3266,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
return 0;
}
-static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
{
- int i;
+ u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- u32 val;
+ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
- if (!eth->netdev[i])
- continue;
+ /* default setup the forward port to send frame to PDMA */
+ val &= ~0xffff;
- val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
- /* default setup the forward port to send frame to PDMA */
- val &= ~0xffff;
+ val |= config;
- /* Enable RX checksum */
- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+ if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
+ val |= MTK_GDMA_SPECIAL_TAG;
- val |= config;
-
- if (netdev_uses_dsa(eth->netdev[i]))
- val |= MTK_GDMA_SPECIAL_TAG;
-
- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
- }
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
}
@@ -3356,7 +3367,10 @@ static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- int i, err;
+ struct mtk_mac *target_mac;
+ int i, err, ppe_num;
+
+ ppe_num = eth->soc->ppe_num;
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
@@ -3380,18 +3394,38 @@ static int mtk_open(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_start(eth->ppe[i]);
- gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
- : MTK_GDMA_TO_PDMA;
- mtk_gdm_config(eth, gdm_config);
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ target_mac = netdev_priv(eth->netdev[i]);
+ if (!soc->offload_version) {
+ target_mac->ppe_idx = 0;
+ gdm_config = MTK_GDMA_TO_PDMA;
+ } else if (ppe_num >= 3 && target_mac->id == 2) {
+ target_mac->ppe_idx = 2;
+ gdm_config = soc->reg_map->gdma_to_ppe[2];
+ } else if (ppe_num >= 2 && target_mac->id == 1) {
+ target_mac->ppe_idx = 1;
+ gdm_config = soc->reg_map->gdma_to_ppe[1];
+ } else {
+ target_mac->ppe_idx = 0;
+ gdm_config = soc->reg_map->gdma_to_ppe[0];
+ }
+ mtk_gdm_config(eth, target_mac->id, gdm_config);
+ }
+ /* Reset and enable PSE */
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
- }
- else
+ } else {
refcount_inc(&eth->dma_refcnt);
+ }
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
@@ -3468,7 +3502,8 @@ static int mtk_stop(struct net_device *dev)
if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
@@ -4429,6 +4464,20 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(mac->phylink, pause);
+}
+
+static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(mac->phylink, pause);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4457,8 +4506,10 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_pauseparam = mtk_get_pauseparam,
+ .set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
- .set_rxnfc = mtk_set_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4949,23 +5000,24 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
+ u8 ppe_num = eth->soc->ppe_num;
- num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
- for (i = 0; i < num_ppe; i++) {
- u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+ ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
+ for (i = 0; i < ppe_num; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base;
+ ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
if (!eth->ppe[i]) {
err = -ENOMEM;
goto err_deinit_ppe;
}
- }
+ err = mtk_eth_offload_init(eth, i);
- err = mtk_eth_offload_init(eth);
- if (err)
- goto err_deinit_ppe;
+ if (err)
+ goto err_deinit_ppe;
+ }
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -5052,11 +5104,14 @@ static const struct mtk_soc_data mt2701_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5070,17 +5125,21 @@ static const struct mtk_soc_data mt7621_data = {
.required_pctl = false,
.version = 1,
.offload_version = 1,
+ .ppe_num = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.tx = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5095,6 +5154,7 @@ static const struct mtk_soc_data mt7622_data = {
.required_pctl = false,
.version = 1,
.offload_version = 2,
+ .ppe_num = 1,
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
@@ -5102,11 +5162,14 @@ static const struct mtk_soc_data mt7622_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5120,6 +5183,7 @@ static const struct mtk_soc_data mt7623_data = {
.required_pctl = true,
.version = 1,
.offload_version = 1,
+ .ppe_num = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
@@ -5127,11 +5191,14 @@ static const struct mtk_soc_data mt7623_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5150,11 +5217,14 @@ static const struct mtk_soc_data mt7629_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5169,6 +5239,7 @@ static const struct mtk_soc_data mt7981_data = {
.required_pctl = false,
.version = 2,
.offload_version = 2,
+ .ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
@@ -5176,6 +5247,8 @@ static const struct mtk_soc_data mt7981_data = {
.desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
@@ -5183,6 +5256,7 @@ static const struct mtk_soc_data mt7981_data = {
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5195,6 +5269,7 @@ static const struct mtk_soc_data mt7986_data = {
.required_pctl = false,
.version = 2,
.offload_version = 2,
+ .ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
@@ -5202,6 +5277,8 @@ static const struct mtk_soc_data mt7986_data = {
.desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
@@ -5209,6 +5286,7 @@ static const struct mtk_soc_data mt7986_data = {
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5221,6 +5299,7 @@ static const struct mtk_soc_data mt7988_data = {
.required_pctl = false,
.version = 3,
.offload_version = 2,
+ .ppe_num = 3,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
@@ -5228,6 +5307,8 @@ static const struct mtk_soc_data mt7988_data = {
.desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(4K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma_v2),
@@ -5235,6 +5316,7 @@ static const struct mtk_soc_data mt7988_data = {
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5249,6 +5331,7 @@ static const struct mtk_soc_data rt5350_data = {
.desc_size = sizeof(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
.rx = {
.desc_size = sizeof(struct mtk_rx_dma),
@@ -5256,6 +5339,7 @@ static const struct mtk_soc_data rt5350_data = {
.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
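(Editorial aside, not part of the patch: the fq DMA rework above splits the scratch buffer into MTK_FQ_DMA_LENGTH-sized chunks, with the last chunk possibly shorter. A minimal standalone sketch of that chunking arithmetic follows; CHUNK, chunk_len() and the sample cnt are local illustrations, not kernel identifiers.)

    /* Standalone illustration of the chunk split used in mtk_init_fq_dma():
     * cnt descriptors are covered by ceil(cnt / CHUNK) allocations, where
     * the last chunk may be shorter. Plain C stand-ins, not kernel code.
     */
    #include <stdio.h>

    #define CHUNK 2048 /* mirrors MTK_FQ_DMA_LENGTH */

    static int chunk_len(int cnt, int j)
    {
        int len = cnt - j * CHUNK;

        return len > CHUNK ? CHUNK : len; /* min_t(int, ...) equivalent */
    }

    int main(void)
    {
        int cnt = 4096; /* e.g. a 4K fq ring, as with MTK_DMA_SIZE(4K) */
        int nchunks = (cnt + CHUNK - 1) / CHUNK; /* DIV_ROUND_UP */
        int j;

        for (j = 0; j < nchunks; j++)
            printf("chunk %d: %d descriptors\n", j, chunk_len(cnt, j));

        return 0;
    }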
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 4eab30b44070..eb1708b43aa3 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -32,7 +32,9 @@
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
-#define MTK_DMA_SIZE 512
+#define MTK_DMA_SIZE(x) (SZ_##x)
+#define MTK_FQ_DMA_HEAD 32
+#define MTK_FQ_DMA_LENGTH 2048
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -1130,7 +1132,7 @@ struct mtk_reg_map {
u32 tx_sch_rate; /* tx scheduler rate control registers */
} qdma;
u32 gdm1_cnt;
- u32 gdma_to_ppe;
+ u32 gdma_to_ppe[3];
u32 ppe_base;
u32 wdma_base[3];
u32 pse_iq_sta;
@@ -1168,6 +1170,7 @@ struct mtk_soc_data {
u8 offload_version;
u8 hash_offset;
u8 version;
+ u8 ppe_num;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;
@@ -1176,6 +1179,8 @@ struct mtk_soc_data {
u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
+ u32 dma_size;
+ u32 fq_dma_size;
} tx;
struct {
u32 desc_size;
@@ -1183,6 +1188,7 @@ struct mtk_soc_data {
u32 dma_l4_valid;
u32 dma_max_len;
u32 dma_len_offset;
+ u32 dma_size;
} rx;
};
@@ -1264,7 +1270,7 @@ struct mtk_eth {
struct napi_struct rx_napi;
void *scratch_ring;
dma_addr_t phy_scratch_ring;
- void *scratch_head;
+ void *scratch_head[MTK_FQ_DMA_HEAD];
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
@@ -1289,7 +1295,7 @@ struct mtk_eth {
struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
- struct mtk_ppe *ppe[2];
+ struct mtk_ppe *ppe[3];
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
@@ -1314,6 +1320,7 @@ struct mtk_eth {
struct mtk_mac {
int id;
phy_interface_t interface;
+ u8 ppe_idx;
int speed;
struct device_node *of_node;
struct phylink *phylink;
@@ -1435,7 +1442,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
-int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 691806bca372..223f709e2704 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -8,7 +8,7 @@
#include <linux/bitfield.h>
#include <linux/rhashtable.h>
-#define MTK_PPE_ENTRIES_SHIFT 3
+#define MTK_PPE_ENTRIES_SHIFT 4
#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
#define MTK_PPE_WAIT_TIMEOUT_US 1000000
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index aa262e6f4b85..f20bb390df3a 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -245,10 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
int ppe_index)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct net_device *idev = NULL, *odev = NULL;
struct flow_action_entry *act;
struct mtk_flow_data data = {};
struct mtk_foe_entry foe;
- struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
int wed_index = -1;
@@ -264,6 +264,17 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
struct flow_match_meta match;
flow_rule_match_meta(rule, &match);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) {
+ struct mtk_mac *mac = netdev_priv(idev);
+
+ if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
+ return -EINVAL;
+
+ ppe_index = mac->ppe_idx;
+ }
+ }
} else {
return -EOPNOTSUPP;
}
@@ -637,7 +648,9 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-int mtk_eth_offload_init(struct mtk_eth *eth)
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
{
+ if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
+ return 0;
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 31aebeb2e285..25989c79c92e 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
struct mtk_star_priv *priv;
+ struct phy_device *phydev;
struct net_device *ndev;
struct device *dev;
void __iomem *base;
@@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
+ phydev = of_phy_find_device(priv->phy_node);
+ if (phydev) {
+ phydev->mac_managed_pm = true;
+ put_device(&phydev->mdio.dev);
+ }
+
return devm_register_netdev(dev, ndev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1184ac5751e1..461cc2c79c71 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -126,6 +126,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
+ irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
}
if (cq->type == RX)
@@ -142,18 +143,23 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err)
goto free_eq;
+ cq->cq_idx = cq_idx;
cq->mcq.event = mlx4_en_cq_event;
switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
break;
case TX_XDP:
/* nothing regarding napi, it's shared with rx ring */
@@ -189,6 +195,14 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
if (cq->type != TX_XDP) {
+ enum netdev_queue_type qtype;
+
+ if (cq->type == RX)
+ qtype = NETDEV_QUEUE_TYPE_RX;
+ else
+ qtype = NETDEV_QUEUE_TYPE_TX;
+
+ netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL);
napi_disable(&cq->napi);
netif_napi_del(&cq->napi);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 619e1c3ef7f9..943d6918c2ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -450,7 +450,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int index = 0;
int i, strings = 0;
struct bitmap_iterator it;
@@ -459,10 +458,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
for (; i < MLX4_EN_NUM_SELF_TEST; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
break;
case ETH_SS_STATS:
@@ -470,74 +469,56 @@ static void mlx4_en_get_strings(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PORT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PF_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PKT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_XDP_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PHY_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_bytes", i);
+ ethtool_sprintf(&data, "tx%d_packets", i);
+ ethtool_sprintf(&data, "tx%d_bytes", i);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_bytes", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_dropped", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_drop", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect_fail", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx_full", i);
+ ethtool_sprintf(&data, "rx%d_packets", i);
+ ethtool_sprintf(&data, "rx%d_bytes", i);
+ ethtool_sprintf(&data, "rx%d_dropped", i);
+ ethtool_sprintf(&data, "rx%d_xdp_drop", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect_fail", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx_full", i);
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx4_en_priv_flags[i]);
+ ethtool_puts(&data, mlx4_en_priv_flags[i]);
break;
}
@@ -1903,7 +1884,7 @@ out:
}
static int mlx4_en_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4c089cfa027a..281b34af0bb4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -43,6 +43,7 @@
#include <net/vxlan.h>
#include <net/devlink.h>
#include <net/rps.h>
+#include <net/netdev_queues.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -2073,6 +2074,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
priv->rx_ring[i]->csum_complete = 0;
+ priv->rx_ring[i]->alloc_fail = 0;
}
}
@@ -3099,6 +3101,77 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
last_i += NUM_PHY_STATS;
}
+static void mlx4_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_rx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->rx_ring[i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+ stats->alloc_fail = READ_ONCE(ring->alloc_fail);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_tx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->tx_ring[TX][i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ if (priv->rx_ring_num) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+ }
+
+ if (priv->tx_ring_num[TX]) {
+ tx->packets = 0;
+ tx->bytes = 0;
+ }
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static const struct netdev_stat_ops mlx4_stat_ops = {
+ .get_queue_stats_rx = mlx4_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx4_get_queue_stats_tx,
+ .get_base_stats = mlx4_get_base_stats,
+};
+
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
@@ -3262,6 +3335,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+ dev->stat_ops = &mlx4_stat_ops;
dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8328df8645d5..15c57e9517e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -82,8 +82,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp))
+ if (mlx4_alloc_page(priv, frags, gfp)) {
+ ring->alloc_fail++;
return -ENOMEM;
+ }
ring->rx_alloc_pages++;
}
rx_desc->data[i].addr = cpu_to_be64(frags->dma +
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 98688e4dbec5..febeadfdd5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -169,12 +169,6 @@ module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
"1 for IB, 2 for Ethernet");
-struct mlx4_port_config {
- struct list_head list;
- enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
- struct pci_dev *pdev;
-};
-
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index efe3f97b874f..28b70dcc652e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -355,6 +355,7 @@ struct mlx4_en_rx_ring {
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
+ unsigned long alloc_fail;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
struct xdp_rxq_info xdp_rxq;
@@ -379,6 +380,7 @@ struct mlx4_en_cq {
#define MLX4_EN_OPCODE_ERROR 0x1e
const struct cpumask *aff_mask;
+ int cq_idx;
};
struct mlx4_en_port_profile {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 76dc5a9b9648..1289475e7be7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
#
# Netdev basic
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e85fb71bf0b4..5fd82c67b6ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -80,6 +80,7 @@ struct page_pool;
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256)
+#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
@@ -146,25 +147,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
- (sizeof(struct mlx5e_umr_wqe) +\
- (sizeof(struct mlx5_klm) * (sgl_len)))
-
-#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))
-
-#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))
-
-#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
- (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
-
-#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
- ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
-
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
-
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
@@ -885,6 +867,8 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
+ struct mlx5e_sq_stats **txq2sq_stats;
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -1014,7 +998,7 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1207,7 +1191,7 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 4d6225e0eec7..1e8b7d330701 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -154,6 +154,19 @@ struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
+
+static inline bool mlx5e_fs_has_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->hw_features & NETIF_F_NTUPLE;
+}
+
+static inline bool mlx5e_fs_want_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->features & NETIF_F_NTUPLE;
+}
+
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index ec819dfc98be..6c9ccccca81e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -1071,18 +1071,18 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
+ int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
- max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
- rest = max_hd_per_wqe % max_klm_per_umr;
- wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
+ max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
+ rest = max_hd_per_wqe % max_ksm_per_umr;
+ wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
if (rest)
- wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
+ wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
wqebbs *= wq_size;
return wqebbs;
}
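The sizing logic above is plain integer arithmetic over the KSM UMR macros. A minimal standalone sketch, using invented sizes (the 64-byte WQEBB, 48-byte UMR header and 16-byte KSM entry are assumptions, not the real mlx5 structure sizes), reproduces the divide-and-round-up pattern:

/* Standalone sketch (not kernel code): KSM UMR sizing arithmetic with
 * made-up sizes, mirroring mlx5e_shampo_icosq_sz() above.
 */
#include <stdio.h>

#define WQEBB_SZ	64	/* assumed send WQE basic block size */
#define UMR_CTRL_SZ	48	/* assumed fixed UMR WQE header size */
#define KSM_SZ		16	/* assumed size of one KSM entry */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define KSM_UMR_WQE_SZ(n)	(UMR_CTRL_SZ + KSM_SZ * (n))
#define KSM_UMR_WQEBBS(n)	DIV_ROUND_UP(KSM_UMR_WQE_SZ(n), WQEBB_SZ)

int main(void)
{
	int max_ksm_per_umr = 32;	/* assumed per-WQE KSM budget */
	int max_hd_per_wqe = 100;	/* assumed SHAMPO headers per RQ WQE */
	int full = max_hd_per_wqe / max_ksm_per_umr;
	int rest = max_hd_per_wqe % max_ksm_per_umr;
	int wqebbs = KSM_UMR_WQEBBS(max_ksm_per_umr) * full;

	if (rest)
		wqebbs += KSM_UMR_WQEBBS(rest);
	printf("ICOSQ budget: %d WQEBBs per RQ WQE\n", wqebbs);
	return 0;
}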
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 6743806b8480..f0744a45db92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -170,6 +170,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
priv->txq2sq[qid] = sq;
+ priv->txq2sq_stats[qid] = sq->stats;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -186,6 +187,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
+ u16 txq_ix;
sq = mlx5e_get_qos_sq(priv, qid);
if (!sq) /* Handle the case when the SQ failed to open. */
@@ -194,7 +196,10 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+ txq_ix = mlx5e_qid_from_qos(&priv->channels, qid);
+
+ priv->txq2sq[txq_ix] = NULL;
+ priv->txq2sq_stats[txq_ix] = NULL;
/* Make the change to txq2sq visible before the queue is started again.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -325,6 +330,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
struct mlx5e_params *params = &c->priv->channels.params;
struct mlx5e_txqsq __rcu **qos_sqs;
+ u16 txq_ix;
int i;
qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
@@ -342,8 +348,11 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
+ txq_ix = mlx5e_qid_from_qos(&c->priv->channels, qid);
+
/* The queue is disabled, no synchronization with datapath is needed. */
- c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+ c->priv->txq2sq[txq_ix] = NULL;
+ c->priv->txq2sq_stats[txq_ix] = NULL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index fadfa8b50beb..8cf8ba2622f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv {
struct rhashtable ct_tuples_nat_ht;
struct mlx5_flow_table *ct;
struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_group *ct_nat_miss_group;
+ struct mlx5_flow_handle *ct_nat_miss_rule;
struct mlx5e_post_act *post_act;
struct mutex control_lock; /* guards parallel adds/dels */
struct mapping_ctx *zone_mapping;
@@ -141,6 +143,8 @@ struct mlx5_ct_counter {
enum {
MLX5_CT_ENTRY_FLAG_VALID,
+ MLX5_CT_ENTRY_IN_CT_TABLE,
+ MLX5_CT_ENTRY_IN_CT_NAT_TABLE,
};
struct mlx5_ct_entry {
@@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = {
};
static bool
-mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry)
{
- return !!(entry->tuple_nat_node.next);
+ return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
+}
+
+static bool
+mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry)
+{
+ return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
}
static int
@@ -526,8 +536,10 @@ static void
mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
atomic_dec(&ct_priv->debugfs.stats.offloaded);
}
@@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
&zone_rule->mh,
zone_restore_id,
nat,
- mlx5_tc_ct_entry_has_nat(entry));
+ mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -888,7 +900,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
*old_attr = *attr;
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
- nat, mlx5_tc_ct_entry_has_nat(entry));
+ nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -957,11 +969,13 @@ static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
{
struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
- tuples_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+ tuples_ht_params);
}
static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
@@ -1100,21 +1114,26 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
return err;
}
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- goto err_orig;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ goto err_orig;
+ }
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- goto err_nat;
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err)
+ goto err_nat;
+ }
atomic_inc(&ct_priv->debugfs.stats.offloaded);
return 0;
err_nat:
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
@@ -1126,17 +1145,21 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry,
u8 zone_restore_id)
{
- int err;
+ int err = 0;
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- return err;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ return err;
+ }
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err && mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ }
return err;
}
@@ -1224,18 +1247,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_entries;
- err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
- if (err)
- goto err_tuple;
-
if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
if (err)
goto err_tuple_nat;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
+ }
+
+ if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
+ if (err)
+ goto err_tuple;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
}
spin_unlock_bh(&ct_priv->ht_lock);
@@ -1251,17 +1280,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_rules:
spin_lock_bh(&ct_priv->ht_lock);
- if (mlx5_tc_ct_entry_has_nat(entry))
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
-err_tuple_nat:
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
err_tuple:
- rhashtable_remove_fast(&ft->ct_entries_ht,
- &entry->node,
- cts_ht_params);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+err_tuple_nat:
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
err_entries:
spin_unlock_bh(&ct_priv->ht_lock);
err_set:
@@ -2149,6 +2171,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
debugfs_remove_recursive(ct_priv->debugfs.root);
}
+static struct mlx5_flow_handle *
+tc_ct_add_miss_rule(struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from,
+ struct mlx5_flow_table *to,
+ struct mlx5_flow_group **miss_group,
+ struct mlx5_flow_handle **miss_rule)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+ unsigned int max_fte = from->max_fte;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* create miss group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ max_fte - 1);
+ group = mlx5_create_flow_group(from, flow_group_in);
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto err_miss_grp;
+ }
+
+ /* add miss rule to next fdb */
+ rule = tc_ct_add_miss_rule(from, to);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_miss_rule;
+ }
+
+ *miss_group = group;
+ *miss_rule = rule;
+ kvfree(flow_group_in);
+ return 0;
+
+err_miss_rule:
+ mlx5_destroy_flow_group(group);
+err_miss_grp:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void
+tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group,
+ struct mlx5_flow_handle *miss_rule)
+{
+ mlx5_del_flow_rules(miss_rule);
+ mlx5_destroy_flow_group(miss_group);
+}
+
#define INIT_ERR_PREFIX "tc ct offload init failed"
struct mlx5_tc_ct_priv *
@@ -2212,6 +2304,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
goto err_ct_nat_tbl;
}
+ err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct,
+ &ct_priv->ct_nat_miss_group,
+ &ct_priv->ct_nat_miss_rule);
+ if (err)
+ goto err_ct_zone_ht;
+
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
@@ -2273,6 +2371,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
ct_priv->fs_ops->destroy(ct_priv->fs);
kfree(ct_priv->fs);
+ tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule);
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
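A rough standalone sketch of the bookkeeping idea behind the new CT entry flags: record at insert time which tuple table an entry actually went into, then only undo that on teardown. The flag names and the single-table policy shown here are simplifications of the driver's enum and logic:

/* Standalone sketch (not kernel code): per-entry membership flags. */
#include <stdio.h>

#define IN_CT_TABLE	(1u << 0)	/* stand-ins for the driver's enum bits */
#define IN_CT_NAT_TABLE	(1u << 1)

int main(void)
{
	unsigned int flags = 0;
	int tuple_changed_by_nat = 1;	/* assumed: this entry is NATed */

	/* Insert time: record exactly which table the entry went into. */
	if (tuple_changed_by_nat)
		flags |= IN_CT_NAT_TABLE;
	else
		flags |= IN_CT_TABLE;

	/* Teardown: only undo what was actually added. */
	if (flags & IN_CT_NAT_TABLE)
		printf("remove from ct_tuples_nat_ht\n");
	if (flags & IN_CT_TABLE)
		printf("remove from ct_tuples_ht\n");
	return 0;
}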
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 8dfb57f712b0..721f35e59757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -850,6 +850,12 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
flow_rule_match_enc_control(rule, &match);
addr_type = match.key->addr_type;
+ if (flow_rule_has_enc_control_flags(match.mask->flags,
+ extack)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
/* For tunnel addr_type used same key id`s as for non-tunnel */
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 879d698b6119..5ec468268d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,6 +6,8 @@
#include "en.h"
#include <linux/indirect_call_wrapper.h>
+#include <net/ip6_checksum.h>
+#include <net/tcp.h>
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
@@ -34,6 +36,25 @@
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\
+ (sizeof(struct mlx5e_umr_wqe) +\
+ (sizeof(struct mlx5_ksm) * (sgl_len)))
+
+#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB))
+
+#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS))
+
+#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\
+ (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm))
+
+#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\
+ ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)
+
+#define MLX5E_MAX_KSM_PER_WQE(mdev) \
+ MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
+
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
@@ -460,6 +481,41 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
+static inline void
+mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
+{
+ const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ int len;
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
+ return;
+
+ if (skb_is_gso_tcp(skb)) {
+ th = inner_tcp_hdr(skb);
+ len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
+
+ if (ip->version == 4) {
+ th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ uh = (struct udphdr *)skb_inner_transport_header(skb);
+ len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ if (ip->version == 4) {
+ uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ }
+}
+
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
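For the checksum seeding above, the L4 checksum field is primed with the pseudo-header sum so the device can finish the computation over the payload. A small userspace sketch of that IPv4/TCP pseudo-header sum, with arbitrary example addresses and length (illustrative only, not the kernel's csum helpers):

/* Standalone sketch (not kernel code): IPv4 pseudo-header checksum seed. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += len;
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t saddr = ntohl(inet_addr("192.0.2.1"));	/* example addresses */
	uint32_t daddr = ntohl(inet_addr("192.0.2.2"));
	uint16_t len = 1480;	/* assumed gso_size + inner TCP header length */

	/* The driver leaves the (uncomplemented) pseudo-header sum in
	 * th->check so the device only has to add the payload bytes and
	 * complement the result once.
	 */
	printf("seed = 0x%04x\n", pseudo_hdr_sum(saddr, daddr, 6 /* TCP */, len));
	return 0;
}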
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index caa34b9c161e..33e32584b07f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,8 +102,14 @@ static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+ struct udphdr *udphdr;
- udp_hdr(skb)->len = htons(payload_len);
+ if (skb->encapsulation)
+ udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+ else
+ udphdr = udp_hdr(skb);
+
+ udphdr->len = htons(payload_len);
}
struct mlx5e_accel_tx_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index c54fd01ea635..3d274599015b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -989,7 +989,12 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct net *net = dev_net(x->xso.dev);
+ u64 trailer_packets = 0, trailer_bytes = 0;
+ u64 replay_packets = 0, replay_bytes = 0;
+ u64 auth_packets = 0, auth_bytes = 0;
+ u64 success_packets, success_bytes;
u64 packets, bytes, lastuse;
+ size_t headers;
lockdep_assert(lockdep_is_held(&x->lock) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
@@ -999,26 +1004,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
return;
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
- x->stats.integrity_failed += packets;
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
-
- mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
+ &auth_packets, &lastuse);
+ x->stats.integrity_failed += auth_packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
+ &trailer_packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
}
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return;
- mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
- x->curlft.packets += packets;
- x->curlft.bytes += bytes;
-
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
- x->stats.replay += packets;
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
+ &replay_packets, &lastuse);
+ x->stats.replay += replay_packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
}
+
+ mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
+ success_packets = packets - auth_packets - trailer_packets - replay_packets;
+ x->curlft.packets += success_packets;
+	/* The NIC counts all bytes passed through flow steering and doesn't
+	 * have the ability to count the payload data size, which is what
+	 * the SA needs.
+	 *
+	 * To overcome this HW limitation, approximate the payload size by
+	 * removing the always-present headers.
+	 */
+ headers = sizeof(struct ethhdr);
+ if (sa_entry->attrs.family == AF_INET)
+ headers += sizeof(struct iphdr);
+ else
+ headers += sizeof(struct ipv6hdr);
+
+ success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
+ x->curlft.bytes += success_bytes - headers * success_packets;
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
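A quick numeric walk-through of the byte approximation above, with invented counter values; only the Ethernet and IPv4 header sizes are real:

/* Standalone sketch (not kernel code): SA payload approximation. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t packets = 1000, bytes = 1500000;	/* total flow counter (invented) */
	uint64_t auth_packets = 5, auth_bytes = 7500;
	uint64_t trailer_packets = 2, trailer_bytes = 3000;
	uint64_t replay_packets = 3, replay_bytes = 4500;
	size_t headers = 14 + 20;			/* ethhdr + iphdr for IPv4 */
	uint64_t ok_pkts, ok_bytes;

	ok_pkts = packets - auth_packets - trailer_packets - replay_packets;
	ok_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;

	printf("curlft.packets += %llu\n", (unsigned long long)ok_pkts);
	printf("curlft.bytes   += %llu\n",
	       (unsigned long long)(ok_bytes - headers * ok_pkts));
	return 0;
}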
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 41a2543a52cd..e51b03d4c717 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -750,8 +750,7 @@ err_fs:
err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
err_add:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 82064614846f..3cc640669247 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
if (!x || !x->xso.offload_handle)
goto out_disable;
- if (xo->inner_ipproto) {
- /* Cannot support tunnel packet over IPsec tunnel mode
- * because we cannot offload three IP header csum
- */
- if (x->props.mode == XFRM_MODE_TUNNEL)
- goto out_disable;
-
- /* Only support UDP or TCP L4 checksum */
- if (xo->inner_ipproto != IPPROTO_UDP &&
- xo->inner_ipproto != IPPROTO_TCP)
- goto out_disable;
- }
+ /* Only support UDP or TCP L4 checksum */
+ if (xo->inner_ipproto &&
+ xo->inner_ipproto != IPPROTO_UDP &&
+ xo->inner_ipproto != IPPROTO_TCP)
+ goto out_disable;
return features;
@@ -123,6 +116,7 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
+ struct mlx5_core_dev *mdev = sq->mdev;
u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
@@ -132,9 +126,12 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
inner_ipproto = xfrm_offload(skb)->inner_ipproto;
if (inner_ipproto) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
- if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, true);
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ }
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, false);
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3320f12ba2db..279dcb54af14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -525,7 +525,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
+ arfs_enabled = opened && mlx5e_fs_want_arfs(priv->netdev);
if (arfs_enabled)
mlx5e_arfs_disable(priv->fs);
@@ -1658,7 +1658,7 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
}
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1682,7 +1682,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
}
static int mlx5e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 8c5b291a171f..05058710d2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1307,8 +1307,7 @@ int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
return -EOPNOTSUPP;
mlx5e_fs_set_ns(fs, ns, false);
- err = mlx5e_arfs_create_tables(fs, rx_res,
- !!(netdev->hw_features & NETIF_F_NTUPLE));
+ err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev));
if (err) {
fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
netdev->hw_features &= ~NETIF_F_NTUPLE;
@@ -1355,7 +1354,7 @@ err_destroy_ttc_table:
err_destroy_inner_ttc_table:
mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b758bc72ac36..6f686fabed44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_queues.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
@@ -74,6 +75,27 @@
#include "lib/devcom.h"
#include "lib/sd.h"
+static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, shampo))
+ return false;
+
+	/* Our HW-GRO implementation relies on a "KSM Mkey" for
+	 * the SHAMPO header buffer mapping.
+	 */
+ if (!MLX5_CAP_GEN(mdev, fixed_buffer_size))
+ return false;
+
+ if (!MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer_valid))
+ return false;
+
+ if (MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer) >
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
+ return false;
+
+ return true;
+}
+
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
@@ -504,8 +526,8 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
-static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
- u64 nentries,
+static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev,
+ u64 nentries, u8 log_entry_size,
u32 *umr_mkey)
{
int inlen;
@@ -525,12 +547,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET(mkc, mkc, translations_octword_size, nentries);
- MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, log_page_size, log_entry_size);
+ MLX5_SET64(mkc, mkc, len, nentries << log_entry_size);
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
kvfree(in);
@@ -565,14 +588,16 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq)
{
- u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
- if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
- mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_klm_size, rq->mpwqe.shampo->hd_per_wq);
+ if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+ mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
+ max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
return -EINVAL;
}
- return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+
+ return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
&rq->mpwqe.shampo->mkey);
}
@@ -1208,15 +1233,6 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
- u16 len;
-
- len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
- (rq->mpwqe.shampo->hd_per_wq - 1);
- mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
- rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
- }
-
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
@@ -1244,8 +1260,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
- 0, true);
+ mlx5e_shampo_dealloc_hd(rq);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
@@ -3111,6 +3126,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
}
@@ -3125,6 +3141,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
out:
@@ -3886,7 +3903,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_fold_sw_stats64(priv, stats);
}
- stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -4259,13 +4276,19 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
#define MLX5E_HANDLE_FEATURE(feature, handler) \
mlx5e_handle_feature(netdev, &oper_features, feature, handler)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ if (features & (NETIF_F_GRO_HW | NETIF_F_LRO)) {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ } else {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ }
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
@@ -4875,7 +4898,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
- return features;
+ return vxlan_features_check(skb, features);
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
@@ -4890,7 +4913,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
}
out:
- /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
+ /* Disable CSUM and GSO if skb cannot be offloaded by HW */
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
@@ -4901,7 +4924,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
- features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -5277,6 +5299,136 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}
+static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel_stats *channel_stats;
+ struct mlx5e_rq_stats *xskrq_stats;
+ struct mlx5e_rq_stats *rq_stats;
+
+ ASSERT_RTNL();
+ if (mlx5e_is_uplink_rep(priv))
+ return;
+
+ channel_stats = priv->channel_stats[i];
+ xskrq_stats = &channel_stats->xskrq;
+ rq_stats = &channel_stats->rq;
+
+ stats->packets = rq_stats->packets + xskrq_stats->packets;
+ stats->bytes = rq_stats->bytes + xskrq_stats->bytes;
+ stats->alloc_fail = rq_stats->buff_alloc_err +
+ xskrq_stats->buff_alloc_err;
+}
+
+static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sq_stats *sq_stats;
+
+ ASSERT_RTNL();
+	/* No special case is needed for PTP, HTB, etc. since txq2sq_stats is
+	 * kept up to date for active SQs; get_base_stats takes care of
+	 * inactive SQs.
+	 */
+ sq_stats = priv->txq2sq_stats[i];
+ stats->packets = sq_stats->packets;
+ stats->bytes = sq_stats->bytes;
+}
+
+static void mlx5e_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_ptp *ptp_channel;
+ int i, tc;
+
+ ASSERT_RTNL();
+ if (!mlx5e_is_uplink_rep(priv)) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ for (i = priv->channels.params.num_channels; i < priv->stats_nch; i++) {
+ struct netdev_queue_stats_rx rx_i = {0};
+
+ mlx5e_get_queue_stats_rx(dev, i, &rx_i);
+
+ rx->packets += rx_i.packets;
+ rx->bytes += rx_i.bytes;
+ rx->alloc_fail += rx_i.alloc_fail;
+ }
+
+ /* always report PTP RX stats from base as there is no
+ * corresponding channel to report them under in
+ * mlx5e_get_queue_stats_rx.
+ */
+ if (priv->rx_ptp_opened) {
+ struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
+
+ rx->packets += rq_stats->packets;
+ rx->bytes += rq_stats->bytes;
+ }
+ }
+
+ tx->packets = 0;
+ tx->bytes = 0;
+
+ for (i = 0; i < priv->stats_nch; i++) {
+ struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
+
+		/* Handle two cases:
+		 *
+		 * 1. Channels which are active: report only the deactivated
+		 * TCs of these channels.
+		 *
+		 * 2. Channels which were deactivated
+		 * (i >= priv->channels.params.num_channels): all of their TCs
+		 * [0 .. priv->max_opened_tc) must be examined, because
+		 * deactivated channels are not in the range
+		 * [0..real_num_tx_queues) and will not have their stats
+		 * reported by mlx5e_get_queue_stats_tx.
+		 */
+ if (i < priv->channels.params.num_channels)
+ tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ else
+ tc = 0;
+
+ for (; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+
+ /* if PTP TX was opened at some point and has since either:
+	 * - been shut down and set to NULL, or
+ * - simply disabled (bit unset)
+ *
+ * report stats directly from the ptp_stats structures as these queues
+ * are now unavailable and there is no txq index to retrieve these
+ * stats via calls to mlx5e_get_queue_stats_tx.
+ */
+ ptp_channel = priv->channels.ptp;
+ if (priv->tx_ptp_opened && (!ptp_channel || !test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state))) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+}
+
+static const struct netdev_stat_ops mlx5e_stat_ops = {
+ .get_queue_stats_rx = mlx5e_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx5e_get_queue_stats_tx,
+ .get_base_stats = mlx5e_get_base_stats,
+};
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -5294,6 +5446,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->watchdog_timeo = 15 * HZ;
+ netdev->stat_ops = &mlx5e_stat_ops;
netdev->ethtool_ops = &mlx5e_ethtool_ops;
netdev->vlan_features |= NETIF_F_SG;
@@ -5332,6 +5485,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ if (mlx5e_hw_gro_supported(mdev) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5398,8 +5556,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
-#ifdef CONFIG_MLX5_EN_ARFS
+#if IS_ENABLED(CONFIG_MLX5_EN_ARFS)
netdev->hw_features |= NETIF_F_NTUPLE;
+#elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC)
+ netdev->features |= NETIF_F_NTUPLE;
#endif
}
@@ -5573,7 +5733,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
@@ -5589,7 +5749,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
priv->rx_res = NULL;
@@ -5824,9 +5984,13 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->txq2sq)
goto err_destroy_workqueue;
+ priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node);
+ if (!priv->txq2sq_stats)
+ goto err_free_txq2sq;
+
priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
if (!priv->tx_rates)
- goto err_free_txq2sq;
+ goto err_free_txq2sq_stats;
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
@@ -5837,6 +6001,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
err_free_tx_rates:
kfree(priv->tx_rates);
+err_free_txq2sq_stats:
+ kfree(priv->txq2sq_stats);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
@@ -5860,6 +6026,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
kfree(priv->tx_rates);
+ kfree(priv->txq2sq_stats);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
mlx5e_selq_cleanup(&priv->selq);
@@ -5869,6 +6036,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb_qos_sq_stats[i]);
kvfree(priv->htb_qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
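The intent of the stat_ops wiring above is that user-visible totals come from the base stats plus a sum over the per-queue callbacks. A toy aggregation sketch (types and numbers are stand-ins for the kernel's netdev_queue_stats structures):

/* Standalone sketch (not kernel code): base + per-queue aggregation. */
#include <stdint.h>
#include <stdio.h>

struct qstats { uint64_t packets, bytes; };	/* stand-in for netdev_queue_stats_tx */

int main(void)
{
	/* Base covers queues with no txq index anymore (inactive SQs, PTP). */
	struct qstats base = { .packets = 10, .bytes = 5000 };
	struct qstats queue[3] = {			/* per-active-txq callbacks */
		{ 100, 64000 }, { 200, 128000 }, { 50, 32000 },
	};
	struct qstats total = base;

	for (int i = 0; i < 3; i++) {
		total.packets += queue[i].packets;
		total.bytes += queue[i].bytes;
	}
	printf("tx: %llu packets, %llu bytes\n",
	       (unsigned long long)total.packets,
	       (unsigned long long)total.bytes);
	return 0;
}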
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b5333da20e8a..225da8d691fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -523,15 +523,23 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, u32 frag_offset, u32 len,
+ struct mlx5e_frag_page *frag_page,
+ u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(page);
+ dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ u8 next_frag = skb_shinfo(skb)->nr_frags;
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, frag_offset, len, truesize);
+
+ if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
+ skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
+ } else {
+ frag_page->frags++;
+ skb_add_rx_frag(skb, next_frag, frag_page->page,
+ frag_offset, len, truesize);
+ }
}
static inline void
@@ -619,25 +627,25 @@ static int bitmap_find_window(unsigned long *bitmap, int len,
return min(len, count);
}
-static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
- __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
+static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
+ __be32 key, u16 offset, u16 ksm_len)
{
- memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
+ memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
umr_wqe->ctrl.umr_mkey = key;
umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
- | MLX5E_KLM_UMR_DS_CNT(klm_len));
+ | MLX5E_KSM_UMR_DS_CNT(ksm_len));
umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
+ umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
- u16 klm_entries, u16 index)
+ u16 ksm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
@@ -650,20 +658,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
int headroom, i;
headroom = rq->buff.headroom;
- new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
- wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
+ new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
+ entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
+ build_ksm_umr(sq, umr_wqe, shampo->key, index, entries);
frag_page = &shampo->pages[page_index];
for (i = 0; i < entries; i++, index++) {
dma_info = &shampo->info[index];
- if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
- goto update_klm;
+ if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index <
+ MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT))
+ goto update_ksm;
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
@@ -683,12 +691,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
dma_info->frag_page = frag_page;
}
-update_klm:
- umr_wqe->inline_klms[i].bcount =
- cpu_to_be32(MLX5E_RX_MAX_HEAD);
- umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
- umr_wqe->inline_klms[i].va =
- cpu_to_be64(dma_info->addr + headroom);
+update_ksm:
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(lkey),
+ .va = cpu_to_be64(dma_info->addr + headroom),
+ };
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
@@ -720,37 +727,37 @@ err_unmap:
static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 klm_entries, num_wqe, index, entries_before;
+ u16 ksm_entries, num_wqe, index, entries_before;
struct mlx5e_icosq *sq = rq->icosq;
- int i, err, max_klm_entries, len;
+ int i, err, max_ksm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
- klm_entries = bitmap_find_window(shampo->bitmap,
+ max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
+ ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
- if (!klm_entries)
+ if (!ksm_entries)
return 0;
- klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
+ ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
entries_before = shampo->hd_per_wq - index;
- if (unlikely(entries_before < klm_entries))
- num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
- DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
+ if (unlikely(entries_before < ksm_entries))
+ num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
+ DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
else
- num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
+ num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
for (i = 0; i < num_wqe; i++) {
- len = (klm_entries > max_klm_entries) ? max_klm_entries :
- klm_entries;
+ len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
+ ksm_entries;
if (unlikely(index + len > shampo->hd_per_wq))
len = shampo->hd_per_wq - index;
err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
if (unlikely(err))
return err;
index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
- klm_entries -= len;
+ ksm_entries -= len;
}
return 0;
@@ -839,44 +846,28 @@ err:
return err;
}
-/* This function is responsible to dealloc SHAMPO header buffer.
- * close == true specifies that we are in the middle of closing RQ operation so
- * we go over all the entries and if they are not in use we free them,
- * otherwise we only go over a specific range inside the header buffer that are
- * not in use.
- */
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
+static void
+mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- struct mlx5e_frag_page *deleted_page = NULL;
- int hd_per_wq = shampo->hd_per_wq;
- struct mlx5e_dma_info *hd_info;
- int i, index = start;
-
- for (i = 0; i < len; i++, index++) {
- if (index == hd_per_wq)
- index = 0;
-
- if (close && !test_bit(index, shampo->bitmap))
- continue;
+ u64 addr = shampo->info[header_index].addr;
- hd_info = &shampo->info[index];
- hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
- if (hd_info->frag_page && hd_info->frag_page != deleted_page) {
- deleted_page = hd_info->frag_page;
- mlx5e_page_release_fragmented(rq, hd_info->frag_page);
- }
+ if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
- hd_info->frag_page = NULL;
+ dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
+ mlx5e_page_release_fragmented(rq, dma_info->frag_page);
}
+ clear_bit(header_index, shampo->bitmap);
+}
- if (start + len > hd_per_wq) {
- len -= hd_per_wq - start;
- bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
- start = 0;
- }
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int i;
- bitmap_clear(shampo->bitmap, start, len);
+ for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
+ mlx5e_free_rx_shampo_hd_entry(rq, i);
}
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
@@ -1191,9 +1182,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
- tot_len - sizeof(struct iphdr),
- IPPROTO_TCP, check);
+ tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
+ ipv4->saddr, ipv4->daddr, check);
} else {
u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;
@@ -1208,8 +1198,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
- IPPROTO_TCP, check);
+ tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
+ &ipv6->daddr, check);
}
}
@@ -1612,9 +1602,7 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5e_rq_stats *stats = rq->stats;
stats->packets++;
- stats->gro_packets++;
stats->bytes += cqe_bcnt;
- stats->gro_bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
return;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
@@ -1964,30 +1952,24 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
- struct mlx5e_frag_page *frag_page,
- u32 data_bcnt, u32 data_offset)
+mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+ struct mlx5e_frag_page *frag_page,
+ u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
- while (data_bcnt) {
+ do {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
- unsigned int truesize;
-
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- truesize = pg_consumed_bytes;
- else
- truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+ unsigned int truesize = pg_consumed_bytes;
- frag_page->frags++;
- mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
+ mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
frag_page++;
- }
+ } while (data_bcnt);
}
static struct sk_buff *
@@ -2212,8 +2194,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
- prefetchw(hdr);
- prefetch(data);
+ net_prefetchw(hdr);
+ net_prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
@@ -2230,7 +2212,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
@@ -2261,12 +2243,19 @@ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
struct sk_buff *skb = rq->hw_gro_data->skb;
struct mlx5e_rq_stats *stats = rq->stats;
+ u16 gro_count = NAPI_GRO_CB(skb)->count;
- stats->gro_skbs++;
if (likely(skb_shinfo(skb)->nr_frags))
mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
- if (NAPI_GRO_CB(skb)->count > 1)
+ if (gro_count > 1) {
+ stats->gro_skbs++;
+ stats->gro_packets += gro_count;
+ stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
+
mlx5e_shampo_update_hdr(rq, cqe, match);
+ } else {
+ skb_shinfo(skb)->gso_size = 0;
+ }
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
@@ -2279,21 +2268,6 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
-static void
-mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u64 addr = shampo->info[header_index].addr;
-
- if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
-
- dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
- }
- bitmap_clear(shampo->bitmap, header_index, 1);
-}
-
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
@@ -2327,8 +2301,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
goto mpwrq_cqe_out;
}
- stats->gro_match_packets += match;
-
if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
match = false;
mlx5e_shampo_flush_skb(rq, cqe, match);
@@ -2359,21 +2331,30 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
- struct mlx5e_frag_page *frag_page;
+ if (data_bcnt) {
+ struct mlx5e_frag_page *frag_page;
- frag_page = &wi->alloc_units.frag_pages[page_idx];
- mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ } else {
+ stats->hds_nodata_packets++;
+ stats->hds_nodata_bytes += head_size;
+ }
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
- if (flush)
+ if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
- mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ if (likely(head_size))
+ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
+ if (unlikely(!cstrides))
+ return;
+
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
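The per-entry release path above frees the backing page only when the entry is the last header slot on its page. A standalone sketch of that mask test, assuming 4 KiB pages and 512-byte header slots (the real slot size comes from the SHAMPO log-entry-size defines):

/* Standalone sketch (not kernel code): "last slot on its page" test. */
#include <stdio.h>

#define PAGE_SIZE_ASSUMED	4096			/* assumed page size */
#define LOG_HDR_ENTRY_SIZE	9			/* 512-byte header slots */
#define HDR_PER_PAGE		(PAGE_SIZE_ASSUMED >> LOG_HDR_ENTRY_SIZE)	/* 8 */

int main(void)
{
	for (int header_index = 0; header_index < 2 * HDR_PER_PAGE; header_index++)
		if (((header_index + 1) & (HDR_PER_PAGE - 1)) == 0)
			printf("index %d is the last slot of its page -> release page\n",
			       header_index);
	return 0;
}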
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e211c41cec06..e7a3290a708a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -141,8 +141,9 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -343,8 +344,9 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_packets += rq_stats->gro_packets;
s->rx_gro_bytes += rq_stats->gro_bytes;
s->rx_gro_skbs += rq_stats->gro_skbs;
- s->rx_gro_match_packets += rq_stats->gro_match_packets;
s->rx_gro_large_hds += rq_stats->gro_large_hds;
+ s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
+ s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -1186,6 +1188,9 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
ts_stats->err = 0;
ts_stats->lost = 0;
+ if (!ptp)
+ goto out;
+
/* Aggregate stats across all TCs */
for (i = 0; i < ptp->num_tc; i++) {
struct mlx5e_ptp_cq_stats *stats =
@@ -1214,6 +1219,7 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
}
}
+out:
mutex_unlock(&priv->state_lock);
}
@@ -2053,8 +2059,9 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 650732288616..4c5858c1dd82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -153,8 +153,9 @@ struct mlx5e_sw_stats {
u64 rx_gro_packets;
u64 rx_gro_bytes;
u64 rx_gro_skbs;
- u64 rx_gro_match_packets;
u64 rx_gro_large_hds;
+ u64 rx_hds_nodata_packets;
+ u64 rx_hds_nodata_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -352,8 +353,9 @@ struct mlx5e_rq_stats {
u64 gro_packets;
u64 gro_bytes;
u64 gro_skbs;
- u64 gro_match_packets;
u64 gro_large_hds;
+ u64 hds_nodata_packets;
+ u64 hds_nodata_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 099bf1078889..b09e9abd39f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
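The inline-header-size choice above can be illustrated with made-up offsets: for encapsulated UDP L4 GSO only the headers up to and including the inner UDP header are inlined, while TCP GSO keeps the full inner TCP header:

/* Standalone sketch (not kernel code): ihs selection with invented offsets. */
#include <stdio.h>

int main(void)
{
	int inner_transport_offset = 64;	/* assumed outer + inner L2/L3 bytes */
	int udp_hdr_len = 8;
	int inner_tcp_all_headers = 64 + 32;	/* assumed, incl. inner TCP options */
	int gso_udp_l4 = 1;			/* pretend SKB_GSO_UDP_L4 is set */
	int ihs;

	if (gso_udp_l4)
		ihs = inner_transport_offset + udp_hdr_len;
	else
		ihs = inner_tcp_all_headers;
	printf("inline header size = %d bytes\n", ihs);
	return 0;
}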
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 5693986ae656..cb7e7e4104af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -714,7 +714,7 @@ err2:
err1:
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
return err;
}
@@ -730,7 +730,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -918,7 +918,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
- irq = mlx5_irq_affinity_request(pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
if (IS_ERR(irq))
return PTR_ERR(irq);
@@ -1187,7 +1187,6 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int max_dev_eqs;
- int max_eqs_sf;
int num_eqs;
/* If ethernet is disabled we use just a single completion vector to
@@ -1197,14 +1196,16 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
return 1;
- max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ max_dev_eqs = mlx5_max_eq_cap_get(dev);
num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
max_dev_eqs - MLX5_MAX_ASYNC_EQS);
if (mlx5_core_is_sf(dev)) {
- max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
+ int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ?
+ MLX5_CAP_GEN_2(dev, max_num_eqs_24b) :
+ MLX5_COMP_EQS_PER_SF;
+
+ max_eqs_sf = min_t(int, max_eqs_sf,
mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
num_eqs = min_t(int, num_eqs, max_eqs_sf);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 50d2ea323979..a436ce895e45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -6,6 +6,9 @@
#include "helper.h"
#include "ofld.h"
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
static bool
esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *flow_rule;
+ bool created = false;
int err = 0;
+ if (!vport->ingress.acl) {
+ err = acl_ingress_ofld_setup(esw, vport);
+ if (err)
+ return err;
+ created = true;
+ }
+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
flow_act.fg = vport->ingress.offloads.drop_grp;
flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
- goto out;
+ goto err_out;
}
vport->ingress.offloads.drop_rule = flow_rule;
-out:
+
+ return 0;
+err_out:
+ /* Only destroy the ingress ACL created in this function. */
+ if (created)
+ esw_acl_ingress_ofld_cleanup(esw, vport);
return err;
}
@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
}
}
-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int num_ftes = 0;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
- !esw_acl_ingress_prio_tag_enabled(esw, vport))
- return 0;
-
esw_acl_ingress_allow_rule_destroy(vport);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
@@ -347,6 +359,15 @@ group_err:
return err;
}
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ !esw_acl_ingress_prio_tag_enabled(esw, vport))
+ return 0;
+
+ return acl_ingress_ofld_setup(esw, vport);
+}
+
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index d2ebe56c3977..20146a2dc7f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -531,7 +531,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
switch (type) {
case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TASR;
+ ELEMENT_TYPE_CAP_MASK_TSAR;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
return MLX5_CAP_QOS(dev, esw_element_type) &
ELEMENT_TYPE_CAP_MASK_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 88745dc6aed5..578466d69f21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -223,6 +223,7 @@ struct mlx5_vport {
u16 vport;
bool enabled;
+ bool max_eqs_set;
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct mlx5_devlink_port *dl_port;
@@ -579,6 +580,8 @@ int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
u32 max_io_eqs,
struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 592143d5e1da..768199d2255a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -68,6 +68,7 @@
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
#define MLX5_ESW_MAX_CTRL_EQS 4
+#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
@@ -4600,20 +4601,26 @@ mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
return -EOPNOTSUPP;
}
+ if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support getting the max number of EQs");
+ return -EOPNOTSUPP;
+ }
+
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx)
return -ENOMEM;
mutex_lock(&esw->state_lock);
err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
- MLX5_CAP_GENERAL);
+ MLX5_CAP_GENERAL_2);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
goto out;
}
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs);
+ max_eqs = MLX5_GET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b);
if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
*max_io_eqs = 0;
else
@@ -4644,6 +4651,12 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
return -EOPNOTSUPP;
}
+ if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support changing the max number of EQs");
+ return -EOPNOTSUPP;
+ }
+
if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
return -EINVAL;
@@ -4655,22 +4668,34 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
mutex_lock(&esw->state_lock);
err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
- MLX5_CAP_GENERAL);
+ MLX5_CAP_GENERAL_2);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
goto out;
}
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs);
+ MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs);
+
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1);
err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
-
+ vport->max_eqs_set = true;
out:
mutex_unlock(&esw->state_lock);
kfree(query_ctx);
return err;
}
+
+int
+mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return mlx5_devlink_port_fn_max_io_eqs_set(port,
+ MLX5_ESW_DEFAULT_SF_COMP_EQS,
+ extack);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 32cdacc34a0d..a47d6419160d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3353,9 +3353,9 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
- strcpy(ctx->val.vstr, "smfs");
+ strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
else
- strcpy(ctx->val.vstr, "dmfs");
+ strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2d95a9b7b44e..b61b7d966114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ return -EACCES;
+ }
cond_resched();
} while (!time_after(jiffies, end));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index ad38e31822df..a6329ca2d9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -248,6 +248,10 @@ recover_from_sw_reset:
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ goto unlock;
+ }
msleep(20);
} while (!time_after(jiffies, end));
@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
return -ENODEV;
}
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+ return -EACCES;
+ }
msleep(100);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 779d92b762d3..905bdbaffb9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -136,7 +136,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
}
static int mlx5i_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 8e0404c0d1ca..0979d672d47f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -372,7 +372,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
mlx5e_fs_set_ns(priv->fs, ns, false);
err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_fs_has_arfs(priv->netdev));
if (err) {
netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
@@ -391,8 +391,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
return err;
}
@@ -400,8 +399,7 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv->fs);
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
mlx5e_ethtool_cleanup_steering(priv->fs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 612e666ec263..f7b01b3f0cba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -112,15 +112,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
/**
* mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @dev: mlx5 core device which is requesting the IRQ.
* @pool: IRQ pool to request from.
* @af_desc: affinity descriptor for this IRQ.
*
* This function returns a pointer to IRQ, or ERR_PTR in case of error.
*/
struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
struct mlx5_irq *least_loaded_irq, *new_irq;
+ int ret;
mutex_lock(&pool->lock);
least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
@@ -153,6 +156,16 @@ out:
mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
mutex_unlock(&pool->lock);
+ if (mlx5_irq_pool_is_sf_pool(pool)) {
+ ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(least_loaded_irq));
+ if (ret) {
+ mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n",
+ mlx5_irq_get_irq(least_loaded_irq), ret);
+ mlx5_irq_put(least_loaded_irq);
+ least_loaded_irq = ERR_PTR(ret);
+ }
+ }
return least_loaded_irq;
}
@@ -164,6 +177,9 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
synchronize_irq(pci_irq_vector(pool->dev->pdev,
mlx5_irq_get_index(irq)));
+ if (mlx5_irq_pool_is_sf_pool(pool))
+ auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(irq));
if (mlx5_irq_put(irq))
if (pool->irqs_per_cpu)
cpu_put(pool, cpu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index f7f0476a4a58..d0871c46b8c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -719,6 +719,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev;
u8 mode;
#endif
+ bool roce_support;
int i;
for (i = 0; i < ldev->ports; i++)
@@ -743,6 +744,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
+ roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
+ for (i = 1; i < ldev->ports; i++)
+ if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+ return false;
+
return true;
}
@@ -910,8 +916,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++)
- mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ for (i = 1; i < ldev->ports; i++) {
+ if (mlx5_get_roce_state(ldev->pf[i].dev))
+ mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ }
} else if (shared_fdb) {
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index c16b462ddedf..ab2717012b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- while (i--)
- while (j--)
+ do {
+ while (j--) {
+ idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
+ }
+ j = ldev->buckets;
+ } while (i--);
goto destroy_fg;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
index 6b774e0c2766..d0b595ba6110 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
ret = -EBUSY;
goto pci_unlock;
}
+ if (pci_channel_offline(dev->pdev)) {
+ ret = -EACCES;
+ goto pci_unlock;
+ }
/* Check if semaphore is already locked */
ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index dd5d186dc614..f6deb5a3f820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
{
- /* Feature is currently implemented for PFs only */
- if (!mlx5_core_is_pf(dev))
- return false;
-
/* Honor the SW implementation limit */
if (host_buses > MLX5_SD_MAX_GROUP_SZ)
return false;
@@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev)
bool sdm;
int err;
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
+ /* Block on embedded CPU PFs */
+ if (mlx5_core_is_ecpf(dev))
+ return 0;
+
if (!MLX5_CAP_MCAM_REG(dev, mpir))
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6574c145dc1e..527da58c7953 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
if (!err)
mlx5_function_disable(dev, boot);
+ else
+ mlx5_stop_health_poll(dev, boot);
+
return err;
}
@@ -1816,6 +1819,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&dev->mlx5e_res.uplink_netdev_lock);
+ mutex_init(&dev->wc_state_lock);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1913,6 +1917,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->wc_state_lock);
mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
mutex_destroy(&dev->intf_state_mutex);
lockdep_unregister_key(&dev->lock_key);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index c38342b9f320..62c770b0eaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -320,6 +320,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_SF;
}
+static inline struct auxiliary_device *
+mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
+{
+ return container_of(mdev->device, struct auxiliary_device, dev);
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
@@ -383,4 +389,14 @@ static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vpo
: vport;
}
+static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
+{
+ if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
+ return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);
+
+ if (MLX5_CAP_GEN(dev, max_num_eqs))
+ return MLX5_CAP_GEN(dev, max_num_eqs);
+
+ return 1 << MLX5_CAP_GEN(dev, log_max_eq);
+}
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 1088114e905d..0881e961d8b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -25,7 +25,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct irq_affinity_desc *af_desc,
struct cpu_rmap **rmap);
@@ -36,13 +36,15 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
+int mlx5_irq_get_irq(const struct mlx5_irq *irq);
struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
-struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
- struct irq_affinity_desc *af_desc);
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc);
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
#else
static inline
@@ -53,7 +55,8 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
}
static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -61,6 +64,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
static inline
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
+ mlx5_irq_release_vector(irq);
}
#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index fb8787e30d3f..81a9232a03e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -16,6 +16,7 @@
#endif
#define MLX5_SFS_PER_CTRL_IRQ 64
+#define MLX5_MAX_MSIX_PER_SF 256
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
@@ -367,6 +368,11 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
return irq->mask;
}
+int mlx5_irq_get_irq(const struct mlx5_irq *irq)
+{
+ return irq->map.virq;
+}
+
int mlx5_irq_get_index(struct mlx5_irq *irq)
{
return irq->map.index;
@@ -440,11 +446,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq)
/**
* mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @dev: mlx5 device that is releasing the IRQ.
* @ctrl_irq: ctrl IRQ to be released.
*/
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
{
- _mlx5_irq_release(ctrl_irq);
+ mlx5_irq_affinity_irq_release(dev, ctrl_irq);
}
/**
@@ -473,7 +480,7 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
/* Allocate the IRQ in index 0. The vector was already allocated */
irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
} else {
- irq = mlx5_irq_affinity_request(pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
}
return irq;
@@ -589,8 +596,6 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
- int num_sf_ctrl_by_msix;
- int num_sf_ctrl_by_sfs;
int num_sf_ctrl;
int err;
@@ -608,10 +613,8 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
}
/* init sf_ctrl_pool */
- num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
- num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
- MLX5_SFS_PER_CTRL_IRQ);
- num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
+ num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
+ MLX5_SFS_PER_CTRL_IRQ);
num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
"mlx5_sf_ctrl",
@@ -711,9 +714,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
- int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int num_eqs = mlx5_max_eq_cap_get(dev);
int total_vec;
int pcif_vec;
int req_vec;
@@ -728,8 +729,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
total_vec = pcif_vec;
if (mlx5_sf_max_functions(dev))
- total_vec += MLX5_IRQ_CTRL_SF_MAX +
- MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
+ total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev);
total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));
pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 6c11e075cab0..a96be98be032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -161,6 +161,7 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
struct netlink_ext_ack *extack)
{
+ struct mlx5_vport *vport;
int err;
if (mlx5_sf_is_active(sf))
@@ -170,6 +171,13 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
return -EBUSY;
}
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
+ if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) {
+ err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port,
+ extack);
+ if (err)
+ return err;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
return err;
@@ -318,7 +326,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink,
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
+ struct mlx5_vport *vport;
+
mutex_lock(&table->sf_state_lock);
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
+ vport->max_eqs_set = false;
mlx5_sf_function_id_erase(table, sf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 81eff6c410ce..7618c6147f86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1379,6 +1379,11 @@ int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
u32 obj_id);
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 89fced86936f..3ac7dc67509f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -153,11 +153,6 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action);
-int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
- u8 *dw_selectors, u8 *byte_selectors,
- u8 *match_mask, u32 *definer_id);
-void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
-
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
new file mode 100644
index 000000000000..1bed75eca97d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/io.h>
+#include <linux/mlx5/transobj.h>
+#include "lib/clock.h"
+#include "mlx5_core.h"
+#include "wq.h"
+
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
+#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+
+struct mlx5_wc_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+};
+
+struct mlx5_wc_sq {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+ struct mlx5_wc_cq cq;
+ struct mlx5_sq_bfreg bfreg;
+};
+
+static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
+ struct mlx5_wc_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param = {};
+ int err;
+ u32 i;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int err, inlen, eqn;
+ void *in, *cqc;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
+{
+ void *cqc;
+ int err;
+
+ cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_wc_create_cqwq(mdev, cqc, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
+ goto err_create_cqwq;
+ }
+
+ err = create_wc_cq(cq, cqc);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
+ goto err_create_cq;
+ }
+
+ kvfree(cqc);
+ return 0;
+
+err_create_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_create_cqwq:
+ kvfree(cqc);
+ return err;
+}
+
+static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
+ struct mlx5_wc_sq *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ?
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
+ goto err_create_sq;
+ }
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(modify_sq_in));
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
+ sq->sqn, err);
+ goto err_modify_sq;
+ }
+
+ kvfree(in);
+ return 0;
+
+err_modify_sq:
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+err_create_sq:
+ kvfree(in);
+ return err;
+}
+
+static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wq_param param = {};
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);
+
+ err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
+ goto err_create_wq_cyc;
+ }
+
+ err = create_wc_sq(mdev, sqc_data, sq);
+ if (err)
+ goto err_create_sq;
+
+ mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_create_sq:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+err_create_wq_cyc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
+{
+ int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ __be32 mmio_wqe[16] = {};
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ memset(ctrl, 0, sizeof(*ctrl));
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds =
+ cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
+ if (signaled)
+ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ sq->pc++;
+ sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe,
+ sizeof(mmio_wqe) / 8);
+
+ sq->bfreg.offset ^= buf_size;
+}
+
+static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wc_cq *cq = &sq->cq;
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
+ int wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ struct mlx5_core_dev *mdev = cq->mdev;
+
+ if (wqe_counter == TEST_WC_NUM_WQES - 1)
+ mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
+ else
+ mdev->wc_state = MLX5_WC_STATE_SUPPORTED;
+
+ mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc++;
+
+ return 0;
+}
+
+static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
+{
+ unsigned long expires;
+ struct mlx5_wc_sq *sq;
+ int i, err;
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ return;
+
+ sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+ if (!sq)
+ return;
+
+ err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
+ goto err_alloc_bfreg;
+ }
+
+ err = mlx5_wc_create_cq(mdev, &sq->cq);
+ if (err)
+ goto err_create_cq;
+
+ err = mlx5_wc_create_sq(mdev, sq);
+ if (err)
+ goto err_create_sq;
+
+ for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
+ mlx5_wc_post_nop(sq, false);
+
+ mlx5_wc_post_nop(sq, true);
+
+ expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+ do {
+ err = mlx5_wc_poll_cq(sq);
+ if (err)
+ usleep_range(2, 10);
+ } while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
+ time_is_after_jiffies(expires));
+
+ mlx5_wc_destroy_sq(sq);
+
+err_create_sq:
+ mlx5_wc_destroy_cq(&sq->cq);
+err_create_cq:
+ mlx5_free_bfreg(mdev, &sq->bfreg);
+err_alloc_bfreg:
+ kfree(sq);
+}
+
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *parent = NULL;
+
+ if (!MLX5_CAP_GEN(mdev, bf)) {
+ mlx5_core_dbg(mdev, "BlueFlame not supported\n");
+ goto out;
+ }
+
+ if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
+ mlx5_core_dbg(mdev, "SQ not supported\n");
+ goto out;
+ }
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ /* No need to lock anything as the WC test is performed only
+ * once for the whole device and has already been done.
+ */
+ goto out;
+
+ mutex_lock(&mdev->wc_state_lock);
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(mdev))
+ parent = mdev->priv.parent_mdev;
+#endif
+
+ if (parent) {
+ mutex_lock(&parent->wc_state_lock);
+
+ mlx5_core_test_wc(parent);
+
+ mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
+ parent->wc_state);
+ mdev->wc_state = parent->wc_state;
+
+ mutex_unlock(&parent->wc_state_lock);
+ }
+
+ mlx5_core_test_wc(mdev);
+
+unlock:
+ mutex_unlock(&mdev->wc_state_lock);
+out:
+ mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
+
+ return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
+}
+EXPORT_SYMBOL(mlx5_wc_support_get);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index a510bf2cff2f..74f7e27b490f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -33,6 +33,7 @@ config MLXSW_CORE_THERMAL
config MLXSW_PCI
tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
depends on PCI && HAS_IOMEM && MLXSW_CORE
+ select PAGE_POOL
default m
help
This is PCI bus implementation for Mellanox Technologies Switch ASICs.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 6c06b0592760..294e758f1067 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -513,6 +513,63 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 bytes_written = 0;
+ u16 device_addr;
+ int err;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot write to EEPROM of a module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
+
+ device_addr = page->offset;
+
+ while (bytes_written < page->length) {
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char eeprom_tmp[128] = {};
+ u8 size;
+
+ size = min_t(u8, page->length - bytes_written,
+ mlxsw_env->max_eeprom_len);
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page,
+ device_addr + bytes_written, size,
+ page->i2c_address);
+ mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
+ memcpy(eeprom_tmp, page->data + bytes_written, size);
+ mlxsw_reg_mcia_eeprom_memcpy_to(mcia_pl, eeprom_tmp);
+
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM");
+ return err;
+ }
+
+ err = mlxsw_env_mcia_status_process(mcia_pl, extack);
+ if (err)
+ return err;
+
+ bytes_written += size;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_eeprom_by_page);
+
static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index a197e3ae069c..e4ff17869400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -28,6 +28,12 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+
int mlxsw_env_reset_module(struct net_device *netdev,
struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module, u32 *flags);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 025e0db983fe..b032d5a4b3b8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -1484,6 +1484,7 @@ err_type_file_file_validate:
vfree(types_info->data);
err_data_alloc:
kfree(types_info);
+ linecards->types_info = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 5c511e1a8efa..d61478c0c632 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -100,6 +100,12 @@ static const struct mlxsw_cooling_states default_cooling_states[] = {
struct mlxsw_thermal;
+struct mlxsw_thermal_cooling_device {
+ struct mlxsw_thermal *thermal;
+ struct thermal_cooling_device *cdev;
+ unsigned int idx;
+};
+
struct mlxsw_thermal_module {
struct mlxsw_thermal *parent;
struct thermal_zone_device *tzdev;
@@ -123,7 +129,7 @@ struct mlxsw_thermal {
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
int polling_delay;
- struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ struct mlxsw_thermal_cooling_device cdevs[MLXSW_MFCR_PWMS_MAX];
struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
@@ -147,7 +153,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
int i;
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i] == cdev)
+ if (thermal->cdevs[i].cdev == cdev)
return i;
/* Allow mlxsw thermal zone binding to an external cooling device */
@@ -352,17 +358,14 @@ static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *p_state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int err, idx;
u8 duty;
+ int err;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, 0);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to query PWM duty\n");
@@ -378,22 +381,19 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int idx;
int err;
if (state > MLXSW_THERMAL_MAX_STATE)
return -EINVAL;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
/* Normalize the state to the valid speed range. */
state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx,
+ mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to write PWM duty\n");
@@ -753,17 +753,21 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
if (pwm_active & BIT(i)) {
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev;
struct thermal_cooling_device *cdev;
+ mlxsw_cdev = &thermal->cdevs[i];
+ mlxsw_cdev->thermal = thermal;
+ mlxsw_cdev->idx = i;
cdev = thermal_cooling_device_register("mlxsw_fan",
- thermal,
+ mlxsw_cdev,
&mlxsw_cooling_ops);
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
goto err_thermal_cooling_device_register;
}
- thermal->cdevs[i] = cdev;
+ mlxsw_cdev->cdev = cdev;
}
}
@@ -824,8 +828,7 @@ err_thermal_modules_init:
err_thermal_zone_device_register:
err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i])
- thermal_cooling_device_unregister(thermal->cdevs[i]);
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
err_reg_write:
err_reg_query:
kfree(thermal);
@@ -847,12 +850,8 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
thermal->tzdev = NULL;
}
- for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
- if (thermal->cdevs[i]) {
- thermal_cooling_device_unregister(thermal->cdevs[i]);
- thermal->cdevs[i] = NULL;
- }
- }
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
kfree(thermal);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index cfafbeb42586..a619a0736bd1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -218,6 +218,10 @@ __mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
}
max_index = (item->size.bytes << 3) / item->element_size - 1;
+ if (WARN_ONCE(index > max_index,
+ "name=%s,index=%u,max_index=%u\n", item->name, index,
+ max_index))
+ index = 0;
be_index = max_index - index;
offset = be_index * item->element_size >> 3;
in_byte_index = index % (BITS_PER_BYTE / item->element_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index f0ceb196a6ce..828c65036a4c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -140,6 +140,20 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
page, extack);
}
+static int
+mlxsw_m_set_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ page, extack);
+}
+
static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
{
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
@@ -181,6 +195,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_m_set_module_eeprom_by_page,
.reset = mlxsw_m_reset,
.get_module_power_mode = mlxsw_m_get_module_power_mode,
.set_module_power_mode = mlxsw_m_set_module_power_mode,
@@ -702,8 +717,8 @@ static struct mlxsw_driver mlxsw_m_driver = {
};
static const struct i2c_device_id mlxsw_m_i2c_id[] = {
- { "mlxsw_minimal", 0},
- { },
+ { "mlxsw_minimal" },
+ { }
};
static struct i2c_driver mlxsw_m_i2c_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index bf66d996e32e..060e5b939211 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -13,6 +13,7 @@
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <net/page_pool/helpers.h>
#include "pci_hw.h"
#include "pci.h"
@@ -61,15 +62,11 @@ struct mlxsw_pci_mem_item {
};
struct mlxsw_pci_queue_elem_info {
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
char *elem; /* pointer to actual dma mapped element mem chunk */
- union {
- struct {
- struct sk_buff *skb;
- } sdq;
- struct {
- struct sk_buff *skb;
- } rdq;
- } u;
+ struct {
+ struct sk_buff *skb;
+ } sdq;
};
struct mlxsw_pci_queue {
@@ -88,10 +85,14 @@ struct mlxsw_pci_queue {
enum mlxsw_pci_cqe_v v;
struct mlxsw_pci_queue *dq;
struct napi_struct napi;
+ struct page_pool *page_pool;
} cq;
struct {
struct tasklet_struct tasklet;
} eq;
+ struct {
+ struct mlxsw_pci_queue *cq;
+ } rdq;
} u;
};
@@ -110,6 +111,7 @@ struct mlxsw_pci {
bool cff_support;
enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
+ u8 num_sg_entries; /* Number of scatter/gather entries for packets. */
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -335,6 +337,29 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
+#define MLXSW_PCI_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+#define MLXSW_PCI_RX_BUF_SW_OVERHEAD \
+ (MLXSW_PCI_SKB_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+static void
+mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page,
+ char *wqe, int index, size_t frag_len)
+{
+ dma_addr_t mapaddr;
+
+ mapaddr = page_pool_get_dma_addr(page);
+
+ if (index == 0) {
+ mapaddr += MLXSW_PCI_SKB_HEADROOM;
+ frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+ }
+
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+}
+
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
int index, char *frag_data, size_t frag_len,
int direction)
@@ -364,43 +389,140 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
-static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info,
- gfp_t gfp)
+static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
+ u16 byte_count)
{
- size_t buf_len = MLXSW_PORT_MAX_MTU;
- char *wqe = elem_info->elem;
+ unsigned int linear_data_size;
struct sk_buff *skb;
- int err;
+ int page_index = 0;
+ bool linear_only;
+ void *data;
+
+ data = page_address(pages[page_index]);
+ net_prefetch(data);
+
+ skb = napi_build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb))
+ return ERR_PTR(-ENOMEM);
- skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
- if (!skb)
+ linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
+ linear_data_size = linear_only ? byte_count :
+ PAGE_SIZE -
+ MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+
+ skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
+ skb_put(skb, linear_data_size);
+
+ if (linear_only)
+ return skb;
+
+ byte_count -= linear_data_size;
+ page_index++;
+
+ while (byte_count > 0) {
+ unsigned int frag_size;
+ struct page *page;
+
+ page = pages[page_index];
+ frag_size = min(byte_count, PAGE_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, 0, frag_size, PAGE_SIZE);
+ byte_count -= frag_size;
+ page_index++;
+ }
+
+ return skb;
+}
+
+static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ char *wqe = elem_info->elem;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(cq->u.cq.page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
- buf_len, DMA_FROM_DEVICE);
- if (err)
- goto err_frag_map;
+ mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
+ elem_info->pages[index] = page;
+ return 0;
+}
+
+static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+
+ page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1,
+ false);
+}
+
+static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
+{
+ return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD,
+ PAGE_SIZE);
+}
+
+static int
+mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
+ const struct mlxsw_pci_queue_elem_info *el,
+ u16 byte_count, struct page *pages[],
+ u8 *p_num_sg_entries)
+{
+ u8 num_sg_entries;
+ int i;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
+ if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries))
+ return -EINVAL;
+
+ for (i = 0; i < num_sg_entries; i++)
+ pages[i] = el->pages[i];
+
+ *p_num_sg_entries = num_sg_entries;
+ return 0;
+}
+
+static int
+mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ u8 num_sg_entries)
+{
+ struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES];
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i, err;
+
+ for (i = 0; i < num_sg_entries; i++) {
+ old_pages[i] = elem_info->pages[i];
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
+ if (err) {
+ dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n");
+ goto err_page_alloc;
+ }
+ }
- elem_info->u.rdq.skb = skb;
return 0;
-err_frag_map:
- dev_kfree_skb_any(skb);
+err_page_alloc:
+ for (i--; i >= 0; i--)
+ page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]);
+
return err;
}
-static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info)
+static void
+mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[],
+ u8 num_sg_entries)
{
- struct sk_buff *skb;
- char *wqe;
-
- skb = elem_info->u.rdq.skb;
- wqe = elem_info->elem;
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i;
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ for (i = 0; i < num_sg_entries; i++)
+ page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]);
}
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -410,7 +532,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
u8 sdq_count = mlxsw_pci->num_sdqs;
struct mlxsw_pci_queue *cq;
u8 cq_num;
- int i;
+ int i, j;
int err;
q->producer_counter = 0;
@@ -434,15 +556,19 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
cq->u.cq.dq = q;
+ q->u.rdq.cq = cq;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
BUG_ON(!elem_info);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
- if (err)
- goto rollback;
+
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++) {
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
+ if (err)
+ goto rollback;
+ }
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -453,8 +579,11 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
rollback:
for (i--; i >= 0; i--) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j--; j >= 0; j--)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
+ j = mlxsw_pci->num_sg_entries;
}
+ q->u.rdq.cq = NULL;
cq->u.cq.dq = NULL;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
@@ -465,12 +594,13 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
- int i;
+ int i, j;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
}
}
@@ -515,7 +645,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v,
- char *cqe)
+ char *cqe, int budget)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -526,8 +656,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
spin_lock(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
- skb = elem_info->u.sdq.skb;
+ tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info;
+ skb = elem_info->sdq.skb;
wqe = elem_info->elem;
for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
@@ -541,8 +671,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
}
if (skb)
- dev_kfree_skb_any(skb);
- elem_info->u.sdq.skb = NULL;
+ napi_consume_skb(skb, budget);
+ elem_info->sdq.skb = NULL;
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
@@ -604,27 +734,40 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
- char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
+ u8 num_sg_entries;
u16 byte_count;
int err;
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- skb = elem_info->u.rdq.skb;
- memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
- if (err) {
- dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
+ byte_count -= ETH_FCS_LEN;
+
+ err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
+ pages, &num_sg_entries);
+ if (err)
+ goto out;
+
+ err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
+ if (err)
+ goto out;
+
+ skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
+ if (IS_ERR(skb)) {
+ dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
+ mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
goto out;
}
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ skb_mark_for_recycle(skb);
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
@@ -657,10 +800,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
- byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
- if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
- byte_count -= ETH_FCS_LEN;
- skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
out:
@@ -785,7 +924,7 @@ static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget)
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, q->u.cq.v, ncqe);
+ wqe_counter, q->u.cq.v, ncqe, budget);
work_done++;
}
@@ -832,19 +971,51 @@ static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
mlxsw_pci_napi_poll_cq_rx);
break;
}
-
- napi_enable(&q->u.cq.napi);
}
static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
{
- napi_disable(&q->u.cq.napi);
netif_napi_del(&q->u.cq.napi);
}
+static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ struct page_pool_params pp_params = {};
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ struct page_pool *page_pool;
+
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return 0;
+
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
+ pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
+ pp_params.dev = &mlxsw_pci->pdev->dev;
+ pp_params.napi = &q->u.cq.napi;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(page_pool))
+ return PTR_ERR(page_pool);
+
+ q->u.cq.page_pool = page_pool;
+ return 0;
+}
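
A short note on the sizing and binding here: the pool is created only for CQs that service an RDQ, is sized to cover every scatter/gather slot of that RDQ (MLXSW_PCI_WQE_COUNT descriptors times num_sg_entries pages each), and is bound to the CQ's NAPI instance via pp_params.napi, which is what lets the Rx path above recycle pages directly (page_pool_recycle_direct()/skb_mark_for_recycle()) from softirq context. With PP_FLAG_DMA_MAP the pool also owns the DMA mapping of its pages, replacing the per-fragment map/unmap calls removed earlier in this patch.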
+
+static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return;
+
+ page_pool_destroy(q->u.cq.page_pool);
+}
+
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
int i;
int err;
@@ -874,15 +1045,29 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
- mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q));
+ mlxsw_pci_cq_napi_setup(q, cq_type);
+
+ err = mlxsw_pci_cq_page_pool_init(q, cq_type);
+ if (err)
+ goto err_page_pool_init;
+
+ napi_enable(&q->u.cq.napi);
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
return 0;
+
+err_page_pool_init:
+ mlxsw_pci_cq_napi_teardown(q);
+ return err;
}
static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
+
+ napi_disable(&q->u.cq.napi);
+ mlxsw_pci_cq_page_pool_fini(q, cq_type);
mlxsw_pci_cq_napi_teardown(q);
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
@@ -1594,20 +1779,31 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
return -EBUSY;
}
-static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
+ bool pci_reset_sbr_supported)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ struct pci_dev *bridge;
int err;
+ if (!pci_reset_sbr_supported) {
+ pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n");
+ goto sbr;
+ }
+
mlxsw_reg_mrsr_pack(mrsr_pl,
MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
if (err)
return err;
+sbr:
device_lock_assert(&pdev->dev);
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge)
+ pci_cfg_access_lock(bridge);
pci_cfg_access_lock(pdev);
pci_save_state(pdev);
@@ -1617,6 +1813,8 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
pci_restore_state(pdev);
pci_cfg_access_unlock(pdev);
+ if (bridge)
+ pci_cfg_access_unlock(bridge);
return err;
}
@@ -1633,6 +1831,7 @@ static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ bool pci_reset_sbr_supported = false;
char mcam_pl[MLXSW_REG_MCAM_LEN];
bool pci_reset_supported = false;
u32 sys_status;
@@ -1652,13 +1851,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (!err)
+ if (!err) {
mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
&pci_reset_supported);
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR,
+ &pci_reset_sbr_supported);
+ }
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
- err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
+ err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci,
+ pci_reset_sbr_supported);
} else {
pci_dbg(pdev, "Starting software reset flow\n");
err = mlxsw_pci_reset_sw(mlxsw_pci);
@@ -1691,6 +1894,17 @@ static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
pci_free_irq_vectors(mlxsw_pci->pdev);
}
+static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci)
+{
+ u8 num_sg_entries;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU);
+ mlxsw_pci->num_sg_entries = min(num_sg_entries,
+ MLXSW_PCI_WQE_SG_ENTRIES);
+
+ WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES);
+}
+
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
@@ -1813,6 +2027,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_requery_resources;
+ mlxsw_pci_num_sg_entries_set(mlxsw_pci);
+
err = mlxsw_pci_napi_devs_init(mlxsw_pci);
if (err)
goto err_napi_devs_init;
@@ -1919,7 +2135,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
goto unlock;
}
mlxsw_skb_cb(skb)->tx_info = *tx_info;
- elem_info->u.sdq.skb = skb;
+ elem_info->sdq.skb = skb;
wqe = elem_info->elem;
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index ac4d4ea51597..0a73b1a4526e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -6,7 +6,8 @@
#include <linux/types.h>
-#define MLXSW_PORT_MAX_MTU 10000
+#define MLXSW_PORT_MAX_MTU (10 * 1024)
+#define MLXSW_PORT_ETH_FRAME_HDR (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MLXSW_PORT_DEFAULT_VID 1
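
With these values, MLXSW_PORT_ETH_FRAME_HDR works out to 14 + 4 + 4 = 22 bytes (Ethernet header, one VLAN tag, FCS), so the dev->max_mtu set later in this patch (MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR) is 10 * 1024 - 22 = 10218 bytes of payload, and mlxsw_sp_port_mtu_set() adds the same 22 bytes back before programming the PMTU register.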
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8adf86a6f5cc..3bb89045eaf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -10671,6 +10671,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
MLXSW_REG_MCAM_MCIA_128B = 34,
/* If set, MRSR.command=6 is supported. */
MLXSW_REG_MCAM_PCI_RESET = 48,
+ /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */
+ MLXSW_REG_MCAM_PCI_RESET_SBR = 67,
};
#define MLXSW_REG_BYTES_PER_DWORD 0x4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 030ed71f945d..f064789f3240 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -405,29 +405,12 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port->dev->dev_addr);
}
-static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int err;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
- if (err)
- return err;
-
- *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
- return 0;
-}
-
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmtu_pl[MLXSW_REG_PMTU_LEN];
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
- if (mtu > mlxsw_sp_port->max_mtu)
- return -EINVAL;
+ mtu += MLXSW_PORT_ETH_FRAME_HDR;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
@@ -1697,8 +1680,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
- dev->min_mtu = 0;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
/* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
@@ -1727,13 +1710,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
goto err_max_speed_get;
}
- err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
- mlxsw_sp_port->local_port);
- goto err_port_max_mtu_get;
- }
-
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1877,7 +1853,6 @@ err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
-err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3beb5d0847ab..8d3c61287696 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -238,7 +238,7 @@ struct mlxsw_sp_ptp_ops {
struct hwtstamp_config *config);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_stats_count)(void);
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -359,7 +359,6 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
- int max_mtu;
u32 max_speed;
struct mlxsw_sp_hdroom *hdroom;
u64 module_overheat_initial_val;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 4b713832fdd5..07cb1e26ca3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -480,26 +481,23 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.full_enc_key, mask);
+ aentry->ht_key.enc_key, mask);
erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
if (IS_ERR(erp_mask))
return PTR_ERR(erp_mask);
aentry->erp_mask = erp_mask;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
- memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
- sizeof(aentry->enc_key));
/* Compute all needed delta information and clear the delta bits
- * from the encrypted key.
+ * from the encoded key.
*/
delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
aentry->delta_info.value =
- mlxsw_sp_acl_erp_delta_value(delta,
- aentry->ht_key.full_enc_key);
- mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
/* Add rule to the list of A-TCAM rules, assuming this
* rule is intended to A-TCAM. In case this rule does
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 95f63fcf4ba1..a54eedb69a3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
memcpy(chunk + key_offset,
- &aentry->enc_key[chunk_key_offsets[chunk_index]],
+ &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
chunk_key_len);
chunk += chunk_len;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index d231f4d2888b..9eee229303cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
return err ? false : true;
}
-static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
-{
- const struct mlxsw_sp_acl_erp_key *key1 = obj1;
- const struct mlxsw_sp_acl_erp_key *key2 = obj2;
-
- /* For hints purposes, two objects are considered equal
- * in case the masks are the same. Does not matter what
- * the "ctcam" value is.
- */
- return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
-}
-
static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
void *obj)
{
@@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
.delta_check = mlxsw_sp_acl_erp_delta_check,
- .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
.delta_create = mlxsw_sp_acl_erp_delta_create,
.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
.root_create = mlxsw_sp_acl_erp_root_create,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 79a1d8606512..010204f73ea4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -167,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
- * key.
- */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus
+ * delta bits.
+ */
u8 erp_id;
};
@@ -181,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct list_head list; /* Member in entries_list */
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
- * minus delta bits.
- */
struct {
u16 start;
u8 mask;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c9f1c79f3f9d..2c0cfa79d138 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -399,11 +399,13 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_hdroom *hdroom)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned int max_mtu = mlxsw_sp_port->dev->max_mtu;
u16 reserve_cells;
int i;
+ max_mtu += MLXSW_PORT_ETH_FRAME_HDR;
/* Internal buffer. */
- reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu,
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, max_mtu,
mlxsw_sp_port->max_speed);
reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
hdroom->int_buf.reserve_cells = reserve_cells;
@@ -613,7 +615,9 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
/* Buffer 9 is used for control traffic. */
- size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port,
+ mlxsw_sp_port->dev->max_mtu +
+ MLXSW_PORT_ETH_FRAME_HDR);
hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
@@ -1607,8 +1611,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
+ u16 local_port, local_port_1, first_local_port, last_local_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
@@ -1628,6 +1632,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1645,9 +1650,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
&bulk_list);
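
The subtraction is needed because the ingress/egress masks in the SBSR register are indexed per page rather than by absolute local port number. Purely as an illustration (the actual MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE value is not shown in this hunk): if a page held 256 ports, local port 300 would fall on page 1 with first_local_port = 256, so the bit set in the mask would be 300 - 256 = 44.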
@@ -1684,7 +1692,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, last_local_port;
+ u16 local_port, first_local_port, last_local_port;
LIST_HEAD(bulk_list);
unsigned int masked_count;
u8 current_page = 0;
@@ -1702,6 +1710,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1719,9 +1728,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
&bulk_list);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ca80af06465f..fa6eddd27ecf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -283,7 +283,7 @@ static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv)
return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
.matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump,
@@ -734,7 +734,7 @@ static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump,
@@ -811,7 +811,7 @@ static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump,
@@ -1230,7 +1230,7 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv)
return size;
}
-static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index a755b0a901d3..2bed8c86b7cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1068,7 +1068,21 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
}
static int
-mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+mlxsw_sp_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
+}
+
+static int
+mlxsw_sp_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -1256,6 +1270,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_sp_set_module_eeprom_by_page,
.get_ts_info = mlxsw_sp_get_ts_info,
.get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index cbb6c75a6620..5b174cb95eb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -1276,7 +1276,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
@@ -1661,7 +1661,7 @@ err_get_message_types:
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index a8b88230959a..769095d4932d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,7 +11,7 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
+static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
@@ -50,7 +50,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlxsw_sp1_get_stats_count(void);
void mlxsw_sp1_get_stats_strings(u8 **p);
@@ -84,7 +84,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_port *mlxsw_sp_port,
@@ -152,7 +152,7 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
@@ -227,7 +227,7 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 40ba314fbc72..800dfb64ec83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -11450,12 +11450,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
bool old_inc_parsing_depth, new_inc_parsing_depth;
struct mlxsw_sp_mp_hash_config config = {};
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
char recr2_pl[MLXSW_REG_RECR2_LEN];
unsigned long bit;
u32 seed;
int err;
- seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+ seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).user_seed;
+ if (!seed)
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+
mlxsw_reg_recr2_pack(recr2_pl, seed);
mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
new file mode 100644
index 000000000000..d8f5e9f9bb33
--- /dev/null
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Meta Platforms network device configuration
+#
+
+config NET_VENDOR_META
+ bool "Meta Platforms devices"
+ default y
+ help
+ If you have a network (Ethernet) card designed by Meta, say Y.
+ That's Meta as in the parent company of Facebook.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Meta cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_META
+
+config FBNIC
+ tristate "Meta Platforms Host Network Interface"
+ depends on X86_64 || COMPILE_TEST
+ depends on PCI_MSI
+ select PHYLINK
+ help
+	  This driver supports the Meta Platforms Host Network Interface.
+
+ To compile this driver as a module, choose M here. The module
+ will be called fbnic. MSI-X interrupt support is required.
+
+endif # NET_VENDOR_META
diff --git a/drivers/net/ethernet/meta/Makefile b/drivers/net/ethernet/meta/Makefile
new file mode 100644
index 000000000000..88804f3de963
--- /dev/null
+++ b/drivers/net/ethernet/meta/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Meta Platforms network device drivers.
+#
+
+obj-$(CONFIG_FBNIC) += fbnic/
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
new file mode 100644
index 000000000000..9373b558fdc9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+
+#
+# Makefile for the Meta(R) Host Network Interface
+#
+
+obj-$(CONFIG_FBNIC) += fbnic.o
+
+fbnic-y := fbnic_devlink.o \
+ fbnic_fw.o \
+ fbnic_irq.o \
+ fbnic_mac.o \
+ fbnic_netdev.o \
+ fbnic_pci.o \
+ fbnic_phylink.o \
+ fbnic_rpc.o \
+ fbnic_tlv.o \
+ fbnic_txrx.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
new file mode 100644
index 000000000000..ad2689bfd6cb
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_H_
+#define _FBNIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_fw.h"
+#include "fbnic_mac.h"
+#include "fbnic_rpc.h"
+
+struct fbnic_dev {
+ struct device *dev;
+ struct net_device *netdev;
+
+ u32 __iomem *uc_addr0;
+ u32 __iomem *uc_addr4;
+ const struct fbnic_mac *mac;
+ unsigned int fw_msix_vector;
+ unsigned int pcs_msix_vector;
+ unsigned short num_irqs;
+
+ struct delayed_work service_task;
+
+ struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
+ struct fbnic_fw_cap fw_cap;
+ /* Lock protecting Tx Mailbox queue to prevent possible races */
+ spinlock_t fw_tx_lock;
+
+ unsigned long last_heartbeat_request;
+ unsigned long last_heartbeat_response;
+ u8 fw_heartbeat_enabled;
+
+ u64 dsn;
+ u32 mps;
+ u32 readrq;
+
+	/* Local copy of the device's TCAM */
+ struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES];
+ struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES];
+ u8 mac_addr_boundary;
+
+ /* Number of TCQs/RCQs available on hardware */
+ u16 max_num_queues;
+};
+
+/* Reserve entry 0 in the MSI-X "others" array until we have filled all
+ * 32 of the possible interrupt slots. By doing this we can avoid any
+ * potential conflicts should we need to enable one of the debug interrupt
+ * causes later.
+ */
+enum {
+ FBNIC_FW_MSIX_ENTRY,
+ FBNIC_PCS_MSIX_ENTRY,
+ FBNIC_NON_NAPI_VECTORS
+};
+
+static inline bool fbnic_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr0);
+}
+
+static inline void fbnic_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+
+ if (csr)
+ writel(val, csr + reg);
+}
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg);
+
+static inline void fbnic_wrfl(struct fbnic_dev *fbd)
+{
+ fbnic_rd32(fbd, FBNIC_MASTER_SPARE_0);
+}
+
+static inline void
+fbnic_rmw32(struct fbnic_dev *fbd, u32 reg, u32 mask, u32 val)
+{
+ u32 v;
+
+ v = fbnic_rd32(fbd, reg);
+ v &= ~mask;
+ v |= val;
+ fbnic_wr32(fbd, reg, v);
+}
+
+#define wr32(_f, _r, _v) fbnic_wr32(_f, _r, _v)
+#define rd32(_f, _r) fbnic_rd32(_f, _r)
+#define wrfl(_f) fbnic_wrfl(_f)
+
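
A minimal usage sketch of the shorthands above (illustrative only: the register, the bit position and the write-then-flush pairing are chosen for the example and are not taken from the driver):

	int nr = 0;	/* hypothetical interrupt index within mask word 0 */

	wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << nr);	/* hypothetical: clear (unmask) bit nr */
	wrfl(fbd);	/* read FBNIC_MASTER_SPARE_0 back to flush the posted write */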
+bool fbnic_fw_present(struct fbnic_dev *fbd);
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg);
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val);
+
+#define fw_rd32(_f, _r) fbnic_fw_rd32(_f, _r)
+#define fw_wr32(_f, _r, _v) fbnic_fw_wr32(_f, _r, _v)
+#define fw_wrfl(_f) fbnic_fw_rd32(_f, FBNIC_FW_ZERO_REG)
+
+static inline bool fbnic_bmc_present(struct fbnic_dev *fbd)
+{
+ return fbd->fw_cap.bmc_present;
+}
+
+static inline bool fbnic_init_failure(struct fbnic_dev *fbd)
+{
+ return !fbd->netdev;
+}
+
+extern char fbnic_driver_name[];
+
+void fbnic_devlink_free(struct fbnic_dev *fbd);
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
+void fbnic_devlink_register(struct fbnic_dev *fbd);
+void fbnic_devlink_unregister(struct fbnic_dev *fbd);
+
+int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
+void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+
+int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
+void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+
+int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data);
+void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
+void fbnic_free_irqs(struct fbnic_dev *fbd);
+int fbnic_alloc_irqs(struct fbnic_dev *fbd);
+
+enum fbnic_boards {
+ fbnic_board_asic
+};
+
+struct fbnic_info {
+ unsigned int max_num_queues;
+ unsigned int bar_mask;
+};
+
+#endif /* _FBNIC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
new file mode 100644
index 000000000000..a64360de0552
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -0,0 +1,838 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_CSR_H_
+#define _FBNIC_CSR_H_
+
+#include <linux/bitops.h>
+
+#define CSR_BIT(nr) (1u << (nr))
+#define CSR_GENMASK(h, l) GENMASK(h, l)
+
+#define DESC_BIT(nr) BIT_ULL(nr)
+#define DESC_GENMASK(h, l) GENMASK_ULL(h, l)
+
+/* Defines the minimum firmware version required by the driver */
+#define MIN_FW_MAJOR_VERSION 0
+#define MIN_FW_MINOR_VERSION 10
+#define MIN_FW_BUILD_VERSION 6
+#define MIN_FW_VERSION_CODE (MIN_FW_MAJOR_VERSION * (1u << 24) + \
+ MIN_FW_MINOR_VERSION * (1u << 16) + \
+ MIN_FW_BUILD_VERSION)
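
The encoding packs the three components into a single integer, major * 2^24 + minor * 2^16 + build, so the minimum supported version 0.10.6 corresponds to a version code of 10 * 65536 + 6 = 0x000a0006.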
+
+#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013
+
+#define FBNIC_CLOCK_FREQ (600 * (1000 * 1000))
+
+/* Transmit Work Descriptor Format */
+/* Length, Type, Offset Masks and Shifts */
+#define FBNIC_TWD_L2_HLEN_MASK DESC_GENMASK(5, 0)
+
+#define FBNIC_TWD_L3_TYPE_MASK DESC_GENMASK(7, 6)
+enum {
+ FBNIC_TWD_L3_TYPE_OTHER = 0,
+ FBNIC_TWD_L3_TYPE_IPV4 = 1,
+ FBNIC_TWD_L3_TYPE_IPV6 = 2,
+ FBNIC_TWD_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_TWD_L3_OHLEN_MASK DESC_GENMASK(15, 8)
+#define FBNIC_TWD_L3_IHLEN_MASK DESC_GENMASK(23, 16)
+
+enum {
+ FBNIC_TWD_L4_TYPE_OTHER = 0,
+ FBNIC_TWD_L4_TYPE_TCP = 1,
+ FBNIC_TWD_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_TWD_CSUM_OFFSET_MASK DESC_GENMASK(27, 24)
+#define FBNIC_TWD_L4_HLEN_MASK DESC_GENMASK(31, 28)
+
+/* Flags and Type */
+#define FBNIC_TWD_L4_TYPE_MASK DESC_GENMASK(33, 32)
+#define FBNIC_TWD_FLAG_REQ_TS DESC_BIT(34)
+#define FBNIC_TWD_FLAG_REQ_LSO DESC_BIT(35)
+#define FBNIC_TWD_FLAG_REQ_CSO DESC_BIT(36)
+#define FBNIC_TWD_FLAG_REQ_COMPLETION DESC_BIT(37)
+#define FBNIC_TWD_FLAG_DEST_MAC DESC_BIT(43)
+#define FBNIC_TWD_FLAG_DEST_BMC DESC_BIT(44)
+#define FBNIC_TWD_FLAG_DEST_FW DESC_BIT(45)
+#define FBNIC_TWD_TYPE_MASK DESC_GENMASK(47, 46)
+enum {
+ FBNIC_TWD_TYPE_META = 0,
+ FBNIC_TWD_TYPE_OPT_META = 1,
+ FBNIC_TWD_TYPE_AL = 2,
+ FBNIC_TWD_TYPE_LAST_AL = 3,
+};
+
+/* MSS and Completion Req */
+#define FBNIC_TWD_MSS_MASK DESC_GENMASK(61, 48)
+
+#define FBNIC_TWD_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_TWD_ADDR_MASK DESC_GENMASK(45, 0)
+#define FBNIC_TWD_LEN_MASK DESC_GENMASK(63, 48)
+
+/* Tx Completion Descriptor Format */
+#define FBNIC_TCD_TYPE0_HEAD0_MASK DESC_GENMASK(15, 0)
+#define FBNIC_TCD_TYPE0_HEAD1_MASK DESC_GENMASK(31, 16)
+
+#define FBNIC_TCD_TYPE1_TS_MASK DESC_GENMASK(39, 0)
+
+#define FBNIC_TCD_STATUS_MASK DESC_GENMASK(59, 48)
+#define FBNIC_TCD_STATUS_TS_INVALID DESC_BIT(48)
+#define FBNIC_TCD_STATUS_ILLEGAL_TS_REQ DESC_BIT(49)
+#define FBNIC_TCD_TWQ1 DESC_BIT(60)
+#define FBNIC_TCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_TCD_TYPE_0 = 0,
+ FBNIC_TCD_TYPE_1 = 1,
+};
+
+#define FBNIC_TCD_DONE DESC_BIT(63)
+
+/* Rx Buffer Descriptor Format
+ *
+ * The layout of this can vary depending on the page size of the system.
+ *
+ * If the page size is 4K, the layout simply consists of an ID in the 16
+ * most significant bits and the page address in the lower 46 bits, with
+ * the lowest 12 bits reserved as 0 since pages are page-aligned.
+ *
+ * If the page size is larger than 4K, the lower n bits of both the ID and
+ * the page address are reserved for a fragment index. Each fragment is 4K
+ * in size, and the fragment index offsets the DMA address and the ID by
+ * the same amount.
+ */
+#define FBNIC_BD_DESC_ADDR_MASK DESC_GENMASK(45, 12)
+#define FBNIC_BD_DESC_ID_MASK DESC_GENMASK(63, 48)
+#define FBNIC_BD_FRAG_SIZE \
+ (FBNIC_BD_DESC_ADDR_MASK & ~(FBNIC_BD_DESC_ADDR_MASK - 1))
+#define FBNIC_BD_FRAG_COUNT \
+ (PAGE_SIZE / FBNIC_BD_FRAG_SIZE)
+#define FBNIC_BD_FRAG_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & \
+ ~(FBNIC_BD_DESC_ADDR_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_FRAG_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & \
+ ~(FBNIC_BD_DESC_ID_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_PAGE_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & ~FBNIC_BD_FRAG_ADDR_MASK)
+#define FBNIC_BD_PAGE_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & ~FBNIC_BD_FRAG_ID_MASK)
+
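
Working the masks through for two page sizes: FBNIC_BD_FRAG_SIZE is the lowest set bit of the address mask, i.e. 1 << 12 = 4 KiB, regardless of page size. On a 4 KiB-page system FBNIC_BD_FRAG_COUNT is 1, the fragment masks collapse to 0, and the full 45:12 and 63:48 ranges address the page and its ID. On a hypothetical 16 KiB-page system FBNIC_BD_FRAG_COUNT is 4, so bits 13:12 of the address and bits 49:48 of the ID become the fragment index selecting one of four 4 KiB fragments, while the remaining high bits identify the page.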
+/* Rx Completion Queue Descriptors */
+#define FBNIC_RCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_RCD_TYPE_HDR_AL = 0,
+ FBNIC_RCD_TYPE_PAY_AL = 1,
+ FBNIC_RCD_TYPE_OPT_META = 2,
+ FBNIC_RCD_TYPE_META = 3,
+};
+
+#define FBNIC_RCD_DONE DESC_BIT(63)
+
+/* Address/Length Completion Descriptors */
+#define FBNIC_RCD_AL_BUFF_ID_MASK DESC_GENMASK(15, 0)
+#define FBNIC_RCD_AL_BUFF_FRAG_MASK (FBNIC_BD_FRAG_COUNT - 1)
+#define FBNIC_RCD_AL_BUFF_PAGE_MASK \
+ (FBNIC_RCD_AL_BUFF_ID_MASK & ~FBNIC_RCD_AL_BUFF_FRAG_MASK)
+#define FBNIC_RCD_AL_BUFF_LEN_MASK DESC_GENMASK(28, 16)
+#define FBNIC_RCD_AL_BUFF_OFF_MASK DESC_GENMASK(43, 32)
+#define FBNIC_RCD_AL_PAGE_FIN DESC_BIT(60)
+
+/* Header AL specific values */
+#define FBNIC_RCD_HDR_AL_OVERFLOW DESC_BIT(53)
+#define FBNIC_RCD_HDR_AL_DMA_HINT_MASK DESC_GENMASK(59, 54)
+enum {
+ FBNIC_RCD_HDR_AL_DMA_HINT_NONE = 0,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L2 = 1,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L3 = 2,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4 = 4,
+};
+
+/* Optional Metadata Completion Descriptors */
+#define FBNIC_RCD_OPT_META_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_RCD_OPT_META_ACTION_MASK DESC_GENMASK(45, 40)
+#define FBNIC_RCD_OPT_META_ACTION DESC_BIT(57)
+#define FBNIC_RCD_OPT_META_TS DESC_BIT(58)
+#define FBNIC_RCD_OPT_META_TYPE_MASK DESC_GENMASK(60, 59)
+
+/* Metadata Completion Descriptors */
+#define FBNIC_RCD_META_RSS_HASH_MASK DESC_GENMASK(31, 0)
+#define FBNIC_RCD_META_L2_CSUM_MASK DESC_GENMASK(47, 32)
+#define FBNIC_RCD_META_L3_TYPE_MASK DESC_GENMASK(49, 48)
+enum {
+ FBNIC_RCD_META_L3_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L3_TYPE_IPV4 = 1,
+ FBNIC_RCD_META_L3_TYPE_IPV6 = 2,
+ FBNIC_RCD_META_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_RCD_META_L4_TYPE_MASK DESC_GENMASK(51, 50)
+enum {
+ FBNIC_RCD_META_L4_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L4_TYPE_TCP = 1,
+ FBNIC_RCD_META_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_RCD_META_L4_CSUM_UNNECESSARY DESC_BIT(52)
+#define FBNIC_RCD_META_ERR_MAC_EOP DESC_BIT(53)
+#define FBNIC_RCD_META_ERR_TRUNCATED_FRAME DESC_BIT(54)
+#define FBNIC_RCD_META_ERR_PARSER DESC_BIT(55)
+#define FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK \
+ (FBNIC_RCD_META_ERR_MAC_EOP | FBNIC_RCD_META_ERR_TRUNCATED_FRAME)
+#define FBNIC_RCD_META_ECN DESC_BIT(60)
+
+/* Register Definitions
+ *
+ * The registers are laid out as indexes into an le32 array, so the actual
+ * byte address is 4 times the index value. Below, each register is defined
+ * by three fields: name, index, and address.
+ *
+ * Name Index Address
+ *************************************************************************/
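
For example, FBNIC_INTR_MASK(n) below sits at index 0x00008 + n, so its byte address is 4 * (0x00008 + n) = 0x00020 + 4*n, which is exactly what the address column in the inline comments records.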
+/* Interrupt Registers */
+#define FBNIC_CSR_START_INTR 0x00000 /* CSR section delimiter */
+#define FBNIC_INTR_STATUS(n) (0x00000 + (n)) /* 0x00000 + 4*n */
+#define FBNIC_INTR_STATUS_CNT 8
+#define FBNIC_INTR_MASK(n) (0x00008 + (n)) /* 0x00020 + 4*n */
+#define FBNIC_INTR_MASK_CNT 8
+#define FBNIC_INTR_SET(n) (0x00010 + (n)) /* 0x00040 + 4*n */
+#define FBNIC_INTR_SET_CNT 8
+#define FBNIC_INTR_CLEAR(n) (0x00018 + (n)) /* 0x00060 + 4*n */
+#define FBNIC_INTR_CLEAR_CNT 8
+#define FBNIC_INTR_SW_STATUS(n) (0x00020 + (n)) /* 0x00080 + 4*n */
+#define FBNIC_INTR_SW_STATUS_CNT 8
+#define FBNIC_INTR_SW_AC_MODE(n) (0x00028 + (n)) /* 0x000a0 + 4*n */
+#define FBNIC_INTR_SW_AC_MODE_CNT 8
+#define FBNIC_INTR_MASK_SET(n) (0x00030 + (n)) /* 0x000c0 + 4*n */
+#define FBNIC_INTR_MASK_SET_CNT 8
+#define FBNIC_INTR_MASK_CLEAR(n) (0x00038 + (n)) /* 0x000e0 + 4*n */
+#define FBNIC_INTR_MASK_CLEAR_CNT 8
+#define FBNIC_MAX_MSIX_VECS 256U
+#define FBNIC_INTR_MSIX_CTRL(n) (0x00040 + (n)) /* 0x00100 + 4*n */
+#define FBNIC_INTR_MSIX_CTRL_VECTOR_MASK CSR_GENMASK(7, 0)
+#define FBNIC_INTR_MSIX_CTRL_ENABLE CSR_BIT(31)
+enum {
+ FBNIC_INTR_MSIX_CTRL_PCS_IDX = 34,
+};
+
+#define FBNIC_CSR_END_INTR 0x0005f /* CSR section delimiter */
+
+/* Interrupt MSIX Registers */
+#define FBNIC_CSR_START_INTR_CQ 0x00400 /* CSR section delimiter */
+#define FBNIC_INTR_CQ_REARM(n) \
+ (0x00400 + 4 * (n)) /* 0x01000 + 16*n */
+#define FBNIC_INTR_CQ_REARM_CNT 256
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT CSR_GENMASK(13, 0)
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN CSR_BIT(14)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT CSR_GENMASK(28, 15)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN CSR_BIT(29)
+#define FBNIC_INTR_CQ_REARM_INTR_RELOAD CSR_BIT(30)
+#define FBNIC_INTR_CQ_REARM_INTR_UNMASK CSR_BIT(31)
+
+#define FBNIC_INTR_RCQ_TIMEOUT(n) \
+ (0x00401 + 4 * (n)) /* 0x01004 + 16*n */
+#define FBNIC_INTR_RCQ_TIMEOUT_CNT 256
+#define FBNIC_INTR_TCQ_TIMEOUT(n) \
+ (0x00402 + 4 * (n)) /* 0x01008 + 16*n */
+#define FBNIC_INTR_TCQ_TIMEOUT_CNT 256
+#define FBNIC_CSR_END_INTR_CQ 0x007fe /* CSR section delimiter */
+
+/* Global QM Tx registers */
+#define FBNIC_CSR_START_QM_TX 0x00800 /* CSR section delimiter */
+#define FBNIC_QM_TWQ_IDLE(n) (0x00800 + (n)) /* 0x02000 + 4*n */
+#define FBNIC_QM_TWQ_IDLE_CNT 8
+#define FBNIC_QM_TWQ_DEFAULT_META_L 0x00818 /* 0x02060 */
+#define FBNIC_QM_TWQ_DEFAULT_META_H 0x00819 /* 0x02064 */
+
+#define FBNIC_QM_TQS_CTL0 0x0081b /* 0x0206c */
+#define FBNIC_QM_TQS_CTL0_LSO_TS_MASK CSR_BIT(0)
+enum {
+ FBNIC_QM_TQS_CTL0_LSO_TS_FIRST = 0,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST = 1,
+};
+
+#define FBNIC_QM_TQS_CTL0_PREFETCH_THRESH CSR_GENMASK(7, 1)
+enum {
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN = 16,
+};
+
+#define FBNIC_QM_TQS_CTL1 0x0081c /* 0x02070 */
+#define FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS CSR_GENMASK(7, 0)
+#define FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS CSR_GENMASK(15, 8)
+#define FBNIC_QM_TQS_MTU_CTL0 0x0081d /* 0x02074 */
+#define FBNIC_QM_TQS_MTU_CTL1 0x0081e /* 0x02078 */
+#define FBNIC_QM_TQS_MTU_CTL1_BULK CSR_GENMASK(13, 0)
+#define FBNIC_QM_TCQ_IDLE(n) (0x00821 + (n)) /* 0x02084 + 4*n */
+#define FBNIC_QM_TCQ_IDLE_CNT 4
+#define FBNIC_QM_TCQ_CTL0 0x0082d /* 0x020b4 */
+#define FBNIC_QM_TCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_TCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_TQS_IDLE(n) (0x00830 + (n)) /* 0x020c0 + 4*n */
+#define FBNIC_QM_TQS_IDLE_CNT 8
+#define FBNIC_QM_TQS_EDT_TS_RANGE 0x00849 /* 0x2124 */
+#define FBNIC_QM_TDE_IDLE(n) (0x00853 + (n)) /* 0x0214c + 4*n */
+#define FBNIC_QM_TDE_IDLE_CNT 8
+#define FBNIC_QM_TNI_TDF_CTL 0x0086c /* 0x021b0 */
+#define FBNIC_QM_TNI_TDF_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDF_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_TNI_TDE_CTL 0x0086d /* 0x021b4 */
+#define FBNIC_QM_TNI_TDE_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OB CSR_GENMASK(24, 12)
+#define FBNIC_QM_TNI_TDE_CTL_MRRS_1K CSR_BIT(25)
+#define FBNIC_QM_TNI_TCM_CTL 0x0086e /* 0x021b8 */
+#define FBNIC_QM_TNI_TCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_TX 0x00873 /* CSR section delimiter */
+
+/* Global QM Rx registers */
+#define FBNIC_CSR_START_QM_RX 0x00c00 /* CSR section delimiter */
+#define FBNIC_QM_RCQ_IDLE(n) (0x00c00 + (n)) /* 0x03000 + 4*n */
+#define FBNIC_QM_RCQ_IDLE_CNT 4
+#define FBNIC_QM_RCQ_CTL0 0x00c0c /* 0x03030 */
+#define FBNIC_QM_RCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_RCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_HPQ_IDLE(n) (0x00c0f + (n)) /* 0x0303c + 4*n */
+#define FBNIC_QM_HPQ_IDLE_CNT 4
+#define FBNIC_QM_PPQ_IDLE(n) (0x00c13 + (n)) /* 0x0304c + 4*n */
+#define FBNIC_QM_PPQ_IDLE_CNT 4
+#define FBNIC_QM_RNI_RBP_CTL 0x00c2d /* 0x030b4 */
+#define FBNIC_QM_RNI_RBP_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RBP_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RDE_CTL 0x00c2e /* 0x030b8 */
+#define FBNIC_QM_RNI_RDE_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RCM_CTL 0x00c2f /* 0x030bc */
+#define FBNIC_QM_RNI_RCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_RX 0x00c34 /* CSR section delimiter */
+
+/* TCE registers */
+#define FBNIC_CSR_START_TCE 0x04000 /* CSR section delimiter */
+#define FBNIC_TCE_REG_BASE 0x04000 /* 0x10000 */
+
+#define FBNIC_TCE_LSO_CTRL 0x04000 /* 0x10000 */
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST CSR_GENMASK(8, 0)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID CSR_GENMASK(17, 9)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_END CSR_GENMASK(26, 18)
+#define FBNIC_TCE_LSO_CTRL_IPID_MODE_INC CSR_BIT(27)
+
+#define FBNIC_TCE_CSO_CTRL 0x04001 /* 0x10004 */
+#define FBNIC_TCE_CSO_CTRL_TCP_ZERO_CSUM CSR_BIT(0)
+
+#define FBNIC_TCE_TXB_CTRL 0x04002 /* 0x10008 */
+#define FBNIC_TCE_TXB_CTRL_LOAD CSR_BIT(0)
+#define FBNIC_TCE_TXB_CTRL_TCAM_ENABLE CSR_BIT(1)
+#define FBNIC_TCE_TXB_CTRL_DISABLE CSR_BIT(2)
+
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL 0x04003 /* 0x1000c */
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_TEI_Q0_CTRL 0x04004 /* 0x10010 */
+#define FBNIC_TCE_TXB_TEI_Q1_CTRL 0x04005 /* 0x10014 */
+#define FBNIC_TCE_TXB_MC_Q_CTRL 0x04006 /* 0x10018 */
+#define FBNIC_TCE_TXB_RX_TEI_Q_CTRL 0x04007 /* 0x1001c */
+#define FBNIC_TCE_TXB_RX_BMC_Q_CTRL 0x04008 /* 0x10020 */
+#define FBNIC_TCE_TXB_Q_CTRL_START CSR_GENMASK(10, 0)
+#define FBNIC_TCE_TXB_Q_CTRL_SIZE CSR_GENMASK(22, 11)
+
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL 0x04009 /* 0x10024 */
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL 0x0400a /* 0x10028 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_CLDR_CFG 0x0400b /* 0x1002c */
+#define FBNIC_TCE_TXB_CLDR_CFG_NUM_SLOT CSR_GENMASK(5, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG(n) (0x0400c + (n)) /* 0x10030 + 4*n */
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_CNT 16
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_0 CSR_GENMASK(1, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_1 CSR_GENMASK(3, 2)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_2 CSR_GENMASK(5, 4)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_3 CSR_GENMASK(7, 6)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_0 CSR_GENMASK(9, 8)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_1 CSR_GENMASK(11, 10)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_2 CSR_GENMASK(13, 12)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_3 CSR_GENMASK(15, 14)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_0 CSR_GENMASK(17, 16)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_1 CSR_GENMASK(19, 18)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_2 CSR_GENMASK(21, 20)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_3 CSR_GENMASK(23, 22)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_0 CSR_GENMASK(25, 24)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_1 CSR_GENMASK(27, 26)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_2 CSR_GENMASK(29, 28)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_3 CSR_GENMASK(31, 30)
+
+#define FBNIC_TCE_BMC_MAX_PKTSZ 0x0403a /* 0x100e8 */
+#define FBNIC_TCE_BMC_MAX_PKTSZ_TX CSR_GENMASK(13, 0)
+#define FBNIC_TCE_BMC_MAX_PKTSZ_RX CSR_GENMASK(27, 14)
+#define FBNIC_TCE_MC_MAX_PKTSZ 0x0403b /* 0x100ec */
+#define FBNIC_TCE_MC_MAX_PKTSZ_TMI CSR_GENMASK(13, 0)
+
+#define FBNIC_TCE_SOP_PROT_CTRL 0x0403c /* 0x100f0 */
+#define FBNIC_TCE_SOP_PROT_CTRL_TBI CSR_GENMASK(7, 0)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM CSR_GENMASK(14, 8)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_CM CSR_GENMASK(18, 15)
+
+#define FBNIC_TCE_DROP_CTRL 0x0403d /* 0x100f4 */
+#define FBNIC_TCE_DROP_CTRL_TTI_CM_DROP_EN CSR_BIT(0)
+#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
+#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+
+#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT 0x0404D /* 0x10134 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT \
+ 0x0404E /* 0x10138 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */
+#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */
+
+/* TMI registers */
+#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */
+#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
+#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
+#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
+/* Rx Buffer Registers */
+#define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */
+enum {
+ FBNIC_RXB_FIFO_MC = 0,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_FIFO_NET_TO_BMC = 3,
+ FBNIC_RXB_FIFO_HOST = 4,
+ /* Unused */
+ FBNIC_RXB_FIFO_BMC_TO_HOST = 6,
+ /* Unused */
+ FBNIC_RXB_FIFO_INDICES = 8
+};
+
+#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
+#define FBNIC_RXB_CT_SIZE_CNT 8
+#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
+#define FBNIC_RXB_CT_SIZE_PAYLOAD CSR_GENMASK(11, 6)
+#define FBNIC_RXB_CT_SIZE_ENABLE CSR_BIT(12)
+#define FBNIC_RXB_PAUSE_DROP_CTRL 0x08008 /* 0x20020 */
+#define FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE CSR_GENMASK(7, 0)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE CSR_GENMASK(15, 8)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE CSR_GENMASK(23, 16)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PS_ENABLE CSR_GENMASK(27, 24)
+#define FBNIC_RXB_PAUSE_THLD(n) (0x08009 + (n)) /* 0x20024 + 4*n */
+#define FBNIC_RXB_PAUSE_THLD_CNT 8
+#define FBNIC_RXB_PAUSE_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PAUSE_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_DROP_THLD(n) (0x08011 + (n)) /* 0x20044 + 4*n */
+#define FBNIC_RXB_DROP_THLD_CNT 8
+#define FBNIC_RXB_DROP_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_DROP_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_ECN_THLD(n) (0x0801e + (n)) /* 0x20078 + 4*n */
+#define FBNIC_RXB_ECN_THLD_CNT 8
+#define FBNIC_RXB_ECN_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_ECN_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_PBUF_CFG(n) (0x08027 + (n)) /* 0x2009c + 4*n */
+#define FBNIC_RXB_PBUF_CFG_CNT 8
+#define FBNIC_RXB_PBUF_BASE_ADDR CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PBUF_SIZE CSR_GENMASK(21, 13)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0 0x0802f /* 0x200bc */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2 CSR_GENMASK(23, 16)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM3 CSR_GENMASK(31, 24)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1 0x08030 /* 0x200c0 */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_BMC_WEIGHT 0x08031 /* 0x200c4 */
+#define FBNIC_RXB_CLDR_PRIO_CFG(n) (0x8034 + (n)) /* 0x200d0 + 4*n */
+#define FBNIC_RXB_CLDR_PRIO_CFG_CNT 16
+#define FBNIC_RXB_ENDIAN_FCS 0x08044 /* 0x20110 */
+enum {
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_DEQUEUE_BMC = 2,
+ FBNIC_RXB_DEQUEUE_HOST = 3,
+ FBNIC_RXB_DEQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_PBUF_CREDIT(n) (0x08047 + (n)) /* 0x2011C + 4*n */
+#define FBNIC_RXB_PBUF_CREDIT_CNT 8
+#define FBNIC_RXB_PBUF_CREDIT_MASK CSR_GENMASK(13, 0)
+#define FBNIC_RXB_INTF_CREDIT 0x0804f /* 0x2013C */
+#define FBNIC_RXB_INTF_CREDIT_MASK0 CSR_GENMASK(3, 0)
+#define FBNIC_RXB_INTF_CREDIT_MASK1 CSR_GENMASK(7, 4)
+#define FBNIC_RXB_INTF_CREDIT_MASK2 CSR_GENMASK(11, 8)
+#define FBNIC_RXB_INTF_CREDIT_MASK3 CSR_GENMASK(15, 12)
+
+#define FBNIC_RXB_PAUSE_EVENT_CNT(n) (0x08053 + (n)) /* 0x2014c + 4*n */
+#define FBNIC_RXB_DROP_FRMS_STS(n) (0x08057 + (n)) /* 0x2015c + 4*n */
+#define FBNIC_RXB_DROP_BYTES_STS_L(n) \
+ (0x08080 + 2 * (n)) /* 0x20200 + 8*n */
+#define FBNIC_RXB_DROP_BYTES_STS_H(n) \
+ (0x08081 + 2 * (n)) /* 0x20204 + 8*n */
+#define FBNIC_RXB_TRUN_FRMS_STS(n) (0x08091 + (n)) /* 0x20244 + 4*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_L(n) \
+ (0x080c0 + 2 * (n)) /* 0x20300 + 8*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_H(n) \
+ (0x080c1 + 2 * (n)) /* 0x20304 + 8*n */
+#define FBNIC_RXB_TRANS_PAUSE_STS(n) (0x080d1 + (n)) /* 0x20344 + 4*n */
+#define FBNIC_RXB_TRANS_DROP_STS(n) (0x080d9 + (n)) /* 0x20364 + 4*n */
+#define FBNIC_RXB_TRANS_ECN_STS(n) (0x080e1 + (n)) /* 0x20384 + 4*n */
+enum {
+ FBNIC_RXB_ENQUEUE_NET = 0,
+ FBNIC_RXB_ENQUEUE_BMC = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_ENQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_DRBO_FRM_CNT_SRC(n) (0x080f9 + (n)) /* 0x203e4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(n) \
+ (0x080fd + (n)) /* 0x203f4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_H(n) \
+ (0x08101 + (n)) /* 0x20404 + 4*n */
+#define FBNIC_RXB_INTF_FRM_CNT_DST(n) (0x08105 + (n)) /* 0x20414 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_L(n) \
+ (0x08109 + (n)) /* 0x20424 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_H(n) \
+ (0x0810d + (n)) /* 0x20434 + 4*n */
+#define FBNIC_RXB_PBUF_FRM_CNT_DST(n) (0x08111 + (n)) /* 0x20444 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_L(n) \
+ (0x08115 + (n)) /* 0x20454 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_H(n) \
+ (0x08119 + (n)) /* 0x20464 + 4*n */
+
+#define FBNIC_RXB_PBUF_FIFO_LEVEL(n) (0x0811d + (n)) /* 0x20474 + 4*n */
+
+#define FBNIC_RXB_INTEGRITY_ERR(n) (0x0812f + (n)) /* 0x204bc + 4*n */
+#define FBNIC_RXB_MAC_ERR(n) (0x08133 + (n)) /* 0x204cc + 4*n */
+#define FBNIC_RXB_PARSER_ERR(n) (0x08137 + (n)) /* 0x204dc + 4*n */
+#define FBNIC_RXB_FRM_ERR(n) (0x0813b + (n)) /* 0x204ec + 4*n */
+
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT 0x08143 /* 0x2050c */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT 0x08144 /* 0x20510 */
+#define FBNIC_CSR_END_RXB 0x081b1 /* CSR section delimiter */
+
+/* Rx Parser and Classifier Registers */
+#define FBNIC_CSR_START_RPC 0x08400 /* CSR section delimiter */
+#define FBNIC_RPC_RMI_CONFIG 0x08400 /* 0x21000 */
+#define FBNIC_RPC_RMI_CONFIG_OH_BYTES CSR_GENMASK(4, 0)
+#define FBNIC_RPC_RMI_CONFIG_FCS_PRESENT CSR_BIT(8)
+#define FBNIC_RPC_RMI_CONFIG_ENABLE CSR_BIT(12)
+#define FBNIC_RPC_RMI_CONFIG_MTU CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_ACT_TBL0_DEFAULT 0x0840a /* 0x21028 */
+#define FBNIC_RPC_ACT_TBL0_DROP CSR_BIT(0)
+#define FBNIC_RPC_ACT_TBL0_DEST_MASK CSR_GENMASK(3, 1)
+enum {
+ FBNIC_RPC_ACT_TBL0_DEST_HOST = 1,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC = 2,
+ FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
+};
+
+#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
+#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
+
+#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
+#define FBNIC_RPC_ACT_TBL1_RSS_ENA_MASK CSR_GENMASK(15, 0)
+enum {
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_SRC = 1,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_DST = 2,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_SRC = 4,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_DST = 8,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L2_DA = 16,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_RSS_BYTE = 32,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IV6_FL_LBL = 64,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_OV6_FL_LBL = 128,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_DSCP = 256,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L3_PROT = 512,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_PROT = 1024,
+};
+
+#define FBNIC_RPC_RSS_KEY(n) (0x0840c + (n)) /* 0x21030 + 4*n */
+#define FBNIC_RPC_RSS_KEY_BIT_LEN 425
+#define FBNIC_RPC_RSS_KEY_BYTE_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 8)
+#define FBNIC_RPC_RSS_KEY_DWORD_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 32)
+#define FBNIC_RPC_RSS_KEY_LAST_IDX \
+ (FBNIC_RPC_RSS_KEY_DWORD_LEN - 1)
+#define FBNIC_RPC_RSS_KEY_LAST_MASK \
+ CSR_GENMASK(31, \
+ FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \
+ FBNIC_RPC_RSS_KEY_BIT_LEN)
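+/* For the 425-bit key this works out to 54 bytes and 14 32-bit registers,
+ * with the last register (index 13) holding only bits 31:23 of its value
+ */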
+
+#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
+#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
+
+/* RPC RAM Registers */
+
+#define FBNIC_CSR_START_RPC_RAM 0x08800 /* CSR section delimiter */
+#define FBNIC_RPC_ACT_TBL0(n) (0x08800 + (n)) /* 0x22000 + 4*n */
+#define FBNIC_RPC_ACT_TBL1(n) (0x08840 + (n)) /* 0x22100 + 4*n */
+#define FBNIC_RPC_ACT_TBL_NUM_ENTRIES 64
+
+/* TCAM Tables */
+#define FBNIC_RPC_TCAM_VALIDATE CSR_BIT(31)
+
+/* 64 Action TCAM Entries, 12 registers
+ * 3 mixed, src port, dst port, 6 L4 words, and Validate
+ */
+#define FBNIC_RPC_TCAM_ACT(m, n) \
+ (0x08880 + 0x40 * (n) + (m)) /* 0x22200 + 256*n + 4*m */
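+/* e.g. FBNIC_RPC_TCAM_ACT(2, 1) == 0x08880 + 0x40 + 2 == 0x088c2,
+ * i.e. byte address 0x22308 (0x22200 + 256*1 + 4*2)
+ */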
+
+#define FBNIC_RPC_TCAM_ACT_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_ACT_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_TCAM_MACDA(m, n) \
+ (0x08b80 + 0x20 * (n) + (m)) /* 0x022e00 + 128*n + 4*m */
+#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_RSS_TBL(n, m) \
+ (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */
+#define FBNIC_RPC_RSS_TBL_COUNT 2
+#define FBNIC_RPC_RSS_TBL_SIZE 256
+#define FBNIC_CSR_END_RPC_RAM 0x08f1f /* CSR section delimiter */
+
+/* Fab Registers */
+#define FBNIC_CSR_START_FAB 0x0C000 /* CSR section delimiter */
+#define FBNIC_FAB_AXI4_AR_SPACER_2_CFG 0x0C005 /* 0x30014 */
+#define FBNIC_FAB_AXI4_AR_SPACER_MASK CSR_BIT(16)
+#define FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD CSR_GENMASK(15, 0)
+#define FBNIC_CSR_END_FAB 0x0C020 /* CSR section delimiter */
+
+/* Master Registers */
+#define FBNIC_CSR_START_MASTER 0x0C400 /* CSR section delimiter */
+#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */
+#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */
+
+/* MAC MAC registers (ASIC only) */
+#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */
+#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */
+#define FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS CSR_BIT(29)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS CSR_BIT(28)
+#define FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS CSR_BIT(27)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN CSR_BIT(11)
+#define FBNIC_MAC_COMMAND_CONFIG_LOOPBACK_EN CSR_BIT(10)
+#define FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN CSR_BIT(4)
+#define FBNIC_MAC_COMMAND_CONFIG_RX_ENA CSR_BIT(1)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_ENA CSR_BIT(0)
+#define FBNIC_MAC_CL01_PAUSE_QUANTA 0x11015 /* 0x44054 */
+#define FBNIC_MAC_CL01_QUANTA_THRESH 0x11019 /* 0x44064 */
+#define FBNIC_CSR_END_MAC_MAC 0x11028 /* CSR section delimiter */
+
+/* Signals from MAC, AN, PCS, and LED CSR registers (ASIC only) */
+#define FBNIC_CSR_START_SIG 0x11800 /* CSR section delimiter */
+#define FBNIC_SIG_MAC_IN0 0x11800 /* 0x46000 */
+#define FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK CSR_BIT(14)
+#define FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK CSR_BIT(13)
+#define FBNIC_SIG_MAC_IN0_RESET_TX_CLK CSR_BIT(12)
+#define FBNIC_SIG_MAC_IN0_RESET_RX_CLK CSR_BIT(11)
+#define FBNIC_SIG_MAC_IN0_TX_CRC CSR_BIT(8)
+#define FBNIC_SIG_MAC_IN0_CFG_MODE128 CSR_BIT(10)
+#define FBNIC_SIG_PCS_OUT0 0x11808 /* 0x46020 */
+#define FBNIC_SIG_PCS_OUT0_LINK CSR_BIT(27)
+#define FBNIC_SIG_PCS_OUT0_BLOCK_LOCK CSR_GENMASK(24, 5)
+#define FBNIC_SIG_PCS_OUT0_AMPS_LOCK CSR_GENMASK(4, 1)
+#define FBNIC_SIG_PCS_OUT1 0x11809 /* 0x46024 */
+#define FBNIC_SIG_PCS_OUT1_FCFEC_LOCK CSR_GENMASK(11, 8)
+#define FBNIC_SIG_PCS_INTR_STS 0x11814 /* 0x46050 */
+#define FBNIC_SIG_PCS_INTR_LINK_DOWN CSR_BIT(1)
+#define FBNIC_SIG_PCS_INTR_LINK_UP CSR_BIT(0)
+#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */
+#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
+
+/* PUL User Registers */
+#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
+#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+
+/* Queue Registers
+ *
+ * The queue register offsets are specific to a given queue grouping, so to
+ * find the actual register offset it is necessary to combine FBNIC_QUEUE(n)
+ * with the register, like so:
+ * FBNIC_QUEUE_TWQ0_CTL(n) == FBNIC_QUEUE(n) + FBNIC_QUEUE_TWQ0_CTL
+ */
+#define FBNIC_CSR_START_QUEUE 0x40000 /* CSR section delimiter */
+#define FBNIC_QUEUE_STRIDE 0x400 /* 0x1000 */
+#define FBNIC_QUEUE(n)\
+ (0x40000 + FBNIC_QUEUE_STRIDE * (n)) /* 0x100000 + 4096*n */
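+/* e.g. FBNIC_QUEUE(2) + FBNIC_QUEUE_TCQ_CTL == 0x40000 + 2 * 0x400 + 0x080,
+ * i.e. word offset 0x40880, or byte address 0x102200
+ */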
+
+#define FBNIC_QUEUE_TWQ0_CTL 0x000 /* 0x000 */
+#define FBNIC_QUEUE_TWQ1_CTL 0x001 /* 0x004 */
+#define FBNIC_QUEUE_TWQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */
+#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */
+
+#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */
+#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */
+#define FBNIC_QUEUE_TWQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TWQ0_BAL 0x020 /* 0x080 */
+#define FBNIC_QUEUE_BAL_MASK CSR_GENMASK(31, 7)
+#define FBNIC_QUEUE_TWQ0_BAH 0x021 /* 0x084 */
+#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
+#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+
+/* Tx Completion Queue Registers */
+#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
+#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */
+
+#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */
+#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TCQ_BAL 0x0a0 /* 0x280 */
+#define FBNIC_QUEUE_TCQ_BAH 0x0a1 /* 0x284 */
+
+/* Tx Interrupt Manager Registers */
+#define FBNIC_QUEUE_TIM_CTL 0x0c0 /* 0x300 */
+#define FBNIC_QUEUE_TIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_TIM_THRESHOLD 0x0c1 /* 0x304 */
+#define FBNIC_QUEUE_TIM_THRESHOLD_TWD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_TIM_CLEAR 0x0c2 /* 0x308 */
+#define FBNIC_QUEUE_TIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_SET 0x0c3 /* 0x30c */
+#define FBNIC_QUEUE_TIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_MASK 0x0c4 /* 0x310 */
+#define FBNIC_QUEUE_TIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_TIM_TIMER 0x0c5 /* 0x314 */
+
+#define FBNIC_QUEUE_TIM_COUNTS 0x0c6 /* 0x318 */
+#define FBNIC_QUEUE_TIM_COUNTS_CNT1_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_TIM_COUNTS_CNT0_MASK CSR_GENMASK(14, 0)
+
+/* Rx Completion Queue Registers */
+#define FBNIC_QUEUE_RCQ_CTL 0x200 /* 0x800 */
+#define FBNIC_QUEUE_RCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */
+
+#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */
+#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_RCQ_BAL 0x220 /* 0x880 */
+#define FBNIC_QUEUE_RCQ_BAH 0x221 /* 0x884 */
+
+/* Rx Buffer Descriptor Queue Registers */
+#define FBNIC_QUEUE_BDQ_CTL 0x240 /* 0x900 */
+#define FBNIC_QUEUE_BDQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_BDQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE CSR_BIT(30)
+
+#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */
+#define FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */
+
+#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */
+#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */
+#define FBNIC_QUEUE_BDQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_BDQ_HPQ_BAL 0x260 /* 0x980 */
+#define FBNIC_QUEUE_BDQ_HPQ_BAH 0x261 /* 0x984 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAL 0x262 /* 0x988 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAH 0x263 /* 0x98c */
+
+/* Rx DMA Engine Configuration */
+#define FBNIC_QUEUE_RDE_CTL0 0x2a0 /* 0xa80 */
+#define FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT CSR_BIT(31)
+#define FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK CSR_GENMASK(30, 29)
+enum {
+ FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE = 0,
+ FBNIC_QUEUE_RDE_CTL0_DROP_WAIT = 1,
+ FBNIC_QUEUE_RDE_CTL0_DROP_NEVER = 2,
+};
+
+#define FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK CSR_GENMASK(28, 20)
+#define FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK CSR_GENMASK(19, 11)
+
+#define FBNIC_QUEUE_RDE_CTL1 0x2a1 /* 0xa84 */
+#define FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK CSR_GENMASK(24, 12)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK CSR_GENMASK(11, 9)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK CSR_GENMASK(8, 6)
+#define FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK CSR_GENMASK(5, 2)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_MASK CSR_GENMASK(1, 0)
+enum {
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_NONE = 0,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_ALL = 1,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
+};
+
+/* Rx Interrupt Manager Registers */
+#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
+#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_RIM_THRESHOLD 0x2c1 /* 0xb04 */
+#define FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_RIM_CLEAR 0x2c2 /* 0xb08 */
+#define FBNIC_QUEUE_RIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_SET 0x2c3 /* 0xb0c */
+#define FBNIC_QUEUE_RIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_MASK 0x2c4 /* 0xb10 */
+#define FBNIC_QUEUE_RIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_RIM_COAL_STATUS 0x2c5 /* 0xb14 */
+#define FBNIC_QUEUE_RIM_RCD_COUNT_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_RIM_TIMER_MASK CSR_GENMASK(13, 0)
+#define FBNIC_MAX_QUEUES 128
+#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
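+/* With 128 queues of stride 0x400 this works out to 0x5ffff */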
+
+/* BAR 4 CSRs */
+
+/* The IPC mailbox consists of 32 mailboxes, with each mailbox made up
+ * of 32 4-byte registers. We use 2 registers per descriptor, so the
+ * effective length of each mailbox is reduced to 16 descriptors.
+ *
+ * Currently we use an offset of 0x6000 on BAR4 for the mailbox, so we just
+ * have to do the math and determine the offset based on the mailbox
+ * direction and the index inside that mailbox.
+ */
+#define FBNIC_IPC_MBX_DESC_LEN 16
+#define FBNIC_IPC_MBX(mbx_idx, desc_idx) \
+ ((((mbx_idx) * FBNIC_IPC_MBX_DESC_LEN + (desc_idx)) * 2) + 0x6000)
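+/* e.g. descriptor 3 of the Tx mailbox (index 1) sits at offset
+ * ((1 * 16 + 3) * 2) + 0x6000 == 0x6026 within BAR4
+ */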
+
+/* Use first register in mailbox to flush writes */
+#define FBNIC_FW_ZERO_REG FBNIC_IPC_MBX(0, 0)
+
+enum {
+ FBNIC_IPC_MBX_RX_IDX,
+ FBNIC_IPC_MBX_TX_IDX,
+ FBNIC_IPC_MBX_INDICES,
+};
+
+#define FBNIC_IPC_MBX_DESC_LEN_MASK DESC_GENMASK(63, 48)
+#define FBNIC_IPC_MBX_DESC_EOM DESC_BIT(46)
+#define FBNIC_IPC_MBX_DESC_ADDR_MASK DESC_GENMASK(45, 3)
+#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1)
+#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0)
+
+#endif /* _FBNIC_CSR_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
new file mode 100644
index 000000000000..e87049dfd223
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <asm/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "fbnic.h"
+
+#define FBNIC_SN_STR_LEN 24
+
+static int fbnic_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ int err;
+
+ if (fbd->dsn) {
+ unsigned char serial[FBNIC_SN_STR_LEN];
+ u8 dsn[8];
+
+ put_unaligned_be64(fbd->dsn, dsn);
+ err = snprintf(serial, FBNIC_SN_STR_LEN, "%8phD", dsn);
+ if (err < 0)
+ return err;
+
+ err = devlink_info_serial_number_put(req, serial);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops fbnic_devlink_ops = {
+ .info_get = fbnic_devlink_info_get,
+};
+
+void fbnic_devlink_free(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_free(devlink);
+}
+
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev)
+{
+ void __iomem * const *iomap_table;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+
+ devlink = devlink_alloc(&fbnic_devlink_ops, sizeof(struct fbnic_dev),
+ &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ fbd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, fbd);
+ fbd->dev = &pdev->dev;
+
+ iomap_table = pcim_iomap_table(pdev);
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ fbd->dsn = pci_get_dsn(pdev);
+ fbd->mps = pcie_get_mps(pdev);
+ fbd->readrq = pcie_get_readrq(pdev);
+
+ fbd->mac_addr_boundary = FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY;
+
+ return fbd;
+}
+
+void fbnic_devlink_register(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_register(devlink);
+}
+
+void fbnic_devlink_unregister(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
new file mode 100644
index 000000000000..809ba6729442
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#define DRV_NAME "fbnic"
+#define DRV_SUMMARY "Meta(R) Host Network Interface Driver"
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
new file mode 100644
index 000000000000..0c6e1b4c119b
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_tlv.h"
+
+static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u64 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset, lower_32_bits(desc));
+}
+
+static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ u64 desc;
+
+ desc = fw_rd32(fbd, desc_offset);
+ desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
+
+ return desc;
+}
+
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int desc_idx;
+
+ /* Initialize first descriptor to all 0s. Doing this gives us a
+ * solid stop for the firmware to hit when it is done looping
+ * through the ring.
+ */
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
+
+ fw_wrfl(fbd);
+
+ /* We then fill the rest of the ring, starting at the end and moving
+ * back toward descriptor 0, with skip descriptors that have neither
+ * length nor address, telling the firmware that it can skip them and
+ * move past them to the one we initialized to 0.
+ */
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+ fw_wrfl(fbd);
+ }
+}
+
+void fbnic_mbx_init(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Initialize lock to protect Tx ring */
+ spin_lock_init(&fbd->fw_tx_lock);
+
+ /* Reinitialize mailbox memory */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
+
+ /* Do not auto-clear the FW mailbox interrupt, let SW clear it */
+ wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));
+
+ /* Clear any stale causes in vector 0 as that is used for doorbell */
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+}
+
+static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
+ struct fbnic_tlv_msg *msg, u16 length, u8 eom)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ u8 tail = mbx->tail;
+ dma_addr_t addr;
+ int direction;
+
+ if (!mbx->ready || !fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
+ if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
+ return -EBUSY;
+
+ addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
+ if (dma_mapping_error(fbd->dev, addr)) {
+ free_page((unsigned long)msg);
+
+ return -ENOSPC;
+ }
+
+ mbx->buf_info[tail].msg = msg;
+ mbx->buf_info[tail].addr = addr;
+
+ mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;
+
+ fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);
+
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
+ FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
+ (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
+ (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+
+ return 0;
+}
+
+static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ int direction;
+
+ if (!mbx->buf_info[desc_idx].msg)
+ return;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
+ PAGE_SIZE, direction);
+
+ free_page((unsigned long)mbx->buf_info[desc_idx].msg);
+ mbx->buf_info[desc_idx].msg = NULL;
+}
+
+static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int i;
+
+ fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+
+ for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
+ fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
+}
+
+void fbnic_mbx_clean(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_clean_desc_ring(fbd, i);
+}
+
+#define FBNIC_MBX_MAX_PAGE_SIZE FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
+#define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
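+/* The descriptor length field is only 16 bits wide, so the largest buffer
+ * that can be described is 64KB - 1; on systems with 4KB pages
+ * FBNIC_RX_PAGE_SIZE simply resolves to PAGE_SIZE.
+ */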
+
+static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
+ int err = 0;
+
+ /* Do nothing if mailbox is not ready, or we already have pages on
+ * the ring that can be used by the firmware
+ */
+ if (!rx_mbx->ready)
+ return -ENODEV;
+
+ /* Fill all but one of the unused descriptors in the Rx queue. */
+ count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
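+ /* e.g. with head == 9 and tail == 2 this yields count == 6, always
+ * leaving one descriptor unused so a full ring is never mistaken
+ * for an empty one
+ */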
+ while (!err && count--) {
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
+ __GFP_NOWARN);
+ if (!msg) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
+ FBNIC_RX_PAGE_SIZE, 0);
+ if (err)
+ free_page((unsigned long)msg);
+ }
+
+ return err;
+}
+
+static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ u8 head = tx_mbx->head;
+ u64 desc;
+
+ while (head != tx_mbx->tail) {
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ tx_mbx->head = head;
+}
+
+/**
+ * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
+ * @fbd: FBNIC device structure
+ * @msg_type: ENUM value indicating message type to send
+ *
+ * Return:
+ * One of the following values:
+ * -EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
+ *
+ * This function sends a single TLV header indicating the host wants to take
+ * some action. However, there are no other side effects, which means that
+ * any response will need to be caught via a completion if this action is
+ * expected to kick off a resultant action.
+ */
+static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(msg_type);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ free_page((unsigned long)msg);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
+ * @fbd: FBNIC device structure
+ *
+ * Return: zero on success (including when the mailbox is not supported),
+ * or a negative error value on failure.
+ *
+ * Sends a single TLV header indicating the host wants the firmware to
+ * confirm the capabilities and version.
+ **/
+static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
+{
+ int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+
+ /* Return 0 if we are not calling this on ASIC */
+ return (err == -EOPNOTSUPP) ? 0 : err;
+}
+
+static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+
+ /* This is a one-time init, so just exit if it has already completed */
+ if (mbx->ready)
+ return;
+
+ mbx->ready = true;
+
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ /* Make sure we have a page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ /* Force version to 1 if we successfully requested an update
+ * from the firmware. This should be overwritten once we get
+ * the actual version from the firmware in the capabilities
+ * request message.
+ */
+ if (!fbnic_fw_xmit_cap_msg(fbd) &&
+ !fbd->fw_cap.running.mgmt.version)
+ fbd->fw_cap.running.mgmt.version = 1;
+ break;
+ }
+}
+
+static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* We only need to do this on the first interrupt following init.
+ * This primes the mailbox so that we will have cleared all the
+ * skip descriptors.
+ */
+ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
+ return;
+
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_postinit_desc_ring(fbd, i);
+}
+
+/**
+ * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
+ * to FW mailbox
+ *
+ * @fbd: FBNIC device structure
+ * @take_ownership: take/release the ownership
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Notifies the firmware that the driver either takes ownership of the NIC
+ * (when @take_ownership is true) or releases it.
+ */
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (take_ownership) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ /* Initialize the heartbeat; set the last response to 1 second in the
+ * past so that we will trigger a timeout if the firmware doesn't
+ * respond.
+ */
+ fbd->last_heartbeat_response = req_time - HZ;
+
+ fbd->last_heartbeat_request = req_time;
+
+ /* Set heartbeat detection based on if we are taking ownership */
+ fbd->fw_heartbeat_enabled = take_ownership;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
+ FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
+ struct fbnic_tlv_msg *attr, int len)
+{
+ int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
+ struct fbnic_tlv_msg *mac_results[8];
+ int err, i = 0;
+
+ /* Make sure we have enough room to process all the MAC addresses */
+ if (len > 8)
+ return -ENOSPC;
+
+ /* Parse the array */
+ err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
+ fbnic_fw_cap_resp_index,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
+ if (err)
+ return err;
+
+ /* Copy results into MAC addr array */
+ for (i = 0; i < len && mac_results[i]; i++)
+ fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);
+
+ /* Zero remaining unused addresses */
+ while (i < len)
+ eth_zero_addr(bmc_mac_addr[i++]);
+
+ return 0;
+}
+
+static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ u32 active_slot = 0, all_multi = 0;
+ struct fbnic_dev *fbd = opaque;
+ u32 speed = 0, fec = 0;
+ size_t commit_size = 0;
+ bool bmc_present;
+ int err;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
+ fbd->fw_cap.running.mgmt.version);
+
+ if (!fbd->fw_cap.running.mgmt.version)
+ return -EINVAL;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+
+ fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
+ running_ver);
+ dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
+ running_ver,
+ MIN_FW_MAJOR_VERSION,
+ MIN_FW_MINOR_VERSION,
+ MIN_FW_BUILD_VERSION);
+ /* Disable TX mailbox to prevent card use until firmware is
+ * updated.
+ */
+ fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
+ return -EINVAL;
+ }
+
+ get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+ if (!commit_size)
+ dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
+ fbd->fw_cap.stored.mgmt.version);
+ get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
+ fbd->fw_cap.running.bootloader.version);
+ get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
+ fbd->fw_cap.stored.bootloader.version);
+ get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
+ fbd->fw_cap.stored.undi.version);
+ get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
+ fbd->fw_cap.active_slot = active_slot;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
+ get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
+ fbd->fw_cap.link_speed = speed;
+ fbd->fw_cap.link_fec = fec;
+
+ bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
+ if (bmc_present) {
+ struct fbnic_tlv_msg *attr;
+
+ attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
+ if (!attr)
+ return -EINVAL;
+
+ err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
+ attr, 4);
+ if (err)
+ return err;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ } else {
+ memset(fbd->fw_cap.bmc_mac_addr, 0,
+ sizeof(fbd->fw_cap.bmc_mac_addr));
+ }
+
+ fbd->fw_cap.bmc_present = bmc_present;
+
+ if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
+ fbd->fw_cap.all_multi = all_multi;
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_ownership_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ /* Count the ownership response as a heartbeat reply */
+ fbd->last_heartbeat_response = jiffies;
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_heartbeat_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ fbd->last_heartbeat_response = jiffies;
+
+ return 0;
+}
+
+static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ fbd->last_heartbeat_request = req_time;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
+{
+ unsigned long last_response = fbd->last_heartbeat_response;
+ unsigned long last_request = fbd->last_heartbeat_request;
+
+ return !time_before(last_response, last_request);
+}
+
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
+{
+ int err = -ETIMEDOUT;
+ int attempts = 50;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ while (attempts--) {
+ msleep(200);
+ if (poll)
+ fbnic_mbx_poll(fbd);
+
+ if (!fbnic_fw_heartbeat_current(fbd))
+ continue;
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Failed to send heartbeat message: %d\n",
+ err);
+ break;
+ }
+
+ return err;
+}
+
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
+{
+ unsigned long last_request = fbd->last_heartbeat_request;
+ int err;
+
+ /* Do not check heartbeat or send another request until current
+ * period has expired. Otherwise we might start spamming requests.
+ */
+ if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
+ return;
+
+ /* We already reported no mailbox. Wait for it to come back */
+ if (!fbd->fw_heartbeat_enabled)
+ return;
+
+ /* Was the last heartbeat response a long time ago? */
+ if (!fbnic_fw_heartbeat_current(fbd)) {
+ dev_warn(fbd->dev,
+ "Firmware did not respond to heartbeat message\n");
+ fbd->fw_heartbeat_enabled = false;
+ }
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev, "Failed to send heartbeat message\n");
+}
+
+static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
+ FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
+ fbnic_fw_parse_cap_resp),
+ FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
+ fbnic_fw_parse_ownership_resp),
+ FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
+ fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_MSG_ERROR
+};
+
+static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 head = rx_mbx->head;
+ u64 desc, length;
+
+ while (head != rx_mbx->tail) {
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ msg = rx_mbx->buf_info[head].msg;
+
+ length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);
+
+ /* Ignore NULL mailbox descriptors */
+ if (!length)
+ goto next_page;
+
+ /* Report descriptors with length greater than page size */
+ if (length > PAGE_SIZE) {
+ dev_warn(fbd->dev,
+ "Invalid mailbox descriptor length: %lld\n",
+ length);
+ goto next_page;
+ }
+
+ if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
+ dev_warn(fbd->dev, "Mailbox message length mismatch\n");
+
+ /* If parsing fails dump contents of message to dmesg */
+ err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
+ if (err) {
+ dev_warn(fbd->dev, "Unable to process message: %d\n",
+ err);
+ print_hex_dump(KERN_WARNING, "fbnic:",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ msg, length, true);
+ }
+
+ dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
+next_page:
+
+ free_page((unsigned long)rx_mbx->buf_info[head].msg);
+ rx_mbx->buf_info[head].msg = NULL;
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ rx_mbx->head = head;
+
+ /* Make sure we have at least one page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+}
+
+void fbnic_mbx_poll(struct fbnic_dev *fbd)
+{
+ fbnic_mbx_postinit(fbd);
+
+ fbnic_mbx_process_tx_msgs(fbd);
+ fbnic_mbx_process_rx_msgs(fbd);
+}
+
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx;
+ int attempts = 50;
+
+ /* Fail immediately if BAR4 isn't there */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ while (!tx_mbx->ready && --attempts) {
+ /* Force the firmware to trigger an interrupt response to
+ * avoid the mailbox getting stuck closed if the interrupt
+ * is reset.
+ */
+ fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+
+ msleep(200);
+
+ fbnic_mbx_poll(fbd);
+ }
+
+ return attempts ? 0 : -ETIMEDOUT;
+}
+
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx;
+ int attempts = 50;
+ u8 count = 0;
+
+ /* Nothing to do if there is no mailbox */
+ if (!fbnic_fw_present(fbd))
+ return;
+
+ /* Grab a pointer to the Tx mailbox */
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ /* Nothing to do if mailbox never got to ready */
+ if (!tx_mbx->ready)
+ return;
+
+ /* Give the firmware time to process the packets; we will wait up to
+ * 10 seconds, which is 50 waits of 200ms.
+ */
+ do {
+ u8 head = tx_mbx->head;
+
+ if (head == tx_mbx->tail)
+ break;
+
+ msleep(200);
+ fbnic_mbx_process_tx_msgs(fbd);
+
+ count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
+ } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
new file mode 100644
index 000000000000..c65bca613665
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_FW_H_
+#define _FBNIC_FW_H_
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+struct fbnic_dev;
+struct fbnic_tlv_msg;
+
+struct fbnic_fw_mbx {
+ u8 ready, head, tail;
+ struct {
+ struct fbnic_tlv_msg *msg;
+ dma_addr_t addr;
+ } buf_info[FBNIC_IPC_MBX_DESC_LEN];
+};
+
+/* FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN */
+#define FBNIC_FW_VER_MAX_SIZE 32
+/* The formatted version is in the format XX.YY.ZZ-RRR_COMMIT, so 13 bytes
+ * are reserved for everything preceding the commit string.
+ */
+#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13)
+#define FBNIC_FW_LOG_MAX_SIZE 256
+
+struct fbnic_fw_ver {
+ u32 version;
+ char commit[FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE];
+};
+
+struct fbnic_fw_cap {
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader;
+ } running;
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader, undi;
+ } stored;
+ u8 active_slot;
+ u8 bmc_mac_addr[4][ETH_ALEN];
+ u8 bmc_present : 1;
+ u8 all_multi : 1;
+ u8 link_speed;
+ u8 link_fec;
+};
+
+void fbnic_mbx_init(struct fbnic_dev *fbd);
+void fbnic_mbx_clean(struct fbnic_dev *fbd);
+void fbnic_mbx_poll(struct fbnic_dev *fbd);
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+
+#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \
+do { \
+ const u32 __rev_id = _rev_id; \
+ snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_BUILD, __rev_id), \
+ _delim, _commit); \
+} while (0)
+
+#define fbnic_mk_fw_ver_str(_rev_id, _str) \
+ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str)
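+/* e.g. a rev_id of 0x01020304 formats as "01.02.03-004"; with a "_" delimiter
+ * and a commit string it becomes "01.02.03-004_<commit>"
+ */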
+
+#define FW_HEARTBEAT_PERIOD (10 * HZ)
+
+enum {
+ FBNIC_TLV_MSG_ID_HOST_CAP_REQ = 0x10,
+ FBNIC_TLV_MSG_ID_FW_CAP_RESP = 0x11,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_REQ = 0x12,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+};
+
+#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
+#define FBNIC_FW_CAP_RESP_VERSION_MINOR CSR_GENMASK(23, 16)
+#define FBNIC_FW_CAP_RESP_VERSION_PATCH CSR_GENMASK(15, 8)
+#define FBNIC_FW_CAP_RESP_VERSION_BUILD CSR_GENMASK(7, 0)
+enum {
+ FBNIC_FW_CAP_RESP_VERSION = 0x0,
+ FBNIC_FW_CAP_RESP_BMC_PRESENT = 0x1,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR = 0x2,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY = 0x3,
+ FBNIC_FW_CAP_RESP_STORED_VERSION = 0x4,
+ FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT = 0x5,
+ FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR = 0x6,
+ FBNIC_FW_CAP_RESP_BMC_ALL_MULTI = 0x8,
+ FBNIC_FW_CAP_RESP_FW_STATE = 0x9,
+ FBNIC_FW_CAP_RESP_FW_LINK_SPEED = 0xa,
+ FBNIC_FW_CAP_RESP_FW_LINK_FEC = 0xb,
+ FBNIC_FW_CAP_RESP_STORED_COMMIT_STR = 0xc,
+ FBNIC_FW_CAP_RESP_CMRT_VERSION = 0xd,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION = 0xe,
+ FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR = 0xf,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
+ FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
+ FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_LINK_SPEED_25R1 = 1,
+ FBNIC_FW_LINK_SPEED_50R2 = 2,
+ FBNIC_FW_LINK_SPEED_50R1 = 3,
+ FBNIC_FW_LINK_SPEED_100R2 = 4,
+};
+
+enum {
+ FBNIC_FW_LINK_FEC_NONE = 1,
+ FBNIC_FW_LINK_FEC_RS = 2,
+ FBNIC_FW_LINK_FEC_BASER = 3,
+};
+
+enum {
+ FBNIC_FW_OWNERSHIP_FLAG = 0x0,
+ FBNIC_FW_OWNERSHIP_MSG_MAX
+};
+#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
new file mode 100644
index 000000000000..914362195920
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)data;
+
+ fbnic_mbx_poll(fbd);
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox
+ * @fbd: Pointer to device to initialize
+ *
+ * This function will initialize the firmware mailbox rings, enable the IRQ,
+ * and initialize the communication between the firmware and the host. The
+ * firmware is expected to respond to the initialization by sending an
+ * interrupt, essentially notifying the host that it has seen the
+ * initialization and is now synced up.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_fw_enable_mbx(struct fbnic_dev *fbd)
+{
+ u32 vector = fbd->fw_msix_vector;
+ int err;
+
+ /* Request the IRQ for FW Mailbox vector. */
+ err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr,
+ IRQF_ONESHOT, dev_name(fbd->dev), fbd);
+ if (err)
+ return err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ fbnic_mbx_init(fbd);
+ err = fbnic_mbx_poll_tx_ready(fbd);
+ if (err) {
+ dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ free_irq(vector, fbd);
+ return err;
+ }
+
+ /* Enable interrupts */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
+
+/**
+ * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state
+ * @fbd: Pointer to device to disable
+ *
+ * This function will disable the mailbox interrupt, free any messages still
+ * in the mailbox and place it into a standby state. The firmware is
+ * expected to see the update and assume that the host is in the reset state.
+ **/
+void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
+{
+ /* Disable interrupt and free vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ /* Free the vector */
+ free_irq(fbd->fw_msix_vector, fbd);
+
+ /* Make sure the message disabling logging is sent; this must be done
+ * here to avoid the risk of it completing without a running interrupt.
+ */
+ fbnic_mbx_flush_tx(fbd);
+
+ /* Reset the mailboxes to the initialized state */
+ fbnic_mbx_clean(fbd);
+}
+
+static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = data;
+ struct fbnic_net *fbn;
+
+ if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0),
+ 1u << FBNIC_PCS_MSIX_ENTRY);
+ return IRQ_HANDLED;
+ }
+
+ fbn = netdev_priv(fbd->netdev);
+
+ phylink_pcs_change(&fbn->phylink_pcs, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link
+ * @fbd: Pointer to device to initialize
+ *
+ * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
+ * will remain disabled until we start the MAC/PCS/PHY logic via phylink.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_pcs_irq_enable(struct fbnic_dev *fbd)
+{
+ u32 vector = fbd->pcs_msix_vector;
+ int err;
+
+ /* Request the IRQ for MAC link vector.
+ * Map MAC cause to it, and unmask it
+ */
+ err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
+ fbd->netdev->name, fbd);
+ if (err)
+ return err;
+
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
+
+ return 0;
+}
+
+/**
+ * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping
+ * @fbd: Pointer to device that is stopping
+ *
+ * This function undoes the work done in fbnic_pcs_irq_enable and prepares
+ * the device to no longer receive traffic on the host interface.
+ **/
+void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
+{
+ /* Disable interrupt */
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY);
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ /* Free the vector */
+ free_irq(fbd->pcs_msix_vector, fbd);
+}
+
+int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return irq;
+
+ return request_irq(irq, handler, flags, name, data);
+}
+
+void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+void fbnic_free_irqs(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+
+ fbd->pcs_msix_vector = 0;
+ fbd->fw_msix_vector = 0;
+
+ fbd->num_irqs = 0;
+
+ pci_free_irq_vectors(pdev);
+}
+
+int fbnic_alloc_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int wanted_irqs = FBNIC_NON_NAPI_VECTORS;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int num_irqs;
+
+ wanted_irqs += min_t(unsigned int, num_online_cpus(), FBNIC_MAX_RXQS);
+ num_irqs = pci_alloc_irq_vectors(pdev, FBNIC_NON_NAPI_VECTORS + 1,
+ wanted_irqs, PCI_IRQ_MSIX);
+ if (num_irqs < 0) {
+ dev_err(fbd->dev, "Failed to allocate MSI-X entries\n");
+ return num_irqs;
+ }
+
+ if (num_irqs < wanted_irqs)
+ dev_warn(fbd->dev, "Allocated %d IRQs, expected %d\n",
+ num_irqs, wanted_irqs);
+
+ fbd->num_irqs = num_irqs;
+
+ fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
+ fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
new file mode 100644
index 000000000000..7920e7af82d9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <net/tcp.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int readrq)
+{
+ u32 val = rd32(fbd, offset);
+
+ /* The TDF_CTL masks are a superset of the RNI_RBP ones, so we can
+ * use them when setting either the TDF_CTL or RNI_RBP registers.
+ */
+ val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int mps)
+{
+ u32 val = rd32(fbd, offset);
+
+ /* Currently all MPS masks are identical so just use the first one */
+ val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
+ FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
+{
+ bool override_1k = false;
+ int readrq, mps, cls;
+
+ /* All of the values are powers of 2, encoded starting with 64 == 0.
+ * Therefore we can either divide by 64 in the case of constants, or
+ * just subtract 6 from the log2 of the value, to get the value we
+ * will be programming into the registers.
+ */
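+ /* e.g. a 512B max read request encodes as ilog2(512) - 6 == 3 */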
+ readrq = ilog2(fbd->readrq) - 6;
+ if (readrq > 3)
+ override_1k = true;
+ readrq = clamp(readrq, 0, 3);
+
+ mps = ilog2(fbd->mps) - 6;
+ mps = clamp(mps, 0, 3);
+
+ cls = ilog2(L1_CACHE_BYTES) - 6;
+ cls = clamp(cls, 0, 3);
+
+ /* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
+ fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);
+
+ /* Configure QM TNI TDE:
+ * - Max outstanding AXI beats to 704 (768 - 64) - guarantees 8% of
+ * buffer capacity to descriptors.
+ * - Max outstanding transactions to 128
+ */
+ wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));
+
+ fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
+
+ /* Enable XALI AR/AW outbound */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+}
+
+static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
+{
+ u32 clock_freq;
+
+ /* Configure TSO behavior */
+ wr32(fbd, FBNIC_QM_TQS_CTL0,
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));
+
+ /* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
+ wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);
+
+ /* Configure MTU
+ * Due to a known HW issue we cannot set the MTU to within 16 octets
+ * of a 64-octet-aligned boundary. So we will set the TQS_MTU(s) to
+ * MTU + 1.
+ */
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
+ FBNIC_MAX_JUMBO_FRAME_SIZE + 1));
+
+ clock_freq = FBNIC_CLOCK_FREQ;
+
+ /* Be aggressive on the timings. We will have the interrupt
+ * threshold timer tick once every 1 usec and coalesce writes for
+ * up to 80 usecs.
+ */
+ wr32(fbd, FBNIC_QM_TCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
+ clock_freq / 12500));
+
+ /* We will have the interrupt threshold timer tick once every
+ * 1 usec and coalesce writes for up to 2 usecs.
+ */
+ wr32(fbd, FBNIC_QM_RCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT,
+ clock_freq / 500000));
+
+ /* Configure spacer control to 64 beats. */
+ wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG,
+ FBNIC_FAB_AXI4_AR_SPACER_MASK |
+ FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2));
+}
+
+#define FBNIC_DROP_EN_MASK 0x7d
+#define FBNIC_PAUSE_EN_MASK 0x14
+#define FBNIC_ECN_EN_MASK 0x10
+
+struct fbnic_fifo_config {
+ unsigned int addr;
+ unsigned int size;
+};
+
+/* Rx FIFO Configuration
+ * The table consists of 8 entries, of which only 4 are currently used.
+ * The starting addr is in units of 64B and the size is in 2KB units.
+ * Below is the human-readable version of the table defined further down:
+ * Function Addr Size
+ * ----------------------------------
+ * Network to Host/BMC 384K 64K
+ * Unused
+ * Unused
+ * Network to BMC 448K 32K
+ * Network to Host 0 384K
+ * Unused
+ * BMC to Host 480K 32K
+ * Unused
+ */
+static const struct fbnic_fifo_config fifo_config[] = {
+ { .addr = 0x1800, .size = 0x20 }, /* Network to Host/BMC */
+ { }, /* Unused */
+ { }, /* Unused */
+ { .addr = 0x1c00, .size = 0x10 }, /* Network to BMC */
+ { .addr = 0x0000, .size = 0xc0 }, /* Network to Host */
+ { }, /* Unused */
+ { .addr = 0x1e00, .size = 0x10 }, /* BMC to Host */
+ { } /* Unused */
+};
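+/* e.g. the first entry decodes to a start address of 0x1800 * 64B == 384KB
+ * and a size of 0x20 * 2KB == 64KB, matching the table above
+ */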
+
+static void fbnic_mac_init_rxb(struct fbnic_dev *fbd)
+{
+ bool rx_enable;
+ int i;
+
+ rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) &
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+
+ for (i = 0; i < 8; i++) {
+ unsigned int size = fifo_config[i].size;
+
+ /* If we are coming up on a system that already has the
+ * Rx data path enabled we don't need to reconfigure the
+ * FIFOs. Instead we can verify the values are large
+ * enough to meet our needs, and use them to populate the
+ * flow control, ECN, and drop thresholds.
+ */
+ if (rx_enable) {
+ size = FIELD_GET(FBNIC_RXB_PBUF_SIZE,
+ rd32(fbd, FBNIC_RXB_PBUF_CFG(i)));
+ if (size < fifo_config[i].size)
+ dev_warn(fbd->dev,
+ "fifo%d size of %d smaller than expected value of %d\n",
+ i, size << 11,
+ fifo_config[i].size << 11);
+ } else {
+ /* Program RXB cut-through */
+ wr32(fbd, FBNIC_RXB_CT_SIZE(i),
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) |
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2));
+
+ /* The packet buffer size has 2KB granularity, while the
+ * packet buffer base address has only 64B granularity.
+ */
+ wr32(fbd, FBNIC_RXB_PBUF_CFG(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR,
+ fifo_config[i].addr) |
+ FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size));
+
+ /* The granularity for the credits is 64B. This is
+ * based on RXB_PBUF_SIZE * 32 + 4.
+ */
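+ /* e.g. the 384KB host FIFO (size 0xc0) works out to
+ * 0xc0 * 32 + 4 == 6148 credits of 64B each
+ */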
+ wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK,
+ size ? size * 32 + 4 : 0));
+ }
+
+ if (!size)
+ continue;
+
+ /* Pause is size of FIFO with 56KB skid to start/stop */
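+ /* (0x380 credits of 64B each make up the 56KB of skid) */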
+ wr32(fbd, FBNIC_RXB_PAUSE_THLD(i),
+ !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON,
+ size * 32 - 0x380) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380));
+
+ /* Enable drop when there is only room left for one full frame in the FIFO */
+ wr32(fbd, FBNIC_RXB_DROP_THLD(i),
+ !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_ON,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64) |
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64));
+
+ /* Assert ECN once at least 1/4 of the RXB is filled, with
+ * the threshold never set below one full jumbo frame
+ */
+ wr32(fbd, FBNIC_RXB_ECN_THLD(i),
+ !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_ON,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) |
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)));
+ }
+
+ /* For now only enable drop and ECN. We need to add driver/kernel
+ * interfaces for configuring pause.
+ */
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL,
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE,
+ FBNIC_DROP_EN_MASK) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE,
+ FBNIC_ECN_EN_MASK));
+
+ /* Program INTF credits */
+ wr32(fbd, FBNIC_RXB_INTF_CREDIT,
+ FBNIC_RXB_INTF_CREDIT_MASK0 |
+ FBNIC_RXB_INTF_CREDIT_MASK1 |
+ FBNIC_RXB_INTF_CREDIT_MASK2 |
+ FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8));
+
+ /* Configure calendar slots.
+ * Rx: 0 - 62 RDE 1st, BMC 2nd
+ * 63 BMC 1st, RDE 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val);
+ }
+
+ /* Split the credits for the DWRR up as follows:
+ * Quantum0: 8000 Network to Host
+ * Quantum1: 0 Not used
+ * Quantum2: 80 BMC to Host
+ * Quantum3: 0 Not used
+ * Quantum4: 8000 Multicast to Host and BMC
+ */
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) |
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f));
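+
+ /* Assuming the upper bits of each quantum live in the matching _EXT
+ * register, the writes above combine to 0x1f40 (8000) for Quantum0
+ * and Quantum4 and 0x50 (80) for Quantum2, matching the table above.
+ */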
+
+ /* Program RXB FCS Endian register */
+ wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0);
+}
+
+static void fbnic_mac_init_txb(struct fbnic_dev *fbd)
+{
+ int i;
+
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, 0);
+
+ /* Configure Tx QM Credits */
+ wr32(fbd, FBNIC_QM_TQS_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20));
+
+ /* Initialize internal Tx queues */
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000));
+ wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400));
+ wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600));
+
+ wr32(fbd, FBNIC_TCE_LSO_CTRL,
+ FBNIC_TCE_LSO_CTRL_IPID_MODE_INC |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH |
+ TCPHDR_CWR |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR));
+ wr32(fbd, FBNIC_TCE_CSO_CTRL, 0);
+
+ wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+ wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+
+ /* Configure calendar slots.
+ * Tx: 0 - 62 TMI 1st, BMC 2nd
+ * 63 BMC 1st, TMI 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val);
+ }
+
+ /* Configure DWRR */
+ wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) |
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04));
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82));
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20));
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03));
+
+ /* Configure SOP protocol protection */
+ wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL,
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c));
+
+ /* Conservative configuration of the MAC interface Start of Packet
+ * protection FIFO. This sets the minimum FIFO depth, measured in
+ * 64B units and up to 160 entries deep, that must be reached before
+ * we start sending packets to the MAC.
+ *
+ * For the ASIC the clock is fast enough that we will likely fill
+ * the SOP FIFO before the MAC can drain it. So just use a minimum
+ * value of 8.
+ */
+ wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8);
+
+ wrfl(fbd);
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE |
+ FBNIC_TCE_TXB_CTRL_LOAD);
+}
+
+static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
+{
+ fbnic_mac_init_axi(fbd);
+ fbnic_mac_init_qm(fbd);
+ fbnic_mac_init_rxb(fbd);
+ fbnic_mac_init_txb(fbd);
+}
+
+static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
+{
+ u32 rxb_pause_ctrl;
+
+ /* Enable generation of pause frames if Tx pause is enabled */
+ rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL);
+ rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE;
+ if (tx_pause)
+ rxb_pause_ctrl |=
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE,
+ FBNIC_PAUSE_EN_MASK);
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
+}
+
+static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
+{
+ u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
+
+ if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
+ return FBNIC_LINK_EVENT_DOWN;
+
+ return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
+ FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
+}
+
+static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ /* Enable MAC Promiscuous mode and Tx padding */
+ u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN |
+ FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN;
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ /* Disable pause frames if not enabled */
+ if (!tx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS;
+ if (!rx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;
+
+ /* Disable fault handling if no FEC is requested */
+ if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;
+
+ return command_config;
+}
+
+static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u32 pcs_status, lane_mask = ~0;
+
+ pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
+ if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK))
+ return false;
+
+ /* Define the expected lane mask for the status bits we need to check */
+ switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
+ case FBNIC_LINK_100R2:
+ lane_mask = 0xf;
+ break;
+ case FBNIC_LINK_50R1:
+ lane_mask = 3;
+ break;
+ case FBNIC_LINK_50R2:
+ switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_FEC_OFF:
+ lane_mask = 0x63;
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask = 5;
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask = 0xf;
+ break;
+ }
+ break;
+ case FBNIC_LINK_25R1:
+ lane_mask = 1;
+ break;
+ }
+
+ /* Use an XOR to remove the bits we expect to see set */
+ switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_FEC_OFF:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK,
+ rd32(fbd, FBNIC_SIG_PCS_OUT1));
+ break;
+ }
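+
+ /* Note: any lock bit reported outside the expected lane_mask will
+ * survive the XOR and keep the result non-zero, so the link is only
+ * reported up when the lock bits match the expected lanes exactly.
+ * E.g. for 25R1 with FEC off, lane_mask is 1 and a BLOCK_LOCK field
+ * of 1 cancels to 0 (assuming the single lane reports in bit 0).
+ */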
+
+ /* If all expected lane bits cancelled out then we have lock on all lanes */
+ return !lane_mask;
+}
+
+static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
+{
+ bool link;
+
+ /* Flush status bits to clear possible stale data; the
+ * bits should reset themselves back to 1 if link is truly up
+ */
+ wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK |
+ FBNIC_SIG_PCS_OUT0_BLOCK_LOCK |
+ FBNIC_SIG_PCS_OUT0_AMPS_LOCK);
+ wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK);
+ wrfl(fbd);
+
+ /* Clear interrupt state due to recent changes. */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
+ FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);
+
+ link = fbnic_mac_get_pcs_link_status(fbd);
+
+ /* Enable interrupt to only capture changes in link state */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
+ ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP);
+ wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ return link;
+}
+
+static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u8 link_mode = fbn->link_mode;
+ u8 fec = fbn->fec;
+
+ /* Update FEC first to reflect FW current mode */
+ if (fbn->fec & FBNIC_FEC_AUTO) {
+ switch (fbd->fw_cap.link_fec) {
+ case FBNIC_FW_LINK_FEC_NONE:
+ fec = FBNIC_FEC_OFF;
+ break;
+ case FBNIC_FW_LINK_FEC_RS:
+ fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_FEC_BASER:
+ fec = FBNIC_FEC_BASER;
+ break;
+ default:
+ return;
+ }
+
+ fbn->fec = fec;
+ }
+
+ /* Only update the link mode if AUTO mode is engaged */
+ if (fbn->link_mode & FBNIC_LINK_AUTO) {
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_SPEED_25R1:
+ link_mode = FBNIC_LINK_25R1;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R2:
+ link_mode = FBNIC_LINK_50R2;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R1:
+ link_mode = FBNIC_LINK_50R1;
+ fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_SPEED_100R2:
+ link_mode = FBNIC_LINK_100R2;
+ fec = FBNIC_FEC_RS;
+ break;
+ default:
+ return;
+ }
+
+ fbn->link_mode = link_mode;
+ }
+}
+
+static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
+{
+ /* Mask and clear the PCS interrupt, will be enabled by link handler */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+
+ /* Pull in settings from FW */
+ fbnic_pcs_get_fw_settings(fbd);
+
+ return 0;
+}
+
+static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
+{
+ /* Mask and clear the PCS interrupt */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+}
+
+static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ fbnic_mac_tx_pause_config(fbd, tx_pause);
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK);
+ cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA |
+ FBNIC_MAC_COMMAND_CONFIG_TX_ENA;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static const struct fbnic_mac fbnic_mac_asic = {
+ .init_regs = fbnic_mac_init_regs,
+ .pcs_enable = fbnic_pcs_enable_asic,
+ .pcs_disable = fbnic_pcs_disable_asic,
+ .pcs_get_link = fbnic_pcs_get_link_asic,
+ .pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .link_down = fbnic_mac_link_down_asic,
+ .link_up = fbnic_mac_link_up_asic,
+};
+
+/**
+ * fbnic_mac_init - Assign a MAC type and initialize the fbnic device
+ * @fbd: Pointer to the device to initialize
+ *
+ * Return: zero on success, negative on failure
+ *
+ * Initializes the MAC function pointers and the MAC of the device.
+ **/
+int fbnic_mac_init(struct fbnic_dev *fbd)
+{
+ fbd->mac = &fbnic_mac_asic;
+
+ fbd->mac->init_regs(fbd);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
new file mode 100644
index 000000000000..f53be6e6aef9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_MAC_H_
+#define _FBNIC_MAC_H_
+
+#include <linux/types.h>
+
+struct fbnic_dev;
+
+#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742
+
+enum {
+ FBNIC_LINK_EVENT_NONE = 0,
+ FBNIC_LINK_EVENT_UP = 1,
+ FBNIC_LINK_EVENT_DOWN = 2,
+};
+
+/* Treat the FEC bits as a bitmask laid out as follows:
+ * Bit 0: RS Enabled
+ * Bit 1: BASER(Firecode) Enabled
+ * Bit 2: Retrieve FEC from FW
+ */
+enum {
+ FBNIC_FEC_OFF = 0,
+ FBNIC_FEC_RS = 1,
+ FBNIC_FEC_BASER = 2,
+ FBNIC_FEC_AUTO = 4,
+};
+
+#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1)
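+
+/* For example, the netdev init code defaults to FBNIC_FEC_AUTO |
+ * FBNIC_FEC_RS, so (fec & FBNIC_FEC_MODE_MASK) reads as FBNIC_FEC_RS
+ * until the AUTO request is resolved against the FW-reported FEC mode.
+ */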
+
+/* Treat the link modes as a modulation/lane-count bitmask laid out as follows:
+ * Bit 0: Lane Count, 0 = R1, 1 = R2
+ * Bit 1: Modulation, 0 = NRZ, 1 = PAM4
+ * Bit 2: Retrieve link mode from FW
+ */
+enum {
+ FBNIC_LINK_25R1 = 0,
+ FBNIC_LINK_50R2 = 1,
+ FBNIC_LINK_50R1 = 2,
+ FBNIC_LINK_100R2 = 3,
+ FBNIC_LINK_AUTO = 4,
+};
+
+#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2)
+#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
+#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
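+
+/* Per the bit layout above, e.g. FBNIC_LINK_100R2 (3) has both the lane
+ * count bit (R2) and the modulation bit (PAM4) set, while FBNIC_LINK_25R1
+ * (0) has neither.
+ */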
+
+/* This structure defines the interface hooks for the MAC. The MAC hooks
+ * will be configured as a const struct provided with a set of function
+ * pointers.
+ *
+ * void (*init_regs)(struct fbnic_dev *fbd);
+ * Initialize MAC registers to enable Tx/Rx paths and FIFOs.
+ *
+ * void (*pcs_enable)(struct fbnic_dev *fbd);
+ * Configure and enable PCS to enable link if not already enabled
+ * void (*pcs_disable)(struct fbnic_dev *fbd);
+ * Shutdown the link if we are the only consumer of it.
+ * bool (*pcs_get_link)(struct fbnic_dev *fbd);
+ * Check PCS link status
+ * int (*pcs_get_link_event)(struct fbnic_dev *fbd)
+ * Get the current link event status; returns FBNIC_LINK_EVENT_UP or
+ * FBNIC_LINK_EVENT_DOWN if the link has changed, else FBNIC_LINK_EVENT_NONE
+ *
+ * void (*link_down)(struct fbnic_dev *fbd);
+ * Configure MAC for link down event
+ * void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+ * Configure MAC for link up event
+ *
+ */
+struct fbnic_mac {
+ void (*init_regs)(struct fbnic_dev *fbd);
+
+ int (*pcs_enable)(struct fbnic_dev *fbd);
+ void (*pcs_disable)(struct fbnic_dev *fbd);
+ bool (*pcs_get_link)(struct fbnic_dev *fbd);
+ int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+
+ void (*link_down)(struct fbnic_dev *fbd);
+ void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+};
+
+int fbnic_mac_init(struct fbnic_dev *fbd);
+#endif /* _FBNIC_MAC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
new file mode 100644
index 000000000000..b7ce6da68543
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+int __fbnic_open(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ err = fbnic_alloc_napi_vectors(fbn);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_resources(fbn);
+ if (err)
+ goto free_napi_vectors;
+
+ err = netif_set_real_num_tx_queues(fbn->netdev,
+ fbn->num_tx_queues);
+ if (err)
+ goto free_resources;
+
+ err = netif_set_real_num_rx_queues(fbn->netdev,
+ fbn->num_rx_queues);
+ if (err)
+ goto free_resources;
+
+ /* Send ownership message and flush to verify FW has seen it */
+ err = fbnic_fw_xmit_ownership_msg(fbd, true);
+ if (err) {
+ dev_warn(fbd->dev,
+ "Error %d sending host ownership message to the firmware\n",
+ err);
+ goto free_resources;
+ }
+
+ err = fbnic_fw_init_heartbeat(fbd, false);
+ if (err)
+ goto release_ownership;
+
+ err = fbnic_pcs_irq_enable(fbd);
+ if (err)
+ goto release_ownership;
+ /* Pull the BMC config and initialize the RPC */
+ fbnic_bmc_rpc_init(fbd);
+ fbnic_rss_reinit(fbd, fbn);
+
+ return 0;
+release_ownership:
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+free_resources:
+ fbnic_free_resources(fbn);
+free_napi_vectors:
+ fbnic_free_napi_vectors(fbn);
+ return err;
+}
+
+static int fbnic_open(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ err = __fbnic_open(fbn);
+ if (!err)
+ fbnic_up(fbn);
+
+ return err;
+}
+
+static int fbnic_stop(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_down(fbn);
+ fbnic_pcs_irq_disable(fbn->fbd);
+
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+
+ fbnic_free_resources(fbn);
+ fbnic_free_napi_vectors(fbn);
+
+ return 0;
+}
+
+static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_valid_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_uc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_multicast_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to top, filling top down.
+ * Skip over the address reserved for the BMC MAC and
+ * exclude index 0 as that belongs to the broadcast address
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_mc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+void __fbnic_set_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ bool uc_promisc = false, mc_promisc = false;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_mac_addr *mac_addr;
+ int err;
+
+ /* Populate host address from dev_addr */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
+ if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
+ }
+
+ /* Populate broadcast address if broadcast is enabled */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ if (netdev->flags & IFF_BROADCAST) {
+ if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_broadcast_addr(mac_addr->value.addr8);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
+ }
+
+ /* Synchronize unicast and multicast address lists */
+ err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
+ if (err == -ENOSPC)
+ uc_promisc = true;
+ err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
+ if (err == -ENOSPC)
+ mc_promisc = true;
+
+ uc_promisc |= !!(netdev->flags & IFF_PROMISC);
+ mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;
+
+ /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
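+ /* A zeroed value with an all-ones mask matches every address
+ * (promiscuous); flipping bit 0 of the first byte in both value and
+ * mask, as done in the all-multi case below, restricts the match to
+ * multicast addresses (assuming set mask bits are "don't care").
+ */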
+ if (uc_promisc) {
+ if (!is_zero_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ set_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mc_promisc &&
+ (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
+ /* We have to handle multicast specially as the BMC may
+ * already have an all-multi rule in place. In that case
+ * adding a rule ourselves won't do any good, so instead
+ * we modify the BMC ALL MULTI rule below.
+ */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ } else {
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+ }
+
+ /* Add rules for BMC all multicast if it is enabled */
+ fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
+
+ /* Sift out any unshared BMC rules and place them in BMC only section */
+ fbnic_sift_macda(fbd);
+
+ /* Write updates to hardware */
+ fbnic_write_rules(fbd);
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_set_rx_mode(struct net_device *netdev)
+{
+ /* No need to update the hardware if we are not running */
+ if (netif_running(netdev))
+ __fbnic_set_rx_mode(netdev);
+}
+
+static int fbnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ fbnic_set_rx_mode(netdev);
+
+ return 0;
+}
+
+void fbnic_clear_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ bitmap_clear(mac_addr->act_tcam,
+ FBNIC_MAC_ADDR_T_HOST_START,
+ FBNIC_MAC_ADDR_T_HOST_LEN);
+
+ if (bitmap_empty(mac_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+ /* Write updates to hardware */
+ fbnic_write_macda(fbd);
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+}
+
+static const struct net_device_ops fbnic_netdev_ops = {
+ .ndo_open = fbnic_open,
+ .ndo_stop = fbnic_stop,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = fbnic_xmit_frame,
+ .ndo_features_check = fbnic_features_check,
+ .ndo_set_mac_address = fbnic_set_mac,
+ .ndo_set_rx_mode = fbnic_set_rx_mode,
+};
+
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int max_napis;
+
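+ /* Tx and Rx queues share NAPI vectors, so each count is capped by
+ * the IRQs left over after the non-NAPI vectors and num_napi only
+ * needs to cover the larger of the two.
+ */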
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+
+ tx = min(tx, max_napis);
+ fbn->num_tx_queues = tx;
+
+ rx = min(rx, max_napis);
+ fbn->num_rx_queues = rx;
+
+ fbn->num_napi = max(tx, rx);
+}
+
+/**
+ * fbnic_netdev_free - Free the netdev associated with fbnic
+ * @fbd: Driver specific structure to free netdev from
+ *
+ * Destroy the phylink instance and free the netdev associated with
+ * the device.
+ **/
+void fbnic_netdev_free(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+
+ free_netdev(fbd->netdev);
+ fbd->netdev = NULL;
+}
+
+/**
+ * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
+ * @fbd: Driver specific structure to associate netdev with
+ *
+ * Allocate and initialize the netdev and netdev private structure. Bind
+ * together the hardware, netdev, and pci data structures.
+ *
+ * Return: Pointer to the allocated netdev on success, NULL on failure
+ **/
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
+{
+ struct net_device *netdev;
+ struct fbnic_net *fbn;
+ int default_queues;
+
+ netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
+ if (!netdev)
+ return NULL;
+
+ SET_NETDEV_DEV(netdev, fbd->dev);
+ fbd->netdev = netdev;
+
+ netdev->netdev_ops = &fbnic_netdev_ops;
+
+ fbn = netdev_priv(netdev);
+
+ fbn->netdev = netdev;
+ fbn->fbd = fbd;
+ INIT_LIST_HEAD(&fbn->napis);
+
+ fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
+ fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
+ fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
+ fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+
+ default_queues = netif_get_num_default_rss_queues();
+ if (default_queues > fbd->max_num_queues)
+ default_queues = fbd->max_num_queues;
+
+ fbnic_reset_queues(fbn, default_queues, default_queues);
+
+ fbnic_reset_indir_tbl(fbn);
+ fbnic_rss_key_fill(fbn->rss_key);
+ fbnic_rss_init_en_mask(fbn);
+
+ netdev->features |=
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM;
+
+ netdev->hw_features |= netdev->features;
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_enc_features |= netdev->features;
+
+ netdev->min_mtu = IPV6_MIN_MTU;
+ netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
+
+ /* TBD: This is a workaround for the BMC as phylink doesn't have
+ * support for leaving the link enabled if a BMC is present.
+ */
+ netdev->ethtool->wol_enabled = true;
+
+ fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
+ fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
+ netif_carrier_off(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ if (fbnic_phylink_init(netdev)) {
+ fbnic_netdev_free(fbd);
+ return NULL;
+ }
+
+ return netdev;
+}
+
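+/* The PCIe device serial number is presumably an EUI-64 style identifier
+ * derived from the MAC address; the middle two bytes (bits 39:24), which
+ * would hold the EUI-64 filler, are skipped to recover the 6-byte address.
+ */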
+static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
+{
+ addr[0] = (dsn >> 56) & 0xFF;
+ addr[1] = (dsn >> 48) & 0xFF;
+ addr[2] = (dsn >> 40) & 0xFF;
+ addr[3] = (dsn >> 16) & 0xFF;
+ addr[4] = (dsn >> 8) & 0xFF;
+ addr[5] = dsn & 0xFF;
+
+ return is_valid_ether_addr(addr) ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_netdev_register - Initialize general software structures
+ * @netdev: Netdev containing structure to initialize and register
+ *
+ * Initialize the MAC address for the netdev and register it.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_netdev_register(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 dsn = fbd->dsn;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = fbnic_dsn_to_mac_addr(dsn, addr);
+ if (!err) {
+ ether_addr_copy(netdev->perm_addr, addr);
+ eth_hw_addr_set(netdev, addr);
+ } else {
+ /* A randomly assigned MAC address will cause provisioning
+ * issues so instead just fail to spawn the netdev and
+ * avoid any confusion.
+ */
+ dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
+ return err;
+ }
+
+ return register_netdev(netdev);
+}
+
+void fbnic_netdev_unregister(struct net_device *netdev)
+{
+ unregister_netdev(netdev);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
new file mode 100644
index 000000000000..6bc0ebeb8182
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_NETDEV_H_
+#define _FBNIC_NETDEV_H_
+
+#include <linux/types.h>
+#include <linux/phylink.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_rpc.h"
+#include "fbnic_txrx.h"
+
+struct fbnic_net {
+ struct fbnic_ring *tx[FBNIC_MAX_TXQS];
+ struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+
+ u32 txq_size;
+ u32 hpq_size;
+ u32 ppq_size;
+ u32 rcq_size;
+
+ u16 num_napi;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+
+ /* TBD: Remove these when phylink supports FEC and lane config */
+ u8 fec;
+ u8 link_mode;
+
+ u16 num_tx_queues;
+ u16 num_rx_queues;
+
+ u8 indir_tbl[FBNIC_RPC_RSS_TBL_COUNT][FBNIC_RPC_RSS_TBL_SIZE];
+ u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+ u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
+
+ u64 link_down_events;
+
+ struct list_head napis;
+};
+
+int __fbnic_open(struct fbnic_net *fbn);
+void fbnic_up(struct fbnic_net *fbn);
+void fbnic_down(struct fbnic_net *fbn);
+
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
+void fbnic_netdev_free(struct fbnic_dev *fbd);
+int fbnic_netdev_register(struct net_device *netdev);
+void fbnic_netdev_unregister(struct net_device *netdev);
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx);
+
+void __fbnic_set_rx_mode(struct net_device *netdev);
+void fbnic_clear_rx_mode(struct net_device *netdev);
+
+int fbnic_phylink_init(struct net_device *netdev);
+#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
new file mode 100644
index 000000000000..a4809fe0fc24
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_drvinfo.h"
+#include "fbnic_netdev.h"
+
+char fbnic_driver_name[] = DRV_NAME;
+
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL");
+
+static const struct fbnic_info fbnic_asic_info = {
+ .max_num_queues = FBNIC_MAX_QUEUES,
+ .bar_mask = BIT(0) | BIT(4)
+};
+
+static const struct fbnic_info *fbnic_info_tbl[] = {
+ [fbnic_board_asic] = &fbnic_asic_info,
+};
+
+static const struct pci_device_id fbnic_pci_tbl[] = {
+ { PCI_DEVICE_DATA(META, FBNIC_ASIC, fbnic_board_asic) },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, fbnic_pci_tbl);
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
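+ /* Reads from a PCIe device that has dropped off the bus return all
+ * 1's, so an all-1's value is treated as suspect and cross-checked
+ * against a spare register expected to have zero bits.
+ */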
+ /* If any bits are 0, the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may still be valid if the ZEROs register doesn't also read all 1's */
+ if (reg != FBNIC_MASTER_SPARE_0 && ~readl(csr + FBNIC_MASTER_SPARE_0))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+bool fbnic_fw_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr4);
+}
+
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+
+ if (csr)
+ writel(val, csr + reg);
+}
+
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
+ /* If any bits are 0, the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may still be valid if the ZEROs register doesn't also read all 1's */
+ if (reg != FBNIC_FW_ZERO_REG && ~readl(csr + FBNIC_FW_ZERO_REG))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+static void fbnic_service_task_start(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ schedule_delayed_work(&fbd->service_task, HZ);
+ phylink_resume(fbn->phylink);
+}
+
+static void fbnic_service_task_stop(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
+ cancel_delayed_work(&fbd->service_task);
+}
+
+void fbnic_up(struct fbnic_net *fbn)
+{
+ fbnic_enable(fbn);
+
+ fbnic_fill(fbn);
+
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ __fbnic_set_rx_mode(fbn->netdev);
+
+ /* Enable Tx/Rx processing */
+ fbnic_napi_enable(fbn);
+ netif_tx_start_all_queues(fbn->netdev);
+
+ fbnic_service_task_start(fbn);
+}
+
+static void fbnic_down_noidle(struct fbnic_net *fbn)
+{
+ fbnic_service_task_stop(fbn);
+
+ /* Disable Tx/Rx Processing */
+ fbnic_napi_disable(fbn);
+ netif_tx_disable(fbn->netdev);
+
+ fbnic_clear_rx_mode(fbn->netdev);
+ fbnic_clear_rules(fbn->fbd);
+ fbnic_rss_disable_hw(fbn->fbd);
+ fbnic_disable(fbn);
+}
+
+void fbnic_down(struct fbnic_net *fbn)
+{
+ fbnic_down_noidle(fbn);
+
+ fbnic_wait_all_queues_idle(fbn->fbd, false);
+
+ fbnic_flush(fbn);
+}
+
+static void fbnic_health_check(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ /* As long as the heart is beating the FW is healthy */
+ if (fbd->fw_heartbeat_enabled)
+ return;
+
+ /* If the Tx mailbox still has messages sitting in it then there likely
+ * isn't anything we can do. We will wait until the mailbox is empty to
+ * report the fault so we can collect the crashlog.
+ */
+ if (tx_mbx->head != tx_mbx->tail)
+ return;
+
+ /* TBD: Need to add a more thorough recovery here.
+ * Specifically we need to verify what the firmware will have
+ * changed since we set it up and it rebooted. It may just require
+ * a down/up. For now we will just reclaim ownership so the
+ * heartbeat can catch the next fault.
+ */
+ fbnic_fw_xmit_ownership_msg(fbd, true);
+}
+
+static void fbnic_service_task(struct work_struct *work)
+{
+ struct fbnic_dev *fbd = container_of(to_delayed_work(work),
+ struct fbnic_dev, service_task);
+
+ rtnl_lock();
+
+ fbnic_fw_check_heartbeat(fbd);
+
+ fbnic_health_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev))
+ fbnic_napi_depletion_check(fbd->netdev);
+
+ if (netif_running(fbd->netdev))
+ schedule_delayed_work(&fbd->service_task, HZ);
+
+ rtnl_unlock();
+}
+
+/**
+ * fbnic_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in fbnic_pci_tbl
+ *
+ * Initializes a PCI device identified by a pci_dev structure.
+ * OS initialization, configuration of the adapter private structure,
+ * and a hardware reset occur here.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct fbnic_info *info = fbnic_info_tbl[ent->driver_data];
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+ int err;
+
+ if (pdev->error_state != pci_channel_io_normal) {
+ dev_err(&pdev->dev,
+ "PCI device still in an error state. Unable to load...\n");
+ return -EIO;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "PCI enable device failed: %d\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, info->bar_mask, fbnic_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed: %d\n", err);
+ return err;
+ }
+
+ fbd = fbnic_devlink_alloc(pdev);
+ if (!fbd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Populate driver with hardware-specific info and handlers */
+ fbd->max_num_queues = info->max_num_queues;
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ INIT_DELAYED_WORK(&fbd->service_task, fbnic_service_task);
+
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto free_fbd;
+
+ err = fbnic_mac_init(fbd);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize MAC: %d\n", err);
+ goto free_irqs;
+ }
+
+ err = fbnic_fw_enable_mbx(fbd);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Firmware mailbox initialization failure\n");
+ goto free_irqs;
+ }
+
+ fbnic_devlink_register(fbd);
+
+ if (!fbd->dsn) {
+ dev_warn(&pdev->dev, "Reading serial number failed\n");
+ goto init_failure_mode;
+ }
+
+ netdev = fbnic_netdev_alloc(fbd);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Netdev allocation failed\n");
+ goto init_failure_mode;
+ }
+
+ err = fbnic_netdev_register(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Netdev registration failed: %d\n", err);
+ goto ifm_free_netdev;
+ }
+
+ return 0;
+
+ifm_free_netdev:
+ fbnic_netdev_free(fbd);
+init_failure_mode:
+ dev_warn(&pdev->dev, "Probe error encountered, entering init failure mode. Normal networking functionality will not be available.\n");
+ /* Always return 0 even on error so devlink is registered to allow
+ * firmware updates for fixes.
+ */
+ return 0;
+free_irqs:
+ fbnic_free_irqs(fbd);
+free_fbd:
+ pci_disable_device(pdev);
+ fbnic_devlink_free(fbd);
+
+ return err;
+}
+
+/**
+ * fbnic_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * Called by the PCI subsystem to alert the driver that it should release
+ * a PCI device. This could be caused by a Hot-Plug event, or because the
+ * driver is going to be removed from memory.
+ **/
+static void fbnic_remove(struct pci_dev *pdev)
+{
+ struct fbnic_dev *fbd = pci_get_drvdata(pdev);
+
+ if (!fbnic_init_failure(fbd)) {
+ struct net_device *netdev = fbd->netdev;
+
+ fbnic_netdev_unregister(netdev);
+ cancel_delayed_work_sync(&fbd->service_task);
+ fbnic_netdev_free(fbd);
+ }
+
+ fbnic_devlink_unregister(fbd);
+ fbnic_fw_disable_mbx(fbd);
+ fbnic_free_irqs(fbd);
+
+ pci_disable_device(pdev);
+ fbnic_devlink_free(fbd);
+}
+
+static int fbnic_pm_suspend(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+
+ if (fbnic_init_failure(fbd))
+ goto null_uc_addr;
+
+ rtnl_lock();
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ rtnl_unlock();
+
+null_uc_addr:
+ fbnic_fw_disable_mbx(fbd);
+
+ /* Free the IRQs so they aren't trying to occupy sleeping CPUs */
+ fbnic_free_irqs(fbd);
+
+ /* Hardware is about to go away, so switch off MMIO access internally */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ return 0;
+}
+
+static int __fbnic_pm_resume(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ void __iomem * const *iomap_table;
+ struct fbnic_net *fbn;
+ int err;
+
+ /* Restore MMIO access */
+ iomap_table = pcim_iomap_table(to_pci_dev(dev));
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ /* Rerequest the IRQs */
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto err_invalidate_uc_addr;
+
+ fbd->mac->init_regs(fbd);
+
+ /* Re-enable mailbox */
+ err = fbnic_fw_enable_mbx(fbd);
+ if (err)
+ goto err_free_irqs;
+
+ /* No netdev means there isn't a network interface to bring up */
+ if (fbnic_init_failure(fbd))
+ return 0;
+
+ fbn = netdev_priv(netdev);
+
+ /* Reset the queues if needed */
+ fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues);
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ err = __fbnic_open(fbn);
+ if (err)
+ goto err_disable_mbx;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+err_disable_mbx:
+ rtnl_unlock();
+ fbnic_fw_disable_mbx(fbd);
+err_free_irqs:
+ fbnic_free_irqs(fbd);
+err_invalidate_uc_addr:
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+ return err;
+}
+
+static void __fbnic_pm_attach(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ struct fbnic_net *fbn;
+
+ if (fbnic_init_failure(fbd))
+ return;
+
+ fbn = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ fbnic_up(fbn);
+
+ netif_device_attach(netdev);
+}
+
+static int __maybe_unused fbnic_pm_resume(struct device *dev)
+{
+ int err;
+
+ err = __fbnic_pm_resume(dev);
+ if (!err)
+ __fbnic_pm_attach(dev);
+
+ return err;
+}
+
+static const struct dev_pm_ops fbnic_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fbnic_pm_suspend, fbnic_pm_resume)
+};
+
+static void fbnic_shutdown(struct pci_dev *pdev)
+{
+ fbnic_pm_suspend(&pdev->dev);
+}
+
+static pci_ers_result_t fbnic_err_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ /* Disconnect device if failure is not recoverable via reset */
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ fbnic_pm_suspend(&pdev->dev);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t fbnic_err_slot_reset(struct pci_dev *pdev)
+{
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Restore device to previous state */
+ err = __fbnic_pm_resume(&pdev->dev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void fbnic_err_resume(struct pci_dev *pdev)
+{
+ __fbnic_pm_attach(&pdev->dev);
+}
+
+static const struct pci_error_handlers fbnic_err_handler = {
+ .error_detected = fbnic_err_error_detected,
+ .slot_reset = fbnic_err_slot_reset,
+ .resume = fbnic_err_resume,
+};
+
+static struct pci_driver fbnic_driver = {
+ .name = fbnic_driver_name,
+ .id_table = fbnic_pci_tbl,
+ .probe = fbnic_probe,
+ .remove = fbnic_remove,
+ .driver.pm = &fbnic_pm_ops,
+ .shutdown = fbnic_shutdown,
+ .err_handler = &fbnic_err_handler,
+};
+
+/**
+ * fbnic_init_module - Driver Registration Routine
+ *
+ * The first routine called when the driver is loaded. All it does is
+ * register with the PCI subsystem.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int __init fbnic_init_module(void)
+{
+ int err;
+
+ err = pci_register_driver(&fbnic_driver);
+ if (err)
+ goto out;
+
+ pr_info(DRV_SUMMARY " (%s)", fbnic_driver.name);
+out:
+ return err;
+}
+module_init(fbnic_init_module);
+
+/**
+ * fbnic_exit_module - Driver Exit Cleanup Routine
+ *
+ * Called just before the driver is removed from memory.
+ **/
+static void __exit fbnic_exit_module(void)
+{
+ pci_unregister_driver(&fbnic_driver);
+}
+module_exit(fbnic_exit_module);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
new file mode 100644
index 000000000000..1a5e1e719b30
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static struct fbnic_net *
+fbnic_pcs_to_net(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct fbnic_net, phylink_pcs);
+}
+
+static void
+fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ /* For now we use hard-coded defaults and FW config to determine
+ * the current values. In future patches we will add support for
+ * reconfiguring these values and changing link settings.
+ */
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_SPEED_25R1:
+ state->speed = SPEED_25000;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R2:
+ state->speed = SPEED_50000;
+ break;
+ case FBNIC_FW_LINK_SPEED_100R2:
+ state->speed = SPEED_100000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ state->duplex = DUPLEX_FULL;
+
+ state->link = fbd->mac->pcs_get_link(fbd);
+}
+
+static int
+fbnic_phylink_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ return fbd->mac->pcs_enable(fbd);
+}
+
+static void
+fbnic_phylink_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ return fbd->mac->pcs_disable(fbd);
+}
+
+static int
+fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = {
+ .pcs_config = fbnic_phylink_pcs_config,
+ .pcs_enable = fbnic_phylink_pcs_enable,
+ .pcs_disable = fbnic_phylink_pcs_disable,
+ .pcs_get_state = fbnic_phylink_pcs_get_state,
+};
+
+static struct phylink_pcs *
+fbnic_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return &fbn->phylink_pcs;
+}
+
+static void
+fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+fbnic_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->link_down(fbd);
+
+ fbn->link_down_events++;
+}
+
+static void
+fbnic_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->link_up(fbd, tx_pause, rx_pause);
+}
+
+static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
+ .mac_select_pcs = fbnic_phylink_mac_select_pcs,
+ .mac_config = fbnic_phylink_mac_config,
+ .mac_link_down = fbnic_phylink_mac_link_down,
+ .mac_link_up = fbnic_phylink_mac_link_up,
+};
+
+int fbnic_phylink_init(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct phylink *phylink;
+
+ fbn->phylink_pcs.neg_mode = true;
+ fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
+
+ fbn->phylink_config.dev = &netdev->dev;
+ fbn->phylink_config.type = PHYLINK_NETDEV;
+ fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
+ fbn->phylink_config.default_an_inband = true;
+
+ __set_bit(PHY_INTERFACE_MODE_XGMII,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII,
+ fbn->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&fbn->phylink_config, NULL,
+ PHY_INTERFACE_MODE_XLGMII,
+ &fbnic_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ fbn->phylink = phylink;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
new file mode 100644
index 000000000000..c8aa29fc052b
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_rpc.h"
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
+{
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
+ fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+}
+
+void fbnic_rss_key_fill(u32 *buffer)
+{
+ static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+
+ net_get_random_once(rss_key, sizeof(rss_key));
+ rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK;
+
+ memcpy(buffer, rss_key, sizeof(rss_key));
+}
+
+#define RX_HASH_OPT_L4 \
+ (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+#define RX_HASH_OPT_L3 \
+ (RXH_IP_SRC | RXH_IP_DST)
+#define RX_HASH_OPT_L2 RXH_L2DA
+
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn)
+{
+ fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4;
+ fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4;
+
+ fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3;
+
+ fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2;
+}
+
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
+{
+ /* Disable RPC by clearing enable bit and configuration */
+ if (!fbnic_bmc_present(fbd))
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20));
+}
+
+#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
+ FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
+ FIELD_GET(RXH_##_fh, _val))
+static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+{
+ u32 flow_hash = fbn->rss_flow_hash[flow_type];
+ u32 rss_en_mask = 0;
+
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
+
+ return rss_en_mask;
+}
+
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]);
+ wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]);
+ }
+
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]);
+
+ /* Default action for this is to drop with no destination */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP);
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0);
+
+ /* If it isn't already enabled set the RMI Config value to enable RPC */
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) |
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+}
+
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
+ bool enable_host)
+{
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* We need to add the all multicast filter at the end of the
+ * multicast address list. This way if there are any that are
+ * shared between the host and the BMC they can be directed to
+ * both. Otherwise the remainder just get sent directly to the
+ * BMC.
+ */
+ mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1];
+ if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) {
+ if (mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ if (enable_host)
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ else
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) &&
+ !is_zero_ether_addr(mac_addr->mask.addr8) &&
+ mac_addr->state == FBNIC_TCAM_S_VALID) {
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+ /* We have to handle multicast specially as the BMC may
+ * already have an all-multi rule in place. In that case
+ * adding a rule ourselves won't do any good, so we modify
+ * the ALL MULTI rules below when the BMC already has one.
+ */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET];
+
+ /* If we are not enabling the rule just delete it. We will fall
+ * back to the RSS rules that support the multicast addresses.
+ */
+ if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) {
+ if (act_tcam->state == FBNIC_TCAM_S_VALID)
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+ return;
+ }
+
+ /* Rewrite TCAM rule 23 to handle BMC all-multi traffic */
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA entries 0 - 3 are reserved for the BMC MAC addresses */
+ act_tcam->value.tcam[1] =
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary - 1) |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
+{
+ int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX;
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* Check if BMC is present */
+ if (!fbnic_bmc_present(fbd))
+ return;
+
+ /* Fetch BMC MAC addresses from firmware capabilities */
+ for (j = 0; j < 4; j++) {
+ u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j];
+
+ /* Validate BMC MAC addresses */
+ if (is_zero_ether_addr(bmc_mac))
+ continue;
+
+ if (is_multicast_ether_addr(bmc_mac))
+ mac_addr = __fbnic_mc_sync(fbd, bmc_mac);
+ else
+ mac_addr = &fbd->mac_addr[i++];
+
+ if (!mac_addr) {
+ netdev_err(fbd->netdev,
+ "No slot for BMC MAC address[%d]\n", j);
+ continue;
+ }
+
+ ether_addr_copy(mac_addr->value.addr8, bmc_mac);
+ eth_zero_addr(mac_addr->mask.addr8);
+
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ /* Validate Broadcast is also present, record it and tag it */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ eth_broadcast_addr(mac_addr->value.addr8);
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+
+ /* Rewrite TCAM rule 0 if it isn't present to relocate BMC rules */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET];
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA entries 0 - 3 are reserved for the BMC MAC addresses.
+ * To account for that we have to mask out the lower 2 bits
+ * of the MACDA index by performing an &= with 0x1c.
+ */
+ act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ fbnic_bmc_rpc_all_multi_config(fbd, false);
+}
+
+#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
+ (((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) | \
+ ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) | \
+ ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \
+ ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))
+
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
+ FBNIC_ACT1_INIT(1, 1, 1, 1), /* UDP6 */
+ FBNIC_ACT1_INIT(1, 1, 1, 0), /* UDP4 */
+ FBNIC_ACT1_INIT(1, 0, 1, 1), /* TCP6 */
+ FBNIC_ACT1_INIT(1, 0, 1, 0), /* TCP4 */
+ FBNIC_ACT1_INIT(0, 0, 1, 1), /* IP6 */
+ FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */
+ 0 /* Ether */
+ };
+ unsigned int i;
+
+ /* To support scenarios where a BMC is present we must write the
+ * rules twice, once for the unicast cases, and once again for
+ * the broadcast/multicast cases as we have to support 2 destinations.
+ */
+ BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
+ BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);
+
+ /* Program RSS hash enable mask for host in action TCAM/table. */
+ for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
+ i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
+ unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET;
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ u32 flow_hash, dest, rss_en_mask;
+ int flow_type, j;
+ u16 value = 0;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ flow_hash = fbn->rss_flow_hash[flow_type];
+
+ /* Set DEST_HOST based on absence of RXH_DISCARD */
+ dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ !(RXH_DISCARD & flow_hash) ?
+ FBNIC_RPC_ACT_TBL0_DEST_HOST : 0);
+
+ if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd))
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+
+ if (!dest)
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+
+ if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4);
+
+ rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);
+
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = rss_en_mask;
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* We reserve the upper 8 MACDA TCAM entries for host
+ * unicast. So we set the value to 24, and mask the
+ * lower bits so that the lower entries can be used as
+ * multicast or BMC addresses.
+ */
+ if (i < FBNIC_RSS_EN_NUM_UNICAST)
+ value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary);
+ value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ value |= act1_value[flow_type];
+
+ act_tcam->value.tcam[1] = value;
+ act_tcam->mask.tcam[1] = ~value;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
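As a worked illustration of the loop above, the standalone sketch below (userspace C, with 7 mirroring FBNIC_RSS_EN_NUM_UNICAST) shows how the 14 RSS action-table entries split into a unicast half and a broadcast/multicast half that both map onto the same seven hash options via i % FBNIC_RSS_EN_NUM_UNICAST:

#include <stdio.h>

#define NUM_UNICAST	7			/* mirrors FBNIC_RSS_EN_NUM_UNICAST */
#define NUM_ENTRIES	(NUM_UNICAST * 2)	/* mirrors FBNIC_RSS_EN_NUM_ENTRIES */

static const char * const hash_opt[NUM_UNICAST] = {
	"UDP6", "UDP4", "TCP6", "TCP4", "IP6", "IP4", "Ether"
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < NUM_ENTRIES; i++)
		printf("RSS entry %2u: %-11s flow_type %u (%s)\n", i,
		       i < NUM_UNICAST ? "unicast" : "bcast/mcast",
		       i % NUM_UNICAST, hash_opt[i % NUM_UNICAST]);

	return 0;
}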
+
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary - 1;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from middle of list to top, filling top down.
+ * Stop above the broadcast entry; the entries reserved for the
+ * BMC MAC addresses below it are scanned separately afterwards.
+ */
+ for (i = fbd->mac_addr_boundary;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+ /* Scan the BMC addresses to see if the BMC may have already
+ * reserved the address.
+ */
+ while (--i) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!is_zero_ether_addr(mac_addr->mask.addr8))
+ continue;
+
+ /* Only move on if we find a match */
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ /* We need to pull this address to the shared area */
+ if (avail_addr) {
+ memcpy(avail_addr, mac_addr, sizeof(*mac_addr));
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+void fbnic_sift_macda(struct fbnic_dev *fbd)
+{
+ int dest, src;
+
+ /* Move BMC only addresses back into BMC region */
+ for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX,
+ src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX;
+ ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX &&
+ src < fbd->mac_addr_boundary;) {
+ struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest];
+
+ if (dest_addr->state != FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ while (src < fbd->mac_addr_boundary) {
+ struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam))
+ continue;
+
+ /* Verify filter isn't already disabled */
+ if (src_addr->state == FBNIC_TCAM_S_DISABLED ||
+ src_addr->state == FBNIC_TCAM_S_DELETE)
+ continue;
+
+ /* Verify only BMC bit is set */
+ if (bitmap_weight(src_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1)
+ continue;
+
+ /* Verify we are not moving wildcard address */
+ if (!is_zero_ether_addr(src_addr->mask.addr8))
+ continue;
+
+ memcpy(dest_addr, src_addr, sizeof(*src_addr));
+ src_addr->state = FBNIC_TCAM_S_DELETE;
+ dest_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ }
+}
+
+static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0);
+}
+
+static void fbnic_clear_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ if (fbnic_bmc_present(fbd))
+ continue;
+ dev_warn_once(fbd->dev,
+ "Found BMC MAC address w/ BMC not present\n");
+ }
+
+ fbnic_clear_macda_entry(fbd, idx);
+
+ /* If rule was already destined for deletion just wipe it now */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ memset(mac_addr, 0, sizeof(*mac_addr));
+ continue;
+ }
+
+ /* Change state to update so that we will rewrite
+ * this tcam the next time fbnic_write_macda is called.
+ */
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
+
+static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+ value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--)));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ /* Skip this entry unless its update flag is set. */
+ if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_macda_entry(fbd, idx);
+ memset(mac_addr, 0, sizeof(*mac_addr));
+
+ continue;
+ }
+
+ fbnic_write_macda_entry(fbd, idx, mac_addr);
+
+ mac_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
+static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd)
+{
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
+ /* Clear MAC rules */
+ fbnic_clear_macda(fbd);
+
+ /* If BMC is present we need to preserve the last rule which
+ * will be used to route traffic to the BMC if it is received.
+ *
+ * At this point it should be the only MAC address in the MACDA
+ * so any unicast or multicast traffic received should be routed
+ * to it. So leave the last rule in place.
+ *
+ * It will be rewritten to add the host again when we bring
+ * the interface back up.
+ */
+ if (fbnic_bmc_present(fbd)) {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state == FBNIC_TCAM_S_VALID &&
+ (act_tcam->dest & dest)) {
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ i--;
+ }
+ }
+
+ /* Work from the bottom up deleting all other rules from hardware */
+ do {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ fbnic_clear_act_tcam(fbd, i);
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ } while (i--);
+}
+
+static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ fbnic_clear_act_tcam(fbd, idx);
+ memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam));
+}
+
+static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ int i;
+
+ /* Update entry by writing the destination and RSS mask */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask);
+
+ /* Write new TCAM rule to hardware */
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK,
+ act_tcam->mask.tcam[i]) |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE,
+ act_tcam->value.tcam[i]));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+ act_tcam->state = FBNIC_TCAM_S_VALID;
+}
+
+void fbnic_write_rules(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Flush any pending action table rules */
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ /* Check if update flag is set else exit. */
+ if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (act_tcam->state == FBNIC_TCAM_S_DELETE)
+ fbnic_delete_act_tcam(fbd, i);
+ else
+ fbnic_update_act_tcam(fbd, i);
+ }
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
new file mode 100644
index 000000000000..d62935f722a2
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_RPC_H_
+#define _FBNIC_RPC_H_
+
+#include <uapi/linux/in6.h>
+#include <linux/bitfield.h>
+
+/* The TCAM state definitions follow an expected ordering.
+ * They start out disabled, then move through the following states:
+ * Disabled 0 -> Add 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Add/Update 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Delete 3
+ * Delete 3 -> Disabled 0
+ */
+enum {
+ FBNIC_TCAM_S_DISABLED = 0,
+ FBNIC_TCAM_S_VALID = 1,
+ FBNIC_TCAM_S_ADD = 2,
+ FBNIC_TCAM_S_UPDATE = FBNIC_TCAM_S_ADD,
+ FBNIC_TCAM_S_DELETE = 3,
+};
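A minimal sketch of the lifecycle described in the comment above (standalone userspace C with mirrored state values; the helper names are illustrative, not driver API):

#include <stdio.h>

enum tcam_state {	/* mirrors the values above */
	S_DISABLED = 0,
	S_VALID = 1,
	S_ADD = 2,	/* also used as "update" */
	S_DELETE = 3,
};

/* Software wants the entry (re)written to hardware */
static enum tcam_state tcam_request_write(enum tcam_state s)
{
	return (s == S_DISABLED || s == S_VALID) ? S_ADD : s;
}

/* The entry has been flushed to hardware */
static enum tcam_state tcam_flush_done(enum tcam_state s)
{
	if (s == S_ADD)
		return S_VALID;
	if (s == S_DELETE)
		return S_DISABLED;
	return s;
}

int main(void)
{
	enum tcam_state s = S_DISABLED;

	s = tcam_request_write(s);	/* Disabled 0 -> Add 2 */
	s = tcam_flush_done(s);		/* Add 2 -> Valid 1 */
	s = tcam_request_write(s);	/* Valid 1 -> Add/Update 2 */
	s = tcam_flush_done(s);		/* Add 2 -> Valid 1 */
	s = S_DELETE;			/* Valid 1 -> Delete 3 */
	s = tcam_flush_done(s);		/* Delete 3 -> Disabled 0 */

	printf("final state: %d\n", s);	/* prints 0 */
	return 0;
}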
+
+/* 32 MAC Destination Address TCAM Entries
+ * 4 registers DA[1:0], DA[3:2], DA[5:4], Validate
+ */
+#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
+#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+
+#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
+#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+
+struct fbnic_mac_addr {
+ union {
+ unsigned char addr8[ETH_ALEN];
+ __be16 addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
+struct fbnic_act_tcam {
+ struct {
+ u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ u16 rss_en_mask;
+ u32 dest;
+};
+
+enum {
+ FBNIC_RSS_EN_HOST_UDP6,
+ FBNIC_RSS_EN_HOST_UDP4,
+ FBNIC_RSS_EN_HOST_TCP6,
+ FBNIC_RSS_EN_HOST_TCP4,
+ FBNIC_RSS_EN_HOST_IP6,
+ FBNIC_RSS_EN_HOST_IP4,
+ FBNIC_RSS_EN_HOST_ETHER,
+ FBNIC_RSS_EN_XCAST_UDP6,
+#define FBNIC_RSS_EN_NUM_UNICAST FBNIC_RSS_EN_XCAST_UDP6
+ FBNIC_RSS_EN_XCAST_UDP4,
+ FBNIC_RSS_EN_XCAST_TCP6,
+ FBNIC_RSS_EN_XCAST_TCP4,
+ FBNIC_RSS_EN_XCAST_IP6,
+ FBNIC_RSS_EN_XCAST_IP4,
+ FBNIC_RSS_EN_XCAST_ETHER,
+ FBNIC_RSS_EN_NUM_ENTRIES
+};
+
+/* Reserve the first 2 entries for the use by the BMC so that we can
+ * avoid allowing rules to get in the way of BMC unicast traffic.
+ */
+#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
+#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+
+/* We reserve the last 14 entries for RSS rules on the host. The BMC
+ * unicast rule will need to be populated above these and is expected to
+ * use MACDA TCAM entry 23 to store the BMC MAC address.
+ */
+#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
+ (FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+
+/* Flags used to identify the owner for this MAC filter. Note that any
+ * flags set for Broadcast through Promisc indicate that the rule belongs
+ * to the RSS filters for the host.
+ */
+enum {
+ FBNIC_MAC_ADDR_T_BMC = 0,
+ FBNIC_MAC_ADDR_T_BROADCAST = FBNIC_RPC_ACT_TBL_RSS_OFFSET,
+#define FBNIC_MAC_ADDR_T_HOST_START FBNIC_MAC_ADDR_T_BROADCAST
+ FBNIC_MAC_ADDR_T_MULTICAST,
+ FBNIC_MAC_ADDR_T_UNICAST,
+ FBNIC_MAC_ADDR_T_ALLMULTI, /* BROADCAST ... MULTICAST */
+ FBNIC_MAC_ADDR_T_PROMISC, /* BROADCAST ... UNICAST */
+ FBNIC_MAC_ADDR_T_HOST_LAST
+};
+
+#define FBNIC_MAC_ADDR_T_HOST_LEN \
+ (FBNIC_MAC_ADDR_T_HOST_LAST - FBNIC_MAC_ADDR_T_HOST_START)
+
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_IDX CSR_GENMASK(2, 0)
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_VALID CSR_BIT(3)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_IDX CSR_GENMASK(6, 4)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_VALID CSR_BIT(7)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX CSR_GENMASK(10, 8)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX CSR_GENMASK(14, 12)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID CSR_BIT(15)
+
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX CSR_GENMASK(9, 5)
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID CSR_BIT(10)
+#define FBNIC_RPC_TCAM_ACT1_IP_IS_V6 CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT1_IP_VALID CSR_BIT(12)
+#define FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID CSR_BIT(13)
+#define FBNIC_RPC_TCAM_ACT1_L4_IS_UDP CSR_BIT(14)
+#define FBNIC_RPC_TCAM_ACT1_L4_VALID CSR_BIT(15)
+
+/* TCAM 0 - 3 reserved for BMC MAC addresses */
+#define FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX 0
+/* TCAM 4 reserved for broadcast MAC address */
+#define FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX 4
+/* TCAMs 5 - 30 will be used for multicast and unicast addresses. The
+ * boundary between the two is variable; it is currently set to 24,
+ * which is where the unicast addresses start. The general idea is that
+ * we will always go top-down with unicast, and bottom-up with multicast,
+ * so that there should be free space in the middle between the two.
+ *
+ * The entry at MACDA_DEFAULT_BOUNDARY is a special case as it can be
+ * used for the ALL MULTI address if the list is full, or if the BMC has
+ * requested it.
+ */
+#define FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX 5
+#define FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY 24
+#define FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX 30
+/* Reserved for use to record Multicast promisc, or Promiscuous */
+#define FBNIC_RPC_TCAM_MACDA_PROMISC_IDX 31
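Taken together, the defines and comments above describe the following default MACDA layout; the standalone sketch below just prints it (using the default boundary of 24; the multicast/unicast split is runtime-adjustable via fbd->mac_addr_boundary):

#include <stdio.h>

#define BROADCAST_IDX		4	/* values mirror the defines above */
#define DEFAULT_BOUNDARY	24
#define HOST_ADDR_IDX		30
#define PROMISC_IDX		31

int main(void)
{
	unsigned int i;

	for (i = 0; i <= PROMISC_IDX; i++) {
		const char *use =
			i < BROADCAST_IDX ? "BMC MAC address" :
			i == BROADCAST_IDX ? "broadcast" :
			i < DEFAULT_BOUNDARY ? "multicast (fills upward from 5)" :
			i < HOST_ADDR_IDX ? "unicast (fills downward); 24 may hold ALL MULTI" :
			i == HOST_ADDR_IDX ? "host address" :
			"multicast promisc / promiscuous";

		printf("MACDA %2u: %s\n", i, use);
	}

	return 0;
}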
+
+enum {
+ FBNIC_UDP6_HASH_OPT,
+ FBNIC_UDP4_HASH_OPT,
+ FBNIC_TCP6_HASH_OPT,
+ FBNIC_TCP4_HASH_OPT,
+#define FBNIC_L4_HASH_OPT FBNIC_TCP4_HASH_OPT
+ FBNIC_IPV6_HASH_OPT,
+ FBNIC_IPV4_HASH_OPT,
+#define FBNIC_IP_HASH_OPT FBNIC_IPV4_HASH_OPT
+ FBNIC_ETHER_HASH_OPT,
+ FBNIC_NUM_HASH_OPT,
+};
+
+struct fbnic_dev;
+struct fbnic_net;
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
+void fbnic_rss_key_fill(u32 *buffer);
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+void fbnic_sift_macda(struct fbnic_dev *fbd);
+void fbnic_write_macda(struct fbnic_dev *fbd);
+
+static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
+}
+
+static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_MULTICAST);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd);
+void fbnic_write_rules(struct fbnic_dev *fbd);
+#endif /* _FBNIC_RPC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
new file mode 100644
index 000000000000..2a174ab062a3
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <uapi/linux/if_ether.h>
+
+#include "fbnic_tlv.h"
+
+/**
+ * fbnic_tlv_msg_alloc - Allocate page and initialize FW message header
+ * @msg_id: Identifier for new message we are starting
+ *
+ * Return: pointer to start of message, or NULL on failure.
+ *
+ * Allocates a page and initializes message header at start of page.
+ * Initial message size is 1 DWORD which is just the header.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id)
+{
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ /* Start with zero filled header and then back fill with data */
+ hdr.type = msg_id;
+ hdr.is_msg = 1;
+ hdr.len = cpu_to_le16(1);
+
+ /* Copy header into start of message */
+ msg->hdr = hdr;
+
+ return msg;
+}
+
+/**
+ * fbnic_tlv_attr_put_flag - Add flag value to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds a 1 DWORD flag attribute to the message. The presence of this
+ * attribute can be used as a boolean value indicating true; its absence
+ * is considered false.
+ **/
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr))
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr));
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_put_value - Add data to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Pointer to data to be stored
+ * @len: Size of data to be stored.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. The
+ * result is rounded up to the nearest DWORD for sizing so that the
+ * headers remain aligned.
+ *
+ * The assumption is that the value field is in a format where byte
+ * ordering can be guaranteed, such as a byte array or a little endian
+ * format.
+ **/
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) + len)
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr) + len);
+
+ /* Zero pad end of region to be written if we aren't aligned */
+ if (len % sizeof(hdr))
+ attr->value[len / sizeof(hdr)] = 0;
+
+ /* Copy data over */
+ memcpy(attr->value, value, len);
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * __fbnic_tlv_attr_put_int - Add integer to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Data to be stored
+ * @len: Size of data to be stored, either 4 or 8 bytes.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds a header and copies the integer value into the message, formatted
+ * as little endian.
+ **/
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len)
+{
+ __le64 le64_value = cpu_to_le64(value);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, &le64_value, len);
+}
+
+/**
+ * fbnic_tlv_attr_put_mac_addr - Add mac_addr to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @mac_addr: Byte pointer to MAC address to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by mac_addr into the message. Will
+ * copy the address raw so it will be in big endian with start of MAC
+ * address at start of attribute.
+ **/
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr)
+{
+ return fbnic_tlv_attr_put_value(msg, attr_id, mac_addr, ETH_ALEN);
+}
+
+/**
+ * fbnic_tlv_attr_put_string - Add string to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @string: Byte pointer to null terminated string to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds a header and copies the null terminated string into the message.
+ * The bytes are copied as-is, so the string is stored in its original
+ * byte order.
+ **/
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string)
+{
+ int attr_max_len = PAGE_SIZE - sizeof(*msg);
+ int str_len = 1;
+
+ /* The max length is the page size minus the existing message and the
+ * new attribute header. Since the message length is measured in DWORDs
+ * we have to multiply it by 4 to get bytes.
+ *
+ * The string length doesn't include the \0 so we have to add one to
+ * the final value, so start with that as our initial value.
+ *
+ * We will verify if the string will fit in fbnic_tlv_attr_put_value()
+ */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ str_len += strnlen(string, attr_max_len);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, string, str_len);
+}
+
+/**
+ * fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
+ * @attr: Attribute to retrieve data from
+ *
+ * Return: unsigned 64b value containing integer value
+ **/
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+{
+ __le64 le64_value = 0;
+
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+
+ return le64_to_cpu(le64_value);
+}
+
+/**
+ * fbnic_tlv_attr_get_signed - Retrieve signed value from result
+ * @attr: Attribute to retrieve data from
+ *
+ * Return: signed 64b value containing integer value
+ **/
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+{
+ int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+ __le64 le64_value = 0;
+ s64 value;
+
+ /* Copy the value and adjust for byte ordering */
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+ value = le64_to_cpu(le64_value);
+
+ /* Sign extend the return value by using a pair of shifts */
+ return (value << shift) >> shift;
+}
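The pair-of-shifts sign extension above can be reproduced in isolation. A standalone sketch follows (it takes the number of stored data bytes directly rather than deriving it from the TLV header length, and assumes a little-endian host with arithmetic right shift, which is what the kernel code relies on as well):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int64_t get_signed(const uint8_t *data, unsigned int nbytes)
{
	unsigned int shift = (8 - nbytes) * 8;
	uint64_t raw = 0;

	memcpy(&raw, data, nbytes);	/* little-endian host assumed */

	/* Shift the top stored bit up to bit 63, then shift back down so
	 * the sign bit is replicated through the upper bytes.
	 */
	return (int64_t)(raw << shift) >> shift;
}

int main(void)
{
	uint8_t minus_one[4] = { 0xff, 0xff, 0xff, 0xff };	/* s32 -1 */
	uint8_t small[2] = { 0x34, 0x12 };			/* s16 0x1234 */

	printf("%" PRId64 "\n", get_signed(minus_one, sizeof(minus_one)));	/* -1 */
	printf("%" PRId64 "\n", get_signed(small, sizeof(small)));		/* 4660 */
	return 0;
}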
+
+/**
+ * fbnic_tlv_attr_get_string - Retrieve string value from result
+ * @attr: Attribute to retrieve data from
+ * @str: Pointer to an allocated string to store the data
+ * @max_size: The maximum size which can be in str
+ *
+ * Return: the size of the string read from firmware
+ **/
+size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
+ size_t max_size)
+{
+ max_size = min_t(size_t, max_size,
+ (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr));
+ memcpy(str, &attr->value, max_size);
+
+ return max_size;
+}
+
+/**
+ * fbnic_tlv_attr_nest_start - Add nested attribute header to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: NULL if there is no room for the attribute. Otherwise a pointer
+ * to the new attribute header.
+ *
+ * New header length is stored initially in DWORDs.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ struct fbnic_tlv_hdr hdr = { 0 };
+
+ /* Make sure we have space for at least the nest header plus one more */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) * 2)
+ return NULL;
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+
+ /* Add current message length to account for consumption within the
+ * page and leave it as a multiple of DWORDs, we will shift to
+ * bytes when we close it out.
+ */
+ hdr.len = cpu_to_le16(1);
+
+ attr->hdr = hdr;
+
+ return attr;
+}
+
+/**
+ * fbnic_tlv_attr_nest_stop - Close out nested attribute and add it to message
+ * @msg: Message header we are adding flag attribute to
+ *
+ * Closes out nested attribute, adds length to message, and then bumps
+ * length from DWORDs to bytes to match other attributes.
+ **/
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg)
+{
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ u16 len = le16_to_cpu(attr->hdr.len);
+
+ /* Add attribute to message if there is more than just a header */
+ if (len <= 1)
+ return;
+
+ le16_add_cpu(&msg->hdr.len, len);
+
+ /* Convert from DWORDs to bytes */
+ attr->hdr.len = cpu_to_le16(len * sizeof(u32));
+}
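A minimal usage sketch of how a caller might combine these helpers to build a request, assuming a kernel context where this file's API is available. The attribute IDs, the example function, and the overall flow are hypothetical illustrations rather than driver code; freeing with free_page() simply matches the __get_free_page() allocation in fbnic_tlv_msg_alloc():

/* Hypothetical attribute IDs used only for this example */
enum {
	EXAMPLE_ATTR_NAME	= 0,
	EXAMPLE_ATTR_COUNT	= 1,
	EXAMPLE_ATTR_OPTS	= 2,
	EXAMPLE_ATTR_VERBOSE	= 3,
};

static struct fbnic_tlv_msg *example_build_msg(u16 msg_id, const char *name)
{
	struct fbnic_tlv_msg *msg, *opts;
	int err;

	msg = fbnic_tlv_msg_alloc(msg_id);
	if (!msg)
		return NULL;

	err = fbnic_tlv_attr_put_string(msg, EXAMPLE_ATTR_NAME, name);
	if (err)
		goto free_msg;

	err = fbnic_tlv_attr_put_int(msg, EXAMPLE_ATTR_COUNT, 42);
	if (err)
		goto free_msg;

	/* Nested attributes are added through the pointer returned by
	 * nest_start and committed by nest_stop on the parent message.
	 */
	opts = fbnic_tlv_attr_nest_start(msg, EXAMPLE_ATTR_OPTS);
	if (!opts)
		goto free_msg;

	err = fbnic_tlv_attr_put_flag(opts, EXAMPLE_ATTR_VERBOSE);
	if (err)
		goto free_msg;

	fbnic_tlv_attr_nest_stop(msg);

	return msg;

free_msg:
	free_page((unsigned long)msg);
	return NULL;
}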
+
+static int
+fbnic_tlv_attr_validate(struct fbnic_tlv_msg *attr,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ u16 len = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ u16 attr_id = attr->hdr.type;
+ __le32 *value = &attr->value[0];
+
+ if (attr->hdr.is_msg)
+ return -EINVAL;
+
+ if (attr_id >= FBNIC_TLV_RESULTS_MAX)
+ return -EINVAL;
+
+ while (tlv_index->id != attr_id) {
+ if (tlv_index->id == FBNIC_TLV_ATTR_ID_UNKNOWN) {
+ if (attr->hdr.cannot_ignore)
+ return -ENOENT;
+ return le16_to_cpu(attr->hdr.len);
+ }
+
+ tlv_index++;
+ }
+
+ if (offset_in_page(attr) + len > PAGE_SIZE - sizeof(*attr))
+ return -E2BIG;
+
+ switch (tlv_index->type) {
+ case FBNIC_TLV_STRING:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ if (((char *)value)[len - 1])
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_FLAG:
+ if (len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_UNSIGNED:
+ case FBNIC_TLV_SIGNED:
+ if (tlv_index->len > sizeof(__le64))
+ return -EINVAL;
+ fallthrough;
+ case FBNIC_TLV_BINARY:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_NESTED:
+ case FBNIC_TLV_ARRAY:
+ if (len % 4)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_parse_array - Parse array of attributes into results array
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ * @tlv_attr_id: Specific ID that is repeated in array
+ * @array_len: Number of results to store in results array
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and will capture
+ * the results in the results array to have the data extracted later.
+ **/
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len)
+{
+ int i = 0;
+
+ /* Initialize results table to NULL. */
+ memset(results, 0, array_len * sizeof(results[0]));
+
+ /* Nothing to parse if header was only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+ int err;
+
+ if (tlv_attr_id != attr_id)
+ return -EINVAL;
+
+ /* Stop parsing on full error */
+ err = fbnic_tlv_attr_validate(attr, tlv_index);
+ if (err < 0)
+ return err;
+
+ if (i >= array_len)
+ return -ENOSPC;
+
+ results[i++] = attr;
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_attr_parse - Parse attributes into a list of attribute results
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and will capture
+ * the results in the results array to have the data extracted later.
+ **/
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ /* Initialize results table to NULL. */
+ memset(results, 0, sizeof(results[0]) * FBNIC_TLV_RESULTS_MAX);
+
+ /* Nothing to parse if header was only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ int err = fbnic_tlv_attr_validate(attr, tlv_index);
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+
+ /* Stop parsing on full error */
+ if (err < 0)
+ return err;
+
+ /* Ignore results for unsupported values */
+ if (!err) {
+ /* Do not overwrite existing entries */
+ if (results[attr_id])
+ return -EADDRINUSE;
+
+ results[attr_id] = attr;
+ }
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_msg_parse - Parse message and process via predetermined functions
+ * @opaque: Value passed to parser function to enable driver access
+ * @msg: Message to be parsed.
+ * @parser: TLV message parser definition.
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will match the message against the parser array by message type, parse
+ * its attributes using the attribute definitions for that entry, and then
+ * hand the results to the handler function provided for it.
+ **/
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser)
+{
+ struct fbnic_tlv_msg *results[FBNIC_TLV_RESULTS_MAX];
+ u16 msg_id = msg->hdr.type;
+ int err;
+
+ if (!msg->hdr.is_msg)
+ return -EINVAL;
+
+ if (le16_to_cpu(msg->hdr.len) > PAGE_SIZE / sizeof(u32))
+ return -E2BIG;
+
+ while (parser->id != msg_id) {
+ if (parser->id == FBNIC_TLV_MSG_ID_UNKNOWN)
+ return -ENOENT;
+ parser++;
+ }
+
+ err = fbnic_tlv_attr_parse(&msg[1], le16_to_cpu(msg->hdr.len) - 1,
+ results, parser->attr);
+ if (err)
+ return err;
+
+ return parser->func(opaque, results);
+}
+
+/**
+ * fbnic_tlv_parser_error - called if message doesn't match known type
+ * @opaque: (unused)
+ * @results: (unused)
+ *
+ * Return: -EBADMSG to indicate the message is an unsupported type
+ **/
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results)
+{
+ return -EBADMSG;
+}
+
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src)
+{
+ u8 *mac_addr;
+
+ mac_addr = fbnic_tlv_attr_get_value_ptr(src);
+ memcpy(dest, mac_addr, ETH_ALEN);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
new file mode 100644
index 000000000000..67300ab44353
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TLV_H_
+#define _FBNIC_TLV_H_
+
+#include <asm/byteorder.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define FBNIC_TLV_MSG_ALIGN(len) ALIGN(len, sizeof(u32))
+#define FBNIC_TLV_MSG_SIZE(len) \
+ (FBNIC_TLV_MSG_ALIGN(len) / sizeof(u32))
+
+/* TLV Header Format
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length |M|I|RSV| Type / ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The TLV header format described above will be used for transferring
+ * messages between the host and the firmware. To ensure byte ordering
+ * we have defined all fields as being little endian.
+ * Type/ID: Identifier for message and/or attribute
+ * RSV: Reserved field for future use, likely as additional flags
+ * I: cannot_ignore flag, identifies if unrecognized attribute can be ignored
+ * M: is_msg, indicates that this is the start of a new message
+ * Length: Total length of message in dwords including header
+ * or
+ * Total length of attribute in bytes including header
+ */
+struct fbnic_tlv_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type : 12; /* 0 .. 11 Type / ID */
+ u16 rsvd : 2; /* 12 .. 13 Reserved for future use */
+ u16 cannot_ignore : 1; /* 14 Attribute cannot be ignored */
+ u16 is_msg : 1; /* 15 Header belongs to message */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 is_msg : 1; /* 15 Header belongs to message */
+ u16 cannot_ignore : 1; /* 14 Attribute cannot be ignored */
+ u16 rsvd : 2; /* 13 .. 12 Reserved for future use */
+ u16 type : 12; /* 11 .. 0 Type / ID */
+#else
+#error "Missing defines from byteorder.h"
+#endif
+ __le16 len; /* 16 .. 31 length including TLV header */
+};
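For illustration, the header layout above can be packed by hand. The standalone sketch below mirrors the little-endian bitfield ordering (the value shown is the host-order packing; on the wire both halves are little endian, which matches host order on LE machines):

#include <stdint.h>
#include <stdio.h>

/* Pack a TLV header into its 32-bit little-endian form:
 * bits 0-11 type, 12-13 reserved, 14 cannot_ignore, 15 is_msg,
 * bits 16-31 length (DWORDs for messages, bytes for attributes).
 */
static uint32_t tlv_hdr_pack(uint16_t type, int cannot_ignore, int is_msg,
			     uint16_t len)
{
	uint16_t lo = (type & 0xfff) |
		      ((cannot_ignore ? 1 : 0) << 14) |
		      ((is_msg ? 1 : 0) << 15);

	return (uint32_t)len << 16 | lo;
}

int main(void)
{
	/* Message header for msg type 0x123 with an initial length of 1 DWORD */
	printf("0x%08x\n", tlv_hdr_pack(0x123, 0, 1, 1));	/* 0x00018123 */
	return 0;
}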
+
+#define FBNIC_TLV_RESULTS_MAX 32
+
+struct fbnic_tlv_msg {
+ struct fbnic_tlv_hdr hdr;
+ __le32 value[];
+};
+
+#define FBNIC_TLV_MSG_ID_UNKNOWN USHRT_MAX
+
+enum fbnic_tlv_type {
+ FBNIC_TLV_STRING,
+ FBNIC_TLV_FLAG,
+ FBNIC_TLV_UNSIGNED,
+ FBNIC_TLV_SIGNED,
+ FBNIC_TLV_BINARY,
+ FBNIC_TLV_NESTED,
+ FBNIC_TLV_ARRAY,
+ __FBNIC_TLV_MAX_TYPE
+};
+
+/* TLV Index
+ * Defines the relationship between the attribute IDs and their types.
+ * For each entry in the index there will be a size and type associated
+ * with it so that we can use this to parse the data and verify it matches
+ * the expected layout.
+ */
+struct fbnic_tlv_index {
+ u16 id;
+ u16 len;
+ enum fbnic_tlv_type type;
+};
+
+#define TLV_MAX_DATA (PAGE_SIZE - 512)
+#define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX
+#define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING }
+#define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG }
+#define FBNIC_TLV_ATTR_U32(id) { id, sizeof(u32), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_U64(id) { id, sizeof(u64), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_S32(id) { id, sizeof(s32), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_S64(id) { id, sizeof(s64), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_MAC_ADDR(id) { id, ETH_ALEN, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_NESTED(id) { id, 0, FBNIC_TLV_NESTED }
+#define FBNIC_TLV_ATTR_ARRAY(id) { id, 0, FBNIC_TLV_ARRAY }
+#define FBNIC_TLV_ATTR_RAW_DATA(id) { id, TLV_MAX_DATA, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_LAST { FBNIC_TLV_ATTR_ID_UNKNOWN, 0, 0 }
+
+struct fbnic_tlv_parser {
+ u16 id;
+ const struct fbnic_tlv_index *attr;
+ int (*func)(void *opaque,
+ struct fbnic_tlv_msg **results);
+};
+
+#define FBNIC_TLV_PARSER(id, attr, func) { FBNIC_TLV_MSG_ID_##id, attr, func }
+
+static inline void *
+fbnic_tlv_attr_get_value_ptr(struct fbnic_tlv_msg *attr)
+{
+ return (void *)&attr->value[0];
+}
+
+static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
+{
+ return !!attr;
+}
+
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
+size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
+ size_t max_size);
+
+#define get_unsigned_result(id, location) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ location = fbnic_tlv_attr_get_unsigned(result); \
+} while (0)
+
+#define get_signed_result(id, location) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ location = fbnic_tlv_attr_get_signed(result); \
+} while (0)
+
+#define get_string_result(id, size, str, max_size) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ size = fbnic_tlv_attr_get_string(result, str, max_size); \
+} while (0)
+
+#define get_bool(id) (!!(results[id]))
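A minimal sketch of how these helper macros are meant to be used from a parser callback (kernel context assumed; the attribute IDs and the callback itself are hypothetical, not defined by this driver). The macros rely on a local variable named results, which is exactly what fbnic_tlv_msg_parse() passes to parser->func():

/* Hypothetical attribute IDs for this example only */
enum { EXAMPLE_VERSION = 0, EXAMPLE_SERIAL = 1, EXAMPLE_READY = 2 };

static int example_parse_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	char serial[32] = {};
	size_t serial_sz = 0;
	u64 version = 0;

	get_unsigned_result(EXAMPLE_VERSION, version);
	get_string_result(EXAMPLE_SERIAL, serial_sz, serial, sizeof(serial));

	if (!get_bool(EXAMPLE_READY) || !version || !serial_sz)
		return -EINVAL;

	return 0;
}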
+
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len);
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len);
+#define fbnic_tlv_attr_put_int(msg, attr_id, value) \
+ __fbnic_tlv_attr_put_int(msg, attr_id, value, \
+ FBNIC_TLV_MSG_ALIGN(sizeof(value)))
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr);
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string);
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id);
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg);
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src);
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len);
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index);
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser);
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+
+#define FBNIC_TLV_MSG_ERROR \
+ FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
+#endif /* _FBNIC_TLV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
new file mode 100644
index 000000000000..0ed4c9fff5d8
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -0,0 +1,1913 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+
+#include "fbnic.h"
+#include "fbnic_csr.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+struct fbnic_xmit_cb {
+ u32 bytecount;
+ u8 desc_count;
+ int hw_head;
+};
+
+#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
+
+static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
+{
+ unsigned long csr_base = (unsigned long)ring->doorbell;
+
+ csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1);
+
+ return (u32 __iomem *)csr_base;
+}
+
+static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ return readl(csr_base + csr);
+}
+
+static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ writel(val, csr_base + csr);
+}
+
+static unsigned int fbnic_desc_unused(struct fbnic_ring *ring)
+{
+ return (ring->head - ring->tail - 1) & ring->size_mask;
+}
+
+static unsigned int fbnic_desc_used(struct fbnic_ring *ring)
+{
+ return (ring->tail - ring->head) & ring->size_mask;
+}
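These two helpers are the usual power-of-two ring bookkeeping. A standalone sketch of the same arithmetic (8-entry ring, head as consumer and tail as producer, with one slot kept unused so head == tail always means empty):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE	8u
#define RING_MASK	(RING_SIZE - 1)

static unsigned int ring_unused(unsigned int head, unsigned int tail)
{
	return (head - tail - 1) & RING_MASK;
}

static unsigned int ring_used(unsigned int head, unsigned int tail)
{
	return (tail - head) & RING_MASK;
}

int main(void)
{
	assert(ring_unused(0, 0) == RING_SIZE - 1);	/* empty ring */
	assert(ring_used(0, 0) == 0);
	assert(ring_unused(2, 1) == 0);			/* full: tail right behind head */
	assert(ring_used(2, 1) == RING_SIZE - 1);

	printf("ok\n");
	return 0;
}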
+
+static struct netdev_queue *txring_txq(const struct net_device *dev,
+ const struct fbnic_ring *ring)
+{
+ return netdev_get_tx_queue(dev, ring->q_idx);
+}
+
+static int fbnic_maybe_stop_tx(const struct net_device *dev,
+ struct fbnic_ring *ring,
+ const unsigned int size)
+{
+ struct netdev_queue *txq = txring_txq(dev, ring);
+ int res;
+
+ res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
+ FBNIC_TX_DESC_WAKEUP);
+
+ return !res;
+}
+
+static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);
+ unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount;
+ bool xmit_more = netdev_xmit_more();
+
+ /* TBD: Request completion more often if xmit_more becomes large */
+
+ return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more);
+}
+
+static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
+}
+
+static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
+}
+
+#define FBNIC_TWD_TYPE(_type) \
+ cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type))
+
+static bool
+fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ unsigned int l2len, i3len;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return false;
+
+ l2len = skb_mac_header_len(skb);
+ i3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
+ skb->csum_offset / 2));
+
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
+ FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
+ return false;
+}
+
+static void
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+{
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return;
+
+ if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)csum;
+ }
+}
+
+static bool
+fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ struct device *dev = skb->dev->dev.parent;
+ unsigned int tail = ring->tail, first;
+ unsigned int size, data_len;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ __le64 *twd;
+
+ ring->tx_buf[tail] = skb;
+
+ tail++;
+ tail &= ring->size_mask;
+ first = tail;
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ twd = &ring->desc[tail];
+
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask;
+
+ ring->tail = tail;
+
+ /* Verify there is room for another packet */
+ fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC);
+
+ if (fbnic_tx_sent_queue(skb, ring)) {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION);
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(tail, ring->doorbell);
+ }
+
+ return false;
+dma_error:
+ if (net_ratelimit())
+ netdev_err(skb->dev, "TX DMA map failed\n");
+
+ while (tail != first) {
+ tail--;
+ tail &= ring->size_mask;
+ twd = &ring->desc[tail];
+ if (tail == first)
+ fbnic_unmap_single_twd(dev, twd);
+ else
+ fbnic_unmap_page_twd(dev, twd);
+ }
+
+ return true;
+}
+
+#define FBNIC_MIN_FRAME_LEN 60
+
+static netdev_tx_t
+fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ __le64 *meta = &ring->desc[ring->tail];
+ u16 desc_needed;
+
+ if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN))
+ goto err_count;
+
+ /* Need: 1 descriptor per page,
+ * + 1 desc for skb_head,
+ * + 2 desc for metadata and timestamp metadata
+ * + 7 desc gap to keep tail from touching head
+ * otherwise try next time
+ */
+ desc_needed = skb_shinfo(skb)->nr_frags + 10;
+ if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed))
+ return NETDEV_TX_BUSY;
+
+ *meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC);
+
+ /* Write all members within DWORD to condense this into 2 4B writes */
+ FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->desc_count = 0;
+
+ if (fbnic_tx_offloads(ring, skb, meta))
+ goto err_free;
+
+ if (fbnic_tx_map(ring, skb, meta))
+ goto err_free;
+
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+err_count:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ unsigned int q_map = skb->queue_mapping;
+
+ return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
+}
+
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int l2len, l3len;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return features;
+
+ l2len = skb_mac_header_len(skb);
+ l3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+ /* Check that header lengths are a multiple of 2.
+ * In case of 6in6 we support longer headers (IHLEN + OHLEN)
+ * but keep things simple for now, 512B is plenty.
+ */
+ if ((l2len | l3len | skb->csum_offset) % 2 ||
+ !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
+ return features & ~NETIF_F_CSUM_MASK;
+
+ return features;
+}
+
+static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0;
+ unsigned int head = ring->head;
+ struct netdev_queue *txq;
+ unsigned int clean_desc;
+
+ clean_desc = (hw_head - head) & ring->size_mask;
+
+ while (clean_desc) {
+ struct sk_buff *skb = ring->tx_buf[head];
+ unsigned int desc_cnt;
+
+ desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
+ if (desc_cnt > clean_desc)
+ break;
+
+ ring->tx_buf[head] = NULL;
+
+ clean_desc -= desc_cnt;
+
+ while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) {
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+ }
+
+ fbnic_unmap_single_twd(nv->dev, &ring->desc[head]);
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+
+ while (desc_cnt--) {
+ fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
+ head++;
+ head &= ring->size_mask;
+ }
+
+ total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
+ total_packets += 1;
+
+ napi_consume_skb(skb, napi_budget);
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ txq = txring_txq(nv->napi.dev, ring);
+
+ if (unlikely(discard)) {
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ return;
+ }
+
+ netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP);
+}
+
+static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
+ struct page *page)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+
+ page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
+ rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
+ rx_buf->page = page;
+}
+
+static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
+ unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ return rx_buf->page;
+}
+
+static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
+ struct fbnic_napi_vector *nv, int budget)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ struct page *page = rx_buf->page;
+
+ if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
+ page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);
+
+ rx_buf->page = NULL;
+}
+
+static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_q_triad *qt, s32 head0)
+{
+ if (head0 >= 0)
+ fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
+}
+
+static void
+fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
+ int napi_budget)
+{
+ struct fbnic_ring *cmpl = &qt->cmpl;
+ __le64 *raw_tcd, done;
+ u32 head = cmpl->head;
+ s32 head0 = -1;
+
+ done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[head & cmpl->size_mask];
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) {
+ u64 tcd;
+
+ dma_rmb();
+
+ tcd = le64_to_cpu(*raw_tcd);
+
+ switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
+ case FBNIC_TCD_TYPE_0:
+ if (!(tcd & FBNIC_TCD_TWQ1))
+ head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
+ tcd);
+ /* Currently all err status bits are related to
+ * timestamps and as those have yet to be added
+ * they are skipped for now.
+ */
+ break;
+ default:
+ break;
+ }
+
+ raw_tcd++;
+ head++;
+ if (!(head & cmpl->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[0];
+ }
+ }
+
+ /* Record the current head/tail of the queue */
+ if (cmpl->head != head) {
+ cmpl->head = head;
+ writel(head & cmpl->size_mask, cmpl->doorbell);
+ }
+
+ /* Unmap and free processed buffers */
+ fbnic_clean_twq(nv, napi_budget, qt, head0);
+}
+
+static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_ring *ring, unsigned int hw_head)
+{
+ unsigned int head = ring->head;
+
+ if (head == hw_head)
+ return;
+
+ do {
+ fbnic_page_pool_drain(ring, head, nv, napi_budget);
+
+ head++;
+ head &= ring->size_mask;
+ } while (head != hw_head);
+
+ ring->head = head;
+}
+
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
+{
+ __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
+ dma_addr_t dma = page_pool_get_dma_addr(page);
+ u64 bd, i = FBNIC_BD_FRAG_COUNT;
+
+ bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
+ FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id);
+
+ /* In the case that a page size is larger than 4K we will map a
+ * single page to multiple fragments. The page is split into
+ * FBNIC_BD_FRAG_COUNT fragments and the lower n bits are used
+ * to indicate the individual fragment IDs.
+ */
+ do {
+ *bdq_desc = cpu_to_le64(bd);
+ bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) |
+ FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1);
+ } while (--i);
+}
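A small sketch of the page-to-fragment mapping described in the comment above, under assumed illustration values (16 KiB base page, 4 KiB hardware buffer fragments); the real FBNIC_BD_* field layout lives in fbnic_csr.h and is not reproduced here:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	(16u * 1024)	/* assumed base page size */
#define EX_FRAG_SIZE	(4u * 1024)	/* stands in for FBNIC_BD_FRAG_SIZE */
#define EX_FRAG_COUNT	(EX_PAGE_SIZE / EX_FRAG_SIZE)

int main(void)
{
	uint64_t dma = 0x12340000;	/* page-aligned DMA address */
	unsigned int id = 7;		/* buffer descriptor (page) index */
	unsigned int frag;

	/* One page yields EX_FRAG_COUNT consecutive descriptors; each one
	 * advances the DMA address by one fragment and carries the fragment
	 * number alongside the page ID so completions can name the exact
	 * region that was used.
	 */
	for (frag = 0; frag < EX_FRAG_COUNT; frag++)
		printf("desc %u: page id %u frag %u dma 0x%llx\n",
		       id * EX_FRAG_COUNT + frag, id, frag,
		       (unsigned long long)(dma + (uint64_t)frag * EX_FRAG_SIZE));

	return 0;
}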
+
+static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
+{
+ unsigned int count = fbnic_desc_unused(bdq);
+ unsigned int i = bdq->tail;
+
+ if (!count)
+ return;
+
+ do {
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(nv->page_pool);
+ if (!page)
+ break;
+
+ fbnic_page_pool_init(bdq, i, page);
+ fbnic_bd_prep(bdq, i, page);
+
+ i++;
+ i &= bdq->size_mask;
+
+ count--;
+ } while (count);
+
+ if (bdq->tail != i) {
+ bdq->tail = i;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(i, bdq->doorbell);
+ }
+}
+
+static unsigned int fbnic_hdr_pg_start(unsigned int pg_off)
+{
+ /* The headroom of the first header may be larger than FBNIC_RX_HROOM
+ * due to alignment. So account for that by just making the page
+ * offset 0 if we are starting at the first header.
+ */
+ if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM &&
+ pg_off == ALIGN(FBNIC_RX_HROOM, 128))
+ return 0;
+
+ return pg_off - FBNIC_RX_HROOM;
+}
+
+static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len)
+{
+ /* Determine the end of the buffer by finding the start of the next
+ * and then subtracting the headroom from that frame.
+ */
+ pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM;
+
+ return ALIGN(pg_off, 128) - FBNIC_RX_HROOM;
+}
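A worked example of the two helpers above plus the headroom and frame-size math used by fbnic_pkt_prepare(); the FBNIC_RX_HROOM/TROOM/PAD values below are stand-ins chosen for illustration, not the driver's actual constants:

#include <stdio.h>

#define RX_HROOM	64u	/* assumed illustration values */
#define RX_TROOM	32u
#define RX_PAD		2u
#define HDR_ALIGN	128u

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static unsigned int hdr_pg_start(unsigned int pg_off)
{
	/* First header in the page may carry extra headroom from alignment */
	if (ALIGN_UP(RX_HROOM, HDR_ALIGN) > RX_HROOM &&
	    pg_off == ALIGN_UP(RX_HROOM, HDR_ALIGN))
		return 0;

	return pg_off - RX_HROOM;
}

static unsigned int hdr_pg_end(unsigned int pg_off, unsigned int len)
{
	/* End of this buffer = start of the next header minus its headroom */
	return ALIGN_UP(pg_off + len + RX_TROOM + RX_HROOM, HDR_ALIGN) - RX_HROOM;
}

int main(void)
{
	unsigned int pg_off = ALIGN_UP(RX_HROOM, HDR_ALIGN);	/* first header */
	unsigned int len = 90;
	unsigned int start = hdr_pg_start(pg_off);
	unsigned int end = hdr_pg_end(pg_off, len);

	printf("start %u end %u frame_sz %u headroom %u\n",
	       start, end, end - start, pg_off - start + RX_PAD);
	return 0;
}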
+
+static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
+ unsigned char *hdr_start;
+
+ /* data_hard_start should always be NULL when this is called */
+ WARN_ON_ONCE(pkt->buff.data_hard_start);
+
+ /* Short-cut the end calculation if we know page is fully consumed */
+ hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len);
+ hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off);
+
+ headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
+ frame_sz = hdr_pg_end - hdr_pg_start;
+ xdp_init_buff(&pkt->buff, frame_sz, NULL);
+ hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+ hdr_pg_start, frame_sz,
+ DMA_BIDIRECTIONAL);
+
+ /* Build frame around buffer */
+ hdr_start = page_address(page) + hdr_pg_start;
+
+ xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
+ len - FBNIC_RX_PAD, true);
+
+ pkt->data_truesize = 0;
+ pkt->data_len = 0;
+ pkt->nr_frags = 0;
+}
+
+static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
+ struct skb_shared_info *shinfo;
+ unsigned int truesize;
+
+ truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
+
+ pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+ pg_off, truesize, DMA_BIDIRECTIONAL);
+
+ /* Add page to xdp shared info */
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+
+ /* Accumulate the truesize contributed by the data fragments */
+ pkt->data_truesize += truesize;
+
+ __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);
+
+ /* Accumulate the payload length added by this fragment */
+ pkt->data_len += len;
+}
+
+static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt, int budget)
+{
+ struct skb_shared_info *shinfo;
+ struct page *page;
+ int nr_frags;
+
+ if (!pkt->buff.data_hard_start)
+ return;
+
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nr_frags = pkt->nr_frags;
+
+ while (nr_frags--) {
+ page = skb_frag_page(&shinfo->frags[nr_frags]);
+ page_pool_put_full_page(nv->page_pool, page, !!budget);
+ }
+
+ page = virt_to_page(pkt->buff.data_hard_start);
+ page_pool_put_full_page(nv->page_pool, page, !!budget);
+}
+
+static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ unsigned int nr_frags = pkt->nr_frags;
+ struct skb_shared_info *shinfo;
+ unsigned int truesize;
+ struct sk_buff *skb;
+
+ truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
+ pkt->buff.data_hard_start;
+
+ /* Build frame around buffer */
+ skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Push data pointer to start of data, put tail to end of data */
+ skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
+ __skb_put(skb, pkt->buff.data_end - pkt->buff.data);
+
+ /* Add tracking for metadata at the start of the frame */
+ skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);
+
+ /* Add Rx frags */
+ if (nr_frags) {
+ /* Verify that shared info didn't move */
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ WARN_ON(skb_shinfo(skb) != shinfo);
+
+ skb->truesize += pkt->data_truesize;
+ skb->data_len += pkt->data_len;
+ shinfo->nr_frags = nr_frags;
+ skb->len += pkt->data_len;
+ }
+
+ skb_mark_for_recycle(skb);
+
+ /* Set MAC header specific fields */
+ skb->protocol = eth_type_trans(skb, nv->napi.dev);
+
+ return skb;
+}
+
+static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
+{
+ return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 :
+ (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_L2;
+}
+
+static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
+ u64 rcd, struct sk_buff *skb,
+ struct fbnic_q_triad *qt)
+{
+ struct net_device *netdev = nv->napi.dev;
+ struct fbnic_ring *rcq = &qt->cmpl;
+
+ fbnic_rx_csum(rcd, skb, rcq);
+
+ if (netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb,
+ FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd),
+ fbnic_skb_hash_type(rcd));
+
+ skb_record_rx_queue(skb, rcq->q_idx);
+}
+
+static bool fbnic_rcd_metadata_err(u64 rcd)
+{
+ return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd);
+}
+
+static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_q_triad *qt, int budget)
+{
+ struct fbnic_ring *rcq = &qt->cmpl;
+ struct fbnic_pkt_buff *pkt;
+ s32 head0 = -1, head1 = -1;
+ __le64 *raw_rcd, done;
+ u32 head = rcq->head;
+ u64 packets = 0;
+
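+ /* Track the stale value of the DONE bit for this pass through the
+ * ring; it flips each time the ring wraps, so descriptors left over
+ * from the previous pass are not mistaken for new completions.
+ */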
+ done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
+ raw_rcd = &rcq->desc[head & rcq->size_mask];
+ pkt = rcq->pkt;
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while (likely(packets < budget)) {
+ struct sk_buff *skb = ERR_PTR(-EINVAL);
+ u64 rcd;
+
+ if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
+ break;
+
+ dma_rmb();
+
+ rcd = le64_to_cpu(*raw_rcd);
+
+ switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
+ case FBNIC_RCD_TYPE_HDR_AL:
+ head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_pkt_prepare(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_PAY_AL:
+ head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_add_rx_frag(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_OPT_META:
+ /* Only type 0 is currently supported */
+ if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd))
+ break;
+
+ /* We currently ignore the action table index */
+ break;
+ case FBNIC_RCD_TYPE_META:
+ if (likely(!fbnic_rcd_metadata_err(rcd)))
+ skb = fbnic_build_skb(nv, pkt);
+
+ /* Populate skb and invalidate XDP */
+ if (!IS_ERR_OR_NULL(skb)) {
+ fbnic_populate_skb_fields(nv, rcd, skb, qt);
+
+ packets++;
+
+ napi_gro_receive(&nv->napi, skb);
+ } else {
+ fbnic_put_pkt_buff(nv, pkt, 1);
+ }
+
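+ /* Clear data_hard_start to mark the packet buffer as consumed so
+ * it is not freed a second time at flush/teardown.
+ */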
+ pkt->buff.data_hard_start = NULL;
+
+ break;
+ }
+
+ raw_rcd++;
+ head++;
+ if (!(head & rcq->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_RCD_DONE);
+ raw_rcd = &rcq->desc[0];
+ }
+ }
+
+ /* Unmap and free processed buffers */
+ if (head0 >= 0)
+ fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
+ fbnic_fill_bdq(nv, &qt->sub0);
+
+ if (head1 >= 0)
+ fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
+ fbnic_fill_bdq(nv, &qt->sub1);
+
+ /* Record the current head of the queue and notify hardware via the doorbell */
+ if (rcq->head != head) {
+ rcq->head = head;
+ writel(head & rcq->size_mask, rcq->doorbell);
+ }
+
+ return packets;
+}
+
+static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32));
+}
+
+static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx),
+ FBNIC_INTR_CQ_REARM_INTR_UNMASK);
+}
+
+static int fbnic_poll(struct napi_struct *napi, int budget)
+{
+ struct fbnic_napi_vector *nv = container_of(napi,
+ struct fbnic_napi_vector,
+ napi);
+ int i, j, work_done = 0;
+
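+ /* Tx completions are cleaned first but are not budgeted; only Rx
+ * packets count toward the NAPI budget used to decide on re-polling.
+ */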
+ for (i = 0; i < nv->txt_count; i++)
+ fbnic_clean_tcq(nv, &nv->qt[i], budget);
+
+ for (j = 0; j < nv->rxt_count; j++, i++)
+ work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget);
+
+ if (work_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ fbnic_nv_irq_rearm(nv);
+
+ return 0;
+}
+
+static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct fbnic_napi_vector *nv = data;
+
+ napi_schedule_irqoff(&nv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ if (!(txr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Remove pointer to the Tx ring */
+ WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
+ fbn->tx[txr->q_idx] = NULL;
+}
+
+static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ if (!(rxr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Remove pointer to the Rx ring */
+ WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
+ fbn->rx[rxr->q_idx] = NULL;
+}
+
+static void fbnic_free_napi_vector(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ fbnic_free_irq(fbd, v_idx, nv);
+ page_pool_destroy(nv->page_pool);
+ netif_napi_del(&nv->napi);
+ list_del(&nv->napis);
+ kfree(nv);
+}
+
+void fbnic_free_napi_vectors(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv, *temp;
+
+ list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
+ fbnic_free_napi_vector(fbn, nv);
+}
+
+static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
+{
+ unsigned char *dev_name = nv->napi.dev->name;
+
+ if (!nv->rxt_count)
+ snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
+ nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ else
+ snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
+ nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+}
+
+#define FBNIC_PAGE_POOL_FLAGS \
+ (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
+
+static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = FBNIC_PAGE_POOL_FLAGS,
+ .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
+ .nid = NUMA_NO_NODE,
+ .dev = nv->dev,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .offset = 0,
+ .max_len = PAGE_SIZE
+ };
+ struct page_pool *pp;
+
+ /* Page pool cannot exceed a size of 32768. This doesn't limit the
+ * pages on the ring but the number we can have cached waiting on
+ * the next use.
+ *
+ * TBD: Can this be reduced further? Would a multiple of
+ * NAPI_POLL_WEIGHT possibly make more sense? The question is how
+ * many pages do we need to hold in reserve to get the best return
+ * without hogging too much system memory.
+ */
+ if (pp_params.pool_size > 32768)
+ pp_params.pool_size = 32768;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ nv->page_pool = pp;
+
+ return 0;
+}
+
+static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
+ int q_idx, u8 flags)
+{
+ ring->doorbell = doorbell;
+ ring->q_idx = q_idx;
+ ring->flags = flags;
+}
+
+static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txq_count, unsigned int txq_idx,
+ unsigned int rxq_count, unsigned int rxq_idx)
+{
+ int txt_count = txq_count, rxt_count = rxq_count;
+ u32 __iomem *uc_addr = fbd->uc_addr0;
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *qt;
+ int qt_count, err;
+ u32 __iomem *db;
+
+ qt_count = txt_count + rxq_count;
+ if (!qt_count)
+ return -EINVAL;
+
+ /* If MMIO has already failed there are no rings to initialize */
+ if (!uc_addr)
+ return -EIO;
+
+ /* Allocate NAPI vector and queue triads */
+ nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL);
+ if (!nv)
+ return -ENOMEM;
+
+ /* Record queue triad counts */
+ nv->txt_count = txt_count;
+ nv->rxt_count = rxt_count;
+
+ /* Provide pointer back to fbnic and MSI-X vectors */
+ nv->fbd = fbd;
+ nv->v_idx = v_idx;
+
+ /* Record IRQ to NAPI struct */
+ netif_napi_set_irq(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+
+ /* Tie napi to netdev */
+ list_add(&nv->napis, &fbn->napis);
+ netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+
+ /* Tie nv back to PCIe dev */
+ nv->dev = fbd->dev;
+
+ /* Allocate page pool */
+ if (rxq_count) {
+ err = fbnic_alloc_nv_page_pool(fbn, nv);
+ if (err)
+ goto napi_del;
+ }
+
+ /* Initialize vector name */
+ fbnic_name_napi_vector(nv);
+
+ /* Request the IRQ for napi vector */
+ err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
+ IRQF_SHARED, nv->name, nv);
+ if (err)
+ goto pp_destroy;
+
+ /* Initialize queue triads */
+ qt = nv->qt;
+
+ while (txt_count) {
+ /* Configure Tx queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
+
+ /* Assign Tx queue to netdev if applicable */
+ if (txq_count > 0) {
+ u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
+
+ fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
+ fbn->tx[txq_idx] = &qt->sub0;
+ txq_count--;
+ } else {
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
+ /* Configure Tx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, 0, 0);
+
+ /* Update Tx queue index */
+ txt_count--;
+ txq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ while (rxt_count) {
+ /* Configure header queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
+ fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+
+ /* Configure payload queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
+ fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+
+ /* Configure Rx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS);
+ fbn->rx[rxq_idx] = &qt->cmpl;
+
+ /* Update Rx queue index */
+ rxt_count--;
+ rxq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ return 0;
+
+pp_destroy:
+ page_pool_destroy(nv->page_pool);
+napi_del:
+ netif_napi_del(&nv->napi);
+ list_del(&nv->napis);
+ kfree(nv);
+ return err;
+}
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
+{
+ unsigned int txq_idx = 0, rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS;
+ unsigned int num_tx = fbn->num_tx_queues;
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int num_napi = fbn->num_napi;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ /* Allocate 1 Tx queue per napi vector */
+ if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) {
+ while (num_tx) {
+ err = fbnic_alloc_napi_vector(fbd, fbn,
+ num_napi, v_idx,
+ 1, txq_idx, 0, 0);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx--;
+ txq_idx++;
+
+ v_idx++;
+ }
+ }
+
+ /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */
+ while (num_rx | num_tx) {
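+ /* Spread the remaining Tx/Rx queues evenly across the NAPI
+ * vectors that have not yet been assigned any.
+ */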
+ int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx);
+ int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx);
+
+ err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx,
+ tqpv, txq_idx, rqpv, rxq_idx);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx -= tqpv;
+ txq_idx++;
+
+ num_rx -= rqpv;
+ rxq_idx++;
+
+ v_idx++;
+ }
+
+ return 0;
+
+free_vectors:
+ fbnic_free_napi_vectors(fbn);
+
+ return -ENOMEM;
+}
+
+static void fbnic_free_ring_resources(struct device *dev,
+ struct fbnic_ring *ring)
+{
+ kvfree(ring->buffer);
+ ring->buffer = NULL;
+
+ /* If size is not set there are no descriptors present */
+ if (!ring->size)
+ return;
+
+ dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
+ ring->size_mask = 0;
+ ring->size = 0;
+}
+
+static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t size;
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096);
+
+ txr->desc = dma_alloc_coherent(dev, size, &txr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!txr->desc)
+ return -ENOMEM;
+
+ /* txq_size should be a power of 2, so mask is just that -1 */
+ txr->size_mask = fbn->txq_size - 1;
+ txr->size = size;
+
+ return 0;
+}
+
+static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr)
+{
+ size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1);
+
+ txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return txr->tx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ if (txr->flags & FBNIC_RING_F_DISABLED)
+ return 0;
+
+ err = fbnic_alloc_tx_ring_desc(fbn, txr);
+ if (err)
+ return err;
+
+ if (!(txr->flags & FBNIC_RING_F_CTX))
+ return 0;
+
+ err = fbnic_alloc_tx_ring_buffer(txr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, txr);
+ return err;
+}
+
+static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t desc_size = sizeof(*rxr->desc);
+ u32 rxq_size;
+ size_t size;
+
+ switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) {
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ rxq_size = fbn->rcq_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(desc_size, rxq_size), 4096);
+
+ rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rxr->desc)
+ return -ENOMEM;
+
+ /* rxq_size should be a power of 2, so mask is just that -1 */
+ rxr->size_mask = rxq_size - 1;
+ rxr->size = size;
+
+ return 0;
+}
+
+static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr)
+{
+ size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1);
+
+ if (rxr->flags & FBNIC_RING_F_CTX)
+ size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1);
+ else
+ size = sizeof(*rxr->pkt);
+
+ rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return rxr->rx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_rx_ring_desc(fbn, rxr);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_rx_ring_buffer(rxr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, rxr);
+ return err;
+}
+
+static void fbnic_free_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+
+ fbnic_free_ring_resources(dev, &qt->cmpl);
+ fbnic_free_ring_resources(dev, &qt->sub1);
+ fbnic_free_ring_resources(dev, &qt->sub0);
+}
+
+static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub1;
+
+ return 0;
+
+free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+ return err;
+}
+
+static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub1;
+
+ return 0;
+
+free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+ return err;
+}
+
+static void fbnic_free_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Free Tx Resources */
+ for (i = 0; i < nv->txt_count; i++)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+
+ for (j = 0; j < nv->rxt_count; j++, i++)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+}
+
+static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j, err;
+
+ /* Allocate Tx Resources */
+ for (i = 0; i < nv->txt_count; i++) {
+ err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
+ if (err)
+ goto free_resources;
+ }
+
+ /* Allocate Rx Resources */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
+ if (err)
+ goto free_resources;
+ }
+
+ return 0;
+
+free_resources:
+ while (i--)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+ return err;
+}
+
+void fbnic_free_resources(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis)
+ fbnic_free_nv_resources(fbn, nv);
+}
+
+int fbnic_alloc_resources(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+ int err = -ENODEV;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ err = fbnic_alloc_nv_resources(fbn, nv);
+ if (err)
+ goto free_resources;
+ }
+
+ return 0;
+
+free_resources:
+ list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
+ fbnic_free_nv_resources(fbn, nv);
+
+ return err;
+}
+
+static void fbnic_disable_twq0(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
+}
+
+static void fbnic_disable_tcq(struct fbnic_ring *txr)
+{
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK);
+}
+
+static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL);
+
+ bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_disable_rcq(struct fbnic_ring *rxr)
+{
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0);
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK);
+}
+
+void fbnic_napi_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ napi_disable(&nv->napi);
+
+ fbnic_nv_irq_disable(nv);
+ }
+}
+
+void fbnic_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Disable Tx queue triads */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_disable_twq0(&qt->sub0);
+ fbnic_disable_tcq(&qt->cmpl);
+ }
+
+ /* Disable Rx queue triads */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_disable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_disable_rcq(&qt->cmpl);
+ }
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_tx_flush(struct fbnic_dev *fbd)
+{
+ netdev_warn(fbd->netdev, "triggering Tx flush\n");
+
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN,
+ FBNIC_TMI_DROP_CTRL_EN);
+}
+
+static void fbnic_tx_flush_off(struct fbnic_dev *fbd)
+{
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0);
+}
+
+struct fbnic_idle_regs {
+ u32 reg_base;
+ u8 reg_cnt;
+};
+
+static bool fbnic_all_idle(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < nregs; i++) {
+ for (j = 0; j < regs[i].reg_cnt; j++) {
+ if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U)
+ return false;
+ }
+ }
+ return true;
+}
+
+static void fbnic_idle_dump(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs, const char *dir, int err)
+{
+ unsigned int i, j;
+
+ netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err);
+ for (i = 0; i < nregs; i++)
+ for (j = 0; j < regs[i].reg_cnt; j++)
+ netdev_err(fbd->netdev, "0x%04x: %08x\n",
+ regs[i].reg_base + j,
+ fbnic_rd32(fbd, regs[i].reg_base + j));
+}
+
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
+{
+ static const struct fbnic_idle_regs tx[] = {
+ { FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TWQ_IDLE_CNT, },
+ { FBNIC_QM_TQS_IDLE(0), FBNIC_QM_TQS_IDLE_CNT, },
+ { FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TDE_IDLE_CNT, },
+ { FBNIC_QM_TCQ_IDLE(0), FBNIC_QM_TCQ_IDLE_CNT, },
+ }, rx[] = {
+ { FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_HPQ_IDLE_CNT, },
+ { FBNIC_QM_PPQ_IDLE(0), FBNIC_QM_PPQ_IDLE_CNT, },
+ { FBNIC_QM_RCQ_IDLE(0), FBNIC_QM_RCQ_IDLE_CNT, },
+ };
+ bool idle;
+ int err;
+
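+ /* Poll the Tx idle registers for up to 500ms; if the queues fail to
+ * drain, enable drop mode to flush them and retry before reporting.
+ */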
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, tx, ARRAY_SIZE(tx));
+ if (err == -ETIMEDOUT) {
+ fbnic_tx_flush(fbd);
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle,
+ 2, 500000, false,
+ fbd, tx, ARRAY_SIZE(tx));
+ fbnic_tx_flush_off(fbd);
+ }
+ if (err) {
+ fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err);
+ if (may_fail)
+ return err;
+ }
+
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, rx, ARRAY_SIZE(rx));
+ if (err)
+ fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err);
+ return err;
+}
+
+void fbnic_flush(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ int i, j;
+
+ /* Flush any processed Tx Queue Triads and drop the rest */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+ struct netdev_queue *tx_queue;
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ /* Nothing else to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Reset BQL associated with Tx queue */
+ tx_queue = netdev_get_tx_queue(nv->napi.dev,
+ qt->sub0.q_idx);
+ netdev_tx_reset_queue(tx_queue);
+
+ /* Disassociate Tx queue from NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Flush any processed Rx Queue Triads and drop the rest */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
+ fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
+ qt->cmpl.pkt->buff.data_hard_start = NULL;
+
+ /* Disassociate Rx queue from NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+ }
+}
+
+void fbnic_fill(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ int i, j;
+
+ /* Configure NAPI mapping for Tx */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Nothing to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Associate Tx queue with NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Configure NAPI mapping and populate pages
+ * in the BDQ rings to use for Rx
+ */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Associate Rx queue with NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+
+ /* Populate the header and payload BDQs */
+ fbnic_fill_bdq(nv, &qt->sub0);
+ fbnic_fill_bdq(nv, &qt->sub1);
+ }
+ }
+}
+
+static void fbnic_enable_twq0(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma));
+
+ /* Write the lower 4 bits of the log size; a 64K ring encodes as 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
+
+static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *tcq)
+{
+ u32 log_size = fls(tcq->size_mask);
+
+ if (!tcq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET);
+ tcq->tail = 0;
+ tcq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma));
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma));
+
+ /* Write the lower 4 bits of the log size; a 64K ring encodes as 0 */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE);
+}
+
+static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE;
+ u32 log_size;
+
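+ /* The header and payload queues share one BDQ control register, so
+ * the PPQ enable bit is folded into the value written via the HPQ.
+ */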
+ /* Reset head/tail */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET);
+ ppq->tail = 0;
+ ppq->head = 0;
+ hpq->tail = 0;
+ hpq->head = 0;
+
+ log_size = fls(hpq->size_mask);
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma));
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma));
+
+ /* Write the lower 4 bits of the log size; a 64K ring encodes as 0 */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf);
+
+ if (!ppq->size_mask)
+ goto write_ctl;
+
+ log_size = fls(ppq->size_mask);
+
+ /* Add enabling of PPQ to BDQ control */
+ bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf);
+
+write_ctl:
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq)
+{
+ u32 drop_mode, rcq_ctl;
+
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+
+ /* Specify packet layout */
+ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM);
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
+}
+
+static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq)
+{
+ u32 log_size = fls(rcq->size_mask);
+ u32 rcq_ctl;
+
+ fbnic_config_drop_mode_rcq(nv, rcq);
+
+ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
+ FBNIC_RX_MAX_HDR) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
+ FBNIC_RX_PAYLD_OFFSET) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
+ FBNIC_RX_PAYLD_PG_CL);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl);
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET);
+ rcq->head = 0;
+ rcq->tail = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma));
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma));
+
+ /* Write the lower 4 bits of the log size; a 64K ring encodes as 0 */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
+}
+
+void fbnic_enable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Setup Tx Queue Triads */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_enable_twq0(&qt->sub0);
+ fbnic_enable_tcq(nv, &qt->cmpl);
+ }
+
+ /* Setup Rx Queue Triads */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_enable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
+ fbnic_enable_rcq(nv, &qt->cmpl);
+ }
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val;
+
+ val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_napi_enable(struct fbnic_net *fbn)
+{
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ napi_enable(&nv->napi);
+
+ fbnic_nv_irq_enable(nv);
+
+ /* Record bit used for NAPI IRQs so we can
+ * set the mask appropriately
+ */
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+
+ /* Force the first interrupt on the device to guarantee
+ * that any packets that may have been enqueued during the
+ * bringup are processed.
+ */
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+void fbnic_napi_depletion_check(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Find RQs which are completely out of pages */
+ for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ /* Assume 4 pages is always enough to fit a packet
+ * and therefore generate a completion and an IRQ.
+ */
+ if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
new file mode 100644
index 000000000000..4a206c0e7192
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TXRX_H_
+#define _FBNIC_TXRX_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/xdp.h>
+
+struct fbnic_net;
+
+/* Guarantee we have space needed for storing the buffer
+ * To store the buffer we need:
+ * 1 descriptor per page
+ * + 1 descriptor for skb head
+ * + 2 descriptors for metadata and optional metadata
+ * + 7 descriptors to keep tail out of the same cacheline as head
+ * If we cannot guarantee that then we should return TX_BUSY
+ */
+#define FBNIC_MAX_SKB_DESC (MAX_SKB_FRAGS + 10)
+#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
+#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
+
+#define FBNIC_MAX_TXQS 128u
+#define FBNIC_MAX_RXQS 128u
+
+#define FBNIC_TXQ_SIZE_DEFAULT 1024
+#define FBNIC_HPQ_SIZE_DEFAULT 256
+#define FBNIC_PPQ_SIZE_DEFAULT 256
+#define FBNIC_RCQ_SIZE_DEFAULT 1024
+
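+/* Reserve tailroom for the skb shared info and size the headroom so that
+ * headroom plus tailroom works out to a multiple of 128 bytes.
+ */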
+#define FBNIC_RX_TROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define FBNIC_RX_HROOM \
+ (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM)
+#define FBNIC_RX_PAD 0
+#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD)
+#define FBNIC_RX_PAYLD_OFFSET 0
+#define FBNIC_RX_PAYLD_PG_CL 0
+
+#define FBNIC_RING_F_DISABLED BIT(0)
+#define FBNIC_RING_F_CTX BIT(1)
+#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */
+
+struct fbnic_pkt_buff {
+ struct xdp_buff buff;
+ u32 data_truesize;
+ u16 data_len;
+ u16 nr_frags;
+};
+
+/* Pagecnt bias is LONG_MAX so the sign bit is reserved to catch overflow:
+ * if the bias is overcharged it flips over to a negative value.
+ */
+#define PAGECNT_BIAS_MAX LONG_MAX
+struct fbnic_rx_buf {
+ struct page *page;
+ long pagecnt_bias;
+};
+
+struct fbnic_ring {
+ /* Pointer to buffer specific info */
+ union {
+ struct fbnic_pkt_buff *pkt; /* RCQ */
+ struct fbnic_rx_buf *rx_buf; /* BDQ */
+ void **tx_buf; /* TWQ */
+ void *buffer; /* Generic pointer */
+ };
+
+ u32 __iomem *doorbell; /* Pointer to CSR space for ring */
+ __le64 *desc; /* Descriptor ring memory */
+ u16 size_mask; /* Size of ring in descriptors - 1 */
+ u8 q_idx; /* Logical netdev ring index */
+ u8 flags; /* Ring flags (FBNIC_RING_F_*) */
+
+ u32 head, tail; /* Head/Tail of ring */
+
+ /* Slow path fields follow */
+ dma_addr_t dma; /* Phys addr of descriptor memory */
+ size_t size; /* Size of descriptor ring in memory */
+};
+
+struct fbnic_q_triad {
+ struct fbnic_ring sub0, sub1, cmpl;
+};
+
+struct fbnic_napi_vector {
+ struct napi_struct napi;
+ struct device *dev; /* Device for DMA unmapping */
+ struct page_pool *page_pool;
+ struct fbnic_dev *fbd;
+ char name[IFNAMSIZ + 9];
+
+ u16 v_idx;
+ u8 txt_count;
+ u8 rxt_count;
+
+ struct list_head napis;
+
+ struct fbnic_q_triad qt[];
+};
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features);
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
+void fbnic_free_napi_vectors(struct fbnic_net *fbn);
+int fbnic_alloc_resources(struct fbnic_net *fbn);
+void fbnic_free_resources(struct fbnic_net *fbn);
+void fbnic_napi_enable(struct fbnic_net *fbn);
+void fbnic_napi_disable(struct fbnic_net *fbn);
+void fbnic_enable(struct fbnic_net *fbn);
+void fbnic_disable(struct fbnic_net *fbn);
+void fbnic_flush(struct fbnic_net *fbn);
+void fbnic_fill(struct fbnic_net *fbn);
+
+void fbnic_napi_depletion_check(struct net_device *netdev);
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+
+#endif /* _FBNIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 6453c92f0fa7..7fa1820db9cc 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -352,11 +352,11 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
netif_dbg(ks, intr, ks->netdev,
"%s: txspace %d\n", __func__, tx_space);
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
ks->tx_space = tx_space;
if (netif_queue_stopped(ks->netdev))
netif_wake_queue(ks->netdev);
- spin_unlock(&ks->statelock);
+ spin_unlock_bh(&ks->statelock);
}
if (status & IRQ_SPIBEI) {
@@ -482,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
ks->queued_len = 0;
+ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
netif_start_queue(ks->netdev);
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
@@ -635,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
/* schedule work to do the actual set of the data if needed */
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
schedule_work(&ks->rxctrl_work);
}
- spin_unlock(&ks->statelock);
+ spin_unlock_bh(&ks->statelock);
}
static int ks8851_set_mac_address(struct net_device *dev, void *addr)
@@ -1101,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int ret;
ks->netdev = netdev;
- ks->tx_space = 6144;
ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
ret = PTR_ERR_OR_ZERO(ks->gpio);
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 670c1de966db..3062cc0f9199 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work)
tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
ks->queued_len -= dequeued_len;
ks->tx_space = tx_space;
- spin_unlock(&ks->statelock);
+ spin_unlock_bh(&ks->statelock);
ks8851_unlock_spi(ks, &flags);
}
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 3885d6fbace1..26b00e66d912 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -474,13 +474,13 @@ static struct regmap_config regcfg = {
.unlock = regmap_unlock_mutex,
};
-static struct regmap_bus regmap_encx24j600 = {
+static const struct regmap_bus regmap_encx24j600 = {
.write = regmap_encx24j600_write,
.read = regmap_encx24j600_read,
.reg_update_bits = regmap_encx24j600_reg_update_bits,
};
-static struct regmap_config phycfg = {
+static const struct regmap_config phycfg = {
.name = "phy",
.reg_bits = 8,
.val_bits = 16,
@@ -492,7 +492,7 @@ static struct regmap_config phycfg = {
.volatile_reg = encx24j600_phymap_volatile,
};
-static struct regmap_bus phymap_encx24j600 = {
+static const struct regmap_bus phymap_encx24j600 = {
.reg_write = regmap_encx24j600_phy_reg_write,
.reg_read = regmap_encx24j600_phy_reg_read,
};
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index d0f4ff4ee075..3a63ec091413 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1029,7 +1029,7 @@ static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
}
static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
@@ -1127,8 +1127,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
if (netdev->phydev)
phy_ethtool_get_wol(netdev->phydev, wol);
- wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
- WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (wol->supported != adapter->phy_wol_supported)
+ netif_warn(adapter, drv, adapter->netdev,
+ "PHY changed its supported WOL! old=%x, new=%x\n",
+ adapter->phy_wol_supported, wol->supported);
+
+ wol->supported |= MAC_SUPPORTED_WAKES;
if (adapter->is_pci11x1x)
wol->supported |= WAKE_MAGICSECURE;
@@ -1143,7 +1147,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ /* WAKE_MAGICSECURE is a modifier of and only valid together with
+ * WAKE_MAGIC
+ */
+ if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
+ return -EINVAL;
+
+ if (netdev->phydev) {
+ struct ethtool_wolinfo phy_wol;
+ int ret;
+
+ phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
+
+ /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
+ * for PHYs that do not support WAKE_MAGICSECURE
+ */
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
+ phy_wol.wolopts &= ~WAKE_MAGIC;
+
+ ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ if (ret && (ret != -EOPNOTSUPP))
+ return ret;
+
+ if (ret == -EOPNOTSUPP)
+ adapter->phy_wolopts = 0;
+ else
+ adapter->phy_wolopts = phy_wol.wolopts;
+ } else {
+ adapter->phy_wolopts = 0;
+ }
+
adapter->wolopts = 0;
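+ /* Wake options already armed in the PHY are masked out so the MAC
+ * only arms the wake sources left for it to handle.
+ */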
+ wol->wolopts &= ~adapter->phy_wolopts;
if (wol->wolopts & WAKE_UCAST)
adapter->wolopts |= WAKE_UCAST;
if (wol->wolopts & WAKE_MCAST)
@@ -1164,10 +1200,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
}
+ wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
- : -ENETDOWN;
+ return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 6be8a43c908a..e418539565b1 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -3118,6 +3118,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_tx;
}
+
+#ifdef CONFIG_PM
+ if (adapter->netdev->phydev) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(netdev->phydev, &wol);
+ adapter->phy_wol_supported = wol.supported;
+ adapter->phy_wolopts = wol.wolopts;
+ }
+#endif
+
return 0;
close_tx:
@@ -3575,7 +3586,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
/* clear wake settings */
pmtctl = lan743x_csr_read(adapter, PMT_CTL);
- pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
@@ -3587,10 +3598,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
- if (adapter->wolopts & WAKE_PHY) {
- pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ if (adapter->phy_wolopts)
pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
- }
+
if (adapter->wolopts & WAKE_MAGIC) {
wucsr |= MAC_WUCSR_MPEN_;
macrx |= MAC_RX_RXEN_;
@@ -3686,7 +3696,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_csr_write(adapter, MAC_WUCSR2, 0);
lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
- if (adapter->wolopts)
+ if (adapter->wolopts || adapter->phy_wolopts)
lan743x_pm_set_wol(adapter);
if (adapter->is_pci11x1x) {
@@ -3710,6 +3720,7 @@ static int lan743x_pm_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
int ret;
pci_set_power_state(pdev, PCI_D0);
@@ -3728,6 +3739,30 @@ static int lan743x_pm_resume(struct device *dev)
return ret;
}
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
+
+ /* Clear the wol configuration and status bits. Note that
+ * the status bits are "Write One to Clear (W1C)"
+ */
+ data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
+ MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
+ MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
+ lan743x_csr_write(adapter, MAC_WUCSR, data);
+
+ data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
+ MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
+ lan743x_csr_write(adapter, MAC_WUCSR2, data);
+
+ data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
+ MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
+ MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
+ MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
+ MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
+ MAC_WK_SRC_WK_FR_SAVED_;
+ lan743x_csr_write(adapter, MAC_WK_SRC, data);
+
/* open netdev when netdev is at running state while resume.
* For instance, it is true when system wakes up after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3736,9 +3771,6 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
- ret = lan743x_csr_read(adapter, MAC_WK_SRC);
- netif_info(adapter, drv, adapter->netdev,
- "Wakeup source : 0x%08X\n", ret);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 645bc048e52e..3b2585a384e2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -61,6 +61,7 @@
#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
+#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
#define PMT_CTL_WOL_EN_ BIT(3)
@@ -227,12 +228,31 @@
#define MAC_WUCSR (0x140)
#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13)
+#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11)
+#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9)
+#define MAC_WUCSR_PFDA_FR_ BIT(7)
+#define MAC_WUCSR_WUFR_ BIT(6)
+#define MAC_WUCSR_MPR_ BIT(5)
+#define MAC_WUCSR_BCAST_FR_ BIT(4)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
#define MAC_WUCSR_MPEN_ BIT(1)
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17)
+#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16)
+#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15)
+#define MAC_WK_SRC_EEE_TX_WK_ BIT(14)
+#define MAC_WK_SRC_EEE_RX_WK_ BIT(13)
+#define MAC_WK_SRC_RFE_FR_WK_ BIT(12)
+#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11)
+#define MAC_WK_SRC_MP_FR_WK_ BIT(10)
+#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9)
+#define MAC_WK_SRC_WU_FR_WK_ BIT(8)
+#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7)
+
#define MAC_MP_SO_HI (0x148)
#define MAC_MP_SO_LO (0x14C)
@@ -295,6 +315,10 @@
#define RFE_INDX(index) (0x580 + (index << 2))
#define MAC_WUCSR2 (0x600)
+#define MAC_WUCSR2_NS_RCD_ BIT(7)
+#define MAC_WUCSR2_ARP_RCD_ BIT(6)
+#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5)
+#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4)
#define SGMII_ACC (0x720)
#define SGMII_ACC_SGMII_BZY_ BIT(31)
@@ -1018,6 +1042,8 @@ enum lan743x_sgmii_lsd {
LINK_2500_SLAVE
};
+#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
+ WAKE_MAGIC | WAKE_ARP)
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
@@ -1025,6 +1051,8 @@ struct lan743x_adapter {
#ifdef CONFIG_PM
u32 wolopts;
u8 sopass[SOPASS_MAX];
+ u32 phy_wolopts;
+ u32 phy_wol_supported;
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 06811c60d598..aec7066d83b3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -376,7 +376,6 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
mac_stats->SingleCollisionFrames =
lan966x->stats[idx + SYS_COUNT_TX_COL];
- mac_stats->MultipleCollisionFrames = 0;
mac_stats->FramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_UC] +
lan966x->stats[idx + SYS_COUNT_RX_MC] +
@@ -384,26 +383,19 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
mac_stats->FrameCheckSequenceErrors =
lan966x->stats[idx + SYS_COUNT_RX_CRC] +
lan966x->stats[idx + SYS_COUNT_RX_CRC];
- mac_stats->AlignmentErrors = 0;
mac_stats->OctetsTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_OCT] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
mac_stats->FramesWithDeferredXmissions =
lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
- mac_stats->LateCollisions = 0;
- mac_stats->FramesAbortedDueToXSColls = 0;
- mac_stats->FramesLostDueToIntMACXmitError = 0;
- mac_stats->CarrierSenseErrors = 0;
mac_stats->OctetsReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_OCT];
- mac_stats->FramesLostDueToIntMACRcvError = 0;
mac_stats->MulticastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_MC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
mac_stats->BroadcastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_BC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
- mac_stats->FramesWithExcessiveDeferral = 0;
mac_stats->MulticastFramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_MC];
mac_stats->BroadcastFramesReceivedOK =
@@ -546,7 +538,7 @@ static int lan966x_set_pauseparam(struct net_device *dev,
}
static int lan966x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a4414f63c9b1..a1471e38d118 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -581,7 +581,7 @@ static void lan966x_vcap_move(struct net_device *dev,
lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
}
-static struct vcap_operations lan966x_vcap_ops = {
+static const struct vcap_operations lan966x_vcap_ops = {
.validate_keyset = lan966x_vcap_validate_keyset,
.add_default_fields = lan966x_vcap_add_default_fields,
.cache_erase = lan966x_vcap_cache_erase,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index a06dc5a9b355..4f800c1a435d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1183,7 +1183,7 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
}
static int sparx5_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index 187efa1fc904..967c8621c250 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -1507,7 +1507,7 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
}
}
-static struct vcap_operations sparx5_vcap_ops = {
+static const struct vcap_operations sparx5_vcap_ops = {
.validate_keyset = sparx5_vcap_validate_keyset,
.add_default_fields = sparx5_vcap_add_default_fields,
.cache_erase = sparx5_vcap_cache_erase,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
index 9eccfa633c1a..6069ad95c27e 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -271,7 +271,7 @@ struct vcap_operations {
/* VCAP API Client control interface */
struct vcap_control {
- struct vcap_operations *ops; /* client supplied operations */
+ const struct vcap_operations *ops; /* client supplied operations */
const struct vcap_info *vcaps; /* client supplied vcap models */
const struct vcap_statistics *stats; /* client supplied vcap stats */
struct list_head list; /* list of vcap instances */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index b23c11b0647c..9c9d38042125 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -221,7 +221,7 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static struct vcap_operations test_callbacks = {
+static const struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
.cache_erase = test_cache_erase,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index fe4e166de8a0..51d9423b08a6 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -211,7 +211,7 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static struct vcap_operations test_callbacks = {
+static const struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
.cache_erase = test_cache_erase,
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 286f0d5697a1..901fbffbf718 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_MICROSOFT
config MICROSOFT_MANA
tristate "Microsoft Azure Network Adapter (MANA) support"
depends on PCI_MSI
- depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES)
+ depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 1332db9a08eb..e1d70d21e207 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -182,7 +182,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
dma_addr_t dma_handle;
void *buf;
- if (length < PAGE_SIZE || !is_power_of_2(length))
+ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
return -EINVAL;
gmi->dev = gc->dev;
@@ -717,7 +717,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
{
- unsigned int num_page = gmi->length / PAGE_SIZE;
+ unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
struct gdma_create_dma_region_req *req = NULL;
struct gdma_create_dma_region_resp resp = {};
struct gdma_context *gc = gd->gdma_context;
@@ -727,10 +727,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
int err;
int i;
- if (length < PAGE_SIZE || !is_power_of_2(length))
+ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
return -EINVAL;
- if (offset_in_page(gmi->virt_addr) != 0)
+ if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
return -EINVAL;
hwc = gc->hwc.driver_data;
@@ -751,7 +751,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
req->page_addr_list_len = num_page;
for (i = 0; i < num_page; i++)
- req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+ req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
if (err)
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index bbc4f9e16c98..cafded2f9382 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -362,12 +362,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
int err;
eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
- if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (eq_size < MANA_MIN_QSIZE)
+ eq_size = MANA_MIN_QSIZE;
cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
- if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (cq_size < MANA_MIN_QSIZE)
+ cq_size = MANA_MIN_QSIZE;
hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
if (!hwc_cq)
@@ -429,7 +429,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
dma_buf->num_reqs = q_depth;
- buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+ buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
@@ -497,8 +497,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
else
queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
- if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (queue_size < MANA_MIN_QSIZE)
+ queue_size = MANA_MIN_QSIZE;
hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
if (!hwc_wq)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d087cf954f75..91f10910ea44 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -481,7 +481,7 @@ static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
struct sock *sk = skb->sk;
int txq;
- txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
+ txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
if (txq != old_q && sk && sk_fullsock(sk) &&
rcu_access_pointer(sk->sk_dst_cache))
@@ -721,6 +721,13 @@ static void mana_cleanup_port_context(struct mana_port_context *apc)
apc->rxqs = NULL;
}
+static void mana_cleanup_indir_table(struct mana_port_context *apc)
+{
+ apc->indir_table_sz = 0;
+ kfree(apc->indir_table);
+ kfree(apc->rxobj_table);
+}
+
static int mana_init_port_context(struct mana_port_context *apc)
{
apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
@@ -962,7 +969,16 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
*max_sq = resp.max_num_sq;
*max_rq = resp.max_num_rq;
- *num_indir_entry = resp.num_indirection_ent;
+ if (resp.num_indirection_ent > 0 &&
+ resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
+ is_power_of_2(resp.num_indirection_ent)) {
+ *num_indir_entry = resp.num_indirection_ent;
+ } else {
+ netdev_warn(apc->ndev,
+ "Setting indirection table size to default %d for vPort %d\n",
+ MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
+ *num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
+ }
apc->port_handle = resp.vport;
ether_addr_copy(apc->mac_addr, resp.mac_addr);
@@ -1054,14 +1070,13 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
bool update_default_rxobj, bool update_key,
bool update_tab)
{
- u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
struct mana_cfg_rx_steer_req_v2 *req;
struct mana_cfg_rx_steer_resp resp = {};
struct net_device *ndev = apc->ndev;
u32 req_buf_size;
int err;
- req_buf_size = struct_size(req, indir_tab, num_entries);
+ req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
req = kzalloc(req_buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -1072,7 +1087,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
req->hdr.req.msg_version = GDMA_MESSAGE_V2;
req->vport = apc->port_handle;
- req->num_indir_entries = num_entries;
+ req->num_indir_entries = apc->indir_table_sz;
req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
indir_tab);
req->rx_enable = rx;
@@ -1111,7 +1126,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
}
netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
- apc->port_handle, num_entries);
+ apc->port_handle, apc->indir_table_sz);
out:
kfree(req);
return err;
@@ -1889,10 +1904,10 @@ static int mana_create_txq(struct mana_port_context *apc,
* to prevent overflow.
*/
txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
- BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+ BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
- cq_size = PAGE_ALIGN(cq_size);
+ cq_size = MANA_PAGE_ALIGN(cq_size);
gc = gd->gdma_context;
@@ -2189,8 +2204,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
if (err)
goto out;
- rq_size = PAGE_ALIGN(rq_size);
- cq_size = PAGE_ALIGN(cq_size);
+ rq_size = MANA_PAGE_ALIGN(rq_size);
+ cq_size = MANA_PAGE_ALIGN(cq_size);
/* Create RQ */
memset(&spec, 0, sizeof(spec));
@@ -2344,11 +2359,33 @@ static int mana_create_vport(struct mana_port_context *apc,
return mana_create_txq(apc, net);
}
+static int mana_rss_table_alloc(struct mana_port_context *apc)
+{
+ if (!apc->indir_table_sz) {
+ netdev_err(apc->ndev,
+ "Indirection table size not set for vPort %d\n",
+ apc->port_idx);
+ return -EINVAL;
+ }
+
+ apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
+ if (!apc->indir_table)
+ return -ENOMEM;
+
+ apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL);
+ if (!apc->rxobj_table) {
+ kfree(apc->indir_table);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void mana_rss_table_init(struct mana_port_context *apc)
{
int i;
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
apc->indir_table[i] =
ethtool_rxfh_indir_default(i, apc->num_queues);
}
@@ -2361,7 +2398,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
int i;
if (update_tab) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ for (i = 0; i < apc->indir_table_sz; i++) {
queue_idx = apc->indir_table[i];
apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
}
@@ -2466,7 +2503,6 @@ static int mana_init_port(struct net_device *ndev)
struct mana_port_context *apc = netdev_priv(ndev);
u32 max_txq, max_rxq, max_queues;
int port_idx = apc->port_idx;
- u32 num_indirect_entries;
int err;
err = mana_init_port_context(apc);
@@ -2474,7 +2510,7 @@ static int mana_init_port(struct net_device *ndev)
return err;
err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
- &num_indirect_entries);
+ &apc->indir_table_sz);
if (err) {
netdev_err(ndev, "Failed to query info for vPort %d\n",
port_idx);
@@ -2493,8 +2529,7 @@ static int mana_init_port(struct net_device *ndev)
return 0;
reset_apc:
- kfree(apc->rxqs);
- apc->rxqs = NULL;
+ mana_cleanup_port_context(apc);
return err;
}
@@ -2723,6 +2758,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
if (err)
goto free_net;
+ err = mana_rss_table_alloc(apc);
+ if (err)
+ goto reset_apc;
+
netdev_lockdep_set_classes(ndev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -2739,14 +2778,15 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
err = register_netdev(ndev);
if (err) {
netdev_err(ndev, "Unable to register netdev.\n");
- goto reset_apc;
+ goto free_indir;
}
return 0;
+free_indir:
+ mana_cleanup_indir_table(apc);
reset_apc:
- kfree(apc->rxqs);
- apc->rxqs = NULL;
+ mana_cleanup_port_context(apc);
free_net:
*ndev_storage = NULL;
netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
@@ -2798,6 +2838,8 @@ static int add_adev(struct gdma_dev *gd)
if (ret)
goto init_fail;
+ /* madev is owned by the auxiliary device */
+ madev = NULL;
ret = auxiliary_device_add(adev);
if (ret)
goto add_fail;
@@ -2872,16 +2914,30 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
if (!resuming) {
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
- if (err)
+ /* Log the port for which the probe failed and stop
+ * probing subsequent ports. Ports whose probe
+ * succeeded keep running, unless add_adev fails too.
+ */
+ if (err) {
+ dev_err(dev, "Probe Failed for port %d\n", i);
break;
+ }
}
} else {
for (i = 0; i < ac->num_ports; i++) {
rtnl_lock();
err = mana_attach(ac->ports[i]);
rtnl_unlock();
- if (err)
+ /* Log the port for which the attach failed and stop
+ * attaching subsequent ports. Ports whose attach
+ * succeeded keep running, unless add_adev fails too.
+ */
+ if (err) {
+ dev_err(dev, "Attach Failed for port %d\n", i);
break;
+ }
}
}
@@ -2897,6 +2953,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
+ struct mana_port_context *apc;
struct device *dev = gc->dev;
struct net_device *ndev;
int err;
@@ -2908,6 +2965,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
for (i = 0; i < ac->num_ports; i++) {
ndev = ac->ports[i];
+ apc = netdev_priv(ndev);
if (!ndev) {
if (i == 0)
dev_err(dev, "No net device to remove\n");
@@ -2931,6 +2989,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
}
unregister_netdevice(ndev);
+ mana_cleanup_indir_table(apc);
rtnl_unlock();
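
Editor's note: with the indirection table size now taken from the vPort query (and falling back to MANA_INDIRECT_TABLE_DEF_SIZE unless the reported value is a power of two), RSS queue selection keeps using a mask rather than a modulo. A small standalone sketch of that lookup, with hypothetical names, showing why the power-of-two check matters:

#include <linux/types.h>

/* Sketch: map a packet hash to a queue via a power-of-two indirection
 * table, mirroring hash & (apc->indir_table_sz - 1) in the driver.
 */
static u32 rss_pick_queue(const u32 *indir_table, u32 table_sz, u32 hash)
{
	/* table_sz must be a power of two so the mask covers every
	 * entry exactly once.
	 */
	return indir_table[hash & (table_sz - 1)];
}
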
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index ab2413d71f6c..146d5db1792f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -245,7 +245,9 @@ static u32 mana_get_rxfh_key_size(struct net_device *ndev)
static u32 mana_rss_indir_size(struct net_device *ndev)
{
- return MANA_INDIRECT_TABLE_SIZE;
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ return apc->indir_table_sz;
}
static int mana_get_rxfh(struct net_device *ndev,
@@ -257,7 +259,7 @@ static int mana_get_rxfh(struct net_device *ndev,
rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (rxfh->indir) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
rxfh->indir[i] = apc->indir_table[i];
}
@@ -273,8 +275,8 @@ static int mana_set_rxfh(struct net_device *ndev,
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
- u32 save_table[MANA_INDIRECT_TABLE_SIZE];
u8 save_key[MANA_HASH_KEY_SIZE];
+ u32 *save_table;
int i, err;
if (!apc->port_is_up)
@@ -284,13 +286,19 @@ static int mana_set_rxfh(struct net_device *ndev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
+ if (!save_table)
+ return -ENOMEM;
+
if (rxfh->indir) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- if (rxfh->indir[i] >= apc->num_queues)
- return -EINVAL;
+ for (i = 0; i < apc->indir_table_sz; i++)
+ if (rxfh->indir[i] >= apc->num_queues) {
+ err = -EINVAL;
+ goto cleanup;
+ }
update_table = true;
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ for (i = 0; i < apc->indir_table_sz; i++) {
save_table[i] = apc->indir_table[i];
apc->indir_table[i] = rxfh->indir[i];
}
@@ -306,7 +314,7 @@ static int mana_set_rxfh(struct net_device *ndev,
if (err) { /* recover to original values */
if (update_table) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
apc->indir_table[i] = save_table[i];
}
@@ -316,6 +324,9 @@ static int mana_set_rxfh(struct net_device *ndev,
mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
}
+cleanup:
+ kfree(save_table);
+
return err;
}
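
Editor's note: because the table length is no longer a compile-time constant, the rollback copy in mana_set_rxfh moves from the stack to the heap. The same pattern, sketched with generic names and a caller-supplied hardware-update callback standing in for the driver's steering call:

#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: snapshot a runtime-sized table before applying new values,
 * so a failed hardware update can be rolled back.
 */
static int apply_indir_update(u32 *table, u32 table_sz, const u32 *new_vals,
			      int (*push_to_hw)(const u32 *tab, u32 sz))
{
	u32 *save;
	int err;

	save = kmemdup(table, table_sz * sizeof(*table), GFP_KERNEL);
	if (!save)
		return -ENOMEM;

	memcpy(table, new_vals, table_sz * sizeof(*table));
	err = push_to_hw(table, table_sz);
	if (err)			/* recover the original values */
		memcpy(table, save, table_sz * sizeof(*table));

	kfree(save);
	return err;
}
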
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
index 5553af9c8085..0f1679ebad96 100644
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -6,6 +6,7 @@
#include <linux/io.h>
#include <linux/mm.h>
+#include <net/mana/gdma.h>
#include <net/mana/shm_channel.h>
#define PAGE_FRAME_L48_WIDTH_BYTES 6
@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
return err;
}
- if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
- !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+ if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
+ !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
return -EINVAL;
if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
@@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* EQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(eq_addr);
+ frame_addr = MANA_PFN(eq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* CQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(cq_addr);
+ frame_addr = MANA_PFN(cq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* RQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(rq_addr);
+ frame_addr = MANA_PFN(rq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* SQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(sq_addr);
+ frame_addr = MANA_PFN(sq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
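
Editor's note: the shared-memory setup carries each queue address as a 48-bit frame number plus 4 high bits accumulated in a separate word; switching PHYS_PFN to MANA_PFN keeps that frame number in device-page units. A compact sketch of the packing, reusing the masks already defined in this file:

#include <linux/types.h>

/* Sketch: split a device-page frame number into its low 48 bits and
 * the 4 bits above them, as done once per queue above.
 * frame_addr_seq selects which queue's high bits are being stored.
 */
static void pack_frame_addr(u64 frame_addr, u64 *low48, u64 *high4_acc,
			    int frame_addr_seq)
{
	*low48 = frame_addr & PAGE_FRAME_L48_MASK;
	*high4_acc |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
		      (frame_addr_seq * PAGE_FRAME_H4_WIDTH_BITS);
}
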
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 21a87a3fc556..7c9540a71725 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -980,7 +980,7 @@ static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
}
static int ocelot_port_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index cb32234a5bf1..b3c28260adf8 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -580,7 +580,7 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
EXPORT_SYMBOL(ocelot_hwstamp_set);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
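
Editor's note: these hunks follow the ethtool core change that passes drivers a kernel-internal struct kernel_ethtool_ts_info instead of the UAPI struct ethtool_ts_info; only the parameter type changes, the fields drivers fill stay the same. A minimal callback sketch under that assumption (names and field usage are illustrative):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

/* Sketch: a driver with software-only timestamping fills the
 * kernel-side ts_info exactly as it did the UAPI struct.
 */
static int example_get_ts_info(struct net_device *ndev,
			       struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;	/* no PTP hardware clock */
	return 0;
}
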
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 8e0a890381b6..46ffc2c20893 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -321,6 +321,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_control(rule, &enc_ctl);
+ if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags,
+ extack))
+ return -EOPNOTSUPP;
+
if (enc_ctl.mask->addr_type != 0xffff) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 2ccc2c2a06e3..1c61390677f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,6 +18,8 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define IONIC_ASIC_TYPE_ELBA 2
+
#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
@@ -47,6 +49,7 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
+ struct workqueue_struct *wq;
struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
@@ -54,6 +57,8 @@ struct ionic {
unsigned int nrxqs_per_lif;
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
+ cpumask_var_t *affinity_masks;
+ struct delayed_work doorbell_check_dwork;
struct work_struct nb_work;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
@@ -93,4 +98,6 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
+bool ionic_doorbell_wa(struct ionic *ionic);
+
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 6ba8d4aca0a0..b93791d6b593 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -326,6 +326,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
}
+#ifdef CONFIG_PPC64
+ /* Ensure MSI/MSI-X interrupts lie within addressable physical memory */
+ pdev->no_64bit_msi = 1;
+#endif
+
err = ionic_setup_one(ionic);
if (err)
goto err_out;
@@ -372,6 +377,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
return 0;
@@ -406,6 +412,8 @@ static void ionic_remove(struct pci_dev *pdev)
if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
+ if (ionic->lif->doorbell_wa)
+ cancel_delayed_work_sync(&ionic->doorbell_check_dwork);
ionic_lif_unregister(ionic->lif);
ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index c3ae11a48024..59e5a9f21105 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -220,7 +220,7 @@ static int netdev_show(struct seq_file *seq, void *v)
{
struct net_device *netdev = seq->private;
- seq_printf(seq, "%s\n", netdev->name);
+ seq_printf(seq, "%s\n", netdev_name(netdev));
return 0;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 874499337132..9e42d599840d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -43,11 +43,99 @@ static void ionic_watchdog_cb(struct timer_list *t)
work->type = IONIC_DW_TYPE_RX_MODE;
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
-static void ionic_watchdog_init(struct ionic *ionic)
+static void ionic_napi_schedule_do_softirq(struct napi_struct *napi)
+{
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+}
+
+void ionic_doorbell_napi_work(struct work_struct *work)
+{
+ struct ionic_qcq *qcq = container_of(work, struct ionic_qcq,
+ doorbell_napi_work);
+ unsigned long now, then, dif;
+
+ now = READ_ONCE(jiffies);
+ then = qcq->q.dbell_jiffies;
+ dif = now - then;
+
+ if (dif > qcq->q.dbell_deadline)
+ ionic_napi_schedule_do_softirq(&qcq->napi);
+}
+
+static int ionic_get_preferred_cpu(struct ionic *ionic,
+ struct ionic_intr_info *intr)
+{
+ int cpu;
+
+ cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_local_spread(0, dev_to_node(ionic->dev));
+
+ return cpu;
+}
+
+static void ionic_queue_dbell_napi_work(struct ionic *ionic,
+ struct ionic_qcq *qcq)
+{
+ int cpu;
+
+ if (!(qcq->flags & IONIC_QCQ_F_INTR))
+ return;
+
+ cpu = ionic_get_preferred_cpu(ionic, &qcq->intr);
+ queue_work_on(cpu, ionic->wq, &qcq->doorbell_napi_work);
+}
+
+static void ionic_doorbell_check_dwork(struct work_struct *work)
+{
+ struct ionic *ionic = container_of(work, struct ionic,
+ doorbell_check_dwork.work);
+ struct ionic_lif *lif = ionic->lif;
+
+ mutex_lock(&lif->queue_lock);
+
+ if (test_bit(IONIC_LIF_F_FW_STOPPING, lif->state) ||
+ test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ mutex_unlock(&lif->queue_lock);
+ return;
+ }
+
+ ionic_napi_schedule_do_softirq(&lif->adminqcq->napi);
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ int i;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_queue_dbell_napi_work(ionic, lif->txqcqs[i]);
+ ionic_queue_dbell_napi_work(ionic, lif->rxqcqs[i]);
+ }
+
+ if (lif->hwstamp_txq &&
+ lif->hwstamp_txq->flags & IONIC_QCQ_F_INTR)
+ ionic_napi_schedule_do_softirq(&lif->hwstamp_txq->napi);
+ if (lif->hwstamp_rxq &&
+ lif->hwstamp_rxq->flags & IONIC_QCQ_F_INTR)
+ ionic_napi_schedule_do_softirq(&lif->hwstamp_rxq->napi);
+ }
+ mutex_unlock(&lif->queue_lock);
+
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
+}
+
+bool ionic_doorbell_wa(struct ionic *ionic)
+{
+ u8 asic_type = ionic->idev.dev_info.asic_type;
+
+ return !asic_type || asic_type == IONIC_ASIC_TYPE_ELBA;
+}
+
+static int ionic_watchdog_init(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -63,6 +151,31 @@ static void ionic_watchdog_init(struct ionic *ionic)
idev->fw_status_ready = true;
idev->fw_generation = IONIC_FW_STS_F_GENERATION &
ioread8(&idev->dev_info_regs->fw_status);
+
+ ionic->wq = alloc_workqueue("%s-wq", WQ_UNBOUND, 0,
+ dev_name(ionic->dev));
+ if (!ionic->wq) {
+ dev_err(ionic->dev, "alloc_workqueue failed");
+ return -ENOMEM;
+ }
+
+ if (ionic_doorbell_wa(ionic))
+ INIT_DELAYED_WORK(&ionic->doorbell_check_dwork,
+ ionic_doorbell_check_dwork);
+
+ return 0;
+}
+
+void ionic_queue_doorbell_check(struct ionic *ionic, int delay)
+{
+ int cpu;
+
+ if (!ionic->lif->doorbell_wa)
+ return;
+
+ cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr);
+ queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork,
+ delay);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -94,6 +207,7 @@ int ionic_dev_setup(struct ionic *ionic)
struct device *dev = ionic->dev;
int size;
u32 sig;
+ int err;
/* BAR0: dev_cmd and interrupts */
if (num_bars < 1) {
@@ -129,7 +243,9 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
- ionic_watchdog_init(ionic);
+ err = ionic_watchdog_init(ionic);
+ if (err)
+ return err;
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -161,6 +277,7 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
+ destroy_workqueue(ionic->wq);
mutex_destroy(&idev->cmb_inuse_lock);
}
@@ -273,7 +390,7 @@ do_check_time:
if (work) {
work->type = IONIC_DW_TYPE_LIF_RESET;
work->fw_status = fw_status_ready;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
}
@@ -703,10 +820,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
q->dbval | q->head_idx);
q->dbell_jiffies = jiffies;
-
- if (q_to_qcq(q)->napi_qcq)
- mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
}
}
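
Editor's note: the per-queue napi_deadline timers are replaced by a single self-rescheduling delayed work on a driver-owned unbound workqueue, queued on a CPU derived from the interrupt affinity. Stripped to its skeleton with illustrative names (the real driver keys the delay off IONIC_NAPI_DEADLINE):

#include <linux/container_of.h>
#include <linux/workqueue.h>

/* Sketch: a periodic check that re-arms itself on a chosen CPU. */
struct checker {
	struct workqueue_struct *wq;
	struct delayed_work dwork;
	int preferred_cpu;
	unsigned long delay;
};

static void checker_fn(struct work_struct *work)
{
	struct checker *c = container_of(work, struct checker, dwork.work);

	/* ... poke the queues / schedule NAPI here ... */

	queue_delayed_work_on(c->preferred_cpu, c->wq, &c->dwork, c->delay);
}
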
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index f30eee4a5a80..c647033f3ad2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -28,7 +28,7 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
-#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
+#define IONIC_NAPI_DEADLINE (HZ) /* 1 sec */
#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
@@ -280,9 +280,9 @@ struct ionic_intr_info {
u64 rearm_count;
unsigned int index;
unsigned int vector;
- unsigned int cpu;
u32 dim_coal_hw;
- cpumask_t affinity_mask;
+ cpumask_var_t *affinity_mask;
+ struct irq_affinity_notify aff_notify;
};
struct ionic_cq {
@@ -375,7 +375,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
@@ -386,6 +388,8 @@ bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
+void ionic_doorbell_napi_work(struct work_struct *work);
+void ionic_queue_doorbell_check(struct ionic *ionic, int delay);
bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
bool ionic_txq_poke_doorbell(struct ionic_queue *q);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 91183965a6b7..4619fd74f3e3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -11,6 +11,8 @@
#include "ionic_ethtool.h"
#include "ionic_stats.h"
+#define IONIC_MAX_RX_COPYBREAK min(U16_MAX, IONIC_MAX_BUF_LEN)
+
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
u32 i;
@@ -872,10 +874,17 @@ static int ionic_set_tunable(struct net_device *dev,
const void *data)
{
struct ionic_lif *lif = netdev_priv(dev);
+ u32 rx_copybreak;
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
- lif->rx_copybreak = *(u32 *)data;
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > IONIC_MAX_RX_COPYBREAK) {
+ netdev_err(dev, "Max supported rx_copybreak size: %u\n",
+ IONIC_MAX_RX_COPYBREAK);
+ return -EINVAL;
+ }
+ lif->rx_copybreak = (u16)rx_copybreak;
break;
default:
return -EOPNOTSUPP;
@@ -968,7 +977,7 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
}
static int ionic_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
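
Editor's note: rx_copybreak now lands in narrower storage, so the setter bounds the user value by min(U16_MAX, IONIC_MAX_BUF_LEN) before the cast. The same guard, sketched with generic names:

#include <linux/types.h>

/* Sketch: reject a tunable that would overflow its u16 storage;
 * max_copybreak stands in for IONIC_MAX_RX_COPYBREAK.
 */
static int set_copybreak(u16 *dst, u32 val, u32 max_copybreak)
{
	if (val > max_copybreak)
		return -EINVAL;
	*dst = (u16)val;
	return 0;
}
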
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9a1825edf0d0..9c85c0706c6e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -71,7 +71,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_FW_CONTROL_V1 = 255,
};
-/**
+/*
* enum ionic_status_code - Device command return codes
*/
enum ionic_status_code {
@@ -112,6 +112,7 @@ enum ionic_notifyq_opcode {
/**
* struct ionic_admin_cmd - General admin command format
* @opcode: Opcode for the command
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @cmd_data: Opcode-specific command bytes
*/
@@ -125,6 +126,7 @@ struct ionic_admin_cmd {
/**
* struct ionic_admin_comp - General admin command completion format
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @cmd_data: Command-specific bytes
* @color: Color bit (Always 0 for commands issued to the
@@ -147,6 +149,7 @@ static inline u8 color_match(u8 color, u8 done_color)
/**
* struct ionic_nop_cmd - NOP command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
*/
struct ionic_nop_cmd {
u8 opcode;
@@ -156,6 +159,7 @@ struct ionic_nop_cmd {
/**
* struct ionic_nop_comp - NOP command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_nop_comp {
u8 status;
@@ -166,6 +170,7 @@ struct ionic_nop_comp {
* struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
* @type: Device type
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_init_cmd {
u8 opcode;
@@ -176,6 +181,7 @@ struct ionic_dev_init_cmd {
/**
* struct ionic_dev_init_comp - Device init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_init_comp {
u8 status;
@@ -185,6 +191,7 @@ struct ionic_dev_init_comp {
/**
* struct ionic_dev_reset_cmd - Device reset command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_reset_cmd {
u8 opcode;
@@ -194,6 +201,7 @@ struct ionic_dev_reset_cmd {
/**
* struct ionic_dev_reset_comp - Reset command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_reset_comp {
u8 status;
@@ -207,6 +215,7 @@ struct ionic_dev_reset_comp {
* struct ionic_dev_identify_cmd - Driver/device identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_identify_cmd {
u8 opcode;
@@ -218,6 +227,7 @@ struct ionic_dev_identify_cmd {
* struct ionic_dev_identify_comp - Driver/device identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_identify_comp {
u8 status;
@@ -242,6 +252,7 @@ enum ionic_os_type {
* @kernel_ver: Kernel version, numeric format
* @kernel_ver_str: Kernel version, string format
* @driver_ver_str: Driver version, string format
+ * @words: word access to struct contents
*/
union ionic_drv_identity {
struct {
@@ -267,7 +278,9 @@ enum ionic_dev_capability {
* union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
+ * @rsvd: reserved byte(s)
* @nports: Number of ports provisioned
+ * @rsvd2: reserved byte(s)
* @nlifs: Number of LIFs provisioned
* @nintrs: Number of interrupts provisioned
* @ndbpgs_per_lif: Number of doorbell pages per LIF
@@ -284,6 +297,7 @@ enum ionic_dev_capability {
* @hwstamp_mult: Hardware tick to nanosecond multiplier.
* @hwstamp_shift: Hardware tick to nanosecond divisor (power of two).
* @capabilities: Device capabilities
+ * @words: word access to struct contents
*/
union ionic_dev_identity {
struct {
@@ -317,6 +331,7 @@ enum ionic_lif_type {
* @opcode: opcode
* @type: LIF type (enum ionic_lif_type)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_identify_cmd {
u8 opcode;
@@ -329,6 +344,7 @@ struct ionic_lif_identify_cmd {
* struct ionic_lif_identify_comp - LIF identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_identify_comp {
u8 status;
@@ -416,7 +432,7 @@ enum ionic_txq_feature {
};
/**
- * struct ionic_hwstamp_bits - Hardware timestamp decoding bits
+ * enum ionic_hwstamp_bits - Hardware timestamp decoding bits
* @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value
* @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset
* from the base cq descriptor.
@@ -429,6 +445,7 @@ enum ionic_hwstamp_bits {
/**
* struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
* @qtype: Hardware Queue Type
+ * @rsvd: reserved byte(s)
* @qid_count: Number of Queue IDs of the logical type
* @qid_base: Minimum Queue ID of the logical type
*/
@@ -454,12 +471,14 @@ enum ionic_lif_state {
/**
* union ionic_lif_config - LIF configuration
* @state: LIF state (enum ionic_lif_state)
+ * @rsvd: reserved byte(s)
* @name: LIF name
* @mtu: MTU
* @mac: Station MAC address
* @vlan: Default Vlan ID
* @features: Features (enum ionic_eth_hw_features)
* @queue_count: Queue counts per queue-type
+ * @words: word access to struct contents
*/
union ionic_lif_config {
struct {
@@ -481,33 +500,39 @@ union ionic_lif_config {
* @capabilities: LIF capabilities
*
* @eth: Ethernet identify structure
- * @version: Ethernet identify structure version
- * @max_ucast_filters: Number of perfect unicast addresses supported
- * @max_mcast_filters: Number of perfect multicast addresses supported
- * @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximum size of frames to be sent
- * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode)
- * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
- * @config: LIF config struct with features, mtu, mac, q counts
+ * @eth.version: Ethernet identify structure version
+ * @eth.rsvd: reserved byte(s)
+ * @eth.max_ucast_filters: Number of perfect unicast addresses supported
+ * @eth.max_mcast_filters: Number of perfect multicast addresses supported
+ * @eth.min_frame_size: Minimum size of frames to be sent
+ * @eth.max_frame_size: Maximum size of frames to be sent
+ * @eth.rsvd2: reserved byte(s)
+ * @eth.hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode)
+ * @eth.hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
+ * @eth.rsvd3: reserved byte(s)
+ * @eth.config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
- * @version: RDMA version of opcodes and queue descriptors
- * @qp_opcodes: Number of RDMA queue pair opcodes supported
- * @admin_opcodes: Number of RDMA admin opcodes supported
- * @npts_per_lif: Page table size per LIF
- * @nmrs_per_lif: Number of memory regions per LIF
- * @nahs_per_lif: Number of address handles per LIF
- * @max_stride: Max work request stride
- * @cl_stride: Cache line stride
- * @pte_stride: Page table entry stride
- * @rrq_stride: Remote RQ work request stride
- * @rsq_stride: Remote SQ work request stride
- * @dcqcn_profiles: Number of DCQCN profiles
- * @aq_qtype: RDMA Admin Qtype
- * @sq_qtype: RDMA Send Qtype
- * @rq_qtype: RDMA Receive Qtype
- * @cq_qtype: RDMA Completion Qtype
- * @eq_qtype: RDMA Event Qtype
+ * @rdma.version: RDMA version of opcodes and queue descriptors
+ * @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported
+ * @rdma.admin_opcodes: Number of RDMA admin opcodes supported
+ * @rdma.rsvd: reserved byte(s)
+ * @rdma.npts_per_lif: Page table size per LIF
+ * @rdma.nmrs_per_lif: Number of memory regions per LIF
+ * @rdma.nahs_per_lif: Number of address handles per LIF
+ * @rdma.max_stride: Max work request stride
+ * @rdma.cl_stride: Cache line stride
+ * @rdma.pte_stride: Page table entry stride
+ * @rdma.rrq_stride: Remote RQ work request stride
+ * @rdma.rsq_stride: Remote SQ work request stride
+ * @rdma.dcqcn_profiles: Number of DCQCN profiles
+ * @rdma.rsvd_dimensions: reserved byte(s)
+ * @rdma.aq_qtype: RDMA Admin Qtype
+ * @rdma.sq_qtype: RDMA Send Qtype
+ * @rdma.rq_qtype: RDMA Receive Qtype
+ * @rdma.cq_qtype: RDMA Completion Qtype
+ * @rdma.eq_qtype: RDMA Event Qtype
+ * @words: word access to struct contents
*/
union ionic_lif_identity {
struct {
@@ -558,7 +583,9 @@ union ionic_lif_identity {
* @opcode: Opcode
* @type: LIF type (enum ionic_lif_type)
* @index: LIF index
+ * @rsvd: reserved byte(s)
* @info_pa: Destination address for LIF info (struct ionic_lif_info)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -572,7 +599,9 @@ struct ionic_lif_init_cmd {
/**
* struct ionic_lif_init_comp - LIF init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @hw_index: Hardware index of the initialized LIF
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_init_comp {
u8 status;
@@ -584,9 +613,11 @@ struct ionic_lif_init_comp {
/**
* struct ionic_q_identify_cmd - queue identify command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_type: LIF type (enum ionic_lif_type)
* @type: Logical queue type (enum ionic_logical_qtype)
* @ver: Highest queue type version that the driver supports
+ * @rsvd2: reserved byte(s)
*/
struct ionic_q_identify_cmd {
u8 opcode;
@@ -600,8 +631,10 @@ struct ionic_q_identify_cmd {
/**
* struct ionic_q_identify_comp - queue identify command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @ver: Queue type version that can be used with FW
+ * @rsvd2: reserved byte(s)
*/
struct ionic_q_identify_comp {
u8 status;
@@ -615,12 +648,14 @@ struct ionic_q_identify_comp {
* union ionic_q_identity - queue identity information
* @version: Queue type version that can be used with FW
* @supported: Bitfield of queue versions, first bit = ver 0
+ * @rsvd: reserved byte(s)
* @features: Queue features (enum ionic_q_feature, etc)
* @desc_sz: Descriptor size
* @comp_sz: Completion descriptor size
* @sg_desc_sz: Scatter/Gather descriptor size
* @max_sg_elems: Maximum number of Scatter/Gather elements
* @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ * @words: word access to struct contents
*/
union ionic_q_identity {
struct {
@@ -640,8 +675,10 @@ union ionic_q_identity {
/**
* struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @type: Logical queue type
* @ver: Queue type version
+ * @rsvd1: reserved byte(s)
* @lif_index: LIF index
* @index: (LIF, qtype) relative admin queue index
* @intr_index: Interrupt control register index, or Event queue index
@@ -667,6 +704,7 @@ union ionic_q_identity {
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
+ * @rsvd2: reserved byte(s)
* @features: Mask of queue features to enable, if not in the flags above.
*/
struct ionic_q_init_cmd {
@@ -698,9 +736,11 @@ struct ionic_q_init_cmd {
/**
* struct ionic_q_init_comp - Queue init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @hw_index: Hardware Queue ID
* @hw_type: Hardware Queue type
+ * @rsvd2: reserved byte(s)
* @color: Color
*/
struct ionic_q_init_comp {
@@ -800,7 +840,7 @@ enum ionic_txq_desc_opcode {
* will set CWR flag in the first segment if
* CWR is set in the template header, and
* clear CWR in remaining segments.
- * @flags:
+ * flags:
* vlan:
* Insert an L2 VLAN header using @vlan_tci
* encap:
@@ -813,13 +853,14 @@ enum ionic_txq_desc_opcode {
* TSO start
* tso_eot:
* TSO end
- * @num_sg_elems: Number of scatter-gather elements in SG
+ * num_sg_elems: Number of scatter-gather elements in SG
* descriptor
- * @addr: First data buffer's DMA address
+ * addr: First data buffer's DMA address
* (Subsequent data buffers are on txq_sg_desc)
* @len: First data buffer's length, in bytes
* @vlan_tci: VLAN tag to insert in the packet (if requested
* by @V-bit). Includes .1p and .1q tags
+ * @hword0: half word padding
* @hdr_len: Length of packet headers, including
* encapsulating outer header, if applicable
* Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and
@@ -830,10 +871,12 @@ enum ionic_txq_desc_opcode {
* IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
* inner-most L4 payload, so inclusive of
* inner-most L4 header.
+ * @hword1: half word padding
* @mss: Desired MSS value for TSO; only applicable for
* IONIC_TXQ_DESC_OPCODE_TSO
* @csum_start: Offset from packet to first byte checked in L4 checksum
* @csum_offset: Offset from csum_start to L4 checksum field
+ * @hword2: half word padding
*/
struct ionic_txq_desc {
__le64 cmd;
@@ -901,6 +944,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
* struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
+ * @rsvd: reserved byte(s)
*/
struct ionic_txq_sg_elem {
__le64 addr;
@@ -927,7 +971,9 @@ struct ionic_txq_sg_desc_v1 {
/**
* struct ionic_txq_comp - Ethernet transmit queue completion descriptor
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_txq_comp {
@@ -953,6 +999,7 @@ enum ionic_rxq_desc_opcode {
* receive, including actual bytes received,
* are recorded in Rx completion descriptor.
*
+ * @rsvd: reserved byte(s)
* @len: Data buffer's length, in bytes
* @addr: Data buffer's DMA address
*/
@@ -967,6 +1014,7 @@ struct ionic_rxq_desc {
* struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
+ * @rsvd: reserved byte(s)
*/
struct ionic_rxq_sg_elem {
__le64 addr;
@@ -1170,6 +1218,7 @@ enum ionic_pkt_class {
* @lif_index: LIF index
* @index: Queue index
* @oper: Operation (enum ionic_q_control_oper)
+ * @rsvd: reserved byte(s)
*/
struct ionic_q_control_cmd {
u8 opcode;
@@ -1182,7 +1231,7 @@ struct ionic_q_control_cmd {
typedef struct ionic_admin_comp ionic_q_control_comp;
-enum q_control_oper {
+enum ionic_q_control_oper {
IONIC_Q_DISABLE = 0,
IONIC_Q_ENABLE = 1,
IONIC_Q_HANG_RESET = 2,
@@ -1216,7 +1265,7 @@ enum ionic_xcvr_state {
IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
};
-/**
+/*
* enum ionic_xcvr_pid - Supported link modes
*/
enum ionic_xcvr_pid {
@@ -1351,6 +1400,7 @@ struct ionic_xcvr_status {
* @fec_type: fec type (enum ionic_port_fec_type)
* @pause_type: pause type (enum ionic_port_pause_type)
* @loopback_mode: loopback mode (enum ionic_port_loopback_mode)
+ * @words: word access to struct contents
*/
union ionic_port_config {
struct {
@@ -1382,6 +1432,7 @@ union ionic_port_config {
* @speed: link speed (in Mbps)
* @link_down_count: number of times link went from up to down
* @fec_type: fec type (enum ionic_port_fec_type)
+ * @rsvd: reserved byte(s)
* @xcvr: transceiver status
*/
struct ionic_port_status {
@@ -1399,6 +1450,7 @@ struct ionic_port_status {
* @opcode: opcode
* @index: port index
* @ver: Highest version of identify supported by driver
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_identify_cmd {
u8 opcode;
@@ -1411,6 +1463,7 @@ struct ionic_port_identify_cmd {
* struct ionic_port_identify_comp - Port identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_identify_comp {
u8 status;
@@ -1422,7 +1475,9 @@ struct ionic_port_identify_comp {
* struct ionic_port_init_cmd - Port initialization command
* @opcode: opcode
* @index: port index
+ * @rsvd: reserved byte(s)
* @info_pa: destination address for port info (struct ionic_port_info)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_port_init_cmd {
u8 opcode;
@@ -1435,6 +1490,7 @@ struct ionic_port_init_cmd {
/**
* struct ionic_port_init_comp - Port initialization command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_init_comp {
u8 status;
@@ -1445,6 +1501,7 @@ struct ionic_port_init_comp {
* struct ionic_port_reset_cmd - Port reset command
* @opcode: opcode
* @index: port index
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_reset_cmd {
u8 opcode;
@@ -1455,6 +1512,7 @@ struct ionic_port_reset_cmd {
/**
* struct ionic_port_reset_comp - Port reset command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_reset_comp {
u8 status;
@@ -1510,6 +1568,7 @@ enum ionic_port_attr {
* @opcode: Opcode
* @index: Port index
* @attr: Attribute type (enum ionic_port_attr)
+ * @rsvd: reserved byte(s)
* @state: Port state
* @speed: Port speed
* @mtu: Port MTU
@@ -1518,6 +1577,7 @@ enum ionic_port_attr {
* @pause_type: Port pause type setting
* @loopback_mode: Port loopback mode
* @stats_ctl: Port stats setting
+ * @rsvd2: reserved byte(s)
*/
struct ionic_port_setattr_cmd {
u8 opcode;
@@ -1540,6 +1600,7 @@ struct ionic_port_setattr_cmd {
/**
* struct ionic_port_setattr_comp - Port set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @color: Color bit
*/
struct ionic_port_setattr_comp {
@@ -1553,6 +1614,7 @@ struct ionic_port_setattr_comp {
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_getattr_cmd {
u8 opcode;
@@ -1564,6 +1626,7 @@ struct ionic_port_getattr_cmd {
/**
* struct ionic_port_getattr_comp - Port get attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @state: Port state
* @speed: Port speed
* @mtu: Port MTU
@@ -1571,6 +1634,7 @@ struct ionic_port_getattr_cmd {
* @fec_type: Port FEC type setting
* @pause_type: Port pause type setting
* @loopback_mode: Port loopback mode
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_port_getattr_comp {
@@ -1593,9 +1657,11 @@ struct ionic_port_getattr_comp {
* struct ionic_lif_status - LIF status register
* @eid: most recent NotifyQ event id
* @port_num: port the LIF is connected to
+ * @rsvd: reserved byte(s)
* @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
* @link_down_count: number of times link went from up to down
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_status {
__le64 eid;
@@ -1610,7 +1676,9 @@ struct ionic_lif_status {
/**
* struct ionic_lif_reset_cmd - LIF reset command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @index: LIF index
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_reset_cmd {
u8 opcode;
@@ -1643,9 +1711,11 @@ enum ionic_dev_attr {
* struct ionic_dev_setattr_cmd - Set Device attributes on the NIC
* @opcode: Opcode
* @attr: Attribute type (enum ionic_dev_attr)
+ * @rsvd: reserved byte(s)
* @state: Device state (enum ionic_dev_state)
* @name: The bus info, e.g. PCI slot-device-function, 0 terminated
* @features: Device features
+ * @rsvd2: reserved byte(s)
*/
struct ionic_dev_setattr_cmd {
u8 opcode;
@@ -1662,7 +1732,9 @@ struct ionic_dev_setattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @features: Device features
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_dev_setattr_comp {
@@ -1679,6 +1751,7 @@ struct ionic_dev_setattr_comp {
* struct ionic_dev_getattr_cmd - Get Device attributes from the NIC
* @opcode: opcode
* @attr: Attribute type (enum ionic_dev_attr)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_getattr_cmd {
u8 opcode;
@@ -1687,9 +1760,11 @@ struct ionic_dev_getattr_cmd {
};
/**
- * struct ionic_dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_getattr_comp - Device set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @features: Device features
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_dev_getattr_comp {
@@ -1702,7 +1777,7 @@ struct ionic_dev_getattr_comp {
u8 color;
};
-/**
+/*
* RSS parameters
*/
#define IONIC_RSS_HASH_KEY_SIZE 40
@@ -1726,6 +1801,7 @@ enum ionic_rss_hash_types {
* @IONIC_LIF_ATTR_RSS: LIF RSS attribute
* @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
* @IONIC_LIF_ATTR_TXSTAMP: LIF TX timestamping mode
+ * @IONIC_LIF_ATTR_MAX: maximum attribute value
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1736,6 +1812,7 @@ enum ionic_lif_attr {
IONIC_LIF_ATTR_RSS = 5,
IONIC_LIF_ATTR_STATS_CTRL = 6,
IONIC_LIF_ATTR_TXSTAMP = 7,
+ IONIC_LIF_ATTR_MAX = 255,
};
/**
@@ -1749,11 +1826,13 @@ enum ionic_lif_attr {
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
- * @types: The hash types to enable (see rss_hash_types)
- * @key: The hash secret key
- * @addr: Address for the indirection table shared memory
+ * @rss.types: The hash types to enable (see rss_hash_types)
+ * @rss.key: The hash secret key
+ * @rss.rsvd: reserved byte(s)
+ * @rss.addr: Address for the indirection table shared memory
* @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
- * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1772,7 +1851,7 @@ struct ionic_lif_setattr_cmd {
__le64 addr;
} rss;
u8 stats_ctl;
- __le16 txstamp_mode;
+ __le16 txstamp_mode;
u8 rsvd[60];
} __packed;
};
@@ -1780,8 +1859,10 @@ struct ionic_lif_setattr_cmd {
/**
* struct ionic_lif_setattr_comp - LIF set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @features: features (enum ionic_eth_hw_features)
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_lif_setattr_comp {
@@ -1800,6 +1881,7 @@ struct ionic_lif_setattr_comp {
* @opcode: Opcode
* @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_getattr_cmd {
u8 opcode;
@@ -1811,13 +1893,14 @@ struct ionic_lif_getattr_cmd {
/**
* struct ionic_lif_getattr_comp - LIF get attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @state: LIF state (enum ionic_lif_state)
- * @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
- * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_lif_getattr_comp {
@@ -1838,12 +1921,15 @@ struct ionic_lif_getattr_comp {
/**
* struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock
* @opcode: Opcode
+ * @rsvd1: reserved byte(s)
* @lif_index: LIF index
+ * @rsvd2: reserved byte(s)
* @tick: Hardware stamp tick of an instant in time.
* @nsec: Nanosecond stamp of the same instant.
* @frac: Fractional nanoseconds at the same instant.
* @mult: Cycle to nanosecond multiplier.
* @shift: Cycle to nanosecond divisor (power of two).
+ * @rsvd3: reserved byte(s)
*/
struct ionic_lif_setphc_cmd {
u8 opcode;
@@ -1870,6 +1956,7 @@ enum ionic_rx_mode {
/**
* struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
* IONIC_RX_MODE_F_UNICAST: Accept known unicast packets
@@ -1878,6 +1965,7 @@ enum ionic_rx_mode {
* IONIC_RX_MODE_F_PROMISC: Accept any packets
* IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets
* IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
+ * @rsvd2: reserved byte(s)
*/
struct ionic_rx_mode_set_cmd {
u8 opcode;
@@ -1904,13 +1992,14 @@ enum ionic_rx_filter_match_type {
* @qid: Queue ID
* @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
* @vlan: VLAN filter
- * @vlan: VLAN ID
+ * @vlan.vlan: VLAN ID
* @mac: MAC filter
- * @addr: MAC address (network-byte order)
+ * @mac.addr: MAC address (network-byte order)
* @mac_vlan: MACVLAN filter
- * @vlan: VLAN ID
- * @addr: MAC address (network-byte order)
+ * @mac_vlan.vlan: VLAN ID
+ * @mac_vlan.addr: MAC address (network-byte order)
* @pkt_class: Packet classification filter
+ * @rsvd: reserved byte(s)
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
@@ -1937,8 +2026,10 @@ struct ionic_rx_filter_add_cmd {
/**
* struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @filter_id: Filter ID
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_rx_filter_add_comp {
@@ -1953,8 +2044,10 @@ struct ionic_rx_filter_add_comp {
/**
* struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @filter_id: Filter ID
+ * @rsvd2: reserved byte(s)
*/
struct ionic_rx_filter_del_cmd {
u8 opcode;
@@ -2000,6 +2093,7 @@ enum ionic_vf_link_status {
* @trust: enable VF trust
* @linkstate: set link up or down
* @stats_pa: set DMA address for VF stats
+ * @pad: reserved byte(s)
*/
struct ionic_vf_setattr_cmd {
u8 opcode;
@@ -2031,6 +2125,7 @@ struct ionic_vf_setattr_comp {
* @opcode: Opcode
* @attr: Attribute type (enum ionic_vf_attr)
* @vf_index: VF index
+ * @rsvd: reserved byte(s)
*/
struct ionic_vf_getattr_cmd {
u8 opcode;
@@ -2064,8 +2159,8 @@ enum ionic_vf_ctrl_opcode {
/**
* struct ionic_vf_ctrl_cmd - VF control command
* @opcode: Opcode for the command
- * @vf_index: VF Index. It is unused if op START_ALL is used.
* @ctrl_opcode: VF control operation type
+ * @vf_index: VF Index. It is unused if op START_ALL is used.
*/
struct ionic_vf_ctrl_cmd {
u8 opcode;
@@ -2089,7 +2184,7 @@ struct ionic_vf_ctrl_comp {
* struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
- *
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_identify_cmd {
u8 opcode;
@@ -2101,6 +2196,7 @@ struct ionic_qos_identify_cmd {
* struct ionic_qos_identify_comp - QoS identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_identify_comp {
u8 status;
@@ -2118,7 +2214,7 @@ struct ionic_qos_identify_comp {
#define IONIC_QOS_ALL_PCP 0xFF
#define IONIC_DSCP_BLOCK_SIZE 8
-/**
+/*
* enum ionic_qos_class
*/
enum ionic_qos_class {
@@ -2174,6 +2270,7 @@ enum ionic_qos_sched_type {
* @dot1q_pcp: Dot1q pcp value
* @ndscp: Number of valid dscp values in the ip_dscp field
* @ip_dscp: IP dscp values
+ * @words: word access to struct contents
*/
union ionic_qos_config {
struct {
@@ -2219,8 +2316,9 @@ union ionic_qos_config {
* union ionic_qos_identity - QoS identity structure
* @version: Version of the identify structure
* @type: QoS system type
- * @nclasses: Number of usable QoS classes
+ * @rsvd: reserved byte(s)
* @config: Current configuration of classes
+ * @words: word access to struct contents
*/
union ionic_qos_identity {
struct {
@@ -2236,7 +2334,9 @@ union ionic_qos_identity {
* struct ionic_qos_init_cmd - QoS config init command
* @opcode: Opcode
* @group: QoS class id
+ * @rsvd: reserved byte(s)
* @info_pa: destination address for qos info
+ * @rsvd1: reserved byte(s)
*/
struct ionic_qos_init_cmd {
u8 opcode;
@@ -2252,6 +2352,7 @@ typedef struct ionic_admin_comp ionic_qos_init_comp;
* struct ionic_qos_reset_cmd - QoS config reset command
* @opcode: Opcode
* @group: QoS class id
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_reset_cmd {
u8 opcode;
@@ -2260,8 +2361,10 @@ struct ionic_qos_reset_cmd {
};
/**
- * struct ionic_qos_clear_port_stats_cmd - Qos config reset command
+ * struct ionic_qos_clear_stats_cmd - Qos config reset command
* @opcode: Opcode
+ * @group_bitmap: bitmap of groups to be cleared
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_clear_stats_cmd {
u8 opcode;
@@ -2274,6 +2377,7 @@ typedef struct ionic_admin_comp ionic_qos_reset_comp;
/**
* struct ionic_fw_download_cmd - Firmware download command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @addr: dma address of the firmware buffer
* @offset: offset of the firmware buffer within the full image
* @length: number of valid bytes in the firmware buffer
@@ -2297,6 +2401,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
* @IONIC_FW_INSTALL_STATUS: Firmware installation status
* @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
* @IONIC_FW_ACTIVATE_STATUS: Firmware activate status
+ * @IONIC_FW_UPDATE_CLEANUP: Clean up after an interrupted fw update
*/
enum ionic_fw_control_oper {
IONIC_FW_RESET = 0,
@@ -2312,8 +2417,10 @@ enum ionic_fw_control_oper {
/**
* struct ionic_fw_control_cmd - Firmware control command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @oper: firmware control operation (enum ionic_fw_control_oper)
* @slot: slot to activate
+ * @rsvd1: reserved byte(s)
*/
struct ionic_fw_control_cmd {
u8 opcode;
@@ -2326,8 +2433,10 @@ struct ionic_fw_control_cmd {
/**
* struct ionic_fw_control_comp - Firmware control copletion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @slot: Slot where the firmware was installed
+ * @rsvd1: reserved byte(s)
* @color: Color bit
*/
struct ionic_fw_control_comp {
@@ -2346,7 +2455,9 @@ struct ionic_fw_control_comp {
/**
* struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
+ * @rsvd2: reserved byte(s)
*
* There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
@@ -2362,6 +2473,7 @@ struct ionic_rdma_reset_cmd {
/**
* struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @qid_ver: (qid | (RDMA version << 24))
* @cid: intr, eq_id, or cq_id
@@ -2369,6 +2481,7 @@ struct ionic_rdma_reset_cmd {
* @depth_log2: log base two of queue depth
* @stride_log2: log base two of queue stride
* @dma_addr: address of the queue memory
+ * @rsvd2: reserved byte(s)
*
* The same command struct is used to create an RDMA event queue, completion
* queue, or RDMA admin queue. The cid is an interrupt number for an event
@@ -2425,6 +2538,7 @@ struct ionic_notifyq_event {
* @ecode: event code = IONIC_EVENT_LINK_CHANGE
* @link_status: link up/down, with error bits (enum ionic_port_status)
* @link_speed: speed of the network link
+ * @rsvd: reserved byte(s)
*
* Sent when the network link state changes between UP and DOWN
*/
@@ -2442,6 +2556,7 @@ struct ionic_link_change_event {
* @ecode: event code = IONIC_EVENT_RESET
* @reset_code: reset type
* @state: 0=pending, 1=complete, 2=error
+ * @rsvd: reserved byte(s)
*
* Sent when the NIC or some subsystem is going to be or
* has been reset.
@@ -2458,6 +2573,7 @@ struct ionic_reset_event {
* struct ionic_heartbeat_event - Sent periodically by NIC to indicate health
* @eid: event number
* @ecode: event code = IONIC_EVENT_HEARTBEAT
+ * @rsvd: reserved byte(s)
*/
struct ionic_heartbeat_event {
__le64 eid;
@@ -2481,6 +2597,7 @@ struct ionic_log_event {
* struct ionic_xcvr_event - Transceiver change event
* @eid: event number
* @ecode: event code = IONIC_EVENT_XCVR
+ * @rsvd: reserved byte(s)
*/
struct ionic_xcvr_event {
__le64 eid;
@@ -2488,7 +2605,7 @@ struct ionic_xcvr_event {
u8 rsvd[54];
};
-/**
+/*
* struct ionic_port_stats - Port statistics structure
*/
struct ionic_port_stats {
@@ -2646,8 +2763,7 @@ enum ionic_oflow_drop_stats {
IONIC_OFLOW_DROP_MAX,
};
-/**
- * struct port_pb_stats - packet buffers system stats
+/* struct ionic_port_pb_stats - packet buffers system stats
* uses ionic_pb_buffer_drop_stats for drop_counts[]
*/
struct ionic_port_pb_stats {
@@ -2681,7 +2797,9 @@ struct ionic_port_pb_stats {
* @pause_type: supported pause types
* @loopback_mode: supported loopback mode
* @speeds: supported speeds
+ * @rsvd2: reserved byte(s)
* @config: current port configuration
+ * @words: word access to struct contents
*/
union ionic_port_identity {
struct {
@@ -2707,7 +2825,8 @@ union ionic_port_identity {
* @status: Port status data
* @stats: Port statistics data
* @mgmt_stats: Port management statistics data
- * @port_pb_drop_stats: uplink pb drop stats
+ * @rsvd: reserved byte(s)
+ * @pb_stats: uplink pb drop stats
*/
struct ionic_port_info {
union ionic_port_config config;
@@ -2721,7 +2840,7 @@ struct ionic_port_info {
struct ionic_port_pb_stats pb_stats;
};
-/**
+/*
* struct ionic_lif_stats - LIF statistics structure
*/
struct ionic_lif_stats {
@@ -2983,8 +3102,10 @@ struct ionic_hwstamp_regs {
* bit 4-7 - 4 bit generation number, changes on fw restart
* @fw_heartbeat: Firmware heartbeat counter
* @serial_num: Serial number
+ * @rsvd_pad1024: reserved byte(s)
* @fw_version: Firmware version
- * @hwstamp_regs: Hardware current timestamp registers
+ * @hwstamp: Hardware current timestamp registers
+ * @words: word access to struct contents
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -3014,7 +3135,9 @@ union ionic_dev_info_regs {
* @done: Done indicator, bit 0 == 1 when command is complete
* @cmd: Opcode-specific command bytes
* @comp: Opcode-specific response bytes
+ * @rsvd: reserved byte(s)
* @data: Opcode-specific side-data
+ * @words: word access to struct contents
*/
union ionic_dev_cmd_regs {
struct {
@@ -3032,6 +3155,7 @@ union ionic_dev_cmd_regs {
* union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
+ * @words: word access to struct contents
*/
union ionic_dev_regs {
struct {
@@ -3098,6 +3222,7 @@ union ionic_adminq_comp {
* interrupts when armed.
* @qid_lo: Queue destination for the producer index and flags (low bits)
* @qid_hi: Queue destination for the producer index and flags (high bits)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_doorbell {
__le16 p_index;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 24870da3f484..aa0cc31dfe6e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -126,13 +126,13 @@ static void ionic_lif_deferred_work(struct work_struct *work)
} while (true);
}
-void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
struct ionic_deferred_work *work)
{
- spin_lock_bh(&def->lock);
- list_add_tail(&work->list, &def->list);
- spin_unlock_bh(&def->lock);
- schedule_work(&def->work);
+ spin_lock_bh(&lif->deferred.lock);
+ list_add_tail(&work->list, &lif->deferred.list);
+ spin_unlock_bh(&lif->deferred.lock);
+ queue_work(lif->ionic->wq, &lif->deferred.work);
}
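
A minimal sketch of the deferred-work pattern above, assuming hypothetical example_* names (the real driver uses struct ionic_lif and its private workqueue): items are queued under a BH-safe lock, then the work is kicked on the driver's own workqueue via queue_work() rather than on the system workqueue via schedule_work().

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_deferred {
	spinlock_t lock;		/* protects the list */
	struct list_head list;		/* pending deferred items */
	struct work_struct work;	/* drains the list */
};

struct example_dev {
	struct workqueue_struct *wq;	/* private workqueue, e.g. from alloc_workqueue() */
	struct example_deferred deferred;
};

static void example_deferred_enqueue(struct example_dev *edev,
				     struct list_head *entry)
{
	spin_lock_bh(&edev->deferred.lock);
	list_add_tail(entry, &edev->deferred.list);
	spin_unlock_bh(&edev->deferred.lock);

	/* run on the driver's own workqueue instead of the system one */
	queue_work(edev->wq, &edev->deferred.work);
}
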
static void ionic_link_status_check(struct ionic_lif *lif)
@@ -207,19 +207,12 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
work->type = IONIC_DW_TYPE_LINK_STATUS;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
} else {
ionic_link_status_check(lif);
}
}
-static void ionic_napi_deadline(struct timer_list *timer)
-{
- struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
-
- napi_schedule(&qcq->napi);
-}
-
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@@ -237,12 +230,12 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
const char *name;
if (lif->registered)
- name = lif->netdev->name;
+ name = netdev_name(lif->netdev);
else
name = dev_name(dev);
snprintf(intr->name, sizeof(intr->name),
- "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
+ "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
return devm_request_irq(dev, intr->vector, ionic_isr,
0, intr->name, &qcq->napi);
@@ -272,6 +265,18 @@ static void ionic_intr_free(struct ionic *ionic, int index)
clear_bit(index, ionic->intrs);
}
+static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);
+
+ cpumask_copy(*intr->affinity_mask, mask);
+}
+
+static void ionic_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
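
A hedged sketch of registering an IRQ affinity notifier so a driver-cached cpumask follows changes made through /proc/irq/<n>/smp_affinity; the example_* names are hypothetical and only mirror the helpers added above.

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

struct example_intr {
	cpumask_var_t *affinity_mask;		/* mask the driver consults at queue setup */
	struct irq_affinity_notify aff_notify;	/* embedded notifier block */
};

static void example_aff_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct example_intr *intr =
		container_of(notify, struct example_intr, aff_notify);

	cpumask_copy(*intr->affinity_mask, mask);
}

static void example_aff_release(struct kref *ref)
{
	/* nothing to free: the notifier is embedded in example_intr */
}

static int example_register_aff_notifier(struct example_intr *intr,
					 unsigned int irq)
{
	intr->aff_notify.notify = example_aff_notify;
	intr->aff_notify.release = example_aff_release;

	return irq_set_affinity_notifier(irq, &intr->aff_notify);
}
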
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
struct ionic_queue *q = &qcq->q;
@@ -304,12 +309,12 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
if (ret)
return ret;
- if (qcq->napi.poll)
- napi_enable(&qcq->napi);
-
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ napi_enable(&qcq->napi);
+ irq_set_affinity_notifier(qcq->intr.vector,
+ &qcq->intr.aff_notify);
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
@@ -339,13 +344,15 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
if (qcq->flags & IONIC_QCQ_F_INTR) {
struct ionic_dev *idev = &lif->ionic->idev;
+ if (lif->doorbell_wa)
+ cancel_work_sync(&qcq->doorbell_napi_work);
cancel_work_sync(&qcq->dim.work);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
synchronize_irq(qcq->intr.vector);
+ irq_set_affinity_notifier(qcq->intr.vector, NULL);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
- del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communication error, don't bother with
@@ -480,11 +487,11 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
{
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
- n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
+ cpumask_var_t *affinity_mask;
int err;
if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
@@ -516,10 +523,19 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
}
/* try to get the irq on the local numa node first */
- qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
- dev_to_node(lif->ionic->dev));
- if (qcq->intr.cpu != -1)
- cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+ affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
+ if (cpumask_empty(*affinity_mask)) {
+ unsigned int cpu;
+
+ cpu = cpumask_local_spread(qcq->intr.index,
+ dev_to_node(lif->ionic->dev));
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, *affinity_mask);
+ }
+
+ qcq->intr.affinity_mask = affinity_mask;
+ qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
+ qcq->intr.aff_notify.release = ionic_irq_aff_release;
netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
return 0;
@@ -676,6 +692,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
INIT_WORK(&new->dim.work, ionic_dim_work);
new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ if (lif->doorbell_wa)
+ INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work);
*qcq = new;
@@ -834,11 +852,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
- }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -911,9 +926,6 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
-
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -1168,7 +1180,6 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
- bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@@ -1184,6 +1195,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
ionic_adminq_service, NULL, NULL);
+
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
if (lif->hwstamp_rxq)
@@ -1191,7 +1203,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -1205,15 +1217,14 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
- if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
- resched = true;
- if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
- resched = true;
- if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
- resched = true;
- if (resched)
- mod_timer(&lif->adminqcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
+ if (lif->doorbell_wa) {
+ if (!a_work)
+ ionic_adminq_poke_doorbell(&lif->adminqcq->q);
+ if (lif->hwstamp_rxq && !rx_work)
+ ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q);
+ if (lif->hwstamp_txq && !tx_work)
+ ionic_txq_poke_doorbell(&lif->hwstamp_txq->q);
+ }
return work_done;
}
@@ -1385,7 +1396,7 @@ static void ionic_ndo_set_rx_mode(struct net_device *netdev)
}
work->type = IONIC_DW_TYPE_RX_MODE;
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
@@ -3141,6 +3152,44 @@ err_out:
return err;
}
+static int ionic_affinity_masks_alloc(struct ionic *ionic)
+{
+ cpumask_var_t *affinity_masks;
+ int nintrs = ionic->nintrs;
+ int i;
+
+ affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!affinity_masks)
+ return -ENOMEM;
+
+ for (i = 0; i < nintrs; i++) {
+ if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL,
+ dev_to_node(ionic->dev)))
+ goto err_out;
+ }
+
+ ionic->affinity_masks = affinity_masks;
+
+ return 0;
+
+err_out:
+ for (--i; i >= 0; i--)
+ free_cpumask_var(affinity_masks[i]);
+ kfree(affinity_masks);
+
+ return -ENOMEM;
+}
+
+static void ionic_affinity_masks_free(struct ionic *ionic)
+{
+ int i;
+
+ for (i = 0; i < ionic->nintrs; i++)
+ free_cpumask_var(ionic->affinity_masks[i]);
+ kfree(ionic->affinity_masks);
+ ionic->affinity_masks = NULL;
+}
+
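
cpumask_var_t is heap-allocated only when CONFIG_CPUMASK_OFFSTACK is set and is a plain embedded array otherwise, which is why every zalloc_cpumask_var_node() above must be paired with free_cpumask_var(). A minimal sketch of that idiom (hypothetical example_* names):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_alloc_mask(cpumask_var_t *mask, int node)
{
	/* kmallocs the mask on @node with CONFIG_CPUMASK_OFFSTACK;
	 * a successful no-op otherwise
	 */
	if (!zalloc_cpumask_var_node(mask, GFP_KERNEL, node))
		return -ENOMEM;

	return 0;
}

static void example_free_mask(cpumask_var_t *mask)
{
	free_cpumask_var(*mask);
}
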
int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
@@ -3232,11 +3281,15 @@ int ionic_lif_alloc(struct ionic *ionic)
ionic_debugfs_add_lif(lif);
+ err = ionic_affinity_masks_alloc(ionic);
+ if (err)
+ goto err_out_free_lif_info;
+
/* allocate control queues and txrx queue arrays */
ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out_free_lif_info;
+ goto err_out_free_affinity_masks;
/* allocate rss indirection table */
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
@@ -3258,6 +3311,8 @@ int ionic_lif_alloc(struct ionic *ionic)
err_out_free_qcqs:
ionic_qcqs_free(lif);
+err_out_free_affinity_masks:
+ ionic_affinity_masks_free(lif->ionic);
err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
@@ -3358,6 +3413,7 @@ int ionic_restart_lif(struct ionic_lif *lif)
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
return 0;
@@ -3388,6 +3444,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
* just need to reanimate it.
*/
ionic_init_devinfo(ionic);
+ ionic_reset(ionic);
err = ionic_identify(ionic);
if (err)
goto err_out;
@@ -3430,6 +3487,8 @@ void ionic_lif_free(struct ionic_lif *lif)
if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
ionic_lif_reset(lif);
+ ionic_affinity_masks_free(lif->ionic);
+
/* free lif info */
kfree(lif->identity);
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
@@ -3503,14 +3562,11 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
-
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
@@ -3697,6 +3753,7 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
+ lif->doorbell_wa = ionic_doorbell_wa(lif->ionic);
set_bit(IONIC_LIF_F_INITED, lif->state);
@@ -3731,7 +3788,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
},
};
- strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
+ strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev),
sizeof(ctx.cmd.lif_setattr.name));
ionic_adminq_post_wait(lif, &ctx);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 08f4266fe2aa..3e1005293c4a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -84,12 +84,11 @@ struct ionic_qcq {
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
- struct timer_list napi_deadline;
struct ionic_queue q;
struct ionic_cq cq;
struct napi_struct napi;
- struct ionic_qcq *napi_qcq;
struct ionic_intr_info intr;
+ struct work_struct doorbell_napi_work;
struct dentry *dentry;
};
@@ -207,11 +206,12 @@ struct ionic_lif {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
- u32 rx_copybreak;
u64 rxq_features;
- u16 rx_mode;
u64 hw_features;
+ u16 rx_copybreak;
+ u16 rx_mode;
bool registered;
+ bool doorbell_wa;
u16 lif_type;
unsigned int link_down_count;
unsigned int nmcast;
@@ -226,11 +226,11 @@ struct ionic_lif {
u32 info_sz;
struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
- u16 rss_types;
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
u8 *rss_ind_tbl;
dma_addr_t rss_ind_tbl_pa;
u32 rss_ind_tbl_sz;
+ u16 rss_types;
struct ionic_rx_filters rx_filters;
u32 rx_coalesce_usecs; /* what the user asked for */
@@ -333,7 +333,7 @@ static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
-void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
struct ionic_deferred_work *work);
int ionic_lif_alloc(struct ionic *ionic);
int ionic_lif_init(struct ionic_lif *lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index c1259324b0be..0f817c3f92d8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -287,7 +287,7 @@ bool ionic_notifyq_service(struct ionic_cq *cq)
clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
} else {
work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 5dba6d2d633c..fc79baad4561 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -23,7 +23,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp);
+ struct ionic_txq_comp *comp,
+ bool in_napi);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
@@ -480,6 +481,20 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
+static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
+{
+ int i;
+
+ for (i = 0; i < nbufs; i++) {
+ dma_unmap_page(q->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ buf_info->page = NULL;
+ buf_info++;
+ }
+}
+
static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct net_device *netdev,
struct bpf_prog *xdp_prog,
@@ -493,6 +508,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct netdev_queue *nq;
struct xdp_frame *xdpf;
int remain_len;
+ int nbufs = 1;
int frag_len;
int err = 0;
@@ -502,7 +518,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
XDP_PACKET_HEADROOM, frag_len, false);
dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
- XDP_PACKET_HEADROOM, len,
+ XDP_PACKET_HEADROOM, frag_len,
DMA_FROM_DEVICE);
prefetchw(&xdp_buf.data_hard_start);
@@ -542,6 +558,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
if (page_is_pfmemalloc(bi->page))
xdp_buff_set_frag_pfmemalloc(&xdp_buf);
} while (remain_len > 0);
+ nbufs += sinfo->nr_frags;
}
xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
@@ -574,34 +591,28 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
goto out_xdp_abort;
}
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
buf_info->page,
buf_info->page_offset,
true);
__netif_tx_unlock(nq);
- if (err) {
+ if (unlikely(err)) {
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
goto out_xdp_abort;
}
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
/* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
- /* unmap the pages before handing them to a different device */
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
- if (err) {
+ if (unlikely(err)) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
goto out_xdp_abort;
}
- buf_info->page = NULL;
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
@@ -867,9 +878,6 @@ void ionic_rx_fill(struct ionic_queue *q)
q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
-
- mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
@@ -934,7 +942,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
- work_done = ionic_tx_cq_service(cq, budget);
+ work_done = ionic_tx_cq_service(cq, budget, !!budget);
if (unlikely(!budget))
return budget;
@@ -952,8 +960,8 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
- mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (!work_done && cq->bound_q->lif->doorbell_wa)
+ ionic_txq_poke_doorbell(&qcq->q);
return work_done;
}
@@ -995,8 +1003,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
- mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (!work_done && cq->bound_q->lif->doorbell_wa)
+ ionic_rxq_poke_doorbell(&qcq->q);
return work_done;
}
@@ -1009,7 +1017,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_qcq *txqcq;
struct ionic_lif *lif;
struct ionic_cq *txcq;
- bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
@@ -1018,7 +1025,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget);
if (unlikely(!budget))
return budget;
@@ -1041,12 +1048,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
- if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
- resched = true;
- if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
- resched = true;
- if (resched)
- mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (lif->doorbell_wa) {
+ if (!rx_work_done)
+ ionic_rxq_poke_doorbell(&rxqcq->q);
+ if (!tx_work_done)
+ ionic_txq_poke_doorbell(&txqcq->q);
+ }
return rx_work_done;
}
@@ -1058,7 +1065,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
@@ -1075,7 +1082,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
@@ -1151,7 +1158,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp)
+ struct ionic_txq_comp *comp,
+ bool in_napi)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -1203,11 +1211,13 @@ static void ionic_tx_clean(struct ionic_queue *q,
desc_info->bytes = skb->len;
stats->clean++;
- napi_consume_skb(skb, 1);
+ napi_consume_skb(skb, likely(in_napi) ? 1 : 0);
}
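
napi_consume_skb() treats a zero budget as "not running in NAPI context" and frees the skb immediately instead of batching it, which is why the new in_napi flag (derived from !!budget in the callers) is threaded down to the Tx cleanup: NAPI poll can be invoked with budget == 0 from netpoll, where only Tx completions may be reaped. A minimal sketch of that convention; the example_service_* helpers are hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical helpers, defined elsewhere in a real driver */
static void example_service_tx(struct napi_struct *napi, int budget);
static int example_service_rx(struct napi_struct *napi, int budget);

/* called from example_service_tx() for each completed Tx skb */
static void example_free_completed_skb(struct sk_buff *skb, int budget)
{
	napi_consume_skb(skb, budget);	/* budget == 0: immediate, non-batched free */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	example_service_tx(napi, budget);	/* reaps Tx even when budget == 0 */

	if (unlikely(!budget))			/* netpoll: no Rx work, no napi_complete() */
		return budget;

	work_done = example_service_rx(napi, budget);
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
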
static bool ionic_tx_service(struct ionic_cq *cq,
- unsigned int *total_pkts, unsigned int *total_bytes)
+ unsigned int *total_pkts,
+ unsigned int *total_bytes,
+ bool in_napi)
{
struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -1229,7 +1239,7 @@ static bool ionic_tx_service(struct ionic_cq *cq,
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, comp);
+ ionic_tx_clean(q, desc_info, comp, in_napi);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1243,7 +1253,9 @@ static bool ionic_tx_service(struct ionic_cq *cq,
return true;
}
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi)
{
unsigned int work_done = 0;
unsigned int bytes = 0;
@@ -1252,7 +1264,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
if (work_to_do == 0)
return 0;
- while (ionic_tx_service(cq, &pkts, &bytes)) {
+ while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
@@ -1278,7 +1290,7 @@ void ionic_tx_flush(struct ionic_cq *cq)
{
u32 work_done;
- work_done = ionic_tx_cq_service(cq, cq->num_descs);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs, false);
if (work_done)
ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
@@ -1295,7 +1307,7 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL);
+ ionic_tx_clean(q, desc_info, NULL, false);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1316,7 +1328,7 @@ static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
int err;
err = skb_cow_head(skb, 0);
- if (err)
+ if (unlikely(err))
return err;
if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1340,7 +1352,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
int err;
err = skb_cow_head(skb, 0);
- if (err)
+ if (unlikely(err))
return err;
if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1357,7 +1369,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
}
static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
- struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_desc *desc,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -1365,7 +1377,6 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
- struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 flags = 0;
u64 cmd;
@@ -1445,7 +1456,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (err) {
+ if (unlikely(err)) {
/* clean up mapping from ionic_tx_map_skb */
ionic_tx_desc_unmap_bufs(q, desc_info);
return err;
@@ -1503,10 +1514,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(netdev, q, desc_info, skb,
- desc_addr, desc_nsge, desc_len,
- hdrlen, mss, outer_csum, vlan_tci, has_vlan,
- start, done);
+ ionic_tx_tso_post(netdev, q, desc, skb, desc_addr, desc_nsge,
+ desc_len, hdrlen, mss, outer_csum, vlan_tci,
+ has_vlan, start, done);
start = false;
/* Buffer information is stored with the first tso descriptor */
desc_info = &q->tx_info[q->head_idx];
@@ -1731,7 +1741,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
linearize:
if (too_many_frags) {
err = skb_linearize(skb);
- if (err)
+ if (unlikely(err))
return err;
q_to_tx_stats(q)->linearize++;
}
@@ -1765,7 +1775,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
else
err = ionic_tx(netdev, q, skb);
- if (err)
+ if (unlikely(err))
goto err_out_drop;
return NETDEV_TX_OK;
@@ -1811,7 +1821,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
else
err = ionic_tx(netdev, q, skb);
- if (err)
+ if (unlikely(err))
goto err_out_drop;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 2fcbcecb41d1..fef4b2b0b1f2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -571,9 +571,6 @@ static u64 ctx_addr_sig_regs[][3] = {
#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
-#define lower32(x) ((u32)((x) & 0xffffffff))
-#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
-
static struct netxen_recv_crb recv_crb_registers[] = {
/* Instance 0 */
{
@@ -723,9 +720,9 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
- lower32(recv_ctx->phys_addr));
+ lower_32_bits(recv_ctx->phys_addr));
NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
- upper32(recv_ctx->phys_addr));
+ upper_32_bits(recv_ctx->phys_addr));
NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
signature | port);
return 0;
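
The open-coded lower32()/upper32() macros above are dropped in favor of the kernel's lower_32_bits()/upper_32_bits() helpers. A trivial sketch of splitting a 64-bit DMA address across a pair of 32-bit registers (hypothetical example_* name and register layout):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void example_write_ctx_addr(void __iomem *lo_reg, void __iomem *hi_reg,
				   dma_addr_t phys_addr)
{
	writel(lower_32_bits(phys_addr), lo_reg);	/* bits 31:0  */
	writel(upper_32_bits(phys_addr), hi_reg);	/* bits 63:32 */
}
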
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f497f6ca1018..97b059be1041 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1137,7 +1137,7 @@ static int qede_set_channels(struct net_device *dev,
}
static int qede_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct qede_dev *edev = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 747cc5e2bb78..63e3dac4d5f7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -321,7 +321,7 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
-int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index 1db0f021c645..adafc894797e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -17,7 +17,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
void qede_ptp_disable(struct qede_dev *edev);
int qede_ptp_enable(struct qede_dev *edev);
-int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
union eth_rx_cqe *cqe,
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index ff3b89e9028e..ad06da0fdaa0 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
seq_printf(s, "IRQ : %d\n",
qca->spi_dev->irq);
- seq_printf(s, "INTR REQ : %u\n",
- qca->intr_req);
- seq_printf(s, "INTR SVC : %u\n",
- qca->intr_svc);
+ seq_printf(s, "INTR : %lx\n",
+ qca->intr);
seq_printf(s, "SPI max speed : %lu\n",
(unsigned long)qca->spi_dev->max_speed_hz);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5799ecc88a87..8f7ce6b51a1c 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,6 +35,8 @@
#define MAX_DMA_BURST_LEN 5000
+#define SPI_INTR 0
+
/* Modules parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
@@ -579,14 +581,14 @@ qcaspi_spi_thread(void *data)
continue;
}
- if ((qca->intr_req == qca->intr_svc) &&
+ if (!test_bit(SPI_INTR, &qca->intr) &&
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
- netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
- qca->intr_req - qca->intr_svc,
+ netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
+ qca->intr,
qca->txr.skb[qca->txr.head]);
qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
@@ -600,8 +602,7 @@ qcaspi_spi_thread(void *data)
msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
}
- if (qca->intr_svc != qca->intr_req) {
- qca->intr_svc = qca->intr_req;
+ if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
start_spi_intr_handling(qca, &intr_cause);
if (intr_cause & SPI_INT_CPU_ON) {
@@ -663,7 +664,7 @@ qcaspi_intr_handler(int irq, void *data)
{
struct qcaspi *qca = data;
- qca->intr_req++;
+ set_bit(SPI_INTR, &qca->intr);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -679,8 +680,7 @@ qcaspi_netdev_open(struct net_device *dev)
if (!qca)
return -EINVAL;
- qca->intr_req = 1;
- qca->intr_svc = 0;
+ set_bit(SPI_INTR, &qca->intr);
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
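
The intr_req/intr_svc counter pair is replaced with a single atomic flag bit: the hard IRQ sets it and wakes the SPI thread, and the thread consumes it with test_and_clear_bit(), so a second interrupt arriving mid-service is not lost. A condensed sketch of that producer/consumer idiom (hypothetical example_* names):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/sched.h>

#define EXAMPLE_INTR_BIT	0

struct example_spi {
	unsigned long intr;		/* atomic bitmask of pending events */
	struct task_struct *thread;	/* kthread that services the device */
};

/* hypothetical servicing routine, defined elsewhere in a real driver */
static void example_handle_interrupt(struct example_spi *spi);

static irqreturn_t example_intr_handler(int irq, void *data)
{
	struct example_spi *spi = data;

	set_bit(EXAMPLE_INTR_BIT, &spi->intr);	/* mark work pending (atomic) */
	if (spi->thread)
		wake_up_process(spi->thread);

	return IRQ_HANDLED;
}

static void example_thread_iteration(struct example_spi *spi)
{
	/* consume the event exactly once, even if the IRQ fires again meanwhile */
	if (test_and_clear_bit(EXAMPLE_INTR_BIT, &spi->intr))
		example_handle_interrupt(spi);
}
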
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index d59cb2352cee..8f4808695e82 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -81,8 +81,7 @@ struct qcaspi {
struct qcafrm_handle frm_handle;
struct sk_buff *rx_skb;
- unsigned int intr_req;
- unsigned int intr_svc;
+ unsigned long intr;
u16 reset_count;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 7b9e04884575..714d2e804694 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1608,7 +1608,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
if (!tp->dash_enabled) {
rtl_set_d3_pll_down(tp, !wolopts);
- tp->dev->wol_enabled = wolopts ? 1 : 0;
+ tp->dev->ethtool->wol_enabled = wolopts ? 1 : 0;
}
}
@@ -2274,7 +2274,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168B family. */
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
+ /* This one is very old and rare, let's see if anybody complains.
+ * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
+ */
/* 8101 family. */
{ 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
@@ -5086,12 +5088,10 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
- tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
+ tp->irq_mask |= SYSErr | RxFIFOOver;
else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
/* special workaround needed */
tp->irq_mask |= RxFIFOOver;
- else
- tp->irq_mask |= RxOverflow;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5478,7 +5478,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_d3_pll_down(tp, true);
} else {
rtl_set_d3_pll_down(tp, false);
- dev->wol_enabled = 1;
+ dev->ethtool->wol_enabled = 1;
}
jumbo_max = rtl_jumbo_max(tp);
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index b03fae7a0f72..9b7559c88bee 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -33,6 +33,7 @@ config RAVB
select CRC32
select MII
select MDIO_BITBANG
+ select PAGE_POOL
select PHYLIB
select RESET_CONTROLLER
help
@@ -58,4 +59,14 @@ config RENESAS_GEN4_PTP
help
Renesas R-Car Gen4 gPTP device driver.
+config RTSN
+ tristate "Renesas Ethernet-TSN support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ select CRC32
+ select PHYLIB
+ select RENESAS_GEN4_PTP
+ help
+ Renesas Ethernet-TSN device driver.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 9070acfd6aaf..f65fc76f8b4d 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_RAVB) += ravb.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
+
+obj-$(CONFIG_RTSN) += rtsn.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b48935ec7e28..9893c91af105 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -19,6 +19,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool/types.h>
#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
@@ -257,6 +258,7 @@ enum APSR_BIT {
APSR_CMSW = 0x00000010,
APSR_RDM = 0x00002000,
APSR_TDM = 0x00004000,
+ APSR_MIISELECT = 0x01000000, /* R-Car V4M only */
};
/* RCR */
@@ -1039,7 +1041,7 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- bool (*receive)(struct net_device *ndev, int *quota, int q);
+ int (*receive)(struct net_device *ndev, int budget, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
int (*dmac_init)(struct net_device *ndev);
@@ -1051,9 +1053,10 @@ struct ravb_hw_info {
int stats_len;
u32 tccr_mask;
u32 rx_max_frame_size;
- u32 rx_max_desc_use;
+ u32 rx_buffer_size;
u32 rx_desc_size;
unsigned aligned_tx: 1;
+ unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
@@ -1070,6 +1073,11 @@ struct ravb_hw_info {
unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
+struct ravb_rx_buffer {
+ struct page *page;
+ unsigned int offset;
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -1093,7 +1101,8 @@ struct ravb_private {
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
- struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct page_pool *rx_pool[NUM_RX_QUEUE];
+ struct ravb_rx_buffer *rx_buffers[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4d100283c30f..c02fb296bf7d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -30,6 +30,7 @@
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>
+#include <net/page_pool/helpers.h>
#include "ravb.h"
@@ -113,25 +114,6 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static struct sk_buff *
-ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
- gfp_t gfp_mask)
-{
- struct sk_buff *skb;
- u32 reserve;
-
- skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
- gfp_mask);
- if (!skb)
- return NULL;
-
- reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
- if (reserve)
- skb_reserve(skb, RAVB_ALIGN - reserve);
-
- return skb;
-}
-
/* Get MAC address from the MAC address registers
*
* Ethernet AVB device doesn't have ROM for MAC address.
@@ -257,21 +239,10 @@ static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- unsigned int i;
if (!priv->rx_ring[q].raw)
return;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- }
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
@@ -298,13 +269,16 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
+ /* Free RX buffers */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ if (priv->rx_buffers[q][i].page)
+ page_pool_put_page(priv->rx_pool[q],
+ priv->rx_buffers[q][i].page,
+ 0, true);
}
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
+ kfree(priv->rx_buffers[q]);
+ priv->rx_buffers[q] = NULL;
+ page_pool_destroy(priv->rx_pool[q]);
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
@@ -317,35 +291,64 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format(struct net_device *ndev, int q)
+static int
+ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
+ struct ravb_rx_desc *rx_desc)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_rx_desc *rx_desc;
- unsigned int rx_ring_size;
+ const struct ravb_hw_info *info = priv->info;
+ struct ravb_rx_buffer *rx_buff;
dma_addr_t dma_addr;
- unsigned int i;
+ unsigned int size;
- rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
- memset(priv->rx_ring[q].raw, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
+ rx_buff = &priv->rx_buffers[q][entry];
+ size = info->rx_buffer_size;
+ rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
+ &size, gfp_mask);
+ if (unlikely(!rx_buff->page)) {
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->ds_cc = cpu_to_le16(0);
+ return -ENOMEM;
+ }
+
+ dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_device(ndev->dev.parent, dma_addr,
+ info->rx_buffer_size, DMA_FROM_DEVICE);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+
+ /* The end of the RX buffer is used to store skb shared data, so we need
+ * to ensure that the hardware leaves enough space for this.
+ */
+ rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
+ ETH_FCS_LEN + sizeof(__sum16));
+ return 0;
+}
+
+static u32
+ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_rx_desc *rx_desc;
+ u32 i, entry;
+
+ for (i = 0; i < count; i++) {
+ entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
+ rx_desc = ravb_rx_get_desc(priv, q, entry);
+
+ if (!priv->rx_buffers[q][entry].page) {
+ if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
+ gfp_mask, rx_desc)))
+ break;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ return i;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -353,6 +356,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct ravb_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
@@ -364,7 +368,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- ravb_rx_ring_format(ndev, q);
+ /* Regular RX descriptors have already been initialized by
+ * ravb_rx_ring_refill(); we just need to initialize the final link
+ * descriptor.
+ */
+ rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -408,26 +418,47 @@ static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct page_pool_params params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP,
+ .pool_size = priv->num_rx_ring[q],
+ .nid = NUMA_NO_NODE,
+ .dev = ndev->dev.parent,
+ .dma_dir = DMA_FROM_DEVICE,
+ };
unsigned int ring_size;
- struct sk_buff *skb;
- unsigned int i;
+ u32 num_filled;
+
+ /* Allocate RX page pool and buffers */
+ priv->rx_pool[q] = page_pool_create(&params);
+ if (IS_ERR(priv->rx_pool[q]))
+ goto error;
- /* Allocate RX and TX skb rings */
- priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
- sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ /* Allocate RX buffers */
+ priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_buffers[q]), GFP_KERNEL);
+ if (!priv->rx_buffers[q])
+ goto error;
+
+ /* Allocate TX skb rings */
priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_skb[q]), GFP_KERNEL);
- if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ if (!priv->tx_skb[q])
goto error;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
- if (!skb)
- goto error;
- priv->rx_skb[q][i] = skb;
- }
+ /* Allocate all RX descriptors. */
+ if (!ravb_alloc_rx_desc(ndev, q))
+ goto error;
+
+ /* Populate RX ring buffer. */
+ priv->dirty_rx[q] = 0;
+ ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, ring_size);
+ num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
+ GFP_KERNEL);
+ if (num_filled != priv->num_rx_ring[q])
+ goto error;
if (num_tx_desc > 1) {
/* Allocate rings for the aligned buffers */
@@ -437,12 +468,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
}
- /* Allocate all RX descriptors. */
- if (!ravb_alloc_rx_desc(ndev, q))
- goto error;
-
- priv->dirty_rx[q] = 0;
-
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * num_tx_desc + 1);
@@ -554,6 +579,16 @@ static void ravb_emac_init_rcar(struct net_device *ndev)
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
+static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;
+
+ ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);
+
+ ravb_emac_init_rcar(ndev);
+}
+
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
@@ -706,7 +741,9 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
__wsum csum_ip_hdr, csum_proto;
+ skb_frag_t *last_frag;
u8 *hw_csum;
/* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
@@ -716,12 +753,24 @@ static void ravb_rx_csum_gbeth(struct sk_buff *skb)
if (unlikely(skb->len < sizeof(__sum16) * 2))
return;
- hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ if (skb_is_nonlinear(skb)) {
+ last_frag = &shinfo->frags[shinfo->nr_frags - 1];
+ hw_csum = skb_frag_address(last_frag) +
+ skb_frag_size(last_frag);
+ } else {
+ hw_csum = skb_tail_pointer(skb);
+ }
+
+ hw_csum -= sizeof(__sum16);
csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
hw_csum -= sizeof(__sum16);
csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
- skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+
+ if (skb_is_nonlinear(skb))
+ skb_frag_size_sub(last_frag, 2 * sizeof(__sum16));
+ else
+ skb_trim(skb, skb->len - 2 * sizeof(__sum16));
/* TODO: IPV6 Rx checksum */
if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
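
With multi-descriptor packets the 4-byte hardware checksum trailer can now sit at the end of the last page fragment instead of at the linear tail, so the helper above picks its starting pointer accordingly before trimming. A small sketch of locating such a trailer (the example_* name is hypothetical):

#include <linux/skbuff.h>

/* return a pointer to the last @trailer_len bytes of the payload, whether
 * they live in the linear area or in the final page fragment
 */
static u8 *example_trailer_ptr(struct sk_buff *skb, unsigned int trailer_len)
{
	if (skb_is_nonlinear(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags - 1];

		return (u8 *)skb_frag_address(frag) +
		       skb_frag_size(frag) - trailer_len;
	}

	return skb_tail_pointer(skb) - trailer_len;
}
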
@@ -743,30 +792,14 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
- struct ravb_rx_desc *desc)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct sk_buff *skb;
-
- skb = priv->rx_skb[RAVB_BE][entry];
- priv->rx_skb[RAVB_BE][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(priv->info->rx_max_frame_size, 16),
- DMA_FROM_DEVICE);
-
- return skb;
-}
-
/* Packet receive function for Gigabit Ethernet */
-static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
struct sk_buff *skb;
- dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
u16 desc_len;
@@ -781,7 +814,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -807,87 +840,110 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
die_dt = desc->die_dt & 0xF0;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ desc_len, DMA_FROM_DEVICE);
+
switch (die_dt) {
case DT_FSINGLE:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, desc_len);
- skb->protocol = eth_type_trans(skb, ndev);
- if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
- napi_gro_receive(&priv->napi[q], skb);
- rx_packets++;
- stats->rx_bytes += desc_len;
- break;
case DT_FSTART:
- priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, desc_len);
+ /* Start of packet: Set initial data length. */
+ skb = napi_build_skb(rx_addr,
+ info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+ goto refill;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, desc_len);
+
+ /* Save this skb if the packet spans multiple
+ * descriptors.
+ */
+ if (die_dt == DT_FSTART)
+ priv->rx_1st_skb = skb;
break;
+
case DT_FMID:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- break;
case DT_FEND:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- priv->rx_1st_skb->protocol =
- eth_type_trans(priv->rx_1st_skb, ndev);
+ /* Continuing a packet: Add this buffer as an RX
+ * frag.
+ */
+
+ /* rx_1st_skb will be NULL if napi_build_skb()
+ * failed for the first descriptor of a
+ * multi-descriptor packet.
+ */
+ if (unlikely(!priv->rx_1st_skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+
+ /* We may find a DT_FSINGLE or DT_FSTART
+ * descriptor in the queue which we can
+ * process, so don't give up yet.
+ */
+ continue;
+ }
+ skb_add_rx_frag(priv->rx_1st_skb,
+ skb_shinfo(priv->rx_1st_skb)->nr_frags,
+ rx_buff->page, rx_buff->offset,
+ desc_len, info->rx_buffer_size);
+
+ /* Set skb to point at the whole packet so that
+ * we only need one code path for finishing a
+ * packet.
+ */
+ skb = priv->rx_1st_skb;
+ }
+
+ switch (die_dt) {
+ case DT_FSINGLE:
+ case DT_FEND:
+ /* Finishing a packet: Determine protocol &
+ * checksum, hand off to NAPI and update our
+ * stats.
+ */
+ skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(priv->rx_1st_skb);
- stats->rx_bytes += priv->rx_1st_skb->len;
- napi_gro_receive(&priv->napi[q],
- priv->rx_1st_skb);
+ ravb_rx_csum_gbeth(skb);
+ stats->rx_bytes += skb->len;
+ napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- break;
+
+ /* Clear rx_1st_skb so that it will only be
+ * non-NULL when valid.
+ */
+ priv->rx_1st_skb = NULL;
}
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
+refill:
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break;
- dma_addr = dma_map_single(ndev->dev.parent,
- skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
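
For packets spanning several descriptors, the rewritten receive path builds the skb head from the first buffer with napi_build_skb() and attaches each later buffer as a page fragment instead of copying into one linear skb. A hedged sketch of that assembly (hypothetical example_* names; truesize would normally be the full buffer size handed to the pool):

#include <linux/skbuff.h>

/* first buffer of a packet: wrap the page-pool buffer in an skb head */
static struct sk_buff *example_start_packet(void *buf_addr,
					    unsigned int buf_size,
					    unsigned int frag_len)
{
	struct sk_buff *skb = napi_build_skb(buf_addr, buf_size);

	if (!skb)
		return NULL;

	skb_mark_for_recycle(skb);	/* let the page_pool reclaim the pages */
	skb_put(skb, frag_len);

	return skb;
}

/* middle/last buffers: append as fragments, no copying */
static void example_continue_packet(struct sk_buff *skb, struct page *page,
				    unsigned int offset, unsigned int frag_len,
				    unsigned int truesize)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
			frag_len, truesize);
}
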
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -895,7 +951,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
struct ravb_ex_rx_desc *desc;
unsigned int limit, i;
struct sk_buff *skb;
- dma_addr_t dma_addr;
struct timespec64 ts;
int rx_packets = 0;
u8 desc_status;
@@ -906,7 +961,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].ex_desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -934,12 +989,23 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
stats->rx_missed_errors++;
} else {
u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
-
- skb = priv->rx_skb[q][entry];
- priv->rx_skb[q][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ pkt_len, DMA_FROM_DEVICE);
+
+ skb = napi_build_skb(rx_addr, info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0, true);
+ break;
+ }
+ skb_mark_for_recycle(skb);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -961,48 +1027,28 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
stats->rx_bytes += pkt_len;
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break; /* Better luck next round. */
- dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+static int ravb_rx(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->receive(ndev, quota, q);
+ return info->receive(ndev, budget, q);
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
@@ -1319,13 +1365,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
- int quota = budget;
- bool unmask;
+ int work_done;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- unmask = !ravb_rx(ndev, &quota, q);
+ work_done = ravb_rx(ndev, budget, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1344,24 +1389,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- if (!unmask)
- goto out;
-
- napi_complete(napi);
-
- /* Re-enable RX/TX interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- if (!info->irq_en_dis) {
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
- } else {
- ravb_write(ndev, mask, RIE0);
- ravb_write(ndev, mask, TIE);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!info->irq_en_dis) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
}
- spin_unlock_irqrestore(&priv->lock, flags);
-out:
- return budget - quota;
+ return work_done;
}
static void ravb_set_duplex_gbeth(struct net_device *ndev)
@@ -1696,7 +1737,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
}
static int ravb_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
@@ -2621,6 +2662,28 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .aligned_tx = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
static const struct ravb_hw_info ravb_gen3_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
@@ -2634,7 +2697,8 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
@@ -2645,12 +2709,12 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.magic_pkt = 1,
};
-static const struct ravb_hw_info ravb_gen2_hw_info = {
+static const struct ravb_hw_info ravb_gen4_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
- .emac_init = ravb_emac_init_rcar,
+ .emac_init = ravb_emac_init_rcar_gen4,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
@@ -2658,10 +2722,14 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
- .aligned_tx = 1,
- .gptp = 1,
+ .internal_delay = 1,
+ .tx_counters = 1,
+ .multi_irqs = 1,
+ .irq_en_dis = 1,
+ .ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
@@ -2679,7 +2747,8 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
@@ -2702,9 +2771,10 @@ static const struct ravb_hw_info gbeth_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
.rx_max_frame_size = SZ_8K,
- .rx_max_desc_use = 4080,
+ .rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
+ .coalesce_irqs = 1,
.tx_counters = 1,
.carrier_counters = 1,
.half_duplex = 1,
@@ -2716,7 +2786,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
- { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
@@ -2981,6 +3051,12 @@ static int ravb_probe(struct platform_device *pdev)
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
+ if (info->coalesce_irqs) {
+ netdev_sw_irq_coalesce_default_on(ndev);
+ if (num_present_cpus() == 1)
+ dev_set_threaded(ndev, true);
+ }
+
/* Network device register */
error = register_netdev(ndev);
if (error)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index dcab638c57fe..ff50e20856ec 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -871,13 +871,13 @@ static void rswitch_tx_free(struct net_device *ndev)
dma_rmb();
skb = gq->skbs[gq->dirty];
if (skb) {
+ rdev->ndev->stats.tx_packets++;
+ rdev->ndev->stats.tx_bytes += skb->len;
dma_unmap_single(ndev->dev.parent,
gq->unmap_addrs[gq->dirty],
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
- rdev->ndev->stats.tx_packets++;
- rdev->ndev->stats.tx_bytes += skb->len;
}
desc->desc.die_dt = DT_EEMPTY;
}
@@ -1809,7 +1809,7 @@ static const struct net_device_ops rswitch_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
new file mode 100644
index 000000000000..577227c007ab
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include "rtsn.h"
+#include "rcar_gen4_ptp.h"
+
+struct rtsn_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ u32 num_tx_ring;
+ u32 num_rx_ring;
+ u32 tx_desc_bat_size;
+ dma_addr_t tx_desc_bat_dma;
+ struct rtsn_desc *tx_desc_bat;
+ u32 rx_desc_bat_size;
+ dma_addr_t rx_desc_bat_dma;
+ struct rtsn_desc *rx_desc_bat;
+ dma_addr_t tx_desc_dma;
+ dma_addr_t rx_desc_dma;
+ struct rtsn_ext_desc *tx_ring;
+ struct rtsn_ext_ts_desc *rx_ring;
+ struct sk_buff **tx_skb;
+ struct sk_buff **rx_skb;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_tx;
+ u32 dirty_tx;
+ u32 cur_rx;
+ u32 dirty_rx;
+ u8 ts_tag;
+ struct napi_struct napi;
+ struct rtnl_link_stats64 stats;
+
+ struct mii_bus *mii;
+ phy_interface_t iface;
+ int link;
+ int speed;
+
+ int tx_data_irq;
+ int rx_data_irq;
+};
+
+static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 clear, u32 set)
+{
+ rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
+}
+
+static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 mask, u32 expected)
+{
+ u32 val;
+
+ return readl_poll_timeout(priv->base + reg, val,
+ (val & mask) == expected,
+ RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
+}
+
+static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
+{
+ if (enable) {
+ rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ } else {
+ rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ }
+}
+
+static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
+
+ ptp_priv->info.gettime64(&ptp_priv->info, ts);
+}
+
+static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ struct sk_buff *skb;
+ int free_num = 0;
+ int entry, size;
+
+ for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
+ entry = priv->dirty_tx % priv->num_tx_ring;
+ desc = &priv->tx_ring[entry];
+ if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ size = le16_to_cpu(desc->info_ds) & TX_DS;
+ skb = priv->tx_skb[entry];
+ if (skb) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ rtsn_get_timestamp(priv, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[entry]);
+ free_num++;
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += size;
+ }
+
+ desc->die_dt = DT_EEMPTY;
+ }
+
+ desc = &priv->tx_ring[priv->num_tx_ring];
+ desc->die_dt = DT_LINK;
+
+ return free_num;
+}
+
+static int rtsn_rx(struct net_device *ndev, int budget)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ unsigned int ndescriptors;
+ unsigned int rx_packets;
+ unsigned int i;
+ bool get_ts;
+
+ get_ts = priv->ptp_priv->tstamp_rx_ctrl &
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+
+ ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
+ rx_packets = 0;
+ for (i = 0; i < ndescriptors; i++) {
+ const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 pkt_len;
+
+ /* Stop processing descriptors if budget is consumed. */
+ if (rx_packets >= budget)
+ break;
+
+ /* Stop processing descriptors on first empty. */
+ if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;
+
+ skb = priv->rx_skb[entry];
+ priv->rx_skb[entry] = NULL;
+ dma_addr = le32_to_cpu(desc->dptr);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+
+ /* Get timestamp if enabled. */
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+
+ /* Update statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += pkt_len;
+
+ /* Update counters. */
+ priv->cur_rx++;
+ rx_packets++;
+ }
+
+ /* Refill the RX ring buffers */
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+
+ if (!priv->rx_skb[entry]) {
+ skb = napi_alloc_skb(&priv->napi,
+ PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ le16_to_cpu(desc->info_ds),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->info_ds = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ skb_checksum_none_assert(skb);
+ priv->rx_skb[entry] = skb;
+ }
+
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+
+ priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;
+
+ return rx_packets;
+}
+
+static int rtsn_poll(struct napi_struct *napi, int budget)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ unsigned long flags;
+ int work_done;
+
+ ndev = napi->dev;
+ priv = netdev_priv(ndev);
+
+ /* Processing RX Descriptor Ring */
+ work_done = rtsn_rx(ndev, budget);
+
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_tx_free(ndev, true);
+ netif_wake_subqueue(ndev, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Re-enable TX/RX interrupts */
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_ctrl_data_irq(priv, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int rtsn_desc_alloc(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ unsigned int i;
+
+ priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
+ priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
+ &priv->tx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->tx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < TX_NUM_CHAINS; i++)
+ priv->tx_desc_bat[i].die_dt = DT_EOS;
+
+ priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
+ priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
+ &priv->rx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->rx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < RX_NUM_CHAINS; i++)
+ priv->rx_desc_bat[i].die_dt = DT_EOS;
+
+ return 0;
+}
+
+static void rtsn_desc_free(struct rtsn_private *priv)
+{
+ if (priv->tx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
+ priv->tx_desc_bat, priv->tx_desc_bat_dma);
+ priv->tx_desc_bat = NULL;
+
+ if (priv->rx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
+ priv->rx_desc_bat, priv->rx_desc_bat_dma);
+ priv->rx_desc_bat = NULL;
+}
+
+static void rtsn_chain_free(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
+ priv->tx_ring, priv->tx_desc_dma);
+ priv->tx_ring = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
+ priv->rx_ring, priv->rx_desc_dma);
+ priv->rx_ring = NULL;
+
+ kfree(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ kfree(priv->rx_skb);
+ priv->rx_skb = NULL;
+}
+
+static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
+{
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ int i;
+
+ priv->num_tx_ring = tx_size;
+ priv->num_rx_ring = rx_size;
+
+ priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
+ priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);
+
+ if (!priv->rx_skb || !priv->tx_skb)
+ goto error;
+
+ for (i = 0; i < rx_size; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ goto error;
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skb[i] = skb;
+ }
+
+ /* Allocate TX, RX descriptors */
+ priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_desc) * (tx_size + 1),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
+ &priv->rx_desc_dma, GFP_KERNEL);
+
+ if (!priv->tx_ring || !priv->rx_ring)
+ goto error;
+
+ return 0;
+error:
+ rtsn_chain_free(priv);
+
+ return -ENOMEM;
+}
+
+static void rtsn_chain_format(struct rtsn_private *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct rtsn_ext_ts_desc *rx_desc;
+ struct rtsn_ext_desc *tx_desc;
+ struct rtsn_desc *bat_desc;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ priv->cur_tx = 0;
+ priv->cur_rx = 0;
+ priv->dirty_rx = 0;
+ priv->dirty_tx = 0;
+
+ /* TX */
+ memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
+ for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
+ tx_desc->die_dt = DT_EEMPTY | D_DIE;
+
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+ tx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+
+ /* RX */
+ memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
+ for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
+ dma_addr = dma_map_single(ndev->dev.parent,
+ priv->rx_skb[i]->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+ rx_desc->dptr = cpu_to_le32((u32)dma_addr);
+ rx_desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+ rx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+}
+
+static int rtsn_dmac_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
+ if (ret)
+ return ret;
+
+ rtsn_chain_format(priv);
+
+ return 0;
+}
+
+static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
+{
+ return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
+}
+
+static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ unsigned int i;
+
+ /* Need to busy loop as mode changes can happen in atomic context. */
+ for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
+ if (rtsn_read_mode(priv) == mode)
+ return 0;
+
+ udelay(RTSN_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ int ret;
+
+ rtsn_write(priv, OCR, mode);
+ ret = rtsn_wait_mode(priv, mode);
+ if (ret)
+ netdev_err(priv->ndev, "Failed to switch operation mode\n");
+ return ret;
+}
+
+static int rtsn_get_data_irq_status(struct rtsn_private *priv)
+{
+ u32 val;
+
+ val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
+ val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
+
+ return val;
+}
+
+static irqreturn_t rtsn_irq(int irq, void *dev_id)
+{
+ struct rtsn_private *priv = dev_id;
+ int ret = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ if (rtsn_get_data_irq_status(priv)) {
+ /* Clear TX/RX irq status */
+ rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, false);
+
+ __napi_schedule(&priv->napi);
+ }
+
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, struct rtsn_private *priv,
+ const char *ch)
+{
+ char *name;
+ int ret;
+
+ name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
+ priv->ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+
+ ret = request_irq(irq, handler, flags, name, priv);
+ if (ret)
+ netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
+
+ return ret;
+}
+
+static void rtsn_free_irqs(struct rtsn_private *priv)
+{
+ free_irq(priv->tx_data_irq, priv);
+ free_irq(priv->rx_data_irq, priv);
+}
+
+static int rtsn_request_irqs(struct rtsn_private *priv)
+{
+ int ret;
+
+ priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
+ if (priv->rx_data_irq < 0)
+ return priv->rx_data_irq;
+
+ priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
+ if (priv->tx_data_irq < 0)
+ return priv->tx_data_irq;
+
+ ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
+ if (ret)
+ return ret;
+
+ ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
+ if (ret) {
+ free_irq(priv->tx_data_irq, priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtsn_reset(struct rtsn_private *priv)
+{
+ reset_control_reset(priv->reset);
+ mdelay(1);
+
+ return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
+}
+
+static int rtsn_axibmi_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
+ if (ret)
+ return ret;
+
+ /* Set AXIWC */
+ rtsn_write(priv, AXIWC, AXIWC_DEFAULT);
+
+ /* Set AXIRC */
+ rtsn_write(priv, AXIRC, AXIRC_DEFAULT);
+
+ /* TX Descriptor chain setting */
+ rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
+ rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, TATLR, TATLR_TATL);
+
+ ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
+ if (ret)
+ return ret;
+
+ /* RX Descriptor chain setting */
+ rtsn_write(priv, RATLS0,
+ RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
+ rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, RATLR, RATLR_RATL);
+
+ ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
+ if (ret)
+ return ret;
+
+ /* Enable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, true);
+
+ return 0;
+}
+
+static void rtsn_mhd_init(struct rtsn_private *priv)
+{
+ /* TX General setting */
+ rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
+ rtsn_write(priv, TMS0, TMS_MFS_MAX);
+
+ /* RX Filter IP */
+ rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
+ rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
+}
+
+static int rtsn_get_phy_params(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
+ if (ret)
+ return ret;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ priv->speed = 100;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ priv->speed = 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtsn_set_phy_interface(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = MPIC_PIS_MII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MPIC_PIS_GMII;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
+}
+
+static void rtsn_set_rate(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->speed) {
+ case 10:
+ val = MPIC_LSC_10M;
+ break;
+ case 100:
+ val = MPIC_LSC_100M;
+ break;
+ case 1000:
+ val = MPIC_LSC_1G;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
+}
+
+static int rtsn_rmac_init(struct rtsn_private *priv)
+{
+ const u8 *mac_addr = priv->ndev->dev_addr;
+ int ret;
+
+ /* Set MAC address */
+ rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
+ rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Set xMII type */
+ rtsn_set_phy_interface(priv);
+ rtsn_set_rate(priv);
+
+ /* Enable MII */
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Link verification */
+ rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
+ ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int rtsn_hw_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Change to CONFIG mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = rtsn_axibmi_init(priv);
+ if (ret)
+ return ret;
+
+ rtsn_mhd_init(priv);
+
+ ret = rtsn_rmac_init(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ if (ret)
+ return ret;
+
+ /* Change to OPERATION mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);
+
+ return ret;
+}
+
+static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
+ int regad, u16 data)
+{
+ struct rtsn_private *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;
+
+ if (!read)
+ val |= MPSM_PSMAD | MPSM_PRD_SET(data);
+
+ rtsn_write(priv, MPSM, val);
+
+ ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
+ if (ret)
+ return ret;
+
+ if (read)
+ ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));
+
+ return ret;
+}
+
+static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
+{
+ return rtsn_mii_access(bus, true, addr, regnum, 0);
+}
+
+static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ return rtsn_mii_access(bus, false, addr, regnum, val);
+}
+
+static int rtsn_mdio_alloc(struct rtsn_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = mdiobus_alloc();
+ if (!mii)
+ return -ENOMEM;
+
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ ret = -ENODEV;
+ goto out_free_bus;
+ }
+
+ /* Enter config mode before registering the MDIO bus */
+ ret = rtsn_reset(priv);
+ if (ret)
+ goto out_free_bus;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ goto out_free_bus;
+
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Register the MDIO bus */
+ mii->name = "rtsn_mii";
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+ mii->priv = priv;
+ mii->read = rtsn_mii_read;
+ mii->write = rtsn_mii_write;
+ mii->parent = dev;
+
+ ret = of_mdiobus_register(mii, mdio_node);
+ of_node_put(mdio_node);
+ if (ret)
+ goto out_free_bus;
+
+ priv->mii = mii;
+
+ return 0;
+
+out_free_bus:
+ mdiobus_free(mii);
+ return ret;
+}
+
+static void rtsn_mdio_free(struct rtsn_private *priv)
+{
+ mdiobus_unregister(priv->mii);
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+}
+
+static void rtsn_adjust_link(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool new_state = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ }
+
+ if (!priv->link) {
+ new_state = true;
+ priv->link = phydev->link;
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ }
+
+ if (new_state) {
+ /* Need to transition to CONFIG mode before reconfiguring and
+ * then back to the original mode. Any state change to/from
+ * CONFIG or OPERATION must pass through DISABLE to stop Rx/Tx.
+ */
+ enum rtsn_mode orgmode = rtsn_read_mode(priv);
+
+ /* Transition to CONFIG */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
+ goto out;
+ }
+
+ rtsn_set_rate(priv);
+
+ /* Transition to original mode */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, orgmode))
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int rtsn_phy_init(struct rtsn_private *priv)
+{
+ struct device_node *np = priv->ndev->dev.parent->of_node;
+ struct phy_device *phydev;
+ struct device_node *phy;
+
+ priv->link = 0;
+
+ phy = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy)
+ return -ENOENT;
+
+ phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
+ priv->iface);
+ of_node_put(phy);
+ if (!phydev)
+ return -ENOENT;
+
+ /* Only support full-duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+static void rtsn_phy_deinit(struct rtsn_private *priv)
+{
+ phy_disconnect(priv->ndev->phydev);
+ priv->ndev->phydev = NULL;
+}
+
+static int rtsn_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_desc_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_dmac_init(priv);
+ if (ret)
+ goto error_free_desc;
+
+ ret = rtsn_hw_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_phy_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_request_irqs(priv);
+ if (ret)
+ goto error_free_phy;
+
+ return 0;
+error_free_phy:
+ rtsn_phy_deinit(priv);
+error_free_chain:
+ rtsn_chain_free(priv);
+error_free_desc:
+ rtsn_desc_free(priv);
+ return ret;
+}
+
+static void rtsn_deinit(struct rtsn_private *priv)
+{
+ rtsn_free_irqs(priv);
+ rtsn_phy_deinit(priv);
+ rtsn_chain_free(priv);
+ rtsn_desc_free(priv);
+}
+
+static void rtsn_parse_mac_address(struct device_node *np,
+ struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ u8 addr[ETH_ALEN];
+ u32 mrmac0;
+ u32 mrmac1;
+
+ /* Try to read address from Device Tree. */
+ if (!of_get_mac_address(np, addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Try to read address from device. */
+ mrmac0 = rtsn_read(priv, MRMAC0);
+ mrmac1 = rtsn_read(priv, MRMAC1);
+
+ addr[0] = (mrmac0 >> 8) & 0xff;
+ addr[1] = (mrmac0 >> 0) & 0xff;
+ addr[2] = (mrmac1 >> 24) & 0xff;
+ addr[3] = (mrmac1 >> 16) & 0xff;
+ addr[4] = (mrmac1 >> 8) & 0xff;
+ addr[5] = (mrmac1 >> 0) & 0xff;
+
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Fall back to a random address */
+ eth_hw_addr_random(ndev);
+}
+
+static int rtsn_open(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ int ret;
+
+ napi_enable(&priv->napi);
+
+ ret = rtsn_init(priv);
+ if (ret) {
+ napi_disable(&priv->napi);
+ return ret;
+ }
+
+ phy_start(ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int rtsn_stop(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ phy_stop(priv->ndev->phydev);
+ napi_disable(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ rtsn_deinit(priv);
+
+ return 0;
+}
+
+static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ int ret = NETDEV_TX_OK;
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int entry;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Drop packet if it won't fit in a single descriptor. */
+ if (skb->len >= TX_DS) {
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ goto out;
+ }
+
+ if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
+ netif_stop_subqueue(ndev, 0);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto out;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ entry = priv->cur_tx % priv->num_tx_ring;
+ priv->tx_skb[entry] = skb;
+ desc = &priv->tx_ring[entry];
+ desc->dptr = cpu_to_le32(dma_addr);
+ desc->info_ds = cpu_to_le16(skb->len);
+ desc->info1 = cpu_to_le64(skb->len);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ts_tag++;
+ desc->info_ds |= cpu_to_le16(TXC);
+ desc->info = priv->ts_tag;
+ }
+
+ skb_tx_timestamp(skb);
+ dma_wmb();
+
+ desc->die_dt = DT_FSINGLE | D_DIE;
+ priv->cur_tx++;
+
+ /* Start xmit */
+ rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
+static void rtsn_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ *storage = priv->stats;
+}
+
+static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ return phy_do_ioctl_running(ndev, ifr, cmd);
+}
+
+static int rtsn_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ config->flags = 0;
+
+ config->tx_type =
+ ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
+ case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtsn_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+ u32 tstamp_rx_ctrl;
+ u32 tstamp_tx_ctrl;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ break;
+ }
+
+ ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return 0;
+}
+
+static const struct net_device_ops rtsn_netdev_ops = {
+ .ndo_open = rtsn_open,
+ .ndo_stop = rtsn_stop,
+ .ndo_start_xmit = rtsn_start_xmit,
+ .ndo_get_stats64 = rtsn_get_stats64,
+ .ndo_eth_ioctl = rtsn_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_hwtstamp_set = rtsn_hwtstamp_set,
+ .ndo_hwtstamp_get = rtsn_hwtstamp_get,
+};
+
+static int rtsn_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops rtsn_ethtool_ops = {
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = rtsn_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct of_device_id rtsn_match_table[] = {
+ { .compatible = "renesas,r8a779g0-ethertsn", },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rtsn_match_table);
+
+static int rtsn_probe(struct platform_device *pdev)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ struct resource *res;
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
+ RX_NUM_CHAINS);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto error_free;
+ }
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto error_free;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find tsnes resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto error_free;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+ ndev->base_addr = res->start;
+ ndev->netdev_ops = &rtsn_netdev_ops;
+ ndev->ethtool_ops = &rtsn_ethtool_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find gptp resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->ptp_priv->addr)) {
+ ret = PTR_ERR(priv->ptp_priv->addr);
+ goto error_free;
+ }
+
+ ret = rtsn_get_phy_params(priv);
+ if (ret)
+ goto error_free;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rtsn_poll);
+
+ rtsn_parse_mac_address(pdev->dev.of_node, ndev);
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
+ clk_get_rate(priv->clk));
+ if (ret)
+ goto error_pm;
+
+ ret = rtsn_mdio_alloc(priv);
+ if (ret)
+ goto error_ptp;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto error_mdio;
+
+ netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return 0;
+
+error_mdio:
+ rtsn_mdio_free(priv);
+error_ptp:
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+error_pm:
+ netif_napi_del(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+error_free:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int rtsn_remove(struct platform_device *pdev)
+{
+ struct rtsn_private *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+ rtsn_mdio_free(priv);
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+static struct platform_driver rtsn_driver = {
+ .probe = rtsn_probe,
+ .remove = rtsn_remove,
+ .driver = {
+ .name = "rtsn",
+ .of_match_table = rtsn_match_table,
+ }
+};
+module_platform_driver(rtsn_driver);
+
+MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
+MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rtsn.h b/drivers/net/ethernet/renesas/rtsn.h
new file mode 100644
index 000000000000..3183e80d7e6b
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#ifndef __RTSN_H__
+#define __RTSN_H__
+
+#include <linux/types.h>
+
+#define AXIBMI 0x0000
+#define TSNMHD 0x1000
+#define RMSO 0x2000
+#define RMRO 0x3800
+
+enum rtsn_reg {
+ AXIWC = AXIBMI + 0x0000,
+ AXIRC = AXIBMI + 0x0004,
+ TDPC0 = AXIBMI + 0x0010,
+ TFT = AXIBMI + 0x0090,
+ TATLS0 = AXIBMI + 0x00a0,
+ TATLS1 = AXIBMI + 0x00a4,
+ TATLR = AXIBMI + 0x00a8,
+ RATLS0 = AXIBMI + 0x00b0,
+ RATLS1 = AXIBMI + 0x00b4,
+ RATLR = AXIBMI + 0x00b8,
+ TSA0 = AXIBMI + 0x00c0,
+ TSS0 = AXIBMI + 0x00c4,
+ TRCR0 = AXIBMI + 0x0140,
+ RIDAUAS0 = AXIBMI + 0x0180,
+ RR = AXIBMI + 0x0200,
+ TATS = AXIBMI + 0x0210,
+ TATSR0 = AXIBMI + 0x0214,
+ TATSR1 = AXIBMI + 0x0218,
+ TATSR2 = AXIBMI + 0x021c,
+ RATS = AXIBMI + 0x0220,
+ RATSR0 = AXIBMI + 0x0224,
+ RATSR1 = AXIBMI + 0x0228,
+ RATSR2 = AXIBMI + 0x022c,
+ RIDASM0 = AXIBMI + 0x0240,
+ RIDASAM0 = AXIBMI + 0x0244,
+ RIDACAM0 = AXIBMI + 0x0248,
+ EIS0 = AXIBMI + 0x0300,
+ EIE0 = AXIBMI + 0x0304,
+ EID0 = AXIBMI + 0x0308,
+ EIS1 = AXIBMI + 0x0310,
+ EIE1 = AXIBMI + 0x0314,
+ EID1 = AXIBMI + 0x0318,
+ TCEIS0 = AXIBMI + 0x0340,
+ TCEIE0 = AXIBMI + 0x0344,
+ TCEID0 = AXIBMI + 0x0348,
+ RFSEIS0 = AXIBMI + 0x04c0,
+ RFSEIE0 = AXIBMI + 0x04c4,
+ RFSEID0 = AXIBMI + 0x04c8,
+ RFEIS0 = AXIBMI + 0x0540,
+ RFEIE0 = AXIBMI + 0x0544,
+ RFEID0 = AXIBMI + 0x0548,
+ RCEIS0 = AXIBMI + 0x05c0,
+ RCEIE0 = AXIBMI + 0x05c4,
+ RCEID0 = AXIBMI + 0x05c8,
+ RIDAOIS = AXIBMI + 0x0640,
+ RIDAOIE = AXIBMI + 0x0644,
+ RIDAOID = AXIBMI + 0x0648,
+ TSFEIS = AXIBMI + 0x06c0,
+ TSFEIE = AXIBMI + 0x06c4,
+ TSFEID = AXIBMI + 0x06c8,
+ TSCEIS = AXIBMI + 0x06d0,
+ TSCEIE = AXIBMI + 0x06d4,
+ TSCEID = AXIBMI + 0x06d8,
+ DIS = AXIBMI + 0x0b00,
+ DIE = AXIBMI + 0x0b04,
+ DID = AXIBMI + 0x0b08,
+ TDIS0 = AXIBMI + 0x0b10,
+ TDIE0 = AXIBMI + 0x0b14,
+ TDID0 = AXIBMI + 0x0b18,
+ RDIS0 = AXIBMI + 0x0b90,
+ RDIE0 = AXIBMI + 0x0b94,
+ RDID0 = AXIBMI + 0x0b98,
+ TSDIS = AXIBMI + 0x0c10,
+ TSDIE = AXIBMI + 0x0c14,
+ TSDID = AXIBMI + 0x0c18,
+ GPOUT = AXIBMI + 0x6000,
+
+ OCR = TSNMHD + 0x0000,
+ OSR = TSNMHD + 0x0004,
+ SWR = TSNMHD + 0x0008,
+ SIS = TSNMHD + 0x000c,
+ GIS = TSNMHD + 0x0010,
+ GIE = TSNMHD + 0x0014,
+ GID = TSNMHD + 0x0018,
+ TIS1 = TSNMHD + 0x0020,
+ TIE1 = TSNMHD + 0x0024,
+ TID1 = TSNMHD + 0x0028,
+ TIS2 = TSNMHD + 0x0030,
+ TIE2 = TSNMHD + 0x0034,
+ TID2 = TSNMHD + 0x0038,
+ RIS = TSNMHD + 0x0040,
+ RIE = TSNMHD + 0x0044,
+ RID = TSNMHD + 0x0048,
+ TGC1 = TSNMHD + 0x0050,
+ TGC2 = TSNMHD + 0x0054,
+ TFS0 = TSNMHD + 0x0060,
+ TCF0 = TSNMHD + 0x0070,
+ TCR1 = TSNMHD + 0x0080,
+ TCR2 = TSNMHD + 0x0084,
+ TCR3 = TSNMHD + 0x0088,
+ TCR4 = TSNMHD + 0x008c,
+ TMS0 = TSNMHD + 0x0090,
+ TSR1 = TSNMHD + 0x00b0,
+ TSR2 = TSNMHD + 0x00b4,
+ TSR3 = TSNMHD + 0x00b8,
+ TSR4 = TSNMHD + 0x00bc,
+ TSR5 = TSNMHD + 0x00c0,
+ RGC = TSNMHD + 0x00d0,
+ RDFCR = TSNMHD + 0x00d4,
+ RCFCR = TSNMHD + 0x00d8,
+ REFCNCR = TSNMHD + 0x00dc,
+ RSR1 = TSNMHD + 0x00e0,
+ RSR2 = TSNMHD + 0x00e4,
+ RSR3 = TSNMHD + 0x00e8,
+ TCIS = TSNMHD + 0x01e0,
+ TCIE = TSNMHD + 0x01e4,
+ TCID = TSNMHD + 0x01e8,
+ TPTPC = TSNMHD + 0x01f0,
+ TTML = TSNMHD + 0x01f4,
+ TTJ = TSNMHD + 0x01f8,
+ TCC = TSNMHD + 0x0200,
+ TCS = TSNMHD + 0x0204,
+ TGS = TSNMHD + 0x020c,
+ TACST0 = TSNMHD + 0x0210,
+ TACST1 = TSNMHD + 0x0214,
+ TACST2 = TSNMHD + 0x0218,
+ TALIT0 = TSNMHD + 0x0220,
+ TALIT1 = TSNMHD + 0x0224,
+ TALIT2 = TSNMHD + 0x0228,
+ TAEN0 = TSNMHD + 0x0230,
+ TAEN1 = TSNMHD + 0x0234,
+ TASFE = TSNMHD + 0x0240,
+ TACLL0 = TSNMHD + 0x0250,
+ TACLL1 = TSNMHD + 0x0254,
+ TACLL2 = TSNMHD + 0x0258,
+ CACC = TSNMHD + 0x0260,
+ CCS = TSNMHD + 0x0264,
+ CAIV0 = TSNMHD + 0x0270,
+ CAUL0 = TSNMHD + 0x0290,
+ TOCST0 = TSNMHD + 0x0300,
+ TOCST1 = TSNMHD + 0x0304,
+ TOCST2 = TSNMHD + 0x0308,
+ TOLIT0 = TSNMHD + 0x0310,
+ TOLIT1 = TSNMHD + 0x0314,
+ TOLIT2 = TSNMHD + 0x0318,
+ TOEN0 = TSNMHD + 0x0320,
+ TOEN1 = TSNMHD + 0x0324,
+ TOSFE = TSNMHD + 0x0330,
+ TCLR0 = TSNMHD + 0x0340,
+ TCLR1 = TSNMHD + 0x0344,
+ TCLR2 = TSNMHD + 0x0348,
+ TSMS = TSNMHD + 0x0350,
+ COCC = TSNMHD + 0x0360,
+ COIV0 = TSNMHD + 0x03b0,
+ COUL0 = TSNMHD + 0x03d0,
+ QSTMACU0 = TSNMHD + 0x0400,
+ QSTMACD0 = TSNMHD + 0x0404,
+ QSTMAMU0 = TSNMHD + 0x0408,
+ QSTMAMD0 = TSNMHD + 0x040c,
+ QSFTVL0 = TSNMHD + 0x0410,
+ QSFTVLM0 = TSNMHD + 0x0414,
+ QSFTMSD0 = TSNMHD + 0x0418,
+ QSFTGMI0 = TSNMHD + 0x041c,
+ QSFTLS = TSNMHD + 0x0600,
+ QSFTLIS = TSNMHD + 0x0604,
+ QSFTLIE = TSNMHD + 0x0608,
+ QSFTLID = TSNMHD + 0x060c,
+ QSMSMC = TSNMHD + 0x0610,
+ QSGTMC = TSNMHD + 0x0614,
+ QSEIS = TSNMHD + 0x0618,
+ QSEIE = TSNMHD + 0x061c,
+ QSEID = TSNMHD + 0x0620,
+ QGACST0 = TSNMHD + 0x0630,
+ QGACST1 = TSNMHD + 0x0634,
+ QGACST2 = TSNMHD + 0x0638,
+ QGALIT1 = TSNMHD + 0x0640,
+ QGALIT2 = TSNMHD + 0x0644,
+ QGAEN0 = TSNMHD + 0x0648,
+ QGAEN1 = TSNMHD + 0x074c,
+ QGIGS = TSNMHD + 0x0650,
+ QGGC = TSNMHD + 0x0654,
+ QGATL0 = TSNMHD + 0x0664,
+ QGATL1 = TSNMHD + 0x0668,
+ QGATL2 = TSNMHD + 0x066c,
+ QGOCST0 = TSNMHD + 0x0670,
+ QGOCST1 = TSNMHD + 0x0674,
+ QGOCST2 = TSNMHD + 0x0678,
+ QGOLIT0 = TSNMHD + 0x067c,
+ QGOLIT1 = TSNMHD + 0x0680,
+ QGOLIT2 = TSNMHD + 0x0684,
+ QGOEN0 = TSNMHD + 0x0688,
+ QGOEN1 = TSNMHD + 0x068c,
+ QGTRO = TSNMHD + 0x0690,
+ QGTR1 = TSNMHD + 0x0694,
+ QGTR2 = TSNMHD + 0x0698,
+ QGFSMS = TSNMHD + 0x069c,
+ QTMIS = TSNMHD + 0x06e0,
+ QTMIE = TSNMHD + 0x06e4,
+ QTMID = TSNMHD + 0x06e8,
+ QMEC = TSNMHD + 0x0700,
+ QMMC = TSNMHD + 0x0704,
+ QRFDC = TSNMHD + 0x0708,
+ QYFDC = TSNMHD + 0x070c,
+ QVTCMC0 = TSNMHD + 0x0710,
+ QMCBSC0 = TSNMHD + 0x0750,
+ QMCIRC0 = TSNMHD + 0x0790,
+ QMEBSC0 = TSNMHD + 0x07d0,
+ QMEIRC0 = TSNMHD + 0x0710,
+ QMCFC = TSNMHD + 0x0850,
+ QMEIS = TSNMHD + 0x0860,
+ QMEIE = TSNMHD + 0x0864,
+ QMEID = TSNMHD + 0x086c,
+ QSMFC0 = TSNMHD + 0x0870,
+ QMSPPC0 = TSNMHD + 0x08b0,
+ QMSRPC0 = TSNMHD + 0x08f0,
+ QGPPC0 = TSNMHD + 0x0930,
+ QGRPC0 = TSNMHD + 0x0950,
+ QMDPC0 = TSNMHD + 0x0970,
+ QMGPC0 = TSNMHD + 0x09b0,
+ QMYPC0 = TSNMHD + 0x09f0,
+ QMRPC0 = TSNMHD + 0x0a30,
+ MQSTMACU = TSNMHD + 0x0a70,
+ MQSTMACD = TSNMHD + 0x0a74,
+ MQSTMAMU = TSNMHD + 0x0a78,
+ MQSTMAMD = TSNMHD + 0x0a7c,
+ MQSFTVL = TSNMHD + 0x0a80,
+ MQSFTVLM = TSNMHD + 0x0a84,
+ MQSFTMSD = TSNMHD + 0x0a88,
+ MQSFTGMI = TSNMHD + 0x0a8c,
+
+ CFCR0 = RMSO + 0x0800,
+ FMSCR = RMSO + 0x0c10,
+
+ MMC = RMRO + 0x0000,
+ MPSM = RMRO + 0x0010,
+ MPIC = RMRO + 0x0014,
+ MTFFC = RMRO + 0x0020,
+ MTPFC = RMRO + 0x0024,
+ MTATC0 = RMRO + 0x0040,
+ MRGC = RMRO + 0x0080,
+ MRMAC0 = RMRO + 0x0084,
+ MRMAC1 = RMRO + 0x0088,
+ MRAFC = RMRO + 0x008c,
+ MRSCE = RMRO + 0x0090,
+ MRSCP = RMRO + 0x0094,
+ MRSCC = RMRO + 0x0098,
+ MRFSCE = RMRO + 0x009c,
+ MRFSCP = RMRO + 0x00a0,
+ MTRC = RMRO + 0x00a4,
+ MPFC = RMRO + 0x0100,
+ MLVC = RMRO + 0x0340,
+ MEEEC = RMRO + 0x0350,
+ MLBC = RMRO + 0x0360,
+ MGMR = RMRO + 0x0400,
+ MMPFTCT = RMRO + 0x0410,
+ MAPFTCT = RMRO + 0x0414,
+ MPFRCT = RMRO + 0x0418,
+ MFCICT = RMRO + 0x041c,
+ MEEECT = RMRO + 0x0420,
+ MEIS = RMRO + 0x0500,
+ MEIE = RMRO + 0x0504,
+ MEID = RMRO + 0x0508,
+ MMIS0 = RMRO + 0x0510,
+ MMIE0 = RMRO + 0x0514,
+ MMID0 = RMRO + 0x0518,
+ MMIS1 = RMRO + 0x0520,
+ MMIE1 = RMRO + 0x0524,
+ MMID1 = RMRO + 0x0528,
+ MMIS2 = RMRO + 0x0530,
+ MMIE2 = RMRO + 0x0534,
+ MMID2 = RMRO + 0x0538,
+ MXMS = RMRO + 0x0600,
+
+};
+
+/* AXIBMI */
+#define RR_RATRR BIT(0)
+#define RR_TATRR BIT(1)
+#define RR_RST (RR_RATRR | RR_TATRR)
+#define RR_RST_COMPLETE 0x03
+
+#define AXIWC_DEFAULT 0xffff
+#define AXIRC_DEFAULT 0xffff
+
+#define TATLS0_TEDE BIT(1)
+#define TATLS0_TATEN_SHIFT 24
+#define TATLS0_TATEN(n) ((n) << TATLS0_TATEN_SHIFT)
+#define TATLR_TATL BIT(31)
+
+#define RATLS0_RETS BIT(2)
+#define RATLS0_REDE BIT(3)
+#define RATLS0_RATEN_SHIFT 24
+#define RATLS0_RATEN(n) ((n) << RATLS0_RATEN_SHIFT)
+#define RATLR_RATL BIT(31)
+
+#define DIE_DID_TDICX(n) BIT((n))
+#define DIE_DID_RDICX(n) BIT((n) + 8)
+#define TDIE_TDID_TDX(n) BIT(n)
+#define RDIE_RDID_RDX(n) BIT(n)
+#define TDIS_TDS(n) BIT(n)
+#define RDIS_RDS(n) BIT(n)
+
+/* MHD */
+#define OSR_OPS 0x07
+#define SWR_SWR BIT(0)
+
+#define TGC1_TQTM_SFM 0xff00
+#define TGC1_STTV_DEFAULT 0x03
+
+#define TMS_MFS_MAX 0x2800
+
+/* RMAC System */
+#define CFCR_SDID(n) ((n) << 16)
+#define FMSCR_FMSIE(n) ((n) << 0)
+
+/* RMAC */
+#define MPIC_PIS_MASK GENMASK(1, 0)
+#define MPIC_PIS_MII 0
+#define MPIC_PIS_RMII 0x01
+#define MPIC_PIS_GMII 0x02
+#define MPIC_PIS_RGMII 0x03
+#define MPIC_LSC_SHIFT 2
+#define MPIC_LSC_MASK GENMASK(3, MPIC_LSC_SHIFT)
+#define MPIC_LSC_10M (0 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_100M (0x01 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_1G (0x02 << MPIC_LSC_SHIFT)
+#define MPIC_PSMCS_SHIFT 16
+#define MPIC_PSMCS_MASK GENMASK(21, MPIC_PSMCS_SHIFT)
+#define MPIC_PSMCS_DEFAULT (0x0a << MPIC_PSMCS_SHIFT)
+#define MPIC_PSMHT_SHIFT 24
+#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
+#define MPIC_PSMHT_DEFAULT (0x07 << MPIC_PSMHT_SHIFT)
+
+#define MLVC_PASE BIT(8)
+#define MLVC_PSE BIT(16)
+#define MLVC_PLV BIT(17)
+
+#define MPSM_PSME BIT(0)
+#define MPSM_PSMAD BIT(1)
+#define MPSM_PDA_SHIFT 3
+#define MPSM_PDA_MASK GENMASK(7, 3)
+#define MPSM_PDA(n) (((n) << MPSM_PDA_SHIFT) & MPSM_PDA_MASK)
+#define MPSM_PRA_SHIFT 8
+#define MPSM_PRA_MASK GENMASK(12, 8)
+#define MPSM_PRA(n) (((n) << MPSM_PRA_SHIFT) & MPSM_PRA_MASK)
+#define MPSM_PRD_SHIFT 16
+#define MPSM_PRD_SET(n) ((n) << MPSM_PRD_SHIFT)
+#define MPSM_PRD_GET(n) ((n) >> MPSM_PRD_SHIFT)
+
+#define GPOUT_RDM BIT(13)
+#define GPOUT_TDM BIT(14)
+
+/* RTSN */
+#define RTSN_INTERVAL_US 1000
+#define RTSN_TIMEOUT_US 1000000
+
+#define TX_NUM_CHAINS 1
+#define RX_NUM_CHAINS 1
+
+#define TX_CHAIN_SIZE 1024
+#define RX_CHAIN_SIZE 1024
+
+#define TX_CHAIN_IDX 0
+#define RX_CHAIN_IDX 0
+
+#define TX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * TX_CHAIN_IDX)
+#define RX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * RX_CHAIN_IDX)
+
+#define PKT_BUF_SZ 1584
+#define RTSN_ALIGN 128
+
+enum rtsn_mode {
+ OCR_OPC_DISABLE,
+ OCR_OPC_CONFIG,
+ OCR_OPC_OPERATION,
+};
+
+/* Descriptors */
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+enum TX_FS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum DIE_DT {
+ /* HW/SW arbitration */
+ DT_FEMPTY_IS = 0x10,
+ DT_FEMPTY_IC = 0x20,
+ DT_FEMPTY_ND = 0x30,
+ DT_FEMPTY = 0x40,
+ DT_FEMPTY_START = 0x50,
+ DT_FEMPTY_MID = 0x60,
+ DT_FEMPTY_END = 0x70,
+
+ /* Frame data */
+ DT_FSINGLE = 0x80,
+ DT_FSTART = 0x90,
+ DT_FMID = 0xa0,
+ DT_FEND = 0xb0,
+
+ /* Chain control */
+ DT_LEMPTY = 0xc0,
+ DT_EEMPTY = 0xd0,
+ DT_LINK = 0xe0,
+ DT_EOS = 0xf0,
+
+ DT_MASK = 0xf0,
+ D_DIE = 0x08,
+};
+
+struct rtsn_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+} __packed;
+
+struct rtsn_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+struct rtsn_ext_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+} __packed;
+
+struct rtsn_ext_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+enum EXT_INFO_DS_BIT {
+ TXC = 0x4000,
+};
+
+#endif
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 8fa6c0e9195b..7d69302ffa0a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1396,7 +1396,7 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index cf55202b3a7b..896ffca4aee2 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -59,8 +59,12 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .create_rxfh_context = efx_ethtool_create_rxfh_context,
+ .modify_rxfh_context = efx_ethtool_modify_rxfh_context,
+ .remove_rxfh_context = efx_ethtool_remove_rxfh_context,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e9d9de8e648a..6f1a01ded7d4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -299,7 +299,7 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
netdev_rss_key_fill(efx->rss_context.rx_hash_key,
sizeof(efx->rss_context.rx_hash_key));
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
/* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 48d3623735ba..7a6cab883d66 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -158,7 +158,7 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
}
/* RSS contexts */
-static inline bool efx_rss_active(struct efx_rss_context *ctx)
+static inline bool efx_rss_active(struct efx_rss_context_priv *ctx)
{
return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 4ebd5ae23eca..13cf647051af 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -714,7 +714,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
efx->type->fini(efx);
}
@@ -777,7 +777,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->type->rx_restore_rss_contexts)
efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
efx->type->filter_table_restore(efx);
up_write(&efx->filter_sem);
@@ -793,7 +793,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -1000,9 +1000,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- mutex_init(&efx->rss_lock);
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
efx->vport_id = EVB_PORT_ID_ASSIGNED;
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 37c69c8d90b1..7c887160e2ef 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
@@ -268,8 +268,12 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .create_rxfh_context = efx_ethtool_create_rxfh_context,
+ .modify_rxfh_context = efx_ethtool_modify_rxfh_context,
+ .remove_rxfh_context = efx_ethtool_remove_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 7d5e5db4eac5..6ded44b86052 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -820,10 +820,10 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
case ETHTOOL_GRXFH: {
- struct efx_rss_context *ctx = &efx->rss_context;
+ struct efx_rss_context_priv *ctx = &efx->rss_context.priv;
__u64 data;
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&net_dev->ethtool->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) {
ctx = efx_find_rss_context_entry(efx, info->rss_context);
if (!ctx) {
@@ -864,7 +864,7 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
out_setdata_unlock:
info->data = data;
out_unlock:
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&net_dev->ethtool->rss_lock);
return rc;
}
@@ -1163,46 +1163,14 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-static int efx_ethtool_get_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct efx_rss_context *ctx;
- int rc = 0;
-
- if (!efx->type->rx_pull_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
- ctx = efx_find_rss_context_entry(efx, rxfh->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- rc = efx->type->rx_pull_rss_context_config(efx, ctx);
- if (rc)
- goto out_unlock;
-
- rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->indir)
- memcpy(rxfh->indir, ctx->rx_indir_table,
- sizeof(ctx->rx_indir_table));
- if (rxfh->key)
- memcpy(rxfh->key, ctx->rx_hash_key,
- efx->type->rx_hash_key_size);
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
- if (rxfh->rss_context)
- return efx_ethtool_get_rxfh_context(net_dev, rxfh);
+ if (rxfh->rss_context) /* core should never call us for these */
+ return -EINVAL;
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
@@ -1218,68 +1186,85 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
return 0;
}
-static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+int efx_ethtool_modify_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- u32 *rss_context = &rxfh->rss_context;
- struct efx_rss_context *ctx;
- u32 *indir = rxfh->indir;
- bool allocated = false;
- u8 *key = rxfh->key;
- int rc;
+ struct efx_rss_context_priv *priv;
+ const u32 *indir = rxfh->indir;
+ const u8 *key = rxfh->key;
- if (!efx->type->rx_push_rss_context_config)
+ if (!efx->type->rx_push_rss_context_config) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NIC type does not support custom contexts");
return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
-
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (rxfh->rss_delete) {
- /* alloc + delete == Nothing to do */
- rc = -EINVAL;
- goto out_unlock;
- }
- ctx = efx_alloc_rss_context_entry(efx);
- if (!ctx) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- /* Initialise indir table and key to defaults */
- efx_set_default_rx_indir_table(efx, ctx);
- netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
- allocated = true;
- } else {
- ctx = efx_find_rss_context_entry(efx, *rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
}
-
- if (rxfh->rss_delete) {
- /* delete this context */
- rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
- if (!rc)
- efx_free_rss_context_entry(ctx);
- goto out_unlock;
+ /* Hash function is Toeplitz, cannot be changed */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Toeplitz hash is supported");
+ return -EOPNOTSUPP;
}
+ priv = ethtool_rxfh_context_priv(ctx);
+
if (!key)
- key = ctx->rx_hash_key;
+ key = ethtool_rxfh_context_key(ctx);
if (!indir)
- indir = ctx->rx_indir_table;
+ indir = ethtool_rxfh_context_indir(ctx);
- rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
- if (rc && allocated)
- efx_free_rss_context_entry(ctx);
- else
- *rss_context = ctx->user_id;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
+ return efx->type->rx_push_rss_context_config(efx, priv, indir, key,
+ false);
+}
+
+int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *priv;
+
+ priv = ethtool_rxfh_context_priv(ctx);
+
+ priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ priv->rx_hash_udp_4tuple = false;
+ /* Generate default indir table and/or key if not specified.
+ * We use ctx as a place to store these; this is fine because
+ * we're doing a create, so if we fail then the ctx will just
+ * be deleted.
+ */
+ if (!rxfh->indir)
+ efx_set_default_rx_indir_table(efx, ethtool_rxfh_context_indir(ctx));
+ if (!rxfh->key)
+ netdev_rss_key_fill(ethtool_rxfh_context_key(ctx),
+ ctx->key_size);
+ if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->input_xfrm == RXH_XFRM_NO_CHANGE)
+ ctx->input_xfrm = 0;
+ return efx_ethtool_modify_rxfh_context(net_dev, ctx, rxfh, extack);
+}
+
+int efx_ethtool_remove_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *priv;
+
+ if (!efx->type->rx_push_rss_context_config) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NIC type does not support custom contexts");
+ return -EOPNOTSUPP;
+ }
+
+ priv = ethtool_rxfh_context_priv(ctx);
+ return efx->type->rx_push_rss_context_config(efx, priv, NULL, NULL,
+ true);
}
int efx_ethtool_set_rxfh(struct net_device *net_dev,
@@ -1295,8 +1280,9 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack);
+ /* Custom contexts should use new API */
+ if (WARN_ON_ONCE(rxfh->rss_context))
+ return -EIO;
if (!indir && !key)
return 0;
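As efx_ethtool_create_rxfh_context() above shows, the create handler is expected to fill in whatever the request left unspecified before programming the NIC. A sketch of that default-fill step for a hypothetical driver "foo", using only the core helpers visible in this patch (ethtool_rxfh_context_indir/key, netdev_rss_key_fill, ethtool_rxfh_indir_default) and assuming foo_modify_rxfh_context() does the actual hardware write; hfunc/input_xfrm handling is omitted.

static int foo_create_rxfh_context(struct net_device *dev,
				   struct ethtool_rxfh_context *ctx,
				   const struct ethtool_rxfh_param *rxfh,
				   struct netlink_ext_ack *extack)
{
	u32 *indir = ethtool_rxfh_context_indir(ctx);
	u8 *key = ethtool_rxfh_context_key(ctx);
	unsigned int i;

	if (!rxfh->indir)	/* default: spread over all RX rings */
		for (i = 0; i < ctx->indir_size; i++)
			indir[i] = ethtool_rxfh_indir_default(i, dev->real_num_rx_queues);
	if (!rxfh->key)		/* default: random Toeplitz key */
		netdev_rss_key_fill(key, ctx->key_size);

	return foo_modify_rxfh_context(dev, ctx, rxfh, extack);
}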
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index a680e5980213..fc52e891637d 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -49,6 +49,18 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
int efx_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int efx_ethtool_modify_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int efx_ethtool_remove_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack);
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 7a1c9337081b..36114ce88034 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -367,7 +367,7 @@ static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
.getsda = falcon_getsda,
.getscl = falcon_getscl,
.udelay = 5,
- /* Wait up to 50 ms for slave to let us pull SCL high */
+ /* Wait up to 50 ms for target to let us pull SCL high */
.timeout = DIV_ROUND_UP(HZ, 20),
};
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index 9f413474bd9f..ada6e036fd97 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -297,7 +297,7 @@ static inline struct falcon_board *falcon_board(struct ef4_nic *efx)
return &data->board;
}
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
extern const struct ef4_nic_type falcon_a1_nic_type;
extern const struct ef4_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 4ff6586116ee..6ef96292909a 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -194,7 +194,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
const struct efx_filter_spec *spec,
efx_dword_t *inbuf, u64 handle,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
bool replacing)
{
u32 flags = spec->flags;
@@ -245,7 +245,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
static int efx_mcdi_filter_push(struct efx_nic *efx,
const struct efx_filter_spec *spec, u64 *handle,
- struct efx_rss_context *ctx, bool replacing)
+ struct efx_rss_context_priv *ctx, bool replacing)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
@@ -345,9 +345,9 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_rss_context_priv *ctx = NULL;
struct efx_mcdi_filter_table *table;
struct efx_filter_spec *saved_spec;
- struct efx_rss_context *ctx = NULL;
unsigned int match_pri, hash;
unsigned int priv_flags;
bool rss_locked = false;
@@ -380,12 +380,12 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
rss_locked = true;
if (spec->rss_context)
ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
- ctx = &efx->rss_context;
+ ctx = &efx->rss_context.priv;
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@@ -548,7 +548,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
out_unlock:
if (rss_locked)
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&table->lock);
return rc;
}
@@ -611,13 +611,13 @@ static int efx_mcdi_filter_remove_internal(struct efx_nic *efx,
new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- (efx_rss_active(&efx->rss_context) ?
+ (efx_rss_active(&efx->rss_context.priv) ?
EFX_FILTER_FLAG_RX_RSS : 0));
new_spec.dmaq_id = 0;
new_spec.rss_context = 0;
rc = efx_mcdi_filter_push(efx, &new_spec,
&table->entry[filter_idx].handle,
- &efx->rss_context,
+ &efx->rss_context.priv,
true);
if (rc == 0)
@@ -764,7 +764,7 @@ static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx,
ids = vlan->uc;
}
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
@@ -833,7 +833,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
int rc;
u16 *id;
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
@@ -1375,8 +1375,8 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
struct efx_mcdi_filter_table *table = efx->filter_state;
unsigned int invalid_filters = 0, failed = 0;
struct efx_mcdi_filter_vlan *vlan;
+ struct efx_rss_context_priv *ctx;
struct efx_filter_spec *spec;
- struct efx_rss_context *ctx;
unsigned int filter_idx;
u32 mcdi_flags;
int match_pri;
@@ -1388,7 +1388,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
return;
down_write(&table->lock);
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
@@ -1407,7 +1407,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
if (spec->rss_context)
ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
- ctx = &efx->rss_context;
+ ctx = &efx->rss_context.priv;
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
if (!ctx) {
netif_warn(efx, drv, efx->net_dev,
@@ -1444,7 +1444,7 @@ not_restored:
}
}
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&table->lock);
/*
@@ -1861,7 +1861,8 @@ out_unlock:
RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
-int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
+static int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
+ u32 *flags)
{
/*
* Firmware had a bug (sfc bug 61952) where it would not actually
@@ -1909,8 +1910,8 @@ int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
* Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
* just need to set the UDP ports flags (for both IP versions).
*/
-void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+static void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context_priv *ctx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
u32 flags;
@@ -1931,7 +1932,7 @@ void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
}
static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
@@ -2032,25 +2033,26 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
{
int rc;
- if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
- rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
+ if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id);
WARN_ON(rc != 0);
}
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
}
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
- context_size);
+ int rc = efx_mcdi_filter_alloc_rss_context(efx, false,
+ &efx->rss_context.priv,
+ context_size);
if (rc != 0)
return rc;
table->rx_rss_context_exclusive = false;
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
return 0;
}
@@ -2058,26 +2060,27 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table,
const u8 *key)
{
+ u32 old_rx_rss_context = efx->rss_context.priv.context_id;
struct efx_mcdi_filter_table *table = efx->filter_state;
- u32 old_rx_rss_context = efx->rss_context.context_id;
int rc;
- if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
+ if (efx->rss_context.priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
!table->rx_rss_context_exclusive) {
- rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
- NULL);
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true,
+ &efx->rss_context.priv,
+ NULL);
if (rc == -EOPNOTSUPP)
return rc;
else if (rc != 0)
goto fail1;
}
- rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
- rx_indir_table, key);
+ rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.priv.context_id,
+ rx_indir_table, key);
if (rc != 0)
goto fail2;
- if (efx->rss_context.context_id != old_rx_rss_context &&
+ if (efx->rss_context.priv.context_id != old_rx_rss_context &&
old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
table->rx_rss_context_exclusive = true;
@@ -2091,9 +2094,9 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
return 0;
fail2:
- if (old_rx_rss_context != efx->rss_context.context_id) {
- WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
- efx->rss_context.context_id = old_rx_rss_context;
+ if (old_rx_rss_context != efx->rss_context.priv.context_id) {
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id) != 0);
+ efx->rss_context.priv.context_id = old_rx_rss_context;
}
fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
@@ -2101,33 +2104,28 @@ fail1:
}
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key)
+ const u8 *key, bool delete)
{
int rc;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ if (delete)
+ /* already wasn't in HW, nothing to do */
+ return 0;
rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
if (rc)
return rc;
}
- if (!rx_indir_table) /* Delete this context */
+ if (delete) /* Delete this context */
return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);
- rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
- rx_indir_table, key);
- if (rc)
- return rc;
-
- memcpy(ctx->rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
-
- return 0;
+ return efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
+ rx_indir_table, key);
}
int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
@@ -2139,16 +2137,16 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
size_t outlen;
int rc, i;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
- if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
+ if (ctx->priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
return -ENOENT;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
- ctx->context_id);
+ ctx->priv.context_id);
BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
@@ -2164,7 +2162,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
- ctx->context_id);
+ ctx->priv.context_id);
BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
@@ -2186,35 +2184,42 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
{
int rc;
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
return rc;
}
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_rss_context *ctx;
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
int rc;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
if (!table->must_restore_rss_contexts)
return;
- list_for_each_entry(ctx, &efx->rss_context.list, list) {
+ xa_for_each(&efx->net_dev->ethtool->rss_ctx, context, ctx) {
+ struct efx_rss_context_priv *priv;
+ u32 *indir;
+ u8 *key;
+
+ priv = ethtool_rxfh_context_priv(ctx);
/* previous NIC RSS context is gone */
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* so try to allocate a new one */
- rc = efx_mcdi_rx_push_rss_context_config(efx, ctx,
- ctx->rx_indir_table,
- ctx->rx_hash_key);
+ indir = ethtool_rxfh_context_indir(ctx);
+ key = ethtool_rxfh_context_key(ctx);
+ rc = efx_mcdi_rx_push_rss_context_config(efx, priv, indir, key,
+ false);
if (rc)
netif_warn(efx, probe, efx->net_dev,
- "failed to restore RSS context %u, rc=%d"
+ "failed to restore RSS context %lu, rc=%d"
"; RSS filters may fail to be applied\n",
- ctx->user_id, rc);
+ context, rc);
}
table->must_restore_rss_contexts = false;
}
@@ -2276,7 +2281,7 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
{
if (user)
return -EOPNOTSUPP;
- if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
+ if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
return 0;
return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
}
@@ -2295,7 +2300,7 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
efx_mcdi_rx_free_indir_table(efx);
if (rss_spread > 1) {
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
rc = efx->type->rx_push_rss_config(efx, false,
efx->rss_context.rx_indir_table, NULL);
}
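efx_mcdi_rx_restore_rss_contexts() above now walks the contexts the ethtool core tracks instead of a driver-private list. The same iteration pattern reduced to a sketch; foo_push_ctx() is a hypothetical hardware-programming helper, everything else (rss_ctx, rss_lock, the ethtool_rxfh_context_* accessors) is the core state used in this patch.

static void foo_restore_rss_contexts(struct net_device *dev)
{
	struct ethtool_rxfh_context *ctx;
	unsigned long id;

	/* The per-netdev xarray holds every custom context; the caller is
	 * expected to hold the core's rss_lock, as the efx code asserts.
	 */
	WARN_ON(!mutex_is_locked(&dev->ethtool->rss_lock));
	xa_for_each(&dev->ethtool->rss_ctx, id, ctx)
		foo_push_ctx(dev, ethtool_rxfh_context_priv(ctx),
			     ethtool_rxfh_context_indir(ctx),
			     ethtool_rxfh_context_key(ctx));
}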
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index c0d6558b9fd2..11b9f87ed9e1 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -145,9 +145,9 @@ void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid);
void efx_mcdi_rx_free_indir_table(struct efx_nic *efx);
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key);
+ const u8 *key, bool delete);
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table,
const u8 *key);
@@ -161,10 +161,6 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
struct efx_rss_context *ctx);
-int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
- u32 *flags);
-void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx);
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx);
static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f2dd7feb0e0c..b85c51cbe7f9 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -737,21 +737,24 @@ struct vfdi_status;
/* The reserved RSS context value */
#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
- * struct efx_rss_context - A user-defined RSS context for filtering
- * @list: node of linked list on which this struct is stored
+ * struct efx_rss_context_priv - driver private data for an RSS context
* @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
* %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
- * @user_id: the rss_context ID exposed to userspace over ethtool.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
+ */
+struct efx_rss_context_priv {
+ u32 context_id;
+ bool rx_hash_udp_4tuple;
+};
+
+/**
+ * struct efx_rss_context - an RSS context
+ * @priv: hardware-specific state
* @rx_hash_key: Toeplitz hash key for this RSS context
* @indir_table: Indirection table for this RSS context
*/
struct efx_rss_context {
- struct list_head list;
- u32 context_id;
- u32 user_id;
- bool rx_hash_udp_4tuple;
+ struct efx_rss_context_priv priv;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
};
@@ -883,9 +886,7 @@ struct efx_mae;
* @rx_packet_ts_offset: Offset of timestamp from start of packet data
* (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_scatter: Scatter mode enabled for receives
- * @rss_context: Main RSS context. Its @list member is the head of the list of
- * RSS contexts created by user requests
- * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @rss_context: Main RSS context.
* @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -1052,7 +1053,6 @@ struct efx_nic {
int rx_packet_ts_offset;
bool rx_scatter;
struct efx_rss_context rss_context;
- struct mutex rss_lock;
u32 vport_id;
unsigned int_error_count;
@@ -1416,9 +1416,9 @@ struct efx_nic_type {
const u32 *rx_indir_table, const u8 *key);
int (*rx_pull_rss_config)(struct efx_nic *efx);
int (*rx_push_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key);
+ const u8 *key, bool delete);
int (*rx_pull_rss_context_config)(struct efx_nic *efx,
struct efx_rss_context *ctx);
void (*rx_restore_rss_contexts)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index c3bffbf0ba2b..6fd2fdbaa418 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1864,7 +1864,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct kernel_hwtstamp_config *i
return 0;
}
-void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct kernel_ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_nic *primary = efx->primary;
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 2f30dbb490d2..6946203499ef 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -12,7 +12,7 @@
#include <linux/net_tstamp.h>
#include "net_driver.h"
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
@@ -23,7 +23,8 @@ int efx_ptp_set_ts_config(struct efx_nic *efx,
struct netlink_ext_ack *extack);
int efx_ptp_get_ts_config(struct efx_nic *efx,
struct kernel_hwtstamp_config *config);
-void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+void efx_ptp_get_ts_info(struct efx_nic *efx,
+ struct kernel_ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_ptp_get_mode(struct efx_nic *efx);
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
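The get_ts_info path now takes the kernel-internal struct kernel_ethtool_ts_info rather than the UAPI struct ethtool_ts_info. A hedged sketch of what an implementation fills in; the capability bits below are arbitrary examples, not what sfc reports, and "foo" is a hypothetical driver.

static int foo_get_ts_info(struct net_device *dev,
			   struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = -1;				/* no PHC exposed */
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
	return 0;
}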
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index dcd901eccfc8..0b7dc75c40f9 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -557,69 +557,25 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
napi_gro_frags(napi);
}
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
+ u32 id)
{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ struct ethtool_rxfh_context *ctx;
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
- /* Create the new entry */
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
+ ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id);
+ if (!ctx)
return NULL;
- new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
-
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
+ return ethtool_rxfh_context_priv(ctx);
}
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir)
{
size_t i;
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++)
+ indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread);
}
/**
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
index fbd2769307f9..75fa84192362 100644
--- a/drivers/net/ethernet/sfc/rx_common.h
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -84,11 +84,9 @@ void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx);
+struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
+ u32 id);
+void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir);
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 14dd3893bdef..4c182d4edfc2 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 4b5e2f0ba350..c473a4b6dd44 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -1780,7 +1780,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx,
}
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_nic *primary = efx->primary;
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
index 6352f84424f6..b6133e7c5608 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.h
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -12,7 +12,7 @@
#include <linux/net_tstamp.h>
#include "net_driver.h"
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
@@ -21,7 +21,7 @@ int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
int efx_siena_ptp_get_ts_config(struct efx_nic *efx,
struct kernel_hwtstamp_config *config);
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info);
+ struct kernel_ethtool_ts_info *ts_info);
bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_siena_ptp_get_mode(struct efx_nic *efx);
int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 9d140203e273..0d93164988fc 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -387,11 +387,8 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
struct flow_match_control fm;
flow_rule_match_enc_control(rule, &fm);
- if (fm.mask->flags) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on enc_control.flags %#x",
- fm.mask->flags);
+ if (flow_rule_has_enc_control_flags(fm.mask->flags, extack))
return -EOPNOTSUPP;
- }
if (!IS_ALL_ONES(fm.mask->addr_type)) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported enc addr_type mask %u (key %u)",
fm.mask->addr_type,
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index af661c65ffe2..e2e7b1c68563 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1501,6 +1501,7 @@ static void smc_set_multicast_list(struct net_device *dev)
#ifdef MODULE
static struct net_device *devSMC9194;
+MODULE_DESCRIPTION("SMC 9194 Ethernet driver");
MODULE_LICENSE("GPL");
module_param_hw(io, int, ioport, 0);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 78ff3af7911a..907498848028 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1574,12 +1574,8 @@ smc_ethtool_set_link_ksettings(struct net_device *dev,
(cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;
-// lp->port = cmd->base.port;
lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
-// if (netif_running(dev))
-// smc_set_port(dev);
-
ret = 0;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 45ef5ac0788a..38aa4374e813 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -142,14 +142,14 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
#define SMC_CAN_USE_32BIT 0
#define SMC_NOWAIT 1
-static inline void mcf_insw(void *a, unsigned char *p, int l)
+static inline void mcf_insw(void __iomem *a, unsigned char *p, int l)
{
u16 *wp = (u16 *) p;
while (l-- > 0)
*wp++ = readw(a);
}
-static inline void mcf_outsw(void *a, unsigned char *p, int l)
+static inline void mcf_outsw(void __iomem *a, unsigned char *p, int l)
{
u16 *wp = (u16 *) p;
while (l-- > 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 9cd62b2110a1..cd36ff4da68c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -271,8 +271,6 @@ struct stmmac_safety_stats {
/* PCS defines */
#define STMMAC_PCS_RGMII (1 << 0)
#define STMMAC_PCS_SGMII (1 << 1)
-#define STMMAC_PCS_TBI (1 << 2)
-#define STMMAC_PCS_RTBI (1 << 3)
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 60283543ffc8..83ad7c7935e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -248,7 +248,7 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
priv->plat->max_speed = 2500;
priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
- priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ priv->plat->mdio_bus_data->default_an_inband = false;
} else {
priv->plat->max_speed = 1000;
}
@@ -390,10 +390,11 @@ static int intel_crosststamp(ktime_t *device,
*device = ns_to_ktime(ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
- *system = convert_art_to_tsc(art_time);
+ system->cycles = art_time;
}
system->cycles *= intel_priv->crossts_adj;
+ system->cs_id = CSID_X86_ART;
priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
@@ -443,6 +444,16 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->rx_queues_cfg[0].pkt_route = 0x0;
}
+static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ /* plat->mdio_bus_data->has_xpcs has been set true, so there
+ * should always be an XPCS. The original code would always
+ * return this if present.
+ */
+ return &priv->hw->xpcs->pcs;
+}
+
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
@@ -585,19 +596,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Intel mgbe SGMII interface uses pcs-xcps */
if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
- plat->mdio_bus_data->has_xpcs = true;
- plat->mdio_bus_data->xpcs_an_inband = true;
- }
-
- /* For fixed-link setup, we clear xpcs_an_inband */
- if (fwnode) {
- struct fwnode_handle *fixed_node;
-
- fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
- if (fixed_node)
- plat->mdio_bus_data->xpcs_an_inband = false;
-
- fwnode_handle_put(fixed_node);
+ plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
+ plat->mdio_bus_data->default_an_inband = true;
+ plat->select_pcs = intel_mgbe_select_pcs;
}
/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
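intel_crosststamp() above now hands the raw ART count to the timekeeping core together with its clocksource ID, instead of calling convert_art_to_tsc() in the driver. A minimal sketch of the fill; art_count stands in for whatever the hardware snapshot returned, and foo_fill_art_counterval() is a hypothetical helper.

static void foo_fill_art_counterval(struct system_counterval_t *sys,
				    u64 art_count)
{
	sys->cycles = art_count;	/* raw ART ticks, not yet converted */
	sys->cs_id = CSID_X86_ART;	/* identifies the counter so the
					 * timekeeping core can convert it
					 */
}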
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e254b21fdb59..901a3c1959fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -21,6 +21,7 @@
#define RGMII_IO_MACRO_CONFIG2 0x1C
#define RGMII_IO_MACRO_DEBUG1 0x20
#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
+#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4
/* RGMII_IO_MACRO_CONFIG fields */
#define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
@@ -79,6 +80,9 @@
#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
+/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */
+#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3)
+
#define SGMII_10M_RX_CLK_DVDR 0x31
struct ethqos_emac_por {
@@ -93,7 +97,9 @@ struct ethqos_emac_driver_data {
bool has_emac_ge_3;
const char *link_clk_name;
bool has_integrated_pcs;
+ u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
+ bool needs_sgmii_loopback;
};
struct qcom_ethqos {
@@ -113,6 +119,7 @@ struct qcom_ethqos {
unsigned int num_por;
bool rgmii_config_loopback_en;
bool has_emac_ge_3;
+ bool needs_sgmii_loopback;
};
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
@@ -190,8 +197,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}
+static void
+qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
+{
+ if (!ethqos->needs_sgmii_loopback ||
+ ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX)
+ return;
+
+ rgmii_updatel(ethqos,
+ SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN,
+ enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0,
+ EMAC_WRAPPER_SGMII_PHY_CNTRL1);
+}
+
static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
+ qcom_ethqos_set_sgmii_loopback(ethqos, true);
rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
@@ -271,11 +292,13 @@ static const struct ethqos_emac_por emac_v4_0_0_por[] = {
static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.por = emac_v4_0_0_por,
- .num_por = ARRAY_SIZE(emac_v3_0_0_por),
+ .num_por = ARRAY_SIZE(emac_v4_0_0_por),
.rgmii_config_loopback_en = false,
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
.has_integrated_pcs = true,
+ .needs_sgmii_loopback = true,
+ .dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,
.dma_chan_offset = 0x1000,
@@ -605,6 +628,14 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
return 0;
}
+static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
+{
+ if (ethqos->serdes_speed != speed) {
+ phy_set_speed(ethqos->serdes_phy, speed);
+ ethqos->serdes_speed = speed;
+ }
+}
+
/* On interface toggle, MAC registers get reset.
* Configure MAC block for SGMII on ethernet phy link up
*/
@@ -622,9 +653,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
- if (ethqos->serdes_speed != SPEED_2500)
- phy_set_speed(ethqos->serdes_phy, SPEED_2500);
- ethqos->serdes_speed = SPEED_2500;
+ ethqos_set_serdes_speed(ethqos, SPEED_2500);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
break;
case SPEED_1000:
@@ -632,16 +661,12 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, SPEED_1000);
- ethqos->serdes_speed = SPEED_1000;
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, SPEED_1000);
- ethqos->serdes_speed = SPEED_1000;
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_10:
@@ -651,9 +676,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, ethqos->speed);
- ethqos->serdes_speed = SPEED_1000;
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
}
@@ -663,6 +686,14 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
return val;
}
+static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ priv->plat->max_speed = 2500;
+ priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
+}
+
static int ethqos_configure(struct qcom_ethqos *ethqos)
{
return ethqos->configure_func(ethqos);
@@ -672,6 +703,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo
{
struct qcom_ethqos *ethqos = priv;
+ qcom_ethqos_set_sgmii_loopback(ethqos, false);
ethqos->speed = speed;
ethqos_update_link_clk(ethqos, speed);
ethqos_configure(ethqos);
@@ -785,6 +817,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
case PHY_INTERFACE_MODE_RGMII_TXID:
ethqos->configure_func = ethqos_configure_rgmii;
break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500;
+ fallthrough;
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
@@ -807,6 +842,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->num_por = data->num_por;
ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
ethqos->has_emac_ge_3 = data->has_emac_ge_3;
+ ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback;
ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
if (IS_ERR(ethqos->link_clk))
@@ -845,6 +881,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
if (data->has_integrated_pcs)
plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+ if (data->dma_addr_width)
+ plat_dat->host_dma_width = data->dma_addr_width;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
index 848cf3c01f4a..59a7bd560f96 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
@@ -39,6 +39,12 @@ static void rzn1_dwmac_pcs_exit(struct stmmac_priv *priv)
miic_destroy(priv->hw->phylink_pcs);
}
+static struct phylink_pcs *rzn1_dwmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
static int rzn1_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -57,6 +63,7 @@ static int rzn1_dwmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = plat_dat;
plat_dat->pcs_init = rzn1_dwmac_pcs_init;
plat_dat->pcs_exit = rzn1_dwmac_pcs_exit;
+ plat_dat->select_pcs = rzn1_dwmac_select_pcs;
ret = stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index b3d45f9dfb55..fdb4c773ec98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -429,6 +429,12 @@ static void socfpga_dwmac_pcs_exit(struct stmmac_priv *priv)
lynx_pcs_destroy(priv->hw->phylink_pcs);
}
+static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -478,6 +484,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
plat_dat->pcs_init = socfpga_dwmac_pcs_init;
plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
+ plat_dat->select_pcs = socfpga_dwmac_select_pcs;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index c92dfc4ecf57..c1732955a697 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -53,12 +53,23 @@
#define SYSCFG_MCU_ETH_SEL_MII 0
#define SYSCFG_MCU_ETH_SEL_RMII 1
-/* STM32MP1 register definitions
+/* STM32MP2 register definitions */
+#define SYSCFG_MP2_ETH_MASK GENMASK(31, 0)
+
+#define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2)
+#define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1)
+#define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0)
+
+#define SYSCFG_ETHCR_ETH_SEL_MII 0
+#define SYSCFG_ETHCR_ETH_SEL_RGMII BIT(4)
+#define SYSCFG_ETHCR_ETH_SEL_RMII BIT(6)
+
+/* STM32MPx register definitions
*
* Below table summarizes the clock requirement and clock sources for
* supported phy interface modes.
* __________________________________________________________________________
- *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125Mhz from PHY|
+ *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125MHz from PHY|
*| | | 25MHz | 50MHz | |
* ---------------------------------------------------------------------------
*| MII | - | eth-ck | n/a | n/a |
@@ -90,6 +101,7 @@ struct stm32_dwmac {
int eth_ref_clk_sel_reg;
int irq_pwr_wakeup;
u32 mode_reg; /* MAC glue-logic mode register */
+ u32 mode_mask;
struct regmap *regmap;
u32 speed;
const struct stm32_ops *ops;
@@ -102,8 +114,9 @@ struct stm32_ops {
void (*resume)(struct stm32_dwmac *dwmac);
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
- u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
+ bool is_mp13, is_mp2;
+ u32 syscfg_clr_off;
};
static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
@@ -157,65 +170,190 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
return stm32_dwmac_clk_enable(dwmac, resume);
}
-static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- u32 reg = dwmac->mode_reg, clk_rate;
- int val;
- clk_rate = clk_get_rate(dwmac->clk_eth_ck);
- dwmac->enable_eth_ck = false;
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
- if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
- dwmac->enable_eth_ck = true;
- val = SYSCFG_PMCR_ETH_SEL_MII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
+ dwmac->enable_eth_ck = dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_GMII:
+ dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_RMII:
+ dwmac->enable_eth_ck = dwmac->eth_ref_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ default:
+ dwmac->enable_eth_ck = false;
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
+ return -EINVAL;
+ }
+}
+
+static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ const u32 clk_rate = clk_get_rate(dwmac->clk_eth_ck);
+
+ if (!dwmac->enable_eth_ck)
+ return 0;
+
+ switch (plat_dat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ if (clk_rate == ETH_CK_F_25M)
+ return 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M)
+ return 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz",
+ phy_modes(plat_dat->mac_interface), clk_rate);
+ return -EINVAL;
+}
+
+static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val = 0;
+
+ switch (plat_dat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /*
+ * STM32MP15xx supports both MII and GMII, STM32MP13xx MII only.
+ * SYSCFG_PMCSETR ETH_SELMII is present only on STM32MP15xx and
+ * acts as a selector between 0:GMII and 1:MII. As STM32MP13xx
+ * supports only MII, ETH_SELMII is not present.
+ */
+ if (!dwmac->ops->is_mp13) /* Select MII mode on STM32MP15xx */
+ val |= SYSCFG_PMCR_ETH_SEL_MII;
break;
case PHY_INTERFACE_MODE_GMII:
val = SYSCFG_PMCR_ETH_SEL_GMII;
- if (clk_rate == ETH_CK_F_25M &&
- (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_PMCR_ETH_SEL_RMII;
- if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
- (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
val = SYSCFG_PMCR_ETH_SEL_RGMII;
- if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) &&
- (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
break;
default:
- pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+
+ /* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */
+ val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK);
+
/* Need to update PMCCLRR (clear register) */
- regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
- dwmac->ops->syscfg_eth_mask);
+ regmap_write(dwmac->regmap, dwmac->ops->syscfg_clr_off,
+ dwmac->mode_mask);
/* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
- dwmac->ops->syscfg_eth_mask, val);
+ dwmac->mode_mask, val);
+}
+
+static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val = 0;
+
+ switch (plat_dat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = SYSCFG_ETHCR_ETH_SEL_RMII;
+ if (dwmac->enable_eth_ck) {
+ /* Internal clock ETH_CLK of 50MHz from RCC is used */
+ val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL;
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = SYSCFG_ETHCR_ETH_SEL_RGMII;
+ fallthrough;
+ case PHY_INTERFACE_MODE_GMII:
+ if (dwmac->enable_eth_ck) {
+ /* Internal clock ETH_CLK of 125MHz from RCC is used */
+ val |= SYSCFG_ETHCR_ETH_CLK_SEL;
+ }
+ break;
+ default:
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
+ /* Do not manage other interfaces */
+ return -EINVAL;
+ }
+
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+
+ /* Select PTP (IEEE1588) clock selection from RCC (ck_ker_ethxptp) */
+ val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL;
+
+ /* Update ETHCR (set register) */
+ return regmap_update_bits(dwmac->regmap, reg,
+ SYSCFG_MP2_ETH_MASK, val);
+}
+
+static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ int ret;
+
+ ret = stm32mp1_select_ethck_external(plat_dat);
+ if (ret)
+ return ret;
+
+ ret = stm32mp1_validate_ethck_rate(plat_dat);
+ if (ret)
+ return ret;
+
+ if (!dwmac->ops->is_mp2)
+ return stm32mp1_configure_pmcr(plat_dat);
+ else
+ return stm32mp2_configure_syscfg(plat_dat);
}
static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
@@ -227,21 +365,21 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_MCU_ETH_SEL_RMII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
default:
- pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+
return regmap_update_bits(dwmac->regmap, reg,
- dwmac->ops->syscfg_eth_mask, val << 23);
+ SYSCFG_MCU_ETH_MASK, val << 23);
}
static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
@@ -286,8 +424,24 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
return PTR_ERR(dwmac->regmap);
err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err)
+ if (err) {
dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
+ return err;
+ }
+
+ if (dwmac->ops->is_mp2)
+ return 0;
+
+ dwmac->mode_mask = SYSCFG_MP1_ETH_MASK;
+ err = of_property_read_u32_index(np, "st,syscon", 2, &dwmac->mode_mask);
+ if (err) {
+ if (dwmac->ops->is_mp13) {
+ dev_err(dev, "Sysconfig register mask must be set (%d)\n", err);
+ } else {
+ dev_dbg(dev, "Warning sysconfig register mask not set\n");
+ err = 0;
+ }
+ }
return err;
}
@@ -305,7 +459,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
/* Gigabit Ethernet 125MHz clock selection. */
dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
- /* Ethernet 50Mhz RMII clock selection */
+ /* Ethernet 50MHz RMII clock selection */
dwmac->eth_ref_clk_sel_reg =
of_property_read_bool(np, "st,eth-ref-clk-sel");
@@ -478,8 +632,7 @@ static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
stm32_dwmac_suspend, stm32_dwmac_resume);
static struct stm32_ops stm32mcu_dwmac_data = {
- .set_mode = stm32mcu_set_mode,
- .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK
+ .set_mode = stm32mcu_set_mode
};
static struct stm32_ops stm32mp1_dwmac_data = {
@@ -487,13 +640,35 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .syscfg_clr_off = 0x44,
+ .is_mp13 = false,
+ .clk_rx_enable_in_suspend = true
+};
+
+static struct stm32_ops stm32mp13_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .syscfg_clr_off = 0x08,
+ .is_mp13 = true,
+ .clk_rx_enable_in_suspend = true
+};
+
+static struct stm32_ops stm32mp25_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .is_mp2 = true,
.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {
{ .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data},
{ .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data},
+ { .compatible = "st,stm32mp13-dwmac", .data = &stm32mp13_dwmac_data},
+ { .compatible = "st,stm32mp25-dwmac", .data = &stm32mp25_dwmac_data},
{ }
};
MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 8555299443f4..d413d76a8936 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -15,7 +15,7 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac1000.h"
@@ -404,11 +404,6 @@ static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
-static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
-{
- dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
-}
-
static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
@@ -519,7 +514,6 @@ const struct stmmac_ops dwmac1000_ops = {
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
- .pcs_rane = dwmac1000_rane,
.pcs_get_adv_lp = dwmac1000_get_adv_lp,
.set_mac_loopback = dwmac1000_set_mac_loopback,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index daf79cdbd3ec..adccdd816ea9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -12,7 +12,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <asm/io.h>
+#include <linux/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 7667d103cd0e..14e847c0e1a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -15,7 +15,7 @@
*******************************************************************************/
#include <linux/crc32.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "stmmac.h"
#include "dwmac100.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index dea270f60cc3..b402fb54f613 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -14,7 +14,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <asm/io.h>
+#include <linux/io.h>
#include "dwmac100.h"
#include "dwmac_dma.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index b25774d69195..dbd9f93b2460 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -758,11 +758,6 @@ static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
-static void dwmac4_rane(void __iomem *ioaddr, bool restart)
-{
- dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
-}
-
static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
@@ -1215,7 +1210,6 @@ const struct stmmac_ops dwmac4_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
@@ -1260,7 +1254,6 @@ const struct stmmac_ops dwmac410_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
@@ -1309,7 +1302,6 @@ const struct stmmac_ops dwmac510_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f8e7775bb633..6a987cf598e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1554,9 +1554,6 @@ const struct stmmac_ops dwxgmac210_ops = {
.reset_eee_mode = dwxgmac2_reset_eee_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
- .pcs_ctrl_ane = NULL,
- .pcs_rane = NULL,
- .pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config,
@@ -1614,9 +1611,6 @@ const struct stmmac_ops dwxlgmac2_ops = {
.reset_eee_mode = dwxgmac2_reset_eee_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
- .pcs_ctrl_ane = NULL,
- .pcs_rane = NULL,
- .pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 90384db228b5..97934ccba5b1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -370,7 +370,6 @@ struct stmmac_ops {
/* PCS calls */
void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
bool loopback);
- void (*pcs_rane)(void __iomem *ioaddr, bool restart);
void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
/* Safety Features */
int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
@@ -484,8 +483,6 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
-#define stmmac_pcs_rane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_rane, __priv, __args)
#define stmmac_pcs_get_adv_lp(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
#define stmmac_safety_feat_config(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 542e2633a6f5..7008219fd88d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -11,10 +11,10 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phylink.h>
#include <linux/net_tstamp.h>
-#include <asm/io.h>
#include "stmmac.h"
#include "dwmac_dma.h"
@@ -1199,7 +1199,7 @@ static int stmmac_set_channels(struct net_device *dev,
}
static int stmmac_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct stmmac_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index f05bd757dfe5..5ef52ef2698f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
{
u32 num_snapshot, ts_status, tsync_int;
struct ptp_clock_event event;
+ u32 acr_value, channel;
unsigned long flags;
u64 ptp_time;
int i;
@@ -243,12 +244,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
+ acr_value = readl(priv->ptpaddr + PTP_ACR);
+ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
+
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
+ event.index = channel;
event.timestamp = ptp_time;
ptp_clock_event(priv->ptp_clock, &event);
}
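/* A minimal standalone sketch of the channel derivation added above, shown
 * for illustration only: FIELD_GET() extracts the auxiliary-snapshot enable
 * bits from the ACR value and, assuming exactly one channel is enabled (a
 * one-hot field), ilog2() turns that bit into a 0-based channel index.
 * EXAMPLE_ACR_MASK and the sample value are hypothetical; only the
 * FIELD_GET()/ilog2() pattern mirrors the patch.
 */
#include <linux/bitfield.h>
#include <linux/log2.h>

#define EXAMPLE_ACR_MASK	GENMASK(7, 4)	/* hypothetical ATSEN0..3 field */

static u32 example_ptp_channel(u32 acr_value)
{
	/* e.g. acr_value = 0x40: bit 6 set -> field = 0x4 -> channel 2 */
	return ilog2(FIELD_GET(EXAMPLE_ACR_MASK, acr_value));
}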
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b3afc7cb7d72..4b6a359e5a94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -471,13 +471,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
{
int eee_tw_timer = priv->eee_tw_timer;
- /* Using PCS we cannot dial with the phy registers at this stage
- * so we do not support extra feature like EEE.
- */
- if (priv->hw->pcs == STMMAC_PCS_TBI ||
- priv->hw->pcs == STMMAC_PCS_RTBI)
- return false;
-
/* Check if MAC core supports the EEE feature. */
if (!priv->dma_cap.eee)
return false;
@@ -956,11 +949,15 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ struct phylink_pcs *pcs;
- if (priv->hw->xpcs)
- return &priv->hw->xpcs->pcs;
+ if (priv->plat->select_pcs) {
+ pcs = priv->plat->select_pcs(priv, interface);
+ if (!IS_ERR(pcs))
+ return pcs;
+ }
- return priv->hw->phylink_pcs;
+ return NULL;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -1228,8 +1225,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.ovr_an_inband =
- mdio_bus_data->xpcs_an_inband;
+ priv->phylink_config.default_an_inband =
+ mdio_bus_data->default_an_inband;
/* Set the platform/firmware specified interface mode. Note, phylink
* deals with the PHY interface mode, not the MAC interface mode.
@@ -3953,9 +3950,7 @@ static int __stmmac_open(struct net_device *dev,
if (ret < 0)
return ret;
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI &&
- (!priv->hw->xpcs ||
+ if ((!priv->hw->xpcs ||
xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
ret = stmmac_init_phy(dev);
if (ret) {
@@ -4097,8 +4092,6 @@ static int stmmac_release(struct net_device *dev)
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
- netif_carrier_off(dev);
-
stmmac_release_ptp(priv);
pm_runtime_put(priv->device);
@@ -4244,18 +4237,32 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int nfrags = skb_shinfo(skb)->nr_frags;
- u32 queue = skb_get_queue_mapping(skb);
+ int tmp_pay_len = 0, first_tx, nfrags;
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
- int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ u32 pay_len, mss, queue;
u8 proto_hdr_len, hdr;
- u32 pay_len, mss;
dma_addr_t des;
+ bool set_ic;
int i;
+ /* Always insert VLAN tag to SKB payload for TSO frames.
+ *
+ * Never let the HW insert the VLAN tag, since segments split by
+ * the TSO engine would be left untagged by mistake.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb)) {
+ priv->xstats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ queue = skb_get_queue_mapping(skb);
+
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
@@ -4308,9 +4315,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
- /* Check if VLAN can be inserted by HW */
- has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
-
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -4320,9 +4324,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
desc = &tx_q->dma_tx[first_entry];
first = desc;
- if (has_vlan)
- stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
-
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
@@ -7662,9 +7663,10 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- priv->hw->hw_vlan_en = true;
-
+ if (priv->plat->has_gmac4) {
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ priv->hw->hw_vlan_en = true;
+ }
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
@@ -7689,8 +7691,6 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_RXHASH;
ndev->vlan_features |= ndev->features;
- /* TSO doesn't work on VLANs yet */
- ndev->vlan_features &= ~NETIF_F_TSO;
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
@@ -7739,16 +7739,12 @@ int stmmac_dvr_probe(struct device *device,
if (!pm_runtime_enabled(device))
pm_runtime_enable(device);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI) {
- /* MDIO bus Registration */
- ret = stmmac_mdio_register(ndev);
- if (ret < 0) {
- dev_err_probe(priv->device, ret,
- "%s: MDIO bus (id: %d) registration failed\n",
- __func__, priv->plat->bus_id);
- goto error_mdio_register;
- }
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ dev_err_probe(priv->device, ret,
+ "MDIO bus (id: %d) registration failed\n",
+ priv->plat->bus_id);
+ goto error_mdio_register;
}
if (priv->plat->speed_mode_2500)
@@ -7790,9 +7786,7 @@ error_netdev_register:
error_phy_setup:
stmmac_pcs_clean(ndev);
error_pcs_setup:
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
+ stmmac_mdio_unregister(ndev);
error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
@@ -7821,7 +7815,6 @@ void stmmac_dvr_remove(struct device *dev)
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
- netif_carrier_off(ndev);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
@@ -7833,10 +7826,8 @@ void stmmac_dvr_remove(struct device *dev)
reset_control_assert(priv->plat->stmmac_ahb_rst);
stmmac_pcs_clean(ndev);
+ stmmac_mdio_unregister(ndev);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index aa43117134d3..03f90676b3ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -497,35 +497,33 @@ int stmmac_mdio_reset(struct mii_bus *bus)
int stmmac_pcs_setup(struct net_device *ndev)
{
+ struct fwnode_handle *devnode, *pcsnode;
struct dw_xpcs *xpcs = NULL;
struct stmmac_priv *priv;
- int ret = -ENODEV;
- int mode, addr;
+ int addr, mode, ret;
priv = netdev_priv(ndev);
mode = priv->plat->phy_interface;
+ devnode = priv->plat->port_node;
if (priv->plat->pcs_init) {
ret = priv->plat->pcs_init(priv);
+ } else if (fwnode_property_present(devnode, "pcs-handle")) {
+ pcsnode = fwnode_find_reference(devnode, "pcs-handle", 0);
+ xpcs = xpcs_create_fwnode(pcsnode, mode);
+ fwnode_handle_put(pcsnode);
+ ret = PTR_ERR_OR_ZERO(xpcs);
} else if (priv->plat->mdio_bus_data &&
- priv->plat->mdio_bus_data->has_xpcs) {
- /* Try to probe the XPCS by scanning all addresses */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- xpcs = xpcs_create_mdiodev(priv->mii, addr, mode);
- if (IS_ERR(xpcs))
- continue;
-
- ret = 0;
- break;
- }
+ priv->plat->mdio_bus_data->pcs_mask) {
+ addr = ffs(priv->plat->mdio_bus_data->pcs_mask) - 1;
+ xpcs = xpcs_create_mdiodev(priv->mii, addr, mode);
+ ret = PTR_ERR_OR_ZERO(xpcs);
} else {
return 0;
}
- if (ret) {
- dev_warn(priv->device, "No xPCS found\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->device, ret, "No xPCS found\n");
priv->hw->xpcs = xpcs;
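/* A small sketch of the pcs_mask handling above, assuming the platform sets
 * exactly one bit in mdio_bus_data->pcs_mask: ffs() returns the 1-based
 * position of the lowest set bit, so ffs(mask) - 1 is the MDIO address of
 * the XPCS device. The sample value is illustrative only.
 */
static int example_pcs_addr(u32 pcs_mask)
{
	/* e.g. pcs_mask = BIT(16) -> ffs() returns 17 -> address 16 */
	return ffs(pcs_mask) - 1;
}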
@@ -610,7 +608,7 @@ int stmmac_mdio_register(struct net_device *ndev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
- new_bus->phy_mask = mdio_bus_data->phy_mask;
+ new_bus->phy_mask = mdio_bus_data->phy_mask | mdio_bus_data->pcs_mask;
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index 13a30e6df4c1..1bdf87b237c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -75,23 +75,6 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
}
/**
- * dwmac_rane - To restart ANE
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @restart: to restart ANE
- * Description: this is to just restart the Auto-Negotiation.
- */
-static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
-{
- u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
-
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
-
- writel(value, ioaddr + GMAC_AN_CTRL(reg));
-}
-
-/**
* dwmac_ctrl_ane - To program the AN Control Register.
* @ioaddr: IO registers pointer
* @reg: Base address of the AN Control Register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 54797edc9b38..ad868e8d195d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -764,8 +764,8 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
* Description: Call the platform's init callback (if any) and propagate
* the return value.
*/
-int stmmac_pltfr_init(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
+static int stmmac_pltfr_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
{
int ret = 0;
@@ -774,7 +774,6 @@ int stmmac_pltfr_init(struct platform_device *pdev,
return ret;
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_init);
/**
* stmmac_pltfr_exit
@@ -782,13 +781,12 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_init);
* @plat: driver data platform structure
* Description: Call the platform's exit callback (if any).
*/
-void stmmac_pltfr_exit(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
+static void stmmac_pltfr_exit(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
{
if (plat->exit)
plat->exit(pdev, plat->bsp_priv);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_exit);
/**
* stmmac_pltfr_probe
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index bb6fc7e59aed..72dc1a32e46d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -17,11 +17,6 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
-int stmmac_pltfr_init(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
-void stmmac_pltfr_exit(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
-
int stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 222540b55480..996f2bcd07a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt)
{
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ s64 port_transmit_rate_kbps;
u32 queue = qopt->queue;
- u32 ptr, speed_div;
u32 mode_to_use;
u64 value;
+ u32 ptr;
int ret;
/* Queue 0 is not AVB capable */
@@ -355,30 +356,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- /* Port Transmit Rate and Speed Divider */
- switch (priv->speed) {
- case SPEED_10000:
- ptr = 32;
- speed_div = 10000000;
- break;
- case SPEED_5000:
- ptr = 32;
- speed_div = 5000000;
- break;
- case SPEED_2500:
- ptr = 8;
- speed_div = 2500000;
- break;
- case SPEED_1000:
- ptr = 8;
- speed_div = 1000000;
- break;
- case SPEED_100:
- ptr = 4;
- speed_div = 100000;
- break;
- default:
- return -EOPNOTSUPP;
+ port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+
+ if (qopt->enable) {
+ /* Port Transmit Rate and Speed Divider */
+ switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ case SPEED_10000:
+ case SPEED_5000:
+ ptr = 32;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ ptr = 8;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ break;
+ default:
+ netdev_err(priv->dev,
+ "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+ port_transmit_rate_kbps);
+ return -EINVAL;
+ }
+ } else {
+ ptr = 0;
}
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@@ -398,10 +399,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
}
/* Final adjustments for HW */
- value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
- value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
value = qopt->hicredit * 1024ll * 8;
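/* A worked example of the new scaling above, with made-up qdisc parameters:
 * for idleSlope = 20000 kb/s and sendSlope = -980000 kb/s the port transmit
 * rate is 20000 - (-980000) = 1000000 kb/s, div_s64(..., 1000) yields
 * SPEED_1000 and therefore ptr = 8, so
 *
 *	idle_slope = 20000  * 1024 * 8 / 1000000 = 163
 *	send_slope = 980000 * 1024 * 8 / 1000000 = 8028
 *
 * (values in HW credit units, truncated by div_s64()). The numbers are
 * illustrative; only the formulas mirror the code above.
 */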
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index f8e133604146..131786aa4d5b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -21,8 +21,6 @@
#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"
-MODULE_LICENSE("Dual BSD/GPL");
-
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
@@ -725,3 +723,8 @@ void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
XLGMAC_PR("=====================================================\n");
XLGMAC_PR("\n");
}
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
index fa8604d7b797..36fe538e3332 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -71,8 +71,3 @@ static struct pci_driver xlgmac_pci_driver = {
};
module_pci_driver(xlgmac_pci_driver);
-
-MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
-MODULE_VERSION(XLGMAC_DRV_VERSION);
-MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
index 8735633765a1..6db2c9817445 100644
--- a/drivers/net/ethernet/tehuti/Kconfig
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -23,4 +23,19 @@ config TEHUTI
help
Tehuti Networks 10G Ethernet NIC
+config TEHUTI_TN40
+ tristate "Tehuti Networks TN40xx 10G Ethernet adapters"
+ depends on PCI
+ select PAGE_POOL
+ select FW_LOADER
+ select PHYLINK
+ help
+ This driver supports 10G Ethernet adapters using Tehuti Networks
+ TN40xx chips. Currently, adapters with the Applied Micro Circuits
+ Corporation QT2025 are supported: Tehuti Networks TN9310,
+ DLink DXE-810S, ASUS XG-C100F, and Edimax EN-9320.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tn40xx.
+
endif # NET_VENDOR_TEHUTI
diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile
index 13a0ddd62088..0d4f4d63a65c 100644
--- a/drivers/net/ethernet/tehuti/Makefile
+++ b/drivers/net/ethernet/tehuti/Makefile
@@ -4,3 +4,6 @@
#
obj-$(CONFIG_TEHUTI) += tehuti.o
+
+tn40xx-y := tn40.o tn40_mdio.o tn40_phy.o
+obj-$(CONFIG_TEHUTI_TN40) += tn40xx.o
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
new file mode 100644
index 000000000000..259bdac24cf2
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -0,0 +1,1850 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+
+#include "tn40.h"
+
+#define TN40_SHORT_PACKET_SIZE 60
+#define TN40_FIRMWARE_NAME "tehuti/bdx.bin"
+
+static void tn40_enable_interrupts(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_IMR, priv->isr_mask);
+}
+
+static void tn40_disable_interrupts(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_IMR, 0);
+}
+
+static int tn40_fifo_alloc(struct tn40_priv *priv, struct tn40_fifo *f,
+ int fsz_type,
+ u16 reg_cfg0, u16 reg_cfg1,
+ u16 reg_rptr, u16 reg_wptr)
+{
+ u16 memsz = TN40_FIFO_SIZE * (1 << fsz_type);
+ u64 cfg_base;
+
+ memset(f, 0, sizeof(struct tn40_fifo));
+ /* 1K extra space is allocated at the end of the fifo to simplify
+ * processing of descriptors that wrap around the fifo's end.
+ */
+ f->va = dma_alloc_coherent(&priv->pdev->dev,
+ memsz + TN40_FIFO_EXTRA_SPACE, &f->da,
+ GFP_KERNEL);
+ if (!f->va)
+ return -ENOMEM;
+
+ f->reg_cfg0 = reg_cfg0;
+ f->reg_cfg1 = reg_cfg1;
+ f->reg_rptr = reg_rptr;
+ f->reg_wptr = reg_wptr;
+ f->rptr = 0;
+ f->wptr = 0;
+ f->memsz = memsz;
+ f->size_mask = memsz - 1;
+ cfg_base = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | fsz_type);
+ tn40_write_reg(priv, reg_cfg0, cfg_base);
+ tn40_write_reg(priv, reg_cfg1, upper_32_bits(f->da));
+ return 0;
+}
+
+static void tn40_fifo_free(struct tn40_priv *priv, struct tn40_fifo *f)
+{
+ dma_free_coherent(&priv->pdev->dev,
+ f->memsz + TN40_FIFO_EXTRA_SPACE, f->va, f->da);
+}
+
+static struct tn40_rxdb *tn40_rxdb_alloc(int nelem)
+{
+ size_t size = sizeof(struct tn40_rxdb) + (nelem * sizeof(int)) +
+ (nelem * sizeof(struct tn40_rx_map));
+ struct tn40_rxdb *db;
+ int i;
+
+ db = vzalloc(size);
+ if (db) {
+ db->stack = (int *)(db + 1);
+ db->elems = (void *)(db->stack + nelem);
+ db->nelem = nelem;
+ db->top = nelem;
+ /* make the first alloc close to db struct */
+ for (i = 0; i < nelem; i++)
+ db->stack[i] = nelem - i - 1;
+ }
+ return db;
+}
+
+static void tn40_rxdb_free(struct tn40_rxdb *db)
+{
+ vfree(db);
+}
+
+static int tn40_rxdb_alloc_elem(struct tn40_rxdb *db)
+{
+ return db->stack[--db->top];
+}
+
+static void *tn40_rxdb_addr_elem(struct tn40_rxdb *db, unsigned int n)
+{
+ return db->elems + n;
+}
+
+static int tn40_rxdb_available(struct tn40_rxdb *db)
+{
+ return db->top;
+}
+
+static void tn40_rxdb_free_elem(struct tn40_rxdb *db, unsigned int n)
+{
+ db->stack[db->top++] = n;
+}
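/* A minimal usage sketch (illustrative only) of the rxdb free-index stack
 * above: indices are popped when a buffer is placed into the ring and pushed
 * back once its page leaves the ring, so db->top always equals the number of
 * free slots reported by tn40_rxdb_available().
 */
static void example_rxdb_roundtrip(struct tn40_rxdb *db)
{
	int idx;

	if (!tn40_rxdb_available(db))
		return;

	idx = tn40_rxdb_alloc_elem(db);		/* pop a free slot index */
	/* ... store a page via tn40_rxdb_addr_elem(db, idx) ... */
	tn40_rxdb_free_elem(db, idx);		/* push it back when done */
}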
+
+/**
+ * tn40_create_rx_ring - Initialize all RX-related HW and SW resources
+ * @priv: NIC private structure
+ *
+ * create_rx_ring creates rxf and rxd fifos, updates the relevant HW registers,
+ * preallocates skbs for rx. It assumes that Rx is disabled in HW. Functions
+ * are grouped for better cache usage.
+ *
+ * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
+ * filled and packets will be dropped by the NIC without getting into the host
+ * or generating interrupts. In this situation the host has no chance of
+ * processing all the packets. Dropping packets by the NIC is cheaper, since it
+ * takes 0 CPU cycles.
+ *
+ * Return: 0 on success and negative value on error.
+ */
+static int tn40_create_rx_ring(struct tn40_priv *priv)
+{
+ struct page_pool_params pp = {
+ .dev = &priv->pdev->dev,
+ .napi = &priv->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .netdev = priv->ndev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .max_len = PAGE_SIZE,
+ };
+ int ret, pkt_size, nr;
+
+ priv->page_pool = page_pool_create(&pp);
+ if (IS_ERR(priv->page_pool))
+ return PTR_ERR(priv->page_pool);
+
+ ret = tn40_fifo_alloc(priv, &priv->rxd_fifo0.m, priv->rxd_size,
+ TN40_REG_RXD_CFG0_0, TN40_REG_RXD_CFG1_0,
+ TN40_REG_RXD_RPTR_0, TN40_REG_RXD_WPTR_0);
+ if (ret)
+ goto err_destroy_page_pool;
+
+ ret = tn40_fifo_alloc(priv, &priv->rxf_fifo0.m, priv->rxf_size,
+ TN40_REG_RXF_CFG0_0, TN40_REG_RXF_CFG1_0,
+ TN40_REG_RXF_RPTR_0, TN40_REG_RXF_WPTR_0);
+ if (ret)
+ goto err_free_rxd;
+
+ pkt_size = priv->ndev->mtu + VLAN_ETH_HLEN;
+ priv->rxf_fifo0.m.pktsz = pkt_size;
+ nr = priv->rxf_fifo0.m.memsz / sizeof(struct tn40_rxf_desc);
+ priv->rxdb0 = tn40_rxdb_alloc(nr);
+ if (!priv->rxdb0) {
+ ret = -ENOMEM;
+ goto err_free_rxf;
+ }
+ return 0;
+err_free_rxf:
+ tn40_fifo_free(priv, &priv->rxf_fifo0.m);
+err_free_rxd:
+ tn40_fifo_free(priv, &priv->rxd_fifo0.m);
+err_destroy_page_pool:
+ page_pool_destroy(priv->page_pool);
+ return ret;
+}
+
+static void tn40_rx_free_buffers(struct tn40_priv *priv)
+{
+ struct tn40_rxdb *db = priv->rxdb0;
+ struct tn40_rx_map *dm;
+ u16 i;
+
+ netdev_dbg(priv->ndev, "total =%d free =%d busy =%d\n", db->nelem,
+ tn40_rxdb_available(db),
+ db->nelem - tn40_rxdb_available(db));
+
+ for (i = 0; i < db->nelem; i++) {
+ dm = tn40_rxdb_addr_elem(db, i);
+ if (dm->page)
+ page_pool_put_full_page(priv->page_pool, dm->page,
+ false);
+ }
+}
+
+static void tn40_destroy_rx_ring(struct tn40_priv *priv)
+{
+ if (priv->rxdb0) {
+ tn40_rx_free_buffers(priv);
+ tn40_rxdb_free(priv->rxdb0);
+ priv->rxdb0 = NULL;
+ }
+ tn40_fifo_free(priv, &priv->rxf_fifo0.m);
+ tn40_fifo_free(priv, &priv->rxd_fifo0.m);
+ page_pool_destroy(priv->page_pool);
+}
+
+static void tn40_set_rx_desc(struct tn40_priv *priv, int idx, u64 dma)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rxf_desc *rxfd;
+ int delta;
+
+ rxfd = (struct tn40_rxf_desc *)(f->m.va + f->m.wptr);
+ rxfd->info = cpu_to_le32(0x10003); /* INFO =1 BC =3 */
+ rxfd->va_lo = cpu_to_le32(idx);
+ rxfd->pa_lo = cpu_to_le32(lower_32_bits(dma));
+ rxfd->pa_hi = cpu_to_le32(upper_32_bits(dma));
+ rxfd->len = cpu_to_le32(f->m.pktsz);
+ f->m.wptr += sizeof(struct tn40_rxf_desc);
+ delta = f->m.wptr - f->m.memsz;
+ if (unlikely(delta >= 0)) {
+ f->m.wptr = delta;
+ if (delta > 0) {
+ memcpy(f->m.va, f->m.va + f->m.memsz, delta);
+ netdev_dbg(priv->ndev,
+ "wrapped rxd descriptor\n");
+ }
+ }
+}
+
+/**
+ * tn40_rx_alloc_buffers - Fill rxf fifo with buffers.
+ *
+ * @priv: NIC's private structure
+ *
+ * rx_alloc_buffers allocates buffers via the page pool API, builds rxf descs
+ * and pushes them (rxf descr) into the rxf fifo. The pages are stored in rxdb.
+ * To calculate the free space, we use the cached values of RPTR and WPTR
+ * when needed. This function also updates RPTR and WPTR.
+ */
+static void tn40_rx_alloc_buffers(struct tn40_priv *priv)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rxdb *db = priv->rxdb0;
+ struct tn40_rx_map *dm;
+ struct page *page;
+ int dno, i, idx;
+
+ dno = tn40_rxdb_available(db) - 1;
+ for (i = dno; i > 0; i--) {
+ page = page_pool_dev_alloc_pages(priv->page_pool);
+ if (!page)
+ break;
+
+ idx = tn40_rxdb_alloc_elem(db);
+ tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(page));
+ dm = tn40_rxdb_addr_elem(db, idx);
+ dm->page = page;
+ }
+ if (i != dno)
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr 0x%x\n",
+ f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ netdev_dbg(priv->ndev, "read_reg 0x%04x f->m.reg_rptr=0x%x\n",
+ f->m.reg_rptr, tn40_read_reg(priv, f->m.reg_rptr));
+ netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr=0x%x\n",
+ f->m.reg_wptr, tn40_read_reg(priv, f->m.reg_wptr));
+}
+
+static void tn40_recycle_rx_buffer(struct tn40_priv *priv,
+ struct tn40_rxd_desc *rxdd)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rx_map *dm;
+ int idx;
+
+ idx = le32_to_cpu(rxdd->va_lo);
+ dm = tn40_rxdb_addr_elem(priv->rxdb0, idx);
+ tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(dm->page));
+
+ tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+}
+
+static int tn40_rx_receive(struct tn40_priv *priv, int budget)
+{
+ struct tn40_rxd_fifo *f = &priv->rxd_fifo0;
+ u32 rxd_val1, rxd_err, pkt_id;
+ int tmp_len, size, done = 0;
+ struct tn40_rxdb *db = NULL;
+ struct tn40_rxd_desc *rxdd;
+ struct tn40_rx_map *dm;
+ struct sk_buff *skb;
+ u16 len, rxd_vlan;
+ int idx;
+
+ f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_WR_PTR;
+ size = f->m.wptr - f->m.rptr;
+ if (size < 0)
+ size += f->m.memsz; /* Size is negative :-) */
+
+ while (size > 0) {
+ rxdd = (struct tn40_rxd_desc *)(f->m.va + f->m.rptr);
+ db = priv->rxdb0;
+
+ /* We have a chicken and egg problem here. If the
+ * descriptor is wrapped we first need to copy the tail
+ * of the descriptor to the end of the buffer before
+ * extracting values from the descriptor. However in
+ * order to know if the descriptor is wrapped we need to
+ * obtain the length of the descriptor from (the
+ * wrapped) descriptor. Luckily the length is the first
+ * word of the descriptor. Descriptor lengths are
+ * multiples of 8 bytes, so in case of a wrapped
+ * descriptor the first 8 bytes are guaranteed to appear
+ * before the end of the buffer. We first obtain the
+ * length, we then copy the rest of the descriptor if
+ * needed and then extract the rest of the values from
+ * the descriptor.
+ *
+ * Do not change the order of operations as it will
+ * break the code!!!
+ */
+ rxd_val1 = le32_to_cpu(rxdd->rxd_val1);
+ tmp_len = TN40_GET_RXD_BC(rxd_val1) << 3;
+ pkt_id = TN40_GET_RXD_PKT_ID(rxd_val1);
+ size -= tmp_len;
+ /* CHECK FOR A PARTIALLY ARRIVED DESCRIPTOR */
+ if (size < 0) {
+ netdev_dbg(priv->ndev,
+ "%s partially arrived desc tmp_len %d\n",
+ __func__, tmp_len);
+ break;
+ }
+ /* make sure that the descriptor has fully arrived
+ * before reading the rest of the descriptor.
+ */
+ rmb();
+
+ /* A special treatment is given to non-contiguous
+ * descriptors that start near the end, wrap around
+ * and continue at the beginning. The second part is
+ * copied right after the first, and then the descriptor
+ * is interpreted as normal. The fifo has an extra
+ * space to allow such operations.
+ */
+
+ /* HAVE WE REACHED THE END OF THE QUEUE? */
+ f->m.rptr += tmp_len;
+ tmp_len = f->m.rptr - f->m.memsz;
+ if (unlikely(tmp_len >= 0)) {
+ f->m.rptr = tmp_len;
+ if (tmp_len > 0) {
+ /* COPY PARTIAL DESCRIPTOR
+ * TO THE END OF THE QUEUE
+ */
+ netdev_dbg(priv->ndev,
+ "wrapped desc rptr=%d tmp_len=%d\n",
+ f->m.rptr, tmp_len);
+ memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
+ }
+ }
+ idx = le32_to_cpu(rxdd->va_lo);
+ dm = tn40_rxdb_addr_elem(db, idx);
+ prefetch(dm);
+
+ len = le16_to_cpu(rxdd->len);
+ rxd_vlan = le16_to_cpu(rxdd->rxd_vlan);
+ /* CHECK FOR ERRORS */
+ rxd_err = TN40_GET_RXD_ERR(rxd_val1);
+ if (unlikely(rxd_err)) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_errors++;
+ u64_stats_update_end(&priv->syncp);
+ tn40_recycle_rx_buffer(priv, rxdd);
+ continue;
+ }
+
+ skb = napi_build_skb(page_address(dm->page), PAGE_SIZE);
+ if (!skb) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_dropped++;
+ priv->alloc_fail++;
+ u64_stats_update_end(&priv->syncp);
+ tn40_recycle_rx_buffer(priv, rxdd);
+ break;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+ skb->ip_summed =
+ (pkt_id == 0) ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ if (TN40_GET_RXD_VTAG(rxd_val1))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ TN40_GET_RXD_VLAN_TCI(rxd_vlan));
+
+ dm->page = NULL;
+ tn40_rxdb_free_elem(db, idx);
+
+ napi_gro_receive(&priv->napi, skb);
+
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_bytes += len;
+ u64_stats_update_end(&priv->syncp);
+
+ if (unlikely(++done >= budget))
+ break;
+ }
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_packets += done;
+ u64_stats_update_end(&priv->syncp);
+ /* FIXME: Do something to minimize pci accesses */
+ tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
+ tn40_rx_alloc_buffers(priv);
+ return done;
+}
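/* A worked example of the wrap handling in tn40_rx_receive() above, using
 * made-up sizes: assume memsz = 4096, rptr = 4080 and a 32-byte descriptor.
 * After rptr += 32 we get tmp_len = 4112 - 4096 = 16 > 0, so the 16 bytes
 * that physically sit at va[0..15] are copied to va[4096..4111] (the extra
 * space allocated in tn40_fifo_alloc()), the descriptor at va + 4080 can be
 * parsed as one contiguous block, and rptr is reset to 16.
 */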
+
+/* TX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of TX communication channels between driver and NIC.
+ * 1) TX Free Fifo - TXF - Holds ack descriptors for sent packets.
+ * 2) TX Data Fifo - TXD - Holds descriptors of full buffers.
+ *
+ * Currently the NIC supports TSO, checksumming and gather DMA.
+ * UFO and IP fragmentation are on the way.
+ *
+ * TX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * TXDB is used to keep track of all skbs owned by SW and their DMA addresses.
+ * For TX case, ownership lasts from getting the packet via hard_xmit and
+ * until the HW acknowledges sending the packet by TXF descriptors.
+ * TXDB is implemented as a cyclic buffer.
+ *
+ * FIFO objects keep info about the fifo's size and location, relevant HW
+ * registers, usage and skb db. Each RXD and RXF fifo has its own fifo
+ * structure. Implemented as a simple struct.
+ *
+ * TX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * OS calls the driver's hard_xmit method with a packet to send. The driver
+ * creates DMA mappings, builds TXD descriptors and kicks the HW by updating
+ * TXD WPTR.
+ *
+ * When a packet is sent, the HW writes a TXF descriptor and the SW
+ * frees the original skb. To prevent TXD fifo overflow without
+ * reading HW registers every time, the SW deploys "tx level"
+ * technique. Upon startup, the tx level is initialized to TXD fifo
+ * length. For every sent packet, the SW gets its TXD descriptor size
+ * (from a pre-calculated array) and subtracts it from tx level. The
+ * size is also stored in txdb. When a TXF ack arrives, the SW fetches
+ * the size of the original TXD descriptor from the txdb and adds it
+ * to the tx level. When the Tx level drops below some predefined
+ * threshold, the driver stops the TX queue. When the TX level rises
+ * above that level, the tx queue is enabled again.
+ *
+ * This technique avoids excessive reading of RPTR and WPTR registers.
+ * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
+ */
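/* A small worked example of the "tx level" bookkeeping described above, with
 * illustrative numbers only: assume the TXD fifo is 16 KiB, so the level
 * starts at 16384. Sending a 2-fragment packet consumes
 * tn40_txd_sizes[2].bytes = 0x38 = 56 bytes, dropping the level to 16328;
 * the same 56 bytes are stored negated in the txdb entry that carries the
 * skb pointer, and tn40_tx_cleanup() adds them back once the matching TXF
 * ack arrives. The queue is stopped when the level falls below
 * TN40_MIN_TX_LEVEL and woken again once it climbs above half of
 * TN40_MAX_TX_LEVEL.
 */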
+static void tn40_do_tx_db_ptr_next(struct tn40_txdb *db,
+ struct tn40_tx_map **pptr)
+{
+ ++*pptr;
+ if (unlikely(*pptr == db->end))
+ *pptr = db->start;
+}
+
+static void tn40_tx_db_inc_rptr(struct tn40_txdb *db)
+{
+ tn40_do_tx_db_ptr_next(db, &db->rptr);
+}
+
+static void tn40_tx_db_inc_wptr(struct tn40_txdb *db)
+{
+ tn40_do_tx_db_ptr_next(db, &db->wptr);
+}
+
+static int tn40_tx_db_init(struct tn40_txdb *d, int sz_type)
+{
+ int memsz = TN40_FIFO_SIZE * (1 << (sz_type + 1));
+
+ d->start = vzalloc(memsz);
+ if (!d->start)
+ return -ENOMEM;
+ /* In order to differentiate between an empty db state and a full db
+ * state, at least one element is always kept unused so that a full db
+ * never reaches rptr == wptr, which means that the db is empty.
+ */
+ d->size = memsz / sizeof(struct tn40_tx_map) - 1;
+ d->end = d->start + d->size + 1; /* just after last element */
+
+ /* All dbs are created empty */
+ d->rptr = d->start;
+ d->wptr = d->start;
+ return 0;
+}
+
+static void tn40_tx_db_close(struct tn40_txdb *d)
+{
+ if (d->start) {
+ vfree(d->start);
+ d->start = NULL;
+ }
+}
+
+/* Sizes of tx desc (including padding if needed) as a function of the SKB's
+ * frag number:
+ * 7 - the number of lwords in a txd with one phys buffer
+ * 3 - the number of lwords used for every additional phys buffer
+ * for (i = 0; i < TN40_MAX_PBL; i++) {
+ * lwords = 7 + (i * 3);
+ * if (lwords & 1)
+ * lwords++; pad it with 1 lword
+ * tn40_txd_sizes[i].bytes = lwords << 2;
+ * tn40_txd_sizes[i].qwords = lwords >> 1;
+ * }
+ */
+static struct {
+ u16 bytes;
+ u16 qwords; /* qword = 64 bit */
+} tn40_txd_sizes[] = {
+ {0x20, 0x04},
+ {0x28, 0x05},
+ {0x38, 0x07},
+ {0x40, 0x08},
+ {0x50, 0x0a},
+ {0x58, 0x0b},
+ {0x68, 0x0d},
+ {0x70, 0x0e},
+ {0x80, 0x10},
+ {0x88, 0x11},
+ {0x98, 0x13},
+ {0xa0, 0x14},
+ {0xb0, 0x16},
+ {0xb8, 0x17},
+ {0xc8, 0x19},
+ {0xd0, 0x1a},
+ {0xe0, 0x1c},
+ {0xe8, 0x1d},
+ {0xf8, 0x1f},
+};
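/* A runnable version of the generator loop quoted in the comment above,
 * iterating over the table itself instead of assuming a TN40_MAX_PBL value;
 * it reproduces e.g. entry 0 as 7 lwords -> padded to 8 -> 0x20 bytes and
 * 0x04 qwords, matching tn40_txd_sizes[0].
 */
static void example_fill_txd_sizes(void)
{
	size_t i;
	int lwords;

	for (i = 0; i < ARRAY_SIZE(tn40_txd_sizes); i++) {
		lwords = 7 + (i * 3);
		if (lwords & 1)
			lwords++;	/* pad to a whole qword */
		tn40_txd_sizes[i].bytes = lwords << 2;
		tn40_txd_sizes[i].qwords = lwords >> 1;
	}
}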
+
+static void tn40_pbl_set(struct tn40_pbl *pbl, dma_addr_t dma, int len)
+{
+ pbl->len = cpu_to_le32(len);
+ pbl->pa_lo = cpu_to_le32(lower_32_bits(dma));
+ pbl->pa_hi = cpu_to_le32(upper_32_bits(dma));
+}
+
+static void tn40_txdb_set(struct tn40_txdb *db, dma_addr_t dma, int len)
+{
+ db->wptr->len = len;
+ db->wptr->addr.dma = dma;
+}
+
+struct tn40_mapping_info {
+ dma_addr_t dma;
+ size_t size;
+};
+
+/**
+ * tn40_tx_map_skb - create and store DMA mappings for skb's data blocks
+ * @priv: NIC private structure
+ * @skb: socket buffer to map
+ * @txdd: pointer to tx descriptor to be updated
+ * @pkt_len: pointer to an unsigned int that receives the total packet length
+ *
+ * This function creates DMA mappings for skb's data blocks and writes them to
+ * PBL of a new tx descriptor. It also stores them in the tx db, so they could
+ * be unmapped after the data has been sent. It is the responsibility of the
+ * caller to make sure that there is enough space in the txdb. The last
+ * element holds a pointer to skb itself and is marked with a zero length.
+ *
+ * Return: 0 on success and negative value on error.
+ */
+static int tn40_tx_map_skb(struct tn40_priv *priv, struct sk_buff *skb,
+ struct tn40_txd_desc *txdd, unsigned int *pkt_len)
+{
+ struct tn40_mapping_info info[TN40_MAX_PBL];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct tn40_pbl *pbl = &txdd->pbl[0];
+ struct tn40_txdb *db = &priv->txdb;
+ unsigned int size;
+ int i, len, ret;
+ dma_addr_t dma;
+
+ netdev_dbg(priv->ndev, "TX skb %p skbLen %d dataLen %d frags %d\n", skb,
+ skb->len, skb->data_len, nr_frags);
+ if (nr_frags > TN40_MAX_PBL - 1) {
+ ret = skb_linearize(skb);
+ if (ret)
+ return ret;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ }
+ /* initial skb */
+ len = skb->len - skb->data_len;
+ dma = dma_map_single(&priv->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(&priv->pdev->dev, dma);
+ if (ret)
+ return ret;
+
+ tn40_txdb_set(db, dma, len);
+ tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
+ *pkt_len = db->wptr->len;
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ size = skb_frag_size(frag);
+ dma = skb_frag_dma_map(&priv->pdev->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+
+ ret = dma_mapping_error(&priv->pdev->dev, dma);
+ if (ret)
+ goto mapping_error;
+ info[i].dma = dma;
+ info[i].size = size;
+ }
+
+ for (i = 0; i < nr_frags; i++) {
+ tn40_tx_db_inc_wptr(db);
+ tn40_txdb_set(db, info[i].dma, info[i].size);
+ tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
+ *pkt_len += db->wptr->len;
+ }
+
+ /* SHORT_PKT_FIX */
+ if (skb->len < TN40_SHORT_PACKET_SIZE)
+ ++nr_frags;
+
+ /* Add skb clean up info. */
+ tn40_tx_db_inc_wptr(db);
+ db->wptr->len = -tn40_txd_sizes[nr_frags].bytes;
+ db->wptr->addr.skb = skb;
+ tn40_tx_db_inc_wptr(db);
+
+ return 0;
+ mapping_error:
+ dma_unmap_page(&priv->pdev->dev, db->wptr->addr.dma, db->wptr->len,
+ DMA_TO_DEVICE);
+ for (; i > 0; i--)
+ dma_unmap_page(&priv->pdev->dev, info[i - 1].dma,
+ info[i - 1].size, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+static int tn40_create_tx_ring(struct tn40_priv *priv)
+{
+ int ret;
+
+ ret = tn40_fifo_alloc(priv, &priv->txd_fifo0.m, priv->txd_size,
+ TN40_REG_TXD_CFG0_0, TN40_REG_TXD_CFG1_0,
+ TN40_REG_TXD_RPTR_0, TN40_REG_TXD_WPTR_0);
+ if (ret)
+ return ret;
+
+ ret = tn40_fifo_alloc(priv, &priv->txf_fifo0.m, priv->txf_size,
+ TN40_REG_TXF_CFG0_0, TN40_REG_TXF_CFG1_0,
+ TN40_REG_TXF_RPTR_0, TN40_REG_TXF_WPTR_0);
+ if (ret)
+ goto err_free_txd;
+
+ /* The TX db has to keep mappings for all packets sent (on
+ * TxD) and not yet reclaimed (on TxF).
+ */
+ ret = tn40_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size));
+ if (ret)
+ goto err_free_txf;
+
+ /* SHORT_PKT_FIX */
+ priv->b0_len = 64;
+ priv->b0_va = dma_alloc_coherent(&priv->pdev->dev, priv->b0_len,
+ &priv->b0_dma, GFP_KERNEL);
+ if (!priv->b0_va)
+ goto err_free_db;
+
+ priv->tx_level = TN40_MAX_TX_LEVEL;
+ priv->tx_update_mark = priv->tx_level - 1024;
+ return 0;
+err_free_db:
+ tn40_tx_db_close(&priv->txdb);
+err_free_txf:
+ tn40_fifo_free(priv, &priv->txf_fifo0.m);
+err_free_txd:
+ tn40_fifo_free(priv, &priv->txd_fifo0.m);
+ return -ENOMEM;
+}
+
+/**
+ * tn40_tx_space - Calculate the available space in the TX fifo.
+ * @priv: NIC private structure
+ *
+ * Return: available space in TX fifo in bytes
+ */
+static int tn40_tx_space(struct tn40_priv *priv)
+{
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int fsize;
+
+ f->m.rptr = tn40_read_reg(priv, f->m.reg_rptr) & TN40_TXF_WPTR_WR_PTR;
+ fsize = f->m.rptr - f->m.wptr;
+ if (fsize <= 0)
+ fsize = f->m.memsz + fsize;
+ return fsize;
+}
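/* A worked example of the free-space computation above, with illustrative
 * numbers: for memsz = 4096, rptr = 1000 and wptr = 3000 the difference is
 * 1000 - 3000 = -2000, so the reported space is 4096 - 2000 = 2096 bytes;
 * for rptr = 3000 and wptr = 1000 it is simply 2000 bytes. The rptr == wptr
 * case is reported as a full memsz, which is why callers such as
 * tn40_tx_push_desc_safe() subtract a small safety margin.
 */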
+
+#define TN40_TXD_FULL_CHECKSUM 7
+
+static netdev_tx_t tn40_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int txd_checksum = TN40_TXD_FULL_CHECKSUM;
+ struct tn40_txd_desc *txdd;
+ int nr_frags, len, err;
+ unsigned int pkt_len;
+ int txd_vlan_id = 0;
+ int txd_lgsnd = 0;
+ int txd_vtag = 0;
+ int txd_mss = 0;
+
+ /* Build tx descriptor */
+ txdd = (struct tn40_txd_desc *)(f->m.va + f->m.wptr);
+ err = tn40_tx_map_skb(priv, skb, txdd, &pkt_len);
+ if (err) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.tx_dropped++;
+ u64_stats_update_end(&priv->syncp);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ txd_checksum = 0;
+
+ if (skb_shinfo(skb)->gso_size) {
+ txd_mss = skb_shinfo(skb)->gso_size;
+ txd_lgsnd = 1;
+ netdev_dbg(priv->ndev, "skb %p pkt len %d gso size = %d\n", skb,
+ pkt_len, txd_mss);
+ }
+ if (skb_vlan_tag_present(skb)) {
+ /* Don't cut VLAN ID to 12 bits */
+ txd_vlan_id = skb_vlan_tag_get(skb);
+ txd_vtag = 1;
+ }
+ txdd->va_hi = 0;
+ txdd->va_lo = 0;
+ txdd->length = cpu_to_le16(pkt_len);
+ txdd->mss = cpu_to_le16(txd_mss);
+ txdd->txd_val1 =
+ cpu_to_le32(TN40_TXD_W1_VAL
+ (tn40_txd_sizes[nr_frags].qwords, txd_checksum,
+ txd_vtag, txd_lgsnd, txd_vlan_id));
+ netdev_dbg(priv->ndev, "=== w1 qwords[%d] %d =====\n", nr_frags,
+ tn40_txd_sizes[nr_frags].qwords);
+ netdev_dbg(priv->ndev, "=== TxD desc =====================\n");
+ netdev_dbg(priv->ndev, "=== w1: 0x%x ================\n",
+ txdd->txd_val1);
+ netdev_dbg(priv->ndev, "=== w2: mss 0x%x len 0x%x\n", txdd->mss,
+ txdd->length);
+ /* SHORT_PKT_FIX */
+ if (pkt_len < TN40_SHORT_PACKET_SIZE) {
+ struct tn40_pbl *pbl = &txdd->pbl[++nr_frags];
+
+ txdd->length = cpu_to_le16(TN40_SHORT_PACKET_SIZE);
+ txdd->txd_val1 =
+ cpu_to_le32(TN40_TXD_W1_VAL
+ (tn40_txd_sizes[nr_frags].qwords,
+ txd_checksum, txd_vtag, txd_lgsnd,
+ txd_vlan_id));
+ pbl->len = cpu_to_le32(TN40_SHORT_PACKET_SIZE - pkt_len);
+ pbl->pa_lo = cpu_to_le32(lower_32_bits(priv->b0_dma));
+ pbl->pa_hi = cpu_to_le32(upper_32_bits(priv->b0_dma));
+ netdev_dbg(priv->ndev, "=== SHORT_PKT_FIX ==============\n");
+ netdev_dbg(priv->ndev, "=== nr_frags : %d ==============\n",
+ nr_frags);
+ }
+
+ /* Increment TXD write pointer. In case of fifo wrapping, copy the
+ * remainder of the descriptor to the beginning.
+ */
+ f->m.wptr += tn40_txd_sizes[nr_frags].bytes;
+ len = f->m.wptr - f->m.memsz;
+ if (unlikely(len >= 0)) {
+ f->m.wptr = len;
+ if (len > 0)
+ memcpy(f->m.va, f->m.va + f->m.memsz, len);
+ }
+ /* Force memory writes to complete before letting the HW know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ priv->tx_level -= tn40_txd_sizes[nr_frags].bytes;
+ if (priv->tx_level > priv->tx_update_mark) {
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ } else {
+ if (priv->tx_noupd++ > TN40_NO_UPD_PACKETS) {
+ priv->tx_noupd = 0;
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ }
+ }
+
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += pkt_len;
+ u64_stats_update_end(&priv->syncp);
+ if (priv->tx_level < TN40_MIN_TX_LEVEL) {
+ netdev_dbg(priv->ndev, "TX Q STOP level %d\n", priv->tx_level);
+ netif_stop_queue(ndev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void tn40_tx_cleanup(struct tn40_priv *priv)
+{
+ struct tn40_txf_fifo *f = &priv->txf_fifo0;
+ struct tn40_txdb *db = &priv->txdb;
+ int tx_level = 0;
+
+ f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_MASK;
+
+ netif_tx_lock(priv->ndev);
+ while (f->m.wptr != f->m.rptr) {
+ f->m.rptr += TN40_TXF_DESC_SZ;
+ f->m.rptr &= f->m.size_mask;
+ /* Unmap all fragments */
+ /* The tx_map entries containing DMA addresses come first */
+ do {
+ dma_addr_t addr = db->rptr->addr.dma;
+ size_t size = db->rptr->len;
+
+ netif_tx_unlock(priv->ndev);
+ dma_unmap_page(&priv->pdev->dev, addr,
+ size, DMA_TO_DEVICE);
+ netif_tx_lock(priv->ndev);
+ tn40_tx_db_inc_rptr(db);
+ } while (db->rptr->len > 0);
+ tx_level -= db->rptr->len; /* '-' Because the len is negative */
+
+ /* Now should come skb pointer - free it */
+ dev_kfree_skb_any(db->rptr->addr.skb);
+ netdev_dbg(priv->ndev, "dev_kfree_skb_any %p %d\n",
+ db->rptr->addr.skb, -db->rptr->len);
+ tn40_tx_db_inc_rptr(db);
+ }
+
+ /* Let the HW know which TXF descriptors were cleaned */
+ tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
+
+ /* We reclaimed resources, so in case the Q is stopped by xmit
+ * callback, we resume the transmission and use tx_lock to
+ * synchronize with xmit.
+ */
+ priv->tx_level += tx_level;
+ if (priv->tx_noupd) {
+ priv->tx_noupd = 0;
+ tn40_write_reg(priv, priv->txd_fifo0.m.reg_wptr,
+ priv->txd_fifo0.m.wptr & TN40_TXF_WPTR_WR_PTR);
+ }
+ if (unlikely(netif_queue_stopped(priv->ndev) &&
+ netif_carrier_ok(priv->ndev) &&
+ (priv->tx_level >= TN40_MAX_TX_LEVEL / 2))) {
+ netdev_dbg(priv->ndev, "TX Q WAKE level %d\n", priv->tx_level);
+ netif_wake_queue(priv->ndev);
+ }
+ netif_tx_unlock(priv->ndev);
+}
+
+static void tn40_tx_free_skbs(struct tn40_priv *priv)
+{
+ struct tn40_txdb *db = &priv->txdb;
+
+ while (db->rptr != db->wptr) {
+ if (likely(db->rptr->len))
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
+ else
+ dev_kfree_skb(db->rptr->addr.skb);
+ tn40_tx_db_inc_rptr(db);
+ }
+}
+
+static void tn40_destroy_tx_ring(struct tn40_priv *priv)
+{
+ tn40_tx_free_skbs(priv);
+ tn40_fifo_free(priv, &priv->txd_fifo0.m);
+ tn40_fifo_free(priv, &priv->txf_fifo0.m);
+ tn40_tx_db_close(&priv->txdb);
+ /* SHORT_PKT_FIX */
+ if (priv->b0_len) {
+ dma_free_coherent(&priv->pdev->dev, priv->b0_len, priv->b0_va,
+ priv->b0_dma);
+ priv->b0_len = 0;
+ }
+}
+
+/**
+ * tn40_tx_push_desc - Push a descriptor to TxD fifo.
+ *
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
+ *
+ * This function pushes the desc to the TxD fifo, wrapping it around the fifo's end if needed.
+ *
+ * This function does not check for available space, nor does it check
+ * that the data size is smaller than the fifo size. Checking for
+ * space is the responsibility of the caller.
+ */
+static void tn40_tx_push_desc(struct tn40_priv *priv, void *data, int size)
+{
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int i = f->m.memsz - f->m.wptr;
+
+ if (size == 0)
+ return;
+
+ if (i > size) {
+ memcpy(f->m.va + f->m.wptr, data, size);
+ f->m.wptr += size;
+ } else {
+ memcpy(f->m.va + f->m.wptr, data, i);
+ f->m.wptr = size - i;
+ memcpy(f->m.va, data + i, f->m.wptr);
+ }
+ tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+}
+
+/**
+ * tn40_tx_push_desc_safe - push descriptor to TxD fifo in a safe way.
+ *
+ * @priv: NIC private structure
+ * @data: descriptor data
+ * @size: descriptor size
+ *
+ * This function does check for available space and, if necessary,
+ * waits for the NIC to read existing data before writing new data.
+ */
+static void tn40_tx_push_desc_safe(struct tn40_priv *priv, void *data, int size)
+{
+ int timer = 0;
+
+ while (size > 0) {
+ /* We subtract 8 because when the fifo is full, rptr ==
+ * wptr, which is also the empty condition. The SW can
+ * tell the two apart, but the HW cannot, so never let
+ * the fifo fill up completely.
+ */
+ int avail = tn40_tx_space(priv) - 8;
+
+ if (avail <= 0) {
+ if (timer++ > 300) /* Prevent endless loop */
+ break;
+ /* Give the HW a chance to clean the fifo */
+ usleep_range(50, 60);
+ continue;
+ }
+ avail = min(avail, size);
+ netdev_dbg(priv->ndev,
+ "about to push %d bytes starting %p size %d\n",
+ avail, data, size);
+ tn40_tx_push_desc(priv, data, avail);
+ size -= avail;
+ data += avail;
+ }
+}
+
+int tn40_set_link_speed(struct tn40_priv *priv, u32 speed)
+{
+ u32 val;
+ int i;
+
+ netdev_dbg(priv->ndev, "speed %d\n", speed);
+ switch (speed) {
+ case SPEED_10000:
+ case SPEED_5000:
+ case SPEED_2500:
+ netdev_dbg(priv->ndev, "link_speed %d\n", speed);
+
+ tn40_write_reg(priv, 0x1010, 0x217); /*ETHSD.REFCLK_CONF */
+ tn40_write_reg(priv, 0x104c, 0x4c); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x4c); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x4c); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x4c); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x434); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x434); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x434); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x434); /*ETHSD.L3_TX_PCNT */
+ tn40_write_reg(priv, 0x6300, 0x0400); /*MAC.PCS_CTRL */
+
+ tn40_write_reg(priv, 0x1018, 0x00); /*Mike2 */
+ udelay(5);
+ tn40_write_reg(priv, 0x1018, 0x04); /*Mike2 */
+ udelay(5);
+ tn40_write_reg(priv, 0x1018, 0x06); /*Mike2 */
+ udelay(5);
+ /*MikeFix1 */
+ /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
+ tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */
+ tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */
+ tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */
+ tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */
+ tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */
+ for (i = 1000; i; i--) {
+ usleep_range(50, 60);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+ if (val & (1 << 9)) {
+ /*ETHSD.INIT_STAT */
+ tn40_write_reg(priv, 0x1014, 0x3);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+
+ break;
+ }
+ }
+ if (!i)
+ netdev_err(priv->ndev, "MAC init timeout!\n");
+
+ tn40_write_reg(priv, 0x6350, 0x0); /*MAC.PCS_IF_MODE */
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ break;
+
+ case SPEED_1000:
+ case SPEED_100:
+ tn40_write_reg(priv, 0x1010, 0x613); /*ETHSD.REFCLK_CONF */
+ tn40_write_reg(priv, 0x104c, 0x4d); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x35); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */
+ tn40_write_reg(priv, 0x6300, 0x01140); /*MAC.PCS_CTRL */
+
+ tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */
+ for (i = 1000; i; i--) {
+ usleep_range(50, 60);
+ val = tn40_read_reg(priv, 0x1014); /*ETHSD.INIT_STAT */
+ if (val & (1 << 9)) {
+ /*ETHSD.INIT_STAT */
+ tn40_write_reg(priv, 0x1014, 0x3);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+
+ break;
+ }
+ }
+ if (!i)
+ netdev_err(priv->ndev, "MAC init timeout!\n");
+
+ tn40_write_reg(priv, 0x6350, 0x2b); /*MAC.PCS_IF_MODE 1g */
+ tn40_write_reg(priv, 0x6310, 0x9801); /*MAC.PCS_DEV_AB */
+
+ tn40_write_reg(priv, 0x6314, 0x1); /*MAC.PCS_PART_AB */
+ tn40_write_reg(priv, 0x6348, 0xc8); /*MAC.PCS_LINK_LO */
+ tn40_write_reg(priv, 0x634c, 0xc8); /*MAC.PCS_LINK_HI */
+ usleep_range(50, 60);
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ tn40_write_reg(priv, 0x6300, 0x1140); /*MAC.PCS_CTRL */
+ break;
+
+ case 0: /* Link down */
+ tn40_write_reg(priv, 0x104c, 0x0); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x0); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */
+
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0x800);
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ break;
+
+ default:
+ netdev_err(priv->ndev,
+ "Link speed was not identified yet (%d)\n", speed);
+ speed = 0;
+ break;
+ }
+ return speed;
+}
+
+static void tn40_link_changed(struct tn40_priv *priv)
+{
+ u32 link = tn40_read_reg(priv,
+ TN40_REG_MAC_LNK_STAT) & TN40_MAC_LINK_STAT;
+
+ netdev_dbg(priv->ndev, "link changed %u\n", link);
+}
+
+static void tn40_isr_extra(struct tn40_priv *priv, u32 isr)
+{
+ if (isr & (TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 | TN40_IR_TMR0)) {
+ netdev_dbg(priv->ndev, "isr = 0x%x\n", isr);
+ tn40_link_changed(priv);
+ }
+}
+
+static irqreturn_t tn40_isr_napi(int irq, void *dev)
+{
+ struct tn40_priv *priv = netdev_priv((struct net_device *)dev);
+ u32 isr;
+
+ isr = tn40_read_reg(priv, TN40_REG_ISR_MSK0);
+
+ if (unlikely(!isr)) {
+ tn40_enable_interrupts(priv);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+
+ if (isr & TN40_IR_EXTRA)
+ tn40_isr_extra(priv, isr);
+
+ if (isr & (TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | TN40_IR_TMR1)) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ __napi_schedule(&priv->napi);
+ return IRQ_HANDLED;
+ }
+ /* We get here if an interrupt has slipped into the
+ * small time window between these lines in
+ * tn40_poll: tn40_enable_interrupts(priv); return 0;
+ *
+ * Currently interrupts are disabled (since we read
+ * the ISR register) and we have failed to register
+ * the next poll. So we read the regs to trigger the
+ * chip and allow further interrupts.
+ */
+ tn40_read_reg(priv, TN40_REG_TXF_WPTR_0);
+ tn40_read_reg(priv, TN40_REG_RXD_WPTR_0);
+ }
+
+ tn40_enable_interrupts(priv);
+ return IRQ_HANDLED;
+}
+
+static int tn40_poll(struct napi_struct *napi, int budget)
+{
+ struct tn40_priv *priv = container_of(napi, struct tn40_priv, napi);
+ int work_done;
+
+ tn40_tx_cleanup(priv);
+
+ if (!budget)
+ return 0;
+
+ work_done = tn40_rx_receive(priv, budget);
+ if (work_done == budget)
+ return budget;
+
+ if (napi_complete_done(napi, work_done))
+ tn40_enable_interrupts(priv);
+ return work_done;
+}
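+
+/* Note on the budget == 0 case above: netpoll may invoke the poll routine
+ * with a zero budget, in which case only Tx completions are processed and
+ * no Rx packets may be delivered; returning 0 without calling
+ * napi_complete_done() keeps the NAPI instance scheduled.
+ */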
+
+static int tn40_fw_load(struct tn40_priv *priv)
+{
+ const struct firmware *fw = NULL;
+ int master, ret;
+ u32 val;
+
+ ret = request_firmware(&fw, TN40_FIRMWARE_NAME, &priv->pdev->dev);
+ if (ret)
+ return ret;
+
+ master = tn40_read_reg(priv, TN40_REG_INIT_SEMAPHORE);
+ if (!tn40_read_reg(priv, TN40_REG_INIT_STATUS) && master) {
+ netdev_dbg(priv->ndev, "Loading FW...\n");
+ tn40_tx_push_desc_safe(priv, (void *)fw->data, fw->size);
+ msleep(100);
+ }
+ ret = read_poll_timeout(tn40_read_reg, val, val, 2000, 400000, false,
+ priv, TN40_REG_INIT_STATUS);
+ if (master)
+ tn40_write_reg(priv, TN40_REG_INIT_SEMAPHORE, 1);
+
+ if (ret) {
+ netdev_err(priv->ndev, "firmware loading failed\n");
+ netdev_dbg(priv->ndev, "VPC: 0x%x VIC: 0x%x STATUS: 0x%xd\n",
+ tn40_read_reg(priv, TN40_REG_VPC),
+ tn40_read_reg(priv, TN40_REG_VIC),
+ tn40_read_reg(priv, TN40_REG_INIT_STATUS));
+ ret = -EIO;
+ } else {
+ netdev_dbg(priv->ndev, "firmware loading success\n");
+ }
+ release_firmware(fw);
+ return ret;
+}
+
+static void tn40_restore_mac(struct net_device *ndev, struct tn40_priv *priv)
+{
+ u32 val;
+
+ netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
+ tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
+
+ val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC2_A, val);
+ val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC1_A, val);
+ val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC0_A, val);
+
+ /* Also program the MAC block address registers */
+ tn40_write_reg(priv, TN40_REG_MAC_ADDR_0,
+ (ndev->dev_addr[3] << 24) | (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[1] << 8) | (ndev->dev_addr[0]));
+ tn40_write_reg(priv, TN40_REG_MAC_ADDR_1,
+ (ndev->dev_addr[5] << 8) | (ndev->dev_addr[4]));
+
+ netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
+ tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
+}
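+
+/* Worked example (for reference): how the packing above lays out a MAC
+ * address. For 00:11:22:33:44:55 the UNC_MAC regs hold 16-bit big-endian
+ * pairs while MAC_ADDR_0/1 hold the same bytes in little-endian order:
+ *
+ *	UNC_MAC2_A = 0x0011, UNC_MAC1_A = 0x2233, UNC_MAC0_A = 0x4455
+ *	MAC_ADDR_0 = 0x33221100, MAC_ADDR_1 = 0x5544
+ */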
+
+static void tn40_hw_start(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_FRM_LENGTH, 0X3FE0);
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0X10fd);
+ /*MikeFix1 */
+ /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
+ tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */
+ tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */
+ tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */
+ tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */
+ tn40_write_reg(priv, TN40_REG_RX_FIFO_SECTION, 0x10);
+ tn40_write_reg(priv, TN40_REG_TX_FIFO_SECTION, 0xE00010);
+ tn40_write_reg(priv, TN40_REG_RX_FULLNESS, 0);
+ tn40_write_reg(priv, TN40_REG_TX_FULLNESS, 0);
+
+ tn40_write_reg(priv, TN40_REG_VGLB, 0);
+ tn40_write_reg(priv, TN40_REG_MAX_FRAME_A,
+ priv->rxf_fifo0.m.pktsz & TN40_MAX_FRAME_AB_VAL);
+ tn40_write_reg(priv, TN40_REG_RDINTCM0, priv->rdintcm);
+ tn40_write_reg(priv, TN40_REG_RDINTCM2, 0);
+
+ /* old val = 0x300064 */
+ tn40_write_reg(priv, TN40_REG_TDINTCM0, priv->tdintcm);
+
+ /* Enable timer interrupt once in 2 secs. */
+ tn40_restore_mac(priv->ndev, priv);
+
+ /* Pause frame */
+ tn40_write_reg(priv, 0x12E0, 0x28);
+ tn40_write_reg(priv, TN40_REG_PAUSE_QUANT, 0xFFFF);
+ tn40_write_reg(priv, 0x6064, 0xF);
+
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A,
+ TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC |
+ TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB);
+
+ tn40_enable_interrupts(priv);
+}
+
+static int tn40_hw_reset(struct tn40_priv *priv)
+{
+ u32 val;
+
+ /* Reset sequences: read, write 1, read, write 0 */
+ val = tn40_read_reg(priv, TN40_REG_CLKPLL);
+ tn40_write_reg(priv, TN40_REG_CLKPLL, (val | TN40_CLKPLL_SFTRST) + 0x8);
+ usleep_range(50, 60);
+ val = tn40_read_reg(priv, TN40_REG_CLKPLL);
+ tn40_write_reg(priv, TN40_REG_CLKPLL, val & ~TN40_CLKPLL_SFTRST);
+
+ /* Check that the PLLs are locked and reset ended */
+ val = read_poll_timeout(tn40_read_reg, val,
+ (val & TN40_CLKPLL_LKD) == TN40_CLKPLL_LKD,
+ 10000, 700000, false, priv, TN40_REG_CLKPLL);
+ if (val)
+ return -EIO;
+
+ usleep_range(50, 60);
+ /* Do any PCI-E read transaction */
+ tn40_read_reg(priv, TN40_REG_RXD_CFG0_0);
+ return 0;
+}
+
+static void tn40_sw_reset(struct tn40_priv *priv)
+{
+ int i, ret;
+ u32 val;
+
+ /* 1. load MAC (obsolete) */
+ /* 2. disable Rx (and Tx) */
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0);
+ msleep(100);
+ /* 3. Disable port */
+ tn40_write_reg(priv, TN40_REG_DIS_PORT, 1);
+ /* 4. Disable queue */
+ tn40_write_reg(priv, TN40_REG_DIS_QU, 1);
+ /* 5. Wait until hw is disabled */
+ ret = read_poll_timeout(tn40_read_reg, val, val & 1, 10000, 500000,
+ false, priv, TN40_REG_RST_PORT);
+ if (ret)
+ netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
+
+ /* 6. Disable interrupts */
+ tn40_write_reg(priv, TN40_REG_RDINTCM0, 0);
+ tn40_write_reg(priv, TN40_REG_TDINTCM0, 0);
+ tn40_write_reg(priv, TN40_REG_IMR, 0);
+ tn40_read_reg(priv, TN40_REG_ISR);
+
+ /* 7. Reset queue */
+ tn40_write_reg(priv, TN40_REG_RST_QU, 1);
+ /* 8. Reset port */
+ tn40_write_reg(priv, TN40_REG_RST_PORT, 1);
+ /* 9. Zero all read and write pointers */
+ for (i = TN40_REG_TXD_WPTR_0; i <= TN40_REG_TXF_RPTR_3; i += 0x10)
+ tn40_write_reg(priv, i, 0);
+ /* 10. Unset port disable */
+ tn40_write_reg(priv, TN40_REG_DIS_PORT, 0);
+ /* 11. Unset queue disable */
+ tn40_write_reg(priv, TN40_REG_DIS_QU, 0);
+ /* 12. Unset queue reset */
+ tn40_write_reg(priv, TN40_REG_RST_QU, 0);
+ /* 13. Unset port reset */
+ tn40_write_reg(priv, TN40_REG_RST_PORT, 0);
+ /* 14. Enable Rx */
+ /* Skipped; will be done later. */
+}
+
+static int tn40_start(struct tn40_priv *priv)
+{
+ int ret;
+
+ ret = tn40_create_tx_ring(priv);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to tx init %d\n", ret);
+ return ret;
+ }
+
+ ret = tn40_create_rx_ring(priv);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to rx init %d\n", ret);
+ goto err_tx_ring;
+ }
+
+ tn40_rx_alloc_buffers(priv);
+ if (tn40_rxdb_available(priv->rxdb0) != 1) {
+ ret = -ENOMEM;
+ netdev_err(priv->ndev, "failed to allocate rx buffers\n");
+ goto err_rx_ring;
+ }
+
+ ret = request_irq(priv->pdev->irq, &tn40_isr_napi, IRQF_SHARED,
+ priv->ndev->name, priv->ndev);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to request irq %d\n", ret);
+ goto err_rx_ring;
+ }
+
+ tn40_hw_start(priv);
+ return 0;
+err_rx_ring:
+ tn40_destroy_rx_ring(priv);
+err_tx_ring:
+ tn40_destroy_tx_ring(priv);
+ return ret;
+}
+
+static void tn40_stop(struct tn40_priv *priv)
+{
+ tn40_disable_interrupts(priv);
+ free_irq(priv->pdev->irq, priv->ndev);
+ tn40_sw_reset(priv);
+ tn40_destroy_tx_ring(priv);
+ tn40_destroy_rx_ring(priv);
+}
+
+static int tn40_close(struct net_device *ndev)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ tn40_stop(priv);
+ return 0;
+}
+
+static int tn40_open(struct net_device *dev)
+{
+ struct tn40_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = phylink_connect_phy(priv->phylink, priv->phydev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to phy %d\n", ret);
+ return ret;
+ }
+ tn40_sw_reset(priv);
+ ret = tn40_start(priv);
+ if (ret) {
+ phylink_disconnect_phy(priv->phylink);
+ netdev_err(dev, "failed to start %d\n", ret);
+ return ret;
+ }
+ napi_enable(&priv->napi);
+ phylink_start(priv->phylink);
+ netif_start_queue(priv->ndev);
+ return 0;
+}
+
+static void __tn40_vlan_rx_vid(struct net_device *ndev, uint16_t vid,
+ int enable)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ u32 reg, bit, val;
+
+ netdev_dbg(priv->ndev, "vid =%d value =%d\n", (int)vid, enable);
+ reg = TN40_REG_VLAN_0 + (vid / 32) * 4;
+ bit = 1 << vid % 32;
+ val = tn40_read_reg(priv, reg);
+ netdev_dbg(priv->ndev, "reg =%x, val =%x, bit =%d\n", reg, val, bit);
+ if (enable)
+ val |= bit;
+ else
+ val &= ~bit;
+ netdev_dbg(priv->ndev, "new val %x\n", val);
+ tn40_write_reg(priv, reg, val);
+}
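+
+/* Worked example (for reference): the 4096 VLAN IDs map onto 128
+ * consecutive 32-bit registers starting at TN40_REG_VLAN_0. For vid = 100:
+ * reg = TN40_REG_VLAN_0 + (100 / 32) * 4 = TN40_REG_VLAN_0 + 0xc and
+ * bit = 1 << (100 % 32) = BIT(4).
+ */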
+
+static int tn40_vlan_rx_add_vid(struct net_device *ndev,
+ __always_unused __be16 proto, u16 vid)
+{
+ __tn40_vlan_rx_vid(ndev, vid, 1);
+ return 0;
+}
+
+static int tn40_vlan_rx_kill_vid(struct net_device *ndev,
+ __always_unused __be16 proto, u16 vid)
+{
+ __tn40_vlan_rx_vid(ndev, vid, 0);
+ return 0;
+}
+
+static void tn40_setmulti(struct net_device *ndev)
+{
+ u32 rxf_val = TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB |
+ TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC;
+ struct tn40_priv *priv = netdev_priv(ndev);
+ int i;
+
+ /* IMF - imperfect (hash) rx multicast filter */
+ /* PMF - perfect rx multicast filter */
+
+ /* FIXME: RXE(OFF) */
+ if (ndev->flags & IFF_PROMISC) {
+ rxf_val |= TN40_GMAC_RX_FILTER_PRM;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ /* set IMF to accept all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
+ tn40_write_reg(priv,
+ TN40_REG_RX_MCST_HASH0 + i * 4, ~0);
+ } else if (netdev_mc_count(ndev)) {
+ struct netdev_hw_addr *mclist;
+ u32 reg, val;
+ u8 hash;
+
+ /* Set IMF to deny all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
+ tn40_write_reg(priv,
+ TN40_REG_RX_MCST_HASH0 + i * 4, 0);
+
+ /* Set PMF to deny all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_NUM; i++) {
+ tn40_write_reg(priv,
+ TN40_REG_RX_MAC_MCST0 + i * 8, 0);
+ tn40_write_reg(priv,
+ TN40_REG_RX_MAC_MCST1 + i * 8, 0);
+ }
+ /* Use PMF to accept first MAC_MCST_NUM (15) addresses */
+
+ /* TBD: Sort the addresses and write them in ascending
+ * order into the RX_MAC_MCST regs. For now this phase is
+ * skipped and the remaining addresses are accepted
+ * through the IMF (hash filter) instead.
+ */
+ netdev_for_each_mc_addr(mclist, ndev) {
+ hash = 0;
+ for (i = 0; i < ETH_ALEN; i++)
+ hash ^= mclist->addr[i];
+
+ reg = TN40_REG_RX_MCST_HASH0 + ((hash >> 5) << 2);
+ val = tn40_read_reg(priv, reg);
+ val |= (1 << (hash % 32));
+ tn40_write_reg(priv, reg, val);
+ }
+ } else {
+ rxf_val |= TN40_GMAC_RX_FILTER_AB;
+ }
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, rxf_val);
+ /* Enable RX */
+ /* FIXME: RXE(ON) */
+}
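+
+/* Worked example (for reference): the imperfect filter hashes a multicast
+ * address by XOR-ing its six bytes. For 01:00:5e:00:00:01 the hash is
+ * 0x01 ^ 0x00 ^ 0x5e ^ 0x00 ^ 0x00 ^ 0x01 = 0x5e (94), which selects
+ * register TN40_REG_RX_MCST_HASH0 + (94 >> 5) * 4 = HASH0 + 8 and
+ * bit 94 % 32 = 30.
+ */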
+
+static int tn40_set_mac(struct net_device *ndev, void *p)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+ eth_hw_addr_set(ndev, addr->sa_data);
+ tn40_restore_mac(ndev, priv);
+ return 0;
+}
+
+static void tn40_mac_init(struct tn40_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+ u64 val;
+
+ val = (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC0_A);
+ val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC1_A) << 16;
+ val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC2_A) << 32;
+
+ u64_to_ether_addr(val, addr);
+ eth_hw_addr_set(priv->ndev, addr);
+}
+
+static void tn40_get_stats(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+ stats->tx_packets = priv->stats.tx_packets;
+ stats->tx_bytes = priv->stats.tx_bytes;
+ stats->tx_dropped = priv->stats.tx_dropped;
+
+ stats->rx_packets = priv->stats.rx_packets;
+ stats->rx_bytes = priv->stats.rx_bytes;
+ stats->rx_dropped = priv->stats.rx_dropped;
+ stats->rx_errors = priv->stats.rx_errors;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
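+
+/* Sketch of the matching writer side (assumed, for reference only): any
+ * path updating priv->stats is expected to bracket the update with the
+ * standard u64_stats helpers on the same syncp so that the
+ * fetch_begin/fetch_retry loop above sees a consistent snapshot:
+ *
+ *	u64_stats_update_begin(&priv->syncp);
+ *	priv->stats.rx_packets++;
+ *	priv->stats.rx_bytes += len;
+ *	u64_stats_update_end(&priv->syncp);
+ */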
+
+static const struct net_device_ops tn40_netdev_ops = {
+ .ndo_open = tn40_open,
+ .ndo_stop = tn40_close,
+ .ndo_start_xmit = tn40_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = tn40_setmulti,
+ .ndo_get_stats64 = tn40_get_stats,
+ .ndo_set_mac_address = tn40_set_mac,
+ .ndo_vlan_rx_add_vid = tn40_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = tn40_vlan_rx_kill_vid,
+};
+
+static int tn40_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static const struct ethtool_ops tn40_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = tn40_ethtool_get_link_ksettings,
+};
+
+static void tn40_get_queue_stats_rx(struct net_device *ndev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ stats->packets = priv->stats.rx_packets;
+ stats->bytes = priv->stats.rx_bytes;
+ stats->alloc_fail = priv->alloc_fail;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
+static void tn40_get_queue_stats_tx(struct net_device *ndev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ stats->packets = priv->stats.tx_packets;
+ stats->bytes = priv->stats.tx_bytes;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
+static void tn40_get_base_stats(struct net_device *ndev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ tx->packets = 0;
+ tx->bytes = 0;
+}
+
+static const struct netdev_stat_ops tn40_stat_ops = {
+ .get_queue_stats_rx = tn40_get_queue_stats_rx,
+ .get_queue_stats_tx = tn40_get_queue_stats_tx,
+ .get_base_stats = tn40_get_base_stats,
+};
+
+static int tn40_priv_init(struct tn40_priv *priv)
+{
+ int ret;
+
+ tn40_set_link_speed(priv, 0);
+
+ /* Set GPIO[9:0] to output 0 */
+ tn40_write_reg(priv, 0x51E0, 0x30010006); /* GPIO_OE_ WR CMD */
+ tn40_write_reg(priv, 0x51F0, 0x0); /* GPIO_OE_ DATA */
+ tn40_write_reg(priv, TN40_REG_MDIO_CMD_STAT, 0x3ec8);
+
+ /* We use Tx descriptors to load the firmware. */
+ ret = tn40_create_tx_ring(priv);
+ if (ret)
+ return ret;
+ ret = tn40_fw_load(priv);
+ tn40_destroy_tx_ring(priv);
+ return ret;
+}
+
+static struct net_device *tn40_netdev_alloc(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct tn40_priv));
+ if (!ndev)
+ return NULL;
+ ndev->netdev_ops = &tn40_netdev_ops;
+ ndev->ethtool_ops = &tn40_ethtool_ops;
+ ndev->stat_ops = &tn40_stat_ops;
+ ndev->tx_queue_len = TN40_NDEV_TXQ_LEN;
+ ndev->mem_start = pci_resource_start(pdev, 0);
+ ndev->mem_end = pci_resource_end(pdev, 0);
+ ndev->min_mtu = ETH_ZLEN;
+ ndev->max_mtu = TN40_MAX_MTU;
+
+ ndev->features = NETIF_F_IP_CSUM |
+ NETIF_F_SG |
+ NETIF_F_FRAGLIST |
+ NETIF_F_TSO | NETIF_F_GRO |
+ NETIF_F_RXCSUM |
+ NETIF_F_RXHASH |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->vlan_features = NETIF_F_IP_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH;
+
+ if (dma_get_mask(&pdev->dev) == DMA_BIT_MASK(64)) {
+ ndev->features |= NETIF_F_HIGHDMA;
+ ndev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+ ndev->hw_features |= ndev->features;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ netif_stop_queue(ndev);
+ return ndev;
+}
+
+static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *ndev;
+ struct tn40_priv *priv;
+ unsigned int nvec = 1;
+ void __iomem *regs;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set DMA mask.\n");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, TN40_DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request PCI regions.\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ regs = pci_iomap(pdev, 0, TN40_REGS_SIZE);
+ if (!regs) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "failed to map PCI bar.\n");
+ goto err_free_regions;
+ }
+
+ ndev = tn40_netdev_alloc(pdev);
+ if (!ndev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "failed to allocate netdev.\n");
+ goto err_iounmap;
+ }
+
+ priv = netdev_priv(ndev);
+ pci_set_drvdata(pdev, priv);
+ netif_napi_add(ndev, &priv->napi, tn40_poll);
+
+ priv->regs = regs;
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ /* Initialize fifo sizes. */
+ priv->txd_size = 3;
+ priv->txf_size = 3;
+ priv->rxd_size = 3;
+ priv->rxf_size = 3;
+ /* Initialize the initial coalescing registers. */
+ priv->rdintcm = TN40_INT_REG_VAL(0x20, 1, 4, 12);
+ priv->tdintcm = TN40_INT_REG_VAL(0x20, 1, 0, 12);
+
+ ret = tn40_hw_reset(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset HW.\n");
+ goto err_unset_drvdata;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate irq.\n");
+ goto err_unset_drvdata;
+ }
+
+ ret = tn40_mdiobus_init(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize mdio bus.\n");
+ goto err_free_irq;
+ }
+
+ priv->stats_flag =
+ ((tn40_read_reg(priv, TN40_FPGA_VER) & 0xFFF) != 308);
+ u64_stats_init(&priv->syncp);
+
+ priv->isr_mask = TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_PSE |
+ TN40_IR_TMR0 | TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 |
+ TN40_IR_TMR1;
+
+ tn40_mac_init(priv);
+ ret = tn40_phy_register(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set up PHY.\n");
+ goto err_free_irq;
+ }
+
+ ret = tn40_priv_init(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize tn40_priv.\n");
+ goto err_unregister_phydev;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register netdev.\n");
+ goto err_unregister_phydev;
+ }
+ return 0;
+err_unregister_phydev:
+ tn40_phy_unregister(priv);
+err_free_irq:
+ pci_free_irq_vectors(pdev);
+err_unset_drvdata:
+ pci_set_drvdata(pdev, NULL);
+err_iounmap:
+ iounmap(regs);
+err_free_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void tn40_remove(struct pci_dev *pdev)
+{
+ struct tn40_priv *priv = pci_get_drvdata(pdev);
+ struct net_device *ndev = priv->ndev;
+
+ unregister_netdev(ndev);
+
+ tn40_phy_unregister(priv);
+ pci_free_irq_vectors(priv->pdev);
+ pci_set_drvdata(pdev, NULL);
+ iounmap(priv->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id tn40_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_DLINK, 0x4d00) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_ASUSTEK, 0x8709) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { }
+};
+
+static struct pci_driver tn40_driver = {
+ .name = TN40_DRV_NAME,
+ .id_table = tn40_id_table,
+ .probe = tn40_probe,
+ .remove = tn40_remove,
+};
+
+module_pci_driver(tn40_driver);
+
+MODULE_DEVICE_TABLE(pci, tn40_id_table);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(TN40_FIRMWARE_NAME);
+MODULE_DESCRIPTION("Tehuti Network TN40xx Driver");
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
new file mode 100644
index 000000000000..490781fe5120
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#ifndef _TN40_H_
+#define _TN40_H_
+
+#include "tn40_regs.h"
+
+#define TN40_DRV_NAME "tn40xx"
+
+#define TN40_MDIO_SPEED_1MHZ (1)
+#define TN40_MDIO_SPEED_6MHZ (6)
+
+/* Netdev Tx queue length for Luxor. The default value is 1000.
+ * It can be changed at runtime, e.g. "ifconfig eth1 txqueuelen 3000".
+ */
+#define TN40_NDEV_TXQ_LEN 1000
+
+#define TN40_FIFO_SIZE 4096
+#define TN40_FIFO_EXTRA_SPACE 1024
+
+#define TN40_TXF_DESC_SZ 16
+#define TN40_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16)
+#define TN40_MIN_TX_LEVEL 256
+#define TN40_NO_UPD_PACKETS 40
+#define TN40_MAX_MTU BIT(14)
+
+#define TN40_PCK_TH_MULT 128
+#define TN40_INT_COAL_MULT 2
+
+#define TN40_INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) ( \
+ FIELD_PREP(GENMASK(14, 0), (coal)) | \
+ FIELD_PREP(BIT(15), (coal_rc)) | \
+ FIELD_PREP(GENMASK(19, 16), (rxf_th)) | \
+ FIELD_PREP(GENMASK(31, 20), (pck_th)) \
+ )
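+
+/* Worked example (for reference): the Rx coalescing value programmed in
+ * tn40_probe(), TN40_INT_REG_VAL(0x20, 1, 4, 12), expands to
+ * (12 << 20) | (4 << 16) | (1 << 15) | 0x20 = 0xc48020.
+ */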
+
+struct tn40_fifo {
+ dma_addr_t da; /* Physical address of fifo (used by HW) */
+ char *va; /* Virtual address of fifo (used by SW) */
+ u32 rptr, wptr;
+ /* Cached values of the RPTR and WPTR registers; they are
+ * 32 bits wide on both 32-bit and 64-bit architectures.
+ */
+ u16 reg_cfg0;
+ u16 reg_cfg1;
+ u16 reg_rptr;
+ u16 reg_wptr;
+ u16 memsz; /* Memory size allocated for fifo */
+ u16 size_mask;
+ u16 pktsz; /* Skb packet size to allocate */
+ u16 rcvno; /* Number of buffers that come from this RXF */
+};
+
+struct tn40_txf_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_txd_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rxf_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rxd_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rx_map {
+ struct page *page;
+};
+
+struct tn40_rxdb {
+ unsigned int *stack;
+ struct tn40_rx_map *elems;
+ unsigned int nelem;
+ unsigned int top;
+};
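+
+/* Minimal sketch (helper names illustrative, not defined in this header):
+ * the rxdb is a stack of free element indices, so allocation pops an
+ * index off the top and freeing pushes it back, both in O(1):
+ *
+ *	static inline int tn40_rxdb_alloc_elem(struct tn40_rxdb *db)
+ *	{
+ *		return db->stack[--db->top];
+ *	}
+ *
+ *	static inline void tn40_rxdb_free_elem(struct tn40_rxdb *db,
+ *					       unsigned int n)
+ *	{
+ *		db->stack[db->top++] = n;
+ *	}
+ */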
+
+union tn40_tx_dma_addr {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+};
+
+/* Entry in the db.
+ * if len == 0 addr is dma
+ * if len != 0 addr is skb
+ */
+struct tn40_tx_map {
+ union tn40_tx_dma_addr addr;
+ int len;
+};
+
+/* tx database - implemented as circular fifo buffer */
+struct tn40_txdb {
+ struct tn40_tx_map *start; /* Points to the first element */
+ struct tn40_tx_map *end; /* Points just AFTER the last element */
+ struct tn40_tx_map *rptr; /* Points to the next element to read */
+ struct tn40_tx_map *wptr; /* Points to the next element to write */
+ int size; /* Number of elements in the db */
+};
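+
+/* Minimal sketch (illustrative): advancing a txdb pointer wraps from
+ * 'end' back to 'start', the usual pattern for a circular descriptor
+ * database:
+ *
+ *	static inline void tn40_txdb_ptr_inc(struct tn40_txdb *db,
+ *					     struct tn40_tx_map **pptr)
+ *	{
+ *		if (++*pptr == db->end)
+ *			*pptr = db->start;
+ *	}
+ */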
+
+struct tn40_priv {
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+
+ struct napi_struct napi;
+ /* RX FIFOs: 1 for data (full) descs and 1 for free descs */
+ struct tn40_rxd_fifo rxd_fifo0;
+ struct tn40_rxf_fifo rxf_fifo0;
+ struct tn40_rxdb *rxdb0; /* Rx db that stores page pointers */
+ struct page_pool *page_pool;
+
+ /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */
+ struct tn40_txd_fifo txd_fifo0;
+ struct tn40_txf_fifo txf_fifo0;
+ struct tn40_txdb txdb;
+ int tx_level;
+ int tx_update_mark;
+ int tx_noupd;
+
+ int stats_flag;
+ struct rtnl_link_stats64 stats;
+ u64 alloc_fail;
+ struct u64_stats_sync syncp;
+
+ u8 txd_size;
+ u8 txf_size;
+ u8 rxd_size;
+ u8 rxf_size;
+ u32 rdintcm;
+ u32 tdintcm;
+
+ u32 isr_mask;
+
+ void __iomem *regs;
+
+ /* SHORT_PKT_FIX */
+ u32 b0_len;
+ dma_addr_t b0_dma; /* Physical address of buffer */
+ char *b0_va; /* Virtual address of buffer */
+
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+};
+
+/* RX FREE descriptor - 64bit */
+struct tn40_rxf_desc {
+ __le32 info; /* Buffer Count + Info - described below */
+ __le32 va_lo; /* VAdr[31:0] */
+ __le32 va_hi; /* VAdr[63:32] */
+ __le32 pa_lo; /* PAdr[31:0] */
+ __le32 pa_hi; /* PAdr[63:32] */
+ __le32 len; /* Buffer Length */
+};
+
+#define TN40_GET_RXD_BC(x) FIELD_GET(GENMASK(4, 0), (x))
+#define TN40_GET_RXD_ERR(x) FIELD_GET(GENMASK(26, 21), (x))
+#define TN40_GET_RXD_PKT_ID(x) FIELD_GET(GENMASK(30, 28), (x))
+#define TN40_GET_RXD_VTAG(x) FIELD_GET(BIT(31), (x))
+#define TN40_GET_RXD_VLAN_TCI(x) FIELD_GET(GENMASK(15, 0), (x))
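+
+/* Worked example (for reference): for rxd_val1 = 0x90000003 the macros
+ * above yield TN40_GET_RXD_BC() = 3 (bits 4:0), TN40_GET_RXD_ERR() = 0,
+ * TN40_GET_RXD_PKT_ID() = 1 (bits 30:28) and TN40_GET_RXD_VTAG() = 1
+ * (bit 31), i.e. a VLAN-tagged descriptor with a buffer count of 3.
+ */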
+
+struct tn40_rxd_desc {
+ __le32 rxd_val1;
+ __le16 len;
+ __le16 rxd_vlan;
+ __le32 va_lo;
+ __le32 va_hi;
+ __le32 rss_lo;
+ __le32 rss_hash;
+};
+
+#define TN40_MAX_PBL (19)
+/* PBL describes each virtual buffer to be transmitted from the host. */
+struct tn40_pbl {
+ __le32 pa_lo;
+ __le32 pa_hi;
+ __le32 len;
+};
+
+/* First word for TXD descriptor. It means: type = 3 for regular Tx packet,
+ * hw_csum = 7 for IP+UDP+TCP HW checksums.
+ */
+#define TN40_TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) ( \
+ GENMASK(17, 16) | \
+ FIELD_PREP(GENMASK(4, 0), (bc)) | \
+ FIELD_PREP(GENMASK(7, 5), (checksum)) | \
+ FIELD_PREP(BIT(8), (vtag)) | \
+ FIELD_PREP(GENMASK(12, 9), (lgsnd)) | \
+ FIELD_PREP(GENMASK(15, 13), \
+ FIELD_GET(GENMASK(15, 13), (vlan_id))) | \
+ FIELD_PREP(GENMASK(31, 20), \
+ FIELD_GET(GENMASK(11, 0), (vlan_id))) \
+ )
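+
+/* Worked example (for reference): a plain Tx packet with bc = 1, full HW
+ * checksum (checksum = 7), no VLAN tag and lgsnd = 0 gives
+ * TN40_TXD_W1_VAL(1, 7, 0, 0, 0) = GENMASK(17, 16) | 1 | (7 << 5) = 0x300e1.
+ */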
+
+struct tn40_txd_desc {
+ __le32 txd_val1;
+ __le16 mss;
+ __le16 length;
+ __le32 va_lo;
+ __le32 va_hi;
+ struct tn40_pbl pbl[]; /* Fragments */
+};
+
+struct tn40_txf_desc {
+ u32 status;
+ u32 va_lo; /* VAdr[31:0] */
+ u32 va_hi; /* VAdr[63:32] */
+ u32 pad;
+};
+
+static inline u32 tn40_read_reg(struct tn40_priv *priv, u32 reg)
+{
+ return readl(priv->regs + reg);
+}
+
+static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->regs + reg);
+}
+
+int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+
+int tn40_mdiobus_init(struct tn40_priv *priv);
+
+int tn40_phy_register(struct tn40_priv *priv);
+void tn40_phy_unregister(struct tn40_priv *priv);
+
+#endif /* _TN40_H_ */
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
new file mode 100644
index 000000000000..af18615d64a8
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+
+#include "tn40.h"
+
+#define TN40_MDIO_DEVAD_MASK GENMASK(4, 0)
+#define TN40_MDIO_PRTAD_MASK GENMASK(9, 5)
+#define TN40_MDIO_CMD_VAL(device, port) \
+ (FIELD_PREP(TN40_MDIO_DEVAD_MASK, (device)) | \
+ (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
+#define TN40_MDIO_CMD_READ BIT(15)
+
+static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
+{
+ void __iomem *regs = priv->regs;
+ int mdio_cfg;
+
+ if (speed == TN40_MDIO_SPEED_1MHZ)
+ mdio_cfg = (0x7d << 7) | 0x08; /* 1MHz */
+ else
+ mdio_cfg = 0xA08; /* 6MHz */
+ mdio_cfg |= (1 << 6);
+ writel(mdio_cfg, regs + TN40_REG_MDIO_CMD_STAT);
+ msleep(100);
+}
+
+static u32 tn40_mdio_stat(struct tn40_priv *priv)
+{
+ void __iomem *regs = priv->regs;
+
+ return readl(regs + TN40_REG_MDIO_CMD_STAT);
+}
+
+static int tn40_mdio_wait_nobusy(struct tn40_priv *priv, u32 *val)
+{
+ u32 stat;
+ int ret;
+
+ ret = readx_poll_timeout_atomic(tn40_mdio_stat, priv, stat,
+ TN40_GET_MDIO_BUSY(stat) == 0, 10,
+ 10000);
+ if (val)
+ *val = stat;
+ return ret;
+}
+
+static int tn40_mdio_read(struct tn40_priv *priv, int port, int device,
+ u16 regnum)
+{
+ void __iomem *regs = priv->regs;
+ u32 i;
+
+ /* wait until MDIO is not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ i = TN40_MDIO_CMD_VAL(device, port);
+ writel(i, regs + TN40_REG_MDIO_CMD);
+ writel((u32)regnum, regs + TN40_REG_MDIO_ADDR);
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ writel(TN40_MDIO_CMD_READ | i, regs + TN40_REG_MDIO_CMD);
+ /* read CMD_STAT until not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ return lower_16_bits(readl(regs + TN40_REG_MDIO_DATA));
+}
+
+static int tn40_mdio_write(struct tn40_priv *priv, int port, int device,
+ u16 regnum, u16 data)
+{
+ void __iomem *regs = priv->regs;
+ u32 tmp_reg = 0;
+ int ret;
+
+ /* wait until MDIO is not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+ writel(TN40_MDIO_CMD_VAL(device, port), regs + TN40_REG_MDIO_CMD);
+ writel((u32)regnum, regs + TN40_REG_MDIO_ADDR);
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+ writel((u32)data, regs + TN40_REG_MDIO_DATA);
+ /* read CMD_STAT until not busy */
+ ret = tn40_mdio_wait_nobusy(priv, &tmp_reg);
+ if (ret)
+ return -EIO;
+
+ if (TN40_GET_MDIO_RD_ERR(tmp_reg)) {
+ dev_err(&priv->pdev->dev, "MDIO error after write command\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int tn40_mdio_read_c45(struct mii_bus *mii_bus, int addr, int devnum,
+ int regnum)
+{
+ return tn40_mdio_read(mii_bus->priv, addr, devnum, regnum);
+}
+
+static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
+}
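+
+/* Usage sketch (illustrative): once the bus is registered below, phylib
+ * reaches these callbacks through the generic clause-45 accessors, e.g.
+ *
+ *	val = mdiobus_c45_read(priv->mdio, phy_addr, MDIO_MMD_PMAPMD,
+ *			       MDIO_DEVID1);
+ *
+ * which ends up in tn40_mdio_read_c45() and then tn40_mdio_read().
+ */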
+
+int tn40_mdiobus_init(struct tn40_priv *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = TN40_DRV_NAME;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "tn40xx-%x-%x",
+ pci_domain_nr(pdev->bus), pci_dev_id(pdev));
+ bus->priv = priv;
+
+ bus->read_c45 = tn40_mdio_read_c45;
+ bus->write_c45 = tn40_mdio_write_c45;
+
+ ret = devm_mdiobus_register(&pdev->dev, bus);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
+ ret, bus->state, MDIOBUS_UNREGISTERED);
+ return ret;
+ }
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
+ priv->mdio = bus;
+ return 0;
+}
diff --git a/drivers/net/ethernet/tehuti/tn40_phy.c b/drivers/net/ethernet/tehuti/tn40_phy.c
new file mode 100644
index 000000000000..39eef7ca7958
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_phy.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+
+#include "tn40.h"
+
+static struct tn40_priv *tn40_config_to_priv(struct phylink_config *config)
+{
+ return container_of(config, struct tn40_priv, phylink_config);
+}
+
+static void tn40_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct tn40_priv *priv = tn40_config_to_priv(config);
+
+ tn40_set_link_speed(priv, speed);
+ netif_wake_queue(priv->ndev);
+}
+
+static void tn40_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct tn40_priv *priv = tn40_config_to_priv(config);
+
+ netif_stop_queue(priv->ndev);
+ tn40_set_link_speed(priv, 0);
+}
+
+static void tn40_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static const struct phylink_mac_ops tn40_mac_ops = {
+ .mac_config = tn40_mac_config,
+ .mac_link_up = tn40_link_up,
+ .mac_link_down = tn40_link_down,
+};
+
+int tn40_phy_register(struct tn40_priv *priv)
+{
+ struct phylink_config *config;
+ struct phy_device *phydev;
+ struct phylink *phylink;
+
+ phydev = phy_find_first(priv->mdio);
+ if (!phydev) {
+ dev_err(&priv->pdev->dev, "PHY isn't found\n");
+ return -ENODEV;
+ }
+
+ config = &priv->phylink_config;
+ config->dev = &priv->ndev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_10000FD;
+ __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
+
+ phylink = phylink_create(config, NULL, PHY_INTERFACE_MODE_XAUI,
+ &tn40_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ priv->phydev = phydev;
+ priv->phylink = phylink;
+ return 0;
+}
+
+void tn40_phy_unregister(struct tn40_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
diff --git a/drivers/net/ethernet/tehuti/tn40_regs.h b/drivers/net/ethernet/tehuti/tn40_regs.h
new file mode 100644
index 000000000000..95171aa57a9e
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_regs.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#ifndef _TN40_REGS_H_
+#define _TN40_REGS_H_
+
+/* Register region size */
+#define TN40_REGS_SIZE 0x10000
+
+/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */
+#define TN40_REG_TXD_CFG1_0 0x4000
+#define TN40_REG_TXD_CFG1_1 0x4004
+#define TN40_REG_TXD_CFG1_2 0x4008
+#define TN40_REG_TXD_CFG1_3 0x400C
+
+#define TN40_REG_RXF_CFG1_0 0x4010
+#define TN40_REG_RXF_CFG1_1 0x4014
+#define TN40_REG_RXF_CFG1_2 0x4018
+#define TN40_REG_RXF_CFG1_3 0x401C
+
+#define TN40_REG_RXD_CFG1_0 0x4020
+#define TN40_REG_RXD_CFG1_1 0x4024
+#define TN40_REG_RXD_CFG1_2 0x4028
+#define TN40_REG_RXD_CFG1_3 0x402C
+
+#define TN40_REG_TXF_CFG1_0 0x4030
+#define TN40_REG_TXF_CFG1_1 0x4034
+#define TN40_REG_TXF_CFG1_2 0x4038
+#define TN40_REG_TXF_CFG1_3 0x403C
+
+#define TN40_REG_TXD_CFG0_0 0x4040
+#define TN40_REG_TXD_CFG0_1 0x4044
+#define TN40_REG_TXD_CFG0_2 0x4048
+#define TN40_REG_TXD_CFG0_3 0x404C
+
+#define TN40_REG_RXF_CFG0_0 0x4050
+#define TN40_REG_RXF_CFG0_1 0x4054
+#define TN40_REG_RXF_CFG0_2 0x4058
+#define TN40_REG_RXF_CFG0_3 0x405C
+
+#define TN40_REG_RXD_CFG0_0 0x4060
+#define TN40_REG_RXD_CFG0_1 0x4064
+#define TN40_REG_RXD_CFG0_2 0x4068
+#define TN40_REG_RXD_CFG0_3 0x406C
+
+#define TN40_REG_TXF_CFG0_0 0x4070
+#define TN40_REG_TXF_CFG0_1 0x4074
+#define TN40_REG_TXF_CFG0_2 0x4078
+#define TN40_REG_TXF_CFG0_3 0x407C
+
+#define TN40_REG_TXD_WPTR_0 0x4080
+#define TN40_REG_TXD_WPTR_1 0x4084
+#define TN40_REG_TXD_WPTR_2 0x4088
+#define TN40_REG_TXD_WPTR_3 0x408C
+
+#define TN40_REG_RXF_WPTR_0 0x4090
+#define TN40_REG_RXF_WPTR_1 0x4094
+#define TN40_REG_RXF_WPTR_2 0x4098
+#define TN40_REG_RXF_WPTR_3 0x409C
+
+#define TN40_REG_RXD_WPTR_0 0x40A0
+#define TN40_REG_RXD_WPTR_1 0x40A4
+#define TN40_REG_RXD_WPTR_2 0x40A8
+#define TN40_REG_RXD_WPTR_3 0x40AC
+
+#define TN40_REG_TXF_WPTR_0 0x40B0
+#define TN40_REG_TXF_WPTR_1 0x40B4
+#define TN40_REG_TXF_WPTR_2 0x40B8
+#define TN40_REG_TXF_WPTR_3 0x40BC
+
+#define TN40_REG_TXD_RPTR_0 0x40C0
+#define TN40_REG_TXD_RPTR_1 0x40C4
+#define TN40_REG_TXD_RPTR_2 0x40C8
+#define TN40_REG_TXD_RPTR_3 0x40CC
+
+#define TN40_REG_RXF_RPTR_0 0x40D0
+#define TN40_REG_RXF_RPTR_1 0x40D4
+#define TN40_REG_RXF_RPTR_2 0x40D8
+#define TN40_REG_RXF_RPTR_3 0x40DC
+
+#define TN40_REG_RXD_RPTR_0 0x40E0
+#define TN40_REG_RXD_RPTR_1 0x40E4
+#define TN40_REG_RXD_RPTR_2 0x40E8
+#define TN40_REG_RXD_RPTR_3 0x40EC
+
+#define TN40_REG_TXF_RPTR_0 0x40F0
+#define TN40_REG_TXF_RPTR_1 0x40F4
+#define TN40_REG_TXF_RPTR_2 0x40F8
+#define TN40_REG_TXF_RPTR_3 0x40FC
+
+/* Hardware versioning */
+#define TN40_FPGA_VER 0x5030
+
+/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */
+#define TN40_REG_ISR TN40_REG_ISR0
+#define TN40_REG_ISR0 0x5100
+
+#define TN40_REG_IMR TN40_REG_IMR0
+#define TN40_REG_IMR0 0x5110
+
+#define TN40_REG_RDINTCM0 0x5120
+#define TN40_REG_RDINTCM2 0x5128
+
+#define TN40_REG_TDINTCM0 0x5130
+
+#define TN40_REG_ISR_MSK0 0x5140
+
+#define TN40_REG_INIT_SEMAPHORE 0x5170
+#define TN40_REG_INIT_STATUS 0x5180
+
+#define TN40_REG_MAC_LNK_STAT 0x0200
+#define TN40_MAC_LINK_STAT 0x0004 /* Link state */
+
+#define TN40_REG_BLNK_LED 0x0210
+
+#define TN40_REG_GMAC_RXF_A 0x1240
+
+#define TN40_REG_UNC_MAC0_A 0x1250
+#define TN40_REG_UNC_MAC1_A 0x1260
+#define TN40_REG_UNC_MAC2_A 0x1270
+
+#define TN40_REG_VLAN_0 0x1800
+
+#define TN40_REG_MAX_FRAME_A 0x12C0
+
+#define TN40_REG_RX_MAC_MCST0 0x1A80
+#define TN40_REG_RX_MAC_MCST1 0x1A84
+#define TN40_MAC_MCST_NUM 15
+#define TN40_REG_RX_MCST_HASH0 0x1A00
+#define TN40_MAC_MCST_HASH_NUM 8
+
+#define TN40_REG_VPC 0x2300
+#define TN40_REG_VIC 0x2320
+#define TN40_REG_VGLB 0x2340
+
+#define TN40_REG_CLKPLL 0x5000
+
+/* MDIO interface */
+
+#define TN40_REG_MDIO_CMD_STAT 0x6030
+#define TN40_REG_MDIO_CMD 0x6034
+#define TN40_REG_MDIO_DATA 0x6038
+#define TN40_REG_MDIO_ADDR 0x603C
+#define TN40_GET_MDIO_BUSY(x) FIELD_GET(GENMASK(0, 0), (x))
+#define TN40_GET_MDIO_RD_ERR(x) FIELD_GET(GENMASK(1, 1), (x))
+
+#define TN40_REG_REVISION 0x6000
+#define TN40_REG_SCRATCH 0x6004
+#define TN40_REG_CTRLST 0x6008
+#define TN40_REG_MAC_ADDR_0 0x600C
+#define TN40_REG_MAC_ADDR_1 0x6010
+#define TN40_REG_FRM_LENGTH 0x6014
+#define TN40_REG_PAUSE_QUANT 0x6054
+#define TN40_REG_RX_FIFO_SECTION 0x601C
+#define TN40_REG_TX_FIFO_SECTION 0x6020
+#define TN40_REG_RX_FULLNESS 0x6024
+#define TN40_REG_TX_FULLNESS 0x6028
+#define TN40_REG_HASHTABLE 0x602C
+
+#define TN40_REG_RST_PORT 0x7000
+#define TN40_REG_DIS_PORT 0x7010
+#define TN40_REG_RST_QU 0x7020
+#define TN40_REG_DIS_QU 0x7030
+
+#define TN40_REG_CTRLST_TX_ENA 0x0001
+#define TN40_REG_CTRLST_RX_ENA 0x0002
+#define TN40_REG_CTRLST_PRM_ENA 0x0010
+#define TN40_REG_CTRLST_PAD_ENA 0x0020
+
+#define TN40_REG_CTRLST_BASE (TN40_REG_CTRLST_PAD_ENA | TN40_REG_CTRLST_PRM_ENA)
+
+/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c */
+#define TN40_TX_RX_CFG1_BASE 0xffffffff /*0-31 */
+#define TN40_TX_RX_CFG0_BASE 0xfffff000 /*31:12 */
+#define TN40_TX_RX_CFG0_RSVD 0x00000ffc /*11:2 */
+#define TN40_TX_RX_CFG0_SIZE 0x00000003 /*1:0 */
+
+/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */
+#define TN40_TXF_WPTR_WR_PTR 0x00007ff8 /*14:3 */
+
+/* TXD TXF RXF RXD READ 0x00C0 --- 0x00FC */
+#define TN40_TXF_RPTR_RD_PTR 0x00007ff8 /*14:3 */
+
+/* The last 4 bits are dropped; the size is rounded to 16 */
+#define TN40_TXF_WPTR_MASK 0x7ff0
+
+/* regISR 0x0100 */
+/* regIMR 0x0110 */
+#define TN40_IMR_INPROG 0x80000000 /*31 */
+#define TN40_IR_LNKCHG1 0x10000000 /*28 */
+#define TN40_IR_LNKCHG0 0x08000000 /*27 */
+#define TN40_IR_GPIO 0x04000000 /*26 */
+#define TN40_IR_RFRSH 0x02000000 /*25 */
+#define TN40_IR_RSVD 0x01000000 /*24 */
+#define TN40_IR_SWI 0x00800000 /*23 */
+#define TN40_IR_RX_FREE_3 0x00400000 /*22 */
+#define TN40_IR_RX_FREE_2 0x00200000 /*21 */
+#define TN40_IR_RX_FREE_1 0x00100000 /*20 */
+#define TN40_IR_RX_FREE_0 0x00080000 /*19 */
+#define TN40_IR_TX_FREE_3 0x00040000 /*18 */
+#define TN40_IR_TX_FREE_2 0x00020000 /*17 */
+#define TN40_IR_TX_FREE_1 0x00010000 /*16 */
+#define TN40_IR_TX_FREE_0 0x00008000 /*15 */
+#define TN40_IR_RX_DESC_3 0x00004000 /*14 */
+#define TN40_IR_RX_DESC_2 0x00002000 /*13 */
+#define TN40_IR_RX_DESC_1 0x00001000 /*12 */
+#define TN40_IR_RX_DESC_0 0x00000800 /*11 */
+#define TN40_IR_PSE 0x00000400 /*10 */
+#define TN40_IR_TMR3 0x00000200 /* 9 */
+#define TN40_IR_TMR2 0x00000100 /* 8 */
+#define TN40_IR_TMR1 0x00000080 /* 7 */
+#define TN40_IR_TMR0 0x00000040 /* 6 */
+#define TN40_IR_VNT 0x00000020 /* 5 */
+#define TN40_IR_RxFL 0x00000010 /* 4 */
+#define TN40_IR_SDPERR 0x00000008 /* 3 */
+#define TN40_IR_TR 0x00000004 /* 2 */
+#define TN40_IR_PCIE_LINK 0x00000002 /* 1 */
+#define TN40_IR_PCIE_TOUT 0x00000001 /* 0 */
+
+#define TN40_IR_EXTRA \
+ (TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 |\
+ TN40_IR_PSE | TN40_IR_TMR0 | TN40_IR_PCIE_LINK | \
+ TN40_IR_PCIE_TOUT)
+
+#define TN40_GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */
+#define TN40_GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */
+#define TN40_GMAC_RX_FILTER_RSV0 0x0200 /* reserved */
+#define TN40_GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */
+#define TN40_GMAC_RX_FILTER_AOF 0x0080 /* accept overrun */
+#define TN40_GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */
+#define TN40_GMAC_RX_FILTER_ARUNT 0x0020 /* accept underrun */
+#define TN40_GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */
+#define TN40_GMAC_RX_FILTER_AM 0x0008 /* accept multicast */
+#define TN40_GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */
+#define TN40_GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */
+
+#define TN40_MAX_FRAME_AB_VAL 0x3fff /* 13:0 */
+
+#define TN40_CLKPLL_PLLLKD 0x0200 /* 9 */
+#define TN40_CLKPLL_RSTEND 0x0100 /* 8 */
+#define TN40_CLKPLL_SFTRST 0x0001 /* 0 */
+
+#define TN40_CLKPLL_LKD (TN40_CLKPLL_PLLLKD | TN40_CLKPLL_RSTEND)
+
+#endif
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1729eb0e0b41..0d5a862cd78a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -188,6 +188,7 @@ config TI_ICSSG_PRUETH
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
depends on PTP_1588_CLOCK_OPTIONAL
help
@@ -204,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 6e086b4c0384..cbcf44806924 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -31,21 +31,18 @@ ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
-obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
-icssg-prueth-y := icssg/icssg_prueth.o \
- icssg/icssg_common.o \
- icssg/icssg_classifier.o \
- icssg/icssg_queues.o \
- icssg/icssg_config.o \
- icssg/icssg_mii_cfg.o \
- icssg/icssg_stats.o \
- icssg/icssg_ethtool.o
-obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o
-icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o \
- icssg/icssg_common.o \
- icssg/icssg_classifier.o \
- icssg/icssg_config.o \
- icssg/icssg_mii_cfg.o \
- icssg/icssg_stats.o \
- icssg/icssg_ethtool.o
+obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o icssg.o
+icssg-prueth-y := icssg/icssg_prueth.o icssg/icssg_switchdev.o
+
+obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o icssg.o
+icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o
+
+icssg-y := icssg/icssg_common.o \
+ icssg/icssg_classifier.o \
+ icssg/icssg_queues.o \
+ icssg/icssg_config.o \
+ icssg/icssg_mii_cfg.o \
+ icssg/icssg_stats.o \
+ icssg/icssg_ethtool.o
+
obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index a1d0935d1ebe..b60976947da5 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -692,7 +692,7 @@ static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,
};
static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
unsigned int ptp_v2_filter;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 4e50b3792888..81d9f21086ec 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -896,7 +896,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
- ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
if (ret)
goto error_cleanup;
@@ -2424,10 +2424,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq <= 0) {
+ if (rx_chn->irq < 0) {
dev_err(dev, "Failed to get rx dma irq %d\n",
rx_chn->irq);
- ret = -ENXIO;
+ ret = rx_chn->irq;
goto err;
}
}
@@ -2611,7 +2611,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- port->slave.phy_node = port_np;
+ port->slave.port_np = port_np;
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -2703,6 +2703,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
mutex_init(&ndev_priv->mm_lock);
port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
+ port->ndev->dev.of_node = port->slave.port_np;
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -2760,7 +2761,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
}
phylink = phylink_create(&port->slave.phylink_config,
- of_node_to_fwnode(port->slave.phy_node),
+ of_node_to_fwnode(port->slave.port_np),
port->slave.phy_if,
&am65_cpsw_phylink_mac_ops);
if (IS_ERR(phylink))
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index d8ce88dc9c89..e2ce2be320bd 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -30,7 +30,7 @@ struct am65_cpts;
struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
- struct device_node *phy_node;
+ struct device_node *port_np;
phy_interface_t phy_if;
struct phy *ifphy;
struct phy *serdes_phy;
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index f7b283353ba2..53ed23d68722 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -717,7 +717,7 @@ err:
}
#if IS_ENABLED(CONFIG_TI_CPTS)
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -738,7 +738,7 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
return 0;
}
#else
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 7efa72502c86..1f448290b9f4 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -510,6 +510,6 @@ int cpsw_set_ringparam(struct net_device *ndev,
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler);
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info);
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info);
#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 3025e9c18970..75c294ce6fb6 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -17,6 +17,7 @@
#include <linux/timekeeping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <linux/workqueue.h>
#include "icss_iep.h"
@@ -94,7 +95,7 @@ enum {
* @flags: Flags to represent IEP properties
*/
struct icss_iep_plat_data {
- struct regmap_config *config;
+ const struct regmap_config *config;
u32 reg_offs[ICSS_IEP_MAX_REGS];
u32 flags;
};
@@ -110,7 +111,6 @@ struct icss_iep {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
struct mutex ptp_clk_mutex; /* PHC access serializer */
- spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
u32 def_inc;
s16 slow_cmp_inc;
u32 slow_cmp_count;
@@ -122,6 +122,7 @@ struct icss_iep {
int cap_cmp_irq;
u64 period;
u32 latch_enable;
+ struct work_struct work;
};
/**
@@ -192,14 +193,11 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
*/
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
- unsigned long flags;
-
if (iep->ops && iep->ops->settime) {
iep->ops->settime(iep->clockops_data, ns);
return;
}
- spin_lock_irqsave(&iep->irq_lock, flags);
if (iep->pps_enabled || iep->perout_enabled)
writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
@@ -210,7 +208,6 @@ static void icss_iep_settime(struct icss_iep *iep, u64 ns)
writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
}
- spin_unlock_irqrestore(&iep->irq_lock, flags);
}
/**
@@ -546,7 +543,6 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- unsigned long flags;
int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);
@@ -559,11 +555,9 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
if (iep->perout_enabled == !!on)
goto exit;
- spin_lock_irqsave(&iep->irq_lock, flags);
ret = icss_iep_perout_enable_hw(iep, req, on);
if (!ret)
iep->perout_enabled = !!on;
- spin_unlock_irqrestore(&iep->irq_lock, flags);
exit:
mutex_unlock(&iep->ptp_clk_mutex);
@@ -571,11 +565,61 @@ exit:
return ret;
}
+static void icss_iep_cap_cmp_work(struct work_struct *work)
+{
+ struct icss_iep *iep = container_of(work, struct icss_iep, work);
+ const u32 *reg_offs = iep->plat_data->reg_offs;
+ struct ptp_clock_event pevent;
+ unsigned int val;
+ u64 ns, ns_next;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
+ val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+ ns |= (u64)val << 32;
+ }
+ /* set next event */
+ ns_next = ns + iep->period;
+ writel(lower_32_bits(ns_next),
+ iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns_next),
+ iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+
+ pevent.pps_times.ts_real = ns_to_timespec64(ns);
+ pevent.type = PTP_CLOCK_PPSUSR;
+ pevent.index = 0;
+ ptp_clock_event(iep->ptp_clock, &pevent);
+ dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);
+
+ mutex_unlock(&iep->ptp_clk_mutex);
+}
+
+static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
+{
+ struct icss_iep *iep = (struct icss_iep *)dev_id;
+ const u32 *reg_offs = iep->plat_data->reg_offs;
+ unsigned int val;
+
+ val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+ /* The driver only enables CMP1 */
+ if (val & BIT(1)) {
+ /* Clear the event */
+ writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+ if (iep->pps_enabled || iep->perout_enabled)
+ schedule_work(&iep->work);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
struct ptp_clock_request rq;
struct timespec64 ts;
- unsigned long flags;
int ret = 0;
u64 ns;
@@ -589,8 +633,6 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (iep->pps_enabled == !!on)
goto exit;
- spin_lock_irqsave(&iep->irq_lock, flags);
-
rq.perout.index = 0;
if (on) {
ns = icss_iep_gettime(iep, NULL);
@@ -602,13 +644,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ if (iep->cap_cmp_irq)
+ cancel_work_sync(&iep->work);
}
if (!ret)
iep->pps_enabled = !!on;
- spin_unlock_irqrestore(&iep->irq_lock, flags);
-
exit:
mutex_unlock(&iep->ptp_clk_mutex);
@@ -777,6 +819,8 @@ int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
if (iep->ops && iep->ops->perout_enable) {
iep->ptp_info.n_per_out = 1;
iep->ptp_info.pps = 1;
+ } else if (iep->cap_cmp_irq) {
+ iep->ptp_info.pps = 1;
}
if (iep->ops && iep->ops->extts_enable)
@@ -817,6 +861,7 @@ static int icss_iep_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icss_iep *iep;
struct clk *iep_clk;
+ int ret, irq;
iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
if (!iep)
@@ -827,6 +872,22 @@ static int icss_iep_probe(struct platform_device *pdev)
if (IS_ERR(iep->base))
return -ENODEV;
+ irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
+ IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
+ if (ret) {
+ dev_info(iep->dev, "cap_cmp irq request failed: %x\n",
+ ret);
+ } else {
+ iep->cap_cmp_irq = irq;
+ INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
+ }
+ }
+
iep_clk = devm_clk_get(dev, NULL);
if (IS_ERR(iep_clk))
return PTR_ERR(iep_clk);
@@ -853,7 +914,6 @@ static int icss_iep_probe(struct platform_device *pdev)
iep->ptp_info = icss_iep_ptp_info;
mutex_init(&iep->ptp_clk_mutex);
- spin_lock_init(&iep->irq_lock);
dev_set_drvdata(dev, iep);
icss_iep_disable(iep);
@@ -892,7 +952,7 @@ static int icss_iep_regmap_read(void *context, unsigned int reg,
return 0;
}
-static struct regmap_config am654_icss_iep_regmap_config = {
+static const struct regmap_config am654_icss_iep_regmap_config = {
.name = "icss iep",
.reg_stride = 1,
.reg_write = icss_iep_regmap_write,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 79ba47bb3602..9ec504d976d6 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -297,6 +297,7 @@ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
mac[2] << 16 | mac[3] << 24));
regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8));
}
+EXPORT_SYMBOL_GPL(icssg_class_set_mac_addr);
static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
int slot, const u8 *addr, const u8 *mask)
@@ -360,6 +361,7 @@ void icssg_class_disable(struct regmap *miig_rt, int slice)
/* clear CFG2 */
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
+EXPORT_SYMBOL_GPL(icssg_class_disable);
void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
bool is_sr1)
@@ -390,6 +392,7 @@ void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
/* clear CFG2 */
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
+EXPORT_SYMBOL_GPL(icssg_class_default);
void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
{
@@ -408,6 +411,7 @@ void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
regmap_write(miig_rt, offset, data);
}
}
+EXPORT_SYMBOL_GPL(icssg_class_promiscuous_sr1);
void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
struct net_device *ndev)
@@ -449,14 +453,16 @@ void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
slot++;
}
}
+EXPORT_SYMBOL_GPL(icssg_class_add_mcast_sr1);
/* required for SAV check */
void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
{
const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
- rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
}
+EXPORT_SYMBOL_GPL(icssg_ft1_set_mac_addr);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 088ab8076db4..b9d8a93d1680 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -51,6 +51,7 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
if (rx_chn->rx_chn)
k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
+EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);
void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
@@ -71,6 +72,7 @@ void prueth_cleanup_tx_chns(struct prueth_emac *emac)
memset(tx_chn, 0, sizeof(*tx_chn));
}
}
+EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);
void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
@@ -84,6 +86,7 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
netif_napi_del(&tx_chn->napi_tx);
}
}
+EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
struct cppi5_host_desc_t *desc)
@@ -120,6 +123,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
+EXPORT_SYMBOL_GPL(prueth_xmit_free);
int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
int budget, bool *tdown)
@@ -264,6 +268,7 @@ fail:
prueth_ndev_del_tx_napi(emac, i);
return ret;
}
+EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);
int prueth_init_tx_chns(struct prueth_emac *emac)
{
@@ -344,6 +349,7 @@ fail:
prueth_cleanup_tx_chns(emac);
return ret;
}
+EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
@@ -440,9 +446,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (ret <= 0) {
- if (!ret)
- ret = -ENXIO;
+ if (ret < 0) {
netdev_err(ndev, "Failed to get rx dma irq");
goto fail;
}
@@ -455,6 +459,7 @@ fail:
prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
return ret;
}
+EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
int prueth_dma_rx_push(struct prueth_emac *emac,
struct sk_buff *skb,
@@ -492,6 +497,7 @@ int prueth_dma_rx_push(struct prueth_emac *emac,
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
desc_rx, desc_dma);
}
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
@@ -507,6 +513,7 @@ u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
return ns;
}
+EXPORT_SYMBOL_GPL(icssg_ts_to_ns);
void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata)
@@ -581,6 +588,8 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
} else {
/* send the filled skb up the n/w stack */
skb_put(skb, pkt_len);
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&emac->napi_rx, skb);
ndev->stats.rx_bytes += pkt_len;
@@ -636,7 +645,7 @@ static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
}
/**
- * emac_ndo_start_xmit - EMAC Transmit function
+ * icssg_ndo_start_xmit - EMAC Transmit function
* @skb: SKB pointer
* @ndev: EMAC network adapter
*
@@ -647,7 +656,7 @@ static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
*
* Return: enum netdev_tx
*/
-enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct prueth_emac *emac = netdev_priv(ndev);
@@ -806,6 +815,7 @@ drop_stop_q_busy:
netif_tx_stop_queue(netif_txq);
return NETDEV_TX_BUSY;
}
+EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
@@ -831,6 +841,7 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(prueth_rx_irq);
void prueth_emac_stop(struct prueth_emac *emac)
{
@@ -855,6 +866,7 @@ void prueth_emac_stop(struct prueth_emac *emac)
rproc_shutdown(prueth->rtu[slice]);
rproc_shutdown(prueth->pru[slice]);
}
+EXPORT_SYMBOL_GPL(prueth_emac_stop);
void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
@@ -867,8 +879,9 @@ void prueth_cleanup_tx_ts(struct prueth_emac *emac)
}
}
}
+EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);
-int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
int rx_flow = emac->is_sr1 ?
@@ -905,6 +918,7 @@ int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
return num_rx;
}
+EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
@@ -930,6 +944,7 @@ int prueth_prepare_rx_chan(struct prueth_emac *emac,
return 0;
}
+EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
bool free_skb)
@@ -944,6 +959,7 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
}
}
+EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);
void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
int num_flows, bool disable)
@@ -956,11 +972,13 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
+EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
-void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
ndev->stats.tx_errors++;
}
+EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);
static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
@@ -1024,7 +1042,7 @@ static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
-EFAULT : 0;
}
-int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCGHWTSTAMP:
@@ -1037,9 +1055,10 @@ int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
return phy_do_ioctl(ndev, ifr, cmd);
}
+EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);
-void emac_ndo_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
+void icssg_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
{
struct prueth_emac *emac = netdev_priv(ndev);
@@ -1058,9 +1077,10 @@ void emac_ndo_get_stats64(struct net_device *ndev,
stats->tx_errors = ndev->stats.tx_errors;
stats->tx_dropped = ndev->stats.tx_dropped;
}
+EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
-int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
- size_t len)
+int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
{
struct prueth_emac *emac = netdev_priv(ndev);
int ret;
@@ -1071,6 +1091,7 @@ int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
return 0;
}
+EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);
/* get emac_port corresponding to eth_node name */
int prueth_node_port(struct device_node *eth_node)
@@ -1089,6 +1110,7 @@ int prueth_node_port(struct device_node *eth_node)
else
return PRUETH_PORT_INVALID;
}
+EXPORT_SYMBOL_GPL(prueth_node_port);
/* get MAC instance corresponding to eth_node name */
int prueth_node_mac(struct device_node *eth_node)
@@ -1107,6 +1129,7 @@ int prueth_node_mac(struct device_node *eth_node)
else
return PRUETH_MAC_INVALID;
}
+EXPORT_SYMBOL_GPL(prueth_node_mac);
void prueth_netdev_exit(struct prueth *prueth,
struct device_node *eth_node)
@@ -1132,6 +1155,7 @@ void prueth_netdev_exit(struct prueth *prueth,
free_netdev(emac->ndev);
prueth->emac[mac] = NULL;
}
+EXPORT_SYMBOL_GPL(prueth_netdev_exit);
int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
{
@@ -1182,6 +1206,7 @@ int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
return 0;
}
+EXPORT_SYMBOL_GPL(prueth_get_cores);
void prueth_put_cores(struct prueth *prueth, int slice)
{
@@ -1194,6 +1219,7 @@ void prueth_put_cores(struct prueth *prueth, int slice)
if (prueth->pru[slice])
pru_rproc_put(prueth->pru[slice]);
}
+EXPORT_SYMBOL_GPL(prueth_put_cores);
#ifdef CONFIG_PM_SLEEP
static int prueth_suspend(struct device *dev)
@@ -1250,3 +1276,9 @@ static int prueth_resume(struct device *dev)
const struct dev_pm_ops prueth_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
};
+EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 15f2235bf90f..dae52a83a378 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -107,28 +107,49 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
},
};
+static void icssg_config_mii_init_switch(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int mii = prueth_emac_slice(emac);
+ u32 txcfg_reg, pcnt_reg, txcfg;
+ struct regmap *mii_rt;
+
+ mii_rt = prueth->mii_rt;
+
+ txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
+ PRUSS_MII_RT_TXCFG1;
+ pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
+ PRUSS_MII_RT_RX_PCNT1;
+
+ txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
+ PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, pcnt_reg, 0x1);
+}
+
static void icssg_config_mii_init(struct prueth_emac *emac)
{
- u32 rxcfg, txcfg, rxcfg_reg, txcfg_reg, pcnt_reg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
+ u32 txcfg, txcfg_reg, pcnt_reg;
struct regmap *mii_rt;
mii_rt = prueth->mii_rt;
- rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 :
- PRUSS_MII_RT_RXCFG1;
txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
PRUSS_MII_RT_TXCFG1;
pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
PRUSS_MII_RT_RX_PCNT1;
- rxcfg = MII_RXCFG_DEFAULT;
txcfg = MII_TXCFG_DEFAULT;
- if (slice == ICSS_MII1)
- rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
-
/* In MII mode TX lines swapped inside ICSSG, so TX_MUX_SEL cfg need
* to be swapped also comparing to RGMII mode.
*/
@@ -137,7 +158,6 @@ static void icssg_config_mii_init(struct prueth_emac *emac)
else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
- regmap_write(mii_rt, rxcfg_reg, rxcfg);
regmap_write(mii_rt, txcfg_reg, txcfg);
regmap_write(mii_rt, pcnt_reg, 0x1);
}
@@ -228,6 +248,7 @@ void icssg_config_ipg(struct prueth_emac *emac)
icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
}
+EXPORT_SYMBOL_GPL(icssg_config_ipg);
static void emac_r30_cmd_init(struct prueth_emac *emac)
{
@@ -257,6 +278,66 @@ static int emac_r30_is_done(struct prueth_emac *emac)
return 1;
}
+static int prueth_switch_buffer_setup(struct prueth_emac *emac)
+{
+ struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
+ struct icssg_rxq_ctx __iomem *rxq_ctx;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ u32 addr;
+ int i;
+
+ addr = lower_32_bits(prueth->msmcram.pa);
+ if (slice)
+ addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+
+ if (addr % SZ_64K) {
+ dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+ return -EINVAL;
+ }
+
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ /* workaround for f/w bug. bpool 0 needs to be initialized */
+ for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
+ writel(addr, &bpool_cfg[i].addr);
+ writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ }
+
+ if (!slice)
+ addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ else
+ addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
+
+ for (i = PRUETH_NUM_BUF_POOLS;
+ i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
+ i++) {
+ /* The driver only uses the first 4 queues per PRU, so initialize only those */
+ if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
+ writel(addr, &bpool_cfg[i].addr);
+ writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
+ addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
+ } else {
+ writel(0, &bpool_cfg[i].addr);
+ writel(0, &bpool_cfg[i].len);
+ }
+ }
+
+ if (!slice)
+ addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
+ else
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ writel(addr - SZ_2K, &rxq_ctx->end);
+
+ return 0;
+}
+
static int prueth_emac_buffer_setup(struct prueth_emac *emac)
{
struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
@@ -321,25 +402,63 @@ static void icssg_init_emac_mode(struct prueth *prueth)
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
*/
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ int i;
u8 mac[ETH_ALEN] = { 0 };
if (prueth->emacs_initialized)
return;
- regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1,
- SMEM_VLAN_OFFSET_MASK, 0);
- regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0);
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+ /* Set enable VLAN aware mode, and FDBs for all PRUs */
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
+ prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
+ EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
+ for (i = 0; i < SZ_4K - 1; i++) {
+ prueth->vlan_tbl[i].fid = i;
+ prueth->vlan_tbl[i].fid_c1 = 0;
+ }
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
+static void icssg_init_switch_mode(struct prueth *prueth)
+{
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ int i;
+
+ if (prueth->emacs_initialized)
+ return;
+
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+ /* Set enable VLAN aware mode, and FDBs for all PRUs */
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
+ prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
+ EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
+ for (i = 0; i < SZ_4K - 1; i++) {
+ prueth->vlan_tbl[i].fid = i;
+ prueth->vlan_tbl[i].fid_c1 = 0;
+ }
+
+ if (prueth->hw_bridge_dev)
+ icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
+ icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
+}
+
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- icssg_init_emac_mode(prueth);
+ if (prueth->is_switch_mode)
+ icssg_init_switch_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@@ -353,7 +472,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
- icssg_config_mii_init(emac);
+ if (prueth->is_switch_mode)
+ icssg_config_mii_init_switch(emac);
+ else
+ icssg_config_mii_init(emac);
icssg_config_ipg(emac);
icssg_update_rgmii_cfg(prueth->miig_rt, emac);
@@ -376,7 +498,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
writeb(0, config + QUEUE_NUM_UNTAGGED);
- ret = prueth_emac_buffer_setup(emac);
+ if (prueth->is_switch_mode)
+ ret = prueth_switch_buffer_setup(emac);
+ else
+ ret = prueth_emac_buffer_setup(emac);
if (ret)
return ret;
@@ -384,6 +509,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
return 0;
}
+EXPORT_SYMBOL_GPL(icssg_config);
/* Bitmask for ICSSG r30 commands */
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
@@ -408,8 +534,8 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = {
{{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/
};
-int emac_set_port_state(struct prueth_emac *emac,
- enum icssg_port_state_cmd cmd)
+int icssg_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd cmd)
{
struct icssg_r30_cmd __iomem *p;
int ret = -ETIMEDOUT;
@@ -440,6 +566,7 @@ int emac_set_port_state(struct prueth_emac *emac,
return ret;
}
+EXPORT_SYMBOL_GPL(icssg_set_port_state);
void icssg_config_half_duplex(struct prueth_emac *emac)
{
@@ -451,6 +578,7 @@ void icssg_config_half_duplex(struct prueth_emac *emac)
val = get_random_u32();
writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
}
+EXPORT_SYMBOL_GPL(icssg_config_half_duplex);
void icssg_config_set_speed(struct prueth_emac *emac)
{
@@ -477,3 +605,180 @@ void icssg_config_set_speed(struct prueth_emac *emac)
writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
+EXPORT_SYMBOL_GPL(icssg_config_set_speed);
+
+int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
+ struct mgmt_cmd_rsp *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr, ret;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+ /* First 4 bytes have FW owned buffer linking info which should
+ * not be touched
+ */
+ memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
+ ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0,
+ 2000, 20000000, false, prueth, slice == 0 ?
+ ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
+ if (ret) {
+ netdev_err(emac->ndev, "Timed out sending HWQ message\n");
+ return ret;
+ }
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+ /* Return buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_send_fdb_msg);
+
+static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd,
+ const unsigned char *addr, u8 fid, int cmd)
+{
+ int slice = prueth_emac_slice(emac);
+ u8 mac_fid[ETH_ALEN + 2];
+ u16 fdb_slot;
+
+ ether_addr_copy(mac_fid, addr);
+
+ /* 1-1 VID-FID mapping is already set up */
+ mac_fid[ETH_ALEN] = fid;
+ mac_fid[ETH_ALEN + 1] = 0;
+
+ fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;
+
+ fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd->type = ICSSG_FW_MGMT_FDB_CMD_TYPE;
+ fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd->param = cmd;
+ fdb_cmd->param |= (slice << 4);
+
+ memcpy(&fdb_cmd->cmd_args[0], addr, 4);
+ memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2);
+ fdb_cmd->cmd_args[2] = fdb_slot;
+
+ netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid);
+}
+
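The slot hashing in icssg_fdb_setup() above can be reproduced with a short standalone C sketch. The crc32_le()/bitrev32() helpers below only mirror the kernel helpers' calling convention, and the 0x1ff mask plus the MAC/FID values are illustrative placeholders, not the driver's real PRUETH_SWITCH_FDB_MASK or addresses.

/* Userspace sketch (not kernel code): derive an FDB bucket slot from
 * MAC + FID the way icssg_fdb_setup() does. crc32_le() here is the
 * reflected CRC-32 (poly 0xEDB88320) with a caller-supplied seed and no
 * final inversion, matching the kernel helper's convention.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++)
		r = (r << 1) | ((x >> i) & 1u);
	return r;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint32_t fdb_mask = 0x1ff;	/* placeholder for PRUETH_SWITCH_FDB_MASK */
	uint8_t mac_fid[8];
	uint32_t slot;

	memcpy(mac_fid, mac, 6);
	mac_fid[6] = 1;			/* FID == VID, 1-1 mapping */
	mac_fid[7] = 0;

	slot = bitrev32(crc32_le(0, mac_fid, 8)) & fdb_mask;
	printf("FDB slot = 0x%x\n", slot);
	return 0;
}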
+int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid, u8 fid_c2, bool add)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ u8 fid = vid;
+ int ret;
+
+ icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB);
+
+ fid_c2 |= ICSSG_FDB_ENTRY_VALID;
+ fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ if (fdb_cmd_rsp.status == 1)
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(icssg_fdb_add_del);
+
+int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ struct prueth_fdb_slot *slot;
+ u8 fid = vid;
+ int ret, i;
+
+ icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT);
+
+ fdb_cmd.cmd_args[1] |= fid << 16;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+
+ slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER);
+ for (i = 0; i < 4; i++) {
+ if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
+ return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
+ slot++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_fdb_lookup);
+
+void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add)
+{
+ struct prueth *prueth = emac->prueth;
+ struct prueth_vlan_tbl *tbl;
+ u8 fid_c1;
+
+ tbl = prueth->vlan_tbl;
+ fid_c1 = tbl[vid].fid_c1;
+
+ /* FID_C1: bit0..2 port membership mask,
+ * bit3..5 tagging mask for each port
+ * bit6 Stream VID (not handled currently)
+ * bit7 MC flood (not handled currently)
+ */
+ if (add) {
+ fid_c1 |= (port_mask | port_mask << 3);
+ fid_c1 &= ~(untag_mask << 3);
+ } else {
+ fid_c1 &= ~(port_mask | port_mask << 3);
+ }
+
+ tbl[vid].fid_c1 = fid_c1;
+}
+EXPORT_SYMBOL_GPL(icssg_vtbl_modify);
+
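A minimal sketch of the FID_C1 encoding handled by icssg_vtbl_modify(): bits 0..2 carry the per-port membership mask and bits 3..5 the tagging mask, so adding a port sets both bits and the untag mask then clears the tag bit. The port numbers below are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t fid_c1 = 0;
	uint8_t port_mask = 1u << 1;	/* example: external port 1 joins the VLAN */
	uint8_t untag_mask = 1u << 1;	/* and should egress it untagged */

	fid_c1 |= port_mask | (uint8_t)(port_mask << 3);	/* member + tagged by default */
	fid_c1 &= (uint8_t)~(untag_mask << 3);			/* untagged: clear the tag bit */

	printf("FID_C1 = 0x%02x\n", fid_c1);	/* 0x02: VLAN member, egress untagged */
	return 0;
}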
+u16 icssg_get_pvid(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 pvid;
+
+ if (emac->port_id == PRUETH_PORT_MII0)
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+
+ pvid = pvid >> 24;
+
+ return pvid;
+}
+EXPORT_SYMBOL_GPL(icssg_get_pvid);
+
+void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
+{
+ u32 pvid;
+
+ /* only 256 VLANs are supported */
+ pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));
+
+ if (port == PRUETH_PORT_MII0)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else if (port == PRUETH_PORT_MII1)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+ else
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
+}
+EXPORT_SYMBOL_GPL(icssg_set_pvid);
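The PVID word written by icssg_set_pvid() is stored big-endian as (ETH_P_8021Q << 16) | vid, which is why icssg_get_pvid() can recover the VID from bits 31..24 after a plain readl() on the little-endian host CPU. A small round-trip sketch, assuming a little-endian host as on the AM65x SoCs this driver targets:

#include <stdint.h>
#include <stdio.h>

/* unconditional byte swap; equal to cpu_to_be32() on a little-endian host */
static uint32_t swap32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t vid = 5;
	uint32_t stored = swap32((0x8100u << 16) | (vid & 0xffu));	/* write path */
	uint32_t readback = stored >> 24;				/* read path  */

	printf("vid %u -> shram word 0x%08x -> pvid %u\n", vid, stored, readback);
	return 0;
}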
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index cf2ea4bd22a2..1ac60283923b 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -35,6 +35,15 @@ struct icssg_flow_cfg {
(2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
+#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K
+#define PRUETH_SW_NUM_BUF_POOLS_HOST 8
+#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4
+#define MSMC_RAM_SIZE_SWITCH_MODE \
+ (MSMC_RAM_SIZE + \
+ (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
+
+#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
+
struct icssg_rxq_ctx {
__le32 start[3];
__le32 end;
@@ -202,6 +211,23 @@ struct icssg_setclock_desc {
#define ICSSG_TS_PUSH_SLICE0 40
#define ICSSG_TS_PUSH_SLICE1 41
+struct mgmt_cmd {
+ u8 param;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+};
+
+struct mgmt_cmd_rsp {
+ u32 reserved;
+ u8 status;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+};
+
/* FDB FID_C2 flag definitions */
/* Indicates host port membership.*/
#define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index c8d0f45cc5b1..5688f054cec5 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -110,7 +110,7 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
}
static int emac_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct prueth_emac *emac = netdev_priv(ndev);
@@ -312,3 +312,4 @@ const struct ethtool_ops icssg_ethtool_ops = {
.nway_reset = emac_nway_reset,
.get_rmon_stats = emac_get_rmon_stats,
};
+EXPORT_SYMBOL_GPL(icssg_ethtool_ops);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
index 92718ae40d7e..b64955438bb2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
@@ -40,6 +40,7 @@ void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu)
(mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
}
}
+EXPORT_SYMBOL_GPL(icssg_mii_update_mtu);
void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
{
@@ -66,6 +67,7 @@ void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask,
full_duplex_val);
}
+EXPORT_SYMBOL_GPL(icssg_update_rgmii_cfg);
void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if)
{
@@ -105,6 +107,7 @@ u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii)
return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
}
+EXPORT_SYMBOL_GPL(icssg_rgmii_get_speed);
u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
{
@@ -118,3 +121,4 @@ u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
}
+EXPORT_SYMBOL_GPL(icssg_rgmii_get_fullduplex);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 1ea3fbd5e954..3e51b3a9b0a5 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -27,13 +27,19 @@
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <net/switchdev.h>
#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
+#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"
#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
+#define DEFAULT_VID 1
+#define DEFAULT_PORT_MASK 1
+#define DEFAULT_UNTAG_MASK 1
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -112,6 +118,19 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static struct icssg_firmwares icssg_switch_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
+ }
+};
+
static struct icssg_firmwares icssg_emac_firmwares[] = {
{
.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
@@ -131,7 +150,10 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
struct device *dev = prueth->dev;
int slice, ret;
- firmwares = icssg_emac_firmwares;
+ if (prueth->is_switch_mode)
+ firmwares = icssg_switch_firmwares;
+ else
+ firmwares = icssg_emac_firmwares;
slice = prueth_emac_slice(emac);
if (slice < 0) {
@@ -227,10 +249,10 @@ static void emac_adjust_link(struct net_device *ndev)
icssg_config_ipg(emac);
spin_unlock_irqrestore(&emac->lock, flags);
icssg_config_set_speed(emac);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
} else {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
}
}
@@ -417,6 +439,37 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int port_mask = BIT(emac->port_id);
+
+ port_mask |= icssg_fdb_lookup(emac, addr, 0);
+ icssg_fdb_add_del(emac, addr, 0, port_mask, true);
+ icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
+
+ return 0;
+}
+
+static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int port_mask = BIT(emac->port_id);
+ int other_port_mask;
+
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
+
+ icssg_fdb_add_del(emac, addr, 0, port_mask, false);
+ icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
+
+ if (other_port_mask) {
+ icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
+ icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
+ }
+
+ return 0;
+}
+
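The multicast delete path above relies on a simple mask identity: the FDB lookup returns the current port membership, and XOR-ing out this port's bit leaves the ports that must keep the entry. A tiny sketch with illustrative port bits:

#include <stdio.h>

int main(void)
{
	int this_port = 1 << 1;			/* e.g. BIT() of this port's id */
	int members   = (1 << 1) | (1 << 2);	/* both external ports subscribed */
	int other     = this_port ^ members;	/* ports that still need the entry */

	printf("remaining members: 0x%x\n", other);	/* prints 0x4 */
	return 0;
}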
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -445,9 +498,8 @@ static int emac_ndo_open(struct net_device *ndev)
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
-
icssg_class_default(prueth->miig_rt, slice, 0, false);
+ icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
@@ -578,6 +630,8 @@ static int emac_ndo_stop(struct net_device *ndev)
icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+ __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
+
atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
/* ensure new tdown_cnt value is visible */
smp_mb__after_atomic();
@@ -640,24 +694,21 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
promisc = ndev->flags & IFF_PROMISC;
allmulti = ndev->flags & IFF_ALLMULTI;
- emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
if (promisc) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
return;
}
if (allmulti) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
return;
}
- if (!netdev_mc_empty(ndev)) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
- return;
- }
+ __dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast);
}
/**
@@ -677,14 +728,14 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
- .ndo_start_xmit = emac_ndo_start_xmit,
+ .ndo_start_xmit = icssg_ndo_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = emac_ndo_tx_timeout,
+ .ndo_tx_timeout = icssg_ndo_tx_timeout,
.ndo_set_rx_mode = emac_ndo_set_rx_mode,
- .ndo_eth_ioctl = emac_ndo_ioctl,
- .ndo_get_stats64 = emac_ndo_get_stats64,
- .ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
+ .ndo_eth_ioctl = icssg_ndo_ioctl,
+ .ndo_get_stats64 = icssg_ndo_get_stats64,
+ .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -720,7 +771,7 @@ static int prueth_netdev_init(struct prueth *prueth,
}
INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
- INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);
+ INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
ret = pruss_request_mem_region(prueth->pruss,
port == PRUETH_PORT_MII0 ?
@@ -813,7 +864,7 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features = NETIF_F_SG;
ndev->features = ndev->hw_features;
- netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
+ netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
emac->rx_hrtimer.function = &emac_rx_timer_callback;
@@ -833,6 +884,214 @@ free_ndev:
return ret;
}
+bool prueth_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ return emac->prueth->is_switch_mode;
+ }
+
+ return false;
+}
+
+static void prueth_offload_fwd_mark_update(struct prueth *prueth)
+{
+ int set_val = 0;
+ int i;
+
+ if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
+ set_val = 1;
+
+ dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
+ struct prueth_emac *emac = prueth->emac[i];
+
+ if (!emac || !emac->ndev)
+ continue;
+
+ emac->offload_fwd_mark = set_val;
+ }
+}
+
+static void prueth_emac_restart(struct prueth *prueth)
+{
+ struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
+ struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
+
+ /* Detach the net_device for both PRUeth ports */
+ if (netif_running(emac0->ndev))
+ netif_device_detach(emac0->ndev);
+ if (netif_running(emac1->ndev))
+ netif_device_detach(emac1->ndev);
+
+ /* Disable both PRUeth ports */
+ icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+ icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+
+ /* Stop both pru cores for both PRUeth ports */
+ prueth_emac_stop(emac0);
+ prueth->emacs_initialized--;
+ prueth_emac_stop(emac1);
+ prueth->emacs_initialized--;
+
+ /* Start both pru cores for both PRUeth ports */
+ prueth_emac_start(prueth, emac0);
+ prueth->emacs_initialized++;
+ prueth_emac_start(prueth, emac1);
+ prueth->emacs_initialized++;
+
+ /* Enable forwarding for both PRUeth ports */
+ icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+ icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+
+ /* Attach the net_device for both PRUeth ports */
+ netif_device_attach(emac0->ndev);
+ netif_device_attach(emac1->ndev);
+}
+
+static void icssg_enable_switch_mode(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int mac;
+
+ prueth_emac_restart(prueth);
+
+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ emac = prueth->emac[mac];
+ if (netif_running(emac->ndev)) {
+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK,
+ true);
+ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+ BIT(emac->port_id) | DEFAULT_PORT_MASK,
+ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+ true);
+ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ }
+ }
+}
+
+static int prueth_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int err;
+
+ if (!prueth->br_members) {
+ prueth->hw_bridge_dev = br_ndev;
+ } else {
+ /* This is adding the port to a second bridge, which is
+ * unsupported
+ */
+ if (prueth->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
+ &prueth->prueth_switchdev_nb,
+ &prueth->prueth_switchdev_bl_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ prueth->br_members |= BIT(emac->port_id);
+
+ if (!prueth->is_switch_mode) {
+ if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
+ prueth->br_members & BIT(PRUETH_PORT_MII1)) {
+ prueth->is_switch_mode = true;
+ prueth->default_vlan = 1;
+ emac->port_vlan = prueth->default_vlan;
+ icssg_enable_switch_mode(prueth);
+ }
+ }
+
+ prueth_offload_fwd_mark_update(prueth);
+
+ return NOTIFY_DONE;
+}
+
+static void prueth_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ prueth->br_members &= ~BIT(emac->port_id);
+
+ if (prueth->is_switch_mode) {
+ prueth->is_switch_mode = false;
+ emac->port_vlan = 0;
+ prueth_emac_restart(prueth);
+ }
+
+ prueth_offload_fwd_mark_update(prueth);
+
+ if (!prueth->br_members)
+ prueth->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int prueth_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (ndev->netdev_ops != &emac_netdev_ops)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
+ else
+ prueth_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int prueth_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
+ ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
+ if (ret) {
+ dev_err(prueth->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = prueth_switchdev_register_notifiers(prueth);
+ if (ret)
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+
+ return ret;
+}
+
+static void prueth_unregister_notifiers(struct prueth *prueth)
+{
+ prueth_switchdev_unregister_notifiers(prueth);
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+}
+
static int prueth_probe(struct platform_device *pdev)
{
struct device_node *eth_node, *eth_ports_node;
@@ -960,6 +1219,9 @@ static int prueth_probe(struct platform_device *pdev)
}
msmc_ram_size = MSMC_RAM_SIZE;
+ prueth->is_switchmode_supported = prueth->pdata.switch_mode;
+ if (prueth->is_switchmode_supported)
+ msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
/* NOTE: FW bug needs buffer base to be 64KB aligned */
prueth->msmcram.va =
@@ -1065,6 +1327,14 @@ static int prueth_probe(struct platform_device *pdev)
phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
}
+ if (prueth->is_switchmode_supported) {
+ ret = prueth_register_notifiers(prueth);
+ if (ret)
+ goto netdev_unregister;
+
+ sprintf(prueth->switch_id, "%s", dev_name(dev));
+ }
+
dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
(!eth0_node || !eth1_node) ? "single" : "dual");
@@ -1134,6 +1404,8 @@ static void prueth_remove(struct platform_device *pdev)
struct device_node *eth_node;
int i;
+ prueth_unregister_notifiers(prueth);
+
for (i = 0; i < PRUETH_NUM_MACS; i++) {
if (!prueth->registered_netdevs[i])
continue;
@@ -1175,10 +1447,12 @@ static void prueth_remove(struct platform_device *pdev)
static const struct prueth_pdata am654_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
.quirk_10m_link_issue = 1,
+ .switch_mode = 1,
};
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .switch_mode = 1,
};
static const struct of_device_id prueth_dt_match[] = {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index a78c5eb75fb8..f678d656a3ed 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -186,6 +186,9 @@ struct prueth_emac {
struct pruss_mem_region dram;
+ bool offload_fwd_mark;
+ int port_vlan;
+
struct delayed_work stats_work;
u64 stats[ICSSG_NUM_STATS];
@@ -198,10 +201,12 @@ struct prueth_emac {
* struct prueth_pdata - PRUeth platform data
* @fdqring_mode: Free desc queue mode
* @quirk_10m_link_issue: 10M link detect errata
+ * @switch_mode: switch firmware support
*/
struct prueth_pdata {
enum k3_ring_mode fdqring_mode;
u32 quirk_10m_link_issue:1;
+ u32 switch_mode:1;
};
struct icssg_firmwares {
@@ -232,6 +237,16 @@ struct icssg_firmwares {
* @emacs_initialized: num of EMACs/ext ports that are up/running
* @iep0: pointer to IEP0 device
* @iep1: pointer to IEP1 device
+ * @vlan_tbl: VLAN-FID table pointer
+ * @hw_bridge_dev: pointer to HW bridge net device
+ * @br_members: bitmask of bridge member ports
+ * @prueth_netdevice_nb: netdevice notifier block
+ * @prueth_switchdev_nb: switchdev notifier block
+ * @prueth_switchdev_bl_nb: switchdev blocking notifier block
+ * @is_switch_mode: flag to indicate if device is in Switch mode
+ * @is_switchmode_supported: indicates platform support for switch mode
+ * @switch_id: ID for mapping switch ports to bridge
+ * @default_vlan: Default VLAN for host
*/
struct prueth {
struct device *dev;
@@ -256,6 +271,17 @@ struct prueth {
int emacs_initialized;
struct icss_iep *iep0;
struct icss_iep *iep1;
+ struct prueth_vlan_tbl *vlan_tbl;
+
+ struct net_device *hw_bridge_dev;
+ u8 br_members;
+ struct notifier_block prueth_netdevice_nb;
+ struct notifier_block prueth_switchdev_nb;
+ struct notifier_block prueth_switchdev_bl_nb;
+ bool is_switch_mode;
+ bool is_switchmode_supported;
+ unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
+ int default_vlan;
};
struct emac_tx_ts_response {
@@ -303,8 +329,8 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr);
void icssg_config_ipg(struct prueth_emac *emac);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac,
int slice);
-int emac_set_port_state(struct prueth_emac *emac,
- enum icssg_port_state_cmd state);
+int icssg_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
@@ -313,10 +339,20 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue);
void icssg_queue_push(struct prueth *prueth, int queue, u16 addr);
u32 icssg_queue_level(struct prueth *prueth, int queue);
+int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
+ struct mgmt_cmd_rsp *rsp);
+int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid, u8 fid_c2, bool add);
+int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid);
+void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add);
+u16 icssg_get_pvid(struct prueth_emac *emac);
+void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
#define prueth_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct prueth_tx_chn, napi_tx)
-void emac_stats_work_handler(struct work_struct *work);
+void icssg_stats_work_handler(struct work_struct *work);
void emac_update_hardware_stats(struct prueth_emac *emac);
int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name);
@@ -341,11 +377,11 @@ int prueth_dma_rx_push(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn);
void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
-enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
irqreturn_t prueth_rx_irq(int irq, void *dev_id);
void prueth_emac_stop(struct prueth_emac *emac);
void prueth_cleanup_tx_ts(struct prueth_emac *emac);
-int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget);
+int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size);
@@ -353,12 +389,12 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
bool free_skb);
void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
int num_flows, bool disable);
-void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
-int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
-void emac_ndo_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats);
-int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
- size_t len);
+void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
+int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+void icssg_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats);
+int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len);
int prueth_node_port(struct device_node *eth_node);
int prueth_node_mac(struct device_node *eth_node);
void prueth_netdev_exit(struct prueth *prueth,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 7b3304bbd7fc..e180c1166170 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -722,14 +722,14 @@ static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
- .ndo_start_xmit = emac_ndo_start_xmit,
+ .ndo_start_xmit = icssg_ndo_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = emac_ndo_tx_timeout,
+ .ndo_tx_timeout = icssg_ndo_tx_timeout,
.ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
- .ndo_eth_ioctl = emac_ndo_ioctl,
- .ndo_get_stats64 = emac_ndo_get_stats64,
- .ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
+ .ndo_eth_ioctl = icssg_ndo_ioctl,
+ .ndo_get_stats64 = icssg_ndo_get_stats64,
+ .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -767,7 +767,7 @@ static int prueth_netdev_init(struct prueth *prueth,
goto free_ndev;
}
- INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);
+ INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
ret = pruss_request_mem_region(prueth->pruss,
port == PRUETH_PORT_MII0 ?
@@ -854,7 +854,7 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features = NETIF_F_SG;
ndev->features = ndev->hw_features;
- netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
+ netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
prueth->emac[mac] = emac;
return 0;
@@ -1011,16 +1011,44 @@ static int prueth_probe(struct platform_device *pdev)
dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
prueth->msmcram.va, prueth->msmcram.size);
+ prueth->iep0 = icss_iep_get_idx(np, 0);
+ if (IS_ERR(prueth->iep0)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep0),
+ "iep0 get failed\n");
+ goto free_pool;
+ }
+
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1),
+ "iep1 get failed\n");
+ goto put_iep0;
+ }
+
+ ret = icss_iep_init(prueth->iep0, NULL, NULL, 0);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init iep0\n");
+ goto put_iep;
+ }
+
+ ret = icss_iep_init(prueth->iep1, NULL, NULL, 0);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init iep1\n");
+ goto exit_iep0;
+ }
+
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
if (ret) {
dev_err_probe(dev, ret, "netdev init %s failed\n",
eth0_node->name);
- goto free_pool;
+ goto exit_iep;
}
if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+
+ prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
if (eth1_node) {
@@ -1033,6 +1061,8 @@ static int prueth_probe(struct platform_device *pdev)
if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+
+ prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
}
/* register the network devices */
@@ -1091,6 +1121,19 @@ netdev_exit:
prueth_netdev_exit(prueth, eth_node);
}
+exit_iep:
+ icss_iep_exit(prueth->iep1);
+exit_iep0:
+ icss_iep_exit(prueth->iep0);
+
+put_iep:
+ icss_iep_put(prueth->iep1);
+
+put_iep0:
+ icss_iep_put(prueth->iep0);
+ prueth->iep0 = NULL;
+ prueth->iep1 = NULL;
+
free_pool:
gen_pool_free(prueth->sram_pool,
(unsigned long)prueth->msmcram.va, msmc_ram_size);
@@ -1138,6 +1181,12 @@ static void prueth_remove(struct platform_device *pdev)
prueth_netdev_exit(prueth, eth_node);
}
+ icss_iep_exit(prueth->iep1);
+ icss_iep_exit(prueth->iep0);
+
+ icss_iep_put(prueth->iep1);
+ icss_iep_put(prueth->iep0);
+
gen_pool_free(prueth->sram_pool,
(unsigned long)prueth->msmcram.va,
MSMC_RAM_SIZE_SR1);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_queues.c b/drivers/net/ethernet/ti/icssg/icssg_queues.c
index 3c34f61ad40b..e5052d9e7807 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_queues.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_queues.c
@@ -28,6 +28,7 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue)
return val;
}
+EXPORT_SYMBOL_GPL(icssg_queue_pop);
void icssg_queue_push(struct prueth *prueth, int queue, u16 addr)
{
@@ -36,6 +37,7 @@ void icssg_queue_push(struct prueth *prueth, int queue, u16 addr)
regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr);
}
+EXPORT_SYMBOL_GPL(icssg_queue_push);
u32 icssg_queue_level(struct prueth *prueth, int queue)
{
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 3dbadddd7e35..2fb150c13078 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -42,7 +42,7 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
}
}
-void emac_stats_work_handler(struct work_struct *work)
+void icssg_stats_work_handler(struct work_struct *work)
{
struct prueth_emac *emac = container_of(work, struct prueth_emac,
stats_work.work);
@@ -51,6 +51,7 @@ void emac_stats_work_handler(struct work_struct *work)
queue_delayed_work(system_long_wq, &emac->stats_work,
msecs_to_jiffies((STATS_TIME_LIMIT_1G_MS * 1000) / emac->speed));
}
+EXPORT_SYMBOL_GPL(icssg_stats_work_handler);
int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
{
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.c b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c
new file mode 100644
index 000000000000..67e2927e176d
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments K3 ICSSG Ethernet Switchdev Driver
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "icssg_prueth.h"
+#include "icssg_switchdev.h"
+#include "icssg_mii_rt.h"
+
+struct prueth_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct prueth_emac *emac;
+ unsigned long event;
+};
+
+static int prueth_switchdev_stp_state_set(struct prueth_emac *emac,
+ u8 state)
+{
+ enum icssg_port_state_cmd emac_state;
+ int ret = 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ emac_state = ICSSG_EMAC_PORT_FORWARD;
+ break;
+ case BR_STATE_DISABLED:
+ emac_state = ICSSG_EMAC_PORT_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ emac_state = ICSSG_EMAC_PORT_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ icssg_set_port_state(emac, emac_state);
+ netdev_dbg(emac->ndev, "STP state: %u\n", emac_state);
+
+ return ret;
+}
+
+static int prueth_switchdev_attr_br_flags_set(struct prueth_emac *emac,
+ struct net_device *orig_dev,
+ struct switchdev_brport_flags brport_flags)
+{
+ enum icssg_port_state_cmd emac_state;
+
+ if (brport_flags.mask & BR_MCAST_FLOOD)
+ emac_state = ICSSG_EMAC_PORT_MC_FLOODING_ENABLE;
+ else
+ emac_state = ICSSG_EMAC_PORT_MC_FLOODING_DISABLE;
+
+ netdev_dbg(emac->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ emac_state, emac->port_id);
+
+ icssg_set_port_state(emac, emac_state);
+
+ return 0;
+}
+
+static int prueth_switchdev_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_brport_flags brport_flags)
+{
+ if (brport_flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prueth_switchdev_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, emac->port_id);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = prueth_switchdev_attr_br_flags_pre_set(ndev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = prueth_switchdev_stp_state_set(emac,
+ attr->u.stp_state);
+ netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = prueth_switchdev_attr_br_flags_set(emac, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static void prueth_switchdev_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void prueth_switchdev_event_work(struct work_struct *work)
+{
+ struct prueth_switchdev_event_work *switchdev_work =
+ container_of(work, struct prueth_switchdev_event_work, work);
+ struct prueth_emac *emac = switchdev_work->emac;
+ struct switchdev_notifier_fdb_info *fdb;
+ int port_id = emac->port_id;
+ int ret;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(emac->ndev, "prueth_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (!ether_addr_equal(emac->mac_addr, fdb->addr))
+ break;
+
+ ret = icssg_fdb_add_del(emac, fdb->addr, fdb->vid,
+ BIT(port_id), true);
+ if (!ret)
+ prueth_switchdev_fdb_offload_notify(emac->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(emac->ndev, "prueth_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (!ether_addr_equal(emac->mac_addr, fdb->addr))
+ break;
+ icssg_fdb_add_del(emac, fdb->addr, fdb->vid,
+ BIT(port_id), false);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(emac->ndev);
+}
+
+static int prueth_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct prueth_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err;
+
+ if (!prueth_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, prueth_switchdev_event_work);
+ switchdev_work->emac = emac;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int prueth_switchdev_vlan_add(struct prueth_emac *emac, bool untag, bool pvid,
+ u8 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ if (untag)
+ untag_mask = port_mask;
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X PVID %d\n",
+ vid, port_mask, untag_mask, pvid);
+
+ if (!pvid)
+ return ret;
+
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
+
+ return ret;
+}
+
+static int prueth_switchdev_vlan_del(struct prueth_emac *emac, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ icssg_vtbl_modify(emac, vid, port_mask, 0, false);
+
+ if (cpu_port)
+ icssg_fdb_add_del(emac, emac->mac_addr, vid,
+ BIT(PRUETH_PORT_HOST), false);
+
+ if (vid == icssg_get_pvid(emac))
+ icssg_set_pvid(emac->prueth, 0, emac->port_id);
+
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X\n",
+ vid, port_mask);
+
+ return ret;
+}
+
+static int prueth_switchdev_vlans_add(struct prueth_emac *emac,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ netdev_dbg(emac->ndev, "VID add vid:%u flags:%X\n",
+ vlan->vid, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (vlan->vid > 0xff)
+ return 0;
+
+ return prueth_switchdev_vlan_add(emac, untag, pvid, vlan->vid,
+ orig_dev);
+}
+
+static int prueth_switchdev_vlans_del(struct prueth_emac *emac,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ if (vlan->vid > 0xff)
+ return 0;
+
+ return prueth_switchdev_vlan_del(emac, vlan->vid,
+ vlan->obj.orig_dev);
+}
+
+static int prueth_switchdev_mdb_add(struct prueth_emac *emac,
+ struct switchdev_obj_port_mdb *mdb)
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ u8 port_mask, fid_c2;
+ bool cpu_port;
+ int err;
+
+ cpu_port = netif_is_bridge_master(orig_dev);
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid);
+
+ err = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 | port_mask, true);
+ netdev_dbg(emac->ndev, "MDB add vid %u:%pM ports: %X\n",
+ mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int prueth_switchdev_mdb_del(struct prueth_emac *emac,
+ struct switchdev_obj_port_mdb *mdb)
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ int del_mask, ret, fid_c2;
+ bool cpu_port;
+
+ cpu_port = netif_is_bridge_master(orig_dev);
+
+ if (cpu_port)
+ del_mask = BIT(PRUETH_PORT_HOST);
+ else
+ del_mask = BIT(emac->port_id);
+
+ fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid);
+
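+ /* keep the entry if other ports still reference this group, otherwise delete it */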
+ if (fid_c2 & ~del_mask)
+ ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 & ~del_mask, true);
+ else
+ ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, 0, false);
+
+ netdev_dbg(emac->ndev, "MDB del vid %u:%pM ports: %X\n",
+ mdb->vid, mdb->addr, del_mask);
+
+ return ret;
+}
+
+static int prueth_switchdev_obj_add(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, emac->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = prueth_switchdev_vlans_add(emac, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = prueth_switchdev_mdb_add(emac, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prueth_switchdev_obj_del(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, emac->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = prueth_switchdev_vlans_del(emac, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = prueth_switchdev_mdb_del(emac, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prueth_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int prueth_switchdev_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_switchdev_nb.notifier_call = &prueth_switchdev_event;
+ ret = register_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ if (ret) {
+ dev_err(prueth->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ prueth->prueth_switchdev_bl_nb.notifier_call = &prueth_switchdev_blocking_event;
+ ret = register_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ if (ret) {
+ dev_err(prueth->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ }
+
+ return ret;
+}
+
+void prueth_switchdev_unregister_notifiers(struct prueth *prueth)
+{
+ unregister_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.h b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h
new file mode 100644
index 000000000000..0e64e7760a00
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+#ifndef __NET_TI_ICSSG_SWITCHDEV_H
+#define __NET_TI_ICSSG_SWITCHDEV_H
+
+#include "icssg_prueth.h"
+
+int prueth_switchdev_register_notifiers(struct prueth *prueth);
+void prueth_switchdev_unregister_notifiers(struct prueth *prueth);
+bool prueth_dev_check(const struct net_device *ndev);
+
+#endif /* __NET_TI_ICSSG_SWITCHDEV_H */
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 02cb6474f6dc..d286709ca3b9 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1999,7 +1999,7 @@ static int keystone_set_link_ksettings(struct net_device *ndev,
#if IS_ENABLED(CONFIG_TI_CPTS)
static int keystone_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct gbe_intf *gbe_intf;
@@ -2027,7 +2027,7 @@ static int keystone_get_ts_info(struct net_device *ndev,
}
#else
static int keystone_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index cc3bec42ed8e..abe5921dde02 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -43,6 +43,11 @@ static const struct wx_stats wx_gstrings_stats[] = {
WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};
+static const struct wx_stats wx_gstrings_fdir_stats[] = {
+ WX_STAT("fdir_match", stats.fdirmatch),
+ WX_STAT("fdir_miss", stats.fdirmiss),
+};
+
/* drivers allocates num_tx_queues and num_rx_queues symmetrically so
* we set the num_rx_queues to evaluate to num_tx_queues. This is
* used because we do not have a good way to get the max number of
@@ -55,13 +60,17 @@ static const struct wx_stats wx_gstrings_stats[] = {
(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats)
+#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
int wx_get_sset_count(struct net_device *netdev, int sset)
{
+ struct wx *wx = netdev_priv(netdev);
+
switch (sset) {
case ETH_SS_STATS:
- return WX_STATS_LEN;
+ return (wx->mac.type == wx_mac_sp) ?
+ WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -70,6 +79,7 @@ EXPORT_SYMBOL(wx_get_sset_count);
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
+ struct wx *wx = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -77,6 +87,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
+ if (wx->mac.type == wx_mac_sp) {
+ for (i = 0; i < WX_FDIR_STATS_LEN; i++)
+ ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
+ }
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -96,7 +110,7 @@ void wx_get_ethtool_stats(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
struct wx_ring *ring;
unsigned int start;
- int i, j;
+ int i, j, k;
char *p;
wx_update_stats(wx);
@@ -107,6 +121,13 @@ void wx_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ if (wx->mac.type == wx_mac_sp) {
+ for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
+ p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
+ data[i++] = *(u64 *)p;
+ }
+ }
+
for (j = 0; j < netdev->num_tx_queues; j++) {
ring = wx->tx_ring[j];
if (!ring) {
@@ -172,17 +193,21 @@ EXPORT_SYMBOL(wx_get_pause_stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
+ unsigned int stats_len = WX_STATS_LEN;
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_sp)
+ stats_len += WX_FDIR_STATS_LEN;
+
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
- info->n_stats = WX_STATS_LEN -
+ info->n_stats = stats_len -
(WX_NUM_TX_QUEUES - wx->num_tx_queues) *
(sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
} else {
- info->n_stats = WX_STATS_LEN;
+ info->n_stats = stats_len;
}
}
EXPORT_SYMBOL(wx_get_drvinfo);
@@ -383,6 +408,9 @@ void wx_get_channels(struct net_device *dev,
/* record RSS queues */
ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
+
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+ ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
}
EXPORT_SYMBOL(wx_get_channels);
@@ -400,6 +428,9 @@ int wx_set_channels(struct net_device *dev,
if (count > wx_max_channels(wx))
return -EINVAL;
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+ wx->ring_feature[RING_F_FDIR].limit = count;
+
wx->ring_feature[RING_F_RSS].limit = count;
return 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 7c4b6881a93f..1bf9c38e4125 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1147,8 +1147,15 @@ static void wx_enable_rx(struct wx *wx)
static void wx_set_rxpba(struct wx *wx)
{
u32 rxpktsize, txpktsize, txpbthresh;
+ u32 pbsize = wx->mac.rx_pb_size;
- rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
+ if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) ||
+ test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
+ pbsize -= 64; /* reserve the default 64KB for Flow Director filters */
+ }
+
+ rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT;
wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
/* Only support an equally distributed Tx packet buffer strategy. */
@@ -1261,7 +1268,7 @@ static void wx_configure_port(struct wx *wx)
* Stops the receive data path and waits for the HW to internally empty
* the Rx security block
**/
-static int wx_disable_sec_rx_path(struct wx *wx)
+int wx_disable_sec_rx_path(struct wx *wx)
{
u32 secrx;
@@ -1271,6 +1278,7 @@ static int wx_disable_sec_rx_path(struct wx *wx)
return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
1000, 40000, false, wx, WX_RSC_ST);
}
+EXPORT_SYMBOL(wx_disable_sec_rx_path);
/**
* wx_enable_sec_rx_path - Enables the receive data path
@@ -1278,11 +1286,12 @@ static int wx_disable_sec_rx_path(struct wx *wx)
*
* Enables the receive data path.
**/
-static void wx_enable_sec_rx_path(struct wx *wx)
+void wx_enable_sec_rx_path(struct wx *wx)
{
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
WX_WRITE_FLUSH(wx);
}
+EXPORT_SYMBOL(wx_enable_sec_rx_path);
static void wx_vlan_strip_control(struct wx *wx, bool enable)
{
@@ -1499,6 +1508,13 @@ static void wx_configure_tx_ring(struct wx *wx,
txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;
+ ring->atr_count = 0;
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) &&
+ test_bit(WX_FLAG_FDIR_HASH, wx->flags))
+ ring->atr_sample_rate = wx->atr_sample_rate;
+ else
+ ring->atr_sample_rate = 0;
+
/* reinitialize tx_buffer_info */
memset(ring->tx_buffer_info, 0,
sizeof(struct wx_tx_buffer) * ring->count);
@@ -1732,7 +1748,9 @@ void wx_configure(struct wx *wx)
wx_set_rx_mode(wx->netdev);
wx_restore_vlan(wx);
- wx_enable_sec_rx_path(wx);
+
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+ wx->configure_fdir(wx);
wx_configure_tx(wx);
wx_configure_rx(wx);
@@ -1959,6 +1977,8 @@ int wx_sw_init(struct wx *wx)
}
bitmap_zero(wx->state, WX_STATE_NBITS);
+ bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS);
+ wx->misc_irq_domain = false;
return 0;
}
@@ -2333,6 +2353,11 @@ void wx_update_stats(struct wx *wx)
hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
+ if (wx->mac.type == wx_mac_sp) {
+ hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
+ hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
+ }
+
for (i = 0; i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 9e219fa717a2..11fb33349482 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -28,6 +28,8 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+int wx_disable_sec_rx_path(struct wx *wx);
+void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 68bde91b67a0..1eecba984f3b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -148,10 +148,11 @@ static struct wx_dec_ptype wx_ptype_lookup[256] = {
[0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
};
-static struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
+struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
{
return wx_ptype_lookup[ptype];
}
+EXPORT_SYMBOL(wx_decode_ptype);
/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
@@ -1453,6 +1454,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
struct wx_ring *tx_ring)
{
+ struct wx *wx = netdev_priv(tx_ring->netdev);
u16 count = TXD_USE_COUNT(skb_headlen(skb));
struct wx_tx_buffer *first;
u8 hdr_len = 0, ptype;
@@ -1498,6 +1500,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
else if (!tso)
wx_tx_csum(tx_ring, first, ptype);
+
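+ /* sample this flow for Flow Director (ATR) when hashing is enabled on the ring */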
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
+ wx->atr(tx_ring, first, ptype);
+
wx_tx_map(tx_ring, first, hdr_len);
return NETDEV_TX_OK;
@@ -1574,8 +1580,27 @@ static void wx_set_rss_queues(struct wx *wx)
f = &wx->ring_feature[RING_F_RSS];
f->indices = f->limit;
- wx->num_rx_queues = f->limit;
- wx->num_tx_queues = f->limit;
+ if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+ goto out;
+
+ clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+
+ /* Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (f->indices > 1) {
+ f = &wx->ring_feature[RING_F_FDIR];
+
+ f->indices = f->limit;
+
+ if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ }
+
+out:
+ wx->num_rx_queues = f->indices;
+ wx->num_tx_queues = f->indices;
}
static void wx_set_num_queues(struct wx *wx)
@@ -1686,6 +1711,7 @@ static int wx_set_interrupt_capability(struct wx *wx)
}
pdev->irq = pci_irq_vector(pdev, 0);
+ wx->num_q_vectors = 1;
return 0;
}
@@ -1996,7 +2022,8 @@ void wx_free_irq(struct wx *wx)
int vector;
if (!(pdev->msix_enabled)) {
- free_irq(pdev->irq, wx);
+ if (!wx->misc_irq_domain)
+ free_irq(pdev->irq, wx);
return;
}
@@ -2011,7 +2038,7 @@ void wx_free_irq(struct wx *wx)
free_irq(entry->vector, q_vector);
}
- if (wx->mac.type == wx_mac_em)
+ if (!wx->misc_irq_domain)
free_irq(wx->msix_entry->vector, wx);
}
EXPORT_SYMBOL(wx_free_irq);
@@ -2026,6 +2053,9 @@ int wx_setup_isb_resources(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
+ if (wx->isb_mem)
+ return 0;
+
wx->isb_mem = dma_alloc_coherent(&pdev->dev,
sizeof(u32) * 4,
&wx->isb_dma,
@@ -2385,7 +2415,6 @@ static void wx_free_all_tx_resources(struct wx *wx)
void wx_free_resources(struct wx *wx)
{
- wx_free_isb_resources(wx);
wx_free_all_rx_resources(wx);
wx_free_all_tx_resources(wx);
}
@@ -2680,6 +2709,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
struct wx *wx = netdev_priv(netdev);
+ bool need_reset = false;
if (features & NETIF_F_RXHASH) {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
@@ -2697,6 +2727,36 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
+ if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+ return 0;
+
+ /* Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ switch (features & NETIF_F_NTUPLE) {
+ case NETIF_F_NTUPLE:
+ /* turn off ATR, enable perfect filters and reset */
+ if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ need_reset = true;
+
+ clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ break;
+ default:
+ /* turn off perfect filters, enable ATR and reset */
+ if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
+ need_reset = true;
+
+ /* We cannot enable ATR if RSS is disabled */
+ if (wx->ring_feature[RING_F_RSS].limit <= 1)
+ break;
+
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ break;
+ }
+
+ if (need_reset)
+ wx->do_reset(netdev);
+
return 0;
}
EXPORT_SYMBOL(wx_set_features);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index c41b29ea812f..fdeb0c315b75 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -7,6 +7,7 @@
#ifndef _WX_LIB_H_
#define _WX_LIB_H_
+struct wx_dec_ptype wx_decode_ptype(const u8 ptype);
void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count);
u16 wx_desc_unused(struct wx_ring *ring);
netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 5aaf7b1fa2db..1d57b047817b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -157,6 +157,8 @@
#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
+#define WX_RDB_FDIR_MATCH 0x19558
+#define WX_RDB_FDIR_MISS 0x1955C
/******************************* PSR Registers *******************************/
/* psr control */
@@ -503,6 +505,34 @@ enum WX_MSCA_CMD_value {
#define WX_PTYPE_TYP_TCP 0x04
#define WX_PTYPE_TYP_SCTP 0x05
+/* Packet type non-ip values */
+enum wx_l2_ptypes {
+ WX_PTYPE_L2_ABORTED = (WX_PTYPE_PKT_MAC),
+ WX_PTYPE_L2_MAC = (WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC),
+
+ WX_PTYPE_L2_IPV4_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IPFRAG),
+ WX_PTYPE_L2_IPV4 = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IP),
+ WX_PTYPE_L2_IPV4_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_UDP),
+ WX_PTYPE_L2_IPV4_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_TCP),
+ WX_PTYPE_L2_IPV4_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_SCTP),
+ WX_PTYPE_L2_IPV6_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_IPFRAG),
+ WX_PTYPE_L2_IPV6 = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_IP),
+ WX_PTYPE_L2_IPV6_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_UDP),
+ WX_PTYPE_L2_IPV6_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_TCP),
+ WX_PTYPE_L2_IPV6_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_SCTP),
+
+ WX_PTYPE_L2_TUN4_MAC = (WX_PTYPE_TUN_IPV4 | WX_PTYPE_PKT_IGM),
+ WX_PTYPE_L2_TUN6_MAC = (WX_PTYPE_TUN_IPV6 | WX_PTYPE_PKT_IGM),
+};
+
+#define WX_PTYPE_PKT(_pt) ((_pt) & 0x30)
+#define WX_PTYPE_TYPL4(_pt) ((_pt) & 0x07)
+
#define WX_RXD_PKTTYPE(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
#define WX_RXD_IPV6EX(_rxd) \
@@ -552,6 +582,9 @@ enum wx_tx_flags {
WX_TX_FLAGS_OUTER_IPV4 = 0x100,
WX_TX_FLAGS_LINKSEC = 0x200,
WX_TX_FLAGS_IPSEC = 0x400,
+
+ /* software defined flags */
+ WX_TX_FLAGS_SW_VLAN = 0x40,
};
/* VLAN info */
@@ -900,7 +933,13 @@ struct wx_ring {
*/
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
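+ /* next_to_alloc is used only on Rx rings, so Tx rings reuse the
+ * space for ATR (Flow Director) sampling state.
+ */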
+ union {
+ u16 next_to_alloc;
+ struct {
+ u8 atr_sample_rate;
+ u8 atr_count;
+ };
+ };
struct wx_queue_stats stats;
struct u64_stats_sync syncp;
@@ -939,6 +978,7 @@ struct wx_ring_feature {
enum wx_ring_f_enum {
RING_F_NONE = 0,
RING_F_RSS,
+ RING_F_FDIR,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
@@ -980,15 +1020,26 @@ struct wx_hw_stats {
u64 crcerrs;
u64 rlec;
u64 qmprc;
+ u64 fdirmatch;
+ u64 fdirmiss;
};
enum wx_state {
WX_STATE_RESETTING,
WX_STATE_NBITS, /* must be last */
};
+
+enum wx_pf_flags {
+ WX_FLAG_FDIR_CAPABLE,
+ WX_FLAG_FDIR_HASH,
+ WX_FLAG_FDIR_PERFECT,
+ WX_PF_FLAGS_NBITS /* must be last */
+};
+
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_BITMAP(state, WX_STATE_NBITS);
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
void *priv;
u8 __iomem *hw_addr;
@@ -1058,6 +1109,7 @@ struct wx {
dma_addr_t isb_dma;
u32 *isb_mem;
u32 isb_tag[WX_ISB_MAX];
+ bool misc_irq_domain;
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
@@ -1077,6 +1129,9 @@ struct wx {
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+ u32 atr_sample_rate;
+ void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
+ void (*configure_fdir)(struct wx *wx);
void (*do_reset)(struct net_device *netdev);
};
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 46a5a3e95202..e868f7ef4920 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -37,9 +37,9 @@ static int ngbe_set_wol(struct net_device *netdev,
wx->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
wx->wol = WX_PSR_WKUP_CTL_MAG;
- netdev->wol_enabled = !!(wx->wol);
+ netdev->ethtool->wol_enabled = !!(wx->wol);
wr32(wx, WX_PSR_WKUP_CTL, wx->wol);
- device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled);
+ device_set_wakeup_enable(&pdev->dev, netdev->ethtool->wol_enabled);
return 0;
}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index e894e01d030d..53aeae2f884b 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -387,6 +387,7 @@ err_dis_phy:
err_free_irq:
wx_free_irq(wx);
err_free_resources:
+ wx_free_isb_resources(wx);
wx_free_resources(wx);
return err;
}
@@ -408,6 +409,7 @@ static int ngbe_close(struct net_device *netdev)
ngbe_down(wx);
wx_free_irq(wx);
+ wx_free_isb_resources(wx);
wx_free_resources(wx);
phylink_disconnect_phy(wx->phylink);
wx_control_hw(wx, false);
@@ -650,7 +652,7 @@ static int ngbe_probe(struct pci_dev *pdev,
if (wx->wol_hw_supported)
wx->wol = NGBE_PSR_WKUP_CTL_MAG;
- netdev->wol_enabled = !!(wx->wol);
+ netdev->ethtool->wol_enabled = !!(wx->wol);
wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
device_set_wakeup_enable(&pdev->dev, wx->wol);
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 42718875277c..f74576fe7062 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -10,4 +10,5 @@ txgbe-objs := txgbe_main.o \
txgbe_hw.o \
txgbe_phy.o \
txgbe_irq.o \
+ txgbe_fdir.o \
txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index 31fde3fa7c6b..d98314b26c19 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -9,6 +9,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
+#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
static int txgbe_set_ringparam(struct net_device *netdev,
@@ -79,6 +80,430 @@ static int txgbe_set_channels(struct net_device *dev,
return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
+static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ union txgbe_atr_input *mask = &txgbe->fdir_mask;
+ struct txgbe_fdir_filter *rule = NULL;
+ struct hlist_node *node;
+
+ /* report total rule count */
+ cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;
+
+ hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
+ fdir_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ /* set flow type field */
+ switch (rule->filter.formatted.flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ fsp->flow_type = TCP_V4_FLOW;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ fsp->flow_type = UDP_V4_FLOW;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ fsp->flow_type = SCTP_V4_FLOW;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_IPV4:
+ fsp->flow_type = IP_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+ fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+ fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+ fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+ fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+ fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+ fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+ fsp->flow_type |= FLOW_EXT;
+
+ /* record action */
+ if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->action;
+
+ return 0;
+}
+
+static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct txgbe_fdir_filter *rule;
+ struct hlist_node *node;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;
+
+ hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
+ fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = wx->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = txgbe->fdir_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+ u8 *flow_type)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case UDP_V4_FLOW:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case SCTP_V4_FLOW:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case IPPROTO_UDP:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case IPPROTO_SCTP:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case 0:
+ if (!fsp->m_u.usr_ip4_spec.proto) {
+ *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
+ }
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct txgbe_fdir_filter *input)
+{
+ struct txgbe_fdir_filter *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list,
+ fdir_node) {
+ if (rule->filter.formatted.bkt_hash ==
+ input->filter.formatted.bkt_hash &&
+ rule->action == input->action) {
+ wx_dbg(txgbe->wx, "FDIR entry already exist\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct txgbe_fdir_filter *input,
+ u16 sw_idx)
+{
+ struct hlist_node *node = NULL, *parent = NULL;
+ struct txgbe_fdir_filter *rule;
+ struct wx *wx = txgbe->wx;
+ bool deleted = false;
+ int err;
+
+ hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
+ fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = node;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && rule->sw_idx == sw_idx) {
+ /* hardware filters are only configured when interface is up,
+ * and we should not issue filter commands while the interface
+ * is down
+ */
+ if (netif_running(wx->netdev) &&
+ (!input || rule->filter.formatted.bkt_hash !=
+ input->filter.formatted.bkt_hash)) {
+ err = txgbe_fdir_erase_perfect_filter(wx,
+ &rule->filter,
+ sw_idx);
+ if (err)
+ return -EINVAL;
+ }
+
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ txgbe->fdir_filter_count--;
+ deleted = true;
+ }
+
+ /* If we weren't given an input, then this was a request to delete a
+ * filter. We should return -EINVAL if the filter wasn't found, but
+ * return 0 if the rule was successfully deleted.
+ */
+ if (!input)
+ return deleted ? 0 : -EINVAL;
+
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->fdir_node, parent);
+ else
+ hlist_add_head(&input->fdir_node,
+ &txgbe->fdir_filter_list);
+
+ /* update counts */
+ txgbe->fdir_filter_count++;
+
+ return 0;
+}
+
+static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct txgbe_fdir_filter *input;
+ union txgbe_atr_input mask;
+ struct wx *wx = txgbe->wx;
+ int err = -EINVAL;
+ u16 ptype = 0;
+ u8 queue;
+
+ if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ return -EOPNOTSUPP;
+
+ /* ring_cookie is masked into a set of queues and txgbe pools,
+ * or we use the drop index
+ */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ queue = TXGBE_RDB_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring >= wx->num_rx_queues)
+ return -EINVAL;
+
+ /* Map the ring onto the absolute queue index */
+ queue = wx->rx_ring[ring]->reg_idx;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) {
+ wx_err(wx, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ input = kzalloc(sizeof(*input), GFP_ATOMIC);
+ if (!input)
+ return -ENOMEM;
+
+ memset(&mask, 0, sizeof(union txgbe_atr_input));
+
+ /* set SW index */
+ input->sw_idx = fsp->location;
+
+ /* record flow type */
+ if (txgbe_flowspec_to_flow_type(fsp,
+ &input->filter.formatted.flow_type)) {
+ wx_err(wx, "Unrecognized flow type\n");
+ goto err_out;
+ }
+
+ mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK |
+ TXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4)
+ mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ /* Copy input into formatted structures */
+ input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ input->filter.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->h_ext.data[1]);
+ mask.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->m_ext.data[1]);
+ input->filter.formatted.flex_bytes =
+ fsp->h_ext.vlan_etype;
+ mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
+ }
+
+ switch (input->filter.formatted.flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ ptype = WX_PTYPE_L2_IPV4_TCP;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ ptype = WX_PTYPE_L2_IPV4_UDP;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ ptype = WX_PTYPE_L2_IPV4_SCTP;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_IPV4:
+ ptype = WX_PTYPE_L2_IPV4;
+ break;
+ default:
+ break;
+ }
+
+ input->filter.formatted.vlan_id = htons(ptype);
+ if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK)
+ mask.formatted.vlan_id = htons(0xFFFF);
+ else
+ mask.formatted.vlan_id = htons(0xFFF8);
+
+ /* determine if we need to drop or route the packet */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
+ else
+ input->action = fsp->ring_cookie;
+
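+ /* fdir_perfect_lock protects both the software filter list and the
+ * single hardware input mask shared by all perfect filters.
+ */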
+ spin_lock(&txgbe->fdir_perfect_lock);
+
+ if (hlist_empty(&txgbe->fdir_filter_list)) {
+ /* save mask and program input mask into HW */
+ memcpy(&txgbe->fdir_mask, &mask, sizeof(mask));
+ err = txgbe_fdir_set_input_mask(wx, &mask);
+ if (err)
+ goto err_unlock;
+ } else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) {
+ wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n");
+ goto err_unlock;
+ }
+
+ /* apply mask and compute/store hash */
+ txgbe_atr_compute_perfect_hash(&input->filter, &mask);
+
+ /* check if new entry does not exist on filter list */
+ if (txgbe_match_ethtool_fdir_entry(txgbe, input))
+ goto err_unlock;
+
+ /* only program filters to hardware if the net device is running, as
+ * we store the filters in the Rx buffer which is not allocated when
+ * the device is down
+ */
+ if (netif_running(wx->netdev)) {
+ err = txgbe_fdir_write_perfect_filter(wx, &input->filter,
+ input->sw_idx, queue);
+ if (err)
+ goto err_unlock;
+ }
+
+ txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx);
+
+ spin_unlock(&txgbe->fdir_perfect_lock);
+
+ return 0;
+err_unlock:
+ spin_unlock(&txgbe->fdir_perfect_lock);
+err_out:
+ kfree(input);
+ return err;
+}
+
+static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err = 0;
+
+ spin_lock(&txgbe->fdir_perfect_lock);
+ err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location);
+ spin_unlock(&txgbe->fdir_perfect_lock);
+
+ return err;
+}
+
+static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
@@ -100,6 +525,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = txgbe_set_channels,
+ .get_rxnfc = txgbe_get_rxnfc,
+ .set_rxnfc = txgbe_set_rxnfc,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
};
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
new file mode 100644
index 000000000000..ef50efbaec0f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_fdir.h"
+
+/* These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define TXGBE_ATR_COMMON_HASH_KEY \
+ (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY)
+#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
+
+/**
+ * txgbe_atr_compute_sig_hash - Compute the signature hash
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
+ * @hash: pointer to the computed hash
+ *
+ * This function contains several optimizations, such as unwinding all
+ * of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+static void txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common,
+ u32 *hash)
+{
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 i;
+
+ /* record the flow_vm_vlan bits as they are a key part of the hash */
+ flow_vm_vlan = ntohl(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ for (i = 1; i <= 15; i++)
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(i);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= TXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= TXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ *hash = sig_hash ^ bucket_hash;
+}
+
+#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
+ union txgbe_atr_input *input_mask)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+ __be32 hi_dword = 0;
+ u32 i = 0;
+
+ /* Apply masks to input data */
+ for (i = 0; i < 11; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
+
+ /* record the flow_vm_vlan bits as they are a key part of the hash */
+ flow_vm_vlan = ntohl(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 10; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = ntohl(hi_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ TXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ for (i = 1; i <= 15; i++)
+ TXGBE_COMPUTE_BKT_HASH_ITERATION(i);
+
+ /* Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
+}
+
+static int txgbe_fdir_check_cmd_complete(struct wx *wx)
+{
+ u32 val;
+
+ return read_poll_timeout_atomic(rd32, val,
+ !(val & TXGBE_RDB_FDIR_CMD_CMD_MASK),
+ 10, 100, false,
+ wx, TXGBE_RDB_FDIR_CMD);
+}
+
+/**
+ * txgbe_fdir_add_signature_filter - Adds a signature hash filter
+ * @wx: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ *
+ * Return: 0 on success and negative on failure
+ **/
+static int txgbe_fdir_add_signature_filter(struct wx *wx,
+ union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u32 fdirhashcmd, fdircmd;
+ u8 flow_type;
+ int err;
+
+ /* Get the flow_type in order to program FDIRCMD properly:
+ * the lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
+ * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER
+ */
+ flow_type = input.formatted.flow_type;
+ switch (flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case TXGBE_ATR_FLOW_TYPE_TCPV6:
+ case TXGBE_ATR_FLOW_TYPE_UDPV6:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ wx_err(wx, "Error on flow type input\n");
+ return -EINVAL;
+ }
+
+ /* configure FDIRCMD register */
+ fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
+ TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
+ TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
+ fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(flow_type);
+ fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);
+
+ txgbe_atr_compute_sig_hash(input, common, &fdirhashcmd);
+ fdirhashcmd |= TXGBE_RDB_FDIR_HASH_BUCKET_VALID;
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhashcmd);
+ wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);
+
+ wx_dbg(wx, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+ err = txgbe_fdir_check_cmd_complete(wx);
+ if (err)
+ wx_err(wx, "Flow Director command did not complete!\n");
+
+ return err;
+}
+
+void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
+{
+ union txgbe_atr_hash_dword common = { .dword = 0 };
+ union txgbe_atr_hash_dword input = { .dword = 0 };
+ struct wx_q_vector *q_vector = ring->q_vector;
+ struct wx_dec_ptype dptype;
+ union network_header {
+ struct ipv6hdr *ipv6;
+ struct iphdr *ipv4;
+ void *raw;
+ } hdr;
+ struct tcphdr *th;
+
+ /* if the ring doesn't have an interrupt vector, we cannot perform ATR */
+ if (!q_vector)
+ return;
+
+ ring->atr_count++;
+ dptype = wx_decode_ptype(ptype);
+ if (dptype.etype) {
+ if (WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
+ return;
+ hdr.raw = (void *)skb_inner_network_header(first->skb);
+ th = inner_tcp_hdr(first->skb);
+ } else {
+ if (WX_PTYPE_PKT(ptype) != WX_PTYPE_PKT_IP ||
+ WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
+ return;
+ hdr.raw = (void *)skb_network_header(first->skb);
+ th = tcp_hdr(first->skb);
+ }
+
+ /* skip this packet since it is invalid or the socket is closing */
+ if (!th || th->fin)
+ return;
+
+ /* sample on all syn packets or once every atr sample count */
+ if (!th->syn && ring->atr_count < ring->atr_sample_rate)
+ return;
+
+ /* reset sample count */
+ ring->atr_count = 0;
+
+ /* src and dst are inverted; think of how the receiver sees them
+ *
+ * The input is broken into two sections, a non-compressed section
+ * containing vm_pool, vlan_id, and flow_type. The rest of the data
+ * is XORed together and stored in the compressed dword.
+ */
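+ /* the vlan_id field of the signature filter is repurposed to carry
+ * the hardware packet type
+ */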
+ input.formatted.vlan_id = htons((u16)ptype);
+
+ /* since src port and flex bytes occupy the same word XOR them together
+ * and write the value to source port portion of compressed dword
+ */
+ if (first->tx_flags & WX_TX_FLAGS_SW_VLAN)
+ common.port.src ^= th->dest ^ first->skb->protocol;
+ else if (first->tx_flags & WX_TX_FLAGS_HW_VLAN)
+ common.port.src ^= th->dest ^ first->skb->vlan_proto;
+ else
+ common.port.src ^= th->dest ^ first->protocol;
+ common.port.dst ^= th->source;
+
+ if (WX_PTYPE_PKT_IPV6 & WX_PTYPE_PKT(ptype)) {
+ input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ } else {
+ input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ }
+
+ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+ txgbe_fdir_add_signature_filter(q_vector->wx, input, common,
+ ring->queue_index);
+}
+
+int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
+{
+ u32 fdirm = 0, fdirtcpm = 0, flex = 0;
+
+ /* Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ wx_dbg(wx, "bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL;
+ break;
+ case 0x7F:
+ break;
+ default:
+ wx_err(wx, "Error on vm pool mask\n");
+ return -EINVAL;
+ }
+
+ switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ wx_err(wx, "Error on src/dst port mask\n");
+ return -EINVAL;
+ }
+ break;
+ case TXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ wx_err(wx, "Error on flow type mask\n");
+ return -EINVAL;
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
+
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
+ flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+
+ switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes */
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK;
+ break;
+ case 0xFFFF:
+ break;
+ default:
+ wx_err(wx, "Error on flexible byte mask\n");
+ return -EINVAL;
+ }
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ntohs(input_mask->formatted.dst_port);
+ fdirtcpm <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
+ fdirtcpm |= ntohs(input_mask->formatted.src_port);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ wr32(wx, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm);
+ wr32(wx, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm);
+ wr32(wx, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm);
+
+ /* store source and destination IP masks (little-endian) */
+ wr32(wx, TXGBE_RDB_FDIR_SA4_MSK,
+ ntohl(~input_mask->formatted.src_ip[0]));
+ wr32(wx, TXGBE_RDB_FDIR_DA4_MSK,
+ ntohl(~input_mask->formatted.dst_ip[0]));
+
+ return 0;
+}
+
+int txgbe_fdir_write_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id, u8 queue)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ int err = 0;
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ wr32(wx, TXGBE_RDB_FDIR_IP6(2), ntohl(input->formatted.src_ip[0]));
+ wr32(wx, TXGBE_RDB_FDIR_IP6(1), ntohl(input->formatted.src_ip[1]));
+ wr32(wx, TXGBE_RDB_FDIR_IP6(0), ntohl(input->formatted.src_ip[2]));
+
+ /* record the source address (little-endian) */
+ wr32(wx, TXGBE_RDB_FDIR_SA, ntohl(input->formatted.src_ip[0]));
+
+ /* record the first 32 bits of the destination address
+ * (little-endian)
+ */
+ wr32(wx, TXGBE_RDB_FDIR_DA, ntohl(input->formatted.dst_ip[0]));
+
+ /* record source and destination port (little-endian) */
+ fdirport = ntohs(input->formatted.dst_port);
+ fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
+ fdirport |= ntohs(input->formatted.src_port);
+ wr32(wx, TXGBE_RDB_FDIR_PORT, fdirport);
+
+ /* record packet type and flex_bytes (little-endian) */
+ fdirvlan = ntohs(input->formatted.flex_bytes);
+ fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX, fdirvlan);
+
+ /* configure FDIRHASH register */
+ fdirhash = (__force u32)input->formatted.bkt_hash |
+ TXGBE_RDB_FDIR_HASH_BUCKET_VALID |
+ TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
+
+ /* flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ WX_WRITE_FLUSH(wx);
+
+ /* configure FDIRCMD register */
+ fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
+ TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
+ TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
+ if (queue == TXGBE_RDB_FDIR_DROP_QUEUE)
+ fdircmd |= TXGBE_RDB_FDIR_CMD_DROP;
+ fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(input->formatted.flow_type);
+ fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);
+ fdircmd |= TXGBE_RDB_FDIR_CMD_VT_POOL(input->formatted.vm_pool);
+
+ wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);
+ err = txgbe_fdir_check_cmd_complete(wx);
+ if (err)
+ wx_err(wx, "Flow Director command did not complete!\n");
+
+ return err;
+}
+
+int txgbe_fdir_erase_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash, fdircmd;
+ int err = 0;
+
+ /* configure FDIRHASH register */
+ fdirhash = (__force u32)input->formatted.bkt_hash;
+ fdirhash |= TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
+
+ /* flush hash to HW */
+ WX_WRITE_FLUSH(wx);
+
+ /* Query if filter is present */
+ wr32(wx, TXGBE_RDB_FDIR_CMD, TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT);
+
+ err = txgbe_fdir_check_cmd_complete(wx);
+ if (err) {
+ wx_err(wx, "Flow Director command did not complete!\n");
+ return err;
+ }
+
+ fdircmd = rd32(wx, TXGBE_RDB_FDIR_CMD);
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) {
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
+ WX_WRITE_FLUSH(wx);
+ wr32(wx, TXGBE_RDB_FDIR_CMD,
+ TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW);
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_fdir_enable - Initialize Flow Director control registers
+ * @wx: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
+{
+ u32 val;
+ int ret;
+
+ /* Prime the keys for hashing */
+ wr32(wx, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+ wr32(wx, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctrl);
+ WX_WRITE_FLUSH(wx);
+ ret = read_poll_timeout(rd32, val, val & TXGBE_RDB_FDIR_CTL_INIT_DONE,
+ 1000, 10000, false, wx, TXGBE_RDB_FDIR_CTL);
+
+ if (ret < 0)
+ wx_dbg(wx, "Flow Director poll time exceeded!\n");
+}
+
+/**
+ * txgbe_init_fdir_signature -Initialize Flow Director sig filters
+ * @wx: pointer to hardware structure
+ **/
+static void txgbe_init_fdir_signature(struct wx *wx)
+{
+ u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+ u32 flex = 0;
+
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
+ flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+
+ flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+
+ /* Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
+ TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
+ TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ txgbe_fdir_enable(wx, fdirctrl);
+}
+
+/**
+ * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters
+ * @wx: pointer to hardware structure
+ **/
+static void txgbe_init_fdir_perfect(struct wx *wx)
+{
+ u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+
+ /* Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
+ */
+ fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH |
+ TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) |
+ TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
+ TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
+ TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ txgbe_fdir_enable(wx, fdirctrl);
+}
+
+static void txgbe_fdir_filter_restore(struct wx *wx)
+{
+ struct txgbe_fdir_filter *filter;
+ struct txgbe *txgbe = wx->priv;
+ struct hlist_node *node;
+ u8 queue = 0;
+ int ret = 0;
+
+ spin_lock(&txgbe->fdir_perfect_lock);
+
+ if (!hlist_empty(&txgbe->fdir_filter_list))
+ ret = txgbe_fdir_set_input_mask(wx, &txgbe->fdir_mask);
+
+ if (ret)
+ goto unlock;
+
+ hlist_for_each_entry_safe(filter, node,
+ &txgbe->fdir_filter_list, fdir_node) {
+ if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) {
+ queue = TXGBE_RDB_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+
+ if (ring >= wx->num_rx_queues) {
+ wx_err(wx, "FDIR restore failed, ring:%u\n",
+ ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ queue = wx->rx_ring[ring]->reg_idx;
+ }
+
+ ret = txgbe_fdir_write_perfect_filter(wx,
+ &filter->filter,
+ filter->sw_idx,
+ queue);
+ if (ret)
+ wx_err(wx, "FDIR restore failed, index:%u\n",
+ filter->sw_idx);
+ }
+
+unlock:
+ spin_unlock(&txgbe->fdir_perfect_lock);
+}
+
+void txgbe_configure_fdir(struct wx *wx)
+{
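+ /* quiesce the Rx data path while (re)programming the Flow Director block */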
+ wx_disable_sec_rx_path(wx);
+
+ if (test_bit(WX_FLAG_FDIR_HASH, wx->flags)) {
+ txgbe_init_fdir_signature(wx);
+ } else if (test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) {
+ txgbe_init_fdir_perfect(wx);
+ txgbe_fdir_filter_restore(wx);
+ }
+
+ wx_enable_sec_rx_path(wx);
+}
+
+void txgbe_fdir_filter_exit(struct wx *wx)
+{
+ struct txgbe_fdir_filter *filter;
+ struct txgbe *txgbe = wx->priv;
+ struct hlist_node *node;
+
+ spin_lock(&txgbe->fdir_perfect_lock);
+
+ hlist_for_each_entry_safe(filter, node,
+ &txgbe->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ txgbe->fdir_filter_count = 0;
+
+ spin_unlock(&txgbe->fdir_perfect_lock);
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h
new file mode 100644
index 000000000000..1f44ce60becb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_FDIR_H_
+#define _TXGBE_FDIR_H_
+
+void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
+ union txgbe_atr_input *input_mask);
+void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
+int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask);
+int txgbe_fdir_write_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+int txgbe_fdir_erase_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id);
+void txgbe_configure_fdir(struct wx *wx);
+void txgbe_fdir_filter_exit(struct wx *wx);
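+
+/* Editorial note (not part of the original patch): as the txgbe_main.c hunks
+ * later in this series show, txgbe_configure_fdir() is hooked up through
+ * wx->configure_fdir and runs with the Rx security path quiesced, while
+ * txgbe_fdir_filter_exit() is called from txgbe_close() to free the software
+ * filter list.
+ */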
+
+#endif /* _TXGBE_FDIR_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index b3e3605d1edb..a4cf682dca65 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -27,57 +27,19 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
}
/**
- * txgbe_intr - msi/legacy mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
-{
- struct wx_q_vector *q_vector;
- struct wx *wx = data;
- struct pci_dev *pdev;
- u32 eicr;
-
- q_vector = wx->q_vector[0];
- pdev = wx->pdev;
-
- eicr = wx_misc_isb(wx, WX_ISB_VEC0);
- if (!eicr) {
- /* shared interrupt alert!
- * the interrupt that we masked before the ICR read.
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, true);
- return IRQ_NONE; /* Not our interrupt */
- }
- wx->isb_mem[WX_ISB_VEC0] = 0;
- if (!(pdev->msi_enabled))
- wr32(wx, WX_PX_INTA, 1);
-
- wx->isb_mem[WX_ISB_MISC] = 0;
- /* would disable interrupts here but it is auto disabled */
- napi_schedule_irqoff(&q_vector->napi);
-
- /* re-enable link(maybe) and non-queue interrupts, no flush.
- * txgbe_poll will re-enable the queue interrupts
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, false);
-
- return IRQ_HANDLED;
-}
-
-/**
- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts
* @wx: board private structure
*
- * Allocate MSI-X vectors and request interrupts from the kernel.
+ * Allocate MSI-X queue vectors and request interrupts from the kernel.
**/
-static int txgbe_request_msix_irqs(struct wx *wx)
+int txgbe_request_queue_irqs(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
int vector, err;
+ if (!wx->pdev->msix_enabled)
+ return 0;
+
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_q_entries[vector];
@@ -110,34 +72,6 @@ free_queue_irqs:
return err;
}
-/**
- * txgbe_request_irq - initialize interrupts
- * @wx: board private structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-int txgbe_request_irq(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- struct pci_dev *pdev = wx->pdev;
- int err;
-
- if (pdev->msix_enabled)
- err = txgbe_request_msix_irqs(wx);
- else if (pdev->msi_enabled)
- err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
- netdev->name, wx);
- else
- err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
- netdev->name, wx);
-
- if (err)
- wx_err(wx, "request_irq failed, Error %d\n", err);
-
- return err;
-}
-
static int txgbe_request_gpio_irq(struct txgbe *txgbe)
{
txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
@@ -178,6 +112,36 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
{
+ struct wx_q_vector *q_vector;
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 eicr;
+
+ if (wx->pdev->msix_enabled)
+ return IRQ_WAKE_THREAD;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(wx->pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ /* would disable interrupts here but it is auto disabled */
+ q_vector = wx->q_vector[0];
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
+{
struct txgbe *txgbe = data;
struct wx *wx = txgbe->wx;
unsigned int nhandled = 0;
@@ -223,6 +187,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe)
int txgbe_setup_misc_irq(struct txgbe *txgbe)
{
+ unsigned long flags = IRQF_ONESHOT;
struct wx *wx = txgbe->wx;
int hwirq, err;
@@ -236,14 +201,17 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
irq_create_mapping(txgbe->misc.domain, hwirq);
txgbe->misc.chip = txgbe_irq_chip;
- if (wx->pdev->msix_enabled)
+ if (wx->pdev->msix_enabled) {
txgbe->misc.irq = wx->msix_entry->vector;
- else
+ } else {
txgbe->misc.irq = wx->pdev->irq;
+ if (!wx->pdev->msi_enabled)
+ flags |= IRQF_SHARED;
+ }
- err = request_threaded_irq(txgbe->misc.irq, NULL,
- txgbe_misc_irq_handle,
- IRQF_ONESHOT,
+ err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle,
+ txgbe_misc_irq_thread_fn,
+ flags,
wx->netdev->name, txgbe);
if (err)
goto del_misc_irq;
@@ -256,6 +224,8 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_gpio_irq;
+ wx->misc_irq_domain = true;
+
return 0;
free_gpio_irq:
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
index b77945e7a0f2..e6285b94625e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
@@ -2,6 +2,6 @@
/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
void txgbe_irq_enable(struct wx *wx, bool queues);
-int txgbe_request_irq(struct wx *wx);
+int txgbe_request_queue_irqs(struct wx *wx);
void txgbe_free_misc_irq(struct txgbe *txgbe);
int txgbe_setup_misc_irq(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 8c7a74981b90..93180225a6f1 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -18,6 +18,7 @@
#include "txgbe_hw.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
+#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -257,6 +258,14 @@ static int txgbe_sw_init(struct wx *wx)
num_online_cpus());
wx->rss_enabled = true;
+ wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES,
+ num_online_cpus());
+ set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags);
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE;
+ wx->atr = txgbe_atr;
+ wx->configure_fdir = txgbe_configure_fdir;
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -274,6 +283,12 @@ static int txgbe_sw_init(struct wx *wx)
return 0;
}
+static void txgbe_init_fdir(struct txgbe *txgbe)
+{
+ txgbe->fdir_filter_count = 0;
+ spin_lock_init(&txgbe->fdir_perfect_lock);
+}
+
/**
* txgbe_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -294,9 +309,9 @@ static int txgbe_open(struct net_device *netdev)
wx_configure(wx);
- err = txgbe_request_irq(wx);
+ err = txgbe_request_queue_irqs(wx);
if (err)
- goto err_free_isb;
+ goto err_free_resources;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
@@ -313,8 +328,8 @@ static int txgbe_open(struct net_device *netdev)
err_free_irq:
wx_free_irq(wx);
-err_free_isb:
- wx_free_isb_resources(wx);
+err_free_resources:
+ wx_free_resources(wx);
err_reset:
txgbe_reset(wx);
@@ -352,6 +367,7 @@ static int txgbe_close(struct net_device *netdev)
txgbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
+ txgbe_fdir_filter_exit(wx);
wx_control_hw(wx, false);
return 0;
@@ -660,6 +676,8 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe->wx = wx;
wx->priv = txgbe;
+ txgbe_init_fdir(txgbe);
+
err = txgbe_setup_misc_irq(txgbe);
if (err)
goto err_release_hw;
@@ -729,6 +747,7 @@ static void txgbe_remove(struct pci_dev *pdev)
txgbe_remove_phy(txgbe);
txgbe_free_misc_irq(txgbe);
+ wx_free_isb_resources(wx);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index f434a7865cb7..959102c4c379 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -89,6 +89,55 @@
#define TXGBE_XPCS_IDA_ADDR 0x13000
#define TXGBE_XPCS_IDA_DATA 0x13004
+/********************************* Flow Director *****************************/
+#define TXGBE_RDB_FDIR_DROP_QUEUE 127
+#define TXGBE_RDB_FDIR_CTL 0x19500
+#define TXGBE_RDB_FDIR_CTL_INIT_DONE BIT(3)
+#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH BIT(4)
+#define TXGBE_RDB_FDIR_CTL_DROP_Q(v) FIELD_PREP(GENMASK(14, 8), v)
+#define TXGBE_RDB_FDIR_CTL_HASH_BITS(v) FIELD_PREP(GENMASK(23, 20), v)
+#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH(v) FIELD_PREP(GENMASK(27, 24), v)
+#define TXGBE_RDB_FDIR_CTL_FULL_THRESH(v) FIELD_PREP(GENMASK(31, 28), v)
+#define TXGBE_RDB_FDIR_IP6(_i) (0x1950C + ((_i) * 4)) /* 0-2 */
+#define TXGBE_RDB_FDIR_SA 0x19518
+#define TXGBE_RDB_FDIR_DA 0x1951C
+#define TXGBE_RDB_FDIR_PORT 0x19520
+#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT 16
+#define TXGBE_RDB_FDIR_FLEX 0x19524
+#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT 16
+#define TXGBE_RDB_FDIR_HASH 0x19528
+#define TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(v) FIELD_PREP(GENMASK(31, 16), v)
+#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID BIT(15)
+#define TXGBE_RDB_FDIR_CMD 0x1952C
+#define TXGBE_RDB_FDIR_CMD_CMD_MASK GENMASK(1, 0)
+#define TXGBE_RDB_FDIR_CMD_CMD(v) FIELD_PREP(GENMASK(1, 0), v)
+#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW TXGBE_RDB_FDIR_CMD_CMD(1)
+#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW TXGBE_RDB_FDIR_CMD_CMD(2)
+#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT TXGBE_RDB_FDIR_CMD_CMD(3)
+#define TXGBE_RDB_FDIR_CMD_FILTER_VALID BIT(2)
+#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE BIT(3)
+#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE(v) FIELD_PREP(GENMASK(6, 5), v)
+#define TXGBE_RDB_FDIR_CMD_DROP BIT(9)
+#define TXGBE_RDB_FDIR_CMD_LAST BIT(11)
+#define TXGBE_RDB_FDIR_CMD_QUEUE_EN BIT(15)
+#define TXGBE_RDB_FDIR_CMD_RX_QUEUE(v) FIELD_PREP(GENMASK(22, 16), v)
+#define TXGBE_RDB_FDIR_CMD_VT_POOL(v) FIELD_PREP(GENMASK(29, 24), v)
+#define TXGBE_RDB_FDIR_DA4_MSK 0x1953C
+#define TXGBE_RDB_FDIR_SA4_MSK 0x19540
+#define TXGBE_RDB_FDIR_TCP_MSK 0x19544
+#define TXGBE_RDB_FDIR_UDP_MSK 0x19548
+#define TXGBE_RDB_FDIR_SCTP_MSK 0x19560
+#define TXGBE_RDB_FDIR_HKEY 0x19568
+#define TXGBE_RDB_FDIR_SKEY 0x1956C
+#define TXGBE_RDB_FDIR_OTHER_MSK 0x19570
+#define TXGBE_RDB_FDIR_OTHER_MSK_POOL BIT(2)
+#define TXGBE_RDB_FDIR_OTHER_MSK_L4P BIT(3)
+#define TXGBE_RDB_FDIR_FLEX_CFG(_i) (0x19580 + ((_i) * 4))
+#define TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 GENMASK(7, 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC FIELD_PREP(GENMASK(1, 0), 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2)
+#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v)
+
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
@@ -112,6 +161,98 @@
#define TXGBE_SP_RX_PB_SIZE 512
#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20
+
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define TXGBE_ATR_HASH_MASK 0x7fff
+#define TXGBE_ATR_L4TYPE_MASK 0x3
+#define TXGBE_ATR_L4TYPE_UDP 0x1
+#define TXGBE_ATR_L4TYPE_TCP 0x2
+#define TXGBE_ATR_L4TYPE_SCTP 0x3
+#define TXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define TXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10
+
+enum txgbe_atr_flow_type {
+ TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+};
+
+/* Flow Director ATR input struct. */
+union txgbe_atr_input {
+ /* Byte layout in order, all values with MSB first:
+ *
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * vlan_id - 2 bytes
+ * dst_ip - 16 bytes
+ * src_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
+ */
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+ } formatted;
+ __be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union txgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+enum txgbe_fdir_pballoc_type {
+ TXGBE_FDIR_PBALLOC_NONE = 0,
+ TXGBE_FDIR_PBALLOC_64K = 1,
+ TXGBE_FDIR_PBALLOC_128K = 2,
+ TXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+struct txgbe_fdir_filter {
+ struct hlist_node fdir_node;
+ union txgbe_atr_input filter;
+ u16 sw_idx;
+ u16 action;
+};
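+
+/* Editorial sketch (not part of the original patch): a minimal, hedged
+ * illustration of how the structures above combine with the helpers declared
+ * in txgbe_fdir.h to stage one perfect filter. The function name, addresses
+ * and ports are made up for illustration; the input mask construction and the
+ * fdir_perfect_lock handling done by the real driver are elided here.
+ */
+static int txgbe_example_add_tcpv4_filter(struct wx *wx, u16 sw_idx, u8 queue)
+{
+	union txgbe_atr_input input = {};
+	union txgbe_atr_input mask = {};	/* normally built from the ethtool spec */
+
+	input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+	input.formatted.src_ip[0] = htonl(0xc0a80001);	/* 192.168.0.1 */
+	input.formatted.dst_ip[0] = htonl(0xc0a80002);	/* 192.168.0.2 */
+	input.formatted.src_port = htons(12345);
+	input.formatted.dst_port = htons(80);
+
+	/* Hash over the fields selected by the mask */
+	txgbe_atr_compute_perfect_hash(&input, &mask);
+
+	/* Push the filter into the hardware filter table */
+	return txgbe_fdir_write_perfect_filter(wx, &input, sw_idx, queue);
+}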
+
/* TX/RX descriptor defines */
#define TXGBE_DEFAULT_TXD 512
#define TXGBE_DEFAULT_TX_WORK 256
@@ -196,6 +337,12 @@ struct txgbe {
struct gpio_chip *gpio;
unsigned int gpio_irq;
unsigned int link_irq;
+
+ /* flow director */
+ struct hlist_head fdir_filter_list;
+ union txgbe_atr_input fdir_mask;
+ int fdir_filter_count;
+ spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
};
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c29809cd9201..e342f387c3dd 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1945,9 +1945,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- netdev_err(ndev,
- "Please stop netif before applying configuration\n");
- return -EFAULT;
+ NL_SET_ERR_MSG(extack,
+ "Please stop netif before applying configuration");
+ return -EBUSY;
}
if (ecoalesce->rx_max_coalesced_frames)
@@ -2254,7 +2254,6 @@ static int axienet_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->ethtool_ops = &axienet_ethtool_ops;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 8aff6a73ca0a..56df37f8d50a 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1015,7 +1015,7 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
}
static int ixp4xx_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct port *port = netdev_priv(dev);
diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h
index 166ef015262b..37c6071cb333 100644
--- a/drivers/net/fjes/fjes_trace.h
+++ b/drivers/net/fjes/fjes_trace.h
@@ -358,7 +358,7 @@ TRACE_EVENT(fjes_stop_req_irq_post,
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH ../../../drivers/net/fjes
+#define TRACE_INCLUDE_PATH ../../drivers/net/fjes
#define TRACE_INCLUDE_FILE fjes_trace
/* This part must be outside protection */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 51495cb4b9be..838e85ddec67 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -815,6 +815,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
@@ -826,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs4)
@@ -908,7 +909,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
@@ -925,6 +926,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
@@ -935,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs6)
@@ -997,7 +999,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2d5b021b4ea6..fef4eff7753a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- err = ip_local_out(net, skb->sk, skb);
+ err = ip_local_out(net, NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
- err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ err = ip6_local_out(dev_net(dev), NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 67b7ef2d463f..24298a33e0e9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1084,7 +1084,7 @@ static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
}
static int macvlan_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct net_device *real_dev = macvlan_dev_real_dev(dev);
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index b37a9e4bade4..4dc057c121f5 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -442,6 +442,42 @@ static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev)
i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
}
+static void mctp_i2c_invalidate_tx_flow(struct mctp_i2c_dev *midev,
+ struct sk_buff *skb)
+{
+ struct mctp_sk_key *key;
+ struct mctp_flow *flow;
+ unsigned long flags;
+ bool release;
+
+ flow = skb_ext_find(skb, SKB_EXT_MCTP);
+ if (!flow)
+ return;
+
+ key = flow->key;
+ if (!key)
+ return;
+
+ spin_lock_irqsave(&key->lock, flags);
+ if (key->manual_alloc) {
+ /* we don't have control over lifetimes for manually-allocated
+ * keys, so we cannot assume we can invalidate all future flows
+ * that would use this key.
+ */
+ release = false;
+ } else {
+ release = key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE;
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;
+ }
+ spin_unlock_irqrestore(&key->lock, flags);
+
+ /* if we have changed state from active, the flow held a reference on
+ * the lock; release that now.
+ */
+ if (release)
+ mctp_i2c_unlock_nest(midev);
+}
+
static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
{
struct net_device_stats *stats = &midev->ndev->stats;
@@ -500,6 +536,11 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
case MCTP_I2C_TX_FLOW_EXISTING:
/* existing flow: we already have the lock; just tx */
rc = __i2c_transfer(midev->adapter, &msg, 1);
+
+ /* on tx errors, the flow can no longer be considered valid */
+ if (rc)
+ mctp_i2c_invalidate_tx_flow(midev, skb);
+
break;
case MCTP_I2C_TX_FLOW_INVALID:
@@ -1042,8 +1083,8 @@ static struct notifier_block mctp_i2c_notifier = {
};
static const struct i2c_device_id mctp_i2c_id[] = {
- { "mctp-i2c-interface", 0 },
- {},
+ { "mctp-i2c-interface" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, mctp_i2c_id);
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index c29377c85307..62c47e0dd142 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#define MSCC_MIIM_REG_STATUS 0x0
#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2)
@@ -271,10 +272,17 @@ static int mscc_miim_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct regmap *mii_regmap, *phy_regmap;
struct device *dev = &pdev->dev;
+ struct reset_control *reset;
struct mscc_miim_dev *miim;
struct mii_bus *bus;
int ret;
+ reset = devm_reset_control_get_optional_shared(dev, "switch");
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset), "Failed to get reset\n");
+
+ reset_control_reset(reset);
+
mii_regmap = ocelot_regmap_from_resource(pdev, 0,
&mscc_miim_regmap_config);
if (IS_ERR(mii_regmap))
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index d7070dd4fe73..9c09293b5258 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -344,7 +344,7 @@ static ssize_t enabled_store(struct config_item *item,
goto out_unlock;
err = -EINVAL;
- if ((bool)enabled == nt->enabled) {
+ if (enabled == nt->enabled) {
pr_info("network logging has already %s\n",
nt->enabled ? "started" : "stopped");
goto out_unlock;
@@ -369,6 +369,7 @@ static ssize_t enabled_store(struct config_item *item,
if (err)
goto out_unlock;
+ nt->enabled = true;
pr_info("network logging started\n");
} else { /* false */
/* We need to disable the netconsole before cleaning it up
@@ -381,8 +382,6 @@ static ssize_t enabled_store(struct config_item *item,
netpoll_cleanup(&nt->np);
}
- nt->enabled = enabled;
-
mutex_unlock(&dynamic_netconsole_mutex);
return strnlen(buf, count);
out_unlock:
@@ -974,6 +973,7 @@ restart:
/* rtnl_lock already held
* we might sleep in __netpoll_cleanup()
*/
+ nt->enabled = false;
spin_unlock_irqrestore(&target_list_lock, flags);
__netpoll_cleanup(&nt->np);
@@ -981,7 +981,6 @@ restart:
spin_lock_irqsave(&target_list_lock, flags);
netdev_put(nt->np.dev, &nt->np.dev_tracker);
nt->np.dev = NULL;
- nt->enabled = false;
stopped = true;
netconsole_target_put(nt);
goto restart;
@@ -1262,6 +1261,8 @@ static int __init init_netconsole(void)
while ((target_config = strsep(&input, ";"))) {
nt = alloc_param_target(target_config, count);
if (IS_ERR(nt)) {
+ if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC))
+ continue;
err = PTR_ERR(nt);
goto fail;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 3f9c9327f149..1436905bc106 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -148,7 +148,7 @@ nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats)
}
static int nsim_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct netdevsim *ns = netdev_priv(dev);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index c22897bf5509..017a6102be0a 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -324,7 +324,8 @@ static int nsim_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nsim->peer);
- iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
+ READ_ONCE(dev->ifindex);
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index a4d2e76a8d58..16789cd446e9 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
skb_scrub_packet(skb, xnet);
skb->priority = 0;
nf_skip_egress(skb, true);
+ skb_reset_mac_header(skb);
}
static struct netkit *netkit_priv(const struct net_device *dev)
@@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
+ eth_skb_pkt_type(skb, peer);
skb->dev = peer;
entry = rcu_dereference(nk->active);
if (entry)
@@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
switch (ret) {
case NETKIT_NEXT:
case NETKIT_PASS:
- skb->protocol = eth_type_trans(skb, skb->dev);
+ eth_skb_pull_mac(skb);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
dev_sw_netstats_tx_add(dev, 1, len);
@@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev)
/* Nothing to do, we receive whatever gets pushed to us! */
}
+static int netkit_set_macaddr(struct net_device *dev, void *sa)
+{
+ struct netkit *nk = netkit_priv(dev);
+
+ if (nk->mode != NETKIT_L2)
+ return -EOPNOTSUPP;
+
+ return eth_mac_addr(dev, sa);
+}
+
static void netkit_set_headroom(struct net_device *dev, int headroom)
{
struct netkit *nk = netkit_priv(dev), *nk2;
@@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_start_xmit = netkit_xmit,
.ndo_set_rx_mode = netkit_set_multicast,
.ndo_set_rx_headroom = netkit_set_headroom,
+ .ndo_set_mac_address = netkit_set_macaddr,
.ndo_get_iflink = netkit_get_iflink,
.ndo_get_peer_dev = netkit_peer_dev,
.ndo_get_stats64 = netkit_get_stats,
@@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
if (!attr)
return 0;
- NL_SET_ERR_MSG_ATTR(extack, attr,
- "Setting Ethernet address is not supported");
- return -EOPNOTSUPP;
+ if (nla_len(attr) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(attr)))
+ return -EADDRNOTAVAIL;
+ return 0;
}
static struct rtnl_link_ops netkit_link_ops;
@@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
strscpy(ifname, "nk%d", IFNAMSIZ);
ifname_assign_type = NET_NAME_ENUM;
}
+ if (mode != NETKIT_L2 &&
+ (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
+ return -EOPNOTSUPP;
net = rtnl_link_get_net(src_net, tbp);
if (IS_ERR(net))
@@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
netif_inherit_tso_max(peer, dev);
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
if (ifmp && dev->ifindex)
peer->ifindex = ifmp->ifi_index;
@@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
if (err < 0)
goto err_configure_peer;
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
if (tb[IFLA_IFNAME])
nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ffe7f463e16e..ef6df0e37bea 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
- if (__netif_rx(skb) == NET_RX_DROP) {
+ if (netif_rx(skb) == NET_RX_DROP) {
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
} else {
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index 87cf308fc6d8..f6aa437473de 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -6,11 +6,11 @@
menu "PCS device drivers"
config PCS_XPCS
- tristate
+ tristate "Synopsys DesignWare Ethernet XPCS"
select PHYLINK
help
- This module provides helper functions for Synopsys DesignWare XPCS
- controllers.
+ This module provides a driver and helper functions for Synopsys
+ DesignWare XPCS controllers.
config PCS_LYNX
tristate
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index fb1694192ae6..4f7920618b90 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PCS drivers
-pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o pcs-xpcs-wx.o
+pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-plat.o \
+ pcs-xpcs-nxp.o pcs-xpcs-wx.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
diff --git a/drivers/net/pcs/pcs-xpcs-plat.c b/drivers/net/pcs/pcs-xpcs-plat.c
new file mode 100644
index 000000000000..629315f1e57c
--- /dev/null
+++ b/drivers/net/pcs/pcs-xpcs-plat.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare XPCS platform device driver
+ *
+ * Copyright (C) 2024 Serge Semin
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/sizes.h>
+
+#include "pcs-xpcs.h"
+
+/* Page select register for the indirect MMIO CSRs access */
+#define DW_VR_CSR_VIEWPORT 0xff
+
+struct dw_xpcs_plat {
+ struct platform_device *pdev;
+ struct mii_bus *bus;
+ bool reg_indir;
+ int reg_width;
+ void __iomem *reg_base;
+ struct clk *cclk;
+};
+
+static ptrdiff_t xpcs_mmio_addr_format(int dev, int reg)
+{
+ return FIELD_PREP(0x1f0000, dev) | FIELD_PREP(0xffff, reg);
+}
+
+static u16 xpcs_mmio_addr_page(ptrdiff_t csr)
+{
+ return FIELD_GET(0x1fff00, csr);
+}
+
+static ptrdiff_t xpcs_mmio_addr_offset(ptrdiff_t csr)
+{
+ return FIELD_GET(0xff, csr);
+}
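+
+/* Editorial worked example (not part of the original patch): for an indirect
+ * access to dev = MDIO_MMD_VEND2 (0x1f), reg = 0x8020 the helpers above give
+ * csr  = FIELD_PREP(0x1f0000, 0x1f) | FIELD_PREP(0xffff, 0x8020) = 0x1f8020,
+ * page = FIELD_GET(0x1fff00, 0x1f8020) = 0x1f80,
+ * ofs  = FIELD_GET(0xff, 0x1f8020)     = 0x20.
+ * With a 16-bit reg-space the driver then writes 0x1f80 to the viewport at
+ * reg_base + (0xff << 1) and accesses the CSR at reg_base + (0x20 << 1).
+ */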
+
+static int xpcs_mmio_read_reg_indirect(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg)
+{
+ ptrdiff_t csr, ofs;
+ u16 page;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+ page = xpcs_mmio_addr_page(csr);
+ ofs = xpcs_mmio_addr_offset(csr);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2));
+ ret = readl(pxpcs->reg_base + (ofs << 2));
+ break;
+ default:
+ writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1));
+ ret = readw(pxpcs->reg_base + (ofs << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return ret;
+}
+
+static int xpcs_mmio_write_reg_indirect(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg, u16 val)
+{
+ ptrdiff_t csr, ofs;
+ u16 page;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+ page = xpcs_mmio_addr_page(csr);
+ ofs = xpcs_mmio_addr_offset(csr);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2));
+ writel(val, pxpcs->reg_base + (ofs << 2));
+ break;
+ default:
+ writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1));
+ writew(val, pxpcs->reg_base + (ofs << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return 0;
+}
+
+static int xpcs_mmio_read_reg_direct(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg)
+{
+ ptrdiff_t csr;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ ret = readl(pxpcs->reg_base + (csr << 2));
+ break;
+ default:
+ ret = readw(pxpcs->reg_base + (csr << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return ret;
+}
+
+static int xpcs_mmio_write_reg_direct(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg, u16 val)
+{
+ ptrdiff_t csr;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ writel(val, pxpcs->reg_base + (csr << 2));
+ break;
+ default:
+ writew(val, pxpcs->reg_base + (csr << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return 0;
+}
+
+static int xpcs_mmio_read_c22(struct mii_bus *bus, int addr, int reg)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_read_reg_indirect(pxpcs, MDIO_MMD_VEND2, reg);
+ else
+ return xpcs_mmio_read_reg_direct(pxpcs, MDIO_MMD_VEND2, reg);
+}
+
+static int xpcs_mmio_write_c22(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_write_reg_indirect(pxpcs, MDIO_MMD_VEND2, reg, val);
+ else
+ return xpcs_mmio_write_reg_direct(pxpcs, MDIO_MMD_VEND2, reg, val);
+}
+
+static int xpcs_mmio_read_c45(struct mii_bus *bus, int addr, int dev, int reg)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_read_reg_indirect(pxpcs, dev, reg);
+ else
+ return xpcs_mmio_read_reg_direct(pxpcs, dev, reg);
+}
+
+static int xpcs_mmio_write_c45(struct mii_bus *bus, int addr, int dev,
+ int reg, u16 val)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_write_reg_indirect(pxpcs, dev, reg, val);
+ else
+ return xpcs_mmio_write_reg_direct(pxpcs, dev, reg, val);
+}
+
+static struct dw_xpcs_plat *xpcs_plat_create_data(struct platform_device *pdev)
+{
+ struct dw_xpcs_plat *pxpcs;
+
+ pxpcs = devm_kzalloc(&pdev->dev, sizeof(*pxpcs), GFP_KERNEL);
+ if (!pxpcs)
+ return ERR_PTR(-ENOMEM);
+
+ pxpcs->pdev = pdev;
+
+ dev_set_drvdata(&pdev->dev, pxpcs);
+
+ return pxpcs;
+}
+
+static int xpcs_plat_init_res(struct dw_xpcs_plat *pxpcs)
+{
+ struct platform_device *pdev = pxpcs->pdev;
+ struct device *dev = &pdev->dev;
+ resource_size_t spc_size;
+ struct resource *res;
+
+ if (!device_property_read_u32(dev, "reg-io-width", &pxpcs->reg_width)) {
+ if (pxpcs->reg_width != 2 && pxpcs->reg_width != 4) {
+ dev_err(dev, "Invalid reg-space data width\n");
+ return -EINVAL;
+ }
+ } else {
+ pxpcs->reg_width = 2;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "direct") ?:
+ platform_get_resource_byname(pdev, IORESOURCE_MEM, "indirect");
+ if (!res) {
+ dev_err(dev, "No reg-space found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(res->name, "indirect"))
+ pxpcs->reg_indir = true;
+
+ if (pxpcs->reg_indir)
+ spc_size = pxpcs->reg_width * SZ_256;
+ else
+ spc_size = pxpcs->reg_width * SZ_2M;
+
+ if (resource_size(res) < spc_size) {
+ dev_err(dev, "Invalid reg-space size\n");
+ return -EINVAL;
+ }
+
+ pxpcs->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pxpcs->reg_base)) {
+ dev_err(dev, "Failed to map reg-space\n");
+ return PTR_ERR(pxpcs->reg_base);
+ }
+
+ return 0;
+}
+
+static int xpcs_plat_init_clk(struct dw_xpcs_plat *pxpcs)
+{
+ struct device *dev = &pxpcs->pdev->dev;
+ int ret;
+
+ pxpcs->cclk = devm_clk_get(dev, "csr");
+ if (IS_ERR(pxpcs->cclk))
+ return dev_err_probe(dev, PTR_ERR(pxpcs->cclk),
+ "Failed to get CSR clock\n");
+
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable runtime-PM\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xpcs_plat_init_bus(struct dw_xpcs_plat *pxpcs)
+{
+ struct device *dev = &pxpcs->pdev->dev;
+ static atomic_t id = ATOMIC_INIT(-1);
+ int ret;
+
+ pxpcs->bus = devm_mdiobus_alloc_size(dev, 0);
+ if (!pxpcs->bus)
+ return -ENOMEM;
+
+ pxpcs->bus->name = "DW XPCS MCI/APB3";
+ pxpcs->bus->read = xpcs_mmio_read_c22;
+ pxpcs->bus->write = xpcs_mmio_write_c22;
+ pxpcs->bus->read_c45 = xpcs_mmio_read_c45;
+ pxpcs->bus->write_c45 = xpcs_mmio_write_c45;
+ pxpcs->bus->phy_mask = ~0;
+ pxpcs->bus->parent = dev;
+ pxpcs->bus->priv = pxpcs;
+
+ snprintf(pxpcs->bus->id, MII_BUS_ID_SIZE,
+ "dwxpcs-%x", atomic_inc_return(&id));
+
+ /* The MDIO bus here serves only as a back-end engine abstracting the
+ * MDIO and MCI/APB3 I/O interfaces used for DW XPCS CSR access.
+ */
+ ret = devm_mdiobus_register(dev, pxpcs->bus);
+ if (ret) {
+ dev_err(dev, "Failed to create MDIO bus\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Note there is no teardown counterpart to the next function because the
+ * MDIO-bus de-registration will remove and destroy all the MDIO devices
+ * registered on the bus.
+ */
+static int xpcs_plat_init_dev(struct dw_xpcs_plat *pxpcs)
+{
+ struct device *dev = &pxpcs->pdev->dev;
+ struct mdio_device *mdiodev;
+ int ret;
+
+ /* There is a single memory-mapped DW XPCS device */
+ mdiodev = mdio_device_create(pxpcs->bus, 0);
+ if (IS_ERR(mdiodev))
+ return PTR_ERR(mdiodev);
+
+ /* Associate the fwnode with the device structure so it can be looked
+ * up later. Make sure the driver core is aware of the OF node being
+ * re-used.
+ */
+ device_set_node(&mdiodev->dev, fwnode_handle_get(dev_fwnode(dev)));
+ mdiodev->dev.of_node_reused = true;
+
+ /* Pass the data further so the DW XPCS driver core could use it */
+ mdiodev->dev.platform_data = (void *)device_get_match_data(dev);
+
+ ret = mdio_device_register(mdiodev);
+ if (ret) {
+ dev_err(dev, "Failed to register MDIO device\n");
+ goto err_clean_data;
+ }
+
+ return 0;
+
+err_clean_data:
+ mdiodev->dev.platform_data = NULL;
+
+ fwnode_handle_put(dev_fwnode(&mdiodev->dev));
+ device_set_node(&mdiodev->dev, NULL);
+
+ mdio_device_free(mdiodev);
+
+ return ret;
+}
+
+static int xpcs_plat_probe(struct platform_device *pdev)
+{
+ struct dw_xpcs_plat *pxpcs;
+ int ret;
+
+ pxpcs = xpcs_plat_create_data(pdev);
+ if (IS_ERR(pxpcs))
+ return PTR_ERR(pxpcs);
+
+ ret = xpcs_plat_init_res(pxpcs);
+ if (ret)
+ return ret;
+
+ ret = xpcs_plat_init_clk(pxpcs);
+ if (ret)
+ return ret;
+
+ ret = xpcs_plat_init_bus(pxpcs);
+ if (ret)
+ return ret;
+
+ ret = xpcs_plat_init_dev(pxpcs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused xpcs_plat_pm_runtime_suspend(struct device *dev)
+{
+ struct dw_xpcs_plat *pxpcs = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pxpcs->cclk);
+
+ return 0;
+}
+
+static int __maybe_unused xpcs_plat_pm_runtime_resume(struct device *dev)
+{
+ struct dw_xpcs_plat *pxpcs = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(pxpcs->cclk);
+}
+
+static const struct dev_pm_ops xpcs_plat_pm_ops = {
+ SET_RUNTIME_PM_OPS(xpcs_plat_pm_runtime_suspend,
+ xpcs_plat_pm_runtime_resume,
+ NULL)
+};
+
+DW_XPCS_INFO_DECLARE(xpcs_generic, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_ID_NATIVE);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen1_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN1_3G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_3G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_6G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_3G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_6G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_10g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_10G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_12g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_12G_ID);
+
+static const struct of_device_id xpcs_of_ids[] = {
+ { .compatible = "snps,dw-xpcs", .data = &xpcs_generic },
+ { .compatible = "snps,dw-xpcs-gen1-3g", .data = &xpcs_pma_gen1_3g },
+ { .compatible = "snps,dw-xpcs-gen2-3g", .data = &xpcs_pma_gen2_3g },
+ { .compatible = "snps,dw-xpcs-gen2-6g", .data = &xpcs_pma_gen2_6g },
+ { .compatible = "snps,dw-xpcs-gen4-3g", .data = &xpcs_pma_gen4_3g },
+ { .compatible = "snps,dw-xpcs-gen4-6g", .data = &xpcs_pma_gen4_6g },
+ { .compatible = "snps,dw-xpcs-gen5-10g", .data = &xpcs_pma_gen5_10g },
+ { .compatible = "snps,dw-xpcs-gen5-12g", .data = &xpcs_pma_gen5_12g },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, xpcs_of_ids);
+
+static struct platform_driver xpcs_plat_driver = {
+ .probe = xpcs_plat_probe,
+ .driver = {
+ .name = "dwxpcs",
+ .pm = &xpcs_plat_pm_ops,
+ .of_match_table = xpcs_of_ids,
+ },
+};
+module_platform_driver(xpcs_plat_driver);
+
+MODULE_DESCRIPTION("Synopsys DesignWare XPCS platform device driver");
+MODULE_AUTHOR("Signed-off-by: Serge Semin <fancer.lancer@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 31525fe9c32e..82463f9d50c8 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -6,10 +6,13 @@
* Author: Jose Abreu <Jose.Abreu@synopsys.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
+#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/property.h>
#include "pcs-xpcs.h"
@@ -143,7 +146,7 @@ enum {
DW_XPCS_INTERFACE_MAX,
};
-struct xpcs_compat {
+struct dw_xpcs_compat {
const int *supported;
const phy_interface_t *interface;
int num_interfaces;
@@ -151,19 +154,19 @@ struct xpcs_compat {
int (*pma_config)(struct dw_xpcs *xpcs);
};
-struct xpcs_id {
+struct dw_xpcs_desc {
u32 id;
u32 mask;
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
};
-static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id,
- phy_interface_t interface)
+static const struct dw_xpcs_compat *
+xpcs_find_compat(const struct dw_xpcs_desc *desc, phy_interface_t interface)
{
int i, j;
for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
- const struct xpcs_compat *compat = &id->compat[i];
+ const struct dw_xpcs_compat *compat = &desc->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
if (compat->interface[j] == interface)
@@ -175,9 +178,9 @@ static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id,
int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface)
{
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
- compat = xpcs_find_compat(xpcs->id, interface);
+ compat = xpcs_find_compat(xpcs->desc, interface);
if (!compat)
return -ENODEV;
@@ -185,7 +188,7 @@ int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface)
}
EXPORT_SYMBOL_GPL(xpcs_get_an_mode);
-static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat,
+static bool __xpcs_linkmode_supported(const struct dw_xpcs_compat *compat,
enum ethtool_link_mode_bit_indices linkmode)
{
int i;
@@ -237,29 +240,6 @@ int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val)
return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
}
-static int xpcs_dev_flag(struct dw_xpcs *xpcs)
-{
- int ret, oui;
-
- ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1);
- if (ret < 0)
- return ret;
-
- oui = ret;
-
- ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2);
- if (ret < 0)
- return ret;
-
- ret = (ret >> 10) & 0x3F;
- oui |= ret << 16;
-
- if (oui == DW_OUI_WX)
- xpcs->dev_flag = DW_DEV_TXGBE;
-
- return 0;
-}
-
static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev)
{
/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
@@ -277,7 +257,7 @@ static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev)
}
static int xpcs_soft_reset(struct dw_xpcs *xpcs,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
int ret, dev;
@@ -418,7 +398,7 @@ out:
}
static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
int ret, adv;
@@ -463,7 +443,7 @@ static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
}
static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
int ret;
@@ -482,7 +462,7 @@ static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state,
- const struct xpcs_compat *compat, u16 an_stat1)
+ const struct dw_xpcs_compat *compat, u16 an_stat1)
{
int ret;
@@ -607,12 +587,12 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, };
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
struct dw_xpcs *xpcs;
int i;
xpcs = phylink_pcs_to_xpcs(pcs);
- compat = xpcs_find_compat(xpcs->id, state->interface);
+ compat = xpcs_find_compat(xpcs->desc, state->interface);
if (!compat)
return -EINVAL;
@@ -633,7 +613,7 @@ void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
int i, j;
for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
- const struct xpcs_compat *compat = &xpcs->id->compat[i];
+ const struct dw_xpcs_compat *compat = &xpcs->desc->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
__set_bit(compat->interface[j], interfaces);
@@ -684,7 +664,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
{
int ret, mdio_ctrl, tx_conf;
- if (xpcs->dev_flag == DW_DEV_TXGBE)
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1);
/* For AN for C37 SGMII mode, the settings are :-
@@ -722,7 +702,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
ret |= (DW_VR_MII_PCS_MODE_C37_SGMII <<
DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT &
DW_VR_MII_PCS_MODE_MASK);
- if (xpcs->dev_flag == DW_DEV_TXGBE) {
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
ret |= DW_VR_MII_AN_CTRL_8BIT;
/* Hardware requires it to be PHY side SGMII */
tx_conf = DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII;
@@ -744,7 +724,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
else
ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
- if (xpcs->dev_flag == DW_DEV_TXGBE)
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
ret |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL;
ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
@@ -766,7 +746,7 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs,
int ret, mdio_ctrl, adv;
bool changed = 0;
- if (xpcs->dev_flag == DW_DEV_TXGBE)
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1);
/* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must
@@ -850,14 +830,14 @@ static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
const unsigned long *advertising, unsigned int neg_mode)
{
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
int ret;
- compat = xpcs_find_compat(xpcs->id, interface);
+ compat = xpcs_find_compat(xpcs->desc, interface);
if (!compat)
return -ENODEV;
- if (xpcs->dev_flag == DW_DEV_TXGBE) {
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
ret = txgbe_xpcs_switch_mode(xpcs, interface);
if (ret)
return ret;
@@ -915,7 +895,7 @@ static int xpcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
bool an_enabled;
int pcs_stat1;
@@ -1115,10 +1095,10 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
int ret;
- compat = xpcs_find_compat(xpcs->id, state->interface);
+ compat = xpcs_find_compat(xpcs->desc, state->interface);
if (!compat)
return;
@@ -1229,47 +1209,73 @@ static void xpcs_an_restart(struct phylink_pcs *pcs)
}
}
-static u32 xpcs_get_id(struct dw_xpcs *xpcs)
+static int xpcs_get_id(struct dw_xpcs *xpcs)
{
int ret;
u32 id;
- /* First, search C73 PCS using PCS MMD */
+ /* First, search for a C73 PCS using PCS MMD 3. Return -ENODEV if the
+ * communication failed, indicating that the device couldn't be reached.
+ */
ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1);
if (ret < 0)
- return 0xffffffff;
+ return -ENODEV;
id = ret << 16;
ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID2);
if (ret < 0)
- return 0xffffffff;
+ return ret;
+
+ id |= ret;
- /* If Device IDs are not all zeros or all ones,
- * we found C73 AN-type device
+ /* If the Device IDs are not all zeros or all ones, a 10GBase-X/R or
+ * C73 KR/KX4 PCS was found. Otherwise fall back to detecting a
+ * 1000Base-X or C37 PCS in MII MMD 31.
*/
- if ((id | ret) && (id | ret) != 0xffffffff)
- return id | ret;
+ if (!id || id == 0xffffffff) {
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1);
+ if (ret < 0)
+ return ret;
+
+ id = ret << 16;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2);
+ if (ret < 0)
+ return ret;
- /* Next, search C37 PCS using Vendor-Specific MII MMD */
- ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1);
+ id |= ret;
+ }
+
+ /* Set the PCS ID if it hasn't been pre-initialized */
+ if (xpcs->info.pcs == DW_XPCS_ID_NATIVE)
+ xpcs->info.pcs = id;
+
+ /* Find out PMA/PMD ID from MMD 1 device ID registers */
+ ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1);
if (ret < 0)
- return 0xffffffff;
+ return ret;
- id = ret << 16;
+ id = ret;
- ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2);
+ ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2);
if (ret < 0)
- return 0xffffffff;
+ return ret;
+
+ /* Note the inverted dword order and masked out Model/Revision numbers
+ * with respect to what is done with the PCS ID...
+ */
+ ret = (ret >> 10) & 0x3F;
+ id |= ret << 16;
- /* If Device IDs are not all zeros, we found C37 AN-type device */
- if (id | ret)
- return id | ret;
+ /* Set the PMA ID if it hasn't been pre-initialized */
+ if (xpcs->info.pma == DW_XPCS_PMA_ID_NATIVE)
+ xpcs->info.pma = id;
- return 0xffffffff;
+ return 0;
}
-static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+static const struct dw_xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_USXGMII] = {
.supported = xpcs_usxgmii_features,
.interface = xpcs_usxgmii_interfaces,
@@ -1314,7 +1320,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
},
};
-static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+static const struct dw_xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_SGMII] = {
.supported = xpcs_sgmii_features,
.interface = xpcs_sgmii_interfaces,
@@ -1324,7 +1330,7 @@ static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] =
},
};
-static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+static const struct dw_xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_SGMII] = {
.supported = xpcs_sgmii_features,
.interface = xpcs_sgmii_interfaces,
@@ -1341,18 +1347,18 @@ static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] =
},
};
-static const struct xpcs_id xpcs_id_list[] = {
+static const struct dw_xpcs_desc xpcs_desc_list[] = {
{
- .id = SYNOPSYS_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .id = DW_XPCS_ID,
+ .mask = DW_XPCS_ID_MASK,
.compat = synopsys_xpcs_compat,
}, {
.id = NXP_SJA1105_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .mask = DW_XPCS_ID_MASK,
.compat = nxp_sja1105_xpcs_compat,
}, {
.id = NXP_SJA1110_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .mask = DW_XPCS_ID_MASK,
.compat = nxp_sja1110_xpcs_compat,
},
};
@@ -1365,12 +1371,9 @@ static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_link_up = xpcs_link_up,
};
-static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
- phy_interface_t interface)
+static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev)
{
struct dw_xpcs *xpcs;
- u32 xpcs_id;
- int i, ret;
xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
if (!xpcs)
@@ -1378,59 +1381,142 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
mdio_device_get(mdiodev);
xpcs->mdiodev = mdiodev;
+ xpcs->pcs.ops = &xpcs_phylink_ops;
+ xpcs->pcs.neg_mode = true;
+ xpcs->pcs.poll = true;
+
+ return xpcs;
+}
- xpcs_id = xpcs_get_id(xpcs);
+static void xpcs_free_data(struct dw_xpcs *xpcs)
+{
+ mdio_device_put(xpcs->mdiodev);
+ kfree(xpcs);
+}
- for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
- const struct xpcs_id *entry = &xpcs_id_list[i];
- const struct xpcs_compat *compat;
+static int xpcs_init_clks(struct dw_xpcs *xpcs)
+{
+ static const char *ids[DW_XPCS_NUM_CLKS] = {
+ [DW_XPCS_CORE_CLK] = "core",
+ [DW_XPCS_PAD_CLK] = "pad",
+ };
+ struct device *dev = &xpcs->mdiodev->dev;
+ int ret, i;
- if ((xpcs_id & entry->mask) != entry->id)
- continue;
+ for (i = 0; i < DW_XPCS_NUM_CLKS; ++i)
+ xpcs->clks[i].id = ids[i];
- xpcs->id = entry;
+ ret = clk_bulk_get_optional(dev, DW_XPCS_NUM_CLKS, xpcs->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
- compat = xpcs_find_compat(entry, interface);
- if (!compat) {
- ret = -ENODEV;
- goto out;
- }
+ ret = clk_bulk_prepare_enable(DW_XPCS_NUM_CLKS, xpcs->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable clocks\n");
- ret = xpcs_dev_flag(xpcs);
- if (ret)
- goto out;
+ return 0;
+}
- xpcs->pcs.ops = &xpcs_phylink_ops;
- xpcs->pcs.neg_mode = true;
+static void xpcs_clear_clks(struct dw_xpcs *xpcs)
+{
+ clk_bulk_disable_unprepare(DW_XPCS_NUM_CLKS, xpcs->clks);
- if (xpcs->dev_flag != DW_DEV_TXGBE) {
- xpcs->pcs.poll = true;
+ clk_bulk_put(DW_XPCS_NUM_CLKS, xpcs->clks);
+}
- ret = xpcs_soft_reset(xpcs, compat);
- if (ret)
- goto out;
- }
+static int xpcs_init_id(struct dw_xpcs *xpcs)
+{
+ const struct dw_xpcs_info *info;
+ int i, ret;
- return xpcs;
+ info = dev_get_platdata(&xpcs->mdiodev->dev);
+ if (!info) {
+ xpcs->info.pcs = DW_XPCS_ID_NATIVE;
+ xpcs->info.pma = DW_XPCS_PMA_ID_NATIVE;
+ } else {
+ xpcs->info = *info;
}
- ret = -ENODEV;
+ ret = xpcs_get_id(xpcs);
+ if (ret < 0)
+ return ret;
-out:
- mdio_device_put(mdiodev);
- kfree(xpcs);
+ for (i = 0; i < ARRAY_SIZE(xpcs_desc_list); i++) {
+ const struct dw_xpcs_desc *desc = &xpcs_desc_list[i];
- return ERR_PTR(ret);
+ if ((xpcs->info.pcs & desc->mask) != desc->id)
+ continue;
+
+ xpcs->desc = desc;
+
+ break;
+ }
+
+ if (!xpcs->desc)
+ return -ENODEV;
+
+ return 0;
}
-void xpcs_destroy(struct dw_xpcs *xpcs)
+static int xpcs_init_iface(struct dw_xpcs *xpcs, phy_interface_t interface)
{
- if (xpcs)
- mdio_device_put(xpcs->mdiodev);
- kfree(xpcs);
+ const struct dw_xpcs_compat *compat;
+
+ compat = xpcs_find_compat(xpcs->desc, interface);
+ if (!compat)
+ return -EINVAL;
+
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
+ xpcs->pcs.poll = false;
+ return 0;
+ }
+
+ return xpcs_soft_reset(xpcs, compat);
}
-EXPORT_SYMBOL_GPL(xpcs_destroy);
+static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface)
+{
+ struct dw_xpcs *xpcs;
+ int ret;
+
+ xpcs = xpcs_create_data(mdiodev);
+ if (IS_ERR(xpcs))
+ return xpcs;
+
+ ret = xpcs_init_clks(xpcs);
+ if (ret)
+ goto out_free_data;
+
+ ret = xpcs_init_id(xpcs);
+ if (ret)
+ goto out_clear_clks;
+
+ ret = xpcs_init_iface(xpcs, interface);
+ if (ret)
+ goto out_clear_clks;
+
+ return xpcs;
+
+out_clear_clks:
+ xpcs_clear_clks(xpcs);
+
+out_free_data:
+ xpcs_free_data(xpcs);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * xpcs_create_mdiodev() - create a DW xPCS instance with the MDIO @addr
+ * @bus: pointer to the MDIO-bus descriptor for the device to be looked at
+ * @addr: device MDIO-bus ID
+ * @interface: requested PHY interface
+ *
+ * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if
+ * the PCS device couldn't be found on the bus, or another negative errno
+ * related to data allocation or MDIO-bus communication.
+ */
struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
phy_interface_t interface)
{
@@ -1455,5 +1541,54 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
}
EXPORT_SYMBOL_GPL(xpcs_create_mdiodev);
+/**
+ * xpcs_create_fwnode() - Create a DW xPCS instance from @fwnode
+ * @fwnode: fwnode handle pointing to the DW XPCS device
+ * @interface: requested PHY interface
+ *
+ * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if
+ * the fwnode device is unavailable or the PCS device couldn't be found on the
+ * bus, -EPROBE_DEFER if the respective MDIO-device instance couldn't be found,
+ * or another negative errno related to data allocation or MDIO-bus
+ * communication.
+ */
+struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode,
+ phy_interface_t interface)
+{
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ if (!fwnode_device_is_available(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ mdiodev = fwnode_mdio_find_device(fwnode);
+ if (!mdiodev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ xpcs = xpcs_create(mdiodev, interface);
+
+ /* xpcs_create() has taken a refcount on the mdiodev if it was
+ * successful. If xpcs_create() fails, this will free the mdio
+ * device here. In any case, we don't need to hold our reference
+ * anymore, and putting it here will allow mdio_device_put() in
+ * xpcs_destroy() to automatically free the mdio device.
+ */
+ mdio_device_put(mdiodev);
+
+ return xpcs;
+}
+EXPORT_SYMBOL_GPL(xpcs_create_fwnode);
+
+void xpcs_destroy(struct dw_xpcs *xpcs)
+{
+ if (!xpcs)
+ return;
+
+ xpcs_clear_clks(xpcs);
+
+ xpcs_free_data(xpcs);
+}
+EXPORT_SYMBOL_GPL(xpcs_destroy);
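
A rough sketch of how a MAC driver might pair the new fwnode-based constructor with xpcs_destroy(); the my_mac_* structure and functions are hypothetical and not taken from any in-tree caller, and the interface mode is only an example.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>

struct my_mac_priv {				/* hypothetical driver-private data */
	struct device *dev;
	struct fwnode_handle *pcs_node;		/* e.g. taken from a "pcs-handle" property */
	struct dw_xpcs *xpcs;
};

static int my_mac_attach_pcs(struct my_mac_priv *priv)
{
	struct dw_xpcs *xpcs;

	xpcs = xpcs_create_fwnode(priv->pcs_node, PHY_INTERFACE_MODE_10GBASER);
	if (IS_ERR(xpcs))
		return dev_err_probe(priv->dev, PTR_ERR(xpcs),
				     "Failed to create DW XPCS\n");

	priv->xpcs = xpcs;

	return 0;
}

static void my_mac_detach_pcs(struct my_mac_priv *priv)
{
	xpcs_destroy(priv->xpcs);	/* NULL-safe per the implementation above */
	priv->xpcs = NULL;
}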
+
MODULE_DESCRIPTION("Synopsys DesignWare XPCS library");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
index 96c36b32ca99..fa05adfae220 100644
--- a/drivers/net/pcs/pcs-xpcs.h
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -6,8 +6,8 @@
* Author: Jose Abreu <Jose.Abreu@synopsys.com>
*/
-#define SYNOPSYS_XPCS_ID 0x7996ced0
-#define SYNOPSYS_XPCS_MASK 0xffffffff
+#include <linux/bits.h>
+#include <linux/pcs/pcs-xpcs.h>
/* Vendor regs access */
#define DW_VENDOR BIT(15)
@@ -120,6 +120,9 @@
/* VR MII EEE Control 1 defines */
#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+#define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \
+ static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma }
+
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val);
int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg);
diff --git a/drivers/net/phy/aquantia/Makefile b/drivers/net/phy/aquantia/Makefile
index aa77fb63c8ec..c6c4d494ee2a 100644
--- a/drivers/net/phy/aquantia/Makefile
+++ b/drivers/net/phy/aquantia/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-aquantia-objs += aquantia_main.o aquantia_firmware.o
+aquantia-objs += aquantia_main.o aquantia_firmware.o aquantia_leds.o
ifdef CONFIG_HWMON
aquantia-objs += aquantia_hwmon.o
endif
diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
index 1c19ae74ad2b..2465345081f8 100644
--- a/drivers/net/phy/aquantia/aquantia.h
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -6,6 +6,9 @@
* Author: Heiner Kallweit <hkallweit1@gmail.com>
*/
+#ifndef AQUANTIA_H
+#define AQUANTIA_H
+
#include <linux/device.h>
#include <linux/phy.h>
@@ -63,6 +66,28 @@
#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD BIT(6)
#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL BIT(0)
+#define VEND1_GLOBAL_LED_PROV 0xc430
+#define AQR_LED_PROV(x) (VEND1_GLOBAL_LED_PROV + (x))
+#define VEND1_GLOBAL_LED_PROV_LINK2500 BIT(14)
+#define VEND1_GLOBAL_LED_PROV_LINK5000 BIT(15)
+#define VEND1_GLOBAL_LED_PROV_FORCE_ON BIT(8)
+#define VEND1_GLOBAL_LED_PROV_LINK10000 BIT(7)
+#define VEND1_GLOBAL_LED_PROV_LINK1000 BIT(6)
+#define VEND1_GLOBAL_LED_PROV_LINK100 BIT(5)
+#define VEND1_GLOBAL_LED_PROV_RX_ACT BIT(3)
+#define VEND1_GLOBAL_LED_PROV_TX_ACT BIT(2)
+#define VEND1_GLOBAL_LED_PROV_ACT_STRETCH GENMASK(0, 1)
+
+#define VEND1_GLOBAL_LED_PROV_LINK_MASK (VEND1_GLOBAL_LED_PROV_LINK100 | \
+ VEND1_GLOBAL_LED_PROV_LINK1000 | \
+ VEND1_GLOBAL_LED_PROV_LINK10000 | \
+ VEND1_GLOBAL_LED_PROV_LINK5000 | \
+ VEND1_GLOBAL_LED_PROV_LINK2500)
+
+#define VEND1_GLOBAL_LED_DRIVE 0xc438
+#define VEND1_GLOBAL_LED_DRIVE_VDD BIT(1)
+#define AQR_LED_DRIVE(x) (VEND1_GLOBAL_LED_DRIVE + (x))
+
#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
@@ -87,6 +112,18 @@
#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
+/* MDIO_MMD_C22EXT */
+#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292
+#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294
+#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297
+#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313
+#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315
+#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317
+#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318
+#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319
+#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
+#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
+
#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
@@ -113,6 +150,35 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+#define AQR_MAX_LEDS 3
+
+struct aqr107_hw_stat {
+ const char *name;
+ int reg;
+ int size;
+};
+
+#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
+static const struct aqr107_hw_stat aqr107_hw_stats[] = {
+ SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16),
+ SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22),
+};
+
+#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+
+struct aqr107_priv {
+ u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+ unsigned long leds_active_low;
+};
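
The size column in aqr107_hw_stats[] appears to be the width of each hardware counter in bits; counters wider than 16 bits are assumed to span two consecutive registers, low word first. Under that assumption, a standalone sketch of how such a counter could be reassembled and masked to its valid bits:

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch, not driver code: mask each half of a split counter to
 * its declared width and combine the halves into one 64-bit value.
 */
static uint64_t combine_counter(uint16_t lo_reg, uint16_t hi_reg, int size_bits)
{
	int lo_bits = size_bits < 16 ? size_bits : 16;
	int hi_bits = size_bits - lo_bits;
	uint64_t val = lo_reg & ((1u << lo_bits) - 1);

	if (hi_bits)
		val |= (uint64_t)(hi_reg & ((1u << hi_bits) - 1)) << 16;

	return val;
}

int main(void)
{
	/* e.g. a 26-bit "good frames" counter split across two registers */
	printf("%llu\n",
	       (unsigned long long)combine_counter(0xffff, 0x03ff, 26));
	return 0;
}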
+
#if IS_REACHABLE(CONFIG_HWMON)
int aqr_hwmon_probe(struct phy_device *phydev);
#else
@@ -120,3 +186,21 @@ static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
#endif
int aqr_firmware_load(struct phy_device *phydev);
+
+int aqr_phy_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+int aqr_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value);
+int aqr_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules);
+int aqr_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules);
+int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules);
+int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable);
+int aqr_phy_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes);
+int aqr_wait_reset_complete(struct phy_device *phydev);
+
+#endif /* AQUANTIA_H */
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index 0c9640ef153b..524627a36c6f 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -353,6 +353,10 @@ int aqr_firmware_load(struct phy_device *phydev)
{
int ret;
+ ret = aqr_wait_reset_complete(phydev);
+ if (ret)
+ return ret;
+
 /* Check if the firmware is not already loaded by polling
* the current version returned by the PHY. If 0 is returned,
* no firmware is loaded.
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
new file mode 100644
index 000000000000..0516ac02c3f8
--- /dev/null
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* LED driver for Aquantia PHY
+ *
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#include <linux/phy.h>
+
+#include "aquantia.h"
+
+int aqr_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index),
+ VEND1_GLOBAL_LED_PROV_LINK_MASK |
+ VEND1_GLOBAL_LED_PROV_FORCE_ON |
+ VEND1_GLOBAL_LED_PROV_RX_ACT |
+ VEND1_GLOBAL_LED_PROV_TX_ACT,
+ value ? VEND1_GLOBAL_LED_PROV_FORCE_ON : 0);
+}
+
+static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX));
+
+int aqr_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_triggers)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int aqr_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index));
+ if (val < 0)
+ return val;
+
+ *rules = 0;
+ if (val & VEND1_GLOBAL_LED_PROV_LINK100)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_100);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK1000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK2500)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_2500);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK5000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_5000);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK10000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_10000);
+
+ if (val & VEND1_GLOBAL_LED_PROV_RX_ACT)
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+ if (val & VEND1_GLOBAL_LED_PROV_TX_ACT)
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ return 0;
+}
+
+int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK100;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK1000;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK2500;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_5000) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK5000;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_10000) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK10000;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX))
+ val |= VEND1_GLOBAL_LED_PROV_RX_ACT;
+
+ if (rules & BIT(TRIGGER_NETDEV_TX))
+ val |= VEND1_GLOBAL_LED_PROV_TX_ACT;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index),
+ VEND1_GLOBAL_LED_PROV_LINK_MASK |
+ VEND1_GLOBAL_LED_PROV_FORCE_ON |
+ VEND1_GLOBAL_LED_PROV_RX_ACT |
+ VEND1_GLOBAL_LED_PROV_TX_ACT, val);
+}
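
The two hw_control helpers above are symmetric translations between the generic TRIGGER_NETDEV_* rule bits and the LED_PROV register bits, with the plain LINK trigger expanding to every speed-specific bit. A simplified standalone sketch of the set direction; the trigger numbering and register bit positions here are illustrative, and only LINK100/LINK1000/RX/TX are modelled.

#include <stdint.h>
#include <stdio.h>

/* Register bit positions mirror the VEND1_GLOBAL_LED_PROV_* defines above;
 * the trigger enumeration is a stand-in for the ethtool trigger bits.
 */
#define LED_PROV_LINK100	(1u << 5)
#define LED_PROV_LINK1000	(1u << 6)
#define LED_PROV_RX_ACT		(1u << 3)
#define LED_PROV_TX_ACT		(1u << 2)

enum { TRIG_LINK, TRIG_LINK_100, TRIG_LINK_1000, TRIG_RX, TRIG_TX };

static uint16_t rules_to_reg(unsigned long rules)
{
	uint16_t val = 0;

	/* the plain LINK trigger means "any speed", so it sets every LINK bit */
	if (rules & ((1ul << TRIG_LINK_100) | (1ul << TRIG_LINK)))
		val |= LED_PROV_LINK100;
	if (rules & ((1ul << TRIG_LINK_1000) | (1ul << TRIG_LINK)))
		val |= LED_PROV_LINK1000;
	if (rules & (1ul << TRIG_RX))
		val |= LED_PROV_RX_ACT;
	if (rules & (1ul << TRIG_TX))
		val |= LED_PROV_TX_ACT;

	return val;
}

int main(void)
{
	printf("0x%04x\n", rules_to_reg((1ul << TRIG_LINK) | (1ul << TRIG_RX)));
	return 0;
}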
+
+int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index),
+ VEND1_GLOBAL_LED_DRIVE_VDD, enable);
+}
+
+int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ bool active_low = false;
+ u32 mode;
+
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ active_low = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Save LED driver vdd state to restore on SW reset */
+ if (active_low)
+ priv->leds_active_low |= BIT(index);
+
+ return aqr_phy_led_active_low_set(phydev, index, active_low);
+}
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index d34cdec47636..d12e35374231 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -29,6 +29,7 @@
#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
#define PHY_ID_AQR114C 0x31c31c22
+#define PHY_ID_AQR115C 0x31c31c33
#define PHY_ID_AQR813 0x31c31cb2
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
@@ -84,49 +85,12 @@
#define MDIO_AN_RX_VEND_STAT3 0xe832
#define MDIO_AN_RX_VEND_STAT3_AFR BIT(0)
-/* MDIO_MMD_C22EXT */
-#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292
-#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294
-#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297
-#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313
-#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315
-#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317
-#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318
-#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319
-#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
-#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
-
/* Sleep and timeout for checking if the Processor-Intensive
* MDIO operation is finished
*/
#define AQR107_OP_IN_PROG_SLEEP 1000
#define AQR107_OP_IN_PROG_TIMEOUT 100000
-struct aqr107_hw_stat {
- const char *name;
- int reg;
- int size;
-};
-
-#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
-static const struct aqr107_hw_stat aqr107_hw_stats[] = {
- SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26),
- SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26),
- SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8),
- SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26),
- SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26),
- SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8),
- SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8),
- SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8),
- SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16),
- SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22),
-};
-#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
-
-struct aqr107_priv {
- u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
-};
-
static int aqr107_get_sset_count(struct phy_device *phydev)
{
return AQR107_SGMII_STAT_SZ;
@@ -478,7 +442,7 @@ static int aqr107_set_tunable(struct phy_device *phydev,
* The chip also provides a "reset completed" bit, but it's cleared after
 * read. Therefore the function would time out if called again.
*/
-static int aqr107_wait_reset_complete(struct phy_device *phydev)
+int aqr_wait_reset_complete(struct phy_device *phydev)
{
int val;
@@ -512,7 +476,9 @@ static void aqr107_chip_info(struct phy_device *phydev)
static int aqr107_config_init(struct phy_device *phydev)
{
- int ret;
+ struct aqr107_priv *priv = phydev->priv;
+ u32 led_active_low;
+ int ret, index = 0;
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
@@ -529,11 +495,23 @@ static int aqr107_config_init(struct phy_device *phydev)
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
"Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
- ret = aqr107_wait_reset_complete(phydev);
+ ret = aqr_wait_reset_complete(phydev);
if (!ret)
aqr107_chip_info(phydev);
- return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+ ret = aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+ if (ret)
+ return ret;
+
+ /* Restore LED polarity state after reset */
+ for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) {
+ ret = aqr_phy_led_active_low_set(phydev, index, led_active_low);
+ if (ret)
+ return ret;
+ index++;
+ }
+
+ return 0;
}
static int aqcs109_config_init(struct phy_device *phydev)
@@ -545,7 +523,7 @@ static int aqcs109_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
return -ENODEV;
- ret = aqr107_wait_reset_complete(phydev);
+ ret = aqr_wait_reset_complete(phydev);
if (!ret)
aqr107_chip_info(phydev);
@@ -675,7 +653,13 @@ static int aqr107_fill_interface_modes(struct phy_device *phydev)
unsigned long *possible = phydev->possible_interfaces;
unsigned int serdes_mode, rate_adapt;
phy_interface_t interface;
- int i, val;
+ int i, val, ret;
+
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_CFG_10M, val, val != 0,
+ 1000, 100000, false);
+ if (ret)
+ return ret;
/* Walk the media-speed configuration registers to determine which
* host-side serdes modes may be used by the PHY depending on the
@@ -823,6 +807,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
@@ -842,6 +831,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
@@ -861,6 +855,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
@@ -880,6 +879,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
@@ -906,6 +910,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
@@ -943,6 +952,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
@@ -962,6 +976,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR114C),
@@ -981,6 +1000,35 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR115C),
+ .name = "Aquantia AQR115C",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr113c_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
@@ -1000,6 +1048,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
};
@@ -1020,6 +1073,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR115C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
};
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 876f28fd8256..6c52f7dda514 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -794,6 +794,49 @@ out:
return ret;
}
+static int bcm_setup_lre_forced(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->speed == SPEED_100)
+ ctl |= LRECR_SPEED100;
+
+ if (phydev->duplex != DUPLEX_FULL)
+ return -EOPNOTSUPP;
+
+ return phy_modify(phydev, MII_BCM54XX_LRECR, LRECR_SPEED100, ctl);
+}
+
+/**
+ * bcm_linkmode_adv_to_lre_adv_t - translate linkmode advertisement to LDS
+ * @advertising: the linkmode advertisement settings
+ * Return: LDS Auto-Negotiation Advertised Ability register value
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to PHY LDS autonegotiation advertisements for the
+ * MII_BCM54XX_LREANAA register of Broadcom PHYs capable of LDS.
+ */
+static u32 bcm_linkmode_adv_to_lre_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ advertising))
+ result |= LREANAA_10_1PAIR;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ advertising))
+ result |= LREANAA_100_1PAIR;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= LRELPA_PAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= LRELPA_PAUSE_ASYM;
+
+ return result;
+}
+
int bcm_phy_cable_test_start(struct phy_device *phydev)
{
return _bcm_phy_cable_test_start(phydev, false);
@@ -1066,6 +1109,78 @@ int bcm_phy_led_brightness_set(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(bcm_phy_led_brightness_set);
+int bcm_setup_lre_master_slave(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl = LRECR_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return phy_modify_changed(phydev, MII_BCM54XX_LRECR, LRECR_MASTER, ctl);
+}
+EXPORT_SYMBOL_GPL(bcm_setup_lre_master_slave);
+
+int bcm_config_lre_aneg(struct phy_device *phydev, bool changed)
+{
+ int err;
+
+ if (genphy_config_eee_advert(phydev))
+ changed = true;
+
+ err = bcm_setup_lre_master_slave(phydev);
+ if (err < 0)
+ return err;
+ else if (err)
+ changed = true;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return bcm_setup_lre_forced(phydev);
+
+ err = bcm_config_lre_advert(phydev);
+ if (err < 0)
+ return err;
+ else if (err)
+ changed = true;
+
+ return genphy_check_and_restart_aneg(phydev, changed);
+}
+EXPORT_SYMBOL_GPL(bcm_config_lre_aneg);
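
bcm_config_lre_aneg() above follows the common phylib convention that configuration helpers return a negative errno on failure, a positive value when they actually changed something, and 0 otherwise, so auto-negotiation is only restarted when needed. A standalone sketch of that return-value convention with stand-in steps:

#include <stdio.h>

/* Stand-in for helpers like bcm_setup_lre_master_slave()/bcm_config_lre_advert():
 * <0 error, >0 something changed, 0 nothing to do.
 */
static int step_returning(int rc) { return rc; }

static int config_aneg(void)
{
	int changed = 0, err;

	err = step_returning(1);	/* pretend master/slave was updated */
	if (err < 0)
		return err;
	if (err > 0)
		changed = 1;

	err = step_returning(0);	/* advertisement already up to date */
	if (err < 0)
		return err;
	if (err > 0)
		changed = 1;

	return changed;	/* caller restarts aneg only if non-zero */
}

int main(void)
{
	printf("restart aneg: %s\n", config_aneg() ? "yes" : "no");
	return 0;
}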
+
+/**
+ * bcm_config_lre_advert - sanitize and advertise Long-Distance Signaling
+ * auto-negotiation parameters
+ * @phydev: target phy_device struct
+ * Return: 0 if the PHY's advertisement hasn't changed, < 0 on error,
+ * > 0 if it has changed
+ *
+ * Writes MII_BCM54XX_LREANAA with the appropriate values. The values must be
+ * sanitized beforehand to make sure we only advertise what is supported;
+ * this sanitization is already done in phy_ethtool_ksettings_set().
+ */
+int bcm_config_lre_advert(struct phy_device *phydev)
+{
+ u32 adv = bcm_linkmode_adv_to_lre_adv_t(phydev->advertising);
+
+ /* Setup BroadR-Reach mode advertisement */
+ return phy_modify_changed(phydev, MII_BCM54XX_LREANAA,
+ LRE_ADVERTISE_ALL | LREANAA_PAUSE |
+ LREANAA_PAUSE_ASYM, adv);
+}
+EXPORT_SYMBOL_GPL(bcm_config_lre_advert);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index b52189e45a84..bceddbc860eb 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -121,4 +121,8 @@ irqreturn_t bcm_phy_wol_isr(int irq, void *dev_id);
int bcm_phy_led_brightness_set(struct phy_device *phydev,
u8 index, enum led_brightness value);
+int bcm_setup_lre_master_slave(struct phy_device *phydev);
+int bcm_config_lre_aneg(struct phy_device *phydev, bool changed);
+int bcm_config_lre_advert(struct phy_device *phydev);
+
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index 617d384d4551..874a1b64b115 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -841,7 +841,7 @@ static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
}
static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
@@ -931,6 +931,9 @@ struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
return ERR_CAST(clock);
priv->ptp_clock = clock;
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
priv->phydev = phydev;
bcm_ptp_init(priv);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 370e4ed45098..ddded162c44c 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -5,6 +5,8 @@
* Broadcom BCM5411, BCM5421 and BCM5461 Gigabit Ethernet
* transceivers.
*
+ * Broadcom BCM54810, BCM54811 BroadR-Reach transceivers.
+ *
* Copyright (c) 2006 Maciej W. Rozycki
*
* Inspired by code written by Amy Fong.
@@ -36,6 +38,29 @@ struct bcm54xx_phy_priv {
struct bcm_ptp_private *ptp;
int wake_irq;
bool wake_irq_enabled;
+ bool brr_mode;
+};
+
+/* Link modes for BCM54811 PHY */
+static const int bcm54811_linkmodes[] = {
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT
+};
+
+/* Long-Distance Signaling (BroadR-Reach mode aneg) relevant linkmode bits */
+static const int lds_br_bits[] = {
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT
};
static bool bcm54xx_phy_can_wakeup(struct phy_device *phydev)
@@ -347,6 +372,61 @@ static void bcm54xx_ptp_config_init(struct phy_device *phydev)
bcm_ptp_config_init(phydev);
}
+static int bcm5481x_set_brrmode(struct phy_device *phydev, bool on)
+{
+ int reg;
+ int err;
+ u16 val;
+
+ reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
+
+ if (reg < 0)
+ return reg;
+
+ if (on)
+ reg |= BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+ else
+ reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+
+ err = bcm_phy_write_exp(phydev,
+ BCM54810_EXP_BROADREACH_LRE_MISC_CTL, reg);
+ if (err)
+ return err;
+
+ /* Ensure the LRE or IEEE register set is accessed according to the
+ * BRR on/off state, thus set the override.
+ */
+ val = BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN;
+ if (!on)
+ val |= BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL;
+
+ return bcm_phy_write_exp(phydev,
+ BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL, val);
+}
+
+static int bcm54811_config_init(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int err, reg;
+
+ /* Enable CLK125 MUX on LED4 if ref clock is enabled. */
+ if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
+ reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0);
+ if (reg < 0)
+ return reg;
+ err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0,
+ BCM54612E_LED4_CLK125OUT_EN | reg);
+ if (err < 0)
+ return err;
+ }
+
+ /* With BCM54811, BroadR-Reach implies no autoneg */
+ if (priv->brr_mode)
+ phydev->autoneg = 0;
+
+ return bcm5481x_set_brrmode(phydev, priv->brr_mode);
+}
+
static int bcm54xx_config_init(struct phy_device *phydev)
{
int reg, err, val;
@@ -399,6 +479,9 @@ static int bcm54xx_config_init(struct phy_device *phydev)
BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
val);
break;
+ case PHY_ID_BCM54811:
+ err = bcm54811_config_init(phydev);
+ break;
}
if (err)
return err;
@@ -553,52 +636,117 @@ static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return -EOPNOTSUPP;
}
-static int bcm54811_config_init(struct phy_device *phydev)
+
+/**
+ * bcm5481x_read_abilities - read PHY abilities from LRESR or Clause 22
+ * (BMSR) registers, based on whether the PHY is in BroadR-Reach or IEEE mode
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the PHY's abilities and populates phydev->supported
+ * accordingly. The register to read the abilities from is determined by
+ * the BRR mode setting of the PHY as read from the device tree.
+ * Note that the LRE and IEEE sets of abilities are disjoint; in other words,
+ * not only do the link modes differ, but the auto-negotiation and
+ * master-slave setup are also controlled differently.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+static int bcm5481x_read_abilities(struct phy_device *phydev)
{
- int err, reg;
+ struct device_node *np = phydev->mdio.dev.of_node;
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int i, val, err;
- /* Disable BroadR-Reach function. */
- reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
- reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
- err = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
- reg);
- if (err < 0)
+ for (i = 0; i < ARRAY_SIZE(bcm54811_linkmodes); i++)
+ linkmode_clear_bit(bcm54811_linkmodes[i], phydev->supported);
+
+ priv->brr_mode = of_property_read_bool(np, "brr-mode");
+
+ /* Set BroadR-Reach mode as configured in the DT. */
+ err = bcm5481x_set_brrmode(phydev, priv->brr_mode);
+ if (err)
return err;
- err = bcm54xx_config_init(phydev);
+ if (priv->brr_mode) {
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
- /* Enable CLK125 MUX on LED4 if ref clock is enabled. */
- if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
- reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0);
- err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0,
- BCM54612E_LED4_CLK125OUT_EN | reg);
- if (err < 0)
- return err;
+ val = phy_read(phydev, MII_BCM54XX_LRESR);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported,
+ val & LRESR_LDSABILITY);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ phydev->supported,
+ val & LRESR_100_1PAIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ phydev->supported,
+ val & LRESR_10_1PAIR);
+ return 0;
}
- return err;
+ return genphy_read_abilities(phydev);
}
-static int bcm5481_config_aneg(struct phy_device *phydev)
+static int bcm5481x_config_delay_swap(struct phy_device *phydev)
{
struct device_node *np = phydev->mdio.dev.of_node;
- int ret;
-
- /* Aneg firstly. */
- ret = genphy_config_aneg(phydev);
- /* Then we can set up the delay. */
+ /* Set up the delay. */
bcm54xx_config_clock_delay(phydev);
if (of_property_read_bool(np, "enet-phy-lane-swap")) {
/* Lane Swap - Undocumented register...magic! */
- ret = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_SEL_ER + 0x9,
- 0x11B);
+ int ret = bcm_phy_write_exp(phydev,
+ MII_BCM54XX_EXP_SEL_ER + 0x9,
+ 0x11B);
if (ret < 0)
return ret;
}
- return ret;
+ return 0;
+}
+
+static int bcm5481_config_aneg(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Aneg firstly. */
+ if (priv->brr_mode)
+ ret = bcm_config_lre_aneg(phydev, false);
+ else
+ ret = genphy_config_aneg(phydev);
+
+ if (ret)
+ return ret;
+
+ /* Then we can set up the delay and swap. */
+ return bcm5481x_config_delay_swap(phydev);
+}
+
+static int bcm54811_config_aneg(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Aneg firstly. */
+ if (priv->brr_mode) {
+ /* BCM54811 is only capable of autonegotiation in IEEE mode */
+ phydev->autoneg = 0;
+ ret = bcm_config_lre_aneg(phydev, false);
+ } else {
+ ret = genphy_config_aneg(phydev);
+ }
+
+ if (ret)
+ return ret;
+
+ /* Then we can set up the delay and swap. */
+ return bcm5481x_config_delay_swap(phydev);
}
struct bcm54616s_phy_priv {
@@ -1062,6 +1210,203 @@ static void bcm54xx_link_change_notify(struct phy_device *phydev)
bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret);
}
+static int lre_read_master_slave(struct phy_device *phydev)
+{
+ int cfg = MASTER_SLAVE_CFG_UNKNOWN, state;
+ int val;
+
+ /* In BroadR-Reach mode we are always capable of master-slave
+ * and there is no preferred master or slave configuration
+ */
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ val = phy_read(phydev, MII_BCM54XX_LRECR);
+ if (val < 0)
+ return val;
+
+ if ((val & LRECR_LDSEN) == 0) {
+ if (val & LRECR_MASTER)
+ cfg = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ cfg = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ }
+
+ val = phy_read(phydev, MII_BCM54XX_LRELDSE);
+ if (val < 0)
+ return val;
+
+ if (val & LDSE_MASTER)
+ state = MASTER_SLAVE_STATE_MASTER;
+ else
+ state = MASTER_SLAVE_STATE_SLAVE;
+
+ phydev->master_slave_get = cfg;
+ phydev->master_slave_state = state;
+
+ return 0;
+}
+
+/* Read LDS Link Partner Ability in BroadR-Reach mode */
+static int lre_read_lpa(struct phy_device *phydev)
+{
+ int i, lrelpa;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ if (!phydev->autoneg_complete) {
+ /* aneg not yet done, reset all relevant bits */
+ for (i = 0; i < ARRAY_SIZE(lds_br_bits); i++)
+ linkmode_clear_bit(lds_br_bits[i],
+ phydev->lp_advertising);
+
+ return 0;
+ }
+
+ /* Long-Distance Signaling Link Partner Ability */
+ lrelpa = phy_read(phydev, MII_BCM54XX_LRELPA);
+ if (lrelpa < 0)
+ return lrelpa;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_PAUSE_ASYM);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_PAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_100_1PAIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_10_1PAIR);
+ } else {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ return 0;
+}
+
+static int lre_read_status_fixed(struct phy_device *phydev)
+{
+ int lrecr = phy_read(phydev, MII_BCM54XX_LRECR);
+
+ if (lrecr < 0)
+ return lrecr;
+
+ phydev->duplex = DUPLEX_FULL;
+
+ if (lrecr & LRECR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ return 0;
+}
+
+/**
+ * lre_update_link - update link status in @phydev
+ * @phydev: target phy_device struct
+ * Return: 0 on success, < 0 on error
+ *
+ * Description: Update the value in phydev->link to reflect the
+ * current link value. In order to do this, we need to read
+ * the status register twice, keeping the second value.
+ * This is a genphy_update_link variant modified to work on the LRE
+ * registers of BroadR-Reach PHYs.
+ */
+static int lre_update_link(struct phy_device *phydev)
+{
+ int status = 0, lrecr;
+
+ lrecr = phy_read(phydev, MII_BCM54XX_LRECR);
+ if (lrecr < 0)
+ return lrecr;
+
+ /* Autoneg is being started, therefore disregard the LRESR value and
+ * report link as down.
+ */
+ if (lrecr & BMCR_ANRESTART)
+ goto done;
+
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * in polling mode to detect such short link drops except
+ * the link was already down.
+ */
+ if (!phy_polling_mode(phydev) || !phydev->link) {
+ status = phy_read(phydev, MII_BCM54XX_LRESR);
+ if (status < 0)
+ return status;
+ else if (status & LRESR_LSTATUS)
+ goto done;
+ }
+
+ /* Read link and autonegotiation status */
+ status = phy_read(phydev, MII_BCM54XX_LRESR);
+ if (status < 0)
+ return status;
+done:
+ phydev->link = status & LRESR_LSTATUS ? 1 : 0;
+ phydev->autoneg_complete = status & LRESR_LDSCOMPLETE ? 1 : 0;
+
+ /* Consider the case that autoneg was started and "aneg complete"
+ * bit has been reset, but "link up" bit not yet.
+ */
+ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
+ phydev->link = 0;
+
+ return 0;
+}
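
lre_update_link() copes with a latched-low link-status bit: the first read may still report a momentary drop that has since recovered, so in the non-polling (or link-was-down) case the register is read twice and the second value is kept. A standalone sketch that simulates a latched register to show why the second read is the trustworthy one:

#include <stdbool.h>
#include <stdio.h>

/* Simulated latched-low status register: a past link drop stays visible
 * until the register is read once, just like LRESR/BMSR link status.
 */
static bool latched_drop = true;	/* a momentary drop happened earlier */
static bool current_link = true;

static bool read_latched_status(void)
{
	bool reported = current_link && !latched_drop;

	latched_drop = false;		/* latch clears on read */
	return reported;
}

int main(void)
{
	bool first = read_latched_status();	/* still sees the latched drop */
	bool second = read_latched_status();	/* sees the live link state */

	printf("first=%d second=%d -> link=%d\n", first, second, second);
	return 0;
}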
+
+/* Get the status in BroadR-Reach mode just like genphy_read_status does
+ * in normal mode.
+ */
+static int bcm54811_lre_read_status(struct phy_device *phydev)
+{
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = lre_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = lre_read_master_slave(phydev);
+ if (err < 0)
+ return err;
+
+ /* Read LDS Link Partner Ability */
+ err = lre_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_linkmode(phydev);
+ else if (phydev->autoneg == AUTONEG_DISABLE)
+ err = lre_read_status_fixed(phydev);
+
+ return err;
+}
+
+static int bcm54811_read_status(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->brr_mode)
+ return bcm54811_lre_read_status(phydev);
+
+ return genphy_read_status(phydev);
+}
+
static struct phy_driver broadcom_drivers[] = {
{
.phy_id = PHY_ID_BCM5411,
@@ -1211,10 +1556,12 @@ static struct phy_driver broadcom_drivers[] = {
.get_strings = bcm_phy_get_strings,
.get_stats = bcm54xx_get_stats,
.probe = bcm54xx_phy_probe,
- .config_init = bcm54811_config_init,
- .config_aneg = bcm5481_config_aneg,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm54811_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .read_status = bcm54811_read_status,
+ .get_features = bcm5481x_read_abilities,
.suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
.link_change_notify = bcm54xx_link_change_notify,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 5c42c47dc564..075d2beea716 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1395,7 +1395,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
}
static int dp83640_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct dp83640_private *dp83640 =
container_of(mii_ts, struct dp83640_private, mii_ts);
@@ -1447,6 +1447,8 @@ static int dp83640_probe(struct phy_device *phydev)
for (i = 0; i < MAX_RXTS; i++)
list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
phydev->mii_ts = &dp83640->mii_ts;
phydev->priv = dp83640;
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index d7616b13c594..551e37786c2d 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
@@ -29,6 +30,10 @@
#define DP83TD510E_INT1_LINK BIT(13)
#define DP83TD510E_INT1_LINK_EN BIT(5)
+#define DP83TD510E_CTRL 0x1f
+#define DP83TD510E_CTRL_HW_RESET BIT(15)
+#define DP83TD510E_CTRL_SW_RESET BIT(14)
+
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
@@ -53,6 +58,117 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
+ *
+ * I assume that this PHY is using a variation of Spread Spectrum Time Domain
+ * Reflectometry (SSTDR) rather than the commonly used TDR found in many PHYs.
+ * The following observations likely confirm this:
+ * - The DP83TD510 PHY transmits a modulated signal of configurable length
+ * (default 16000 µs) instead of a single pulse pattern, which is typical
+ * for traditional TDR.
+ * - The pulse observed on the wire, triggered by the HW RESET register, is not
+ * part of the cable testing process.
+ *
+ * SSTDR seems to be a logical choice for the 10BaseT1L environment due to
+ * its improved noise resistance, making it suitable for
+ * environments with significant electrical noise, such as long 10BaseT1L cable
+ * runs.
+ *
+ * Configuration Variables:
+ * The SSTDR variation used in this PHY involves more configuration variables
+ * that can dramatically affect the functionality and precision of cable
+ * testing. Since most of these configuration options are either not well
+ * documented or documented with minimal details, the following sections
+ * describe my understanding and observations of these variables and their
+ * impact on TDR functionality.
+ *
+ * Timeline:
+ * ,<--cfg_pre_silence_time
+ * | ,<-SSTDR Modulated Transmission
+ * | | ,<--cfg_post_silence_time
+ * | | | ,<--Force Link Mode
+ * |<--'-->|<-------'------->|<--'-->|<--------'------->|
+ *
+ * - cfg_pre_silence_time: Optional silence time before TDR transmission starts.
+ * - SSTDR Modulated Transmission: Transmission duration configured by
+ * cfg_tdr_tx_duration and amplitude configured by cfg_tdr_tx_type.
+ * - cfg_post_silence_time: Silence time after TDR transmission.
+ * - Force Link Mode: If nothing is configured after cfg_post_silence_time,
+ * the PHY continues in force link mode without autonegotiation.
+ */
+
+#define DP83TD510E_TDR_CFG 0x1e
+#define DP83TD510E_TDR_START BIT(15)
+#define DP83TD510E_TDR_DONE BIT(1)
+#define DP83TD510E_TDR_FAIL BIT(0)
+
+#define DP83TD510E_TDR_CFG1 0x300
+/* cfg_tdr_tx_type: Transmit voltage level for TDR.
+ * 0 = 1V, 1 = 2.4V
+ * Note: Using different voltage levels may not work
+ * in all configuration variations. For example, setting
+ * 2.4V may give different cable length measurements.
+ * Other settings may be needed to make it work properly.
+ */
+#define DP83TD510E_TDR_TX_TYPE BIT(12)
+#define DP83TD510E_TDR_TX_TYPE_1V 0
+#define DP83TD510E_TDR_TX_TYPE_2_4V 1
+/* cfg_post_silence_time: Time after the TDR sequence. Since we force master
+ * mode for the TDR, the PHY will proceed with a forced link state after this
+ * time. For Linux it is better to set the max value to avoid false link state
+ * detection.
+ */
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME GENMASK(3, 2)
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_0MS 0
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_10MS 1
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_100MS 2
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS 3
+/* cfg_pre_silence_time: Time before the TDR sequence. It should be enough to
+ * settle down all pulses and reflections. Since 10BASE-T1L has a maximum
+ * cable length of 2000m, we can set it to 1ms.
+ */
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME GENMASK(1, 0)
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_0MS 0
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS 1
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_100MS 2
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_1000MS 3
+
+#define DP83TD510E_TDR_CFG2 0x301
+#define DP83TD510E_TDR_END_TAP_INDEX_1 GENMASK(14, 8)
+#define DP83TD510E_TDR_END_TAP_INDEX_1_DEF 36
+#define DP83TD510E_TDR_START_TAP_INDEX_1 GENMASK(6, 0)
+#define DP83TD510E_TDR_START_TAP_INDEX_1_DEF 4
+
+#define DP83TD510E_TDR_CFG3 0x302
+/* cfg_tdr_tx_duration: Duration of the TDR transmission in microseconds.
+ * This value sets the duration of the modulated signal used for TDR
+ * measurements.
+ * - Default: 16000 µs
+ * - Observation: A minimum duration of 6000 µs is recommended to ensure
+ * accurate detection of cable faults. Durations shorter than 6000 µs may
+ * result in incomplete data, especially for shorter cables (e.g., 20 meters),
+ * leading to false "OK" results. Longer durations (e.g., 6000 µs or more)
+ * provide better accuracy, particularly for detecting open circuits.
+ */
+#define DP83TD510E_TDR_TX_DURATION_US GENMASK(15, 0)
+#define DP83TD510E_TDR_TX_DURATION_US_DEF 16000
+
+#define DP83TD510E_TDR_FAULT_CFG1 0x303
+#define DP83TD510E_TDR_FLT_LOC_OFFSET_1 GENMASK(14, 8)
+#define DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF 4
+#define DP83TD510E_TDR_FLT_INIT_1 GENMASK(7, 0)
+#define DP83TD510E_TDR_FLT_INIT_1_DEF 62
+
+#define DP83TD510E_TDR_FAULT_STAT 0x30c
+#define DP83TD510E_TDR_PEAK_DETECT BIT(11)
+#define DP83TD510E_TDR_PEAK_SIGN BIT(10)
+#define DP83TD510E_TDR_PEAK_LOCATION GENMASK(9, 0)
+
+/* Not documented registers and values but recommended according to
+ * "DP83TD510E Cable Diagnostics Toolkit revC"
+ */
+#define DP83TD510E_UNKN_030E 0x30e
+#define DP83TD510E_030E_VAL 0x2520
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -198,6 +314,151 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev)
return DP83TD510_SQI_MAX;
}
+/**
+ * dp83td510_cable_test_start - Start the cable test for the DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This sequence is implemented according to the "Application Note DP83TD510E
+ * Cable Diagnostics Toolkit revC".
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+static int dp83td510_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL,
+ DP83TD510E_CTRL_HW_RESET);
+ if (ret)
+ return ret;
+
+ ret = genphy_c45_an_disable_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* Force master mode */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST);
+ if (ret)
+ return ret;
+
+ /* There is no official recommendation for this register, but it is
+ * better to use 1V for TDR since other values seem to be optimized
+ * for this amplitude. Apart from the amplitude, it is better to configure
+ * the pre-TDR silence time to 10ms to avoid false reflections (a value of
+ * 0 seems to be too short, otherwise we would need to implement our own
+ * silence time). Also, the post-TDR silence time should be set to 1000ms
+ * to avoid false link state detection; this matches the polling interval
+ * of the PHY framework. The idea is to wait until
+ * dp83td510_cable_test_get_status() is called and to reconfigure the PHY
+ * to the default state within the post-silence time window.
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG1,
+ DP83TD510E_TDR_TX_TYPE |
+ DP83TD510E_TDR_CFG1_POST_SILENCE_TIME |
+ DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME,
+ DP83TD510E_TDR_TX_TYPE_1V |
+ DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS |
+ DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG2,
+ FIELD_PREP(DP83TD510E_TDR_END_TAP_INDEX_1,
+ DP83TD510E_TDR_END_TAP_INDEX_1_DEF) |
+ FIELD_PREP(DP83TD510E_TDR_START_TAP_INDEX_1,
+ DP83TD510E_TDR_START_TAP_INDEX_1_DEF));
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_FAULT_CFG1,
+ FIELD_PREP(DP83TD510E_TDR_FLT_LOC_OFFSET_1,
+ DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF) |
+ FIELD_PREP(DP83TD510E_TDR_FLT_INIT_1,
+ DP83TD510E_TDR_FLT_INIT_1_DEF));
+ if (ret)
+ return ret;
+
+ /* Undocumented register, from the "Application Note DP83TD510E Cable
+ * Diagnostics Toolkit revC".
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_UNKN_030E,
+ DP83TD510E_030E_VAL);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG3,
+ DP83TD510E_TDR_TX_DURATION_US_DEF);
+ if (ret)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL,
+ DP83TD510E_CTRL_SW_RESET);
+ if (ret)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG,
+ DP83TD510E_TDR_START);
+}
+
+/**
+ * dp83td510_cable_test_get_status - Get the status of the cable test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, stat;
+
+ *finished = false;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & DP83TD510E_TDR_DONE))
+ return 0;
+
+ if (!(ret & DP83TD510E_TDR_FAIL)) {
+ int location;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TD510E_TDR_FAULT_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DP83TD510E_TDR_PEAK_DETECT) {
+ if (ret & DP83TD510E_TDR_PEAK_SIGN)
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ else
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+
+ location = FIELD_GET(DP83TD510E_TDR_PEAK_LOCATION,
+ ret) * 100;
+ ethnl_cable_test_fault_length(phydev,
+ ETHTOOL_A_CABLE_PAIR_A,
+ location);
+ } else {
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ }
+ } else {
+ /* Most probably we have an active link partner */
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+
+ *finished = true;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+
+ return phy_init_hw(phydev);
+}
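
dp83td510_cable_test_get_status() converts the TDR peak location to a fault distance by multiplying the 10-bit location field by 100 before handing it to ethnl_cable_test_fault_length(), which takes centimetres; the implied one-metre-per-unit scale is an inference from that factor, not a documented fact. A standalone sketch of the conversion:

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch: extract the peak-location field and scale it the same
 * way the driver does. The one-metre-per-unit interpretation is an assumption.
 */
#define TDR_PEAK_LOCATION_MASK	0x03ff	/* GENMASK(9, 0) */

static unsigned int peak_to_fault_length_cm(uint16_t fault_stat)
{
	return (fault_stat & TDR_PEAK_LOCATION_MASK) * 100;
}

int main(void)
{
	/* a peak reported at location 42 -> fault ~4200 cm (~42 m) away */
	printf("%u cm\n", peak_to_fault_length_cm(42));
	return 0;
}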
+
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on MDIO bus if no RMII clock is enabled.
@@ -221,6 +482,7 @@ static struct phy_driver dp83td510_driver[] = {
PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
.name = "TI DP83TD510E",
+ .flags = PHY_POLL_CABLE_TEST,
.config_aneg = dp83td510_config_aneg,
.read_status = dp83td510_read_status,
.get_features = dp83td510_get_features,
@@ -228,6 +490,8 @@ static struct phy_driver dp83td510_driver[] = {
.handle_interrupt = dp83td510_handle_interrupt,
.get_sqi = dp83td510_get_sqi,
.get_sqi_max = dp83td510_get_sqi_max,
+ .cable_test_start = dp83td510_cable_test_start,
+ .cable_test_get_status = dp83td510_cable_test_get_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 326c9770a6dc..c706429b225a 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -17,6 +17,11 @@
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
+#define DP83TG720S_LPS_CFG3 0x18c
+/* Power modes are documented as bit fields but used as values */
+/* Power Mode 0 is Normal mode */
+#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
@@ -31,11 +36,20 @@
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
+ int ret;
+
/* Autoneg is not supported and this PHY supports only one speed.
* We need to care only about master/slave configuration if it was
 * changed by the user.
*/
- return genphy_c45_pma_baset1_setup_master_slave(phydev);
+ ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
+ if (ret)
+ return ret;
+
+ /* Re-read role configuration to make changes visible even if
+ * the link is in administrative down state.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static int dp83tg720_read_status(struct phy_device *phydev)
@@ -64,6 +78,8 @@ static int dp83tg720_read_status(struct phy_device *phydev)
return ret;
/* After HW reset we need to restore master/slave configuration.
+ * genphy_c45_pma_baset1_read_master_slave() call will be done
+ * by the dp83tg720_config_aneg() function.
*/
ret = dp83tg720_config_aneg(phydev);
if (ret)
@@ -154,10 +170,24 @@ static int dp83tg720_config_init(struct phy_device *phydev)
*/
usleep_range(1000, 2000);
- if (phy_interface_is_rgmii(phydev))
- return dp83tg720_config_rgmii_delay(phydev);
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = dp83tg720_config_rgmii_delay(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* In case the PHY is bootstrapped in managed mode, we need to
+ * wake it.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3,
+ DP83TG720S_LPS_CFG3_PWR_MODE_0);
+ if (ret)
+ return ret;
- return 0;
+ /* Make role configuration visible for ethtool on init and after
+ * reset.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static struct phy_driver dp83tg720_driver[] = {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 13e30ea7eec5..dd519805deee 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -866,6 +866,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
+ /* Chip can be powered down by the bootstrap code. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (ret & BMCR_PDOWN) {
+ ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
+ if (ret < 0)
+ return ret;
+ usleep_range(1000, 2000);
+ }
+
ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
if (ret)
return ret;
@@ -1939,7 +1950,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
{0x1c, 0x20, 0xeeee},
};
-static int ksz9477_config_init(struct phy_device *phydev)
+static int ksz9477_phy_errata(struct phy_device *phydev)
{
int err;
int i;
@@ -1967,16 +1978,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ err = genphy_restart_aneg(phydev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int ksz9477_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ err = ksz9477_phy_errata(phydev);
+ if (err)
+ return err;
+ }
+
/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
phydev->eee_broken_modes = -1;
- err = genphy_restart_aneg(phydev);
- if (err)
- return err;
-
return kszphy_config_init(phydev);
}
@@ -2085,6 +2110,71 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
+static int ksz9477_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* No need to initialize registers if not powered down. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ ret = ksz9477_phy_errata(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
+static int ksz8061_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* This function can be called twice when the Ethernet device is on. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+ usleep_range(1000, 2000);
+
+ /* Re-program the value after chip is reset. */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -2462,7 +2552,7 @@ static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
*seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
}
-static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct ethtool_ts_info *info)
+static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
struct phy_device *phydev = ptp_priv->phydev;
@@ -3691,6 +3781,9 @@ static void lan8814_ptp_init(struct phy_device *phydev)
ptp_priv->mii_ts.ts_info = lan8814_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
}
static int lan8814_ptp_probe_once(struct phy_device *phydev)
@@ -4029,7 +4122,7 @@ static int lan8841_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
err = phy_read(phydev, LAN8814_INTS);
- if (err)
+ if (err < 0)
return err;
/* Enable / disable interrupts. It is OK to enable PTP interrupt
@@ -4045,6 +4138,14 @@ static int lan8841_config_intr(struct phy_device *phydev)
return err;
err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ /* A positive value is not an error; it merely reflects the
+ * interrupt status that was read. Clear it so the caller does
+ * not mistake the status for an error code.
+ */
+ err = 0;
}
return err;
@@ -4216,7 +4317,7 @@ static irqreturn_t lan8841_handle_interrupt(struct phy_device *phydev)
}
static int lan8841_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct kszphy_ptp_priv *ptp_priv;
@@ -5181,6 +5282,9 @@ static int lan8841_probe(struct phy_device *phydev)
phydev->mii_ts = &ptp_priv->mii_ts;
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
return 0;
}
@@ -5327,10 +5431,11 @@ static struct phy_driver ksphy_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = kszphy_probe,
.config_init = ksz8061_config_init,
+ .soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -5484,7 +5589,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = ksz9477_resume,
.get_features = ksz9477_get_features,
} };
@@ -5508,6 +5613,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
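
The two resume handlers added above share one idea: re-run the chip-specific re-initialization only when BMCR reports the PHY was actually powered down, then wait out the internal reset before touching other registers. A minimal standalone sketch of that decision, not kernel code; the sample register values are hypothetical and BMCR_PDOWN is bit 11 (0x0800) as in <linux/mii.h>:

#include <stdbool.h>
#include <stdio.h>

#define BMCR_PDOWN 0x0800 /* IEEE 802.3 BMCR power-down bit */

static bool needs_reinit(unsigned int bmcr)
{
        return bmcr & BMCR_PDOWN;
}

int main(void)
{
        unsigned int samples[] = { 0x1140, 0x1940 }; /* normal, powered down */

        for (unsigned int i = 0; i < 2; i++)
                printf("BMCR=0x%04x -> %s\n", samples[i],
                       needs_reinit(samples[i]) ? "re-run init/errata" : "skip");
        return 0;
}

The same check also guards the KSZ9897-only errata path, which is selected by the low nibble of the PHY ID in the driver code above.
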
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0b88635f4fbc..d3273bc0da4a 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -12,8 +12,14 @@
#include <linux/of.h>
#include <dt-bindings/net/microchip-lan78xx.h>
+#define PHY_ID_LAN937X_TX 0x0007c190
+
+#define LAN937X_MODE_CTRL_STATUS_REG 0x11
+#define LAN937X_AUTOMDIX_EN BIT(7)
+#define LAN937X_MDI_MODE BIT(6)
+
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
-#define DRIVER_DESC "Microchip LAN88XX PHY driver"
+#define DRIVER_DESC "Microchip LAN88XX/LAN937X TX PHY driver"
struct lan88xx_priv {
int chip_id;
@@ -373,6 +379,115 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
}
}
+/**
+ * lan937x_tx_read_mdix_status - Read the MDIX status for the LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function reads the MDIX status of the LAN937x TX PHY and sets the
+ * mdix_ctrl and mdix fields of the phy_device structure accordingly.
+ * Note that MDIX status is not supported in AUTO mode, and will be set
+ * to invalid in such cases.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_read_mdix_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, LAN937X_MODE_CTRL_STATUS_REG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & LAN937X_AUTOMDIX_EN) {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ /* MDI/MDIX status is unknown */
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ } else if (ret & LAN937X_MDI_MODE) {
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ phydev->mdix = ETH_TP_MDI_X;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ phydev->mdix = ETH_TP_MDI;
+ }
+
+ return 0;
+}
+
+/**
+ * lan937x_tx_read_status - Read the status for the LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function reads the status of the LAN937x TX PHY and updates the
+ * phy_device structure accordingly.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan937x_tx_read_mdix_status(phydev);
+}
+
+/**
+ * lan937x_tx_set_mdix - Set the MDIX mode for the LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function configures the MDIX mode of the LAN937x TX PHY based on the
+ * mdix_ctrl field of the phy_device structure. The MDIX mode can be set to
+ * MDI (straight-through), MDIX (crossover), or AUTO (auto-MDIX). If the mode
+ * is not recognized, it returns 0 without making any changes.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_set_mdix(struct phy_device *phydev)
+{
+ u16 val;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ val = 0;
+ break;
+ case ETH_TP_MDI_X:
+ val = LAN937X_MDI_MODE;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = LAN937X_AUTOMDIX_EN;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify(phydev, LAN937X_MODE_CTRL_STATUS_REG,
+ LAN937X_AUTOMDIX_EN | LAN937X_MDI_MODE, val);
+}
+
+/**
+ * lan937x_tx_config_aneg - Configure auto-negotiation and fixed modes for the
+ * LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function configures the MDIX mode for the LAN937x TX PHY and then
+ * proceeds to configure the auto-negotiation or fixed mode settings
+ * based on the phy_device structure.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = lan937x_tx_set_mdix(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
static struct phy_driver microchip_phy_driver[] = {
{
.phy_id = 0x0007c132,
@@ -400,12 +515,21 @@ static struct phy_driver microchip_phy_driver[] = {
.set_wol = lan88xx_set_wol,
.read_page = lan88xx_read_page,
.write_page = lan88xx_write_page,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX),
+ .name = "Microchip LAN937x TX",
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = lan937x_tx_config_aneg,
+ .read_status = lan937x_tx_read_status,
} };
module_phy_driver(microchip_phy_driver);
static struct mdio_device_id __maybe_unused microchip_tbl[] = {
{ 0x0007c132, 0xfffffff2 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX) },
{ }
};
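
As a rough illustration of the MDIX decode performed by lan937x_tx_read_mdix_status() above, the sketch below maps the two register bits to the three reported states (auto maps to ETH_TP_MDI_AUTO with the resolved pair reported as invalid). It is standalone userspace code and the sample register values are invented:

#include <stdio.h>

#define AUTOMDIX_EN (1u << 7)
#define MDI_MODE    (1u << 6)

static const char *decode_mdix(unsigned int reg)
{
        if (reg & AUTOMDIX_EN)
                return "auto (resolved pair unknown)";
        if (reg & MDI_MODE)
                return "forced MDI-X";
        return "forced MDI";
}

int main(void)
{
        unsigned int samples[] = { 0x80, 0x40, 0x00 };

        for (int i = 0; i < 3; i++)
                printf("reg=0x%02x -> %s\n", samples[i], decode_mdix(samples[i]));
        return 0;
}
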
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index a838b61cd844..a35528497a57 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev)
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
lan87xx_cable_test_report_trans(detect));
- return 0;
+ return phy_init_hw(phydev);
}
static int lan87xx_cable_test_get_status(struct phy_device *phydev,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index eb0b032cb613..c1ddae36a2ae 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1134,7 +1134,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
}
static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
@@ -1570,6 +1570,9 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
return PTR_ERR(vsc8531->load_save);
}
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
vsc8531->ptp->phydev = phydev;
return 0;
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index b2d36a3a96f1..e5f8ac4b4604 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -107,6 +107,7 @@ struct gpy_priv {
u8 fw_major;
u8 fw_minor;
+ u32 wolopts;
/* It takes 3 seconds to fully switch out of loopback mode before
* it can safely re-enter loopback mode. Record the time when
@@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
}
#endif
+static int gpy_ack_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear all pending interrupts */
+ ret = phy_read(phydev, PHY_ISTAT);
+ return ret < 0 ? ret : 0;
+}
+
static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
{
struct gpy_priv *priv = phydev->priv;
@@ -262,16 +272,8 @@ out:
static int gpy_config_init(struct phy_device *phydev)
{
- int ret;
-
- /* Mask all interrupts */
- ret = phy_write(phydev, PHY_IMASK, 0);
- if (ret)
- return ret;
-
- /* Clear all pending interrupts */
- ret = phy_read(phydev, PHY_ISTAT);
- return ret < 0 ? ret : 0;
+ /* Nothing to configure; kept as a placeholder for future configuration requirements. */
+ return 0;
}
static int gpy21x_config_init(struct phy_device *phydev)
@@ -627,11 +629,23 @@ static int gpy_read_status(struct phy_device *phydev)
static int gpy_config_intr(struct phy_device *phydev)
{
+ struct gpy_priv *priv = phydev->priv;
u16 mask = 0;
+ int ret;
+
+ ret = gpy_ack_interrupt(phydev);
+ if (ret)
+ return ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
mask = PHY_IMASK_MASK;
+ if (priv->wolopts & WAKE_MAGIC)
+ mask |= PHY_IMASK_WOL;
+
+ if (priv->wolopts & WAKE_PHY)
+ mask |= PHY_IMASK_LSTC;
+
return phy_write(phydev, PHY_IMASK, mask);
}
@@ -678,6 +692,7 @@ static int gpy_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
struct net_device *attach_dev = phydev->attached_dev;
+ struct gpy_priv *priv = phydev->priv;
int ret;
if (wol->wolopts & WAKE_MAGIC) {
@@ -725,6 +740,8 @@ static int gpy_set_wol(struct phy_device *phydev,
ret = phy_read(phydev, PHY_ISTAT);
if (ret < 0)
return ret;
+
+ priv->wolopts |= WAKE_MAGIC;
} else {
/* Disable magic packet matching */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
@@ -732,6 +749,13 @@ static int gpy_set_wol(struct phy_device *phydev,
WOL_EN);
if (ret < 0)
return ret;
+
+ /* Disable the WOL interrupt */
+ ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
+ if (ret < 0)
+ return ret;
+
+ priv->wolopts &= ~WAKE_MAGIC;
}
if (wol->wolopts & WAKE_PHY) {
@@ -748,9 +772,11 @@ static int gpy_set_wol(struct phy_device *phydev,
if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
phy_trigger_machine(phydev);
+ priv->wolopts |= WAKE_PHY;
return 0;
}
+ priv->wolopts &= ~WAKE_PHY;
/* Disable the link state change interrupt */
return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
}
@@ -758,18 +784,10 @@ static int gpy_set_wol(struct phy_device *phydev,
static void gpy_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int ret;
+ struct gpy_priv *priv = phydev->priv;
wol->supported = WAKE_MAGIC | WAKE_PHY;
- wol->wolopts = 0;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
- if (ret & WOL_EN)
- wol->wolopts |= WAKE_MAGIC;
-
- ret = phy_read(phydev, PHY_IMASK);
- if (ret & PHY_IMASK_LSTC)
- wol->wolopts |= WAKE_PHY;
+ wol->wolopts = priv->wolopts;
}
static int gpy_loopback(struct phy_device *phydev, bool enable)
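
With the gpy changes above, the requested WoL options are cached in the driver's private data and the interrupt mask is derived from that cache rather than re-read from hardware. A standalone sketch of that bookkeeping; the interrupt flag values below are illustrative, not the real PHY_IMASK_* bits:

#include <stdio.h>

#define WAKE_PHY   (1u << 0)
#define WAKE_MAGIC (1u << 5)

#define IMASK_LSTC (1u << 0)  /* link state change (illustrative) */
#define IMASK_WOL  (1u << 15) /* wake on LAN (illustrative) */

static unsigned int wolopts; /* mirrors priv->wolopts */

static unsigned int irq_mask_from_wol(unsigned int base_mask)
{
        unsigned int mask = base_mask;

        if (wolopts & WAKE_MAGIC)
                mask |= IMASK_WOL;
        if (wolopts & WAKE_PHY)
                mask |= IMASK_LSTC;
        return mask;
}

int main(void)
{
        wolopts |= WAKE_MAGIC; /* what the set_wol path would record */
        printf("IMASK = 0x%04x\n", irq_mask_from_wol(0));
        return 0;
}
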
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 3cf614b4cd52..5af5ade4fc64 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1058,7 +1058,7 @@ nxp_c45_no_ptp_irq:
}
static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
mii_ts);
@@ -1660,6 +1660,9 @@ static int nxp_c45_probe(struct phy_device *phydev)
priv->mii_ts.ts_info = nxp_c45_ts_info;
phydev->mii_ts = &priv->mii_ts;
ret = nxp_c45_init_ptp_clock(priv);
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
} else {
phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
}
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 15f349e5995a..1f98b6a96c15 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -13,7 +13,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 102,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 103,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -141,6 +141,7 @@ int phy_interface_num_ports(phy_interface_t interface)
return 1;
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return 4;
case PHY_INTERFACE_MODE_PSGMII:
return 5;
@@ -265,6 +266,7 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 10, FULL, 10baseT1S_Full ),
PHY_SETTING( 10, HALF, 10baseT1S_Half ),
PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ),
+ PHY_SETTING( 10, FULL, 10baseT1BRR_Full ),
};
#undef PHY_SETTING
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c4236564c1cd..785182fa5fe0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1309,7 +1309,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (netdev) {
struct device *parent = netdev->dev.parent;
- if (netdev->wol_enabled)
+ if (netdev->ethtool->wol_enabled)
pm_system_wakeup();
else if (device_may_wakeup(&netdev->dev))
pm_wakeup_dev_event(&netdev->dev, 0, true);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6c6ec9475709..70b07e621fb2 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -296,7 +296,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
goto out;
- if (netdev->wol_enabled)
+ if (netdev->ethtool->wol_enabled)
return false;
/* As long as not all affected network drivers support the
@@ -1980,16 +1980,17 @@ int phy_suspend(struct phy_device *phydev)
const struct phy_driver *phydrv = phydev->drv;
int ret;
- if (phydev->suspended)
+ if (phydev->suspended || !phydrv)
return 0;
phy_ethtool_get_wol(phydev, &wol);
- phydev->wol_enabled = wol.wolopts || (netdev && netdev->wol_enabled);
+ phydev->wol_enabled = wol.wolopts ||
+ (netdev && netdev->ethtool->wol_enabled);
/* If the device has WOL enabled, we cannot suspend the PHY */
if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND))
return -EBUSY;
- if (!phydrv || !phydrv->suspend)
+ if (!phydrv->suspend)
return 0;
ret = phydrv->suspend(phydev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 994471fad833..51c526d227fa 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -231,6 +231,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
return SPEED_1000;
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return SPEED_2500;
case PHY_INTERFACE_MODE_5GBASER:
@@ -500,7 +501,11 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface,
switch (interface) {
case PHY_INTERFACE_MODE_USXGMII:
- caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
+ caps |= MAC_10000FD | MAC_5000FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ caps |= MAC_2500FD;
fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -885,26 +890,31 @@ static int phylink_parse_mode(struct phylink *pl,
const char *managed;
unsigned long caps;
+ if (pl->config->default_an_inband)
+ pl->cfg_link_an_mode = MLO_AN_INBAND;
+
dn = fwnode_get_named_child_node(fwnode, "fixed-link");
if (dn || fwnode_property_present(fwnode, "fixed-link"))
pl->cfg_link_an_mode = MLO_AN_FIXED;
fwnode_handle_put(dn);
if ((fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
- strcmp(managed, "in-band-status") == 0) ||
- pl->config->ovr_an_inband) {
+ strcmp(managed, "in-band-status") == 0)) {
if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
phylink_err(pl,
"can't use both fixed-link and in-band-status\n");
return -EINVAL;
}
+ pl->cfg_link_an_mode = MLO_AN_INBAND;
+ }
+
+ if (pl->cfg_link_an_mode == MLO_AN_INBAND) {
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
phylink_set(pl->supported, Autoneg);
phylink_set(pl->supported, Asym_Pause);
phylink_set(pl->supported, Pause);
- pl->cfg_link_an_mode = MLO_AN_INBAND;
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -921,6 +931,7 @@ static int phylink_parse_mode(struct phylink *pl,
case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_25GBASER:
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XLGMII:
@@ -1119,6 +1130,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
/* These protocols are designed for use with a PHY which
* communicates its negotiation result back to the MAC via
* inband communication. Note: there exist PHYs that run
@@ -2270,7 +2282,7 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
{
ASSERT_RTNL();
- if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) {
+ if (mac_wol && (!pl->netdev || pl->netdev->ethtool->wol_enabled)) {
/* Wake-on-Lan enabled, MAC handling */
mutex_lock(&pl->state_mutex);
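
The phylink_parse_mode() changes above establish a resolution order: a MAC-provided default_an_inband selects in-band mode first, a fixed-link node then overrides it, and an explicit managed = "in-band-status" either confirms in-band mode or errors out if fixed-link was chosen. A standalone sketch of that ordering; the enum values are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum an_mode { AN_PHY, AN_FIXED, AN_INBAND };

static int resolve(bool default_an_inband, bool fixed_link,
                   bool managed_inband, enum an_mode *mode)
{
        *mode = AN_PHY;

        if (default_an_inband)
                *mode = AN_INBAND;
        if (fixed_link)
                *mode = AN_FIXED;
        if (managed_inband) {
                if (*mode == AN_FIXED)
                        return -1; /* can't use both */
                *mode = AN_INBAND;
        }
        return 0;
}

int main(void)
{
        enum an_mode mode;

        /* MAC asks for in-band by default, firmware provides fixed-link */
        if (!resolve(true, true, false, &mode))
                printf("resolved mode: %d (1 == fixed)\n", mode);
        return 0;
}
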
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 7ab41f95dae5..bed839237fb5 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -32,6 +32,15 @@
#define RTL8211F_PHYCR2 0x19
#define RTL8211F_INSR 0x1d
+#define RTL8211F_LEDCR 0x10
+#define RTL8211F_LEDCR_MODE BIT(15)
+#define RTL8211F_LEDCR_ACT_TXRX BIT(4)
+#define RTL8211F_LEDCR_LINK_1000 BIT(3)
+#define RTL8211F_LEDCR_LINK_100 BIT(1)
+#define RTL8211F_LEDCR_LINK_10 BIT(0)
+#define RTL8211F_LEDCR_MASK GENMASK(4, 0)
+#define RTL8211F_LEDCR_SHIFT 5
+
#define RTL8211F_TX_DELAY BIT(8)
#define RTL8211F_RX_DELAY BIT(3)
@@ -87,6 +96,8 @@
#define RTL_8221B_VN_CG 0x001cc84a
#define RTL_8251B 0x001cc862
+#define RTL8211F_LED_COUNT 3
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
@@ -476,6 +487,98 @@ static int rtl821x_resume(struct phy_device *phydev)
return 0;
}
+static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+
+ /* The RTL8211F PHY supports these LED settings on up to three LEDs:
+ * - Link: Configurable subset of 10/100/1000 link rates
+ * - Active: Blink on activity, RX or TX is not differentiated
+ * The Active option has two modes, A and B:
+ * - A: Link and Active indication at configurable, but matching,
+ * subset of 10/100/1000 link rates
+ * - B: Link indication at configurable subset of 10/100/1000 link
+ * rates and Active indication always at all three 10+100+1000
+ * link rates.
+ * This code currently uses mode B only.
+ */
+
+ if (index >= RTL8211F_LED_COUNT)
+ return -EINVAL;
+
+ /* Filter out any other unsupported triggers. */
+ if (rules & ~mask)
+ return -EOPNOTSUPP;
+
+ /* RX and TX are not differentiated, either both are set or not set. */
+ if (!(rules & BIT(TRIGGER_NETDEV_RX)) ^ !(rules & BIT(TRIGGER_NETDEV_TX)))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR);
+ if (val < 0)
+ return val;
+
+ val >>= RTL8211F_LEDCR_SHIFT * index;
+ val &= RTL8211F_LEDCR_MASK;
+
+ if (val & RTL8211F_LEDCR_LINK_10)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ if (val & RTL8211F_LEDCR_LINK_100)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (val & RTL8211F_LEDCR_LINK_1000)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+ if (val & RTL8211F_LEDCR_ACT_TXRX) {
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ }
+
+ return 0;
+}
+
+static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ const u16 mask = RTL8211F_LEDCR_MASK << (RTL8211F_LEDCR_SHIFT * index);
+ u16 reg = 0;
+
+ if (index >= RTL8211F_LED_COUNT)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ reg |= RTL8211F_LEDCR_LINK_10;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ reg |= RTL8211F_LEDCR_LINK_100;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ reg |= RTL8211F_LEDCR_LINK_1000;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
+ test_bit(TRIGGER_NETDEV_TX, &rules)) {
+ reg |= RTL8211F_LEDCR_ACT_TXRX;
+ }
+
+ reg <<= RTL8211F_LEDCR_SHIFT * index;
+ reg |= RTL8211F_LEDCR_MODE; /* Mode B; OR'ed in after the shift so bit 15 survives for index > 0 */
+
+ return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg);
+}
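
For reference, a standalone sketch of the LEDCR packing used by the two helpers above: each LED owns a 5-bit field shifted by 5 * index, and the Mode B select (bit 15) is OR'ed in after the shift. The worked values are examples only, not taken from hardware:

#include <stdint.h>
#include <stdio.h>

#define LEDCR_MODE      (1u << 15)
#define LEDCR_ACT_TXRX  (1u << 4)
#define LEDCR_LINK_1000 (1u << 3)
#define LEDCR_MASK      0x1fu
#define LEDCR_SHIFT     5

int main(void)
{
        unsigned int index = 1;                                 /* second LED */
        uint16_t field = LEDCR_LINK_1000 | LEDCR_ACT_TXRX;      /* 0x18 */
        uint16_t mask = LEDCR_MASK << (LEDCR_SHIFT * index);    /* 0x03e0 */
        uint16_t set = (field << (LEDCR_SHIFT * index)) | LEDCR_MODE;

        printf("mask=0x%04x set=0x%04x\n",
               (unsigned int)mask, (unsigned int)set);          /* 0x03e0 0x8300 */
        return 0;
}
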
+
static int rtl8211e_config_init(struct phy_device *phydev)
{
int ret = 0, oldpage;
@@ -1192,6 +1295,9 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.flags = PHY_ALWAYS_CALL_SUSPEND,
+ .led_hw_is_supported = rtl8211f_led_hw_is_supported,
+ .led_hw_control_get = rtl8211f_led_hw_control_get,
+ .led_hw_control_set = rtl8211f_led_hw_control_set,
}, {
PHY_ID_MATCH_EXACT(RTL_8211FVD_PHYID),
.name = "RTL8211F-VD Gigabit Ethernet",
@@ -1318,6 +1424,14 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(0x001ccad0),
+ .name = "RTL8224 2.5Gbps PHY",
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822x_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 3f9cbd797fd6..a5684ef5884b 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2429,8 +2429,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Handle remove event globally, it resets this state machine */
if (event == SFP_E_REMOVE) {
- if (sfp->sm_mod_state > SFP_MOD_PROBE)
- sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_remove(sfp);
sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
return;
}
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7b1bc5fcef9b..7c51daecf18e 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -15,6 +15,7 @@
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/phy.h>
+#include <linux/clk.h>
#include <linux/of_mdio.h>
#define XILINX_GMII2RGMII_REG 0x10
@@ -85,11 +86,17 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
struct device *dev = &mdiodev->dev;
struct device_node *np = dev->of_node, *phy_node;
struct gmii2rgmii *priv;
+ struct clk *clkin;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ clkin = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clkin))
+ return dev_err_probe(dev, PTR_ERR(clkin),
+ "Failed to get and enable clock from Device Tree\n");
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node) {
dev_err(dev, "Couldn't parse phy-handle\n");
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0a65b6d690fe..eb9acfcaeb09 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -70,6 +70,7 @@
#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
#define PPP_PROTO_LEN 2
+#define PPP_LCP_HDRLEN 4
/*
* An instance of /dev/ppp can be associated with either a ppp
@@ -493,6 +494,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
return ret;
}
+static bool ppp_check_packet(struct sk_buff *skb, size_t count)
+{
+ /* LCP packets must include the LCP header, which is 4 bytes long:
+ * 1-byte code, 1-byte identifier, and 2-byte length.
+ */
+ return get_unaligned_be16(skb->data) != PPP_LCP ||
+ count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
+}
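
A standalone illustration of the new ppp_check_packet() rule: writes that claim protocol PPP_LCP (0xc021) but are shorter than the 2-byte protocol field plus the 4-byte LCP header are rejected. The sample buffers below are hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PPP_LCP        0xc021
#define PPP_PROTO_LEN  2
#define PPP_LCP_HDRLEN 4

static bool check_packet(const unsigned char *data, size_t count)
{
        unsigned int proto = (data[0] << 8) | data[1]; /* big-endian protocol */

        return proto != PPP_LCP || count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
}

int main(void)
{
        unsigned char runt[] = { 0xc0, 0x21, 0x01 };                   /* too short */
        unsigned char ok[]   = { 0xc0, 0x21, 0x01, 0x00, 0x00, 0x04 }; /* Configure-Request */

        printf("runt: %s\n", check_packet(runt, sizeof(runt)) ? "ok" : "drop");
        printf("full: %s\n", check_packet(ok, sizeof(ok)) ? "ok" : "drop");
        return 0;
}
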
+
static ssize_t ppp_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -515,6 +525,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
kfree_skb(skb);
goto out;
}
+ ret = -EINVAL;
+ if (unlikely(!ppp_check_packet(skb, count))) {
+ kfree_skb(skb);
+ goto out;
+ }
switch (pf->kind) {
case INTERFACE:
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 577ea904b3d9..7fab916a7f46 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -23,6 +23,7 @@ config PSE_REGULATOR
config PSE_PD692X0
tristate "PD692X0 PSE controller"
depends on I2C
+ select FW_LOADER
select FW_UPLOAD
help
This module provides support for PD692x0 regulator based Ethernet
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 6488b941703c..0af7db80b2f8 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -73,6 +73,9 @@ enum {
PD692X0_MSG_SET_PORT_PARAM,
PD692X0_MSG_GET_PORT_STATUS,
PD692X0_MSG_DOWNLOAD_CMD,
+ PD692X0_MSG_GET_PORT_CLASS,
+ PD692X0_MSG_GET_PORT_MEAS,
+ PD692X0_MSG_GET_PORT_PARAM,
/* add new message above here */
PD692X0_MSG_CNT
@@ -134,7 +137,7 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = {
[PD692X0_MSG_SET_PORT_PARAM] = {
.key = PD692X0_KEY_CMD,
.sub = {0x05, 0xc0},
- .data = { 0, 0xff, 0xff, 0xff,
+ .data = { 0xf, 0xff, 0xff, 0xff,
0x4e, 0x4e, 0x4e, 0x4e},
},
[PD692X0_MSG_GET_PORT_STATUS] = {
@@ -149,6 +152,24 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = {
.data = {0x16, 0x16, 0x99, 0x4e,
0x4e, 0x4e, 0x4e, 0x4e},
},
+ [PD692X0_MSG_GET_PORT_CLASS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc4},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_PORT_MEAS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc5},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_PORT_PARAM] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc0},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
};
static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo)
@@ -435,6 +456,184 @@ static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
}
}
+struct pd692x0_pse_ext_state_mapping {
+ u32 status_code;
+ enum ethtool_c33_pse_ext_state pse_ext_state;
+ u32 pse_ext_substate;
+};
+
+static const struct pd692x0_pse_ext_state_mapping
+pd692x0_pse_ext_state_map[] = {
+ {0x06, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE},
+ {0x07, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE},
+ {0x08, ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE},
+ {0x0C, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT},
+ {0x11, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT},
+ {0x12, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT},
+ {0x1B, ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS},
+ {0x1C, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0x1E, ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD},
+ {0x1F, ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD},
+ {0x20, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED},
+ {0x21, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT},
+ {0x22, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE},
+ {0x24, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION},
+ {0x25, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0x34, ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION},
+ {0x35, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP},
+ {0x36, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP},
+ {0x37, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0x3C, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET},
+ {0x3D, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT},
+ {0x41, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT},
+ {0x43, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0xA7, ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR},
+ {0xA8, ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN},
+ { /* sentinel */ }
+};
+
+static void
+pd692x0_get_ext_state(struct ethtool_c33_pse_ext_state_info *c33_ext_state_info,
+ u32 status_code)
+{
+ const struct pd692x0_pse_ext_state_mapping *ext_state_map;
+
+ ext_state_map = pd692x0_pse_ext_state_map;
+ while (ext_state_map->status_code) {
+ if (ext_state_map->status_code == status_code) {
+ c33_ext_state_info->c33_pse_ext_state = ext_state_map->pse_ext_state;
+ c33_ext_state_info->__c33_pse_ext_substate = ext_state_map->pse_ext_substate;
+ return;
+ }
+ ext_state_map++;
+ }
+}
+
+struct pd692x0_class_pw {
+ int class;
+ int class_cfg_value;
+ int class_pw;
+ int max_added_class_pw;
+};
+
+#define PD692X0_CLASS_PW_TABLE_SIZE 4
+/* 4/2 pairs class configuration power table in compliance mode.
+ * Entries must be arranged in ascending order of supported power.
+ */
+static const struct pd692x0_class_pw
+pd692x0_class_pw_table[PD692X0_CLASS_PW_TABLE_SIZE] = {
+ {.class = 3, .class_cfg_value = 0x3, .class_pw = 15000, .max_added_class_pw = 3100},
+ {.class = 4, .class_cfg_value = 0x2, .class_pw = 30000, .max_added_class_pw = 8000},
+ {.class = 6, .class_cfg_value = 0x1, .class_pw = 60000, .max_added_class_pw = 5000},
+ {.class = 8, .class_cfg_value = 0x0, .class_pw = 90000, .max_added_class_pw = 7500},
+};
+
+static int pd692x0_pi_get_pw_from_table(int op_mode, int added_pw)
+{
+ const struct pd692x0_class_pw *pw_table;
+ int i;
+
+ pw_table = pd692x0_class_pw_table;
+ for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
+ if (pw_table->class_cfg_value == op_mode)
+ return pw_table->class_pw + added_pw * 100;
+ }
+
+ return -ERANGE;
+}
+
+static int pd692x0_pi_set_pw_from_table(struct device *dev,
+ struct pd692x0_msg *msg, int pw)
+{
+ const struct pd692x0_class_pw *pw_table;
+ int i;
+
+ pw_table = pd692x0_class_pw_table;
+ if (pw < pw_table->class_pw) {
+ dev_err(dev,
+ "Power limit %dmW not supported. Ranges minimal available: [%d-%d]\n",
+ pw,
+ pw_table->class_pw,
+ pw_table->class_pw + pw_table->max_added_class_pw);
+ return -ERANGE;
+ }
+
+ for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
+ if (pw > (pw_table->class_pw + pw_table->max_added_class_pw))
+ continue;
+
+ if (pw < pw_table->class_pw) {
+ dev_err(dev,
+ "Power limit %dmW not supported. Ranges available: [%d-%d] or [%d-%d]\n",
+ pw,
+ (pw_table - 1)->class_pw,
+ (pw_table - 1)->class_pw + (pw_table - 1)->max_added_class_pw,
+ pw_table->class_pw,
+ pw_table->class_pw + pw_table->max_added_class_pw);
+ return -ERANGE;
+ }
+
+ msg->data[2] = pw_table->class_cfg_value;
+ msg->data[3] = (pw - pw_table->class_pw) / 100;
+ return 0;
+ }
+
+ pw_table--;
+ dev_warn(dev,
+ "Power limit %dmW not supported. Set to highest power limit %dmW\n",
+ pw, pw_table->class_pw + pw_table->max_added_class_pw);
+ msg->data[2] = pw_table->class_cfg_value;
+ msg->data[3] = pw_table->max_added_class_pw / 100;
+ return 0;
+}
+
+static int
+pd692x0_pi_get_pw_ranges(struct pse_control_status *st)
+{
+ const struct pd692x0_class_pw *pw_table;
+ int i;
+
+ pw_table = pd692x0_class_pw_table;
+ st->c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
+ sizeof(struct ethtool_c33_pse_pw_limit_range),
+ GFP_KERNEL);
+ if (!st->c33_pw_limit_ranges)
+ return -ENOMEM;
+
+ for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
+ st->c33_pw_limit_ranges[i].min = pw_table->class_pw;
+ st->c33_pw_limit_ranges[i].max = pw_table->class_pw + pw_table->max_added_class_pw;
+ }
+
+ st->c33_pw_limit_nb_ranges = i;
+ return 0;
+}
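
The table above encodes the 4-pair/2-pair class configuration; the reported power limit is the class base power plus the "added power" field in 100 mW steps. A standalone sketch of that lookup with an invented readback of op_mode 0x2 and 50 added steps:

#include <stdio.h>

struct class_pw {
        int class_cfg_value;
        int class_pw;           /* mW */
        int max_added_class_pw; /* mW */
};

static const struct class_pw table[] = {
        { 0x3, 15000, 3100 },
        { 0x2, 30000, 8000 },
        { 0x1, 60000, 5000 },
        { 0x0, 90000, 7500 },
};

static int pw_from_table(int op_mode, int added_pw)
{
        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].class_cfg_value == op_mode)
                        return table[i].class_pw + added_pw * 100;
        return -1;
}

int main(void)
{
        /* op_mode 0x2 (class 4) with 50 added steps -> 35000 mW */
        printf("%d mW\n", pw_from_table(0x2, 50));
        return 0;
}
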
+
static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
unsigned long id,
struct netlink_ext_ack *extack,
@@ -442,6 +641,7 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
+ u32 class;
int ret;
ret = pd692x0_fw_unavailable(priv);
@@ -471,6 +671,36 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
priv->admin_state[id] = status->c33_admin_state;
+ pd692x0_get_ext_state(&status->c33_ext_state_info, buf.sub[0]);
+ status->c33_actual_pw = (buf.data[0] << 4 | buf.data[1]) * 100;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ msg.sub[2] = id;
+ memset(&buf, 0, sizeof(buf));
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ ret = pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
+ if (ret < 0)
+ return ret;
+ status->c33_avail_pw_limit = ret;
+
+ memset(&buf, 0, sizeof(buf));
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ class = buf.data[3] >> 4;
+ if (class <= 8)
+ status->c33_pw_class = class;
+
+ ret = pd692x0_pi_get_pw_ranges(status);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -749,12 +979,97 @@ out:
return ret;
}
+static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_MEAS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ /* Convert 0.1V unit to uV */
+ return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
+}
+
+static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
+ int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int mW, uV, uA, ret;
+ s64 tmp_64;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
+ if (ret < 0)
+ return ret;
+ mW = ret;
+
+ ret = pd692x0_pi_get_voltage(pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ tmp_64 = mW;
+ tmp_64 *= 1000000000ull;
+ /* uA = mW * 1000000000 / uV */
+ uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+ return uA;
+}
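
The voltage/power/current conversions above go through 64-bit intermediates because, for example, 90000 mW * 10^9 does not fit in 32 bits. A standalone arithmetic check with example numbers only (54 V corresponds to a raw readback of 540 in the controller's 0.1 V units, converted to microvolts as in pd692x0_pi_get_voltage()):

#include <inttypes.h>
#include <stdio.h>

static int64_t div_round_closest(int64_t n, int64_t d)
{
        return (n + d / 2) / d; /* positive operands only, like DIV_ROUND_CLOSEST_ULL */
}

int main(void)
{
        int64_t mW = 30000;    /* 30 W port limit */
        int64_t uV = 54000000; /* 54 V, i.e. raw 540 * 100000 */

        int64_t uA = div_round_closest(mW * 1000000000LL, uV);

        printf("%" PRId64 " uA (~%.2f A)\n", uA, uA / 1e6); /* 555556 uA */
        return 0;
}
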
+
+static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
+ int id, int max_uA)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct device *dev = &priv->client->dev;
+ struct pd692x0_msg msg, buf = {0};
+ int uV, ret, mW;
+ s64 tmp_64;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_pi_get_voltage(pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.sub[2] = id;
+ tmp_64 = uV;
+ tmp_64 *= max_uA;
+ /* mW = uV * uA / 1000000000 */
+ mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+ ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
+ if (ret)
+ return ret;
+
+ return pd692x0_sendrecv_msg(priv, &msg, &buf);
+}
+
static const struct pse_controller_ops pd692x0_ops = {
.setup_pi_matrix = pd692x0_setup_pi_matrix,
.ethtool_get_status = pd692x0_ethtool_get_status,
.pi_enable = pd692x0_pi_enable,
.pi_disable = pd692x0_pi_disable,
.pi_is_enabled = pd692x0_pi_is_enabled,
+ .pi_get_voltage = pd692x0_pi_get_voltage,
+ .pi_get_current_limit = pd692x0_pi_get_current_limit,
+ .pi_set_current_limit = pd692x0_pi_set_current_limit,
};
#define PD692X0_FW_LINE_MAX_SZ 0xff
@@ -1194,8 +1509,8 @@ static void pd692x0_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id pd692x0_id[] = {
- { PD692X0_PSE_NAME, 0 },
- { },
+ { PD692X0_PSE_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, pd692x0_id);
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 795ab264eaf2..ec20953e0f82 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -265,10 +265,113 @@ static int pse_pi_disable(struct regulator_dev *rdev)
return ret;
}
+static int _pse_pi_get_voltage(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id;
+
+ ops = pcdev->ops;
+ if (!ops->pi_get_voltage)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ return ops->pi_get_voltage(pcdev, id);
+}
+
+static int pse_pi_get_voltage(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ int ret;
+
+ mutex_lock(&pcdev->lock);
+ ret = _pse_pi_get_voltage(rdev);
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+
+static int pse_pi_get_current_limit(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ struct netlink_ext_ack extack = {};
+ struct pse_control_status st = {};
+ int id, uV, ret;
+ s64 tmp_64;
+
+ ops = pcdev->ops;
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ if (ops->pi_get_current_limit) {
+ ret = ops->pi_get_current_limit(pcdev, id);
+ goto out;
+ }
+
+ /* If the pi_get_current_limit() callback is not populated, get the
+ * voltage from pi_get_voltage() and the power limit from
+ * ethtool_get_status() to calculate the current limit.
+ */
+ ret = _pse_pi_get_voltage(rdev);
+ if (!ret) {
+ dev_err(pcdev->dev, "Voltage null\n");
+ ret = -ERANGE;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+ uV = ret;
+
+ ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
+ if (ret)
+ goto out;
+
+ if (!st.c33_avail_pw_limit) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ tmp_64 = st.c33_avail_pw_limit;
+ tmp_64 *= 1000000000ull;
+ /* uA = mW * 1000000000 / uV */
+ ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+
+out:
+ mutex_unlock(&pcdev->lock);
+ return ret;
+}
+
+static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_set_current_limit)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_set_current_limit(pcdev, id, max_uA);
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
static const struct regulator_ops pse_pi_ops = {
.is_enabled = pse_pi_is_enabled,
.enable = pse_pi_enable,
.disable = pse_pi_disable,
+ .get_voltage = pse_pi_get_voltage,
+ .get_current_limit = pse_pi_get_current_limit,
+ .set_current_limit = pse_pi_set_current_limit,
};
static int
@@ -298,7 +401,9 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rdesc->ops = &pse_pi_ops;
rdesc->owner = pcdev->owner;
- rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+ rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_CURRENT;
+ rinit_data->constraints.max_uA = MAX_PI_CURRENT;
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;
@@ -626,6 +731,23 @@ out:
}
EXPORT_SYMBOL_GPL(of_pse_control_get);
+static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ const struct pse_controller_ops *ops;
+
+ ops = pcdev->ops;
+ if (!ops->ethtool_get_status) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not support status report");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->ethtool_get_status(pcdev, id, extack, status);
+}
+
/**
* pse_ethtool_get_status - get status of PSE control
* @psec: PSE control pointer
@@ -638,19 +760,10 @@ int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
{
- const struct pse_controller_ops *ops;
int err;
- ops = psec->pcdev->ops;
-
- if (!ops->ethtool_get_status) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not support status report");
- return -EOPNOTSUPP;
- }
-
mutex_lock(&psec->pcdev->lock);
- err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status);
+ err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
mutex_unlock(&psec->pcdev->lock);
return err;
@@ -719,19 +832,56 @@ int pse_ethtool_set_config(struct pse_control *psec,
{
int err = 0;
- if (pse_has_c33(psec)) {
+ if (pse_has_c33(psec) && config->c33_admin_control) {
err = pse_ethtool_c33_set_config(psec, config);
if (err)
return err;
}
- if (pse_has_podl(psec))
+ if (pse_has_podl(psec) && config->podl_admin_control)
err = pse_ethtool_podl_set_config(psec, config);
return err;
}
EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
+/**
+ * pse_ethtool_set_pw_limit - set PSE control power limit
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @pw_limit: power limit value in mW
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit)
+{
+ int uV, uA, ret;
+ s64 tmp_64;
+
+ ret = regulator_get_voltage(psec->ps);
+ if (!ret) {
+ NL_SET_ERR_MSG(extack,
+ "Can't calculate the current, PSE voltage read is 0");
+ return -ERANGE;
+ }
+ if (ret < 0) {
+ NL_SET_ERR_MSG(extack,
+ "Error reading PSE voltage");
+ return ret;
+ }
+ uV = ret;
+
+ tmp_64 = pw_limit;
+ tmp_64 *= 1000000000ull;
+ /* uA = mW * 1000000000 / uV */
+ uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+
+ return regulator_set_current_limit(psec->ps, 0, uA);
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_set_pw_limit);
+
bool pse_has_podl(struct pse_control *psec)
{
return psec->pcdev->types & ETHTOOL_PSE_PODL;
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 98ffbb1bbf13..61f6ad9c1934 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -794,8 +794,8 @@ static int tps23881_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id tps23881_id[] = {
- { "tps23881", 0 },
- { },
+ { "tps23881" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, tps23881_id);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9254bca2813d..9b24861464bc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1661,6 +1661,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
int len, int *skb_xdp)
{
struct page_frag *alloc_frag = &current->task_frag;
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct bpf_prog *xdp_prog;
int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
char *buf;
@@ -1700,6 +1701,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
local_bh_disable();
rcu_read_lock();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
struct xdp_buff xdp;
@@ -1728,12 +1730,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
pad = xdp.data - xdp.data_hard_start;
len = xdp.data_end - xdp.data;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
out:
+ bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
return NULL;
@@ -2566,6 +2570,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
ctl && ctl->type == TUN_MSG_PTR) {
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct tun_page tpage;
int n = ctl->num;
int flush = 0, queued = 0;
@@ -2574,6 +2579,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
local_bh_disable();
rcu_read_lock();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
@@ -2588,6 +2594,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (tfile->napi_enabled && queued > 0)
napi_schedule(&tfile->napi);
+ bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 51c295e1e823..b034ef8a73ea 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -174,7 +174,6 @@ struct ax88179_data {
u32 wol_supported;
u32 wolopts;
u8 disconnecting;
- u8 initialized;
};
struct ax88179_int_data {
@@ -327,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
if (netif_carrier_ok(dev->net) != link) {
usbnet_link_change(dev, link, 1);
- netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
+ if (!link)
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
}
}
@@ -1543,6 +1543,7 @@ static int ax88179_link_reset(struct usbnet *dev)
GMII_PHY_PHYSR, 2, &tmp16);
if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
return 0;
} else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
@@ -1580,6 +1581,8 @@ static int ax88179_link_reset(struct usbnet *dev)
netif_carrier_on(dev->net);
+ netdev_info(dev->net, "ax88179 - Link status is: 1\n");
+
return 0;
}
@@ -1678,12 +1681,21 @@ static int ax88179_reset(struct usbnet *dev)
static int ax88179_net_reset(struct usbnet *dev)
{
- struct ax88179_data *ax179_data = dev->driver_priv;
+ u16 tmp16;
- if (ax179_data->initialized)
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
+ 2, &tmp16);
+ if (tmp16) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
+ tmp16 |= AX_MEDIUM_RECEIVE_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ }
+ } else {
ax88179_reset(dev);
- else
- ax179_data->initialized = 1;
+ }
return 0;
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index bf76ecccc2e6..d5c47a2a62dc 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -933,7 +933,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
- if (!dev->in || !dev->out || !dev->status) {
+ if (!dev->in || !dev->out ||
+ (!dev->status && dev->driver_info->flags & FLAG_LINK_INTR)) {
dev_dbg(&intf->dev, "failed to collect endpoints\n");
goto error2;
}
@@ -1925,6 +1926,34 @@ static const struct driver_info cdc_ncm_zlp_info = {
.set_rx_mode = usbnet_cdc_update_filter,
};
+/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
+static const struct driver_info apple_tethering_interface_info = {
+ .description = "CDC NCM (Apple Tethering)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR | FLAG_ETHER | FLAG_SEND_ZLP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+ .set_rx_mode = usbnet_cdc_update_filter,
+};
+
+/* Same as apple_tethering_interface_info, but without FLAG_LINK_INTR */
+static const struct driver_info apple_private_interface_info = {
+ .description = "CDC NCM (Apple Private)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_ETHER | FLAG_SEND_ZLP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+ .set_rx_mode = usbnet_cdc_update_filter,
+};
+
/* Same as cdc_ncm_info, but with FLAG_WWAN */
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
@@ -1954,6 +1983,22 @@ static const struct driver_info wwan_noarp_info = {
};
static const struct usb_device_id cdc_devs[] = {
+ /* iPhone */
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 2),
+ .driver_info = (unsigned long)&apple_tethering_interface_info,
+ },
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 4),
+ .driver_info = (unsigned long)&apple_private_interface_info,
+ },
+
+ /* iPad */
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 2),
+ .driver_info = (unsigned long)&apple_tethering_interface_info,
+ },
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 4),
+ .driver_info = (unsigned long)&apple_private_interface_info,
+ },
+
/* Ericsson MBM devices like F5521gw */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5a2c38b63012..8adf77e3557e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -380,11 +380,6 @@ struct skb_data { /* skb->cb is one of these */
int num_of_packet;
};
-struct usb_context {
- struct usb_ctrlrequest req;
- struct lan78xx_net *dev;
-};
-
#define EVENT_TX_HALT 0
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
@@ -2946,6 +2941,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
buf |= HW_CFG_MEF_;
+ buf |= HW_CFG_CLK125_EN_;
+ buf |= HW_CFG_REFCLK25_EN_;
ret = lan78xx_write_reg(dev, HW_CFG, buf);
if (ret < 0)
@@ -3034,8 +3031,11 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
/* LAN7801 only has RGMII mode */
- if (dev->chipid == ID_REV_CHIP_ID_7801_)
+ if (dev->chipid == ID_REV_CHIP_ID_7801_) {
buf &= ~MAC_CR_GMII_EN_;
+ /* Enable Auto Duplex and Auto speed */
+ buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+ }
if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
dev->chipid == ID_REV_CHIP_ID_7850_) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 663e46348ce3..386d62769ded 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1372,6 +1372,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)}, /* Telit FN912 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)}, /* Telit FN912 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 19df1cd9f072..15e12f46d0ea 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1774,6 +1774,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
goto amacout;
}
memcpy(sa->sa_data, buf, 6);
+ tp->netdev->addr_assign_type = NET_ADDR_STOLEN;
netif_info(tp, probe, tp->netdev,
"Using pass-thru MAC addr %pM\n", sa->sa_data);
@@ -8554,6 +8555,19 @@ static int rtl8152_system_resume(struct r8152 *tp)
usb_submit_urb(tp->intr_urb, GFP_NOIO);
}
+ /* If the device is RTL8152_INACCESSIBLE here then we should do a
+ * reset. This is important because the usb_lock_device_for_reset()
+ * that happens as a result of usb_queue_reset_device() will silently
+ * fail if the device was suspended or if too much time passed.
+ *
+ * NOTE: The device is locked here so we can directly do the reset.
+ * We don't need usb_lock_device_for_reset() because that's just a
+ * wrapper over device_lock() and device_resume() (which calls us)
+ * does that for us.
+ */
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ usb_reset_device(tp->udev);
+
return 0;
}
@@ -8634,6 +8648,13 @@ static int rtl8152_system_suspend(struct r8152 *tp)
tasklet_enable(&tp->tx_tl);
}
+ /* If we're inaccessible here then some of the work that we did to
+ * get the adapter ready for suspend didn't work. Queue up a wakeup
+ * event so we can try again.
+ */
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ pm_wakeup_event(&tp->udev->dev, 0);
+
return 0;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 97afd7335d86..01a3b2417a54 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
rtl8150_t *dev = netdev_priv(netdev);
- short lpa, bmcr;
+ short lpa = 0;
+ short bmcr = 0;
u32 supported;
supported = (SUPPORTED_10baseT_Half |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 0726e18bee6f..78c821349f48 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -61,11 +61,6 @@ struct smsc75xx_priv {
u8 suspend_flags;
};
-struct usb_context {
- struct usb_ctrlrequest req;
- struct usbnet *dev;
-};
-
static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index cbea24666479..8e82184be5e7 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
static int smsc95xx_reset(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
- u32 read_buf, write_buf, burst_cap;
+ u32 read_buf, burst_cap;
int ret = 0, timeout;
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+ ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
+ if (ret < 0)
+ return ret;
/* Configure GPIO pins as LED outputs */
- write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
- LED_GPIO_CFG_FDX_LED;
- ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
+ read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+ LED_GPIO_CFG_FDX_LED;
+ ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
if (ret < 0)
return ret;
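
The LED_GPIO_CFG change above replaces a blind write with a read-modify-write so that bits already configured in the register survive the LED setup. A standalone before/after comparison with hypothetical register contents (the LED bit values are illustrative, not the real LED_GPIO_CFG_* definitions):

#include <stdio.h>

#define LED_BITS 0x70000000u /* stand-in for the three LED enable bits */

int main(void)
{
        unsigned int hw = 0x00000802u; /* unrelated bits already set in hardware */

        unsigned int blind = LED_BITS;      /* old behaviour: unrelated bits lost */
        unsigned int rmw = hw | LED_BITS;   /* new behaviour: unrelated bits kept */

        printf("blind=0x%08x rmw=0x%08x\n", blind, rmw);
        return 0;
}
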
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a802c0ea2cb..af474cc191d0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -25,6 +25,7 @@
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
+#include <net/xdp_sock_drv.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -40,14 +41,12 @@ module_param(napi_tx, bool, 0644);
#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
-/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
-#define VIRTIO_XDP_HEADROOM 256
-
/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)
-#define VIRTIO_XDP_FLAG BIT(0)
+#define VIRTIO_XDP_FLAG BIT(0)
+#define VIRTIO_ORPHAN_FLAG BIT(1)
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
@@ -85,6 +84,8 @@ struct virtnet_stat_desc {
struct virtnet_sq_free_stats {
u64 packets;
u64 bytes;
+ u64 napi_packets;
+ u64 napi_bytes;
};
struct virtnet_sq_stats {
@@ -348,6 +349,13 @@ struct receive_queue {
/* Record the last dma info to free after new pages are allocated. */
struct virtnet_rq_dma *last_dma;
+
+ struct xsk_buff_pool *xsk_pool;
+
+ /* xdp rxq used by xsk */
+ struct xdp_rxq_info xsk_rxq_info;
+
+ struct xdp_buff **xsk_buffs;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -490,6 +498,16 @@ struct virtio_net_common_hdr {
};
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ struct net_device *dev,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats);
+static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
+ struct sk_buff *skb, u8 flags);
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+ struct sk_buff *curr_skb,
+ struct page *page, void *buf,
+ int len, int truesize);
static bool is_xdp_frame(void *ptr)
{
@@ -506,29 +524,50 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
-static void __free_old_xmit(struct send_queue *sq, bool in_napi,
- struct virtnet_sq_free_stats *stats)
+static bool is_orphan_skb(void *ptr)
+{
+ return (unsigned long)ptr & VIRTIO_ORPHAN_FLAG;
+}
+
+static void *skb_to_ptr(struct sk_buff *skb, bool orphan)
+{
+ return (void *)((unsigned long)skb | (orphan ? VIRTIO_ORPHAN_FLAG : 0));
+}
+
+static struct sk_buff *ptr_to_skb(void *ptr)
+{
+ return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
+}
+
+static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
+ bool in_napi, struct virtnet_sq_free_stats *stats)
{
unsigned int len;
void *ptr;
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- ++stats->packets;
-
if (!is_xdp_frame(ptr)) {
- struct sk_buff *skb = ptr;
+ struct sk_buff *skb = ptr_to_skb(ptr);
pr_debug("Sent skb %p\n", skb);
- stats->bytes += skb->len;
+ if (is_orphan_skb(ptr)) {
+ stats->packets++;
+ stats->bytes += skb->len;
+ } else {
+ stats->napi_packets++;
+ stats->napi_bytes += skb->len;
+ }
napi_consume_skb(skb, in_napi);
} else {
struct xdp_frame *frame = ptr_to_xdp(ptr);
+ stats->packets++;
stats->bytes += xdp_get_frame_len(frame);
xdp_return_frame(frame);
}
}
+ netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
}
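
The new VIRTIO_ORPHAN_FLAG reuses the same trick as the existing VIRTIO_XDP_FLAG: the pointers queued on the virtqueue are aligned, so their low bits are known to be zero and can carry a tag that is masked off before the pointer is dereferenced. A standalone sketch of low-bit pointer tagging (names are illustrative, not the driver's):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_XDP     0x1UL
#define TAG_ORPHAN  0x2UL
#define TAG_MASK    (TAG_XDP | TAG_ORPHAN)

static void *tag_ptr(void *p, unsigned long tag)
{
        /* Only works because the object is at least 4-byte aligned. */
        assert(((unsigned long)p & TAG_MASK) == 0);
        return (void *)((unsigned long)p | tag);
}

static void *untag_ptr(void *p)
{
        return (void *)((unsigned long)p & ~TAG_MASK);
}

static int has_tag(void *p, unsigned long tag)
{
        return ((unsigned long)p & tag) != 0;
}

int main(void)
{
        int *obj = malloc(sizeof(*obj));   /* malloc returns aligned memory */
        void *q = tag_ptr(obj, TAG_ORPHAN);

        *obj = 42;
        printf("orphan=%d value=%d\n", has_tag(q, TAG_ORPHAN),
               *(int *)untag_ptr(q));
        free(obj);
        return 0;
}
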
/* Converting between virtqueue no. and kernel tx/rx queue no.
@@ -949,27 +988,33 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
rq = &vi->rq[i];
+ if (rq->xsk_pool) {
+ xsk_buff_free((struct xdp_buff *)buf);
+ return;
+ }
+
if (!vi->big_packets || vi->mergeable_rx_bufs)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
}
-static void free_old_xmit(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
+ bool in_napi)
{
struct virtnet_sq_free_stats stats = {0};
- __free_old_xmit(sq, in_napi, &stats);
+ __free_old_xmit(sq, txq, in_napi, &stats);
/* Avoid overhead when no packets have been processed
* happens when called speculatively from start_xmit.
*/
- if (!stats.packets)
+ if (!stats.packets && !stats.napi_packets)
return;
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, stats.bytes);
- u64_stats_add(&sq->stats.packets, stats.packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
u64_stats_update_end(&sq->stats.syncp);
}
@@ -1003,7 +1048,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
* early means 16 slots are typically wasted.
*/
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
- netif_stop_subqueue(dev, qnum);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
+
+ netif_tx_stop_queue(txq);
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.stop);
u64_stats_update_end(&sq->stats.syncp);
@@ -1012,7 +1059,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
virtqueue_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit(sq, false);
+ free_old_xmit(sq, txq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
u64_stats_update_begin(&sq->stats.syncp);
@@ -1024,6 +1071,329 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
}
}
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+ sg->dma_address = addr;
+ sg->length = len;
+}
+
+static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
+ struct receive_queue *rq, void *buf, u32 len)
+{
+ struct xdp_buff *xdp;
+ u32 bufsize;
+
+ xdp = (struct xdp_buff *)buf;
+
+ bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
+
+ if (unlikely(len > bufsize)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+ vi->dev->name, len, bufsize);
+ DEV_STATS_INC(vi->dev, rx_length_errors);
+ xsk_buff_free(xdp);
+ return NULL;
+ }
+
+ xsk_buff_set_size(xdp, len);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ return xdp;
+}
+
+static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+ unsigned int size;
+
+ size = xdp->data_end - xdp->data_hard_start;
+ skb = napi_alloc_skb(&rq->napi, size);
+ if (unlikely(!skb)) {
+ xsk_buff_free(xdp);
+ return NULL;
+ }
+
+ skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+
+ size = xdp->data_end - xdp->data_meta;
+ memcpy(__skb_put(skb, size), xdp->data_meta, size);
+
+ if (metasize) {
+ __skb_pull(skb, metasize);
+ skb_metadata_set(skb, metasize);
+ }
+
+ xsk_buff_free(xdp);
+
+ return skb;
+}
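
xsk_construct_skb() above copies everything from data_meta to data_end into a new skb and then pulls the metadata back off the front while recording its length. The pointer arithmetic can be shown with plain buffers; this is a sketch of the layout math only, not the kernel skb API:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Layout of an XDP buffer: [headroom][meta][packet data] */
        char frame[256];
        char *hard_start = frame;
        char *data = frame + 64;       /* packet starts after 64B headroom */
        char *data_meta = data - 8;    /* 8 bytes of metadata in front of it */
        char *data_end = data + 100;   /* 100B packet */

        char skb_linear[256];
        size_t headroom = data_meta - hard_start;
        size_t metasize = data - data_meta;
        size_t copy_len = data_end - data_meta;
        char *payload;

        memset(data_meta, 0xaa, metasize);
        memset(data, 0x55, 100);

        /* Copy meta + payload in one go, as xsk_construct_skb() does. */
        memcpy(skb_linear + headroom, data_meta, copy_len);

        /* "Pull" the metadata: the payload begins metasize bytes later. */
        payload = skb_linear + headroom + metasize;

        printf("headroom=%zu metasize=%zu payload_len=%zu first=0x%02x\n",
               headroom, metasize, copy_len - metasize,
               (unsigned char)payload[0]);
        return 0;
}
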
+
+static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
+ struct receive_queue *rq, struct xdp_buff *xdp,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct bpf_prog *prog;
+ u32 ret;
+
+ ret = XDP_PASS;
+ rcu_read_lock();
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ rcu_read_unlock();
+
+ switch (ret) {
+ case XDP_PASS:
+ return xsk_construct_skb(rq, xdp);
+
+ case XDP_TX:
+ case XDP_REDIRECT:
+ return NULL;
+
+ default:
+ /* drop packet */
+ xsk_buff_free(xdp);
+ u64_stats_inc(&stats->drops);
+ return NULL;
+ }
+}
+
+static void xsk_drop_follow_bufs(struct net_device *dev,
+ struct receive_queue *rq,
+ u32 num_buf,
+ struct virtnet_rq_stats *stats)
+{
+ struct xdp_buff *xdp;
+ u32 len;
+
+ while (num_buf-- > 1) {
+ xdp = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!xdp)) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ dev->name, num_buf);
+ DEV_STATS_INC(dev, rx_length_errors);
+ break;
+ }
+ u64_stats_add(&stats->bytes, len);
+ xsk_buff_free(xdp);
+ }
+}
+
+static int xsk_append_merge_buffer(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct sk_buff *head_skb,
+ u32 num_buf,
+ struct virtio_net_hdr_mrg_rxbuf *hdr,
+ struct virtnet_rq_stats *stats)
+{
+ struct sk_buff *curr_skb;
+ struct xdp_buff *xdp;
+ u32 len, truesize;
+ struct page *page;
+ void *buf;
+
+ curr_skb = head_skb;
+
+ while (--num_buf) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers out of %d missing\n",
+ vi->dev->name, num_buf,
+ virtio16_to_cpu(vi->vdev,
+ hdr->num_buffers));
+ DEV_STATS_INC(vi->dev, rx_length_errors);
+ return -EINVAL;
+ }
+
+ u64_stats_add(&stats->bytes, len);
+
+ xdp = buf_to_xdp(vi, rq, buf, len);
+ if (!xdp)
+ goto err;
+
+ buf = napi_alloc_frag(len);
+ if (!buf) {
+ xsk_buff_free(xdp);
+ goto err;
+ }
+
+ memcpy(buf, xdp->data - vi->hdr_len, len);
+
+ xsk_buff_free(xdp);
+
+ page = virt_to_page(buf);
+
+ truesize = len;
+
+ curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+ buf, len, truesize);
+ if (!curr_skb) {
+ put_page(page);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
+ return -EINVAL;
+}
+
+static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
+ struct receive_queue *rq, struct xdp_buff *xdp,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+ struct bpf_prog *prog;
+ struct sk_buff *skb;
+ u32 ret, num_buf;
+
+ hdr = xdp->data - vi->hdr_len;
+ num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+
+ ret = XDP_PASS;
+ rcu_read_lock();
+ prog = rcu_dereference(rq->xdp_prog);
+ /* TODO: support multi buffer. */
+ if (prog && num_buf == 1)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ rcu_read_unlock();
+
+ switch (ret) {
+ case XDP_PASS:
+ skb = xsk_construct_skb(rq, xdp);
+ if (!skb)
+ goto drop_bufs;
+
+ if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
+ dev_kfree_skb(skb);
+ goto drop;
+ }
+
+ return skb;
+
+ case XDP_TX:
+ case XDP_REDIRECT:
+ return NULL;
+
+ default:
+ /* drop packet */
+ xsk_buff_free(xdp);
+ }
+
+drop_bufs:
+ xsk_drop_follow_bufs(dev, rq, num_buf, stats);
+
+drop:
+ u64_stats_inc(&stats->drops);
+ return NULL;
+}
+
+static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, u32 len,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct net_device *dev = vi->dev;
+ struct sk_buff *skb = NULL;
+ struct xdp_buff *xdp;
+ u8 flags;
+
+ len -= vi->hdr_len;
+
+ u64_stats_add(&stats->bytes, len);
+
+ xdp = buf_to_xdp(vi, rq, buf, len);
+ if (!xdp)
+ return;
+
+ if (unlikely(len < ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ DEV_STATS_INC(dev, rx_length_errors);
+ xsk_buff_free(xdp);
+ return;
+ }
+
+ flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
+
+ if (!vi->mergeable_rx_bufs)
+ skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+ else
+ skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
+
+ if (skb)
+ virtnet_receive_done(vi, rq, skb, flags);
+}
+
+static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
+ struct xsk_buff_pool *pool, gfp_t gfp)
+{
+ struct xdp_buff **xsk_buffs;
+ dma_addr_t addr;
+ int err = 0;
+ u32 len, i;
+ int num;
+
+ xsk_buffs = rq->xsk_buffs;
+
+ num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
+ if (!num)
+ return -ENOMEM;
+
+ len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
+
+ for (i = 0; i < num; ++i) {
+ /* Use part of XDP_PACKET_HEADROOM as the virtnet hdr space.
+ * We assume XDP_PACKET_HEADROOM is larger than hdr->len
+ * (see virtnet_xsk_pool_enable()).
+ */
+ addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
+
+ sg_init_table(rq->sg, 1);
+ sg_fill_dma(rq->sg, addr, len);
+
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
+ if (err)
+ goto err;
+ }
+
+ return num;
+
+err:
+ for (; i < num; ++i)
+ xsk_buff_free(xsk_buffs[i]);
+
+ return err;
+}
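
virtnet_add_recvbuf_xsk() allocates a whole batch up front and, when queuing buffer i fails, frees only the tail of the batch (i through num - 1) that never reached the ring; the buffers already queued are reclaimed later through the normal completion path. A minimal sketch of that partial-failure cleanup, with made-up helpers:

#include <stdio.h>
#include <stdlib.h>

static int ring_free = 3;              /* pretend the ring has 3 free slots */

static int queue_buf(void *buf)
{
        (void)buf;
        if (!ring_free)
                return -1;
        ring_free--;
        return 0;
}

static int add_batch(void **bufs, int num)
{
        int i, j;

        for (i = 0; i < num; i++) {
                if (queue_buf(bufs[i]))
                        break;
        }

        /* Free only the tail that was allocated but never queued. */
        for (j = i; j < num; j++)
                free(bufs[j]);

        return i;                      /* number actually queued */
}

int main(void)
{
        void *bufs[5];
        int i, queued;

        for (i = 0; i < 5; i++)
                bufs[i] = malloc(32);

        queued = add_batch(bufs, 5);
        printf("queued %d of 5 buffers\n", queued);

        /* The driver frees queued buffers on completion; emulate that here. */
        for (i = 0; i < queued; i++)
                free(bufs[i]);
        return 0;
}
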
+
+static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ sq = &vi->sq[qid];
+
+ if (napi_if_scheduled_mark_missed(&sq->napi))
+ return 0;
+
+ local_bh_disable();
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+ local_bh_enable();
+
+ return 0;
+}
+
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct send_queue *sq,
struct xdp_frame *xdpf)
@@ -1138,7 +1508,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- __free_old_xmit(sq, false, &stats);
+ __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
+ false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -1240,7 +1611,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
- return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
+ return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
}
/* We copy the packet for XDP in the following cases:
@@ -1304,7 +1675,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
}
/* Headroom does not contribute to packet length */
- *len = page_off - VIRTIO_XDP_HEADROOM;
+ *len = page_off - XDP_PACKET_HEADROOM;
return page;
err_buf:
__free_pages(page, 0);
@@ -1360,6 +1731,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ goto err_xdp;
+
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1587,8 +1962,8 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
void *ctx;
xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
- VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+ xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
if (!*num_buf)
return 0;
@@ -1677,6 +2052,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
if (unlikely(hdr->hdr.gso_type))
return NULL;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return NULL;
+
/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
* with headroom may add hole in truesize, which
* make their length exceed PAGE_SIZE. So we disabled the
@@ -1701,12 +2080,12 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
/* linearize data for XDP */
xdp_page = xdp_linearize_page(rq, num_buf,
*page, offset,
- VIRTIO_XDP_HEADROOM,
+ XDP_PACKET_HEADROOM,
len);
if (!xdp_page)
return NULL;
} else {
- xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
sizeof(struct skb_shared_info));
if (*len + xdp_room > PAGE_SIZE)
return NULL;
@@ -1715,7 +2094,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
if (!xdp_page)
return NULL;
- memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+ memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
page_address(*page) + offset, *len);
}
@@ -1725,7 +2104,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
*page = xdp_page;
- return page_address(*page) + VIRTIO_XDP_HEADROOM;
+ return page_address(*page) + XDP_PACKET_HEADROOM;
}
static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
@@ -1788,6 +2167,49 @@ err_xdp:
return NULL;
}
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+ struct sk_buff *curr_skb,
+ struct page *page, void *buf,
+ int len, int truesize)
+{
+ int num_skb_frags;
+ int offset;
+
+ num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+ struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+
+ if (unlikely(!nskb))
+ return NULL;
+
+ if (curr_skb == head_skb)
+ skb_shinfo(curr_skb)->frag_list = nskb;
+ else
+ curr_skb->next = nskb;
+ curr_skb = nskb;
+ head_skb->truesize += nskb->truesize;
+ num_skb_frags = 0;
+ }
+
+ if (curr_skb != head_skb) {
+ head_skb->data_len += len;
+ head_skb->len += len;
+ head_skb->truesize += truesize;
+ }
+
+ offset = buf - page_address(page);
+ if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+ put_page(page);
+ skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+ len, truesize);
+ } else {
+ skb_add_rx_frag(curr_skb, num_skb_frags, page,
+ offset, len, truesize);
+ }
+
+ return curr_skb;
+}
+
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -1837,8 +2259,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(!curr_skb))
goto err_skb;
while (--num_buf) {
- int num_skb_frags;
-
buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
@@ -1863,34 +2283,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_skb;
}
- num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
- if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
- struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-
- if (unlikely(!nskb))
- goto err_skb;
- if (curr_skb == head_skb)
- skb_shinfo(curr_skb)->frag_list = nskb;
- else
- curr_skb->next = nskb;
- curr_skb = nskb;
- head_skb->truesize += nskb->truesize;
- num_skb_frags = 0;
- }
- if (curr_skb != head_skb) {
- head_skb->data_len += len;
- head_skb->len += len;
- head_skb->truesize += truesize;
- }
- offset = buf - page_address(page);
- if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
- put_page(page);
- skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
- len, truesize);
- } else {
- skb_add_rx_frag(curr_skb, num_skb_frags, page,
- offset, len, truesize);
- }
+ curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+ buf, len, truesize);
+ if (!curr_skb)
+ goto err_skb;
}
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
@@ -1935,38 +2331,17 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
-static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx,
- unsigned int *xdp_xmit,
- struct virtnet_rq_stats *stats)
+static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
+ struct sk_buff *skb, u8 flags)
{
- struct net_device *dev = vi->dev;
- struct sk_buff *skb;
struct virtio_net_common_hdr *hdr;
-
- if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
- pr_debug("%s: short packet %i\n", dev->name, len);
- DEV_STATS_INC(dev, rx_length_errors);
- virtnet_rq_free_buf(vi, rq, buf);
- return;
- }
-
- if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
- stats);
- else if (vi->big_packets)
- skb = receive_big(dev, vi, rq, buf, len, stats);
- else
- skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
-
- if (unlikely(!skb))
- return;
+ struct net_device *dev = vi->dev;
hdr = skb_vnet_common_hdr(skb);
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
@@ -1990,6 +2365,45 @@ frame_err:
dev_kfree_skb(skb);
}
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, unsigned int len, void **ctx,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct net_device *dev = vi->dev;
+ struct sk_buff *skb;
+ u8 flags;
+
+ if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ DEV_STATS_INC(dev, rx_length_errors);
+ virtnet_rq_free_buf(vi, rq, buf);
+ return;
+ }
+
+ /* 1. Save the flags early, as the XDP program might overwrite them.
+ * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
+ * stay valid after XDP processing.
+ * 2. XDP doesn't work with partially checksummed packets (refer to
+ * virtnet_xdp_set()), so packets marked as
+ * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
+ */
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
+
+ if (vi->mergeable_rx_bufs)
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+ stats);
+ else if (vi->big_packets)
+ skb = receive_big(dev, vi, rq, buf, len, stats);
+ else
+ skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
+
+ if (unlikely(!skb))
+ return;
+
+ virtnet_receive_done(vi, rq, skb, flags);
+}
+
/* Unlike mergeable buffers, all buffers are allocated to the
* same size, except for the headroom. For this reason we do
* not need to use mergeable_len_to_ctx here - it is enough
@@ -2148,7 +2562,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
int err;
- bool oom;
+
+ if (rq->xsk_pool) {
+ err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
+ goto kick;
+ }
do {
if (vi->mergeable_rx_bufs)
@@ -2158,10 +2576,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
else
err = add_recvbuf_small(vi, rq, gfp);
- oom = err == -ENOMEM;
if (err)
break;
} while (rq->vq->num_free);
+
+kick:
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
unsigned long flags;
@@ -2170,7 +2589,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
- return !oom;
+ return err != -ENOMEM;
}
static void skb_recv_done(struct virtqueue *rvq)
@@ -2241,32 +2660,68 @@ static void refill_work(struct work_struct *work)
}
}
-static int virtnet_receive(struct receive_queue *rq, int budget,
- unsigned int *xdp_xmit)
+static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ int budget,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ unsigned int len;
+ int packets = 0;
+ void *buf;
+
+ while (packets < budget) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (!buf)
+ break;
+
+ virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
+ packets++;
+ }
+
+ return packets;
+}
+
+static int virtnet_receive_packets(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ int budget,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
- struct virtnet_rq_stats stats = {};
unsigned int len;
int packets = 0;
void *buf;
- int i;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
-
while (packets < budget &&
(buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
- receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+ receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
packets++;
}
} else {
while (packets < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
packets++;
}
}
+ return packets;
+}
+
+static int virtnet_receive(struct receive_queue *rq, int budget,
+ unsigned int *xdp_xmit)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct virtnet_rq_stats stats = {};
+ int i, packets;
+
+ if (rq->xsk_pool)
+ packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
+ else
+ packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
+
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
spin_lock(&vi->refill_lock);
@@ -2295,7 +2750,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return packets;
}
-static void virtnet_poll_cleantx(struct receive_queue *rq)
+static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int index = vq2rxq(rq->vq);
@@ -2313,7 +2768,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
do {
virtqueue_disable_cb(sq->vq);
- free_old_xmit(sq, true);
+ free_old_xmit(sq, txq, !!budget);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
@@ -2336,12 +2791,13 @@ static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue
if (!rq->packets_in_napi)
return;
- u64_stats_update_begin(&rq->stats.syncp);
+ /* No protection is needed when fetching the stats, since the fetcher
+ * and the updater of the stats run in the same context.
+ */
dim_update_sample(rq->calls,
u64_stats_read(&rq->stats.packets),
u64_stats_read(&rq->stats.bytes),
&cur_sample);
- u64_stats_update_end(&rq->stats.syncp);
net_dim(&rq->dim, cur_sample);
rq->packets_in_napi = 0;
@@ -2357,7 +2813,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
unsigned int xdp_xmit = 0;
bool napi_complete;
- virtnet_poll_cleantx(rq);
+ virtnet_poll_cleantx(rq, budget);
received = virtnet_receive(rq, budget, &xdp_xmit);
rq->packets_in_napi += received;
@@ -2412,6 +2868,7 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
goto err_xdp_reg_mem_model;
virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+ netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
return 0;
@@ -2421,6 +2878,13 @@ err_xdp_reg_mem_model:
return err;
}
+static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
+{
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return;
+ net_dim_work_cancel(dim);
+}
+
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2447,7 +2911,7 @@ err_enable_qp:
for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
- cancel_work_sync(&vi->rq[i].dim.work);
+ virtnet_cancel_dim(vi, &vi->rq[i].dim);
}
return err;
@@ -2471,7 +2935,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
virtqueue_disable_cb(sq->vq);
- free_old_xmit(sq, true);
+ free_old_xmit(sq, txq, !!budget);
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
if (netif_tx_queue_stopped(txq)) {
@@ -2505,7 +2969,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
-static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
@@ -2549,7 +3013,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
return num_sg;
num_sg++;
}
- return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
+ return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+ skb_to_ptr(skb, orphan), GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -2559,24 +3024,25 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct send_queue *sq = &vi->sq[qnum];
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
- bool kick = !netdev_xmit_more();
+ bool xmit_more = netdev_xmit_more();
bool use_napi = sq->napi.weight;
+ bool kick;
/* Free up any pending old buffers before queueing new ones. */
do {
if (use_napi)
virtqueue_disable_cb(sq->vq);
- free_old_xmit(sq, false);
+ free_old_xmit(sq, txq, false);
- } while (use_napi && kick &&
+ } while (use_napi && !xmit_more &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
/* timestamp packet in software */
skb_tx_timestamp(skb);
/* Try to transmit */
- err = xmit_skb(sq, skb);
+ err = xmit_skb(sq, skb, !use_napi);
/* This should not happen! */
if (unlikely(err)) {
@@ -2598,7 +3064,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
check_sq_full_and_disable(vi, dev, sq);
- if (kick || netif_xmit_stopped(txq)) {
+ kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
+ !xmit_more || netif_xmit_stopped(txq);
+ if (kick) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.kicks);
@@ -2609,37 +3077,49 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int virtnet_rx_resize(struct virtnet_info *vi,
- struct receive_queue *rq, u32 ring_num)
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
- int err, qindex;
-
- qindex = rq - vi->rq;
if (running) {
napi_disable(&rq->napi);
- cancel_work_sync(&rq->dim.work);
+ virtnet_cancel_dim(vi, &rq->dim);
}
+}
- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
- if (err)
- netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ bool running = netif_running(vi->dev);
if (!try_fill_recv(vi, rq, GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
if (running)
virtnet_napi_enable(rq->vq, &rq->napi);
+}
+
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ virtnet_rx_pause(vi, rq);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ virtnet_rx_resume(vi, rq);
return err;
}
-static int virtnet_tx_resize(struct virtnet_info *vi,
- struct send_queue *sq, u32 ring_num)
+static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
{
bool running = netif_running(vi->dev);
struct netdev_queue *txq;
- int err, qindex;
+ int qindex;
qindex = sq - vi->sq;
@@ -2660,10 +3140,17 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
netif_stop_subqueue(vi->dev, qindex);
__netif_tx_unlock_bh(txq);
+}
- err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
- if (err)
- netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int qindex;
+
+ qindex = sq - vi->sq;
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
__netif_tx_lock_bh(txq);
sq->reset = false;
@@ -2672,6 +3159,23 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
if (running)
virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
+ u32 ring_num)
+{
+ int qindex, err;
+
+ qindex = sq - vi->sq;
+
+ virtnet_tx_pause(vi, sq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ virtnet_tx_resume(vi, sq);
+
return err;
}
@@ -2686,6 +3190,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
{
struct scatterlist *sgs[5], hdr, stat;
u32 out_num = 0, tmp, in_num = 0;
+ bool ok;
int ret;
/* Caller should know better */
@@ -2731,8 +3236,9 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
}
unlock:
+ ok = vi->ctrl->status == VIRTIO_NET_OK;
mutex_unlock(&vi->cvq_lock);
- return vi->ctrl->status == VIRTIO_NET_OK;
+ return ok;
}
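
The virtnet_send_command_reply() change snapshots ctrl->status into a local while the cvq mutex is still held, so a command issued by another thread cannot overwrite the status between the unlock and the return. The same pattern in plain pthreads, with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_status;

/* Racy: shared_status may change between the unlock and the read. */
static bool send_cmd_racy(void)
{
        pthread_mutex_lock(&lock);
        shared_status = 1;             /* ... issue command, get reply ... */
        pthread_mutex_unlock(&lock);
        return shared_status == 1;     /* read outside the lock */
}

/* Fixed: snapshot the result before dropping the lock. */
static bool send_cmd_fixed(void)
{
        bool ok;

        pthread_mutex_lock(&lock);
        shared_status = 1;             /* ... issue command, get reply ... */
        ok = (shared_status == 1);
        pthread_mutex_unlock(&lock);
        return ok;
}

int main(void)
{
        printf("racy=%d fixed=%d\n", send_cmd_racy(), send_cmd_fixed());
        return 0;
}
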
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
@@ -2878,7 +3384,7 @@ static int virtnet_close(struct net_device *dev)
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
- cancel_work_sync(&vi->rq[i].dim.work);
+ virtnet_cancel_dim(vi, &vi->rq[i].dim);
}
return 0;
@@ -4257,7 +4763,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
- int ret = 0;
int i;
if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -4267,27 +4772,27 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
return -EINVAL;
- /* Acquire all queues dim_locks */
- for (i = 0; i < vi->max_queue_pairs; i++)
- mutex_lock(&vi->rq[i].dim_lock);
-
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].dim_enabled = true;
- goto unlock;
+ mutex_unlock(&vi->rq[i].dim_lock);
+ }
+ return 0;
}
coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
- if (!coal_rx) {
- ret = -ENOMEM;
- goto unlock;
- }
+ if (!coal_rx)
+ return -ENOMEM;
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].dim_enabled = false;
+ mutex_unlock(&vi->rq[i].dim_lock);
+ }
}
/* Since the per-queue coalescing params can be set,
@@ -4300,22 +4805,19 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
- &sgs_rx)) {
- ret = -EINVAL;
- goto unlock;
- }
+ &sgs_rx))
+ return -EINVAL;
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
- }
-unlock:
- for (i = vi->max_queue_pairs - 1; i >= 0; i--)
mutex_unlock(&vi->rq[i].dim_lock);
+ }
- return ret;
+ return 0;
}
static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -4408,7 +4910,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
if (!rq->dim_enabled)
goto out;
- update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ update_moder = net_dim_get_rx_irq_moder(dev, dim);
if (update_moder.usec != rq->intr_coal.max_usecs ||
update_moder.pkts != rq->intr_coal.max_packets) {
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
@@ -4417,9 +4919,9 @@ static void virtnet_rx_dim_work(struct work_struct *work)
if (err)
pr_debug("%s: Failed to send dim parameters on rxq%d\n",
dev->name, qnum);
- dim->state = DIM_START_MEASURE;
}
out:
+ dim->state = DIM_START_MEASURE;
mutex_unlock(&rq->dim_lock);
}
@@ -4911,10 +5413,144 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
return virtnet_set_guest_offloads(vi, offloads);
}
+static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
+ struct xsk_buff_pool *pool)
+{
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (pool) {
+ err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
+ if (err < 0)
+ return err;
+
+ err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err < 0)
+ goto unreg;
+
+ xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
+ }
+
+ virtnet_rx_pause(vi, rq);
+
+ err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
+ if (err) {
+ netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ pool = NULL;
+ }
+
+ rq->xsk_pool = pool;
+
+ virtnet_rx_resume(vi, rq);
+
+ if (pool)
+ return 0;
+
+unreg:
+ xdp_rxq_info_unreg(&rq->xsk_rxq_info);
+ return err;
+}
+
+static int virtnet_xsk_pool_enable(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct receive_queue *rq;
+ struct device *dma_dev;
+ struct send_queue *sq;
+ int err, size;
+
+ if (vi->hdr_len > xsk_pool_get_headroom(pool))
+ return -EINVAL;
+
+ /* In big_packets mode, XDP cannot work, so there is no need to
+ * initialize the xsk state of the rq.
+ */
+ if (vi->big_packets && !vi->mergeable_rx_bufs)
+ return -ENOENT;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ sq = &vi->sq[qid];
+ rq = &vi->rq[qid];
+
+ /* xsk assumes that tx and rx use the same dma device: AF_XDP may
+ * receive into a buffer on the rx side and reuse that same buffer for
+ * transmission on the tx side, so the dma dev of the sq and the rq
+ * must be the same one.
+ *
+ * But vq->dma_dev allows each vq to have its own dma dev, so check
+ * that the rq and sq actually share the same dma dev.
+ */
+ if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
+ return -EINVAL;
+
+ dma_dev = virtqueue_dma_dev(rq->vq);
+ if (!dma_dev)
+ return -EINVAL;
+
+ size = virtqueue_get_vring_size(rq->vq);
+
+ rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL);
+ if (!rq->xsk_buffs)
+ return -ENOMEM;
+
+ err = xsk_pool_dma_map(pool, dma_dev, 0);
+ if (err)
+ goto err_xsk_map;
+
+ err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
+ if (err)
+ goto err_rq;
+
+ return 0;
+
+err_rq:
+ xsk_pool_dma_unmap(pool, 0);
+err_xsk_map:
+ return err;
+}
+
+static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct xsk_buff_pool *pool;
+ struct receive_queue *rq;
+ int err;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ rq = &vi->rq[qid];
+
+ pool = rq->xsk_pool;
+
+ err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+
+ xsk_pool_dma_unmap(pool, 0);
+
+ kvfree(rq->xsk_buffs);
+
+ return err;
+}
+
+static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ if (xdp->xsk.pool)
+ return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
+ xdp->xsk.queue_id);
+ else
+ return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
+}
+
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
sizeof(struct skb_shared_info));
unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
struct virtnet_info *vi = netdev_priv(dev);
@@ -5036,6 +5672,8 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
+ case XDP_SETUP_XSK_POOL:
+ return virtnet_xsk_pool_setup(dev, xdp);
default:
return -EINVAL;
}
@@ -5108,6 +5746,36 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
}
+static int virtnet_init_irq_moder(struct virtnet_info *vi)
+{
+ u8 profile_flags = 0, coal_flags = 0;
+ int ret, i;
+
+ profile_flags |= DIM_PROFILE_RX;
+ coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS;
+ ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags,
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE,
+ 0, virtnet_rx_dim_work, NULL);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ net_dim_setting(vi->dev, &vi->rq[i].dim, false);
+
+ return 0;
+}
+
+static void virtnet_free_irq_moder(struct virtnet_info *vi)
+{
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return;
+
+ rtnl_lock();
+ net_dim_free_irq_moder(vi->dev);
+ rtnl_unlock();
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -5120,6 +5788,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
+ .ndo_xsk_wakeup = virtnet_xsk_wakeup,
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
.ndo_set_features = virtnet_set_features,
@@ -5387,9 +6056,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
- INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
- vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
@@ -5668,8 +6334,16 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
/* (!csum && gso) case will be fixed by register_netdev() */
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
- dev->features |= NETIF_F_RXCSUM;
+
+ /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
+ * need to calculate checksums for partially checksummed packets,
+ * as they're considered valid by the upper layer.
+ * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
+ * receives fully checksummed packets. The device may assist in
+ * validating these packets' checksums, so the driver won't have to.
+ */
+ dev->features |= NETIF_F_RXCSUM;
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_GRO_HW;
@@ -5810,6 +6484,10 @@ static int virtnet_probe(struct virtio_device *vdev)
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->sq[i].napi.weight)
vi->sq[i].intr_coal.max_packets = 1;
+
+ err = virtnet_init_irq_moder(vi);
+ if (err)
+ goto free;
}
#ifdef CONFIG_SYSFS
@@ -5961,6 +6639,8 @@ static void virtnet_remove(struct virtio_device *vdev)
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);
+ virtnet_free_irq_moder(vi);
+
unregister_netdev(vi->dev);
net_failover_destroy(vi->failover);
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index f82870c10205..59ef494ce2e0 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2024, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 41d6767283a6..5c5148768039 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -80,6 +80,8 @@ enum {
#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
+#define VMXNET3_PMC_PSEUDO_TSC 0x10003
+
enum {
VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
@@ -123,6 +125,8 @@ enum {
VMXNET3_CMD_GET_RESERVED4,
VMXNET3_CMD_GET_MAX_CAPABILITIES,
VMXNET3_CMD_GET_DCR0_REG,
+ VMXNET3_CMD_GET_TSRING_DESC_SIZE,
+ VMXNET3_CMD_GET_DISABLED_OFFLOADS,
};
/*
@@ -254,6 +258,24 @@ struct Vmxnet3_RxDesc {
#define VMXNET3_RCD_HDR_INNER_SHIFT 13
+struct Vmxnet3TSInfo {
+ u64 tsData:56;
+ u64 tsType:4;
+ u64 tsi:1; // set when a timestamp has been written
+ u64 pad:3;
+ u64 pad2;
+};
+
+struct Vmxnet3_TxTSDesc {
+ struct Vmxnet3TSInfo ts;
+ u64 pad[14];
+};
+
+struct Vmxnet3_RxTSDesc {
+ struct Vmxnet3TSInfo ts;
+ u64 pad[14];
+};
+
struct Vmxnet3_RxCompDesc {
#ifdef __BIG_ENDIAN_BITFIELD
u32 ext2:1;
@@ -427,6 +449,13 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+/* Rx TS Ring buffer size must be a multiple of 64 bytes */
+#define VMXNET3_RXTS_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXTS_DESC_SIZE_MASK (VMXNET3_RXTS_DESC_SIZE_ALIGN - 1)
+/* Tx TS Ring buffer size must be a multiple of 64 bytes */
+#define VMXNET3_TXTS_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXTS_DESC_SIZE_MASK (VMXNET3_TXTS_DESC_SIZE_ALIGN - 1)
+
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
@@ -439,6 +468,9 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+#define VMXNET3_TXTS_DESC_MAX_SIZE 256
+#define VMXNET3_RXTS_DESC_MAX_SIZE 256
+
/* a list of reasons for queue stop */
enum {
@@ -546,6 +578,24 @@ struct Vmxnet3_RxQueueConf {
};
+struct Vmxnet3_LatencyConf {
+ u16 sampleRate;
+ u16 pad;
+};
+
+struct Vmxnet3_TxQueueTSConf {
+ __le64 txTSRingBasePA;
+ __le16 txTSRingDescSize; /* size of tx timestamp ring buffer */
+ u16 pad;
+ struct Vmxnet3_LatencyConf latencyConf;
+};
+
+struct Vmxnet3_RxQueueTSConf {
+ __le64 rxTSRingBasePA;
+ __le16 rxTSRingDescSize; /* size of rx timestamp ring buffer */
+ u16 pad[3];
+};
+
enum vmxnet3_intr_mask_mode {
VMXNET3_IMM_AUTO = 0,
VMXNET3_IMM_ACTIVE = 1,
@@ -679,7 +729,8 @@ struct Vmxnet3_TxQueueDesc {
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_TxStats stats;
- u8 _pad[88]; /* 128 aligned */
+ struct Vmxnet3_TxQueueTSConf tsConf;
+ u8 _pad[72]; /* 128 aligned */
};
@@ -689,7 +740,8 @@ struct Vmxnet3_RxQueueDesc {
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_RxStats stats;
- u8 __pad[88]; /* 128 aligned */
+ struct Vmxnet3_RxQueueTSConf tsConf;
+ u8 __pad[72]; /* 128 aligned */
};
struct Vmxnet3_SetPolling {
@@ -861,4 +913,7 @@ struct Vmxnet3_DriverShared {
/* when new capability is introduced, update VMXNET3_CAP_MAX */
#define VMXNET3_CAP_MAX VMXNET3_CAP_VERSION_7_MAX
+#define VMXNET3_OFFLOAD_TSO BIT(0)
+#define VMXNET3_OFFLOAD_LRO BIT(1)
+
#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 89ca6e75fcc6..b70654c7ad34 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -143,6 +143,32 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
+static u64
+vmxnet3_get_cycles(int pmc)
+{
+#ifdef CONFIG_X86
+ return native_read_pmc(pmc);
+#else
+ return 0;
+#endif
+}
+
+static bool
+vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate)
+{
+#ifdef CONFIG_X86
+ if (rate > 0) {
+ if (tq->tsPktCount == 1) {
+ if (rate != 1)
+ tq->tsPktCount = rate;
+ return true;
+ }
+ tq->tsPktCount--;
+ }
+#endif
+ return false;
+}
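
vmxnet3_apply_timestamp() stamps one packet out of every `rate` using a countdown: when the counter reaches 1 the packet is stamped and the counter is reloaded with the rate, and seeding tsPktCount to 1 makes the very first packet eligible. A standalone sketch of the same sampler:

#include <stdbool.h>
#include <stdio.h>

struct sampler {
        unsigned int count;            /* seeded to 1 so the first packet hits */
};

static bool should_sample(struct sampler *s, unsigned int rate)
{
        if (!rate)
                return false;
        if (s->count == 1) {
                if (rate != 1)
                        s->count = rate;
                return true;
        }
        s->count--;
        return false;
}

int main(void)
{
        struct sampler s = { .count = 1 };
        unsigned int i;

        /* With rate = 4, packets 0, 4, 8 get stamped. */
        for (i = 0; i < 10; i++)
                printf("pkt %u: %s\n", i, should_sample(&s, 4) ? "stamp" : "-");
        return 0;
}
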
+
/* Check if capability is supported by UPT device or
* UPT is even requested
*/
@@ -498,6 +524,12 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
+ if (tq->ts_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * tq->tx_ts_desc_size,
+ tq->ts_ring.base, tq->ts_ring.basePA);
+ tq->ts_ring.base = NULL;
+ }
if (tq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
@@ -535,6 +567,10 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
memset(tq->data_ring.base, 0,
tq->data_ring.size * tq->txdata_desc_size);
+ if (tq->ts_ring.base)
+ memset(tq->ts_ring.base, 0,
+ tq->tx_ring.size * tq->tx_ts_desc_size);
+
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc));
@@ -573,6 +609,18 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
goto err;
}
+ if (tq->tx_ts_desc_size != 0) {
+ tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * tq->tx_ts_desc_size,
+ &tq->ts_ring.basePA, GFP_KERNEL);
+ if (!tq->ts_ring.base) {
+ netdev_err(adapter->netdev, "failed to allocate tx ts ring\n");
+ tq->tx_ts_desc_size = 0;
+ }
+ } else {
+ tq->ts_ring.base = NULL;
+ }
+
tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA, GFP_KERNEL);
@@ -861,6 +909,11 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
/* set the last buf_info for the pkt */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+ if (tq->tx_ts_desc_size != 0) {
+ ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base +
+ tbi->sop_idx * tq->tx_ts_desc_size);
+ ctx->ts_txd->ts.tsi = 0;
+ }
return 0;
}
@@ -968,7 +1021,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb_headlen(skb));
}
- if (skb->len <= VMXNET3_HDR_COPY_SIZE)
+ if (skb->len <= tq->txdata_desc_size)
ctx->copy_size = skb->len;
/* make sure headers are accessible directly */
@@ -1259,6 +1312,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
gdesc->txd.tci = skb_vlan_tag_get(skb);
}
+ if (tq->tx_ts_desc_size != 0 &&
+ adapter->latencyConf->sampleRate != 0) {
+ if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) {
+ ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
+ ctx.ts_txd->ts.tsi = 1;
+ }
+ }
+
/* Ensure that the write to (&gdesc->txd)->gen will be observed after
* all other writes to &gdesc->txd.
*/
@@ -1608,6 +1669,15 @@ skip_xdp:
skip_page_frags = false;
ctx->skb = rbi->skb;
+ if (rq->rx_ts_desc_size != 0 && rcd->ext2) {
+ struct Vmxnet3_RxTSDesc *ts_rxd;
+
+ ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base +
+ idx * rq->rx_ts_desc_size);
+ ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
+ ts_rxd->ts.tsi = 1;
+ }
+
rxDataRingUsed =
VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
len = rxDataRingUsed ? rcd->len : rbi->len;
@@ -2007,6 +2077,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->data_ring.base = NULL;
}
+ if (rq->ts_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[0].size * rq->rx_ts_desc_size,
+ rq->ts_ring.base, rq->ts_ring.basePA);
+ rq->ts_ring.base = NULL;
+ }
+
if (rq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
* sizeof(struct Vmxnet3_RxCompDesc),
@@ -2034,8 +2111,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
rq->data_ring.base,
rq->data_ring.basePA);
rq->data_ring.base = NULL;
- rq->data_ring.desc_size = 0;
}
+ rq->data_ring.desc_size = 0;
}
}
@@ -2090,6 +2167,10 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
}
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
+ if (rq->ts_ring.base)
+ memset(rq->ts_ring.base, 0,
+ rq->rx_ring[0].size * rq->rx_ts_desc_size);
+
/* reset the comp ring */
rq->comp_ring.next2proc = 0;
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
@@ -2160,6 +2241,21 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq->data_ring.desc_size = 0;
}
+ if (rq->rx_ts_desc_size != 0) {
+ sz = rq->rx_ring[0].size * rq->rx_ts_desc_size;
+ rq->ts_ring.base =
+ dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->ts_ring.basePA,
+ GFP_KERNEL);
+ if (!rq->ts_ring.base) {
+ netdev_err(adapter->netdev,
+ "rx ts ring will be disabled\n");
+ rq->rx_ts_desc_size = 0;
+ }
+ } else {
+ rq->ts_ring.base = NULL;
+ }
+
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->comp_ring.basePA,
@@ -2759,6 +2855,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
+ struct Vmxnet3_TxQueueTSConf *tqtsc;
+ struct Vmxnet3_RxQueueTSConf *rqtsc;
int i;
memset(shared, 0, sizeof(*shared));
@@ -2815,6 +2913,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
tqc->ddLen = cpu_to_le32(0);
tqc->intrIdx = tq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ tqtsc = &adapter->tqd_start[i].tsConf;
+ tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA);
+ tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size);
+ }
}
/* rx queue settings */
@@ -2837,6 +2940,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rqc->rxDataRingDescSize =
cpu_to_le16(rq->data_ring.desc_size);
}
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ rqtsc = &adapter->rqd_start[i].tsConf;
+ rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA);
+ rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size);
+ }
}
#ifdef VMXNET3_RSS
@@ -3299,6 +3407,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
tq->stopped = true;
tq->adapter = adapter;
tq->qid = i;
+ tq->tx_ts_desc_size = adapter->tx_ts_desc_size;
+ tq->tsPktCount = 1;
err = vmxnet3_tq_create(tq, adapter);
/*
* Too late to change num_tx_queues. We cannot do away with
@@ -3320,6 +3430,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
rq->shared = &adapter->rqd_start[i].ctrl;
rq->adapter = adapter;
rq->data_ring.desc_size = rxdata_desc_size;
+ rq->rx_ts_desc_size = adapter->rx_ts_desc_size;
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
@@ -3361,14 +3472,15 @@ vmxnet3_open(struct net_device *netdev)
if (VMXNET3_VERSION_GE_3(adapter)) {
unsigned long flags;
u16 txdata_desc_size;
+ u32 ret;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
- txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
- VMXNET3_REG_CMD);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ txdata_desc_size = ret & 0xffff;
if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
(txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
(txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
@@ -3377,10 +3489,40 @@ vmxnet3_open(struct net_device *netdev)
} else {
adapter->txdata_desc_size = txdata_desc_size;
}
+ if (VMXNET3_VERSION_GE_9(adapter))
+ adapter->rxdata_desc_size = (ret >> 16) & 0xffff;
} else {
adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
}
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ unsigned long flags;
+ u16 tx_ts_desc_size = 0;
+ u16 rx_ts_desc_size = 0;
+ u32 ret;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TSRING_DESC_SIZE);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (ret > 0) {
+ tx_ts_desc_size = (ret & 0xff);
+ rx_ts_desc_size = ((ret >> 16) & 0xff);
+ }
+ if (tx_ts_desc_size > VMXNET3_TXTS_DESC_MAX_SIZE ||
+ tx_ts_desc_size & VMXNET3_TXTS_DESC_SIZE_MASK)
+ tx_ts_desc_size = 0;
+ if (rx_ts_desc_size > VMXNET3_RXTS_DESC_MAX_SIZE ||
+ rx_ts_desc_size & VMXNET3_RXTS_DESC_SIZE_MASK)
+ rx_ts_desc_size = 0;
+ adapter->tx_ts_desc_size = tx_ts_desc_size;
+ adapter->rx_ts_desc_size = rx_ts_desc_size;
+ } else {
+ adapter->tx_ts_desc_size = 0;
+ adapter->rx_ts_desc_size = 0;
+ }
+
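
Both new GET commands pack two sizes into a single 32-bit BAR register (tx in the low bits, rx in the high half), and the driver range- and alignment-checks each half before trusting it. A small sketch of the unpack-and-validate step, reusing the 64-byte alignment mask from the header (the register value here is made up):

#include <stdint.h>
#include <stdio.h>

#define TS_DESC_MAX_SIZE    256
#define TS_DESC_SIZE_ALIGN  64
#define TS_DESC_SIZE_MASK   (TS_DESC_SIZE_ALIGN - 1)

static uint16_t sanitize(uint16_t size)
{
        /* Reject anything too large or not a multiple of 64 bytes. */
        if (size > TS_DESC_MAX_SIZE || (size & TS_DESC_SIZE_MASK))
                return 0;              /* 0 disables the ring */
        return size;
}

int main(void)
{
        uint32_t reg = (128u << 16) | 64u;  /* rx size in high half, tx in low */
        uint16_t tx = sanitize(reg & 0xff);
        uint16_t rx = sanitize((reg >> 16) & 0xff);

        printf("tx_ts_desc_size=%u rx_ts_desc_size=%u\n", tx, rx);
        return 0;
}
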
err = vmxnet3_create_queues(adapter,
adapter->tx_ring_size,
adapter->rx_ring_size,
@@ -3503,6 +3645,15 @@ static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
+
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_DISABLED_OFFLOADS);
+ adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
@@ -3520,6 +3671,16 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) {
+ netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ }
+
+ if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) {
+ netdev->hw_features &= ~(NETIF_F_LRO);
+ netdev->hw_enc_features &= ~(NETIF_F_LRO);
+ }
+
if (VMXNET3_VERSION_GE_7(adapter)) {
unsigned long flags;
@@ -3790,7 +3951,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
- int size;
+ int size, i;
int num_tx_queues;
int num_rx_queues;
int queues;
@@ -3857,42 +4018,14 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_7)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_7);
- adapter->version = VMXNET3_REV_7 + 1;
- } else if (ver & (1 << VMXNET3_REV_6)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_6);
- adapter->version = VMXNET3_REV_6 + 1;
- } else if (ver & (1 << VMXNET3_REV_5)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_5);
- adapter->version = VMXNET3_REV_5 + 1;
- } else if (ver & (1 << VMXNET3_REV_4)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_4);
- adapter->version = VMXNET3_REV_4 + 1;
- } else if (ver & (1 << VMXNET3_REV_3)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_3);
- adapter->version = VMXNET3_REV_3 + 1;
- } else if (ver & (1 << VMXNET3_REV_2)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_2);
- adapter->version = VMXNET3_REV_2 + 1;
- } else if (ver & (1 << VMXNET3_REV_1)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_1);
- adapter->version = VMXNET3_REV_1 + 1;
- } else {
+ for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) {
+ if (ver & (1 << i)) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i);
+ adapter->version = i + 1;
+ break;
+ }
+ }
+ if (i < VMXNET3_REV_1) {
dev_err(&pdev->dev,
"Incompatible h/w version (0x%x) for adapter\n", ver);
err = -EBUSY;
@@ -3992,6 +4125,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues);
+ if (VMXNET3_VERSION_GE_9(adapter))
+ adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf;
adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_PMConf),
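A minimal, user-space sketch of the unpacking and validation the new VMXNET3_CMD_GET_TSRING_DESC_SIZE handling performs above: the tx timestamp-descriptor size sits in bits 0-7 of the returned register, the rx size in bits 16-23, and anything out of range or misaligned falls back to 0 (disabled). The MAX/MASK values here are illustrative placeholders, not the driver's real constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real bounds come from the vmxnet3 headers. */
#define TXTS_DESC_MAX_SIZE  256
#define TXTS_DESC_SIZE_MASK 0x0f   /* must be 16-byte aligned */
#define RXTS_DESC_MAX_SIZE  256
#define RXTS_DESC_SIZE_MASK 0x0f

/* Unpack the GET_TSRING_DESC_SIZE result: tx size in bits 0-7,
 * rx size in bits 16-23; anything invalid falls back to 0 (disabled).
 */
static void parse_ts_desc_sizes(uint32_t ret, uint16_t *tx, uint16_t *rx)
{
	uint16_t tx_sz = 0, rx_sz = 0;

	if (ret > 0) {
		tx_sz = ret & 0xff;
		rx_sz = (ret >> 16) & 0xff;
	}
	if (tx_sz > TXTS_DESC_MAX_SIZE || (tx_sz & TXTS_DESC_SIZE_MASK))
		tx_sz = 0;
	if (rx_sz > RXTS_DESC_MAX_SIZE || (rx_sz & RXTS_DESC_SIZE_MASK))
		rx_sz = 0;

	*tx = tx_sz;
	*rx = rx_sz;
}

int main(void)
{
	uint16_t tx, rx;

	parse_ts_desc_sizes(0x00200010, &tx, &rx); /* tx = 16, rx = 32 */
	printf("tx_ts_desc_size=%u rx_ts_desc_size=%u\n",
	       (unsigned)tx, (unsigned)rx);
	return 0;
}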
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e8008d5378a..471f91c4204a 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 915aaf18c409..9f24d66dbb27 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -72,18 +72,20 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.7.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.9.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01070000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01090000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_9 8 /* Vmxnet3 Rev. 9 */
+#define VMXNET3_REV_8 7 /* Vmxnet3 Rev. 8 */
#define VMXNET3_REV_7 6 /* Vmxnet3 Rev. 7 */
#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
@@ -191,6 +193,11 @@ struct vmxnet3_tx_data_ring {
dma_addr_t basePA;
};
+struct vmxnet3_tx_ts_ring {
+ struct Vmxnet3_TxTSDesc *base;
+ dma_addr_t basePA;
+};
+
#define VMXNET3_MAP_NONE 0
#define VMXNET3_MAP_SINGLE BIT(0)
#define VMXNET3_MAP_PAGE BIT(1)
@@ -243,6 +250,7 @@ struct vmxnet3_tx_ctx {
u32 copy_size; /* # of bytes copied into the data ring */
union Vmxnet3_GenericDesc *sop_txd;
union Vmxnet3_GenericDesc *eop_txd;
+ struct Vmxnet3_TxTSDesc *ts_txd;
};
struct vmxnet3_tx_queue {
@@ -252,6 +260,7 @@ struct vmxnet3_tx_queue {
struct vmxnet3_cmd_ring tx_ring;
struct vmxnet3_tx_buf_info *buf_info;
struct vmxnet3_tx_data_ring data_ring;
+ struct vmxnet3_tx_ts_ring ts_ring;
struct vmxnet3_comp_ring comp_ring;
struct Vmxnet3_TxQueueCtrl *shared;
struct vmxnet3_tq_driver_stats stats;
@@ -260,6 +269,8 @@ struct vmxnet3_tx_queue {
* stopped */
int qid;
u16 txdata_desc_size;
+ u16 tx_ts_desc_size;
+ u16 tsPktCount;
} ____cacheline_aligned;
enum vmxnet3_rx_buf_type {
@@ -307,6 +318,11 @@ struct vmxnet3_rx_data_ring {
u16 desc_size;
};
+struct vmxnet3_rx_ts_ring {
+ struct Vmxnet3_RxTSDesc *base;
+ dma_addr_t basePA;
+};
+
struct vmxnet3_rx_queue {
char name[IFNAMSIZ + 8]; /* To identify interrupt */
struct vmxnet3_adapter *adapter;
@@ -314,6 +330,7 @@ struct vmxnet3_rx_queue {
struct vmxnet3_cmd_ring rx_ring[2];
struct vmxnet3_rx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_ts_ring ts_ring;
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
@@ -323,6 +340,7 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
+ u16 rx_ts_desc_size;
} ____cacheline_aligned;
#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
@@ -432,6 +450,11 @@ struct vmxnet3_adapter {
u16 rx_prod_offset;
u16 rx_prod2_offset;
struct bpf_prog __rcu *xdp_bpf_prog;
+ struct Vmxnet3_LatencyConf *latencyConf;
+ /* Size of buffer in the ts ring */
+ u16 tx_ts_desc_size;
+ u16 rx_ts_desc_size;
+ u32 disabledOffloads;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -463,6 +486,10 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_6 + 1)
#define VMXNET3_VERSION_GE_7(adapter) \
(adapter->version >= VMXNET3_REV_7 + 1)
+#define VMXNET3_VERSION_GE_8(adapter) \
+ (adapter->version >= VMXNET3_REV_8 + 1)
+#define VMXNET3_VERSION_GE_9(adapter) \
+ (adapter->version >= VMXNET3_REV_9 + 1)
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
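The VMXNET3_REV_8/9 defines above feed the new probe loop that replaced the long if/else chain: scan the VRRS bitmask from the highest known revision down and take the first bit that is set. A small stand-alone sketch of that selection logic, with the register write left out:

#include <stdint.h>
#include <stdio.h>

#define REV_1 0
#define REV_9 8

/* Return the 1-based version for the highest revision bit set in `vrrs`
 * that the driver knows about, or 0 if none match (the probe path would
 * then bail out with -EBUSY).
 */
static unsigned int pick_version(uint32_t vrrs)
{
	int i;

	for (i = REV_9; i >= REV_1; i--) {
		if (vrrs & (1u << i))
			return i + 1;   /* driver would also ack it via VRRS */
	}
	return 0;                       /* incompatible device */
}

int main(void)
{
	printf("%u\n", pick_version(0x1ff)); /* -> 9 */
	printf("%u\n", pick_version(0x003)); /* -> 2 */
	printf("%u\n", pick_version(0x000)); /* -> 0 (reject) */
	return 0;
}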
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 3a252ac5dd28..9af316cdd8b3 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -126,8 +126,8 @@ static void vrf_rx_stats(struct net_device *dev, int len)
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
- dstats->rx_packets++;
- dstats->rx_bytes += len;
+ u64_stats_inc(&dstats->rx_packets);
+ u64_stats_add(&dstats->rx_bytes, len);
u64_stats_update_end(&dstats->syncp);
}
@@ -137,33 +137,6 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
kfree_skb(skb);
}
-static void vrf_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_dstats *dstats;
- u64 tbytes, tpkts, tdrops, rbytes, rpkts;
- unsigned int start;
-
- dstats = per_cpu_ptr(dev->dstats, i);
- do {
- start = u64_stats_fetch_begin(&dstats->syncp);
- tbytes = dstats->tx_bytes;
- tpkts = dstats->tx_packets;
- tdrops = dstats->tx_drops;
- rbytes = dstats->rx_bytes;
- rpkts = dstats->rx_packets;
- } while (u64_stats_fetch_retry(&dstats->syncp, start));
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpkts;
- stats->tx_dropped += tdrops;
- stats->rx_bytes += rbytes;
- stats->rx_packets += rpkts;
- }
-}
-
static struct vrf_map *netns_vrf_map(struct net *net)
{
struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
@@ -408,10 +381,15 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
vrf_rx_stats(dev, len);
- else
- this_cpu_inc(dev->dstats->rx_drops);
+ } else {
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_drops);
+ u64_stats_update_end(&dstats->syncp);
+ }
return NETDEV_TX_OK;
}
@@ -599,19 +577,20 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+ u64_stats_update_begin(&dstats->syncp);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
- u64_stats_update_begin(&dstats->syncp);
- dstats->tx_packets++;
- dstats->tx_bytes += len;
- u64_stats_update_end(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_packets);
+ u64_stats_add(&dstats->tx_bytes, len);
} else {
- this_cpu_inc(dev->dstats->tx_drops);
+ u64_stats_inc(&dstats->tx_drops);
}
+ u64_stats_update_end(&dstats->syncp);
return ret;
}
@@ -1195,7 +1174,6 @@ static const struct net_device_ops vrf_netdev_ops = {
.ndo_uninit = vrf_dev_uninit,
.ndo_start_xmit = vrf_xmit,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_stats64 = vrf_get_stats64,
.ndo_add_slave = vrf_add_slave,
.ndo_del_slave = vrf_del_slave,
};
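The vrf change above moves all counters to u64_stats_inc()/u64_stats_add() inside an update section and drops the open-coded ndo_get_stats64, presumably relying on the core's default per-CPU dstats aggregation. A toy, single-threaded model of the begin/inc/end plus fetch/retry discipline, with the syncp reduced to a plain sequence counter (an assumption for illustration, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Writers bump the sequence number around updates; readers retry if it
 * changed or is odd, mirroring u64_stats_update_* and u64_stats_fetch_*.
 */
struct pcpu_dstats {
	unsigned int seq;
	uint64_t tx_packets, tx_bytes, tx_drops;
};

static struct pcpu_dstats stats[NR_CPUS];

static void tx_account(int cpu, unsigned int len, int ok)
{
	struct pcpu_dstats *d = &stats[cpu];

	d->seq++;                       /* u64_stats_update_begin() */
	if (ok) {
		d->tx_packets++;        /* u64_stats_inc() */
		d->tx_bytes += len;     /* u64_stats_add() */
	} else {
		d->tx_drops++;
	}
	d->seq++;                       /* u64_stats_update_end() */
}

static uint64_t total_tx_packets(void)
{
	uint64_t sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int start;
		uint64_t pkts;

		do {                    /* fetch_begin / fetch_retry */
			start = stats[cpu].seq;
			pkts = stats[cpu].tx_packets;
		} while (stats[cpu].seq != start || (start & 1));
		sum += pkts;
	}
	return sum;
}

int main(void)
{
	tx_account(0, 100, 1);
	tx_account(1, 60, 0);
	printf("tx_packets=%llu\n", (unsigned long long)total_tx_packets());
	return 0;
}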
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index f78dd0438843..ba59e92ab941 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
struct vxlan_fdb *f;
u32 ifindex = 0;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(src_mac))
+ return true;
+
#if IS_ENABLED(CONFIG_IPV6)
if (src_ip->sa.sa_family == AF_INET6 &&
(ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
@@ -1616,10 +1620,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
- /* Ignore packets from invalid src-address */
- if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
- return false;
-
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
@@ -2339,7 +2339,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_key *pkey;
struct ip_tunnel_key key;
struct vxlan_dev *vxlan = netdev_priv(dev);
- const struct iphdr *old_iph = ip_hdr(skb);
+ const struct iphdr *old_iph;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
unsigned int pkt_len = skb->len;
@@ -2353,8 +2353,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
bool use_cache;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
+ bool no_eth_encap;
__be32 vni = 0;
+ no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB);
+ if (!skb_vlan_inet_prepare(skb, no_eth_encap))
+ goto drop;
+
+ old_iph = ip_hdr(skb);
+
info = skb_tunnel_info(skb);
use_cache = ip_tunnel_dst_cache_usable(skb, info);
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 0ba714ca5185..4b8528206cc8 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
if (bits == 32) {
*(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
} else if (bits == 128) {
- ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
- ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
+ ((u64 *)dst)[0] = get_unaligned_be64(src);
+ ((u64 *)dst)[1] = get_unaligned_be64(src + 8);
}
}
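The allowedips change above avoids casting a possibly unaligned source pointer to __be64 by using get_unaligned_be64(). A small sketch of why that matters, using a byte-wise big-endian load that is safe for any alignment (the kernel helper is typically optimized per architecture; this is only the portable equivalent):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Alignment-safe big-endian 64-bit load: read byte-by-byte instead of
 * dereferencing the source through a wider type.
 */
static uint64_t load_be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* buf + 1 is deliberately misaligned for a 64-bit access. */
	uint8_t buf[17] = { 0 };
	uint8_t key[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
			    0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

	memcpy(buf + 1, key, sizeof(key));
	printf("%016llx %016llx\n",
	       (unsigned long long)load_be64(buf + 1),
	       (unsigned long long)load_be64(buf + 9));
	return 0;
}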
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 1ea4f874e367..7eb76724b3ed 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
*/
static inline int wg_cpumask_next_online(int *last_cpu)
{
- int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+ int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(cpu_online_mask);
- *last_cpu = cpu;
+ WRITE_ONCE(*last_cpu, cpu);
return cpu;
}
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 0d48e0f4a1ba..26e09c30d596 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
{
struct sk_buff *skb;
- if (skb_queue_empty(&peer->staged_packet_queue)) {
+ if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
GFP_ATOMIC);
if (unlikely(!skb))
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index e3fd48dd3909..a2d87c3ad196 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1550,7 +1550,7 @@ fail:
return retval;
}
-static void adm8211_stop(struct ieee80211_hw *dev)
+static void adm8211_stop(struct ieee80211_hw *dev, bool suspend)
{
struct adm8211_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 5a55db349cb5..156f3650c006 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1061,7 +1061,7 @@ err:
return error;
}
-static void ar5523_stop(struct ieee80211_hw *hw)
+static void ar5523_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar5523 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index e6ea884cafc1..876aed765833 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -45,6 +45,7 @@ config ATH10K_SNOC
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_SMEM
+ depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
select QCOM_SCM
select QCOM_QMI_HELPERS
help
@@ -67,6 +68,12 @@ config ATH10K_DEBUGFS
If unsure, say Y to make it easier to debug problems.
+config ATH10K_LEDS
+ bool
+ depends on ATH10K
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
+ default y
+
config ATH10K_SPECTRAL
bool "Atheros ath10k spectral scan support"
depends on ATH10K_DEBUGFS
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 142c777b287f..02bf9b629038 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -19,6 +19,7 @@ ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_ATH10K_LEDS) += leds.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index bdf0552cd1c3..b3294287bce1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -27,6 +27,7 @@
#include "testmode.h"
#include "wmi-ops.h"
#include "coredump.h"
+#include "leds.h"
unsigned int ath10k_debug_mask;
EXPORT_SYMBOL(ath10k_debug_mask);
@@ -68,6 +69,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 1,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -108,6 +110,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0 ubiquiti",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 0,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -149,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9887 hw1.0",
.patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 1,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -190,6 +194,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2 sdio",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 19,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -226,6 +231,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6164 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -266,6 +272,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -306,6 +313,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -346,6 +354,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -390,6 +399,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca99x0 hw2.0",
.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
.cck_rate_map_rev2 = true,
@@ -436,6 +446,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9984/qca9994 hw1.0",
.patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
@@ -488,6 +499,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9888 hw2.0",
.patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
@@ -538,6 +550,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.0",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -578,6 +591,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.1",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -620,6 +634,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.1 sdio",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 19,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -653,6 +668,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 0,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
@@ -698,6 +714,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dev_id = 0,
.bus = ATH10K_BUS_SNOC,
.name = "wcn3990 hw1.0",
+ .led_pin = 0,
.continuous_frag_desc = true,
.tx_chain_mask = 0x7,
.rx_chain_mask = 0x7,
@@ -3224,6 +3241,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_leds_start(ar);
+ if (status)
+ goto err_hif_stop;
+
return 0;
err_hif_stop:
@@ -3482,9 +3503,18 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_spectral_destroy;
}
+ status = ath10k_leds_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register leds: %d\n",
+ status);
+ goto err_thermal_unregister;
+ }
+
set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
return;
+err_thermal_unregister:
+ ath10k_thermal_unregister(ar);
err_spectral_destroy:
ath10k_spectral_destroy(ar);
err_debug_destroy:
@@ -3520,6 +3550,8 @@ void ath10k_core_unregister(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
return;
+ ath10k_leds_unregister(ar);
+
ath10k_thermal_unregister(ar);
/* Stop spectral before unregistering from mac80211 to remove the
* relayfs debugfs file cleanly. Otherwise the parent debugfs tree
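The register-work hunk above adds a new err_thermal_unregister label so a failed LED registration unwinds the steps before it in reverse order. A compact sketch of that goto-unwind convention with toy init/teardown helpers (the names here are placeholders, not ath10k functions):

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; }  /* fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

/* Each new init step gets a matching error label placed so that teardown
 * runs in reverse order of initialization.
 */
static int do_register(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto err_undo_a;
	err = init_c();                 /* new step, e.g. LED registration */
	if (err)
		goto err_undo_b;        /* unwind b first, then a */
	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	return do_register() ? 1 : 0;
}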
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index b00099f0b24e..446dca74f06a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
+#include <linux/leds.h>
#include "htt.h"
#include "htc.h"
@@ -1259,6 +1260,13 @@ struct ath10k {
} testmode;
struct {
+ struct gpio_led wifi_led;
+ struct led_classdev cdev;
+ char label[48];
+ u32 gpio_state_pin;
+ } leds;
+
+ struct {
/* protected by data_lock */
u32 rx_crc_err_drop;
u32 fw_crash_counter;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 48897e5eca06..442091c6dfd2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -512,6 +512,7 @@ struct ath10k_hw_params {
const char *name;
u32 patch_load_addr;
int uart_pin;
+ int led_pin;
u32 otp_exe_param;
/* Type of hw cycle counter wraparound logic, for more info
diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c
new file mode 100644
index 000000000000..9b1d04eb4265
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/leds.h>
+
+#include "core.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+#include "leds.h"
+
+static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath10k *ar = container_of(led_cdev, struct ath10k,
+ leds.cdev);
+ struct gpio_led *led = &ar->leds.wifi_led;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON)
+ goto out;
+
+ ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
+ ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+int ath10k_leds_start(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+ /* Under some circumstances the gpio pin gets reconfigured
+ * to its default state by the firmware, so we need to
+ * reconfigure it. This behaviour has only been seen on
+ * QCA9984 and QCA99XX devices so far.
+ */
+ ath10k_wmi_gpio_config(ar, ar->hw_params.led_pin, 0,
+ WMI_GPIO_PULL_NONE, WMI_GPIO_INTTYPE_DISABLE);
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, 1);
+
+ return 0;
+}
+
+int ath10k_leds_register(struct ath10k *ar)
+{
+ int ret;
+
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+ snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
+ wiphy_name(ar->hw->wiphy));
+ ar->leds.wifi_led.active_low = 1;
+ ar->leds.wifi_led.gpio = ar->hw_params.led_pin;
+ ar->leds.wifi_led.name = ar->leds.label;
+ ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+
+ ar->leds.cdev.name = ar->leds.label;
+ ar->leds.cdev.brightness_set_blocking = ath10k_leds_set_brightness_blocking;
+ ar->leds.cdev.default_trigger = ar->leds.wifi_led.default_trigger;
+
+ ret = led_classdev_register(wiphy_dev(ar->hw->wiphy), &ar->leds.cdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void ath10k_leds_unregister(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return;
+
+ led_classdev_unregister(&ar->leds.cdev);
+}
+
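The brightness handler in the new leds.c maps any non-zero brightness to "on" and lets active_low invert the electrical level before it is written out over WMI. A tiny sketch of that mapping on its own:

#include <stdio.h>

#define LED_OFF 0

/* Level written to the GPIO: "on" XOR active_low, as in
 * ath10k_leds_set_brightness_blocking() above.
 */
static unsigned int gpio_level(int brightness, int active_low)
{
	return (brightness != LED_OFF) ^ active_low;
}

int main(void)
{
	printf("on,  active_low=1 -> %u\n", gpio_level(255, 1)); /* 0 */
	printf("off, active_low=1 -> %u\n", gpio_level(0, 1));   /* 1 */
	printf("on,  active_low=0 -> %u\n", gpio_level(255, 0)); /* 1 */
	return 0;
}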
diff --git a/drivers/net/wireless/ath/ath10k/leds.h b/drivers/net/wireless/ath/ath10k/leds.h
new file mode 100644
index 000000000000..56325b0875e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LEDS_H_
+#define _LEDS_H_
+
+#include "core.h"
+
+#ifdef CONFIG_ATH10K_LEDS
+void ath10k_leds_unregister(struct ath10k *ar);
+int ath10k_leds_start(struct ath10k *ar);
+int ath10k_leds_register(struct ath10k *ar);
+#else
+static inline void ath10k_leds_unregister(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_leds_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_leds_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif
+#endif /* _LEDS_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e322b528baaf..a5da32e87106 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -25,6 +25,7 @@
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"
+#include "leds.h"
/*********/
/* Rates */
@@ -5362,7 +5363,7 @@ err:
return ret;
}
-static void ath10k_stop(struct ieee80211_hw *hw)
+static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath10k *ar = hw->priv;
u32 opt;
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 38e939f572a9..f1f33af0170a 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -1040,6 +1040,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
switch (event->type) {
case ATH10K_QMI_EVENT_SERVER_ARRIVE:
ath10k_qmi_event_server_arrive(qmi);
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_info(ar, "qmi not waiting for msa_ready indicator");
+ ath10k_qmi_event_msa_ready(qmi);
+ }
break;
case ATH10K_QMI_EVENT_SERVER_EXIT:
ath10k_qmi_event_server_exit(qmi);
@@ -1048,6 +1052,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
ath10k_qmi_event_fw_ready_ind(qmi);
break;
case ATH10K_QMI_EVENT_MSA_READY_IND:
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_warn(ar, "qmi unexpected msa_ready indicator");
+ break;
+ }
ath10k_qmi_event_msa_ready(qmi);
break;
default:
@@ -1077,6 +1085,9 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
qmi->msa_fixed_perm = true;
+ if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator"))
+ qmi->no_msa_ready_indicator = true;
+
ret = qmi_handle_init(&qmi->qmi_hdl,
WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
&ath10k_qmi_ops, qmi_msg_handler);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index 89464239fe96..0816eb4e4a18 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -107,6 +107,7 @@ struct ath10k_qmi {
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
bool msa_fixed_perm;
+ bool no_msa_ready_indicator;
enum ath10k_qmi_state state;
};
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index aa57d807491c..f3f6b5954b27 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -226,7 +226,10 @@ struct wmi_ops {
const struct wmi_bb_timing_cfg_arg *arg);
struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
const struct wmi_per_peer_per_tid_cfg_arg *arg);
+ struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode);
+ struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1122,6 +1125,35 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
+static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_gpio_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid);
+}
+
+static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_gpio_output)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid);
+}
+
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
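The two wrappers added above follow the usual optional-op pattern: firmware generations whose op table leaves the generator NULL get -EOPNOTSUPP instead of a crash. A self-contained sketch of that guard with a toy op table (names and signatures here are illustrative, not the WMI API):

#include <errno.h>
#include <stdio.h>

struct ops {
	int (*gpio_output)(unsigned int gpio, unsigned int set);
};

static int hw_gpio_output(unsigned int gpio, unsigned int set)
{
	printf("gpio %u <- %u\n", gpio, set);
	return 0;
}

/* Wrapper: refuse gracefully when the op is not implemented. */
static int gpio_output(const struct ops *ops, unsigned int gpio, unsigned int set)
{
	if (!ops->gpio_output)
		return -EOPNOTSUPP;
	return ops->gpio_output(gpio, set);
}

int main(void)
{
	struct ops with = { .gpio_output = hw_gpio_output };
	struct ops without = { 0 };

	gpio_output(&with, 17, 1);
	printf("unsupported: %d\n", gpio_output(&without, 17, 1));
	return 0;
}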
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index aed97fd121ba..dbaf26d6a7a6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -4606,6 +4606,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
+ /* .gen_gpio_config not implemented */
+ /* .gen_gpio_output not implemented */
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 80d255aaff1b..fe2344598364 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -7493,6 +7493,49 @@ ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
return skb;
}
+static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
+ u32 gpio_num, u32 input,
+ u32 pull_type, u32 intr_mode)
+{
+ struct wmi_gpio_config_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_config_cmd *)skb->data;
+ cmd->pull_type = __cpu_to_le32(pull_type);
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->input = __cpu_to_le32(input);
+ cmd->intr_mode = __cpu_to_le32(intr_mode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
+ gpio_num, input, pull_type, intr_mode);
+
+ return skb;
+}
+
+static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
+ u32 gpio_num, u32 set)
+{
+ struct wmi_gpio_output_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_output_cmd *)skb->data;
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->set = __cpu_to_le32(set);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
+ gpio_num, set);
+
+ return skb;
+}
+
static struct sk_buff *
ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
enum wmi_sta_ps_mode psmode)
@@ -9157,6 +9200,9 @@ static const struct wmi_ops wmi_ops = {
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9227,6 +9273,8 @@ static const struct wmi_ops wmi_10_1_ops = {
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9299,6 +9347,8 @@ static const struct wmi_ops wmi_10_2_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_pdev_enable_adaptive_cca not implemented */
};
@@ -9370,6 +9420,8 @@ static const struct wmi_ops wmi_10_2_4_ops = {
ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9451,6 +9503,8 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
.gen_echo = ath10k_wmi_op_gen_echo,
.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
};
int ath10k_wmi_attach(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2379501225a4..0faefc0a9a40 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3034,6 +3034,41 @@ enum wmi_10_4_feature_mask {
};
+/* WMI_GPIO_CONFIG_CMDID */
+enum {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN,
+};
+
+enum {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+/* WMI_GPIO_CONFIG_CMDID */
+struct wmi_gpio_config_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+ __le32 input; /* 0 - Output/ 1 - Input */
+ __le32 pull_type; /* Pull type defined above */
+ __le32 intr_mode; /* Interrupt mode defined above (Input) */
+} __packed;
+
+/* WMI_GPIO_OUTPUT_CMDID */
+struct wmi_gpio_output_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+ __le32 set; /* Set the GPIO pin */
+} __packed;
+
+/* WMI_GPIO_INPUT_EVENTID */
+struct wmi_gpio_input_event {
+ __le32 gpio_num; /* GPIO number which changed state */
+} __packed;
+
struct wmi_ext_resource_config_10_4_cmd {
/* contains enum wmi_host_platform_type */
__le32 host_platform_config;
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index ca0f17ddebba..e3ff4786c714 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -954,6 +954,36 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
+{
+ const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
+ struct platform_device *pdev = ab->pdev;
+
+ if (!ce_remap) {
+ /* no separate CE register space */
+ ab->mem_ce = ab->mem;
+ return 0;
+ }
+
+ /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
+ * and the space is not contiguous, hence remapping the CE registers
+ * to a new space for accessing them.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
+{
+ if (ab->hw_params.ce_remap)
+ iounmap(ab->mem_ce);
+}
+
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -1146,25 +1176,13 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
- ab->mem_ce = ab->mem;
-
- if (ab->hw_params.ce_remap) {
- const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
- /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
- * and the space is not contiguous, hence remapping the CE registers
- * to a new space for accessing them.
- */
- ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
- if (!ab->mem_ce) {
- dev_err(&pdev->dev, "ce ioremap error\n");
- ret = -ENOMEM;
- goto err_core_free;
- }
- }
+ ret = ath11k_ahb_ce_remap(ab);
+ if (ret)
+ goto err_core_free;
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
- goto err_core_free;
+ goto err_ce_unmap;
ret = ath11k_ahb_setup_smp2p_handle(ab);
if (ret)
@@ -1216,6 +1234,9 @@ err_release_smp2p_handle:
err_fw_deinit:
ath11k_ahb_fw_resource_deinit(ab);
+err_ce_unmap:
+ ath11k_ahb_ce_unmap(ab);
+
err_core_free:
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
@@ -1248,9 +1269,7 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab)
ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
-
- if (ab->hw_params.ce_remap)
- iounmap(ab->mem_ce);
+ ath11k_ahb_ce_unmap(ab);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 69946fc70077..bcde2fcf02cf 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CE_H
@@ -146,7 +146,7 @@ struct ath11k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
@@ -156,7 +156,7 @@ struct ath11k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 3cc817a3b4a4..03187df26000 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -62,7 +62,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -148,7 +148,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -232,7 +232,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -320,7 +320,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.svc_to_ce_map_len = 18,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -404,7 +404,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -492,7 +492,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.svc_to_ce_map_len = 14,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -580,7 +580,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -604,7 +604,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = true,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 3,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
@@ -673,7 +673,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
.ce_remap = &ath11k_ce_remap_ipq5018,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018,
+ .num_rxdma_per_pdev = RXDMA_PER_PDEV_5018,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -744,7 +744,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -1009,6 +1009,16 @@ int ath11k_core_resume(struct ath11k_base *ab)
return -ETIMEDOUT;
}
+ if (ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ ret = ath11k_reg_set_cc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to set country code during resume: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = ath11k_dp_rx_pktlog_start(ab);
if (ret)
ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
@@ -1801,7 +1811,7 @@ static int ath11k_core_start(struct ath11k_base *ab)
}
/* put hardware to DBS mode */
- if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) {
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) {
ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
if (ret) {
ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
@@ -1978,23 +1988,20 @@ static void ath11k_update_11d(struct work_struct *work)
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
- struct wmi_set_current_country_params set_current_param = {};
int ret, i;
- spin_lock_bh(&ab->base_lock);
- memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2);
- spin_unlock_bh(&ab->base_lock);
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n",
- set_current_param.alpha2[0],
- set_current_param.alpha2[1]);
-
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
- memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ar->alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n",
+ ar->alpha2[0], ar->alpha2[1], i);
+
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"pdev id %d failed set current country code: %d\n",
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 205f40ee6b66..df24f0e409af 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -330,6 +330,9 @@ struct ath11k_chan_power_info {
s8 tx_power;
};
+/* ath11k only deals with 160 MHz, so 8 subchannels */
+#define ATH11K_NUM_PWR_LEVELS 8
+
/**
* struct ath11k_reg_tpc_power_info - regulatory TPC power info
* @is_psd_power: is PSD power or not
@@ -346,10 +349,10 @@ struct ath11k_reg_tpc_power_info {
u8 eirp_power;
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
- u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL];
+ u8 reg_max[ATH11K_NUM_PWR_LEVELS];
u8 ap_constraint_power;
- s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL];
- struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL];
+ s8 tpe[ATH11K_NUM_PWR_LEVELS];
+ struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
struct ath11k_vif {
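A quick check of the sizing comment behind ATH11K_NUM_PWR_LEVELS: a 160 MHz channel spans eight 20 MHz subchannels, one TPC power level each.

#include <assert.h>

int main(void)
{
	enum { MAX_BW_MHZ = 160, SUBCHAN_MHZ = 20, NUM_PWR_LEVELS = 8 };

	assert(MAX_BW_MHZ / SUBCHAN_MHZ == NUM_PWR_LEVELS);
	return 0;
}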
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 414a5ce279f7..57281a135dd7 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -668,7 +668,7 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
ar->debug.rx_filter = tlv_filter.rx_filter;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -1112,7 +1112,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
/* Clear rx filter set for monitor mode and rx status */
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -1171,7 +1171,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
HTT_RX_FP_DATA_FILTER_FLASG3;
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
ar->dp.mac_id + i,
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 1a62407e5a9f..fbf666d0ecf1 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -830,8 +830,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
BIT(id)) {
@@ -853,8 +853,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
ath11k_dp_process_reo_status(ab);
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
@@ -913,7 +913,7 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index afd481f5858f..86485580dd89 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -311,7 +311,7 @@ static void ath11k_dp_service_mon_ring(struct timer_list *t)
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
mod_timer(&ab->mon_reap_timer, jiffies +
@@ -324,7 +324,7 @@ static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
do {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
reaped += ath11k_dp_rx_process_mon_rings(ab, i,
NULL,
DP_MON_SERVICE_BUDGET);
@@ -468,7 +468,7 @@ static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
rx_ring = &dp->rxdma_mon_buf_ring;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
}
@@ -506,7 +506,7 @@ static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
}
@@ -522,7 +522,7 @@ static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
if (ab->hw_params.rx_mac_buf_ring)
ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
@@ -585,7 +585,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
if (ar->ab->hw_params.rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -598,7 +598,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
HAL_RXDMA_DST, 0, dp->mac_id + i,
DP_RXDMA_ERR_DST_RING_SIZE);
@@ -608,7 +608,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
ret = ath11k_dp_srng_setup(ar->ab,
srng,
@@ -1877,8 +1877,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
-static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
- enum hal_encrypt_type enctype)
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
@@ -2990,11 +2989,52 @@ ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
}
}
+static enum dp_mon_status_buf_state
+ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
struct ath11k *ar;
const struct ath11k_hw_hal_params *hal_params;
+ enum dp_mon_status_buf_state reap_status;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
struct ath11k_mon_data *pmon;
@@ -3057,15 +3097,38 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl), buf_id);
- /* If done status is missing, hold onto status
- * ring until status is done for this status
- * ring buffer.
- * Keep HP in mon_status_ring unchanged,
- * and break from here.
- * Check status for same buffer for next time
+ /* RxDMA status done bit might not be set even
+ * though tp is moved by HW.
*/
- pmon->buf_state = DP_MON_STATUS_NO_DMA;
- break;
+
+ /* If the done status is missing:
+ * 1. As per the MAC team's suggestion, when the HP + 1 entry
+ * is peeked and its DMA is not done, but the HP + 2 entry's
+ * DMA done is set, skip the HP + 1 entry and start
+ * processing it in the next interrupt.
+ * 2. If the HP + 2 entry's DMA done is not set either, keep
+ * polling the HP + 1 entry until its DMA done is set, i.e.
+ * check the status of the same buffer again on the next
+ * dp_rx_mon_status_srng_process pass.
+ */
+
+ reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
}
spin_lock_bh(&rx_ring->idr_lock);
@@ -4391,7 +4454,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
if (ab->hw_params.rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_BUF);
@@ -4403,7 +4466,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_err_dst_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_DST);
@@ -4443,7 +4506,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
config_refill_ring:
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
HAL_RXDMA_MONITOR_STATUS);
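The monitor-status recovery above boils down to one decision when the current (HP + 1) buffer lacks the DMA-done tag: peek at the next (HP + 2) entry, and if that one is done, treat the current buffer as stuck, drop and replenish it, and move on; otherwise keep polling the same buffer. A deliberately simplified sketch of just that decision (the tag value and enum names are stand-ins, not the HAL definitions):

#include <stdio.h>

enum buf_state { STATUS_NO_DMA, STATUS_REPLENISH };

#define STATUS_BUFFER_DONE 0xdead  /* stand-in for HAL_RX_STATUS_BUFFER_DONE */

/* Classify a stuck HP + 1 entry based on the tag of the HP + 2 buffer. */
static enum buf_state classify_stuck_entry(int next_entry_tag)
{
	if (next_entry_tag != STATUS_BUFFER_DONE)
		return STATUS_NO_DMA;      /* HP + 2 not done either: wait */
	return STATUS_REPLENISH;           /* HP + 2 done: skip HP + 1 */
}

int main(void)
{
	printf("%d\n", classify_stuck_entry(STATUS_BUFFER_DONE)); /* 1: skip */
	printf("%d\n", classify_stuck_entry(0));                  /* 0: wait */
	return 0;
}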
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index 623da3bf9dc8..c322e30caa96 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_RX_H
#define ATH11K_DP_RX_H
@@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab);
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer);
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype);
+
#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 272b1c35f98d..8522c67baabf 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -353,8 +353,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
info->status.flags |=
IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
@@ -584,8 +588,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
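
Both completion paths above apply the same rule: report the firmware RSSI as-is when the WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT service bit is set, otherwise add the default noise floor to turn a dB value into dBm. A standalone sketch of that rule (the constant value and helper name are illustrative):

    #include <stdbool.h>

    #define EXAMPLE_DEFAULT_NOISE_FLOOR (-95)  /* dBm, illustrative */

    /* Firmware with HW dB-to-dBm conversion reports dBm directly;
     * older firmware reports dB above the noise floor.
     */
    static int example_signal_to_dbm(int fw_signal, bool hw_db2dbm)
    {
            if (hw_db2dbm)
                    return fw_signal;

            return fw_signal + EXAMPLE_DEFAULT_NOISE_FLOOR;
    }
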
@@ -1035,7 +1043,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -1218,7 +1226,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
&tlv_filter);
} else if (!reset) {
/* set in monitor mode only */
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
dp->mac_id + i,
@@ -1231,7 +1239,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
if (ret)
return ret;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
if (!reset) {
tlv_filter.rx_filter =
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index 61be2265e09f..795fe3b8fa0d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_TX_H
@@ -13,7 +13,7 @@
struct ath11k_dp_htt_wbm_tx_status {
u32 msdu_id;
bool acked;
- int ack_rssi;
+ s8 ack_rssi;
u16 peer_id;
};
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index f3d04568c221..f02599bd1c36 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -796,6 +796,20 @@ u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
return desc;
}
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp != srng->u.src_ring.cached_tp)
+ return srng->ring_base_vaddr + next_hp;
+
+ return NULL;
+}
+
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
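
The ath11k_hal_srng_src_next_peek() helper added above looks one entry past the current head pointer without moving it, so the status-ring reaper can poll the HP + 1 descriptor for its DMA-done flag. A standalone sketch of the same circular-ring arithmetic, assuming offsets are kept in 32-bit words (the names are illustrative):

    /* hp and cached_tp are offsets in 32-bit words, entry_size is the
     * descriptor size in words and ring_size the total ring size in
     * words. Returns NULL when the next slot is still owned by the
     * consumer.
     */
    struct example_src_ring {
            unsigned int hp;
            unsigned int cached_tp;
            unsigned int entry_size;
            unsigned int ring_size;
            unsigned int *base;
    };

    static unsigned int *example_src_next_peek(struct example_src_ring *r)
    {
            unsigned int next_hp = (r->hp + r->entry_size) % r->ring_size;

            return next_hp != r->cached_tp ? r->base + next_hp : NULL;
    }
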
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index e453c137385e..dc8bbe073017 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -947,6 +947,8 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab,
+ struct hal_srng *srng);
u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
struct hal_srng *srng);
u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index c5e88364afe5..46d17abd808b 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
@@ -54,7 +54,7 @@ struct hal_tx_info {
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
- u8 ack_rssi;
+ s8 ack_rssi;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 ppdu_id;
u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 14ef4eb48f80..300322535766 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HW_H
@@ -167,7 +167,7 @@ struct ath11k_hw_params {
bool single_pdev_only;
bool rxdma1_enable;
- int num_rxmda_per_pdev;
+ int num_rxdma_per_pdev;
bool rx_mac_buf_ring;
bool vdev_start_delay;
bool htt_peer_map_v2;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 4f62e38ba48b..ba910ae2c676 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -4229,6 +4229,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
arg.key_cipher = WMI_CIPHER_AES_CCM;
/* TODO: Re-check if flag is valid */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
@@ -4238,12 +4239,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
- case WLAN_CIPHER_SUITE_CCMP_256:
- arg.key_cipher = WMI_CIPHER_AES_CCM;
- break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = WMI_CIPHER_AES_GCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
default:
ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
@@ -5903,7 +5902,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ieee80211_tx_info *info;
+ enum hal_encrypt_type enctype;
+ unsigned int mic_len;
dma_addr_t paddr;
int buf_id;
int ret;
@@ -5927,7 +5929,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET))
+ ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET");
+
+ enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+ mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype);
+ skb_put(skb, mic_len);
}
}
@@ -6108,7 +6115,7 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
ar->dp.mac_id + i,
@@ -6278,7 +6285,7 @@ err:
return ret;
}
-static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
@@ -7507,32 +7514,6 @@ static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
return 0;
}
-static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
-{
- switch (txpwr_intrprt) {
- /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
- * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
- * "IEEE Std 802.11ax 2021".
- */
- case IEEE80211_TPE_LOCAL_EIRP:
- case IEEE80211_TPE_REG_CLIENT_EIRP:
- txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
- txpwr_cnt = txpwr_cnt + 1;
- break;
- /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
- * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
- * "IEEE Std 802.11ax 2021".
- */
- case IEEE80211_TPE_LOCAL_EIRP_PSD:
- case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
- txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
- txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
- break;
- }
-
- return txpwr_cnt;
-}
-
static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
{
if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
@@ -7688,7 +7669,7 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
- s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
+ s8 max_tx_power[ATH11K_NUM_PWR_LEVELS],
psd_power, tx_power;
s8 eirp_power = 0;
u16 start_freq, center_freq;
@@ -7701,7 +7682,8 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
is_tpe_present = true;
num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
} else {
- num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def);
+ num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&bss_conf->chanreq.oper);
}
for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
@@ -7858,33 +7840,23 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct ieee80211_tx_pwr_env *single_tpe;
+ struct ieee80211_parsed_tpe_eirp *non_psd = NULL;
+ struct ieee80211_parsed_tpe_psd *psd = NULL;
enum wmi_reg_6ghz_client_type client_type;
struct cur_regulatory_info *reg_info;
+ u8 local_tpe_count, reg_tpe_count;
+ bool use_local_tpe;
int i;
- u8 pwr_count, pwr_interpret, pwr_category;
- u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
- bool use_local_tpe, non_psd_set = false, psd_set = false;
reg_info = &ab->reg_info_store[ar->pdev_idx];
client_type = reg_info->client_type;
- for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
- single_tpe = &bss_conf->tx_pwr_env[i];
- pwr_category = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
-
- if (pwr_category == client_type) {
- if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
- pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
- local_tpe_count++;
- else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
- pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
- reg_tpe_count++;
- }
- }
+ local_tpe_count =
+ bss_conf->tpe.max_local[client_type].valid +
+ bss_conf->tpe.psd_local[client_type].valid;
+ reg_tpe_count =
+ bss_conf->tpe.max_reg_client[client_type].valid +
+ bss_conf->tpe.psd_reg_client[client_type].valid;
if (!reg_tpe_count && !local_tpe_count) {
ath11k_warn(ab,
@@ -7897,83 +7869,44 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
use_local_tpe = false;
}
- for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
- single_tpe = &bss_conf->tx_pwr_env[i];
- pwr_category = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
-
- if (pwr_category != client_type)
- continue;
-
- /* get local transmit power envelope */
- if (use_local_tpe) {
- if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
- non_psd_index = i;
- non_psd_set = true;
- } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
- psd_index = i;
- psd_set = true;
- }
- /* get regulatory transmit power envelope */
- } else {
- if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
- non_psd_index = i;
- non_psd_set = true;
- } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
- psd_index = i;
- psd_set = true;
- }
- }
+ if (use_local_tpe) {
+ psd = &bss_conf->tpe.psd_local[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_local[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
+ } else {
+ psd = &bss_conf->tpe.psd_reg_client[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_reg_client[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
}
- if (non_psd_set && !psd_set) {
- single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
- pwr_count = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_COUNT);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ if (non_psd && !psd) {
arvif->reg_tpc_info.is_psd_power = false;
arvif->reg_tpc_info.eirp_power = 0;
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+ arvif->reg_tpc_info.num_pwr_levels = non_psd->count;
for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
ath11k_dbg(ab, ATH11K_DBG_MAC,
"non PSD power[%d] : %d\n",
- i, single_tpe->tx_power[i]);
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ i, non_psd->power[i]);
+ arvif->reg_tpc_info.tpe[i] = non_psd->power[i] / 2;
}
}
- if (psd_set) {
- single_tpe = &bss_conf->tx_pwr_env[psd_index];
- pwr_count = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_COUNT);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
- arvif->reg_tpc_info.is_psd_power = true;
+ if (psd) {
+ arvif->reg_tpc_info.num_pwr_levels = psd->count;
- if (pwr_count == 0) {
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
ath11k_dbg(ab, ATH11K_DBG_MAC,
- "TPE PSD power : %d\n", single_tpe->tx_power[0]);
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_num_pwr_levels(&ctx->def);
-
- for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
- } else {
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
-
- for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "TPE PSD power[%d] : %d\n",
- i, single_tpe->tx_power[i]);
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
- }
+ "TPE PSD power[%d] : %d\n",
+ i, psd->power[i]);
+ arvif->reg_tpc_info.tpe[i] = psd->power[i] / 2;
}
}
}
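
The rework above leans on mac80211's pre-parsed TPE arrays: pick the local or regulatory set for the client type, prefer the PSD envelope when it is valid, and convert the 0.5 dB units to dB by halving. A standalone sketch of that selection logic under those assumptions (the types and names are illustrative, not mac80211's):

    #include <stdbool.h>
    #include <stddef.h>

    struct example_tpe_env {
            bool valid;
            unsigned char count;
            signed char power[16];  /* 0.5 dB steps */
    };

    /* Prefer the PSD envelope when valid, otherwise fall back to the
     * maximum-EIRP envelope; convert 0.5 dB units to whole dB.
     */
    static int example_tpe_pick(const struct example_tpe_env *psd,
                                const struct example_tpe_env *non_psd,
                                signed char *out, size_t out_len)
    {
            const struct example_tpe_env *env = psd->valid ? psd : non_psd;
            size_t i, n;

            if (!env->valid)
                    return -1;

            n = env->count < out_len ? env->count : out_len;
            for (i = 0; i < n; i++)
                    out[i] = env->power[i] / 2;

            return (int)n;
    }
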
@@ -7988,8 +7921,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct cur_regulatory_info *reg_info;
- enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -8000,17 +7931,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
ctx->def.chan->band == NL80211_BAND_6GHZ &&
arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- reg_info = &ab->reg_info_store[ar->pdev_idx];
- power_type = vif->bss_conf.power_type;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
-
- if (power_type == IEEE80211_REG_UNSET_AP) {
- ret = -EINVAL;
- goto out;
- }
-
- ath11k_reg_handle_chan_list(ab, reg_info, power_type);
arvif->chanctx = *ctx;
ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
}
@@ -8864,12 +8784,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ieee80211_wake_queues(ar->hw);
if (ar->ab->hw_params.current_cc_support &&
- ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
- struct wmi_set_current_country_params set_current_param = {};
-
- memcpy(&set_current_param.alpha2, ar->alpha2, 2);
- ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
- }
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0)
+ ath11k_reg_set_cc(ar);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
@@ -9068,8 +8984,11 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
- sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
- ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+ if (!db2dbm)
+ sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR;
+
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
@@ -9104,7 +9023,6 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct inet6_ifaddr *ifa6;
struct ifacaddr6 *ifaca6;
- struct list_head *p;
u32 count, scope;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n");
@@ -9112,7 +9030,12 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;
- /* Note: read_lock_bh() calls rcu_read_lock() */
+ /* The _ipv6_changed() callback is invoked with the RCU lock already
+ * held in atomic_notifier_call_chain(), so there is no need to call
+ * rcu_read_lock() again here. Note that with CONFIG_PREEMPT_RT
+ * enabled, read_lock_bh() also calls rcu_read_lock(); this is fine
+ * because RCU read-side critical sections are allowed to nest.
+ */
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9120,11 +9043,10 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
/* get unicast address */
- list_for_each(p, &idev->addr_list) {
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
- ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
if (ifa6->flags & IFA_F_DADFAILED)
continue;
scope = ipv6_addr_src_scope(&ifa6->addr);
@@ -9626,6 +9548,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ enum ieee80211_ap_reg_power power_type;
+ struct cur_regulatory_info *reg_info;
struct ath11k_peer *peer;
int ret = 0;
@@ -9705,6 +9629,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
sta->addr, arvif->vdev_id, ret);
}
+
+ if (!ret &&
+ ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->chanctx.def.chan &&
+ arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
+ reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_warn(ar->ab, "invalid power type %d\n",
+ power_type);
+ ret = -EINVAL;
+ } else {
+ ret = ath11k_reg_handle_chan_list(ar->ab,
+ reg_info,
+ power_type);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to handle chan list with power type %d\n",
+ power_type);
+ }
+ }
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
spin_lock_bh(&ar->ab->base_lock);
@@ -10313,11 +10260,8 @@ static int __ath11k_mac_register(struct ath11k *ar)
}
if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
- struct wmi_set_current_country_params set_current_param = {};
-
- memcpy(&set_current_param.alpha2, ab->new_alpha2, 2);
memcpy(&ar->alpha2, ab->new_alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"failed set cc code for mac register: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 79eb3f9c902f..debe7c5919ef 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -561,6 +561,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
+ struct ath11k_ext_irq_grp *irq_grp;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
@@ -574,14 +575,16 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
irq_flags |= IRQF_NOBALANCING;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
irq_grp->napi_ndev = alloc_netdev_dummy(0);
- if (!irq_grp->napi_ndev)
- return -ENOMEM;
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll);
@@ -606,11 +609,8 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
int irq = ath11k_pcic_get_msi_irq(ab, vector);
if (irq < 0) {
- for (n = 0; n <= i; n++) {
- irq_grp = &ab->ext_irq_grp[n];
- free_netdev(irq_grp->napi_ndev);
- }
- return irq;
+ ret = irq;
+ goto fail_irq;
}
ab->irq_num[irq_idx] = irq;
@@ -635,6 +635,15 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
}
return 0;
+fail_irq:
+ /* The napi_ndev at index i was allocated successfully, so free it too */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
}
int ath11k_pcic_config_irq(struct ath11k_base *ab)
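
The pcic.c change above replaces the open-coded rollback with goto-based unwind labels, so every napi_ndev allocated before the failure point is freed exactly once (fail_irq bumps the index so the current group is included in the cleanup loop). A generic standalone sketch of that unwind pattern:

    #include <stdlib.h>

    /* Allocate n objects; on failure free only the ones already allocated. */
    static int example_alloc_all(void **objs, int n)
    {
            int i, ret = 0;

            for (i = 0; i < n; i++) {
                    objs[i] = malloc(64);
                    if (!objs[i]) {
                            ret = -1;
                            goto fail;
                    }
            }
            return 0;

    fail:
            /* objs[0..i-1] were allocated; free them in reverse order */
            while (--i >= 0)
                    free(objs[i]);
            return ret;
    }
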
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index d4a243b64f6c..1bc648920ab6 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
struct qmi_txn txn;
const u8 *temp = data;
void __iomem *bdf_addr = NULL;
- int ret;
+ int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -2859,7 +2859,7 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab,
int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
{
- int timeout;
+ long time_left;
if (!ath11k_core_coldboot_cal_support(ab) ||
ab->hw_params.cbcal_restart_fw == 0)
@@ -2867,11 +2867,11 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n");
- timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
- (ab->qmi.cal_done == 1),
- ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
- if (timeout <= 0) {
+ if (time_left <= 0) {
ath11k_warn(ab, "Coldboot Calibration timed out\n");
return -ETIMEDOUT;
}
@@ -2886,7 +2886,7 @@ EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
{
- int timeout;
+ long time_left;
int ret;
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
@@ -2897,10 +2897,10 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
- timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
- (ab->qmi.cal_done == 1),
- ATH11K_COLD_BOOT_FW_RESET_DELAY);
- if (timeout <= 0) {
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ if (time_left <= 0) {
ath11k_warn(ab, "coldboot calibration timed out\n");
return 0;
}
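
The qmi.c hunks above switch the variable to long because wait_event_timeout() returns the remaining jiffies (at least 1) when the condition became true, and 0 on timeout. A minimal kernel-context sketch of that calling convention; the wrapper name and parameters are illustrative:

    #include <linux/wait.h>
    #include <linux/errno.h>

    static int example_wait_cal_done(wait_queue_head_t *wq, bool *cal_done,
                                     unsigned long timeout_jiffies)
    {
            long time_left;

            /* Remaining jiffies (>= 1) on success, 0 on timeout */
            time_left = wait_event_timeout(*wq, *cal_done, timeout_jiffies);
            if (!time_left)
                    return -ETIMEDOUT;

            return 0;
    }
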
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 737fcd450d4b..b0f289784dd3 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@@ -49,7 +49,6 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
- struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
@@ -83,9 +82,8 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
* reg info
*/
if (ar->ab->hw_params.current_cc_support) {
- memcpy(&set_current_param.alpha2, request->alpha2, 2);
- memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ memcpy(&ar->alpha2, request->alpha2, 2);
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"failed set current country code: %d\n", ret);
@@ -878,7 +876,7 @@ int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
ath11k_reg_reset_info(reg_info);
if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params.num_rxdma_per_pdev)
return 0;
goto fallback;
}
@@ -1017,3 +1015,11 @@ void ath11k_reg_free(struct ath11k_base *ab)
kfree(ab->new_regd[i]);
}
}
+
+int ath11k_reg_set_cc(struct ath11k *ar)
+{
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ar->alpha2, 2);
+ return ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 64edb794260a..263ea9061948 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@@ -45,5 +45,5 @@ ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info,
enum ieee80211_ap_reg_power power_type);
-
+int ath11k_reg_set_cc(struct ath11k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6ff01c45f165..38f175dd1557 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -9082,7 +9082,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
- if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index d42480db7463..5a1ed20d730e 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,9 +23,10 @@ ath12k-y += core.o \
fw.o \
p2p.o
-ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_PM) += wow.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
index 443ba12e01f3..0555d35aab47 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.c
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -391,4 +391,6 @@ void ath12k_acpi_stop(struct ath12k_base *ab)
acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
ACPI_DEVICE_NOTIFY,
ath12k_acpi_dsm_notify);
+
+ memset(&ab->acpi, 0, sizeof(ab->acpi));
}
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 79af3b6159f1..857bc5f9e946 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CE_H
@@ -119,7 +119,7 @@ struct ath12k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
@@ -129,7 +129,7 @@ struct ath12k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 6663f4e1792d..51252e8bc1ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -16,6 +16,7 @@
#include "hif.h"
#include "fw.h"
#include "debugfs.h"
+#include "wow.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -42,27 +43,48 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
return ret;
}
-int ath12k_core_suspend(struct ath12k_base *ab)
+/* Check whether we need to continue with the suspend/resume operation.
+ * Return:
+ * a negative value: an error occurred, do not continue.
+ * 0: no error, but do not continue.
+ * a positive value: no error, continue.
+ */
+static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
{
struct ath12k *ar;
- int ret, i;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
- rcu_read_lock();
+ /* So far, the chips that support suspend are all single_pdev_only,
+ * so pass 0 as a dummy pdev_id here.
+ */
+ ar = ab->pdevs[0].ar;
+ if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
+ return 0;
+
+ return 1;
+}
+
+int ath12k_core_suspend(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ int ret, i;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
for (i = 0; i < ab->num_radios; i++) {
- ar = ath12k_mac_get_ar_by_pdev_id(ab, i);
+ ar = ab->pdevs[i].ar;
if (!ar)
continue;
ret = ath12k_mac_wait_tx_complete(ar);
if (ret) {
ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
- rcu_read_unlock();
return ret;
}
}
- rcu_read_unlock();
/* PM framework skips suspend_late/resume_early callbacks
* if other devices report errors in their suspend callbacks.
@@ -83,8 +105,13 @@ EXPORT_SYMBOL(ath12k_core_suspend);
int ath12k_core_suspend_late(struct ath12k_base *ab)
{
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
+ int ret;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ ath12k_acpi_stop(ab);
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
@@ -99,8 +126,9 @@ int ath12k_core_resume_early(struct ath12k_base *ab)
{
int ret;
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
reinit_completion(&ab->restart_completed);
ret = ath12k_hif_power_up(ab);
@@ -114,9 +142,11 @@ EXPORT_SYMBOL(ath12k_core_resume_early);
int ath12k_core_resume(struct ath12k_base *ab)
{
long time_left;
+ int ret;
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
time_left = wait_for_completion_timeout(&ab->restart_completed,
ATH12K_RESET_TIMEOUT_HZ);
@@ -994,9 +1024,8 @@ void ath12k_core_halt(struct ath12k *ar)
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
- struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
@@ -1006,35 +1035,32 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ ah = ab->ah[i];
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- ah = ab->ah[i];
ieee80211_stop_queues(ah->hw);
- }
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
- continue;
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
- ath12k_mac_drain_tx(ar);
- complete(&ar->scan.started);
- complete(&ar->scan.completed);
- complete(&ar->scan.on_channel);
- complete(&ar->peer_assoc_done);
- complete(&ar->peer_delete_done);
- complete(&ar->install_key_done);
- complete(&ar->vdev_setup_done);
- complete(&ar->vdev_delete_done);
- complete(&ar->bss_survey_done);
-
- wake_up(&ar->dp.tx_empty_waitq);
- idr_for_each(&ar->txmgmt_idr,
- ath12k_mac_tx_mgmt_pending_free, ar);
- idr_destroy(&ar->txmgmt_idr);
- wake_up(&ar->txmgmt_empty_waitq);
+ ath12k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ wake_up(&ar->txmgmt_empty_waitq);
+ }
}
wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -1043,48 +1069,57 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw *ah;
struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ int i, j;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- mutex_lock(&ar->conf_mutex);
+ mutex_lock(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_core_halt(ar);
+ mutex_unlock(&ar->conf_mutex);
+ }
- switch (ar->state) {
- case ATH12K_STATE_ON:
- ar->state = ATH12K_STATE_RESTARTING;
- ath12k_core_halt(ar);
- ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
- case ATH12K_STATE_OFF:
+ case ATH12K_HW_STATE_OFF:
ath12k_warn(ab,
- "cannot restart radio %d that hasn't been started\n",
+ "cannot restart hw %d that hasn't been started\n",
i);
break;
- case ATH12K_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTING:
break;
- case ATH12K_STATE_RESTARTED:
- ar->state = ATH12K_STATE_WEDGED;
+ case ATH12K_HW_STATE_RESTARTED:
+ ah->state = ATH12K_HW_STATE_WEDGED;
fallthrough;
- case ATH12K_STATE_WEDGED:
+ case ATH12K_HW_STATE_WEDGED:
ath12k_warn(ab,
- "device is wedged, will not restart radio %d\n", i);
+ "device is wedged, will not restart hw %d\n", i);
break;
}
- mutex_unlock(&ar->conf_mutex);
+
+ mutex_unlock(&ah->hw_mutex);
}
+
complete(&ab->driver_recovery);
}
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
- int ret;
+ struct ath12k_hw *ah;
+ int ret, i;
ret = ath12k_core_reconfigure_on_crash(ab);
if (ret) {
@@ -1092,8 +1127,12 @@ static void ath12k_core_restart(struct work_struct *work)
return;
}
- if (ab->is_reset)
- complete_all(&ab->reconfigure_complete);
+ if (ab->is_reset) {
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ ieee80211_restart_hw(ah->hw);
+ }
+ }
complete(&ab->restart_completed);
}
@@ -1147,20 +1186,14 @@ static void ath12k_core_reset(struct work_struct *work)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
ab->is_reset = true;
- atomic_set(&ab->recovery_start_count, 0);
- reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_count, 0);
ath12k_core_pre_reconfigure_recovery(ab);
- reinit_completion(&ab->reconfigure_complete);
ath12k_core_post_reconfigure_recovery(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
- time_left = wait_for_completion_timeout(&ab->recovery_start,
- ATH12K_RECOVER_START_TIMEOUT_HZ);
-
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
@@ -1185,6 +1218,29 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return 0;
}
+static int ath12k_core_panic_handler(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ath12k_base *ab = container_of(nb, struct ath12k_base,
+ panic_nb);
+
+ return ath12k_hif_panic_handler(ab);
+}
+
+static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
+{
+ ab->panic_nb.notifier_call = ath12k_core_panic_handler;
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
+static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
int ath12k_core_init(struct ath12k_base *ab)
{
int ret;
@@ -1195,11 +1251,17 @@ int ath12k_core_init(struct ath12k_base *ab)
return ret;
}
+ ret = ath12k_core_panic_notifier_register(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+
return 0;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
+ ath12k_core_panic_notifier_unregister(ab);
+
mutex_lock(&ab->core_lock);
ath12k_core_pdev_destroy(ab);
@@ -1243,8 +1305,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
init_completion(&ab->reset_complete);
- init_completion(&ab->reconfigure_complete);
- init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
@@ -1256,12 +1316,23 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->restart_completed);
+ init_completion(&ab->wow.wakeup_completed);
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ /* Device index used to identify the devices in a group.
+ *
+ * In intra-device MLO only one device is present in a group,
+ * so it is always zero.
+ *
+ * In inter-device MLO multiple devices are present in a group,
+ * so non-zero values are expected.
+ */
+ ab->device_id = 0;
+
return ab;
err_free_wq:
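
The core.c changes above also hook the kernel panic path: ath12k_core_panic_notifier_register() puts a notifier_block on panic_notifier_list so that ath12k_hif_panic_handler() runs when the kernel panics. A minimal sketch of that pattern, assuming kernel headers; my_driver, the handler body, and the field names are illustrative:

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/panic_notifier.h>

    struct my_driver {
            struct notifier_block panic_nb;
            /* ... driver state ... */
    };

    static int my_panic_handler(struct notifier_block *nb, unsigned long action,
                                void *data)
    {
            struct my_driver *drv = container_of(nb, struct my_driver, panic_nb);

            /* Dump whatever state is useful; must be atomic-context safe */
            pr_emerg("my_driver %p: panic notified (action %lu)\n", drv, action);

            return NOTIFY_DONE;
    }

    static int my_panic_register(struct my_driver *drv)
    {
            drv->panic_nb.notifier_call = my_panic_handler;

            return atomic_notifier_chain_register(&panic_notifier_list,
                                                  &drv->panic_nb);
    }
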
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 47dde4401210..cdfd43a7321a 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/panic_notifier.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -27,6 +28,8 @@
#include "dbring.h"
#include "fw.h"
#include "acpi.h"
+#include "wow.h"
+#include "debugfs_htt_stats.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -146,7 +149,7 @@ struct ath12k_ext_irq_grp {
u32 grp_id;
u64 timestamp;
struct napi_struct napi;
- struct net_device napi_ndev;
+ struct net_device *napi_ndev;
};
struct ath12k_smbios_bdf {
@@ -180,8 +183,6 @@ struct ath12k_he {
u32 heop_param;
};
-#define MAX_RADIOS 3
-
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
@@ -212,10 +213,6 @@ enum ath12k_dev_flags {
ATH12K_FLAG_EXT_IRQ_ENABLED,
};
-enum ath12k_monitor_flags {
- ATH12K_FLAG_MONITOR_ENABLED,
-};
-
struct ath12k_tx_conf {
bool changed;
u16 ac;
@@ -234,6 +231,13 @@ struct ath12k_vif_cache {
u32 bss_conf_changed;
};
+struct ath12k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KCK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
+};
+
struct ath12k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -290,6 +294,7 @@ struct ath12k_vif {
u32 punct_bitmap;
bool ps;
struct ath12k_vif_cache *cache;
+ struct ath12k_rekey_data rekey_data;
};
struct ath12k_vif_iter {
@@ -454,15 +459,15 @@ struct ath12k_sta {
#define ATH12K_MIN_5G_FREQ 4150
#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
-#define ATH12K_NUM_CHANS 100
+#define ATH12K_NUM_CHANS 101
#define ATH12K_MAX_5G_CHAN 173
-enum ath12k_state {
- ATH12K_STATE_OFF,
- ATH12K_STATE_ON,
- ATH12K_STATE_RESTARTING,
- ATH12K_STATE_RESTARTED,
- ATH12K_STATE_WEDGED,
+enum ath12k_hw_state {
+ ATH12K_HW_STATE_OFF,
+ ATH12K_HW_STATE_ON,
+ ATH12K_HW_STATE_RESTARTING,
+ ATH12K_HW_STATE_RESTARTED,
+ ATH12K_HW_STATE_WEDGED,
/* Add other states as required */
};
@@ -477,8 +482,17 @@ struct ath12k_fw_stats {
struct list_head bcn;
};
+struct ath12k_dbg_htt_stats {
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+};
+
struct ath12k_debug {
struct dentry *debugfs_pdev;
+ struct dentry *debugfs_pdev_symlink;
+ struct ath12k_dbg_htt_stats htt_stats;
};
struct ath12k_per_peer_tx_stats {
@@ -511,7 +525,6 @@ struct ath12k {
u32 ht_cap_info;
u32 vht_cap_info;
struct ath12k_he ar_he;
- enum ath12k_state state;
bool supports_6ghz;
struct {
struct completion started;
@@ -533,7 +546,6 @@ struct ath12k {
unsigned long dev_flags;
unsigned int filter_flags;
- unsigned long monitor_flags;
u32 min_tx_power;
u32 max_tx_power;
u32 txpower_limit_2g;
@@ -613,6 +625,9 @@ struct ath12k {
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
+ struct ath12k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
struct ath12k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
@@ -632,14 +647,22 @@ struct ath12k {
u32 freq_low;
u32 freq_high;
+
+ bool nlo_enabled;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
+ /* Protects writes to the hardware state ath12k_hw::state across
+ * hardware start <=> reconfigure <=> stop transitions.
+ */
+ struct mutex hw_mutex;
+ enum ath12k_hw_state state;
bool regd_updated;
bool use_6ghz_regd;
-
u8 num_radio;
+
+ /* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
@@ -689,6 +712,7 @@ struct mlo_timestamp {
struct ath12k_pdev {
struct ath12k *ar;
u32 pdev_id;
+ u32 hw_link_id;
struct ath12k_pdev_cap cap;
u8 mac_addr[ETH_ALEN];
struct mlo_timestamp timestamp;
@@ -747,6 +771,7 @@ struct ath12k_base {
struct ath12k_qmi qmi;
struct ath12k_wmi_base wmi_ab;
struct completion fw_ready;
+ u8 device_id;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
@@ -763,6 +788,11 @@ struct ath12k_base {
const struct ath12k_hif_ops *ops;
} hif;
+ struct {
+ struct completion wakeup_completed;
+ u32 wmi_conf_rx_decap_mode;
+ } wow;
+
struct ath12k_ce ce;
struct timer_list rx_replenish_retry;
struct ath12k_hal hal;
@@ -845,11 +875,8 @@ struct ath12k_base {
struct work_struct reset_work;
atomic_t reset_count;
atomic_t recovery_count;
- atomic_t recovery_start_count;
bool is_reset;
struct completion reset_complete;
- struct completion reconfigure_complete;
- struct completion recovery_start;
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
@@ -923,6 +950,8 @@ struct ath12k_base {
#endif /* CONFIG_ACPI */
+ struct notifier_block panic_nb;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1037,6 +1066,11 @@ static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id
return &ah->radio[hw_link_id];
}
+static inline struct ath12k_hw *ath12k_ar_to_ah(struct ath12k *ar)
+{
+ return ar->ah;
+}
+
static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
{
return ar->ah->hw;
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index aa685295f8a4..f7005917362c 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@@ -25,6 +25,7 @@ enum ath12k_debug_mask {
ATH12K_DBG_PCI = 0x00001000,
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
+ ATH12K_DBG_WOW = 0x00008000,
ATH12K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index 8d8ba951093b..2a977c36af00 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -6,6 +6,7 @@
#include "core.h"
#include "debugfs.h"
+#include "debugfs_htt_stats.h"
static ssize_t ath12k_write_simulate_radar(struct file *file,
const char __user *user_buf,
@@ -80,11 +81,27 @@ void ath12k_debugfs_register(struct ath12k *ar)
/* Create a symlink under ieee80211/phy* */
scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev);
- debugfs_create_symlink("ath12k", hw->wiphy->debugfsdir, buf);
+ ar->debug.debugfs_pdev_symlink = debugfs_create_symlink("ath12k",
+ hw->wiphy->debugfsdir,
+ buf);
if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) {
debugfs_create_file("dfs_simulate_radar", 0200,
ar->debug.debugfs_pdev, ar,
&fops_simulate_radar);
}
+
+ ath12k_debugfs_htt_stats_register(ar);
+}
+
+void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+ if (!ar->debug.debugfs_pdev)
+ return;
+
+ /* Remove symlink under ieee80211/phy* */
+ debugfs_remove(ar->debug.debugfs_pdev_symlink);
+ debugfs_remove_recursive(ar->debug.debugfs_pdev);
+ ar->debug.debugfs_pdev_symlink = NULL;
+ ar->debug.debugfs_pdev = NULL;
}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index a62f2a550b23..8d64ba03aa9a 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -11,7 +11,7 @@
void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
-
+void ath12k_debugfs_unregister(struct ath12k *ar);
#else
static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
@@ -25,6 +25,10 @@ static inline void ath12k_debugfs_register(struct ath12k *ar)
{
}
+static inline void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+}
+
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
new file mode 100644
index 000000000000..ce80e7b5175b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -0,0 +1,1540 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+
+static u32
+print_array_to_buf(u8 *buf, u32 offset, const char *header,
+ const __le32 *array, u32 array_len, const char *footer)
+{
+ int index = 0;
+ u8 i;
+
+ if (header) {
+ index += scnprintf(buf + offset,
+ ATH12K_HTT_STATS_BUF_SIZE - offset,
+ "%s = ", header);
+ }
+ for (i = 0; i < array_len; i++) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ " %u:%u,", i, le32_to_cpu(array[i]));
+ }
+ /* To overwrite the last trailing comma */
+ index--;
+ *(buf + offset + index) = '\0';
+
+ if (footer) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ "%s", footer);
+ }
+ return index;
+}
+
+static void
+htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n",
+ le32_to_cpu(htt_stats_buf->comp_delivered));
+ len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n",
+ le32_to_cpu(htt_stats_buf->self_triggers));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_queued));
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_reaped));
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->underrun));
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_flush));
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_filt));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n",
+ le32_to_cpu(htt_stats_buf->ppdu_ok));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_requed));
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_xretry));
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ le32_to_cpu(htt_stats_buf->data_rc));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_dropped_xretry));
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ le32_to_cpu(htt_stats_buf->illgl_rate_phy_err));
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->cont_xretry));
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_timeout));
+ len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_time_dur_data));
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_resets));
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_underrun));
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ le32_to_cpu(htt_stats_buf->txop_ovf));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_failed_queueing));
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_completed));
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_restarted));
+ len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_txop_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_cancel));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n",
+ le32_to_cpu(htt_stats_buf->num_mu_peer_blacklisted));
+ len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_qdepth_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_switch_hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_posted_dsr));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted_isr));
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_ctrl_cached));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdus_max_retries));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_sw_flush));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_hw_filter));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_truncated));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_expired));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_seq_hw_retry));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt_valid));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_total_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_data_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_freed));
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_freed));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->isr_wait_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_low));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_high));
+ len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->fes_offsets_err_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_urrn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_urrn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_URRN_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "urrn_stats", htt_stats_buf->urrn_stats,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_flush_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_flush_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_FLUSH_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "flush_errs", htt_stats_buf->flush_errs,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_status", htt_stats_buf->sifs_status,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *htt_stats_buf = tag_buf;
+ char *mode;
+ u8 j, hw_mode, i, str_buf_len;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 stats_value;
+ u8 max_ppdu = ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST;
+ u8 max_sched = ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN];
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ hw_mode = le32_to_cpu(htt_stats_buf->hw_mode);
+
+ switch (hw_mode) {
+ case ATH12K_HTT_STATS_HWMODE_AC:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AC_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ac";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_AX:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ax";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_BE:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_BE_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "be";
+ break;
+ default:
+ return;
+ }
+
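+ /* Each NR bin i covers the nr(4 * (i + 1)) MU group, i.e. nr4 and nr8 */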
+ for (i = 0; i < ATH12K_HTT_STATS_NUM_NR_BINS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_posted_nr%u = %u\n", mode,
+ ((i + 1) * 4), le32_to_cpu(htt_stats_buf->num_seq_posted[i]));
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_posted_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_posted_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_completed_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_seq_term_status
+ [i * max_sched + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_term_status_nr%u = %s\n\n",
+ mode, ((i + 1) * 4), str_buf);
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_hist_status",
+ htt_stats_buf->sifs_hist_status, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_CTRL_PATH_TX_STATS:\n");
+ len += print_array_to_buf(buf, len, "fw_tx_mgmt_subtype",
+ htt_stats_buf->fw_tx_mgmt_subtype,
+ ATH12K_HTT_STATS_SUBTYPE_MAX, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ le32_to_cpu(htt_stats_buf->current_timestamp));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_policy));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmd_post_failure));
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->num_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_ps_schedules));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmds_pending));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_register));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_unregister));
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_qstats_queried));
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->qstats_update_pending));
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full));
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_sched));
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+ le32_to_cpu(htt_stats_buf->dur_based_sendn_term));
+ len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_notify2_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_delay_timeout_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay));
+ len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_no_delay));
+ len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n",
+ le32_to_cpu(htt_stats_buf->num_supercycles));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_with_sort));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_no_sort));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_posted",
+ htt_stats_buf->sched_cmd_posted, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_reaped",
+ htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_order_su",
+ htt_stats_buf->sched_order_su,
+ sched_order_su_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n");
+ len += print_array_to_buf(buf, len, "sched_ineligibility",
+ htt_stats_buf->sched_ineligibility,
+ sched_ineligibility_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n");
+ len += print_array_to_buf(buf, len, "supercycle_triggers",
+ htt_stats_buf->supercycle_triggers, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_pdev_errs_tlv *htt_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->tx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ le32_to_cpu(htt_buf->rx_abort));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->rx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ le32_to_cpu(htt_buf->warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ le32_to_cpu(htt_buf->cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_only_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_ucode_trig));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_sfm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_m3_ssr));
+ len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n",
+ le32_to_cpu(htt_buf->fw_rx_rings_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ le32_to_cpu(htt_buf->tx_flush));
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_glb_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_txq_reset));
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ le32_to_cpu(htt_buf->rx_timeout_reset));
+
+ len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n");
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_phy_m3));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hw_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hw_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_num_rx_frame_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_rx_busy));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_mac_hng));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_mac_conv_phy_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_exp_cca_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_consec_flsh_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hwsch_reset_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hwsch_reset_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_hwsch_cca_wdog_war));
+
+ len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_mac_hang_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_mac_hang_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_known_sig_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_known_sig_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_consec_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_rx_busy_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_rx_busy_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_phy_mac_hang_count = %u\n\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_phy_mac_hang_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_DEST_DRAIN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_leak_prevention_done = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_leak_prevented));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_saved_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_saved_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2reo_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2reo_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2fw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2fw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2wbm_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2wbm_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma1_2sw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma1_2sw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_drain_ok_mac_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_drain_ok_mac_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_ok_mac_not_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_ok_mac_not_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_prerequisite_invld = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_prerequisite_invld));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_skip_for_non_lmac_reset = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_skip_non_lmac_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_hw_fifo_not_empty_post_drain_wait = %u\n\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_hw_fifo_notempty_post_wait));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n",
+ htt_stats_buf->hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ le32_to_cpu(htt_stats_buf->mask));
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ le32_to_cpu(htt_stats_buf->count));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ le32_to_cpu(htt_stats_buf->last_unpause_ppdu_id));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_unpause_wait_tqm_write));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dummy_tlv_skipped));
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_misaligned_offset_received));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_reset_count));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dev_reset_war));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_long_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_ppdu_no_response));
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_selfgen_response));
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_sifs_resp_trigger));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_war_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 fixed_len, array_len;
+ u8 i, array_words;
+ u32 mac_id;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
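+ /* The TLV bytes after the fixed header form a variable-length array of hw_war words */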
+ fixed_len = sizeof(*htt_stats_buf);
+ array_len = tag_len - fixed_len;
+ array_words = array_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+
+ for (i = 0; i < array_words; i++) {
+ len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n\n",
+ i, le32_to_cpu(htt_stats_buf->hw_wars[i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ le32_to_cpu(htt_stats_buf->max_cmdq_id));
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ le32_to_cpu(htt_stats_buf->list_mpdu_cnt_hist_intvl));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->add_msdu));
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty));
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ le32_to_cpu(htt_stats_buf->drop_notification));
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n",
+ le32_to_cpu(htt_stats_buf->desc_threshold));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_tqm_invalid_status));
+ len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n",
+ le32_to_cpu(htt_stats_buf->missed_tqm_gen_mpdus));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_timestamp_updates));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_get_mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_emp_to_nonemp_status = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_emp_to_nonemp_status));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_tac = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_tac));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_gen_mpdu_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_inactive_tids));
+ len += scnprintf(buf + len, buf_len - len, "tqm_active_msduq_flows = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_msduq_flows));
+ len += scnprintf(buf + len, buf_len - len, "hi_prio_q_not_empty = %u\n\n",
+ le32_to_cpu(htt_stats_buf->high_prio_q_not_empty));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ le32_to_cpu(htt_stats_buf->add_msdu_failure));
+
+ len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_cache_ctl_err));
+ len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_soft_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_total_num_in_use_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_host_tx_buf_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_internal_tqm));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_idle_link_rng));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_time_to_tqm_hang_delta_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_recovery_time_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_peers_hdl));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_su_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_other_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_type = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_cfg = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cmd_skp_status_null));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "gen_mpdu_end_reason",
+ htt_stats_buf->gen_mpdu_end_reason, num_elements,
+ "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_end_reason",
+ htt_stats_buf->list_mpdu_end_reason, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_cnt_hist",
+ htt_stats_buf->list_mpdu_cnt_hist, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_pdev_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl));
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ le32_to_cpu(htt_stats_buf->send_bar));
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ le32_to_cpu(htt_stats_buf->bar_sync));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->sync_cmd));
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->write_cmd));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_list_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_tried_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_queue_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_flow_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl_cmd));
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->flush_cache_cmd));
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->update_mpduq_cmd));
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue));
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue_notify));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_at_head));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_state_valid));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify2));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify2));
+
+ stats_req->buf_len = len;
+}
+
+static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG:
+ htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_MU_PPDU_DIST_TAG:
+ htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG:
+ ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ ath12k_htt_print_hw_stats_pdev_errs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ ath12k_htt_print_hw_stats_intr_misc_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_WHAL_TX_TAG:
+ ath12k_htt_print_hw_stats_whal_tx_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_WAR_TAG:
+ ath12k_htt_print_hw_war_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ ath12k_htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ ath12k_htt_print_tx_tqm_error_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath12k *ar;
+ u32 len, pdev_id, stats_info;
+ u64 cookie;
+ int ret;
+ bool send_completion = false;
+
+ msg = (struct ath12k_htt_extd_stats_msg *)skb->data;
+ cookie = le64_to_cpu(msg->cookie);
+
+ if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) !=
+ ATH12K_HTT_STATS_MAGIC_VALUE) {
+ ath12k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB);
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ goto exit;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ goto exit;
+
+ spin_lock_bh(&ar->data_lock);
+
+ stats_info = le32_to_cpu(msg->info1);
+ stats_req->done = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ if (stats_req->done)
+ send_completion = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ len = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ if (len > skb->len) {
+ ath12k_warn(ab, "invalid length %d for HTT stats", len);
+ goto exit;
+ }
+
+ ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath12k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+ ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+
+ if (send_completion)
+ complete(&stats_req->htt_stats_rcvd);
+exit:
+ rcu_read_unlock();
+}
+
+static ssize_t ath12k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ char buf[32];
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+ type = ar->debug.htt_stats.type;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath12k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ unsigned int cfg_param[4] = {0};
+ const int size = 32;
+ int num_args;
+
+ char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count >= size)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
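+ /* Expected input: "<type> [cfg0] [cfg1] [cfg2] [cfg3]" */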
+ num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0],
+ &cfg_param[1], &cfg_param[2], &cfg_param[3]);
+ if (!num_args || num_args > 5)
+ return -EINVAL;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET ||
+ type >= ATH12K_DBG_HTT_NUM_EXT_STATS)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats.type = type;
+ ar->debug.htt_stats.cfg_param[0] = cfg_param[0];
+ ar->debug.htt_stats.cfg_param[1] = cfg_param[1];
+ ar->debug.htt_stats.cfg_param[2] = cfg_param[2];
+ ar->debug.htt_stats.cfg_param[3] = cfg_param[3];
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath12k_read_htt_stats_type,
+ .write = ath12k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = stats_req->type;
+ u64 cookie;
+ int ret, pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ init_completion(&stats_req->htt_stats_rcvd);
+
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
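+ /* Magic value goes in the cookie MSBs and pdev_id in the LSBs so the
+ * stats confirmation can be validated and routed back to this pdev.
+ */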
+ cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE,
+ ATH12K_HTT_STATS_COOKIE_MSB);
+ cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB);
+
+ if (stats_req->override_cfg_param) {
+ cfg_params.cfg0 = stats_req->cfg_param[0];
+ cfg_params.cfg1 = stats_req->cfg_param[1];
+ cfg_params.cfg2 = stats_req->cfg_param[2];
+ cfg_params.cfg3 = stats_req->cfg_param[3];
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+ if (!wait_for_completion_timeout(&stats_req->htt_stats_rcvd, 3 * HZ)) {
+ spin_lock_bh(&ar->data_lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
+
+static int ath12k_open_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = ar->debug.htt_stats.type;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ah->state != ATH12K_HW_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ if (ar->debug.htt_stats.stats_req) {
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
+ stats_req = kzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE, GFP_KERNEL);
+ if (!stats_req) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0];
+ stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1];
+ stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2];
+ stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3];
+ stats_req->override_cfg_param = !!stats_req->cfg_param[0] ||
+ !!stats_req->cfg_param[1] ||
+ !!stats_req->cfg_param[2] ||
+ !!stats_req->cfg_param[3];
+
+ ret = ath12k_debugfs_htt_stats_req(ar);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+out:
+ kfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath12k_release_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
+ kfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath12k_open_htt_stats,
+ .release = ath12k_release_htt_stats,
+ .read = ath12k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ u8 param_pos;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
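+ /* The stats type index selects which cfg word (cfg1/cfg2/cfg3)
+ * carries the reset bitmap bit for this type.
+ */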
+ param_pos = (type >> 5) + 1;
+
+ switch (param_pos) {
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES:
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES:
+ cfg_params.cfg2 = ATH12K_HTT_STATS_RESET_BITMAP32_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES:
+ cfg_params.cfg3 = ATH12K_HTT_STATS_RESET_BITMAP64_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ default:
+ break;
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH12K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .write = ath12k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar)
+{
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
new file mode 100644
index 000000000000..6294a082cf8a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH12K_HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define ATH12K_HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define ATH12K_HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
+#define ATH12K_HTT_STATS_SUBTYPE_MAX 16
+#define ATH12K_HTT_MAX_STRING_LEN 256
+
+#define ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx) ((_idx) & 0x1f)
+#define ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx) ((_idx) & 0x3f)
+#define ATH12K_HTT_STATS_RESET_BITMAP32_BIT(_idx) (1 << \
+ ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx))
+#define ATH12K_HTT_STATS_RESET_BITMAP64_BIT(_idx) (1 << \
+ ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx))
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar);
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+#else /* CONFIG_ATH12K_DEBUGFS */
+static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+/**
+ * DOC: target -> host extended statistics upload
+ *
+ * The following field definitions describe the format of the HTT
+ * target to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see debugfs_htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: ath12k_dbg_htt_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value:
+ * 0 -> The requested stats have been delivered in full
+ * 1 -> The requested stats have been delivered in part
+ * 2 -> The requested stats could not be delivered (error case)
+ * 3 -> The requested stat type is not recognized (invalid)
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
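+/* Fields extracted by the host from the info1 word: the Done flag (D) and
+ * the stats entry length described in the layout above.
+ */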
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE BIT(11)
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath12k_htt_extd_stats_msg {
+ __le32 info0;
+ __le64 cookie;
+ __le32 info1;
+ u8 data[];
+} __packed;
+
+/* htt_dbg_ext_stats_type */
+enum ath12k_dbg_htt_ext_stats_type {
+ ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+
+ /* keep this last */
+ ATH12K_DBG_HTT_NUM_EXT_STATS,
+};
+
+enum ath12k_dbg_htt_tlv_tag {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
+ HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_MU_PPDU_DIST_TAG = 129,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define ATH12K_HTT_STATS_MAC_ID GENMASK(7, 0)
+
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 150
+
+/* MU MIMO distribution stats is a 2-dimensional array
+ * with dimension one denoting stats for nr4[0] or nr8[1]
+ */
+#define ATH12K_HTT_STATS_NUM_NR_BINS 2
+#define ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST 10
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS 9
+#define ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS)
+#define ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST)
+
+enum ath12k_htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+enum ath12k_htt_stats_reset_cfg_param_alloc_pos {
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES = 1,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ bool override_cfg_param;
+ u8 pdev_id;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 peer_addr[ETH_ALEN];
+ struct completion htt_stats_rcvd;
+ u32 buf_len;
+ u8 buf[];
+};
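
As a rough sketch of how a request structure like this is typically driven (not the driver's actual debugfs path; the send helper, buffer size and timeout below are hypothetical, and the definitions above are assumed to be in scope):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#define EXAMPLE_HTT_STATS_BUF_SIZE	(512 * 1024)	/* illustrative size only */

struct ath12k;

/* hypothetical stand-in for the driver's HTT EXT_STATS_CFG request path */
int example_send_ext_stats_req(struct ath12k *ar, struct debug_htt_stats_req *req);

static int example_request_htt_stats(struct ath12k *ar, u8 pdev_id,
				     enum ath12k_dbg_htt_ext_stats_type type)
{
	struct debug_htt_stats_req *req;
	int ret = 0;

	req = kzalloc(sizeof(*req) + EXAMPLE_HTT_STATS_BUF_SIZE, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->type = type;
	req->pdev_id = pdev_id;
	init_completion(&req->htt_stats_rcvd);

	ret = example_send_ext_stats_req(ar, req);
	if (ret)
		goto out;

	/* the completion is signalled once a confirmation segment with the
	 * DONE bit set has been copied into req->buf
	 */
	if (!wait_for_completion_timeout(&req->htt_stats_rcvd, 3 * HZ))
		ret = -ETIMEDOUT;

out:
	kfree(req);
	return ret;
}
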
+
+struct ath12k_htt_tx_pdev_stats_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 hw_queued;
+ __le32 hw_reaped;
+ __le32 underrun;
+ __le32 hw_paused;
+ __le32 hw_flush;
+ __le32 hw_filt;
+ __le32 tx_abort;
+ __le32 mpdu_requed;
+ __le32 tx_xretry;
+ __le32 data_rc;
+ __le32 mpdu_dropped_xretry;
+ __le32 illgl_rate_phy_err;
+ __le32 cont_xretry;
+ __le32 tx_timeout;
+ __le32 pdev_resets;
+ __le32 phy_underrun;
+ __le32 txop_ovf;
+ __le32 seq_posted;
+ __le32 seq_failed_queueing;
+ __le32 seq_completed;
+ __le32 seq_restarted;
+ __le32 mu_seq_posted;
+ __le32 seq_switch_hw_paused;
+ __le32 next_seq_posted_dsr;
+ __le32 seq_posted_isr;
+ __le32 seq_ctrl_cached;
+ __le32 mpdu_count_tqm;
+ __le32 msdu_count_tqm;
+ __le32 mpdu_removed_tqm;
+ __le32 msdu_removed_tqm;
+ __le32 mpdus_sw_flush;
+ __le32 mpdus_hw_filter;
+ __le32 mpdus_truncated;
+ __le32 mpdus_ack_failed;
+ __le32 mpdus_expired;
+ __le32 mpdus_seq_hw_retry;
+ __le32 ack_tlv_proc;
+ __le32 coex_abort_mpdu_cnt_valid;
+ __le32 coex_abort_mpdu_cnt;
+ __le32 num_total_ppdus_tried_ota;
+ __le32 num_data_ppdus_tried_ota;
+ __le32 local_ctrl_mgmt_enqued;
+ __le32 local_ctrl_mgmt_freed;
+ __le32 local_data_enqued;
+ __le32 local_data_freed;
+ __le32 mpdu_tried;
+ __le32 isr_wait_seq_posted;
+
+ __le32 tx_active_dur_us_low;
+ __le32 tx_active_dur_us_high;
+ __le32 remove_mpdus_max_retries;
+ __le32 comp_delivered;
+ __le32 ppdu_ok;
+ __le32 self_triggers;
+ __le32 tx_time_dur_data;
+ __le32 seq_qdepth_repost_stop;
+ __le32 mu_seq_min_msdu_repost_stop;
+ __le32 seq_min_msdu_repost_stop;
+ __le32 seq_txop_repost_stop;
+ __le32 next_seq_cancel;
+ __le32 fes_offsets_err_cnt;
+ __le32 num_mu_peer_blacklisted;
+ __le32 mu_ofdma_seq_posted;
+ __le32 ul_mumimo_seq_posted;
+ __le32 ul_ofdma_seq_posted;
+
+ __le32 thermal_suspend_cnt;
+ __le32 dfs_suspend_cnt;
+ __le32 tx_abort_suspend_cnt;
+ __le32 tgt_specific_opaque_txq_suspend_info;
+ __le32 last_suspend_reason;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_urrn_tlv {
+ DECLARE_FLEX_ARRAY(__le32, urrn_stats);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_flush_tlv {
+ DECLARE_FLEX_ARRAY(__le32, flush_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_phy_err_tlv {
+ DECLARE_FLEX_ARRAY(__le32, phy_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_status);
+} __packed;
+
+struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv {
+ __le32 fw_tx_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX];
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_hist_status);
+} __packed;
+
+enum ath12k_htt_stats_hw_mode {
+ ATH12K_HTT_STATS_HWMODE_AC = 0,
+ ATH12K_HTT_STATS_HWMODE_AX = 1,
+ ATH12K_HTT_STATS_HWMODE_BE = 2,
+};
+
+struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
+ __le32 hw_mode;
+ __le32 num_seq_term_status[ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS];
+ __le32 num_ppdu_cmpl_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+ __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS];
+ __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+} __packed;
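
The *_WORDS macros above flatten the two NR bins into single arrays. As a minimal sketch (not part of the patch, and assuming the row-major layout implied by those macros, with the NR bin as the outer dimension), reading one counter out of the completed-PPDU histogram could look like this:

static u32 example_mu_ppdu_cmpl_count(const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *tlv,
				      u32 nr_bin, u32 ppdu_per_burst)
{
	u32 idx;

	if (nr_bin >= ATH12K_HTT_STATS_NUM_NR_BINS ||
	    ppdu_per_burst >= ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST)
		return 0;

	/* one row of MAX_NUM_MU_PPDU_PER_BURST words per NR bin (nr4 = 0, nr8 = 1) */
	idx = nr_bin * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST + ppdu_per_burst;

	return le32_to_cpu(tlv->num_ppdu_cmpl_per_burst[idx]);
}
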
+
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
+#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+struct ath12k_htt_stats_tx_sched_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 current_timestamp;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv {
+ __le32 mac_id__word;
+ __le32 sched_policy;
+ __le32 last_sched_cmd_posted_timestamp;
+ __le32 last_sched_cmd_compl_timestamp;
+ __le32 sched_2_tac_lwm_count;
+ __le32 sched_2_tac_ring_full;
+ __le32 sched_cmd_post_failure;
+ __le32 num_active_tids;
+ __le32 num_ps_schedules;
+ __le32 sched_cmds_pending;
+ __le32 num_tid_register;
+ __le32 num_tid_unregister;
+ __le32 num_qstats_queried;
+ __le32 qstats_update_pending;
+ __le32 last_qstats_query_timestamp;
+ __le32 num_tqm_cmdq_full;
+ __le32 num_de_sched_algo_trigger;
+ __le32 num_rt_sched_algo_trigger;
+ __le32 num_tqm_sched_algo_trigger;
+ __le32 notify_sched;
+ __le32 dur_based_sendn_term;
+ __le32 su_notify2_sched;
+ __le32 su_optimal_queued_msdus_sched;
+ __le32 su_delay_timeout_sched;
+ __le32 su_min_txtime_sched_delay;
+ __le32 su_no_delay;
+ __le32 num_supercycles;
+ __le32 num_subcycles_with_sort;
+ __le32 num_subcycles_no_sort;
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_posted_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted);
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_reaped_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_order_su_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_order_su);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_ineligibility_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_ineligibility);
+} __packed;
+
+enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum {
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX,
+};
+
+struct ath12k_htt_sched_txq_supercycle_triggers_tlv {
+ DECLARE_FLEX_ARRAY(__le32, supercycle_triggers);
+} __packed;
+
+struct ath12k_htt_hw_stats_pdev_errs_tlv {
+ __le32 mac_id__word;
+ __le32 tx_abort;
+ __le32 tx_abort_fail_count;
+ __le32 rx_abort;
+ __le32 rx_abort_fail_count;
+ __le32 warm_reset;
+ __le32 cold_reset;
+ __le32 tx_flush;
+ __le32 tx_glb_reset;
+ __le32 tx_txq_reset;
+ __le32 rx_timeout_reset;
+ __le32 mac_cold_reset_restore_cal;
+ __le32 mac_cold_reset;
+ __le32 mac_warm_reset;
+ __le32 mac_only_reset;
+ __le32 phy_warm_reset;
+ __le32 phy_warm_reset_ucode_trig;
+ __le32 mac_warm_reset_restore_cal;
+ __le32 mac_sfm_reset;
+ __le32 phy_warm_reset_m3_ssr;
+ __le32 phy_warm_reset_reason_phy_m3;
+ __le32 phy_warm_reset_reason_tx_hw_stuck;
+ __le32 phy_warm_reset_reason_num_rx_frame_stuck;
+ __le32 phy_warm_reset_reason_wal_rx_rec_rx_busy;
+ __le32 phy_warm_reset_reason_wal_rx_rec_mac_hng;
+ __le32 phy_warm_reset_reason_mac_conv_phy_reset;
+ __le32 wal_rx_recovery_rst_mac_hang_cnt;
+ __le32 wal_rx_recovery_rst_known_sig_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_consec_cnt;
+ __le32 wal_rx_recovery_rst_rx_busy_cnt;
+ __le32 wal_rx_recovery_rst_phy_mac_hang_cnt;
+ __le32 rx_flush_cnt;
+ __le32 phy_warm_reset_reason_tx_exp_cca_stuck;
+ __le32 phy_warm_reset_reason_tx_consec_flsh_war;
+ __le32 phy_warm_reset_reason_tx_hwsch_reset_war;
+ __le32 phy_warm_reset_reason_hwsch_cca_wdog_war;
+ __le32 fw_rx_rings_reset;
+ __le32 rx_dest_drain_rx_descs_leak_prevented;
+ __le32 rx_dest_drain_rx_descs_saved_cnt;
+ __le32 rx_dest_drain_rxdma2reo_leak_detected;
+ __le32 rx_dest_drain_rxdma2fw_leak_detected;
+ __le32 rx_dest_drain_rxdma2wbm_leak_detected;
+ __le32 rx_dest_drain_rxdma1_2sw_leak_detected;
+ __le32 rx_dest_drain_rx_drain_ok_mac_idle;
+ __le32 rx_dest_drain_ok_mac_not_idle;
+ __le32 rx_dest_drain_prerequisite_invld;
+ __le32 rx_dest_drain_skip_non_lmac_reset;
+ __le32 rx_dest_drain_hw_fifo_notempty_post_wait;
+} __packed;
+
+#define ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct ath12k_htt_hw_stats_intr_misc_tlv {
+ u8 hw_intr_name[ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ __le32 mask;
+ __le32 count;
+} __packed;
+
+struct ath12k_htt_hw_stats_whal_tx_tlv {
+ __le32 mac_id__word;
+ __le32 last_unpause_ppdu_id;
+ __le32 hwsch_unpause_wait_tqm_write;
+ __le32 hwsch_dummy_tlv_skipped;
+ __le32 hwsch_misaligned_offset_received;
+ __le32 hwsch_reset_count;
+ __le32 hwsch_dev_reset_war;
+ __le32 hwsch_delayed_pause;
+ __le32 hwsch_long_delayed_pause;
+ __le32 sch_rx_ppdu_no_response;
+ __le32 sch_selfgen_response;
+ __le32 sch_rx_sifs_resp_trigger;
+} __packed;
+
+struct ath12k_htt_hw_war_stats_tlv {
+ __le32 mac_id__word;
+ DECLARE_FLEX_ARRAY(__le32, hw_wars);
+} __packed;
+
+struct ath12k_htt_tx_tqm_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 max_cmdq_id;
+ __le32 list_mpdu_cnt_hist_intvl;
+ __le32 add_msdu;
+ __le32 q_empty;
+ __le32 q_not_empty;
+ __le32 drop_notification;
+ __le32 desc_threshold;
+ __le32 hwsch_tqm_invalid_status;
+ __le32 missed_tqm_gen_mpdus;
+ __le32 tqm_active_tids;
+ __le32 tqm_inactive_tids;
+ __le32 tqm_active_msduq_flows;
+ __le32 msduq_timestamp_updates;
+ __le32 msduq_updates_mpdu_head_info_cmd;
+ __le32 msduq_updates_emp_to_nonemp_status;
+ __le32 get_mpdu_head_info_cmds_by_query;
+ __le32 get_mpdu_head_info_cmds_by_tac;
+ __le32 gen_mpdu_cmds_by_query;
+ __le32 high_prio_q_not_empty;
+} __packed;
+
+struct ath12k_htt_tx_tqm_error_stats_tlv {
+ __le32 q_empty_failure;
+ __le32 q_not_empty_failure;
+ __le32 add_msdu_failure;
+ __le32 tqm_cache_ctl_err;
+ __le32 tqm_soft_reset;
+ __le32 tqm_reset_num_in_use_link_descs;
+ __le32 tqm_reset_num_lost_link_descs;
+ __le32 tqm_reset_num_lost_host_tx_buf_cnt;
+ __le32 tqm_reset_num_in_use_internal_tqm;
+ __le32 tqm_reset_num_in_use_idle_link_rng;
+ __le32 tqm_reset_time_to_tqm_hang_delta_ms;
+ __le32 tqm_reset_recovery_time_ms;
+ __le32 tqm_reset_num_peers_hdl;
+ __le32 tqm_reset_cumm_dirty_hw_mpduq_cnt;
+ __le32 tqm_reset_cumm_dirty_hw_msduq_proc;
+ __le32 tqm_reset_flush_cache_cmd_su_cnt;
+ __le32 tqm_reset_flush_cache_cmd_other_cnt;
+ __le32 tqm_reset_flush_cache_cmd_trig_type;
+ __le32 tqm_reset_flush_cache_cmd_trig_cfg;
+ __le32 tqm_reset_flush_cmd_skp_status_null;
+} __packed;
+
+struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, gen_mpdu_end_reason);
+} __packed;
+
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_end_reason);
+} __packed;
+
+struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_cnt_hist);
+} __packed;
+
+struct ath12k_htt_tx_tqm_pdev_stats_tlv {
+ __le32 msdu_count;
+ __le32 mpdu_count;
+ __le32 remove_msdu;
+ __le32 remove_mpdu;
+ __le32 remove_msdu_ttl;
+ __le32 send_bar;
+ __le32 bar_sync;
+ __le32 notify_mpdu;
+ __le32 sync_cmd;
+ __le32 write_cmd;
+ __le32 hwsch_trigger;
+ __le32 ack_tlv_proc;
+ __le32 gen_mpdu_cmd;
+ __le32 gen_list_cmd;
+ __le32 remove_mpdu_cmd;
+ __le32 remove_mpdu_tried_cmd;
+ __le32 mpdu_queue_stats_cmd;
+ __le32 mpdu_head_info_cmd;
+ __le32 msdu_flow_stats_cmd;
+ __le32 remove_msdu_cmd;
+ __le32 remove_msdu_ttl_cmd;
+ __le32 flush_cache_cmd;
+ __le32 update_mpduq_cmd;
+ __le32 enqueue;
+ __le32 enqueue_notify;
+ __le32 notify_mpdu_at_head;
+ __le32 notify_mpdu_state_valid;
+ __le32 sched_udp_notify1;
+ __le32 sched_udp_notify2;
+ __le32 sched_nonudp_notify1;
+ __le32 sched_nonudp_notify2;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 7843c76a82c1..61aa78d8bd8c 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -132,7 +132,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
enum hal_ring_type type, int ring_num)
{
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
const u8 *grp_mask;
+ int i;
switch (type) {
case HAL_WBM2SW_RELEASE:
@@ -140,6 +142,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
+ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ if (ring_num == map[i].wbm_ring_num) {
+ ring_num = i;
+ break;
+ }
+ }
+
grp_mask = &ab->hw_params->ring_mask->tx[0];
}
break;
@@ -457,8 +467,6 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
}
- ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}
@@ -479,20 +487,6 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
goto err;
}
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
- DP_TCL_CMD_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
- goto err;
- }
-
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
- 0, 0, DP_TCL_STATUS_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
- goto err;
- }
-
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
@@ -616,6 +610,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
int i;
int ret = 0;
u32 end_offset, cookie;
+ enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
@@ -646,7 +641,8 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
paddr = link_desc_banks[i].paddr;
while (n_entries) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+ ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+ paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
@@ -790,6 +786,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
u32 paddr;
int i, ret;
u32 cookie;
+ enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -850,8 +847,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
while (n_entries &&
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(desc,
- cookie, paddr);
+ ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
@@ -881,11 +877,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
enum dp_monitor_mode monitor_mode;
u8 ring_mask;
- while (i < ab->hw_params->max_tx_ring) {
- if (ab->hw_params->ring_mask->tx[grp_id] &
- BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
- ath12k_dp_tx_completion_handler(ab, i);
- i++;
+ if (ab->hw_params->ring_mask->tx[grp_id]) {
+ i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
+ ath12k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params->ring_mask->rx_err[grp_id]) {
@@ -921,8 +915,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -942,8 +936,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -1031,7 +1025,7 @@ static void ath12k_dp_service_mon_ring(struct timer_list *t)
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
@@ -1355,13 +1349,14 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie)
{
+ struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info **desc_addr_ptr;
u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET;
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
if (ppt_idx < start_ppt_idx ||
@@ -1369,6 +1364,7 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
+ ppt_idx = ppt_idx - dp->rx_ppt_base;
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
return *desc_addr_ptr;
@@ -1403,7 +1399,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
u32 i, j, pool_id, tx_spt_page;
- u32 ppt_idx;
+ u32 ppt_idx, cookie_ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
@@ -1418,10 +1414,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
}
ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+ cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
dp->spt_info->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
@@ -1482,6 +1479,7 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
end = start + ATH12K_NUM_TX_SPT_PAGES;
break;
case ATH12K_DP_RX_DESC:
+ cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
start = ATH12K_RX_SPT_PAGE_OFFSET;
end = start + ATH12K_NUM_RX_SPT_PAGES;
break;
@@ -1524,6 +1522,8 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
+
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
ATH12K_PAGE_SIZE,
@@ -1587,6 +1587,24 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return 0;
}
+static enum hal_rx_buf_return_buf_manager
+ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
+{
+ switch (ab->device_id) {
+ case 0:
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ case 1:
+ return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
+ case 2:
+ return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
+ default:
+ ath12k_warn(ab, "invalid %d device id, so choose default rbm\n",
+ ab->device_id);
+ WARN_ON(1);
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ }
+}
+
int ath12k_dp_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1603,6 +1621,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
+ dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 5cf0d21ef184..b77497c14ac4 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -325,15 +325,15 @@ struct ath12k_dp {
u8 htt_tgt_ver_major;
u8 htt_tgt_ver_minor;
struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ enum hal_rx_buf_return_buf_manager idle_link_rbm;
struct dp_srng wbm_idle_ring;
struct dp_srng wbm_desc_rel_ring;
- struct dp_srng tcl_cmd_ring;
- struct dp_srng tcl_status_ring;
struct dp_srng reo_reinject_ring;
struct dp_srng rx_rel_ring;
struct dp_srng reo_except_ring;
struct dp_srng reo_cmd_ring;
struct dp_srng reo_status_ring;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
@@ -351,6 +351,7 @@ struct ath12k_dp {
struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
struct ath12k_spt_info *spt_info;
u32 num_spt_pages;
+ u32 rx_ppt_base;
struct list_head rx_desc_free_list;
/* protects the free desc list */
spinlock_t rx_desc_lock;
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 6b0b72477540..5c6749bc4039 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1903,43 +1903,6 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
break;
}
- case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_mon_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
- struct dp_mon_packet_info *packet_info =
- (struct dp_mon_packet_info *)tlv_data;
- int buf_id = u32_get_bits(packet_info->cookie,
- DP_RXDMA_BUF_COOKIE_BUF_ID);
- struct sk_buff *msdu;
- struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
- struct ath12k_skb_rxcb *rxcb;
-
- spin_lock_bh(&buf_ring->idr_lock);
- msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!msdu)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- return DP_MON_TX_STATUS_PPDU_NOT_DONE;
- }
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- if (!mon_mpdu->head)
- mon_mpdu->head = msdu;
- else if (mon_mpdu->tail)
- mon_mpdu->tail->next = msdu;
-
- mon_mpdu->tail = msdu;
-
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- status = DP_MON_TX_BUFFER_ADDR;
- break;
- }
-
case HAL_TX_MPDU_END:
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
@@ -2088,8 +2051,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
} else {
- mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
- buf_ring = &dp->tx_mon_buf_ring;
+ return 0;
}
srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 75df622f25d8..14236d0a0c89 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -17,6 +17,7 @@
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
+#include "debugfs_htt_stats.h"
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
@@ -422,8 +423,6 @@ static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
- ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);
-
return 0;
}
@@ -476,15 +475,6 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
- &dp->tx_mon_buf_ring,
- HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab,
- "failed to setup HAL_TX_MONITOR_BUF\n");
- return ret;
- }
}
return 0;
@@ -496,10 +486,8 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
struct ath12k_base *ab = ar->ab;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
- }
}
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
@@ -543,7 +531,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
int ret;
u32 mac_id = dp->mac_id;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_dst_ring[i],
HAL_RXDMA_MONITOR_DST,
@@ -554,17 +542,6 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
"failed to setup HAL_RXDMA_MONITOR_DST\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ar->ab,
- &dp->tx_mon_dst_ring[i],
- HAL_TX_MONITOR_DST,
- 0, mac_id + i,
- DP_TX_MONITOR_DEST_RING_SIZE);
- if (ret) {
- ath12k_warn(ar->ab,
- "failed to setup HAL_TX_MONITOR_DST\n");
- return ret;
- }
}
return 0;
@@ -1292,10 +1269,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
return 0;
}
-static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
- int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
- const void *ptr, void *data),
- void *data)
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
{
const struct htt_tlv *tlv;
const void *begin = ptr;
@@ -1765,6 +1742,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
ath12k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath12k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
ath12k_htt_mlo_offset_event_handler(ab, skb);
@@ -2383,8 +2361,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH12K_MIN_6G_FREQ &&
+ center_freq <= ATH12K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2402,8 +2382,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_desc, sizeof(*rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
@@ -2604,6 +2585,29 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
rcu_read_unlock();
}
+static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
+ enum ath12k_peer_metadata_version ver,
+ __le32 peer_metadata)
+{
+ switch (ver) {
+ default:
+ ath12k_warn(ab, "Unknown peer metadata version: %d", ver);
+ fallthrough;
+ case ATH12K_PEER_METADATA_V0:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V0_PEER_ID);
+ case ATH12K_PEER_METADATA_V1:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1_PEER_ID);
+ case ATH12K_PEER_METADATA_V1A:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
+ case ATH12K_PEER_METADATA_V1B:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
+ }
+}
+
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
@@ -2632,6 +2636,8 @@ try_again:
ath12k_hal_srng_access_begin(ab, srng);
while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct rx_mpdu_desc *mpdu_info;
+ struct rx_msdu_desc *msdu_info;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
@@ -2649,7 +2655,8 @@ try_again:
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ cookie);
continue;
}
}
@@ -2678,16 +2685,19 @@ try_again:
continue;
}
- rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ msdu_info = &desc->rx_msdu_info;
+ mpdu_info = &desc->rx_mpdu_info;
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
- rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
- RX_MPDU_DESC_META_DATA_PEER_ID);
- rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+ rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
+ mpdu_info->peer_meta_data);
+ rxcb->tid = le32_get_bits(mpdu_info->info0,
RX_MPDU_DESC_INFO0_TID);
__skb_queue_tail(&msdu_list, msdu);
@@ -2762,6 +2772,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}
@@ -2991,9 +3002,10 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct hal_srng *srng;
dma_addr_t link_paddr, buf_paddr;
u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
- u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
int ret;
struct ath12k_rx_desc_info *desc_info;
+ enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
u8 dst_ind;
hal_rx_desc_sz = ab->hal.hal_desc_sz;
@@ -3027,7 +3039,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, buf_paddr))
return -ENOMEM;
@@ -3071,7 +3083,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
cookie,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+ idle_link_rbm);
mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
@@ -3083,13 +3095,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- /* Firmware expects physical address to be filled in queue_addr_lo in
- * the MLO scenario and in case of non MLO peer meta data needs to be
- * filled.
- * TODO: Need to handle for MLO scenario.
- */
- reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
- reo_ent_ring->info0 = le32_encode_bits(dst_ind,
+ reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
+ queue_addr_hi = upper_32_bits(rx_tid->paddr);
+ reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind,
HAL_REO_ENTR_RING_INFO0_DEST_IND);
reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
@@ -3113,7 +3123,7 @@ err_free_desc:
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3346,7 +3356,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
+ cookie);
return -EINVAL;
}
}
@@ -3421,7 +3432,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
- bool drop = false;
+ bool drop;
int pdev_id;
tot_n_bufs_reaped = 0;
@@ -3439,7 +3450,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ drop = false;
ab->soc_stats.err_ring_pkts++;
+
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
if (ret) {
@@ -3451,7 +3464,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
- if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+ if (rbm != dp->idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
@@ -3765,7 +3778,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
+ err_info.cookie);
continue;
}
}
@@ -3961,7 +3975,7 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
}
@@ -3970,7 +3984,6 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
ath12k_dp_rxdma_buf_free(ab);
}
@@ -4028,7 +4041,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
- int ret;
+ int ret = 0;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
@@ -4054,7 +4067,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
* and modify the rx_desc struct
*/
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
HAL_RXDMA_BUF,
@@ -4081,7 +4094,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
i, HAL_RXDMA_BUF);
@@ -4113,15 +4126,6 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- 0, HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
- ret);
- return ret;
- }
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
@@ -4141,9 +4145,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
- idr_init(&dp->tx_mon_buf_ring.bufs_idr);
- spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
-
ret = ath12k_dp_srng_setup(ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0, 0,
@@ -4154,7 +4155,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -4186,15 +4187,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ab,
- &dp->tx_mon_buf_ring.refill_buf_ring,
- HAL_TX_MONITOR_BUF, 0, 0,
- DP_TX_MONITOR_BUF_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
- return ret;
- }
}
ret = ath12k_dp_rxdma_buf_setup(ab);
@@ -4223,7 +4215,7 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
return ret;
}
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i,
@@ -4234,17 +4226,6 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
i, ret);
return ret;
}
-
- ring_id = dp->tx_mon_dst_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- mac_id + i,
- HAL_TX_MONITOR_DST);
- if (ret) {
- ath12k_warn(ab,
- "failed to configure tx_mon_dst_ring %d %d\n",
- i, ret);
- return ret;
- }
}
out:
return 0;
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 2ff421160181..eb1f92559179 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -139,4 +139,8 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
#endif /* ATH12K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 9b6d7d72f57c..d08c04343e90 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -124,6 +124,44 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}
+#define HTT_META_DATA_ALIGNMENT 0x8
+
+static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
+{
+ struct sk_buff *tail;
+ void *metadata;
+
+ if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
+ return NULL;
+
+ metadata = pskb_put(skb, tail, tail_len);
+ memset(metadata, 0, tail_len);
+ return metadata;
+}
+
+/* Prepare HTT metadata when used with an ext MSDU descriptor */
+static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
+{
+ struct hal_tx_msdu_metadata *desc_ext;
+ u8 htt_desc_size;
+ /* Size rounded up to a multiple of 8 bytes */
+ u8 htt_desc_size_aligned;
+
+ htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
+ htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+ desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+ if (!desc_ext)
+ return -ENOMEM;
+
+ desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
+ le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
+ le32_encode_bits(1,
+ HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
+
+ return 0;
+}
+
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
@@ -145,6 +183,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
u8 ring_selector, ring_map = 0;
bool tcl_ring_retry;
bool msdu_ext_desc = false;
+ bool add_htt_metadata = false;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -248,6 +287,18 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ /* Add metadata for sw encrypted vlan group traffic */
+ add_htt_metadata = true;
+ msdu_ext_desc = true;
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
tx_desc->skb = skb;
tx_desc->mac_id = ar->pdev_idx;
ti.desc_id = tx_desc->desc_id;
@@ -269,6 +320,15 @@ tcl_ring_sel:
msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+ if (add_htt_metadata) {
+ ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
+ if (ret < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "Failed to add HTT meta data, dropping packet\n");
+ goto fail_unmap_dma;
+ }
+ }
+
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
@@ -352,15 +412,15 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
+ ar = ab->pdevs[pdev_id].ar;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ar->ah->hw, msdu);
- ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
@@ -393,8 +453,12 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
@@ -448,6 +512,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct hal_tx_status *ts)
{
struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ar->ah;
struct ieee80211_tx_info *info;
struct ath12k_skb_cb *skb_cb;
@@ -466,12 +531,12 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
if (!skb_cb->vif) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
@@ -481,17 +546,39 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
/* skip tx rate update from ieee80211_status*/
info->status.rates[0].idx = -1;
- if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
- !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
- info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
- }
+ switch (ts->status) {
+ case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
- if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
- (info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+ break;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ break;
+ }
+ fallthrough;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+ case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+ /* The failure status is due to an internal firmware tx failure,
+ * hence drop the frame; do not report the status of the frame to
+ * the upper layer
+ */
+ ieee80211_free_txskb(ah->hw, msdu);
+ goto exit;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
+ ts->status);
+ break;
+ }
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
@@ -669,14 +756,6 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
- case HAL_TX_MONITOR_BUF:
- *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- case HAL_TX_MONITOR_DST:
- *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
- *htt_ring_type = HTT_HW_TO_SW_RING;
- break;
default:
ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
@@ -854,7 +933,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -1007,6 +1086,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
struct htt_ext_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
int ret;
+ u32 pdev_id;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
@@ -1018,7 +1098,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
- cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
@@ -1044,13 +1125,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
struct ath12k_base *ab = ar->ab;
int ret;
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
- if (ret) {
- ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
- return ret;
- }
-
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
if (ret) {
ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
return ret;
@@ -1209,31 +1284,3 @@ err_free:
dev_kfree_skb_any(skb);
return ret;
}
-
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct htt_tx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
-
- /* TODO: Need to set upstream/downstream tlv filters
- * here
- */
-
- if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
- HAL_TX_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index 436d77e5e9ee..55ff8cc721e3 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_TX_H
@@ -12,7 +12,7 @@
struct ath12k_dp_htt_wbm_tx_status {
bool acked;
- int ack_rssi;
+ s8 ack_rssi;
};
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
@@ -36,6 +36,5 @@ int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int tx_buf_size,
struct htt_tx_ring_tlv_filter *htt_tlv_filter);
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index 78310da8cfe8..ca04bfae8bdc 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1969,14 +1969,15 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
}
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr)
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm)
{
desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
BUFFER_ADDR_INFO0_ADDR);
desc->buf_addr_info.info1 =
le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
BUFFER_ADDR_INFO1_ADDR) |
- le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
}
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index dbb9205bfa10..8a78bb9a10bc 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -770,12 +770,12 @@ struct hal_srng_config {
* enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
- * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 0 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 1 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 2 WBM is chosen in case of a multi-device config
* @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
* @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host
* @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host
@@ -788,9 +788,9 @@ struct hal_srng_config {
enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_FW_BM,
HAL_RX_BUF_RBM_SW0_BM,
HAL_RX_BUF_RBM_SW1_BM,
@@ -1113,7 +1113,8 @@ dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
struct hal_srng *srng);
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr);
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm);
u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
u32 len, u32 id, u8 byte_swap_data);
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 63340256d3f6..739f73370015 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -597,8 +597,30 @@ struct hal_tlv_64_hdr {
#define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27)
#define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28)
-/* TODO revisit after meta data is concluded */
-#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+/* Peer Metadata classification */
+
+/* Version 0 */
+#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0)
+#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16)
+
+/* Version 1 */
+#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14)
+#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16)
+#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24)
+#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1A */
+#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1B */
+#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26)
struct rx_mpdu_desc {
__le32 info0; /* %RX_MPDU_DESC_INFO */
@@ -2048,6 +2070,19 @@ struct hal_wbm_release_ring {
* fw with fw_reason2.
* @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
* fw with fw_reason3.
+ * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by
+ * fw with disable queue.
+ * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by
+ * fw to remove all mpdu until 1st non-match.
+ * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold
+ * criteria
+ * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc
+ * not available
+ * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due to drop bit set or
+ * null flow
+ * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due to mcast drop set for VDEV
+ * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to being set with
+ * 'TCL_drop_reason'
*/
enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
@@ -2058,6 +2093,13 @@ enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+ HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE,
+ HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING,
+ HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD,
+ HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL,
+ HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU,
+ HAL_WBM_TQM_REL_REASON_MULTICAST_DROP,
+ HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP,
};
struct hal_wbm_buffer_ring {
@@ -2964,4 +3006,29 @@ struct hal_mon_dest_desc {
* updated by SRNG.
*/
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG BIT(8)
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE GENMASK(16, 15)
+#define HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL BIT(31)
+
+struct hal_tx_msdu_metadata {
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+/* hal_tx_msdu_metadata
+ * valid_encrypt_type
+ * if set, encrypt type is valid
+ * encrypt_type
+ * 0 = NO_ENCRYPT,
+ * 1 = ENCRYPT,
+ * 2 ~ 3 - Reserved
+ * host_tx_desc_pool
+ * If set, firmware allocates tx_descriptors
+ * in WAL_BUFFERID_TX_HOST_DATA_EXP, instead
+ * of WAL_BUFFERID_TX_TCL_DATA_EXP.
+ * Use cases:
+ * Any time firmware uses TQM-BYPASS for a Data
+ * TID, firmware expects the host to set this bit.
+ */
+
#endif /* ATH12K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
index 7c837094a6f7..3cf5973771d7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@@ -57,7 +57,7 @@ struct hal_tx_info {
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
- u8 ack_rssi;
+ s8 ack_rssi;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 ppdu_id;
u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
index 7f0926fe751d..0e53ec269fa4 100644
--- a/drivers/net/wireless/ath/ath12k/hif.h
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -30,6 +30,7 @@ struct ath12k_hif_ops {
void (*ce_irq_enable)(struct ath12k_base *ab);
void (*ce_irq_disable)(struct ath12k_base *ab);
void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
+ int (*panic_handler)(struct ath12k_base *ab);
};
static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@@ -147,4 +148,12 @@ static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend
ab->hif.ops->power_down(ab, is_suspend);
}
+static inline int ath12k_hif_panic_handler(struct ath12k_base *ab)
+{
+ if (!ab->hif.ops->panic_handler)
+ return NOTIFY_DONE;
+
+ return ab->hif.ops->panic_handler(ab);
+}
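Editor's note: the NULL check lets buses without a panic_handler op fall through with NOTIFY_DONE, so the wrapper can sit directly in a panic notifier chain. A hedged sketch follows, assuming ath12k_base carries a notifier_block named panic_nb (an assumption made here for illustration).

	/* Hypothetical panic notifier wiring; panic_nb is assumed */
	static int ath12k_core_panic_notifier(struct notifier_block *nb,
					      unsigned long action, void *data)
	{
		struct ath12k_base *ab = container_of(nb, struct ath12k_base,
						      panic_nb);

		return ath12k_hif_panic_handler(ab);
	}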
+
#endif /* ATH12K_HIF_H */
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
index 2f2230f565bb..d13616bf07f4 100644
--- a/drivers/net/wireless/ath/ath12k/htc.c
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -244,6 +244,11 @@ static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack)
complete(&ab->htc_suspend);
}
+static void ath12k_htc_wakeup_from_suspend(struct ath12k_base *ab)
+{
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot wakeup from suspend is received\n");
+}
+
void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -349,6 +354,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
ath12k_htc_suspend_complete(ab, false);
break;
case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath12k_htc_wakeup_from_suspend(ab);
break;
default:
ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n",
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index f4c827015821..2e11ea763574 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -544,9 +544,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
},
.rx_mon_dest = {
0, 0, 0,
- ATH12K_RX_MON_RING_MASK_0,
- ATH12K_RX_MON_RING_MASK_1,
- ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
@@ -572,16 +569,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
ATH12K_HOST2RXDMA_RING_MASK_0,
},
.tx_mon_dest = {
- ATH12K_TX_MON_RING_MASK_0,
- ATH12K_TX_MON_RING_MASK_1,
+ 0, 0, 0,
},
};
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
- ATH12K_TX_RING_MASK_4,
},
.rx_mon_dest = {
},
@@ -884,14 +880,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -926,6 +923,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_sta_ps = false,
.acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
},
{
.name = "wcn7850 hw2.0",
@@ -956,7 +954,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_wcn7850,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.num_rxdma_dst_ring = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
@@ -1001,6 +999,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_sta_ps = true,
.acpi_guid = &wcn7850_uuid,
+ .supports_dynamic_smps_6ghz = false,
},
{
.name = "qcn9274 hw2.0",
@@ -1029,14 +1028,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -1071,6 +1071,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_sta_ps = false,
.acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 3f450ee93f34..e792eb6b249b 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -78,8 +78,7 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
-#define TARGET_RX_PEER_METADATA_VER_V1A 2
-#define TARGET_RX_PEER_METADATA_VER_V1B 3
+#define TARGET_EMA_MAX_PROFILE_PERIOD 8
#define ATH12K_HW_DEFAULT_QUEUE 0
#define ATH12K_HW_MAX_QUEUES 4
@@ -174,7 +173,7 @@ struct ath12k_hw_params {
const struct ath12k_hw_hal_params *hal_params;
bool rxdma1_enable:1;
- int num_rxmda_per_pdev;
+ int num_rxdma_per_pdev;
int num_rxdma_dst_ring;
bool rx_mac_buf_ring:1;
bool vdev_start_delay:1;
@@ -215,6 +214,7 @@ struct ath12k_hw_params {
bool supports_sta_ps;
const guid_t *acpi_guid;
+ bool supports_dynamic_smps_6ghz;
};
struct ath12k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 805cb084484a..8106297f0bc1 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -6,6 +6,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+
#include "mac.h"
#include "core.h"
#include "debug.h"
@@ -15,6 +16,8 @@
#include "dp_rx.h"
#include "peer.h"
#include "debugfs.h"
+#include "hif.h"
+#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -91,6 +94,10 @@ static const struct ieee80211_channel ath12k_5ghz_channels[] = {
};
static const struct ieee80211_channel ath12k_6ghz_channels[] = {
+ /* Operating Class 136 */
+ CHAN6G(2, 5935, 0),
+
+ /* Operating Classes 131-135 */
CHAN6G(1, 5955, 0),
CHAN6G(5, 5975, 0),
CHAN6G(9, 5995, 0),
@@ -666,6 +673,82 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
return NULL;
}
+static struct ath12k_vif *ath12k_mac_get_vif_up(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up)
+ return arvif;
+ }
+
+ return NULL;
+}
+
+static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
+{
+ switch (band1) {
+ case NL80211_BAND_2GHZ:
+ if (band2 & WMI_HOST_WLAN_2G_CAP)
+ return true;
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ if (band2 & WMI_HOST_WLAN_5G_CAP)
+ return true;
+ break;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 pdev_id = ab->fw_pdev[0].pdev_id;
+ int i;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return pdev_id;
+
+ band = def.chan->band;
+
+ for (i = 0; i < ab->fw_pdev_count; i++) {
+ if (ath12k_mac_band_match(band, ab->fw_pdev[i].supported_bands))
+ return ab->fw_pdev[i].pdev_id;
+ }
+
+ return pdev_id;
+}
+
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ struct ath12k_base *ab = ar->ab;
+
+ if (!ab->hw_params->single_pdev_only)
+ return ar->pdev->pdev_id;
+
+ arvif = ath12k_mac_get_vif_up(ar);
+
+ /* fw_pdev array has pdev ids derived from phy capability
+ * service ready event (pdev_and_hw_link_ids).
+ * If no vif is active, return default first index.
+ */
+ if (!arvif)
+ return ar->ab->fw_pdev[0].pdev_id;
+
+ /* If active vif is found, return the pdev id matching chandef band */
+ return ath12k_mac_get_target_pdev_id_from_vif(arvif);
+}
+
static void ath12k_pdev_caps_update(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -863,9 +946,12 @@ static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
{
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -882,6 +968,7 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
{
struct ieee80211_channel *channel;
struct wmi_vdev_start_req_arg arg = {};
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -922,7 +1009,9 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
return ret;
}
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -1289,37 +1378,188 @@ static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
return 0;
}
+static void ath12k_mac_set_arvif_ies(struct ath12k_vif *arvif, struct sk_buff *bcn,
+ u8 bssid_index, bool *nontx_profile_found)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data;
+ const struct element *elem, *nontx, *index, *nie;
+ const u8 *start, *tail;
+ u16 rem_len;
+ u8 i;
+
+ start = bcn->data + ieee80211_get_hdrlen_from_skb(bcn) + sizeof(mgmt->u.beacon);
+ tail = skb_tail_pointer(bcn);
+ rem_len = tail - start;
+
+ arvif->rsnie_present = false;
+ arvif->wpaie_present = false;
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, start, rem_len))
+ arvif->rsnie_present = true;
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA,
+ start, rem_len))
+ arvif->wpaie_present = true;
+
+ /* Return from here for the transmitted profile */
+ if (!bssid_index)
+ return;
+
+ /* rsnie_present for the nontransmitted profile is initially set to the same value
+ * as that of the transmitted profile. It is changed if the security configurations
+ * differ.
+ */
+ *nontx_profile_found = false;
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, rem_len) {
+ /* Fixed minimum MBSSID element length with at least one
+ * nontransmitted BSSID profile is 12 bytes, as given below:
+ * 1 (max BSSID indicator) +
+ * 2 (Nontransmitted BSSID profile: Subelement ID + length) +
+ * 4 (Nontransmitted BSSID Capabilities: tag + length + info) +
+ * 2 (Nontransmitted BSSID SSID: tag + length) +
+ * 3 (Nontransmitted BSSID Index: tag + length + BSSID index)
+ */
+ if (elem->datalen < 12 || elem->data[0] < 1)
+ continue; /* Max BSSID indicator must be >=1 */
+
+ for_each_element(nontx, elem->data + 1, elem->datalen - 1) {
+ start = nontx->data;
+
+ if (nontx->id != 0 || nontx->datalen < 4)
+ continue; /* Invalid nontransmitted profile */
+
+ if (nontx->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ nontx->data[1] != 2) {
+ continue; /* Missing nontransmitted BSS capabilities */
+ }
+
+ if (nontx->data[4] != WLAN_EID_SSID)
+ continue; /* Missing SSID for nontransmitted BSS */
+
+ index = cfg80211_find_elem(WLAN_EID_MULTI_BSSID_IDX,
+ start, nontx->datalen);
+ if (!index || index->datalen < 1 || index->data[0] == 0)
+ continue; /* Invalid MBSSID Index element */
+
+ if (index->data[0] == bssid_index) {
+ *nontx_profile_found = true;
+ if (cfg80211_find_ie(WLAN_EID_RSN,
+ nontx->data,
+ nontx->datalen)) {
+ arvif->rsnie_present = true;
+ return;
+ } else if (!arvif->rsnie_present) {
+ return; /* Both tx and nontx BSS are open */
+ }
+
+ nie = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ nontx->data,
+ nontx->datalen);
+ if (!nie || nie->datalen < 2)
+ return; /* Invalid non-inheritance element */
+
+ for (i = 1; i < nie->datalen - 1; i++) {
+ if (nie->data[i] == WLAN_EID_RSN) {
+ arvif->rsnie_present = false;
+ break;
+ }
+ }
+
+ return;
+ }
+ }
+ }
+}
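Editor's note: for reference, a hand-built illustration (not from the patch) of the smallest Multiple BSSID element body that the checks above accept: 12 bytes carrying a single nontransmitted profile. The capability value is arbitrary.

	static const u8 example_mbssid_body[] = {
		1,			/* MaxBSSID Indicator: 2^1 BSSIDs */
		0, 9,			/* Nontransmitted BSSID Profile subelement, 9 bytes */
		83, 2, 0x01, 0x04,	/* Nontransmitted BSSID Capability (tag 83), 2 bytes */
		0, 0,			/* SSID element, zero length (inherited from tx BSS) */
		85, 1, 1,		/* Multiple BSSID-Index (tag 85), BSSID index 1 */
	};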
+
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_vif *arvif)
+{
+ struct ieee80211_bss_conf *bss_conf = &arvif->vif->bss_conf;
+ struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
+ struct ieee80211_ema_beacons *beacons;
+ struct ath12k_vif *tx_arvif;
+ bool nontx_profile_found = false;
+ int ret = 0;
+ u8 i;
+
+ tx_arvif = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+ beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
+ tx_arvif->vif, 0);
+ if (!beacons || !beacons->cnt) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get ema beacon templates from mac80211\n");
+ return -EPERM;
+ }
+
+ if (tx_arvif == arvif)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL);
+
+ for (i = 0; i < beacons->cnt; i++) {
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
+ bss_conf->bssid_index,
+ &nontx_profile_found);
+
+ ema_args.bcn_cnt = beacons->cnt;
+ ema_args.bcn_index = i;
+ ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
+ &beacons->bcn[i].offs,
+ beacons->bcn[i].skb, &ema_args);
+ if (ret) {
+ ath12k_warn(tx_arvif->ar->ab,
+ "failed to set ema beacon template id %i error %d\n",
+ i, ret);
+ break;
+ }
+ }
+
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_warn(arvif->ar->ab,
+ "nontransmitted bssid index %u not found in beacon template\n",
+ bss_conf->bssid_index);
+
+ ieee80211_beacon_free_ema_list(beacons);
+ return ret;
+}
+
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
+ struct ath12k_vif *tx_arvif = arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
+ bool nontx_profile_found = false;
struct sk_buff *bcn;
- struct ieee80211_mgmt *mgmt;
- u8 *ies;
int ret;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (vif->mbssid_tx_vif) {
+ tx_arvif = ath12k_vif_to_arvif(vif->mbssid_tx_vif);
+ if (tx_arvif != arvif && arvif->is_up)
+ return 0;
+
+ if (vif->bss_conf.ema_ap)
+ return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+ }
+
+ bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_arvif->vif,
+ &offs, 0);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
}
- ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
- ies += sizeof(mgmt->u.beacon);
-
- if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
- arvif->rsnie_present = true;
-
- if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies, (skb_tail_pointer(bcn) - ies)))
- arvif->wpaie_present = true;
+ if (tx_arvif == arvif) {
+ ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
+ } else {
+ ath12k_mac_set_arvif_ies(arvif, bcn,
+ arvif->vif->bss_conf.bssid_index,
+ &nontx_profile_found);
+ if (!nontx_profile_found)
+ ath12k_warn(ab,
+ "nontransmitted profile not found in beacon template\n");
+ }
if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
@@ -1344,7 +1584,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
}
}
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
@@ -1358,6 +1598,7 @@ free_bcn_skb:
static void ath12k_control_beaconing(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k *ar = arvif->ar;
int ret;
@@ -1385,8 +1626,15 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif,
ether_addr_copy(arvif->bssid, info->bssid);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ if (arvif->vif->mbssid_tx_vif) {
+ params.tx_bssid = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif)->bssid;
+ params.nontx_profile_idx = info->bssid_index;
+ params.nontx_profile_cnt = 1 << info->bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
@@ -1881,7 +2129,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
{
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
int i;
- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
+ u8 ampdu_factor, max_nss;
+ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u16 mcs_160_map, mcs_80_map;
bool support_160;
u16 v;
@@ -2028,17 +2278,88 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
}
}
+static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor, mpdu_density;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
+ arg->bw_320 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+
+ mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ arg->peer_mpdu_density = ath12k_parse_mpdudensity(mpdu_density);
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+ * Here, we extract the Max A-MPDU Length Exponent Extension from the HE caps and
+ * add the Maximum A-MPDU Length Exponent from the HE 6 GHz Band Capabilities element.
+ */
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) +
+ u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+}
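Editor's note: a short worked example of the computation above, assuming IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR is 13 (the value in current mac80211 headers); the capability values are illustrative.

	/*
	 * HE MAC cap MAX_AMPDU_LEN_EXP extension = 2
	 * HE 6 GHz cap MAX_AMPDU_LEN_EXP         = 1
	 * ampdu_factor  = 2 + 1 = 3
	 * peer_max_mpdu = (1 << (13 + 3)) - 1 = 65535 bytes
	 */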
+
+static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
+ int *smps)
+{
+ if (ht_cap->ht_supported)
+ *smps = u16_get_bits(ht_cap->cap, IEEE80211_HT_CAP_SM_PS);
+ else
+ *smps = le16_get_bits(he_6ghz_capa->capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+
+ if (*smps >= ARRAY_SIZE(ath12k_smps_map))
+ return -EINVAL;
+
+ return 0;
+}
+
static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
return;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps))
+ return;
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
@@ -2500,6 +2821,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
ath12k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
ath12k_peer_assoc_h_eht(ar, vif, sta, arg);
ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
@@ -2510,18 +2832,17 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
const u8 *addr,
- const struct ieee80211_sta_ht_cap *ht_cap)
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa)
{
- int smps;
+ int smps, ret = 0;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
- if (smps >= ARRAY_SIZE(ath12k_smps_map))
- return -EINVAL;
+ ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps);
+ if (ret < 0)
+ return ret;
return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE,
@@ -2533,6 +2854,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ieee80211_bss_conf *bss_conf)
{
struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
@@ -2572,7 +2894,8 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->deflink.ht_cap);
+ &ap_sta->deflink.ht_cap,
+ &ap_sta->deflink.he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -2584,7 +2907,10 @@ static void ath12k_bss_assoc(struct ath12k *ar,
arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
- ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
@@ -2592,6 +2918,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
@@ -2639,6 +2966,8 @@ static void ath12k_bss_disassoc(struct ath12k *ar,
arvif->is_up = false;
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
cancel_delayed_work(&arvif->connection_loss_work);
}
@@ -3130,7 +3459,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
static struct ath12k*
ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req)
+ u32 center_freq)
{
struct ath12k_hw *ah = hw->priv;
enum nl80211_band band;
@@ -3147,9 +3476,9 @@ ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
* split the hw request and perform multiple scans
*/
- if (req->req.channels[0]->center_freq < ATH12K_MIN_5G_FREQ)
+ if (center_freq < ATH12K_MIN_5G_FREQ)
band = NL80211_BAND_2GHZ;
- else if (req->req.channels[0]->center_freq < ATH12K_MIN_6G_FREQ)
+ else if (center_freq < ATH12K_MIN_6G_FREQ)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_6GHZ;
@@ -3349,7 +3678,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
/* Since the targeted scan device could depend on the frequency
* requested in the hw_req, select the corresponding radio
*/
- ar = ath12k_mac_select_scan_device(hw, vif, hw_req);
+ ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
if (!ar)
return -EINVAL;
@@ -3845,6 +4174,11 @@ static int ath12k_station_assoc(struct ath12k *ar,
ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+ if (peer_arg.peer_nss < 1) {
+ ath12k_warn(ar->ab,
+ "invalid peer NSS %d\n", peer_arg.peer_nss);
+ return -EINVAL;
+ }
ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -3879,7 +4213,8 @@ static int ath12k_station_assoc(struct ath12k *ar,
return 0;
ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->deflink.ht_cap);
+ &sta->deflink.ht_cap,
+ &sta->deflink.he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -5269,6 +5604,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
{
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -5289,8 +5625,8 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
- if (ar->state != ATH12K_STATE_ON &&
- ar->state != ATH12K_STATE_RESTARTED)
+ if (ah->state != ATH12K_HW_STATE_ON &&
+ ah->state != ATH12K_HW_STATE_RESTARTED)
return 0;
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
@@ -5590,51 +5926,16 @@ static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
/* TODO: Need to support new monitor mode */
}
-static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
-{
- int recovery_start_count;
-
- if (!ab->is_reset)
- return;
-
- recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
-
- if (recovery_start_count == ab->num_radios) {
- complete(&ab->recovery_start);
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
- }
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
-
- wait_for_completion_timeout(&ab->reconfigure_complete,
- ATH12K_RECONFIGURE_TIMEOUT_HZ);
-}
-
static int ath12k_mac_start(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ah->hw_mutex);
- switch (ar->state) {
- case ATH12K_STATE_OFF:
- ar->state = ATH12K_STATE_ON;
- break;
- case ATH12K_STATE_RESTARTING:
- ar->state = ATH12K_STATE_RESTARTED;
- ath12k_mac_wait_reconfigure(ab);
- break;
- case ATH12K_STATE_RESTARTED:
- case ATH12K_STATE_WEDGED:
- case ATH12K_STATE_ON:
- WARN_ON(1);
- ret = -EINVAL;
- goto err;
- }
+ mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
1, pdev->pdev_id);
@@ -5726,7 +6027,6 @@ static int ath12k_mac_start(struct ath12k *ar)
return 0;
err:
- ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -5749,9 +6049,29 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
ath12k_drain_tx(ah);
+ guard(mutex)(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_OFF:
+ ah->state = ATH12K_HW_STATE_ON;
+ break;
+ case ATH12K_HW_STATE_RESTARTING:
+ ah->state = ATH12K_HW_STATE_RESTARTED;
+ break;
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_OFF;
+
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
for_each_ar(ah, ar, i) {
ret = ath12k_mac_start(ar);
if (ret) {
+ ah->state = ATH12K_HW_STATE_OFF;
+
ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n",
ar->pdev_idx, ret);
goto fail_start;
@@ -5759,11 +6079,13 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
}
return 0;
+
fail_start:
for (; i > 0; i--) {
ar = ath12k_ah_to_ar(ah, i - 1);
ath12k_mac_stop(ar);
}
+
return ret;
}
@@ -5826,9 +6148,12 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
static void ath12k_mac_stop(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
+ lockdep_assert_held(&ah->hw_mutex);
+
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
if (ret && (ret != -EOPNOTSUPP))
@@ -5836,7 +6161,6 @@ static void ath12k_mac_stop(struct ath12k *ar)
ret);
clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5857,7 +6181,7 @@ static void ath12k_mac_stop(struct ath12k *ar)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -5865,8 +6189,14 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
ath12k_drain_tx(ah);
+ mutex_lock(&ah->hw_mutex);
+
+ ah->state = ATH12K_HW_STATE_OFF;
+
for_each_ar(ah, ar, i)
ath12k_mac_stop(ar);
+
+ mutex_unlock(&ah->hw_mutex);
}
static u8
@@ -5892,17 +6222,59 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
return vdev_stats_id;
}
-static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
- struct ath12k_wmi_vdev_create_arg *arg)
+static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_vif *arvif,
+ u32 *flags, u32 *tx_vdev_id)
+{
+ struct ieee80211_vif *tx_vif = arvif->vif->mbssid_tx_vif;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_vif *tx_arvif;
+
+ if (!tx_vif)
+ return 0;
+
+ tx_arvif = ath12k_vif_to_arvif(tx_vif);
+
+ if (arvif->vif->bss_conf.nontransmitted) {
+ if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ return -EINVAL;
+
+ *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP;
+ *tx_vdev_id = tx_arvif->vdev_id;
+ } else if (tx_arvif == arvif) {
+ *flags = WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP;
+ } else {
+ return -EINVAL;
+ }
+
+ if (arvif->vif->bss_conf.ema_ap)
+ *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
+
+ return 0;
+}
+
+static int ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
+ struct ath12k_wmi_vdev_create_arg *arg)
{
struct ath12k *ar = arvif->ar;
struct ath12k_pdev *pdev = ar->pdev;
+ int ret;
arg->if_id = arvif->vdev_id;
arg->type = arvif->vdev_type;
arg->subtype = arvif->vdev_subtype;
arg->pdev_id = pdev->pdev_id;
+ arg->mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg->mbssid_tx_vdev_id = 0;
+ if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg->mbssid_flags,
+ &arg->mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
@@ -5918,6 +6290,7 @@ static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+ return 0;
}
static u32
@@ -6099,7 +6472,12 @@ static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif)
for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
- ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ ret = ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to create vdev parameters %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
if (ret) {
@@ -6492,7 +6870,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath12k_mac_txpower_recalc(ar);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recal traffic pause state based on the available vdevs */
arvif->is_created = false;
@@ -6563,15 +6940,9 @@ static void ath12k_mac_configure_filter(struct ath12k *ar,
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (!ret) {
- if (!reset_flag)
- set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- else
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- } else {
+ if (ret)
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
- }
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
@@ -6848,10 +7219,16 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
- /* Fill the MBSSID flags to indicate AP is non MBSSID by default
- * Corresponding flags would be updated with MBSSID support.
- */
arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg.mbssid_tx_vdev_id = 0;
+ if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg.mbssid_flags,
+ &arg.mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
@@ -7045,7 +7422,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs)
{
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_base *ab = ar->ab;
+ struct ieee80211_vif *vif;
struct ath12k_vif *arvif;
int ret;
int i;
@@ -7054,9 +7433,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
+ vif = vifs[i].vif;
+ arvif = ath12k_vif_to_arvif(vif);
- if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -7070,29 +7450,6 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
- if (WARN_ON(!arvif->is_up))
- continue;
-
- ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ab, "failed to down vdev %d: %d\n",
- arvif->vdev_id, ret);
- continue;
- }
- }
-
- /* All relevant vdevs are downed and associated channel resources
- * should be available for the channel switch now.
- */
-
- /* TODO: Update ar->rx_channel */
-
- for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
-
- if (WARN_ON(!arvif->is_started))
- continue;
-
arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
/* Firmware expects vdev_restart only if vdev is up.
@@ -7125,8 +7482,16 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ memset(&params, 0, sizeof(params));
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ if (vif->mbssid_tx_vif) {
+ params.tx_bssid = ath12k_vif_to_arvif(vif->mbssid_tx_vif)->bssid;
+ params.nontx_profile_idx = vif->bss_conf.bssid_index;
+ params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
@@ -7259,7 +7624,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
- struct ath12k_wmi_peer_create_arg param;
/* For multi radio wiphy, the vdev was not created during add_interface
* create now since we have a channel ctx now to assign to a specific ar/fw
@@ -7295,21 +7659,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath12k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_start(ar);
if (ret)
@@ -7371,11 +7720,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started);
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath12k_peer_find_by_addr(ab, ar->mac_addr))
- ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_stop(ar);
if (ret) {
@@ -7386,7 +7730,8 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
- if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
ath12k_bss_disassoc(ar, arvif);
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
@@ -7395,10 +7740,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
arvif->is_started = false;
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ath12k_wmi_vdev_down(ar, arvif->vdev_id);
-
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
ath12k_mac_monitor_stop(ar);
@@ -7920,26 +8261,33 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
struct ath12k *ar;
struct ath12k_base *ab;
struct ath12k_vif *arvif;
- int recovery_count;
+ int recovery_count, i;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
- ar = ath12k_ah_to_ar(ah, 0);
- ab = ar->ab;
+ guard(mutex)(&ah->hw_mutex);
- mutex_lock(&ar->conf_mutex);
+ if (ah->state != ATH12K_HW_STATE_RESTARTED)
+ return;
+
+ ah->state = ATH12K_HW_STATE_ON;
+ ieee80211_wake_queues(hw);
+
+ for_each_ar(ah, ar, i) {
+ mutex_lock(&ar->conf_mutex);
+
+ ab = ar->ab;
- if (ar->state == ATH12K_STATE_RESTARTED) {
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
- ar->state = ATH12K_STATE_ON;
- ieee80211_wake_queues(hw);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
recovery_count);
+
/* When there are multiple radios in an SOC,
* the recovery has to be done for each radio
*/
@@ -7958,6 +8306,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
arvif->key_cipher,
arvif->is_up,
arvif->vdev_type);
+
/* After trigger disconnect, then upper layer will
* trigger connect again, then the PN number of
* upper layer will be reset to keep up with AP
@@ -7967,13 +8316,14 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
arvif->vdev_type == WMI_VDEV_TYPE_STA &&
arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
ieee80211_hw_restart_disconnect(arvif->vif);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"restart disconnect\n");
}
}
- }
- mutex_unlock(&ar->conf_mutex);
+ mutex_unlock(&ar->conf_mutex);
+ }
}
static void
@@ -8026,6 +8376,13 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
if (!sband || idx >= sband->n_channels)
return -ENOENT;
@@ -8124,12 +8481,68 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_wmi_scan_req_arg arg;
- struct ath12k *ar;
+ struct ath12k *ar, *prev_ar;
u32 scan_time_msec;
+ bool create = true;
int ret;
- ar = ath12k_ah_to_ar(ah, 0);
+ if (ah->num_radio == 1) {
+ WARN_ON(!arvif->is_created);
+ ar = ath12k_ah_to_ar(ah, 0);
+ goto scan;
+ }
+
+ ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
+ if (!ar)
+ return -EINVAL;
+
+ /* If the vif is already assigned to a specific vdev of an ar,
+ * check whether it's already started; a vdev that has been started
+ * is not allowed to switch to a new radio.
+ * If the vdev is not started, but was earlier created on a
+ * different ar, delete that vdev and create a new one. We don't
+ * delete it at scan stop, as an optimization to avoid redundant
+ * delete/create cycles for the same ar when requests for this vif
+ * stay on the same band.
+ */
+ if (arvif->is_created) {
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ if (ar != arvif->ar && arvif->is_started)
+ return -EBUSY;
+
+ if (ar != arvif->ar) {
+ /* Back up the previously used ar pointer, since the vdev delete
+ * will set arvif->ar to NULL after the call
+ */
+ prev_ar = arvif->ar;
+ mutex_lock(&prev_ar->conf_mutex);
+ ret = ath12k_mac_vdev_delete(prev_ar, vif);
+ mutex_unlock(&prev_ar->conf_mutex);
+ if (ret) {
+ ath12k_warn(prev_ar->ab,
+ "unable to delete scan vdev for roc: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ create = false;
+ }
+ }
+ if (create) {
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_vdev_create(ar, vif);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret) {
+ ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n",
+ ret);
+ return -EINVAL;
+ }
+ }
+
+scan:
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -8211,6 +8624,40 @@ exit:
return ret;
}
+static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant provides the replay counter in big-endian byte order;
+ * the firmware expects it in little-endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kek", NULL,
+ rekey_data->kek, NL80211_KEK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -8226,6 +8673,7 @@ static const struct ieee80211_ops ath12k_ops = {
.hw_scan = ath12k_mac_op_hw_scan,
.cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
.set_key = ath12k_mac_op_set_key,
+ .set_rekey_data = ath12k_mac_op_set_rekey_data,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
.sta_rc_update = ath12k_mac_op_sta_rc_update,
@@ -8247,6 +8695,12 @@ static const struct ieee80211_ops ath12k_ops = {
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
+
+#ifdef CONFIG_PM
+ .suspend = ath12k_wow_op_suspend,
+ .resume = ath12k_wow_op_resume,
+ .set_wakeup = ath12k_wow_op_set_wakeup,
+#endif
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -8488,19 +8942,23 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
static const u8 ath12k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static const u8 ath12k_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const u8 ath12k_if_types_ext_capa_ap[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+ [10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
};
static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
@@ -8540,8 +8998,10 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
struct ath12k *ar;
int i;
- for_each_ar(ah, ar, i)
+ for_each_ar(ah, ar, i) {
cancel_work_sync(&ar->regd_update_work);
+ ath12k_debugfs_unregister(ar);
+ }
ieee80211_unregister_hw(hw);
@@ -8605,6 +9065,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0;
bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false;
u8 *mac_addr = NULL;
+ u8 mbssid_max_interfaces = 0;
wiphy->max_ap_assoc_sta = 0;
@@ -8648,6 +9109,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
mac_addr = ar->mac_addr;
else
mac_addr = ab->mac_addr;
+
+ mbssid_max_interfaces += TARGET_NUM_VDEVS;
}
wiphy->available_antennas_rx = antennas_rx;
@@ -8685,7 +9148,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
- if (ht_cap & WMI_HT_CAP_ENABLED) {
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -8700,7 +9163,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
+ (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -8739,6 +9203,9 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
+ wiphy->mbssid_max_interfaces = mbssid_max_interfaces;
+ wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
+
if (is_6ghz) {
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
@@ -8756,6 +9223,24 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
}
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath12k_wow_init(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
@@ -8777,13 +9262,16 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
goto err_unregister_hw;
}
- }
- ath12k_debugfs_register(ar);
+ ath12k_debugfs_register(ar);
+ }
return 0;
err_unregister_hw:
+ for_each_ar(ah, ar, i)
+ ath12k_debugfs_unregister(ar);
+
ieee80211_unregister_hw(hw);
err_free_if_combs:
@@ -8842,7 +9330,6 @@ static void ath12k_mac_setup(struct ath12k *ar)
INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
}
int ath12k_mac_register(struct ath12k_base *ab)
@@ -8919,6 +9406,8 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ah->hw = hw;
ah->num_radio = num_pdev_map;
+ mutex_init(&ah->hw_mutex);
+
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
pdev_idx = pdev_map[i].pdev_idx;
@@ -8927,7 +9416,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ar = ath12k_ah_to_ar(ah, i);
ar->ah = ah;
ar->ab = ab;
- ar->hw_link_id = i;
+ ar->hw_link_id = pdev->hw_link_id;
ar->pdev = pdev;
ar->pdev_idx = pdev_idx;
pdev->ar = ar;
@@ -9005,3 +9494,34 @@ err:
return ret;
}
+
+int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct wmi_sta_keepalive_arg arg = {};
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath12k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
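Editor's note: a hypothetical call site for the new keepalive helper; the method and interval constants follow the ath11k naming and are assumptions here, as is the surrounding locking context.

	/* Sketch only: enable NULL-frame keepalive on a STA vdev */
	mutex_lock(&ar->conf_mutex);
	ret = ath12k_mac_vif_set_keepalive(arvif,
					   WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
					   WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	mutex_unlock(&ar->conf_mutex);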
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 69fd282b9dd3..5efbb6822628 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -9,6 +9,7 @@
#include <net/mac80211.h>
#include <net/cfg80211.h>
+#include "wmi.h"
struct ath12k;
struct ath12k_base;
@@ -81,5 +82,9 @@ int ath12k_mac_rfkill_config(struct ath12k *ar);
int ath12k_mac_wait_tx_complete(struct ath12k *ar);
void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
+int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index fef2f7622033..df96b0f91f54 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -16,6 +16,7 @@
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define OTP_INVALID_BOARD_ID 0xFFFF
#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
+#define MHI_CB_INVALID 0xff
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
@@ -268,6 +269,7 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
ath12k_mhi_op_callback_to_str(cb));
@@ -277,12 +279,20 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
case MHI_CB_EE_RDDM:
+ if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "do not queue again for consecutive RDDM event\n");
+ break;
+ }
+
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default:
break;
}
+
+ ab_pci->mhi_pre_cb = cb;
}
static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
@@ -313,6 +323,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
if (!mhi_ctrl)
return -ENOMEM;
+ ab_pci->mhi_pre_cb = MHI_CB_INVALID;
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
mhi_ctrl->regs = ab->mem;
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 16af046c33d9..876c029f58f6 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -350,6 +350,7 @@ static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -560,8 +561,9 @@ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
- int i, j, ret, num_vectors = 0;
+ int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0, base_idx;
+ struct ath12k_ext_irq_grp *irq_grp;
base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
@@ -572,13 +574,18 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
return ret;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath12k_pci_ext_grp_napi_poll);
if (ab->hw_params->ring_mask->tx[i] ||
@@ -611,13 +618,23 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
if (ret) {
ath12k_err(ab, "failed request irq %d: %d\n",
vector, ret);
- return ret;
+ goto fail_request;
}
}
ath12k_pci_ext_grp_disable(irq_grp);
}
return 0;
+
+fail_request:
+ /* The i-th group's napi_ndev was already allocated; free it as well */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
}
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
@@ -1090,14 +1107,14 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
int i;
- set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
-
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
napi_enable(&irq_grp->napi);
ath12k_pci_ext_grp_enable(irq_grp);
}
+
+ set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
@@ -1285,6 +1302,13 @@ void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
ath12k_pci_sw_reset(ab_pci->ab, false);
}
+static int ath12k_pci_panic_handler(struct ath12k_base *ab)
+{
+ ath12k_pci_sw_reset(ab, false);
+
+ return NOTIFY_OK;
+}
+
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.start = ath12k_pci_start,
.stop = ath12k_pci_stop,
@@ -1302,6 +1326,7 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
+ .panic_handler = ath12k_pci_panic_handler,
};
static
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 6186a78038cf..31584a7ad80e 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -104,6 +104,7 @@ struct ath12k_pci {
struct mhi_controller *mhi_ctrl;
const struct ath12k_msi_config *msi_config;
unsigned long mhi_state;
+ enum mhi_callback mhi_pre_cb;
u32 register_window;
/* protects register_window above */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 5484112859a6..b93ce9f87f61 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -2041,7 +2041,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
- req->mlo_chip_id = 0;
+ req->mlo_chip_id = ab->device_id;
req->mlo_group_id_valid = 1;
req->mlo_group_id = 0;
req->max_mlo_peer_valid = 1;
@@ -2053,7 +2053,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_num_chips = 1;
info = &req->mlo_chip_info[0];
- info->chip_id = 0;
+ info->chip_id = ab->device_id;
info->num_local_links = ab->qmi.num_radios;
for (i = 0; i < info->num_local_links; i++) {
@@ -2503,7 +2503,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
ab->qmi.dev_mem[i].size =
resp.dev_mem[i].size;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "devmem [%d] start ox%llx size %llu\n", i,
+ "devmem [%d] start 0x%llx size %llu\n", i,
ab->qmi.dev_mem[i].start,
ab->qmi.dev_mem[i].size);
}
@@ -2538,7 +2538,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
struct qmi_txn txn;
const u8 *temp = data;
- int ret;
+ int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index fbf38044938c..439d61f284d8 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -206,9 +206,9 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ struct ieee80211_hw *hw = ah->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
- struct ath12k_hw *ah = ar->ah;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
int i;
@@ -286,19 +286,20 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
if (ret)
goto err;
+ if (ah->state != ATH12K_HW_STATE_ON)
+ goto skip;
+
ah->regd_updated = true;
/* Apply the new regd to all the radios, this is expected to be received only once
* since we check for ah->regd_updated and allow here only once.
*/
for_each_ar(ah, ar, i) {
- if (ar->state == ATH12K_STATE_ON) {
- ab = ar->ab;
- ret = ath12k_reg_update_chan_list(ar);
- if (ret)
- goto err;
- }
+ ab = ar->ab;
+ ret = ath12k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
}
-
+skip:
return 0;
err:
ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 7a52d2082b79..9f6be557365e 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -228,9 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
- config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
+ config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -497,6 +500,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
+ pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
@@ -841,6 +845,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_subtype = cpu_to_le32(args->subtype);
cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
cmd->pdev_id = cpu_to_le32(args->pdev_id);
+ cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
@@ -1046,6 +1052,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
if (!restart) {
if (arg->ssid) {
@@ -1097,7 +1104,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
return ret;
}
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
@@ -1112,14 +1119,20 @@ int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
sizeof(*cmd));
- cmd->vdev_id = cpu_to_le32(vdev_id);
- cmd->vdev_assoc_id = cpu_to_le32(aid);
+ cmd->vdev_id = cpu_to_le32(params->vdev_id);
+ cmd->vdev_assoc_id = cpu_to_le32(params->aid);
+
+ ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
- ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ if (params->tx_bssid) {
+ ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
+ cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
+ cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
+ }
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
- vdev_id, aid, bssid);
+ params->vdev_id, params->aid, params->bssid);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
@@ -1776,13 +1789,15 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn)
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ u32 ema_params = 0;
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
@@ -1801,6 +1816,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
cmd->buf_len = cpu_to_le32(bcn->len);
+ cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
+ if (ema_args) {
+ u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
+ u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
+ if (ema_args->bcn_index == 0)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
+ if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
+ cmd->ema_params = cpu_to_le32(ema_params);
+ }
ptr = skb->data + sizeof(*cmd);
@@ -3473,11 +3498,13 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
- wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
-
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+ wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
+ wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
+ wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
@@ -3690,6 +3717,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.res_cfg.is_reg_cc_ext_event_supported = true;
ab->hw_params->wmi_init(ab, &arg.res_cfg);
+ ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
arg.num_mem_chunks = wmi_ab->num_mem_chunks;
arg.hw_mode_id = wmi_ab->preferred_hw_mode;
@@ -3701,6 +3729,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.num_band_to_mac = ab->num_radios;
ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
+ ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
+
return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
@@ -3808,7 +3838,7 @@ int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
sizeof(*cmd));
- cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
cmd->module_id = cpu_to_le32(arg->module_id);
cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
@@ -5693,7 +5723,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params->single_pdev_only &&
- pdev_idx < ab->hw_params->num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params->num_rxdma_per_pdev)
goto mem_free;
else
goto fallback;
@@ -6022,8 +6052,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
@@ -6044,8 +6076,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
@@ -6999,6 +7033,116 @@ exit:
kfree(tb);
}
+static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_wow_ev_pg_fault_param *pf_param;
+ const struct wmi_wow_ev_param *param;
+ struct wmi_wow_ev_arg *arg = data;
+ int pf_len;
+
+ switch (tag) {
+ case WMI_TAG_WOW_EVENT_INFO:
+ param = ptr;
+ arg->wake_reason = le32_to_cpu(param->wake_reason);
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
+ arg->wake_reason, wow_reason(arg->wake_reason));
+ break;
+
+ case WMI_TAG_ARRAY_BYTE:
+ if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
+ pf_param = ptr;
+ pf_len = le32_to_cpu(pf_param->len);
+ if (pf_len > len - sizeof(pf_len) ||
+ pf_len < 0) {
+ ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
+ pf_len);
+ return -EINVAL;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
+ pf_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
+ "wow_reason_page_fault packet present",
+ "wow_pg_fault ",
+ pf_param->data,
+ pf_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg arg = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_wow_wakeup_host_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ complete(&ab->wow.wakeup_completed);
+}
+
+static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath12k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ rcu_read_lock();
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ le32_to_cpu(ev->vdev_id));
+ kfree(tb);
+ return;
+ }
+
+ replay_ctr = le64_to_cpu(ev->replay_ctr);
+ arvif->rekey_data.replay_ctr = replay_ctr;
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
+ le32_to_cpu(ev->refresh_cnt), replay_ctr);
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7119,6 +7263,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID:
ath12k_wmi_diag_event(ab, skb);
break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath12k_wmi_event_wow_wakeup_host(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath12k_wmi_gtk_offload_status_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -7328,3 +7478,608 @@ void ath12k_wmi_detach(struct ath12k_base *ab)
ath12k_wmi_free_dbring_caps(ab);
}
+
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
+
+ /* Set all modes in case of disable */
+ if (arg->enable)
+ cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
+ else
+ cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ arg->enable, arg->hw_filter_bitmap);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
+{
+ struct wmi_wow_host_wakeup_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ sizeof(*cmd));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+}
+
+int ath12k_wmi_wow_enable(struct ath12k *ar)
+{
+ struct wmi_wow_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_enable_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->enable = cpu_to_le32(1);
+ cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+}
+
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->is_add = cpu_to_le32(enable);
+ cmd->event_bitmap = cpu_to_le32((1 << event));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern_params *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = ptr;
+ bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
+ sizeof(*bitmap));
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ bitmap->pattern_offset = cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = cpu_to_le32(pattern_len);
+ bitmap->pattern_id = cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
+ vdev_id, pattern_id, pattern_offset, pattern_len);
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
+ bitmap->patternbuf, pattern_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
+ bitmap->bitmaskbuf, pattern_len);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ struct nlo_configured_params *nlo_list;
+ size_t len, nlo_list_len, channel_list_len;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ __le32 *channel_list;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV place holder for array of structures
+ * nlo_configured_params(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV place holder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(pno->vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_params(nlo_list) */
+ cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ sizeof(*nlo_list));
+
+ nlo_list[i].ssid.valid = cpu_to_le32(1);
+ nlo_list[i].ssid.ssid.ssid_len =
+ cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
+ nlo_list[i].rssi_cond.rssi =
+ cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = ptr;
+
+ for (i = 0; i < pno->a_networks[0].channel_count; i++)
+ channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
+static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_params *ns;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = buf_ptr;
+
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = buf_ptr;
+ ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
+ sizeof(*ns));
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+
+ if (!is_zero_ether_addr(ns->target_mac.addr))
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_params *arp;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = buf_ptr;
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
+ sizeof(*arp));
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable)
+{
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
+ }
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = buf_ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ sizeof(*cmd));
+ cmd->flags = cpu_to_le32(0);
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
+
+ buf_ptr += sizeof(*cmd);
+
+ ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
+ ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif, bool enable)
+{
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+
+ if (enable) {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
+
+ /* the length in rekey_data and cmd is equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ } else {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_sta_keepalive_arp_resp_params *arp;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enabled = cpu_to_le32(arg->enabled);
+ cmd->interval = cpu_to_le32(arg->interval);
+ cmd->method = cpu_to_le32(arg->method);
+
+ arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ sizeof(*arp));
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
+ arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
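The ath12k_wmi_bcn_tmpl() hunk above packs the EMA beacon count, index and first/last markers into a single ema_params word with u32p_replace_bits(); a minimal userspace sketch of the same packing, assuming the WMI_EMA_BEACON_* field layout defined in wmi.h further below (bits 7:0 count, 15:8 index, 23:16 first, 31:24 last):

#include <stdint.h>
#include <stdio.h>

/* Plain-C stand-in for the kernel's GENMASK()/u32p_replace_bits() usage;
 * the field positions follow the WMI_EMA_BEACON_* masks in wmi.h.
 */
static uint32_t ema_params_pack(uint8_t bcn_cnt, uint8_t bcn_index)
{
	uint32_t v = 0;

	v |= (uint32_t)bcn_cnt;			/* bits 7:0   - number of EMA beacons */
	v |= (uint32_t)bcn_index << 8;		/* bits 15:8  - index of this template */
	if (bcn_index == 0)
		v |= 1u << 16;			/* bits 23:16 - first template flag */
	if (bcn_index + 1 == bcn_cnt)
		v |= 1u << 24;			/* bits 31:24 - last template flag */

	return v;
}

int main(void)
{
	/* Three EMA beacons: index 0 is marked first, index 2 is marked last. */
	printf("0x%08x\n", ema_params_pack(3, 0));	/* 0x00010003 */
	printf("0x%08x\n", ema_params_pack(3, 2));	/* 0x01000203 */
	return 0;
}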
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 496866673aea..f1f52175a52b 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -24,6 +24,7 @@
struct ath12k_base;
struct ath12k;
+struct ath12k_vif;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -2154,6 +2155,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
WMI_MAX_EXT_SERVICE = 256,
@@ -2292,6 +2294,13 @@ struct ath12k_wmi_host_mem_chunk_arg {
u32 req_id;
};
+enum ath12k_peer_metadata_version {
+ ATH12K_PEER_METADATA_V0,
+ ATH12K_PEER_METADATA_V1,
+ ATH12K_PEER_METADATA_V1A,
+ ATH12K_PEER_METADATA_V1B
+};
+
struct ath12k_wmi_resource_config_arg {
u32 num_vdevs;
u32 num_peers;
@@ -2354,8 +2363,10 @@ struct ath12k_wmi_resource_config_arg {
u32 sched_params;
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
bool is_reg_cc_ext_event_supported;
- u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2410,6 +2421,7 @@ struct wmi_init_cmd {
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2726,6 +2738,8 @@ struct ath12k_wmi_vdev_create_arg {
} chains[NUM_NL80211_BANDS];
u32 pdev_id;
u8 if_stats_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@@ -2757,14 +2771,23 @@ struct wmi_vdev_delete_cmd {
__le32 vdev_id;
} __packed;
+struct ath12k_wmi_vdev_up_params {
+ u32 vdev_id;
+ u32 aid;
+ const u8 *bssid;
+ const u8 *tx_bssid;
+ u32 nontx_profile_idx;
+ u32 nontx_profile_cnt;
+};
+
struct wmi_vdev_up_cmd {
__le32 tlv_header;
__le32 vdev_id;
__le32 vdev_assoc_id;
struct ath12k_wmi_mac_addr_params vdev_bssid;
- struct ath12k_wmi_mac_addr_params trans_bssid;
- __le32 profile_idx;
- __le32 profile_num;
+ struct ath12k_wmi_mac_addr_params tx_vdev_bssid;
+ __le32 nontx_profile_idx;
+ __le32 nontx_profile_cnt;
} __packed;
struct wmi_vdev_stop_cmd {
@@ -2792,6 +2815,10 @@ struct ath12k_wmi_ssid_params {
enum wmi_vdev_mbssid_flags {
WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+ WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP = BIT(1),
+ WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP = BIT(2),
+ WMI_VDEV_MBSSID_FLAGS_EMA_MODE = BIT(3),
+ WMI_VDEV_MBSSID_FLAGS_SCAN_MODE_VAP = BIT(4),
};
struct wmi_vdev_start_request_cmd {
@@ -3514,6 +3541,16 @@ struct ath12k_wmi_p2p_noa_info {
#define WMI_BEACON_TX_BUFFER_SIZE 512
+#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
+#define WMI_EMA_BEACON_IDX GENMASK(15, 8)
+#define WMI_EMA_BEACON_FIRST GENMASK(23, 16)
+#define WMI_EMA_BEACON_LAST GENMASK(31, 24)
+
+struct ath12k_wmi_bcn_tmpl_ema_arg {
+ u8 bcn_cnt;
+ u8 bcn_index;
+};
+
struct wmi_bcn_tmpl_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -3524,6 +3561,11 @@ struct wmi_bcn_tmpl_cmd {
__le32 csa_event_bitmap;
__le32 mbssid_ie_offset;
__le32 esp_ie_offset;
+ __le32 csc_switch_count_offset;
+ __le32 csc_event_bitmap;
+ __le32 mu_edca_ie_offset;
+ __le32 feature_enable_bitmap;
+ __le32 ema_params;
} __packed;
struct wmi_p2p_go_set_beacon_ie_cmd {
@@ -4770,7 +4812,7 @@ struct wmi_probe_tmpl_cmd {
__le32 buf_len;
} __packed;
-#define MAX_RADIOS 3
+#define MAX_RADIOS 2
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
@@ -4868,6 +4910,556 @@ struct wmi_twt_disable_event {
__le32 status;
} __packed;
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+enum wmi_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_PAGE_FAULT = 0x3a,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_PAGE_FAULT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
+
+struct wmi_wow_bitmap_pattern_params {
+ __le32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_wow_add_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+struct wmi_wow_add_del_event_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_wow_enable_cmd {
+ __le32 tlv_header;
+ __le32 enable;
+ __le32 pause_iface_config;
+ __le32 flags;
+} __packed;
+
+struct wmi_wow_host_wakeup_cmd {
+ __le32 tlv_header;
+ __le32 reserved;
+} __packed;
+
+struct wmi_wow_ev_param {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+struct wmi_wow_ev_pg_fault_param {
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmi_wow_ev_arg {
+ enum wmi_wow_wake_reason wake_reason;
+};
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on dot11 declaration without extra IEs as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24GHZ_DEFAULT_CH 1
+#define WMI_PNO_5GHZ_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
+
+struct wmi_nlo_ssid_params {
+ __le32 valid;
+ struct ath12k_wmi_ssid_params ssid;
+} __packed;
+
+struct wmi_nlo_enc_params {
+ __le32 valid;
+ __le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_params {
+ __le32 valid;
+ __le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_params {
+ __le32 valid;
+ __le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_params {
+ __le32 valid;
+ __le32 rssi;
+} __packed;
+
+struct nlo_configured_params {
+ /* TLV tag and len;*/
+ __le32 tlv_header;
+ struct wmi_nlo_ssid_params ssid;
+ struct wmi_nlo_enc_params enc_type;
+ struct wmi_nlo_auth_params auth_type;
+ struct wmi_nlo_rssi_params rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_params bcast_nw_type;
+} __packed;
+
+struct wmi_network_type_arg {
+ struct cfg80211_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req_arg {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type_arg a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+};
+
+struct wmi_wow_nlo_config_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 fast_scan_max_cycles;
+ __le32 active_dwell_time;
+ __le32 passive_dwell_time;
+ __le32 probe_bundle_size;
+
+ /* ART = IRT */
+ __le32 rest_time;
+
+ /* max value that can be reached after scan_backoff_multiplier */
+ __le32 max_rest_time;
+
+ __le32 scan_backoff_multiplier;
+ __le32 fast_scan_period;
+
+ /* specific to windows */
+ __le32 slow_scan_period;
+
+ __le32 no_of_ssids;
+
+ __le32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ __le32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct ath12k_wmi_mac_addr_params mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct ath12k_wmi_mac_addr_params mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ __le32 ie_bitmap[8];
+
+ /* Number of vendor OUIs. In the TLV vendor_oui[] */
+ __le32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ __le32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_params nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enable;
+ __le32 hw_filter_bitmap;
+} __packed;
+
+struct wmi_hw_data_filter_arg {
+ u32 vdev_id;
+ bool enable;
+ u32 hw_filter_bitmap;
+};
+
+#define WMI_IPV6_UC_TYPE 0
+#define WMI_IPV6_AC_TYPE 1
+
+#define WMI_IPV6_MAX_COUNT 16
+#define WMI_IPV4_MAX_COUNT 2
+
+struct wmi_arp_ns_offload_arg {
+ u8 ipv4_addr[WMI_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[WMI_IPV6_MAX_COUNT];
+ bool ipv6_valid[WMI_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_params ns[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_params arp[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_params ns_ext[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_gtk_offload_status_event {
+ __le32 vdev_id;
+ __le32 flags;
+ __le32 refresh_cnt;
+ __le64 replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 method;
+
+ /* in seconds */
+ __le32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp_params
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp_params {
+ __le32 tlv_header;
+ __le32 src_ip4_addr;
+ __le32 dest_ip4_addr;
+ struct ath12k_wmi_mac_addr_params dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4881,10 +5473,10 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn);
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id);
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid,
- const u8 *bssid);
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params);
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id);
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart);
@@ -5020,4 +5612,28 @@ ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *p
WMI_CAPS_PARAMS_HW_LINK_ID);
}
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar);
+int ath12k_wmi_wow_enable(struct ath12k *ar);
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id);
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan);
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar,
+ struct wmi_hw_data_filter_arg *arg);
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable);
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif, bool enable);
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_vif *arvif);
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+
#endif
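ath12k_wow_convert_8023_to_80211() in the new wow.c below starts by expanding the cfg80211 pattern mask, which carries one bit per pattern byte, into the per-byte mask that the firmware bitmap pattern expects. A self-contained sketch of that step (the kernel code does it in place with BIT() on stack buffers):

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the bitmask-to-bytemask expansion: each set
 * bit in the cfg80211 mask becomes a 0xff byte in the firmware bytemask.
 */
static void bitmask_to_bytemask(const uint8_t *bitmask, size_t pat_len,
				uint8_t *bytemask)
{
	size_t i;

	for (i = 0; i < pat_len; i++)
		bytemask[i] = (bitmask[i / 8] & (1u << (i % 8))) ? 0xff : 0x00;
}

int main(void)
{
	const uint8_t bitmask[1] = { 0x0d };	/* match pattern bytes 0, 2 and 3 */
	uint8_t bytemask[6];
	size_t i;

	bitmask_to_bytemask(bitmask, sizeof(bytemask), bytemask);

	for (i = 0; i < sizeof(bytemask); i++)
		printf("byte %zu: 0x%02x\n", i, bytemask[i]);
	return 0;
}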
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
new file mode 100644
index 000000000000..bead19db2c9a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -0,0 +1,1026 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wow.h"
+
+static const struct wiphy_wowlan_support ath12k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *arvif)
+{
+ return (arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
+}
+
+int ath12k_wow_enable(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int i, ret;
+
+ clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ /* The firmware might be busy and it can not enter WoW immediately.
+ * In that case firmware notifies host with
+ * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again
+ * later. Per the firmware team there could be up to 10 loops.
+ */
+ for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath12k_wmi_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab,
+ "timed out while waiting for htc suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
+ /* success, suspend complete received */
+ return 0;
+
+ ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
+ i);
+ msleep(ATH12K_WOW_RETRY_WAIT_MS);
+ }
+
+ ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
+
+ return -ETIMEDOUT;
+}
+
+int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ ret = ath12k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_cleanup(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert a 802.3 format to a 802.11 format.
+ *          +------------+-----------+--------+----------------+
+ *   802.3: |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
+ *          +------------+-----------+--------+----------------+
+ *               |__          |_______      |____________    |________
+ *                  |                |                   |            |
+ *          +--+------------+----+-----------+---------------+-----------+
+ *  802.11: |4B|dest mac(6B)| 6B |src mac(6B)|       8B      |type(2B)|  body...  |
+ *          +--+------------+----+-----------+---------------+-----------+
+ */
+static void
+ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
+ const struct cfg80211_pkt_pattern *eth_pattern,
+ struct ath12k_pkt_pattern *i80211_pattern)
+{
+ size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
+ size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
+ size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
+ size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ size_t prot_ofs = offsetof(struct ethhdr, h_proto);
+ size_t src_ofs = offsetof(struct ethhdr, h_source);
+ u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
+ const u8 *eth_pat = eth_pattern->pattern;
+ size_t eth_pat_len = eth_pattern->pattern_len;
+ size_t eth_pkt_ofs = eth_pattern->pkt_offset;
+ u8 *bytemask = i80211_pattern->bytemask;
+ u8 *pat = i80211_pattern->pattern;
+ size_t pat_len = 0;
+ size_t pkt_ofs = 0;
+ size_t delta;
+ int i;
+
+ /* convert bitmask to bytemask */
+ for (i = 0; i < eth_pat_len; i++)
+ if (eth_pattern->mask[i / 8] & BIT(i % 8))
+ eth_bytemask[i] = 0xff;
+
+ if (eth_pkt_ofs < ETH_ALEN) {
+ pkt_ofs = eth_pkt_ofs + a1_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else if (eth_pkt_ofs + eth_pat_len < prot_ofs) {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - src_ofs;
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ delta);
+
+ pat_len = a3_ofs - pkt_ofs + delta;
+ } else {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else if (eth_pkt_ofs < prot_ofs) {
+ pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else {
+ memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else {
+ pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;
+
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ }
+
+ i80211_pattern->pattern_len = pat_len;
+ i80211_pattern->pkt_offset = pkt_ofs;
+}
+
+static int
+ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
+ const struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Filling per profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copying list of valid channel into request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
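+ /* Match sets whose SSID also appears in the probe list are hidden
+  * networks; mark them so the firmware sends directed probes for them.
+  */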
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ for (j = 0; j < pno->uc_networks_count; j++) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ !memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
+
+static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ struct ath12k *ar = arvif->ar;
+ unsigned long wow_mask = 0;
+ int pattern_id = 0;
+ int ret, i, j;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
+ struct ath12k_pkt_pattern new_pattern = {};
+
+ if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode ==
+ ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
+ &new_pattern);
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+ } else {
+ memcpy(new_pattern.pattern, eth_pattern->pattern,
+ eth_pattern->pattern_len);
+
+ /* convert bitmask to bytemask */
+ for (j = 0; j < eth_pattern->pattern_len; j++)
+ if (eth_pattern->mask[j / 8] & BIT(j % 8))
+ new_pattern.bytemask[j] = 0xff;
+
+ new_pattern.pattern_len = eth_pattern->pattern_len;
+ new_pattern.pkt_offset = eth_pattern->pkt_offset;
+ }
+
+ ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.bytemask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_wakeups(struct ath12k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif))
+ continue;
+ ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
+{
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ if (!ar->nlo_enabled)
+ return 0;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
+ goto out;
+ }
+
+ ar->nlo_enabled = false;
+
+out:
+ kfree(pno);
+ return ret;
+}
+
+static int ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
+ default:
+ return 0;
+ }
+}
+
+static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif))
+ continue;
+
+ ret = ath12k_wow_vif_clean_nlo(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = true;
+ arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = false;
+ arg.hw_filter_bitmap = 0;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ int i;
+
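+ /* Derive the IPv6 solicited-node multicast address (ff02::1:ffXX:XXXX)
+  * from the last three bytes of each configured address so the firmware
+  * can answer Neighbor Solicitations while the host sleeps.
+  */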
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
+
+static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct net_device *ndev = ieee80211_vif_to_wdev(arvif->vif)->netdev;
+ struct ath12k_base *ab = arvif->ar->ab;
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ struct inet6_dev *idev;
+ u32 count = 0, scope;
+
+ if (!ndev)
+ return;
+
+ idev = in6_dev_get(ndev);
+ if (!idev)
+ return;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");
+
+ read_lock_bh(&idev->lock);
+
+ /* get unicast address */
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
+ if (count >= WMI_IPV6_MAX_COUNT)
+ goto unlock;
+
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "Unsupported ipv6 scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ /* get anycast address */
+ rcu_read_lock();
+
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
+ if (count >= WMI_IPV6_MAX_COUNT) {
+ rcu_read_unlock();
+ goto unlock;
+ }
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "Unsupported ipv scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ rcu_read_unlock();
+
+unlock:
+ read_unlock_bh(&idev->lock);
+
+ in6_dev_put(idev);
+
+ offload->ipv6_count = count;
+ ath12k_wow_generate_ns_mc_addr(ab, offload);
+}
+
+static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif_cfg vif_cfg = vif->cfg;
+ struct ath12k_base *ab = arvif->ar->ab;
+ u32 ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");
+
+ ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
+ memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
+ offload->ipv4_count = ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
+}
+
+static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
+{
+ struct wmi_arp_ns_offload_arg *offload;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ offload = kmalloc(sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return -ENOMEM;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ memset(offload, 0, sizeof(*offload));
+
+ memcpy(offload->mac_addr, arvif->vif->addr, ETH_ALEN);
+ ath12k_wow_prepare_ns_offload(arvif, offload);
+ ath12k_wow_prepare_arp_offload(arvif, offload);
+
+ ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ kfree(offload);
+
+ return 0;
+}
+
+static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disabling rekey offload */
+ if (!enable) {
+ ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath12k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath12k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_keepalive(struct ath12k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_wow_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ath12k_hif_irq_disable(ar->ab);
+ ath12k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath12k_hif_suspend(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath12k_wow_wakeup(ar);
+
+cleanup:
+ ath12k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
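+ /* A non-zero return tells mac80211 that WoWLAN could not be
+  * configured, so it falls back to a normal (full deconfigure) suspend.
+  */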
+ return ret ? 1 : 0;
+}
+
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+
+ mutex_lock(&ar->conf_mutex);
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath12k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_hif_resume(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath12k_hif_ce_irq_enable(ar->ab);
+ ath12k_hif_irq_enable(ar->ab);
+
+ ret = ath12k_wow_wakeup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH12K_HW_STATE_OFF:
+ case ATH12K_HW_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ah->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath12k_wow_init(struct ath12k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath12k_wowlan_support;
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wow.h b/drivers/net/wireless/ath/ath12k/wow.h
new file mode 100644
index 000000000000..af9be5fadcc3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_WOW_H
+#define ATH12K_WOW_H
+
+#define ATH12K_WOW_RETRY_NUM 10
+#define ATH12K_WOW_RETRY_WAIT_MS 200
+#define ATH12K_WOW_PATTERNS 22
+
+struct ath12k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct ath12k_pkt_pattern {
+ u8 pattern[WOW_MAX_PATTERN_SIZE];
+ u8 bytemask[WOW_MAX_PATTERN_SIZE];
+ int pattern_len;
+ int pkt_offset;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 eth_type;
+} __packed;
+
+#ifdef CONFIG_PM
+
+int ath12k_wow_init(struct ath12k *ar);
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath12k_wow_op_resume(struct ieee80211_hw *hw);
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+int ath12k_wow_enable(struct ath12k *ar);
+int ath12k_wow_wakeup(struct ath12k *ar);
+
+#else
+
+static inline int ath12k_wow_init(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_enable(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+#endif /* ATH12K_WOW_H */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9f534ed2fbb3..abe41330fb69 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2847,7 +2847,7 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
* if another thread does a system call and the thread doing the
* stop is preempted).
*/
-void ath5k_stop(struct ieee80211_hw *hw)
+void ath5k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath5k_hw *ah = hw->priv;
int ret;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 97469d0fbad7..594e5b945cb7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -92,7 +92,7 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
int ath5k_start(struct ieee80211_hw *hw);
-void ath5k_stop(struct ieee80211_hw *hw);
+void ath5k_stop(struct ieee80211_hw *hw, bool suspend);
void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index b389e19381c4..8a03bcc2789e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -973,7 +973,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
return ret;
}
-static void ath9k_htc_stop(struct ieee80211_hw *hw)
+static void ath9k_htc_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 01173aac3045..b92c89dad8de 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -895,7 +895,7 @@ static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
ath_key_delete(common, keyix);
}
-static void ath9k_stop(struct ieee80211_hw *hw)
+static void ath9k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 7e7797bf44b7..755c068e4197 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -439,7 +439,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
cancel_work_sync(&ar->ampdu_work);
}
-static void carl9170_op_stop(struct ieee80211_hw *hw)
+static void carl9170_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar9170 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e760d8002e09..408776562a7e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -278,7 +278,7 @@ out_err:
return ret;
}
-static void wcn36xx_stop(struct ieee80211_hw *hw)
+static void wcn36xx_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wcn36xx *wcn = hw->priv;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ee7d7e9c2718..d5d364683c0e 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -453,16 +453,21 @@ int wil_if_add(struct wil6210_priv *wil)
return rc;
}
- init_dummy_netdev(&wil->napi_ndev);
+ wil->napi_ndev = alloc_netdev_dummy(0);
+ if (!wil->napi_ndev) {
+ wil_err(wil, "failed to allocate dummy netdev");
+ rc = -ENOMEM;
+ goto out_wiphy;
+ }
if (wil->use_enhanced_dma_hw) {
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ netif_napi_add(wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma);
- netif_napi_add_tx(&wil->napi_ndev,
+ netif_napi_add_tx(wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ netif_napi_add(wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx);
- netif_napi_add_tx(&wil->napi_ndev,
+ netif_napi_add_tx(wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx);
}
@@ -474,10 +479,12 @@ int wil_if_add(struct wil6210_priv *wil)
wiphy_unlock(wiphy);
rtnl_unlock();
if (rc < 0)
- goto out_wiphy;
+ goto free_dummy;
return 0;
+free_dummy:
+ free_netdev(wil->napi_ndev);
out_wiphy:
wiphy_unregister(wiphy);
return rc;
@@ -554,5 +561,7 @@ void wil_if_remove(struct wil6210_priv *wil)
netif_napi_del(&wil->napi_tx);
netif_napi_del(&wil->napi_rx);
+ free_netdev(wil->napi_ndev);
+
wiphy_unregister(wiphy);
}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 22a6eb3e12b7..9bd1286d2857 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -983,7 +983,7 @@ struct wil6210_priv {
spinlock_t eap_lock; /* guarding access to eap rekey fields */
struct napi_struct napi_rx;
struct napi_struct napi_tx;
- struct net_device napi_ndev; /* dummy net_device serving all VIFs */
+ struct net_device *napi_ndev; /* dummy net_device serving all VIFs */
/* DMA related */
struct wil_ring ring_rx;
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 0b55a272bfd6..504e05ea30f2 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -332,7 +332,7 @@ static int at76_dfu_get_status(struct usb_device *udev,
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATUS,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, 0, status, sizeof(struct dfu_status),
+ 0, 0, status, sizeof(*status),
USB_CTRL_GET_TIMEOUT);
return ret;
}
@@ -366,7 +366,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
u32 dfu_timeout = 0;
int bsize = 0;
int blockno = 0;
- struct dfu_status *dfu_stat_buf = NULL;
+ struct dfu_status *dfu_stat_buf;
u8 *dfu_state = NULL;
u8 *block = NULL;
@@ -378,7 +378,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
return -EINVAL;
}
- dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
+ dfu_stat_buf = kmalloc(sizeof(*dfu_stat_buf), GFP_KERNEL);
if (!dfu_stat_buf) {
ret = -ENOMEM;
goto exit;
@@ -721,9 +721,11 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
int buf_size)
{
int ret;
- struct at76_command *cmd_buf = kmalloc(sizeof(struct at76_command) +
- buf_size, GFP_KERNEL);
+ size_t total_size;
+ struct at76_command *cmd_buf;
+ total_size = struct_size(cmd_buf, data, buf_size);
+ cmd_buf = kmalloc(total_size, GFP_KERNEL);
if (!cmd_buf)
return -ENOMEM;
@@ -732,15 +734,13 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
cmd_buf->size = cpu_to_le16(buf_size);
memcpy(cmd_buf->data, buf, buf_size);
- at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size,
+ at76_dbg_dump(DBG_CMD, cmd_buf, total_size,
"issuing command %s (0x%02x)",
at76_get_cmd_string(cmd), cmd);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- 0, 0, cmd_buf,
- sizeof(struct at76_command) + buf_size,
- USB_CTRL_GET_TIMEOUT);
+ 0, 0, cmd_buf, total_size, USB_CTRL_GET_TIMEOUT);
kfree(cmd_buf);
return ret;
}
@@ -931,14 +931,12 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
{
int i;
int ret;
- struct mib_mac_addr *m = kmalloc(sizeof(struct mib_mac_addr),
- GFP_KERNEL);
+ struct mib_mac_addr *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m,
- sizeof(struct mib_mac_addr));
+ ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_ADDR) failed: %d\n", ret);
@@ -961,13 +959,12 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
int i;
int ret;
int key_len;
- struct mib_mac_wep *m = kmalloc(sizeof(struct mib_mac_wep), GFP_KERNEL);
+ struct mib_mac_wep *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m,
- sizeof(struct mib_mac_wep));
+ ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_WEP) failed: %d\n", ret);
@@ -997,14 +994,12 @@ exit:
static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
{
int ret;
- struct mib_mac_mgmt *m = kmalloc(sizeof(struct mib_mac_mgmt),
- GFP_KERNEL);
+ struct mib_mac_mgmt *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m,
- sizeof(struct mib_mac_mgmt));
+ ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_MGMT) failed: %d\n", ret);
@@ -1035,12 +1030,12 @@ exit:
static void at76_dump_mib_mac(struct at76_priv *priv)
{
int ret;
- struct mib_mac *m = kmalloc(sizeof(struct mib_mac), GFP_KERNEL);
+ struct mib_mac *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
+ ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC) failed: %d\n", ret);
@@ -1072,12 +1067,12 @@ exit:
static void at76_dump_mib_phy(struct at76_priv *priv)
{
int ret;
- struct mib_phy *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL);
+ struct mib_phy *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
+ ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (PHY) failed: %d\n", ret);
@@ -1130,13 +1125,12 @@ exit:
static void at76_dump_mib_mdomain(struct at76_priv *priv)
{
int ret;
- struct mib_mdomain *m = kmalloc(sizeof(struct mib_mdomain), GFP_KERNEL);
+ struct mib_mdomain *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m,
- sizeof(struct mib_mdomain));
+ ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MDOMAIN) failed: %d\n", ret);
@@ -1375,7 +1369,7 @@ static int at76_startup_device(struct at76_priv *priv)
priv->scan_min_time, priv->scan_max_time,
priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive");
- memset(ccfg, 0, sizeof(struct at76_card_config));
+ memset(ccfg, 0, sizeof(*ccfg));
ccfg->promiscuous_mode = 0;
ccfg->short_retry_limit = priv->short_retry_limit;
@@ -1411,7 +1405,7 @@ static int at76_startup_device(struct at76_priv *priv)
ccfg->beacon_period = cpu_to_le16(priv->beacon_period);
ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config,
- sizeof(struct at76_card_config));
+ sizeof(*ccfg));
if (ret < 0) {
wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n",
ret);
@@ -1856,7 +1850,7 @@ error:
return 0;
}
-static void at76_mac80211_stop(struct ieee80211_hw *hw)
+static void at76_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
{
struct at76_priv *priv = hw->priv;
@@ -2443,7 +2437,7 @@ static int at76_probe(struct usb_interface *interface,
struct usb_device *udev;
int op_mode;
int need_ext_fw = 0;
- struct mib_fw_version *fwv = NULL;
+ struct mib_fw_version *fwv;
int board_type = (int)id->driver_info;
udev = usb_get_dev(interface_to_usbdev(interface));
@@ -2531,7 +2525,7 @@ static int at76_probe(struct usb_interface *interface,
usb_set_intfdata(interface, priv);
- memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
+ memcpy(&priv->fw_version, fwv, sizeof(*fwv));
priv->board_type = board_type;
ret = at76_init_new_device(priv, interface);
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.h b/drivers/net/wireless/atmel/at76c50x-usb.h
index 746e64dfd8aa..843146a0de64 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.h
+++ b/drivers/net/wireless/atmel/at76c50x-usb.h
@@ -151,7 +151,7 @@ struct at76_command {
u8 cmd;
u8 reserved;
__le16 size;
- u8 data[];
+ u8 data[] __counted_by_le(size);
} __packed;
/* Length of Atmel-specific Rx header before 802.11 frame */
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index badb2f494035..8e56dcf9309d 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5078,7 +5078,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
return err;
}
-static void b43_op_stop(struct ieee80211_hw *hw)
+static void b43_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 18eb610f600a..441d6440671b 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3485,7 +3485,7 @@ out_mutex_unlock:
return err;
}
-static void b43legacy_op_stop(struct ieee80211_hw *hw)
+static void b43legacy_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 13391c2d82aa..d35262335eaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1061,10 +1061,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
if (func->num != 2)
return -ENODEV;
- bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ bus_if = kzalloc(sizeof(*bus_if), GFP_KERNEL);
if (!bus_if)
return -ENOMEM;
- sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ sdiodev = kzalloc(sizeof(*sdiodev), GFP_KERNEL);
if (!sdiodev) {
kfree(bus_if);
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 7ea2631b8069..0c3d119d1219 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -358,10 +358,10 @@ idle:
*/
int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_btcoex_info *btci = NULL;
+ struct brcmf_btcoex_info *btci;
brcmf_dbg(TRACE, "enter\n");
- btci = kmalloc(sizeof(struct brcmf_btcoex_info), GFP_KERNEL);
+ btci = kmalloc(sizeof(*btci), GFP_KERNEL);
if (!btci)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 5fe0e671ecb3..1585a5653ee4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4071,7 +4071,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
struct cfg80211_wowlan_wakeup *wakeup;
u32 wakeind;
s32 err;
- int timeout;
+ long time_left;
err = brcmf_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le,
sizeof(wake_ind_le));
@@ -4113,10 +4113,10 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
}
if (wakeind & BRCMF_WOWL_PFN_FOUND) {
brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_PFN_FOUND\n");
- timeout = wait_event_timeout(cfg->wowl.nd_data_wait,
- cfg->wowl.nd_data_completed,
- BRCMF_ND_INFO_TIMEOUT);
- if (!timeout)
+ time_left = wait_event_timeout(cfg->wowl.nd_data_wait,
+ cfg->wowl.nd_data_completed,
+ BRCMF_ND_INFO_TIMEOUT);
+ if (!time_left)
bphy_err(drvr, "No result for wowl net detect\n");
else
wakeup_data.net_detect = cfg->wowl.nd_info;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index e406e11481a6..fe4f65756105 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -70,6 +70,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
+ struct of_phandle_args oirq;
const char *prop;
int irq;
int err;
@@ -129,10 +130,10 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
sdio->drive_strength = val;
/* make sure there are interrupts defined in the node */
- if (!of_property_present(np, "interrupts"))
+ if (of_irq_parse_one(np, 0, &oirq))
return;
- irq = irq_of_parse_and_map(np, 0);
+ irq = irq_create_of_mapping(&oirq);
if (!irq) {
brcmf_err("interrupt could not be mapped\n");
return;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 06698a714b52..ce482a3877e9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -313,11 +313,6 @@ struct brcmf_pcie_shared_info {
u8 version;
};
-struct brcmf_pcie_core_info {
- u32 base;
- u32 wrapbase;
-};
-
#define BRCMF_OTP_MAX_PARAM_LEN 16
struct brcmf_otp_params {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 6b38d9de71af..1461dc453ac2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4450,7 +4450,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(TRACE, "Enter\n");
/* Allocate private bus interface state */
- bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
if (!bus)
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 9a105e6debe1..8afbf529c745 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1236,8 +1236,8 @@ brcmf_usb_prepare_fw_request(struct brcmf_usbdev_info *devinfo)
static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
enum brcmf_fwvendor fwvid)
{
- struct brcmf_bus *bus = NULL;
- struct brcmf_usbdev *bus_pub = NULL;
+ struct brcmf_bus *bus;
+ struct brcmf_usbdev *bus_pub;
struct device *dev = devinfo->dev;
struct brcmf_fw_request *fwreq;
int ret;
@@ -1247,7 +1247,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
if (!bus_pub)
return -ENODEV;
- bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
if (!bus) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
index 2084b506a450..50d817485cf9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
@@ -512,7 +512,7 @@ ai_attach(struct bcma_bus *pbus)
struct si_info *sii;
/* alloc struct si_info */
- sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
+ sii = kzalloc(sizeof(*sii), GFP_ATOMIC);
if (sii == NULL)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index c3376f887114..33d17b779201 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -219,7 +219,7 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
struct ampdu_info *ampdu;
int i;
- ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
+ ampdu = kzalloc(sizeof(*ampdu), GFP_ATOMIC);
if (!ampdu)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c
index 54c616919590..f411bc6d795d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c
@@ -111,7 +111,7 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
struct antsel_info *asi;
struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
- asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
+ asi = kzalloc(sizeof(*asi), GFP_ATOMIC);
if (!asi)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index f6962e558d7c..d1b9a18d0374 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -331,7 +331,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
const char *ccode = sprom->alpha2;
int ccode_len = sizeof(sprom->alpha2);
- wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
+ wlc_cm = kzalloc(sizeof(*wlc_cm), GFP_ATOMIC);
if (wlc_cm == NULL)
return NULL;
wlc_cm->pub = pub;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
index 3d5c1ef8f7f2..bd480239368a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
@@ -558,7 +558,7 @@ struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
struct si_info *sii = container_of(sih, struct si_info, pub);
/* allocate private info structure */
- di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
+ di = kzalloc(sizeof(*di), GFP_ATOMIC);
if (di == NULL)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 92860dc0a92e..d86f28b8bc60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -457,7 +457,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
return err;
}
-static void brcms_ops_stop(struct ieee80211_hw *hw)
+static void brcms_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct brcms_info *wl = hw->priv;
int status;
@@ -1090,6 +1090,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;
@@ -1496,7 +1497,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
{
struct brcms_timer *t;
- t = kzalloc(sizeof(struct brcms_timer), GFP_ATOMIC);
+ t = kzalloc(sizeof(*t), GFP_ATOMIC);
if (!t)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 34460b5815d0..2738d4d6c60a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -234,12 +234,6 @@
/* max # tx status to process in wlc_txstatus() */
#define TXSBND 8
-/* brcmu_format_flags() bit description structure */
-struct brcms_c_bit_desc {
- u32 bit;
- const char *name;
-};
-
/*
* The following table lists the buffer memory allocated to xmt fifos in HW.
* the size is in units of 256bytes(one block), total size is HW dependent
@@ -463,11 +457,11 @@ static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
{
struct brcms_bss_cfg *cfg;
- cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC);
+ cfg = kzalloc(sizeof(*cfg), GFP_ATOMIC);
if (cfg == NULL)
goto fail;
- cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
+ cfg->current_bss = kzalloc(sizeof(*cfg->current_bss), GFP_ATOMIC);
if (cfg->current_bss == NULL)
goto fail;
@@ -483,14 +477,14 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
{
struct brcms_c_info *wlc;
- wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC);
+ wlc = kzalloc(sizeof(*wlc), GFP_ATOMIC);
if (wlc == NULL) {
*err = 1002;
goto fail;
}
/* allocate struct brcms_c_pub state structure */
- wlc->pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC);
+ wlc->pub = kzalloc(sizeof(*wlc->pub), GFP_ATOMIC);
if (wlc->pub == NULL) {
*err = 1003;
goto fail;
@@ -499,7 +493,7 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
/* allocate struct brcms_hardware state structure */
- wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC);
+ wlc->hw = kzalloc(sizeof(*wlc->hw), GFP_ATOMIC);
if (wlc->hw == NULL) {
*err = 1005;
goto fail;
@@ -528,7 +522,7 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
goto fail;
}
- wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
+ wlc->default_bss = kzalloc(sizeof(*wlc->default_bss), GFP_ATOMIC);
if (wlc->default_bss == NULL) {
*err = 1010;
goto fail;
@@ -540,21 +534,20 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
goto fail;
}
- wlc->protection = kzalloc(sizeof(struct brcms_protection),
- GFP_ATOMIC);
+ wlc->protection = kzalloc(sizeof(*wlc->protection), GFP_ATOMIC);
if (wlc->protection == NULL) {
*err = 1016;
goto fail;
}
- wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC);
+ wlc->stf = kzalloc(sizeof(*wlc->stf), GFP_ATOMIC);
if (wlc->stf == NULL) {
*err = 1017;
goto fail;
}
wlc->bandstate[0] =
- kcalloc(MAXBANDS, sizeof(struct brcms_band), GFP_ATOMIC);
+ kcalloc(MAXBANDS, sizeof(*wlc->bandstate[0]), GFP_ATOMIC);
if (wlc->bandstate[0] == NULL) {
*err = 1025;
goto fail;
@@ -567,14 +560,14 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
+ (sizeof(struct brcms_band)*i));
}
- wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC);
+ wlc->corestate = kzalloc(sizeof(*wlc->corestate), GFP_ATOMIC);
if (wlc->corestate == NULL) {
*err = 1026;
goto fail;
}
wlc->corestate->macstat_snapshot =
- kzalloc(sizeof(struct macstat), GFP_ATOMIC);
+ kzalloc(sizeof(*wlc->corestate->macstat_snapshot), GFP_ATOMIC);
if (wlc->corestate->macstat_snapshot == NULL) {
*err = 1027;
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index a27d6f0b8819..c3d7aa570b4e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -355,7 +355,7 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
{
struct shared_phy *sh;
- sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC);
+ sh = kzalloc(sizeof(*sh), GFP_ATOMIC);
if (sh == NULL)
return NULL;
@@ -442,7 +442,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
return &pi->pubpi_ro;
}
- pi = kzalloc(sizeof(struct brcms_phy), GFP_ATOMIC);
+ pi = kzalloc(sizeof(*pi), GFP_ATOMIC);
if (pi == NULL)
return NULL;
pi->wiphy = wiphy;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index aae2cf95fe95..d0faba240561 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -2567,7 +2567,6 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
struct lcnphy_txgains cal_gains, temp_gains;
u16 hash;
- u8 band_idx;
int j;
u16 ncorr_override[5];
u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
@@ -2599,6 +2598,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
u16 *values_to_save;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+ if (WARN_ON(CHSPEC_IS5G(pi->radio_chanspec)))
+ return;
+
values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC);
if (NULL == values_to_save)
return;
@@ -2662,20 +2664,18 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
hash = (target_gains->gm_gain << 8) |
(target_gains->pga_gain << 4) | (target_gains->pad_gain);
- band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
-
cal_gains = *target_gains;
memset(ncorr_override, 0, sizeof(ncorr_override));
- for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
- if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
+ for (j = 0; j < iqcal_gainparams_numgains_lcnphy[0]; j++) {
+ if (hash == tbl_iqcal_gainparams_lcnphy[0][j][0]) {
cal_gains.gm_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
+ tbl_iqcal_gainparams_lcnphy[0][j][1];
cal_gains.pga_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
+ tbl_iqcal_gainparams_lcnphy[0][j][2];
cal_gains.pad_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
+ tbl_iqcal_gainparams_lcnphy[0][j][3];
memcpy(ncorr_override,
- &tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
+ &tbl_iqcal_gainparams_lcnphy[0][j][3],
sizeof(ncorr_override));
break;
}
@@ -4968,11 +4968,11 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
{
struct brcms_phy_lcnphy *pi_lcn;
- pi->u.pi_lcnphy = kzalloc(sizeof(struct brcms_phy_lcnphy), GFP_ATOMIC);
- if (pi->u.pi_lcnphy == NULL)
+ pi_lcn = kzalloc(sizeof(*pi_lcn), GFP_ATOMIC);
+ if (!pi_lcn)
return false;
- pi_lcn = pi->u.pi_lcnphy;
+ pi->u.pi_lcnphy = pi_lcn;
if (0 == (pi->sh->boardflags & BFL_NOPA)) {
pi->hwpwrctrl = true;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index b72381791536..8b852581c4e4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -38,9 +38,9 @@ struct phy_shim_info {
struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
struct brcms_info *wl,
struct brcms_c_info *wlc) {
- struct phy_shim_info *physhim = NULL;
+ struct phy_shim_info *physhim;
- physhim = kzalloc(sizeof(struct phy_shim_info), GFP_ATOMIC);
+ physhim = kzalloc(sizeof(*physhim), GFP_ATOMIC);
if (!physhim)
return NULL;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index 4aec1fce1ae2..e22a6732a4c3 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -180,11 +180,10 @@ static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
struct libipw_txb *txb;
int i;
- txb = kmalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
+ txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
if (!txb)
return NULL;
- memset(txb, 0, sizeof(struct libipw_txb));
txb->nr_frags = nr_frags;
txb->frag_size = txb_size;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 075b705a8d7b..74fc76c00ebc 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2813,7 +2813,7 @@ out_release_irq:
}
static void
-il3945_mac_stop(struct ieee80211_hw *hw)
+il3945_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct il_priv *il = hw->priv;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index a773939b8c2a..1fab7849f56d 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -566,7 +566,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
- return;
+ rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* Convert 3945's rssi indicator to dBm */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 4beb7be6d51d..1600c344edbb 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -664,7 +664,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
- return;
+ rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* This will be used in several places later */
@@ -5820,7 +5820,7 @@ out:
}
void
-il4965_mac_stop(struct ieee80211_hw *hw)
+il4965_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct il_priv *il = hw->priv;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index 863e3792d153..951f2245fefb 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -151,7 +151,7 @@ void il4965_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int il4965_mac_start(struct ieee80211_hw *hw);
-void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw, bool suspend);
void il4965_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 8bb94a4c12cd..64c123314245 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi.o
iwlwifi-objs += iwl-io.o
iwlwifi-objs += iwl-drv.o
iwlwifi-objs += iwl-debug.o
-iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs += iwl-nvm-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
@@ -14,7 +14,6 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
-iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d594694206b3..2e2fcb3807ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -13,7 +13,7 @@
#define IWL_22000_UCODE_API_MAX 77
/* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN 50
+#define IWL_22000_UCODE_API_MIN 77
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 25952d0bea99..975e8aed1526 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -13,7 +13,7 @@
#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
-#define IWL_AX210_UCODE_API_MIN 59
+#define IWL_AX210_UCODE_API_MIN 77
/* NVM versions */
#define IWL_AX210_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index bc98b87cf2a1..3b6b8b410be5 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 90
+#define IWL_BZ_UCODE_API_MAX 92
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 80
+#define IWL_BZ_UCODE_API_MIN 90
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 9b79279fd76c..4ccb0b7bdc20 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 90
+#define IWL_SC_UCODE_API_MAX 92
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 82
+#define IWL_SC_UCODE_API_MIN 90
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 6109d64006db..abcf8aeb010d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -2,7 +2,7 @@
# DVM
obj-$(CONFIG_IWLDVM) += iwldvm.o
iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
-iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
+iwldvm-objs += lib.o calib.o tt.o sta.o rx.o eeprom.o
iwldvm-objs += power.o
iwldvm-objs += scan.o
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index fefaa414272b..a13add556a7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2021, 2024 Intel Corporation
*/
#ifndef __iwl_agn_h__
#define __iwl_agn_h__
@@ -385,6 +385,25 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
iwl_trans_set_pmi(priv->trans, state);
}
+/**
+ * iwl_parse_eeprom_data - parse EEPROM data and return values
+ *
+ * @trans: transport we're parsing for, for debug only
+ * @cfg: device configuration for parsing and overrides
+ * @eeprom: the EEPROM data
+ * @eeprom_size: length of the EEPROM data
+ *
+ * This function parses all EEPROM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size);
+
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
#else
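The new declarations above move EEPROM reading and parsing into the DVM op-mode. A hedged sketch of the call order implied by the kernel-doc follows: read the raw image, parse it into a struct iwl_nvm_data, free the raw buffer, and later release the parsed data with iwl_free_nvm_data(). The priv field names are assumptions based on the DVM code, and the fragment only builds inside the driver.

/* Hedged sketch of the call order implied by the kernel-doc above; a
 * simplified fragment that would only build inside the driver, with
 * priv field names assumed to match the DVM ones. */
static int example_load_nvm(struct iwl_priv *priv)
{
	struct iwl_nvm_data *nvm;
	u8 *eeprom;
	size_t eeprom_size;
	int ret;

	ret = iwl_read_eeprom(priv->trans, &eeprom, &eeprom_size);
	if (ret)
		return ret;

	nvm = iwl_parse_eeprom_data(priv->trans, priv->cfg, eeprom, eeprom_size);
	kfree(eeprom);			/* raw image no longer needed */
	if (!nvm)
		return -ENOMEM;

	priv->nvm_data = nvm;		/* released later via iwl_free_nvm_data() */
	return 0;
}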
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index 04864d3fda63..3f49c0bccb28 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2023-2024 Intel Corporation
*/
/*
* Please use this file (commands.h) only for uCode API definitions.
@@ -177,7 +177,7 @@ enum {
*
*****************************************************************************/
-/**
+/*
* iwlagn rate_n_flags bit fields
*
* rate_n_flags format is used in following iwlagn commands:
@@ -251,7 +251,7 @@ enum {
#define RATE_MCS_SGI_POS 13
#define RATE_MCS_SGI_MSK 0x2000
-/**
+/*
* rate_n_flags Tx antenna masks
* bit14:16
*/
@@ -2767,7 +2767,7 @@ struct iwl_missed_beacon_notif {
*
*****************************************************************************/
-/**
+/*
* SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
*
* This command sets up the Rx signal detector for a sensitivity level that
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 25283e4b849f..4ac8b862ad41 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -19,7 +19,7 @@
#include <linux/mutex.h>
#include "fw/img.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index 39e40901fa46..48a8349680fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -12,7 +12,7 @@
*/
#include "iwl-io.h"
#include "iwl-prph.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "agn.h"
#include "dev.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index 2b290fab1ef2..931aa3f5798d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -1,16 +1,18 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
- * Copyright (C) 2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2005-2014, 2018-2019, 2021, 2024 Intel Corporation
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
+
#include "iwl-drv.h"
-#include "iwl-modparams.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "agn.h"
-#if IS_ENABLED(CONFIG_IWLDVM)
/* EEPROM offset definitions */
/* indirect access definitions */
@@ -79,7 +81,6 @@ enum eeprom_sku_bits {
#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
/*
* EEPROM bands
* These are the channel numbers from each band in the order
@@ -257,7 +258,6 @@ struct iwl_eeprom_channel {
s8 max_power_avg;
} __packed;
-
enum iwl_eeprom_enhanced_txpwr_flags {
IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
@@ -648,114 +648,385 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
-#endif
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum of 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-int iwl_init_sband_channels(struct iwl_nvm_data *data,
- struct ieee80211_supported_band *sband,
- int n_channels, enum nl80211_band band)
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+
+static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
- struct ieee80211_channel *chan = &data->channels[0];
- int n = 0, idx = 0;
+ u16 count;
+ int ret;
+
+ for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ IWL_EEPROM_SEM_TIMEOUT);
+ if (ret >= 0) {
+ IWL_DEBUG_EEPROM(trans->dev,
+ "Acquired semaphore after %d tries.\n",
+ count+1);
+ return ret;
+ }
+ }
- while (idx < n_channels && chan->band != band)
- chan = &data->channels[++idx];
+ return ret;
+}
- sband->channels = &data->channels[idx];
+static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
+{
+ iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+}
- while (idx < n_channels && chan->band == band) {
- chan = &data->channels[++idx];
- n++;
- }
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
+{
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
- sband->n_channels = n;
+ IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
- return n;
+ switch (gp) {
+ case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
+ if (!nvm_is_otp) {
+ IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
+ gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ if (nvm_is_otp) {
+ IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
+ default:
+ IWL_ERR(trans,
+ "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
+ nvm_is_otp ? "OTP" : "EEPROM", gp);
+ return -ENOENT;
+ }
}
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+/******************************************************************************
+ *
+ * OTP related functions
+ *
+******************************************************************************/
-void iwl_init_ht_hw_capab(struct iwl_trans *trans,
- struct iwl_nvm_data *data,
- struct ieee80211_sta_ht_cap *ht_info,
- enum nl80211_band band,
- u8 tx_chains, u8 rx_chains)
+static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
{
- const struct iwl_cfg *cfg = trans->cfg;
- int max_bit_rate = 0;
-
- tx_chains = hweight8(tx_chains);
- if (cfg->rx_with_siso_diversity)
- rx_chains = 1;
- else
- rx_chains = hweight8(rx_chains);
-
- if (!(data->sku_cap_11n_enable) ||
- (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
- !cfg->ht_params) {
- ht_info->ht_supported = false;
- return;
+ iwl_read32(trans, CSR_OTP_GP_REG);
+
+ iwl_clear_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_OTP_ACCESS_MODE);
+}
+
+static int iwl_nvm_is_otp(struct iwl_trans *trans)
+{
+ u32 otpgp;
+
+ /* OTP only valid for CP/PP and after */
+ switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_NONE:
+ IWL_ERR(trans, "Unknown hardware type\n");
+ return -EIO;
+ case CSR_HW_REV_TYPE_5300:
+ case CSR_HW_REV_TYPE_5350:
+ case CSR_HW_REV_TYPE_5100:
+ case CSR_HW_REV_TYPE_5150:
+ return 0;
+ default:
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
+ return 1;
+ return 0;
}
+}
+
+static int iwl_init_otp_access(struct iwl_trans *trans)
+{
+ int ret;
+
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
+ return ret;
+
+ iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
- if (data->sku_cap_mimo_disabled)
- rx_chains = 1;
+ /*
+ * CSR auto clock gate disable bit -
+ * this is only applicable for HW with OTP shadow RAM
+ */
+ if (trans->trans_cfg->base_params->shadow_ram_support)
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+ return 0;
+}
- if (cfg->ht_params->stbc) {
- ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
+ __le16 *eeprom_data)
+{
+ int ret = 0;
+ u32 r;
+ u32 otpgp;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
+ return ret;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ /* check for ECC errors: */
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
+ /* stop in this case */
+ /* set the uncorrectable OTP ECC bit for acknowledgment */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+ IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
+ return -EINVAL;
+ }
+ if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
+ /* continue in this case */
+ /* set the correctable OTP ECC bit for acknowledgment */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+ IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
+ }
+ *eeprom_data = cpu_to_le16(r >> 16);
+ return 0;
+}
- if (tx_chains > 1)
- ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+/*
+ * iwl_is_otp_empty: check for empty OTP
+ */
+static bool iwl_is_otp_empty(struct iwl_trans *trans)
+{
+ u16 next_link_addr = 0;
+ __le16 link_value;
+ bool is_empty = false;
+
+ /* locate the beginning of OTP link list */
+ if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
+ if (!link_value) {
+ IWL_ERR(trans, "OTP is empty\n");
+ is_empty = true;
+ }
+ } else {
+ IWL_ERR(trans, "Unable to read first block of OTP list.\n");
+ is_empty = true;
}
- if (cfg->ht_params->ldpc)
- ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ return is_empty;
+}
- if (trans->trans_cfg->mq_rx_supported ||
- iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+/*
+ * iwl_find_otp_image: find EEPROM image in OTP
+ * Find the OTP block that contains the EEPROM image.
+ * The last valid block on the link list (the block _before_ the last block)
+ * is the block we should read and use to configure the device.
+ * If all the available OTP blocks are full, the last block is the one we
+ * should read and use to configure the device.
+ * Only perform this operation if shadow RAM is disabled.
+ */
+static int iwl_find_otp_image(struct iwl_trans *trans,
+ u16 *validblockaddr)
+{
+ u16 next_link_addr = 0, valid_addr;
+ __le16 link_value = 0;
+ int usedblocks = 0;
- ht_info->mcs.rx_mask[0] = 0xFF;
- ht_info->mcs.rx_mask[1] = 0x00;
- ht_info->mcs.rx_mask[2] = 0x00;
+ /* set addressing mode to absolute to traverse the link list */
+ iwl_set_otp_access_absolute(trans);
- if (rx_chains >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
+ /* checking for empty OTP or error */
+ if (iwl_is_otp_empty(trans))
+ return -EINVAL;
- if (cfg->ht_params->ht_greenfield_support)
- ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ /*
+ * start traverse link list
+ * until reach the max number of OTP blocks
+ * different devices have different number of OTP blocks
+ */
+ do {
+ /* save current valid block address
+ * check for more block on the link list
+ */
+ valid_addr = next_link_addr;
+ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+ IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
+ usedblocks, next_link_addr);
+ if (iwl_read_otp_word(trans, next_link_addr, &link_value))
+ return -EINVAL;
+ if (!link_value) {
+ /*
+ * reach the end of link list, return success and
+ * set address point to the starting address
+ * of the image
+ */
+ *validblockaddr = valid_addr;
+ /* skip first 2 bytes (link list pointer) */
+ *validblockaddr += 2;
+ return 0;
+ }
+ /* more in the link list, continue */
+ usedblocks++;
+ } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ /* OTP has no valid blocks */
+ IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
+ return -EINVAL;
+}
- if (cfg->ht_params->ht40_bands & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
+/*
+ * iwl_read_eeprom - read EEPROM contents
+ *
+ * Load the EEPROM contents from the adapter and return them
+ * together with their size.
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
+{
+ __le16 *e;
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+ u16 validblockaddr = 0;
+ u16 cache_addr = 0;
+ int nvm_is_otp;
+
+ if (!eeprom || !eeprom_size)
+ return -EINVAL;
+
+ nvm_is_otp = iwl_nvm_is_otp(trans);
+ if (nvm_is_otp < 0)
+ return nvm_is_otp;
+
+ sz = trans->trans_cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
+
+ e = kmalloc(sz, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
+ if (ret < 0) {
+ IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ goto err_free;
}
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains != rx_chains) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = iwl_eeprom_acquire_semaphore(trans);
+ if (ret < 0) {
+ IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
+ goto err_free;
}
+
+ if (nvm_is_otp) {
+ ret = iwl_init_otp_access(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to initialize OTP access.\n");
+ goto err_unlock;
+ }
+
+ iwl_write32(trans, CSR_EEPROM_GP,
+ iwl_read32(trans, CSR_EEPROM_GP) &
+ ~CSR_EEPROM_GP_IF_OWNER_MSK);
+
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+ /* traversing the linked list if no shadow ram supported */
+ if (!trans->trans_cfg->base_params->shadow_ram_support) {
+ ret = iwl_find_otp_image(trans, &validblockaddr);
+ if (ret)
+ goto err_unlock;
+ }
+ for (addr = validblockaddr; addr < validblockaddr + sz;
+ addr += sizeof(u16)) {
+ __le16 eeprom_data;
+
+ ret = iwl_read_otp_word(trans, addr, &eeprom_data);
+ if (ret)
+ goto err_unlock;
+ e[cache_addr / 2] = eeprom_data;
+ cache_addr += sizeof(u16);
+ }
+ } else {
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Time out reading EEPROM[%d]\n", addr);
+ goto err_unlock;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+ }
+
+ IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
+ nvm_is_otp ? "OTP" : "EEPROM");
+
+ iwl_eeprom_release_semaphore(trans);
+
+ *eeprom_size = sz;
+ *eeprom = (u8 *)e;
+ return 0;
+
+ err_unlock:
+ iwl_eeprom_release_semaphore(trans);
+ err_free:
+ kfree(e);
+
+ return ret;
}
-#if IS_ENABLED(CONFIG_IWLDVM)
static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
@@ -790,7 +1061,6 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
/* EEPROM data functions */
-
struct iwl_nvm_data *
iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size)
@@ -837,8 +1107,8 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->kelvin_temperature = *(__le16 *)tmp;
data->kelvin_voltage = *((__le16 *)tmp + 1);
- radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_RADIO_CONFIG);
+ radio_cfg =
+ iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG);
data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
@@ -878,5 +1148,3 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
kfree(data);
return NULL;
}
-IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
-#endif
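The file move above pulls the EEPROM/OTP access code into dvm/eeprom.c. The trickiest part is iwl_find_otp_image(): OTP blocks form a linked list whose first 16-bit word is the word index of the next block, and the image to use lives in the block before the first block whose link word is zero. The standalone simulation below mirrors that walk with fake data so the termination condition is easier to follow; it is an illustration, not driver code.

/* Standalone simulation of the OTP link-list walk in iwl_find_otp_image():
 * word 0 of each block is the word index of the next block, a zero link ends
 * the list, and the usable image starts right after the link word of the
 * block *before* the terminating one.  Fake data, illustration only. */
#include <stdio.h>
#include <stdint.h>

#define MAX_LL_ITEMS 4	/* stand-in for base_params->max_ll_items */

static const uint16_t otp[] = {
	8, 0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, /* block 0 */
	0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* block 1 */
};

static uint16_t read_otp_word(uint16_t byte_addr)
{
	return otp[byte_addr / 2];
}

int main(void)
{
	uint16_t next = 0, valid = 0, link = 0;
	int used = 0;

	do {
		valid = next;		/* remember the previous block */
		next = link * 2;	/* stored link is a word index */
		link = read_otp_word(next);
		if (!link) {
			/* empty link: the previous block holds the image,
			 * skip its 2-byte link word */
			printf("EEPROM image at byte address %u\n",
			       (unsigned)(valid + 2));
			return 0;
		}
		used++;
	} while (used <= MAX_LL_ITEMS);

	printf("OTP has no valid blocks\n");
	return 1;
}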
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 52b008ce53bd..74d163e56511 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(C) 2018 - 2019, 2022 - 2023 Intel Corporation
+ * Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -145,8 +145,6 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec &&
- priv->trans->ops->d3_suspend &&
- priv->trans->ops->d3_resume &&
device_can_wakeup(priv->trans->dev)) {
priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -302,7 +300,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
return ret;
}
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+static void iwlagn_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -730,8 +728,6 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
break;
case IEEE80211_AMPDU_TX_START:
- if (!priv->trans->ops->txq_enable)
- break;
if (!iwl_enable_tx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Tx\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 8774dd7b921e..65b7c68e5ca7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
+ * Copyright(c) 2024 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
@@ -25,8 +26,7 @@
#include <asm/div64.h>
-#include "iwl-eeprom-read.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index f4a6f76cf193..8879e668ef0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2673,20 +2673,16 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
/* Get max rate if user set max rate */
- if (lq_sta) {
- lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
- if ((sband->band == NL80211_BAND_5GHZ) &&
- (lq_sta->max_rate_idx != -1))
- lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((lq_sta->max_rate_idx < 0) ||
- (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
- lq_sta->max_rate_idx = -1;
- }
+ lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
+ if (sband->band == NL80211_BAND_5GHZ && lq_sta->max_rate_idx != -1)
+ lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+ if (lq_sta->max_rate_idx < 0 || lq_sta->max_rate_idx >= IWL_RATE_COUNT)
+ lq_sta->max_rate_idx = -1;
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (lq_sta && !lq_sta->drv) {
+ if (!lq_sta->drv) {
IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- priv_sta = NULL;
+ /* mac80211 already set up the data for using low rates */
+ return;
}
rate_idx = lq_sta->last_txrate_idx;
@@ -2756,7 +2752,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
lq_sta = &sta_priv->lq_sta;
sband = hw->wiphy->bands[conf->chandef.chan->band];
-
lq_sta->lq.sta_id = sta_id;
for (j = 0; j < LQ_SIZE; j++)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
index 23dfcda0dd86..4af792d41dce 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*
- * Copyright(c) 2007 - 2014, 2023 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014, 2023-2024 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -64,7 +64,7 @@ struct iwl_tt_trans {
};
/**
- * struct iwl_tt_mgnt - Thermal Throttling Management structure
+ * struct iwl_tt_mgmt - Thermal Throttling Management structure
* @advanced_tt: advanced thermal throttle required
* @state: current Thermal Throttling state
* @tt_power_mode: Thermal Throttling power mode index
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index fa339791223b..79774c8c7ff4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -27,6 +27,7 @@ static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
[DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32),
[DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
[DSM_FUNC_RFI_CONFIG] = sizeof(u32),
+ [DSM_FUNC_ENABLE_11BE] = sizeof(u32),
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index e00ab21e7358..ebe85fdf08d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -113,7 +113,7 @@ struct iwl_alive_ntf_v6 {
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
/**
- * enum iwl_extended_cfg_flag - commands driver may send before
+ * enum iwl_extended_cfg_flags - commands driver may send before
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
@@ -126,7 +126,7 @@ enum iwl_extended_cfg_flags {
};
/**
- * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
* before finishing init flows
* @init_flags: values from iwl_extended_cfg_flags
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
index d9044ada6a43..2397fdc37fc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2020, 2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2020, 2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -77,7 +77,7 @@ struct iwl_time_quota_data_v1 {
} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
/**
- * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
* Note: on non-CDB the fourth one is the auxiliary mac and is
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index bc27e15488f5..b97a43353779 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@@ -77,73 +77,6 @@ struct iwl_bt_coex_ci_cmd {
__le32 secondary_ch_phy_id;
} __packed; /* BT_CI_MSG_API_S_VER_2 */
-#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
- BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
- BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
-
-enum iwl_bt_mxbox_dw0 {
- BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
- BT_MBOX(0, LE_PROF1, 3, 1),
- BT_MBOX(0, LE_PROF2, 4, 1),
- BT_MBOX(0, LE_PROF_OTHER, 5, 1),
- BT_MBOX(0, CHL_SEQ_N, 8, 4),
- BT_MBOX(0, INBAND_S, 13, 1),
- BT_MBOX(0, LE_MIN_RSSI, 16, 4),
- BT_MBOX(0, LE_SCAN, 20, 1),
- BT_MBOX(0, LE_ADV, 21, 1),
- BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
- BT_MBOX(0, OPEN_CON_1, 28, 2),
-};
-
-enum iwl_bt_mxbox_dw1 {
- BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
- BT_MBOX(1, IP_SR, 4, 1),
- BT_MBOX(1, LE_MSTR, 5, 1),
- BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
- BT_MBOX(1, MSG_TYPE, 16, 3),
- BT_MBOX(1, SSN, 19, 2),
-};
-
-enum iwl_bt_mxbox_dw2 {
- BT_MBOX(2, SNIFF_ACT, 0, 3),
- BT_MBOX(2, PAG, 3, 1),
- BT_MBOX(2, INQUIRY, 4, 1),
- BT_MBOX(2, CONN, 5, 1),
- BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
- BT_MBOX(2, DISC, 13, 1),
- BT_MBOX(2, SCO_TX_ACT, 16, 2),
- BT_MBOX(2, SCO_RX_ACT, 18, 2),
- BT_MBOX(2, ESCO_RE_TX, 20, 2),
- BT_MBOX(2, SCO_DURATION, 24, 6),
-};
-
-enum iwl_bt_mxbox_dw3 {
- BT_MBOX(3, SCO_STATE, 0, 1),
- BT_MBOX(3, SNIFF_STATE, 1, 1),
- BT_MBOX(3, A2DP_STATE, 2, 1),
- BT_MBOX(3, ACL_STATE, 3, 1),
- BT_MBOX(3, MSTR_STATE, 4, 1),
- BT_MBOX(3, OBX_STATE, 5, 1),
- BT_MBOX(3, A2DP_SRC, 6, 1),
- BT_MBOX(3, OPEN_CON_2, 8, 2),
- BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
- BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
- BT_MBOX(3, INBAND_P, 13, 1),
- BT_MBOX(3, MSG_TYPE_2, 16, 3),
- BT_MBOX(3, SSN_2, 19, 2),
- BT_MBOX(3, UPDATE_REQUEST, 21, 1),
-};
-
-#define BT_MBOX_MSG(_notif, _num, _field) \
- ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
- >> BT_MBOX##_num##_##_field##_POS)
-
-#define BT_MBOX_PRINT(_num, _field, _end) \
- pos += scnprintf(buf + pos, bufsz - pos, \
- "\t%s: %d%s", \
- #_field, \
- BT_MBOX_MSG(notif, _num, _field), \
- true ? "\n" : ", ")
enum iwl_bt_activity_grading {
BT_OFF = 0,
BT_ON_NO_CONNECTION = 1,
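The hunk above deletes the BT mailbox helpers from the coex API header. For readers tracking what disappears, this is roughly what one BT_MBOX() invocation expanded to, assuming BITS(n) was an n-bit-wide mask helper as used in the deleted header (sketch only, not authoritative):

/* Approximate expansion of BT_MBOX(0, LE_PROF1, 3, 1) from the deleted
 * header, assuming BITS(n) == ((1 << n) - 1): a position enumerator plus a
 * mask shifted to that position. */
enum {
	BT_MBOX0_LE_PROF1_POS = 3,
	BT_MBOX0_LE_PROF1 = ((1 << 1) - 1) << BT_MBOX0_LE_PROF1_POS, /* 0x08 */
};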
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 4419631604b4..1fc65469990e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -76,7 +76,7 @@ struct iwl_phy_specific_cfg {
} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
/**
- * struct iwl_phy_cfg_cmd - Phy configuration command
+ * struct iwl_phy_cfg_cmd_v1 - Phy configuration command
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index bbaaf3c73115..ffee7927cf26 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -42,7 +42,7 @@ struct iwl_d3_manager_config {
/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
/**
- * enum iwl_d3_proto_offloads - enabled protocol offloads
+ * enum iwl_proto_offloads - enabled protocol offloads
* @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
* @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
* @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid
@@ -195,7 +195,7 @@ struct iwl_wowlan_pattern_v1 {
#define IWL_WOWLAN_MAX_PATTERNS 20
/**
- * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
+ * struct iwl_wowlan_patterns_cmd_v1 - WoWLAN wakeup patterns
*/
struct iwl_wowlan_patterns_cmd_v1 {
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index f272b6a4e72e..2ab38eaeb290 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -231,28 +231,33 @@ struct iwl_synced_time_rsp {
#define PTP_CTX_MAX_DATA_SIZE 128
/**
- * struct iwl_time_msmt_ptp_ctx - Vendor specific information element
+ * struct iwl_time_msmt_ptp_ctx - Vendor specific element
* to allow a space for flexibility for the userspace App
*
- * @element_id: element id of vendor specific ie
- * @length: length of vendor specific ie
- * @reserved: for alignment
- * @data: vendor specific data blob
+ * @ftm: FTM specific vendor element
+ * @ftm.element_id: element id of vendor specific ie
+ * @ftm.length: length of vendor specific ie
+ * @ftm.reserved: for alignment
+ * @ftm.data: vendor specific data blob
+ * @tm: TM specific vendor element
+ * @tm.element_id: element id of vendor specific ie
+ * @tm.length: length of vendor specific ie
+ * @tm.data: vendor specific data blob
*/
struct iwl_time_msmt_ptp_ctx {
- /* Differentiate between FTM and TM specific Vendor IEs */
+ /* Differentiate between FTM and TM specific Vendor elements */
union {
struct {
u8 element_id;
u8 length;
__le16 reserved;
u8 data[PTP_CTX_MAX_DATA_SIZE];
- } ftm; /* FTM specific vendor IE */
+ } ftm;
struct {
u8 element_id;
u8 length;
u8 data[PTP_CTX_MAX_DATA_SIZE];
- } tm; /* TM specific vendor IE */
+ } tm;
};
} __packed /* PTP_CTX_VER_1 */;
@@ -531,6 +536,10 @@ struct iwl_rx_baid_cfg_cmd_remove {
/**
* struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
* @action: the action, from &enum iwl_rx_baid_action
+ * @alloc: allocation data
+ * @modify: modify data
+ * @remove_v1: remove data (version 1)
+ * @remove: remove data
*/
struct iwl_rx_baid_cfg_cmd {
__le32 action;
@@ -565,6 +574,7 @@ enum iwl_scd_queue_cfg_operation {
/**
* struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
* @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u: union depending on command usage
* @u.add.sta_mask: station mask
* @u.add.tid: TID
* @u.add.reserved: reserved
@@ -634,6 +644,7 @@ enum iwl_sec_key_flags {
/**
* struct iwl_sec_key_cmd - security key command
* @action: action from &enum iwl_ctxt_action
+ * @u: union depending on command type
* @u.add.sta_mask: station mask for the new key
* @u.add.key_id: key ID (0-7) for the new key
* @u.add.key_flags: key flags per &enum iwl_sec_key_flags
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 47c914de2992..855cd13a181e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -147,32 +147,34 @@ struct iwl_fw_ini_region_internal_buffer {
* Configures parameters for region data collection
*
* @hdr: debug header
- * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
+ * @id: region id. Max id is %IWL_FW_INI_MAX_REGION_ID
* @type: region type. One of &enum iwl_fw_ini_region_type
* @sub_type: region sub type
* @sub_type_ver: region sub type version
* @reserved: not in use
* @name: region name
* @dev_addr: device address configuration. Used by
- * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
- * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
- * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
- * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
- * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM,
- * &IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP,
+ * %IWL_FW_INI_REGION_DEVICE_MEMORY, %IWL_FW_INI_REGION_PERIPHERY_MAC,
+ * %IWL_FW_INI_REGION_PERIPHERY_PHY, %IWL_FW_INI_REGION_PERIPHERY_AUX,
+ * %IWL_FW_INI_REGION_PAGING, %IWL_FW_INI_REGION_CSR,
+ * %IWL_FW_INI_REGION_DRAM_IMR and %IWL_FW_INI_REGION_PCI_IOSF_CONFIG
+ * %IWL_FW_INI_REGION_DBGI_SRAM, %FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM,
+ * %IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP,
* @dev_addr_range: device address range configuration. Used by
- * &IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and
- * &IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE
- * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
- * &IWL_FW_INI_REGION_RXF
+ * %IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and
+ * %IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE
+ * @fifos: fifos configuration. Used by %IWL_FW_INI_REGION_TXF and
+ * %IWL_FW_INI_REGION_RXF
* @err_table: error table configuration. Used by
- * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
- * IWL_FW_INI_REGION_UMAC_ERROR_TABLE
+ * %IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
+ * %IWL_FW_INI_REGION_UMAC_ERROR_TABLE
* @internal_buffer: internal monitor buffer configuration. Used by
- * &IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * %IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * @special_mem: special device memory region, used by
+ * %IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY
* @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
- * Used by &IWL_FW_INI_REGION_DRAM_BUFFER
- * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
+ * Used by %IWL_FW_INI_REGION_DRAM_BUFFER
+ * @tlv_mask: tlv collection mask. Used by %IWL_FW_INI_REGION_TLV
* @addrs: array of addresses attached to the end of the region tlv
*/
struct iwl_fw_ini_region_tlv {
@@ -291,7 +293,7 @@ struct iwl_fw_ini_addr_val {
} __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */
/**
- * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory.
+ * struct iwl_fw_ini_conf_set_tlv - configuration TLV to set register/memory.
*
* @hdr: debug header
* @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point
@@ -470,6 +472,10 @@ enum iwl_fw_ini_region_device_memory_subtype {
* @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed
* @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx
* @IWL_FW_INI_TIME_POINT_DEASSOC: deassociation
+ * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ: request to override preset
+ * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset
+ * request
+ * @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list
* @IWL_FW_INI_TIME_POINT_NUM: number of time points
*/
enum iwl_fw_ini_time_point {
@@ -500,6 +506,9 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
IWL_FW_INI_TIME_POINT_FAKE_TX,
IWL_FW_INI_TIME_POINT_DEASSOC,
+ IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ,
+ IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START,
+ IWL_FW_INI_TIME_SCAN_FAILURE,
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index b31ae6889bd0..bea0f4668cc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_debug_h__
#define __iwl_fw_api_debug_h__
+#include "dbg-tlv.h"
/**
* enum iwl_debug_cmds - debug commands
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 25530a29317e..30a54c7fa001 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
@@ -390,10 +391,62 @@ struct iwl_tof_responder_config_cmd_v9 {
__le16 max_time_between_msr;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @bss_color: current AP bss_color
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @band: current AP band
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @r2i_ndp_params: parameters for R2I NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @i2r_ndp_params: parameters for I2R NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @min_time_between_msr: for non trigger based NDP ranging, minimum time
+ * between measurements in milliseconds.
+ * @max_time_between_msr: for non trigger based NDP ranging, maximum time
+ * between measurements in milliseconds.
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 bss_color;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 band;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ __le16 min_time_between_msr;
+ __le16 max_time_between_msr;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_10 */
+
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
- * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * struct iwl_tof_responder_dyn_config_cmd_v2 - Dynamic responder settings
* @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
* @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
* @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if
@@ -561,6 +614,8 @@ struct iwl_tof_range_req_ap_entry_v2 {
* the responder asked for LMR feedback although the initiator did not set
* the LMR feedback bit in the FTM request. If not set, the initiator will
* continue with the session and will provide the LMR feedback.
+ * @IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC: send an incorrect SAC in the
+ * first NDP exchange. This is used for testing.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@@ -577,6 +632,7 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
IWL_INITIATOR_AP_FLAGS_PMF = BIT(14),
IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15),
+ IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = BIT(16),
};
/**
@@ -797,6 +853,7 @@ struct iwl_tof_range_req_ap_entry_v7 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */
#define IWL_LOCATION_MAX_STS_POS 3
+#define IWL_LOCATION_TOTAL_LTF_POS 6
/**
* struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters
@@ -954,6 +1011,78 @@ struct iwl_tof_range_req_ap_entry_v9 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
/**
+ * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @bssid: AP's BSSID
+ * @burst_period: For EDCA based ranging: Recommended value to be sent to the
+ * AP. Measurement periodicity In units of 100ms. ignored if
+ * num_of_bursts_exp = 0.
+ * For non trigger based NDP ranging, the maximum time between
+ * measurements in units of milliseconds.
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max total LTFs. One of
+ * &enum ieee80211_range_params_max_total_ltf.
+ * @i2r_ndp_params: parameters for I2R NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max total LTFs. One of
+ * &enum ieee80211_range_params_max_total_ltf.
+ * @min_time_between_msr: For non trigger based NDP ranging, the minimum time
+ * between measurements in units of milliseconds
+ */
+struct iwl_tof_range_req_ap_entry_v10 {
+ __le32 initiator_ap_flags;
+ u8 band;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ __le16 min_time_between_msr;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
+
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -1230,6 +1359,34 @@ struct iwl_tof_range_req_cmd_v13 {
struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
+/**
+ * struct iwl_tof_range_req_cmd_v14 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10.
+ */
+struct iwl_tof_range_req_cmd_v14 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
+
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 754c5d655ad0..ca6fa66d1917 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -144,7 +144,7 @@ struct iwl_missed_vap_notif {
} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
/**
- * struct iwl_channel_switch_start_notif - Channel switch start notification
+ * struct iwl_channel_switch_start_notif_v1 - Channel switch start notification
*
* @id_and_color: ID and color of the MAC
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 545826973a80..bcbbf8c4a297 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -310,6 +310,13 @@ struct iwl_ac_qos {
* @filter_flags: combination of &enum iwl_mac_filter_flags
* @qos_flags: from &enum iwl_mac_qos_flags
* @ac: one iwl_mac_qos configuration for each AC
+ * @ap: AP specific config data, see &struct iwl_mac_data_ap
+ * @go: GO specific config data, see &struct iwl_mac_data_go
+ * @sta: BSS client specific config data, see &struct iwl_mac_data_sta
+ * @p2p_sta: P2P client specific config data, see &struct iwl_mac_data_p2p_sta
+ * @p2p_dev: P2P-device specific config data, see &struct iwl_mac_data_p2p_dev
+ * @pibss: Pseudo-IBSS specific data, unused; see struct iwl_mac_data_pibss
+ * @ibss: IBSS specific config data, see &struct iwl_mac_data_ibss
*/
struct iwl_mac_ctx_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index a08497a04733..d424d0126367 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -23,7 +23,8 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* &struct iwl_lari_config_change_cmd_v4,
* &struct iwl_lari_config_change_cmd_v5,
* &struct iwl_lari_config_change_cmd_v6,
- * &struct iwl_lari_config_change_cmd_v7 or
+ * &struct iwl_lari_config_change_cmd_v7,
+ * &struct iwl_lari_config_change_cmd_v10 or
* &struct iwl_lari_config_change_cmd
*/
LARI_CONFIG_CHANGE = 0x1,
@@ -119,7 +120,7 @@ struct iwl_nvm_access_cmd {
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
/**
- * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
+ * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
* @length: in bytes, either how much was written or read
* @type: NVM_SECTION_TYPE_*
@@ -211,7 +212,7 @@ struct iwl_nvm_get_info_phy {
#define IWL_NUM_CHANNELS 110
/**
- * struct iwl_nvm_get_info_regulatory - regulatory information
+ * struct iwl_nvm_get_info_regulatory_v1 - regulatory information
* @lar_enabled: is LAR enabled
* @channel_profile: regulatory data of this channel
* @reserved: reserved
@@ -648,7 +649,7 @@ struct iwl_lari_config_change_cmd_v7 {
/* LARI_CHANGE_CONF_CMD_S_VER_9 */
/**
- * struct iwl_lari_config_change_cmd - change LARI configuration
+ * struct iwl_lari_config_change_cmd_v10 - change LARI configuration
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
* different predefined FW config operation.
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
@@ -674,7 +675,7 @@ struct iwl_lari_config_change_cmd_v7 {
* bit1: enable 320Mhz in South Korea.
* bit 2 - 31: reserved.
*/
-struct iwl_lari_config_change_cmd {
+struct iwl_lari_config_change_cmd_v10 {
__le32 config_bitmap;
__le32 oem_uhb_allow_bitmap;
__le32 oem_11ax_allow_bitmap;
@@ -686,8 +687,57 @@ struct iwl_lari_config_change_cmd {
} __packed;
/* LARI_CHANGE_CONF_CMD_S_VER_10 */
+/**
+ * struct iwl_lari_config_change_cmd - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * For LARI cmd version 11 - bits 0:5 are supported.
+ * @chan_state_active_bitmap: Bitmap to enable different bands per country
+ * or region.
+ * Each bit represents a country or region, and a band to activate
+ * according to the BIOS definitions.
+ * For LARI cmd version 11 - bits 0:4 are supported.
+ * For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are
+ * reserved. No need to mask out the reserved bits.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be
+ * disabled
+ * @edt_bitmap: Bitmap of energy detection threshold table.
+ * Disable/enable the EDT optimization method for different band.
+ * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC.
+ * bit0: enable 320Mhz in Japan.
+ * bit1: enable 320Mhz in South Korea.
+ * bit 2 - 31: reserved.
+ * @oem_11be_allow_bitmap: Bitmap of 11be allowed MCCs. No need to mask out the
+ * unsupported bits
+ * bit0: enable 11be in China(CB/CN).
+ * bit1: enable 11be in South Korea.
+ * bit 2 - 31: reserved.
+ */
+struct iwl_lari_config_change_cmd {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+ __le32 edt_bitmap;
+ __le32 oem_320mhz_allow_bitmap;
+ __le32 oem_11be_allow_bitmap;
+} __packed;
+/* LARI_CHANGE_CONF_CMD_S_VER_11 */
+/* LARI_CHANGE_CONF_CMD_S_VER_12 */
+
/* Activate UNII-1 (5.2GHz) for World Wide */
-#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
+#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
+#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F
/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
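The nvm-reg.h hunk above renames the previous structure to iwl_lari_config_change_cmd_v10, adds a new iwl_lari_config_change_cmd carrying oem_11be_allow_bitmap, and documents which chan_state_active_bitmap bits each LARI command version accepts, together with the new CHAN_STATE_ACTIVE_BITMAP_CMD_V11 mask. A minimal sketch of how that mask is presumably meant to be applied (helper name and context are assumptions, not the in-tree code that builds the command):

#include <stdint.h>

#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11	0x1F

/* Illustration only: clamp the BIOS-provided bitmap to what a given LARI
 * command version understands.  Per the kernel-doc above, version 11 only
 * supports bits 0:4, while version 12 tolerates the reserved bits and needs
 * no masking. */
static uint32_t lari_chan_state_for_ver(uint32_t bios_bitmap, int cmd_ver)
{
	if (cmd_ver == 11)
		return bios_bitmap & CHAN_STATE_ACTIVE_BITMAP_CMD_V11;
	return bios_bitmap;
}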
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 2ed7acc09e5a..6a7bbfd6b2b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -60,7 +60,7 @@ struct iwl_stored_beacon_notif_common {
} __packed;
/**
- * struct iwl_stored_beacon_notif - Stored beacon notification
+ * struct iwl_stored_beacon_notif_v2 - Stored beacon notification
*
* @common: fields common for all versions
* @data: beacon data, length in @byte_count
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 08a2c416ce60..4d8a12799c4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -113,7 +113,7 @@ struct iwl_phy_context_cmd_tail {
} __packed;
/**
- * struct iwl_phy_context_cmd - config of the PHY context
+ * struct iwl_phy_context_cmd_v1 - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
* @id_and_color: ID and color of the relevant Binding
* @action: action to perform, see &enum iwl_ctxt_action
@@ -144,6 +144,7 @@ struct iwl_phy_context_cmd_v1 {
* @rxchain_info: ???
* @sbb_bandwidth: 0 disabled, 1 - 40Mhz ... 4 - 320MHz
* @sbb_ctrl_channel_loc: location of the control channel
+ * @puncture_mask: bitmap of punctured subchannels
* @dsp_cfg_flags: set to 0
* @reserved: reserved to align to 64 bit
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index 92e4b62c119f..c73d4d597857 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -195,7 +195,7 @@ struct ct_kill_notif {
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
/**
-* enum ctdp_cmd_operation - CTDP command operations
+* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 532d5cfa9162..6e6a92d173cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -462,7 +462,7 @@ struct iwl_per_chain_offset {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
*/
@@ -472,7 +472,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -484,7 +484,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -496,7 +496,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -508,7 +508,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -569,9 +569,12 @@ enum iwl_ppag_flags {
* @v2: version 2
* version 3, 4, 5 and 6 are the same structure as v2,
* but has a different format of the flags bitmap
- * @flags: values from &enum iwl_ppag_flags
- * @gain: table of antenna gain values per chain and sub-band
- * @reserved: reserved
+ * @v1.flags: values from &enum iwl_ppag_flags
+ * @v1.gain: table of antenna gain values per chain and sub-band
+ * @v1.reserved: reserved
+ * @v2.flags: values from &enum iwl_ppag_flags
+ * @v2.gain: table of antenna gain values per chain and sub-band
+ * @v2.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
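
The hunk above moves the kernel-doc for union iwl_ppag_table_cmd to the dotted @v1.flags / @v2.flags form, which is how kernel-doc refers to members of named sub-structures from the enclosing comment. Below is a minimal standalone illustration of that convention; the union, field names and sizes are invented for the example and use plain C types rather than the driver's:

#include <stdint.h>

/**
 * union demo_table_cmd - hypothetical command sent in one of two layouts
 * @v1: version 1 layout
 * @v2: version 2 layout
 * @v1.flags: feature flags for the v1 layout
 * @v1.gain: per-chain gain values for the v1 layout
 * @v2.flags: feature flags for the v2 layout
 * @v2.gain: per-chain gain values for the v2 layout
 */
union demo_table_cmd {
	struct {
		uint32_t flags;
		uint8_t gain[4];
	} v1;
	struct {
		uint32_t flags;
		uint8_t gain[8];
	} v2;
};
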
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index a1a272433b09..1a60f0cdf972 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@@ -9,7 +9,7 @@
#include "mac.h"
/**
- * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
* @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
* bandwidths <= 80MHz
* @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index e71f29d0c694..691c879cb90d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -710,7 +710,15 @@ struct iwl_rx_mpdu_desc {
__le32 reorder_data;
union {
+ /**
+ * @v1: version 1 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v1
+ */
struct iwl_rx_mpdu_desc_v1 v1;
+ /**
+ * @v3: version 3 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v3
+ */
struct iwl_rx_mpdu_desc_v3 v3;
};
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
@@ -976,7 +984,7 @@ struct iwl_ba_window_status_notif {
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
/**
- * struct iwl_rfh_queue_config - RX queue configuration
+ * struct iwl_rfh_queue_data - RX queue configuration
* @q_num: Q num
* @enable: enable queue
* @reserved: alignment
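
The rx.h hunk above takes the other route kernel-doc offers for the same problem: the members of the anonymous union in iwl_rx_mpdu_desc get inline /** @v1: ... */ comments placed directly in the union body. A standalone sketch of that style, again with invented names and simplified types:

#include <stdint.h>

/**
 * struct demo_rx_desc - hypothetical RX descriptor with a versioned tail
 * @len: length of the received frame in bytes
 */
struct demo_rx_desc {
	uint32_t len;
	union {
		/**
		 * @v1: version 1 of the descriptor tail
		 */
		struct { uint32_t rate; } v1;
		/**
		 * @v3: version 3 of the descriptor tail
		 */
		struct { uint32_t rate; uint32_t tsf; } v3;
	};
};
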
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 6684506f4fc4..8598031567bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -149,7 +149,7 @@ struct iwl_scan_offload_profile_cfg_data {
} __packed;
/**
- * struct iwl_scan_offload_profile_cfg
+ * struct iwl_scan_offload_profile_cfg_v1 - scan offload profile config
* @profiles: profiles to search for match
* @data: the rest of the data for profile_cfg
*/
@@ -423,7 +423,7 @@ struct iwl_lmac_scan_complete_notif {
} __packed;
/**
- * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * struct iwl_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: &enum iwl_scan_offload_complete_status
@@ -443,10 +443,10 @@ struct iwl_periodic_scan_complete {
/* UMAC Scan API */
/* The maximum of either of these cannot exceed 8, because we use an
- * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ * 8-bit mask (see enum iwl_scan_status).
*/
-#define IWL_MVM_MAX_UMAC_SCANS 4
-#define IWL_MVM_MAX_LMAC_SCANS 1
+#define IWL_MAX_UMAC_SCANS 4
+#define IWL_MAX_LMAC_SCANS 1
enum scan_config_flags {
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
@@ -789,7 +789,7 @@ struct iwl_scan_req_umac_tail_v1 {
} __packed;
/**
- * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
+ * struct iwl_scan_req_umac_tail_v2 - the rest of the UMAC scan request command
* parameters following channels configuration array.
* @schedule: two scheduling plans.
* @delay: delay in TUs before starting the first scan iteration
@@ -1085,7 +1085,7 @@ struct iwl_scan_req_params_v12 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
- * struct iwl_scan_req_params_v16
+ * struct iwl_scan_req_params_v17 - scan request parameters (v17)
* @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v7
* @periodic_params: &struct iwl_scan_periodic_parms_v1
@@ -1111,7 +1111,7 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
- * struct iwl_scan_req_umac_v16
+ * struct iwl_scan_req_umac_v17 - scan request command (v17)
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 2e15be71c957..f4b827b58bd3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -340,11 +340,13 @@ struct iwl_hs20_roc_res {
* @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity
* @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity
* @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity
+ * @ROC_ACTIVITY_P2P_NEG: ROC for p2p negotiation (used also for TX)
*/
enum iwl_roc_activity {
ROC_ACTIVITY_HOTSPOT,
ROC_ACTIVITY_P2P_DISC,
ROC_ACTIVITY_P2P_TXRX,
+ ROC_ACTIVITY_P2P_NEG,
ROC_NUM_ACTIVITIES
}; /* ROC_ACTIVITY_API_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index bbd176d88820..c5277e2f8cd4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -698,6 +698,7 @@ enum iwl_mvm_ba_resp_flags {
* @query_frame_cnt: SCD query frame count
* @txed: number of frames sent in the aggregation (all-TIDs)
* @done: number of frames that were Acked by the BA (all-TIDs)
+ * @rts_retry_cnt: RTS retry count
* @reserved: reserved (for alignment)
* @wireless_time: Wireless-media time
* @tx_rate: the rate the aggregation was sent at
@@ -718,7 +719,8 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
- __le16 reserved;
+ u8 rts_retry_cnt;
+ u8 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
@@ -864,7 +866,7 @@ enum iwl_dump_control {
};
/**
- * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * struct iwl_tx_path_flush_cmd_v1 -- queue/FIFO flush command
* @queues_ctl: bitmap of queues to flush
* @flush_ctl: control flags
* @reserved: reserved
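
In the compressed BA notification hunk earlier in this file, the 16-bit reserved field is split into an 8-bit rts_retry_cnt plus an 8-bit reserved byte, so the packed layout keeps its size and everything after it stays at the same offset. A standalone sketch of that layout-preserving split with a compile-time size check; the struct and field names are invented:

#include <stdint.h>
#include <assert.h>

/* Old wire layout: two bytes held in reserve. */
struct demo_ba_notif_old {
	uint16_t done;
	uint16_t reserved;
	uint32_t wireless_time;
} __attribute__((packed));

/* New wire layout: one reserved byte becomes a real counter. */
struct demo_ba_notif_new {
	uint16_t done;
	uint8_t  rts_retry_cnt;
	uint8_t  reserved;
	uint32_t wireless_time;
} __attribute__((packed));

/* The split must not move any of the fields that follow it. */
static_assert(sizeof(struct demo_ba_notif_old) ==
	      sizeof(struct demo_ba_notif_new),
	      "layout changed size");

int main(void)
{
	return 0;
}
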
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 945ffc083d25..fa57df336785 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1168,17 +1168,13 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
le32_to_cpu(reg->dev_addr.offset);
int i;
- /* we shouldn't get here if the trans doesn't have read_config32 */
- if (WARN_ON_ONCE(!trans->ops->read_config32))
- return -EOPNOTSUPP;
-
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = reg->dev_addr.size;
for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
int ret;
u32 tmp;
- ret = trans->ops->read_config32(trans, addr + i, &tmp);
+ ret = iwl_trans_read_config32(trans, addr + i, &tmp);
if (ret < 0)
return ret;
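
This dbg.c hunk, together with the matching changes in debugfs.c and iwl-drv.c further down, replaces indirect calls through trans->ops with direct iwl_trans_*() wrappers, so the NULL-hook guard above it can go away. A minimal standalone sketch of that de-virtualisation step, using invented names rather than the driver's real transport API:

#include <stdio.h>
#include <stdint.h>

struct demo_trans { uint32_t cfg_space[16]; };

/* Before: an ops table with an optional hook the caller had to check. */
struct demo_trans_ops {
	int (*read_config32)(struct demo_trans *t, uint32_t addr, uint32_t *val);
};

/* After: one concrete backend behind a plain exported function. */
static int demo_pcie_read_config32(struct demo_trans *t, uint32_t addr,
				   uint32_t *val)
{
	if (addr / 4 >= 16)
		return -1;
	*val = t->cfg_space[addr / 4];
	return 0;
}

int demo_trans_read_config32(struct demo_trans *t, uint32_t addr, uint32_t *val)
{
	/* Single backend, so dispatch directly instead of via a vtable. */
	return demo_pcie_read_config32(t, addr, val);
}

int main(void)
{
	struct demo_trans t = { .cfg_space = { [1] = 0xdeadbeef } };
	uint32_t v;

	if (!demo_trans_read_config32(&t, 4, &v))
		printf("cfg[4] = 0x%08x\n", v);
	return 0;
}
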
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 751a125a1566..893b21fcaf87 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -230,8 +230,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
.data = { NULL, },
};
- if (fwrt->ops && fwrt->ops->fw_running &&
- !fwrt->ops->fw_running(fwrt->ops_ctx))
+ if (!iwl_trans_fw_running(fwrt->trans))
return -EIO;
if (count < header_size + 1 || count > 1024 * 4)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 5c76e3b94968..e63b08b7d336 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -248,7 +248,7 @@ struct iwl_fw_error_dump_mem {
#define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24))
/**
- * struct iwl_fw_error_dump_data - data for one type
+ * struct iwl_fw_ini_error_dump_data - data for one type
* @type: &enum iwl_fw_ini_region_type
* @sub_type: sub type id
* @sub_type_ver: sub type version
@@ -278,7 +278,7 @@ struct iwl_fw_ini_dump_entry {
} __packed;
/**
- * struct iwl_fw_error_dump_file - header of dump file
+ * struct iwl_fw_ini_dump_file_hdr - header of dump file
* @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER
* @file_len: the length of all the file including the header
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 135bd48bfe9f..d8b083be5b6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2021, 2024 Intel Corporation
*/
#include "iwl-drv.h"
#include "runtime.h"
@@ -135,7 +135,9 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
struct iwl_trans_rxq_dma_data data;
cmd->data[i].q_num = i + 1;
- iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+ ret = iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+ if (ret)
+ goto out;
cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
cmd->data[i].urbd_stts_wrptr =
@@ -149,6 +151,7 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+out:
kfree(cmd);
if (ret)
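
The init.c change starts honouring the return value of iwl_trans_get_rxq_dma_data() and exits through a shared out: label so the allocated command buffer is always freed. A standalone sketch of that single-exit cleanup pattern, with illustrative names only:

#include <stdlib.h>

/* Stand-in for a per-queue query that may fail (returns 0 on success). */
static int query_queue(int q, unsigned long long *cookie)
{
	if (q < 0)
		return -1;
	*cookie = 0x1000ull + q;
	return 0;
}

static int configure_queues(int num_queues)
{
	unsigned long long *table;
	int ret = 0;
	int i;

	table = calloc(num_queues, sizeof(*table));
	if (!table)
		return -1;

	for (i = 0; i < num_queues; i++) {
		/* Propagate the first failure, but still free the table. */
		ret = query_queue(i, &table[i]);
		if (ret)
			goto out;
	}

	/* ... send the assembled table to the device here ... */

out:
	free(table);
	return ret;
}

int main(void)
{
	return configure_queues(4) ? 1 : 0;
}
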
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index b9bb3636e88f..560a91998cc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -497,9 +497,13 @@ static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
size_t cmd_size;
switch (cmd_ver) {
- case 10:
+ case 12:
+ case 11:
cmd_size = sizeof(struct iwl_lari_config_change_cmd);
break;
+ case 10:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10);
+ break;
case 9:
case 8:
case 7:
@@ -560,6 +564,9 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
+ if (cmd_ver < 12)
+ value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;
+
cmd->chan_state_active_bitmap = cpu_to_le32(value);
}
@@ -580,6 +587,10 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
if (!ret)
cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
+ if (!ret)
+ cmd->oem_11be_allow_bitmap = cpu_to_le32(value);
+
if (cmd->config_bitmap ||
cmd->oem_uhb_allow_bitmap ||
cmd->oem_11ax_allow_bitmap ||
@@ -587,7 +598,8 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
cmd->chan_state_active_bitmap ||
cmd->force_disable_channels_bitmap ||
cmd->edt_bitmap ||
- cmd->oem_320mhz_allow_bitmap) {
+ cmd->oem_320mhz_allow_bitmap ||
+ cmd->oem_11be_allow_bitmap) {
IWL_DEBUG_RADIO(fwrt,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
le32_to_cpu(cmd->config_bitmap),
@@ -605,6 +617,9 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
"sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
le32_to_cpu(cmd->edt_bitmap),
le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_11be_allow_bitmap));
} else {
return 1;
}
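
regulatory.c now issues one more BIOS DSM query (DSM_FUNC_ENABLE_11BE, added to the enum in the next file) and folds the result into a new oem_11be_allow_bitmap, sending LARI_CONFIG_CHANGE only if at least one bitmap ended up non-zero. A standalone sketch of that query-and-collect shape; the function numbers and fields below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical BIOS query: returns 0 and fills *value on success. */
static int demo_bios_get_dsm(int func, uint32_t *value)
{
	/* Pretend only function 12 is provisioned on this platform. */
	if (func != 12)
		return -1;
	*value = 0x3;
	return 0;
}

struct demo_lari_cmd {
	uint32_t oem_11ax_allow_bitmap;
	uint32_t oem_11be_allow_bitmap;
};

/* Returns 0 if the command should be sent, 1 if there is nothing to send. */
static int demo_fill_lari_config(struct demo_lari_cmd *cmd)
{
	uint32_t value;

	if (!demo_bios_get_dsm(11, &value))
		cmd->oem_11ax_allow_bitmap = value;

	if (!demo_bios_get_dsm(12, &value))
		cmd->oem_11be_allow_bitmap = value;

	if (cmd->oem_11ax_allow_bitmap || cmd->oem_11be_allow_bitmap)
		return 0;
	return 1;
}

int main(void)
{
	struct demo_lari_cmd cmd = { 0 };

	if (!demo_fill_lari_config(&cmd))
		printf("11be bitmap: 0x%x\n", cmd.oem_11be_allow_bitmap);
	return 0;
}
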
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 633c9ad9af84..e2c056f483c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -115,7 +115,8 @@ enum iwl_dsm_funcs {
DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
DSM_FUNC_RFI_CONFIG = 11,
- DSM_FUNC_NUM_FUNCS = 12,
+ DSM_FUNC_ENABLE_11BE = 12,
+ DSM_FUNC_NUM_FUNCS = 13,
};
enum iwl_dsm_values_srd {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 9122f9a1260a..048877fa7c71 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -12,14 +12,13 @@
#include "fw/api/debug.h"
#include "fw/api/paging.h"
#include "fw/api/power.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "fw/acpi.h"
#include "fw/regulatory.h"
struct iwl_fw_runtime_ops {
void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
- bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
bool (*d3_debug_enable)(void *ctx);
};
@@ -104,7 +103,6 @@ struct iwl_txf_iter_data {
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
- * @uats_enabled: VLP or AFC AP is enabled
* @uats_table: AP type table
* @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
* 0: Unlocked, 1 and 2: Locked.
@@ -184,7 +182,6 @@ struct iwl_fw_runtime {
bool sgom_enabled;
struct iwl_mcc_allowed_ap_type_cmd uats_table;
u8 uefi_tables_lock_status;
- bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 732889f96ca2..b2abd4fd1944 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -241,7 +241,7 @@ enum iwl_cfg_trans_ltr_delay {
};
/**
- * struct iwl_cfg_trans - information needed to start the trans
+ * struct iwl_cfg_trans_params - information needed to start the trans
*
* These values are specific to the device ID and do not change when
* multiple configs are used for a single device ID. They values are
@@ -258,6 +258,7 @@ enum iwl_cfg_trans_ltr_delay {
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
+ * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
* @imr_enabled: use the IMR if supported.
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 4511d7fb2279..98563757ce2c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -304,9 +304,7 @@
#define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
#define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
-/**
- * hw_rev values
- */
+/* hw_rev values */
enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 561d0c261123..08d990ba8a79 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -223,12 +223,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EINVAL;
}
- if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
- !trans->ops->read_config32) {
- IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
- return -EOPNOTSUPP;
- }
-
if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
trans->dbg.imr_data.sram_addr =
le32_to_cpu(reg->internal_buffer.base_addr);
@@ -1246,12 +1240,6 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
}
fwrt->trans->dbg.restart_required = false;
- IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
- tp, dump_data.trig->reset_fw);
- IWL_DEBUG_FW(fwrt,
- "WRT: restart_required %d, last_tp_resetfw %d\n",
- fwrt->trans->dbg.restart_required,
- fwrt->trans->dbg.last_tp_resetfw);
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
@@ -1261,22 +1249,17 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
- IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
- IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
fwrt->trans->dbg.restart_required = true;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- IWL_DEBUG_FW(fwrt,
- "WRT: stop only and no reload firmware\n");
fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_NOTHING) {
- IWL_DEBUG_FW(fwrt,
- "WRT: nothing need to be done after debug collection\n");
+ /* nothing */
} else {
IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
le32_to_cpu(dump_data.trig->reset_fw));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 2c280a2fe3df..0d4a0896a2c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2019, 2023-2024 Intel Corporation
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ)
@@ -28,7 +28,7 @@ TRACE_EVENT(iwlwifi_dev_tx_tb,
TP_fast_assign(
DEV_ASSIGN;
__entry->phys = phys;
- if (iwl_trace_data(skb))
+ if (__get_dynamic_array_len(data))
memcpy(__get_dynamic_array(data), data_src, data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index e656bf6bc003..ead72c3d33bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -4,7 +4,7 @@
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018, 2023 Intel Corporation
+ * Copyright(c) 2018, 2023-2024 Intel Corporation
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ)
@@ -88,8 +88,8 @@ TRACE_EVENT(iwlwifi_dev_tx,
* for the possible padding).
*/
__dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ?
- 0 : skb->len - hdr_len)
+ __dynamic_array(u8, buf1, hdr_len > 0 && !iwl_trace_data(skb) ?
+ skb->len - hdr_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
@@ -99,7 +99,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
__entry->framelen += skb->len - hdr_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- if (hdr_len > 0 && !iwl_trace_data(skb))
+ if (__get_dynamic_array_len(buf1))
skb_copy_bits(skb, hdr_len,
__get_dynamic_array(buf1),
skb->len - hdr_len);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 33654f228ee8..aaaabd67f959 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -982,16 +982,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
- if (major >= 35)
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version),
- "%u.%08x.%u %s", major, minor,
- local_comp, iwl_reduced_fw_name(drv));
- else
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version),
- "%u.%u.%u %s", major, minor,
- local_comp, iwl_reduced_fw_name(drv));
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%08x.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1815,8 +1809,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_drv);
- iwl_dbg_tlv_free(drv->trans);
#endif
+ iwl_dbg_tlv_free(drv->trans);
kfree(drv);
err:
return ERR_PTR(ret);
@@ -1842,7 +1836,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
mutex_unlock(&iwlwifi_opmode_table_mtx);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->trans->ops->debugfs_cleanup(drv->trans);
+ iwl_trans_debugfs_cleanup(drv->trans);
debugfs_remove_recursive(drv->dbgfs_drv);
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
deleted file mode 100644
index 5f386bb1a353..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ /dev/null
@@ -1,394 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
- */
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "iwl-drv.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom-read.h"
-#include "iwl-io.h"
-#include "iwl-prph.h"
-#include "iwl-csr.h"
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- IWL_EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_EEPROM(trans->dev,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
-{
- iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-}
-
-static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
-{
- u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
-
- IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
-
- switch (gp) {
- case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (!nvm_is_otp) {
- IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
- gp);
- return -ENOENT;
- }
- return 0;
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (nvm_is_otp) {
- IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
- return -ENOENT;
- }
- return 0;
- case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
- default:
- IWL_ERR(trans,
- "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
- nvm_is_otp ? "OTP" : "EEPROM", gp);
- return -ENOENT;
- }
-}
-
-/******************************************************************************
- *
- * OTP related functions
- *
-******************************************************************************/
-
-static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
-{
- iwl_read32(trans, CSR_OTP_GP_REG);
-
- iwl_clear_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_OTP_ACCESS_MODE);
-}
-
-static int iwl_nvm_is_otp(struct iwl_trans *trans)
-{
- u32 otpgp;
-
- /* OTP only valid for CP/PP and after */
- switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(trans, "Unknown hardware type\n");
- return -EIO;
- case CSR_HW_REV_TYPE_5300:
- case CSR_HW_REV_TYPE_5350:
- case CSR_HW_REV_TYPE_5100:
- case CSR_HW_REV_TYPE_5150:
- return 0;
- default:
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
- return 1;
- return 0;
- }
-}
-
-static int iwl_init_otp_access(struct iwl_trans *trans)
-{
- int ret;
-
- ret = iwl_finish_nic_init(trans);
- if (ret)
- return ret;
-
- iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- /*
- * CSR auto clock gate disable bit -
- * this is only applicable for HW with OTP shadow RAM
- */
- if (trans->trans_cfg->base_params->shadow_ram_support)
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
-
- return 0;
-}
-
-static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
- __le16 *eeprom_data)
-{
- int ret = 0;
- u32 r;
- u32 otpgp;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
- return ret;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- /* check for ECC errors: */
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
- /* stop in this case */
- /* set the uncorrectable OTP ECC bit for acknowledgment */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
- return -EINVAL;
- }
- if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
- /* continue in this case */
- /* set the correctable OTP ECC bit for acknowledgment */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
- }
- *eeprom_data = cpu_to_le16(r >> 16);
- return 0;
-}
-
-/*
- * iwl_is_otp_empty: check for empty OTP
- */
-static bool iwl_is_otp_empty(struct iwl_trans *trans)
-{
- u16 next_link_addr = 0;
- __le16 link_value;
- bool is_empty = false;
-
- /* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
- if (!link_value) {
- IWL_ERR(trans, "OTP is empty\n");
- is_empty = true;
- }
- } else {
- IWL_ERR(trans, "Unable to read first block of OTP list.\n");
- is_empty = true;
- }
-
- return is_empty;
-}
-
-
-/*
- * iwl_find_otp_image: find EEPROM image in OTP
- * finding the OTP block that contains the EEPROM image.
- * the last valid block on the link list (the block _before_ the last block)
- * is the block we should read and used to configure the device.
- * If all the available OTP blocks are full, the last block will be the block
- * we should read and used to configure the device.
- * only perform this operation if shadow RAM is disabled
- */
-static int iwl_find_otp_image(struct iwl_trans *trans,
- u16 *validblockaddr)
-{
- u16 next_link_addr = 0, valid_addr;
- __le16 link_value = 0;
- int usedblocks = 0;
-
- /* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access_absolute(trans);
-
- /* checking for empty OTP or error */
- if (iwl_is_otp_empty(trans))
- return -EINVAL;
-
- /*
- * start traverse link list
- * until reach the max number of OTP blocks
- * different devices have different number of OTP blocks
- */
- do {
- /* save current valid block address
- * check for more block on the link list
- */
- valid_addr = next_link_addr;
- next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
- usedblocks, next_link_addr);
- if (iwl_read_otp_word(trans, next_link_addr, &link_value))
- return -EINVAL;
- if (!link_value) {
- /*
- * reach the end of link list, return success and
- * set address point to the starting address
- * of the image
- */
- *validblockaddr = valid_addr;
- /* skip first 2 bytes (link list pointer) */
- *validblockaddr += 2;
- return 0;
- }
- /* more in the link list, continue */
- usedblocks++;
- } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
-
- /* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
- return -EINVAL;
-}
-
-/*
- * iwl_read_eeprom - read EEPROM contents
- *
- * Load the EEPROM contents from adapter and return it
- * and its size.
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
-{
- __le16 *e;
- u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
- u16 validblockaddr = 0;
- u16 cache_addr = 0;
- int nvm_is_otp;
-
- if (!eeprom || !eeprom_size)
- return -EINVAL;
-
- nvm_is_otp = iwl_nvm_is_otp(trans);
- if (nvm_is_otp < 0)
- return nvm_is_otp;
-
- sz = trans->trans_cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
-
- e = kmalloc(sz, GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
- if (ret < 0) {
- IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- goto err_free;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(trans);
- if (ret < 0) {
- IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
- goto err_free;
- }
-
- if (nvm_is_otp) {
- ret = iwl_init_otp_access(trans);
- if (ret) {
- IWL_ERR(trans, "Failed to initialize OTP access.\n");
- goto err_unlock;
- }
-
- iwl_write32(trans, CSR_EEPROM_GP,
- iwl_read32(trans, CSR_EEPROM_GP) &
- ~CSR_EEPROM_GP_IF_OWNER_MSK);
-
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- /* traversing the linked list if no shadow ram supported */
- if (!trans->trans_cfg->base_params->shadow_ram_support) {
- ret = iwl_find_otp_image(trans, &validblockaddr);
- if (ret)
- goto err_unlock;
- }
- for (addr = validblockaddr; addr < validblockaddr + sz;
- addr += sizeof(u16)) {
- __le16 eeprom_data;
-
- ret = iwl_read_otp_word(trans, addr, &eeprom_data);
- if (ret)
- goto err_unlock;
- e[cache_addr / 2] = eeprom_data;
- cache_addr += sizeof(u16);
- }
- } else {
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans,
- "Time out reading EEPROM[%d]\n", addr);
- goto err_unlock;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
- }
-
- IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
- nvm_is_otp ? "OTP" : "EEPROM");
-
- iwl_eeprom_release_semaphore(trans);
-
- *eeprom_size = sz;
- *eeprom = (u8 *)e;
- return 0;
-
- err_unlock:
- iwl_eeprom_release_semaphore(trans);
- err_free:
- kfree(e);
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
deleted file mode 100644
index 63b8e6c6659b..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2005-2014 Intel Corporation
- */
-#ifndef __iwl_eeprom_h__
-#define __iwl_eeprom_h__
-
-#include "iwl-trans.h"
-
-int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
-
-#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 6ba374efaacb..5c8f1868db64 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -15,7 +15,7 @@
/* Flow Handler Definitions */
/****************************/
-/**
+/*
* This I/O area is directly read/writable by driver (e.g. Linux uses writel())
* Addresses are offsets from device's PCI hardware base address.
*/
@@ -24,7 +24,7 @@
#define FH_MEM_LOWER_BOUND_GEN2 (0xa06000)
#define FH_MEM_UPPER_BOUND_GEN2 (0xa08000)
-/**
+/*
* Keep-Warm (KW) buffer base address.
*
* Driver must allocate a 4KByte buffer that is for keeping the
@@ -44,7 +44,7 @@
#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
-/**
+/*
* TFD Circular Buffers Base (CBBC) addresses
*
* Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
@@ -143,7 +143,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/
#define TFH_SRV_DMA_CHNL0_BC (0x1F70)
-/**
+/*
* Rx SRAM Control and Status Registers (RSCSR)
*
* These registers provide handshake between driver and device for the Rx queue
@@ -216,21 +216,21 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
-/**
+/*
* Physical base address of 8-byte Rx Status buffer.
* Bit fields:
* 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
*/
#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
-/**
+/*
* Physical base address of Rx Buffer Descriptor Circular Buffer.
* Bit fields:
* 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
*/
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
-/**
+/*
* Rx write pointer (index, really!).
* Bit fields:
* 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
@@ -242,7 +242,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c)
#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
-/**
+/*
* Rx Config/Status Registers (RCSR)
* Rx Config Reg for channel 0 (only channel used)
*
@@ -300,7 +300,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
-/**
+/*
* Rx Shared Status Registers (RSSR)
*
* After stopping Rx DMA channel (writing 0 to
@@ -356,7 +356,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RFH_RBDBUF_RBD0_LSB 0xA08300
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
-/**
+/*
* RFH Status Register
*
* Bit fields:
@@ -440,7 +440,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-/**
+/*
* Transmit DMA Channel Control/Status Registers (TCSR)
*
* Device has one configuration register for each of 8 Tx DMA/FIFO channels
@@ -501,7 +501,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
-/**
+/*
* Tx Shared Status Registers (TSSR)
*
* After stopping Tx DMA channel (writing 0 to
@@ -518,7 +518,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
-/**
+/*
* Bit fields for TSSR(Tx Shared Status & Control) error status register:
* 31: Indicates an address error when accessed to internal memory
* uCode/driver must write "1" in order to clear this flag
@@ -634,7 +634,7 @@ enum iwl_tfd_tb_hi_n_len {
};
/**
- * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
*
* This structure contains dma address and length of transmission address
*
@@ -648,7 +648,7 @@ struct iwl_tfd_tb {
} __packed;
/**
- * struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor
+ * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
*
* This structure contains dma address and length of transmission address
*
@@ -717,7 +717,7 @@ struct iwl_tfh_tfd {
/* Fixed (non-configurable) rx data from phy */
/**
- * struct iwlagn_schedq_bc_tbl scheduler byte count table
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
* @tfd_offset:
@@ -734,7 +734,7 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
- * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3
+ * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index c60f9466c5fd..060becfd64f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
#include <linux/delay.h>
@@ -460,7 +460,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
*/
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 1cf26ab4f488..21eabfc3ffc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022, 2024 Intel Corporation
*/
#ifndef __iwl_modparams_h__
#define __iwl_modparams_h__
@@ -106,4 +106,23 @@ static inline bool iwl_enable_tx_ampdu(void)
return true;
}
+/* Verify amsdu_size module parameter and convert it to a rxb size */
+static inline enum iwl_amsdu_size
+iwl_amsdu_size_to_rxb_size(void)
+{
+ switch (iwlwifi_mod_params.amsdu_size) {
+ case IWL_AMSDU_8K:
+ return IWL_AMSDU_8K;
+ case IWL_AMSDU_12K:
+ return IWL_AMSDU_12K;
+ default:
+ pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
+ iwlwifi_mod_params.amsdu_size);
+ fallthrough;
+ case IWL_AMSDU_DEF:
+ case IWL_AMSDU_4K:
+ return IWL_AMSDU_4K;
+ }
+}
+
#endif /* #__iwl_modparams_h__ */
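
The new iwl_amsdu_size_to_rxb_size() helper centralises validation of the amsdu_size module parameter: known values map through, anything else logs an error and falls through to the 4K default. A standalone sketch of that validate-with-fallthrough switch, with invented enum values:

#include <stdio.h>

enum demo_amsdu_size {
	DEMO_AMSDU_DEF = 0,
	DEMO_AMSDU_4K  = 1,
	DEMO_AMSDU_8K  = 2,
	DEMO_AMSDU_12K = 3,
};

static enum demo_amsdu_size demo_amsdu_param_to_rxb(int param)
{
	switch (param) {
	case DEMO_AMSDU_8K:
		return DEMO_AMSDU_8K;
	case DEMO_AMSDU_12K:
		return DEMO_AMSDU_12K;
	default:
		fprintf(stderr, "unsupported amsdu_size: %d, using 4K\n", param);
		/* fall through to the safe default */
	case DEMO_AMSDU_DEF:
	case DEMO_AMSDU_4K:
		return DEMO_AMSDU_4K;
	}
}

int main(void)
{
	printf("%d\n", demo_amsdu_param_to_rxb(7));
	return 0;
}
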
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 149903f52567..d902121da009 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -38,16 +38,13 @@ enum nvm_offsets {
N_HW_ADDRS = 3,
NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
- /* NVM calibration section offset (in words) definitions */
- NVM_CALIB_SECTION = 0x2B8,
- XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
-
/* NVM REGULATORY -Section offset (in words) definitions */
NVM_CHANNELS_SDP = 0,
};
enum ext_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
+
MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
/* NVM SW-Section offset (in words) definitions */
@@ -373,7 +370,9 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
flags |= IEEE80211_CHAN_IR_CONCURRENT;
/* Set the AP type for the UHB case. */
- if (!(nvm_flags & NVM_CHANNEL_VLP))
+ if (nvm_flags & NVM_CHANNEL_VLP)
+ flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
+ else
flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
if (!(nvm_flags & NVM_CHANNEL_AFC))
flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
@@ -1574,9 +1573,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
&regulatory[NVM_CHANNELS_SDP] :
&nvm_sw[NVM_CHANNELS];
- /* in family 8000 Xtal calibration values moved to OTP */
- data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
- data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
lar_enabled = true;
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
@@ -1614,8 +1610,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg,
- bool uats_enabled)
+ const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1625,11 +1620,15 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags &= ~NL80211_RRF_NO_HT40PLUS;
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
flags &= ~NL80211_RRF_NO_HT40MINUS;
- } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
+ } else if (ch_idx < NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS &&
+ nvm_flags & NVM_CHANNEL_40MHZ) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
flags &= ~NL80211_RRF_NO_HT40PLUS;
else
flags &= ~NL80211_RRF_NO_HT40MINUS;
+ } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
+ flags &= ~NL80211_RRF_NO_HT40PLUS;
+ flags &= ~NL80211_RRF_NO_HT40MINUS;
}
if (!(nvm_flags & NVM_CHANNEL_80MHZ))
@@ -1662,13 +1661,13 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
}
/* Set the AP type for the UHB case. */
- if (uats_enabled) {
- if (!(nvm_flags & NVM_CHANNEL_VLP))
- flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+ if (nvm_flags & NVM_CHANNEL_VLP)
+ flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP;
+ else
+ flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
- if (!(nvm_flags & NVM_CHANNEL_AFC))
- flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
- }
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
/*
* reg_capa is per regulatory domain so apply it for every channel
@@ -1724,7 +1723,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled)
+ u16 geo_info, u32 cap, u8 resp_ver)
{
int ch_idx;
u16 ch_flags;
@@ -1732,7 +1731,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
const u16 *nvm_chan;
struct ieee80211_regdomain *regd, *copy_rd;
struct ieee80211_reg_rule *rule;
- enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
@@ -1776,8 +1774,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_capa = iwl_get_reg_capa(cap, resp_ver);
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+ enum nl80211_band band =
+ iwl_nl80211_band_from_channel_idx(ch_idx);
+
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
- band = iwl_nl80211_band_from_channel_idx(ch_idx);
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;
@@ -1790,7 +1790,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, reg_capa,
- cfg, uats_enabled);
+ cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
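
With uats_enabled gone, nvm-parse.c maps the VLP/AFC NVM channel bits to regulatory rule flags unconditionally: a VLP-capable channel gets the allow-VLP-AP flag, any other channel the no-VLP-client flag, and missing AFC support adds the no-AFC-client flag. A standalone sketch of that bit-to-flag translation; the bit positions and flag values below are invented:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical NVM channel capability bits. */
#define DEMO_NVM_CHANNEL_VLP  (1u << 0)
#define DEMO_NVM_CHANNEL_AFC  (1u << 1)

/* Hypothetical regulatory rule flags. */
#define DEMO_RRF_ALLOW_VLP_AP   (1u << 0)
#define DEMO_RRF_NO_VLP_CLIENT  (1u << 1)
#define DEMO_RRF_NO_AFC_CLIENT  (1u << 2)

static uint32_t demo_regdom_flags(uint32_t nvm_flags)
{
	uint32_t flags = 0;

	if (nvm_flags & DEMO_NVM_CHANNEL_VLP)
		flags |= DEMO_RRF_ALLOW_VLP_AP;
	else
		flags |= DEMO_RRF_NO_VLP_CLIENT;

	if (!(nvm_flags & DEMO_NVM_CHANNEL_AFC))
		flags |= DEMO_RRF_NO_AFC_CLIENT;

	return flags;
}

int main(void)
{
	printf("0x%x\n", demo_regdom_flags(DEMO_NVM_CHANNEL_VLP));
	return 0;
}
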
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index fd9c3bed9407..0c6c3fb8c6dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2015, 2018-2024 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
#define __iwl_nvm_parse_h__
#include <net/cfg80211.h>
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "mei/iwl-mei.h"
/**
@@ -38,7 +38,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u8 tx_chains, u8 rx_chains);
/**
- * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
+ * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
*
* This function parses the regulatory channel data received as a
* MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain,
@@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled);
+ u16 geo_info, u32 cap, u8 resp_ver);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
new file mode 100644
index 000000000000..b3c25acd3691
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2015 Intel Mobile Communications GmbH
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-utils.h"
+
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum nl80211_band band)
+{
+ struct ieee80211_channel *chan = &data->channels[0];
+ int n = 0, idx = 0;
+
+ while (idx < n_channels && chan->band != band)
+ chan = &data->channels[++idx];
+
+ sband->channels = &data->channels[idx];
+
+ while (idx < n_channels && chan->band == band) {
+ chan = &data->channels[++idx];
+ n++;
+ }
+
+ sband->n_channels = n;
+
+ return n;
+}
+IWL_EXPORT_SYMBOL(iwl_init_sband_channels);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+
+void iwl_init_ht_hw_capab(struct iwl_trans *trans,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum nl80211_band band,
+ u8 tx_chains, u8 rx_chains)
+{
+ const struct iwl_cfg *cfg = trans->cfg;
+ int max_bit_rate = 0;
+
+ tx_chains = hweight8(tx_chains);
+ if (cfg->rx_with_siso_diversity)
+ rx_chains = 1;
+ else
+ rx_chains = hweight8(rx_chains);
+
+ if (!(data->sku_cap_11n_enable) ||
+ (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
+ !cfg->ht_params) {
+ ht_info->ht_supported = false;
+ return;
+ }
+
+ if (data->sku_cap_mimo_disabled)
+ rx_chains = 1;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+
+ if (cfg->ht_params->stbc) {
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ if (tx_chains > 1)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ }
+
+ if (cfg->ht_params->ldpc)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (trans->trans_cfg->mq_rx_supported ||
+ iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ ht_info->mcs.rx_mask[1] = 0x00;
+ ht_info->mcs.rx_mask[2] = 0x00;
+
+ if (rx_chains >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ if (cfg->ht_params->ht_greenfield_support)
+ ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+
+ if (cfg->ht_params->ht40_bands & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains != rx_chains) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_chains - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_init_ht_hw_capab);
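
iwl_init_sband_channels(), moved into the new iwl-nvm-utils.c above, points each supported band at a contiguous slice of the flat channels[] array by skipping to the first matching entry and counting until the band changes. A simplified standalone sketch of that slicing logic over a plain array (types reduced to ints for the example):

#include <stdio.h>

struct demo_channel { int band; int freq; };

struct demo_sband { const struct demo_channel *channels; int n_channels; };

/* Find the contiguous run of entries belonging to the requested band. */
static int demo_init_sband(const struct demo_channel *all, int n_all,
			   int band, struct demo_sband *sband)
{
	int idx = 0, n = 0;

	while (idx < n_all && all[idx].band != band)
		idx++;

	sband->channels = &all[idx];

	while (idx < n_all && all[idx].band == band) {
		idx++;
		n++;
	}

	sband->n_channels = n;
	return n;
}

int main(void)
{
	const struct demo_channel chans[] = {
		{ 0, 2412 }, { 0, 2437 }, { 1, 5180 }, { 1, 5200 },
	};
	struct demo_sband sband;

	printf("5 GHz channels: %d\n",
	       demo_init_sband(chans, 4, 1, &sband));
	return 0;
}
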
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h
index 34a178a2eb5d..ac0a29a1c31f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h
@@ -58,23 +58,6 @@ struct iwl_nvm_data {
struct ieee80211_channel channels[];
};
-/**
- * iwl_parse_eeprom_data - parse EEPROM data and return values
- *
- * @trans: ransport we're parsing for, for debug only
- * @cfg: device configuration for parsing and overrides
- * @eeprom: the EEPROM data
- * @eeprom_size: length of the EEPROM data
- *
- * This function parses all EEPROM values we need and then
- * returns a (newly allocated) struct containing all the
- * relevant values for driver use. The struct must be freed
- * later with iwl_free_nvm_data().
- */
-struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
- const u8 *eeprom, size_t eeprom_size);
-
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
int n_channels, enum nl80211_band band);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 1ca82f3e4ebf..595fa6ddf084 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -185,7 +185,8 @@ static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
might_sleep();
- op_mode->ops->nic_config(op_mode);
+ if (op_mode->ops->nic_config)
+ op_mode->ops->nic_config(op_mode);
}
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 898e22e0d1ab..dc171c29eb7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -96,7 +96,7 @@
#define DTSC_PTAT_AVG (0x00a10650)
-/**
+/*
* Tx Scheduler
*
* The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
@@ -169,7 +169,7 @@
*/
#define SCD_MEM_LOWER_BOUND (0x0000)
-/**
+/*
* Max Tx window size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index f95098c21c7d..3c9d91496c82 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@@ -11,13 +11,13 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
-#include "queue/tx.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
+#include "pcie/internal.h"
+#include "iwl-context-info-gen3.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
@@ -37,22 +37,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#endif
trans->dev = dev;
- trans->ops = ops;
trans->num_rx_queues = 1;
- WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
-
- if (trans->trans_cfg->gen2) {
- trans->txqs.tfd.addr_size = 64;
- trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
- } else {
- trans->txqs.tfd.addr_size = 36;
- trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfd);
- }
- trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
-
return trans;
}
@@ -78,31 +64,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- trans->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
- else
- trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
- /*
- * For gen2 devices, we use a single allocation for each byte-count
- * table, but they're pretty small (1k) so use a DMA pool that we
- * allocate here.
- */
- if (trans->trans_cfg->gen2) {
- trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
- trans->txqs.bc_tbl_size,
- 256, 0);
- if (!trans->txqs.bc_pool)
- return -ENOMEM;
- }
-
- /* Some things must not change even if the config does */
- WARN_ON(trans->txqs.tfd.addr_size !=
- (trans->trans_cfg->gen2 ? 64 : 36));
-
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
@@ -112,12 +73,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (!trans->dev_cmd_pool)
return -ENOMEM;
- trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
- if (!trans->txqs.tso_hdr_page) {
- kmem_cache_destroy(trans->dev_cmd_pool);
- return -ENOMEM;
- }
-
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans->wait_command_queue);
@@ -126,20 +81,6 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
- int i;
-
- if (trans->txqs.tso_hdr_page) {
- for_each_possible_cpu(i) {
- struct iwl_tso_hdr_page *p =
- per_cpu_ptr(trans->txqs.tso_hdr_page, i);
-
- if (p && p->page)
- __free_page(p->page);
- }
-
- free_percpu(trans->txqs.tso_hdr_page);
- }
-
kmem_cache_destroy(trans->dev_cmd_pool);
}
@@ -167,10 +108,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
- if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
return -EIO;
- }
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
@@ -180,7 +120,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
cmd->id = DEF_ID(cmd->id);
}
- ret = iwl_trans_txq_send_hcmd(trans, cmd);
+ ret = iwl_trans_pcie_send_hcmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
lock_map_release(&trans->sync_cmd_lockdep_map);
@@ -247,3 +187,379 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
+
+void iwl_trans_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ trans->op_mode = trans_cfg->op_mode;
+
+ iwl_trans_pcie_configure(trans, trans_cfg);
+ WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
+}
+IWL_EXPORT_SYMBOL(iwl_trans_configure);
+
+int iwl_trans_start_hw(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_start_hw(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
+
+void iwl_trans_op_mode_leave(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ iwl_trans_pcie_op_mode_leave(trans);
+
+ trans->op_mode = NULL;
+
+ trans->state = IWL_TRANS_NO_FW;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);
+
+void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+ iwl_trans_pcie_write8(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write8);
+
+void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_trans_pcie_write32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write32);
+
+u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_trans_pcie_read32(trans, ofs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read32);
+
+u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_trans_pcie_read_prph(trans, ofs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_prph);
+
+void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ return iwl_trans_pcie_write_prph(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_prph);
+
+int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_mem);
+
+int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
+{
+ return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_mem);
+
+void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
+{
+ if (state)
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
+ else
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);
+
+int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
+{
+ return iwl_trans_pcie_sw_reset(trans, retake_ownership);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);
+
+struct iwl_trans_dump_data *
+iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx)
+{
+ return iwl_trans_pcie_dump_data(trans, dump_mask,
+ sanitize_ops, sanitize_ctx);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
+
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_d3_suspend(trans, test, reset);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
+
+int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
+ bool test, bool reset)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_d3_resume(trans, status, test, reset);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
+
+void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
+{
+ iwl_trans_pci_interrupts(trans, enable);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_interrupts);
+
+void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_sync_nmi(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);
+
+int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
+ u64 src_addr, u32 byte_cnt)
+{
+ return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);
+
+void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);
+
+int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
+{
+ return iwl_trans_pcie_read_config32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_config32);
+
+bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
+{
+ return iwl_trans_pcie_grab_nic_access(trans);
+}
+IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);
+
+void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_release_nic_access(trans);
+ __release(nic_access);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
+
+void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+ might_sleep();
+
+ trans->state = IWL_TRANS_FW_ALIVE;
+
+ if (trans->trans_cfg->gen2)
+ iwl_trans_pcie_gen2_fw_alive(trans);
+ else
+ iwl_trans_pcie_fw_alive(trans, scd_addr);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
+
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill)
+{
+ int ret;
+
+ might_sleep();
+
+ WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
+ clear_bit(STATUS_FW_ERROR, &trans->status);
+
+ if (trans->trans_cfg->gen2)
+ ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
+ else
+ ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);
+
+ if (ret == 0)
+ trans->state = IWL_TRANS_FW_STARTED;
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_start_fw);
+
+void iwl_trans_stop_device(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ if (trans->trans_cfg->gen2)
+ iwl_trans_pcie_gen2_stop_device(trans);
+ else
+ iwl_trans_pcie_stop_device(trans);
+
+ trans->state = IWL_TRANS_NO_FW;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_stop_device);
+
+int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue)
+{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ if (trans->trans_cfg->gen2)
+ return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);
+
+ return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_tx);
+
+void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
+ struct sk_buff_head *skbs, bool is_flush)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_reclaim);
+
+void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd)
+{
+ iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);
+
+bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout)
+{
+ might_sleep();
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return false;
+
+ return iwl_trans_pcie_txq_enable(trans, queue, ssn,
+ cfg, queue_wdg_timeout);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
+
+int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_trans_pcie_wait_txq_empty(trans, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);
+
+int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);
+
+void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);
+
+void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int txq_id, bool shared_mode)
+{
+ iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_debugfs_cleanup(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
+#endif
+
+void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_set_q_ptrs(trans, queue, ptr);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);
+
+int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int wdg_timeout)
+{
+ might_sleep();
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
+ size, wdg_timeout);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);
+
+void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
+{
+ iwl_txq_dyn_free(trans, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_free);
+
+int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);
+
+int iwl_trans_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ const struct iwl_ucode_capabilities *capa)
+{
+ return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
+
+void iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
+
+int iwl_trans_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
+{
+ return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
+ capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
+
+void iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
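[Editor's note] The hunks above replace the indirect trans->ops callbacks with exported wrappers that keep the common sanity checks and then call the PCIe implementation directly. A minimal sketch of that wrapper shape, with iwl_trans_foo()/iwl_trans_pcie_foo() used as placeholder names rather than real driver functions:

    /* Illustrative only: the wrapper pattern used throughout iwl-trans.c
     * after the ops-table removal. iwl_trans_foo()/iwl_trans_pcie_foo()
     * are placeholders, not functions from the driver.
     */
    int iwl_trans_foo(struct iwl_trans *trans, int arg)
    {
            /* shared checks stay in the transport-agnostic wrapper */
            if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
                    return -EIO;

            if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
                          "bad state = %d\n", trans->state))
                    return -EIO;

            /* direct call instead of trans->ops->foo(trans, arg) */
            return iwl_trans_pcie_foo(trans, arg);
    }
    IWL_EXPORT_SYMBOL(iwl_trans_foo);

Keeping the FW-alive/FW-error checks in the wrappers preserves the previous inline behaviour while dropping one pointer indirection per call.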
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index b93cef7b2330..6148acbac6af 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -26,11 +26,9 @@
* DOC: Transport layer - what is it ?
*
* The transport layer is the layer that deals with the HW directly. It provides
- * an abstraction of the underlying HW to the upper layer. The transport layer
- * doesn't provide any policy, algorithm or anything of this kind, but only
- * mechanisms to make the HW do something. It is not completely stateless but
- * close to it.
- * We will have an implementation for each different supported bus.
+ * the PCIe access to the underlying hardware. The transport layer doesn't
+ * provide any policy, algorithm or anything of this kind, but only mechanisms
+ * to make the HW do something. It is not completely stateless but close to it.
*/
/**
@@ -122,6 +120,7 @@ enum CMD_MODE {
CMD_BLOCK_TXQS = BIT(3),
CMD_SEND_IN_D3 = BIT(4),
};
+#define CMD_MODE_BITS 5
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -131,6 +130,11 @@ enum CMD_MODE {
* For allocation of the command and tx queues, this establishes the overall
* size of the largest command we send to uCode, except for commands that
* aren't fully copied and use other TFD space.
+ *
+ * @hdr: command header
+ * @payload: payload for the command
+ * @hdr_wide: wide command header
+ * @payload_wide: payload for the wide command
*/
struct iwl_device_cmd {
union {
@@ -167,12 +171,6 @@ struct iwl_device_tx_cmd {
*/
#define IWL_MAX_CMD_TBS_PER_TFD 2
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
-
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
@@ -281,7 +279,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_9000_MAX_RX_HW_QUEUES 1
/**
- * enum iwl_wowlan_status - WoWLAN image/device status
+ * enum iwl_d3_status - WoWLAN image/device status
* @IWL_D3_STATUS_ALIVE: firmware is still running after resume
* @IWL_D3_STATUS_RESET: device was reset while suspended
*/
@@ -299,9 +297,6 @@ enum iwl_d3_status {
* @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
- * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
- * are sent
- * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
* e.g. for testing
@@ -314,8 +309,6 @@ enum iwl_trans_status {
STATUS_RFKILL_HW,
STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
- STATUS_TRANS_GOING_IDLE,
- STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
STATUS_SUPPRESS_CMD_ERROR_ONCE,
};
@@ -482,183 +475,6 @@ struct iwl_pnvm_image {
};
/**
- * struct iwl_trans_ops - transport specific operations
- *
- * All the handlers MUST be implemented
- *
- * @start_hw: starts the HW. From that point on, the HW can send interrupts.
- * May sleep.
- * @op_mode_leave: Turn off the HW RF kill indication if on
- * May sleep
- * @start_fw: allocates and inits all the resources for the transport
- * layer. Also kick a fw image.
- * May sleep
- * @fw_alive: called when the fw sends alive notification. If the fw provides
- * the SCD base address in SRAM, then provide it here, or 0 otherwise.
- * May sleep
- * @stop_device: stops the whole device (embedded CPU put to reset) and stops
- * the HW. From that point on, the HW will be stopped but will still issue
- * an interrupt if the HW RF kill switch is triggered.
- * This callback must do the right thing and not crash even if %start_hw()
- * was called but not &start_fw(). May sleep.
- * @d3_suspend: put the device into the correct mode for WoWLAN during
- * suspend. This is optional, if not implemented WoWLAN will not be
- * supported. This callback may sleep.
- * @d3_resume: resume the device after WoWLAN, enabling the opmode to
- * talk to the WoWLAN image to get its status. This is optional, if not
- * implemented WoWLAN will not be supported. This callback may sleep.
- * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
- * If RFkill is asserted in the middle of a SYNC host command, it must
- * return -ERFKILL straight away.
- * May sleep only if CMD_ASYNC is not set
- * @tx: send an skb. The transport relies on the op_mode to zero the
- * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
- * the CSUM will be taken care of (TCP CSUM and IP header in case of
- * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
- * header if it is IPv4.
- * Must be atomic
- * @reclaim: free packet until ssn. Returns a list of freed packets.
- * Must be atomic
- * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
- * @txq_enable: setup a queue. To setup an AC queue, use the
- * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
- * this one. The op_mode must not configure the HCMD queue. The scheduler
- * configuration may be %NULL, in which case the hardware will not be
- * configured. If true is returned, the operation mode needs to increment
- * the sequence number of the packets routed to this queue because of a
- * hardware scheduler bug. May sleep.
- * @txq_disable: de-configure a Tx queue to send AMPDUs
- * Must be atomic
- * @txq_alloc: Allocate a new TX queue, may sleep.
- * @txq_free: Free a previously allocated TX queue.
- * @txq_set_shared_mode: change Tx queue shared/unshared marking
- * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
- * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
- * @freeze_txq_timer: prevents the timer of the queue from firing until the
- * queue is set to awake. Must be atomic.
- * @write8: write a u8 to a register at offset ofs from the BAR
- * @write32: write a u32 to a register at offset ofs from the BAR
- * @read32: read a u32 register at offset ofs from the BAR
- * @read_prph: read a DWORD from a periphery register
- * @write_prph: write a DWORD to a periphery register
- * @read_mem: read device's SRAM in DWORD
- * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
- * will be zeroed.
- * @read_config32: read a u32 value from the device's config space at
- * the given offset.
- * @configure: configure parameters required by the transport layer from
- * the op_mode. May be called several times before start_fw, can't be
- * called after that.
- * @set_pmi: set the power pmi state
- * @sw_reset: trigger software reset of the NIC
- * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
- * Sleeping is not allowed between grab_nic_access and
- * release_nic_access.
- * @release_nic_access: let the NIC go to sleep. The "flags" parameter
- * must be the same one that was sent before to the grab_nic_access.
- * @set_bits_mask: set SRAM register according to value and mask.
- * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
- * TX'ed commands and similar. The buffer will be vfree'd by the caller.
- * Note that the transport must fill in the proper file headers.
- * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
- * of the trans debugfs
- * @sync_nmi: trigger a firmware NMI and wait for it to complete
- * @load_pnvm: save the pnvm data in DRAM
- * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
- * context info.
- * @load_reduce_power: copy reduce power table to the corresponding DRAM memory
- * @set_reduce_power: set reduce power table addresses in the sratch buffer
- * @interrupts: disable/enable interrupts to transport
- * @imr_dma_data: set up IMR DMA
- * @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data
- */
-struct iwl_trans_ops {
-
- int (*start_hw)(struct iwl_trans *iwl_trans);
- void (*op_mode_leave)(struct iwl_trans *iwl_trans);
- int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
- bool run_in_rfkill);
- void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
- void (*stop_device)(struct iwl_trans *trans);
-
- int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
- int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset);
-
- int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-
- int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int queue);
- void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
- struct sk_buff_head *skbs, bool is_flush);
-
- void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
-
- bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int queue_wdg_timeout);
- void (*txq_disable)(struct iwl_trans *trans, int queue,
- bool configure_scd);
- /* 22000 functions */
- int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
- u32 sta_mask, u8 tid,
- int size, unsigned int queue_wdg_timeout);
- void (*txq_free)(struct iwl_trans *trans, int queue);
- int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data);
-
- void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
- bool shared);
-
- int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
- int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
- void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
- bool freeze);
-
- void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
- void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
- u32 (*read32)(struct iwl_trans *trans, u32 ofs);
- u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
- void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
- int (*read_mem)(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
- int (*write_mem)(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords);
- int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
- void (*configure)(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
- void (*set_pmi)(struct iwl_trans *trans, bool state);
- int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
- bool (*grab_nic_access)(struct iwl_trans *trans);
- void (*release_nic_access)(struct iwl_trans *trans);
- void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
- u32 value);
-
- struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
- u32 dump_mask,
- const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx);
- void (*debugfs_cleanup)(struct iwl_trans *trans);
- void (*sync_nmi)(struct iwl_trans *trans);
- int (*load_pnvm)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa);
- void (*set_pnvm)(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
- int (*load_reduce_power)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa);
- void (*set_reduce_power)(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
-
- void (*interrupts)(struct iwl_trans *trans, bool enable);
- int (*imr_dma_data)(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr,
- u32 byte_cnt);
-
-};
-
-/**
* enum iwl_trans_state - state of the transport layer
*
* @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
@@ -897,7 +713,9 @@ struct iwl_dma_ptr {
struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
- u32 flags;
+ u32 flags: CMD_MODE_BITS;
+ /* sg_offset is valid if it is non-zero */
+ u32 sg_offset: PAGE_SHIFT;
u32 tbs;
};
@@ -934,6 +752,7 @@ struct iwl_pcie_first_tb_buf {
* @first_tb_dma: DMA address for the first_tb_bufs start
* @entries: transmit entries (driver state)
* @lock: queue lock
+ * @reclaim_lock: lock to prevent concurrent reclaim on the queue
* @stuck_timer: timer that fires if queue gets stuck
* @trans: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
@@ -976,6 +795,8 @@ struct iwl_txq {
struct iwl_pcie_txq_entry *entries;
/* lock for syncing changes on the queue */
spinlock_t lock;
+ /* lock to prevent concurrent reclaim */
+ spinlock_t reclaim_lock;
unsigned long frozen_expiry_remainder;
struct timer_list stuck_timer;
struct iwl_trans *trans;
@@ -999,58 +820,9 @@ struct iwl_txq {
};
/**
- * struct iwl_trans_txqs - transport tx queues data
- *
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
- * @page_offs: offset from skb->cb to mac header page pointer
- * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
- * @queue_used: bit mask of used queues
- * @queue_stopped: bit mask of stopped queues
- * @txq: array of TXQ data structures representing the TXQs
- * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
- * @queue_alloc_cmd_ver: queue allocation command version
- * @bc_pool: bytecount DMA allocations pool
- * @bc_tbl_size: bytecount table size
- * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
- * (and similar usage)
- * @tfd: TFD data
- * @tfd.max_tbs: max number of buffers per TFD
- * @tfd.size: TFD size
- * @tfd.addr_size: TFD/TB address size
- */
-struct iwl_trans_txqs {
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- struct dma_pool *bc_pool;
- size_t bc_tbl_size;
- bool bc_table_dword;
- u8 page_offs;
- u8 dev_cmd_offs;
- struct iwl_tso_hdr_page __percpu *tso_hdr_page;
-
- struct {
- u8 fifo;
- u8 q_id;
- unsigned int wdg_timeout;
- } cmd;
-
- struct {
- u8 max_tbs;
- u16 size;
- u8 addr_size;
- } tfd;
-
- struct iwl_dma_ptr scd_bc_tbls;
-
- u8 queue_alloc_cmd_ver;
-};
-
-/**
* struct iwl_trans - transport common data
*
* @csme_own: true if we couldn't get ownership on the device
- * @ops: pointer to iwl_trans_ops
* @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
* @cfg: pointer to the configuration
@@ -1099,7 +871,6 @@ struct iwl_trans_txqs {
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
* @name: the device name
- * @txqs: transport tx queues data.
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
@@ -1112,7 +883,6 @@ struct iwl_trans_txqs {
*/
struct iwl_trans {
bool csme_own;
- const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg_trans_params *trans_cfg;
const struct iwl_cfg *cfg;
@@ -1169,7 +939,6 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
const char *name;
- struct iwl_trans_txqs txqs;
u32 mbx_addr_0_step;
u32 mbx_addr_1_step;
@@ -1185,101 +954,29 @@ struct iwl_trans {
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
-static inline void iwl_trans_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
-{
- trans->op_mode = trans_cfg->op_mode;
+void iwl_trans_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
- trans->ops->configure(trans, trans_cfg);
- WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
-}
+int iwl_trans_start_hw(struct iwl_trans *trans);
-static inline int iwl_trans_start_hw(struct iwl_trans *trans)
-{
- might_sleep();
+void iwl_trans_op_mode_leave(struct iwl_trans *trans);
- return trans->ops->start_hw(trans);
-}
+void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
-static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
-{
- might_sleep();
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill);
- if (trans->ops->op_mode_leave)
- trans->ops->op_mode_leave(trans);
+void iwl_trans_stop_device(struct iwl_trans *trans);
- trans->op_mode = NULL;
-
- trans->state = IWL_TRANS_NO_FW;
-}
-
-static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
-{
- might_sleep();
-
- trans->state = IWL_TRANS_FW_ALIVE;
-
- trans->ops->fw_alive(trans, scd_addr);
-}
-
-static inline int iwl_trans_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw,
- bool run_in_rfkill)
-{
- int ret;
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
- might_sleep();
+int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
+ bool test, bool reset);
- WARN_ON_ONCE(!trans->rx_mpdu_cmd);
-
- clear_bit(STATUS_FW_ERROR, &trans->status);
- ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
- if (ret == 0)
- trans->state = IWL_TRANS_FW_STARTED;
-
- return ret;
-}
-
-static inline void iwl_trans_stop_device(struct iwl_trans *trans)
-{
- might_sleep();
-
- trans->ops->stop_device(trans);
-
- trans->state = IWL_TRANS_NO_FW;
-}
-
-static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
- bool reset)
-{
- might_sleep();
- if (!trans->ops->d3_suspend)
- return -EOPNOTSUPP;
-
- return trans->ops->d3_suspend(trans, test, reset);
-}
-
-static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
-{
- might_sleep();
- if (!trans->ops->d3_resume)
- return -EOPNOTSUPP;
-
- return trans->ops->d3_resume(trans, status, test, reset);
-}
-
-static inline struct iwl_trans_dump_data *
+struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx)
-{
- if (!trans->ops->dump_data)
- return NULL;
- return trans->ops->dump_data(trans, dump_mask,
- sanitize_ops, sanitize_ctx);
-}
+ void *sanitize_ctx);
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
@@ -1295,109 +992,31 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
-static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int queue)
-{
- if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
- return -EIO;
-
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
-
- return trans->ops->tx(trans, skb, dev_cmd, queue);
-}
-
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
- int ssn, struct sk_buff_head *skbs,
- bool is_flush)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
-}
-
-static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
- int ptr)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- trans->ops->set_q_ptrs(trans, queue, ptr);
-}
-
-static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
- bool configure_scd)
-{
- trans->ops->txq_disable(trans, queue, configure_scd);
-}
+int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
-static inline bool
-iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int queue_wdg_timeout)
-{
- might_sleep();
+void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
+ struct sk_buff_head *skbs, bool is_flush);
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return false;
- }
+void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);
- return trans->ops->txq_enable(trans, queue, ssn,
- cfg, queue_wdg_timeout);
-}
+void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd);
-static inline int
-iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data)
-{
- if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
- return -EOPNOTSUPP;
+bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout);
- return trans->ops->rxq_dma_data(trans, queue, data);
-}
+int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
-static inline void
-iwl_trans_txq_free(struct iwl_trans *trans, int queue)
-{
- if (WARN_ON_ONCE(!trans->ops->txq_free))
- return;
+void iwl_trans_txq_free(struct iwl_trans *trans, int queue);
- trans->ops->txq_free(trans, queue);
-}
-
-static inline int
-iwl_trans_txq_alloc(struct iwl_trans *trans,
- u32 flags, u32 sta_mask, u8 tid,
- int size, unsigned int wdg_timeout)
-{
- might_sleep();
-
- if (WARN_ON_ONCE(!trans->ops->txq_alloc))
- return -EOPNOTSUPP;
-
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
-
- return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
- size, wdg_timeout);
-}
+int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int wdg_timeout);
-static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
- int queue, bool shared_mode)
-{
- if (trans->ops->txq_set_shared_mode)
- trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
-}
+void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int txq_id, bool shared_mode);
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
@@ -1430,78 +1049,32 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
-static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
- unsigned long txqs,
- bool freeze)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
+void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze);
- if (trans->ops->freeze_txq_timer)
- trans->ops->freeze_txq_timer(trans, txqs, freeze);
-}
+int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);
-static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
- u32 txqs)
-{
- if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
- return -EOPNOTSUPP;
+int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);
- /* No need to wait if the firmware is not alive */
- if (trans->state != IWL_TRANS_FW_ALIVE) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
+void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);
- return trans->ops->wait_tx_queues_empty(trans, txqs);
-}
+void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);
-static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
-{
- if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
- return -EOPNOTSUPP;
+u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
+u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);
- return trans->ops->wait_txq_empty(trans, queue);
-}
+void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
-static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
-{
- trans->ops->write8(trans, ofs, val);
-}
+int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
-static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
-{
- trans->ops->write32(trans, ofs, val);
-}
-
-static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
-{
- return trans->ops->read32(trans, ofs);
-}
+int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val);
-static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
-{
- return trans->ops->read_prph(trans, ofs);
-}
-
-static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
- u32 val)
-{
- return trans->ops->write_prph(trans, ofs, val);
-}
-
-static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
-{
- return trans->ops->read_mem(trans, addr, buf, dwords);
-}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
+#endif
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
do { \
@@ -1510,14 +1083,8 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
-static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr,
- u32 byte_cnt)
-{
- if (trans->ops->imr_dma_data)
- return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
- return 0;
-}
+int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
+ u64 src_addr, u32 byte_cnt);
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
@@ -1529,11 +1096,8 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
return value;
}
-static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
-{
- return trans->ops->write_mem(trans, addr, buf, dwords);
-}
+int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords);
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
u32 val)
@@ -1541,36 +1105,21 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
return iwl_trans_write_mem(trans, addr, &val, 1);
}
-static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
-{
- if (trans->ops->set_pmi)
- trans->ops->set_pmi(trans, state);
-}
+void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);
-static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
- bool retake_ownership)
-{
- if (trans->ops->sw_reset)
- return trans->ops->sw_reset(trans, retake_ownership);
- return 0;
-}
+int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership);
-static inline void
-iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
-{
- trans->ops->set_bits_mask(trans, reg, mask, value);
-}
+void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value);
+
+bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
#define iwl_trans_grab_nic_access(trans) \
__cond_lock(nic_access, \
- likely((trans)->ops->grab_nic_access(trans)))
+ likely(_iwl_trans_grab_nic_access(trans)))
-static inline void __releases(nic_access)
-iwl_trans_release_nic_access(struct iwl_trans *trans)
-{
- trans->ops->release_nic_access(trans);
- __release(nic_access);
-}
+void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans);
static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
@@ -1589,44 +1138,24 @@ static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
return trans->state == IWL_TRANS_FW_ALIVE;
}
-static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
-{
- if (trans->ops->sync_nmi)
- trans->ops->sync_nmi(trans);
-}
+void iwl_trans_sync_nmi(struct iwl_trans *trans);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit);
-static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_data,
- const struct iwl_ucode_capabilities *capa)
-{
- return trans->ops->load_pnvm(trans, pnvm_data, capa);
-}
+int iwl_trans_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ const struct iwl_ucode_capabilities *capa);
-static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->ops->set_pnvm)
- trans->ops->set_pnvm(trans, capa);
-}
+void iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
-static inline int iwl_trans_load_reduce_power
- (struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa)
-{
- return trans->ops->load_reduce_power(trans, payloads, capa);
-}
+int iwl_trans_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
-static inline void
-iwl_trans_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->ops->set_reduce_power)
- trans->ops->set_reduce_power(trans, capa);
-}
+void iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
@@ -1634,18 +1163,13 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}
-static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
-{
- if (trans->ops->interrupts)
- trans->ops->interrupts(trans, enable);
-}
+void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);
/*****************************************************
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);
@@ -1656,10 +1180,13 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
}
/*****************************************************
-* driver (transport) register/unregister functions
-******************************************************/
+ * PCIe handling
+ *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+
#endif /* __iwl_trans_h__ */
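[Editor's note] Two small layout changes accompany the header cleanup: CMD_MODE_BITS records that the CMD_MODE flags occupy five bits (CMD_SEND_IN_D3 is BIT(4)), and struct iwl_cmd_meta now packs @flags and the new @sg_offset into bit-fields sized by CMD_MODE_BITS and PAGE_SHIFT. A hedged sketch of compile-time checks one could write for those assumptions; these checks are not present in the driver:

    /* Sketch only: document the assumptions behind the new iwl_cmd_meta
     * bit-fields. CMD_SEND_IN_D3 is the highest CMD_MODE flag, BIT(4),
     * so all flags fit in CMD_MODE_BITS == 5 bits; sg_offset is an
     * offset inside a page, so PAGE_SHIFT bits are enough, and both
     * fields together still fit in a single u32.
     */
    static inline void iwl_cmd_meta_layout_checks(void)
    {
            BUILD_BUG_ON(CMD_SEND_IN_D3 >= BIT(CMD_MODE_BITS));
            BUILD_BUG_ON(CMD_MODE_BITS + PAGE_SHIFT > 32);
    }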
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
index 1f3c885aeb65..4900de3cc0d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2021-2023 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
*/
#ifndef __iwl_mei_h__
@@ -456,8 +456,11 @@ void iwl_mei_device_state(bool up);
/**
* iwl_mei_pldr_req() - must be called before loading the fw
*
- * Return: 0 if the PLDR flow was successful and the fw can be loaded, negative
- * value otherwise.
+ * Requests from the ME that it releases its potential bus access to
+ * the WiFi NIC so that the device can safely undergo product reset.
+ *
+ * Return: 0 if the request was successful and the device can be
+ * reset, a negative error value otherwise
*/
int iwl_mei_pldr_req(void);
@@ -488,7 +491,7 @@ static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_add
static inline void iwl_mei_set_country_code(u16 mcc)
{}
-static inline void iwl_mei_set_power_limit(__le16 *power_limit)
+static inline void iwl_mei_set_power_limit(const __le16 *power_limit)
{}
static inline int iwl_mei_register(void *priv,
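[Editor's note] The reworded iwl_mei_pldr_req() kerneldoc makes the contract explicit: the call must return 0 before the WiFi NIC may be product-reset and the firmware loaded. A minimal caller sketch assuming only that documented contract; load_fw() is a placeholder, not a driver function:

    /* Sketch of a caller honouring the documented contract. */
    static int example_load_with_pldr(void)
    {
            int ret = iwl_mei_pldr_req();

            if (ret)
                    return ret;     /* ME did not release the NIC; don't reset */

            return load_fw();       /* placeholder for the actual fw load */
    }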
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 3cbeaddf4358..c4c1e67b9ac7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -23,7 +23,7 @@
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
@@ -56,7 +56,6 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_HW_CSUM_DISABLE 0
-#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_ADWELL_ENABLE 1
#define IWL_MVM_ADWELL_MAX_BUDGET 0
#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
@@ -100,6 +99,7 @@
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
#define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false
+#define IWL_MVM_FTM_TEST_INCORRECT_SAC false
#define IWL_MVM_FTM_R2I_MAX_REP 7
#define IWL_MVM_FTM_I2R_MAX_REP 7
#define IWL_MVM_FTM_R2I_MAX_STS 1
@@ -114,7 +114,6 @@
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20
-#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false
#define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40
/* 20016 pSec is 6 meter RTT, meaning 3 meter range */
@@ -124,6 +123,7 @@
#define IWL_MVM_DISABLE_AP_FILS false
#define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
+#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
#define IWL_MVM_AUTO_EML_ENABLE true
#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7
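[Editor's note] Several of the tunables above are expressed in TU, where one TU is 1024 microseconds, so the new IWL_MVM_MIN_BEACON_INTERVAL_TU of 16 corresponds to roughly 16.4 ms. A tiny illustrative conversion helper, not part of the driver:

    /* Illustrative TU conversion only: 1 TU = 1024 us, so
     * IWL_MVM_MIN_BEACON_INTERVAL_TU (16) is 16 * 1024 us ~= 16.4 ms.
     */
    static inline u32 example_tu_to_usec(u32 tu)
    {
            return tu * 1024;
    }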
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 71e6b06481a9..b4d650583ac2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
void *_data)
{
struct wowlan_key_gtk_type_iter *data = _data;
+ __le32 *cipher = NULL;
+
+ if (key->keyidx == 4 || key->keyidx == 5)
+ cipher = &data->kek_kck_cmd->igtk_cipher;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ cipher = &data->kek_kck_cmd->bigtk_cipher;
switch (key->cipher) {
default:
@@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
return;
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
return;
case WLAN_CIPHER_SUITE_AES_CMAC:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
return;
case WLAN_CIPHER_SUITE_CCMP:
if (!sta)
@@ -2341,7 +2350,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
- WOWLAN_GET_STATUSES, 0) < 10) {
+ WOWLAN_GET_STATUSES,
+ IWL_FW_CMD_VER_UNKNOWN) < 10) {
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
@@ -2483,6 +2493,9 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
return;
}
+ if (mvm->fast_resume)
+ return;
+
iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
iwl_mvm_convert_gtk_v3(status, data->gtk);
iwl_mvm_convert_igtk(status, &data->igtk[0]);
@@ -3039,7 +3052,7 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_mvm_rt_status(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
@@ -3356,7 +3369,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
return ret;
}
-#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5)
+#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 3)
static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
struct iwl_d3_data *d3_data)
@@ -3367,12 +3380,22 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
+ static const u16 d3_fast_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
struct iwl_notification_wait wait_d3_notif;
int ret;
- iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
- d3_resume_notif, ARRAY_SIZE(d3_resume_notif),
- iwl_mvm_wait_d3_notif, d3_data);
+ if (mvm->fast_resume)
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_fast_resume_notif,
+ ARRAY_SIZE(d3_fast_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+ else
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_resume_notif,
+ ARRAY_SIZE(d3_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
if (ret) {
@@ -3557,6 +3580,68 @@ void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
device_set_wakeup_enable(mvm->trans->dev, enabled);
}
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n");
+
+ mvm->fast_resume = true;
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
+ WARN_ON(iwl_mvm_power_update_device(mvm));
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3,
+ sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data);
+ if (ret)
+ IWL_ERR(mvm,
+ "fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
+
+ WARN_ON(iwl_mvm_power_update_mac(mvm));
+
+ ret = iwl_trans_d3_suspend(mvm->trans, false, false);
+ if (ret)
+ IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
+}
+
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_data d3_data = {
+ .notif_expected =
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting the fast resume flow\n");
+
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
+ if (iwl_mvm_check_rt_status(mvm, NULL)) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ false, 0);
+ return -ENODEV;
+ }
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ mvm->fast_resume = false;
+
+ if (ret)
+ IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
+
+ return ret;
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 17c97dfbc62a..25f07e00db42 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -692,6 +692,42 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
+static ssize_t iwl_dbgfs_max_tx_op_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ mvmvif->max_tx_op = value;
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[10];
+ int len;
+
+ mutex_lock(&mvm->mutex);
+ len = scnprintf(buf, sizeof(buf), "%hu\n", mvmvif->max_tx_op);
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
char *buf, size_t count,
loff_t *ppos)
@@ -801,6 +837,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
@@ -830,6 +867,7 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600);
debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
&mvmvif->ftm_unprotected);
MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
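[Editor's note] The new max_tx_op debugfs file follows the usual vif-debugfs shape: kstrtou16 plus an update under mvm->mutex on write, scnprintf into a small stack buffer and simple_read_from_buffer on read. A condensed sketch of that shape with placeholder example_*() names rather than the MVM_DEBUGFS_*_FILE_OPS-generated handlers:

    /* Condensed sketch of the debugfs read/write shape used for
     * max_tx_op; on success the real write handler returns count.
     */
    static ssize_t example_u16_write(u16 *val, struct mutex *lock,
                                     const char *buf)
    {
            u16 tmp;
            int ret = kstrtou16(buf, 0, &tmp);

            if (ret)
                    return ret;

            mutex_lock(lock);
            *val = tmp;
            mutex_unlock(lock);
            return 0;
    }

    static ssize_t example_u16_read(u16 val, char __user *user_buf,
                                    size_t count, loff_t *ppos)
    {
            char buf[10];
            int len = scnprintf(buf, sizeof(buf), "%hu\n", val);

            return simple_read_from_buffer(user_buf, count, ppos, buf, len);
    }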
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 79f4ac8cbc72..91ca830a7b60 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -151,37 +151,6 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
-static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_sta *mvmsta;
- int sta_id, drain, ret;
-
- if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
- return -EIO;
-
- if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
- return -EINVAL;
- if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations)
- return -EINVAL;
- if (drain < 0 || drain > 1)
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-
- if (!mvmsta)
- ret = -ENOENT;
- else
- ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
-
- mutex_unlock(&mvm->mutex);
-
- return ret;
-}
-
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -568,193 +537,12 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
-static
-int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
- int pos, int bufsz)
-{
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
- BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
- BT_MBOX_PRINT(0, LE_PROF1, false);
- BT_MBOX_PRINT(0, LE_PROF2, false);
- BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
- BT_MBOX_PRINT(0, CHL_SEQ_N, false);
- BT_MBOX_PRINT(0, INBAND_S, false);
- BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
- BT_MBOX_PRINT(0, LE_SCAN, false);
- BT_MBOX_PRINT(0, LE_ADV, false);
- BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
- BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
- BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
- BT_MBOX_PRINT(1, IP_SR, false);
- BT_MBOX_PRINT(1, LE_MSTR, false);
- BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
- BT_MBOX_PRINT(1, MSG_TYPE, false);
- BT_MBOX_PRINT(1, SSN, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
- BT_MBOX_PRINT(2, SNIFF_ACT, false);
- BT_MBOX_PRINT(2, PAG, false);
- BT_MBOX_PRINT(2, INQUIRY, false);
- BT_MBOX_PRINT(2, CONN, false);
- BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
- BT_MBOX_PRINT(2, DISC, false);
- BT_MBOX_PRINT(2, SCO_TX_ACT, false);
- BT_MBOX_PRINT(2, SCO_RX_ACT, false);
- BT_MBOX_PRINT(2, ESCO_RE_TX, false);
- BT_MBOX_PRINT(2, SCO_DURATION, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
- BT_MBOX_PRINT(3, SCO_STATE, false);
- BT_MBOX_PRINT(3, SNIFF_STATE, false);
- BT_MBOX_PRINT(3, A2DP_STATE, false);
- BT_MBOX_PRINT(3, A2DP_SRC, false);
- BT_MBOX_PRINT(3, ACL_STATE, false);
- BT_MBOX_PRINT(3, MSTR_STATE, false);
- BT_MBOX_PRINT(3, OBX_STATE, false);
- BT_MBOX_PRINT(3, OPEN_CON_2, false);
- BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
- BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
- BT_MBOX_PRINT(3, INBAND_P, false);
- BT_MBOX_PRINT(3, MSG_TYPE_2, false);
- BT_MBOX_PRINT(3, SSN_2, false);
- BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
- return pos;
-}
-
-static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
- char *buf;
- int ret, pos = 0, bufsz = sizeof(char) * 1024;
-
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
-
- pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
- pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- pos += scnprintf(buf + pos,
- bufsz - pos, "bt_activity_grading = %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
- notif->rrc_status & 0xF);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
- notif->ttc_status & 0xF);
-
- pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
- IWL_MVM_BT_COEX_SYNC2SCO);
- pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n",
- IWL_MVM_BT_COEX_MPLUT);
-
- mutex_unlock(&mvm->mutex);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
-
- return ret;
-}
-#undef BT_MBOX_PRINT
-
-static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
- char buf[256];
- int bufsz = sizeof(buf);
- int pos = 0;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tPrimary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_primary_ci));
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tSecondary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_secondary_ci));
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t
-iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- u32 bt_tx_prio;
-
- if (sscanf(buf, "%u", &bt_tx_prio) != 1)
- return -EINVAL;
- if (bt_tx_prio > 4)
- return -EINVAL;
-
- mvm->bt_tx_prio = bt_tx_prio;
-
- return count;
-}
-
-static ssize_t
-iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- static const char * const modes_str[BT_FORCE_ANT_MAX] = {
- [BT_FORCE_ANT_DIS] = "dis",
- [BT_FORCE_ANT_AUTO] = "auto",
- [BT_FORCE_ANT_BT] = "bt",
- [BT_FORCE_ANT_WIFI] = "wifi",
- };
- int ret, bt_force_ant_mode;
-
- ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
- if (ret < 0)
- return ret;
-
- bt_force_ant_mode = ret;
- ret = 0;
- mutex_lock(&mvm->mutex);
- if (mvm->bt_force_ant_mode == bt_force_ant_mode)
- goto out;
-
- mvm->bt_force_ant_mode = bt_force_ant_mode;
- IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
- modes_str[mvm->bt_force_ant_mode]);
-
- if (iwl_mvm_firmware_running(mvm))
- ret = iwl_mvm_send_bt_init_conf(mvm);
- else
- ret = 0;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
char *buff, *pos, *endpos;
static const size_t bufsz = 1024;
- char _fw_name_pre[FW_NAME_PRE_BUFSIZE];
int ret;
buff = kmalloc(bufsz, GFP_KERNEL);
@@ -764,8 +552,8 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
pos = buff;
endpos = pos + bufsz;
- pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n",
- iwl_drv_get_fwname_pre(mvm->trans, _fw_name_pre));
+ pos += scnprintf(pos, endpos - pos, "FW id: %s\n",
+ mvm->fwrt.fw->fw_version);
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
mvm->fwrt.fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
@@ -1396,6 +1184,8 @@ static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
+ IWL_ERR(mvm, "Triggering an NMI from debugfs\n");
+
if (count == 6 && !strcmp(buf, "nolog\n"))
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
@@ -1617,6 +1407,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
&beacon_cmd.tim_size,
beacon->data, beacon->len);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ BEACON_TEMPLATE_CMD, 0) >= 14) {
+ u32 offset = iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
+
+ beacon_cmd.btwt_offset = cpu_to_le32(offset);
+ }
+
iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
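
The new code path above records where the broadcast-TWT element sits inside the beacon by scanning the frame for an element ID and storing the resulting offset in the command. A minimal standalone sketch of that kind of element scan (this is not the driver's iwl_mvm_find_ie_offset(); the element ID and the "0 means not found" convention here are illustrative assumptions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Scan a buffer of IEEE 802.11 elements (id, len, data...) and return the
 * byte offset of the first element with the given ID, or 0 if not found. */
static uint32_t ie_offset(const uint8_t *ies, size_t len, uint8_t eid)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		uint8_t id = ies[pos];
		uint8_t elen = ies[pos + 1];

		if (pos + 2 + elen > len)
			break;			/* malformed element, stop */
		if (id == eid)
			return (uint32_t)pos;
		pos += 2 + elen;
	}
	return 0;
}

int main(void)
{
	/* SSID element followed by a fake element with ID 0x2d */
	const uint8_t ies[] = { 0x00, 0x04, 't', 'e', 's', 't',
				0x2d, 0x02, 0xaa, 0xbb };

	printf("offset of 0x2d: %u\n", ie_offset(ies, sizeof(ies), 0x2d));
	return 0;
}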
@@ -2155,15 +1954,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
-MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data);
-MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
@@ -2173,8 +1969,6 @@ MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_READ_FILE_OPS(tas_get_status);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
@@ -2361,7 +2155,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
spin_lock_init(&mvm->drv_stats_lock);
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400);
@@ -2370,8 +2163,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
@@ -2379,8 +2170,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
@@ -2439,6 +2228,9 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm,
&iwl_dbgfs_mem_ops);
+ debugfs_create_bool("rx_ts_ptp", 0600, mvm->debugfs_dir,
+ &mvm->rx_ts_ptp);
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
 	 * exits (before the opmode exits, which removes the target).
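
The debugfs read handlers that remain in this file (fw_ver and friends) all build their output the same way: append formatted lines into a bounded buffer and hand the filled portion to simple_read_from_buffer(). A userspace sketch of the accumulation step, with a clamping helper standing in for the kernel's scnprintf() (the helper is mine, not a kernel API):

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for scnprintf(): returns how many bytes were actually
 * written (excluding the NUL), never more than the remaining space. */
static int scn(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0)
		return 0;
	return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
	char buf[64];
	int pos = 0, bufsz = sizeof(buf);

	pos += scn(buf + pos, bufsz - pos, "FW id: %s\n", "89.abcdef01.0");
	pos += scn(buf + pos, bufsz - pos, "Device: %s\n", "AX211");
	fwrite(buf, 1, pos, stdout);	/* the kernel handler passes these
					 * 'pos' bytes to
					 * simple_read_from_buffer() */
	return 0;
}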
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 72a3d71f46f0..afd90a52d4ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -40,6 +40,12 @@ struct iwl_mvm_ftm_pasn_entry {
u32 flags;
};
+struct iwl_mvm_ftm_iter_data {
+ u8 *cipher;
+ u8 *bssid;
+ u8 *tk;
+};
+
int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
u8 *hltk, u32 hltk_len)
@@ -431,47 +437,55 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
return 0;
}
-#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
+#define FTM_SET_FLAG(flag) (*flags |= \
cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
static void
-iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v6 *target)
+iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ __le32 *flags)
{
- memcpy(target->bssid, peer->addr, ETH_ALEN);
- target->burst_period =
- cpu_to_le16(peer->ftm.burst_period);
- target->samples_per_burst = peer->ftm.ftms_per_burst;
- target->num_of_bursts = peer->ftm.num_bursts_exp;
- target->ftmr_max_retries = peer->ftm.ftmr_retries;
- target->initiator_ap_flags = cpu_to_le32(0);
+ *flags = cpu_to_le32(0);
if (peer->ftm.asap)
- FTM_PUT_FLAG(ASAP);
+ FTM_SET_FLAG(ASAP);
if (peer->ftm.request_lci)
- FTM_PUT_FLAG(LCI_REQUEST);
+ FTM_SET_FLAG(LCI_REQUEST);
if (peer->ftm.request_civicloc)
- FTM_PUT_FLAG(CIVIC_REQUEST);
+ FTM_SET_FLAG(CIVIC_REQUEST);
if (IWL_MVM_FTM_INITIATOR_DYNACK)
- FTM_PUT_FLAG(DYN_ACK);
+ FTM_SET_FLAG(DYN_ACK);
if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
- FTM_PUT_FLAG(ALGO_LR);
+ FTM_SET_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
- FTM_PUT_FLAG(ALGO_FFT);
+ FTM_SET_FLAG(ALGO_FFT);
if (peer->ftm.trigger_based)
- FTM_PUT_FLAG(TB);
+ FTM_SET_FLAG(TB);
else if (peer->ftm.non_trigger_based)
- FTM_PUT_FLAG(NON_TB);
+ FTM_SET_FLAG(NON_TB);
if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
peer->ftm.lmr_feedback)
- FTM_PUT_FLAG(LMR_FEEDBACK);
+ FTM_SET_FLAG(LMR_FEEDBACK);
+}
+
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ target->ftmr_max_retries = peer->ftm.ftmr_retries;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
}
static int
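
FTM_PUT_FLAG could only write into one specific target struct; FTM_SET_FLAG ORs into whatever flags word the caller passes, which lets the same helpers serve both the v6 and the new v10 target layouts. A standalone sketch of that macro shape, with plain uint32_t standing in for __le32 and made-up flag names:

#include <stdint.h>
#include <stdio.h>

#define AP_FLAG_ASAP		(1u << 0)
#define AP_FLAG_LCI_REQUEST	(1u << 1)

/* Mirrors FTM_SET_FLAG(): relies on a local flags pointer named 'flags'
 * being in scope at the expansion site. */
#define SET_FLAG(flag) (*flags |= AP_FLAG_##flag)

static void set_target_flags(uint32_t *flags, int asap, int want_lci)
{
	*flags = 0;
	if (asap)
		SET_FLAG(ASAP);
	if (want_lci)
		SET_FLAG(LCI_REQUEST);
}

int main(void)
{
	uint32_t flags;

	set_target_flags(&flags, 1, 1);
	printf("flags = 0x%x\n", flags);	/* prints 0x3 */
	return 0;
}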
@@ -514,21 +528,10 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
return 0;
}
-static int
-iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v6 *target)
+static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *sta_id, __le32 *flags)
{
- int ret;
-
- ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
- &target->format_bw,
- &target->ctrl_ch_position);
- if (ret)
- return ret;
-
- iwl_mvm_ftm_put_target_common(mvm, peer, target);
-
if (vif->cfg.assoc) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *sta;
@@ -540,8 +543,8 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
continue;
- target->sta_id = mvmvif->link[link_id]->ap_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
+ *sta_id = mvmvif->link[link_id]->ap_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return PTR_ERR_OR_ZERO(sta);
@@ -549,23 +552,42 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (sta->mfp && (peer->ftm.trigger_based ||
peer->ftm.non_trigger_based))
- FTM_PUT_FLAG(PMF);
+ FTM_SET_FLAG(PMF);
break;
}
rcu_read_unlock();
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->ftm_unprotected) {
- target->sta_id = IWL_MVM_INVALID_STA;
- target->initiator_ap_flags &=
- ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
+ *sta_id = IWL_MVM_INVALID_STA;
+ *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
}
-
#endif
} else {
- target->sta_id = IWL_MVM_INVALID_STA;
+ *sta_id = IWL_MVM_INVALID_STA;
}
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
+
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+
/*
* TODO: Beacon interval is currently unknown, so use the common value
* of 100 TUs.
@@ -703,27 +725,24 @@ static void iter(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
void *data)
{
- struct iwl_tof_range_req_ap_entry_v6 *target = data;
+ struct iwl_mvm_ftm_iter_data *target = data;
if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
return;
WARN_ON(!sta->mfp);
- if (WARN_ON(key->keylen > sizeof(target->tk)))
- return;
-
- memcpy(target->tk, key->key, key->keylen);
- target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
- WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
+ target->tk = key->key;
+ *target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
+ WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
}
static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_tof_range_req_ap_entry_v7 *target)
+ u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
+ u8 *rx_pn, u8 *tx_pn, __le32 *flags)
{
struct iwl_mvm_ftm_pasn_entry *entry;
- u32 flags = le32_to_cpu(target->initiator_ap_flags);
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -731,35 +750,37 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return;
#endif
- if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
+ if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
IWL_INITIATOR_AP_FLAGS_TB)))
return;
lockdep_assert_held(&mvm->mutex);
list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
- if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
+ if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
continue;
- target->cipher = entry->cipher;
+ *cipher = entry->cipher;
if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
- memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
+ memcpy(hltk, entry->hltk, sizeof(entry->hltk));
else
- memset(target->hltk, 0, sizeof(target->hltk));
+ memset(hltk, 0, sizeof(entry->hltk));
if (vif->cfg.assoc &&
- !memcmp(vif->bss_conf.bssid, target->bssid,
- sizeof(target->bssid)))
- ieee80211_iter_keys(mvm->hw, vif, iter, target);
- else
- memcpy(target->tk, entry->tk, sizeof(target->tk));
+ !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
+ struct iwl_mvm_ftm_iter_data target;
+
+ target.bssid = bssid;
+ ieee80211_iter_keys(mvm->hw, vif, iter, &target);
+ } else {
+ memcpy(tk, entry->tk, sizeof(entry->tk));
+ }
- memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
- memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
+ memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
+ memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));
- target->initiator_ap_flags |=
- cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
+ FTM_SET_FLAG(SECURED);
return;
}
}
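
The reworked secured-ranging helper takes raw output pointers (cipher, HLTK, TK, packet numbers, flags) instead of a concrete target struct, and fills them from the PASN entry whose address matches the peer. A standalone sketch of that match-and-copy walk; the struct layout, field sizes and names are illustrative, not the firmware ABI:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define HLTK_LEN 32

struct pasn_entry {
	uint8_t addr[ETH_ALEN];
	uint8_t cipher;
	uint8_t hltk[HLTK_LEN];
};

/* Find the entry whose address matches 'bssid' and copy its security
 * material into the caller-provided buffers. Returns 0 on success. */
static int fill_secured_ranging(const struct pasn_entry *list, size_t n,
				const uint8_t *bssid,
				uint8_t *cipher, uint8_t *hltk)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (memcmp(list[i].addr, bssid, ETH_ALEN))
			continue;
		*cipher = list[i].cipher;
		memcpy(hltk, list[i].hltk, HLTK_LEN);
		return 0;
	}
	return -1;	/* no PASN state for this peer */
}

int main(void)
{
	struct pasn_entry list[1] = {
		{ .addr = { 1, 2, 3, 4, 5, 6 }, .cipher = 2 },
	};
	uint8_t bssid[ETH_ALEN] = { 1, 2, 3, 4, 5, 6 };
	uint8_t cipher, hltk[HLTK_LEN];
	int ret = fill_secured_ranging(list, 1, bssid, &cipher, hltk);

	printf("found: %d, cipher: %u\n", ret, cipher);
	return 0;
}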
@@ -773,7 +794,11 @@ iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (err)
return err;
- iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
return err;
}
@@ -920,6 +945,105 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
+static int
+iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v10 *target)
+{
+ u32 i2r_max_sts, flags;
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
+
+ i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MVM_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+ target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+
+ if (peer->ftm.non_trigger_based) {
+ target->min_time_between_msr =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ target->burst_period =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ } else {
+ target->min_time_between_msr = cpu_to_le16(0);
+ }
+
+ target->band =
+ iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
+
+ /*
+ * TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+
+ /*
+ * If secure LTF is turned off, replace the flag with PMF only
+ */
+ flags = le32_to_cpu(target->initiator_ap_flags);
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
+ flags |= IWL_INITIATOR_AP_FLAGS_PMF;
+ target->initiator_ap_flags = cpu_to_le32(flags);
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v14 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
@@ -938,6 +1062,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
+ case 14:
+ err = iwl_mvm_ftm_start_v14(mvm, vif, req);
+ break;
case 13:
err = iwl_mvm_ftm_start_v13(mvm, vif, req);
break;
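
The v10 target path above also packs the R2I/I2R NDP parameters into single bytes: maximum repetitions in the low bits, maximum STS and total LTF count shifted to fixed positions, with the I2R STS value clamped to 1 first. A standalone sketch of that packing; the bit positions below are assumptions for illustration, not the driver's IWL_LOCATION_*_POS values:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: bits 0-2 = max repetitions, 3-4 = max STS, 5-7 = total LTF */
#define MAX_STS_POS	3
#define TOTAL_LTF_POS	5

static uint8_t pack_ndp_params(uint8_t max_rep, uint8_t max_sts,
			       uint8_t total_ltf)
{
	return max_rep |
	       (max_sts << MAX_STS_POS) |
	       (total_ltf << TOTAL_LTF_POS);
}

int main(void)
{
	uint8_t i2r_max_sts = 3;

	/* the driver clamps the configured I2R STS to 1 before packing */
	if (i2r_max_sts > 1)
		i2r_max_sts = 1;

	printf("i2r_ndp_params = 0x%02x\n", pack_ndp_params(2, i2r_max_sts, 4));
	return 0;
}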
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 8e760300a1ab..e4caa362f597 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <net/cfg80211.h>
#include <linux/etherdevice.h>
@@ -88,7 +88,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
static void
iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm,
- struct iwl_tof_responder_config_cmd_v9 *cmd)
+ struct iwl_tof_responder_config_cmd *cmd)
{
/* Up to 2 R2I STS are allowed on the responder */
u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ?
@@ -117,7 +117,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
 * field interpretation is different), so the same struct can be used
* for all cases.
*/
- struct iwl_tof_responder_config_cmd_v9 cmd = {
+ struct iwl_tof_responder_config_cmd cmd = {
.channel_num = chandef->chan->hw_value,
.cmd_valid_fields =
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
@@ -131,8 +131,13 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (cmd_ver == 10) {
+ cmd.band =
+ iwl_mvm_phy_band_from_nl80211(chandef->chan->band);
+ }
+
/* Use a default of bss_color=1 for now */
- if (cmd_ver == 9) {
+ if (cmd_ver >= 9) {
cmd.cmd_valid_fields |=
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR |
IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR);
@@ -148,7 +153,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
}
if (cmd_ver >= 8)
- iwl_mvm_ftm_responder_set_ndp(mvm, &cmd);
+ iwl_mvm_ftm_responder_set_ndp(mvm, (void *)&cmd);
if (cmd_ver >= 7)
err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e7f5978ef2d7..08c4898c8f1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -28,9 +28,6 @@
#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
-#define IWL_UATS_VLP_AP_SUPPORTED BIT(29)
-#define IWL_UATS_AFC_AP_SUPPORTED BIT(30)
-
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@@ -94,20 +91,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
- __le32 *dump_data = mfu_dump_notif->data;
- int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
- int i;
if (mfu_dump_notif->index_num == 0)
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
le32_to_cpu(mfu_dump_notif->assert_id));
-
- for (i = 0; i < n_words; i++)
- IWL_DEBUG_INFO(mvm,
- "MFUART assert dump, dword %u: 0x%08x\n",
- le16_to_cpu(mfu_dump_notif->index_num) *
- n_words + i,
- le32_to_cpu(dump_data[i]));
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
@@ -418,7 +405,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
UREG_LMAC2_CURRENT_PC));
}
- if (ret == -ETIMEDOUT && !mvm->pldr_sync)
+ if (ret == -ETIMEDOUT && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
@@ -470,12 +457,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
#endif
/*
+ * For pre-MLD API (MLD API doesn't use the timestamps):
* All the BSSes in the BSS table include the GP2 in the system
* at the beacon Rx time, this is of course no longer relevant
* since we are resetting the firmware.
* Purge all the BSS table.
*/
- cfg80211_bss_flush(mvm->hw->wiphy);
+ if (!mvm->mld_api_is_used)
+ cfg80211_bss_flush(mvm->hw->wiphy);
return 0;
}
@@ -501,17 +490,11 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
- if (!(mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210)) {
+ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
return;
}
- if (!mvm->fwrt.uats_enabled) {
- IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n");
- return;
- }
-
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 1) {
@@ -523,7 +506,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
if (ret < 0) {
- IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret);
+ IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
return;
}
@@ -637,8 +620,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
/* if needed, we'll reset this on our way out later */
- mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM;
- if (mvm->pldr_sync && iwl_mei_pldr_req())
+ mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
+ if (mvm->fw_product_reset && iwl_mei_pldr_req())
return -EBUSY;
}
@@ -657,7 +640,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
/* if we needed reset then fail here, but notify and remove */
- if (mvm->pldr_sync) {
+ if (mvm->fw_product_reset) {
iwl_mei_alive_notif(false);
iwl_trans_pcie_remove(mvm->trans, true);
}
@@ -696,14 +679,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
goto error;
}
- if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
- ret = iwl_nvm_init(mvm);
- if (ret) {
- IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
- goto error;
- }
- }
-
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
NVM_ACCESS_COMPLETE),
CMD_SEND_IN_RFKILL,
@@ -728,7 +703,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
return ret;
/* Read the NVM only at driver load time, no need to do this twice */
- if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
+ if (!mvm->nvm_data) {
mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
mvm->set_tx_ant, mvm->set_rx_ant);
if (IS_ERR(mvm->nvm_data)) {
@@ -853,7 +828,7 @@ remove_notif:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
mvm->rfkill_safe_init_done = false;
- if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
+ if (!mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
sizeof(struct ieee80211_channel) +
@@ -895,8 +870,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+
if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
@@ -1241,10 +1216,6 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
ret);
}
-
- if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
- le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
- mvm->fwrt.uats_enabled = true;
}
void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
@@ -1386,9 +1357,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
-
- if (iwlmvm_mod_params.init_dbg)
- return 0;
return ret;
}
@@ -1425,14 +1393,14 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- if (ret != -ERFKILL && !mvm->pldr_sync)
+ if (ret != -ERFKILL && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}
/* FW loaded successfully */
- mvm->pldr_sync = false;
+ mvm->fw_product_reset = false;
iwl_fw_disable_dbg_asserts(&mvm->fwrt);
iwl_get_shared_mem_conf(&mvm->fwrt);
@@ -1499,8 +1467,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++)
RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);
- memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map));
-
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
/* reset quota debouncing buffer - 0xff will yield invalid data */
@@ -1629,8 +1595,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
- if (!iwlmvm_mod_params.init_dbg || !ret)
- iwl_mvm_stop_device(mvm);
+ iwl_mvm_stop_device(mvm);
return ret;
}
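
Several command builders in fw.c pick the payload length and table sizes from the command version the firmware advertises (the SAR profile command now defaults the lookup to version 3 and still branches on version 7 and up). A standalone sketch of that version-to-size selection; the struct layouts and version numbers are illustrative only:

#include <stddef.h>
#include <stdio.h>

struct cmd_v3 { unsigned char table[5]; };
struct cmd_v7 { unsigned char table[11]; unsigned int flags; };

union cmd { struct cmd_v3 v3; struct cmd_v7 v7; };

/* Pick how much of the union to send based on the command version the
 * firmware advertises; callers fall back to the oldest supported version. */
static size_t cmd_len(int cmd_ver, size_t *n_subbands)
{
	if (cmd_ver >= 7) {
		*n_subbands = 11;
		return sizeof(struct cmd_v7);
	}
	*n_subbands = 5;
	return sizeof(struct cmd_v3);
}

int main(void)
{
	size_t n_subbands;
	size_t len = cmd_len(3, &n_subbands);

	printf("len=%zu subbands=%zu\n", len, n_subbands);
	return 0;
}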
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 6ec9a8e21a34..a9929aa49913 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -11,6 +11,7 @@
HOW(BLOCKED_TPT) \
HOW(BLOCKED_FW) \
HOW(BLOCKED_NON_BSS) \
+ HOW(BLOCKED_ROC) \
HOW(EXIT_MISSED_BEACON) \
HOW(EXIT_LOW_RSSI) \
HOW(EXIT_COEX) \
@@ -50,26 +51,15 @@ static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvm_vif)
{
- u32 link_id;
+ u32 i;
lockdep_assert_held(&mvm->mutex);
- link_id = ffz(mvm->fw_link_ids_map);
+ for (i = 0; i < ARRAY_SIZE(mvm->link_id_to_link_conf); i++)
+ if (!rcu_access_pointer(mvm->link_id_to_link_conf[i]))
+ return i;
- /* this case can happen if there're deactivated but not removed links */
- if (link_id > IWL_MVM_FW_MAX_LINK_ID)
- return IWL_MVM_FW_LINK_ID_INVALID;
-
- mvm->fw_link_ids_map |= BIT(link_id);
- return link_id;
-}
-
-static void iwl_mvm_release_fw_link_id(struct iwl_mvm *mvm, u32 link_id)
-{
- lockdep_assert_held(&mvm->mutex);
-
- if (!WARN_ON(link_id > IWL_MVM_FW_MAX_LINK_ID))
- mvm->fw_link_ids_map &= ~BIT(link_id);
+ return IWL_MVM_FW_LINK_ID_INVALID;
}
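
With the fw_link_ids_map bitmap gone, a free firmware link ID is simply the first slot of link_id_to_link_conf[] that has no RCU pointer installed, so there is nothing left to release when a link is removed. A standalone sketch of that first-free-slot scan:

#include <stdio.h>

#define MAX_LINKS	4
#define INVALID_LINK	0xff

static const void *link_table[MAX_LINKS];

/* Return the index of the first unused slot, or INVALID_LINK if every
 * entry in the table is already taken. */
static unsigned int get_free_link_id(void)
{
	unsigned int i;

	for (i = 0; i < MAX_LINKS; i++)
		if (!link_table[i])
			return i;
	return INVALID_LINK;
}

int main(void)
{
	int dummy;

	link_table[0] = &dummy;				/* slot 0 in use */
	printf("free id: %u\n", get_free_link_id());	/* prints 1 */
	return 0;
}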
static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
@@ -380,7 +370,6 @@ int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
NULL);
- iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
return 0;
}
@@ -504,17 +493,27 @@ iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
static unsigned int
iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
{
+ struct ieee80211_vif *vif = link_conf->vif;
struct iwl_mvm_vif_link_info *mvm_link =
iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
const struct element *bss_load_elem;
const struct ieee80211_bss_load_elem *bss_load;
enum nl80211_band band = link_conf->chanreq.oper.chan->band;
+ const struct cfg80211_bss_ies *ies;
unsigned int chan_load;
u32 chan_load_by_us;
rcu_read_lock();
- bss_load_elem = ieee80211_bss_get_elem(link_conf->bss,
- WLAN_EID_QBSS_LOAD);
+ if (ieee80211_vif_link_active(vif, link_conf->link_id))
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
+ else
+ ies = rcu_dereference(link_conf->bss->ies);
+
+ if (ies)
+ bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
+ ies->data, ies->len);
+ else
+ bss_load_elem = NULL;
/* If there isn't BSS Load element, take the defaults */
if (!bss_load_elem ||
@@ -978,6 +977,9 @@ void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
/* Nothing to do */
if (!mvmvif->esr_active)
return;
@@ -1025,19 +1027,24 @@ void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
/* This should be called only with disable reasons */
if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
return;
- if (!(mvmvif->esr_disable_reason & reason)) {
- IWL_DEBUG_INFO(mvm,
- "Blocking EMLSR mode. reason = %s (0x%x)\n",
- iwl_get_esr_state_string(reason), reason);
- iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
- }
+ if (mvmvif->esr_disable_reason & reason)
+ return;
+
+ IWL_DEBUG_INFO(mvm,
+ "Blocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
mvmvif->esr_disable_reason |= reason;
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
}
@@ -1082,6 +1089,15 @@ static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
+ /* If we exited due to an EXIT reason and the exit happened less than
+ * 30 seconds ago, an MLO scan was already scheduled.
+ */
+ if (!need_new_sel &&
+ !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
+ IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
+ return;
+ }
+
/*
* If EMLSR was blocked for more than 30 seconds, or the last link
* selection decided to not enter EMLSR, trigger a new scan.
@@ -1111,6 +1127,9 @@ void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
/* This should be called only with disable reasons */
if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
return;
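
EMLSR blocking is tracked as a bitmask of reasons: blocking sets a bit (and exits EMLSR if it was active), unblocking clears it, and both paths now bail out early when automatic EML handling is disabled. A standalone sketch of that reason-bitmask bookkeeping, with made-up reason values:

#include <stdint.h>
#include <stdio.h>

#define BLOCKED_TPT	(1u << 0)
#define BLOCKED_ROC	(1u << 1)

static uint32_t esr_disable_reason;

static void block_esr(uint32_t reason)
{
	if (esr_disable_reason & reason)
		return;			/* already blocked for this reason */

	printf("blocking EMLSR, reason 0x%x\n", reason);
	esr_disable_reason |= reason;
	/* the driver also exits EMLSR here if it was active */
}

static void unblock_esr(uint32_t reason)
{
	if (!(esr_disable_reason & reason))
		return;

	esr_disable_reason &= ~reason;
	if (!esr_disable_reason)
		printf("EMLSR is unblocked\n");
}

int main(void)
{
	block_esr(BLOCKED_ROC);
	block_esr(BLOCKED_TPT);
	unblock_esr(BLOCKED_ROC);
	unblock_esr(BLOCKED_TPT);	/* last reason cleared -> unblocked */
	return 0;
}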
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 5a06f887769a..dfcc96f18b4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -296,6 +296,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvmvif->time_event_data.list);
mvmvif->time_event_data.id = TE_MAX;
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
@@ -873,7 +874,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
{
struct ieee80211_mgmt *mgmt = (void *)beacon;
const u8 *ie;
@@ -1010,12 +1011,13 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
tx->tx_flags = cpu_to_le32(tx_flags);
if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION))
+ IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
- tx->rate_n_flags =
- cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
- RATE_MCS_ANT_POS);
+ tx->rate_n_flags =
+ cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+ RATE_MCS_ANT_POS);
+ }
rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 486a6b8f3c97..835a05b91833 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -22,7 +22,7 @@
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "fw/error-dump.h"
@@ -30,21 +30,28 @@
#include "iwl-nvm-parse.h"
#include "time-sync.h"
+#define IWL_MVM_LIMITS(ap) \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_STATION), \
+ }, \
+ { \
+ .max = 1, \
+ .types = ap | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_P2P_GO), \
+ }, \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
+ }
+
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
+ IWL_MVM_LIMITS(0)
+};
+
+static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = {
+ IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP))
};
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
@@ -54,6 +61,12 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
.limits = iwl_mvm_limits,
.n_limits = ARRAY_SIZE(iwl_mvm_limits),
},
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits_ap,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap),
+ },
};
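
The interface-combination limits are now generated from a single macro so the driver can register two variants: the original combination and a new one that also allows an AP interface next to station and P2P. A standalone sketch of building both tables from one macro, with plain bitmasks standing in for the NL80211 interface types:

#include <stdio.h>

struct iface_limit {
	int max;
	unsigned int types;
};

#define TYPE_STATION	(1u << 0)
#define TYPE_AP		(1u << 1)
#define TYPE_P2P_CLIENT	(1u << 2)
#define TYPE_P2P_GO	(1u << 3)
#define TYPE_P2P_DEVICE	(1u << 4)

/* One macro body, two expansions: with and without the AP bit. */
#define LIMITS(ap)							\
	{ .max = 1, .types = TYPE_STATION },				\
	{ .max = 1, .types = (ap) | TYPE_P2P_CLIENT | TYPE_P2P_GO },	\
	{ .max = 1, .types = TYPE_P2P_DEVICE }

static const struct iface_limit limits[] = { LIMITS(0) };
static const struct iface_limit limits_ap[] = { LIMITS(TYPE_AP) };

int main(void)
{
	printf("%zu limits, second entry types (no AP): 0x%x\n",
	       sizeof(limits) / sizeof(limits[0]), limits[1].types);
	printf("second entry types (with AP): 0x%x\n", limits_ap[1].types);
	return 0;
}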
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
@@ -138,8 +151,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- le32_to_cpu(resp->cap), resp_ver,
- mvm->fwrt.uats_enabled);
+ le32_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
if (IS_ERR_OR_NULL(regd)) {
@@ -360,7 +372,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
!iwlwifi_mod_params.disable_11ax &&
!iwlwifi_mod_params.disable_11be) {
- hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
/* we handle this already earlier, but need it for MLO */
ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
}
@@ -371,12 +383,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (!mvm->mld_api_is_used)
ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
- /* We should probably have this, but mac80211
- * currently doesn't support it for MLO.
- */
- if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
- ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
-
/*
* On older devices, enabling TX A-MSDU occasionally leads to
* something getting messed up, the command read from the FIFO
@@ -579,13 +585,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
- BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
- IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+ BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+ IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+ mvm->max_scans = IWL_MAX_UMAC_SCANS;
else
- mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+ mvm->max_scans = IWL_MAX_LMAC_SCANS;
if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
hw->wiphy->bands[NL80211_BAND_2GHZ] =
@@ -654,7 +660,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
- IWL_FW_CMD_VER_UNKNOWN) == 3)
+ IWL_FW_CMD_VER_UNKNOWN) >= 3)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
if (fw_has_api(&mvm->fw->ucode_capa,
@@ -727,8 +733,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
- mvm->trans->ops->d3_suspend &&
- mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -823,7 +827,7 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
}
if (offchannel &&
- !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
+ !test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@@ -1104,6 +1108,8 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+
mvmvif->bf_enabled = false;
mvmvif->ba_enabled = false;
mvmvif->ap_sta = NULL;
@@ -1128,6 +1134,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
}
+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct ieee80211_vif *vif;
+ int link_id;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvm_sta->vif;
+
+ if (!sta->valid_links)
+ return;
+
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
+
+ mvm_link_sta =
+ rcu_dereference_check(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+ /*
+ * We have a link STA but the link is inactive in
+ * mac80211. This will happen if we failed to
+ * deactivate the link but mac80211 rolled back the
+ * deactivation of the link.
+ * Delete the stale data to avoid issues later on.
+ */
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+ link_id, false);
+ }
+ }
+}
+
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
iwl_mvm_stop_device(mvm);
@@ -1150,6 +1189,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
*/
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+ /* cleanup stations as links may be gone after restart */
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_cleanup_sta_iterator, mvm);
+
mvm->p2p_device_vif = NULL;
iwl_mvm_reset_phy_ctxts(mvm);
@@ -1172,6 +1215,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
+ bool fast_resume = false;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1197,6 +1241,30 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
+#ifdef CONFIG_PM
+ /* fast_resume will be cleared by iwl_mvm_fast_resume */
+ fast_resume = mvm->fast_resume;
+
+ if (fast_resume) {
+ ret = iwl_mvm_fast_resume(mvm);
+ if (ret) {
+ iwl_mvm_stop_device(mvm);
+ /* iwl_mvm_up() will be called further down */
+ } else {
+ /*
+ * We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon
+ * mac_down() so that debugfs will stop honoring
+ * requests after we flush all the workers.
+ * Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again
+ * now that we are back. This abuses the flag a bit,
+ * since the firmware was never really stopped, but it
+ * still serves the purpose.
+ */
+ set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ }
+ }
+#endif /* CONFIG_PM */
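
The start path now tries a firmware fast resume first and only falls back to a full iwl_mvm_up() load when that attempt fails (the device is stopped and ret is checked again further down). A standalone sketch of that try-fast-then-fall-back control flow:

#include <stdio.h>

static int try_fast_resume(int should_fail)
{
	return should_fail ? -1 : 0;
}

static int full_firmware_load(void)
{
	puts("full firmware load");
	return 0;
}

/* Mirrors the shape of __iwl_mvm_mac_start(): attempt fast resume, fall
 * back to the regular bring-up if it did not work. */
static int mac_start(int fast_resume, int fast_fails)
{
	int ret = 0;

	if (fast_resume) {
		ret = try_fast_resume(fast_fails);
		if (ret)
			puts("fast resume failed, stopping device");
	}

	if (!fast_resume || ret)
		ret = full_firmware_load();

	return ret;
}

int main(void)
{
	mac_start(1, 1);	/* fast resume fails -> full load */
	mac_start(1, 0);	/* fast resume works -> no full load */
	return 0;
}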
+
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
@@ -1207,7 +1275,10 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
/* Clean up some internal and mac80211 state on restart */
iwl_mvm_restart_cleanup(mvm);
}
- ret = iwl_mvm_up(mvm);
+
+ /* we also want to load the firmware if fast_resume failed */
+ if (!fast_resume || ret)
+ ret = iwl_mvm_up(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
NULL);
@@ -1258,7 +1329,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
@@ -1274,8 +1345,6 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
* of packets the FW sent out, so we must reconnect.
*/
iwl_mvm_teardown_tdls_peers(mvm);
-
- mutex_unlock(&mvm->mutex);
}
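
Throughout this file, mutex_lock()/mutex_unlock() pairs are replaced with guard(mvm)(mvm), which releases the mutex automatically when the enclosing scope ends; this looks like the kernel's cleanup.h scope guards, assuming a guard class for the mvm mutex is defined elsewhere in the driver. The same idea, modelled in standalone C with the compiler's cleanup attribute:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
	puts("unlocked on scope exit");
}

/* Poor man's guard(): lock now, unlock automatically when the hidden
 * variable goes out of scope, even on early return. One guard per scope,
 * since the variable name is fixed. */
#define GUARD(m)							\
	__attribute__((cleanup(unlock_cleanup), unused))		\
	pthread_mutex_t *scope_guard = (pthread_mutex_lock(m), (m))

static int do_work(int fail_early)
{
	GUARD(&lock);

	if (fail_early)
		return -1;	/* unlock still happens */

	puts("did work under the lock");
	return 0;
}

int main(void)
{
	do_work(1);
	do_work(0);
	return 0;
}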
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
@@ -1292,7 +1361,7 @@ void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
}
}
-void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
{
lockdep_assert_held(&mvm->mutex);
@@ -1308,7 +1377,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
if (!iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
- iwl_mvm_stop_device(mvm);
+ if (suspend &&
+ mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_mvm_fast_suspend(mvm);
+ else
+ iwl_mvm_stop_device(mvm);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -1341,7 +1414,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
}
}
-void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -1377,7 +1450,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
iwl_mvm_mei_set_sw_rfkill_state(mvm);
mutex_lock(&mvm->mutex);
- __iwl_mvm_mac_stop(mvm);
+ __iwl_mvm_mac_stop(mvm, suspend);
mutex_unlock(&mvm->mutex);
/*
@@ -1619,11 +1692,11 @@ static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
struct iwl_mvm_vif *mvmvif =
container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
@@ -1633,11 +1706,8 @@ static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
struct ieee80211_vif *vif =
container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
- mutex_lock(&mvmvif->mvm->mutex);
-
+ guard(mvm)(mvmvif->mvm);
iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
-
- mutex_unlock(&mvmvif->mvm->mutex);
}
static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
@@ -1645,11 +1715,11 @@ static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
struct iwl_mvm_vif *mvmvif =
container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
@@ -1824,12 +1894,8 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
cancel_delayed_work_sync(&mvmvif->csa_work);
}
-/* This function is doing the common part of removing the interface for
- * both - MLD and non-MLD modes. Returns true if removing the interface
- * is done
- */
-static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1877,21 +1943,10 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
- return true;
+ goto out;
}
iwl_mvm_power_update_mac(mvm);
- return false;
-}
-
-static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (iwl_mvm_mac_remove_interface_common(hw, vif))
- goto out;
/* Before the interface removal, mac80211 would cancel the ROC, and the
* ROC worker would be scheduled if needed. The worker would be flushed
@@ -2039,7 +2094,7 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* replace previous configuration */
kfree(mvm->mcast_filter_cmd);
@@ -2056,7 +2111,6 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
iwl_mvm_recalc_multicast(mvm);
out:
- mutex_unlock(&mvm->mutex);
*total_flags = 0;
}
@@ -2076,9 +2130,8 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
!vif->p2p)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2740,6 +2793,13 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_update_link_smps(vif, link_conf);
+
+ if (changes & BSS_CHANGED_TPE) {
+ IWL_DEBUG_CALIB(mvm, "Changing TPE\n");
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+ }
}
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
@@ -2789,6 +2849,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
for_each_mvm_vif_valid_link(mvmvif, i)
@@ -3123,7 +3185,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_ap_ibss_common(mvm, vif);
@@ -3153,8 +3215,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_stop_ap(struct ieee80211_hw *hw,
@@ -3209,7 +3269,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
@@ -3236,25 +3296,19 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
-
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
if (hw_req->req.n_channels == 0 ||
hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
}
void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -3262,7 +3316,7 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* Due to a race condition, it's possible that mac80211 asks
* us to stop a hw_scan when it's already stopped. This can
@@ -3273,8 +3327,6 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
*/
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
-
- mutex_unlock(&mvm->mutex);
}
void
@@ -3443,7 +3495,7 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
* Since there's mvm->mutex here, no need to have RCU lock for
* mvm_sta->link access.
*/
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) {
struct iwl_mvm_link_sta *link_sta;
u32 sta_id;
@@ -3460,7 +3512,6 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
}
}
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -3736,8 +3787,6 @@ static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm,
}
}
-#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
-
static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -4207,12 +4256,8 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
* The exception is P2P_DEVICE interface which needs immediate update.
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
return 0;
}
@@ -4221,11 +4266,14 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_prep_tx_info *info)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ if (info->was_assoc && !mvmvif->session_prot_connection_loss)
+ return;
+
+ guard(mvm)(mvm);
iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id);
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
@@ -4238,9 +4286,8 @@ void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
if (info->success)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_session_protection(mvm, vif);
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
@@ -4250,20 +4297,12 @@ int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
- if (!vif->cfg.idle) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
+ if (!vif->cfg.idle)
+ return -EBUSY;
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
}
int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
@@ -4541,13 +4580,9 @@ int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
}
void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -4758,7 +4793,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
- } else if (fw_ver == 3) {
+ } else if (fw_ver >= 3) {
ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
ROC_ACTIVITY_HOTSPOT);
} else {
@@ -4769,6 +4804,37 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
return ret;
}
+static int iwl_mvm_roc_p2p(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ enum iwl_roc_activity activity;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_mld_add_aux_sta(mvm,
+ iwl_mvm_get_lmac_id(mvm, channel->band));
+ if (ret)
+ return ret;
+
+ return iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, activity);
+}
+
static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel *channel)
@@ -4822,6 +4888,7 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct iwl_mvm_roc_ops *ops)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
u32 lmac_id;
int ret;
@@ -4834,11 +4901,14 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
- ret = iwl_mvm_esr_non_bss_link(mvm, vif, 0, true);
- if (ret)
- return ret;
+ if (!IS_ERR_OR_NULL(bss_vif)) {
+ ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_ROC);
+ if (ret)
+ return ret;
+ }
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -4848,30 +4918,29 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ret = ops->add_aux_sta_for_hs20(mvm, lmac_id);
if (!ret)
ret = iwl_mvm_roc_station(mvm, channel, vif, duration);
- goto out_unlock;
+ return ret;
case NL80211_IFTYPE_P2P_DEVICE:
/* handle below */
break;
default:
IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
+ if (iwl_mvm_has_p2p_over_aux(mvm)) {
+ ret = iwl_mvm_roc_p2p(mvm, channel, vif, duration, type);
+ return ret;
+ }
ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel);
if (ret)
- goto out_unlock;
+ return ret;
ret = ops->link(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
- ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
-out_unlock:
- mutex_unlock(&mvm->mutex);
- IWL_DEBUG_MAC80211(mvm, "leave\n");
- return ret;
+ return iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
}
int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
@@ -4952,13 +5021,9 @@ int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_add_chanctx(mvm, ctx);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_add_chanctx(mvm, ctx);
}
static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
@@ -4977,9 +5042,8 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
__iwl_mvm_remove_chanctx(mvm, ctx);
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
@@ -4999,26 +5063,23 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
phy_ctxt->ref, changed))
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* we are only changing the min_width, may be a noop */
if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
if (phy_ctxt->width == def->width)
- goto out_unlock;
+ return;
/* we are just toggling between 20_NOHT and 20 */
if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
def->width <= NL80211_CHAN_WIDTH_20)
- goto out_unlock;
+ return;
}
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
-
-out_unlock:
- mutex_unlock(&mvm->mutex);
}
/*
@@ -5138,6 +5199,10 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
}
iwl_mvm_update_quotas(mvm, false, NULL);
+
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
}
goto out;
@@ -5157,13 +5222,9 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
/*
@@ -5251,9 +5312,8 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
__iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
}
static int
@@ -5263,7 +5323,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
vifs[0].old_ctx, true);
__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
@@ -5286,7 +5346,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
if (iwl_mvm_phy_ctx_count(mvm) > 1)
iwl_mvm_teardown_tdls_peers(mvm);
- goto out;
+ return 0;
out_remove:
__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
@@ -5303,15 +5363,11 @@ out_reassign:
goto out_restart;
}
- goto out;
+ return ret;
out_restart:
/* things keep failing, better restart the hw */
iwl_mvm_nic_restart(mvm, false);
-
-out:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -5322,7 +5378,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
vifs[0].old_ctx, true);
@@ -5334,7 +5390,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
goto out_reassign;
}
- goto out;
+ return 0;
out_reassign:
if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
@@ -5343,15 +5399,11 @@ out_reassign:
goto out_restart;
}
- goto out;
+ return ret;
out_restart:
/* things keep failing, better restart the hw */
iwl_mvm_nic_restart(mvm, false);
-
-out:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -5478,13 +5530,9 @@ int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
void *data, int len)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int err;
- mutex_lock(&mvm->mutex);
- err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
- mutex_unlock(&mvm->mutex);
-
- return err;
+ guard(mvm)(mvm);
+ return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
}
#endif
@@ -5701,13 +5749,9 @@ static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *chsw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_pre_channel_switch(mvm, vif, chsw);
}
void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
@@ -5759,16 +5803,14 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
}
mvmvif->csa_count = chsw->count;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (mvmvif->csa_failed)
- goto out_unlock;
+ return;
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
0, sizeof(cmd), &cmd));
-out_unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
@@ -5777,17 +5819,16 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
if (!iwl_mvm_has_new_tx_api(mvm)) {
if (drop) {
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_flush_tx_path(mvm,
iwl_mvm_flushable_queues(mvm) & queues);
- mutex_unlock(&mvm->mutex);
} else {
iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
}
return;
}
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
struct ieee80211_sta *sta;
@@ -5802,7 +5843,6 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
}
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -5885,7 +5925,7 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta;
int link_id;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for_each_sta_active_link(vif, sta, link_sta, link_id) {
mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
lockdep_is_held(&mvm->mutex));
@@ -5896,7 +5936,6 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mvmsta->tfd_queue_msk))
IWL_ERR(mvm, "flush request fail\n");
}
- mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx,
@@ -5962,7 +6001,6 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret = 0;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(SYSTEM_GROUP,
SYSTEM_STATISTICS_CMD),
@@ -5982,12 +6020,13 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx > 0)
return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (iwl_mvm_firmware_running(mvm)) {
- ret = iwl_mvm_request_statistics(mvm, false);
+ int ret = iwl_mvm_request_statistics(mvm, false);
+
if (ret)
- goto out;
+ return ret;
}
survey->filled = SURVEY_INFO_TIME_RX |
@@ -6003,7 +6042,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
/* the new fw api doesn't support the following fields */
if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
- goto out;
+ return 0;
survey->filled |= SURVEY_INFO_TIME |
SURVEY_INFO_TIME_SCAN;
@@ -6015,9 +6054,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
mvm->radio_stats.on_time_scan;
do_div(survey->time_scan, USEC_PER_MSEC);
- out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
@@ -6184,13 +6221,13 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (!vif->cfg.assoc)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id)
- goto unlock;
+ return;
if (iwl_mvm_request_statistics(mvm, false))
- goto unlock;
+ return;
sinfo->rx_beacon = 0;
for_each_mvm_vif_valid_link(mvmvif, i)
@@ -6204,8 +6241,6 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mvmvif->deflink.beacon_stats.avg_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
}
- unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
@@ -6348,7 +6383,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
.len[0] = sizeof(cmd),
.data[1] = data,
.len[1] = size,
- .flags = sync ? 0 : CMD_ASYNC,
+ .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
};
int ret;
@@ -6373,11 +6408,9 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
if (sync) {
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
- READ_ONCE(mvm->queue_sync_state) == 0 ||
- iwl_mvm_is_radio_hw_killed(mvm),
+ READ_ONCE(mvm->queue_sync_state) == 0,
SYNC_RX_QUEUE_TIMEOUT);
- WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm),
- "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
mvm->queue_sync_state,
mvm->queue_sync_cookie);
}
@@ -6393,9 +6426,8 @@ void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0);
- mutex_unlock(&mvm->mutex);
}
int
@@ -6431,13 +6463,9 @@ int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *request)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_ftm_start(mvm, vif, request);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_ftm_start(mvm, vif, request);
}
void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -6445,9 +6473,8 @@ void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_ftm_abort(mvm, request);
- mutex_unlock(&mvm->mutex);
}
static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
@@ -6482,7 +6509,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u32 protocols = 0;
- int ret;
/* HW timestamping is only supported for a specific station */
if (!hwts->macaddr)
@@ -6492,11 +6518,8 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
protocols =
IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 0a3b7284eedd..3c99396ad369 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -12,7 +12,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
int ret;
int i;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
@@ -32,7 +32,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
/* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
@@ -46,7 +46,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
@@ -75,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_free_bf;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
@@ -97,7 +95,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
- goto out_unlock;
+ return 0;
out_free_bf:
if (mvm->bf_allowed_vif == mvmvif) {
@@ -108,9 +106,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
out_remove_mac:
mvmvif->link[0] = NULL;
iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
- out_unlock:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -127,7 +122,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC))
iwl_mvm_tcm_rm_vif(mvm, vif);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (vif == mvm->csme_vif) {
iwl_mei_set_netdev(NULL);
@@ -190,8 +185,6 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
mvm->monitor_on = false;
__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
}
-
- mutex_unlock(&mvm->mutex);
}
static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
@@ -229,6 +222,8 @@ static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
mvmsta->mpdu_counters[q].window_start = jiffies;
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
+
+ IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
}
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
@@ -352,6 +347,11 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+
/* then activate */
ret = iwl_mvm_link_changed(mvm, vif, link_conf,
LINK_CONTEXT_MODIFY_ACTIVE |
@@ -389,10 +389,11 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
/* update EMLSR mode */
if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ int ret;
+
ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
true);
/*
@@ -403,11 +404,8 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
return ret;
}
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
@@ -533,9 +531,37 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
static void
+iwl_mvm_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
+ const struct ieee80211_bss_conf *bss_info)
+{
+ u8 i;
+
+ /*
+ * NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT,
+ * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE
+ */
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.psd_local[0].power));
+
+ /* if an element is not valid, mac80211 fills in the default (max value) */
+ for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++)
+ cmd->psd_pwr[i] = min(bss_info->tpe.psd_local[0].power[i],
+ bss_info->tpe.psd_reg_client[0].power[i]);
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.max_local[0].power));
+
+ for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++)
+ cmd->eirp_pwr[i] = min(bss_info->tpe.max_local[0].power[i],
+ bss_info->tpe.max_reg_client[0].power[i]);
+}
+
+void
iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf)
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap)
{
struct iwl_txpower_constraints_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -555,19 +581,22 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
return;
- if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ ||
- bss_conf->chanreq.oper.chan->flags &
- IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT)
+ if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
return;
cmd.link_id = cpu_to_le16(link_info->fw_link_id);
- /*
- * Currently supporting VLP Soft AP only.
- */
- cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+ if (is_ap) {
+ cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ } else if (bss_conf->power_type == IEEE80211_REG_UNSET_AP) {
+ return;
+ } else {
+ cmd.ap_type = cpu_to_le16(bss_conf->power_type - 1);
+ iwl_mvm_tpe_sta_cmd_data(&cmd, bss_conf);
+ }
+
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(PHY_OPS_GROUP,
AP_TX_POWER_CONSTRAINTS_CMD),
@@ -586,15 +615,16 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (vif->type == NL80211_IFTYPE_AP)
- iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, link_conf);
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf, true);
/* Send the beacon template */
ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf);
if (ret)
- goto out_unlock;
+ return ret;
/* the link should be already activated when assigning chan context */
ret = iwl_mvm_link_changed(mvm, vif, link_conf,
@@ -602,11 +632,11 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
~LINK_CONTEXT_MODIFY_ACTIVE,
true);
if (ret)
- goto out_unlock;
+ return ret;
ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf);
if (ret)
- goto out_unlock;
+ return ret;
/* Send the bcast station. At this stage the TBTT and DTIM time
* events are added and applied to the scheduler
@@ -630,7 +660,7 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_ftm_restart_responder(mvm, vif, link_conf);
- goto out_unlock;
+ return 0;
out_failed:
iwl_mvm_power_update_mac(mvm);
@@ -638,8 +668,6 @@ out_failed:
iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf);
out_rm_mcast:
iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
-out_unlock:
- mutex_unlock(&mvm->mutex);
return ret;
}
@@ -662,7 +690,7 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_ap_ibss_common(mvm, vif);
@@ -676,7 +704,6 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
iwl_mvm_power_update_mac(mvm);
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw,
@@ -846,6 +873,8 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
iwl_mvm_sf_update(mvm, vif, false);
@@ -979,7 +1008,7 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -1005,8 +1034,6 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw,
link_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, link_conf->txpower);
}
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw,
@@ -1015,15 +1042,13 @@ static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
if (vif->type == NL80211_IFTYPE_STATION)
iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes);
-
- mutex_unlock(&mvm->mutex);
}
static int
@@ -1056,9 +1081,8 @@ static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw,
!vif->p2p)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
- mutex_unlock(&mvm->mutex);
}
static int
@@ -1080,14 +1104,10 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
* The exception is P2P_DEVICE interface which needs immediate update.
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
- LINK_CONTEXT_MODIFY_QOS_PARAMS,
- true);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_QOS_PARAMS,
+ true);
}
return 0;
}
@@ -1232,13 +1252,9 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
u16 old_links, u16 new_links)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
}
bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1266,26 +1282,19 @@ static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int n_links = hweight16(desired_links);
- bool ret = true;
if (n_links <= 1)
return true;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* Check if HW supports the wanted number of links */
- if (n_links > iwl_mvm_max_active_links(mvm, vif)) {
- ret = false;
- goto unlock;
- }
+ if (n_links > iwl_mvm_max_active_links(mvm, vif))
+ return false;
/* If it is an eSR device, check that we can enter eSR */
- ret = iwl_mvm_is_esr_supported(mvm->fwrt.trans) &&
- iwl_mvm_vif_has_esr_cap(mvm, vif);
-
-unlock:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_is_esr_supported(mvm->fwrt.trans) &&
+ iwl_mvm_vif_has_esr_cap(mvm, vif);
}
static enum ieee80211_neg_ttlm_res
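
The iwl_mvm_tpe_sta_cmd_data() helper added above clamps each PSD and EIRP entry to the tighter of the AP-advertised local limit and the regulatory-client limit; since mac80211 fills invalid TPE elements with the maximum value, the element-wise min() never over-tightens. A minimal standalone sketch of that clamping step (the array size and names here are illustrative, not the firmware command layout):

#include <stdint.h>

#define N_PWR 16        /* illustrative, not the firmware array size */

/*
 * Element-wise minimum of two power-limit arrays, as in the TPE handling
 * above. Entries that were "not valid" are assumed to be pre-filled with
 * the maximum value, so taking the minimum cannot tighten them wrongly.
 */
static void tpe_clamp(int8_t out[N_PWR], const int8_t local[N_PWR],
                      const int8_t reg_client[N_PWR])
{
        for (unsigned int i = 0; i < N_PWR; i++)
                out[i] = local[i] < reg_client[i] ? local[i] : reg_client[i];
}
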
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index b7a461dba41e..d5a204e52076 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -241,7 +241,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_MAX_TID_COUNT, &wdg_timeout);
}
-/* Allocate a new station entry for the broadcast station to the given vif,
+/* Allocate a new station entry for the multicast station to the given vif,
* and send it to the FW.
* Note that each AP/GO mac should have its own multicast station.
*/
@@ -470,7 +470,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- switch (sta->deflink.smps_mode) {
+ switch (link_sta->smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
@@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
}
-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta,
- struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw)
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
is_in_fw ? ERR_PTR(-EINVAL) : NULL);
@@ -1014,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
cmd.modify.tid = cpu_to_le32(data->tid);
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+ sizeof(cmd), &cmd);
data->sta_mask = new_sta_mask;
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1f58c727fa63..22f48b66d79c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/cleanup.h>
#include <linux/leds.h>
#include <linux/in6.h>
@@ -23,7 +24,7 @@
#include "iwl-op-mode.h"
#include "iwl-trans.h"
#include "fw/notif-wait.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "fw/file.h"
#include "iwl-config.h"
#include "sta.h"
@@ -82,14 +83,9 @@ extern const struct ieee80211_ops iwl_mvm_mld_hw_ops;
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
- * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
- * We will register to mac80211 to have testmode working. The NIC must not
- * be up'ed after the INIT fw asserted. This is useful to be able to use
- * proprietary tools over testmode to debug the INIT fw.
* @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
- bool init_dbg;
int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@@ -360,7 +356,9 @@ struct iwl_mvm_vif_link_info {
* @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
* @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
* @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend, or forced an exit from, EMLSR
- * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-bssid link's preventing EMLSR
+ * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
+ * preventing EMLSR
+ * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
* @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
* @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
* due to low RSSI.
@@ -377,6 +375,7 @@ enum iwl_mvm_esr_state {
IWL_MVM_ESR_BLOCKED_TPT = 0x4,
IWL_MVM_ESR_BLOCKED_FW = 0x8,
IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
+ IWL_MVM_ESR_BLOCKED_ROC = 0x20,
IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
IWL_MVM_ESR_EXIT_COEX = 0x40000,
@@ -426,6 +425,7 @@ struct iwl_mvm_esr_exit {
* @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel
* @csa_blocks_tx: CSA is blocking TX
* @features: hw features active for this vif
+ * @max_tx_op: max TXOP in usecs for all ACs, zero for no limit.
* @ap_beacon_time: AP beacon time for synchronisation (on older FW)
* @bf_enabled: indicates if beacon filtering is enabled
* @ba_enabled: indicated if beacon abort is enabled
@@ -448,6 +448,40 @@ struct iwl_mvm_esr_exit {
* @prevent_esr_done_wk: work that should be done when esr prevention ends.
* @mlo_int_scan_wk: work for the internal MLO scan.
* @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
+ * @roc_activity: currently running ROC activity for this vif (or
+ * ROC_NUM_ACTIVITIES if no activity is running).
+ * @session_prot_connection_loss: the connection was lost due to session
+ * protection ending without receiving a beacon, so we need to now
+ * protect the deauth separately
+ * @ap_early_keys: The firmware cannot install keys before stations etc.,
+ * but higher layers work differently, so we store the keys here for
+ * later installation.
+ * @ap_sta: pointer to the AP STA data structure
+ * @csa_count: CSA counter (old CSA implementation w/o firmware)
+ * @csa_misbehave: CSA AP misbehaviour flag (old implementation)
+ * @csa_target_freq: CSA target channel frequency (old implementation)
+ * @csa_work: CSA work (old implementation)
+ * @dbgfs_bf: beacon filtering debugfs data
+ * @dbgfs_dir: debugfs directory for this vif
+ * @dbgfs_pm: power management debugfs data
+ * @dbgfs_quota_min: debugfs value for minimal quota
+ * @dbgfs_slink: debugfs symlink for this interface
+ * @ftm_unprotected: unprotected FTM debugfs override
+ * @hs_time_event_data: hotspot/AUX ROC time event data
+ * @mac_pwr_cmd: debugfs override for MAC power command
+ * @target_ipv6_addrs: IPv6 addresses on this interface for offload
+ * @num_target_ipv6_addrs: number of @target_ipv6_addrs
+ * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
+ * @rekey_data: rekeying data for WoWLAN GTK rekey offload
+ * @seqno: storage for seqno for older firmware D0/D3 transition
+ * @seqno_valid: indicates @seqno is valid
+ * @time_event_data: session protection time event data
+ * @tsf_id: the TSF resource ID assigned in firmware (for firmware needing that)
+ * @tx_key_idx: WEP transmit key index for D3
+ * @uapsd_misbehaving_ap_addr: MLD address/BSSID of U-APSD misbehaving AP, to
+ * not use U-APSD on reconnection
+ * @uapsd_nonagg_detected_wk: worker for handling detection of no aggregation
+ * in U-APSD
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@@ -461,6 +495,7 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
+ bool session_prot_connection_loss;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -525,6 +560,7 @@ struct iwl_mvm_vif {
struct iwl_mvm_time_event_data time_event_data;
struct iwl_mvm_time_event_data hs_time_event_data;
+ enum iwl_roc_activity roc_activity;
/* TCP Checksum Offload */
netdev_features_t features;
@@ -538,6 +574,8 @@ struct iwl_mvm_vif {
struct ieee80211_key_conf __rcu *keys[2];
} bcn_prot;
+ u16 max_tx_op;
+
u16 link_selection_res;
u8 link_selection_primary;
u8 primary_link;
@@ -607,7 +645,7 @@ enum iwl_mvm_sched_scan_pass_all_states {
};
/**
- * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
+ * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: Is thermal throttling enabled dynamic_smps?
* @tx_backoff: The current thermal throttling tx backoff in uSec.
@@ -730,24 +768,20 @@ struct iwl_mvm_tcm {
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
* @num_stored: number of mpdus stored in the buffer
- * @buf_size: the reorder buffer size as set by the last addba request
* @queue: queue of this reorder buffer
* @last_amsdu: track last A-MSDU SN for duplication detection
* @last_sub_index: track A-MSDU sub frame index for duplication detection
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
- * @mvm: mvm pointer, needed for frame timer context
*/
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
- u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
bool valid;
spinlock_t lock;
- struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp;
/**
@@ -769,6 +803,7 @@ __aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
* @tid: tid of the session
* @baid: baid of the session
* @timeout: the timeout set in the addba request
+ * @buf_size: the reorder buffer size as set by the last addba request
* @entries_per_queue: # of buffers per queue, this actually gets
* aligned up to avoid cache line sharing between queues
* @last_rx: last rx jiffies, updated only if timeout passed from last update
@@ -785,13 +820,14 @@ struct iwl_mvm_baid_data {
u8 tid;
u8 baid;
u16 timeout;
+ u16 buf_size;
u16 entries_per_queue;
unsigned long last_rx;
struct timer_list session_timer;
struct iwl_mvm_baid_data __rcu **rcu_ptr;
struct iwl_mvm *mvm;
struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
- struct iwl_mvm_reorder_buf_entry entries[];
+ struct iwl_mvm_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
};
static inline struct iwl_mvm_baid_data *
@@ -1040,7 +1076,6 @@ struct iwl_mvm {
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX];
- unsigned long fw_link_ids_map;
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -1062,7 +1097,7 @@ struct iwl_mvm {
unsigned int max_scans;
/* UMAC scan tracking */
- u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+ u32 scan_uid_status[IWL_MAX_UMAC_SCANS];
/* start time of last scan in TSF of the mac that requested the scan */
u64 scan_start;
@@ -1152,6 +1187,7 @@ struct iwl_mvm {
struct ieee80211_channel **nd_channels;
int n_nd_channels;
bool net_detect;
+ bool fast_resume;
u8 offload_tid;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool d3_wake_sysassert;
@@ -1306,13 +1342,21 @@ struct iwl_mvm {
struct iwl_phy_specific_cfg phy_filters;
#endif
+ /* report rx timestamp in ptp clock time */
+ bool rx_ts_ptp;
+
unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;
bool sta_remove_requires_queue_remove;
bool mld_api_is_used;
- bool pldr_sync;
+ /*
+ * Indicates that the firmware will do a product reset (and therefore
+ * fail to load) when we start it (due to OTP burn); if so, don't dump
+ * errors etc. since this is expected.
+ */
+ bool fw_product_reset;
struct iwl_time_sync_data time_sync;
@@ -1330,11 +1374,14 @@ struct iwl_mvm {
#define IWL_MAC80211_GET_MVM(_hw) \
IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))
+
/**
* enum iwl_mvm_status - MVM status bits
* @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
* @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
- * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_ROC_P2P_RUNNING: remain-on-channel on P2P is running (when
+ * P2P is not over AUX)
* @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@@ -1348,7 +1395,7 @@ struct iwl_mvm {
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
IWL_MVM_STATUS_HW_CTKILL,
- IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_ROC_P2P_RUNNING,
IWL_MVM_STATUS_HW_RESTART_REQUESTED,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
@@ -1439,7 +1486,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
static inline struct ieee80211_bss_conf *
iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu)
{
- if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf),
+ "erroneous FW link ID: %d\n", link_id))
return NULL;
if (rcu)
@@ -1724,7 +1772,7 @@ struct iwl_rate_info {
u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
};
-void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend);
int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
/******************
@@ -1758,6 +1806,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -1859,10 +1908,10 @@ static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
{
- u8 rx_ant = mvm->fw->valid_tx_ant;
+ u8 rx_ant = mvm->fw->valid_rx_ant;
if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant)
- rx_ant &= mvm->nvm_data->valid_tx_ant;
+ rx_ant &= mvm->nvm_data->valid_rx_ant;
if (mvm->set_rx_ant)
rx_ant &= mvm->set_rx_ant;
@@ -2245,11 +2294,22 @@ extern const struct file_operations iwl_dbgfs_d3_test_ops;
#ifdef CONFIG_PM
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm);
#else
static inline void
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
+
+static inline void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+}
+
+static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ return 0;
+}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd *cmd);
@@ -2761,6 +2821,13 @@ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
sw_rfkill);
}
+static inline bool iwl_mvm_has_p2p_over_aux(struct iwl_mvm *mvm)
+{
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
+
+ return iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) >= 4;
+}
+
static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
struct sk_buff *skb)
{
@@ -2795,7 +2862,7 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int iwl_mvm_mac_start(struct ieee80211_hw *hw);
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
-void iwl_mvm_mac_stop(struct ieee80211_hw *hw);
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend);
static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
return 0;
@@ -2926,7 +2993,7 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
- int duration, u32 activity);
+ int duration, enum iwl_roc_activity activity);
/* EMLSR */
bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -2953,4 +3020,10 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
bool primary);
int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int link_id, bool active);
+
+void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap);
#endif /* __IWL_MVM_H__ */
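
Most hunks in this patch replace open-coded mutex_lock()/mutex_unlock() pairs with the scope-based guard(mvm)(mvm) declared above through DEFINE_GUARD from <linux/cleanup.h>: the mutex is released automatically on every return path, which is why the various out_unlock labels disappear. A hedged before/after sketch of the pattern, using a placeholder structure rather than the driver types:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct demo {
        struct mutex mutex;
        int value;
};

/* Tie lock/unlock of demo::mutex to a scope, mirroring DEFINE_GUARD(mvm, ...) */
DEFINE_GUARD(demo, struct demo *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))

static int demo_get_value(struct demo *d)
{
        guard(demo)(d); /* unlocked automatically at every return below */

        if (d->value < 0)
                return -EINVAL;
        return d->value;
}
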
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index ae8177222881..836ca22597bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -9,8 +9,7 @@
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
-#include "iwl-eeprom-parse.h"
-#include "iwl-eeprom-read.h"
+#include "iwl-nvm-utils.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "fw/acpi.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 53283d052e18..b7dcae76a05d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -18,7 +18,7 @@
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
@@ -41,12 +41,8 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS,
- /* rest of fields are 0 by default */
};
-module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
-MODULE_PARM_DESC(init_dbg,
- "set to true to debug an ASSERT in INIT fw (default: false");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
"power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
@@ -153,7 +149,7 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
/* FW recommendations is only for entering EMLSR */
- if (!vif || iwl_mvm_vif_from_mac80211(vif)->esr_active)
+ if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active)
return;
if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
@@ -471,7 +467,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC,
struct iwl_time_msmt_cfm_notify),
RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
- iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC,
+ iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_roc_notif),
RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
@@ -572,6 +568,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(D0I3_END_CMD),
HCMD_NAME(LTR_CONFIG),
HCMD_NAME(LDBG_CONFIG_CMD),
+ HCMD_NAME(DEBUG_LOG_MSG),
};
/* Please keep this array *SORTED* by hex value.
@@ -579,6 +576,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(SOC_CONFIGURATION_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
HCMD_NAME(FW_ERROR_RECOVERY_CMD),
HCMD_NAME(RFI_CONFIG_CMD),
@@ -593,8 +591,10 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(LOW_LATENCY_CMD),
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(CANCEL_CHANNEL_SWITCH_CMD),
HCMD_NAME(MAC_CONFIG_CMD),
HCMD_NAME(LINK_CONFIG_CMD),
HCMD_NAME(STA_CONFIG_CMD),
@@ -603,7 +603,10 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(STA_DISABLE_TX_CMD),
HCMD_NAME(ROC_CMD),
HCMD_NAME(ROC_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
+ HCMD_NAME(MISSED_VAP_NOTIF),
HCMD_NAME(SESSION_PROTECTION_NOTIF),
+ HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};
@@ -627,6 +630,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(RLC_CONFIG_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
@@ -653,6 +658,21 @@ static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+ HCMD_NAME(LMAC_RD_WR),
+ HCMD_NAME(UMAC_RD_WR),
+ HCMD_NAME(HOST_EVENT_CFG),
+ HCMD_NAME(DBGC_SUSPEND_RESUME),
+ HCMD_NAME(BUFFER_ALLOCATION),
+ HCMD_NAME(GET_TAS_STATUS),
+ HCMD_NAME(FW_DUMP_COMPLETE_CMD),
+ HCMD_NAME(FW_CLEAR_BUFFER),
+ HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
HCMD_NAME(CHANNEL_SURVEY_NOTIF),
HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
@@ -705,6 +725,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
+ [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names),
[STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
};
@@ -740,20 +761,18 @@ static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
struct ieee80211_vif *tx_blocked_vif;
struct iwl_mvm_vif *mvmvif;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
tx_blocked_vif =
rcu_dereference_protected(mvm->csa_tx_blocked_vif,
lockdep_is_held(&mvm->mutex));
if (!tx_blocked_vif)
- goto unlock;
+ return;
mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
-unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_fwrt_dump_start(void *ctx)
@@ -770,21 +789,12 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
mutex_unlock(&mvm->mutex);
}
-static bool iwl_mvm_fwrt_fw_running(void *ctx)
-{
- return iwl_mvm_firmware_running(ctx);
-}
-
static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
- int ret;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd(mvm, host_cmd);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_send_cmd(mvm, host_cmd);
}
static bool iwl_mvm_d3_debug_enable(void *ctx)
@@ -795,7 +805,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
- .fw_running = iwl_mvm_fwrt_fw_running,
.send_hcmd = iwl_mvm_fwrt_send_hcmd,
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
@@ -851,8 +860,7 @@ get_nvm_from_fw:
ret = iwl_mvm_init_mcc(mvm);
}
- if (!iwlmvm_mod_params.init_dbg || !ret)
- iwl_mvm_stop_device(mvm);
+ iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
wiphy_unlock(mvm->hw->wiphy);
@@ -862,7 +870,7 @@ get_nvm_from_fw:
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* no longer need this regardless of failure or not */
- mvm->pldr_sync = false;
+ mvm->fw_product_reset = false;
return ret;
}
@@ -1360,24 +1368,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- switch (iwlwifi_mod_params.amsdu_size) {
- case IWL_AMSDU_DEF:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- break;
- case IWL_AMSDU_4K:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- break;
- case IWL_AMSDU_8K:
- trans_cfg.rx_buf_size = IWL_AMSDU_8K;
- break;
- case IWL_AMSDU_12K:
- trans_cfg.rx_buf_size = IWL_AMSDU_12K;
- break;
- default:
- pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
- iwlwifi_mod_params.amsdu_size);
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- }
+ trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size();
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
@@ -1437,9 +1428,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
goto out_free;
}
- IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
- mvm->trans->name, mvm->trans->hw_rev);
-
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
else
@@ -1507,8 +1495,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
- if (iwlmvm_mod_params.init_dbg)
- return op_mode;
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
iwl_trans_op_mode_leave(trans);
@@ -1912,12 +1898,10 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
bool unified = iwl_mvm_has_unified_ucode(mvm);
- if (state) {
+ if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- wake_up(&mvm->rx_sync_waitq);
- } else {
+ else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- }
iwl_mvm_set_rfkill_state(mvm);
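
In ops.c the per-driver switch on iwlwifi_mod_params.amsdu_size is replaced by the shared iwl_amsdu_size_to_rxb_size() helper. Assuming that helper preserves the mapping the deleted switch encoded, its behaviour is roughly as sketched below; the real helper lives in shared iwlwifi code and may differ in detail.

#include "iwl-modparams.h"

/* Rough equivalent of the mapping removed above (sketch only) */
static enum iwl_amsdu_size amsdu_mod_param_to_rxb_size(void)
{
        switch (iwlwifi_mod_params.amsdu_size) {
        case IWL_AMSDU_8K:
                return IWL_AMSDU_8K;
        case IWL_AMSDU_12K:
                return IWL_AMSDU_12K;
        case IWL_AMSDU_DEF:
        case IWL_AMSDU_4K:
        default:
                return IWL_AMSDU_4K;    /* unknown values fall back to 4K */
        }
}
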
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 568f53c56199..bc363e8427e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -211,19 +211,37 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
-static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_allow_uapsd_iface_iterator_data {
+ struct ieee80211_vif *current_vif;
+ bool allow_uapsd;
+};
+
+static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- bool *is_p2p_standalone = _data;
+ struct iwl_allow_uapsd_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif *curr_mvmvif =
+ iwl_mvm_vif_from_mac80211(data->current_vif);
- switch (ieee80211_vif_type_p2p(vif)) {
- case NL80211_IFTYPE_P2P_GO:
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
- *is_p2p_standalone = false;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_NAN:
+ data->allow_uapsd = false;
break;
case NL80211_IFTYPE_STATION:
- if (vif->cfg.assoc)
- *is_p2p_standalone = false;
+ /* allow UAPSD if P2P interface and BSS station interface share
+ * the same channel.
+ */
+ if (vif->cfg.assoc && other_mvmvif->deflink.phy_ctxt &&
+ curr_mvmvif->deflink.phy_ctxt &&
+ other_mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
+ data->allow_uapsd = false;
break;
default:
@@ -235,6 +253,10 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_allow_uapsd_iface_iterator_data data = {
+ .current_vif = vif,
+ .allow_uapsd = true,
+ };
if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr,
vif->cfg.ap_addr))
@@ -249,88 +271,75 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
IEEE80211_P2P_OPPPS_ENABLE_BIT))
return false;
- /*
- * Avoid using uAPSD if client is in DCM -
- * low latency issue in Miracast
- */
- if (iwl_mvm_phy_ctx_count(mvm) >= 2)
+ if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
return false;
- if (vif->p2p) {
- /* Allow U-APSD only if p2p is stand alone */
- bool is_p2p_standalone = true;
-
- if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
- return false;
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_p2p_standalone_iterator,
- &is_p2p_standalone);
-
- if (!is_p2p_standalone)
- return false;
- }
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_allow_uapsd_iterator,
+ &data);
- return true;
+ return data.allow_uapsd;
}
-static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
+static bool iwl_mvm_power_is_radar(struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_bss_conf *link_conf;
- bool radar_detect = false;
- unsigned int link_id;
- rcu_read_lock();
- for_each_vif_active_link(vif, link_conf, link_id) {
- chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
- /* this happens on link switching, just ignore inactive ones */
- if (!chanctx_conf)
- continue;
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
- radar_detect = !!(chanctx_conf->def.chan->flags &
- IEEE80211_CHAN_RADAR);
- if (radar_detect)
- goto out;
- }
+ /* this happens on link switching, just ignore inactive ones */
+ if (!chanctx_conf)
+ return false;
-out:
- rcu_read_unlock();
- return radar_detect;
+ return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
}
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
{
- int dtimper = vif->bss_conf.dtim_period ?: 1;
- int skip;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int min_link_skip = ~0;
+ unsigned int link_id;
/* disable, in case we're supposed to override */
cmd->skip_dtim_periods = 0;
cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- if (iwl_mvm_power_is_radar(vif))
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ return;
+ cmd->skip_dtim_periods = 2;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
return;
+ }
- if (dtimper >= 10)
- return;
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ unsigned int dtimper = link_conf->dtim_period ?: 1;
+ unsigned int dtimper_tu = dtimper * link_conf->beacon_int;
+ unsigned int skip;
- if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ if (dtimper >= 10 || iwl_mvm_power_is_radar(link_conf)) {
+ rcu_read_unlock();
return;
- skip = 2;
- } else {
- int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+ }
if (WARN_ON(!dtimper_tu))
- return;
+ continue;
+
/* configure skip over dtim up to 900 TU DTIM interval */
- skip = max_t(u8, 1, 900 / dtimper_tu);
+ skip = max_t(int, 1, 900 / dtimper_tu);
+ min_link_skip = min(min_link_skip, skip);
}
+ rcu_read_unlock();
+
+ /* no WARN_ON, can only happen with WARN_ON above */
+ if (min_link_skip == ~0)
+ return;
- cmd->skip_dtim_periods = skip;
+ cmd->skip_dtim_periods = min_link_skip;
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
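
The reworked iwl_mvm_power_config_skip_dtim() now computes the skip count per active link and keeps the minimum across links: each link may skip up to roughly 900 TU worth of DTIM periods (but at least one), while a DTIM period of 10 or more, or a radar channel, disables skipping altogether. A small worked sketch of the per-link arithmetic, in plain C with illustrative values:

#include <stdio.h>

/*
 * Per-link "skip over DTIM" count as in the hunk above: skip up to ~900 TU,
 * but never less than one DTIM period. Returns 0 for a bogus DTIM interval.
 */
static unsigned int skip_dtim_periods(unsigned int dtim_period,
                                      unsigned int beacon_int_tu)
{
        unsigned int dtim_tu = dtim_period * beacon_int_tu;
        unsigned int skip;

        if (!dtim_tu)
                return 0;
        skip = 900 / dtim_tu;
        return skip ? skip : 1;
}

int main(void)
{
        /* beacon interval 100 TU, DTIM period 3 -> 900 / 300 = skip 3 DTIMs */
        printf("%u\n", skip_dtim_periods(3, 100));
        /* DTIM interval 1000 TU (> 900) -> still skip at least 1 */
        printf("%u\n", skip_dtim_periods(5, 200));
        return 0;
}
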
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 3ba62fb2c85e..05715e5af6ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -514,6 +514,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
link_sta->agg.max_tid_amsdu_len[i] = 1;
}
+ ieee80211_sta_recalc_aggregates(sta);
+
IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
le32_to_cpu(notif->amsdu_size), size,
@@ -609,6 +611,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
cpu_to_le16(max_amsdu_len) : 0,
};
unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
int cmd_ver;
int ret;
@@ -652,7 +655,10 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
* since TLC offload works with one mode we can assume
* that only vht/ht is used and also set it as station max amsdu
*/
- sta->deflink.agg.max_amsdu_len = max_amsdu_len;
+ link_sta->agg.max_amsdu_len = max_amsdu_len;
+ ieee80211_sta_recalc_aggregates(sta);
+
+ cfg_cmd.max_tx_op = cpu_to_le16(mvmvif->max_tx_op);
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 376b23b409dc..ea81cb236d5c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright (C) 2003 - 2014, 2018 - 2023 Intel Corporation
+ * Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation
*****************************************************************************/
#ifndef __rs_h__
@@ -122,13 +122,8 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-/*
- * FIXME - various places in firmware API still use u8,
- * e.g. LQ command and SCD config command.
- * This should be 256 instead.
- */
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -203,11 +198,12 @@ struct rs_rate {
/**
* struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
* @last_rate_n_flags: last rate reported by FW
+ * @pers: persistent fields
* @pers.sta_id: the id of the station
- * @chains: bitmask of chains reported in %chain_signal
- * @chain_signal: per chain signal strength
- * @last_rssi: last rssi reported
- * @drv: pointer back to the driver data
+ * @pers.chains: bitmask of chains reported in %chain_signal
+ * @pers.chain_signal: per chain signal strength
+ * @pers.last_rssi: last rssi reported
+ * @pers.drv: pointer back to the driver data
*/
struct iwl_lq_sta_rs_fw {
/* last tx rate_n_flags */
@@ -218,11 +214,11 @@ struct iwl_lq_sta_rs_fw {
u32 sta_id;
#ifdef CONFIG_MAC80211_DEBUGFS
/**
- * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ * @pers.dbg_fixed_rate: for debug, use fixed rate if not 0
*/
u32 dbg_fixed_rate;
/**
- * @dbg_agg_frame_count_lim: for debug, max number of
+ * @pers.dbg_agg_frame_count_lim: for debug, max number of
* frames in A-MPDU
*/
u16 dbg_agg_frame_count_lim;
@@ -407,7 +403,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, struct ieee80211_tx_info *info, bool ndp);
/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
+ * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks
*
* Since the rate control algorithm is hardware specific, there is no need
* or reason to place it as a stand alone module. The driver can call
@@ -419,7 +415,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int iwl_mvm_rate_control_register(void);
/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
+ * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
*
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 4fa8066a89b6..151289e13308 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -557,12 +557,10 @@ struct iwl_mvm_stat_data_all_macs {
};
static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
- struct iwl_mvm_vif_link_info *link_info)
+ struct iwl_mvm_vif_link_info *link_info,
+ struct ieee80211_bss_conf *bss_conf)
{
struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm;
- struct ieee80211_bss_conf *bss_conf =
- iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, link_info->fw_link_id,
- false);
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
@@ -670,7 +668,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
mvmvif->deflink.beacon_stats.num_beacons;
/* This is used in pre-MLO API so use deflink */
- iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink);
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
}
static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
@@ -705,7 +703,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
/* This is used in pre-MLO API so use deflink */
- iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink);
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
}
static inline void
@@ -921,7 +919,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
mvmvif->link[link_id]->beacon_stats.num_beacons;
sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
- iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info);
+ iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info,
+ bss_conf);
if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX,
"invalid mvmvif id: %d", mvmvif->id))
@@ -967,7 +966,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- if (!bss_vif)
+ if (IS_ERR_OR_NULL(bss_vif))
return;
mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
@@ -1010,6 +1009,9 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
+ IWL_DEBUG_STATS(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
+
/* If we don't have enough MPDUs - exit EMLSR */
if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index d78af2928152..1a210d0c22b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -566,7 +566,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
lockdep_assert_held(&reorder_buf->lock);
while (ieee80211_sn_less(ssn, nssn)) {
- int index = ssn % reorder_buf->buf_size;
+ int index = ssn % baid_data->buf_size;
struct sk_buff_head *skb_list = &entries[index].frames;
struct sk_buff *skb;
@@ -617,7 +617,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
- reorder_buf->buf_size));
+ ba_data->buf_size));
spin_unlock_bh(&reorder_buf->lock);
out:
@@ -839,7 +839,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
}
/* put in reorder buffer */
- index = sn % buffer->buf_size;
+ index = sn % baid_data->buf_size;
__skb_queue_tail(&entries[index].frames, skb);
buffer->num_stored++;
@@ -1954,6 +1954,16 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
iwl_mvm_decode_lsig(skb, phy_data);
rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+
+ if (mvm->rx_ts_ptp && mvm->monitor_on) {
+ u64 adj_time =
+ iwl_mvm_ptp_get_adj_time(mvm, phy_data->gp2_on_air_rise * NSEC_PER_USEC);
+
+ rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC);
+ rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64;
+ rx_status->flag &= ~RX_FLAG_MACTIME;
+ }
+
rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags,
@@ -2032,7 +2042,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u32 len;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
struct ieee80211_sta *sta = NULL;
- struct ieee80211_link_sta *link_sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
@@ -2185,6 +2194,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
+ struct ieee80211_link_sta *link_sta;
+
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR(sta))
sta = NULL;
@@ -2360,7 +2371,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
u32 rssi;
- u32 info_type;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
struct iwl_mvm_rx_phy_data phy_data;
@@ -2373,7 +2383,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
rssi = le32_to_cpu(desc->rssi);
- info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
phy_data.d0 = desc->phy_info[0];
phy_data.d1 = desc->phy_info[1];
phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
@@ -2425,7 +2434,12 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
/* 0-length PSDU */
rx_status->flag |= RX_FLAG_NO_PSDU;
- switch (info_type) {
+ /* mark as failed PLCP on any errors to skip checks in mac80211 */
+ if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
+ RX_NO_DATA_INFO_ERR_NONE)
+ rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+
+ switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
case RX_NO_DATA_INFO_TYPE_NDP:
rx_status->zero_length_psdu_type =
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
@@ -2450,8 +2464,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
*
* We mark it as mac header, for upper layers to know where
* all radio tap header ends.
+ *
+ * Since putting data on the skb never moves the data already there, and
+ * that is the only way we add data, data + len is where the header would go.
*/
- skb_reset_mac_header(skb);
+ skb_set_mac_header(skb, skb->len);
/*
* Override the nss from the rx_vec since the rate_n_flags has
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index a7ec172eeade..8e0df31f1b3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -208,7 +208,7 @@ static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt &&
mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
@@ -1313,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1418,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
general_params->adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
general_params->adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1730,7 +1730,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
break;
}
- if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+ "scan: invalid BSSID at index %u, index_b=%u\n",
+ j, idex_b)) {
memcpy(&pp->bssid_array[idex_b++],
scan_6ghz_params[j].bssid, ETH_ALEN);
}
@@ -1827,7 +1830,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
*/
if (!iwl_mvm_is_scan_fragmented(params->type)) {
if (!cfg80211_channel_is_psc(params->channels[i]) ||
- flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
+ psc_no_listen) {
if (unsolicited_probe_on_chan) {
max_s_ssids = 2;
max_bssids = 6;
@@ -2875,7 +2878,7 @@ static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
if (vif == data->current_vif)
return;
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p) {
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) {
u32 link_id;
for (link_id = 0;
@@ -3319,10 +3322,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
- 0, sizeof(cmd), &cmd);
+ CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 20d4968d692a..15e64d94d6ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -857,12 +857,6 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
size = iwl_mvm_get_queue_size(sta);
}
- /* take the min with bc tbl entries allowed */
- size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
-
- /* size needs to be power of 2 values for calculating read/write pointers */
- size = rounddown_pow_of_two(size);
-
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_link_sta *link_sta;
@@ -887,22 +881,13 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
if (!sta_mask)
return -EINVAL;
- do {
- queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
- tid, size, timeout);
-
- if (queue < 0)
- IWL_DEBUG_TX_QUEUES(mvm,
- "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n",
- size, sta_mask, tid, queue);
- size /= 2;
- } while (queue < 0 && size >= 16);
-
- if (queue < 0)
- return queue;
+ queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
+ tid, size, timeout);
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
- queue, sta_mask, tid);
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, sta_mask, tid);
return queue;
}
@@ -2758,7 +2743,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
*/
WARN_ON(1);
- for (j = 0; j < reorder_buf->buf_size; j++)
+ for (j = 0; j < data->buf_size; j++)
__skb_queue_purge(&entries[j].frames);
spin_unlock_bh(&reorder_buf->lock);
@@ -2767,7 +2752,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
- u16 ssn, u16 buf_size)
+ u16 ssn)
{
int i;
@@ -2780,12 +2765,10 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->num_stored = 0;
reorder_buf->head_sn = ssn;
- reorder_buf->buf_size = buf_size;
spin_lock_init(&reorder_buf->lock);
- reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->valid = false;
- for (j = 0; j < reorder_buf->buf_size; j++)
+ for (j = 0; j < data->buf_size; j++)
__skb_queue_head_init(&entries[j].frames);
}
}
@@ -2848,7 +2831,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
};
- u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+ .flags = CMD_SEND_IN_RFKILL,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
int ret;
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
@@ -2860,7 +2848,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.alloc.ssn = cpu_to_le16(ssn);
cmd.alloc.win_size = cpu_to_le16(buf_size);
baid = -EIO;
- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
cmd.remove_v1.baid = cpu_to_le32(baid);
BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
} else {
@@ -2869,8 +2857,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.remove.tid = cpu_to_le32(tid);
}
- ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
- &cmd, &baid);
+ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
if (ret)
return ret;
@@ -2990,13 +2977,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
+ baid_data->buf_size = buf_size;
mvm_sta->tid_to_baid[tid] = baid;
if (timeout)
mod_timer(&baid_data->session_timer,
TU_TO_EXP_TIME(timeout * 2));
- iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
+ iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
/*
* protect the BA data with RCU to cover a case where our
* internal RX sync mechanism will timeout (not that it's
@@ -4429,6 +4417,7 @@ void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
bool tx, int queue)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
struct iwl_mvm_tpt_counter *queue_counter;
struct iwl_mvm_mpdu_counter *link_counter;
u32 total_mpdus = 0;
@@ -4465,6 +4454,8 @@ void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
memset(queue_counter->per_link, 0,
sizeof(queue_counter->per_link));
queue_counter->window_start = jiffies;
+
+ IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
}
for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 264f1f9394b6..0dc83d6afb3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -478,7 +478,7 @@ struct iwl_mvm_int_sta {
};
/**
- * Send the STA info to the FW.
+ * iwl_mvm_sta_send_to_fw - Send the STA info to the FW.
*
* @mvm: the iwl_mvm* to use
* @sta: the STA
@@ -662,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index e7d5f4ebeb25..3d25ff5cd7e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2018-2020, 2022-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
#include "mvm.h"
@@ -151,7 +151,7 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
/* Protect the session to hear the TDLS setup response on the channel */
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, duration,
@@ -159,7 +159,6 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
else
iwl_mvm_protect_session(mvm, vif, duration,
duration, 100, true);
- mutex_unlock(&mvm->mutex);
}
static const char *
@@ -460,21 +459,21 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
int ret;
mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* called after an active channel switch has finished or timed-out */
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
- goto out;
+ return;
sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
- goto out;
+ return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
vif = mvmsta->vif;
@@ -493,8 +492,6 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
/* retry after a DTIM if we failed sending now */
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
-out:
- mutex_unlock(&mvm->mutex);
}
int
@@ -509,7 +506,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
unsigned int delay;
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
sta->addr, chandef->chan->center_freq, chandef->width);
@@ -519,8 +516,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
@@ -529,17 +525,15 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
oper_class, chandef, 0, 0, 0,
tmpl_skb, ch_sw_tm_ie);
if (ret)
- goto out;
+ return ret;
/*
* Mark the peer as "in tdls switch" for this vif. We only allow a
* single such peer per vif.
*/
mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
- if (!mvm->tdls_cs.peer.skb) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mvm->tdls_cs.peer.skb)
+ return -ENOMEM;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id;
@@ -556,10 +550,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
vif->bss_conf.beacon_int);
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
@@ -626,7 +617,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
"REQ" : "RESP";
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
IWL_DEBUG_TDLS(mvm,
"Received TDLS ch switch action %s from %pM status %d\n",
@@ -670,5 +661,4 @@ retry:
1024 / 1000;
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
- mutex_unlock(&mvm->mutex);
}
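[Editorial aside, not part of the patch: the guard(mvm)(mvm) conversions above rely on
the scope-based lock guards from <linux/cleanup.h>. The driver is assumed to declare the
guard class roughly as sketched below (declaration site assumed for illustration); this
is what lets the early returns drop mvm->mutex automatically without goto/unlock labels:]

#include <linux/cleanup.h>
#include <linux/mutex.h>

/* Assumed guard class: take the mutex on entry, release it when the scope ends. */
DEFINE_GUARD(mvm, struct iwl_mvm *,
	     mutex_lock(&_T->mutex),
	     mutex_unlock(&_T->mutex))

static int example_guarded_op(struct iwl_mvm *mvm)
{
	guard(mvm)(mvm);	/* mvm->mutex held until the end of this scope */

	if (!iwl_mvm_firmware_running(mvm))
		return -EIO;	/* mutex released automatically, no goto needed */

	return 0;		/* released here as well */
}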
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
index f49e3c98b1ba..47b8e7b64ead 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
@@ -208,6 +208,7 @@ static void setup_link_conf(struct kunit *test)
bss_load->channel_util = params->channel_util;
rcu_assign_pointer(bss.ies, ies);
+ rcu_assign_pointer(bss.beacon_ies, ies);
}
static void test_link_grading(struct kunit *test)
@@ -393,9 +394,6 @@ static void test_valid_link_pair(struct kunit *test)
chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20;
chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20;
-#ifdef CONFIG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
- trans->dbg_cfg = default_dbg_config;
-#endif
mvm.trans = trans;
mvm.last_bt_notif.wifi_loss_low_rssi = params->bt;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 8ee4498f4245..a8c42ce3b630 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -47,12 +47,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
struct ieee80211_vif *vif = mvm->p2p_device_vif;
lockdep_assert_held(&mvm->mutex);
/*
- * Clear the ROC_RUNNING status bit.
+ * Clear the ROC_P2P_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware.
@@ -62,7 +63,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
synchronize_net();
@@ -99,7 +100,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
}
}
- /* Do the same for AUX ROC */
+ /*
+ * P2P AUX ROC and HS2.0 ROC do not run simultaneously.
+ * Clear the ROC_AUX_RUNNING status bit.
+ * This will cause the TX path to drop offchannel transmissions.
+ * That would also be done by mac80211, but it is racy, in particular
+ * in the case that the time event actually completed in the firmware
+ * (which is handled in iwl_mvm_te_handle_notif).
+ */
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
synchronize_net();
@@ -119,9 +127,9 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
iwl_mvm_rm_aux_sta(mvm);
}
+ if (!IS_ERR_OR_NULL(bss_vif))
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
mutex_unlock(&mvm->mutex);
- if (vif)
- iwl_mvm_esr_non_bss_link(mvm, vif, 0, false);
}
void iwl_mvm_roc_done_wk(struct work_struct *wk)
@@ -214,6 +222,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
NULL);
+
+ mvmvif->session_prot_connection_loss = true;
}
iwl_mvm_connection_loss(mvm, vif, errmsg);
@@ -378,7 +388,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw);
} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
@@ -388,14 +398,51 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
}
}
+struct iwl_mvm_rx_roc_iterator_data {
+ u32 activity;
+ bool end_activity;
+ bool found;
+};
+
+static void iwl_mvm_rx_roc_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_rx_roc_iterator_data *data = _data;
+
+ if (mvmvif->roc_activity == data->activity) {
+ data->found = true;
+ if (data->end_activity)
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+}
+
void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_roc_notif *notif = (void *)pkt->data;
+ u32 activity = le32_to_cpu(notif->activity);
+ bool started = le32_to_cpu(notif->success) &&
+ le32_to_cpu(notif->started);
+ struct iwl_mvm_rx_roc_iterator_data data = {
+ .activity = activity,
+ .end_activity = !started,
+ };
- if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) &&
- le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) {
+ /* Clear vif roc_activity if done (set to ROC_NUM_ACTIVITIES) */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_rx_roc_iterator,
+ &data);
+ /*
+ * It is possible that the ROC was canceled
+ * but the notification was already fired.
+ */
+ if (!data.found)
+ return;
+
+ if (started) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw);
} else {
@@ -724,6 +771,21 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}
+static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
+{
+ struct iwl_roc_req roc_cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .activity = cpu_to_le32(activity),
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0,
+ sizeof(roc_cmd), &roc_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret);
+}
+
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
u32 *uid)
@@ -733,6 +795,9 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
s8 link_id;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
if (!vif)
return false;
@@ -757,14 +822,22 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
- /* When session protection is used, the te_data->id field
- * is reused to save session protection's configuration.
- * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
- * to HOT_SPOT_CMD.
- */
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
- id != HOT_SPOT_CMD) {
+ if ((p2p_aux && iftype == NL80211_IFTYPE_P2P_DEVICE) ||
+ (roc_ver >= 3 && mvmvif->roc_activity == ROC_ACTIVITY_HOTSPOT)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ iwl_mvm_roc_finished(mvm);
+ }
+ return false;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
+ id != HOT_SPOT_CMD) {
+ /* When session protection is used, the te_data->id field
+ * is reused to save session protection's configuration.
+ * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id
+ * field is set to HOT_SPOT_CMD.
+ */
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, vif, id,
@@ -965,7 +1038,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (WARN_ON(mvmvif->time_event_data.id !=
le32_to_cpu(notif->conf_id)))
goto out_unlock;
- set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
}
@@ -984,12 +1057,21 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
u32 *duration_tu,
u32 *delay)
{
- u32 dtim_interval = vif->bss_conf.dtim_period *
- vif->bss_conf.beacon_int;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+ u32 dtim_interval = 0;
*delay = AUX_ROC_MIN_DELAY;
*duration_tu = MSEC_TO_TU(duration_ms);
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ dtim_interval =
+ max_t(u32, dtim_interval,
+ link_conf->dtim_period * link_conf->beacon_int);
+ }
+ rcu_read_unlock();
+
/*
* If we are associated we want the delay time to be at least one
* dtim interval so that the FW can wait until after the DTIM and
@@ -998,8 +1080,10 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
* Since we want to use almost a whole dtim interval we would also
* like the delay to be for 2-3 dtim intervals, in case there are
* other time events with higher priority.
+ * dtim_interval should never be 0, it can be 1 if we don't know it
+ * (we haven't heard any beacon yet).
*/
- if (vif->cfg.assoc) {
+ if (vif->cfg.assoc && !WARN_ON(!dtim_interval)) {
*delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
/* We cannot remain off-channel longer than the DTIM interval */
if (dtim_interval <= *duration_tu) {
@@ -1014,7 +1098,7 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
- int duration, u32 activity)
+ int duration, enum iwl_roc_activity activity)
{
int res;
u32 duration_tu, delay;
@@ -1023,9 +1107,13 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
.activity = cpu_to_le32(activity),
.sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(mvmvif->roc_activity != ROC_NUM_ACTIVITIES))
+ return -EBUSY;
+
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
channel->hw_value,
@@ -1041,14 +1129,16 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
"\t(requested = %ums, max_delay = %ums)\n",
duration, delay);
IWL_DEBUG_TE(mvm,
- "Requesting to remain on channel %u for %utu\n",
- channel->hw_value, duration_tu);
+ "Requesting to remain on channel %u for %utu. activity %u\n",
+ channel->hw_value, duration_tu, activity);
/* Set the node address */
memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
0, sizeof(roc_req), &roc_req);
+ if (!res)
+ mvmvif->roc_activity = activity;
return res;
}
@@ -1191,61 +1281,40 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
-static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
-{
- int ret;
- struct iwl_roc_req roc_cmd = {
- .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
- .activity = cpu_to_le32(activity),
- };
-
- lockdep_assert_held(&mvm->mutex);
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_cmd), &roc_cmd);
- WARN_ON(ret);
-}
-
-static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif)
-{
- u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
- u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- if (fw_ver == IWL_FW_CMD_VER_UNKNOWN)
- iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
- &mvmvif->hs_time_event_data);
- else if (fw_ver == 3)
- iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT);
- else
- IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
-}
-
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ int iftype = vif->type;
mutex_lock(&mvm->mutex);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ if (p2p_aux || (roc_ver >= 3 && iftype != NL80211_IFTYPE_P2P_DEVICE)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+ goto cleanup_roc;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
te_data = &mvmvif->time_event_data;
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
IWL_DEBUG_TE(mvm,
"No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
-
iwl_mvm_cancel_session_protection(mvm, vif,
te_data->id,
te_data->link_id);
} else {
- iwl_mvm_roc_station_remove(mvm, mvmvif);
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
+ &mvmvif->hs_time_event_data);
}
goto cleanup_roc;
}
@@ -1253,12 +1322,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ iftype = te_data->vif->type;
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
@@ -1269,9 +1339,10 @@ cleanup_roc:
* (so the status bit isn't set) set it here so iwl_mvm_cleanup_roc will
* cleanup things properly
*/
- set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
- IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
- &mvm->status);
+ if (p2p_aux || iftype != NL80211_IFTYPE_P2P_DEVICE)
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ else
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
/* Mutex is released inside this function */
iwl_mvm_cleanup_roc(mvm);
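[Editorial aside, not part of the patch: the ROC bookkeeping introduced above uses
ROC_NUM_ACTIVITIES as the "nothing in flight" sentinel, which is why the add path
returns -EBUSY when roc_activity is anything else and the remove paths reset it back
to the sentinel. A sketch of the assumed initialization, with the function name chosen
for illustration only:]

/* Sketch only: a vif starts with no ROC activity scheduled. */
static void example_init_roc_state(struct iwl_mvm_vif *mvmvif)
{
	/* any value below ROC_NUM_ACTIVITIES means an activity is in flight */
	mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
}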
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 61a4638d1be2..ed0796aff722 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -299,7 +299,7 @@ static void check_exit_ctkill(struct work_struct *work)
ret = iwl_mvm_get_temp(mvm, &temp);
- __iwl_mvm_mac_stop(mvm);
+ __iwl_mvm_mac_stop(mvm, false);
if (ret)
goto reschedule;
@@ -618,48 +618,35 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
int ret;
int temp;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -ENODATA;
- goto out;
- }
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -ENODATA;
ret = iwl_mvm_get_temp(mvm, &temp);
if (ret)
- goto out;
+ return ret;
*temperature = temp * 1000;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
- int trip, int temp)
+ const struct thermal_trip *trip, int temp)
{
struct iwl_mvm *mvm = thermal_zone_device_priv(device);
- int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
- goto out;
- }
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
- if ((temp / 1000) > S16_MAX) {
- ret = -EINVAL;
- goto out;
- }
+ if ((temp / 1000) > S16_MAX)
+ return -EINVAL;
- ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_send_temp_report_ths_cmd(mvm);
}
static struct thermal_zone_device_ops tzone_ops = {
@@ -733,27 +720,18 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long new_state)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
- int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
- goto unlock;
- }
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
- if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
- ret = -EINVAL;
- goto unlock;
- }
-
- ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
- new_state);
+ if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets))
+ return -EINVAL;
-unlock:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ new_state);
}
static const struct thermal_cooling_device_ops tcooling_ops = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 1d695ece93e9..7ff5ea5e7aca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -12,7 +12,7 @@
#include <net/ipv6.h>
#include "iwl-trans.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "mvm.h"
#include "sta.h"
#include "time-sync.h"
@@ -802,10 +802,30 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
- if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP ||
- info.control.vif->type == NL80211_IFTYPE_ADHOC) {
+ if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE &&
+ p2p_aux) ||
+ (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ offchannel)) {
+ /*
+ * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
+ * that can be used in 2 different types of vifs, P2P
+ * Device and STATION.
+ * P2P Device uses the offchannel queue.
+ * STATION (HS2.0) uses the auxiliary context of the FW,
+ * and hence needs to be sent on the aux queue.
+ * If P2P_DEV_OVER_AUX is supported (p2p_aux = true)
+ * also P2P Device uses the aux queue.
+ */
+ sta_id = mvm->aux_sta.sta_id;
+ queue = mvm->aux_queue;
+ if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE))
+ return -1;
+ } else if (info.control.vif->type ==
+ NL80211_IFTYPE_P2P_DEVICE ||
+ info.control.vif->type == NL80211_IFTYPE_AP ||
+ info.control.vif->type == NL80211_IFTYPE_ADHOC) {
u32 link_id = u32_get_bits(info.control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
struct iwl_mvm_vif_link_info *link;
@@ -831,18 +851,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
- } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- offchannel) {
- /*
- * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
- * that can be used in 2 different types of vifs, P2P &
- * STATION.
- * P2P uses the offchannel queue.
- * STATION (HS2.0) uses the auxiliary context of the FW,
- * and hence needs to be sent on the aux queue.
- */
- sta_id = mvm->aux_sta.sta_id;
- queue = mvm->aux_queue;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 47283a358ffd..0e5fa8374103 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -892,7 +892,7 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -900,8 +900,6 @@ static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
@@ -1130,10 +1128,9 @@ void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
spin_unlock(&mvm->tcm.lock);
if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (iwl_mvm_request_statistics(mvm, true))
handle_uapsd = false;
- mutex_unlock(&mvm->mutex);
}
spin_lock(&mvm->tcm.lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index ebf11f276b20..e63efbf809f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -216,7 +216,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 0fa92704cd14..344e4d5a1c6e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -218,7 +218,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index fed2754be680..9ad43464b702 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -503,7 +503,37 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},
@@ -997,32 +1027,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-/* Bz */
-/* FIXME: need to change the naming according to the actual CRF */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_fm_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_fm_name),
-
-/* Ga (Gl) */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_gl_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_mtp_name),
-
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
@@ -1103,6 +1107,32 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+/* Bz */
+/* FIXME: need to change the naming according to the actual CRF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
+/* Ga (Gl) */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_gl_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_mtp_name),
+
/* Sc */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
@@ -1476,6 +1506,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!iwl_trans->name)
iwl_trans->name = iwl_trans->cfg->name;
+ IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name);
+
if (iwl_trans->trans_cfg->mq_rx_supported) {
if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
ret = -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a7eebe400b5b..b59de4f80b4b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -22,7 +22,6 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
-#include "queue/tx.h"
#include "iwl-context-info.h"
/*
@@ -273,7 +272,7 @@ enum iwl_pcie_fw_reset_state {
};
/**
- * enum wl_pcie_imr_status - imr dma transfer state
+ * enum iwl_pcie_imr_status - imr dma transfer state
* @IMR_D2S_IDLE: default value of the dma transfer
* @IMR_D2S_REQUESTED: dma transfer requested
* @IMR_D2S_COMPLETED: dma transfer completed
@@ -287,6 +286,58 @@ enum iwl_pcie_imr_status {
};
/**
+ * struct iwl_pcie_txqs - TX queues data
+ *
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @page_offs: offset from skb->cb to mac header page pointer
+ * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
+ * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @cmd: command queue data
+ * @cmd.fifo: FIFO number
+ * @cmd.q_id: queue ID
+ * @cmd.wdg_timeout: watchdog timeout
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
+ */
+struct iwl_pcie_txqs {
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct dma_pool *bc_pool;
+ size_t bc_tbl_size;
+ bool bc_table_dword;
+ u8 page_offs;
+ u8 dev_cmd_offs;
+ struct iwl_tso_hdr_page __percpu *tso_hdr_page;
+
+ struct {
+ u8 fifo;
+ u8 q_id;
+ unsigned int wdg_timeout;
+ } cmd;
+
+ struct {
+ u8 max_tbs;
+ u16 size;
+ u8 addr_size;
+ } tfd;
+
+ struct iwl_dma_ptr scd_bc_tbls;
+
+ u8 queue_alloc_cmd_ver;
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -367,6 +418,7 @@ enum iwl_pcie_imr_status {
* @is_down: indicates the NIC is down
* @isr_stats: interrupt statistics
* @napi_dev: (fake) netdev for NAPI registration
+ * @txqs: transport tx queues data.
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -464,6 +516,8 @@ struct iwl_trans_pcie {
enum iwl_pcie_imr_status imr_status;
wait_queue_head_t imr_waitq;
char rf_name[32];
+
+ struct iwl_pcie_txqs txqs;
};
static inline struct iwl_trans_pcie *
@@ -538,6 +592,33 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
+
+struct iwl_tso_hdr_page {
+ struct page *page;
+ u8 *pos;
+};
+
+/*
+ * Note that we put this struct *last* in the page. By doing that, we ensure
+ * that no TB referencing this page can trigger the 32-bit boundary hardware
+ * bug.
+ */
+struct iwl_tso_page_info {
+ dma_addr_t dma_addr;
+ struct page *next;
+ refcount_t use_count;
+};
+
+#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
+#define IWL_TSO_PAGE_INFO(addr) \
+ ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
+ IWL_TSO_PAGE_DATA_SIZE))
+
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
@@ -552,10 +633,170 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
-int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr);
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room);
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta);
+
+static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
+{
+ dma_addr_t res;
+
+ res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
+ res += (unsigned long)addr & ~PAGE_MASK;
+
+ return res;
+}
+
+static inline dma_addr_t
+iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->first_tb_dma +
+ sizeof(struct iwl_pcie_first_tb_buf) * idx;
+}
+
+static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->trans_cfg->gen2)
+ idx = iwl_txq_get_cmd_index(txq, idx);
+
+ return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
+}
+
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
+
+static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+ txq->id);
+ }
+}
+
+/**
+ * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
+{
+ return ++index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_txq_dec_wrap - decrement queue index, wrap back to end
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
+{
+ return --index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
+
+static inline void
+iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
+ }
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len);
+
+static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd);
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int timeout);
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
+ int queue_size);
+
+static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+
+ if (trans->trans_cfg->gen2) {
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
+
+ return le16_to_cpu(tfh_tb->tb_len);
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush);
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze);
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);
/*****************************************************
* Error handling
@@ -822,12 +1063,51 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
void iwl_pcie_rx_allocator_work(struct work_struct *data);
+/* common trans ops for all generations transports */
+void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords);
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx);
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset);
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value);
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val);
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
+void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+
+/* transport gen 1 exported functions */
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill);
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
+
/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
@@ -849,7 +1129,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
@@ -864,5 +1144,7 @@ void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 984d7bcd381f..afb88eab8174 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1301,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1678,6 +1678,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
*/
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
@@ -1694,9 +1695,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- if (!trans->txqs.txq[i])
+ if (!trans_pcie->txqs.txq[i])
continue;
- del_timer(&trans->txqs.txq[i]->stuck_timer);
+ del_timer(&trans_pcie->txqs.txq[i]->stuck_timer);
}
/* The STATUS_FW_ERROR bit is set in this function. This must happen
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index a4a4772330cf..18dda89b7985 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
@@ -247,7 +247,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -339,16 +339,17 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
pos += scnprintf(buf + pos, buflen - pos, "\n");
}
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_pcie_reset_ict(trans);
/* make sure all queues are not stopped/used */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d5a887b3a4bb..719ddc4b72c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2007-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -127,8 +127,7 @@ out:
kfree(buf);
}
-static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
- bool retake_ownership)
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
@@ -1336,8 +1335,8 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
}
}
-static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
@@ -1423,7 +1422,7 @@ out:
return ret;
}
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
iwl_pcie_reset_ict(trans);
iwl_pcie_tx_start(trans, scd_addr);
@@ -1458,7 +1457,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
@@ -1505,9 +1504,17 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ } else {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ }
if (reset) {
/*
@@ -1552,8 +1559,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
return 0;
}
-static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
- bool reset)
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
int ret;
@@ -1571,9 +1577,9 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
return 0;
}
-static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
@@ -1586,8 +1592,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
goto out;
}
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans);
if (ret)
@@ -1874,7 +1884,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return 0;
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@@ -1886,7 +1896,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return ret;
}
-static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1906,17 +1916,17 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
}
-static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
@@ -1929,7 +1939,7 @@ static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
return 0x000FFFFF;
}
-static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1938,8 +1948,7 @@ static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
-static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
- u32 val)
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1948,20 +1957,20 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-static void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
+void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
- trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
- trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
- trans->txqs.page_offs = trans_cfg->cb_data_offs;
- trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
- trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
+ trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
+ trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
+ trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
+ trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+ trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@@ -1980,7 +1989,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
- trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
+ trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans->command_groups = trans_cfg->command_groups;
@@ -2079,15 +2088,20 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
trans->dev);
mutex_destroy(&trans_pcie->mutex);
- iwl_trans_free(trans);
-}
-static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
-{
- if (state)
- set_bit(STATUS_TPOWER_PMI, &trans->status);
- else
- clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ if (trans_pcie->txqs.tso_hdr_page) {
+ for_each_possible_cpu(i) {
+ struct iwl_tso_hdr_page *p =
+ per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
+
+ if (p && p->page)
+ __free_page(p->page);
+ }
+
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
+ }
+
+ iwl_trans_free(trans);
}
struct iwl_trans_pcie_removal {
@@ -2253,7 +2267,7 @@ out:
return true;
}
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
bool ret;
@@ -2267,7 +2281,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
return false;
}
-static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2297,8 +2311,8 @@ out:
spin_unlock_bh(&trans_pcie->reg_lock);
}
-static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
{
#define IWL_MAX_HW_ERRS 5
unsigned int num_consec_hw_errors = 0;
@@ -2347,8 +2361,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
return 0;
}
-static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
{
int offs, ret = 0;
const u32 *vals = buf;
@@ -2365,8 +2379,8 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
return ret;
}
-static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
- u32 *val)
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
{
return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
ofs, val);
@@ -2374,8 +2388,8 @@ static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data)
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2390,8 +2404,9 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
return 0;
}
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
bool overflow_tx;
@@ -2401,11 +2416,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!test_bit(txq_idx, trans->txqs.queue_used))
+ if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
- txq = trans->txqs.txq[txq_idx];
+ txq = trans_pcie->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx ||
@@ -2451,8 +2466,9 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
return 0;
}
-static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt;
int ret = 0;
@@ -2461,9 +2477,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
- if (cnt == trans->txqs.cmd.q_id)
+ if (cnt == trans_pcie->txqs.cmd.q_id)
continue;
- if (!test_bit(cnt, trans->txqs.queue_used))
+ if (!test_bit(cnt, trans_pcie->txqs.queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;
@@ -2476,8 +2492,8 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
return ret;
}
-static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
- u32 mask, u32 value)
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2636,12 +2652,13 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v;
struct iwl_trans *trans = priv->trans;
- struct iwl_txq *txq = trans->txqs.txq[state->pos];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
(unsigned int)state->pos,
- !!test_bit(state->pos, trans->txqs.queue_used),
- !!test_bit(state->pos, trans->txqs.queue_stopped));
+ !!test_bit(state->pos, trans_pcie->txqs.queue_used),
+ !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
if (txq)
seq_printf(seq,
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
@@ -2651,7 +2668,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else
seq_puts(seq, "(unallocated)");
- if (state->pos == trans->txqs.cmd.q_id)
+ if (state->pos == trans_pcie->txqs.cmd.q_id)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
@@ -3055,7 +3072,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(rf, dir, 0400);
}
-static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct cont_rec *data = &trans_pcie->fw_mon_data;
@@ -3068,10 +3085,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
+ for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
@@ -3332,15 +3350,14 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
return 0;
}
-static struct iwl_trans_dump_data *
-iwl_trans_pcie_dump_data(struct iwl_trans *trans,
- u32 dump_mask,
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
@@ -3407,7 +3424,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
data = (void *)dump_data->data;
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
- u16 tfd_size = trans->txqs.tfd.size;
+ u16 tfd_size = trans_pcie->txqs.tfd.size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
@@ -3483,7 +3500,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
return dump_data;
}
-static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
if (enable)
iwl_enable_interrupts(trans);
@@ -3491,7 +3508,7 @@ static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
iwl_disable_interrupts(trans);
}
-static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
u32 inta_addr, sw_err_bit;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -3510,81 +3527,6 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-#define IWL_TRANS_COMMON_OPS \
- .op_mode_leave = iwl_trans_pcie_op_mode_leave, \
- .write8 = iwl_trans_pcie_write8, \
- .write32 = iwl_trans_pcie_write32, \
- .read32 = iwl_trans_pcie_read32, \
- .read_prph = iwl_trans_pcie_read_prph, \
- .write_prph = iwl_trans_pcie_write_prph, \
- .read_mem = iwl_trans_pcie_read_mem, \
- .write_mem = iwl_trans_pcie_write_mem, \
- .read_config32 = iwl_trans_pcie_read_config32, \
- .configure = iwl_trans_pcie_configure, \
- .set_pmi = iwl_trans_pcie_set_pmi, \
- .sw_reset = iwl_trans_pcie_sw_reset, \
- .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
- .release_nic_access = iwl_trans_pcie_release_nic_access, \
- .set_bits_mask = iwl_trans_pcie_set_bits_mask, \
- .dump_data = iwl_trans_pcie_dump_data, \
- .d3_suspend = iwl_trans_pcie_d3_suspend, \
- .d3_resume = iwl_trans_pcie_d3_resume, \
- .interrupts = iwl_trans_pci_interrupts, \
- .sync_nmi = iwl_trans_pcie_sync_nmi, \
- .imr_dma_data = iwl_trans_pcie_copy_imr \
-
-static const struct iwl_trans_ops trans_ops_pcie = {
- IWL_TRANS_COMMON_OPS,
- .start_hw = iwl_trans_pcie_start_hw,
- .fw_alive = iwl_trans_pcie_fw_alive,
- .start_fw = iwl_trans_pcie_start_fw,
- .stop_device = iwl_trans_pcie_stop_device,
-
- .send_cmd = iwl_pcie_enqueue_hcmd,
-
- .tx = iwl_trans_pcie_tx,
- .reclaim = iwl_txq_reclaim,
-
- .txq_disable = iwl_trans_pcie_txq_disable,
- .txq_enable = iwl_trans_pcie_txq_enable,
-
- .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
-
- .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
-
- .freeze_txq_timer = iwl_trans_txq_freeze_timer,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
-#endif
-};
-
-static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
- IWL_TRANS_COMMON_OPS,
- .start_hw = iwl_trans_pcie_start_hw,
- .fw_alive = iwl_trans_pcie_gen2_fw_alive,
- .start_fw = iwl_trans_pcie_gen2_start_fw,
- .stop_device = iwl_trans_pcie_gen2_stop_device,
-
- .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
-
- .tx = iwl_txq_gen2_tx,
- .reclaim = iwl_txq_reclaim,
-
- .set_q_ptrs = iwl_txq_set_q_ptrs,
-
- .txq_alloc = iwl_txq_dyn_alloc,
- .txq_free = iwl_txq_dyn_free,
- .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
- .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
- .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm,
- .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
- .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power,
- .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
-#endif
-};
-
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg_trans_params *cfg_trans)
@@ -3592,13 +3534,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
int ret, addr_size;
- const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
void __iomem * const *table;
u32 bar0;
- if (!cfg_trans->gen2)
- ops = &trans_ops_pcie;
-
/* reassign our BAR 0 if invalid due to possible runtime PM races */
pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
@@ -3611,20 +3549,65 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
cfg_trans);
if (!trans)
return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (trans->trans_cfg->gen2) {
+ trans_pcie->txqs.tfd.addr_size = 64;
+ trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans_pcie->txqs.tfd.addr_size = 36;
+ trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
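/*
 * Illustrative note (the exact constants are assumptions, not taken from
 * this patch): the gen2 TFH TFD format carries more TBs per descriptor
 * than the legacy 36-bit format (IWL_TFH_NUM_TBS vs. IWL_NUM_OF_TBS),
 * and IWL_TRANS_PCIE_MAX_FRAGS() derives the skb fragment budget from
 * txqs.tfd.max_tbs after reserving TBs for the first-TB scratch buffer
 * and the command/header mapping.
 */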
+
+ trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ if (!trans_pcie->txqs.tso_hdr_page) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
+ else
+ trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (trans->trans_cfg->gen2) {
+ trans_pcie->txqs.bc_pool =
+ dmam_pool_create("iwlwifi:bc", trans->dev,
+ trans_pcie->txqs.bc_tbl_size,
+ 256, 0);
+ if (!trans_pcie->txqs.bc_pool) {
+ ret = -ENOMEM;
+ goto out_free_tso;
+ }
+ }
+
+ /* Some things must not change even if the config does */
+ WARN_ON(trans_pcie->txqs.tfd.addr_size !=
+ (trans->trans_cfg->gen2 ? 64 : 36));
+
/* Initialize NAPI here - it should be before registering to mac80211
* in the opmode but after the HW struct is allocated.
*/
trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
if (!trans_pcie->napi_dev) {
ret = -ENOMEM;
- goto out_free_trans;
+ goto out_free_tso;
}
/* The private struct in netdev is a pointer to struct iwl_trans_pcie */
priv = netdev_priv(trans_pcie->napi_dev);
@@ -3663,7 +3646,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);
- addr_size = trans->txqs.tfd.addr_size;
+ addr_size = trans_pcie->txqs.tfd.addr_size;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -3766,6 +3749,8 @@ out_no_pci:
destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_ndev:
free_netdev(trans_pcie->napi_dev);
+out_free_tso:
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index aabbef114bc2..2e780fb2da42 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2023 Intel Corporation
+ * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -11,7 +11,1180 @@
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
-#include "queue/tx.h"
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "iwl-scd.h"
+
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ struct page *ret;
+ dma_addr_t phys;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ info = IWL_TSO_PAGE_INFO(page_address(ret));
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(ret);
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+
+ /* set the chaining pointer to the previous page if there */
+ info->next = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta,
+ bool unmap)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on a 32-bit boundary,
+ * then the next TB may be accessed with the wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ /*
+ * This is a bit odd, but performance does not matter here; what
+ * matters is meeting the expectations of the calling code and of
+ * the TB cleanup function.
+ *
+ * As such, if unmap is set, then create another mapping for the TB
+ * entry as it will be unmapped later. On the other hand, if it is not
+ * set, then the TB entry will not be unmapped and instead we simply
+ * reference and sync the mapping that get_workaround_page() created.
+ */
+ if (unmap) {
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ } else {
+ phys = iwl_pcie_get_tso_page_phys(page_address(page));
+ dma_sync_single_for_device(trans->dev, phys, len,
+ DMA_TO_DEVICE);
+ }
+
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (!unmap)
+ goto trace;
+
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
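/*
 * Illustrative sketch, not part of the patch above: the workaround is
 * driven by a boundary test along these lines - a TB is problematic when
 * its start address and its one-past-end address differ in their upper
 * 32 bits, i.e. the buffer crosses (or ends exactly on) a 2^32 DMA
 * boundary.  The helper name and exact body here are assumptions for
 * illustration only.
 */
static inline bool example_tb_crosses_4g_boundary(dma_addr_t addr, u16 len)
{
	return upper_32_bits(addr) != upper_32_bits(addr + len);
}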
+
+static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta,
+ int start_len,
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ dma_addr_t start_hdr_phys;
+ u16 length, amsdu_pad;
+ u8 *start_hdr;
+ struct sg_table *sgt;
+ struct tso_t tso;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+ &dev_cmd->hdr, start_len, 0);
+
+ ip_hdrlen = skb_network_header_len(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
+
+ /* Our device supports 9 segments at most, so the headers fit in 1 page */
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ if (!sgt)
+ return -ENOMEM;
+
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * Pull the ieee80211 header to be able to use TSO core,
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left = min_t(unsigned int, mss, total_len);
+ unsigned int tb_len;
+ dma_addr_t tb_phys;
+ u8 *pos_hdr = start_hdr;
+
+ total_len -= data_left;
+
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well, which will be considered
+ * part of the MAC header.
+ */
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
+
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ tb_len = pos_hdr - start_hdr;
+ tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it, so we can't hit
+ * the buggy scenario.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, tb_len);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = pos_hdr;
+
+ /* put the payload */
+ while (data_left) {
+ int ret;
+
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
+ goto out_err;
+
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL, false);
+ if (ret)
+ goto out_err;
+
+ data_left -= tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
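/*
 * Worked example for the hdr_room calculation above (illustrative
 * numbers, assuming IPv4/TCP without options): with the 802.11 header
 * already accounted for, total_len = 6000 and mss = 1500 give
 * DIV_ROUND_UP(6000, 1500) = 4 subframes.  Each subframe header needs at
 * most 3 bytes of A-MSDU padding plus snap_ip_tcp_hdrlen = 8 + 20 + 20 =
 * 48 bytes and a 14-byte ethhdr, so hdr_room = 4 * (3 + 48 + 14) = 260
 * bytes are reserved from the TSO header page before tso_build_hdr()
 * fills them in.
 */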
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len;
+ void *tb1_addr;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
+ len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
+
+ if (!fragsz)
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len,
+ bool pad)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len, tb1_len, tb2_len;
+ void *tb1_addr;
+ struct sk_buff *frag;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL, true);
+ if (ret)
+ goto out_err;
+ }
+
+ if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+ goto out_err;
+
+ skb_walk_frags(skb, frag) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL,
+ true);
+ if (ret)
+ goto out_err;
+ if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static
+struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
+ IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
+ IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /*
+ * Only build A-MSDUs here if doing so by GSO, otherwise it may be
+ * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
+ * built in the higher layers already.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size)
+ return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+ return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len, !amsdu);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
+{
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
+ max = q->n_window;
+ else
+ max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
+
+ /*
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
+}
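/*
 * Worked example (illustrative numbers): with max_tfd_queue_size = 256
 * and n_window = 256, max = 255.  For write_ptr = 10 and read_ptr = 250
 * the wrap-around subtraction gives used = (10 - 250) & 255 = 16, so
 * 255 - 16 = 239 slots are still free.  Masking with (size - 1) is what
 * keeps the difference well defined across the pointer wrap.
 */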
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+ * to SRAM - 0 for one chunk, 1 for two, and so on.
+ * If, for example, the TFD contains only 3 TBs, then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched.
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
+ /* Starting from AX210, the HW expects bytes */
+ WARN_ON(trans_pcie->txqs.bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
+ scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
+ } else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
+ /* Before AX210, the HW expects DW */
+ WARN_ON(!trans_pcie->txqs.bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
+}
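/*
 * Worked example (illustrative): a TFD carrying 3 TBs occupies 32 bytes
 * (header plus three TB entries), so num_fetch_chunks =
 * DIV_ROUND_UP(32, 64) - 1 = 0 and the device fetches a single 64-byte
 * chunk.  On AX210 and later a 1200-byte frame then yields
 * bc_ent = cpu_to_le16(1200 | (0 << 14)); before AX210 the length is
 * first converted to dwords, i.e. DIV_ROUND_UP(1200, 4) = 300.
 */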
+
+static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_txq_gen2_get_num_tbs(tfd);
+ struct iwl_tfh_tb *tb;
+
+ /* Only WARN here so we know about the issue; returning an error
+ * would mess up our unmap path, because not every place currently
+ * checks for errors returned from this function - it can only
+ * return an error if there's no more space, and so when we know
+ * there is enough we don't always check ...
+ */
+ WARN(iwl_txq_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum of max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans_pcie->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen2_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+}
+
+static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->entries)
+ return;
+
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, idx));
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from an irqs-disabled context.
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb.
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
+/*
+ * iwl_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
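/*
 * Worked example (illustrative): for txq->id = 5 and write_ptr = 12 the
 * doorbell write is 12 | (5 << 16) = 0x5000c, i.e. the queue id in the
 * upper half-word and the new write index in the lower one.
 */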
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ u16 cmd_len;
+ int idx;
+ void *tfd;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
+
+ /* don't put the packet on the ring, if there is no room */
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans_pcie->txqs.dev_cmd_offs);
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[idx].skb = skb;
+ txq->entries[idx].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(idx)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[idx].meta;
+ memset(out_meta, 0, sizeof(*out_meta));
+
+ tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
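/*
 * Hypothetical caller sketch (function names and error handling are
 * assumptions, not part of this patch): on success - including the
 * overflow-queue case above - ownership of both skb and dev_cmd passes
 * to the queue until the TX status/reclaim path hands them back, so a
 * caller only cleans up on error:
 *
 *	ret = iwl_txq_gen2_tx(trans, skb, dev_cmd, txq_id);
 *	if (ret) {
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 *		ieee80211_free_txskb(hw, skb);
 *	}
 */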
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (!WARN_ON_ONCE(!skb))
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_trans_pcie_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_txq_gen2_free - Deallocate DMA queue.
+ * @txq_id: transmit queue index to deallocate.
+ *
+ * Empty the queue by removing and destroying all BDs,
+ * free all buffers and DMA memory, and release the
+ * queue structure itself.
+ */
+static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ int i;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txqs.txq[txq_id];
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_txq_gen2_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->txqs.cmd.q_id)
+ for (i = 0; i < txq->n_window; i++) {
+ kfree_sensitive(txq->entries[i].cmd);
+ kfree_sensitive(txq->entries[i].free_buf);
+ }
+ del_timer_sync(&txq->stuck_timer);
+
+ iwl_txq_gen2_free_memory(trans, txq);
+
+ trans_pcie->txqs.txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans_pcie->txqs.queue_used);
+}
+
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
+ int ret;
+
+ WARN_ON(!trans_pcie->txqs.bc_tbl_size);
+
+ bc_tbl_size = trans_pcie->txqs.bc_tbl_size;
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return ERR_PTR(-EINVAL);
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return ERR_PTR(-ENOMEM);
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = iwl_pcie_txq_alloc(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_txq_init(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ return txq;
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ERR_PTR(ret);
+}
+
+static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ int ret, qid;
+ u32 wr_ptr;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+ sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error_free_resp;
+ }
+
+ rsp = (void *)hcmd->resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+ wr_ptr = le16_to_cpu(rsp->write_pointer);
+
+ if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (WARN_ONCE(trans_pcie->txqs.txq[qid],
+ "queue %d already allocated\n", qid)) {
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ txq->id = qid;
+ trans_pcie->txqs.txq[qid] = txq;
+ wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = wr_ptr;
+ txq->write_ptr = wr_ptr;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ iwl_free_resp(hcmd);
+ return qid;
+
+error_free_resp:
+ iwl_free_resp(hcmd);
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ union {
+ struct iwl_tx_queue_cfg_cmd old;
+ struct iwl_scd_queue_cfg_cmd new;
+ } cmd;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ /* take the min with bytecount table entries allowed */
+ size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16));
+ /* but must be power of 2 values for calculating read/write pointers */
+ size = rounddown_pow_of_two(size);
+
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->hw_rev_step == SILICON_A_STEP) {
+ size = 4096;
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ } else {
+ do {
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ if (!IS_ERR(txq))
+ break;
+
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n",
+ size, sta_mask, tid,
+ PTR_ERR(txq));
+ size /= 2;
+ } while (size >= 16);
+ }
+
+ if (IS_ERR(txq))
+ return PTR_ERR(txq);
+
+ if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) {
+ memset(&cmd.old, 0, sizeof(cmd.old));
+ cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd.old.tid = tid;
+
+ if (hweight32(sta_mask) != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ cmd.old.sta_id = ffs(sta_mask) - 1;
+
+ hcmd.id = SCD_QUEUE_CFG;
+ hcmd.len[0] = sizeof(cmd.old);
+ hcmd.data[0] = &cmd.old;
+ } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) {
+ memset(&cmd.new, 0, sizeof(cmd.new));
+ cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+ cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+ cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.new.u.add.flags = cpu_to_le32(flags);
+ cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+ cmd.new.u.add.tid = tid;
+
+ hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+ hcmd.len[0] = sizeof(cmd.new);
+ hcmd.data[0] = &cmd.new;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ return iwl_pcie_txq_alloc_response(trans, txq, &hcmd);
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
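/*
 * Illustrative usage (parameter values are assumptions): a request for a
 * 512-entry queue on most devices tries 512 first and, if the allocation
 * fails under memory pressure, the loop above retries with 256, 128, ...
 * down to 16 before giving up; BZ A-step hardware always uses 4096.
 *
 *	int qid = iwl_txq_dyn_alloc(trans, 0, BIT(sta_id), tid,
 *				    512, wdg_timeout);
 *	if (qid < 0)
 *		return qid;
 */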
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_txq_gen2_free(trans, queue);
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) {
+ if (!trans_pcie->txqs.txq[i])
+ continue;
+
+ iwl_txq_gen2_free(trans, i);
+ }
+}
+
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *queue;
+ int ret;
+
+ /* alloc and init the tx queue */
+ if (!trans_pcie->txqs.txq[txq_id]) {
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ IWL_ERR(trans, "Not enough memory for tx queue\n");
+ return -ENOMEM;
+ }
+ trans_pcie->txqs.txq[txq_id] = queue;
+ ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ queue = trans_pcie->txqs.txq[txq_id];
+ }
+
+ ret = iwl_txq_init(trans, queue, queue_size,
+ (txq_id == trans_pcie->txqs.cmd.q_id));
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans_pcie->txqs.queue_used);
+
+ return 0;
+
+error:
+ iwl_txq_gen2_tx_free(trans);
+ return ret;
+}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -28,7 +1201,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -130,7 +1303,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
- /* re-initialize to NULL */
+ /* re-initialize, this also marks the SG list as unused */
memset(out_meta, 0, sizeof(*out_meta));
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
@@ -143,7 +1316,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -191,7 +1364,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fa8eba47dc4c..22d482ae53d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,16 +1,22 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
+#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "fw/api/debug.h"
+#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
@@ -72,6 +78,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -84,7 +91,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans->txqs.cmd.q_id &&
+ txq_id != trans_pcie->txqs.cmd.q_id &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -115,12 +122,13 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans->txqs.txq[i];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (!test_bit(i, trans->txqs.queue_used))
+ if (!test_bit(i, trans_pcie->txqs.queue_used))
continue;
spin_lock_bh(&txq->lock);
@@ -132,23 +140,43 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
+static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
+{
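+	/* Gen1 TB layout: 'lo' holds the low 32 bits of the DMA address;
+	 * 'hi_n_len' packs the upper 4 address bits (bits 0-3) with the
+	 * length (bits 4-15).
+	 */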
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ hi_n_len |= iwl_get_dma_hi_addr(addr);
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *tfd;
u32 num_tbs;
- tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+ tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
if (reset)
- memset(tfd, 0, trans->txqs.tfd.size);
+ memset(tfd, 0, trans_pcie->txqs.tfd.size);
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
/* Each TFD can point to a maximum max_tbs Tx buffers */
- if (num_tbs >= trans->txqs.tfd.max_tbs) {
+ if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans->txqs.tfd.max_tbs);
+ trans_pcie->txqs.tfd.max_tbs);
return -EINVAL;
}
@@ -156,7 +184,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len);
+ iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
return num_tbs;
}
@@ -181,36 +209,206 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
spin_unlock(&trans_pcie->reg_lock);
}
+static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
+ struct page *page)
+{
+ struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
+
+ /* Decrease internal use count and unmap/free page if needed */
+ if (refcount_dec_and_test(&info->use_count)) {
+ dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ __free_page(page);
+ }
+}
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *next;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct iwl_tso_page_info *info;
+ struct page *tmp = next;
+
+ info = IWL_TSO_PAGE_INFO(page_address(next));
+ next = info->next;
+
+ /* Unmap the scatter gather list that is on the last page */
+ if (!next && cmd_meta->sg_offset) {
+ struct sg_table *sgt;
+
+ sgt = (void *)((u8 *)page_address(tmp) +
+ cmd_meta->sg_offset);
+
+ dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
+ }
+
+ iwl_pcie_free_and_unmap_tso_page(trans, tmp);
+ }
+}
+
+static inline dma_addr_t
+iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr;
+ dma_addr_t hi_len;
+
+ addr = get_unaligned_le32(&tb->lo);
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+}
+
+static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
+ struct iwl_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
+static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ }
+
+ meta->tbs = 0;
+
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+}
+
+/**
+ * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans: transport private data
+ * @txq: tx queue
+ * @read_ptr: the TXQ read_ptr to free
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ int read_ptr)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->reclaim_lock);
+
+ if (!txq->entries)
+ return;
+
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ if (trans->trans_cfg->gen2)
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, read_ptr));
+ else
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
+ txq, read_ptr);
+
+ /* free SKB */
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
/*
* iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
*/
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
if (!txq) {
IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
return;
}
- spin_lock_bh(&txq->lock);
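+	/* Nest txq->lock inside reclaim_lock, matching the lock ordering used
+	 * by the reclaim path.
+	 */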
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans->txqs.cmd.q_id) {
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
+ struct iwl_cmd_meta *cmd_meta =
+ &txq->entries[txq->read_ptr].meta;
if (WARN_ON_ONCE(!skb))
continue;
- iwl_txq_free_tso_page(trans, skb);
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
}
- iwl_txq_free_tfd(trans, txq);
+ iwl_txq_free_tfd(trans, txq, txq->read_ptr);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr &&
- txq_id == trans->txqs.cmd.q_id)
+ txq_id == trans_pcie->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
}
@@ -220,10 +418,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_op_mode_free_skb(trans->op_mode, skb);
}
- spin_unlock_bh(&txq->lock);
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
/* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
+ iwl_trans_pcie_wake_queue(trans, txq);
}
/*
@@ -236,7 +435,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
*/
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -246,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
+ if (txq_id == trans_pcie->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -255,7 +455,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans->txqs.tfd.size *
+ trans_pcie->txqs.tfd.size *
trans->trans_cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
@@ -285,9 +485,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
 	/* make sure all queues are not stopped/used */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -301,7 +502,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans->txqs.scd_bc_tbls.dma >> 10);
+ trans_pcie->txqs.scd_bc_tbls.dma >> 10);
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
@@ -309,9 +510,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
- trans->txqs.cmd.fifo,
- trans->txqs.cmd.wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
+ trans_pcie->txqs.cmd.fifo,
+ trans_pcie->txqs.cmd.wdg_timeout);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -347,7 +548,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
if (trans->trans_cfg->gen2)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -422,9 +623,10 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* This can happen: start_hw, stop_device */
if (!trans_pcie->txq_memory)
@@ -448,7 +650,8 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* Tx queues */
if (trans_pcie->txq_memory) {
@@ -456,7 +659,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
- trans->txqs.txq[txq_id] = NULL;
+ trans_pcie->txqs.txq[txq_id] = NULL;
}
}
@@ -465,7 +668,135 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
- iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
+}
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
+
+ if (trans->trans_cfg->gen2) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
+ /* TODO: access new SCD registers and dump them */
+ return;
+ }
+
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+}
+
+static void iwl_txq_stuck_timer(struct timer_list *t)
+{
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+ struct iwl_trans *trans = txq->trans;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->read_ptr == txq->write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ iwl_txq_log_scd_error(trans, txq);
+
+ iwl_force_nmi(trans);
+}
+
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t num_entries = trans->trans_cfg->gen2 ?
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t tfd_sz;
+ size_t tb0_buf_sz;
+ int i;
+
+ if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
+ return -EINVAL;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
+
+ timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
+ txq->trans = trans;
+
+ txq->n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device
+ */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->dma_addr, GFP_KERNEL);
+ if (!txq->tfds)
+ goto error;
+
+ BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
+
+ tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
+
+ txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+ &txq->first_tb_dma,
+ GFP_KERNEL);
+ if (!txq->first_tb_bufs)
+ goto err_free_tfds;
+
+ for (i = 0; i < num_entries; i++) {
+ void *tfd = iwl_txq_get_tfd(trans, txq, i);
+
+ if (trans->trans_cfg->gen2)
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+ else
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+ }
+
+ return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+ txq->tfds = NULL;
+error:
+ if (txq->entries && cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
}
/*
@@ -491,7 +822,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -517,7 +848,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -525,14 +856,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_ba_txq_size);
- trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
- cmd_queue);
+ trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
+ slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans->txqs.txq[txq_id]->id = txq_id;
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
}
return 0;
@@ -543,6 +874,69 @@ error:
return ret;
}
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+ q->n_window = slots_num;
+
+ /* slots_num must be power-of-two size, otherwise
+ * iwl_txq_get_cmd_index is broken.
+ */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
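+	/* Free-space watermarks scaled from the window size: low_mark is used
+	 * when deciding to wake a stopped queue, high_mark when deciding to
+	 * stop it.
+	 */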
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
+
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ u32 tfd_queue_max_size =
+ trans->trans_cfg->base_params->max_tfd_queue_size;
+ int ret;
+
+ txq->need_update = false;
+
+ /* max_tfd_queue_size must be power-of-two size, otherwise
+ * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
+ */
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(txq, slots_num);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+ spin_lock_init(&txq->reclaim_lock);
+
+ if (cmd_queue) {
+ static struct lock_class_key iwl_txq_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
+ }
+
+ __skb_queue_head_init(&txq->overflow_q);
+
+ return 0;
+}
+
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -571,7 +965,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -579,7 +973,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_ba_txq_size);
- ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
+ ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
@@ -593,7 +987,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans->txqs.txq[txq_id]->dma_addr >> 8);
+ trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -641,6 +1035,42 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
return 0;
}
+static void iwl_txq_progress(struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->wd_timeout)
+ return;
+
+ /*
+ * station is asleep and we send data - that must
+ * be uAPSD or PS-Poll. Don't rearm the timer.
+ */
+ if (txq->frozen)
+ return;
+
+ /*
+ * if empty delete timer, otherwise move timer forward
+ * since we're making progress on this queue
+ */
+ if (txq->read_ptr == txq->write_ptr)
+ del_timer(&txq->stuck_timer);
+ else
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+}
+
+static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
+ int read_ptr, int write_ptr)
+{
+ int index = iwl_txq_get_cmd_index(q, i);
+ int r = iwl_txq_get_cmd_index(q, read_ptr);
+ int w = iwl_txq_get_cmd_index(q, write_ptr);
+
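+	/* The TFD queue is a circular buffer: when the write pointer has
+	 * wrapped past the read pointer, the in-use region is split across
+	 * the end of the buffer.
+	 */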
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
+}
+
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -650,7 +1080,8 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
*/
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
int nfreed = 0;
u16 r;
@@ -660,8 +1091,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
- (!iwl_txq_used(txq, idx))) {
- WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
+ (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
+ WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
trans->trans_cfg->base_params->max_tfd_queue_size,
@@ -720,11 +1151,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
int fifo = -1;
bool scd_bug = false;
- if (test_and_set_bit(txq_id, trans->txqs.queue_used))
+ if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
@@ -733,7 +1164,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans->txqs.cmd.q_id &&
+ if (txq_id == trans_pcie->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, 0);
@@ -741,7 +1172,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans->txqs.cmd.q_id)
+ if (txq_id != trans_pcie->txqs.cmd.q_id)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -811,7 +1242,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans->txqs.cmd.q_id &&
+ if (txq_id == trans_pcie->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
@@ -830,7 +1261,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
txq->ampdu = !shared_mode;
}
@@ -843,8 +1275,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
- trans->txqs.txq[txq_id]->frozen = false;
+ trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+ trans_pcie->txqs.txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -852,7 +1284,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
+ if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", txq_id);
return;
@@ -866,7 +1298,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans->txqs.txq[txq_id]->ampdu = false;
+ trans_pcie->txqs.txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -875,12 +1307,13 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans->txqs.txq[i];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (i == trans->txqs.cmd.q_id)
+ if (i == trans_pcie->txqs.cmd.q_id)
continue;
/* we skip the command queue (obviously) so it's OK to nest */
@@ -912,7 +1345,8 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1024,7 +1458,8 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ /* re-initialize, this also marks the SG list as unused */
+ memset(out_meta, 0, sizeof(*out_meta));
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
@@ -1038,7 +1473,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1046,7 +1481,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
@@ -1097,7 +1532,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1196,14 +1631,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans->txqs.cmd.q_id,
+ if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
+ txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1306,19 +1741,169 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
+static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
+ size_t len, struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ dma_addr_t phys;
+ void *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
+
+ if (!p->page)
+ goto alloc;
+
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+ goto out;
+ }
+
+ /* We don't have enough room on this page, get a new one. */
+ iwl_pcie_free_and_unmap_tso_page(trans, p->page);
+
+alloc:
+ p->page = alloc_page(GFP_ATOMIC);
+ if (!p->page)
+ return NULL;
+ p->pos = page_address(p->page);
+
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+
+ /* set the chaining pointer to NULL */
+ info->next = NULL;
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(p->page);
+ p->page = NULL;
+
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+out:
+ *page_ptr = p->page;
+ /* Return an internal reference for the caller */
+ refcount_inc(&info->use_count);
+ ret = p->pos;
+ p->pos += len;
+
+ return ret;
+}
+
+/**
+ * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
+ * @sgt: scatter gather table
+ * @addr: Virtual address
+ *
+ * Find the scatterlist entry that contains the given virtual address and
+ * return the corresponding physical address for the TB entry.
+ *
+ * Returns: Address for TB entry
+ */
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (addr >= sg_virt(sg) &&
+ (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg))
+ return sg_dma_address(sg) +
+ ((unsigned long)addr - (unsigned long)sg_virt(sg));
+ }
+
+ WARN_ON_ONCE(1);
+
+ return DMA_MAPPING_ERROR;
+}
+
+/**
+ * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
+ * @trans: transport private data
+ * @skb: the SKB to map
+ * @cmd_meta: command meta to store the scatter list information for unmapping
+ * @hdr: output argument for TSO headers
+ * @hdr_room: requested length for TSO headers
+ *
+ * Allocate space for a scatter gather list and TSO headers and map the SKB
+ * using the scatter gather list. The SKB is unmapped again when the page is
+ * freed at the end of the operation.
+ *
+ * Returns: newly allocated and mapped scatter gather table with list
+ */
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room)
+{
+ struct sg_table *sgt;
+
+ if (WARN_ON_ONCE(skb_has_frag_list(skb)))
+ return NULL;
+
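+	/* Reserve room on the TSO page for the headers plus an aligned
+	 * struct sg_table and one scatterlist entry per fragment (+1 for the
+	 * linear part of the SKB).
+	 */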
+ *hdr = iwl_pcie_get_page_hdr(trans,
+ hdr_room + __alignof__(struct sg_table) +
+ sizeof(struct sg_table) +
+ (skb_shinfo(skb)->nr_frags + 1) *
+ sizeof(struct scatterlist),
+ skb);
+ if (!*hdr)
+ return NULL;
+
+ sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
+ sgt->sgl = (void *)(sgt + 1);
+
+ sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+ if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+ return NULL;
+
+ /* And map the entire SKB */
+ if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
+ return NULL;
+
+ /* Store non-zero (i.e. valid) offset for unmapping */
+ cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
+
+ return sgt;
+}
+
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
struct iwl_device_tx_cmd *dev_cmd,
u16 tb1_len)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
u16 length, iv_len, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
+ dma_addr_t start_hdr_phys;
+ u8 *start_hdr, *pos_hdr;
+ struct sg_table *sgt;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -1328,7 +1913,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans->txqs.tfd.size,
+ trans_pcie->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
ip_hdrlen = skb_network_header_len(skb);
@@ -1341,13 +1926,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 	/* Our device supports 9 segments at most; it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ if (!sgt)
return -ENOMEM;
- start_hdr = hdr_page->pos;
- memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
- hdr_page->pos += iv_len;
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+ pos_hdr = start_hdr;
+ memcpy(pos_hdr, skb->data + hdr_len, iv_len);
+ pos_hdr += iv_len;
/*
* Pull the ieee80211 header + IV to be able to use TSO core,
@@ -1370,45 +1956,43 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
min_t(unsigned int, mss, total_len);
unsigned int hdr_tb_len;
dma_addr_t hdr_tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
+ u8 *subf_hdrs_start = pos_hdr;
total_len -= data_left;
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
/*
* This will copy the SNAP as well which will be considered
* as MAC header.
*/
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
- hdr_page->pos += snap_ip_tcp_hdrlen;
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ hdr_tb_len = pos_hdr - start_hdr;
+ hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
- hdr_tb_len = hdr_page->pos - start_hdr;
- hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
- hdr_tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
- return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
hdr_tb_phys, hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+ le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
/* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
+ start_hdr = pos_hdr;
/* put the payload */
while (data_left) {
@@ -1416,9 +2000,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left);
dma_addr_t tb_phys;
- tb_phys = dma_map_single(trans->dev, tso.data,
- size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@@ -1431,6 +2015,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
}
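+	/* The subframe headers were written by the CPU into the pre-mapped TSO
+	 * page (mapped with DMA_ATTR_SKIP_CPU_SYNC), so sync them to the
+	 * device before the TFDs are used.
+	 */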
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
 	/* re-add the WiFi header and IV */
skb_push(skb, hdr_len + iv_len);
@@ -1450,9 +2037,61 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
#endif /* CONFIG_INET */
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*
+ * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
+ u8 sec_ctl = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
+
+ scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += IEEE80211_CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += IEEE80211_TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+ break;
+ }
+ if (trans_pcie->txqs.bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
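+	/* Byte-count table entry: length in the low 12 bits (bytes, or dwords
+	 * when bc_table_dword is set), station ID in the top 4 bits.
+	 */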
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
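+	/* The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end
+	 * of the table; the scheduler hardware expects this duplicate region.
+	 */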
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
@@ -1467,14 +2106,14 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;
- txq = trans->txqs.txq[txq_id];
+ txq = trans_pcie->txqs.txq[txq_id];
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -1495,7 +2134,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
+ trans_pcie->txqs.dev_cmd_offs);
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -1533,7 +2172,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[txq->write_ptr].meta;
- out_meta->flags = 0;
+ memset(out_meta, 0, sizeof(*out_meta));
/*
* The second TB (tb1) points to the remainder of the TX command
@@ -1578,7 +2217,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans->txqs.tfd.size,
+ trans_pcie->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -1613,8 +2252,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
- iwl_txq_gen1_tfd_get_num_tbs(trans,
- tfd));
+ iwl_txq_gen1_tfd_get_num_tbs(tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
@@ -1649,3 +2287,379 @@ out_err:
spin_unlock(&txq->lock);
return -1;
}
+
+static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ int read_ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ int txq_id = txq->id;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ bc_ent;
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ int tfd_num, read_ptr, last_to_free;
+ int txq_read_ptr, txq_write_ptr;
+
+	/* This function is not meant to release the cmd queue */
+ if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
+ return;
+
+ if (WARN_ON(!txq))
+ return;
+
+ tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+
+ spin_lock_bh(&txq->reclaim_lock);
+
+ spin_lock(&txq->lock);
+ txq_read_ptr = txq->read_ptr;
+ txq_write_ptr = txq->write_ptr;
+ spin_unlock(&txq->lock);
+
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
+
+ if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+ txq_id, ssn);
+ goto out;
+ }
+
+ if (read_ptr == tfd_num)
+ goto out;
+
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
+
+ /* Since we free until index _not_ inclusive, the one before index is
+	 * the last we will free. That entry must still be in use.
+ */
+ last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
+
+ if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
+ IWL_ERR(trans,
+ "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free,
+ trans->trans_cfg->base_params->max_tfd_queue_size,
+ txq_write_ptr, txq_read_ptr);
+
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_FAKE_TX,
+ NULL);
+ goto out;
+ }
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ goto out;
+
+ for (;
+ read_ptr != tfd_num;
+ txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
+
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq_read_ptr, txq_id))
+ continue;
+
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+
+ __skb_queue_tail(skbs, skb);
+
+ txq->entries[read_ptr].skb = NULL;
+
+ if (!trans->trans_cfg->gen2)
+ iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
+ txq_read_ptr);
+
+ iwl_txq_free_tfd(trans, txq, txq_read_ptr);
+ }
+
+ spin_lock(&txq->lock);
+ txq->read_ptr = txq_read_ptr;
+
+ iwl_txq_progress(txq);
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&overflow_skbs);
+ skb_queue_splice_init(&txq->overflow_q,
+ is_flush ? skbs : &overflow_skbs);
+
+ /*
+ * We are going to transmit from the overflow queue.
+ * Remember this state so that wait_for_txq_empty will know we
+ * are adding more packets to the TFD queue. It cannot rely on
+ * the state of &txq->overflow_q, as we just emptied it, but
+ * haven't TXed the content yet.
+ */
+ txq->overflow_tx = true;
+
+ /*
+		 * This is tricky: we are in the reclaim path and are holding
+		 * the reclaim_lock, so no one will try to access the txq data
+		 * from that path. TX is stopped, so no new frames can come in
+		 * either. Bottom line: we can unlock and re-lock later.
+ */
+ spin_unlock(&txq->lock);
+
+ while ((skb = __skb_dequeue(&overflow_skbs))) {
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+ trans_pcie->txqs.dev_cmd_offs);
+
+ /*
+ * Note that we can very well be overflowing again.
+ * In that case, iwl_txq_space will be small again
+ * and we won't wake mac80211's queue.
+ */
+ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
+ }
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark)
+ iwl_trans_pcie_wake_queue(trans, txq);
+
+ spin_lock(&txq->lock);
+ txq->overflow_tx = false;
+ }
+
+ spin_unlock(&txq->lock);
+out:
+ spin_unlock_bh(&txq->reclaim_lock);
+}
+
+/* Set wr_ptr of specific device and txq */
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+
+ txq->write_ptr = ptr;
+ txq->read_ptr = txq->write_ptr;
+
+ spin_unlock_bh(&txq->lock);
+}
+
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue;
+
+ for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
+ unsigned long now;
+
+ spin_lock_bh(&txq->lock);
+
+ now = jiffies;
+
+ if (txq->frozen == freeze)
+ goto next_queue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+ freeze ? "Freezing" : "Waking", queue);
+
+ txq->frozen = freeze;
+
+ if (txq->read_ptr == txq->write_ptr)
+ goto next_queue;
+
+ if (freeze) {
+ if (unlikely(time_after(now,
+ txq->stuck_timer.expires))) {
+ /*
+ * The timer should have fired, maybe it is
+ * spinning right now on the lock.
+ */
+ goto next_queue;
+ }
+ /* remember how long until the timer fires */
+ txq->frozen_expiry_remainder =
+ txq->stuck_timer.expires - now;
+ del_timer(&txq->stuck_timer);
+ goto next_queue;
+ }
+
+ /*
+ * Wake a non-empty queue -> arm timer with the
+ * remainder before it froze
+ */
+ mod_timer(&txq->stuck_timer,
+ now + txq->frozen_expiry_remainder);
+
+next_queue:
+ spin_unlock_bh(&txq->lock);
+ }
+}
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n", cmd_str))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+ if (trans->trans_cfg->gen2)
+ cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ cmd_str, ret);
+ return ret;
+ }
+
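+	/* Block until the command completion handler clears
+	 * STATUS_SYNC_HCMD_ACTIVE, or give up after HOST_COMPLETE_TIMEOUT.
+	 */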
+ ret = wait_event_timeout(trans->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ txq->read_ptr, txq->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ cmd_str);
+ ret = -ETIMEDOUT;
+
+ iwl_trans_sync_nmi(trans);
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
+ &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ }
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
+ !(cmd->flags & CMD_SEND_IN_D3))) {
+ IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
+ return -EHOSTDOWN;
+ }
+
+ if (cmd->flags & CMD_ASYNC) {
+ int ret;
+
+		/* An asynchronous command cannot expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ if (trans->trans_cfg->gen2)
+ ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ ret = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_get_cmd_string(trans, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
deleted file mode 100644
index 6229c785c845..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ /dev/null
@@ -1,1900 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2020-2024 Intel Corporation
- */
-#include <net/tso.h>
-#include <linux/tcp.h>
-
-#include "iwl-debug.h"
-#include "iwl-io.h"
-#include "fw/api/commands.h"
-#include "fw/api/tx.h"
-#include "fw/api/datapath.h"
-#include "fw/api/debug.h"
-#include "queue/tx.h"
-#include "iwl-fh.h"
-#include "iwl-scd.h"
-#include <linux/dmapool.h>
-
-/*
- * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- u8 filled_tfd_size, num_fetch_chunks;
- u16 len = byte_cnt;
- __le16 bc_ent;
-
- if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
- return;
-
- filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
- /*
- * filled_tfd_size contains the number of filled bytes in the TFD.
- * Dividing it by 64 will give the number of chunks to fetch
- * to SRAM- 0 for one chunk, 1 for 2 and so on.
- * If, for example, TFD contains only 3 TBs then 32 bytes
- * of the TFD are used, and only one chunk of 64 bytes should
- * be fetched
- */
- num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
- /* Starting from AX210, the HW expects bytes */
- WARN_ON(trans->txqs.bc_table_dword);
- WARN_ON(len > 0x3FFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
- } else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-
- /* Before AX210, the HW expects DW */
- WARN_ON(!trans->txqs.bc_table_dword);
- len = DIV_ROUND_UP(len, 4);
- WARN_ON(len > 0xFFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
- }
-}
-
-/*
- * iwl_txq_inc_wr_ptr - Send new write index to hardware
- */
-void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
-
- /*
- * if not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
-}
-
-static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
-}
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
- dma_addr_t addr, u16 len)
-{
- int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb;
-
- /* Only WARN here so we know about the issue, but we mess up our
- * unmap path because not every place currently checks for errors
- * returned from this function - it can only return an error if
- * there's no more space, and so when we know there is enough we
- * don't always check ...
- */
- WARN(iwl_txq_crosses_4g_boundary(addr, len),
- "possible DMA problem with iova:0x%llx, len:%d\n",
- (unsigned long long)addr, len);
-
- if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
- return -EINVAL;
- tb = &tfd->tbs[idx];
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans->txqs.tfd.max_tbs);
- return -EINVAL;
- }
-
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
-
- tfd->num_tbs = cpu_to_le16(idx + 1);
-
- return idx;
-}
-
-static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- tfd->num_tbs = 0;
-
- iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
-}
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd)
-{
- int i, num_tbs;
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- }
-
- iwl_txq_set_tfd_invalid_gen2(trans, tfd);
-}
-
-void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->lock);
-
- if (!txq->entries)
- return;
-
- iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_txq_get_tfd(trans, txq, idx));
-
- skb = txq->entries[idx].skb;
-
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-static struct page *get_workaround_page(struct iwl_trans *trans,
- struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *ret;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
-
- ret = alloc_page(GFP_ATOMIC);
- if (!ret)
- return NULL;
-
- /* set the chaining pointer to the previous page if there */
- *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
- *page_ptr = ret;
-
- return ret;
-}
-
-/*
- * Add a TB and if needed apply the FH HW bug workaround;
- * meta != NULL indicates that it's a page mapping and we
- * need to dma_unmap_page() and set the meta->tbs bit in
- * this case.
- */
-static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- dma_addr_t phys, void *virt,
- u16 len, struct iwl_cmd_meta *meta)
-{
- dma_addr_t oldphys = phys;
- struct page *page;
- int ret;
-
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
-
- if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
-
- if (ret < 0)
- goto unmap;
-
- if (meta)
- meta->tbs |= BIT(ret);
-
- ret = 0;
- goto trace;
- }
-
- /*
- * Work around a hardware bug. If (as expressed in the
- * condition above) the TB ends on a 32-bit boundary,
- * then the next TB may be accessed with the wrong
- * address.
- * To work around it, copy the data elsewhere and make
- * a new mapping for it so the device will not fail.
- */
-
- if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
- ret = -ENOBUFS;
- goto unmap;
- }
-
- page = get_workaround_page(trans, skb);
- if (!page) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- memcpy(page_address(page), virt, len);
-
- phys = dma_map_single(trans->dev, page_address(page), len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
- if (ret < 0) {
- /* unmap the new allocation as single */
- oldphys = phys;
- meta = NULL;
- goto unmap;
- }
- IWL_DEBUG_TX(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys,
- (unsigned long long)phys);
-
- ret = 0;
-unmap:
- if (meta)
- dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
-trace:
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
-
- return ret;
-}
-
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb)
-{
- struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
- struct page **page_ptr;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
-
- if (WARN_ON(*page_ptr))
- return NULL;
-
- if (!p->page)
- goto alloc;
-
- /*
- * Check if there's enough room on this page
- *
- * Note that we put a page chaining pointer *last* in the
- * page - we need it somewhere, and if it's there then we
- * avoid DMA mapping the last bits of the page which may
- * trigger the 32-bit boundary hardware bug.
- *
- * (see also get_workaround_page() in tx-gen2.c)
- */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
- sizeof(void *))
- goto out;
-
- /* We don't have enough room on this page, get a new one. */
- __free_page(p->page);
-
-alloc:
- p->page = alloc_page(GFP_ATOMIC);
- if (!p->page)
- return NULL;
- p->pos = page_address(p->page);
- /* set the chaining pointer to NULL */
- *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
-out:
- *page_ptr = p->page;
- get_page(p->page);
- return p;
-}
-#endif
-
-static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len,
- struct iwl_device_tx_cmd *dev_cmd)
-{
-#ifdef CONFIG_INET
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
- struct tso_t tso;
-
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
- &dev_cmd->hdr, start_len, 0);
-
- ip_hdrlen = skb_network_header_len(skb);
- snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
- amsdu_pad = 0;
-
- /* total amount of header we may need for this A-MSDU */
- hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
-
- /* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
- return -ENOMEM;
-
- start_hdr = hdr_page->pos;
-
- /*
- * Pull the ieee80211 header to be able to use TSO core,
- * we will restore it for the tx_status flow.
- */
- skb_pull(skb, hdr_len);
-
- /*
- * Remove the length of all the headers that we don't actually
- * have in the MPDU by themselves, but that we duplicate into
- * all the different MSDUs inside the A-MSDU.
- */
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
- tso_start(skb, &tso);
-
- while (total_len) {
- /* this is the data left for this subframe */
- unsigned int data_left = min_t(unsigned int, mss, total_len);
- unsigned int tb_len;
- dma_addr_t tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
-
- total_len -= data_left;
-
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
- amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
- data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
-
- length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
-
- /*
- * This will copy the SNAP as well which will be considered
- * as MAC header.
- */
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
-
- hdr_page->pos += snap_ip_tcp_hdrlen;
-
- tb_len = hdr_page->pos - start_hdr;
- tb_phys = dma_map_single(trans->dev, start_hdr,
- tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa, this is from the TSO page and
- * we leave some space at the end of it so can't hit
- * the buggy scenario.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- tb_phys, tb_len);
- /* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
-
- /* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
-
- /* put the payload */
- while (data_left) {
- int ret;
-
- tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = dma_map_single(trans->dev, tso.data,
- tb_len, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
- tb_phys, tso.data,
- tb_len, NULL);
- if (ret)
- goto out_err;
-
- data_left -= tb_len;
- tso_build_data(skb, &tso, tb_len);
- }
- }
-
-	/* re-add the WiFi header */
- skb_push(skb, hdr_len);
-
- return 0;
-
-out_err:
-#endif
- return -EINVAL;
-}
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len;
- void *tb1_addr;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- /* do not align A-MSDU to dword as the subframe header aligns it */
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
-
- if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
- return tfd;
-
-out_err:
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- struct iwl_cmd_meta *out_meta)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr_t tb_phys;
- unsigned int fragsz = skb_frag_size(frag);
- int ret;
-
- if (!fragsz)
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- fragsz, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb_frag_address(frag),
- fragsz, out_meta);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len,
- bool pad)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len, tb1_len, tb2_len;
- void *tb1_addr;
- struct sk_buff *frag;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- if (pad)
- tb1_len = ALIGN(len, 4);
- else
- tb1_len = len;
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-
- /* set up TFD's third entry to point to remainder of skb's head */
- tb2_len = skb_headlen(skb) - hdr_len;
-
- if (tb2_len > 0) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
- tb2_len, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb->data + hdr_len, tb2_len,
- NULL);
- if (ret)
- goto out_err;
- }
-
- if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
- goto out_err;
-
- skb_walk_frags(skb, frag) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, frag->data,
- skb_headlen(frag), DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- frag->data,
- skb_headlen(frag), NULL);
- if (ret)
- goto out_err;
- if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
- goto out_err;
- }
-
- return tfd;
-
-out_err:
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static
-struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- int len, hdr_len;
- bool amsdu;
-
- /* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
- IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
- IWL_FIRST_TB_SIZE);
-
- memset(tfd, 0, sizeof(*tfd));
-
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_gen2);
- else
- len = sizeof(struct iwl_tx_cmd_gen3);
-
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- /*
- * Only build A-MSDUs here if doing so by GSO, otherwise it may be
- * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
- * built in the higher layers already.
- */
- if (amsdu && skb_shinfo(skb)->gso_size)
- return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
- out_meta, hdr_len, len);
- return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len, !amsdu);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-{
- unsigned int max;
- unsigned int used;
-
- /*
- * To avoid ambiguity between empty and completely full queues, there
- * should always be less than max_tfd_queue_size elements in the queue.
- * If q->n_window is smaller than max_tfd_queue_size, there is no need
- * to reserve any queue entries for this purpose.
- */
- if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
- max = q->n_window;
- else
- max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
-
- /*
- * max_tfd_queue_size is a power of 2, so the following is equivalent to
- * modulo by max_tfd_queue_size and is well defined.
- */
- used = (q->write_ptr - q->read_ptr) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- if (WARN_ON(used > max))
- return 0;
-
- return max - used;
-}
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id)
-{
- struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- u16 cmd_len;
- int idx;
- void *tfd;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return -EINVAL;
-
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
- "TX on unused queue %d\n", txq_id))
- return -EINVAL;
-
- if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
- __skb_linearize(skb))
- return -ENOMEM;
-
- spin_lock(&txq->lock);
-
- if (iwl_txq_space(trans, txq) < txq->high_mark) {
- iwl_txq_stop(trans, txq);
-
- /* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_txq_space(trans, txq) < 3)) {
- struct iwl_device_tx_cmd **dev_cmd_ptr;
-
- dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
-
- *dev_cmd_ptr = dev_cmd;
- __skb_queue_tail(&txq->overflow_q, skb);
- spin_unlock(&txq->lock);
- return 0;
- }
- }
-
- idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
-
- /* Set up driver data for this TFD */
- txq->entries[idx].skb = skb;
- txq->entries[idx].cmd = dev_cmd;
-
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(idx)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[idx].meta;
- out_meta->flags = 0;
-
- tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
- if (!tfd) {
- spin_unlock(&txq->lock);
- return -1;
- }
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
- } else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
- }
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
- iwl_txq_gen2_get_num_tbs(trans, tfd));
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
- iwl_txq_inc_wr_ptr(trans, txq);
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
- spin_unlock(&txq->lock);
- return 0;
-}
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/*
- * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
- while (txq->write_ptr != txq->read_ptr) {
- IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, txq->read_ptr);
-
- if (txq_id != trans->txqs.cmd.q_id) {
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
-
- if (!WARN_ON_ONCE(!skb))
- iwl_txq_free_tso_page(trans, skb);
- }
- iwl_txq_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
- }
-
- while (!skb_queue_empty(&txq->overflow_q)) {
- struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
- iwl_op_mode_free_skb(trans->op_mode, skb);
- }
-
- spin_unlock_bh(&txq->lock);
-
- /* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
-}
-
-static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct device *dev = trans->dev;
-
- /* De-alloc circular buffer of TFDs */
- if (txq->tfds) {
- dma_free_coherent(dev,
- trans->txqs.tfd.size * txq->n_window,
- txq->tfds, txq->dma_addr);
- dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->n_window,
- txq->first_tb_bufs, txq->first_tb_dma);
- }
-
- kfree(txq->entries);
- if (txq->bc_tbl.addr)
- dma_pool_free(trans->txqs.bc_pool,
- txq->bc_tbl.addr, txq->bc_tbl.dma);
- kfree(txq);
-}
-
-/*
- * iwl_pcie_txq_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq;
- int i;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return;
-
- txq = trans->txqs.txq[txq_id];
-
- if (WARN_ON(!txq))
- return;
-
- iwl_txq_gen2_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
- for (i = 0; i < txq->n_window; i++) {
- kfree_sensitive(txq->entries[i].cmd);
- kfree_sensitive(txq->entries[i].free_buf);
- }
- del_timer_sync(&txq->stuck_timer);
-
- iwl_txq_gen2_free_memory(trans, txq);
-
- trans->txqs.txq[txq_id] = NULL;
-
- clear_bit(txq_id, trans->txqs.queue_used);
-}
-
-/*
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-{
- q->n_window = slots_num;
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_txq_get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = 0;
- q->read_ptr = 0;
-
- return 0;
-}
-
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue)
-{
- int ret;
- u32 tfd_queue_max_size =
- trans->trans_cfg->base_params->max_tfd_queue_size;
-
- txq->need_update = false;
-
- /* max_tfd_queue_size must be power-of-two size, otherwise
- * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
- if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
- "Max tfd queue size must be a power of two, but is %d",
- tfd_queue_max_size))
- return -EINVAL;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
-
- if (cmd_queue) {
- static struct lock_class_key iwl_txq_cmd_queue_lock_class;
-
- lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
- }
-
- __skb_queue_head_init(&txq->overflow_q);
-
- return 0;
-}
-
-void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *next;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
- next = *page_ptr;
- *page_ptr = NULL;
-
- while (next) {
- struct page *tmp = next;
-
- next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
- sizeof(void *));
- __free_page(tmp);
- }
-}
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- u32 txq_id = txq->id;
- u32 status;
- bool active;
- u8 fifo;
-
- if (trans->trans_cfg->gen2) {
- IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
- txq->read_ptr, txq->write_ptr);
- /* TODO: access new SCD registers and dump them */
- return;
- }
-
- status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
- fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-
- IWL_ERR(trans,
- "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
- txq_id, active ? "" : "in", fifo,
- jiffies_to_msecs(txq->wd_timeout),
- txq->read_ptr, txq->write_ptr,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
-}
-
-static void iwl_txq_stuck_timer(struct timer_list *t)
-{
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans *trans = txq->trans;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->read_ptr == txq->write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- iwl_txq_log_scd_error(trans, txq);
-
- iwl_force_nmi(trans);
-}
-
-static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
- struct iwl_tfd *tfd)
-{
- tfd->num_tbs = 0;
-
- iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
-}
-
-int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue)
-{
- size_t num_entries = trans->trans_cfg->gen2 ?
- slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
- size_t tfd_sz;
- size_t tb0_buf_sz;
- int i;
-
- if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
- return -EINVAL;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- tfd_sz = trans->txqs.tfd.size * num_entries;
-
- timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
- txq->trans = trans;
-
- txq->n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_txq_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->dma_addr, GFP_KERNEL);
- if (!txq->tfds)
- goto error;
-
- BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
-
- tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
-
- txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
- &txq->first_tb_dma,
- GFP_KERNEL);
- if (!txq->first_tb_bufs)
- goto err_free_tfds;
-
- for (i = 0; i < num_entries; i++) {
- void *tfd = iwl_txq_get_tfd(trans, txq, i);
-
- if (trans->trans_cfg->gen2)
- iwl_txq_set_tfd_invalid_gen2(trans, tfd);
- else
- iwl_txq_set_tfd_invalid_gen1(trans, tfd);
- }
-
- return 0;
-err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
- txq->tfds = NULL;
-error:
- if (txq->entries && cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-}
-
-static struct iwl_txq *
-iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
-{
- size_t bc_tbl_size, bc_tbl_entries;
- struct iwl_txq *txq;
- int ret;
-
- WARN_ON(!trans->txqs.bc_tbl_size);
-
- bc_tbl_size = trans->txqs.bc_tbl_size;
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
-
- if (WARN_ON(size > bc_tbl_entries))
- return ERR_PTR(-EINVAL);
-
- txq = kzalloc(sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return ERR_PTR(-ENOMEM);
-
- txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
- &txq->bc_tbl.dma);
- if (!txq->bc_tbl.addr) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- kfree(txq);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = iwl_txq_alloc(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue alloc failed\n");
- goto error;
- }
- ret = iwl_txq_init(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue init failed\n");
- goto error;
- }
-
- txq->wd_timeout = msecs_to_jiffies(timeout);
-
- return txq;
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ERR_PTR(ret);
-}
-
-static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd)
-{
- struct iwl_tx_queue_cfg_rsp *rsp;
- int ret, qid;
- u32 wr_ptr;
-
- if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
- sizeof(*rsp))) {
- ret = -EINVAL;
- goto error_free_resp;
- }
-
- rsp = (void *)hcmd->resp_pkt->data;
- qid = le16_to_cpu(rsp->queue_number);
- wr_ptr = le16_to_cpu(rsp->write_pointer);
-
- if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
- WARN_ONCE(1, "queue index %d unsupported", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (test_and_set_bit(qid, trans->txqs.queue_used)) {
- WARN_ONCE(1, "queue %d already used", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (WARN_ONCE(trans->txqs.txq[qid],
- "queue %d already allocated\n", qid)) {
- ret = -EIO;
- goto error_free_resp;
- }
-
- txq->id = qid;
- trans->txqs.txq[qid] = txq;
- wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- /* Place first TFD at index corresponding to start sequence number */
- txq->read_ptr = wr_ptr;
- txq->write_ptr = wr_ptr;
-
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
-
- iwl_free_resp(hcmd);
- return qid;
-
-error_free_resp:
- iwl_free_resp(hcmd);
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
- u8 tid, int size, unsigned int timeout)
-{
- struct iwl_txq *txq;
- union {
- struct iwl_tx_queue_cfg_cmd old;
- struct iwl_scd_queue_cfg_cmd new;
- } cmd;
- struct iwl_host_cmd hcmd = {
- .flags = CMD_WANT_SKB,
- };
- int ret;
-
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
- trans->hw_rev_step == SILICON_A_STEP)
- size = 4096;
-
- txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
- if (IS_ERR(txq))
- return PTR_ERR(txq);
-
- if (trans->txqs.queue_alloc_cmd_ver == 0) {
- memset(&cmd.old, 0, sizeof(cmd.old));
- cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
- cmd.old.tid = tid;
-
- if (hweight32(sta_mask) != 1) {
- ret = -EINVAL;
- goto error;
- }
- cmd.old.sta_id = ffs(sta_mask) - 1;
-
- hcmd.id = SCD_QUEUE_CFG;
- hcmd.len[0] = sizeof(cmd.old);
- hcmd.data[0] = &cmd.old;
- } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
- memset(&cmd.new, 0, sizeof(cmd.new));
- cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
- cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
- cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.new.u.add.flags = cpu_to_le32(flags);
- cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
- cmd.new.u.add.tid = tid;
-
- hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
- hcmd.len[0] = sizeof(cmd.new);
- hcmd.data[0] = &cmd.new;
- } else {
- ret = -EOPNOTSUPP;
- goto error;
- }
-
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- goto error;
-
- return iwl_txq_alloc_response(trans, txq, &hcmd);
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
-{
- if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", queue))
- return;
-
- /*
- * Upon HW Rfkill - we stop the device, and then stop the queues
- * in the op_mode. Just for the sake of the simplicity of the op_mode,
- * allow the op_mode to call txq_disable after it already called
- * stop_device.
- */
- if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
- WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
- "queue %d not used", queue);
- return;
- }
-
- iwl_txq_gen2_free(trans, queue);
-
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-}
-
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
-{
- int i;
-
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
- if (!trans->txqs.txq[i])
- continue;
-
- iwl_txq_gen2_free(trans, i);
- }
-}
-
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
-{
- struct iwl_txq *queue;
- int ret;
-
- /* alloc and init the tx queue */
- if (!trans->txqs.txq[txq_id]) {
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- IWL_ERR(trans, "Not enough memory for tx queue\n");
- return -ENOMEM;
- }
- trans->txqs.txq[txq_id] = queue;
- ret = iwl_txq_alloc(trans, queue, queue_size, true);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- } else {
- queue = trans->txqs.txq[txq_id];
- }
-
- ret = iwl_txq_init(trans, queue, queue_size,
- (txq_id == trans->txqs.cmd.q_id));
- if (ret) {
- IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- trans->txqs.txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans->txqs.queue_used);
-
- return 0;
-
-error:
- iwl_txq_gen2_tx_free(trans);
- return ret;
-}
-
-static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
- struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- dma_addr_t addr;
- dma_addr_t hi_len;
-
- addr = get_unaligned_le32(&tb->lo);
-
- if (sizeof(dma_addr_t) <= sizeof(u32))
- return addr;
-
- hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
-
- /*
- * shift by 16 twice to avoid warnings on 32-bit
- * (where this code never runs anyway due to the
- * if statement above)
- */
- return addr | ((hi_len << 16) << 16);
-}
-
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index)
-{
- int i, num_tbs;
- struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
-
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(trans,
- tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(trans,
- tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- }
-
- meta->tbs = 0;
-
- iwl_txq_set_tfd_invalid_gen1(trans, tfd);
-}
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-/*
- * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
- int write_ptr = txq->write_ptr;
- int txq_id = txq->id;
- u8 sec_ctl = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
- u8 sta_id = tx_cmd->sta_id;
-
- scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
-
- sec_ctl = tx_cmd->sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += IEEE80211_CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += IEEE80211_TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
- break;
- }
- if (trans->txqs.bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
- return;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
- bc_ent;
-}
-
-void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
- int txq_id = txq->id;
- int read_ptr = txq->read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != trans->txqs.cmd.q_id)
- sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
- bc_ent;
-}
-
-/*
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @trans - transport private data
- * @txq - tx queue
- * @dma_dir - the direction of the DMA mapping
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int rd_ptr = txq->read_ptr;
- int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->lock);
-
- if (!txq->entries)
- return;
-
- /* We have only q->n_window txq->entries, but we use
- * TFD_QUEUE_SIZE_MAX tfds
- */
- if (trans->trans_cfg->gen2)
- iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_txq_get_tfd(trans, txq, rd_ptr));
- else
- iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
- txq, rd_ptr);
-
- /* free SKB */
- skb = txq->entries[idx].skb;
-
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-void iwl_txq_progress(struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- if (!txq->wd_timeout)
- return;
-
- /*
- * station is asleep and we send data - that must
- * be uAPSD or PS-Poll. Don't rearm the timer.
- */
- if (txq->frozen)
- return;
-
- /*
- * if empty delete timer, otherwise move timer forward
- * since we're making progress on this queue
- */
- if (txq->read_ptr == txq->write_ptr)
- del_timer(&txq->stuck_timer);
- else
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-}
-
-/* Frees buffers until index _not_ inclusive */
-void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs, bool is_flush)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- int tfd_num, read_ptr, last_to_free;
-
- /* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
- return;
-
- if (WARN_ON(!txq))
- return;
-
- tfd_num = iwl_txq_get_cmd_index(txq, ssn);
-
- spin_lock_bh(&txq->lock);
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
-
- if (!test_bit(txq_id, trans->txqs.queue_used)) {
- IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
- txq_id, ssn);
- goto out;
- }
-
- if (read_ptr == tfd_num)
- goto out;
-
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
- txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
-
-	/* Since we free until index _not_ inclusive, the one before index is
-	 * the last we will free. This one must be used */
- last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
-
- if (!iwl_txq_used(txq, last_to_free)) {
- IWL_ERR(trans,
- "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free,
- trans->trans_cfg->base_params->max_tfd_queue_size,
- txq->write_ptr, txq->read_ptr);
-
- iwl_op_mode_time_point(trans->op_mode,
- IWL_FW_INI_TIME_POINT_FAKE_TX,
- NULL);
- goto out;
- }
-
- if (WARN_ON(!skb_queue_empty(skbs)))
- goto out;
-
- for (;
- read_ptr != tfd_num;
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
- struct sk_buff *skb = txq->entries[read_ptr].skb;
-
- if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
- read_ptr, txq->read_ptr, txq_id))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
-
- __skb_queue_tail(skbs, skb);
-
- txq->entries[read_ptr].skb = NULL;
-
- if (!trans->trans_cfg->gen2)
- iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
-
- iwl_txq_free_tfd(trans, txq);
- }
-
- iwl_txq_progress(txq);
-
- if (iwl_txq_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans->txqs.queue_stopped)) {
- struct sk_buff_head overflow_skbs;
- struct sk_buff *skb;
-
- __skb_queue_head_init(&overflow_skbs);
- skb_queue_splice_init(&txq->overflow_q,
- is_flush ? skbs : &overflow_skbs);
-
- /*
- * We are going to transmit from the overflow queue.
- * Remember this state so that wait_for_txq_empty will know we
- * are adding more packets to the TFD queue. It cannot rely on
- * the state of &txq->overflow_q, as we just emptied it, but
- * haven't TXed the content yet.
- */
- txq->overflow_tx = true;
-
-		/*
-		 * This is tricky: we are in the reclaim path, which is not
-		 * re-entrant, so no one will try to access the txq data
-		 * from that path. We stopped tx, so we can't have tx as
-		 * well. Bottom line: we can unlock and re-lock later.
-		 */
- spin_unlock_bh(&txq->lock);
-
- while ((skb = __skb_dequeue(&overflow_skbs))) {
- struct iwl_device_tx_cmd *dev_cmd_ptr;
-
- dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
-
- /*
- * Note that we can very well be overflowing again.
- * In that case, iwl_txq_space will be small again
- * and we won't wake mac80211's queue.
- */
- iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
- }
-
- if (iwl_txq_space(trans, txq) > txq->low_mark)
- iwl_wake_queue(trans, txq);
-
- spin_lock_bh(&txq->lock);
- txq->overflow_tx = false;
- }
-
-out:
- spin_unlock_bh(&txq->lock);
-}
-
-/* Set wr_ptr of specific device and txq */
-void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
-
- txq->write_ptr = ptr;
- txq->read_ptr = txq->write_ptr;
-
- spin_unlock_bh(&txq->lock);
-}
-
-void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
- bool freeze)
-{
- int queue;
-
- for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = trans->txqs.txq[queue];
- unsigned long now;
-
- spin_lock_bh(&txq->lock);
-
- now = jiffies;
-
- if (txq->frozen == freeze)
- goto next_queue;
-
- IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
- freeze ? "Freezing" : "Waking", queue);
-
- txq->frozen = freeze;
-
- if (txq->read_ptr == txq->write_ptr)
- goto next_queue;
-
- if (freeze) {
- if (unlikely(time_after(now,
- txq->stuck_timer.expires))) {
- /*
- * The timer should have fired, maybe it is
- * spinning right now on the lock.
- */
- goto next_queue;
- }
- /* remember how long until the timer fires */
- txq->frozen_expiry_remainder =
- txq->stuck_timer.expires - now;
- del_timer(&txq->stuck_timer);
- goto next_queue;
- }
-
- /*
- * Wake a non-empty queue -> arm timer with the
- * remainder before it froze
- */
- mod_timer(&txq->stuck_timer,
- now + txq->frozen_expiry_remainder);
-
-next_queue:
- spin_unlock_bh(&txq->lock);
- }
-}
-
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-
-static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
- int cmd_idx;
- int ret;
-
- IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
-
- if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- "Command %s: a command is already active!\n", cmd_str))
- return -EIO;
-
- IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
-
- cmd_idx = trans->ops->send_cmd(trans, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
- cmd_str, ret);
- return ret;
- }
-
- ret = wait_event_timeout(trans->wait_command_queue,
- !test_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
- cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- cmd_str);
- ret = -ETIMEDOUT;
-
- iwl_trans_sync_nmi(trans);
- goto cancel;
- }
-
- if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
- &trans->status)) {
- IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
- dump_stack();
- }
- ret = -EIO;
- goto cancel;
- }
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
- ret = -ERFKILL;
- goto cancel;
- }
-
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
- IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
- }
-
- if (cmd->resp_pkt) {
- iwl_free_resp(cmd);
- cmd->resp_pkt = NULL;
- }
-
- return ret;
-}
-
-int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- /* Make sure the NIC is still alive in the bus */
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return -ENODEV;
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
- cmd->id);
- return -ERFKILL;
- }
-
- if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
- !(cmd->flags & CMD_SEND_IN_D3))) {
- IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
- return -EHOSTDOWN;
- }
-
- if (cmd->flags & CMD_ASYNC) {
- int ret;
-
- /* An asynchronous command can not expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- ret = trans->ops->send_cmd(trans, cmd);
- if (ret < 0) {
- IWL_ERR(trans,
- "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_get_cmd_string(trans, cmd->id), ret);
- return ret;
- }
- return 0;
- }
-
- return iwl_trans_txq_send_hcmd_sync(trans, cmd);
-}
-
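
The queue-space accounting removed with tx.c (iwl_txq_space() and the inc/dec wrap helpers) depends on the TFD queue size being a power of two: unsigned subtraction followed by a mask then yields the number of used entries even after the write pointer wraps. A small standalone illustration of that arithmetic, with made-up sizes rather than driver values:

	#include <assert.h>

	#define RING_SIZE 256u			/* must be a power of two */

	static unsigned int ring_used(unsigned int write, unsigned int read)
	{
		/* well defined for unsigned values, even when write has wrapped */
		return (write - read) & (RING_SIZE - 1);
	}

	static unsigned int ring_free(unsigned int write, unsigned int read)
	{
		/* keep one slot unused so "full" and "empty" stay distinguishable */
		return (RING_SIZE - 1) - ring_used(write, read);
	}

	int main(void)
	{
		assert(ring_used(10, 250) == 16);	/* wrapped case */
		assert(ring_free(10, 250) == 239);
		return 0;
	}
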
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
deleted file mode 100644
index 124b29aac4a1..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-#ifndef __iwl_trans_queue_tx_h__
-#define __iwl_trans_queue_tx_h__
-#include "iwl-fh.h"
-#include "fw/api/tx.h"
-
-struct iwl_tso_hdr_page {
- struct page *page;
- u8 *pos;
-};
-
-static inline dma_addr_t
-iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
-{
- return txq->first_tb_dma +
- sizeof(struct iwl_pcie_first_tb_buf) * idx;
-}
-
-static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
-
-static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
- }
-}
-
-static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq, int idx)
-{
- if (trans->trans_cfg->gen2)
- idx = iwl_txq_get_cmd_index(txq, idx);
-
- return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
-}
-
-int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue);
-/*
- * We need this inline in case dma_addr_t is only 32-bits - since the
- * hardware is always 64-bit, the issue can still occur in that case,
- * so use u64 for 'phys' here to force the addition in 64-bit.
- */
-static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
-{
- return upper_32_bits(phys) != upper_32_bits(phys + len);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
-
-static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
- } else {
- IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->id);
- }
-}
-
-/**
- * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
- * @trans: the transport (for configuration data)
- * @index: current index
- */
-static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
-{
- return ++index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-/**
- * iwl_txq_dec_wrap - decrement queue index, wrap back to end
- * @trans: the transport (for configuration data)
- * @index: current index
- */
-static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
-{
- return --index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
-{
- int index = iwl_txq_get_cmd_index(q, i);
- int r = iwl_txq_get_cmd_index(q, q->read_ptr);
- int w = iwl_txq_get_cmd_index(q, q->write_ptr);
-
- return w >= r ?
- (index >= r && index < w) :
- !(index < r && index >= w);
-}
-
-void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd, dma_addr_t addr,
- u16 len);
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd);
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
- u32 sta_mask, u8 tid,
- int size, unsigned int timeout);
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id);
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
-void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue);
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb);
-#endif
-static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
- struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
-
-static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
- void *_tfd, u8 idx)
-{
- struct iwl_tfd *tfd;
- struct iwl_tfd_tb *tb;
-
- if (trans->trans_cfg->gen2) {
- struct iwl_tfh_tfd *tfh_tfd = _tfd;
- struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
-
- return le16_to_cpu(tfh_tb->tb_len);
- }
-
- tfd = (struct iwl_tfd *)_tfd;
- tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans,
- struct iwl_tfd *tfd,
- u8 idx, dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- hi_n_len |= iwl_get_dma_hi_addr(addr);
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index);
-void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq);
-void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
-void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs, bool is_flush);
-void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
-void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
- bool freeze);
-void iwl_txq_progress(struct iwl_txq *txq);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-#endif /* __iwl_trans_queue_tx_h__ */
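
Among the helpers removed with this header is iwl_txq_crosses_4g_boundary(), which the gen2 TX path above used to decide whether the copy-to-workaround-page path is needed. The test is simply whether the upper 32 address bits differ between the start and the end of the buffer; a userspace sketch with plain integer types instead of the kernel's upper_32_bits():

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool crosses_4g_boundary(uint64_t phys, uint16_t len)
	{
		return (phys >> 32) != ((phys + len) >> 32);
	}

	int main(void)
	{
		printf("%d\n", crosses_4g_boundary(0xFFFFFFF0ull, 0x20));	/* 1: crosses */
		printf("%d\n", crosses_4g_boundary(0x100000000ull, 0x20));	/* 0: does not */
		return 0;
	}
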
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index c4fe70e05b9b..772084a9bd8d 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -216,7 +216,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
struct sk_buff *skb;
size_t eeprom_hdr_size;
int ret = 0;
- long timeout;
+ long time_left;
if (priv->fw_var >= 0x509)
eeprom_hdr_size = sizeof(*eeprom_hdr);
@@ -245,9 +245,9 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
p54_tx(priv, skb);
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&priv->eeprom_comp, HZ);
- if (timeout <= 0) {
+ if (time_left <= 0) {
wiphy_err(priv->hw->wiphy,
"device does not respond or signal received!\n");
ret = -EBUSY;
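
This rename (repeated in the p54pci and p54spi hunks below) reflects the return-value convention of wait_for_completion_interruptible_timeout(): it returns the jiffies left over on success, 0 on timeout and a negative value if interrupted, so the variable holds time remaining rather than the timeout that was passed in. A minimal kernel-style sketch of a caller, with a hypothetical example_wait():

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static int example_wait(struct completion *done)
	{
		long time_left;

		time_left = wait_for_completion_interruptible_timeout(done, HZ);
		if (time_left < 0)
			return time_left;	/* interrupted by a signal */
		if (!time_left)
			return -ETIMEDOUT;	/* the completion never fired */

		return 0;			/* completed with time_left jiffies to spare */
	}
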
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 687841b2fa2a..42111bb53f58 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -197,7 +197,7 @@ out:
return err;
}
-static void p54_stop(struct ieee80211_hw *dev)
+static void p54_stop(struct ieee80211_hw *dev, bool suspend)
{
struct p54_common *priv = dev->priv;
int i;
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index e97ee547b9f3..6588f5477762 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -434,7 +434,7 @@ static int p54p_open(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
int err;
- long timeout;
+ long time_left;
init_completion(&priv->boot_comp);
err = request_irq(priv->pdev->irq, p54p_interrupt,
@@ -472,12 +472,12 @@ static int p54p_open(struct ieee80211_hw *dev)
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
P54P_READ(dev_int);
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&priv->boot_comp, HZ);
- if (timeout <= 0) {
+ if (time_left <= 0) {
wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
p54p_stop(dev);
- return timeout ? -ERESTARTSYS : -ETIMEDOUT;
+ return time_left ? -ERESTARTSYS : -ETIMEDOUT;
}
P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index 0073b5e0f9c9..d33a994906a7 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -518,7 +518,7 @@ out:
static int p54spi_op_start(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
- unsigned long timeout;
+ long time_left;
int ret = 0;
if (mutex_lock_interruptible(&priv->mutex)) {
@@ -538,10 +538,10 @@ static int p54spi_op_start(struct ieee80211_hw *dev)
mutex_unlock(&priv->mutex);
- timeout = msecs_to_jiffies(2000);
- timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp,
- timeout);
- if (!timeout) {
+ time_left = msecs_to_jiffies(2000);
+ time_left = wait_for_completion_interruptible_timeout(&priv->fw_comp,
+ time_left);
+ if (!time_left) {
dev_err(&priv->spi->dev, "firmware boot failed");
p54spi_power_off(priv);
ret = -1;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 9cca69fe04d7..b47a832b9ae2 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -267,7 +267,7 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
return 0;
}
-static void lbtf_op_stop(struct ieee80211_hw *hw)
+static void lbtf_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct lbtf_private *priv = hw->priv;
unsigned long flags;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index b909a7665e9c..155eb0fab12a 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -926,6 +926,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
return -EOPNOTSUPP;
}
+ priv->bss_num = mwifiex_get_unused_bss_num(adapter, priv->bss_type);
+
spin_lock_irqsave(&adapter->main_proc_lock, flags);
adapter->main_locked = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 175882485a19..c5164ae41b54 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1287,6 +1287,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
+ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
+ continue;
+
if ((adapter->priv[i]->bss_num == bss_num) &&
(adapter->priv[i]->bss_type == bss_type))
break;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index d3d07bb26335..b130e057370f 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -2211,7 +2211,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
dma_addr_t dma_addr;
unsigned int dma_size;
int rc;
- unsigned long timeout = 0;
+ unsigned long time_left = 0;
u8 buf[32];
u32 bitmap = 0;
@@ -2258,8 +2258,8 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
iowrite32(MWL8K_H2A_INT_DUMMY,
regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
- timeout = wait_for_completion_timeout(&cmd_wait,
- msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&cmd_wait,
+ msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
priv->hostcmd_wait = NULL;
@@ -2267,7 +2267,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
dma_unmap_single(&priv->pdev->dev, dma_addr, dma_size,
DMA_BIDIRECTIONAL);
- if (!timeout) {
+ if (!time_left) {
wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
MWL8K_CMD_TIMEOUT_MS);
@@ -2275,7 +2275,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
} else {
int ms;
- ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout);
+ ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(time_left);
rc = cmd->result ? -EINVAL : 0;
if (rc)
@@ -4768,7 +4768,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return rc;
}
-static void mwl8k_stop(struct ieee80211_hw *hw)
+static void mwl8k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mwl8k_priv *priv = hw->priv;
int i;
@@ -6023,7 +6023,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *vif, *tmp_vif;
- mwl8k_stop(hw);
+ mwl8k_stop(hw, false);
mwl8k_rxq_deinit(hw, 0);
/*
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index ae83be572b94..b6a2746c187d 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -33,8 +33,8 @@ mt76_napi_threaded_set(void *data, u64 val)
if (!mt76_is_mmio(dev))
return -EOPNOTSUPP;
- if (dev->napi_dev.threaded != val)
- return dev_set_threaded(&dev->napi_dev, val);
+ if (dev->napi_dev->threaded != val)
+ return dev_set_threaded(dev->napi_dev, val);
return 0;
}
@@ -44,7 +44,7 @@ mt76_napi_threaded_get(void *data, u64 *val)
{
struct mt76_dev *dev = data;
- *val = dev->napi_dev.threaded;
+ *val = dev->napi_dev->threaded;
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index f4f88c444e21..5f46d6daeaa7 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -916,7 +916,7 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
struct mt76_dev *dev;
int qid, done = 0, cur;
- dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+ dev = mt76_priv(napi->dev);
qid = napi - dev->napi;
rcu_read_lock();
@@ -940,18 +940,35 @@ static int
mt76_dma_init(struct mt76_dev *dev,
int (*poll)(struct napi_struct *napi, int budget))
{
+ struct mt76_dev **priv;
int i;
- init_dummy_netdev(&dev->napi_dev);
- init_dummy_netdev(&dev->tx_napi_dev);
- snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
+ dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
+ if (!dev->napi_dev)
+ return -ENOMEM;
+
+ /* napi_dev private data points to the mt76_dev parent, so mt76_dev
+ * can be retrieved from a given napi_dev
+ */
+ priv = netdev_priv(dev->napi_dev);
+ *priv = dev;
+
+ dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
+ if (!dev->tx_napi_dev) {
+ free_netdev(dev->napi_dev);
+ return -ENOMEM;
+ }
+ priv = netdev_priv(dev->tx_napi_dev);
+ *priv = dev;
+
+ snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
wiphy_name(dev->hw->wiphy));
- dev->napi_dev.threaded = 1;
+ dev->napi_dev->threaded = 1;
init_completion(&dev->mmio.wed_reset);
init_completion(&dev->mmio.wed_reset_complete);
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
+ netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
napi_enable(&dev->napi[i]);
}
@@ -1019,5 +1036,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_free_pending_txwi(dev);
mt76_free_pending_rxwi(dev);
+ free_netdev(dev->napi_dev);
+ free_netdev(dev->tx_napi_dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1de5a2b20f74..e3ddc7a83757 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -116,4 +116,13 @@ mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
}
}
+static inline void *mt76_priv(struct net_device *dev)
+{
+ struct mt76_dev **priv;
+
+ priv = netdev_priv(dev);
+
+ return *priv;
+}
+
#endif
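/*
 * Illustration only -- not part of the patch above. A minimal sketch of the
 * back-pointer pattern that the new mt76_priv() helper relies on: a dummy
 * netdev is allocated with room for one pointer in its private area, that
 * pointer is set to the parent device structure, and the NAPI poll callback
 * recovers the parent from napi->dev. The names my_dev, my_poll and
 * my_dev_init_napi are hypothetical; the kernel APIs used
 * (alloc_netdev_dummy, netdev_priv, netif_napi_add, napi_complete_done,
 * free_netdev) are the ones the patch itself switches to.
 */
#include <linux/netdevice.h>

struct my_dev {
	struct net_device *napi_dev;
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	/* Recover the parent from the dummy netdev's private area. */
	struct my_dev **priv = netdev_priv(napi->dev);
	struct my_dev *dev = *priv;

	(void)dev; /* a real driver would process RX for dev here */
	napi_complete_done(napi, 0);
	return 0;
}

static int my_dev_init_napi(struct my_dev *dev)
{
	struct my_dev **priv;

	/* Private area holds a single pointer back to the parent. */
	dev->napi_dev = alloc_netdev_dummy(sizeof(struct my_dev *));
	if (!dev->napi_dev)
		return -ENOMEM;

	priv = netdev_priv(dev->napi_dev);
	*priv = dev;

	netif_napi_add(dev->napi_dev, &dev->napi, my_poll);
	return 0;
}

static void my_dev_free_napi(struct my_dev *dev)
{
	free_netdev(dev->napi_dev);
}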
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index e8ba2e4e8484..bb291fe314fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -1125,6 +1125,11 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
memcpy(status->chain_signal, mstat.chain_signal,
sizeof(mstat.chain_signal));
+ if (mstat.wcid) {
+ status->link_valid = mstat.wcid->link_valid;
+ status->link_id = mstat.wcid->link_id;
+ }
+
*sta = wcid_to_sta(mstat.wcid);
*hw = mt76_phy_hw(dev, mstat.phy_idx);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 11b9f22ca7f3..4a58a78d5ed2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -349,6 +349,8 @@ struct mt76_wcid {
u8 sta:1;
u8 amsdu:1;
u8 phy_idx:2;
+ u8 link_id:4;
+ bool link_valid;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
@@ -366,6 +368,8 @@ struct mt76_wcid {
struct mt76_sta_stats stats;
struct list_head poll_list;
+
+ struct mt76_wcid *def_wcid;
};
struct mt76_txq {
@@ -831,8 +835,8 @@ struct mt76_dev {
struct mt76_mcu mcu;
- struct net_device napi_dev;
- struct net_device tx_napi_dev;
+ struct net_device *napi_dev;
+ struct net_device *tx_napi_dev;
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
@@ -1081,6 +1085,7 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
+bool mt76_pci_aspm_supported(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
@@ -1256,6 +1261,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
if (!wcid || !wcid->sta)
return NULL;
+ if (wcid->def_wcid)
+ ptr = wcid->def_wcid;
+
return container_of(ptr, struct ieee80211_sta, drv_priv);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 14304b063715..ea017f22fff2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -242,7 +242,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
if (ret)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7603_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 9b49267b1eab..f35fa643c0da 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -23,7 +23,7 @@ mt7603_start(struct ieee80211_hw *hw)
}
static void
-mt7603_stop(struct ieee80211_hw *hw)
+mt7603_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7603_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index e7135b2f1742..bcf7864312d7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -67,7 +67,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7615_dev *dev;
- dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
+ dev = mt76_priv(napi->dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
queue_work(dev->mt76.wq, &dev->pm.wake_work);
@@ -89,7 +89,7 @@ static int mt7615_poll_rx(struct napi_struct *napi, int budget)
struct mt7615_dev *dev;
int done;
- dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev);
+ dev = mt76_priv(napi->dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
@@ -282,7 +282,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7615_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 0971c164b57e..50e262c1622f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -91,7 +91,7 @@ out:
return ret;
}
-static void mt7615_stop(struct ieee80211_hw *hw)
+static void mt7615_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
#endif /* CONFIG_PM */
const struct ieee80211_ops mt7615_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7615_tx,
.start = mt7615_start,
.stop = mt7615_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index c807bd8d928d..d50d967828be 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -842,6 +842,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct sk_buff *skb, *sskb, *wskb = NULL;
+ struct ieee80211_link_sta *link_sta;
struct mt7615_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
@@ -849,6 +850,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+ link_sta = sta ? &sta->deflink : NULL;
sskb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
&msta->wcid);
@@ -861,8 +863,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
else
mvif->sta_added = true;
}
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, sta, enable,
- new_entry);
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
+ enable, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1109,8 +1111,8 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- return mt76_connac_mcu_uni_add_dev(phy->mt76, vif, &mvif->sta.wcid,
- enable);
+ return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf,
+ &mvif->sta.wcid, enable);
}
static int
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index df737e1ff27b..9335ca0776fe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -79,7 +79,7 @@ static void mt7663u_copy(struct mt76_dev *dev, u32 offset,
mutex_unlock(&usb->usb_ctrl_mtx);
}
-static void mt7663u_stop(struct ieee80211_hw *hw)
+static void mt7663u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 162c57fb7954..4dce03ddbfa4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -370,7 +370,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta,
bool enable, bool newly)
{
struct sta_rec_basic *basic;
@@ -390,7 +390,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_state = CONN_STATE_DISCONNECT;
}
- if (!sta) {
+ if (!link_sta) {
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
@@ -411,7 +411,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
else
conn_type = CONNECTION_INFRA_STA;
basic->conn_type = cpu_to_le32(conn_type);
- basic->aid = cpu_to_le16(sta->aid);
+ basic->aid = cpu_to_le16(link_sta->sta->aid);
break;
case NL80211_IFTYPE_STATION:
if (vif->p2p && !is_mt7921(dev))
@@ -423,15 +423,15 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
break;
case NL80211_IFTYPE_ADHOC:
basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- basic->aid = cpu_to_le16(sta->aid);
+ basic->aid = cpu_to_le16(link_sta->sta->aid);
break;
default:
WARN_ON(1);
break;
}
- memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->qos = sta->wme;
+ memcpy(basic->peer_addr, link_sta->addr, ETH_ALEN);
+ basic->qos = link_sta->sta->wme;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
@@ -793,7 +793,8 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_he_tlv_v2);
u8
mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta)
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -801,11 +802,11 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
const struct ieee80211_sta_eht_cap *eht_cap;
u8 mode = 0;
- if (sta) {
- ht_cap = &sta->deflink.ht_cap;
- vht_cap = &sta->deflink.vht_cap;
- he_cap = &sta->deflink.he_cap;
- eht_cap = &sta->deflink.eht_cap;
+ if (link_sta) {
+ ht_cap = &link_sta->ht_cap;
+ vht_cap = &link_sta->vht_cap;
+ he_cap = &link_sta->he_cap;
+ eht_cap = &link_sta->eht_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -911,7 +912,8 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
- phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
+ phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band,
+ &sta->deflink);
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
phy->rcpi = rcpi;
phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
@@ -1044,6 +1046,7 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
+ struct ieee80211_link_sta *link_sta;
struct mt76_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
@@ -1053,9 +1056,11 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
+ link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta,
- info->enable, info->newly);
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ link_sta, info->enable,
+ info->newly);
if (info->sta && info->enable)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
@@ -1132,11 +1137,11 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
struct mt76_wcid *wcid,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -1148,7 +1153,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
__le16 tag;
__le16 len;
u8 active;
- u8 pad;
+ u8 link_idx; /* not link_id */
u8 omac_addr[ETH_ALEN];
} __packed tlv;
} dev_req = {
@@ -1160,6 +1165,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
+ .link_idx = mvif->idx,
},
};
struct {
@@ -1182,12 +1188,13 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
.sta_idx = cpu_to_le16(wcid->idx),
.conn_state = 1,
+ .link_idx = mvif->idx,
},
};
int err, idx, cmd, len;
void *data;
- switch (vif->type) {
+ switch (bss_conf->vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
@@ -1207,7 +1214,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
basic_req.basic.hw_bss_idx = idx;
- memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+ memcpy(dev_req.tlv.omac_addr, bss_conf->addr, ETH_ALEN);
cmd = enable ? MCU_UNI_CMD(DEV_INFO_UPDATE) : MCU_UNI_CMD(BSS_INFO_UPDATE);
data = enable ? (void *)&dev_req : (void *)&basic_req;
@@ -1305,7 +1312,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta)
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta)
{
struct mt76_dev *dev = phy->dev;
const struct ieee80211_sta_he_cap *he_cap;
@@ -1316,10 +1324,10 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (is_connac_v1(dev))
return 0x38;
- if (sta) {
- ht_cap = &sta->deflink.ht_cap;
- vht_cap = &sta->deflink.vht_cap;
- he_cap = &sta->deflink.he_cap;
+ if (link_sta) {
+ ht_cap = &link_sta->ht_cap;
+ vht_cap = &link_sta->vht_cap;
+ he_cap = &link_sta->he_cap;
} else {
struct ieee80211_supported_band *sband;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 6873ce14d299..4242d436de26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -545,6 +545,13 @@ struct sta_rec_muru {
} mimo_ul;
} __packed;
+struct sta_rec_remove {
+ __le16 tag;
+ __le16 len;
+ u8 action;
+ u8 pad[3];
+} __packed;
+
struct sta_phy {
u8 type;
u8 flag;
@@ -813,7 +820,10 @@ enum {
STA_REC_HE_6G = 0x17,
STA_REC_HE_V2 = 0x19,
STA_REC_MLD = 0x20,
+ STA_REC_EHT_MLD = 0x21,
STA_REC_EHT = 0x22,
+ STA_REC_MLD_OFF = 0x23,
+ STA_REC_REMOVE = 0x25,
STA_REC_PN_INFO = 0x26,
STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
@@ -1392,6 +1402,7 @@ enum {
MT_NIC_CAP_WFDMA_REALLOC,
MT_NIC_CAP_6G,
MT_NIC_CAP_CHIP_CAP = 0x20,
+ MT_NIC_CAP_EML_CAP = 0x22,
};
#define UNI_WOW_DETECT_TYPE_MAGIC BIT(0)
@@ -1443,7 +1454,7 @@ struct mt76_connac_bss_basic_tlv {
__le16 sta_idx;
__le16 nonht_basic_phy;
u8 phymode_ext; /* bit(0) AX_6G */
- u8 pad[1];
+ u8 link_idx;
} __packed;
struct mt76_connac_bss_qos_tlv {
@@ -1733,7 +1744,10 @@ enum mt76_sta_info_state {
};
struct mt76_sta_cmd_info {
- struct ieee80211_sta *sta;
+ union {
+ struct ieee80211_sta *sta;
+ struct ieee80211_link_sta *link_sta;
+ };
struct mt76_wcid *wcid;
struct ieee80211_vif *vif;
@@ -1883,8 +1897,8 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable,
- bool newly);
+ struct ieee80211_link_sta *link_sta,
+ bool enable, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
@@ -1898,7 +1912,8 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct mt76_wcid *wcid, int cmd);
void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta);
u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta);
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta);
int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -1917,7 +1932,7 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
bool enable, bool tx);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
struct mt76_wcid *wcid,
bool enable);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
@@ -1992,7 +2007,8 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
const struct ieee80211_sta_eht_cap *
mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta);
+ enum nl80211_band band,
+ struct ieee80211_link_sta *sta);
u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 79b7996ad1a8..2ecee7c5c80d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -44,7 +44,7 @@ static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
}
-static void mt76x0e_stop(struct ieee80211_hw *hw)
+static void mt76x0e_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index bba44f289b4e..390f502e97f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -77,7 +77,7 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
mt76u_queues_deinit(&dev->mt76);
}
-static void mt76x0u_stop(struct ieee80211_hw *hw)
+static void mt76x0u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index e5ad635d3c56..35b7ebc2c9c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -239,7 +239,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt76x02_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index bfc8c69f43fa..6accea551319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -24,7 +24,7 @@ mt76x2_start(struct ieee80211_hw *hw)
}
static void
-mt76x2_stop(struct ieee80211_hw *hw)
+mt76x2_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 9fe390fdd730..ba0241c36672 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -22,7 +22,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
return 0;
}
-static void mt76x2u_stop(struct ieee80211_hw *hw)
+static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 0baa82c8df5a..0c62272fe7d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -578,7 +578,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7915_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 2624edbb59a1..049223df9beb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -108,7 +108,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
return ret;
}
-static void mt7915_stop(struct ieee80211_hw *hw)
+static void mt7915_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 9599adf104b1..2185cd24e2e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -1503,7 +1503,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink);
ra->channel = chandef->chan->hw_value;
ra->bw = sta->deflink.bandwidth;
ra->phy.bw = sta->deflink.bandwidth;
@@ -1656,11 +1656,13 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_link_sta *link_sta;
struct mt7915_sta *msta;
struct sk_buff *skb;
int ret;
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ link_sta = sta ? &sta->deflink : NULL;
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
&msta->wcid);
@@ -1668,7 +1670,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable,
!rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
if (!enable)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 73e42ef42983..047106b65d2b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -39,6 +39,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
};
struct ieee80211_sta *sta;
struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
struct rate_info *rate;
@@ -60,23 +61,25 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
spin_unlock_bh(&dev->mt76.sta_poll_lock);
break;
}
- msta = list_first_entry(&sta_poll_list,
- struct mt792x_sta, wcid.poll_list);
- list_del_init(&msta->wcid.poll_list);
+ mlink = list_first_entry(&sta_poll_list,
+ struct mt792x_link_sta,
+ wcid.poll_list);
+ msta = container_of(mlink, struct mt792x_sta, deflink);
+ list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- idx = msta->wcid.idx;
+ idx = mlink->wcid.idx;
addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + 4];
+ u32 tx_last = mlink->airtime_ac[i];
+ u32 rx_last = mlink->airtime_ac[i + 4];
- msta->airtime_ac[i] = mt76_rr(dev, addr);
- msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ mlink->airtime_ac[i] = mt76_rr(dev, addr);
+ mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ tx_time[i] = mlink->airtime_ac[i] - tx_last;
+ rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -87,10 +90,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
if (clear) {
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
}
- if (!msta->wcid.sta)
+ if (!mlink->wcid.sta)
continue;
sta = container_of((void *)msta, struct ieee80211_sta,
@@ -113,7 +116,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
* we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
- rate = &msta->wcid.rate;
+ rate = &mlink->wcid.rate;
addr = mt7921_mac_wtbl_lmac_addr(idx,
MT_WTBL_TXRX_CAP_RATE_OFFSET);
val = mt76_rr(dev, addr);
@@ -154,10 +157,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
rssi[2] = to_rssi(GENMASK(23, 16), val);
rssi[3] = to_rssi(GENMASK(31, 14), val);
- msta->ack_signal =
+ mlink->ack_signal =
mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
- ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
}
}
@@ -180,6 +183,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink;
u16 seq_ctrl = 0;
__le16 fc = 0;
u8 mode = 0;
@@ -210,10 +214,11 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- msta = container_of(status->wcid, struct mt792x_sta, wcid);
+ mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
+ msta = container_of(mlink, struct mt792x_sta, deflink);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
@@ -444,7 +449,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data)
{
- struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
@@ -468,15 +473,15 @@ void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid)
goto out;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
out:
@@ -513,7 +518,7 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
* 1'b0: msdu_id with the same 'wcid pair' as above.
*/
if (info & MT_TX_FREE_PAIR) {
- struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u16 idx;
count++;
@@ -523,10 +528,10 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
if (!sta)
continue;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&mdev->sta_poll_list);
spin_unlock_bh(&mdev->sta_poll_lock);
continue;
@@ -641,11 +646,12 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_disconnect(vif, true);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->sta.deflink.wcid, true);
mt7921_mcu_set_tx(dev, vif);
if (vif->type == NL80211_IFTYPE_AP) {
- mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
+ mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
true, NULL);
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
@@ -786,9 +792,9 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 3e3ad3518d85..2e6268cb06c0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -268,7 +268,7 @@ static int mt7921_start(struct ieee80211_hw *hw)
return err;
}
-static void mt7921_stop(struct ieee80211_hw *hw)
+static void mt7921_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
int err = 0;
@@ -281,7 +281,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
return;
}
- mt792x_stop(hw);
+ mt792x_stop(hw, false);
}
static int
@@ -295,40 +295,40 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt792x_mutex_acquire(dev);
- mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) {
+ mvif->bss_conf.mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mvif->bss_conf.mt76.idx >= MT792x_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
}
- mvif->mt76.omac_idx = mvif->mt76.idx;
+ mvif->bss_conf.mt76.omac_idx = mvif->bss_conf.mt76.idx;
mvif->phy = phy;
- mvif->mt76.band_idx = 0;
- mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mvif->bss_conf.mt76.band_idx = 0;
+ mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
- ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid,
- true);
+ ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->sta.deflink.wcid, true);
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
- phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->bss_conf.mt76.idx);
+ phy->omac_mask |= BIT_ULL(mvif->bss_conf.mt76.omac_idx);
- idx = MT792x_WTBL_RESERVED - mvif->mt76.idx;
+ idx = MT792x_WTBL_RESERVED - mvif->bss_conf.mt76.idx;
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
- mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.wcid);
+ INIT_LIST_HEAD(&mvif->sta.deflink.wcid.poll_list);
+ mvif->sta.deflink.wcid.idx = idx;
+ mvif->sta.deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
+ mvif->sta.deflink.wcid.hw_key_idx = -1;
+ mvif->sta.deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&mvif->sta.deflink.wcid);
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ewma_rssi_init(&mvif->rssi);
+ ewma_rssi_init(&mvif->bss_conf.rssi);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.deflink.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = idx;
@@ -494,7 +494,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
&mvif->sta;
- struct mt76_wcid *wcid = &msta->wcid;
+ struct mt76_wcid *wcid = &msta->deflink.wcid;
u8 *wcid_keyidx = &wcid->hw_key_idx;
int idx = key->keyidx, err = 0;
@@ -541,18 +541,18 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
mt76_wcid_key_setup(&dev->mt76, wcid, key);
- err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->deflink.bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
+ &msta->deflink.wcid, cmd);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
err = mt76_connac_mcu_add_key(&dev->mt76, vif,
- &mvif->wep_sta->bip,
+ &mvif->wep_sta->deflink.bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
- &mvif->wep_sta->wcid, cmd);
+ &mvif->wep_sta->deflink.wcid, cmd);
out:
mt792x_mutex_release(dev);
@@ -718,7 +718,7 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ARP_FILTER) {
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76,
+ mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->bss_conf.mt76,
info);
}
@@ -799,13 +799,13 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
- INIT_LIST_HEAD(&msta->wcid.poll_list);
+ INIT_LIST_HEAD(&msta->deflink.wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.phy_idx = mvif->mt76.band_idx;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->last_txs = jiffies;
+ msta->deflink.wcid.sta = 1;
+ msta->deflink.wcid.idx = idx;
+ msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
+ msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->deflink.last_txs = jiffies;
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (ret)
@@ -840,14 +840,14 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
- true, mvif->mt76.ctx);
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.deflink.wcid,
+ true, mvif->bss_conf.mt76.ctx);
- ewma_avg_signal_init(&msta->avg_ack_signal);
+ ewma_avg_signal_init(&msta->deflink.avg_ack_signal);
- mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(msta->deflink.airtime_ac, 0, sizeof(msta->deflink.airtime_ac));
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
@@ -861,27 +861,27 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->deflink.wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
mt7921_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE);
- mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
if (vif->type == NL80211_IFTYPE_STATION) {
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
mvif->wep_sta = NULL;
- ewma_rssi_init(&mvif->rssi);
+ ewma_rssi_init(&mvif->bss_conf.rssi);
if (!sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
- &mvif->sta.wcid, false,
- mvif->mt76.ctx);
+ &mvif->sta.deflink.wcid, false,
+ mvif->bss_conf.mt76.ctx);
}
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
+ if (!list_empty(&msta->deflink.wcid.poll_list))
+ list_del_init(&msta->deflink.wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
mt7921_regd_set_6ghz_power_type(vif, false);
@@ -923,12 +923,12 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
switch (action) {
case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
mt7921_mcu_uni_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
mt7921_mcu_uni_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -939,16 +939,16 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7921_mcu_uni_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta->wcid.ampdu_state);
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7921_mcu_uni_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -1166,11 +1166,11 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
mt792x_mutex_acquire(dev);
if (enabled)
- set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags);
else
- clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags);
- mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
+ mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->deflink.wcid,
MCU_UNI_CMD(STA_REC_UPDATE));
mt792x_mutex_release(dev);
@@ -1196,7 +1196,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
struct mt76_connac_arpns_tlv arpns;
} req_hdr = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
@@ -1294,8 +1294,8 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
- err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
- true, mvif->mt76.ctx);
+ err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid,
+ true, mvif->bss_conf.mt76.ctx);
if (err)
goto out;
@@ -1326,8 +1326,8 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (err)
goto out;
- mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false,
- mvif->mt76.ctx);
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid, false,
+ mvif->bss_conf.mt76.ctx);
out:
mt792x_mutex_release(dev);
@@ -1346,32 +1346,27 @@ mt7921_remove_chanctx(struct ieee80211_hw *hw,
{
}
-static void mt7921_ctx_iter(void *priv, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct ieee80211_chanctx_conf *ctx = priv;
-
- if (ctx != mvif->mt76.ctx)
- return;
-
- if (vif->type == NL80211_IFTYPE_MONITOR)
- mt7921_mcu_config_sniffer(mvif, ctx);
- else
- mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
-}
-
static void
mt7921_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct ieee80211_vif *vif;
+ struct mt792x_vif *mvif;
+
+ if (!mctx->bss_conf)
+ return;
+
+ mvif = container_of(mctx->bss_conf, struct mt792x_vif, bss_conf);
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
mt792x_mutex_acquire(phy->dev);
- ieee80211_iterate_active_interfaces(phy->mt76->hw,
- IEEE80211_IFACE_ITER_ACTIVE,
- mt7921_ctx_iter, ctx);
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mt7921_mcu_config_sniffer(mvif, ctx);
+ else
+ mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->bss_conf.mt76, ctx);
mt792x_mutex_release(phy->dev);
}
@@ -1385,7 +1380,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw,
jiffies_to_msecs(HZ);
mt792x_mutex_acquire(dev);
- mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration,
+ mt7921_set_roc(mvif->phy, mvif, mvif->bss_conf.mt76.ctx->def.chan, duration,
MT7921_ROC_REQ_JOIN);
mt792x_mutex_release(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index bdd8b5f19b24..394fcd799345 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -105,7 +105,7 @@ mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
struct mt76_connac_arpns_tlv arpns;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
@@ -260,7 +260,7 @@ mt7921_mcu_rssi_monitor_iter(void *priv, u8 *mac,
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt76_connac_rssi_notify_event *event = priv;
enum nl80211_cqm_rssi_threshold_event nl_event;
- s32 rssi = le32_to_cpu(event->rssi[mvif->mt76.idx]);
+ s32 rssi = le32_to_cpu(event->rssi[mvif->bss_conf.mt76.idx]);
if (!rssi)
return;
@@ -386,9 +386,9 @@ int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev,
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
if (enable && !params->amsdu)
- msta->wcid.amsdu = false;
+ msta->deflink.wcid.amsdu = false;
- return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params,
MCU_UNI_CMD(STA_REC_UPDATE),
enable, true);
}
@@ -399,7 +399,7 @@ int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev,
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params,
MCU_UNI_CMD(STA_REC_UPDATE),
enable, false);
}
@@ -678,9 +678,9 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
u8 wmm_idx;
u8 pad;
} __packed req = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
.qos = vif->bss_conf.qos,
- .wmm_idx = mvif->mt76.wmm_idx,
+ .wmm_idx = mvif->bss_conf.mt76.wmm_idx,
};
struct mu_edca {
u8 cw_min;
@@ -701,15 +701,15 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
struct mu_edca edca[IEEE80211_NUM_ACS];
u8 pad3[32];
} __packed req_mu = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
.qos = vif->bss_conf.qos,
- .wmm_idx = mvif->mt76.wmm_idx,
+ .wmm_idx = mvif->bss_conf.mt76.wmm_idx,
};
static const int to_aci[] = { 1, 0, 2, 3 };
int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct ieee80211_tx_queue_params *q = &mvif->bss_conf.queue_params[ac];
struct edca *e = &req.edca[to_aci[ac]];
e->aifs = cpu_to_le16(q->aifs);
@@ -738,10 +738,10 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
struct ieee80211_he_mu_edca_param_ac_rec *q;
struct mu_edca *e;
- if (!mvif->queue_params[ac].mu_edca)
+ if (!mvif->bss_conf.queue_params[ac].mu_edca)
break;
- q = &mvif->queue_params[ac].mu_edca_param_rec;
+ q = &mvif->bss_conf.queue_params[ac].mu_edca_param_rec;
e = &(req_mu.edca[to_aci[ac]]);
e->cw_min = q->ecw_min_max & 0xf;
@@ -790,7 +790,7 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tokenid = token_id,
.reqtype = type,
.maxinterval = cpu_to_le32(duration),
- .bss_idx = vif->mt76.idx,
+ .bss_idx = vif->bss_conf.mt76.idx,
.control_channel = chan->hw_value,
.bw = CMD_CBW_20MHZ,
.bw_from_ap = CMD_CBW_20MHZ,
@@ -842,7 +842,7 @@ int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tag = cpu_to_le16(UNI_ROC_ABORT),
.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
.tokenid = token_id,
- .bss_idx = vif->mt76.idx,
+ .bss_idx = vif->bss_conf.mt76.idx,
.dbdcband = 0xff, /* auto*/
},
};
@@ -947,7 +947,7 @@ int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
} __packed ps;
} __packed ps_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.ps = {
.tag = cpu_to_le16(UNI_BSS_INFO_PS),
@@ -982,7 +982,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed bcnft;
} __packed bcnft_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.bcnft = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
@@ -1015,7 +1015,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
u8 bmc_triggered_ac;
u8 pad;
} req = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
.aid = cpu_to_le16(vif->cfg.aid),
.dtim_period = vif->bss_conf.dtim_period,
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
@@ -1024,7 +1024,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
u8 bss_idx;
u8 pad[3];
} req_hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
};
int err;
@@ -1042,7 +1042,7 @@ int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
enum mt76_sta_info_state state)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- int rssi = -ewma_rssi_read(&mvif->rssi);
+ int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi);
struct mt76_sta_cmd_info info = {
.sta = sta,
.vif = vif,
@@ -1055,7 +1055,7 @@ int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
struct mt792x_sta *msta;
msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL;
- info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
+ info.wcid = msta ? &msta->deflink.wcid : &mvif->sta.deflink.wcid;
info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
@@ -1190,7 +1190,7 @@ int mt7921_mcu_config_sniffer(struct mt792x_vif *vif,
} __packed tlv;
} __packed req = {
.hdr = {
- .band_idx = vif->mt76.band_idx,
+ .band_idx = vif->bss_conf.mt76.band_idx,
},
.tlv = {
.tag = cpu_to_le16(1),
@@ -1251,7 +1251,7 @@ mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
} __packed beacon_tlv;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.beacon_tlv = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
@@ -1460,7 +1460,7 @@ int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif
.enable = vif->cfg.assoc,
.cqm_rssi_high = vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst,
.cqm_rssi_low = vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst,
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(RSSI_MONITOR),
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index f768e9389ac6..a7430216a80d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -219,7 +219,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt792x_poll_tx);
napi_enable(&dev->mt76.tx_napi);
@@ -339,6 +339,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
bus_ops->rmw = mt7921_rmw;
dev->mt76.bus = bus_ops;
+ if (!mt7921_disable_aspm && mt76_pci_aspm_supported(pdev))
+ dev->aspm_supported = true;
+
ret = mt792xe_mcu_fw_pmctrl(dev);
if (ret)
goto err_free_dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index 031ba9aaa4e2..2452b1a2d118 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -34,9 +34,9 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index c4cbc8976046..039949b344b9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -179,6 +179,12 @@ static void mt7925_init_work(struct work_struct *work)
mt76_set_stream_caps(&dev->mphy, true);
mt7925_set_stream_he_eht_caps(&dev->phy);
+ ret = mt7925_init_mlo_caps(&dev->phy);
+ if (ret) {
+ dev_err(dev->mt76.dev, "MLO init failed\n");
+ return;
+ }
+
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index c2460ef4993d..cf36750cf709 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -28,6 +28,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
};
struct ieee80211_sta *sta;
struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
struct rate_info *rate;
@@ -46,24 +47,25 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
if (list_empty(&sta_poll_list))
break;
- msta = list_first_entry(&sta_poll_list,
- struct mt792x_sta, wcid.poll_list);
+ mlink = list_first_entry(&sta_poll_list,
+ struct mt792x_link_sta, wcid.poll_list);
+ msta = container_of(mlink, struct mt792x_sta, deflink);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- list_del_init(&msta->wcid.poll_list);
+ list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- idx = msta->wcid.idx;
+ idx = mlink->wcid.idx;
addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + 4];
+ u32 tx_last = mlink->airtime_ac[i];
+ u32 rx_last = mlink->airtime_ac[i + 4];
- msta->airtime_ac[i] = mt76_rr(dev, addr);
- msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ mlink->airtime_ac[i] = mt76_rr(dev, addr);
+ mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ tx_time[i] = mlink->airtime_ac[i] - tx_last;
+ rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -74,10 +76,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
if (clear) {
mt7925_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
}
- if (!msta->wcid.sta)
+ if (!mlink->wcid.sta)
continue;
sta = container_of((void *)msta, struct ieee80211_sta,
@@ -100,7 +102,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
* we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
- rate = &msta->wcid.rate;
+ rate = &mlink->wcid.rate;
switch (rate->bw) {
case RATE_INFO_BW_160:
@@ -144,10 +146,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
rssi[2] = to_rssi(GENMASK(23, 16), val);
rssi[3] = to_rssi(GENMASK(31, 14), val);
- msta->ack_signal =
+ mlink->ack_signal =
mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
- ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
}
}
@@ -365,7 +367,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink;
u8 mode = 0; /* , band_idx; */
u16 seq_ctrl = 0;
__le16 fc = 0;
@@ -393,10 +395,10 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- msta = container_of(status->wcid, struct mt792x_sta, wcid);
+ mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
@@ -738,8 +740,12 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
BSS_CHANGED_BEACON_ENABLED));
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY));
+ struct mt792x_bss_conf *mconf;
+
+ mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
+ wcid->link_id) : NULL;
+ mvif = mconf ? (struct mt76_vif *)&mconf->mt76 : NULL;
- mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
if (mvif) {
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -800,8 +806,10 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
- FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ if (!ieee80211_vif_is_mld(vif) ||
+ (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0))
+ val |= MT_TXD6_DIS_MAT;
txwi[6] = cpu_to_le32(val);
txwi[7] = 0;
@@ -831,27 +839,53 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi);
-static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb,
+ struct mt76_wcid *wcid)
{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
struct mt792x_sta *msta;
+ bool is_8023;
u16 fc, tid;
- u32 val;
- if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ link_sta = rcu_dereference(sta->link[wcid->link_id]);
+ if (!link_sta)
return;
- tid = le32_get_bits(txwi[1], MT_TXD1_TID);
- if (tid >= 6) /* skip VO queue */
+ if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
return;
- val = le32_to_cpu(txwi[2]);
- fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
- FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+
+ if (is_8023) {
+ fc = IEEE80211_FTYPE_DATA |
+ (sta->wme ? IEEE80211_STYPE_QOS_DATA :
+ IEEE80211_STYPE_DATA);
+ } else {
+ /* No need to get precise TID for Action/Management Frame,
+ * since it will not meet the following Frame Control
+ * condition anyway.
+ */
+
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ fc = le16_to_cpu(hdr->frame_control) &
+ (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
+ }
+
if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
return;
msta = (struct mt792x_sta *)sta->drv_priv;
- if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
+
+ if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED)
+ mlink = rcu_dereference(msta->link[msta->deflink_id]);
+ else
+ mlink = &msta->deflink;
+
+ if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
@@ -991,7 +1025,7 @@ out_no_skb:
void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
{
- struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink = NULL;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
@@ -1015,15 +1049,15 @@ void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid)
goto out;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
out:
@@ -1031,7 +1065,7 @@ out:
}
void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
@@ -1044,10 +1078,8 @@ void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
if (sta) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7925_tx_check_aggr(sta, txwi);
+ mt7925_tx_check_aggr(sta, t->skb, wcid);
wcid_idx = wcid->idx;
} else {
@@ -1094,7 +1126,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
*/
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
- struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
@@ -1103,10 +1135,10 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
if (!sta)
continue;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&mdev->sta_poll_list);
spin_unlock_bh(&mdev->sta_poll_lock);
continue;
@@ -1132,7 +1164,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
if (!txwi)
continue;
- mt7925_txwi_free(dev, txwi, sta, 0, &free_list);
+ mt7925_txwi_free(dev, txwi, sta, wcid, &free_list);
}
}
@@ -1235,17 +1267,26 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
struct mt792x_dev *dev = mvif->phy->dev;
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_disconnect(vif, true);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
- mt7925_mcu_set_tx(dev, vif);
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf,
+ &mvif->sta.deflink.wcid, true);
+ mt7925_mcu_set_tx(dev, bss_conf);
+ }
if (vif->type == NL80211_IFTYPE_AP) {
- mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
+ mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
true, NULL);
mt7925_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
@@ -1380,9 +1421,9 @@ int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
@@ -1417,7 +1458,7 @@ void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
sta = wcid_to_sta(wcid);
if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7925_tx_check_aggr(sta, txwi);
+ mt76_connac2_tx_check_aggr(sta, txwi);
skb_pull(e->skb, headroom);
mt76_tx_complete_skb(mdev, e->wcid, e->skb);
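
The mac.c changes above re-home the mt76_wcid inside struct mt792x_link_sta, so code that only has a wcid pointer now recovers the link struct with container_of(), and reaches the parent STA through a second container_of() on deflink (valid only for the default link, as in mt7925_mac_sta_poll()). A minimal sketch of that double container_of pattern, with hypothetical example_* structs in place of the driver's real ones:

#include <stddef.h>

/* Same idea as the kernel's container_of(): step back from an embedded
 * member to the structure that contains it.
 */
#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_wcid { int idx; };

struct example_link_sta {
	struct example_wcid wcid;	/* embedded, one per link */
};

struct example_sta {
	struct example_link_sta deflink;	/* default link */
};

static struct example_sta *example_wcid_to_sta(struct example_wcid *wcid)
{
	struct example_link_sta *mlink =
		example_container_of(wcid, struct example_link_sta, wcid);

	/* Only correct when mlink really is the embedded default link. */
	return example_container_of(mlink, struct example_sta, deflink);
}

Keeping the wcid in the link struct is what lets RX, TX-free and TXS paths stay link-aware without extra lookups: the hardware WLAN index already identifies exactly one link.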
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 6179798a8845..8c0768bf9343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -236,6 +236,35 @@ mt7925_init_eht_caps(struct mt792x_phy *phy, enum nl80211_band band,
eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
}
+int mt7925_init_mlo_caps(struct mt792x_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ static const u8 ext_capa_sta[] = {
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ };
+ static struct wiphy_iftype_ext_capab ext_capab[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ext_capa_sta,
+ .extended_capabilities_mask = ext_capa_sta,
+ .extended_capabilities_len = sizeof(ext_capa_sta),
+ },
+ };
+
+ if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EVT_EN))
+ return 0;
+
+ ext_capab[0].eml_capabilities = phy->eml_cap;
+ ext_capab[0].mld_capa_and_ops =
+ u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
+
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ wiphy->iftype_ext_capab = ext_capab;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(ext_capab);
+
+ return 0;
+}
+
static void
__mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy,
struct ieee80211_supported_band *sband,
@@ -317,62 +346,83 @@ static int mt7925_start(struct ieee80211_hw *hw)
return err;
}
-static int
-mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt792x_link_sta *mlink)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_dev *dev = mt792x_hw_dev(hw);
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt792x_vif *mvif = mconf->vif;
struct mt76_txq *mtxq;
int idx, ret = 0;
- mt792x_mutex_acquire(dev);
-
- mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) {
+ mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
}
- mvif->mt76.omac_idx = mvif->mt76.idx;
- mvif->phy = phy;
- mvif->mt76.band_idx = 0;
- mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
+ 0 : mconf->mt76.idx;
+ mconf->mt76.band_idx = 0xff;
+ mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
- if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
- mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
+ if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
+ mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
else
- mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
+ mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
- ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid,
- true);
+ ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf,
+ &mlink->wcid, true);
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
- phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx);
+ mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx);
- idx = MT792x_WTBL_RESERVED - mvif->mt76.idx;
+ idx = MT792x_WTBL_RESERVED - mconf->mt76.idx;
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
- mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mvif->sta.vif = mvif;
- mt76_wcid_init(&mvif->sta.wcid);
+ INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ mlink->wcid.idx = idx;
+ mlink->wcid.phy_idx = mconf->mt76.band_idx;
+ mlink->wcid.hw_key_idx = -1;
+ mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&mlink->wcid);
mt7925_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ewma_rssi_init(&mvif->rssi);
+ ewma_rssi_init(&mconf->rssi);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = idx;
}
+out:
+ return ret;
+}
+
+static int
+mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ int ret = 0;
+
+ mt792x_mutex_acquire(dev);
+
+ mvif->phy = phy;
+ mvif->bss_conf.vif = mvif;
+ mvif->sta.vif = mvif;
+ mvif->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+
+ ret = mt7925_mac_link_bss_add(dev, &vif->bss_conf, &mvif->sta.deflink);
+ if (ret < 0)
+ goto out;
+
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
out:
mt792x_mutex_release(dev);
@@ -386,7 +436,7 @@ static void mt7925_roc_iter(void *priv, u8 *mac,
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_phy *phy = priv;
- mt7925_mcu_abort_roc(phy, mvif, phy->roc_token_id);
+ mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id);
}
void mt7925_roc_work(struct work_struct *work)
@@ -407,7 +457,8 @@ void mt7925_roc_work(struct work_struct *work)
ieee80211_remain_on_channel_expired(phy->mt76->hw);
}
-static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif)
+static int mt7925_abort_roc(struct mt792x_phy *phy,
+ struct mt792x_bss_conf *mconf)
{
int err = 0;
@@ -416,14 +467,14 @@ static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif)
mt792x_mutex_acquire(phy->dev);
if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
- err = mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ err = mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id);
mt792x_mutex_release(phy->dev);
return err;
}
static int mt7925_set_roc(struct mt792x_phy *phy,
- struct mt792x_vif *vif,
+ struct mt792x_bss_conf *mconf,
struct ieee80211_channel *chan,
int duration,
enum mt7925_roc_req type)
@@ -435,7 +486,7 @@ static int mt7925_set_roc(struct mt792x_phy *phy,
phy->roc_grant = false;
- err = mt7925_mcu_set_roc(phy, vif, chan, duration, type,
+ err = mt7925_mcu_set_roc(phy, mconf, chan, duration, type,
++phy->roc_token_id);
if (err < 0) {
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
@@ -443,7 +494,34 @@ static int mt7925_set_roc(struct mt792x_phy *phy,
}
if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) {
- mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id);
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ err = -ETIMEDOUT;
+ }
+
+out:
+ return err;
+}
+
+static int mt7925_set_mlo_roc(struct mt792x_phy *phy,
+ struct mt792x_bss_conf *mconf,
+ u16 sel_links)
+{
+ int err;
+
+ if (WARN_ON_ONCE(test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)))
+ return -EBUSY;
+
+ phy->roc_grant = false;
+
+ err = mt7925_mcu_set_mlo_roc(mconf, sel_links, 5, ++phy->roc_token_id);
+ if (err < 0) {
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ goto out;
+ }
+
+ if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) {
+ mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id);
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
err = -ETIMEDOUT;
}
@@ -463,7 +541,8 @@ static int mt7925_remain_on_channel(struct ieee80211_hw *hw,
int err;
mt792x_mutex_acquire(phy->dev);
- err = mt7925_set_roc(phy, mvif, chan, duration, MT7925_ROC_REQ_ROC);
+ err = mt7925_set_roc(phy, &mvif->bss_conf,
+ chan, duration, MT7925_ROC_REQ_ROC);
mt792x_mutex_release(phy->dev);
return err;
@@ -475,30 +554,31 @@ static int mt7925_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- return mt7925_abort_roc(phy, mvif);
+ return mt7925_abort_roc(phy, &mvif->bss_conf);
}
-static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int mt7925_set_link_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int link_id)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
&mvif->sta;
- struct mt76_wcid *wcid = &msta->wcid;
- u8 *wcid_keyidx = &wcid->hw_key_idx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
int idx = key->keyidx, err = 0;
-
- /* The hardware does not support per-STA RX GTK, fallback
- * to software mode for these.
- */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
- key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
+ struct mt76_wcid *wcid;
+ u8 *wcid_keyidx;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+ link_sta = sta ? mt792x_sta_to_link_sta(vif, sta, link_id) : NULL;
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(msta, link_id);
+ wcid = &mlink->wcid;
+ wcid_keyidx = &wcid->hw_key_idx;
/* fall back to sw encryption for unsupported ciphers */
switch (key->cipher) {
@@ -522,13 +602,12 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- mt792x_mutex_acquire(dev);
-
- if (cmd == SET_KEY && !mvif->mt76.cipher) {
+ if (cmd == SET_KEY && !mconf->mt76.cipher) {
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
- mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true);
+ mconf->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
+ mt7925_mcu_add_bss_info(phy, mconf->mt76.ctx, link_conf,
+ link_sta, true);
}
if (cmd == SET_KEY)
@@ -541,20 +620,59 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7925_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ err = mt7925_mcu_add_key(&dev->mt76, vif, &mlink->bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
+ &mlink->wcid, cmd, msta);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
- err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->bip,
+ err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->deflink.bip,
key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &mvif->wep_sta->wcid, cmd);
-
+ &mvif->wep_sta->deflink.wcid, cmd, msta);
out:
+ return err;
+}
+
+static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
+ &mvif->sta;
+ int err;
+
+ /* The hardware does not support per-STA RX GTK, fallback
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ mt792x_mutex_acquire(dev);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ unsigned int link_id;
+ unsigned long add;
+
+ add = key->link_id != -1 ? BIT(key->link_id) : msta->valid_links;
+
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ err = mt7925_set_link_key(hw, cmd, vif, sta, key, link_id);
+ if (err < 0)
+ break;
+ }
+ } else {
+ err = mt7925_set_link_key(hw, cmd, vif, sta, key, vif->bss_conf.link_id);
+ }
+
mt792x_mutex_release(dev);
return err;
@@ -695,166 +813,388 @@ mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return mvif->basic_rates_idx;
}
-static void mt7925_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
- struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_bss_conf *link_conf;
+ struct mt792x_bss_conf *mconf;
+ u8 link_id = link_sta->link_id;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_sta *msta;
+ int ret, idx;
- mt792x_mutex_acquire(dev);
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_id);
- if (changed & BSS_CHANGED_ERP_SLOT) {
- int slottime = info->use_short_slot ? 9 : 20;
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
- if (slottime != phy->slottime) {
- phy->slottime = slottime;
- mt7925_mcu_set_timing(phy, vif);
- }
- }
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ mlink->wcid.sta = 1;
+ mlink->wcid.idx = idx;
+ mlink->wcid.phy_idx = mconf->mt76.band_idx;
+ mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mlink->last_txs = jiffies;
+ mlink->wcid.link_id = link_sta->link_id;
+ mlink->wcid.link_valid = !!link_sta->sta->valid_links;
- if (changed & BSS_CHANGED_MCAST_RATE)
- mvif->mcast_rates_idx =
- mt7925_get_rates_table(hw, vif, false, true);
+ ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ if (ret)
+ return ret;
- if (changed & BSS_CHANGED_BASIC_RATES)
- mvif->basic_rates_idx =
- mt7925_get_rates_table(hw, vif, false, false);
+ mt7925_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- if (changed & (BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED)) {
- mvif->beacon_rates_idx =
- mt7925_get_rates_table(hw, vif, true, false);
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
- mt7925_mcu_uni_add_beacon_offload(dev, hw, vif,
- info->enable_beacon);
+ /* should update bss info before STA add */
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls)
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, false);
+
+ if (ieee80211_vif_is_mld(vif) &&
+ link_sta == mlink->pri_link) {
+ ret = mt7925_mcu_sta_update(dev, link_sta, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+ if (ret)
+ return ret;
+ } else if (ieee80211_vif_is_mld(vif) &&
+ link_sta != mlink->pri_link) {
+ ret = mt7925_mcu_sta_update(dev, mlink->pri_link, vif,
+ true, MT76_STA_INFO_STATE_ASSOC);
+ if (ret)
+ return ret;
+
+ ret = mt7925_mcu_sta_update(dev, link_sta, vif, true,
+ MT76_STA_INFO_STATE_ASSOC);
+ if (ret)
+ return ret;
+ } else {
+ ret = mt7925_mcu_sta_update(dev, link_sta, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+ if (ret)
+ return ret;
}
- /* ensure that enable txcmd_mode after bss_info */
- if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
- mt7925_mcu_set_tx(dev, vif);
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
- if (changed & BSS_CHANGED_PS)
- mt7925_mcu_uni_bss_ps(dev, vif);
+ return 0;
+}
- if (changed & BSS_CHANGED_ASSOC) {
- mt7925_mcu_sta_update(dev, NULL, vif, true,
- MT76_STA_INFO_STATE_ASSOC);
- mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
- }
+static int
+mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned long new_links)
+{
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt76_wcid *wcid;
+ unsigned int link_id;
+ int err = 0;
- if (changed & BSS_CHANGED_ARP_FILTER) {
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
+
+ if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) {
+ mlink = &msta->deflink;
+ msta->deflink_id = link_id;
+ } else {
+ mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink), GFP_KERNEL);
+ if (!mlink) {
+ err = -ENOMEM;
+ break;
+ }
- mt7925_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, info);
+ wcid = &mlink->wcid;
+ ewma_signal_init(&wcid->rssi);
+ rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
+ mt76_wcid_init(wcid);
+ ewma_avg_signal_init(&mlink->avg_ack_signal);
+ memset(mlink->airtime_ac, 0,
+ sizeof(msta->deflink.airtime_ac));
+ }
+
+ msta->valid_links |= BIT(link_id);
+ rcu_assign_pointer(msta->link[link_id], mlink);
+ mlink->sta = msta;
+ mlink->pri_link = &sta->deflink;
+ mlink->wcid.def_wcid = &msta->deflink.wcid;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ mt7925_mac_link_sta_add(&dev->mt76, vif, link_sta);
}
- mt792x_mutex_release(dev);
+ return err;
}
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- int ret, idx;
-
- idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1);
- if (idx < 0)
- return -ENOSPC;
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ int err;
- INIT_LIST_HEAD(&msta->wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.phy_idx = mvif->mt76.band_idx;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->last_txs = jiffies;
-
- ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
- if (ret)
- return ret;
if (vif->type == NL80211_IFTYPE_STATION)
mvif->wep_sta = msta;
- mt7925_mac_wtbl_update(dev, idx,
- MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ if (ieee80211_vif_is_mld(vif)) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
- /* should update bss info before STA add */
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta,
- false);
+ err = mt7925_mac_sta_add_links(dev, vif, sta, sta->valid_links);
+ } else {
+ err = mt7925_mac_link_sta_add(mdev, vif, &sta->deflink);
+ }
- ret = mt7925_mcu_sta_update(dev, sta, vif, true,
- MT76_STA_INFO_STATE_NONE);
- if (ret)
- return ret;
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt7925_mac_sta_add);
- mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+static u16
+mt7925_mac_select_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct {
+ u8 link_id;
+ enum nl80211_band band;
+ } data[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 link_id, i, j, n_data = 0;
+ u16 sel_links = 0;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ if (vif->active_links == usable_links)
+ return vif->active_links;
+
+ rcu_read_lock();
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(vif->link_conf[link_id]);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
- return 0;
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ n_data++;
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n_data; i++) {
+ if (!(BIT(data[i].link_id) & vif->active_links))
+ continue;
+
+ sel_links = BIT(data[i].link_id);
+
+ for (j = 0; j < n_data; j++) {
+ if (data[i].band != data[j].band) {
+ sel_links |= BIT(data[j].link_id);
+ break;
+ }
+ }
+
+ break;
+ }
+
+ return sel_links;
}
-EXPORT_SYMBOL_GPL(mt7925_mac_sta_add);
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void
+mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_bss_conf *link_conf =
+ mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+ struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper;
+ enum nl80211_band band = chandef->chan->band, secondary_band;
+
+ u16 sel_links = mt7925_mac_select_links(mdev, vif);
+ u8 secondary_link_id = __ffs(~BIT(mvif->deflink_id) & sel_links);
+
+ if (!ieee80211_vif_is_mld(vif) || hweight16(sel_links) < 2)
+ return;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, secondary_link_id);
+ secondary_band = link_conf->chanreq.oper.chan->band;
+
+ if (band == NL80211_BAND_2GHZ ||
+ (band == NL80211_BAND_5GHZ && secondary_band == NL80211_BAND_6GHZ)) {
+ mt7925_abort_roc(mvif->phy, &mvif->bss_conf);
+
+ mt792x_mutex_acquire(dev);
+
+ mt7925_set_mlo_roc(mvif->phy, &mvif->bss_conf, sel_links);
+
+ mt792x_mutex_release(dev);
+ }
+
+ ieee80211_set_active_links_async(vif, sel_links);
+}
+
+static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ struct ieee80211_bss_conf *link_conf;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_sta *msta;
+
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
mt792x_mutex_acquire(dev);
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta,
- true);
+ if (ieee80211_vif_is_mld(vif)) {
+ link_conf = mt792x_vif_to_bss_conf(vif, msta->deflink_id);
+ } else {
+ link_conf = mt792x_vif_to_bss_conf(vif, vif->bss_conf.link_id);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
+ struct mt792x_bss_conf *mconf;
- ewma_avg_signal_init(&msta->avg_ack_signal);
+ mconf = mt792x_link_conf_to_mconf(link_conf);
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, true);
+ }
+
+ ewma_avg_signal_init(&mlink->avg_ack_signal);
- mt7925_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7925_mac_wtbl_update(dev, mlink->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
- mt7925_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
+ mt7925_mcu_sta_update(dev, link_sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
mt792x_mutex_release(dev);
}
+
+void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ if (ieee80211_vif_is_mld(vif)) {
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id);
+
+ mt7925_mac_set_links(mdev, vif);
+
+ mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
+ } else {
+ mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink);
+ }
+}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc);
-void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct ieee80211_bss_conf *link_conf;
+ u8 link_id = link_sta->link_id;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_sta *msta;
- mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_id);
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
- mt7925_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE);
- mt7925_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7925_mcu_sta_update(dev, link_sta, vif, false,
+ MT76_STA_INFO_STATE_NONE);
+ mt7925_mac_wtbl_update(dev, mlink->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- if (vif->type == NL80211_IFTYPE_STATION) {
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
- mvif->wep_sta = NULL;
- ewma_rssi_init(&mvif->rssi);
- if (!sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta,
- false);
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
+ struct mt792x_bss_conf *mconf;
+
+ mconf = mt792x_link_conf_to_mconf(link_conf);
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+ link_sta, false);
}
spin_lock_bh(&mdev->sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
+ if (!list_empty(&mlink->wcid.poll_list))
+ list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&mdev->sta_poll_lock);
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
+
+static int
+mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned long old_links)
+{
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_wcid *wcid;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ if (!link_sta)
+ continue;
+
+ mlink = mt792x_sta_to_link(msta, link_id);
+ if (!mlink)
+ continue;
+
+ mt7925_mac_link_sta_remove(&dev->mt76, vif, link_sta);
+
+ wcid = &mlink->wcid;
+ rcu_assign_pointer(msta->link[link_id], NULL);
+ msta->valid_links &= ~BIT(link_id);
+ mlink->sta = NULL;
+ mlink->pri_link = NULL;
+
+ if (link_sta != mlink->pri_link) {
+ mt76_wcid_cleanup(mdev, wcid);
+ mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx);
+ mt76_wcid_mask_clear(mdev->wcid_phy_mask, wcid->idx);
+ }
+
+ if (msta->deflink_id == link_id)
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ }
+
+ return 0;
+}
+
+void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ unsigned long rem;
+
+ rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
+
+ mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+
+ mvif->wep_sta = NULL;
+ ewma_rssi_init(&mvif->bss_conf.rssi);
+ }
+}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove);
static int mt7925_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
@@ -890,12 +1230,12 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
switch (action) {
case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
mt7925_mcu_uni_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
mt7925_mcu_uni_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -906,16 +1246,16 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7925_mcu_uni_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta->wcid.ampdu_state);
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7925_mcu_uni_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -1156,27 +1496,38 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw,
bool enabled)
{
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ unsigned long valid = mvif->valid_links;
+ u8 i;
mt792x_mutex_acquire(dev);
- if (enabled)
- set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- else
- clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ valid = ieee80211_vif_is_mld(vif) ? mvif->valid_links : BIT(0);
+
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt792x_link_sta *mlink;
- mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+ mlink = mt792x_sta_to_link(msta, i);
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags);
+
+ mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i);
+ }
mt792x_mutex_release(dev);
}
#if IS_ENABLED(CONFIG_IPV6)
-static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct inet6_dev *idev)
+static void __mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_bss_conf *link_conf,
+ struct inet6_dev *idev)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_dev *dev = mvif->phy->dev;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct inet6_ifaddr *ifa;
struct sk_buff *skb;
u8 idx = 0;
@@ -1190,7 +1541,7 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
} req_hdr = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
@@ -1225,6 +1576,23 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work);
}
+
+static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
+
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ __mt7925_ipv6_addr_change(hw, bss_conf, idev);
+ }
+}
+
#endif
int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw,
@@ -1280,6 +1648,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct ieee80211_tx_queue_params *params)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, link_id);
static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
@@ -1288,7 +1657,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
};
/* firmware uses access class index */
- mvif->queue_params[mq_to_aci[queue]] = *params;
+ mconf->queue_params[mq_to_aci[queue]] = *params;
return 0;
}
@@ -1303,12 +1672,12 @@ mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
- err = mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL,
- true);
+ err = mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx,
+ link_conf, NULL, true);
if (err)
goto out;
- err = mt7925_mcu_set_bss_pm(dev, vif, true);
+ err = mt7925_mcu_set_bss_pm(dev, link_conf, true);
if (err)
goto out;
@@ -1330,12 +1699,12 @@ mt7925_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
- err = mt7925_mcu_set_bss_pm(dev, vif, false);
+ err = mt7925_mcu_set_bss_pm(dev, link_conf, false);
if (err)
goto out;
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL,
- false);
+ mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx, link_conf,
+ NULL, false);
out:
mt792x_mutex_release(dev);
@@ -1354,34 +1723,52 @@ mt7925_remove_chanctx(struct ieee80211_hw *hw,
{
}
-static void mt7925_ctx_iter(void *priv, u8 *mac,
- struct ieee80211_vif *vif)
+static void
+mt7925_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct ieee80211_chanctx_conf *ctx = priv;
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct mt792x_bss_conf *mconf;
+ struct ieee80211_vif *vif;
+ struct mt792x_vif *mvif;
- if (ctx != mvif->mt76.ctx)
+ if (!mctx->bss_conf)
return;
+ mconf = mctx->bss_conf;
+ mvif = mconf->vif;
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ mt792x_mutex_acquire(phy->dev);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mt7925_mcu_set_sniffer(mvif->phy->dev, vif, true);
mt7925_mcu_config_sniffer(mvif, ctx);
} else {
- mt7925_mcu_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
- }
-}
+ if (ieee80211_vif_is_mld(vif)) {
+ unsigned long valid = mvif->valid_links;
+ u8 i;
-static void
-mt7925_change_chanctx(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *ctx,
- u32 changed)
-{
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, i);
+ if (mconf && mconf->mt76.ctx == ctx)
+ break;
+ }
+
+ } else {
+ mconf = &mvif->bss_conf;
+ }
+
+ if (mconf) {
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
+ mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76,
+ link_conf, ctx);
+ }
+ }
- mt792x_mutex_acquire(phy->dev);
- ieee80211_iterate_active_interfaces(phy->mt76->hw,
- IEEE80211_IFACE_ITER_ACTIVE,
- mt7925_ctx_iter, ctx);
mt792x_mutex_release(phy->dev);
}
@@ -1395,7 +1782,8 @@ static void mt7925_mgd_prepare_tx(struct ieee80211_hw *hw,
jiffies_to_msecs(HZ);
mt792x_mutex_acquire(dev);
- mt7925_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration,
+ mt7925_set_roc(mvif->phy, &mvif->bss_conf,
+ mvif->bss_conf.mt76.ctx->def.chan, duration,
MT7925_ROC_REQ_JOIN);
mt792x_mutex_release(dev);
}
@@ -1406,7 +1794,285 @@ static void mt7925_mgd_complete_tx(struct ieee80211_hw *hw,
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- mt7925_abort_roc(mvif->phy, mvif);
+ mt7925_abort_roc(mvif->phy, &mvif->bss_conf);
+}
+
+static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
+
+ mt792x_mutex_acquire(dev);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt7925_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_ASSOC);
+ mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
+ }
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_update_arp_filter(&dev->mt76, bss_conf);
+ }
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ }
+ }
+
+ mt792x_mutex_release(dev);
+}
+
+static void mt7925_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_bss_conf *mconf;
+
+ mconf = mt792x_vif_to_link(mvif, info->link_id);
+
+ mt792x_mutex_acquire(dev);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7925_mcu_set_timing(phy, info);
+ }
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE)
+ mconf->mt76.mcast_rates_idx =
+ mt7925_get_rates_table(hw, vif, false, true);
+
+ if (changed & BSS_CHANGED_BASIC_RATES)
+ mconf->mt76.basic_rates_idx =
+ mt7925_get_rates_table(hw, vif, false, false);
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED)) {
+ mconf->mt76.beacon_rates_idx =
+ mt7925_get_rates_table(hw, vif, true, false);
+
+ mt7925_mcu_uni_add_beacon_offload(dev, hw, vif,
+ info->enable_beacon);
+ }
+
+ /* ensure that enable txcmd_mode after bss_info */
+ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ mt7925_mcu_set_tx(dev, info);
+
+ mt792x_mutex_release(dev);
+}
+
+static int
+mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct mt792x_bss_conf *mconfs[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mconf;
+ struct mt792x_link_sta *mlinks[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mlink;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long add = new_links & ~old_links;
+ unsigned long rem = old_links & ~new_links;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+ int err;
+
+ if (old_links == new_links)
+ return 0;
+
+ mt792x_mutex_acquire(dev);
+
+ for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(&mvif->sta, link_id);
+
+ if (!mconf || !mlink)
+ continue;
+
+ if (mconf != &mvif->bss_conf) {
+ mt792x_mac_link_bss_remove(dev, mconf, mlink);
+ devm_kfree(dev->mt76.dev, mconf);
+ devm_kfree(dev->mt76.dev, mlink);
+ }
+
+ rcu_assign_pointer(mvif->link_conf[link_id], NULL);
+ rcu_assign_pointer(mvif->sta.link[link_id], NULL);
+ }
+
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (!old_links) {
+ mvif->deflink_id = link_id;
+ mconf = &mvif->bss_conf;
+ mlink = &mvif->sta.deflink;
+ } else {
+ mconf = devm_kzalloc(dev->mt76.dev, sizeof(*mconf),
+ GFP_KERNEL);
+ mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
+ GFP_KERNEL);
+ }
+
+ mconfs[link_id] = mconf;
+ mlinks[link_id] = mlink;
+ mconf->link_id = link_id;
+ mconf->vif = mvif;
+ mlink->wcid.link_id = link_id;
+ mlink->wcid.link_valid = !!vif->valid_links;
+ mlink->wcid.def_wcid = &mvif->sta.deflink.wcid;
+ }
+
+ if (hweight16(mvif->valid_links) == 0)
+ mt792x_mac_link_bss_remove(dev, &mvif->bss_conf,
+ &mvif->sta.deflink);
+
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mconfs[link_id];
+ mlink = mlinks[link_id];
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+
+ rcu_assign_pointer(mvif->link_conf[link_id], mconf);
+ rcu_assign_pointer(mvif->sta.link[link_id], mlink);
+
+ err = mt7925_mac_link_bss_add(dev, link_conf, mlink);
+ if (err < 0)
+ goto free;
+
+ if (mconf != &mvif->bss_conf) {
+ err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
+ vif->active_links);
+ if (err < 0)
+ goto free;
+ }
+ }
+
+ mvif->valid_links = new_links;
+
+ mt792x_mutex_release(dev);
+
+ return 0;
+
+free:
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rcu_assign_pointer(mvif->link_conf[link_id], NULL);
+ rcu_assign_pointer(mvif->sta.link[link_id], NULL);
+
+ if (mconf != &mvif->bss_conf)
+ devm_kfree(dev->mt76.dev, mconfs[link_id]);
+ if (mlink != &mvif->sta.deflink)
+ devm_kfree(dev->mt76.dev, mlinks[link_id]);
+ }
+
+ mt792x_mutex_release(dev);
+
+ return err;
+}
+
+static int
+mt7925_change_sta_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 old_links, u16 new_links)
+{
+ unsigned long add = new_links & ~old_links;
+ unsigned long rem = old_links & ~new_links;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ int err = 0;
+
+ if (old_links == new_links)
+ return 0;
+
+ mt792x_mutex_acquire(dev);
+
+ err = mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+ if (err < 0)
+ goto out;
+
+ err = mt7925_mac_sta_add_links(dev, vif, sta, add);
+ if (err < 0)
+ goto out;
+
+out:
+ mt792x_mutex_release(dev);
+
+ return err;
+}
+
+static int mt7925_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct ieee80211_bss_conf *pri_link_conf;
+ struct mt792x_bss_conf *mconf;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
+ pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mconf == &mvif->bss_conf)
+ mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
+ NULL, true);
+ } else {
+ mconf = &mvif->bss_conf;
+ }
+
+ mconf->mt76.ctx = ctx;
+ mctx->bss_conf = mconf;
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct ieee80211_bss_conf *pri_link_conf;
+ struct mt792x_bss_conf *mconf;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
+ pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mconf == &mvif->bss_conf)
+ mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
+ NULL, false);
+ } else {
+ mconf = &mvif->bss_conf;
+ }
+
+ mctx->bss_conf = NULL;
+ mconf->mt76.ctx = NULL;
+ mutex_unlock(&dev->mt76.mutex);
}
const struct ieee80211_ops mt7925_ops = {
@@ -1418,7 +2084,6 @@ const struct ieee80211_ops mt7925_ops = {
.config = mt7925_config,
.conf_tx = mt7925_conf_tx,
.configure_filter = mt7925_configure_filter,
- .bss_info_changed = mt7925_bss_info_changed,
.start_ap = mt7925_start_ap,
.stop_ap = mt7925_stop_ap,
.sta_state = mt76_sta_state,
@@ -1462,10 +2127,14 @@ const struct ieee80211_ops mt7925_ops = {
.add_chanctx = mt7925_add_chanctx,
.remove_chanctx = mt7925_remove_chanctx,
.change_chanctx = mt7925_change_chanctx,
- .assign_vif_chanctx = mt792x_assign_vif_chanctx,
- .unassign_vif_chanctx = mt792x_unassign_vif_chanctx,
+ .assign_vif_chanctx = mt7925_assign_vif_chanctx,
+ .unassign_vif_chanctx = mt7925_unassign_vif_chanctx,
.mgd_prepare_tx = mt7925_mgd_prepare_tx,
.mgd_complete_tx = mt7925_mgd_complete_tx,
+ .vif_cfg_changed = mt7925_vif_cfg_changed,
+ .link_info_changed = mt7925_link_info_changed,
+ .change_vif_links = mt7925_change_vif_links,
+ .change_sta_links = mt7925_change_sta_links,
};
EXPORT_SYMBOL_GPL(mt7925_ops);
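
Most of the main.c callbacks above now fan out per link: they build a bitmap (mvif->valid_links for MLD interfaces, BIT(0) otherwise) and walk its set bits with for_each_set_bit() up to IEEE80211_MLD_MAX_NUM_LINKS. A plain-C sketch of that iteration, assuming the link limit is 15 and using a hypothetical example_for_each_link() in place of the kernel macro:

#include <stdio.h>

#define EXAMPLE_MAX_LINKS 15	/* stand-in for IEEE80211_MLD_MAX_NUM_LINKS */

static void example_for_each_link(unsigned long valid_links, int is_mld)
{
	/* Non-MLD interfaces are treated as having only link 0. */
	unsigned long valid = is_mld ? valid_links : 1UL;
	unsigned int link_id;

	for (link_id = 0; link_id < EXAMPLE_MAX_LINKS; link_id++) {
		if (!(valid & (1UL << link_id)))
			continue;
		/* per-link work goes here: one BSS/WCID/key programmed per pass */
		printf("configure link %u\n", link_id);
	}
}

This is why callbacks such as set_key, the IPv6 address update and the decap-offload toggle keep a single entry point but apply their effect once per active link.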
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 652a9accc43c..9dc22fbe25d3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -121,11 +121,12 @@ int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set)
EXPORT_SYMBOL_GPL(mt7925_mcu_regval);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
- struct ieee80211_bss_conf *info)
+ struct ieee80211_bss_conf *link_conf)
{
- struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif,
- bss_conf);
+ struct ieee80211_vif *mvif = container_of((void *)link_conf->vif,
+ struct ieee80211_vif,
+ drv_priv);
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct sk_buff *skb;
int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
@@ -137,7 +138,7 @@ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct mt7925_arpns_tlv arp;
} req = {
.hdr = {
- .bss_idx = vif->idx,
+ .bss_idx = mconf->mt76.idx,
},
.arp = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
@@ -308,22 +309,23 @@ mt7925_mcu_roc_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct mt7925_roc_grant_tlv *grant = priv;
+ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION)
+ return;
+
if (mvif->idx != grant->bss_idx)
return;
mvif->band_idx = grant->dbdcband;
}
-static void
-mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
+static void mt7925_mcu_roc_handle_grant(struct mt792x_dev *dev,
+ struct tlv *tlv)
{
struct ieee80211_hw *hw = dev->mt76.hw;
struct mt7925_roc_grant_tlv *grant;
- struct mt7925_mcu_rxd *rxd;
int duration;
- rxd = (struct mt7925_mcu_rxd *)skb->data;
- grant = (struct mt7925_roc_grant_tlv *)(rxd->tlv + 4);
+ grant = (struct mt7925_roc_grant_tlv *)tlv;
/* should never happen */
WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT));
@@ -342,6 +344,29 @@ mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
}
static void
+mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
+{
+ struct tlv *tlv;
+ int i = 0;
+
+ skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4);
+
+ while (i < skb->len) {
+ tlv = (struct tlv *)(skb->data + i);
+
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_ROC_GRANT:
+ mt7925_mcu_roc_handle_grant(dev, tlv);
+ break;
+ case UNI_EVENT_ROC_GRANT_SUB_LINK:
+ break;
+ }
+
+ i += le16_to_cpu(tlv->len);
+ }
+}
+
+static void
mt7925_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -544,9 +569,9 @@ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
struct mt792x_vif *mvif = msta->vif;
if (enable && !params->amsdu)
- msta->wcid.amsdu = false;
+ msta->deflink.wcid.amsdu = false;
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
enable, true);
}
@@ -557,7 +582,7 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
struct mt792x_vif *mvif = msta->vif;
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
enable, false);
}
@@ -726,6 +751,20 @@ mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
dev->has_eht = cap->eht;
}
+static void
+mt7925_mcu_parse_eml_cap(struct mt792x_dev *dev, char *data)
+{
+ struct mt7925_mcu_eml_cap {
+ u8 rsv[4];
+ __le16 eml_cap;
+ u8 rsv2[6];
+ } __packed * cap;
+
+ cap = (struct mt7925_mcu_eml_cap *)data;
+
+ dev->phy.eml_cap = le16_to_cpu(cap->eml_cap);
+}
+
static int
mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
{
@@ -780,6 +819,12 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
case MT_NIC_CAP_PHY:
mt7925_mcu_parse_phy_cap(dev, tlv->data);
break;
+ case MT_NIC_CAP_CHIP_CAP:
+ memcpy(&dev->phy.chip_cap, (void *)skb->data, sizeof(u64));
+ break;
+ case MT_NIC_CAP_EML_CAP:
+ mt7925_mcu_parse_eml_cap(dev, tlv->data);
+ break;
default:
break;
}
@@ -848,7 +893,7 @@ EXPORT_SYMBOL_GPL(mt7925_run_firmware);
static void
mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct sta_rec_hdr_trans *hdr_trans;
@@ -864,10 +909,15 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- if (sta)
- wcid = (struct mt76_wcid *)sta->drv_priv;
- else
- wcid = &mvif->sta.wcid;
+ if (link_sta) {
+ struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ struct mt792x_link_sta *mlink;
+
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ wcid = &mlink->wcid;
+ } else {
+ wcid = &mvif->sta.deflink.wcid;
+ }
if (!wcid)
return;
@@ -881,27 +931,36 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ int link_id)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_link_sta *link_sta = sta ? &sta->deflink : NULL;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
struct mt792x_sta *msta;
struct sk_buff *skb;
msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid,
+ mlink = mt792x_sta_to_link(msta, link_id);
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ mconf = mt792x_vif_to_link(mvif, link_id);
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mconf->mt76,
+ &mlink->wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec hdr trans */
- mt7925_mcu_sta_hdr_trans_tlv(skb, vif, sta);
+ mt7925_mcu_sta_hdr_trans_tlv(skb, vif, link_sta);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
-int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
+int mt7925_mcu_set_tx(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *bss_conf)
{
#define MCU_EDCA_AC_PARAM 0
#define WMM_AIFS_SET BIT(0)
@@ -910,12 +969,12 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
#define WMM_TXOP_SET BIT(3)
#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
WMM_CW_MAX_SET | WMM_TXOP_SET)
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(bss_conf);
struct {
u8 bss_idx;
u8 __rsv[3];
} __packed hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
};
struct sk_buff *skb;
int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca);
@@ -928,7 +987,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
skb_put_data(skb, &hdr, sizeof(hdr));
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct ieee80211_tx_queue_params *q = &mconf->queue_params[ac];
struct edca *e;
struct tlv *tlv;
@@ -960,11 +1019,12 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct sk_buff *skb,
struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+ enum set_key_cmd cmd,
+ struct mt792x_sta *msta)
{
- struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid);
- struct sta_rec_sec_uni *sec;
struct mt792x_vif *mvif = msta->vif;
+ struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id);
+ struct sta_rec_sec_uni *sec;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct tlv *tlv;
@@ -976,17 +1036,27 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->bss_idx = mvif->mt76.idx;
+ sec->bss_idx = mconf->mt76.idx;
sec->is_authenticator = 0;
- sec->mgmt_prot = 0;
+ sec->mgmt_prot = 1; /* only used in MLO mode */
sec->wlan_idx = (u8)wcid->idx;
if (sta) {
+ struct ieee80211_link_sta *link_sta;
+
sec->tx_key = 1;
sec->key_type = 1;
- memcpy(sec->peer_addr, sta->addr, ETH_ALEN);
+ link_sta = mt792x_sta_to_link_sta(vif, sta, wcid->link_id);
+
+ if (link_sta)
+ memcpy(sec->peer_addr, link_sta->addr, ETH_ALEN);
} else {
- memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, wcid->link_id);
+
+ if (link_conf)
+ memcpy(sec->peer_addr, link_conf->bssid, ETH_ALEN);
}
if (cmd == SET_KEY) {
@@ -1031,25 +1101,121 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
- struct mt76_wcid *wcid, enum set_key_cmd cmd)
+ struct mt76_wcid *wcid, enum set_key_cmd cmd,
+ struct mt792x_sta *msta)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id);
struct sk_buff *skb;
int ret;
- skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd);
+ ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd, msta);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
-int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ int duration, u8 token_id)
+{
+ struct mt792x_vif *mvif = mconf->vif;
+ struct ieee80211_vif *vif = container_of((void *)mvif,
+ struct ieee80211_vif, drv_priv);
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_channel *chan;
+ const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 1,
+ [NL80211_BAND_5GHZ] = 2,
+ [NL80211_BAND_6GHZ] = 3,
+ };
+ enum mt7925_roc_req type;
+ int center_ch, i = 0;
+ bool is_AG_band = false;
+ struct {
+ u8 id;
+ u8 bss_idx;
+ u16 tag;
+ struct mt792x_bss_conf *mconf;
+ struct ieee80211_channel *chan;
+ } links[2];
+
+ struct {
+ struct {
+ u8 rsv[4];
+ } __packed hdr;
+ struct roc_acquire_tlv roc[2];
+ } __packed req;
+
+ if (!mconf || hweight16(vif->valid_links) < 2 ||
+ hweight16(sel_links) != 2)
+ return -EPERM;
+
+ for (i = 0; i < ARRAY_SIZE(links); i++) {
+ links[i].id = i ? __ffs(~BIT(mconf->link_id) & sel_links) :
+ mconf->link_id;
+ link_conf = mt792x_vif_to_bss_conf(vif, links[i].id);
+ if (WARN_ON_ONCE(!link_conf))
+ return -EPERM;
+
+ links[i].chan = link_conf->chanreq.oper.chan;
+ if (WARN_ON_ONCE(!links[i].chan))
+ return -EPERM;
+
+ links[i].mconf = mt792x_vif_to_link(mvif, links[i].id);
+ links[i].tag = links[i].id == mconf->link_id ?
+ UNI_ROC_ACQUIRE : UNI_ROC_SUB_LINK;
+
+ is_AG_band |= links[i].chan->band == NL80211_BAND_2GHZ;
+ }
+
+ if (vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP)
+ type = is_AG_band ? MT7925_ROC_REQ_MLSR_AG :
+ MT7925_ROC_REQ_MLSR_AA;
+ else
+ type = MT7925_ROC_REQ_JOIN;
+
+ for (i = 0; i < ARRAY_SIZE(links) && i < hweight16(vif->active_links); i++) {
+ if (WARN_ON_ONCE(!links[i].mconf || !links[i].chan))
+ continue;
+
+ chan = links[i].chan;
+ center_ch = ieee80211_frequency_to_channel(chan->center_freq);
+ req.roc[i].len = cpu_to_le16(sizeof(struct roc_acquire_tlv));
+ req.roc[i].tag = cpu_to_le16(links[i].tag);
+ req.roc[i].tokenid = token_id;
+ req.roc[i].reqtype = type;
+ req.roc[i].maxinterval = cpu_to_le32(duration);
+ req.roc[i].bss_idx = links[i].mconf->mt76.idx;
+ req.roc[i].control_channel = chan->hw_value;
+ req.roc[i].bw = CMD_CBW_20MHZ;
+ req.roc[i].bw_from_ap = CMD_CBW_20MHZ;
+ req.roc[i].center_chan = center_ch;
+ req.roc[i].center_chan_from_ap = center_ch;
+
+ /* STR : 0xfe indicates BAND_ALL with enabling DBDC
+ * EMLSR : 0xff indicates (BAND_AUTO) without DBDC
+ */
+ req.roc[i].dbdcband = type == MT7925_ROC_REQ_JOIN ? 0xfe : 0xff;
+
+ if (chan->hw_value < center_ch)
+ req.roc[i].sco = 1; /* SCA */
+ else if (chan->hw_value > center_ch)
+ req.roc[i].sco = 3; /* SCB */
+
+ req.roc[i].band = ch_band[chan->band];
+ }
+
+ return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC),
+ &req, sizeof(req), false);
+}
+
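mt7925_mcu_set_mlo_roc() above fills slot 0 with the primary link and slot 1 with the lowest other bit set in sel_links, via __ffs(~BIT(link_id) & sel_links). The sketch below shows that selection with a portable builtin in place of __ffs(); the link numbers are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static unsigned int lowest_set_bit(uint16_t mask)
{
        /* mask must be non-zero; the driver already rejects masks with
         * fewer than two bits set */
        return (unsigned int)__builtin_ctz(mask);
}

int main(void)
{
        uint16_t sel_links = (1u << 0) | (1u << 2); /* links 0 and 2 selected */
        unsigned int primary = 2;                   /* link the request came from */
        unsigned int ids[2];

        ids[0] = primary;
        ids[1] = lowest_set_bit((uint16_t)(~(1u << primary) & sel_links));

        printf("roc[0] -> link %u, roc[1] -> link %u\n", ids[0], ids[1]);
        return 0;
}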
+int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
struct ieee80211_channel *chan, int duration,
enum mt7925_roc_req type, u8 token_id)
{
@@ -1059,25 +1225,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
struct {
u8 rsv[4];
} __packed hdr;
- struct roc_acquire_tlv {
- __le16 tag;
- __le16 len;
- u8 bss_idx;
- u8 tokenid;
- u8 control_channel;
- u8 sco;
- u8 band;
- u8 bw;
- u8 center_chan;
- u8 center_chan2;
- u8 bw_from_ap;
- u8 center_chan_from_ap;
- u8 center_chan2_from_ap;
- u8 reqtype;
- __le32 maxinterval;
- u8 dbdcband;
- u8 rsv[3];
- } __packed roc;
+ struct roc_acquire_tlv roc;
} __packed req = {
.roc = {
.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
@@ -1085,7 +1233,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tokenid = token_id,
.reqtype = type,
.maxinterval = cpu_to_le32(duration),
- .bss_idx = vif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
.control_channel = chan->hw_value,
.bw = CMD_CBW_20MHZ,
.bw_from_ap = CMD_CBW_20MHZ,
@@ -1116,7 +1264,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
&req, sizeof(req), false);
}
-int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id)
{
struct mt792x_dev *dev = phy->dev;
@@ -1137,7 +1285,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tag = cpu_to_le16(UNI_ROC_ABORT),
.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
.tokenid = token_id,
- .bss_idx = vif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
.dbdcband = 0xff, /* auto*/
},
};
@@ -1146,80 +1294,6 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
&req, sizeof(req), false);
}
-int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag)
-{
- static const u8 ch_band[] = {
- [NL80211_BAND_2GHZ] = 0,
- [NL80211_BAND_5GHZ] = 1,
- [NL80211_BAND_6GHZ] = 2,
- };
- struct mt792x_dev *dev = phy->dev;
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
- int freq1 = chandef->center_freq1;
- u8 band_idx = chandef->chan->band != NL80211_BAND_2GHZ;
- struct {
- /* fixed field */
- u8 __rsv[4];
-
- __le16 tag;
- __le16 len;
- u8 control_ch;
- u8 center_ch;
- u8 bw;
- u8 tx_path_num;
- u8 rx_path; /* mask or num */
- u8 switch_reason;
- u8 band_idx;
- u8 center_ch2; /* for 80+80 only */
- __le16 cac_case;
- u8 channel_band;
- u8 rsv0;
- __le32 outband_freq;
- u8 txpower_drop;
- u8 ap_bw;
- u8 ap_center_ch;
- u8 rsv1[53];
- } __packed req = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(sizeof(req) - 4),
- .control_ch = chandef->chan->hw_value,
- .center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt76_connac_chan_bw(chandef),
- .tx_path_num = hweight8(phy->mt76->antenna_mask),
- .rx_path = phy->mt76->antenna_mask,
- .band_idx = band_idx,
- .channel_band = ch_band[chandef->chan->band],
- };
-
- if (chandef->chan->band == NL80211_BAND_6GHZ)
- req.channel_band = 2;
- else
- req.channel_band = chandef->chan->band;
-
- if (tag == UNI_CHANNEL_RX_PATH ||
- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
- req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
- NL80211_IFTYPE_AP))
- req.switch_reason = CH_SWITCH_DFS;
- else
- req.switch_reason = CH_SWITCH_NORMAL;
-
- if (tag == UNI_CHANNEL_SWITCH)
- req.rx_path = hweight8(req.rx_path);
-
- if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
- int freq2 = chandef->center_freq2;
-
- req.center_ch2 = ieee80211_frequency_to_channel(freq2);
- }
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHANNEL_SWITCH),
- &req, sizeof(req), true);
-}
-
int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
{
struct {
@@ -1242,9 +1316,10 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom);
-int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
+int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct {
struct {
u8 bss_idx;
@@ -1263,16 +1338,16 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
} __packed ps;
} __packed ps_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.ps = {
.tag = cpu_to_le16(UNI_BSS_INFO_PS),
.len = cpu_to_le16(sizeof(struct ps_tlv)),
- .ps_state = vif->cfg.ps ? 2 : 0,
+ .ps_state = link_conf->vif->cfg.ps ? 2 : 0,
},
};
- if (vif->type != NL80211_IFTYPE_STATION)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
@@ -1280,10 +1355,10 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
}
static int
-mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
- bool enable)
+mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf, bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct {
struct {
u8 bss_idx;
@@ -1300,17 +1375,17 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed bcnft;
} __packed bcnft_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.bcnft = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
- .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
- .dtim_period = vif->bss_conf.dtim_period,
+ .bcn_interval = cpu_to_le16(link_conf->beacon_int),
+ .dtim_period = link_conf->dtim_period,
},
};
- if (vif->type != NL80211_IFTYPE_STATION)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION)
return 0;
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
@@ -1318,10 +1393,11 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
}
int
-mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct {
struct {
u8 bss_idx;
@@ -1338,13 +1414,13 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed enable;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.enable = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
- .dtim_period = vif->bss_conf.dtim_period,
- .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = link_conf->dtim_period,
+ .bcn_interval = cpu_to_le16(link_conf->beacon_int),
},
};
struct {
@@ -1358,7 +1434,7 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed disable;
} req1 = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.disable = {
.tag = cpu_to_le16(UNI_BSS_INFO_PM_DISABLE),
@@ -1377,42 +1453,43 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
}
static void
-mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
- if (!sta->deflink.he_cap.has_he)
+ if (!link_sta->he_cap.has_he)
return;
- mt76_connac_mcu_sta_he_tlv_v2(skb, sta);
+ mt76_connac_mcu_sta_he_tlv_v2(skb, link_sta->sta);
}
static void
-mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta)
{
struct sta_rec_he_6g_capa *he_6g;
struct tlv *tlv;
- if (!sta->deflink.he_6ghz_capa.capa)
+ if (!link_sta->he_6ghz_capa.capa)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g));
he_6g = (struct sta_rec_he_6g_capa *)tlv;
- he_6g->capa = sta->deflink.he_6ghz_capa.capa;
+ he_6g->capa = link_sta->he_6ghz_capa.capa;
}
static void
-mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct ieee80211_eht_mcs_nss_supp *mcs_map;
struct ieee80211_eht_cap_elem_fixed *elem;
struct sta_rec_eht *eht;
struct tlv *tlv;
- if (!sta->deflink.eht_cap.has_eht)
+ if (!link_sta->eht_cap.has_eht)
return;
- mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp;
- elem = &sta->deflink.eht_cap.eht_cap_elem;
+ mcs_map = &link_sta->eht_cap.eht_mcs_nss_supp;
+ elem = &link_sta->eht_cap.eht_cap_elem;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht));
@@ -1422,50 +1499,52 @@ mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
}
static void
-mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_ht *ht;
struct tlv *tlv;
- if (!sta->deflink.ht_cap.ht_supported)
+ if (!link_sta->ht_cap.ht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
+ ht->ht_cap = cpu_to_le16(link_sta->ht_cap.cap);
}
static void
-mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_vht *vht;
struct tlv *tlv;
/* For 6G band, this tlv is necessary to let hw work normally */
- if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported)
+ if (!link_sta->he_6ghz_capa.capa && !link_sta->vht_cap.vht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
- vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(link_sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = link_sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = link_sta->vht_cap.vht_mcs.tx_mcs_map;
}
static void
mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ struct mt792x_link_sta *mlink;
struct sta_rec_amsdu *amsdu;
struct tlv *tlv;
@@ -1473,16 +1552,18 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->deflink.agg.max_amsdu_len)
+ if (!link_sta->agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- msta->wcid.amsdu = true;
- switch (sta->deflink.agg.max_amsdu_len) {
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ mlink->wcid.amsdu = true;
+
+ switch (link_sta->agg.max_amsdu_len) {
case IEEE80211_MAX_MPDU_LEN_VHT_11454:
amsdu->max_mpdu_size =
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
@@ -1499,34 +1580,44 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb,
static void
mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def;
+ struct ieee80211_bss_conf *link_conf;
+ struct cfg80211_chan_def *chandef;
+ struct mt792x_bss_conf *mconf;
struct sta_rec_phy *phy;
struct tlv *tlv;
u8 af = 0, mm = 0;
+ link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id);
+ mconf = mt792x_vif_to_link(mvif, link_sta->link_id);
+ chandef = mconf->mt76.ctx ? &mconf->mt76.ctx->def :
+ &link_conf->chanreq.oper;
+
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
- phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta);
- phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
- if (sta->deflink.ht_cap.ht_supported) {
- af = sta->deflink.ht_cap.ampdu_factor;
- mm = sta->deflink.ht_cap.ampdu_density;
+ phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif,
+ chandef->chan->band,
+ link_sta);
+ phy->basic_rate = cpu_to_le16((u16)link_conf->basic_rates);
+ if (link_sta->ht_cap.ht_supported) {
+ af = link_sta->ht_cap.ampdu_factor;
+ mm = link_sta->ht_cap.ampdu_density;
}
- if (sta->deflink.vht_cap.vht_supported) {
+ if (link_sta->vht_cap.vht_supported) {
u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->deflink.vht_cap.cap);
+ link_sta->vht_cap.cap);
af = max_t(u8, af, vht_af);
}
- if (sta->deflink.he_6ghz_capa.capa) {
- af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ if (link_sta->he_6ghz_capa.capa) {
+ af = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
- mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ mm = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
}
@@ -1537,7 +1628,7 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
static void
mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
- struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_vif *vif,
u8 rcpi, u8 sta_state)
{
@@ -1557,28 +1648,37 @@ mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
state = (struct sta_rec_state_v2 *)tlv;
state->state = sta_state;
- if (sta->deflink.vht_cap.vht_supported) {
- state->vht_opmode = sta->deflink.bandwidth;
- state->vht_opmode |= sta->deflink.rx_nss <<
+ if (link_sta->vht_cap.vht_supported) {
+ state->vht_opmode = link_sta->bandwidth;
+ state->vht_opmode |= link_sta->rx_nss <<
IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
}
}
static void
mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def;
- enum nl80211_band band = chandef->chan->band;
+ struct ieee80211_bss_conf *link_conf;
+ struct cfg80211_chan_def *chandef;
struct sta_rec_ra_info *ra_info;
+ struct mt792x_bss_conf *mconf;
+ enum nl80211_band band;
struct tlv *tlv;
u16 supp_rates;
+ link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id);
+ mconf = mt792x_vif_to_link(mvif, link_sta->link_id);
+ chandef = mconf->mt76.ctx ? &mconf->mt76.ctx->def :
+ &link_conf->chanreq.oper;
+ band = chandef->chan->band;
+
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
- supp_rates = sta->deflink.supp_rates[band];
+ supp_rates = link_sta->supp_rates[band];
if (band == NL80211_BAND_2GHZ)
supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) |
FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf);
@@ -1587,29 +1687,80 @@ mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb,
ra_info->legacy = cpu_to_le16(supp_rates);
- if (sta->deflink.ht_cap.ht_supported)
+ if (link_sta->ht_cap.ht_supported)
memcpy(ra_info->rx_mcs_bitmask,
- sta->deflink.ht_cap.mcs.rx_mask,
+ link_sta->ht_cap.mcs.rx_mask,
HT_MCS_MASK_NUM);
}
static void
+mt7925_mcu_sta_eht_mld_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct wiphy *wiphy = mvif->phy->mt76->hw->wiphy;
+ const struct wiphy_iftype_ext_capab *ext_capa;
+ struct sta_rec_eht_mld *eht_mld;
+ struct tlv *tlv;
+ u16 eml_cap;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld));
+ eht_mld = (struct sta_rec_eht_mld *)tlv;
+ eht_mld->mld_type = 0xff;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return;
+
+ ext_capa = cfg80211_get_iftype_ext_capa(wiphy,
+ ieee80211_vif_type_p2p(vif));
+ if (!ext_capa)
+ return;
+
+ eml_cap = (vif->cfg.eml_cap & (IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT)) |
+ (ext_capa->eml_capabilities & (IEEE80211_EML_CAP_EMLSR_PADDING_DELAY |
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY));
+
+ if (eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP) {
+ eht_mld->eml_cap[0] = u16_get_bits(eml_cap, GENMASK(7, 0));
+ eht_mld->eml_cap[1] = u16_get_bits(eml_cap, GENMASK(15, 8));
+ } else {
+ eht_mld->str_cap[0] = BIT(1);
+ }
+}
+
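The EML capability word above is split into two bytes with u16_get_bits(eml_cap, GENMASK(7, 0)) and GENMASK(15, 8). The sketch below re-implements both helpers in a simplified form just to show what the extraction does; it is not the <linux/bitfield.h> code.

#include <stdint.h>
#include <stdio.h>

/* build a contiguous bit mask covering bits l..h of a 16-bit word */
#define GENMASK16(h, l) ((uint16_t)((0xffffu >> (15 - (h))) & ~((1u << (l)) - 1)))

static unsigned int u16_get_bits(uint16_t val, uint16_t mask)
{
        /* shift the masked value down by the position of the mask's
         * lowest set bit (mask must be non-zero) */
        return (unsigned int)(val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        uint16_t eml_cap = 0xa5c3;      /* arbitrary capability word */
        uint8_t lo = (uint8_t)u16_get_bits(eml_cap, GENMASK16(7, 0));
        uint8_t hi = (uint8_t)u16_get_bits(eml_cap, GENMASK16(15, 8));

        printf("eml_cap[0]=0x%02x eml_cap[1]=0x%02x\n", lo, hi);
        return 0;
}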
+static void
mt7925_mcu_sta_mld_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ unsigned long valid = mvif->valid_links;
+ struct mt792x_bss_conf *mconf;
+ struct mt792x_link_sta *mlink;
struct sta_rec_mld *mld;
struct tlv *tlv;
+ int i, cnt = 0;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, sizeof(*mld));
mld = (struct sta_rec_mld *)tlv;
- memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
- mld->primary_id = cpu_to_le16(wcid->idx);
- mld->wlan_id = cpu_to_le16(wcid->idx);
+ memcpy(mld->mac_addr, sta->addr, ETH_ALEN);
+ mld->primary_id = cpu_to_le16(msta->deflink.wcid.idx);
+ mld->wlan_id = cpu_to_le16(msta->deflink.wcid.idx);
+ mld->link_num = min_t(u8, hweight16(mvif->valid_links), 2);
+
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (cnt == mld->link_num)
+ break;
+
+ mconf = mt792x_vif_to_link(mvif, i);
+ mlink = mt792x_sta_to_link(msta, i);
+ mld->link[cnt].wlan_id = cpu_to_le16(mlink->wcid.idx);
+ mld->link[cnt++].bss_idx = mconf->mt76.idx;
- /* TODO: 0 means deflink only, add secondary link(1) later */
- mld->link_num = !!(hweight8(vif->active_links) > 1);
- WARN_ON_ONCE(mld->link_num);
+ if (mlink != &msta->deflink)
+ mld->secondary_id = cpu_to_le16(mlink->wcid.idx);
+ }
}
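mt7925_mcu_sta_mld_tlv() above walks the set bits of valid_links with for_each_set_bit() and stops once link_num per-link entries are filled. A plain-C sketch of that walk; the link mask and the two-entry limit are example values.

#include <stdint.h>
#include <stdio.h>

#define MAX_LINKS 16    /* valid_links is a 16-bit mask */

int main(void)
{
        uint16_t valid_links = (1u << 0) | (1u << 1) | (1u << 3);
        unsigned int link_num = 2;      /* the firmware record holds at most two links */
        unsigned int cnt = 0;

        for (unsigned int i = 0; i < MAX_LINKS && cnt < link_num; i++) {
                if (!(valid_links & (1u << i)))
                        continue;       /* skip links that are not configured */

                printf("link[%u] <- link_id %u\n", cnt, i);
                cnt++;
        }
        return 0;
}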
static int
@@ -1625,39 +1776,106 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
- if (info->sta || !info->offload_fw)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta,
+ if (info->link_sta)
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ info->link_sta,
info->enable, info->newly);
- if (info->sta && info->enable) {
- mt7925_mcu_sta_phy_tlv(skb, info->vif, info->sta);
- mt7925_mcu_sta_ht_tlv(skb, info->sta);
- mt7925_mcu_sta_vht_tlv(skb, info->sta);
- mt76_connac_mcu_sta_uapsd(skb, info->vif, info->sta);
- mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->sta);
- mt7925_mcu_sta_he_tlv(skb, info->sta);
- mt7925_mcu_sta_he_6g_tlv(skb, info->sta);
- mt7925_mcu_sta_eht_tlv(skb, info->sta);
- mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, info->sta);
- mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta,
+ if (info->link_sta && info->enable) {
+ mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
+ mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta);
+ mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_he_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_eht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif,
+ info->link_sta);
+ mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta,
info->vif, info->rcpi,
info->state);
- mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta);
+ mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
}
if (info->enable)
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
+
+ return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
+}
+
+static void
+mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
+{
+ struct sta_rec_remove *rem;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, 0x25, sizeof(*rem));
+ rem = (struct sta_rec_remove *)tlv;
+ rem->action = 0;
+}
+
+static int
+mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
+ struct mt76_sta_cmd_info *info)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt792x_bss_conf *mconf;
+ struct sk_buff *skb;
+
+ mconf = mt792x_vif_to_link(mvif, info->wcid->link_id);
+
+ skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, info->wcid,
+ MT7925_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ if (info->enable)
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ info->link_sta,
+ info->enable, info->newly);
+
+ if (info->enable && info->link_sta) {
+ mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
+ mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta);
+ mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_he_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_eht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif,
+ info->link_sta);
+ mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta,
+ info->vif, info->rcpi,
+ info->state);
+
+ if (info->state != MT76_STA_INFO_STATE_NONE) {
+ mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
+ mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta);
+ }
+
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
+ }
+
+ if (!info->enable) {
+ mt7925_mcu_sta_remove_tlv(skb);
+ mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF,
+ sizeof(struct tlv));
+ }
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
-int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
+int mt7925_mcu_sta_update(struct mt792x_dev *dev,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- int rssi = -ewma_rssi_read(&mvif->rssi);
+ int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi);
struct mt76_sta_cmd_info info = {
- .sta = sta,
+ .link_sta = link_sta,
.vif = vif,
.enable = enable,
.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
@@ -1666,12 +1884,22 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
.rcpi = to_rcpi(rssi),
};
struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
+ int err;
+
+ if (link_sta) {
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ }
+ info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
+ info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true;
- msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL;
- info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
- info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
+ if (ieee80211_vif_is_mld(vif))
+ err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
+ else
+ err = mt7925_mcu_sta_cmd(&dev->mphy, &info);
- return mt7925_mcu_sta_cmd(&dev->mphy, &info);
+ return err;
}
int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
@@ -1680,21 +1908,32 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
{
#define MT7925_FIF_BIT_CLR BIT(1)
#define MT7925_FIF_BIT_SET BIT(0)
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
int err = 0;
+ int i;
if (enable) {
- err = mt7925_mcu_uni_bss_bcnft(dev, vif, true);
- if (err)
- return err;
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true);
+ if (err < 0)
+ return err;
+ }
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_SET,
MT_WF_RFCR_DROP_OTHER_BEACON);
}
- err = mt7925_mcu_set_bss_pm(dev, vif, false);
- if (err)
- return err;
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ err = mt7925_mcu_set_bss_pm(dev, bss_conf, false);
+ if (err)
+ return err;
+ }
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_CLR,
@@ -1746,7 +1985,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed enable;
} __packed req = {
.hdr = {
- .band_idx = mvif->mt76.band_idx,
+ .band_idx = mvif->bss_conf.mt76.band_idx,
},
.enable = {
.tag = cpu_to_le16(UNI_SNIFFER_ENABLE),
@@ -1805,7 +2044,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
} __packed tlv;
} __packed req = {
.hdr = {
- .band_idx = vif->mt76.band_idx,
+ .band_idx = vif->bss_conf.mt76.band_idx,
},
.tlv = {
.tag = cpu_to_le16(UNI_SNIFFER_CONFIG),
@@ -1866,7 +2105,7 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
} __packed beacon_tlv;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.beacon_tlv = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
@@ -1918,83 +2157,59 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
&req, sizeof(req), true);
}
-int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
- struct ieee80211_chanctx_conf *ctx)
+static
+void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
{
- struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
enum nl80211_band band = chandef->chan->band;
- struct mt76_dev *mdev = phy->dev;
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct rlm_tlv {
- __le16 tag;
- __le16 len;
- u8 control_channel;
- u8 center_chan;
- u8 center_chan2;
- u8 bw;
- u8 tx_streams;
- u8 rx_streams;
- u8 ht_op_info;
- u8 sco;
- u8 band;
- u8 pad[3];
- } __packed rlm;
- } __packed rlm_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .rlm = {
- .tag = cpu_to_le16(UNI_BSS_INFO_RLM),
- .len = cpu_to_le16(sizeof(struct rlm_tlv)),
- .control_channel = chandef->chan->hw_value,
- .center_chan = ieee80211_frequency_to_channel(freq1),
- .center_chan2 = ieee80211_frequency_to_channel(freq2),
- .tx_streams = hweight8(phy->antenna_mask),
- .ht_op_info = 4, /* set HT 40M allowed */
- .rx_streams = hweight8(phy->antenna_mask),
- .band = band,
- },
- };
+ struct bss_rlm_tlv *req;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req));
+ req = (struct bss_rlm_tlv *)tlv;
+ req->control_channel = chandef->chan->hw_value,
+ req->center_chan = ieee80211_frequency_to_channel(freq1),
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2),
+ req->tx_streams = hweight8(phy->antenna_mask),
+ req->ht_op_info = 4, /* set HT 40M allowed */
+ req->rx_streams = hweight8(phy->antenna_mask),
+ req->band = band;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
- rlm_req.rlm.bw = CMD_CBW_40MHZ;
+ req->bw = CMD_CBW_40MHZ;
break;
case NL80211_CHAN_WIDTH_80:
- rlm_req.rlm.bw = CMD_CBW_80MHZ;
+ req->bw = CMD_CBW_80MHZ;
break;
case NL80211_CHAN_WIDTH_80P80:
- rlm_req.rlm.bw = CMD_CBW_8080MHZ;
+ req->bw = CMD_CBW_8080MHZ;
break;
case NL80211_CHAN_WIDTH_160:
- rlm_req.rlm.bw = CMD_CBW_160MHZ;
+ req->bw = CMD_CBW_160MHZ;
break;
case NL80211_CHAN_WIDTH_5:
- rlm_req.rlm.bw = CMD_CBW_5MHZ;
+ req->bw = CMD_CBW_5MHZ;
break;
case NL80211_CHAN_WIDTH_10:
- rlm_req.rlm.bw = CMD_CBW_10MHZ;
+ req->bw = CMD_CBW_10MHZ;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
default:
- rlm_req.rlm.bw = CMD_CBW_20MHZ;
- rlm_req.rlm.ht_op_info = 0;
+ req->bw = CMD_CBW_20MHZ;
+ req->ht_op_info = 0;
break;
}
- if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 1; /* SCA */
- else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 3; /* SCB */
-
- return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req,
- sizeof(rlm_req), true);
+ if (req->control_channel < req->center_chan)
+ req->sco = 1; /* SCA */
+ else if (req->control_channel > req->center_chan)
+ req->sco = 3; /* SCB */
}
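Both the ROC and RLM paths above derive the secondary-channel offset the same way: a control channel below the 40 MHz centre means SCA (1), above means SCB (3), equal means no secondary channel. A tiny sketch with example 5 GHz channel numbers, for illustration only.

#include <stdio.h>

static int sco_from_channels(int control_ch, int center_ch)
{
        if (control_ch < center_ch)
                return 1;       /* SCA: secondary channel above the primary */
        if (control_ch > center_ch)
                return 3;       /* SCB: secondary channel below the primary */
        return 0;               /* 20 MHz, no secondary channel */
}

int main(void)
{
        /* channel 36 at 40 MHz has its centre at channel 38 */
        printf("ch36/centre38 -> sco %d\n", sco_from_channels(36, 38));
        printf("ch40/centre38 -> sco %d\n", sco_from_channels(40, 38));
        return 0;
}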
static struct sk_buff *
@@ -2014,18 +2229,36 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
return skb;
}
+int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(phy->dev, mvif,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_rlm_tlv(skb, phy, link_conf, ctx);
+
+ return mt76_mcu_skb_send_msg(phy->dev, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
static u8
mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta)
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta)
{
struct ieee80211_he_6ghz_capa *he_6ghz_capa;
const struct ieee80211_sta_eht_cap *eht_cap;
__le16 capa = 0;
u8 mode = 0;
- if (sta) {
- he_6ghz_capa = &sta->deflink.he_6ghz_capa;
- eht_cap = &sta->deflink.eht_cap;
+ if (link_sta) {
+ he_6ghz_capa = &link_sta->he_6ghz_capa;
+ eht_cap = &link_sta->eht_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -2061,18 +2294,19 @@ mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
static void
mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_chanctx_conf *ctx,
struct mt76_phy *phy, u16 wlan_idx,
bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
- &mvif->sta;
- struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
enum nl80211_band band = chandef->chan->band;
struct mt76_connac_bss_basic_tlv *basic_req;
+ struct mt792x_link_sta *mlink;
struct tlv *tlv;
int conn_type;
u8 idx;
@@ -2080,26 +2314,39 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req));
basic_req = (struct mt76_connac_bss_basic_tlv *)tlv;
- idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 :
- mvif->mt76.omac_idx;
+ idx = mconf->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 :
+ mconf->mt76.omac_idx;
basic_req->hw_bss_idx = idx;
- basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta);
+ basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band,
+ link_sta);
if (band == NL80211_BAND_2GHZ)
basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX);
else
basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX);
- memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN);
- basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta);
- basic_req->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- basic_req->dtim_period = vif->bss_conf.dtim_period;
+ memcpy(basic_req->bssid, link_conf->bssid, ETH_ALEN);
+ basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, link_sta);
+ basic_req->bcn_interval = cpu_to_le16(link_conf->beacon_int);
+ basic_req->dtim_period = link_conf->dtim_period;
basic_req->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx);
- basic_req->sta_idx = cpu_to_le16(msta->wcid.idx);
- basic_req->omac_idx = mvif->mt76.omac_idx;
- basic_req->band_idx = mvif->mt76.band_idx;
- basic_req->wmm_idx = mvif->mt76.wmm_idx;
+ basic_req->link_idx = mconf->mt76.idx;
+
+ if (link_sta) {
+ struct mt792x_sta *msta;
+
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+
+ } else {
+ mlink = &mconf->vif->sta.deflink;
+ }
+
+ basic_req->sta_idx = cpu_to_le16(mlink->wcid.idx);
+ basic_req->omac_idx = mconf->mt76.omac_idx;
+ basic_req->band_idx = mconf->mt76.band_idx;
+ basic_req->wmm_idx = mconf->mt76.wmm_idx;
basic_req->conn_state = !enable;
switch (vif->type) {
@@ -2131,9 +2378,11 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
}
static void
-mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7925_mcu_bss_sec_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct mt76_vif *mvif = &mconf->mt76;
struct bss_sec_tlv {
__le16 tag;
__le16 len;
@@ -2178,12 +2427,13 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
static void
mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf)
{
- struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->mt76->chandef;
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
enum nl80211_band band = chandef->chan->band;
+ struct mt76_vif *mvif = &mconf->mt76;
struct bss_rate_tlv *bmc;
struct tlv *tlv;
u8 idx = mvif->mcast_rates_idx ?
@@ -2205,39 +2455,44 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
static void
mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- bool is_mld = ieee80211_vif_is_mld(vif);
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
+ bool is_mld;
+
+ is_mld = ieee80211_vif_is_mld(link_conf->vif) ||
+ (hweight16(mvif->valid_links) > 1);
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld));
mld = (struct bss_mld_tlv *)tlv;
- mld->link_id = sta ? (is_mld ? vif->bss_conf.link_id : 0) : 0xff;
- mld->group_mld_id = is_mld ? mvif->mt76.idx : 0xff;
- mld->own_mld_id = mvif->mt76.idx + 32;
+ mld->link_id = is_mld ? link_conf->link_id : 0xff;
+ /* apply the index of the primary link */
+ mld->group_mld_id = is_mld ? mvif->bss_conf.mt76.idx : 0xff;
+ mld->own_mld_id = mconf->mt76.idx + 32;
mld->remap_idx = 0xff;
+ mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
+ IEEE80211_EML_CAP_EMLSR_SUPP);
- if (sta)
- memcpy(mld->mac_addr, sta->addr, ETH_ALEN);
+ memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN);
}
static void
-mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf)
{
struct mt76_connac_bss_qos_tlv *qos;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_QBSS, sizeof(*qos));
qos = (struct mt76_connac_bss_qos_tlv *)tlv;
- qos->qos = vif->bss_conf.qos;
+ qos->qos = link_conf->qos;
}
static void
-mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
struct mt792x_phy *phy)
{
#define DEFAULT_HE_PE_DURATION 4
@@ -2246,16 +2501,16 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_he *he;
struct tlv *tlv;
- cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, link_conf->vif);
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_uni_he *)tlv;
- he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ he->he_pe_duration = link_conf->htc_trig_based_pkt_ext;
if (!he->he_pe_duration)
he->he_pe_duration = DEFAULT_HE_PE_DURATION;
- he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ he->he_rts_thres = cpu_to_le16(link_conf->frame_time_rts_th);
if (!he->he_rts_thres)
he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
@@ -2265,7 +2520,7 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
-mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
bool enable)
{
struct bss_info_uni_bss_color *color;
@@ -2275,15 +2530,16 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
color = (struct bss_info_uni_bss_color *)tlv;
color->enable = enable ?
- vif->bss_conf.he_bss_color.enabled : 0;
+ link_conf->he_bss_color.enabled : 0;
color->bss_color = enable ?
- vif->bss_conf.he_bss_color.color : 0;
+ link_conf->he_bss_color.color : 0;
}
static void
-mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
struct mt792x_phy *phy = mvif->phy;
struct bss_ifs_time_tlv *ifs_time;
struct tlv *tlv;
@@ -2295,18 +2551,18 @@ mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
}
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
- struct ieee80211_vif *vif)
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_dev *dev = phy->dev;
struct sk_buff *skb;
- skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76,
MT7925_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7925_mcu_bss_ifs_tlv(skb, vif);
+ mt7925_mcu_bss_ifs_tlv(skb, link_conf);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
@@ -2314,41 +2570,42 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
int enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_dev *dev = phy->dev;
+ struct mt792x_link_sta *mlink_bc;
struct sk_buff *skb;
- int err;
- skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76,
MT7925_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- /* bss_basic must be first */
- mt7925_mcu_bss_basic_tlv(skb, vif, sta, ctx, phy->mt76,
- mvif->sta.wcid.idx, enable);
- mt7925_mcu_bss_sec_tlv(skb, vif);
+ mlink_bc = mt792x_sta_to_link(&mvif->sta, mconf->link_id);
- mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta);
- mt7925_mcu_bss_qos_tlv(skb, vif);
- mt7925_mcu_bss_mld_tlv(skb, vif, sta);
- mt7925_mcu_bss_ifs_tlv(skb, vif);
+ /* bss_basic must be first */
+ mt7925_mcu_bss_basic_tlv(skb, link_conf, link_sta, ctx, phy->mt76,
+ mlink_bc->wcid.idx, enable);
+ mt7925_mcu_bss_sec_tlv(skb, link_conf);
+ mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, link_conf);
+ mt7925_mcu_bss_qos_tlv(skb, link_conf);
+ mt7925_mcu_bss_mld_tlv(skb, link_conf);
+ mt7925_mcu_bss_ifs_tlv(skb, link_conf);
- if (vif->bss_conf.he_support) {
- mt7925_mcu_bss_he_tlv(skb, vif, phy);
- mt7925_mcu_bss_color_tlv(skb, vif, enable);
+ if (link_conf->he_support) {
+ mt7925_mcu_bss_he_tlv(skb, link_conf, phy);
+ mt7925_mcu_bss_color_tlv(skb, link_conf, enable);
}
- err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD(BSS_INFO_UPDATE), true);
- if (err < 0)
- return err;
+ if (enable)
+ mt7925_mcu_bss_rlm_tlv(skb, phy->mt76, link_conf, ctx);
- return mt7925_mcu_set_chctx(phy->mt76, &mvif->mt76, ctx);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
}
int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index b8315a89f4a9..ac53bdc99332 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -366,7 +366,10 @@ struct bss_mld_tlv {
u8 mac_addr[ETH_ALEN];
u8 remap_idx;
u8 link_id;
- u8 __rsv[2];
+ u8 eml_enable;
+ u8 max_link_num;
+ u8 hybrid_mode;
+ u8 __rsv[3];
} __packed;
struct sta_rec_ba_uni {
@@ -440,6 +443,17 @@ struct sta_rec_mld {
} __packed link[2];
} __packed;
+struct sta_rec_eht_mld {
+ __le16 tag;
+ __le16 len;
+ u8 nsep;
+ u8 mld_type;
+ u8 __rsv1[1];
+ u8 str_cap[3];
+ u8 eml_cap[3];
+ u8 __rsv2[3];
+} __packed;
+
struct bss_ifs_time_tlv {
__le16 tag;
__le16 len;
@@ -456,6 +470,21 @@ struct bss_ifs_time_tlv {
__le16 eifs_cck_time;
} __packed;
+struct bss_rlm_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 control_channel;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 ht_op_info;
+ u8 sco;
+ u8 band;
+ u8 pad[3];
+} __packed;
+
#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
@@ -474,7 +503,8 @@ struct bss_ifs_time_tlv {
sizeof(struct sta_rec_eht) + \
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct sta_rec_mld) + \
- sizeof(struct tlv))
+ sizeof(struct tlv) * 2 + \
+ sizeof(struct sta_rec_remove))
#define MT7925_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct mt76_connac_bss_basic_tlv) + \
@@ -484,6 +514,7 @@ struct bss_ifs_time_tlv {
sizeof(struct bss_info_uni_he) + \
sizeof(struct bss_info_uni_bss_color) + \
sizeof(struct bss_ifs_time_tlv) + \
+ sizeof(struct bss_rlm_tlv) + \
sizeof(struct tlv))
#define MT_CONNAC3_SKU_POWER_LIMIT 449
@@ -538,6 +569,26 @@ struct mt7925_wow_pattern_tlv {
u8 rsv[7];
} __packed;
+struct roc_acquire_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 bss_idx;
+ u8 tokenid;
+ u8 control_channel;
+ u8 sco;
+ u8 band;
+ u8 bw;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw_from_ap;
+ u8 center_chan_from_ap;
+ u8 center_chan2_from_ap;
+ u8 reqtype;
+ __le32 maxinterval;
+ u8 dbdcband;
+ u8 rsv[3];
+} __packed;
+
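struct roc_acquire_tlv above is __packed, so the __le32 maxinterval sits at a fixed, unaligned offset that the firmware expects. The userspace sketch below mirrors the layout with plain integer types and checks the offsets at compile time; it assumes a GCC/Clang-style packed attribute and is only a layout illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct roc_tlv_view {
        uint16_t tag;
        uint16_t len;
        uint8_t bss_idx;
        uint8_t tokenid;
        uint8_t control_channel;
        uint8_t sco;
        uint8_t band;
        uint8_t bw;
        uint8_t center_chan;
        uint8_t center_chan2;
        uint8_t bw_from_ap;
        uint8_t center_chan_from_ap;
        uint8_t center_chan2_from_ap;
        uint8_t reqtype;
        uint32_t maxinterval;
        uint8_t dbdcband;
        uint8_t rsv[3];
} __attribute__((packed));

/* with packing, maxinterval follows reqtype with no padding and the
 * whole TLV is a fixed 24 bytes */
_Static_assert(offsetof(struct roc_tlv_view, maxinterval) == 16,
               "maxinterval lands right after reqtype");
_Static_assert(sizeof(struct roc_tlv_view) == 24, "fixed 24-byte TLV");

int main(void)
{
        printf("sizeof = %zu\n", sizeof(struct roc_tlv_view));
        return 0;
}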
static inline enum connac3_mcu_cipher_type
mt7925_mcu_get_cipher(int cipher)
{
@@ -578,18 +629,18 @@ int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
bool enable);
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
int enable);
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
- struct ieee80211_vif *vif);
+ struct ieee80211_bss_conf *link_conf);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
- struct ieee80211_bss_conf *info);
+ struct ieee80211_bss_conf *link_conf);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 8a4a71f6bcb6..669f3a079d04 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -30,17 +30,22 @@
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
+ UNI_ROC_SUB_LINK = 3,
UNI_ROC_NUM
};
enum mt7925_roc_req {
MT7925_ROC_REQ_JOIN,
MT7925_ROC_REQ_ROC,
+ MT7925_ROC_REQ_SUB_LINK,
+ MT7925_ROC_REQ_MLSR_AG = 10,
+ MT7925_ROC_REQ_MLSR_AA,
MT7925_ROC_REQ_NUM
};
enum {
UNI_EVENT_ROC_GRANT = 0,
+ UNI_EVENT_ROC_GRANT_SUB_LINK = 4,
UNI_EVENT_ROC_TAG_NUM
};
@@ -192,13 +197,15 @@ int __mt7925_start(struct mt792x_phy *phy);
int mt7925_register_device(struct mt792x_dev *dev);
void mt7925_unregister_device(struct mt792x_dev *dev);
int mt7925_run_firmware(struct mt792x_dev *dev);
-int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
bool enable);
-int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
+int mt7925_mcu_sta_update(struct mt792x_dev *dev,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag);
-int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif);
+int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_bss_conf *bss_conf);
int mt7925_mcu_set_eeprom(struct mt792x_dev *dev);
int mt7925_mcu_get_rx_rate(struct mt792x_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
@@ -228,6 +235,7 @@ void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
void mt7925_stats_work(struct work_struct *work);
void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy);
+int mt7925_init_mlo_caps(struct mt792x_phy *phy);
int mt7925_init_debugfs(struct mt792x_dev *dev);
int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
@@ -241,7 +249,8 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
bool enable);
void mt7925_scan_work(struct work_struct *work);
void mt7925_roc_work(struct work_struct *work);
-int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif);
+int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf);
void mt7925_coredump_work(struct work_struct *work);
int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx,
struct mt7925_txpwr *txpwr);
@@ -252,7 +261,7 @@ void mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
struct list_head *free_list);
int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
@@ -291,20 +300,24 @@ int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw,
int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set);
int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
enum environment_cap env_cap);
-int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ int duration, u8 token_id);
+int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
struct ieee80211_channel *chan, int duration,
enum mt7925_roc_req type, u8 token_id);
-int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id);
int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
- struct mt76_wcid *wcid, enum set_key_cmd cmd);
+ struct mt76_wcid *wcid, enum set_key_cmd cmd,
+ struct mt792x_sta *msta);
int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val);
int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ struct ieee80211_sta *sta,
+ int link_id);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 07b74d492ce1..6e4f4e78c350 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -254,7 +254,7 @@ static int mt7925_dma_init(struct mt792x_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt792x_poll_tx);
napi_enable(&dev->mt76.tx_napi);
@@ -373,6 +373,9 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
bus_ops->rmw = mt7925_rmw;
dev->mt76.bus = bus_ops;
+ if (!mt7925_disable_aspm && mt76_pci_aspm_supported(pdev))
+ dev->aspm_supported = true;
+
ret = __mt792x_mcu_fw_pmctrl(dev);
if (ret)
goto err_free_dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
index 9fca887977d2..faedbf766d1a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
@@ -34,9 +34,9 @@ int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
@@ -60,7 +60,7 @@ void mt7925_tx_token_put(struct mt792x_dev *dev)
spin_lock_bh(&dev->mt76.token_lock);
idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7925_txwi_free(dev, txwi, NULL, false, NULL);
+ mt7925_txwi_free(dev, txwi, NULL, NULL, NULL);
dev->mt76.token_count--;
}
spin_unlock_bh(&dev->mt76.token_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 20578497a405..7fa74d59cc48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -27,6 +27,7 @@
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
+#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
#define MT792x_BASIC_RATES_TBL 11
@@ -81,11 +82,9 @@ enum mt792x_reg_power_type {
DECLARE_EWMA(avg_signal, 10, 8)
-struct mt792x_sta {
+struct mt792x_link_sta {
struct mt76_wcid wcid; /* must be first */
- struct mt792x_vif *vif;
-
u32 airtime_ac[8];
int ack_signal;
@@ -94,21 +93,46 @@ struct mt792x_sta {
unsigned long last_txs;
struct mt76_connac_sta_key_conf bip;
+
+ struct mt792x_sta *sta;
+
+ struct ieee80211_link_sta *pri_link;
+};
+
+struct mt792x_sta {
+ struct mt792x_link_sta deflink; /* must be first */
+ struct mt792x_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct mt792x_vif *vif;
+
+ u16 valid_links;
+ u8 deflink_id;
};
DECLARE_EWMA(rssi, 10, 8);
-struct mt792x_vif {
+struct mt792x_chanctx {
+ struct mt792x_bss_conf *bss_conf;
+};
+
+struct mt792x_bss_conf {
struct mt76_vif mt76; /* must be first */
+ struct mt792x_vif *vif;
+ struct ewma_rssi rssi;
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ unsigned int link_id;
+};
+
+struct mt792x_vif {
+ struct mt792x_bss_conf bss_conf; /* must be first */
+ struct mt792x_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
struct mt792x_sta sta;
struct mt792x_sta *wep_sta;
struct mt792x_phy *phy;
-
- struct ewma_rssi rssi;
-
- struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ u16 valid_links;
+ u8 deflink_id;
};
struct mt792x_phy {
@@ -140,6 +164,7 @@ struct mt792x_phy {
#endif
void *clc[MT792x_CLC_MAX_NUM];
u64 chip_cap;
+ u16 eml_cap;
struct work_struct roc_work;
struct timer_list roc_timer;
@@ -190,6 +215,7 @@ struct mt792x_dev {
bool fw_assert:1;
bool has_eht:1;
bool regd_in_progress:1;
+ bool aspm_supported:1;
wait_queue_head_t wait;
struct work_struct init_work;
@@ -211,6 +237,66 @@ struct mt792x_dev {
u32 backup_l2;
};
+static inline struct mt792x_bss_conf *
+mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
+{
+ struct ieee80211_vif *vif;
+
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &mvif->bss_conf;
+
+ return rcu_dereference_protected(mvif->link_conf[link_id],
+ lockdep_is_held(&mvif->phy->dev->mt76.mutex));
+}
+
+static inline struct mt792x_link_sta *
+mt792x_sta_to_link(struct mt792x_sta *msta, u8 link_id)
+{
+ struct ieee80211_vif *vif;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &msta->deflink;
+
+ return rcu_dereference_protected(msta->link[link_id],
+ lockdep_is_held(&msta->vif->phy->dev->mt76.mutex));
+}
+
+static inline struct mt792x_bss_conf *
+mt792x_link_conf_to_mconf(struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+
+ return mt792x_vif_to_link(mvif, link_conf->link_id);
+}
+
+static inline struct ieee80211_bss_conf *
+mt792x_vif_to_bss_conf(struct ieee80211_vif *vif, unsigned int link_id)
+{
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &vif->bss_conf;
+
+ return link_conf_dereference_protected(vif, link_id);
+}
+
+static inline struct ieee80211_link_sta *
+mt792x_sta_to_link_sta(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ unsigned int link_id)
+{
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &sta->deflink;
+
+ return link_sta_dereference_protected(sta, link_id);
+}
+
static inline struct mt792x_dev *
mt792x_hw_dev(struct ieee80211_hw *hw)
{
@@ -251,7 +337,7 @@ static inline bool mt792x_dma_need_reinit(struct mt792x_dev *dev)
#define mt792x_mutex_release(dev) \
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
-void mt792x_stop(struct ieee80211_hw *hw);
+void mt792x_stop(struct ieee80211_hw *hw, bool suspend);
void mt792x_pm_wake_work(struct work_struct *work);
void mt792x_pm_power_save_work(struct work_struct *work);
void mt792x_reset(struct mt76_dev *mdev);
@@ -325,6 +411,9 @@ mt792x_get_mac80211_ops(struct device *dev,
int mt792x_init_wcid(struct mt792x_dev *dev);
int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev);
int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev);
+void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+ struct mt792x_bss_conf *mconf,
+ struct mt792x_link_sta *mlink);
static inline char *mt792x_ram_name(struct mt792x_dev *dev)
{
@@ -368,7 +457,7 @@ void mt792xu_wr(struct mt76_dev *dev, u32 addr, u32 val);
u32 mt792xu_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val);
void mt792xu_copy(struct mt76_dev *dev, u32 offset, const void *data, int len);
void mt792xu_disconnect(struct usb_interface *usb_intf);
-void mt792xu_stop(struct ieee80211_hw *hw);
+void mt792xu_stop(struct ieee80211_hw *hw, bool suspend);
static inline void
mt792x_skb_add_usb_sdio_hdr(struct mt792x_dev *dev, struct sk_buff *skb,
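Reviewer note, not part of the patch: a minimal kernel-style sketch of how the new per-link layout above is meant to be navigated. It relies only on members introduced in this hunk (wcid first in struct mt792x_link_sta, deflink first in struct mt792x_sta, and the sta back-pointer); the helper name sketch_wcid_to_sta is hypothetical, and the same idea appears in mt792x_rx_get_wcid() further down in this patch.

/* Map a wcid back to its owning MLD station. Works because the patch keeps
 * "must be first" ordering: wcid is the first member of mt792x_link_sta.
 */
static struct mt792x_sta *sketch_wcid_to_sta(struct mt76_wcid *wcid)
{
	struct mt792x_link_sta *link;

	/* recover the link-level station that embeds this wcid */
	link = container_of(wcid, struct mt792x_link_sta, wcid);

	/* the back-pointer added by this patch points at the MLD station;
	 * for non-MLO operation the same object is reachable via deflink.
	 */
	return link->sta;
}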
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index a405af8d9052..78fe37c2e07b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -59,20 +59,42 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ u8 link_id;
int qid;
if (control->sta) {
+ struct mt792x_link_sta *mlink;
struct mt792x_sta *sta;
-
+ link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
sta = (struct mt792x_sta *)control->sta->drv_priv;
- wcid = &sta->wcid;
+ mlink = mt792x_sta_to_link(sta, link_id);
+ wcid = &mlink->wcid;
}
if (vif && !control->sta) {
struct mt792x_vif *mvif;
mvif = (struct mt792x_vif *)vif->drv_priv;
- wcid = &mvif->sta.wcid;
+ wcid = &mvif->sta.deflink.wcid;
+ }
+
+ if (vif && control->sta && ieee80211_vif_is_mld(vif)) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *conf;
+
+ link_id = wcid->link_id;
+ rcu_read_lock();
+ conf = rcu_dereference(vif->link_conf[link_id]);
+ memcpy(hdr->addr2, conf->addr, ETH_ALEN);
+
+ link_sta = rcu_dereference(control->sta->link[link_id]);
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ memcpy(hdr->addr3, conf->bssid, ETH_ALEN);
+ rcu_read_unlock();
}
if (mt76_connac_pm_ref(mphy, &dev->pm)) {
@@ -91,7 +113,7 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
}
EXPORT_SYMBOL_GPL(mt792x_tx);
-void mt792x_stop(struct ieee80211_hw *hw)
+void mt792x_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_phy *phy = mt792x_hw_phy(hw);
@@ -113,31 +135,47 @@ void mt792x_stop(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(mt792x_stop);
+void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+ struct mt792x_bss_conf *mconf,
+ struct mt792x_link_sta *mlink)
+{
+ struct ieee80211_vif *vif = container_of((void *)mconf->vif,
+ struct ieee80211_vif, drv_priv);
+ struct ieee80211_bss_conf *link_conf;
+ int idx = mlink->wcid.idx;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ dev->mt76.vif_mask &= ~BIT_ULL(mconf->mt76.idx);
+ mconf->vif->phy->omac_mask &= ~BIT_ULL(mconf->mt76.omac_idx);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (!list_empty(&mlink->wcid.poll_list))
+ list_del_init(&mlink->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ mt76_wcid_cleanup(&dev->mt76, &mlink->wcid);
+}
+EXPORT_SYMBOL_GPL(mt792x_mac_link_bss_remove);
+
void mt792x_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_sta *msta = &mvif->sta;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
- int idx = msta->wcid.idx;
+ struct mt792x_bss_conf *mconf;
mt792x_mutex_acquire(dev);
- mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false);
- rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ mconf = mt792x_link_conf_to_mconf(&vif->bss_conf);
+ mt792x_mac_link_bss_remove(dev, mconf, &mvif->sta.deflink);
- dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
- phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mt792x_mutex_release(dev);
-
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
-
- mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
EXPORT_SYMBOL_GPL(mt792x_remove_interface);
@@ -149,7 +187,7 @@ int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* no need to update right away, we'll get BSS_CHANGED_QOS */
queue = mt76_connac_lmac_mapping(queue);
- mvif->queue_params[queue] = *params;
+ mvif->bss_conf.queue_params[queue] = *params;
return 0;
}
@@ -178,7 +216,7 @@ u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- u8 omac_idx = mvif->mt76.omac_idx;
+ u8 omac_idx = mvif->bss_conf.mt76.omac_idx;
union {
u64 t64;
u32 t32[2];
@@ -204,7 +242,7 @@ void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- u8 omac_idx = mvif->mt76.omac_idx;
+ u8 omac_idx = mvif->bss_conf.mt76.omac_idx;
union {
u64 t64;
u32 t32[2];
@@ -261,11 +299,13 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mvif->mt76.ctx = ctx;
+ mvif->bss_conf.mt76.ctx = ctx;
+ mctx->bss_conf = &mvif->bss_conf;
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -277,11 +317,13 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mvif->mt76.ctx = NULL;
+ mctx->bss_conf = NULL;
+ mvif->bss_conf.mt76.ctx = NULL;
mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
@@ -405,10 +447,10 @@ mt792x_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt76_ethtool_worker_info *wi = wi_data;
- if (msta->vif->mt76.idx != wi->idx)
+ if (msta->vif->bss_conf.mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats, true);
+ mt76_ethtool_worker(wi, &msta->deflink.wcid.stats, true);
}
void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -421,7 +463,7 @@ void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_mib_stats *mib = &phy->mib;
struct mt76_ethtool_worker_info wi = {
.data = data,
- .idx = mvif->mt76.idx,
+ .idx = mvif->bss_conf.mt76.idx,
};
int i, ei = 0;
@@ -487,7 +529,7 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw,
struct station_info *sinfo)
{
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct rate_info *txrate = &msta->wcid.rate;
+ struct rate_info *txrate = &msta->deflink.wcid.rate;
if (!txrate->legacy && !txrate->flags)
return;
@@ -502,19 +544,19 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.he_dcm = txrate->he_dcm;
sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
}
- sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->tx_failed = msta->deflink.wcid.stats.tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->tx_retries = msta->deflink.wcid.stats.tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
- sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->ack_signal = (s8)msta->deflink.ack_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
- sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->deflink.avg_ack_signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
EXPORT_SYMBOL_GPL(mt792x_sta_statistics);
@@ -556,6 +598,7 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
hw->sta_data_size = sizeof(struct mt792x_sta);
hw->vif_data_size = sizeof(struct mt792x_vif);
+ hw->chanctx_data_size = sizeof(struct mt792x_chanctx);
if (dev->fw_features & MT792x_FW_CAP_CNM) {
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -766,6 +809,10 @@ int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev)
for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
+
+ if (dev->aspm_supported)
+ usleep_range(2000, 3000);
+
if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1))
break;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 5cc2d59b774a..6f9db782338e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -340,7 +340,7 @@ int mt792x_poll_rx(struct napi_struct *napi, int budget)
struct mt792x_dev *dev;
int done;
- dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev);
+ dev = mt76_priv(napi->dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
index eb29434abee1..106273935b26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
@@ -138,6 +138,7 @@ EXPORT_SYMBOL_GPL(mt792x_mac_update_mib_stats);
struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
bool unicast)
{
+ struct mt792x_link_sta *link;
struct mt792x_sta *sta;
struct mt76_wcid *wcid;
@@ -151,11 +152,12 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
if (!wcid->sta)
return NULL;
- sta = container_of(wcid, struct mt792x_sta, wcid);
+ link = container_of(wcid, struct mt792x_link_sta, wcid);
+ sta = container_of(link, struct mt792x_sta, deflink);
if (!sta->vif)
return NULL;
- return &sta->vif->sta.wcid;
+ return &sta->vif->sta.deflink.wcid;
}
EXPORT_SYMBOL_GPL(mt792x_rx_get_wcid);
@@ -173,7 +175,7 @@ mt792x_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!ether_addr_equal(vif->addr, hdr->addr1))
return;
- ewma_rssi_add(&mvif->rssi, -status->signal);
+ ewma_rssi_add(&mvif->bss_conf.rssi, -status->signal);
}
void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index b49668a4b784..76272a03b22e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -285,12 +285,12 @@ int mt792xu_init_reset(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792xu_init_reset);
-void mt792xu_stop(struct ieee80211_hw *hw)
+void mt792xu_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
mt76u_stop_tx(&dev->mt76);
- mt792x_stop(hw);
+ mt792x_stop(hw, false);
}
EXPORT_SYMBOL_GPL(mt792xu_stop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 73e633d0d700..69a7d9b2e38b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -641,7 +641,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7996_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 7c97140d8255..bce082038219 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -93,7 +93,7 @@ static int mt7996_start(struct ieee80211_hw *hw)
return ret;
}
-static void mt7996_stop(struct ieee80211_hw *hw)
+static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_phy *phy = mt7996_hw_phy(hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 2c8578677800..2e4fa9f48dfb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -2002,7 +2002,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink);
ra->channel = chandef->chan->hw_value;
ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ?
CMD_CBW_320MHZ : sta->deflink.bandwidth;
@@ -2157,11 +2157,13 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable, bool newly)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct ieee80211_link_sta *link_sta;
struct mt7996_sta *msta;
struct sk_buff *skb;
int ret;
msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+ link_sta = sta ? &sta->deflink : NULL;
skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
&msta->wcid,
@@ -2170,7 +2172,8 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, newly);
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ enable, newly);
if (!enable)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
index 4c1c159fbb62..b5031ca7f73f 100644
--- a/drivers/net/wireless/mediatek/mt76/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/pci.c
@@ -45,3 +45,26 @@ void mt76_pci_disable_aspm(struct pci_dev *pdev)
aspm_conf);
}
EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
+
+bool mt76_pci_aspm_supported(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_conf, parent_aspm_conf = 0;
+ bool result = true;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+ aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspm_conf);
+ parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+ /* aspm already disabled */
+ result = false;
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mt76_pci_aspm_supported);
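Reviewer note, not part of the patch: the decision made by mt76_pci_aspm_supported() restated as a small self-contained predicate, for clarity only. ASPMC_MASK and aspm_supported() are illustrative names; the only assumption carried over from the patch is that ASPM is treated as unusable when both the endpoint and its upstream bridge already report ASPM control disabled in Link Control.

#include <stdbool.h>
#include <stdint.h>

#define ASPMC_MASK 0x3	/* PCI_EXP_LNKCTL_ASPMC: L0s/L1 enable bits */

static bool aspm_supported(uint16_t ep_lnkctl, bool has_parent,
			   uint16_t parent_lnkctl)
{
	uint16_t ep = ep_lnkctl & ASPMC_MASK;
	uint16_t up = parent_lnkctl & ASPMC_MASK;

	/* ASPM already disabled on the endpoint and (if present) the
	 * upstream bridge: report it as not supported.
	 */
	if (!ep && (!has_parent || !up))
		return false;

	return true;
}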
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index a7330576486b..7570c6ceecea 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -28,7 +28,7 @@ out:
return ret;
}
-static void mt7601u_stop(struct ieee80211_hw *hw)
+static void mt7601u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7601u_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 7d9fb9f2d527..eb37b228d54e 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -237,11 +237,12 @@ static int set_channel(struct wiphy *wiphy,
struct wilc_vif *vif;
u32 channelnum;
int result;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return PTR_ERR(vif);
}
@@ -252,7 +253,7 @@ static int set_channel(struct wiphy *wiphy,
if (result)
netdev_err(vif->ndev, "Error in setting channel\n");
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return result;
}
@@ -805,8 +806,9 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
struct wilc_priv *priv;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif))
goto out;
@@ -861,7 +863,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1537,19 +1539,20 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_MONITOR) {
struct net_device *ndev;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
if (!vif) {
vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
if (!vif) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
}
}
if (vif->monitor_flag) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
}
@@ -1557,12 +1560,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
if (ndev) {
vif->monitor_flag = 1;
} else {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ERR_PTR(-EINVAL);
}
wdev = &vif->priv.wdev;
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return wdev;
}
@@ -1610,24 +1613,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
list_del_rcu(&vif->list);
wl->vif_num--;
mutex_unlock(&wl->vif_mutex);
- synchronize_rcu();
- return 0;
-}
-
-static int wilc_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
-{
- struct wilc *wl = wiphy_priv(wiphy);
-
- if (!wow && wilc_wlan_get_num_conn_ifcs(wl))
- wl->suspend_event = true;
- else
- wl->suspend_event = false;
-
- return 0;
-}
-
-static int wilc_resume(struct wiphy *wiphy)
-{
+ synchronize_srcu(&wl->srcu);
return 0;
}
@@ -1635,23 +1621,25 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return;
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
wilc_set_wowlan_trigger(vif, enabled);
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
int ret;
+ int srcu_idx;
s32 tx_power = MBM_TO_DBM(mbm);
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
@@ -1659,10 +1647,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!wl->initialized)
return -EIO;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return -EINVAL;
}
@@ -1674,7 +1662,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = wilc_set_tx_power(vif, tx_power);
if (ret)
netdev_err(vif->ndev, "Failed to set tx power\n");
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1734,8 +1722,6 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.set_power_mgmt = set_power_mgmt,
.set_cqm_rssi_config = set_cqm_rssi_config,
- .suspend = wilc_suspend,
- .resume = wilc_resume,
.set_wakeup = wilc_set_wakeup,
.set_tx_power = set_tx_power,
.get_tx_power = get_tx_power,
@@ -1757,6 +1743,7 @@ static void wlan_init_locks(struct wilc *wl)
init_completion(&wl->cfg_event);
init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
+ init_srcu_struct(&wl->srcu);
}
void wlan_deinit_locks(struct wilc *wilc)
@@ -1767,13 +1754,13 @@ void wlan_deinit_locks(struct wilc *wilc)
mutex_destroy(&wilc->txq_add_to_head_cs);
mutex_destroy(&wilc->vif_mutex);
mutex_destroy(&wilc->deinit_lock);
+ cleanup_srcu_struct(&wilc->srcu);
}
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
const struct wilc_hif_func *ops)
{
struct wilc *wl;
- struct wilc_vif *vif;
int ret, i;
wl = wilc_create_wiphy(dev);
@@ -1802,18 +1789,9 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
ret = -ENOMEM;
goto free_cfg;
}
- vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
- NL80211_IFTYPE_STATION, false);
- if (IS_ERR(vif)) {
- ret = PTR_ERR(vif);
- goto free_hq;
- }
return 0;
-free_hq:
- destroy_workqueue(wl->hif_workqueue);
-
free_cfg:
wilc_wlan_cfg_deinit(wl);
diff --git a/drivers/net/wireless/microchip/wilc1000/fw.h b/drivers/net/wireless/microchip/wilc1000/fw.h
index 5c5cac4aab02..7a930e89614c 100644
--- a/drivers/net/wireless/microchip/wilc1000/fw.h
+++ b/drivers/net/wireless/microchip/wilc1000/fw.h
@@ -13,6 +13,12 @@
#define WILC_MAX_RATES_SUPPORTED 12
#define WILC_MAX_NUM_PMKIDS 16
#define WILC_MAX_NUM_SCANNED_CH 14
+#define WILC_NVMEM_MAX_NUM_BANK 6
+#define WILC_NVMEM_BANK_BASE 0x30000000
+#define WILC_NVMEM_LOW_BANK_OFFSET 0x102c
+#define WILC_NVMEM_HIGH_BANK_OFFSET 0x1380
+#define WILC_NVMEM_IS_BANK_USED BIT(31)
+#define WILC_NVMEM_IS_BANK_INVALID BIT(30)
struct wilc_assoc_resp {
__le16 capab_info;
@@ -127,4 +133,11 @@ struct wilc_external_auth_param {
__le32 key_mgmt_suites;
__le16 status;
} __packed;
+
+static inline u32 get_bank_offset_from_bank_index(unsigned int i)
+{
+ return (((i) < 2) ? WILC_NVMEM_LOW_BANK_OFFSET + ((i) * 32) :
+ WILC_NVMEM_HIGH_BANK_OFFSET + ((i) - 2) * 16);
+}
+
#endif
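Reviewer note, not part of the patch: a runnable worked example of get_bank_offset_from_bank_index() using the constants defined above, to make the NVMEM bank layout explicit (banks 0-1 sit 32 bytes apart starting at 0x102c, banks 2-5 sit 16 bytes apart starting at 0x1380, all relative to WILC_NVMEM_BANK_BASE).

#include <stdio.h>

#define WILC_NVMEM_MAX_NUM_BANK		6
#define WILC_NVMEM_BANK_BASE		0x30000000u
#define WILC_NVMEM_LOW_BANK_OFFSET	0x102c
#define WILC_NVMEM_HIGH_BANK_OFFSET	0x1380

static unsigned int bank_offset(unsigned int i)
{
	return i < 2 ? WILC_NVMEM_LOW_BANK_OFFSET + i * 32 :
		       WILC_NVMEM_HIGH_BANK_OFFSET + (i - 2) * 16;
}

int main(void)
{
	/* prints 0x3000102c 0x3000104c 0x30001380 0x30001390 0x300013a0 0x300013b0 */
	for (unsigned int i = 0; i < WILC_NVMEM_MAX_NUM_BANK; i++)
		printf("bank %u -> 0x%08x\n", i,
		       WILC_NVMEM_BANK_BASE + bank_offset(i));
	return 0;
}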
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 919de6ffb821..3c48e1a57b24 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -382,7 +382,8 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct ieee80211_p2p_noa_attr noa_attr;
const struct cfg80211_bss_ies *ies;
struct wilc_join_bss_param *param;
- u8 rates_len = 0, ies_len;
+ u8 rates_len = 0;
+ int ies_len;
int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -1293,7 +1294,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
return result;
}
-int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr)
+int wilc_set_mac_address(struct wilc_vif *vif, const u8 *mac_addr)
{
struct wid wid;
int result;
@@ -1301,7 +1302,7 @@ int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr)
wid.id = WID_MAC_ADDR;
wid.type = WID_STR;
wid.size = ETH_ALEN;
- wid.val = mac_addr;
+ wid.val = (u8 *)mac_addr;
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
if (result)
@@ -1570,11 +1571,12 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
int result;
int id;
id = get_unaligned_le32(&buffer[length - 4]);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1593,7 +1595,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
msg->body.net_info.rssi = buffer[8];
msg->body.net_info.mgmt = kmemdup(&buffer[9],
msg->body.net_info.frame_len,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!msg->body.net_info.mgmt) {
kfree(msg);
goto out;
@@ -1606,7 +1608,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
@@ -1614,13 +1616,14 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
int result;
int id;
mutex_lock(&wilc->deinit_lock);
id = get_unaligned_le32(&buffer[length - 4]);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1647,7 +1650,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
mutex_unlock(&wilc->deinit_lock);
}
@@ -1655,11 +1658,12 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
+ int srcu_idx;
int result;
int id;
id = get_unaligned_le32(&buffer[length - 4]);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
goto out;
@@ -1684,7 +1688,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
}
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 0d380586b1d9..96eeaf31d237 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -167,7 +167,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 cipher_mode);
int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid);
int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr);
-int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr);
+int wilc_set_mac_address(struct wilc_vif *vif, const u8 *mac_addr);
int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
size_t ies_len);
int wilc_disconnect(struct wilc_vif *vif);
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 73f56f7b002b..9ecf3fb29b55 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -127,28 +127,30 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
{
+ int srcu_idx;
u8 ret_val = 0;
struct wilc_vif *vif;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif) {
if (!is_zero_ether_addr(vif->bssid))
ret_val++;
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return ret_val;
}
static void wilc_wake_tx_queues(struct wilc *wl)
{
+ int srcu_idx;
struct wilc_vif *ifc;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
wilc_for_each_vif(wl, ifc) {
if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
netif_wake_queue(ifc->ndev);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
static int wilc_txq_task(void *vp)
@@ -588,7 +590,6 @@ static int wilc_mac_open(struct net_device *ndev)
struct wilc *wl = vif->wilc;
int ret = 0;
struct mgmt_frame_regs mgmt_regs = {};
- u8 addr[ETH_ALEN] __aligned(2);
if (!wl || !wl->dev) {
netdev_err(ndev, "device not ready\n");
@@ -607,25 +608,19 @@ static int wilc_mac_open(struct net_device *ndev)
return ret;
}
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
- vif->idx);
-
- if (is_valid_ether_addr(ndev->dev_addr)) {
- ether_addr_copy(addr, ndev->dev_addr);
- wilc_set_mac_address(vif, addr);
- } else {
- wilc_get_mac_address(vif, addr);
- eth_hw_addr_set(ndev, addr);
- }
netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
-
- if (!is_valid_ether_addr(ndev->dev_addr)) {
- netdev_err(ndev, "Wrong MAC address\n");
+ ret = wilc_set_mac_address(vif, ndev->dev_addr);
+ if (ret) {
+ netdev_err(ndev, "Failed to enforce MAC address in chip");
wilc_deinit_host_int(ndev);
- wilc_wlan_deinitialize(ndev);
- return -EINVAL;
+ if (!wl->open_ifcs)
+ wilc_wlan_deinitialize(ndev);
+ return ret;
}
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
+ vif->idx);
+
mgmt_regs.interface_stypes = vif->mgmt_reg_stypes;
/* so we detect a change */
vif->mgmt_reg_stypes = 0;
@@ -653,6 +648,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
struct sockaddr *addr = (struct sockaddr *)p;
unsigned char mac_addr[ETH_ALEN];
struct wilc_vif *tmp_vif;
+ int srcu_idx;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -664,21 +660,21 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
/* Verify MAC Address is not already in use: */
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, tmp_vif) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return -EADDRNOTAVAIL;
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return 0;
}
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
- result = wilc_set_mac_address(vif, (u8 *)addr->sa_data);
+ result = wilc_set_mac_address(vif, addr->sa_data);
if (result)
return result;
@@ -764,14 +760,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
wilc_tx_complete);
if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
+ int srcu_idx;
struct wilc_vif *vif;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif) {
if (vif->mac_opened)
netif_stop_queue(vif->ndev);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
return NETDEV_TX_OK;
@@ -815,12 +812,13 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
unsigned int frame_len = 0;
struct wilc_vif *vif;
struct sk_buff *skb;
+ int srcu_idx;
int stats;
if (!wilc)
return;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
goto out;
@@ -848,14 +846,15 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
out:
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
{
+ int srcu_idx;
struct wilc_vif *vif;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif) {
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
@@ -876,7 +875,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
if (vif->monitor_flag)
wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
static const struct net_device_ops wilc_netdev_ops = {
@@ -906,7 +905,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
list_del_rcu(&vif->list);
wilc->vif_num--;
mutex_unlock(&wilc->vif_mutex);
- synchronize_rcu();
+ synchronize_srcu(&wilc->srcu);
if (vif->ndev)
unregister_netdev(vif->ndev);
}
@@ -925,15 +924,16 @@ static u8 wilc_get_available_idx(struct wilc *wl)
{
int idx = 0;
struct wilc_vif *vif;
+ int srcu_idx;
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wl->srcu);
wilc_for_each_vif(wl, vif) {
if (vif->idx == 0)
idx = 1;
else
idx = 0;
}
- rcu_read_unlock();
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return idx;
}
@@ -941,6 +941,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
int vif_type, enum nl80211_iftype type,
bool rtnl_locked)
{
+ u8 mac_address[ETH_ALEN];
struct net_device *ndev;
struct wilc_vif *vif;
int ret;
@@ -965,36 +966,50 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
vif->priv.wdev.iftype = type;
vif->priv.dev = ndev;
- if (rtnl_locked)
- ret = cfg80211_register_netdevice(ndev);
- else
- ret = register_netdev(ndev);
-
- if (ret) {
- ret = -EFAULT;
- goto error;
- }
-
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
vif->idx = wilc_get_available_idx(wl);
vif->mac_opened = 0;
+
+ memcpy(mac_address, wl->nv_mac_address, ETH_ALEN);
+ /* WILC firmware uses locally administered MAC address for the
+ * second virtual interface (bit 1 of first byte set), but
+ * since it is possibly not loaded/running yet, reproduce this behavior
+ * in the driver during interface creation.
+ */
+ if (vif->idx)
+ mac_address[0] |= 0x2;
+
+ eth_hw_addr_set(vif->ndev, mac_address);
+
mutex_lock(&wl->vif_mutex);
list_add_tail_rcu(&vif->list, &wl->vif_list);
wl->vif_num += 1;
mutex_unlock(&wl->vif_mutex);
- synchronize_rcu();
-
- return vif;
+ synchronize_srcu(&wl->srcu);
-error:
if (rtnl_locked)
- cfg80211_unregister_netdevice(ndev);
+ ret = cfg80211_register_netdevice(ndev);
else
- unregister_netdev(ndev);
+ ret = register_netdev(ndev);
+
+ if (ret) {
+ ret = -EFAULT;
+ goto error_remove_vif;
+ }
+
+ return vif;
+
+error_remove_vif:
+ mutex_lock(&wl->vif_mutex);
+ list_del_rcu(&vif->list);
+ wl->vif_num -= 1;
+ mutex_unlock(&wl->vif_mutex);
+ synchronize_srcu(&wl->srcu);
free_netdev(ndev);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(wilc_netdev_ifc_init);
MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
MODULE_LICENSE("GPL");
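Reviewer note, not part of the patch: a standalone example of the locally-administered-MAC rule described in the comment added to wilc_netdev_ifc_init() above. The NV MAC value used here is made up; only the bit manipulation mirrors the patch.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char nv_mac[6] = { 0xf8, 0xf0, 0x05, 0x12, 0x34, 0x56 };
	unsigned char mac[6];
	int vif_idx = 1;	/* 0 = first interface, 1 = second */

	memcpy(mac, nv_mac, sizeof(mac));
	if (vif_idx)
		mac[0] |= 0x2;	/* set the locally-administered bit */

	/* first interface keeps f8:f0:05:12:34:56, second becomes fa:f0:05:12:34:56 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}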
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index eecee3973d6a..95bc8b8fe65a 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -14,6 +14,7 @@
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
#include <linux/rculist.h>
+#include <uapi/linux/if_ether.h>
#include "hif.h"
#include "wlan.h"
@@ -32,8 +33,8 @@
#define wilc_for_each_vif(w, v) \
struct wilc *_w = w; \
- list_for_each_entry_rcu(v, &_w->vif_list, list, \
- rcu_read_lock_held())
+ list_for_each_entry_srcu(v, &_w->vif_list, list, \
+ srcu_read_lock_held(&_w->srcu))
struct wilc_wfi_stats {
unsigned long rx_packets;
@@ -220,6 +221,14 @@ struct wilc {
/* protect vif list */
struct mutex vif_mutex;
+ /* Sleepable RCU struct to manipulate vif list. Sleepable version is
+ * needed over the classic RCU version because the driver's current
+ * design involves some sleeping code while manipulating a vif
+ * retrieved from vif list (so in a SRCU critical section), like:
+ * - sending commands to the chip, using info from retrieved vif
+ * - registering a new monitoring net device
+ */
+ struct srcu_struct srcu;
u8 open_ifcs;
/* protect head of transmit queue */
@@ -263,7 +272,6 @@ struct wilc {
const struct firmware *firmware;
struct device *dev;
- bool suspend_event;
struct workqueue_struct *hif_workqueue;
struct wilc_cfg cfg;
@@ -278,6 +286,7 @@ struct wilc {
struct ieee80211_rate bitrates[ARRAY_SIZE(wilc_bitrates)];
struct ieee80211_supported_band band;
u32 cipher_suites[ARRAY_SIZE(wilc_cipher_suites)];
+ u8 nv_mac_address[ETH_ALEN];
};
struct wilc_wfi_mon_priv {
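Reviewer note, not part of the patch: a kernel-style sketch of the reader/writer discipline that the SRCU comment above describes, assembled from the conversions elsewhere in this patch. The function names sketch_reader/sketch_writer are hypothetical.

/* reader: walk the vif list and possibly sleep (e.g. send a command to the
 * chip) while still inside the SRCU read-side critical section
 */
static void sketch_reader(struct wilc *wl)
{
	struct wilc_vif *vif;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&wl->srcu);
	wilc_for_each_vif(wl, vif) {
		/* sleeping operations on vif are allowed here */
	}
	srcu_read_unlock(&wl->srcu, srcu_idx);
}

/* writer: unlink a vif under vif_mutex, then wait until no reader can
 * still observe it before tearing it down
 */
static void sketch_writer(struct wilc *wl, struct wilc_vif *vif)
{
	mutex_lock(&wl->vif_mutex);
	list_del_rcu(&vif->list);
	wl->vif_num--;
	mutex_unlock(&wl->vif_mutex);

	synchronize_srcu(&wl->srcu);
	/* vif may now be freed safely */
}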
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 52a770c5e76f..0043f7a0fdf9 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -24,6 +24,9 @@ MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids);
#define WILC_SDIO_BLOCK_SIZE 512
+static int wilc_sdio_init(struct wilc *wilc, bool resume);
+static int wilc_sdio_deinit(struct wilc *wilc);
+
struct wilc_sdio {
bool irq_gpio;
u32 block_size;
@@ -136,9 +139,11 @@ out:
static int wilc_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
+ struct wilc_sdio *sdio_priv;
+ struct wilc_vif *vif;
struct wilc *wilc;
int ret;
- struct wilc_sdio *sdio_priv;
+
sdio_priv = kzalloc(sizeof(*sdio_priv), GFP_KERNEL);
if (!sdio_priv)
@@ -176,9 +181,28 @@ static int wilc_sdio_probe(struct sdio_func *func,
}
clk_prepare_enable(wilc->rtc_clk);
+ wilc_sdio_init(wilc, false);
+
+ ret = wilc_load_mac_from_nv(wilc);
+ if (ret) {
+ pr_err("Can not retrieve MAC address from chip\n");
+ goto clk_disable_unprepare;
+ }
+
+ wilc_sdio_deinit(wilc);
+
+ vif = wilc_netdev_ifc_init(wilc, "wlan%d", WILC_STATION_MODE,
+ NL80211_IFTYPE_STATION, false);
+ if (IS_ERR(vif)) {
+ ret = PTR_ERR(vif);
+ goto clk_disable_unprepare;
+ }
+
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
+clk_disable_unprepare:
+ clk_disable_unprepare(wilc->rtc_clk);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
@@ -225,33 +249,6 @@ static bool wilc_sdio_is_init(struct wilc *wilc)
return sdio_priv->isinit;
}
-static int wilc_sdio_suspend(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct wilc *wilc = sdio_get_drvdata(func);
- int ret;
-
- dev_info(dev, "sdio suspend\n");
- chip_wakeup(wilc);
-
- if (!IS_ERR(wilc->rtc_clk))
- clk_disable_unprepare(wilc->rtc_clk);
-
- if (wilc->suspend_event) {
- host_sleep_notify(wilc);
- chip_allow_sleep(wilc);
- }
-
- ret = wilc_sdio_reset(wilc);
- if (ret) {
- dev_err(&func->dev, "Fail reset sdio\n");
- return ret;
- }
- sdio_claim_host(func);
-
- return 0;
-}
-
static int wilc_sdio_enable_interrupt(struct wilc *dev)
{
struct sdio_func *func = container_of(dev->dev, struct sdio_func, dev);
@@ -617,7 +614,52 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
static int wilc_sdio_deinit(struct wilc *wilc)
{
+ struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
+ struct sdio_cmd52 cmd;
+ int ret;
+
+ cmd.read_write = 1;
+ cmd.function = 0;
+ cmd.raw = 1;
+
+ /* Disable all functions interrupts */
+ cmd.address = SDIO_CCCR_IENx;
+ cmd.data = 0;
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev, "Failed to disable functions interrupts\n");
+ return ret;
+ }
+
+ /* Disable all functions */
+ cmd.address = SDIO_CCCR_IOEx;
+ cmd.data = 0;
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev,
+ "Failed to reset all functions\n");
+ return ret;
+ }
+
+ /* Disable CSA */
+ cmd.read_write = 0;
+ cmd.address = SDIO_FBR_BASE(1);
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev,
+ "Failed to read CSA for function 1\n");
+ return ret;
+ }
+ cmd.read_write = 1;
+ cmd.address = SDIO_FBR_BASE(1);
+ cmd.data &= ~SDIO_FBR_ENABLE_CSA;
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev,
+ "Failed to disable CSA for function 1\n");
+ return ret;
+ }
sdio_priv->isinit = false;
return 0;
@@ -838,27 +880,12 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
- u32 reg;
if (nint > MAX_NUM_INT) {
dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
return -EINVAL;
}
- /**
- * Disable power sequencer
- **/
- if (wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
- dev_err(&func->dev, "Failed read misc reg...\n");
- return -EINVAL;
- }
-
- reg &= ~BIT(8);
- if (wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
- dev_err(&func->dev, "Failed write misc reg...\n");
- return -EINVAL;
- }
-
if (sdio_priv->irq_gpio) {
u32 reg;
int ret, i;
@@ -942,20 +969,40 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.hif_is_init = wilc_sdio_is_init,
};
+static int wilc_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wilc *wilc = sdio_get_drvdata(func);
+ int ret;
+
+ dev_info(dev, "sdio suspend\n");
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_disable_unprepare(wilc->rtc_clk);
+
+ host_sleep_notify(wilc);
+
+ wilc_sdio_disable_interrupt(wilc);
+
+ ret = wilc_sdio_reset(wilc);
+ if (ret) {
+ dev_err(&func->dev, "Fail reset sdio\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int wilc_sdio_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct wilc *wilc = sdio_get_drvdata(func);
dev_info(dev, "sdio resume\n");
- sdio_release_host(func);
- chip_wakeup(wilc);
wilc_sdio_init(wilc, true);
+ wilc_sdio_enable_interrupt(wilc);
- if (wilc->suspend_event)
- host_wakeup_notify(wilc);
-
- chip_allow_sleep(wilc);
+ host_wakeup_notify(wilc);
return 0;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 61c3572ce321..5ff940c53ad9 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -206,9 +206,10 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
static int wilc_bus_probe(struct spi_device *spi)
{
- int ret;
- struct wilc *wilc;
struct wilc_spi *spi_priv;
+ struct wilc_vif *vif;
+ struct wilc *wilc;
+ int ret;
spi_priv = kzalloc(sizeof(*spi_priv), GFP_KERNEL);
if (!spi_priv)
@@ -249,7 +250,19 @@ static int wilc_bus_probe(struct spi_device *spi)
if (ret)
goto power_down;
+ ret = wilc_load_mac_from_nv(wilc);
+ if (ret) {
+ pr_err("Can not retrieve MAC address from chip\n");
+ goto power_down;
+ }
+
wilc_wlan_power(wilc, false);
+ vif = wilc_netdev_ifc_init(wilc, "wlan%d", WILC_STATION_MODE,
+ NL80211_IFTYPE_STATION, false);
+ if (IS_ERR(vif)) {
+ ret = PTR_ERR(vif);
+ goto power_down;
+ }
return 0;
power_down:
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 37c32d17856e..533939e71534 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -678,17 +678,17 @@ EXPORT_SYMBOL_GPL(chip_wakeup);
void host_wakeup_notify(struct wilc *wilc)
{
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_2, 1);
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
}
EXPORT_SYMBOL_GPL(host_wakeup_notify);
void host_sleep_notify(struct wilc *wilc)
{
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_1, 1);
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
}
EXPORT_SYMBOL_GPL(host_sleep_notify);
@@ -712,6 +712,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
+ int srcu_idx;
u8 *txb = wilc->tx_buffer;
struct wilc_vif *vif;
@@ -723,10 +724,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
mutex_lock(&wilc->txq_add_to_head_cs);
- rcu_read_lock();
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_for_each_vif(wilc, vif)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
- rcu_read_unlock();
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
for (ac = 0; ac < NQUEUES; ac++)
tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac);
@@ -1472,6 +1473,55 @@ u32 wilc_get_chipid(struct wilc *wilc, bool update)
return wilc->chipid;
}
+int wilc_load_mac_from_nv(struct wilc *wl)
+{
+ int ret = -EINVAL;
+ unsigned int i;
+
+ acquire_bus(wl, WILC_BUS_ACQUIRE_AND_WAKEUP);
+
+ for (i = 0; i < WILC_NVMEM_MAX_NUM_BANK; i++) {
+ int bank_offset = get_bank_offset_from_bank_index(i);
+ u32 reg1, reg2;
+ u8 invalid;
+ u8 used;
+
+ ret = wl->hif_func->hif_read_reg(wl,
+ WILC_NVMEM_BANK_BASE + bank_offset,
+ &reg1);
+ if (ret) {
+ pr_err("Can not read address %d lower part", i);
+ break;
+ }
+ ret = wl->hif_func->hif_read_reg(wl,
+ WILC_NVMEM_BANK_BASE + bank_offset + 4,
+ &reg2);
+ if (ret) {
+ pr_err("Can not read address %d upper part", i);
+ break;
+ }
+
+ used = FIELD_GET(WILC_NVMEM_IS_BANK_USED, reg1);
+ invalid = FIELD_GET(WILC_NVMEM_IS_BANK_INVALID, reg1);
+ if (!used || invalid)
+ continue;
+
+ wl->nv_mac_address[0] = FIELD_GET(GENMASK(23, 16), reg1);
+ wl->nv_mac_address[1] = FIELD_GET(GENMASK(15, 8), reg1);
+ wl->nv_mac_address[2] = FIELD_GET(GENMASK(7, 0), reg1);
+ wl->nv_mac_address[3] = FIELD_GET(GENMASK(31, 24), reg2);
+ wl->nv_mac_address[4] = FIELD_GET(GENMASK(23, 16), reg2);
+ wl->nv_mac_address[5] = FIELD_GET(GENMASK(15, 8), reg2);
+
+ ret = 0;
+ break;
+ }
+
+ release_bus(wl, WILC_BUS_RELEASE_ALLOW_SLEEP);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wilc_load_mac_from_nv);
+
int wilc_wlan_init(struct net_device *dev)
{
int ret = 0;
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index 54643d8fef04..dd2fb3c2f06a 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -56,7 +56,6 @@
#define WILC_HOST_RX_CTRL (WILC_PERIPH_REG_BASE + 0x80)
#define WILC_HOST_RX_EXTRA_SIZE (WILC_PERIPH_REG_BASE + 0x84)
#define WILC_HOST_TX_CTRL_1 (WILC_PERIPH_REG_BASE + 0x88)
-#define WILC_MISC (WILC_PERIPH_REG_BASE + 0x428)
#define WILC_INTR_REG_BASE (WILC_PERIPH_REG_BASE + 0xa00)
#define WILC_INTR_ENABLE WILC_INTR_REG_BASE
#define WILC_INTR2_ENABLE (WILC_INTR_REG_BASE + 4)
@@ -445,4 +444,5 @@ int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
u32 count);
int wilc_wlan_init(struct net_device *dev);
u32 wilc_get_chipid(struct wilc *wilc, bool update);
+int wilc_load_mac_from_nv(struct wilc *wilc);
#endif
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 641f847d47ab..eae93efa6150 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -111,7 +111,7 @@ int plfxlc_op_start(struct ieee80211_hw *hw)
return 0;
}
-void plfxlc_op_stop(struct ieee80211_hw *hw)
+void plfxlc_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h
index 49b92413729b..9384acddcf26 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.h
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.h
@@ -178,7 +178,7 @@ int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer,
void plfxlc_mac_tx_failed(struct urb *urb);
void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error);
int plfxlc_op_start(struct ieee80211_hw *hw);
-void plfxlc_op_stop(struct ieee80211_hw *hw);
+void plfxlc_op_stop(struct ieee80211_hw *hw, bool suspend);
int plfxlc_restore_settings(struct plfxlc_mac *mac);
#endif /* PLFXLC_MAC_H */
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 311676c1ece0..15334940287d 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -408,7 +408,7 @@ void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw,
void plfxlc_usb_release(struct plfxlc_usb *usb)
{
- plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb), false);
plfxlc_usb_disable_tx(usb);
plfxlc_usb_disable_rx(usb);
usb_set_intfdata(usb->intf, NULL);
@@ -761,7 +761,7 @@ static void plfxlc_usb_resume(struct plfxlc_usb *usb)
static void plfxlc_usb_stop(struct plfxlc_usb *usb)
{
- plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb), false);
plfxlc_usb_disable_tx(usb);
plfxlc_usb_disable_rx(usb);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 82af01448a0a..dfb4bb370f01 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -335,16 +335,6 @@ struct link {
struct delayed_work watchdog_work;
unsigned int watchdog_interval;
unsigned int watchdog;
-
- /*
- * Work structure for scheduling periodic AGC adjustments.
- */
- struct delayed_work agc_work;
-
- /*
- * Work structure for scheduling periodic VCO calibration.
- */
- struct delayed_work vco_work;
};
enum rt2x00_delayed_flags {
@@ -1460,7 +1450,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
-void rt2x00mac_stop(struct ieee80211_hw *hw);
+void rt2x00mac_stop(struct ieee80211_hw *hw, bool suspend);
void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 75fda72c14ca..451632488805 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -178,7 +178,7 @@ int rt2x00mac_start(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_start);
-void rt2x00mac_stop(struct ieee80211_hw *hw)
+void rt2x00mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 77b6cb7e1f6b..ded8d4d59289 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1249,7 +1249,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
return ret;
}
-static void rtl8180_stop(struct ieee80211_hw *dev)
+static void rtl8180_stop(struct ieee80211_hw *dev, bool suspend)
{
struct rtl8180_priv *priv = dev->priv;
u8 reg;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 78d99afa373d..220ac5bdf279 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1019,7 +1019,7 @@ rtl8187_start_exit:
return ret;
}
-static void rtl8187_stop(struct ieee80211_hw *dev)
+static void rtl8187_stop(struct ieee80211_hw *dev, bool suspend)
{
struct rtl8187_priv *priv = dev->priv;
struct sk_buff *skb;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
index bd5a0603b4a2..3abf14d7044f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
@@ -697,9 +697,14 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
}
+#define TX_POWER_INDEX_MAX 0x3F
+#define TX_POWER_INDEX_DEFAULT_CCK 0x22
+#define TX_POWER_INDEX_DEFAULT_HT40 0x27
+
static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu;
+ int i;
if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
@@ -713,6 +718,16 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
efuse->tx_power_index_A.ht40_base,
sizeof(efuse->tx_power_index_A.ht40_base));
+ for (i = 0; i < ARRAY_SIZE(priv->cck_tx_power_index_A); i++) {
+ if (priv->cck_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
+ priv->cck_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_CCK;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->ht40_1s_tx_power_index_A); i++) {
+ if (priv->ht40_1s_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
+ priv->ht40_1s_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_HT40;
+ }
+
priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
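Reviewer note, not part of the patch: the efuse sanity check added above, shown in isolation with made-up input values so the clamping behaviour is easy to see. Indexes above the 6-bit hardware maximum are replaced with the conservative CCK default rather than being written to the chip.

#include <stdio.h>

#define TX_POWER_INDEX_MAX		0x3F
#define TX_POWER_INDEX_DEFAULT_CCK	0x22

int main(void)
{
	unsigned char cck_idx[4] = { 0x20, 0x7f, 0x3f, 0xff }; /* made-up efuse values */

	for (int i = 0; i < 4; i++) {
		if (cck_idx[i] > TX_POWER_INDEX_MAX)
			cck_idx[i] = TX_POWER_INDEX_DEFAULT_CCK;
		/* prints 0x20, 0x22, 0x3f, 0x22 */
		printf("index %d -> 0x%02x\n", i, cck_idx[i]);
	}
	return 0;
}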
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 89a841b4e8d5..043fa364e701 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -6679,7 +6679,6 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN];
u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2];
struct rtl8xxxu_vif *rtlvif;
- struct ieee80211_vif *vif;
u8 tsftr[8], tsftr_1[8];
int i;
@@ -6744,10 +6743,7 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
/* write bcn ctl */
rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1);
rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl);
-
- vif = priv->vifs[0];
- priv->vifs[0] = priv->vifs[1];
- priv->vifs[1] = vif;
+ swap(priv->vifs[0], priv->vifs[1]);
/* priv->vifs[0] is NULL here, based on how this function is currently
* called from rtl8xxxu_add_interface().
@@ -7521,7 +7517,7 @@ error_out:
return ret;
}
-static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+static void rtl8xxxu_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtl8xxxu_priv *priv = hw->priv;
unsigned long flags;
diff --git a/drivers/net/wireless/realtek/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig
index cfe63f7b28d9..1e66c1bf7c8b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Kconfig
+++ b/drivers/net/wireless/realtek/rtlwifi/Kconfig
@@ -119,6 +119,18 @@ config RTL8192CU
If you choose to build it as a module, it will be called rtl8192cu
+config RTL8192DU
+ tristate "Realtek RTL8192DU USB Wireless Network Adapter"
+ depends on USB
+ select RTLWIFI
+ select RTLWIFI_USB
+ select RTL8192D_COMMON
+ help
+ This is the driver for Realtek RTL8192DU 802.11n USB
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8192du
+
config RTLWIFI
tristate
select FW_LOADER
diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
index 423981b148df..9cf32277c7f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_RTL8192CU) += rtl8192cu/
obj-$(CONFIG_RTL8192SE) += rtl8192se/
obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d/
obj-$(CONFIG_RTL8192DE) += rtl8192de/
+obj-$(CONFIG_RTL8192DU) += rtl8192du/
obj-$(CONFIG_RTL8723AE) += rtl8723ae/
obj-$(CONFIG_RTL8723BE) += rtl8723be/
obj-$(CONFIG_RTL8188EE) += rtl8188ee/
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 1a8d715b7c07..aab4605de9c4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2272,7 +2272,7 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops;
+ const struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops;
const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
u8 cmd_id, cmd_len;
u8 *cmd_buf = NULL;
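
The base.c hunk const-qualifies the local HAL ops pointer: the C2H parser only calls through the table and never writes to it, so holding it as const documents that and helps pave the way for the ops tables themselves to be declared const. A standalone sketch of the pattern, with illustrative names:

#include <stdio.h>

struct demo_hal_ops {
	void (*get_btc_status)(void);
};

static void demo_get_btc_status(void)
{
	puts("btc status queried");
}

static const struct demo_hal_ops demo_ops = {
	.get_btc_status = demo_get_btc_status,
};

static void demo_parse(const struct demo_hal_ops *hal_ops)
{
	/* hal_ops->get_btc_status = NULL; -- now rejected at compile time */
	hal_ops->get_btc_status();
}

int main(void)
{
	demo_parse(&demo_ops);
	return 0;
}
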
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 2e60a6991ca1..7537f04b1930 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -144,7 +144,7 @@ static int rtl_op_start(struct ieee80211_hw *hw)
return err;
}
-static void rtl_op_stop(struct ieee80211_hw *hw)
+static void rtl_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -547,7 +547,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
rtlhal->enter_pnp_sleep = true;
rtl_lps_leave(hw, true);
- rtl_op_stop(hw);
+ rtl_op_stop(hw, false);
device_set_wakeup_enable(wiphy_dev(hw->wiphy), true);
return 0;
}
@@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
- /* brought up everything changes (changed == ~0) indicates first
- * open, so use our default value instead of that of wiphy.
- */
- if (changed != ~0) {
- mac->retry_long = hw->conf.long_frame_max_tx_count;
- mac->retry_short = hw->conf.long_frame_max_tx_count;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- (u8 *)(&hw->conf.long_frame_max_tx_count));
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
!rtlpriv->proximity.proxim_on) {
struct ieee80211_channel *channel = hw->conf.chandef.chan;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 37bb59fa8bfa..35875cda30fc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -27,7 +27,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index e20f2bec45c4..ce7c28d9c874 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -31,7 +31,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
* */
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 48be7e346efc..c9b9e2bc90cc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -53,8 +53,6 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
} else {
fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
}
- /* provide name of alternative file */
- rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
pr_info("Loading firmware %s\n", fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
@@ -160,6 +158,7 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
+ .alt_fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c
index 6570d5e168e9..97e0d9c01e0a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c
@@ -14,7 +14,7 @@
#include "hw_common.h"
#include "phy_common.h"
-void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw)
+void rtl92d_stop_tx_beacon(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp1byte;
@@ -27,9 +27,9 @@ void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw)
tmp1byte &= ~(BIT(0));
rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
}
-EXPORT_SYMBOL_GPL(rtl92de_stop_tx_beacon);
+EXPORT_SYMBOL_GPL(rtl92d_stop_tx_beacon);
-void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw)
+void rtl92d_resume_tx_beacon(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp1byte;
@@ -42,7 +42,7 @@ void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw)
tmp1byte |= BIT(0);
rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
}
-EXPORT_SYMBOL_GPL(rtl92de_resume_tx_beacon);
+EXPORT_SYMBOL_GPL(rtl92d_resume_tx_beacon);
void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
@@ -285,7 +285,7 @@ void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
EXPORT_SYMBOL_GPL(rtl92d_set_hw_reg);
-bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+bool rtl92d_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
bool status = true;
@@ -307,9 +307,9 @@ bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
} while (++count);
return status;
}
-EXPORT_SYMBOL_GPL(rtl92de_llt_write);
+EXPORT_SYMBOL_GPL(rtl92d_llt_write);
-void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
+void rtl92d_enable_hw_security_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
@@ -334,16 +334,16 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
"The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
-EXPORT_SYMBOL_GPL(rtl92de_enable_hw_security_config);
+EXPORT_SYMBOL_GPL(rtl92d_enable_hw_security_config);
/* don't set REG_EDCA_BE_PARAM here because
* mac80211 will send pkt when scan
*/
-void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
+void rtl92d_set_qos(struct ieee80211_hw *hw, int aci)
{
rtl92d_dm_init_edca_turbo(hw);
}
-EXPORT_SYMBOL_GPL(rtl92de_set_qos);
+EXPORT_SYMBOL_GPL(rtl92d_set_qos);
static enum version_8192d _rtl92d_read_chip_version(struct ieee80211_hw *hw)
{
@@ -362,8 +362,8 @@ static enum version_8192d _rtl92d_read_chip_version(struct ieee80211_hw *hw)
return version;
}
-static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo,
- u8 *efuse, bool autoloadfail)
+static void _rtl92d_readpowervalue_fromprom(struct txpower_info *pwrinfo,
+ u8 *efuse, bool autoloadfail)
{
u32 rfpath, eeaddr, group, offset, offset1, offset2;
u8 i, val8;
@@ -500,8 +500,8 @@ static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo,
}
}
-static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
- bool autoload_fail, u8 *hwinfo)
+static void _rtl92d_read_txpower_info(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
@@ -509,7 +509,7 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
u8 tempval[2], i, pwr, diff;
u32 ch, rfpath, group;
- _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
+ _rtl92d_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
if (!autoload_fail) {
/* bit0~2 */
rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7);
@@ -613,8 +613,8 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
}
}
-static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
- u8 *content)
+static void _rtl92d_read_macphymode_from_prom(struct ieee80211_hw *hw,
+ u8 *content)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -636,15 +636,15 @@ static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
}
}
-static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw,
- u8 *content)
+static void _rtl92d_read_macphymode_and_bandtype(struct ieee80211_hw *hw,
+ u8 *content)
{
- _rtl92de_read_macphymode_from_prom(hw, content);
+ _rtl92d_read_macphymode_from_prom(hw, content);
rtl92d_phy_config_macphymode(hw);
rtl92d_phy_config_macphymode_info(hw);
}
-static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
+static void _rtl92d_efuse_update_chip_version(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
enum version_8192d chipver = rtlpriv->rtlhal.version;
@@ -676,7 +676,7 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
rtlpriv->rtlhal.version = chipver;
}
-static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
+static void _rtl92d_read_adapter_info(struct ieee80211_hw *hw)
{
static const int params_pci[] = {
RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
@@ -706,8 +706,8 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
goto exit;
- _rtl92de_efuse_update_chip_version(hw);
- _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
+ _rtl92d_efuse_update_chip_version(hw);
+ _rtl92d_read_macphymode_and_bandtype(hw, hwinfo);
/* Read Permanent MAC address for 2nd interface */
if (rtlhal->interfaceindex != 0)
@@ -717,7 +717,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
rtlefuse->dev_addr);
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
- _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
+ _rtl92d_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
/* Read Channel Plan */
switch (rtlhal->bandset) {
@@ -739,7 +739,7 @@ exit:
kfree(hwinfo);
}
-void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
+void rtl92d_read_eeprom_info(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
@@ -760,15 +760,15 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
- _rtl92de_read_adapter_info(hw);
+ _rtl92d_read_adapter_info(hw);
} else {
pr_err("Autoload ERR!!\n");
}
}
-EXPORT_SYMBOL_GPL(rtl92de_read_eeprom_info);
+EXPORT_SYMBOL_GPL(rtl92d_read_eeprom_info);
-static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta)
+static void rtl92d_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -851,9 +851,9 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_read_dword(rtlpriv, REG_ARFR0));
}
-static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level, bool update_bw)
+static void rtl92d_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1009,20 +1009,20 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
sta_entry->ratr_index = ratr_index;
}
-void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level, bool update_bw)
+void rtl92d_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
+ rtl92d_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
- rtl92de_update_hal_rate_table(hw, sta);
+ rtl92d_update_hal_rate_table(hw, sta);
}
-EXPORT_SYMBOL_GPL(rtl92de_update_hal_rate_tbl);
+EXPORT_SYMBOL_GPL(rtl92d_update_hal_rate_tbl);
-void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
+void rtl92d_update_channel_access_setting(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1036,9 +1036,9 @@ void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
sifs_timer = 0x1010;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
}
-EXPORT_SYMBOL_GPL(rtl92de_update_channel_access_setting);
+EXPORT_SYMBOL_GPL(rtl92d_update_channel_access_setting);
-bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+bool rtl92d_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -1093,11 +1093,11 @@ bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
*valid = 1;
return !ppsc->hwradiooff;
}
-EXPORT_SYMBOL_GPL(rtl92de_gpio_radio_on_off_checking);
+EXPORT_SYMBOL_GPL(rtl92d_gpio_radio_on_off_checking);
-void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
- u8 *p_macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all)
+void rtl92d_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
{
static const u8 cam_const_addr[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
@@ -1222,4 +1222,4 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
}
}
}
-EXPORT_SYMBOL_GPL(rtl92de_set_key);
+EXPORT_SYMBOL_GPL(rtl92d_set_key);
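
The renames above drop the PCIe-flavoured "92de" prefix from helpers that live in the shared rtl8192d common code, so the new USB front end (rtl8192du, added later in this series) can reuse them; the exported symbols and all callers are updated together. A minimal illustration of the intent, with illustrative names, is below: one common helper referenced by both front ends' ops tables (in the kernel the helper is exported with EXPORT_SYMBOL_GPL from the common module).

#include <stdio.h>

static void rtl92d_common_helper(void)
{
	puts("shared 92D helper");
}

struct demo_ops {
	void (*helper)(void);
};

static const struct demo_ops pcie_ops = { .helper = rtl92d_common_helper };
static const struct demo_ops usb_ops  = { .helper = rtl92d_common_helper };

int main(void)
{
	pcie_ops.helper();	/* rtl8192de (PCIe) variant */
	usb_ops.helper();	/* rtl8192du (USB) variant */
	return 0;
}
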
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h
index 2c07f5cc5766..4da1bab15f36 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h
@@ -4,21 +4,21 @@
#ifndef __RTL92D_HW_COMMON_H__
#define __RTL92D_HW_COMMON_H__
-void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw);
-void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw);
+void rtl92d_stop_tx_beacon(struct ieee80211_hw *hw);
+void rtl92d_resume_tx_beacon(struct ieee80211_hw *hw);
void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
-void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw);
-void rtl92de_set_qos(struct ieee80211_hw *hw, int aci);
-void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
-void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level, bool update_bw);
-void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw);
-bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
-void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
- u8 *p_macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all);
+bool rtl92d_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
+void rtl92d_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl92d_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl92d_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92d_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw);
+void rtl92d_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92d_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92d_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c
index 72d2b7426d82..9f9a34492030 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c
@@ -7,8 +7,8 @@
#include "def.h"
#include "trx_common.h"
-static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
+static long _rtl92d_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
{
long signal_power;
@@ -17,13 +17,13 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
return signal_power;
}
-static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstats,
- __le32 *pdesc,
- struct rx_fwinfo_92d *p_drvinfo,
- bool packet_match_bssid,
- bool packet_toself,
- bool packet_beacon)
+static void _rtl92d_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
@@ -203,8 +203,8 @@ static void rtl92d_loop_over_paths(struct ieee80211_hw *hw,
}
}
-static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
+static void _rtl92d_process_ui_rssi(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rt_smooth_data *ui_rssi;
@@ -226,15 +226,15 @@ static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw,
if (ui_rssi->index >= PHY_RSSI_SLID_WIN_MAX)
ui_rssi->index = 0;
tmpval = ui_rssi->total_val / ui_rssi->total_num;
- rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw, (u8)tmpval);
+ rtlpriv->stats.signal_strength = _rtl92d_translate_todbm(hw, (u8)tmpval);
pstats->rssi = rtlpriv->stats.signal_strength;
if (!pstats->is_cck && pstats->packet_toself)
rtl92d_loop_over_paths(hw, pstats);
}
-static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
+static void _rtl92d_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int weighting = 0;
@@ -249,8 +249,8 @@ static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw,
5 + pstats->recvsignalpower + weighting) / 6;
}
-static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
+static void _rtl92d_process_pwdb(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -276,7 +276,7 @@ static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
(pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
}
rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- _rtl92de_update_rxsignalstatistics(hw, pstats);
+ _rtl92d_update_rxsignalstatistics(hw, pstats);
}
}
@@ -301,8 +301,8 @@ static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
}
}
-static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
+static void _rtl92d_process_ui_link_quality(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rt_smooth_data *ui_link_quality;
@@ -330,24 +330,24 @@ static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw,
rtl92d_loop_over_streams(hw, pstats);
}
-static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
- u8 *buffer,
- struct rtl_stats *pcurrent_stats)
+static void _rtl92d_process_phyinfo(struct ieee80211_hw *hw,
+ u8 *buffer,
+ struct rtl_stats *pcurrent_stats)
{
if (!pcurrent_stats->packet_matchbssid &&
!pcurrent_stats->packet_beacon)
return;
- _rtl92de_process_ui_rssi(hw, pcurrent_stats);
- _rtl92de_process_pwdb(hw, pcurrent_stats);
- _rtl92de_process_ui_link_quality(hw, pcurrent_stats);
+ _rtl92d_process_ui_rssi(hw, pcurrent_stats);
+ _rtl92d_process_pwdb(hw, pcurrent_stats);
+ _rtl92d_process_ui_link_quality(hw, pcurrent_stats);
}
-static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct rtl_stats *pstats,
- __le32 *pdesc,
- struct rx_fwinfo_92d *p_drvinfo)
+static void _rtl92d_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo)
{
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -375,15 +375,15 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
packet_toself = packet_matchbssid &&
ether_addr_equal(praddr, rtlefuse->dev_addr);
packet_beacon = ieee80211_is_beacon(fc);
- _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
- packet_matchbssid, packet_toself,
- packet_beacon);
- _rtl92de_process_phyinfo(hw, tmp_buf, pstats);
+ _rtl92d_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+ packet_matchbssid, packet_toself,
+ packet_beacon);
+ _rtl92d_process_phyinfo(hw, tmp_buf, pstats);
}
-bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc8, struct sk_buff *skb)
+bool rtl92d_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc8, struct sk_buff *skb)
{
__le32 *pdesc = (__le32 *)pdesc8;
struct rx_fwinfo_92d *p_drvinfo;
@@ -423,17 +423,17 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
stats->rx_bufshift);
- _rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc,
- p_drvinfo);
+ _rtl92d_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->recvsignalpower + 10;
return true;
}
-EXPORT_SYMBOL_GPL(rtl92de_rx_query_desc);
+EXPORT_SYMBOL_GPL(rtl92d_rx_query_desc);
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
- u8 desc_name, u8 *val)
+void rtl92d_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
+ u8 desc_name, u8 *val)
{
__le32 *pdesc = (__le32 *)pdesc8;
@@ -473,10 +473,10 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
}
}
}
-EXPORT_SYMBOL_GPL(rtl92de_set_desc);
+EXPORT_SYMBOL_GPL(rtl92d_set_desc);
-u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc8, bool istx, u8 desc_name)
+u64 rtl92d_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc8, bool istx, u8 desc_name)
{
__le32 *p_desc = (__le32 *)p_desc8;
u32 ret = 0;
@@ -513,4 +513,4 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
}
return ret;
}
-EXPORT_SYMBOL_GPL(rtl92de_get_desc);
+EXPORT_SYMBOL_GPL(rtl92d_get_desc);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h
index 87d956d771eb..528182b1eba6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h
@@ -393,13 +393,13 @@ struct rx_fwinfo_92d {
#endif
} __packed;
-bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
- struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb);
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
- u8 desc_name, u8 *val);
-u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc, bool istx, u8 desc_name);
+bool rtl92d_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl92d_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
+u64 rtl92d_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc, bool istx, u8 desc_name);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 73b81e60cfa9..03f4314bdb2e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -181,7 +181,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 btype_ibss = val[0];
if (btype_ibss)
- rtl92de_stop_tx_beacon(hw);
+ rtl92d_stop_tx_beacon(hw);
_rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3));
rtl_write_dword(rtlpriv, REG_TSFTR,
(u32) (mac->tsf & 0xffffffff));
@@ -189,7 +189,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u32) ((mac->tsf >> 32) & 0xffffffff));
_rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0);
if (btype_ibss)
- rtl92de_resume_tx_beacon(hw);
+ rtl92d_resume_tx_beacon(hw);
break;
}
@@ -295,13 +295,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* 18. LLT_table_init(Adapter); */
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
- status = rtl92de_llt_write(hw, i, i + 1);
+ status = rtl92d_llt_write(hw, i, i + 1);
if (!status)
return status;
}
/* end of list */
- status = rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+ status = rtl92d_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
if (!status)
return status;
@@ -310,13 +310,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* config this MAC as two MAC transfer. */
/* Otherwise used as local loopback buffer. */
for (i = txpktbuf_bndy; i < maxpage; i++) {
- status = rtl92de_llt_write(hw, i, (i + 1));
+ status = rtl92d_llt_write(hw, i, (i + 1));
if (!status)
return status;
}
/* Let last entry point to the start entry of ring buffer */
- status = rtl92de_llt_write(hw, maxpage, txpktbuf_bndy);
+ status = rtl92d_llt_write(hw, maxpage, txpktbuf_bndy);
if (!status)
return status;
@@ -688,7 +688,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
/* reset hw sec */
rtl_cam_reset_all_entry(hw);
- rtl92de_enable_hw_security_config(hw);
+ rtl92d_enable_hw_security_config(hw);
/* Read EEPROM TX power index and PHY_REG_PG.txt to capture correct */
/* TX power index for different rate set. */
@@ -742,11 +742,11 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
if (type == NL80211_IFTYPE_UNSPECIFIED ||
type == NL80211_IFTYPE_STATION) {
- rtl92de_stop_tx_beacon(hw);
+ rtl92d_stop_tx_beacon(hw);
_rtl92de_enable_bcn_sub_func(hw);
} else if (type == NL80211_IFTYPE_ADHOC ||
type == NL80211_IFTYPE_AP) {
- rtl92de_resume_tx_beacon(hw);
+ rtl92d_resume_tx_beacon(hw);
_rtl92de_disable_bcn_sub_func(hw);
} else {
rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 5f6311c2aac4..e36e4aeb9a95 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -32,7 +32,7 @@ static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
* */
rtlpci->const_pci_aspm = 3;
@@ -187,7 +187,7 @@ static void rtl92d_deinit_sw_vars(struct ieee80211_hw *hw)
static struct rtl_hal_ops rtl8192de_hal_ops = {
.init_sw_vars = rtl92d_init_sw_vars,
.deinit_sw_vars = rtl92d_deinit_sw_vars,
- .read_eeprom_info = rtl92de_read_eeprom_info,
+ .read_eeprom_info = rtl92d_read_eeprom_info,
.interrupt_recognized = rtl92de_interrupt_recognized,
.hw_init = rtl92de_hw_init,
.hw_disable = rtl92de_card_disable,
@@ -197,30 +197,30 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.disable_interrupt = rtl92de_disable_interrupt,
.set_network_type = rtl92de_set_network_type,
.set_chk_bssid = rtl92de_set_check_bssid,
- .set_qos = rtl92de_set_qos,
+ .set_qos = rtl92d_set_qos,
.set_bcn_reg = rtl92de_set_beacon_related_registers,
.set_bcn_intv = rtl92de_set_beacon_interval,
.update_interrupt_mask = rtl92de_update_interrupt_mask,
.get_hw_reg = rtl92de_get_hw_reg,
.set_hw_reg = rtl92de_set_hw_reg,
- .update_rate_tbl = rtl92de_update_hal_rate_tbl,
+ .update_rate_tbl = rtl92d_update_hal_rate_tbl,
.fill_tx_desc = rtl92de_tx_fill_desc,
.fill_tx_cmddesc = rtl92de_tx_fill_cmddesc,
- .query_rx_desc = rtl92de_rx_query_desc,
- .set_channel_access = rtl92de_update_channel_access_setting,
- .radio_onoff_checking = rtl92de_gpio_radio_on_off_checking,
+ .query_rx_desc = rtl92d_rx_query_desc,
+ .set_channel_access = rtl92d_update_channel_access_setting,
+ .radio_onoff_checking = rtl92d_gpio_radio_on_off_checking,
.set_bw_mode = rtl92d_phy_set_bw_mode,
.switch_channel = rtl92d_phy_sw_chnl,
.dm_watchdog = rtl92de_dm_watchdog,
.scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92d_phy_set_rf_power_state,
.led_control = rtl92de_led_control,
- .set_desc = rtl92de_set_desc,
- .get_desc = rtl92de_get_desc,
+ .set_desc = rtl92d_set_desc,
+ .get_desc = rtl92d_get_desc,
.is_tx_desc_closed = rtl92de_is_tx_desc_closed,
.tx_polling = rtl92de_tx_polling,
- .enable_hw_sec = rtl92de_enable_hw_security_config,
- .set_key = rtl92de_set_key,
+ .enable_hw_sec = rtl92d_enable_hw_security_config,
+ .set_key = rtl92d_set_key,
.get_bbreg = rtl92d_phy_query_bb_reg,
.set_bbreg = rtl92d_phy_set_bb_reg,
.get_rfreg = rtl92d_phy_query_rf_reg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 2b9b352f7783..91bf399c9ef1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -292,7 +292,7 @@ bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl92d_get_desc(hw, entry, true, HW_DESC_OWN);
/* a beacon packet will only use the first
* descriptor by defaut, and the own bit may not
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile
new file mode 100644
index 000000000000..569bfd3d5030
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+rtl8192du-objs := \
+ dm.o \
+ fw.o \
+ hw.o \
+ led.o \
+ phy.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o
+
+obj-$(CONFIG_RTL8192DU) += rtl8192du.o
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c
new file mode 100644
index 000000000000..dd57707a9184
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/fw_common.h"
+#include "dm.h"
+
+static void rtl92du_dm_init_1r_cca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
+
+ dm_pstable->pre_ccastate = CCA_MAX;
+ dm_pstable->cur_ccasate = CCA_MAX;
+}
+
+static void rtl92du_dm_1r_cca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ int pwdb = rtlpriv->dm_digtable.min_undec_pwdb_for_dm;
+
+ if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY ||
+ rtlhal->current_bandtype != BAND_ON_5G)
+ return;
+
+ if (pwdb != 0) {
+ if (dm_pstable->pre_ccastate == CCA_2R ||
+ dm_pstable->pre_ccastate == CCA_MAX)
+ dm_pstable->cur_ccasate = (pwdb >= 35) ? CCA_1R : CCA_2R;
+ else
+ dm_pstable->cur_ccasate = (pwdb <= 30) ? CCA_2R : CCA_1R;
+ } else {
+ dm_pstable->cur_ccasate = CCA_MAX;
+ }
+
+ if (dm_pstable->pre_ccastate == dm_pstable->cur_ccasate)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_TRACE,
+ "Old CCA state: %d new CCA state: %d\n",
+ dm_pstable->pre_ccastate, dm_pstable->cur_ccasate);
+
+ if (dm_pstable->cur_ccasate == CCA_1R) {
+ if (rtlpriv->phy.rf_type == RF_2T2R)
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x13);
+ else /* Is this branch reachable? */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
+ } else { /* CCA_2R or CCA_MAX */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
+ }
+}
+
+static void rtl92du_dm_pwdb_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ const u32 max_macid = 32;
+ u32 temp;
+
+ /* AP & ADHOC & MESH will return tmp */
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ /* Indicate Rx signal strength to FW. */
+ if (rtlpriv->dm.useramask) {
+ temp = rtlpriv->dm.undec_sm_pwdb << 16;
+ temp |= max_macid << 8;
+
+ rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *)(&temp));
+ } else {
+ rtl_write_byte(rtlpriv, 0x4fe, (u8)rtlpriv->dm.undec_sm_pwdb);
+ }
+}
+
+void rtl92du_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl_dm_diginit(hw, 0x20);
+ rtlpriv->dm_digtable.rx_gain_max = DM_DIG_FA_UPPER;
+ rtlpriv->dm_digtable.rx_gain_min = DM_DIG_FA_LOWER;
+ rtl92d_dm_init_edca_turbo(hw);
+ rtl92du_dm_init_1r_cca(hw);
+ rtl92d_dm_init_rate_adaptive_mask(hw);
+ rtl92d_dm_initialize_txpower_tracking(hw);
+}
+
+void rtl92du_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool fw_current_inpsmode = false;
+ bool fwps_awake = true;
+
+ /* 1. RF is OFF. (No need to do DM.)
+ * 2. Fw is under power saving mode for FwLPS.
+ * (Prevent from SW/FW I/O racing.)
+ * 3. IPS workitem is scheduled. (Prevent from IPS sequence
+ * to be swapped with DM.
+ * 4. RFChangeInProgress is TRUE.
+ * (Prevent from broken by IPS/HW/SW Rf off.)
+ */
+
+ if (ppsc->rfpwr_state != ERFON || fw_current_inpsmode ||
+ !fwps_awake || ppsc->rfchange_inprogress)
+ return;
+
+ rtl92du_dm_pwdb_monitor(hw);
+ rtl92d_dm_false_alarm_counter_statistics(hw);
+ rtl92d_dm_find_minimum_rssi(hw);
+ rtl92d_dm_dig(hw);
+ rtl92d_dm_check_txpower_tracking_thermal_meter(hw);
+ rtl92d_dm_check_edca_turbo(hw);
+ rtl92du_dm_1r_cca(hw);
+}
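
The 1R CCA logic in rtl92du_dm_1r_cca() above uses a hysteresis band rather than a single threshold: it switches to single-receive-path CCA once the smoothed PWDB reaches 35 and falls back to two paths only when it drops to 30 or below, so readings between the two thresholds keep the previous state. The standalone model below reproduces that decision function; the sample PWDB sequence is illustrative.

#include <stdio.h>

enum cca { CCA_1R, CCA_2R, CCA_MAX };

static enum cca next_state(enum cca prev, int pwdb)
{
	if (pwdb == 0)
		return CCA_MAX;
	if (prev == CCA_2R || prev == CCA_MAX)
		return pwdb >= 35 ? CCA_1R : CCA_2R;
	return pwdb <= 30 ? CCA_2R : CCA_1R;	/* prev == CCA_1R */
}

int main(void)
{
	static const int samples[] = { 20, 36, 33, 31, 30, 34, 40, 0 };
	enum cca state = CCA_MAX;
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		state = next_state(state, samples[i]);
		printf("pwdb=%2d -> %s\n", samples[i],
		       state == CCA_1R ? "1R" :
		       state == CCA_2R ? "2R" : "MAX");
	}
	return 0;
}
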
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h
new file mode 100644
index 000000000000..2f283bf1e4d8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_DM_H__
+#define __RTL92DU_DM_H__
+
+void rtl92du_dm_init(struct ieee80211_hw *hw);
+void rtl92du_dm_watchdog(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c
new file mode 100644
index 000000000000..f74e4e84fe39
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/fw_common.h"
+#include "fw.h"
+
+int rtl92du_download_fw(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ enum version_8192d version = rtlhal->version;
+ u8 *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+
+ if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
+ return 1;
+
+ fwsize = rtlhal->fwsize;
+ pfwheader = rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
+ rtlhal->fw_version = (u16)GET_FIRMWARE_HDR_VERSION(pfwheader);
+ rtlhal->fw_subversion = (u16)GET_FIRMWARE_HDR_SUB_VER(pfwheader);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n",
+ rtlhal->fw_version, rtlhal->fw_subversion,
+ GET_FIRMWARE_HDR_SIGNATURE(pfwheader));
+
+ if (IS_FW_HEADER_EXIST(pfwheader)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Shift 32 bytes for FW header!!\n");
+ pfwdata = pfwdata + 32;
+ fwsize = fwsize - 32;
+ }
+
+ if (rtl92d_is_fw_downloaded(rtlpriv))
+ goto exit;
+
+ /* If 8051 is running in RAM code, driver should
+ * inform Fw to reset by itself, or it will cause
+ * download Fw fail.
+ */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
+ rtl92d_firmware_selfreset(hw);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+ }
+
+ rtl92d_enable_fw_download(hw, true);
+ rtl92d_write_fw(hw, version, pfwdata, fwsize);
+ rtl92d_enable_fw_download(hw, false);
+
+ err = rtl92d_fw_free_to_go(hw);
+ if (err)
+ pr_err("fw is not ready to run!\n");
+exit:
+ err = rtl92d_fw_init(hw);
+ return err;
+}
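
In rtl92du_download_fw() above, an image that carries the optional 32-byte header has its version fields read from that header, after which the payload pointer and size are advanced past it before the blob is written to the MCU. The standalone sketch below shows only that bookkeeping step; the signature check is a made-up placeholder, whereas the real driver uses IS_FW_HEADER_EXIST() on the first bytes of the image.

#include <stdio.h>

#define FW_HDR_SIZE 32

static int has_fw_header(const unsigned char *img)
{
	return img[0] == 0x92 && img[1] == 0xd0;	/* hypothetical magic */
}

int main(void)
{
	unsigned char image[FW_HDR_SIZE + 8] = { 0x92, 0xd0 };
	const unsigned char *payload = image;
	size_t size = sizeof(image);

	if (has_fw_header(image)) {
		payload += FW_HDR_SIZE;		/* skip the 32-byte header */
		size -= FW_HDR_SIZE;
	}
	printf("writing %zu payload bytes starting at offset %td\n",
	       size, payload - image);
	return 0;
}
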
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h
new file mode 100644
index 000000000000..7904bfbda4ba
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_FW_H__
+#define __RTL92DU_FW_H__
+
+int rtl92du_download_fw(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
new file mode 100644
index 000000000000..700c6e2bcad1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../cam.h"
+#include "../usb.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/fw_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "hw.h"
+#include "trx.h"
+
+static void _rtl92du_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlusb->reg_bcn_ctrl_val |= set_bits;
+ rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _rtl92du_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl92du_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92du_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+void rtl92du_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *)val) = mac->rx_conf;
+ break;
+ default:
+ rtl92d_get_hw_reg(hw, variable, val);
+ break;
+ }
+}
+
+void rtl92du_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ switch (variable) {
+ case HW_VAR_AC_PARAM:
+ rtl92d_dm_init_edca_turbo(hw);
+ break;
+ case HW_VAR_ACM_CTRL: {
+ u8 e_aci = *val;
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&mac->ac[0].aifs);
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= ACMHW_BEQEN;
+ break;
+ case AC2_VI:
+ acm_ctrl |= ACMHW_VIQEN;
+ break;
+ case AC3_VO:
+ acm_ctrl |= ACMHW_VOQEN;
+ break;
+ default:
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~ACMHW_BEQEN);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~ACMHW_VIQEN);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~ACMHW_VOQEN);
+ break;
+ default:
+ pr_err("%s:%d switch case %#x not processed\n",
+ __func__, __LINE__, e_aci);
+ break;
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:
+ mac->rx_conf = ((u32 *)val)[0];
+ rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT: {
+ u8 tmp_regcr, tmp_reg422;
+ bool recover = false;
+ u8 mstatus = *val;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AID, NULL);
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ tmp_regcr | ENSWBCN);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, EN_BCN_FUNCTION);
+ _rtl92du_set_bcn_ctrl_reg(hw, DIS_TSF_UDT, 0);
+ tmp_reg422 = rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ if (tmp_reg422 & (EN_BCNQ_DL >> 16))
+ recover = true;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & ~(EN_BCNQ_DL >> 16));
+
+ /* We don't implement FW LPS so this is not needed. */
+ /* rtl92d_set_fw_rsvdpagepkt(hw, 0); */
+
+ _rtl92du_set_bcn_ctrl_reg(hw, EN_BCN_FUNCTION, 0);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, DIS_TSF_UDT);
+ if (recover)
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ tmp_regcr & ~ENSWBCN);
+ }
+ rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
+ break;
+ }
+ case HW_VAR_CORRECT_TSF: {
+ u8 btype_ibss = val[0];
+
+ if (btype_ibss)
+ rtl92d_stop_tx_beacon(hw);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, EN_BCN_FUNCTION);
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32)(mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32)((mac->tsf >> 32) & 0xffffffff));
+ _rtl92du_set_bcn_ctrl_reg(hw, EN_BCN_FUNCTION, 0);
+ if (btype_ibss)
+ rtl92d_resume_tx_beacon(hw);
+
+ break;
+ }
+ case HW_VAR_KEEP_ALIVE:
+ /* Avoid "switch case not processed" error. RTL8192DU doesn't
+ * need to do anything here, maybe.
+ */
+ break;
+ default:
+ rtl92d_set_hw_reg(hw, variable, val);
+ break;
+ }
+}
+
+static void _rtl92du_init_queue_reserved_page(struct ieee80211_hw *hw,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 txqpagenum, txqpageunit;
+ u32 txqremainingpage;
+ u32 numhq = 0;
+ u32 numlq = 0;
+ u32 numnq = 0;
+ u32 numpubq;
+ u32 value32;
+
+ if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY) {
+ numpubq = NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC;
+ txqpagenum = TX_TOTAL_PAGE_NUMBER_92D_DUAL_MAC - numpubq;
+ } else {
+ numpubq = TEST_PAGE_NUM_PUBQ_92DU;
+ txqpagenum = TX_TOTAL_PAGE_NUMBER_92DU - numpubq;
+ }
+
+ if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY && out_ep_num == 3) {
+ numhq = NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC;
+ numlq = NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC;
+ numnq = NORMAL_PAGE_NUM_NORMALQ_92D_DUAL_MAC;
+ } else {
+ txqpageunit = txqpagenum / out_ep_num;
+ txqremainingpage = txqpagenum % out_ep_num;
+
+ if (queue_sel & TX_SELE_HQ)
+ numhq = txqpageunit;
+ if (queue_sel & TX_SELE_LQ)
+ numlq = txqpageunit;
+ if (queue_sel & TX_SELE_NQ)
+ numnq = txqpageunit;
+
+ /* HIGH priority queue always present in the
+ * configuration of 2 or 3 out-ep. Remainder pages
+ * assigned to High queue
+ */
+ if (out_ep_num > 1 && txqremainingpage)
+ numhq += txqremainingpage;
+ }
+
+ /* NOTE: This step done before writing REG_RQPN. */
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, (u8)numnq);
+
+ /* TX DMA */
+ u32p_replace_bits(&value32, numhq, HPQ_MASK);
+ u32p_replace_bits(&value32, numlq, LPQ_MASK);
+ u32p_replace_bits(&value32, numpubq, PUBQ_MASK);
+ value32 |= LD_RQPN;
+ rtl_write_dword(rtlpriv, REG_RQPN, value32);
+}
+
+static void _rtl92du_init_tx_buffer_boundary(struct ieee80211_hw *hw,
+ u8 txpktbuf_bndy)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+
+ /* TXRKTBUG_PG_BNDY */
+ rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+
+ /* Beacon Head for TXDMA */
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+}
+
+static bool _rtl92du_llt_table_init(struct ieee80211_hw *hw, u8 txpktbuf_bndy)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ bool status;
+ u8 maxpage;
+
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ maxpage = 255;
+ else
+ maxpage = 127;
+
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = rtl92d_llt_write(hw, i, i + 1);
+ if (!status)
+ return status;
+ }
+
+ /* end of list */
+ status = rtl92d_llt_write(hw, txpktbuf_bndy - 1, 0xFF);
+ if (!status)
+ return status;
+
+ /* Make the other pages as ring buffer
+ * This ring buffer is used as beacon buffer if we
+ * config this MAC as two MAC transfer.
+ * Otherwise used as local loopback buffer.
+ */
+ for (i = txpktbuf_bndy; i < maxpage; i++) {
+ status = rtl92d_llt_write(hw, i, i + 1);
+ if (!status)
+ return status;
+ }
+
+ /* Let last entry point to the start entry of ring buffer */
+ status = rtl92d_llt_write(hw, maxpage, txpktbuf_bndy);
+ if (!status)
+ return status;
+
+ return true;
+}
+
+static void _rtl92du_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq,
+ u16 bkq, u16 viq, u16 voq,
+ u16 mgtq, u16 hiq)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value16;
+
+ value16 = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7;
+ u16p_replace_bits(&value16, beq, TXDMA_BEQ_MAP);
+ u16p_replace_bits(&value16, bkq, TXDMA_BKQ_MAP);
+ u16p_replace_bits(&value16, viq, TXDMA_VIQ_MAP);
+ u16p_replace_bits(&value16, voq, TXDMA_VOQ_MAP);
+ u16p_replace_bits(&value16, mgtq, TXDMA_MGQ_MAP);
+ u16p_replace_bits(&value16, hiq, TXDMA_HIQ_MAP);
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16);
+}
+
+static void _rtl92du_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value;
+
+ switch (queue_sel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ return;
+ }
+ _rtl92du_init_chipn_reg_priority(hw, value, value, value, value,
+ value, value);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92du_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 beq, bkq, viq, voq, mgtq, hiq;
+ u16 valuehi, valuelow;
+
+ switch (queue_sel) {
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valuehi = QUEUE_HIGH;
+ valuelow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valuehi = QUEUE_NORMAL;
+ valuelow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valuehi = QUEUE_HIGH;
+ valuelow = QUEUE_NORMAL;
+ break;
+ }
+
+ beq = valuelow;
+ bkq = valuelow;
+ viq = valuehi;
+ voq = valuehi;
+ mgtq = valuehi;
+ hiq = valuehi;
+
+ _rtl92du_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92du_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 beq, bkq, viq, voq, mgtq, hiq;
+
+ beq = QUEUE_LOW;
+ bkq = QUEUE_LOW;
+ viq = QUEUE_NORMAL;
+ voq = QUEUE_HIGH;
+ mgtq = QUEUE_HIGH;
+ hiq = QUEUE_HIGH;
+
+ _rtl92du_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92du_init_queue_priority(struct ieee80211_hw *hw,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ switch (out_ep_num) {
+ case 1:
+ _rtl92du_init_chipn_one_out_ep_priority(hw, queue_sel);
+ break;
+ case 2:
+ _rtl92du_init_chipn_two_out_ep_priority(hw, queue_sel);
+ break;
+ case 3:
+ _rtl92du_init_chipn_three_out_ep_priority(hw, queue_sel);
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+}
+
+static void _rtl92du_init_wmac_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ mac->rx_conf = RCR_APM | RCR_AM | RCR_AB | RCR_ADF | RCR_APP_ICV |
+ RCR_AMF | RCR_HTC_LOC_CTRL | RCR_APP_MIC |
+ RCR_APP_PHYST_RXFF | RCR_APPFCS;
+
+ rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+
+ /* Set Multicast Address. */
+ rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff);
+}
+
+static void _rtl92du_init_adaptive_ctrl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 val32;
+
+ val32 = rtl_read_dword(rtlpriv, REG_RRSR);
+ val32 &= ~0xfffff;
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ val32 |= 0xffff0; /* No CCK */
+ else
+ val32 |= 0xffff1;
+ rtl_write_dword(rtlpriv, REG_RRSR, val32);
+
+ /* Set Spec SIFS (used in NAV) */
+ rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010);
+
+ /* Retry limit 0x30 */
+ rtl_write_word(rtlpriv, REG_RL, 0x3030);
+}
+
+static void _rtl92du_init_edca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 val16;
+
+ /* Disable EDCCA count down, to reduce collison and retry */
+ val16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+ val16 |= DIS_EDCA_CNT_DWN;
+ rtl_write_word(rtlpriv, REG_RD_CTRL, val16);
+
+ /* CCK SIFS shall always be 10us. */
+ rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x0a0a);
+ /* Set SIFS for OFDM */
+ rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010);
+
+ rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
+
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
+
+ /* TXOP */
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
+
+ rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x2);
+ rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);
+}
+
+static void _rtl92du_init_retry_function(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val8;
+
+ val8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
+ val8 |= EN_AMPDU_RTY_NEW;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, val8);
+
+ rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+}
+
+static void _rtl92du_init_operation_mode(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, BW_OPMODE_20MHZ);
+
+ switch (rtlpriv->phy.rf_type) {
+ case RF_1T2R:
+ case RF_1T1R:
+ rtlhal->minspace_cfg = (MAX_MSS_DENSITY_1T << 3);
+ break;
+ case RF_2T2R:
+ case RF_2T2R_GREEN:
+ rtlhal->minspace_cfg = (MAX_MSS_DENSITY_2T << 3);
+ break;
+ }
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, rtlhal->minspace_cfg);
+}
+
+static void _rtl92du_init_beacon_parameters(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
+
+ rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x3c02);
+ rtl_write_byte(rtlpriv, REG_DRVERLYINT, 0x05);
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x03);
+
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+}
+
+static void _rtl92du_init_ampdu_aggregation(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ /* Aggregation threshold */
+ if (rtlhal->macphymode == DUALMAC_DUALPHY)
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x66525541);
+ else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x44444441);
+ else
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x88728841);
+
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+}
+
+static bool _rtl92du_init_power_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short wordtmp;
+ unsigned char bytetmp;
+ u16 retry = 0;
+
+ do {
+ if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN)
+ break;
+
+ if (retry++ > 1000)
+ return false;
+ } while (true);
+
+ /* Unlock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ /* SPS0_CTRL 0x11[7:0] = 0x2b enable SPS into PWM mode */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+
+ msleep(1);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+ if ((bytetmp & LDV12_EN) == 0) {
+ bytetmp |= LDV12_EN;
+ rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, bytetmp);
+
+ msleep(1);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
+ bytetmp &= ~ISO_MD2PP;
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, bytetmp);
+ }
+
+ /* Auto enable WLAN */
+ wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ wordtmp |= APFM_ONMAC;
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, wordtmp);
+
+ wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ retry = 0;
+ while ((wordtmp & APFM_ONMAC) && retry < 1000) {
+ retry++;
+ wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ }
+
+ /* Release RF digital isolation */
+ wordtmp = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
+ wordtmp &= ~ISO_DIOR;
+ rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, wordtmp);
+
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ wordtmp = rtl_read_word(rtlpriv, REG_CR);
+ wordtmp |= HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
+ PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC;
+ rtl_write_word(rtlpriv, REG_CR, wordtmp);
+
+ return true;
+}
+
+static bool _rtl92du_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val8;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ val8 = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ val8 &= ~(FEN_MREGEN >> 8);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, val8);
+
+ /* For s3/s4 may reset mac, Reg0xf8 may be set to 0,
+ * so reset macphy control reg here.
+ */
+ rtl92d_phy_config_macphymode(hw);
+
+ rtl92du_phy_set_poweron(hw);
+
+ if (!_rtl92du_init_power_on(hw)) {
+ pr_err("Failed to init power on!\n");
+ return false;
+ }
+
+ rtl92d_phy_config_maccoexist_rfpage(hw);
+
+ return true;
+}
+
+int rtl92du_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 val8, txpktbuf_bndy;
+ int err, i;
+ u32 val32;
+ u16 val16;
+
+ mutex_lock(rtlpriv->mutex_for_hw_init);
+
+ /* we should do iqk after disable/enable */
+ rtl92d_phy_reset_iqk_result(hw);
+
+ if (!_rtl92du_init_mac(hw)) {
+ pr_err("Init MAC failed\n");
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+ return 1;
+ }
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY)
+ txpktbuf_bndy = 249;
+ else
+ txpktbuf_bndy = 123;
+
+ if (!_rtl92du_llt_table_init(hw, txpktbuf_bndy)) {
+ pr_err("Init LLT failed\n");
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+ return 1;
+ }
+
+ err = rtl92du_download_fw(hw);
+
+ /* return fail only when part number check fail */
+ if (err && rtl_read_byte(rtlpriv, 0x1c5) == 0xe0) {
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW..\n");
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+ return 1;
+ }
+ rtlhal->last_hmeboxnum = 0;
+ rtlpriv->psc.fw_current_inpsmode = false;
+
+ rtl92du_phy_mac_config(hw);
+
+ /* Set reserved page for each queue */
+ _rtl92du_init_queue_reserved_page(hw, rtlusb->out_ep_nums,
+ rtlusb->out_queue_sel);
+
+ _rtl92du_init_tx_buffer_boundary(hw, txpktbuf_bndy);
+
+ _rtl92du_init_queue_priority(hw, rtlusb->out_ep_nums,
+ rtlusb->out_queue_sel);
+
+ /* Set Tx/Rx page size (Tx must be 128 Bytes,
+ * Rx can be 64, 128, 256, 512, 1024 bytes)
+ */
+ rtl_write_byte(rtlpriv, REG_PBP, 0x11);
+
+ /* Get Rx PHY status in order to report RSSI and others. */
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_HIMR, 0xffffffff);
+
+ val8 = rtl_read_byte(rtlpriv, MSR);
+ val8 &= ~MSR_MASK;
+ val8 |= MSR_INFRA;
+ rtl_write_byte(rtlpriv, MSR, val8);
+
+ _rtl92du_init_wmac_setting(hw);
+ _rtl92du_init_adaptive_ctrl(hw);
+ _rtl92du_init_edca(hw);
+
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x10080404);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
+ rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x08070605);
+
+ _rtl92du_init_retry_function(hw);
+ /* _InitUsbAggregationSetting(padapter); no aggregation for now */
+ _rtl92du_init_operation_mode(hw);
+ _rtl92du_init_beacon_parameters(hw);
+ _rtl92du_init_ampdu_aggregation(hw);
+
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
+
+ /* Unit: 256 us => 256 ms total. */
+ rtl_write_word(rtlpriv, REG_PKT_VO_VI_LIFE_TIME, 0x0400);
+ rtl_write_word(rtlpriv, REG_PKT_BE_BK_LIFE_TIME, 0x0400);
+
+ /* Hardware-controlled blinking. */
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8282);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, 0x82);
+
+ val32 = rtl_read_dword(rtlpriv, REG_TXDMA_OFFSET_CHK);
+ val32 |= DROP_DATA_EN;
+ rtl_write_dword(rtlpriv, REG_TXDMA_OFFSET_CHK, val32);
+
+ if (mac->rdg_en) {
+ rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xff);
+ rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
+ rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
+ }
+
+ for (i = 0; i < 4; i++)
+ rtl_write_dword(rtlpriv, REG_ARFR0 + i * 4, 0x1f8ffff0);
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ if (rtlusb->out_ep_nums == 2)
+ rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03066666);
+ else
+ rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0x8888);
+ } else {
+ rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0x5555);
+ }
+
+ val8 = rtl_read_byte(rtlpriv, 0x605);
+ val8 |= 0xf0;
+ rtl_write_byte(rtlpriv, 0x605, val8);
+
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x30);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x30);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+
+ /* Temporary fix for the high queue and mgnt queue getting corrupted; it may
+ * cause a hang when the SW beacon uses high_Q while other frames use
+ * mgnt_Q, or the SW beacon uses mgnt_Q while other frames use high_Q.
+ */
+ rtl_write_byte(rtlpriv, REG_DIS_TXREQ_CLR, 0x10);
+ val16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+ val16 |= BIT(12);
+ rtl_write_word(rtlpriv, REG_RD_CTRL, val16);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0);
+
+ /* usb suspend idle time count for bitfile0927 */
+ val8 = rtl_read_byte(rtlpriv, 0xfe56);
+ val8 |= BIT(0) | BIT(1);
+ rtl_write_byte(rtlpriv, 0xfe56, val8);
+
+ if (rtlhal->earlymode_enable) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EarlyMode Enabled!!!\n");
+
+ val8 = rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL);
+ val8 |= 0x1f;
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, val8);
+
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL + 3, 0x80);
+
+ val8 = rtl_read_byte(rtlpriv, 0x605);
+ val8 |= 0x40;
+ rtl_write_byte(rtlpriv, 0x605, val8);
+ } else {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0);
+ }
+
+ rtl92du_phy_bb_config(hw);
+
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ /* set before initialize RF */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+
+ /* config RF */
+ rtl92du_phy_rf_config(hw);
+
+ /* set default value after initialize RF */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
+
+ /* After load BB, RF params, we need to do more for 92D. */
+ rtl92du_update_bbrf_configuration(hw);
+
+ rtlphy->rfreg_chnlval[0] =
+ rtl_get_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[1] =
+ rtl_get_rfreg(hw, RF90_PATH_B, RF_CHNLBW, RFREG_OFFSET_MASK);
+
+ /*---- Set CCK and OFDM Block "ON"----*/
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+
+ /* reset hw sec */
+ rtl_cam_reset_all_entry(hw);
+ rtl92d_enable_hw_security_config(hw);
+
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+
+ /* schmitt trigger, improve tx evm for 92du */
+ val8 = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL);
+ val8 |= BIT(1);
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, val8);
+
+ /* Disable bar */
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0xffff);
+
+ /* Nav limit */
+ rtl_write_byte(rtlpriv, REG_NAV_CTRL + 2, 0);
+ rtl_write_byte(rtlpriv, ROFDM0_XATXAFE + 3, 0x50);
+
+ /* Read the EEPROM TX power index and PHY_REG_PG.txt to capture the
+ * correct TX power index for the different rate sets.
+ */
+ rtl92d_phy_get_hw_reg_originalvalue(hw);
+
+ ppsc->rfpwr_state = ERFON;
+
+ /* do IQK for 2.4G for better scan result */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl92du_phy_iq_calibrate(hw);
+
+ rtl92du_phy_lc_calibrate(hw, IS_92D_SINGLEPHY(rtlhal->version));
+
+ rtl92du_phy_init_pa_bias(hw);
+
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+
+ rtl92du_dm_init(hw);
+
+ /* For 2 PORT TSF SYNC */
+ rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1818);
+ rtlusb->reg_bcn_ctrl_val = 0x18;
+
+ udelay(500);
+
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_write_dword(rtlpriv, RFPGA1_TXINFO,
+ rtl_read_dword(rtlpriv, RFPGA1_TXINFO) & ~BIT(30));
+
+ rtl_write_dword(rtlpriv, RFPGA0_TXGAINSTAGE,
+ rtl_read_dword(rtlpriv, RFPGA0_TXGAINSTAGE) & ~BIT(31));
+
+ rtl_write_dword(rtlpriv, ROFDM0_XBTXAFE, 0xa0e40000);
+ }
+
+ val32 = rtl_read_dword(rtlpriv, REG_FWHW_TXQ_CTRL);
+ val32 |= BIT(12);
+ rtl_write_dword(rtlpriv, REG_FWHW_TXQ_CTRL, val32);
+
+ return err;
+}
+
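+/* Program the MSR network-type bits and the beacon TX functions for the
+ * given interface type. Returns 0 on success, 1 for unsupported types.
+ */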
+static int _rtl92du_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+
+ bt_msr &= 0xfc;
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ rtl92d_stop_tx_beacon(hw);
+ _rtl92du_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC ||
+ type == NL80211_IFTYPE_AP) {
+ rtl92d_resume_tx_beacon(hw);
+ _rtl92du_disable_bcn_sub_func(hw);
+ } else {
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
+ type);
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
+ break;
+ default:
+ pr_err("Network type %d not supported!\n", type);
+ return 1;
+ }
+ rtl_write_byte(rtlpriv, MSR, bt_msr);
+
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+
+ if ((bt_msr & MSR_MASK) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+
+ return 0;
+}
+
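+/* Enable or disable BSSID filtering in the RCR, adjusting the DIS_TSF_UDT
+ * beacon control bit to match.
+ */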
+void rtl92du_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 reg_rcr;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
+ if (check_bssid) {
+ reg_rcr |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, DIS_TSF_UDT);
+ } else {
+ reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ _rtl92du_set_bcn_ctrl_reg(hw, DIS_TSF_UDT, 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
+ }
+}
+
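+/* Apply the interface type to the hardware and enable BSSID filtering only
+ * when linked in a non-AP role.
+ */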
+int rtl92du_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (_rtl92du_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ /* check bssid */
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl92du_set_check_bssid(hw, true);
+ } else {
+ rtl92du_set_check_bssid(hw, false);
+ }
+
+ return 0;
+}
+
+/* Do IQK or reload the IQK result. */
+/* The Windows driver only calls rtl92d_phy_reload_iqk_setting in the
+ * set-channel path, but the timing there is very strict, so we also
+ * do it here.
+ */
+void rtl92du_linked_set_reg(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 channel = rtlphy->current_channel;
+ u8 indexforchannel;
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
+ if (!rtlphy->iqk_matrix[indexforchannel].iqk_done) {
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
+ "Do IQK for channel:%d\n", channel);
+ rtl92du_phy_iq_calibrate(hw);
+ }
+}
+
+void rtl92du_enable_interrupt(struct ieee80211_hw *hw)
+{
+ /* Nothing to do. */
+}
+
+void rtl92du_disable_interrupt(struct ieee80211_hw *hw)
+{
+ /* Nothing to do. */
+}
+
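+/* Power-off sequence: stop the 8051 (resetting the MAC if it will not
+ * halt), park the GPIOs and LEDs, switch the SPS to PFM mode and lock the
+ * ISO/CLK/power control registers again.
+ */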
+static void _rtl92du_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 retry = 100;
+ u8 u1b_tmp;
+ u16 val16;
+ u32 val32;
+
+ rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+
+ /* If the FW is running from RAM code, reset the 8051. */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) {
+ rtl_write_byte(rtlpriv, REG_FSIMR, 0);
+
+ /* Disable the other HRCV interrupts so they do not interfere with the 8051 reset. */
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0x20);
+
+ /* Clear the mask to prevent an incorrect FW write operation. */
+ rtl_write_byte(rtlpriv, REG_FTIMR, 0);
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+
+ /* Writing 0x20 to (REG_HMETFR + 3) resets the 8051. */
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ while (val16 & FEN_CPUEN) {
+ retry--;
+ if (retry == 0)
+ break;
+ udelay(50);
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ }
+
+ if (retry == 0) {
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0);
+
+ /* If the 8051 reset failed, reset the MAC directly. */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x50);
+
+ mdelay(10);
+ }
+ }
+
+ /* reset MCU, MAC register, DCORE */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
+
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+ /* Pull the GPIO pins to a balanced level and set up LED control. */
+
+ /* Disable GPIO[7:0] */
+ rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL + 2, 0x0000);
+ val32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL);
+ u32p_replace_bits(&val32, val32 & 0xff, 0x0000ff00);
+ u32p_replace_bits(&val32, 0xff, 0x00ff0000);
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, val32);
+
+ /* Disable GPIO[10:8] */
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, 0);
+ val16 = rtl_read_word(rtlpriv, REG_GPIO_IO_SEL);
+ u16p_replace_bits(&val16, val16 & 0xf, 0x00f0);
+ u16p_replace_bits(&val16, 0xf, 0x0780);
+ rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, val16);
+
+ /* Disable LED 0, 1, and 2 */
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8888);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, 0x88);
+
+ /* Disable analog sequence */
+
+ /* enter PFM mode */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+
+ rtl_write_word(rtlpriv, REG_APS_FSMCO,
+ APDM_HOST | AFSM_HSUS | PFM_ALDN);
+
+ /* lock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "In PowerOff,reg0x%x=%X\n",
+ REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL));
+
+ /* 0x17[7]: 1'b1 = power off in progress, 1'b0 = power off complete */
+ if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) {
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ u1b_tmp &= ~BIT(7);
+ rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1b_tmp);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+ }
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n");
+}
+
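+/* Disable the card: drop the link, pause TX, shut down RF/BB for this MAC
+ * and run the full power-off sequence when rtl92du_phy_check_poweroff()
+ * allows it.
+ */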
+void rtl92du_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+ u32 val32;
+ u16 val16;
+ u8 val8;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl92du_set_media_status(hw, opmode);
+
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ /* Power sequence for each MAC. */
+ /* a. stop tx DMA */
+ /* b. close RF */
+ /* c. clear rx buf */
+ /* d. stop rx DMA */
+ /* e. reset MAC */
+
+ val16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG);
+ val16 &= ~BIT(12);
+ rtl_write_word(rtlpriv, REG_GPIO_MUXCFG, val16);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xff);
+ udelay(500);
+ rtl_write_byte(rtlpriv, REG_CR, 0);
+
+ /* RF OFF sequence */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x00);
+
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+
+ val8 = FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTN;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, val8);
+
+ /* MAC0 cannot do a global reset; MAC1 can. */
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY ||
+ rtlhal->interfaceindex == 1) {
+ /* Clock gating should be done before the BB reset. */
+ val32 = rtl_read_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER);
+ val32 |= BIT(31);
+ rtl_write_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER, val32);
+
+ val8 &= ~FEN_BB_GLB_RSTN;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, val8);
+ }
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n");
+ if (!rtl92du_phy_check_poweroff(hw))
+ return;
+
+ _rtl92du_poweroff_adapter(hw);
+}
+
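+/* Program the ATIM window, beacon interval, BCNTCFG and the RX TSF offsets
+ * used for beacon reception.
+ */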
+void rtl92du_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2;
+ rtl92du_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x20);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x30);
+ else
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x20);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+}
+
+void rtl92du_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
+ rtl92du_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl92du_enable_interrupt(hw);
+}
+
+void rtl92du_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ /* Nothing to do here. */
+}
+
+void rtl92du_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* Chip version reading is done in rtl92d_read_eeprom_info. */
+
+ rtlpriv->rtlhal.hw_type = HARDWARE_TYPE_RTL8192DU;
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h
new file mode 100644
index 000000000000..80ed00c90c16
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_HW_H__
+#define __RTL92DU_HW_H__
+
+void rtl92du_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92du_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92du_read_chip_version(struct ieee80211_hw *hw);
+int rtl92du_hw_init(struct ieee80211_hw *hw);
+void rtl92du_card_disable(struct ieee80211_hw *hw);
+void rtl92du_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92du_disable_interrupt(struct ieee80211_hw *hw);
+int rtl92du_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92du_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl92du_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92du_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92du_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl92du_linked_set_reg(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c
new file mode 100644
index 000000000000..6c12dfbd6367
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "led.h"
+
+void rtl92du_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
+{
+ /* The hardware has control. */
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h
new file mode 100644
index 000000000000..d7ebc8afcc7b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_LED_H__
+#define __RTL92DU_LED_H__
+
+void rtl92du_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
new file mode 100644
index 000000000000..289ec71ce3e5
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
@@ -0,0 +1,3123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../ps.h"
+#include "../core.h"
+#include "../efuse.h"
+#include "../usb.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/rf_common.h"
+#include "phy.h"
+#include "rf.h"
+#include "table.h"
+
+#define MAX_RF_IMR_INDEX 12
+#define MAX_RF_IMR_INDEX_NORMAL 13
+#define RF_REG_NUM_FOR_C_CUT_5G 6
+#define RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA 7
+#define RF_REG_NUM_FOR_C_CUT_2G 5
+#define RF_CHNL_NUM_5G 19
+#define RF_CHNL_NUM_5G_40M 17
+#define CV_CURVE_CNT 64
+
+static const u32 rf_reg_for_5g_swchnl_normal[MAX_RF_IMR_INDEX_NORMAL] = {
+ 0, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x0
+};
+
+static const u8 rf_reg_for_c_cut_5g[RF_REG_NUM_FOR_C_CUT_5G] = {
+ RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G4, RF_SYN_G5, RF_SYN_G6
+};
+
+static const u8 rf_reg_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = {
+ RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G7, RF_SYN_G8
+};
+
+static const u8 rf_for_c_cut_5g_internal_pa[RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = {
+ 0x0B, 0x48, 0x49, 0x4B, 0x03, 0x04, 0x0E
+};
+
+static const u32 rf_reg_mask_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = {
+ BIT(19) | BIT(18) | BIT(17) | BIT(14) | BIT(1),
+ BIT(10) | BIT(9),
+ BIT(18) | BIT(17) | BIT(16) | BIT(1),
+ BIT(2) | BIT(1),
+ BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11)
+};
+
+static const u8 rf_chnl_5g[RF_CHNL_NUM_5G] = {
+ 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108,
+ 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 rf_chnl_5g_40m[RF_CHNL_NUM_5G_40M] = {
+ 38, 42, 46, 50, 54, 58, 62, 102, 106, 110, 114,
+ 118, 122, 126, 130, 134, 138
+};
+
+static const u32 rf_reg_pram_c_5g[5][RF_REG_NUM_FOR_C_CUT_5G] = {
+ {0xE43BE, 0xFC638, 0x77C0A, 0xDE471, 0xd7110, 0x8EB04},
+ {0xE43BE, 0xFC078, 0xF7C1A, 0xE0C71, 0xD7550, 0xAEB04},
+ {0xE43BF, 0xFF038, 0xF7C0A, 0xDE471, 0xE5550, 0xAEB04},
+ {0xE43BF, 0xFF079, 0xF7C1A, 0xDE471, 0xE5550, 0xAEB04},
+ {0xE43BF, 0xFF038, 0xF7C1A, 0xDE471, 0xd7550, 0xAEB04}
+};
+
+static const u32 rf_reg_param_for_c_cut_2g[3][RF_REG_NUM_FOR_C_CUT_2G] = {
+ {0x643BC, 0xFC038, 0x77C1A, 0x41289, 0x01840},
+ {0x643BC, 0xFC038, 0x07C1A, 0x41289, 0x01840},
+ {0x243BC, 0xFC438, 0x07C1A, 0x4128B, 0x0FC41}
+};
+
+static const u32 rf_syn_g4_for_c_cut_2g = 0xD1C31 & 0x7FF;
+
+static const u32 rf_pram_c_5g_int_pa[3][RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = {
+ {0x01a00, 0x40443, 0x00eb5, 0x89bec, 0x94a12, 0x94a12, 0x94a12},
+ {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a52, 0x94a52, 0x94a52},
+ {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a12, 0x94a12, 0x94a12}
+};
+
+/* [patha+b][reg] */
+static const u32 rf_imr_param_normal[3][MAX_RF_IMR_INDEX_NORMAL] = {
+ /* channels 1-14. */
+ {
+ 0x70000, 0x00ff0, 0x4400f, 0x00ff0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x64888, 0xe266c, 0x00090, 0x22fff
+ },
+ /* channels 36-64 */
+ {
+ 0x70000, 0x22880, 0x4470f, 0x55880, 0x00070, 0x88000,
+ 0x0, 0x88080, 0x70000, 0x64a82, 0xe466c, 0x00090,
+ 0x32c9a
+ },
+ /* channels 100-165 */
+ {
+ 0x70000, 0x44880, 0x4477f, 0x77880, 0x00070, 0x88000,
+ 0x0, 0x880b0, 0x0, 0x64b82, 0xe466c, 0x00090, 0x32c9a
+ }
+};
+
+static const u32 targetchnl_5g[TARGET_CHNL_NUM_5G] = {
+ 25141, 25116, 25091, 25066, 25041,
+ 25016, 24991, 24966, 24941, 24917,
+ 24892, 24867, 24843, 24818, 24794,
+ 24770, 24765, 24721, 24697, 24672,
+ 24648, 24624, 24600, 24576, 24552,
+ 24528, 24504, 24480, 24457, 24433,
+ 24409, 24385, 24362, 24338, 24315,
+ 24291, 24268, 24245, 24221, 24198,
+ 24175, 24151, 24128, 24105, 24082,
+ 24059, 24036, 24013, 23990, 23967,
+ 23945, 23922, 23899, 23876, 23854,
+ 23831, 23809, 23786, 23764, 23741,
+ 23719, 23697, 23674, 23652, 23630,
+ 23608, 23586, 23564, 23541, 23519,
+ 23498, 23476, 23454, 23432, 23410,
+ 23388, 23367, 23345, 23323, 23302,
+ 23280, 23259, 23237, 23216, 23194,
+ 23173, 23152, 23130, 23109, 23088,
+ 23067, 23046, 23025, 23003, 22982,
+ 22962, 22941, 22920, 22899, 22878,
+ 22857, 22837, 22816, 22795, 22775,
+ 22754, 22733, 22713, 22692, 22672,
+ 22652, 22631, 22611, 22591, 22570,
+ 22550, 22530, 22510, 22490, 22469,
+ 22449, 22429, 22409, 22390, 22370,
+ 22350, 22336, 22310, 22290, 22271,
+ 22251, 22231, 22212, 22192, 22173,
+ 22153, 22134, 22114, 22095, 22075,
+ 22056, 22037, 22017, 21998, 21979,
+ 21960, 21941, 21921, 21902, 21883,
+ 21864, 21845, 21826, 21807, 21789,
+ 21770, 21751, 21732, 21713, 21695,
+ 21676, 21657, 21639, 21620, 21602,
+ 21583, 21565, 21546, 21528, 21509,
+ 21491, 21473, 21454, 21436, 21418,
+ 21400, 21381, 21363, 21345, 21327,
+ 21309, 21291, 21273, 21255, 21237,
+ 21219, 21201, 21183, 21166, 21148,
+ 21130, 21112, 21095, 21077, 21059,
+ 21042, 21024, 21007, 20989, 20972,
+ 25679, 25653, 25627, 25601, 25575,
+ 25549, 25523, 25497, 25471, 25446,
+ 25420, 25394, 25369, 25343, 25318,
+ 25292, 25267, 25242, 25216, 25191,
+ 25166
+};
+
+/* channel 1~14 */
+static const u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
+ 26084, 26030, 25976, 25923, 25869, 25816, 25764,
+ 25711, 25658, 25606, 25554, 25502, 25451, 25328
+};
+
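+/* Read a baseband register field. While one MAC is initializing the other
+ * PHY's radio, the address is redirected via MAC1_ACCESS_PHY0 or
+ * MAC0_ACCESS_PHY1.
+ */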
+u32 rtl92du_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 returnvalue, originalvalue, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
+
+ if (rtlhal->during_mac1init_radioa)
+ regaddr |= MAC1_ACCESS_PHY0;
+ else if (rtlhal->during_mac0init_radiob)
+ regaddr |= MAC0_ACCESS_PHY1;
+
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
+ return returnvalue;
+}
+
+void rtl92du_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 originalvalue, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+
+ if (rtlhal->during_mac1init_radioa)
+ regaddr |= MAC1_ACCESS_PHY0;
+ else if (rtlhal->during_mac0init_radiob)
+ regaddr |= MAC0_ACCESS_PHY1;
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ data = (originalvalue & (~bitmask)) |
+ ((data << bitshift) & bitmask);
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+}
+
+/* Write a single byte to avoid mistakenly overwriting Reg0x800 on the 92D. */
+static void rtl92du_phy_set_bb_reg_1byte(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift, offset;
+ u8 value;
+
+ /* The bitmask must fall within a single byte: bit0~bit7, bit8~bit15,
+ * bit16~bit23, or bit24~bit31.
+ */
+ bitshift = calculate_bit_shift(bitmask);
+ offset = bitshift / 8;
+
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ data = (originalvalue & (~bitmask)) | ((data << bitshift) & bitmask);
+
+ value = data >> (8 * offset);
+
+ rtl_write_byte(rtlpriv, regaddr + offset, value);
+}
+
+bool rtl92du_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 arraylength;
+ const u32 *ptrarray;
+ u32 i;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+
+ arraylength = MAC_2T_ARRAYLENGTH;
+ ptrarray = rtl8192du_mac_2tarray;
+
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
+
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
+ /* improve 2-stream TX EVM */
+ /* rtl_write_byte(rtlpriv, 0x14,0x71); */
+ /* AMPDU aggregation number 9 */
+ /* rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, MAX_AGGR_NUM); */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x0B);
+ } else {
+ /* The 92D needs more testing to decide this number. */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x07);
+ }
+
+ return true;
+}
+
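+/* Load the PHY_REG or AGC table. MAC0 uses the combined 2G/5G AGC table,
+ * while MAC1 picks the 2G or 5G table based on the current band.
+ */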
+static bool _rtl92du_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 phy_reg_arraylen, agctab_arraylen = 0;
+ const u32 *agctab_array_table = NULL;
+ const u32 *phy_regarray_table;
+ int i;
+
+ /* Normal chip: MAC0 uses AGC_TAB.txt for both the 2G and 5G bands. */
+ if (rtlhal->interfaceindex == 0) {
+ agctab_arraylen = AGCTAB_ARRAYLENGTH;
+ agctab_array_table = rtl8192du_agctab_array;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC0, Rtl819XAGCTAB_Array\n");
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ agctab_arraylen = AGCTAB_2G_ARRAYLENGTH;
+ agctab_array_table = rtl8192du_agctab_2garray;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n");
+ } else {
+ agctab_arraylen = AGCTAB_5G_ARRAYLENGTH;
+ agctab_array_table = rtl8192du_agctab_5garray;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n");
+ }
+ }
+ phy_reg_arraylen = PHY_REG_2T_ARRAYLENGTH;
+ phy_regarray_table = rtl8192du_phy_reg_2tarray;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:Rtl819XPHY_REG_Array_PG\n");
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+ rtl_addr_delay(phy_regarray_table[i]);
+ rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+ phy_regarray_table[i + 1]);
+ udelay(1);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ for (i = 0; i < agctab_arraylen; i = i + 2) {
+ rtl_set_bbreg(hw, agctab_array_table[i],
+ MASKDWORD, agctab_array_table[i + 1]);
+
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "AGC table %u %u\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
+ }
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Normal Chip, loaded AGC table\n");
+ }
+ return true;
+}
+
+static bool _rtl92du_phy_config_bb_pg(struct ieee80211_hw *hw, u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ const u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ int i;
+
+ phy_regarray_pg_len = PHY_REG_ARRAY_PG_LENGTH;
+ phy_regarray_table_pg = rtl8192du_phy_reg_array_pg;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+ rtl_addr_delay(phy_regarray_table_pg[i]);
+ rtl92d_store_pwrindex_diffrate_offset(hw,
+ phy_regarray_table_pg[i],
+ phy_regarray_table_pg[i + 1],
+ phy_regarray_table_pg[i + 2]);
+ }
+ } else {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
+ }
+ return true;
+}
+
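+/* Write the PHY_REG table, the per-rate power offsets (when the efuse
+ * autoload succeeded) and the AGC table, then latch the CCK high-power flag.
+ */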
+static bool _rtl92du_phy_bb_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ bool ret;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
+ ret = _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_PHY_REG);
+ if (!ret) {
+ pr_err("Write BB Reg Fail!!\n");
+ return false;
+ }
+
+ if (!rtlefuse->autoload_failflag) {
+ rtlphy->pwrgroup_cnt = 0;
+ ret = _rtl92du_phy_config_bb_pg(hw, BASEBAND_CONFIG_PHY_REG);
+ }
+ if (!ret) {
+ pr_err("BB_PG Reg Fail!!\n");
+ return false;
+ }
+
+ ret = _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
+ if (!ret) {
+ pr_err("AGC Table Fail\n");
+ return false;
+ }
+
+ rtlphy->cck_high_power = (bool)rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200);
+
+ return true;
+}
+
+bool rtl92du_phy_bb_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool rtstatus;
+ u32 regvaldw;
+ u16 regval;
+ u8 value;
+
+ rtl92d_phy_init_bb_rf_register_definition(hw);
+
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+ regval | BIT(13) | BIT(0) | BIT(1));
+
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+
+ /* 0x1f bits 7 and 6 indicate MAC0/MAC1 driver ready. */
+ value = rtl_read_byte(rtlpriv, REG_RF_CTRL);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, value | RF_EN | RF_RSTB |
+ RF_SDMRSTB);
+
+ value = FEN_BB_GLB_RSTN | FEN_BBRSTB;
+ if (rtlhal->interface == INTF_PCI)
+ value |= FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE;
+ else if (rtlhal->interface == INTF_USB)
+ value |= FEN_USBA | FEN_USBD;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value);
+
+ regvaldw = rtl_read_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER);
+ regvaldw &= ~BIT(31);
+ rtl_write_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER, regvaldw);
+
+ /* To fix MAC loopback mode failure. */
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+ rtl_write_byte(rtlpriv, 0x15, 0xe9);
+
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ if (!(IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)) &&
+ rtlhal->interface == INTF_PCI) {
+ regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
+ rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
+ }
+
+ rtstatus = _rtl92du_phy_bb_config(hw);
+
+ /* Crystal calibration */
+ rtl_set_bbreg(hw, REG_AFE_XTAL_CTRL, 0xf0,
+ rtlpriv->efuse.crystalcap & 0x0f);
+ rtl_set_bbreg(hw, REG_AFE_PLL_CTRL, 0xf0000000,
+ (rtlpriv->efuse.crystalcap & 0xf0) >> 4);
+
+ return rtstatus;
+}
+
+bool rtl92du_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl92du_phy_rf6052_config(hw);
+}
+
+bool rtl92du_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum rf_content content,
+ enum radio_path rfpath)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 radioa_arraylen, radiob_arraylen;
+ const u32 *radioa_array_table;
+ const u32 *radiob_array_table;
+ int i;
+
+ radioa_arraylen = RADIOA_2T_ARRAYLENGTH;
+ radioa_array_table = rtl8192du_radioa_2tarray;
+ radiob_arraylen = RADIOB_2T_ARRAYLENGTH;
+ radiob_array_table = rtl8192du_radiob_2tarray;
+ if (rtlpriv->efuse.internal_pa_5g[0]) {
+ radioa_arraylen = RADIOA_2T_INT_PA_ARRAYLENGTH;
+ radioa_array_table = rtl8192du_radioa_2t_int_paarray;
+ }
+ if (rtlpriv->efuse.internal_pa_5g[1]) {
+ radiob_arraylen = RADIOB_2T_INT_PA_ARRAYLENGTH;
+ radiob_array_table = rtl8192du_radiob_2t_int_paarray;
+ }
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+
+ /* This only happens in DMDP mode when MAC0 starts on 2.4G and
+ * MAC1 starts on 5G: MAC0 has to set path A of both PHY0 and PHY1,
+ * or MAC1 has to set path A of both PHY0 and PHY1.
+ */
+ if (content == radiob_txt && rfpath == RF90_PATH_A) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> althougth Path A, we load radiob.txt\n");
+ radioa_arraylen = radiob_arraylen;
+ radioa_array_table = radiob_array_table;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radiob_arraylen; i = i + 2) {
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
+ }
+ break;
+ case RF90_PATH_C:
+ case RF90_PATH_D:
+ pr_err("switch case %#x not processed\n", rfpath);
+ break;
+ }
+
+ return true;
+}
+
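+/* Switch between 20 MHz and 40 MHz operation: update the MAC bandwidth
+ * opmode and RSC registers, the FPGA RF mode bits and the RF6052 bandwidth.
+ */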
+void rtl92du_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 reg_bw_opmode;
+ u8 reg_prsr_rsc;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "FALSE driver sleep or unload\n");
+ return;
+ }
+
+ rtlphy->set_bwmode_inprogress = true;
+
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
+
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+
+ reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
+ (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+ default:
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
+ break;
+ }
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ /* SET BIT10 BIT11 for receive cck */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10) | BIT(11), 3);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ /* Set Control channel to upper or lower.
+ * These settings are required only for 40MHz
+ */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCKSIDEBAND,
+ mac->cur_40_prime_sc >> 1);
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ /* SET BIT10 BIT11 for receive cck */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2,
+ BIT(10) | BIT(11), 0);
+ rtl_set_bbreg(hw, 0x818, BIT(26) | BIT(27),
+ mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER ? 2 : 1);
+ break;
+ default:
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
+ break;
+ }
+
+ rtl92d_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+
+ rtlphy->set_bwmode_inprogress = false;
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+}
+
+static void _rtl92du_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
+{
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN | BOFDMEN, 0);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
+}
+
+static void rtl92du_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 basic_rates;
+ u32 reg_mac;
+ u8 value8;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ rtlhal->bandset = band;
+ rtlhal->current_bandtype = band;
+ if (IS_92D_SINGLEPHY(rtlhal->version))
+ rtlhal->bandset = BAND_ON_BOTH;
+
+ /* stop RX/Tx */
+ _rtl92du_phy_stop_trx_before_changeband(hw);
+
+ /* reconfig BB/RF according to wireless mode */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ /* BB & RF Config */
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
+ else
+ /* 5G band */
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
+
+ if (rtlhal->interfaceindex == 1)
+ _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
+
+ rtl92du_update_bbrf_configuration(hw);
+
+ basic_rates = RRSR_6M | RRSR_12M | RRSR_24M;
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ basic_rates |= RRSR_1M | RRSR_2M | RRSR_5_5M | RRSR_11M;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *)&basic_rates);
+
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN | BOFDMEN, 0x3);
+
+ /* 20M BW. */
+ /* rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); */
+ rtlhal->reloadtxpowerindex = true;
+
+ reg_mac = rtlhal->interfaceindex == 0 ? REG_MAC0 : REG_MAC1;
+
+ /* Let the FW know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ value8 = rtl_read_byte(rtlpriv, reg_mac);
+ value8 |= BIT(1);
+ rtl_write_byte(rtlpriv, reg_mac, value8);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, reg_mac);
+ value8 &= ~BIT(1);
+ rtl_write_byte(rtlpriv, reg_mac, value8);
+ }
+ mdelay(1);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
+}
+
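+/* Reload the RF IMR parameters for the new channel's group (high-speed USB
+ * only). The OFDM/CCK blocks are gated off while the RF registers are
+ * rewritten.
+ */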
+static void _rtl92du_phy_reload_imr_setting(struct ieee80211_hw *hw,
+ u8 channel, u8 rfpath)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 group, i;
+
+ if (rtlusb->udev->speed != USB_SPEED_HIGH)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+
+ /* fc area 0xd2c */
+ if (channel >= 149)
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 2);
+ else
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 1);
+
+ /* Group 0 is left for channels 1-14. */
+ group = channel <= 64 ? 1 : 2;
+ for (i = 0; i < MAX_RF_IMR_INDEX_NORMAL; i++)
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i],
+ RFREG_OFFSET_MASK,
+ rf_imr_param_normal[group][i]);
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 3);
+ } else {
+ /* G band. */
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. IMR already setting %d\n",
+ rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+
+ if (!rtlpriv->rtlhal.load_imrandiqk_setting_for2g) {
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. %d\n",
+ rfpath);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0xf);
+
+ for (i = 0; i < MAX_RF_IMR_INDEX_NORMAL; i++) {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i],
+ RFREG_OFFSET_MASK,
+ rf_imr_param_normal[0][i]);
+ }
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 3);
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
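+/* Apply the C-cut synthesizer RF parameters for the target channel,
+ * handling the DMDP cases where one MAC must temporarily power on the
+ * other PHY.
+ */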
+static void _rtl92du_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 path = rtlhal->current_bandtype == BAND_ON_5G ? RF90_PATH_A
+ : RF90_PATH_B;
+ u32 u4regvalue, mask = 0x1C000, value = 0, u4tmp, u4tmp2;
+ bool need_pwr_down = false, internal_pa = false;
+ u32 regb30 = rtl_get_bbreg(hw, 0xb30, BIT(27));
+ u8 index = 0, i, rfpath;
+
+ if (rtlusb->udev->speed != USB_SPEED_HIGH)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
+ /* config path A for 5G */
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ u4tmp = rtlpriv->curveindex_5g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
+
+ for (i = 0; i < RF_CHNL_NUM_5G; i++) {
+ if (channel == rf_chnl_5g[i] && channel <= 140)
+ index = 0;
+ }
+ for (i = 0; i < RF_CHNL_NUM_5G_40M; i++) {
+ if (channel == rf_chnl_5g_40m[i] && channel <= 140)
+ index = 1;
+ }
+ if (channel == 149 || channel == 155 || channel == 161)
+ index = 2;
+ else if (channel == 151 || channel == 153 || channel == 163 ||
+ channel == 165)
+ index = 3;
+ else if (channel == 157 || channel == 159)
+ index = 4;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) {
+ need_pwr_down = rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+ /* Assume this case does not happen. */
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+ /* DMDP: if band = 5G, MAC0 needs to set PHY1 when regB30[27] = 1 */
+ if (regb30 && rtlhal->interfaceindex == 0) {
+ need_pwr_down = rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_5G; i++) {
+ if (i == 0 && rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK, 0xE439D);
+ } else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
+ u4tmp2 = (rf_reg_pram_c_5g[index][i] &
+ 0x7FF) | (u4tmp << 11);
+ if (channel == 36)
+ u4tmp2 &= ~(BIT(7) | BIT(6));
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK, u4tmp2);
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_pram_c_5g[index][i]);
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_5g[i],
+ rf_reg_pram_c_5g[index][i],
+ path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK));
+ }
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+
+ if (regb30 && rtlhal->interfaceindex == 0) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+
+ if (channel < 149)
+ value = 0x07;
+ else if (channel >= 149)
+ value = 0x02;
+ if (channel >= 36 && channel <= 64)
+ index = 0;
+ else if (channel >= 100 && channel <= 140)
+ index = 1;
+ else
+ index = 2;
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) /* MAC 1 5G */
+ internal_pa = rtlpriv->efuse.internal_pa_5g[1];
+ else
+ internal_pa =
+ rtlpriv->efuse.internal_pa_5g[rfpath];
+
+ if (internal_pa) {
+ for (i = 0;
+ i < RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA;
+ i++) {
+ if (rf_for_c_cut_5g_internal_pa[i] == 0x03 &&
+ channel >= 36 && channel <= 64)
+ rtl_set_rfreg(hw, rfpath,
+ rf_for_c_cut_5g_internal_pa[i],
+ RFREG_OFFSET_MASK,
+ 0x7bdef);
+ else
+ rtl_set_rfreg(hw, rfpath,
+ rf_for_c_cut_5g_internal_pa[i],
+ RFREG_OFFSET_MASK,
+ rf_pram_c_5g_int_pa[index][i]);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "offset 0x%x value 0x%x path %d index %d\n",
+ rf_for_c_cut_5g_internal_pa[i],
+ rf_pram_c_5g_int_pa[index][i],
+ rfpath, index);
+ }
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath, RF_TXPA_AG,
+ mask, value);
+ }
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+ u4tmp = rtlpriv->curveindex_2g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
+
+ if (channel == 1 || channel == 2 || channel == 4 ||
+ channel == 9 || channel == 10 || channel == 11 ||
+ channel == 12)
+ index = 0;
+ else if (channel == 3 || channel == 13 || channel == 14)
+ index = 1;
+ else if (channel >= 5 && channel <= 8)
+ index = 2;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ path = RF90_PATH_A;
+ if (rtlhal->interfaceindex == 0) {
+ need_pwr_down =
+ rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+ /* DMDP: if band = 2G, MAC1 needs to set PHY0 when regB30[27] = 1 */
+ if (regb30 && rtlhal->interfaceindex == 1) {
+ need_pwr_down =
+ rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+ }
+
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_2G; i++) {
+ if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_param_for_c_cut_2g[index][i] |
+ BIT(17));
+ else
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_param_for_c_cut_2g
+ [index][i]);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x mak 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_2g[i],
+ rf_reg_param_for_c_cut_2g[index][i],
+ rf_reg_mask_for_c_cut_2g[i], path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK));
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
+ rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
+
+ rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
+ RFREG_OFFSET_MASK,
+ rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 0) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+
+ if (regb30 && rtlhal->interfaceindex == 1) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 regeac, rege94, rege9c, regea4;
+ u8 result = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
+
+ if (rtlhal->interfaceindex == 0) {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x10008c1f);
+ } else {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x10008c22);
+ }
+ rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD,
+ configpathb ? 0x28160202 : 0x28160502);
+ /* path-B IQK setting */
+ if (configpathb) {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28160206);
+ }
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path A LOK & IQK\n",
+ IQK_DELAY_TIME);
+ mdelay(IQK_DELAY_TIME);
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ rege94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
+ rege9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
+ regea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
+
+ if (!(regeac & BIT(28)) &&
+ (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((rege9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
+ /* if Tx is OK, check whether Rx is OK */
+ if (!(regeac & BIT(27)) &&
+ (((regea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regeac & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A Rx IQK fail!!\n");
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
+ bool configpathb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 TXOKBIT = BIT(28), RXOKBIT = BIT(27);
+ u32 regeac, rege94, rege9c, regea4;
+ u8 timeout = 20, timecount = 0;
+ u8 retrycount = 2;
+ u8 result = 0;
+ u8 i;
+
+ if (rtlhal->interfaceindex == 1) { /* PHY1 */
+ TXOKBIT = BIT(31);
+ RXOKBIT = BIT(30);
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68160960);
+ /* path-B IQK setting */
+ if (configpathb) {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68110000);
+ }
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911);
+
+ /* path-A PA on */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x07000f60);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, 0x66e60e30);
+
+ for (i = 0; i < retrycount; i++) {
+ /* One shot, path A LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "One shot, path A LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path A LOK & IQK.\n",
+ IQK_DELAY_TIME);
+ mdelay(IQK_DELAY_TIME * 10);
+
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, BIT(26)) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ timecount = 0;
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASK_IQK_RESULT) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ rege94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
+ rege9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
+ regea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
+
+ if (!(regeac & TXOKBIT) &&
+ (((rege94 & 0x03FF0000) >> 16) != 0x142)) {
+ result |= 0x01;
+ } else { /* if Tx not OK, ignore Rx */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A Tx IQK fail!!\n");
+ continue;
+ }
+
+ /* if Tx is OK, check whether Rx is OK */
+ if (!(regeac & RXOKBIT) &&
+ (((regea4 & 0x03FF0000) >> 16) != 0x132)) {
+ result |= 0x02;
+ break;
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A Rx IQK fail!!\n");
+ }
+
+ /* path A PA off */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
+ rtlphy->iqk_bb_backup[0]);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD,
+ rtlphy->iqk_bb_backup[1]);
+
+ if (!(result & 0x01)) /* Tx IQK fail */
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+
+ if (!(result & 0x02)) { /* Rx IQK fail */
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A Rx IQK fail!! 0xe34 = %#x\n",
+ rtl_get_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD));
+ }
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_pathb_iqk(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 result = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path B LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_CONT, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, RIQK_AGC_CONT, MASKDWORD, 0x00000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
+ mdelay(IQK_DELAY_TIME);
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ regeb4 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
+ regebc = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
+ regec4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
+ regecc = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
+
+ if (!(regeac & BIT(31)) &&
+ (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((regebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(regeac & BIT(30)) &&
+ (((regec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B Rx IQK fail!!\n");
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 timeout = 20, timecount = 0;
+ u8 retrycount = 2;
+ u8 result = 0;
+ u8 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-B IQK setting!\n");
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68110000);
+
+ /* path-B IQK setting */
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68160960);
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911);
+
+ /* path-B PA on */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x0f600700);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, 0x061f0d30);
+
+ for (i = 0; i < retrycount; i++) {
+ /* One shot, path B LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "One shot, path A LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000);
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path B LOK & IQK.\n", 10);
+ mdelay(IQK_DELAY_TIME * 10);
+
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, BIT(29)) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ timecount = 0;
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASK_IQK_RESULT) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ regeb4 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
+ regebc = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
+ regec4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
+ regecc = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
+
+ if (!(regeac & BIT(31)) &&
+ (((regeb4 & 0x03FF0000) >> 16) != 0x142))
+ result |= 0x01;
+ else
+ continue;
+
+ if (!(regeac & BIT(30)) &&
+ (((regec4 & 0x03FF0000) >> 16) != 0x132)) {
+ result |= 0x02;
+ break;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B Rx IQK fail!!\n");
+ }
+
+ /* path B PA off */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
+ rtlphy->iqk_bb_backup[0]);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD,
+ rtlphy->iqk_bb_backup[2]);
+
+ if (!(result & 0x01))
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+
+ if (!(result & 0x02)) {
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B Rx IQK fail!! 0xe54 = %#x\n",
+ rtl_get_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD));
+ }
+
+ return result;
+}
+
+static void _rtl92du_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ const u32 *adda_reg,
+ u32 *adda_backup, u32 regnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Reload ADDA power saving parameters !\n");
+ for (i = 0; i < regnum; i++) {
+ /* path-A/B BB to initial gain */
+ if (adda_reg[i] == ROFDM0_XAAGCCORE1 ||
+ adda_reg[i] == ROFDM0_XBAGCCORE1)
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, 0x50);
+
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, adda_backup[i]);
+ }
+}
+
+static void _rtl92du_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Reload MAC parameters !\n");
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8)macbackup[i]);
+ rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl92du_phy_patha_standby(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n");
+
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+}
+
+static void _rtl92du_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 mode;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "BB Switch to %s mode!\n", pi_mode ? "PI" : "SI");
+ mode = pi_mode ? 0x01000100 : 0x01000000;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, MASKDWORD, mode);
+ rtl_set_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, MASKDWORD, mode);
+}
+
+static void _rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
+ u8 t, bool is2t)
+{
+ static const u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ RFPGA0_XCD_SWITCHCONTROL, RBLUE_TOOTH, RRX_WAIT_CCA,
+ RTX_CCK_RFON, RTX_CCK_BBON, RTX_OFDM_RFON, RTX_OFDM_BBON,
+ RTX_TO_RX, RTX_TO_TX, RRX_CCK, RRX_OFDM, RRX_WAIT_RIFS,
+ RRX_TO_RX, RSTANDBY, RSLEEP, RPMPD_ANAEN
+ };
+ static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG
+ };
+ static const u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE,
+ RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE,
+ RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4,
+ ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ const u32 retrycount = 2;
+ u8 patha_ok, pathb_ok;
+ u32 bbvalue;
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n");
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
+ is2t ? "2T2R" : "1T1R");
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ }
+ rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038);
+
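+ /* Record whether the RF serial interface is already using PI mode
+ * (RFPGA0_XA_HSSIPARAMETER1 bit 8).
+ */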
+ if (t == 0)
+ rtlphy->rfpi_enable = (u8)rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1, BIT(8));
+
+ /* Switch BB to PI mode to do IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, true);
+
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22204000);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
+ if (is2t) {
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD,
+ 0x00010000);
+ rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, MASKDWORD,
+ 0x00010000);
+ }
+
+ /* MAC settings */
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ /* Page B init */
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000);
+ if (is2t)
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x0f600000);
+
+ /* IQ calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+ rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800);
+
+ for (i = 0; i < retrycount; i++) {
+ patha_ok = _rtl92du_phy_patha_iqk(hw, is2t);
+ if (patha_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQK Success!!\n");
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][2] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2,
+ MASK_IQK_RESULT);
+ result[t][3] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2,
+ MASK_IQK_RESULT);
+ break;
+ } else if (i == (retrycount - 1) && patha_ok == 0x01) {
+ /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQK Only Tx Success!!\n");
+
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ }
+ }
+ if (patha_ok == 0x00)
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK failed!!\n");
+
+ if (is2t) {
+ _rtl92du_phy_patha_standby(hw);
+ /* Turn Path B ADDA on */
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+
+ for (i = 0; i < retrycount; i++) {
+ pathb_ok = _rtl92du_phy_pathb_iqk(hw);
+ if (pathb_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][6] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2,
+ MASK_IQK_RESULT);
+ result[t][7] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2,
+ MASK_IQK_RESULT);
+ break;
+ } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+ /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B Only Tx IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ }
+ }
+ if (pathb_ok == 0x00)
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK failed!!\n");
+ }
+
+ /* Back to BB mode, load original value */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK:Back to BB mode, load original value!\n");
+
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x000000);
+
+ if (t != 0) {
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, false);
+
+ /* Reload ADDA power saving parameters */
+ _rtl92du_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+
+ /* Reload MAC parameters */
+ _rtl92du_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ if (is2t)
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+
+ /* load 0xe30 IQC default value */
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x01008c00);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
+}
+
+static void _rtl92du_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
+ long result[][8], u8 t)
+{
+ static const u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ RFPGA0_XCD_SWITCHCONTROL, RBLUE_TOOTH, RRX_WAIT_CCA,
+ RTX_CCK_RFON, RTX_CCK_BBON, RTX_OFDM_RFON, RTX_OFDM_BBON,
+ RTX_TO_RX, RTX_TO_TX, RRX_CCK, RRX_OFDM, RRX_WAIT_RIFS,
+ RRX_TO_RX, RSTANDBY, RSLEEP, RPMPD_ANAEN
+ };
+ static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG
+ };
+ static const u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE,
+ RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE,
+ RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4,
+ ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+ u8 patha_ok, pathb_ok;
+ bool rf_path_div;
+ u32 bbvalue;
+
+ /* Note: IQ calibration must be performed after loading
+ * PHY_REG.txt, radio_a.txt and radio_b.txt.
+ */
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n");
+
+ mdelay(IQK_DELAY_TIME * 20);
+
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
+ is2t ? "2T2R" : "1T1R");
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ if (is2t)
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+ }
+
+ rf_path_div = rtl_get_bbreg(hw, 0xb30, BIT(27));
+ rtl92d_phy_path_adda_on(hw, adda_reg, !rf_path_div, is2t);
+
+ if (t == 0)
+ rtlphy->rfpi_enable = rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+
+ /* Switch BB to PI mode to do IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, true);
+
+ /* MAC settings */
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208000);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
+
+ /* Page A AP setting for IQK */
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000);
+ if (is2t) {
+ /* Page B AP setting for IQK */
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0);
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x20000000);
+ }
+
+ /* IQ calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+ rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x10007c00);
+ rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800);
+
+ patha_ok = _rtl92du_phy_patha_iqk_5g_normal(hw, is2t);
+ if (patha_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n");
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][2] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2,
+ MASK_IQK_RESULT);
+ result[t][3] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2,
+ MASK_IQK_RESULT);
+ } else if (patha_ok == 0x01) { /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQK Only Tx Success!!\n");
+
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x000000);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe70 = %#x\n",
+ rtl_get_bbreg(hw, RRX_WAIT_CCA, MASKDWORD));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "RF path A 0x0 = %#x\n",
+ rtl_get_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK));
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n");
+ }
+
+ if (is2t) {
+ /* _rtl92d_phy_patha_standby(hw); */
+ /* Turn Path B ADDA on */
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+
+ pathb_ok = _rtl92du_phy_pathb_iqk_5g_normal(hw);
+ if (pathb_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][6] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2,
+ MASK_IQK_RESULT);
+ result[t][7] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2,
+ MASK_IQK_RESULT);
+ } else if (pathb_ok == 0x01) { /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B Only Tx IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK failed!!\n");
+ }
+ }
+
+ /* Back to BB mode, load original value */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK:Back to BB mode, load original value!\n");
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0);
+
+ if (is2t)
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+
+ /* path A IQ path to DP block */
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x010170b8);
+ if (is2t) /* path B IQ path to DP block */
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x010170b8);
+
+ /* Reload MAC parameters */
+ _rtl92du_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, false);
+
+ /* Reload ADDA power saving parameters */
+ _rtl92du_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
+}
+
+static bool _rtl92du_phy_simularity_compare(struct ieee80211_hw *hw,
+ long result[][8], u8 c1, u8 c2)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 i, j, diff, sim_bitmap, bound, u4temp = 0;
+ u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+ bool bresult = true;
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ sim_bitmap = 0;
+
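+ /* Entries 0-3 are the path A IQK results, 4-7 are path B; mark any
+ * entry whose two runs differ by more than MAX_TOLERANCE_92D.
+ */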
+ for (i = 0; i < bound; i++) {
+ diff = abs_diff(result[c1][i], result[c2][i]);
+
+ if (diff > MAX_TOLERANCE_92D) {
+ if ((i == 2 || i == 6) && !sim_bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
+ else
+ sim_bitmap = sim_bitmap | (1 << i);
+ } else {
+ sim_bitmap = sim_bitmap | (1 << i);
+ }
+ }
+ }
+
+ if (sim_bitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] =
+ result[final_candidate[i]][j];
+ bresult = false;
+ }
+ }
+
+ for (i = 0; i < bound; i++)
+ u4temp += result[c1][i] + result[c2][i];
+
+ if (u4temp == 0) /* IQK fail for c1 & c2 */
+ bresult = false;
+
+ return bresult;
+ }
+
+ if (!(sim_bitmap & 0x0F)) { /* path A OK */
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ } else if (!(sim_bitmap & 0x03)) { /* path A, Tx OK */
+ for (i = 0; i < 2; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ if (!(sim_bitmap & 0xF0) && is2t) { /* path B OK */
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ } else if (!(sim_bitmap & 0x30)) { /* path B, Tx OK */
+ for (i = 4; i < 6; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ return false;
+}
+
+static void _rtl92du_phy_patha_fill_iqk_matrix_5g_normal(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 val_x, reg;
+ int val_y;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQ Calibration %s !\n", iqk_ok ? "Success" : "Failed");
+ if (iqk_ok && final_candidate != 0xFF) {
+ val_x = result[final_candidate][0];
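+ /* Sign-extend the 10-bit two's-complement IQK result */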
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x\n", val_x);
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, 0x3FF0000, val_x);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0);
+
+ val_y = result[final_candidate][1];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ /* path B IQK result + 3 */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%x\n", val_y);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, 0x3FF, val_y);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26), 0);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe30 = 0x%x\n",
+ rtl_get_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD));
+
+ if (txonly) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
+ return;
+ }
+
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, reg);
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "%s: Tx/Rx fail restore default value\n", __func__);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+ }
+}
+
+static void _rtl92du_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 oldval_0, val_x, tx0_a, reg;
+ long val_y, tx0_c;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version) ||
+ rtlhal->macphymode == DUALMAC_DUALPHY;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ _rtl92du_phy_patha_fill_iqk_matrix_5g_normal(hw, iqk_ok, result,
+ final_candidate,
+ txonly);
+ return;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQ Calibration %s !\n", iqk_ok ? "Success" : "Failed");
+ if (final_candidate == 0xFF || !iqk_ok)
+ return;
+
+ /* OFDM0_D */
+ oldval_0 = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0xffc00000);
+
+ val_x = result[final_candidate][0];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ tx0_a = (val_x * oldval_0) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "X = 0x%x, tx0_a = 0x%x, oldval_0 0x%x\n",
+ val_x, tx0_a, oldval_0);
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+ ((val_x * oldval_0 >> 7) & 0x1));
+
+ val_y = result[final_candidate][1];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ /* path B IQK result + 3 */
+ if (rtlhal->interfaceindex == 1 &&
+ rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ tx0_c = (val_y * oldval_0) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Y = 0x%lx, tx0_c = 0x%lx\n",
+ val_y, tx0_c);
+
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, (tx0_c & 0x3C0) >> 6);
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, tx0_c & 0x3F);
+ if (is2t)
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26),
+ (val_y * oldval_0 >> 7) & 0x1);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
+ rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD));
+
+ if (txonly) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
+ return;
+ }
+
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, reg);
+}
+
+static void _rtl92du_phy_pathb_fill_iqk_matrix_5g_normal(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 val_x, reg;
+ int val_y;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQ Calibration %s !\n", iqk_ok ? "Success" : "Failed");
+ if (iqk_ok && final_candidate != 0xFF) {
+ val_x = result[final_candidate][4];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x\n", val_x);
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, 0x3FF0000, val_x);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0);
+
+ val_y = result[final_candidate][5];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ /* path B IQK result + 3 */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%x\n", val_y);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, 0x3FF, val_y);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30), 0);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe50 = 0x%x\n",
+ rtl_get_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD));
+
+ if (txonly) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
+ return;
+ }
+
+ reg = result[final_candidate][6];
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "%s: Tx/Rx fail restore default value\n", __func__);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+ }
+}
+
+static void _rtl92du_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 oldval_1, val_x, tx1_a, reg;
+ long val_y, tx1_c;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ _rtl92du_phy_pathb_fill_iqk_matrix_5g_normal(hw, iqk_ok, result,
+ final_candidate,
+ txonly);
+ return;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQ Calibration %s !\n",
+ iqk_ok ? "Success" : "Failed");
+
+ if (final_candidate == 0xFF || !iqk_ok)
+ return;
+
+ oldval_1 = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0xffc00000);
+
+ val_x = result[final_candidate][4];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ tx1_a = (val_x * oldval_1) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x, tx1_a = 0x%x\n",
+ val_x, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28),
+ (val_x * oldval_1 >> 7) & 0x1);
+
+ val_y = result[final_candidate][5];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ tx1_c = (val_y * oldval_1) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%lx, tx1_c = 0x%lx\n",
+ val_y, tx1_c);
+
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, (tx1_c & 0x3C0) >> 6);
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, tx1_c & 0x3F);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30),
+ (val_y * oldval_1 >> 7) & 0x1);
+
+ if (txonly)
+ return;
+
+ reg = result[final_candidate][6];
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+}
+
+void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw)
+{
+ long rege94, rege9c, regea4, regeac, regeb4;
+ bool is12simular, is13simular, is23simular;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ long regebc, regec4, regecc, regtmp = 0;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 i, final_candidate, indexforchannel;
+ bool patha_ok, pathb_ok;
+ long result[4][8] = {};
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK:Start!!!channel %d\n", rtlphy->current_channel);
+
+ final_candidate = 0xff;
+ patha_ok = false;
+ pathb_ok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK !!!currentband %d\n", rtlhal->current_bandtype);
+
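+ /* Run the calibration up to three times; stop early once two runs
+ * produce similar results.
+ */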
+ for (i = 0; i < 3; i++) {
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ _rtl92du_phy_iq_calibrate_5g_normal(hw, result, i);
+ } else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (IS_92D_SINGLEPHY(rtlhal->version))
+ _rtl92du_phy_iq_calibrate(hw, result, i, true);
+ else
+ _rtl92du_phy_iq_calibrate(hw, result, i, false);
+ }
+
+ if (i == 1) {
+ is12simular = _rtl92du_phy_simularity_compare(hw, result,
+ 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ is13simular = _rtl92du_phy_simularity_compare(hw, result,
+ 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+
+ is23simular = _rtl92du_phy_simularity_compare(hw, result,
+ 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ regtmp += result[3][i];
+
+ if (regtmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ rege94 = result[i][0];
+ rege9c = result[i][1];
+ regea4 = result[i][2];
+ regeac = result[i][3];
+ regeb4 = result[i][4];
+ regebc = result[i][5];
+ regec4 = result[i][6];
+ regecc = result[i][7];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n",
+ rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
+ regecc);
+ }
+
+ if (final_candidate != 0xff) {
+ rege94 = result[final_candidate][0];
+ rtlphy->reg_e94 = rege94;
+ rege9c = result[final_candidate][1];
+ rtlphy->reg_e9c = rege9c;
+ regea4 = result[final_candidate][2];
+ regeac = result[final_candidate][3];
+ regeb4 = result[final_candidate][4];
+ rtlphy->reg_eb4 = regeb4;
+ regebc = result[final_candidate][5];
+ rtlphy->reg_ebc = regebc;
+ regec4 = result[final_candidate][6];
+ regecc = result[final_candidate][7];
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK: final_candidate is %x\n", final_candidate);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n",
+ rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
+ regecc);
+
+ patha_ok = true;
+ pathb_ok = true;
+ } else {
+ rtlphy->reg_e94 = 0x100;
+ rtlphy->reg_eb4 = 0x100; /* X default value */
+ rtlphy->reg_e9c = 0x0;
+ rtlphy->reg_ebc = 0x0; /* Y default value */
+ }
+ if (rege94 != 0 /*&& regea4 != 0*/)
+ _rtl92du_phy_patha_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ regea4 == 0);
+ if (IS_92D_SINGLEPHY(rtlhal->version) &&
+ regeb4 != 0 /*&& regec4 != 0*/)
+ _rtl92du_phy_pathb_fill_iqk_matrix(hw, pathb_ok, result,
+ final_candidate,
+ regec4 == 0);
+
+ if (final_candidate != 0xFF) {
+ indexforchannel =
+ rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
+
+ for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
+ rtlphy->iqk_matrix[indexforchannel].value[0][i] =
+ result[final_candidate][i];
+
+ rtlphy->iqk_matrix[indexforchannel].iqk_done = true;
+
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
+ "IQK OK indexforchannel %d\n", indexforchannel);
+ }
+}
+
+void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 indexforchannel;
+ bool need_iqk;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel);
+ /*------Do IQK for normal chip and test chip 5G band------- */
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
+ indexforchannel,
+ rtlphy->iqk_matrix[indexforchannel].iqk_done);
+
+ /* We need to do IQK if we're about to connect to a network on 5 GHz.
+ * On 5 GHz a channel switch outside of scanning happens only before
+ * connecting.
+ */
+ need_iqk = !mac->act_scanning;
+
+ if (!rtlphy->iqk_matrix[indexforchannel].iqk_done && need_iqk) {
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
+ "Do IQK Matrix reg for channel:%d....\n", channel);
+ rtl92du_phy_iq_calibrate(hw);
+ return;
+ }
+
+ /* Just load the value. */
+ /* 2G band just load once. */
+ if ((!rtlhal->load_imrandiqk_setting_for2g && indexforchannel == 0) ||
+ indexforchannel > 0) {
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Just Read IQK Matrix reg for channel:%d....\n",
+ channel);
+
+ if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0)
+ _rtl92du_phy_patha_fill_iqk_matrix(hw, true,
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0);
+
+ if (IS_92D_SINGLEPHY(rtlhal->version) &&
+ rtlphy->iqk_matrix[indexforchannel].value[0][4] != 0)
+ _rtl92du_phy_pathb_fill_iqk_matrix(hw, true,
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][6] == 0);
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_reload_lck_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u8 erfpath = rtlhal->current_bandtype == BAND_ON_5G ? RF90_PATH_A :
+ IS_92D_SINGLEPHY(rtlhal->version) ? RF90_PATH_B : RF90_PATH_A;
+ bool bneed_powerdown_radio = false;
+ u32 u4tmp, u4regvalue;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "band type = %d\n",
+ rtlpriv->rtlhal.current_bandtype);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "channel = %d\n", channel);
+
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {/* Path-A for 5G */
+ u4tmp = rtlpriv->curveindex_5g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
+
+ if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+ rtlpriv->rtlhal.interfaceindex == 1) {
+ bneed_powerdown_radio =
+ rtl92du_phy_enable_anotherphy(hw, false);
+ rtlpriv->rtlhal.during_mac1init_radioa = true;
+ /* assume this case will not happen */
+ if (bneed_powerdown_radio)
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
+ }
+
+ rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+
+ if (bneed_powerdown_radio) {
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+ } else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
+ u4tmp = rtlpriv->curveindex_2g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
+
+ if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+ rtlpriv->rtlhal.interfaceindex == 0) {
+ bneed_powerdown_radio =
+ rtl92du_phy_enable_anotherphy(hw, true);
+ rtlpriv->rtlhal.during_mac0init_radiob = true;
+ if (bneed_powerdown_radio)
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
+ }
+
+ rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
+ rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800));
+
+ if (bneed_powerdown_radio) {
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 curvecount_val[CV_CURVE_CNT * 2];
+ u16 timeout = 800, timecount = 0;
+ u32 u4tmp, offset, rf_syn_g4[2];
+ u8 tmpreg, index, rf_mode[2];
+ u8 path = is2t ? 2 : 1;
+ u8 i;
+
+ /* Check continuous TX and Packet TX */
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+ if ((tmpreg & 0x70) != 0)
+ /* Deal with continuous TX case: disable all continuous TX */
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ /* Deal with Packet TX case: block all queues */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x0F);
+
+ for (index = 0; index < path; index++) {
+ /* 1. Read original RF mode */
+ offset = index == 0 ? ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1;
+ rf_mode[index] = rtl_read_byte(rtlpriv, offset);
+
+ /* 2. Set RF mode = standby mode */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
+ RFREG_OFFSET_MASK, 0x010000);
+
+ rf_syn_g4[index] = rtl_get_rfreg(hw, index, RF_SYN_G4,
+ RFREG_OFFSET_MASK);
+ rtl_set_rfreg(hw, index, RF_SYN_G4, 0x700, 0x7);
+
+ /* switch CV-curve control by LC-calibration */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+ BIT(17), 0x0);
+
+ /* 4. Set LC calibration begin */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+ 0x08000, 0x01);
+ }
+
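+ /* Poll RF_SYN_G6 bit 11 until LC calibration finishes or the
+ * 800 ms timeout expires.
+ */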
+ for (index = 0; index < path; index++) {
+ u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
+ RFREG_OFFSET_MASK);
+
+ while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
+ mdelay(50);
+ timecount += 50;
+ u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
+ RF_SYN_G6, RFREG_OFFSET_MASK);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "PHY_LCK finish delay for %d ms=2\n", timecount);
+ }
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ else /* Deal with Packet TX case */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x00);
+
+ for (index = 0; index < path; index++) {
+ rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK);
+
+ if (index == 0 && rtlhal->interfaceindex == 0) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "path-A / 5G LCK\n");
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "path-B / 2.4G LCK\n");
+ }
+
+ memset(curvecount_val, 0, sizeof(curvecount_val));
+
+ /* Set LC calibration off */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+ 0x08000, 0x0);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "set RF 0x18[15] = 0\n");
+
+ /* save Curve-counting number */
+ for (i = 0; i < CV_CURVE_CNT; i++) {
+ u32 readval = 0, readval2 = 0;
+
+ rtl_set_rfreg(hw, (enum radio_path)index, 0x3F,
+ 0x7f, i);
+
+ rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
+ RFREG_OFFSET_MASK, 0x0);
+
+ readval = rtl_get_rfreg(hw, (enum radio_path)index,
+ 0x4F, RFREG_OFFSET_MASK);
+ curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
+
+ /* reg 0x4f [4:0] */
+ /* reg 0x50 [19:10] */
+ readval2 = rtl_get_rfreg(hw, (enum radio_path)index,
+ 0x50, 0xffc00);
+ curvecount_val[2 * i] = (((readval & 0x1F) << 10) |
+ readval2);
+ }
+
+ if (index == 0 && rtlhal->interfaceindex == 0)
+ rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
+ curvecount_val,
+ true, rtlpriv->curveindex_5g);
+ else
+ rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
+ curvecount_val,
+ false, rtlpriv->curveindex_2g);
+
+ /* switch CV-curve control mode */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+ BIT(17), 0x1);
+ }
+
+ /* Restore original situation */
+ for (index = 0; index < path; index++) {
+ rtl_set_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK,
+ rf_syn_g4[index]);
+
+ offset = index == 0 ? ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1;
+ rtl_write_byte(rtlpriv, offset, 0x50);
+ rtl_write_byte(rtlpriv, offset, rf_mode[index]);
+ }
+
+ _rtl92du_phy_reload_lck_setting(hw, rtlpriv->phy.current_channel);
+}
+
+void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 timeout = 2000, timecount = 0;
+
+ while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+ udelay(50);
+ timecount += 50;
+ }
+
+ rtlphy->lck_inprogress = true;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount);
+
+ _rtl92du_phy_lc_calibrate_sw(hw, is2t);
+
+ rtlphy->lck_inprogress = false;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
+}
+
+void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
+{
+ /* Nothing to do. */
+}
+
+u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+ u8 channel = rtlphy->current_channel;
+ u32 timeout = 1000, timecount = 0;
+ u32 ret_value;
+ u8 rfpath;
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
+ return 0;
+ }
+
+ while (rtlphy->lck_inprogress && timecount < timeout) {
+ mdelay(50);
+ timecount += 50;
+ }
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
+ rtlhal->bandset == BAND_ON_BOTH) {
+ ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ MASKDWORD);
+ if (rtlphy->current_channel > 14 && !(ret_value & BIT(0)))
+ rtl92du_phy_switch_wirelessband(hw, BAND_ON_5G);
+ else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0)))
+ rtl92du_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+ }
+
+ switch (rtlhal->current_bandtype) {
+ case BAND_ON_5G:
+ /* The first channel can be wrong when changing between
+ * the 5G and 2.4G bands.
+ */
+ if (WARN_ONCE(channel <= 14, "rtl8192du: 5G but channel<=14\n"))
+ return 0;
+ break;
+ case BAND_ON_2_4G:
+ /* The first channel can be wrong when changing between
+ * the 5G and 2.4G bands.
+ */
+ if (WARN_ONCE(channel > 14, "rtl8192du: 2G but channel>14\n"))
+ return 0;
+ break;
+ default:
+ WARN_ONCE(true, "rtl8192du: Invalid WirelessMode(%#x)!!\n",
+ rtlpriv->mac80211.mode);
+ break;
+ }
+
+ rtlphy->sw_chnl_inprogress = true;
+
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
+
+ rtl92d_phy_set_txpower_level(hw, channel);
+
+ for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+ u32p_replace_bits(&rtlphy->rfreg_chnlval[rfpath],
+ channel, 0xff);
+
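+ /* On 5G set BIT(8) and BIT(16) in RF_CHNLBW; BIT(18) is also set
+ * for channels above 99.
+ */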
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
+ if (channel > 99)
+ rtlphy->rfreg_chnlval[rfpath] |= (BIT(18));
+ else
+ rtlphy->rfreg_chnlval[rfpath] &= ~BIT(18);
+ rtlphy->rfreg_chnlval[rfpath] |= (BIT(16) | BIT(8));
+ } else {
+ rtlphy->rfreg_chnlval[rfpath] &=
+ ~(BIT(8) | BIT(16) | BIT(18));
+ }
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[rfpath]);
+
+ _rtl92du_phy_reload_imr_setting(hw, channel, rfpath);
+ }
+
+ _rtl92du_phy_switch_rf_setting(hw, channel);
+
+ /* do IQK when all parameters are ready */
+ rtl92du_phy_reload_iqk_setting(hw, channel);
+
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtlphy->sw_chnl_inprogress = false;
+ return 1;
+}
+
+static void _rtl92du_phy_set_rfon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* a. SYS_CLKR 0x08[11] = 1 restore MAC clock */
+ /* b. SPS_CTRL 0x11[7:0] = 0x2b */
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+
+ /* c. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+
+ /* RF_ON_EXCEP(d~g): */
+ /* d. APSD_CTRL 0x600[7:0] = 0x00 */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+
+ /* e. SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function again */
+ /* f. SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function*/
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+
+ /* g. txpause 0x522[7:0] = 0x00 enable mac tx queue */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+static void _rtl92du_phy_set_rfsleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 u4btmp;
+ u8 retry = 5;
+
+ /* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ /* b. RF path 0 offset 0x00 = 0x00 disable RF */
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+
+ /* c. APSD_CTRL 0x600[7:0] = 0x40 */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+
+ /* d. APSD_CTRL 0x600[7:0] = 0x00
+ * APSD_CTRL 0x600[7:0] = 0x00
+ * RF path 0 offset 0x00 = 0x00
+ * APSD_CTRL 0x600[7:0] = 0x40
+ */
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ while (u4btmp != 0 && retry > 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ retry--;
+ }
+ if (retry == 0) {
+ /* Jump out of the LPS turn-off sequence to RF_ON_EXCEP */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Fail !!! Switch RF timeout\n");
+ return;
+ }
+
+ /* e. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+
+ /* f. SPS_CTRL 0x11[7:0] = 0x22 */
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+
+bool rtl92du_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ bool bresult = true;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return false;
+
+ switch (rfpwr_state) {
+ case ERFON:
+ if (ppsc->rfpwr_state == ERFOFF &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ u32 initializecount = 0;
+ bool rtstatus;
+
+ do {
+ initializecount++;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while (!rtstatus && (initializecount < 10));
+
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "awake, slept:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies),
+ rtlpriv->psc.state_inap);
+ ppsc->last_awake_jiffies = jiffies;
+ _rtl92du_phy_set_rfon(hw);
+ }
+
+ if (mac->link_state == MAC80211_LINKED)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
+ break;
+ case ERFOFF:
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ }
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+ return false;
+
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "sleep awakened:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
+ rtlpriv->psc.state_inap);
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl92du_phy_set_rfsleep(hw);
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ rfpwr_state);
+ return false;
+ }
+
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+
+ return bresult;
+}
+
+void rtl92du_phy_set_poweron(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 mac_reg = (rtlhal->interfaceindex == 0 ? REG_MAC0 : REG_MAC1);
+ u8 value8;
+ u16 i;
+
+ /* Let the fw know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ value8 = rtl_read_byte(rtlpriv, mac_reg);
+ value8 |= BIT(1);
+ rtl_write_byte(rtlpriv, mac_reg, value8);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, mac_reg);
+ value8 &= ~BIT(1);
+ rtl_write_byte(rtlpriv, mac_reg, value8);
+ }
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+ } else {
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ if (rtlhal->interfaceindex == 0) {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC1);
+ rtl_write_byte(rtlpriv, REG_MAC1, value8 | MAC1_ON);
+ }
+ value8 = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+
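+ /* Wait up to 200 * 500 us = 100 ms for the other MAC to finish
+ * powering off.
+ */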
+ for (i = 0; i < 200; i++) {
+ if ((value8 & BIT(7)) == 0)
+ break;
+
+ udelay(500);
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ value8 = rtl_read_byte(rtlpriv,
+ REG_POWER_OFF_IN_PROCESS);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+ }
+ if (i == 200)
+ WARN_ONCE(true, "rtl8192du: Other MAC power off timed out\n");
+ }
+}
+
+void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 rfpath, i;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ /* r_select_5G for path_A/B 0 for 2.4G, 1 for 5G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* r_select_5G for path_A/B, 0x878 */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x0);
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x0);
+ }
+
+ /* rssi_table_select: index 0 for 2.4G. 1~3 for 5G, 0xc78 */
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x0);
+
+ /* fc_area 0xd2c */
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x0);
+
+ /* 5G LAN ON */
+ rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
+
+ /* TX BB gain shift*1, Just for testchip, 0xc80, 0xc88 */
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, 0x40000100);
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) |
+ (rtlefuse->eeprom_c9 & BIT(1)) |
+ ((rtlefuse->eeprom_cc & BIT(1)) << 4));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) |
+ ((rtlefuse->eeprom_c9 & BIT(0)) << 1) |
+ ((rtlefuse->eeprom_cc & BIT(0)) << 5));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(26) | BIT(22) | BIT(21) | BIT(10) |
+ BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) |
+ (rtlefuse->eeprom_c9 & BIT(1)) |
+ ((rtlefuse->eeprom_cc & BIT(1)) << 4) |
+ ((rtlefuse->eeprom_c9 & BIT(7)) << 9) |
+ ((rtlefuse->eeprom_c9 & BIT(5)) << 12) |
+ ((rtlefuse->eeprom_cc & BIT(3)) << 18));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) |
+ ((rtlefuse->eeprom_c9 & BIT(0)) << 1) |
+ ((rtlefuse->eeprom_cc & BIT(0)) << 5));
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(6)) >> 6) |
+ ((rtlefuse->eeprom_c9 & BIT(4)) >> 3) |
+ ((rtlefuse->eeprom_cc & BIT(2)) << 3));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ BIT(31) | BIT(15), 0);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038);
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x01017038);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x0f600000);
+ }
+ /* 1.5V_LDO */
+ } else {
+ /* r_select_5G for path_A/B */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x1);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x1);
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x1);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x1);
+ }
+
+ /* rssi_table_select: index 0 for 2.4G. 1~3 for 5G */
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x1);
+
+ /* fc_area */
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x1);
+
+ /* 5G LAN ON */
+ rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
+
+ /* TX BB gain shift, Just for testchip, 0xc80, 0xc88 */
+ if (rtlefuse->internal_pa_5g[rtlhal->interfaceindex])
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ 0x2d4000b5);
+ else
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ 0x20000080);
+
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ if (rtlefuse->internal_pa_5g[1])
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, 0x2d4000b5);
+ else
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, 0x20000080);
+ }
+
+ rtl_set_bbreg(hw, 0xB30, BIT(27), 0);
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(10) | BIT(6) | BIT(5),
+ (rtlefuse->eeprom_cc & BIT(5)));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15),
+ (rtlefuse->eeprom_cc & BIT(4)) >> 4);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017098);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(26) | BIT(22) | BIT(21) | BIT(10) |
+ BIT(6) | BIT(5),
+ (rtlefuse->eeprom_cc & BIT(5)) |
+ ((rtlefuse->eeprom_cc & BIT(7)) << 14));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4));
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(6)) >> 6));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ BIT(31) | BIT(15),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4) |
+ ((rtlefuse->eeprom_cc & BIT(6)) << 10));
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017098);
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x01017098);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000);
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x20000000);
+ }
+ }
+
+ /* update IQK related settings */
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
+ BIT(26) | BIT(24), 0x00);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, 0x00);
+
+ /* Update RF */
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
+ BIT(18) | 0xff, 1);
+
+ /* RF0x0b[16:14] =3b'111 */
+ rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
+ 0x1c000, 0x07);
+ } else {
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ 0x97524);
+ }
+
+ /* Set the right channel on RF reg 0x18 for the other MAC. */
+ if (rtlhal->interfaceindex == 0 && rtlhal->bandset == BAND_ON_2_4G) {
+ /* Set MAC1 default channel if MAC1 not up. */
+ if (!(rtl_read_byte(rtlpriv, REG_MAC1) & MAC1_ON)) {
+ rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW,
+ RFREG_OFFSET_MASK, 0x97524);
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+ } else if (rtlhal->interfaceindex == 1 && rtlhal->bandset == BAND_ON_5G) {
+ /* Set MAC0 default channel */
+ if (!(rtl_read_byte(rtlpriv, REG_MAC0) & MAC0_ON)) {
+ rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW,
+ RFREG_OFFSET_MASK, 0x87401);
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+ }
+ }
+
+ /* Update for all bands. */
+ /* DMDP */
+ if (rtlphy->rf_type == RF_1T1R) {
+ /* Use antenna 0, 0xc04, 0xd04 */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x11);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1);
+
+ /* enable ad/da clock1 for dual-phy reg0x888 */
+ if (rtlhal->interfaceindex == 0) {
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) |
+ BIT(13), 0x3);
+ } else if (rtl92du_phy_enable_anotherphy(hw, false)) {
+ rtlhal->during_mac1init_radioa = true;
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN,
+ BIT(12) | BIT(13), 0x3);
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(19) | BIT(20), 0x0);
+ } else {
+ /* Single PHY */
+ /* Use antenna 0 & 1, 0xc04, 0xd04 */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3);
+ /* disable ad/da clock1,0x888 */
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0);
+
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(19) | BIT(20), 0x1);
+ }
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath,
+ RF_CHNLBW,
+ RFREG_OFFSET_MASK);
+ rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C,
+ RFREG_OFFSET_MASK);
+ }
+
+ for (i = 0; i < 2; i++)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[i]);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n");
+}
+
+bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1btmp;
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & ~MAC0_ON);
+ return true;
+ }
+
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ if (rtlhal->interfaceindex == 0) {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & ~MAC0_ON);
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC1);
+ u1btmp &= MAC1_ON;
+ } else {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC1);
+ rtl_write_byte(rtlpriv, REG_MAC1, u1btmp & ~MAC1_ON);
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ u1btmp &= MAC0_ON;
+ }
+ if (u1btmp) {
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+ return false;
+ }
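+ /* Both MACs are now off; set the power-off-in-progress flag that
+ * rtl92du_phy_set_poweron() polls.
+ */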
+ u1btmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ u1btmp |= BIT(7);
+ rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1btmp);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+
+ return true;
+}
+
+void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool is_single_mac = rtlhal->macphymode == SINGLEMAC_SINGLEPHY;
+ enum radio_path rf_path;
+ u8 val8;
+
+ read_efuse_byte(hw, 0x3FA, &val8);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "%s: 0x3FA %#x\n",
+ __func__, val8);
+
+ if (!(val8 & BIT(0)) && (is_single_mac || rtlhal->interfaceindex == 0)) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x07401);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F425);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F425);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F425);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "2G PA BIAS path A\n");
+ }
+
+ if (!(val8 & BIT(1)) && (is_single_mac || rtlhal->interfaceindex == 1)) {
+ rf_path = rtlhal->interfaceindex == 1 ? RF90_PATH_A : RF90_PATH_B;
+
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x07401);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F425);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F425);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F425);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "2G PA BIAS path B\n");
+ }
+
+ if (!(val8 & BIT(2)) && (is_single_mac || rtlhal->interfaceindex == 0)) {
+ /* 5GL_channel */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x17524);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GM_channel */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x37564);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GH_channel */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x57595);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "5G PA BIAS path A\n");
+ }
+
+ if (!(val8 & BIT(3)) && (is_single_mac || rtlhal->interfaceindex == 1)) {
+ rf_path = rtlhal->interfaceindex == 1 ? RF90_PATH_A : RF90_PATH_B;
+
+ /* 5GL_channel */
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x17524);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GM_channel */
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x37564);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GH_channel */
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x57595);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "5G PA BIAS path B\n");
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
new file mode 100644
index 000000000000..090a6203db7e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_PHY_H__
+#define __RTL92DU_PHY_H__
+
+u32 rtl92du_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl92du_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+bool rtl92du_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92du_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92du_phy_rf_config(struct ieee80211_hw *hw);
+void rtl92du_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw);
+bool rtl92du_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum rf_content content,
+ enum radio_path rfpath);
+bool rtl92du_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+
+void rtl92du_phy_set_poweron(struct ieee80211_hw *hw);
+bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw);
+void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw);
+void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
+void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw);
+void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
+void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c
new file mode 100644
index 000000000000..044dd65eafd0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/phy_common.h"
+#include "phy.h"
+#include "rf.h"
+
+bool rtl92du_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON;
+ u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+ bool bresult = true; /* true: need to enable BB/RF power */
+ u32 maskforphyset = 0;
+ u16 val16;
+ u8 u1btmp;
+
+ rtlhal->during_mac0init_radiob = false;
+ rtlhal->during_mac1init_radioa = false;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
+
+ /* MAC0 needs PHY1 to load radio_b.txt. The driver uses DBI to write. */
+ u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+ if (!(u1btmp & mac_on_bit)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
+ /* Enable BB and RF power */
+
+ maskforphyset = bmac0 ? MAC0_ACCESS_PHY1 : MAC1_ACCESS_PHY0;
+
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset);
+ val16 &= 0xfffc;
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset, val16);
+
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset);
+ val16 |= BIT(13) | BIT(0) | BIT(1);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset, val16);
+ } else {
+ /* If MAC1 is already on, assume radio_a.txt and
+ * radio_b.txt have already been loaded.
+ */
+ bresult = false;
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
+ return bresult;
+}
+
+void rtl92du_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON;
+ u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+ u32 maskforphyset = 0;
+ u8 u1btmp;
+
+ rtlhal->during_mac0init_radiob = false;
+ rtlhal->during_mac1init_radioa = false;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+
+ /* Check again now whether MAC0 is enabled; if it is,
+ * do not power down radio A.
+ */
+ u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+ if (!(u1btmp & mac_on_bit)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
+ /* power down RF radio A according to YuNan's advice. */
+ maskforphyset = bmac0 ? MAC0_ACCESS_PHY1 : MAC1_ACCESS_PHY0;
+ rtl_write_dword(rtlpriv, RFPGA0_XA_LSSIPARAMETER | maskforphyset,
+ 0x00000000);
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+}
+
+bool rtl92du_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ bool mac1_initradioa_first = false, mac0_initradiob_first = false;
+ bool need_pwrdown_radioa = false, need_pwrdown_radiob = false;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg;
+ bool true_bpath = false;
+ bool rtstatus = true;
+ u32 u4_regvalue = 0;
+ u8 rfpath;
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+ /* Single PHY mode: MAC0 uses radio_a and radio_b to configure
+ * path A and path B separately; MAC1 doesn't need to configure
+ * the RF.
+ * Dual PHY mode: MAC0 uses radio_a to configure path A of the
+ * 1st PHY, MAC1 uses radio_b to configure path A of the 2nd PHY.
+ * In DMDP, MAC0 is on the G band and MAC1 is on the A band.
+ */
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G &&
+ rtlhal->interfaceindex == 0) {
+ /* MAC0 needs PHY1 to load radio_b.txt. */
+ if (rtl92du_phy_enable_anotherphy(hw, true)) {
+ rtlphy->num_total_rfpath = 2;
+ mac0_initradiob_first = true;
+ } else {
+ /* If MAC1 is already on, assume radio_a.txt and
+ * radio_b.txt have already been loaded.
+ */
+ return rtstatus;
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_5G &&
+ rtlhal->interfaceindex == 1) {
+ /* MAC1 needs PHY0 to load radio_a.txt. */
+ if (rtl92du_phy_enable_anotherphy(hw, false)) {
+ rtlphy->num_total_rfpath = 2;
+ mac1_initradioa_first = true;
+ } else {
+ /* If MAC0 is already on, assume radio_a.txt and
+ * radio_b.txt have already been loaded.
+ */
+ return rtstatus;
+ }
+ } else if (rtlhal->interfaceindex == 1) {
+ /* MAC0 is enabled, so only initialize radio B. */
+ true_bpath = true;
+ }
+ }
+
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ /* MAC1 uses PHY0 to write */
+ if (mac1_initradioa_first) {
+ if (rfpath == RF90_PATH_A) {
+ rtlhal->during_mac1init_radioa = true;
+ need_pwrdown_radioa = true;
+ } else if (rfpath == RF90_PATH_B) {
+ rtlhal->during_mac1init_radioa = false;
+ mac1_initradioa_first = false;
+ rfpath = RF90_PATH_A;
+ true_bpath = true;
+ rtlphy->num_total_rfpath = 1;
+ }
+ } else if (mac0_initradiob_first) {
+ /* MAC0 uses PHY1 to write */
+ if (rfpath == RF90_PATH_A)
+ rtlhal->during_mac0init_radiob = false;
+ if (rfpath == RF90_PATH_B) {
+ rtlhal->during_mac0init_radiob = true;
+ mac0_initradiob_first = false;
+ need_pwrdown_radiob = true;
+ rfpath = RF90_PATH_A;
+ true_bpath = true;
+ rtlphy->num_total_rfpath = 1;
+ }
+ }
+
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+
+ /* Set bit number of Address and Data for RF register */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDRESSLENGTH, 0x0);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ if (true_bpath)
+ rtstatus = rtl92du_phy_config_rf_with_headerfile(
+ hw, radiob_txt,
+ (enum radio_path)rfpath);
+ else
+ rtstatus = rtl92du_phy_config_rf_with_headerfile(
+ hw, radioa_txt,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus =
+ rtl92du_phy_config_rf_with_headerfile(hw, radiob_txt,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV,
+ u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
+ u4_regvalue);
+ break;
+ }
+
+ if (!rtstatus) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
+ return rtstatus;
+ }
+ }
+
+ /* Check again whether MAC0 is enabled; if it is,
+ * do not power down radio A.
+ * Likewise, check whether MAC1 is enabled; if it is,
+ * do not power down radio B.
+ */
+ if (need_pwrdown_radioa)
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ else if (need_pwrdown_radiob)
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+
+ return rtstatus;
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h
new file mode 100644
index 000000000000..4a92cbdd00c0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_RF_H__
+#define __RTL92DU_RF_H__
+
+bool rtl92du_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92du_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92du_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
new file mode 100644
index 000000000000..d069a81ac617
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../usb.h"
+#include "../base.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/fw_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/trx_common.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "trx.h"
+#include "led.h"
+
+#include <linux/module.h>
+
+static struct usb_interface *rtl92du_get_other_intf(struct ieee80211_hw *hw)
+{
+ struct usb_interface *intf;
+ struct usb_device *udev;
+ u8 other_interfaceindex;
+
+ /* See SET_IEEE80211_DEV(hw, &intf->dev); in usb.c */
+ intf = container_of_const(wiphy_dev(hw->wiphy), struct usb_interface, dev);
+
+ if (intf->altsetting[0].desc.bInterfaceNumber == 0)
+ other_interfaceindex = 1;
+ else
+ other_interfaceindex = 0;
+
+ udev = interface_to_usbdev(intf);
+
+ return usb_ifnum_to_if(udev, other_interfaceindex);
+}
+
+static int rtl92du_init_shared_data(struct ieee80211_hw *hw)
+{
+ struct usb_interface *other_intf = rtl92du_get_other_intf(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *other_rtlpriv = NULL;
+ struct ieee80211_hw *other_hw = NULL;
+
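+ /* The two MACs of the dual-MAC chip are probed as two USB
+ * interfaces. Whichever interface is probed first allocates the
+ * calibration curve index buffers and the power-on/off and
+ * hw-init mutexes; the other interface reuses them.
+ */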
+ if (other_intf)
+ other_hw = usb_get_intfdata(other_intf);
+
+ if (other_hw) {
+ /* The other interface was already probed. */
+ other_rtlpriv = rtl_priv(other_hw);
+ rtlpriv->curveindex_2g = other_rtlpriv->curveindex_2g;
+ rtlpriv->curveindex_5g = other_rtlpriv->curveindex_5g;
+ rtlpriv->mutex_for_power_on_off = other_rtlpriv->mutex_for_power_on_off;
+ rtlpriv->mutex_for_hw_init = other_rtlpriv->mutex_for_hw_init;
+
+ if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g ||
+ !rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init)
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ /* The other interface doesn't exist or was not probed yet. */
+ rtlpriv->curveindex_2g = kcalloc(TARGET_CHNL_NUM_2G,
+ sizeof(*rtlpriv->curveindex_2g),
+ GFP_KERNEL);
+ rtlpriv->curveindex_5g = kcalloc(TARGET_CHNL_NUM_5G,
+ sizeof(*rtlpriv->curveindex_5g),
+ GFP_KERNEL);
+ rtlpriv->mutex_for_power_on_off =
+ kzalloc(sizeof(*rtlpriv->mutex_for_power_on_off), GFP_KERNEL);
+ rtlpriv->mutex_for_hw_init =
+ kzalloc(sizeof(*rtlpriv->mutex_for_hw_init), GFP_KERNEL);
+
+ if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g ||
+ !rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init) {
+ kfree(rtlpriv->curveindex_2g);
+ kfree(rtlpriv->curveindex_5g);
+ kfree(rtlpriv->mutex_for_power_on_off);
+ kfree(rtlpriv->mutex_for_hw_init);
+ rtlpriv->curveindex_2g = NULL;
+ rtlpriv->curveindex_5g = NULL;
+ rtlpriv->mutex_for_power_on_off = NULL;
+ rtlpriv->mutex_for_hw_init = NULL;
+ return -ENOMEM;
+ }
+
+ mutex_init(rtlpriv->mutex_for_power_on_off);
+ mutex_init(rtlpriv->mutex_for_hw_init);
+
+ return 0;
+}
+
+static void rtl92du_deinit_shared_data(struct ieee80211_hw *hw)
+{
+ struct usb_interface *other_intf = rtl92du_get_other_intf(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!other_intf || !usb_get_intfdata(other_intf)) {
+ /* The other interface doesn't exist or was already disconnected. */
+ kfree(rtlpriv->curveindex_2g);
+ kfree(rtlpriv->curveindex_5g);
+ if (rtlpriv->mutex_for_power_on_off)
+ mutex_destroy(rtlpriv->mutex_for_power_on_off);
+ if (rtlpriv->mutex_for_hw_init)
+ mutex_destroy(rtlpriv->mutex_for_hw_init);
+ kfree(rtlpriv->mutex_for_power_on_off);
+ kfree(rtlpriv->mutex_for_hw_init);
+ }
+}
+
+static int rtl92du_init_sw_vars(struct ieee80211_hw *hw)
+{
+ const char *fw_name = "rtlwifi/rtl8192dufw.bin";
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err;
+
+ err = rtl92du_init_shared_data(hw);
+ if (err)
+ return err;
+
+ rtlpriv->dm.dm_initialgain_enable = true;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = false;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpriv->dm.useramask = true;
+
+ /* dual mac */
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ rtlpriv->phy.current_channel = 36;
+ else
+ rtlpriv->phy.current_channel = 1;
+
+ if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY)
+ rtlpriv->rtlhal.disable_amsdu_8k = true;
+
+ /* for LPS & IPS */
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+
+ /* for early mode */
+ rtlpriv->rtlhal.earlymode_enable = false;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = kmalloc(0x8000, GFP_KERNEL);
+ if (!rtlpriv->rtlhal.pfirmware)
+ return -ENOMEM;
+
+ rtlpriv->max_fw_size = 0x8000;
+ pr_info("Driver for Realtek RTL8192DU WLAN interface\n");
+ pr_info("Loading firmware file %s\n", fw_name);
+
+ /* request fw */
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
+ kfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static void rtl92du_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ kfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+
+ rtl92du_deinit_shared_data(hw);
+}
+
+static const struct rtl_hal_ops rtl8192du_hal_ops = {
+ .init_sw_vars = rtl92du_init_sw_vars,
+ .deinit_sw_vars = rtl92du_deinit_sw_vars,
+ .read_chip_version = rtl92du_read_chip_version,
+ .read_eeprom_info = rtl92d_read_eeprom_info,
+ .hw_init = rtl92du_hw_init,
+ .hw_disable = rtl92du_card_disable,
+ .enable_interrupt = rtl92du_enable_interrupt,
+ .disable_interrupt = rtl92du_disable_interrupt,
+ .set_network_type = rtl92du_set_network_type,
+ .set_chk_bssid = rtl92du_set_check_bssid,
+ .set_qos = rtl92d_set_qos,
+ .set_bcn_reg = rtl92du_set_beacon_related_registers,
+ .set_bcn_intv = rtl92du_set_beacon_interval,
+ .update_interrupt_mask = rtl92du_update_interrupt_mask,
+ .get_hw_reg = rtl92du_get_hw_reg,
+ .set_hw_reg = rtl92du_set_hw_reg,
+ .update_rate_tbl = rtl92d_update_hal_rate_tbl,
+ .fill_tx_desc = rtl92du_tx_fill_desc,
+ .query_rx_desc = rtl92d_rx_query_desc,
+ .set_channel_access = rtl92d_update_channel_access_setting,
+ .radio_onoff_checking = rtl92d_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl92du_phy_set_bw_mode,
+ .switch_channel = rtl92du_phy_sw_chnl,
+ .dm_watchdog = rtl92du_dm_watchdog,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
+ .set_rf_power_state = rtl92du_phy_set_rf_power_state,
+ .led_control = rtl92du_led_control,
+ .set_desc = rtl92d_set_desc,
+ .get_desc = rtl92d_get_desc,
+ .enable_hw_sec = rtl92d_enable_hw_security_config,
+ .set_key = rtl92d_set_key,
+ .get_bbreg = rtl92du_phy_query_bb_reg,
+ .set_bbreg = rtl92du_phy_set_bb_reg,
+ .get_rfreg = rtl92d_phy_query_rf_reg,
+ .set_rfreg = rtl92d_phy_set_rf_reg,
+ .linked_set_reg = rtl92du_linked_set_reg,
+ .fill_h2c_cmd = rtl92d_fill_h2c_cmd,
+ .get_btc_status = rtl_btc_status_false,
+ .phy_iq_calibrate = rtl92du_phy_iq_calibrate,
+ .phy_lc_calibrate = rtl92du_phy_lc_calibrate,
+};
+
+static struct rtl_mod_params rtl92du_mod_params = {
+ .sw_crypto = false,
+ .inactiveps = false,
+ .swctrl_lps = false,
+ .debug_level = 0,
+ .debug_mask = 0,
+};
+
+static const struct rtl_hal_usbint_cfg rtl92du_interface_cfg = {
+ /* rx */
+ .rx_urb_num = 8,
+ .rx_max_size = 15360,
+ .usb_rx_hdl = NULL,
+ .usb_rx_segregate_hdl = NULL,
+ /* tx */
+ .usb_tx_cleanup = rtl92du_tx_cleanup,
+ .usb_tx_post_hdl = rtl92du_tx_post_hdl,
+ .usb_tx_aggregate_hdl = rtl92du_tx_aggregate_hdl,
+ .usb_endpoint_mapping = rtl92du_endpoint_mapping,
+ .usb_mq_to_hwq = rtl92du_mq_to_hwq,
+};
+
+static const struct rtl_hal_cfg rtl92du_hal_cfg = {
+ .name = "rtl8192du",
+ .ops = &rtl8192du_hal_ops,
+ .mod_params = &rtl92du_mod_params,
+ .usb_interface_cfg = &rtl92du_interface_cfg,
+
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = RCR_AM,
+ .maps[MAC_RCR_AB] = RCR_AB,
+ .maps[MAC_RCR_ACRC32] = RCR_ACRC32,
+ .maps[MAC_RCR_ACF] = RCR_ACF,
+ .maps[MAC_RCR_AAP] = RCR_AAP,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0, /* just for 92se */
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = 0,
+ .maps[EFUSE_ANA8M] = 0, /* just for 92se */
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+ .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+ .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BCNINT] = IMR_BCNINT,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BDOK,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
+};
+
+module_param_named(swenc, rtl92du_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug_level, rtl92du_mod_params.debug_level, int, 0644);
+module_param_named(ips, rtl92du_mod_params.inactiveps, bool, 0444);
+module_param_named(swlps, rtl92du_mod_params.swctrl_lps, bool, 0444);
+module_param_named(debug_mask, rtl92du_mod_params.debug_mask, ullong, 0644);
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)");
+MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 0)");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
+
+#define USB_VENDOR_ID_REALTEK 0x0bda
+
+static const struct usb_device_id rtl8192d_usb_ids[] = {
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8193, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8194, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8111, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x0193, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8171, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0xe194, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0xab2c, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0xab2d, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0x4903, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0x4904, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x07b8, 0x8193, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x20f4, 0x664b, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x04dd, 0x954f, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x04dd, 0x96a6, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x050d, 0x110a, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x050d, 0x1105, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x050d, 0x120a, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x1668, 0x8102, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x0930, 0x0a0a, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2001, 0x330c, rtl92du_hal_cfg)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8192d_usb_ids);
+
+static int rtl8192du_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return rtl_usb_probe(intf, id, &rtl92du_hal_cfg);
+}
+
+static struct usb_driver rtl8192du_driver = {
+ .name = "rtl8192du",
+ .probe = rtl8192du_probe,
+ .disconnect = rtl_usb_disconnect,
+ .id_table = rtl8192d_usb_ids,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(rtl8192du_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192DU 802.11n Dual Mac USB wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192dufw.bin");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c
new file mode 100644
index 000000000000..036701433d85
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c
@@ -0,0 +1,1675 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include <linux/types.h>
+
+#include "table.h"
+
+const u32 rtl8192du_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH] = {
+ 0x800, 0x80040002,
+ 0x804, 0x00000003,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10001331,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x01000100,
+ 0x82c, 0x00390004,
+ 0x830, 0x27272727,
+ 0x834, 0x27272727,
+ 0x838, 0x27272727,
+ 0x83c, 0x27272727,
+ 0x840, 0x00010000,
+ 0x844, 0x00010000,
+ 0x848, 0x27272727,
+ 0x84c, 0x27272727,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x0c1b25a4,
+ 0x860, 0x66e60250,
+ 0x864, 0x061f0150,
+ 0x868, 0x27272727,
+ 0x86c, 0x272b2b2b,
+ 0x870, 0x07000700,
+ 0x874, 0x22188000,
+ 0x878, 0x08080808,
+ 0x87c, 0x0001fff8,
+ 0x880, 0xc0083070,
+ 0x884, 0x00000cd5,
+ 0x888, 0x00000000,
+ 0x88c, 0xcc0000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121313,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c8a8300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fff00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x40071d40,
+ 0xc04, 0x03a05633,
+ 0xc08, 0x001000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652af,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a979718,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x69543420,
+ 0xc54, 0x43bc009e,
+ 0xc58, 0x69543420,
+ 0xc5c, 0x433c00a8,
+ 0xc60, 0x00000000,
+ 0xc64, 0x7112848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x258610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x40b95612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0xa0e40000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000007,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b11e20,
+ 0xcdc, 0xe0767533,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020403,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608404,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027353,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000010,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x63db25a4,
+ 0xe70, 0x63db25a4,
+ 0xe74, 0x0c126da4,
+ 0xe78, 0x0c126da4,
+ 0xe7c, 0x0c126da4,
+ 0xe80, 0x0c126da4,
+ 0xe84, 0x63db25a4,
+ 0xe88, 0x0c126da4,
+ 0xe8c, 0x63db25a4,
+ 0xed0, 0x63db25a4,
+ 0xed4, 0x63db25a4,
+ 0xed8, 0x63db25a4,
+ 0xedc, 0x001b25a4,
+ 0xee0, 0x001b25a4,
+ 0xeec, 0x6fdb25a4,
+ 0xf14, 0x00000003,
+ 0xf1c, 0x00000064,
+ 0xf4c, 0x00000004,
+ 0xf00, 0x00000300,
+};
+
+const u32 rtl8192du_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH] = {
+ 0xe00, 0xffffffff, 0x07090c0c,
+ 0xe04, 0xffffffff, 0x01020405,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0b0c0c0e,
+ 0xe14, 0xffffffff, 0x01030506,
+ 0xe18, 0xffffffff, 0x0b0c0d0e,
+ 0xe1c, 0xffffffff, 0x01030509,
+ 0x830, 0xffffffff, 0x07090c0c,
+ 0x834, 0xffffffff, 0x01020405,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x0b0c0c0e,
+ 0x848, 0xffffffff, 0x01030506,
+ 0x84c, 0xffffffff, 0x0b0c0d0e,
+ 0x868, 0xffffffff, 0x01030509,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x06060606,
+ 0xe14, 0xffffffff, 0x00020406,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x06060606,
+ 0x848, 0xffffffff, 0x00020406,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
+const u32 rtl8192du_radioa_2tarray[RADIOA_2T_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00017524,
+ 0x019, 0x00000000,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000de471,
+ 0x029, 0x000d7110,
+ 0x02a, 0x0008cb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000f9c43,
+ 0x049, 0x00002e0c,
+ 0x04a, 0x000546eb,
+ 0x04b, 0x0008966c,
+ 0x04c, 0x0000dde9,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00037524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00057568,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00097524,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x0006aaaa,
+ 0x02e, 0x000b4d01,
+ 0x02d, 0x00080000,
+ 0x02e, 0x00004d02,
+ 0x02d, 0x00095555,
+ 0x02e, 0x00054d03,
+ 0x02d, 0x000aaaaa,
+ 0x02e, 0x000b4d04,
+ 0x02d, 0x000c0000,
+ 0x02e, 0x00004d05,
+ 0x02d, 0x000d5555,
+ 0x02e, 0x00054d06,
+ 0x02d, 0x000eaaaa,
+ 0x02e, 0x000b4d07,
+ 0x02d, 0x00000000,
+ 0x02e, 0x00005108,
+ 0x02d, 0x00015555,
+ 0x02e, 0x00055109,
+ 0x02d, 0x0002aaaa,
+ 0x02e, 0x000b510a,
+ 0x02d, 0x00040000,
+ 0x02e, 0x0000510b,
+ 0x02d, 0x00055555,
+ 0x02e, 0x0005510c,
+};
+
+const u32 rtl8192du_radiob_2tarray[RADIOB_2T_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00007401,
+ 0x019, 0x00000060,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000d1c31,
+ 0x029, 0x000d7110,
+ 0x02a, 0x000aeb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000f9c43,
+ 0x049, 0x00002e0c,
+ 0x04a, 0x000546eb,
+ 0x04b, 0x0008966c,
+ 0x04c, 0x0000dde9,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00037524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00057524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00087401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x00066666,
+ 0x02e, 0x00064001,
+ 0x02d, 0x00091111,
+ 0x02e, 0x00014002,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4003,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x00064004,
+ 0x02d, 0x00088888,
+ 0x02e, 0x00084005,
+ 0x02d, 0x0009dddd,
+ 0x02e, 0x000d4006,
+ 0x02d, 0x000b3333,
+ 0x02e, 0x00034007,
+ 0x02d, 0x00048888,
+ 0x02e, 0x00084408,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4409,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x0006440a,
+ 0x02d, 0x00011111,
+ 0x02e, 0x0001480b,
+ 0x02d, 0x0003bbbb,
+ 0x02e, 0x000b480c,
+ 0x02d, 0x00066666,
+ 0x02e, 0x0006480d,
+ 0x02d, 0x000ccccc,
+ 0x02e, 0x000c480e,
+};
+
+const u32 rtl8192du_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00017524,
+ 0x019, 0x00000000,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff455,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000de471,
+ 0x029, 0x000d7110,
+ 0x02a, 0x0008eb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000c0443,
+ 0x049, 0x00000730,
+ 0x04a, 0x00050f0f,
+ 0x04b, 0x000896ef,
+ 0x04c, 0x0000ddee,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00037564,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00057595,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00097524,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x0006aaaa,
+ 0x02e, 0x000b4d01,
+ 0x02d, 0x00080000,
+ 0x02e, 0x00004d02,
+ 0x02d, 0x00095555,
+ 0x02e, 0x00054d03,
+ 0x02d, 0x000aaaaa,
+ 0x02e, 0x000b4d04,
+ 0x02d, 0x000c0000,
+ 0x02e, 0x00004d05,
+ 0x02d, 0x000d5555,
+ 0x02e, 0x00054d06,
+ 0x02d, 0x000eaaaa,
+ 0x02e, 0x000b4d07,
+ 0x02d, 0x00000000,
+ 0x02e, 0x00005108,
+ 0x02d, 0x00015555,
+ 0x02e, 0x00055109,
+ 0x02d, 0x0002aaaa,
+ 0x02e, 0x000b510a,
+ 0x02d, 0x00040000,
+ 0x02e, 0x0000510b,
+ 0x02d, 0x00055555,
+ 0x02e, 0x0005510c,
+};
+
+const u32 rtl8192du_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00007401,
+ 0x019, 0x00000060,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff455,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000d1c31,
+ 0x029, 0x000d7110,
+ 0x02a, 0x000aeb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000c0443,
+ 0x049, 0x00000730,
+ 0x04a, 0x00050f0f,
+ 0x04b, 0x000896ef,
+ 0x04c, 0x0000ddee,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00037564,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00057595,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00087401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x00066666,
+ 0x02e, 0x00064001,
+ 0x02d, 0x00091111,
+ 0x02e, 0x00014002,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4003,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x00064004,
+ 0x02d, 0x00088888,
+ 0x02e, 0x00084005,
+ 0x02d, 0x0009dddd,
+ 0x02e, 0x000d4006,
+ 0x02d, 0x000b3333,
+ 0x02e, 0x00034007,
+ 0x02d, 0x00048888,
+ 0x02e, 0x00084408,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4409,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x0006440a,
+ 0x02d, 0x00011111,
+ 0x02e, 0x0001480b,
+ 0x02d, 0x0003bbbb,
+ 0x02e, 0x000b480c,
+ 0x02d, 0x00066666,
+ 0x02e, 0x0006480d,
+ 0x02d, 0x000ccccc,
+ 0x02e, 0x000c480e,
+};
+
+const u32 rtl8192du_mac_2tarray[MAC_2T_ARRAYLENGTH] = {
+ 0x420, 0x00000080,
+ 0x423, 0x00000000,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000006,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43a, 0x00000000,
+ 0x43b, 0x00000001,
+ 0x43c, 0x00000004,
+ 0x43d, 0x00000005,
+ 0x43e, 0x00000006,
+ 0x43f, 0x00000007,
+ 0x440, 0x00000050,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000f0,
+ 0x446, 0x0000000f,
+ 0x447, 0x00000000,
+ 0x462, 0x00000008,
+ 0x463, 0x00000003,
+ 0x4c8, 0x000000ff,
+ 0x4c9, 0x00000008,
+ 0x4cc, 0x000000ff,
+ 0x4cd, 0x000000ff,
+ 0x4ce, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000a2,
+ 0x502, 0x0000002f,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000a3,
+ 0x506, 0x0000005e,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002b,
+ 0x509, 0x000000a4,
+ 0x50a, 0x0000005e,
+ 0x50b, 0x00000000,
+ 0x50c, 0x0000004f,
+ 0x50d, 0x000000a4,
+ 0x50e, 0x00000000,
+ 0x50f, 0x00000000,
+ 0x512, 0x0000001c,
+ 0x514, 0x0000000a,
+ 0x515, 0x00000010,
+ 0x516, 0x0000000a,
+ 0x517, 0x00000010,
+ 0x51a, 0x00000016,
+ 0x524, 0x0000000f,
+ 0x525, 0x0000004f,
+ 0x546, 0x00000040,
+ 0x547, 0x00000000,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55a, 0x00000002,
+ 0x55d, 0x000000ff,
+ 0x605, 0x00000080,
+ 0x608, 0x0000000e,
+ 0x609, 0x0000002a,
+ 0x652, 0x00000020,
+ 0x63c, 0x0000000a,
+ 0x63d, 0x0000000a,
+ 0x63e, 0x0000000e,
+ 0x63f, 0x0000000e,
+ 0x66e, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70a, 0x00000065,
+ 0x70b, 0x00000087,
+ 0x024, 0x0000000d,
+ 0x025, 0x00000080,
+ 0x026, 0x00000011,
+ 0x027, 0x00000000,
+ 0x028, 0x00000083,
+ 0x029, 0x000000db,
+ 0x02a, 0x000000ff,
+ 0x02b, 0x00000000,
+ 0x014, 0x00000055,
+ 0x015, 0x000000a9,
+ 0x016, 0x0000008b,
+ 0x017, 0x00000008,
+ 0x010, 0x00000003,
+ 0x011, 0x0000002b,
+ 0x012, 0x00000002,
+ 0x013, 0x00000049,
+};
+
+const u32 rtl8192du_agctab_array[AGCTAB_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7a070001,
+ 0xc78, 0x79080001,
+ 0xc78, 0x78090001,
+ 0xc78, 0x770a0001,
+ 0xc78, 0x760b0001,
+ 0xc78, 0x750c0001,
+ 0xc78, 0x740d0001,
+ 0xc78, 0x730e0001,
+ 0xc78, 0x720f0001,
+ 0xc78, 0x71100001,
+ 0xc78, 0x70110001,
+ 0xc78, 0x6f120001,
+ 0xc78, 0x6e130001,
+ 0xc78, 0x6d140001,
+ 0xc78, 0x6c150001,
+ 0xc78, 0x6b160001,
+ 0xc78, 0x6a170001,
+ 0xc78, 0x69180001,
+ 0xc78, 0x68190001,
+ 0xc78, 0x671a0001,
+ 0xc78, 0x661b0001,
+ 0xc78, 0x651c0001,
+ 0xc78, 0x641d0001,
+ 0xc78, 0x631e0001,
+ 0xc78, 0x621f0001,
+ 0xc78, 0x61200001,
+ 0xc78, 0x60210001,
+ 0xc78, 0x49220001,
+ 0xc78, 0x48230001,
+ 0xc78, 0x47240001,
+ 0xc78, 0x46250001,
+ 0xc78, 0x45260001,
+ 0xc78, 0x44270001,
+ 0xc78, 0x43280001,
+ 0xc78, 0x42290001,
+ 0xc78, 0x412a0001,
+ 0xc78, 0x402b0001,
+ 0xc78, 0x262c0001,
+ 0xc78, 0x252d0001,
+ 0xc78, 0x242e0001,
+ 0xc78, 0x232f0001,
+ 0xc78, 0x22300001,
+ 0xc78, 0x21310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x06330001,
+ 0xc78, 0x05340001,
+ 0xc78, 0x04350001,
+ 0xc78, 0x03360001,
+ 0xc78, 0x02370001,
+ 0xc78, 0x01380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7a420001,
+ 0xc78, 0x79430001,
+ 0xc78, 0x78440001,
+ 0xc78, 0x77450001,
+ 0xc78, 0x76460001,
+ 0xc78, 0x75470001,
+ 0xc78, 0x74480001,
+ 0xc78, 0x73490001,
+ 0xc78, 0x724a0001,
+ 0xc78, 0x714b0001,
+ 0xc78, 0x704c0001,
+ 0xc78, 0x6f4d0001,
+ 0xc78, 0x6e4e0001,
+ 0xc78, 0x6d4f0001,
+ 0xc78, 0x6c500001,
+ 0xc78, 0x6b510001,
+ 0xc78, 0x6a520001,
+ 0xc78, 0x69530001,
+ 0xc78, 0x68540001,
+ 0xc78, 0x67550001,
+ 0xc78, 0x66560001,
+ 0xc78, 0x65570001,
+ 0xc78, 0x64580001,
+ 0xc78, 0x63590001,
+ 0xc78, 0x625a0001,
+ 0xc78, 0x615b0001,
+ 0xc78, 0x605c0001,
+ 0xc78, 0x485d0001,
+ 0xc78, 0x475e0001,
+ 0xc78, 0x465f0001,
+ 0xc78, 0x45600001,
+ 0xc78, 0x44610001,
+ 0xc78, 0x43620001,
+ 0xc78, 0x42630001,
+ 0xc78, 0x41640001,
+ 0xc78, 0x40650001,
+ 0xc78, 0x27660001,
+ 0xc78, 0x26670001,
+ 0xc78, 0x25680001,
+ 0xc78, 0x24690001,
+ 0xc78, 0x236a0001,
+ 0xc78, 0x226b0001,
+ 0xc78, 0x216c0001,
+ 0xc78, 0x206d0001,
+ 0xc78, 0x206e0001,
+ 0xc78, 0x206f0001,
+ 0xc78, 0x20700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x20720001,
+ 0xc78, 0x20730001,
+ 0xc78, 0x20740001,
+ 0xc78, 0x20750001,
+ 0xc78, 0x20760001,
+ 0xc78, 0x20770001,
+ 0xc78, 0x20780001,
+ 0xc78, 0x20790001,
+ 0xc78, 0x207a0001,
+ 0xc78, 0x207b0001,
+ 0xc78, 0x207c0001,
+ 0xc78, 0x207d0001,
+ 0xc78, 0x207e0001,
+ 0xc78, 0x207f0001,
+ 0xc78, 0x38000002,
+ 0xc78, 0x38010002,
+ 0xc78, 0x38020002,
+ 0xc78, 0x38030002,
+ 0xc78, 0x38040002,
+ 0xc78, 0x38050002,
+ 0xc78, 0x38060002,
+ 0xc78, 0x38070002,
+ 0xc78, 0x38080002,
+ 0xc78, 0x3c090002,
+ 0xc78, 0x3e0a0002,
+ 0xc78, 0x400b0002,
+ 0xc78, 0x440c0002,
+ 0xc78, 0x480d0002,
+ 0xc78, 0x4c0e0002,
+ 0xc78, 0x500f0002,
+ 0xc78, 0x52100002,
+ 0xc78, 0x56110002,
+ 0xc78, 0x5a120002,
+ 0xc78, 0x5e130002,
+ 0xc78, 0x60140002,
+ 0xc78, 0x60150002,
+ 0xc78, 0x60160002,
+ 0xc78, 0x62170002,
+ 0xc78, 0x62180002,
+ 0xc78, 0x62190002,
+ 0xc78, 0x621a0002,
+ 0xc78, 0x621b0002,
+ 0xc78, 0x621c0002,
+ 0xc78, 0x621d0002,
+ 0xc78, 0x621e0002,
+ 0xc78, 0x621f0002,
+ 0xc78, 0x32000044,
+ 0xc78, 0x32010044,
+ 0xc78, 0x32020044,
+ 0xc78, 0x32030044,
+ 0xc78, 0x32040044,
+ 0xc78, 0x32050044,
+ 0xc78, 0x32060044,
+ 0xc78, 0x34070044,
+ 0xc78, 0x35080044,
+ 0xc78, 0x36090044,
+ 0xc78, 0x370a0044,
+ 0xc78, 0x380b0044,
+ 0xc78, 0x390c0044,
+ 0xc78, 0x3a0d0044,
+ 0xc78, 0x3e0e0044,
+ 0xc78, 0x420f0044,
+ 0xc78, 0x44100044,
+ 0xc78, 0x46110044,
+ 0xc78, 0x4a120044,
+ 0xc78, 0x4e130044,
+ 0xc78, 0x50140044,
+ 0xc78, 0x55150044,
+ 0xc78, 0x5a160044,
+ 0xc78, 0x5e170044,
+ 0xc78, 0x64180044,
+ 0xc78, 0x6e190044,
+ 0xc78, 0x6e1a0044,
+ 0xc78, 0x6e1b0044,
+ 0xc78, 0x6e1c0044,
+ 0xc78, 0x6e1d0044,
+ 0xc78, 0x6e1e0044,
+ 0xc78, 0x6e1f0044,
+ 0xc78, 0x6e1f0000,
+};
+
+const u32 rtl8192du_agctab_5garray[AGCTAB_5G_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7a020001,
+ 0xc78, 0x79030001,
+ 0xc78, 0x78040001,
+ 0xc78, 0x77050001,
+ 0xc78, 0x76060001,
+ 0xc78, 0x75070001,
+ 0xc78, 0x74080001,
+ 0xc78, 0x73090001,
+ 0xc78, 0x720a0001,
+ 0xc78, 0x710b0001,
+ 0xc78, 0x700c0001,
+ 0xc78, 0x6f0d0001,
+ 0xc78, 0x6e0e0001,
+ 0xc78, 0x6d0f0001,
+ 0xc78, 0x6c100001,
+ 0xc78, 0x6b110001,
+ 0xc78, 0x6a120001,
+ 0xc78, 0x69130001,
+ 0xc78, 0x68140001,
+ 0xc78, 0x67150001,
+ 0xc78, 0x66160001,
+ 0xc78, 0x65170001,
+ 0xc78, 0x64180001,
+ 0xc78, 0x63190001,
+ 0xc78, 0x621a0001,
+ 0xc78, 0x611b0001,
+ 0xc78, 0x601c0001,
+ 0xc78, 0x481d0001,
+ 0xc78, 0x471e0001,
+ 0xc78, 0x461f0001,
+ 0xc78, 0x45200001,
+ 0xc78, 0x44210001,
+ 0xc78, 0x43220001,
+ 0xc78, 0x42230001,
+ 0xc78, 0x41240001,
+ 0xc78, 0x40250001,
+ 0xc78, 0x27260001,
+ 0xc78, 0x26270001,
+ 0xc78, 0x25280001,
+ 0xc78, 0x24290001,
+ 0xc78, 0x232a0001,
+ 0xc78, 0x222b0001,
+ 0xc78, 0x212c0001,
+ 0xc78, 0x202d0001,
+ 0xc78, 0x202e0001,
+ 0xc78, 0x202f0001,
+ 0xc78, 0x20300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x20330001,
+ 0xc78, 0x20340001,
+ 0xc78, 0x20350001,
+ 0xc78, 0x20360001,
+ 0xc78, 0x20370001,
+ 0xc78, 0x20380001,
+ 0xc78, 0x20390001,
+ 0xc78, 0x203a0001,
+ 0xc78, 0x203b0001,
+ 0xc78, 0x203c0001,
+ 0xc78, 0x203d0001,
+ 0xc78, 0x203e0001,
+ 0xc78, 0x203f0001,
+ 0xc78, 0x32000044,
+ 0xc78, 0x32010044,
+ 0xc78, 0x32020044,
+ 0xc78, 0x32030044,
+ 0xc78, 0x32040044,
+ 0xc78, 0x32050044,
+ 0xc78, 0x32060044,
+ 0xc78, 0x34070044,
+ 0xc78, 0x35080044,
+ 0xc78, 0x36090044,
+ 0xc78, 0x370a0044,
+ 0xc78, 0x380b0044,
+ 0xc78, 0x390c0044,
+ 0xc78, 0x3a0d0044,
+ 0xc78, 0x3e0e0044,
+ 0xc78, 0x420f0044,
+ 0xc78, 0x44100044,
+ 0xc78, 0x46110044,
+ 0xc78, 0x4a120044,
+ 0xc78, 0x4e130044,
+ 0xc78, 0x50140044,
+ 0xc78, 0x55150044,
+ 0xc78, 0x5a160044,
+ 0xc78, 0x5e170044,
+ 0xc78, 0x64180044,
+ 0xc78, 0x6e190044,
+ 0xc78, 0x6e1a0044,
+ 0xc78, 0x6e1b0044,
+ 0xc78, 0x6e1c0044,
+ 0xc78, 0x6e1d0044,
+ 0xc78, 0x6e1e0044,
+ 0xc78, 0x6e1f0044,
+ 0xc78, 0x6e1f0000,
+};
+
+const u32 rtl8192du_agctab_2garray[AGCTAB_2G_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7a070001,
+ 0xc78, 0x79080001,
+ 0xc78, 0x78090001,
+ 0xc78, 0x770a0001,
+ 0xc78, 0x760b0001,
+ 0xc78, 0x750c0001,
+ 0xc78, 0x740d0001,
+ 0xc78, 0x730e0001,
+ 0xc78, 0x720f0001,
+ 0xc78, 0x71100001,
+ 0xc78, 0x70110001,
+ 0xc78, 0x6f120001,
+ 0xc78, 0x6e130001,
+ 0xc78, 0x6d140001,
+ 0xc78, 0x6c150001,
+ 0xc78, 0x6b160001,
+ 0xc78, 0x6a170001,
+ 0xc78, 0x69180001,
+ 0xc78, 0x68190001,
+ 0xc78, 0x671a0001,
+ 0xc78, 0x661b0001,
+ 0xc78, 0x651c0001,
+ 0xc78, 0x641d0001,
+ 0xc78, 0x631e0001,
+ 0xc78, 0x621f0001,
+ 0xc78, 0x61200001,
+ 0xc78, 0x60210001,
+ 0xc78, 0x49220001,
+ 0xc78, 0x48230001,
+ 0xc78, 0x47240001,
+ 0xc78, 0x46250001,
+ 0xc78, 0x45260001,
+ 0xc78, 0x44270001,
+ 0xc78, 0x43280001,
+ 0xc78, 0x42290001,
+ 0xc78, 0x412a0001,
+ 0xc78, 0x402b0001,
+ 0xc78, 0x262c0001,
+ 0xc78, 0x252d0001,
+ 0xc78, 0x242e0001,
+ 0xc78, 0x232f0001,
+ 0xc78, 0x22300001,
+ 0xc78, 0x21310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x06330001,
+ 0xc78, 0x05340001,
+ 0xc78, 0x04350001,
+ 0xc78, 0x03360001,
+ 0xc78, 0x02370001,
+ 0xc78, 0x01380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x38000002,
+ 0xc78, 0x38010002,
+ 0xc78, 0x38020002,
+ 0xc78, 0x38030002,
+ 0xc78, 0x38040002,
+ 0xc78, 0x38050002,
+ 0xc78, 0x38060002,
+ 0xc78, 0x38070002,
+ 0xc78, 0x38080002,
+ 0xc78, 0x3c090002,
+ 0xc78, 0x3e0a0002,
+ 0xc78, 0x400b0002,
+ 0xc78, 0x440c0002,
+ 0xc78, 0x480d0002,
+ 0xc78, 0x4c0e0002,
+ 0xc78, 0x500f0002,
+ 0xc78, 0x52100002,
+ 0xc78, 0x56110002,
+ 0xc78, 0x5a120002,
+ 0xc78, 0x5e130002,
+ 0xc78, 0x60140002,
+ 0xc78, 0x60150002,
+ 0xc78, 0x60160002,
+ 0xc78, 0x62170002,
+ 0xc78, 0x62180002,
+ 0xc78, 0x62190002,
+ 0xc78, 0x621a0002,
+ 0xc78, 0x621b0002,
+ 0xc78, 0x621c0002,
+ 0xc78, 0x621d0002,
+ 0xc78, 0x621e0002,
+ 0xc78, 0x621f0002,
+ 0xc78, 0x6e1f0000,
+};
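The arrays above are flat lists of register address/value pairs: even entries are register offsets, odd entries are the values to program. The driver's MAC/PHY init code (not shown in this hunk) presumably walks them two entries at a time; below is a minimal standalone sketch of that pattern, not part of the patch, with write_reg() as a hypothetical stand-in for the real register-write routine.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static void write_reg(uint32_t addr, uint32_t val)
{
        /* Hypothetical stand-in for the driver's register write. */
        printf("reg 0x%03x <= 0x%08x\n", (unsigned int)addr, (unsigned int)val);
}

static void load_reg_table(const uint32_t *table, size_t len)
{
        size_t i;

        /* Entries come in (address, value) pairs, as in the arrays above. */
        for (i = 0; i + 1 < len; i += 2)
                write_reg(table[i], table[i + 1]);
}

int main(void)
{
        /* First two pairs of rtl8192du_mac_2tarray[], copied for the demo. */
        static const uint32_t demo[] = {
                0x420, 0x00000080,
                0x423, 0x00000000,
        };

        load_reg_table(demo, sizeof(demo) / sizeof(demo[0]));
        return 0;
}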
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h
new file mode 100644
index 000000000000..b809ba511320
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_TABLE_H__
+#define __RTL92DU_TABLE_H__
+
+#define PHY_REG_2T_ARRAYLENGTH 372
+#define PHY_REG_ARRAY_PG_LENGTH 624
+#define RADIOA_2T_ARRAYLENGTH 378
+#define RADIOB_2T_ARRAYLENGTH 384
+#define RADIOA_2T_INT_PA_ARRAYLENGTH 378
+#define RADIOB_2T_INT_PA_ARRAYLENGTH 384
+#define MAC_2T_ARRAYLENGTH 192
+#define AGCTAB_ARRAYLENGTH 386
+#define AGCTAB_5G_ARRAYLENGTH 194
+#define AGCTAB_2G_ARRAYLENGTH 194
+
+extern const u32 rtl8192du_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH];
+extern const u32 rtl8192du_radioa_2tarray[RADIOA_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_radiob_2tarray[RADIOB_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH];
+extern const u32 rtl8192du_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH];
+extern const u32 rtl8192du_mac_2tarray[MAC_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_agctab_array[AGCTAB_ARRAYLENGTH];
+extern const u32 rtl8192du_agctab_5garray[AGCTAB_5G_ARRAYLENGTH];
+extern const u32 rtl8192du_agctab_2garray[AGCTAB_2G_ARRAYLENGTH];
+
+#endif
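Each *_ARRAYLENGTH constant above has to equal the number of u32 entries in the matching table.c array (two entries per register/value pair). A standalone C sketch of a compile-time guard for that invariant follows; it is not part of the patch and uses a hypothetical demo table rather than the real ones.

#include <assert.h>
#include <stdint.h>

#define DEMO_ARRAYLENGTH 4

/* Size left open so the initializer, not the macro, decides the length. */
static const uint32_t demo_table[] = {
        0x420, 0x00000080,
        0x423, 0x00000000,
};

/* Compilation fails if the macro and the initializer ever drift apart. */
static_assert(sizeof(demo_table) / sizeof(demo_table[0]) == DEMO_ARRAYLENGTH,
              "demo_table length out of sync with DEMO_ARRAYLENGTH");

int main(void)
{
        return 0;
}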
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c
new file mode 100644
index 000000000000..743ce0cfffe6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../usb.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/trx_common.h"
+#include "trx.h"
+
+void rtl92du_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+}
+
+int rtl92du_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+struct sk_buff *rtl92du_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list)
+{
+ return skb_dequeue(list);
+}
+
+static enum rtl_desc_qsel _rtl92du_hwq_to_descq(u16 queue_index)
+{
+ switch (queue_index) {
+ case RTL_TXQ_BCN:
+ return QSLT_BEACON;
+ case RTL_TXQ_MGT:
+ return QSLT_MGNT;
+ case RTL_TXQ_VO:
+ return QSLT_VO;
+ case RTL_TXQ_VI:
+ return QSLT_VI;
+ case RTL_TXQ_BK:
+ return QSLT_BK;
+ default:
+ case RTL_TXQ_BE:
+ return QSLT_BE;
+ }
+}
+
+/* For HW recovery information */
+static void _rtl92du_tx_desc_checksum(__le32 *txdesc)
+{
+ __le16 *ptr = (__le16 *)txdesc;
+ u16 checksum = 0;
+ u32 index;
+
+ /* Clear first */
+ set_tx_desc_tx_desc_checksum(txdesc, 0);
+ for (index = 0; index < 16; index++)
+ checksum = checksum ^ le16_to_cpu(*(ptr + index));
+ set_tx_desc_tx_desc_checksum(txdesc, checksum);
+}
+
+void rtl92du_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u8 queue_index,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_sta_info *sta_entry;
+ __le16 fc = hdr->frame_control;
+ u8 agg_state = RTL_AGG_STOP;
+ u16 pktlen = skb->len;
+ u32 rts_en, hw_rts_en;
+ u8 ampdu_density = 0;
+ u16 seq_number;
+ __le32 *txdesc;
+ u8 rate_flag;
+ u8 tid;
+
+ rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
+
+ txdesc = (__le32 *)skb_push(skb, RTL_TX_HEADER_SIZE);
+ memset(txdesc, 0, RTL_TX_HEADER_SIZE);
+
+ set_tx_desc_pkt_size(txdesc, pktlen);
+ set_tx_desc_linip(txdesc, 0);
+ set_tx_desc_pkt_offset(txdesc, RTL_DUMMY_OFFSET);
+ set_tx_desc_offset(txdesc, RTL_TX_HEADER_SIZE);
+ /* 5G have no CCK rate */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ if (tcb_desc->hw_rate < DESC_RATE6M)
+ tcb_desc->hw_rate = DESC_RATE6M;
+
+ set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate);
+ if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
+ set_tx_desc_data_shortgi(txdesc, 1);
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ tcb_desc->hw_rate == DESC_RATEMCS7)
+ set_tx_desc_data_shortgi(txdesc, 1);
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid = ieee80211_get_tid(hdr);
+ agg_state = sta_entry->tids[tid].agg.agg_state;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ }
+
+ if (agg_state == RTL_AGG_OPERATIONAL &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
+ set_tx_desc_agg_enable(txdesc, 1);
+ set_tx_desc_max_agg_num(txdesc, 0x14);
+ set_tx_desc_ampdu_density(txdesc, ampdu_density);
+ tcb_desc->rts_enable = 1;
+ tcb_desc->rts_rate = DESC_RATE24M;
+ } else {
+ set_tx_desc_agg_break(txdesc, 1);
+ }
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ set_tx_desc_seq(txdesc, seq_number);
+
+ rts_en = tcb_desc->rts_enable && !tcb_desc->cts_enable;
+ hw_rts_en = tcb_desc->rts_enable || tcb_desc->cts_enable;
+ set_tx_desc_rts_enable(txdesc, rts_en);
+ set_tx_desc_hw_rts_enable(txdesc, hw_rts_en);
+ set_tx_desc_cts2self(txdesc, tcb_desc->cts_enable);
+ set_tx_desc_rts_stbc(txdesc, tcb_desc->rts_stbc);
+ /* 5G have no CCK rate */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ if (tcb_desc->rts_rate < DESC_RATE6M)
+ tcb_desc->rts_rate = DESC_RATE6M;
+ set_tx_desc_rts_rate(txdesc, tcb_desc->rts_rate);
+ set_tx_desc_rts_bw(txdesc, 0);
+ set_tx_desc_rts_sc(txdesc, tcb_desc->rts_sc);
+ set_tx_desc_rts_short(txdesc, tcb_desc->rts_use_shortpreamble);
+
+ rate_flag = info->control.rates[0].flags;
+ if (mac->bw_40) {
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
+ set_tx_desc_data_bw(txdesc, 1);
+ set_tx_desc_tx_sub_carrier(txdesc, 3);
+ } else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ set_tx_desc_data_bw(txdesc, 1);
+ set_tx_desc_tx_sub_carrier(txdesc, mac->cur_40_prime_sc);
+ } else {
+ set_tx_desc_data_bw(txdesc, 0);
+ set_tx_desc_tx_sub_carrier(txdesc, 0);
+ }
+ } else {
+ set_tx_desc_data_bw(txdesc, 0);
+ set_tx_desc_tx_sub_carrier(txdesc, 0);
+ }
+
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ set_tx_desc_sec_type(txdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ set_tx_desc_sec_type(txdesc, 0x3);
+ break;
+ default:
+ set_tx_desc_sec_type(txdesc, 0x0);
+ break;
+ }
+ }
+
+ set_tx_desc_pkt_id(txdesc, 0);
+ set_tx_desc_queue_sel(txdesc, _rtl92du_hwq_to_descq(queue_index));
+ set_tx_desc_data_rate_fb_limit(txdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(txdesc, 0xF);
+ set_tx_desc_disable_fb(txdesc, 0);
+ set_tx_desc_use_rate(txdesc, tcb_desc->use_driver_rate);
+
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
+ set_tx_desc_rdg_enable(txdesc, 1);
+ set_tx_desc_htc(txdesc, 1);
+ }
+ set_tx_desc_qos(txdesc, 1);
+ }
+
+ if (rtlpriv->dm.useramask) {
+ set_tx_desc_rate_id(txdesc, tcb_desc->ratr_index);
+ set_tx_desc_macid(txdesc, tcb_desc->mac_id);
+ } else {
+ set_tx_desc_rate_id(txdesc, 0xC + tcb_desc->ratr_index);
+ set_tx_desc_macid(txdesc, tcb_desc->ratr_index);
+ }
+
+ if (!ieee80211_is_data_qos(fc) && ppsc->leisure_ps &&
+ ppsc->fwctrl_lps) {
+ set_tx_desc_hwseq_en(txdesc, 1);
+ set_tx_desc_pkt_id(txdesc, 8);
+ }
+
+ if (ieee80211_has_morefrags(fc))
+ set_tx_desc_more_frag(txdesc, 1);
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ set_tx_desc_bmc(txdesc, 1);
+
+ set_tx_desc_own(txdesc, 1);
+ set_tx_desc_last_seg(txdesc, 1);
+ set_tx_desc_first_seg(txdesc, 1);
+ _rtl92du_tx_desc_checksum(txdesc);
+
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
+}
+
+static void _rtl92du_config_out_ep(struct ieee80211_hw *hw, u8 num_out_pipe)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 ep_cfg;
+
+ rtlusb->out_queue_sel = 0;
+ rtlusb->out_ep_nums = 0;
+
+ if (rtlhal->interfaceindex == 0)
+ ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC0);
+ else
+ ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC1);
+
+ if (ep_cfg & 0x00f) {
+ rtlusb->out_queue_sel |= TX_SELE_HQ;
+ rtlusb->out_ep_nums++;
+ }
+ if (ep_cfg & 0x0f0) {
+ rtlusb->out_queue_sel |= TX_SELE_NQ;
+ rtlusb->out_ep_nums++;
+ }
+ if (ep_cfg & 0xf00) {
+ rtlusb->out_queue_sel |= TX_SELE_LQ;
+ rtlusb->out_ep_nums++;
+ }
+
+ switch (num_out_pipe) {
+ case 3:
+ rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_NQ | TX_SELE_LQ;
+ rtlusb->out_ep_nums = 3;
+ break;
+ case 2:
+ rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_NQ;
+ rtlusb->out_ep_nums = 2;
+ break;
+ case 1:
+ rtlusb->out_queue_sel = TX_SELE_HQ;
+ rtlusb->out_ep_nums = 1;
+ break;
+ default:
+ break;
+ }
+}
+
+static void _rtl92du_one_out_ep_mapping(struct rtl_usb *rtlusb,
+ struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
+}
+
+static void _rtl92du_two_out_ep_mapping(struct rtl_usb *rtlusb,
+ struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
+}
+
+static void _rtl92du_three_out_ep_mapping(struct rtl_usb *rtlusb,
+ struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
+}
+
+static int _rtl92du_out_ep_mapping(struct ieee80211_hw *hw)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_ep_map *ep_map = &rtlusb->ep_map;
+
+ switch (rtlusb->out_ep_nums) {
+ case 1:
+ _rtl92du_one_out_ep_mapping(rtlusb, ep_map);
+ break;
+ case 2:
+ _rtl92du_two_out_ep_mapping(rtlusb, ep_map);
+ break;
+ case 3:
+ _rtl92du_three_out_ep_mapping(rtlusb, ep_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int rtl92du_endpoint_mapping(struct ieee80211_hw *hw)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ _rtl92du_config_out_ep(hw, rtlusb->out_ep_nums);
+
+ /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
+ if (rtlusb->out_ep_nums == 1 && rtlusb->in_ep_nums != 1)
+ return -EINVAL;
+
+ return _rtl92du_out_ep_mapping(hw);
+}
+
+u16 rtl92du_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
+{
+ u16 hw_queue_index;
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ hw_queue_index = RTL_TXQ_BCN;
+ goto out;
+ }
+ if (ieee80211_is_mgmt(fc)) {
+ hw_queue_index = RTL_TXQ_MGT;
+ goto out;
+ }
+
+ switch (mac80211_queue_index) {
+ case 0:
+ hw_queue_index = RTL_TXQ_VO;
+ break;
+ case 1:
+ hw_queue_index = RTL_TXQ_VI;
+ break;
+ case 2:
+ hw_queue_index = RTL_TXQ_BE;
+ break;
+ case 3:
+ hw_queue_index = RTL_TXQ_BK;
+ break;
+ default:
+ hw_queue_index = RTL_TXQ_BE;
+ WARN_ONCE(true, "rtl8192du: QSLT_BE queue, skb_queue:%d\n",
+ mac80211_queue_index);
+ break;
+ }
+out:
+ return hw_queue_index;
+}
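_rtl92du_tx_desc_checksum() above zeroes the checksum field and then XORs the descriptor's first sixteen little-endian 16-bit words together. The same arithmetic as a standalone sketch, not part of the patch, operating on a hypothetical 32-byte descriptor buffer:

#include <stdint.h>
#include <stdio.h>

/* XOR-fold the first 16 little-endian u16 words of a TX descriptor. */
static uint16_t txdesc_checksum(const uint8_t *desc)
{
        uint16_t sum = 0;
        int i;

        for (i = 0; i < 16; i++)
                sum ^= (uint16_t)(desc[2 * i] | (desc[2 * i + 1] << 8));

        return sum;
}

int main(void)
{
        /* Hypothetical descriptor; the checksum field is assumed already zeroed. */
        uint8_t desc[32] = { 0x12, 0x34 };

        printf("checksum = 0x%04x\n", txdesc_checksum(desc));
        return 0;
}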
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h
new file mode 100644
index 000000000000..8c3d24622fa7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_TRX_H__
+#define __RTL92DU_TRX_H__
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+
+#define TX_TOTAL_PAGE_NUMBER_92DU 0xF8
+#define TEST_PAGE_NUM_PUBQ_92DU 0x89
+#define TX_TOTAL_PAGE_NUMBER_92D_DUAL_MAC 0x7A
+#define NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC 0x5A
+#define NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC 0x10
+#define NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC 0x10
+#define NORMAL_PAGE_NUM_NORMALQ_92D_DUAL_MAC 0
+
+#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER 0xF5
+
+#define WMM_NORMAL_PAGE_NUM_PUBQ_92D 0x65
+#define WMM_NORMAL_PAGE_NUM_HPQ_92D 0x30
+#define WMM_NORMAL_PAGE_NUM_LPQ_92D 0x30
+#define WMM_NORMAL_PAGE_NUM_NPQ_92D 0x30
+
+#define WMM_NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC 0x32
+#define WMM_NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC 0x18
+#define WMM_NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC 0x18
+#define WMM_NORMAL_PAGE_NUM_NPQ_92D_DUAL_MAC 0x18
+
+static inline void set_tx_desc_bmc(__le32 *__txdesc, u32 __value)
+{
+ le32p_replace_bits(__txdesc, __value, BIT(24));
+}
+
+static inline void set_tx_desc_agg_break(__le32 *__txdesc, u32 __value)
+{
+ le32p_replace_bits((__txdesc + 1), __value, BIT(6));
+}
+
+static inline void set_tx_desc_tx_desc_checksum(__le32 *__txdesc, u32 __value)
+{
+ le32p_replace_bits((__txdesc + 7), __value, GENMASK(15, 0));
+}
+
+void rtl92du_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, u8 hw_queue,
+ struct rtl_tcb_desc *ptcb_desc);
+int rtl92du_endpoint_mapping(struct ieee80211_hw *hw);
+u16 rtl92du_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
+struct sk_buff *rtl92du_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list);
+void rtl92du_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
+int rtl92du_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb);
+
+#endif
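The set_tx_desc_*() inlines above are thin wrappers around le32p_replace_bits(): each one masks a field into one little-endian dword of the descriptor. Open-coded on host-endian values purely for illustration (the real descriptor dwords are __le32, and this demo is not part of the patch), setting the BMC bit and the checksum field reduces to:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t txdesc[8] = { 0 };     /* 8-dword TX descriptor, host-endian for the demo */

        /* set_tx_desc_bmc(): BIT(24) of dword 0 */
        txdesc[0] |= 1u << 24;

        /* set_tx_desc_tx_desc_checksum(): GENMASK(15, 0) of dword 7 */
        txdesc[7] = (txdesc[7] & ~0xffffu) | 0x1234u;

        printf("dword0=0x%08x dword7=0x%08x\n",
               (unsigned int)txdesc[0], (unsigned int)txdesc[7]);
        return 0;
}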
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 7bde20fdbeab..162e734d5b08 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -31,7 +31,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 675bdd32feb1..bbf8ff63dced 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -27,7 +27,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
* */
rtlpci->const_pci_aspm = 2;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index dd7505e2f22c..1b144fbd4d26 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -33,7 +33,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 162c34f0e9b7..0a92d0325098 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -32,7 +32,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 7b911695db33..a65503c5ae5a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -30,7 +30,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 2ea72d9e3957..d37a017b2b81 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -23,6 +23,8 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define MAX_USBCTRL_VENDORREQ_TIMES 10
+static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw);
+
static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype,
u16 value, void *pdata, u16 len)
{
@@ -285,9 +287,23 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
}
/* usb endpoint mapping */
err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
- rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
- _rtl_usb_init_tx(hw);
- _rtl_usb_init_rx(hw);
+ if (err)
+ return err;
+
+ rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
+
+ err = _rtl_usb_init_tx(hw);
+ if (err)
+ return err;
+
+ err = _rtl_usb_init_rx(hw);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ _rtl_usb_cleanup_tx(hw);
return err;
}
@@ -691,17 +707,13 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
}
/*======================= tx =========================================*/
-static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw)
{
u32 i;
struct sk_buff *_skb;
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
struct ieee80211_tx_info *txinfo;
- /* clean up rx stuff. */
- _rtl_usb_cleanup_rx(hw);
-
- /* clean up tx stuff */
for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
rtlusb->usb_tx_cleanup(hw, _skb);
@@ -715,6 +727,12 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw)
usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
+static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+{
+ _rtl_usb_cleanup_rx(hw);
+ _rtl_usb_cleanup_tx(hw);
+}
+
/* We may add some struct into struct rtl_usb later. Do deinit here. */
static void rtl_usb_deinit(struct ieee80211_hw *hw)
{
@@ -937,7 +955,7 @@ static const struct rtl_intf_ops rtl_usb_ops = {
int rtl_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id,
- struct rtl_hal_cfg *rtl_hal_cfg)
+ const struct rtl_hal_cfg *rtl_hal_cfg)
{
int err;
struct ieee80211_hw *hw = NULL;
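The reworked _rtl_usb_init() above now checks each step and unwinds the TX side when RX setup fails, instead of ignoring the return values. The error-handling shape is the usual init-then-unwind idiom; here is a generic standalone sketch (not driver code) with hypothetical init_a()/init_b() stubs:

#include <stdio.h>

static int init_a(void) { return 0; }                   /* hypothetical: succeeds */
static void cleanup_a(void) { puts("cleanup_a"); }
static int init_b(void) { return -1; }                  /* hypothetical: fails */

static int setup(void)
{
        int err;

        err = init_a();
        if (err)
                return err;

        err = init_b();
        if (err)
                goto err_cleanup_a;     /* undo only what already succeeded */

        return 0;

err_cleanup_a:
        cleanup_a();
        return err;
}

int main(void)
{
        printf("setup() = %d\n", setup());
        return 0;
}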
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index 12529afc0510..b66d6f9ae564 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -136,7 +136,7 @@ struct rtl_usb_priv {
int rtl_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id,
- struct rtl_hal_cfg *rtl92cu_hal_cfg);
+ const struct rtl_hal_cfg *rtl92cu_hal_cfg);
void rtl_usb_disconnect(struct usb_interface *intf);
int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
int rtl_usb_resume(struct usb_interface *pusb_intf);
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 442419568734..ae6e351bc83c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2356,9 +2356,9 @@ struct rtl_hal_cfg {
bool write_readback;
char *name;
char *alt_fw_name;
- struct rtl_hal_ops *ops;
+ const struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
- struct rtl_hal_usbint_cfg *usb_interface_cfg;
+ const struct rtl_hal_usbint_cfg *usb_interface_cfg;
enum rtl_spec_ver spec_ver;
/*this map used for some registers or vars
@@ -2707,7 +2707,7 @@ struct rtl_priv {
/* hal_cfg : for diff cards
* intf_ops : for diff interrface usb/pcie
*/
- struct rtl_hal_cfg *cfg;
+ const struct rtl_hal_cfg *cfg;
const struct rtl_intf_ops *intf_ops;
/* this var will be set by set_bit,
@@ -2746,6 +2746,12 @@ struct rtl_priv {
*/
bool use_new_trx_flow;
+ /* For dual MAC RTL8192DU, things shared by the 2 USB interfaces */
+ u32 *curveindex_2g;
+ u32 *curveindex_5g;
+ struct mutex *mutex_for_power_on_off; /* for power on/off */
+ struct mutex *mutex_for_hw_init; /* for hardware init */
+
#ifdef CONFIG_PM
struct wiphy_wowlan_support wowlan;
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 0dba8aae7716..564f5988ee82 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -1201,6 +1201,15 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev,
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
+
+ if (rtwdev->hci.type == RTW_HCI_TYPE_USB) {
+ rtw_write8_mask(rtwdev, REG_AUTO_LLT_V1, BIT_MASK_BLK_DESC_NUM,
+ chip->usb_tx_agg_desc_num);
+
+ rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3, chip->usb_tx_agg_desc_num);
+ rtw_write8_set(rtwdev, REG_TXDMA_OFFSET_CHK + 1, BIT(1));
+ }
+
rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 0acebbfa13c4..63326b352738 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -62,7 +62,7 @@ static int rtw_ops_start(struct ieee80211_hw *hw)
return ret;
}
-static void rtw_ops_stop(struct ieee80211_hw *hw)
+static void rtw_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtw_dev *rtwdev = hw->priv;
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 49894331f7b4..49a3fd4fb7dc 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -1197,6 +1197,8 @@ struct rtw_chip_info {
u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
const struct rtw_fwcd_segs *fwcd_segs;
+ u8 usb_tx_agg_desc_num;
+
u8 default_1ss_tx_path;
bool path_div_supported;
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 30232f7e3ec5..a5b9d6c7be37 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1682,12 +1682,16 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
+static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- init_dummy_netdev(&rtwpci->netdev);
- netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
+ rtwpci->netdev = alloc_netdev_dummy(0);
+ if (!rtwpci->netdev)
+ return -ENOMEM;
+
+ netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
+ return 0;
}
static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
@@ -1696,6 +1700,7 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
rtw_pci_napi_stop(rtwdev);
netif_napi_del(&rtwpci->napi);
+ free_netdev(rtwpci->netdev);
}
int rtw_pci_probe(struct pci_dev *pdev,
@@ -1745,7 +1750,11 @@ int rtw_pci_probe(struct pci_dev *pdev,
goto err_pci_declaim;
}
- rtw_pci_napi_init(rtwdev);
+ ret = rtw_pci_napi_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup NAPI\n");
+ goto err_pci_declaim;
+ }
ret = rtw_chip_info_setup(rtwdev);
if (ret) {
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 0c37efd8c66f..13988db1cb4c 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -215,7 +215,7 @@ struct rtw_pci {
bool running;
/* napi structure */
- struct net_device netdev;
+ struct net_device *netdev;
struct napi_struct napi;
u16 rx_tag;
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index b122f226924b..02ef9a77316b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -270,6 +270,7 @@
#define BIT_MASK_BCN_HEAD_1_V1 0xfff
#define REG_AUTO_LLT_V1 0x0208
#define BIT_AUTO_INIT_LLT_V1 BIT(0)
+#define BIT_MASK_BLK_DESC_NUM GENMASK(7, 4)
#define REG_DWBCN0_CTRL 0x0208
#define BIT_BCN_VALID BIT(16)
#define REG_TXDMA_OFFSET_CHK 0x020C
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index 8919f9e11f03..222608de33cd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -2013,6 +2013,7 @@ const struct rtw_chip_info rtw8703b_hw_spec = {
.tx_stbc = false,
.max_power_index = 0x3f,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .usb_tx_agg_desc_num = 1, /* Not sure if this chip has USB interface */
.path_div_supported = false,
.ht_supported = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index f8df4c84d39f..3fba4054d45f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2171,6 +2171,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.band = RTW_BAND_2G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
+ .usb_tx_agg_desc_num = 1,
.ht_supported = true,
.vht_supported = false,
.lps_deep_mode_supported = 0,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index fe5d8e188350..526e8de77b3e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -2008,6 +2008,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .usb_tx_agg_desc_num = 3,
.ht_supported = true,
.vht_supported = true,
.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 3017a9760da8..2456ff242818 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2548,6 +2548,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .usb_tx_agg_desc_num = 3,
.ht_supported = true,
.vht_supported = true,
.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index cd965edc29ce..62376d1cca22 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -5366,6 +5366,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
+ .usb_tx_agg_desc_num = 3,
.default_1ss_tx_path = BB_PATH_A,
.path_div_supported = true,
.ht_supported = true,
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index a0188511099a..a55ca5a24227 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -273,6 +273,8 @@ static void rtw_usb_write_port_tx_complete(struct urb *urb)
info = IEEE80211_SKB_CB(skb);
tx_data = rtw_usb_get_tx_data(skb);
+ skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
+
/* enqueue to wait for tx report */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
@@ -377,7 +379,9 @@ static bool rtw_usb_tx_agg_skb(struct rtw_usb *rtwusb, struct sk_buff_head *list
skb_iter = skb_peek(list);
- if (skb_iter && skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ)
+ if (skb_iter &&
+ skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ &&
+ agg_num < rtwdev->chip->usb_tx_agg_desc_num)
__skb_unlink(skb_iter, list);
else
skb_iter = NULL;
@@ -433,23 +437,21 @@ static int rtw_usb_write_data(struct rtw_dev *rtwdev,
{
const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
- unsigned int desclen, headsize, size;
+ unsigned int size;
u8 qsel;
int ret = 0;
size = pkt_info->tx_pkt_size;
qsel = pkt_info->qsel;
- desclen = chip->tx_pkt_desc_sz;
- headsize = pkt_info->offset ? pkt_info->offset : desclen;
- skb = dev_alloc_skb(headsize + size);
+ skb = dev_alloc_skb(chip->tx_pkt_desc_sz + size);
if (unlikely(!skb))
return -ENOMEM;
- skb_reserve(skb, headsize);
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
skb_put_data(skb, buf, size);
- skb_push(skb, headsize);
- memset(skb->data, 0, headsize);
+ skb_push(skb, chip->tx_pkt_desc_sz);
+ memset(skb->data, 0, chip->tx_pkt_desc_sz);
rtw_tx_fill_tx_desc(pkt_info, skb);
rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);
@@ -740,7 +742,6 @@ static struct rtw_hci_ops rtw_usb_ops = {
static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
- int i;
rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
if (!rtwusb->rxwq) {
@@ -752,13 +753,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
+ return 0;
+}
+
+static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ int i;
+
for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
rtw_usb_rx_resubmit(rtwusb, rxcb);
}
-
- return 0;
}
static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
@@ -895,6 +902,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err_destroy_rxwq;
}
+ rtw_usb_setup_rx(rtwdev);
+
return 0;
err_destroy_rxwq:
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index eaea4eaeb361..3c9f864805b1 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -22,6 +22,9 @@ config RTW89_8851B
config RTW89_8852A
tristate
+config RTW89_8852B_COMMON
+ tristate
+
config RTW89_8852B
tristate
@@ -59,6 +62,7 @@ config RTW89_8852BE
select RTW89_CORE
select RTW89_PCI
select RTW89_8852B
+ select RTW89_8852B_COMMON
help
Select this option will enable support for 8852BE chipset
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 86a553fb0136..1f1050a7a89d 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -17,7 +17,8 @@ rtw89_core-y += core.o \
ps.o \
chan.o \
ser.o \
- acpi.o
+ acpi.o \
+ util.o
rtw89_core-$(CONFIG_PM) += wow.o
@@ -39,6 +40,9 @@ rtw89_8852a-objs := rtw8852a.o \
obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
rtw89_8852ae-objs := rtw8852ae.o
+obj-$(CONFIG_RTW89_8852B_COMMON) += rtw89_8852b_common.o
+rtw89_8852b_common-objs := rtw8852b_common.o
+
obj-$(CONFIG_RTW89_8852B) += rtw89_8852b.o
rtw89_8852b-objs := rtw8852b.o \
rtw8852b_table.o \
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 1864f543a6c6..4557c6e035a9 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -211,6 +211,46 @@ static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
return 0;
}
+static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const struct rtw89_sec_cam_entry *sec_cam,
+ bool inform_fw)
+{
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ struct rtw89_vif *rtwvif;
+ struct rtw89_addr_cam_entry *addr_cam;
+ unsigned int i;
+ int ret = 0;
+
+ if (!vif) {
+ rtw89_err(rtwdev, "No iface for deleting sec cam\n");
+ return -EINVAL;
+ }
+
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) {
+ if (addr_cam->sec_ent[i] != sec_cam->sec_cam_idx)
+ continue;
+
+ clear_bit(i, addr_cam->sec_cam_map);
+ }
+
+ if (inform_fw) {
+ ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ rtw89_err(rtwdev,
+ "failed to update dctl cam del key: %d\n", ret);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ if (ret)
+ rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
+ }
+
+ return ret;
+}
+
static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -242,10 +282,8 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return ret;
}
- key->hw_key_idx = key_idx;
addr_cam->sec_ent_keyid[key_idx] = key->keyidx;
addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx;
- addr_cam->sec_entries[key_idx] = sec_cam;
set_bit(key_idx, addr_cam->sec_cam_map);
ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
if (ret) {
@@ -258,7 +296,6 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n",
ret);
clear_bit(key_idx, addr_cam->sec_cam_map);
- addr_cam->sec_entries[key_idx] = NULL;
return ret;
}
@@ -295,6 +332,9 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
goto err_release_cam;
}
+ key->hw_key_idx = sec_cam_idx;
+ cam_info->sec_entries[sec_cam_idx] = sec_cam;
+
sec_cam->sec_cam_idx = sec_cam_idx;
sec_cam->type = hw_key_type;
sec_cam->len = RTW89_SEC_CAM_LEN;
@@ -316,6 +356,7 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
return 0;
err_release_cam:
+ cam_info->sec_entries[sec_cam_idx] = NULL;
kfree(sec_cam);
clear_bit(sec_cam_idx, cam_info->sec_cam_map);
if (ext_key)
@@ -386,42 +427,22 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
struct ieee80211_key_conf *key,
bool inform_fw)
{
- struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
- struct rtw89_vif *rtwvif;
- struct rtw89_addr_cam_entry *addr_cam;
- struct rtw89_sec_cam_entry *sec_cam;
- u8 key_idx = key->hw_key_idx;
+ const struct rtw89_sec_cam_entry *sec_cam;
u8 sec_cam_idx;
- int ret = 0;
-
- if (!vif) {
- rtw89_err(rtwdev, "No iface for deleting sec cam\n");
- return -EINVAL;
- }
+ int ret;
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
- sec_cam = addr_cam->sec_entries[key_idx];
+ sec_cam_idx = key->hw_key_idx;
+ sec_cam = cam_info->sec_entries[sec_cam_idx];
if (!sec_cam)
return -EINVAL;
- /* detach sec cam from addr cam */
- clear_bit(key_idx, addr_cam->sec_cam_map);
- addr_cam->sec_entries[key_idx] = NULL;
- if (inform_fw) {
- ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
- if (ret)
- rtw89_err(rtwdev, "failed to update dctl cam del key: %d\n", ret);
- ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
- if (ret)
- rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
- }
+ ret = rtw89_cam_detach_sec_cam(rtwdev, vif, sta, sec_cam, inform_fw);
/* clear valid bit in addr cam will disable sec cam,
* so we don't need to send H2C command again
*/
- sec_cam_idx = sec_cam->sec_cam_idx;
+ cam_info->sec_entries[sec_cam_idx] = NULL;
clear_bit(sec_cam_idx, cam_info->sec_cam_map);
if (sec_cam->ext_key)
clear_bit(sec_cam_idx + 1, cam_info->sec_cam_map);
@@ -502,6 +523,7 @@ static u8 rtw89_get_addr_cam_entry_size(struct rtw89_dev *rtwdev)
case RTL8852A:
case RTL8852B:
case RTL8851B:
+ case RTL8852BT:
return ADDR_CAM_ENT_SIZE;
default:
return ADDR_CAM_ENT_SHORT_SIZE;
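In the cam.c changes above, key->hw_key_idx now holds the security CAM index itself, and the entry is looked up in cam_info->sec_entries[] both at TX time and on deletion, rather than going through the per-address-CAM table. A toy standalone sketch of that store-the-index/look-it-up-later pattern, with hypothetical types in place of the rtw89 structures (not part of the patch):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ENTRIES 8

struct sec_entry { int type; };

static struct sec_entry *entries[MAX_ENTRIES];  /* analogue of cam_info->sec_entries[] */

/* Install: allocate a slot, remember its index in the caller's key. */
static int install_key(unsigned int *hw_key_idx)
{
        unsigned int i;

        for (i = 0; i < MAX_ENTRIES; i++) {
                if (!entries[i]) {
                        entries[i] = calloc(1, sizeof(*entries[i]));
                        if (!entries[i])
                                return -1;
                        *hw_key_idx = i;        /* like key->hw_key_idx = sec_cam_idx */
                        return 0;
                }
        }
        return -1;
}

/* Delete: the stored index is all that is needed to find the entry again. */
static void delete_key(unsigned int hw_key_idx)
{
        free(entries[hw_key_idx]);
        entries[hw_key_idx] = NULL;
}

int main(void)
{
        unsigned int idx;

        if (!install_key(&idx)) {
                printf("installed at %u\n", idx);
                delete_key(idx);
        }
        return 0;
}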
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 051a3cad6101..7f90d93dcdc0 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -141,6 +141,28 @@ bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
return band_changed;
}
+int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
+ int (*iterator)(const struct rtw89_chan *chan,
+ void *data),
+ void *data)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan;
+ int ret;
+ u8 idx;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ chan = rtw89_chan_get(rtwdev, idx);
+ ret = iterator(chan, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx,
const struct cfg80211_chan_def *chandef,
@@ -2322,7 +2344,6 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx2)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_sub_entity tmp;
struct rtw89_vif *rtwvif;
u8 cur;
@@ -2332,9 +2353,7 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
hal->sub[idx1].cfg->idx = idx2;
hal->sub[idx2].cfg->idx = idx1;
- tmp = hal->sub[idx1];
- hal->sub[idx1] = hal->sub[idx2];
- hal->sub[idx2] = tmp;
+ swap(hal->sub[idx1], hal->sub[idx2]);
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (!rtwvif->chanctx_assigned)
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index ffa412f281f3..5278ff8c513b 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -78,6 +78,10 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx,
const struct rtw89_chan *new);
+int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
+ int (*iterator)(const struct rtw89_chan *chan,
+ void *data),
+ void *data);
void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx,
const struct cfg80211_chan_def *chandef);
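rtw89_iterate_entity_chan(), added above, walks every active channel entity and hands each rtw89_chan to a caller-supplied callback, stopping on the first non-zero return. A minimal standalone sketch of that callback contract, not part of the patch, with a hypothetical demo_chan type standing in for struct rtw89_chan:

#include <stdio.h>

/* Hypothetical stand-in for struct rtw89_chan. */
struct demo_chan {
        int band;       /* 0 = 2 GHz, 1 = 5 GHz (demo encoding) */
};

/* Same contract as rtw89_iterate_entity_chan(): stop on first non-zero return. */
static int iterate_chans(const struct demo_chan *chans, unsigned int n,
                         int (*iterator)(const struct demo_chan *chan, void *data),
                         void *data)
{
        unsigned int i;
        int ret;

        for (i = 0; i < n; i++) {
                ret = iterator(&chans[i], data);
                if (ret)
                        return ret;
        }
        return 0;
}

static int count_5ghz(const struct demo_chan *chan, void *data)
{
        int *count = data;

        if (chan->band == 1)
                (*count)++;
        return 0;       /* keep iterating */
}

int main(void)
{
        struct demo_chan chans[] = { { 0 }, { 1 }, { 1 } };
        int count = 0;

        iterate_chans(chans, 3, count_5ghz, &count);
        printf("5 GHz entities: %d\n", count);
        return 0;
}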
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index c443b39ab3c6..24929ef534e0 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -91,7 +91,7 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
[CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
- [CXST_EBT] = __DEF_FBTC_SLOT(0, 0xe5555555, SLOT_MIX),
+ [CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
[CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
@@ -228,6 +228,7 @@ static u32 chip_id_to_bt_rom_code_id(u32 id)
case RTL8852A:
case RTL8852B:
case RTL8852C:
+ case RTL8852BT:
return 0x8852;
case RTL8851B:
return 0x8851;
@@ -3616,6 +3617,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u8 type, null_role;
u32 tbl_w1, tbl_b1, tbl_b4;
+ u16 dur_2;
type = FIELD_GET(BTC_CXP_MASK, policy_type);
@@ -3726,7 +3728,21 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
if (hid->exist || hfp->exist)
tbl_w1 = cxtbl[16];
+ dur_2 = dm->e2g_slot_limit;
+
switch (policy_type) {
+ case BTC_CXP_OFFE_2GBWISOB: /* for normal-case */
+ _slot_set(btc, CXST_E2G, 0, tbl_w1, SLOT_ISO);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
+ break;
+ case BTC_CXP_OFFE_2GISOB: /* for bt no-link */
+ _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_ISO);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
+ break;
case BTC_CXP_OFFE_DEF:
_slot_set_le(btc, CXST_E2G, s_def[CXST_E2G].dur,
s_def[CXST_E2G].cxtbl, s_def[CXST_E2G].cxtype);
@@ -3746,6 +3762,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
break;
+ case BTC_CXP_OFFE_2GBWMIXB:
+ _slot_set(btc, CXST_E2G, 0, 0x55555555, SLOT_MIX);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ cpu_to_le32(0x55555555), s_def[CXST_EBT].cxtype);
+ break;
+ case BTC_CXP_OFFE_WL: /* for 4-way */
+ _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_MIX);
+ _slot_set(btc, CXST_EBT, 0, cxtbl[1], SLOT_MIX);
+ break;
default:
break;
}
@@ -9514,7 +9539,7 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
u32 val, status;
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B) {
+ chip->chip_id == RTL8851B || chip->chip_id == RTL8852BT) {
rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index ddc390d24ec1..7019f7d482a8 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -499,31 +499,21 @@ static void
rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct ieee80211_vif *vif = tx_req->vif;
- struct ieee80211_sta *sta = tx_req->sta;
+ const struct rtw89_sec_cam_entry *sec_cam;
struct ieee80211_tx_info *info;
struct ieee80211_key_conf *key;
- struct rtw89_vif *rtwvif;
- struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
- struct rtw89_addr_cam_entry *addr_cam;
- struct rtw89_sec_cam_entry *sec_cam;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
u8 sec_type = RTW89_SEC_KEY_TYPE_NONE;
+ u8 sec_cam_idx;
u64 pn64;
- if (!vif) {
- rtw89_warn(rtwdev, "cannot set sec key without vif\n");
- return;
- }
-
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
-
info = IEEE80211_SKB_CB(skb);
key = info->control.hw_key;
- sec_cam = addr_cam->sec_entries[key->hw_key_idx];
+ sec_cam_idx = key->hw_key_idx;
+ sec_cam = cam_info->sec_entries[sec_cam_idx];
if (!sec_cam) {
rtw89_warn(rtwdev, "sec cam entry is empty\n");
return;
@@ -823,6 +813,8 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->port = desc_info->hiq ? rtwvif->port : 0;
desc_info->er_cap = rtwsta ? rtwsta->er_cap : false;
+ desc_info->stbc = rtwsta ? rtwsta->ra.stbc_cap : false;
+ desc_info->ldpc = rtwsta ? rtwsta->ra.ldpc_cap : false;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
@@ -1137,6 +1129,8 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) |
FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
@@ -1145,7 +1139,9 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0);
@@ -1311,7 +1307,9 @@ static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ u32 dword = FIELD_PREP(BE_TXD_INFO0_DATA_STBC, desc_info->stbc) |
+ FIELD_PREP(BE_TXD_INFO0_DATA_LDPC, desc_info->ldpc) |
+ FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
@@ -1559,6 +1557,12 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
u32 t;
phy_ppdu->chan_idx = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_CH_IDX);
+
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) {
+ phy_ppdu->ldpc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_LDPC);
+ phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC);
+ }
+
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
@@ -1917,7 +1921,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
return;
if (ieee80211_is_beacon(hdr->frame_control)) {
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
}
@@ -1984,6 +1989,23 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
rx_status->rate_idx -= 4;
}
+static
+void rtw89_core_update_rx_status_by_ppdu(struct rtw89_dev *rtwdev,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR))
+ return;
+
+ if (!phy_ppdu)
+ return;
+
+ if (phy_ppdu->ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ if (phy_ppdu->stbc)
+ rx_status->enc_flags |= u8_encode_bits(1, RX_ENC_FLAG_STBC_MASK);
+}
+
static const u8 rx_status_bw_to_radiotap_eht_usig[] = {
[RATE_INFO_BW_20] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ,
[RATE_INFO_BW_5] = U8_MAX,
@@ -2027,10 +2049,14 @@ static void rtw89_core_update_radiotap_eht(struct rtw89_dev *rtwdev,
eht->user_info[0] =
cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O);
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN);
eht->user_info[0] |=
le32_encode_bits(rx_status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
le32_encode_bits(rx_status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
+ if (rx_status->enc_flags & RX_ENC_FLAG_LDPC)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
/* U-SIG */
tlv = (void *)tlv + sizeof(*tlv) + ALIGN(eht_len, 4);
@@ -2056,6 +2082,8 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
{
static const struct ieee80211_radiotap_he known_he = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
@@ -2087,6 +2115,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
@@ -2492,11 +2521,15 @@ void rtw89_core_napi_stop(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_napi_stop);
-void rtw89_core_napi_init(struct rtw89_dev *rtwdev)
+int rtw89_core_napi_init(struct rtw89_dev *rtwdev)
{
- init_dummy_netdev(&rtwdev->netdev);
- netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
+ rtwdev->netdev = alloc_netdev_dummy(0);
+ if (!rtwdev->netdev)
+ return -ENOMEM;
+
+ netif_napi_add(rtwdev->netdev, &rtwdev->napi,
rtwdev->hci.ops->napi_poll);
+ return 0;
}
EXPORT_SYMBOL(rtw89_core_napi_init);
@@ -2504,6 +2537,7 @@ void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev)
{
rtw89_core_napi_stop(rtwdev);
netif_napi_del(&rtwdev->napi);
+ free_netdev(rtwdev->netdev);
}
EXPORT_SYMBOL(rtw89_core_napi_deinit);
@@ -3342,20 +3376,23 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
/* for station mode, assign the mac_id from itself */
rtwsta->mac_id = rtwvif->mac_id;
- /* must do rtw89_reg_6ghz_power_recalc() before rfk channel */
- rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, true);
+
+ /* must do rtw89_reg_6ghz_recalc() before rfk channel */
+ ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true);
+ if (ret)
+ return ret;
+
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
- RTW89_MAX_MAC_ID_NUM);
+ rtwsta->mac_id = rtw89_acquire_mac_id(rtwdev);
if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
return -ENOSPC;
ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
if (ret) {
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
return ret;
}
@@ -3363,7 +3400,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
RTW89_ROLE_CREATE);
if (ret) {
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
rtw89_warn(rtwdev, "failed to send h2c role info\n");
return ret;
}
@@ -3532,11 +3569,11 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
int ret;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
- rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, false);
+ rtw89_reg_6ghz_recalc(rtwdev, rtwvif, false);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
RTW89_ROLE_REMOVE);
@@ -4022,15 +4059,15 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
{
struct completion *cmpl = &wait->completion;
- unsigned long timeout;
+ unsigned long time_left;
unsigned int cur;
cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
if (cur != RTW89_WAIT_COND_IDLE)
return -EBUSY;
- timeout = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
- if (timeout == 0) {
+ time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
+ if (time_left == 0) {
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
return -ETIMEDOUT;
}
@@ -4186,6 +4223,25 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
rtw89_hci_reset(rtwdev);
}
+u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 mac_id_num = chip->support_macid_num;
+ u8 mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
+ if (mac_id == mac_id_num)
+ return RTW89_MAX_MAC_ID_NUM;
+
+ set_bit(mac_id, rtwdev->mac_id_map);
+ return mac_id;
+}
+
+void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id)
+{
+ clear_bit(mac_id, rtwdev->mac_id_map);
+}
+
int rtw89_core_init(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4331,7 +4387,7 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
rtwdev->hal.cv = cv;
- if (chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) {
+ if (rtw89_is_rtl885xb(rtwdev)) {
ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
if (ret)
return;
@@ -4475,6 +4531,10 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
+
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, MFP_CAPABLE);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 112bdd95fc6e..11fa003a9788 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -132,6 +132,7 @@ enum rtw89_hci_type {
enum rtw89_core_chip_id {
RTL8852A,
RTL8852B,
+ RTL8852BT,
RTL8852C,
RTL8851B,
RTL8922A,
@@ -745,6 +746,14 @@ enum rtw89_reg_6ghz_power {
RTW89_REG_6GHZ_POWER_DFLT = RTW89_REG_6GHZ_POWER_VLP,
};
+#define RTW89_MIN_VALID_POWER_CONSTRAINT (-10) /* unit: dBm */
+
+/* calculated from the ieee80211 Transmit Power Envelope */
+struct rtw89_reg_6ghz_tpe {
+ bool valid;
+ s8 constraint; /* unit: dBm */
+};
+
enum rtw89_fw_pkt_ofld_type {
RTW89_PKT_OFLD_TYPE_PROBE_RSP = 0,
RTW89_PKT_OFLD_TYPE_PS_POLL = 1,
@@ -793,6 +802,8 @@ struct rtw89_rx_phy_ppdu {
u8 evm_max;
u8 evm_min;
} ofdm;
+ bool ldpc;
+ bool stbc;
bool to_self;
bool valid;
};
@@ -884,6 +895,13 @@ enum rtw89_ps_mode {
#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
+enum rtw89_pe_duration {
+ RTW89_PE_DURATION_0 = 0,
+ RTW89_PE_DURATION_8 = 1,
+ RTW89_PE_DURATION_16 = 2,
+ RTW89_PE_DURATION_16_20 = 3,
+};
+
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
RTW89_RU52 = 1,
@@ -1129,6 +1147,8 @@ struct rtw89_tx_desc_info {
bool hiq;
u8 port;
bool er_cap;
+ bool stbc;
+ bool ldpc;
};
struct rtw89_core_tx_request {
@@ -1318,6 +1338,7 @@ struct rtw89_btc_wl_smap {
u32 scan: 1;
u32 connecting: 1;
u32 roaming: 1;
+ u32 dbccing: 1;
u32 transacting: 1;
u32 _4way: 1;
u32 rf_off: 1;
@@ -3249,7 +3270,6 @@ struct rtw89_addr_cam_entry {
DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
u8 sec_ent_keyid[RTW89_SEC_CAM_IN_ADDR_CAM];
u8 sec_ent[RTW89_SEC_CAM_IN_ADDR_CAM];
- struct rtw89_sec_cam_entry *sec_entries[RTW89_SEC_CAM_IN_ADDR_CAM];
};
struct rtw89_bssid_cam_entry {
@@ -3385,6 +3405,7 @@ struct rtw89_vif {
bool chanctx_assigned; /* only valid when running with chanctx_ops */
enum rtw89_sub_entity_idx sub_entity_idx;
enum rtw89_reg_6ghz_power reg_6ghz_power;
+ struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
u8 mac_id;
u8 port;
@@ -4144,6 +4165,7 @@ struct rtw89_chip_info {
u8 wde_qempty_acq_grpnum;
u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
+ u8 support_macid_num;
u8 support_chanctx_num;
u8 support_bands;
u16 support_bandwidths;
@@ -4224,7 +4246,7 @@ struct rtw89_chip_info {
const u32 *c2h_regs;
struct rtw89_reg_def c2h_counter_reg;
const struct rtw89_page_regs *page_regs;
- u32 wow_reason_reg;
+ const u32 *wow_reason_reg;
bool cfo_src_fd;
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
@@ -4334,6 +4356,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
+ RTW89_FW_FEATURE_WOW_REASON_V1,
};
struct rtw89_fw_suit {
@@ -4436,6 +4459,7 @@ struct rtw89_cam_info {
DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
DECLARE_BITMAP(ba_cam_map, RTW89_MAX_BA_CAM_NUM);
struct rtw89_ba_cam_entry ba_cam_entry[RTW89_MAX_BA_CAM_NUM];
+ const struct rtw89_sec_cam_entry *sec_entries[RTW89_MAX_SEC_CAM_NUM];
};
enum rtw89_sar_sources {
@@ -4671,7 +4695,12 @@ struct rtw89_dack_info {
bool msbk_timeout[RTW89_DACK_PATH_NR];
};
-#define RTW89_RFK_CHS_NR 3
+enum rtw89_rfk_chs_nrs {
+ __RTW89_RFK_CHS_NR_V0 = 2,
+ __RTW89_RFK_CHS_NR_V1 = 3,
+
+ RTW89_RFK_CHS_NR = __RTW89_RFK_CHS_NR_V1,
+};
struct rtw89_rfk_mcc_info {
u8 ch[RTW89_RFK_CHS_NR];
@@ -4750,6 +4779,8 @@ struct rtw89_dpk_info {
u8 cur_idx[RTW89_DPK_RF_PATH];
u8 cur_k_set;
struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u8 max_dpk_txagc[RTW89_DPK_RF_PATH];
+ u32 dpk_order[RTW89_DPK_RF_PATH];
};
struct rtw89_fem_info {
@@ -4924,6 +4955,7 @@ struct rtw89_regd {
struct rtw89_regulatory_info {
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
+ struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
@@ -5469,7 +5501,7 @@ struct rtw89_dev {
struct rtw89_wow_param wow;
/* napi structure */
- struct net_device netdev;
+ struct net_device *netdev;
struct napi_struct napi;
int napi_budget_countdown;
@@ -6408,6 +6440,16 @@ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
}
}
+static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+ if (chip_id == RTL8852B || chip_id == RTL8851B || chip_id == RTL8852BT)
+ return true;
+
+ return false;
+}
+
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -6441,7 +6483,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
u8 *data, u32 data_offset);
void rtw89_core_napi_start(struct rtw89_dev *rtwdev);
void rtw89_core_napi_stop(struct rtw89_dev *rtwdev);
-void rtw89_core_napi_init(struct rtw89_dev *rtwdev);
+int rtw89_core_napi_init(struct rtw89_dev *rtwdev);
void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
@@ -6470,6 +6512,8 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
u32 bus_data_size,
const struct rtw89_chip_info *chip);
void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
+u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev);
+void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id);
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
@@ -6506,8 +6550,8 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
const u8 *mac_addr, bool hw_scan);
void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif, bool hw_scan);
-void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool active);
+int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool active);
void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index affffc4092ba..9e1353cce9cc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -818,6 +818,28 @@ static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] =
[RTW89_CHIP_BE] = &dbgfs_txpwr_table_be,
};
+static
+void rtw89_debug_priv_txpwr_table_get_regd(struct seq_file *m,
+ struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_reg_6ghz_tpe *tpe6 = &regulatory->reg_6ghz_tpe;
+
+ seq_printf(m, "[Chanctx] band %u, ch %u, bw %u\n",
+ chan->band_type, chan->channel, chan->band_width);
+
+ seq_puts(m, "[Regulatory] ");
+ __print_regd(m, rtwdev, chan);
+
+ if (chan->band_type == RTW89_BAND_6G) {
+ seq_printf(m, "[reg6_pwr_type] %u\n", regulatory->reg_6ghz_power);
+
+ if (tpe6->valid)
+ seq_printf(m, "[TPE] %d dBm\n", tpe6->constraint);
+ }
+}
+
static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
{
struct rtw89_debugfs_priv *debugfs_priv = m->private;
@@ -831,8 +853,7 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
rtw89_leave_ps_mode(rtwdev);
chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- seq_puts(m, "[Regulatory] ");
- __print_regd(m, rtwdev, chan);
+ rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan);
seq_puts(m, "[SAR]\n");
rtw89_print_sar(m, rtwdev, chan->freq);
@@ -2996,7 +3017,7 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
sel >= RTW89_DBG_PORT_SEL_PCIE_TXDMA &&
sel <= RTW89_DBG_PORT_SEL_PCIE_MISC2)
return false;
- if (rtwdev->chip->chip_id == RTL8852B &&
+ if (rtw89_is_rtl885xb(rtwdev) &&
sel >= RTW89_DBG_PORT_SEL_PTCL_C1 &&
sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1)
return false;
@@ -3531,7 +3552,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
case RX_ENC_HE:
seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
- he_gi_str[rate->he_gi] : "N/A");
+ he_gi_str[status->he_gi] : "N/A");
break;
case RX_ENC_EHT:
seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
@@ -3645,17 +3666,21 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
}
static void rtw89_dump_addr_cam(struct seq_file *m,
+ struct rtw89_dev *rtwdev,
struct rtw89_addr_cam_entry *addr_cam)
{
- struct rtw89_sec_cam_entry *sec_entry;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ const struct rtw89_sec_cam_entry *sec_entry;
+ u8 sec_cam_idx;
int i;
seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
addr_cam->sec_cam_map);
- for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
- sec_entry = addr_cam->sec_entries[i];
+ for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) {
+ sec_cam_idx = addr_cam->sec_ent[i];
+ sec_entry = cam_info->sec_entries[sec_cam_idx];
if (!sec_entry)
continue;
seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
@@ -3694,12 +3719,13 @@ static
void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
struct seq_file *m = (struct seq_file *)data;
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
- rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+ rtw89_dump_addr_cam(m, rtwdev, &rtwvif->addr_cam);
rtw89_dump_pkt_offload(m, &rtwvif->general_pkt_list, "\tpkt_ofld[GENERAL]: ");
}
@@ -3726,11 +3752,12 @@ static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr,
sta->tdls ? "(TDLS)" : "");
- rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+ rtw89_dump_addr_cam(m, rtwdev, &rtwsta->addr_cam);
rtw89_dump_ba_cam(m, rtwsta);
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 044a5b90c7f4..fbe08c162b93 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -13,22 +13,20 @@
#include "ps.h"
#include "reg.h"
#include "util.h"
+#include "wow.h"
struct rtw89_eapol_2_of_2 {
- struct ieee80211_hdr_3addr hdr;
u8 gtkbody[14];
u8 key_des_ver;
u8 rsvd[92];
-} __packed __aligned(2);
+} __packed;
struct rtw89_sa_query {
- struct ieee80211_hdr_3addr hdr;
u8 category;
u8 action;
-} __packed __aligned(2);
+} __packed;
struct rtw89_arp_rsp {
- struct ieee80211_hdr_3addr addr;
u8 llc_hdr[sizeof(rfc1042_header)];
__be16 llc_type;
struct arphdr arp_hdr;
@@ -36,7 +34,7 @@ struct rtw89_arp_rsp {
__be32 sender_ip;
u8 target_hw[ETH_ALEN];
__be32 target_ip;
-} __packed __aligned(2);
+} __packed;
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
@@ -462,7 +460,7 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
const u8 *mfw = firmware->data;
u32 mfw_len = firmware->size;
const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
- const struct rtw89_mfw_info *mfw_info;
+ const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
int i;
if (mfw_hdr->sig != RTW89_MFW_SIG) {
@@ -476,15 +474,27 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
}
for (i = 0; i < mfw_hdr->fw_nr; i++) {
- mfw_info = &mfw_hdr->info[i];
- if (mfw_info->type == type) {
- if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
- goto found;
- if (type == RTW89_FW_LOGFMT)
- goto found;
+ tmp = &mfw_hdr->info[i];
+ if (tmp->type != type)
+ continue;
+
+ if (type == RTW89_FW_LOGFMT) {
+ mfw_info = tmp;
+ goto found;
+ }
+
+ /* WiFi firmware versions are not sorted in the firmware file, so
+ * walk all entries to find the closest version that is equal to or
+ * less than ours.
+ */
+ if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
+ if (!mfw_info || mfw_info->cv < tmp->cv)
+ mfw_info = tmp;
}
}
+ if (mfw_info)
+ goto found;
+
if (!nowarn)
rtw89_err(rtwdev, "no suitable firmware found\n");
return -ENOENT;
@@ -606,10 +616,16 @@ int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_fw_suit *fw_suit;
- if (hal->cv != elm->u.bbmcu.cv)
+ /* BB MCU versions are in decreasing order in the firmware file, so
+ * take the first version that is equal to or less than ours, which is
+ * the closest such version.
+ */
+ if (hal->cv < elm->u.bbmcu.cv)
return 1; /* ignore this element */
fw_suit = rtw89_fw_suit_get(rtwdev, type);
+ if (fw_suit->data)
+ return 1; /* ignore this element (a firmware was already taken) */
+
fw_suit->data = elm->u.bbmcu.contents;
fw_suit->size = le32_to_cpu(elm->size);
@@ -659,10 +675,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -2179,8 +2197,10 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_eapol_2_of_2 *eapol_pkt;
+ struct ieee80211_hdr_3addr *hdr;
struct sk_buff *skb;
u8 key_des_ver;
@@ -2193,17 +2213,21 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
else
key_des_ver = 0;
- skb = dev_alloc_skb(sizeof(*eapol_pkt));
+ skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
if (!skb)
return NULL;
+ hdr = skb_put_zero(skb, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_PROTECTED);
+ ether_addr_copy(hdr->addr1, bss_conf->bssid);
+ ether_addr_copy(hdr->addr2, vif->addr);
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+
+ skb_put_zero(skb, sec_hdr_len);
+
eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
- eapol_pkt->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_FCTL_TODS |
- IEEE80211_FCTL_PROTECTED);
- ether_addr_copy(eapol_pkt->hdr.addr1, bss_conf->bssid);
- ether_addr_copy(eapol_pkt->hdr.addr2, vif->addr);
- ether_addr_copy(eapol_pkt->hdr.addr3, bss_conf->bssid);
memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
eapol_pkt->key_des_ver = key_des_ver;
@@ -2215,20 +2239,26 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
+ struct ieee80211_hdr_3addr *hdr;
struct rtw89_sa_query *sa_query;
struct sk_buff *skb;
- skb = dev_alloc_skb(sizeof(*sa_query));
+ skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
if (!skb)
return NULL;
+ hdr = skb_put_zero(skb, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION |
+ IEEE80211_FCTL_PROTECTED);
+ ether_addr_copy(hdr->addr1, bss_conf->bssid);
+ ether_addr_copy(hdr->addr2, vif->addr);
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+
+ skb_put_zero(skb, sec_hdr_len);
+
sa_query = skb_put_zero(skb, sizeof(*sa_query));
- sa_query->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ACTION |
- IEEE80211_FCTL_PROTECTED);
- ether_addr_copy(sa_query->hdr.addr1, bss_conf->bssid);
- ether_addr_copy(sa_query->hdr.addr2, vif->addr);
- ether_addr_copy(sa_query->hdr.addr3, bss_conf->bssid);
sa_query->category = WLAN_CATEGORY_SA_QUERY;
sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
@@ -2238,17 +2268,19 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
+ u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_hdr_3addr *hdr;
struct rtw89_arp_rsp *arp_skb;
struct arphdr *arp_hdr;
struct sk_buff *skb;
__le16 fc;
- skb = dev_alloc_skb(sizeof(struct rtw89_arp_rsp));
+ skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
if (!skb)
return NULL;
- arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
+ hdr = skb_put_zero(skb, sizeof(*hdr));
if (rtw_wow->ptk_alg)
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
@@ -2256,11 +2288,14 @@ static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
else
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
- arp_skb->addr.frame_control = fc;
- ether_addr_copy(arp_skb->addr.addr1, rtwvif->bssid);
- ether_addr_copy(arp_skb->addr.addr2, rtwvif->mac_addr);
- ether_addr_copy(arp_skb->addr.addr3, rtwvif->bssid);
+ hdr->frame_control = fc;
+ ether_addr_copy(hdr->addr1, rtwvif->bssid);
+ ether_addr_copy(hdr->addr2, rtwvif->mac_addr);
+ ether_addr_copy(hdr->addr3, rtwvif->bssid);
+
+ skb_put_zero(skb, sec_hdr_len);
+ arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
arp_skb->llc_type = htons(ETH_P_ARP);
@@ -2461,6 +2496,7 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ u32 done;
int ret;
if (chip->chip_gen != RTW89_CHIP_BE)
@@ -2484,12 +2520,18 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
+ rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
+ ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
+ true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
+ if (ret)
+ rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");
+
return 0;
fail:
dev_kfree_skb_any(skb);
@@ -2752,11 +2794,11 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
if (ppe16 != 7 && ppe8 == 7)
- pads[i] = 2;
+ pads[i] = RTW89_PE_DURATION_16;
else if (ppe8 != 7)
- pads[i] = 1;
+ pads[i] = RTW89_PE_DURATION_8;
else
- pads[i] = 0;
+ pads[i] = RTW89_PE_DURATION_0;
}
}
@@ -2889,11 +2931,11 @@ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
if (ppe16 != 7 && ppe8 == 7)
- pads[i] = 2;
+ pads[i] = RTW89_PE_DURATION_16_20;
else if (ppe8 != 7)
- pads[i] = 1;
+ pads[i] = RTW89_PE_DURATION_8;
else
- pads[i] = 0;
+ pads[i] = RTW89_PE_DURATION_0;
}
}
@@ -4850,6 +4892,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_h2c_scanofld_be_macc_role *macc_role;
struct rtw89_chan *op = &scan_info->op_chan;
struct rtw89_h2c_scanofld_be_opch *opch;
@@ -4923,6 +4966,15 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
+ if (req->no_cck) {
+ h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
+ h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
+ RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
+ le32_encode_bits(RTW89_HW_RATE_OFDM6,
+ RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
+ le32_encode_bits(RTW89_HW_RATE_OFDM6,
+ RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
+ }
ptr += sizeof(*h2c);
for (i = 0; i < option->num_macc_role; i++) {
@@ -6245,7 +6297,14 @@ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
ret = rtw89_hw_scan_offload(rtwdev, vif, false);
if (ret)
- rtw89_hw_scan_complete(rtwdev, vif, true);
+ rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);
+
+ /* Indicate ieee80211_scan_completed() before returning. This is safe
+ * because the scan abort command always waits for completion of
+ * RTW89_SCAN_END_SCAN_NOTIFY, so ieee80211_stop() can flush the scan
+ * work properly.
+ */
+ rtw89_hw_scan_complete(rtwdev, vif, true);
}
static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
@@ -6715,10 +6774,8 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
- if (!enable) {
- skb_put_zero(skb, sizeof(*gtk_info));
+ if (!enable)
goto hdr;
- }
ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 4151c9d566bd..c3b4324c621c 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -2722,6 +2722,7 @@ struct rtw89_h2c_scanofld_be {
#define RTW89_H2C_SCANOFLD_BE_W0_MACID GENMASK(23, 8)
#define RTW89_H2C_SCANOFLD_BE_W0_PORT GENMASK(26, 24)
#define RTW89_H2C_SCANOFLD_BE_W0_BAND GENMASK(28, 27)
+#define RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE BIT(29)
#define RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE GENMASK(7, 0)
#define RTW89_H2C_SCANOFLD_BE_W1_NUM_OP GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_W1_NORM_PD GENMASK(31, 16)
@@ -2738,6 +2739,9 @@ struct rtw89_h2c_scanofld_be {
#define RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE GENMASK(31, 0)
#define RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW GENMASK(31, 0)
#define RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ GENMASK(23, 16)
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
@@ -4655,4 +4659,10 @@ const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
const struct rtw89_rfe_parms *init);
+enum rtw89_wow_wakeup_ver {
+ RTW89_WOW_REASON_V0,
+ RTW89_WOW_REASON_V1,
+ RTW89_WOW_REASON_NUM,
+};
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 3fe0046f6eaa..e2399796aeb1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1568,6 +1568,8 @@ static int dmac_func_en_ax(struct rtw89_dev *rtwdev)
B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
+ if (chip_id == RTL8852BT)
+ val32 |= B_AX_AXIDMA_CLK_EN;
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
return 0;
@@ -1577,7 +1579,7 @@ static int chip_func_en_ax(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id == RTL8852A || chip_id == RTL8852B)
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
B_AX_OCP_L1_MASK);
@@ -2146,8 +2148,8 @@ int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B || !is_qta_poh(rtwdev))
+ if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev) ||
+ !is_qta_poh(rtwdev))
return 0;
return preload_init_set(rtwdev, mac_idx, mode);
@@ -2183,8 +2185,7 @@ static void _patch_ss2f_path(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B)
+ if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
return;
rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK,
@@ -2360,7 +2361,7 @@ static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
SIFS_MACTXEN_T1);
- if (rtwdev->chip->chip_id == RTL8852B || rtwdev->chip->chip_id == RTL8851B) {
+ if (rtw89_is_rtl885xb(rtwdev)) {
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV);
}
@@ -2588,7 +2589,9 @@ static int trxptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
case RTL8852A:
sifs = WMAC_SPEC_SIFS_OFDM_52A;
break;
+ case RTL8851B:
case RTL8852B:
+ case RTL8852BT:
sifs = WMAC_SPEC_SIFS_OFDM_52B;
break;
default:
@@ -2632,6 +2635,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
#define RX_MAX_LEN_UNIT 512
#define PLD_RLS_MAX_PG 127
#define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT)
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
u32 reg, rx_max_len, rx_qta;
u16 val;
@@ -2652,6 +2656,8 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_RX_DLK_DATA_TIME_MASK);
val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO,
B_AX_RX_DLK_CCA_TIME_MASK);
+ if (chip_id == RTL8852BT)
+ val |= B_AX_RX_DLK_RST_EN;
rtw89_write16(rtwdev, reg, val);
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx);
@@ -2668,8 +2674,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
rx_max_len /= RX_MAX_LEN_UNIT;
rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);
- if (rtwdev->chip->chip_id == RTL8852A &&
- rtwdev->hal.cv == CHIP_CBV) {
+ if (chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) {
rtw89_write16_mask(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx),
B_AX_RX_DLK_CCA_TIME_MASK, 0);
@@ -2700,7 +2705,7 @@ static int cmac_com_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
rtw89_write32(rtwdev, reg, val);
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN);
}
@@ -2766,11 +2771,10 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
static int cmac_dma_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 reg;
int ret;
- if (chip_id != RTL8852B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
@@ -3587,13 +3591,11 @@ static int enable_imr_ax(struct rtw89_dev *rtwdev, u8 mac_idx,
static void err_imr_ctrl_ax(struct rtw89_dev *rtwdev, bool en)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
-
rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR,
en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS);
rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR,
en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS);
- if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta)
+ if (!rtw89_is_rtl885xb(rtwdev) && rtwdev->mac.dle_info.c1_rx_qta)
rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1,
en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
}
@@ -3719,10 +3721,9 @@ static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val32;
- if (chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
return;
@@ -3818,7 +3819,7 @@ static void rtw89_mac_dmac_func_pre_en_ax(struct rtw89_dev *rtwdev)
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val;
- if (chip_id == RTL8851B)
+ if (chip_id == RTL8851B || chip_id == RTL8852BT)
val = B_AX_DISPATCHER_CLK_EN | B_AX_AXIDMA_CLK_EN;
else
val = B_AX_DISPATCHER_CLK_EN;
@@ -4664,8 +4665,7 @@ int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
- rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
- RTW89_MAX_MAC_ID_NUM);
+ rtwvif->mac_id = rtw89_acquire_mac_id(rtwdev);
if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM)
return -ENOSPC;
@@ -4676,7 +4676,7 @@ int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return 0;
release_mac_id:
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwvif->mac_id);
return ret;
}
@@ -4686,7 +4686,7 @@ int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
int ret;
ret = rtw89_mac_vif_deinit(rtwdev, rtwvif);
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwvif->mac_id);
return ret;
}
@@ -4757,6 +4757,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
}
return;
case RTW89_SCAN_END_SCAN_NOTIFY:
+ if (rtwdev->scan_info.abort)
+ return;
+
if (rtwvif && rtwvif->scan_req &&
last_chan < rtwvif->scan_req->n_channels) {
ret = rtw89_hw_scan_offload(rtwdev, vif, true);
@@ -4765,7 +4768,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
}
} else {
- rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort);
+ rtw89_hw_scan_complete(rtwdev, vif, false);
}
break;
case RTW89_SCAN_ENTER_OP_NOTIFY:
@@ -5199,6 +5202,46 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
case RTW89_MAC_MRC_DEL_SCH_OK:
func = H2C_FUNC_DEL_MRC;
break;
+ case RTW89_MAC_MRC_EMPTY_SCH_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: empty sch fail\n");
+ return;
+ case RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: role not exist fail\n");
+ return;
+ case RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: data not found fail\n");
+ return;
+ case RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: get next slot fail\n");
+ return;
+ case RTW89_MAC_MRC_ALT_ROLE_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: alt role fail\n");
+ return;
+ case RTW89_MAC_MRC_ADD_PSTIMER_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: add ps timer fail\n");
+ return;
+ case RTW89_MAC_MRC_MALLOC_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: malloc fail\n");
+ return;
+ case RTW89_MAC_MRC_SWITCH_CH_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: switch ch fail\n");
+ return;
+ case RTW89_MAC_MRC_TXNULL0_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: tx null-0 fail\n");
+ return;
+ case RTW89_MAC_MRC_PORT_FUNC_EN_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: port func en fail\n");
+ return;
default:
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MRC C2H STS RPT: status %d\n", status);
@@ -5461,18 +5504,19 @@ void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop)
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u8 val;
u16 val16;
u32 val32;
int ret;
rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT);
- if (rtwdev->chip->chip_id != RTL8851B)
+ if (chip_id != RTL8851B && chip_id != RTL8852BT)
rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN);
rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8);
rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK);
rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16);
- if (rtwdev->chip->chip_id != RTL8851B)
+ if (chip_id != RTL8851B && chip_id != RTL8852BT)
rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24);
val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0);
@@ -5755,8 +5799,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A)
return false;
- else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B)
+ else if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3,
B_AX_LTE_MUX_CTRL_PATH >> 24);
@@ -6317,9 +6360,30 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
return ret;
}
+int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
+{
+ struct rtw89_mac_h2c_info h2c_info = {};
+ struct rtw89_mac_c2h_info c2h_info = {};
+ u32 ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
+ h2c_info.content_len = sizeof(h2c_info.u.hdr);
+ h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
+
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
+ ret = -EINVAL;
+
+ return ret;
+}
+
static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
int ret;
if (enable_wow) {
@@ -6330,12 +6394,19 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
}
rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
+
+ if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
+ rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY);
+ else
+ rtw89_write32_set(rtwdev, R_AX_DBG_WOW,
+ B_AX_DBG_WOW_CPU_IO_RX_EN);
} else {
ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
if (ret) {
@@ -6343,6 +6414,7 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
return ret;
}
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index a580cb719233..d5895516b3ed 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -466,6 +466,16 @@ enum rtw89_mac_mrc_status {
RTW89_MAC_MRC_START_SCH_OK = 0,
RTW89_MAC_MRC_STOP_SCH_OK = 1,
RTW89_MAC_MRC_DEL_SCH_OK = 2,
+ RTW89_MAC_MRC_EMPTY_SCH_FAIL = 16,
+ RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL = 17,
+ RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL = 18,
+ RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL = 19,
+ RTW89_MAC_MRC_ALT_ROLE_FAIL = 20,
+ RTW89_MAC_MRC_ADD_PSTIMER_FAIL = 21,
+ RTW89_MAC_MRC_MALLOC_FAIL = 22,
+ RTW89_MAC_MRC_SWITCH_CH_FAIL = 23,
+ RTW89_MAC_MRC_TXNULL0_FAIL = 24,
+ RTW89_MAC_MRC_PORT_FUNC_EN_FAIL = 25,
};
struct rtw89_mac_ax_coex {
@@ -1446,5 +1456,6 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod
int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_dle_rsvd_qt_type type,
struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
+int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 1ec97250e88e..1508693032cb 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -66,7 +66,7 @@ static int rtw89_ops_start(struct ieee80211_hw *hw)
return ret;
}
-static void rtw89_ops_stop(struct ieee80211_hw *hw)
+static void rtw89_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -397,15 +397,14 @@ static void rtw89_conf_tx(struct rtw89_dev *rtwdev,
}
static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ieee80211_sta *sta;
if (vif->type != NL80211_IFTYPE_STATION)
return;
- sta = ieee80211_find_sta(vif, conf->bssid);
+ sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!sta) {
rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n");
return;
@@ -416,10 +415,8 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_core_sta_assoc(rtwdev, vif, sta);
}
-static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *conf,
- u64 changed)
+static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
@@ -429,7 +426,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
- rtw89_station_mode_sta_assoc(rtwdev, vif, conf);
+ rtw89_station_mode_sta_assoc(rtwdev, vif);
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
@@ -445,6 +442,26 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_PS)
+ rtw89_recalc_lps(rtwdev);
+
+ if (changed & BSS_CHANGED_ARP_FILTER)
+ rtwvif->ip_addr = vif->cfg.arp_addr_list[0];
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u64 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
if (changed & BSS_CHANGED_BSSID) {
ether_addr_copy(rtwvif->bssid, conf->bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
@@ -470,11 +487,8 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_CQM)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
- if (changed & BSS_CHANGED_PS)
- rtw89_recalc_lps(rtwdev);
-
- if (changed & BSS_CHANGED_ARP_FILTER)
- rtwvif->ip_addr = vif->cfg.arp_addr_list[0];
+ if (changed & BSS_CHANGED_TPE)
+ rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true);
mutex_unlock(&rtwdev->mutex);
}
@@ -1143,7 +1157,8 @@ const struct ieee80211_ops rtw89_ops = {
.change_interface = rtw89_ops_change_interface,
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
- .bss_info_changed = rtw89_ops_bss_info_changed,
+ .vif_cfg_changed = rtw89_ops_vif_cfg_changed,
+ .link_info_changed = rtw89_ops_link_info_changed,
.start_ap = rtw89_ops_start_ap,
.stop_ap = rtw89_ops_stop_ap,
.set_tim = rtw89_ops_set_tim,
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 934bdf3b398f..f212b67771d5 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -2312,26 +2312,6 @@ static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev)
dump_err_status_dispatcher_be(rtwdev);
}
-static int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
-{
- struct rtw89_mac_h2c_info h2c_info = {};
- struct rtw89_mac_c2h_info c2h_info = {};
- u32 ret;
-
- h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
- h2c_info.content_len = sizeof(h2c_info.u.hdr);
- h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
-
- ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
- if (ret)
- return ret;
-
- if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
- ret = -EINVAL;
-
- return ret;
-}
-
static int rtw89_wow_config_mac_be(struct rtw89_dev *rtwdev, bool enable_wow)
{
if (enable_wow) {
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 03bbcf9b6737..02afeb3acce4 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -183,14 +183,17 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
- struct rtw89_pci_rxbd_info *rxbd_info;
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ struct rtw89_pci_rxbd_info *rxbd_info;
+ __le32 info;
rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
- rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
- rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
- rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
- rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+ info = rxbd_info->dword;
+
+ rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
+ rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
+ rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
+ rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
}
static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
@@ -1298,10 +1301,12 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
dma_addr_t dma, u8 *add_info_nr)
{
struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
+ __le16 option;
txaddr_info->length = cpu_to_le16(total_len);
- txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
- RTW89_PCI_ADDR_NUM(1));
+ option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
+ option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
+ txaddr_info->option = option;
txaddr_info->dma = cpu_to_le32(dma);
*add_info_nr = 1;
@@ -1328,6 +1333,8 @@ u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
+ length_option |= u16_encode_bits(upper_32_bits(dma),
+ B_PCIADDR_HIGH_SEL_V1_MASK);
txaddr_info->length_opt = cpu_to_le16(length_option);
txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
@@ -1418,6 +1425,7 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
dma_addr_t dma;
+ __le16 opt;
txdesc = skb_push(skb, txdesc_size);
memset(txdesc, 0, txdesc_size);
@@ -1430,7 +1438,9 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
}
tx_data->dma = dma;
- txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
+ opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
+ opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
+ txbd->opt = opt;
txbd->length = cpu_to_le16(skb->len);
txbd->dma = cpu_to_le32(tx_data->dma);
skb_queue_tail(&rtwpci->h2c_queue, skb);
@@ -1446,6 +1456,7 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci_tx_wd *txwd;
+ __le16 opt;
int ret;
/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
@@ -1470,7 +1481,9 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
list_add_tail(&txwd->list, &tx_ring->busy_pages);
- txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
+ opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
+ opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
+ txbd->opt = opt;
txbd->length = cpu_to_le16(txwd->len);
txbd->dma = cpu_to_le32(txwd->paddr);
@@ -1569,6 +1582,25 @@ const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);
+static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 addr = info->wp_sel_addr;
+ u32 val;
+ int i;
+
+ if (!info->wp_sel_addr)
+ return;
+
+ for (i = 0; i < 16; i += 4) {
+ val = u32_encode_bits(i + 0, MASKBYTE0) |
+ u32_encode_bits(i + 1, MASKBYTE1) |
+ u32_encode_bits(i + 2, MASKBYTE2) |
+ u32_encode_bits(i + 3, MASKBYTE3);
+ rtw89_write32(rtwdev, addr + i, val);
+ }
+}
+
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@@ -1607,6 +1639,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, addr_bdram, val32);
}
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
@@ -1626,10 +1659,13 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
if (info->rx_ring_eq_is_full)
rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
}
+
+ rtw89_pci_init_wp_16sel(rtwdev);
}
static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
@@ -2039,7 +2075,7 @@ static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
if (!ret)
return 0;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
ret = rtw89_dbi_write8(rtwdev, addr, data);
return ret;
@@ -2057,7 +2093,7 @@ static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
if (!ret)
return 0;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
ret = rtw89_dbi_read8(rtwdev, addr, value);
return ret;
@@ -2137,10 +2173,9 @@ __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate
static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (chip_id != RTL8852B && chip_id != RTL8851B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
@@ -2150,14 +2185,13 @@ static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
enum rtw89_pcie_phy phy_rate;
u16 val16, mgn_set, div_set, tar;
u8 val8, bdr_ori;
bool l1_flag = false;
int ret = 0;
- if (chip_id != RTL8852B && chip_id != RTL8851B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
@@ -2330,21 +2364,20 @@ static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
u32 backup_aspm;
u32 phy_offset;
u16 oobs_val;
- u16 val16;
int ret;
if (rtwdev->chip->chip_id != RTL8852C)
return;
- backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
-
g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
if (g1_oobs && g2_oobs)
- goto out;
+ return;
+
+ backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
if (ret)
@@ -2354,15 +2387,16 @@ static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
- val16 = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
- OOBS_LEVEL_MASK);
- oobs_val = u16_encode_bits(val16, OOBS_SEN_MASK);
+ oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
+ OOBS_LEVEL_MASK);
- rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, oobs_val);
+ rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT,
+ OOBS_SEN_MASK, oobs_val);
rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
BAC_OOBS_SEL);
- rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, oobs_val);
+ rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT,
+ OOBS_SEN_MASK, oobs_val);
rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
BAC_OOBS_SEL);
@@ -2398,7 +2432,7 @@ static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
+ if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
return;
rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
@@ -2428,7 +2462,7 @@ static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
+ if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
return;
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
@@ -2438,7 +2472,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
@@ -2451,9 +2485,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
-
- if (chip_id != RTL8852B && chip_id != RTL8851B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
@@ -2715,7 +2747,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
B_AX_PCIE_RX_APPLEN_MASK, 0);
}
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
} else if (chip_id == RTL8852C) {
@@ -2723,7 +2755,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
}
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (tag_mode == MAC_AX_TAG_SGL) {
val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
~B_AX_LATENCY_CONTROL;
@@ -2738,7 +2770,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
info->multi_tag_num);
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
wd_dma_idle_intvl);
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
@@ -2783,7 +2815,6 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
const struct rtw89_pci_info *info = rtwdev->pci_info;
int ret;
- rtw89_pci_disable_eq(rtwdev);
rtw89_pci_ber(rtwdev);
rtw89_pci_rxdma_prefth(rtwdev);
rtw89_pci_l1off_pwroff(rtwdev);
@@ -2952,7 +2983,7 @@ static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
}
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
/* ADDR info 8-byte mode */
rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
B_AX_HOST_ADDR_INFO_8B_SEL);
@@ -2995,6 +3026,27 @@ static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
pci_disable_device(pdev);
}
+static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!rtwpci->enable_dac)
+ return;
+
+ switch (chip->chip_id) {
+ case RTL8852A:
+ case RTL8852B:
+ case RTL8851B:
+ case RTL8852BT:
+ break;
+ default:
+ return;
+ }
+
+ rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS);
+}
+
static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@@ -3009,16 +3061,17 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err;
}
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
- goto err_release_regions;
- }
-
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
- goto err_release_regions;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ if (!ret) {
+ rtwpci->enable_dac = true;
+ rtw89_pci_cfg_dac(rtwdev);
+ } else {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ rtw89_err(rtwdev,
+ "failed to set dma and consistent mask to 32/36-bit\n");
+ goto err_release_regions;
+ }
}
resource_len = pci_resource_len(pdev, bar_id);
@@ -3169,6 +3222,7 @@ static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
memset(rx_bd, 0, sizeof(*rx_bd));
rx_bd->buf_size = cpu_to_le16(buf_sz);
rx_bd->dma = cpu_to_le32(dma);
+ rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI);
rx_info->dma = dma;
return 0;
@@ -3761,7 +3815,7 @@ static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_L1_CTRL,
@@ -3813,7 +3867,7 @@ static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_L1_CTRL,
@@ -3912,7 +3966,7 @@ static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_TIMER_CTRL,
@@ -4109,7 +4163,7 @@ static int __maybe_unused rtw89_pci_suspend(struct device *dev)
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
@@ -4143,7 +4197,7 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
@@ -4155,6 +4209,8 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
B_AX_SEL_REQ_ENTR_L1);
}
rtw89_pci_l2_hci_ldo(rtwdev);
+ rtw89_pci_disable_eq(rtwdev);
+ rtw89_pci_cfg_dac(rtwdev);
rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -4289,11 +4345,16 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_clear_resource;
}
+ rtw89_pci_disable_eq(rtwdev);
rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
- rtw89_core_napi_init(rtwdev);
+ ret = rtw89_core_napi_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to init napi\n");
+ goto err_clear_resource;
+ }
ret = rtw89_pci_request_irq(rtwdev, pdev);
if (ret) {
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 7666753ae983..48c3ab735db2 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -724,6 +724,11 @@
#define B_AX_CH11_BUSY BIT(1)
#define B_AX_CH10_BUSY BIT(0)
+#define R_AX_WP_ADDR_H_SEL0_3 0x1334
+#define R_AX_WP_ADDR_H_SEL4_7 0x1338
+#define R_AX_WP_ADDR_H_SEL8_11 0x133C
+#define R_AX_WP_ADDR_H_SEL12_15 0x1340
+
#define R_BE_HAXI_DMA_STOP1 0xB010
#define B_BE_STOP_WPDMA BIT(31)
#define B_BE_STOP_CH14 BIT(14)
@@ -823,6 +828,11 @@
#define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308
#define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C
+#define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420
+#define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424
+#define R_BE_WP_ADDR_H_SEL8_11_V1 0xB428
+#define R_BE_WP_ADDR_H_SEL12_15_V1 0xB42C
+
/* Configure */
#define R_AX_PCIE_INIT_CFG2 0x1004
#define B_AX_WD_ITVL_IDLE GENMASK(27, 24)
@@ -1055,6 +1065,7 @@
#define RTW89_PCIE_TIMER_CTRL 0x0718
#define RTW89_PCIE_BIT_L1SUB BIT(5)
#define RTW89_PCIE_L1_CTRL 0x0719
+#define RTW89_PCIE_BIT_EN_64BITS BIT(5)
#define RTW89_PCIE_BIT_CLK BIT(4)
#define RTW89_PCIE_BIT_L1 BIT(3)
#define RTW89_PCIE_CLK_CTRL 0x0725
@@ -1304,6 +1315,7 @@ struct rtw89_pci_info {
u32 rpwm_addr;
u32 cpwm_addr;
u32 mit_addr;
+ u32 wp_sel_addr;
u32 tx_dma_ch_mask;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
@@ -1330,11 +1342,11 @@ struct rtw89_pci_rx_info {
u32 fs:1, ls:1, tag:13, len:14;
};
-#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
-
struct rtw89_pci_tx_bd_32 {
__le16 length;
- __le16 option;
+ __le16 opt;
+#define RTW89_PCI_TXBD_OPT_LS BIT(14)
+#define RTW89_PCI_TXBD_OPT_DMA_HI GENMASK(13, 6)
__le32 dma;
} __packed;
@@ -1349,7 +1361,7 @@ struct rtw89_pci_tx_wp_info {
#define RTW89_PCI_ADDR_MSDU_LS BIT(15)
#define RTW89_PCI_ADDR_LS BIT(14)
-#define RTW89_PCI_ADDR_HIGH(a) (((a) << 6) & GENMASK(13, 6))
+#define RTW89_PCI_ADDR_HIGH_MASK GENMASK(13, 6)
#define RTW89_PCI_ADDR_NUM(x) ((x) & GENMASK(5, 0))
struct rtw89_pci_tx_addr_info_32 {
@@ -1386,7 +1398,8 @@ struct rtw89_pci_rpp_fmt {
struct rtw89_pci_rx_bd_32 {
__le16 buf_size;
- __le16 rsvd;
+ __le16 opt;
+#define RTW89_PCI_RXBD_OPT_DMA_HI GENMASK(13, 6)
__le32 dma;
} __packed;
@@ -1475,6 +1488,7 @@ struct rtw89_pci {
bool running;
bool low_power;
bool under_recovery;
+ bool enable_dac;
struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
struct sk_buff_head h2c_queue;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index a82b4c56a6f4..ad11d1414874 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -1676,7 +1677,7 @@ static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
if (chip->chip_id != RTL8851B)
rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
- if (chip->chip_id == RTL8852B)
+ if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);
/* check 0x8080 */
@@ -1847,6 +1848,36 @@ static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
}
+static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
+}
+
+static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
+{
+ const u8 tssi_deviation_point = 0;
+ const u8 tssi_max_deviation = 2;
+
+ if (dbm <= tssi_deviation_point)
+ dbm -= tssi_max_deviation;
+
+ return dbm;
+}
+
+static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
+ s8 cstr = S8_MAX;
+
+ if (band == RTW89_BAND_6G && tpe->valid)
+ cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);
+
+ return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
+}
+
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
const struct rtw89_rate_desc *rate_desc)
{
@@ -1921,6 +1952,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
s8 lmt = 0, sar;
+ s8 cstr;
switch (band) {
case RTW89_BAND_2G:
@@ -1953,8 +1985,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
sar = rtw89_query_sar(rtwdev, freq);
+ cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
- return min(lmt, sar);
+ return min3(lmt, sar, cstr);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
@@ -2178,6 +2211,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
s8 lmt_ru = 0, sar;
+ s8 cstr;
switch (band) {
case RTW89_BAND_2G:
@@ -2210,8 +2244,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
sar = rtw89_query_sar(rtwdev, freq);
+ cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
- return min(lmt_ru, sar);
+ return min3(lmt_ru, sar, cstr);
}
static void
@@ -5969,6 +6004,74 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
vif->cfg.aid, phy_idx);
}
+static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
+{
+ return desc->ch != 0;
+}
+
+static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
+ const struct rtw89_chan *chan)
+{
+ if (!rfk_chan_validate_desc(desc))
+ return false;
+
+ if (desc->ch != chan->channel)
+ return false;
+
+ if (desc->has_band && desc->band != chan->band_type)
+ return false;
+
+ if (desc->has_bw && desc->bw != chan->band_width)
+ return false;
+
+ return true;
+}
+
+struct rfk_chan_iter_data {
+ const struct rtw89_rfk_chan_desc desc;
+ unsigned int found;
+};
+
+static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
+{
+ struct rfk_chan_iter_data *iter_data = data;
+
+ if (rfk_chan_is_equivalent(&iter_data->desc, chan))
+ iter_data->found++;
+
+ return 0;
+}
+
+u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
+ const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
+ const struct rtw89_chan *target_chan)
+{
+ int sel = -1;
+ u8 i;
+
+ for (i = 0; i < desc_nr; i++) {
+ struct rfk_chan_iter_data iter_data = {
+ .desc = desc[i],
+ };
+
+ if (rfk_chan_is_equivalent(&desc[i], target_chan))
+ return i;
+
+ rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
+ if (!iter_data.found && sel == -1)
+ sel = i;
+ }
+
+ if (sel == -1) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "no idle rfk entry; force replace the first\n");
+ sel = 0;
+ }
+
+ return sel;
+}
+EXPORT_SYMBOL(rtw89_rfk_chan_lookup);
+
static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 082231ebbee5..d8df553b9cb0 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -129,6 +129,7 @@
#define EDCCA_HL_DIFF_NORMAL 8
#define RSSI_UNIT_CONVER 110
#define EDCCA_UNIT_CONVER 128
+#define EDCCA_PWROFST_DEFAULT 18
enum rtw89_phy_c2h_ra_func {
RTW89_PHY_C2H_FUNC_STS_RPT,
@@ -714,6 +715,19 @@ enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband sub
}
}
+struct rtw89_rfk_chan_desc {
+ /* desc is valid iff ch is non-zero */
+ u8 ch;
+
+ /* To avoid extending old chip code every time, each new
+ * field must be defined along with a bool flag in a positive way.
+ */
+ bool has_band;
+ u8 band;
+ bool has_bw;
+ u8 bw;
+};
+
enum rtw89_rfk_flag {
RTW89_RFK_F_WRF = 0,
RTW89_RFK_F_WM = 1,
@@ -949,5 +963,8 @@ enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
+u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
+ const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
+ const struct rtw89_chan *target_chan);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 01cbd0312102..7df36f3bff0b 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -311,6 +311,9 @@
#define B_AX_S1_LDO2PWRCUT_F BIT(23)
#define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21)
+#define R_AX_DBG_WOW 0x0504
+#define B_AX_DBG_WOW_CPU_IO_RX_EN BIT(8)
+
#define R_AX_SEC_CTRL 0x0C00
#define B_AX_SEC_IDMEM_SIZE_CONFIG_MASK GENMASK(17, 16)
@@ -3169,6 +3172,8 @@
#define R_AX_DLK_PROTECT_CTL_C1 0xEE02
#define B_AX_RX_DLK_CCA_TIME_MASK GENMASK(15, 8)
#define B_AX_RX_DLK_DATA_TIME_MASK GENMASK(7, 4)
+#define B_AX_RX_DLK_RST_EN BIT(1)
+#define B_AX_RX_DLK_INT_EN BIT(0)
#define R_AX_PLCP_HDR_FLTR 0xCE04
#define R_AX_PLCP_HDR_FLTR_C1 0xEE04
@@ -4313,6 +4318,8 @@
#define R_BE_WLCPU_PORT_PC 0x03FC
+#define R_BE_DBG_WOW 0x0504
+
#define R_BE_DCPU_PLATFORM_ENABLE 0x0888
#define B_BE_DCPU_SYM_DPLT_MEM_MUX_EN BIT(10)
#define B_BE_DCPU_WARM_EN BIT(9)
@@ -7811,6 +7818,8 @@
#define B_UPD_P0_EN BIT(31)
#define R_EMLSR 0x0044
#define B_EMLSR_PARM GENMASK(27, 12)
+#define R_CHK_LPS_STAT 0x0058
+#define B_CHK_LPS_STAT BIT(0)
#define R_SPOOF_CG 0x00B4
#define B_SPOOF_CG_EN BIT(17)
#define R_CHINFO_SEG 0x00B4
@@ -7827,6 +7836,7 @@
#define B_ANAPAR_PW15_H2 GENMASK(27, 26)
#define R_ANAPAR 0x032C
#define B_ANAPAR_15 GENMASK(31, 16)
+#define B_ANAPAR_EN1 BIT(31)
#define B_ANAPAR_ADCCLK BIT(30)
#define B_ANAPAR_FLTRST BIT(22)
#define B_ANAPAR_CRXBB GENMASK(18, 16)
@@ -7868,10 +7878,12 @@
#define R_RXCCA_BE1 0x0520
#define B_RXCCA_BE1_DIS BIT(0)
#define R_UPD_CLK_ADC 0x0700
+#define B_UPD_GEN_ON BIT(27)
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
#define B_UPD_CLK_ADC_ON BIT(24)
#define B_ENABLE_CCK BIT(5)
#define R_RSTB_ASYNC 0x0704
+#define B_RSTB_ASYNC_BW80 GENMASK(9, 8)
#define B_RSTB_ASYNC_ALL BIT(1)
#define R_P0_ANT_SW 0x0728
#define B_P0_HW_ANTSW_DIS_BY_GNT_BT BIT(12)
@@ -7935,6 +7947,8 @@
#define B_MEASUREMENT_TRIG_MSK BIT(2)
#define B_CCX_TRIG_OPT_MSK BIT(1)
#define B_CCX_EN_MSK BIT(0)
+#define R_FAHM 0x0C1C
+#define B_RXTD_CKEN BIT(2)
#define R_IFS_COUNTER 0x0C28
#define B_IFS_CLM_PERIOD_MSK GENMASK(31, 16)
#define B_IFS_CLM_COUNTER_UNIT_MSK GENMASK(15, 14)
@@ -7968,6 +7982,7 @@
#define B_IQK_DPK_RST BIT(0)
#define R_TX_COLLISION_T2R_ST 0x0C70
#define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20)
+#define B_TXRX_FORCE_VAL GENMASK(9, 0)
#define R_TXGATING 0x0C74
#define B_TXGATING_EN BIT(4)
#define R_TXRFC 0x0C7C
@@ -8028,6 +8043,7 @@
#define B_P0_RFMODE_FTM_RX GENMASK(11, 0)
#define R_P0_NRBW 0x12B8
#define B_P0_NRBW_DBG BIT(30)
+#define B_P0_NRBW_RSTB BIT(28)
#define R_S0_RXDC 0x12D4
#define B_S0_RXDC_I GENMASK(25, 16)
#define B_S0_RXDC_Q GENMASK(31, 26)
@@ -8109,6 +8125,8 @@
#define R_S0_ADDCK 0x1E00
#define B_S0_ADDCK_I GENMASK(9, 0)
#define B_S0_ADDCK_Q GENMASK(19, 10)
+#define R_TXCKEN_FORCE 0x2008
+#define B_TXCKEN_FORCE_ALL GENMASK(24, 0)
#define R_EDCCA_RPT_SEL 0x20CC
#define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0)
#define R_ADC_FIFO 0x20fc
@@ -8264,6 +8282,7 @@
#define R_DCFO_COMP_S0 0x448C
#define B_DCFO_COMP_S0_MSK GENMASK(11, 0)
#define R_DCFO_WEIGHT 0x4490
+#define B_DAC_CLK_IDX BIT(31)
#define B_DCFO_WEIGHT_MSK GENMASK(27, 24)
#define R_DCFO_OPT 0x4494
#define B_DCFO_OPT_EN BIT(29)
@@ -8379,6 +8398,7 @@
#define B_CDD_EVM_CHK_EN BIT(0)
#define R_PATH0_BAND_SEL_V1 0x4738
#define B_PATH0_BAND_SEL_MSK_V1 BIT(17)
+#define B_PATH0_BAND_NRBW_EN_V1 BIT(16)
#define R_PATH0_BT_SHARE_V1 0x4738
#define B_PATH0_BT_SHARE_V1 BIT(19)
#define R_PATH0_BTG_PATH_V1 0x4738
@@ -8422,6 +8442,7 @@
#define B_PATH1_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8)
#define R_PATH1_BAND_SEL_V1 0x4AA4
#define B_PATH1_BAND_SEL_MSK_V1 BIT(17)
+#define B_PATH1_BAND_NRBW_EN_V1 BIT(16)
#define R_PATH1_BT_SHARE_V1 0x4AA4
#define B_PATH1_BT_SHARE_V1 BIT(19)
#define R_PATH1_BTG_PATH_V1 0x4AA4
@@ -8442,6 +8463,8 @@
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30)
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29)
#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6)
+#define R_PWOFST 0x488C
+#define B_PWOFST GENMASK(21, 17)
#define R_2P4G_BAND 0x4970
#define B_2P4G_BAND_SEL BIT(1)
#define R_FC0_BW 0x4974
@@ -8622,6 +8645,8 @@
#define B_P0_TMETER GENMASK(15, 10)
#define B_P0_TMETER_DIS BIT(16)
#define B_P0_TMETER_TRK BIT(24)
+#define R_P0_ADCFF_EN 0x58C8
+#define B_P0_ADCFF_EN BIT(24)
#define R_P1_TSSIC 0x7814
#define B_P1_TSSIC_BYPASS BIT(11)
#define R_P0_TSSI_TRK 0x5818
@@ -8633,7 +8658,9 @@
#define B_P0_TSSI_EN BIT(31)
#define B_P0_TSSI_AVG GENMASK(15, 12)
#define R_P0_RFCTM 0x5864
+#define B_P0_CLKG_FORCE GENMASK(31, 30)
#define B_P0_RFCTM_EN BIT(29)
+#define B_P0_GOT_TXRX GENMASK(28, 27)
#define B_P0_RFCTM_VAL GENMASK(25, 20)
#define R_P0_RFCTM_RDY BIT(26)
#define R_P0_TRSW 0x5868
@@ -8666,12 +8693,14 @@
#define B_P0_RFM_BT_EN BIT(5)
#define B_P0_RFM_OUT GENMASK(4, 0)
#define R_P0_PATH_RST 0x58AC
+#define B_P0_PATH_RST BIT(27)
#define R_P0_TXDPD 0x58D4
#define B_P0_TXDPD GENMASK(31, 28)
#define R_P0_TXPW_RSTB 0x58DC
#define B_P0_TXPW_RSTB_MANON BIT(30)
#define B_P0_TXPW_RSTB_TSSI BIT(31)
#define R_P0_TSSI_MV_AVG 0x58E4
+#define B_P0_TXPW_RSTB GENMASK(28, 27)
#define B_P0_TSSI_MV_MIX GENMASK(19, 11)
#define B_P0_TSSI_MV_AVG GENMASK(13, 11)
#define B_P0_TSSI_MV_CLR BIT(14)
@@ -8796,6 +8825,10 @@
#define B_P1_TSSI_ALIM2 GENMASK(29, 0)
#define R_P1_TSSI_ADC_CLK 0x766c
#define B_P1_TSSI_ADC_CLK GENMASK(17, 16)
+#define R_P1_TXAGC_TH 0x7800
+#define B_P1_TXAGC_MAXMIN GENMASK(15, 0)
+#define R_P1_TXPW_FORCE 0x780C
+#define B_P1_TXPW_RDY BIT(15)
#define R_P1_TSSIC 0x7814
#define B_P1_TSSIC_BYPASS BIT(11)
#define R_P1_TMETER 0x7810
@@ -8811,14 +8844,20 @@
#define B_P1_TSSI_EN BIT(31)
#define B_P1_TSSI_AVG GENMASK(15, 12)
#define R_P1_RFCTM 0x7864
+#define B_P1_CLKG_FORCE GENMASK(31, 30)
+#define B_P1_GOT_TXRX GENMASK(28, 27)
#define R_P1_RFCTM_RDY BIT(26)
#define B_P1_RFCTM_VAL GENMASK(25, 20)
#define B_P1_RFCTM_DEL GENMASK(19, 11)
#define R_P1_PATH_RST 0x78AC
+#define B_P1_PATH_RST BIT(27)
+#define R_P1_ADCFF_EN 0x78C8
+#define B_P1_ADCFF_EN BIT(24)
#define R_P1_TXPW_RSTB 0x78DC
#define B_P1_TXPW_RSTB_MANON BIT(30)
#define B_P1_TXPW_RSTB_TSSI BIT(31)
#define R_P1_TSSI_MV_AVG 0x78E4
+#define B_P1_TXPW_RSTB GENMASK(28, 27)
#define B_P1_TSSI_MV_MIX GENMASK(19, 11)
#define B_P1_TSSI_MV_AVG GENMASK(13, 11)
#define B_P1_TSSI_MV_CLR BIT(14)
@@ -9003,6 +9042,7 @@
#define R_IQRSN 0x8220
#define B_IQRSN_K1 BIT(28)
#define B_IQRSN_K2 BIT(16)
+#define R_DPD_CH0B 0x82BC
#define R_RXCFIR_P0C0 0x8D40
#define R_RXCFIR_P0C1 0x8D84
#define R_RXCFIR_P0C2 0x8DC8
@@ -9036,15 +9076,18 @@
#define B_IQKINF2_FCNT GENMASK(23, 16)
#define B_IQKINF2_KCNT GENMASK(15, 8)
#define B_IQKINF2_NCTLV GENMASK(7, 0)
+#define R_RFK_ST 0xBFF8
#define R_DCOF0 0xC000
#define B_DCOF0_RST BIT(17)
#define B_DCOF0_V GENMASK(4, 1)
#define R_DCOF1 0xC004
+#define B_DCOF1_VAL GENMASK(31, 20)
#define B_DCOF1_RST BIT(17)
#define B_DCOF1_S BIT(0)
#define R_DCOF8 0xC020
#define B_DCOF8_V GENMASK(4, 1)
#define R_DCOF9 0xC024
+#define B_DCOF9_VAL GENMASK(31, 20)
#define B_DCOF9_RST BIT(17)
#define R_DACK_S0P0 0xC040
#define B_DACK_S0P0_OK BIT(31)
@@ -9095,6 +9138,7 @@
#define R_ADCMOD 0xC0E8
#define B_ADCMOD_LP GENMASK(31, 16)
#define R_DCIM 0xC0EC
+#define B_DCIM_RC GENMASK(23, 16)
#define B_DCIM_FR GENMASK(14, 13)
#define R_ADDCK0D 0xC0F0
#define B_ADDCK0D_VAL2 GENMASK(31, 26)
@@ -9117,11 +9161,18 @@
#define B_ADDCKR0_DC GENMASK(15, 4)
#define B_ADDCKR0_A1 GENMASK(9, 0)
#define R_DACK10 0xC100
+#define B_DACK10_RST BIT(17)
#define B_DACK10 GENMASK(4, 1)
#define R_DACK1_K 0xc104
+#define B_DACK1_VAL GENMASK(31, 20)
+#define B_DACK1_RST BIT(17)
#define B_DACK1_EN BIT(0)
#define R_DACK11 0xC120
#define B_DACK11 GENMASK(4, 1)
+#define R_DACK2_K 0xC124
+#define B_DACK2_VAL GENMASK(31, 20)
+#define B_DACK2_RST BIT(17)
+#define B_DACK2_EN BIT(0)
#define R_DACK_S1P0 0xC140
#define B_DACK_S1P0_OK BIT(31)
#define R_DACK_BIAS10 0xC148
@@ -9170,6 +9221,11 @@
#define B_DACKN0_V GENMASK(21, 14)
#define R_DACKN1_CTL 0xC224
#define B_DACKN1_V GENMASK(21, 14)
+#define B_DACKN1_ON BIT(0)
+#define R_DACKN2_CTL 0xC238
+#define B_DACKN2_ON BIT(0)
+#define R_DACKN3_CTL 0xC24C
+#define B_DACKN3_ON BIT(0)
#define R_GAIN_MAP0 0xE44C
#define B_GAIN_MAP0_EN BIT(0)
#define R_GAIN_MAP1 0xE54C
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 1a133914f673..a251b0e3b16e 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -714,7 +714,154 @@ exit:
mutex_unlock(&rtwdev->mutex);
}
-static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
+/* Maximum Transmit Power field (@raw) can be EIRP or PSD.
+ * Both units are 0.5 dB-based. Return a constraint in dB.
+ */
+static s8 tpe_get_constraint(s8 raw)
+{
+ const u8 hw_deviation = 3; /* unit: 0.5 dB */
+ const u8 antenna_gain = 10; /* unit: 0.5 dB */
+ const u8 array_gain = 6; /* unit: 0.5 dB */
+ const u8 offset = hw_deviation + antenna_gain + array_gain;
+
+ return (raw - offset) / 2;
+}
+
+static void tpe_intersect_constraint(struct rtw89_reg_6ghz_tpe *tpe, s8 cstr)
+{
+ if (tpe->valid) {
+ tpe->constraint = min(tpe->constraint, cstr);
+ return;
+ }
+
+ tpe->constraint = cstr;
+ tpe->valid = true;
+}
+
+static void tpe_deal_with_eirp(struct rtw89_reg_6ghz_tpe *tpe,
+ const struct ieee80211_parsed_tpe_eirp *eirp)
+{
+ unsigned int i;
+ s8 cstr;
+
+ if (!eirp->valid)
+ return;
+
+ for (i = 0; i < eirp->count; i++) {
+ cstr = tpe_get_constraint(eirp->power[i]);
+ tpe_intersect_constraint(tpe, cstr);
+ }
+}
+
+static s8 tpe_convert_psd_to_eirp(s8 psd)
+{
+ static const unsigned int mlog20 = 1301;
+
+ return psd + 10 * mlog20 / 1000;
+}
+
+static void tpe_deal_with_psd(struct rtw89_reg_6ghz_tpe *tpe,
+ const struct ieee80211_parsed_tpe_psd *psd)
+{
+ unsigned int i;
+ s8 cstr_psd;
+ s8 cstr;
+
+ if (!psd->valid)
+ return;
+
+ for (i = 0; i < psd->count; i++) {
+ cstr_psd = tpe_get_constraint(psd->power[i]);
+ cstr = tpe_convert_psd_to_eirp(cstr_psd);
+ tpe_intersect_constraint(tpe, cstr);
+ }
+}
+
+static void rtw89_calculate_tpe(struct rtw89_dev *rtwdev,
+ struct rtw89_reg_6ghz_tpe *result_tpe,
+ const struct ieee80211_parsed_tpe *parsed_tpe)
+{
+ static const u8 category = IEEE80211_TPE_CAT_6GHZ_DEFAULT;
+
+ tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_local[category]);
+ tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_reg_client[category]);
+ tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_local[category]);
+ tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_reg_client[category]);
+}
+
+static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ struct rtw89_reg_6ghz_tpe new = {};
+ struct rtw89_vif *rtwvif;
+ bool changed = false;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ const struct rtw89_reg_6ghz_tpe *tmp;
+ const struct rtw89_chan *chan;
+
+ chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ if (chan->band_type != RTW89_BAND_6G)
+ continue;
+
+ tmp = &rtwvif->reg_6ghz_tpe;
+ if (!tmp->valid)
+ continue;
+
+ tpe_intersect_constraint(&new, tmp->constraint);
+ }
+
+ if (memcmp(&regulatory->reg_6ghz_tpe, &new,
+ sizeof(regulatory->reg_6ghz_tpe)) != 0)
+ changed = true;
+
+ if (changed) {
+ if (new.valid)
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "recalc 6 GHz reg TPE to %d dBm\n",
+ new.constraint);
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "recalc 6 GHz reg TPE to none\n");
+
+ regulatory->reg_6ghz_tpe = new;
+ }
+
+ return changed;
+}
+
+static int rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool active,
+ unsigned int *changed)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct rtw89_reg_6ghz_tpe *tpe = &rtwvif->reg_6ghz_tpe;
+
+ memset(tpe, 0, sizeof(*tpe));
+
+ if (!active || rtwvif->reg_6ghz_power != RTW89_REG_6GHZ_POWER_STD)
+ goto bottom;
+
+ rtw89_calculate_tpe(rtwdev, tpe, &bss_conf->tpe);
+ if (!tpe->valid)
+ goto bottom;
+
+ if (tpe->constraint < RTW89_MIN_VALID_POWER_CONSTRAINT) {
+ rtw89_err(rtwdev,
+ "%s: constraint %d dBm is less than min valid val\n",
+ __func__, tpe->constraint);
+
+ tpe->valid = false;
+ return -EINVAL;
+ }
+
+bottom:
+ *changed += __rtw89_reg_6ghz_tpe_recalc(rtwdev);
+ return 0;
+}
+
+static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_regd *regd = regulatory->regd;
@@ -751,23 +898,21 @@ static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
}
if (regulatory->reg_6ghz_power == sel)
- return;
+ return false;
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"recalc 6 GHz reg power type to %d\n", sel);
regulatory->reg_6ghz_power = sel;
-
- rtw89_core_set_chip_txpwr(rtwdev);
+ return true;
}
-void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool active)
+static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool active,
+ unsigned int *changed)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- lockdep_assert_held(&rtwdev->mutex);
-
if (active) {
switch (vif->bss_conf.power_type) {
case IEEE80211_REG_VLP_AP:
@@ -787,5 +932,32 @@ void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
}
- __rtw89_reg_6ghz_power_recalc(rtwdev);
+ *changed += __rtw89_reg_6ghz_power_recalc(rtwdev);
+ return 0;
+}
+
+int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool active)
+{
+ unsigned int changed = 0;
+ int ret;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ /* The result of reg_6ghz_tpe may depend on reg_6ghz_power type,
+ * so reg_6ghz_tpe_recalc() must be done after reg_6ghz_power_recalc().
+ */
+
+ ret = rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, active, &changed);
+ if (ret)
+ return ret;
+
+ ret = rtw89_reg_6ghz_tpe_recalc(rtwdev, rtwvif, active, &changed);
+ if (ret)
+ return ret;
+
+ if (changed)
+ rtw89_core_set_chip_txpwr(rtwdev);
+
+ return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 87b51823244d..40cf84a79c46 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -105,6 +105,10 @@ static const u32 rtw8851b_c2h_regs[RTW89_C2HREG_MAX] = {
R_AX_C2HREG_DATA3
};
+static const u32 rtw8851b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
static const struct rtw89_page_regs rtw8851b_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL,
@@ -2447,6 +2451,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8851b_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2508,7 +2513,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8851b_c2h_regs,
.page_regs = &rtw8851b_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
+ .wow_reason_reg = rtw8851b_wow_wakeup_regs,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ec3629d95fda..d334924faec8 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
.mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
.tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index e93cee1456bd..08e148328c62 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -398,6 +398,10 @@ static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = {
R_AX_C2HREG_DATA3
};
+static const u32 rtw8852a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
static const struct rtw89_page_regs rtw8852a_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL,
@@ -2162,6 +2166,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dig_table = &rtw89_8852a_phy_dig_table,
.dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 1,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2224,7 +2229,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.c2h_regs = rtw8852a_c2h_regs,
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.page_regs = &rtw8852a_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
+ .wow_reason_reg = rtw8852a_wow_wakeup_regs,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index fdee5dd4ba14..9a675e2193bc 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
.mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index d351096fa4b4..a22847a311ad 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -8,6 +8,7 @@
#include "phy.h"
#include "reg.h"
#include "rtw8852b.h"
+#include "rtw8852b_common.h"
#include "rtw8852b_rfk.h"
#include "rtw8852b_table.h"
#include "txrx.h"
@@ -65,167 +66,6 @@ static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
NULL},
};
-static const struct rtw89_reg3_def rtw8852b_pmac_ht20_mcs7_tbl[] = {
- {0x4580, 0x0000ffff, 0x0},
- {0x4580, 0xffff0000, 0x0},
- {0x4584, 0x0000ffff, 0x0},
- {0x4584, 0xffff0000, 0x0},
- {0x4580, 0x0000ffff, 0x1},
- {0x4578, 0x00ffffff, 0x2018b},
- {0x4570, 0x03ffffff, 0x7},
- {0x4574, 0x03ffffff, 0x32407},
- {0x45b8, 0x00000010, 0x0},
- {0x45b8, 0x00000100, 0x0},
- {0x45b8, 0x00000080, 0x0},
- {0x45b8, 0x00000008, 0x0},
- {0x45a0, 0x0000ff00, 0x0},
- {0x45a0, 0xff000000, 0x1},
- {0x45a4, 0x0000ff00, 0x2},
- {0x45a4, 0xff000000, 0x3},
- {0x45b8, 0x00000020, 0x0},
- {0x4568, 0xe0000000, 0x0},
- {0x45b8, 0x00000002, 0x1},
- {0x456c, 0xe0000000, 0x0},
- {0x45b4, 0x00006000, 0x0},
- {0x45b4, 0x00001800, 0x1},
- {0x45b8, 0x00000040, 0x0},
- {0x45b8, 0x00000004, 0x0},
- {0x45b8, 0x00000200, 0x0},
- {0x4598, 0xf8000000, 0x0},
- {0x45b8, 0x00100000, 0x0},
- {0x45a8, 0x00000fc0, 0x0},
- {0x45b8, 0x00200000, 0x0},
- {0x45b0, 0x00000038, 0x0},
- {0x45b0, 0x000001c0, 0x0},
- {0x45a0, 0x000000ff, 0x0},
- {0x45b8, 0x00400000, 0x0},
- {0x4590, 0x000007ff, 0x0},
- {0x45b0, 0x00000e00, 0x0},
- {0x45ac, 0x0000001f, 0x0},
- {0x45b8, 0x00800000, 0x0},
- {0x45a8, 0x0003f000, 0x0},
- {0x45b8, 0x01000000, 0x0},
- {0x45b0, 0x00007000, 0x0},
- {0x45b0, 0x00038000, 0x0},
- {0x45a0, 0x00ff0000, 0x0},
- {0x45b8, 0x02000000, 0x0},
- {0x4590, 0x003ff800, 0x0},
- {0x45b0, 0x001c0000, 0x0},
- {0x45ac, 0x000003e0, 0x0},
- {0x45b8, 0x04000000, 0x0},
- {0x45a8, 0x00fc0000, 0x0},
- {0x45b8, 0x08000000, 0x0},
- {0x45b0, 0x00e00000, 0x0},
- {0x45b0, 0x07000000, 0x0},
- {0x45a4, 0x000000ff, 0x0},
- {0x45b8, 0x10000000, 0x0},
- {0x4594, 0x000007ff, 0x0},
- {0x45b0, 0x38000000, 0x0},
- {0x45ac, 0x00007c00, 0x0},
- {0x45b8, 0x20000000, 0x0},
- {0x45a8, 0x3f000000, 0x0},
- {0x45b8, 0x40000000, 0x0},
- {0x45b4, 0x00000007, 0x0},
- {0x45b4, 0x00000038, 0x0},
- {0x45a4, 0x00ff0000, 0x0},
- {0x45b8, 0x80000000, 0x0},
- {0x4594, 0x003ff800, 0x0},
- {0x45b4, 0x000001c0, 0x0},
- {0x4598, 0xf8000000, 0x0},
- {0x45b8, 0x00100000, 0x0},
- {0x45a8, 0x00000fc0, 0x7},
- {0x45b8, 0x00200000, 0x0},
- {0x45b0, 0x00000038, 0x0},
- {0x45b0, 0x000001c0, 0x0},
- {0x45a0, 0x000000ff, 0x0},
- {0x45b4, 0x06000000, 0x0},
- {0x45b0, 0x00000007, 0x0},
- {0x45b8, 0x00080000, 0x0},
- {0x45a8, 0x0000003f, 0x0},
- {0x457c, 0xffe00000, 0x1},
- {0x4530, 0xffffffff, 0x0},
- {0x4588, 0x00003fff, 0x0},
- {0x4598, 0x000001ff, 0x0},
- {0x4534, 0xffffffff, 0x0},
- {0x4538, 0xffffffff, 0x0},
- {0x453c, 0xffffffff, 0x0},
- {0x4588, 0x0fffc000, 0x0},
- {0x4598, 0x0003fe00, 0x0},
- {0x4540, 0xffffffff, 0x0},
- {0x4544, 0xffffffff, 0x0},
- {0x4548, 0xffffffff, 0x0},
- {0x458c, 0x00003fff, 0x0},
- {0x4598, 0x07fc0000, 0x0},
- {0x454c, 0xffffffff, 0x0},
- {0x4550, 0xffffffff, 0x0},
- {0x4554, 0xffffffff, 0x0},
- {0x458c, 0x0fffc000, 0x0},
- {0x459c, 0x000001ff, 0x0},
- {0x4558, 0xffffffff, 0x0},
- {0x455c, 0xffffffff, 0x0},
- {0x4530, 0xffffffff, 0x4e790001},
- {0x4588, 0x00003fff, 0x0},
- {0x4598, 0x000001ff, 0x1},
- {0x4534, 0xffffffff, 0x0},
- {0x4538, 0xffffffff, 0x4b},
- {0x45ac, 0x38000000, 0x7},
- {0x4588, 0xf0000000, 0x0},
- {0x459c, 0x7e000000, 0x0},
- {0x45b8, 0x00040000, 0x0},
- {0x45b8, 0x00020000, 0x0},
- {0x4590, 0xffc00000, 0x0},
- {0x45b8, 0x00004000, 0x0},
- {0x4578, 0xff000000, 0x0},
- {0x45b8, 0x00000400, 0x0},
- {0x45b8, 0x00000800, 0x0},
- {0x45b8, 0x00001000, 0x0},
- {0x45b8, 0x00002000, 0x0},
- {0x45b4, 0x00018000, 0x0},
- {0x45ac, 0x07800000, 0x0},
- {0x45b4, 0x00000600, 0x2},
- {0x459c, 0x0001fe00, 0x80},
- {0x45ac, 0x00078000, 0x3},
- {0x459c, 0x01fe0000, 0x1},
-};
-
-static const struct rtw89_reg3_def rtw8852b_btc_preagc_en_defs[] = {
- {0x46D0, GENMASK(1, 0), 0x3},
- {0x4790, GENMASK(1, 0), 0x3},
- {0x4AD4, GENMASK(31, 0), 0xf},
- {0x4AE0, GENMASK(31, 0), 0xf},
- {0x4688, GENMASK(31, 24), 0x80},
- {0x476C, GENMASK(31, 24), 0x80},
- {0x4694, GENMASK(7, 0), 0x80},
- {0x4694, GENMASK(15, 8), 0x80},
- {0x4778, GENMASK(7, 0), 0x80},
- {0x4778, GENMASK(15, 8), 0x80},
- {0x4AE4, GENMASK(23, 0), 0x780D1E},
- {0x4AEC, GENMASK(23, 0), 0x780D1E},
- {0x469C, GENMASK(31, 26), 0x34},
- {0x49F0, GENMASK(31, 26), 0x34},
-};
-
-static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_en_defs);
-
-static const struct rtw89_reg3_def rtw8852b_btc_preagc_dis_defs[] = {
- {0x46D0, GENMASK(1, 0), 0x0},
- {0x4790, GENMASK(1, 0), 0x0},
- {0x4AD4, GENMASK(31, 0), 0x60},
- {0x4AE0, GENMASK(31, 0), 0x60},
- {0x4688, GENMASK(31, 24), 0x1a},
- {0x476C, GENMASK(31, 24), 0x1a},
- {0x4694, GENMASK(7, 0), 0x2a},
- {0x4694, GENMASK(15, 8), 0x2a},
- {0x4778, GENMASK(7, 0), 0x2a},
- {0x4778, GENMASK(15, 8), 0x2a},
- {0x4AE4, GENMASK(23, 0), 0x79E99E},
- {0x4AEC, GENMASK(23, 0), 0x79E99E},
- {0x469C, GENMASK(31, 26), 0x26},
- {0x49F0, GENMASK(31, 26), 0x26},
-};
-
-static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_dis_defs);
-
static const u32 rtw8852b_h2c_regs[RTW89_H2CREG_MAX] = {
R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
R_AX_H2CREG_DATA3
@@ -236,6 +76,10 @@ static const u32 rtw8852b_c2h_regs[RTW89_C2HREG_MAX] = {
R_AX_C2HREG_DATA3
};
+static const u32 rtw8852b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
static const struct rtw89_page_regs rtw8852b_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL,
@@ -403,6 +247,8 @@ static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
u32 val32;
u32 ret;
+ rtw8852b_pwr_sps_ana(rtwdev);
+
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
B_AX_AFSM_PCIE_SUS_EN);
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
@@ -530,9 +376,7 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
u32 val32;
u32 ret;
- /* Only do once during probe stage after reading efuse */
- if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
- rtw8852b_pwr_sps_ana(rtwdev);
+ rtw8852b_pwr_sps_ana(rtwdev);
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
XTAL_SI_RFC2RF);
@@ -591,806 +435,6 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
return 0;
}
-static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse,
- struct rtw8852b_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->e.mac_addr);
- efuse->rfe_type = map->rfe_type;
- efuse->xtal_cap = map->xtal_k;
-}
-
-static void rtw8852b_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
- struct rtw8852b_efuse *map)
-{
- struct rtw89_tssi_info *tssi = &rtwdev->tssi;
- struct rtw8852b_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
- u8 i, j;
-
- tssi->thermal[RF_PATH_A] = map->path_a_therm;
- tssi->thermal[RF_PATH_B] = map->path_b_therm;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
- sizeof(ofst[i]->cck_tssi));
-
- for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
- i, j, tssi->tssi_cck[i][j]);
-
- memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
- sizeof(ofst[i]->bw40_tssi));
- memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
- ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
-
- for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
- i, j, tssi->tssi_mcs[i][j]);
- }
-}
-
-static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low)
-{
- if (high)
- *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3);
- if (low)
- *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3);
-
- return data != 0xff;
-}
-
-static void rtw8852b_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
- struct rtw8852b_efuse *map)
-{
- struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
- bool valid = false;
-
- valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]);
- valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]);
- valid |= _decode_efuse_gain(map->rx_gain_5g_low,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]);
- valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]);
- valid |= _decode_efuse_gain(map->rx_gain_5g_high,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
-
- gain->offset_valid = valid;
-}
-
-static int rtw8852b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
- enum rtw89_efuse_block block)
-{
- struct rtw89_efuse *efuse = &rtwdev->efuse;
- struct rtw8852b_efuse *map;
-
- map = (struct rtw8852b_efuse *)log_map;
-
- efuse->country_code[0] = map->country_code[0];
- efuse->country_code[1] = map->country_code[1];
- rtw8852b_efuse_parsing_tssi(rtwdev, map);
- rtw8852b_efuse_parsing_gain_offset(rtwdev, map);
-
- switch (rtwdev->hci.type) {
- case RTW89_HCI_TYPE_PCIE:
- rtw8852be_efuse_parsing(efuse, map);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
-
- return 0;
-}
-
-static void rtw8852b_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
-#define PWR_K_CHK_OFFSET 0x5E9
-#define PWR_K_CHK_VALUE 0xAA
- u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr;
-
- if (phycap_map[offset] == PWR_K_CHK_VALUE)
- rtwdev->efuse.power_k_valid = true;
-}
-
-static void rtw8852b_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
- struct rtw89_tssi_info *tssi = &rtwdev->tssi;
- static const u32 tssi_trim_addr[RF_PATH_NUM_8852B] = {0x5D6, 0x5AB};
- u32 addr = rtwdev->chip->phycap_addr;
- bool pg = false;
- u32 ofst;
- u8 i, j;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
- /* addrs are in decreasing order */
- ofst = tssi_trim_addr[i] - addr - j;
- tssi->tssi_trim[i][j] = phycap_map[ofst];
-
- if (phycap_map[ofst] != 0xff)
- pg = true;
- }
- }
-
- if (!pg) {
- memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI][TRIM] no PG, set all trim info to 0\n");
- }
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++)
- for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
- i, j, tssi->tssi_trim[i][j],
- tssi_trim_addr[i] - j);
-}
-
-static void rtw8852b_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
- u8 *phycap_map)
-{
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- static const u32 thm_trim_addr[RF_PATH_NUM_8852B] = {0x5DF, 0x5DC};
- u32 addr = rtwdev->chip->phycap_addr;
- u8 i;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
- i, info->thermal_trim[i]);
-
- if (info->thermal_trim[i] != 0xff)
- info->pg_thermal_trim = true;
- }
-}
-
-static void rtw8852b_thermal_trim(struct rtw89_dev *rtwdev)
-{
-#define __thm_setting(raw) \
-({ \
- u8 __v = (raw); \
- ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
-})
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- u8 i, val;
-
- if (!info->pg_thermal_trim) {
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[THERMAL][TRIM] no PG, do nothing\n");
-
- return;
- }
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- val = __thm_setting(info->thermal_trim[i]);
- rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
- i, val);
- }
-#undef __thm_setting
-}
-
-static void rtw8852b_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
- u8 *phycap_map)
-{
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- static const u32 pabias_trim_addr[RF_PATH_NUM_8852B] = {0x5DE, 0x5DB};
- u32 addr = rtwdev->chip->phycap_addr;
- u8 i;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
- i, info->pa_bias_trim[i]);
-
- if (info->pa_bias_trim[i] != 0xff)
- info->pg_pa_bias_trim = true;
- }
-}
-
-static void rtw8852b_pa_bias_trim(struct rtw89_dev *rtwdev)
-{
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- u8 pabias_2g, pabias_5g;
- u8 i;
-
- if (!info->pg_pa_bias_trim) {
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[PA_BIAS][TRIM] no PG, do nothing\n");
-
- return;
- }
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
- pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
- i, pabias_2g, pabias_5g);
-
- rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
- rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
- }
-}
-
-static void rtw8852b_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
- static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = {
- {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8},
- {0x590, 0x58F, 0, 0x58E, 0x58D},
- };
- struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
- u32 phycap_addr = rtwdev->chip->phycap_addr;
- bool valid = false;
- int path, i;
- u8 data;
-
- for (path = 0; path < 2; path++)
- for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) {
- if (comp_addrs[path][i] == 0)
- continue;
-
- data = phycap_map[comp_addrs[path][i] - phycap_addr];
- valid |= _decode_efuse_gain(data, NULL,
- &gain->comp[path][i]);
- }
-
- gain->comp_valid = valid;
-}
-
-static int rtw8852b_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
- rtw8852b_phycap_parsing_power_cal(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_tssi(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_thermal_trim(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_gain_comp(rtwdev, phycap_map);
-
- return 0;
-}
-
-static void rtw8852b_power_trim(struct rtw89_dev *rtwdev)
-{
- rtw8852b_thermal_trim(rtwdev);
- rtw8852b_pa_bias_trim(rtwdev);
-}
-
-static void rtw8852b_set_channel_mac(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- u8 mac_idx)
-{
- u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx);
- u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
- u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx);
- u8 txsc20 = 0, txsc40 = 0;
-
- switch (chan->band_width) {
- case RTW89_CHANNEL_WIDTH_80:
- txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
- fallthrough;
- case RTW89_CHANNEL_WIDTH_40:
- txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
- break;
- default:
- break;
- }
-
- switch (chan->band_width) {
- case RTW89_CHANNEL_WIDTH_80:
- rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
- rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
- break;
- case RTW89_CHANNEL_WIDTH_40:
- rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0));
- rtw89_write32(rtwdev, sub_carr, txsc20);
- break;
- case RTW89_CHANNEL_WIDTH_20:
- rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK);
- rtw89_write32(rtwdev, sub_carr, 0);
- break;
- default:
- break;
- }
-
- if (chan->channel > 14) {
- rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE);
- rtw89_write8_set(rtwdev, chk_rate,
- B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
- } else {
- rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE);
- rtw89_write8_clr(rtwdev, chk_rate,
- B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
- }
-}
-
-static const u32 rtw8852b_sco_barker_threshold[14] = {
- 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6,
- 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4
-};
-
-static const u32 rtw8852b_sco_cck_threshold[14] = {
- 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724,
- 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed
-};
-
-static void rtw8852b_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch)
-{
- u8 ch_element = primary_ch - 1;
-
- rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH,
- rtw8852b_sco_barker_threshold[ch_element]);
- rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH,
- rtw8852b_sco_cck_threshold[ch_element]);
-}
-
-static u8 rtw8852b_sco_mapping(u8 central_ch)
-{
- if (central_ch == 1)
- return 109;
- else if (central_ch >= 2 && central_ch <= 6)
- return 108;
- else if (central_ch >= 7 && central_ch <= 10)
- return 107;
- else if (central_ch >= 11 && central_ch <= 14)
- return 106;
- else if (central_ch == 36 || central_ch == 38)
- return 51;
- else if (central_ch >= 40 && central_ch <= 58)
- return 50;
- else if (central_ch >= 60 && central_ch <= 64)
- return 49;
- else if (central_ch == 100 || central_ch == 102)
- return 48;
- else if (central_ch >= 104 && central_ch <= 126)
- return 47;
- else if (central_ch >= 128 && central_ch <= 151)
- return 46;
- else if (central_ch >= 153 && central_ch <= 177)
- return 45;
- else
- return 0;
-}
-
-struct rtw8852b_bb_gain {
- u32 gain_g[BB_PATH_NUM_8852B];
- u32 gain_a[BB_PATH_NUM_8852B];
- u32 gain_mask;
-};
-
-static const struct rtw8852b_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
- { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
- .gain_mask = 0x00ff0000 },
- { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
- .gain_mask = 0xff000000 },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0x000000ff },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0x0000ff00 },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0x00ff0000 },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0xff000000 },
- { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
- .gain_mask = 0x000000ff },
-};
-
-static const struct rtw8852b_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
- { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
- .gain_mask = 0x00ff0000 },
- { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
- .gain_mask = 0xff000000 },
-};
-
-static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
- enum rtw89_subband subband,
- enum rtw89_rf_path path)
-{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
- u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
- s32 val;
- u32 reg;
- u32 mask;
- int i;
-
- for (i = 0; i < LNA_GAIN_NUM; i++) {
- if (subband == RTW89_CH_2G)
- reg = bb_gain_lna[i].gain_g[path];
- else
- reg = bb_gain_lna[i].gain_a[path];
-
- mask = bb_gain_lna[i].gain_mask;
- val = gain->lna_gain[gain_band][path][i];
- rtw89_phy_write32_mask(rtwdev, reg, mask, val);
- }
-
- for (i = 0; i < TIA_GAIN_NUM; i++) {
- if (subband == RTW89_CH_2G)
- reg = bb_gain_tia[i].gain_g[path];
- else
- reg = bb_gain_tia[i].gain_a[path];
-
- mask = bb_gain_tia[i].gain_mask;
- val = gain->tia_gain[gain_band][path][i];
- rtw89_phy_write32_mask(rtwdev, reg, mask, val);
- }
-}
-
-static void rtw8852b_set_gain_offset(struct rtw89_dev *rtwdev,
- enum rtw89_subband subband,
- enum rtw89_phy_idx phy_idx)
-{
- static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD};
- static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1,
- R_PATH1_G_TIA1_LNA6_OP1DB_V1};
- struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
- enum rtw89_gain_offset gain_ofdm_band;
- s32 offset_a, offset_b;
- s32 offset_ofdm, offset_cck;
- s32 tmp;
- u8 path;
-
- if (!efuse_gain->comp_valid)
- goto next;
-
- for (path = RF_PATH_A; path < BB_PATH_NUM_8852B; path++) {
- tmp = efuse_gain->comp[path][subband];
- tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX);
- rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp);
- }
-
-next:
- if (!efuse_gain->offset_valid)
- return;
-
- gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband);
-
- offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
- offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
-
- tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp);
-
- tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp);
-
- if (hal->antenna_rx == RF_B) {
- offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
- offset_cck = -efuse_gain->offset[RF_PATH_B][0];
- } else {
- offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
- offset_cck = -efuse_gain->offset[RF_PATH_A][0];
- }
-
- tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0];
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
-
- tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0];
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
-
- if (subband == RTW89_CH_2G) {
- tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1);
- tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1);
- rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST,
- B_RX_RPL_OFST_CCK_MASK, tmp);
- }
-}
-
-static
-void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
-{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
- u8 band = rtw89_subband_to_bb_gain_band(subband);
- u32 val;
-
- val = FIELD_PREP(B_P0_RPL1_20_MASK, (gain->rpl_ofst_20[band][RF_PATH_A] +
- gain->rpl_ofst_20[band][RF_PATH_B]) / 2) |
- FIELD_PREP(B_P0_RPL1_40_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][0] +
- gain->rpl_ofst_40[band][RF_PATH_B][0]) / 2) |
- FIELD_PREP(B_P0_RPL1_41_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][1] +
- gain->rpl_ofst_40[band][RF_PATH_B][1]) / 2);
- val >>= B_P0_RPL1_SHIFT;
- rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val);
- rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val);
-
- val = FIELD_PREP(B_P0_RTL2_42_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][2] +
- gain->rpl_ofst_40[band][RF_PATH_B][2]) / 2) |
- FIELD_PREP(B_P0_RTL2_80_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][0] +
- gain->rpl_ofst_80[band][RF_PATH_B][0]) / 2) |
- FIELD_PREP(B_P0_RTL2_81_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][1] +
- gain->rpl_ofst_80[band][RF_PATH_B][1]) / 2) |
- FIELD_PREP(B_P0_RTL2_8A_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][10] +
- gain->rpl_ofst_80[band][RF_PATH_B][10]) / 2);
- rtw89_phy_write32(rtwdev, R_P0_RPL2, val);
- rtw89_phy_write32(rtwdev, R_P1_RPL2, val);
-
- val = FIELD_PREP(B_P0_RTL3_82_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][2] +
- gain->rpl_ofst_80[band][RF_PATH_B][2]) / 2) |
- FIELD_PREP(B_P0_RTL3_83_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][3] +
- gain->rpl_ofst_80[band][RF_PATH_B][3]) / 2) |
- FIELD_PREP(B_P0_RTL3_84_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][4] +
- gain->rpl_ofst_80[band][RF_PATH_B][4]) / 2) |
- FIELD_PREP(B_P0_RTL3_89_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][9] +
- gain->rpl_ofst_80[band][RF_PATH_B][9]) / 2);
- rtw89_phy_write32(rtwdev, R_P0_RPL3, val);
- rtw89_phy_write32(rtwdev, R_P1_RPL3, val);
-}
-
-static void rtw8852b_ctrl_ch(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 central_ch = chan->channel;
- u8 subband = chan->subband_type;
- u8 sco_comp;
- bool is_2g = central_ch <= 14;
-
- /* Path A */
- if (is_2g)
- rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
- B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx);
- else
- rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
- B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx);
-
- /* Path B */
- if (is_2g)
- rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
- B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx);
- else
- rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
- B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx);
-
- /* SCO compensate FC setting */
- sco_comp = rtw8852b_sco_mapping(central_ch);
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx);
-
- if (chan->band_type == RTW89_BAND_6G)
- return;
-
- /* CCK parameters */
- if (central_ch == 14) {
- rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5);
- }
-
- rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_A);
- rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_B);
- rtw8852b_set_gain_offset(rtwdev, subband, phy_idx);
- rtw8852b_set_rxsc_rpl_comp(rtwdev, subband);
-}
-
-static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
-{
- static const u32 adc_sel[2] = {0xC0EC, 0xC1EC};
- static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
-
- switch (bw) {
- case RTW89_CHANNEL_WIDTH_5:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
- break;
- case RTW89_CHANNEL_WIDTH_10:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
- break;
- case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
- break;
- case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
- break;
- case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
- break;
- default:
- rtw89_warn(rtwdev, "Fail to set ADC\n");
- }
-}
-
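rtw8852b_bw_setting(), removed above, only ever touches two small fields per RF path: the 0x6000 bits of the per-path ADC select register (0xC0EC / 0xC1EC) and the 0x30 bits of the wideband-ADC register (0xC0E4 / 0xC1E4). The table-driven sketch below is simply the switch statement read off as data; the field names are assumptions, since their meaning is not documented in this diff.

#include <stdint.h>
#include <stdio.h>

enum bw { BW_5, BW_10, BW_20, BW_40, BW_80, BW_NUM };

struct adc_cfg {
        uint8_t adc_val;        /* value for mask 0x6000 of 0xC0EC / 0xC1EC */
        uint8_t wbadc_val;      /* value for mask 0x30 of 0xC0E4 / 0xC1E4 */
};

/* 5 and 10 MHz narrow modes get special settings; 20/40/80 share one. */
static const struct adc_cfg adc_cfg_by_bw[BW_NUM] = {
        [BW_5]  = { 0x1, 0x0 },
        [BW_10] = { 0x2, 0x1 },
        [BW_20] = { 0x0, 0x2 },
        [BW_40] = { 0x0, 0x2 },
        [BW_80] = { 0x0, 0x2 },
};

int main(void)
{
        for (int bw = 0; bw < BW_NUM; bw++)
                printf("bw[%d]: adc=0x%x wbadc=0x%x\n", bw,
                       adc_cfg_by_bw[bw].adc_val, adc_cfg_by_bw[bw].wbadc_val);
        return 0;
}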
-static void rtw8852b_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
- enum rtw89_phy_idx phy_idx)
-{
- u32 rx_path_0;
-
- switch (bw) {
- case RTW89_CHANNEL_WIDTH_5:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
-
- /*Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- break;
- case RTW89_CHANNEL_WIDTH_10:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
-
- /*Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- break;
- case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
-
- /*Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- break;
- case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
- pri_ch, phy_idx);
-
- /*Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- /*CCK primary channel */
- if (pri_ch == RTW89_SC_20_UPPER)
- rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1);
- else
- rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0);
-
- break;
- case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
- pri_ch, phy_idx);
-
- /*Set RF mode at A */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx);
- break;
- default:
- rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
- pri_ch);
- }
-
- rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A);
- rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B);
-
- rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0,
- phy_idx);
- if (rx_path_0 == 0x1)
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
- else if (rx_path_0 == 0x2)
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
-}
-
-static void rtw8852b_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en)
-{
- if (cck_en) {
- rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
- }
-}
-
-static void rtw8852b_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 pri_ch = chan->pri_ch_idx;
- bool mask_5m_low;
- bool mask_5m_en;
-
- switch (chan->band_width) {
- case RTW89_CHANNEL_WIDTH_40:
- /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */
- mask_5m_en = true;
- mask_5m_low = pri_ch == RTW89_SC_20_LOWER;
- break;
- case RTW89_CHANNEL_WIDTH_80:
- /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */
- mask_5m_en = pri_ch == RTW89_SC_20_UPMOST ||
- pri_ch == RTW89_SC_20_LOWEST;
- mask_5m_low = pri_ch == RTW89_SC_20_LOWEST;
- break;
- default:
- mask_5m_en = false;
- break;
- }
-
- if (!mask_5m_en) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0);
- rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
- B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx);
- return;
- }
-
- if (mask_5m_low) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0);
- }
- rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
- B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx);
-}
-
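rtw8852b_5m_mask(), removed above, reduces to two booleans derived from the bandwidth and the primary 20 MHz sub-channel index: whether the 5 MHz edge mask is needed at all, and whether it sits on the low or the high edge. A standalone sketch of that decision follows; the SC_20_* values are stand-ins taken from the "Prich=N" comments in the removed code, not the driver's enum definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values, matching the "Prich=N" comments in the function above. */
enum sc_idx { SC_20_UPPER = 1, SC_20_LOWER = 2, SC_20_UPMOST = 3, SC_20_LOWEST = 4 };
enum bw { BW_20, BW_40, BW_80 };

struct mask_5m { bool en; bool low; };

static struct mask_5m decide_5m_mask(enum bw bw, enum sc_idx pri)
{
        struct mask_5m m = { false, false };

        switch (bw) {
        case BW_40:
                /* always mask; low edge when the primary 20 MHz is the lower one */
                m.en = true;
                m.low = (pri == SC_20_LOWER);
                break;
        case BW_80:
                /* only the two outermost primary positions need the mask */
                m.en = (pri == SC_20_UPMOST || pri == SC_20_LOWEST);
                m.low = (pri == SC_20_LOWEST);
                break;
        default:
                break;
        }
        return m;
}

int main(void)
{
        struct mask_5m m = decide_5m_mask(BW_80, SC_20_LOWEST);

        printf("en=%d low=%d\n", m.en, m.low); /* en=1 low=1 */
        return 0;
}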
-static void rtw8852b_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
-{
- rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
- fsleep(1);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
-}
-
static void rtw8852b_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
enum rtw89_phy_idx phy_idx, bool en)
{
@@ -1422,87 +466,20 @@ static void rtw8852b_bb_reset(struct rtw89_dev *rtwdev,
rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
- rtw8852b_bb_reset_all(rtwdev, phy_idx);
+ rtw8852bx_bb_reset_all(rtwdev, phy_idx);
rtw89_phy_write32_clr(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
rtw89_phy_write32_clr(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
}
-static void rtw8852b_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
-{
- u32 addr;
-
- for (addr = R_AX_PWR_MACID_LMT_TABLE0;
- addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
-}
-
-static void rtw8852b_bb_sethw(struct rtw89_dev *rtwdev)
-{
- struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
-
- rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP);
- rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP);
-
- rtw8852b_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
-
- /* read these registers after loading BB parameters */
- gain->offset_base[RTW89_PHY_0] =
- rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK);
- gain->rssi_base[RTW89_PHY_0] =
- rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK);
-}
-
-static void rtw8852b_bb_set_pop(struct rtw89_dev *rtwdev)
-{
- if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)
- rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN);
-}
-
-static void rtw8852b_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- bool cck_en = chan->channel <= 14;
- u8 pri_ch_idx = chan->pri_ch_idx;
- u8 band = chan->band_type, chan_idx;
-
- if (cck_en)
- rtw8852b_ctrl_sco_cck(rtwdev, chan->primary_channel);
-
- rtw8852b_ctrl_ch(rtwdev, chan, phy_idx);
- rtw8852b_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
- rtw8852b_ctrl_cck_en(rtwdev, cck_en);
- if (chan->band_type == RTW89_BAND_5G) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
- B_PATH0_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
- B_PATH0_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
- B_PATH1_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
- B_PATH1_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
- B_BT_DYN_DC_EST_EN_MSK, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
- }
- chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
- rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx);
- rtw8852b_5m_mask(rtwdev, chan, phy_idx);
- rtw8852b_bb_set_pop(rtwdev);
- rtw8852b_bb_reset_all(rtwdev, phy_idx);
-}
-
static void rtw8852b_set_channel(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_mac_idx mac_idx,
enum rtw89_phy_idx phy_idx)
{
- rtw8852b_set_channel_mac(rtwdev, chan, mac_idx);
- rtw8852b_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852bx_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852bx_set_channel_bb(rtwdev, chan, phy_idx);
rtw8852b_set_channel_rf(rtwdev, chan, phy_idx);
}
@@ -1602,540 +579,6 @@ static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
rtw8852b_dpk_track(rtwdev);
}
-static u32 rtw8852b_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
-{
- const u16 tssi_16dbm_cw = 0x12c;
- const u8 base_cw_0db = 0x27;
- const s8 ofst_int = 0;
- s16 pwr_s10_3;
- s16 rf_pwr_cw;
- u16 bb_pwr_cw;
- u32 pwr_cw;
- u32 tssi_ofst_cw;
-
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
- bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
- rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
- rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
- pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
-
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
- tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
-
- return FIELD_PREP(B_DPD_TSSI_CW, tssi_ofst_cw) |
- FIELD_PREP(B_DPD_PWR_CW, pwr_cw) |
- FIELD_PREP(B_DPD_REF, ref);
-}
-
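rtw8852b_bb_cal_txpwr_ref(), removed above, works in a fixed-point power domain: the reference is doubled, offset by the 0 dB base codeword (0x27 << 3), split into a 3-bit BB codeword and a 6-bit RF codeword clamped to [15, 63], and a TSSI offset codeword is derived from the 16 dBm anchor 0x12c. The sketch below reruns that arithmetic for ref = 0 (ofst_int is 0 in the removed code and is omitted); the comment values are hand-checked, and reading pwr_s10_3 as an S(10,3) quantity in eighths of a dB is an interpretation, not something stated in this diff.

#include <stdint.h>
#include <stdio.h>

static int16_t clamp_s16(int16_t v, int16_t lo, int16_t hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
        const uint16_t tssi_16dbm_cw = 0x12c;
        const uint8_t base_cw_0db = 0x27;
        const int16_t ref = 0;                          /* 0 dB reference */

        int16_t pwr_s10_3 = (ref << 1) + (int16_t)(base_cw_0db << 3);  /* 0x138 */
        uint16_t bb_pwr_cw = pwr_s10_3 & 0x7;                          /* 0x0 */
        int16_t rf_pwr_cw = (pwr_s10_3 >> 3) & 0x3f;                   /* 0x27 */

        rf_pwr_cw = clamp_s16(rf_pwr_cw, 15, 63);                      /* stays 0x27 */

        uint32_t pwr_cw = ((uint32_t)rf_pwr_cw << 3) | bb_pwr_cw;      /* 0x138 */
        uint32_t tssi_ofst_cw =
                (uint32_t)((int16_t)tssi_16dbm_cw + (ref << 1) - (16 << 3)); /* 0xac */

        printf("tssi_ofst_cw=0x%x rf_cw=0x%x bb_cw=0x%x pwr_cw=0x%x\n",
               (unsigned)tssi_ofst_cw, (unsigned)rf_pwr_cw,
               (unsigned)bb_pwr_cw, (unsigned)pwr_cw);
        return 0;
}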
-static void rtw8852b_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
-{
- static const u32 addr[RF_PATH_NUM_8852B] = {0x5800, 0x7800};
- const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
- const u8 ofst_ofdm = 0x4;
- const u8 ofst_cck = 0x8;
- const s16 ref_ofdm = 0;
- const s16 ref_cck = 0;
- u32 val;
- u8 i;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
-
- rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
- B_AX_PWR_REF, 0x0);
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
-}
-
-static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- u8 tx_shape_idx,
- enum rtw89_phy_idx phy_idx)
-{
-#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2))
-#define __DFIR_CFG_MASK 0xffffffff
-#define __DFIR_CFG_NR 8
-#define __DECL_DFIR_PARAM(_name, _val...) \
- static const u32 param_ ## _name[] = {_val}; \
- static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR)
-
- __DECL_DFIR_PARAM(flat,
- 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053,
- 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5);
- __DECL_DFIR_PARAM(sharp,
- 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090,
- 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5);
- __DECL_DFIR_PARAM(sharp_14,
- 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
- 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A);
- u8 ch = chan->channel;
- const u32 *param;
- u32 addr;
- int i;
-
- if (ch > 14) {
- rtw89_warn(rtwdev,
- "set tx shape dfir by unknown ch: %d on 2G\n", ch);
- return;
- }
-
- if (ch == 14)
- param = param_sharp_14;
- else
- param = tx_shape_idx == 0 ? param_flat : param_sharp;
-
- for (i = 0; i < __DFIR_CFG_NR; i++) {
- addr = __DFIR_CFG_ADDR(i);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]);
- rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i],
- phy_idx);
- }
-
-#undef __DECL_DFIR_PARAM
-#undef __DFIR_CFG_NR
-#undef __DFIR_CFG_MASK
-#undef __DECL_CFG_ADDR
-}
-
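The __DFIR_CFG_ADDR(i) macro in the function removed above simply walks eight consecutive 32-bit words starting at R_TXFIR0 (stride of 4 bytes), i.e. the same R_TXFIR0..R_TXFIRE block that rtw8852b_ctrl_ch() programs field-by-field; masked to 24 bits, the flat and sharp_14 tables appear to match the non-channel-14 and channel-14 TXFIR values written there. Below is a sketch of the address walk, with the register base left as a placeholder because R_TXFIR0's numeric value is not visible in this diff.

#include <stdint.h>
#include <stdio.h>

#define DFIR_CFG_NR 8

/* Same shape as __DFIR_CFG_ADDR(i): base + i * 4, eight words in a row. */
static uint32_t dfir_cfg_addr(uint32_t r_txfir0, int i)
{
        return r_txfir0 + ((uint32_t)i << 2);
}

int main(void)
{
        const uint32_t r_txfir0 = 0x0;  /* placeholder; real value is in reg.h */

        for (int i = 0; i < DFIR_CFG_NR; i++)
                printf("coef word %d -> R_TXFIR0 + 0x%02x\n", i,
                       (unsigned)(dfir_cfg_addr(r_txfir0, i) - r_txfir0));
        return 0;
}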
-static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
- u8 band = chan->band_type;
- u8 regd = rtw89_regd_get(rtwdev, band);
- u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd];
- u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd];
-
- if (band == RTW89_BAND_2G)
- rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
-
- rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG,
- tx_shape_ofdm);
-}
-
-static void rtw8852b_set_txpwr(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
- rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
- rtw8852b_set_tx_shape(rtwdev, chan, phy_idx);
- rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
- rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
-}
-
-static void rtw8852b_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
-{
- rtw8852b_set_txpwr_ref(rtwdev, phy_idx);
-}
-
-static
-void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
- s8 pw_ofst, enum rtw89_mac_idx mac_idx)
-{
- u32 reg;
-
- if (pw_ofst < -16 || pw_ofst > 15) {
- rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
- return;
- }
-
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx);
- rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
-
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
-
- pw_ofst = max_t(s8, pw_ofst - 3, -16);
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst);
-}
-
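rtw8852b_set_txpwr_ul_tb_offset(), removed above, rejects offsets outside [-16, 15], programs the value as the one-spatial-stream UL TB offset, and derives the two-stream offset by backing off three more units with a floor of -16. A small worked restatement with the register writes left out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ul_tb_offsets(int8_t pw_ofst, int8_t *ofst_1t, int8_t *ofst_2t)
{
        if (pw_ofst < -16 || pw_ofst > 15)
                return false;           /* outside the writable range */

        *ofst_1t = pw_ofst;
        /* two spatial streams: three units lower, but never below -16 */
        *ofst_2t = (int8_t)(pw_ofst - 3 < -16 ? -16 : pw_ofst - 3);
        return true;
}

int main(void)
{
        int8_t o1, o2;

        if (ul_tb_offsets(-15, &o1, &o2))
                printf("1T=%d 2T=%d\n", o1, o2);        /* 1T=-15 2T=-16 */
        return 0;
}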
-static int
-rtw8852b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
-{
- int ret;
-
- ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
- if (ret)
- return ret;
-
- ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000);
- if (ret)
- return ret;
-
- ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
- if (ret)
- return ret;
-
- rtw8852b_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
- RTW89_MAC_1 : RTW89_MAC_0);
-
- return 0;
-}
-
-void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
-{
- const struct rtw89_reg3_def *def = rtw8852b_pmac_ht20_mcs7_tbl;
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(rtw8852b_pmac_ht20_mcs7_tbl); i++, def++)
- rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
-}
-
-static void rtw8852b_stop_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
-{
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx");
- if (tx_info->mode == CONT_TX)
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx);
- else if (tx_info->mode == PKTS_TX)
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx);
-}
-
-static void rtw8852b_start_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
-{
- enum rtw8852b_pmac_mode mode = tx_info->mode;
- u32 pkt_cnt = tx_info->tx_cnt;
- u16 period = tx_info->period;
-
- if (mode == CONT_TX && !tx_info->is_cck) {
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx);
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start");
- } else if (mode == PKTS_TX) {
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD,
- B_PMAC_TX_PRD_MSK, period, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK,
- pkt_cnt, idx);
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start");
- }
-
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx);
-}
-
-void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
-{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
-
- if (!tx_info->en_pmac_tx) {
- rtw8852b_stop_pmac_tx(rtwdev, tx_info, idx);
- rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
- if (chan->band_type == RTW89_BAND_2G)
- rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
- return;
- }
-
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable");
-
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx);
- rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx);
-
- rtw8852b_start_pmac_tx(rtwdev, tx_info, idx);
-}
-
-void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
- u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
-{
- struct rtw8852b_bb_pmac_info tx_info = {0};
-
- tx_info.en_pmac_tx = enable;
- tx_info.is_cck = 0;
- tx_info.mode = PKTS_TX;
- tx_info.tx_cnt = tx_cnt;
- tx_info.period = period;
- tx_info.tx_time = tx_time;
-
- rtw8852b_bb_set_pmac_tx(rtwdev, &tx_info, idx);
-}
-
-void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
- enum rtw89_phy_idx idx)
-{
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm);
-
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx);
-}
-
-void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
-{
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0);
-
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path);
-
- if (tx_path == RF_PATH_A) {
- rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1);
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
- } else if (tx_path == RF_PATH_B) {
- rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2);
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
- } else if (tx_path == RF_PATH_AB) {
- rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3);
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4);
- } else {
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path");
- }
-}
-
-void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx idx, u8 mode)
-{
- if (mode != 0)
- return;
-
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch");
-
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx);
-}
-
-void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- struct rtw8852b_bb_tssi_bak *bak)
-{
- s32 tmp;
-
- bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx);
- bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx);
- bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx);
- bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx);
- bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx);
- bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx);
- tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx);
- bak->tx_pwr = sign_extend32(tmp, 8);
-}
-
-void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- const struct rtw8852b_bb_tssi_bak *bak)
-{
- rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx);
- if (bak->tx_path == RF_AB)
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4);
- else
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx);
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx);
- rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx);
-}
-
-static void rtw8852b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_phy_idx phy_idx)
-{
- rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8852b_btc_preagc_en_defs_tbl :
- &rtw8852b_btc_preagc_dis_defs_tbl);
-}
-
-static void rtw8852b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_phy_idx phy_idx)
-{
- if (en) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
- B_PATH0_BT_SHARE_V1, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
- B_PATH0_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
- B_PATH1_G_LNA6_OP1DB_V1, 0x20);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
- B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
- B_PATH1_BT_SHARE_V1, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
- B_PATH1_BTG_PATH_V1, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
- B_BT_DYN_DC_EST_EN_MSK, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
- B_PATH0_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
- B_PATH0_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
- B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
- B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
- B_PATH1_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
- B_PATH1_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc);
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
- B_BT_DYN_DC_EST_EN_MSK, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
- }
-}
-
-void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
-{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- u32 rst_mask0;
- u32 rst_mask1;
-
- if (rx_path == RF_A) {
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
- } else if (rx_path == RF_B) {
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2);
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
- } else if (rx_path == RF_AB) {
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3);
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
- }
-
- rtw8852b_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0);
-
- if (chan->band_type == RTW89_BAND_2G &&
- (rx_path == RF_B || rx_path == RF_AB))
- rtw8852b_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0);
- else
- rtw8852b_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0);
-
- rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
- rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
- if (rx_path == RF_A) {
- rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
- rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
- rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
- }
-}
-
-static void rtw8852b_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
-{
- if (rx_path == RF_A) {
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
- B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
- B_P0_RFMODE_FTM_RX, 0x333);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
- B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
- B_P1_RFMODE_FTM_RX, 0x111);
- } else if (rx_path == RF_B) {
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
- B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
- B_P0_RFMODE_FTM_RX, 0x111);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
- B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
- B_P1_RFMODE_FTM_RX, 0x333);
- } else if (rx_path == RF_AB) {
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
- B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
- B_P0_RFMODE_FTM_RX, 0x333);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
- B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
- B_P1_RFMODE_FTM_RX, 0x333);
- }
-}
-
-static void rtw8852b_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
-{
- struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
-
- rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
- rtw8852b_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
-
- if (rtwdev->hal.rx_nss == 1) {
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
- }
-
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0);
-}
-
-static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
-{
- if (rtwdev->is_tssi_mode[rf_path]) {
- u32 addr = 0x1c10 + (rf_path << 13);
-
- return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000);
- }
-
- rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
- rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
- rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
-
- fsleep(200);
-
- return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
-}
-
static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
@@ -2190,86 +633,6 @@ static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
}
}
-static
-void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
-{
- rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000);
- rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
- rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
- rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
-}
-
-static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
- const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_mac_ax_coex coex_params = {
- .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
- .direction = RTW89_MAC_AX_COEX_INNER,
- };
-
- /* PTA init */
- rtw89_mac_coex_init(rtwdev, &coex_params);
-
- /* set WL Tx response = Hi-Pri */
- chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
- chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
-
- /* set rf gnt debug off */
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
-
- /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (btc->ant_type == BTC_ANT_SHARED) {
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
- /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f);
- } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff);
- }
-
- /* set PTA break table */
- rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM);
-
- /* enable BT counter 0xda40[16,2] = 2b'11 */
- rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN);
- btc->cx.wl.status.map.init_ok = true;
-}
-
-static
-void rtw8852b_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
-{
- u32 bitmap;
- u32 reg;
-
- switch (map) {
- case BTC_PRI_MASK_TX_RESP:
- reg = R_BTC_BT_COEX_MSK_TABLE;
- bitmap = B_BTC_PRI_MASK_TX_RESP_V1;
- break;
- case BTC_PRI_MASK_BEACON:
- reg = R_AX_WL_PRI_MSK;
- bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ;
- break;
- case BTC_PRI_MASK_RX_CCK:
- reg = R_BTC_BT_COEX_MSK_TABLE;
- bitmap = B_BTC_PRI_MASK_RXCCK_V1;
- break;
- default:
- return;
- }
-
- if (state)
- rtw89_write32_set(rtwdev, reg, bitmap);
- else
- rtw89_write32_clr(rtwdev, reg, bitmap);
-}
-
union rtw8852b_btc_wl_txpwr_ctrl {
u32 txpwr_val;
struct {
@@ -2337,186 +700,19 @@ do { \
#undef __write_ctrl
}
-static
-s8 rtw8852b_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
-{
- /* +6 for compensate offset */
- return clamp_t(s8, val + 6, -100, 0) + 100;
-}
-
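rtw8852b_btc_get_bt_rssi(), removed above, maps a signed BT RSSI onto a 0..100 scale: add a fixed +6 compensation, clamp to [-100, 0], then add 100. A minimal worked example:

#include <stdio.h>

static signed char btc_bt_rssi_scale(signed char val)
{
        int v = val + 6;                /* fixed compensation offset */

        if (v < -100)
                v = -100;
        if (v > 0)
                v = 0;
        return (signed char)(v + 100);  /* 0 (very weak) .. 100 (strong) */
}

int main(void)
{
        printf("%d\n", btc_bt_rssi_scale(-60));  /* -60 + 6 = -54 -> 46 */
        return 0;
}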
-static
-void rtw8852b_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
-{
- /* Feature move to firmware */
-}
-
-static void rtw8852b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
-{
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31);
-
- /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
- if (state)
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179);
- else
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20);
-
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
-}
-
-static void rtw8852b_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
-{
- switch (level) {
- case 0: /* default */
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
- break;
- case 1: /* Fix LNA2=5 */
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
- break;
- }
-}
-
-static void rtw8852b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
-
- switch (level) {
- case 0: /* original */
- default:
- rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
- btc->dm.wl_lna2 = 0;
- break;
- case 1: /* for FDD free-run */
- rtw8852b_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0);
- btc->dm.wl_lna2 = 0;
- break;
- case 2: /* for BTG Co-Rx*/
- rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
- btc->dm.wl_lna2 = 1;
- break;
- }
-
- rtw8852b_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2);
-}
-
-static void rtw8852b_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
- struct rtw89_rx_phy_ppdu *phy_ppdu,
- struct ieee80211_rx_status *status)
-{
- u16 chan = phy_ppdu->chan_idx;
- enum nl80211_band band;
- u8 ch;
-
- if (chan == 0)
- return;
-
- rtw89_decode_chan_idx(rtwdev, chan, &ch, &band);
- status->freq = ieee80211_channel_to_frequency(ch, band);
- status->band = band;
-}
-
-static void rtw8852b_query_ppdu(struct rtw89_dev *rtwdev,
- struct rtw89_rx_phy_ppdu *phy_ppdu,
- struct ieee80211_rx_status *status)
-{
- u8 path;
- u8 *rx_power = phy_ppdu->rssi;
-
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
- for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
- status->chains |= BIT(path);
- status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
- }
- if (phy_ppdu->valid)
- rtw8852b_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
-}
-
-static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
-{
- int ret;
-
- rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
- B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
- rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1);
- rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
- rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
- rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
-
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7,
- FULL_BIT_MASK);
- if (ret)
- return ret;
-
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7,
- FULL_BIT_MASK);
- if (ret)
- return ret;
-
- rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE);
-
- return 0;
-}
-
-static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
-{
- u8 wl_rfc_s0;
- u8 wl_rfc_s1;
- int ret;
-
- rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
- rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
- B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
-
- ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0);
- if (ret)
- return ret;
- wl_rfc_s0 &= ~XTAL_SI_RF00S_EN;
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0,
- FULL_BIT_MASK);
- if (ret)
- return ret;
-
- ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1);
- if (ret)
- return ret;
- wl_rfc_s1 &= ~XTAL_SI_RF10S_EN;
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1,
- FULL_BIT_MASK);
- return ret;
-}
-
static const struct rtw89_chip_ops rtw8852b_chip_ops = {
- .enable_bb_rf = rtw8852b_mac_enable_bb_rf,
- .disable_bb_rf = rtw8852b_mac_disable_bb_rf,
+ .enable_bb_rf = rtw8852bx_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852bx_mac_disable_bb_rf,
.bb_preinit = NULL,
.bb_postinit = NULL,
.bb_reset = rtw8852b_bb_reset,
- .bb_sethw = rtw8852b_bb_sethw,
+ .bb_sethw = rtw8852bx_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
.write_rf = rtw89_phy_write_rf_v1,
.set_channel = rtw8852b_set_channel,
.set_channel_help = rtw8852b_set_channel_help,
- .read_efuse = rtw8852b_read_efuse,
- .read_phycap = rtw8852b_read_phycap,
+ .read_efuse = rtw8852bx_read_efuse,
+ .read_phycap = rtw8852bx_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
.rfk_hw_init = NULL,
@@ -2526,16 +722,16 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.rfk_band_changed = rtw8852b_rfk_band_changed,
.rfk_scan = rtw8852b_rfk_scan,
.rfk_track = rtw8852b_rfk_track,
- .power_trim = rtw8852b_power_trim,
- .set_txpwr = rtw8852b_set_txpwr,
- .set_txpwr_ctrl = rtw8852b_set_txpwr_ctrl,
- .init_txpwr_unit = rtw8852b_init_txpwr_unit,
- .get_thermal = rtw8852b_get_thermal,
- .ctrl_btg_bt_rx = rtw8852b_ctrl_btg_bt_rx,
- .query_ppdu = rtw8852b_query_ppdu,
- .ctrl_nbtg_bt_tx = rtw8852b_ctrl_nbtg_bt_tx,
- .cfg_txrx_path = rtw8852b_bb_cfg_txrx_path,
- .set_txpwr_ul_tb_offset = rtw8852b_set_txpwr_ul_tb_offset,
+ .power_trim = rtw8852bx_power_trim,
+ .set_txpwr = rtw8852bx_set_txpwr,
+ .set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852bx_init_txpwr_unit,
+ .get_thermal = rtw8852bx_get_thermal,
+ .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8852bx_query_ppdu,
+ .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
.pwr_on_func = rtw8852b_pwr_on_func,
.pwr_off_func = rtw8852b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2554,13 +750,13 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852b_btc_set_rfe,
- .btc_init_cfg = rtw8852b_btc_init_cfg,
- .btc_set_wl_pri = rtw8852b_btc_set_wl_pri,
+ .btc_init_cfg = rtw8852bx_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852bx_btc_set_wl_pri,
.btc_set_wl_txpwr_ctrl = rtw8852b_btc_set_wl_txpwr_ctrl,
- .btc_get_bt_rssi = rtw8852b_btc_get_bt_rssi,
- .btc_update_bt_cnt = rtw8852b_btc_update_bt_cnt,
- .btc_wl_s1_standby = rtw8852b_btc_wl_s1_standby,
- .btc_set_wl_rx_gain = rtw8852b_btc_set_wl_rx_gain,
+ .btc_get_bt_rssi = rtw8852bx_btc_get_bt_rssi,
+ .btc_update_bt_cnt = rtw8852bx_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852bx_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852bx_btc_set_wl_rx_gain,
.btc_set_policy = rtw89_btc_set_policy_v1,
};
@@ -2587,7 +783,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
- .max_amsdu_limit = 3500,
+ .max_amsdu_limit = 5000,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
@@ -2610,6 +806,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8852b_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2672,7 +869,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852b_c2h_regs,
.page_regs = &rtw8852b_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
+ .wow_reason_reg = rtw8852b_wow_wakeup_regs,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.h b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
index 4f9b3d476879..5ec7180fd355 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
@@ -10,128 +10,6 @@
#define RF_PATH_NUM_8852B 2
#define BB_PATH_NUM_8852B 2
-enum rtw8852b_pmac_mode {
- NONE_TEST,
- PKTS_TX,
- PKTS_RX,
- CONT_TX
-};
-
-struct rtw8852b_u_efuse {
- u8 rsvd[0x88];
- u8 mac_addr[ETH_ALEN];
-};
-
-struct rtw8852b_e_efuse {
- u8 mac_addr[ETH_ALEN];
-};
-
-struct rtw8852b_tssi_offset {
- u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
- u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
- u8 rsvd[7];
- u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
-} __packed;
-
-struct rtw8852b_efuse {
- u8 rsvd[0x210];
- struct rtw8852b_tssi_offset path_a_tssi;
- u8 rsvd1[10];
- struct rtw8852b_tssi_offset path_b_tssi;
- u8 rsvd2[94];
- u8 channel_plan;
- u8 xtal_k;
- u8 rsvd3;
- u8 iqk_lck;
- u8 rsvd4[5];
- u8 reg_setting:2;
- u8 tx_diversity:1;
- u8 rx_diversity:2;
- u8 ac_mode:1;
- u8 module_type:2;
- u8 rsvd5;
- u8 shared_ant:1;
- u8 coex_type:3;
- u8 ant_iso:1;
- u8 radio_on_off:1;
- u8 rsvd6:2;
- u8 eeprom_version;
- u8 customer_id;
- u8 tx_bb_swing_2g;
- u8 tx_bb_swing_5g;
- u8 tx_cali_pwr_trk_mode;
- u8 trx_path_selection;
- u8 rfe_type;
- u8 country_code[2];
- u8 rsvd7[3];
- u8 path_a_therm;
- u8 path_b_therm;
- u8 rsvd8[2];
- u8 rx_gain_2g_ofdm;
- u8 rsvd9;
- u8 rx_gain_2g_cck;
- u8 rsvd10;
- u8 rx_gain_5g_low;
- u8 rsvd11;
- u8 rx_gain_5g_mid;
- u8 rsvd12;
- u8 rx_gain_5g_high;
- u8 rsvd13[35];
- u8 path_a_cck_pwr_idx[6];
- u8 path_a_bw40_1tx_pwr_idx[5];
- u8 path_a_ofdm_1tx_pwr_idx_diff:4;
- u8 path_a_bw20_1tx_pwr_idx_diff:4;
- u8 path_a_bw20_2tx_pwr_idx_diff:4;
- u8 path_a_bw40_2tx_pwr_idx_diff:4;
- u8 path_a_cck_2tx_pwr_idx_diff:4;
- u8 path_a_ofdm_2tx_pwr_idx_diff:4;
- u8 rsvd14[0xf2];
- union {
- struct rtw8852b_u_efuse u;
- struct rtw8852b_e_efuse e;
- };
-} __packed;
-
-struct rtw8852b_bb_pmac_info {
- u8 en_pmac_tx:1;
- u8 is_cck:1;
- u8 mode:3;
- u8 rsvd:3;
- u16 tx_cnt;
- u16 period;
- u16 tx_time;
- u8 duty_cycle;
-};
-
-struct rtw8852b_bb_tssi_bak {
- u8 tx_path;
- u8 rx_path;
- u32 p0_rfmode;
- u32 p0_rfmode_ftm;
- u32 p1_rfmode;
- u32 p1_rfmode_ftm;
- s16 tx_pwr; /* S9 */
-};
-
extern const struct rtw89_chip_info rtw8852b_chip_info;
-void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
-void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx);
-void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
- u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
-void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
- enum rtw89_phy_idx idx);
-void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
-void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path);
-void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx idx, u8 mode);
-void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- struct rtw8852b_bb_tssi_bak *bak);
-void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- const struct rtw8852b_bb_tssi_bak *bak);
-
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
new file mode 100644
index 000000000000..1745c2882acf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -0,0 +1,2053 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852b_common.h"
+#include "util.h"
+
+static const struct rtw89_reg3_def rtw8852bx_pmac_ht20_mcs7_tbl[] = {
+ {0x4580, 0x0000ffff, 0x0},
+ {0x4580, 0xffff0000, 0x0},
+ {0x4584, 0x0000ffff, 0x0},
+ {0x4584, 0xffff0000, 0x0},
+ {0x4580, 0x0000ffff, 0x1},
+ {0x4578, 0x00ffffff, 0x2018b},
+ {0x4570, 0x03ffffff, 0x7},
+ {0x4574, 0x03ffffff, 0x32407},
+ {0x45b8, 0x00000010, 0x0},
+ {0x45b8, 0x00000100, 0x0},
+ {0x45b8, 0x00000080, 0x0},
+ {0x45b8, 0x00000008, 0x0},
+ {0x45a0, 0x0000ff00, 0x0},
+ {0x45a0, 0xff000000, 0x1},
+ {0x45a4, 0x0000ff00, 0x2},
+ {0x45a4, 0xff000000, 0x3},
+ {0x45b8, 0x00000020, 0x0},
+ {0x4568, 0xe0000000, 0x0},
+ {0x45b8, 0x00000002, 0x1},
+ {0x456c, 0xe0000000, 0x0},
+ {0x45b4, 0x00006000, 0x0},
+ {0x45b4, 0x00001800, 0x1},
+ {0x45b8, 0x00000040, 0x0},
+ {0x45b8, 0x00000004, 0x0},
+ {0x45b8, 0x00000200, 0x0},
+ {0x4598, 0xf8000000, 0x0},
+ {0x45b8, 0x00100000, 0x0},
+ {0x45a8, 0x00000fc0, 0x0},
+ {0x45b8, 0x00200000, 0x0},
+ {0x45b0, 0x00000038, 0x0},
+ {0x45b0, 0x000001c0, 0x0},
+ {0x45a0, 0x000000ff, 0x0},
+ {0x45b8, 0x00400000, 0x0},
+ {0x4590, 0x000007ff, 0x0},
+ {0x45b0, 0x00000e00, 0x0},
+ {0x45ac, 0x0000001f, 0x0},
+ {0x45b8, 0x00800000, 0x0},
+ {0x45a8, 0x0003f000, 0x0},
+ {0x45b8, 0x01000000, 0x0},
+ {0x45b0, 0x00007000, 0x0},
+ {0x45b0, 0x00038000, 0x0},
+ {0x45a0, 0x00ff0000, 0x0},
+ {0x45b8, 0x02000000, 0x0},
+ {0x4590, 0x003ff800, 0x0},
+ {0x45b0, 0x001c0000, 0x0},
+ {0x45ac, 0x000003e0, 0x0},
+ {0x45b8, 0x04000000, 0x0},
+ {0x45a8, 0x00fc0000, 0x0},
+ {0x45b8, 0x08000000, 0x0},
+ {0x45b0, 0x00e00000, 0x0},
+ {0x45b0, 0x07000000, 0x0},
+ {0x45a4, 0x000000ff, 0x0},
+ {0x45b8, 0x10000000, 0x0},
+ {0x4594, 0x000007ff, 0x0},
+ {0x45b0, 0x38000000, 0x0},
+ {0x45ac, 0x00007c00, 0x0},
+ {0x45b8, 0x20000000, 0x0},
+ {0x45a8, 0x3f000000, 0x0},
+ {0x45b8, 0x40000000, 0x0},
+ {0x45b4, 0x00000007, 0x0},
+ {0x45b4, 0x00000038, 0x0},
+ {0x45a4, 0x00ff0000, 0x0},
+ {0x45b8, 0x80000000, 0x0},
+ {0x4594, 0x003ff800, 0x0},
+ {0x45b4, 0x000001c0, 0x0},
+ {0x4598, 0xf8000000, 0x0},
+ {0x45b8, 0x00100000, 0x0},
+ {0x45a8, 0x00000fc0, 0x7},
+ {0x45b8, 0x00200000, 0x0},
+ {0x45b0, 0x00000038, 0x0},
+ {0x45b0, 0x000001c0, 0x0},
+ {0x45a0, 0x000000ff, 0x0},
+ {0x45b4, 0x06000000, 0x0},
+ {0x45b0, 0x00000007, 0x0},
+ {0x45b8, 0x00080000, 0x0},
+ {0x45a8, 0x0000003f, 0x0},
+ {0x457c, 0xffe00000, 0x1},
+ {0x4530, 0xffffffff, 0x0},
+ {0x4588, 0x00003fff, 0x0},
+ {0x4598, 0x000001ff, 0x0},
+ {0x4534, 0xffffffff, 0x0},
+ {0x4538, 0xffffffff, 0x0},
+ {0x453c, 0xffffffff, 0x0},
+ {0x4588, 0x0fffc000, 0x0},
+ {0x4598, 0x0003fe00, 0x0},
+ {0x4540, 0xffffffff, 0x0},
+ {0x4544, 0xffffffff, 0x0},
+ {0x4548, 0xffffffff, 0x0},
+ {0x458c, 0x00003fff, 0x0},
+ {0x4598, 0x07fc0000, 0x0},
+ {0x454c, 0xffffffff, 0x0},
+ {0x4550, 0xffffffff, 0x0},
+ {0x4554, 0xffffffff, 0x0},
+ {0x458c, 0x0fffc000, 0x0},
+ {0x459c, 0x000001ff, 0x0},
+ {0x4558, 0xffffffff, 0x0},
+ {0x455c, 0xffffffff, 0x0},
+ {0x4530, 0xffffffff, 0x4e790001},
+ {0x4588, 0x00003fff, 0x0},
+ {0x4598, 0x000001ff, 0x1},
+ {0x4534, 0xffffffff, 0x0},
+ {0x4538, 0xffffffff, 0x4b},
+ {0x45ac, 0x38000000, 0x7},
+ {0x4588, 0xf0000000, 0x0},
+ {0x459c, 0x7e000000, 0x0},
+ {0x45b8, 0x00040000, 0x0},
+ {0x45b8, 0x00020000, 0x0},
+ {0x4590, 0xffc00000, 0x0},
+ {0x45b8, 0x00004000, 0x0},
+ {0x4578, 0xff000000, 0x0},
+ {0x45b8, 0x00000400, 0x0},
+ {0x45b8, 0x00000800, 0x0},
+ {0x45b8, 0x00001000, 0x0},
+ {0x45b8, 0x00002000, 0x0},
+ {0x45b4, 0x00018000, 0x0},
+ {0x45ac, 0x07800000, 0x0},
+ {0x45b4, 0x00000600, 0x2},
+ {0x459c, 0x0001fe00, 0x80},
+ {0x45ac, 0x00078000, 0x3},
+ {0x459c, 0x01fe0000, 0x1},
+};
+
+static const struct rtw89_reg3_def rtw8852bx_btc_preagc_en_defs[] = {
+ {0x46D0, GENMASK(1, 0), 0x3},
+ {0x4790, GENMASK(1, 0), 0x3},
+ {0x4AD4, GENMASK(31, 0), 0xf},
+ {0x4AE0, GENMASK(31, 0), 0xf},
+ {0x4688, GENMASK(31, 24), 0x80},
+ {0x476C, GENMASK(31, 24), 0x80},
+ {0x4694, GENMASK(7, 0), 0x80},
+ {0x4694, GENMASK(15, 8), 0x80},
+ {0x4778, GENMASK(7, 0), 0x80},
+ {0x4778, GENMASK(15, 8), 0x80},
+ {0x4AE4, GENMASK(23, 0), 0x780D1E},
+ {0x4AEC, GENMASK(23, 0), 0x780D1E},
+ {0x469C, GENMASK(31, 26), 0x34},
+ {0x49F0, GENMASK(31, 26), 0x34},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_en_defs);
+
+static const struct rtw89_reg3_def rtw8852bx_btc_preagc_dis_defs[] = {
+ {0x46D0, GENMASK(1, 0), 0x0},
+ {0x4790, GENMASK(1, 0), 0x0},
+ {0x4AD4, GENMASK(31, 0), 0x60},
+ {0x4AE0, GENMASK(31, 0), 0x60},
+ {0x4688, GENMASK(31, 24), 0x1a},
+ {0x476C, GENMASK(31, 24), 0x1a},
+ {0x4694, GENMASK(7, 0), 0x2a},
+ {0x4694, GENMASK(15, 8), 0x2a},
+ {0x4778, GENMASK(7, 0), 0x2a},
+ {0x4778, GENMASK(15, 8), 0x2a},
+ {0x4AE4, GENMASK(23, 0), 0x79E99E},
+ {0x4AEC, GENMASK(23, 0), 0x79E99E},
+ {0x469C, GENMASK(31, 26), 0x26},
+ {0x49F0, GENMASK(31, 26), 0x26},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_dis_defs);
+
+static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse,
+ struct rtw8852bx_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852bx_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_efuse *map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ struct rtw8852bx_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
+static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low)
+{
+ if (high)
+ *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3);
+ if (low)
+ *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3);
+
+ return data != 0xff;
+}
+
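The new _decode_efuse_gain() helper reads one efuse byte as two signed 4-bit gain offsets, high nibble and low nibble, and treats 0xff as "not programmed". Below is a standalone sketch using a portable 4-bit sign extension in place of the kernel's sign_extend32(); sext4() and the sample byte are illustrative, not part of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sign-extend the low 4 bits of v (bit 3 is the sign bit). */
static int8_t sext4(uint8_t v)
{
        return (int8_t)(((v & 0xf) ^ 0x8) - 0x8);
}

static bool decode_efuse_gain(uint8_t data, int8_t *high, int8_t *low)
{
        if (high)
                *high = sext4(data >> 4);
        if (low)
                *low = sext4(data);

        return data != 0xff;    /* 0xff means the byte was never programmed */
}

int main(void)
{
        int8_t hi, lo;
        bool valid = decode_efuse_gain(0xf3, &hi, &lo);

        printf("valid=%d high=%d low=%d\n", valid, hi, lo); /* valid=1 high=-1 low=3 */
        return 0;
}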
+static void rtw8852bx_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_efuse *map)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ bool valid = false;
+
+ valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]);
+ valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_low,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_high,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
+
+ gain->offset_valid = valid;
+}
+
+static int __rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw8852bx_efuse *map;
+
+ map = (struct rtw8852bx_efuse *)log_map;
+
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8852bx_efuse_parsing_tssi(rtwdev, map);
+ rtw8852bx_efuse_parsing_gain_offset(rtwdev, map);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ rtw8852be_efuse_parsing(efuse, map);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
+static void rtw8852bx_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+#define PWR_K_CHK_OFFSET 0x5E9
+#define PWR_K_CHK_VALUE 0xAA
+ u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr;
+
+ if (phycap_map[offset] == PWR_K_CHK_VALUE)
+ rtwdev->efuse.power_k_valid = true;
+}
+
+static void rtw8852bx_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ static const u32 tssi_trim_addr[RF_PATH_NUM_8852BX] = {0x5D6, 0x5AB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = false;
+ u32 ofst;
+ u8 i, j;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr[i] - addr - j;
+ tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+ }
+
+ if (!pg) {
+ memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM] no PG, set all trim info to 0\n");
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+ i, j, tssi->tssi_trim[i][j],
+ tssi_trim_addr[i] - j);
+}
+
+static void rtw8852bx_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 thm_trim_addr[RF_PATH_NUM_8852BX] = {0x5DF, 0x5DC};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+ i, info->thermal_trim[i]);
+
+ if (info->thermal_trim[i] != 0xff)
+ info->pg_thermal_trim = true;
+ }
+}
+
+static void rtw8852bx_thermal_trim(struct rtw89_dev *rtwdev)
+{
+#define __thm_setting(raw) \
+({ \
+ u8 __v = (raw); \
+ ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
+})
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 i, val;
+
+ if (!info->pg_thermal_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = __thm_setting(info->thermal_trim[i]);
+ rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+ i, val);
+ }
+#undef __thm_setting
+}
+
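The __thm_setting() macro above rearranges the raw thermal-trim efuse value before it is written to RR_TM2: bit 0 moves up to bit 3 and the low five bits shift down by one. A standalone restatement with hand-checked inputs (the arithmetic is copied verbatim; what the resulting field encodes is not stated in this diff):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as __thm_setting(raw) above. */
static uint8_t thm_setting(uint8_t raw)
{
        return (uint8_t)(((raw & 0x1) << 3) | ((raw & 0x1f) >> 1));
}

int main(void)
{
        printf("0x0b -> 0x%x\n", thm_setting(0x0b));    /* (1 << 3) | 0x5 = 0xd */
        printf("0x16 -> 0x%x\n", thm_setting(0x16));    /* (0 << 3) | 0xb = 0xb */
        return 0;
}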
+static void rtw8852bx_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8852BX] = {0x5DE, 0x5DB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+
+ if (info->pa_bias_trim[i] != 0xff)
+ info->pg_pa_bias_trim = true;
+ }
+}
+
+static void rtw8852bx_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+ }
+}
+
+static void rtw8852bx_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = {
+ {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8},
+ {0x590, 0x58F, 0, 0x58E, 0x58D},
+ };
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ u32 phycap_addr = rtwdev->chip->phycap_addr;
+ bool valid = false;
+ int path, i;
+ u8 data;
+
+ for (path = 0; path < 2; path++)
+ for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) {
+ if (comp_addrs[path][i] == 0)
+ continue;
+
+ data = phycap_map[comp_addrs[path][i] - phycap_addr];
+ valid |= _decode_efuse_gain(data, NULL,
+ &gain->comp[path][i]);
+ }
+
+ gain->comp_valid = valid;
+}
+
+static int __rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8852bx_phycap_parsing_power_cal(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_tssi(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_gain_comp(rtwdev, phycap_map);
+
+ return 0;
+}
+
+static void __rtw8852bx_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_thermal_trim(rtwdev);
+ rtw8852bx_pa_bias_trim(rtwdev);
+}
+
+static void __rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx);
+ u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx);
+ u8 txsc20 = 0, txsc40 = 0;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_80:
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
+ rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0));
+ rtw89_write32(rtwdev, sub_carr, txsc20);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK);
+ rtw89_write32(rtwdev, sub_carr, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (chan->channel > 14) {
+ rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE);
+ rtw89_write8_set(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+ } else {
+ rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE);
+ rtw89_write8_clr(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+ }
+}
+
+static const u32 rtw8852bx_sco_barker_threshold[14] = {
+ 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6,
+ 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4
+};
+
+static const u32 rtw8852bx_sco_cck_threshold[14] = {
+ 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724,
+ 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed
+};
+
+static void rtw8852bx_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch)
+{
+ u8 ch_element = primary_ch - 1;
+
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH,
+ rtw8852bx_sco_barker_threshold[ch_element]);
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH,
+ rtw8852bx_sco_cck_threshold[ch_element]);
+}
+
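+/* Map the central channel to the SCO compensation codeword that the caller
+ * writes to B_FC0_BW_INV; channels outside the listed ranges return 0.
+ */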
+static u8 rtw8852bx_sco_mapping(u8 central_ch)
+{
+ if (central_ch == 1)
+ return 109;
+ else if (central_ch >= 2 && central_ch <= 6)
+ return 108;
+ else if (central_ch >= 7 && central_ch <= 10)
+ return 107;
+ else if (central_ch >= 11 && central_ch <= 14)
+ return 106;
+ else if (central_ch == 36 || central_ch == 38)
+ return 51;
+ else if (central_ch >= 40 && central_ch <= 58)
+ return 50;
+ else if (central_ch >= 60 && central_ch <= 64)
+ return 49;
+ else if (central_ch == 100 || central_ch == 102)
+ return 48;
+ else if (central_ch >= 104 && central_ch <= 126)
+ return 47;
+ else if (central_ch >= 128 && central_ch <= 151)
+ return 46;
+ else if (central_ch >= 153 && central_ch <= 177)
+ return 45;
+ else
+ return 0;
+}
+
+struct rtw8852bx_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8852BX];
+ u32 gain_a[BB_PATH_NUM_8852BX];
+ u32 gain_mask;
+};
+
+static const struct rtw8852bx_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x000000ff },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x0000ff00 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x000000ff },
+};
+
+static const struct rtw8852bx_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0xff000000 },
+};
+
+static void rtw8852bx_set_gain_error(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
+ u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_lna[i].gain_g[path];
+ else
+ reg = bb_gain_lna[i].gain_a[path];
+
+ mask = bb_gain_lna[i].gain_mask;
+ val = gain->lna_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_tia[i].gain_g[path];
+ else
+ reg = bb_gain_tia[i].gain_a[path];
+
+ mask = bb_gain_tia[i].gain_mask;
+ val = gain->tia_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+}
+
+static void rtw8852bt_ext_loss_avg_update(struct rtw89_dev *rtwdev,
+ s8 ext_loss_a, s8 ext_loss_b)
+{
+ s8 ext_loss_avg;
+ u64 linear;
+ u8 pwrofst;
+
+ if (ext_loss_a == ext_loss_b) {
+ ext_loss_avg = ext_loss_a;
+ } else {
+ linear = rtw89_db_2_linear(abs(ext_loss_a - ext_loss_b)) + 1;
+ linear = DIV_ROUND_CLOSEST_ULL(linear / 2, 1 << RTW89_LINEAR_FRAC_BITS);
+ ext_loss_avg = rtw89_linear_2_db(linear);
+ ext_loss_avg += min(ext_loss_a, ext_loss_b);
+ }
+
+ pwrofst = max(DIV_ROUND_CLOSEST(ext_loss_avg, 4) + 16, EDCCA_PWROFST_DEFAULT);
+
+ rtw89_phy_write32_mask(rtwdev, R_PWOFST, B_PWOFST, pwrofst);
+}
+
+static void rtw8852bx_set_gain_offset(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD};
+ static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1,
+ R_PATH1_G_TIA1_LNA6_OP1DB_V1};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_ofdm_band;
+ s8 ext_loss_a = 0, ext_loss_b = 0;
+ s32 offset_a, offset_b;
+ s32 offset_ofdm, offset_cck;
+ s32 tmp;
+ u8 path;
+
+ if (!efuse_gain->comp_valid)
+ goto next;
+
+ for (path = RF_PATH_A; path < BB_PATH_NUM_8852BX; path++) {
+ tmp = efuse_gain->comp[path][subband];
+ tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp);
+ }
+
+next:
+ if (!efuse_gain->offset_valid)
+ goto ext_loss;
+
+ gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband);
+
+ offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
+ offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
+
+ tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp);
+
+ tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp);
+
+ if (hal->antenna_rx == RF_B) {
+ offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
+ offset_cck = -efuse_gain->offset[RF_PATH_B][0];
+ } else {
+ offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
+ offset_cck = -efuse_gain->offset[RF_PATH_A][0];
+ }
+
+ tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0];
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
+
+ tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0];
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
+
+ if (subband == RTW89_CH_2G) {
+ tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1);
+ tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1);
+ rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST,
+ B_RX_RPL_OFST_CCK_MASK, tmp);
+ }
+
+ ext_loss_a = (offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2);
+ ext_loss_b = (offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2);
+
+ext_loss:
+ if (rtwdev->chip->chip_id == RTL8852BT)
+ rtw8852bt_ext_loss_avg_update(rtwdev, ext_loss_a, ext_loss_b);
+}
+
+static
+void rtw8852bx_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
+ u8 band = rtw89_subband_to_bb_gain_band(subband);
+ u32 val;
+
+ val = u32_encode_bits((gain->rpl_ofst_20[band][RF_PATH_A] +
+ gain->rpl_ofst_20[band][RF_PATH_B]) >> 1, B_P0_RPL1_20_MASK) |
+ u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][0] +
+ gain->rpl_ofst_40[band][RF_PATH_B][0]) >> 1, B_P0_RPL1_40_MASK) |
+ u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][1] +
+ gain->rpl_ofst_40[band][RF_PATH_B][1]) >> 1, B_P0_RPL1_41_MASK);
+ val >>= B_P0_RPL1_SHIFT;
+ rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val);
+
+ val = u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][2] +
+ gain->rpl_ofst_40[band][RF_PATH_B][2]) >> 1, B_P0_RTL2_42_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][0] +
+ gain->rpl_ofst_80[band][RF_PATH_B][0]) >> 1, B_P0_RTL2_80_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][1] +
+ gain->rpl_ofst_80[band][RF_PATH_B][1]) >> 1, B_P0_RTL2_81_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][10] +
+ gain->rpl_ofst_80[band][RF_PATH_B][10]) >> 1, B_P0_RTL2_8A_MASK);
+ rtw89_phy_write32(rtwdev, R_P0_RPL2, val);
+ rtw89_phy_write32(rtwdev, R_P1_RPL2, val);
+
+ val = u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][2] +
+ gain->rpl_ofst_80[band][RF_PATH_B][2]) >> 1, B_P0_RTL3_82_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][3] +
+ gain->rpl_ofst_80[band][RF_PATH_B][3]) >> 1, B_P0_RTL3_83_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][4] +
+ gain->rpl_ofst_80[band][RF_PATH_B][4]) >> 1, B_P0_RTL3_84_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][9] +
+ gain->rpl_ofst_80[band][RF_PATH_B][9]) >> 1, B_P0_RTL3_89_MASK);
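+ /* Trigger the RF thermal meter (toggle 1 -> 0 -> 1) and wait before reading the value back */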
+ rtw89_phy_write32(rtwdev, R_P0_RPL3, val);
+ rtw89_phy_write32(rtwdev, R_P1_RPL3, val);
+}
+
+static void rtw8852bx_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 central_ch = chan->channel;
+ u8 subband = chan->subband_type;
+ u8 sco_comp;
+ bool is_2g = central_ch <= 14;
+
+ /* Path A */
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx);
+
+ /* Path B */
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx);
+
+ /* SCO compensate FC setting */
+ sco_comp = rtw8852bx_sco_mapping(central_ch);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx);
+
+ if (chan->band_type == RTW89_BAND_6G)
+ return;
+
+ /* CCK parameters */
+ if (central_ch == 14) {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5);
+ }
+
+ rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_A);
+ rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_B);
+ rtw8852bx_set_gain_offset(rtwdev, subband, phy_idx);
+ rtw8852bx_set_rxsc_rpl_comp(rtwdev, subband);
+}
+
+static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ static const u32 adc_sel[2] = {0xC0EC, 0xC1EC};
+ static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to set ADC\n");
+ }
+}
+
+static
+void rtw8852bt_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ static const u32 rck_reset_count[2] = {0xC0E8, 0xC1E8};
+ static const u32 adc_op5_bw_sel[2] = {0xC0D8, 0xC1D8};
+ static const u32 adc_sample_td[2] = {0xC0D4, 0xC1D4};
+ static const u32 adc_rst_cycle[2] = {0xC0EC, 0xC1EC};
+ static const u32 decim_filter[2] = {0xC0EC, 0xC1EC};
+ static const u32 rck_offset[2] = {0xC0C4, 0xC1C4};
+ static const u32 rx_adc_clk[2] = {0x12A0, 0x32A0};
+ static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
+ static const u32 idac2_1[2] = {0xC0D4, 0xC1D4};
+ static const u32 idac2[2] = {0xC0D4, 0xC1D4};
+ static const u32 upd_clk_adc = 0x704;
+
+ if (rtwdev->chip->chip_id != RTL8852BT)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, idac2[path], B_P0_CFCH_CTL, 0x8);
+ rtw89_phy_write32_mask(rtwdev, rck_reset_count[path], B_ADCMOD_LP, 0x9);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], B_WDADC_SEL, 0x2);
+ rtw89_phy_write32_mask(rtwdev, rx_adc_clk[path], B_P0_RXCK_ADJ, 0x49);
+ rtw89_phy_write32_mask(rtwdev, decim_filter[path], B_DCIM_FR, 0x0);
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0);
+ /* Tx TSSI ADC update */
+ rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 0);
+
+ if (rtwdev->efuse.rfe_type >= 51)
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x2);
+ else
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x8);
+ rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3);
+ /* Tx TSSI ADC update */
+ rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 1);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x4);
+ rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3);
+ /* Tx TSSI ADC update */
+ rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 2);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to set ADC\n");
+ break;
+ }
+}
+
+static void rtw8852bx_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 rx_path_0;
+ u32 val;
+
+ rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, phy_idx);
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ if (chip_id == RTL8852BT) {
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ if (chip_id == RTL8852BT) {
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ if (chip_id == RTL8852BT) {
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_NRBW_EN_V1, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_NRBW_EN_V1, 0x1, phy_idx);
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
+ pri_ch, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ /* CCK primary channel */
+ if (pri_ch == RTW89_SC_20_UPPER)
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0);
+
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
+ pri_ch, phy_idx);
+
+ /* Set RF mode at A */
+ val = chip_id == RTL8852BT ? 0x333 : 0xaaa;
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, val, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, val, phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
+ pri_ch);
+ }
+
+ if (chip_id == RTL8852B) {
+ rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A);
+ rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B);
+ } else if (chip_id == RTL8852BT) {
+ rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_A);
+ rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_B);
+ }
+
+ if (rx_path_0 == 0x1)
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
+ else if (rx_path_0 == 0x2)
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
+}
+
+static void rtw8852bx_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en)
+{
+ if (cck_en) {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
+ }
+}
+
+static void rtw8852bx_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 pri_ch = chan->pri_ch_idx;
+ bool mask_5m_low;
+ bool mask_5m_en;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_40:
+ /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */
+ mask_5m_en = true;
+ mask_5m_low = pri_ch == RTW89_SC_20_LOWER;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */
+ mask_5m_en = pri_ch == RTW89_SC_20_UPMOST ||
+ pri_ch == RTW89_SC_20_LOWEST;
+ mask_5m_low = pri_ch == RTW89_SC_20_LOWEST;
+ break;
+ default:
+ mask_5m_en = false;
+ break;
+ }
+
+ if (!mask_5m_en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
+ B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx);
+ return;
+ }
+
+ if (mask_5m_low) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0);
+ }
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
+ B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx);
+}
+
+static void __rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+}
+
+static void rtw8852bx_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 addr;
+
+ for (addr = R_AX_PWR_MACID_LMT_TABLE0;
+ addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+}
+
+static void __rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP);
+
+ rtw8852bx_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
+
+ /* read these registers after loading BB parameters */
+ gain->offset_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK);
+ gain->rssi_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK);
+}
+
+static void rtw8852bx_bb_set_pop(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN);
+}
+
+static u32 rtw8852bt_spur_freq(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ u8 center_chan = chan->channel;
+
+ switch (chan->band_type) {
+ case RTW89_BAND_5G:
+ if (center_chan == 151 || center_chan == 153 ||
+ center_chan == 155 || center_chan == 163)
+ return 5760;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
+static void rtw8852bt_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s32 freq_diff, csi_idx, csi_tone_idx;
+ u32 spur_freq;
+
+ spur_freq = rtw8852bt_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN,
+ 0, phy_idx);
+ return;
+ }
+
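+ /* Translate the spur offset from the channel center into a CSI tone index (78.125 kHz subcarrier spacing) */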
+ freq_diff = (spur_freq - chan->freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_V1, B_SEG0CSI_IDX,
+ csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN, 1, phy_idx);
+}
+
+static
+void __rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ bool cck_en = chan->channel <= 14;
+ u8 pri_ch_idx = chan->pri_ch_idx;
+ u8 band = chan->band_type, chan_idx;
+
+ if (cck_en)
+ rtw8852bx_ctrl_sco_cck(rtwdev, chan->primary_channel);
+
+ rtw8852bx_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8852bx_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
+ rtw8852bx_ctrl_cck_en(rtwdev, cck_en);
+ if (chip_id == RTL8852BT)
+ rtw8852bt_set_csi_tone_idx(rtwdev, chan, phy_idx);
+ if (chip_id == RTL8852B && chan->band_type == RTW89_BAND_5G) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
+ }
+ chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
+ rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx);
+ rtw8852bx_5m_mask(rtwdev, chan, phy_idx);
+ rtw8852bx_bb_set_pop(rtwdev);
+ __rtw8852bx_bb_reset_all(rtwdev, phy_idx);
+}
+
+static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, s16 ref)
+{
+ const u16 tssi_16dbm_cw = 0x12c;
+ const u8 base_cw_0db = 0x27;
+ const s8 ofst_int = 0;
+ s16 pwr_s10_3;
+ s16 rf_pwr_cw;
+ u16 bb_pwr_cw;
+ u32 pwr_cw;
+ u32 tssi_ofst_cw;
+
+ pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0));
+ rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3));
+ rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
+ pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
+
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
+ tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
+
+ return u32_encode_bits(tssi_ofst_cw, B_DPD_TSSI_CW) |
+ u32_encode_bits(pwr_cw, B_DPD_PWR_CW) |
+ u32_encode_bits(ref, B_DPD_REF);
+}
+
+static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800};
+ const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
+ const u8 ofst_ofdm = 0x4;
+ const u8 ofst_cck = 0x8;
+ const s16 ref_ofdm = 0;
+ const s16 ref_cck = 0;
+ u32 val;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
+ B_AX_PWR_REF, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
+ phy_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
+ phy_idx);
+}
+
+static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 tx_shape_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2))
+#define __DFIR_CFG_MASK 0xffffffff
+#define __DFIR_CFG_NR 8
+#define __DECL_DFIR_PARAM(_name, _val...) \
+ static const u32 param_ ## _name[] = {_val}; \
+ static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR)
+
+ __DECL_DFIR_PARAM(flat,
+ 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053,
+ 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5);
+ __DECL_DFIR_PARAM(sharp,
+ 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090,
+ 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5);
+ __DECL_DFIR_PARAM(sharp_14,
+ 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
+ 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A);
+ u8 ch = chan->channel;
+ const u32 *param;
+ u32 addr;
+ int i;
+
+ if (ch > 14) {
+ rtw89_warn(rtwdev,
+ "set tx shape dfir by unknown ch: %d on 2G\n", ch);
+ return;
+ }
+
+ if (ch == 14)
+ param = param_sharp_14;
+ else
+ param = tx_shape_idx == 0 ? param_flat : param_sharp;
+
+ for (i = 0; i < __DFIR_CFG_NR; i++) {
+ addr = __DFIR_CFG_ADDR(i);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]);
+ rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i],
+ phy_idx);
+ }
+
+#undef __DECL_DFIR_PARAM
+#undef __DFIR_CFG_NR
+#undef __DFIR_CFG_MASK
+#undef __DFIR_CFG_ADDR
+}
+
+static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ u8 band = chan->band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd];
+ u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd];
+
+ if (band == RTW89_BAND_2G)
+ rtw8852bx_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG,
+ tx_shape_ofdm);
+}
+
+static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx);
+}
+
+static
+void __rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ u32 reg;
+
+ if (pw_ofst < -16 || pw_ofst > 15) {
+ rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
+ return;
+ }
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
+
+ pw_ofst = max_t(s8, pw_ofst - 3, -16);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst);
+}
+
+static int
+__rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ int ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
+ if (ret)
+ return ret;
+
+ rtw8852bx_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
+ RTW89_MAC_1 : RTW89_MAC_0);
+
+ return 0;
+}
+
+static
+void __rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_reg3_def *def = rtw8852bx_pmac_ht20_mcs7_tbl;
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(rtw8852bx_pmac_ht20_mcs7_tbl); i++, def++)
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void rtw8852bx_stop_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx");
+ if (tx_info->mode == CONT_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx);
+ else if (tx_info->mode == PKTS_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx);
+}
+
+static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ enum rtw8852bx_pmac_mode mode = tx_info->mode;
+ u32 pkt_cnt = tx_info->tx_cnt;
+ u16 period = tx_info->period;
+
+ if (mode == CONT_TX && !tx_info->is_cck) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start");
+ } else if (mode == PKTS_TX) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD,
+ B_PMAC_TX_PRD_MSK, period, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK,
+ pkt_cnt, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start");
+ }
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx);
+}
+
+static
+void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ if (!tx_info->en_pmac_tx) {
+ rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable");
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx);
+ rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx);
+
+ rtw8852bx_start_pmac_tx(rtwdev, tx_info, idx);
+}
+
+static
+void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx)
+{
+ struct rtw8852bx_bb_pmac_info tx_info = {0};
+
+ tx_info.en_pmac_tx = enable;
+ tx_info.is_cck = 0;
+ tx_info.mode = PKTS_TX;
+ tx_info.tx_cnt = tx_cnt;
+ tx_info.period = period;
+ tx_info.tx_time = tx_time;
+
+ rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+}
+
+static
+void __rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm);
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx);
+}
+
+static
+void __rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
+{
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path);
+
+ if (tx_path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_B) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path");
+ }
+}
+
+static
+void __rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode)
+{
+ if (mode != 0)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch");
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx);
+}
+
+static
+void __rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852bx_bb_tssi_bak *bak)
+{
+ s32 tmp;
+
+ bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx);
+ bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx);
+ bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx);
+ bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx);
+ bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx);
+ bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx);
+ tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx);
+ bak->tx_pwr = sign_extend32(tmp, 8);
+}
+
+static
+void __rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852bx_bb_tssi_bak *bak)
+{
+ rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx);
+ if (bak->tx_path == RF_AB)
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx);
+ rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx);
+}
+
+static void __rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8852bx_btc_preagc_en_defs_tbl :
+ &rtw8852bx_btc_preagc_dis_defs_tbl);
+}
+
+static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x20);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
+ }
+}
+
+static
+void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u32 rst_mask0;
+ u32 rst_mask1;
+
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else if (rx_path == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else if (rx_path == RF_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+
+ rtw8852bx_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0);
+
+ if (chan->band_type == RTW89_BAND_2G &&
+ (rx_path == RF_B || rx_path == RF_AB))
+ rtw8852bx_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0);
+ else
+ rtw8852bx_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0);
+
+ rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
+ rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
+ }
+}
+
+static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x333);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x111);
+ } else if (rx_path == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x111);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x333);
+ } else if (rx_path == RF_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x333);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x333);
+ }
+}
+
+static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
+
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
+
+ if (rtwdev->hal.rx_nss == 1) {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0);
+}
+
+static u8 __rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ if (rtwdev->is_tssi_mode[rf_path]) {
+ u32 addr = 0x1c10 + (rf_path << 13);
+
+ return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000);
+ }
+
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
+}
+
+static
+void rtw8852bx_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
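+ /* Program the per-group TRX mask through the RF LUT write interface (RR_LUTWE/RR_LUTWA/RR_LUTWD0) */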
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void __rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_mac_ax_coex coex_params = {
+ .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
+ .direction = RTW89_MAC_AX_COEX_INNER,
+ };
+
+ /* PTA init */
+ rtw89_mac_coex_init(rtwdev, &coex_params);
+
+ /* set WL Tx response = Hi-Pri */
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+
+ /* set rf gnt debug off */
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
+
+ /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
+ if (btc->ant_type == BTC_ANT_SHARED) {
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f);
+ } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff);
+ }
+
+ if (rtwdev->chip->chip_id == RTL8852BT) {
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_RX_GROUP, 0x5df);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_RX_GROUP, 0x5df);
+ }
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* enable BT counter 0xda40[16,2] = 2b'11 */
+ rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static
+void __rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ u32 bitmap;
+ u32 reg;
+
+ switch (map) {
+ case BTC_PRI_MASK_TX_RESP:
+ reg = R_BTC_BT_COEX_MSK_TABLE;
+ bitmap = B_BTC_PRI_MASK_TX_RESP_V1;
+ break;
+ case BTC_PRI_MASK_BEACON:
+ reg = R_AX_WL_PRI_MSK;
+ bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ;
+ break;
+ case BTC_PRI_MASK_RX_CCK:
+ reg = R_BTC_BT_COEX_MSK_TABLE;
+ bitmap = B_BTC_PRI_MASK_RXCCK_V1;
+ break;
+ default:
+ return;
+ }
+
+ if (state)
+ rtw89_write32_set(rtwdev, reg, bitmap);
+ else
+ rtw89_write32_clr(rtwdev, reg, bitmap);
+}
+
+static
+s8 __rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ /* +6 to compensate for the offset */
+ return clamp_t(s8, val + 6, -100, 0) + 100;
+}
+
+static
+void __rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ /* Feature moved to firmware */
+}
+
+static void __rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31);
+
+ /* set WL standby = Rx to address the GNT_BT_Tx = 1->0 settle issue */
+ if (state)
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179);
+ else
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852bx_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void __rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ switch (level) {
+ case 0: /* original */
+ default:
+ rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
+ btc->dm.wl_lna2 = 0;
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0);
+ btc->dm.wl_lna2 = 0;
+ break;
+ case 2: /* for BTG Co-Rx */
+ rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
+ btc->dm.wl_lna2 = 1;
+ break;
+ }
+
+ rtw8852bx_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2);
+}
+
+static void rtw8852bx_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 chan = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
+ if (chan == 0)
+ return;
+
+ rtw89_decode_chan_idx(rtwdev, chan, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 val32;
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+
+ if (chip_id == RTL8852BT) {
+ val32 = rtw89_read32(rtwdev, R_AX_AFE_OFF_CTRL1);
+ val32 = u32_replace_bits(val32, 0x1, B_AX_S0_LDO_VSEL_F_MASK);
+ val32 = u32_replace_bits(val32, 0x1, B_AX_S1_LDO_VSEL_F_MASK);
+ rtw89_write32(rtwdev, R_AX_AFE_OFF_CTRL1, val32);
+ }
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE);
+
+ return 0;
+}
+
+static int __rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ u8 wl_rfc_s0;
+ u8 wl_rfc_s1;
+ int ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0);
+ if (ret)
+ return ret;
+ wl_rfc_s0 &= ~XTAL_SI_RF00S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1);
+ if (ret)
+ return ret;
+ wl_rfc_s1 &= ~XTAL_SI_RF10S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1,
+ FULL_BIT_MASK);
+ return ret;
+}
+
+const struct rtw8852bx_info rtw8852bx_info = {
+ .mac_enable_bb_rf = __rtw8852bx_mac_enable_bb_rf,
+ .mac_disable_bb_rf = __rtw8852bx_mac_disable_bb_rf,
+ .bb_sethw = __rtw8852bx_bb_sethw,
+ .bb_reset_all = __rtw8852bx_bb_reset_all,
+ .bb_cfg_txrx_path = __rtw8852bx_bb_cfg_txrx_path,
+ .bb_cfg_tx_path = __rtw8852bx_bb_cfg_tx_path,
+ .bb_ctrl_rx_path = __rtw8852bx_bb_ctrl_rx_path,
+ .bb_set_plcp_tx = __rtw8852bx_bb_set_plcp_tx,
+ .bb_set_power = __rtw8852bx_bb_set_power,
+ .bb_set_pmac_pkt_tx = __rtw8852bx_bb_set_pmac_pkt_tx,
+ .bb_backup_tssi = __rtw8852bx_bb_backup_tssi,
+ .bb_restore_tssi = __rtw8852bx_bb_restore_tssi,
+ .bb_tx_mode_switch = __rtw8852bx_bb_tx_mode_switch,
+ .set_channel_mac = __rtw8852bx_set_channel_mac,
+ .set_channel_bb = __rtw8852bx_set_channel_bb,
+ .ctrl_nbtg_bt_tx = __rtw8852bx_ctrl_nbtg_bt_tx,
+ .ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx,
+ .query_ppdu = __rtw8852bx_query_ppdu,
+ .read_efuse = __rtw8852bx_read_efuse,
+ .read_phycap = __rtw8852bx_read_phycap,
+ .power_trim = __rtw8852bx_power_trim,
+ .set_txpwr = __rtw8852bx_set_txpwr,
+ .set_txpwr_ctrl = __rtw8852bx_set_txpwr_ctrl,
+ .init_txpwr_unit = __rtw8852bx_init_txpwr_unit,
+ .set_txpwr_ul_tb_offset = __rtw8852bx_set_txpwr_ul_tb_offset,
+ .get_thermal = __rtw8852bx_get_thermal,
+ .adc_cfg = rtw8852bt_adc_cfg,
+ .btc_init_cfg = __rtw8852bx_btc_init_cfg,
+ .btc_set_wl_pri = __rtw8852bx_btc_set_wl_pri,
+ .btc_get_bt_rssi = __rtw8852bx_btc_get_bt_rssi,
+ .btc_update_bt_cnt = __rtw8852bx_btc_update_bt_cnt,
+ .btc_wl_s1_standby = __rtw8852bx_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = __rtw8852bx_btc_set_wl_rx_gain,
+};
+EXPORT_SYMBOL(rtw8852bx_info);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852B common routines");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
new file mode 100644
index 000000000000..801e7ab9f4fa
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BX_H__
+#define __RTW89_8852BX_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852BX 2
+#define BB_PATH_NUM_8852BX 2
+
+enum rtw8852bx_pmac_mode {
+ NONE_TEST,
+ PKTS_TX,
+ PKTS_RX,
+ CONT_TX
+};
+
+struct rtw8852bx_u_efuse {
+ u8 rsvd[0x88];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852bx_e_efuse {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852bx_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852bx_efuse {
+ u8 rsvd[0x210];
+ struct rtw8852bx_tssi_offset path_a_tssi;
+ u8 rsvd1[10];
+ struct rtw8852bx_tssi_offset path_b_tssi;
+ u8 rsvd2[94];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd3;
+ u8 iqk_lck;
+ u8 rsvd4[5];
+ u8 reg_setting:2;
+ u8 tx_diversity:1;
+ u8 rx_diversity:2;
+ u8 ac_mode:1;
+ u8 module_type:2;
+ u8 rsvd5;
+ u8 shared_ant:1;
+ u8 coex_type:3;
+ u8 ant_iso:1;
+ u8 radio_on_off:1;
+ u8 rsvd6:2;
+ u8 eeprom_version;
+ u8 customer_id;
+ u8 tx_bb_swing_2g;
+ u8 tx_bb_swing_5g;
+ u8 tx_cali_pwr_trk_mode;
+ u8 trx_path_selection;
+ u8 rfe_type;
+ u8 country_code[2];
+ u8 rsvd7[3];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd8[2];
+ u8 rx_gain_2g_ofdm;
+ u8 rsvd9;
+ u8 rx_gain_2g_cck;
+ u8 rsvd10;
+ u8 rx_gain_5g_low;
+ u8 rsvd11;
+ u8 rx_gain_5g_mid;
+ u8 rsvd12;
+ u8 rx_gain_5g_high;
+ u8 rsvd13[35];
+ u8 path_a_cck_pwr_idx[6];
+ u8 path_a_bw40_1tx_pwr_idx[5];
+ u8 path_a_ofdm_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_2tx_pwr_idx_diff:4;
+ u8 path_a_bw40_2tx_pwr_idx_diff:4;
+ u8 path_a_cck_2tx_pwr_idx_diff:4;
+ u8 path_a_ofdm_2tx_pwr_idx_diff:4;
+ u8 rsvd14[0xf2];
+ union {
+ struct rtw8852bx_u_efuse u;
+ struct rtw8852bx_e_efuse e;
+ };
+} __packed;
+
+struct rtw8852bx_bb_pmac_info {
+ u8 en_pmac_tx:1;
+ u8 is_cck:1;
+ u8 mode:3;
+ u8 rsvd:3;
+ u16 tx_cnt;
+ u16 period;
+ u16 tx_time;
+ u8 duty_cycle;
+};
+
+struct rtw8852bx_bb_tssi_bak {
+ u8 tx_path;
+ u8 rx_path;
+ u32 p0_rfmode;
+ u32 p0_rfmode_ftm;
+ u32 p1_rfmode;
+ u32 p1_rfmode_ftm;
+ s16 tx_pwr; /* S9 */
+};
+
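+/* Ops shared by the 8852B and 8852BT drivers, implemented in
+ * rtw8852b_common.c and dispatched through the inline wrappers below.
+ */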
+struct rtw8852bx_info {
+ int (*mac_enable_bb_rf)(struct rtw89_dev *rtwdev);
+ int (*mac_disable_bb_rf)(struct rtw89_dev *rtwdev);
+ void (*bb_sethw)(struct rtw89_dev *rtwdev);
+ void (*bb_reset_all)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev);
+ void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path);
+ void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path);
+ void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev);
+ void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx);
+ void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx);
+ void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852bx_bb_tssi_bak *bak);
+ void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852bx_bb_tssi_bak *bak);
+ void (*bb_tx_mode_switch)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode);
+ void (*set_channel_mac)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan, u8 mac_idx);
+ void (*set_channel_bb)(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+ void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx);
+ void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx);
+ void (*query_ppdu)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status);
+ int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block);
+ int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
+ void (*power_trim)(struct rtw89_dev *rtwdev);
+ void (*set_txpwr)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+ void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+ int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path);
+ void (*adc_cfg)(struct rtw89_dev *rtwdev, u8 bw, u8 path);
+ void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
+ void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state);
+ s8 (*btc_get_bt_rssi)(struct rtw89_dev *rtwdev, s8 val);
+ void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev);
+ void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state);
+ void (*btc_set_wl_rx_gain)(struct rtw89_dev *rtwdev, u32 level);
+};
+
+extern const struct rtw8852bx_info rtw8852bx_info;
+
+static inline
+int rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ return rtw8852bx_info.mac_enable_bb_rf(rtwdev);
+}
+
+static inline
+int rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ return rtw8852bx_info.mac_disable_bb_rf(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.bb_sethw(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.bb_reset_all(rtwdev, phy_idx);
+}
+
+static inline
+void rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.bb_cfg_txrx_path(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
+{
+ rtw8852bx_info.bb_cfg_tx_path(rtwdev, tx_path);
+}
+
+static inline
+void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path);
+}
+
+static inline
+void rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.bb_set_plcp_tx(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx)
+{
+ rtw8852bx_info.bb_set_power(rtwdev, pwr_dbm, idx);
+}
+
+static inline
+void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx)
+{
+ rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx);
+}
+
+static inline
+void rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852bx_bb_tssi_bak *bak)
+{
+ rtw8852bx_info.bb_backup_tssi(rtwdev, idx, bak);
+}
+
+static inline
+void rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852bx_bb_tssi_bak *bak)
+{
+ rtw8852bx_info.bb_restore_tssi(rtwdev, idx, bak);
+}
+
+static inline
+void rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode)
+{
+ rtw8852bx_info.bb_tx_mode_switch(rtwdev, idx, mode);
+}
+
+static inline
+void rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan, u8 mac_idx)
+{
+ rtw8852bx_info.set_channel_mac(rtwdev, chan, mac_idx);
+}
+
+static inline
+void rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.set_channel_bb(rtwdev, chan, phy_idx);
+}
+
+static inline
+void rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.ctrl_nbtg_bt_tx(rtwdev, en, phy_idx);
+}
+
+static inline
+void rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.ctrl_btg_bt_rx(rtwdev, en, phy_idx);
+}
+
+static inline
+void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ rtw8852bx_info.query_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static inline
+int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
+{
+ return rtw8852bx_info.read_efuse(rtwdev, log_map, block);
+}
+
+static inline
+int rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ return rtw8852bx_info.read_phycap(rtwdev, phycap_map);
+}
+
+static inline
+void rtw8852bx_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.power_trim(rtwdev);
+}
+
+static inline
+void rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.set_txpwr(rtwdev, chan, phy_idx);
+}
+
+static inline
+void rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.set_txpwr_ctrl(rtwdev, phy_idx);
+}
+
+static inline
+int rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ return rtw8852bx_info.init_txpwr_unit(rtwdev, phy_idx);
+}
+
+static inline
+void rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ rtw8852bx_info.set_txpwr_ul_tb_offset(rtwdev, pw_ofst, mac_idx);
+}
+
+static inline
+u8 rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ return rtw8852bx_info.get_thermal(rtwdev, rf_path);
+}
+
+static inline
+void rtw8852bx_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ rtw8852bx_info.adc_cfg(rtwdev, bw, path);
+}
+
+static inline
+void rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.btc_init_cfg(rtwdev);
+}
+
+static inline
+void rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ rtw8852bx_info.btc_set_wl_pri(rtwdev, map, state);
+}
+
+static inline
+s8 rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ return rtw8852bx_info.btc_get_bt_rssi(rtwdev, val);
+}
+
+static inline
+void rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.btc_update_bt_cnt(rtwdev);
+}
+
+static inline
+void rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw8852bx_info.btc_wl_s1_standby(rtwdev, state);
+}
+
+static inline
+void rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ rtw8852bx_info.btc_set_wl_rx_gain(rtwdev, level);
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index 259df67836a0..12354612441c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -8,6 +8,7 @@
#include "phy.h"
#include "reg.h"
#include "rtw8852b.h"
+#include "rtw8852b_common.h"
#include "rtw8852b_rfk.h"
#include "rtw8852b_rfk_table.h"
#include "rtw8852b_table.h"
@@ -20,7 +21,7 @@
#define RTW8852B_RF_REL_VERSION 34
#define RTW8852B_DPK_VER 0x0d
#define RTW8852B_DPK_RF_PATH 2
-#define RTW8852B_DPK_KIP_REG_NUM 2
+#define RTW8852B_DPK_KIP_REG_NUM 3
#define _TSSI_DE_MASK GENMASK(21, 12)
#define ADDC_T_AVG 100
@@ -3433,13 +3434,13 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rx_path = RF_ABCD; /* don't change path, but still set others */
if (enable) {
- rtw8852b_bb_set_plcp_tx(rtwdev);
- rtw8852b_bb_cfg_tx_path(rtwdev, path);
- rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
- rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy);
+ rtw8852bx_bb_set_plcp_tx(rtwdev);
+ rtw8852bx_bb_cfg_tx_path(rtwdev, path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3578,7 +3579,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
u8 channel = chan->channel;
u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
- struct rtw8852b_bb_tssi_bak tssi_bak;
+ struct rtw8852bx_bb_tssi_bak tssi_bak;
s32 aliment_diff, tssi_cw_default;
u32 start_time, finish_time;
u32 bb_reg_backup[8] = {0};
@@ -3626,7 +3627,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
else
band = TSSI_ALIMK_2G;
- rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak);
+ rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
@@ -3730,8 +3731,8 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
out:
_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
- rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak);
- rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0);
+ rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
+ rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
finish_time = ktime_get_ns();
tssi_info->tssi_alimk_time += finish_time - start_time;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 5f941122655c..d8f9d92ca0fb 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
.mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
.tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
new file mode 100644
index 000000000000..6177f36ad667
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BT_H__
+#define __RTW89_8852BT_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852BT 2
+#define BB_PATH_NUM_8852BT 2
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
new file mode 100644
index 000000000000..fa0e49d58112
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -0,0 +1,4019 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852bt.h"
+#include "rtw8852bt_rfk.h"
+#include "rtw8852bt_rfk_table.h"
+#include "rtw8852b_common.h"
+
+#define RTW8852BT_RXDCK_VER 0x1
+#define RTW8852BT_IQK_VER 0x2a
+#define RTW8852BT_SS 2
+#define RTW8852BT_TSSI_PATH_NR 2
+#define RTW8852BT_DPK_VER 0x06
+#define DPK_RF_PATH_MAX_8852BT 2
+
+#define _TSSI_DE_MASK GENMASK(21, 12)
+#define DPK_TXAGC_LOWER 0x2e
+#define DPK_TXAGC_UPPER 0x3f
+#define DPK_TXAGC_INVAL 0xff
+#define RFREG_MASKRXBB 0x003e0
+#define RFREG_MASKMODE 0xf0000
+
+enum rf_mode {
+ RF_SHUT_DOWN = 0x0,
+ RF_STANDBY = 0x1,
+ RF_TX = 0x2,
+ RF_RX = 0x3,
+ RF_TXIQK = 0x4,
+ RF_DPK = 0x5,
+ RF_RXK1 = 0x6,
+ RF_RXK2 = 0x7,
+};
+
+enum rtw8852bt_dpk_id {
+ LBK_RXIQK = 0x06,
+ SYNC = 0x10,
+ MDPK_IDL = 0x11,
+ MDPK_MPA = 0x12,
+ GAIN_LOSS = 0x13,
+ GAIN_CAL = 0x14,
+ DPK_RXAGC = 0x15,
+ KIP_PRESET = 0x16,
+ KIP_RESTORE = 0x17,
+ DPK_TXAGC = 0x19,
+ D_KIP_PRESET = 0x28,
+ D_TXAGC = 0x29,
+ D_RXAGC = 0x2a,
+ D_SYNC = 0x2b,
+ D_GAIN_LOSS = 0x2c,
+ D_MDPK_IDL = 0x2d,
+ D_GAIN_NORM = 0x2f,
+ D_KIP_THERMAL = 0x30,
+ D_KIP_RESTORE = 0x31
+};
+
+enum dpk_agc_step {
+ DPK_AGC_STEP_SYNC_DGAIN,
+ DPK_AGC_STEP_GAIN_ADJ,
+ DPK_AGC_STEP_GAIN_LOSS_IDX,
+ DPK_AGC_STEP_GL_GT_CRITERION,
+ DPK_AGC_STEP_GL_LT_CRITERION,
+ DPK_AGC_STEP_SET_TX_GAIN,
+};
+
+enum rtw8852bt_iqk_type {
+ ID_TXAGC = 0x0,
+ ID_FLOK_COARSE = 0x1,
+ ID_FLOK_FINE = 0x2,
+ ID_TXK = 0x3,
+ ID_RXAGC = 0x4,
+ ID_RXK = 0x5,
+ ID_NBTXK = 0x6,
+ ID_NBRXK = 0x7,
+ ID_FLOK_VBUFFER = 0x8,
+ ID_A_FLOK_COARSE = 0x9,
+ ID_G_FLOK_COARSE = 0xa,
+ ID_A_FLOK_FINE = 0xb,
+ ID_G_FLOK_FINE = 0xc,
+ ID_IQK_RESTORE = 0x10,
+};
+
+enum adc_ck {
+ ADC_NA = 0,
+ ADC_480M = 1,
+ ADC_960M = 2,
+ ADC_1920M = 3,
+};
+
+enum dac_ck {
+ DAC_40M = 0,
+ DAC_80M = 1,
+ DAC_120M = 2,
+ DAC_160M = 3,
+ DAC_240M = 4,
+ DAC_320M = 5,
+ DAC_480M = 6,
+ DAC_960M = 7,
+};
+
+static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820};
+static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18};
+static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = {
+ {0x5634, 0x5630, 0x5630, 0x5630},
+ {0x7634, 0x7630, 0x7630, 0x7630} };
+static const u32 _tssi_cw_default_mask[4] = {
+ 0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
+static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858};
+static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860};
+static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838};
+static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840};
+static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848};
+static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850};
+static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828};
+static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830};
+
+static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704};
+static const u32 rtw8852bt_backup_rf_regs[] = {
+ 0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005};
+static const u32 rtw8852bt_backup_kip_regs[] = {
+ 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec,
+ 0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec};
+
+#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs)
+#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs)
+#define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs)
+
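+/* Trigger a thermal meter read on @path and record it for DPK. */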
+static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+
+ udelay(200);
+
+ dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
+ dpk->bp[path][kidx].ther_dpk);
+}
+
+static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ backup_bb_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_bb_regs[i], MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]backup bb reg : %x, value =%x\n",
+ rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
+ backup_kip_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_kip_regs[i],
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
+ rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
+ }
+}
+
+static
+void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ backup_rf_reg_val[i] =
+ rtw89_read_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
+ RFREG_MASK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n",
+ rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_bb_regs[i],
+ MASKDWORD, backup_bb_reg_val[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore bb reg : %x, value =%x\n",
+ rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_kip_regs[i],
+ MASKDWORD, backup_kip_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore kip reg : %x, value =%x\n",
+ rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
+ }
+}
+
+static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev,
+ const u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ rtw89_write_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
+ RFREG_MASK, backup_rf_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
+ rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
+ rtwdev->dbcc_en, phy_idx);
+
+ if (!rtwdev->dbcc_en) {
+ val = RF_AB;
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ val = RF_A;
+ else
+ val = RF_B;
+ }
+ return val;
+}
+
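+/* Drop the TX DAC clock override on @path; when @force is set, re-apply it
+ * with clock @ck.
+ */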
+static
+void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
+ enum dac_ck ck)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
+}
+
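+/* Same as _txck_force() but for the RX ADC clock; also reconfigures the ADC
+ * for the bandwidth matching @ck.
+ */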
+static
+void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
+ enum adc_ck ck)
+{
+ u32 bw = 0;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+
+ switch (ck) {
+ case ADC_480M:
+ bw = RTW89_CHANNEL_WIDTH_40;
+ break;
+ case ADC_960M:
+ bw = RTW89_CHANNEL_WIDTH_80;
+ break;
+ case ADC_1920M:
+ bw = RTW89_CHANNEL_WIDTH_160;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s==>Invalid ck", __func__);
+ break;
+ }
+
+ rtw8852bx_adc_cfg(rtwdev, bw, path);
+}
+
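+/* Prepare BB/AFE for RF calibration: enable the ADC FIFOs, force clock gating
+ * on, and run the DACs at 960M and the ADCs at 1920M on both paths.
+ */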
+static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);
+
+ _txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
+ _txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x5);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x3333);
+
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, 0x0000);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, 0x0000);
+}
+
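+/* Undo _rfk_bb_afe_setting(): release the clock forces and debug overrides. */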
+static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x63);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0000);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x2);
+}
+
+static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+ mdelay(1);
+}
+
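+/* RX DCK: re-trigger the RX DC calibration on each path in RX mode, then
+ * restore the saved RF state.
+ */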
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, dck_tune;
+ u32 rf_reg5;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
+ RTW8852BT_RXDCK_VER, rtwdev->hal.cv);
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+ dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev,
+ R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x1);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ _set_rx_dck(rtwdev, phy, path);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev,
+ R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x0);
+ }
+}
+
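+/* RCK: trigger the RF RC calibration, poll for completion and write the
+ * resulting code back to RR_RCKC.
+ */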
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u32 rf_reg5;
+ u32 rck_val;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ /* RCK trigger */
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
+ false, rtwdev, path, RR_RCKS, BIT(3));
+
+ rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
+ rck_val, ret);
+
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
+}
+
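+/* D-die RCK: run the digital-die RC calibration, then latch the result and
+ * program it into R_DRCK.
+ */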
+static void _drck(struct rtw89_dev *rtwdev)
+{
+ u32 rck_d;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_DRCK_RES, B_DRCK_POL);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
+
+ rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
+}
+
+static void _dack_backup_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+
+ for (i = 0; i < 0x10; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
+ dack->msbk_d[0][0][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
+ dack->msbk_d[0][1][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
+ }
+
+ dack->biask_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
+ dack->biask_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);
+
+ dack->dadck_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
+ dack->dadck_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
+}
+
+static void _dack_backup_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+
+ for (i = 0; i < 0x10; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
+ dack->msbk_d[1][0][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
+ dack->msbk_d[1][1][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
+ }
+
+ dack->biask_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
+ dack->biask_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);
+
+ dack->dadck_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
+ dack->dadck_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
+}
+
+static
+void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ if (path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x1);
+ }
+}
+
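+/* Program one set of backed-up DACK results (MSBK, bias, DADCK) for the given
+ * path/index into the 0xc2xx block and take DACK values from registers.
+ */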
+static
+void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 tmp, tmp_offset, tmp_reg;
+ u32 idx_offset, path_offset;
+ u8 i;
+
+ if (index == 0)
+ idx_offset = 0;
+ else
+ idx_offset = 0x14;
+
+ if (path == RF_PATH_A)
+ path_offset = 0;
+ else
+ path_offset = 0x28;
+
+ tmp_offset = idx_offset + path_offset;
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, 0x1);
+
+ /* msbk_d: 15/14/13/12 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
+ tmp_reg = 0xc200 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* msbk_d: 11/10/9/8 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
+ tmp_reg = 0xc204 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* msbk_d: 7/6/5/4 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
+ tmp_reg = 0xc208 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* msbk_d: 3/2/1/0 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i] << (i * 8);
+ tmp_reg = 0xc20c + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* dadck_d/biask_d */
+ tmp = (dack->biask_d[path][index] << 22) |
+ (dack->dadck_d[path][index] << 14);
+ tmp_reg = 0xc210 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* enable DACK result from reg */
+ rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, 0x1);
+}
+
+static
+void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u8 i;
+
+ for (i = 0; i < 2; i++)
+ _dack_reload_by_path(rtwdev, path, i);
+}
+
+static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
+{
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
+ return false;
+
+ return true;
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ _txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
+ udelay(100);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, 0x30);
+
+ _dack_reset(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
+ udelay(1);
+
+ dack->msbk_timeout[0] = false;
+
+ ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
+ 1, 20000, false, rtwdev);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
+ dack->msbk_timeout[0] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
+
+ _txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
+ _dack_backup_s0(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+}
+
+static bool _dack_s1_poll(struct rtw89_dev *rtwdev)
+{
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
+ return false;
+
+ return true;
+}
+
+static void _dack_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ _txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
+ udelay(100);
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, 0x30);
+
+ _dack_reset(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
+ udelay(1);
+
+ dack->msbk_timeout[1] = false;
+
+ ret = read_poll_timeout_atomic(_dack_s1_poll, done, done,
+ 1, 10000, false, rtwdev);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
+ dack->msbk_timeout[1] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
+
+ _txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
+ _dack_backup_s1(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+ _dack_s0(rtwdev);
+ _dack_s1(rtwdev);
+}
+
+static void _dack_dump(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+ u8 t;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[0][0], dack->addck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[1][0], dack->addck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[1][0], dack->dadck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[0][0], dack->biask_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[1][0], dack->biask_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
+ for (i = 0; i < 0x10; i++) {
+ t = dack->msbk_d[0][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+}
+
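+/* Run ADDCK on both paths and store the I/Q results in dack->addck_d[][]. */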
+static void _addck_ori(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
+ udelay(100);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
+ dack->addck_timeout[0] = false;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_ADDCKR0, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
+ dack->addck_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
+ dack->addck_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
+ udelay(100);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
+ dack->addck_timeout[1] = false;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_ADDCKR1, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
+ dack->addck_timeout[1] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
+ dack->addck_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
+ dack->addck_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _addck_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, dack->addck_d[1][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, dack->addck_d[1][1]);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
+}
+
+static void _dack_manual_off(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, 0x0);
+}
+
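+/* Full DAC calibration flow: D-die RCK, then ADDCK with the ADCs forced to
+ * 960M, then per-path DACK, dumping the results for debugging.
+ */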
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ dack->dack_done = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+
+ _drck(rtwdev);
+ _dack_manual_off(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
+ _addck_ori(rtwdev);
+
+ _rxck_force(rtwdev, RF_PATH_A, false, ADC_960M);
+ _rxck_force(rtwdev, RF_PATH_B, false, ADC_960M);
+ _addck_reload(rtwdev);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
+
+ _dack(rtwdev);
+ _dack_dump(rtwdev);
+ dack->dack_done = true;
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x1);
+
+ dack->dack_cnt++;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
+}
+
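+/* Wait for the NCTL one-shot to finish; returns true if the ready report
+ * times out.
+ */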
+static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
+{
+ bool notready = false;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 10, 8200, false,
+ rtwdev, R_RFK_ST, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");
+
+ udelay(10);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
+ 10, 400, false,
+ rtwdev, R_RPT_COM, B_RPT_COM_RDY);
+ if (ret) {
+ notready = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL2 IQK timeout!!!\n");
+ }
+
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+
+ return notready;
+}
+
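+/* Issue a one-shot NCTL command for calibration type @ktype on @path and
+ * wait for completion; returns true when the calibration is not ready.
+ */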
+static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path, u8 ktype)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 iqk_cmd;
+ bool fail;
+
+ switch (ktype) {
+ case ID_TXAGC:
+ iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_FLOK_COARSE:
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_FLOK_FINE:
+ iqk_cmd = 0x208 | (1 << (4 + path));
+ break;
+ case ID_FLOK_VBUFFER:
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_TXK:
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_RXAGC:
+ iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_RXK:
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_NBTXK:
+ iqk_cmd = 0x408 | (1 << (4 + path));
+ break;
+ case ID_NBRXK:
+ iqk_cmd = 0x608 | (1 << (4 + path));
+ break;
+ default:
+ return false;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s, iqk_cmd = %x\n",
+ __func__, iqk_cmd + 1);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
+ fail = _iqk_check_cal(rtwdev, path, ktype);
+
+ return fail;
+}
+
+static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x5);
+ udelay(1);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
+ udelay(1);
+ break;
+ default:
+ break;
+ }
+}
+
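+/* 2G LOK: alternate the coarse and vbuffer one-shot stages twice with
+ * different TX gain and IQ-switch settings.
+ */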
+static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ return false;
+}
+
+static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ return false;
+}
+
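+/* 2G TX IQK: sweep four TX gain groups, keep the narrow-band TXCFIR result
+ * and fall back to a default coefficient if the calibration fails.
+ */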
+static bool _iqk_2g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0};
+ static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6};
+ static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e};
+ static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0x0; gp < 0x4; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000004, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000003, gp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
+ 0x009);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, g_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, g_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
+ path, gp, 1 << path, iqk_info->nb_txcfir[path]);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_TXCFIR, 0x0);
+ }
+
+ return kfail;
+}
+
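+ /* TX IQK for the 5 GHz band; same four-gain-group flow as the 2 GHz
+ * variant, with 5 GHz power/track/gain tables.
+ */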
+static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0};
+ static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6};
+ static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e};
+ static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u8 gp;
+
+ for (gp = 0x0; gp < 0x4; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, a_itqt[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000004, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000003, gp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
+ 0x009);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, a_itqt[gp]);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, a_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
+ path, gp, 1 << path, iqk_info->nb_txcfir[path]);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_TXCFIR, 0x0);
+ }
+
+ return kfail;
+}
+
+static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
+}
+
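+ /* Configure the RX ADC clock (960M for 80 MHz channels, 480M otherwise),
+ * sampling delay and bandwidth selection on both paths before RX IQK.
+ */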
+static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+
+ if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0x8);
+ } else {
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_480M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_480M);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xf);
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00000780, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00000780, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00007800, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00007800, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
+}
+
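+ /* RX IQK for the 2 GHz band: two RX gain/attenuation settings, each with
+ * an RX AGC one-shot and narrowband RX IQK (plus wideband RX IQK unless
+ * NB-only mode is set).
+ */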
+static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 g_idxrxgain[2] = {0x212, 0x310};
+ static const u32 g_idxattc2[2] = {0x00, 0x20};
+ static const u32 g_idxattc1[2] = {0x3, 0x2};
+ static const u32 g_idxrxagc[2] = {0x0, 0x2};
+ static const u32 g_idx[2] = {0x0, 0x2};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u32 rf_18, tmp;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
+ rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
+
+ for (gp = 0x0; gp < 0x2; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, g_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, g_idxattc1[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000007, g_idx[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path,
+ rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path,
+ g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_RXCFIR, 0x0);
+ }
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
+
+ return kfail;
+}
+
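+ /* RX IQK for the 5 GHz band; same two-step flow as the 2 GHz variant,
+ * with 5 GHz gain/attenuation tables.
+ */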
+static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 a_idxrxgain[2] = {0x110, 0x290};
+ static const u32 a_idxattc2[2] = {0x0f, 0x0f};
+ static const u32 a_idxattc1[2] = {0x2, 0x2};
+ static const u32 a_idxrxagc[2] = {0x4, 0x6};
+ static const u32 a_idx[2] = {0x0, 0x2};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u32 rf_18, tmp;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
+ rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
+
+ for (gp = 0x0; gp < 0x2; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, a_idxattc1[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000007, a_idx[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path,
+ rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n",
+ path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_RXCFIR, 0x0);
+ }
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
+
+ return kfail;
+}
+
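+ /* Per-path IQK sequence: retry LOK up to three times, run TX IQK,
+ * switch to the RX calibration clock, reset the ADC FIFO, then run RX IQK.
+ */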
+static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool lok_result = false;
+ bool txk_result = false;
+ bool rxk_result = false;
+ u8 i;
+
+ for (i = 0; i < 3; i++) {
+ _iqk_txk_setting(rtwdev, path);
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ lok_result = _iqk_2g_lok(rtwdev, phy_idx, path);
+ else
+ lok_result = _iqk_5g_lok(rtwdev, phy_idx, path);
+
+ if (!lok_result)
+ break;
+ }
+
+ if (lok_result) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n");
+ rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_LOKVB, RFREG_MASK, 0x80200);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x08[00:19] = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x09[00:19] = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RSV2, RFREG_MASK));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x0a[00:19] = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK));
+
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ txk_result = _iqk_2g_tx(rtwdev, phy_idx, path);
+ else
+ txk_result = _iqk_5g_tx(rtwdev, phy_idx, path);
+
+ _iqk_rxclk_setting(rtwdev, path);
+ _iqk_adc_fifo_rst(rtwdev, phy_idx, path);
+
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ rxk_result = _iqk_2g_rx(rtwdev, phy_idx, path);
+ else
+ rxk_result = _iqk_5g_rx(rtwdev, phy_idx, path);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]result : lok_= %x, txk_= %x, rxk_= %x\n",
+ lok_result, txk_result, rxk_result);
+}
+
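+ /* Record band/bandwidth/channel for this path and choose a calibration
+ * table slot: the first empty MCC entry, or toggle between slot 0 and 1.
+ */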
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool get_empty_table = false;
+ u32 reg_rf18;
+ u32 reg_35c;
+ u8 idx;
+
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
+ get_empty_table = true;
+ break;
+ }
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
+
+ if (!get_empty_table) {
+ idx = iqk_info->iqk_table_idx[path] + 1;
+ if (idx > 1)
+ idx = 0;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
+
+ reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
+
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
+ iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
+ iqk_info->iqk_table_idx[path] = idx;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
+ path, reg_rf18, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
+ path, reg_rf18);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x35c= 0x%x\n",
+ path, reg_35c);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
+ iqk_info->iqk_times, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
+ idx, path, iqk_info->iqk_mcc_ch[idx][path]);
+}
+
+static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ _iqk_by_path(rtwdev, phy_idx, path);
+}
+
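+ /* Restore state after IQK: program the narrowband CFIRs (or the default
+ * 0x40000000), release NCTL/KIP control and put the RF-side registers
+ * back to their normal settings.
+ */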
+static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
+
+ if (iqk_info->is_nbiqk) {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, iqk_info->nb_txcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, iqk_info->nb_rxcfir[path]);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, 0x40000000);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, 0x40000000);
+ }
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00000e19 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+}
+
+static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
+}
+
+static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 idx = 0;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx);
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
+}
+
+static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);
+
+ _txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
+ _txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x2);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
+}
+
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx, path;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
+
+ if (iqk_info->is_iqk_init)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ iqk_info->is_iqk_init = true;
+ iqk_info->is_nbiqk = false;
+ iqk_info->iqk_fft_en = false;
+ iqk_info->iqk_sram_en = false;
+ iqk_info->iqk_cfir_en = false;
+ iqk_info->iqk_xym_en = false;
+ iqk_info->iqk_times = 0x0;
+
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ iqk_info->iqk_channel[idx] = 0x0;
+ for (path = 0; path < RTW8852BT_SS; path++) {
+ iqk_info->lok_cor_fail[idx][path] = false;
+ iqk_info->lok_fin_fail[idx][path] = false;
+ iqk_info->iqk_tx_fail[idx][path] = false;
+ iqk_info->iqk_rx_fail[idx][path] = false;
+ iqk_info->iqk_mcc_ch[idx][path] = 0x0;
+ iqk_info->iqk_table_idx[path] = 0x0;
+ }
+ }
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
+ for (path = 0; path < RF_PATH_MAX; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
+ rf_mode != 2, 2, 5000, false,
+ rtwdev, path, RR_MOD, RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
+ }
+}
+
+static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
+ bool is_pause)
+{
+ if (!is_pause)
+ return;
+
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
+}
+
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]==========IQK start!!!!!==========\n");
+ iqk_info->iqk_times++;
+ iqk_info->version = RTW8852BT_IQK_VER;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+
+ _rfk_backup_bb_reg(rtwdev, backup_bb_val);
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+ _rfk_reload_bb_reg(rtwdev, backup_bb_val);
+ _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+{
+ u8 kpath = _kpath(rtwdev, phy_idx);
+
+ switch (kpath) {
+ case RF_A:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ break;
+ case RF_B:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ case RF_AB:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ default:
+ break;
+ }
+}
+
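+ /* Enable or disable the DPD coefficients for the current table index,
+ * based on the global DPK enable flag and the per-path calibration result.
+ */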
+static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 val, kidx = dpk->cur_idx[path];
+
+ val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ BIT(24), val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
+ kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
+}
+
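+ /* Issue a one-shot DPK command to the KIP and poll the RFK state and
+ * report registers for completion, logging a debug message on timeout.
+ */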
+static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, enum rtw8852bt_dpk_id id)
+{
+ u16 dpk_cmd;
+ u32 val;
+ int ret;
+
+ dpk_cmd = (id << 8) | (0x19 + (path << 4));
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 1, 30000, false,
+ rtwdev, R_RFK_ST, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 over 30ms!!!!\n");
+
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
+ 1, 2000, false,
+ rtwdev, R_RPT_COM, MASKLWORD);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 over 2ms!!!!\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot for %s = 0x%04x\n",
+ id == 0x06 ? "LBK_RXIQK" :
+ id == 0x10 ? "SYNC" :
+ id == 0x11 ? "MDPK_IDL" :
+ id == 0x12 ? "MDPK_MPA" :
+ id == 0x13 ? "GAIN_LOSS" :
+ id == 0x14 ? "PWR_CAL" :
+ id == 0x15 ? "DPK_RXAGC" :
+ id == 0x16 ? "KIP_PRESET" :
+ id == 0x17 ? "KIP_RESTORE" :
+ "DPK_TXAGC", dpk_cmd);
+}
+
+static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+
+ udelay(600);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path);
+}
+
+static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
+ path, dpk->cur_idx[path], phy,
+ rtwdev->is_tssi_mode[path] ? "on" : "off",
+ rtwdev->dbcc_en ? "on" : "off",
+ dpk->bp[path][kidx].band == 0 ? "2G" :
+ dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
+ dpk->bp[path][kidx].ch,
+ dpk->bp[path][kidx].bw == 0 ? "20M" :
+ dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
+}
+
+static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_pause)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, is_pause);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
+ is_pause ? "pause" : "resume");
+}
+
+static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+
+ if (rtwdev->hal.cv > CHIP_CAV)
+ rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8),
+ B_DPD_COM_OF, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
+}
+
+static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18)
+{
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
+
+ if (cur_rxbb >= 0x11)
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
+ else if (cur_rxbb <= 0xa)
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
+ else
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);
+
+ rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
+
+ udelay(100);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);
+
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
+}
+
+static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+ } else {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
+ rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
+ rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
+}
+
+static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bypass)
+{
+ if (is_bypass) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS, 0x0);
+ }
+}
+
+static
+void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
+}
+
+static void _dpk_table_select(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ u8 val;
+
+ val = 0x80 + kidx * 0x20 + gain * 0x10;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
+ gain, val);
+}
+
+static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define DPK_SYNC_TH_DC_I 200
+#define DPK_SYNC_TH_DC_Q 200
+#define DPK_SYNC_TH_CORR 170
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 corr_val, corr_idx;
+ u16 dc_i, dc_q;
+ u32 corr, dc;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+ corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ corr_idx = u32_get_bits(corr, B_PRT_COM_CORI);
+ corr_val = u32_get_bits(corr, B_PRT_COM_CORV);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
+ path, corr_idx, corr_val);
+
+ dpk->corr_idx[path][kidx] = corr_idx;
+ dpk->corr_val[path][kidx] = corr_val;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
+
+ dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ dc_i = u32_get_bits(dc, B_PRT_COM_DCI);
+ dc_q = u32_get_bits(dc, B_PRT_COM_DCQ);
+
+ dc_i = abs(sign_extend32(dc_i, 11));
+ dc_q = abs(sign_extend32(dc_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
+ path, dc_i, dc_q);
+
+ dpk->dc_i[path][kidx] = dc_i;
+ dpk->dc_q[path][kidx] = dc_q;
+
+ if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
+ corr_val < DPK_SYNC_TH_CORR)
+ return true;
+ else
+ return false;
+}
+
+static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, SYNC);
+}
+
+static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
+{
+ u16 dgain;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+ dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);
+
+ return dgain;
+}
+
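+ /* Map the measured digital gain onto a signed RXBB index offset using
+ * the bnd[] thresholds (negative offsets are encoded as 0xf8..0xff).
+ */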
+static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
+{
+ static const u16 bnd[15] = {
+ 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
+ 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
+ };
+ s8 offset;
+
+ if (dgain >= bnd[0])
+ offset = 0x6;
+ else if (bnd[0] > dgain && dgain >= bnd[1])
+ offset = 0x6;
+ else if (bnd[1] > dgain && dgain >= bnd[2])
+ offset = 0x5;
+ else if (bnd[2] > dgain && dgain >= bnd[3])
+ offset = 0x4;
+ else if (bnd[3] > dgain && dgain >= bnd[4])
+ offset = 0x3;
+ else if (bnd[4] > dgain && dgain >= bnd[5])
+ offset = 0x2;
+ else if (bnd[5] > dgain && dgain >= bnd[6])
+ offset = 0x1;
+ else if (bnd[6] > dgain && dgain >= bnd[7])
+ offset = 0x0;
+ else if (bnd[7] > dgain && dgain >= bnd[8])
+ offset = 0xff;
+ else if (bnd[8] > dgain && dgain >= bnd[9])
+ offset = 0xfe;
+ else if (bnd[9] > dgain && dgain >= bnd[10])
+ offset = 0xfd;
+ else if (bnd[10] > dgain && dgain >= bnd[11])
+ offset = 0xfc;
+ else if (bnd[11] > dgain && dgain >= bnd[12])
+ offset = 0xfb;
+ else if (bnd[12] > dgain && dgain >= bnd[13])
+ offset = 0xfa;
+ else if (bnd[13] > dgain && dgain >= bnd[14])
+ offset = 0xf9;
+ else if (bnd[14] > dgain)
+ offset = 0xf8;
+ else
+ offset = 0x0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
+
+ return offset;
+}
+
+static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+
+ return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
+}
+
+static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+}
+
+static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_tpg_sel(rtwdev, path, kidx);
+ _dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
+}
+
+static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
+}
+
+static
+u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (txagc >= dpk->max_dpk_txagc[path])
+ txagc = dpk->max_dpk_txagc[path];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set TxAGC = 0x%x\n", txagc);
+
+ return txagc;
+}
+
+static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 txagc)
+{
+ u8 val;
+
+ val = _dpk_txagc_check_8852bt(rtwdev, path, txagc);
+ rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ _dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
+}
+
+static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ _dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+}
+
+static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 txagc, s8 gain_offset)
+{
+ txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);
+
+ if ((txagc - gain_offset) < DPK_TXAGC_LOWER)
+ txagc = DPK_TXAGC_LOWER;
+ else if ((txagc - gain_offset) > DPK_TXAGC_UPPER)
+ txagc = DPK_TXAGC_UPPER;
+ else
+ txagc = txagc - gain_offset;
+
+ _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
+ gain_offset, txagc);
+ return txagc;
+}
+
+static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 is_check)
+{
+ u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
+
+ if (is_check) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
+ val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val1_i = abs(sign_extend32(val1_i, 11));
+ val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val1_q = abs(sign_extend32(val1_q, 11));
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
+ val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val2_i = abs(sign_extend32(val2_i, 11));
+ val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val2_q = abs(sign_extend32(val2_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
+ phy_div(val1_i * val1_i + val1_q * val1_q,
+ val2_i * val2_i + val2_q * val2_q));
+ } else {
+ for (i = 0; i < 32; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+ }
+
+ if (val1_i * val1_i + val1_q * val1_q >=
+ (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
+ return true;
+
+ return false;
+}
+
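+ /* DPK AGC state machine: sync, read the digital gain, adjust RXBB, then
+ * iterate gain-loss measurements until the DPK TX AGC converges, bounded
+ * by six AGC rounds and a hard loop limit.
+ */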
+static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
+ bool loss_only)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
+ u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
+ u8 step = DPK_AGC_STEP_SYNC_DGAIN;
+ int limit = 200;
+ s8 offset = 0;
+ u16 dgain = 0;
+ u32 rf_18;
+
+ tmp_txagc = init_txagc;
+
+ tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
+ rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+
+ do {
+ switch (step) {
+ case DPK_AGC_STEP_SYNC_DGAIN:
+ _dpk_sync(rtwdev, phy, path, kidx);
+ if (agc_cnt == 0) {
+ if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
+ _dpk_bypass_rxcfir(rtwdev, path, true);
+ else
+ _dpk_lbk_rxiqk(rtwdev, phy, path,
+ tmp_rxbb, rf_18);
+ }
+
+ if (_dpk_sync_check(rtwdev, path, kidx)) {
+ tmp_txagc = 0xff;
+ goout = 1;
+ break;
+ }
+
+ dgain = _dpk_dgain_read(rtwdev);
+ offset = _dpk_dgain_mapping(rtwdev, dgain);
+
+ if (loss_only == 1 || limited_rxbb == 1 || offset == 0)
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ else
+ step = DPK_AGC_STEP_GAIN_ADJ;
+ break;
+ case DPK_AGC_STEP_GAIN_ADJ:
+ tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
+
+ if (tmp_rxbb + offset > 0x1f) {
+ tmp_rxbb = 0x1f;
+ limited_rxbb = 1;
+ } else if (tmp_rxbb + offset < 0) {
+ tmp_rxbb = 0;
+ limited_rxbb = 1;
+ } else {
+ tmp_rxbb = tmp_rxbb + offset;
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
+ _dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18);
+ if (dgain > 1922 || dgain < 342)
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ else
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+
+ agc_cnt++;
+ break;
+ case DPK_AGC_STEP_GAIN_LOSS_IDX:
+ _dpk_gainloss(rtwdev, phy, path, kidx);
+
+ tmp_gl_idx = _dpk_gainloss_read(rtwdev);
+
+ if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) ||
+ tmp_gl_idx >= 7)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else
+ step = DPK_AGC_STEP_SET_TX_GAIN;
+
+ gl_cnt++;
+ break;
+ case DPK_AGC_STEP_GL_GT_CRITERION:
+ if (tmp_txagc == 0x2e ||
+ tmp_txagc == dpk->max_dpk_txagc[path]) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@lower bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
+ tmp_txagc, 0x3);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GL_LT_CRITERION:
+ if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@upper bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
+ tmp_txagc, 0xfe);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_SET_TX_GAIN:
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc,
+ tmp_gl_idx);
+ goout = 1;
+ agc_cnt++;
+ break;
+
+ default:
+ goout = 1;
+ break;
+ }
+ } while (!goout && agc_cnt < 6 && limit-- > 0);
+
+ if (gl_cnt >= 6)
+ _dpk_pas_read(rtwdev, path, false);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb);
+
+ return tmp_txagc;
+}
+
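+ /* Program the MDPD polynomial order used for IDL: 0 = (5,3,1),
+ * 1 = (5,3,0), 2 = (5,0,0).
+ */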
+static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 order)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ switch (order) {
+ case 0: /* (5,3,1) */
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
+ dpk->dpk_order[path] = 0x3;
+ break;
+ case 1: /* (5,3,0) */
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
+ dpk->dpk_order[path] = 0x1;
+ break;
+ case 2: /* (5,0,0) */
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
+ dpk->dpk_order[path] = 0x0;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Wrong MDPD order!!(0x%x)\n", order);
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
+ order == 0x0 ? "(5,3,1)" :
+ order == 0x1 ? "(5,3,0)" : "(5,0,0)");
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Set MDPD order to 0x%x for IDL\n", order);
+}
+
+static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
+ dpk->bp[path][kidx].band == RTW89_BAND_5G)
+ _dpk_set_mdpd_para(rtwdev, path, 0x2);
+ else
+ _dpk_set_mdpd_para(rtwdev, path, 0x0);
+
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+}
+
+static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 gs = dpk->dpk_gs[phy];
+ u16 pwsf = 0x78;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), BIT(8), kidx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n",
+ txagc, pwsf, gs);
+
+ dpk->bp[path][kidx].txagc_dpk = txagc;
+ rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
+ 0x3F << ((gain << 3) + (kidx << 4)), txagc);
+
+ dpk->bp[path][kidx].pwsf = pwsf;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x1FF << (gain << 4), pwsf);
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
+
+ dpk->bp[path][kidx].gs = gs;
+ if (dpk->dpk_gs[phy] == 0x7f)
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x007f7f7f);
+ else
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x005b5b5b);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_ORDER_V1, dpk->dpk_order[path]);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
+}
+
+static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 idx, cur_band, cur_ch;
+ bool is_reload = false;
+
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
+
+ for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
+ if (cur_band != dpk->bp[path][idx].band ||
+ cur_ch != dpk->bp[path][idx].ch)
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_MDPD, idx);
+ dpk->cur_idx[path] = idx;
+ is_reload = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] reload S%d[%d] success\n", path, idx);
+ }
+
+ return is_reload;
+}
+
+static
+void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+}
+
+static
+void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+}
+
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 txagc = 0x38, kidx = dpk->cur_idx[path];
+ bool is_fail = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
+
+ _rf_direct_cntrl(rtwdev, path, false);
+ _drf_direct_cntrl(rtwdev, path, false);
+
+ _dpk_kip_pwr_clk_on(rtwdev, path);
+ _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+ _dpk_rf_setting(rtwdev, gain, path, kidx);
+ _dpk_rx_dck(rtwdev, phy, path);
+ _dpk_kip_preset(rtwdev, phy, path, kidx);
+ _dpk_kip_set_rxagc(rtwdev, phy, path);
+ _dpk_table_select(rtwdev, path, kidx, gain);
+
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+
+ _rfk_get_thermal(rtwdev, kidx, path);
+
+ if (txagc == 0xff) {
+ is_fail = true;
+ goto _error;
+ }
+
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX);
+ _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
+
+_error:
+ if (!is_fail)
+ dpk->bp[path][kidx].path_ok = 1;
+ else
+ dpk->bp[path][kidx].path_ok = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
+ is_fail ? "Check" : "Success");
+
+ _dpk_onoff(rtwdev, path, is_fail);
+
+ return is_fail;
+}
+
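+ /* Run DPK on all paths: reuse a stored result when the channel matches,
+ * otherwise back up BB/KIP/RF state, calibrate each path and restore
+ * everything (pausing TSSI around the calibration when it is enabled).
+ */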
+static void _dpk_cal_select(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 kpath)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u32 backup_kip_val[BACKUP_KIP_REGS_NR];
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
+ bool reloaded[2] = {false};
+ u8 path;
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ if (!reloaded[path] && dpk->bp[path][0].ch != 0)
+ dpk->cur_idx[path] = !dpk->cur_idx[path];
+ else
+ _dpk_onoff(rtwdev, path, false);
+ }
+
+ _rfk_backup_bb_reg(rtwdev, backup_bb_val);
+ _rfk_backup_kip_reg(rtwdev, backup_kip_val);
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _dpk_information(rtwdev, phy, path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, true);
+ }
+
+ _rfk_bb_afe_setting(rtwdev, phy, path, kpath);
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
+ _dpk_main(rtwdev, phy, path, 1);
+
+ _rfk_bb_afe_restore(rtwdev, phy, path, kpath);
+
+ _dpk_kip_restore(rtwdev, path);
+ _rfk_reload_bb_reg(rtwdev, backup_bb_val);
+ _rfk_reload_kip_reg(rtwdev, backup_kip_val);
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
+ _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, false);
+ }
+}
+
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_fem_info *fem = &rtwdev->fem;
+
+ if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RTW8852BT_SS; path++) {
+ if (kpath & BIT(path))
+ _dpk_onoff(rtwdev, path, true);
+ }
+}
+
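+ /* DPK thermal tracking: derive a pwsf correction from the thermal delta
+ * since calibration and apply it per path unless tracking is disabled.
+ */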
+static void _dpk_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
+ s8 delta_ther[2] = {};
+ u8 trk_idx, txagc_rf;
+ u8 path, kidx;
+ u16 pwsf[2];
+ u8 cur_ther;
+ u32 tmp;
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ kidx = dpk->cur_idx[path];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
+ path, kidx, dpk->bp[path][kidx].ch);
+
+ cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] thermal now = %d\n", cur_ther);
+
+ if (dpk->bp[path][kidx].ch && cur_ther)
+ delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
+ delta_ther[path] = delta_ther[path] * 3 / 2;
+ else
+ delta_ther[path] = delta_ther[path] * 5 / 2;
+
+ txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ B_TXAGC_RF);
+
+ if (rtwdev->is_tssi_mode[path]) {
+ trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
+ txagc_rf, trk_idx);
+
+ txagc_bb =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ MASKBYTE2);
+ txagc_bb_tp =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
+ B_TXAGC_TP);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
+ txagc_bb_tp, txagc_bb);
+
+ txagc_ofst =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ MASKBYTE3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
+ txagc_ofst, delta_ther[path]);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
+ B_DPD_COM_OF);
+ if (tmp == 0x1) {
+ txagc_ofst = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] HW txagc offset mode\n");
+ }
+
+ if (txagc_rf && cur_ther)
+ ini_diff = txagc_ofst + (delta_ther[path]);
+
+ tmp = rtw89_phy_read32_mask(rtwdev,
+ R_P0_TXDPD + (path << 13),
+ B_P0_TXDPD);
+ if (tmp == 0x0) {
+ pwsf[0] = dpk->bp[path][kidx].pwsf +
+ txagc_bb_tp - txagc_bb + ini_diff;
+ pwsf[1] = dpk->bp[path][kidx].pwsf +
+ txagc_bb_tp - txagc_bb + ini_diff;
+ } else {
+ pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
+ pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
+ }
+ } else {
+ pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ }
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
+ if (!tmp && txagc_rf) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
+ pwsf[0], pwsf[1]);
+
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_BND + (path << 8) + (kidx << 2),
+ B_DPD_BND_0, pwsf[0]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_BND + (path << 8) + (kidx << 2),
+ B_DPD_BND_1, pwsf[1]);
+ }
+ }
+}
+
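+ /* If the OFDM backoff plus TX scale is large enough, move the DPD backoff
+ * into the BB (gs = 0x7f, 0 dB in the DPD table); otherwise keep gs = 0x5b.
+ */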
+static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 tx_scale, ofdm_bkof, path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
+ tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
+
+ if (ofdm_bkof + tx_scale >= 44) {
+ /* move dpd backoff to bb, and set dpd backoff to 0 */
+ dpk->dpk_gs[phy] = 0x7f;
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
+ B_DPD_CFG, 0x7f7f7f);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Set S%d DPD backoff to 0dB\n", path);
+ }
+ } else {
+ dpk->dpk_gs[phy] = 0x5b;
+ }
+}
+
+static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0);
+}
+
+static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
+}
+
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
+ rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x0);
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_sys_a_defs_2g_tbl,
+ &rtw8852bt_tssi_sys_a_defs_5g_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_sys_b_defs_2g_tbl,
+ &rtw8852bt_tssi_sys_b_defs_5g_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_init_txpwr_defs_a_tbl,
+ &rtw8852bt_tssi_init_txpwr_defs_b_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl,
+ &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl);
+}
+
+static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_dck_defs_a_tbl,
+ &rtw8852bt_tssi_dck_defs_b_tbl);
+}
+
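+ /* Build the 64-entry thermal offset table for TSSI from the firmware
+ * power-tracking deltas of the current subband, or zero it when no valid
+ * thermal reading is available.
+ */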
+static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \
+({ \
+ s8 *__ptr = (ptr); \
+ u8 __idx = (idx), __i, __v; \
+ u32 __val = 0; \
+ for (__i = 0; __i < 4; __i++) { \
+ __v = (__ptr[__idx + __i]); \
+ __val |= (__v << (8 * __i)); \
+ } \
+ __val; \
+})
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
+ const s8 *thm_up_a = NULL;
+ const s8 *thm_down_a = NULL;
+ const s8 *thm_up_b = NULL;
+ const s8 *thm_down_b = NULL;
+ u8 thermal = 0xff;
+ s8 thm_ofst[64] = {0};
+ u32 tmp = 0;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
+ break;
+ }
+
+ if (path == RF_PATH_A) {
+ thermal = tssi_info->thermal[RF_PATH_A];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ R_P0_TSSI_BASE + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER,
+ thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
+ thermal);
+
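+ /* Build the 64-entry thermal offset table: entries 0..31 hold the negated
+ * down-swing deltas, entries 63..32 the up-swing deltas (from index 1);
+ * the last delta is reused once the firmware table runs out.
+ */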
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_a[i++] :
+ -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_a[i++] :
+ thm_up_a[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
+
+ } else {
+ thermal = tssi_info->thermal[RF_PATH_B];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER,
+ thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_b[i++] :
+ -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_b[i++] :
+ thm_up_b[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
+ }
+#undef RTW8852BT_TSSI_GET_VAL
+}
+
+static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_dac_gain_defs_a_tbl,
+ &rtw8852bt_tssi_dac_gain_defs_b_tbl);
+}
+
+static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_slope_a_defs_2g_tbl,
+ &rtw8852bt_tssi_slope_a_defs_5g_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_slope_b_defs_2g_tbl,
+ &rtw8852bt_tssi_slope_b_defs_5g_tbl);
+}
+
+static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, bool all)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ const struct rtw89_rfk_tbl *tbl = NULL;
+ u8 ch = chan->channel;
+
+ if (path == RF_PATH_A) {
+ if (band == RTW89_BAND_2G)
+ tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl;
+ else if (ch >= 36 && ch <= 64)
+ tbl = &rtw8852bt_tssi_align_a_5g1_all_defs_tbl;
+ else if (ch >= 100 && ch <= 144)
+ tbl = &rtw8852bt_tssi_align_a_5g2_all_defs_tbl;
+ else if (ch >= 149 && ch <= 177)
+ tbl = &rtw8852bt_tssi_align_a_5g3_all_defs_tbl;
+ } else {
+ if (ch >= 1 && ch <= 14)
+ tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl;
+ else if (ch >= 36 && ch <= 64)
+ tbl = &rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
+ else if (ch >= 100 && ch <= 144)
+ tbl = &rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
+ else if (ch >= 149 && ch <= 177)
+ tbl = &rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
+ }
+
+ if (tbl)
+ rtw89_rfk_parser(rtwdev, tbl);
+}
+
+static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_slope_defs_a_tbl,
+ &rtw8852bt_tssi_slope_defs_b_tbl);
+}
+
+static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
+}
+
+static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
+ path);
+
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
+ B_P0_TSSI_MV_MIX, 0x010);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
+ B_P1_RFCTM_DEL, 0x010);
+}
+
+static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 i;
+
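+ /* Per path: de-assert the moving-average clear, pulse TSSI_EN, enable RF
+ * TX gain tracking, set the default TX AGC offset (0xc0), pulse OFT_EN,
+ * then mark TSSI mode active.
+ */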
+ for (i = 0; i < RF_PATH_NUM_8852BT; i++) {
+ _tssi_set_tssi_track(rtwdev, phy, i);
+ _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
+
+ if (i == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
+ B_P0_TSSI_MV_CLR, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
+ B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
+ B_P0_TSSI_EN, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
+ RR_TXGA_V1_TRK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_RFC, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT_EN, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = true;
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
+ B_P1_TSSI_MV_CLR, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
+ B_P1_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
+ B_P1_TSSI_EN, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
+ RR_TXGA_V1_TRK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_RFC, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT_EN, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_B] = true;
+ }
+ }
+}
+
+static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+}
+
+static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
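+/* An "extra" group marks a channel that falls between two OFDM groups; its DE
+ * is taken as the average of the two neighbouring groups.
+ */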
+#define TSSI_EXTRA_GROUP_BIT (BIT(31))
+#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
+#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
+static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u32 gidx, gidx_1st, gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ s8 val;
+
+ gidx = _tssi_get_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u32 tgidx, tgidx_1st, tgidx_2nd;
+ s8 tde_1st;
+ s8 tde_2nd;
+ s8 val;
+
+ tgidx = _tssi_get_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 gidx;
+ s8 ofdm_de;
+ s8 trim_de;
+ s32 val;
+ u32 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
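+ /* For each path: program the CCK DE as the efuse CCK value plus the trim
+ * offset, then program every OFDM/MCS bandwidth entry with the efuse MCS
+ * value plus the same trim offset.
+ */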
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
+ gidx = _tssi_get_cck_group(rtwdev, ch);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = tssi_info->tssi_cck[i][gidx] + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
+ i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_cck_long[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
+ _TSSI_DE_MASK));
+
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = ofdm_de + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
+ i, ofdm_de, trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i],
+ _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_mcs_20m[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
+ _TSSI_DE_MASK));
+ }
+}
+
+static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
+ "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
+ R_TSSI_PA_K1 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)),
+ R_TSSI_PA_K2 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)),
+ R_P0_TSSI_ALIM1 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)),
+ R_P0_TSSI_ALIM3 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)),
+ R_TSSI_PA_K5 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)),
+ R_P0_TSSI_ALIM2 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)),
+ R_P0_TSSI_ALIM4 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)),
+ R_TSSI_PA_K8 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13)));
+}
+
+static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 channel = chan->channel;
+ u8 band;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s phy=%d path=%d\n", __func__, phy, path);
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
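+ /* Re-apply the stored alignment words for this band if the calibration
+ * has already completed.
+ */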
+ if (tssi_info->alignment_done[path][band]) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][0]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][1]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][2]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][3]);
+ }
+
+ _tssi_alimentk_dump_result(rtwdev, path);
+}
+
+static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
+ u8 enable)
+{
+ enum rtw89_rf_path_bit rx_path;
+
+ if (path == RF_PATH_A)
+ rx_path = RF_A;
+ else if (path == RF_PATH_B)
+ rx_path = RF_B;
+ else if (path == RF_PATH_AB)
+ rx_path = RF_AB;
+ else
+ rx_path = RF_ABCD; /* don't change path, but still set others */
+
+ if (enable) {
+ rtw8852bx_bb_set_plcp_tx(rtwdev);
+ rtw8852bx_bb_cfg_tx_path(rtwdev, path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
+ }
+
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+}
+
+static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, const u32 reg[],
+ u32 reg_backup[], u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
+ reg_backup[i]);
+ }
+}
+
+static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, const u32 reg[],
+ u32 reg_backup[], u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
+ reg_backup[i]);
+ }
+}
+
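+/* Map a channel number to a flat per-channel index: 2G channels 1-14 map to
+ * 0-13, followed by the 5G channels in steps of two.
+ */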
+static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+{
+ u8 channel_index;
+
+ if (channel >= 1 && channel <= 14)
+ channel_index = channel - 1;
+ else if (channel >= 36 && channel <= 64)
+ channel_index = (channel - 36) / 2 + 14;
+ else if (channel >= 100 && channel <= 144)
+ channel_index = ((channel - 100) / 2) + 15 + 14;
+ else if (channel >= 149 && channel <= 177)
+ channel_index = ((channel - 149) / 2) + 38 + 14;
+ else
+ channel_index = 0;
+
+ return channel_index;
+}
+
+static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, const s16 *power,
+ u32 *tssi_cw_rpt)
+{
+ u32 tx_counter, tx_counter_tmp;
+ const int retry = 100;
+ u32 tmp;
+ int j, k;
+
+ for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
+ rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);
+
+ tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
+ _tssi_trigger[path], tmp, path);
+
+ if (j == 0)
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ else
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+
+ tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] First HWTXcounter=%d path=%d\n",
+ tx_counter_tmp, path);
+
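+ /* Poll the TSSI codeword report ready bit, waiting 30us per try for up
+ * to 100 tries.
+ */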
+ for (k = 0; k < retry; k++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
+ B_TSSI_CWRPT_RDY);
+ if (tmp)
+ break;
+
+ udelay(30);
+
+ tx_counter_tmp =
+ rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
+ k, tx_counter_tmp, path);
+ }
+
+ if (k >= retry) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
+ k, path);
+
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ return false;
+ }
+
+ tssi_cw_rpt[j] =
+ rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
+ B_TSSI_CWRPT);
+
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+
+ tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
+ tx_counter_tmp, path);
+ }
+
+ return true;
+}
+
+static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
+ 0x78e4, 0x49c0, 0x0d18, 0x0d80};
+ static const s16 power_2g[4] = {48, 20, 4, -8};
+ static const s16 power_5g[4] = {48, 20, 4, 4};
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
+ u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
+ u8 channel = chan->channel;
+ u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
+ struct rtw8852bx_bb_tssi_bak tssi_bak;
+ s32 aliment_diff, tssi_cw_default;
+ u32 start_time, finish_time;
+ u32 bb_reg_backup[8] = {};
+ const s16 *power;
+ u8 band;
+ bool ok;
+ u32 tmp;
+ u8 j;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======> %s channel=%d path=%d\n", __func__, channel,
+ path);
+
+ start_time = ktime_get_ns();
+
+ if (chan->band_type == RTW89_BAND_2G)
+ power = power_2g;
+ else
+ power = power_5g;
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
+ _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
+ ARRAY_SIZE(bb_reg_backup));
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
+
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ if (!ok)
+ goto out;
+
+ for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
+ power[j], j, tssi_cw_rpt[j]);
+ }
+
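+ /* Offset 1 is the default codeword plus the difference between the measured
+ * codeword delta and 2 * the TX power difference; that same difference is
+ * then added to the defaults for offsets 2 and 3.
+ */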
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
+ _tssi_cw_default_mask[1]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
+ tssi_cw_rpt[1] + tssi_cw_default;
+ aliment_diff = tssi_alim_offset_1 - tssi_cw_default;
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
+ _tssi_cw_default_mask[2]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_2 = tssi_cw_default + aliment_diff;
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
+ _tssi_cw_default_mask[3]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_3 = tssi_cw_default + aliment_diff;
+
+ if (path == RF_PATH_A) {
+ tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
+ FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
+ FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
+ } else {
+ tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
+ FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
+ FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
+ }
+
+ tssi_info->alignment_done[path][band] = true;
+ tssi_info->alignment_value[path][band][0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][1] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][2] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][3] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
+
+ tssi_info->check_backup_aligmk[path][ch_idx] = true;
+ tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM1 + (path << 13),
+ tssi_info->alignment_value[path][band][0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM3 + (path << 13),
+ tssi_info->alignment_value[path][band][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM2 + (path << 13),
+ tssi_info->alignment_value[path][band][2]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM4 + (path << 13),
+ tssi_info->alignment_value[path][band][3]);
+
+out:
+ _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
+ ARRAY_SIZE(bb_reg_backup));
+ rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
+ rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
+
+ finish_time = ktime_get_ns();
+ tssi_info->tssi_alimk_time += finish_time - start_time;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] %s processing time = %d ms\n", __func__,
+ tssi_info->tssi_alimk_time);
+}
+
+void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 path;
+
+ for (path = 0; path < 2; path++) {
+ dpk->cur_idx[path] = 0;
+ dpk->max_dpk_txagc[path] = 0x3F;
+ }
+
+ dpk->is_dpk_enable = true;
+ dpk->is_dpk_reload_en = false;
+ _set_dpd_backoff(rtwdev, RTW89_PHY_0);
+}
+
+void rtw8852bt_rck(struct rtw89_dev *rtwdev)
+{
+ u8 path;
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++)
+ _rck(rtwdev, path);
+}
+
+void rtw8852bt_dack(struct rtw89_dev *rtwdev)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
+ _dac_cal(rtwdev, false);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
+}
+
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _iqk_init(rtwdev);
+ _iqk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _rx_dck(rtwdev, phy_idx);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
+}
+
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
+
+ if (_dpk_bypass_check(rtwdev, phy_idx))
+ _dpk_force_bypass(rtwdev, phy_idx);
+ else
+ _dpk_cal_select(rtwdev, phy_idx, RF_AB);
+}
+
+void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
+{
+ _dpk_track(rtwdev);
+}
+
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+{
+ static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
+ u32 reg_backup[2] = {};
+ u32 tx_en;
+ u8 i;
+
+ _tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, 2);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ _tssi_dpk_off(rtwdev, phy);
+ _tssi_disable(rtwdev, phy);
+
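+ /* Per-path TSSI bring-up; the hardware-TX alignment step (_tssi_alimentk)
+ * runs only when hwtx_en is set, with scheduler TX stopped and TMAC paused
+ * around it.
+ */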
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_dac_gain_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_set_tssi_slope(rtwdev, phy, i);
+
+ rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _tmac_tx_pause(rtwdev, phy, true);
+ if (hwtx_en)
+ _tssi_alimentk(rtwdev, phy, i);
+ _tmac_tx_pause(rtwdev, phy, false);
+ rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+
+ _tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 channel = chan->channel;
+ u8 band;
+ u32 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s phy=%d channel=%d\n", __func__, phy, channel);
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+
+ if (tssi_info->alignment_done[i][band])
+ _tssi_alimentk_done(rtwdev, phy, i);
+ else
+ _tssi_alignment_default(rtwdev, phy, i, true);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
+static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, bool enable)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 channel = chan->channel;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
+ __func__, channel);
+
+ if (enable)
+ return;
+
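+ /* Scan end: restore the default TX AGC offset (0xc0), pulse OFT_EN to
+ * apply it, and re-load the stored TSSI alignment results for both paths.
+ */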
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
+ __func__,
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
+
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
+ __func__,
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======> %s SCAN_END\n", __func__);
+}
+
+void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (scan_start)
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true);
+ else
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
new file mode 100644
index 000000000000..09918835c6e8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BT_RFK_H__
+#define __RTW89_8852BT_RFK_H__
+
+#include "core.h"
+
+void rtw8852bt_rck(struct rtw89_dev *rtwdev);
+void rtw8852bt_dack(struct rtw89_dev *rtwdev);
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev);
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev);
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c
new file mode 100644
index 000000000000..782144bb7f49
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "rtw8852bt_rfk_table.h"
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_defs[] = {
+ RTW89_DECL_RFK_WM(0x12a8, 0x0000000f, 0x4),
+ RTW89_DECL_RFK_WM(0x32a8, 0x0000000f, 0x4),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0x5555),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0x5555),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16),
+ RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x19),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x002d000),
+ RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x1dc80280),
+ RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00002080),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0xc00),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x002d000),
+ RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x1dc80280),
+ RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00002080),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0xc00),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x0ef),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x0ef),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000400, 0x1),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x000),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x000),
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0804008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201019),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081808),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0804008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201019),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081808),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_2g_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x029f57c0),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000077),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x029f5bc0),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000076),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_2g_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g1_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x007ff3d7),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000068),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g1_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g2_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00a003db),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g2_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g3_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01101be2),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g3_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_2g_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x023f3fb9),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000075),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x01df3fb8),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000074),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_2g_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g1_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x010017e0),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g1_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g2_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01201fe2),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000066),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g2_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g3_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01602fe5),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000068),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g3_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_b);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h
new file mode 100644
index 000000000000..beb246237d17
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BT_RFK_TABLE_H__
+#define __RTW89_8852BT_RFK_TABLE_H__
+
+#include "phy.h"
+
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_2g_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g1_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g2_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g3_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_2g_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_b_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 3571b41786d7..193168dc7b6c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -73,6 +73,10 @@ static const u32 rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = {
R_AX_C2HREG_DATA3_V1
};
+static const u32 rtw8852c_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3_V1 + 3, R_AX_DBG_WOW,
+};
+
static const struct rtw89_page_regs rtw8852c_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL_V1,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL_V1,
@@ -2941,6 +2945,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 2,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -3006,7 +3011,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852c_c2h_regs,
.page_regs = &rtw8852c_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
+ .wow_reason_reg = rtw8852c_wow_wakeup_regs,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 654e3e5507cb..743f7014bf3e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -4070,12 +4070,11 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
- DECLARE_BITMAP(map, RTW89_IQK_CHS_NR) = {};
+ struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
const struct rtw89_chan *chan;
enum rtw89_entity_mode mode;
u8 chan_idx;
u8 idx;
- u8 i;
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
@@ -4087,34 +4086,21 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i
break;
}
- for (i = 0; i <= chan_idx; i++) {
- chan = rtw89_chan_get(rtwdev, i);
+ chan = rtw89_chan_get(rtwdev, chan_idx);
- for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
- if (rfk_mcc->ch[idx] == chan->channel &&
- rfk_mcc->band[idx] == chan->band_type) {
- if (i != chan_idx) {
- set_bit(idx, map);
- break;
- }
+ for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
+ struct rtw89_rfk_chan_desc *p = &desc[idx];
- goto bottom;
- }
- }
- }
+ p->ch = rfk_mcc->ch[idx];
- idx = find_first_zero_bit(map, RTW89_IQK_CHS_NR);
- if (idx == RTW89_IQK_CHS_NR) {
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "%s: no empty rfk table; force replace the first\n",
- __func__);
- idx = 0;
+ p->has_band = true;
+ p->band = rfk_mcc->band[idx];
}
+ idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
+
rfk_mcc->ch[idx] = chan->channel;
rfk_mcc->band[idx] = chan->band_type;
-
-bottom:
rfk_mcc->table_idx = idx;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index e07c7f3ade41..8aaad7d58c0d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -55,6 +55,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM_V1,
.cpwm_addr = R_AX_PCIE_CRPWM,
.mit_addr = R_AX_INT_MIT_RX_V1,
+ .wp_sel_addr = R_AX_WP_ADDR_H_SEL0_3,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 3b3ea3a7c19a..2af568a3264d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -85,6 +85,10 @@ static const u32 rtw8922a_c2h_regs[RTW89_H2CREG_MAX] = {
R_BE_C2HREG_DATA3
};
+static const u32 rtw8922a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3_V1 + 3, R_BE_DBG_WOW,
+};
+
static const struct rtw89_page_regs rtw8922a_page_regs = {
.hci_fc_ctrl = R_BE_HCI_FC_CTRL,
.ch_page_ctrl = R_BE_CH_PAGE_CTRL,
@@ -2544,6 +2548,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = 32,
.support_chanctx_num = 2,
.support_rnr = true,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2608,7 +2613,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8922a_c2h_regs,
.page_regs = &rtw8922a_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
+ .wow_reason_reg = rtw8922a_wow_wakeup_regs,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index 2a371829268c..0ebcb06ae848 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -255,6 +255,7 @@ static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {};
enum rtw89_sub_entity_idx sub_entity_idx;
const struct rtw89_chan *chan;
enum rtw89_entity_mode mode;
@@ -265,16 +266,28 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
switch (mode) {
case RTW89_ENTITY_MODE_MCC_PREPARE:
sub_entity_idx = RTW89_SUB_ENTITY_1;
- tbl_sel = 1;
break;
default:
sub_entity_idx = RTW89_SUB_ENTITY_0;
- tbl_sel = 0;
break;
}
chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) {
+ struct rtw89_rfk_chan_desc *p = &desc[tbl_sel];
+
+ p->ch = rfk_mcc->ch[tbl_sel];
+
+ p->has_band = true;
+ p->band = rfk_mcc->band[tbl_sel];
+
+ p->has_bw = true;
+ p->bw = rfk_mcc->bw[tbl_sel];
+ }
+
+ tbl_sel = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
+
rfk_mcc->ch[tbl_sel] = chan->channel;
rfk_mcc->band[tbl_sel] = chan->band_type;
rfk_mcc->bw[tbl_sel] = chan->band_width;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index ce8aaa9501e1..47f855a7a268 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.rpwm_addr = R_BE_PCIE_HRPWM,
.cpwm_addr = R_BE_PCIE_CRPWM,
.mit_addr = R_BE_PCIE_MIT_CH_EN,
+ .wp_sel_addr = R_BE_WP_ADDR_H_SEL0_3_V1,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_be,
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 99896d85d2f8..5fc2faa9ba5a 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -308,9 +308,13 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
- struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+
+ if (rtwvif != target_rtwvif)
+ return;
if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index c467a80ffa88..3882938c0893 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -113,6 +113,8 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
#define RTW89_TXWD_INFO0_DATA_ER BIT(15)
+#define RTW89_TXWD_INFO0_DATA_STBC BIT(12)
+#define RTW89_TXWD_INFO0_DATA_LDPC BIT(11)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
#define RTW89_TXWD_INFO0_DATA_BW_ER BIT(8)
#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
@@ -556,6 +558,8 @@ struct rtw89_phy_sts_ie0 {
#define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0)
#define RTW89_PHY_STS_IE01_W2_EVM_MAX GENMASK(15, 8)
#define RTW89_PHY_STS_IE01_W2_EVM_MIN GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_W2_LDPC BIT(28)
+#define RTW89_PHY_STS_IE01_W2_STBC BIT(30)
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
diff --git a/drivers/net/wireless/realtek/rtw89/util.c b/drivers/net/wireless/realtek/rtw89/util.c
new file mode 100644
index 000000000000..e71956ce9853
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/util.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "util.h"
+
+#define FRAC_ROWS 3
+#define FRAC_ROW_MAX (FRAC_ROWS - 1)
+#define NORM_ROW_MIN FRAC_ROWS
+
+static const u32 db_invert_table[12][8] = {
+ /* rows 0~2 in unit of U(32,3) */
+ {10, 13, 16, 20, 25, 32, 40, 50},
+ {64, 80, 101, 128, 160, 201, 256, 318},
+ {401, 505, 635, 800, 1007, 1268, 1596, 2010},
+ /* rows 3~11 in unit of U(32,0) */
+ {316, 398, 501, 631, 794, 1000, 1259, 1585},
+ {1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000},
+ {12589, 15849, 19953, 25119, 31623, 39811, 50119, 63098},
+ {79433, 100000, 125893, 158489, 199526, 251189, 316228, 398107},
+ {501187, 630957, 794328, 1000000, 1258925, 1584893, 1995262, 2511886},
+ {3162278, 3981072, 5011872, 6309573, 7943282, 10000000, 12589254,
+ 15848932},
+ {19952623, 25118864, 31622777, 39810717, 50118723, 63095734, 79432823,
+ 100000000},
+ {125892541, 158489319, 199526232, 251188643, 316227766, 398107171,
+ 501187234, 630957345},
+ {794328235, 1000000000, 1258925412, 1584893192, 1995262315, 2511886432U,
+ 3162277660U, 3981071706U},
+};
+
+u32 rtw89_linear_2_db(u64 val)
+{
+ u8 i, j;
+ u32 dB;
+
+ for (i = 0; i < 12; i++) {
+ for (j = 0; j < 8; j++) {
+ if (i <= FRAC_ROW_MAX &&
+ (val << RTW89_LINEAR_FRAC_BITS) <= db_invert_table[i][j])
+ goto cnt;
+ else if (i > FRAC_ROW_MAX && val <= db_invert_table[i][j])
+ goto cnt;
+ }
+ }
+
+ return 96; /* maximum 96 dB */
+
+cnt:
+ /* special cases */
+ if (j == 0 && i == 0)
+ goto end;
+
+ if (i == NORM_ROW_MIN && j == 0) {
+ if (db_invert_table[NORM_ROW_MIN][0] - val >
+ val - (db_invert_table[FRAC_ROW_MAX][7] >> RTW89_LINEAR_FRAC_BITS)) {
+ i = FRAC_ROW_MAX;
+ j = 7;
+ }
+ goto end;
+ }
+
+ if (i <= FRAC_ROW_MAX)
+ val <<= RTW89_LINEAR_FRAC_BITS;
+
+ /* compare difference to get precise dB */
+ if (j == 0) {
+ if (db_invert_table[i][j] - val >
+ val - db_invert_table[i - 1][7]) {
+ i--;
+ j = 7;
+ }
+ } else {
+ if (db_invert_table[i][j] - val >
+ val - db_invert_table[i][j - 1]) {
+ j--;
+ }
+ }
+end:
+ dB = (i << 3) + j + 1;
+
+ return dB;
+}
+EXPORT_SYMBOL(rtw89_linear_2_db);
+
+u64 rtw89_db_2_linear(u32 db)
+{
+ u64 linear;
+ u8 i, j;
+
+ if (db > 96)
+ db = 96;
+ else if (db < 1)
+ return 1;
+
+ i = (db - 1) >> 3;
+ j = (db - 1) & 0x7;
+
+ linear = db_invert_table[i][j];
+
+ if (i >= NORM_ROW_MIN)
+ linear = linear << RTW89_LINEAR_FRAC_BITS;
+
+ return linear;
+}
+EXPORT_SYMBOL(rtw89_db_2_linear);
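A note on the two helpers above: db_invert_table encodes 10^(dB/10), with the first three rows carrying RTW89_LINEAR_FRAC_BITS (three) fractional bits, i.e. units of 1/8, and rtw89_db_2_linear() returning its result in those same 1/8 units. The following stand-alone user-space sketch (not part of the patch; file name and use of libm are illustrative only) reproduces the mapping the table approximates:

/*
 * Stand-alone illustration: the values in db_invert_table approximate
 * 10^(dB/10) with three fractional bits of fixed point (units of 1/8).
 * Build with: cc example.c -lm
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 3 /* mirrors RTW89_LINEAR_FRAC_BITS */

static uint64_t db_to_linear_ref(unsigned int db)
{
	/* 10^(db/10), scaled by 2^3 to match the driver's fixed point */
	return (uint64_t)llround(pow(10.0, db / 10.0) * (1 << FRAC_BITS));
}

int main(void)
{
	/* 6 dB ~= 3.98x -> 32 in 1/8 units, matching db_invert_table[0][5] */
	unsigned int samples[] = { 1, 6, 20, 40, 96 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%3u dB -> %llu\n", samples[i],
		       (unsigned long long)db_to_linear_ref(samples[i]));
	return 0;
}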
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index e2ed4565025d..e82e7df052d8 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -6,6 +6,8 @@
#include "core.h"
+#define RTW89_LINEAR_FRAC_BITS 3
+
#define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \
ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
@@ -55,4 +57,7 @@ static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask)
}
}
+u32 rtw89_linear_2_db(u64 linear);
+u64 rtw89_db_2_linear(u32 db);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index fa61484c3839..9882064ef68d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -27,17 +27,23 @@ void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
rtw_wow->akm = rsn_ie->akm_cipher_suite.type;
}
+#define RTW89_CIPHER_INFO_DEF(cipher) \
+ {WLAN_CIPHER_SUITE_ ## cipher, .fw_alg = RTW89_WOW_FW_ALG_ ## cipher, \
+ .len = WLAN_KEY_LEN_ ## cipher}
+
static const struct rtw89_cipher_info rtw89_cipher_info_defs[] = {
- {WLAN_CIPHER_SUITE_WEP40, .fw_alg = 1, .len = WLAN_KEY_LEN_WEP40,},
- {WLAN_CIPHER_SUITE_WEP104, .fw_alg = 2, .len = WLAN_KEY_LEN_WEP104,},
- {WLAN_CIPHER_SUITE_TKIP, .fw_alg = 3, .len = WLAN_KEY_LEN_TKIP,},
- {WLAN_CIPHER_SUITE_CCMP, .fw_alg = 6, .len = WLAN_KEY_LEN_CCMP,},
- {WLAN_CIPHER_SUITE_GCMP, .fw_alg = 8, .len = WLAN_KEY_LEN_GCMP,},
- {WLAN_CIPHER_SUITE_CCMP_256, .fw_alg = 7, .len = WLAN_KEY_LEN_CCMP_256,},
- {WLAN_CIPHER_SUITE_GCMP_256, .fw_alg = 23, .len = WLAN_KEY_LEN_GCMP_256,},
- {WLAN_CIPHER_SUITE_AES_CMAC, .fw_alg = 32, .len = WLAN_KEY_LEN_AES_CMAC,},
+ RTW89_CIPHER_INFO_DEF(WEP40),
+ RTW89_CIPHER_INFO_DEF(WEP104),
+ RTW89_CIPHER_INFO_DEF(TKIP),
+ RTW89_CIPHER_INFO_DEF(CCMP),
+ RTW89_CIPHER_INFO_DEF(GCMP),
+ RTW89_CIPHER_INFO_DEF(CCMP_256),
+ RTW89_CIPHER_INFO_DEF(GCMP_256),
+ RTW89_CIPHER_INFO_DEF(AES_CMAC),
};
+#undef RTW89_CIPHER_INFO_DEF
+
static const
struct rtw89_cipher_info *rtw89_cipher_alg_recognize(u32 cipher)
{
@@ -717,13 +723,18 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
- u32 wow_reason_reg = rtwdev->chip->wow_reason_reg;
struct cfg80211_wowlan_nd_info nd_info;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
+ u32 wow_reason_reg;
u8 reason;
+ if (RTW89_CHK_FW_FEATURE(WOW_REASON_V1, &rtwdev->fw))
+ wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V1];
+ else
+ wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V0];
+
reason = rtw89_read8(rtwdev, wow_reason_reg);
switch (reason) {
case RTW89_WOW_RSN_RX_DEAUTH:
@@ -1284,12 +1295,16 @@ static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev)
static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev)
{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *vif = rtw_wow->wow_vif;
int ret;
ret = rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
if (ret)
rtw89_err(rtwdev, "cfg ppdu status\n");
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
+
return ret;
}
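The RTW89_CIPHER_INFO_DEF helper introduced in wow.c above builds each table entry from a single suffix via token pasting, so the cipher suite ID, firmware algorithm and key length stay in sync by construction. A minimal self-contained sketch of the same pattern (the constants below are illustrative stand-ins, not the mac80211/rtw89 definitions):

/*
 * Illustrative sketch of the token-pasting pattern: one suffix expands
 * into the three related identifiers that make up a table entry.
 */
#include <stdio.h>

#define SUITE_TKIP   0x000fac02
#define SUITE_CCMP   0x000fac04
#define FW_ALG_TKIP  3
#define FW_ALG_CCMP  6
#define KEY_LEN_TKIP 32
#define KEY_LEN_CCMP 16

struct cipher_info {
	unsigned int suite;
	unsigned char fw_alg;
	unsigned char len;
};

#define CIPHER_INFO_DEF(c) \
	{ SUITE_ ## c, .fw_alg = FW_ALG_ ## c, .len = KEY_LEN_ ## c }

static const struct cipher_info defs[] = {
	CIPHER_INFO_DEF(TKIP),
	CIPHER_INFO_DEF(CCMP),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(defs) / sizeof(defs[0]); i++)
		printf("suite=0x%08x alg=%u len=%u\n",
		       defs[i].suite, defs[i].fw_alg, defs[i].len);
	return 0;
}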
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index e595aee0196d..0d90add0e88d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -35,6 +35,17 @@ enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_NLO = 0x55,
};
+enum rtw89_fw_alg {
+ RTW89_WOW_FW_ALG_WEP40 = 0x1,
+ RTW89_WOW_FW_ALG_WEP104 = 0x2,
+ RTW89_WOW_FW_ALG_TKIP = 0x3,
+ RTW89_WOW_FW_ALG_CCMP = 0x6,
+ RTW89_WOW_FW_ALG_CCMP_256 = 0x7,
+ RTW89_WOW_FW_ALG_GCMP = 0x8,
+ RTW89_WOW_FW_ALG_GCMP_256 = 0x9,
+ RTW89_WOW_FW_ALG_AES_CMAC = 0xa,
+};
+
struct rtw89_cipher_suite {
u8 oui[3];
u8 type;
@@ -64,6 +75,25 @@ struct rtw89_set_key_info_iter_data {
bool error;
};
+static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ if (!(rtwdev->chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)))
+ return 0;
+
+ switch (rtw_wow->ptk_alg) {
+ case RTW89_WOW_FW_ALG_WEP40:
+ return 4;
+ case RTW89_WOW_FW_ALG_TKIP:
+ case RTW89_WOW_FW_ALG_CCMP:
+ case RTW89_WOW_FW_ALG_GCMP_256:
+ return 8;
+ default:
+ return 0;
+ }
+}
+
#ifdef CONFIG_PM
int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan);
int rtw89_wow_resume(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 211fa25b9a78..3425a473b9a1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -410,10 +410,11 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw)
/**
* rsi_mac80211_stop() - This is the last handler that 802.11 module calls.
* @hw: Pointer to the ieee80211_hw structure.
+ * @suspend: true if this was called from the suspend flow.
*
* Return: None.
*/
-static void rsi_mac80211_stop(struct ieee80211_hw *hw)
+static void rsi_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index a904602f02ce..216d43c8bd6e 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -805,7 +805,7 @@ int wfx_start(struct ieee80211_hw *hw)
return 0;
}
-void wfx_stop(struct ieee80211_hw *hw)
+void wfx_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wfx_dev *wdev = hw->priv;
diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
index c478ddcb934b..7817c7c6f3dd 100644
--- a/drivers/net/wireless/silabs/wfx/sta.h
+++ b/drivers/net/wireless/silabs/wfx/sta.h
@@ -20,7 +20,7 @@ struct wfx_sta_priv {
/* mac80211 interface */
int wfx_start(struct ieee80211_hw *hw);
-void wfx_stop(struct ieee80211_hw *hw);
+void wfx_stop(struct ieee80211_hw *hw, bool suspend);
int wfx_config(struct ieee80211_hw *hw, u32 changed);
int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx);
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 8ef1d06b9bbd..c259da8161e4 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -90,7 +90,7 @@ out:
return ret;
}
-void cw1200_stop(struct ieee80211_hw *dev)
+void cw1200_stop(struct ieee80211_hw *dev, bool suspend)
{
struct cw1200_common *priv = dev->priv;
LIST_HEAD(list);
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index a49f187c7049..b955b92cfd73 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -13,7 +13,7 @@
/* mac80211 API */
int cw1200_start(struct ieee80211_hw *dev);
-void cw1200_stop(struct ieee80211_hw *dev);
+void cw1200_stop(struct ieee80211_hw *dev, bool suspend);
int cw1200_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif);
void cw1200_remove_interface(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 0da2d29dd7bd..bb53d681c11b 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -415,7 +415,7 @@ out:
return ret;
}
-static void wl1251_op_stop(struct ieee80211_hw *hw)
+static void wl1251_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wl1251 *wl = hw->priv;
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 2ccac1cdec01..39d8eebb9b6e 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1177,8 +1177,49 @@ static int wl18xx_hw_init(struct wl1271 *wl)
return ret;
}
-static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
- struct wl_fw_status *fw_status)
+static void wl18xx_convert_fw_status_8_9_1(struct wl1271 *wl,
+ void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl18xx_fw_status_8_9_1 *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_lnk_sec_pn16 =
+ int_fw_status->counters.tx_lnk_sec_pn16;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+ fw_status->counters.tx_last_rate_mbps =
+ int_fw_status->counters.tx_last_rate_mbps;
+ fw_status->counters.hlid =
+ int_fw_status->counters.hlid;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+
+ fw_status->priv = &int_fw_status->priv;
+}
+
+static void wl18xx_convert_fw_status_8_9_0(struct wl1271 *wl,
+ void *raw_fw_status,
+ struct wl_fw_status *fw_status)
{
struct wl18xx_fw_status *int_fw_status = raw_fw_status;
@@ -1214,6 +1255,15 @@ static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
fw_status->priv = &int_fw_status->priv;
}
+static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ if (wl->chip.fw_ver[FW_VER_MAJOR] == 0)
+ wl18xx_convert_fw_status_8_9_0(wl, raw_fw_status, fw_status);
+ else
+ wl18xx_convert_fw_status_8_9_1(wl, raw_fw_status, fw_status);
+}
+
static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
@@ -1515,12 +1565,29 @@ static int wl18xx_handle_static_data(struct wl1271 *wl,
{
struct wl18xx_static_data_priv *static_data_priv =
(struct wl18xx_static_data_priv *) static_data->priv;
+ size_t fw_status_len;
strscpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
sizeof(wl->chip.phy_fw_ver_str));
wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
+ /* Adjust the firmware status size according to the firmware version */
+ if (wl->chip.fw_ver[FW_VER_MAJOR] == 0)
+ fw_status_len = sizeof(struct wl18xx_fw_status);
+ else
+ fw_status_len = sizeof(struct wl18xx_fw_status_8_9_1);
+
+ if (wl->fw_status_len != fw_status_len) {
+ void *new_status = krealloc(wl->raw_fw_status, fw_status_len,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!new_status)
+ return -ENOMEM;
+
+ wl->raw_fw_status = new_status;
+ wl->fw_status_len = fw_status_len;
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 55d9b0861c53..beef393853ef 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -129,6 +129,14 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
wl1271_free_tx_id(wl, id);
}
+static u8 wl18xx_next_tx_idx(u8 idx)
+{
+ if (++idx >= WL18XX_FW_MAX_TX_STATUS_DESC)
+ idx = 0;
+
+ return idx;
+}
+
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
struct wl18xx_fw_status_priv *status_priv =
@@ -161,9 +169,8 @@ void wl18xx_tx_immediate_complete(struct wl1271 *wl)
return;
}
- for (i = priv->last_fw_rls_idx;
- i != status_priv->fw_release_idx;
- i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
+ for (i = priv->last_fw_rls_idx; i != status_priv->fw_release_idx;
+ i = wl18xx_next_tx_idx(i)) {
wl18xx_tx_complete_packet(wl,
status_priv->released_tx_desc[i]);
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index b642e0c437bb..de6c671c4be6 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -13,7 +13,7 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
#define WL18XX_IFTYPE_VER 9
-#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
+#define WL18XX_MAJOR_VER 0
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
#define WL18XX_MINOR_VER 58
@@ -155,6 +155,66 @@ struct wl18xx_fw_status {
struct wl18xx_fw_status_priv priv;
} __packed;
+struct wl18xx_fw_packet_counters_8_9_1 {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
+
+ /* PN16 of last TKIP/AES seq-num per HLID */
+ __le16 tx_lnk_sec_pn16[WL18XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ /* Tx rate or Tx rate estimate pre-calculated by fw in mbps units */
+ u8 tx_last_rate_mbps;
+
+ /* hlid for which the rates were reported */
+ u8 hlid;
+} __packed;
+
+/* FW status registers */
+struct wl18xx_fw_status_8_9_1 {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl18xx_fw_packet_counters_8_9_1 counters;
+
+ __le32 log_start_addr;
+
+ /* Private status to be used by the lower drivers */
+ struct wl18xx_fw_status_priv priv;
+} __packed;
+
#define WL18XX_PHY_VERSION_MAX_LEN 20
struct wl18xx_static_data_priv {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index a939fd89a7f5..cd8ad0fe59cc 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -333,6 +333,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
wl->links[link].wlvif = wlvif;
/*
+ * Take the last sec_pn16 value from the current FW status. On recovery,
+ * we might not have fw_status yet, and tx_lnk_sec_pn16[] will be NULL.
+ */
+ if (wl->fw_status->counters.tx_lnk_sec_pn16)
+ wl->links[link].prev_sec_pn16 =
+ le16_to_cpu(wl->fw_status->counters.tx_lnk_sec_pn16[link]);
+
+ /*
* Take saved value for total freed packets from wlvif, in case this is
* recovery/resume
*/
@@ -360,6 +368,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
wl->links[*hlid].allocated_pkts = 0;
wl->links[*hlid].prev_freed_pkts = 0;
+ wl->links[*hlid].prev_sec_pn16 = 0;
wl->links[*hlid].ba_bitmap = 0;
eth_zero_addr(wl->links[*hlid].addr);
@@ -1566,13 +1575,6 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
wlvif->band));
- if (!cmd->supported_rates) {
- wl1271_debug(DEBUG_CMD,
- "peer has no supported rates yet, configuring basic rates: 0x%x",
- wlvif->basic_rate_set);
- cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set);
- }
-
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 2499dc908305..6c3a8ea9613e 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -83,7 +83,7 @@ int wlcore_event_fw_logger(struct wl1271 *wl)
/* Copy initial part up to the end of ring buffer */
len = min(actual_len, available_len);
wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
- clear_ptr = addr_ptr + start_loc + actual_len;
+ clear_ptr = addr_ptr + start_loc + len;
if (clear_ptr == buff_end_ptr)
clear_ptr = buff_start_ptr;
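The one-line fix above matters because the fwlog ring buffer is drained in up to two passes when the data wraps: the "consumed up to here" pointer must advance by the bytes copied in the first pass (len), not by the full requested length. A small self-contained sketch of that split copy (buffer contents and sizes are made up for illustration):

/*
 * Illustrative sketch of a wrapped ring-buffer read: the first pass copies
 * up to the end of the ring, the consumed pointer advances by exactly that
 * amount (len) and wraps, and the second pass copies the remainder.
 */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8

int main(void)
{
	const char ring[RING_SIZE] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
	char out[RING_SIZE + 1] = { 0 };
	size_t start = 6;	/* read starts near the end of the ring */
	size_t actual_len = 5;	/* total bytes the producer made available */

	/* first pass: at most up to the end of the ring */
	size_t len = RING_SIZE - start < actual_len ? RING_SIZE - start : actual_len;
	memcpy(out, &ring[start], len);

	/* consumed pointer moves by len, wrapping to the buffer start */
	size_t clear = (start + len) % RING_SIZE;

	/* second pass: remaining bytes from the beginning of the ring */
	memcpy(out + len, &ring[clear], actual_len - len);

	printf("%s\n", out);	/* prints GHABC */
	return 0;
}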
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ef12169f8044..0c77b8524160 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -379,6 +379,8 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
+ struct wl12xx_vif *wlvifsta;
+ struct wl12xx_vif *wlvifap;
struct wl12xx_vif *wlvif;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
@@ -392,7 +394,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
if (ret < 0)
return ret;
- wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
+ wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -410,23 +412,100 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
}
+ /* Find an authorized STA vif */
+ wlvifsta = NULL;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID &&
+ test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) {
+ wlvifsta = wlvif;
+ break;
+ }
+ }
+
+ /* Find a started AP vif */
+ wlvifap = NULL;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
+ wlvif->inconn_count == 0 &&
+ test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ wlvifap = wlvif;
+ break;
+ }
+ }
for_each_set_bit(i, wl->links_map, wl->num_links) {
- u8 diff;
+ u16 diff16, sec_pn16;
+ u8 diff, tx_lnk_free_pkts;
+
lnk = &wl->links[i];
/* prevent wrap-around in freed-packets counter */
- diff = (status->counters.tx_lnk_free_pkts[i] -
- lnk->prev_freed_pkts) & 0xff;
+ tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i];
+ diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff;
+
+ if (diff) {
+ lnk->allocated_pkts -= diff;
+ lnk->prev_freed_pkts = tx_lnk_free_pkts;
+ }
+
+ /* Get the current sec_pn16 value if present */
+ if (status->counters.tx_lnk_sec_pn16)
+ sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]);
+ else
+ sec_pn16 = 0;
+ /* prevent wrap-around in pn16 counter */
+ diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff;
- if (diff == 0)
+ /* FIXME: since free_pkts is an 8-bit counter of packets that
+ * rolls over, it can become zero. If it is zero, then we
+ * omit processing below. Is that really correct?
+ */
+ if (tx_lnk_free_pkts <= 0)
continue;
- lnk->allocated_pkts -= diff;
- lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
+ /* For a station that has an authorized link: */
+ if (wlvifsta && wlvifsta->sta.hlid == i) {
+ if (wlvifsta->encryption_type == KEY_TKIP ||
+ wlvifsta->encryption_type == KEY_AES) {
+ if (diff16) {
+ lnk->prev_sec_pn16 = sec_pn16;
+ /* accumulate the prev_freed_pkts
+ * counter according to the PN from
+ * firmware
+ */
+ lnk->total_freed_pkts += diff16;
+ }
+ } else {
+ if (diff)
+ /* accumulate the prev_freed_pkts
+ * counter according to the free packets
+ * count from firmware
+ */
+ lnk->total_freed_pkts += diff;
+ }
+ }
- /* accumulate the prev_freed_pkts counter */
- lnk->total_freed_pkts += diff;
+ /* For an AP that has been started */
+ if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) {
+ if (wlvifap->encryption_type == KEY_TKIP ||
+ wlvifap->encryption_type == KEY_AES) {
+ if (diff16) {
+ lnk->prev_sec_pn16 = sec_pn16;
+ /* accumulate the prev_freed_pkts
+ * counter according to the PN from
+ * firmware
+ */
+ lnk->total_freed_pkts += diff16;
+ }
+ } else {
+ if (diff)
+ /* accumulate the prev_freed_pkts
+ * counter according to the free packets
+ * count from firmware
+ */
+ lnk->total_freed_pkts += diff;
+ }
+ }
}
/* prevent wrap-around in total blocks counter */
@@ -2006,7 +2085,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
-static void wlcore_op_stop(struct ieee80211_hw *hw)
+static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wl1271 *wl = hw->priv;
@@ -3537,6 +3616,10 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return ret;
}
+ /* Store AP encryption key type */
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wlvif->encryption_type = key_type;
+
/*
* reconfiguring arp response if the unicast (or common)
* encryption key type was changed
@@ -5139,19 +5222,23 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
/* Add station (AP mode) */
if (is_ap &&
- old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE) {
+ old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
ret = wl12xx_sta_add(wl, wlvif, sta);
if (ret)
return ret;
+ wl_sta->fw_added = true;
+
wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
}
/* Remove station (AP mode) */
if (is_ap &&
- old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST) {
+ old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ wl_sta->fw_added = false;
+
/* must not fail */
wl12xx_sta_remove(wl, wlvif, sta);
@@ -5165,11 +5252,6 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
if (ret < 0)
return ret;
- /* reconfigure rates */
- ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
true,
wl_sta->hlid);
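The per-link accounting added to wlcore_fw_status() above treats both the 8-bit free-packets counter and the new 16-bit sec_pn16 value as wrapping counters, taking the delta with a masked unsigned subtraction. A minimal stand-alone sketch (not driver code) of why that subtraction stays correct across wrap-around:

/*
 * Wrap-safe delta for a 16-bit counter: unsigned subtraction followed by a
 * 16-bit mask yields the number of steps taken even if the counter wrapped.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t delta16(uint16_t cur, uint16_t prev)
{
	return (uint16_t)((cur - prev) & 0xffff);
}

int main(void)
{
	/* no wrap: 0x0105 - 0x0100 -> 5 */
	printf("%u\n", delta16(0x0105, 0x0100));
	/* wrapped: 0xfffe advanced five steps to 0x0003 -> still 5 */
	printf("%u\n", delta16(0x0003, 0xfffe));
	return 0;
}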
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7bd3ce2f0804..464587d16ab2 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -140,11 +140,8 @@ EXPORT_SYMBOL(wl12xx_is_dummy_packet);
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta)
{
- if (sta) {
- struct wl1271_station *wl_sta;
-
- wl_sta = (struct wl1271_station *)sta->drv_priv;
- return wl_sta->hlid;
+ if (sta && wl1271_station(sta)->fw_added) {
+ return wl1271_station(sta)->hlid;
} else {
struct ieee80211_hdr *hdr;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index eefae3f867b9..5bdcb341629c 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -151,6 +151,9 @@ struct wl_fw_status {
*/
u8 *tx_lnk_free_pkts;
+ /* PN16 of last TKIP/AES seq-num per HLID */
+ __le16 *tx_lnk_sec_pn16;
+
/* Cumulative counter of released Voice memory blocks */
u8 tx_voice_released_blks;
@@ -259,6 +262,7 @@ struct wl1271_link {
/* accounting for allocated / freed packets in FW */
u8 allocated_pkts;
u8 prev_freed_pkts;
+ u16 prev_sec_pn16;
u8 addr[ETH_ALEN];
@@ -324,6 +328,7 @@ struct wl12xx_rx_filter {
struct wl1271_station {
u8 hlid;
+ bool fw_added;
bool in_connection;
/*
@@ -335,6 +340,11 @@ struct wl1271_station {
u64 total_freed_pkts;
};
+static inline struct wl1271_station *wl1271_station(struct ieee80211_sta *sta)
+{
+ return (struct wl1271_station *)sta->drv_priv;
+}
+
struct wl12xx_vif {
struct wl1271 *wl;
struct list_head list;
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index c5d896994e70..714e1f04b0cb 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -69,6 +69,10 @@ static bool mlo;
module_param(mlo, bool, 0444);
MODULE_PARM_DESC(mlo, "Support MLO");
+static bool multi_radio;
+module_param(multi_radio, bool, 0444);
+MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -669,6 +673,10 @@ struct mac80211_hwsim_data {
struct ieee80211_iface_limit if_limits[3];
int n_if_limits;
+ struct ieee80211_iface_combination if_combination_radio;
+ struct wiphy_radio_freq_range radio_range[NUM_NL80211_BANDS];
+ struct wiphy_radio radio[NUM_NL80211_BANDS];
+
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
@@ -917,6 +925,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
[HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy),
[HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy),
+ [HWSIM_ATTR_MULTI_RADIO] = { .type = NLA_FLAG },
};
#if IS_REACHABLE(CONFIG_VIRTIO)
@@ -2098,7 +2107,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
}
-static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
+static void mac80211_hwsim_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mac80211_hwsim_data *data = hw->priv;
int i;
@@ -2361,6 +2370,7 @@ static const char * const hwsim_chanwidths[] = {
[NL80211_CHAN_WIDTH_4] = "4MHz",
[NL80211_CHAN_WIDTH_8] = "8MHz",
[NL80211_CHAN_WIDTH_16] = "16MHz",
+ [NL80211_CHAN_WIDTH_320] = "eht320",
};
static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
@@ -3260,7 +3270,7 @@ static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw,
hwsim_clear_chanctx_magic(vifs[i].old_ctx);
break;
default:
- WARN_ON("Invalid mode");
+ WARN(1, "Invalid mode %d\n", mode);
}
}
return 0;
@@ -4017,6 +4027,7 @@ struct hwsim_new_radio_params {
bool reg_strict;
bool p2p_device;
bool use_chanctx;
+ bool multi_radio;
bool destroy_on_close;
const char *hwname;
bool no_vif;
@@ -4093,6 +4104,12 @@ static int append_radio_msg(struct sk_buff *skb, int id,
return ret;
}
+ if (param->multi_radio) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_MULTI_RADIO);
+ if (ret < 0)
+ return ret;
+ }
+
if (param->hwname) {
ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME,
strlen(param->hwname), param->hwname);
@@ -5113,6 +5130,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
struct net *net;
int idx, i;
int n_limits = 0;
+ int n_bands = 0;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
return -EINVAL;
@@ -5216,22 +5234,22 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
n_limits++;
}
+ data->if_combination.radar_detect_widths =
+ BIT(NL80211_CHAN_WIDTH_5) |
+ BIT(NL80211_CHAN_WIDTH_10) |
+ BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+
if (data->use_chanctx) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->max_remain_on_channel_duration = 1000;
- data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
} else {
data->if_combination.num_different_channels = 1;
- data->if_combination.radar_detect_widths =
- BIT(NL80211_CHAN_WIDTH_5) |
- BIT(NL80211_CHAN_WIDTH_10) |
- BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160);
}
if (!n_limits) {
@@ -5349,6 +5367,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = &data->bands[band];
+ struct wiphy_radio_freq_range *radio_range;
+ const struct ieee80211_channel *c;
+ struct wiphy_radio *radio;
sband->band = band;
@@ -5422,8 +5443,36 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
mac80211_hwsim_sband_capab(sband);
hw->wiphy->bands[band] = sband;
+
+ if (!param->multi_radio)
+ continue;
+
+ c = sband->channels;
+ radio_range = &data->radio_range[n_bands];
+ radio_range->start_freq = ieee80211_channel_to_khz(c) - 10000;
+
+ c += sband->n_channels - 1;
+ radio_range->end_freq = ieee80211_channel_to_khz(c) + 10000;
+
+ radio = &data->radio[n_bands++];
+ radio->freq_range = radio_range;
+ radio->n_freq_range = 1;
+ radio->iface_combinations = &data->if_combination_radio;
+ radio->n_iface_combinations = 1;
}
+ if (param->multi_radio) {
+ hw->wiphy->radio = data->radio;
+ hw->wiphy->n_radio = n_bands;
+
+ memcpy(&data->if_combination_radio, &data->if_combination,
+ sizeof(data->if_combination));
+ data->if_combination.num_different_channels *= n_bands;
+ }
+
+ if (data->use_chanctx)
+ data->if_combination.radar_detect_widths = 0;
+
/* By default all radios belong to the first group */
data->group = 1;
mutex_init(&data->mutex);
@@ -6041,6 +6090,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
else
param.use_chanctx = (param.channels > 1);
+ if (info->attrs[HWSIM_ATTR_MULTI_RADIO])
+ param.multi_radio = true;
+
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
param.reg_alpha2 =
nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
@@ -6121,7 +6173,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.mlo = info->attrs[HWSIM_ATTR_MLO_SUPPORT];
- if (param.mlo)
+ if (param.mlo || param.multi_radio)
param.use_chanctx = true;
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
@@ -6814,7 +6866,8 @@ static int __init init_mac80211_hwsim(void)
param.p2p_device = support_p2p_device;
param.mlo = mlo;
- param.use_chanctx = channels > 1 || mlo;
+ param.multi_radio = multi_radio;
+ param.use_chanctx = channels > 1 || mlo || multi_radio;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
if (param.p2p_device)
param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
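For the multi-radio support added above, each band is advertised as one radio whose frequency range runs from 10 MHz below the first channel's centre frequency to 10 MHz above the last one's, expressed in kHz. A small sketch of that computation (the channel frequencies below are example inputs, not hwsim's channel tables):

/*
 * Illustrative sketch of the per-band range computed above: mac80211
 * channel centre frequencies are in MHz, the radio range is in kHz,
 * padded by 10 MHz (10000 kHz) on each side.
 */
#include <stdio.h>

struct freq_range {
	unsigned int start_khz;
	unsigned int end_khz;
};

static struct freq_range band_range(unsigned int first_mhz, unsigned int last_mhz)
{
	struct freq_range r = {
		.start_khz = first_mhz * 1000 - 10000,
		.end_khz = last_mhz * 1000 + 10000,
	};

	return r;
}

int main(void)
{
	/* e.g. a 2.4 GHz band spanning channels centred at 2412..2484 MHz */
	struct freq_range r = band_range(2412, 2484);

	printf("%u..%u kHz\n", r.start_khz, r.end_khz);
	return 0;
}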
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index 21b1afd83dc1..f32fc3a492b0 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -157,6 +157,9 @@ enum hwsim_commands {
* to provide details about peer measurement request (nl80211_peer_measurement_attrs)
* @HWSIM_ATTR_PMSR_RESULT: nested attributed used with %HWSIM_CMD_REPORT_PMSR
* to provide peer measurement result (nl80211_peer_measurement_attrs)
+ * @HWSIM_ATTR_MULTI_RADIO: Register multiple wiphy radios (flag).
+ * Adds one radio for each band. Number of supported channels will be set for
+ * each radio instead of for the wiphy.
* @__HWSIM_ATTR_MAX: enum limit
*/
enum hwsim_attrs {
@@ -189,6 +192,7 @@ enum hwsim_attrs {
HWSIM_ATTR_PMSR_SUPPORT,
HWSIM_ATTR_PMSR_REQUEST,
HWSIM_ATTR_PMSR_RESULT,
+ HWSIM_ATTR_MULTI_RADIO,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
@@ -257,7 +261,7 @@ enum hwsim_tx_rate_flags {
};
/**
- * struct hwsim_tx_rate - rate selection/status
+ * struct hwsim_tx_rate_flag - rate selection/status
*
* @idx: rate index to attempt to send with
* @flags: the rate flags according to &enum hwsim_tx_rate_flags
@@ -295,7 +299,7 @@ enum hwsim_vqs {
};
/**
- * enum hwsim_rate_info -- bitrate information.
+ * enum hwsim_rate_info_attributes - bitrate information.
*
* Information about a receiving or transmitting bitrate
* that can be mapped to struct rate_info
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index 6a84ec58d618..4ee374080466 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -136,6 +136,9 @@ static struct ieee80211_supported_band band_5ghz = {
/* Assigned at module init. Guaranteed locally-administered and unicast. */
static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
+#define VIRT_WIFI_SSID "VirtWifi"
+#define VIRT_WIFI_SSID_LEN 8
+
static void virt_wifi_inform_bss(struct wiphy *wiphy)
{
u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
@@ -146,8 +149,8 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy)
u8 ssid[8];
} __packed ssid = {
.tag = WLAN_EID_SSID,
- .len = 8,
- .ssid = "VirtWifi",
+ .len = VIRT_WIFI_SSID_LEN,
+ .ssid = VIRT_WIFI_SSID,
};
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
@@ -213,6 +216,8 @@ struct virt_wifi_netdev_priv {
struct net_device *upperdev;
u32 tx_packets;
u32 tx_failed;
+ u32 connect_requested_ssid_len;
+ u8 connect_requested_ssid[IEEE80211_MAX_SSID_LEN];
u8 connect_requested_bss[ETH_ALEN];
bool is_up;
bool is_connected;
@@ -229,6 +234,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
if (priv->being_deleted || !priv->is_up)
return -EBUSY;
+ if (!sme->ssid)
+ return -EINVAL;
+
+ priv->connect_requested_ssid_len = sme->ssid_len;
+ memcpy(priv->connect_requested_ssid, sme->ssid, sme->ssid_len);
+
could_schedule = schedule_delayed_work(&priv->connect, HZ * 2);
if (!could_schedule)
return -EBUSY;
@@ -252,12 +263,15 @@ static void virt_wifi_connect_complete(struct work_struct *work)
container_of(work, struct virt_wifi_netdev_priv, connect.work);
u8 *requested_bss = priv->connect_requested_bss;
bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
+ bool right_ssid = priv->connect_requested_ssid_len == VIRT_WIFI_SSID_LEN &&
+ !memcmp(priv->connect_requested_ssid, VIRT_WIFI_SSID,
+ priv->connect_requested_ssid_len);
u16 status = WLAN_STATUS_SUCCESS;
if (is_zero_ether_addr(requested_bss))
requested_bss = NULL;
- if (!priv->is_up || (requested_bss && !right_addr))
+ if (!priv->is_up || (requested_bss && !right_addr) || !right_ssid)
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
else
priv->is_connected = true;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 900c063bd724..f90c33d19b39 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -326,7 +326,7 @@ out:
return r;
}
-void zd_op_stop(struct ieee80211_hw *hw)
+void zd_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.h b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h
index 5ff84bdc5a4c..053748a474ec 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h
@@ -303,7 +303,7 @@ void zd_mac_tx_failed(struct urb *urb);
void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
int zd_op_start(struct ieee80211_hw *hw);
-void zd_op_stop(struct ieee80211_hw *hw);
+void zd_op_stop(struct ieee80211_hw *hw, bool suspend);
int zd_restore_settings(struct zd_mac *mac);
#ifdef DEBUG
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index f3b567a13ded..a8a94edf2a70 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1476,7 +1476,7 @@ static void zd_usb_stop(struct zd_usb *usb)
{
dev_dbg_f(zd_usb_dev(usb), "\n");
- zd_op_stop(zd_usb_to_hw(usb));
+ zd_op_stop(zd_usb_to_hw(usb), false);
zd_usb_disable_tx(usb);
zd_usb_disable_rx(usb);
@@ -1698,7 +1698,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
int r, i, req_len, actual_req_len, try_count = 0;
struct usb_device *udev;
struct usb_req_read_regs *req = NULL;
- unsigned long timeout;
+ unsigned long time_left;
bool retry = false;
if (count < 1) {
@@ -1748,9 +1748,9 @@ retry_read:
goto error;
}
- timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
- msecs_to_jiffies(50));
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&usb->intr.read_regs.completion,
+ msecs_to_jiffies(50));
+ if (!time_left) {
disable_read_regs_int(usb);
dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
r = -ETIMEDOUT;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index bef6819986e9..33d6342124bc 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -211,7 +211,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
rc = PTR_ERR(devlink->cd_regions[i]);
dev_err(devlink->dev, "Devlink region fail,err %d", rc);
/* Delete previously created regions */
- for ( ; i >= 0; i--)
+ for (i--; i >= 0; i--)
devlink_region_destroy(devlink->cd_regions[i]);
goto region_create_fail;
}
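The loop-initialiser fix above restores the usual partial-unwind pattern for error paths: when creating entry i fails, only entries 0..i-1 were actually created, so teardown must start at i - 1 rather than at i. A self-contained sketch of the pattern (names and the failure point are invented for illustration):

/*
 * Partial unwind on error: tear down only the entries that were created
 * before the failure, starting at i - 1.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_REGIONS 4

static bool create_region(int i)
{
	return i != 2; /* pretend the third creation fails */
}

static void destroy_region(int i)
{
	printf("destroy %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_REGIONS; i++) {
		if (!create_region(i)) {
			/* start at i - 1: entry i was never created */
			for (i--; i >= 0; i--)
				destroy_region(i);
			return 1;
		}
	}
	return 0;
}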
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 642df4e0ce24..113b2e306e35 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -277,7 +277,7 @@ static void microread_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id microread_i2c_id[] = {
- { MICROREAD_I2C_DRIVER_NAME, 0},
+ { MICROREAD_I2C_DRIVER_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, microread_i2c_id);
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 74553134c1b1..39ecf2aeda80 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -252,7 +252,7 @@ static const struct of_device_id of_nfcmrvl_i2c_match[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, of_nfcmrvl_i2c_match);
static const struct i2c_device_id nfcmrvl_i2c_id_table[] = {
- { "nfcmrvl_i2c", 0 },
+ { "nfcmrvl_i2c" },
{}
};
MODULE_DEVICE_TABLE(i2c, nfcmrvl_i2c_id_table);
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 3ae4b41c59ac..a8aced0b8010 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -322,7 +322,7 @@ static void nxp_nci_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id nxp_nci_i2c_id_table[] = {
- {"nxp-nci_i2c", 0},
+ { "nxp-nci_i2c" },
{}
};
MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table);
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 438ab9553f7a..132c050a365d 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -249,7 +249,7 @@ static const struct of_device_id of_pn533_i2c_match[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, of_pn533_i2c_match);
static const struct i2c_device_id pn533_i2c_id_table[] = {
- { PN533_I2C_DRIVER_NAME, 0 },
+ { PN533_I2C_DRIVER_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table);
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 3f6d74832bac..9fe664960b38 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -44,7 +44,7 @@
PN544_HCI_I2C_LLC_MAX_PAYLOAD)
static const struct i2c_device_id pn544_hci_i2c_id_table[] = {
- {"pn544", 0},
+ { "pn544" },
{}
};
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 720d4a72493c..536c566e3f59 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -245,7 +245,7 @@ static void s3fwrn5_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id s3fwrn5_i2c_id_table[] = {
- {S3FWRN5_I2C_DRIVER_NAME, 0},
+ { S3FWRN5_I2C_DRIVER_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table);
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index d20a337e90b4..416770adbeba 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -257,7 +257,7 @@ static void st_nci_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id st_nci_i2c_id_table[] = {
- {ST_NCI_DRIVER_NAME, 0},
+ { ST_NCI_DRIVER_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table);
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 064a63db288b..02c3d11a19c4 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -573,7 +573,7 @@ static void st21nfca_hci_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
- {ST21NFCA_HCI_DRIVER_NAME, 0},
+ { ST21NFCA_HCI_DRIVER_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table);
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index 590b038e449e..6b89d596ba9a 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -125,6 +125,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
kfree_skb(skb);
return -EFAULT;
}
+ if (strnlen(skb->data, count) != count) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
nci_recv_frame(vdev->ndev, skb);
return count;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 1e5aedaf8c7b..e79c06d65bb7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1501,9 +1501,15 @@ static int btt_blk_init(struct btt *btt)
.logical_block_size = btt->sector_size,
.max_hw_sectors = UINT_MAX,
.max_integrity_segments = 1,
+ .features = BLK_FEAT_SYNCHRONOUS,
};
int rc;
+ if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
+ lim.integrity.tuple_size = btt_meta_size(btt);
+ lim.integrity.tag_size = btt_meta_size(btt);
+ }
+
btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
if (IS_ERR(btt->btt_disk))
return PTR_ERR(btt->btt_disk);
@@ -1513,17 +1519,6 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
-
- if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
- struct blk_integrity bi = {
- .tuple_size = btt_meta_size(btt),
- .tag_size = btt_meta_size(btt),
- };
- blk_integrity_register(btt->btt_disk, &bi);
- }
-
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
if (rc)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 598fe2e89bda..1dd74c969d5a 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -455,6 +455,8 @@ static int pmem_attach_disk(struct device *dev,
.logical_block_size = pmem_sector_size(ndns),
.physical_block_size = PAGE_SIZE,
.max_hw_sectors = UINT_MAX,
+ .features = BLK_FEAT_WRITE_CACHE |
+ BLK_FEAT_SYNCHRONOUS,
};
int nid = dev_to_node(dev), fua;
struct resource *res = &nsio->res;
@@ -463,7 +465,6 @@ static int pmem_attach_disk(struct device *dev,
struct dax_device *dax_dev;
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
- struct request_queue *q;
struct gendisk *disk;
void *addr;
int rc;
@@ -495,6 +496,10 @@ static int pmem_attach_disk(struct device *dev,
dev_warn(dev, "unable to guarantee persistence of writes\n");
fua = 0;
}
+ if (fua)
+ lim.features |= BLK_FEAT_FUA;
+ if (is_nd_pfn(dev))
+ lim.features |= BLK_FEAT_DAX;
if (!devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(&ndns->dev))) {
@@ -505,7 +510,6 @@ static int pmem_attach_disk(struct device *dev,
disk = blk_alloc_disk(&lim, nid);
if (IS_ERR(disk))
return PTR_ERR(disk);
- q = disk->queue;
pmem->disk = disk;
pmem->pgmap.owner = pmem;
@@ -543,12 +547,6 @@ static int pmem_attach_disk(struct device *dev,
}
pmem->virt_addr = addr;
- blk_queue_write_cache(q, true, fua);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
- if (pmem->pfn_flags & PFN_MAP)
- blk_queue_flag_set(QUEUE_FLAG_DAX, q);
-
disk->fops = &pmem_fops;
disk->private_data = pmem;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index b309c8be720f..a3caef75aa0a 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
- select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index dd6ec0865141..b1387dc459a3 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1388,7 +1388,7 @@ static void devm_apple_nvme_mempool_destroy(void *data)
mempool_destroy(data);
}
-static int apple_nvme_probe(struct platform_device *pdev)
+static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct apple_nvme *anv;
@@ -1396,7 +1396,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
if (!anv)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
anv->dev = get_device(dev);
anv->adminq.is_adminq = true;
@@ -1516,10 +1516,30 @@ static int apple_nvme_probe(struct platform_device *pdev)
goto put_dev;
}
+ return anv;
+put_dev:
+ put_device(anv->dev);
+ return ERR_PTR(ret);
+}
+
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+ struct apple_nvme *anv;
+ int ret;
+
+ anv = apple_nvme_alloc(pdev);
+ if (IS_ERR(anv))
+ return PTR_ERR(anv);
+
+ ret = nvme_add_ctrl(&anv->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
if (IS_ERR(anv->ctrl.admin_q)) {
ret = -ENOMEM;
- goto put_dev;
+ anv->ctrl.admin_q = NULL;
+ goto out_uninit_ctrl;
}
nvme_reset_ctrl(&anv->ctrl);
@@ -1527,8 +1547,10 @@ static int apple_nvme_probe(struct platform_device *pdev)
return 0;
-put_dev:
- put_device(anv->dev);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&anv->ctrl);
+out_put_ctrl:
+ nvme_put_ctrl(&anv->ctrl);
return ret;
}
@@ -1602,4 +1624,5 @@ static struct platform_driver apple_nvme_driver = {
module_platform_driver(apple_nvme_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
MODULE_LICENSE("GPL");
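
The Apple driver (and the FC driver further down) is reworked into a split probe flow: an allocation helper that ends in nvme_init_ctrl(), followed by nvme_add_ctrl() from probe proper, with errors unwound through nvme_uninit_ctrl()/nvme_put_ctrl(). A condensed sketch of that flow for a hypothetical transport, assuming an alloc helper shaped like apple_nvme_alloc():

/*
 * Condensed sketch of the split probe flow introduced above, for a
 * hypothetical transport "foo". foo_nvme_alloc() is assumed to call
 * nvme_init_ctrl() internally and return a refcounted controller.
 */
static int foo_nvme_probe(struct platform_device *pdev)
{
	struct foo_nvme *foo;
	int ret;

	foo = foo_nvme_alloc(pdev);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	/* names the controller and registers its character device */
	ret = nvme_add_ctrl(&foo->ctrl);
	if (ret)
		goto out_put_ctrl;

	/* ... allocate tag sets, create queues, start the controller ... */
	return 0;

out_put_ctrl:
	nvme_put_ctrl(&foo->ctrl);
	return ret;
}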
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 6f2ebb5fcdb0..2b9e6cfaf2a8 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -173,7 +173,7 @@ static const char * const nvme_statuses[] = {
const char *nvme_get_error_status_str(u16 status)
{
- status &= 0x7ff;
+ status &= NVME_SCT_SC_MASK;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
return nvme_statuses[status];
return "Unknown";
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 954f850f113a..19917253ba7b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -110,7 +110,7 @@ struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
static LIST_HEAD(nvme_subsystems);
-static DEFINE_MUTEX(nvme_subsystems_lock);
+DEFINE_MUTEX(nvme_subsystems_lock);
static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
@@ -261,7 +261,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
static blk_status_t nvme_error_status(u16 status)
{
- switch (status & 0x7ff) {
+ switch (status & NVME_SCT_SC_MASK) {
case NVME_SC_SUCCESS:
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
@@ -307,7 +307,7 @@ static void nvme_retry_req(struct request *req)
u16 crd;
/* The mask and shift result must be <= 3 */
- crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
+ crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11;
if (crd)
delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
@@ -329,10 +329,10 @@ static void nvme_log_error(struct request *req)
nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
blk_rq_bytes(req) >> ns->head->lba_shift,
nvme_get_error_status_str(nr->status),
- nr->status >> 8 & 7, /* Status Code Type */
- nr->status & 0xff, /* Status Code */
- nr->status & NVME_SC_MORE ? "MORE " : "",
- nr->status & NVME_SC_DNR ? "DNR " : "");
+ NVME_SCT(nr->status), /* Status Code Type */
+ nr->status & NVME_SC_MASK, /* Status Code */
+ nr->status & NVME_STATUS_MORE ? "MORE " : "",
+ nr->status & NVME_STATUS_DNR ? "DNR " : "");
return;
}
@@ -341,10 +341,10 @@ static void nvme_log_error(struct request *req)
nvme_get_admin_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
nvme_get_error_status_str(nr->status),
- nr->status >> 8 & 7, /* Status Code Type */
- nr->status & 0xff, /* Status Code */
- nr->status & NVME_SC_MORE ? "MORE " : "",
- nr->status & NVME_SC_DNR ? "DNR " : "");
+ NVME_SCT(nr->status), /* Status Code Type */
+ nr->status & NVME_SC_MASK, /* Status Code */
+ nr->status & NVME_STATUS_MORE ? "MORE " : "",
+ nr->status & NVME_STATUS_DNR ? "DNR " : "");
}
static void nvme_log_err_passthru(struct request *req)
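
The NVME_SCT()/NVME_SC_MASK/NVME_STATUS_* macros used above replace open-coded masks on the status word kept in nvme_req(req)->status. A stand-alone decoder using the bit layout the replaced literals imply (SC in bits 0-7, SCT in bits 8-10, CRD, MORE and DNR above them); the local defines restate those values purely for illustration:

/* Stand-alone decoder for the NVMe status word layout implied above. */
#include <stdio.h>

#define SC_MASK		0x00ff	/* Status Code */
#define SCT_MASK	0x0700	/* Status Code Type */
#define CRD_MASK	0x1800	/* Command Retry Delay index */
#define MORE_BIT	0x2000
#define DNR_BIT		0x4000

int main(void)
{
	unsigned int status = 0x4002;	/* e.g. Invalid Field with DNR set */

	printf("SCT=%u SC=0x%02x CRD=%u MORE=%d DNR=%d\n",
	       (status & SCT_MASK) >> 8, status & SC_MASK,
	       (status & CRD_MASK) >> 11,
	       !!(status & MORE_BIT), !!(status & DNR_BIT));
	return 0;
}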
@@ -359,10 +359,10 @@ static void nvme_log_err_passthru(struct request *req)
nvme_get_admin_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
nvme_get_error_status_str(nr->status),
- nr->status >> 8 & 7, /* Status Code Type */
- nr->status & 0xff, /* Status Code */
- nr->status & NVME_SC_MORE ? "MORE " : "",
- nr->status & NVME_SC_DNR ? "DNR " : "",
+ NVME_SCT(nr->status), /* Status Code Type */
+ nr->status & NVME_SC_MASK, /* Status Code */
+ nr->status & NVME_STATUS_MORE ? "MORE " : "",
+ nr->status & NVME_STATUS_DNR ? "DNR " : "",
nr->cmd->common.cdw10,
nr->cmd->common.cdw11,
nr->cmd->common.cdw12,
@@ -384,11 +384,11 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
return COMPLETE;
if (blk_noretry_request(req) ||
- (nvme_req(req)->status & NVME_SC_DNR) ||
+ (nvme_req(req)->status & NVME_STATUS_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
return COMPLETE;
- if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED)
return AUTHENTICATE;
if (req->cmd_flags & REQ_NVME_MPATH) {
@@ -414,7 +414,15 @@ static inline void nvme_end_req_zoned(struct request *req)
}
}
-static inline void nvme_end_req(struct request *req)
+static inline void __nvme_end_req(struct request *req)
+{
+ nvme_end_req_zoned(req);
+ nvme_trace_bio_complete(req);
+ if (req->cmd_flags & REQ_NVME_MPATH)
+ nvme_mpath_end_request(req);
+}
+
+void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
@@ -424,10 +432,7 @@ static inline void nvme_end_req(struct request *req)
else
nvme_log_error(req);
}
- nvme_end_req_zoned(req);
- nvme_trace_bio_complete(req);
- if (req->cmd_flags & REQ_NVME_MPATH)
- nvme_mpath_end_request(req);
+ __nvme_end_req(req);
blk_mq_end_request(req, status);
}
@@ -476,7 +481,7 @@ void nvme_complete_batch_req(struct request *req)
{
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- nvme_end_req_zoned(req);
+ __nvme_end_req(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
@@ -673,7 +678,7 @@ static void nvme_free_ns(struct kref *kref)
kfree(ns);
}
-static inline bool nvme_get_ns(struct nvme_ns *ns)
+bool nvme_get_ns(struct nvme_ns *ns)
{
return kref_get_unless_zero(&ns->kref);
}
@@ -922,6 +927,36 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
return BLK_STS_OK;
}
+/*
+ * NVMe does not support a dedicated command to issue an atomic write. A write
+ * which does not adhere to the device atomic limits will silently be executed
+ * non-atomically. The request issuer should ensure that the write is within
+ * the queue's atomic write limits, but validate it here in case it is not.
+ */
+static bool nvme_valid_atomic_write(struct request *req)
+{
+ struct request_queue *q = req->q;
+ u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
+
+ if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
+ return false;
+
+ if (boundary_bytes) {
+ u64 mask = boundary_bytes - 1, imask = ~mask;
+ u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
+ u64 end = start + blk_rq_bytes(req) - 1;
+
+ /* If larger than the boundary size, it must cross a boundary */
+ if (blk_rq_bytes(req) > boundary_bytes)
+ return false;
+
+ if ((start & imask) != (end & imask))
+ return false;
+ }
+
+ return true;
+}
+
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd,
enum nvme_opcode op)
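
A quick worked check of the boundary test in nvme_valid_atomic_write() above, written as plain user-space C with an assumed 64 KiB boundary; the values are illustrative only:

/* Worked example of the boundary test: differing upper bits mean the
 * byte range straddles a boundary multiple. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool crosses_boundary(uint64_t start, uint64_t len, uint64_t boundary)
{
	uint64_t mask = boundary - 1, imask = ~mask;
	uint64_t end = start + len - 1;

	return (start & imask) != (end & imask);
}

int main(void)
{
	/* 8 KiB write ending exactly at the 64 KiB line: does not cross */
	printf("%d\n", crosses_boundary(56 * 1024, 8 * 1024, 64 * 1024));	/* 0 */
	/* same length shifted by one 512-byte sector: crosses */
	printf("%d\n", crosses_boundary(56 * 1024 + 512, 8 * 1024, 64 * 1024));	/* 1 */
	return 0;
}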
@@ -937,6 +972,9 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+ if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
+ return BLK_STS_INVAL;
+
cmnd->rw.opcode = op;
cmnd->rw.flags = 0;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
@@ -993,6 +1031,7 @@ void nvme_cleanup_cmd(struct request *req)
clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(bvec_virt(&req->special_vec));
+ req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -1218,7 +1257,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
/*
* Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
- *
+ *
* The host should send Keep Alive commands at half of the Keep Alive Timeout
* accounting for transport roundtrip times [..].
*/
@@ -1718,11 +1757,12 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
+static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head,
+ struct queue_limits *lim)
{
- struct blk_integrity integrity = { };
+ struct blk_integrity *bi = &lim->integrity;
- blk_integrity_unregister(disk);
+ memset(bi, 0, sizeof(*bi));
if (!head->ms)
return true;
@@ -1739,17 +1779,16 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
case NVME_NS_DPS_PI_TYPE3:
switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
- integrity.profile = &t10_pi_type3_crc;
- integrity.tag_size = sizeof(u16) + sizeof(u32);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
+ bi->tag_size = sizeof(u16) + sizeof(u32);
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
case NVME_NVM_NS_64B_GUARD:
- integrity.profile = &ext_pi_type3_crc64;
- integrity.tag_size = sizeof(u16) + 6;
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
+ bi->tag_size = sizeof(u16) + 6;
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
default:
- integrity.profile = NULL;
break;
}
break;
@@ -1757,28 +1796,27 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
case NVME_NS_DPS_PI_TYPE2:
switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
- integrity.profile = &t10_pi_type1_crc;
- integrity.tag_size = sizeof(u16);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
+ bi->tag_size = sizeof(u16);
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
+ BLK_INTEGRITY_REF_TAG;
break;
case NVME_NVM_NS_64B_GUARD:
- integrity.profile = &ext_pi_type1_crc64;
- integrity.tag_size = sizeof(u16);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
+ bi->tag_size = sizeof(u16);
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
+ BLK_INTEGRITY_REF_TAG;
break;
default:
- integrity.profile = NULL;
break;
}
break;
default:
- integrity.profile = NULL;
break;
}
- integrity.tuple_size = head->ms;
- integrity.pi_offset = head->pi_offset;
- blk_integrity_register(disk, &integrity);
+ bi->tuple_size = head->ms;
+ bi->pi_offset = head->pi_offset;
return true;
}
@@ -1916,6 +1954,23 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
}
}
+
+static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
+ struct nvme_id_ns *id, struct queue_limits *lim,
+ u32 bs, u32 atomic_bs)
+{
+ unsigned int boundary = 0;
+
+ if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) {
+ if (le16_to_cpu(id->nabspf))
+ boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
+ }
+ lim->atomic_write_hw_max = atomic_bs;
+ lim->atomic_write_hw_boundary = boundary;
+ lim->atomic_write_hw_unit_min = bs;
+ lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+}
+
static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
{
return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
@@ -1962,13 +2017,16 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+
+ nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs);
}
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
/* NPWG = Namespace Preferred Write Granularity */
phys_bs = bs * (1 + le16_to_cpu(id->npwg));
/* NOWS = Namespace Optimal Write Size */
- io_opt = bs * (1 + le16_to_cpu(id->nows));
+ if (id->nows)
+ io_opt = bs * (1 + le16_to_cpu(id->nows));
}
/*
@@ -2052,7 +2110,6 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
- bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
struct queue_limits lim;
struct nvme_id_ns_nvm *nvm = NULL;
struct nvme_zone_info zi = {};
@@ -2101,11 +2158,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
- ret = queue_limits_commit_update(ns->disk->queue, &lim);
- if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
- goto out;
- }
+
+ if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
+ else
+ lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
/*
* Register a metadata profile for PI, or the plain non-integrity NVMe
@@ -2113,9 +2170,15 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
* I/O to namespaces with metadata except when the namespace supports
* PI, as it can strip/insert in that case.
*/
- if (!nvme_init_integrity(ns->disk, ns->head))
+ if (!nvme_init_integrity(ns->disk, ns->head, &lim))
capacity = 0;
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
+
set_capacity_and_notify(ns->disk, capacity);
/*
@@ -2127,7 +2190,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
- blk_queue_write_cache(ns->disk->queue, vwc, vwc);
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -2187,14 +2249,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
struct queue_limits lim;
blk_mq_freeze_queue(ns->head->disk->queue);
- if (unsupported)
- ns->head->disk->flags |= GENHD_FL_HIDDEN;
- else
- nvme_init_integrity(ns->head->disk, ns->head);
- set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
- set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
- nvme_mpath_revalidate_paths(ns);
-
/*
* queue_limits mixes values that are the hardware limitations
* for bio splitting with what is the device configuration.
@@ -2217,13 +2271,48 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
lim.io_opt = ns_lim->io_opt;
queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
ns->head->disk->disk_name);
+ if (unsupported)
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ else
+ nvme_init_integrity(ns->head->disk, ns->head, &lim);
ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
+
+ set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
return ret;
}
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+ enum blk_unique_id type)
+{
+ struct nvme_ns_ids *ids = &ns->head->ids;
+
+ if (type != BLK_UID_EUI64)
+ return -EINVAL;
+
+ if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) {
+ memcpy(id, &ids->nguid, sizeof(ids->nguid));
+ return sizeof(ids->nguid);
+ }
+ if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) {
+ memcpy(id, &ids->eui64, sizeof(ids->eui64));
+ return sizeof(ids->eui64);
+ }
+
+ return -EINVAL;
+}
+
+static int nvme_get_unique_id(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id type)
+{
+ return nvme_ns_get_unique_id(disk->private_data, id, type);
+}
+
#ifdef CONFIG_BLK_SED_OPAL
static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send)
@@ -2279,6 +2368,7 @@ const struct block_device_operations nvme_bdev_ops = {
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
+ .get_unique_id = nvme_get_unique_id,
.report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -3679,9 +3769,10 @@ out_unlock:
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (ns->head->ns_id == nsid) {
if (!nvme_get_ns(ns))
continue;
@@ -3691,7 +3782,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ns->head->ns_id > nsid)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
@@ -3705,7 +3796,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
if (tmp->head->ns_id < ns->head->ns_id) {
- list_add(&ns->list, &tmp->list);
+ list_add_rcu(&ns->list, &tmp->list);
return;
}
}
@@ -3714,6 +3805,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
+ struct queue_limits lim = { };
struct nvme_ns *ns;
struct gendisk *disk;
int node = ctrl->numa_node;
@@ -3722,7 +3814,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (!ns)
return;
- disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns);
+ if (ctrl->opts && ctrl->opts->data_digest)
+ lim.features |= BLK_FEAT_STABLE_WRITES;
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
+ lim.features |= BLK_FEAT_PCI_P2PDMA;
+
+ disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns);
if (IS_ERR(disk))
goto out_free_ns;
disk->fops = &nvme_bdev_ops;
@@ -3730,15 +3828,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
ns->disk = disk;
ns->queue = disk->queue;
-
- if (ctrl->opts && ctrl->opts->data_digest)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
- if (ctrl->ops->supports_pci_p2pdma &&
- ctrl->ops->supports_pci_p2pdma(ctrl))
- blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
-
ns->ctrl = ctrl;
kref_init(&ns->kref);
@@ -3771,17 +3860,18 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
/*
* Ensure that no namespaces are added to the ctrl list after the queues
* are frozen, thereby avoiding a deadlock between scan and reset.
*/
if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
goto out_unlink_ns;
}
nvme_ns_add_to_ctrl_list(ns);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
nvme_get_ctrl(ctrl);
if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
@@ -3804,9 +3894,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
out_cleanup_ns_from_list:
nvme_put_ctrl(ctrl);
- down_write(&ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3856,9 +3947,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
- down_write(&ns->ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ns->ctrl->namespaces_rwsem);
+ mutex_lock(&ns->ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ns->ctrl->namespaces_lock);
+ synchronize_srcu(&ns->ctrl->srcu);
if (last_path)
nvme_mpath_shutdown_disk(ns->head);
@@ -3877,7 +3969,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
- int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
dev_err(ns->ctrl->device,
@@ -3893,7 +3985,7 @@ out:
*
* TODO: we should probably schedule a delayed retry here.
*/
- if (ret > 0 && (ret & NVME_SC_DNR))
+ if (ret > 0 && (ret & NVME_STATUS_DNR))
nvme_ns_remove(ns);
}
@@ -3948,16 +4040,18 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
struct nvme_ns *ns, *next;
LIST_HEAD(rm_list);
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->head->ns_id > nsid)
- list_move_tail(&ns->list, &rm_list);
+ if (ns->head->ns_id > nsid) {
+ list_del_rcu(&ns->list);
+ synchronize_srcu(&ctrl->srcu);
+ list_add_tail_rcu(&ns->list, &rm_list);
+ }
}
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &rm_list, list)
nvme_ns_remove(ns);
-
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
@@ -4083,7 +4177,7 @@ static void nvme_scan_work(struct work_struct *work)
* they report) but don't actually support it.
*/
ret = nvme_scan_ns_list(ctrl);
- if (ret > 0 && ret & NVME_SC_DNR)
+ if (ret > 0 && ret & NVME_STATUS_DNR)
nvme_scan_ns_sequential(ctrl);
}
mutex_unlock(&ctrl->scan_lock);
@@ -4127,9 +4221,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
/* this is a no-op when called from the controller reset handler */
nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
- down_write(&ctrl->namespaces_rwsem);
- list_splice_init(&ctrl->namespaces, &ns_list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
list_for_each_entry_safe(ns, next, &ns_list, list)
nvme_ns_remove(ns);
@@ -4476,13 +4571,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
return ret;
if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL);
+ struct queue_limits lim = {
+ .features = BLK_FEAT_SKIP_TAGSET_QUIESCE,
+ };
+
+ ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL);
if (IS_ERR(ctrl->connect_q)) {
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set;
}
- blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
- ctrl->connect_q);
}
ctrl->tagset = set;
@@ -4577,6 +4674,7 @@ static void nvme_free_ctrl(struct device *dev)
key_put(ctrl->tls_key);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ cleanup_srcu_struct(&ctrl->srcu);
nvme_auth_stop(ctrl);
nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
@@ -4599,6 +4697,9 @@ static void nvme_free_ctrl(struct device *dev)
* Initialize an NVMe controller structure. This needs to be called during
* the earliest initialization so that we have the initialized structure around
* during probing.
+ *
+ * On success, the caller must use nvme_put_ctrl() to release this when
+ * needed, which also invokes the ops->free_ctrl() callback.
*/
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks)
@@ -4609,10 +4710,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->namespaces_lock);
+
+ ret = init_srcu_struct(&ctrl->srcu);
+ if (ret)
+ return ret;
+
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
xa_init(&ctrl->cels);
- init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
@@ -4642,6 +4748,12 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
goto out;
ctrl->instance = ret;
+ ret = nvme_auth_init_ctrl(ctrl);
+ if (ret)
+ goto out_release_instance;
+
+ nvme_mpath_init_ctrl(ctrl);
+
device_initialize(&ctrl->ctrl_device);
ctrl->device = &ctrl->ctrl_device;
ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
@@ -4654,16 +4766,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->device->groups = nvme_dev_attr_groups;
ctrl->device->release = nvme_free_ctrl;
dev_set_drvdata(ctrl->device, ctrl);
+
+ return ret;
+
+out_release_instance:
+ ida_free(&nvme_instance_ida, ctrl->instance);
+out:
+ if (ctrl->discard_page)
+ __free_page(ctrl->discard_page);
+ cleanup_srcu_struct(&ctrl->srcu);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+
+/*
+ * On success, returns with an elevated controller reference and caller must
+ * use nvme_uninit_ctrl() to properly free resources associated with the ctrl.
+ */
+int nvme_add_ctrl(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
if (ret)
- goto out_release_instance;
+ return ret;
- nvme_get_ctrl(ctrl);
cdev_init(&ctrl->cdev, &nvme_dev_fops);
- ctrl->cdev.owner = ops->module;
+ ctrl->cdev.owner = ctrl->ops->module;
ret = cdev_device_add(&ctrl->cdev, ctrl->device);
if (ret)
- goto out_free_name;
+ return ret;
/*
* Initialize latency tolerance controls. The sysfs files won't
@@ -4674,48 +4806,34 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
- nvme_mpath_init_ctrl(ctrl);
- ret = nvme_auth_init_ctrl(ctrl);
- if (ret)
- goto out_free_cdev;
+ nvme_get_ctrl(ctrl);
return 0;
-out_free_cdev:
- nvme_fault_inject_fini(&ctrl->fault_inject);
- dev_pm_qos_hide_latency_tolerance(ctrl->device);
- cdev_device_del(&ctrl->cdev, ctrl->device);
-out_free_name:
- nvme_put_ctrl(ctrl);
- kfree_const(ctrl->device->kobj.name);
-out_release_instance:
- ida_free(&nvme_instance_ida, ctrl->instance);
-out:
- if (ctrl->discard_page)
- __free_page(ctrl->discard_page);
- return ret;
}
-EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+EXPORT_SYMBOL_GPL(nvme_add_ctrl);
/* let I/O to all namespaces fail in preparation for surprise removal */
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mark_disk_dead(ns->disk);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_unfreeze_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
@@ -4723,14 +4841,15 @@ EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
if (timeout <= 0)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
@@ -4738,23 +4857,25 @@ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_freeze_queue_wait(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_freeze_queue_start(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -4797,11 +4918,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
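
The recurring change through core.c (and multipath.c below) replaces namespaces_rwsem with SRCU-protected readers plus a mutex-serialized writer side. A compact sketch of that pattern on a stand-alone structure, assuming the usual <linux/srcu.h>/<linux/rculist.h> helpers:

/*
 * Compact sketch of the SRCU + mutex scheme used above. Readers may
 * sleep inside the SRCU section; writers publish list changes under the
 * mutex and wait for readers with synchronize_srcu() before freeing.
 */
struct foo_item {
	struct list_head	list;
	/* payload ... */
};

struct foo_ctrl {
	struct list_head	items;
	struct mutex		items_lock;	/* writer-side serialization */
	struct srcu_struct	srcu;		/* reader-side protection */
};

static void foo_for_each_item(struct foo_ctrl *ctrl,
			      void (*fn)(struct foo_item *item))
{
	struct foo_item *item;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_rcu(item, &ctrl->items, list)
		fn(item);
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}

static void foo_remove_item(struct foo_ctrl *ctrl, struct foo_item *item)
{
	mutex_lock(&ctrl->items_lock);
	list_del_rcu(&item->list);
	mutex_unlock(&ctrl->items_lock);
	synchronize_srcu(&ctrl->srcu);	/* no reader can still see it now */
	kfree(item);
}

The srcu_struct itself needs init_srcu_struct()/cleanup_srcu_struct() over its lifetime, which is exactly what the nvme_init_ctrl() and nvme_free_ctrl() hunks above add.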
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index c6ad2148c2e0..44e342a46f39 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -180,14 +180,14 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
- ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
return ret;
}
@@ -226,14 +226,14 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
- ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
@@ -271,15 +271,30 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
- ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
+int nvmf_subsystem_reset(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvmf_subsystem_reset);
+
/**
* nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
* connect() errors.
@@ -295,7 +310,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int errval, int offset, struct nvme_command *cmd,
struct nvmf_connect_data *data)
{
- int err_sctype = errval & ~NVME_SC_DNR;
+ int err_sctype = errval & ~NVME_STATUS_DNR;
if (errval < 0) {
dev_err(ctrl->device,
@@ -573,7 +588,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
*/
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
{
- if (status > 0 && (status & NVME_SC_DNR))
+ if (status > 0 && (status & NVME_STATUS_DNR))
return false;
if (status == -EKEYREJECTED)
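
NVME_SUBSYS_RESET, written to NVME_REG_NSSR by nvmf_subsystem_reset() above (and by the PCI variant later in this series), presumably carries the same value as the bare 0x4E564D65 literal it replaces in nvme_reset_subsystem(); that value is just the ASCII string "NVMe" packed into 32 bits, as this throwaway check shows:

/* Decode the 0x4E564D65 subsystem-reset magic back into ASCII. */
#include <stdio.h>

int main(void)
{
	unsigned int v = 0x4E564D65;

	printf("%c%c%c%c\n", (int)(v >> 24), (int)((v >> 16) & 0xff),
	       (int)((v >> 8) & 0xff), (int)(v & 0xff));	/* prints: NVMe */
	return 0;
}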
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 602135910ae9..21d75dc4a3a0 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -217,6 +217,7 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
+int nvmf_subsystem_reset(struct nvme_ctrl *ctrl);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 1ba10a5c656d..1d1b6441a339 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -75,7 +75,7 @@ void nvme_should_fail(struct request *req)
/* inject status code and DNR bit */
status = fault_inject->status;
if (fault_inject->dont_retry)
- status |= NVME_SC_DNR;
+ status |= NVME_STATUS_DNR;
nvme_req(req)->status = status;
}
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f0b081332749..b81af7919e94 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3132,7 +3132,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff);
- ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out_stop_keep_alive;
}
@@ -3140,7 +3140,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
- ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out_stop_keep_alive;
}
@@ -3325,7 +3325,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else {
if (portptr->port_state == FC_OBJSTATE_ONLINE) {
- if (status > 0 && (status & NVME_SC_DNR))
+ if (status > 0 && (status & NVME_STATUS_DNR))
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: reconnect failure\n",
ctrl->cnum);
@@ -3382,6 +3382,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
+ .subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_fc_free_ctrl,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
@@ -3444,12 +3445,11 @@ nvme_fc_existing_controller(struct nvme_fc_rport *rport,
return found;
}
-static struct nvme_ctrl *
-nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+static struct nvme_fc_ctrl *
+nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
struct nvme_fc_ctrl *ctrl;
- unsigned long flags;
int ret, idx, ctrl_loss_tmo;
if (!(rport->remoteport.port_role &
@@ -3538,7 +3538,35 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
if (lport->dev)
ctrl->ctrl.numa_node = dev_to_node(lport->dev);
- /* at this point, teardown path changes to ref counting on nvme ctrl */
+ return ctrl;
+
+out_free_queues:
+ kfree(ctrl->queues);
+out_free_ida:
+ put_device(ctrl->dev);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_free_ctrl:
+ kfree(ctrl);
+out_fail:
+ /* exits via these labels do not follow the ctrl refcounting teardown */
+ return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *
+nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ int ret;
+
+ ctrl = nvme_fc_alloc_ctrl(dev, opts, lport, rport);
+ if (IS_ERR(ctrl))
+ return ERR_CAST(ctrl);
+
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
&nvme_fc_admin_mq_ops,
@@ -3584,6 +3612,7 @@ fail_ctrl:
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
/* Remove core ctrl ref. */
nvme_put_ctrl(&ctrl->ctrl);
@@ -3597,20 +3626,8 @@ fail_ctrl:
nvme_fc_rport_get(rport);
return ERR_PTR(-EIO);
-
-out_free_queues:
- kfree(ctrl->queues);
-out_free_ida:
- put_device(ctrl->dev);
- ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_free_ctrl:
- kfree(ctrl);
-out_fail:
- /* exit via here doesn't follow ctlr ref points */
- return ERR_PTR(ret);
}
-
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 499a8bb7cac7..8b69427a4476 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -111,6 +111,13 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
}
+static void nvme_unmap_bio(struct bio *bio)
+{
+ if (bio_integrity(bio))
+ bio_integrity_unmap_free_user(bio);
+ blk_rq_unmap_user(bio);
+}
+
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -157,7 +164,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
out_unmap:
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
out:
blk_mq_free_request(req);
return ret;
@@ -195,7 +202,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
blk_mq_free_request(req);
if (effects)
@@ -406,7 +413,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
+ nvme_unmap_bio(pdu->bio);
io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
@@ -432,7 +439,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
*/
if (blk_rq_is_poll(req)) {
if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
+ nvme_unmap_bio(pdu->bio);
io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
} else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
@@ -789,15 +796,15 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
bool open_for_write)
{
struct nvme_ns *ns;
- int ret;
+ int ret, srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
if (list_empty(&ctrl->namespaces)) {
ret = -ENOTTY;
goto out_unlock;
}
- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
@@ -807,15 +814,18 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
- kref_get(&ns->kref);
- up_read(&ctrl->namespaces_rwsem);
+ if (!nvme_get_ns(ns)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
nvme_put_ns(ns);
return ret;
out_unlock:
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d16e976ae1a4..91d9eb3c22ef 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -17,6 +17,7 @@ MODULE_PARM_DESC(multipath,
static const char *nvme_iopolicy_names[] = {
[NVME_IOPOLICY_NUMA] = "numa",
[NVME_IOPOLICY_RR] = "round-robin",
+ [NVME_IOPOLICY_QD] = "queue-depth",
};
static int iopolicy = NVME_IOPOLICY_NUMA;
@@ -29,6 +30,8 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
iopolicy = NVME_IOPOLICY_NUMA;
else if (!strncmp(val, "round-robin", 11))
iopolicy = NVME_IOPOLICY_RR;
+ else if (!strncmp(val, "queue-depth", 11))
+ iopolicy = NVME_IOPOLICY_QD;
else
return -EINVAL;
@@ -43,7 +46,7 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
- "Default multipath I/O policy; 'numa' (default) or 'round-robin'");
+ "Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'");
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
@@ -83,7 +86,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- u16 status = nvme_req(req)->status & 0x7ff;
+ u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK;
unsigned long flags;
struct bio *bio;
@@ -118,7 +121,8 @@ void nvme_failover_req(struct request *req)
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ nvme_req(req)->status = 0;
+ nvme_end_req(req);
kblockd_schedule_work(&ns->head->requeue_work);
}
@@ -127,6 +131,11 @@ void nvme_mpath_start_request(struct request *rq)
struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns->head->disk;
+ if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+ atomic_inc(&ns->ctrl->nr_active);
+ nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
+ }
+
if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
return;
@@ -140,6 +149,9 @@ void nvme_mpath_end_request(struct request *rq)
{
struct nvme_ns *ns = rq->q->queuedata;
+ if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
+ atomic_dec_if_positive(&ns->ctrl->nr_active);
+
if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
return;
bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
@@ -150,16 +162,17 @@ void nvme_mpath_end_request(struct request *rq)
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
static const char *nvme_ana_state_names[] = {
@@ -193,13 +206,14 @@ out:
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
nvme_mpath_clear_current_path(ns);
kblockd_schedule_work(&ns->head->requeue_work);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -288,10 +302,15 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}
-static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
- int node, struct nvme_ns *old)
+static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns, *found = NULL;
+ int node = numa_node_id();
+ struct nvme_ns *old = srcu_dereference(head->current_path[node],
+ &head->srcu);
+
+ if (unlikely(!old))
+ return __nvme_find_path(head, node);
if (list_is_singular(&head->list)) {
if (nvme_path_is_disabled(old))
@@ -331,13 +350,49 @@ out:
return found;
}
+static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
+ unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
+ unsigned int depth;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (nvme_path_is_disabled(ns))
+ continue;
+
+ depth = atomic_read(&ns->ctrl->nr_active);
+
+ switch (ns->ana_state) {
+ case NVME_ANA_OPTIMIZED:
+ if (depth < min_depth_opt) {
+ min_depth_opt = depth;
+ best_opt = ns;
+ }
+ break;
+ case NVME_ANA_NONOPTIMIZED:
+ if (depth < min_depth_nonopt) {
+ min_depth_nonopt = depth;
+ best_nonopt = ns;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (min_depth_opt == 0)
+ return best_opt;
+ }
+
+ return best_opt ? best_opt : best_nonopt;
+}
+
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
ns->ana_state == NVME_ANA_OPTIMIZED;
}
-inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head)
{
int node = numa_node_id();
struct nvme_ns *ns;
@@ -345,14 +400,23 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
ns = srcu_dereference(head->current_path[node], &head->srcu);
if (unlikely(!ns))
return __nvme_find_path(head, node);
-
- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
- return nvme_round_robin_path(head, node, ns);
if (unlikely(!nvme_path_is_optimized(ns)))
return __nvme_find_path(head, node);
return ns;
}
+inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+{
+ switch (READ_ONCE(head->subsys->iopolicy)) {
+ case NVME_IOPOLICY_QD:
+ return nvme_queue_depth_path(head);
+ case NVME_IOPOLICY_RR:
+ return nvme_round_robin_path(head);
+ default:
+ return nvme_numa_path(head);
+ }
+}
+
static bool nvme_available_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns;
@@ -424,6 +488,21 @@ static void nvme_ns_head_release(struct gendisk *disk)
nvme_put_ns_head(disk->private_data);
}
+static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id type)
+{
+ struct nvme_ns_head *head = disk->private_data;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (ns)
+ ret = nvme_ns_get_unique_id(ns, id, type);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
@@ -451,6 +530,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.ioctl = nvme_ns_head_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = nvme_getgeo,
+ .get_unique_id = nvme_ns_head_get_unique_id,
.report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -518,7 +598,6 @@ static void nvme_requeue_work(struct work_struct *work)
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
struct queue_limits lim;
- bool vwc = false;
mutex_init(&head->lock);
bio_list_init(&head->requeue_list);
@@ -536,6 +615,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_set_stacking_limits(&lim);
lim.dma_alignment = 3;
+ lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
if (head->ids.csi != NVME_CSI_ZNS)
lim.max_zone_append_sectors = 0;
@@ -546,24 +626,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
head->disk->private_data = head;
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
- /*
- * This assumes all controllers that refer to a namespace either
- * support poll queues or not. That is not a strict guarantee,
- * but if the assumption is wrong the effect is only suboptimal
- * performance but not correctness problem.
- */
- if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
- ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
- blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
-
- /* we need to propagate up the VMC settings */
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- vwc = true;
- blk_queue_write_cache(head->disk->queue, vwc, vwc);
return 0;
}
@@ -595,7 +657,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
int node, srcu_idx;
srcu_idx = srcu_read_lock(&head->srcu);
- for_each_node(node)
+ for_each_online_node(node)
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
@@ -680,6 +742,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
unsigned *nr_change_groups = data;
struct nvme_ns *ns;
+ int srcu_idx;
dev_dbg(ctrl->device, "ANA group %d: %s.\n",
le32_to_cpu(desc->grpid),
@@ -691,8 +754,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
unsigned nsid;
again:
nsid = le32_to_cpu(desc->nsids[n]);
@@ -705,7 +768,7 @@ again:
if (ns->head->ns_id > nsid)
goto again;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return 0;
}
@@ -799,6 +862,29 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}
+static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys,
+ int iopolicy)
+{
+ struct nvme_ctrl *ctrl;
+ int old_iopolicy = READ_ONCE(subsys->iopolicy);
+
+ if (old_iopolicy == iopolicy)
+ return;
+
+ WRITE_ONCE(subsys->iopolicy, iopolicy);
+
+ /* iopolicy changes clear the mpath by design */
+ mutex_lock(&nvme_subsystems_lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvme_mpath_clear_ctrl_paths(ctrl);
+ mutex_unlock(&nvme_subsystems_lock);
+
+ pr_notice("subsysnqn %s iopolicy changed from %s to %s\n",
+ subsys->subnqn,
+ nvme_iopolicy_names[old_iopolicy],
+ nvme_iopolicy_names[iopolicy]);
+}
+
static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -808,7 +894,7 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
- WRITE_ONCE(subsys->iopolicy, i);
+ nvme_subsys_iopolicy_update(subsys, i);
return count;
}
}
@@ -871,9 +957,6 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
nvme_mpath_set_live(ns);
}
- if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
- ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
ns->head->disk->nr_zones = ns->disk->nr_zones;
@@ -919,6 +1002,9 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
!(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
return 0;
+ /* initialize this in the identify path to cover controller resets */
+ atomic_set(&ctrl->nr_active, 0);
+
if (!ctrl->max_namespaces ||
ctrl->max_namespaces > le32_to_cpu(id->nn)) {
dev_err(ctrl->device,
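
The new queue-depth policy picks, per I/O, a usable path on the controller with the fewest commands currently in flight, preferring ANA-optimized paths and falling back to the least-loaded non-optimized one; nr_active is the per-controller in-flight counter maintained in nvme_mpath_start_request()/nvme_mpath_end_request(). A toy user-space model of that selection (structures and values are illustrative only):

/* Toy model of the two-tier least-depth selection added above. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_path {
	const char *name;
	bool optimized;
	unsigned int inflight;
};

static const struct toy_path *pick_path(const struct toy_path *p, int n)
{
	const struct toy_path *best_opt = NULL, *best_nonopt = NULL;
	unsigned int min_opt = UINT_MAX, min_nonopt = UINT_MAX;

	for (int i = 0; i < n; i++) {
		unsigned int d = p[i].inflight;

		if (p[i].optimized && d < min_opt) {
			min_opt = d;
			best_opt = &p[i];
		} else if (!p[i].optimized && d < min_nonopt) {
			min_nonopt = d;
			best_nonopt = &p[i];
		}
	}
	return best_opt ? best_opt : best_nonopt;
}

int main(void)
{
	struct toy_path paths[] = {
		{ "nvme1c1n1", true, 12 },
		{ "nvme1c2n1", true, 4 },
		{ "nvme1c3n1", false, 0 },
	};

	printf("%s\n", pick_path(paths, 3)->name);	/* nvme1c2n1 */
	return 0;
}

As the sysfs store above shows, the policy is selected per subsystem through the iopolicy attribute, with the module-level default coming from the iopolicy parameter handled by nvme_set_iopolicy().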
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cacc56f4bbf4..f900e44243ae 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -49,6 +49,7 @@ extern unsigned int admin_timeout;
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
+extern struct mutex nvme_subsystems_lock;
/*
* List of workarounds for devices that required behavior not specified in
@@ -195,6 +196,7 @@ enum {
NVME_REQ_CANCELLED = (1 << 0),
NVME_REQ_USERCMD = (1 << 1),
NVME_MPATH_IO_STATS = (1 << 2),
+ NVME_MPATH_CNT_ACTIVE = (1 << 3),
};
static inline struct nvme_request *nvme_req(struct request *req)
@@ -282,7 +284,8 @@ struct nvme_ctrl {
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
- struct rw_semaphore namespaces_rwsem;
+ struct mutex namespaces_lock;
+ struct srcu_struct srcu;
struct device ctrl_device;
struct device *device; /* char device */
#ifdef CONFIG_NVME_HWMON
@@ -359,6 +362,7 @@ struct nvme_ctrl {
size_t ana_log_size;
struct timer_list anatt_timer;
struct work_struct ana_work;
+ atomic_t nr_active;
#endif
#ifdef CONFIG_NVME_HOST_AUTH
@@ -407,6 +411,7 @@ static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
enum nvme_iopolicy {
NVME_IOPOLICY_NUMA,
NVME_IOPOLICY_RR,
+ NVME_IOPOLICY_QD,
};
struct nvme_subsystem {
@@ -471,8 +476,6 @@ struct nvme_ns_head {
u8 pi_type;
u8 pi_offset;
u8 guard_type;
- u16 sgs;
- u32 sws;
#ifdef CONFIG_BLK_DEV_ZONED
u64 zsze;
#endif
@@ -503,7 +506,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
- NVME_NS_DEAC, /* DEAC bit in Write Zeores supported */
+ NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
};
struct nvme_ns {
@@ -552,6 +555,7 @@ struct nvme_ctrl_ops {
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl);
+ int (*subsystem_reset)(struct nvme_ctrl *ctrl);
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
@@ -650,18 +654,9 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
- int ret;
-
- if (!ctrl->subsystem)
+ if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
return -ENOTTY;
- if (!nvme_wait_reset(ctrl))
- return -EBUSY;
-
- ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
- if (ret)
- return ret;
-
- return nvme_try_sched_reset(ctrl);
+ return ctrl->ops->subsystem_reset(ctrl);
}
/*
@@ -690,7 +685,7 @@ static inline u32 nvme_bytes_to_numd(size_t len)
static inline bool nvme_is_ana_error(u16 status)
{
- switch (status & 0x7ff) {
+ switch (status & NVME_SCT_SC_MASK) {
case NVME_SC_ANA_TRANSITION:
case NVME_SC_ANA_INACCESSIBLE:
case NVME_SC_ANA_PERSISTENT_LOSS:
@@ -703,7 +698,7 @@ static inline bool nvme_is_ana_error(u16 status)
static inline bool nvme_is_path_error(u16 status)
{
/* check for a status code type of 'path related status' */
- return (status & 0x700) == 0x300;
+ return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
}
/*
@@ -767,6 +762,7 @@ static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
}
}
+void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);
@@ -792,6 +788,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
+int nvme_add_ctrl(struct nvme_ctrl *ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
@@ -877,7 +874,7 @@ enum {
NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
/* Set BLK_MQ_REQ_RESERVED when allocating request */
NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
- /* Retry command when NVME_SC_DNR is not set in the result */
+ /* Retry command when NVME_STATUS_DNR is not set in the result */
NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
};
@@ -1062,6 +1059,9 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
}
#endif /* CONFIG_NVME_MULTIPATH */
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+ enum blk_unique_id type);
+
struct nvme_zone_info {
u64 zone_size;
unsigned int max_open_zones;
@@ -1161,6 +1161,7 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+bool nvme_get_ns(struct nvme_ns *ns);
void nvme_put_ns(struct nvme_ns *ns);
static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 710043086dff..21f8e1b9801f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -778,7 +778,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
+ bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
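The two-PRP fast path cares about the offset within a controller page, not within the (possibly larger) source page, so the guard now masks bv_offset the same way nvme_setup_prp_simple() does. A worked example, assuming NVME_CTRL_PAGE_SIZE is 4096 and a build where bv_offset can exceed one controller page (e.g. 16 KiB pages):

/*
 * bv_offset = 0x2200, bv_len = 0x1000:
 *   old check:  0x2200 + 0x1000 = 0x3200 > 0x2000           -> PRP-list path
 *   new check: (0x2200 & 0xfff) + 0x1000 = 0x1200 <= 0x2000 -> simple path
 *
 * Relative to the controller page the data starts at offset 0x200 and ends
 * at 0x1200, so it touches at most two controller pages and two PRPs are
 * enough, which is exactly what nvme_setup_prp_simple() builds.
 */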
@@ -825,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct bio_vec bv = rq_integrity_vec(req);
- iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
- rq_dma_dir(req), 0);
+ iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
@@ -966,7 +967,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_unmap_page(dev->dev, iod->meta_dma,
- rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+ rq_integrity_vec(req).bv_len, rq_dma_dir(req));
}
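Both metadata hunks follow from a block-layer change outside this diff: rq_integrity_vec() is assumed to now return the bio_vec by value instead of a pointer, hence the local copy fed to dma_map_bvec() above and the .bv_len access here. The assumed new helper shape, for context:

/* assumed by-value helper in include/linux/blk-integrity.h (not shown here) */
static inline struct bio_vec rq_integrity_vec(struct request *rq)
{
	return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
				 rq->bio->bi_integrity->bip_iter);
}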
if (blk_rq_nr_phys_segments(req))
@@ -1142,6 +1143,41 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
spin_unlock(&nvmeq->sq_lock);
}
+static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ int ret = 0;
+
+ /*
+ * Taking the shutdown_lock ensures the BAR mapping is not being
+ * altered by reset_work. Holding this lock before the RESETTING state
+ * change, if successful, also ensures nvme_remove won't be able to
+ * proceed to iounmap until we're done.
+ */
+ mutex_lock(&dev->shutdown_lock);
+ if (!dev->bar_mapped_size) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);
+
+ /*
+ * Read controller status to flush the previous write and trigger a
+ * pcie read error.
+ */
+ readl(dev->bar + NVME_REG_CSTS);
+unlock:
+ mutex_unlock(&dev->shutdown_lock);
+ return ret;
+}
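A minimal user-space sketch that exercises this new path through the existing NVME_IOCTL_SUBSYS_RESET ioctl on the controller character device; the device node name is an example and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0", O_RDWR);	/* example controller node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ends up in nvme_reset_subsystem() -> ctrl->ops->subsystem_reset() */
	if (ioctl(fd, NVME_IOCTL_SUBSYS_RESET) < 0)
		perror("NVME_IOCTL_SUBSYS_RESET");
	close(fd);
	return 0;
}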
+
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
struct nvme_command c = { };
@@ -2858,6 +2894,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read64 = nvme_pci_reg_read64,
.free_ctrl = nvme_pci_free_ctrl,
.submit_async_event = nvme_pci_submit_async_event,
+ .subsystem_reset = nvme_pci_subsystem_reset,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
.supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
@@ -3014,6 +3051,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(dev))
return PTR_ERR(dev);
+ result = nvme_add_ctrl(&dev->ctrl);
+ if (result)
+ goto out_put_ctrl;
+
result = nvme_dev_map(dev);
if (result)
goto out_uninit_ctrl;
@@ -3100,6 +3141,7 @@ out_dev_unmap:
nvme_dev_unmap(dev);
out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&dev->ctrl);
return result;
}
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index e05571b2a1b0..7347ddf85f00 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -72,12 +72,12 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}
-static int nvme_sc_to_pr_err(int nvme_sc)
+static int nvme_status_to_pr_err(int status)
{
- if (nvme_is_path_error(nvme_sc))
+ if (nvme_is_path_error(status))
return PR_STS_PATH_FAILED;
- switch (nvme_sc) {
+ switch (status & NVME_SCT_SC_MASK) {
case NVME_SC_SUCCESS:
return PR_STS_SUCCESS;
case NVME_SC_RESERVATION_CONFLICT:
@@ -121,7 +121,7 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
if (ret < 0)
return ret;
- return nvme_sc_to_pr_err(ret);
+ return nvme_status_to_pr_err(ret);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -196,7 +196,7 @@ retry:
if (ret < 0)
return ret;
- return nvme_sc_to_pr_err(ret);
+ return nvme_status_to_pr_err(ret);
}
static int nvme_pr_read_keys(struct block_device *bdev,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 51a62b0c645a..2eb33842f971 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2201,6 +2201,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
+ .subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_rdma_free_ctrl,
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
@@ -2237,12 +2238,11 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
return found;
}
-static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
struct nvme_rdma_ctrl *ctrl;
int ret;
- bool changed;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -2304,6 +2304,30 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (ret)
goto out_kfree_queues;
+ return ctrl;
+
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ bool changed;
+ int ret;
+
+ ctrl = nvme_rdma_alloc_ctrl(dev, opts);
+ if (IS_ERR(ctrl))
+ return ERR_CAST(ctrl);
+
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
WARN_ON_ONCE(!changed);
@@ -2322,15 +2346,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
-out_kfree_queues:
- kfree(ctrl->queues);
-out_free_ctrl:
- kfree(ctrl);
- return ERR_PTR(ret);
}
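The create path is split into an allocation step and a registration step, and the same shape is repeated for TCP and the loop target further down: nvme_init_ctrl() (inside the new *_alloc_ctrl() helper) prepares the controller without exposing it, the new nvme_add_ctrl() is assumed to perform the device/cdev registration previously done during init, and any failure afterwards unwinds through a single nvme_put_ctrl(). Schematically (names with a transport_ prefix are placeholders):

/* schematic of the two-step bring-up every transport now follows */
static struct nvme_ctrl *transport_create_ctrl_sketch(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct transport_ctrl *ctrl;
	int ret;

	ctrl = transport_alloc_ctrl(dev, opts);	/* kzalloc + nvme_init_ctrl() */
	if (IS_ERR(ctrl))
		return ERR_CAST(ctrl);

	ret = nvme_add_ctrl(&ctrl->ctrl);	/* make the controller visible */
	if (ret)
		goto out_put_ctrl;

	/* ... CONNECTING state change, queue setup, nvme_start_ctrl() ... */
	return &ctrl->ctrl;

out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);		/* frees via ->free_ctrl() */
	return ERR_PTR(ret);
}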
static struct nvmf_transport_ops nvme_rdma_transport = {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 8b5e4327fe83..a2a47d3ab99f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2662,6 +2662,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
+ .subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_tcp_free_ctrl,
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
@@ -2686,7 +2687,7 @@ nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
return found;
}
-static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
struct nvme_tcp_ctrl *ctrl;
@@ -2761,6 +2762,28 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
if (ret)
goto out_kfree_queues;
+ return ctrl;
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_tcp_ctrl *ctrl;
+ int ret;
+
+ ctrl = nvme_tcp_alloc_ctrl(dev, opts);
+ if (IS_ERR(ctrl))
+ return ERR_CAST(ctrl);
+
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
WARN_ON_ONCE(1);
ret = -EINTR;
@@ -2782,15 +2805,11 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
-out_kfree_queues:
- kfree(ctrl->queues);
-out_free_ctrl:
- kfree(ctrl);
- return ERR_PTR(ret);
}
static struct nvmf_transport_ops nvme_tcp_transport = {
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 77aa0f440a6d..9a06f9d98cd6 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -108,13 +108,12 @@ free_data:
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
struct nvme_zone_info *zi)
{
- lim->zoned = 1;
+ lim->features |= BLK_FEAT_ZONED;
lim->max_open_zones = zi->max_open_zones;
lim->max_active_zones = zi->max_active_zones;
lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
lim->chunk_sectors = ns->head->zsze =
nvme_lba_to_sect(ns->head, zi->zone_size);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
}
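The removed lim->zoned flag and QUEUE_FLAG_ZONE_RESETALL are assumed to be folded into a single queue_limits feature bit on the block-layer side (not part of this section); consumers would then test it roughly as below:

/* assumed block-layer helper once zoning is a queue_limits feature bit */
static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
		(q->limits.features & BLK_FEAT_ZONED);
}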
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 872dd1a0acd8..46be031f91b4 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -6,7 +6,6 @@ config NVME_TARGET
depends on CONFIGFS_FS
select NVME_KEYRING if NVME_TARGET_TCP_TLS
select KEYS if NVME_TARGET_TCP_TLS
- select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC
help
This enables target side support for the NVMe protocol, that is
@@ -18,6 +17,15 @@ config NVME_TARGET
To configure the NVMe target you probably want to use the nvmetcli
tool from http://git.infradead.org/users/hch/nvmetcli.git.
+config NVME_TARGET_DEBUGFS
+ bool "NVMe Target debugfs support"
+ depends on NVME_TARGET
+ help
+ This enables debugfs support to display the connected controllers
+ of each subsystem.
+
+ If unsure, say N.
+
config NVME_TARGET_PASSTHRU
bool "NVMe Target Passthrough support"
depends on NVME_TARGET
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index c66820102493..c402c44350b2 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
+nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index f5b7054a4a05..f7e1156ac7ec 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -344,7 +344,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
pr_debug("unhandled lid %d on qid %d\n",
req->cmd->get_log_page.lid, req->sq->qid);
req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
- nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
@@ -496,7 +496,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
goto out;
}
@@ -662,7 +662,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
out:
nvmet_req_complete(req, status);
@@ -724,7 +724,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
pr_debug("unhandled identify cns %d on qid %d\n",
req->cmd->identify.cns, req->sq->qid);
req->error_loc = offsetof(struct nvme_identify, cns);
- nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}
/*
@@ -807,7 +807,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
if (val32 & ~mask) {
req->error_loc = offsetof(struct nvme_common_command, cdw11);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
@@ -833,7 +833,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
ncqr = (cdw11 >> 16) & 0xffff;
nsqr = cdw11 & 0xffff;
if (ncqr == 0xffff || nsqr == 0xffff) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
nvmet_set_result(req,
@@ -846,14 +846,14 @@ void nvmet_execute_set_features(struct nvmet_req *req)
status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
break;
case NVME_FEAT_HOST_ID:
- status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
break;
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_set_feat_write_protect(req);
break;
default:
req->error_loc = offsetof(struct nvme_common_command, cdw10);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -939,7 +939,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
req->error_loc =
offsetof(struct nvme_common_command, cdw11);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -952,7 +952,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -969,7 +969,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
mutex_lock(&ctrl->lock);
if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
return;
}
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
@@ -1006,7 +1006,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_admin_cmd(req);
if (unlikely(!nvmet_check_auth_status(req)))
- return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 7d2633940f9b..8bc3f431c77f 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -314,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c1,
challenge, shash_len);
if (ret)
- goto out_free_response;
+ goto out_free_challenge;
}
pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
@@ -325,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
- goto out_free_response;
+ goto out_free_challenge;
}
shash->tfm = shash_tfm;
ret = crypto_shash_init(shash);
@@ -361,9 +361,10 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
goto out;
ret = crypto_shash_final(shash, response);
out:
+ kfree(shash);
+out_free_challenge:
if (challenge != req->sq->dhchap_c1)
kfree(challenge);
- kfree(shash);
out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
@@ -427,14 +428,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c2,
challenge, shash_len);
if (ret)
- goto out_free_response;
+ goto out_free_challenge;
}
shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
- goto out_free_response;
+ goto out_free_challenge;
}
shash->tfm = shash_tfm;
@@ -471,9 +472,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
goto out;
ret = crypto_shash_final(shash, response);
out:
+ kfree(shash);
+out_free_challenge:
if (challenge != req->sq->dhchap_c2)
kfree(challenge);
- kfree(shash);
out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 7c43a0ad6877..685e89b35d33 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -410,7 +410,29 @@ static ssize_t nvmet_addr_tsas_show(struct config_item *item,
return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
}
}
- return sprintf(page, "reserved\n");
+ return sprintf(page, "\n");
+}
+
+static u8 nvmet_addr_tsas_rdma_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name))
+ return nvmet_addr_tsas_rdma[i].type;
+ }
+ return NVMF_RDMA_QPTYPE_INVALID;
+}
+
+static u8 nvmet_addr_tsas_tcp_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name))
+ return nvmet_addr_tsas_tcp[i].type;
+ }
+ return NVMF_TCP_SECTYPE_INVALID;
}
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
@@ -418,20 +440,19 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = nvmet_port_disc_addr_treq_mask(port);
- u8 sectype;
- int i;
+ u8 sectype, qptype;
if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
- if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
- sectype = nvmet_addr_tsas_tcp[i].type;
+ if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
+ qptype = nvmet_addr_tsas_rdma_store(page);
+ if (qptype == port->disc_addr.tsas.rdma.qptype)
+ return count;
+ } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
+ sectype = nvmet_addr_tsas_tcp_store(page);
+ if (sectype != NVMF_TCP_SECTYPE_INVALID)
goto found;
- }
}
pr_err("Invalid value '%s' for tsas\n", page);
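The new *_store() helpers above look names up in the same small tables the show path already uses. For context, the tables are assumed to follow the existing nvmet_type_name_map pattern along these lines (entries illustrative; the *_INVALID sentinels are assumed to be new additions to the corresponding enums in include/linux/nvme.h):

/* assumed table layout backing the tsas show/store helpers */
static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
	{ NVMF_TCP_SECTYPE_NONE,	"none" },
	{ NVMF_TCP_SECTYPE_TLS13,	"tls1.3" },
};

static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
	{ NVMF_RDMA_QPTYPE_CONNECTED,	"connected" },
	{ NVMF_RDMA_QPTYPE_DATAGRAM,	"datagram" },
};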
@@ -676,10 +697,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
if (kstrtobool(page, &enable))
return -EINVAL;
+ /*
+ * Take the global nvmet_config_sem because the disable routine has a
+ * window where it releases the subsys lock, giving a parallel enable a
+ * chance to run concurrently and leaving the disable with a
+ * misaccounted ns percpu_ref.
+ */
+ down_write(&nvmet_config_sem);
if (enable)
ret = nvmet_ns_enable(ns);
else
nvmet_ns_disable(ns);
+ up_write(&nvmet_config_sem);
return ret ? ret : count;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2fde22323622..ed2424f8a396 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -16,6 +16,7 @@
#include "trace.h"
#include "nvmet.h"
+#include "debugfs.h"
struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
@@ -55,18 +56,18 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
return NVME_SC_SUCCESS;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
- return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
case -EREMOTEIO:
req->error_loc = offsetof(struct nvme_rw_command, slba);
- return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
case -EOPNOTSUPP:
req->error_loc = offsetof(struct nvme_common_command, opcode);
switch (req->cmd->common.opcode) {
case nvme_cmd_dsm:
case nvme_cmd_write_zeroes:
- return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
default:
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
break;
case -ENODATA:
@@ -76,7 +77,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
}
}
@@ -86,7 +87,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
req->sq->qid);
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
@@ -97,7 +98,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
{
if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
}
return 0;
}
@@ -106,7 +107,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
}
return 0;
}
@@ -115,7 +116,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
}
return 0;
}
@@ -145,7 +146,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
while (ctrl->nr_async_event_cmds) {
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
mutex_lock(&ctrl->lock);
}
mutex_unlock(&ctrl->lock);
@@ -444,7 +445,7 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
req->error_loc = offsetof(struct nvme_common_command, nsid);
if (nvmet_subsys_nsid_exists(subsys, nsid))
return NVME_SC_INTERNAL_PATH_ERROR;
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
}
percpu_ref_get(&req->ns->ref);
@@ -818,6 +819,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
percpu_ref_exit(&sq->ref);
nvmet_auth_sq_free(sq);
+ /*
+ * We must re-read sq->ctrl after waiting for inflight IO to complete,
+ * because an admin connect may have sneaked in after we stored
+ * sq->ctrl locally but before we killed the percpu_ref. That admin
+ * connect allocates and assigns sq->ctrl, which now needs a final ref
+ * put, as this ctrl is going away.
+ */
+ ctrl = sq->ctrl;
+
if (ctrl) {
/*
* The teardown flow may take some time, and the host may not
@@ -895,7 +905,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return nvmet_parse_fabrics_io_cmd(req);
if (unlikely(!nvmet_check_auth_status(req)))
- return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
@@ -948,6 +958,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->metadata_sg_cnt = 0;
req->transfer_len = 0;
req->metadata_len = 0;
+ req->cqe->result.u64 = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
@@ -957,7 +968,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
req->error_loc = offsetof(struct nvme_common_command, flags);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto fail;
}
@@ -968,7 +979,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
*/
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
req->error_loc = offsetof(struct nvme_common_command, flags);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto fail;
}
@@ -986,7 +997,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
trace_nvmet_req_init(req, req->cmd);
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto fail;
}
@@ -1013,7 +1024,7 @@ bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
if (unlikely(len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
return false;
}
@@ -1025,7 +1036,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
if (unlikely(data_len > req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
return false;
}
@@ -1294,18 +1305,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
req->cmd->common.opcode, req->sq->qid);
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
}
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
req->cmd->common.opcode, req->sq->qid);
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
}
if (unlikely(!nvmet_check_auth_status(req))) {
pr_warn("qid %d not authenticated\n", req->sq->qid);
- return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
}
return 0;
}
@@ -1379,7 +1390,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
int ret;
u16 status;
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
@@ -1395,7 +1406,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
hostnqn, subsysnqn);
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
- status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, dptr);
goto out_put_subsystem;
}
@@ -1446,7 +1457,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
- status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
goto out_free_sqs;
}
ctrl->cntlid = ret;
@@ -1469,6 +1480,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_lock(&subsys->lock);
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
nvmet_setup_p2p_ns_map(ctrl, req);
+ nvmet_debugfs_ctrl_setup(ctrl);
mutex_unlock(&subsys->lock);
*ctrlp = ctrl;
@@ -1503,6 +1515,8 @@ static void nvmet_ctrl_free(struct kref *ref)
nvmet_destroy_auth(ctrl);
+ nvmet_debugfs_ctrl_free(ctrl);
+
ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
@@ -1529,6 +1543,14 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
+ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len)
+{
+ if (!ctrl->ops->host_traddr)
+ return -EOPNOTSUPP;
+ return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
+}
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn)
{
@@ -1623,8 +1645,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
INIT_LIST_HEAD(&subsys->ctrls);
INIT_LIST_HEAD(&subsys->hosts);
+ ret = nvmet_debugfs_subsys_setup(subsys);
+ if (ret)
+ goto free_subsysnqn;
+
return subsys;
+free_subsysnqn:
+ kfree(subsys->subsysnqn);
free_fr:
kfree(subsys->firmware_rev);
free_mn:
@@ -1641,6 +1669,8 @@ static void nvmet_subsys_free(struct kref *ref)
WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+ nvmet_debugfs_subsys_free(subsys);
+
xa_destroy(&subsys->namespaces);
nvmet_passthru_subsys_free(subsys);
@@ -1695,11 +1725,18 @@ static int __init nvmet_init(void)
if (error)
goto out_free_nvmet_work_queue;
- error = nvmet_init_configfs();
+ error = nvmet_init_debugfs();
if (error)
goto out_exit_discovery;
+
+ error = nvmet_init_configfs();
+ if (error)
+ goto out_exit_debugfs;
+
return 0;
+out_exit_debugfs:
+ nvmet_exit_debugfs();
out_exit_discovery:
nvmet_exit_discovery();
out_free_nvmet_work_queue:
@@ -1716,6 +1753,7 @@ out_destroy_bvec_cache:
static void __exit nvmet_exit(void)
{
nvmet_exit_configfs();
+ nvmet_exit_debugfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
new file mode 100644
index 000000000000..cb2befc8619e
--- /dev/null
+++ b/drivers/nvme/target/debugfs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DebugFS interface for the NVMe target.
+ * Copyright (c) 2022-2024 Shadow
+ * Copyright (c) 2024 SUSE LLC
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include "nvmet.h"
+#include "debugfs.h"
+
+struct dentry *nvmet_debugfs;
+
+#define NVMET_DEBUGFS_ATTR(field) \
+ static int field##_open(struct inode *inode, struct file *file) \
+ { return single_open(file, field##_show, inode->i_private); } \
+ \
+ static const struct file_operations field##_fops = { \
+ .open = field##_open, \
+ .read = seq_read, \
+ .release = single_release, \
+ }
+
+#define NVMET_DEBUGFS_RW_ATTR(field) \
+ static int field##_open(struct inode *inode, struct file *file) \
+ { return single_open(file, field##_show, inode->i_private); } \
+ \
+ static const struct file_operations field##_fops = { \
+ .open = field##_open, \
+ .read = seq_read, \
+ .write = field##_write, \
+ .release = single_release, \
+ }
+
+static int nvmet_ctrl_hostnqn_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_puts(m, ctrl->hostnqn);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_hostnqn);
+
+static int nvmet_ctrl_kato_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_printf(m, "%d\n", ctrl->kato);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_kato);
+
+static int nvmet_ctrl_port_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_printf(m, "%d\n", le16_to_cpu(ctrl->port->disc_addr.portid));
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_port);
+
+static const char *const csts_state_names[] = {
+ [NVME_CSTS_RDY] = "ready",
+ [NVME_CSTS_CFS] = "fatal",
+ [NVME_CSTS_NSSRO] = "reset",
+ [NVME_CSTS_SHST_OCCUR] = "shutdown",
+ [NVME_CSTS_SHST_CMPLT] = "completed",
+ [NVME_CSTS_PP] = "paused",
+};
+
+static int nvmet_ctrl_state_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+ bool sep = false;
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ int state = BIT(i);
+
+ if (!(ctrl->csts & state))
+ continue;
+ if (sep)
+ seq_puts(m, "|");
+ sep = true;
+ if (csts_state_names[state])
+ seq_puts(m, csts_state_names[state]);
+ else
+ seq_printf(m, "%d", state);
+ }
+ if (sep)
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static ssize_t nvmet_ctrl_state_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct nvmet_ctrl *ctrl = m->private;
+ char reset[16];
+
+ if (count >= sizeof(reset))
+ return -EINVAL;
+ if (copy_from_user(reset, buf, count))
+ return -EFAULT;
+ if (!memcmp(reset, "fatal", 5))
+ nvmet_ctrl_fatal_error(ctrl);
+ else
+ return -EINVAL;
+ return count;
+}
+NVMET_DEBUGFS_RW_ATTR(nvmet_ctrl_state);
+
+static int nvmet_ctrl_host_traddr_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+ ssize_t size;
+ char buf[NVMF_TRADDR_SIZE + 1];
+
+ size = nvmet_ctrl_host_traddr(ctrl, buf, NVMF_TRADDR_SIZE);
+ if (size < 0) {
+ buf[0] = '\0';
+ size = 0;
+ }
+ buf[size] = '\0';
+ seq_printf(m, "%s\n", buf);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_host_traddr);
+
+int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
+{
+ char name[32];
+ struct dentry *parent = ctrl->subsys->debugfs_dir;
+ int ret;
+
+ if (!parent)
+ return -ENODEV;
+ snprintf(name, sizeof(name), "ctrl%d", ctrl->cntlid);
+ ctrl->debugfs_dir = debugfs_create_dir(name, parent);
+ if (IS_ERR(ctrl->debugfs_dir)) {
+ ret = PTR_ERR(ctrl->debugfs_dir);
+ ctrl->debugfs_dir = NULL;
+ return ret;
+ }
+ debugfs_create_file("port", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_port_fops);
+ debugfs_create_file("hostnqn", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_hostnqn_fops);
+ debugfs_create_file("kato", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_kato_fops);
+ debugfs_create_file("state", S_IRUSR | S_IWUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_state_fops);
+ debugfs_create_file("host_traddr", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_host_traddr_fops);
+ return 0;
+}
+
+void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl)
+{
+ debugfs_remove_recursive(ctrl->debugfs_dir);
+}
+
+int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys)
+{
+ int ret = 0;
+
+ subsys->debugfs_dir = debugfs_create_dir(subsys->subsysnqn,
+ nvmet_debugfs);
+ if (IS_ERR(subsys->debugfs_dir)) {
+ ret = PTR_ERR(subsys->debugfs_dir);
+ subsys->debugfs_dir = NULL;
+ }
+ return ret;
+}
+
+void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys)
+{
+ debugfs_remove_recursive(subsys->debugfs_dir);
+}
+
+int __init nvmet_init_debugfs(void)
+{
+ struct dentry *parent;
+
+ parent = debugfs_create_dir("nvmet", NULL);
+ if (IS_ERR(parent)) {
+ pr_warn("%s: failed to create debugfs directory\n", "nvmet");
+ return PTR_ERR(parent);
+ }
+ nvmet_debugfs = parent;
+ return 0;
+}
+
+void nvmet_exit_debugfs(void)
+{
+ debugfs_remove_recursive(nvmet_debugfs);
+}
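Putting the pieces above together, each subsystem gets /sys/kernel/debug/nvmet/<subsysnqn>/ with ctrl<cntlid>/ directories holding port, hostnqn, kato, state and host_traddr. A small user-space sketch that reads a controller's state and injects a fatal error (the NQN and controller ID in the path are placeholders; debugfs is assumed mounted in the usual place):

#include <stdio.h>

#define CTRL_DIR "/sys/kernel/debug/nvmet/testnqn/ctrl1"	/* example path */

int main(void)
{
	char buf[64];
	FILE *f = fopen(CTRL_DIR "/state", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("ctrl state: %s", buf);	/* e.g. "ready" */
	fclose(f);

	/* writing "fatal" ends up in nvmet_ctrl_fatal_error() */
	f = fopen(CTRL_DIR "/state", "w");
	if (f) {
		fputs("fatal", f);
		fclose(f);
	}
	return 0;
}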
diff --git a/drivers/nvme/target/debugfs.h b/drivers/nvme/target/debugfs.h
new file mode 100644
index 000000000000..cfb8bbf6a297
--- /dev/null
+++ b/drivers/nvme/target/debugfs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DebugFS interface for the NVMe target.
+ * Copyright (c) 2022-2024 Shadow
+ * Copyright (c) 2024 SUSE LLC
+ */
+#ifndef NVMET_DEBUGFS_H
+#define NVMET_DEBUGFS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys);
+void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl);
+void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl);
+
+int __init nvmet_init_debugfs(void);
+void nvmet_exit_debugfs(void);
+#else
+static inline int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys)
+{
+ return 0;
+}
+static inline void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys){}
+
+static inline int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl) {}
+
+static inline int __init nvmet_init_debugfs(void)
+{
+ return 0;
+}
+
+static inline void nvmet_exit_debugfs(void) {}
+
+#endif
+
+#endif /* NVMET_DEBUGFS_H */
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index ce54da8c6b36..28843df5fa7c 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
req->error_loc =
offsetof(struct nvme_get_log_page_command, lid);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
if (offset & 0x3) {
req->error_loc =
offsetof(struct nvme_get_log_page_command, lpo);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -256,7 +256,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
req->error_loc = offsetof(struct nvme_identify, cns);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -320,7 +320,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
- stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -345,7 +345,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
- stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -361,7 +361,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
cmd->common.opcode);
req->error_loc =
offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
switch (cmd->common.opcode) {
@@ -386,7 +386,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
default:
pr_debug("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
}
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index d61b8c6ff3b2..3f2857c17d95 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -189,26 +189,26 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
u8 dhchap_status;
if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, secp);
goto done;
}
if (req->cmd->auth_send.spsp0 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, spsp0);
goto done;
}
if (req->cmd->auth_send.spsp1 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, spsp1);
goto done;
}
tl = le32_to_cpu(req->cmd->auth_send.tl);
if (!tl) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, tl);
goto done;
@@ -333,7 +333,6 @@ done:
pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
- req->cqe->result.u64 = 0;
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
@@ -438,26 +437,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
u16 status = 0;
if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, secp);
goto done;
}
if (req->cmd->auth_receive.spsp0 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, spsp0);
goto done;
}
if (req->cmd->auth_receive.spsp1 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, spsp1);
goto done;
}
al = le32_to_cpu(req->cmd->auth_receive.al);
if (!al) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, al);
goto done;
@@ -516,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, d, al);
kfree(d);
done:
- req->cqe->result.u64 = 0;
-
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 042b379cbb36..c4b2eddd5666 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
if (req->cmd->prop_set.attrib & 1) {
req->error_loc =
offsetof(struct nvmf_property_set_command, attrib);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvmf_property_set_command, offset);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
out:
nvmet_req_complete(req, status);
@@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
val = ctrl->cap;
break;
default:
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
} else {
@@ -65,7 +65,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
val = ctrl->csts;
break;
default:
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
}
@@ -105,7 +105,7 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
return 0;
@@ -128,7 +128,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
return 0;
@@ -147,14 +147,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
pr_warn("queue size zero!\n");
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
- ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
goto err;
}
if (ctrl->sqs[qid] != NULL) {
pr_warn("qid %u has already been created\n", qid);
req->error_loc = offsetof(struct nvmf_connect_command, qid);
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
}
/* for fabrics, this value applies to only the I/O Submission Queues */
@@ -163,14 +163,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
sqsize, mqes, ctrl->cntlid);
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
- return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
}
old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
if (old) {
pr_warn("queue already connected!\n");
req->error_loc = offsetof(struct nvmf_connect_command, opcode);
- return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
}
/* note: convert queue size from 0's-based value to 1's-based value */
@@ -226,21 +226,18 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
- status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
goto out;
}
if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
@@ -260,7 +257,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
dhchap_status);
nvmet_ctrl_put(ctrl);
if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
- status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
else
status = NVME_SC_INTERNAL;
goto out;
@@ -305,13 +302,10 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
- status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
goto out;
}
@@ -320,13 +314,13 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
le16_to_cpu(d->cntlid), req);
if (!ctrl) {
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
goto out;
}
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
goto out_ctrl_put;
}
@@ -356,13 +350,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
pr_debug("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
if (cmd->connect.qid == 0)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 337ee1cb09ae..3ef4beacde32 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_queue {
struct workqueue_struct *work_q;
struct kref ref;
/* array of fcp_iods */
- struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
+ struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_hostport {
@@ -2934,6 +2934,38 @@ nvmet_fc_discovery_chg(struct nvmet_port *port)
tgtport->ops->discovery_event(&tgtport->fc_target_port);
}
+static ssize_t
+nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_size)
+{
+ struct nvmet_sq *sq = ctrl->sqs[0];
+ struct nvmet_fc_tgt_queue *queue =
+ container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq);
+ struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL;
+ struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL;
+ u64 wwnn, wwpn;
+ ssize_t ret = 0;
+
+ if (!tgtport || !nvmet_fc_tgtport_get(tgtport))
+ return -ENODEV;
+ if (!hostport || !nvmet_fc_hostport_get(hostport)) {
+ ret = -ENODEV;
+ goto out_put;
+ }
+
+ if (tgtport->ops->host_traddr) {
+ ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn);
+ if (ret)
+ goto out_put_host;
+ ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn);
+ }
+out_put_host:
+ nvmet_fc_hostport_put(hostport);
+out_put:
+ nvmet_fc_tgtport_put(tgtport);
+ return ret;
+}
+
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_FC,
@@ -2943,6 +2975,7 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
.queue_response = nvmet_fc_fcp_nvme_cmd_done,
.delete_ctrl = nvmet_fc_delete_ctrl,
.discovery_chg = nvmet_fc_discovery_chg,
+ .host_traddr = nvmet_fc_host_traddr,
};
static int __init nvmet_fc_init_module(void)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 913cd2ec7a6f..e1abb27927ff 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -492,6 +492,16 @@ fcloop_t2h_host_release(void *hosthandle)
/* host handle ignored for now */
}
+static int
+fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
+{
+ struct fcloop_rport *rport = hosthandle;
+
+ *wwnn = rport->lport->localport->node_name;
+ *wwpn = rport->lport->localport->port_name;
+ return 0;
+}
+
/*
* Simulate reception of RSCN and converting it to a initiator transport
* call to rescan a remote port.
@@ -1074,6 +1084,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.ls_req = fcloop_t2h_ls_req,
.ls_abort = fcloop_t2h_ls_abort,
.host_release = fcloop_t2h_host_release,
+ .host_traddr = fcloop_t2h_host_traddr,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 6426aac2634a..0bda83d0fc3e 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -61,15 +61,17 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
- if (bi) {
+ if (!bi)
+ return;
+
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
ns->metadata_size = bi->tuple_size;
- if (bi->profile == &t10_pi_type1_crc)
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
ns->pi_type = NVME_NS_DPS_PI_TYPE1;
- else if (bi->profile == &t10_pi_type3_crc)
- ns->pi_type = NVME_NS_DPS_PI_TYPE3;
else
- /* Unsupported metadata type */
- ns->metadata_size = 0;
+ ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+ } else {
+ ns->metadata_size = 0;
}
}
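The rewrite assumes the block layer now describes integrity protection via a checksum type plus flags instead of the old profile pointers. The relevant struct blk_integrity fields are assumed to look roughly like this (block-layer side, not shown in this section):

/* assumed shape of the reworked struct blk_integrity (illustrative) */
enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE,
	BLK_INTEGRITY_CSUM_IP,
	BLK_INTEGRITY_CSUM_CRC,		/* T10 PI CRC16; REF_TAG flag picks type 1 vs 3 */
	BLK_INTEGRITY_CSUM_CRC64,
};

struct blk_integrity {
	unsigned char			flags;		/* e.g. BLK_INTEGRITY_REF_TAG */
	enum blk_integrity_checksum	csum_type;
	unsigned char			tuple_size;
	unsigned char			pi_offset;
	unsigned char			interval_exp;
	unsigned char			tag_size;
};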
@@ -102,7 +104,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
ns->pi_type = 0;
ns->metadata_size = 0;
- if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
nvmet_bdev_ns_enable_integrity(ns);
if (bdev_is_zoned(ns->bdev)) {
@@ -135,11 +137,11 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
*/
switch (blk_sts) {
case BLK_STS_NOSPC:
- status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, length);
break;
case BLK_STS_TARGET:
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, slba);
break;
case BLK_STS_NOTSUPP:
@@ -147,10 +149,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
switch (req->cmd->common.opcode) {
case nvme_cmd_dsm:
case nvme_cmd_write_zeroes:
- status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
break;
default:
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
break;
case BLK_STS_MEDIUM:
@@ -159,7 +161,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
break;
case BLK_STS_IOERR:
default:
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
}
@@ -356,7 +358,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req)
return 0;
if (blkdev_issue_flush(req->ns->bdev))
- return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
return 0;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index e589915ddef8..e32790d8fc26 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -555,6 +555,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
goto out;
}
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
WARN_ON_ONCE(1);
@@ -611,6 +615,7 @@ out_free_queues:
kfree(ctrl->queues);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
out:
if (ret > 0)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 2f22b07eab29..190f55e6d753 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -230,7 +230,9 @@ struct nvmet_ctrl {
struct device *p2p_client;
struct radix_tree_root p2p_ns_map;
-
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
spinlock_t error_lock;
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
@@ -262,7 +264,9 @@ struct nvmet_subsys {
struct list_head hosts;
bool allow_any_host;
-
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
u16 max_qid;
u64 ver;
@@ -350,6 +354,8 @@ struct nvmet_fabrics_ops {
void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
void (*disc_traddr)(struct nvmet_req *req,
struct nvmet_port *port, char *traddr);
+ ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len);
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
@@ -498,6 +504,8 @@ struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
+ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len);
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index bb4a69d538fd..24d0e2418d2e 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
- nvmet_passthru_override_id_ctrl(req);
+ status = nvmet_passthru_override_id_ctrl(req);
break;
case NVME_ID_CNS_NS:
- nvmet_passthru_override_id_ns(req);
+ status = nvmet_passthru_override_id_ns(req);
break;
case NVME_ID_CNS_NS_DESC_LIST:
- nvmet_passthru_override_id_descs(req);
+ status = nvmet_passthru_override_id_descs(req);
break;
}
} else if (status < 0)
@@ -306,7 +306,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
ns = nvme_find_get_ns(ctrl, nsid);
if (unlikely(!ns)) {
pr_err("failed to get passthru ns nsid:%u\n", nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
goto out;
}
@@ -426,7 +426,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
* emulated in the future if regular targets grow support for
* this feature.
*/
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
return nvmet_setup_passthru_command(req);
@@ -478,7 +478,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
case NVME_FEAT_RESV_PERSIST:
/* No reservations, see nvmet_parse_passthru_io_cmd() */
default:
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
}
@@ -546,7 +546,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
}
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
case NVME_ID_CNS_NS:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
@@ -558,7 +558,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
}
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
default:
return nvmet_setup_passthru_command(req);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 689bb5d3cfdc..1eff8ca6a5f1 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -852,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
if (!nvme_is_write(rsp->req.cmd)) {
rsp->req.error_loc =
offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
if (off + len > rsp->queue->dev->inline_data_size) {
pr_err("invalid inline data offset!\n");
- return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
}
/* no data command? */
@@ -919,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
pr_err("invalid SGL subtype: %#x\n", sgl->type);
rsp->req.error_loc =
offsetof(struct nvme_common_command, dptr);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
case NVME_KEY_SGL_FMT_DATA_DESC:
switch (sgl->type & 0xf) {
@@ -931,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
pr_err("invalid SGL subtype: %#x\n", sgl->type);
rsp->req.error_loc =
offsetof(struct nvme_common_command, dptr);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
default:
pr_err("invalid SGL type: %#x\n", sgl->type);
rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
}
}
@@ -2000,6 +2000,17 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
}
}
+static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len)
+{
+ struct nvmet_sq *nvme_sq = ctrl->sqs[0];
+ struct nvmet_rdma_queue *queue =
+ container_of(nvme_sq, struct nvmet_rdma_queue, nvme_sq);
+
+ return snprintf(traddr, traddr_len, "%pISc",
+ (struct sockaddr *)&queue->cm_id->route.addr.dst_addr);
+}
+
static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{
if (ctrl->pi_support)
@@ -2024,6 +2035,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.queue_response = nvmet_rdma_queue_response,
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
+ .host_traddr = nvmet_rdma_host_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
.get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 380f22ee3ebb..5bff0d5464d1 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -416,10 +416,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
NVME_SGL_FMT_OFFSET)) {
if (!nvme_is_write(cmd->req.cmd))
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
if (len > cmd->req.port->inline_data_size)
- return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
cmd->pdu_len = len;
}
cmd->req.transfer_len += len;
@@ -2167,6 +2167,19 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
}
}
+static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len)
+{
+ struct nvmet_sq *sq = ctrl->sqs[0];
+ struct nvmet_tcp_queue *queue =
+ container_of(sq, struct nvmet_tcp_queue, nvme_sq);
+
+ if (queue->sockaddr_peer.ss_family == AF_UNSPEC)
+ return -EINVAL;
+ return snprintf(traddr, traddr_len, "%pISc",
+ (struct sockaddr *)&queue->sockaddr_peer);
+}
+
static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
@@ -2177,6 +2190,7 @@ static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.delete_ctrl = nvmet_tcp_delete_ctrl,
.install_queue = nvmet_tcp_install_queue,
.disc_traddr = nvmet_tcp_disc_port_addr,
+ .host_traddr = nvmet_tcp_host_port_addr,
};
static int __init nvmet_tcp_init(void)
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 0021d06041c1..af9e13be7678 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -100,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
goto out;
}
@@ -121,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
}
if (!bdev_is_zoned(req->ns->bdev)) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_identify, nsid);
goto out;
}
@@ -158,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
- return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
}
if (out_bufsize < sizeof(struct nvme_zone_report)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
switch (req->cmd->zmr.pr) {
@@ -177,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
break;
default:
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
switch (req->cmd->zmr.zrasf) {
@@ -193,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
return NVME_SC_SUCCESS;
@@ -341,7 +341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
return NVME_SC_SUCCESS;
case -EINVAL:
case -EIO:
- return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
default:
return NVME_SC_INTERNAL;
}
@@ -463,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
default:
/* this is needed to quiet compiler warning */
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
return NVME_SC_SUCCESS;
@@ -481,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
if (op == REQ_OP_LAST) {
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
- status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
goto out;
}
@@ -493,13 +493,13 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
if (sect >= get_capacity(bdev->bd_disk)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
goto out;
}
if (sect & (zone_sectors - 1)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -551,13 +551,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
req->error_loc = offsetof(struct nvme_rw_command, slba);
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
goto out;
}
if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
req->error_loc = offsetof(struct nvme_rw_command, slba);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -590,7 +590,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
}
if (total_len != nvmet_rw_data_len(req)) {
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
goto out_put_bio;
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index e1ec3b7200d7..f8dd7eb40fbe 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -396,10 +396,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
- if (config->type == NVMEM_TYPE_FRAM)
- bin_attr_nvmem_eeprom_compat.attr.name = "fram";
-
nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
+ if (config->type == NVMEM_TYPE_FRAM)
+ nvmem->eeprom.attr.name = "fram";
nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -463,7 +462,7 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
"%s@%x,%x", entry->name,
entry->offset,
entry->bit_offset);
- attrs[i].attr.mode = 0444;
+ attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
attrs[i].size = entry->bytes;
attrs[i].read = &nvmem_cell_attr_read;
attrs[i].private = entry;
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index 33678d0af2c2..6c2f80e166e2 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct meson_sm_firmware *fw = context;
+ int ret;
- return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
- bytes, 0, 0, 0);
+ ret = meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
+ bytes, 0, 0, 0);
+
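+ /*
+ * The nvmem core expects ->reg_read() to return 0 on success, so only
+ * pass through negative error codes from the firmware call.
+ */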
+ return ret < 0 ? ret : 0;
}
static int meson_efuse_write(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct meson_sm_firmware *fw = context;
+ int ret;
+
+ ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
+ bytes, 0, 0, 0);
- return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
- bytes, 0, 0, 0);
+ return ret < 0 ? ret : 0;
}
static const struct of_device_id meson_efuse_match[] = {
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
index 752d0bf4445e..7f907c5a445e 100644
--- a/drivers/nvmem/rmem.c
+++ b/drivers/nvmem/rmem.c
@@ -46,7 +46,10 @@ static int rmem_read(void *context, unsigned int offset,
memunmap(addr);
- return count;
+ if (count < 0)
+ return count;
+
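+ /* The nvmem core expects 0 on success; turn a short read into -EIO. */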
+ return count == bytes ? 0 : -EIO;
}
static int rmem_probe(struct platform_device *pdev)
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 174900072c18..c94203ce65bb 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -25,6 +25,8 @@
#include <linux/string.h>
#include <linux/slab.h>
+#include "of_private.h"
+
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @dev: Device node of the device whose interrupt is to be mapped
@@ -79,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent);
/*
* These interrupt controllers abuse interrupt-map for unspeakable
* reasons and rely on the core code to *ignore* it (the drivers do
- * their own parsing of the property).
+ * their own parsing of the property). The PAsemi entry covers a
+ * nonsensical interrupt-map that is better left ignored.
*
* If you think of adding to the list for something *new*, think
* again. There is a high chance that you will be sent back to the
@@ -93,9 +96,61 @@ static const char * const of_irq_imap_abusers[] = {
"fsl,ls1043a-extirq",
"fsl,ls1088a-extirq",
"renesas,rza1-irqc",
+ "pasemi,rootbus",
NULL,
};
+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq)
+{
+ u32 intsize, addrsize;
+ struct device_node *np;
+
+ /* Get the interrupt parent */
+ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+ np = of_node_get(of_irq_dflt_pic);
+ else
+ np = of_find_node_by_phandle(be32_to_cpup(imap));
+ imap++;
+
+ /* Check if not found */
+ if (!np) {
+ pr_debug(" -> imap parent not found !\n");
+ return NULL;
+ }
+
+ /* Get #interrupt-cells and #address-cells of new parent */
+ if (of_property_read_u32(np, "#interrupt-cells",
+ &intsize)) {
+ pr_debug(" -> parent lacks #interrupt-cells!\n");
+ of_node_put(np);
+ return NULL;
+ }
+ if (of_property_read_u32(np, "#address-cells",
+ &addrsize))
+ addrsize = 0;
+
+ pr_debug(" -> intsize=%d, addrsize=%d\n",
+ intsize, addrsize);
+
+ /* Check for malformed properties */
+ if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)
+ || (len < (addrsize + intsize))) {
+ of_node_put(np);
+ return NULL;
+ }
+
+ pr_debug(" -> imaplen=%d\n", len);
+
+ imap += addrsize + intsize;
+
+ out_irq->np = np;
+ for (int i = 0; i < intsize; i++)
+ out_irq->args[i] = be32_to_cpup(imap - intsize + i);
+ out_irq->args_count = intsize;
+
+ return imap;
+}
+
/**
* of_irq_parse_raw - Low level interrupt tree parsing
* @addr: address specifier (start of "reg" property of the device) in be32 format
@@ -112,12 +167,12 @@ static const char * const of_irq_imap_abusers[] = {
*/
int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
- struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
+ struct device_node *ipar, *tnode, *old = NULL;
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
- const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
- u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
- int imaplen, match, i, rc = -EINVAL;
+ const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+ u32 intsize = 1, addrsize;
+ int i, rc = -EINVAL;
#ifdef DEBUG
of_print_phandle_args("of_irq_parse_raw: ", out_irq);
@@ -176,6 +231,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
/* Now start the actual "proper" walk of the interrupt tree */
while (ipar != NULL) {
+ int imaplen, match;
+ const __be32 *imap, *oldimap, *imask;
+ struct device_node *newpar;
/*
* Now check if cursor is an interrupt-controller and
* if it is then we are done, unless there is an
@@ -216,7 +274,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
/* Parse interrupt-map */
match = 0;
- while (imaplen > (addrsize + intsize + 1) && !match) {
+ while (imaplen > (addrsize + intsize + 1)) {
/* Compare specifiers */
match = 1;
for (i = 0; i < (addrsize + intsize); i++, imaplen--)
@@ -224,74 +282,31 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
- /* Get the interrupt parent */
- if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
- newpar = of_node_get(of_irq_dflt_pic);
- else
- newpar = of_find_node_by_phandle(be32_to_cpup(imap));
- imap++;
- --imaplen;
-
- /* Check if not found */
- if (newpar == NULL) {
- pr_debug(" -> imap parent not found !\n");
+ oldimap = imap;
+ imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq);
+ if (!imap)
goto fail;
- }
-
- if (!of_device_is_available(newpar))
- match = 0;
-
- /* Get #interrupt-cells and #address-cells of new
- * parent
- */
- if (of_property_read_u32(newpar, "#interrupt-cells",
- &newintsize)) {
- pr_debug(" -> parent lacks #interrupt-cells!\n");
- goto fail;
- }
- if (of_property_read_u32(newpar, "#address-cells",
- &newaddrsize))
- newaddrsize = 0;
- pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
- newintsize, newaddrsize);
-
- /* Check for malformed properties */
- if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
- || (imaplen < (newaddrsize + newintsize))) {
- rc = -EFAULT;
- goto fail;
- }
-
- imap += newaddrsize + newintsize;
- imaplen -= newaddrsize + newintsize;
+ match &= of_device_is_available(out_irq->np);
+ if (match)
+ break;
+ of_node_put(out_irq->np);
+ imaplen -= imap - oldimap;
pr_debug(" -> imaplen=%d\n", imaplen);
}
- if (!match) {
- if (intc) {
- /*
- * The PASEMI Nemo is a known offender, so
- * let's only warn for anyone else.
- */
- WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
- "%pOF interrupt-map failed, using interrupt-controller\n",
- ipar);
- return 0;
- }
-
+ if (!match)
goto fail;
- }
/*
* Successfully parsed an interrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
- match_array = imap - newaddrsize - newintsize;
- for (i = 0; i < newintsize; i++)
- out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
- out_irq->args_count = intsize = newintsize;
- addrsize = newaddrsize;
+ match_array = oldimap + 1;
+
+ newpar = out_irq->np;
+ intsize = out_irq->args_count;
+ addrsize = (imap - match_array) - intsize;
if (ipar == newpar) {
pr_debug("%pOF interrupt-map entry to self\n", ipar);
@@ -300,7 +315,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
skiplevel:
/* Iterate again with new parent */
- out_irq->np = newpar;
pr_debug(" -> new parent: %pOF\n", newpar);
of_node_put(ipar);
ipar = newpar;
@@ -310,7 +324,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
fail:
of_node_put(ipar);
- of_node_put(newpar);
return rc;
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 94fc0aa07af9..04aa2a91f851 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -159,6 +159,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
extern int of_bus_n_addr_cells(struct device_node *np);
extern int of_bus_n_size_cells(struct device_node *np);
+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len,
+ struct of_phandle_args *out_irq);
+
struct bus_dma_region;
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
int of_dma_get_range(struct device_node *np,
diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c
index a9301d293f01..c85a258bc6ae 100644
--- a/drivers/of/of_test.c
+++ b/drivers/of/of_test.c
@@ -54,4 +54,5 @@ static struct kunit_suite of_dtb_suite = {
kunit_test_suites(
&of_dtb_suite,
);
+MODULE_DESCRIPTION("KUnit tests for OF APIs");
MODULE_LICENSE("GPL");
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 1c83e68f805b..164d77cb9445 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1306,10 +1306,10 @@ static struct device_node *parse_interrupts(struct device_node *np,
static struct device_node *parse_interrupt_map(struct device_node *np,
const char *prop_name, int index)
{
- const __be32 *imap, *imap_end, *addr;
+ const __be32 *imap, *imap_end;
struct of_phandle_args sup_args;
u32 addrcells, intcells;
- int i, imaplen;
+ int imaplen;
if (!IS_ENABLED(CONFIG_OF_IRQ))
return NULL;
@@ -1322,33 +1322,23 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
addrcells = of_bus_n_addr_cells(np);
imap = of_get_property(np, "interrupt-map", &imaplen);
- if (!imap || imaplen <= (addrcells + intcells))
+ imaplen /= sizeof(*imap);
+ if (!imap)
return NULL;
- imap_end = imap + imaplen;
- while (imap < imap_end) {
- addr = imap;
- imap += addrcells;
+ imap_end = imap + imaplen;
- sup_args.np = np;
- sup_args.args_count = intcells;
- for (i = 0; i < intcells; i++)
- sup_args.args[i] = be32_to_cpu(imap[i]);
- imap += intcells;
+ for (int i = 0; imap + addrcells + intcells + 1 < imap_end; i++) {
+ imap += addrcells + intcells;
- /*
- * Upon success, the function of_irq_parse_raw() returns
- * interrupt controller DT node pointer in sup_args.np.
- */
- if (of_irq_parse_raw(addr, &sup_args))
+ imap = of_irq_parse_imap_parent(imap, imap_end - imap, &sup_args);
+ if (!imap)
return NULL;
- if (!index)
+ if (i == index)
return sup_args.np;
of_node_put(sup_args.np);
- imap += sup_args.args_count + 1;
- index--;
}
return NULL;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index cb4611fe1b5b..5f4598246a87 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1102,8 +1102,7 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
return 0;
}
-static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp)
+static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
{
unsigned int level = 0;
int ret = 0;
@@ -1171,7 +1170,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
if (opp_table->regulators)
regulator_disable(opp_table->regulators[0]);
- ret = _set_opp_level(dev, opp_table, NULL);
+ ret = _set_opp_level(dev, NULL);
if (ret)
goto out;
@@ -1220,7 +1219,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
return ret;
}
- ret = _set_opp_level(dev, opp_table, opp);
+ ret = _set_opp_level(dev, opp);
if (ret)
return ret;
@@ -1267,7 +1266,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
return ret;
}
- ret = _set_opp_level(dev, opp_table, opp);
+ ret = _set_opp_level(dev, opp);
if (ret)
return ret;
@@ -2443,8 +2442,10 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
* Cross check it again and fix if required.
*/
gdev = dev_to_genpd_dev(virt_dev);
- if (IS_ERR(gdev))
- return PTR_ERR(gdev);
+ if (IS_ERR(gdev)) {
+ ret = PTR_ERR(gdev);
+ goto err;
+ }
genpd_table = _find_opp_table(gdev);
if (!IS_ERR(genpd_table)) {
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 282eb5966fd0..55c8cfef97d4 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -1444,6 +1444,38 @@ put_required_np:
EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
/**
+ * dev_pm_opp_of_has_required_opp - Find out if a "required-opps" property exists.
+ * @dev: The device to investigate.
+ *
+ * Returns true if the device's node has an "operating-points-v2" property and
+ * if the corresponding opp-table node describes opp nodes that use the
+ * "required-opps" property.
+ *
+ * Return: True if a required-opps is present, else false.
+ */
+bool dev_pm_opp_of_has_required_opp(struct device *dev)
+{
+ struct device_node *opp_np, *np;
+ int count;
+
+ opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0);
+ if (!opp_np)
+ return false;
+
+ np = of_get_next_available_child(opp_np, NULL);
+ of_node_put(opp_np);
+ if (!np) {
+ dev_warn(dev, "Empty OPP table\n");
+ return false;
+ }
+
+ count = of_count_phandle_with_args(np, "required-opps", NULL);
+ of_node_put(np);
+
+ return count > 0;
+}
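+
+/*
+ * Example (hypothetical caller; my_setup_required_opps() is illustrative
+ * only): a provider could use this helper to decide whether required-opps
+ * based scaling needs to be configured at all:
+ *
+ *	if (dev_pm_opp_of_has_required_opp(dev))
+ *		ret = my_setup_required_opps(dev);
+ */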
+
+/**
* dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
* @opp: opp for which DT node has to be returned for
*
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index e3b97cd1fbbf..ec0056a4bb13 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -393,10 +393,12 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
}
ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
- if (ret < 0)
+ if (ret < 0) {
_free_optimized_voltages(dev, &opp_data);
+ return ret;
+ }
- return ret;
+ return 0;
}
static struct platform_driver ti_opp_supply_driver = {
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index e6dc857aac3f..e06c7b2aac5c 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -229,7 +229,13 @@ static void __exit amiga_parallel_remove(struct platform_device *pdev)
parport_put_port(port);
}
-static struct platform_driver amiga_parallel_driver = {
+/*
+ * amiga_parallel_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost from
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver amiga_parallel_driver __refdata = {
.remove_new = __exit_p(amiga_parallel_remove),
.driver = {
.name = "amiga-parallel",
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index d35001589d88..aa4d1833f442 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -296,5 +296,6 @@ source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
source "drivers/pci/switch/Kconfig"
+source "drivers/pci/pwrctl/Kconfig"
endif
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 175302036890..8ddad57934a6 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
+obj-$(CONFIG_PCI) += pwrctl/
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 30f031de9cfe..b123da16b63b 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -289,8 +289,6 @@ void pci_cfg_access_lock(struct pci_dev *dev)
{
might_sleep();
- lock_map_acquire(&dev->cfg_access_lock);
-
raw_spin_lock_irq(&pci_lock);
if (dev->block_cfg_access)
pci_wait_cfg(dev);
@@ -345,8 +343,6 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
raw_spin_unlock_irqrestore(&pci_lock, flags);
wake_up_all(&pci_cfg_wait);
-
- lock_map_release(&dev->cfg_access_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 826b5016a101..8765e2d2aafa 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
@@ -354,6 +355,14 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_warn(dev, "device attach failed (%d)\n", retval);
pci_dev_assign_added(dev, true);
+
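+ /*
+ * Create platform devices for the child OF nodes of this bridge, e.g.
+ * PCI power control (pwrctl) devices described in the devicetree.
+ */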
+ if (IS_ENABLED(CONFIG_OF) && pci_is_bridge(dev)) {
+ retval = of_platform_populate(dev->dev.of_node, NULL, NULL,
+ &dev->dev);
+ if (retval)
+ pci_err(dev, "failed to populate child OF nodes (%d)\n",
+ retval);
+ }
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index c5625dd9bf49..3a45879d85db 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -352,7 +352,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
struct irq_affinity *affd)
{
struct irq_affinity_desc *masks = NULL;
- struct msi_desc *entry;
+ struct msi_desc *entry, desc;
int ret;
/* Reject multi-MSI early on irq domain enabled architectures */
@@ -377,6 +377,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* All MSIs are unmasked by default; mask them all */
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
+ /*
+ * Copy the MSI descriptor for the error path because
+ * pci_msi_setup_msi_irqs() will free it for the hierarchical
+ * interrupt domain case.
+ */
+ memcpy(&desc, entry, sizeof(desc));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -396,7 +402,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto unlock;
err:
- pci_msi_unmask(entry, msi_multi_mask(entry));
+ pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
fail:
dev->msi_enabled = 0;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 51e3dd0ea5ab..b908fe1ae951 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) "PCI: OF: " fmt
+#include <linux/cleanup.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -13,6 +14,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/platform_device.h>
#include "pci.h"
#ifdef CONFIG_PCI
@@ -25,16 +27,20 @@
*/
int pci_set_of_node(struct pci_dev *dev)
{
- struct device_node *node;
-
if (!dev->bus->dev.of_node)
return 0;
- node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
+ struct device_node *node __free(device_node) =
+ of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
if (!node)
return 0;
- device_set_node(&dev->dev, of_fwnode_handle(node));
+ struct device *pdev __free(put_device) =
+ bus_find_device_by_of_node(&platform_bus_type, node);
+ if (pdev)
+ dev->bus->dev.of_node_reused = true;
+
+ device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node)));
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 59e0949fb079..35fb1f17a589 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4883,7 +4883,6 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
*/
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
- lock_map_assert_held(&dev->cfg_access_lock);
pcibios_reset_secondary_bus(dev);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8e696e547565..4c367f13acdc 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2546,9 +2546,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- lockdep_register_key(&dev->cfg_access_key);
- lockdep_init_map(&dev->cfg_access_lock, dev_name(&dev->dev),
- &dev->cfg_access_key, 0);
dma_set_max_seg_size(&dev->dev, 65536);
dma_set_seg_boundary(&dev->dev, 0xffffffff);
@@ -3072,7 +3069,9 @@ int pci_host_probe(struct pci_host_bridge *bridge)
struct pci_bus *bus, *child;
int ret;
+ pci_lock_rescan_remove();
ret = pci_scan_root_bus_bridge(bridge);
+ pci_unlock_rescan_remove();
if (ret < 0) {
dev_err(bridge->dev.parent, "Scanning root bridge failed");
return ret;
diff --git a/drivers/pci/pwrctl/Kconfig b/drivers/pci/pwrctl/Kconfig
new file mode 100644
index 000000000000..f1b824955d4b
--- /dev/null
+++ b/drivers/pci/pwrctl/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "PCI Power control drivers"
+
+config PCI_PWRCTL
+ tristate
+
+config PCI_PWRCTL_PWRSEQ
+ tristate "PCI Power Control driver using the Power Sequencing subsystem"
+ select POWER_SEQUENCING
+ select PCI_PWRCTL
+ default m if ((ATH11K_PCI || ATH12K) && ARCH_QCOM)
+ help
+ Enable support for the PCI power control driver for devices that are
+ powered up via the Power Sequencing subsystem.
+
+endmenu
diff --git a/drivers/pci/pwrctl/Makefile b/drivers/pci/pwrctl/Makefile
new file mode 100644
index 000000000000..d308aae4800c
--- /dev/null
+++ b/drivers/pci/pwrctl/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctl-core.o
+pci-pwrctl-core-y := core.o
+
+obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctl-pwrseq.o
diff --git a/drivers/pci/pwrctl/core.c b/drivers/pci/pwrctl/core.c
new file mode 100644
index 000000000000..feca26ad2f6a
--- /dev/null
+++ b/drivers/pci/pwrctl/core.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-pwrctl.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+static int pci_pwrctl_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct pci_pwrctl *pwrctl = container_of(nb, struct pci_pwrctl, nb);
+ struct device *dev = data;
+
+ if (dev_fwnode(dev) != dev_fwnode(pwrctl->dev))
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ /*
+ * We will have two struct device objects bound to two different
+ * drivers on different buses but consuming the same DT node. We
+ * must not bind the pins twice in this case but only once for
+ * the first device to be added.
+ *
+ * If we got here then the PCI device is the second after the
+ * power control platform device. Mark its OF node as reused.
+ */
+ dev->of_node_reused = true;
+ break;
+ case BUS_NOTIFY_BOUND_DRIVER:
+ pwrctl->link = device_link_add(dev, pwrctl->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!pwrctl->link)
+ dev_err(pwrctl->dev, "Failed to add device link\n");
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ if (pwrctl->link)
+ device_link_remove(dev, pwrctl->dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * pci_pwrctl_device_set_ready() - Notify the pwrctl subsystem that the PCI
+ * device is powered-up and ready to be detected.
+ *
+ * @pwrctl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ *
+ * Note:
+ * This function returning 0 doesn't mean the device was detected. It means
+ * that the bus rescan was successfully started. The device will get bound to
+ * its PCI driver asynchronously.
+ */
+int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl)
+{
+ int ret;
+
+ if (!pwrctl->dev)
+ return -ENODEV;
+
+ pwrctl->nb.notifier_call = pci_pwrctl_notify;
+ ret = bus_register_notifier(&pci_bus_type, &pwrctl->nb);
+ if (ret)
+ return ret;
+
+ pci_lock_rescan_remove();
+ pci_rescan_bus(to_pci_dev(pwrctl->dev->parent)->bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_pwrctl_device_set_ready);
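+
+/*
+ * A typical provider powers the device up in its probe path and then calls
+ * the managed variant, devm_pci_pwrctl_device_set_ready(); see
+ * pci-pwrctl-pwrseq.c for an example.
+ */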
+
+/**
+ * pci_pwrctl_device_unset_ready() - Notify the pwrctl subsystem that the PCI
+ * device is about to be powered-down.
+ *
+ * @pwrctl: PCI power control data.
+ */
+void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl)
+{
+ /*
+ * We don't have to delete the link here. Typically, this function
+ * is only called when the power control device is being detached. If
+ * it is being detached then the child PCI device must have already
+ * been unbound too or the device core wouldn't let us unbind.
+ */
+ bus_unregister_notifier(&pci_bus_type, &pwrctl->nb);
+}
+EXPORT_SYMBOL_GPL(pci_pwrctl_device_unset_ready);
+
+static void devm_pci_pwrctl_device_unset_ready(void *data)
+{
+ struct pci_pwrctl *pwrctl = data;
+
+ pci_pwrctl_device_unset_ready(pwrctl);
+}
+
+/**
+ * devm_pci_pwrctl_device_set_ready - Managed variant of
+ * pci_pwrctl_device_set_ready().
+ *
+ * @dev: Device managing this pwrctl provider.
+ * @pwrctl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ */
+int devm_pci_pwrctl_device_set_ready(struct device *dev,
+ struct pci_pwrctl *pwrctl)
+{
+ int ret;
+
+ ret = pci_pwrctl_device_set_ready(pwrctl);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev,
+ devm_pci_pwrctl_device_unset_ready,
+ pwrctl);
+}
+EXPORT_SYMBOL_GPL(devm_pci_pwrctl_device_set_ready);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("PCI Device Power Control core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
new file mode 100644
index 000000000000..c7a113a76c0c
--- /dev/null
+++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
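+ /*
+ * ctrl->sqs[0] is the admin queue; report the remote (host) address of
+ * its RDMA connection.
+ */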
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci-pwrctl.h>
+#include <linux/platform_device.h>
+#include <linux/pwrseq/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct pci_pwrctl_pwrseq_data {
+ struct pci_pwrctl ctx;
+ struct pwrseq_desc *pwrseq;
+};
+
+static void devm_pci_pwrctl_pwrseq_power_off(void *data)
+{
+ struct pwrseq_desc *pwrseq = data;
+
+ pwrseq_power_off(pwrseq);
+}
+
+static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev)
+{
+ struct pci_pwrctl_pwrseq_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pwrseq = devm_pwrseq_get(dev, of_device_get_match_data(dev));
+ if (IS_ERR(data->pwrseq))
+ return dev_err_probe(dev, PTR_ERR(data->pwrseq),
+ "Failed to get the power sequencer\n");
+
+ ret = pwrseq_power_on(data->pwrseq);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to power-on the device\n");
+
+ ret = devm_add_action_or_reset(dev, devm_pci_pwrctl_pwrseq_power_off,
+ data->pwrseq);
+ if (ret)
+ return ret;
+
+ data->ctx.dev = dev;
+
+ ret = devm_pci_pwrctl_device_set_ready(dev, &data->ctx);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register the pwrctl wrapper\n");
+
+ return 0;
+}
+
+static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = {
+ {
+ /* ATH11K in QCA6390 package. */
+ .compatible = "pci17cb,1101",
+ .data = "wlan",
+ },
+ {
+ /* ATH12K in WCN7850 package. */
+ .compatible = "pci17cb,1107",
+ .data = "wlan",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pci_pwrctl_pwrseq_of_match);
+
+static struct platform_driver pci_pwrctl_pwrseq_driver = {
+ .driver = {
+ .name = "pci-pwrctl-pwrseq",
+ .of_match_table = pci_pwrctl_pwrseq_of_match,
+ },
+ .probe = pci_pwrctl_pwrseq_probe,
+};
+module_platform_driver(pci_pwrctl_pwrseq_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Generic PCI Power Control module for power sequenced devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d749ea8250d6..910387e5bdbf 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
@@ -18,7 +19,7 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (pci_dev_is_added(dev)) {
-
+ of_platform_depopulate(&dev->dev);
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 7526a9e714fa..aa9530b4064f 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -56,6 +56,18 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config ARM_V6_PMU
+ depends on ARM_PMU && (CPU_V6 || CPU_V6K)
+ def_bool y
+
+config ARM_V7_PMU
+ depends on ARM_PMU && CPU_V7
+ def_bool y
+
+config ARM_XSCALE_PMU
+ depends on ARM_PMU && CPU_XSCALE
+ def_bool y
+
config RISCV_PMU
depends on RISCV
bool "RISC-V PMU framework"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 29b1c28203ef..d43df81d52f7 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -6,6 +6,9 @@ obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
+obj-$(CONFIG_ARM_V6_PMU) += arm_v6_pmu.o
+obj-$(CONFIG_ARM_V7_PMU) += arm_v7_pmu.o
+obj-$(CONFIG_ARM_XSCALE_PMU) += arm_xscale_pmu.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 86ef31ac7503..5c66b9278862 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1561,4 +1561,5 @@ module_init(arm_ccn_init);
module_exit(arm_ccn_exit);
MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("ARM CCN (Cache Coherent Network) Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index e26ad1d3ed0b..c932d9d355cf 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -174,9 +174,8 @@
#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
-/* Note that we don't yet support the tertiary match group on newer IPs */
-#define CMN_CONFIG_WP_GRP BIT_ULL(56)
-#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
+#define CMN_CONFIG_WP_GRP GENMASK_ULL(57, 56)
+#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(58)
#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0)
@@ -590,6 +589,13 @@ struct arm_cmn_hw_event {
s8 dtc_idx[CMN_MAX_DTCS];
u8 num_dns;
u8 dtm_offset;
+
+ /*
+ * WP config registers are divided into UP and DOWN events. We need to
+ * keep track of only one of them.
+ */
+ DECLARE_BITMAP(wp_idx, CMN_MAX_XPS);
+
bool wide_sel;
enum cmn_filter_select filter_sel;
};
@@ -617,6 +623,17 @@ static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}
+static void arm_cmn_set_wp_idx(unsigned long *wp_idx, unsigned int pos, bool val)
+{
+ if (val)
+ set_bit(pos, wp_idx);
+}
+
+static unsigned int arm_cmn_get_wp_idx(unsigned long *wp_idx, unsigned int pos)
+{
+ return test_bit(pos, wp_idx);
+}
+
struct arm_cmn_event_attr {
struct device_attribute attr;
enum cmn_model model;
@@ -1336,12 +1353,37 @@ static const struct attribute_group *arm_cmn_attr_groups[] = {
NULL
};
-static int arm_cmn_wp_idx(struct perf_event *event)
+static int arm_cmn_find_free_wp_idx(struct arm_cmn_dtm *dtm,
+ struct perf_event *event)
+{
+ int wp_idx = CMN_EVENT_EVENTID(event);
+
+ if (dtm->wp_event[wp_idx] >= 0)
+ if (dtm->wp_event[++wp_idx] >= 0)
+ return -ENOSPC;
+
+ return wp_idx;
+}
+
+static int arm_cmn_get_assigned_wp_idx(struct perf_event *event,
+ struct arm_cmn_hw_event *hw,
+ unsigned int pos)
+{
+ return CMN_EVENT_EVENTID(event) + arm_cmn_get_wp_idx(hw->wp_idx, pos);
+}
+
+static void arm_cmn_claim_wp_idx(struct arm_cmn_dtm *dtm,
+ struct perf_event *event,
+ unsigned int dtc, int wp_idx,
+ unsigned int pos)
{
- return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+
+ dtm->wp_event[wp_idx] = hw->dtc_idx[dtc];
+ arm_cmn_set_wp_idx(hw->wp_idx, pos, wp_idx - CMN_EVENT_EVENTID(event));
}
-static u32 arm_cmn_wp_config(struct perf_event *event)
+static u32 arm_cmn_wp_config(struct perf_event *event, int wp_idx)
{
u32 config;
u32 dev = CMN_EVENT_WP_DEV_SEL(event);
@@ -1351,6 +1393,10 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
u32 combine = CMN_EVENT_WP_COMBINE(event);
bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;
+ /* CMN-600 supports only primary and secondary matching groups */
+ if (is_cmn600)
+ grp &= 1;
+
config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
@@ -1358,7 +1404,9 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
if (exc)
config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
- if (combine && !grp)
+
+ /* wp_combine is available only on WP0 and WP2 */
+ if (combine && !(wp_idx & 0x1))
config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
CMN_DTM_WPn_CONFIG_WP_COMBINE;
return config;
@@ -1520,12 +1568,12 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
cmn->dtc[i].cc_active = true;
} else if (type == CMN_TYPE_WP) {
- int wp_idx = arm_cmn_wp_idx(event);
u64 val = CMN_EVENT_WP_VAL(event);
u64 mask = CMN_EVENT_WP_MASK(event);
for_each_hw_dn(hw, dn, i) {
void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
+ int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);
writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
@@ -1550,10 +1598,9 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
i = hw->dtc_idx[0];
cmn->dtc[i].cc_active = false;
} else if (type == CMN_TYPE_WP) {
- int wp_idx = arm_cmn_wp_idx(event);
-
for_each_hw_dn(hw, dn, i) {
void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
+ int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);
writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
@@ -1571,10 +1618,23 @@ struct arm_cmn_val {
u8 dtm_count[CMN_MAX_DTMS];
u8 occupid[CMN_MAX_DTMS][SEL_MAX];
u8 wp[CMN_MAX_DTMS][4];
+ u8 wp_combine[CMN_MAX_DTMS][2];
int dtc_count[CMN_MAX_DTCS];
bool cycles;
};
+static int arm_cmn_val_find_free_wp_config(struct perf_event *event,
+ struct arm_cmn_val *val, int dtm)
+{
+ int wp_idx = CMN_EVENT_EVENTID(event);
+
+ if (val->wp[dtm][wp_idx])
+ if (val->wp[dtm][++wp_idx])
+ return -ENOSPC;
+
+ return wp_idx;
+}
+
static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
struct perf_event *event)
{
@@ -1606,8 +1666,9 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
if (type != CMN_TYPE_WP)
continue;
- wp_idx = arm_cmn_wp_idx(event);
- val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
+ wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm);
+ val->wp[dtm][wp_idx] = 1;
+ val->wp_combine[dtm][wp_idx >> 1] += !!CMN_EVENT_WP_COMBINE(event);
}
}
@@ -1631,6 +1692,7 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
return -ENOMEM;
arm_cmn_val_add_event(cmn, val, leader);
+
for_each_sibling_event(sibling, leader)
arm_cmn_val_add_event(cmn, val, sibling);
@@ -1645,7 +1707,7 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
goto done;
for_each_hw_dn(hw, dn, i) {
- int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel;
+ int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;
if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
goto done;
@@ -1657,12 +1719,12 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
if (type != CMN_TYPE_WP)
continue;
- wp_idx = arm_cmn_wp_idx(event);
- if (val->wp[dtm][wp_idx])
+ wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm);
+ if (wp_idx < 0)
goto done;
- wp_cmb = val->wp[dtm][wp_idx ^ 1];
- if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
+ if (wp_idx & 1 &&
+ val->wp_combine[dtm][wp_idx >> 1] != !!CMN_EVENT_WP_COMBINE(event))
goto done;
}
@@ -1773,8 +1835,11 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
- if (type == CMN_TYPE_WP)
- dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
+ if (type == CMN_TYPE_WP) {
+ int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);
+
+ dtm->wp_event[wp_idx] = -1;
+ }
if (hw->filter_sel > SEL_NONE)
hw->dn[i].occupid[hw->filter_sel].count--;
@@ -1783,6 +1848,7 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
}
memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
+ memset(hw->wp_idx, 0, sizeof(hw->wp_idx));
for_each_hw_dtc_idx(hw, j, idx)
cmn->dtc[j].counters[idx] = NULL;
@@ -1836,19 +1902,23 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
if (type == CMN_TYPE_XP) {
input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
} else if (type == CMN_TYPE_WP) {
- int tmp, wp_idx = arm_cmn_wp_idx(event);
- u32 cfg = arm_cmn_wp_config(event);
+ int tmp, wp_idx;
+ u32 cfg;
- if (dtm->wp_event[wp_idx] >= 0)
+ wp_idx = arm_cmn_find_free_wp_idx(dtm, event);
+ if (wp_idx < 0)
goto free_dtms;
+ cfg = arm_cmn_wp_config(event, wp_idx);
+
tmp = dtm->wp_event[wp_idx ^ 1];
if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp]))
goto free_dtms;
input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
- dtm->wp_event[wp_idx] = hw->dtc_idx[d];
+
+ arm_cmn_claim_wp_idx(dtm, event, d, wp_idx, i);
writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
} else {
struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
diff --git a/drivers/perf/arm_cspmu/ampere_cspmu.c b/drivers/perf/arm_cspmu/ampere_cspmu.c
index f146a455e838..f72f5689923c 100644
--- a/drivers/perf/arm_cspmu/ampere_cspmu.c
+++ b/drivers/perf/arm_cspmu/ampere_cspmu.c
@@ -269,4 +269,5 @@ static void __exit ampere_cspmu_exit(void)
module_init(ampere_cspmu_init);
module_exit(ampere_cspmu_exit);
+MODULE_DESCRIPTION("Ampere SoC Performance Monitor Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index c318dc909767..2158a5975c90 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -1427,4 +1427,5 @@ EXPORT_SYMBOL_GPL(arm_cspmu_impl_unregister);
module_init(arm_cspmu_init);
module_exit(arm_cspmu_exit);
+MODULE_DESCRIPTION("ARM CoreSight Architecture Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index 5b84b701ad62..d0ef611240aa 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -417,4 +417,5 @@ static void __exit nvidia_cspmu_exit(void)
module_init(nvidia_cspmu_init);
module_exit(nvidia_cspmu_exit);
+MODULE_DESCRIPTION("NVIDIA Coresight Architecture Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 23fa6c5da82c..cf0430c266a6 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -25,8 +25,6 @@
#include <linux/smp.h>
#include <linux/nmi.h>
-#include <asm/arm_pmuv3.h>
-
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -338,6 +336,11 @@ static bool armv8pmu_event_want_user_access(struct perf_event *event)
return ATTR_CFG_GET_FLD(&event->attr, rdpmc);
}
+static u32 armv8pmu_event_get_threshold(struct perf_event_attr *attr)
+{
+ return ATTR_CFG_GET_FLD(attr, threshold);
+}
+
static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr)
{
u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare);
@@ -941,7 +944,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
/* Always prefer to place a cycle counter into the cycle counter. */
- if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
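+ /*
+ * The fixed cycle counter does not support threshold counting, so only
+ * use it when no threshold has been requested.
+ */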
+ if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
+ !armv8pmu_event_get_threshold(&event->attr)) {
if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return ARMV8_IDX_CYCLE_COUNTER;
else if (armv8pmu_event_is_64bit(event) &&
@@ -1033,13 +1037,13 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
* If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will
* be 0 and will also trigger this check, preventing it from being used.
*/
- th = ATTR_CFG_GET_FLD(attr, threshold);
+ th = armv8pmu_event_get_threshold(attr);
if (th > threshold_max(cpu_pmu)) {
pr_debug("PMU event threshold exceeds max value\n");
return -EINVAL;
}
- if (IS_ENABLED(CONFIG_ARM64) && th) {
+ if (th) {
config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th);
config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC,
armv8pmu_event_threshold_control(attr));
@@ -1340,14 +1344,20 @@ PMUV3_INIT_SIMPLE(armv9_cortex_a520)
PMUV3_INIT_SIMPLE(armv9_cortex_a710)
PMUV3_INIT_SIMPLE(armv9_cortex_a715)
PMUV3_INIT_SIMPLE(armv9_cortex_a720)
+PMUV3_INIT_SIMPLE(armv9_cortex_a725)
PMUV3_INIT_SIMPLE(armv8_cortex_x1)
PMUV3_INIT_SIMPLE(armv9_cortex_x2)
PMUV3_INIT_SIMPLE(armv9_cortex_x3)
PMUV3_INIT_SIMPLE(armv9_cortex_x4)
+PMUV3_INIT_SIMPLE(armv9_cortex_x925)
PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
+PMUV3_INIT_SIMPLE(armv9_neoverse_n3)
PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
+PMUV3_INIT_SIMPLE(armv8_neoverse_v2)
+PMUV3_INIT_SIMPLE(armv8_neoverse_v3)
+PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
@@ -1379,14 +1389,20 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
{.compatible = "arm,cortex-a715-pmu", .data = armv9_cortex_a715_pmu_init},
{.compatible = "arm,cortex-a720-pmu", .data = armv9_cortex_a720_pmu_init},
+ {.compatible = "arm,cortex-a725-pmu", .data = armv9_cortex_a725_pmu_init},
{.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
{.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
{.compatible = "arm,cortex-x3-pmu", .data = armv9_cortex_x3_pmu_init},
{.compatible = "arm,cortex-x4-pmu", .data = armv9_cortex_x4_pmu_init},
+ {.compatible = "arm,cortex-x925-pmu", .data = armv9_cortex_x925_pmu_init},
{.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
{.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
{.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
+ {.compatible = "arm,neoverse-n3-pmu", .data = armv9_neoverse_n3_pmu_init},
{.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
+ {.compatible = "arm,neoverse-v2-pmu", .data = armv8_neoverse_v2_pmu_init},
+ {.compatible = "arm,neoverse-v3-pmu", .data = armv8_neoverse_v3_pmu_init},
+ {.compatible = "arm,neoverse-v3ae-pmu", .data = armv8_neoverse_v3ae_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
diff --git a/arch/arm/kernel/perf_event_v6.c b/drivers/perf/arm_v6_pmu.c
index d9fd53841591..0bb685b4bac5 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/drivers/perf/arm_v6_pmu.c
@@ -31,8 +31,6 @@
* enable the interrupt.
*/
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
-
#include <asm/cputype.h>
#include <asm/irq_regs.h>
@@ -403,13 +401,6 @@ static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
}
-static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv6pmu_init(cpu_pmu);
- cpu_pmu->name = "armv6_1156";
- return 0;
-}
-
static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
armv6pmu_init(cpu_pmu);
@@ -423,17 +414,9 @@ static const struct of_device_id armv6_pmu_of_device_ids[] = {
{ /* sentinel value */ }
};
-static const struct pmu_probe_info armv6_pmu_probe_table[] = {
- ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
- ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
- ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
- { /* sentinel value */ }
-};
-
static int armv6_pmu_device_probe(struct platform_device *pdev)
{
- return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
- armv6_pmu_probe_table);
+ return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids, NULL);
}
static struct platform_driver armv6_pmu_driver = {
@@ -445,4 +428,3 @@ static struct platform_driver armv6_pmu_driver = {
};
builtin_platform_driver(armv6_pmu_driver);
-#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/kernel/perf_event_v7.c b/drivers/perf/arm_v7_pmu.c
index a3322e2b3ea4..928ac3d626ed 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/drivers/perf/arm_v7_pmu.c
@@ -17,8 +17,6 @@
* counter and all 4 performance counters together can be reset separately.
*/
-#ifdef CONFIG_CPU_V7
-
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
@@ -1979,17 +1977,9 @@ static const struct of_device_id armv7_pmu_of_device_ids[] = {
{},
};
-static const struct pmu_probe_info armv7_pmu_probe_table[] = {
- ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
- ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
- { /* sentinel value */ }
-};
-
-
static int armv7_pmu_device_probe(struct platform_device *pdev)
{
- return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
- armv7_pmu_probe_table);
+ return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids, NULL);
}
static struct platform_driver armv7_pmu_driver = {
@@ -2002,4 +1992,3 @@ static struct platform_driver armv7_pmu_driver = {
};
builtin_platform_driver(armv7_pmu_driver);
-#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_xscale.c b/drivers/perf/arm_xscale_pmu.c
index 7a2ba1c689a7..3d8b72d6b37f 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/drivers/perf/arm_xscale_pmu.c
@@ -13,8 +13,6 @@
* PMU structures.
*/
-#ifdef CONFIG_CPU_XSCALE
-
#include <asm/cputype.h>
#include <asm/irq_regs.h>
@@ -745,4 +743,3 @@ static struct platform_driver xscale_pmu_driver = {
};
builtin_platform_driver(xscale_pmu_driver);
-#endif /* CONFIG_CPU_XSCALE */
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 1f93a66eff5b..43d68b69e630 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -972,6 +972,7 @@ static __exit void cxl_pmu_exit(void)
cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
}
+MODULE_DESCRIPTION("CXL Performance Monitor Driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CXL);
module_init(cxl_pmu_init);
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 1bbdb29743c4..746b92330ca7 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -850,4 +850,5 @@ static struct platform_driver imx_ddr_pmu_driver = {
};
module_platform_driver(imx_ddr_pmu_driver);
+MODULE_DESCRIPTION("Freescale i.MX8 DDR Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 72c2d3074cde..69f920b1caf2 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -11,14 +11,24 @@
#include <linux/perf_event.h>
/* Performance monitor configuration */
-#define PMCFG1 0x00
-#define PMCFG1_RD_TRANS_FILT_EN BIT(31)
-#define PMCFG1_WR_TRANS_FILT_EN BIT(30)
-#define PMCFG1_RD_BT_FILT_EN BIT(29)
-#define PMCFG1_ID_MASK GENMASK(17, 0)
+#define PMCFG1 0x00
+#define MX93_PMCFG1_RD_TRANS_FILT_EN BIT(31)
+#define MX93_PMCFG1_WR_TRANS_FILT_EN BIT(30)
+#define MX93_PMCFG1_RD_BT_FILT_EN BIT(29)
+#define MX93_PMCFG1_ID_MASK GENMASK(17, 0)
-#define PMCFG2 0x04
-#define PMCFG2_ID GENMASK(17, 0)
+#define MX95_PMCFG1_WR_BEAT_FILT_EN BIT(31)
+#define MX95_PMCFG1_RD_BEAT_FILT_EN BIT(30)
+
+#define PMCFG2 0x04
+#define MX93_PMCFG2_ID GENMASK(17, 0)
+
+#define PMCFG3 0x08
+#define PMCFG4 0x0C
+#define PMCFG5 0x10
+#define PMCFG6 0x14
+#define MX95_PMCFG_ID_MASK GENMASK(9, 0)
+#define MX95_PMCFG_ID GENMASK(25, 16)
/* Global control register affects all counters and takes priority over local control registers */
#define PMGC0 0x40
@@ -41,6 +51,10 @@
#define NUM_COUNTERS 11
#define CYCLES_COUNTER 0
+#define CYCLES_EVENT_ID 0
+
+#define CONFIG_EVENT_MASK GENMASK(7, 0)
+#define CONFIG_COUNTER_MASK GENMASK(23, 16)
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
@@ -71,8 +85,23 @@ static const struct imx_ddr_devtype_data imx93_devtype_data = {
.identifier = "imx93",
};
+static const struct imx_ddr_devtype_data imx95_devtype_data = {
+ .identifier = "imx95",
+};
+
+static inline bool is_imx93(struct ddr_pmu *pmu)
+{
+ return pmu->devtype_data == &imx93_devtype_data;
+}
+
+static inline bool is_imx95(struct ddr_pmu *pmu)
+{
+ return pmu->devtype_data == &imx95_devtype_data;
+}
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
- {.compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data},
+ { .compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data },
+ { .compatible = "fsl,imx95-ddr-pmu", .data = &imx95_devtype_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -118,21 +147,40 @@ static const struct attribute_group ddr_perf_cpumask_attr_group = {
.attrs = ddr_perf_cpumask_attrs,
};
+struct imx9_pmu_events_attr {
+ struct device_attribute attr;
+ u64 id;
+ const void *devtype_data;
+};
+
static ssize_t ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
- struct perf_pmu_events_attr *pmu_attr;
+ struct imx9_pmu_events_attr *pmu_attr;
- pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ pmu_attr = container_of(attr, struct imx9_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
-#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \
- (&((struct perf_pmu_events_attr[]) { \
+#define COUNTER_OFFSET_IN_EVENT 8
+#define ID(counter, id) ((counter << COUNTER_OFFSET_IN_EVENT) | id)
+
+#define DDR_PMU_EVENT_ATTR_COMM(_name, _id, _data) \
+ (&((struct imx9_pmu_events_attr[]) { \
{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
- .id = _id, } \
+ .id = _id, \
+ .devtype_data = _data, } \
})[0].attr.attr)
+#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \
+ DDR_PMU_EVENT_ATTR_COMM(_name, _id, NULL)
+
+#define IMX93_DDR_PMU_EVENT_ATTR(_name, _id) \
+ DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx93_devtype_data)
+
+#define IMX95_DDR_PMU_EVENT_ATTR(_name, _id) \
+ DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx95_devtype_data)
+
static struct attribute *ddr_perf_events_attrs[] = {
/* counter0 cycles event */
IMX9_DDR_PMU_EVENT_ATTR(cycles, 0),
@@ -159,90 +207,114 @@ static struct attribute *ddr_perf_events_attrs[] = {
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63),
/* counter1 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, 71),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, ID(1, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, ID(1, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, ID(1, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, ID(1, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, ID(1, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, ID(1, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, ID(1, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, ID(1, 71)),
/* counter2 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, 72),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, 73),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, ID(2, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, ID(2, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, ID(2, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, ID(2, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, ID(2, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, ID(2, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, ID(2, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, ID(2, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, ID(2, 72)),
+ IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, ID(2, 73)), /* imx93 specific*/
+ IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_beat_filt, ID(2, 73)), /* imx95 specific*/
/* counter3 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, 72),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, 73),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, ID(3, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, ID(3, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, ID(3, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, ID(3, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, ID(3, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, ID(3, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, ID(3, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, ID(3, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, ID(3, 72)),
+ IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, ID(3, 73)), /* imx93 specific*/
+ IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt2, ID(3, 73)), /* imx95 specific*/
/* counter4 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, 72),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, 73),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, ID(4, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, ID(4, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, ID(4, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, ID(4, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, ID(4, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, ID(4, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, ID(4, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, ID(4, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, ID(4, 72)),
+ IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, ID(4, 73)), /* imx93 specific*/
+ IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt1, ID(4, 73)), /* imx95 specific*/
/* counter5 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, 72),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, ID(5, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, ID(5, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, ID(5, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, ID(5, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, ID(5, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, ID(5, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, ID(5, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, ID(5, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, ID(5, 72)),
+ IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt0, ID(5, 73)), /* imx95 specific*/
/* counter6 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, 72),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, ID(6, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, ID(6, 72)),
/* counter7 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, 64),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, ID(7, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, ID(7, 65)),
/* counter8 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, 64),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, ID(8, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, ID(8, 65)),
/* counter9 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, ID(9, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, ID(9, 66)),
/* counter10 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, 65),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, ID(10, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, ID(10, 66)),
NULL,
};
+static umode_t
+ddr_perf_events_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+ struct imx9_pmu_events_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr.attr);
+
+ if (!eattr->devtype_data)
+ return attr->mode;
+
+ if (eattr->devtype_data != ddr_pmu->devtype_data)
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group ddr_perf_events_attr_group = {
.name = "events",
.attrs = ddr_perf_events_attrs,
+ .is_visible = ddr_perf_events_attrs_is_visible,
};
-PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(event, "config:0-7,16-23");
PMU_FORMAT_ATTR(counter, "config:8-15");
PMU_FORMAT_ATTR(axi_id, "config1:0-17");
PMU_FORMAT_ATTR(axi_mask, "config2:0-17");
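The per-SoC event attributes above are not split into separate attribute arrays; instead the group's .is_visible hook filters them when the sysfs group is registered, so only events whose devtype_data matches the probed device appear under events/. Condensed, the logic added here amounts to the following (a sketch, not the literal driver code):

static umode_t event_is_visible(struct kobject *kobj,
				struct attribute *attr, int unused)
{
	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
	struct imx9_pmu_events_attr *eattr =
		container_of(attr, typeof(*eattr), attr.attr);

	/* NULL devtype_data marks an event common to all supported SoCs */
	if (eattr->devtype_data && eattr->devtype_data != ddr_pmu->devtype_data)
		return 0;		/* hide: belongs to another SoC */

	return attr->mode;		/* keep the declared 0444 mode */
}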
@@ -339,8 +411,10 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
int counter, bool enable)
{
u32 ctrl_a;
+ int event;
ctrl_a = readl_relaxed(pmu->base + PMLCA(counter));
+ event = FIELD_GET(CONFIG_EVENT_MASK, config);
if (enable) {
ctrl_a |= PMLCA_FC;
@@ -352,7 +426,7 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
ctrl_a &= ~PMLCA_FC;
ctrl_a |= PMLCA_CE;
ctrl_a &= ~FIELD_PREP(PMLCA_EVENT, 0x7F);
- ctrl_a |= FIELD_PREP(PMLCA_EVENT, (config & 0x000000FF));
+ ctrl_a |= FIELD_PREP(PMLCA_EVENT, event);
writel(ctrl_a, pmu->base + PMLCA(counter));
} else {
/* Freeze counter. */
@@ -361,39 +435,79 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
}
}
-static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int cfg2)
+static void imx93_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
+ int counter, int axi_id, int axi_mask)
{
u32 pmcfg1, pmcfg2;
- int event, counter;
-
- event = cfg & 0x000000FF;
- counter = (cfg & 0x0000FF00) >> 8;
+ u32 mask[] = { MX93_PMCFG1_RD_TRANS_FILT_EN,
+ MX93_PMCFG1_WR_TRANS_FILT_EN,
+ MX93_PMCFG1_RD_BT_FILT_EN };
pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
- if (counter == 2 && event == 73)
- pmcfg1 |= PMCFG1_RD_TRANS_FILT_EN;
- else if (counter == 2 && event != 73)
- pmcfg1 &= ~PMCFG1_RD_TRANS_FILT_EN;
+ if (counter >= 2 && counter <= 4)
+ pmcfg1 = event == 73 ? pmcfg1 | mask[counter - 2] :
+ pmcfg1 & ~mask[counter - 2];
- if (counter == 3 && event == 73)
- pmcfg1 |= PMCFG1_WR_TRANS_FILT_EN;
- else if (counter == 3 && event != 73)
- pmcfg1 &= ~PMCFG1_WR_TRANS_FILT_EN;
+ pmcfg1 &= ~FIELD_PREP(MX93_PMCFG1_ID_MASK, 0x3FFFF);
+ pmcfg1 |= FIELD_PREP(MX93_PMCFG1_ID_MASK, axi_mask);
+ writel_relaxed(pmcfg1, pmu->base + PMCFG1);
- if (counter == 4 && event == 73)
- pmcfg1 |= PMCFG1_RD_BT_FILT_EN;
- else if (counter == 4 && event != 73)
- pmcfg1 &= ~PMCFG1_RD_BT_FILT_EN;
+ pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
+ pmcfg2 &= ~FIELD_PREP(MX93_PMCFG2_ID, 0x3FFFF);
+ pmcfg2 |= FIELD_PREP(MX93_PMCFG2_ID, axi_id);
+ writel_relaxed(pmcfg2, pmu->base + PMCFG2);
+}
- pmcfg1 &= ~FIELD_PREP(PMCFG1_ID_MASK, 0x3FFFF);
- pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, cfg2);
- writel(pmcfg1, pmu->base + PMCFG1);
+static void imx95_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
+ int counter, int axi_id, int axi_mask)
+{
+ u32 pmcfg1, pmcfg, offset = 0;
- pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
- pmcfg2 &= ~FIELD_PREP(PMCFG2_ID, 0x3FFFF);
- pmcfg2 |= FIELD_PREP(PMCFG2_ID, cfg1);
- writel(pmcfg2, pmu->base + PMCFG2);
+ pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
+
+ if (event == 73) {
+ switch (counter) {
+ case 2:
+ pmcfg1 |= MX95_PMCFG1_WR_BEAT_FILT_EN;
+ offset = PMCFG3;
+ break;
+ case 3:
+ pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+ offset = PMCFG4;
+ break;
+ case 4:
+ pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+ offset = PMCFG5;
+ break;
+ case 5:
+ pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+ offset = PMCFG6;
+ break;
+ }
+ } else {
+ switch (counter) {
+ case 2:
+ pmcfg1 &= ~MX95_PMCFG1_WR_BEAT_FILT_EN;
+ break;
+ case 3:
+ case 4:
+ case 5:
+ pmcfg1 &= ~MX95_PMCFG1_RD_BEAT_FILT_EN;
+ break;
+ }
+ }
+
+ writel_relaxed(pmcfg1, pmu->base + PMCFG1);
+
+ if (offset) {
+ pmcfg = readl_relaxed(pmu->base + offset);
+ pmcfg &= ~(FIELD_PREP(MX95_PMCFG_ID_MASK, 0x3FF) |
+ FIELD_PREP(MX95_PMCFG_ID, 0x3FF));
+ pmcfg |= (FIELD_PREP(MX95_PMCFG_ID_MASK, axi_mask) |
+ FIELD_PREP(MX95_PMCFG_ID, axi_id));
+ writel_relaxed(pmcfg, pmu->base + offset);
+ }
}
static void ddr_perf_event_update(struct perf_event *event)
@@ -460,6 +574,28 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
hwc->state = 0;
}
+static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event, int counter)
+{
+ int i;
+
+ if (event == CYCLES_EVENT_ID) {
+ // The cycles counter is dedicated to the cycle event.
+ if (pmu->events[CYCLES_COUNTER] == NULL)
+ return CYCLES_COUNTER;
+ } else if (counter != 0) {
+ // A counter-specific event uses its designated counter.
+ if (pmu->events[counter] == NULL)
+ return counter;
+ } else {
+ // Auto-allocate a counter for a reference event.
+ for (i = 1; i < NUM_COUNTERS; i++)
+ if (pmu->events[i] == NULL)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
static int ddr_perf_event_add(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -467,21 +603,33 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
int cfg = event->attr.config;
int cfg1 = event->attr.config1;
int cfg2 = event->attr.config2;
- int counter;
+ int event_id, counter;
- counter = (cfg & 0x0000FF00) >> 8;
+ event_id = FIELD_GET(CONFIG_EVENT_MASK, cfg);
+ counter = FIELD_GET(CONFIG_COUNTER_MASK, cfg);
+
+ counter = ddr_perf_alloc_counter(pmu, event_id, counter);
+ if (counter < 0) {
+ dev_dbg(pmu->dev, "There are not enough counters\n");
+ return -EOPNOTSUPP;
+ }
pmu->events[counter] = event;
pmu->active_events++;
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
+ if (is_imx93(pmu))
+ /* read trans, write trans, read beat */
+ imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
+
+ if (is_imx95(pmu))
+ /* write beat, read beat2, read beat1, read beat */
+ imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
+
if (flags & PERF_EF_START)
ddr_perf_event_start(event, flags);
- /* read trans, write trans, read beat */
- ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
-
return 0;
}
@@ -501,9 +649,11 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
ddr_perf_event_stop(event, PERF_EF_UPDATE);
+ pmu->events[counter] = NULL;
pmu->active_events--;
hwc->idx = -1;
}
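The config layout change deserves a note. Each sysfs event value is built with ID(counter, id), so the preferred counter sits in bits 8-15 of the value advertised under events/ (ID(2, 73) == 0x249, for example). Because the "event" format spans config:0-7,16-23, the perf tool packs that value so the event id lands in config[7:0] and the counter in config[23:16], which is exactly what CONFIG_EVENT_MASK and CONFIG_COUNTER_MASK extract; a counter of 0 means "auto-allocate" and is handled by ddr_perf_alloc_counter(). A small self-check of that round trip (a sketch, assuming the macros introduced above):

static void imx9_cfg_roundtrip_demo(void)
{
	/* eddrtq_pm_rd_trans_filt on counter 2 is advertised as event=0x249 == ID(2, 73) */
	u64 cfg = FIELD_PREP(CONFIG_EVENT_MASK, 73) |	/* event id -> config[7:0]   */
		  FIELD_PREP(CONFIG_COUNTER_MASK, 2);	/* counter  -> config[23:16] */

	WARN_ON(FIELD_GET(CONFIG_EVENT_MASK, cfg) != 73);
	WARN_ON(FIELD_GET(CONFIG_COUNTER_MASK, cfg) != 2);
}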
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 6392cbedcd06..918cdc31de57 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -537,4 +537,5 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
}
EXPORT_SYMBOL_GPL(hisi_pmu_init);
+MODULE_DESCRIPTION("HiSilicon SoC uncore Performance Monitor driver framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index e2abca188dbe..94f1ebcd2a27 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -763,4 +763,5 @@ module_init(cn10k_ddr_pmu_init);
module_exit(cn10k_ddr_pmu_exit);
MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
+MODULE_DESCRIPTION("Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 78c490e0505a..0a02e85a8951 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -167,7 +167,7 @@ u64 riscv_pmu_event_update(struct perf_event *event)
unsigned long cmask;
u64 oldval, delta;
- if (!rvpmu->ctr_read)
+ if (!rvpmu->ctr_read || (hwc->state & PERF_HES_UPTODATE))
return 0;
cmask = riscv_pmu_ctr_get_width_mask(event);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index a2e4005e1fd0..4e842dcedfba 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -20,6 +20,7 @@
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
#include <linux/soc/andes/irq.h>
+#include <linux/workqueue.h>
#include <asm/errata_list.h>
#include <asm/sbi.h>
@@ -114,7 +115,7 @@ struct sbi_pmu_event_data {
};
};
-static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
+static struct sbi_pmu_event_data pmu_hw_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
SBI_PMU_HW_CPU_CYCLES,
SBI_PMU_EVENT_TYPE_HW, 0}},
@@ -148,7 +149,7 @@ static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
};
#define C(x) PERF_COUNT_HW_CACHE_##x
-static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
+static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
@@ -293,6 +294,34 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M
},
};
+static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
+ 0, cmask, 0, edata->event_idx, 0, 0);
+ if (!ret.error) {
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
+ ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
+ } else if (ret.error == SBI_ERR_NOT_SUPPORTED) {
+ /* This event cannot be monitored by any counter */
+ edata->event_idx = -EINVAL;
+ }
+}
+
+static void pmu_sbi_check_std_events(struct work_struct *work)
+{
+ for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
+ pmu_sbi_check_event(&pmu_hw_event_map[i]);
+
+ for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++)
+ for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++)
+ for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
+ pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]);
+}
+
+static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events);
+
static int pmu_sbi_ctr_get_width(int idx)
{
return pmu_ctr_list[idx].width;
@@ -478,6 +507,12 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
u64 raw_config_val;
int ret;
+ /*
+ * Ensure we are finished checking standard hardware events for
+ * validity before allowing userspace to configure any events.
+ */
+ flush_work(&check_std_events_work);
+
switch (type) {
case PERF_TYPE_HARDWARE:
if (config >= PERF_COUNT_HW_MAX)
@@ -762,7 +797,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
* which may include counters that are not enabled yet.
*/
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
- 0, pmu->cmask, 0, 0, 0, 0);
+ 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
}
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -1359,6 +1394,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
+ /* Asynchronously check which standard events are available */
+ schedule_work(&check_std_events_work);
+
return 0;
out_unregister:
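The new flow validates the standard event tables exactly once and off the probe path: probe schedules a work item that asks the SBI firmware about every generic and cache event (marking unsupported entries with -EINVAL), and pmu_sbi_event_map() flushes that work before translating a user-requested event, so userspace can never race ahead of the validation. The pattern in isolation looks like this (names hypothetical, a minimal sketch):

static void check_std_events(struct work_struct *work)
{
	/* probe each table entry against firmware; mark unsupported ones */
}
static DECLARE_WORK(check_std_events_work, check_std_events);

static int pmu_probe(struct platform_device *pdev)
{
	/* ... register the PMU ... */
	schedule_work(&check_std_events_work);	/* don't block probe on firmware */
	return 0;
}

static int pmu_event_map(struct perf_event *event, u64 *econfig)
{
	flush_work(&check_std_events_work);	/* results must be final here */
	/* ... translate using the (possibly trimmed) tables ... */
	return 0;
}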
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7f999e8a433d..7b00945f7191 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -187,6 +187,31 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN,
};
+static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V6_N4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V6_N4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V6_N4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_N4_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V6_N4_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_N4_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_N4_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_N4_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN,
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -997,6 +1022,31 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40),
QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
@@ -1011,6 +1061,19 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
@@ -1059,6 +1122,74 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
@@ -1273,20 +1404,20 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = {
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10),
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
@@ -1794,22 +1925,22 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl),
- .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
- .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
- .dp_tx_tbl = qmp_v6_dp_tx_tbl,
- .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+ .dp_serdes_tbl = qmp_v6_n4_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_n4_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl),
- .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
- .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
- .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
- .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
- .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
- .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
- .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
- .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+ .serdes_tbl_rbr = qmp_v6_n4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_n4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_n4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_n4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3),
- .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
- .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+ .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
.swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
@@ -1822,7 +1953,7 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v45_usb3phy_regs_layout,
+ .regs = qmp_v6_n4_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
new file mode 100644
index 000000000000..b3024714dab4
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V6_N4_H_
+#define QCOM_PHY_QMP_PCS_V6_N4_H_
+
+/* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */
+#define QPHY_V6_N4_PCS_SW_RESET 0x000
+#define QPHY_V6_N4_PCS_PCS_STATUS1 0x014
+#define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V6_N4_PCS_START_CONTROL 0x044
+#define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V6_N4_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V6_N4_PCS_RX_CONFIG 0x1b0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V6_N4_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V6_N4_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V6_N4_PCS_EQ_CONFIG5 0x1ec
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
index a814ad11af07..d37cc0d4fd36 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
@@ -6,11 +6,24 @@
#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
#define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+#define QSERDES_V6_N4_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V6_N4_TX_TX_DRV_LVL 0x14
+#define QSERDES_V6_N4_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN 0x20
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN 0x48
+#define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN 0x4c
+#define QSERDES_V6_N4_TX_TX_POL_INV 0x50
+#define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN 0x54
#define QSERDES_V6_N4_TX_LANE_MODE_1 0x78
#define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c
#define QSERDES_V6_N4_TX_LANE_MODE_3 0x80
+#define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN 0xac
+#define QSERDES_V6_N4_TX_TX_BAND 0xd8
+#define QSERDES_V6_N4_TX_INTERFACE_SELECT 0xe4
+#define QSERDES_V6_N4_TX_VMODE_CTRL1 0xb0
#define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8
#define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index d10b8f653c4b..d0f41e4aaa85 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -46,6 +46,8 @@
#include "phy-qcom-qmp-pcs-v6.h"
+#include "phy-qcom-qmp-pcs-v6-n4.h"
+
#include "phy-qcom-qmp-pcs-v6_20.h"
#include "phy-qcom-qmp-pcs-v7.h"
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index fbab6ac0f0d1..7594f64eb737 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -188,6 +188,9 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus);
if (ch->soc_no_adp_ctrl) {
+ if (ch->vbus)
+ regulator_hardware_enable(ch->vbus, vbus);
+
vbus_ctrl_reg = USB2_VBCTRL;
vbus_ctrl_val = USB2_VBCTRL_VBOUT;
}
@@ -718,7 +721,10 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(channel->rphys[i].phy, &channel->rphys[i]);
}
- channel->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (channel->soc_no_adp_ctrl && channel->is_otg_channel)
+ channel->vbus = devm_regulator_get_exclusive(dev, "vbus");
+ else
+ channel->vbus = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(channel->vbus)) {
if (PTR_ERR(channel->vbus) == -EPROBE_DEFER) {
ret = PTR_ERR(channel->vbus);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 7178a38475cc..27fd54795791 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -245,7 +245,7 @@ static const char * const irq_type_names[] = {
};
static bool persist_gpio_outputs;
-module_param(persist_gpio_outputs, bool, 0644);
+module_param(persist_gpio_outputs, bool, 0444);
MODULE_PARM_DESC(persist_gpio_outputs, "Enable GPIO_OUT persistence when pin is freed");
static inline u32 bcm2835_gpio_rd(struct bcm2835_pinctrl *pc, unsigned reg)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index cffeb869130d..f424a57f0013 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1106,8 +1106,8 @@ static struct pinctrl *create_pinctrl(struct device *dev,
* an -EPROBE_DEFER later, as that is the worst case.
*/
if (ret == -EPROBE_DEFER) {
- pinctrl_free(p, false);
mutex_unlock(&pinctrl_maps_mutex);
+ pinctrl_free(p, false);
return ERR_PTR(ret);
}
}
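The reordering here is a lock-ordering fix: pinctrl_free() can end up taking pinctrl_maps_mutex again (through the devicetree map teardown), so calling it while create_pinctrl() still holds that mutex on the -EPROBE_DEFER path risks a self-deadlock; dropping the lock first removes it. In general form (a sketch of the anti-pattern and the fix, not the driver code):

	mutex_lock(&maps_mutex);
	/* ... */
	if (ret == -EPROBE_DEFER) {
		/* BAD: cleanup() may itself take maps_mutex -> self-deadlock */
		/* cleanup(p); mutex_unlock(&maps_mutex); */

		/* Fixed ordering: release the lock, then run the cleanup. */
		mutex_unlock(&maps_mutex);
		cleanup(p);
		return ERR_PTR(ret);
	}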
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
index 22e3cd2cc963..6f44a13b90ce 100644
--- a/drivers/pinctrl/pinctrl-da9062.c
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -139,7 +139,7 @@ static int da9062_gpio_direction_input(struct gpio_chip *gc,
{
struct da9062_pctl *pctl = gpiochip_get_data(gc);
struct regmap *regmap = pctl->da9062->regmap;
- struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
+ struct gpio_desc *desc = gpio_device_get_desc(gc->gpiodev, offset);
unsigned int gpi_type;
int ret;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3bedf36a0019..3f56991f5b89 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -634,23 +634,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
{
- .num = 2,
- .pin = 12,
- .reg = 0x24,
- .bit = 8,
- .mask = 0x3
- }, {
+ /* gpio2_b7_sel */
.num = 2,
.pin = 15,
.reg = 0x28,
.bit = 0,
.mask = 0x7
}, {
+ /* gpio2_c7_sel */
.num = 2,
.pin = 23,
.reg = 0x30,
.bit = 14,
.mask = 0x3
+ }, {
+ /* gpio3_b1_sel */
+ .num = 3,
+ .pin = 9,
+ .reg = 0x44,
+ .bit = 2,
+ .mask = 0x3
+ }, {
+ /* gpio3_b2_sel */
+ .num = 3,
+ .pin = 10,
+ .reg = 0x44,
+ .bit = 4,
+ .mask = 0x3
+ }, {
+ /* gpio3_b3_sel */
+ .num = 3,
+ .pin = 11,
+ .reg = 0x44,
+ .bit = 6,
+ .mask = 0x3
+ }, {
+ /* gpio3_b4_sel */
+ .num = 3,
+ .pin = 12,
+ .reg = 0x44,
+ .bit = 8,
+ .mask = 0x3
+ }, {
+ /* gpio3_b5_sel */
+ .num = 3,
+ .pin = 13,
+ .reg = 0x44,
+ .bit = 10,
+ .mask = 0x3
+ }, {
+ /* gpio3_b6_sel */
+ .num = 3,
+ .pin = 14,
+ .reg = 0x44,
+ .bit = 12,
+ .mask = 0x3
+ }, {
+ /* gpio3_b7_sel */
+ .num = 3,
+ .pin = 15,
+ .reg = 0x44,
+ .bit = 14,
+ .mask = 0x3
},
};
@@ -2433,6 +2478,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -2491,6 +2537,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -2704,8 +2751,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
if (ret) {
/* revert the already done pin settings */
- for (cnt--; cnt >= 0; cnt--)
+ for (cnt--; cnt >= 0; cnt--) {
+ bank = pin_to_bank(info, pins[cnt]);
rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
+ }
return ret;
}
@@ -2753,6 +2802,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -3763,7 +3813,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
- IOMUX_WIDTH_3BIT,
+ 0,
IOMUX_WIDTH_3BIT,
0),
PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
@@ -3777,7 +3827,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = {
.pin_banks = rk3328_pin_banks,
.nr_banks = ARRAY_SIZE(rk3328_pin_banks),
.label = "RK3328-GPIO",
- .type = RK3288,
+ .type = RK3328,
.grf_mux_offset = 0x0,
.iomux_recalced = rk3328_mux_recalced_data,
.niomux_recalced = ARRAY_SIZE(rk3328_mux_recalced_data),
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 4759f336941e..849266f8b191 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -193,6 +193,7 @@ enum rockchip_pinctrl_type {
RK3188,
RK3288,
RK3308,
+ RK3328,
RK3368,
RK3399,
RK3568,
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c
index 085047320853..5e7c7cf93445 100644
--- a/drivers/pinctrl/pinctrl-tps6594.c
+++ b/drivers/pinctrl/pinctrl-tps6594.c
@@ -486,6 +486,7 @@ static int tps6594_pinctrl_probe(struct platform_device *pdev)
break;
case TPS6593:
case TPS6594:
+ case LP8764:
pctrl_desc->pins = tps6594_pins;
pctrl_desc->npins = ARRAY_SIZE(tps6594_pins);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 4e80c7204e5f..4abd6f18bbef 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1207,7 +1207,6 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8},
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
- { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c3256bfde502..60be78da9f52 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2071,11 +2071,11 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
* This has to be atomically executed to protect against a concurrent
* interrupt.
*/
- raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ spin_lock_irqsave(&pctrl->lock, flags);
ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
- raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
if (ret)
dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 81a298517df2..960fd6a82450 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -7,6 +7,8 @@ source "drivers/platform/goldfish/Kconfig"
source "drivers/platform/chrome/Kconfig"
+source "drivers/platform/cznic/Kconfig"
+
source "drivers/platform/mellanox/Kconfig"
source "drivers/platform/olpc/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index fbbe4f77aa5d..19ac54648586 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC_EC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
+obj-$(CONFIG_CZNIC_PLATFORMS) += cznic/
obj-$(CONFIG_SURFACE_PLATFORMS) += surface/
-obj-$(CONFIG_ARM64) += arm64/
+obj-$(CONFIG_ARM64_PLATFORM_DEVICES) += arm64/
diff --git a/drivers/platform/arm64/Kconfig b/drivers/platform/arm64/Kconfig
index 8fdca0f8e909..f88395ea3376 100644
--- a/drivers/platform/arm64/Kconfig
+++ b/drivers/platform/arm64/Kconfig
@@ -18,6 +18,7 @@ if ARM64_PLATFORM_DEVICES
config EC_ACER_ASPIRE1
tristate "Acer Aspire 1 Embedded Controller driver"
+ depends on ARCH_QCOM || COMPILE_TEST
depends on I2C
depends on DRM
depends on POWER_SUPPLY
@@ -32,4 +33,20 @@ config EC_ACER_ASPIRE1
laptop where this information is not properly exposed via the
standard ACPI devices.
+config EC_LENOVO_YOGA_C630
+ tristate "Lenovo Yoga C630 Embedded Controller driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on I2C
+ select AUXILIARY_BUS
+ help
+ Driver for the Embedded Controller in the Qualcomm Snapdragon-based
+ Lenovo Yoga C630, which provides battery and power adapter
+ information.
+
+ This driver provides battery and AC status support for the mentioned
+ laptop where this information is not properly exposed via the
+ standard ACPI devices.
+
+ Say M or Y here to include this support.
+
endif # ARM64_PLATFORM_DEVICES
diff --git a/drivers/platform/arm64/Makefile b/drivers/platform/arm64/Makefile
index 4fcc9855579b..b2ae9114fdd8 100644
--- a/drivers/platform/arm64/Makefile
+++ b/drivers/platform/arm64/Makefile
@@ -6,3 +6,4 @@
#
obj-$(CONFIG_EC_ACER_ASPIRE1) += acer-aspire1-ec.o
+obj-$(CONFIG_EC_LENOVO_YOGA_C630) += lenovo-yoga-c630.o
diff --git a/drivers/platform/arm64/lenovo-yoga-c630.c b/drivers/platform/arm64/lenovo-yoga-c630.c
new file mode 100644
index 000000000000..1f05c9a6a89d
--- /dev/null
+++ b/drivers/platform/arm64/lenovo-yoga-c630.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024, Linaro Ltd
+ * Authors:
+ * Bjorn Andersson
+ * Dmitry Baryshkov
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/irqreturn.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/platform_data/lenovo-yoga-c630.h>
+
+#define LENOVO_EC_RESPONSE_REG 0x01
+#define LENOVO_EC_REQUEST_REG 0x02
+
+#define LENOVO_EC_UCSI_WRITE 0x20
+#define LENOVO_EC_UCSI_READ 0x21
+
+#define LENOVO_EC_READ_REG 0xb0
+#define LENOVO_EC_REQUEST_NEXT_EVENT 0x84
+
+#define LENOVO_EC_UCSI_VERSION 0x20
+
+struct yoga_c630_ec {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct blocking_notifier_head notifier_list;
+};
+
+static int yoga_c630_ec_request(struct yoga_c630_ec *ec, u8 *req, size_t req_len,
+ u8 *resp, size_t resp_len)
+{
+ int ret;
+
+ lockdep_assert_held(&ec->lock);
+
+ ret = i2c_smbus_write_i2c_block_data(ec->client, LENOVO_EC_REQUEST_REG,
+ req_len, req);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_read_i2c_block_data(ec->client, LENOVO_EC_RESPONSE_REG,
+ resp_len, resp);
+}
+
+int yoga_c630_ec_read8(struct yoga_c630_ec *ec, u8 addr)
+{
+ u8 req[2] = { LENOVO_EC_READ_REG, };
+ int ret;
+ u8 val;
+
+ guard(mutex)(&ec->lock);
+
+ req[1] = addr;
+ ret = yoga_c630_ec_request(ec, req, sizeof(req), &val, 1);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(yoga_c630_ec_read8);
+
+int yoga_c630_ec_read16(struct yoga_c630_ec *ec, u8 addr)
+{
+ u8 req[2] = { LENOVO_EC_READ_REG, };
+ int ret;
+ u8 msb;
+ u8 lsb;
+
+ /* don't overflow the address */
+ if (addr == 0xff)
+ return -EINVAL;
+
+ guard(mutex)(&ec->lock);
+
+ req[1] = addr;
+ ret = yoga_c630_ec_request(ec, req, sizeof(req), &lsb, 1);
+ if (ret < 0)
+ return ret;
+
+ req[1] = addr + 1;
+ ret = yoga_c630_ec_request(ec, req, sizeof(req), &msb, 1);
+ if (ret < 0)
+ return ret;
+
+ return msb << 8 | lsb;
+}
+EXPORT_SYMBOL_GPL(yoga_c630_ec_read16);
+
+u16 yoga_c630_ec_ucsi_get_version(struct yoga_c630_ec *ec)
+{
+ u8 req[3] = { 0xb3, 0xf2, };
+ int ret;
+ u8 msb;
+ u8 lsb;
+
+ guard(mutex)(&ec->lock);
+
+ req[2] = LENOVO_EC_UCSI_VERSION;
+ ret = yoga_c630_ec_request(ec, req, sizeof(req), &lsb, 1);
+ if (ret < 0)
+ return ret;
+
+ req[2] = LENOVO_EC_UCSI_VERSION + 1;
+ ret = yoga_c630_ec_request(ec, req, sizeof(req), &msb, 1);
+ if (ret < 0)
+ return ret;
+
+ return msb << 8 | lsb;
+}
+EXPORT_SYMBOL_GPL(yoga_c630_ec_ucsi_get_version);
+
+int yoga_c630_ec_ucsi_write(struct yoga_c630_ec *ec,
+ const u8 req[YOGA_C630_UCSI_WRITE_SIZE])
+{
+ int ret;
+
+ mutex_lock(&ec->lock);
+ ret = i2c_smbus_write_i2c_block_data(ec->client, LENOVO_EC_UCSI_WRITE,
+ YOGA_C630_UCSI_WRITE_SIZE, req);
+ mutex_unlock(&ec->lock);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(yoga_c630_ec_ucsi_write);
+
+int yoga_c630_ec_ucsi_read(struct yoga_c630_ec *ec,
+ u8 resp[YOGA_C630_UCSI_READ_SIZE])
+{
+ int ret;
+
+ mutex_lock(&ec->lock);
+ ret = i2c_smbus_read_i2c_block_data(ec->client, LENOVO_EC_UCSI_READ,
+ YOGA_C630_UCSI_READ_SIZE, resp);
+ mutex_unlock(&ec->lock);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(yoga_c630_ec_ucsi_read);
+
+static irqreturn_t yoga_c630_ec_thread_intr(int irq, void *data)
+{
+ u8 req[] = { LENOVO_EC_REQUEST_NEXT_EVENT };
+ struct yoga_c630_ec *ec = data;
+ u8 event;
+ int ret;
+
+ mutex_lock(&ec->lock);
+ ret = yoga_c630_ec_request(ec, req, sizeof(req), &event, 1);
+ mutex_unlock(&ec->lock);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ blocking_notifier_call_chain(&ec->notifier_list, event, ec);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * yoga_c630_ec_register_notify - Register a notifier callback for EC events.
+ * @ec: Yoga C630 EC
+ * @nb: Notifier block pointer to register
+ *
+ * Return: 0 on success or negative error code.
+ */
+int yoga_c630_ec_register_notify(struct yoga_c630_ec *ec, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&ec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(yoga_c630_ec_register_notify);
+
+/**
+ * yoga_c630_ec_unregister_notify - Unregister notifier callback for EC events.
+ * @ec: Yoga C630 EC
+ * @nb: Notifier block pointer to unregister
+ *
+ * Unregister a notifier callback that was previously registered with
+ * yoga_c630_ec_register_notify().
+ */
+void yoga_c630_ec_unregister_notify(struct yoga_c630_ec *ec, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&ec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(yoga_c630_ec_unregister_notify);
+
+static void yoga_c630_aux_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static void yoga_c630_aux_remove(void *data)
+{
+ struct auxiliary_device *adev = data;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static int yoga_c630_aux_init(struct device *parent, const char *name,
+ struct yoga_c630_ec *ec)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = name;
+ adev->id = 0;
+ adev->dev.parent = parent;
+ adev->dev.release = yoga_c630_aux_release;
+ adev->dev.platform_data = ec;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, yoga_c630_aux_remove, adev);
+}
+
+static int yoga_c630_ec_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct yoga_c630_ec *ec;
+ int ret;
+
+ ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ mutex_init(&ec->lock);
+ ec->client = client;
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec->notifier_list);
+
+ ret = devm_request_threaded_irq(dev, client->irq,
+ NULL, yoga_c630_ec_thread_intr,
+ IRQF_ONESHOT, "yoga_c630_ec", ec);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "unable to request irq\n");
+
+ ret = yoga_c630_aux_init(dev, YOGA_C630_DEV_PSY, ec);
+ if (ret)
+ return ret;
+
+ return yoga_c630_aux_init(dev, YOGA_C630_DEV_UCSI, ec);
+}
+
+
+static const struct of_device_id yoga_c630_ec_of_match[] = {
+ { .compatible = "lenovo,yoga-c630-ec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, yoga_c630_ec_of_match);
+
+static const struct i2c_device_id yoga_c630_ec_i2c_id_table[] = {
+ { "yoga-c630-ec", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, yoga_c630_ec_i2c_id_table);
+
+static struct i2c_driver yoga_c630_ec_i2c_driver = {
+ .driver = {
+ .name = "yoga-c630-ec",
+ .of_match_table = yoga_c630_ec_of_match
+ },
+ .probe = yoga_c630_ec_probe,
+ .id_table = yoga_c630_ec_i2c_id_table,
+};
+module_i2c_driver(yoga_c630_ec_i2c_driver);
+
+MODULE_DESCRIPTION("Lenovo Yoga C630 Embedded Controller");
+MODULE_LICENSE("GPL");
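The EC core above deliberately stays small: it owns the I2C transport and the interrupt, fans events out through a blocking notifier, and exposes the battery and UCSI functions as two auxiliary devices, leaving the actual power-supply and UCSI logic to separate auxiliary drivers. A rough sketch of what such a consumer could look like (the match string and names below are hypothetical; the real ones come from the YOGA_C630_DEV_* constants in <linux/platform_data/lenovo-yoga-c630.h>):

static int yoga_psy_notify(struct notifier_block *nb, unsigned long event, void *data)
{
	/* 'event' is the byte fetched via LENOVO_EC_REQUEST_NEXT_EVENT */
	return NOTIFY_OK;
}

static struct notifier_block yoga_psy_nb = {
	.notifier_call = yoga_psy_notify,
};

static int yoga_psy_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	/* the EC core hands itself to sub-drivers via platform_data */
	struct yoga_c630_ec *ec = dev_get_platdata(&adev->dev);

	return yoga_c630_ec_register_notify(ec, &yoga_psy_nb);
}

static void yoga_psy_remove(struct auxiliary_device *adev)
{
	struct yoga_c630_ec *ec = dev_get_platdata(&adev->dev);

	yoga_c630_ec_unregister_notify(ec, &yoga_psy_nb);
}

static const struct auxiliary_device_id yoga_psy_ids[] = {
	{ .name = "lenovo_yoga_c630_ec.psy" },	/* hypothetical match string */
	{}
};

static struct auxiliary_driver yoga_psy_driver = {
	.probe = yoga_psy_probe,
	.remove = yoga_psy_remove,
	.id_table = yoga_psy_ids,
};
module_auxiliary_driver(yoga_psy_driver);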
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 073616b5b5a0..7dbeb786352a 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -150,7 +150,7 @@ config CROS_EC_PROTO
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
- depends on LEDS_CLASS && (ACPI || CROS_EC)
+ depends on LEDS_CLASS && (ACPI || CROS_EC || MFD_CROS_EC_DEV)
help
This option enables support for the keyboard backlight LEDs on
select Chrome OS systems.
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 47d19f7e295a..e821b3d39590 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -388,8 +388,8 @@ EXPORT_SYMBOL(cros_ec_suspend_late);
*/
int cros_ec_suspend(struct cros_ec_device *ec_dev)
{
- cros_ec_send_suspend_event(ec_dev);
- cros_ec_disable_irq(ec_dev);
+ cros_ec_suspend_prepare(ec_dev);
+ cros_ec_suspend_late(ec_dev);
return 0;
}
EXPORT_SYMBOL(cros_ec_suspend);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index e1d313246beb..4525ad1b59f4 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -26,6 +26,10 @@
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
+static unsigned int log_poll_period_ms = LOG_POLL_SEC * MSEC_PER_SEC;
+module_param(log_poll_period_ms, uint, 0644);
+MODULE_PARM_DESC(log_poll_period_ms, "EC log polling period(ms)");
+
/* waitqueue for log readers */
static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
@@ -57,7 +61,7 @@ struct cros_ec_debugfs {
/*
* We need to make sure that the EC log buffer on the UART is large enough,
- * so that it is unlikely enough to overflow within LOG_POLL_SEC.
+ * so that it is unlikely enough to overflow within log_poll_period_ms.
*/
static void cros_ec_console_log_work(struct work_struct *__work)
{
@@ -119,7 +123,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
resched:
schedule_delayed_work(&debug_info->log_poll_work,
- msecs_to_jiffies(LOG_POLL_SEC * 1000));
+ msecs_to_jiffies(log_poll_period_ms));
}
static int cros_ec_console_log_open(struct inode *inode, struct file *file)
@@ -330,6 +334,7 @@ static int ec_read_version_supported(struct cros_ec_dev *ec)
if (!msg)
return 0;
+ msg->version = 1;
msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset;
msg->outsize = sizeof(*params);
msg->insize = sizeof(*response);
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index ddfbfec44f4c..f0470248b109 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -39,6 +39,16 @@ static bool cros_ec_lpc_acpi_device_found;
* be used as the base port for EC mapped memory.
*/
#define CROS_EC_LPC_QUIRK_REMAP_MEMORY BIT(0)
+/*
+ * Indicates that lpc_driver_data.quirk_acpi_id should be used to find
+ * the ACPI device.
+ */
+#define CROS_EC_LPC_QUIRK_ACPI_ID BIT(1)
+/*
+ * Indicates that lpc_driver_data.quirk_aml_mutex_name should be used
+ * to find an AML mutex to protect access to Microchip EC.
+ */
+#define CROS_EC_LPC_QUIRK_AML_MUTEX BIT(2)
/**
* struct lpc_driver_data - driver data attached to a DMI device ID to indicate
@@ -46,10 +56,15 @@ static bool cros_ec_lpc_acpi_device_found;
* @quirks: a bitfield composed of quirks from CROS_EC_LPC_QUIRK_*
* @quirk_mmio_memory_base: The first I/O port addressing EC mapped memory (used
* when quirk ...REMAP_MEMORY is set.)
+ * @quirk_acpi_id: An ACPI HID to be used to find the ACPI device.
+ * @quirk_aml_mutex_name: The name of an AML mutex to be used to protect access
+ * to Microchip EC.
*/
struct lpc_driver_data {
u32 quirks;
u16 quirk_mmio_memory_base;
+ const char *quirk_acpi_id;
+ const char *quirk_aml_mutex_name;
};
/**
@@ -62,14 +77,16 @@ struct cros_ec_lpc {
/**
* struct lpc_driver_ops - LPC driver operations
- * @read: Copy length bytes from EC address offset into buffer dest. Returns
- * the 8-bit checksum of all bytes read.
- * @write: Copy length bytes from buffer msg into EC address offset. Returns
- * the 8-bit checksum of all bytes written.
+ * @read: Copy length bytes from EC address offset into buffer dest.
+ * Returns a negative error code on error, or the 8-bit checksum
+ * of all bytes read.
+ * @write: Copy length bytes from buffer msg into EC address offset.
+ * Returns a negative error code on error, or the 8-bit checksum
+ * of all bytes written.
*/
struct lpc_driver_ops {
- u8 (*read)(unsigned int offset, unsigned int length, u8 *dest);
- u8 (*write)(unsigned int offset, unsigned int length, const u8 *msg);
+ int (*read)(unsigned int offset, unsigned int length, u8 *dest);
+ int (*write)(unsigned int offset, unsigned int length, const u8 *msg);
};
static struct lpc_driver_ops cros_ec_lpc_ops = { };
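With lpc_driver_ops now returning an int, callers treat a negative value as an error and a non-negative value as the 8-bit checksum. A minimal caller sketch (buffer and length are illustrative only):

	int ret;
	u8 csum, buf[4];

	ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP, sizeof(buf), buf);
	if (ret < 0)
		return ret;	/* propagate I/O or range error */
	csum = ret;		/* 8-bit checksum of the bytes read */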
@@ -78,10 +95,10 @@ static struct lpc_driver_ops cros_ec_lpc_ops = { };
* A generic instance of the read function of struct lpc_driver_ops, used for
* the LPC EC.
*/
-static u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
- u8 *dest)
+static int cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
+ u8 *dest)
{
- int sum = 0;
+ u8 sum = 0;
int i;
for (i = 0; i < length; ++i) {
@@ -97,10 +114,10 @@ static u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
* A generic instance of the write function of struct lpc_driver_ops, used for
* the LPC EC.
*/
-static u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
- const u8 *msg)
+static int cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
+ const u8 *msg)
{
- int sum = 0;
+ u8 sum = 0;
int i;
for (i = 0; i < length; ++i) {
@@ -116,13 +133,13 @@ static u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
* An instance of the read function of struct lpc_driver_ops, used for the
* MEC variant of LPC EC.
*/
-static u8 cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
- u8 *dest)
+static int cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
+ u8 *dest)
{
int in_range = cros_ec_lpc_mec_in_range(offset, length);
if (in_range < 0)
- return 0;
+ return in_range;
return in_range ?
cros_ec_lpc_io_bytes_mec(MEC_IO_READ,
@@ -135,13 +152,13 @@ static u8 cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
* An instance of the write function of struct lpc_driver_ops, used for the
* MEC variant of LPC EC.
*/
-static u8 cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
- const u8 *msg)
+static int cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
+ const u8 *msg)
{
int in_range = cros_ec_lpc_mec_in_range(offset, length);
if (in_range < 0)
- return 0;
+ return in_range;
return in_range ?
cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE,
@@ -154,11 +171,14 @@ static int ec_response_timed_out(void)
{
unsigned long one_second = jiffies + HZ;
u8 data;
+ int ret;
usleep_range(200, 300);
do {
- if (!(cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data) &
- EC_LPC_STATUS_BUSY_MASK))
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data);
+ if (ret < 0)
+ return ret;
+ if (!(data & EC_LPC_STATUS_BUSY_MASK))
return 0;
usleep_range(100, 200);
} while (time_before(jiffies, one_second));
@@ -179,28 +199,41 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
goto done;
/* Write buffer */
- cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
+ ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
+ if (ret < 0)
+ goto done;
/* Here we go */
sum = EC_COMMAND_PROTOCOL_3;
- cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ if (ret < 0)
+ goto done;
- if (ec_response_timed_out()) {
+ ret = ec_response_timed_out();
+ if (ret < 0)
+ goto done;
+ if (ret) {
dev_warn(ec->dev, "EC response timed out\n");
ret = -EIO;
goto done;
}
/* Check result */
- msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ if (ret < 0)
+ goto done;
+ msg->result = ret;
ret = cros_ec_check_result(ec, msg);
if (ret)
goto done;
/* Read back response */
dout = (u8 *)&response;
- sum = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response),
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response),
dout);
+ if (ret < 0)
+ goto done;
+ sum = ret;
msg->result = response.result;
@@ -213,9 +246,12 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
}
/* Read response and process checksum */
- sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET +
- sizeof(response), response.data_len,
- msg->data);
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET +
+ sizeof(response), response.data_len,
+ msg->data);
+ if (ret < 0)
+ goto done;
+ sum += ret;
if (sum) {
dev_err(ec->dev,
@@ -255,32 +291,47 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
sum = msg->command + args.flags + args.command_version + args.data_size;
/* Copy data and update checksum */
- sum += cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize,
- msg->data);
+ ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize,
+ msg->data);
+ if (ret < 0)
+ goto done;
+ sum += ret;
/* Finalize checksum and write args */
args.checksum = sum;
- cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args),
- (u8 *)&args);
+ ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args),
+ (u8 *)&args);
+ if (ret < 0)
+ goto done;
/* Here we go */
sum = msg->command;
- cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
+ if (ret < 0)
+ goto done;
- if (ec_response_timed_out()) {
+ ret = ec_response_timed_out();
+ if (ret < 0)
+ goto done;
+ if (ret) {
dev_warn(ec->dev, "EC response timed out\n");
ret = -EIO;
goto done;
}
/* Check result */
- msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
+ if (ret < 0)
+ goto done;
+ msg->result = ret;
ret = cros_ec_check_result(ec, msg);
if (ret)
goto done;
/* Read back args */
- cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
+ if (ret < 0)
+ goto done;
if (args.data_size > msg->insize) {
dev_err(ec->dev,
@@ -294,8 +345,11 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
sum = msg->command + args.flags + args.command_version + args.data_size;
/* Read response and update checksum */
- sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size,
- msg->data);
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size,
+ msg->data);
+ if (ret < 0)
+ goto done;
+ sum += ret;
/* Verify checksum */
if (args.checksum != sum) {
@@ -320,19 +374,24 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
int i = offset;
char *s = dest;
int cnt = 0;
+ int ret;
if (offset >= EC_MEMMAP_SIZE - bytes)
return -EINVAL;
/* fixed length */
if (bytes) {
- cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s);
+ ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s);
+ if (ret < 0)
+ return ret;
return bytes;
}
/* string */
for (; i < EC_MEMMAP_SIZE; i++, s++) {
- cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s);
+ ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s);
+ if (ret < 0)
+ return ret;
cnt++;
if (!*s)
break;
@@ -374,6 +433,26 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
pm_system_wakeup();
}
+static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level,
+ void *context, void **retval)
+{
+ *(struct acpi_device **)context = acpi_fetch_acpi_dev(handle);
+ return AE_CTRL_TERMINATE;
+}
+
+static struct acpi_device *cros_ec_lpc_get_device(const char *id)
+{
+ struct acpi_device *adev = NULL;
+ acpi_status status = acpi_get_devices(id, cros_ec_lpc_parse_device,
+ &adev, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_warn(DRV_NAME ": Looking for %s failed\n", id);
+ return NULL;
+ }
+
+ return adev;
+}
+
static int cros_ec_lpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -401,6 +480,27 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
if (quirks & CROS_EC_LPC_QUIRK_REMAP_MEMORY)
ec_lpc->mmio_memory_base = driver_data->quirk_mmio_memory_base;
+
+ if (quirks & CROS_EC_LPC_QUIRK_ACPI_ID) {
+ adev = cros_ec_lpc_get_device(driver_data->quirk_acpi_id);
+ if (!adev) {
+ dev_err(dev, "failed to get ACPI device '%s'",
+ driver_data->quirk_acpi_id);
+ return -ENODEV;
+ }
+ ACPI_COMPANION_SET(dev, adev);
+ }
+
+ if (quirks & CROS_EC_LPC_QUIRK_AML_MUTEX) {
+ const char *name
+ = driver_data->quirk_aml_mutex_name;
+ ret = cros_ec_lpc_mec_acpi_mutex(ACPI_COMPANION(dev), name);
+ if (ret) {
+ dev_err(dev, "failed to get AML mutex '%s'", name);
+ return ret;
+ }
+ dev_info(dev, "got AML mutex '%s'", name);
+ }
}
/*
@@ -425,7 +525,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
*/
cros_ec_lpc_ops.read = cros_ec_lpc_mec_read_bytes;
cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes;
- cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
+ ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
+ if (ret < 0)
+ return ret;
if (buf[0] != 'E' || buf[1] != 'C') {
if (!devm_request_region(dev, ec_lpc->mmio_memory_base, EC_MEMMAP_SIZE,
dev_name(dev))) {
@@ -436,8 +538,10 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
/* Re-assign read/write operations for the non MEC variant */
cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes;
cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes;
- cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
- buf);
+ ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
+ buf);
+ if (ret < 0)
+ return ret;
if (buf[0] != 'E' || buf[1] != 'C') {
dev_err(dev, "EC ID not detected\n");
return -ENODEV;
@@ -532,6 +636,12 @@ static const struct lpc_driver_data framework_laptop_amd_lpc_driver_data __initc
.quirk_mmio_memory_base = 0xE00,
};
+static const struct lpc_driver_data framework_laptop_11_lpc_driver_data __initconst = {
+ .quirks = CROS_EC_LPC_QUIRK_ACPI_ID|CROS_EC_LPC_QUIRK_AML_MUTEX,
+ .quirk_acpi_id = "PNP0C09",
+ .quirk_aml_mutex_name = "ECMT",
+};
+
static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
{
/*
@@ -600,6 +710,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"),
},
+ .driver_data = (void *)&framework_laptop_11_lpc_driver_data,
},
{ /* sentinel */ }
};
@@ -661,23 +772,12 @@ static struct platform_device cros_ec_lpc_device = {
.name = DRV_NAME
};
-static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level,
- void *context, void **retval)
-{
- *(bool *)context = true;
- return AE_CTRL_TERMINATE;
-}
-
static int __init cros_ec_lpc_init(void)
{
int ret;
- acpi_status status;
const struct dmi_system_id *dmi_match;
- status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
- &cros_ec_lpc_acpi_device_found, NULL);
- if (ACPI_FAILURE(status))
- pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
+ cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME);
dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);
diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c
index 0d9c79b270ce..a56584171168 100644
--- a/drivers/platform/chrome/cros_ec_lpc_mec.c
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.c
@@ -10,14 +10,66 @@
#include "cros_ec_lpc_mec.h"
+#define ACPI_LOCK_DELAY_MS 500
+
/*
* This mutex must be held while accessing the EMI unit. We can't rely on the
* EC mutex because memmap data may be accessed without it being held.
*/
static DEFINE_MUTEX(io_mutex);
+/*
+ * An alternative mutex to be used when the ACPI AML code may also
+ * access memmap data. When set, this mutex is used in preference to
+ * io_mutex.
+ */
+static acpi_handle aml_mutex;
+
static u16 mec_emi_base, mec_emi_end;
/**
+ * cros_ec_lpc_mec_lock() - Acquire mutex for EMI
+ *
+ * @return: Negative error code, or zero for success
+ */
+static int cros_ec_lpc_mec_lock(void)
+{
+ bool success;
+
+ if (!aml_mutex) {
+ mutex_lock(&io_mutex);
+ return 0;
+ }
+
+ success = ACPI_SUCCESS(acpi_acquire_mutex(aml_mutex,
+ NULL, ACPI_LOCK_DELAY_MS));
+ if (!success)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * cros_ec_lpc_mec_unlock() - Release mutex for EMI
+ *
+ * @return: Negative error code, or zero for success
+ */
+static int cros_ec_lpc_mec_unlock(void)
+{
+ bool success;
+
+ if (!aml_mutex) {
+ mutex_unlock(&io_mutex);
+ return 0;
+ }
+
+ success = ACPI_SUCCESS(acpi_release_mutex(aml_mutex, NULL));
+ if (!success)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
* cros_ec_lpc_mec_emi_write_address() - Initialize EMI at a given address.
*
* @addr: Starting read / write address
@@ -41,9 +93,6 @@ static void cros_ec_lpc_mec_emi_write_address(u16 addr,
*/
int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length)
{
- if (length == 0)
- return -EINVAL;
-
if (WARN_ON(mec_emi_base == 0 || mec_emi_end == 0))
return -EINVAL;
@@ -67,16 +116,21 @@ int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length)
* @length: Number of bytes to read / write
* @buf: Destination / source buffer
*
- * Return: 8-bit checksum of all bytes read / written
+ * @return: A negative error code on error, or 8-bit checksum of all
+ * bytes read / written
*/
-u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
- unsigned int offset, unsigned int length,
- u8 *buf)
+int cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ unsigned int offset, unsigned int length,
+ u8 *buf)
{
int i = 0;
int io_addr;
u8 sum = 0;
enum cros_ec_lpc_mec_emi_access_mode access, new_access;
+ int ret;
+
+ if (length == 0)
+ return 0;
/* Return checksum of 0 if window is not initialized */
WARN_ON(mec_emi_base == 0 || mec_emi_end == 0);
@@ -92,7 +146,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
else
access = ACCESS_TYPE_LONG_AUTO_INCREMENT;
- mutex_lock(&io_mutex);
+ ret = cros_ec_lpc_mec_lock();
+ if (ret)
+ return ret;
/* Initialize I/O at desired address */
cros_ec_lpc_mec_emi_write_address(offset, access);
@@ -134,7 +190,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
}
done:
- mutex_unlock(&io_mutex);
+ ret = cros_ec_lpc_mec_unlock();
+ if (ret)
+ return ret;
return sum;
}
@@ -146,3 +204,18 @@ void cros_ec_lpc_mec_init(unsigned int base, unsigned int end)
mec_emi_end = end;
}
EXPORT_SYMBOL(cros_ec_lpc_mec_init);
+
+int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname)
+{
+ int status;
+
+ if (!adev)
+ return -ENOENT;
+
+ status = acpi_get_handle(adev->handle, pathname, &aml_mutex);
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(cros_ec_lpc_mec_acpi_mutex);
diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h
index 9d0521b23e8a..69f9d8786f61 100644
--- a/drivers/platform/chrome/cros_ec_lpc_mec.h
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.h
@@ -8,6 +8,8 @@
#ifndef __CROS_EC_LPC_MEC_H
#define __CROS_EC_LPC_MEC_H
+#include <linux/acpi.h>
+
enum cros_ec_lpc_mec_emi_access_mode {
/* 8-bit access */
ACCESS_TYPE_BYTE = 0x0,
@@ -46,6 +48,15 @@ enum cros_ec_lpc_mec_io_type {
void cros_ec_lpc_mec_init(unsigned int base, unsigned int end);
/**
+ * cros_ec_lpc_mec_acpi_mutex() - Find and set ACPI mutex for MEC
+ *
+ * @adev: Parent ACPI device
+ * @pathname: Name of AML mutex
+ * @return: Negative error code, or zero for success
+ */
+int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname);
+
+/**
* cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
*
* @offset: Address offset
@@ -64,9 +75,10 @@ int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length);
* @length: Number of bytes to read / write
* @buf: Destination / source buffer
*
- * @return 8-bit checksum of all bytes read / written
+ * @return: A negative error code on error, or 8-bit checksum of all
+ * bytes read / written
*/
-u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
- unsigned int offset, unsigned int length, u8 *buf);
+int cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ unsigned int offset, unsigned int length, u8 *buf);
#endif /* __CROS_EC_LPC_MEC_H */
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 945b1b15a04c..f776fd42244f 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/limits.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -239,13 +240,12 @@ int cros_ec_check_result(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_check_result);
-/*
+/**
* cros_ec_get_host_event_wake_mask
*
* Get the mask of host events that cause wake from suspend.
*
* @ec_dev: EC device to call
- * @msg: message structure to use
* @mask: result when function returns 0.
*
* LOCKING:
@@ -427,13 +427,12 @@ exit:
return ret;
}
-/*
+/**
* cros_ec_get_host_command_version_mask
*
* Get the version mask of a given command.
*
* @ec_dev: EC device to call
- * @msg: message structure to use
* @cmd: command to get the version of.
* @mask: result when function returns 0.
*
@@ -686,7 +685,7 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
static int get_next_event_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg,
- struct ec_response_get_next_event_v1 *event,
+ struct ec_response_get_next_event_v3 *event,
int version, uint32_t size)
{
int ret;
@@ -709,11 +708,12 @@ static int get_next_event(struct cros_ec_device *ec_dev)
{
struct {
struct cros_ec_command msg;
- struct ec_response_get_next_event_v1 event;
+ struct ec_response_get_next_event_v3 event;
} __packed buf;
struct cros_ec_command *msg = &buf.msg;
- struct ec_response_get_next_event_v1 *event = &buf.event;
- const int cmd_version = ec_dev->mkbp_event_supported - 1;
+ struct ec_response_get_next_event_v3 *event = &buf.event;
+ int cmd_version = ec_dev->mkbp_event_supported - 1;
+ u32 size;
memset(msg, 0, sizeof(*msg));
if (ec_dev->suspended) {
@@ -721,12 +721,20 @@ static int get_next_event(struct cros_ec_device *ec_dev)
return -EHOSTDOWN;
}
- if (cmd_version == 0)
- return get_next_event_xfer(ec_dev, msg, event, 0,
- sizeof(struct ec_response_get_next_event));
+ if (cmd_version == 0) {
+ size = sizeof(struct ec_response_get_next_event);
+ } else if (cmd_version < 3) {
+ size = sizeof(struct ec_response_get_next_event_v1);
+ } else {
+ /*
+ * The max version we support is v3. So, we speak v3 even if the
+ * EC says it supports v4+.
+ */
+ cmd_version = 3;
+ size = sizeof(struct ec_response_get_next_event_v3);
+ }
- return get_next_event_xfer(ec_dev, msg, event, cmd_version,
- sizeof(struct ec_response_get_next_event_v1));
+ return get_next_event_xfer(ec_dev, msg, event, cmd_version, size);
}
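For illustration, the version selection above resolves as follows (derived from cmd_version = mkbp_event_supported - 1):

	/*
	 * ec_dev->mkbp_event_supported   version sent   response size used
	 *              1                      0         ec_response_get_next_event
	 *            2 or 3                 1 or 2      ec_response_get_next_event_v1
	 *             >= 4                    3         ec_response_get_next_event_v3
	 */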
static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
@@ -1035,3 +1043,64 @@ error:
return ret;
}
EXPORT_SYMBOL_GPL(cros_ec_cmd);
+
+/**
+ * cros_ec_cmd_readmem - Read from EC memory.
+ *
+ * @ec_dev: EC device
+ * @offset: Offset within the EC_LPC_ADDR_MEMMAP region.
+ * @size: Number of bytes to read.
+ * @dest: EC command output data
+ *
+ * Return: >= 0 on success, negative error number on failure.
+ */
+int cros_ec_cmd_readmem(struct cros_ec_device *ec_dev, u8 offset, u8 size, void *dest)
+{
+ struct ec_params_read_memmap params = {};
+
+ if (!size)
+ return -EINVAL;
+
+ if (ec_dev->cmd_readmem)
+ return ec_dev->cmd_readmem(ec_dev, offset, size, dest);
+
+ params.offset = offset;
+ params.size = size;
+ return cros_ec_cmd(ec_dev, 0, EC_CMD_READ_MEMMAP,
+ &params, sizeof(params), dest, size);
+}
+EXPORT_SYMBOL_GPL(cros_ec_cmd_readmem);
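A minimal usage sketch; EC_MEMMAP_SWITCHES is only an example offset, and ec_dev is assumed to be a valid, registered EC device:

	u8 switches;
	int ret;

	ret = cros_ec_cmd_readmem(ec_dev, EC_MEMMAP_SWITCHES, sizeof(switches), &switches);
	if (ret < 0)
		return ret;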
+
+/**
+ * cros_ec_get_cmd_versions - Get supported version mask.
+ *
+ * @ec_dev: EC device
+ * @cmd: Command to test
+ *
+ * Return: version mask on success, negative error number on failure.
+ */
+int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd)
+{
+ struct ec_params_get_cmd_versions req_v0;
+ struct ec_params_get_cmd_versions_v1 req_v1;
+ struct ec_response_get_cmd_versions resp;
+ int ret;
+
+ if (cmd <= U8_MAX) {
+ req_v0.cmd = cmd;
+ ret = cros_ec_cmd(ec_dev, 0, EC_CMD_GET_CMD_VERSIONS,
+ &req_v0, sizeof(req_v0), &resp, sizeof(resp));
+ } else {
+ req_v1.cmd = cmd;
+ ret = cros_ec_cmd(ec_dev, 1, EC_CMD_GET_CMD_VERSIONS,
+ &req_v1, sizeof(req_v1), &resp, sizeof(resp));
+ }
+
+ if (ret == -EINVAL)
+ return 0; /* Command not implemented */
+ else if (ret < 0)
+ return ret;
+ else
+ return resp.version_mask;
+}
+EXPORT_SYMBOL_GPL(cros_ec_get_cmd_versions);
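Callers can test individual bits of the returned mask to decide which command version to speak; a minimal sketch, assuming the usual EC_VER_MASK() bit helper:

	int mask = cros_ec_get_cmd_versions(ec_dev, EC_CMD_GET_NEXT_EVENT);

	if (mask < 0)
		return mask;
	if (mask & EC_VER_MASK(3)) {
		/* version 3 of the command is supported */
	}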
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
index 41378c2ee6a0..7ca9895a0065 100644
--- a/drivers/platform/chrome/cros_ec_proto_test.c
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -2060,17 +2060,17 @@ static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test)
/* For get_keyboard_state_event(). */
{
- union ec_response_get_next_data_v1 *data;
+ union ec_response_get_next_data_v3 *data;
mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
KUNIT_ASSERT_PTR_NE(test, mock, NULL);
- data = (union ec_response_get_next_data_v1 *)mock->o_data;
+ data = (union ec_response_get_next_data_v3 *)mock->o_data;
data->host_event = 0xbeef;
}
ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
- KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1));
+ KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v3));
KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX);
KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef);
@@ -2085,7 +2085,7 @@ static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test)
KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE);
- KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v3));
KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
}
}
@@ -2740,4 +2740,5 @@ static struct kunit_suite cros_ec_proto_test_suite = {
kunit_test_suite(cros_ec_proto_test_suite);
+MODULE_DESCRIPTION("Kunit tests for ChromeOS Embedded Controller protocol");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index b83e4f328620..78097c8a4966 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
+#include <linux/mfd/core.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -194,13 +195,46 @@ static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_
#endif /* IS_ENABLED(CONFIG_CROS_EC) */
+#if IS_ENABLED(CONFIG_MFD_CROS_EC_DEV)
+static int keyboard_led_init_ec_pwm_mfd(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
+
+ keyboard_led->ec = cros_ec;
+
+ return 0;
+}
+
+static const struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm_mfd = {
+ .init = keyboard_led_init_ec_pwm_mfd,
+ .brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
+ .brightness_get = keyboard_led_get_brightness_ec_pwm,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#else /* IS_ENABLED(CONFIG_MFD_CROS_EC_DEV) */
+
+static const struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm_mfd = {};
+
+#endif /* IS_ENABLED(CONFIG_MFD_CROS_EC_DEV) */
+
+static int keyboard_led_is_mfd_device(struct platform_device *pdev)
+{
+ return IS_ENABLED(CONFIG_MFD_CROS_EC_DEV) && mfd_get_cell(pdev);
+}
+
static int keyboard_led_probe(struct platform_device *pdev)
{
const struct keyboard_led_drvdata *drvdata;
struct keyboard_led *keyboard_led;
int error;
- drvdata = device_get_match_data(&pdev->dev);
+ if (keyboard_led_is_mfd_device(pdev))
+ drvdata = &keyboard_led_drvdata_ec_pwm_mfd;
+ else
+ drvdata = device_get_match_data(&pdev->dev);
if (!drvdata)
return -EINVAL;
@@ -216,13 +250,15 @@ static int keyboard_led_probe(struct platform_device *pdev)
}
keyboard_led->cdev.name = "chromeos::kbd_backlight";
- keyboard_led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ keyboard_led->cdev.flags |= LED_CORE_SUSPENDRESUME | LED_REJECT_NAME_CONFLICT;
keyboard_led->cdev.max_brightness = drvdata->max_brightness;
keyboard_led->cdev.brightness_set = drvdata->brightness_set;
keyboard_led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
keyboard_led->cdev.brightness_get = drvdata->brightness_get;
error = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
+ if (error == -EEXIST) /* Already bound via other mechanism */
+ return -ENODEV;
if (error)
return error;
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index 0f98358ea824..4d8273b47cde 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -117,13 +117,17 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
struct wilco_ec_request *rq)
{
struct wilco_ec_response *rs;
- u8 checksum;
+ int ret;
u8 flag;
/* Write request header, then data */
- cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq);
- cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size,
- msg->request_data);
+ ret = cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq);
+ if (ret < 0)
+ return ret;
+ ret = cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size,
+ msg->request_data);
+ if (ret < 0)
+ return ret;
/* Start the command */
outb(EC_MAILBOX_START_COMMAND, ec->io_command->start);
@@ -149,10 +153,12 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
/* Read back response */
rs = ec->data_buffer;
- checksum = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0,
- sizeof(*rs) + EC_MAILBOX_DATA_SIZE,
- (u8 *)rs);
- if (checksum) {
+ ret = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0,
+ sizeof(*rs) + EC_MAILBOX_DATA_SIZE,
+ (u8 *)rs);
+ if (ret < 0)
+ return ret;
+ if (ret) {
dev_dbg(ec->dev, "bad packet checksum 0x%02x\n", rs->checksum);
return -EBADMSG;
}
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
new file mode 100644
index 000000000000..cb0d4d686d8a
--- /dev/null
+++ b/drivers/platform/cznic/Kconfig
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.rst.
+#
+
+menuconfig CZNIC_PLATFORMS
+ bool "Platform support for CZ.NIC's Turris hardware"
+ help
+ Say Y here to be able to choose driver support for CZ.NIC's Turris
+ devices. This option alone does not add any kernel code.
+
+if CZNIC_PLATFORMS
+
+config TURRIS_OMNIA_MCU
+ tristate "Turris Omnia MCU driver"
+ depends on MACH_ARMADA_38X || COMPILE_TEST
+ depends on I2C
+ depends on OF
+ depends on WATCHDOG
+ depends on GPIOLIB
+ depends on HW_RANDOM
+ depends on RTC_CLASS
+ depends on WATCHDOG_CORE
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to add support for the features implemented by the
+ microcontroller on CZ.NIC's Turris Omnia SOHO router.
+ The features include:
+ - board poweroff into true low power mode (with voltage regulators
+ disabled) and the ability to configure wake up from this mode (via
+ rtcwake)
+ - true random number generator (if available on the MCU)
+ - MCU watchdog
+ - GPIO pins
+ - to get front button press events (the front button can be
+ configured either to generate press events to the CPU or to change
+ front LEDs panel brightness)
+ - to enable / disable USB port voltage regulators and to detect
+ USB overcurrent
+ - to detect MiniPCIe / mSATA card presence in MiniPCIe port 0
+ - to configure resets of various peripherals on board revisions 32+
+ - to enable / disable the VHV voltage regulator to the SOC in order
+ to be able to program SOC's OTP on board revisions 32+
+ - to get input from the LED output pins of the WAN ethernet PHY, LAN
+ switch and MiniPCIe ports
+ To compile this driver as a module, choose M here; the module will be
+ called turris-omnia-mcu.
+
+endif # CZNIC_PLATFORMS
diff --git a/drivers/platform/cznic/Makefile b/drivers/platform/cznic/Makefile
new file mode 100644
index 000000000000..eae4c6b341ff
--- /dev/null
+++ b/drivers/platform/cznic/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o
+turris-omnia-mcu-y := turris-omnia-mcu-base.o
+turris-omnia-mcu-y += turris-omnia-mcu-gpio.o
+turris-omnia-mcu-y += turris-omnia-mcu-sys-off-wakeup.o
+turris-omnia-mcu-y += turris-omnia-mcu-trng.o
+turris-omnia-mcu-y += turris-omnia-mcu-watchdog.o
diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c
new file mode 100644
index 000000000000..c68a7a84a951
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu-base.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CZ.NIC's Turris Omnia MCU driver
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/hex.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <linux/turris-omnia-mcu-interface.h>
+#include "turris-omnia-mcu.h"
+
+#define OMNIA_FW_VERSION_LEN 20
+#define OMNIA_FW_VERSION_HEX_LEN (2 * OMNIA_FW_VERSION_LEN + 1)
+#define OMNIA_BOARD_INFO_LEN 16
+
+int omnia_cmd_write_read(const struct i2c_client *client,
+ void *cmd, unsigned int cmd_len,
+ void *reply, unsigned int reply_len)
+{
+ struct i2c_msg msgs[2];
+ int ret, num;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = cmd_len;
+ msgs[0].buf = cmd;
+ num = 1;
+
+ if (reply_len) {
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = reply_len;
+ msgs[1].buf = reply;
+ num++;
+ }
+
+ ret = i2c_transfer(client->adapter, msgs, num);
+ if (ret < 0)
+ return ret;
+ if (ret != num)
+ return -EIO;
+
+ return 0;
+}
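A command is a one-byte opcode optionally followed by argument bytes, with the reply read back in the same I2C transfer. An illustrative wrapper (example_read_status_word() is hypothetical and assumes a little-endian two-byte reply):

	static int example_read_status_word(const struct i2c_client *client, u16 *status)
	{
		u8 cmd = OMNIA_CMD_GET_STATUS_WORD;
		u8 reply[2];
		int err;

		err = omnia_cmd_write_read(client, &cmd, sizeof(cmd), reply, sizeof(reply));
		if (err)
			return err;

		/* assemble the reply, assuming little-endian byte order */
		*status = reply[0] | (reply[1] << 8);
		return 0;
	}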
+
+static int omnia_get_version_hash(struct omnia_mcu *mcu, bool bootloader,
+ char version[static OMNIA_FW_VERSION_HEX_LEN])
+{
+ u8 reply[OMNIA_FW_VERSION_LEN];
+ char *p;
+ int err;
+
+ err = omnia_cmd_read(mcu->client,
+ bootloader ? OMNIA_CMD_GET_FW_VERSION_BOOT
+ : OMNIA_CMD_GET_FW_VERSION_APP,
+ reply, sizeof(reply));
+ if (err)
+ return err;
+
+ p = bin2hex(version, reply, OMNIA_FW_VERSION_LEN);
+ *p = '\0';
+
+ return 0;
+}
+
+static ssize_t fw_version_hash_show(struct device *dev, char *buf,
+ bool bootloader)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+ char version[OMNIA_FW_VERSION_HEX_LEN];
+ int err;
+
+ err = omnia_get_version_hash(mcu, bootloader, version);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%s\n", version);
+}
+
+static ssize_t fw_version_hash_application_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return fw_version_hash_show(dev, buf, false);
+}
+static DEVICE_ATTR_RO(fw_version_hash_application);
+
+static ssize_t fw_version_hash_bootloader_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return fw_version_hash_show(dev, buf, true);
+}
+static DEVICE_ATTR_RO(fw_version_hash_bootloader);
+
+static ssize_t fw_features_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%x\n", mcu->features);
+}
+static DEVICE_ATTR_RO(fw_features);
+
+static ssize_t mcu_type_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", mcu->type);
+}
+static DEVICE_ATTR_RO(mcu_type);
+
+static ssize_t reset_selector_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ u8 reply;
+ int err;
+
+ err = omnia_cmd_read_u8(to_i2c_client(dev), OMNIA_CMD_GET_RESET,
+ &reply);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%d\n", reply);
+}
+static DEVICE_ATTR_RO(reset_selector);
+
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%016llX\n", mcu->board_serial_number);
+}
+static DEVICE_ATTR_RO(serial_number);
+
+static ssize_t first_mac_address_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%pM\n", mcu->board_first_mac);
+}
+static DEVICE_ATTR_RO(first_mac_address);
+
+static ssize_t board_revision_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", mcu->board_revision);
+}
+static DEVICE_ATTR_RO(board_revision);
+
+static struct attribute *omnia_mcu_base_attrs[] = {
+ &dev_attr_fw_version_hash_application.attr,
+ &dev_attr_fw_version_hash_bootloader.attr,
+ &dev_attr_fw_features.attr,
+ &dev_attr_mcu_type.attr,
+ &dev_attr_reset_selector.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_first_mac_address.attr,
+ &dev_attr_board_revision.attr,
+ NULL
+};
+
+static umode_t omnia_mcu_base_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ if ((a == &dev_attr_serial_number.attr ||
+ a == &dev_attr_first_mac_address.attr ||
+ a == &dev_attr_board_revision.attr) &&
+ !(mcu->features & OMNIA_FEAT_BOARD_INFO))
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group omnia_mcu_base_group = {
+ .attrs = omnia_mcu_base_attrs,
+ .is_visible = omnia_mcu_base_attrs_visible,
+};
+
+static const struct attribute_group *omnia_mcu_groups[] = {
+ &omnia_mcu_base_group,
+ &omnia_mcu_gpio_group,
+ &omnia_mcu_poweroff_group,
+ NULL
+};
+
+static void omnia_mcu_print_version_hash(struct omnia_mcu *mcu, bool bootloader)
+{
+ const char *type = bootloader ? "bootloader" : "application";
+ struct device *dev = &mcu->client->dev;
+ char version[OMNIA_FW_VERSION_HEX_LEN];
+ int err;
+
+ err = omnia_get_version_hash(mcu, bootloader, version);
+ if (err) {
+ dev_err(dev, "Cannot read MCU %s firmware version: %d\n",
+ type, err);
+ return;
+ }
+
+ dev_info(dev, "MCU %s firmware version hash: %s\n", type, version);
+}
+
+static const char *omnia_status_to_mcu_type(u16 status)
+{
+ switch (status & OMNIA_STS_MCU_TYPE_MASK) {
+ case OMNIA_STS_MCU_TYPE_STM32:
+ return "STM32";
+ case OMNIA_STS_MCU_TYPE_GD32:
+ return "GD32";
+ case OMNIA_STS_MCU_TYPE_MKL:
+ return "MKL";
+ default:
+ return "unknown";
+ }
+}
+
+static void omnia_info_missing_feature(struct device *dev, const char *feature)
+{
+ dev_info(dev,
+ "Your board's MCU firmware does not support the %s feature.\n",
+ feature);
+}
+
+static int omnia_mcu_read_features(struct omnia_mcu *mcu)
+{
+ static const struct {
+ u16 mask;
+ const char *name;
+ } features[] = {
+#define _DEF_FEAT(_n, _m) { OMNIA_FEAT_ ## _n, _m }
+ _DEF_FEAT(EXT_CMDS, "extended control and status"),
+ _DEF_FEAT(WDT_PING, "watchdog pinging"),
+ _DEF_FEAT(LED_STATE_EXT_MASK, "peripheral LED pins reading"),
+ _DEF_FEAT(NEW_INT_API, "new interrupt API"),
+ _DEF_FEAT(POWEROFF_WAKEUP, "poweroff and wakeup"),
+ _DEF_FEAT(TRNG, "true random number generator"),
+#undef _DEF_FEAT
+ };
+ struct i2c_client *client = mcu->client;
+ struct device *dev = &client->dev;
+ bool suggest_fw_upgrade = false;
+ u16 status;
+ int err;
+
+ /* status word holds MCU type, which we need below */
+ err = omnia_cmd_read_u16(client, OMNIA_CMD_GET_STATUS_WORD, &status);
+ if (err)
+ return err;
+
+ /*
+ * Check whether MCU firmware supports the OMNIA_CMD_GET_FEATURES
+ * command.
+ */
+ if (status & OMNIA_STS_FEATURES_SUPPORTED) {
+ /* try read 32-bit features */
+ err = omnia_cmd_read_u32(client, OMNIA_CMD_GET_FEATURES,
+ &mcu->features);
+ if (err) {
+ /* try read 16-bit features */
+ u16 features16;
+
+ err = omnia_cmd_read_u16(client, OMNIA_CMD_GET_FEATURES,
+ &features16);
+ if (err)
+ return err;
+
+ mcu->features = features16;
+ } else {
+ if (mcu->features & OMNIA_FEAT_FROM_BIT_16_INVALID)
+ mcu->features &= GENMASK(15, 0);
+ }
+ } else {
+ dev_info(dev,
+ "Your board's MCU firmware does not support feature reading.\n");
+ suggest_fw_upgrade = true;
+ }
+
+ mcu->type = omnia_status_to_mcu_type(status);
+ dev_info(dev, "MCU type %s%s\n", mcu->type,
+ (mcu->features & OMNIA_FEAT_PERIPH_MCU) ?
+ ", with peripheral resets wired" : "");
+
+ omnia_mcu_print_version_hash(mcu, true);
+
+ if (mcu->features & OMNIA_FEAT_BOOTLOADER)
+ dev_warn(dev,
+ "MCU is running bootloader firmware. Was firmware upgrade interrupted?\n");
+ else
+ omnia_mcu_print_version_hash(mcu, false);
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(features); i++) {
+ if (mcu->features & features[i].mask)
+ continue;
+
+ omnia_info_missing_feature(dev, features[i].name);
+ suggest_fw_upgrade = true;
+ }
+
+ if (suggest_fw_upgrade)
+ dev_info(dev,
+ "Consider upgrading MCU firmware with the omnia-mcutool utility.\n");
+
+ return 0;
+}
+
+static int omnia_mcu_read_board_info(struct omnia_mcu *mcu)
+{
+ u8 reply[1 + OMNIA_BOARD_INFO_LEN];
+ int err;
+
+ err = omnia_cmd_read(mcu->client, OMNIA_CMD_BOARD_INFO_GET, reply,
+ sizeof(reply));
+ if (err)
+ return err;
+
+ if (reply[0] != OMNIA_BOARD_INFO_LEN)
+ return -EIO;
+
+ mcu->board_serial_number = get_unaligned_le64(&reply[1]);
+
+ /* we can't use ether_addr_copy() because reply is not u16-aligned */
+ memcpy(mcu->board_first_mac, &reply[9], sizeof(mcu->board_first_mac));
+
+ mcu->board_revision = reply[15];
+
+ return 0;
+}
+
+static int omnia_mcu_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct omnia_mcu *mcu;
+ int err;
+
+ if (!client->irq)
+ return dev_err_probe(dev, -EINVAL, "IRQ resource not found\n");
+
+ mcu = devm_kzalloc(dev, sizeof(*mcu), GFP_KERNEL);
+ if (!mcu)
+ return -ENOMEM;
+
+ mcu->client = client;
+ i2c_set_clientdata(client, mcu);
+
+ err = omnia_mcu_read_features(mcu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot determine MCU supported features\n");
+
+ if (mcu->features & OMNIA_FEAT_BOARD_INFO) {
+ err = omnia_mcu_read_board_info(mcu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot read board info\n");
+ }
+
+ err = omnia_mcu_register_sys_off_and_wakeup(mcu);
+ if (err)
+ return err;
+
+ err = omnia_mcu_register_watchdog(mcu);
+ if (err)
+ return err;
+
+ err = omnia_mcu_register_gpiochip(mcu);
+ if (err)
+ return err;
+
+ return omnia_mcu_register_trng(mcu);
+}
+
+static const struct of_device_id of_omnia_mcu_match[] = {
+ { .compatible = "cznic,turris-omnia-mcu" },
+ {}
+};
+
+static struct i2c_driver omnia_mcu_driver = {
+ .probe = omnia_mcu_probe,
+ .driver = {
+ .name = "turris-omnia-mcu",
+ .of_match_table = of_omnia_mcu_match,
+ .dev_groups = omnia_mcu_groups,
+ },
+};
+module_i2c_driver(omnia_mcu_driver);
+
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
+MODULE_DESCRIPTION("CZ.NIC's Turris Omnia MCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
new file mode 100644
index 000000000000..91da56a704c7
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CZ.NIC's Turris Omnia MCU GPIO and IRQ driver
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+
+#include <linux/turris-omnia-mcu-interface.h>
+#include "turris-omnia-mcu.h"
+
+#define OMNIA_CMD_INT_ARG_LEN 8
+#define FRONT_BUTTON_RELEASE_DELAY_MS 50
+
+static const char * const omnia_mcu_gpio_templates[64] = {
+ /* GPIOs with value read from the 16-bit wide status */
+ [4] = "MiniPCIe0 Card Detect",
+ [5] = "MiniPCIe0 mSATA Indicator",
+ [6] = "Front USB3 port over-current",
+ [7] = "Rear USB3 port over-current",
+ [8] = "Front USB3 port power",
+ [9] = "Rear USB3 port power",
+ [12] = "Front Button",
+
+ /* GPIOs with value read from the 32-bit wide extended status */
+ [16] = "SFP nDET",
+ [28] = "MiniPCIe0 LED",
+ [29] = "MiniPCIe1 LED",
+ [30] = "MiniPCIe2 LED",
+ [31] = "MiniPCIe0 PAN LED",
+ [32] = "MiniPCIe1 PAN LED",
+ [33] = "MiniPCIe2 PAN LED",
+ [34] = "WAN PHY LED0",
+ [35] = "WAN PHY LED1",
+ [36] = "LAN switch p0 LED0",
+ [37] = "LAN switch p0 LED1",
+ [38] = "LAN switch p1 LED0",
+ [39] = "LAN switch p1 LED1",
+ [40] = "LAN switch p2 LED0",
+ [41] = "LAN switch p2 LED1",
+ [42] = "LAN switch p3 LED0",
+ [43] = "LAN switch p3 LED1",
+ [44] = "LAN switch p4 LED0",
+ [45] = "LAN switch p4 LED1",
+ [46] = "LAN switch p5 LED0",
+ [47] = "LAN switch p5 LED1",
+
+ /* GPIOs with value read from the 16-bit wide extended control status */
+ [48] = "eMMC nRESET",
+ [49] = "LAN switch nRESET",
+ [50] = "WAN PHY nRESET",
+ [51] = "MiniPCIe0 nPERST",
+ [52] = "MiniPCIe1 nPERST",
+ [53] = "MiniPCIe2 nPERST",
+ [54] = "WAN PHY SFP mux",
+ [56] = "VHV power disable",
+};
+
+struct omnia_gpio {
+ u8 cmd;
+ u8 ctl_cmd;
+ u8 bit;
+ u8 ctl_bit;
+ u8 int_bit;
+ u16 feat;
+ u16 feat_mask;
+};
+
+#define OMNIA_GPIO_INVALID_INT_BIT 0xff
+
+#define _DEF_GPIO(_cmd, _ctl_cmd, _bit, _ctl_bit, _int_bit, _feat, _feat_mask) \
+ { \
+ .cmd = _cmd, \
+ .ctl_cmd = _ctl_cmd, \
+ .bit = _bit, \
+ .ctl_bit = _ctl_bit, \
+ .int_bit = (_int_bit) < 0 ? OMNIA_GPIO_INVALID_INT_BIT \
+ : (_int_bit), \
+ .feat = _feat, \
+ .feat_mask = _feat_mask, \
+ }
+
+#define _DEF_GPIO_STS(_name) \
+ _DEF_GPIO(OMNIA_CMD_GET_STATUS_WORD, 0, __bf_shf(OMNIA_STS_ ## _name), \
+ 0, __bf_shf(OMNIA_INT_ ## _name), 0, 0)
+
+#define _DEF_GPIO_CTL(_name) \
+ _DEF_GPIO(OMNIA_CMD_GET_STATUS_WORD, OMNIA_CMD_GENERAL_CONTROL, \
+ __bf_shf(OMNIA_STS_ ## _name), __bf_shf(OMNIA_CTL_ ## _name), \
+ -1, 0, 0)
+
+#define _DEF_GPIO_EXT_STS(_name, _feat) \
+ _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \
+ __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \
+ __bf_shf(OMNIA_INT_ ## _name), \
+ OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS, \
+ OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS)
+
+#define _DEF_GPIO_EXT_STS_LED(_name, _ledext) \
+ _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \
+ __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \
+ __bf_shf(OMNIA_INT_ ## _name), \
+ OMNIA_FEAT_LED_STATE_ ## _ledext, \
+ OMNIA_FEAT_LED_STATE_EXT_MASK)
+
+#define _DEF_GPIO_EXT_STS_LEDALL(_name) \
+ _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \
+ __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \
+ __bf_shf(OMNIA_INT_ ## _name), \
+ OMNIA_FEAT_LED_STATE_EXT_MASK, 0)
+
+#define _DEF_GPIO_EXT_CTL(_name, _feat) \
+ _DEF_GPIO(OMNIA_CMD_GET_EXT_CONTROL_STATUS, OMNIA_CMD_EXT_CONTROL, \
+ __bf_shf(OMNIA_EXT_CTL_ ## _name), \
+ __bf_shf(OMNIA_EXT_CTL_ ## _name), -1, \
+ OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS, \
+ OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS)
+
+#define _DEF_INT(_name) \
+ _DEF_GPIO(0, 0, 0, 0, __bf_shf(OMNIA_INT_ ## _name), 0, 0)
+
+static inline bool is_int_bit_valid(const struct omnia_gpio *gpio)
+{
+ return gpio->int_bit != OMNIA_GPIO_INVALID_INT_BIT;
+}
+
+static const struct omnia_gpio omnia_gpios[64] = {
+ /* GPIOs with value read from the 16-bit wide status */
+ [4] = _DEF_GPIO_STS(CARD_DET),
+ [5] = _DEF_GPIO_STS(MSATA_IND),
+ [6] = _DEF_GPIO_STS(USB30_OVC),
+ [7] = _DEF_GPIO_STS(USB31_OVC),
+ [8] = _DEF_GPIO_CTL(USB30_PWRON),
+ [9] = _DEF_GPIO_CTL(USB31_PWRON),
+
+ /* brightness changed interrupt, no GPIO */
+ [11] = _DEF_INT(BRIGHTNESS_CHANGED),
+
+ [12] = _DEF_GPIO_STS(BUTTON_PRESSED),
+
+ /* TRNG interrupt, no GPIO */
+ [13] = _DEF_INT(TRNG),
+
+ /* MESSAGE_SIGNED interrupt, no GPIO */
+ [14] = _DEF_INT(MESSAGE_SIGNED),
+
+ /* GPIOs with value read from the 32-bit wide extended status */
+ [16] = _DEF_GPIO_EXT_STS(SFP_nDET, PERIPH_MCU),
+ [28] = _DEF_GPIO_EXT_STS_LEDALL(WLAN0_MSATA_LED),
+ [29] = _DEF_GPIO_EXT_STS_LEDALL(WLAN1_LED),
+ [30] = _DEF_GPIO_EXT_STS_LEDALL(WLAN2_LED),
+ [31] = _DEF_GPIO_EXT_STS_LED(WPAN0_LED, EXT),
+ [32] = _DEF_GPIO_EXT_STS_LED(WPAN1_LED, EXT),
+ [33] = _DEF_GPIO_EXT_STS_LED(WPAN2_LED, EXT),
+ [34] = _DEF_GPIO_EXT_STS_LEDALL(WAN_LED0),
+ [35] = _DEF_GPIO_EXT_STS_LED(WAN_LED1, EXT_V32),
+ [36] = _DEF_GPIO_EXT_STS_LEDALL(LAN0_LED0),
+ [37] = _DEF_GPIO_EXT_STS_LEDALL(LAN0_LED1),
+ [38] = _DEF_GPIO_EXT_STS_LEDALL(LAN1_LED0),
+ [39] = _DEF_GPIO_EXT_STS_LEDALL(LAN1_LED1),
+ [40] = _DEF_GPIO_EXT_STS_LEDALL(LAN2_LED0),
+ [41] = _DEF_GPIO_EXT_STS_LEDALL(LAN2_LED1),
+ [42] = _DEF_GPIO_EXT_STS_LEDALL(LAN3_LED0),
+ [43] = _DEF_GPIO_EXT_STS_LEDALL(LAN3_LED1),
+ [44] = _DEF_GPIO_EXT_STS_LEDALL(LAN4_LED0),
+ [45] = _DEF_GPIO_EXT_STS_LEDALL(LAN4_LED1),
+ [46] = _DEF_GPIO_EXT_STS_LEDALL(LAN5_LED0),
+ [47] = _DEF_GPIO_EXT_STS_LEDALL(LAN5_LED1),
+
+ /* GPIOs with value read from the 16-bit wide extended control status */
+ [48] = _DEF_GPIO_EXT_CTL(nRES_MMC, PERIPH_MCU),
+ [49] = _DEF_GPIO_EXT_CTL(nRES_LAN, PERIPH_MCU),
+ [50] = _DEF_GPIO_EXT_CTL(nRES_PHY, PERIPH_MCU),
+ [51] = _DEF_GPIO_EXT_CTL(nPERST0, PERIPH_MCU),
+ [52] = _DEF_GPIO_EXT_CTL(nPERST1, PERIPH_MCU),
+ [53] = _DEF_GPIO_EXT_CTL(nPERST2, PERIPH_MCU),
+ [54] = _DEF_GPIO_EXT_CTL(PHY_SFP, PERIPH_MCU),
+ [56] = _DEF_GPIO_EXT_CTL(nVHV_CTRL, PERIPH_MCU),
+};
+
+/* mapping from interrupts to indexes of GPIOs in the omnia_gpios array */
+const u8 omnia_int_to_gpio_idx[32] = {
+ [__bf_shf(OMNIA_INT_CARD_DET)] = 4,
+ [__bf_shf(OMNIA_INT_MSATA_IND)] = 5,
+ [__bf_shf(OMNIA_INT_USB30_OVC)] = 6,
+ [__bf_shf(OMNIA_INT_USB31_OVC)] = 7,
+ [__bf_shf(OMNIA_INT_BUTTON_PRESSED)] = 12,
+ [__bf_shf(OMNIA_INT_TRNG)] = 13,
+ [__bf_shf(OMNIA_INT_MESSAGE_SIGNED)] = 14,
+ [__bf_shf(OMNIA_INT_SFP_nDET)] = 16,
+ [__bf_shf(OMNIA_INT_BRIGHTNESS_CHANGED)] = 11,
+ [__bf_shf(OMNIA_INT_WLAN0_MSATA_LED)] = 28,
+ [__bf_shf(OMNIA_INT_WLAN1_LED)] = 29,
+ [__bf_shf(OMNIA_INT_WLAN2_LED)] = 30,
+ [__bf_shf(OMNIA_INT_WPAN0_LED)] = 31,
+ [__bf_shf(OMNIA_INT_WPAN1_LED)] = 32,
+ [__bf_shf(OMNIA_INT_WPAN2_LED)] = 33,
+ [__bf_shf(OMNIA_INT_WAN_LED0)] = 34,
+ [__bf_shf(OMNIA_INT_WAN_LED1)] = 35,
+ [__bf_shf(OMNIA_INT_LAN0_LED0)] = 36,
+ [__bf_shf(OMNIA_INT_LAN0_LED1)] = 37,
+ [__bf_shf(OMNIA_INT_LAN1_LED0)] = 38,
+ [__bf_shf(OMNIA_INT_LAN1_LED1)] = 39,
+ [__bf_shf(OMNIA_INT_LAN2_LED0)] = 40,
+ [__bf_shf(OMNIA_INT_LAN2_LED1)] = 41,
+ [__bf_shf(OMNIA_INT_LAN3_LED0)] = 42,
+ [__bf_shf(OMNIA_INT_LAN3_LED1)] = 43,
+ [__bf_shf(OMNIA_INT_LAN4_LED0)] = 44,
+ [__bf_shf(OMNIA_INT_LAN4_LED1)] = 45,
+ [__bf_shf(OMNIA_INT_LAN5_LED0)] = 46,
+ [__bf_shf(OMNIA_INT_LAN5_LED1)] = 47,
+};
+
+/* index of PHY_SFP GPIO in the omnia_gpios array */
+#define OMNIA_GPIO_PHY_SFP_OFFSET 54
+
+static int omnia_ctl_cmd_locked(struct omnia_mcu *mcu, u8 cmd, u16 val, u16 mask)
+{
+ unsigned int len;
+ u8 buf[5];
+
+ buf[0] = cmd;
+
+ switch (cmd) {
+ case OMNIA_CMD_GENERAL_CONTROL:
+ buf[1] = val;
+ buf[2] = mask;
+ len = 3;
+ break;
+
+ case OMNIA_CMD_EXT_CONTROL:
+ put_unaligned_le16(val, &buf[1]);
+ put_unaligned_le16(mask, &buf[3]);
+ len = 5;
+ break;
+
+ default:
+ BUG();
+ }
+
+ return omnia_cmd_write(mcu->client, buf, len);
+}
+
+static int omnia_ctl_cmd(struct omnia_mcu *mcu, u8 cmd, u16 val, u16 mask)
+{
+ guard(mutex)(&mcu->lock);
+
+ return omnia_ctl_cmd_locked(mcu, cmd, val, mask);
+}
+
+static int omnia_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ if (!omnia_gpios[offset].cmd)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int omnia_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+
+ if (offset == OMNIA_GPIO_PHY_SFP_OFFSET) {
+ int val;
+
+ scoped_guard(mutex, &mcu->lock) {
+ val = omnia_cmd_read_bit(mcu->client,
+ OMNIA_CMD_GET_EXT_CONTROL_STATUS,
+ OMNIA_EXT_CTL_PHY_SFP_AUTO);
+ if (val < 0)
+ return val;
+ }
+
+ if (val)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+ }
+
+ if (omnia_gpios[offset].ctl_cmd)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int omnia_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ const struct omnia_gpio *gpio = &omnia_gpios[offset];
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+
+ if (offset == OMNIA_GPIO_PHY_SFP_OFFSET)
+ return omnia_ctl_cmd(mcu, OMNIA_CMD_EXT_CONTROL,
+ OMNIA_EXT_CTL_PHY_SFP_AUTO,
+ OMNIA_EXT_CTL_PHY_SFP_AUTO);
+
+ if (gpio->ctl_cmd)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int omnia_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ const struct omnia_gpio *gpio = &omnia_gpios[offset];
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ u16 val, mask;
+
+ if (!gpio->ctl_cmd)
+ return -ENOTSUPP;
+
+ mask = BIT(gpio->ctl_bit);
+ val = value ? mask : 0;
+
+ if (offset == OMNIA_GPIO_PHY_SFP_OFFSET)
+ mask |= OMNIA_EXT_CTL_PHY_SFP_AUTO;
+
+ return omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask);
+}
+
+static int omnia_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ const struct omnia_gpio *gpio = &omnia_gpios[offset];
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+
+ /*
+ * If firmware does not support the new interrupt API, we are informed
+ * of every change of the status word by an interrupt from MCU and save
+ * its value in the interrupt service routine. Simply return the saved
+ * value.
+ */
+ if (gpio->cmd == OMNIA_CMD_GET_STATUS_WORD &&
+ !(mcu->features & OMNIA_FEAT_NEW_INT_API))
+ return test_bit(gpio->bit, &mcu->last_status);
+
+ guard(mutex)(&mcu->lock);
+
+ /*
+ * If firmware does support the new interrupt API, we may have cached
+ * the value of a GPIO in the interrupt service routine. If not, read
+ * the relevant bit now.
+ */
+ if (is_int_bit_valid(gpio) && test_bit(gpio->int_bit, &mcu->is_cached))
+ return test_bit(gpio->int_bit, &mcu->cached);
+
+ return omnia_cmd_read_bit(mcu->client, gpio->cmd, BIT(gpio->bit));
+}
+
+static unsigned long *
+_relevant_field_for_sts_cmd(u8 cmd, unsigned long *sts, unsigned long *ext_sts,
+ unsigned long *ext_ctl)
+{
+ switch (cmd) {
+ case OMNIA_CMD_GET_STATUS_WORD:
+ return sts;
+ case OMNIA_CMD_GET_EXT_STATUS_DWORD:
+ return ext_sts;
+ case OMNIA_CMD_GET_EXT_CONTROL_STATUS:
+ return ext_ctl;
+ default:
+ return NULL;
+ }
+}
+
+static int omnia_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long sts = 0, ext_sts = 0, ext_ctl = 0, *field;
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ struct i2c_client *client = mcu->client;
+ unsigned int i;
+ int err;
+
+ /* determine which bits to read from the 3 possible commands */
+ for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) {
+ field = _relevant_field_for_sts_cmd(omnia_gpios[i].cmd,
+ &sts, &ext_sts, &ext_ctl);
+ if (!field)
+ continue;
+
+ __set_bit(omnia_gpios[i].bit, field);
+ }
+
+ guard(mutex)(&mcu->lock);
+
+ if (mcu->features & OMNIA_FEAT_NEW_INT_API) {
+ /* read relevant bits from status */
+ err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_STATUS_WORD,
+ sts, &sts);
+ if (err)
+ return err;
+ } else {
+ /*
+ * Use status word value cached in the interrupt service routine
+ * if firmware does not support the new interrupt API.
+ */
+ sts = mcu->last_status;
+ }
+
+ /* read relevant bits from extended status */
+ err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_EXT_STATUS_DWORD,
+ ext_sts, &ext_sts);
+ if (err)
+ return err;
+
+ /* read relevant bits from extended control */
+ err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_EXT_CONTROL_STATUS,
+ ext_ctl, &ext_ctl);
+ if (err)
+ return err;
+
+ /* assign relevant bits in result */
+ for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) {
+ field = _relevant_field_for_sts_cmd(omnia_gpios[i].cmd,
+ &sts, &ext_sts, &ext_ctl);
+ if (!field)
+ continue;
+
+ __assign_bit(i, bits, test_bit(omnia_gpios[i].bit, field));
+ }
+
+ return 0;
+}
+
+static void omnia_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ const struct omnia_gpio *gpio = &omnia_gpios[offset];
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ u16 val, mask;
+
+ if (!gpio->ctl_cmd)
+ return;
+
+ mask = BIT(gpio->ctl_bit);
+ val = value ? mask : 0;
+
+ omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask);
+}
+
+static void omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long ctl = 0, ctl_mask = 0, ext_ctl = 0, ext_ctl_mask = 0;
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ unsigned int i;
+
+ for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) {
+ unsigned long *field, *field_mask;
+ u8 bit = omnia_gpios[i].ctl_bit;
+
+ switch (omnia_gpios[i].ctl_cmd) {
+ case OMNIA_CMD_GENERAL_CONTROL:
+ field = &ctl;
+ field_mask = &ctl_mask;
+ break;
+ case OMNIA_CMD_EXT_CONTROL:
+ field = &ext_ctl;
+ field_mask = &ext_ctl_mask;
+ break;
+ default:
+ field = field_mask = NULL;
+ break;
+ }
+
+ if (!field)
+ continue;
+
+ __set_bit(bit, field_mask);
+ __assign_bit(bit, field, test_bit(i, bits));
+ }
+
+ guard(mutex)(&mcu->lock);
+
+ if (ctl_mask)
+ omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL,
+ ctl, ctl_mask);
+
+ if (ext_ctl_mask)
+ omnia_ctl_cmd_locked(mcu, OMNIA_CMD_EXT_CONTROL,
+ ext_ctl, ext_ctl_mask);
+}
+
+static bool omnia_gpio_available(struct omnia_mcu *mcu,
+ const struct omnia_gpio *gpio)
+{
+ if (gpio->feat_mask)
+ return (mcu->features & gpio->feat_mask) == gpio->feat;
+
+ if (gpio->feat)
+ return mcu->features & gpio->feat;
+
+ return true;
+}
+
+static int omnia_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+
+ for (unsigned int i = 0; i < ngpios; i++) {
+ const struct omnia_gpio *gpio = &omnia_gpios[i];
+
+ if (gpio->cmd || is_int_bit_valid(gpio))
+ __assign_bit(i, valid_mask,
+ omnia_gpio_available(mcu, gpio));
+ else
+ __clear_bit(i, valid_mask);
+ }
+
+ return 0;
+}
+
+static int omnia_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ u32 bank, gpio;
+
+ if (WARN_ON(gpiospec->args_count != 3))
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+
+ bank = gpiospec->args[0];
+ gpio = gpiospec->args[1];
+
+ switch (bank) {
+ case 0:
+ return gpio < 16 ? gpio : -EINVAL;
+ case 1:
+ return gpio < 32 ? 16 + gpio : -EINVAL;
+ case 2:
+ return gpio < 16 ? 48 + gpio : -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
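For reference, the three-cell specifier above maps banks onto contiguous chip offsets; the worked examples below follow directly from the translation (line numbers chosen only for illustration):

	/*
	 * <bank line>  ->  chip offset
	 *    <0 4>     ->   4  (status word,             offsets  0-15)
	 *    <1 12>    ->  28  (extended status,         offsets 16-47)
	 *    <2 6>     ->  54  (extended control status, offsets 48-63)
	 */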
+
+static void omnia_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u8 bit = omnia_gpios[hwirq].int_bit;
+
+ __clear_bit(bit, &mcu->rising);
+ __clear_bit(bit, &mcu->falling);
+}
+
+static void omnia_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u8 bit = omnia_gpios[hwirq].int_bit;
+
+ if (!omnia_gpios[hwirq].cmd)
+ __clear_bit(bit, &mcu->rising);
+ __clear_bit(bit, &mcu->mask);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void omnia_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u8 bit = omnia_gpios[hwirq].int_bit;
+
+ gpiochip_enable_irq(gc, hwirq);
+ __set_bit(bit, &mcu->mask);
+ if (!omnia_gpios[hwirq].cmd)
+ __set_bit(bit, &mcu->rising);
+}
+
+static int omnia_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct device *dev = &mcu->client->dev;
+ u8 bit = omnia_gpios[hwirq].int_bit;
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ dev_err(dev, "irq %u: unsupported type %u\n", d->irq, type);
+ return -EINVAL;
+ }
+
+ __assign_bit(bit, &mcu->rising, type & IRQ_TYPE_EDGE_RISING);
+ __assign_bit(bit, &mcu->falling, type & IRQ_TYPE_EDGE_FALLING);
+
+ return 0;
+}
+
+static void omnia_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+
+ /* nothing to do if MCU firmware does not support new interrupt API */
+ if (!(mcu->features & OMNIA_FEAT_NEW_INT_API))
+ return;
+
+ mutex_lock(&mcu->lock);
+}
+
+/**
+ * omnia_mask_interleave - Interleaves the bytes from @rising and @falling
+ * @dst: the destination u8 array of interleaved bytes
+ * @rising: rising mask
+ * @falling: falling mask
+ *
+ * Interleaves the little-endian bytes from @rising and @falling words.
+ *
+ * If @rising = (r0, r1, r2, r3) and @falling = (f0, f1, f2, f3), the result is
+ * @dst = (r0, f0, r1, f1, r2, f2, r3, f3).
+ *
+ * The MCU receives an interrupt mask and reports a pending interrupt bitmap in
+ * this interleaved format. The rationale behind this is that the low-indexed
+ * bits are more important: in many cases, the user will be interested only in
+ * interrupts with indices 0 to 7, and so the system can stop reading after the
+ * first two bytes (r0, f0), to save time on the slow I2C bus.
+ *
+ * Feel free to remove this function and its inverse, omnia_mask_deinterleave,
+ * and use an appropriate bitmap_*() function once such a function exists.
+ */
+static void
+omnia_mask_interleave(u8 *dst, unsigned long rising, unsigned long falling)
+{
+ for (unsigned int i = 0; i < sizeof(u32); i++) {
+ dst[2 * i] = rising >> (8 * i);
+ dst[2 * i + 1] = falling >> (8 * i);
+ }
+}
+
+/**
+ * omnia_mask_deinterleave - Deinterleaves the bytes into @rising and @falling
+ * @src: the source u8 array containing the interleaved bytes
+ * @rising: pointer where to store the rising mask gathered from @src
+ * @falling: pointer where to store the falling mask gathered from @src
+ *
+ * This is the inverse function to omnia_mask_interleave.
+ */
+static void omnia_mask_deinterleave(const u8 *src, unsigned long *rising,
+ unsigned long *falling)
+{
+ *rising = *falling = 0;
+
+ for (unsigned int i = 0; i < sizeof(u32); i++) {
+ *rising |= src[2 * i] << (8 * i);
+ *falling |= src[2 * i + 1] << (8 * i);
+ }
+}
+
+static void omnia_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ struct device *dev = &mcu->client->dev;
+ u8 cmd[1 + OMNIA_CMD_INT_ARG_LEN];
+ unsigned long rising, falling;
+ int err;
+
+ /* nothing to do if MCU firmware does not support new interrupt API */
+ if (!(mcu->features & OMNIA_FEAT_NEW_INT_API))
+ return;
+
+ cmd[0] = OMNIA_CMD_SET_INT_MASK;
+
+ rising = mcu->rising & mcu->mask;
+ falling = mcu->falling & mcu->mask;
+
+ /* interleave the rising and falling bytes into the command arguments */
+ omnia_mask_interleave(&cmd[1], rising, falling);
+
+ dev_dbg(dev, "set int mask %8ph\n", &cmd[1]);
+
+ err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd));
+ if (err) {
+ dev_err(dev, "Cannot set mask: %d\n", err);
+ goto unlock;
+ }
+
+ /*
+ * Remember which GPIOs have both rising and falling interrupts enabled.
+ * For those we cache their value so that the .get() method is faster.
+ * We also need to drop the cached values of GPIOs that no longer have
+ * both edges enabled.
+ */
+ mcu->both = rising & falling;
+ mcu->is_cached &= mcu->both;
+
+unlock:
+ mutex_unlock(&mcu->lock);
+}
+
+static const struct irq_chip omnia_mcu_irq_chip = {
+ .name = "Turris Omnia MCU interrupts",
+ .irq_shutdown = omnia_irq_shutdown,
+ .irq_mask = omnia_irq_mask,
+ .irq_unmask = omnia_irq_unmask,
+ .irq_set_type = omnia_irq_set_type,
+ .irq_bus_lock = omnia_irq_bus_lock,
+ .irq_bus_sync_unlock = omnia_irq_bus_sync_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void omnia_irq_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+
+ for (unsigned int i = 0; i < ngpios; i++) {
+ const struct omnia_gpio *gpio = &omnia_gpios[i];
+
+ if (is_int_bit_valid(gpio))
+ __assign_bit(i, valid_mask,
+ omnia_gpio_available(mcu, gpio));
+ else
+ __clear_bit(i, valid_mask);
+ }
+}
+
+static int omnia_irq_init_hw(struct gpio_chip *gc)
+{
+ struct omnia_mcu *mcu = gpiochip_get_data(gc);
+ u8 cmd[1 + OMNIA_CMD_INT_ARG_LEN] = {};
+
+ cmd[0] = OMNIA_CMD_SET_INT_MASK;
+
+ return omnia_cmd_write(mcu->client, cmd, sizeof(cmd));
+}
+
+/*
+ * Determine how many bytes we need to read from the reply to the
+ * OMNIA_CMD_GET_INT_AND_CLEAR command in order to retrieve all unmasked
+ * interrupts.
+ */
+static unsigned int
+omnia_irq_compute_pending_length(unsigned long rising, unsigned long falling)
+{
+ return max(omnia_compute_reply_length(rising, true, 0),
+ omnia_compute_reply_length(falling, true, 1));
+}
+
+static bool omnia_irq_read_pending_new(struct omnia_mcu *mcu,
+ unsigned long *pending)
+{
+ struct device *dev = &mcu->client->dev;
+ u8 reply[OMNIA_CMD_INT_ARG_LEN] = {};
+ unsigned long rising, falling;
+ unsigned int len;
+ int err;
+
+ len = omnia_irq_compute_pending_length(mcu->rising & mcu->mask,
+ mcu->falling & mcu->mask);
+ if (!len)
+ return false;
+
+ guard(mutex)(&mcu->lock);
+
+ err = omnia_cmd_read(mcu->client, OMNIA_CMD_GET_INT_AND_CLEAR, reply,
+ len);
+ if (err) {
+ dev_err(dev, "Cannot read pending IRQs: %d\n", err);
+ return false;
+ }
+
+ /* deinterleave the reply bytes into rising and falling */
+ omnia_mask_deinterleave(reply, &rising, &falling);
+
+ rising &= mcu->mask;
+ falling &= mcu->mask;
+ *pending = rising | falling;
+
+ /* cache values for GPIOs that have both edges enabled */
+ mcu->is_cached &= ~(rising & falling);
+ mcu->is_cached |= mcu->both & (rising ^ falling);
+ mcu->cached = (mcu->cached | rising) & ~falling;
+
+ return true;
+}
+
+static int omnia_read_status_word_old_fw(struct omnia_mcu *mcu,
+ unsigned long *status)
+{
+ u16 raw_status;
+ int err;
+
+ err = omnia_cmd_read_u16(mcu->client, OMNIA_CMD_GET_STATUS_WORD,
+ &raw_status);
+ if (err)
+ return err;
+
+ /*
+ * Old firmware has a bug wherein it never resets the USB port
+ * overcurrent bits back to zero. Ignore them.
+ */
+ *status = raw_status & ~(OMNIA_STS_USB30_OVC | OMNIA_STS_USB31_OVC);
+
+ return 0;
+}
+
+static void button_release_emul_fn(struct work_struct *work)
+{
+ struct omnia_mcu *mcu = container_of(to_delayed_work(work),
+ struct omnia_mcu,
+ button_release_emul_work);
+
+ mcu->button_pressed_emul = false;
+ generic_handle_irq_safe(mcu->client->irq);
+}
+
+static void
+fill_int_from_sts(unsigned long *rising, unsigned long *falling,
+ unsigned long rising_sts, unsigned long falling_sts,
+ unsigned long sts_bit, unsigned long int_bit)
+{
+ if (rising_sts & sts_bit)
+ *rising |= int_bit;
+ if (falling_sts & sts_bit)
+ *falling |= int_bit;
+}
+
+static bool omnia_irq_read_pending_old(struct omnia_mcu *mcu,
+ unsigned long *pending)
+{
+ unsigned long status, rising_sts, falling_sts, rising, falling;
+ struct device *dev = &mcu->client->dev;
+ int err;
+
+ guard(mutex)(&mcu->lock);
+
+ err = omnia_read_status_word_old_fw(mcu, &status);
+ if (err) {
+ dev_err(dev, "Cannot read pending IRQs: %d\n", err);
+ return false;
+ }
+
+ /*
+ * The old firmware triggers an interrupt whenever the status word changes,
+ * but does not report which bits rose or fell. We need to compute this
+ * here by comparing the new value with the previously read status word.
+ *
+ * The OMNIA_STS_BUTTON_PRESSED bit needs special handling, because the
+ * old firmware clears the OMNIA_STS_BUTTON_PRESSED bit on successful
+ * completion of the OMNIA_CMD_GET_STATUS_WORD command, resulting in
+ * another interrupt:
+ * - first we get an interrupt and read the status word, in which
+ *   OMNIA_STS_BUTTON_PRESSED is set,
+ * - the MCU clears the OMNIA_STS_BUTTON_PRESSED bit because we read the
+ *   status word,
+ * - we get another interrupt because the status word changed again
+ *   (the OMNIA_STS_BUTTON_PRESSED bit was cleared).
+ *
+ * The gpiolib-cdev and gpiolib-sysfs interfaces and the gpio-keys input
+ * driver all call the gpiochip's .get() method after an edge event occurs
+ * on a requested GPIO.
+ *
+ * To compensate, we ensure that the .get() method reads 1 for the button
+ * GPIO for some time after the press.
+ */
+
+ if (status & OMNIA_STS_BUTTON_PRESSED) {
+ mcu->button_pressed_emul = true;
+ mod_delayed_work(system_wq, &mcu->button_release_emul_work,
+ msecs_to_jiffies(FRONT_BUTTON_RELEASE_DELAY_MS));
+ } else if (mcu->button_pressed_emul) {
+ status |= OMNIA_STS_BUTTON_PRESSED;
+ }
+
+ rising_sts = ~mcu->last_status & status;
+ falling_sts = mcu->last_status & ~status;
+
+ mcu->last_status = status;
+
+ /*
+ * Fill in the relevant interrupt bits from status bits for CARD_DET,
+ * MSATA_IND and BUTTON_PRESSED.
+ */
+ rising = 0;
+ falling = 0;
+ fill_int_from_sts(&rising, &falling, rising_sts, falling_sts,
+ OMNIA_STS_CARD_DET, OMNIA_INT_CARD_DET);
+ fill_int_from_sts(&rising, &falling, rising_sts, falling_sts,
+ OMNIA_STS_MSATA_IND, OMNIA_INT_MSATA_IND);
+ fill_int_from_sts(&rising, &falling, rising_sts, falling_sts,
+ OMNIA_STS_BUTTON_PRESSED, OMNIA_INT_BUTTON_PRESSED);
+
+ /* Use only bits that are enabled */
+ rising &= mcu->rising & mcu->mask;
+ falling &= mcu->falling & mcu->mask;
+ *pending = rising | falling;
+
+ return true;
+}
+
+static bool omnia_irq_read_pending(struct omnia_mcu *mcu,
+ unsigned long *pending)
+{
+ if (mcu->features & OMNIA_FEAT_NEW_INT_API)
+ return omnia_irq_read_pending_new(mcu, pending);
+ else
+ return omnia_irq_read_pending_old(mcu, pending);
+}
+
+static irqreturn_t omnia_irq_thread_handler(int irq, void *dev_id)
+{
+ struct omnia_mcu *mcu = dev_id;
+ struct irq_domain *domain;
+ unsigned long pending;
+ unsigned int i;
+
+ if (!omnia_irq_read_pending(mcu, &pending))
+ return IRQ_NONE;
+
+ domain = mcu->gc.irq.domain;
+
+ for_each_set_bit(i, &pending, 32) {
+ unsigned int nested_irq;
+
+ nested_irq = irq_find_mapping(domain, omnia_int_to_gpio_idx[i]);
+
+ handle_nested_irq(nested_irq);
+ }
+
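+ /*
+ * IRQ_RETVAL() evaluates to IRQ_HANDLED if at least one nested interrupt
+ * was pending, IRQ_NONE otherwise.
+ */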
+ return IRQ_RETVAL(pending);
+}
+
+static const char * const front_button_modes[] = { "mcu", "cpu" };
+
+static ssize_t front_button_mode_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+ int val;
+
+ if (mcu->features & OMNIA_FEAT_NEW_INT_API) {
+ val = omnia_cmd_read_bit(mcu->client, OMNIA_CMD_GET_STATUS_WORD,
+ OMNIA_STS_BUTTON_MODE);
+ if (val < 0)
+ return val;
+ } else {
+ val = !!(mcu->last_status & OMNIA_STS_BUTTON_MODE);
+ }
+
+ return sysfs_emit(buf, "%s\n", front_button_modes[val]);
+}
+
+static ssize_t front_button_mode_store(struct device *dev,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+ int err, i;
+
+ i = sysfs_match_string(front_button_modes, buf);
+ if (i < 0)
+ return i;
+
+ err = omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL,
+ i ? OMNIA_CTL_BUTTON_MODE : 0,
+ OMNIA_CTL_BUTTON_MODE);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_RW(front_button_mode);
+
+static struct attribute *omnia_mcu_gpio_attrs[] = {
+ &dev_attr_front_button_mode.attr,
+ NULL
+};
+
+const struct attribute_group omnia_mcu_gpio_group = {
+ .attrs = omnia_mcu_gpio_attrs,
+};
+
+int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
+{
+ bool new_api = mcu->features & OMNIA_FEAT_NEW_INT_API;
+ struct device *dev = &mcu->client->dev;
+ unsigned long irqflags;
+ int err;
+
+ err = devm_mutex_init(dev, &mcu->lock);
+ if (err)
+ return err;
+
+ mcu->gc.request = omnia_gpio_request;
+ mcu->gc.get_direction = omnia_gpio_get_direction;
+ mcu->gc.direction_input = omnia_gpio_direction_input;
+ mcu->gc.direction_output = omnia_gpio_direction_output;
+ mcu->gc.get = omnia_gpio_get;
+ mcu->gc.get_multiple = omnia_gpio_get_multiple;
+ mcu->gc.set = omnia_gpio_set;
+ mcu->gc.set_multiple = omnia_gpio_set_multiple;
+ mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask;
+ mcu->gc.can_sleep = true;
+ mcu->gc.names = omnia_mcu_gpio_templates;
+ mcu->gc.base = -1;
+ mcu->gc.ngpio = ARRAY_SIZE(omnia_gpios);
+ mcu->gc.label = "Turris Omnia MCU GPIOs";
+ mcu->gc.parent = dev;
+ mcu->gc.owner = THIS_MODULE;
+ mcu->gc.of_gpio_n_cells = 3;
+ mcu->gc.of_xlate = omnia_gpio_of_xlate;
+
+ gpio_irq_chip_set_chip(&mcu->gc.irq, &omnia_mcu_irq_chip);
+ /* This will let us handle the parent IRQ in the driver */
+ mcu->gc.irq.parent_handler = NULL;
+ mcu->gc.irq.num_parents = 0;
+ mcu->gc.irq.parents = NULL;
+ mcu->gc.irq.default_type = IRQ_TYPE_NONE;
+ mcu->gc.irq.handler = handle_bad_irq;
+ mcu->gc.irq.threaded = true;
+ if (new_api)
+ mcu->gc.irq.init_hw = omnia_irq_init_hw;
+ mcu->gc.irq.init_valid_mask = omnia_irq_init_valid_mask;
+
+ err = devm_gpiochip_add_data(dev, &mcu->gc, mcu);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot add GPIO chip\n");
+
+ /*
+ * Before requesting the interrupt, if firmware does not support the new
+ * interrupt API, we need to cache the value of the status word, so that
+ * when it changes, we may compare the new value with the cached one in
+ * the interrupt handler.
+ */
+ if (!new_api) {
+ err = omnia_read_status_word_old_fw(mcu, &mcu->last_status);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot read status word\n");
+
+ INIT_DELAYED_WORK(&mcu->button_release_emul_work,
+ button_release_emul_fn);
+ }
+
+ irqflags = IRQF_ONESHOT;
+ if (new_api)
+ irqflags |= IRQF_TRIGGER_LOW;
+ else
+ irqflags |= IRQF_TRIGGER_FALLING;
+
+ err = devm_request_threaded_irq(dev, mcu->client->irq, NULL,
+ omnia_irq_thread_handler, irqflags,
+ "turris-omnia-mcu", mcu);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot request IRQ\n");
+
+ if (!new_api) {
+ /*
+ * The button_release_emul_work has to be initialized before the
+ * interrupt is requested, and on driver removal it needs to be
+ * canceled before the interrupt is freed. Therefore we can't use
+ * devm_delayed_work_autocancel() directly, because the order
+ *   devm_delayed_work_autocancel();
+ *   devm_request_threaded_irq();
+ * would cause the improper release order:
+ *   free_irq();
+ *   cancel_delayed_work_sync();
+ * Instead we first initialize the work above, and only now, after the
+ * IRQ has been requested, we add the work devm action.
+ */
+ err = devm_add_action(dev, devm_delayed_work_drop,
+ &mcu->button_release_emul_work);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c b/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c
new file mode 100644
index 000000000000..0e8ab15b6037
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CZ.NIC's Turris Omnia MCU system off and RTC wakeup driver
+ *
+ * This is not a true RTC driver (it does not provide a real-time clock);
+ * rather, the MCU implements wakeup from the powered-off state at a specified
+ * time relative to MCU boot, and we expose this feature as an RTC alarm so
+ * that it can be used via the rtcwake command, the standard Linux command for
+ * this.
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kstrtox.h>
+#include <linux/reboot.h>
+#include <linux/rtc.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <linux/turris-omnia-mcu-interface.h>
+#include "turris-omnia-mcu.h"
+
+static int omnia_get_uptime_wakeup(const struct i2c_client *client, u32 *uptime,
+ u32 *wakeup)
+{
+ __le32 reply[2];
+ int err;
+
+ err = omnia_cmd_read(client, OMNIA_CMD_GET_UPTIME_AND_WAKEUP, reply,
+ sizeof(reply));
+ if (err)
+ return err;
+
+ if (uptime)
+ *uptime = le32_to_cpu(reply[0]);
+
+ if (wakeup)
+ *wakeup = le32_to_cpu(reply[1]);
+
+ return 0;
+}
+
+static int omnia_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u32 uptime;
+ int err;
+
+ err = omnia_get_uptime_wakeup(to_i2c_client(dev), &uptime, NULL);
+ if (err)
+ return err;
+
+ rtc_time64_to_tm(uptime, tm);
+
+ return 0;
+}
+
+static int omnia_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct omnia_mcu *mcu = i2c_get_clientdata(client);
+ u32 wakeup;
+ int err;
+
+ err = omnia_get_uptime_wakeup(client, NULL, &wakeup);
+ if (err)
+ return err;
+
+ alrm->enabled = !!wakeup;
+ rtc_time64_to_tm(wakeup ?: mcu->rtc_alarm, &alrm->time);
+
+ return 0;
+}
+
+static int omnia_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct omnia_mcu *mcu = i2c_get_clientdata(client);
+
+ mcu->rtc_alarm = rtc_tm_to_time64(&alrm->time);
+
+ if (alrm->enabled)
+ return omnia_cmd_write_u32(client, OMNIA_CMD_SET_WAKEUP,
+ mcu->rtc_alarm);
+
+ return 0;
+}
+
+static int omnia_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct omnia_mcu *mcu = i2c_get_clientdata(client);
+
+ return omnia_cmd_write_u32(client, OMNIA_CMD_SET_WAKEUP,
+ enabled ? mcu->rtc_alarm : 0);
+}
+
+static const struct rtc_class_ops omnia_rtc_ops = {
+ .read_time = omnia_read_time,
+ .read_alarm = omnia_read_alarm,
+ .set_alarm = omnia_set_alarm,
+ .alarm_irq_enable = omnia_alarm_irq_enable,
+};
+
+static int omnia_power_off(struct sys_off_data *data)
+{
+ struct omnia_mcu *mcu = data->cb_data;
+ __be32 tmp;
+ u8 cmd[9];
+ u16 arg;
+ int err;
+
+ if (mcu->front_button_poweron)
+ arg = OMNIA_CMD_POWER_OFF_POWERON_BUTTON;
+ else
+ arg = 0;
+
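+ /*
+ * The 9-byte command frame built below (all fields little-endian):
+ *   cmd[0]    command id (OMNIA_CMD_POWER_OFF)
+ *   cmd[1..2] magic value
+ *   cmd[3..4] argument (power-on button flag)
+ *   cmd[5..8] CRC32 of cmd[1..4]
+ */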
+ cmd[0] = OMNIA_CMD_POWER_OFF;
+ put_unaligned_le16(OMNIA_CMD_POWER_OFF_MAGIC, &cmd[1]);
+ put_unaligned_le16(arg, &cmd[3]);
+
+ /*
+ * Although all values passed from and to the MCU are little-endian, the
+ * MCU's CRC unit uses the big-endian CRC32 polynomial (0x04c11db7), so we
+ * need to use crc32_be() here.
+ */
+ tmp = cpu_to_be32(get_unaligned_le32(&cmd[1]));
+ put_unaligned_le32(crc32_be(~0, (void *)&tmp, sizeof(tmp)), &cmd[5]);
+
+ err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd));
+ if (err)
+ dev_err(&mcu->client->dev,
+ "Unable to send the poweroff command: %d\n", err);
+
+ return NOTIFY_DONE;
+}
+
+static int omnia_restart(struct sys_off_data *data)
+{
+ struct omnia_mcu *mcu = data->cb_data;
+ u8 cmd[3];
+ int err;
+
+ cmd[0] = OMNIA_CMD_GENERAL_CONTROL;
+
+ if (reboot_mode == REBOOT_HARD)
+ cmd[1] = cmd[2] = OMNIA_CTL_HARD_RST;
+ else
+ cmd[1] = cmd[2] = OMNIA_CTL_LIGHT_RST;
+
+ err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd));
+ if (err)
+ dev_err(&mcu->client->dev,
+ "Unable to send the restart command: %d\n", err);
+
+ /*
+ * The MCU needs a short while to process the I2C command, otherwise it
+ * will do a light reset based on the SoC SYSRES_OUT pin.
+ */
+ mdelay(1);
+
+ return NOTIFY_DONE;
+}
+
+static ssize_t front_button_poweron_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", mcu->front_button_poweron);
+}
+
+static ssize_t front_button_poweron_store(struct device *dev,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+ bool val;
+ int err;
+
+ err = kstrtobool(buf, &val);
+ if (err)
+ return err;
+
+ mcu->front_button_poweron = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(front_button_poweron);
+
+static struct attribute *omnia_mcu_poweroff_attrs[] = {
+ &dev_attr_front_button_poweron.attr,
+ NULL
+};
+
+static umode_t poweroff_attrs_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct omnia_mcu *mcu = dev_get_drvdata(dev);
+
+ if (mcu->features & OMNIA_FEAT_POWEROFF_WAKEUP)
+ return a->mode;
+
+ return 0;
+}
+
+const struct attribute_group omnia_mcu_poweroff_group = {
+ .attrs = omnia_mcu_poweroff_attrs,
+ .is_visible = poweroff_attrs_visible,
+};
+
+int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu)
+{
+ struct device *dev = &mcu->client->dev;
+ int err;
+
+ /* MCU restart is always available */
+ err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_FIRMWARE,
+ omnia_restart, mcu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot register system restart handler\n");
+
+ /*
+ * Poweroff and wakeup are available only if POWEROFF_WAKEUP feature is
+ * present.
+ */
+ if (!(mcu->features & OMNIA_FEAT_POWEROFF_WAKEUP))
+ return 0;
+
+ err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ omnia_power_off, mcu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot register system power off handler\n");
+
+ mcu->rtcdev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(mcu->rtcdev))
+ return dev_err_probe(dev, PTR_ERR(mcu->rtcdev),
+ "Cannot allocate RTC device\n");
+
+ mcu->rtcdev->ops = &omnia_rtc_ops;
+ mcu->rtcdev->range_max = U32_MAX;
+ set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, mcu->rtcdev->features);
+
+ err = devm_rtc_register_device(mcu->rtcdev);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot register RTC device\n");
+
+ mcu->front_button_poweron = true;
+
+ return 0;
+}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c
new file mode 100644
index 000000000000..ad953fb3c37a
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CZ.NIC's Turris Omnia MCU TRNG driver
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/hw_random.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/minmax.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/turris-omnia-mcu-interface.h>
+#include "turris-omnia-mcu.h"
+
+#define OMNIA_CMD_TRNG_MAX_ENTROPY_LEN 64
+
+static irqreturn_t omnia_trng_irq_handler(int irq, void *dev_id)
+{
+ struct omnia_mcu *mcu = dev_id;
+
+ complete(&mcu->trng_entropy_ready);
+
+ return IRQ_HANDLED;
+}
+
+static int omnia_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct omnia_mcu *mcu = container_of(rng, struct omnia_mcu, trng);
+ u8 reply[1 + OMNIA_CMD_TRNG_MAX_ENTROPY_LEN];
+ int err, bytes;
+
+ if (!wait && !completion_done(&mcu->trng_entropy_ready))
+ return 0;
+
+ do {
+ if (wait_for_completion_interruptible(&mcu->trng_entropy_ready))
+ return -ERESTARTSYS;
+
+ err = omnia_cmd_read(mcu->client,
+ OMNIA_CMD_TRNG_COLLECT_ENTROPY,
+ reply, sizeof(reply));
+ if (err)
+ return err;
+
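+ /*
+ * reply[0] holds the number of valid entropy bytes returned by the
+ * MCU; the entropy itself follows in reply[1..].
+ */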
+ bytes = min3(reply[0], max, OMNIA_CMD_TRNG_MAX_ENTROPY_LEN);
+ } while (wait && !bytes);
+
+ memcpy(data, &reply[1], bytes);
+
+ return bytes;
+}
+
+int omnia_mcu_register_trng(struct omnia_mcu *mcu)
+{
+ struct device *dev = &mcu->client->dev;
+ u8 irq_idx, dummy;
+ int irq, err;
+
+ if (!(mcu->features & OMNIA_FEAT_TRNG))
+ return 0;
+
+ irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)];
+ irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx));
+ if (!irq)
+ return dev_err_probe(dev, -ENXIO, "Cannot get TRNG IRQ\n");
+
+ /*
+ * If someone else cleared the TRNG interrupt but did not read the
+ * entropy, a new interrupt won't be generated, and entropy collection
+ * will be stuck. Ensure an interrupt will be generated by executing
+ * the collect entropy command (and discarding the result).
+ */
+ err = omnia_cmd_read(mcu->client, OMNIA_CMD_TRNG_COLLECT_ENTROPY,
+ &dummy, 1);
+ if (err)
+ return err;
+
+ init_completion(&mcu->trng_entropy_ready);
+
+ err = devm_request_threaded_irq(dev, irq, NULL, omnia_trng_irq_handler,
+ IRQF_ONESHOT, "turris-omnia-mcu-trng",
+ mcu);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot request TRNG IRQ\n");
+
+ mcu->trng.name = "turris-omnia-mcu-trng";
+ mcu->trng.read = omnia_trng_read;
+
+ err = devm_hwrng_register(dev, &mcu->trng);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot register TRNG\n");
+
+ return 0;
+}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-watchdog.c b/drivers/platform/cznic/turris-omnia-mcu-watchdog.c
new file mode 100644
index 000000000000..3ad146ec1d80
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu-watchdog.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CZ.NIC's Turris Omnia MCU watchdog driver
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <linux/watchdog.h>
+
+#include <linux/turris-omnia-mcu-interface.h>
+#include "turris-omnia-mcu.h"
+
+#define WATCHDOG_TIMEOUT 120
+
+static unsigned int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int omnia_wdt_start(struct watchdog_device *wdt)
+{
+ struct omnia_mcu *mcu = watchdog_get_drvdata(wdt);
+
+ return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 1);
+}
+
+static int omnia_wdt_stop(struct watchdog_device *wdt)
+{
+ struct omnia_mcu *mcu = watchdog_get_drvdata(wdt);
+
+ return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 0);
+}
+
+static int omnia_wdt_ping(struct watchdog_device *wdt)
+{
+ struct omnia_mcu *mcu = watchdog_get_drvdata(wdt);
+
+ return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 1);
+}
+
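+/*
+ * The MCU expects the watchdog timeout in tenths of a second, hence the
+ * DECI (= 10) scaling in the conversions below.
+ */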
+static int omnia_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
+{
+ struct omnia_mcu *mcu = watchdog_get_drvdata(wdt);
+
+ return omnia_cmd_write_u16(mcu->client, OMNIA_CMD_SET_WDT_TIMEOUT,
+ timeout * DECI);
+}
+
+static unsigned int omnia_wdt_get_timeleft(struct watchdog_device *wdt)
+{
+ struct omnia_mcu *mcu = watchdog_get_drvdata(wdt);
+ u16 timeleft;
+ int err;
+
+ err = omnia_cmd_read_u16(mcu->client, OMNIA_CMD_GET_WDT_TIMELEFT,
+ &timeleft);
+ if (err) {
+ dev_err(&mcu->client->dev, "Cannot get watchdog timeleft: %d\n",
+ err);
+ return 0;
+ }
+
+ return timeleft / DECI;
+}
+
+static const struct watchdog_info omnia_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "Turris Omnia MCU Watchdog",
+};
+
+static const struct watchdog_ops omnia_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = omnia_wdt_start,
+ .stop = omnia_wdt_stop,
+ .ping = omnia_wdt_ping,
+ .set_timeout = omnia_wdt_set_timeout,
+ .get_timeleft = omnia_wdt_get_timeleft,
+};
+
+int omnia_mcu_register_watchdog(struct omnia_mcu *mcu)
+{
+ struct device *dev = &mcu->client->dev;
+ u8 state;
+ int err;
+
+ if (!(mcu->features & OMNIA_FEAT_WDT_PING))
+ return 0;
+
+ mcu->wdt.info = &omnia_wdt_info;
+ mcu->wdt.ops = &omnia_wdt_ops;
+ mcu->wdt.parent = dev;
+ mcu->wdt.min_timeout = 1;
+ mcu->wdt.max_timeout = 65535 / DECI;
+
+ mcu->wdt.timeout = WATCHDOG_TIMEOUT;
+ watchdog_init_timeout(&mcu->wdt, timeout, dev);
+
+ watchdog_set_drvdata(&mcu->wdt, mcu);
+
+ omnia_wdt_set_timeout(&mcu->wdt, mcu->wdt.timeout);
+
+ err = omnia_cmd_read_u8(mcu->client, OMNIA_CMD_GET_WATCHDOG_STATE,
+ &state);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot get MCU watchdog state\n");
+
+ if (state)
+ set_bit(WDOG_HW_RUNNING, &mcu->wdt.status);
+
+ watchdog_set_nowayout(&mcu->wdt, nowayout);
+ watchdog_stop_on_reboot(&mcu->wdt);
+ err = devm_watchdog_register_device(dev, &mcu->wdt);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot register MCU watchdog\n");
+
+ return 0;
+}
diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h
new file mode 100644
index 000000000000..2ca56ae13aa9
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CZ.NIC's Turris Omnia MCU driver
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#ifndef __TURRIS_OMNIA_MCU_H
+#define __TURRIS_OMNIA_MCU_H
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/gpio/driver.h>
+#include <linux/hw_random.h>
+#include <linux/if_ether.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/workqueue.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+struct i2c_client;
+struct rtc_device;
+
+struct omnia_mcu {
+ struct i2c_client *client;
+ const char *type;
+ u32 features;
+
+ /* board information */
+ u64 board_serial_number;
+ u8 board_first_mac[ETH_ALEN];
+ u8 board_revision;
+
+ /* GPIO chip */
+ struct gpio_chip gc;
+ struct mutex lock;
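+ /*
+ * Interrupt configuration and GPIO value cache, indexed by MCU interrupt
+ * bit: @mask enabled interrupts, @rising/@falling requested edges, @both
+ * lines with both edges enabled, @cached/@is_cached the cached input
+ * values (see turris-omnia-mcu-gpio.c).
+ */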
+ unsigned long mask, rising, falling, both, cached, is_cached;
+ /* Old MCU firmware handling needs the following */
+ struct delayed_work button_release_emul_work;
+ unsigned long last_status;
+ bool button_pressed_emul;
+
+ /* RTC device for configuring wake-up */
+ struct rtc_device *rtcdev;
+ u32 rtc_alarm;
+ bool front_button_poweron;
+
+ /* MCU watchdog */
+ struct watchdog_device wdt;
+
+ /* true random number generator */
+ struct hwrng trng;
+ struct completion trng_entropy_ready;
+};
+
+int omnia_cmd_write_read(const struct i2c_client *client,
+ void *cmd, unsigned int cmd_len,
+ void *reply, unsigned int reply_len);
+
+static inline int omnia_cmd_write(const struct i2c_client *client, void *cmd,
+ unsigned int len)
+{
+ return omnia_cmd_write_read(client, cmd, len, NULL, 0);
+}
+
+static inline int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd,
+ u8 val)
+{
+ u8 buf[2] = { cmd, val };
+
+ return omnia_cmd_write(client, buf, sizeof(buf));
+}
+
+static inline int omnia_cmd_write_u16(const struct i2c_client *client, u8 cmd,
+ u16 val)
+{
+ u8 buf[3];
+
+ buf[0] = cmd;
+ put_unaligned_le16(val, &buf[1]);
+
+ return omnia_cmd_write(client, buf, sizeof(buf));
+}
+
+static inline int omnia_cmd_write_u32(const struct i2c_client *client, u8 cmd,
+ u32 val)
+{
+ u8 buf[5];
+
+ buf[0] = cmd;
+ put_unaligned_le32(val, &buf[1]);
+
+ return omnia_cmd_write(client, buf, sizeof(buf));
+}
+
+static inline int omnia_cmd_read(const struct i2c_client *client, u8 cmd,
+ void *reply, unsigned int len)
+{
+ return omnia_cmd_write_read(client, &cmd, 1, reply, len);
+}
+
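+/*
+ * Number of reply bytes needed to cover all bits in @mask: the byte index of
+ * the highest set bit (__fls() >> 3), doubled when the reply interleaves two
+ * masks, plus one to turn the index into a count, plus @offset for the
+ * position within an interleaved byte pair. For example, for @mask = BIT(8)
+ * this is 2 bytes when not interleaved, and 3 (rising, @offset 0) or
+ * 4 (falling, @offset 1) bytes when interleaved.
+ */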
+static inline unsigned int
+omnia_compute_reply_length(unsigned long mask, bool interleaved,
+ unsigned int offset)
+{
+ if (!mask)
+ return 0;
+
+ return ((__fls(mask) >> 3) << interleaved) + 1 + offset;
+}
+
+/* Returns 0 on success */
+static inline int omnia_cmd_read_bits(const struct i2c_client *client, u8 cmd,
+ unsigned long bits, unsigned long *dst)
+{
+ __le32 reply;
+ int err;
+
+ if (!bits) {
+ *dst = 0;
+ return 0;
+ }
+
+ err = omnia_cmd_read(client, cmd, &reply,
+ omnia_compute_reply_length(bits, false, 0));
+ if (err)
+ return err;
+
+ *dst = le32_to_cpu(reply) & bits;
+
+ return 0;
+}
+
+static inline int omnia_cmd_read_bit(const struct i2c_client *client, u8 cmd,
+ unsigned long bit)
+{
+ unsigned long reply;
+ int err;
+
+ err = omnia_cmd_read_bits(client, cmd, bit, &reply);
+ if (err)
+ return err;
+
+ return !!reply;
+}
+
+static inline int omnia_cmd_read_u32(const struct i2c_client *client, u8 cmd,
+ u32 *dst)
+{
+ __le32 reply;
+ int err;
+
+ err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
+ if (err)
+ return err;
+
+ *dst = le32_to_cpu(reply);
+
+ return 0;
+}
+
+static inline int omnia_cmd_read_u16(const struct i2c_client *client, u8 cmd,
+ u16 *dst)
+{
+ __le16 reply;
+ int err;
+
+ err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
+ if (err)
+ return err;
+
+ *dst = le16_to_cpu(reply);
+
+ return 0;
+}
+
+static inline int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd,
+ u8 *reply)
+{
+ return omnia_cmd_read(client, cmd, reply, sizeof(*reply));
+}
+
+extern const u8 omnia_int_to_gpio_idx[32];
+extern const struct attribute_group omnia_mcu_gpio_group;
+extern const struct attribute_group omnia_mcu_poweroff_group;
+
+int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu);
+int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu);
+int omnia_mcu_register_trng(struct omnia_mcu *mcu);
+int omnia_mcu_register_watchdog(struct omnia_mcu *mcu);
+
+#endif /* __TURRIS_OMNIA_MCU_H */
diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
index 3ef655591424..abe7be602f84 100644
--- a/drivers/platform/mellanox/nvsw-sn2201.c
+++ b/drivers/platform/mellanox/nvsw-sn2201.c
@@ -1198,6 +1198,7 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
static int nvsw_sn2201_probe(struct platform_device *pdev)
{
struct nvsw_sn2201 *nvsw_sn2201;
+ int ret;
nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
if (!nvsw_sn2201)
@@ -1205,8 +1206,10 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
nvsw_sn2201->dev = &pdev->dev;
platform_set_drvdata(pdev, nvsw_sn2201);
- platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
+ ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
+ if (ret)
+ return ret;
nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0ec952b5d03e..665fa9524986 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -136,6 +136,7 @@ config YOGABOOK
config YT2_1380
tristate "Lenovo Yoga Tablet 2 1380 fast charge driver"
depends on SERIAL_DEV_BUS
+ depends on EXTCON
depends on ACPI
help
Say Y here to enable support for the custom fast charging protocol
@@ -515,6 +516,7 @@ config THINKPAD_ACPI
select NVRAM
select NEW_LEDS
select LEDS_CLASS
+ select INPUT_SPARSEKMAP
help
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index d84ea66eecc6..8fcf38eed7f0 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -907,16 +907,44 @@ static int hsmp_plat_dev_register(void)
return ret;
}
+/*
+ * This check is only needed for backward compatibility with previous platforms.
+ * All new platforms are expected to support ACPI-based probing.
+ */
+static bool legacy_hsmp_support(void)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return false;
+
+ switch (boot_cpu_data.x86) {
+ case 0x19:
+ switch (boot_cpu_data.x86_model) {
+ case 0x00 ... 0x1F:
+ case 0x30 ... 0x3F:
+ case 0x90 ... 0x9F:
+ case 0xA0 ... 0xAF:
+ return true;
+ default:
+ return false;
+ }
+ case 0x1A:
+ switch (boot_cpu_data.x86_model) {
+ case 0x00 ... 0x1F:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
static int __init hsmp_plt_init(void)
{
int ret = -ENODEV;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
- pr_err("HSMP is not supported on Family:%x model:%x\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- return ret;
- }
-
/*
* amd_nb_num() returns number of SMN/DF interfaces present in the system
* if we have N SMN/DF interfaces that ideally means N sockets
@@ -930,7 +958,15 @@ static int __init hsmp_plt_init(void)
return ret;
if (!plat_dev.is_acpi_device) {
- ret = hsmp_plat_dev_register();
+ if (legacy_hsmp_support()) {
+ /* Not an ACPI device, but supports HSMP; register a plat_dev */
+ ret = hsmp_plat_dev_register();
+ } else {
+ /* Not an ACPI device and does not support HSMP */
+ pr_info("HSMP is not supported on Family:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ ret = -ENODEV;
+ }
if (ret)
platform_driver_unregister(&amd_hsmp_driver);
}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index eeedd0c0395a..753d5662c080 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -12,6 +12,7 @@
#define PMF_H
#include <linux/acpi.h>
+#include <linux/input.h>
#include <linux/platform_profile.h>
#define POLICY_BUF_MAX_SZ 0x4b000
@@ -300,6 +301,7 @@ struct amd_pmf_dev {
void __iomem *policy_base;
bool smart_pc_enabled;
u16 pmf_if_version;
+ struct input_dev *pmf_idev;
};
struct apmf_sps_prop_granular_v2 {
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index b438de4d6bfc..e246367aacee 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -62,18 +62,12 @@ static void amd_pmf_prepare_args(struct amd_pmf_dev *dev, int cmd,
param[0].u.memref.shm_offs = 0;
}
-static int amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event)
+static void amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event)
{
- char *envp[2] = {};
-
- envp[0] = kasprintf(GFP_KERNEL, "EVENT_ID=%d", event);
- if (!envp[0])
- return -EINVAL;
-
- kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, envp);
-
- kfree(envp[0]);
- return 0;
+ input_report_key(dev->pmf_idev, event, 1); /* key press */
+ input_sync(dev->pmf_idev);
+ input_report_key(dev->pmf_idev, event, 0); /* key release */
+ input_sync(dev->pmf_idev);
}
static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_result *out)
@@ -149,7 +143,20 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
break;
case PMF_POLICY_SYSTEM_STATE:
- amd_pmf_update_uevents(dev, val);
+ switch (val) {
+ case 0:
+ amd_pmf_update_uevents(dev, KEY_SLEEP);
+ break;
+ case 1:
+ amd_pmf_update_uevents(dev, KEY_SUSPEND);
+ break;
+ case 2:
+ amd_pmf_update_uevents(dev, KEY_SCREENLOCK);
+ break;
+ default:
+ dev_err(dev->dev, "Invalid PMF policy system state: %d\n", val);
+ }
+
dev_dbg(dev->dev, "update SYSTEM_STATE: %s\n",
amd_pmf_uevent_as_str(val));
break;
@@ -301,14 +308,9 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
return -EINVAL;
/* re-alloc to the new buffer length of the policy binary */
- new_policy_buf = kzalloc(length, GFP_KERNEL);
- if (!new_policy_buf)
- return -ENOMEM;
-
- if (copy_from_user(new_policy_buf, buf, length)) {
- kfree(new_policy_buf);
- return -EFAULT;
- }
+ new_policy_buf = memdup_user(buf, length);
+ if (IS_ERR(new_policy_buf))
+ return PTR_ERR(new_policy_buf);
kfree(dev->policy_buf);
dev->policy_buf = new_policy_buf;
@@ -368,6 +370,30 @@ static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id)
return rc;
}
+static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
+{
+ int err;
+
+ dev->pmf_idev = devm_input_allocate_device(dev->dev);
+ if (!dev->pmf_idev)
+ return -ENOMEM;
+
+ dev->pmf_idev->name = "PMF-TA output events";
+ dev->pmf_idev->phys = "amd-pmf/input0";
+
+ input_set_capability(dev->pmf_idev, EV_KEY, KEY_SLEEP);
+ input_set_capability(dev->pmf_idev, EV_KEY, KEY_SCREENLOCK);
+ input_set_capability(dev->pmf_idev, EV_KEY, KEY_SUSPEND);
+
+ err = input_register_device(dev->pmf_idev);
+ if (err) {
+ dev_err(dev->dev, "Failed to register input device: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
{
u32 size;
@@ -475,6 +501,10 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
if (pb_side_load)
amd_pmf_open_pb(dev, dev->dbgfs_dir);
+ ret = amd_pmf_register_input_device(dev);
+ if (ret)
+ goto error;
+
return 0;
error:
@@ -485,6 +515,9 @@ error:
void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
{
+ if (dev->pmf_idev)
+ input_unregister_device(dev->pmf_idev);
+
if (pb_side_load && dev->esbin)
amd_pmf_remove_pb(dev);
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index efcf909786a5..2423dc91debb 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -171,6 +171,7 @@ static void __exit amilo_rfkill_exit(void)
}
MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
+MODULE_DESCRIPTION("Fujitsu-Siemens Amilo rfkill support");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index 8f0f87637c5f..b441d8ca72d3 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -490,7 +490,7 @@ static void tf103c_dock_enable_touchpad(struct tf103c_dock_data *dock)
return;
}
- strscpy(board_info.type, "elan_i2c", I2C_NAME_SIZE);
+ strscpy(board_info.type, "elan_i2c");
board_info.addr = TF103C_DOCK_TP_ADDR;
board_info.dev_name = TF103C_DOCK_DEV_NAME "-tp";
board_info.irq = dock->tp_irq;
@@ -795,7 +795,7 @@ static int tf103c_dock_probe(struct i2c_client *client)
*/
dock->ec_client = client;
- strscpy(board_info.type, "tf103c-dock-intr", I2C_NAME_SIZE);
+ strscpy(board_info.type, "tf103c-dock-intr");
board_info.addr = TF103C_DOCK_INTR_ADDR;
board_info.dev_name = TF103C_DOCK_DEV_NAME "-intr";
@@ -803,7 +803,7 @@ static int tf103c_dock_probe(struct i2c_client *client)
if (IS_ERR(dock->intr_client))
return dev_err_probe(dev, PTR_ERR(dock->intr_client), "creating intr client\n");
- strscpy(board_info.type, "tf103c-dock-kbd", I2C_NAME_SIZE);
+ strscpy(board_info.type, "tf103c-dock-kbd");
board_info.addr = TF103C_DOCK_KBD_ADDR;
board_info.dev_name = TF103C_DOCK_DEV_NAME "-kbd";
@@ -846,8 +846,8 @@ static int tf103c_dock_probe(struct i2c_client *client)
dock->hid->vendor = 0x0b05; /* USB_VENDOR_ID_ASUSTEK */
dock->hid->product = 0x0103; /* From TF-103-C */
dock->hid->version = 0x0100; /* 1.0 */
- strscpy(dock->hid->name, "Asus TF103C Dock Keyboard", sizeof(dock->hid->name));
- strscpy(dock->hid->phys, dev_name(dev), sizeof(dock->hid->phys));
+ strscpy(dock->hid->name, "Asus TF103C Dock Keyboard");
+ strscpy(dock->hid->phys, dev_name(dev));
ret = hid_add_device(dock->hid);
if (ret)
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 3f9b6285c9a6..cc735931f97b 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -238,6 +238,7 @@ struct asus_wmi {
struct led_classdev lightbar_led;
int lightbar_led_wk;
struct led_classdev micmute_led;
+ struct led_classdev camera_led;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct wlan_led_work;
@@ -879,10 +880,14 @@ static ssize_t kbd_rgb_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct asus_wmi *asus = dev_get_drvdata(dev);
u32 cmd, mode, r, g, b, speed;
+ struct led_classdev *led;
+ struct asus_wmi *asus;
int err;
+ led = dev_get_drvdata(dev);
+ asus = container_of(led, struct asus_wmi, kbd_led);
+
if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6)
return -EINVAL;
@@ -1642,6 +1647,27 @@ static int micmute_led_set(struct led_classdev *led_cdev,
return err < 0 ? err : 0;
}
+static enum led_brightness camera_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ u32 result;
+
+ asus = container_of(led_cdev, struct asus_wmi, camera_led);
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_CAMERA_LED, &result);
+
+ return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
+}
+
+static int camera_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int state = brightness != LED_OFF;
+ int err;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_CAMERA_LED, state, NULL);
+ return err < 0 ? err : 0;
+}
+
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
led_classdev_unregister(&asus->kbd_led);
@@ -1649,6 +1675,7 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
led_classdev_unregister(&asus->wlan_led);
led_classdev_unregister(&asus->lightbar_led);
led_classdev_unregister(&asus->micmute_led);
+ led_classdev_unregister(&asus->camera_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
@@ -1740,6 +1767,18 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CAMERA_LED)) {
+ asus->camera_led.name = "asus::camera";
+ asus->camera_led.max_brightness = 1;
+ asus->camera_led.brightness_get = camera_led_get;
+ asus->camera_led.brightness_set_blocking = camera_led_set;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->camera_led);
+ if (rv)
+ goto error;
+ }
+
error:
if (rv)
asus_wmi_led_exit(asus);
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 195a8bf532cc..85a78ef91182 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -91,6 +91,19 @@ config DELL_RBTN
To compile this driver as a module, choose M here: the module will
be called dell-rbtn.
+config DELL_PC
+ tristate "Dell PC Extras"
+ default m
+ depends on ACPI
+ depends on DMI
+ depends on DELL_SMBIOS
+ select ACPI_PLATFORM_PROFILE
+ help
+ This driver adds support for controlling the fan modes via platform_profile
+ on supported Dell systems regardless of form factor.
+ The module will simply do nothing if thermal management commands are
+ not supported.
+
#
# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
# backends are selected. The "depends" line prevents a configuration
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index 8176a257d9c3..79d60f1bf4c1 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
obj-$(CONFIG_DELL_RBU) += dell_rbu.o
+obj-$(CONFIG_DELL_PC) += dell-pc.o
obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
dell-smbios-objs := dell-smbios-base.o
dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 42f7de2b4522..6552dfe491c6 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -353,29 +353,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
{ }
};
-static void dell_fill_request(struct calling_interface_buffer *buffer,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3)
-{
- memset(buffer, 0, sizeof(struct calling_interface_buffer));
- buffer->input[0] = arg0;
- buffer->input[1] = arg1;
- buffer->input[2] = arg2;
- buffer->input[3] = arg3;
-}
-
-static int dell_send_request(struct calling_interface_buffer *buffer,
- u16 class, u16 select)
-{
- int ret;
-
- buffer->cmd_class = class;
- buffer->cmd_select = select;
- ret = dell_smbios_call(buffer);
- if (ret != 0)
- return ret;
- return dell_smbios_error(buffer->output[0]);
-}
-
/*
* Derived from information in smbios-wireless-ctl:
*
diff --git a/drivers/platform/x86/dell/dell-pc.c b/drivers/platform/x86/dell/dell-pc.c
new file mode 100644
index 000000000000..972385ca1990
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-pc.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Dell laptop extras
+ *
+ * Copyright (c) Lyndon Sanche <lsanche@lyndeno.ca>
+ *
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_profile.h>
+#include <linux/slab.h>
+
+#include "dell-smbios.h"
+
+static const struct dmi_system_id dell_device_table[] __initconst = {
+ {
+ .ident = "Dell Inc.",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ {
+ .ident = "Dell Computer Corporation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, dell_device_table);
+
+/* Derived from smbios-thermal-ctl
+ *
+ * cbClass 17
+ * cbSelect 19
+ * User Selectable Thermal Tables(USTT)
+ * cbArg1 determines the function to be performed
+ * cbArg1 0x0 = Get Thermal Information
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, byte 0 Bitmap of supported thermal modes. A mode is supported if
+ * its bit is set to 1
+ * Bit 0 Balanced
+ * Bit 1 Cool Bottom
+ * Bit 2 Quiet
+ * Bit 3 Performance
+ * cbRES2, byte 1 Bitmap of supported Active Acoustic Controller (AAC) modes.
+ * Each mode corresponds to the supported thermal modes in
+ * byte 0. A mode is supported if its bit is set to 1.
+ * Bit 0 AAC (Balanced)
+ * Bit 1 AAC (Cool Bottom)
+ * Bit 2 AAC (Quiet)
+ * Bit 3 AAC (Performance)
+ * cbRes3, byte 0 Current Thermal Mode
+ * Bit 0 Balanced
+ * Bit 1 Cool Bottom
+ * Bit 2 Quiet
+ * Bit 3 Performance
+ * cbRes3, byte 1 AAC Configuration type
+ * 0 Global (AAC enable/disable applies to all supported USTT modes)
+ * 1 USTT mode specific
+ * cbRes3, byte 2 Current Active Acoustic Controller (AAC) Mode
+ * If AAC Configuration Type is Global,
+ * 0 AAC mode disabled
+ * 1 AAC mode enabled
+ * If AAC Configuration Type is USTT mode specific (multiple bits may be set),
+ * Bit 0 AAC (Balanced)
+ * Bit 1 AAC (Cool Bottom)
+ * Bit 2 AAC (Quiet)
+ * Bit 3 AAC (Performance)
+ * cbRes3, byte 3 Current Fan Failure Mode
+ * Bit 0 Minimal Fan Failure (at least one fan has failed, one fan working)
+ * Bit 1 Catastrophic Fan Failure (all fans have failed)
+ *
+ * cbArg1 0x1 (Set Thermal Information), both desired thermal mode and
+ * desired AAC mode shall be applied
+ * cbArg2, byte 0 Desired Thermal Mode to set
+ * (only one bit may be set for this parameter)
+ * Bit 0 Balanced
+ * Bit 1 Cool Bottom
+ * Bit 2 Quiet
+ * Bit 3 Performance
+ * cbArg2, byte 1 Desired Active Acoustic Controller (AAC) Mode to set
+ * If AAC Configuration Type is Global,
+ * 0 AAC mode disabled
+ * 1 AAC mode enabled
+ * If AAC Configuration Type is USTT mode specific
+ * (multiple bits may be set for this parameter),
+ * Bit 0 AAC (Balanced)
+ * Bit 1 AAC (Cool Bottom)
+ * Bit 2 AAC (Quiet)
+ * Bit 3 AAC (Performance)
+ */
+
+#define DELL_ACC_GET_FIELD GENMASK(19, 16)
+#define DELL_ACC_SET_FIELD GENMASK(11, 8)
+#define DELL_THERMAL_SUPPORTED GENMASK(3, 0)
+
+static struct platform_profile_handler *thermal_handler;
+
+enum thermal_mode_bits {
+ DELL_BALANCED = BIT(0),
+ DELL_COOL_BOTTOM = BIT(1),
+ DELL_QUIET = BIT(2),
+ DELL_PERFORMANCE = BIT(3),
+};
+
+static int thermal_get_mode(void)
+{
+ struct calling_interface_buffer buffer;
+ int state;
+ int ret;
+
+ dell_fill_request(&buffer, 0x0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
+ if (ret)
+ return ret;
+ state = buffer.output[2];
+ if (state & DELL_BALANCED)
+ return DELL_BALANCED;
+ else if (state & DELL_COOL_BOTTOM)
+ return DELL_COOL_BOTTOM;
+ else if (state & DELL_QUIET)
+ return DELL_QUIET;
+ else if (state & DELL_PERFORMANCE)
+ return DELL_PERFORMANCE;
+ else
+ return -ENXIO;
+}
+
+static int thermal_get_supported_modes(int *supported_bits)
+{
+ struct calling_interface_buffer buffer;
+ int ret;
+
+ dell_fill_request(&buffer, 0x0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
+ /* Thermal function not supported */
+ if (ret == -ENXIO) {
+ *supported_bits = 0;
+ return 0;
+ }
+ if (ret)
+ return ret;
+ *supported_bits = FIELD_GET(DELL_THERMAL_SUPPORTED, buffer.output[1]);
+ return 0;
+}
+
+static int thermal_get_acc_mode(int *acc_mode)
+{
+ struct calling_interface_buffer buffer;
+ int ret;
+
+ dell_fill_request(&buffer, 0x0, 0, 0, 0);
+ ret = dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
+ if (ret)
+ return ret;
+ *acc_mode = FIELD_GET(DELL_ACC_GET_FIELD, buffer.output[3]);
+ return 0;
+}
+
+static int thermal_set_mode(enum thermal_mode_bits state)
+{
+ struct calling_interface_buffer buffer;
+ int ret;
+ int acc_mode;
+
+ ret = thermal_get_acc_mode(&acc_mode);
+ if (ret)
+ return ret;
+
+ dell_fill_request(&buffer, 0x1, FIELD_PREP(DELL_ACC_SET_FIELD, acc_mode) | state, 0, 0);
+ return dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
+}
+
+static int thermal_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ switch (profile) {
+ case PLATFORM_PROFILE_BALANCED:
+ return thermal_set_mode(DELL_BALANCED);
+ case PLATFORM_PROFILE_PERFORMANCE:
+ return thermal_set_mode(DELL_PERFORMANCE);
+ case PLATFORM_PROFILE_QUIET:
+ return thermal_set_mode(DELL_QUIET);
+ case PLATFORM_PROFILE_COOL:
+ return thermal_set_mode(DELL_COOL_BOTTOM);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int thermal_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ int ret;
+
+ ret = thermal_get_mode();
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case DELL_BALANCED:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case DELL_PERFORMANCE:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case DELL_COOL_BOTTOM:
+ *profile = PLATFORM_PROFILE_COOL;
+ break;
+ case DELL_QUIET:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int thermal_init(void)
+{
+ int ret;
+ int supported_modes;
+
+ /* If thermal commands are not supported, exit without error */
+ if (!dell_smbios_class_is_supported(CLASS_INFO))
+ return 0;
+
+ /* If thermal modes are not supported, exit without error */
+ ret = thermal_get_supported_modes(&supported_modes);
+ if (ret < 0)
+ return ret;
+ if (!supported_modes)
+ return 0;
+
+ thermal_handler = kzalloc(sizeof(*thermal_handler), GFP_KERNEL);
+ if (!thermal_handler)
+ return -ENOMEM;
+ thermal_handler->profile_get = thermal_platform_profile_get;
+ thermal_handler->profile_set = thermal_platform_profile_set;
+
+ if (supported_modes & DELL_QUIET)
+ set_bit(PLATFORM_PROFILE_QUIET, thermal_handler->choices);
+ if (supported_modes & DELL_COOL_BOTTOM)
+ set_bit(PLATFORM_PROFILE_COOL, thermal_handler->choices);
+ if (supported_modes & DELL_BALANCED)
+ set_bit(PLATFORM_PROFILE_BALANCED, thermal_handler->choices);
+ if (supported_modes & DELL_PERFORMANCE)
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, thermal_handler->choices);
+
+ /* Clean up if failed */
+ ret = platform_profile_register(thermal_handler);
+ if (ret) {
+ kfree(thermal_handler);
+ thermal_handler = NULL;
+ }
+
+ return ret;
+}
+
+static void thermal_cleanup(void)
+{
+ if (thermal_handler) {
+ platform_profile_remove();
+ kfree(thermal_handler);
+ }
+}
+
+static int __init dell_init(void)
+{
+ int ret;
+
+ if (!dmi_check_system(dell_device_table))
+ return -ENODEV;
+
+ /* Do not fail module init if thermal modes are not supported, just skip */
+ ret = thermal_init();
+ if (ret)
+ goto fail_thermal;
+
+ return 0;
+
+fail_thermal:
+ thermal_cleanup();
+ return ret;
+}
+
+static void __exit dell_exit(void)
+{
+ thermal_cleanup();
+}
+
+module_init(dell_init);
+module_exit(dell_exit);
+
+MODULE_AUTHOR("Lyndon Sanche <lsanche@lyndeno.ca>");
+MODULE_DESCRIPTION("Dell PC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index e61bfaf8b5c4..6565fac24cde 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -11,6 +11,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/container_of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/capability.h>
@@ -25,11 +26,16 @@ static u32 da_supported_commands;
static int da_num_tokens;
static struct platform_device *platform_device;
static struct calling_interface_token *da_tokens;
-static struct device_attribute *token_location_attrs;
-static struct device_attribute *token_value_attrs;
+static struct token_sysfs_data *token_entries;
static struct attribute **token_attrs;
static DEFINE_MUTEX(smbios_mutex);
+struct token_sysfs_data {
+ struct device_attribute location_attr;
+ struct device_attribute value_attr;
+ struct calling_interface_token *token;
+};
+
struct smbios_device {
struct list_head list;
struct device *device;
@@ -71,6 +77,7 @@ static struct smbios_call call_blacklist[] = {
/* handled by kernel: dell-laptop */
{0x0000, CLASS_INFO, SELECT_RFKILL},
{0x0000, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT},
+ {0x0000, CLASS_INFO, SELECT_THERMAL_MANAGEMENT},
};
struct token_range {
@@ -314,6 +321,31 @@ out_smbios_call:
}
EXPORT_SYMBOL_GPL(dell_smbios_call);
+void dell_fill_request(struct calling_interface_buffer *buffer,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+{
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ buffer->input[0] = arg0;
+ buffer->input[1] = arg1;
+ buffer->input[2] = arg2;
+ buffer->input[3] = arg3;
+}
+EXPORT_SYMBOL_GPL(dell_fill_request);
+
+int dell_send_request(struct calling_interface_buffer *buffer,
+ u16 class, u16 select)
+{
+ int ret;
+
+ buffer->cmd_class = class;
+ buffer->cmd_select = select;
+ ret = dell_smbios_call(buffer);
+ if (ret != 0)
+ return ret;
+ return dell_smbios_error(buffer->output[0]);
+}
+EXPORT_SYMBOL_GPL(dell_send_request);
+
struct calling_interface_token *dell_smbios_find_token(int tokenid)
{
int i;
@@ -350,6 +382,15 @@ void dell_laptop_call_notifier(unsigned long action, void *data)
}
EXPORT_SYMBOL_GPL(dell_laptop_call_notifier);
+bool dell_smbios_class_is_supported(u16 class)
+{
+ /* Classes above 30 are never supported */
+ if (class > 30)
+ return false;
+ return da_supported_commands & (1 << class);
+}
+EXPORT_SYMBOL_GPL(dell_smbios_class_is_supported);
+
static void __init parse_da_table(const struct dmi_header *dm)
{
/* Final token is a terminator, so we don't want to copy it */
@@ -416,47 +457,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int match_attribute(struct device *dev,
- struct device_attribute *attr)
-{
- int i;
-
- for (i = 0; i < da_num_tokens * 2; i++) {
- if (!token_attrs[i])
- continue;
- if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
- return i/2;
- }
- dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
- return -EINVAL;
-}
-
static ssize_t location_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i;
+ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- i = match_attribute(dev, attr);
- if (i > 0)
- return sysfs_emit(buf, "%08x", da_tokens[i].location);
- return 0;
+ return sysfs_emit(buf, "%08x", data->token->location);
}
static ssize_t value_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i;
+ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- i = match_attribute(dev, attr);
- if (i > 0)
- return sysfs_emit(buf, "%08x", da_tokens[i].value);
- return 0;
+ return sysfs_emit(buf, "%08x", data->token->value);
}
static struct attribute_group smbios_attribute_group = {
@@ -473,22 +493,15 @@ static int build_tokens_sysfs(struct platform_device *dev)
{
char *location_name;
char *value_name;
- size_t size;
int ret;
int i, j;
- /* (number of tokens + 1 for null terminated */
- size = sizeof(struct device_attribute) * (da_num_tokens + 1);
- token_location_attrs = kzalloc(size, GFP_KERNEL);
- if (!token_location_attrs)
+ token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL);
+ if (!token_entries)
return -ENOMEM;
- token_value_attrs = kzalloc(size, GFP_KERNEL);
- if (!token_value_attrs)
- goto out_allocate_value;
/* need to store both location and value + terminator*/
- size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
- token_attrs = kzalloc(size, GFP_KERNEL);
+ token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL);
if (!token_attrs)
goto out_allocate_attrs;
@@ -496,32 +509,34 @@ static int build_tokens_sysfs(struct platform_device *dev)
/* skip empty */
if (da_tokens[i].tokenID == 0)
continue;
+
+ token_entries[i].token = &da_tokens[i];
+
/* add location */
location_name = kasprintf(GFP_KERNEL, "%04x_location",
da_tokens[i].tokenID);
if (location_name == NULL)
goto out_unwind_strings;
- sysfs_attr_init(&token_location_attrs[i].attr);
- token_location_attrs[i].attr.name = location_name;
- token_location_attrs[i].attr.mode = 0444;
- token_location_attrs[i].show = location_show;
- token_attrs[j++] = &token_location_attrs[i].attr;
+
+ sysfs_attr_init(&token_entries[i].location_attr.attr);
+ token_entries[i].location_attr.attr.name = location_name;
+ token_entries[i].location_attr.attr.mode = 0444;
+ token_entries[i].location_attr.show = location_show;
+ token_attrs[j++] = &token_entries[i].location_attr.attr;
/* add value */
value_name = kasprintf(GFP_KERNEL, "%04x_value",
da_tokens[i].tokenID);
- if (value_name == NULL)
- goto loop_fail_create_value;
- sysfs_attr_init(&token_value_attrs[i].attr);
- token_value_attrs[i].attr.name = value_name;
- token_value_attrs[i].attr.mode = 0444;
- token_value_attrs[i].show = value_show;
- token_attrs[j++] = &token_value_attrs[i].attr;
- continue;
-
-loop_fail_create_value:
- kfree(location_name);
- goto out_unwind_strings;
+ if (!value_name) {
+ kfree(location_name);
+ goto out_unwind_strings;
+ }
+
+ sysfs_attr_init(&token_entries[i].value_attr.attr);
+ token_entries[i].value_attr.attr.name = value_name;
+ token_entries[i].value_attr.attr.mode = 0444;
+ token_entries[i].value_attr.show = value_show;
+ token_attrs[j++] = &token_entries[i].value_attr.attr;
}
smbios_attribute_group.attrs = token_attrs;
@@ -532,14 +547,12 @@ loop_fail_create_value:
out_unwind_strings:
while (i--) {
- kfree(token_location_attrs[i].attr.name);
- kfree(token_value_attrs[i].attr.name);
+ kfree(token_entries[i].location_attr.attr.name);
+ kfree(token_entries[i].value_attr.attr.name);
}
kfree(token_attrs);
out_allocate_attrs:
- kfree(token_value_attrs);
-out_allocate_value:
- kfree(token_location_attrs);
+ kfree(token_entries);
return -ENOMEM;
}
@@ -551,12 +564,11 @@ static void free_group(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj,
&smbios_attribute_group);
for (i = 0; i < da_num_tokens; i++) {
- kfree(token_location_attrs[i].attr.name);
- kfree(token_value_attrs[i].attr.name);
+ kfree(token_entries[i].location_attr.attr.name);
+ kfree(token_entries[i].value_attr.attr.name);
}
kfree(token_attrs);
- kfree(token_value_attrs);
- kfree(token_location_attrs);
+ kfree(token_entries);
}
static int __init dell_smbios_init(void)
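The core of the dell-smbios-base.c change is the switch from name matching (the removed match_attribute()) to embedding each device_attribute inside a per-token struct, so the show callbacks can recover their data with container_of() in constant time. A stripped-down sketch of that idiom, with hypothetical demo_* names:

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/types.h>

struct demo_entry {
	struct device_attribute value_attr;	/* embedded, not pointed to */
	u32 value;
};

static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	/* recover the enclosing entry directly from the attribute pointer */
	struct demo_entry *entry = container_of(attr, struct demo_entry, value_attr);

	return sysfs_emit(buf, "%08x", entry->value);
}

Because the attribute is a member of the entry, no lookup table or string comparison is needed, which is exactly what lets the patch drop match_attribute().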
diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h
index eb341bf000c6..ea0cc38642a2 100644
--- a/drivers/platform/x86/dell/dell-smbios.h
+++ b/drivers/platform/x86/dell/dell-smbios.h
@@ -19,6 +19,7 @@
/* Classes and selects used only in kernel drivers */
#define CLASS_KBD_BACKLIGHT 4
#define SELECT_KBD_BACKLIGHT 11
+#define SELECT_THERMAL_MANAGEMENT 19
/* Tokens used in kernel drivers, any of these
* should be filtered from userspace access
@@ -64,6 +65,11 @@ int dell_smbios_call_filter(struct device *d,
struct calling_interface_buffer *buffer);
int dell_smbios_call(struct calling_interface_buffer *buffer);
+void dell_fill_request(struct calling_interface_buffer *buffer,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3);
+int dell_send_request(struct calling_interface_buffer *buffer,
+ u16 class, u16 select);
+
struct calling_interface_token *dell_smbios_find_token(int tokenid);
enum dell_laptop_notifier_actions {
@@ -73,6 +79,7 @@ enum dell_laptop_notifier_actions {
int dell_laptop_register_notifier(struct notifier_block *nb);
int dell_laptop_unregister_notifier(struct notifier_block *nb);
void dell_laptop_call_notifier(unsigned long action, void *data);
+bool dell_smbios_class_is_supported(u16 class);
/* for the supported backends */
#ifdef CONFIG_DELL_SMBIOS_WMI
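The newly exported helpers declared here are meant to be used together: check the class with dell_smbios_class_is_supported(), build the request with dell_fill_request(), then issue it with dell_send_request() and read the output words. A hedged sketch of that calling sequence follows; the all-zero input arguments and the choice of output[2] are illustrative only, since the meaning of each word is specific to the SMBIOS call being made.

#include <linux/errno.h>
#include <linux/types.h>
#include "dell-smbios.h"

static int demo_thermal_query(u32 *out)
{
	struct calling_interface_buffer buffer;
	int ret;

	if (!dell_smbios_class_is_supported(CLASS_INFO))
		return -ENODEV;

	dell_fill_request(&buffer, 0x0, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
	if (ret)
		return ret;

	*out = buffer.output[2];	/* call-specific: pick the word the spec defines */
	return 0;
}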
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index dd8240009565..182a07d8ae3d 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -49,4 +49,5 @@ int fw_attributes_class_put(void)
EXPORT_SYMBOL_GPL(fw_attributes_class_put);
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_DESCRIPTION("Firmware attributes class helper module");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp/Kconfig b/drivers/platform/x86/hp/Kconfig
index 7fef4f12e498..d776761cd5fd 100644
--- a/drivers/platform/x86/hp/Kconfig
+++ b/drivers/platform/x86/hp/Kconfig
@@ -40,6 +40,7 @@ config HP_WMI
depends on ACPI_WMI
depends on INPUT
depends on RFKILL || RFKILL = n
+ select POWER_SUPPLY
select INPUT_SPARSEKMAP
select ACPI_PLATFORM_PROFILE
select HWMON
diff --git a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
index a2402d31c146..c50ad5880503 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
@@ -52,9 +52,7 @@ static void update_enumeration_value(int instance_id, char *attr_value)
{
struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
- strscpy(enum_data->current_value,
- attr_value,
- sizeof(enum_data->current_value));
+ strscpy(enum_data->current_value, attr_value);
}
ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, enumeration);
@@ -174,8 +172,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum
case VALUE:
break;
case PATH:
- strscpy(enum_data->common.path, str_value,
- sizeof(enum_data->common.path));
+ strscpy(enum_data->common.path, str_value);
break;
case IS_READONLY:
enum_data->common.is_readonly = int_value;
@@ -222,9 +219,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum
if (ret)
return -EINVAL;
- strscpy(enum_data->common.prerequisites[reqs],
- str_value,
- sizeof(enum_data->common.prerequisites[reqs]));
+ strscpy(enum_data->common.prerequisites[reqs], str_value);
kfree(str_value);
str_value = NULL;
@@ -236,8 +231,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum
break;
case ENUM_CURRENT_VALUE:
- strscpy(enum_data->current_value,
- str_value, sizeof(enum_data->current_value));
+ strscpy(enum_data->current_value, str_value);
break;
case ENUM_SIZE:
if (int_value > MAX_VALUES_SIZE) {
@@ -278,9 +272,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum
* is greater than MAX_VALUES_SIZE
*/
if (size < MAX_VALUES_SIZE)
- strscpy(enum_data->possible_values[pos_values],
- str_value,
- sizeof(enum_data->possible_values[pos_values]));
+ strscpy(enum_data->possible_values[pos_values], str_value);
kfree(str_value);
str_value = NULL;
diff --git a/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
index 86b7ac63fec2..6c7f4d5fa9cb 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
@@ -192,8 +192,7 @@ static int hp_populate_integer_elements_from_package(union acpi_object *integer_
integer_data->current_value = int_value;
break;
case PATH:
- strscpy(integer_data->common.path, str_value,
- sizeof(integer_data->common.path));
+ strscpy(integer_data->common.path, str_value);
break;
case IS_READONLY:
integer_data->common.is_readonly = int_value;
@@ -240,9 +239,7 @@ static int hp_populate_integer_elements_from_package(union acpi_object *integer_
if (ret)
continue;
- strscpy(integer_data->common.prerequisites[reqs],
- str_value,
- sizeof(integer_data->common.prerequisites[reqs]));
+ strscpy(integer_data->common.prerequisites[reqs], str_value);
kfree(str_value);
str_value = NULL;
}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
index 1ff09dfb7d7e..c6e57bb9d8b7 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
@@ -57,9 +57,7 @@ static void update_ordered_list_value(int instance, char *attr_value)
{
struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance];
- strscpy(ordered_list_data->current_value,
- attr_value,
- sizeof(ordered_list_data->current_value));
+ strscpy(ordered_list_data->current_value, attr_value);
}
ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, ordered_list);
@@ -179,13 +177,11 @@ static int hp_populate_ordered_list_elements_from_package(union acpi_object *ord
/* Assign appropriate element value to corresponding field*/
switch (eloc) {
case VALUE:
- strscpy(ordered_list_data->current_value,
- str_value, sizeof(ordered_list_data->current_value));
+ strscpy(ordered_list_data->current_value, str_value);
replace_char_str(ordered_list_data->current_value, COMMA_SEP, SEMICOLON_SEP);
break;
case PATH:
- strscpy(ordered_list_data->common.path, str_value,
- sizeof(ordered_list_data->common.path));
+ strscpy(ordered_list_data->common.path, str_value);
break;
case IS_READONLY:
ordered_list_data->common.is_readonly = int_value;
@@ -227,9 +223,7 @@ static int hp_populate_ordered_list_elements_from_package(union acpi_object *ord
if (ret)
continue;
- strscpy(ordered_list_data->common.prerequisites[reqs],
- str_value,
- sizeof(ordered_list_data->common.prerequisites[reqs]));
+ strscpy(ordered_list_data->common.prerequisites[reqs], str_value);
kfree(str_value);
str_value = NULL;
@@ -271,9 +265,7 @@ static int hp_populate_ordered_list_elements_from_package(union acpi_object *ord
part = strsep(&part_tmp, COMMA_SEP);
for (olist_elem = 0; olist_elem < MAX_ELEMENTS_SIZE && part; olist_elem++) {
- strscpy(ordered_list_data->elements[olist_elem],
- part,
- sizeof(ordered_list_data->elements[olist_elem]));
+ strscpy(ordered_list_data->elements[olist_elem], part);
part = strsep(&part_tmp, COMMA_SEP);
}
ordered_list_data->elements_size = olist_elem;
diff --git a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
index f7efe217a4bb..35936c05e45b 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
@@ -101,13 +101,9 @@ static int store_password_instance(struct kobject *kobj, const char *buf,
if (!ret) {
if (is_current)
- strscpy(bioscfg_drv.password_data[id].current_password,
- buf_cp,
- sizeof(bioscfg_drv.password_data[id].current_password));
+ strscpy(bioscfg_drv.password_data[id].current_password, buf_cp);
else
- strscpy(bioscfg_drv.password_data[id].new_password,
- buf_cp,
- sizeof(bioscfg_drv.password_data[id].new_password));
+ strscpy(bioscfg_drv.password_data[id].new_password, buf_cp);
}
kfree(buf_cp);
@@ -272,8 +268,7 @@ static int hp_populate_password_elements_from_package(union acpi_object *passwor
case VALUE:
break;
case PATH:
- strscpy(password_data->common.path, str_value,
- sizeof(password_data->common.path));
+ strscpy(password_data->common.path, str_value);
break;
case IS_READONLY:
password_data->common.is_readonly = int_value;
@@ -315,9 +310,7 @@ static int hp_populate_password_elements_from_package(union acpi_object *passwor
if (ret)
break;
- strscpy(password_data->common.prerequisites[reqs],
- str_value,
- sizeof(password_data->common.prerequisites[reqs]));
+ strscpy(password_data->common.prerequisites[reqs], str_value);
kfree(str_value);
str_value = NULL;
@@ -359,9 +352,7 @@ static int hp_populate_password_elements_from_package(union acpi_object *passwor
if (ret)
break;
- strscpy(password_data->encodings[pos_values],
- str_value,
- sizeof(password_data->encodings[pos_values]));
+ strscpy(password_data->encodings[pos_values], str_value);
kfree(str_value);
str_value = NULL;
diff --git a/drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c
index 86f90238750c..2b00a14792e9 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c
@@ -365,8 +365,7 @@ int hp_populate_secure_platform_data(struct kobject *attr_name_kobj)
/* Populate data for Secure Platform Management */
bioscfg_drv.spm_data.attr_name_kobj = attr_name_kobj;
- strscpy(bioscfg_drv.spm_data.attribute_name, SPM_STR,
- sizeof(bioscfg_drv.spm_data.attribute_name));
+ strscpy(bioscfg_drv.spm_data.attribute_name, SPM_STR);
bioscfg_drv.spm_data.is_enabled = 0;
bioscfg_drv.spm_data.mechanism = 0;
diff --git a/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
index f0c20070094d..27758b779b2d 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
@@ -50,7 +50,7 @@ static void update_string_value(int instance_id, char *attr_value)
struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
/* Write settings to BIOS */
- strscpy(string_data->current_value, attr_value, sizeof(string_data->current_value));
+ strscpy(string_data->current_value, attr_value);
}
/*
@@ -178,12 +178,10 @@ static int hp_populate_string_elements_from_package(union acpi_object *string_ob
/* Assign appropriate element value to corresponding field*/
switch (eloc) {
case VALUE:
- strscpy(string_data->current_value,
- str_value, sizeof(string_data->current_value));
+ strscpy(string_data->current_value, str_value);
break;
case PATH:
- strscpy(string_data->common.path, str_value,
- sizeof(string_data->common.path));
+ strscpy(string_data->common.path, str_value);
break;
case IS_READONLY:
string_data->common.is_readonly = int_value;
@@ -231,9 +229,7 @@ static int hp_populate_string_elements_from_package(union acpi_object *string_ob
if (ret)
continue;
- strscpy(string_data->common.prerequisites[reqs],
- str_value,
- sizeof(string_data->common.prerequisites[reqs]));
+ strscpy(string_data->common.prerequisites[reqs], str_value);
kfree(str_value);
str_value = NULL;
}
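All of the hp-bioscfg conversions above drop the explicit size argument because strscpy() can infer the bound from the destination when it is a fixed-size array; the behaviour is otherwise unchanged. A minimal sketch, assuming a hypothetical demo_data struct:

#include <linux/string.h>

struct demo_data {
	char current_value[64];		/* size known at compile time */
};

static void demo_update(struct demo_data *d, const char *attr_value)
{
	/* two-argument form: the bound is taken from sizeof(d->current_value) */
	strscpy(d->current_value, attr_value);

	/*
	 * Equivalent to the older explicit spelling:
	 * strscpy(d->current_value, attr_value, sizeof(d->current_value));
	 */
}

The two-argument form only works when the destination is an array; for plain pointers the explicit size is still required.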
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 5fa553023842..876e0a97cee1 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -24,6 +24,9 @@
#include <linux/platform_profile.h>
#include <linux/hwmon.h>
#include <linux/acpi.h>
+#include <linux/mutex.h>
+#include <linux/cleanup.h>
+#include <linux/power_supply.h>
#include <linux/rfkill.h>
#include <linux/string.h>
#include <linux/dmi.h>
@@ -42,6 +45,8 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
#define HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET 0x63
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+#define ACPI_AC_CLASS "ac_adapter"
+
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
/* DMI board names of devices that should use the omen specific path for
@@ -259,10 +264,18 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_END, 0 }
};
+/*
+ * Mutex for the active_platform_profile variable,
+ * see omen_powersource_event.
+ */
+static DEFINE_MUTEX(active_platform_profile_lock);
+
static struct input_dev *hp_wmi_input_dev;
static struct input_dev *camera_shutter_input_dev;
static struct platform_device *hp_wmi_platform_dev;
static struct platform_profile_handler platform_profile_handler;
+static struct notifier_block platform_power_source_nb;
+static enum platform_profile_option active_platform_profile;
static bool platform_profile_support;
static bool zero_insize_support;
@@ -1194,8 +1207,7 @@ fail:
return err;
}
-static int platform_profile_omen_get(struct platform_profile_handler *pprof,
- enum platform_profile_option *profile)
+static int platform_profile_omen_get_ec(enum platform_profile_option *profile)
{
int tp;
@@ -1223,6 +1235,27 @@ static int platform_profile_omen_get(struct platform_profile_handler *pprof,
return 0;
}
+static int platform_profile_omen_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ /*
+ * We directly return the stored platform profile, as the embedded
+ * controller will not accept switching to the performance option when
+ * the conditions are not met (e.g. the laptop is not plugged in).
+ *
+ * If we directly return what the EC reports, the platform profile will
+ * immediately "switch back" to normal mode, which is against the
+ * expected behaviour from a userspace point of view, as described in
+ * the Platform Profile Section page of the kernel documentation.
+ *
+ * See also omen_powersource_event.
+ */
+ guard(mutex)(&active_platform_profile_lock);
+ *profile = active_platform_profile;
+
+ return 0;
+}
+
static bool has_omen_thermal_profile_ec_timer(void)
{
const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
@@ -1245,8 +1278,7 @@ inline int omen_thermal_profile_ec_timer_set(u8 value)
return ec_write(HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET, value);
}
-static int platform_profile_omen_set(struct platform_profile_handler *pprof,
- enum platform_profile_option profile)
+static int platform_profile_omen_set_ec(enum platform_profile_option profile)
{
int err, tp, tp_version;
enum hp_thermal_profile_omen_flags flags = 0;
@@ -1300,6 +1332,22 @@ static int platform_profile_omen_set(struct platform_profile_handler *pprof,
return 0;
}
+static int platform_profile_omen_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int err;
+
+ guard(mutex)(&active_platform_profile_lock);
+
+ err = platform_profile_omen_set_ec(profile);
+ if (err < 0)
+ return err;
+
+ active_platform_profile = profile;
+
+ return 0;
+}
+
static int thermal_profile_get(void)
{
return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
@@ -1381,8 +1429,7 @@ static bool is_victus_thermal_profile(void)
board_name) >= 0;
}
-static int platform_profile_victus_get(struct platform_profile_handler *pprof,
- enum platform_profile_option *profile)
+static int platform_profile_victus_get_ec(enum platform_profile_option *profile)
{
int tp;
@@ -1407,8 +1454,14 @@ static int platform_profile_victus_get(struct platform_profile_handler *pprof,
return 0;
}
-static int platform_profile_victus_set(struct platform_profile_handler *pprof,
- enum platform_profile_option profile)
+static int platform_profile_victus_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ /* Same behaviour as platform_profile_omen_get */
+ return platform_profile_omen_get(pprof, profile);
+}
+
+static int platform_profile_victus_set_ec(enum platform_profile_option profile)
{
int err, tp;
@@ -1433,21 +1486,113 @@ static int platform_profile_victus_set(struct platform_profile_handler *pprof,
return 0;
}
+static int platform_profile_victus_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int err;
+
+ guard(mutex)(&active_platform_profile_lock);
+
+ err = platform_profile_victus_set_ec(profile);
+ if (err < 0)
+ return err;
+
+ active_platform_profile = profile;
+
+ return 0;
+}
+
+static int omen_powersource_event(struct notifier_block *nb,
+ unsigned long value,
+ void *data)
+{
+ struct acpi_bus_event *event_entry = data;
+ enum platform_profile_option actual_profile;
+ int err;
+
+ if (strcmp(event_entry->device_class, ACPI_AC_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ pr_debug("Received power source device event\n");
+
+ guard(mutex)(&active_platform_profile_lock);
+
+ /*
+ * This handler can only be called on Omen and Victus models, so
+ * there's no need to call is_victus_thermal_profile() here.
+ */
+ if (is_omen_thermal_profile())
+ err = platform_profile_omen_get_ec(&actual_profile);
+ else
+ err = platform_profile_victus_get_ec(&actual_profile);
+
+ if (err < 0) {
+ /*
+ * Although we failed to get the current platform profile, we
+ * still want the other event consumers to process the event.
+ */
+ pr_warn("Failed to read current platform profile (%d)\n", err);
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * If we're back on AC and the user-chosen power profile is
+ * different from what the EC reports, we restore the user-chosen
+ * one.
+ */
+ if (power_supply_is_system_supplied() <= 0 ||
+ active_platform_profile == actual_profile) {
+ pr_debug("Platform profile update skipped, conditions unmet\n");
+ return NOTIFY_DONE;
+ }
+
+ if (is_omen_thermal_profile())
+ err = platform_profile_omen_set_ec(active_platform_profile);
+ else
+ err = platform_profile_victus_set_ec(active_platform_profile);
+
+ if (err < 0) {
+ pr_warn("Failed to restore platform profile (%d)\n", err);
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int omen_register_powersource_event_handler(void)
+{
+ int err;
+
+ platform_power_source_nb.notifier_call = omen_powersource_event;
+ err = register_acpi_notifier(&platform_power_source_nb);
+
+ if (err < 0) {
+ pr_warn("Failed to install ACPI power source notify handler\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void omen_unregister_powersource_event_handler(void)
+{
+ unregister_acpi_notifier(&platform_power_source_nb);
+}
+
static int thermal_profile_setup(void)
{
int err, tp;
if (is_omen_thermal_profile()) {
- tp = omen_thermal_profile_get();
- if (tp < 0)
- return tp;
+ err = platform_profile_omen_get_ec(&active_platform_profile);
+ if (err < 0)
+ return err;
/*
* call thermal profile write command to ensure that the
* firmware correctly sets the OEM variables
*/
-
- err = omen_thermal_profile_set(tp);
+ err = platform_profile_omen_set_ec(active_platform_profile);
if (err < 0)
return err;
@@ -1456,15 +1601,15 @@ static int thermal_profile_setup(void)
set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
} else if (is_victus_thermal_profile()) {
- tp = omen_thermal_profile_get();
- if (tp < 0)
- return tp;
+ err = platform_profile_victus_get_ec(&active_platform_profile);
+ if (err < 0)
+ return err;
/*
* call thermal profile write command to ensure that the
* firmware correctly sets the OEM variables
*/
- err = omen_thermal_profile_set(tp);
+ err = platform_profile_victus_set_ec(active_platform_profile);
if (err < 0)
return err;
@@ -1758,6 +1903,12 @@ static int __init hp_wmi_init(void)
goto err_unregister_device;
}
+ if (is_omen_thermal_profile() || is_victus_thermal_profile()) {
+ err = omen_register_powersource_event_handler();
+ if (err)
+ goto err_unregister_device;
+ }
+
return 0;
err_unregister_device:
@@ -1772,6 +1923,9 @@ module_init(hp_wmi_init);
static void __exit hp_wmi_exit(void)
{
+ if (is_omen_thermal_profile() || is_victus_thermal_profile())
+ omen_unregister_powersource_event_handler();
+
if (wmi_has_guid(HPWMI_EVENT_GUID))
hp_wmi_input_destroy();
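The power-source handling added to hp-wmi follows the standard ACPI bus notifier shape: a notifier_block whose callback filters on the event's device_class and returns NOTIFY_DONE for anything it does not handle. A minimal sketch of that shape, with demo_* names standing in for the driver's omen_* functions:

#include <linux/acpi.h>
#include <linux/notifier.h>
#include <linux/string.h>

static int demo_power_event(struct notifier_block *nb,
			    unsigned long value, void *data)
{
	struct acpi_bus_event *event = data;

	if (strcmp(event->device_class, "ac_adapter") != 0)
		return NOTIFY_DONE;	/* not an AC adapter event, let others see it */

	/* re-apply cached state here, as omen_powersource_event() does */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_power_event,
};

/* register_acpi_notifier(&demo_nb) on init, unregister_acpi_notifier(&demo_nb) on exit */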
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 1d4bbae115f1..231b37909801 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -29,6 +29,7 @@ static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Show debug output");
+MODULE_DESCRIPTION("IBM Premium Real Time Mode (PRTM) driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Keith Mannthey <kmmanth@us.ibm.com>");
MODULE_AUTHOR("Vernon Mauery <vernux@us.ibm.com>");
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index fcf13d88fd6e..1ace711f7442 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmi.h>
@@ -204,7 +205,7 @@ static int ideapad_shared_init(struct ideapad_private *priv)
{
int ret;
- mutex_lock(&ideapad_shared_mutex);
+ guard(mutex)(&ideapad_shared_mutex);
if (!ideapad_shared) {
ideapad_shared = priv;
@@ -214,19 +215,15 @@ static int ideapad_shared_init(struct ideapad_private *priv)
ret = -EINVAL;
}
- mutex_unlock(&ideapad_shared_mutex);
-
return ret;
}
static void ideapad_shared_exit(struct ideapad_private *priv)
{
- mutex_lock(&ideapad_shared_mutex);
+ guard(mutex)(&ideapad_shared_mutex);
if (ideapad_shared == priv)
ideapad_shared = NULL;
-
- mutex_unlock(&ideapad_shared_mutex);
}
/*
@@ -840,36 +837,33 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
unsigned long output;
int err;
- err = mutex_lock_interruptible(&dytc->mutex);
- if (err)
- return err;
-
- if (profile == PLATFORM_PROFILE_BALANCED) {
- /* To get back to balanced mode we just issue a reset command */
- err = eval_dytc(priv->adev->handle, DYTC_CMD_RESET, NULL);
- if (err)
- goto unlock;
- } else {
- int perfmode;
-
- err = convert_profile_to_dytc(profile, &perfmode);
- if (err)
- goto unlock;
+ scoped_guard(mutex_intr, &dytc->mutex) {
+ if (profile == PLATFORM_PROFILE_BALANCED) {
+ /* To get back to balanced mode we just issue a reset command */
+ err = eval_dytc(priv->adev->handle, DYTC_CMD_RESET, NULL);
+ if (err)
+ return err;
+ } else {
+ int perfmode;
+
+ err = convert_profile_to_dytc(profile, &perfmode);
+ if (err)
+ return err;
+
+ /* Determine if we are in CQL mode. This alters the commands we do */
+ err = dytc_cql_command(priv,
+ DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
+ &output);
+ if (err)
+ return err;
+ }
- /* Determine if we are in CQL mode. This alters the commands we do */
- err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
- &output);
- if (err)
- goto unlock;
+ /* Success - update current profile */
+ dytc->current_profile = profile;
+ return 0;
}
- /* Success - update current profile */
- dytc->current_profile = profile;
-
-unlock:
- mutex_unlock(&dytc->mutex);
-
- return err;
+ return -EINTR;
}
static void dytc_profile_refresh(struct ideapad_private *priv)
@@ -878,9 +872,8 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
unsigned long output;
int err, perfmode;
- mutex_lock(&priv->dytc->mutex);
- err = dytc_cql_command(priv, DYTC_CMD_GET, &output);
- mutex_unlock(&priv->dytc->mutex);
+ scoped_guard(mutex, &priv->dytc->mutex)
+ err = dytc_cql_command(priv, DYTC_CMD_GET, &output);
if (err)
return;
@@ -1809,11 +1802,11 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
struct ideapad_wmi_private *wpriv = dev_get_drvdata(&wdev->dev);
struct ideapad_private *priv;
- mutex_lock(&ideapad_shared_mutex);
+ guard(mutex)(&ideapad_shared_mutex);
priv = ideapad_shared;
if (!priv)
- goto unlock;
+ return;
switch (wpriv->event) {
case IDEAPAD_WMI_EVENT_ESC:
@@ -1847,8 +1840,6 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
break;
}
-unlock:
- mutex_unlock(&ideapad_shared_mutex);
}
static const struct ideapad_wmi_private ideapad_wmi_context_esc = {
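The ideapad conversion leans on the lock guards from <linux/cleanup.h>: guard(mutex) holds the lock until the end of the enclosing scope, while scoped_guard(mutex_intr, ...) only enters its block if mutex_lock_interruptible() succeeded, so falling past the block (as dytc_profile_set() now does) means the acquisition was interrupted and -EINTR is the right return. A small sketch of both forms, with hypothetical demo_* names:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_set(int value)
{
	scoped_guard(mutex_intr, &demo_lock) {
		/* reached only when the interruptible lock was taken */
		demo_state = value;
		return 0;		/* the guard drops the lock on any exit */
	}
	/* scope skipped: a signal interrupted the lock attempt */
	return -EINTR;
}

static int demo_get(void)
{
	guard(mutex)(&demo_lock);	/* released automatically at function exit */
	return demo_state;
}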
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index e9dc0c021029..ad50bbabec61 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -192,10 +192,14 @@ config INTEL_SMARTCONNECT
This driver checks to determine whether the device has Intel Smart
Connect enabled, and if so disables it.
+config INTEL_TPMI_POWER_DOMAINS
+ tristate
+
config INTEL_TPMI
tristate "Intel Topology Aware Register and PM Capsule Interface (TPMI)"
depends on INTEL_VSEC
depends on X86_64
+ select INTEL_TPMI_POWER_DOMAINS
help
The Intel Topology Aware Register and PM Capsule Interface (TPMI),
provides enumerable MMIO interface for power management features.
@@ -205,6 +209,13 @@ config INTEL_TPMI
To compile this driver as a module, choose M here: the module will
be called intel_vsec_tpmi.
+config INTEL_PLR_TPMI
+ tristate "Intel SoC TPMI Power Limit Reasons driver"
+ depends on INTEL_TPMI
+ help
+ This driver provides the TPMI power limit reasons status information
+ via debugfs files.
+
config INTEL_TURBO_MAX_3
bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
depends on X86_64 && SCHED_MC_PRIO
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index c1d5fe05e3f3..74db065c82d6 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -52,6 +52,10 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
# TPMI drivers
intel_vsec_tpmi-y := tpmi.o
obj-$(CONFIG_INTEL_TPMI) += intel_vsec_tpmi.o
+obj-$(CONFIG_INTEL_PLR_TPMI) += intel_plr_tpmi.o
+
+intel_tpmi_power_domains-y := tpmi_power_domains.o
+obj-$(CONFIG_INTEL_TPMI_POWER_DOMAINS) += intel_tpmi_power_domains.o
# Intel Uncore drivers
intel-rst-y := rst.o
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index 93f75ba1dafd..11503b1c85f3 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -270,7 +270,7 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
}
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ strscpy(board_info.type, "max17047");
board_info.dev_name = "max17047";
board_info.fwnode = fwnode;
data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
@@ -361,7 +361,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
}
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ strscpy(board_info.type, "typec_fusb302");
board_info.dev_name = "fusb302";
board_info.fwnode = fwnode;
board_info.irq = fusb302_irq;
@@ -381,7 +381,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
memset(&board_info, 0, sizeof(board_info));
board_info.dev_name = "pi3usb30532";
board_info.fwnode = fwnode;
- strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+ strscpy(board_info.type, "pi3usb30532");
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
if (IS_ERR(data->pi3usb30532)) {
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index c7a827645864..10cd65497cc1 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -38,6 +38,7 @@ MODULE_PARM_DESC(enable_sw_tablet_mode,
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG BIT(6)
+MODULE_DESCRIPTION("Intel HID Event hotkey driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
index 7b11198d85a1..33412a584836 100644
--- a/drivers/platform/x86/intel/ifs/core.c
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -11,16 +11,15 @@
#include "ifs.h"
-#define X86_MATCH(model, array_gen) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
- INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen)
+#define X86_MATCH(vfm, array_gen) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_CORE_CAPABILITIES, array_gen)
static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
- X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0),
- X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0),
- X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0),
- X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0),
- X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, ARRAY_GEN0),
+ X86_MATCH(INTEL_ATOM_CRESTMONT_X, ARRAY_GEN1),
{}
};
MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
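For reference, tables like ifs_cpu_ids are consumed with x86_match_cpu(), which returns the first matching entry (or NULL) and carries the per-entry value, ARRAY_GEN0/ARRAY_GEN1 here, in driver_data; the new X86_MATCH_VFM_FEATURE() entries simply encode vendor, family and model in a single INTEL_* identifier instead of spelling out family 6 by hand. A hedged sketch of the usual init-time check; demo_check_cpu() is illustrative, not part of the driver:

#include <asm/cpu_device_id.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>

static int demo_check_cpu(void)
{
	const struct x86_cpu_id *m = x86_match_cpu(ifs_cpu_ids);

	if (!m)
		return -ENODEV;	/* CPU not listed or lacking the required feature */

	/* the generation selected in the table rides along in driver_data */
	return (int)m->driver_data;
}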
diff --git a/drivers/platform/x86/intel/intel_plr_tpmi.c b/drivers/platform/x86/intel/intel_plr_tpmi.c
new file mode 100644
index 000000000000..69ace6a629bc
--- /dev/null
+++ b/drivers/platform/x86/intel/intel_plr_tpmi.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Performance Limit Reasons via TPMI
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
+#include <linux/intel_tpmi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kstrtox.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+
+#include "tpmi_power_domains.h"
+
+#define PLR_HEADER 0x00
+#define PLR_MAILBOX_INTERFACE 0x08
+#define PLR_MAILBOX_DATA 0x10
+#define PLR_DIE_LEVEL 0x18
+
+#define PLR_MODULE_ID_MASK GENMASK_ULL(19, 12)
+#define PLR_RUN_BUSY BIT_ULL(63)
+
+#define PLR_COMMAND_WRITE 1
+
+#define PLR_INVALID GENMASK_ULL(63, 0)
+
+#define PLR_TIMEOUT_US 5
+#define PLR_TIMEOUT_MAX_US 1000
+
+#define PLR_COARSE_REASON_BITS 32
+
+struct tpmi_plr;
+
+struct tpmi_plr_die {
+ void __iomem *base;
+ struct mutex lock; /* Protect access to PLR mailbox */
+ int package_id;
+ int die_id;
+ struct tpmi_plr *plr;
+};
+
+struct tpmi_plr {
+ struct dentry *dbgfs_dir;
+ struct tpmi_plr_die *die_info;
+ int num_dies;
+ struct auxiliary_device *auxdev;
+};
+
+static const char * const plr_coarse_reasons[] = {
+ "FREQUENCY",
+ "CURRENT",
+ "POWER",
+ "THERMAL",
+ "PLATFORM",
+ "MCP",
+ "RAS",
+ "MISC",
+ "QOS",
+ "DFC",
+};
+
+static const char * const plr_fine_reasons[] = {
+ "FREQUENCY_CDYN0",
+ "FREQUENCY_CDYN1",
+ "FREQUENCY_CDYN2",
+ "FREQUENCY_CDYN3",
+ "FREQUENCY_CDYN4",
+ "FREQUENCY_CDYN5",
+ "FREQUENCY_FCT",
+ "FREQUENCY_PCS_TRL",
+ "CURRENT_MTPMAX",
+ "POWER_FAST_RAPL",
+ "POWER_PKG_PL1_MSR_TPMI",
+ "POWER_PKG_PL1_MMIO",
+ "POWER_PKG_PL1_PCS",
+ "POWER_PKG_PL2_MSR_TPMI",
+ "POWER_PKG_PL2_MMIO",
+ "POWER_PKG_PL2_PCS",
+ "POWER_PLATFORM_PL1_MSR_TPMI",
+ "POWER_PLATFORM_PL1_MMIO",
+ "POWER_PLATFORM_PL1_PCS",
+ "POWER_PLATFORM_PL2_MSR_TPMI",
+ "POWER_PLATFORM_PL2_MMIO",
+ "POWER_PLATFORM_PL2_PCS",
+ "UNKNOWN(22)",
+ "THERMAL_PER_CORE",
+ "DFC_UFS",
+ "PLATFORM_PROCHOT",
+ "PLATFORM_HOT_VR",
+ "UNKNOWN(27)",
+ "UNKNOWN(28)",
+ "MISC_PCS_PSTATE",
+};
+
+static u64 plr_read(struct tpmi_plr_die *plr_die, int offset)
+{
+ return readq(plr_die->base + offset);
+}
+
+static void plr_write(u64 val, struct tpmi_plr_die *plr_die, int offset)
+{
+ writeq(val, plr_die->base + offset);
+}
+
+static int plr_read_cpu_status(struct tpmi_plr_die *plr_die, int cpu,
+ u64 *status)
+{
+ u64 regval;
+ int ret;
+
+ lockdep_assert_held(&plr_die->lock);
+
+ regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
+ regval |= PLR_RUN_BUSY;
+
+ plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);
+
+ ret = readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
+ !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
+ PLR_TIMEOUT_MAX_US);
+ if (ret)
+ return ret;
+
+ *status = plr_read(plr_die, PLR_MAILBOX_DATA);
+
+ return 0;
+}
+
+static int plr_clear_cpu_status(struct tpmi_plr_die *plr_die, int cpu)
+{
+ u64 regval;
+
+ lockdep_assert_held(&plr_die->lock);
+
+ regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
+ regval |= PLR_RUN_BUSY | PLR_COMMAND_WRITE;
+
+ plr_write(0, plr_die, PLR_MAILBOX_DATA);
+
+ plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);
+
+ return readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
+ !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
+ PLR_TIMEOUT_MAX_US);
+}
+
+static void plr_print_bits(struct seq_file *s, u64 val, int bits)
+{
+ const unsigned long mask[] = { BITMAP_FROM_U64(val) };
+ int bit, index;
+
+ for_each_set_bit(bit, mask, bits) {
+ const char *str = NULL;
+
+ if (bit < PLR_COARSE_REASON_BITS) {
+ if (bit < ARRAY_SIZE(plr_coarse_reasons))
+ str = plr_coarse_reasons[bit];
+ } else {
+ index = bit - PLR_COARSE_REASON_BITS;
+ if (index < ARRAY_SIZE(plr_fine_reasons))
+ str = plr_fine_reasons[index];
+ }
+
+ if (str)
+ seq_printf(s, " %s", str);
+ else
+ seq_printf(s, " UNKNOWN(%d)", bit);
+ }
+
+ if (!val)
+ seq_puts(s, " none");
+
+ seq_putc(s, '\n');
+}
+
+static int plr_status_show(struct seq_file *s, void *unused)
+{
+ struct tpmi_plr_die *plr_die = s->private;
+ int ret;
+ u64 val;
+
+ val = plr_read(plr_die, PLR_DIE_LEVEL);
+ seq_puts(s, "cpus");
+ plr_print_bits(s, val, 32);
+
+ guard(mutex)(&plr_die->lock);
+
+ for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
+ continue;
+
+ if (plr_die->package_id != topology_physical_package_id(cpu))
+ continue;
+
+ seq_printf(s, "cpu%d", cpu);
+ ret = plr_read_cpu_status(plr_die, cpu, &val);
+ if (ret) {
+ dev_err(&plr_die->plr->auxdev->dev, "Failed to read PLR for cpu %d, ret=%d\n",
+ cpu, ret);
+ return ret;
+ }
+
+ plr_print_bits(s, val, 64);
+ }
+
+ return 0;
+}
+
+static ssize_t plr_status_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct tpmi_plr_die *plr_die = s->private;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, count, &val);
+ if (ret)
+ return ret;
+
+ if (val != 0)
+ return -EINVAL;
+
+ plr_write(0, plr_die, PLR_DIE_LEVEL);
+
+ guard(mutex)(&plr_die->lock);
+
+ for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
+ continue;
+
+ if (plr_die->package_id != topology_physical_package_id(cpu))
+ continue;
+
+ plr_clear_cpu_status(plr_die, cpu);
+ }
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(plr_status);
+
+static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_tpmi_plat_info *plat_info;
+ struct dentry *dentry;
+ int i, num_resources;
+ struct resource *res;
+ struct tpmi_plr *plr;
+ void __iomem *base;
+ char name[16];
+ int err;
+
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return dev_err_probe(&auxdev->dev, -EINVAL, "No platform info\n");
+
+ dentry = tpmi_get_debugfs_dir(auxdev);
+ if (!dentry)
+ return dev_err_probe(&auxdev->dev, -ENODEV, "No TPMI debugfs directory.\n");
+
+ num_resources = tpmi_get_resource_count(auxdev);
+ if (!num_resources)
+ return -EINVAL;
+
+ plr = devm_kzalloc(&auxdev->dev, sizeof(*plr), GFP_KERNEL);
+ if (!plr)
+ return -ENOMEM;
+
+ plr->die_info = devm_kcalloc(&auxdev->dev, num_resources, sizeof(*plr->die_info),
+ GFP_KERNEL);
+ if (!plr->die_info)
+ return -ENOMEM;
+
+ plr->num_dies = num_resources;
+ plr->dbgfs_dir = debugfs_create_dir("plr", dentry);
+ plr->auxdev = auxdev;
+
+ for (i = 0; i < num_resources; i++) {
+ res = tpmi_get_resource_at_index(auxdev, i);
+ if (!res) {
+ err = dev_err_probe(&auxdev->dev, -EINVAL, "No resource\n");
+ goto err;
+ }
+
+ base = devm_ioremap_resource(&auxdev->dev, res);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ goto err;
+ }
+
+ plr->die_info[i].base = base;
+ plr->die_info[i].package_id = plat_info->package_id;
+ plr->die_info[i].die_id = i;
+ plr->die_info[i].plr = plr;
+ mutex_init(&plr->die_info[i].lock);
+
+ if (plr_read(&plr->die_info[i], PLR_HEADER) == PLR_INVALID)
+ continue;
+
+ snprintf(name, sizeof(name), "domain%d", i);
+
+ dentry = debugfs_create_dir(name, plr->dbgfs_dir);
+ debugfs_create_file("status", 0444, dentry, &plr->die_info[i],
+ &plr_status_fops);
+ }
+
+ auxiliary_set_drvdata(auxdev, plr);
+
+ return 0;
+
+err:
+ debugfs_remove_recursive(plr->dbgfs_dir);
+ return err;
+}
+
+static void intel_plr_remove(struct auxiliary_device *auxdev)
+{
+ struct tpmi_plr *plr = auxiliary_get_drvdata(auxdev);
+
+ debugfs_remove_recursive(plr->dbgfs_dir);
+}
+
+static const struct auxiliary_device_id intel_plr_id_table[] = {
+ { .name = "intel_vsec.tpmi-plr" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_plr_id_table);
+
+static struct auxiliary_driver intel_plr_aux_driver = {
+ .id_table = intel_plr_id_table,
+ .remove = intel_plr_remove,
+ .probe = intel_plr_probe,
+};
+module_auxiliary_driver(intel_plr_aux_driver);
+
+MODULE_IMPORT_NS(INTEL_TPMI);
+MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
+MODULE_DESCRIPTION("Intel TPMI PLR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 2ad2f8753e5d..01ae71c6df59 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -87,35 +87,26 @@ static int set_etr3(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_reg_map *map = pmc->map;
u32 reg;
- int err;
if (!map->etr3_offset)
return -EOPNOTSUPP;
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
/* check if CF9 is locked */
reg = pmc_core_reg_read(pmc, map->etr3_offset);
- if (reg & ETR3_CF9LOCK) {
- err = -EACCES;
- goto out_unlock;
- }
+ if (reg & ETR3_CF9LOCK)
+ return -EACCES;
/* write CF9 global reset bit */
reg |= ETR3_CF9GR;
pmc_core_reg_write(pmc, map->etr3_offset, reg);
reg = pmc_core_reg_read(pmc, map->etr3_offset);
- if (!(reg & ETR3_CF9GR)) {
- err = -EIO;
- goto out_unlock;
- }
-
- err = 0;
+ if (!(reg & ETR3_CF9GR))
+ return -EIO;
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
static umode_t etr3_is_visible(struct kobject *kobj,
struct attribute *attr,
@@ -127,9 +118,8 @@ static umode_t etr3_is_visible(struct kobject *kobj,
const struct pmc_reg_map *map = pmc->map;
u32 reg;
- mutex_lock(&pmcdev->lock);
- reg = pmc_core_reg_read(pmc, map->etr3_offset);
- mutex_unlock(&pmcdev->lock);
+ scoped_guard(mutex, &pmcdev->lock)
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}
@@ -145,12 +135,10 @@ static ssize_t etr3_show(struct device *dev,
if (!map->etr3_offset)
return -EOPNOTSUPP;
- mutex_lock(&pmcdev->lock);
-
- reg = pmc_core_reg_read(pmc, map->etr3_offset);
- reg &= ETR3_CF9GR | ETR3_CF9LOCK;
-
- mutex_unlock(&pmcdev->lock);
+ scoped_guard(mutex, &pmcdev->lock) {
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
+ reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+ }
return sysfs_emit(buf, "0x%08x", reg);
}
@@ -257,9 +245,9 @@ static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
}
}
-static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
+static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
- int idx;
+ unsigned int idx;
for (idx = 0; maps[idx]; idx++)
;/* Nothing */
@@ -272,8 +260,8 @@ static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
const char *str,
const struct pmc_bit_map **maps)
{
- int index, idx, len = 32, bit_mask, arr_size;
- u32 *lpm_regs;
+ unsigned int index, idx, len = 32, arr_size;
+ u32 bit_mask, *lpm_regs;
arr_size = pmc_core_lpm_get_arr_size(maps);
lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
@@ -326,13 +314,13 @@ static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
const struct pmc_bit_map **maps;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter, idx, ip = 0;
+ unsigned int index, iter, idx, ip = 0;
if (!pmc)
continue;
@@ -391,7 +379,8 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
const struct pmc_bit_map *map = pmc->map->mphy_sts;
u32 mphy_core_reg_low, mphy_core_reg_high;
u32 val_low, val_high;
- int index, err = 0;
+ unsigned int index;
+ int err = 0;
if (pmcdev->pmc_xram_read_bit) {
seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
@@ -401,20 +390,18 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
- if (pmc_core_send_msg(pmc, &mphy_core_reg_low) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_core_reg_low);
+ if (err)
+ return err;
msleep(10);
val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
- if (pmc_core_send_msg(pmc, &mphy_core_reg_high) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_core_reg_high);
+ if (err)
+ return err;
msleep(10);
val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
@@ -433,9 +420,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
"Power gated");
}
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
@@ -445,7 +430,8 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map *map = pmc->map->pll_sts;
u32 mphy_common_reg, val;
- int index, err = 0;
+ unsigned int index;
+ int err = 0;
if (pmcdev->pmc_xram_read_bit) {
seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
@@ -453,12 +439,11 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
}
mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
- if (pmc_core_send_msg(pmc, &mphy_common_reg) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_common_reg);
+ if (err)
+ return err;
/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
msleep(10);
@@ -470,9 +455,7 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
map[index].bit_mask & val ? "Active" : "Idle");
}
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
@@ -481,7 +464,8 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
struct pmc *pmc;
const struct pmc_reg_map *map;
u32 reg;
- int pmc_index, ltr_index;
+ unsigned int pmc_index;
+ int ltr_index;
ltr_index = value;
/* For platforms with multiple pmcs, ltr index value given by user
@@ -511,7 +495,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
if (ignore)
@@ -520,48 +504,56 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
reg &= ~BIT(ltr_index);
pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return 0;
}
-static ssize_t pmc_core_ltr_ignore_write(struct file *file,
- const char __user *userbuf,
- size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev,
+ const char __user *userbuf,
+ size_t count, int ignore)
{
- struct seq_file *s = file->private_data;
- struct pmc_dev *pmcdev = s->private;
- u32 buf_size, value;
+ u32 value;
int err;
- buf_size = min_t(u32, count, 64);
-
- err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+ err = kstrtou32_from_user(userbuf, count, 10, &value);
if (err)
return err;
- err = pmc_core_send_ltr_ignore(pmcdev, value, 1);
+ err = pmc_core_send_ltr_ignore(pmcdev, value, ignore);
+
+ return err ?: count;
+}
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
- return err == 0 ? count : err;
+ return pmc_core_ltr_write(pmcdev, userbuf, count, 1);
}
static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
return 0;
}
+DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore);
-static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
+static ssize_t pmc_core_ltr_restore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+
+ return pmc_core_ltr_write(pmcdev, userbuf, count, 0);
}
-static const struct file_operations pmc_core_ltr_ignore_ops = {
- .open = pmc_core_ltr_ignore_open,
- .read = seq_read,
- .write = pmc_core_ltr_ignore_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
+static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore);
static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
@@ -569,10 +561,10 @@ static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
const struct pmc_reg_map *map = pmc->map;
u32 fd;
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
if (!reset && !slps0_dbg_latch)
- goto out_unlock;
+ return;
fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
if (reset)
@@ -582,9 +574,6 @@ static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);
slps0_dbg_latch = false;
-
-out_unlock:
- mutex_unlock(&pmcdev->lock);
}
static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
@@ -639,17 +628,29 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
u32 ltr_raw_data, scale, val;
u16 snoop_ltr, nonsnoop_ltr;
- int i, index, ltr_index = 0;
+ unsigned int i, index, ltr_index = 0;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ struct pmc *pmc;
const struct pmc_bit_map *map;
+ u32 ltr_ign_reg;
+ pmc = pmcdev->pmcs[i];
if (!pmc)
continue;
+ scoped_guard(mutex, &pmcdev->lock)
+ ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
+
map = pmc->map->ltr_show_sts;
for (index = 0; map[index].name; index++) {
+ bool ltr_ign_data;
+
+ if (index > pmc->map->ltr_ignore_max)
+ ltr_ign_data = false;
+ else
+ ltr_ign_data = ltr_ign_reg & BIT(index);
+
decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
ltr_raw_data = pmc_core_reg_read(pmc,
map[index].bit_mask);
@@ -667,10 +668,10 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
decoded_snoop_ltr = val * convert_ltr_scale(scale);
}
- seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
+ seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
ltr_index, i, map[index].name, ltr_raw_data,
decoded_non_snoop_ltr,
- decoded_snoop_ltr);
+ decoded_snoop_ltr, ltr_ign_data);
ltr_index++;
}
}
@@ -727,7 +728,8 @@ static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
u32 offset = pmc->map->lpm_residency_offset;
- int i, mode;
+ unsigned int i;
+ int mode;
seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
@@ -743,7 +745,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -764,7 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -785,7 +787,8 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
{
struct pmc_dev *pmcdev = s->private;
- int i, mode;
+ unsigned int i;
+ int mode;
seq_printf(s, "%30s |", "Element");
pmc_for_each_mode(i, mode, pmcdev)
@@ -799,7 +802,8 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
struct pmc_dev *pmcdev = s->private;
u32 sts_offset;
u32 *lpm_req_regs;
- int num_maps, mp, pmc_index;
+ unsigned int mp, pmc_index;
+ int num_maps;
for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
struct pmc *pmc = pmcdev->pmcs[pmc_index];
@@ -921,9 +925,10 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ unsigned int idx;
bool c10;
u32 reg;
- int idx, mode;
+ int mode;
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
if (reg & LPM_STS_LATCH_MODE) {
@@ -955,7 +960,8 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
bool clear = false, c10 = false;
unsigned char buf[8];
- int idx, m, mode;
+ unsigned int idx;
+ int m, mode;
u32 reg;
if (count > sizeof(buf) - 1)
@@ -987,26 +993,22 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
}
if (clear) {
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
reg |= ETR3_CLEAR_LPM_EVENTS;
pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return count;
}
if (c10) {
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
reg &= ~LPM_STS_LATCH_MODE;
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return count;
}
@@ -1015,9 +1017,8 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
* and clear everything else.
*/
reg = LPM_STS_LATCH_MODE | BIT(mode);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
- mutex_unlock(&pmcdev->lock);
return count;
}
@@ -1028,7 +1029,7 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
struct pmc *pmc = s->private;
const struct pmc_bit_map *map = pmc->map->msr_sts;
u64 pcstate_count;
- int index;
+ unsigned int index;
for (index = 0; map[index].name ; index++) {
if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
@@ -1046,7 +1047,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
- int i, j;
+ unsigned int i, j;
if (!lpm_pri)
return false;
@@ -1081,7 +1082,8 @@ void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
u8 mode_order[LPM_MAX_NUM_MODES];
u32 lpm_pri;
u32 lpm_en;
- int mode, i, p;
+ unsigned int i;
+ int mode, p;
/* Use LPM Maps to indicate support for substates */
if (!pmc->map->lpm_num_maps)
@@ -1228,7 +1230,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
- &pmc_core_ltr_ignore_ops);
+ &pmc_core_ltr_ignore_fops);
+
+ debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops);
debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
@@ -1293,29 +1297,29 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
}
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, arl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, lnl_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, spt_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE, spt_core_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, spt_core_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE, spt_core_init),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, cnp_core_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, icl_core_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, icl_core_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE, cnp_core_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, cnp_core_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, tgl_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, icl_core_init),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, tgl_core_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, adl_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, adl_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, adl_core_init),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, mtl_core_init),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, arl_core_init),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, lnl_core_init),
{}
};
@@ -1373,7 +1377,7 @@ static void pmc_core_do_dmi_quirks(struct pmc *pmc)
static void pmc_core_clean_structure(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -1536,7 +1540,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map **maps = pmc->map->lpm_sts;
int offset = pmc->map->lpm_status_offset;
- int i;
+ unsigned int i;
/* Check if the suspend used S0ix */
if (pm_suspend_via_firmware())
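For illustration: the hunks above replace explicit mutex_lock()/mutex_unlock() pairs with the scope-based guard() helper from <linux/cleanup.h>. A minimal sketch of that pattern, using hypothetical names (example_lock, example_reg, example_set_bits) rather than anything from the driver:

#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_lock);
static u32 example_reg;

static int example_set_bits(u32 bits)
{
	/* The lock is dropped automatically when this scope is left. */
	guard(mutex)(&example_lock);

	example_reg |= bits;
	if (example_reg & BIT(31))
		return -EINVAL;	/* early return still unlocks */

	return 0;
}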
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index ddfba38c2104..3141d6cbc41b 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -35,14 +35,14 @@ static struct platform_device *pmc_core_device;
* other list may grow, but this list should not.
*/
static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
@@ -86,4 +86,5 @@ static void __exit pmc_core_platform_exit(void)
module_init(pmc_core_platform_init);
module_exit(pmc_core_platform_exit);
+MODULE_DESCRIPTION("Intel PMC Core platform driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
index 6bc9c4a603e0..f3a60e14d4c1 100644
--- a/drivers/platform/x86/intel/rst.c
+++ b/drivers/platform/x86/intel/rst.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+MODULE_DESCRIPTION("Intel Rapid Start Technology Driver");
MODULE_LICENSE("GPL");
static ssize_t irst_show_wakeup_events(struct device *dev,
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index cd25d0585324..31019a1a6d5e 100644
--- a/drivers/platform/x86/intel/smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Intel Smart Connect disabling driver");
MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 713c0d1fa85f..10e21563fa46 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -718,14 +718,6 @@ static struct miscdevice isst_if_char_driver = {
.fops = &isst_if_char_driver_ops,
};
-static const struct x86_cpu_id hpm_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
- {}
-};
-
static int isst_misc_reg(void)
{
mutex_lock(&punit_misc_dev_reg_lock);
@@ -733,12 +725,6 @@ static int isst_misc_reg(void)
goto unlock_exit;
if (!misc_usage_count) {
- const struct x86_cpu_id *id;
-
- id = x86_match_cpu(hpm_cpu_ids);
- if (id)
- isst_hpm_support = true;
-
misc_device_ret = isst_if_cpu_info_init();
if (misc_device_ret)
goto unlock_exit;
@@ -786,11 +772,12 @@ static void isst_misc_unreg(void)
*/
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
- int ret;
-
if (device_type >= ISST_IF_DEV_MAX)
return -EINVAL;
+ if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support)
+ return -ENODEV;
+
mutex_lock(&punit_misc_dev_open_lock);
/* Device is already open, we don't want to add new callbacks */
if (misc_device_open) {
@@ -805,15 +792,6 @@ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
punit_callbacks[device_type].registered = 1;
mutex_unlock(&punit_misc_dev_open_lock);
- ret = isst_misc_reg();
- if (ret) {
- /*
- * No need of mutex as the misc device register failed
- * as no one can open device yet. Hence no contention.
- */
- punit_callbacks[device_type].registered = 0;
- return ret;
- }
return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
@@ -829,7 +807,6 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
*/
void isst_if_cdev_unregister(int device_type)
{
- isst_misc_unreg();
mutex_lock(&punit_misc_dev_open_lock);
punit_callbacks[device_type].def_ioctl = NULL;
punit_callbacks[device_type].registered = 0;
@@ -839,5 +816,51 @@ void isst_if_cdev_unregister(int device_type)
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+#define SST_HPM_SUPPORTED 0x01
+#define SST_MBOX_SUPPORTED 0x02
+
+static const struct x86_cpu_id isst_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, SST_MBOX_SUPPORTED),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids);
+
+static int __init isst_if_common_init(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(isst_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ if (id->driver_data == SST_HPM_SUPPORTED) {
+ isst_hpm_support = true;
+ } else if (id->driver_data == SST_MBOX_SUPPORTED) {
+ u64 data;
+
+ /* Can fail only on some Skylake-X generations */
+ if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
+ rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data))
+ return -ENODEV;
+ }
+
+ return isst_misc_reg();
+}
+module_init(isst_if_common_init)
+
+static void __exit isst_if_common_exit(void)
+{
+ isst_misc_unreg();
+}
+module_exit(isst_if_common_exit)
+
MODULE_DESCRIPTION("ISST common interface module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
index 1004f2c9cca8..378055fe1d16 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -16,6 +16,9 @@
#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251
#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259
+#define MSR_OS_MAILBOX_INTERFACE 0xB0
+#define MSR_OS_MAILBOX_DATA 0xB1
+
/*
* Validate maximum commands in a single request.
* This is enough to handle command to every core in one ioctl, or all
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
index 1b6eab071068..c4b7af00352b 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
@@ -21,8 +21,6 @@
#include "isst_if_common.h"
-#define MSR_OS_MAILBOX_INTERFACE 0xB0
-#define MSR_OS_MAILBOX_DATA 0xB1
#define MSR_OS_MAILBOX_BUSY_BIT 31
/*
@@ -161,7 +159,7 @@ static struct notifier_block isst_pm_nb = {
};
static const struct x86_cpu_id isst_if_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 7bac7841ff0a..7fa360073f6e 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -1610,8 +1610,8 @@ void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
/* Free the package instance when all the partitions are removed */
if (!tpmi_sst->partition_mask_current) {
- kfree(tpmi_sst);
isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+ kfree(tpmi_sst);
}
mutex_unlock(&isst_tpmi_dev_lock);
}
diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c
index 1d4d0fbfd63c..70e5736c44c7 100644
--- a/drivers/platform/x86/intel/telemetry/debugfs.c
+++ b/drivers/platform/x86/intel/telemetry/debugfs.c
@@ -308,8 +308,8 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_debugfs_conf),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
{}
};
MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids);
diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c
index 06311d0e9451..767a0bc6c7ad 100644
--- a/drivers/platform/x86/intel/telemetry/pltdrv.c
+++ b/drivers/platform/x86/intel/telemetry/pltdrv.c
@@ -177,8 +177,8 @@ static struct telemetry_plt_config telem_glk_config = {
};
static const struct x86_cpu_id telemetry_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_config),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_glk_config),
{}
};
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index 6c0cbccd80bb..83e8b1fe53b3 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -357,6 +357,15 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev,
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);
+struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
+ struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
+
+ return tpmi_info->dbgfs_dir;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_debugfs_dir, INTEL_TPMI);
+
static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
{
struct intel_tpmi_info *tpmi_info = s->private;
@@ -577,6 +586,8 @@ static const char *intel_tpmi_name(enum intel_tpmi_id id)
return "uncore";
case TPMI_ID_SST:
return "sst";
+ case TPMI_ID_PLR:
+ return "plr";
default:
return NULL;
}
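tpmi_get_debugfs_dir() is exported so TPMI feature drivers can park their debugfs entries under the parent TPMI directory. A minimal consumer sketch with hypothetical example_* names; the assumption is that the declaration is reachable through the intel_tpmi.h header already used by TPMI feature drivers:

#include <linux/auxiliary_bus.h>
#include <linux/debugfs.h>
#include <linux/intel_tpmi.h>
#include <linux/module.h>

MODULE_IMPORT_NS(INTEL_TPMI);

static int example_tpmi_probe(struct auxiliary_device *auxdev,
			      const struct auxiliary_device_id *id)
{
	struct dentry *parent = tpmi_get_debugfs_dir(auxdev);

	/* Group this feature's debugfs files under the TPMI parent dir. */
	if (parent)
		debugfs_create_dir("example_feature", parent);

	return 0;
}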
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
new file mode 100644
index 000000000000..4eb02553957c
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Mapping of TPMI power domains to CPUs
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
+
+#include "tpmi_power_domains.h"
+
+#define MSR_PM_LOGICAL_ID 0x54
+
+/*
+ * Struct of MSR 0x54
+ * [15:11] PM_DOMAIN_ID
+ * [10:3] MODULE_ID (aka IDI_AGENT_ID)
+ * [2:0] LP_ID
+ * For Atom:
+ * [2] Always 0
+ * [1:0] core ID within module
+ * For Core:
+ * [2:1] Always 0
+ * [0] thread ID
+ */
+
+#define LP_ID_MASK GENMASK_ULL(2, 0)
+#define MODULE_ID_MASK GENMASK_ULL(10, 3)
+#define PM_DOMAIN_ID_MASK GENMASK_ULL(15, 11)
+
+/**
+ * struct tpmi_cpu_info - Mapping information for a CPU
+ * @hnode: Used to add mapping information to hash list
+ * @linux_cpu: Linux CPU number
+ * @pkg_id: Package ID of this CPU
+ * @punit_thread_id: Punit thread id of this CPU
+ * @punit_core_id: Punit core id
+ * @punit_domain_id: Power domain id from Punit
+ *
+ * Structure to store mapping information for a Linux CPU
+ * to a Punit core, thread and power domain.
+ */
+struct tpmi_cpu_info {
+ struct hlist_node hnode;
+ int linux_cpu;
+ u8 pkg_id;
+ u8 punit_thread_id;
+ u8 punit_core_id;
+ u8 punit_domain_id;
+};
+
+static DEFINE_PER_CPU(struct tpmi_cpu_info, tpmi_cpu_info);
+
+/* The dynamically assigned cpu hotplug state to free later */
+static enum cpuhp_state tpmi_hp_state __read_mostly;
+
+#define MAX_POWER_DOMAINS 8
+
+static cpumask_t *tpmi_power_domain_mask;
+
+/* Lock to protect tpmi_power_domain_mask and tpmi_cpu_hash */
+static DEFINE_MUTEX(tpmi_lock);
+
+static const struct x86_cpu_id tpmi_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids);
+
+static DECLARE_HASHTABLE(tpmi_cpu_hash, 8);
+
+static bool tpmi_domain_is_valid(struct tpmi_cpu_info *info)
+{
+ return info->pkg_id < topology_max_packages() &&
+ info->punit_domain_id < MAX_POWER_DOMAINS;
+}
+
+int tpmi_get_linux_cpu_number(int package_id, int domain_id, int punit_core_id)
+{
+ struct tpmi_cpu_info *info;
+ int ret = -EINVAL;
+
+ guard(mutex)(&tpmi_lock);
+ hash_for_each_possible(tpmi_cpu_hash, info, hnode, punit_core_id) {
+ if (info->punit_domain_id == domain_id && info->pkg_id == package_id) {
+ ret = info->linux_cpu;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_cpu_number, INTEL_TPMI_POWER_DOMAIN);
+
+int tpmi_get_punit_core_number(int cpu_no)
+{
+ if (cpu_no >= num_possible_cpus())
+ return -EINVAL;
+
+ return per_cpu(tpmi_cpu_info, cpu_no).punit_core_id;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_punit_core_number, INTEL_TPMI_POWER_DOMAIN);
+
+int tpmi_get_power_domain_id(int cpu_no)
+{
+ if (cpu_no >= num_possible_cpus())
+ return -EINVAL;
+
+ return per_cpu(tpmi_cpu_info, cpu_no).punit_domain_id;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_id, INTEL_TPMI_POWER_DOMAIN);
+
+cpumask_t *tpmi_get_power_domain_mask(int cpu_no)
+{
+ struct tpmi_cpu_info *info;
+ cpumask_t *mask;
+ int index;
+
+ if (cpu_no >= num_possible_cpus())
+ return NULL;
+
+ info = &per_cpu(tpmi_cpu_info, cpu_no);
+ if (!tpmi_domain_is_valid(info))
+ return NULL;
+
+ index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id;
+ guard(mutex)(&tpmi_lock);
+ mask = &tpmi_power_domain_mask[index];
+
+ return mask;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_mask, INTEL_TPMI_POWER_DOMAIN);
+
+static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
+{
+ u64 data;
+ int ret;
+
+ ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ if (ret)
+ return ret;
+
+ info->punit_domain_id = FIELD_GET(PM_DOMAIN_ID_MASK, data);
+ if (info->punit_domain_id >= MAX_POWER_DOMAINS)
+ return -EINVAL;
+
+ info->punit_thread_id = FIELD_GET(LP_ID_MASK, data);
+ info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data);
+ info->pkg_id = topology_physical_package_id(cpu);
+ info->linux_cpu = cpu;
+
+ return 0;
+}
+
+static int tpmi_cpu_online(unsigned int cpu)
+{
+ struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu);
+ int ret, index;
+
+ /* Don't fail CPU online because of a bad CPU mapping */
+ ret = tpmi_get_logical_id(cpu, info);
+ if (ret)
+ return 0;
+
+ index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id;
+
+ guard(mutex)(&tpmi_lock);
+ cpumask_set_cpu(cpu, &tpmi_power_domain_mask[index]);
+ hash_add(tpmi_cpu_hash, &info->hnode, info->punit_core_id);
+
+ return 0;
+}
+
+static int __init tpmi_init(void)
+{
+ const struct x86_cpu_id *id;
+ u64 data;
+ int ret;
+
+ id = x86_match_cpu(tpmi_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ /* Check for MSR 0x54 presence */
+ ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ if (ret)
+ return ret;
+
+ tpmi_power_domain_mask = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS),
+ sizeof(*tpmi_power_domain_mask), GFP_KERNEL);
+ if (!tpmi_power_domain_mask)
+ return -ENOMEM;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/tpmi_power_domains:online",
+ tpmi_cpu_online, NULL);
+ if (ret < 0) {
+ kfree(tpmi_power_domain_mask);
+ return ret;
+ }
+
+ tpmi_hp_state = ret;
+
+ return 0;
+}
+module_init(tpmi_init)
+
+static void __exit tpmi_exit(void)
+{
+ cpuhp_remove_state(tpmi_hp_state);
+ kfree(tpmi_power_domain_mask);
+}
+module_exit(tpmi_exit)
+
+MODULE_DESCRIPTION("TPMI Power Domains Mapping");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.h b/drivers/platform/x86/intel/tpmi_power_domains.h
new file mode 100644
index 000000000000..e35750dd9273
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi_power_domains.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Mapping of TPMI power domain and CPUs
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#ifndef _TPMI_POWER_DOMAINS_H_
+#define _TPMI_POWER_DOMAINS_H_
+
+#include <linux/cpumask.h>
+
+int tpmi_get_linux_cpu_number(int package_id, int die_id, int punit_core_id);
+int tpmi_get_punit_core_number(int cpu_no);
+int tpmi_get_power_domain_id(int cpu_no);
+cpumask_t *tpmi_get_power_domain_mask(int cpu_no);
+
+#endif
diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
index 892140b62898..79a0bcdeffb8 100644
--- a/drivers/platform/x86/intel/turbo_max_3.c
+++ b/drivers/platform/x86/intel/turbo_max_3.c
@@ -114,8 +114,8 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
}
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
{}
};
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 33bb58dc3f78..4e880585cbe4 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -19,9 +19,8 @@ static int uncore_instance_count;
static DEFINE_IDA(intel_uncore_ida);
/* callbacks for actual HW read/write */
-static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max);
-static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
-static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
+static int (*uncore_read)(struct uncore_data *data, unsigned int *value, enum uncore_index index);
+static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index);
static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
@@ -44,27 +43,22 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr
return sprintf(buf, "%u\n", data->package_id);
}
-static ssize_t show_min_max_freq_khz(struct uncore_data *data,
- char *buf, int min_max)
+static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index)
{
- unsigned int min, max;
+ unsigned int value;
int ret;
mutex_lock(&uncore_lock);
- ret = uncore_read(data, &min, &max);
+ ret = uncore_read(data, &value, index);
mutex_unlock(&uncore_lock);
if (ret)
return ret;
- if (min_max)
- return sprintf(buf, "%u\n", max);
-
- return sprintf(buf, "%u\n", min);
+ return sprintf(buf, "%u\n", value);
}
-static ssize_t store_min_max_freq_khz(struct uncore_data *data,
- const char *buf, ssize_t count,
- int min_max)
+static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count,
+ enum uncore_index index)
{
unsigned int input;
int ret;
@@ -73,7 +67,7 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
return -EINVAL;
mutex_lock(&uncore_lock);
- ret = uncore_write(data, input, min_max);
+ ret = uncore_write(data, input, index);
mutex_unlock(&uncore_lock);
if (ret)
@@ -82,56 +76,32 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
return count;
}
-static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
-{
- unsigned int freq;
- int ret;
-
- mutex_lock(&uncore_lock);
- ret = uncore_read_freq(data, &freq);
- mutex_unlock(&uncore_lock);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", freq);
-}
-
-#define store_uncore_min_max(name, min_max) \
+#define store_uncore_attr(name, index) \
static ssize_t store_##name(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t count) \
{ \
struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
- return store_min_max_freq_khz(data, buf, count, \
- min_max); \
+ return store_attr(data, buf, count, index); \
}
-#define show_uncore_min_max(name, min_max) \
+#define show_uncore_attr(name, index) \
static ssize_t show_##name(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf)\
{ \
struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
- return show_min_max_freq_khz(data, buf, min_max); \
- }
-
-#define show_uncore_perf_status(name) \
- static ssize_t show_##name(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf)\
- { \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
- \
- return show_perf_status_freq_khz(data, buf); \
+ return show_attr(data, buf, index); \
}
-store_uncore_min_max(min_freq_khz, 0);
-store_uncore_min_max(max_freq_khz, 1);
+store_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+store_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ);
-show_uncore_min_max(min_freq_khz, 0);
-show_uncore_min_max(max_freq_khz, 1);
+show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ);
-show_uncore_perf_status(current_freq_khz);
+show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ);
#define show_uncore_data(member_name) \
static ssize_t show_##member_name(struct kobject *kobj, \
@@ -198,7 +168,7 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
- ret = uncore_read_freq(data, &freq);
+ ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ);
if (!ret)
data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
@@ -238,7 +208,8 @@ int uncore_freq_add_entry(struct uncore_data *data, int cpu)
sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id);
}
- uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz);
+ uncore_read(data, &data->initial_min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+ uncore_read(data, &data->initial_max_freq_khz, UNCORE_INDEX_MAX_FREQ);
ret = create_attr_group(data, data->name);
if (ret) {
@@ -269,15 +240,15 @@ void uncore_freq_remove_die_entry(struct uncore_data *data)
}
EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY);
-int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
- int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max),
- int (*read_freq)(struct uncore_data *data, unsigned int *freq))
+int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index),
+ int (*write)(struct uncore_data *data, unsigned int input,
+ enum uncore_index index))
{
mutex_lock(&uncore_lock);
- uncore_read = read_control_freq;
- uncore_write = write_control_freq;
- uncore_read_freq = read_freq;
+ uncore_read = read;
+ uncore_write = write;
if (!uncore_root_kobj) {
struct device *dev_root = bus_get_dev_root(&cpu_subsys);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 0e5bf507e555..4c245b945e4e 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -66,9 +66,16 @@ struct uncore_data {
#define UNCORE_DOMAIN_ID_INVALID -1
-int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
- int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max),
- int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq));
+enum uncore_index {
+ UNCORE_INDEX_MIN_FREQ,
+ UNCORE_INDEX_MAX_FREQ,
+ UNCORE_INDEX_CURRENT_FREQ,
+};
+
+int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index),
+ int (*write)(struct uncore_data *data, unsigned int input,
+ enum uncore_index index));
void uncore_freq_common_exit(void);
int uncore_freq_add_entry(struct uncore_data *data, int cpu);
void uncore_freq_remove_die_entry(struct uncore_data *data);
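With the refactor above, a backend registers one read and one write callback multiplexed on enum uncore_index instead of three separate function pointers. A minimal backend sketch; the example_* names and the placeholder kHz values are hypothetical, the callback signatures and enum are the ones declared in this header:

#include <linux/errno.h>
#include <linux/init.h>

#include "uncore-frequency-common.h"

static int example_read(struct uncore_data *data, unsigned int *value,
			enum uncore_index index)
{
	switch (index) {
	case UNCORE_INDEX_MIN_FREQ:
		*value = 800000;	/* placeholder values in kHz */
		return 0;
	case UNCORE_INDEX_MAX_FREQ:
		*value = 2400000;
		return 0;
	case UNCORE_INDEX_CURRENT_FREQ:
		*value = 1600000;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int example_write(struct uncore_data *data, unsigned int input,
			 enum uncore_index index)
{
	/* A real backend would program the hardware here. */
	return 0;
}

static int __init example_register(void)
{
	return uncore_freq_common_init(example_read, example_write);
}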
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index bb8e72deb354..9fa3037c03d1 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -69,26 +69,31 @@ struct tpmi_uncore_struct {
bool write_blocked;
};
-#define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15)
-#define UNCORE_GENMASK_MAX_RATIO GENMASK_ULL(14, 8)
-#define UNCORE_GENMASK_CURRENT_RATIO GENMASK_ULL(6, 0)
+/* Bit definitions for STATUS register */
+#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0)
+
+/* Bit definitions for CONTROL register */
+#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8)
+#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15)
/* Helper function to read MMIO offset for max/min control frequency */
static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
- unsigned int *min, unsigned int *max)
+ unsigned int *value, enum uncore_index index)
{
u64 control;
control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
- *max = FIELD_GET(UNCORE_GENMASK_MAX_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
- *min = FIELD_GET(UNCORE_GENMASK_MIN_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (index == UNCORE_INDEX_MAX_FREQ)
+ *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ else
+ *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
}
-#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_GENMASK_MAX_RATIO)
+#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK)
-/* Callback for sysfs read for max/min frequencies. Called under mutex locks */
-static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
- unsigned int *max)
+/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
@@ -96,10 +101,11 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
if (cluster_info->root_domain) {
struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
- int i, _min = 0, _max = 0;
+ unsigned int min, max, v;
+ int i;
- *min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
- *max = 0;
+ min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
+ max = 0;
/*
* Get the max/min by looking at each cluster. Get the lowest
@@ -110,35 +116,41 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
- &_min, &_max);
- if (*min > _min)
- *min = _min;
- if (*max < _max)
- *max = _max;
+ &v, index);
+ if (v < min)
+ min = v;
+ if (v > max)
+ max = v;
}
}
+
+ if (index == UNCORE_INDEX_MIN_FREQ)
+ *value = min;
+ else
+ *value = max;
+
return 0;
}
- read_control_freq(cluster_info, min, max);
+ read_control_freq(cluster_info, value, index);
return 0;
}
/* Helper function to write MMIO offset for max/min control frequency */
static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
- unsigned int min_max)
+ unsigned int index)
{
u64 control;
control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
- if (min_max) {
- control &= ~UNCORE_GENMASK_MAX_RATIO;
- control |= FIELD_PREP(UNCORE_GENMASK_MAX_RATIO, input);
+ if (index == UNCORE_INDEX_MAX_FREQ) {
+ control &= ~UNCORE_MAX_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
} else {
- control &= ~UNCORE_GENMASK_MIN_RATIO;
- control |= FIELD_PREP(UNCORE_GENMASK_MIN_RATIO, input);
+ control &= ~UNCORE_MIN_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
}
writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
@@ -146,7 +158,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un
/* Callback for sysfs write for max/min frequencies. Called under mutex locks */
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
- unsigned int min_max)
+ enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
struct tpmi_uncore_struct *uncore_root;
@@ -171,10 +183,10 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
- input, min_max);
+ input, index);
}
- if (min_max)
+ if (index == UNCORE_INDEX_MAX_FREQ)
uncore_root->max_ratio = input;
else
uncore_root->min_ratio = input;
@@ -182,18 +194,20 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
return 0;
}
- if (min_max && uncore_root->max_ratio && uncore_root->max_ratio < input)
+ if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio &&
+ uncore_root->max_ratio < input)
return -EINVAL;
- if (!min_max && uncore_root->min_ratio && uncore_root->min_ratio > input)
+ if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio &&
+ uncore_root->min_ratio > input)
return -EINVAL;
- write_control_freq(cluster_info, input, min_max);
+ write_control_freq(cluster_info, input, index);
return 0;
}
-/* Callback for sysfs read for the current uncore frequency. Called under mutex locks */
+/* Helper for sysfs read for the current uncore frequency. Called under mutex locks */
static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
struct tpmi_uncore_cluster_info *cluster_info;
@@ -204,11 +218,29 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
return -ENODATA;
status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
- *freq = FIELD_GET(UNCORE_GENMASK_CURRENT_RATIO, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
+/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
+static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_read_control_freq(data, value, index);
+
+ case UNCORE_INDEX_CURRENT_FREQ:
+ return uncore_read_freq(data, value);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
{
int i;
@@ -259,8 +291,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
return -EINVAL;
/* Register callbacks to uncore core */
- ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
- uncore_read_freq);
+ ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
if (ret)
return ret;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index b89c0dda9e5d..a450b8a6bcec 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -14,6 +14,7 @@
* Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
*/
+#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -36,8 +37,13 @@ static enum cpuhp_state uncore_hp_state __read_mostly;
#define MSR_UNCORE_PERF_STATUS 0x621
#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
-static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
- unsigned int *max)
+#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(6, 0)
+#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(14, 8)
+
+#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0)
+
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index)
{
u64 cap;
int ret;
@@ -49,20 +55,22 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
if (ret)
return ret;
- *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
- *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (index == UNCORE_INDEX_MAX_FREQ)
+ *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ else
+ *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
- unsigned int min_max)
+ enum uncore_index index)
{
int ret;
u64 cap;
input /= UNCORE_FREQ_KHZ_MULTIPLIER;
- if (!input || input > 0x7F)
+ if (!input || input > FIELD_MAX(UNCORE_MAX_RATIO_MASK))
return -EINVAL;
if (data->control_cpu < 0)
@@ -72,12 +80,12 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
if (ret)
return ret;
- if (min_max) {
- cap &= ~0x7F;
- cap |= input;
+ if (index == UNCORE_INDEX_MAX_FREQ) {
+ cap &= ~UNCORE_MAX_RATIO_MASK;
+ cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
} else {
- cap &= ~GENMASK(14, 8);
- cap |= (input << 8);
+ cap &= ~UNCORE_MIN_RATIO_MASK;
+ cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
}
ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
@@ -101,11 +109,28 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
if (ret)
return ret;
- *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, ratio) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
+static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_read_control_freq(data, value, index);
+
+ case UNCORE_INDEX_CURRENT_FREQ:
+ return uncore_read_freq(data, value);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
/* Caller provides protection */
static struct uncore_data *uncore_get_instance(unsigned int cpu)
{
@@ -197,34 +222,34 @@ static struct notifier_block uncore_pm_nb = {
};
static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
@@ -248,8 +273,7 @@ static int __init intel_uncore_init(void)
if (!uncore_instances)
return -ENOMEM;
- ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
- uncore_read_freq);
+ ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
if (ret)
goto err_free;
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 84c1353eb12b..9b7ce03ba085 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -24,6 +24,7 @@
#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
+MODULE_DESCRIPTION("Intel Virtual Button driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AceLan Kao");
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 73ec4460a151..c62c3c4ec20a 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -62,6 +62,7 @@
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
#include "intel_ips.h"
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -1284,7 +1285,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
struct ips_mcp_limits *limits = NULL;
u16 tdp;
- if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
+ if (!(boot_cpu_data.x86_vfm == INTEL_WESTMERE)) {
dev_info(ips->dev, "Non-IPS CPU detected.\n");
return NULL;
}
diff --git a/drivers/platform/x86/intel_scu_wdt.c b/drivers/platform/x86/intel_scu_wdt.c
index a5031a25632e..d0b6637861d3 100644
--- a/drivers/platform/x86/intel_scu_wdt.c
+++ b/drivers/platform/x86/intel_scu_wdt.c
@@ -50,7 +50,7 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
};
static const struct x86_cpu_id intel_mid_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tangier_pdata),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &tangier_pdata),
{}
};
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index d0fee5d375d7..9c7857842caf 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -39,8 +39,6 @@ MODULE_LICENSE("GPL");
#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
#define WMI_EVENT_GUID WMI_EVENT_GUID0
-#define WMAB_METHOD "\\XINI.WMAB"
-#define WMBB_METHOD "\\XINI.WMBB"
#define SB_GGOV_METHOD "\\_SB.GGOV"
#define GOV_TLED 0x2020008
#define WM_GET 1
@@ -74,7 +72,7 @@ static u32 inited;
static int battery_limit_use_wmbb;
static struct led_classdev kbd_backlight;
-static enum led_brightness get_kbd_backlight_level(void);
+static enum led_brightness get_kbd_backlight_level(struct device *dev);
static const struct key_entry wmi_keymap[] = {
{KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
@@ -84,7 +82,6 @@ static const struct key_entry wmi_keymap[] = {
* this key both sends an event and
* changes backlight level.
*/
- {KE_KEY, 0x80, {KEY_RFKILL} },
{KE_END, 0}
};
@@ -128,11 +125,10 @@ static int ggov(u32 arg0)
return res;
}
-static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+static union acpi_object *lg_wmab(struct device *dev, u32 method, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
- acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -143,29 +139,22 @@ static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
args[2].type = ACPI_TYPE_INTEGER;
args[2].integer.value = arg2;
- status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- pr_err("Cannot get handle");
- return NULL;
- }
-
arg.count = 3;
arg.pointer = args;
- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMAB", &arg, &buffer);
if (ACPI_FAILURE(status)) {
- acpi_handle_err(handle, "WMAB: call failed.\n");
+ dev_err(dev, "WMAB: call failed.\n");
return NULL;
}
return buffer.pointer;
}
-static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
- acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
u8 buf[32];
@@ -181,18 +170,12 @@ static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
args[2].buffer.length = 32;
args[2].buffer.pointer = buf;
- status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- pr_err("Cannot get handle");
- return NULL;
- }
-
arg.count = 3;
arg.pointer = args;
- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB", &arg, &buffer);
if (ACPI_FAILURE(status)) {
- acpi_handle_err(handle, "WMAB: call failed.\n");
+ dev_err(dev, "WMBB: call failed.\n");
return NULL;
}
@@ -223,7 +206,7 @@ static void wmi_notify(u32 value, void *context)
if (eventcode == 0x10000000) {
led_classdev_notify_brightness_hw_changed(
- &kbd_backlight, get_kbd_backlight_level());
+ &kbd_backlight, get_kbd_backlight_level(kbd_backlight.dev->parent));
} else {
key = sparse_keymap_entry_from_scancode(
wmi_input_dev, eventcode);
@@ -272,14 +255,7 @@ static void wmi_input_setup(void)
static void acpi_notify(struct acpi_device *device, u32 event)
{
- struct key_entry *key;
-
acpi_handle_debug(device->handle, "notify: %d\n", event);
- if (inited & INIT_SPARSE_KEYMAP) {
- key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
- if (key && key->type == KE_KEY)
- sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
- }
}
static ssize_t fan_mode_store(struct device *dev,
@@ -295,7 +271,7 @@ static ssize_t fan_mode_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -306,9 +282,9 @@ static ssize_t fan_mode_store(struct device *dev,
m = r->integer.value;
kfree(r);
- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
kfree(r);
- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
kfree(r);
return count;
@@ -320,7 +296,7 @@ static ssize_t fan_mode_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -347,7 +323,7 @@ static ssize_t usb_charge_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
+ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_SET, value);
if (!r)
return -EIO;
@@ -361,7 +337,7 @@ static ssize_t usb_charge_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
+ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_GET, 0);
if (!r)
return -EIO;
@@ -389,7 +365,7 @@ static ssize_t reader_mode_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_READER_MODE, WM_SET, value);
+ r = lg_wmab(dev, WM_READER_MODE, WM_SET, value);
if (!r)
return -EIO;
@@ -403,7 +379,7 @@ static ssize_t reader_mode_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_READER_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_READER_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -431,7 +407,7 @@ static ssize_t fn_lock_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_FN_LOCK, WM_SET, value);
+ r = lg_wmab(dev, WM_FN_LOCK, WM_SET, value);
if (!r)
return -EIO;
@@ -445,7 +421,7 @@ static ssize_t fn_lock_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
+ r = lg_wmab(dev, WM_FN_LOCK, WM_GET, 0);
if (!r)
return -EIO;
@@ -475,9 +451,9 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
union acpi_object *r;
if (battery_limit_use_wmbb)
- r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
+ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_SET, value);
else
- r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
+ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_SET, value);
if (!r)
return -EIO;
@@ -496,7 +472,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
union acpi_object *r;
if (battery_limit_use_wmbb) {
- r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
+ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
@@ -507,7 +483,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
status = r->buffer.pointer[0x10];
} else {
- r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
+ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
@@ -586,7 +562,7 @@ static void tpad_led_set(struct led_classdev *cdev,
{
union acpi_object *r;
- r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
+ r = lg_wmab(cdev->dev->parent, WM_TLED, WM_SET, brightness > LED_OFF);
kfree(r);
}
@@ -608,16 +584,16 @@ static void kbd_backlight_set(struct led_classdev *cdev,
val = 0;
if (brightness >= LED_FULL)
val = 0x24;
- r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
+ r = lg_wmab(cdev->dev->parent, WM_KEY_LIGHT, WM_SET, val);
kfree(r);
}
-static enum led_brightness get_kbd_backlight_level(void)
+static enum led_brightness get_kbd_backlight_level(struct device *dev)
{
union acpi_object *r;
int val;
- r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
+ r = lg_wmab(dev, WM_KEY_LIGHT, WM_GET, 0);
if (!r)
return LED_OFF;
@@ -645,7 +621,7 @@ static enum led_brightness get_kbd_backlight_level(void)
static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
{
- return get_kbd_backlight_level();
+ return get_kbd_backlight_level(cdev->dev->parent);
}
static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
@@ -672,6 +648,11 @@ static struct platform_driver pf_driver = {
static int acpi_add(struct acpi_device *device)
{
+ struct platform_device_info pdev_info = {
+ .fwnode = acpi_fwnode_handle(device),
+ .name = PLATFORM_NAME,
+ .id = PLATFORM_DEVID_NONE,
+ };
int ret;
const char *product;
int year = 2017;
@@ -683,9 +664,7 @@ static int acpi_add(struct acpi_device *device)
if (ret)
return ret;
- pf_device = platform_device_register_simple(PLATFORM_NAME,
- PLATFORM_DEVID_NONE,
- NULL, 0);
+ pf_device = platform_device_register_full(&pdev_info);
if (IS_ERR(pf_device)) {
ret = PTR_ERR(pf_device);
pf_device = NULL;
@@ -776,7 +755,7 @@ static void acpi_remove(struct acpi_device *device)
}
static const struct acpi_device_id device_ids[] = {
- {"LGEX0815", 0},
+ {"LGEX0820", 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, device_ids);
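The lg-laptop changes above drop the hard-coded absolute ACPI paths (\\XINI.WMAB, \\XINI.WMBB) and evaluate the control methods relative to the device's own ACPI handle. A minimal sketch of that idiom, with a hypothetical method name ("EXMP") and a single integer argument:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_call_acpi_method(struct device *dev, u64 arg)
{
	union acpi_object in;
	struct acpi_object_list args = { .count = 1, .pointer = &in };
	acpi_status status;

	in.type = ACPI_TYPE_INTEGER;
	in.integer.value = arg;

	/* "EXMP" is resolved relative to the device's own ACPI node. */
	status = acpi_evaluate_object(ACPI_HANDLE(dev), "EXMP", &args, NULL);

	return ACPI_FAILURE(status) ? -EIO : 0;
}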
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 3bf5d2243491..31f38309b389 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -24,7 +24,7 @@
#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2)
static const struct x86_cpu_id p2sb_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
{}
};
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 97b9c6392230..3be016cfe601 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -131,7 +131,7 @@ static int smi_spi_probe(struct platform_device *pdev, struct smi *smi,
ctlr = spi_dev->controller;
- strscpy(spi_dev->modalias, inst_array[i].type, sizeof(spi_dev->modalias));
+ strscpy(spi_dev->modalias, inst_array[i].type);
ret = smi_get_irq(pdev, adev, &inst_array[i]);
if (ret < 0) {
@@ -205,7 +205,7 @@ static int smi_i2c_probe(struct platform_device *pdev, struct smi *smi,
for (i = 0; i < count && inst_array[i].type; i++) {
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, inst_array[i].type, I2C_NAME_SIZE);
+ strscpy(board_info.type, inst_array[i].type);
snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev), inst_array[i].type, i);
board_info.dev_name = name;
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
index 31a139d87d9a..5edc294de6e4 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
@@ -45,6 +45,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Apollo Lake GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:apollolake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
index a7676f224075..e6a56d14b505 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
@@ -45,6 +45,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Elkhart Lake GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:elkhartlake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
index 5e77e05fdb5d..f8849d0e48a8 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
@@ -81,6 +81,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Nuvoton GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt gpio_f7188x platform:elkhartlake-pinctrl platform:alderlake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt.c b/drivers/platform/x86/siemens/simatic-ipc-batt.c
index c6dd263b4ee3..d9aff10608cf 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt.c
@@ -247,6 +247,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS core battery driver for Siemens Simatic IPCs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/platform/x86/siemens/simatic-ipc.c b/drivers/platform/x86/siemens/simatic-ipc.c
index 8ca6e277fa03..7039874d8f11 100644
--- a/drivers/platform/x86/siemens/simatic-ipc.c
+++ b/drivers/platform/x86/siemens/simatic-ipc.c
@@ -231,6 +231,7 @@ static void __exit simatic_ipc_exit_module(void)
module_init(simatic_ipc_init_module);
module_exit(simatic_ipc_exit_module);
+MODULE_DESCRIPTION("Siemens SIMATIC IPC platform driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 0f2264bb7577..4cfb53206cb8 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -1508,7 +1508,7 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type,
if (!new_pwd)
return NULL;
- strscpy(new_pwd->kbdlang, "us", TLMI_LANG_MAXLEN);
+ strscpy(new_pwd->kbdlang, "us");
new_pwd->encoding = TLMI_ENCODING_ASCII;
new_pwd->pwd_type = pwd_type;
new_pwd->role = pwd_role;
@@ -1582,7 +1582,7 @@ static int tlmi_analyze(void)
goto fail_clear_attr;
}
setting->index = i;
- strscpy(setting->display_name, item, TLMI_SETTINGS_MAXLEN);
+ strscpy(setting->display_name, item);
/* If BIOS selections supported, load those */
if (tlmi_priv.can_get_bios_selections) {
ret = tlmi_get_bios_selections(setting->display_name,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 397b409064c9..f269ca1ff771 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7416,10 +7416,8 @@ static int __init volume_create_alsa_mixer(void)
data = card->private_data;
data->card = card;
- strscpy(card->driver, TPACPI_ALSA_DRVNAME,
- sizeof(card->driver));
- strscpy(card->shortname, TPACPI_ALSA_SHRTNAME,
- sizeof(card->shortname));
+ strscpy(card->driver, TPACPI_ALSA_DRVNAME);
+ strscpy(card->shortname, TPACPI_ALSA_SHRTNAME);
snprintf(card->mixername, sizeof(card->mixername), "ThinkPad EC %s",
(thinkpad_id.ec_version_str) ?
thinkpad_id.ec_version_str : "(unknown)");
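The strscpy() conversions in the think-lmi and thinkpad_acpi hunks above rely on the two-argument form of strscpy(), which derives the destination size from the array type at compile time. A minimal sketch of what that form does (illustrative, not part of the patch):

	char kbdlang[TLMI_LANG_MAXLEN];			/* fixed-size destination array */

	strscpy(kbdlang, "us", TLMI_LANG_MAXLEN);	/* old form: explicit size */
	strscpy(kbdlang, "us");				/* new form: size taken from sizeof(kbdlang) */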
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 3a8d8df89186..78a5aac2dcfd 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -3271,7 +3271,7 @@ static const char *find_hci_method(acpi_handle handle)
*/
#define QUIRK_HCI_HOTKEY_QUICKSTART BIT(1)
-static const struct dmi_system_id toshiba_dmi_quirks[] = {
+static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
{
/* Toshiba Portégé R700 */
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
@@ -3299,6 +3299,7 @@ static const struct dmi_system_id toshiba_dmi_quirks[] = {
},
.driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
},
+ { }
};
static int toshiba_acpi_add(struct acpi_device *acpi_dev)
@@ -3306,8 +3307,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
struct toshiba_acpi_dev *dev;
const char *hci_method;
u32 dummy;
- const struct dmi_system_id *dmi_id;
- long quirks = 0;
int ret = 0;
if (toshiba_acpi)
@@ -3460,16 +3459,6 @@ iio_error:
}
#endif
- dmi_id = dmi_first_match(toshiba_dmi_quirks);
- if (dmi_id)
- quirks = (long)dmi_id->driver_data;
-
- if (turn_on_panel_on_resume == -1)
- turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
-
- if (hci_hotkey_quickstart == -1)
- hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
-
toshiba_wwan_available(dev);
if (dev->wwan_supported)
toshiba_acpi_setup_wwan_rfkill(dev);
@@ -3618,10 +3607,27 @@ static struct acpi_driver toshiba_acpi_driver = {
.drv.pm = &toshiba_acpi_pm,
};
+static void __init toshiba_dmi_init(void)
+{
+ const struct dmi_system_id *dmi_id;
+ long quirks = 0;
+
+ dmi_id = dmi_first_match(toshiba_dmi_quirks);
+ if (dmi_id)
+ quirks = (long)dmi_id->driver_data;
+
+ if (turn_on_panel_on_resume == -1)
+ turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
+
+ if (hci_hotkey_quickstart == -1)
+ hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
+}
+
static int __init toshiba_acpi_init(void)
{
int ret;
+ toshiba_dmi_init();
toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
if (!toshiba_proc_dir) {
pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index c6a10ec2c83f..f74af0a689f2 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -9,10 +9,13 @@
*/
#include <linux/acpi.h>
+#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/efi_embedded_fw.h>
#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kstrtox.h>
#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/string.h>
@@ -31,7 +34,6 @@ static const struct property_entry archos_101_cesium_educ_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"),
{ }
@@ -46,7 +48,6 @@ static const struct property_entry bush_bush_windows_tablet_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1850),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"),
{ }
@@ -76,7 +77,6 @@ static const struct property_entry chuwi_hi8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -92,7 +92,6 @@ static const struct property_entry chuwi_hi8_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -120,7 +119,6 @@ static const struct property_entry chuwi_hi10_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5),
PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -136,7 +134,6 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("silead,pen-supported"),
PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
@@ -168,7 +165,6 @@ static const struct property_entry chuwi_hi10_pro_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("silead,pen-supported"),
PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
@@ -198,7 +194,6 @@ static const struct property_entry chuwi_hibook_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -224,7 +219,6 @@ static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -252,7 +246,6 @@ static const struct property_entry chuwi_vi10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1858),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -268,7 +261,6 @@ static const struct property_entry chuwi_surbook_mini_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2040),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1524),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
{ }
};
@@ -286,7 +278,6 @@ static const struct property_entry connect_tablet9_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -303,7 +294,6 @@ static const struct property_entry csl_panther_tab_hd_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -319,7 +309,6 @@ static const struct property_entry cube_iwork8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -343,7 +332,6 @@ static const struct property_entry cube_knote_i1101_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1961),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1513),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -357,7 +345,6 @@ static const struct property_entry dexp_ursus_7w_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -373,7 +360,6 @@ static const struct property_entry dexp_ursus_kx210i_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -388,7 +374,6 @@ static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -447,7 +432,6 @@ static const struct property_entry irbis_tw90_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -463,7 +447,6 @@ static const struct property_entry irbis_tw118_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -480,7 +463,6 @@ static const struct property_entry itworks_tw891_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -493,7 +475,6 @@ static const struct property_entry jumper_ezpad_6_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -508,7 +489,6 @@ static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -524,7 +504,6 @@ static const struct property_entry jumper_ezpad_6_m4_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1950),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1525),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -541,7 +520,6 @@ static const struct property_entry jumper_ezpad_7_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"),
{ }
};
@@ -558,7 +536,6 @@ static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -575,7 +552,6 @@ static const struct property_entry mpman_converter9_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-mpman-converter9.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -591,7 +567,6 @@ static const struct property_entry mpman_mpwin895cl_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -608,7 +583,6 @@ static const struct property_entry myria_my8307_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -625,7 +599,6 @@ static const struct property_entry onda_obook_20_plus_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -642,7 +615,6 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -666,7 +638,6 @@ static const struct property_entry onda_v820w_32g_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -684,7 +655,6 @@ static const struct property_entry onda_v891_v5_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl3676-onda-v891-v5.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -700,7 +670,6 @@ static const struct property_entry onda_v891w_v1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -717,7 +686,6 @@ static const struct property_entry onda_v891w_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -756,7 +724,6 @@ static const struct property_entry pipo_w11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -772,7 +739,6 @@ static const struct property_entry positivo_c4128b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1915),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1269),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -788,7 +754,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -805,7 +770,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -822,7 +786,6 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -839,7 +802,6 @@ static const struct property_entry predia_basic_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -856,7 +818,6 @@ static const struct property_entry rca_cambio_w101_v2_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -871,7 +832,6 @@ static const struct property_entry rwc_nanote_p8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -887,7 +847,6 @@ static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -897,6 +856,21 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
.properties = schneider_sct101ctm_props,
};
+static const struct property_entry globalspace_solt_ivw116_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data globalspace_solt_ivw116_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = globalspace_solt_ivw116_props,
+};
+
static const struct property_entry techbite_arc_11_6_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
@@ -904,7 +878,6 @@ static const struct property_entry techbite_arc_11_6_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -920,7 +893,6 @@ static const struct property_entry teclast_tbook11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -946,7 +918,6 @@ static const struct property_entry teclast_x16_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -969,7 +940,6 @@ static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -985,7 +955,6 @@ static const struct property_entry teclast_x98plus2_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -999,7 +968,6 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1013,7 +981,6 @@ static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1027,7 +994,6 @@ static const struct property_entry trekstor_primetab_t13b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
{ }
@@ -1055,7 +1021,6 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1071,7 +1036,6 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1088,7 +1052,6 @@ static const struct property_entry viglen_connect_10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1102,7 +1065,6 @@ static const struct property_entry vinga_twizzle_j116_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -1386,6 +1348,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Jumper EZpad 6s Pro */
+ .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
+ /* Above matches are too generic, add bios match */
+ DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
+ DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
+ },
+ },
+ {
/* Jumper EZpad 6 m4 */
.driver_data = (void *)&jumper_ezpad_6_m4_data,
.matches = {
@@ -1625,6 +1598,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* GlobalSpace SoLT IVW 11.6" */
+ .driver_data = (void *)&globalspace_solt_ivw116_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
+ },
+ },
+ {
/* Techbite Arc 11.6 */
.driver_data = (void *)&techbite_arc_11_6_data,
.matches = {
@@ -1817,7 +1799,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
{ }
};
-static const struct ts_dmi_data *ts_data;
+static struct ts_dmi_data *ts_data;
static void ts_dmi_add_props(struct i2c_client *client)
{
@@ -1852,6 +1834,64 @@ static int ts_dmi_notifier_call(struct notifier_block *nb,
return 0;
}
+#define MAX_CMDLINE_PROPS 16
+
+static struct property_entry ts_cmdline_props[MAX_CMDLINE_PROPS + 1];
+
+static struct ts_dmi_data ts_cmdline_data = {
+ .properties = ts_cmdline_props,
+};
+
+static int __init ts_parse_props(char *str)
+{
+ /* Save the original str to show it on syntax errors */
+ char orig_str[256];
+ char *name, *value;
+ u32 u32val;
+ int i, ret;
+
+ strscpy(orig_str, str);
+
+ /*
+ * str is part of the static_command_line from init/main.c and poking
+ * holes in that by writing 0 to it is allowed, as is taking long
+ * lasting references to it.
+ */
+ ts_cmdline_data.acpi_name = strsep(&str, ":");
+
+ for (i = 0; i < MAX_CMDLINE_PROPS; i++) {
+ name = strsep(&str, ":");
+ if (!name || !name[0])
+ break;
+
+ /* Replace '=' with 0 and make value point past '=' or NULL */
+ value = name;
+ strsep(&value, "=");
+ if (!value) {
+ ts_cmdline_props[i] = PROPERTY_ENTRY_BOOL(name);
+ } else if (isdigit(value[0])) {
+ ret = kstrtou32(value, 0, &u32val);
+ if (ret)
+ goto syntax_error;
+
+ ts_cmdline_props[i] = PROPERTY_ENTRY_U32(name, u32val);
+ } else {
+ ts_cmdline_props[i] = PROPERTY_ENTRY_STRING(name, value);
+ }
+ }
+
+ if (!i || str)
+ goto syntax_error;
+
+ ts_data = &ts_cmdline_data;
+ return 1;
+
+syntax_error:
+ pr_err("Invalid '%s' value for 'i2c_touchscreen_props='\n", orig_str);
+ return 1; /* "i2c_touchscreen_props=" is still a known parameter */
+}
+__setup("i2c_touchscreen_props=", ts_parse_props);
+
static struct notifier_block ts_dmi_notifier = {
.notifier_call = ts_dmi_notifier_call,
};
@@ -1859,13 +1899,25 @@ static struct notifier_block ts_dmi_notifier = {
static int __init ts_dmi_init(void)
{
const struct dmi_system_id *dmi_id;
+ struct ts_dmi_data *ts_data_dmi;
int error;
dmi_id = dmi_first_match(touchscreen_dmi_table);
- if (!dmi_id)
+ ts_data_dmi = dmi_id ? dmi_id->driver_data : NULL;
+
+ if (ts_data) {
+ /*
+ * Kernel cmdline provided data takes precedence, copy over
+ * DMI efi_embedded_fw info if available.
+ */
+ if (ts_data_dmi)
+ ts_data->embedded_fw = ts_data_dmi->embedded_fw;
+ } else if (ts_data_dmi) {
+ ts_data = ts_data_dmi;
+ } else {
return 0; /* Not an error */
+ }
- ts_data = dmi_id->driver_data;
/* Some dmi table entries only provide an efi_embedded_fw_desc */
if (!ts_data->properties)
return 0;
diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c
index 37372d7cc54a..f6a0627f36db 100644
--- a/drivers/platform/x86/uv_sysfs.c
+++ b/drivers/platform/x86/uv_sysfs.c
@@ -929,4 +929,5 @@ module_init(uv_sysfs_init);
module_exit(uv_sysfs_exit);
MODULE_AUTHOR("Hewlett Packard Enterprise");
+MODULE_DESCRIPTION("Sysfs structure for HPE UV systems");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
index e95cdbbfb708..a220fe4f9ef8 100644
--- a/drivers/platform/x86/wireless-hotkey.c
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -14,11 +14,13 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
+MODULE_DESCRIPTION("Airplane mode button for AMD, HP & Xiaomi laptops");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
MODULE_ALIAS("acpi*:AMDI0051:*");
+MODULE_ALIAS("acpi*:LGEX0815:*");
struct wl_button {
struct input_dev *input_dev;
@@ -29,6 +31,7 @@ static const struct acpi_device_id wl_ids[] = {
{"HPQ6001", 0},
{"WSTADEF", 0},
{"AMDI0051", 0},
+ {"LGEX0815", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index d21f3fa25823..6bfae28b962a 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -772,11 +772,39 @@ static ssize_t expensive_show(struct device *dev,
}
static DEVICE_ATTR_RO(expensive);
+static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_device *wdev = to_wmi_device(dev);
+ ssize_t ret;
+
+ device_lock(dev);
+ ret = sysfs_emit(buf, "%s\n", wdev->driver_override);
+ device_unlock(dev);
+
+ return ret;
+}
+
+static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wmi_device *wdev = to_wmi_device(dev);
+ int ret;
+
+ ret = driver_set_override(dev, &wdev->driver_override, buf, count);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(driver_override);
+
static struct attribute *wmi_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_guid.attr,
&dev_attr_instance_count.attr,
&dev_attr_expensive.attr,
+ &dev_attr_driver_override.attr,
NULL
};
ATTRIBUTE_GROUPS(wmi);
@@ -845,6 +873,7 @@ static void wmi_dev_release(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
+ kfree(wblock->dev.driver_override);
kfree(wblock);
}
@@ -854,6 +883,10 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
struct wmi_block *wblock = dev_to_wblock(dev);
const struct wmi_device_id *id = wmi_driver->id_table;
+ /* When driver_override is set, only bind to the matching driver */
+ if (wblock->dev.driver_override)
+ return !strcmp(wblock->dev.driver_override, driver->name);
+
if (id == NULL)
return 0;
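The new attribute follows the driver_override convention used by other buses: userspace writes a driver name into the device's driver_override file and re-triggers probing, after which wmi_dev_match() only accepts the named driver. A hedged usage sketch (the GUID and driver name are placeholders, and the device must not already be bound):

	echo my-wmi-driver > /sys/bus/wmi/devices/<GUID>/driver_override
	echo <GUID> > /sys/bus/wmi/drivers_probe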
diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig
index 6603461d4273..b591419de80c 100644
--- a/drivers/platform/x86/x86-android-tablets/Kconfig
+++ b/drivers/platform/x86/x86-android-tablets/Kconfig
@@ -6,6 +6,8 @@
config X86_ANDROID_TABLETS
tristate "X86 Android tablet support"
depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB && PMIC_OPREGION
+ select NEW_LEDS
+ select LEDS_CLASS
help
X86 tablets which ship with Android as (part of) the factory image
typically have various problems with their DSDTs. The factory kernels
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index e64d5646b4c7..5fe68296501c 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -74,5 +74,6 @@ static struct platform_driver xo1_rfkill_driver = {
module_platform_driver(xo1_rfkill_driver);
MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
+MODULE_DESCRIPTION("OLPC XO-1 software RF kill switch");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:xo1-rfkill");
diff --git a/drivers/pmdomain/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
index fcec6eb610e4..fbb2b4103930 100644
--- a/drivers/pmdomain/amlogic/meson-ee-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
@@ -648,4 +648,5 @@ static struct platform_driver meson_ee_pwrc_driver = {
},
};
module_platform_driver(meson_ee_pwrc_driver);
+MODULE_DESCRIPTION("Amlogic Meson Everything-Else Power Domains driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
index 33df520eab95..6028e91664a4 100644
--- a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
@@ -376,4 +376,5 @@ static struct platform_driver meson_gx_pwrc_vpu_driver = {
},
};
module_platform_driver(meson_gx_pwrc_vpu_driver);
+MODULE_DESCRIPTION("Amlogic Meson GX Power Domains driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index 4d5bda0d60fc..42ce41a2fe3a 100644
--- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
@@ -14,6 +14,8 @@
#include <dt-bindings/power/amlogic,c3-pwrc.h>
#include <dt-bindings/power/meson-s4-power.h>
#include <dt-bindings/power/amlogic,t7-pwrc.h>
+#include <dt-bindings/power/amlogic,a4-pwrc.h>
+#include <dt-bindings/power/amlogic,a5-pwrc.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
#include <linux/module.h>
@@ -45,7 +47,7 @@ struct meson_secure_pwrc_domain_desc {
struct meson_secure_pwrc_domain_data {
unsigned int count;
- struct meson_secure_pwrc_domain_desc *domains;
+ const struct meson_secure_pwrc_domain_desc *domains;
};
static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain)
@@ -109,7 +111,7 @@ static int meson_secure_pwrc_on(struct generic_pm_domain *domain)
.parent = __parent, \
}
-static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
+static const struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
SEC_PD(DSPA, 0),
SEC_PD(DSPB, 0),
/* UART should keep working in ATF after suspend and before resume */
@@ -136,7 +138,41 @@ static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
SEC_PD(RSA, 0),
};
-static struct meson_secure_pwrc_domain_desc c3_pwrc_domains[] = {
+static const struct meson_secure_pwrc_domain_desc a4_pwrc_domains[] = {
+ SEC_PD(A4_AUDIO, 0),
+ SEC_PD(A4_SDIOA, 0),
+ SEC_PD(A4_EMMC, 0),
+ SEC_PD(A4_USB_COMB, 0),
+ SEC_PD(A4_ETH, 0),
+ SEC_PD(A4_VOUT, 0),
+ SEC_PD(A4_AUDIO_PDM, 0),
+ /* DMC is for the DDR PHY ana/dig and DMC, and should always be on */
+ SEC_PD(A4_DMC, GENPD_FLAG_ALWAYS_ON),
+ /* WRAP is secure_top; it contains a lot of modules and should always be on */
+ SEC_PD(A4_SYS_WRAP, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(A4_AO_I2C_S, 0),
+ SEC_PD(A4_AO_UART, 0),
+ /* IR is a wake-up trigger source and should always be on */
+ SEC_PD(A4_AO_IR, GENPD_FLAG_ALWAYS_ON),
+};
+
+static const struct meson_secure_pwrc_domain_desc a5_pwrc_domains[] = {
+ SEC_PD(A5_NNA, 0),
+ SEC_PD(A5_AUDIO, 0),
+ SEC_PD(A5_SDIOA, 0),
+ SEC_PD(A5_EMMC, 0),
+ SEC_PD(A5_USB_COMB, 0),
+ SEC_PD(A5_ETH, 0),
+ SEC_PD(A5_RSA, 0),
+ SEC_PD(A5_AUDIO_PDM, 0),
+ /* DMC is for the DDR PHY ana/dig and DMC, and should always be on */
+ SEC_PD(A5_DMC, GENPD_FLAG_ALWAYS_ON),
+ /* WRAP is secure_top; it contains a lot of modules and should always be on */
+ SEC_PD(A5_SYS_WRAP, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(A5_DSPA, 0),
+};
+
+static const struct meson_secure_pwrc_domain_desc c3_pwrc_domains[] = {
SEC_PD(C3_NNA, 0),
SEC_PD(C3_AUDIO, 0),
SEC_PD(C3_SDIOA, 0),
@@ -153,7 +189,7 @@ static struct meson_secure_pwrc_domain_desc c3_pwrc_domains[] = {
SEC_PD(C3_VCODEC, 0),
};
-static struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = {
+static const struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = {
SEC_PD(S4_DOS_HEVC, 0),
SEC_PD(S4_DOS_VDEC, 0),
SEC_PD(S4_VPU_HDMI, 0),
@@ -165,7 +201,7 @@ static struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = {
SEC_PD(S4_AUDIO, 0),
};
-static struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
+static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
SEC_PD(T7_DSPA, 0),
SEC_PD(T7_DSPB, 0),
TOP_PD(T7_DOS_HCODEC, 0, PWRC_T7_NIC3_ID),
@@ -311,6 +347,16 @@ static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
.count = ARRAY_SIZE(a1_pwrc_domains),
};
+static struct meson_secure_pwrc_domain_data amlogic_secure_a4_pwrc_data = {
+ .domains = a4_pwrc_domains,
+ .count = ARRAY_SIZE(a4_pwrc_domains),
+};
+
+static struct meson_secure_pwrc_domain_data amlogic_secure_a5_pwrc_data = {
+ .domains = a5_pwrc_domains,
+ .count = ARRAY_SIZE(a5_pwrc_domains),
+};
+
static struct meson_secure_pwrc_domain_data amlogic_secure_c3_pwrc_data = {
.domains = c3_pwrc_domains,
.count = ARRAY_SIZE(c3_pwrc_domains),
@@ -332,6 +378,14 @@ static const struct of_device_id meson_secure_pwrc_match_table[] = {
.data = &meson_secure_a1_pwrc_data,
},
{
+ .compatible = "amlogic,a4-pwrc",
+ .data = &amlogic_secure_a4_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,a5-pwrc",
+ .data = &amlogic_secure_a5_pwrc_data,
+ },
+ {
.compatible = "amlogic,c3-pwrc",
.data = &amlogic_secure_c3_pwrc_data,
},
@@ -355,4 +409,5 @@ static struct platform_driver meson_secure_pwrc_driver = {
},
};
module_platform_driver(meson_secure_pwrc_driver);
+MODULE_DESCRIPTION("Amlogic Meson Secure Power Domains driver");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index 0e05a79de82d..a7784a8bb5db 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -102,6 +102,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
+ scmi_pd->genpd.flags = GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 623d15b68707..7a61aa88c061 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -588,6 +588,68 @@ void dev_pm_genpd_synced_poweroff(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
+/**
+ * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
+ *
+ * @dev: Device for which the HW-mode should be changed.
+ * @enable: Value to set or unset the HW-mode.
+ *
+ * Some PM domains can rely on HW signals to control the power for a device. To
+ * allow a consumer driver to switch the behaviour for its device at runtime,
+ * which may be beneficial from a latency or energy point of view, this function
+ * may be called.
+ *
+ * It is assumed that the user guarantees that the genpd will not be detached
+ * while this routine is being called.
+ *
+ * Return: Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (!genpd->set_hwmode_dev)
+ return -EOPNOTSUPP;
+
+ genpd_lock(genpd);
+
+ if (dev_gpd_data(dev)->hw_mode == enable)
+ goto out;
+
+ ret = genpd->set_hwmode_dev(genpd, dev, enable);
+ if (!ret)
+ dev_gpd_data(dev)->hw_mode = enable;
+
+out:
+ genpd_unlock(genpd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
+
+/**
+ * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
+ *
+ * @dev: Device for which the current HW-mode setting should be fetched.
+ *
+ * This helper function allows consumer drivers to fetch the current HW mode
+ * setting of their device.
+ *
+ * It is assumed that the user guarantees that the genpd will not be detached
+ * while this routine is being called.
+ *
+ * Return: The HW mode setting of the device, read from the SW-cached hw_mode.
+ */
+bool dev_pm_genpd_get_hwmode(struct device *dev)
+{
+ return dev_gpd_data(dev)->hw_mode;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
+
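A minimal consumer-side sketch of the two helpers added above (illustrative driver code, not part of the patch): the consumer hands power control of its domain over to hardware when that is beneficial and can read back the SW-cached state.

	static int example_use_hw_control(struct device *dev)
	{
		int ret;

		/* Ask the PM domain to switch the device to HW-controlled mode */
		ret = dev_pm_genpd_set_hwmode(dev, true);
		if (ret && ret != -EOPNOTSUPP)
			return ret;

		/* Reads the hw_mode value cached by the call above */
		if (dev_pm_genpd_get_hwmode(dev))
			dev_dbg(dev, "PM domain is HW-managed\n");

		return 0;
	}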
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -1687,6 +1749,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
+ gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;
+
ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
@@ -2079,7 +2143,7 @@ static void genpd_free_data(struct generic_pm_domain *genpd)
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
- if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+ if (genpd_is_irq_safe(genpd)) {
spin_lock_init(&genpd->slock);
genpd->lock_ops = &genpd_spin_ops;
} else {
@@ -3120,6 +3184,15 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
seq_printf(s, "%-25s ", p);
}
+static void mode_status_str(struct seq_file *s, struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+
+ seq_printf(s, "%20s", gpd_data->hw_mode ? "HW" : "SW");
+}
+
static void perf_status_str(struct seq_file *s, struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
@@ -3178,6 +3251,7 @@ static int genpd_summary_one(struct seq_file *s,
seq_printf(s, "\n %-50s ", kobj_path);
rtpm_status_str(s, pm_data->dev);
perf_status_str(s, pm_data->dev);
+ mode_status_str(s, pm_data->dev);
kfree(kobj_path);
}
@@ -3194,8 +3268,8 @@ static int summary_show(struct seq_file *s, void *data)
int ret = 0;
seq_puts(s, "domain status children performance\n");
- seq_puts(s, " /device runtime status\n");
- seq_puts(s, "----------------------------------------------------------------------------------------------\n");
+ seq_puts(s, " /device runtime status managed by\n");
+ seq_puts(s, "------------------------------------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 4b828d74a606..856eaac0ec14 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -393,6 +393,17 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
* automatically there. Just add a delay and suppose the handshake finish
* after that.
*/
+
+ /*
+ * Some BLK-CTL modules (e.g. AudioMix on i.MX8MP) do not have a BUS
+ * clk-en bit, so it is better to add a delay here; the BLK-CTL module
+ * does not need to care about how it is powered up.
+ *
+ * regmap_read_bypassed() makes sure the above write I/O transaction
+ * has reached its target before udelay().
+ */
+ regmap_read_bypassed(domain->regmap, domain->regs->hsk, &reg_val);
+ udelay(5);
}
/* Disable reset clocks for all devices in the domain */
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index de9121ef4216..d2cb4271a1ca 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -40,6 +40,7 @@
* @addr: Resource address as looped up using resource name from
* cmd-db
* @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource
+ * @skip_retention_level: Indicate that retention level should not be used for the power domain
*/
struct rpmhpd {
struct device *dev;
@@ -56,6 +57,7 @@ struct rpmhpd {
const char *res_name;
u32 addr;
bool state_synced;
+ bool skip_retention_level;
};
struct rpmhpd_desc {
@@ -173,6 +175,7 @@ static struct rpmhpd mxc = {
.pd = { .name = "mxc", },
.peer = &mxc_ao,
.res_name = "mxc.lvl",
+ .skip_retention_level = true,
};
static struct rpmhpd mxc_ao = {
@@ -180,6 +183,7 @@ static struct rpmhpd mxc_ao = {
.active_only = true,
.peer = &mxc,
.res_name = "mxc.lvl",
+ .skip_retention_level = true,
};
static struct rpmhpd nsp = {
@@ -819,6 +823,9 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
return -EINVAL;
for (i = 0; i < rpmhpd->level_count; i++) {
+ if (rpmhpd->skip_retention_level && buf[i] == RPMH_REGULATOR_LEVEL_RETENTION)
+ continue;
+
rpmhpd->level[i] = buf[i];
/* Remember the first corner with non-zero level */
diff --git a/drivers/pmdomain/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c
index 0b77f37787d5..5848e79aa438 100644
--- a/drivers/pmdomain/renesas/rmobile-sysc.c
+++ b/drivers/pmdomain/renesas/rmobile-sysc.c
@@ -268,9 +268,7 @@ static int __init rmobile_add_pm_domains(void __iomem *base,
struct device_node *parent,
struct generic_pm_domain *genpd_parent)
{
- struct device_node *np;
-
- for_each_child_of_node(parent, np) {
+ for_each_child_of_node_scoped(parent, np) {
struct rmobile_pm_domain *pd;
u32 idx = ~0;
@@ -279,10 +277,8 @@ static int __init rmobile_add_pm_domains(void __iomem *base,
}
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- of_node_put(np);
+ if (!pd)
return -ENOMEM;
- }
pd->genpd.name = np->name;
pd->base = base;
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index e74a0f6a3157..4e80273dfb1e 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -6,6 +6,7 @@
extern struct mutex pnp_lock;
extern const struct attribute_group *pnp_dev_groups[];
+extern const struct bus_type pnp_bus_type;
int pnp_register_protocol(struct pnp_protocol *protocol);
void pnp_unregister_protocol(struct pnp_protocol *protocol);
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 0a5d0d8befa8..3483e52e3a81 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -266,6 +266,12 @@ const struct bus_type pnp_bus_type = {
.dev_groups = pnp_dev_groups,
};
+bool dev_is_pnp(const struct device *dev)
+{
+ return dev->bus == &pnp_bus_type;
+}
+EXPORT_SYMBOL_GPL(dev_is_pnp);
+
int pnp_register_driver(struct pnp_driver *drv)
{
drv->driver.name = drv->name;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 696bf77a7042..9a8e44ca9ae4 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
source "drivers/power/reset/Kconfig"
+source "drivers/power/sequencing/Kconfig"
source "drivers/power/supply/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index effbf0377f32..962a2cd30a51 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_POWER_RESET) += reset/
+obj-$(CONFIG_POWER_SEQUENCING) += sequencing/
obj-$(CONFIG_POWER_SUPPLY) += supply/
diff --git a/drivers/power/sequencing/Kconfig b/drivers/power/sequencing/Kconfig
new file mode 100644
index 000000000000..c9f1cdb66524
--- /dev/null
+++ b/drivers/power/sequencing/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menuconfig POWER_SEQUENCING
+ tristate "Power Sequencing support"
+ help
+ Say Y here to enable the Power Sequencing subsystem.
+
+ This subsystem is designed to control power to devices that share
+ complex resources and/or require specific power sequences to be run
+ during power-up.
+
+ If unsure, say no.
+
+if POWER_SEQUENCING
+
+config POWER_SEQUENCING_QCOM_WCN
+ tristate "Qualcomm WCN family PMU driver"
+ default m if ARCH_QCOM
+ help
+ Say Y here to enable the power sequencing driver for Qualcomm
+ WCN Bluetooth/WLAN chipsets.
+
+ Typically, a package from the Qualcomm WCN family contains the BT
+ and WLAN modules whose power is controlled by the PMU module. As the
+ former two share the power-up sequence which is executed by the PMU,
+ this driver is needed for correct power control or else we'd risk not
+ respecting the required delays between enabling Bluetooth and WLAN.
+
+endif
diff --git a/drivers/power/sequencing/Makefile b/drivers/power/sequencing/Makefile
new file mode 100644
index 000000000000..2eec2df7912d
--- /dev/null
+++ b/drivers/power/sequencing/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_POWER_SEQUENCING) += pwrseq-core.o
+pwrseq-core-y := core.o
+
+obj-$(CONFIG_POWER_SEQUENCING_QCOM_WCN) += pwrseq-qcom-wcn.o
diff --git a/drivers/power/sequencing/core.c b/drivers/power/sequencing/core.c
new file mode 100644
index 000000000000..9c32b07a55e7
--- /dev/null
+++ b/drivers/power/sequencing/core.c
@@ -0,0 +1,1105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/pwrseq/consumer.h>
+#include <linux/pwrseq/provider.h>
+#include <linux/radix-tree.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+
+/*
+ * Power-sequencing framework for Linux.
+ *
+ * This subsystem allows power sequence providers to register a set of targets
+ * that consumers may request and power up or down.
+ *
+ * Glossary:
+ *
+ * Unit - a unit is a discrete chunk of a power sequence. For instance, one unit
+ * may enable a set of regulators, another may enable a specific GPIO. Units
+ * can define dependencies in the form of other units that must be enabled
+ * before they themselves can be.
+ *
+ * Target - a target is a set of units (composed of the "final" unit and its
+ * dependencies) that a consumer selects by its name when requesting a handle
+ * to the power sequencer. Via the dependency system, multiple targets may
+ * share the same parts of a power sequence but ignore parts that are
+ * irrelevant.
+ *
+ * Descriptor - a handle passed by the pwrseq core to every consumer that
+ * serves as the entry point to the provider layer. It ensures coherence
+ * between different users and keeps reference counting consistent.
+ *
+ * Each provider must define a .match() callback whose role is to determine
+ * whether a potential consumer is in fact associated with this sequencer.
+ * This allows creating abstraction layers on top of regular device-tree
+ * resources like regulators, clocks and other nodes connected to the consumer
+ * via phandle.
+ */
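A provider-side sketch of the unit/target wiring described above (all names are illustrative; the pwrseq_power_state_func callback signature is assumed from the provider API and is not shown in this hunk):

	static int example_enable_regulators(struct pwrseq_device *pwrseq)
	{
		/* enable the supplies shared by all targets */
		return 0;
	}

	static int example_disable_regulators(struct pwrseq_device *pwrseq)
	{
		return 0;
	}

	static int example_enable_bt(struct pwrseq_device *pwrseq)
	{
		/* e.g. drive a BT enable GPIO and wait the required delay */
		return 0;
	}

	static int example_disable_bt(struct pwrseq_device *pwrseq)
	{
		return 0;
	}

	static const struct pwrseq_unit_data example_regulators_unit = {
		.name = "regulators-enable",
		.enable = example_enable_regulators,
		.disable = example_disable_regulators,
	};

	/* NULL-terminated dependency list, as walked by pwrseq_unit_setup_deps() */
	static const struct pwrseq_unit_data *example_bt_deps[] = {
		&example_regulators_unit,
		NULL,
	};

	static const struct pwrseq_unit_data example_bt_enable_unit = {
		.name = "bluetooth-enable",
		.deps = example_bt_deps,
		.enable = example_enable_bt,
		.disable = example_disable_bt,
	};

	/* The target a consumer selects by name when requesting the sequencer */
	static const struct pwrseq_target_data example_bt_target = {
		.name = "bluetooth",
		.unit = &example_bt_enable_unit,
	};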
+
+static DEFINE_IDA(pwrseq_ida);
+
+/*
+ * Protects the device list on the pwrseq bus from concurrent modifications
+ * but allows simultaneous read-only access.
+ */
+static DECLARE_RWSEM(pwrseq_sem);
+
+/**
+ * struct pwrseq_unit - Private power-sequence unit data.
+ * @ref: Reference count for this object. When it goes to 0, the object is
+ * destroyed.
+ * @name: Name of this unit.
+ * @list: Link to siblings on the list of all units of a single sequencer.
+ * @deps: List of units on which this unit depends.
+ * @enable: Callback running the part of the power-on sequence provided by
+ * this unit.
+ * @disable: Callback running the part of the power-off sequence provided
+ * by this unit.
+ * @enable_count: Current number of users that enabled this unit. May be the
+ * consumer of the power sequencer or other units that depend
+ * on this one.
+ */
+struct pwrseq_unit {
+ struct kref ref;
+ const char *name;
+ struct list_head list;
+ struct list_head deps;
+ pwrseq_power_state_func enable;
+ pwrseq_power_state_func disable;
+ unsigned int enable_count;
+};
+
+static struct pwrseq_unit *pwrseq_unit_new(const struct pwrseq_unit_data *data)
+{
+ struct pwrseq_unit *unit;
+
+ unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+ if (!unit)
+ return NULL;
+
+ unit->name = kstrdup_const(data->name, GFP_KERNEL);
+ if (!unit->name) {
+ kfree(unit);
+ return NULL;
+ }
+
+ kref_init(&unit->ref);
+ INIT_LIST_HEAD(&unit->deps);
+ unit->enable = data->enable;
+ unit->disable = data->disable;
+
+ return unit;
+}
+
+static struct pwrseq_unit *pwrseq_unit_get(struct pwrseq_unit *unit)
+{
+ kref_get(&unit->ref);
+
+ return unit;
+}
+
+static void pwrseq_unit_release(struct kref *ref);
+
+static void pwrseq_unit_put(struct pwrseq_unit *unit)
+{
+ kref_put(&unit->ref, pwrseq_unit_release);
+}
+
+/**
+ * struct pwrseq_unit_dep - Wrapper around a reference to the unit structure,
+ * allowing it to be kept on multiple dependency
+ * lists in different units.
+ * @list: Siblings on the list.
+ * @unit: Address of the referenced unit.
+ */
+struct pwrseq_unit_dep {
+ struct list_head list;
+ struct pwrseq_unit *unit;
+};
+
+static struct pwrseq_unit_dep *pwrseq_unit_dep_new(struct pwrseq_unit *unit)
+{
+ struct pwrseq_unit_dep *dep;
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep)
+ return NULL;
+
+ dep->unit = unit;
+
+ return dep;
+}
+
+static void pwrseq_unit_dep_free(struct pwrseq_unit_dep *ref)
+{
+ pwrseq_unit_put(ref->unit);
+ kfree(ref);
+}
+
+static void pwrseq_unit_free_deps(struct list_head *list)
+{
+ struct pwrseq_unit_dep *dep, *next;
+
+ list_for_each_entry_safe(dep, next, list, list) {
+ list_del(&dep->list);
+ pwrseq_unit_dep_free(dep);
+ }
+}
+
+static void pwrseq_unit_release(struct kref *ref)
+{
+ struct pwrseq_unit *unit = container_of(ref, struct pwrseq_unit, ref);
+
+ pwrseq_unit_free_deps(&unit->deps);
+ list_del(&unit->list);
+ kfree_const(unit->name);
+ kfree(unit);
+}
+
+/**
+ * struct pwrseq_target - Private power-sequence target data.
+ * @list: Siblings on the list of all targets exposed by a power sequencer.
+ * @name: Name of the target.
+ * @unit: Final unit for this target.
+ * @post_enable: Callback run after the target unit has been enabled, *after*
+ * the state lock has been released. It's useful for implementing
+ * boot-up delays without blocking other users from powering up
+ * using the same power sequencer.
+ */
+struct pwrseq_target {
+ struct list_head list;
+ const char *name;
+ struct pwrseq_unit *unit;
+ pwrseq_power_state_func post_enable;
+};
+
+static struct pwrseq_target *
+pwrseq_target_new(const struct pwrseq_target_data *data)
+{
+ struct pwrseq_target *target;
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target)
+ return NULL;
+
+ target->name = kstrdup_const(data->name, GFP_KERNEL);
+ if (!target->name) {
+ kfree(target);
+ return NULL;
+ }
+
+ target->post_enable = data->post_enable;
+
+ return target;
+}
+
+static void pwrseq_target_free(struct pwrseq_target *target)
+{
+ pwrseq_unit_put(target->unit);
+ kfree_const(target->name);
+ kfree(target);
+}
+
+/**
+ * struct pwrseq_device - Private power sequencing data.
+ * @dev: Device struct associated with this sequencer.
+ * @id: Device ID.
+ * @owner: Prevents removal of active power sequencing providers.
+ * @rw_lock: Protects the device from being unregistered while in use.
+ * @state_lock: Prevents multiple users running the power sequence at the same
+ * time.
+ * @match: Power sequencer matching callback.
+ * @targets: List of targets exposed by this sequencer.
+ * @units: List of all units supported by this sequencer.
+ */
+struct pwrseq_device {
+ struct device dev;
+ int id;
+ struct module *owner;
+ struct rw_semaphore rw_lock;
+ struct mutex state_lock;
+ pwrseq_match_func match;
+ struct list_head targets;
+ struct list_head units;
+};
+
+static struct pwrseq_device *to_pwrseq_device(struct device *dev)
+{
+ return container_of(dev, struct pwrseq_device, dev);
+}
+
+static struct pwrseq_device *pwrseq_device_get(struct pwrseq_device *pwrseq)
+{
+ get_device(&pwrseq->dev);
+
+ return pwrseq;
+}
+
+static void pwrseq_device_put(struct pwrseq_device *pwrseq)
+{
+ put_device(&pwrseq->dev);
+}
+
+/**
+ * struct pwrseq_desc - Wraps access to the pwrseq_device and ensures that one
+ * user cannot break the reference counting for others.
+ * @pwrseq: Reference to the power sequencing device.
+ * @target: Reference to the target this descriptor allows to control.
+ * @powered_on: Power state set by the holder of the descriptor (not necessarily
+ * corresponding to the actual power state of the device).
+ */
+struct pwrseq_desc {
+ struct pwrseq_device *pwrseq;
+ struct pwrseq_target *target;
+ bool powered_on;
+};
+
+static const struct bus_type pwrseq_bus = {
+ .name = "pwrseq",
+};
+
+static void pwrseq_release(struct device *dev)
+{
+ struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
+ struct pwrseq_target *target, *pos;
+
+ list_for_each_entry_safe(target, pos, &pwrseq->targets, list) {
+ list_del(&target->list);
+ pwrseq_target_free(target);
+ }
+
+ mutex_destroy(&pwrseq->state_lock);
+ ida_free(&pwrseq_ida, pwrseq->id);
+ kfree(pwrseq);
+}
+
+static const struct device_type pwrseq_device_type = {
+ .name = "power_sequencer",
+ .release = pwrseq_release,
+};
+
+static int pwrseq_check_unit_deps(const struct pwrseq_unit_data *data,
+ struct radix_tree_root *visited_units)
+{
+ const struct pwrseq_unit_data *tmp, **cur;
+ int ret;
+
+ ret = radix_tree_insert(visited_units, (unsigned long)data,
+ (void *)data);
+ if (ret)
+ return ret;
+
+ for (cur = data->deps; cur && *cur; cur++) {
+ tmp = radix_tree_lookup(visited_units, (unsigned long)*cur);
+ if (tmp) {
+ WARN(1, "Circular dependency in power sequencing flow detected!\n");
+ return -EINVAL;
+ }
+
+ ret = pwrseq_check_unit_deps(*cur, visited_units);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pwrseq_check_target_deps(const struct pwrseq_target_data *data)
+{
+ struct radix_tree_root visited_units;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ int ret;
+
+ if (!data->unit)
+ return -EINVAL;
+
+ INIT_RADIX_TREE(&visited_units, GFP_KERNEL);
+ ret = pwrseq_check_unit_deps(data->unit, &visited_units);
+ radix_tree_for_each_slot(slot, &visited_units, &iter, 0)
+ radix_tree_delete(&visited_units, iter.index);
+
+ return ret;
+}
+
+static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
+ struct list_head *dep_list,
+ struct list_head *unit_list,
+ struct radix_tree_root *processed_units);
+
+static struct pwrseq_unit *
+pwrseq_unit_setup(const struct pwrseq_unit_data *data,
+ struct list_head *unit_list,
+ struct radix_tree_root *processed_units)
+{
+ struct pwrseq_unit *unit;
+ int ret;
+
+ unit = radix_tree_lookup(processed_units, (unsigned long)data);
+ if (unit)
+ return pwrseq_unit_get(unit);
+
+ unit = pwrseq_unit_new(data);
+ if (!unit)
+ return ERR_PTR(-ENOMEM);
+
+ if (data->deps) {
+ ret = pwrseq_unit_setup_deps(data->deps, &unit->deps,
+ unit_list, processed_units);
+ if (ret) {
+ pwrseq_unit_put(unit);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = radix_tree_insert(processed_units, (unsigned long)data, unit);
+ if (ret) {
+ pwrseq_unit_put(unit);
+ return ERR_PTR(ret);
+ }
+
+ list_add_tail(&unit->list, unit_list);
+
+ return unit;
+}
+
+static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
+ struct list_head *dep_list,
+ struct list_head *unit_list,
+ struct radix_tree_root *processed_units)
+{
+ const struct pwrseq_unit_data *pos;
+ struct pwrseq_unit_dep *dep;
+ struct pwrseq_unit *unit;
+ int i;
+
+ for (i = 0; data[i]; i++) {
+ pos = data[i];
+
+ unit = pwrseq_unit_setup(pos, unit_list, processed_units);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
+
+ dep = pwrseq_unit_dep_new(unit);
+ if (!dep) {
+ pwrseq_unit_put(unit);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&dep->list, dep_list);
+ }
+
+ return 0;
+}
+
+static int pwrseq_do_setup_targets(const struct pwrseq_target_data **data,
+ struct pwrseq_device *pwrseq,
+ struct radix_tree_root *processed_units)
+{
+ const struct pwrseq_target_data *pos;
+ struct pwrseq_target *target;
+ int ret, i;
+
+ for (i = 0; data[i]; i++) {
+ pos = data[i];
+
+ ret = pwrseq_check_target_deps(pos);
+ if (ret)
+ return ret;
+
+ target = pwrseq_target_new(pos);
+ if (!target)
+ return -ENOMEM;
+
+ target->unit = pwrseq_unit_setup(pos->unit, &pwrseq->units,
+ processed_units);
+ if (IS_ERR(target->unit)) {
+ ret = PTR_ERR(target->unit);
+ pwrseq_target_free(target);
+ return ret;
+ }
+
+ list_add_tail(&target->list, &pwrseq->targets);
+ }
+
+ return 0;
+}
+
+static int pwrseq_setup_targets(const struct pwrseq_target_data **targets,
+ struct pwrseq_device *pwrseq)
+{
+ struct radix_tree_root processed_units;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ int ret;
+
+ INIT_RADIX_TREE(&processed_units, GFP_KERNEL);
+ ret = pwrseq_do_setup_targets(targets, pwrseq, &processed_units);
+ radix_tree_for_each_slot(slot, &processed_units, &iter, 0)
+ radix_tree_delete(&processed_units, iter.index);
+
+ return ret;
+}
+
+/**
+ * pwrseq_device_register() - Register a new power sequencer.
+ * @config: Configuration of the new power sequencing device.
+ *
+ * The config structure is only used during the call and can be freed after
+ * the function returns. The config structure *must* have the parent device
+ * as well as the match() callback and at least one target set.
+ *
+ * Returns:
+ * Returns the address of the new pwrseq device or ERR_PTR() on failure.
+ */
+struct pwrseq_device *
+pwrseq_device_register(const struct pwrseq_config *config)
+{
+ struct pwrseq_device *pwrseq;
+ int ret, id;
+
+ if (!config->parent || !config->match || !config->targets ||
+ !config->targets[0])
+ return ERR_PTR(-EINVAL);
+
+ pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return ERR_PTR(-ENOMEM);
+
+ pwrseq->dev.type = &pwrseq_device_type;
+ pwrseq->dev.bus = &pwrseq_bus;
+ pwrseq->dev.parent = config->parent;
+ device_set_node(&pwrseq->dev, dev_fwnode(config->parent));
+ dev_set_drvdata(&pwrseq->dev, config->drvdata);
+
+ id = ida_alloc(&pwrseq_ida, GFP_KERNEL);
+ if (id < 0) {
+ kfree(pwrseq);
+ return ERR_PTR(id);
+ }
+
+ pwrseq->id = id;
+
+ /*
+ * From this point onwards the device's release() callback is
+ * responsible for freeing resources.
+ */
+ device_initialize(&pwrseq->dev);
+
+ ret = dev_set_name(&pwrseq->dev, "pwrseq.%d", pwrseq->id);
+ if (ret)
+ goto err_put_pwrseq;
+
+ pwrseq->owner = config->owner ?: THIS_MODULE;
+ pwrseq->match = config->match;
+
+ init_rwsem(&pwrseq->rw_lock);
+ mutex_init(&pwrseq->state_lock);
+ INIT_LIST_HEAD(&pwrseq->targets);
+ INIT_LIST_HEAD(&pwrseq->units);
+
+ ret = pwrseq_setup_targets(config->targets, pwrseq);
+ if (ret)
+ goto err_put_pwrseq;
+
+ scoped_guard(rwsem_write, &pwrseq_sem) {
+ ret = device_add(&pwrseq->dev);
+ if (ret)
+ goto err_put_pwrseq;
+ }
+
+ return pwrseq;
+
+err_put_pwrseq:
+ pwrseq_device_put(pwrseq);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pwrseq_device_register);
+
+/**
+ * pwrseq_device_unregister() - Unregister the power sequencer.
+ * @pwrseq: Power sequencer to unregister.
+ */
+void pwrseq_device_unregister(struct pwrseq_device *pwrseq)
+{
+ struct device *dev = &pwrseq->dev;
+ struct pwrseq_target *target;
+
+ scoped_guard(mutex, &pwrseq->state_lock) {
+ guard(rwsem_write)(&pwrseq->rw_lock);
+
+ list_for_each_entry(target, &pwrseq->targets, list)
+ WARN(target->unit->enable_count,
+ "REMOVING POWER SEQUENCER WITH ACTIVE USERS\n");
+
+ guard(rwsem_write)(&pwrseq_sem);
+
+ device_del(dev);
+ }
+
+ pwrseq_device_put(pwrseq);
+}
+EXPORT_SYMBOL_GPL(pwrseq_device_unregister);
+
+static void devm_pwrseq_device_unregister(void *data)
+{
+ struct pwrseq_device *pwrseq = data;
+
+ pwrseq_device_unregister(pwrseq);
+}
+
+/**
+ * devm_pwrseq_device_register() - Managed variant of pwrseq_device_register().
+ * @dev: Managing device.
+ * @config: Configuration of the new power sequencing device.
+ *
+ * Returns:
+ * Returns the address of the new pwrseq device or ERR_PTR() on failure.
+ */
+struct pwrseq_device *
+devm_pwrseq_device_register(struct device *dev,
+ const struct pwrseq_config *config)
+{
+ struct pwrseq_device *pwrseq;
+ int ret;
+
+ pwrseq = pwrseq_device_register(config);
+ if (IS_ERR(pwrseq))
+ return pwrseq;
+
+ ret = devm_add_action_or_reset(dev, devm_pwrseq_device_unregister,
+ pwrseq);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pwrseq;
+}
+EXPORT_SYMBOL_GPL(devm_pwrseq_device_register);
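+
+/*
+ * A minimal provider-side sketch (struct and callback names are illustrative;
+ * see pwrseq-qcom-wcn.c further below for a complete example):
+ *
+ *	static const struct pwrseq_unit_data foo_pwr_unit = {
+ *		.name = "foo-power",
+ *		.enable = foo_pwr_enable,
+ *		.disable = foo_pwr_disable,
+ *	};
+ *
+ *	static const struct pwrseq_target_data foo_target = {
+ *		.name = "foo",
+ *		.unit = &foo_pwr_unit,
+ *	};
+ *
+ *	static const struct pwrseq_target_data *foo_targets[] = {
+ *		&foo_target, NULL
+ *	};
+ *
+ *	config.parent = dev;
+ *	config.owner = THIS_MODULE;
+ *	config.drvdata = ctx;
+ *	config.match = foo_match;
+ *	config.targets = foo_targets;
+ *	pwrseq = devm_pwrseq_device_register(dev, &config);
+ */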
+
+/**
+ * pwrseq_device_get_drvdata() - Get the driver private data associated with
+ * this sequencer.
+ * @pwrseq: Power sequencer object.
+ *
+ * Returns:
+ * Address of the private driver data.
+ */
+void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq)
+{
+ return dev_get_drvdata(&pwrseq->dev);
+}
+EXPORT_SYMBOL_GPL(pwrseq_device_get_drvdata);
+
+struct pwrseq_match_data {
+ struct pwrseq_desc *desc;
+ struct device *dev;
+ const char *target;
+};
+
+static int pwrseq_match_device(struct device *pwrseq_dev, void *data)
+{
+ struct pwrseq_device *pwrseq = to_pwrseq_device(pwrseq_dev);
+ struct pwrseq_match_data *match_data = data;
+ struct pwrseq_target *target;
+ int ret;
+
+ lockdep_assert_held_read(&pwrseq_sem);
+
+ guard(rwsem_read)(&pwrseq->rw_lock);
+ if (!device_is_registered(&pwrseq->dev))
+ return 0;
+
+ ret = pwrseq->match(pwrseq, match_data->dev);
+ if (ret <= 0)
+ return ret;
+
+ /* We got the matching device, let's find the right target. */
+ list_for_each_entry(target, &pwrseq->targets, list) {
+ if (strcmp(target->name, match_data->target))
+ continue;
+
+ match_data->desc->target = target;
+ }
+
+ /*
+ * This device does not have this target. No point in deferring as it
+ * will not get a new target dynamically later.
+ */
+ if (!match_data->desc->target)
+ return -ENOENT;
+
+ if (!try_module_get(pwrseq->owner))
+ return -EPROBE_DEFER;
+
+ match_data->desc->pwrseq = pwrseq_device_get(pwrseq);
+
+ return 1;
+}
+
+/**
+ * pwrseq_get() - Get the power sequencer associated with this device.
+ * @dev: Device for which to get the sequencer.
+ * @target: Name of the target exposed by the sequencer this device wants to
+ * reach.
+ *
+ * Returns:
+ * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
+ * on failure.
+ */
+struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target)
+{
+ struct pwrseq_match_data match_data;
+ int ret;
+
+ struct pwrseq_desc *desc __free(kfree) = kzalloc(sizeof(*desc),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ match_data.desc = desc;
+ match_data.dev = dev;
+ match_data.target = target;
+
+ guard(rwsem_read)(&pwrseq_sem);
+
+ ret = bus_for_each_dev(&pwrseq_bus, NULL, &match_data,
+ pwrseq_match_device);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret == 0)
+ /* No device matched. */
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return_ptr(desc);
+}
+EXPORT_SYMBOL_GPL(pwrseq_get);
+
+/**
+ * pwrseq_put() - Release the power sequencer descriptor.
+ * @desc: Descriptor to release.
+ */
+void pwrseq_put(struct pwrseq_desc *desc)
+{
+ struct pwrseq_device *pwrseq;
+
+ if (!desc)
+ return;
+
+ pwrseq = desc->pwrseq;
+
+ if (desc->powered_on)
+ pwrseq_power_off(desc);
+
+ kfree(desc);
+ module_put(pwrseq->owner);
+ pwrseq_device_put(pwrseq);
+}
+EXPORT_SYMBOL_GPL(pwrseq_put);
+
+static void devm_pwrseq_put(void *data)
+{
+ struct pwrseq_desc *desc = data;
+
+ pwrseq_put(desc);
+}
+
+/**
+ * devm_pwrseq_get() - Managed variant of pwrseq_get().
+ * @dev: Device for which to get the sequencer and which also manages its
+ * lifetime.
+ * @target: Name of the target exposed by the sequencer this device wants to
+ * reach.
+ *
+ * Returns:
+ * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
+ * on failure.
+ */
+struct pwrseq_desc *devm_pwrseq_get(struct device *dev, const char *target)
+{
+ struct pwrseq_desc *desc;
+ int ret;
+
+ desc = pwrseq_get(dev, target);
+ if (IS_ERR(desc))
+ return desc;
+
+ ret = devm_add_action_or_reset(dev, devm_pwrseq_put, desc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(devm_pwrseq_get);
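+
+/*
+ * A minimal consumer-side sketch, assuming the device matches a sequencer
+ * that exposes a "bluetooth" target (as the pwrseq-qcom-wcn provider below
+ * does):
+ *
+ *	struct pwrseq_desc *desc;
+ *
+ *	desc = devm_pwrseq_get(dev, "bluetooth");
+ *	if (IS_ERR(desc))
+ *		return dev_err_probe(dev, PTR_ERR(desc),
+ *				     "Failed to get the power sequencer\n");
+ */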
+
+static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
+ struct pwrseq_unit *target);
+static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
+ struct pwrseq_unit *target);
+
+static int pwrseq_unit_enable_deps(struct pwrseq_device *pwrseq,
+ struct list_head *list)
+{
+ struct pwrseq_unit_dep *pos;
+ int ret = 0;
+
+ list_for_each_entry(pos, list, list) {
+ ret = pwrseq_unit_enable(pwrseq, pos->unit);
+ if (ret) {
+ list_for_each_entry_continue_reverse(pos, list, list)
+ pwrseq_unit_disable(pwrseq, pos->unit);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int pwrseq_unit_disable_deps(struct pwrseq_device *pwrseq,
+ struct list_head *list)
+{
+ struct pwrseq_unit_dep *pos;
+ int ret = 0;
+
+ list_for_each_entry_reverse(pos, list, list) {
+ ret = pwrseq_unit_disable(pwrseq, pos->unit);
+ if (ret) {
+ list_for_each_entry_continue(pos, list, list)
+ pwrseq_unit_enable(pwrseq, pos->unit);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
+ struct pwrseq_unit *unit)
+{
+ int ret;
+
+ lockdep_assert_held_read(&pwrseq->rw_lock);
+ lockdep_assert_held(&pwrseq->state_lock);
+
+ if (unit->enable_count != 0) {
+ unit->enable_count++;
+ return 0;
+ }
+
+ ret = pwrseq_unit_enable_deps(pwrseq, &unit->deps);
+ if (ret) {
+ dev_err(&pwrseq->dev,
+ "Failed to enable dependencies before power-on for target '%s': %d\n",
+ unit->name, ret);
+ return ret;
+ }
+
+ if (unit->enable) {
+ ret = unit->enable(pwrseq);
+ if (ret) {
+ dev_err(&pwrseq->dev,
+ "Failed to enable target '%s': %d\n",
+ unit->name, ret);
+ pwrseq_unit_disable_deps(pwrseq, &unit->deps);
+ return ret;
+ }
+ }
+
+ unit->enable_count++;
+
+ return 0;
+}
+
+static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
+ struct pwrseq_unit *unit)
+{
+ int ret;
+
+ lockdep_assert_held_read(&pwrseq->rw_lock);
+ lockdep_assert_held(&pwrseq->state_lock);
+
+ if (unit->enable_count == 0) {
+ WARN(1, "Unmatched power-off for target '%s'\n",
+ unit->name);
+ return -EBUSY;
+ }
+
+ if (unit->enable_count != 1) {
+ unit->enable_count--;
+ return 0;
+ }
+
+ if (unit->disable) {
+ ret = unit->disable(pwrseq);
+ if (ret) {
+ dev_err(&pwrseq->dev,
+ "Failed to disable target '%s': %d\n",
+ unit->name, ret);
+ return ret;
+ }
+ }
+
+ ret = pwrseq_unit_disable_deps(pwrseq, &unit->deps);
+ if (ret) {
+ dev_err(&pwrseq->dev,
+ "Failed to disable dependencies after power-off for target '%s': %d\n",
+ unit->name, ret);
+ if (unit->enable)
+ unit->enable(pwrseq);
+ return ret;
+ }
+
+ unit->enable_count--;
+
+ return 0;
+}
+
+/**
+ * pwrseq_power_on() - Issue a power-on request on behalf of the consumer
+ * device.
+ * @desc: Descriptor referencing the power sequencer.
+ *
+ * This function tells the power sequencer that the consumer wants to be
+ * powered-up. The sequencer may already have powered-up the device, in which
+ * case the function returns 0. If the power-up sequence is already in
+ * progress, the function will block until it's done and return 0. If this is
+ * the first request, the device will be powered up.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
+ */
+int pwrseq_power_on(struct pwrseq_desc *desc)
+{
+ struct pwrseq_device *pwrseq;
+ struct pwrseq_target *target;
+ struct pwrseq_unit *unit;
+ int ret;
+
+ might_sleep();
+
+ if (!desc || desc->powered_on)
+ return 0;
+
+ pwrseq = desc->pwrseq;
+ target = desc->target;
+ unit = target->unit;
+
+ guard(rwsem_read)(&pwrseq->rw_lock);
+ if (!device_is_registered(&pwrseq->dev))
+ return -ENODEV;
+
+ scoped_guard(mutex, &pwrseq->state_lock) {
+ ret = pwrseq_unit_enable(pwrseq, unit);
+ if (ret)
+ return ret;
+
+ desc->powered_on = true;
+
+ if (target->post_enable) {
+ ret = target->post_enable(pwrseq);
+ if (ret) {
+ pwrseq_unit_disable(pwrseq, unit);
+ desc->powered_on = false;
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_power_on);
+
+/**
+ * pwrseq_power_off() - Issue a power-off request on behalf of the consumer
+ * device.
+ * @desc: Descriptor referencing the power sequencer.
+ *
+ * This undoes the effects of pwrseq_power_on(). It issues a power-off request
+ * on behalf of the consumer and when the last remaining user does so, the
+ * power-down sequence will be started. If one is in progress, the function
+ * will block until it's complete and then return.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
+ */
+int pwrseq_power_off(struct pwrseq_desc *desc)
+{
+ struct pwrseq_device *pwrseq;
+ struct pwrseq_unit *unit;
+ int ret;
+
+ might_sleep();
+
+ if (!desc || !desc->powered_on)
+ return 0;
+
+ pwrseq = desc->pwrseq;
+ unit = desc->target->unit;
+
+ guard(rwsem_read)(&pwrseq->rw_lock);
+ if (!device_is_registered(&pwrseq->dev))
+ return -ENODEV;
+
+ guard(mutex)(&pwrseq->state_lock);
+
+ ret = pwrseq_unit_disable(pwrseq, unit);
+ if (!ret)
+ desc->powered_on = false;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_power_off);
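+
+/*
+ * Typical consumer flow, sketched under the assumption that 'desc' was
+ * obtained with pwrseq_get() or devm_pwrseq_get():
+ *
+ *	ret = pwrseq_power_on(desc);
+ *	if (ret)
+ *		return ret;
+ *
+ *	... use the device ...
+ *
+ *	pwrseq_power_off(desc);
+ *
+ * Enables are reference-counted per unit, so when several consumers share a
+ * unit, only the first power-on and the last power-off actually run the
+ * hardware sequence.
+ */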
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+struct pwrseq_debugfs_count_ctx {
+ struct device *dev;
+ loff_t index;
+};
+
+static int pwrseq_debugfs_seq_count(struct device *dev, void *data)
+{
+ struct pwrseq_debugfs_count_ctx *ctx = data;
+
+ ctx->dev = dev;
+
+ return ctx->index-- ? 0 : 1;
+}
+
+static void *pwrseq_debugfs_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct pwrseq_debugfs_count_ctx ctx;
+
+ ctx.dev = NULL;
+ ctx.index = *pos;
+
+ /*
+ * We're holding the lock for the entire printout, so there's no need to
+ * fiddle with the device reference count.
+ */
+ down_read(&pwrseq_sem);
+
+ bus_for_each_dev(&pwrseq_bus, NULL, &ctx, pwrseq_debugfs_seq_count);
+ if (!ctx.index)
+ return NULL;
+
+ return ctx.dev;
+}
+
+static void *pwrseq_debugfs_seq_next(struct seq_file *seq, void *data,
+ loff_t *pos)
+{
+ struct device *curr = data;
+
+ ++*pos;
+
+ struct device *next __free(put_device) =
+ bus_find_next_device(&pwrseq_bus, curr);
+ return next;
+}
+
+static void pwrseq_debugfs_seq_show_target(struct seq_file *seq,
+ struct pwrseq_target *target)
+{
+ seq_printf(seq, " target: [%s] (target unit: [%s])\n",
+ target->name, target->unit->name);
+}
+
+static void pwrseq_debugfs_seq_show_unit(struct seq_file *seq,
+ struct pwrseq_unit *unit)
+{
+ struct pwrseq_unit_dep *ref;
+
+ seq_printf(seq, " unit: [%s] - enable count: %u\n",
+ unit->name, unit->enable_count);
+
+ if (list_empty(&unit->deps))
+ return;
+
+ seq_puts(seq, " dependencies:\n");
+ list_for_each_entry(ref, &unit->deps, list)
+ seq_printf(seq, " [%s]\n", ref->unit->name);
+}
+
+static int pwrseq_debugfs_seq_show(struct seq_file *seq, void *data)
+{
+ struct device *dev = data;
+ struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
+ struct pwrseq_target *target;
+ struct pwrseq_unit *unit;
+
+ seq_printf(seq, "%s:\n", dev_name(dev));
+
+ seq_puts(seq, " targets:\n");
+ list_for_each_entry(target, &pwrseq->targets, list)
+ pwrseq_debugfs_seq_show_target(seq, target);
+
+ seq_puts(seq, " units:\n");
+ list_for_each_entry(unit, &pwrseq->units, list)
+ pwrseq_debugfs_seq_show_unit(seq, unit);
+
+ return 0;
+}
+
+static void pwrseq_debugfs_seq_stop(struct seq_file *seq, void *data)
+{
+ up_read(&pwrseq_sem);
+}
+
+static const struct seq_operations pwrseq_debugfs_sops = {
+ .start = pwrseq_debugfs_seq_start,
+ .next = pwrseq_debugfs_seq_next,
+ .show = pwrseq_debugfs_seq_show,
+ .stop = pwrseq_debugfs_seq_stop,
+};
+DEFINE_SEQ_ATTRIBUTE(pwrseq_debugfs);
+
+static struct dentry *pwrseq_debugfs_dentry;
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __init pwrseq_init(void)
+{
+ int ret;
+
+ ret = bus_register(&pwrseq_bus);
+ if (ret) {
+ pr_err("Failed to register the power sequencer bus\n");
+ return ret;
+ }
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ pwrseq_debugfs_dentry = debugfs_create_file("pwrseq", 0444, NULL, NULL,
+ &pwrseq_debugfs_fops);
+#endif /* CONFIG_DEBUG_FS */
+
+ return 0;
+}
+subsys_initcall(pwrseq_init);
+
+static void __exit pwrseq_exit(void)
+{
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(pwrseq_debugfs_dentry);
+#endif /* CONFIG_DEBUG_FS */
+
+ bus_unregister(&pwrseq_bus);
+}
+module_exit(pwrseq_exit);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Power Sequencing subsystem core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c
new file mode 100644
index 000000000000..42dacfda745e
--- /dev/null
+++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pwrseq/provider.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+struct pwrseq_qcom_wcn_pdata {
+ const char *const *vregs;
+ size_t num_vregs;
+ unsigned int pwup_delay_ms;
+ unsigned int gpio_enable_delay_ms;
+};
+
+struct pwrseq_qcom_wcn_ctx {
+ struct pwrseq_device *pwrseq;
+ struct device_node *of_node;
+ const struct pwrseq_qcom_wcn_pdata *pdata;
+ struct regulator_bulk_data *regs;
+ struct gpio_desc *bt_gpio;
+ struct gpio_desc *wlan_gpio;
+ struct clk *clk;
+ unsigned long last_gpio_enable_jf;
+};
+
+static void pwrseq_qcom_wcn_ensure_gpio_delay(struct pwrseq_qcom_wcn_ctx *ctx)
+{
+ unsigned long diff_jiffies;
+ unsigned int diff_msecs;
+
+ if (!ctx->pdata->gpio_enable_delay_ms)
+ return;
+
+ diff_jiffies = jiffies - ctx->last_gpio_enable_jf;
+ diff_msecs = jiffies_to_msecs(diff_jiffies);
+
+ if (diff_msecs < ctx->pdata->gpio_enable_delay_ms)
+ msleep(ctx->pdata->gpio_enable_delay_ms - diff_msecs);
+}
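+
+/*
+ * Example of the delay logic above: with gpio_enable_delay_ms set to 100 (as
+ * in the QCA6390 data below), if only 40ms have elapsed since the previous
+ * enable-GPIO toggle, the next toggle sleeps for the remaining 60ms first.
+ */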
+
+static int pwrseq_qcom_wcn_vregs_enable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ return regulator_bulk_enable(ctx->pdata->num_vregs, ctx->regs);
+}
+
+static int pwrseq_qcom_wcn_vregs_disable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ return regulator_bulk_disable(ctx->pdata->num_vregs, ctx->regs);
+}
+
+static const struct pwrseq_unit_data pwrseq_qcom_wcn_vregs_unit_data = {
+ .name = "regulators-enable",
+ .enable = pwrseq_qcom_wcn_vregs_enable,
+ .disable = pwrseq_qcom_wcn_vregs_disable,
+};
+
+static int pwrseq_qcom_wcn_clk_enable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ return clk_prepare_enable(ctx->clk);
+}
+
+static int pwrseq_qcom_wcn_clk_disable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ clk_disable_unprepare(ctx->clk);
+
+ return 0;
+}
+
+static const struct pwrseq_unit_data pwrseq_qcom_wcn_clk_unit_data = {
+ .name = "clock-enable",
+ .enable = pwrseq_qcom_wcn_clk_enable,
+ .disable = pwrseq_qcom_wcn_clk_disable,
+};
+
+static const struct pwrseq_unit_data *pwrseq_qcom_wcn_unit_deps[] = {
+ &pwrseq_qcom_wcn_vregs_unit_data,
+ &pwrseq_qcom_wcn_clk_unit_data,
+ NULL
+};
+
+static int pwrseq_qcom_wcn_bt_enable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ pwrseq_qcom_wcn_ensure_gpio_delay(ctx);
+ gpiod_set_value_cansleep(ctx->bt_gpio, 1);
+ ctx->last_gpio_enable_jf = jiffies;
+
+ return 0;
+}
+
+static int pwrseq_qcom_wcn_bt_disable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ gpiod_set_value_cansleep(ctx->bt_gpio, 0);
+
+ return 0;
+}
+
+static const struct pwrseq_unit_data pwrseq_qcom_wcn_bt_unit_data = {
+ .name = "bluetooth-enable",
+ .deps = pwrseq_qcom_wcn_unit_deps,
+ .enable = pwrseq_qcom_wcn_bt_enable,
+ .disable = pwrseq_qcom_wcn_bt_disable,
+};
+
+static int pwrseq_qcom_wcn_wlan_enable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ pwrseq_qcom_wcn_ensure_gpio_delay(ctx);
+ gpiod_set_value_cansleep(ctx->wlan_gpio, 1);
+ ctx->last_gpio_enable_jf = jiffies;
+
+ return 0;
+}
+
+static int pwrseq_qcom_wcn_wlan_disable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ gpiod_set_value_cansleep(ctx->wlan_gpio, 0);
+
+ return 0;
+}
+
+static const struct pwrseq_unit_data pwrseq_qcom_wcn_wlan_unit_data = {
+ .name = "wlan-enable",
+ .deps = pwrseq_qcom_wcn_unit_deps,
+ .enable = pwrseq_qcom_wcn_wlan_enable,
+ .disable = pwrseq_qcom_wcn_wlan_disable,
+};
+
+static int pwrseq_qcom_wcn_pwup_delay(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+ if (ctx->pdata->pwup_delay_ms)
+ msleep(ctx->pdata->pwup_delay_ms);
+
+ return 0;
+}
+
+static const struct pwrseq_target_data pwrseq_qcom_wcn_bt_target_data = {
+ .name = "bluetooth",
+ .unit = &pwrseq_qcom_wcn_bt_unit_data,
+ .post_enable = pwrseq_qcom_wcn_pwup_delay,
+};
+
+static const struct pwrseq_target_data pwrseq_qcom_wcn_wlan_target_data = {
+ .name = "wlan",
+ .unit = &pwrseq_qcom_wcn_wlan_unit_data,
+ .post_enable = pwrseq_qcom_wcn_pwup_delay,
+};
+
+static const struct pwrseq_target_data *pwrseq_qcom_wcn_targets[] = {
+ &pwrseq_qcom_wcn_bt_target_data,
+ &pwrseq_qcom_wcn_wlan_target_data,
+ NULL
+};
+
+static const char *const pwrseq_qca6390_vregs[] = {
+ "vddio",
+ "vddaon",
+ "vddpmu",
+ "vddrfa0p95",
+ "vddrfa1p3",
+ "vddrfa1p9",
+ "vddpcie1p3",
+ "vddpcie1p9",
+};
+
+static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = {
+ .vregs = pwrseq_qca6390_vregs,
+ .num_vregs = ARRAY_SIZE(pwrseq_qca6390_vregs),
+ .pwup_delay_ms = 60,
+ .gpio_enable_delay_ms = 100,
+};
+
+static const char *const pwrseq_wcn7850_vregs[] = {
+ "vdd",
+ "vddio",
+ "vddio1p2",
+ "vddaon",
+ "vdddig",
+ "vddrfa1p2",
+ "vddrfa1p8",
+};
+
+static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn7850_of_data = {
+ .vregs = pwrseq_wcn7850_vregs,
+ .num_vregs = ARRAY_SIZE(pwrseq_wcn7850_vregs),
+ .pwup_delay_ms = 50,
+};
+
+static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq,
+ struct device *dev)
+{
+ struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+ struct device_node *dev_node = dev->of_node;
+
+ /*
+ * The PMU supplies power to the Bluetooth and WLAN modules. Both
+ * consume the PMU AON output, so check the presence of the
+ * 'vddaon-supply' property and whether it leads us to the right
+ * device.
+ */
+ if (!of_property_present(dev_node, "vddaon-supply"))
+ return 0;
+
+ struct device_node *reg_node __free(device_node) =
+ of_parse_phandle(dev_node, "vddaon-supply", 0);
+ if (!reg_node)
+ return 0;
+
+ /*
+ * `reg_node` is the PMU AON regulator; its parent is the `regulators`
+ * node and its grandparent is the PMU device node that we're
+ * looking for.
+ */
+ if (!reg_node->parent || !reg_node->parent->parent ||
+ reg_node->parent->parent != ctx->of_node)
+ return 0;
+
+ return 1;
+}
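+
+/*
+ * The matching above assumes a device tree shaped roughly like the following
+ * (labels and node names are illustrative):
+ *
+ *	pmu {
+ *		compatible = "qcom,qca6390-pmu";
+ *		regulators {
+ *			vreg_pmu_aon: ldo0 { };
+ *		};
+ *	};
+ *
+ *	bluetooth {
+ *		vddaon-supply = <&vreg_pmu_aon>;
+ *	};
+ *
+ * i.e. the consumer's vddaon-supply phandle resolves to a regulator whose
+ * grandparent is the PMU node handled by this driver.
+ */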
+
+static int pwrseq_qcom_wcn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pwrseq_qcom_wcn_ctx *ctx;
+ struct pwrseq_config config;
+ int i, ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->of_node = dev->of_node;
+
+ ctx->pdata = of_device_get_match_data(dev);
+ if (!ctx->pdata)
+ return dev_err_probe(dev, -ENODEV,
+ "Failed to obtain platform data\n");
+
+ ctx->regs = devm_kcalloc(dev, ctx->pdata->num_vregs,
+ sizeof(*ctx->regs), GFP_KERNEL);
+ if (!ctx->regs)
+ return -ENOMEM;
+
+ for (i = 0; i < ctx->pdata->num_vregs; i++)
+ ctx->regs[i].supply = ctx->pdata->vregs[i];
+
+ ret = devm_regulator_bulk_get(dev, ctx->pdata->num_vregs, ctx->regs);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to get all regulators\n");
+
+ ctx->bt_gpio = devm_gpiod_get_optional(dev, "bt-enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->bt_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->bt_gpio),
+ "Failed to get the Bluetooth enable GPIO\n");
+
+ ctx->wlan_gpio = devm_gpiod_get_optional(dev, "wlan-enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->wlan_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->wlan_gpio),
+ "Failed to get the WLAN enable GPIO\n");
+
+ ctx->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ctx->clk))
+ return dev_err_probe(dev, PTR_ERR(ctx->clk),
+ "Failed to get the reference clock\n");
+
+ memset(&config, 0, sizeof(config));
+
+ config.parent = dev;
+ config.owner = THIS_MODULE;
+ config.drvdata = ctx;
+ config.match = pwrseq_qcom_wcn_match;
+ config.targets = pwrseq_qcom_wcn_targets;
+
+ ctx->pwrseq = devm_pwrseq_device_register(dev, &config);
+ if (IS_ERR(ctx->pwrseq))
+ return dev_err_probe(dev, PTR_ERR(ctx->pwrseq),
+ "Failed to register the power sequencer\n");
+
+ return 0;
+}
+
+static const struct of_device_id pwrseq_qcom_wcn_of_match[] = {
+ {
+ .compatible = "qcom,qca6390-pmu",
+ .data = &pwrseq_qca6390_of_data,
+ },
+ {
+ .compatible = "qcom,wcn7850-pmu",
+ .data = &pwrseq_wcn7850_of_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pwrseq_qcom_wcn_of_match);
+
+static struct platform_driver pwrseq_qcom_wcn_driver = {
+ .driver = {
+ .name = "pwrseq-qcom_wcn",
+ .of_match_table = pwrseq_qcom_wcn_of_match,
+ },
+ .probe = pwrseq_qcom_wcn_probe,
+};
+module_platform_driver(pwrseq_qcom_wcn_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm WCN PMU power sequencing driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 3e31375491d5..f6321a42aa53 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -860,6 +860,18 @@ config CHARGER_CROS_PCHG
the peripheral charge ports from the EC and converts that into
power_supply properties.
+config CHARGER_CROS_CONTROL
+ tristate "ChromeOS EC based charge control"
+ depends on MFD_CROS_EC_DEV
+ depends on ACPI_BATTERY
+ default MFD_CROS_EC_DEV
+ help
+ Say Y here to enable ChromeOS EC based battery charge control.
+ This driver can manage charge thresholds and behaviour.
+
+ This driver can also be built as a module. If so, the module will be
+ called cros_charge-control.
+
config CHARGER_SC2731
tristate "Spreadtrum SC2731 charger driver"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 58b567278034..31ca6653a564 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
+obj-$(CONFIG_CHARGER_CROS_CONTROL) += cros_charge-control.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
obj-$(CONFIG_CHARGER_CROS_PCHG) += cros_peripheral_charger.o
obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
new file mode 100644
index 000000000000..17c53591ce19
--- /dev/null
+++ b/drivers/power/supply/cros_charge-control.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ChromeOS EC driver for charge control
+ *
+ * Copyright (C) 2024 Thomas Weißschuh <linux@weissschuh.net>
+ */
+#include <acpi/battery.h>
+#include <linux/container_of.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define EC_CHARGE_CONTROL_BEHAVIOURS (BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) | \
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE) | \
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
+
+enum CROS_CHCTL_ATTR {
+ CROS_CHCTL_ATTR_START_THRESHOLD,
+ CROS_CHCTL_ATTR_END_THRESHOLD,
+ CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR,
+ _CROS_CHCTL_ATTR_COUNT
+};
+
+/*
+ * The semantics of data *returned* from the EC API and Linux sysfs differ
+ * slightly; also, the v1 API cannot return any data.
+ * To match the expected sysfs API, data is never read back from the EC but
+ * cached in the driver.
+ *
+ * Changes to the EC bypassing the driver will not be reflected in sysfs.
+ * Any change to "charge_behaviour" will synchronize the EC with the driver state.
+ */
+
+struct cros_chctl_priv {
+ struct cros_ec_device *cros_ec;
+ struct acpi_battery_hook battery_hook;
+ struct power_supply *hooked_battery;
+ u8 cmd_version;
+
+ /* The callbacks need to access this priv structure.
+ * As neither the struct device nor the power_supply is under the driver's
+ * control, embed the attributes within priv to use with container_of().
+ */
+ struct device_attribute device_attrs[_CROS_CHCTL_ATTR_COUNT];
+ struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT];
+ struct attribute_group group;
+
+ enum power_supply_charge_behaviour current_behaviour;
+ u8 current_start_threshold, current_end_threshold;
+};
+
+static int cros_chctl_send_charge_control_cmd(struct cros_ec_device *cros_ec,
+ u8 cmd_version, struct ec_params_charge_control *req)
+{
+ static const u8 outsizes[] = {
+ [1] = offsetof(struct ec_params_charge_control, cmd),
+ [2] = sizeof(struct ec_params_charge_control),
+ [3] = sizeof(struct ec_params_charge_control),
+ };
+
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_params_charge_control req;
+ struct ec_response_charge_control resp;
+ } __packed data;
+ } __packed buf = {
+ .msg = {
+ .command = EC_CMD_CHARGE_CONTROL,
+ .version = cmd_version,
+ .insize = 0,
+ .outsize = outsizes[cmd_version],
+ },
+ .data.req = *req,
+ };
+
+ return cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+}
+
+static int cros_chctl_configure_ec(struct cros_chctl_priv *priv)
+{
+ struct ec_params_charge_control req = {};
+
+ req.cmd = EC_CHARGE_CONTROL_CMD_SET;
+
+ switch (priv->current_behaviour) {
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+ req.mode = CHARGE_CONTROL_NORMAL;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+ req.mode = CHARGE_CONTROL_IDLE;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE:
+ req.mode = CHARGE_CONTROL_DISCHARGE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (priv->current_behaviour == POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO &&
+ !(priv->current_start_threshold == 0 && priv->current_end_threshold == 100)) {
+ req.sustain_soc.lower = priv->current_start_threshold;
+ req.sustain_soc.upper = priv->current_end_threshold;
+ } else {
+ /* Disable charging limits */
+ req.sustain_soc.lower = -1;
+ req.sustain_soc.upper = -1;
+ }
+
+ return cros_chctl_send_charge_control_cmd(priv->cros_ec, priv->cmd_version, &req);
+}
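+
+/*
+ * Example of the mapping above: with the "auto" behaviour and thresholds of
+ * 70/80, the EC is asked to sustain the state of charge between 70% and 80%
+ * (sustain_soc.lower = 70, .upper = 80). With the default 0/100 thresholds,
+ * or with any non-auto behaviour, sustaining is disabled via -1/-1.
+ */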
+
+static struct cros_chctl_priv *cros_chctl_attr_to_priv(struct attribute *attr,
+ enum CROS_CHCTL_ATTR idx)
+{
+ struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr);
+
+ return container_of(dev_attr, struct cros_chctl_priv, device_attrs[idx]);
+}
+
+static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_priv *priv,
+ int is_end_threshold, const char *buf, size_t count)
+{
+ int ret, val;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val < 0 || val > 100)
+ return -EINVAL;
+
+ if (is_end_threshold) {
+ if (val <= priv->current_start_threshold)
+ return -EINVAL;
+ priv->current_end_threshold = val;
+ } else {
+ if (val >= priv->current_end_threshold)
+ return -EINVAL;
+ priv->current_start_threshold = val;
+ }
+
+ if (priv->current_behaviour == POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) {
+ ret = cros_chctl_configure_ec(priv);
+ if (ret < 0)
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_START_THRESHOLD);
+
+ return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_START_THRESHOLD);
+
+ return cros_chctl_store_threshold(dev, priv, 0, buf, count);
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_END_THRESHOLD);
+
+ return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold);
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_END_THRESHOLD);
+
+ return cros_chctl_store_threshold(dev, priv, 1, buf, count);
+}
+
+static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
+
+ return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS,
+ priv->current_behaviour, buf);
+}
+
+static ssize_t charge_behaviour_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
+ int ret;
+
+ ret = power_supply_charge_behaviour_parse(EC_CHARGE_CONTROL_BEHAVIOURS, buf);
+ if (ret < 0)
+ return ret;
+
+ priv->current_behaviour = ret;
+
+ ret = cros_chctl_configure_ec(priv);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n);
+
+ if (priv->cmd_version < 2) {
+ if (n == CROS_CHCTL_ATTR_START_THRESHOLD)
+ return 0;
+ if (n == CROS_CHCTL_ATTR_END_THRESHOLD)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static int cros_chctl_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct cros_chctl_priv *priv = container_of(hook, struct cros_chctl_priv, battery_hook);
+
+ if (priv->hooked_battery)
+ return 0;
+
+ priv->hooked_battery = battery;
+ return device_add_group(&battery->dev, &priv->group);
+}
+
+static int cros_chctl_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct cros_chctl_priv *priv = container_of(hook, struct cros_chctl_priv, battery_hook);
+
+ if (priv->hooked_battery == battery) {
+ device_remove_group(&battery->dev, &priv->group);
+ priv->hooked_battery = NULL;
+ }
+
+ return 0;
+}
+
+static bool probe_with_fwk_charge_control;
+module_param(probe_with_fwk_charge_control, bool, 0644);
+MODULE_PARM_DESC(probe_with_fwk_charge_control,
+ "Probe the driver in the presence of the custom Framework EC charge control");
+
+static int cros_chctl_fwk_charge_control_versions(struct cros_ec_device *cros_ec)
+{
+ if (!dmi_match(DMI_SYS_VENDOR, "Framework"))
+ return 0;
+
+ return cros_ec_get_cmd_versions(cros_ec, 0x3E03 /* FW_EC_CMD_CHARGE_LIMIT */);
+}
+
+static int cros_chctl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct cros_chctl_priv *priv;
+ size_t i;
+ int ret;
+
+ ret = cros_chctl_fwk_charge_control_versions(cros_ec);
+ if (ret < 0)
+ return ret;
+ if (ret > 0 && !probe_with_fwk_charge_control) {
+ dev_info(dev, "Framework charge control detected, preventing load\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = cros_ec_get_cmd_versions(cros_ec, EC_CMD_CHARGE_CONTROL);
+ if (ret < 0)
+ return ret;
+ else if (ret & EC_VER_MASK(3))
+ priv->cmd_version = 3;
+ else if (ret & EC_VER_MASK(2))
+ priv->cmd_version = 2;
+ else if (ret & EC_VER_MASK(1))
+ priv->cmd_version = 1;
+ else
+ return -ENODEV;
+
+ dev_dbg(dev, "Command version: %u\n", (unsigned int)priv->cmd_version);
+
+ priv->cros_ec = cros_ec;
+ priv->device_attrs[CROS_CHCTL_ATTR_START_THRESHOLD] =
+ (struct device_attribute)__ATTR_RW(charge_control_start_threshold);
+ priv->device_attrs[CROS_CHCTL_ATTR_END_THRESHOLD] =
+ (struct device_attribute)__ATTR_RW(charge_control_end_threshold);
+ priv->device_attrs[CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR] =
+ (struct device_attribute)__ATTR_RW(charge_behaviour);
+ for (i = 0; i < _CROS_CHCTL_ATTR_COUNT; i++) {
+ sysfs_attr_init(&priv->device_attrs[i].attr);
+ priv->attributes[i] = &priv->device_attrs[i].attr;
+ }
+ priv->group.is_visible = cros_chtl_attr_is_visible;
+ priv->group.attrs = priv->attributes;
+
+ priv->battery_hook.name = dev_name(dev);
+ priv->battery_hook.add_battery = cros_chctl_add_battery;
+ priv->battery_hook.remove_battery = cros_chctl_remove_battery;
+
+ priv->current_behaviour = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+ priv->current_start_threshold = 0;
+ priv->current_end_threshold = 100;
+
+ /* Bring EC into well-known state */
+ ret = cros_chctl_configure_ec(priv);
+ if (ret < 0)
+ return ret;
+
+ return devm_battery_hook_register(dev, &priv->battery_hook);
+}
+
+static const struct platform_device_id cros_chctl_id[] = {
+ { "cros-charge-control", 0 },
+ {}
+};
+
+static struct platform_driver cros_chctl_driver = {
+ .driver.name = "cros-charge-control",
+ .probe = cros_chctl_probe,
+ .id_table = cros_chctl_id,
+};
+module_platform_driver(cros_chctl_driver);
+
+MODULE_DEVICE_TABLE(platform, cros_chctl_id);
+MODULE_DESCRIPTION("ChromeOS EC charge control");
+MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index c7db29d5fcb8..73935de844d9 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -22,6 +22,8 @@
static void power_supply_update_bat_leds(struct power_supply *psy)
{
union power_supply_propval status;
+ unsigned int intensity_green[3] = { 0, 255, 0 };
+ unsigned int intensity_orange[3] = { 255, 128, 0 };
if (power_supply_get_property(psy, POWER_SUPPLY_PROP_STATUS, &status))
return;
@@ -36,12 +38,20 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
/* Going from blink to LED on requires a LED_OFF event to stop blink */
led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
+ led_mc_trigger_event(psy->charging_orange_full_green_trig,
+ intensity_green,
+ ARRAY_SIZE(intensity_green),
+ LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_FULL);
led_trigger_event(psy->full_trig, LED_OFF);
led_trigger_blink(psy->charging_blink_full_solid_trig, 0, 0);
+ led_mc_trigger_event(psy->charging_orange_full_green_trig,
+ intensity_orange,
+ ARRAY_SIZE(intensity_orange),
+ LED_FULL);
break;
default:
led_trigger_event(psy->charging_full_trig, LED_OFF);
@@ -49,6 +59,8 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->full_trig, LED_OFF);
led_trigger_event(psy->charging_blink_full_solid_trig,
LED_OFF);
+ led_trigger_event(psy->charging_orange_full_green_trig,
+ LED_OFF);
break;
}
}
@@ -74,6 +86,11 @@ static int power_supply_create_bat_triggers(struct power_supply *psy)
if (!psy->charging_blink_full_solid_trig_name)
goto charging_blink_full_solid_failed;
+ psy->charging_orange_full_green_trig_name = kasprintf(GFP_KERNEL,
+ "%s-charging-orange-full-green", psy->desc->name);
+ if (!psy->charging_orange_full_green_trig_name)
+ goto charging_orange_full_green_failed;
+
led_trigger_register_simple(psy->charging_full_trig_name,
&psy->charging_full_trig);
led_trigger_register_simple(psy->charging_trig_name,
@@ -82,9 +99,13 @@ static int power_supply_create_bat_triggers(struct power_supply *psy)
&psy->full_trig);
led_trigger_register_simple(psy->charging_blink_full_solid_trig_name,
&psy->charging_blink_full_solid_trig);
+ led_trigger_register_simple(psy->charging_orange_full_green_trig_name,
+ &psy->charging_orange_full_green_trig);
return 0;
+charging_orange_full_green_failed:
+ kfree(psy->charging_blink_full_solid_trig_name);
charging_blink_full_solid_failed:
kfree(psy->full_trig_name);
full_failed:
@@ -101,10 +122,12 @@ static void power_supply_remove_bat_triggers(struct power_supply *psy)
led_trigger_unregister_simple(psy->charging_trig);
led_trigger_unregister_simple(psy->full_trig);
led_trigger_unregister_simple(psy->charging_blink_full_solid_trig);
+ led_trigger_unregister_simple(psy->charging_orange_full_green_trig);
kfree(psy->charging_blink_full_solid_trig_name);
kfree(psy->full_trig_name);
kfree(psy->charging_trig_name);
kfree(psy->charging_full_trig_name);
+ kfree(psy->charging_orange_full_green_trig_name);
}
/* Generated power specific LEDs triggers. */
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index e18a2cc4e46a..bafc59904ed3 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -127,7 +127,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
struct idle_inject_device *ii_dev =
container_of(timer, struct idle_inject_device, timer);
- if (!ii_dev->update || (ii_dev->update && ii_dev->update()))
+ if (!ii_dev->update || ii_dev->update())
idle_inject_wakeup(ii_dev);
duration_us = READ_ONCE(ii_dev->run_duration_us);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index aac0744011a3..3cffa6c79538 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1222,66 +1222,66 @@ static const struct rapl_defaults rapl_defaults_amd = {
};
static const struct x86_cpu_id rapl_ids[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &rapl_defaults_hsw_server),
-
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &rapl_defaults_hsw_server),
-
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &rapl_defaults_tng),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &rapl_defaults_ann),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_HASWELL, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_HASWELL_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &rapl_defaults_hsw_server),
+
+ X86_MATCH_VFM(INTEL_BROADWELL, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &rapl_defaults_hsw_server),
+
+ X86_MATCH_VFM(INTEL_SKYLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_METEORLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_LAKEFIELD, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &rapl_defaults_cht),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &rapl_defaults_ann),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &rapl_defaults_hsw_server),
X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd),
X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd),
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 35cb152fa9aa..733a36f67fbc 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -139,14 +139,14 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
/* List of verified CPUs. */
static const struct x86_cpu_id pl4_support_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
{}
};
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 7513018c9f9a..2067b0120d08 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
}
if (info->verify(info, pin, func, chan)) {
- pr_err("driver cannot use function %u on pin %u\n", func, chan);
+ pr_err("driver cannot use function %u and channel %u on pin %u\n",
+ func, chan, pin);
return -EOPNOTSUPP;
}
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 385643f3f8fe..e6f7d2bf8dde 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -556,7 +556,7 @@ static bool ines_timestamp_expired(struct ines_timestamp *ts)
}
static int ines_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index a15460aaa03b..6b1b8f57cd95 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -296,8 +296,7 @@ static ssize_t max_vclocks_store(struct device *dev,
if (max < ptp->n_vclocks)
goto out;
- size = sizeof(int) * max;
- vclock_index = kzalloc(size, GFP_KERNEL);
+ vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
if (!vclock_index) {
err = -ENOMEM;
goto out;
diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c
index 7ec90359428a..20ab05c4daa8 100644
--- a/drivers/ptp/ptp_vmw.c
+++ b/drivers/ptp/ptp_vmw.c
@@ -14,7 +14,6 @@
#include <asm/hypervisor.h>
#include <asm/vmware.h>
-#define VMWARE_MAGIC 0x564D5868
#define VMWARE_CMD_PCLK(nr) ((nr << 16) | 97)
#define VMWARE_CMD_PCLK_GETTIME VMWARE_CMD_PCLK(0)
@@ -24,15 +23,10 @@ static struct ptp_clock *ptp_vmw_clock;
static int ptp_vmw_pclk_read(u64 *ns)
{
- u32 ret, nsec_hi, nsec_lo, unused1, unused2, unused3;
-
- asm volatile (VMWARE_HYPERCALL :
- "=a"(ret), "=b"(nsec_hi), "=c"(nsec_lo), "=d"(unused1),
- "=S"(unused2), "=D"(unused3) :
- "a"(VMWARE_MAGIC), "b"(0),
- "c"(VMWARE_CMD_PCLK_GETTIME), "d"(0) :
- "memory");
+ u32 ret, nsec_hi, nsec_lo;
+ ret = vmware_hypercall3(VMWARE_CMD_PCLK_GETTIME, 0,
+ &nsec_hi, &nsec_lo);
if (ret == 0)
*ns = ((u64)nsec_hi << 32) | nsec_lo;
return ret;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 1dd7921194f5..3e53838990f5 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -94,6 +94,19 @@ config PWM_ATMEL_TCB
To compile this driver as a module, choose M here: the module
will be called pwm-atmel-tcb.
+config PWM_AXI_PWMGEN
+ tristate "Analog Devices AXI PWM generator"
+ depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This enables support for the Analog Devices AXI PWM generator.
+
+ This is a configurable PWM generator with variable pulse width and
+ period.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pwm-axi-pwmgen.
+
config PWM_BCM_IPROC
tristate "iProc PWM support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -223,6 +236,17 @@ config PWM_FSL_FTM
To compile this driver as a module, choose M here: the module
will be called pwm-fsl-ftm.
+config PWM_GPIO
+ tristate "GPIO PWM support"
+ depends on GPIOLIB
+ depends on HIGH_RES_TIMERS
+ help
+ Generic PWM framework driver for software PWM toggling a GPIO pin
+ from kernel high-resolution timers.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-gpio.
+
config PWM_HIBVT
tristate "HiSilicon BVT PWM support"
depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 90913519f11a..0be4f3e6dd43 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PWM_APPLE) += pwm-apple.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
+obj-$(CONFIG_PWM_AXI_PWMGEN) += pwm-axi-pwmgen.o
obj-$(CONFIG_PWM_BCM_IPROC) += pwm-bcm-iproc.o
obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_PWM_DWC_CORE) += pwm-dwc-core.o
obj-$(CONFIG_PWM_DWC) += pwm-dwc.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
+obj-$(CONFIG_PWM_GPIO) += pwm-gpio.o
obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 18574857641e..8acbcf5b6673 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -6,6 +6,8 @@
* Copyright (C) 2011-2012 Avionic Design GmbH
*/
+#define DEFAULT_SYMBOL_NAMESPACE PWM
+
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/idr.h>
@@ -135,6 +137,25 @@ static void pwm_apply_debug(struct pwm_device *pwm,
}
}
+static bool pwm_state_valid(const struct pwm_state *state)
+{
+ /*
+ * For a disabled state, all other state description is irrelevant and
+ * supposed to be ignored. So also ignore any strange values and
+ * consider the state ok.
+ */
+ if (state->enabled)
+ return true;
+
+ if (!state->period)
+ return false;
+
+ if (state->duty_cycle > state->period)
+ return false;
+
+ return true;
+}
+
/**
* __pwm_apply() - atomically apply a new state to a PWM device
* @pwm: PWM device
@@ -145,9 +166,25 @@ static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
struct pwm_chip *chip;
int err;
- if (!pwm || !state || !state->period ||
- state->duty_cycle > state->period)
+ if (!pwm || !state)
+ return -EINVAL;
+
+ if (!pwm_state_valid(state)) {
+ /*
+ * Allow to transition from one invalid state to another.
+ * This ensures that you can e.g. change the polarity while
+ * the period is zero. (This happens on stm32 when the hardware
+ * is in its poweron default state.) This greatly simplifies
+ * working with the sysfs API where you can only change one
+ * parameter at a time.
+ */
+ if (!pwm_state_valid(&pwm->state)) {
+ pwm->state = *state;
+ return 0;
+ }
+
return -EINVAL;
+ }
chip = pwm->chip;
@@ -291,19 +328,15 @@ EXPORT_SYMBOL_GPL(pwm_adjust_config);
int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
unsigned long timeout)
{
- int err;
-
if (!pwm || !pwm->chip->ops)
return -EINVAL;
if (!pwm->chip->ops->capture)
return -ENOSYS;
- mutex_lock(&pwm_lock);
- err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
- mutex_unlock(&pwm_lock);
+ guard(mutex)(&pwm_lock);
- return err;
+ return pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
}
EXPORT_SYMBOL_GPL(pwm_capture);
@@ -315,19 +348,15 @@ static struct pwm_chip *pwmchip_find_by_name(const char *name)
if (!name)
return NULL;
- mutex_lock(&pwm_lock);
+ guard(mutex)(&pwm_lock);
idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) {
const char *chip_name = dev_name(pwmchip_parent(chip));
- if (chip_name && strcmp(chip_name, name) == 0) {
- mutex_unlock(&pwm_lock);
+ if (chip_name && strcmp(chip_name, name) == 0)
return chip;
- }
}
- mutex_unlock(&pwm_lock);
-
return NULL;
}
@@ -394,9 +423,9 @@ err_get_device:
* chip. A negative error code is returned if the index is not valid for the
* specified PWM chip or if the PWM device cannot be requested.
*/
-struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label)
+static struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
{
struct pwm_device *pwm;
int err;
@@ -404,18 +433,16 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
if (!chip || index >= chip->npwm)
return ERR_PTR(-EINVAL);
- mutex_lock(&pwm_lock);
+ guard(mutex)(&pwm_lock);
+
pwm = &chip->pwms[index];
err = pwm_device_request(pwm, label);
if (err < 0)
- pwm = ERR_PTR(err);
+ return ERR_PTR(err);
- mutex_unlock(&pwm_lock);
return pwm;
}
-EXPORT_SYMBOL_GPL(pwm_request_from_chip);
-
struct pwm_device *
of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args)
@@ -511,11 +538,11 @@ static ssize_t period_store(struct device *pwm_dev,
if (ret)
return ret;
- mutex_lock(&export->lock);
+ guard(mutex)(&export->lock);
+
pwm_get_state(pwm, &state);
state.period = val;
ret = pwm_apply_might_sleep(pwm, &state);
- mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -546,11 +573,11 @@ static ssize_t duty_cycle_store(struct device *pwm_dev,
if (ret)
return ret;
- mutex_lock(&export->lock);
+ guard(mutex)(&export->lock);
+
pwm_get_state(pwm, &state);
state.duty_cycle = val;
ret = pwm_apply_might_sleep(pwm, &state);
- mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -580,7 +607,7 @@ static ssize_t enable_store(struct device *pwm_dev,
if (ret)
return ret;
- mutex_lock(&export->lock);
+ guard(mutex)(&export->lock);
pwm_get_state(pwm, &state);
@@ -592,14 +619,11 @@ static ssize_t enable_store(struct device *pwm_dev,
state.enabled = true;
break;
default:
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
ret = pwm_apply_might_sleep(pwm, &state);
-unlock:
- mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -643,11 +667,11 @@ static ssize_t polarity_store(struct device *pwm_dev,
else
return -EINVAL;
- mutex_lock(&export->lock);
+ guard(mutex)(&export->lock);
+
pwm_get_state(pwm, &state);
state.polarity = polarity;
ret = pwm_apply_might_sleep(pwm, &state);
- mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -1102,11 +1126,11 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
chip->owner = owner;
- mutex_lock(&pwm_lock);
+ guard(mutex)(&pwm_lock);
ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL);
if (ret < 0)
- goto err_idr_alloc;
+ return ret;
chip->id = ret;
@@ -1119,8 +1143,6 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
if (ret)
goto err_device_add;
- mutex_unlock(&pwm_lock);
-
return 0;
err_device_add:
@@ -1128,9 +1150,6 @@ err_device_add:
of_pwmchip_remove(chip);
idr_remove(&pwm_chips, chip->id);
-err_idr_alloc:
-
- mutex_unlock(&pwm_lock);
return ret;
}
@@ -1149,11 +1168,8 @@ void pwmchip_remove(struct pwm_chip *chip)
if (IS_ENABLED(CONFIG_OF))
of_pwmchip_remove(chip);
- mutex_lock(&pwm_lock);
-
- idr_remove(&pwm_chips, chip->id);
-
- mutex_unlock(&pwm_lock);
+ scoped_guard(mutex, &pwm_lock)
+ idr_remove(&pwm_chips, chip->id);
device_del(&chip->dev);
}
@@ -1209,15 +1225,11 @@ static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
struct pwm_chip *chip;
unsigned long id, tmp;
- mutex_lock(&pwm_lock);
+ guard(mutex)(&pwm_lock);
idr_for_each_entry_ul(&pwm_chips, chip, tmp, id)
- if (pwmchip_parent(chip) && device_match_fwnode(pwmchip_parent(chip), fwnode)) {
- mutex_unlock(&pwm_lock);
+ if (pwmchip_parent(chip) && device_match_fwnode(pwmchip_parent(chip), fwnode))
return chip;
- }
-
- mutex_unlock(&pwm_lock);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -1366,14 +1378,12 @@ static LIST_HEAD(pwm_lookup_list);
*/
void pwm_add_table(struct pwm_lookup *table, size_t num)
{
- mutex_lock(&pwm_lookup_lock);
+ guard(mutex)(&pwm_lookup_lock);
while (num--) {
list_add_tail(&table->list, &pwm_lookup_list);
table++;
}
-
- mutex_unlock(&pwm_lookup_lock);
}
/**
@@ -1383,14 +1393,12 @@ void pwm_add_table(struct pwm_lookup *table, size_t num)
*/
void pwm_remove_table(struct pwm_lookup *table, size_t num)
{
- mutex_lock(&pwm_lookup_lock);
+ guard(mutex)(&pwm_lookup_lock);
while (num--) {
list_del(&table->list);
table++;
}
-
- mutex_unlock(&pwm_lookup_lock);
}
/**
@@ -1451,36 +1459,33 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
* Then we take the most specific entry - with the following order
* of precedence: dev+con > dev only > con only.
*/
- mutex_lock(&pwm_lookup_lock);
-
- list_for_each_entry(p, &pwm_lookup_list, list) {
- match = 0;
+ scoped_guard(mutex, &pwm_lookup_lock)
+ list_for_each_entry(p, &pwm_lookup_list, list) {
+ match = 0;
- if (p->dev_id) {
- if (!dev_id || strcmp(p->dev_id, dev_id))
- continue;
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
- match += 2;
- }
+ match += 2;
+ }
- if (p->con_id) {
- if (!con_id || strcmp(p->con_id, con_id))
- continue;
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
- match += 1;
- }
+ match += 1;
+ }
- if (match > best) {
- chosen = p;
+ if (match > best) {
+ chosen = p;
- if (match != 3)
- best = match;
- else
- break;
+ if (match != 3)
+ best = match;
+ else
+ break;
+ }
}
- }
-
- mutex_unlock(&pwm_lookup_lock);
if (!chosen)
return ERR_PTR(-ENODEV);
@@ -1532,11 +1537,11 @@ void pwm_put(struct pwm_device *pwm)
chip = pwm->chip;
- mutex_lock(&pwm_lock);
+ guard(mutex)(&pwm_lock);
if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
pr_warn("PWM device already freed\n");
- goto out;
+ return;
}
if (chip->ops->free)
@@ -1547,8 +1552,6 @@ void pwm_put(struct pwm_device *pwm)
put_device(&chip->dev);
module_put(chip->owner);
-out:
- mutex_unlock(&pwm_lock);
}
EXPORT_SYMBOL_GPL(pwm_put);
@@ -1705,9 +1708,17 @@ DEFINE_SEQ_ATTRIBUTE(pwm_debugfs);
static int __init pwm_init(void)
{
+ int ret;
+
+ ret = class_register(&pwm_class);
+ if (ret) {
+ pr_err("Failed to initialize PWM class (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
if (IS_ENABLED(CONFIG_DEBUG_FS))
debugfs_create_file("pwm", 0444, NULL, NULL, &pwm_debugfs_fops);
- return class_register(&pwm_class);
+ return 0;
}
subsys_initcall(pwm_init);
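The core.c hunks above replace open-coded mutex_lock()/mutex_unlock() pairs with the guard()/scoped_guard() helpers from <linux/cleanup.h>, so early returns no longer need unlock labels. A minimal sketch of that pattern, with hypothetical names and not part of the patch:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);
    static int example_count;

    static int example_read(void)
    {
    	guard(mutex)(&example_lock);	/* dropped automatically on any return */

    	if (example_count < 0)
    		return -EINVAL;		/* no unlock label needed for early exits */

    	return example_count;
    }

    static void example_write(int val)
    {
    	scoped_guard(mutex, &example_lock)	/* held only for this statement */
    		example_count = val;
    }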
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 528e54c5999d..f9a9c12cbcdd 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -81,7 +81,8 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
tcbpwm->period = 0;
tcbpwm->div = 0;
- spin_lock(&tcbpwmc->lock);
+ guard(spinlock)(&tcbpwmc->lock);
+
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/*
* Get init config from Timer Counter registers if
@@ -107,7 +108,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
- spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -137,7 +137,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
if (tcbpwm->duty == 0)
polarity = !polarity;
- spin_lock(&tcbpwmc->lock);
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
@@ -172,8 +171,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
ATMEL_TC_SWTRG);
tcbpwmc->bkup.enabled = 0;
}
-
- spin_unlock(&tcbpwmc->lock);
}
static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -194,7 +191,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
if (tcbpwm->duty == 0)
polarity = !polarity;
- spin_lock(&tcbpwmc->lock);
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
@@ -256,7 +252,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR),
ATMEL_TC_SWTRG | ATMEL_TC_CLKEN);
tcbpwmc->bkup.enabled = 1;
- spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -265,7 +260,8 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
- struct atmel_tcb_pwm_device *atcbpwm = NULL;
+ /* companion PWM sharing register values period and div */
+ struct atmel_tcb_pwm_device *atcbpwm = &tcbpwmc->pwms[pwm->hwpwm ^ 1];
int i = 0;
int slowclk = 0;
unsigned period;
@@ -310,11 +306,6 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = div_u64(duty_ns, min);
period = div_u64(period_ns, min);
- if (pwm->hwpwm == 0)
- atcbpwm = &tcbpwmc->pwms[1];
- else
- atcbpwm = &tcbpwmc->pwms[0];
-
/*
* PWM devices provided by the TCB driver are grouped by 2.
* PWM devices in a given group must be configured with the
@@ -323,8 +314,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* We're checking the period value of the second PWM device
* in this group before applying the new config.
*/
- if ((atcbpwm && atcbpwm->duty > 0 &&
- atcbpwm->duty != atcbpwm->period) &&
+ if ((atcbpwm->duty > 0 && atcbpwm->duty != atcbpwm->period) &&
(atcbpwm->div != i || atcbpwm->period != period)) {
dev_err(pwmchip_parent(chip),
"failed to configure period_ns: PWM group already configured with a different value\n");
@@ -341,9 +331,12 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
int duty_cycle, period;
int ret;
+ guard(spinlock)(&tcbpwmc->lock);
+
if (!state->enabled) {
atmel_tcb_pwm_disable(chip, pwm, state->polarity);
return 0;
@@ -389,17 +382,17 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
{
struct pwm_chip *chip;
const struct of_device_id *match;
- struct atmel_tcb_pwm_chip *tcbpwm;
+ struct atmel_tcb_pwm_chip *tcbpwmc;
const struct atmel_tcb_config *config;
struct device_node *np = pdev->dev.of_node;
char clk_name[] = "t0_clk";
int err;
int channel;
- chip = devm_pwmchip_alloc(&pdev->dev, NPWM, sizeof(*tcbpwm));
+ chip = devm_pwmchip_alloc(&pdev->dev, NPWM, sizeof(*tcbpwmc));
if (IS_ERR(chip))
return PTR_ERR(chip);
- tcbpwm = to_tcb_chip(chip);
+ tcbpwmc = to_tcb_chip(chip);
err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
@@ -409,20 +402,20 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
return err;
}
- tcbpwm->regmap = syscon_node_to_regmap(np->parent);
- if (IS_ERR(tcbpwm->regmap))
- return PTR_ERR(tcbpwm->regmap);
+ tcbpwmc->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(tcbpwmc->regmap))
+ return PTR_ERR(tcbpwmc->regmap);
- tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
- if (IS_ERR(tcbpwm->slow_clk))
- return PTR_ERR(tcbpwm->slow_clk);
+ tcbpwmc->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+ if (IS_ERR(tcbpwmc->slow_clk))
+ return PTR_ERR(tcbpwmc->slow_clk);
clk_name[1] += channel;
- tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
- if (IS_ERR(tcbpwm->clk))
- tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
- if (IS_ERR(tcbpwm->clk)) {
- err = PTR_ERR(tcbpwm->clk);
+ tcbpwmc->clk = of_clk_get_by_name(np->parent, clk_name);
+ if (IS_ERR(tcbpwmc->clk))
+ tcbpwmc->clk = of_clk_get_by_name(np->parent, "t0_clk");
+ if (IS_ERR(tcbpwmc->clk)) {
+ err = PTR_ERR(tcbpwmc->clk);
goto err_slow_clk;
}
@@ -430,22 +423,22 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
config = match->data;
if (config->has_gclk) {
- tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
- if (IS_ERR(tcbpwm->gclk)) {
- err = PTR_ERR(tcbpwm->gclk);
+ tcbpwmc->gclk = of_clk_get_by_name(np->parent, "gclk");
+ if (IS_ERR(tcbpwmc->gclk)) {
+ err = PTR_ERR(tcbpwmc->gclk);
goto err_clk;
}
}
chip->ops = &atmel_tcb_pwm_ops;
- tcbpwm->channel = channel;
- tcbpwm->width = config->counter_width;
+ tcbpwmc->channel = channel;
+ tcbpwmc->width = config->counter_width;
- err = clk_prepare_enable(tcbpwm->slow_clk);
+ err = clk_prepare_enable(tcbpwmc->slow_clk);
if (err)
goto err_gclk;
- spin_lock_init(&tcbpwm->lock);
+ spin_lock_init(&tcbpwmc->lock);
err = pwmchip_add(chip);
if (err < 0)
@@ -456,16 +449,16 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
return 0;
err_disable_clk:
- clk_disable_unprepare(tcbpwm->slow_clk);
+ clk_disable_unprepare(tcbpwmc->slow_clk);
err_gclk:
- clk_put(tcbpwm->gclk);
+ clk_put(tcbpwmc->gclk);
err_clk:
- clk_put(tcbpwm->clk);
+ clk_put(tcbpwmc->clk);
err_slow_clk:
- clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwmc->slow_clk);
return err;
}
@@ -473,14 +466,14 @@ err_slow_clk:
static void atmel_tcb_pwm_remove(struct platform_device *pdev)
{
struct pwm_chip *chip = platform_get_drvdata(pdev);
- struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
pwmchip_remove(chip);
- clk_disable_unprepare(tcbpwm->slow_clk);
- clk_put(tcbpwm->gclk);
- clk_put(tcbpwm->clk);
- clk_put(tcbpwm->slow_clk);
+ clk_disable_unprepare(tcbpwmc->slow_clk);
+ clk_put(tcbpwmc->gclk);
+ clk_put(tcbpwmc->clk);
+ clk_put(tcbpwmc->slow_clk);
}
static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
@@ -492,14 +485,14 @@ MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
static int atmel_tcb_pwm_suspend(struct device *dev)
{
struct pwm_chip *chip = dev_get_drvdata(dev);
- struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
- struct atmel_tcb_channel *chan = &tcbpwm->bkup;
- unsigned int channel = tcbpwm->channel;
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_channel *chan = &tcbpwmc->bkup;
+ unsigned int channel = tcbpwmc->channel;
- regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), &chan->cmr);
- regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), &chan->ra);
- regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), &chan->rb);
- regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), &chan->rc);
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, CMR), &chan->cmr);
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, RA), &chan->ra);
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, RB), &chan->rb);
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, RC), &chan->rc);
return 0;
}
@@ -507,17 +500,17 @@ static int atmel_tcb_pwm_suspend(struct device *dev)
static int atmel_tcb_pwm_resume(struct device *dev)
{
struct pwm_chip *chip = dev_get_drvdata(dev);
- struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
- struct atmel_tcb_channel *chan = &tcbpwm->bkup;
- unsigned int channel = tcbpwm->channel;
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_channel *chan = &tcbpwmc->bkup;
+ unsigned int channel = tcbpwmc->channel;
- regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), chan->cmr);
- regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), chan->ra);
- regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), chan->rb);
- regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), chan->rc);
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, CMR), chan->cmr);
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, RA), chan->ra);
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, RB), chan->rb);
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, RC), chan->rc);
if (chan->enabled)
- regmap_write(tcbpwm->regmap,
+ regmap_write(tcbpwmc->regmap,
ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
ATMEL_TC_REG(channel, CCR));
diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
new file mode 100644
index 000000000000..3ad60edf20a5
--- /dev/null
+++ b/drivers/pwm/pwm-axi-pwmgen.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices AXI PWM generator
+ *
+ * Copyright 2024 Analog Devices Inc.
+ * Copyright 2024 Baylibre SAS
+ *
+ * Device docs: https://analogdevicesinc.github.io/hdl/library/axi_pwm_gen/index.html
+ *
+ * Limitations:
+ * - The writes to registers for period and duty are shadowed until
+ * LOAD_CONFIG is written to AXI_PWMGEN_REG_CONFIG, at which point
+ * they take effect.
+ * - Writing LOAD_CONFIG also has the effect of re-synchronizing all
+ * enabled channels, which could cause glitching on other channels. It
+ * is therefore expected that channels are assigned harmonic periods
+ * and all have a single user coordinating this.
+ * - Supports normal polarity. Does not support changing polarity.
+ * - On disable, the PWM output becomes low (inactive).
+ */
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/fpga/adi-axi-common.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXI_PWMGEN_REG_CORE_VERSION 0x00
+#define AXI_PWMGEN_REG_ID 0x04
+#define AXI_PWMGEN_REG_SCRATCHPAD 0x08
+#define AXI_PWMGEN_REG_CORE_MAGIC 0x0C
+#define AXI_PWMGEN_REG_CONFIG 0x10
+#define AXI_PWMGEN_REG_NPWM 0x14
+#define AXI_PWMGEN_CHX_PERIOD(ch) (0x40 + (4 * (ch)))
+#define AXI_PWMGEN_CHX_DUTY(ch) (0x80 + (4 * (ch)))
+#define AXI_PWMGEN_CHX_OFFSET(ch) (0xC0 + (4 * (ch)))
+#define AXI_PWMGEN_REG_CORE_MAGIC_VAL 0x601A3471 /* Identification number to test during setup */
+#define AXI_PWMGEN_LOAD_CONFIG BIT(1)
+#define AXI_PWMGEN_REG_CONFIG_RESET BIT(0)
+
+struct axi_pwmgen_ddata {
+ struct regmap *regmap;
+ unsigned long clk_rate_hz;
+};
+
+static const struct regmap_config axi_pwmgen_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xFC,
+};
+
+static int axi_pwmgen_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct axi_pwmgen_ddata *ddata = pwmchip_get_drvdata(chip);
+ unsigned int ch = pwm->hwpwm;
+ struct regmap *regmap = ddata->regmap;
+ u64 period_cnt, duty_cnt;
+ int ret;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (state->enabled) {
+ period_cnt = mul_u64_u64_div_u64(state->period, ddata->clk_rate_hz, NSEC_PER_SEC);
+ if (period_cnt > UINT_MAX)
+ period_cnt = UINT_MAX;
+
+ if (period_cnt == 0)
+ return -EINVAL;
+
+ ret = regmap_write(regmap, AXI_PWMGEN_CHX_PERIOD(ch), period_cnt);
+ if (ret)
+ return ret;
+
+ duty_cnt = mul_u64_u64_div_u64(state->duty_cycle, ddata->clk_rate_hz, NSEC_PER_SEC);
+ if (duty_cnt > UINT_MAX)
+ duty_cnt = UINT_MAX;
+
+ ret = regmap_write(regmap, AXI_PWMGEN_CHX_DUTY(ch), duty_cnt);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(regmap, AXI_PWMGEN_CHX_PERIOD(ch), 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, AXI_PWMGEN_CHX_DUTY(ch), 0);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_write(regmap, AXI_PWMGEN_REG_CONFIG, AXI_PWMGEN_LOAD_CONFIG);
+}
+
+static int axi_pwmgen_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct axi_pwmgen_ddata *ddata = pwmchip_get_drvdata(chip);
+ struct regmap *regmap = ddata->regmap;
+ unsigned int ch = pwm->hwpwm;
+ u32 cnt;
+ int ret;
+
+ ret = regmap_read(regmap, AXI_PWMGEN_CHX_PERIOD(ch), &cnt);
+ if (ret)
+ return ret;
+
+ state->enabled = cnt != 0;
+
+ state->period = DIV_ROUND_UP_ULL((u64)cnt * NSEC_PER_SEC, ddata->clk_rate_hz);
+
+ ret = regmap_read(regmap, AXI_PWMGEN_CHX_DUTY(ch), &cnt);
+ if (ret)
+ return ret;
+
+ state->duty_cycle = DIV_ROUND_UP_ULL((u64)cnt * NSEC_PER_SEC, ddata->clk_rate_hz);
+
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ return 0;
+}
+
+static const struct pwm_ops axi_pwmgen_pwm_ops = {
+ .apply = axi_pwmgen_apply,
+ .get_state = axi_pwmgen_get_state,
+};
+
+static int axi_pwmgen_setup(struct regmap *regmap, struct device *dev)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(regmap, AXI_PWMGEN_REG_CORE_MAGIC, &val);
+ if (ret)
+ return ret;
+
+ if (val != AXI_PWMGEN_REG_CORE_MAGIC_VAL)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to read expected value from register: got %08x, expected %08x\n",
+ val, AXI_PWMGEN_REG_CORE_MAGIC_VAL);
+
+ ret = regmap_read(regmap, AXI_PWMGEN_REG_CORE_VERSION, &val);
+ if (ret)
+ return ret;
+
+ if (ADI_AXI_PCORE_VER_MAJOR(val) != 2) {
+ return dev_err_probe(dev, -ENODEV, "Unsupported peripheral version %u.%u.%u\n",
+ ADI_AXI_PCORE_VER_MAJOR(val),
+ ADI_AXI_PCORE_VER_MINOR(val),
+ ADI_AXI_PCORE_VER_PATCH(val));
+ }
+
+ /* Enable the core */
+ ret = regmap_clear_bits(regmap, AXI_PWMGEN_REG_CONFIG, AXI_PWMGEN_REG_CONFIG_RESET);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, AXI_PWMGEN_REG_NPWM, &val);
+ if (ret)
+ return ret;
+
+ /* Return the number of PWMs */
+ return val;
+}
+
+static int axi_pwmgen_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ struct pwm_chip *chip;
+ struct axi_pwmgen_ddata *ddata;
+ struct clk *clk;
+ void __iomem *io_base;
+ int ret;
+
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ regmap = devm_regmap_init_mmio(dev, io_base, &axi_pwmgen_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "failed to init register map\n");
+
+ ret = axi_pwmgen_setup(regmap, dev);
+ if (ret < 0)
+ return ret;
+
+ chip = devm_pwmchip_alloc(dev, ret, sizeof(*ddata));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ddata = pwmchip_get_drvdata(chip);
+ ddata->regmap = regmap;
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
+
+ ret = devm_clk_rate_exclusive_get(dev, clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get exclusive rate\n");
+
+ ddata->clk_rate_hz = clk_get_rate(clk);
+ if (!ddata->clk_rate_hz || ddata->clk_rate_hz > NSEC_PER_SEC)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid clock rate: %lu\n", ddata->clk_rate_hz);
+
+ chip->ops = &axi_pwmgen_pwm_ops;
+ chip->atomic = true;
+
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not add PWM chip\n");
+
+ return 0;
+}
+
+static const struct of_device_id axi_pwmgen_ids[] = {
+ { .compatible = "adi,axi-pwmgen-2.00.a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axi_pwmgen_ids);
+
+static struct platform_driver axi_pwmgen_driver = {
+ .driver = {
+ .name = "axi-pwmgen",
+ .of_match_table = axi_pwmgen_ids,
+ },
+ .probe = axi_pwmgen_probe,
+};
+module_platform_driver(axi_pwmgen_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sergiu Cuciurean <sergiu.cuciurean@analog.com>");
+MODULE_AUTHOR("Trevor Gamblin <tgamblin@baylibre.com>");
+MODULE_DESCRIPTION("Driver for the Analog Devices AXI PWM generator");
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 606ccfdaf4cc..189301dc395e 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -20,20 +20,10 @@
*
* @ec: Pointer to EC device
* @use_pwm_type: Use PWM types instead of generic channels
- * @channel: array with per-channel data
*/
struct cros_ec_pwm_device {
struct cros_ec_device *ec;
bool use_pwm_type;
- struct cros_ec_pwm *channel;
-};
-
-/**
- * struct cros_ec_pwm - per-PWM driver data
- * @duty_cycle: cached duty cycle
- */
-struct cros_ec_pwm {
- u16 duty_cycle;
};
static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chip)
@@ -135,7 +125,6 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
- struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
u16 duty_cycle;
int ret;
@@ -156,8 +145,6 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret < 0)
return ret;
- channel->duty_cycle = state->duty_cycle;
-
return 0;
}
@@ -165,7 +152,6 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
- struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
int ret;
ret = cros_ec_pwm_get_duty(ec_pwm->ec, ec_pwm->use_pwm_type, pwm->hwpwm);
@@ -175,44 +161,13 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
state->enabled = (ret > 0);
+ state->duty_cycle = ret;
state->period = EC_PWM_MAX_DUTY;
state->polarity = PWM_POLARITY_NORMAL;
- /*
- * Note that "disabled" and "duty cycle == 0" are treated the same. If
- * the cached duty cycle is not zero, used the cached duty cycle. This
- * ensures that the configured duty cycle is kept across a disable and
- * enable operation and avoids potentially confusing consumers.
- *
- * For the case of the initial hardware readout, channel->duty_cycle
- * will be 0 and the actual duty cycle read from the EC is used.
- */
- if (ret == 0 && channel->duty_cycle > 0)
- state->duty_cycle = channel->duty_cycle;
- else
- state->duty_cycle = ret;
-
return 0;
}
-static struct pwm_device *
-cros_ec_pwm_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
-{
- struct pwm_device *pwm;
-
- if (args->args[0] >= chip->npwm)
- return ERR_PTR(-EINVAL);
-
- pwm = pwm_request_from_chip(chip, args->args[0], NULL);
- if (IS_ERR(pwm))
- return pwm;
-
- /* The EC won't let us change the period */
- pwm->args.period = EC_PWM_MAX_DUTY;
-
- return pwm;
-}
-
static const struct pwm_ops cros_ec_pwm_ops = {
.get_state = cros_ec_pwm_get_state,
.apply = cros_ec_pwm_apply,
@@ -263,7 +218,7 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
struct cros_ec_pwm_device *ec_pwm;
struct pwm_chip *chip;
bool use_pwm_type = false;
- unsigned int npwm;
+ unsigned int i, npwm;
int ret;
if (!ec)
@@ -289,12 +244,17 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
/* PWM chip */
chip->ops = &cros_ec_pwm_ops;
- chip->of_xlate = cros_ec_pwm_xlate;
- ec_pwm->channel = devm_kcalloc(dev, chip->npwm, sizeof(*ec_pwm->channel),
- GFP_KERNEL);
- if (!ec_pwm->channel)
- return -ENOMEM;
+ /*
+ * The device tree binding for this device is special as it only uses a
+ * single cell (for the hwid) and so doesn't provide a default period.
+ * This isn't a big problem though as the hardware only supports a
+ * single period length; it's just a bit ugly to make this fit into the
+ * pwm core abstractions. So initialize the period here, as
+ * of_pwm_xlate_with_flags() won't do that for us.
+ */
+ for (i = 0; i < npwm; ++i)
+ chip->pwms[i].args.period = EC_PWM_MAX_DUTY;
dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
diff --git a/drivers/pwm/pwm-gpio.c b/drivers/pwm/pwm-gpio.c
new file mode 100644
index 000000000000..9f8884ac7504
--- /dev/null
+++ b/drivers/pwm/pwm-gpio.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic software PWM for modulating GPIOs
+ *
+ * Copyright (C) 2020 Axis Communications AB
+ * Copyright (C) 2020 Nicola Di Lieto
+ * Copyright (C) 2024 Stefan Wahren
+ * Copyright (C) 2024 Linus Walleij
+ */
+
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hrtimer.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct pwm_gpio {
+ struct hrtimer gpio_timer;
+ struct gpio_desc *gpio;
+ struct pwm_state state;
+ struct pwm_state next_state;
+
+ /* Protect internal state between pwm_ops and hrtimer */
+ spinlock_t lock;
+
+ bool changing;
+ bool running;
+ bool level;
+};
+
+static void pwm_gpio_round(struct pwm_state *dest, const struct pwm_state *src)
+{
+ u64 dividend;
+ u32 remainder;
+
+ *dest = *src;
+
+ /* Round down to hrtimer resolution */
+ dividend = dest->period;
+ remainder = do_div(dividend, hrtimer_resolution);
+ dest->period -= remainder;
+
+ dividend = dest->duty_cycle;
+ remainder = do_div(dividend, hrtimer_resolution);
+ dest->duty_cycle -= remainder;
+}
+
+static u64 pwm_gpio_toggle(struct pwm_gpio *gpwm, bool level)
+{
+ const struct pwm_state *state = &gpwm->state;
+ bool invert = state->polarity == PWM_POLARITY_INVERSED;
+
+ gpwm->level = level;
+ gpiod_set_value(gpwm->gpio, gpwm->level ^ invert);
+
+ if (!state->duty_cycle || state->duty_cycle == state->period) {
+ gpwm->running = false;
+ return 0;
+ }
+
+ gpwm->running = true;
+ return level ? state->duty_cycle : state->period - state->duty_cycle;
+}
+
+static enum hrtimer_restart pwm_gpio_timer(struct hrtimer *gpio_timer)
+{
+ struct pwm_gpio *gpwm = container_of(gpio_timer, struct pwm_gpio,
+ gpio_timer);
+ u64 next_toggle;
+ bool new_level;
+
+ guard(spinlock_irqsave)(&gpwm->lock);
+
+ /* Apply new state at end of current period */
+ if (!gpwm->level && gpwm->changing) {
+ gpwm->changing = false;
+ gpwm->state = gpwm->next_state;
+ new_level = !!gpwm->state.duty_cycle;
+ } else {
+ new_level = !gpwm->level;
+ }
+
+ next_toggle = pwm_gpio_toggle(gpwm, new_level);
+ if (next_toggle)
+ hrtimer_forward(gpio_timer, hrtimer_get_expires(gpio_timer),
+ ns_to_ktime(next_toggle));
+
+ return next_toggle ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int pwm_gpio_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct pwm_gpio *gpwm = pwmchip_get_drvdata(chip);
+ bool invert = state->polarity == PWM_POLARITY_INVERSED;
+
+ if (state->duty_cycle && state->duty_cycle < hrtimer_resolution)
+ return -EINVAL;
+
+ if (state->duty_cycle != state->period &&
+ (state->period - state->duty_cycle < hrtimer_resolution))
+ return -EINVAL;
+
+ if (!state->enabled) {
+ hrtimer_cancel(&gpwm->gpio_timer);
+ } else if (!gpwm->running) {
+ int ret;
+
+ /*
+ * This just enables the output, but pwm_gpio_toggle()
+ * really starts the duty cycle.
+ */
+ ret = gpiod_direction_output(gpwm->gpio, invert);
+ if (ret)
+ return ret;
+ }
+
+ guard(spinlock_irqsave)(&gpwm->lock);
+
+ if (!state->enabled) {
+ pwm_gpio_round(&gpwm->state, state);
+ gpwm->running = false;
+ gpwm->changing = false;
+
+ gpiod_set_value(gpwm->gpio, invert);
+ } else if (gpwm->running) {
+ pwm_gpio_round(&gpwm->next_state, state);
+ gpwm->changing = true;
+ } else {
+ unsigned long next_toggle;
+
+ pwm_gpio_round(&gpwm->state, state);
+ gpwm->changing = false;
+
+ next_toggle = pwm_gpio_toggle(gpwm, !!state->duty_cycle);
+ if (next_toggle)
+ hrtimer_start(&gpwm->gpio_timer, next_toggle,
+ HRTIMER_MODE_REL);
+ }
+
+ return 0;
+}
+
+static int pwm_gpio_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct pwm_gpio *gpwm = pwmchip_get_drvdata(chip);
+
+ guard(spinlock_irqsave)(&gpwm->lock);
+
+ if (gpwm->changing)
+ *state = gpwm->next_state;
+ else
+ *state = gpwm->state;
+
+ return 0;
+}
+
+static const struct pwm_ops pwm_gpio_ops = {
+ .apply = pwm_gpio_apply,
+ .get_state = pwm_gpio_get_state,
+};
+
+static void pwm_gpio_disable_hrtimer(void *data)
+{
+ struct pwm_gpio *gpwm = data;
+
+ hrtimer_cancel(&gpwm->gpio_timer);
+}
+
+static int pwm_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
+ struct pwm_gpio *gpwm;
+ int ret;
+
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*gpwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ gpwm = pwmchip_get_drvdata(chip);
+
+ spin_lock_init(&gpwm->lock);
+
+ gpwm->gpio = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
+ if (IS_ERR(gpwm->gpio))
+ return dev_err_probe(dev, PTR_ERR(gpwm->gpio),
+ "%pfw: could not get gpio\n",
+ dev_fwnode(dev));
+
+ if (gpiod_cansleep(gpwm->gpio))
+ return dev_err_probe(dev, -EINVAL,
+ "%pfw: sleeping GPIO not supported\n",
+ dev_fwnode(dev));
+
+ chip->ops = &pwm_gpio_ops;
+ chip->atomic = true;
+
+ hrtimer_init(&gpwm->gpio_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ret = devm_add_action_or_reset(dev, pwm_gpio_disable_hrtimer, gpwm);
+ if (ret)
+ return ret;
+
+ gpwm->gpio_timer.function = pwm_gpio_timer;
+
+ ret = pwmchip_add(chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "could not add pwmchip\n");
+
+ return 0;
+}
+
+static const struct of_device_id pwm_gpio_dt_ids[] = {
+ { .compatible = "pwm-gpio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pwm_gpio_dt_ids);
+
+static struct platform_driver pwm_gpio_driver = {
+ .driver = {
+ .name = "pwm-gpio",
+ .of_match_table = pwm_gpio_dt_ids,
+ },
+ .probe = pwm_gpio_probe,
+};
+module_platform_driver(pwm_gpio_driver);
+
+MODULE_DESCRIPTION("PWM GPIO driver");
+MODULE_AUTHOR("Vincent Whitchurch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index c50ddbac43c8..96ea343856f0 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -380,6 +381,7 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
static int pwm_imx_tpm_suspend(struct device *dev)
{
struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
+ int ret;
if (tpm->enable_count > 0)
return -EBUSY;
@@ -393,7 +395,11 @@ static int pwm_imx_tpm_suspend(struct device *dev)
clk_disable_unprepare(tpm->clk);
- return 0;
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ clk_prepare_enable(tpm->clk);
+
+ return ret;
}
static int pwm_imx_tpm_resume(struct device *dev)
@@ -401,9 +407,15 @@ static int pwm_imx_tpm_resume(struct device *dev)
struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
int ret = 0;
- ret = clk_prepare_enable(tpm->clk);
+ ret = pinctrl_pm_select_default_state(dev);
if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(tpm->clk);
+ if (ret) {
dev_err(dev, "failed to prepare or enable clock: %d\n", ret);
+ pinctrl_pm_select_sleep_state(dev);
+ }
return ret;
}
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index 1d2aae2d278f..d5535d208005 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -194,5 +194,6 @@ static struct platform_driver pwm_imx1_driver = {
};
module_platform_driver(pwm_imx1_driver);
+MODULE_DESCRIPTION("i.MX1 and i.MX21 Pulse Width Modulator driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index e1412116ef65..9e2bbf5b4a8c 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -352,5 +352,6 @@ static struct platform_driver imx_pwm_driver = {
};
module_platform_driver(imx_pwm_driver);
+MODULE_DESCRIPTION("i.MX27 and later i.MX SoCs Pulse Width Modulator driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index f9cc7c17c8f0..084c71a0a11b 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -230,4 +230,5 @@ static struct platform_driver lgm_pwm_driver = {
};
module_platform_driver(lgm_pwm_driver);
+MODULE_DESCRIPTION("Intel LGM Pulse Width Modulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index da4bf543d357..6bdb01619380 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -201,12 +201,11 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* state instead of its inactive state.
*/
if ((state->polarity == PWM_POLARITY_NORMAL) ^ state->enabled)
- regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
- TCU_TCSR_PWM_INITL_HIGH, 0);
+ regmap_clear_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_INITL_HIGH);
else
- regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
- TCU_TCSR_PWM_INITL_HIGH,
- TCU_TCSR_PWM_INITL_HIGH);
+ regmap_set_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_INITL_HIGH);
if (state->enabled)
jz4740_pwm_enable(chip, pwm);
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 25045c229520..f7ece2809e6b 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -46,25 +46,6 @@ static void pwm_lpss_remove_pci(struct pci_dev *pdev)
pm_runtime_get_sync(&pdev->dev);
}
-static int pwm_lpss_runtime_suspend_pci(struct device *dev)
-{
- /*
- * The PCI core will handle transition to D3 automatically. We only
- * need to provide runtime PM hooks for that to happen.
- */
- return 0;
-}
-
-static int pwm_lpss_runtime_resume_pci(struct device *dev)
-{
- return 0;
-}
-
-static DEFINE_RUNTIME_DEV_PM_OPS(pwm_lpss_pci_pm,
- pwm_lpss_runtime_suspend_pci,
- pwm_lpss_runtime_resume_pci,
- NULL);
-
static const struct pci_device_id pwm_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
@@ -84,9 +65,6 @@ static struct pci_driver pwm_lpss_driver_pci = {
.id_table = pwm_lpss_pci_ids,
.probe = pwm_lpss_probe_pci,
.remove = pwm_lpss_remove_pci,
- .driver = {
- .pm = pm_ptr(&pwm_lpss_pci_pm),
- },
};
module_pci_driver(pwm_lpss_driver_pci);
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index dbc9f5b17bdc..5130238a4567 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -55,14 +55,7 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
DPM_FLAG_SMART_SUSPEND);
pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-}
-
-static void pwm_lpss_remove_platform(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
+ return devm_pm_runtime_enable(&pdev->dev);
}
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
@@ -80,7 +73,6 @@ static struct platform_driver pwm_lpss_driver_platform = {
.acpi_match_table = pwm_lpss_acpi_match,
},
.probe = pwm_lpss_probe_platform,
- .remove_new = pwm_lpss_remove_platform,
};
module_platform_driver(pwm_lpss_driver_platform);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 19a87873ad60..01dfa0fab80a 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -395,4 +395,5 @@ static struct platform_driver pwm_mediatek_driver = {
module_platform_driver(pwm_mediatek_driver);
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("MediaTek general purpose Pulse Width Modulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index b2f97dfb01bb..98e6c1533312 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -460,6 +460,37 @@ static int meson_pwm_init_channels_meson8b_v2(struct pwm_chip *chip)
return meson_pwm_init_clocks_meson8b(chip, mux_parent_data);
}
+static void meson_pwm_s4_put_clk(void *data)
+{
+ struct clk *clk = data;
+
+ clk_put(clk);
+}
+
+static int meson_pwm_init_channels_s4(struct pwm_chip *chip)
+{
+ struct device *dev = pwmchip_parent(chip);
+ struct device_node *np = dev->of_node;
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ int i, ret;
+
+ for (i = 0; i < MESON_NUM_PWMS; i++) {
+ meson->channels[i].clk = of_clk_get(np, i);
+ if (IS_ERR(meson->channels[i].clk))
+ return dev_err_probe(dev,
+ PTR_ERR(meson->channels[i].clk),
+ "Failed to get clk\n");
+
+ ret = devm_add_action_or_reset(dev, meson_pwm_s4_put_clk,
+ meson->channels[i].clk);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to add clk_put action\n");
+ }
+
+ return 0;
+}
+
static const struct meson_pwm_data pwm_meson8b_data = {
.parent_names = { "xtal", NULL, "fclk_div4", "fclk_div3" },
.channels_init = meson_pwm_init_channels_meson8b_legacy,
@@ -498,6 +529,10 @@ static const struct meson_pwm_data pwm_meson8_v2_data = {
.channels_init = meson_pwm_init_channels_meson8b_v2,
};
+static const struct meson_pwm_data pwm_s4_data = {
+ .channels_init = meson_pwm_init_channels_s4,
+};
+
static const struct of_device_id meson_pwm_matches[] = {
{
.compatible = "amlogic,meson8-pwm-v2",
@@ -536,6 +571,10 @@ static const struct of_device_id meson_pwm_matches[] = {
.compatible = "amlogic,meson-g12a-ao-pwm-cd",
.data = &pwm_g12a_ao_cd_data
},
+ {
+ .compatible = "amlogic,meson-s4-pwm",
+ .data = &pwm_s4_data
+ },
{},
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
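meson_pwm_init_channels_s4() above relies on devm_add_action_or_reset() so that every of_clk_get() is undone automatically on probe failure or unbind. A sketch of that pattern under hypothetical names, not part of the patch:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static void example_put_clk(void *data)
    {
    	clk_put(data);
    }

    static int example_get_clk(struct device *dev, struct device_node *np,
    			   int index, struct clk **out)
    {
    	struct clk *clk = of_clk_get(np, index);
    	int ret;

    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	/* on failure the action runs immediately, i.e. clk_put() is called */
    	ret = devm_add_action_or_reset(dev, example_put_clk, clk);
    	if (ret)
    		return ret;

    	*out = clk;
    	return 0;
    }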
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index bb7bb48b2e6d..430bd6a709e9 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -208,4 +208,5 @@ static struct platform_driver pwm_driver = {
module_platform_driver(pwm_driver);
+MODULE_DESCRIPTION("PXA Pulse Width Modulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index efb60c9f0cb3..7adf4f2b1049 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -644,6 +644,7 @@ static struct platform_driver pwm_samsung_driver = {
};
module_platform_driver(pwm_samsung_driver);
+MODULE_DESCRIPTION("Samsung Pulse Width Modulator driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
MODULE_ALIAS("platform:samsung-pwm");
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 6c6f3b38c835..4f372279f313 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -255,6 +255,7 @@ static struct platform_driver spear_pwm_driver = {
module_platform_driver(spear_pwm_driver);
+MODULE_DESCRIPTION("ST Microelectronics SPEAr Pulse Width Modulator driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.com>");
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index a2f231d13a9f..fd754a99cf2e 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -321,22 +321,30 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
* First we need to find the minimal value for prescaler such that
*
* period_ns * clkrate
- * ------------------------------
+ * ------------------------------ < max_arr + 1
* NSEC_PER_SEC * (prescaler + 1)
*
- * isn't bigger than max_arr.
+ * This inequality is equivalent to
+ *
+ * period_ns * clkrate
+ * ---------------------------- < prescaler + 1
+ * NSEC_PER_SEC * (max_arr + 1)
+ *
+ * Using integer division and knowing that the right hand side is
+ * an integer, this is further equivalent to
+ *
+ * (period_ns * clkrate) // (NSEC_PER_SEC * (max_arr + 1)) ≤ prescaler
*/
prescaler = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
- (u64)NSEC_PER_SEC * priv->max_arr);
- if (prescaler > 0)
- prescaler -= 1;
-
+ (u64)NSEC_PER_SEC * ((u64)priv->max_arr + 1));
if (prescaler > MAX_TIM_PSC)
return -EINVAL;
prd = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
(u64)NSEC_PER_SEC * (prescaler + 1));
+ if (!prd)
+ return -EINVAL;
/*
* All channels share the same prescaler and counter so when two
@@ -360,7 +368,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
dty = mul_u64_u64_div_u64(duty_ns, clk_get_rate(priv->clk),
(u64)NSEC_PER_SEC * (prescaler + 1));
- regmap_write(priv->regmap, TIM_CCR1 + 4 * ch, dty);
+ regmap_write(priv->regmap, TIM_CCRx(ch + 1), dty);
/* Configure output mode */
shift = (ch & 0x1) * CCMR_CHANNEL_SHIFT;
@@ -382,9 +390,9 @@ static int stm32_pwm_set_polarity(struct stm32_pwm *priv, unsigned int ch,
{
u32 mask;
- mask = TIM_CCER_CC1P << (ch * 4);
+ mask = TIM_CCER_CCxP(ch + 1);
if (priv->have_complementary_output)
- mask |= TIM_CCER_CC1NP << (ch * 4);
+ mask |= TIM_CCER_CCxNP(ch + 1);
regmap_update_bits(priv->regmap, TIM_CCER, mask,
polarity == PWM_POLARITY_NORMAL ? 0 : mask);
@@ -402,9 +410,9 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, unsigned int ch)
return ret;
/* Enable channel */
- mask = TIM_CCER_CC1E << (ch * 4);
+ mask = TIM_CCER_CCxE(ch + 1);
if (priv->have_complementary_output)
- mask |= TIM_CCER_CC1NE << (ch * 4);
+ mask |= TIM_CCER_CCxNE(ch + 1);
regmap_set_bits(priv->regmap, TIM_CCER, mask);
@@ -422,9 +430,9 @@ static void stm32_pwm_disable(struct stm32_pwm *priv, unsigned int ch)
u32 mask;
/* Disable channel */
- mask = TIM_CCER_CC1E << (ch * 4);
+ mask = TIM_CCER_CCxE(ch + 1);
if (priv->have_complementary_output)
- mask |= TIM_CCER_CC1NE << (ch * 4);
+ mask |= TIM_CCER_CCxNE(ch + 1);
regmap_clear_bits(priv->regmap, TIM_CCER, mask);
@@ -444,8 +452,9 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
enabled = pwm->state.enabled;
- if (enabled && !state->enabled) {
- stm32_pwm_disable(priv, pwm->hwpwm);
+ if (!state->enabled) {
+ if (enabled)
+ stm32_pwm_disable(priv, pwm->hwpwm);
return 0;
}
@@ -493,8 +502,8 @@ static int stm32_pwm_get_state(struct pwm_chip *chip,
if (ret)
goto out;
- state->enabled = ccer & (TIM_CCER_CC1E << (ch * 4));
- state->polarity = (ccer & (TIM_CCER_CC1P << (ch * 4))) ?
+ state->enabled = ccer & TIM_CCER_CCxE(ch + 1);
+ state->polarity = (ccer & TIM_CCER_CCxP(ch + 1)) ?
PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
ret = regmap_read(priv->regmap, TIM_PSC, &psc);
if (ret)
@@ -502,7 +511,7 @@ static int stm32_pwm_get_state(struct pwm_chip *chip,
ret = regmap_read(priv->regmap, TIM_ARR, &arr);
if (ret)
goto out;
- ret = regmap_read(priv->regmap, TIM_CCR1 + 4 * ch, &ccr);
+ ret = regmap_read(priv->regmap, TIM_CCRx(ch + 1), &ccr);
if (ret)
goto out;
@@ -673,7 +682,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
* .apply() won't overflow.
*/
if (clk_get_rate(priv->clk) > 1000000000)
- return dev_err_probe(dev, -EINVAL, "Failed to lock clock\n");
+ return dev_err_probe(dev, -EINVAL, "Clock freq too high (%lu)\n",
+ clk_get_rate(priv->clk));
chip->ops = &stm32pwm_ops;
@@ -702,7 +712,7 @@ static int stm32_pwm_suspend(struct device *dev)
ccer = active_channels(priv);
for (i = 0; i < chip->npwm; i++) {
- mask = TIM_CCER_CC1E << (i * 4);
+ mask = TIM_CCER_CCxE(i + 1);
if (ccer & mask) {
dev_err(dev, "PWM %u still in use by consumer %s\n",
i, chip->pwms[i].label);
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index 9e55380957be..28fae4979e3f 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -170,6 +170,7 @@ static struct platform_driver visconti_pwm_driver = {
};
module_platform_driver(visconti_pwm_driver);
+MODULE_DESCRIPTION("Toshiba Visconti Pulse Width Modulator driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>");
MODULE_ALIAS("platform:pwm-visconti");
diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c
index 3a7deebb0d0c..52c241982807 100644
--- a/drivers/pwm/pwm-xilinx.c
+++ b/drivers/pwm/pwm-xilinx.c
@@ -224,7 +224,6 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
if (IS_ERR(chip))
return PTR_ERR(chip);
priv = xilinx_pwm_chip_to_priv(chip);
- platform_set_drvdata(pdev, chip);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
@@ -263,37 +262,24 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
* alas, such properties are not allowed to be used.
*/
- priv->clk = devm_clk_get(dev, "s_axi_aclk");
+ priv->clk = devm_clk_get_enabled(dev, "s_axi_aclk");
if (IS_ERR(priv->clk))
return dev_err_probe(dev, PTR_ERR(priv->clk),
"Could not get clock\n");
- ret = clk_prepare_enable(priv->clk);
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk);
if (ret)
- return dev_err_probe(dev, ret, "Clock enable failed\n");
- clk_rate_exclusive_get(priv->clk);
+ return dev_err_probe(dev, ret,
+ "Failed to lock clock rate\n");
chip->ops = &xilinx_pwm_ops;
- ret = pwmchip_add(chip);
- if (ret) {
- clk_rate_exclusive_put(priv->clk);
- clk_disable_unprepare(priv->clk);
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
return dev_err_probe(dev, ret, "Could not register PWM chip\n");
- }
return 0;
}
-static void xilinx_pwm_remove(struct platform_device *pdev)
-{
- struct pwm_chip *chip = platform_get_drvdata(pdev);
- struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
-
- pwmchip_remove(chip);
- clk_rate_exclusive_put(priv->clk);
- clk_disable_unprepare(priv->clk);
-}
-
static const struct of_device_id xilinx_pwm_of_match[] = {
{ .compatible = "xlnx,xps-timer-1.00.a", },
{},
@@ -302,7 +288,6 @@ MODULE_DEVICE_TABLE(of, xilinx_pwm_of_match);
static struct platform_driver xilinx_pwm_driver = {
.probe = xilinx_pwm_probe,
- .remove_new = xilinx_pwm_remove,
.driver = {
.name = "xilinx-pwm",
.of_match_table = of_match_ptr(xilinx_pwm_of_match),
diff --git a/drivers/ras/amd/atl/core.c b/drivers/ras/amd/atl/core.c
index 6dc4e06305f7..4197e10993ac 100644
--- a/drivers/ras/amd/atl/core.c
+++ b/drivers/ras/amd/atl/core.c
@@ -49,26 +49,26 @@ static bool legacy_hole_en(struct addr_ctx *ctx)
return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg);
}
-static int add_legacy_hole(struct addr_ctx *ctx)
+static u64 add_legacy_hole(struct addr_ctx *ctx, u64 addr)
{
- u32 dram_hole_base;
- u8 func = 0;
-
if (!legacy_hole_en(ctx))
- return 0;
+ return addr;
- if (df_cfg.rev >= DF4)
- func = 7;
+ if (addr >= df_cfg.dram_hole_base)
+ addr += (BIT_ULL(32) - df_cfg.dram_hole_base);
- if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base))
- return -EINVAL;
+ return addr;
+}
- dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;
+static u64 remove_legacy_hole(struct addr_ctx *ctx, u64 addr)
+{
+ if (!legacy_hole_en(ctx))
+ return addr;
- if (ctx->ret_addr >= dram_hole_base)
- ctx->ret_addr += (BIT_ULL(32) - dram_hole_base);
+ if (addr >= df_cfg.dram_hole_base)
+ addr -= (BIT_ULL(32) - df_cfg.dram_hole_base);
- return 0;
+ return addr;
}
static u64 get_base_addr(struct addr_ctx *ctx)
@@ -83,14 +83,14 @@ static u64 get_base_addr(struct addr_ctx *ctx)
return base_addr << DF_DRAM_BASE_LIMIT_LSB;
}
-static int add_base_and_hole(struct addr_ctx *ctx)
+u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr)
{
- ctx->ret_addr += get_base_addr(ctx);
-
- if (add_legacy_hole(ctx))
- return -EINVAL;
+ return add_legacy_hole(ctx, addr + get_base_addr(ctx));
+}
- return 0;
+u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr)
+{
+ return remove_legacy_hole(ctx, addr) - get_base_addr(ctx);
}
static bool late_hole_remove(struct addr_ctx *ctx)
@@ -125,6 +125,9 @@ unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsig
ctx.inputs.die_id = die_id;
ctx.inputs.coh_st_inst_id = coh_st_inst_id;
+ if (legacy_hole_en(&ctx) && !df_cfg.dram_hole_base)
+ return -EINVAL;
+
if (determine_node_id(&ctx, socket_id, die_id))
return -EINVAL;
@@ -134,14 +137,14 @@ unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsig
if (denormalize_address(&ctx))
return -EINVAL;
- if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx))
- return -EINVAL;
+ if (!late_hole_remove(&ctx))
+ ctx.ret_addr = add_base_and_hole(&ctx, ctx.ret_addr);
if (dehash_address(&ctx))
return -EINVAL;
- if (late_hole_remove(&ctx) && add_base_and_hole(&ctx))
- return -EINVAL;
+ if (late_hole_remove(&ctx))
+ ctx.ret_addr = add_base_and_hole(&ctx, ctx.ret_addr);
if (addr_over_limit(&ctx))
return -EINVAL;
@@ -206,7 +209,7 @@ static int __init amd_atl_init(void)
__module_get(THIS_MODULE);
amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr);
- pr_info("AMD Address Translation Library initialized");
+ pr_info("AMD Address Translation Library initialized\n");
return 0;
}
@@ -222,4 +225,5 @@ static void __exit amd_atl_exit(void)
module_init(amd_atl_init);
module_exit(amd_atl_exit);
+MODULE_DESCRIPTION("AMD Address Translation Library");
MODULE_LICENSE("GPL");
diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c
index 4ea46262c4f5..d4ee7ecabaee 100644
--- a/drivers/ras/amd/atl/dehash.c
+++ b/drivers/ras/amd/atl/dehash.c
@@ -12,41 +12,10 @@
#include "internal.h"
-/*
- * Verify the interleave bits are correct in the different interleaving
- * settings.
- *
- * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the
- * respective interleaving is disabled.
- */
-static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2,
- u8 num_intlv_dies, u8 num_intlv_sockets)
-{
- if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
- pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
- return false;
- }
-
- if (ctx->map.num_intlv_dies > num_intlv_dies) {
- pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
- return false;
- }
-
- if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
- pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
- return false;
- }
-
- return true;
-}
-
static int df2_dehash_addr(struct addr_ctx *ctx)
{
u8 hashed_bit, intlv_bit, intlv_bit_pos;
- if (!map_bits_valid(ctx, 8, 9, 1, 1))
- return -EINVAL;
-
intlv_bit_pos = ctx->map.intlv_bit_pos;
intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
@@ -67,9 +36,6 @@ static int df3_dehash_addr(struct addr_ctx *ctx)
bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
u8 hashed_bit, intlv_bit, intlv_bit_pos;
- if (!map_bits_valid(ctx, 8, 9, 1, 1))
- return -EINVAL;
-
hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
@@ -171,9 +137,6 @@ static int df4_dehash_addr(struct addr_ctx *ctx)
bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
u8 hashed_bit, intlv_bit;
- if (!map_bits_valid(ctx, 8, 8, 1, 2))
- return -EINVAL;
-
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
@@ -247,9 +210,6 @@ static int df4p5_dehash_addr(struct addr_ctx *ctx)
u8 hashed_bit, intlv_bit;
u64 rehash_vector;
- if (!map_bits_valid(ctx, 8, 8, 1, 2))
- return -EINVAL;
-
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
@@ -360,9 +320,6 @@ static int mi300_dehash_addr(struct addr_ctx *ctx)
bool hashed_bit, intlv_bit, test_bit;
u8 num_intlv_bits, base_bit, i;
- if (!map_bits_valid(ctx, 8, 8, 4, 1))
- return -EINVAL;
-
hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl);
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
diff --git a/drivers/ras/amd/atl/denormalize.c b/drivers/ras/amd/atl/denormalize.c
index e279224288d6..1a525cfa983c 100644
--- a/drivers/ras/amd/atl/denormalize.c
+++ b/drivers/ras/amd/atl/denormalize.c
@@ -448,6 +448,118 @@ static u16 get_logical_coh_st_fabric_id(struct addr_ctx *ctx)
return (phys_fabric_id & df_cfg.node_id_mask) | log_fabric_id;
}
+static u16 get_logical_coh_st_fabric_id_for_current_spa(struct addr_ctx *ctx,
+ struct df4p5_denorm_ctx *denorm_ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T;
+ bool hash_pa8, hash_pa9, hash_pa12, hash_pa13;
+ u64 cs_id = 0;
+
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ hash_pa8 = FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa);
+ hash_pa8 ^= FIELD_GET(BIT_ULL(14), denorm_ctx->current_spa);
+ hash_pa8 ^= FIELD_GET(BIT_ULL(16), denorm_ctx->current_spa) & hash_ctl_64k;
+ hash_pa8 ^= FIELD_GET(BIT_ULL(21), denorm_ctx->current_spa) & hash_ctl_2M;
+ hash_pa8 ^= FIELD_GET(BIT_ULL(30), denorm_ctx->current_spa) & hash_ctl_1G;
+ hash_pa8 ^= FIELD_GET(BIT_ULL(40), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ hash_pa9 = FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa);
+ hash_pa9 ^= FIELD_GET(BIT_ULL(17), denorm_ctx->current_spa) & hash_ctl_64k;
+ hash_pa9 ^= FIELD_GET(BIT_ULL(22), denorm_ctx->current_spa) & hash_ctl_2M;
+ hash_pa9 ^= FIELD_GET(BIT_ULL(31), denorm_ctx->current_spa) & hash_ctl_1G;
+ hash_pa9 ^= FIELD_GET(BIT_ULL(41), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ hash_pa12 = FIELD_GET(BIT_ULL(12), denorm_ctx->current_spa);
+ hash_pa12 ^= FIELD_GET(BIT_ULL(18), denorm_ctx->current_spa) & hash_ctl_64k;
+ hash_pa12 ^= FIELD_GET(BIT_ULL(23), denorm_ctx->current_spa) & hash_ctl_2M;
+ hash_pa12 ^= FIELD_GET(BIT_ULL(32), denorm_ctx->current_spa) & hash_ctl_1G;
+ hash_pa12 ^= FIELD_GET(BIT_ULL(42), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ hash_pa13 = FIELD_GET(BIT_ULL(13), denorm_ctx->current_spa);
+ hash_pa13 ^= FIELD_GET(BIT_ULL(19), denorm_ctx->current_spa) & hash_ctl_64k;
+ hash_pa13 ^= FIELD_GET(BIT_ULL(24), denorm_ctx->current_spa) & hash_ctl_2M;
+ hash_pa13 ^= FIELD_GET(BIT_ULL(33), denorm_ctx->current_spa) & hash_ctl_1G;
+ hash_pa13 ^= FIELD_GET(BIT_ULL(43), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ switch (ctx->map.intlv_mode) {
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 13), denorm_ctx->current_spa) << 3;
+ cs_id %= denorm_ctx->mod_value;
+ cs_id <<= 2;
+ cs_id |= (hash_pa9 | (hash_pa12 << 1));
+ cs_id |= hash_pa8 << df_cfg.socket_id_shift;
+ break;
+
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 14), denorm_ctx->current_spa) << 4;
+ cs_id %= denorm_ctx->mod_value;
+ cs_id <<= 2;
+ cs_id |= (hash_pa12 | (hash_pa13 << 1));
+ cs_id |= hash_pa8 << df_cfg.socket_id_shift;
+ break;
+
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2;
+ cs_id %= denorm_ctx->mod_value;
+ cs_id <<= 2;
+ cs_id |= (hash_pa8 | (hash_pa9 << 1));
+ break;
+
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 13), denorm_ctx->current_spa) << 3;
+ cs_id %= denorm_ctx->mod_value;
+ cs_id <<= 2;
+ cs_id |= (hash_pa8 | (hash_pa12 << 1));
+ break;
+
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2;
+ cs_id |= (FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa) << 1);
+ cs_id %= denorm_ctx->mod_value;
+ cs_id <<= 1;
+ cs_id |= hash_pa8;
+ break;
+
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2;
+ cs_id %= denorm_ctx->mod_value;
+ cs_id <<= 1;
+ cs_id |= hash_pa8;
+ break;
+
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2;
+ cs_id |= FIELD_GET(GENMASK_ULL(9, 8), denorm_ctx->current_spa);
+ cs_id %= denorm_ctx->mod_value;
+ break;
+
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2;
+ cs_id |= FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa) << 1;
+ cs_id %= denorm_ctx->mod_value;
+ break;
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return 0;
+ }
+
+ if (cs_id > 0xffff) {
+ atl_debug(ctx, "Translation error: Resulting cs_id larger than u16\n");
+ return 0;
+ }
+
+ return cs_id;
+}
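The hash_pa* terms above fold selected higher-order SPA bits into the low interleave bits, each term gated by the matching hash-control enable (64K/2M/1G/1T). A minimal standalone sketch of the bit-8 hash, assuming userspace C with illustrative helper names (pa_bit, hash_pa8) in place of the kernel's FIELD_GET/BIT_ULL macros:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Extract a single bit of a 64-bit address. */
static bool pa_bit(uint64_t addr, unsigned int n)
{
	return (addr >> n) & 1;
}

/*
 * XOR-fold the higher-order bits into PA[8], mirroring the hash_pa8
 * computation above. Each term only contributes when the corresponding
 * hash-control enable is set.
 */
static bool hash_pa8(uint64_t spa, bool en_64k, bool en_2m, bool en_1g, bool en_1t)
{
	bool h = pa_bit(spa, 8);

	h ^= pa_bit(spa, 14);
	h ^= pa_bit(spa, 16) & en_64k;
	h ^= pa_bit(spa, 21) & en_2m;
	h ^= pa_bit(spa, 30) & en_1g;
	h ^= pa_bit(spa, 40) & en_1t;

	return h;
}

int main(void)
{
	/* Example: 64K and 2M hashing enabled, 1G/1T disabled. */
	uint64_t spa = 0x0000123456789abcULL;

	printf("hash_pa8 = %d\n", hash_pa8(spa, true, true, false, false));
	return 0;
}
```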
+
static int denorm_addr_common(struct addr_ctx *ctx)
{
u64 denorm_addr;
@@ -699,6 +811,442 @@ static int denorm_addr_df4_np2(struct addr_ctx *ctx)
return 0;
}
+static u64 normalize_addr_df4p5_np2(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx,
+ u64 addr)
+{
+ u64 temp_addr_a = 0, temp_addr_b = 0;
+
+ switch (ctx->map.intlv_mode) {
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ temp_addr_a = FIELD_GET(GENMASK_ULL(11, 10), addr) << 8;
+ break;
+
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ temp_addr_a = FIELD_GET(GENMASK_ULL(11, 9), addr) << 8;
+ break;
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return 0;
+ }
+
+ switch (ctx->map.intlv_mode) {
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 13), addr) / denorm_ctx->mod_value;
+ temp_addr_b <<= 10;
+ break;
+
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 14), addr) / denorm_ctx->mod_value;
+ temp_addr_b <<= 11;
+ break;
+
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) / denorm_ctx->mod_value;
+ temp_addr_b <<= 10;
+ break;
+
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 13), addr) / denorm_ctx->mod_value;
+ temp_addr_b <<= 11;
+ break;
+
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) << 1;
+ temp_addr_b |= FIELD_GET(BIT_ULL(9), addr);
+ temp_addr_b /= denorm_ctx->mod_value;
+ temp_addr_b <<= 10;
+ break;
+
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) / denorm_ctx->mod_value;
+ temp_addr_b <<= 11;
+ break;
+
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) << 2;
+ temp_addr_b |= FIELD_GET(GENMASK_ULL(9, 8), addr);
+ temp_addr_b /= denorm_ctx->mod_value;
+ temp_addr_b <<= 10;
+ break;
+
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) << 1;
+ temp_addr_b |= FIELD_GET(BIT_ULL(8), addr);
+ temp_addr_b /= denorm_ctx->mod_value;
+ temp_addr_b <<= 11;
+ break;
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return 0;
+ }
+
+ return denorm_ctx->base_denorm_addr | temp_addr_a | temp_addr_b;
+}
+
+static void recalculate_hashed_bits_df4p5_np2(struct addr_ctx *ctx,
+ struct df4p5_denorm_ctx *denorm_ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T, hashed_bit;
+
+ if (!denorm_ctx->rehash_vector)
+ return;
+
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ if (denorm_ctx->rehash_vector & BIT_ULL(8)) {
+ hashed_bit = FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa);
+ hashed_bit ^= FIELD_GET(BIT_ULL(14), denorm_ctx->current_spa);
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), denorm_ctx->current_spa) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), denorm_ctx->current_spa) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), denorm_ctx->current_spa) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(40), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ if (FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa) != hashed_bit)
+ denorm_ctx->current_spa ^= BIT_ULL(8);
+ }
+
+ if (denorm_ctx->rehash_vector & BIT_ULL(9)) {
+ hashed_bit = FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa);
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), denorm_ctx->current_spa) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), denorm_ctx->current_spa) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), denorm_ctx->current_spa) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(41), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ if (FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa) != hashed_bit)
+ denorm_ctx->current_spa ^= BIT_ULL(9);
+ }
+
+ if (denorm_ctx->rehash_vector & BIT_ULL(12)) {
+ hashed_bit = FIELD_GET(BIT_ULL(12), denorm_ctx->current_spa);
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), denorm_ctx->current_spa) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), denorm_ctx->current_spa) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), denorm_ctx->current_spa) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(42), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ if (FIELD_GET(BIT_ULL(12), denorm_ctx->current_spa) != hashed_bit)
+ denorm_ctx->current_spa ^= BIT_ULL(12);
+ }
+
+ if (denorm_ctx->rehash_vector & BIT_ULL(13)) {
+ hashed_bit = FIELD_GET(BIT_ULL(13), denorm_ctx->current_spa);
+ hashed_bit ^= FIELD_GET(BIT_ULL(19), denorm_ctx->current_spa) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(24), denorm_ctx->current_spa) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(33), denorm_ctx->current_spa) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(43), denorm_ctx->current_spa) & hash_ctl_1T;
+
+ if (FIELD_GET(BIT_ULL(13), denorm_ctx->current_spa) != hashed_bit)
+ denorm_ctx->current_spa ^= BIT_ULL(13);
+ }
+}
+
+static bool match_logical_coh_st_fabric_id(struct addr_ctx *ctx,
+ struct df4p5_denorm_ctx *denorm_ctx)
+{
+ /*
+ * The logical CS fabric ID of the permutation must be calculated from the
+ * current SPA with the base and with the MMIO hole.
+ */
+ u16 id = get_logical_coh_st_fabric_id_for_current_spa(ctx, denorm_ctx);
+
+ atl_debug(ctx, "Checking calculated logical coherent station fabric id:\n");
+ atl_debug(ctx, " calculated fabric id = 0x%x\n", id);
+ atl_debug(ctx, " expected fabric id = 0x%x\n", denorm_ctx->coh_st_fabric_id);
+
+ return denorm_ctx->coh_st_fabric_id == id;
+}
+
+static bool match_norm_addr(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx)
+{
+ u64 addr = remove_base_and_hole(ctx, denorm_ctx->current_spa);
+
+ /*
+ * The normalized address must be calculated with the current SPA without
+ * the base and without the MMIO hole.
+ */
+ addr = normalize_addr_df4p5_np2(ctx, denorm_ctx, addr);
+
+ atl_debug(ctx, "Checking calculated normalized address:\n");
+ atl_debug(ctx, " calculated normalized addr = 0x%016llx\n", addr);
+ atl_debug(ctx, " expected normalized addr = 0x%016llx\n", ctx->ret_addr);
+
+ return addr == ctx->ret_addr;
+}
+
+static int check_permutations(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx)
+{
+ u64 test_perm, temp_addr, denorm_addr, num_perms;
+ unsigned int dropped_remainder;
+
+ denorm_ctx->div_addr *= denorm_ctx->mod_value;
+
+ /*
+ * The high order bits of num_permutations represent the permutations
+ * of the dropped remainder. This will be either 0-3 or 0-5 depending
+ * on the interleave mode. The low order bits represent the
+ * permutations of other "lost" bits which will be any combination of
+ * 1, 2, or 3 bits depending on the interleave mode.
+ */
+ num_perms = denorm_ctx->mod_value << denorm_ctx->perm_shift;
+
+ for (test_perm = 0; test_perm < num_perms; test_perm++) {
+ denorm_addr = denorm_ctx->base_denorm_addr;
+ dropped_remainder = test_perm >> denorm_ctx->perm_shift;
+ temp_addr = denorm_ctx->div_addr + dropped_remainder;
+
+ switch (ctx->map.intlv_mode) {
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ denorm_addr |= temp_addr << 14;
+ break;
+
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ denorm_addr |= temp_addr << 13;
+ break;
+
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ denorm_addr |= temp_addr << 12;
+ break;
+
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ denorm_addr |= FIELD_GET(BIT_ULL(0), temp_addr) << 9;
+ denorm_addr |= FIELD_GET(GENMASK_ULL(63, 1), temp_addr) << 12;
+ break;
+
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ denorm_addr |= FIELD_GET(GENMASK_ULL(1, 0), temp_addr) << 8;
+ denorm_addr |= FIELD_GET(GENMASK_ULL(63, 2), (temp_addr)) << 12;
+ break;
+
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ denorm_addr |= FIELD_GET(BIT_ULL(0), temp_addr) << 8;
+ denorm_addr |= FIELD_GET(GENMASK_ULL(63, 1), temp_addr) << 12;
+ break;
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ switch (ctx->map.intlv_mode) {
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8;
+ denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 9;
+ denorm_addr |= FIELD_GET(BIT_ULL(2), test_perm) << 12;
+ break;
+
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8;
+ denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 12;
+ denorm_addr |= FIELD_GET(BIT_ULL(2), test_perm) << 13;
+ break;
+
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8;
+ denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 12;
+ break;
+
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8;
+ denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 9;
+ break;
+
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8;
+ break;
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ denorm_ctx->current_spa = add_base_and_hole(ctx, denorm_addr);
+ recalculate_hashed_bits_df4p5_np2(ctx, denorm_ctx);
+
+ atl_debug(ctx, "Checking potential system physical address 0x%016llx\n",
+ denorm_ctx->current_spa);
+
+ if (!match_logical_coh_st_fabric_id(ctx, denorm_ctx))
+ continue;
+
+ if (!match_norm_addr(ctx, denorm_ctx))
+ continue;
+
+ if (denorm_ctx->resolved_spa == INVALID_SPA ||
+ denorm_ctx->current_spa > denorm_ctx->resolved_spa)
+ denorm_ctx->resolved_spa = denorm_ctx->current_spa;
+ }
+
+ if (denorm_ctx->resolved_spa == INVALID_SPA) {
+ atl_debug(ctx, "Failed to find valid SPA for normalized address 0x%016llx\n",
+ ctx->ret_addr);
+ return -EINVAL;
+ }
+
+ /* Return the resolved SPA without the base, without the MMIO hole */
+ ctx->ret_addr = remove_base_and_hole(ctx, denorm_ctx->resolved_spa);
+
+ return 0;
+}
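The loop above enumerates mod_value << perm_shift candidates: the bits above perm_shift select a candidate dropped remainder, and the low perm_shift bits select candidate values for the "lost" address bits. A standalone sketch of that decomposition, using hypothetical NPS0 24-channel 1K parameters (mod 3, three lost bits):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical NPS0 24-channel 1K case: modulo 3, three lost bits. */
	const uint64_t mod_value = 3;
	const unsigned int perm_shift = 3;
	const uint64_t num_perms = mod_value << perm_shift;

	for (uint64_t test_perm = 0; test_perm < num_perms; test_perm++) {
		uint64_t dropped_remainder = test_perm >> perm_shift;
		uint64_t lost_bits = test_perm & ((1ULL << perm_shift) - 1);

		printf("perm %2llu: remainder=%llu lost_bits=0x%llx\n",
		       (unsigned long long)test_perm,
		       (unsigned long long)dropped_remainder,
		       (unsigned long long)lost_bits);
	}
	return 0;
}
```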
+
+static int init_df4p5_denorm_ctx(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx)
+{
+ denorm_ctx->current_spa = INVALID_SPA;
+ denorm_ctx->resolved_spa = INVALID_SPA;
+
+ switch (ctx->map.intlv_mode) {
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ denorm_ctx->perm_shift = 3;
+ denorm_ctx->rehash_vector = BIT(8) | BIT(9) | BIT(12);
+ break;
+
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ denorm_ctx->perm_shift = 3;
+ denorm_ctx->rehash_vector = BIT(8) | BIT(12) | BIT(13);
+ break;
+
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ denorm_ctx->perm_shift = 2;
+ denorm_ctx->rehash_vector = BIT(8);
+ break;
+
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ denorm_ctx->perm_shift = 2;
+ denorm_ctx->rehash_vector = BIT(8) | BIT(12);
+ break;
+
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ denorm_ctx->perm_shift = 1;
+ denorm_ctx->rehash_vector = BIT(8);
+ break;
+
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ denorm_ctx->perm_shift = 2;
+ denorm_ctx->rehash_vector = 0;
+ break;
+
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ denorm_ctx->perm_shift = 1;
+ denorm_ctx->rehash_vector = 0;
+ break;
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ denorm_ctx->base_denorm_addr = FIELD_GET(GENMASK_ULL(7, 0), ctx->ret_addr);
+
+ switch (ctx->map.intlv_mode) {
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ denorm_ctx->base_denorm_addr |= FIELD_GET(GENMASK_ULL(9, 8), ctx->ret_addr) << 10;
+ denorm_ctx->div_addr = FIELD_GET(GENMASK_ULL(63, 10), ctx->ret_addr);
+ break;
+
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ denorm_ctx->base_denorm_addr |= FIELD_GET(GENMASK_ULL(10, 8), ctx->ret_addr) << 9;
+ denorm_ctx->div_addr = FIELD_GET(GENMASK_ULL(63, 11), ctx->ret_addr);
+ break;
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ if (ctx->map.num_intlv_chan % 3 == 0)
+ denorm_ctx->mod_value = 3;
+ else
+ denorm_ctx->mod_value = 5;
+
+ denorm_ctx->coh_st_fabric_id = get_logical_coh_st_fabric_id(ctx) - get_dst_fabric_id(ctx);
+
+ atl_debug(ctx, "Initialized df4p5_denorm_ctx:");
+ atl_debug(ctx, " mod_value = %d", denorm_ctx->mod_value);
+ atl_debug(ctx, " perm_shift = %d", denorm_ctx->perm_shift);
+ atl_debug(ctx, " rehash_vector = 0x%x", denorm_ctx->rehash_vector);
+ atl_debug(ctx, " base_denorm_addr = 0x%016llx", denorm_ctx->base_denorm_addr);
+ atl_debug(ctx, " div_addr = 0x%016llx", denorm_ctx->div_addr);
+ atl_debug(ctx, " coh_st_fabric_id = 0x%x", denorm_ctx->coh_st_fabric_id);
+
+ return 0;
+}
+
+/*
+ * For DF 4.5, parts of the physical address can be directly pulled from the
+ * normalized address. The exact bits will differ between interleave modes, but
+ * using NPS0_24CHAN_1K_HASH as an example, the normalized address consists of
+ * bits [63:13] (divided by 3), bits [11:10], and bits [7:0] of the system
+ * physical address.
+ *
+ * In this case, there is no way to reconstruct the missing bits (bits 8, 9,
+ * and 12) from the normalized address. Additionally, when bits [63:13] are
+ * divided by 3, the remainder is dropped. Determine the proper combination of
+ * "lost" bits and dropped remainder by iterating through each possible
+ * permutation of these bits and then normalizing the generated system physical
+ * addresses. If the normalized address matches the address we are trying to
+ * translate, then we have found the correct permutation of bits.
+ */
+static int denorm_addr_df4p5_np2(struct addr_ctx *ctx)
+{
+ struct df4p5_denorm_ctx denorm_ctx;
+ int ret = 0;
+
+ memset(&denorm_ctx, 0, sizeof(denorm_ctx));
+
+ atl_debug(ctx, "Denormalizing DF 4.5 normalized address 0x%016llx", ctx->ret_addr);
+
+ ret = init_df4p5_denorm_ctx(ctx, &denorm_ctx);
+ if (ret)
+ return ret;
+
+ return check_permutations(ctx, &denorm_ctx);
+}
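As a worked example of the comment above: for DF4p5_NPS0_24CHAN_1K_HASH the normalized address is assembled from SPA[7:0], SPA[11:10], and SPA[63:13] divided by 3. A simplified standalone sketch of that forward composition (it ignores the DRAM base/MMIO-hole adjustment, the hashed bits, and the dropped remainder):

```c
#include <stdint.h>
#include <stdio.h>

/* Build the NPS0 24-channel 1K normalized address from an SPA (simplified). */
static uint64_t norm_nps0_24chan_1k(uint64_t spa)
{
	uint64_t norm = spa & 0xffULL;			/* NA[7:0]   = SPA[7:0]        */

	norm |= ((spa >> 10) & 0x3ULL) << 8;		/* NA[9:8]   = SPA[11:10]      */
	norm |= ((spa >> 13) / 3) << 10;		/* NA[63:10] = SPA[63:13] / 3  */

	return norm;
}

int main(void)
{
	uint64_t spa = 0x123456789000ULL;

	printf("normalized = 0x%016llx\n",
	       (unsigned long long)norm_nps0_24chan_1k(spa));
	return 0;
}
```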
+
int denormalize_address(struct addr_ctx *ctx)
{
switch (ctx->map.intlv_mode) {
@@ -710,6 +1258,19 @@ int denormalize_address(struct addr_ctx *ctx)
case DF4_NPS2_5CHAN_HASH:
case DF4_NPS1_10CHAN_HASH:
return denorm_addr_df4_np2(ctx);
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ return denorm_addr_df4p5_np2(ctx);
case DF3_6CHAN:
return denorm_addr_df3_6chan(ctx);
default:
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 5de69e0bb0f9..9de5d53d0568 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -21,6 +21,9 @@
#include "reg_fields.h"
+#undef pr_fmt
+#define pr_fmt(fmt) "amd_atl: " fmt
+
/* Maximum possible number of Coherent Stations within a single Data Fabric. */
#define MAX_COH_ST_CHANNELS 32
@@ -34,6 +37,8 @@
#define DF_DRAM_BASE_LIMIT_LSB 28
#define MI300_DRAM_LIMIT_LSB 20
+#define INVALID_SPA ~0ULL
+
enum df_revisions {
UNKNOWN,
DF2,
@@ -90,6 +95,44 @@ enum intlv_modes {
DF4p5_NPS1_10CHAN_2K_HASH = 0x49,
};
+struct df4p5_denorm_ctx {
+ /* Indicates the number of "lost" bits. This will be 1, 2, or 3. */
+ u8 perm_shift;
+
+ /* A mask indicating the bits that need to be rehashed. */
+ u16 rehash_vector;
+
+ /*
+ * Represents the value that the high bits of the normalized address
+ * are divided by during normalization. This value will be 3 for
+ * interleave modes with a number of channels divisible by 3 or the
+ * value will be 5 for interleave modes with a number of channels
+ * divisible by 5. Power-of-two interleave modes are handled
+ * separately.
+ */
+ u8 mod_value;
+
+ /*
+ * Represents the bits that can be directly pulled from the normalized
+ * address. In each case, pass through bits [7:0] of the normalized
+ * address. The other bits depend on the interleave bit position which
+ * will be bit 10 for 1K interleave stripe cases and bit 11 for 2K
+ * interleave stripe cases.
+ */
+ u64 base_denorm_addr;
+
+ /*
+ * Represents the high bits of the physical address that have been
+ * divided by the mod_value.
+ */
+ u64 div_addr;
+
+ u64 current_spa;
+ u64 resolved_spa;
+
+ u16 coh_st_fabric_id;
+};
+
struct df_flags {
__u8 legacy_ficaa : 1,
socket_id_shift_quirk : 1,
@@ -132,6 +175,8 @@ struct df_config {
/* Number of DRAM Address maps visible in a Coherent Station. */
u8 num_coh_st_maps;
+ u32 dram_hole_base;
+
/* Global flags to handle special cases. */
struct df_flags flags;
};
@@ -224,7 +269,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
int get_df_system_info(void);
int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
-int get_addr_hash_mi300(void);
+int get_umc_info_mi300(void);
int get_address_map(struct addr_ctx *ctx);
@@ -234,6 +279,9 @@ int dehash_address(struct addr_ctx *ctx);
unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr);
unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err);
+u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr);
+u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr);
+
/*
* Make a gap in @data that is @num_bits long starting at @bit_num.
* e.g. data = 11111111'b
diff --git a/drivers/ras/amd/atl/map.c b/drivers/ras/amd/atl/map.c
index 8b908e8d7495..24a05af747d5 100644
--- a/drivers/ras/amd/atl/map.c
+++ b/drivers/ras/amd/atl/map.c
@@ -642,6 +642,99 @@ static int get_global_map_data(struct addr_ctx *ctx)
return 0;
}
+/*
+ * Verify the interleave bits are correct in the different interleaving
+ * settings.
+ *
+ * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the
+ * respective interleaving is disabled.
+ */
+static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2,
+ u8 num_intlv_dies, u8 num_intlv_sockets)
+{
+ if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
+ pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
+ return false;
+ }
+
+ if (ctx->map.num_intlv_dies > num_intlv_dies) {
+ pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
+ return false;
+ }
+
+ if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
+ pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
+ return false;
+ }
+
+ return true;
+}
+
+static int validate_address_map(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case DF2_2CHAN_HASH:
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ if (!map_bits_valid(ctx, 8, 9, 1, 1))
+ goto err;
+ break;
+
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ if (!map_bits_valid(ctx, 8, 8, 1, 2))
+ goto err;
+ break;
+
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ if (ctx->map.num_intlv_sockets != 1 || !map_bits_valid(ctx, 8, 0, 1, 1))
+ goto err;
+ break;
+
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ if (ctx->map.num_intlv_sockets < 2 || !map_bits_valid(ctx, 8, 0, 1, 2))
+ goto err;
+ break;
+
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ if (!map_bits_valid(ctx, 8, 8, 4, 1))
+ goto err;
+ break;
+
+ /* Nothing to do for modes that don't need special validation checks. */
+ default:
+ break;
+ }
+
+ return 0;
+
+err:
+ atl_debug(ctx, "Inconsistent address map");
+ return -EINVAL;
+}
+
static void dump_address_map(struct dram_addr_map *map)
{
u8 i;
@@ -678,5 +771,9 @@ int get_address_map(struct addr_ctx *ctx)
dump_address_map(&ctx->map);
+ ret = validate_address_map(ctx);
+ if (ret)
+ return ret;
+
return ret;
}
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
index 701349e84942..e18d916d5e8b 100644
--- a/drivers/ras/amd/atl/system.c
+++ b/drivers/ras/amd/atl/system.c
@@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg)
if (reg == DF_FUNC0_ID_MI300) {
df_cfg.flags.heterogeneous = 1;
- if (get_addr_hash_mi300())
+ if (get_umc_info_mi300())
return -EINVAL;
}
@@ -223,6 +223,21 @@ static int determine_df_rev(void)
return -EINVAL;
}
+static int get_dram_hole_base(void)
+{
+ u8 func = 0;
+
+ if (df_cfg.rev >= DF4)
+ func = 7;
+
+ if (df_indirect_read_broadcast(0, func, 0x104, &df_cfg.dram_hole_base))
+ return -EINVAL;
+
+ df_cfg.dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;
+
+ return 0;
+}
+
static void get_num_maps(void)
{
switch (df_cfg.rev) {
@@ -266,6 +281,7 @@ static void dump_df_cfg(void)
pr_debug("num_coh_st_maps=%u", df_cfg.num_coh_st_maps);
+ pr_debug("dram_hole_base=0x%x", df_cfg.dram_hole_base);
pr_debug("flags.legacy_ficaa=%u", df_cfg.flags.legacy_ficaa);
pr_debug("flags.socket_id_shift_quirk=%u", df_cfg.flags.socket_id_shift_quirk);
}
@@ -273,7 +289,7 @@ static void dump_df_cfg(void)
int get_df_system_info(void)
{
if (determine_df_rev()) {
- pr_warn("amd_atl: Failed to determine DF Revision");
+ pr_warn("Failed to determine DF Revision");
df_cfg.rev = UNKNOWN;
return -EINVAL;
}
@@ -282,6 +298,9 @@ int get_df_system_info(void)
get_num_maps();
+ if (get_dram_hole_base())
+ pr_warn("Failed to read DRAM hole base");
+
dump_df_cfg();
return 0;
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index 59b6169093f7..a1b4accf7b96 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -68,6 +68,8 @@ struct xor_bits {
};
#define NUM_BANK_BITS 4
+#define NUM_COL_BITS 5
+#define NUM_SID_BITS 2
static struct {
/* UMC::CH::AddrHashBank */
@@ -80,7 +82,22 @@ static struct {
u8 bank_xor;
} addr_hash;
+static struct {
+ u8 bank[NUM_BANK_BITS];
+ u8 col[NUM_COL_BITS];
+ u8 sid[NUM_SID_BITS];
+ u8 num_row_lo;
+ u8 num_row_hi;
+ u8 row_lo;
+ u8 row_hi;
+ u8 pc;
+} bit_shifts;
+
#define MI300_UMC_CH_BASE 0x90000
+#define MI300_ADDR_CFG (MI300_UMC_CH_BASE + 0x30)
+#define MI300_ADDR_SEL (MI300_UMC_CH_BASE + 0x40)
+#define MI300_COL_SEL_LO (MI300_UMC_CH_BASE + 0x50)
+#define MI300_ADDR_SEL_2 (MI300_UMC_CH_BASE + 0xA4)
#define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8)
#define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0)
#define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4)
@@ -90,17 +107,42 @@ static struct {
#define ADDR_HASH_ROW_XOR GENMASK(31, 14)
#define ADDR_HASH_BANK_XOR GENMASK(5, 0)
+#define ADDR_CFG_NUM_ROW_LO GENMASK(11, 8)
+#define ADDR_CFG_NUM_ROW_HI GENMASK(15, 12)
+
+#define ADDR_SEL_BANK0 GENMASK(3, 0)
+#define ADDR_SEL_BANK1 GENMASK(7, 4)
+#define ADDR_SEL_BANK2 GENMASK(11, 8)
+#define ADDR_SEL_BANK3 GENMASK(15, 12)
+#define ADDR_SEL_BANK4 GENMASK(20, 16)
+#define ADDR_SEL_ROW_LO GENMASK(27, 24)
+#define ADDR_SEL_ROW_HI GENMASK(31, 28)
+
+#define COL_SEL_LO_COL0 GENMASK(3, 0)
+#define COL_SEL_LO_COL1 GENMASK(7, 4)
+#define COL_SEL_LO_COL2 GENMASK(11, 8)
+#define COL_SEL_LO_COL3 GENMASK(15, 12)
+#define COL_SEL_LO_COL4 GENMASK(19, 16)
+
+#define ADDR_SEL_2_BANK5 GENMASK(4, 0)
+#define ADDR_SEL_2_CHAN GENMASK(15, 12)
+
/*
* Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used
- * for hashing. Do this during module init, since the values will not
- * change during run time.
+ * for hashing.
+ *
+ * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH:ColSelLo registers to
+ * get the values needed to reconstruct the normalized address. Apply additional
+ * offsets to the raw register values, as needed.
+ *
+ * Do this during module init, since the values will not change during run time.
*
* These registers are instantiated for each UMC across each AMD Node.
* However, they should be identically programmed due to the fixed hardware
* design of MI300 systems. So read the values from Node 0 UMC 0 and keep a
* single global structure for simplicity.
*/
-int get_addr_hash_mi300(void)
+int get_umc_info_mi300(void)
{
u32 temp;
int ret;
@@ -130,6 +172,44 @@ int get_addr_hash_mi300(void)
addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp);
+ ret = amd_smn_read(0, MI300_ADDR_CFG, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp);
+ bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp);
+ bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp);
+ bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp);
+ bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp);
+ /* Use BankBit4 for the SID0 position. */
+ bit_shifts.sid[0] = 5 + FIELD_GET(ADDR_SEL_BANK4, temp);
+ bit_shifts.row_lo = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp);
+ bit_shifts.row_hi = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp);
+
+ ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp);
+ bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp);
+ bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp);
+ bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp);
+ bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp);
+ if (ret)
+ return ret;
+
+ /* Use BankBit5 for the SID1 position. */
+ bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp);
+ bit_shifts.pc = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp);
+
return 0;
}
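The register fields read above are selectors: each selector value plus a fixed offset (+2 for column bits, +5 for bank/SID bits, and so on) gives the normalized-address bit position used later when the address is reconstructed. A standalone sketch for the column selectors, with a hypothetical UMC::CH::ColSelLo value:

```c
#include <stdint.h>
#include <stdio.h>

/* Extract a 4-bit selector field starting at the given bit of a register. */
static unsigned int field(uint32_t reg, unsigned int lsb)
{
	return (reg >> lsb) & 0xf;
}

int main(void)
{
	/* Hypothetical ColSelLo value: COL0 sel = 3, COL1 sel = 4, ... COL4 sel = 7. */
	uint32_t col_sel_lo = 0x00076543;
	unsigned int col = 0x15;	/* example 5-bit column from the MCA address */
	uint64_t addr = 0;

	for (unsigned int i = 0; i < 5; i++) {
		/* Register selector plus the fixed +2 offset gives the NA bit position. */
		unsigned int shift = 2 + field(col_sel_lo, 4 * i);

		addr |= (uint64_t)((col >> i) & 1) << shift;
		printf("column bit %u -> normalized address bit %u\n", i, shift);
	}

	printf("column contribution = 0x%016llx\n", (unsigned long long)addr);
	return 0;
}
```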
@@ -146,9 +226,6 @@ int get_addr_hash_mi300(void)
* The MCA address format is as follows:
* MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]}
*
- * The normalized address format is fixed in hardware and is as follows:
- * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]}
- *
* Additionally, the PC and Bank bits may be hashed. This must be accounted for before
* reconstructing the normalized address.
*/
@@ -158,18 +235,10 @@ int get_addr_hash_mi300(void)
#define MI300_UMC_MCA_PC BIT(25)
#define MI300_UMC_MCA_SID GENMASK(27, 26)
-#define MI300_NA_COL_1_0 GENMASK(6, 5)
-#define MI300_NA_PC BIT(7)
-#define MI300_NA_COL_3_2 GENMASK(9, 8)
-#define MI300_NA_BANK_3_2 GENMASK(11, 10)
-#define MI300_NA_BANK_1_0 GENMASK(13, 12)
-#define MI300_NA_COL_4 BIT(14)
-#define MI300_NA_ROW GENMASK(28, 15)
-#define MI300_NA_SID GENMASK(30, 29)
-
static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
{
- u16 i, col, row, bank, pc, sid, temp;
+ u16 i, col, row, bank, pc, sid;
+ u32 temp;
col = FIELD_GET(MI300_UMC_MCA_COL, addr);
bank = FIELD_GET(MI300_UMC_MCA_BANK, addr);
@@ -189,49 +258,48 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
/* Calculate hash for PC bit. */
if (addr_hash.pc.xor_enable) {
- /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */
- bank |= sid << 5;
-
temp = bitwise_xor_bits(col & addr_hash.pc.col_xor);
temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor);
- temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor);
+ /* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */
+ temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor);
pc ^= temp;
-
- /* Drop SID bits for the sake of debug printing later. */
- bank &= 0x1F;
}
/* Reconstruct the normalized address starting with NA[4:0] = 0 */
addr = 0;
- /* NA[6:5] = Column[1:0] */
- temp = col & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_1_0, temp);
-
- /* NA[7] = PC */
- addr |= FIELD_PREP(MI300_NA_PC, pc);
-
- /* NA[9:8] = Column[3:2] */
- temp = (col >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_3_2, temp);
+ /* Column bits */
+ for (i = 0; i < NUM_COL_BITS; i++) {
+ temp = (col >> i) & 0x1;
+ addr |= temp << bit_shifts.col[i];
+ }
- /* NA[11:10] = Bank[3:2] */
- temp = (bank >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp);
+ /* Bank bits */
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ temp = (bank >> i) & 0x1;
+ addr |= temp << bit_shifts.bank[i];
+ }
- /* NA[13:12] = Bank[1:0] */
- temp = bank & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp);
+ /* Row lo bits */
+ for (i = 0; i < bit_shifts.num_row_lo; i++) {
+ temp = (row >> i) & 0x1;
+ addr |= temp << (i + bit_shifts.row_lo);
+ }
- /* NA[14] = Column[4] */
- temp = (col >> 4) & 0x1;
- addr |= FIELD_PREP(MI300_NA_COL_4, temp);
+ /* Row hi bits */
+ for (i = 0; i < bit_shifts.num_row_hi; i++) {
+ temp = (row >> (i + bit_shifts.num_row_lo)) & 0x1;
+ addr |= temp << (i + bit_shifts.row_hi);
+ }
- /* NA[28:15] = Row[13:0] */
- addr |= FIELD_PREP(MI300_NA_ROW, row);
+ /* PC bit */
+ addr |= pc << bit_shifts.pc;
- /* NA[30:29] = SID[1:0] */
- addr |= FIELD_PREP(MI300_NA_SID, sid);
+ /* SID bits */
+ for (i = 0; i < NUM_SID_BITS; i++) {
+ temp = (sid >> i) & 0x1;
+ addr |= temp << bit_shifts.sid[i];
+ }
pr_debug("Addr=0x%016lx", addr);
pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid);
diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c
index 271dfad05d68..90de737fbc90 100644
--- a/drivers/ras/amd/fmpm.c
+++ b/drivers/ras/amd/fmpm.c
@@ -56,6 +56,8 @@
#include "../debugfs.h"
+#include "atl/internal.h"
+
#define INVALID_CPU UINT_MAX
/* Validation Bits */
@@ -116,8 +118,6 @@ static struct fru_rec **fru_records;
/* system physical addresses array */
static u64 *spa_entries;
-#define INVALID_SPA ~0ULL
-
static struct dentry *fmpm_dfs_dir;
static struct dentry *fmpm_dfs_entries;
diff --git a/drivers/regulator/88pm886-regulator.c b/drivers/regulator/88pm886-regulator.c
new file mode 100644
index 000000000000..a38bd4f312b7
--- /dev/null
+++ b/drivers/regulator/88pm886-regulator.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#include <linux/mfd/88pm886.h>
+
+static const struct regmap_config pm886_regulator_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PM886_REG_BUCK5_VOUT,
+};
+
+static const struct regulator_ops pm886_ldo_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_ops pm886_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const unsigned int pm886_ldo_volt_table1[] = {
+ 1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
+};
+
+static const unsigned int pm886_ldo_volt_table2[] = {
+ 1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
+ 2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
+};
+
+static const unsigned int pm886_ldo_volt_table3[] = {
+ 1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
+};
+
+static const struct linear_range pm886_buck_volt_ranges1[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 79, 12500),
+ REGULATOR_LINEAR_RANGE(1600000, 80, 84, 50000),
+};
+
+static const struct linear_range pm886_buck_volt_ranges2[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 79, 12500),
+ REGULATOR_LINEAR_RANGE(1600000, 80, 114, 50000),
+};
+
+static struct regulator_desc pm886_regulators[] = {
+ {
+ .name = "LDO1",
+ .regulators_node = "regulators",
+ .of_match = "ldo1",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(0),
+ .volt_table = pm886_ldo_volt_table1,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table1),
+ .vsel_reg = PM886_REG_LDO1_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO2",
+ .regulators_node = "regulators",
+ .of_match = "ldo2",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(1),
+ .volt_table = pm886_ldo_volt_table1,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table1),
+ .vsel_reg = PM886_REG_LDO2_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO3",
+ .regulators_node = "regulators",
+ .of_match = "ldo3",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(2),
+ .volt_table = pm886_ldo_volt_table1,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table1),
+ .vsel_reg = PM886_REG_LDO3_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO4",
+ .regulators_node = "regulators",
+ .of_match = "ldo4",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(3),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO4_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO5",
+ .regulators_node = "regulators",
+ .of_match = "ldo5",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(4),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO5_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO6",
+ .regulators_node = "regulators",
+ .of_match = "ldo6",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(5),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO6_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO7",
+ .regulators_node = "regulators",
+ .of_match = "ldo7",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(6),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO7_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO8",
+ .regulators_node = "regulators",
+ .of_match = "ldo8",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN1,
+ .enable_mask = BIT(7),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO8_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO9",
+ .regulators_node = "regulators",
+ .of_match = "ldo9",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(0),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO9_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO10",
+ .regulators_node = "regulators",
+ .of_match = "ldo10",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(1),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO10_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO11",
+ .regulators_node = "regulators",
+ .of_match = "ldo11",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(2),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO11_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO12",
+ .regulators_node = "regulators",
+ .of_match = "ldo12",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(3),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO12_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO13",
+ .regulators_node = "regulators",
+ .of_match = "ldo13",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(4),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO13_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO14",
+ .regulators_node = "regulators",
+ .of_match = "ldo14",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(5),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO14_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO15",
+ .regulators_node = "regulators",
+ .of_match = "ldo15",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(6),
+ .volt_table = pm886_ldo_volt_table2,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table2),
+ .vsel_reg = PM886_REG_LDO15_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "LDO16",
+ .regulators_node = "regulators",
+ .of_match = "ldo16",
+ .ops = &pm886_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .enable_reg = PM886_REG_LDO_EN2,
+ .enable_mask = BIT(7),
+ .volt_table = pm886_ldo_volt_table3,
+ .n_voltages = ARRAY_SIZE(pm886_ldo_volt_table3),
+ .vsel_reg = PM886_REG_LDO16_VOUT,
+ .vsel_mask = PM886_LDO_VSEL_MASK,
+ },
+ {
+ .name = "buck1",
+ .regulators_node = "regulators",
+ .of_match = "buck1",
+ .ops = &pm886_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 85,
+ .linear_ranges = pm886_buck_volt_ranges1,
+ .n_linear_ranges = ARRAY_SIZE(pm886_buck_volt_ranges1),
+ .vsel_reg = PM886_REG_BUCK1_VOUT,
+ .vsel_mask = PM886_BUCK_VSEL_MASK,
+ .enable_reg = PM886_REG_BUCK_EN,
+ .enable_mask = BIT(0),
+ },
+ {
+ .name = "buck2",
+ .regulators_node = "regulators",
+ .of_match = "buck2",
+ .ops = &pm886_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 115,
+ .linear_ranges = pm886_buck_volt_ranges2,
+ .n_linear_ranges = ARRAY_SIZE(pm886_buck_volt_ranges2),
+ .vsel_reg = PM886_REG_BUCK2_VOUT,
+ .vsel_mask = PM886_BUCK_VSEL_MASK,
+ .enable_reg = PM886_REG_BUCK_EN,
+ .enable_mask = BIT(1),
+ },
+ {
+ .name = "buck3",
+ .regulators_node = "regulators",
+ .of_match = "buck3",
+ .ops = &pm886_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 115,
+ .linear_ranges = pm886_buck_volt_ranges2,
+ .n_linear_ranges = ARRAY_SIZE(pm886_buck_volt_ranges2),
+ .vsel_reg = PM886_REG_BUCK3_VOUT,
+ .vsel_mask = PM886_BUCK_VSEL_MASK,
+ .enable_reg = PM886_REG_BUCK_EN,
+ .enable_mask = BIT(2),
+ },
+ {
+ .name = "buck4",
+ .regulators_node = "regulators",
+ .of_match = "buck4",
+ .ops = &pm886_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 115,
+ .linear_ranges = pm886_buck_volt_ranges2,
+ .n_linear_ranges = ARRAY_SIZE(pm886_buck_volt_ranges2),
+ .vsel_reg = PM886_REG_BUCK4_VOUT,
+ .vsel_mask = PM886_BUCK_VSEL_MASK,
+ .enable_reg = PM886_REG_BUCK_EN,
+ .enable_mask = BIT(3),
+ },
+ {
+ .name = "buck5",
+ .regulators_node = "regulators",
+ .of_match = "buck5",
+ .ops = &pm886_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 115,
+ .linear_ranges = pm886_buck_volt_ranges2,
+ .n_linear_ranges = ARRAY_SIZE(pm886_buck_volt_ranges2),
+ .vsel_reg = PM886_REG_BUCK5_VOUT,
+ .vsel_mask = PM886_BUCK_VSEL_MASK,
+ .enable_reg = PM886_REG_BUCK_EN,
+ .enable_mask = BIT(4),
+ },
+};
+
+static int pm886_regulator_probe(struct platform_device *pdev)
+{
+ struct pm886_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config rcfg = { };
+ struct device *dev = &pdev->dev;
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+ struct i2c_client *page;
+ struct regmap *regmap;
+
+ page = devm_i2c_new_dummy_device(dev, chip->client->adapter,
+ chip->client->addr + PM886_PAGE_OFFSET_REGULATORS);
+ if (IS_ERR(page))
+ return dev_err_probe(dev, PTR_ERR(page),
+ "Failed to initialize regulators client\n");
+
+ regmap = devm_regmap_init_i2c(page, &pm886_regulator_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize regulators regmap\n");
+ rcfg.regmap = regmap;
+
+ rcfg.dev = dev->parent;
+
+ for (int i = 0; i < ARRAY_SIZE(pm886_regulators); i++) {
+ rdesc = &pm886_regulators[i];
+ rdev = devm_regulator_register(dev, rdesc, &rcfg);
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register %s\n", rdesc->name);
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id pm886_regulator_id_table[] = {
+ { "88pm886-regulator", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, pm886_regulator_id_table);
+
+static struct platform_driver pm886_regulator_driver = {
+ .driver = {
+ .name = "88pm886-regulator",
+ },
+ .probe = pm886_regulator_probe,
+ .id_table = pm886_regulator_id_table,
+};
+module_platform_driver(pm886_regulator_driver);
+
+MODULE_DESCRIPTION("Marvell 88PM886 PMIC regulator driver");
+MODULE_AUTHOR("Karel Balej <balejk@matfyz.cz>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index d333be2bea3b..e6a9027773fc 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -91,6 +91,12 @@ config REGULATOR_88PM8607
help
This driver supports 88PM8607 voltage regulator chips.
+config REGULATOR_88PM886
+ tristate "Marvell 88PM886 voltage regulators"
+ depends on MFD_88PM886_PMIC
+ help
+ This driver implements support for Marvell 88PM886 voltage regulators.
+
config REGULATOR_ACT8865
tristate "Active-semi act8865 voltage regulator"
depends on I2C
@@ -268,6 +274,18 @@ config REGULATOR_BD957XMUF
This driver can also be built as a module. If so, the module
will be called bd9576-regulator.
+config REGULATOR_BD96801
+ tristate "ROHM BD96801 Power Regulator"
+ depends on MFD_ROHM_BD96801
+ select REGULATOR_ROHM
+ help
+ This driver supports voltage regulators on ROHM BD96801 PMIC.
+ This will enable support for the software controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd96801-regulator.
+
config REGULATOR_CPCAP
tristate "Motorola CPCAP regulator"
depends on MFD_CPCAP
@@ -1027,6 +1045,13 @@ config REGULATOR_PWM
This driver supports PWM controlled voltage regulators. PWM
duty cycle can increase or decrease the voltage.
+config REGULATOR_QCOM_PM8008
+ tristate "Qualcomm PM8008 PMIC regulators"
+ depends on MFD_QCOM_PM8008
+ help
+ Select this option to enable support for the voltage regulators in
+ Qualcomm PM8008 PMICs.
+
config REGULATOR_QCOM_REFGEN
tristate "Qualcomm REFGEN regulator driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -1634,6 +1659,15 @@ config REGULATOR_UNIPHIER
help
Support for regulators implemented on Socionext UniPhier SoCs.
+config REGULATOR_RZG2L_VBCTRL
+ tristate "Renesas RZ/G2L USB VBUS regulator driver"
+ depends on ARCH_RZG2L || COMPILE_TEST
+ depends on OF
+ select REGMAP_MMIO
+ default ARCH_RZG2L
+ help
+ Support for VBUS regulators implemented on Renesas RZ/G2L SoCs.
+
config REGULATOR_VCTRL
tristate "Voltage controlled regulators"
depends on OF
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index ba15fa5f30ad..a61fa42b13c4 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_88PG86X) += 88pg86x.o
obj-$(CONFIG_REGULATOR_88PM800) += 88pm800-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
+obj-$(CONFIG_REGULATOR_88PM886) += 88pm886-regulator.o
obj-$(CONFIG_REGULATOR_CROS_EC) += cros-ec-regulator.o
obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_BD957XMUF) += bd9576-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o
+obj-$(CONFIG_REGULATOR_BD96801) += bd96801-regulator.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
obj-$(CONFIG_REGULATOR_DA9062) += da9062-regulator.o
@@ -112,6 +114,7 @@ obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_MTK_DVFSRC) += mtk-dvfsrc-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_LABIBB) += qcom-labibb-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_PM8008) += qcom-pm8008-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_REFGEN) += qcom-refgen-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
@@ -189,6 +192,7 @@ obj-$(CONFIG_REGULATOR_TPS65132) += tps65132-regulator.o
obj-$(CONFIG_REGULATOR_TPS68470) += tps68470-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
+obj-$(CONFIG_REGULATOR_RZG2L_VBCTRL) += renesas-usb-vbus-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 34fcdd82b2ea..f3c447ecdc3b 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -140,7 +140,7 @@
#define AXP717_DCDC1_NUM_VOLTAGES 88
#define AXP717_DCDC2_NUM_VOLTAGES 107
-#define AXP717_DCDC3_NUM_VOLTAGES 104
+#define AXP717_DCDC3_NUM_VOLTAGES 103
#define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0)
#define AXP717_LDO_V_OUT_MASK GENMASK(4, 0)
@@ -763,10 +763,15 @@ static const struct linear_range axp717_dcdc1_ranges[] = {
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
};
+/*
+ * The manual says that the last voltage is 3.4V, encoded as 0b1101011 (107),
+ * but every other method proves that this is wrong, so it's really 106 that
+ * programs the final 3.4V.
+ */
static const struct linear_range axp717_dcdc2_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000),
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
- REGULATOR_LINEAR_RANGE(1600000, 88, 107, 100000),
+ REGULATOR_LINEAR_RANGE(1600000, 88, 106, 100000),
};
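A quick arithmetic check of the comment above, based on the 100 mV/step range that starts at selector 88 (standalone sketch; the helper name is illustrative):

```c
#include <stdio.h>

/* Voltage for a DCDC2 selector in the 100 mV/step range starting at sel 88. */
static unsigned int dcdc2_high_range_uv(unsigned int sel)
{
	return 1600000 + (sel - 88) * 100000;
}

int main(void)
{
	/* Selector 106 is the last valid step and already reaches 3.4 V ... */
	printf("sel 106 -> %u uV\n", dcdc2_high_range_uv(106)); /* 3400000 */
	/* ... so a (hypothetical) selector 107 would overshoot to 3.5 V.   */
	printf("sel 107 -> %u uV\n", dcdc2_high_range_uv(107)); /* 3500000 */
	return 0;
}
```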
static const struct linear_range axp717_dcdc3_ranges[] = {
@@ -790,40 +795,40 @@ static const struct regulator_desc axp717_regulators[] = {
AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(3)),
- AXP_DESC(AXP717, ALDO1, "aldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100,
AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(0)),
- AXP_DESC(AXP717, ALDO2, "aldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO2, "aldo2", "aldoin", 500, 3500, 100,
AXP717_ALDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(1)),
- AXP_DESC(AXP717, ALDO3, "aldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO3, "aldo3", "aldoin", 500, 3500, 100,
AXP717_ALDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(2)),
- AXP_DESC(AXP717, ALDO4, "aldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO4, "aldo4", "aldoin", 500, 3500, 100,
AXP717_ALDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(3)),
- AXP_DESC(AXP717, BLDO1, "bldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO1, "bldo1", "bldoin", 500, 3500, 100,
AXP717_BLDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(4)),
- AXP_DESC(AXP717, BLDO2, "bldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO2, "bldo2", "bldoin", 500, 3500, 100,
AXP717_BLDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(5)),
- AXP_DESC(AXP717, BLDO3, "bldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO3, "bldo3", "bldoin", 500, 3500, 100,
AXP717_BLDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(6)),
- AXP_DESC(AXP717, BLDO4, "bldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO4, "bldo4", "bldoin", 500, 3500, 100,
AXP717_BLDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(7)),
- AXP_DESC(AXP717, CLDO1, "cldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO1, "cldo1", "cldoin", 500, 3500, 100,
AXP717_CLDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(0)),
- AXP_DESC(AXP717, CLDO2, "cldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO2, "cldo2", "cldoin", 500, 3500, 100,
AXP717_CLDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(1)),
- AXP_DESC(AXP717, CLDO3, "cldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO3, "cldo3", "cldoin", 500, 3500, 100,
AXP717_CLDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(2)),
- AXP_DESC(AXP717, CLDO4, "cldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO4, "cldo4", "cldoin", 500, 3500, 100,
AXP717_CLDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(3)),
AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50,
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index 26192d55a685..79fbb45297f6 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
* 10: 2.50mV/usec 10mV 4uS
* 11: 1.25mV/usec 10mV 8uS
*/
-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
+static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
int min_uA, int max_uA)
diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c
new file mode 100644
index 000000000000..46ca81f18703
--- /dev/null
+++ b/drivers/regulator/bd96801-regulator.c
@@ -0,0 +1,908 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024 ROHM Semiconductors
+// bd96801-regulator.c ROHM BD96801 regulator driver
+
+/*
+ * This version of the "BD86801 scalable PMIC"'s driver supports only very
+ * basic set of the PMIC features. Most notably, there is no support for
+ * the ERRB interrupt and the configurations which should be done when the
+ * PMIC is in STBY mode.
+ *
+ * Supporting the ERRB interrupt would require dropping the regmap-IRQ
+ * usage or working around (or accepting the presence of) a naming conflict
+ * in debugFS IRQs.
+ *
+ * Being able to reliably do the configurations like changing the
+ * regulator safety limits (like limits for the over/under -voltages, over
+ * current, thermal protection) would require the configuring driver to be
+ * synchronized with the entity causing the PMIC state transitions. E.g., one
+ * should be able to ensure the PMIC is in STBY state when the
+ * configurations are applied to the hardware. How and when the PMIC state
+ * transitions are to be done is likely to be very system specific, as will
+ * be the need to configure these safety limits. Hence it's not simple to
+ * come up with a generic solution.
+ *
+ * Users who require the ERRB handling and STBY state configurations can
+ * have a look at the original RFC:
+ * https://lore.kernel.org/all/cover.1712920132.git.mazziesaccount@gmail.com/
+ * which implements a workaround to debugFS naming conflict and some of
+ * the safety limit configurations - but leaves the state change handling
+ * and synchronization to be implemented.
+ *
+ * It would be great to hear (and receive a patch!) if you implement the
+ * STBY configuration support or a proper fix to the debugFS naming
+ * conflict in your downstream driver ;)
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-bd96801.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+enum {
+ BD96801_BUCK1,
+ BD96801_BUCK2,
+ BD96801_BUCK3,
+ BD96801_BUCK4,
+ BD96801_LDO5,
+ BD96801_LDO6,
+ BD96801_LDO7,
+ BD96801_REGULATOR_AMOUNT,
+};
+
+enum {
+ BD96801_PROT_OVP,
+ BD96801_PROT_UVP,
+ BD96801_PROT_OCP,
+ BD96801_PROT_TEMP,
+ BD96801_NUM_PROT,
+};
+
+#define BD96801_ALWAYS_ON_REG 0x3c
+#define BD96801_REG_ENABLE 0x0b
+#define BD96801_BUCK1_EN_MASK BIT(0)
+#define BD96801_BUCK2_EN_MASK BIT(1)
+#define BD96801_BUCK3_EN_MASK BIT(2)
+#define BD96801_BUCK4_EN_MASK BIT(3)
+#define BD96801_LDO5_EN_MASK BIT(4)
+#define BD96801_LDO6_EN_MASK BIT(5)
+#define BD96801_LDO7_EN_MASK BIT(6)
+
+#define BD96801_BUCK1_VSEL_REG 0x28
+#define BD96801_BUCK2_VSEL_REG 0x29
+#define BD96801_BUCK3_VSEL_REG 0x2a
+#define BD96801_BUCK4_VSEL_REG 0x2b
+#define BD96801_LDO5_VSEL_REG 0x25
+#define BD96801_LDO6_VSEL_REG 0x26
+#define BD96801_LDO7_VSEL_REG 0x27
+#define BD96801_BUCK_VSEL_MASK 0x1F
+#define BD96801_LDO_VSEL_MASK 0xff
+
+#define BD96801_MASK_RAMP_DELAY 0xc0
+#define BD96801_INT_VOUT_BASE_REG 0x21
+#define BD96801_BUCK_INT_VOUT_MASK 0xff
+
+#define BD96801_BUCK_VOLTS 256
+#define BD96801_LDO_VOLTS 256
+
+#define BD96801_OVP_MASK 0x03
+#define BD96801_MASK_BUCK1_OVP_SHIFT 0x00
+#define BD96801_MASK_BUCK2_OVP_SHIFT 0x02
+#define BD96801_MASK_BUCK3_OVP_SHIFT 0x04
+#define BD96801_MASK_BUCK4_OVP_SHIFT 0x06
+#define BD96801_MASK_LDO5_OVP_SHIFT 0x00
+#define BD96801_MASK_LDO6_OVP_SHIFT 0x02
+#define BD96801_MASK_LDO7_OVP_SHIFT 0x04
+
+#define BD96801_PROT_LIMIT_OCP_MIN 0x00
+#define BD96801_PROT_LIMIT_LOW 0x01
+#define BD96801_PROT_LIMIT_MID 0x02
+#define BD96801_PROT_LIMIT_HI 0x03
+
+#define BD96801_REG_BUCK1_OCP 0x32
+#define BD96801_REG_BUCK2_OCP 0x32
+#define BD96801_REG_BUCK3_OCP 0x33
+#define BD96801_REG_BUCK4_OCP 0x33
+
+#define BD96801_MASK_BUCK1_OCP_SHIFT 0x00
+#define BD96801_MASK_BUCK2_OCP_SHIFT 0x04
+#define BD96801_MASK_BUCK3_OCP_SHIFT 0x00
+#define BD96801_MASK_BUCK4_OCP_SHIFT 0x04
+
+#define BD96801_REG_LDO5_OCP 0x34
+#define BD96801_REG_LDO6_OCP 0x34
+#define BD96801_REG_LDO7_OCP 0x34
+
+#define BD96801_MASK_LDO5_OCP_SHIFT 0x00
+#define BD96801_MASK_LDO6_OCP_SHIFT 0x02
+#define BD96801_MASK_LDO7_OCP_SHIFT 0x04
+
+#define BD96801_MASK_SHD_INTB BIT(7)
+#define BD96801_INTB_FATAL BIT(7)
+
+#define BD96801_NUM_REGULATORS 7
+#define BD96801_NUM_LDOS 4
+
+/*
+ * Ramp rates for bucks are controlled by bits [7:6] as follows:
+ * 00 => 1 mV/uS
+ * 01 => 5 mV/uS
+ * 10 => 10 mV/uS
+ * 11 => 20 mV/uS
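+ *
+ * The regulator core's regulator_set_ramp_delay_regmap() helper selects the
+ * matching rate from the table below and writes its index into the
+ * BD96801_MASK_RAMP_DELAY field of the buck's VSEL register.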
+ */
+static const unsigned int buck_ramp_table[] = { 1000, 5000, 10000, 20000 };
+
+/*
+ * This is a voltage range that gets added to the selected
+ * bd96801_buck_init_volts value. The range from 0x0 to 0xF is actually
+ * bd96801_buck_init_volts + 0 ... bd96801_buck_init_volts + 150 mV,
+ * and the range from 0x10 to 0x1f is bd96801_buck_init_volts - 150 mV ...
+ * bd96801_buck_init_volts - 0. But as the members of linear_range are all
+ * unsigned, an offset of -150 mV is applied to the cached initial voltage
+ * instead, which shifts these ranges up by 150 mV and keeps all of the
+ * values >= 0.
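+ *
+ * For example (values for illustration only): with an initial voltage of
+ * 1.00 V the driver caches 850000 uV; tune selector 0x05 then yields
+ * 850000 + 200000 = 1.05 V (+50 mV) and selector 0x15 yields
+ * 850000 + 50000 = 0.90 V (-100 mV).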
+ */
+static const struct linear_range bd96801_tune_volts[] = {
+ REGULATOR_LINEAR_RANGE(150000, 0x00, 0xF, 10000),
+ REGULATOR_LINEAR_RANGE(0, 0x10, 0x1F, 10000),
+};
+
+static const struct linear_range bd96801_buck_init_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000 - 150000, 0x00, 0xc8, 5000),
+ REGULATOR_LINEAR_RANGE(1550000 - 150000, 0xc9, 0xec, 50000),
+ REGULATOR_LINEAR_RANGE(3300000 - 150000, 0xed, 0xff, 0),
+};
+
+static const struct linear_range bd96801_ldo_int_volts[] = {
+ REGULATOR_LINEAR_RANGE(300000, 0x00, 0x78, 25000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x79, 0xff, 0),
+};
+
+#define BD96801_LDO_SD_VOLT_MASK 0x1
+#define BD96801_LDO_MODE_MASK 0x6
+#define BD96801_LDO_MODE_INT 0x0
+#define BD96801_LDO_MODE_SD 0x2
+#define BD96801_LDO_MODE_DDR 0x4
+
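+/*
+ * Voltage tables used when an LDO is configured to SD or DDR mode. The low
+ * bit of the LDO's *_VOL_LVL register (BD96801_LDO_SD_VOLT_MASK) selects
+ * between the two entries.
+ */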
+static int ldo_ddr_volt_table[] = {500000, 300000};
+static int ldo_sd_volt_table[] = {3300000, 1800000};
+
+/* Constant IRQ initialization data (templates) */
+struct bd96801_irqinfo {
+ int type;
+ struct regulator_irq_desc irq_desc;
+ int err_cfg;
+ int wrn_cfg;
+ const char *irq_name;
+};
+
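+/*
+ * The templates initialize err_cfg/wrn_cfg to -1, i.e. not explicitly
+ * disabled; bd96801_rdev_intb_irqs() skips installing a handler only when
+ * both are set to 0.
+ */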
+#define BD96801_IRQINFO(_type, _name, _irqoff_ms, _irqname) \
+{ \
+ .type = (_type), \
+ .err_cfg = -1, \
+ .wrn_cfg = -1, \
+ .irq_name = (_irqname), \
+ .irq_desc = { \
+ .name = (_name), \
+ .irq_off_ms = (_irqoff_ms), \
+ .map_event = regulator_irq_map_event_simple, \
+ }, \
+}
+
+static const struct bd96801_irqinfo buck1_irqinfo[] = {
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-h", 500,
+ "bd96801-buck1-overcurr-h"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-l", 500,
+ "bd96801-buck1-overcurr-l"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-n", 500,
+ "bd96801-buck1-overcurr-n"),
+ BD96801_IRQINFO(BD96801_PROT_OVP, "buck1-over-voltage", 500,
+ "bd96801-buck1-overvolt"),
+ BD96801_IRQINFO(BD96801_PROT_UVP, "buck1-under-voltage", 500,
+ "bd96801-buck1-undervolt"),
+ BD96801_IRQINFO(BD96801_PROT_TEMP, "buck1-over-temp", 500,
+ "bd96801-buck1-thermal")
+};
+
+static const struct bd96801_irqinfo buck2_irqinfo[] = {
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-h", 500,
+ "bd96801-buck2-overcurr-h"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-l", 500,
+ "bd96801-buck2-overcurr-l"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-n", 500,
+ "bd96801-buck2-overcurr-n"),
+ BD96801_IRQINFO(BD96801_PROT_OVP, "buck2-over-voltage", 500,
+ "bd96801-buck2-overvolt"),
+ BD96801_IRQINFO(BD96801_PROT_UVP, "buck2-under-voltage", 500,
+ "bd96801-buck2-undervolt"),
+ BD96801_IRQINFO(BD96801_PROT_TEMP, "buck2-over-temp", 500,
+ "bd96801-buck2-thermal")
+};
+
+static const struct bd96801_irqinfo buck3_irqinfo[] = {
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-h", 500,
+ "bd96801-buck3-overcurr-h"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-l", 500,
+ "bd96801-buck3-overcurr-l"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-n", 500,
+ "bd96801-buck3-overcurr-n"),
+ BD96801_IRQINFO(BD96801_PROT_OVP, "buck3-over-voltage", 500,
+ "bd96801-buck3-overvolt"),
+ BD96801_IRQINFO(BD96801_PROT_UVP, "buck3-under-voltage", 500,
+ "bd96801-buck3-undervolt"),
+ BD96801_IRQINFO(BD96801_PROT_TEMP, "buck3-over-temp", 500,
+ "bd96801-buck3-thermal")
+};
+
+static const struct bd96801_irqinfo buck4_irqinfo[] = {
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-h", 500,
+ "bd96801-buck4-overcurr-h"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-l", 500,
+ "bd96801-buck4-overcurr-l"),
+ BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-n", 500,
+ "bd96801-buck4-overcurr-n"),
+ BD96801_IRQINFO(BD96801_PROT_OVP, "buck4-over-voltage", 500,
+ "bd96801-buck4-overvolt"),
+ BD96801_IRQINFO(BD96801_PROT_UVP, "buck4-under-voltage", 500,
+ "bd96801-buck4-undervolt"),
+ BD96801_IRQINFO(BD96801_PROT_TEMP, "buck4-over-temp", 500,
+ "bd96801-buck4-thermal")
+};
+
+static const struct bd96801_irqinfo ldo5_irqinfo[] = {
+ BD96801_IRQINFO(BD96801_PROT_OCP, "ldo5-overcurr", 500,
+ "bd96801-ldo5-overcurr"),
+ BD96801_IRQINFO(BD96801_PROT_OVP, "ldo5-over-voltage", 500,
+ "bd96801-ldo5-overvolt"),
+ BD96801_IRQINFO(BD96801_PROT_UVP, "ldo5-under-voltage", 500,
+ "bd96801-ldo5-undervolt"),
+};
+
+static const struct bd96801_irqinfo ldo6_irqinfo[] = {
+ BD96801_IRQINFO(BD96801_PROT_OCP, "ldo6-overcurr", 500,
+ "bd96801-ldo6-overcurr"),
+ BD96801_IRQINFO(BD96801_PROT_OVP, "ldo6-over-voltage", 500,
+ "bd96801-ldo6-overvolt"),
+ BD96801_IRQINFO(BD96801_PROT_UVP, "ldo6-under-voltage", 500,
+ "bd96801-ldo6-undervolt"),
+};
+
+static const struct bd96801_irqinfo ldo7_irqinfo[] = {
+ BD96801_IRQINFO(BD96801_PROT_OCP, "ldo7-overcurr", 500,
+ "bd96801-ldo7-overcurr"),
+ BD96801_IRQINFO(BD96801_PROT_OVP, "ldo7-over-voltage", 500,
+ "bd96801-ldo7-overvolt"),
+ BD96801_IRQINFO(BD96801_PROT_UVP, "ldo7-under-voltage", 500,
+ "bd96801-ldo7-undervolt"),
+};
+
+struct bd96801_irq_desc {
+ struct bd96801_irqinfo *irqinfo;
+ int num_irqs;
+};
+
+struct bd96801_regulator_data {
+ struct regulator_desc desc;
+ const struct linear_range *init_ranges;
+ int num_ranges;
+ struct bd96801_irq_desc irq_desc;
+ int initial_voltage;
+ int ldo_vol_lvl;
+ int ldo_errs;
+};
+
+struct bd96801_pmic_data {
+ struct bd96801_regulator_data regulator_data[BD96801_NUM_REGULATORS];
+ struct regmap *regmap;
+ int fatal_ind;
+};
+
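+/*
+ * map_event callback for the core thermal IRQ: flag every LDO that
+ * registered for central over-temperature notifications with the
+ * errors/notifications it requested.
+ */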
+static int ldo_map_notif(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int i;
+
+ for (i = 0; i < rid->num_states; i++) {
+ struct bd96801_regulator_data *rdata;
+ struct regulator_dev *rdev;
+
+ rdev = rid->states[i].rdev;
+ rdata = container_of(rdev->desc, struct bd96801_regulator_data,
+ desc);
+ rid->states[i].notifs = regulator_err2notif(rdata->ldo_errs);
+ rid->states[i].errors = rdata->ldo_errs;
+ *dev_mask |= BIT(i);
+ }
+ return 0;
+}
+
+static int bd96801_list_voltage_lr(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ int voltage;
+ struct bd96801_regulator_data *data;
+
+ data = container_of(rdev->desc, struct bd96801_regulator_data, desc);
+
+ /*
+ * The BD96801 has the voltage setting split across two registers. One
+ * gives the "initial voltage" (which can be changed only when the
+ * regulator is disabled); this driver caches that value and sets it
+ * only at startup. The other register holds a voltage tuning value
+ * which applies a -150 mV ... +150 mV offset to the voltage.
+ *
+ * Note that the cached initial voltage stored in the regulator data is
+ * 'scaled down' by 150 mV so that all of our tuning values are
+ * >= 0. This is done because linear_ranges uses unsigned values.
+ *
+ * As a result, we increase the tuning voltage obtained from the
+ * selector by the stored initial_voltage.
+ */
+ voltage = regulator_list_voltage_linear_range(rdev, selector);
+ if (voltage < 0)
+ return voltage;
+
+ return voltage + data->initial_voltage;
+}
+
+
+static const struct regulator_ops bd96801_ldo_table_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd96801_buck_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = bd96801_list_voltage_lr,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+};
+
+static const struct regulator_ops bd96801_ldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static int buck_get_initial_voltage(struct regmap *regmap, struct device *dev,
+ struct bd96801_regulator_data *data)
+{
+ int ret = 0, sel, initial_uv;
+ int reg = BD96801_INT_VOUT_BASE_REG + data->desc.id;
+
+ if (data->num_ranges) {
+ ret = regmap_read(regmap, reg, &sel);
+ if (ret)
+ return ret;
+
+ sel &= BD96801_BUCK_INT_VOUT_MASK;
+
+ ret = linear_range_get_value_array(data->init_ranges,
+ data->num_ranges, sel,
+ &initial_uv);
+ if (ret)
+ return ret;
+
+ data->initial_voltage = initial_uv;
+ dev_dbg(dev, "Tune-scaled initial voltage %u\n",
+ data->initial_voltage);
+ }
+
+ return 0;
+}
+
+static int get_ldo_initial_voltage(struct regmap *regmap,
+ struct device *dev,
+ struct bd96801_regulator_data *data)
+{
+ int ret;
+ int cfgreg;
+
+ ret = regmap_read(regmap, data->ldo_vol_lvl, &cfgreg);
+ if (ret)
+ return ret;
+
+ switch (cfgreg & BD96801_LDO_MODE_MASK) {
+ case BD96801_LDO_MODE_DDR:
+ data->desc.volt_table = ldo_ddr_volt_table;
+ data->desc.n_voltages = ARRAY_SIZE(ldo_ddr_volt_table);
+ break;
+ case BD96801_LDO_MODE_SD:
+ data->desc.volt_table = ldo_sd_volt_table;
+ data->desc.n_voltages = ARRAY_SIZE(ldo_sd_volt_table);
+ break;
+ default:
+ dev_info(dev, "Leaving LDO to normal mode");
+ return 0;
+ }
+
+ /* SD or DDR mode => override default ops */
+ data->desc.ops = &bd96801_ldo_table_ops;
+ data->desc.vsel_mask = 1;
+ data->desc.vsel_reg = data->ldo_vol_lvl;
+
+ return 0;
+}
+
+static int get_initial_voltage(struct device *dev, struct regmap *regmap,
+ struct bd96801_regulator_data *data)
+{
+ /* BUCK */
+ if (data->desc.id <= BD96801_BUCK4)
+ return buck_get_initial_voltage(regmap, dev, data);
+
+ /* LDO */
+ return get_ldo_initial_voltage(regmap, dev, data);
+}
+
+static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap,
+ struct bd96801_regulator_data *data,
+ int num)
+{
+ int i, ret;
+ struct device_node *np;
+ struct device_node *nproot = dev->parent->of_node;
+
+ nproot = of_get_child_by_name(nproot, "regulators");
+ if (!nproot) {
+ dev_err(dev, "failed to find regulators node\n");
+ return -ENODEV;
+ }
+ for_each_child_of_node(nproot, np)
+ for (i = 0; i < num; i++) {
+ if (!of_node_name_eq(np, data[i].desc.of_match))
+ continue;
+ /*
+ * If STBY configs are supported, the DT node is needed here
+ * to extract the initial voltages. Hence the initial voltage
+ * reading is done in this loop.
+ */
+ ret = get_initial_voltage(dev, regmap, &data[i]);
+ if (ret) {
+ dev_err(dev,
+ "Initializing voltages for %s failed\n",
+ data[i].desc.name);
+ of_node_put(np);
+ of_node_put(nproot);
+
+ return ret;
+ }
+ if (of_property_read_bool(np, "rohm,keep-on-stby")) {
+ ret = regmap_set_bits(regmap,
+ BD96801_ALWAYS_ON_REG,
+ 1 << data[i].desc.id);
+ if (ret) {
+ dev_err(dev,
+ "failed to set %s on-at-stby\n",
+ data[i].desc.name);
+ of_node_put(np);
+ of_node_put(nproot);
+
+ return ret;
+ }
+ }
+ }
+ of_node_put(nproot);
+
+ return 0;
+}
+
+/*
+ * Template for regulator data. Probe will allocate a dynamic, per-instance
+ * struct, so we should be on the safe side even if there were multiple PMICs
+ * to control. Note that there is a plan to allow multiple PMICs to be used so
+ * systems can scale better. I am however still slightly unsure how the
+ * multi-PMIC case will be handled. I don't know if the processor will have
+ * I2C access to all of the PMICs or only the first one. I'd guess there will
+ * be access provided to all PMICs for voltage scaling - but the errors will
+ * only be reported via the master PMIC. E.g., we should prepare to support
+ * multiple driver instances - either with or without the IRQs... Well, let's
+ * first just support the simple and clear single-PMIC setup and ponder the
+ * multi-PMIC case later. What we can easily do to prepare is to not use
+ * static global data for regulators though.
+ */
+static const struct bd96801_pmic_data bd96801_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96801_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96801_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ }, {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96801_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96801_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("buck3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK3,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK3_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK3_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK3_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck3_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck3_irqinfo),
+ },
+ .init_ranges = bd96801_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96801_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("buck4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK4,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK4_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK4_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK4_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck4_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck4_irqinfo),
+ },
+ .init_ranges = bd96801_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96801_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("ldo5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO5,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO5_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO5_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo5_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo5_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO5_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("ldo6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO6,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO6_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO6_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo6_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo6_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO6_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo7",
+ .of_match = of_match_ptr("ldo7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO7,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO7_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO7_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo7_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo7_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO7_VOL_LVL_REG,
+ },
+ },
+};
+
+static int initialize_pmic_data(struct device *dev,
+ struct bd96801_pmic_data *pdata)
+{
+ int r, i;
+
+ /*
+ * Allocate and initialize IRQ data for all of the regulators. We
+ * wish to modify IRQ information independently for each driver
+ * instance.
+ */
+ for (r = 0; r < BD96801_NUM_REGULATORS; r++) {
+ const struct bd96801_irqinfo *template;
+ struct bd96801_irqinfo *new;
+ int num_infos;
+
+ template = pdata->regulator_data[r].irq_desc.irqinfo;
+ num_infos = pdata->regulator_data[r].irq_desc.num_irqs;
+
+ new = devm_kcalloc(dev, num_infos, sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ pdata->regulator_data[r].irq_desc.irqinfo = new;
+
+ for (i = 0; i < num_infos; i++)
+ new[i] = template[i];
+ }
+
+ return 0;
+}
+
+static int bd96801_rdev_intb_irqs(struct platform_device *pdev,
+ struct bd96801_pmic_data *pdata,
+ struct bd96801_irqinfo *iinfo,
+ struct regulator_dev *rdev)
+{
+ struct regulator_dev *rdev_arr[1];
+ void *retp;
+ int err = 0;
+ int irq;
+ int err_flags[] = {
+ [BD96801_PROT_OVP] = REGULATOR_ERROR_REGULATION_OUT,
+ [BD96801_PROT_UVP] = REGULATOR_ERROR_UNDER_VOLTAGE,
+ [BD96801_PROT_OCP] = REGULATOR_ERROR_OVER_CURRENT,
+ [BD96801_PROT_TEMP] = REGULATOR_ERROR_OVER_TEMP,
+
+ };
+ int wrn_flags[] = {
+ [BD96801_PROT_OVP] = REGULATOR_ERROR_OVER_VOLTAGE_WARN,
+ [BD96801_PROT_UVP] = REGULATOR_ERROR_UNDER_VOLTAGE_WARN,
+ [BD96801_PROT_OCP] = REGULATOR_ERROR_OVER_CURRENT_WARN,
+ [BD96801_PROT_TEMP] = REGULATOR_ERROR_OVER_TEMP_WARN,
+ };
+
+ /*
+ * Don't install IRQ handler if both error and warning
+ * notifications are explicitly disabled
+ */
+ if (!iinfo->err_cfg && !iinfo->wrn_cfg)
+ return 0;
+
+ if (WARN_ON(iinfo->type >= BD96801_NUM_PROT))
+ return -EINVAL;
+
+ if (iinfo->err_cfg)
+ err = err_flags[iinfo->type];
+ else if (iinfo->wrn_cfg)
+ err = wrn_flags[iinfo->type];
+
+ iinfo->irq_desc.data = pdata;
+ irq = platform_get_irq_byname(pdev, iinfo->irq_name);
+ if (irq < 0)
+ return irq;
+ /* Find notifications for this IRQ (WARN/ERR) */
+
+ rdev_arr[0] = rdev;
+ retp = devm_regulator_irq_helper(&pdev->dev,
+ &iinfo->irq_desc, irq,
+ 0, err, NULL, rdev_arr,
+ 1);
+ if (IS_ERR(retp))
+ return PTR_ERR(retp);
+
+ return 0;
+}
+
+
+
+static int bd96801_probe(struct platform_device *pdev)
+{
+ struct regulator_dev *ldo_errs_rdev_arr[BD96801_NUM_LDOS];
+ struct bd96801_regulator_data *rdesc;
+ struct regulator_config config = {};
+ int ldo_errs_arr[BD96801_NUM_LDOS];
+ struct bd96801_pmic_data *pdata;
+ int temp_notif_ldos = 0;
+ struct device *parent;
+ int i, ret;
+ void *retp;
+
+ parent = pdev->dev.parent;
+
+ pdata = devm_kmemdup(&pdev->dev, &bd96801_data, sizeof(bd96801_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (initialize_pmic_data(&pdev->dev, pdata))
+ return -ENOMEM;
+
+ pdata->regmap = dev_get_regmap(parent, NULL);
+ if (!pdata->regmap) {
+ dev_err(&pdev->dev, "No register map found\n");
+ return -ENODEV;
+ }
+
+ rdesc = &pdata->regulator_data[0];
+
+ config.driver_data = pdata;
+ config.regmap = pdata->regmap;
+ config.dev = parent;
+
+ ret = bd96801_walk_regulator_dt(&pdev->dev, pdata->regmap, rdesc,
+ BD96801_NUM_REGULATORS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->regulator_data); i++) {
+ struct regulator_dev *rdev;
+ struct bd96801_irq_desc *idesc = &rdesc[i].irq_desc;
+ int j;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &rdesc[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ rdesc[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ /*
+ * LDOs don't have their own temperature monitoring. If a temperature
+ * notification was requested for this LDO from DT, then we add the
+ * regulator to the set that is notified when the central IC temperature
+ * exceeds the threshold.
+ */
+ if (rdesc[i].ldo_errs) {
+ ldo_errs_rdev_arr[temp_notif_ldos] = rdev;
+ ldo_errs_arr[temp_notif_ldos] = rdesc[i].ldo_errs;
+ temp_notif_ldos++;
+ }
+ if (!idesc)
+ continue;
+
+ /* Register INTB handlers for configured protections */
+ for (j = 0; j < idesc->num_irqs; j++) {
+ ret = bd96801_rdev_intb_irqs(pdev, pdata,
+ &idesc->irqinfo[j], rdev);
+ if (ret)
+ return ret;
+ }
+ }
+ if (temp_notif_ldos) {
+ int irq;
+ struct regulator_irq_desc tw_desc = {
+ .name = "bd96801-core-thermal",
+ .irq_off_ms = 500,
+ .map_event = ldo_map_notif,
+ };
+
+ irq = platform_get_irq_byname(pdev, "bd96801-core-thermal");
+ if (irq < 0)
+ return irq;
+
+ retp = devm_regulator_irq_helper(&pdev->dev, &tw_desc, irq, 0,
+ 0, &ldo_errs_arr[0],
+ &ldo_errs_rdev_arr[0],
+ temp_notif_ldos);
+ if (IS_ERR(retp))
+ return PTR_ERR(retp);
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id bd96801_pmic_id[] = {
+ { "bd96801-regulator", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, bd96801_pmic_id);
+
+static struct platform_driver bd96801_regulator = {
+ .driver = {
+ .name = "bd96801-pmic"
+ },
+ .probe = bd96801_probe,
+ .id_table = bd96801_pmic_id,
+};
+
+module_platform_driver(bd96801_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD96801 voltage regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 5794f4e9dd52..7674b7f2df14 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3347,6 +3347,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
return map ? map : ERR_PTR(-EOPNOTSUPP);
}
+EXPORT_SYMBOL_GPL(regulator_get_regmap);
/**
* regulator_get_hardware_vsel_register - get the HW voltage selector register
@@ -3408,6 +3409,34 @@ int regulator_list_hardware_vsel(struct regulator *regulator,
EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);
/**
+ * regulator_hardware_enable - access the HW for enable/disable regulator
+ * @regulator: regulator source
+ * @enable: true for enable, false for disable
+ *
+ * Request that the regulator be enabled/disabled with the regulator output at
+ * the predefined voltage or current value.
+ *
+ * On success 0 is returned, otherwise a negative errno is returned.
+ */
+int regulator_hardware_enable(struct regulator *regulator, bool enable)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ const struct regulator_ops *ops = rdev->desc->ops;
+ int ret = -EOPNOTSUPP;
+
+ if (!rdev->exclusive || !ops || !ops->enable || !ops->disable)
+ return ret;
+
+ if (enable)
+ ret = ops->enable(rdev);
+ else
+ ret = ops->disable(rdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_hardware_enable);
+
+/**
* regulator_get_linear_step - return the voltage step size between VSEL values
* @regulator: regulator source
*
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 96257551bb12..d97162f73793 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -865,7 +865,7 @@ static const struct regmap_access_table da9121_volatile_table = {
};
/* DA9121 regmap config for 1 channel variants */
-static struct regmap_config da9121_1ch_regmap_config = {
+static const struct regmap_config da9121_1ch_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = DA9121_REG_OTP_CONFIG_ID,
@@ -876,7 +876,7 @@ static struct regmap_config da9121_1ch_regmap_config = {
};
/* DA9121 regmap config for 2 channel variants */
-static struct regmap_config da9121_2ch_regmap_config = {
+static const struct regmap_config da9121_2ch_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = DA9121_REG_OTP_CONFIG_ID,
@@ -993,7 +993,7 @@ error:
static int da9121_assign_chip_model(struct i2c_client *i2c,
struct da9121 *chip)
{
- struct regmap_config *regmap;
+ const struct regmap_config *regmap;
int ret = 0;
chip->dev = &i2c->dev;
@@ -1192,4 +1192,5 @@ static struct i2c_driver da9121_regulator_driver = {
module_i2c_driver(da9121_regulator_driver);
+MODULE_DESCRIPTION("Dialog Semiconductor DA9121/DA9122/DA9220/DA9217/DA9130/DA9131/DA9132 regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 02b85ca4a6fc..39ade0dba40f 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -202,8 +202,8 @@ static int da9210_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id da9210_i2c_id[] = {
- {"da9210", 0},
- {},
+ { "da9210" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, da9210_i2c_id);
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index e1b5c45f97f4..d4dab86fe385 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -439,7 +439,7 @@ static int lp3971_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id lp3971_i2c_id[] = {
- { "lp3971", 0 },
+ { "lp3971" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id);
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 7bd6f05edd8d..1b918fb72134 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -537,7 +537,7 @@ static int lp3972_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id lp3972_i2c_id[] = {
- { "lp3972", 0 },
+ { "lp3972" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp3972_i2c_id);
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 8d01e18046f3..5509bee49bda 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -430,7 +430,7 @@ static void lp8755_remove(struct i2c_client *client)
}
static const struct i2c_device_id lp8755_id[] = {
- {LP8755_NAME, 0},
+ { LP8755_NAME },
{}
};
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 0f133129252e..4242fbb7b147 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -276,7 +276,7 @@ static int max1586_pmic_probe(struct i2c_client *client)
}
static const struct i2c_device_id max1586_id[] = {
- { "max1586", 0 },
+ { "max1586" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max1586_id);
diff --git a/drivers/regulator/max20411-regulator.c b/drivers/regulator/max20411-regulator.c
index 8c09dc71b16d..02d7009ea0e6 100644
--- a/drivers/regulator/max20411-regulator.c
+++ b/drivers/regulator/max20411-regulator.c
@@ -145,8 +145,8 @@ static const struct of_device_id of_max20411_match_tbl[] = {
MODULE_DEVICE_TABLE(of, of_max20411_match_tbl);
static const struct i2c_device_id max20411_id[] = {
- { "max20411", 0 },
- { },
+ { "max20411" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, max20411_id);
@@ -161,4 +161,5 @@ static struct i2c_driver max20411_i2c_driver = {
};
module_i2c_driver(max20411_i2c_driver);
+MODULE_DESCRIPTION("Maxim MAX20411 High-Efficiency Single Step-Down Converter driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max77503-regulator.c b/drivers/regulator/max77503-regulator.c
index 4a6ba4dd2acd..c7c94e868fc1 100644
--- a/drivers/regulator/max77503-regulator.c
+++ b/drivers/regulator/max77503-regulator.c
@@ -25,14 +25,6 @@
#define MAX77503_AD_ENABLED 0x1
#define MAX77503_AD_DISABLED 0x0
-struct max77503_dev {
- struct device *dev;
- struct device_node *of_node;
- struct regulator_desc desc;
- struct regulator_dev *rdev;
- struct regmap *regmap;
-};
-
static const struct regmap_config max77503_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/drivers/regulator/max77857-regulator.c b/drivers/regulator/max77857-regulator.c
index 145ad0281857..bc28dc8503a8 100644
--- a/drivers/regulator/max77857-regulator.c
+++ b/drivers/regulator/max77857-regulator.c
@@ -67,7 +67,7 @@ static bool max77857_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config max77857_regmap_config = {
+static const struct regmap_config max77857_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_MAPLE,
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 24e1dfba78c8..f57c588bcf28 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -240,7 +240,7 @@ static int max8649_regulator_probe(struct i2c_client *client)
}
static const struct i2c_device_id max8649_id[] = {
- { "max8649", 0 },
+ { "max8649" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max8649_id);
diff --git a/drivers/regulator/max8893.c b/drivers/regulator/max8893.c
index 30592425e193..5a90633d8536 100644
--- a/drivers/regulator/max8893.c
+++ b/drivers/regulator/max8893.c
@@ -162,8 +162,8 @@ MODULE_DEVICE_TABLE(of, max8893_dt_match);
#endif
static const struct i2c_device_id max8893_ids[] = {
- { "max8893", 0 },
- { },
+ { "max8893" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, max8893_ids);
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 0b0b841d214a..1f94315bfb02 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -307,8 +307,8 @@ static int max8952_pmic_probe(struct i2c_client *client)
}
static const struct i2c_device_id max8952_ids[] = {
- { "max8952", 0 },
- { },
+ { "max8952" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, max8952_ids);
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 0c15a19fe83a..5de9d4fa5113 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -577,7 +577,7 @@ static const struct dev_pm_ops mcp16502_pm_ops = {
};
#endif
static const struct i2c_device_id mcp16502_i2c_id[] = {
- { "mcp16502", 0 },
+ { "mcp16502" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp16502_i2c_id);
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index c00638cd2d1e..2ebc1c0b5e6f 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -133,8 +133,8 @@ static int mt6311_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id mt6311_i2c_id[] = {
- {"mt6311", 0},
- {},
+ { "mt6311" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, mt6311_i2c_id);
diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c
index f1280d45265d..9bf4163221f1 100644
--- a/drivers/regulator/mtk-dvfsrc-regulator.c
+++ b/drivers/regulator/mtk-dvfsrc-regulator.c
@@ -1,99 +1,94 @@
// SPDX-License-Identifier: GPL-2.0
-//
-// Copyright (c) 2020 MediaTek Inc.
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
-#include <linux/err.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of_.h>
+#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/soc/mediatek/mtk_dvfsrc.h>
-
-#define DVFSRC_ID_VCORE 0
-#define DVFSRC_ID_VSCP 1
-
-#define MT_DVFSRC_REGULAR(match, _name, _volt_table) \
-[DVFSRC_ID_##_name] = { \
- .desc = { \
- .name = match, \
- .of_match = of_match_ptr(match), \
- .ops = &dvfsrc_vcore_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = DVFSRC_ID_##_name, \
- .owner = THIS_MODULE, \
- .n_voltages = ARRAY_SIZE(_volt_table), \
- .volt_table = _volt_table, \
- }, \
-}
-
-/*
- * DVFSRC regulators' information
- *
- * @desc: standard fields of regulator description.
- * @voltage_selector: Selector used for get_voltage_sel() and
- * set_voltage_sel() callbacks
- */
+#include <linux/soc/mediatek/dvfsrc.h>
-struct dvfsrc_regulator {
- struct regulator_desc desc;
+enum dvfsrc_regulator_id {
+ DVFSRC_ID_VCORE,
+ DVFSRC_ID_VSCP,
+ DVFSRC_ID_MAX
};
-/*
- * MTK DVFSRC regulators' init data
- *
- * @size: num of regulators
- * @regulator_info: regulator info.
- */
-struct dvfsrc_regulator_init_data {
+struct dvfsrc_regulator_pdata {
+ struct regulator_desc *descs;
u32 size;
- struct dvfsrc_regulator *regulator_info;
};
-static inline struct device *to_dvfsrc_dev(struct regulator_dev *rdev)
+#define MTK_DVFSRC_VREG(match, _name, _volt_table) \
+{ \
+ .name = match, \
+ .of_match = match, \
+ .ops = &dvfsrc_vcore_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = DVFSRC_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(_volt_table), \
+ .volt_table = _volt_table, \
+}
+
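+/*
+ * The vcore/vscp regulator devices are children of the dvfsrc-regulator
+ * platform device, which in turn is a child of the DVFSRC controller;
+ * hence the two parent hops below to reach the DVFSRC device.
+ */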
+static inline struct device *to_dvfs_regulator_dev(struct regulator_dev *rdev)
{
return rdev_get_dev(rdev)->parent;
}
+static inline struct device *to_dvfsrc_dev(struct regulator_dev *rdev)
+{
+ return to_dvfs_regulator_dev(rdev)->parent;
+}
+
+static int dvfsrc_get_cmd(int rdev_id, enum mtk_dvfsrc_cmd *cmd)
+{
+ switch (rdev_id) {
+ case DVFSRC_ID_VCORE:
+ *cmd = MTK_DVFSRC_CMD_VCORE_LEVEL;
+ break;
+ case DVFSRC_ID_VSCP:
+ *cmd = MTK_DVFSRC_CMD_VSCP_LEVEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dvfsrc_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct device *dvfsrc_dev = to_dvfsrc_dev(rdev);
+ enum mtk_dvfsrc_cmd req_cmd;
int id = rdev_get_id(rdev);
+ int ret;
- if (id == DVFSRC_ID_VCORE)
- mtk_dvfsrc_send_request(dvfsrc_dev,
- MTK_DVFSRC_CMD_VCORE_REQUEST,
- selector);
- else if (id == DVFSRC_ID_VSCP)
- mtk_dvfsrc_send_request(dvfsrc_dev,
- MTK_DVFSRC_CMD_VSCP_REQUEST,
- selector);
- else
- return -EINVAL;
+ ret = dvfsrc_get_cmd(id, &req_cmd);
+ if (ret)
+ return ret;
- return 0;
+ return mtk_dvfsrc_send_request(dvfsrc_dev, req_cmd, selector);
}
static int dvfsrc_get_voltage_sel(struct regulator_dev *rdev)
{
struct device *dvfsrc_dev = to_dvfsrc_dev(rdev);
+ enum mtk_dvfsrc_cmd query_cmd;
int id = rdev_get_id(rdev);
int val, ret;
- if (id == DVFSRC_ID_VCORE)
- ret = mtk_dvfsrc_query_info(dvfsrc_dev,
- MTK_DVFSRC_CMD_VCORE_LEVEL_QUERY,
- &val);
- else if (id == DVFSRC_ID_VSCP)
- ret = mtk_dvfsrc_query_info(dvfsrc_dev,
- MTK_DVFSRC_CMD_VSCP_LEVEL_QUERY,
- &val);
- else
- return -EINVAL;
+ ret = dvfsrc_get_cmd(id, &query_cmd);
+ if (ret)
+ return ret;
- if (ret != 0)
+ ret = mtk_dvfsrc_query_info(dvfsrc_dev, query_cmd, &val);
+ if (ret)
return ret;
return val;
@@ -105,110 +100,97 @@ static const struct regulator_ops dvfsrc_vcore_ops = {
.set_voltage_sel = dvfsrc_set_voltage_sel,
};
+static const unsigned int mt6873_voltages[] = {
+ 575000,
+ 600000,
+ 650000,
+ 725000,
+};
+
+static struct regulator_desc mt6873_regulators[] = {
+ MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt6873_voltages),
+ MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt6873_voltages),
+};
+
+static const struct dvfsrc_regulator_pdata mt6873_data = {
+ .descs = mt6873_regulators,
+ .size = ARRAY_SIZE(mt6873_regulators),
+};
+
static const unsigned int mt8183_voltages[] = {
725000,
800000,
};
-static struct dvfsrc_regulator mt8183_regulators[] = {
- MT_DVFSRC_REGULAR("dvfsrc-vcore", VCORE,
- mt8183_voltages),
+static struct regulator_desc mt8183_regulators[] = {
+ MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8183_voltages),
};
-static const struct dvfsrc_regulator_init_data regulator_mt8183_data = {
+static const struct dvfsrc_regulator_pdata mt8183_data = {
+ .descs = mt8183_regulators,
.size = ARRAY_SIZE(mt8183_regulators),
- .regulator_info = &mt8183_regulators[0],
};
-static const unsigned int mt6873_voltages[] = {
- 575000,
+static const unsigned int mt8195_voltages[] = {
+ 550000,
600000,
650000,
- 725000,
+ 750000,
};
-static struct dvfsrc_regulator mt6873_regulators[] = {
- MT_DVFSRC_REGULAR("dvfsrc-vcore", VCORE,
- mt6873_voltages),
- MT_DVFSRC_REGULAR("dvfsrc-vscp", VSCP,
- mt6873_voltages),
-};
-
-static const struct dvfsrc_regulator_init_data regulator_mt6873_data = {
- .size = ARRAY_SIZE(mt6873_regulators),
- .regulator_info = &mt6873_regulators[0],
+static struct regulator_desc mt8195_regulators[] = {
+ MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8195_voltages),
+ MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt8195_voltages),
};
-static const struct of_device_id mtk_dvfsrc_regulator_match[] = {
- {
- .compatible = "mediatek,mt8183-dvfsrc",
- .data = &regulator_mt8183_data,
- }, {
- .compatible = "mediatek,mt8192-dvfsrc",
- .data = &regulator_mt6873_data,
- }, {
- .compatible = "mediatek,mt6873-dvfsrc",
- .data = &regulator_mt6873_data,
- }, {
- /* sentinel */
- },
+static const struct dvfsrc_regulator_pdata mt8195_data = {
+ .descs = mt8195_regulators,
+ .size = ARRAY_SIZE(mt8195_regulators),
};
-MODULE_DEVICE_TABLE(of, mtk_dvfsrc_regulator_match);
static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- struct device *dev = &pdev->dev;
- struct regulator_config config = { };
- struct regulator_dev *rdev;
- const struct dvfsrc_regulator_init_data *regulator_init_data;
- struct dvfsrc_regulator *mt_regulators;
+ struct regulator_config config = { .dev = &pdev->dev };
+ const struct dvfsrc_regulator_pdata *pdata;
int i;
- match = of_match_node(mtk_dvfsrc_regulator_match, dev->parent->of_node);
+ pdata = device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return -EINVAL;
- if (!match) {
- dev_err(dev, "invalid compatible string\n");
- return -ENODEV;
- }
+ for (i = 0; i < pdata->size; i++) {
+ struct regulator_desc *vrdesc = &pdata->descs[i];
+ struct regulator_dev *rdev;
- regulator_init_data = match->data;
-
- mt_regulators = regulator_init_data->regulator_info;
- for (i = 0; i < regulator_init_data->size; i++) {
- config.dev = dev->parent;
- config.driver_data = (mt_regulators + i);
- rdev = devm_regulator_register(dev, &(mt_regulators + i)->desc,
- &config);
- if (IS_ERR(rdev)) {
- dev_err(dev, "failed to register %s\n",
- (mt_regulators + i)->desc.name);
- return PTR_ERR(rdev);
- }
+ rdev = devm_regulator_register(&pdev->dev, vrdesc, &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rdev),
+ "failed to register %s\n", vrdesc->name);
}
return 0;
}
+static const struct of_device_id mtk_dvfsrc_regulator_match[] = {
+ { .compatible = "mediatek,mt6873-dvfsrc-regulator", .data = &mt6873_data },
+ { .compatible = "mediatek,mt8183-dvfsrc-regulator", .data = &mt8183_data },
+ { .compatible = "mediatek,mt8192-dvfsrc-regulator", .data = &mt6873_data },
+ { .compatible = "mediatek,mt8195-dvfsrc-regulator", .data = &mt8195_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_dvfsrc_regulator_match);
+
static struct platform_driver mtk_dvfsrc_regulator_driver = {
.driver = {
.name = "mtk-dvfsrc-regulator",
+ .of_match_table = mtk_dvfsrc_regulator_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = dvfsrc_vcore_regulator_probe,
};
+module_platform_driver(mtk_dvfsrc_regulator_driver);
-static int __init mtk_dvfsrc_regulator_init(void)
-{
- return platform_driver_register(&mtk_dvfsrc_regulator_driver);
-}
-subsys_initcall(mtk_dvfsrc_regulator_init);
-
-static void __exit mtk_dvfsrc_regulator_exit(void)
-{
- platform_driver_unregister(&mtk_dvfsrc_regulator_driver);
-}
-module_exit(mtk_dvfsrc_regulator_exit);
-
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
MODULE_AUTHOR("Arvin wang <arvin.wang@mediatek.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek DVFS Resource Collector Regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index be488c5dff14..9714afe347dc 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -891,11 +891,6 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
unsigned int reset_ctrl;
int ret;
- if (!i2c->irq) {
- dev_err(&i2c->dev, "No IRQ configured?\n");
- return -EINVAL;
- }
-
pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL);
if (!pca9450)
return -ENOMEM;
@@ -967,23 +962,25 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
}
}
- ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL,
- pca9450_irq_handler,
- (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
- "pca9450-irq", pca9450);
- if (ret != 0) {
- dev_err(pca9450->dev, "Failed to request IRQ: %d\n",
- pca9450->irq);
- return ret;
- }
- /* Unmask all interrupt except PWRON/WDOG/RSVD */
- ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK,
- IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS |
- IRQ_THERM_105 | IRQ_THERM_125,
- IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD);
- if (ret) {
- dev_err(&i2c->dev, "Unmask irq error\n");
- return ret;
+ if (pca9450->irq) {
+ ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL,
+ pca9450_irq_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ "pca9450-irq", pca9450);
+ if (ret != 0) {
+ dev_err(pca9450->dev, "Failed to request IRQ: %d\n",
+ pca9450->irq);
+ return ret;
+ }
+ /* Unmask all interrupt except PWRON/WDOG/RSVD */
+ ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK,
+ IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS |
+ IRQ_THERM_105 | IRQ_THERM_125,
+ IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD);
+ if (ret) {
+ dev_err(&i2c->dev, "Unmask irq error\n");
+ return ret;
+ }
}
/* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 9fd8e0949b32..ea3611de42b4 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -596,10 +596,10 @@ static const struct of_device_id pf8x00_dt_ids[] = {
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
- { "pf8100", 0 },
- { "pf8121a", 0 },
- { "pf8200", 0 },
- {},
+ { "pf8100" },
+ { "pf8121a" },
+ { "pf8200" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index aa90360fa046..ae1c4b9daaa1 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -360,8 +360,8 @@ static int pv88060_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id pv88060_i2c_id[] = {
- {"pv88060", 0},
- {},
+ { "pv88060" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, pv88060_i2c_id);
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index f4acde4d56c8..3c48757bbbda 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -381,8 +381,8 @@ static int pv88090_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id pv88090_i2c_id[] = {
- {"pv88090", 0},
- {},
+ { "pv88090" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, pv88090_i2c_id);
diff --git a/drivers/regulator/qcom-pm8008-regulator.c b/drivers/regulator/qcom-pm8008-regulator.c
new file mode 100644
index 000000000000..da017c1969d0
--- /dev/null
+++ b/drivers/regulator/qcom-pm8008-regulator.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024 Linaro Limited
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#include <asm/byteorder.h>
+
+#define DEFAULT_VOLTAGE_STEPPER_RATE 38400
+
+#define LDO_STEPPER_CTL_REG 0x3b
+#define STEP_RATE_MASK GENMASK(1, 0)
+
+#define LDO_VSET_LB_REG 0x40
+
+#define LDO_ENABLE_REG 0x46
+#define ENABLE_BIT BIT(7)
+
+struct pm8008_regulator {
+ struct regmap *regmap;
+ struct regulator_desc desc;
+ unsigned int base;
+};
+
+struct pm8008_regulator_data {
+ const char *name;
+ const char *supply_name;
+ unsigned int base;
+ int min_dropout_uV;
+ const struct linear_range *voltage_range;
+};
+
+static const struct linear_range nldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE(528000, 0, 122, 8000),
+};
+
+static const struct linear_range pldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 237, 8000),
+};
+
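+/* Each LDO peripheral occupies a 0x100 register block starting at 0x4000. */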
+static const struct pm8008_regulator_data pm8008_reg_data[] = {
+ { "ldo1", "vdd-l1-l2", 0x4000, 225000, nldo_ranges, },
+ { "ldo2", "vdd-l1-l2", 0x4100, 225000, nldo_ranges, },
+ { "ldo3", "vdd-l3-l4", 0x4200, 300000, pldo_ranges, },
+ { "ldo4", "vdd-l3-l4", 0x4300, 300000, pldo_ranges, },
+ { "ldo5", "vdd-l5", 0x4400, 200000, pldo_ranges, },
+ { "ldo6", "vdd-l6", 0x4500, 200000, pldo_ranges, },
+ { "ldo7", "vdd-l7", 0x4600, 200000, pldo_ranges, },
+};
+
+static int pm8008_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+ struct pm8008_regulator *preg = rdev_get_drvdata(rdev);
+ unsigned int mV;
+ __le16 val;
+ int ret;
+
+ ret = regulator_list_voltage_linear_range(rdev, sel);
+ if (ret < 0)
+ return ret;
+
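+ /*
+ * The VSET registers take the target voltage in millivolts as a 16-bit
+ * little-endian value starting at the low-byte (LB) register.
+ */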
+ mV = DIV_ROUND_UP(ret, 1000);
+
+ val = cpu_to_le16(mV);
+
+ ret = regmap_bulk_write(preg->regmap, preg->base + LDO_VSET_LB_REG,
+ &val, sizeof(val));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pm8008_regulator_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct pm8008_regulator *preg = rdev_get_drvdata(rdev);
+ unsigned int uV;
+ __le16 val;
+ int ret;
+
+ ret = regmap_bulk_read(preg->regmap, preg->base + LDO_VSET_LB_REG,
+ &val, sizeof(val));
+ if (ret < 0)
+ return ret;
+
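+ /* Convert the millivolt register value back into a linear-range selector. */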
+ uV = le16_to_cpu(val) * 1000;
+
+ return (uV - preg->desc.min_uV) / preg->desc.uV_step;
+}
+
+static const struct regulator_ops pm8008_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = pm8008_regulator_set_voltage_sel,
+ .get_voltage_sel = pm8008_regulator_get_voltage_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static int pm8008_regulator_probe(struct platform_device *pdev)
+{
+ const struct pm8008_regulator_data *data;
+ struct regulator_config config = {};
+ struct device *dev = &pdev->dev;
+ struct pm8008_regulator *preg;
+ struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ unsigned int val;
+ int ret, i;
+
+ regmap = dev_get_regmap(dev->parent, "secondary");
+ if (!regmap)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(pm8008_reg_data); i++) {
+ data = &pm8008_reg_data[i];
+
+ preg = devm_kzalloc(dev, sizeof(*preg), GFP_KERNEL);
+ if (!preg)
+ return -ENOMEM;
+
+ preg->regmap = regmap;
+ preg->base = data->base;
+
+ desc = &preg->desc;
+
+ desc->name = data->name;
+ desc->supply_name = data->supply_name;
+ desc->of_match = data->name;
+ desc->regulators_node = of_match_ptr("regulators");
+ desc->ops = &pm8008_regulator_ops;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->owner = THIS_MODULE;
+
+ desc->linear_ranges = data->voltage_range;
+ desc->n_linear_ranges = 1;
+ desc->uV_step = desc->linear_ranges[0].step;
+ desc->min_uV = desc->linear_ranges[0].min;
+ desc->n_voltages = linear_range_values_in_range(&desc->linear_ranges[0]);
+
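+ /*
+ * The two-bit STEP_RATE field divides the default stepper rate by a
+ * power of two (0 => full rate, 3 => rate / 8).
+ */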
+ ret = regmap_read(regmap, preg->base + LDO_STEPPER_CTL_REG, &val);
+ if (ret < 0) {
+ dev_err(dev, "failed to read step rate: %d\n", ret);
+ return ret;
+ }
+ val &= STEP_RATE_MASK;
+ desc->ramp_delay = DEFAULT_VOLTAGE_STEPPER_RATE >> val;
+
+ desc->min_dropout_uV = data->min_dropout_uV;
+
+ desc->enable_reg = preg->base + LDO_ENABLE_REG;
+ desc->enable_mask = ENABLE_BIT;
+
+ config.dev = dev->parent;
+ config.driver_data = preg;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "failed to register regulator %s: %d\n",
+ desc->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id pm8008_regulator_id_table[] = {
+ { "pm8008-regulator" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, pm8008_regulator_id_table);
+
+static struct platform_driver pm8008_regulator_driver = {
+ .driver = {
+ .name = "qcom-pm8008-regulator",
+ },
+ .probe = pm8008_regulator_probe,
+ .id_table = pm8008_regulator_id_table,
+};
+module_platform_driver(pm8008_regulator_driver);
+
+MODULE_DESCRIPTION("Qualcomm PM8008 PMIC regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/renesas-usb-vbus-regulator.c b/drivers/regulator/renesas-usb-vbus-regulator.c
new file mode 100644
index 000000000000..4eceb6b54497
--- /dev/null
+++ b/drivers/regulator/renesas-usb-vbus-regulator.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas USB VBUS output regulator driver
+//
+// Copyright (C) 2024 Renesas Electronics Corporation
+//
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+static const struct regulator_ops rzg2l_usb_vbus_reg_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
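+/*
+ * Fixed 5 V VBUS output; the enable bit is inverted, i.e. VBUS is enabled
+ * by clearing BIT(0) at offset 0 of the parent device's regmap.
+ */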
+static const struct regulator_desc rzg2l_usb_vbus_rdesc = {
+ .name = "vbus",
+ .of_match = of_match_ptr("regulator-vbus"),
+ .ops = &rzg2l_usb_vbus_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = 0,
+ .enable_mask = BIT(0),
+ .enable_is_inverted = true,
+ .fixed_uV = 5000000,
+ .n_voltages = 1,
+};
+
+static int rzg2l_usb_vbus_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config config = { };
+ struct device *dev = &pdev->dev;
+ struct regulator_dev *rdev;
+
+ config.regmap = dev_get_regmap(dev->parent, NULL);
+ if (!config.regmap)
+ return dev_err_probe(dev, -ENOENT, "Failed to get regmap\n");
+
+ config.dev = dev;
+ config.of_node = of_get_child_by_name(dev->parent->of_node, "regulator-vbus");
+ if (!config.of_node)
+ return dev_err_probe(dev, -ENODEV, "regulator node not found\n");
+
+ rdev = devm_regulator_register(dev, &rzg2l_usb_vbus_rdesc, &config);
+ if (IS_ERR(rdev)) {
+ of_node_put(config.of_node);
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "not able to register vbus regulator\n");
+ }
+
+ of_node_put(config.of_node);
+
+ return 0;
+}
+
+static struct platform_driver rzg2l_usb_vbus_regulator_driver = {
+ .probe = rzg2l_usb_vbus_regulator_probe,
+ .driver = {
+ .name = "rzg2l-usb-vbus-regulator",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(rzg2l_usb_vbus_regulator_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L USB Vbus Regulator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/rt4831-regulator.c b/drivers/regulator/rt4831-regulator.c
index 97e6f7e2a0ba..dfc868a24056 100644
--- a/drivers/regulator/rt4831-regulator.c
+++ b/drivers/regulator/rt4831-regulator.c
@@ -202,4 +202,5 @@ static struct platform_driver rt4831_regulator_driver = {
module_platform_driver(rt4831_regulator_driver);
MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT4831 DSV Regulators driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index b90e53d922d6..a5c126afc648 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -219,7 +219,7 @@ static const struct regulator_ops rtq2208_regulator_buck_ops = {
.set_suspend_mode = rtq2208_set_suspend_mode,
};
-static const struct regulator_ops rtq2208_regulator_ldo_ops = {
+static const struct regulator_ops rtq2208_regulator_ldo_fix_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -228,6 +228,28 @@ static const struct regulator_ops rtq2208_regulator_ldo_ops = {
.set_suspend_disable = rtq2208_set_suspend_disable,
};
+static const struct regulator_ops rtq2208_regulator_ldo_adj_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_suspend_enable = rtq2208_set_suspend_enable,
+ .set_suspend_disable = rtq2208_set_suspend_disable,
+};
+
+static const unsigned int rtq2208_ldo_volt_table[] = {
+ 1800000,
+ 3300000,
+};
+
+static struct of_regulator_match rtq2208_ldo_match[] = {
+ {.name = "ldo2", },
+ {.name = "ldo1", },
+};
+
static unsigned int rtq2208_of_map_mode(unsigned int mode)
{
switch (mode) {
@@ -322,13 +344,13 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
return IRQ_HANDLED;
}
-static int rtq2208_of_get_fixed_voltage(struct device *dev,
- struct of_regulator_match *rtq2208_ldo_match, int n_fixed)
+static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
{
struct device_node *np;
struct of_regulator_match *match;
- struct rtq2208_regulator_desc *rdesc;
+ struct regulator_desc *desc;
struct regulator_init_data *init_data;
+ u32 fixed_uV;
int ret, i;
if (!dev->of_node)
@@ -338,23 +360,37 @@ static int rtq2208_of_get_fixed_voltage(struct device *dev,
if (!np)
np = dev->of_node;
- ret = of_regulator_match(dev, np, rtq2208_ldo_match, n_fixed);
+ ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
of_node_put(np);
if (ret < 0)
return ret;
- for (i = 0; i < n_fixed; i++) {
+ for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
match = rtq2208_ldo_match + i;
init_data = match->init_data;
- rdesc = (struct rtq2208_regulator_desc *)match->driver_data;
+ desc = (struct regulator_desc *)match->desc;
- if (!init_data || !rdesc)
+ if (!init_data || !desc)
continue;
- if (init_data->constraints.min_uV == init_data->constraints.max_uV)
- rdesc->desc.fixed_uV = init_data->constraints.min_uV;
+ /* specify a fixed working voltage if the property exists */
+ ret = of_property_read_u32(match->of_node, "richtek,fixed-microvolt", &fixed_uV);
+
+ if (!ret) {
+ if (fixed_uV != init_data->constraints.min_uV ||
+ fixed_uV != init_data->constraints.max_uV)
+ return -EINVAL;
+ desc->n_voltages = 1;
+ desc->fixed_uV = fixed_uV;
+ desc->ops = &rtq2208_regulator_ldo_fix_ops;
+ } else {
+ desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
+ desc->volt_table = rtq2208_ldo_volt_table;
+ desc->ops = &rtq2208_regulator_ldo_adj_ops;
+ }
}
return 0;
@@ -388,8 +424,7 @@ static const struct linear_range rtq2208_vout_range[] = {
REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
};
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel,
- int idx, struct of_regulator_match *rtq2208_ldo_match, int *ldo_idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
{
struct regulator_desc *desc;
static const struct {
@@ -461,8 +496,7 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
{
- struct of_regulator_match rtq2208_ldo_match[2];
- int mtp_sel, ret, i, idx, ldo_idx = 0;
+ int mtp_sel, i, idx, ret;
/* get mtp_sel0 or mtp_sel1 */
mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -474,7 +508,7 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
if (!rdesc[i])
return -ENOMEM;
- rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, rtq2208_ldo_match, &ldo_idx);
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
/* init ldo dvs ability */
if (idx >= RTQ2208_LDO2)
@@ -482,7 +516,7 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
}
/* init ldo fixed_uV */
- ret = rtq2208_of_get_fixed_voltage(dev, rtq2208_ldo_match, ldo_idx);
+ ret = rtq2208_of_get_ldo_dvs_ability(dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 59aa16825d8a..3bbd4a29e6d3 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -497,8 +497,8 @@ static int slg51000_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id slg51000_i2c_id[] = {
- {"slg51000", 0},
- {},
+ { "slg51000" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, slg51000_i2c_id);
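The same trimming repeats below for sy8106a, tps6286x and tps6287x: the explicit 0 only initialised .driver_data, which a partial initializer leaves zeroed anyway, and the trailing comma on the sentinel is dropped. The resulting pattern, sketched for a hypothetical driver:

static const struct i2c_device_id foo_i2c_id[] = {
	{ "foo" },	/* .driver_data implicitly 0 */
	{ }		/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, foo_i2c_id);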
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
index 85b0102fb9b1..b7aeef6e09e7 100644
--- a/drivers/regulator/stm32-pwr.c
+++ b/drivers/regulator/stm32-pwr.c
@@ -166,6 +166,7 @@ static int stm32_pwr_regulator_probe(struct platform_device *pdev)
static const struct of_device_id __maybe_unused stm32_pwr_of_match[] = {
{ .compatible = "st,stm32mp1,pwr-reg", },
+ { .compatible = "st,stm32mp13-pwr-reg", },
{},
};
MODULE_DEVICE_TABLE(of, stm32_pwr_of_match);
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index 1bcfdd6dcfc1..d79a4cc25a0d 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -130,8 +130,8 @@ static const struct of_device_id sy8106a_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, sy8106a_i2c_of_match);
static const struct i2c_device_id sy8106a_i2c_id[] = {
- { "sy8106a", 0 },
- { },
+ { "sy8106a" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sy8106a_i2c_id);
diff --git a/drivers/regulator/tps6286x-regulator.c b/drivers/regulator/tps6286x-regulator.c
index 758c70269653..75f441f36de7 100644
--- a/drivers/regulator/tps6286x-regulator.c
+++ b/drivers/regulator/tps6286x-regulator.c
@@ -136,11 +136,11 @@ static int tps6286x_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id tps6286x_i2c_id[] = {
- { "tps62864", 0 },
- { "tps62866", 0 },
- { "tps62868", 0 },
- { "tps62869", 0 },
- {},
+ { "tps62864" },
+ { "tps62866" },
+ { "tps62868" },
+ { "tps62869" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, tps6286x_i2c_id);
@@ -156,4 +156,5 @@ static struct i2c_driver tps6286x_regulator_driver = {
module_i2c_driver(tps6286x_regulator_driver);
+MODULE_DESCRIPTION("TI TPS6286x Power Regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index 3c9d79e003e4..7a0551f0c8c0 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -165,11 +165,11 @@ static const struct of_device_id tps6287x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, tps6287x_dt_ids);
static const struct i2c_device_id tps6287x_i2c_id[] = {
- { "tps62870", 0 },
- { "tps62871", 0 },
- { "tps62872", 0 },
- { "tps62873", 0 },
- {},
+ { "tps62870" },
+ { "tps62871" },
+ { "tps62872" },
+ { "tps62873" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, tps6287x_i2c_id);
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index 4a859f4c0f83..ac53792e3fed 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -653,18 +653,14 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
}
}
- if (tps->chip_id == LP8764) {
- nr_buck = ARRAY_SIZE(buck_regs);
- nr_ldo = 0;
- nr_types = REGS_INT_NB;
- } else if (tps->chip_id == TPS65224) {
+ if (tps->chip_id == TPS65224) {
nr_buck = ARRAY_SIZE(tps65224_buck_regs);
nr_ldo = ARRAY_SIZE(tps65224_ldo_regs);
- nr_types = REGS_INT_NB;
+ nr_types = TPS65224_REGS_INT_NB;
} else {
nr_buck = ARRAY_SIZE(buck_regs);
- nr_ldo = ARRAY_SIZE(tps6594_ldo_regs);
- nr_types = TPS65224_REGS_INT_NB;
+ nr_ldo = (tps->chip_id == LP8764) ? 0 : ARRAY_SIZE(tps6594_ldo_regs);
+ nr_types = REGS_INT_NB;
}
reg_irq_nb = nr_types * (nr_buck + nr_ldo);
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 86a626a4f610..6153d0295b6d 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -158,10 +158,8 @@ static int regulator_userspace_consumer_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get_exclusive(&pdev->dev, drvdata->num_supplies,
drvdata->supplies);
- if (ret) {
- dev_err(&pdev->dev, "Failed to get supplies: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to get supplies\n");
platform_set_drvdata(pdev, drvdata);
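dev_err_probe(), used here and in several other hunks of this series, logs the message, returns the error unchanged, and stays silent for -EPROBE_DEFER (recording the reason for the devices_deferred debugfs file instead). A minimal sketch with a hypothetical helper:

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = foo_get_resources(&pdev->dev);	/* hypothetical helper */
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "Failed to get resources\n");

	return 0;
}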
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 7112f5932609..67bce340a87e 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -68,6 +68,7 @@ config RESET_BRCMSTB_RESCAL
config RESET_GPIO
tristate "GPIO reset controller"
+ depends on GPIOLIB
help
This enables a generic reset controller for resets attached via
GPIOs. Typically for OF platforms this driver expects "reset-gpios"
@@ -91,6 +92,14 @@ config RESET_IMX7
help
This enables the reset controller driver for i.MX7 SoCs.
+config RESET_IMX8MP_AUDIOMIX
+ tristate "i.MX8MP AudioMix Reset Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select AUXILIARY_BUS
+ default CLK_IMX8MP
+ help
+ This enables the reset controller driver for the i.MX8MP AudioMix block.
+
config RESET_INTEL_GW
bool "Intel Reset Controller Driver"
depends on X86 || COMPILE_TEST
@@ -328,6 +337,12 @@ config RESET_ZYNQ
help
This enables the reset controller driver for Xilinx Zynq SoCs.
+config RESET_ZYNQMP
+ bool "ZYNQMP Reset Driver" if COMPILE_TEST
+ default ARCH_ZYNQMP
+ help
+ This enables the reset controller driver for Xilinx ZynqMP SoCs.
+
source "drivers/reset/starfive/Kconfig"
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index fd8b49fa46fc..27b0bbdfcc04 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -2,8 +2,8 @@
obj-y += core.o
obj-y += hisilicon/
obj-y += starfive/
-obj-$(CONFIG_ARCH_STI) += sti/
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-y += sti/
+obj-y += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
@@ -14,6 +14,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
+obj-$(CONFIG_RESET_IMX8MP_AUDIOMIX) += reset-imx8mp-audiomix.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
obj-$(CONFIG_RESET_K210) += reset-k210.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
@@ -41,4 +42,4 @@ obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
-obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
+obj-$(CONFIG_RESET_ZYNQMP) += reset-zynqmp.o
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 5c3267acd2b1..65aa5ff5ed82 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -219,4 +219,5 @@ static int __init hi6220_reset_init(void)
postcore_initcall(hi6220_reset_init);
+MODULE_DESCRIPTION("Hisilicon Hi6220 reset controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-imx8mp-audiomix.c b/drivers/reset/reset-imx8mp-audiomix.c
new file mode 100644
index 000000000000..6e3f3069f727
--- /dev/null
+++ b/drivers/reset/reset-imx8mp-audiomix.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/reset-controller.h>
+
+#define EARC 0x200
+#define EARC_RESET_MASK 0x3
+
+struct imx8mp_audiomix_reset {
+ struct reset_controller_dev rcdev;
+ spinlock_t lock; /* protect register read-modify-write cycle */
+ void __iomem *base;
+};
+
+static struct imx8mp_audiomix_reset *to_imx8mp_audiomix_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct imx8mp_audiomix_reset, rcdev);
+}
+
+static int imx8mp_audiomix_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev);
+ void __iomem *reg_addr = priv->base;
+ unsigned int mask, reg;
+ unsigned long flags;
+
+ mask = BIT(id);
+ spin_lock_irqsave(&priv->lock, flags);
+ reg = readl(reg_addr + EARC);
+ writel(reg & ~mask, reg_addr + EARC);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int imx8mp_audiomix_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev);
+ void __iomem *reg_addr = priv->base;
+ unsigned int mask, reg;
+ unsigned long flags;
+
+ mask = BIT(id);
+ spin_lock_irqsave(&priv->lock, flags);
+ reg = readl(reg_addr + EARC);
+ writel(reg | mask, reg_addr + EARC);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops imx8mp_audiomix_reset_ops = {
+ .assert = imx8mp_audiomix_reset_assert,
+ .deassert = imx8mp_audiomix_reset_deassert,
+};
+
+static int imx8mp_audiomix_reset_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct imx8mp_audiomix_reset *priv;
+ struct device *dev = &adev->dev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = fls(EARC_RESET_MASK);
+ priv->rcdev.ops = &imx8mp_audiomix_reset_ops;
+ priv->rcdev.of_node = dev->parent->of_node;
+ priv->rcdev.dev = dev;
+ priv->rcdev.of_reset_n_cells = 1;
+ priv->base = of_iomap(dev->parent->of_node, 0);
+ if (!priv->base)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ ret = devm_reset_controller_register(dev, &priv->rcdev);
+ if (ret)
+ goto out_unmap;
+
+ return 0;
+
+out_unmap:
+ iounmap(priv->base);
+ return ret;
+}
+
+static void imx8mp_audiomix_reset_remove(struct auxiliary_device *adev)
+{
+ struct imx8mp_audiomix_reset *priv = dev_get_drvdata(&adev->dev);
+
+ iounmap(priv->base);
+}
+
+static const struct auxiliary_device_id imx8mp_audiomix_reset_ids[] = {
+ {
+ .name = "clk_imx8mp_audiomix.reset",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, imx8mp_audiomix_reset_ids);
+
+static struct auxiliary_driver imx8mp_audiomix_reset_driver = {
+ .probe = imx8mp_audiomix_reset_probe,
+ .remove = imx8mp_audiomix_reset_remove,
+ .id_table = imx8mp_audiomix_reset_ids,
+};
+
+module_auxiliary_driver(imx8mp_audiomix_reset_driver);
+
+MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
+MODULE_DESCRIPTION("Freescale i.MX8MP Audio Block Controller reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 7891d52fa899..894ad9d37a66 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -129,8 +129,6 @@ static int meson_audio_arb_remove(struct platform_device *pdev)
writel(0, arb->regs);
spin_unlock(&arb->lock);
- clk_disable_unprepare(arb->clk);
-
return 0;
}
@@ -150,7 +148,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, arb);
- arb->clk = devm_clk_get(dev, NULL);
+ arb->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(arb->clk))
return dev_err_probe(dev, PTR_ERR(arb->clk), "failed to get clock\n");
@@ -170,11 +168,6 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
* In the initial state, all memory interfaces are disabled
* and the general bit is on
*/
- ret = clk_prepare_enable(arb->clk);
- if (ret) {
- dev_err(dev, "failed to enable arb clock\n");
- return ret;
- }
writel(BIT(ARB_GENERAL_BIT), arb->regs);
/* Register reset controller */
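The conversion above is the usual devm_clk_get_enabled() consolidation: one devres-managed call replaces devm_clk_get() plus clk_prepare_enable() in probe and the matching clk_disable_unprepare() calls in the remove and error paths. Reduced to a sketch:

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Enabled for the device's bound lifetime; undone by devres. */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}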
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index a8dde4606360..255c894a4782 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -10,10 +10,12 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#define RESET 0x000
+#define VBENCTL 0x03c
#define RESET_SEL_PLLRESET BIT(12)
#define RESET_PLLRESET BIT(8)
@@ -32,6 +34,7 @@ struct rzg2l_usbphy_ctrl_priv {
struct reset_controller_dev rcdev;
struct reset_control *rstc;
void __iomem *base;
+ struct platform_device *vdev;
spinlock_t lock;
};
@@ -100,10 +103,19 @@ static const struct reset_control_ops rzg2l_usbphy_ctrl_reset_ops = {
.status = rzg2l_usbphy_ctrl_status,
};
+static const struct regmap_config rzg2l_usb_regconf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 1,
+};
+
static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rzg2l_usbphy_ctrl_priv *priv;
+ struct platform_device *vdev;
+ struct regmap *regmap;
unsigned long flags;
int error;
u32 val;
@@ -116,6 +128,10 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ regmap = devm_regmap_init_mmio(dev, priv->base + VBENCTL, &rzg2l_usb_regconf);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return dev_err_probe(dev, PTR_ERR(priv->rstc),
@@ -125,25 +141,14 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
if (error)
return error;
- priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
- priv->rcdev.of_reset_n_cells = 1;
- priv->rcdev.nr_resets = NUM_PORTS;
- priv->rcdev.of_node = dev->of_node;
- priv->rcdev.dev = dev;
-
- error = devm_reset_controller_register(dev, &priv->rcdev);
- if (error)
- return error;
-
spin_lock_init(&priv->lock);
dev_set_drvdata(dev, priv);
pm_runtime_enable(&pdev->dev);
error = pm_runtime_resume_and_get(&pdev->dev);
if (error < 0) {
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(priv->rstc);
- return dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed");
+ dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed");
+ goto err_pm_disable_reset_deassert;
}
/* put pll and phy into reset state */
@@ -153,13 +158,45 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
writel(val, priv->base + RESET);
spin_unlock_irqrestore(&priv->lock, flags);
+ priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
+ priv->rcdev.of_reset_n_cells = 1;
+ priv->rcdev.nr_resets = NUM_PORTS;
+ priv->rcdev.of_node = dev->of_node;
+ priv->rcdev.dev = dev;
+
+ error = devm_reset_controller_register(dev, &priv->rcdev);
+ if (error)
+ goto err_pm_runtime_put;
+
+ vdev = platform_device_alloc("rzg2l-usb-vbus-regulator", pdev->id);
+ if (!vdev) {
+ error = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+ vdev->dev.parent = dev;
+ priv->vdev = vdev;
+
+ error = platform_device_add(vdev);
+ if (error)
+ goto err_device_put;
+
return 0;
+
+err_device_put:
+ platform_device_put(vdev);
+err_pm_runtime_put:
+ pm_runtime_put(&pdev->dev);
+err_pm_disable_reset_deassert:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
+ return error;
}
static int rzg2l_usbphy_ctrl_remove(struct platform_device *pdev)
{
struct rzg2l_usbphy_ctrl_priv *priv = dev_get_drvdata(&pdev->dev);
+ platform_device_unregister(priv->vdev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index a2622e146b8b..0b599f7cf6ed 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-if ARCH_STI
+if ARCH_STI || COMPILE_TEST
config STIH407_RESET
- bool
+ bool "STIH407 Reset Driver" if COMPILE_TEST
endif
diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig
index e4a9a389e98c..4a2d26d1210a 100644
--- a/drivers/reset/tegra/Kconfig
+++ b/drivers/reset/tegra/Kconfig
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
config RESET_TEGRA_BPMP
- def_bool TEGRA_BPMP
+ bool "Tegra BPMP Reset Driver" if COMPILE_TEST
+ default TEGRA_BPMP
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 2f16f543079b..a76c6af9ea63 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4906,7 +4906,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)dma32_to_virt(ccw->cda));
+ cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
else
cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
@@ -5525,7 +5525,7 @@ dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
- datap = (char *)*((addr_t *)dma32_to_virt(from->cda));
+ datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda)));
else
datap = dma32_to_virt(from->cda);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 361e9bd75257..9f2023a077c2 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -585,7 +585,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)dma32_to_virt(ccw->cda));
+ cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
else
cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
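Both dasd hunks replace an untyped double dereference with the typed s390 DMA helpers: ccw->cda is a 31-bit dma32_t, and with CCW_FLAG_IDA set it points at an IDAW that holds a 64-bit dma64_t data address. The intent, pulled out into a hypothetical helper:

static char *example_ccw_data(struct ccw1 *ccw)
{
	if (ccw->flags & CCW_FLAG_IDA) {
		/* cda -> IDAW (31-bit), IDAW -> data (64-bit) */
		dma64_t *idaw = dma32_to_virt(ccw->cda);

		return dma64_to_virt(*idaw);
	}

	return dma32_to_virt(ccw->cda);
}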
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 4533dd055ca8..1aa426b1dedd 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -68,7 +68,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
blk_mq_free_tag_set(&block->tag_set);
return PTR_ERR(gdp);
}
- blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6d1689a2717e..d5a5d11ae0dc 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -548,6 +548,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
{
struct queue_limits lim = {
.logical_block_size = 4096,
+ .features = BLK_FEAT_DAX,
};
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
@@ -643,7 +644,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->fops = &dcssblk_devops;
dev_info->gd->private_data = dev_info;
dev_info->gd->flags |= GENHD_FL_NO_PART;
- blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
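This is part of the block layer's move from toggling flags on a live request queue to declaring features in struct queue_limits before the disk exists. Condensed to a sketch (dcssblk's remaining setup omitted):

static int example_alloc_disk(void)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,
		.features		= BLK_FEAT_DAX,	/* was QUEUE_FLAG_DAX */
	};
	struct gendisk *gd;

	gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(gd))
		return PTR_ERR(gd);

	return 0;
}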
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 1d456a5a3bfb..3fcfe029db1b 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -439,7 +439,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
.logical_block_size = 1 << 12,
};
unsigned int devindex;
- struct request_queue *rq;
int len, ret;
lim.max_segments = min(scmdev->nr_max_block,
@@ -474,10 +473,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
ret = PTR_ERR(bdev->gendisk);
goto out_tag;
}
- rq = bdev->rq = bdev->gendisk->queue;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
-
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
bdev->gendisk->major = scm_major;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index d53ee34d398f..fbe29cabcbb8 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1293,6 +1293,7 @@ sclp_init(void)
fail_unregister_reboot_notifier:
unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
+ list_del(&sclp_state_change_event.list);
sclp_init_state = sclp_init_state_uninitialized;
free_page((unsigned long) sclp_read_sccb);
free_page((unsigned long) sclp_init_sccb);
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 6e5c508b1e07..5f6e10225627 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -490,13 +490,14 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
struct channel_program *cp)
{
struct ccwchain *iter;
- u32 cda, ccw_head;
+ u32 offset, ccw_head;
list_for_each_entry(iter, &cp->ccwchain_list, next) {
ccw_head = iter->ch_iova;
if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
- cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head;
- ccw->cda = u32_to_dma32(cda);
+ /* Calculate offset of TIC target */
+ offset = dma32_to_u32(ccw->cda) - ccw_head;
+ ccw->cda = virt_to_dma32((void *)iter->ch_ccw + offset);
return 0;
}
}
@@ -914,7 +915,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* in the ioctl directly. Path status changes etc.
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
- ccw_head = (u32)(u64)chain->ch_ccw;
+ ccw_head = dma32_to_u32(virt_to_dma32(chain->ch_ccw));
/*
* On successful execution, cpa points just beyond the end
* of the chain.
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 25d4e6376591..88db8378325a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Linux for S/390 Lan Channel Station Network Driver
+ * Linux for S/390 LAN channel station device driver
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Original Code written by
@@ -2380,5 +2380,6 @@ module_init(lcs_init_module);
module_exit(lcs_cleanup_module);
MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
+MODULE_DESCRIPTION("S/390 LAN channel station device driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index c1caf7734c3e..f184c58ecf24 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -247,7 +247,7 @@ static int qeth_set_channels(struct net_device *dev,
}
static int qeth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct qeth_card *card = dev->ml_priv;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index d7569f395559..d6491fc84e8c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -698,6 +698,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
dma64_t *indicatorp = NULL;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
+ dma32_t indicatorp_dma = 0;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
@@ -725,7 +726,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
*/
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*indicatorp),
- &ccw->cda);
+ &indicatorp_dma);
if (!indicatorp)
goto out;
*indicatorp = indicators_dma(vcdev);
@@ -735,6 +736,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
/* no error, just fall back to legacy interrupts */
vcdev->is_thinint = false;
}
+ ccw->cda = indicatorp_dma;
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
*indicators(vcdev) = 0;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 065db86d6021..37c24ffea65c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -82,7 +82,6 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
- select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
help
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a226dc1b65d7..4eb0837298d4 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state)
}
}
-static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static void alua_handle_state_transition(struct scsi_device *sdev)
{
struct alua_dh_data *h = sdev->handler_data;
struct alua_port_group *pg;
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (pg)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+ rcu_read_unlock();
+ alua_check(sdev, false);
+}
+
+static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
/*
* LUN Not Accessible - ALUA state transition
*/
- rcu_read_lock();
- pg = rcu_dereference(h->pg);
- if (pg)
- pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
- rcu_read_unlock();
- alua_check(sdev, false);
+ alua_handle_state_transition(sdev);
return NEEDS_RETRY;
}
break;
case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
+ /*
+ * LUN Not Accessible - ALUA state transition
+ */
+ alua_handle_state_transition(sdev);
+ return NEEDS_RETRY;
+ }
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
/*
* Power On, Reset, or Bus Device Reset.
@@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev)
retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
ALUA_FAILOVER_RETRIES, &sense_hdr);
- if (sense_hdr.sense_key == NOT_READY &&
+ if ((sense_hdr.sense_key == NOT_READY ||
+ sense_hdr.sense_key == UNIT_ATTENTION) &&
sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
return SCSI_DH_RETRY;
else if (retval)
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 60688f18fac6..c708e1059638 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1057,15 +1057,15 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
return 0;
}
-static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
+static int iscsi_sw_tcp_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
struct iscsi_session *session = tcp_sw_host->session;
struct iscsi_conn *conn = session->leadconn;
if (conn->datadgst_en)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
- sdev->request_queue);
+ lim->features |= BLK_FEAT_STABLE_WRITES;
return 0;
}
@@ -1083,7 +1083,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
- .slave_configure = iscsi_sw_tcp_slave_configure,
+ .device_configure = iscsi_sw_tcp_device_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
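The switch from ->slave_configure() to ->device_configure() lets the driver fill in queue limits and feature flags before the block queue is committed, instead of poking the live queue. The shape of the new hook, sketched for a hypothetical host template:

static int foo_device_configure(struct scsi_device *sdev,
				struct queue_limits *lim)
{
	/* e.g. require stable pages while data digests are in use */
	lim->features |= BLK_FEAT_STABLE_WRITES;
	return 0;
}

static const struct scsi_host_template foo_sht = {
	.device_configure	= foo_device_configure,
};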
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4c69fc63c119..88714b7b0dba 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -572,15 +572,6 @@ static struct ata_port_operations sas_sata_ops = {
.end_eh = sas_ata_end_eh,
};
-static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
- ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &sas_sata_ops
-};
-
int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
@@ -597,28 +588,35 @@ int sas_ata_init(struct domain_device *found_dev)
ata_host_init(ata_host, ha->dev, &sas_sata_ops);
- ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
+ ap = ata_port_alloc(ata_host);
if (!ap) {
- pr_err("ata_sas_port_alloc failed.\n");
+ pr_err("ata_port_alloc failed.\n");
rc = -ENODEV;
goto free_host;
}
+ ap->port_no = 0;
+ ap->pio_mask = ATA_PIO4;
+ ap->mwdma_mask = ATA_MWDMA2;
+ ap->udma_mask = ATA_UDMA6;
+ ap->flags |= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+ ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX;
+ ap->ops = &sas_sata_ops;
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
- rc = ata_sas_tport_add(ata_host->dev, ap);
+ rc = ata_tport_add(ata_host->dev, ap);
if (rc)
- goto destroy_port;
+ goto free_port;
found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;
return 0;
-destroy_port:
- kfree(ap);
+free_port:
+ ata_port_free(ap);
free_host:
ata_host_put(ata_host);
return rc;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 8fb7c41c0962..951bdc554a10 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -300,8 +300,8 @@ void sas_free_device(struct kref *kref)
kfree(dev->ex_dev.ex_phy);
if (dev_is_sata(dev) && dev->sata_dev.ap) {
- ata_sas_tport_delete(dev->sata_dev.ap);
- kfree(dev->sata_dev.ap);
+ ata_tport_delete(dev->sata_dev.ap);
+ ata_port_free(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 85948963fb97..03d6ec1eb970 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -145,6 +145,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
func, dev->parent ? "exp-attached" :
"direct-attached",
SAS_ADDR(dev->sas_addr), err);
+
+ /*
+ * If the device probe failed, clear the expander phy's attached SAS
+ * address so that the phy is not treated as a flutter event in the
+ * next revalidation.
+ */
+ if (dev->parent && !dev_is_expander(dev->dev_type)) {
+ struct sas_phy *phy = dev->phy;
+ struct domain_device *parent = dev->parent;
+ struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
+
+ memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ }
+
sas_unregister_dev(dev->port, dev);
}
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 5297cacc8beb..0cef5d089f34 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1363,6 +1363,16 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
atomic_inc(&lpfc_nvmet->xmt_ls_abort);
}
+static int
+lpfc_nvmet_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
+{
+ struct lpfc_nodelist *ndlp = hosthandle;
+
+ *wwnn = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ *wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+ return 0;
+}
+
static void
lpfc_nvmet_host_release(void *hosthandle)
{
@@ -1413,6 +1423,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.ls_req = lpfc_nvmet_ls_req,
.ls_abort = lpfc_nvmet_ls_abort,
.host_release = lpfc_nvmet_host_release,
+ .host_traddr = lpfc_nvmet_host_traddr,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 88acefbf9aea..6c79c350a4d5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1981,8 +1981,6 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev,
lim->max_hw_sectors = max_io_size / 512;
lim->virt_boundary_mask = mr_nvme_pg_size - 1;
-
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
}
/*
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 1638109a68a0..cd261b48eb46 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2163,10 +2163,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(persistent_id);
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+/**
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_enable attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+
+ if (!sdev_priv_data)
+ return 0;
+
+ return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!sas_ata_ncq_prio_supported(sdev))
+ return -EINVAL;
+
+ sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
+
static struct attribute *mpi3mr_dev_attrs[] = {
&dev_attr_sas_address.attr,
&dev_attr_device_handle.attr,
&dev_attr_persistent_id.attr,
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL,
};
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 329cc6ec3b58..82aa4e418c5a 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1364,7 +1364,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
continue;
if (i > sizeof(mr_sas_port->phy_mask) * 8) {
- ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n",
+ ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
i, sizeof(mr_sas_port->phy_mask) * 8);
goto out_fail;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 258647fc6bdd..b2bcf4a27ddc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4774,7 +4774,7 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
char desc[17] = {0};
u32 iounit_pg1_flags;
- strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc));
+ memtostr(desc, ioc->manu_pg0.ChipName);
ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
desc,
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
@@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pd_handles_sz++;
+ /*
+ * pd_handles_sz must cover whole unsigned longs, since set_bit() and
+ * test_bit() access full words and could otherwise reach past the allocation.
+ */
+ ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
+
ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
GFP_KERNEL);
if (!ioc->pd_handles) {
@@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pend_os_device_add_sz++;
+
+ /*
+ * pend_os_device_add_sz must cover whole unsigned longs, since set_bit()
+ * and test_bit() access full words and could otherwise reach past the
+ * allocation.
+ */
+ ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
+ sizeof(unsigned long));
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
if (!ioc->pend_os_device_add) {
@@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
if (ioc->facts.MaxDevHandle % 8)
pd_handles_sz++;
+ /*
+ * pd_handles must cover whole unsigned longs, since set_bit() and
+ * test_bit() access full words and could otherwise reach past the
+ * allocation.
+ */
+ pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
GFP_KERNEL);
if (!pd_handles) {
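All three ALIGN() additions guard the same detail: these buffers are later used as bitmaps, and set_bit()/test_bit() read and write whole unsigned longs, so a byte-granular size can let the last word extend past the allocation. Reduced to a sketch with assumed numbers:

static int example_alloc_handles(unsigned int max_handle)
{
	/* bytes needed for max_handle bits */
	size_t sz = DIV_ROUND_UP(max_handle, 8);
	unsigned long *handles;

	/* set_bit()/test_bit() touch whole words: round up first */
	sz = ALIGN(sz, sizeof(unsigned long));
	handles = kzalloc(sz, GFP_KERNEL);
	if (!handles)
		return -ENOMEM;

	set_bit(max_handle - 1, handles);	/* stays inside the allocation */
	kfree(handles);
	return 0;
}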
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index bf100a4ebfc3..fe1e96fda284 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -2048,9 +2048,6 @@ void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
-/* NCQ Prio Handling Check */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
-
void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_debugfs(void);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 1c9fd26195b8..87784c96249a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev,
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);
@@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev,
if (kstrtobool(buf, &ncq_prio_enable))
return -EINVAL;
- if (!scsih_ncq_prio_supp(sdev))
+ if (!sas_ata_ncq_prio_supported(sdev))
return -EINVAL;
sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 89ef43a5ef86..97c2472cd434 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -302,8 +302,8 @@ struct _scsi_io_transfer {
/**
* _scsih_set_debug_level - global setting of ioc->logging_level.
- * @val: ?
- * @kp: ?
+ * @val: value of the parameter to be set
+ * @kp: pointer to kernel_param structure
*
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
@@ -2680,12 +2680,6 @@ scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
pcie_device_put(pcie_device);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
mpt3sas_scsih_change_queue_depth(sdev, qdepth);
- /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
- ** merged and can eliminate holes created during merging
- ** operation.
- **/
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
- sdev->request_queue);
lim->virt_boundary_mask = ioc->page_size - 1;
return 0;
}
@@ -12571,29 +12565,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/**
- * scsih_ncq_prio_supp - Check for NCQ command priority support
- * @sdev: scsi device struct
- *
- * This is called when a user indicates they would like to enable
- * ncq command priorities. This works only on SATA devices.
- */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
-{
- struct scsi_vpd *vpd;
- bool ncq_prio_supp = false;
-
- rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (!vpd || vpd->len < 214)
- goto out;
-
- ncq_prio_supp = (vpd->data[213] >> 4) & 1;
-out:
- rcu_read_unlock();
-
- return ncq_prio_supp;
-}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 76f9a9177198..d84413b77d84 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -458,17 +458,13 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
goto out;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
- strscpy(edev->vendor_id, manufacture_reply->vendor_id,
- sizeof(edev->vendor_id));
- strscpy(edev->product_id, manufacture_reply->product_id,
- sizeof(edev->product_id));
- strscpy(edev->product_rev, manufacture_reply->product_rev,
- sizeof(edev->product_rev));
+ memtostr(edev->vendor_id, manufacture_reply->vendor_id);
+ memtostr(edev->product_id, manufacture_reply->product_id);
+ memtostr(edev->product_rev, manufacture_reply->product_rev);
edev->level = manufacture_reply->sas_format & 1;
if (edev->level) {
- strscpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
- sizeof(edev->component_vendor_id));
+ memtostr(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
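strscpy() expects a NUL-terminated source, so using it on these fixed-width, not necessarily terminated firmware fields risks over-reading; memtostr() copies at most the source array and always terminates the destination. Sketched with hypothetical field names:

struct fw_manufacture_reply {
	u8 vendor_id[8];	/* fixed width, may lack a trailing NUL */
};

static void example_print_vendor(const struct fw_manufacture_reply *rep)
{
	char vendor[sizeof(rep->vendor_id) + 1];

	/* bounded by both array sizes; always NUL-terminates vendor */
	memtostr(vendor, rep->vendor_id);
	pr_info("vendor: %s\n", vendor);
}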
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 5058e01b65a2..98afdfe63600 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -363,6 +363,7 @@ struct qedf_ctx {
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
#define QEDF_PROBING 8
+#define QEDF_STAG_IN_PROGRESS 9
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index fd12439cbaab..49adddf978cc 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/
if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
+ qedf->flogi_pending++;
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+ qedf->flogi_pending = 0;
+ }
+
if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
schedule_delayed_work(&qedf->stag_work, 2);
return NULL;
}
- qedf->flogi_pending++;
+
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout);
}
@@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
struct qedf_ctx *qedf;
struct qed_link_output if_link;
+ qedf = lport_priv(lport);
+
if (lport->vport) {
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
- qedf = lport_priv(lport);
-
qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
@@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
if (!if_link.link_up) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
"Physical link is not up.\n");
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
return;
}
/* Flush and wait to make sure link down is processed */
@@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
"Queue link up work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
}
/* Reset the host by gracefully logging out and then logging back in */
@@ -3463,6 +3473,7 @@ retry_probe:
}
/* Start the Slowpath-process */
+ memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
slowpath_params.int_mode = QED_INT_MODE_MSIX;
slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
@@ -3721,6 +3732,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
{
struct qedf_ctx *qedf;
int rc;
+ int cnt = 0;
if (!pdev) {
QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3738,6 +3750,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
return;
}
+stag_in_prog:
+ if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
+ cnt++;
+
+ if (cnt < 5) {
+ msleep(500);
+ goto stag_in_prog;
+ }
+ }
+
if (mode != QEDF_MODE_RECOVERY)
set_bit(QEDF_UNLOADING, &qedf->flags);
@@ -3997,6 +4020,24 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is NULL\n");
+ return;
+ }
+
+ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Already is in recovery, hence not calling software context reset.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+ return;
+ }
+
+ set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+
printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
dev_name(&qedf->pdev->dev), __func__, __LINE__,
qedf->dbg_ctx.host_no);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3e0c0381277a..ee69bd35889d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
if (result < SCSI_VPD_HEADER_SIZE)
return 0;
+ if (result > sizeof(vpd)) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: long VPD page 0 length: %d bytes\n",
+ __func__, result);
+ result = sizeof(vpd);
+ }
+
result -= SCSI_VPD_HEADER_SIZE;
if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
return 0;
@@ -666,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
sdev->use_10_for_rw = 0;
sdev->cdl_supported = 1;
+
+ /*
+ * If the device supports CDL, make sure that the current drive
+ * feature status is consistent with the user controlled
+ * cdl_enable state.
+ */
+ scsi_cdl_enable(sdev, sdev->cdl_enable);
} else {
sdev->cdl_supported = 0;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index acf0592d63da..a9d8a9c62663 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -69,6 +69,8 @@ static const char *sdebug_version_date = "20210520";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
+#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
+#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
@@ -103,6 +105,7 @@ static const char *sdebug_version_date = "20210520";
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
+/* see drivers/scsi/sense_codes.h */
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -152,6 +155,12 @@ static const char *sdebug_version_date = "20210520";
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
+#define DEF_ATOMIC_WR 0
+#define DEF_ATOMIC_WR_MAX_LENGTH 8192
+#define DEF_ATOMIC_WR_ALIGN 2
+#define DEF_ATOMIC_WR_GRAN 2
+#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
+#define DEF_ATOMIC_WR_MAX_BNDRY 128
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
@@ -374,7 +383,9 @@ struct sdebug_host_info {
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
- rwlock_t macc_lck; /* for atomic media access on this store */
+ rwlock_t macc_data_lck; /* for media data access on this store */
+ rwlock_t macc_meta_lck; /* for atomic media meta access on this store */
+ rwlock_t macc_sector_lck; /* per-sector media data access on this store */
u8 *storep; /* user data storage (ram) */
struct t10_pi_tuple *dif_storep; /* protection info */
void *map_storep; /* provisioning map */
@@ -398,12 +409,20 @@ struct sdebug_defer {
enum sdeb_defer_type defer_t;
};
+struct sdebug_device_access_info {
+ bool atomic_write;
+ u64 lba;
+ u32 num;
+ struct scsi_cmnd *self;
+};
+
struct sdebug_queued_cmd {
/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
* instance indicates this slot is in use.
*/
struct sdebug_defer sd_dp;
struct scsi_cmnd *scmd;
+ struct sdebug_device_access_info *i;
};
struct sdebug_scsi_cmd {
@@ -463,7 +482,8 @@ enum sdeb_opcode_index {
SDEB_I_PRE_FETCH = 29, /* 10, 16 */
SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
- SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
+ SDEB_I_ATOMIC_WRITE_16 = 32,
+ SDEB_I_LAST_ELEM_P1 = 33, /* keep this last (previous + 1) */
};
@@ -497,7 +517,8 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, SDEB_I_VERIFY,
SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
- 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
+ 0, 0, 0, 0,
+ SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
SDEB_I_MAINT_OUT, 0, 0, 0,
@@ -547,6 +568,7 @@ static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -788,6 +810,11 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
+/* 31 */
+ {0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
+ resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
+ {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -835,6 +862,13 @@ static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
+static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
+static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
+static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
+static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
+static unsigned int sdebug_atomic_wr_max_length_bndry =
+ DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
+static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
@@ -926,6 +960,7 @@ static const int device_qfull_result =
static const int condition_met_result = SAM_STAT_CONDITION_MET;
static struct dentry *sdebug_debugfs_root;
+static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
static void sdebug_err_free(struct rcu_head *head)
{
@@ -1148,6 +1183,8 @@ static int sdebug_target_alloc(struct scsi_target *starget)
if (!targetip)
return -ENOMEM;
+ async_synchronize_full_domain(&sdebug_async_domain);
+
targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
sdebug_debugfs_root);
@@ -1174,7 +1211,8 @@ static void sdebug_target_destroy(struct scsi_target *starget)
targetip = (struct sdebug_target_info *)starget->hostdata;
if (targetip) {
starget->hostdata = NULL;
- async_schedule(sdebug_tartget_cleanup_async, targetip);
+ async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
+ &sdebug_async_domain);
}
}
@@ -1188,6 +1226,11 @@ static inline bool scsi_debug_lbp(void)
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
+static inline bool scsi_debug_atomic_write(void)
+{
+ return sdebug_fake_rw == 0 && sdebug_atomic_wr;
+}
+
static void *lba2fake_store(struct sdeb_store_info *sip,
unsigned long long lba)
{
@@ -1815,6 +1858,14 @@ static int inquiry_vpd_b0(unsigned char *arr)
/* Maximum WRITE SAME Length */
put_unaligned_be64(sdebug_write_same_length, &arr[32]);
+ if (sdebug_atomic_wr) {
+ put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
+ put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
+ put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
+ put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
+ put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
+ }
+
return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
@@ -3377,16 +3428,238 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}
+static inline void
+sdeb_read_lock(rwlock_t *lock)
+{
+ if (sdebug_no_rwlock)
+ __acquire(lock);
+ else
+ read_lock(lock);
+}
+
+static inline void
+sdeb_read_unlock(rwlock_t *lock)
+{
+ if (sdebug_no_rwlock)
+ __release(lock);
+ else
+ read_unlock(lock);
+}
+
+static inline void
+sdeb_write_lock(rwlock_t *lock)
+{
+ if (sdebug_no_rwlock)
+ __acquire(lock);
+ else
+ write_lock(lock);
+}
+
+static inline void
+sdeb_write_unlock(rwlock_t *lock)
+{
+ if (sdebug_no_rwlock)
+ __release(lock);
+ else
+ write_unlock(lock);
+}
+
+static inline void
+sdeb_data_read_lock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_read_lock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_read_unlock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_read_unlock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_write_lock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_write_lock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_write_unlock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_write_unlock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_read_lock(&sip->macc_sector_lck);
+}
+
+static inline void
+sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_read_unlock(&sip->macc_sector_lck);
+}
+
+static inline void
+sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_write_lock(&sip->macc_sector_lck);
+}
+
+static inline void
+sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
+{
+ BUG_ON(!sip);
+
+ sdeb_write_unlock(&sip->macc_sector_lck);
+}
+
+/*
+ * Atomic locking:
+ * We simplify the atomic model to allow only 1x atomic write and many non-
+ * atomic reads or writes for all LBAs.
+ *
+ * A RW lock has a similar behaviour:
+ * Only 1x writer and many readers.
+ *
+ * So use a RW lock for per-device read and write locking:
+ * An atomic access grabs the lock as a writer and non-atomic grabs the lock
+ * as a reader.
+ */
+
+static inline void
+sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
+{
+ if (atomic)
+ sdeb_data_write_lock(sip);
+ else
+ sdeb_data_read_lock(sip);
+}
+
+static inline void
+sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
+{
+ if (atomic)
+ sdeb_data_write_unlock(sip);
+ else
+ sdeb_data_read_unlock(sip);
+}
+
+/* Allow many reads but only 1x write per sector */
+static inline void
+sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
+{
+ if (do_write)
+ sdeb_data_sector_write_lock(sip);
+ else
+ sdeb_data_sector_read_lock(sip);
+}
+
+static inline void
+sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
+{
+ if (do_write)
+ sdeb_data_sector_write_unlock(sip);
+ else
+ sdeb_data_sector_read_unlock(sip);
+}
+
+static inline void
+sdeb_meta_read_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_meta_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_lock(&sip->macc_meta_lck);
+ else
+ read_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_meta_read_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_meta_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_unlock(&sip->macc_meta_lck);
+ else
+ read_unlock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_meta_write_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_meta_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_lock(&sip->macc_meta_lck);
+ else
+ write_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_meta_write_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_meta_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_unlock(&sip->macc_meta_lck);
+ else
+ write_unlock(&sdeb_fake_rw_lck);
+ }
+}
+
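A minimal usage sketch of the lock helpers above (illustrative only, not part of the patch; the helper name is invented). It mirrors how do_device_access() and the resp_*() handlers below combine the three lock classes: the per-store meta lock is taken only when zone or provisioning metadata is touched (shown here with the write-side variant; the read handlers use sdeb_meta_read_lock() instead), the per-store data lock is taken as a writer for an atomic access and as a reader otherwise, and each individual sector copy is bracketed by the per-sector lock.

/* Illustrative only; not part of the patch. */
static void example_locked_sector_copy(struct sdeb_store_info *sip,
				       bool atomic, bool do_write,
				       bool touches_metadata)
{
	if (touches_metadata)		/* e.g. zoned device or LBP enabled */
		sdeb_meta_write_lock(sip);

	sdeb_data_lock(sip, atomic);		/* 1x atomic writer OR many non-atomic accesses */
	sdeb_data_sector_lock(sip, do_write);	/* 1x writer OR many readers per sector */

	/* ... copy one sector of the fake store here ... */

	sdeb_data_sector_unlock(sip, do_write);
	sdeb_data_unlock(sip, atomic);

	if (touches_metadata)
		sdeb_meta_write_unlock(sip);
}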
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
- u32 sg_skip, u64 lba, u32 num, bool do_write,
- u8 group_number)
+ u32 sg_skip, u64 lba, u32 num, u8 group_number,
+ bool do_write, bool atomic)
{
int ret;
- u64 block, rest = 0;
+ u64 block;
enum dma_data_direction dir;
struct scsi_data_buffer *sdb = &scp->sdb;
u8 *fsp;
+ int i;
+
+ /*
+ * Even though reads are inherently atomic (in this driver), we expect
+ * the atomic flag only for writes.
+ */
+ if (!do_write && atomic)
+ return -1;
if (do_write) {
dir = DMA_TO_DEVICE;
@@ -3406,21 +3679,26 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
- if (block + num > sdebug_store_sectors)
- rest = block + num - sdebug_store_sectors;
- ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ /* Only allow 1x atomic write or multiple non-atomic writes at any given time */
+ sdeb_data_lock(sip, atomic);
+ for (i = 0; i < num; i++) {
+ /* We shouldn't need to lock for atomic writes, but do it anyway */
+ sdeb_data_sector_lock(sip, do_write);
+ ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fsp + (block * sdebug_sector_size),
- (num - rest) * sdebug_sector_size, sg_skip, do_write);
- if (ret != (num - rest) * sdebug_sector_size)
- return ret;
-
- if (rest) {
- ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fsp, rest * sdebug_sector_size,
- sg_skip + ((num - rest) * sdebug_sector_size),
- do_write);
+ sdebug_sector_size, sg_skip, do_write);
+ sdeb_data_sector_unlock(sip, do_write);
+ if (ret != sdebug_sector_size) {
+ ret += (i * sdebug_sector_size);
+ break;
+ }
+ sg_skip += sdebug_sector_size;
+ if (++block >= sdebug_store_sectors)
+ block = 0;
}
+ ret = num * sdebug_sector_size;
+ sdeb_data_unlock(sip, atomic);
return ret;
}
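A worked example of the wrap handling above, with an invented store size rather than anything from the patch: with sdebug_store_sectors == 8, a 3-sector access at lba 14 starts at block 14 % 8 == 6, and the per-sector loop copies blocks 6, 7 and then wraps to block 0, replacing the old split into one (num - rest)-sector sg_copy_buffer() call and one rest-sector call.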
@@ -3596,70 +3874,6 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
return ret;
}
-static inline void
-sdeb_read_lock(struct sdeb_store_info *sip)
-{
- if (sdebug_no_rwlock) {
- if (sip)
- __acquire(&sip->macc_lck);
- else
- __acquire(&sdeb_fake_rw_lck);
- } else {
- if (sip)
- read_lock(&sip->macc_lck);
- else
- read_lock(&sdeb_fake_rw_lck);
- }
-}
-
-static inline void
-sdeb_read_unlock(struct sdeb_store_info *sip)
-{
- if (sdebug_no_rwlock) {
- if (sip)
- __release(&sip->macc_lck);
- else
- __release(&sdeb_fake_rw_lck);
- } else {
- if (sip)
- read_unlock(&sip->macc_lck);
- else
- read_unlock(&sdeb_fake_rw_lck);
- }
-}
-
-static inline void
-sdeb_write_lock(struct sdeb_store_info *sip)
-{
- if (sdebug_no_rwlock) {
- if (sip)
- __acquire(&sip->macc_lck);
- else
- __acquire(&sdeb_fake_rw_lck);
- } else {
- if (sip)
- write_lock(&sip->macc_lck);
- else
- write_lock(&sdeb_fake_rw_lck);
- }
-}
-
-static inline void
-sdeb_write_unlock(struct sdeb_store_info *sip)
-{
- if (sdebug_no_rwlock) {
- if (sip)
- __release(&sip->macc_lck);
- else
- __release(&sdeb_fake_rw_lck);
- } else {
- if (sip)
- write_unlock(&sip->macc_lck);
- else
- write_unlock(&sdeb_fake_rw_lck);
- }
-}
-
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -3669,6 +3883,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u64 lba;
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *cmd = scp->cmnd;
+ bool meta_data_locked = false;
switch (cmd[0]) {
case READ_16:
@@ -3727,6 +3942,10 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
atomic_set(&sdeb_inject_pending, 0);
}
+ /*
+ * When checking device access params for a read, we only compare against
+ * values set at init time, so there is no need to take a lock here.
+ */
ret = check_device_access_params(scp, lba, num, false);
if (ret)
return ret;
@@ -3746,29 +3965,33 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
- sdeb_read_lock(sip);
+ if (sdebug_dev_is_zoned(devip) ||
+ (sdebug_dix && scsi_prot_sg_count(scp))) {
+ sdeb_meta_read_lock(sip);
+ meta_data_locked = true;
+ }
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
switch (prot_verify_read(scp, lba, num, ei_lba)) {
case 1: /* Guard tag error */
if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
- sdeb_read_unlock(sip);
+ sdeb_meta_read_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
return check_condition_result;
} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
- sdeb_read_unlock(sip);
+ sdeb_meta_read_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
return illegal_condition_result;
}
break;
case 3: /* Reference tag error */
if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
- sdeb_read_unlock(sip);
+ sdeb_meta_read_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
return check_condition_result;
} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
- sdeb_read_unlock(sip);
+ sdeb_meta_read_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
return illegal_condition_result;
}
@@ -3776,8 +3999,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, false, 0);
- sdeb_read_unlock(sip);
+ ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
+ if (meta_data_locked)
+ sdeb_meta_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -3967,6 +4191,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u64 lba;
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *cmd = scp->cmnd;
+ bool meta_data_locked = false;
switch (cmd[0]) {
case WRITE_16:
@@ -4025,10 +4250,17 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
"to DIF device\n");
}
- sdeb_write_lock(sip);
+ if (sdebug_dev_is_zoned(devip) ||
+ (sdebug_dix && scsi_prot_sg_count(scp)) ||
+ scsi_debug_lbp()) {
+ sdeb_meta_write_lock(sip);
+ meta_data_locked = true;
+ }
+
ret = check_device_access_params(scp, lba, num, true);
if (ret) {
- sdeb_write_unlock(sip);
+ if (meta_data_locked)
+ sdeb_meta_write_unlock(sip);
return ret;
}
@@ -4037,22 +4269,22 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
switch (prot_verify_write(scp, lba, num, ei_lba)) {
case 1: /* Guard tag error */
if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
return illegal_condition_result;
} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
return check_condition_result;
}
break;
case 3: /* Reference tag error */
if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
return illegal_condition_result;
} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
return check_condition_result;
}
@@ -4060,13 +4292,16 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, true, group);
+ ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
if (unlikely(scsi_debug_lbp()))
map_region(sip, lba, num);
+
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
- sdeb_write_unlock(sip);
+ if (meta_data_locked)
+ sdeb_meta_write_unlock(sip);
+
if (unlikely(-1 == ret))
return DID_ERROR << 16;
else if (unlikely(sdebug_verbose &&
@@ -4176,7 +4411,8 @@ static int resp_write_scat(struct scsi_cmnd *scp,
goto err_out;
}
- sdeb_write_lock(sip);
+ /* Just keep it simple and always lock for now */
+ sdeb_meta_write_lock(sip);
sg_off = lbdof_blen;
/* Spec says Buffer xfer Length field in number of LBs in dout */
cum_lb = 0;
@@ -4219,7 +4455,11 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
+ /*
+ * Write each range atomically to keep the behaviour as close as
+ * possible to the pre-atomic-write code.
+ */
+ ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
@@ -4258,7 +4498,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
ret = 0;
err_out_unlock:
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
err_out:
kfree(lrdp);
return ret;
@@ -4277,14 +4517,16 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
scp->device->hostdata, true);
u8 *fs1p;
u8 *fsp;
+ bool meta_data_locked = false;
- sdeb_write_lock(sip);
+ if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
+ sdeb_meta_write_lock(sip);
+ meta_data_locked = true;
+ }
ret = check_device_access_params(scp, lba, num, true);
- if (ret) {
- sdeb_write_unlock(sip);
- return ret;
- }
+ if (ret)
+ goto out;
if (unmap && scsi_debug_lbp()) {
unmap_region(sip, lba, num);
@@ -4295,6 +4537,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
/* if ndob then zero 1 logical block, else fetch 1 logical block */
fsp = sip->storep;
fs1p = fsp + (block * lb_size);
+ sdeb_data_write_lock(sip);
if (ndob) {
memset(fs1p, 0, lb_size);
ret = 0;
@@ -4302,8 +4545,8 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
- sdeb_write_unlock(sip);
- return DID_ERROR << 16;
+ ret = DID_ERROR << 16;
+ goto out;
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
"%s: %s: lb size=%u, IO sent=%d bytes\n",
@@ -4320,10 +4563,12 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
+ sdeb_data_write_unlock(sip);
+ ret = 0;
out:
- sdeb_write_unlock(sip);
-
- return 0;
+ if (meta_data_locked)
+ sdeb_meta_write_unlock(sip);
+ return ret;
}
static int resp_write_same_10(struct scsi_cmnd *scp,
@@ -4466,25 +4711,30 @@ static int resp_comp_write(struct scsi_cmnd *scp,
return check_condition_result;
}
- sdeb_write_lock(sip);
-
ret = do_dout_fetch(scp, dnum, arr);
if (ret == -1) {
retval = DID_ERROR << 16;
- goto cleanup;
+ goto cleanup_free;
} else if (sdebug_verbose && (ret < (dnum * lb_size)))
sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
"indicated=%u, IO sent=%d bytes\n", my_name,
dnum * lb_size, ret);
+
+ sdeb_data_write_lock(sip);
+ sdeb_meta_write_lock(sip);
if (!comp_write_worker(sip, lba, num, arr, false)) {
mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
retval = check_condition_result;
- goto cleanup;
+ goto cleanup_unlock;
}
+
+ /* Cover sip->map_storep (which map_region() sets) with the data lock */
if (scsi_debug_lbp())
map_region(sip, lba, num);
-cleanup:
- sdeb_write_unlock(sip);
+cleanup_unlock:
+ sdeb_meta_write_unlock(sip);
+ sdeb_data_write_unlock(sip);
+cleanup_free:
kfree(arr);
return retval;
}
@@ -4528,7 +4778,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
desc = (void *)&buf[8];
- sdeb_write_lock(sip);
+ sdeb_meta_write_lock(sip);
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -4544,7 +4794,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ret = 0;
out:
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
kfree(buf);
return ret;
@@ -4702,12 +4952,13 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
rest = block + nblks - sdebug_store_sectors;
/* Try to bring the PRE-FETCH range into CPU's cache */
- sdeb_read_lock(sip);
+ sdeb_data_read_lock(sip);
prefetch_range(fsp + (sdebug_sector_size * block),
(nblks - rest) * sdebug_sector_size);
if (rest)
prefetch_range(fsp, rest * sdebug_sector_size);
- sdeb_read_unlock(sip);
+
+ sdeb_data_read_unlock(sip);
fini:
if (cmd[1] & 0x2)
res = SDEG_RES_IMMED_MASK;
@@ -4866,7 +5117,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
/* Not changing store, so only need read access */
- sdeb_read_lock(sip);
+ sdeb_data_read_lock(sip);
ret = do_dout_fetch(scp, a_num, arr);
if (ret == -1) {
@@ -4888,7 +5139,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
goto cleanup;
}
cleanup:
- sdeb_read_unlock(sip);
+ sdeb_data_read_unlock(sip);
kfree(arr);
return ret;
}
@@ -4934,7 +5185,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
return check_condition_result;
}
- sdeb_read_lock(sip);
+ sdeb_meta_read_lock(sip);
desc = arr + 64;
for (lba = zs_lba; lba < sdebug_capacity;
@@ -5032,11 +5283,70 @@ static int resp_report_zones(struct scsi_cmnd *scp,
ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
fini:
- sdeb_read_unlock(sip);
+ sdeb_meta_read_unlock(sip);
kfree(arr);
return ret;
}
+static int resp_atomic_write(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ struct sdeb_store_info *sip;
+ u8 *cmd = scp->cmnd;
+ u16 boundary, len;
+ u64 lba, lba_tmp;
+ int ret;
+
+ if (!scsi_debug_atomic_write()) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ sip = devip2sip(devip, true);
+
+ lba = get_unaligned_be64(cmd + 2);
+ boundary = get_unaligned_be16(cmd + 10);
+ len = get_unaligned_be16(cmd + 12);
+
+ lba_tmp = lba;
+ if (sdebug_atomic_wr_align &&
+ do_div(lba_tmp, sdebug_atomic_wr_align)) {
+ /* Does not meet alignment requirement */
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+
+ if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
+ /* Does not meet alignment requirement */
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+
+ if (boundary > 0) {
+ if (boundary > sdebug_atomic_wr_max_bndry) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+ return check_condition_result;
+ }
+
+ if (len > sdebug_atomic_wr_max_length_bndry) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+ return check_condition_result;
+ }
+ } else {
+ if (len > sdebug_atomic_wr_max_length) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+ return check_condition_result;
+ }
+ }
+
+ ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
+ if (unlikely(ret == -1))
+ return DID_ERROR << 16;
+ if (unlikely(ret != len * sdebug_sector_size))
+ return DID_ERROR << 16;
+ return 0;
+}
+
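For cross-checking the byte offsets decoded above (and filled in by sd_setup_atomic_cmnd() later in this diff), a minimal sketch of the WRITE ATOMIC(16) CDB layout. The struct is illustrative only; the placement of the trailing group number and control bytes follows the usual SBC convention, and this patch simply zeroes them.

/* Illustrative only; not part of the patch. */
struct write_atomic_16_cdb {
	__u8	opcode;		/* WRITE_ATOMIC_16 */
	__u8	flags;		/* wrprotect / DPO / FUA bits */
	__be64	lba;		/* bytes 2..9,   get_unaligned_be64(cmd + 2)  */
	__be16	boundary;	/* bytes 10..11, get_unaligned_be16(cmd + 10) */
	__be16	length;		/* bytes 12..13, transfer length in blocks    */
	__u8	group;		/* byte 14, zeroed by sd_setup_atomic_cmnd()  */
	__u8	control;	/* byte 15, likewise zeroed                   */
} __packed;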
/* Logic transplanted from tcmu-runner, file_zbc.c */
static void zbc_open_all(struct sdebug_dev_info *devip)
{
@@ -5063,8 +5373,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
-
- sdeb_write_lock(sip);
+ sdeb_meta_write_lock(sip);
if (all) {
/* Check if all closed zones can be open */
@@ -5113,7 +5422,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
zbc_open_zone(devip, zsp, true);
fini:
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
return res;
}
@@ -5140,7 +5449,7 @@ static int resp_close_zone(struct scsi_cmnd *scp,
return check_condition_result;
}
- sdeb_write_lock(sip);
+ sdeb_meta_write_lock(sip);
if (all) {
zbc_close_all(devip);
@@ -5169,7 +5478,7 @@ static int resp_close_zone(struct scsi_cmnd *scp,
zbc_close_zone(devip, zsp);
fini:
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
return res;
}
@@ -5212,7 +5521,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp,
return check_condition_result;
}
- sdeb_write_lock(sip);
+ sdeb_meta_write_lock(sip);
if (all) {
zbc_finish_all(devip);
@@ -5241,7 +5550,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp,
zbc_finish_zone(devip, zsp, true);
fini:
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
return res;
}
@@ -5292,7 +5601,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
- sdeb_write_lock(sip);
+ sdeb_meta_write_lock(sip);
if (all) {
zbc_rwp_all(devip);
@@ -5320,7 +5629,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
zbc_rwp_zone(devip, zsp);
fini:
- sdeb_write_unlock(sip);
+ sdeb_meta_write_unlock(sip);
return res;
}
@@ -6284,6 +6593,7 @@ module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
+module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
@@ -6318,6 +6628,11 @@ module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
+module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
+module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
+module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
+module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
+module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
@@ -6361,6 +6676,7 @@ MODULE_PARM_DESC(lbprz,
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
@@ -6392,6 +6708,11 @@ MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
+MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
+MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
+MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
+MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
+MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
MODULE_PARM_DESC(uuid_ctl,
"1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
@@ -7563,6 +7884,7 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
}
+
xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
if (want_store) {
idx = sdebug_add_store();
@@ -7770,7 +8092,9 @@ static int sdebug_add_store(void)
map_region(sip, 0, 2);
}
- rwlock_init(&sip->macc_lck);
+ rwlock_init(&sip->macc_data_lck);
+ rwlock_init(&sip->macc_meta_lck);
+ rwlock_init(&sip->macc_sector_lck);
return (int)n_idx;
err:
sdebug_erase_store((int)n_idx, sip);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ec39acc986d6..3958a6d14bf4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -631,8 +631,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_update_request(req, error, bytes))
return true;
- // XXX:
- if (blk_queue_add_random(q))
+ if (q->limits.features & BLK_FEAT_ADD_RANDOM)
add_disk_randomness(req->q->disk);
WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
@@ -1140,9 +1139,9 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
*/
count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
- if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+ if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
unsigned int pad_len =
- (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+ (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
last_sg->length += pad_len;
cmd->extra_len += pad_len;
@@ -1987,7 +1986,7 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
shost->dma_alignment, dma_get_cache_alignment() - 1);
if (shost->no_highmem)
- lim->bounce = BLK_BOUNCE_HIGH;
+ lim->features |= BLK_FEAT_BOUNCE_HIGH;
dma_set_seg_boundary(dev, shost->dma_boundary);
dma_set_max_seg_size(dev, shost->max_segment_size);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 41a950075913..3e47c4472a80 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -326,6 +326,26 @@ out:
}
static const char *
+scsi_trace_atomic_write16_out(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned int boundary_size;
+ unsigned int nr_blocks;
+ sector_t lba;
+
+ lba = get_unaligned_be64(&cdb[2]);
+ boundary_size = get_unaligned_be16(&cdb[10]);
+ nr_blocks = get_unaligned_be16(&cdb[12]);
+
+ trace_seq_printf(p, "lba=%llu txlen=%u boundary_size=%u",
+ lba, nr_blocks, boundary_size);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
{
switch (SERVICE_ACTION32(cdb)) {
@@ -385,6 +405,8 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
return scsi_trace_zbc_in(p, cdb, len);
case ZBC_OUT:
return scsi_trace_zbc_out(p, cdb, len);
+ case WRITE_ATOMIC_16:
+ return scsi_trace_atomic_write16_out(p, cdb, len);
default:
return scsi_trace_misc(p, cdb, len);
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 424a89513814..4e33f1661e4c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
}
EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+/**
+ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
+ * @sdev: SCSI device
+ *
+ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
+ * Information). Since this VPD page is implemented only for ATA devices,
+ * this function always returns false for non-ATA (plain SCSI) devices.
+ */
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
+{
+ struct scsi_vpd *vpd;
+ bool ncq_prio_supported = false;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (vpd && vpd->len >= 214)
+ ncq_prio_supported = (vpd->data[213] >> 4) & 1;
+ rcu_read_unlock();
+
+ return ncq_prio_supported;
+}
+EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
+
/*
* SAS Phy attributes
*/
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 332eb9dac22d..2e933fd1de70 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -63,6 +63,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
@@ -101,12 +102,13 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
#define SD_MINORS 16
-static void sd_config_discard(struct scsi_disk *, unsigned int);
-static void sd_config_write_same(struct scsi_disk *);
+static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
+ unsigned int mode);
+static void sd_config_write_same(struct scsi_disk *sdkp,
+ struct queue_limits *lim);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static void sd_shutdown(struct device *);
-static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
@@ -119,17 +121,18 @@ static const char *sd_cache_types[] = {
"write back, no read (daft)"
};
-static void sd_set_flush_flag(struct scsi_disk *sdkp)
+static void sd_set_flush_flag(struct scsi_disk *sdkp,
+ struct queue_limits *lim)
{
- bool wc = false, fua = false;
-
if (sdkp->WCE) {
- wc = true;
+ lim->features |= BLK_FEAT_WRITE_CACHE;
if (sdkp->DPOFUA)
- fua = true;
+ lim->features |= BLK_FEAT_FUA;
+ else
+ lim->features &= ~BLK_FEAT_FUA;
+ } else {
+ lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
}
-
- blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}
static ssize_t
@@ -167,9 +170,18 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
if (sdkp->cache_override) {
+ struct queue_limits lim;
+
sdkp->WCE = wce;
sdkp->RCD = rcd;
- sd_set_flush_flag(sdkp);
+
+ lim = queue_limits_start_update(sdkp->disk->queue);
+ sd_set_flush_flag(sdkp, &lim);
+ blk_mq_freeze_queue(sdkp->disk->queue);
+ ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
+ blk_mq_unfreeze_queue(sdkp->disk->queue);
+ if (ret)
+ return ret;
return count;
}
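The cache_type_store() conversion above is the first appearance in this diff of the queue_limits commit pattern; the same stage, freeze, commit, unfreeze sequence recurs in provisioning_mode_store(), max_write_same_blocks_store(), sd_revalidate_disk() and sr's get_sectorsize(). A minimal standalone sketch of the idiom, using only calls that appear in this diff (illustrative, not part of the patch; the helper and its name are invented):

/* Illustrative only: stage new limits locally, commit them under a frozen queue. */
static int example_set_logical_block_size(struct request_queue *q,
					  unsigned int lbs)
{
	struct queue_limits lim;
	int err;

	lim = queue_limits_start_update(q);
	lim.logical_block_size = lbs;
	blk_mq_freeze_queue(q);
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);
	return err;
}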
@@ -456,16 +468,12 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
- int mode;
+ struct queue_limits lim;
+ int mode, err;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (sd_is_zoned(sdkp)) {
- sd_config_discard(sdkp, SD_LBP_DISABLE);
- return count;
- }
-
if (sdp->type != TYPE_DISK)
return -EINVAL;
@@ -473,8 +481,13 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
if (mode < 0)
return -EINVAL;
- sd_config_discard(sdkp, mode);
-
+ lim = queue_limits_start_update(sdkp->disk->queue);
+ sd_config_discard(sdkp, &lim, mode);
+ blk_mq_freeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+ blk_mq_unfreeze_queue(sdkp->disk->queue);
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(provisioning_mode);
@@ -557,6 +570,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
+ struct queue_limits lim;
unsigned long max;
int err;
@@ -578,8 +592,13 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
sdkp->max_ws_blocks = max;
}
- sd_config_write_same(sdkp);
-
+ lim = queue_limits_start_update(sdkp->disk->queue);
+ sd_config_write_same(sdkp, &lim);
+ blk_mq_freeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+ blk_mq_unfreeze_queue(sdkp->disk->queue);
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);
@@ -822,25 +841,28 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
return protect;
}
-static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+static void sd_disable_discard(struct scsi_disk *sdkp)
+{
+ sdkp->provisioning_mode = SD_LBP_DISABLE;
+ blk_queue_disable_discard(sdkp->disk->queue);
+}
+
+static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
+ unsigned int mode)
{
- struct request_queue *q = sdkp->disk->queue;
unsigned int logical_block_size = sdkp->device->sector_size;
unsigned int max_blocks = 0;
- q->limits.discard_alignment =
- sdkp->unmap_alignment * logical_block_size;
- q->limits.discard_granularity =
- max(sdkp->physical_block_size,
- sdkp->unmap_granularity * logical_block_size);
+ lim->discard_alignment = sdkp->unmap_alignment * logical_block_size;
+ lim->discard_granularity = max(sdkp->physical_block_size,
+ sdkp->unmap_granularity * logical_block_size);
sdkp->provisioning_mode = mode;
switch (mode) {
case SD_LBP_FULL:
case SD_LBP_DISABLE:
- blk_queue_max_discard_sectors(q, 0);
- return;
+ break;
case SD_LBP_UNMAP:
max_blocks = min_not_zero(sdkp->max_unmap_blocks,
@@ -871,7 +893,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
break;
}
- blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
+ lim->max_hw_discard_sectors = max_blocks *
+ (logical_block_size >> SECTOR_SHIFT);
}
static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
@@ -917,6 +940,64 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
return scsi_alloc_sgtables(cmd);
}
+static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
+{
+ unsigned int logical_block_size = sdkp->device->sector_size,
+ physical_block_size_sectors, max_atomic, unit_min, unit_max;
+
+ if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) ||
+ sdkp->protection_type == T10_PI_TYPE2_PROTECTION)
+ return;
+
+ physical_block_size_sectors = sdkp->physical_block_size /
+ sdkp->device->sector_size;
+
+ unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ?
+ sdkp->atomic_granularity :
+ physical_block_size_sectors);
+
+ /*
+ * Only use atomic boundary when we have the odd scenario of
+ * sdkp->max_atomic == 0, which the spec does permit.
+ */
+ if (sdkp->max_atomic) {
+ max_atomic = sdkp->max_atomic;
+ unit_max = rounddown_pow_of_two(sdkp->max_atomic);
+ sdkp->use_atomic_write_boundary = 0;
+ } else {
+ max_atomic = sdkp->max_atomic_with_boundary;
+ unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary);
+ sdkp->use_atomic_write_boundary = 1;
+ }
+
+ /*
+ * Ensure compliance with granularity and alignment. For now, keep it
+ * simple and just don't support atomic writes for values mismatched
+ * with max_{boundary}atomic, physical block size, and
+ * atomic_granularity itself.
+ *
+ * We're really being distrustful by checking unit_max also...
+ */
+ if (sdkp->atomic_granularity > 1) {
+ if (unit_min > 1 && unit_min % sdkp->atomic_granularity)
+ return;
+ if (unit_max > 1 && unit_max % sdkp->atomic_granularity)
+ return;
+ }
+
+ if (sdkp->atomic_alignment > 1) {
+ if (unit_min > 1 && unit_min % sdkp->atomic_alignment)
+ return;
+ if (unit_max > 1 && unit_max % sdkp->atomic_alignment)
+ return;
+ }
+
+ lim->atomic_write_hw_max = max_atomic * logical_block_size;
+ lim->atomic_write_hw_boundary = 0;
+ lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
+ lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
+}
+
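A worked example of the sizing above, using invented VPD values rather than anything from the patch: with max_atomic == 96 blocks, atomic_granularity == 8 blocks and 512-byte logical blocks, unit_min = rounddown_pow_of_two(8) == 8 and unit_max = rounddown_pow_of_two(96) == 64. Both are multiples of the granularity, so the exported limits become atomic_write_hw_unit_min = 4 KiB, atomic_write_hw_unit_max = 32 KiB and atomic_write_hw_max = 96 * 512 = 48 KiB, with use_atomic_write_boundary left at 0 because max_atomic is non-zero.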
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
bool unmap)
{
@@ -999,9 +1080,16 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
return sd_setup_write_same10_cmnd(cmd, false);
}
-static void sd_config_write_same(struct scsi_disk *sdkp)
+static void sd_disable_write_same(struct scsi_disk *sdkp)
+{
+ sdkp->device->no_write_same = 1;
+ sdkp->max_ws_blocks = 0;
+ blk_queue_disable_write_zeroes(sdkp->disk->queue);
+}
+
+static void sd_config_write_same(struct scsi_disk *sdkp,
+ struct queue_limits *lim)
{
- struct request_queue *q = sdkp->disk->queue;
unsigned int logical_block_size = sdkp->device->sector_size;
if (sdkp->device->no_write_same) {
@@ -1055,8 +1143,8 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
}
out:
- blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
- (logical_block_size >> 9));
+ lim->max_write_zeroes_sectors =
+ sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT);
}
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
@@ -1208,6 +1296,26 @@ static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd)
return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1;
}
+static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd,
+ sector_t lba, unsigned int nr_blocks,
+ bool boundary, unsigned char flags)
+{
+ cmd->cmd_len = 16;
+ cmd->cmnd[0] = WRITE_ATOMIC_16;
+ cmd->cmnd[1] = flags;
+ put_unaligned_be64(lba, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_blocks, &cmd->cmnd[12]);
+ if (boundary)
+ put_unaligned_be16(nr_blocks, &cmd->cmnd[10]);
+ else
+ put_unaligned_be16(0, &cmd->cmnd[10]);
+ put_unaligned_be16(nr_blocks, &cmd->cmnd[12]);
+ cmd->cmnd[14] = 0;
+ cmd->cmnd[15] = 0;
+
+ return BLK_STS_OK;
+}
+
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -1273,6 +1381,10 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
+ } else if (rq->cmd_flags & REQ_ATOMIC && write) {
+ ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks,
+ sdkp->use_atomic_write_boundary,
+ protect | fua);
} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
@@ -2246,15 +2358,14 @@ static int sd_done(struct scsi_cmnd *SCpnt)
case 0x24: /* INVALID FIELD IN CDB */
switch (SCpnt->cmnd[0]) {
case UNMAP:
- sd_config_discard(sdkp, SD_LBP_DISABLE);
+ sd_disable_discard(sdkp);
break;
case WRITE_SAME_16:
case WRITE_SAME:
if (SCpnt->cmnd[1] & 8) { /* UNMAP */
- sd_config_discard(sdkp, SD_LBP_DISABLE);
+ sd_disable_discard(sdkp);
} else {
- sdkp->device->no_write_same = 1;
- sd_config_write_same(sdkp);
+ sd_disable_write_same(sdkp);
req->rq_flags |= RQF_QUIET;
}
break;
@@ -2266,7 +2377,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
}
out:
- if (sd_is_zoned(sdkp))
+ if (sdkp->device->type == TYPE_ZBC)
good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
@@ -2460,11 +2571,13 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
return 0;
}
-static void sd_config_protection(struct scsi_disk *sdkp)
+static void sd_config_protection(struct scsi_disk *sdkp,
+ struct queue_limits *lim)
{
struct scsi_device *sdp = sdkp->device;
- sd_dif_config_host(sdkp);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ sd_dif_config_host(sdkp, lim);
if (!sdkp->protection_type)
return;
@@ -2513,7 +2626,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
- unsigned char *buffer)
+ struct queue_limits *lim, unsigned char *buffer)
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
@@ -2587,7 +2700,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
/* Lowest aligned logical block */
alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
- blk_queue_alignment_offset(sdp->request_queue, alignment);
+ lim->alignment_offset = alignment;
if (alignment && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"physical block alignment offset: %u\n", alignment);
@@ -2598,7 +2711,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (buffer[14] & 0x40) /* LBPRZ */
sdkp->lbprz = 1;
- sd_config_discard(sdkp, SD_LBP_WS16);
+ sd_config_discard(sdkp, lim, SD_LBP_WS16);
}
sdkp->capacity = lba + 1;
@@ -2701,13 +2814,14 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
* read disk capacity
*/
static void
-sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
+sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
+ unsigned char *buffer)
{
int sector_size;
struct scsi_device *sdp = sdkp->device;
if (sd_try_rc16_first(sdp)) {
- sector_size = read_capacity_16(sdkp, sdp, buffer);
+ sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
if (sector_size == -EOVERFLOW)
goto got_data;
if (sector_size == -ENODEV)
@@ -2727,7 +2841,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
int old_sector_size = sector_size;
sd_printk(KERN_NOTICE, sdkp, "Very big device. "
"Trying to use READ CAPACITY(16).\n");
- sector_size = read_capacity_16(sdkp, sdp, buffer);
+ sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
if (sector_size < 0) {
sd_printk(KERN_NOTICE, sdkp,
"Using 0xffffffff as device size\n");
@@ -2786,9 +2900,8 @@ got_data:
*/
sector_size = 512;
}
- blk_queue_logical_block_size(sdp->request_queue, sector_size);
- blk_queue_physical_block_size(sdp->request_queue,
- sdkp->physical_block_size);
+ lim->logical_block_size = sector_size;
+ lim->physical_block_size = sdkp->physical_block_size;
sdkp->device->sector_size = sector_size;
if (sdkp->capacity > 0xffffffff)
@@ -3118,6 +3231,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_mode_data data;
int res;
+ if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
+ return;
+
res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
/*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr);
@@ -3191,11 +3307,30 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
return;
}
-/**
- * sd_read_block_limits - Query disk device for preferred I/O sizes.
- * @sdkp: disk to query
+static unsigned int sd_discard_mode(struct scsi_disk *sdkp)
+{
+ if (!sdkp->lbpvpd) {
+ /* LBP VPD page not provided */
+ if (sdkp->max_unmap_blocks)
+ return SD_LBP_UNMAP;
+ return SD_LBP_WS16;
+ }
+
+ /* LBP VPD page tells us what to use */
+ if (sdkp->lbpu && sdkp->max_unmap_blocks)
+ return SD_LBP_UNMAP;
+ if (sdkp->lbpws)
+ return SD_LBP_WS16;
+ if (sdkp->lbpws10)
+ return SD_LBP_WS10;
+ return SD_LBP_DISABLE;
+}
+
+/*
+ * Query disk device for preferred I/O sizes.
*/
-static void sd_read_block_limits(struct scsi_disk *sdkp)
+static void sd_read_block_limits(struct scsi_disk *sdkp,
+ struct queue_limits *lim)
{
struct scsi_vpd *vpd;
@@ -3215,7 +3350,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
if (!sdkp->lbpme)
- goto out;
+ goto config_atomic;
lba_count = get_unaligned_be32(&vpd->data[20]);
desc_count = get_unaligned_be32(&vpd->data[24]);
@@ -3229,23 +3364,16 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sdkp->unmap_alignment =
get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
- if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
-
- if (sdkp->max_unmap_blocks)
- sd_config_discard(sdkp, SD_LBP_UNMAP);
- else
- sd_config_discard(sdkp, SD_LBP_WS16);
-
- } else { /* LBP VPD page tells us what to use */
- if (sdkp->lbpu && sdkp->max_unmap_blocks)
- sd_config_discard(sdkp, SD_LBP_UNMAP);
- else if (sdkp->lbpws)
- sd_config_discard(sdkp, SD_LBP_WS16);
- else if (sdkp->lbpws10)
- sd_config_discard(sdkp, SD_LBP_WS10);
- else
- sd_config_discard(sdkp, SD_LBP_DISABLE);
- }
+ sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
+
+config_atomic:
+ sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]);
+ sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]);
+ sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]);
+ sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]);
+ sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]);
+
+ sd_config_atomic(sdkp, lim);
}
out:
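For reference, and assuming the standard SBC-4 Block Limits VPD layout (which matches the field names used here), the offsets read above are: bytes 44..47 MAXIMUM ATOMIC TRANSFER LENGTH, 48..51 ATOMIC ALIGNMENT, 52..55 ATOMIC TRANSFER LENGTH GRANULARITY, 56..59 MAXIMUM ATOMIC TRANSFER LENGTH WITH ATOMIC BOUNDARY and 60..63 MAXIMUM ATOMIC BOUNDARY SIZE. scsi_debug's inquiry_vpd_b0() earlier in this diff fills the same fields; its arr[] starts after the 4-byte page header, which is why the offsets there (40, 44, 48, 52, 56) are smaller by 4.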
@@ -3264,13 +3392,10 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
rcu_read_unlock();
}
-/**
- * sd_read_block_characteristics - Query block dev. characteristics
- * @sdkp: disk to query
- */
-static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+/* Query block device characteristics */
+static void sd_read_block_characteristics(struct scsi_disk *sdkp,
+ struct queue_limits *lim)
{
- struct request_queue *q = sdkp->disk->queue;
struct scsi_vpd *vpd;
u16 rot;
@@ -3286,37 +3411,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
sdkp->zoned = (vpd->data[8] >> 4) & 3;
rcu_read_unlock();
- if (rot == 1) {
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
- }
-
-
-#ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */
- if (sdkp->device->type == TYPE_ZBC) {
- /*
- * Host-managed.
- */
- disk_set_zoned(sdkp->disk);
-
- /*
- * Per ZBC and ZAC specifications, writes in sequential write
- * required zones of host-managed devices must be aligned to
- * the device physical block size.
- */
- blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
- } else {
- /*
- * Host-aware devices are treated as conventional.
- */
- WARN_ON_ONCE(blk_queue_is_zoned(q));
- }
-#endif /* CONFIG_BLK_DEV_ZONED */
+ if (rot == 1)
+ lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
if (!sdkp->first_scan)
return;
- if (blk_queue_is_zoned(q))
+ if (sdkp->device->type == TYPE_ZBC)
sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
else if (sdkp->zoned == 1)
sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
@@ -3565,16 +3666,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
static void sd_read_block_zero(struct scsi_disk *sdkp)
{
- unsigned int buf_len = sdkp->device->sector_size;
- char *buffer, cmd[10] = { };
+ struct scsi_device *sdev = sdkp->device;
+ unsigned int buf_len = sdev->sector_size;
+ u8 *buffer, cmd[16] = { };
buffer = kmalloc(buf_len, GFP_KERNEL);
if (!buffer)
return;
- cmd[0] = READ_10;
- put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
- put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ if (sdev->use_16_for_rw) {
+ cmd[0] = READ_16;
+ put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
+ } else {
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ }
scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
SD_TIMEOUT, sdkp->max_retries, NULL);
@@ -3590,10 +3698,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
- struct request_queue *q = sdkp->disk->queue;
sector_t old_capacity = sdkp->capacity;
+ struct queue_limits lim;
unsigned char *buffer;
- unsigned int dev_max, rw_max;
+ unsigned int dev_max;
+ int err;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
"sd_revalidate_disk\n"));
@@ -3614,12 +3723,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_spinup_disk(sdkp);
+ lim = queue_limits_start_update(sdkp->disk->queue);
+
/*
* Without media there is no reason to ask; moreover, some devices
* react badly if we do.
*/
if (sdkp->media_present) {
- sd_read_capacity(sdkp, buffer);
+ sd_read_capacity(sdkp, &lim, buffer);
/*
* Some USB/UAS devices return generic values for mode pages
* until the media has been accessed. Trigger a READ operation
@@ -3633,15 +3744,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
* cause this to be updated correctly and any device which
* doesn't support it should be treated as rotational.
*/
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+ lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
- sd_read_block_limits(sdkp);
+ sd_read_block_limits(sdkp, &lim);
sd_read_block_limits_ext(sdkp);
- sd_read_block_characteristics(sdkp);
- sd_zbc_read_zones(sdkp, buffer);
+ sd_read_block_characteristics(sdkp, &lim);
+ sd_zbc_read_zones(sdkp, &lim, buffer);
sd_read_cpr(sdkp);
}
@@ -3653,62 +3763,50 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
- sd_config_protection(sdkp);
+ sd_config_protection(sdkp, &lim);
}
/*
* We now have all cache related info, determine how we deal
* with flush requests.
*/
- sd_set_flush_flag(sdkp);
+ sd_set_flush_flag(sdkp, &lim);
/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
/* Some devices report a maximum block count for READ/WRITE requests. */
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
- q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+ lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
if (sd_validate_min_xfer_size(sdkp))
- blk_queue_io_min(sdkp->disk->queue,
- logical_to_bytes(sdp, sdkp->min_xfer_blocks));
+ lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
else
- blk_queue_io_min(sdkp->disk->queue, 0);
-
- if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
- q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
- rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
- } else {
- q->limits.io_opt = 0;
- rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
- (sector_t)BLK_DEF_MAX_SECTORS_CAP);
- }
+ lim.io_min = 0;
/*
* Limit default to SCSI host optimal sector limit if set. There may be
* an impact on performance for when the size of a request exceeds this
* host limit.
*/
- rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
-
- /* Do not exceed controller limit */
- rw_max = min(rw_max, queue_max_hw_sectors(q));
-
- /*
- * Only update max_sectors if previously unset or if the current value
- * exceeds the capabilities of the hardware.
- */
- if (sdkp->first_scan ||
- q->limits.max_sectors > q->limits.max_dev_sectors ||
- q->limits.max_sectors > q->limits.max_hw_sectors)
- q->limits.max_sectors = rw_max;
+ lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
+ if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
+ lim.io_opt = min_not_zero(lim.io_opt,
+ logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
+ }
sdkp->first_scan = 0;
set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
- sd_config_write_same(sdkp);
+ sd_config_write_same(sdkp, &lim);
kfree(buffer);
+ blk_mq_freeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+ blk_mq_unfreeze_queue(sdkp->disk->queue);
+ if (err)
+ return err;
+
/*
* For a zoned drive, revalidating the zones can be done only once
* the gendisk capacity is set. So if this fails, set back the gendisk
@@ -4106,8 +4204,6 @@ static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
-
if (opal_unlock_from_suspend(sdkp->opal_dev)) {
sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
return -EIO;
@@ -4124,12 +4220,13 @@ static int sd_resume_common(struct device *dev, bool runtime)
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+
if (!sd_do_start_stop(sdkp->device, runtime)) {
sdkp->suspended = false;
return 0;
}
- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
if (!ret) {
sd_resume(dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 49dd600bfa48..36382eca941c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -115,6 +115,13 @@ struct scsi_disk {
u32 max_unmap_blocks;
u32 unmap_granularity;
u32 unmap_alignment;
+
+ u32 max_atomic;
+ u32 atomic_alignment;
+ u32 atomic_granularity;
+ u32 max_atomic_with_boundary;
+ u32 max_atomic_boundary;
+
u32 index;
unsigned int physical_block_size;
unsigned int max_medium_access_timeouts;
@@ -148,6 +155,7 @@ struct scsi_disk {
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
unsigned rscs : 1; /* reduced stream control support */
+ unsigned use_atomic_write_boundary : 1;
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
@@ -220,26 +228,12 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec
return sector >> (ilog2(sdev->sector_size) - 9);
}
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-
-extern void sd_dif_config_host(struct scsi_disk *);
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-static inline void sd_dif_config_host(struct scsi_disk *disk)
-{
-}
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
-static inline int sd_is_zoned(struct scsi_disk *sdkp)
-{
- return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC;
-}
+void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim);
#ifdef CONFIG_BLK_DEV_ZONED
-int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
+int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
+ u8 buf[SD_BUF_SIZE]);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all);
@@ -250,7 +244,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
#else /* CONFIG_BLK_DEV_ZONED */
-static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
+static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
+ struct queue_limits *lim, u8 buf[SD_BUF_SIZE])
{
return 0;
}
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 1df847b5f747..ae6ce6f5d622 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -24,14 +24,15 @@
/*
* Configure exchange of protection information between OS and HBA.
*/
-void sd_dif_config_host(struct scsi_disk *sdkp)
+void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim)
{
struct scsi_device *sdp = sdkp->device;
- struct gendisk *disk = sdkp->disk;
u8 type = sdkp->protection_type;
- struct blk_integrity bi;
+ struct blk_integrity *bi = &lim->integrity;
int dif, dix;
+ memset(bi, 0, sizeof(*bi));
+
dif = scsi_host_dif_capable(sdp->host, type);
dix = scsi_host_dix_capable(sdp->host, type);
@@ -39,45 +40,33 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
dif = 0; dix = 1;
}
- if (!dix) {
- blk_integrity_unregister(disk);
+ if (!dix)
return;
- }
-
- memset(&bi, 0, sizeof(bi));
/* Enable DMA of protection information */
- if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
- if (type == T10_PI_TYPE3_PROTECTION)
- bi.profile = &t10_pi_type3_ip;
- else
- bi.profile = &t10_pi_type1_ip;
+ if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
+ bi->csum_type = BLK_INTEGRITY_CSUM_IP;
+ else
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
- bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
- } else
- if (type == T10_PI_TYPE3_PROTECTION)
- bi.profile = &t10_pi_type3_crc;
- else
- bi.profile = &t10_pi_type1_crc;
+ if (type != T10_PI_TYPE3_PROTECTION)
+ bi->flags |= BLK_INTEGRITY_REF_TAG;
- bi.tuple_size = sizeof(struct t10_pi_tuple);
+ bi->tuple_size = sizeof(struct t10_pi_tuple);
if (dif && type) {
- bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
if (!sdkp->ATO)
- goto out;
+ return;
if (type == T10_PI_TYPE3_PROTECTION)
- bi.tag_size = sizeof(u16) + sizeof(u32);
+ bi->tag_size = sizeof(u16) + sizeof(u32);
else
- bi.tag_size = sizeof(u16);
+ bi->tag_size = sizeof(u16);
}
sd_first_printk(KERN_NOTICE, sdkp,
"Enabling DIX %s, application tag size %u bytes\n",
- bi.profile->name, bi.tag_size);
-out:
- blk_integrity_register(disk, &bi);
+ blk_integrity_profile_name(bi), bi->tag_size);
}
-
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 806036e48abe..c8b9654d30f0 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -232,7 +232,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
int zone_idx = 0;
int ret;
- if (!sd_is_zoned(sdkp))
+ if (sdkp->device->type != TYPE_ZBC)
/* Not a zoned device */
return -EOPNOTSUPP;
@@ -300,7 +300,7 @@ static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t sector = blk_rq_pos(rq);
- if (!sd_is_zoned(sdkp))
+ if (sdkp->device->type != TYPE_ZBC)
/* Not a zoned device */
return BLK_STS_IOERR;
@@ -521,7 +521,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
static void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
- if (!sd_is_zoned(sdkp) || !sdkp->capacity)
+ if (sdkp->device->type != TYPE_ZBC || !sdkp->capacity)
return;
if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
@@ -565,12 +565,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
sdkp->zone_info.zone_blocks = zone_blocks;
sdkp->zone_info.nr_zones = nr_zones;
- blk_queue_chunk_sectors(q,
- logical_to_sectors(sdkp->device, zone_blocks));
-
- /* Enable block layer zone append emulation */
- blk_queue_max_zone_append_sectors(q, 0);
-
flags = memalloc_noio_save();
ret = blk_revalidate_disk_zones(disk);
memalloc_noio_restore(flags);
@@ -588,27 +582,31 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
/**
* sd_zbc_read_zones - Read zone information and update the request queue
* @sdkp: SCSI disk pointer.
+ * @lim: queue limits to read into
* @buf: 512 byte buffer used for storing SCSI command output.
*
* Read zone information and update the request queue zone characteristics and
* also the zoned device information in *sdkp. Called by sd_revalidate_disk()
* before the gendisk capacity has been set.
*/
-int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
+int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
+ u8 buf[SD_BUF_SIZE])
{
- struct gendisk *disk = sdkp->disk;
- struct request_queue *q = disk->queue;
unsigned int nr_zones;
u32 zone_blocks = 0;
int ret;
- if (!sd_is_zoned(sdkp)) {
- /*
- * Device managed or normal SCSI disk, no special handling
- * required.
- */
+ if (sdkp->device->type != TYPE_ZBC)
return 0;
- }
+
+ lim->features |= BLK_FEAT_ZONED;
+
+ /*
+ * Per ZBC and ZAC specifications, writes in sequential write required
+ * zones of host-managed devices must be aligned to the device physical
+ * block size.
+ */
+ lim->zone_write_granularity = sdkp->physical_block_size;
/* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */
sdkp->device->use_16_for_rw = 1;
@@ -625,18 +623,20 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
if (ret != 0)
goto err;
- /* The drive satisfies the kernel restrictions: set it up */
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- if (sdkp->zones_max_open == U32_MAX)
- disk_set_max_open_zones(disk, 0);
- else
- disk_set_max_open_zones(disk, sdkp->zones_max_open);
- disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
-
sdkp->early_zone_info.nr_zones = nr_zones;
sdkp->early_zone_info.zone_blocks = zone_blocks;
+ /* The drive satisfies the kernel restrictions: set it up */
+ if (sdkp->zones_max_open == U32_MAX)
+ lim->max_open_zones = 0;
+ else
+ lim->max_open_zones = sdkp->zones_max_open;
+ lim->max_active_zones = 0;
+ lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);
+ /* Enable block layer zone append emulation */
+ lim->max_zone_append_sectors = 0;
+
return 0;
err:
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 7ab000942b97..3f491019103e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -111,7 +111,7 @@ static struct lock_class_key sr_bio_compl_lkclass;
static int sr_open(struct cdrom_device_info *, int);
static void sr_release(struct cdrom_device_info *);
-static void get_sectorsize(struct scsi_cd *);
+static int get_sectorsize(struct scsi_cd *);
static int get_capabilities(struct scsi_cd *);
static unsigned int sr_check_events(struct cdrom_device_info *cdi,
@@ -473,15 +473,15 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
return BLK_STS_IOERR;
}
-static void sr_revalidate_disk(struct scsi_cd *cd)
+static int sr_revalidate_disk(struct scsi_cd *cd)
{
struct scsi_sense_hdr sshdr;
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
- return;
+ return 0;
sr_cd_check(&cd->cdi);
- get_sectorsize(cd);
+ return get_sectorsize(cd);
}
static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
@@ -494,13 +494,16 @@ static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
return -ENXIO;
scsi_autopm_get_device(sdev);
- if (disk_check_media_change(disk))
- sr_revalidate_disk(cd);
+ if (disk_check_media_change(disk)) {
+ ret = sr_revalidate_disk(cd);
+ if (ret)
+ goto out;
+ }
mutex_lock(&cd->lock);
ret = cdrom_open(&cd->cdi, mode);
mutex_unlock(&cd->lock);
-
+out:
scsi_autopm_put_device(sdev);
if (ret)
scsi_device_put(cd->device);
@@ -685,7 +688,9 @@ static int sr_probe(struct device *dev)
blk_pm_runtime_init(sdev->request_queue, dev);
dev_set_drvdata(dev, cd);
- sr_revalidate_disk(cd);
+ error = sr_revalidate_disk(cd);
+ if (error)
+ goto unregister_cdrom;
error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
if (error)
@@ -714,13 +719,14 @@ fail:
}
-static void get_sectorsize(struct scsi_cd *cd)
+static int get_sectorsize(struct scsi_cd *cd)
{
+ struct request_queue *q = cd->device->request_queue;
static const u8 cmd[10] = { READ_CAPACITY };
unsigned char buffer[8] = { };
- int the_result;
+ struct queue_limits lim;
+ int err;
int sector_size;
- struct request_queue *queue;
struct scsi_failure failure_defs[] = {
{
.result = SCMD_FAILURE_RESULT_ANY,
@@ -736,10 +742,10 @@ static void get_sectorsize(struct scsi_cd *cd)
};
/* Do the command and wait.. */
- the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer,
+ err = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer,
sizeof(buffer), SR_TIMEOUT, MAX_RETRIES,
&exec_args);
- if (the_result) {
+ if (err) {
cd->capacity = 0x1fffff;
sector_size = 2048; /* A guess, just in case */
} else {
@@ -789,10 +795,12 @@ static void get_sectorsize(struct scsi_cd *cd)
set_capacity(cd->disk, cd->capacity);
}
- queue = cd->device->request_queue;
- blk_queue_logical_block_size(queue, sector_size);
-
- return;
+ lim = queue_limits_start_update(q);
+ lim.logical_block_size = sector_size;
+ blk_mq_freeze_queue(q);
+ err = queue_limits_commit_update(q, &lim);
+ blk_mq_unfreeze_queue(q);
+ return err;
}
static int get_capabilities(struct scsi_cd *cd)
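
get_sectorsize() now reports failures to its callers and publishes the logical block size through the commit-based limits API rather than blk_queue_logical_block_size(). Stripped to its bare shape, the update sequence looks like this (illustrative fragment; q is assumed to be a live request queue and 2048 a hypothetical sector size):

    struct queue_limits lim;
    int err;

    lim = queue_limits_start_update(q);        /* snapshot the current limits */
    lim.logical_block_size = 2048;             /* hypothetical CD sector size */
    blk_mq_freeze_queue(q);                    /* quiesce I/O around the change */
    err = queue_limits_commit_update(q, &lim); /* validate and publish */
    blk_mq_unfreeze_queue(q);

If the commit fails, the previous limits are expected to stay in effect, which is why sr_block_open() and sr_probe() above now check the return value.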
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 1175f2e213b5..dc899277b3a4 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *);
int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
int sr_reset(struct cdrom_device_info *);
-int sr_select_speed(struct cdrom_device_info *cdi, int speed);
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed);
int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
int sr_is_xa(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 5b0b35e60e61..a0d2556a27bb 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi)
return 0;
}
-int sr_select_speed(struct cdrom_device_info *cdi, int speed)
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
+ /* avoid exceeding the max speed or overflowing integer bounds */
+ speed = clamp(speed, 0, 0xffff / 177);
+
if (speed == 0)
speed = 0xffff; /* set to max */
else
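
For context on the clamp bound above: 1x CD audio is roughly 176.4 kB/s, which the driver's existing Nx-to-kB/s conversion (not shown in this hunk) rounds to 177, and the MMC SET CD SPEED command carries the rate in a 16-bit field. A small illustrative calculation:

    unsigned long req = 1000;            /* hypothetical "1000x" request */
    unsigned long max = 0xffff / 177;    /* 370: largest multiplier whose kB/s rate fits 16 bits */
    unsigned long nx  = min(req, max);   /* 370 */
    unsigned long kbs = nx * 177;        /* 65490 <= 0xffff, so no wraparound */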
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 3f3039600357..a6453ffeb753 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -688,4 +688,5 @@ static struct platform_driver meson_msr_driver = {
},
};
module_platform_driver(meson_msr_driver);
+MODULE_DESCRIPTION("Amlogic Meson SoC Clock Measure driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 6abb730344ab..8809a948201a 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -63,7 +63,9 @@ static const struct meson_gx_package_id {
{ "962X", 0x24, 0x10, 0xf0 },
{ "962E", 0x24, 0x20, 0xf0 },
{ "A113X", 0x25, 0x37, 0xff },
+ { "A113X", 0x25, 0x43, 0xff },
{ "A113D", 0x25, 0x22, 0xff },
+ { "S905L", 0x26, 0, 0x0 },
{ "S905D2", 0x28, 0x10, 0xf0 },
{ "S905Y2", 0x28, 0x30, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index fcec6ed83d5e..a1e0bc8c1757 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -22,7 +22,7 @@ config FSL_GUTS
config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
- depends on FSL_MC_BUS
+ depends on FSL_MC_BUS && NET
select SOC_BUS
select FSL_GUTS
select DIMLIB
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index bdecb86bb656..27774ec6ff90 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig FSL_DPAA
bool "QorIQ DPAA1 framework support"
- depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT)
+ depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT)
select GENERIC_ALLOCATOR
help
The Freescale Data Path Acceleration Architecture (DPAA) is a set of
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index ec87d9d878f3..fe111bae38c8 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -252,4 +252,5 @@ free_soc:
return ret;
}
device_initcall(imx8_soc_init);
+MODULE_DESCRIPTION("NXP i.MX8M SoC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index 35825ee95dff..34a6f187c220 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -764,6 +764,7 @@ static struct platform_driver ixp4xx_npe_driver = {
module_platform_driver(ixp4xx_npe_driver);
MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx Network Processor Engine driver");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(NPE_A_FIRMWARE);
MODULE_FIRMWARE(NPE_B_FIRMWARE);
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
index 244ad8d7e80b..cb112f3643e9 100644
--- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -465,6 +465,7 @@ static struct platform_driver ixp4xx_qmgr_driver = {
};
module_platform_driver(ixp4xx_qmgr_driver);
+MODULE_DESCRIPTION("Intel IXP4xx Queue Manager driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");
diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig
index e6ba3573a772..f3f869639588 100644
--- a/drivers/soc/litex/Kconfig
+++ b/drivers/soc/litex/Kconfig
@@ -7,7 +7,7 @@ config LITEX
config LITEX_SOC_CONTROLLER
tristate "Enable LiteX SoC Controller driver"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select LITEX
help
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index 10813299aa10..72c44119dd54 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -82,13 +82,11 @@ static int litex_reset_handler(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
-#ifdef CONFIG_OF
static const struct of_device_id litex_soc_ctrl_of_match[] = {
{.compatible = "litex,soc-controller"},
{},
};
MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
-#endif /* CONFIG_OF */
static int litex_soc_ctrl_probe(struct platform_device *pdev)
{
@@ -130,7 +128,7 @@ static void litex_soc_ctrl_remove(struct platform_device *pdev)
static struct platform_driver litex_soc_ctrl_driver = {
.driver = {
.name = "litex-soc-controller",
- .of_match_table = of_match_ptr(litex_soc_ctrl_of_match)
+ .of_match_table = litex_soc_ctrl_of_match,
},
.probe = litex_soc_ctrl_probe,
.remove_new = litex_soc_ctrl_remove,
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 046522664dc1..a8fccedba83f 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -15,6 +15,7 @@
/* dedicate the last GPR_R15 to assign the register address to be poll */
#define CMDQ_POLL_ADDR_GPR (15)
#define CMDQ_EOC_IRQ_EN BIT(0)
+#define CMDQ_IMMEDIATE_VALUE 0
#define CMDQ_REG_TYPE 1
#define CMDQ_JUMP_RELATIVE 0
#define CMDQ_JUMP_ABSOLUTE 1
@@ -45,6 +46,16 @@ struct cmdq_instruction {
u8 op;
};
+static inline u8 cmdq_operand_get_type(struct cmdq_operand *op)
+{
+ return op->reg ? CMDQ_REG_TYPE : CMDQ_IMMEDIATE_VALUE;
+}
+
+static inline u16 cmdq_operand_get_idx_value(struct cmdq_operand *op)
+{
+ return op->reg ? op->idx : op->value;
+}
+
int cmdq_dev_get_client_reg(struct device *dev,
struct cmdq_client_reg *client_reg, int idx)
{
@@ -461,6 +472,29 @@ int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mas
}
EXPORT_SYMBOL(cmdq_pkt_poll_addr);
+int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
+ struct cmdq_operand *left_operand,
+ enum cmdq_logic_op s_op,
+ struct cmdq_operand *right_operand)
+{
+ struct cmdq_instruction inst = { {0} };
+
+ if (!left_operand || !right_operand || s_op >= CMDQ_LOGIC_MAX)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_LOGIC;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.src_t = cmdq_operand_get_type(left_operand);
+ inst.arg_c_t = cmdq_operand_get_type(right_operand);
+ inst.sop = s_op;
+ inst.reg_dst = result_reg_idx;
+ inst.src_reg = cmdq_operand_get_idx_value(left_operand);
+ inst.arg_c = cmdq_operand_get_idx_value(right_operand);
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_logic_command);
+
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
struct cmdq_instruction inst = {};
@@ -526,4 +560,5 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
}
EXPORT_SYMBOL(cmdq_pkt_finalize);
+MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver");
MODULE_LICENSE("GPL v2");
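
The new cmdq_pkt_logic_command() export appends a single CMDQ_CODE_LOGIC instruction whose left and right operands may each be a GCE internal register or an immediate, as selected by cmdq_operand_get_type(). A hedged usage sketch; the operand fields follow the accessors above, while the register indices and the CMDQ_LOGIC_ADD enumerator are assumed from enum cmdq_logic_op in include/linux/soc/mediatek/mtk-cmdq.h:

    /* operand/result indices are hypothetical GCE internal register numbers */
    struct cmdq_operand lhs = { .reg = true,  .idx   = 1  };
    struct cmdq_operand rhs = { .reg = false, .value = 16 };
    int ret;

    /* register[2] = register[1] + 16, appended as one LOGIC instruction */
    ret = cmdq_pkt_logic_command(pkt, 2, &lhs, CMDQ_LOGIC_ADD, &rhs);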
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index f370f4ec4b88..938240714e54 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -236,6 +236,7 @@ void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16
mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_ALPHA + (idx - 1) * 4, ~0,
alpha << 16 | alpha, cmdq_pkt);
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(15 + idx), 0, cmdq_pkt);
mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(19 + idx),
alpha_sel << (19 + idx), cmdq_pkt);
mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4,
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index b5af1fb5847e..01b129caf1eb 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -524,6 +524,7 @@ static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_PAD0] = MT8195_MUTEX_MOD_MDP_PAD0,
[MUTEX_MOD_IDX_MDP_PAD2] = MT8195_MUTEX_MOD_MDP_PAD2,
[MUTEX_MOD_IDX_MDP_PAD3] = MT8195_MUTEX_MOD_MDP_PAD3,
+ [MUTEX_MOD_IDX_MDP_TCC0] = MT8195_MUTEX_MOD_MDP_TCC0,
[MUTEX_MOD_IDX_MDP_WROT0] = MT8195_MUTEX_MOD_MDP_WROT0,
[MUTEX_MOD_IDX_MDP_WROT2] = MT8195_MUTEX_MOD_MDP_WROT2,
[MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5af33b0e3470..7f02f0525933 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -72,11 +72,28 @@ config QCOM_OCMEM
requirements. This is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.
+config QCOM_PD_MAPPER
+ tristate "Qualcomm Protection Domain Mapper"
+ select QCOM_QMI_HELPERS
+ select QCOM_PDR_MSG
+ select AUXILIARY_BUS
+ depends on NET && QRTR
+ default QCOM_RPROC_COMMON
+ help
+ The Protection Domain Mapper maps registered services to the domains
+ and instances handled by the remote DSPs. This is a kernel-space
+ implementation of the service. It is a simpler alternative to the
+ userspace daemon.
+
config QCOM_PDR_HELPERS
tristate
select QCOM_QMI_HELPERS
+ select QCOM_PDR_MSG
depends on NET
+config QCOM_PDR_MSG
+ tristate
+
config QCOM_PMIC_PDCHARGER_ULOG
tristate "Qualcomm PMIC PDCharger ULOG driver"
depends on RPMSG
@@ -194,6 +211,7 @@ config QCOM_SMP2P
config QCOM_SMSM
tristate "Qualcomm Shared Memory State Machine"
+ depends on MAILBOX
depends on QCOM_SMEM
select QCOM_SMEM_STATE
select IRQ_DOMAIN
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ca0bece0dfff..d3560f861085 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -7,7 +7,9 @@ obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
+obj-$(CONFIG_QCOM_PD_MAPPER) += qcom_pd_mapper.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
+obj-$(CONFIG_QCOM_PDR_MSG) += qcom_pdr_msg.o
obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o
obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o
obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index fb323b3364db..e7851974084b 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -565,7 +565,7 @@ static void bwmon_start(struct icc_bwmon *bwmon)
int window;
/* No need to check for errors, as this must have succeeded before. */
- dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0);
+ dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0));
bwmon_clear_counters(bwmon, true);
@@ -772,18 +772,25 @@ static int bwmon_probe(struct platform_device *pdev)
opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
if (IS_ERR(opp))
return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
+ dev_pm_opp_put(opp);
bwmon->min_bw_kbps = 0;
opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
if (IS_ERR(opp))
return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
+ dev_pm_opp_put(opp);
bwmon->dev = dev;
bwmon_disable(bwmon);
- ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr,
- bwmon_intr_thread,
- IRQF_ONESHOT, dev_name(dev), bwmon);
+
+ /*
+ * SoCs with multiple cpu-bwmon instances can end up using a shared interrupt
+ * line. Using the devm_ variant might result in the IRQ handler being executed
+ * after bwmon_disable in bwmon_remove()
+ */
+ ret = request_threaded_irq(bwmon->irq, bwmon_intr, bwmon_intr_thread,
+ IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), bwmon);
if (ret)
return dev_err_probe(dev, ret, "failed to request IRQ\n");
@@ -798,6 +805,7 @@ static void bwmon_remove(struct platform_device *pdev)
struct icc_bwmon *bwmon = platform_get_drvdata(pdev);
bwmon_disable(bwmon);
+ free_irq(bwmon->irq, bwmon);
}
static const struct icc_bwmon_data msm8998_bwmon_data = {
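
Two fixes are folded into the bwmon changes above: every dev_pm_opp_find_bw_*() lookup returns a reference that must be dropped with dev_pm_opp_put(), and the interrupt is now requested without devm and freed explicitly in bwmon_remove(), so a handler on the shared line cannot run long after the monitor has been disabled. The OPP half, as a minimal sketch (function name and caller are placeholders):

    static int example_min_bw(struct device *dev, unsigned int *bw_kbps)
    {
            struct dev_pm_opp *opp;

            *bw_kbps = 0;
            opp = dev_pm_opp_find_bw_ceil(dev, bw_kbps, 0);  /* takes an OPP reference */
            if (IS_ERR(opp))
                    return PTR_ERR(opp);

            dev_pm_opp_put(opp);  /* drop it once *bw_kbps holds the ceiling value */
            return 0;
    }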
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index cbef0dea1d5d..37e11e501728 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -150,6 +151,25 @@ enum llcc_reg_offset {
LLCC_COMMON_STATUS0,
};
+static const struct llcc_slice_config sa8775p_data[] = {
+ {LLCC_CPUSS, 1, 2048, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 1, 0, 0},
+ {LLCC_VIDSC0, 2, 512, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_CPUSS1, 3, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_CPUHWT, 5, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_AUDIO, 6, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CMPT, 10, 4096, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_GPUHTW, 11, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_GPU, 12, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 1, 0},
+ {LLCC_MMUHWT, 13, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0},
+ {LLCC_CMPTDMA, 15, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_DISP, 16, 4096, 2, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_VIDFW, 17, 3072, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_AUDHW, 22, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CVP, 28, 256, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0},
+ {LLCC_WRCACHE, 31, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0},
+};
+
static const struct llcc_slice_config sc7180_data[] = {
{ LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
{ LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
@@ -552,6 +572,16 @@ static const struct qcom_llcc_config qdu1000_cfg[] = {
},
};
+static const struct qcom_llcc_config sa8775p_cfg[] = {
+ {
+ .sct_data = sa8775p_data,
+ .size = ARRAY_SIZE(sa8775p_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
static const struct qcom_llcc_config sc7180_cfg[] = {
{
.sct_data = sc7180_data,
@@ -698,6 +728,11 @@ static const struct qcom_sct_config qdu1000_cfgs = {
.num_config = ARRAY_SIZE(qdu1000_cfg),
};
+static const struct qcom_sct_config sa8775p_cfgs = {
+ .llcc_config = sa8775p_cfg,
+ .num_config = ARRAY_SIZE(sa8775p_cfg),
+};
+
static const struct qcom_sct_config sc7180_cfgs = {
.llcc_config = sc7180_cfg,
.num_config = ARRAY_SIZE(sc7180_cfg),
@@ -821,6 +856,7 @@ EXPORT_SYMBOL_GPL(llcc_slice_putd);
static int llcc_update_act_ctrl(u32 sid,
u32 act_ctrl_reg_val, u32 status)
{
+ struct regmap *regmap;
u32 act_ctrl_reg;
u32 act_clear_reg;
u32 status_reg;
@@ -849,7 +885,8 @@ static int llcc_update_act_ctrl(u32 sid,
return ret;
if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
- ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
+ regmap = drv_data->bcast_and_regmap ?: drv_data->bcast_regmap;
+ ret = regmap_read_poll_timeout(regmap, status_reg,
slice_status, (slice_status & ACT_COMPLETE),
0, LLCC_STATUS_READ_DELAY);
if (ret)
@@ -1258,16 +1295,13 @@ static int qcom_llcc_probe(struct platform_device *pdev)
/* Initialize rest of LLCC bank regmaps */
for (i = 1; i < num_banks; i++) {
- char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);
+ char *base __free(kfree) = kasprintf(GFP_KERNEL, "llcc%d_base", i);
drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
if (IS_ERR(drv_data->regmaps[i])) {
ret = PTR_ERR(drv_data->regmaps[i]);
- kfree(base);
goto err;
}
-
- kfree(base);
}
drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
@@ -1284,6 +1318,18 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->version = version;
+ /* Applicable only when drv_data->version >= 4.1 */
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
+ drv_data->bcast_and_regmap = qcom_llcc_init_mmio(pdev, i + 1, "llcc_broadcast_and_base");
+ if (IS_ERR(drv_data->bcast_and_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_and_regmap);
+ if (ret == -EINVAL)
+ drv_data->bcast_and_regmap = NULL;
+ else
+ goto err;
+ }
+ }
+
llcc_cfg = cfg->sct_data;
sz = cfg->size;
@@ -1332,6 +1378,7 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
+ { .compatible = "qcom,sa8775p-llcc", .data = &sa8775p_cfgs },
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfgs },
{ .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfgs },
{ .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfgs },
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 6f177e46fa0f..b2c0fb55d4ae 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -7,6 +7,7 @@
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/elf.h>
#include <linux/firmware.h>
@@ -37,13 +38,12 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
{
const struct elf32_phdr *phdr = &phdrs[segment];
const struct firmware *seg_fw;
- char *seg_name;
ssize_t ret;
if (strlen(fw_name) < 4)
return -EINVAL;
- seg_name = kstrdup(fw_name, GFP_KERNEL);
+ char *seg_name __free(kfree) = kstrdup(fw_name, GFP_KERNEL);
if (!seg_name)
return -ENOMEM;
@@ -52,7 +52,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
ptr, phdr->p_filesz);
if (ret) {
dev_err(dev, "error %zd loading %s\n", ret, seg_name);
- kfree(seg_name);
return ret;
}
@@ -64,7 +63,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
}
release_firmware(seg_fw);
- kfree(seg_name);
return ret;
}
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index e8841d247953..6b6dd80cbc0f 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -10,6 +10,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -216,7 +217,6 @@ EXPORT_SYMBOL_GPL(of_get_ocmem);
struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
unsigned long size)
{
- struct ocmem_buf *buf;
int ret;
/* TODO: add support for other clients... */
@@ -229,7 +229,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
return ERR_PTR(-EBUSY);
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ struct ocmem_buf *buf __free(kfree) = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_unlock;
@@ -247,7 +247,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
if (ret) {
dev_err(ocmem->dev, "could not lock: %d\n", ret);
ret = -EINVAL;
- goto err_kfree;
+ goto err_unlock;
}
} else {
ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
@@ -258,10 +258,8 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
size / 1024, buf->addr, client);
- return buf;
+ return_ptr(buf);
-err_kfree:
- kfree(buf);
err_unlock:
clear_bit_unlock(BIT(client), &ocmem->active_allocations);
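
The ocmem and mdt_loader conversions above use the scope-based cleanup helpers from <linux/cleanup.h>: a local annotated with __free(kfree) is freed automatically on every exit from its scope, and return_ptr() disarms that cleanup when ownership is handed to the caller. A minimal sketch of the idiom (the function itself is illustrative):

    #include <linux/cleanup.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static struct ocmem_buf *example_alloc(void)
    {
            struct ocmem_buf *buf __free(kfree) = kzalloc(sizeof(*buf), GFP_KERNEL);

            if (!buf)
                    return ERR_PTR(-ENOMEM);

            /* any early return in this scope frees buf automatically */

            return_ptr(buf);  /* transfer ownership to the caller, no kfree */
    }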
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index a1b6a4081dea..328b6153b2be 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -3,6 +3,7 @@
* Copyright (C) 2020 The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -76,12 +77,12 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
locator_hdl);
struct pdr_service *pds;
+ mutex_lock(&pdr->lock);
/* Create a local client port for QMI communication */
pdr->locator_addr.sq_family = AF_QIPCRTR;
pdr->locator_addr.sq_node = svc->node;
pdr->locator_addr.sq_port = svc->port;
- mutex_lock(&pdr->lock);
pdr->locator_init_complete = true;
mutex_unlock(&pdr->lock);
@@ -104,10 +105,10 @@ static void pdr_locator_del_server(struct qmi_handle *qmi,
mutex_lock(&pdr->lock);
pdr->locator_init_complete = false;
- mutex_unlock(&pdr->lock);
pdr->locator_addr.sq_node = 0;
pdr->locator_addr.sq_port = 0;
+ mutex_unlock(&pdr->lock);
}
static const struct qmi_ops pdr_locator_ops = {
@@ -365,12 +366,14 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
if (ret < 0)
return ret;
+ mutex_lock(&pdr->lock);
ret = qmi_send_request(&pdr->locator_hdl,
&pdr->locator_addr,
&txn, SERVREG_GET_DOMAIN_LIST_REQ,
SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
servreg_get_domain_list_req_ei,
req);
+ mutex_unlock(&pdr->lock);
if (ret < 0) {
qmi_txn_cancel(&txn);
return ret;
@@ -394,13 +397,13 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
{
- struct servreg_get_domain_list_resp *resp;
struct servreg_get_domain_list_req req;
struct servreg_location_entry *entry;
int domains_read = 0;
int ret, i;
- resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc(sizeof(*resp),
+ GFP_KERNEL);
if (!resp)
return -ENOMEM;
@@ -413,9 +416,9 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
req.domain_offset = domains_read;
ret = pdr_get_domain_list(&req, resp, pdr);
if (ret < 0)
- goto out;
+ return ret;
- for (i = domains_read; i < resp->domain_list_len; i++) {
+ for (i = 0; i < resp->domain_list_len; i++) {
entry = &resp->domain_list[i];
if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
@@ -425,7 +428,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
pds->service_data_valid = entry->service_data_valid;
pds->service_data = entry->service_data;
pds->instance = entry->instance;
- goto out;
+ return 0;
}
}
@@ -438,8 +441,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
domains_read += resp->domain_list_len;
} while (domains_read < resp->total_domains);
-out:
- kfree(resp);
+
return ret;
}
@@ -515,8 +517,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
const char *service_name,
const char *service_path)
{
- struct pdr_service *pds, *tmp;
- int ret;
+ struct pdr_service *tmp;
if (IS_ERR_OR_NULL(pdr))
return ERR_PTR(-EINVAL);
@@ -525,7 +526,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
!service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
return ERR_PTR(-EINVAL);
- pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ struct pdr_service *pds __free(kfree) = kzalloc(sizeof(*pds), GFP_KERNEL);
if (!pds)
return ERR_PTR(-ENOMEM);
@@ -540,8 +541,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
continue;
mutex_unlock(&pdr->list_lock);
- ret = -EALREADY;
- goto err;
+ return ERR_PTR(-EALREADY);
}
list_add(&pds->node, &pdr->lookups);
@@ -549,10 +549,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
schedule_work(&pdr->locator_work);
- return pds;
-err:
- kfree(pds);
- return ERR_PTR(ret);
+ return_ptr(pds);
}
EXPORT_SYMBOL_GPL(pdr_add_lookup);
@@ -649,13 +646,12 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
char *service_path,
void *priv), void *priv)
{
- struct pdr_handle *pdr;
int ret;
if (!status)
return ERR_PTR(-EINVAL);
- pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
+ struct pdr_handle *pdr __free(kfree) = kzalloc(sizeof(*pdr), GFP_KERNEL);
if (!pdr)
return ERR_PTR(-ENOMEM);
@@ -674,10 +670,8 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
INIT_WORK(&pdr->indack_work, pdr_indack_work);
pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
- if (!pdr->notifier_wq) {
- ret = -ENOMEM;
- goto free_pdr_handle;
- }
+ if (!pdr->notifier_wq)
+ return ERR_PTR(-ENOMEM);
pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
if (!pdr->indack_wq) {
@@ -702,7 +696,7 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
if (ret < 0)
goto release_qmi_handle;
- return pdr;
+ return_ptr(pdr);
release_qmi_handle:
qmi_handle_release(&pdr->locator_hdl);
@@ -710,8 +704,6 @@ destroy_indack:
destroy_workqueue(pdr->indack_wq);
destroy_notifier:
destroy_workqueue(pdr->notifier_wq);
-free_pdr_handle:
- kfree(pdr);
return ERR_PTR(ret);
}
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index 03c282b7f17e..8d17f7fb79e7 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -13,6 +13,8 @@
#define SERVREG_SET_ACK_REQ 0x23
#define SERVREG_RESTART_PD_REQ 0x24
+#define SERVREG_LOC_PFR_REQ 0x24
+
#define SERVREG_DOMAIN_LIST_LENGTH 32
#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
@@ -20,6 +22,7 @@
#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389
+#define SERVREG_LOC_PFR_RESP_MAX_LEN 10
struct servreg_location_entry {
char name[SERVREG_NAME_LENGTH + 1];
@@ -28,83 +31,12 @@ struct servreg_location_entry {
u32 instance;
};
-static const struct qmi_elem_info servreg_location_entry_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- name),
- },
- {
- .data_type = QMI_UNSIGNED_4_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- instance),
- },
- {
- .data_type = QMI_UNSIGNED_1_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- service_data_valid),
- },
- {
- .data_type = QMI_UNSIGNED_4_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- service_data),
- },
- {}
-};
-
struct servreg_get_domain_list_req {
char service_name[SERVREG_NAME_LENGTH + 1];
u8 domain_offset_valid;
u32 domain_offset;
};
-static const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_get_domain_list_req,
- service_name),
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_req,
- domain_offset_valid),
- },
- {
- .data_type = QMI_UNSIGNED_4_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_req,
- domain_offset),
- },
- {}
-};
-
struct servreg_get_domain_list_resp {
struct qmi_response_type_v01 resp;
u8 total_domains_valid;
@@ -116,264 +48,60 @@ struct servreg_get_domain_list_resp {
struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
};
-static const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- total_domains_valid),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- total_domains),
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x11,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- db_rev_count_valid),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x11,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- db_rev_count),
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x12,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- domain_list_valid),
- },
- {
- .data_type = QMI_DATA_LEN,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x12,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- domain_list_len),
- },
- {
- .data_type = QMI_STRUCT,
- .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
- .elem_size = sizeof(struct servreg_location_entry),
- .array_type = VAR_LEN_ARRAY,
- .tlv_type = 0x12,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- domain_list),
- .ei_array = servreg_location_entry_ei,
- },
- {}
-};
-
struct servreg_register_listener_req {
u8 enable;
char service_path[SERVREG_NAME_LENGTH + 1];
};
-static const struct qmi_elem_info servreg_register_listener_req_ei[] = {
- {
- .data_type = QMI_UNSIGNED_1_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_register_listener_req,
- enable),
- },
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_register_listener_req,
- service_path),
- },
- {}
-};
-
struct servreg_register_listener_resp {
struct qmi_response_type_v01 resp;
u8 curr_state_valid;
enum servreg_service_state curr_state;
};
-static const struct qmi_elem_info servreg_register_listener_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_register_listener_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_register_listener_resp,
- curr_state_valid),
- },
- {
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
- .elem_len = 1,
- .elem_size = sizeof(enum servreg_service_state),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_register_listener_resp,
- curr_state),
- },
- {}
-};
-
struct servreg_restart_pd_req {
char service_path[SERVREG_NAME_LENGTH + 1];
};
-static const struct qmi_elem_info servreg_restart_pd_req_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_restart_pd_req,
- service_path),
- },
- {}
-};
-
struct servreg_restart_pd_resp {
struct qmi_response_type_v01 resp;
};
-static const struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_restart_pd_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {}
-};
-
struct servreg_state_updated_ind {
enum servreg_service_state curr_state;
char service_path[SERVREG_NAME_LENGTH + 1];
u16 transaction_id;
};
-static const struct qmi_elem_info servreg_state_updated_ind_ei[] = {
- {
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_state_updated_ind,
- curr_state),
- },
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_state_updated_ind,
- service_path),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x03,
- .offset = offsetof(struct servreg_state_updated_ind,
- transaction_id),
- },
- {}
-};
-
struct servreg_set_ack_req {
char service_path[SERVREG_NAME_LENGTH + 1];
u16 transaction_id;
};
-static const struct qmi_elem_info servreg_set_ack_req_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_set_ack_req,
- service_path),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_set_ack_req,
- transaction_id),
- },
- {}
-};
-
struct servreg_set_ack_resp {
struct qmi_response_type_v01 resp;
};
-static const struct qmi_elem_info servreg_set_ack_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_set_ack_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {}
+struct servreg_loc_pfr_req {
+ char service[SERVREG_NAME_LENGTH + 1];
+ char reason[257];
};
+struct servreg_loc_pfr_resp {
+ struct qmi_response_type_v01 rsp;
+};
+
+extern const struct qmi_elem_info servreg_location_entry_ei[];
+extern const struct qmi_elem_info servreg_get_domain_list_req_ei[];
+extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[];
+extern const struct qmi_elem_info servreg_register_listener_req_ei[];
+extern const struct qmi_elem_info servreg_register_listener_resp_ei[];
+extern const struct qmi_elem_info servreg_restart_pd_req_ei[];
+extern const struct qmi_elem_info servreg_restart_pd_resp_ei[];
+extern const struct qmi_elem_info servreg_state_updated_ind_ei[];
+extern const struct qmi_elem_info servreg_set_ack_req_ei[];
+extern const struct qmi_elem_info servreg_set_ack_resp_ei[];
+extern const struct qmi_elem_info servreg_loc_pfr_req_ei[];
+extern const struct qmi_elem_info servreg_loc_pfr_resp_ei[];
+
#endif
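
With the tables declared extern here and defined in the new qcom_pdr_msg.c further down, both the PDR client and the PD mapper share one copy of the QMI wire description. Each qmi_elem_info entry binds a TLV type to a structure field; a condensed illustrative pair (the message name is hypothetical, the qmi_elem_info fields are the real ones used throughout):

    struct example_listener_req {
            u8 enable;
    };

    static const struct qmi_elem_info example_listener_req_ei[] = {
            {
                    .data_type  = QMI_UNSIGNED_1_BYTE,
                    .elem_len   = 1,
                    .elem_size  = sizeof(u8),
                    .array_type = NO_ARRAY,
                    .tlv_type   = 0x01,
                    .offset     = offsetof(struct example_listener_req, enable),
            },
            {}
    };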
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 40fb09d69014..9ebc0ba35947 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -348,11 +348,15 @@ static void pmic_glink_remove(struct platform_device *pdev)
mutex_unlock(&__pmic_glink_lock);
}
+static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
+ BIT(PMIC_GLINK_CLIENT_ALTMODE);
+
static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
BIT(PMIC_GLINK_CLIENT_ALTMODE) |
BIT(PMIC_GLINK_CLIENT_UCSI);
static const struct of_device_id pmic_glink_of_match[] = {
+ { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8280xp_client_mask },
{ .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask },
{}
};
@@ -369,8 +373,17 @@ static struct platform_driver pmic_glink_driver = {
static int pmic_glink_init(void)
{
- platform_driver_register(&pmic_glink_driver);
- register_rpmsg_driver(&pmic_glink_rpmsg_driver);
+ int ret;
+
+ ret = platform_driver_register(&pmic_glink_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = register_rpmsg_driver(&pmic_glink_rpmsg_driver);
+ if (ret < 0) {
+ platform_driver_unregister(&pmic_glink_driver);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index b3808fc24c69..1e0808b3cb93 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -20,7 +20,7 @@
#include <linux/soc/qcom/pmic_glink.h>
-#define PMIC_GLINK_MAX_PORTS 2
+#define PMIC_GLINK_MAX_PORTS 3
#define USBC_SC8180X_NOTIFY_IND 0x13
#define USBC_CMD_WRITE_REQ 0x15
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
new file mode 100644
index 000000000000..a4c007080665
--- /dev/null
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Protection Domain mapper
+ *
+ * Copyright (c) 2023 Linaro Ltd.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "pdr_internal.h"
+
+#define SERVREG_QMI_VERSION 0x101
+#define SERVREG_QMI_INSTANCE 0
+
+#define TMS_SERVREG_SERVICE "tms/servreg"
+
+struct qcom_pdm_domain_data {
+ const char *domain;
+ u32 instance_id;
+ /* NULL-terminated array */
+ const char * services[];
+};
+
+struct qcom_pdm_domain {
+ struct list_head list;
+ const char *name;
+ u32 instance_id;
+};
+
+struct qcom_pdm_service {
+ struct list_head list;
+ struct list_head domains;
+ const char *name;
+};
+
+struct qcom_pdm_data {
+ refcount_t refcnt;
+ struct qmi_handle handle;
+ struct list_head services;
+};
+
+static DEFINE_MUTEX(qcom_pdm_mutex); /* protects __qcom_pdm_data */
+static struct qcom_pdm_data *__qcom_pdm_data;
+
+static struct qcom_pdm_service *qcom_pdm_find(struct qcom_pdm_data *data,
+ const char *name)
+{
+ struct qcom_pdm_service *service;
+
+ list_for_each_entry(service, &data->services, list) {
+ if (!strcmp(service->name, name))
+ return service;
+ }
+
+ return NULL;
+}
+
+static int qcom_pdm_add_service_domain(struct qcom_pdm_data *data,
+ const char *service_name,
+ const char *domain_name,
+ u32 instance_id)
+{
+ struct qcom_pdm_service *service;
+ struct qcom_pdm_domain *domain;
+
+ service = qcom_pdm_find(data, service_name);
+ if (service) {
+ list_for_each_entry(domain, &service->domains, list) {
+ if (!strcmp(domain->name, domain_name))
+ return -EBUSY;
+ }
+ } else {
+ service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (!service)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&service->domains);
+ service->name = service_name;
+
+ list_add_tail(&service->list, &data->services);
+ }
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain) {
+ if (list_empty(&service->domains)) {
+ list_del(&service->list);
+ kfree(service);
+ }
+
+ return -ENOMEM;
+ }
+
+ domain->name = domain_name;
+ domain->instance_id = instance_id;
+ list_add_tail(&domain->list, &service->domains);
+
+ return 0;
+}
+
+static int qcom_pdm_add_domain(struct qcom_pdm_data *data,
+ const struct qcom_pdm_domain_data *domain)
+{
+ int ret;
+ int i;
+
+ ret = qcom_pdm_add_service_domain(data,
+ TMS_SERVREG_SERVICE,
+ domain->domain,
+ domain->instance_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; domain->services[i]; i++) {
+ ret = qcom_pdm_add_service_domain(data,
+ domain->services[i],
+ domain->domain,
+ domain->instance_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+
+}
+
+static void qcom_pdm_free_domains(struct qcom_pdm_data *data)
+{
+ struct qcom_pdm_service *service, *tservice;
+ struct qcom_pdm_domain *domain, *tdomain;
+
+ list_for_each_entry_safe(service, tservice, &data->services, list) {
+ list_for_each_entry_safe(domain, tdomain, &service->domains, list) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+
+ list_del(&service->list);
+ kfree(service);
+ }
+}
+
+static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct qcom_pdm_data *data = container_of(qmi, struct qcom_pdm_data, handle);
+ const struct servreg_get_domain_list_req *req = decoded;
+ struct servreg_get_domain_list_resp *rsp;
+ struct qcom_pdm_service *service;
+ u32 offset;
+ int ret;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return;
+
+ offset = req->domain_offset_valid ? req->domain_offset : 0;
+
+ rsp->resp.result = QMI_RESULT_SUCCESS_V01;
+ rsp->resp.error = QMI_ERR_NONE_V01;
+
+ rsp->db_rev_count_valid = true;
+ rsp->db_rev_count = 1;
+
+ rsp->total_domains_valid = true;
+ rsp->total_domains = 0;
+
+ mutex_lock(&qcom_pdm_mutex);
+
+ service = qcom_pdm_find(data, req->service_name);
+ if (service) {
+ struct qcom_pdm_domain *domain;
+
+ rsp->domain_list_valid = true;
+ rsp->domain_list_len = 0;
+
+ list_for_each_entry(domain, &service->domains, list) {
+ u32 i = rsp->total_domains++;
+
+ if (i >= offset && i < SERVREG_DOMAIN_LIST_LENGTH) {
+ u32 j = rsp->domain_list_len++;
+
+ strscpy(rsp->domain_list[j].name, domain->name,
+ sizeof(rsp->domain_list[j].name));
+ rsp->domain_list[j].instance = domain->instance_id;
+
+ pr_debug("PDM: found %s / %d\n", domain->name,
+ domain->instance_id);
+ }
+ }
+ }
+
+ pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->service_name,
+ req->domain_offset_valid ? req->domain_offset : -1, rsp->domain_list_len, rsp->total_domains);
+
+ ret = qmi_send_response(qmi, sq, txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+ servreg_get_domain_list_resp_ei, rsp);
+ if (ret)
+ pr_err("Error sending servreg response: %d\n", ret);
+
+ mutex_unlock(&qcom_pdm_mutex);
+
+ kfree(rsp);
+}
+
+static void qcom_pdm_pfr(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ const struct servreg_loc_pfr_req *req = decoded;
+ struct servreg_loc_pfr_resp rsp = {};
+ int ret;
+
+ pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR_REQ,
+ SERVREG_LOC_PFR_RESP_MAX_LEN,
+ servreg_loc_pfr_resp_ei, &rsp);
+ if (ret)
+ pr_err("Error sending servreg response: %d\n", ret);
+}
+
+static const struct qmi_msg_handler qcom_pdm_msg_handlers[] = {
+ {
+ .type = QMI_REQUEST,
+ .msg_id = SERVREG_GET_DOMAIN_LIST_REQ,
+ .ei = servreg_get_domain_list_req_ei,
+ .decoded_size = sizeof(struct servreg_get_domain_list_req),
+ .fn = qcom_pdm_get_domain_list,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = SERVREG_LOC_PFR_REQ,
+ .ei = servreg_loc_pfr_req_ei,
+ .decoded_size = sizeof(struct servreg_loc_pfr_req),
+ .fn = qcom_pdm_pfr,
+ },
+ { },
+};
+
+static const struct qcom_pdm_domain_data adsp_audio_pd = {
+ .domain = "msm/adsp/audio_pd",
+ .instance_id = 74,
+ .services = {
+ "avs/audio",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data adsp_charger_pd = {
+ .domain = "msm/adsp/charger_pd",
+ .instance_id = 74,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data adsp_root_pd = {
+ .domain = "msm/adsp/root_pd",
+ .instance_id = 74,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data adsp_root_pd_pdr = {
+ .domain = "msm/adsp/root_pd",
+ .instance_id = 74,
+ .services = {
+ "tms/pdr_enabled",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data adsp_sensor_pd = {
+ .domain = "msm/adsp/sensor_pd",
+ .instance_id = 74,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data msm8996_adsp_audio_pd = {
+ .domain = "msm/adsp/audio_pd",
+ .instance_id = 4,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data msm8996_adsp_root_pd = {
+ .domain = "msm/adsp/root_pd",
+ .instance_id = 4,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data cdsp_root_pd = {
+ .domain = "msm/cdsp/root_pd",
+ .instance_id = 76,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data slpi_root_pd = {
+ .domain = "msm/slpi/root_pd",
+ .instance_id = 90,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data slpi_sensor_pd = {
+ .domain = "msm/slpi/sensor_pd",
+ .instance_id = 90,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data mpss_root_pd = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 180,
+ .services = {
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data mpss_root_pd_gps = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 180,
+ .services = {
+ "gps/gps_service",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data mpss_root_pd_gps_pdr = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 180,
+ .services = {
+ "gps/gps_service",
+ "tms/pdr_enabled",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data msm8996_mpss_root_pd = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 100,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data mpss_wlan_pd = {
+ .domain = "msm/modem/wlan_pd",
+ .instance_id = 180,
+ .services = {
+ "kernel/elf_loader",
+ "wlan/fw",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data *msm8996_domains[] = {
+ &msm8996_adsp_audio_pd,
+ &msm8996_adsp_root_pd,
+ &msm8996_mpss_root_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *msm8998_domains[] = {
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *qcm2290_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *qcs404_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc7180_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_sensor_pd,
+ &mpss_root_pd_gps_pdr,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc7280_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_charger_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps_pdr,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc8180x_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_charger_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc8280xp_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_charger_pd,
+ &cdsp_root_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sdm660_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sdm670_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sdm845_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ &slpi_root_pd,
+ &slpi_sensor_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm6115_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm6350_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8150_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8250_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &slpi_root_pd,
+ &slpi_sensor_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8350_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_charger_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &slpi_root_pd,
+ &slpi_sensor_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8550_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_charger_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ NULL,
+};
+
+static const struct of_device_id qcom_pdm_domains[] = {
+ { .compatible = "qcom,apq8064", .data = NULL, },
+ { .compatible = "qcom,apq8074", .data = NULL, },
+ { .compatible = "qcom,apq8084", .data = NULL, },
+ { .compatible = "qcom,apq8096", .data = msm8996_domains, },
+ { .compatible = "qcom,msm8226", .data = NULL, },
+ { .compatible = "qcom,msm8974", .data = NULL, },
+ { .compatible = "qcom,msm8996", .data = msm8996_domains, },
+ { .compatible = "qcom,msm8998", .data = msm8998_domains, },
+ { .compatible = "qcom,qcm2290", .data = qcm2290_domains, },
+ { .compatible = "qcom,qcs404", .data = qcs404_domains, },
+ { .compatible = "qcom,sc7180", .data = sc7180_domains, },
+ { .compatible = "qcom,sc7280", .data = sc7280_domains, },
+ { .compatible = "qcom,sc8180x", .data = sc8180x_domains, },
+ { .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, },
+ { .compatible = "qcom,sda660", .data = sdm660_domains, },
+ { .compatible = "qcom,sdm660", .data = sdm660_domains, },
+ { .compatible = "qcom,sdm670", .data = sdm670_domains, },
+ { .compatible = "qcom,sdm845", .data = sdm845_domains, },
+ { .compatible = "qcom,sm4250", .data = sm6115_domains, },
+ { .compatible = "qcom,sm6115", .data = sm6115_domains, },
+ { .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm8150", .data = sm8150_domains, },
+ { .compatible = "qcom,sm8250", .data = sm8250_domains, },
+ { .compatible = "qcom,sm8350", .data = sm8350_domains, },
+ { .compatible = "qcom,sm8450", .data = sm8350_domains, },
+ { .compatible = "qcom,sm8550", .data = sm8550_domains, },
+ { .compatible = "qcom,sm8650", .data = sm8550_domains, },
+ {},
+};
+
+static void qcom_pdm_stop(struct qcom_pdm_data *data)
+{
+ qcom_pdm_free_domains(data);
+
+ /* The server is removed automatically */
+ qmi_handle_release(&data->handle);
+
+ kfree(data);
+}
+
+static struct qcom_pdm_data *qcom_pdm_start(void)
+{
+ const struct qcom_pdm_domain_data * const *domains;
+ const struct of_device_id *match;
+ struct qcom_pdm_data *data;
+ struct device_node *root;
+ int ret, i;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return ERR_PTR(-ENODEV);
+
+ match = of_match_node(qcom_pdm_domains, root);
+ of_node_put(root);
+ if (!match) {
+ pr_notice("PDM: no support for the platform, userspace daemon might be required.\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ domains = match->data;
+ if (!domains) {
+ pr_debug("PDM: no domains\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&data->services);
+
+ ret = qmi_handle_init(&data->handle, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ NULL, qcom_pdm_msg_handlers);
+ if (ret) {
+ kfree(data);
+ return ERR_PTR(ret);
+ }
+
+ refcount_set(&data->refcnt, 1);
+
+ for (i = 0; domains[i]; i++) {
+ ret = qcom_pdm_add_domain(data, domains[i]);
+ if (ret)
+ goto err_stop;
+ }
+
+ ret = qmi_add_server(&data->handle, SERVREG_LOCATOR_SERVICE,
+ SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
+ if (ret) {
+ pr_err("PDM: error adding server %d\n", ret);
+ goto err_stop;
+ }
+
+ return data;
+
+err_stop:
+ qcom_pdm_stop(data);
+
+ return ERR_PTR(ret);
+}
+
+static int qcom_pdm_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+
+{
+ struct qcom_pdm_data *data;
+ int ret = 0;
+
+ mutex_lock(&qcom_pdm_mutex);
+
+ if (!__qcom_pdm_data) {
+ data = qcom_pdm_start();
+
+ if (IS_ERR(data))
+ ret = PTR_ERR(data);
+ else
+ __qcom_pdm_data = data;
+ }
+
+ auxiliary_set_drvdata(auxdev, __qcom_pdm_data);
+
+ mutex_unlock(&qcom_pdm_mutex);
+
+ return ret;
+}
+
+static void qcom_pdm_remove(struct auxiliary_device *auxdev)
+{
+ struct qcom_pdm_data *data;
+
+ data = auxiliary_get_drvdata(auxdev);
+ if (!data)
+ return;
+
+ if (refcount_dec_and_mutex_lock(&data->refcnt, &qcom_pdm_mutex)) {
+ __qcom_pdm_data = NULL;
+ qcom_pdm_stop(data);
+ mutex_unlock(&qcom_pdm_mutex);
+ }
+}
+
+static const struct auxiliary_device_id qcom_pdm_table[] = {
+ { .name = "qcom_common.pd-mapper" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, qcom_pdm_table);
+
+static struct auxiliary_driver qcom_pdm_drv = {
+ .name = "qcom-pdm-mapper",
+ .id_table = qcom_pdm_table,
+ .probe = qcom_pdm_probe,
+ .remove = qcom_pdm_remove,
+};
+module_auxiliary_driver(qcom_pdm_drv);
+
+MODULE_DESCRIPTION("Qualcomm Protection Domain Mapper");
+MODULE_LICENSE("GPL");
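
Supporting another platform in the mapper only requires data: describe each protection domain once (domain string, QMI instance id, NULL-terminated list of extra services) and reference the set from the SoC's root compatible in qcom_pdm_domains[]. A hedged sketch for a hypothetical "qcom,newsoc" entry:

    /* hypothetical platform; instance id 74 reuses the common ADSP value above */
    static const struct qcom_pdm_domain_data newsoc_adsp_audio_pd = {
            .domain = "msm/adsp/audio_pd",
            .instance_id = 74,
            .services = {
                    "avs/audio",
                    NULL,
            },
    };

    static const struct qcom_pdm_domain_data *newsoc_domains[] = {
            &newsoc_adsp_audio_pd,
            NULL,
    };

    /* plus one more row in qcom_pdm_domains[]:
     *   { .compatible = "qcom,newsoc", .data = newsoc_domains, },
     */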
diff --git a/drivers/soc/qcom/qcom_pdr_msg.c b/drivers/soc/qcom/qcom_pdr_msg.c
new file mode 100644
index 000000000000..bf3e4a47165e
--- /dev/null
+++ b/drivers/soc/qcom/qcom_pdr_msg.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "pdr_internal.h"
+
+const struct qmi_elem_info servreg_location_entry_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ instance),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_location_entry_ei);
+
+const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ service_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_get_domain_list_req_ei);
+
+const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
+ .elem_size = sizeof(struct servreg_location_entry),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list),
+ .ei_array = servreg_location_entry_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_get_domain_list_resp_ei);
+
+const struct qmi_elem_info servreg_register_listener_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_register_listener_req,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_req,
+ service_path),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_register_listener_req_ei);
+
+const struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_register_listener_resp_ei);
+
+const struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_restart_pd_req,
+ service_path),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_restart_pd_req_ei);
+
+const struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_restart_pd_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_restart_pd_resp_ei);
+
+const struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ transaction_id),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_state_updated_ind_ei);
+
+const struct qmi_elem_info servreg_set_ack_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_set_ack_req,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_req,
+ transaction_id),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_set_ack_req_ei);
+
+const struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_set_ack_resp_ei);
+
+const struct qmi_elem_info servreg_loc_pfr_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_loc_pfr_req, service)
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_loc_pfr_req, reason)
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_loc_pfr_req_ei);
+
+const struct qmi_elem_info servreg_loc_pfr_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof_field(struct servreg_loc_pfr_resp, rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_loc_pfr_resp, rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_loc_pfr_resp_ei);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm Protection Domain messages data");
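The servreg_*_ei arrays above are declarative encode/decode tables: each entry binds a TLV tag and wire type to a struct member through offsetof(), and the empty terminator marks the end of the table. Below is a stripped-down, self-contained analogue of that table-driven approach; the struct, field kinds, and dump_fields() helper are hypothetical and only mirror the shape of qmi_elem_info, not the real QMI encoder.

/*
 * Illustrative sketch only: table-driven field description via offsetof(),
 * mirroring the shape of qmi_elem_info. Hypothetical names and types.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_msg {
	char name[65];
	uint32_t instance;
};

enum field_kind { FIELD_STRING, FIELD_U32 };

struct field_desc {
	enum field_kind kind;
	uint8_t tlv_type;	/* tag on the wire */
	size_t offset;		/* where the field lives in the struct */
	size_t size;
};

static const struct field_desc demo_msg_desc[] = {
	{ FIELD_STRING, 0x01, offsetof(struct demo_msg, name),
	  sizeof(((struct demo_msg *)0)->name) },
	{ FIELD_U32,    0x02, offsetof(struct demo_msg, instance),
	  sizeof(uint32_t) },
	{ 0 }	/* terminator, like the trailing {} in the tables above */
};

static void dump_fields(const void *msg, const struct field_desc *desc)
{
	for (; desc->size; desc++) {
		const uint8_t *p = (const uint8_t *)msg + desc->offset;

		if (desc->kind == FIELD_STRING)
			printf("tlv 0x%02x: \"%s\"\n", desc->tlv_type,
			       (const char *)p);
		else
			printf("tlv 0x%02x: %u\n", desc->tlv_type,
			       (unsigned int)*(const uint32_t *)p);
	}
}

int main(void)
{
	struct demo_msg m = { .instance = 42 };

	strcpy(m.name, "example_service");
	dump_fields(&m, demo_msg_desc);
	return 0;
}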
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 561d8037b50a..de86009ecd91 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -646,13 +646,14 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
int tcs_id;
- unsigned long flags;
+
+ might_sleep();
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock_irqsave(&drv->lock, flags);
+ spin_lock_irq(&drv->lock);
/* Wait forever for a free tcs. It better be there eventually! */
wait_event_lock_irq(drv->tcs_wait,
@@ -670,7 +671,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
enable_tcs_irq(drv, tcs_id, true);
}
- spin_unlock_irqrestore(&drv->lock, flags);
+ spin_unlock_irq(&drv->lock);
/*
* These two can be done after the lock is released because:
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 9f26d7f9b9dc..8903ed956312 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -183,7 +183,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
}
if (state == RPMH_ACTIVE_ONLY_STATE) {
- WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
/* Clean up our call by spoofing tx_done */
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 7191fa0c087f..e40aac281b06 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -795,6 +795,39 @@ int qcom_smem_get_soc_id(u32 *id)
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
+/**
+ * qcom_smem_get_feature_code() - return the feature code
+ * @code: On success, return the feature code here.
+ *
+ * Look up the feature code identifier from SMEM and return it.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qcom_smem_get_feature_code(u32 *code)
+{
+ struct socinfo *info;
+ u32 raw_code;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ /* This only makes sense for socinfo >= 16 */
+ if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16))
+ return -EOPNOTSUPP;
+
+ raw_code = __le32_to_cpu(info->feature_code);
+
+ /* Ensure the value makes sense */
+ if (raw_code > SOCINFO_FC_INT_MAX)
+ raw_code = SOCINFO_FC_UNKNOWN;
+
+ *code = raw_code;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code);
+
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
struct smem_header *header;
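qcom_smem_get_feature_code() above follows the usual 0-or-negative-errno convention and hands the value back through the @code pointer, clamping out-of-range values to SOCINFO_FC_UNKNOWN. A hypothetical consumer might call it as sketched below; the example_log_feature_code() wrapper is illustrative only and not part of the patch.

/*
 * Hypothetical consumer sketch, not part of this patch; it only demonstrates
 * the documented 0 / -errno contract of qcom_smem_get_feature_code().
 * Assumes the usual kernel includes for pr_*() and the SMEM/socinfo headers.
 */
static int example_log_feature_code(void)
{
	u32 code;
	int ret;

	ret = qcom_smem_get_feature_code(&code);
	if (ret == -EOPNOTSUPP) {
		/* socinfo format older than v16: no feature code available */
		pr_debug("no SoC feature code on this platform\n");
		return 0;
	}
	if (ret)
		return ret;	/* SMEM lookup failed or SMEM not ready yet */

	pr_info("SoC feature code: %u\n", code);
	return 0;
}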
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index a21241cbeec7..696c2a8387d0 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
+#include <linux/seq_file.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
@@ -353,11 +354,19 @@ static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
return 0;
}
+static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+
+ seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
+}
+
static struct irq_chip smp2p_irq_chip = {
.name = "smp2p",
.irq_mask = smp2p_mask_irq,
.irq_unmask = smp2p_unmask_irq,
.irq_set_type = smp2p_set_irq_type,
+ .irq_print_chip = smp2p_irq_print_chip,
};
static int smp2p_irq_map(struct irq_domain *d,
@@ -617,7 +626,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, irq,
NULL, qcom_smp2p_intr,
IRQF_ONESHOT,
- "smp2p", (void *)smp2p);
+ NULL, (void *)smp2p);
if (ret) {
dev_err(&pdev->dev, "failed to request interrupt\n");
goto unwind_interfaces;
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index e7c7e9a640a6..ffe78ae34386 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -5,6 +5,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
@@ -71,6 +72,7 @@ struct smsm_host;
* @lock: spinlock for read-modify-write of the outgoing state
* @entries: context for each of the entries
* @hosts: context for each of the hosts
+ * @mbox_client: mailbox client handle
*/
struct qcom_smsm {
struct device *dev;
@@ -88,6 +90,8 @@ struct qcom_smsm {
struct smsm_entry *entries;
struct smsm_host *hosts;
+
+ struct mbox_client mbox_client;
};
/**
@@ -120,11 +124,14 @@ struct smsm_entry {
* @ipc_regmap: regmap for outgoing interrupt
* @ipc_offset: offset in @ipc_regmap for outgoing interrupt
* @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
+ * @mbox_chan: apcs ipc mailbox channel handle
*/
struct smsm_host {
struct regmap *ipc_regmap;
int ipc_offset;
int ipc_bit;
+
+ struct mbox_chan *mbox_chan;
};
/**
@@ -172,7 +179,13 @@ static int smsm_update_bits(void *data, u32 mask, u32 value)
hostp = &smsm->hosts[host];
val = readl(smsm->subscription + host);
- if (val & changes && hostp->ipc_regmap) {
+ if (!(val & changes))
+ continue;
+
+ if (hostp->mbox_chan) {
+ mbox_send_message(hostp->mbox_chan, NULL);
+ mbox_client_txdone(hostp->mbox_chan, 0);
+ } else if (hostp->ipc_regmap) {
regmap_write(hostp->ipc_regmap,
hostp->ipc_offset,
BIT(hostp->ipc_bit));
@@ -353,6 +366,28 @@ static const struct irq_domain_ops smsm_irq_ops = {
};
/**
+ * smsm_parse_mbox() - requests an mbox channel
+ * @smsm: smsm driver context
+ * @host_id: index of the remote host to be resolved
+ *
+ * Requests the desired channel using the mbox interface, which is needed for
+ * sending outgoing interrupts to the remote host identified by @host_id.
+ */
+static int smsm_parse_mbox(struct qcom_smsm *smsm, unsigned int host_id)
+{
+ struct smsm_host *host = &smsm->hosts[host_id];
+ int ret = 0;
+
+ host->mbox_chan = mbox_request_channel(&smsm->mbox_client, host_id);
+ if (IS_ERR(host->mbox_chan)) {
+ ret = PTR_ERR(host->mbox_chan);
+ host->mbox_chan = NULL;
+ }
+
+ return ret;
+}
+
+/**
* smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
* @smsm: smsm driver context
* @host_id: index of the remote host to be resolved
@@ -521,8 +556,16 @@ static int qcom_smsm_probe(struct platform_device *pdev)
"qcom,local-host",
&smsm->local_host);
+ smsm->mbox_client.dev = &pdev->dev;
+ smsm->mbox_client.knows_txdone = true;
+
/* Parse the host properties */
for (id = 0; id < smsm->num_hosts; id++) {
+ /* Try using mbox interface first, otherwise fall back to syscon */
+ ret = smsm_parse_mbox(smsm, id);
+ if (!ret)
+ continue;
+
ret = smsm_parse_ipc(smsm, id);
if (ret < 0)
goto out_put;
@@ -609,6 +652,9 @@ unwind_interfaces:
qcom_smem_state_unregister(smsm->state);
out_put:
+ for (id = 0; id < smsm->num_hosts; id++)
+ mbox_free_channel(smsm->hosts[id].mbox_chan);
+
of_node_put(local_node);
return ret;
}
@@ -622,6 +668,9 @@ static void qcom_smsm_remove(struct platform_device *pdev)
if (smsm->entries[id].domain)
irq_domain_remove(smsm->entries[id].domain);
+ for (id = 0; id < smsm->num_hosts; id++)
+ mbox_free_channel(smsm->hosts[id].mbox_chan);
+
qcom_smem_state_unregister(smsm->state);
}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 277c07a6603d..d7359a235e3c 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -21,14 +21,6 @@
#include <dt-bindings/arm/qcom,ids.h>
-/*
- * SoC version type with major number in the upper 16 bits and minor
- * number in the lower 16 bits.
- */
-#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
-#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
-#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
-
/* Helper macros to create soc_id table */
#define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id)
#define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name)
@@ -124,6 +116,7 @@ static const char *const pmic_models[] = {
[50] = "PM8350B",
[51] = "PMR735A",
[52] = "PMR735B",
+ [54] = "PM6350",
[55] = "PM4125",
[58] = "PM8450",
[65] = "PM8010",
@@ -133,7 +126,8 @@ static const char *const pmic_models[] = {
[72] = "PMR735D",
[73] = "PM8550",
[74] = "PMK8550",
- [82] = "SMB2360",
+ [82] = "PMC8380",
+ [83] = "SMB2360",
};
struct socinfo_params {
@@ -348,6 +342,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SDA630) },
{ qcom_board_id(MSM8905) },
{ qcom_board_id(SDX202) },
+ { qcom_board_id(SDM670) },
{ qcom_board_id(SDM450) },
{ qcom_board_id(SM8150) },
{ qcom_board_id(SDA845) },
@@ -445,6 +440,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(QCS8550) },
{ qcom_board_id(QCM8550) },
{ qcom_board_id(IPQ5300) },
+ { qcom_board_id(IPQ5321) },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index 06e2c4c2a4a8..f75659fff287 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -572,4 +572,5 @@ static int __init qcom_spm_init(void)
}
arch_initcall(qcom_spm_init);
+MODULE_DESCRIPTION("Qualcomm Subsystem Power Manager (SPM)");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index 148bcbac332d..62b424e90d90 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016, Linaro Ltd.
* Copyright (c) 2015, Sony Mobile Communications Inc.
*/
+#include <linux/cleanup.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -198,7 +199,6 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss)
*/
static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
{
- struct wcnss_download_nv_req *req;
const struct firmware *fw;
struct device *dev = wcnss->dev;
const char *nvbin = NVBIN_FILE;
@@ -206,18 +206,19 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
ssize_t left;
int ret;
- req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
+ struct wcnss_download_nv_req *req __free(kfree) = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE,
+ GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin);
if (ret < 0 && ret != -EINVAL)
- goto free_req;
+ return ret;
ret = request_firmware(&fw, nvbin, dev);
if (ret < 0) {
dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret);
- goto free_req;
+ return ret;
}
data = fw->data;
@@ -263,8 +264,6 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
release_fw:
release_firmware(fw);
-free_req:
- kfree(req);
return ret;
}
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index fd8b6ac06656..d8c53cec7f37 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -129,14 +129,30 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
return ret;
}
-static int tensor_sec_update_bits(void *ctx, unsigned int reg,
- unsigned int mask, unsigned int val)
+static bool tensor_is_atomic(unsigned int reg)
{
/*
* Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
- * as the target registers can be accessed by multiple masters.
+	 * as the target registers can be accessed by multiple masters. SFRs
+	 * that don't support atomic access are listed in the switch statement
+	 * below.
*/
if (reg > PMUALIVE_MASK)
+ return false;
+
+ switch (reg) {
+ case GS101_SYSIP_DAT0:
+ case GS101_SYSTEM_CONFIGURATION:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int tensor_sec_update_bits(void *ctx, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ if (!tensor_is_atomic(reg))
return tensor_sec_reg_rmw(ctx, reg, mask, val);
return tensor_set_bits_atomic(ctx, reg, val, mask);
@@ -204,16 +220,6 @@ static const struct regmap_config regmap_smccfg = {
.reg_update_bits = tensor_sec_update_bits,
};
-static const struct regmap_config regmap_mmiocfg = {
- .name = "pmu_regs",
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .fast_io = true,
- .use_single_read = true,
- .use_single_write = true,
-};
-
static const struct exynos_pmu_data gs101_pmu_data = {
.pmu_secure = true
};
@@ -290,7 +296,6 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
const char *propname)
{
- struct exynos_pmu_context *ctx;
struct device_node *pmu_np;
struct device *dev;
@@ -316,9 +321,7 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
- ctx = dev_get_drvdata(dev);
-
- return ctx->pmureg;
+ return syscon_node_to_regmap(pmu_np);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
@@ -355,19 +358,22 @@ static int exynos_pmu_probe(struct platform_device *pdev)
regmap = devm_regmap_init(dev, NULL,
(void *)(uintptr_t)res->start,
&pmu_regmcfg);
+
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "regmap init failed\n");
+
+ ret = of_syscon_register_regmap(dev->of_node, regmap);
+ if (ret)
+ return ret;
} else {
- /* All other SoCs use a MMIO regmap */
- pmu_regmcfg = regmap_mmiocfg;
- pmu_regmcfg.max_register = resource_size(res) -
- pmu_regmcfg.reg_stride;
- regmap = devm_regmap_init_mmio(dev, pmu_base_addr,
- &pmu_regmcfg);
+ /* let syscon create mmio regmap */
+ regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "syscon_node_to_regmap failed\n");
}
- if (IS_ERR(regmap))
- return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
- "regmap init failed\n");
-
pmu_context->pmureg = regmap;
pmu_context->dev = dev;
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 6eb6cf06278e..2781a091a6a6 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -33,7 +33,6 @@ struct sunxi_sram_data {
u8 offset;
u8 width;
struct sunxi_sram_func *func;
- struct list_head list;
};
struct sunxi_sram_desc {
@@ -103,7 +102,6 @@ static const struct of_device_id sunxi_sram_dt_ids[] = {
};
static struct device *sram_dev;
-static LIST_HEAD(claimed_sram);
static DEFINE_SPINLOCK(sram_lock);
static void __iomem *base;
@@ -346,7 +344,7 @@ static void sunxi_sram_unlock(void *_lock)
spin_unlock(lock);
}
-static struct regmap_config sunxi_sram_regmap_config = {
+static const struct regmap_config sunxi_sram_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index b6bfd6729df3..d27667283846 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -127,8 +127,8 @@ static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info)
static int tegra_fuse_add_lookups(struct tegra_fuse *fuse)
{
- fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups),
- fuse->soc->num_lookups, GFP_KERNEL);
+ fuse->lookups = kmemdup_array(fuse->soc->lookups, fuse->soc->num_lookups,
+ sizeof(*fuse->lookups), GFP_KERNEL);
if (!fuse->lookups)
return -ENOMEM;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 91d0ad6ddefc..6c37d6eb8b49 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -2891,15 +2891,11 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->aotag = base;
pmc->scratch = base;
} else {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "wake");
- pmc->wake = devm_ioremap_resource(&pdev->dev, res);
+ pmc->wake = devm_platform_ioremap_resource_byname(pdev, "wake");
if (IS_ERR(pmc->wake))
return PTR_ERR(pmc->wake);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "aotag");
- pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
+ pmc->aotag = devm_platform_ioremap_resource_byname(pdev, "aotag");
if (IS_ERR(pmc->aotag))
return PTR_ERR(pmc->aotag);
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 59101bf7cf23..4fb0f0a24828 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -61,7 +61,7 @@ static const struct k3_soc_id {
};
static const char * const j721e_rev_string_map[] = {
- "1.0", "1.1",
+ "1.0", "1.1", "2.0",
};
static int
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
index a01eda720bf6..9325e8ce2e25 100644
--- a/drivers/soc/ti/knav_qmss.h
+++ b/drivers/soc/ti/knav_qmss.h
@@ -333,7 +333,7 @@ struct knav_range_info {
void *queue_base_inst;
unsigned flags;
struct list_head list;
- struct knav_range_ops *ops;
+ const struct knav_range_ops *ops;
struct knav_acc_info acc_info;
struct knav_acc_channel *acc;
unsigned num_irqs;
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
index 3d388646ed43..269b4e75ae40 100644
--- a/drivers/soc/ti/knav_qmss_acc.c
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -450,7 +450,7 @@ static int knav_acc_free_range(struct knav_range_info *range)
return 0;
}
-static struct knav_range_ops knav_acc_range_ops = {
+static const struct knav_range_ops knav_acc_range_ops = {
.set_notify = knav_acc_set_notify,
.init_queue = knav_acc_init_queue,
.open_queue = knav_acc_open_queue,
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 06fb5505c22c..f2055a76f84c 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -411,7 +411,7 @@ static int knav_gp_close_queue(struct knav_range_info *range,
return 0;
}
-static struct knav_range_ops knav_gp_range_ops = {
+static const struct knav_range_ops knav_gp_range_ops = {
.set_notify = knav_gp_set_notify,
.open_queue = knav_gp_open_queue,
.close_queue = knav_gp_close_queue,
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index 8e983c3c4e03..3a56bbf3268a 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -450,14 +450,14 @@ static int am33xx_pm_rtc_setup(void)
rtc_base_virt = of_iomap(np, 0);
if (!rtc_base_virt) {
- pr_warn("PM: could not iomap rtc");
+ pr_warn("PM: could not iomap rtc\n");
error = -ENODEV;
goto err_clk_put;
}
omap_rtc = rtc_class_open("rtc0");
if (!omap_rtc) {
- pr_warn("PM: rtc0 not available");
+ pr_warn("PM: rtc0 not available\n");
error = -EPROBE_DEFER;
goto err_iounmap;
}
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index 253299e4214d..f529e1346247 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -3,6 +3,7 @@
* Xilinx Event Management Driver
*
* Copyright (C) 2021 Xilinx, Inc.
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
*
* Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
*/
@@ -19,7 +20,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
+static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);
static int virq_sgi;
static int event_manager_availability = -EACCES;
@@ -35,7 +36,6 @@ static int event_manager_availability = -EACCES;
#define MAX_BITS (32U) /* Number of bits available for error mask */
-#define FIRMWARE_VERSION_MASK (0xFFFFU)
#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
@@ -570,7 +570,6 @@ static void xlnx_disable_percpu_irq(void *data)
static int xlnx_event_init_sgi(struct platform_device *pdev)
{
int ret = 0;
- int cpu;
/*
* IRQ related structures are used for the following:
* for each SGI interrupt ensure its mapped by GIC IRQ domain
@@ -607,11 +606,8 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
sgi_fwspec.param[0] = sgi_num;
virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
- cpu = get_cpu();
- per_cpu(cpu_number1, cpu) = cpu;
ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
- &cpu_number1);
- put_cpu();
+ &dummy_cpu_number);
WARN_ON(ret);
if (ret) {
@@ -627,16 +623,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
- int cpu = smp_processor_id();
-
- per_cpu(cpu_number1, cpu) = cpu;
-
cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
- free_percpu_irq(virq_sgi, &cpu_number1);
+ free_percpu_irq(virq_sgi, &dummy_cpu_number);
irq_dispose_mapping(virq_sgi);
}
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 965b1143936a..411d33f2fb05 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -30,9 +30,27 @@ struct zynqmp_pm_work_struct {
u32 args[CB_ARG_CNT];
};
-static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+/**
+ * struct zynqmp_pm_event_info - event related information
+ * @cb_fun: Function pointer to store the callback function.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @wake: Flag specifying whether the subsystem should be woken upon
+ * event notification.
+ */
+struct zynqmp_pm_event_info {
+ event_cb_func_t cb_fun;
+ enum pm_api_cb_id cb_type;
+ u32 node_id;
+ u32 event;
+ bool wake;
+};
+
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work, *zynqmp_pm_init_restart_work;
static struct mbox_chan *rx_chan;
-static bool event_registered;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -54,6 +72,19 @@ static void zynqmp_pm_get_callback_data(u32 *buf)
zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
}
+static void subsystem_restart_event_callback(const u32 *payload, void *data)
+{
+ /* First element is callback API ID, others are callback arguments */
+ if (work_pending(&zynqmp_pm_init_restart_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_restart_work->args, &payload[0],
+ sizeof(zynqmp_pm_init_restart_work->args));
+
+ queue_work(system_unbound_wq, &zynqmp_pm_init_restart_work->callback_work);
+}
+
static void suspend_event_callback(const u32 *payload, void *data)
{
/* First element is callback API ID, others are callback arguments */
@@ -120,6 +151,37 @@ static void ipi_receive_callback(struct mbox_client *cl, void *data)
}
/**
+ * zynqmp_pm_subsystem_restart_work_fn - Initiate Subsystem restart
+ * @work: Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_subsystem_restart_work_fn(struct work_struct *work)
+{
+ int ret;
+ struct zynqmp_pm_work_struct *pm_work = container_of(work, struct zynqmp_pm_work_struct,
+ callback_work);
+
+ /* First element is callback API ID, others are callback arguments */
+ if (pm_work->args[0] == PM_NOTIFY_CB) {
+ if (pm_work->args[2] == EVENT_SUBSYSTEM_RESTART) {
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM);
+ if (ret) {
+ pr_err("unable to set shutdown scope\n");
+ return;
+ }
+
+ kernel_restart(NULL);
+ } else {
+ pr_err("%s Unsupported Event - %d\n", __func__, pm_work->args[2]);
+ }
+ } else {
+ pr_err("%s() Unsupported Callback %d\n", __func__, pm_work->args[0]);
+ }
+}
+
+/**
* zynqmp_pm_init_suspend_work_fn - Initialize suspend
* @work: Pointer to work_struct
*
@@ -184,13 +246,51 @@ static ssize_t suspend_mode_store(struct device *dev,
static DEVICE_ATTR_RW(suspend_mode);
+static void unregister_event(struct device *dev, void *res)
+{
+ struct zynqmp_pm_event_info *event_info = res;
+
+ xlnx_unregister_event(event_info->cb_type, event_info->node_id,
+ event_info->event, event_info->cb_fun, NULL);
+}
+
+static int register_event(struct device *dev, const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake, event_cb_func_t cb_fun)
+{
+ int ret;
+ struct zynqmp_pm_event_info *event_info;
+
+ event_info = devres_alloc(unregister_event, sizeof(struct zynqmp_pm_event_info),
+ GFP_KERNEL);
+ if (!event_info)
+ return -ENOMEM;
+
+ event_info->cb_type = cb_type;
+ event_info->node_id = node_id;
+ event_info->event = event;
+ event_info->wake = wake;
+ event_info->cb_fun = cb_fun;
+
+ ret = xlnx_register_event(event_info->cb_type, event_info->node_id,
+ event_info->event, event_info->wake, event_info->cb_fun, NULL);
+ if (ret) {
+ devres_free(event_info);
+ return ret;
+ }
+
+ devres_add(dev, event_info);
+ return 0;
+}
+
static int zynqmp_pm_probe(struct platform_device *pdev)
{
int ret, irq;
- u32 pm_api_version;
+ u32 pm_api_version, pm_family_code, pm_sub_family_code, node_id;
struct mbox_client *client;
- zynqmp_pm_get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
+ if (ret)
+ return ret;
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
@@ -203,21 +303,43 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
* is not available to use) or -ENODEV(Xilinx Event Manager not compiled),
* then use ipi-mailbox or interrupt method.
*/
- ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false,
- suspend_event_callback, NULL);
+ ret = register_event(&pdev->dev, PM_INIT_SUSPEND_CB, 0, 0, false,
+ suspend_event_callback);
if (!ret) {
zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev,
sizeof(struct zynqmp_pm_work_struct),
GFP_KERNEL);
- if (!zynqmp_pm_init_suspend_work) {
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
- suspend_event_callback, NULL);
+ if (!zynqmp_pm_init_suspend_work)
return -ENOMEM;
- }
- event_registered = true;
INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
zynqmp_pm_init_suspend_work_fn);
+
+ ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ if (ret < 0)
+ return ret;
+
+ if (pm_sub_family_code == VERSALNET_SUB_FAMILY_CODE)
+ node_id = PM_DEV_ACPU_0_0;
+ else
+ node_id = PM_DEV_ACPU_0;
+
+ ret = register_event(&pdev->dev, PM_NOTIFY_CB, node_id, EVENT_SUBSYSTEM_RESTART,
+ false, subsystem_restart_event_callback);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n",
+ ret);
+ return ret;
+ }
+
+ zynqmp_pm_init_restart_work = devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_restart_work)
+ return -ENOMEM;
+
+ INIT_WORK(&zynqmp_pm_init_restart_work->callback_work,
+ zynqmp_pm_subsystem_restart_work_fn);
} else if (ret != -EACCES && ret != -ENODEV) {
dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret);
return ret;
@@ -264,15 +386,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
- if (ret) {
- if (event_registered) {
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback,
- NULL);
- event_registered = false;
- }
- dev_err(&pdev->dev, "unable to create sysfs interface\n");
+ if (ret)
return ret;
- }
return 0;
}
@@ -280,8 +395,6 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
static void zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
- if (event_registered)
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL);
if (!rx_chan)
mbox_free_channel(rx_chan);
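The register_event() helper above ties each xlnx_register_event() call to a devres record, so the matching unregister runs automatically when the device is unbound; that is what lets the manual cleanup in the old probe error paths and in remove() be deleted. The standalone sketch below models the same idea in plain C with an explicit cleanup list; all names are hypothetical and the kernel's devres API is only imitated, not reproduced.

/*
 * Illustrative sketch only: the "register now, auto-clean on unbind" idea
 * behind devres, modelled with an explicit cleanup list. Hypothetical names.
 */
#include <stdio.h>
#include <stdlib.h>

struct cleanup {
	void (*release)(void *res);
	void *res;
	struct cleanup *next;
};

static struct cleanup *cleanups;

/* analogue of devres_alloc() + devres_add() */
static void *managed_alloc(size_t size, void (*release)(void *res))
{
	struct cleanup *c = malloc(sizeof(*c));
	void *res = calloc(1, size);

	if (!c || !res) {
		free(c);
		free(res);
		return NULL;
	}
	c->release = release;
	c->res = res;
	c->next = cleanups;
	cleanups = c;
	return res;
}

/* analogue of device unbind: run every registered release, newest first */
static void unbind_all(void)
{
	while (cleanups) {
		struct cleanup *c = cleanups;

		cleanups = c->next;
		c->release(c->res);
		free(c->res);
		free(c);
	}
}

struct event_info { int node_id; int event; };

static void unregister_event(void *res)
{
	struct event_info *ei = res;

	printf("auto-unregister node %d event %d\n", ei->node_id, ei->event);
}

int main(void)
{
	struct event_info *ei = managed_alloc(sizeof(*ei), unregister_event);

	if (!ei)
		return 1;
	ei->node_id = 1;
	ei->event = 4;	/* the real registration would happen here */
	unbind_all();	/* driver removal: cleanup runs automatically */
	return 0;
}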
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 20d94bcfc9b4..795e223f7e5c 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -571,6 +571,9 @@ static int sdw_master_read_amd_prop(struct sdw_bus *bus)
amd_manager->wake_en_mask = wake_en_mask;
fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
amd_manager->power_mode_mask = power_mode_mask;
+
+ fwnode_handle_put(link);
+
return 0;
}
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 17cf27e6ea73..18517121cc89 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -155,8 +155,10 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
intel_prop = devm_kzalloc(bus->dev, sizeof(*intel_prop), GFP_KERNEL);
- if (!intel_prop)
+ if (!intel_prop) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
/* initialize with hardware defaults, in case the properties are not found */
intel_prop->doaise = 0x1;
@@ -184,6 +186,8 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
intel_prop->dodse,
intel_prop->dods);
+ fwnode_handle_put(link);
+
return 0;
}
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 55a9c51c84c1..e5d9df26d4dc 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -66,8 +66,10 @@ int sdw_master_read_prop(struct sdw_bus *bus)
prop->clk_freq = devm_kcalloc(bus->dev, prop->num_clk_freq,
sizeof(*prop->clk_freq),
GFP_KERNEL);
- if (!prop->clk_freq)
+ if (!prop->clk_freq) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(link,
"mipi-sdw-clock-frequencies-supported",
@@ -92,8 +94,10 @@ int sdw_master_read_prop(struct sdw_bus *bus)
prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears,
sizeof(*prop->clk_gears),
GFP_KERNEL);
- if (!prop->clk_gears)
+ if (!prop->clk_gears) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(link,
"mipi-sdw-supported-clock-gears",
@@ -116,6 +120,8 @@ int sdw_master_read_prop(struct sdw_bus *bus)
fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold",
&prop->err_threshold);
+ fwnode_handle_put(link);
+
return 0;
}
EXPORT_SYMBOL(sdw_master_read_prop);
@@ -197,8 +203,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_words,
sizeof(*dpn[i].words),
GFP_KERNEL);
- if (!dpn[i].words)
+ if (!dpn[i].words) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-port-wordlength-configs",
@@ -236,8 +244,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_channels,
sizeof(*dpn[i].channels),
GFP_KERNEL);
- if (!dpn[i].channels)
+ if (!dpn[i].channels) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-number-list",
@@ -251,8 +261,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_ch_combinations,
sizeof(*dpn[i].ch_combinations),
GFP_KERNEL);
- if (!dpn[i].ch_combinations)
+ if (!dpn[i].ch_combinations) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-combination-list",
@@ -274,6 +286,8 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
/* TODO: Read audio mode */
+ fwnode_handle_put(node);
+
i++;
}
@@ -348,10 +362,14 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
prop->dp0_prop = devm_kzalloc(&slave->dev,
sizeof(*prop->dp0_prop),
GFP_KERNEL);
- if (!prop->dp0_prop)
+ if (!prop->dp0_prop) {
+ fwnode_handle_put(port);
return -ENOMEM;
+ }
sdw_slave_read_dp0(slave, port, prop->dp0_prop);
+
+ fwnode_handle_put(port);
}
/*
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a2c99ff33e0a..ec1550c698d5 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -277,6 +277,12 @@ config SPI_CADENCE_XSPI
device with a Cadence XSPI controller and want to access the
Flash as an MTD device.
+config SPI_CH341
+ tristate "CH341 USB2SPI adapter"
+ depends on SPI_MASTER && USB
+ help
+	  Enables the SPI controller on the CH341a USB-to-serial chip.
+
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
depends on ARCH_CLPS711X || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index e694254dec04..a9b1bc259b68 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o
obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o
+obj-$(CONFIG_SPI_CH341) += spi-ch341.o
obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_CS42L43) += spi-cs42l43.o
@@ -107,7 +108,8 @@ obj-$(CONFIG_SPI_PIC32) += spi-pic32.o
obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
-spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
+obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-core.o
+spi-pxa2xx-core-y := spi-pxa2xx.o spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 370c4d1572ed..5aaff3bee1b7 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -756,8 +756,15 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
int ret;
- clk_prepare(aq->pclk);
- clk_prepare(aq->qspick);
+ ret = clk_prepare(aq->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare(aq->qspick);
+ if (ret) {
+ clk_unprepare(aq->pclk);
+ return ret;
+ }
ret = pm_runtime_force_resume(dev);
if (ret < 0)
diff --git a/drivers/spi/internals.h b/drivers/spi/internals.h
index 4a28a8395552..1f459b895891 100644
--- a/drivers/spi/internals.h
+++ b/drivers/spi/internals.h
@@ -40,4 +40,12 @@ static inline void spi_unmap_buf(struct spi_controller *ctlr,
}
#endif /* CONFIG_HAS_DMA */
+static inline bool spi_xfer_is_dma_mapped(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ return ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer) &&
+ (xfer->tx_sg_mapped || xfer->rx_sg_mapped);
+}
+
#endif /* __LINUX_SPI_INTERNALS_H */
diff --git a/drivers/spi/spi-altera-core.c b/drivers/spi/spi-altera-core.c
index 87e37f48f196..7af097929116 100644
--- a/drivers/spi/spi-altera-core.c
+++ b/drivers/spi/spi-altera-core.c
@@ -219,4 +219,5 @@ void altera_spi_init_host(struct spi_controller *host)
}
EXPORT_SYMBOL_GPL(altera_spi_init_host);
+MODULE_DESCRIPTION("Altera SPI Controller driver core");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index e358ac5b4509..447e5a962dee 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -46,6 +46,7 @@
#define SPI_ENGINE_INST_ASSERT 0x1
#define SPI_ENGINE_INST_WRITE 0x2
#define SPI_ENGINE_INST_MISC 0x3
+#define SPI_ENGINE_INST_CS_INV 0x4
#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
#define SPI_ENGINE_CMD_REG_CONFIG 0x1
@@ -73,6 +74,8 @@
SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
+#define SPI_ENGINE_CMD_CS_INV(flags) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
struct spi_engine_program {
unsigned int length;
@@ -111,6 +114,8 @@ struct spi_engine {
struct spi_engine_message_state msg_state;
struct completion msg_complete;
unsigned int int_enable;
+ /* shadows hardware CS inversion flag state */
+ u8 cs_inv;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
@@ -164,16 +169,20 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
}
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
- int delay_ns, u32 sclk_hz)
+ int delay_ns, int inst_ns, u32 sclk_hz)
{
unsigned int t;
- /* negative delay indicates error, e.g. from spi_delay_to_ns() */
- if (delay_ns <= 0)
+ /*
+	 * A negative delay indicates an error, e.g. from spi_delay_to_ns(). If
+	 * the delay is less than the instruction execution time, there is no
+	 * need for an extra sleep instruction since the instruction execution
+	 * time already covers the required delay.
+ */
+ if (delay_ns < 0 || delay_ns <= inst_ns)
return;
- /* rounding down since executing the instruction adds a couple of ticks delay */
- t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC);
+ t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
while (t) {
unsigned int n = min(t, 256U);
@@ -220,10 +229,16 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
struct spi_device *spi = msg->spi;
struct spi_controller *host = spi->controller;
struct spi_transfer *xfer;
- int clk_div, new_clk_div;
+ int clk_div, new_clk_div, inst_ns;
bool keep_cs = false;
u8 bits_per_word = 0;
+ /*
+ * Take into account instruction execution time for more accurate sleep
+ * times, especially when the delay is small.
+ */
+ inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);
+
clk_div = 1;
spi_engine_program_add_cmd(p, dry,
@@ -252,7 +267,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
spi_engine_gen_xfer(p, dry, xfer);
spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
- xfer->effective_speed_hz);
+ inst_ns, xfer->effective_speed_hz);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
@@ -262,7 +277,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
spi_engine_gen_cs(p, dry, spi, false);
spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
- &xfer->cs_change_delay, xfer),
+ &xfer->cs_change_delay, xfer), inst_ns,
xfer->effective_speed_hz);
if (!list_next_entry(xfer, transfer_list)->cs_off)
@@ -530,6 +545,29 @@ static int spi_engine_unoptimize_message(struct spi_message *msg)
return 0;
}
+static int spi_engine_setup(struct spi_device *device)
+{
+ struct spi_controller *host = device->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+
+ if (device->mode & SPI_CS_HIGH)
+ spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
+ else
+ spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));
+
+ writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ /*
+ * In addition to setting the flags, we have to do a CS assert command
+ * to make the new setting actually take effect.
+ */
+ writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ return 0;
+}
+
static int spi_engine_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
@@ -653,16 +691,16 @@ static int spi_engine_probe(struct platform_device *pdev)
host->unoptimize_message = spi_engine_unoptimize_message;
host->num_chipselect = 8;
+	/* Some features depend on the IP core version. */
+ if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
+ host->mode_bits |= SPI_CS_HIGH;
+ host->setup = spi_engine_setup;
+ }
+
if (host->max_speed_hz == 0)
return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");
- ret = devm_spi_register_controller(&pdev->dev, host);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, host);
-
- return 0;
+ return devm_spi_register_controller(&pdev->dev, host);
}
static const struct of_device_id spi_engine_match_table[] = {
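The inst_ns handling above is plain integer arithmetic: one instruction costs roughly one period of the reference clock (rounded up), only the remainder of the requested delay needs an explicit sleep, and the tick count is now rounded up rather than down. The standalone sketch below reproduces that calculation with hypothetical helper names; for a 50 MHz reference clock it yields inst_ns = 20 ns, so a 15 ns delay needs no sleep instruction while a 100 ns delay needs 4 ticks.

/*
 * Illustrative sketch of the sleep-tick math in spi_engine_gen_sleep():
 * subtract the instruction execution time, then round the remainder up to
 * whole SCLK ticks. Standalone userspace code, hypothetical names.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int sleep_ticks(int64_t delay_ns, uint32_t max_speed_hz,
				uint32_t sclk_hz)
{
	/* one instruction takes about one period of the reference clock */
	int64_t inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, max_speed_hz);

	/* negative delay is an error; short delays are covered by inst_ns */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return 0;

	return DIV_ROUND_UP((uint64_t)(delay_ns - inst_ns) * sclk_hz,
			    NSEC_PER_SEC);
}

int main(void)
{
	/* e.g. 50 MHz reference clock -> inst_ns = 20 ns */
	printf("15 ns delay  -> %u ticks\n", sleep_ticks(15, 50000000, 50000000));
	printf("100 ns delay -> %u ticks\n", sleep_ticks(100, 50000000, 50000000));
	return 0;
}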
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index ca5cc67555c5..afb1b1105ec2 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -38,33 +38,24 @@
* working quickly, or testing for differences that aren't speed related.
*/
+typedef unsigned int (*spi_bb_txrx_bufs_fn)(struct spi_device *, spi_bb_txrx_word_fn,
+ unsigned int, struct spi_transfer *,
+ unsigned int);
+
struct spi_bitbang_cs {
- unsigned nsecs; /* (clock cycle time)/2 */
- u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs,
- u32 word, u8 bits, unsigned flags);
- unsigned (*txrx_bufs)(struct spi_device *,
- u32 (*txrx_word)(
- struct spi_device *spi,
- unsigned nsecs,
- u32 word, u8 bits,
- unsigned flags),
- unsigned, struct spi_transfer *,
- unsigned);
+ unsigned int nsecs; /* (clock cycle time) / 2 */
+ spi_bb_txrx_word_fn txrx_word;
+ spi_bb_txrx_bufs_fn txrx_bufs;
};
-static unsigned bitbang_txrx_8(
- struct spi_device *spi,
- u32 (*txrx_word)(struct spi_device *spi,
- unsigned nsecs,
- u32 word, u8 bits,
- unsigned flags),
- unsigned ns,
+static unsigned int bitbang_txrx_8(struct spi_device *spi,
+ spi_bb_txrx_word_fn txrx_word,
+ unsigned int ns,
struct spi_transfer *t,
- unsigned flags
-)
+ unsigned int flags)
{
- unsigned bits = t->bits_per_word;
- unsigned count = t->len;
+ unsigned int bits = t->bits_per_word;
+ unsigned int count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
@@ -81,19 +72,14 @@ static unsigned bitbang_txrx_8(
return t->len - count;
}
-static unsigned bitbang_txrx_16(
- struct spi_device *spi,
- u32 (*txrx_word)(struct spi_device *spi,
- unsigned nsecs,
- u32 word, u8 bits,
- unsigned flags),
- unsigned ns,
+static unsigned int bitbang_txrx_16(struct spi_device *spi,
+ spi_bb_txrx_word_fn txrx_word,
+ unsigned int ns,
struct spi_transfer *t,
- unsigned flags
-)
+ unsigned int flags)
{
- unsigned bits = t->bits_per_word;
- unsigned count = t->len;
+ unsigned int bits = t->bits_per_word;
+ unsigned int count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
@@ -110,19 +96,14 @@ static unsigned bitbang_txrx_16(
return t->len - count;
}
-static unsigned bitbang_txrx_32(
- struct spi_device *spi,
- u32 (*txrx_word)(struct spi_device *spi,
- unsigned nsecs,
- u32 word, u8 bits,
- unsigned flags),
- unsigned ns,
+static unsigned int bitbang_txrx_32(struct spi_device *spi,
+ spi_bb_txrx_word_fn txrx_word,
+ unsigned int ns,
struct spi_transfer *t,
- unsigned flags
-)
+ unsigned int flags)
{
- unsigned bits = t->bits_per_word;
- unsigned count = t->len;
+ unsigned int bits = t->bits_per_word;
+ unsigned int count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
@@ -234,7 +215,7 @@ EXPORT_SYMBOL_GPL(spi_bitbang_cleanup);
static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct spi_bitbang_cs *cs = spi->controller_state;
- unsigned nsecs = cs->nsecs;
+ unsigned int nsecs = cs->nsecs;
struct spi_bitbang *bitbang;
bitbang = spi_controller_get_devdata(spi->controller);
@@ -247,7 +228,7 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
}
if (spi->mode & SPI_3WIRE) {
- unsigned flags;
+ unsigned int flags;
flags = t->tx_buf ? SPI_CONTROLLER_NO_RX : SPI_CONTROLLER_NO_TX;
return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags);
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 2209e9fc378f..2e3eacd46b72 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -145,6 +145,9 @@
#define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
#define CDNS_XSPI_TRD_STATUS 0x0104
+#define MODE_NO_OF_BYTES GENMASK(25, 24)
+#define MODEBYTES_COUNT 1
+
/* Helper macros for filling command registers */
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
@@ -157,9 +160,10 @@
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
+ FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
@@ -173,12 +177,12 @@
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
((op)->data.nbytes >> 16) & 0xffff) | \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
(op)->dummy.buswidth != 0 ? \
- (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
+ (((dummybytes) * 8) / (op)->dummy.buswidth) : \
0))
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
@@ -351,6 +355,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
u32 cmd_regs[6];
u32 cmd_status;
int ret;
+ int dummybytes = op->dummy.nbytes;
ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
if (ret < 0)
@@ -365,7 +370,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
memset(cmd_regs, 0, sizeof(cmd_regs));
cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
- cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
+ if (dummybytes != 0) {
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
+ dummybytes--;
+ } else {
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
+ }
cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
cdns_xspi->cur_cs);
@@ -375,7 +385,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
- cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
cdns_xspi->cur_cs);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index e5140532071d..e07e081de5ea 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -18,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/spi/spi.h>
/* Name of this driver */
@@ -111,6 +112,7 @@
* @dev_busy: Device busy flag
* @is_decoded_cs: Flag for decoder property set or not
* @tx_fifo_depth: Depth of the TX FIFO
+ * @rstc: Optional reset control for SPI controller
*/
struct cdns_spi {
void __iomem *regs;
@@ -125,6 +127,7 @@ struct cdns_spi {
u8 dev_busy;
u32 is_decoded_cs;
unsigned int tx_fifo_depth;
+ struct reset_control *rstc;
};
/* Macros for the SPI controller read/write */
@@ -588,14 +591,24 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto remove_ctlr;
}
- if (!spi_controller_is_target(ctlr)) {
- xspi->ref_clk = devm_clk_get_enabled(&pdev->dev, "ref_clk");
- if (IS_ERR(xspi->ref_clk)) {
- dev_err(&pdev->dev, "ref_clk clock not found.\n");
- ret = PTR_ERR(xspi->ref_clk);
- goto remove_ctlr;
- }
+ xspi->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, "spi");
+ if (IS_ERR(xspi->rstc)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(xspi->rstc),
+ "Cannot get SPI reset.\n");
+ goto remove_ctlr;
+ }
+
+ reset_control_assert(xspi->rstc);
+ reset_control_deassert(xspi->rstc);
+ xspi->ref_clk = devm_clk_get_enabled(&pdev->dev, "ref_clk");
+ if (IS_ERR(xspi->ref_clk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xspi->ref_clk);
+ goto remove_ctlr;
+ }
+
+ if (!spi_controller_is_target(ctlr)) {
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_get_noresume(&pdev->dev);
diff --git a/drivers/spi/spi-ch341.c b/drivers/spi/spi-ch341.c
new file mode 100644
index 000000000000..d2351812d310
--- /dev/null
+++ b/drivers/spi/spi-ch341.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// QiHeng Electronics ch341a USB-to-SPI adapter driver
+//
+// Copyright (C) 2024 Johannes Thumshirn <jth@kernel.org>
+//
+// Based on ch341a_spi.c from the flashrom project.
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/spi/spi.h>
+
+#define CH341_PACKET_LENGTH 32
+#define CH341_DEFAULT_TIMEOUT 1000
+
+#define CH341A_CMD_UIO_STREAM 0xab
+
+#define CH341A_CMD_UIO_STM_END 0x20
+#define CH341A_CMD_UIO_STM_DIR 0x40
+#define CH341A_CMD_UIO_STM_OUT 0x80
+
+#define CH341A_CMD_I2C_STREAM 0xaa
+#define CH341A_CMD_I2C_STM_SET 0x60
+#define CH341A_CMD_I2C_STM_END 0x00
+
+#define CH341A_CMD_SPI_STREAM 0xa8
+
+#define CH341A_STM_I2C_100K 0x01
+
+struct ch341_spi_dev {
+ struct spi_controller *ctrl;
+ struct usb_device *udev;
+ unsigned int write_pipe;
+ unsigned int read_pipe;
+ int rx_len;
+ void *rx_buf;
+ u8 *tx_buf;
+ struct urb *rx_urb;
+ struct spi_device *spidev;
+};
+
+static void ch341_set_cs(struct spi_device *spi, bool is_high)
+{
+ struct ch341_spi_dev *ch341 =
+ spi_controller_get_devdata(spi->controller);
+ int err;
+
+ memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH);
+ ch341->tx_buf[0] = CH341A_CMD_UIO_STREAM;
+ ch341->tx_buf[1] = CH341A_CMD_UIO_STM_OUT | (is_high ? 0x36 : 0x37);
+
+ if (is_high) {
+ ch341->tx_buf[2] = CH341A_CMD_UIO_STM_DIR | 0x3f;
+ ch341->tx_buf[3] = CH341A_CMD_UIO_STM_END;
+ } else {
+ ch341->tx_buf[2] = CH341A_CMD_UIO_STM_END;
+ }
+
+ err = usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf,
+ (is_high ? 4 : 3), NULL, CH341_DEFAULT_TIMEOUT);
+ if (err)
+ dev_err(&spi->dev,
+ "error sending USB message for setting CS (%d)\n", err);
+}
+
+static int ch341_transfer_one(struct spi_controller *host,
+ struct spi_device *spi,
+ struct spi_transfer *trans)
+{
+ struct ch341_spi_dev *ch341 =
+ spi_controller_get_devdata(spi->controller);
+ int len;
+ int ret;
+
+ len = min(CH341_PACKET_LENGTH, trans->len + 1);
+
+ memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH);
+
+ ch341->tx_buf[0] = CH341A_CMD_SPI_STREAM;
+
+ memcpy(ch341->tx_buf + 1, trans->tx_buf, len);
+
+ ret = usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf, len,
+ NULL, CH341_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return usb_bulk_msg(ch341->udev, ch341->read_pipe, trans->rx_buf,
+ len - 1, NULL, CH341_DEFAULT_TIMEOUT);
+}
+
+static void ch341_recv(struct urb *urb)
+{
+ struct ch341_spi_dev *ch341 = urb->context;
+ struct usb_device *udev = ch341->udev;
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ case -ESHUTDOWN:
+ dev_dbg(&udev->dev, "rx urb terminated with status: %d\n",
+ urb->status);
+ return;
+ default:
+ dev_dbg(&udev->dev, "rx urb error: %d\n", urb->status);
+ break;
+ }
+}
+
+static int ch341_config_stream(struct ch341_spi_dev *ch341)
+{
+ memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH);
+ ch341->tx_buf[0] = CH341A_CMD_I2C_STREAM;
+ ch341->tx_buf[1] = CH341A_CMD_I2C_STM_SET | CH341A_STM_I2C_100K;
+ ch341->tx_buf[2] = CH341A_CMD_I2C_STM_END;
+
+ return usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf, 3,
+ NULL, CH341_DEFAULT_TIMEOUT);
+}
+
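+/* Switch the interface pins to outputs (enable) or back to inputs (disable). */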
+static int ch341_enable_pins(struct ch341_spi_dev *ch341, bool enable)
+{
+ memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH);
+ ch341->tx_buf[0] = CH341A_CMD_UIO_STREAM;
+ ch341->tx_buf[1] = CH341A_CMD_UIO_STM_OUT | 0x37;
+ ch341->tx_buf[2] = CH341A_CMD_UIO_STM_DIR | (enable ? 0x3f : 0x00);
+ ch341->tx_buf[3] = CH341A_CMD_UIO_STM_END;
+
+ return usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf, 4,
+ NULL, CH341_DEFAULT_TIMEOUT);
+}
+
+static struct spi_board_info chip = {
+ .modalias = "spi-ch341a",
+};
+
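+/*
+ * Find the bulk endpoints, set up the buffers and the RX URB, configure the
+ * stream interface and register the controller with a single child device.
+ */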
+static int ch341_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *in, *out;
+ struct ch341_spi_dev *ch341;
+ struct spi_controller *ctrl;
+ int ret;
+
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL,
+ NULL);
+ if (ret)
+ return ret;
+
+ ctrl = devm_spi_alloc_master(&udev->dev, sizeof(struct ch341_spi_dev));
+ if (!ctrl)
+ return -ENOMEM;
+
+ ch341 = spi_controller_get_devdata(ctrl);
+ ch341->ctrl = ctrl;
+ ch341->udev = udev;
+ ch341->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out));
+ ch341->read_pipe = usb_rcvbulkpipe(udev, usb_endpoint_num(in));
+
+ ch341->rx_len = usb_endpoint_maxp(in);
+ ch341->rx_buf = devm_kzalloc(&udev->dev, ch341->rx_len, GFP_KERNEL);
+ if (!ch341->rx_buf)
+ return -ENOMEM;
+
+ ch341->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ch341->rx_urb)
+ return -ENOMEM;
+
+ ch341->tx_buf =
+ devm_kzalloc(&udev->dev, CH341_PACKET_LENGTH, GFP_KERNEL);
+ if (!ch341->tx_buf)
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(ch341->rx_urb, udev, ch341->read_pipe, ch341->rx_buf,
+ ch341->rx_len, ch341_recv, ch341);
+
+ ret = usb_submit_urb(ch341->rx_urb, GFP_KERNEL);
+ if (ret) {
+ usb_free_urb(ch341->rx_urb);
+ return -ENOMEM;
+ }
+
+ ctrl->bus_num = -1;
+ ctrl->mode_bits = SPI_CPHA;
+ ctrl->transfer_one = ch341_transfer_one;
+ ctrl->set_cs = ch341_set_cs;
+ ctrl->auto_runtime_pm = false;
+
+ usb_set_intfdata(intf, ch341);
+
+ ret = ch341_config_stream(ch341);
+ if (ret)
+ return ret;
+
+ ret = ch341_enable_pins(ch341, true);
+ if (ret)
+ return ret;
+
+ ret = spi_register_controller(ctrl);
+ if (ret)
+ return ret;
+
+ ch341->spidev = spi_new_device(ctrl, &chip);
+ if (!ch341->spidev)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ch341_disconnect(struct usb_interface *intf)
+{
+ struct ch341_spi_dev *ch341 = usb_get_intfdata(intf);
+
+ spi_unregister_device(ch341->spidev);
+ spi_unregister_controller(ch341->ctrl);
+ ch341_enable_pins(ch341, false);
+ usb_free_urb(ch341->rx_urb);
+}
+
+static const struct usb_device_id ch341_id_table[] = {
+ { USB_DEVICE(0x1a86, 0x5512) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ch341_id_table);
+
+static struct usb_driver ch341a_usb_driver = {
+ .name = "spi-ch341",
+ .probe = ch341_probe,
+ .disconnect = ch341_disconnect,
+ .id_table = ch341_id_table,
+};
+module_usb_driver(ch341a_usb_driver);
+
+MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
+MODULE_DESCRIPTION("QiHeng Electronics ch341 USB2SPI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index 9d747ea69926..5b8ed65f8094 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -9,6 +9,7 @@
#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/machine.h>
@@ -26,7 +27,7 @@
#include <linux/units.h>
#define CS42L43_FIFO_SIZE 16
-#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
+#define CS42L43_SPI_ROOT_HZ 49152000
#define CS42L43_SPI_MAX_LENGTH 65532
enum cs42l43_spi_cmd {
@@ -44,28 +45,10 @@ static const unsigned int cs42l43_clock_divs[] = {
2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
};
-static const struct software_node ampl = {
- .name = "cs35l56-left",
-};
-
-static const struct software_node ampr = {
- .name = "cs35l56-right",
-};
-
-static struct spi_board_info ampl_info = {
+static struct spi_board_info amp_info_template = {
.modalias = "cs35l56",
- .max_speed_hz = 20 * HZ_PER_MHZ,
- .chip_select = 0,
+ .max_speed_hz = 11 * HZ_PER_MHZ,
.mode = SPI_MODE_0,
- .swnode = &ampl,
-};
-
-static struct spi_board_info ampr_info = {
- .modalias = "cs35l56",
- .max_speed_hz = 20 * HZ_PER_MHZ,
- .chip_select = 1,
- .mode = SPI_MODE_0,
- .swnode = &ampr,
};
static const struct software_node cs42l43_gpiochip_swnode = {
@@ -246,11 +229,10 @@ static size_t cs42l43_spi_max_length(struct spi_device *spi)
return CS42L43_SPI_MAX_LENGTH;
}
-static bool cs42l43_has_sidecar(struct fwnode_handle *fwnode)
+static struct fwnode_handle *cs42l43_find_xu_node(struct fwnode_handle *fwnode)
{
static const u32 func_smart_amp = 0x1;
struct fwnode_handle *child_fwnode, *ext_fwnode;
- unsigned int val;
u32 function;
int ret;
@@ -266,21 +248,45 @@ static bool cs42l43_has_sidecar(struct fwnode_handle *fwnode)
if (!ext_fwnode)
continue;
- ret = fwnode_property_read_u32(ext_fwnode,
- "01fa-sidecar-instances",
- &val);
+ fwnode_handle_put(child_fwnode);
- fwnode_handle_put(ext_fwnode);
+ return ext_fwnode;
+ }
- if (ret)
- continue;
+ return NULL;
+}
- fwnode_handle_put(child_fwnode);
+static struct spi_board_info *cs42l43_create_bridge_amp(struct cs42l43_spi *priv,
+ const char * const name,
+ int cs, int spkid)
+{
+ struct property_entry *props = NULL;
+ struct software_node *swnode;
+ struct spi_board_info *info;
- return !!val;
+ if (spkid >= 0) {
+ props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL);
+ if (!props)
+ return NULL;
+
+ *props = PROPERTY_ENTRY_U32("cirrus,speaker-id", spkid);
}
- return false;
+ swnode = devm_kmalloc(priv->dev, sizeof(*swnode), GFP_KERNEL);
+ if (!swnode)
+ return NULL;
+
+ *swnode = SOFTWARE_NODE(name, props, NULL);
+
+ info = devm_kmemdup(priv->dev, &amp_info_template,
+ sizeof(amp_info_template), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->chip_select = cs;
+ info->swnode = swnode;
+
+ return info;
}
static void cs42l43_release_of_node(void *data)
@@ -298,7 +304,8 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);
struct cs42l43_spi *priv;
struct fwnode_handle *fwnode = dev_fwnode(cs42l43->dev);
- bool has_sidecar = cs42l43_has_sidecar(fwnode);
+ struct fwnode_handle *xu_fwnode __free(fwnode_handle) = cs42l43_find_xu_node(fwnode);
+ int nsidecars = 0;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -350,7 +357,9 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
return ret;
}
- if (has_sidecar) {
+ fwnode_property_read_u32(xu_fwnode, "01fa-sidecar-instances", &nsidecars);
+
+ if (nsidecars) {
ret = software_node_register(&cs42l43_gpiochip_swnode);
if (ret)
return dev_err_probe(priv->dev, ret,
@@ -373,12 +382,28 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
return dev_err_probe(priv->dev, ret,
"Failed to register SPI controller\n");
- if (has_sidecar) {
- if (!spi_new_device(priv->ctlr, &ampl_info))
+ if (nsidecars) {
+ struct spi_board_info *ampl_info;
+ struct spi_board_info *ampr_info;
+ int spkid = -EINVAL;
+
+ fwnode_property_read_u32(xu_fwnode, "01fa-spk-id-val", &spkid);
+
+ dev_dbg(priv->dev, "Found speaker ID %d\n", spkid);
+
+ ampl_info = cs42l43_create_bridge_amp(priv, "cs35l56-left", 0, spkid);
+ if (!ampl_info)
+ return -ENOMEM;
+
+ ampr_info = cs42l43_create_bridge_amp(priv, "cs35l56-right", 1, spkid);
+ if (!ampr_info)
+ return -ENOMEM;
+
+ if (!spi_new_device(priv->ctlr, ampl_info))
return dev_err_probe(priv->dev, -ENODEV,
"Failed to create left amp slave\n");
- if (!spi_new_device(priv->ctlr, &ampr_info))
+ if (!spi_new_device(priv->ctlr, ampr_info))
return dev_err_probe(priv->dev, -ENODEV,
"Failed to create right amp slave\n");
}
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index be3998104bfb..f7e8b5efa50e 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -984,6 +984,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
return ret;
free_dma:
+ /* This bit needs to be cleared to disable dspi->clk */
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
if (dspi->dma_rx) {
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
@@ -1013,6 +1016,9 @@ static void davinci_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&dspi->bitbang);
+ /* This bit needs to be cleared to disable dspi->clk */
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
if (dspi->dma_rx) {
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
index 5391bcac305c..4577e8096cd9 100644
--- a/drivers/spi/spi-dw-bt1.c
+++ b/drivers/spi/spi-dw-bt1.c
@@ -55,13 +55,15 @@ static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc)
!dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl))
return -EOPNOTSUPP;
+ if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
/*
* Make sure the requested region doesn't go out of the physically
- * mapped flash memory bounds and the operation is read-only.
+ * mapped flash memory bounds.
*/
- if (desc->info.offset + desc->info.length > dwsbt1->map_len ||
- desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
- return -EOPNOTSUPP;
+ if (desc->info.offset + desc->info.length > dwsbt1->map_len)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index ddfdb903047a..431788dd848c 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/of.h>
+#include "internals.h"
#include "spi-dw.h"
#ifdef CONFIG_DEBUG_FS
@@ -438,8 +439,7 @@ static int dw_spi_transfer_one(struct spi_controller *host,
transfer->effective_speed_hz = dws->current_freq;
/* Check if current transfer is a DMA transaction */
- if (host->can_dma && host->can_dma(host, spi, transfer))
- dws->dma_mapped = host->cur_msg_mapped;
+ dws->dma_mapped = spi_xfer_is_dma_mapped(host, spi, transfer);
/* For poll mode just disable all interrupts */
dw_spi_mask_intr(dws, 0xff);
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index e335132080bf..23ad1249f121 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -415,4 +415,5 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
+MODULE_DESCRIPTION("Freescale SPI controller driver CPM functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 0a2730cd07c6..191de1917f83 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1006,6 +1006,7 @@ static int dspi_setup(struct spi_device *spi)
struct chip_data *chip;
unsigned long clkrate;
bool cs = true;
+ int val;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
@@ -1018,11 +1019,19 @@ static int dspi_setup(struct spi_device *spi)
pdata = dev_get_platdata(&dspi->pdev->dev);
if (!pdata) {
- of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
- &cs_sck_delay);
-
- of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
- &sck_cs_delay);
+ val = spi_delay_to_ns(&spi->cs_setup, NULL);
+ cs_sck_delay = val >= 0 ? val : 0;
+ if (!cs_sck_delay)
+ of_property_read_u32(spi->dev.of_node,
+ "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ val = spi_delay_to_ns(&spi->cs_hold, NULL);
+ sck_cs_delay = val >= 0 ? val : 0;
+ if (!sck_cs_delay)
+ of_property_read_u32(spi->dev.of_node,
+ "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
} else {
cs_sck_delay = pdata->cs_sck_delay;
sck_cs_delay = pdata->sck_cs_delay;
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 4fc2c56555b5..bb7a625db5b0 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -158,4 +158,5 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
}
EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
+MODULE_DESCRIPTION("Freescale SPI/eSPI controller driver library");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index aa5ed254be46..32baa14dfd83 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -960,13 +960,13 @@ static void fsl_lpspi_remove(struct platform_device *pdev)
pm_runtime_disable(fsl_lpspi->dev);
}
-static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
+static int fsl_lpspi_suspend(struct device *dev)
{
pinctrl_pm_select_sleep_state(dev);
return pm_runtime_force_suspend(dev);
}
-static int __maybe_unused fsl_lpspi_resume(struct device *dev)
+static int fsl_lpspi_resume(struct device *dev)
{
int ret;
@@ -984,14 +984,14 @@ static int __maybe_unused fsl_lpspi_resume(struct device *dev)
static const struct dev_pm_ops fsl_lpspi_pm_ops = {
SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
fsl_lpspi_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
+ SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};
static struct platform_driver fsl_lpspi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fsl_lpspi_dt_ids,
- .pm = &fsl_lpspi_pm_ops,
+ .pm = pm_ptr(&fsl_lpspi_pm_ops),
},
.probe = fsl_lpspi_probe,
.remove_new = fsl_lpspi_remove,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 909cce109bba..36c587be9e28 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -5,17 +5,17 @@
* Copyright (C) 2006,2008 David Brownell
* Copyright (C) 2017 Linus Walleij
*/
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_gpio.h>
-
/*
* This bitbanging SPI host driver should help make systems usable
* when a native hardware SPI engine is not available, perhaps because
@@ -239,8 +239,8 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
static int spi_gpio_setup(struct spi_device *spi)
{
struct gpio_desc *cs;
- int status = 0;
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+ int ret;
/*
* The CS GPIOs have already been
@@ -248,15 +248,14 @@ static int spi_gpio_setup(struct spi_device *spi)
*/
if (spi_gpio->cs_gpios) {
cs = spi_gpio->cs_gpios[spi_get_chipselect(spi, 0)];
- if (!spi->controller_state && cs)
- status = gpiod_direction_output(cs,
- !(spi->mode & SPI_CS_HIGH));
+ if (!spi->controller_state && cs) {
+ ret = gpiod_direction_output(cs, !(spi->mode & SPI_CS_HIGH));
+ if (ret)
+ return ret;
+ }
}
- if (!status)
- status = spi_bitbang_setup(spi);
-
- return status;
+ return spi_bitbang_setup(spi);
}
static int spi_gpio_set_direction(struct spi_device *spi, bool output)
@@ -326,29 +325,6 @@ static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
return PTR_ERR_OR_ZERO(spi_gpio->sck);
}
-#ifdef CONFIG_OF
-static const struct of_device_id spi_gpio_dt_ids[] = {
- { .compatible = "spi-gpio" },
- {}
-};
-MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
-
-static int spi_gpio_probe_dt(struct platform_device *pdev,
- struct spi_controller *host)
-{
- host->dev.of_node = pdev->dev.of_node;
- host->use_gpio_descriptors = true;
-
- return 0;
-}
-#else
-static inline int spi_gpio_probe_dt(struct platform_device *pdev,
- struct spi_controller *host)
-{
- return 0;
-}
-#endif
-
static int spi_gpio_probe_pdata(struct platform_device *pdev,
struct spi_controller *host)
{
@@ -389,19 +365,21 @@ static int spi_gpio_probe(struct platform_device *pdev)
struct spi_controller *host;
struct spi_gpio *spi_gpio;
struct device *dev = &pdev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct spi_bitbang *bb;
host = devm_spi_alloc_host(dev, sizeof(*spi_gpio));
if (!host)
return -ENOMEM;
- if (pdev->dev.of_node)
- status = spi_gpio_probe_dt(pdev, host);
- else
+ if (fwnode) {
+ device_set_node(&host->dev, fwnode);
+ host->use_gpio_descriptors = true;
+ } else {
status = spi_gpio_probe_pdata(pdev, host);
-
- if (status)
- return status;
+ if (status)
+ return status;
+ }
spi_gpio = spi_controller_get_devdata(host);
@@ -459,10 +437,16 @@ static int spi_gpio_probe(struct platform_device *pdev)
MODULE_ALIAS("platform:" DRIVER_NAME);
+static const struct of_device_id spi_gpio_dt_ids[] = {
+ { .compatible = "spi-gpio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
+
static struct platform_driver spi_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(spi_gpio_dt_ids),
+ .of_match_table = spi_gpio_dt_ids,
},
.probe = spi_gpio_probe,
};
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f4006c82f867..85bd1a82a34e 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (spi_imx->target_burst * 8 - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
- if (spi_imx->usedma) {
- ctrl |= (spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- } else {
- if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- else
- ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- }
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
}
/* set clock speed */
@@ -1060,7 +1050,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
.rx_available = mx31_rx_available,
.reset = mx31_reset,
.fifo_size = 8,
- .has_dmamode = true,
+ .has_dmamode = false,
.dynamic_burst = false,
.has_targetmode = false,
.devtype = IMX35_CSPI,
@@ -1666,10 +1656,6 @@ static int spi_imx_setup(struct spi_device *spi)
return 0;
}
-static void spi_imx_cleanup(struct spi_device *spi)
-{
-}
-
static int
spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
{
@@ -1766,7 +1752,6 @@ static int spi_imx_probe(struct platform_device *pdev)
controller->transfer_one = spi_imx_transfer_one;
controller->setup = spi_imx_setup;
- controller->cleanup = spi_imx_cleanup;
controller->prepare_message = spi_imx_prepare_message;
controller->unprepare_message = spi_imx_unprepare_message;
controller->target_abort = spi_imx_target_abort;
@@ -1913,7 +1898,7 @@ static void spi_imx_remove(struct platform_device *pdev)
spi_imx_sdma_exit(spi_imx);
}
-static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
+static int spi_imx_runtime_resume(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct spi_imx_data *spi_imx;
@@ -1934,7 +1919,7 @@ static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
+static int spi_imx_runtime_suspend(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct spi_imx_data *spi_imx;
@@ -1947,29 +1932,28 @@ static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused spi_imx_suspend(struct device *dev)
+static int spi_imx_suspend(struct device *dev)
{
pinctrl_pm_select_sleep_state(dev);
return 0;
}
-static int __maybe_unused spi_imx_resume(struct device *dev)
+static int spi_imx_resume(struct device *dev)
{
pinctrl_pm_select_default_state(dev);
return 0;
}
static const struct dev_pm_ops imx_spi_pm = {
- SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
- spi_imx_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
+ RUNTIME_PM_OPS(spi_imx_runtime_suspend, spi_imx_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};
static struct platform_driver spi_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spi_imx_dt_ids,
- .pm = &imx_spi_pm,
+ .pm = pm_ptr(&imx_spi_pm),
},
.probe = spi_imx_probe,
.remove_new = spi_imx_remove,
diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c
index 003a6d21c4c3..318b0768701e 100644
--- a/drivers/spi/spi-ingenic.c
+++ b/drivers/spi/spi-ingenic.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include "internals.h"
#define REG_SSIDR 0x0
#define REG_SSICR0 0x4
@@ -242,11 +243,10 @@ static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
{
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
- bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);
spi_ingenic_prepare_transfer(priv, spi, xfer);
- if (ctlr->cur_msg_mapped && can_dma)
+ if (spi_xfer_is_dma_mapped(ctlr, spi, xfer))
return spi_ingenic_dma_tx(ctlr, xfer, bits);
if (bits > 16)
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index fc75492e50ff..8838a98b04c2 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -514,7 +514,9 @@ static int meson_spicc_prepare_message(struct spi_controller *host,
/* Setup no wait cycles by default */
writel_relaxed(0, spicc->base + SPICC_PERIODREG);
- writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG);
+ writel_bits_relaxed(SPICC_LBC_W1,
+ spi->mode & SPI_LOOP ? SPICC_LBC_W1 : 0,
+ spicc->base + SPICC_TESTREG);
return 0;
}
@@ -644,11 +646,13 @@ static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev));
init.name = name;
init.ops = &clk_fixed_factor_ops;
- init.flags = 0;
- if (spicc->data->has_pclk)
+ if (spicc->data->has_pclk) {
+ init.flags = CLK_SET_RATE_PARENT;
parent_data[0].hw = __clk_get_hw(spicc->pclk);
- else
+ } else {
+ init.flags = 0;
parent_data[0].hw = __clk_get_hw(spicc->core);
+ }
init.num_parents = 1;
pow2_fixed_div->mult = 1,
@@ -708,11 +712,13 @@ static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev));
init.name = name;
init.ops = &clk_fixed_factor_ops;
- init.flags = 0;
- if (spicc->data->has_pclk)
+ if (spicc->data->has_pclk) {
+ init.flags = CLK_SET_RATE_PARENT;
parent_data[0].hw = __clk_get_hw(spicc->pclk);
- else
+ } else {
+ init.flags = 0;
parent_data[0].hw = __clk_get_hw(spicc->core);
+ }
init.num_parents = 1;
enh_fixed_div->mult = 1,
@@ -846,7 +852,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
host->num_chipselect = 4;
host->dev.of_node = pdev->dev.of_node;
- host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
+ host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP;
host->bits_per_word_mask = SPI_BPW_MASK(32) |
SPI_BPW_MASK(24) |
SPI_BPW_MASK(16) |
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 634364c7cfe6..6246254e1dff 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -21,7 +21,7 @@
#include <linux/spi/spi.h>
#define MAX_LEN (0xffff)
-#define MAX_CS (8)
+#define MAX_CS (1)
#define DEFAULT_FRAMESIZE (8)
#define FIFO_DEPTH (32)
#define CLK_GEN_MODE1_MAX (255)
@@ -258,6 +258,9 @@ static int mchp_corespi_setup(struct spi_device *spi)
struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller);
u32 reg;
+ if (spi_is_csgpiod(spi))
+ return 0;
+
/*
* Active high targets need to be specifically set to their inactive
* states during probe by adding them to the "control group" & thus
@@ -516,6 +519,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
host->num_chipselect = num_cs;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->use_gpio_descriptors = true;
host->setup = mchp_corespi_setup;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->transfer_one = mchp_corespi_transfer_one;
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index 5d72e3d59df8..c02c4204442f 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -158,12 +158,14 @@ static int spi_mux_probe(struct spi_device *spi)
/* supported modes are the same as our parent's */
ctlr->mode_bits = spi->controller->mode_bits;
ctlr->flags = spi->controller->flags;
+ ctlr->bits_per_word_mask = spi->controller->bits_per_word_mask;
ctlr->transfer_one_message = spi_mux_transfer_one_message;
ctlr->setup = spi_mux_setup;
ctlr->num_chipselect = mux_control_states(priv->mux);
ctlr->bus_num = -1;
ctlr->dev.of_node = spi->dev.of_node;
ctlr->must_async = true;
+ ctlr->defer_optimize_message = true;
ret = devm_spi_register_controller(&spi->dev, ctlr);
if (ret)
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 60c9f3048ac9..6156d691630a 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -496,7 +496,7 @@ static int mxic_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
struct mxic_spi *mxic = spi_controller_get_devdata(desc->mem->spi->controller);
if (!mxic->linear.map)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (desc->info.offset + desc->info.length > U32_MAX)
return -EINVAL;
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 210a98d903fa..03b820e85651 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -541,5 +541,6 @@ static void __exit omap_uwire_exit(void)
subsys_initcall(omap_uwire_init);
module_exit(omap_uwire_exit);
+MODULE_DESCRIPTION("MicroWire interface driver for OMAP");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7e3083b83534..2c043817c66a 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -27,6 +27,8 @@
#include <linux/spi/spi.h>
+#include "internals.h"
+
#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
@@ -1208,8 +1210,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
unsigned count;
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
- ctlr->cur_msg_mapped &&
- ctlr->can_dma(ctlr, spi, t))
+ spi_xfer_is_dma_mapped(ctlr, spi, t))
omap2_mcspi_set_fifo(spi, t, 1);
omap2_mcspi_set_enable(spi, 1);
@@ -1220,8 +1221,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
+ OMAP2_MCSPI_TX0);
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
- ctlr->cur_msg_mapped &&
- ctlr->can_dma(ctlr, spi, t))
+ spi_xfer_is_dma_mapped(ctlr, spi, t))
count = omap2_mcspi_txrx_dma(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
@@ -1277,24 +1277,11 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
/*
* Check if this transfer contains only one word;
- * OR contains 1 to 4 words, with bits_per_word == 8 and no delay between each word
- * OR contains 1 to 2 words, with bits_per_word == 16 and no delay between each word
- *
- * If one of the two last case is true, this also change the bits_per_word of this
- * transfer to make it a bit faster.
- * It's not an issue to change the bits_per_word here even if the multi-mode is not
- * applicable for this message, the signal on the wire will be the same.
*/
if (bits_per_word < 8 && tr->len == 1) {
/* multi-mode is applicable, only one word (1..7 bits) */
- } else if (tr->word_delay.value == 0 && bits_per_word == 8 && tr->len <= 4) {
- /* multi-mode is applicable, only one "bigger" word (8,16,24,32 bits) */
- tr->bits_per_word = tr->len * bits_per_word;
- } else if (tr->word_delay.value == 0 && bits_per_word == 16 && tr->len <= 2) {
- /* multi-mode is applicable, only one "bigger" word (16,32 bits) */
- tr->bits_per_word = tr->len * bits_per_word / 2;
} else if (bits_per_word >= 8 && tr->len == bits_per_word / 8) {
- /* multi-mode is applicable, only one word (9..15,17..32 bits) */
+ /* multi-mode is applicable, only one word (8..32 bits) */
} else {
/* multi-mode is not applicable: more than one word in the transfer */
mcspi->use_multi_mode = false;
@@ -1671,4 +1658,5 @@ static struct platform_driver omap2_mcspi_driver = {
};
module_platform_driver(omap2_mcspi_driver);
+MODULE_DESCRIPTION("OMAP2 McSPI controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index cc18d320370f..fc98979eba48 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
@@ -15,7 +16,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/spi/spi.h>
-#include <linux/delay.h>
+#include "internals.h"
#define DRV_NAME "spi-pci1xxxx"
@@ -567,7 +568,7 @@ error:
static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
struct spi_device *spi, struct spi_transfer *xfer)
{
- if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped)
+ if (spi_xfer_is_dma_mapped(spi_ctlr, spi, xfer))
return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer);
else
return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 6d2efdb0e95f..616d032f1a89 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -10,8 +10,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/pm.h>
#include <linux/sprintf.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -265,10 +264,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
const struct pxa_spi_info *info;
- struct platform_device_info pi;
int ret;
- struct platform_device *pdev;
- struct pxa2xx_spi_controller spi_pdata;
+ struct pxa2xx_spi_controller *pdata;
struct ssp_device *ssp;
ret = pcim_enable_device(dev);
@@ -279,15 +276,17 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
if (ret)
return ret;
- memset(&spi_pdata, 0, sizeof(spi_pdata));
+ pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
- ssp = &spi_pdata.ssp;
+ ssp = &pdata->ssp;
ssp->dev = &dev->dev;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
info = (struct pxa_spi_info *)ent->driver_data;
- ret = info->setup(dev, &spi_pdata);
+ ret = info->setup(dev, pdata);
if (ret)
return ret;
@@ -298,28 +297,12 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
return ret;
ssp->irq = pci_irq_vector(dev, 0);
- memset(&pi, 0, sizeof(pi));
- pi.fwnode = dev_fwnode(&dev->dev);
- pi.parent = &dev->dev;
- pi.name = "pxa2xx-spi";
- pi.id = ssp->port_id;
- pi.data = &spi_pdata;
- pi.size_data = sizeof(spi_pdata);
-
- pdev = platform_device_register_full(&pi);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- pci_set_drvdata(dev, pdev);
-
- return 0;
+ return pxa2xx_spi_probe(&dev->dev, ssp);
}
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
- struct platform_device *pdev = pci_get_drvdata(dev);
-
- platform_device_unregister(pdev);
+ pxa2xx_spi_remove(&dev->dev);
}
static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
@@ -341,6 +324,9 @@ MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
static struct pci_driver pxa2xx_spi_pci_driver = {
.name = "pxa2xx_spi_pci",
.id_table = pxa2xx_spi_pci_devices,
+ .driver = {
+ .pm = pm_ptr(&pxa2xx_spi_pm_ops),
+ },
.probe = pxa2xx_spi_pci_probe,
.remove = pxa2xx_spi_pci_remove,
};
@@ -349,4 +335,5 @@ module_pci_driver(pxa2xx_spi_pci_driver);
MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SPI_PXA2xx);
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/spi/spi-pxa2xx-platform.c b/drivers/spi/spi-pxa2xx-platform.c
new file mode 100644
index 000000000000..98a8ceb7db6f
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-platform.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include "spi-pxa2xx.h"
+
+static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
+{
+ return param == chan->device->dev;
+}
+
+static int
+pxa2xx_spi_init_ssp(struct platform_device *pdev, struct ssp_device *ssp, enum pxa_ssp_type type)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int status;
+ u64 uid;
+
+ ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ssp->mmio_base))
+ return PTR_ERR(ssp->mmio_base);
+
+ ssp->phys_base = res->start;
+
+ ssp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0)
+ return ssp->irq;
+
+ ssp->type = type;
+ ssp->dev = dev;
+
+ status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (status)
+ ssp->port_id = -1;
+ else
+ ssp->port_id = uid;
+
+ return 0;
+}
+
+static void pxa2xx_spi_ssp_release(void *ssp)
+{
+ pxa_ssp_free(ssp);
+}
+
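+/* Request the SSP and release it automatically when the device goes away. */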
+static struct ssp_device *pxa2xx_spi_ssp_request(struct platform_device *pdev)
+{
+ struct ssp_device *ssp;
+ int status;
+
+ ssp = pxa_ssp_request(pdev->id, pdev->name);
+ if (!ssp)
+ return ssp;
+
+ status = devm_add_action_or_reset(&pdev->dev, pxa2xx_spi_ssp_release, ssp);
+ if (status)
+ return ERR_PTR(status);
+
+ return ssp;
+}
+
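+/*
+ * Build platform data when none is provided: the SSP type comes from an
+ * already enumerated SSP, the driver match data or the "intel,spi-pxa2xx-type"
+ * property, in that order.
+ */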
+static struct pxa2xx_spi_controller *
+pxa2xx_spi_init_pdata(struct platform_device *pdev)
+{
+ struct pxa2xx_spi_controller *pdata;
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ const void *match = device_get_match_data(dev);
+ enum pxa_ssp_type type = SSP_UNDEFINED;
+ struct ssp_device *ssp;
+ bool is_lpss_priv;
+ u32 num_cs = 1;
+ int status;
+
+ ssp = pxa2xx_spi_ssp_request(pdev);
+ if (IS_ERR(ssp))
+ return ERR_CAST(ssp);
+ if (ssp) {
+ type = ssp->type;
+ } else if (match) {
+ type = (enum pxa_ssp_type)(uintptr_t)match;
+ } else {
+ u32 value;
+
+ status = device_property_read_u32(dev, "intel,spi-pxa2xx-type", &value);
+ if (status)
+ return ERR_PTR(status);
+
+ type = (enum pxa_ssp_type)value;
+ }
+
+ /* Validate the SSP type correctness */
+ if (!(type > SSP_UNDEFINED && type < SSP_MAX))
+ return ERR_PTR(-EINVAL);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ /* Platforms with iDMA 64-bit */
+ is_lpss_priv = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpss_priv");
+ if (is_lpss_priv) {
+ pdata->tx_param = parent;
+ pdata->rx_param = parent;
+ pdata->dma_filter = pxa2xx_spi_idma_filter;
+ }
+
+ /* Read number of chip select pins, if provided */
+ device_property_read_u32(dev, "num-cs", &num_cs);
+
+ pdata->num_chipselect = num_cs;
+ pdata->is_target = device_property_read_bool(dev, "spi-slave");
+ pdata->enable_dma = true;
+ pdata->dma_burst_size = 1;
+
+ /* If SSP has been already enumerated, use it */
+ if (ssp)
+ return pdata;
+
+ status = pxa2xx_spi_init_ssp(pdev, &pdata->ssp, type);
+ if (status)
+ return ERR_PTR(status);
+
+ return pdata;
+}
+
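+/* Use the provided platform data or synthesize it, then hand off to the core. */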
+static int pxa2xx_spi_platform_probe(struct platform_device *pdev)
+{
+ struct pxa2xx_spi_controller *platform_info;
+ struct device *dev = &pdev->dev;
+ struct ssp_device *ssp;
+
+ platform_info = dev_get_platdata(dev);
+ if (!platform_info) {
+ platform_info = pxa2xx_spi_init_pdata(pdev);
+ if (IS_ERR(platform_info))
+ return dev_err_probe(dev, PTR_ERR(platform_info), "missing platform data\n");
+
+ dev->platform_data = platform_info;
+ }
+
+ ssp = pxa2xx_spi_ssp_request(pdev);
+ if (IS_ERR(ssp))
+ return PTR_ERR(ssp);
+ if (!ssp)
+ ssp = &platform_info->ssp;
+
+ return pxa2xx_spi_probe(dev, ssp);
+}
+
+static void pxa2xx_spi_platform_remove(struct platform_device *pdev)
+{
+ pxa2xx_spi_remove(&pdev->dev);
+}
+
+static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "80860F0E" },
+ { "8086228E" },
+ { "INT33C0" },
+ { "INT33C1" },
+ { "INT3430" },
+ { "INT3431" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+
+static const struct of_device_id pxa2xx_spi_of_match[] = {
+ { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
+
+static struct platform_driver driver = {
+ .driver = {
+ .name = "pxa2xx-spi",
+ .pm = pm_ptr(&pxa2xx_spi_pm_ops),
+ .acpi_match_table = pxa2xx_spi_acpi_match,
+ .of_match_table = pxa2xx_spi_of_match,
+ },
+ .probe = pxa2xx_spi_platform_probe,
+ .remove_new = pxa2xx_spi_platform_remove,
+};
+
+static int __init pxa2xx_spi_init(void)
+{
+ return platform_driver_register(&driver);
+}
+subsys_initcall(pxa2xx_spi_init);
+
+static void __exit pxa2xx_spi_exit(void)
+{
+ platform_driver_unregister(&driver);
+}
+module_exit(pxa2xx_spi_exit);
+
+MODULE_AUTHOR("Stephen Street");
+MODULE_DESCRIPTION("PXA2xx SSP SPI Controller platform driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(SPI_PXA2xx);
+MODULE_ALIAS("platform:pxa2xx-spi");
+MODULE_SOFTDEP("pre: dw_dmac");
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index efe76d0c21bb..16b96eb176cd 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -4,7 +4,6 @@
* Copyright (C) 2013, 2021 Intel Corporation
*/
-#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -14,15 +13,12 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/math64.h>
#include <linux/minmax.h>
-#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -30,13 +26,9 @@
#include <linux/spi/spi.h>
+#include "internals.h"
#include "spi-pxa2xx.h"
-MODULE_AUTHOR("Stephen Street");
-MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pxa2xx-spi");
-
#define TIMOUT_DFLT 1000
/*
@@ -99,7 +91,6 @@ struct lpss_config {
/* Chip select control */
unsigned cs_sel_shift;
unsigned cs_sel_mask;
- unsigned cs_num;
/* Quirks */
unsigned cs_clk_stays_gated : 1;
};
@@ -137,7 +128,6 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_hi = 224,
.cs_sel_shift = 2,
.cs_sel_mask = 1 << 2,
- .cs_num = 2,
},
{ /* LPSS_SPT_SSP */
.offset = 0x200,
@@ -1004,11 +994,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
}
dma_thresh = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT);
- dma_mapped = controller->can_dma &&
- controller->can_dma(controller, spi, transfer) &&
- controller->cur_msg_mapped;
+ dma_mapped = spi_xfer_is_dma_mapped(controller, spi, transfer);
if (dma_mapped) {
-
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
@@ -1265,135 +1252,24 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
-static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
-{
- return param == chan->device->dev;
-}
-
-static int
-pxa2xx_spi_init_ssp(struct platform_device *pdev, struct ssp_device *ssp, enum pxa_ssp_type type)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- int status;
- u64 uid;
-
- ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(ssp->mmio_base))
- return PTR_ERR(ssp->mmio_base);
-
- ssp->phys_base = res->start;
-
- ssp->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ssp->clk))
- return PTR_ERR(ssp->clk);
-
- ssp->irq = platform_get_irq(pdev, 0);
- if (ssp->irq < 0)
- return ssp->irq;
-
- ssp->type = type;
- ssp->dev = dev;
-
- status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
- if (status)
- ssp->port_id = -1;
- else
- ssp->port_id = uid;
-
- return 0;
-}
-
-static struct pxa2xx_spi_controller *
-pxa2xx_spi_init_pdata(struct platform_device *pdev)
-{
- struct pxa2xx_spi_controller *pdata;
- struct device *dev = &pdev->dev;
- struct device *parent = dev->parent;
- enum pxa_ssp_type type = SSP_UNDEFINED;
- struct ssp_device *ssp = NULL;
- const void *match;
- bool is_lpss_priv;
- u32 num_cs = 1;
- int status;
-
- is_lpss_priv = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpss_priv");
-
- match = device_get_match_data(dev);
- if (match)
- type = (uintptr_t)match;
- else if (is_lpss_priv) {
- u32 value;
-
- status = device_property_read_u32(dev, "intel,spi-pxa2xx-type", &value);
- if (status)
- return ERR_PTR(status);
-
- type = (enum pxa_ssp_type)value;
- } else {
- ssp = pxa_ssp_request(pdev->id, pdev->name);
- if (ssp) {
- type = ssp->type;
- pxa_ssp_free(ssp);
- }
- }
-
- /* Validate the SSP type correctness */
- if (!(type > SSP_UNDEFINED && type < SSP_MAX))
- return ERR_PTR(-EINVAL);
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- /* Platforms with iDMA 64-bit */
- if (is_lpss_priv) {
- pdata->tx_param = parent;
- pdata->rx_param = parent;
- pdata->dma_filter = pxa2xx_spi_idma_filter;
- }
-
- /* Read number of chip select pins, if provided */
- device_property_read_u32(dev, "num-cs", &num_cs);
-
- pdata->num_chipselect = num_cs;
- pdata->is_target = device_property_read_bool(dev, "spi-slave");
- pdata->enable_dma = true;
- pdata->dma_burst_size = 1;
-
- /* If SSP has been already enumerated, use it */
- if (ssp)
- return pdata;
-
- status = pxa2xx_spi_init_ssp(pdev, &pdata->ssp, type);
- if (status)
- return ERR_PTR(status);
-
- return pdata;
-}
-
static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
unsigned int cs)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
- if (has_acpi_companion(drv_data->ssp->dev)) {
- switch (drv_data->ssp_type) {
- /*
- * For Atoms the ACPI DeviceSelection used by the Windows
- * driver starts from 1 instead of 0 so translate it here
- * to match what Linux expects.
- */
- case LPSS_BYT_SSP:
- case LPSS_BSW_SSP:
- return cs - 1;
+ switch (drv_data->ssp_type) {
+ /*
+ * For some Intel Atom SoCs the ACPI DeviceSelection used by the Windows
+ * driver starts from 1 instead of 0 so translate it here to match what
+ * Linux expects.
+ */
+ case LPSS_BYT_SSP:
+ case LPSS_BSW_SSP:
+ return cs - 1;
- default:
- break;
- }
+ default:
+ return cs;
}
-
- return cs;
}
static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi)
@@ -1401,41 +1277,23 @@ static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi)
return MAX_DMA_LEN;
}
-static int pxa2xx_spi_probe(struct platform_device *pdev)
+int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp)
{
- struct device *dev = &pdev->dev;
struct pxa2xx_spi_controller *platform_info;
struct spi_controller *controller;
struct driver_data *drv_data;
- struct ssp_device *ssp;
const struct lpss_config *config;
int status;
u32 tmp;
platform_info = dev_get_platdata(dev);
- if (!platform_info) {
- platform_info = pxa2xx_spi_init_pdata(pdev);
- if (IS_ERR(platform_info))
- return dev_err_probe(dev, PTR_ERR(platform_info), "missing platform data\n");
- }
- dev_dbg(dev, "DMA burst size set to %u\n", platform_info->dma_burst_size);
-
- ssp = pxa_ssp_request(pdev->id, pdev->name);
- if (!ssp)
- ssp = &platform_info->ssp;
-
- if (!ssp->mmio_base)
- return dev_err_probe(dev, -ENODEV, "failed to get SSP\n");
-
if (platform_info->is_target)
controller = devm_spi_alloc_target(dev, sizeof(*drv_data));
else
controller = devm_spi_alloc_host(dev, sizeof(*drv_data));
+ if (!controller)
+ return dev_err_probe(dev, -ENOMEM, "cannot alloc spi_controller\n");
- if (!controller) {
- status = dev_err_probe(dev, -ENOMEM, "cannot alloc spi_controller\n");
- goto out_error_controller_alloc;
- }
drv_data = spi_controller_get_devdata(controller);
drv_data->controller = controller;
drv_data->controller_info = platform_info;
@@ -1486,10 +1344,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
drv_data);
- if (status < 0) {
- dev_err_probe(dev, status, "cannot get IRQ %d\n", ssp->irq);
- goto out_error_controller_alloc;
- }
+ if (status < 0)
+ return dev_err_probe(dev, status, "cannot get IRQ %d\n", ssp->irq);
/* Setup DMA if requested */
if (platform_info->enable_dma) {
@@ -1502,6 +1358,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
controller->max_dma_len = MAX_DMA_LEN;
controller->max_transfer_size =
pxa2xx_spi_max_dma_transfer_size;
+
+ dev_dbg(dev, "DMA burst size set to %u\n", platform_info->dma_burst_size);
}
}
@@ -1578,8 +1436,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
tmp &= LPSS_CAPS_CS_EN_MASK;
tmp >>= LPSS_CAPS_CS_EN_SHIFT;
platform_info->num_chipselect = ffz(tmp);
- } else if (config->cs_num) {
- platform_info->num_chipselect = config->cs_num;
}
}
controller->num_chipselect = platform_info->num_chipselect;
@@ -1594,13 +1450,13 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
}
- pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
/* Register with the SPI framework */
- platform_set_drvdata(pdev, drv_data);
+ dev_set_drvdata(dev, drv_data);
status = spi_register_controller(controller);
if (status) {
dev_err_probe(dev, status, "problem registering SPI controller\n");
@@ -1610,7 +1466,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
return status;
out_error_pm_runtime_enabled:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
out_error_clock_enabled:
clk_disable_unprepare(ssp->clk);
@@ -1619,17 +1475,16 @@ out_error_dma_irq_alloc:
pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
-out_error_controller_alloc:
- pxa_ssp_free(ssp);
return status;
}
+EXPORT_SYMBOL_NS_GPL(pxa2xx_spi_probe, SPI_PXA2xx);
-static void pxa2xx_spi_remove(struct platform_device *pdev)
+void pxa2xx_spi_remove(struct device *dev)
{
- struct driver_data *drv_data = platform_get_drvdata(pdev);
+ struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(dev);
spi_unregister_controller(drv_data->controller);
@@ -1641,15 +1496,13 @@ static void pxa2xx_spi_remove(struct platform_device *pdev)
if (drv_data->controller_info->enable_dma)
pxa2xx_spi_dma_release(drv_data);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
/* Release IRQ */
free_irq(ssp->irq, drv_data);
-
- /* Release SSP */
- pxa_ssp_free(ssp);
}
+EXPORT_SYMBOL_NS_GPL(pxa2xx_spi_remove, SPI_PXA2xx);
static int pxa2xx_spi_suspend(struct device *dev)
{
@@ -1701,49 +1554,11 @@ static int pxa2xx_spi_runtime_resume(struct device *dev)
return clk_prepare_enable(drv_data->ssp->clk);
}
-static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
+EXPORT_NS_GPL_DEV_PM_OPS(pxa2xx_spi_pm_ops, SPI_PXA2xx) = {
SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend, pxa2xx_spi_runtime_resume, NULL)
};
-static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
- { "80860F0E", LPSS_BYT_SSP },
- { "8086228E", LPSS_BSW_SSP },
- { "INT33C0", LPSS_LPT_SSP },
- { "INT33C1", LPSS_LPT_SSP },
- { "INT3430", LPSS_LPT_SSP },
- { "INT3431", LPSS_LPT_SSP },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
-
-static const struct of_device_id pxa2xx_spi_of_match[] = {
- { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
- {}
-};
-MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
-
-static struct platform_driver driver = {
- .driver = {
- .name = "pxa2xx-spi",
- .pm = pm_ptr(&pxa2xx_spi_pm_ops),
- .acpi_match_table = pxa2xx_spi_acpi_match,
- .of_match_table = pxa2xx_spi_of_match,
- },
- .probe = pxa2xx_spi_probe,
- .remove_new = pxa2xx_spi_remove,
-};
-
-static int __init pxa2xx_spi_init(void)
-{
- return platform_driver_register(&driver);
-}
-subsys_initcall(pxa2xx_spi_init);
-
-static void __exit pxa2xx_spi_exit(void)
-{
- platform_driver_unregister(&driver);
-}
-module_exit(pxa2xx_spi_exit);
-
-MODULE_SOFTDEP("pre: dw_dmac");
+MODULE_AUTHOR("Stephen Street");
+MODULE_DESCRIPTION("PXA2xx SSP SPI Controller core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 93e1e471e1c6..a470d3d634d3 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -14,6 +14,7 @@
#include <linux/pxa2xx_ssp.h>
+struct device;
struct gpio_desc;
/*
@@ -131,4 +132,9 @@ extern void pxa2xx_spi_dma_stop(struct driver_data *drv_data);
extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
+int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp);
+void pxa2xx_spi_remove(struct device *dev);
+
+extern const struct dev_pm_ops pxa2xx_spi_pm_ops;
+
#endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 2af63040ac6e..1a2f9cd92b3c 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -5,6 +5,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
@@ -16,8 +18,7 @@
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
+#include "internals.h"
#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
@@ -709,9 +710,7 @@ static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
controller->mode = QUP_IO_M_MODE_FIFO;
- else if (spi->controller->can_dma &&
- spi->controller->can_dma(spi->controller, spi, xfer) &&
- spi->controller->cur_msg_mapped)
+ else if (spi_xfer_is_dma_mapped(spi->controller, spi, xfer))
controller->mode = QUP_IO_M_MODE_BAM;
else
controller->mode = QUP_IO_M_MODE_BLOCK;
@@ -1369,5 +1368,6 @@ static struct platform_driver spi_qup_driver = {
};
module_platform_driver(spi_qup_driver);
+MODULE_DESCRIPTION("Qualcomm SPI controller with QUP interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index e11146932828..d3f07fd719bd 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -95,16 +95,16 @@ static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
spi_controller_get_devdata(desc->mem->spi->controller);
if (desc->info.offset + desc->info.length > U32_MAX)
- return -ENOTSUPP;
+ return -EINVAL;
if (!rpcif_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
- if (!rpc->dirmap && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
- return -ENOTSUPP;
+ if (!rpc->dirmap)
+ return -EOPNOTSUPP;
- if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
- return -ENOTSUPP;
+ if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
return 0;
}
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index f1e922fd362a..955c920c4b63 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
static int stm32_qspi_get_mode(u8 buswidth)
{
- if (buswidth == 4)
+ if (buswidth >= 4)
return CCR_BUSWIDTH_4;
return buswidth;
@@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
return -EINVAL;
mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
- if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
- ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
- gpiod_count(qspi->dev, "cs") == -ENOENT)) {
+ if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) {
dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
dev_err(qspi->dev, "configuration not supported\n");
@@ -676,10 +674,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
/*
- * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL
- * are both set in spi->mode and "cs-gpios" properties is found in DT
+ * Dual flash mode is only enabled when SPI_TX_OCTAL or SPI_RX_OCTAL
+ * is set in spi->mode and the "cs-gpios" property is found in DT
*/
- if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
+ if (mode) {
qspi->cr_reg |= CR_DFM;
dev_dbg(qspi->dev, "Dual flash mode enable");
}
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4a68abcdcc35..4c4ff074e3f6 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1016,8 +1016,10 @@ end_irq:
static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
+ stm32fx_spi_disable(spi);
return IRQ_HANDLED;
}
@@ -1055,7 +1057,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
- dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
sr, ier);
spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_NONE;
@@ -1185,8 +1187,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
~clrb) | setb,
spi->base + spi->cfg->regs->cpol.reg);
- stm32_spi_enable(spi);
-
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
@@ -1204,6 +1204,7 @@ static void stm32fx_spi_dma_tx_cb(void *data)
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
+ stm32fx_spi_disable(spi);
}
}
@@ -1218,6 +1219,7 @@ static void stm32_spi_dma_rx_cb(void *data)
struct stm32_spi *spi = data;
spi_finalize_current_transfer(spi->ctrl);
+ spi->cfg->disable(spi);
}
/**
@@ -1305,6 +1307,8 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
+ stm32_spi_enable(spi);
+
/* starting data transfer when buffer is loaded */
if (spi->tx_buf)
spi->cfg->write_tx(spi);
@@ -1341,6 +1345,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
+ stm32_spi_enable(spi);
+
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
@@ -1372,6 +1378,8 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
*/
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
}
+
+ stm32_spi_enable(spi);
}
/**
@@ -1405,6 +1413,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
+ stm32_spi_enable(spi);
+
if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c
index 6b16a22cc3a4..886d6d7771d4 100644
--- a/drivers/spi/spi-wpcm-fiu.c
+++ b/drivers/spi/spi-wpcm-fiu.c
@@ -378,7 +378,7 @@ static int wpcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
int cs = spi_get_chipselect(desc->mem->spi, 0);
if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* Unfortunately, FIU only supports a 16 MiB direct mapping window (per
@@ -387,11 +387,11 @@ static int wpcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
* flashes that are bigger than 16 MiB.
*/
if (desc->info.offset + desc->info.length > MAX_MEMORY_SIZE_PER_CS)
- return -ENOTSUPP;
+ return -EINVAL;
/* Don't read past the memory window */
if (cs * MAX_MEMORY_SIZE_PER_CS + desc->info.offset + desc->info.length > fiu->memory_size)
- return -ENOTSUPP;
+ return -EINVAL;
return 0;
}
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 63354dd3110f..846f00e23b71 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/gpio/driver.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
@@ -26,24 +27,63 @@
#define SPI_XCOMM_CMD_UPDATE_CONFIG 0x03
#define SPI_XCOMM_CMD_WRITE 0x04
+#define SPI_XCOMM_CMD_GPIO_SET 0x05
#define SPI_XCOMM_CLOCK 48000000
struct spi_xcomm {
struct i2c_client *i2c;
- uint16_t settings;
- uint16_t chipselect;
+ struct gpio_chip gc;
+
+ u16 settings;
+ u16 chipselect;
unsigned int current_speed;
- uint8_t buf[63];
+ u8 buf[63];
};
+static void spi_xcomm_gpio_set_value(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct spi_xcomm *spi_xcomm = gpiochip_get_data(chip);
+ unsigned char buf[2];
+
+ buf[0] = SPI_XCOMM_CMD_GPIO_SET;
+ buf[1] = !!val;
+
+ i2c_master_send(spi_xcomm->i2c, buf, 2);
+}
+
+static int spi_xcomm_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int spi_xcomm_gpio_add(struct spi_xcomm *spi_xcomm)
+{
+ struct device *dev = &spi_xcomm->i2c->dev;
+
+ if (!IS_ENABLED(CONFIG_GPIOLIB))
+ return 0;
+
+ spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction;
+ spi_xcomm->gc.set = spi_xcomm_gpio_set_value;
+ spi_xcomm->gc.can_sleep = 1;
+ spi_xcomm->gc.base = -1;
+ spi_xcomm->gc.ngpio = 1;
+ spi_xcomm->gc.label = spi_xcomm->i2c->name;
+ spi_xcomm->gc.owner = THIS_MODULE;
+
+ return devm_gpiochip_add_data(dev, &spi_xcomm->gc, spi_xcomm);
+}
+
static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len)
{
- uint16_t settings;
- uint8_t *buf = spi_xcomm->buf;
+ u16 settings;
+ u8 *buf = spi_xcomm->buf;
settings = spi_xcomm->settings;
settings |= len << SPI_XCOMM_SETTINGS_LEN_OFFSET;
@@ -56,10 +96,10 @@ static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len)
}
static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
- struct spi_device *spi, int is_active)
+ struct spi_device *spi, int is_active)
{
unsigned long cs = spi_get_chipselect(spi, 0);
- uint16_t chipselect = spi_xcomm->chipselect;
+ u16 chipselect = spi_xcomm->chipselect;
if (is_active)
chipselect |= BIT(cs);
@@ -70,7 +110,8 @@ static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
}
static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
- struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
+ struct spi_device *spi, struct spi_transfer *t,
+ unsigned int *settings)
{
if (t->len > 62)
return -EINVAL;
@@ -108,7 +149,7 @@ static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
}
static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
- struct spi_device *spi, struct spi_transfer *t)
+ struct spi_device *spi, struct spi_transfer *t)
{
int ret;
@@ -119,13 +160,13 @@ static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
ret = i2c_master_send(spi_xcomm->i2c, spi_xcomm->buf, t->len + 1);
if (ret < 0)
return ret;
- else if (ret != t->len + 1)
+ if (ret != t->len + 1)
return -EIO;
} else if (t->rx_buf) {
ret = i2c_master_recv(spi_xcomm->i2c, t->rx_buf, t->len);
if (ret < 0)
return ret;
- else if (ret != t->len)
+ if (ret != t->len)
return -EIO;
}
@@ -133,12 +174,12 @@ static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
}
static int spi_xcomm_transfer_one(struct spi_controller *host,
- struct spi_message *msg)
+ struct spi_message *msg)
{
struct spi_xcomm *spi_xcomm = spi_controller_get_devdata(host);
unsigned int settings = spi_xcomm->settings;
struct spi_device *spi = msg->spi;
- unsigned cs_change = 0;
+ unsigned int cs_change = 0;
struct spi_transfer *t;
bool is_first = true;
int status = 0;
@@ -147,7 +188,6 @@ static int spi_xcomm_transfer_one(struct spi_controller *host,
spi_xcomm_chipselect(spi_xcomm, spi, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
-
if (!t->tx_buf && !t->rx_buf && t->len) {
status = -EINVAL;
break;
@@ -208,7 +248,7 @@ static int spi_xcomm_probe(struct i2c_client *i2c)
struct spi_controller *host;
int ret;
- host = spi_alloc_host(&i2c->dev, sizeof(*spi_xcomm));
+ host = devm_spi_alloc_host(&i2c->dev, sizeof(*spi_xcomm));
if (!host)
return -ENOMEM;
@@ -221,13 +261,12 @@ static int spi_xcomm_probe(struct i2c_client *i2c)
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->transfer_one_message = spi_xcomm_transfer_one;
host->dev.of_node = i2c->dev.of_node;
- i2c_set_clientdata(i2c, host);
ret = devm_spi_register_controller(&i2c->dev, host);
if (ret < 0)
- spi_controller_put(host);
+ return ret;
- return ret;
+ return spi_xcomm_gpio_add(spi_xcomm);
}
static const struct i2c_device_id spi_xcomm_ids[] = {
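The spi-xcomm hunks above expose the bridge's single output-only line as a one-pin gpio_chip; because toggling it goes over I2C, the chip is registered with can_sleep set. A minimal, hypothetical consumer sketch using the generic gpiod API (the "reset" con_id and the consumer device are placeholders, not part of this patch):

	/* Hypothetical consumer of the new spi-xcomm GPIO line. */
	struct gpio_desc *gd;

	gd = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);	/* placeholder con_id */
	if (IS_ERR(gd))
		return PTR_ERR(gd);

	/* The line is driven via an I2C transfer, so use the _cansleep variant. */
	gpiod_set_value_cansleep(gd, 1);
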
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 289feccca376..d4da5464dbd0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -689,10 +689,12 @@ static int __spi_add_device(struct spi_device *spi)
* Make sure that multiple logical CS doesn't map to the same physical CS.
* For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
- if (status)
- return status;
+ if (!spi_controller_is_target(ctlr)) {
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+ if (status)
+ return status;
+ }
}
/* Set the bus ID string */
@@ -1243,6 +1245,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
else
rx_dev = ctlr->dev.parent;
+ ret = -ENOMSG;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync is done before each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
@@ -1257,6 +1260,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
attrs);
if (ret != 0)
return ret;
+
+ xfer->tx_sg_mapped = true;
}
if (xfer->rx_buf != NULL) {
@@ -1270,12 +1275,16 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
return ret;
}
+
+ xfer->rx_sg_mapped = true;
}
}
+ /* No transfer has been mapped, bail out with success */
+ if (ret)
+ return 0;
ctlr->cur_rx_dma_dev = rx_dev;
ctlr->cur_tx_dma_dev = tx_dev;
- ctlr->cur_msg_mapped = true;
return 0;
}
@@ -1286,24 +1295,21 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
struct device *tx_dev = ctlr->cur_tx_dma_dev;
struct spi_transfer *xfer;
- if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
- return 0;
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync has already been done after each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
- if (!ctlr->can_dma(ctlr, msg->spi, xfer))
- continue;
+ if (xfer->rx_sg_mapped)
+ spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ DMA_FROM_DEVICE, attrs);
+ xfer->rx_sg_mapped = false;
- spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
- DMA_FROM_DEVICE, attrs);
- spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
- DMA_TO_DEVICE, attrs);
+ if (xfer->tx_sg_mapped)
+ spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE, attrs);
+ xfer->tx_sg_mapped = false;
}
- ctlr->cur_msg_mapped = false;
-
return 0;
}
@@ -1313,11 +1319,10 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
struct device *rx_dev = ctlr->cur_rx_dma_dev;
struct device *tx_dev = ctlr->cur_tx_dma_dev;
- if (!ctlr->cur_msg_mapped)
- return;
-
- dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
- dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg_mapped)
+ dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg_mapped)
+ dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}
static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
@@ -1326,11 +1331,10 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
struct device *rx_dev = ctlr->cur_rx_dma_dev;
struct device *tx_dev = ctlr->cur_tx_dma_dev;
- if (!ctlr->cur_msg_mapped)
- return;
-
- dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
- dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg_mapped)
+ dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg_mapped)
+ dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
@@ -1627,8 +1631,8 @@ fallback_pio:
if (ret < 0) {
spi_dma_sync_for_cpu(ctlr, xfer);
- if (ctlr->cur_msg_mapped &&
- (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+ if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
+ (xfer->error & SPI_TRANS_FAIL_NO_START)) {
__spi_unmap_msg(ctlr, msg);
ctlr->fallback = true;
xfer->error &= ~SPI_TRANS_FAIL_NO_START;
@@ -2128,7 +2132,8 @@ static void __spi_unoptimize_message(struct spi_message *msg)
*/
static void spi_maybe_unoptimize_message(struct spi_message *msg)
{
- if (!msg->pre_optimized && msg->optimized)
+ if (!msg->pre_optimized && msg->optimized &&
+ !msg->spi->controller->defer_optimize_message)
__spi_unoptimize_message(msg);
}
@@ -2207,11 +2212,8 @@ static int spi_start_queue(struct spi_controller *ctlr)
static int spi_stop_queue(struct spi_controller *ctlr)
{
+ unsigned int limit = 500;
unsigned long flags;
- unsigned limit = 500;
- int ret = 0;
-
- spin_lock_irqsave(&ctlr->queue_lock, flags);
/*
* This is a bit lame, but is optimized for the common execution path.
@@ -2219,20 +2221,18 @@ static int spi_stop_queue(struct spi_controller *ctlr)
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead.
*/
- while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+ do {
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ if (list_empty(&ctlr->queue) && !ctlr->busy) {
+ ctlr->running = false;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ return 0;
+ }
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
usleep_range(10000, 11000);
- spin_lock_irqsave(&ctlr->queue_lock, flags);
- }
-
- if (!list_empty(&ctlr->queue) || ctlr->busy)
- ret = -EBUSY;
- else
- ctlr->running = false;
+ } while (--limit);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-
- return ret;
+ return -EBUSY;
}
static int spi_destroy_queue(struct spi_controller *ctlr)
@@ -2571,7 +2571,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
{
struct spi_controller *ctlr = spi->controller;
struct spi_device *ancillary;
- int rc = 0;
+ int rc;
/* Alloc an spi_device */
ancillary = spi_alloc_device(ctlr);
@@ -2717,7 +2717,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
return -ENODEV;
if (ctlr) {
- if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
return -ENODEV;
} else {
struct acpi_device *adev;
@@ -2816,7 +2816,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
- ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
+ device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
/* Apple does not use _CRS but nested devices for SPI slaves */
acpi_spi_parse_apple_properties(adev, &lookup);
}
@@ -3902,7 +3902,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits, ugly_bits;
- int status = 0;
+ int status;
/*
* Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
@@ -4135,7 +4135,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
xfer->tx_nbits != SPI_NBITS_DUAL &&
- xfer->tx_nbits != SPI_NBITS_QUAD)
+ xfer->tx_nbits != SPI_NBITS_QUAD &&
+ xfer->tx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
@@ -4150,7 +4151,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
xfer->rx_nbits != SPI_NBITS_DUAL &&
- xfer->rx_nbits != SPI_NBITS_QUAD)
+ xfer->rx_nbits != SPI_NBITS_QUAD &&
+ xfer->rx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
@@ -4269,6 +4271,11 @@ static int __spi_optimize_message(struct spi_device *spi,
static int spi_maybe_optimize_message(struct spi_device *spi,
struct spi_message *msg)
{
+ if (spi->controller->defer_optimize_message) {
+ msg->spi = spi;
+ return 0;
+ }
+
if (msg->pre_optimized)
return 0;
@@ -4299,6 +4306,13 @@ int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
{
int ret;
+ /*
+ * Pre-optimization is not supported and optimization is deferred e.g.
+ * when using spi-mux.
+ */
+ if (spi->controller->defer_optimize_message)
+ return 0;
+
ret = __spi_optimize_message(spi, msg);
if (ret)
return ret;
@@ -4325,6 +4339,9 @@ EXPORT_SYMBOL_GPL(spi_optimize_message);
*/
void spi_unoptimize_message(struct spi_message *msg)
{
+ if (msg->spi->controller->defer_optimize_message)
+ return;
+
__spi_unoptimize_message(msg);
msg->pre_optimized = false;
}
@@ -4357,6 +4374,34 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
return ctlr->transfer(spi, message);
}
+static void devm_spi_unoptimize_message(void *msg)
+{
+ spi_unoptimize_message(msg);
+}
+
+/**
+ * devm_spi_optimize_message - managed version of spi_optimize_message()
+ * @dev: the device that manages @msg (usually @spi->dev)
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ *
+ * spi_unoptimize_message() will automatically be called when the device is
+ * removed.
+ */
+int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg)
+{
+ int ret;
+
+ ret = spi_optimize_message(spi, msg);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
+}
+EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
+
/**
* spi_async - asynchronous SPI transfer
* @spi: device with which data will be exchanged
@@ -4407,8 +4452,6 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
- spi_maybe_unoptimize_message(message);
-
return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
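The spi.c changes above introduce devm_spi_optimize_message() plus a defer_optimize_message controller flag honoured by the optimize/unoptimize paths. A minimal sketch of how an SPI peripheral driver's probe might use the managed helper, assuming a driver-private structure that keeps the message and transfer around (all names in the sketch are illustrative):

	/* Illustrative probe-time use of devm_spi_optimize_message(). */
	struct spi_transfer *xfer = &priv->xfer;	/* hypothetical driver data */
	struct spi_message *msg = &priv->msg;
	int ret;

	xfer->tx_buf = priv->tx_buf;
	xfer->len = sizeof(priv->tx_buf);
	spi_message_init_with_transfers(msg, xfer, 1);

	/* Optimized once here; spi_unoptimize_message() runs automatically on unbind. */
	ret = devm_spi_optimize_message(&spi->dev, spi, msg);
	if (ret)
		return ret;
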
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 2a115a8fc263..5217aacfcf54 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -579,7 +579,7 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
if (ret)
goto exit_line_free;
- ret = gpiochip_add(gpio);
+ ret = gpiochip_add_data(gpio, NULL);
if (ret) {
dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
goto exit_line_free;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 297af1d80b12..5f518e5a9273 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -707,7 +707,7 @@ int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance
* block forever.
*/
for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
- if (state)
+ if (vchiq_remote_initialised(state))
break;
usleep_range(500, 600);
}
@@ -1202,7 +1202,7 @@ void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f
{
int i;
- if (!state)
+ if (!vchiq_remote_initialised(state))
return;
/*
@@ -1759,7 +1759,7 @@ static int vchiq_probe(struct platform_device *pdev)
if (err)
goto failed_platform_init;
- vchiq_debugfs_init();
+ vchiq_debugfs_init(&mgmt->state);
dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 8af209e34fb2..382ec08f6a14 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -413,6 +413,11 @@ struct vchiq_state {
struct opaque_platform_state *platform_state;
};
+static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
+{
+ return state->remote && state->remote->initialised;
+}
+
struct bulk_waiter {
struct vchiq_bulk *bulk;
struct completion event;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 54e7bf029d9a..d5f7f61c5626 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -42,9 +42,9 @@ static int debugfs_trace_show(struct seq_file *f, void *offset)
static int vchiq_dump_show(struct seq_file *f, void *offset)
{
- struct vchiq_instance *instance = f->private;
+ struct vchiq_state *state = f->private;
- vchiq_dump_state(f, instance->state);
+ vchiq_dump_state(f, state);
return 0;
}
@@ -121,12 +121,12 @@ void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
debugfs_remove_recursive(node->dentry);
}
-void vchiq_debugfs_init(void)
+void vchiq_debugfs_init(struct vchiq_state *state)
{
vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
- debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, NULL,
+ debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state,
&vchiq_dump_fops);
}
@@ -138,7 +138,7 @@ void vchiq_debugfs_deinit(void)
#else /* CONFIG_DEBUG_FS */
-void vchiq_debugfs_init(void)
+void vchiq_debugfs_init(struct vchiq_state *state)
{
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index e9bf055a4ca9..fabffd81b1ec 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -10,7 +10,7 @@ struct vchiq_debugfs_node {
struct dentry *dentry;
};
-void vchiq_debugfs_init(void);
+void vchiq_debugfs_init(struct vchiq_state *state);
void vchiq_debugfs_deinit(void);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 3c63347d2d08..430f2ed2ccd3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -1170,6 +1170,11 @@ static int vchiq_open(struct inode *inode, struct file *file)
dev_dbg(state->dev, "arm: vchiq open\n");
+ if (!vchiq_remote_initialised(state)) {
+ dev_dbg(state->dev, "arm: vchiq has no connection to VideoCore\n");
+ return -ENOTCONN;
+ }
+
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance)
return -ENOMEM;
@@ -1200,7 +1205,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
dev_dbg(state->dev, "arm: instance=%p\n", instance);
- if (!state) {
+ if (!vchiq_remote_initialised(state)) {
ret = -EPERM;
goto out;
}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 283804b49e91..3ff8103366c1 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1339,7 +1339,7 @@ err_free_rings:
return ret;
}
-static void vnt_stop(struct ieee80211_hw *hw)
+static void vnt_stop(struct ieee80211_hw *hw, bool suspend)
{
struct vnt_private *priv = hw->priv;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 7bbed462f062..4f09e733e7a8 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -613,7 +613,7 @@ err:
return ret;
}
-static void vnt_stop(struct ieee80211_hw *hw)
+static void vnt_stop(struct ieee80211_hw *hw, bool suspend)
{
struct vnt_private *priv = hw->priv;
int i;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7f6ca8177845..a3e09adc4e76 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -148,35 +148,38 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.is_nonrot = 1;
bi = bdev_get_integrity(bd);
- if (bi) {
- struct bio_set *bs = &ib_dev->ibd_bio_set;
-
- if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
- !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
- pr_err("IBLOCK export of blk_integrity: %s not"
- " supported\n", bi->profile->name);
- ret = -ENOSYS;
- goto out_blkdev_put;
- }
+ if (!bi)
+ return 0;
- if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
- dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
- } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
+ switch (bi->csum_type) {
+ case BLK_INTEGRITY_CSUM_IP:
+ pr_err("IBLOCK export of blk_integrity: %s not supported\n",
+ blk_integrity_profile_name(bi));
+ ret = -ENOSYS;
+ goto out_blkdev_put;
+ case BLK_INTEGRITY_CSUM_CRC:
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
- }
+ else
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+ break;
+ default:
+ break;
+ }
- if (dev->dev_attrib.pi_prot_type) {
- if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
- pr_err("Unable to allocate bioset for PI\n");
- ret = -ENOMEM;
- goto out_blkdev_put;
- }
- pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
- &bs->bio_integrity_pool);
+ if (dev->dev_attrib.pi_prot_type) {
+ struct bio_set *bs = &ib_dev->ibd_bio_set;
+
+ if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+ pr_err("Unable to allocate bioset for PI\n");
+ ret = -ENOMEM;
+ goto out_blkdev_put;
}
- dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
+ pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+ &bs->bio_integrity_pool);
}
+ dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
return 0;
out_blkdev_put:
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 3235e1c719e8..3e73efa51bba 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -660,7 +660,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
const struct ffa_ops *ops)
{
const struct ffa_msg_ops *msg_ops = ops->msg_ops;
- struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_GET_API_VERSION,
+ };
int rc;
msg_ops->mode_32bit_set(ffa_dev);
@@ -677,7 +679,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
return false;
}
- data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
+ data = (struct ffa_send_direct_data){
+ .data0 = OPTEE_FFA_GET_OS_VERSION,
+ };
rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d\n", rc);
@@ -698,7 +702,9 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
unsigned int *rpc_param_count,
unsigned int *max_notif_value)
{
- struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES,
+ };
int rc;
rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
index 0d7878e770cd..1970880c796f 100644
--- a/drivers/tee/optee/notif.c
+++ b/drivers/tee/optee/notif.c
@@ -29,7 +29,7 @@ static bool have_key(struct optee *optee, u_int key)
return false;
}
-int optee_notif_wait(struct optee *optee, u_int key)
+int optee_notif_wait(struct optee *optee, u_int key, u32 timeout)
{
unsigned long flags;
struct notif_entry *entry;
@@ -70,7 +70,12 @@ int optee_notif_wait(struct optee *optee, u_int key)
* Unlock temporarily and wait for completion.
*/
spin_unlock_irqrestore(&optee->notif.lock, flags);
- wait_for_completion(&entry->c);
+ if (timeout != 0) {
+ if (!wait_for_completion_timeout(&entry->c, timeout))
+ rc = -ETIMEDOUT;
+ } else {
+ wait_for_completion(&entry->c);
+ }
spin_lock_irqsave(&optee->notif.lock, flags);
list_del(&entry->link);
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 429cc20be5cc..424898cdc4e9 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -26,6 +26,9 @@
#define TEEC_ERROR_BUSY 0xFFFF000D
#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
+/* API Return Codes are from the GP TEE Internal Core API Specification */
+#define TEE_ERROR_TIMEOUT 0xFFFF3001
+
#define TEEC_ORIGIN_COMMS 0x00000002
/*
@@ -252,7 +255,7 @@ struct optee_call_ctx {
int optee_notif_init(struct optee *optee, u_int max_key);
void optee_notif_uninit(struct optee *optee);
-int optee_notif_wait(struct optee *optee, u_int key);
+int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
int optee_notif_send(struct optee *optee, u_int key);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
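With the prototype change above, optee_notif_wait() takes a timeout in milliseconds (0 meaning wait indefinitely, per the optee_rpc_cmd.h hunk below), and an expired wait is surfaced to the secure world as TEE_ERROR_TIMEOUT in the rpc.c hunk further down. A small illustrative call site (the key value and the 2000 ms budget are made up for the example):

	/* Illustrative only: wait up to 2 s for notification key 5. */
	rc = optee_notif_wait(optee, 5, 2000);
	if (rc == -ETIMEDOUT)
		arg->ret = TEE_ERROR_TIMEOUT;		/* as handle_rpc_func_cmd_wq() does */
	else if (rc)
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
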
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
index f3f06e0994a7..4576751b490c 100644
--- a/drivers/tee/optee/optee_rpc_cmd.h
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -41,6 +41,7 @@
* Waiting on notification
* [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
* [in] value[0].b notification value
+ * [in] value[0].c timeout in milliseconds or 0 if no timeout
*
* Sending a synchronous notification
* [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index f086812f1179..5de4504665be 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -130,6 +130,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
static void handle_rpc_func_cmd_wq(struct optee *optee,
struct optee_msg_arg *arg)
{
+ int rc = 0;
+
if (arg->num_params != 1)
goto bad;
@@ -139,7 +141,8 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
switch (arg->params[0].u.value.a) {
case OPTEE_RPC_NOTIFICATION_WAIT:
- if (optee_notif_wait(optee, arg->params[0].u.value.b))
+ rc = optee_notif_wait(optee, arg->params[0].u.value.b, arg->params[0].u.value.c);
+ if (rc)
goto bad;
break;
case OPTEE_RPC_NOTIFICATION_SEND:
@@ -153,7 +156,10 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
arg->ret = TEEC_SUCCESS;
return;
bad:
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ if (rc == -ETIMEDOUT)
+ arg->ret = TEE_ERROR_TIMEOUT;
+ else
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 204ed89a3ec9..ed16897584b4 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -343,32 +343,6 @@ config ROCKCHIP_THERMAL
trip point. Cpufreq is used as the cooling device and will throttle
CPUs when the Temperature crosses the passive trip point.
-config RCAR_THERMAL
- tristate "Renesas R-Car thermal driver"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Enable this to plug the R-Car thermal sensor driver into the Linux
- thermal framework.
-
-config RCAR_GEN3_THERMAL
- tristate "Renesas R-Car Gen3 and RZ/G2 thermal driver"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on HAS_IOMEM
- depends on OF
- help
- Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into
- the Linux thermal framework.
-
-config RZG2L_THERMAL
- tristate "Renesas RZ/G2L thermal driver"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on HAS_IOMEM
- depends on OF
- help
- Enable this to plug the RZ/G2L thermal sensor driver into the Linux
- thermal framework.
-
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
depends on MACH_KIRKWOOD || COMPILE_TEST
@@ -459,6 +433,8 @@ depends on (ARCH_STI || ARCH_STM32) && OF
source "drivers/thermal/st/Kconfig"
endmenu
+source "drivers/thermal/renesas/Kconfig"
+
source "drivers/thermal/tegra/Kconfig"
config GENERIC_ADC_THERMAL
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 5cdf7d68687f..ce7a4752ef52 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -38,9 +38,7 @@ obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_SUN8I_THERMAL) += sun8i_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
-obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
-obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
-obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o
+obj-y += renesas/
obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-y += samsung/
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 5c1cebe07580..5ad87eb3f578 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -163,6 +163,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_thermal_of_match_table);
static int bcm2835_thermal_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct thermal_zone_device *tz;
struct bcm2835_thermal_data *data;
@@ -170,12 +171,11 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
u32 val;
unsigned long rate;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- match = of_match_device(bcm2835_thermal_of_match_table,
- &pdev->dev);
+ match = of_match_device(bcm2835_thermal_of_match_table, dev);
if (!match)
return -EINVAL;
@@ -185,34 +185,20 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
return err;
}
- data->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(data->clk)) {
- err = PTR_ERR(data->clk);
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get clk: %d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(data->clk);
- if (err)
- return err;
+ data->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(data->clk))
+ return dev_err_probe(dev, PTR_ERR(data->clk), "Could not get clk\n");
rate = clk_get_rate(data->clk);
if ((rate < 1920000) || (rate > 5000000))
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n",
data->clk, rate);
/* register of thermal sensor and get info from DT */
- tz = devm_thermal_of_zone_register(&pdev->dev, 0, data,
- &bcm2835_thermal_ops);
- if (IS_ERR(tz)) {
- err = PTR_ERR(tz);
- dev_err(&pdev->dev,
- "Failed to register the thermal device: %d\n",
- err);
- goto err_clk;
- }
+ tz = devm_thermal_of_zone_register(dev, 0, data, &bcm2835_thermal_ops);
+ if (IS_ERR(tz))
+ return dev_err_probe(dev, PTR_ERR(tz), "Failed to register the thermal device\n");
/*
* right now the FW does set up the HW-block, so we are not
@@ -233,10 +219,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
*/
err = thermal_zone_get_trip(tz, 0, &trip);
if (err < 0) {
- dev_err(&pdev->dev,
- "Not able to read trip_temp: %d\n",
- err);
- goto err_tz;
+ dev_err(dev, "Not able to read trip_temp: %d\n", err);
+ return err;
}
/* set bandgap reference voltage and enable voltage regulator */
@@ -269,17 +253,11 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
*/
err = thermal_add_hwmon_sysfs(tz);
if (err)
- goto err_tz;
+ return err;
bcm2835_thermal_debugfs(pdev);
return 0;
-err_tz:
- devm_thermal_of_zone_unregister(&pdev->dev, tz);
-err_clk:
- clk_disable_unprepare(data->clk);
-
- return err;
}
static void bcm2835_thermal_remove(struct platform_device *pdev)
@@ -287,7 +265,6 @@ static void bcm2835_thermal_remove(struct platform_device *pdev)
struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
debugfs_remove_recursive(data->debugfsdir);
- clk_disable_unprepare(data->clk);
}
static struct platform_driver bcm2835_thermal_driver = {
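The bcm2835_thermal conversion above leans on devm_clk_get_enabled(), which keeps the clock prepared and enabled for the device's lifetime and releases it automatically, and on dev_err_probe(), which logs once and stays quiet on -EPROBE_DEFER; that is what lets the err_clk/err_tz unwind labels and the clk_disable_unprepare() call in remove() disappear. A condensed sketch of the resulting probe pattern (hypothetical driver, not this file):

	/* Sketch of the devm_clk_get_enabled() + dev_err_probe() pattern. */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Could not get clk\n");

	tz = devm_thermal_of_zone_register(dev, 0, data, &ops);
	if (IS_ERR(tz))
		return dev_err_probe(dev, PTR_ERR(tz), "Failed to register thermal zone\n");

	/* No manual unwinding needed: both resources are device-managed. */
	return 0;
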
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index acb52c9ee10f..4a2e869b9538 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -57,24 +57,16 @@ static void bang_bang_control(struct thermal_zone_device *tz,
if (instance->trip != trip)
continue;
- if (instance->target == THERMAL_NO_TARGET)
- instance->target = 0;
-
- if (instance->target != 0 && instance->target != 1) {
+ if (instance->target != 0 && instance->target != 1 &&
+ instance->target != THERMAL_NO_TARGET)
pr_debug("Unexpected state %ld of thermal instance %s in bang-bang\n",
instance->target, instance->name);
- instance->target = 1;
- }
-
/*
* Enable the fan when the trip is crossed on the way up and
* disable it when the trip is crossed on the way down.
*/
- if (instance->target == 0 && crossed_up)
- instance->target = 1;
- else if (instance->target == 1 && !crossed_up)
- instance->target = 0;
+ instance->target = crossed_up;
dev_dbg(&instance->cdev->device, "target=%ld\n", instance->target);
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 45f04a25255a..1b2345a697c5 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -759,6 +759,9 @@ static void power_allocator_manage(struct thermal_zone_device *tz)
return;
}
+ if (!params->trip_max)
+ return;
+
allocate_power(tz, params->trip_max->temperature);
params->update_cdevs = true;
}
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index e0fdc497bfcc..fd5527188cf9 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -55,7 +55,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (cur_state <= instance->lower)
return THERMAL_NO_TARGET;
- return clamp(cur_state - 1, instance->lower, instance->upper);
+ /*
+ * If 'throttle' is false, no mitigation is necessary, so
+ * request the lower state for this instance.
+ */
+ return instance->lower;
}
return instance->target;
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index dd751ae63608..0eb657db62e4 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -388,15 +388,10 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data)
{
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- int ret;
data->clk = devm_clk_get(dev, "thermal_clk");
- if (IS_ERR(data->clk)) {
- ret = PTR_ERR(data->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get thermal clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(data->clk))
+ return dev_err_probe(dev, PTR_ERR(data->clk), "failed to get thermal clk\n");
data->sensor = devm_kzalloc(dev, sizeof(*data->sensor), GFP_KERNEL);
if (!data->sensor)
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 83eaae5ca3b8..091fb30dedf3 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -331,25 +331,16 @@ static int imx_change_mode(struct thermal_zone_device *tz,
return 0;
}
-static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip_id,
- int temp)
+static int imx_set_trip_temp(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip, int temp)
{
struct imx_thermal_data *data = thermal_zone_device_priv(tz);
- struct thermal_trip trip;
int ret;
ret = pm_runtime_resume_and_get(data->dev);
if (ret < 0)
return ret;
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- if (ret)
- return ret;
-
- /* do not allow changing critical threshold */
- if (trip.type == THERMAL_TRIP_CRITICAL)
- return -EPERM;
-
/* do not allow passive to be set higher than critical */
if (temp < 0 || temp > trips[IMX_TRIP_CRITICAL].temperature)
return -EINVAL;
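Several drivers in this series (imx_thermal here, int340x and processor_thermal further down) are switched from a trip index to the .set_trip_temp prototype that receives a const struct thermal_trip pointer; any per-trip driver data travels in trip->priv via THERMAL_INT_TO_TRIP_PRIV()/THERMAL_TRIP_PRIV_TO_INT(). A bare-bones sketch of a callback written against the new prototype (hypothetical driver; example_write_threshold() is a placeholder):

	/* Sketch: .set_trip_temp callback using the trip-pointer prototype. */
	static int example_set_trip_temp(struct thermal_zone_device *tz,
					 const struct thermal_trip *trip, int temp)
	{
		int index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);	/* stashed at registration */

		if (trip->type == THERMAL_TRIP_CRITICAL)
			return -EPERM;		/* e.g. refuse to move the critical trip */

		return example_write_threshold(index, temp);	/* placeholder helper */
	}
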
@@ -601,28 +592,29 @@ static inline void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data
static int imx_thermal_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct imx_thermal_data *data;
struct regmap *map;
int measure_freq;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->dev = &pdev->dev;
+ data->dev = dev;
- map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon");
+ map = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,tempmon");
if (IS_ERR(map)) {
ret = PTR_ERR(map);
- dev_err(&pdev->dev, "failed to get tempmon regmap: %d\n", ret);
+ dev_err(dev, "failed to get tempmon regmap: %d\n", ret);
return ret;
}
data->tempmon = map;
- data->socdata = of_device_get_match_data(&pdev->dev);
+ data->socdata = of_device_get_match_data(dev);
if (!data->socdata) {
- dev_err(&pdev->dev, "no device match found\n");
+ dev_err(dev, "no device match found\n");
return -ENODEV;
}
@@ -645,15 +637,15 @@ static int imx_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- if (of_property_present(pdev->dev.of_node, "nvmem-cells")) {
+ if (of_property_present(dev->of_node, "nvmem-cells")) {
ret = imx_init_from_nvmem_cells(pdev);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"failed to init from nvmem\n");
} else {
ret = imx_init_from_tempmon_data(pdev);
if (ret) {
- dev_err(&pdev->dev, "failed to init from fsl,tempmon-data\n");
+ dev_err(dev, "failed to init from fsl,tempmon-data\n");
return ret;
}
}
@@ -673,15 +665,12 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = imx_thermal_register_legacy_cooling(data);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"failed to register cpufreq cooling device\n");
- data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+ data->thermal_clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->thermal_clk)) {
- ret = PTR_ERR(data->thermal_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get thermal clk: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(data->thermal_clk), "failed to get thermal clk\n");
goto legacy_cleanup;
}
@@ -694,7 +683,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
*/
ret = clk_prepare_enable(data->thermal_clk);
if (ret) {
- dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ dev_err(dev, "failed to enable thermal clk: %d\n", ret);
goto legacy_cleanup;
}
@@ -707,12 +696,12 @@ static int imx_thermal_probe(struct platform_device *pdev)
IMX_POLLING_DELAY);
if (IS_ERR(data->tz)) {
ret = PTR_ERR(data->tz);
- dev_err(&pdev->dev,
- "failed to register thermal zone device %d\n", ret);
+ dev_err(dev, "failed to register thermal zone device %d\n",
+ ret);
goto clk_disable;
}
- dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
+ dev_info(dev, "%s CPU temperature grade - max:%dC"
" critical:%dC passive:%dC\n", data->temp_grade,
data->temp_max / 1000, trips[IMX_TRIP_CRITICAL].temperature / 1000,
trips[IMX_TRIP_PASSIVE].temperature / 1000);
@@ -736,7 +725,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
usleep_range(20, 50);
/* the core was configured and enabled just before */
- pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_active(dev);
pm_runtime_enable(data->dev);
ret = pm_runtime_resume_and_get(data->dev);
@@ -748,11 +737,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (ret)
goto thermal_zone_unregister;
- ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+ ret = devm_request_threaded_irq(dev, data->irq,
imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
0, "imx_thermal", data);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+ dev_err(dev, "failed to request alarm irq: %d\n", ret);
goto thermal_zone_unregister;
}
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index fa96972266e4..b0c0f0ffdcb0 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -571,7 +571,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
if (!adev)
return -ENODEV;
- priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 86901f9f54d8..c094a422ded3 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -25,17 +25,6 @@ struct int3403_sensor {
struct int34x_thermal_zone *int340x_zone;
};
-struct int3403_performance_state {
- u64 performance;
- u64 power;
- u64 latency;
- u64 linear;
- u64 control;
- u64 raw_performace;
- char *raw_unit;
- int reserved;
-};
-
struct int3403_cdev {
struct thermal_cooling_device *cdev;
unsigned long max_state;
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 400fde7cb3b1..31ed338eb83c 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -39,13 +39,14 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
}
static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
- int trip, int temp)
+ const struct thermal_trip *trip, int temp)
{
struct int34x_thermal_zone *d = thermal_zone_device_priv(zone);
- char name[] = {'P', 'A', 'T', '0' + trip, '\0'};
+ unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);
+ char name[] = {'P', 'A', 'T', '0' + trip_index, '\0'};
acpi_status status;
- if (trip > 9)
+ if (trip_index > 9)
return -EINVAL;
status = acpi_execute_simple_method(d->adev->handle, name,
@@ -62,16 +63,6 @@ static void int340x_thermal_critical(struct thermal_zone_device *zone)
thermal_zone_device_type(zone));
}
-static inline void *int_to_trip_priv(int i)
-{
- return (void *)(long)i;
-}
-
-static inline int trip_priv_to_int(const struct thermal_trip *trip)
-{
- return (long)trip->priv;
-}
-
static int int340x_thermal_read_trips(struct acpi_device *zone_adev,
struct thermal_trip *zone_trips,
int trip_cnt)
@@ -106,7 +97,7 @@ static int int340x_thermal_read_trips(struct acpi_device *zone_adev,
break;
zone_trips[trip_cnt].type = THERMAL_TRIP_ACTIVE;
- zone_trips[trip_cnt].priv = int_to_trip_priv(i);
+ zone_trips[trip_cnt].priv = THERMAL_INT_TO_TRIP_PRIV(i);
trip_cnt++;
}
@@ -154,6 +145,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
zone_trips[i].type = THERMAL_TRIP_PASSIVE;
zone_trips[i].temperature = THERMAL_TEMP_INVALID;
zone_trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
+ zone_trips[i].priv = THERMAL_INT_TO_TRIP_PRIV(i);
}
trip_cnt = int340x_thermal_read_trips(adev, zone_trips, trip_cnt);
@@ -224,7 +216,7 @@ static int int340x_update_one_trip(struct thermal_trip *trip, void *arg)
break;
case THERMAL_TRIP_ACTIVE:
err = thermal_acpi_active_trip_temp(zone_adev,
- trip_priv_to_int(trip),
+ THERMAL_TRIP_PRIV_TO_INT(trip->priv),
&temp);
break;
default:
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index d75fae7b7ed2..7c46dd6bee73 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -440,7 +440,8 @@ void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *
proc_thermal_rapl_remove();
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR ||
- proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS)
+ proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS ||
+ proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR)
proc_thermal_rfim_remove(pdev);
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_POWER_FLOOR)
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 674f3c85dfbc..d5eca6db2c00 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -65,6 +65,7 @@ struct rapl_mmio_regs {
#define PROC_THERMAL_FEATURE_DLVR 0x10
#define PROC_THERMAL_FEATURE_WT_HINT 0x20
#define PROC_THERMAL_FEATURE_POWER_FLOOR 0x40
+#define PROC_THERMAL_FEATURE_MSI_SUPPORT 0x80
#if IS_ENABLED(CONFIG_PROC_THERMAL_MMIO_RAPL)
int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 14e34eabc419..114136893a59 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -63,6 +63,18 @@ static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = {
{ PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 },
};
+/* List of supported MSI IDs (sources) */
+enum proc_thermal_msi_ids {
+ PKG_THERMAL,
+ DDR_THERMAL,
+ THERM_POWER_FLOOR,
+ WORKLOAD_CHANGE,
+ MSI_THERMAL_MAX
+};
+
+/* Stores IRQ associated with a MSI ID */
+static int proc_thermal_msi_map[MSI_THERMAL_MAX];
+
#define B0D4_THERMAL_NOTIFY_DELAY 1000
static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY;
@@ -146,22 +158,41 @@ static irqreturn_t proc_thermal_irq_thread_handler(int irq, void *devid)
return IRQ_HANDLED;
}
+static int proc_thermal_match_msi_irq(int irq)
+{
+ int i;
+
+ if (!use_msi)
+ goto msi_fail;
+
+ for (i = 0; i < MSI_THERMAL_MAX; i++) {
+ if (proc_thermal_msi_map[i] == irq)
+ return i;
+ }
+
+msi_fail:
+ return -EOPNOTSUPP;
+}
+
static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
{
struct proc_thermal_pci *pci_info = devid;
struct proc_thermal_device *proc_priv;
- int ret = IRQ_HANDLED;
+ int ret = IRQ_NONE, msi_id;
u32 status;
proc_priv = pci_info->proc_priv;
+ msi_id = proc_thermal_match_msi_irq(irq);
+
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_WT_HINT) {
- if (proc_thermal_check_wt_intr(pci_info->proc_priv))
+ if (msi_id == WORKLOAD_CHANGE || proc_thermal_check_wt_intr(pci_info->proc_priv))
ret = IRQ_WAKE_THREAD;
}
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_POWER_FLOOR) {
- if (proc_thermal_check_power_floor_intr(pci_info->proc_priv))
+ if (msi_id == THERM_POWER_FLOOR ||
+ proc_thermal_check_power_floor_intr(pci_info->proc_priv))
ret = IRQ_WAKE_THREAD;
}
@@ -171,10 +202,11 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
* interrupt before scheduling work function for thermal threshold.
*/
proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status);
- if (status) {
+ if (msi_id == PKG_THERMAL || status) {
/* Disable enable interrupt flag */
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
pkg_thermal_schedule_work(&pci_info->work);
+ ret = IRQ_HANDLED;
}
pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
@@ -193,7 +225,8 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
return 0;
}
-static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
+static int sys_set_trip_temp(struct thermal_zone_device *tzd,
+ const struct thermal_trip *trip, int temp)
{
struct proc_thermal_pci *pci_info = thermal_zone_device_priv(tzd);
int tjmax, _temp;
@@ -243,6 +276,45 @@ static struct thermal_zone_params tzone_params = {
.no_hwmon = true,
};
+static bool msi_irq;
+
+static int proc_thermal_setup_msi(struct pci_dev *pdev, struct proc_thermal_pci *pci_info)
+{
+ int ret, i, irq;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, MSI_THERMAL_MAX, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to allocate vectors!\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "msi enabled:%d msix enabled:%d\n", pdev->msi_enabled,
+ pdev->msix_enabled);
+
+ for (i = 0; i < MSI_THERMAL_MAX; i++) {
+ irq = pci_irq_vector(pdev, i);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler,
+ proc_thermal_irq_thread_handler,
+ 0, KBUILD_MODNAME, pci_info);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ %d failed\n", irq);
+ goto err_free_msi_vectors;
+ }
+
+ proc_thermal_msi_map[i] = irq;
+ }
+
+ msi_irq = true;
+
+ return 0;
+
+err_free_msi_vectors:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
+}
+
static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct proc_thermal_device *proc_priv;
@@ -252,7 +324,6 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
.flags = THERMAL_TRIP_FLAG_RW_TEMP,
};
int irq_flag = 0, irq, ret;
- bool msi_irq = false;
proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
if (!proc_priv)
@@ -298,27 +369,24 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
goto err_del_legacy;
}
- if (use_msi && (pdev->msi_enabled || pdev->msix_enabled)) {
- /* request and enable interrupt */
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to allocate vectors!\n");
- goto err_ret_tzone;
- }
+ if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_MSI_SUPPORT)
+ use_msi = true;
- irq = pci_irq_vector(pdev, 0);
- msi_irq = true;
+ if (use_msi) {
+ ret = proc_thermal_setup_msi(pdev, pci_info);
+ if (ret)
+ goto err_ret_tzone;
} else {
irq_flag = IRQF_SHARED;
irq = pdev->irq;
- }
- ret = devm_request_threaded_irq(&pdev->dev, irq,
- proc_thermal_irq_handler, proc_thermal_irq_thread_handler,
- irq_flag, KBUILD_MODNAME, pci_info);
- if (ret) {
- dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
- goto err_free_vectors;
+ ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler,
+ proc_thermal_irq_thread_handler, irq_flag,
+ KBUILD_MODNAME, pci_info);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
+ goto err_ret_tzone;
+ }
}
ret = thermal_zone_device_enable(pci_info->tzone);
@@ -351,9 +419,6 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev)
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
- devm_free_irq(&pdev->dev, pdev->irq, pci_info);
- pci_free_irq_vectors(pdev);
-
thermal_zone_device_unregister(pci_info->tzone);
proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
if (!pci_info->no_legacy)
@@ -407,7 +472,9 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
- { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_RAPL) },
+ { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT |
+ PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR |
+ PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
{ PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR |
PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index e56db75a94fb..0e2dc1426282 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -19,6 +19,12 @@ struct mmio_reg {
u16 shift;
};
+struct mapping_table {
+ const char *attr_name;
+ const u32 value;
+ const char *mapped_str;
+};
+
/* These will represent sysfs attribute names */
static const char * const fivr_strings[] = {
"vco_ref_code_lo",
@@ -62,6 +68,78 @@ static const struct mmio_reg dlvr_mmio_regs[] = {
{ 1, 0x15A10, 1, 0x1, 16}, /* dlvr_pll_busy */
};
+static const struct mmio_reg lnl_dlvr_mmio_regs[] = {
+ { 0, 0x5A08, 5, 0x1F, 0}, /* dlvr_spread_spectrum_pct */
+ { 0, 0x5A08, 1, 0x1, 5}, /* dlvr_control_mode */
+ { 0, 0x5A08, 1, 0x1, 6}, /* dlvr_control_lock */
+ { 0, 0x5A08, 1, 0x1, 7}, /* dlvr_rfim_enable */
+ { 0, 0x5A08, 2, 0x3, 8}, /* dlvr_freq_select */
+ { 1, 0x5A10, 2, 0x3, 30}, /* dlvr_hardware_rev */
+ { 1, 0x5A10, 2, 0x3, 0}, /* dlvr_freq_mhz */
+ { 1, 0x5A10, 1, 0x1, 23}, /* dlvr_pll_busy */
+};
+
+static const struct mapping_table lnl_dlvr_mapping[] = {
+ {"dlvr_freq_select", 0, "2227.2"},
+ {"dlvr_freq_select", 1, "2140"},
+ {"dlvr_freq_mhz", 0, "2227.2"},
+ {"dlvr_freq_mhz", 1, "2140"},
+ {NULL, 0, NULL},
+};
+
+static int match_mapping_table(const struct mapping_table *table, const char *attr_name,
+ bool match_int_value, const u32 value, const char *value_str,
+ char **result_str, u32 *result_int)
+{
+ bool attr_matched = false;
+ int i = 0;
+
+ if (!table)
+ return -EOPNOTSUPP;
+
+ while (table[i].attr_name) {
+ if (strncmp(table[i].attr_name, attr_name, strlen(attr_name)))
+ goto match_next;
+
+ attr_matched = true;
+
+ if (match_int_value) {
+ if (table[i].value != value)
+ goto match_next;
+
+ *result_str = (char *)table[i].mapped_str;
+ return 0;
+ }
+
+ if (strncmp(table[i].mapped_str, value_str, strlen(table[i].mapped_str)))
+ goto match_next;
+
+ *result_int = table[i].value;
+
+ return 0;
+match_next:
+ i++;
+ }
+
+ /* If attribute name is matched, then the user space value is invalid */
+ if (attr_matched)
+ return -EINVAL;
+
+ return -EOPNOTSUPP;
+}
+
+static int get_mapped_string(const struct mapping_table *table, const char *attr_name,
+ u32 value, char **result)
+{
+ return match_mapping_table(table, attr_name, true, value, NULL, result, NULL);
+}
+
+static int get_mapped_value(const struct mapping_table *table, const char *attr_name,
+ const char *value, unsigned int *result)
+{
+ return match_mapping_table(table, attr_name, false, 0, value, NULL, result);
+}
+
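The lnl_dlvr_mapping table and helpers above let selected DLVR attributes expose decoded frequency strings instead of raw register fields: field value 0 maps to "2227.2" and 1 to "2140" (MHz) in both directions. An illustration of what the lookup helpers return for that table (conceptual only; both helpers are static to this file):

	/* Illustrative: decoding and encoding via lnl_dlvr_mapping. */
	char *str;
	unsigned int val;

	/* Register field 1 for "dlvr_freq_mhz" decodes to the string "2140". */
	if (!get_mapped_string(lnl_dlvr_mapping, "dlvr_freq_mhz", 1, &str))
		pr_debug("decoded: %s MHz\n", str);

	/* Writing "2227.2" to "dlvr_freq_select" encodes back to field value 0. */
	if (!get_mapped_value(lnl_dlvr_mapping, "dlvr_freq_select", "2227.2", &val))
		pr_debug("encoded: %u\n", val);
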
/* These will represent sysfs attribute names */
static const char * const dvfs_strings[] = {
"rfi_restriction_run_busy",
@@ -93,12 +171,14 @@ static ssize_t suffix##_show(struct device *dev,\
struct device_attribute *attr,\
char *buf)\
{\
+ const struct mapping_table *mapping = NULL;\
struct proc_thermal_device *proc_priv;\
struct pci_dev *pdev = to_pci_dev(dev);\
const struct mmio_reg *mmio_regs;\
const char **match_strs;\
+ int ret, err;\
u32 reg_val;\
- int ret;\
+ char *str;\
\
proc_priv = pci_get_drvdata(pdev);\
if (table == 1) {\
@@ -106,7 +186,12 @@ static ssize_t suffix##_show(struct device *dev,\
mmio_regs = adl_dvfs_mmio_regs;\
} else if (table == 2) { \
match_strs = (const char **)dlvr_strings;\
- mmio_regs = dlvr_mmio_regs;\
+ if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\
+ mmio_regs = lnl_dlvr_mmio_regs;\
+ mapping = lnl_dlvr_mapping;\
+ } else {\
+ mmio_regs = dlvr_mmio_regs;\
+ } \
} else {\
match_strs = (const char **)fivr_strings;\
mmio_regs = tgl_fivr_mmio_regs;\
@@ -116,7 +201,12 @@ static ssize_t suffix##_show(struct device *dev,\
return ret;\
reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\
ret = (reg_val >> mmio_regs[ret].shift) & mmio_regs[ret].mask;\
- return sprintf(buf, "%u\n", ret);\
+ err = get_mapped_string(mapping, attr->attr.name, ret, &str);\
+ if (!err)\
+ return sprintf(buf, "%s\n", str);\
+ if (err == -EOPNOTSUPP)\
+ return sprintf(buf, "%u\n", ret);\
+ return err;\
}
#define RFIM_STORE(suffix, table)\
@@ -124,6 +214,7 @@ static ssize_t suffix##_store(struct device *dev,\
struct device_attribute *attr,\
const char *buf, size_t count)\
{\
+ const struct mapping_table *mapping = NULL;\
struct proc_thermal_device *proc_priv;\
struct pci_dev *pdev = to_pci_dev(dev);\
unsigned int input;\
@@ -139,7 +230,12 @@ static ssize_t suffix##_store(struct device *dev,\
mmio_regs = adl_dvfs_mmio_regs;\
} else if (table == 2) { \
match_strs = (const char **)dlvr_strings;\
- mmio_regs = dlvr_mmio_regs;\
+ if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\
+ mmio_regs = lnl_dlvr_mmio_regs;\
+ mapping = lnl_dlvr_mapping;\
+ } else {\
+ mmio_regs = dlvr_mmio_regs;\
+ } \
} else {\
match_strs = (const char **)fivr_strings;\
mmio_regs = tgl_fivr_mmio_regs;\
@@ -150,9 +246,14 @@ static ssize_t suffix##_store(struct device *dev,\
return ret;\
if (mmio_regs[ret].read_only)\
return -EPERM;\
- err = kstrtouint(buf, 10, &input);\
- if (err)\
+ err = get_mapped_value(mapping, attr->attr.name, buf, &input);\
+ if (err == -EINVAL)\
return err;\
+ if (err == -EOPNOTSUPP) {\
+ err = kstrtouint(buf, 10, &input);\
+ if (err)\
+ return err;\
+ } \
mask = GENMASK(mmio_regs[ret].shift + mmio_regs[ret].bits - 1, mmio_regs[ret].shift);\
reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\
reg_val &= ~mask;\
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index a180a98bb9f1..5b18a46a10b0 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -401,10 +401,10 @@ static void hfi_disable(void)
* intel_hfi_online() - Enable HFI on @cpu
* @cpu: CPU in which the HFI will be enabled
*
- * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package
- * level. The first CPU in the die/package to come online does the full HFI
+ * Enable the HFI to be used in @cpu. The HFI is enabled at the package
+ * level. The first CPU in the package to come online does the full HFI
* initialization. Subsequent CPUs will just link themselves to the HFI
- * instance of their die/package.
+ * instance of their package.
*
* This function is called before enabling the thermal vector in the local APIC
* in order to ensure that @cpu has an associated HFI instance when it receives
@@ -414,31 +414,31 @@ void intel_hfi_online(unsigned int cpu)
{
struct hfi_instance *hfi_instance;
struct hfi_cpu_info *info;
- u16 die_id;
+ u16 pkg_id;
/* Nothing to do if hfi_instances are missing. */
if (!hfi_instances)
return;
/*
- * Link @cpu to the HFI instance of its package/die. It does not
+ * Link @cpu to the HFI instance of its package. It does not
* matter whether the instance has been initialized.
*/
info = &per_cpu(hfi_cpu_info, cpu);
- die_id = topology_logical_die_id(cpu);
+ pkg_id = topology_logical_package_id(cpu);
hfi_instance = info->hfi_instance;
if (!hfi_instance) {
- if (die_id >= max_hfi_instances)
+ if (pkg_id >= max_hfi_instances)
return;
- hfi_instance = &hfi_instances[die_id];
+ hfi_instance = &hfi_instances[pkg_id];
info->hfi_instance = hfi_instance;
}
init_hfi_cpu_index(info);
/*
- * Now check if the HFI instance of the package/die of @cpu has been
+ * Now check if the HFI instance of the package of @cpu has been
* initialized (by checking its header). In such case, all we have to
* do is to add @cpu to this instance's cpumask and enable the instance
* if needed.
@@ -504,7 +504,7 @@ free_hw_table:
*
* On some processors, hardware remembers previous programming settings even
* after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the
- * die/package of @cpu are offline. See note in intel_hfi_online().
+ * package of @cpu are offline. See note in intel_hfi_online().
*/
void intel_hfi_offline(unsigned int cpu)
{
@@ -674,9 +674,13 @@ void __init intel_hfi_init(void)
if (hfi_parse_features())
return;
- /* There is one HFI instance per die/package. */
- max_hfi_instances = topology_max_packages() *
- topology_max_dies_per_package();
+ /*
+ * Note: HFI resources are managed at the physical package scope.
+ * There could be platforms that enumerate packages as Linux dies.
+ * Special handling would be needed if this happens on an HFI-capable
+ * platform.
+ */
+ max_hfi_instances = topology_max_packages();
/*
* This allocation may fail. CPU hotplug callbacks must check
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index f5be2c389351..fc326985796c 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -298,6 +298,11 @@ static int intel_pch_thermal_suspend_noirq(struct device *device)
/* Get the PCH current temperature value */
pch_cur_temp = GET_PCH_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP));
+ if (pch_cur_temp >= pch_thr_temp)
+ dev_warn(&ptd->pdev->dev,
+ "CPU-PCH current temp [%dC] higher than the threshold temp [%dC], S0ix might fail. Start cooling...\n",
+ pch_cur_temp, pch_thr_temp);
+
/*
* If current PCH temperature is higher than configured PCH threshold
* value, run some delay loop with sleep to let the current temperature
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
index ec6ad26027bc..47296a14db3c 100644
--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
@@ -195,7 +195,7 @@ static int get_trip_temp(int trip)
}
static int update_trip_temp(struct soc_sensor_entry *aux_entry,
- int trip, int temp)
+ int trip_index, int temp)
{
u32 out;
u32 temp_out;
@@ -230,9 +230,9 @@ static int update_trip_temp(struct soc_sensor_entry *aux_entry,
*/
temp_out = temp + QRK_DTS_TEMP_BASE;
out = (store_ptps & ~(QRK_DTS_MASK_TP_THRES <<
- (trip * QRK_DTS_SHIFT_TP)));
+ (trip_index * QRK_DTS_SHIFT_TP)));
out |= (temp_out & QRK_DTS_MASK_TP_THRES) <<
- (trip * QRK_DTS_SHIFT_TP);
+ (trip_index * QRK_DTS_SHIFT_TP);
ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE,
QRK_DTS_REG_OFFSET_PTPS, out);
@@ -242,10 +242,26 @@ failed:
return ret;
}
-static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
- int temp)
+static inline int sys_set_trip_temp(struct thermal_zone_device *tzd,
+ const struct thermal_trip *trip,
+ int temp)
{
- return update_trip_temp(thermal_zone_device_priv(tzd), trip, temp);
+ unsigned int trip_index;
+
+ switch (trip->type) {
+ case THERMAL_TRIP_HOT:
+ trip_index = QRK_DTS_ID_TP_HOT;
+ break;
+
+ case THERMAL_TRIP_CRITICAL:
+ trip_index = QRK_DTS_ID_TP_CRITICAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return update_trip_temp(thermal_zone_device_priv(tzd), trip_index, temp);
}
static int sys_get_curr_temp(struct thermal_zone_device *tzd,
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index 7adf942665d4..43a29551ba17 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -129,18 +129,20 @@ err_restore_ptps:
return status;
}
-static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+static int sys_set_trip_temp(struct thermal_zone_device *tzd,
+ const struct thermal_trip *trip,
int temp)
{
struct intel_soc_dts_sensor_entry *dts = thermal_zone_device_priv(tzd);
struct intel_soc_dts_sensors *sensors = dts->sensors;
+ unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);
int status;
if (temp > sensors->tj_max)
return -EINVAL;
mutex_lock(&sensors->dts_update_lock);
- status = update_trip_temp(sensors, trip, temp);
+ status = update_trip_temp(sensors, trip_index, temp);
mutex_unlock(&sensors->dts_update_lock);
return status;
@@ -293,11 +295,12 @@ static void dts_trips_reset(struct intel_soc_dts_sensors *sensors, int dts_index
}
static void set_trip(struct thermal_trip *trip, enum thermal_trip_type type,
- u8 flags, int temp)
+ u8 flags, int temp, unsigned int index)
{
trip->type = type;
trip->flags = flags;
trip->temperature = temp;
+ trip->priv = THERMAL_INT_TO_TRIP_PRIV(index);
}
struct intel_soc_dts_sensors *
@@ -332,7 +335,7 @@ intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type,
sensors->soc_dts[i].sensors = sensors;
set_trip(&trips[i][0], THERMAL_TRIP_PASSIVE,
- THERMAL_TRIP_FLAG_RW_TEMP, 0);
+ THERMAL_TRIP_FLAG_RW_TEMP, 0, 0);
ret = update_trip_temp(sensors, 0, 0);
if (ret)
@@ -340,10 +343,10 @@ intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type,
if (critical_trip) {
temp = sensors->tj_max - crit_offset;
- set_trip(&trips[i][1], THERMAL_TRIP_CRITICAL, 0, temp);
+ set_trip(&trips[i][1], THERMAL_TRIP_CRITICAL, 0, temp, 1);
} else {
set_trip(&trips[i][1], THERMAL_TRIP_PASSIVE,
- THERMAL_TRIP_FLAG_RW_TEMP, 0);
+ THERMAL_TRIP_FLAG_RW_TEMP, 0, 1);
temp = 0;
}
diff --git a/drivers/thermal/intel/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c
index 9c825c6e1f38..718c6326eaf4 100644
--- a/drivers/thermal/intel/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel/intel_soc_dts_thermal.c
@@ -36,7 +36,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}
static const struct x86_cpu_id soc_thermal_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ),
{}
};
MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 5e8b7f34b395..c86654f28aa5 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -6,9 +6,171 @@
#include <linux/errno.h>
#include <linux/intel_tcc.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/msr.h>
/**
+ * struct temp_masks - Bitmasks for temperature readings
+ * @tcc_offset: TCC offset in MSR_TEMPERATURE_TARGET
+ * @digital_readout: Digital readout in MSR_IA32_THERM_STATUS
+ * @pkg_digital_readout: Digital readout in MSR_IA32_PACKAGE_THERM_STATUS
+ *
+ * Bitmasks to extract the fields of the MSR_TEMPERATURE_TARGET and IA32_[PACKAGE]_
+ * THERM_STATUS registers for different processor models.
+ *
+ * The bitmask of TjMax is not included in this structure. It is always 0xff.
+ */
+struct temp_masks {
+ u32 tcc_offset;
+ u32 digital_readout;
+ u32 pkg_digital_readout;
+};
+
+#define TCC_MODEL_TEMP_MASKS(model, _tcc_offset, _digital_readout, \
+ _pkg_digital_readout) \
+ static const struct temp_masks temp_##model __initconst = { \
+ .tcc_offset = _tcc_offset, \
+ .digital_readout = _digital_readout, \
+ .pkg_digital_readout = _pkg_digital_readout \
+ }
+
+TCC_MODEL_TEMP_MASKS(nehalem, 0, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(haswell_x, 0xf, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(broadwell, 0x3f, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(goldmont, 0x7f, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(tigerlake, 0x3f, 0xff, 0xff);
+TCC_MODEL_TEMP_MASKS(sapphirerapids, 0x3f, 0x7f, 0xff);
+
+/* Use these masks for processors not included in @intel_tcc_cpu_ids. */
+static struct temp_masks intel_tcc_temp_masks __ro_after_init = {
+ .tcc_offset = 0x7f,
+ .digital_readout = 0xff,
+ .pkg_digital_readout = 0xff,
+};
+
+static const struct x86_cpu_id intel_tcc_cpu_ids[] __initconst = {
+ X86_MATCH_VFM(INTEL_CORE_YONAH, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_CORE2_MEROM, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_CORE2_MEROM_L, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_CORE2_PENRYN, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_CORE2_DUNNINGTON, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_NEHALEM, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_NEHALEM_G, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_NEHALEM_EP, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_NEHALEM_EX, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_WESTMERE, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_WESTMERE_EP, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_WESTMERE_EX, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &temp_haswell_x),
+ X86_MATCH_VFM(INTEL_HASWELL, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &temp_haswell_x),
+ X86_MATCH_VFM(INTEL_HASWELL_L, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_BROADWELL, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &temp_haswell_x),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &temp_haswell_x),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &temp_haswell_x),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ICELAKE, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &temp_sapphirerapids),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &temp_sapphirerapids),
+ X86_MATCH_VFM(INTEL_LAKEFIELD, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_ATOM_BONNELL, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, &temp_nehalem),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &temp_goldmont),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &temp_goldmont),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &temp_goldmont),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &temp_tigerlake),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &temp_broadwell),
+ {}
+};
+
+static int __init intel_tcc_init(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_tcc_cpu_ids);
+ if (id)
+ memcpy(&intel_tcc_temp_masks, (const void *)id->driver_data,
+ sizeof(intel_tcc_temp_masks));
+
+ return 0;
+}
+/*
+ * Use subsys_initcall to ensure temperature bitmasks are initialized before
+ * the drivers that use this library.
+ */
+subsys_initcall(intel_tcc_init);
+
+/**
+ * intel_tcc_get_offset_mask() - Returns the bitmask to read TCC offset
+ *
+ * Get the model-specific bitmask to extract TCC_OFFSET from the MSR
+ * TEMPERATURE_TARGET register. If the mask is 0, it means the processor does
+ * not support TCC offset.
+ *
+ * Return: The model-specific bitmask for TCC offset.
+ */
+u32 intel_tcc_get_offset_mask(void)
+{
+ return intel_tcc_temp_masks.tcc_offset;
+}
+EXPORT_SYMBOL_NS(intel_tcc_get_offset_mask, INTEL_TCC);
+
+/**
+ * get_temp_mask() - Returns the model-specific bitmask for temperature
+ *
+ * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor.
+ *
+ * Get the model-specific bitmask to extract the temperature reading from the
+ * MSR_IA32_[PACKAGE]_THERM_STATUS register.
+ *
+ * Callers must check if the thermal status registers are supported.
+ *
+ * Return: The model-specific bitmask for temperature reading
+ */
+static u32 get_temp_mask(bool pkg)
+{
+ return pkg ? intel_tcc_temp_masks.pkg_digital_readout :
+ intel_tcc_temp_masks.digital_readout;
+}
+
+/**
* intel_tcc_get_tjmax() - returns the default TCC activation Temperature
* @cpu: cpu that the MSR should be run on, negative value means any cpu.
*
@@ -56,7 +218,7 @@ int intel_tcc_get_offset(int cpu)
if (err)
return err;
- return (low >> 24) & 0x3f;
+ return (low >> 24) & intel_tcc_temp_masks.tcc_offset;
}
EXPORT_SYMBOL_NS_GPL(intel_tcc_get_offset, INTEL_TCC);
@@ -76,7 +238,10 @@ int intel_tcc_set_offset(int cpu, int offset)
u32 low, high;
int err;
- if (offset < 0 || offset > 0x3f)
+ if (!intel_tcc_temp_masks.tcc_offset)
+ return -ENODEV;
+
+ if (offset < 0 || offset > intel_tcc_temp_masks.tcc_offset)
return -EINVAL;
if (cpu < 0)
@@ -90,7 +255,7 @@ int intel_tcc_set_offset(int cpu, int offset)
if (low & BIT(31))
return -EPERM;
- low &= ~(0x3f << 24);
+ low &= ~(intel_tcc_temp_masks.tcc_offset << 24);
low |= offset << 24;
if (cpu < 0)
@@ -113,8 +278,8 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC);
*/
int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
{
- u32 low, high;
u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS;
+ u32 low, high, mask;
int tjmax, err;
tjmax = intel_tcc_get_tjmax(cpu);
@@ -132,7 +297,9 @@ int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
if (!(low & BIT(31)))
return -ENODATA;
- *temp = tjmax - ((low >> 16) & 0x7f);
+ mask = get_temp_mask(pkg);
+
+ *temp = tjmax - ((low >> 16) & mask);
return 0;
}
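Once the model-specific mask is selected, the decode in intel_tcc_get_temp() is plain arithmetic: bit 31 of the status MSR is the reading-valid flag and the readout field starting at bit 16 counts degrees below TjMax. A standalone sketch with a made-up status value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t status = 0x883d0800;   /* hypothetical IA32_THERM_STATUS value */
        uint32_t mask = 0x7f;           /* model-specific digital readout mask */
        int tjmax = 100;                /* TjMax in degrees Celsius */

        if (status & (1u << 31))        /* reading valid */
                printf("temperature = %d C\n",
                       tjmax - (int)((status >> 16) & mask));
        else
                printf("no valid reading\n");
        return 0;
}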
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index 6c392147e6d1..17110ffa80bb 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -20,7 +20,7 @@ static struct thermal_cooling_device *tcc_cdev;
static int tcc_get_max_state(struct thermal_cooling_device *cdev, unsigned long
*state)
{
- *state = 0x3f;
+ *state = intel_tcc_get_offset_mask();
return 0;
}
@@ -49,21 +49,21 @@ static const struct thermal_cooling_device_ops tcc_cooling_ops = {
};
static const struct x86_cpu_id tcc_ids[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
{}
};
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index c0ca8e3ff2e7..65b33b56a9be 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -119,9 +119,11 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
}
static int
-sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
+sys_set_trip_temp(struct thermal_zone_device *tzd,
+ const struct thermal_trip *trip, int temp)
{
struct zone_device *zonedev = thermal_zone_device_priv(tzd);
+ unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);
u32 l, h, mask, shift, intr;
int tj_max, val, ret;
@@ -132,7 +134,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
val = (tj_max - temp)/1000;
- if (trip >= MAX_NUMBER_OF_TRIPS || val < 0 || val > 0x7f)
+ if (trip_index >= MAX_NUMBER_OF_TRIPS || val < 0 || val > 0x7f)
return -EINVAL;
ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
@@ -140,7 +142,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
if (ret < 0)
return ret;
- if (trip) {
+ if (trip_index) {
mask = THERM_MASK_THRESHOLD1;
shift = THERM_SHIFT_THRESHOLD1;
intr = THERM_INT_THRESHOLD1_ENABLE;
@@ -296,6 +298,7 @@ static int pkg_temp_thermal_trips_init(int cpu, int tj_max,
trips[i].type = THERMAL_TRIP_PASSIVE;
trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
+ trips[i].priv = THERMAL_INT_TO_TRIP_PRIV(i);
pr_debug("%s: cpu=%d, trip=%d, temp=%d\n",
__func__, cpu, i, trips[i].temperature);
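The trip->priv round-trip used by intel_soc_dts_iosf and x86_pkg_temp_thermal above follows one pattern: store the hardware slot index in the trip at init time, then recover it in the callback, which now receives the trip pointer instead of an index. A condensed sketch (the demo_* names and the hardware-programming step are placeholders, not code from this series):

#include <linux/thermal.h>

static struct thermal_trip demo_trips[2];

/* At init time, stash each trip's hardware slot index in ->priv ... */
static void demo_trips_init(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(demo_trips); i++) {
                demo_trips[i].type = THERMAL_TRIP_PASSIVE;
                demo_trips[i].flags = THERMAL_TRIP_FLAG_RW_TEMP;
                demo_trips[i].priv = THERMAL_INT_TO_TRIP_PRIV(i);
        }
}

/* ... and recover it in .set_trip_temp(). */
static int demo_set_trip_temp(struct thermal_zone_device *tz,
                              const struct thermal_trip *trip, int temp)
{
        unsigned int index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);

        if (index >= ARRAY_SIZE(demo_trips))
                return -EINVAL;

        /* program hardware threshold slot 'index' with 'temp' here */
        return 0;
}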
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index c74094a86982..9bc279ac131a 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -178,6 +178,7 @@ struct k3_j72xx_bandgap {
void __iomem *base;
void __iomem *cfg2_base;
struct k3_thermal_data *ts_data[K3_VTM_MAX_NUM_TS];
+ int cnt;
};
/* common data structures */
@@ -338,24 +339,52 @@ static void print_look_up_table(struct device *dev, int *ref_table)
dev_dbg(dev, "%d %d %d\n", i, derived_table[i], ref_table[i]);
}
+static void k3_j72xx_bandgap_init_hw(struct k3_j72xx_bandgap *bgp)
+{
+ struct k3_thermal_data *data;
+ int id, high_max, low_temp;
+ u32 val;
+
+ for (id = 0; id < bgp->cnt; id++) {
+ data = bgp->ts_data[id];
+ val = readl(bgp->cfg2_base + data->ctrl_offset);
+ val |= (K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN |
+ K3_VTM_TMPSENS_CTRL_SOC |
+ K3_VTM_TMPSENS_CTRL_CLRZ | BIT(4));
+ writel(val, bgp->cfg2_base + data->ctrl_offset);
+ }
+
+ /*
+ * Program TSHUT thresholds
+ * Step 1: set the thresholds to ~123C and 105C WKUP_VTM_MISC_CTRL2
+ * Step 2: WKUP_VTM_TMPSENS_CTRL_j set the MAXT_OUTRG_EN bit
+ * This is already taken care of as part of init
+ * Step 3: WKUP_VTM_MISC_CTRL set the ANYMAXT_OUTRG_ALERT_EN bit
+ */
+ high_max = k3_j72xx_bandgap_temp_to_adc_code(MAX_TEMP);
+ low_temp = k3_j72xx_bandgap_temp_to_adc_code(COOL_DOWN_TEMP);
+
+ writel((low_temp << 16) | high_max, bgp->cfg2_base + K3_VTM_MISC_CTRL2_OFFSET);
+ writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, bgp->cfg2_base + K3_VTM_MISC_CTRL_OFFSET);
+}
+
struct k3_j72xx_bandgap_data {
const bool has_errata_i2128;
};
static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
{
- int ret = 0, cnt, val, id;
- int high_max, low_temp;
- struct resource *res;
+ const struct k3_j72xx_bandgap_data *driver_data;
+ struct thermal_zone_device *ti_thermal;
struct device *dev = &pdev->dev;
+ bool workaround_needed = false;
struct k3_j72xx_bandgap *bgp;
struct k3_thermal_data *data;
- bool workaround_needed = false;
- const struct k3_j72xx_bandgap_data *driver_data;
- struct thermal_zone_device *ti_thermal;
- int *ref_table;
struct err_values err_vals;
void __iomem *fuse_base;
+ int ret = 0, val, id;
+ struct resource *res;
+ int *ref_table;
const s64 golden_factors[] = {
-490019999999999936,
@@ -422,10 +451,10 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
/* Get the sensor count in the VTM */
val = readl(bgp->base + K3_VTM_DEVINFO_PWR0_OFFSET);
- cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK;
- cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK);
+ bgp->cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK;
+ bgp->cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK);
- data = devm_kcalloc(bgp->dev, cnt, sizeof(*data), GFP_KERNEL);
+ data = devm_kcalloc(bgp->dev, bgp->cnt, sizeof(*data), GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
goto err_alloc;
@@ -449,8 +478,8 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
else
init_table(3, ref_table, pvt_wa_factors);
- /* Register the thermal sensors */
- for (id = 0; id < cnt; id++) {
+ /* Precompute the derived table & fill each thermal sensor struct */
+ for (id = 0; id < bgp->cnt; id++) {
data[id].bgp = bgp;
data[id].ctrl_offset = K3_VTM_TMPSENS0_CTRL_OFFSET + id * 0x20;
data[id].stat_offset = data[id].ctrl_offset +
@@ -470,13 +499,13 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
else if (id == 0 && !workaround_needed)
memcpy(derived_table, ref_table, TABLE_SIZE * 4);
- val = readl(data[id].bgp->cfg2_base + data[id].ctrl_offset);
- val |= (K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN |
- K3_VTM_TMPSENS_CTRL_SOC |
- K3_VTM_TMPSENS_CTRL_CLRZ | BIT(4));
- writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset);
-
bgp->ts_data[id] = &data[id];
+ }
+
+ k3_j72xx_bandgap_init_hw(bgp);
+
+ /* Register the thermal sensors */
+ for (id = 0; id < bgp->cnt; id++) {
ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, &data[id],
&k3_of_thermal_ops);
if (IS_ERR(ti_thermal)) {
@@ -486,21 +515,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
}
}
- /*
- * Program TSHUT thresholds
- * Step 1: set the thresholds to ~123C and 105C WKUP_VTM_MISC_CTRL2
- * Step 2: WKUP_VTM_TMPSENS_CTRL_j set the MAXT_OUTRG_EN bit
- * This is already taken care as per of init
- * Step 3: WKUP_VTM_MISC_CTRL set the ANYMAXT_OUTRG_ALERT_EN bit
- */
- high_max = k3_j72xx_bandgap_temp_to_adc_code(MAX_TEMP);
- low_temp = k3_j72xx_bandgap_temp_to_adc_code(COOL_DOWN_TEMP);
-
- writel((low_temp << 16) | high_max, data[0].bgp->cfg2_base +
- K3_VTM_MISC_CTRL2_OFFSET);
- mdelay(100);
- writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, data[0].bgp->cfg2_base +
- K3_VTM_MISC_CTRL_OFFSET);
+ platform_set_drvdata(pdev, bgp);
print_look_up_table(dev, ref_table);
/*
@@ -527,6 +542,35 @@ static void k3_j72xx_bandgap_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
+static int k3_j72xx_bandgap_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return 0;
+}
+
+static int k3_j72xx_bandgap_resume(struct device *dev)
+{
+ struct k3_j72xx_bandgap *bgp = dev_get_drvdata(dev);
+ int ret;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ k3_j72xx_bandgap_init_hw(bgp);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(k3_j72xx_bandgap_pm_ops,
+ k3_j72xx_bandgap_suspend,
+ k3_j72xx_bandgap_resume);
+
static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = {
.has_errata_i2128 = true,
};
@@ -554,6 +598,7 @@ static struct platform_driver k3_j72xx_bandgap_sensor_driver = {
.driver = {
.name = "k3-j72xx-soc-thermal",
.of_match_table = of_k3_j72xx_bandgap_match,
+ .pm = pm_sleep_ptr(&k3_j72xx_bandgap_pm_ops),
},
};
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 0bb3a495b56e..1997e91bb3be 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -128,6 +128,7 @@ struct lvts_data {
int temp_factor;
int temp_offset;
int gt_calib_bit_offset;
+ unsigned int def_calibration;
};
struct lvts_sensor {
@@ -689,6 +690,10 @@ static int lvts_calibration_init(struct device *dev, struct lvts_ctrl *lvts_ctrl
size_t calib_len)
{
int i;
+ u32 gt;
+
+ /* A zero value for gt means that the device has invalid efuse data */
+ gt = (((u32 *)efuse_calibration)[0] >> lvts_ctrl->lvts_data->gt_calib_bit_offset) & 0xff;
lvts_for_each_valid_sensor(i, lvts_ctrl_data) {
const struct lvts_sensor_data *sensor =
@@ -699,10 +704,17 @@ static int lvts_calibration_init(struct device *dev, struct lvts_ctrl *lvts_ctrl
sensor->cal_offsets[2] >= calib_len)
return -EINVAL;
- lvts_ctrl->calibration[i] =
- (efuse_calibration[sensor->cal_offsets[0]] << 0) +
- (efuse_calibration[sensor->cal_offsets[1]] << 8) +
- (efuse_calibration[sensor->cal_offsets[2]] << 16);
+ if (gt) {
+ lvts_ctrl->calibration[i] =
+ (efuse_calibration[sensor->cal_offsets[0]] << 0) +
+ (efuse_calibration[sensor->cal_offsets[1]] << 8) +
+ (efuse_calibration[sensor->cal_offsets[2]] << 16);
+ } else if (lvts_ctrl->lvts_data->def_calibration) {
+ lvts_ctrl->calibration[i] = lvts_ctrl->lvts_data->def_calibration;
+ } else {
+ dev_err(dev, "efuse contains invalid calibration data and no default given.\n");
+ return -ENODATA;
+ }
}
return 0;
@@ -769,11 +781,14 @@ static int lvts_golden_temp_init(struct device *dev, u8 *calib,
*/
gt = (((u32 *)calib)[0] >> lvts_data->gt_calib_bit_offset) & 0xff;
+ /* A zero value for gt means that the device has invalid efuse data */
if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
golden_temp = gt;
golden_temp_offset = golden_temp * 500 + lvts_data->temp_offset;
+ dev_info(dev, "%sgolden temp=%d\n", gt ? "" : "fake ", golden_temp);
+
return 0;
}
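The calibration fallback above reduces to a three-way decision; a condensed sketch (the helper name is made up; in lvts_golden_temp_init() the same zero-gt test merely keeps the built-in golden temperature rather than failing):

#include <linux/errno.h>
#include <linux/types.h>

static int demo_pick_calibration(u32 gt, u32 efuse_cal, u32 def_calibration,
                                 u32 *calibration)
{
        if (gt) {                       /* efuse data is valid, use it */
                *calibration = efuse_cal;
                return 0;
        }
        if (def_calibration) {          /* per-SoC fallback value */
                *calibration = def_calibration;
                return 0;
        }
        return -ENODATA;                /* nothing usable, fail the probe */
}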
@@ -1436,7 +1451,7 @@ static const struct lvts_ctrl_data mt8186_lvts_data_ctrl[] = {
.cal_offsets = { 29, 30, 31 } },
{ .dt_id = MT8186_ADSP,
.cal_offsets = { 34, 35, 28 } },
- { .dt_id = MT8186_MFG,
+ { .dt_id = MT8186_GPU,
.cal_offsets = { 39, 32, 33 } }
},
VALID_SENSOR_MAP(1, 1, 1, 0),
@@ -1458,7 +1473,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 1, 1),
.offset = 0x0,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1469,7 +1483,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x100,
- .mode = LVTS_MSR_FILTERED_MODE,
}
};
@@ -1483,31 +1496,28 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(0, 1, 0, 0),
.offset = 0x0,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
- { .dt_id = MT8188_AP_GPU1,
+ { .dt_id = MT8188_AP_GPU0,
.cal_offsets = { 43, 44, 45 } },
- { .dt_id = MT8188_AP_GPU2,
+ { .dt_id = MT8188_AP_GPU1,
.cal_offsets = { 46, 47, 48 } },
- { .dt_id = MT8188_AP_SOC1,
+ { .dt_id = MT8188_AP_ADSP,
.cal_offsets = { 49, 50, 51 } },
},
VALID_SENSOR_MAP(1, 1, 1, 0),
.offset = 0x100,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
- { .dt_id = MT8188_AP_SOC2,
+ { .dt_id = MT8188_AP_VDO,
.cal_offsets = { 52, 53, 54 } },
- { .dt_id = MT8188_AP_SOC3,
+ { .dt_id = MT8188_AP_INFRA,
.cal_offsets = { 55, 56, 57 } },
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x200,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1518,7 +1528,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x300,
- .mode = LVTS_MSR_FILTERED_MODE,
}
};
@@ -1703,6 +1712,7 @@ static const struct lvts_data mt8186_lvts_data = {
.temp_factor = LVTS_COEFF_A_MT7988,
.temp_offset = LVTS_COEFF_B_MT7988,
.gt_calib_bit_offset = 24,
+ .def_calibration = 19000,
};
static const struct lvts_data mt8188_lvts_mcu_data = {
@@ -1711,6 +1721,7 @@ static const struct lvts_data mt8188_lvts_mcu_data = {
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 20,
+ .def_calibration = 35000,
};
static const struct lvts_data mt8188_lvts_ap_data = {
@@ -1719,6 +1730,7 @@ static const struct lvts_data mt8188_lvts_ap_data = {
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 20,
+ .def_calibration = 35000,
};
static const struct lvts_data mt8192_lvts_mcu_data = {
@@ -1727,6 +1739,7 @@ static const struct lvts_data mt8192_lvts_mcu_data = {
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
+ .def_calibration = 35000,
};
static const struct lvts_data mt8192_lvts_ap_data = {
@@ -1735,6 +1748,7 @@ static const struct lvts_data mt8192_lvts_ap_data = {
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
+ .def_calibration = 35000,
};
static const struct lvts_data mt8195_lvts_mcu_data = {
@@ -1743,6 +1757,7 @@ static const struct lvts_data mt8195_lvts_mcu_data = {
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
+ .def_calibration = 35000,
};
static const struct lvts_data mt8195_lvts_ap_data = {
@@ -1751,6 +1766,7 @@ static const struct lvts_data mt8195_lvts_ap_data = {
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
+ .def_calibration = 35000,
};
static const struct of_device_id lvts_of_match[] = {
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 756ac6842ff9..7c9f4023babc 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -829,12 +829,9 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
channel->iio = devm_fwnode_iio_channel_get_by_name(adc_tm->dev,
of_fwnode_handle(node), NULL);
- if (IS_ERR(channel->iio)) {
- ret = PTR_ERR(channel->iio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%s: error getting channel: %d\n", name, ret);
- return ret;
- }
+ if (IS_ERR(channel->iio))
+ return dev_err_probe(dev, PTR_ERR(channel->iio), "%s: error getting channel\n",
+ name);
ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
if (!ret) {
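This conversion (repeated below in tsens, exynos_tmu and thermal-generic-adc) leans on dev_err_probe() returning the error it is given and logging only when that error is not -EPROBE_DEFER, in which case it records a deferral reason instead — so the explicit -EPROBE_DEFER check can be dropped. The generic shape, with a made-up resource name:

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/err.h>

static int demo_get_clock(struct device *dev)
{
        struct clk *clk = devm_clk_get(dev, "demo");

        if (IS_ERR(clk))
                return dev_err_probe(dev, PTR_ERR(clk),
                                     "failed to get demo clock\n");
        return 0;
}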
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 3cd74f6cac8f..96daad28b0c0 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -261,17 +261,13 @@ skip:
return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
}
-static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp)
+static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip, int temp)
{
struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz);
- struct thermal_trip trip;
int ret;
- ret = __thermal_zone_get_trip(chip->tz_dev, trip_id, &trip);
- if (ret)
- return ret;
-
- if (trip.type != THERMAL_TRIP_CRITICAL)
+ if (trip->type != THERMAL_TRIP_CRITICAL)
return 0;
mutex_lock(&chip->lock);
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index e76e23026dc8..0b4421bf4785 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -1336,11 +1336,9 @@ static int tsens_probe(struct platform_device *pdev)
if (priv->ops->calibrate) {
ret = priv->ops->calibrate(priv);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%s: calibration failed\n", __func__);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "%s: calibration failed\n",
+ __func__);
}
ret = tsens_register(priv);
diff --git a/drivers/thermal/renesas/Kconfig b/drivers/thermal/renesas/Kconfig
new file mode 100644
index 000000000000..dcf5fc5ae08e
--- /dev/null
+++ b/drivers/thermal/renesas/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config RCAR_THERMAL
+ tristate "Renesas R-Car thermal driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ help
+ Enable this to plug the R-Car thermal sensor driver into the Linux
+ thermal framework.
+
+config RCAR_GEN3_THERMAL
+ tristate "Renesas R-Car Gen3 and RZ/G2 thermal driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ help
+ Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into
+ the Linux thermal framework.
+
+config RZG2L_THERMAL
+ tristate "Renesas RZ/G2L thermal driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ help
+ Enable this to plug the RZ/G2L thermal sensor driver into the Linux
+ thermal framework.
diff --git a/drivers/thermal/renesas/Makefile b/drivers/thermal/renesas/Makefile
new file mode 100644
index 000000000000..bf9cb3cb94d6
--- /dev/null
+++ b/drivers/thermal/renesas/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
+obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
+obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/renesas/rcar_gen3_thermal.c
index 02494fa142c3..5c769871753a 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/renesas/rcar_gen3_thermal.c
@@ -16,7 +16,7 @@
#include <linux/pm_runtime.h>
#include <linux/thermal.h>
-#include "thermal_hwmon.h"
+#include "../thermal_hwmon.h"
/* Register offsets */
#define REG_GEN3_IRQSTR 0x04
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/renesas/rcar_thermal.c
index 925183753fcb..1e93f60b6d74 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/renesas/rcar_thermal.c
@@ -19,7 +19,7 @@
#include <linux/spinlock.h>
#include <linux/thermal.h>
-#include "thermal_hwmon.h"
+#include "../thermal_hwmon.h"
#define IDLE_INTERVAL 5000
diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/renesas/rzg2l_thermal.c
index 04efd824ac4c..0e1cb9045ee6 100644
--- a/drivers/thermal/rzg2l_thermal.c
+++ b/drivers/thermal/renesas/rzg2l_thermal.c
@@ -17,7 +17,7 @@
#include <linux/thermal.h>
#include <linux/units.h>
-#include "thermal_hwmon.h"
+#include "../thermal_hwmon.h"
#define CTEMP_MASK 0xFFF
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 6482513bfe66..96cffb2c44ba 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1004,11 +1004,11 @@ static const struct thermal_zone_device_ops exynos_sensor_ops = {
static int exynos_tmu_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct exynos_tmu_data *data;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1020,7 +1020,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
* TODO: Add regulator as an SOC feature, so that regulator enable
* is a compulsory call.
*/
- ret = devm_regulator_get_enable_optional(&pdev->dev, "vtmu");
+ ret = devm_regulator_get_enable_optional(dev, "vtmu");
switch (ret) {
case 0:
case -ENODEV:
@@ -1028,8 +1028,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
case -EPROBE_DEFER:
return -EPROBE_DEFER;
default:
- dev_err(&pdev->dev, "Failed to get enabled regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to get enabled regulator: %d\n", ret);
return ret;
}
@@ -1037,44 +1036,40 @@ static int exynos_tmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
- if (IS_ERR(data->clk)) {
- dev_err(&pdev->dev, "Failed to get clock\n");
- return PTR_ERR(data->clk);
- }
+ data->clk = devm_clk_get(dev, "tmu_apbif");
+ if (IS_ERR(data->clk))
+ return dev_err_probe(dev, PTR_ERR(data->clk), "Failed to get clock\n");
- data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
+ data->clk_sec = devm_clk_get(dev, "tmu_triminfo_apbif");
if (IS_ERR(data->clk_sec)) {
- if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
- dev_err(&pdev->dev, "Failed to get triminfo clock\n");
- return PTR_ERR(data->clk_sec);
- }
+ if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
+ return dev_err_probe(dev, PTR_ERR(data->clk_sec),
+ "Failed to get triminfo clock\n");
} else {
ret = clk_prepare(data->clk_sec);
if (ret) {
- dev_err(&pdev->dev, "Failed to get clock\n");
+ dev_err(dev, "Failed to get clock\n");
return ret;
}
}
ret = clk_prepare(data->clk);
if (ret) {
- dev_err(&pdev->dev, "Failed to get clock\n");
+ dev_err(dev, "Failed to get clock\n");
goto err_clk_sec;
}
switch (data->soc) {
case SOC_ARCH_EXYNOS5433:
case SOC_ARCH_EXYNOS7:
- data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
+ data->sclk = devm_clk_get(dev, "tmu_sclk");
if (IS_ERR(data->sclk)) {
- dev_err(&pdev->dev, "Failed to get sclk\n");
- ret = PTR_ERR(data->sclk);
+ ret = dev_err_probe(dev, PTR_ERR(data->sclk), "Failed to get sclk\n");
goto err_clk;
} else {
ret = clk_prepare_enable(data->sclk);
if (ret) {
- dev_err(&pdev->dev, "Failed to enable sclk\n");
+ dev_err(dev, "Failed to enable sclk\n");
goto err_clk;
}
}
@@ -1085,33 +1080,30 @@ static int exynos_tmu_probe(struct platform_device *pdev)
ret = exynos_tmu_initialize(pdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize TMU\n");
+ dev_err(dev, "Failed to initialize TMU\n");
goto err_sclk;
}
- data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data,
+ data->tzd = devm_thermal_of_zone_register(dev, 0, data,
&exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
- ret = PTR_ERR(data->tzd);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to register sensor: %d\n",
- ret);
+ ret = dev_err_probe(dev, PTR_ERR(data->tzd), "Failed to register sensor\n");
goto err_sclk;
}
ret = exynos_thermal_zone_configure(pdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to configure the thermal zone\n");
+ dev_err(dev, "Failed to configure the thermal zone\n");
goto err_sclk;
}
- ret = devm_request_threaded_irq(&pdev->dev, data->irq, NULL,
+ ret = devm_request_threaded_irq(dev, data->irq, NULL,
exynos_tmu_threaded_irq,
IRQF_TRIGGER_RISING
| IRQF_SHARED | IRQF_ONESHOT,
- dev_name(&pdev->dev), data);
+ dev_name(dev), data);
if (ret) {
- dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
+ dev_err(dev, "Failed to request irq: %d\n", data->irq);
goto err_sclk;
}
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index 29c2269b0fb3..e427117381a4 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -142,15 +142,6 @@ static const struct st_thermal_sensor_ops st_mmap_sensor_ops = {
.enable_irq = st_mmap_enable_irq,
};
-/* Compatible device data stih416 mpe thermal sensor */
-static const struct st_thermal_compat_data st_416mpe_cdata = {
- .reg_fields = st_mmap_thermal_regfields,
- .ops = &st_mmap_sensor_ops,
- .calibration_val = 14,
- .temp_adjust_val = -95,
- .crit_temp = 120,
-};
-
/* Compatible device data stih407 thermal sensor */
static const struct st_thermal_compat_data st_407_cdata = {
.reg_fields = st_mmap_thermal_regfields,
@@ -161,7 +152,6 @@ static const struct st_thermal_compat_data st_407_cdata = {
};
static const struct of_device_id st_mmap_thermal_of_match[] = {
- { .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata },
{ .compatible = "st,stih407-thermal", .data = &st_407_cdata },
{ /* sentinel */ }
};
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index e7fe8683bfc5..d3dfc34c62c6 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -582,23 +582,18 @@ static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
return temp;
}
-static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp)
+static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip, int temp)
{
struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz);
struct tegra_soctherm *ts = zone->ts;
- struct thermal_trip trip;
const struct tegra_tsensor_group *sg = zone->sg;
struct device *dev = zone->dev;
- int ret;
if (!tz)
return -EINVAL;
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- if (ret)
- return ret;
-
- if (trip.type == THERMAL_TRIP_CRITICAL) {
+ if (trip->type == THERMAL_TRIP_CRITICAL) {
/*
* If thermtrips property is set in DT,
* doesn't need to program critical type trip to HW,
@@ -609,7 +604,7 @@ static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip
else
return 0;
- } else if (trip.type == THERMAL_TRIP_HOT) {
+ } else if (trip->type == THERMAL_TRIP_HOT) {
int i;
for (i = 0; i < THROTTLE_SIZE; i++) {
@@ -620,7 +615,7 @@ static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip
continue;
cdev = ts->throt_cfgs[i].cdev;
- if (get_thermal_instance(tz, cdev, trip_id))
+ if (thermal_trip_is_bound_to_cdev(tz, trip, cdev))
stc = find_throttle_cfg_by_name(ts, cdev->type);
else
continue;
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index 1717e4a19dcb..ee3d0aa31406 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -117,44 +117,41 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev,
static int gadc_thermal_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct gadc_thermal_info *gti;
int ret;
- if (!pdev->dev.of_node) {
- dev_err(&pdev->dev, "Only DT based supported\n");
+ if (!dev->of_node) {
+ dev_err(dev, "Only DT based supported\n");
return -ENODEV;
}
- gti = devm_kzalloc(&pdev->dev, sizeof(*gti), GFP_KERNEL);
+ gti = devm_kzalloc(dev, sizeof(*gti), GFP_KERNEL);
if (!gti)
return -ENOMEM;
- gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel");
- if (IS_ERR(gti->channel)) {
- ret = PTR_ERR(gti->channel);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
- return ret;
- }
+ gti->channel = devm_iio_channel_get(dev, "sensor-channel");
+ if (IS_ERR(gti->channel))
+ return dev_err_probe(dev, PTR_ERR(gti->channel), "IIO channel not found\n");
- ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
+ ret = gadc_thermal_read_linear_lookup_table(dev, gti);
if (ret < 0)
return ret;
- gti->dev = &pdev->dev;
+ gti->dev = dev;
- gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti,
+ gti->tz_dev = devm_thermal_of_zone_register(dev, 0, gti,
&gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
ret = PTR_ERR(gti->tz_dev);
if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
+ dev_err(dev,
"Thermal zone sensor register failed: %d\n",
ret);
return ret;
}
- devm_thermal_add_hwmon_sysfs(&pdev->dev, gti->tz_dev);
+ devm_thermal_add_hwmon_sysfs(dev, gti->tz_dev);
return 0;
}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 54cce4e523bc..8795187fbc52 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -300,6 +300,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
else if (tz->polling_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
+ else if (tz->temperature == THERMAL_TEMP_INVALID)
+ thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS));
}
static struct thermal_governor *thermal_get_tz_governor(struct thermal_zone_device *tz)
@@ -463,20 +465,36 @@ static void thermal_governor_trip_crossed(struct thermal_governor *governor,
const struct thermal_trip *trip,
bool crossed_up)
{
+ if (trip->type == THERMAL_TRIP_HOT || trip->type == THERMAL_TRIP_CRITICAL)
+ return;
+
if (governor->trip_crossed)
governor->trip_crossed(tz, trip, crossed_up);
}
-static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a,
+static void thermal_trip_crossed(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_governor *governor,
+ bool crossed_up)
+{
+ if (crossed_up) {
+ thermal_notify_tz_trip_up(tz, trip);
+ thermal_debug_tz_trip_up(tz, trip);
+ } else {
+ thermal_notify_tz_trip_down(tz, trip);
+ thermal_debug_tz_trip_down(tz, trip);
+ }
+ thermal_governor_trip_crossed(governor, tz, trip, crossed_up);
+}
+
+static int thermal_trip_notify_cmp(void *not_used, const struct list_head *a,
const struct list_head *b)
{
struct thermal_trip_desc *tda = container_of(a, struct thermal_trip_desc,
notify_list_node);
struct thermal_trip_desc *tdb = container_of(b, struct thermal_trip_desc,
notify_list_node);
- int ret = tdb->notify_temp - tda->notify_temp;
-
- return ascending ? ret : -ret;
+ return tda->notify_temp - tdb->notify_temp;
}
void __thermal_zone_device_update(struct thermal_zone_device *tz,
@@ -496,34 +514,29 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
update_temperature(tz);
if (tz->temperature == THERMAL_TEMP_INVALID)
- return;
-
- __thermal_zone_set_trips(tz);
+ goto monitor;
tz->notify_event = event;
for_each_trip_desc(tz, td)
handle_thermal_trip(tz, td, &way_up_list, &way_down_list);
- list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp);
- list_for_each_entry(td, &way_up_list, notify_list_node) {
- thermal_notify_tz_trip_up(tz, &td->trip);
- thermal_debug_tz_trip_up(tz, &td->trip);
- thermal_governor_trip_crossed(governor, tz, &td->trip, true);
- }
+ thermal_zone_set_trips(tz);
+
+ list_sort(NULL, &way_up_list, thermal_trip_notify_cmp);
+ list_for_each_entry(td, &way_up_list, notify_list_node)
+ thermal_trip_crossed(tz, &td->trip, governor, true);
list_sort(NULL, &way_down_list, thermal_trip_notify_cmp);
- list_for_each_entry(td, &way_down_list, notify_list_node) {
- thermal_notify_tz_trip_down(tz, &td->trip);
- thermal_debug_tz_trip_down(tz, &td->trip);
- thermal_governor_trip_crossed(governor, tz, &td->trip, false);
- }
+ list_for_each_entry_reverse(td, &way_down_list, notify_list_node)
+ thermal_trip_crossed(tz, &td->trip, governor, false);
if (governor->manage)
governor->manage(tz);
thermal_debug_update_trip_stats(tz);
+monitor:
monitor_thermal_zone(tz);
}
@@ -593,6 +606,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
+void thermal_zone_trip_down(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
+{
+ thermal_trip_crossed(tz, trip, thermal_get_tz_governor(tz), false);
+}
+
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *data)
{
@@ -984,9 +1003,17 @@ __thermal_cooling_device_register(struct device_node *np,
if (ret)
goto out_cdev_type;
+ /*
+ * The cooling device's current state is only needed for debug
+ * initialization below, so a failure to get it does not cause
+ * the entire cooling device initialization to fail. However,
+ * the debug will not work for the device if its initial state
+ * cannot be determined and drivers are responsible for ensuring
+ * that this will not happen.
+ */
ret = cdev->ops->get_cur_state(cdev, &current_state);
if (ret)
- goto out_cdev_type;
+ current_state = ULONG_MAX;
thermal_cooling_device_setup_sysfs(cdev);
@@ -1001,7 +1028,8 @@ __thermal_cooling_device_register(struct device_node *np,
return ERR_PTR(ret);
}
- thermal_debug_cdev_add(cdev, current_state);
+ if (current_state <= cdev->max_state)
+ thermal_debug_cdev_add(cdev, current_state);
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -1102,7 +1130,7 @@ static void thermal_cooling_device_release(struct device *dev, void *res)
struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device **ptr, *tcd;
@@ -1329,7 +1357,8 @@ thermal_zone_device_register_with_trips(const char *type,
int num_trips, void *devdata,
const struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+ unsigned int passive_delay,
+ unsigned int polling_delay)
{
const struct thermal_trip *trip = trips;
struct thermal_zone_device *tz;
@@ -1362,6 +1391,14 @@ thermal_zone_device_register_with_trips(const char *type,
if (num_trips > 0 && !trips)
return ERR_PTR(-EINVAL);
+ if (polling_delay) {
+ if (passive_delay > polling_delay)
+ return ERR_PTR(-EINVAL);
+
+ if (!passive_delay)
+ passive_delay = polling_delay;
+ }
+
if (!thermal_class)
return ERR_PTR(-ENODEV);
@@ -1382,6 +1419,7 @@ thermal_zone_device_register_with_trips(const char *type,
ida_init(&tz->ida);
mutex_init(&tz->lock);
init_completion(&tz->removal);
+ init_completion(&tz->resume);
id = ida_alloc(&thermal_tz_ida, GFP_KERNEL);
if (id < 0) {
result = id;
@@ -1624,9 +1662,13 @@ static void thermal_zone_device_resume(struct work_struct *work)
tz->suspended = false;
+ thermal_debug_tz_resume(tz);
thermal_zone_device_init(tz);
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ complete(&tz->resume);
+ tz->resuming = false;
+
mutex_unlock(&tz->lock);
}
@@ -1644,6 +1686,20 @@ static int thermal_pm_notify(struct notifier_block *nb,
list_for_each_entry(tz, &thermal_tz_list, node) {
mutex_lock(&tz->lock);
+ if (tz->resuming) {
+ /*
+ * thermal_zone_device_resume() queued up for
+ * this zone has not acquired the lock yet, so
+ * release it to let the function run and wait
+ * until it has done the work.
+ */
+ mutex_unlock(&tz->lock);
+
+ wait_for_completion(&tz->resume);
+
+ mutex_lock(&tz->lock);
+ }
+
tz->suspended = true;
mutex_unlock(&tz->lock);
@@ -1661,6 +1717,9 @@ static int thermal_pm_notify(struct notifier_block *nb,
cancel_delayed_work(&tz->poll_queue);
+ reinit_completion(&tz->resume);
+ tz->resuming = true;
+
/*
* Replace the work function with the resume one, which
* will restore the original work function and schedule
@@ -1685,6 +1744,12 @@ static int thermal_pm_notify(struct notifier_block *nb,
static struct notifier_block thermal_pm_nb = {
.notifier_call = thermal_pm_notify,
+ /*
+ * Run at the lowest priority to avoid interference between the thermal
+ * zone resume work items spawned by thermal_pm_notify() and the other
+ * PM notifiers.
+ */
+ .priority = INT_MIN,
};
static int __init thermal_init(void)
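The suspend/resume handshake added above comes down to a completion guarded by the zone lock: the PM notifier marks the zone as resuming when it queues the resume work, and a later suspend waits for that work to signal completion before proceeding. A condensed sketch with placeholder names (the real code also swaps the zone's work function and re-runs the zone update):

#include <linux/completion.h>
#include <linux/mutex.h>

struct demo_zone {
        struct mutex lock;
        struct completion resume;
        bool resuming;
};

/* Resume work: do the update, then signal anyone waiting in suspend. */
static void demo_zone_resume(struct demo_zone *z)
{
        mutex_lock(&z->lock);
        /* ... reinitialize and update the zone ... */
        complete(&z->resume);
        z->resuming = false;
        mutex_unlock(&z->lock);
}

/* Suspend path: if a resume is still queued, let it finish first. */
static void demo_zone_suspend(struct demo_zone *z)
{
        mutex_lock(&z->lock);
        if (z->resuming) {
                mutex_unlock(&z->lock);
                wait_for_completion(&z->resume);
                mutex_lock(&z->lock);
        }
        /* ... mark the zone suspended ... */
        mutex_unlock(&z->lock);
}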
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index d9785e5bbb08..30c0e78859a7 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -55,6 +55,7 @@ struct thermal_governor {
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
* @removal: removal completion
+ * @resume: resume completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
@@ -89,6 +90,7 @@ struct thermal_governor {
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
* @suspended: thermal zone suspend indicator
+ * @resuming: indicates whether or not thermal zone resume is in progress
* @trips: array of struct thermal_trip objects
*/
struct thermal_zone_device {
@@ -96,6 +98,7 @@ struct thermal_zone_device {
char type[THERMAL_NAME_LENGTH];
struct device device;
struct completion removal;
+ struct completion resume;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
@@ -123,12 +126,19 @@ struct thermal_zone_device {
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
bool suspended;
+ bool resuming;
#ifdef CONFIG_THERMAL_DEBUGFS
struct thermal_debugfs *debugfs;
#endif
struct thermal_trip_desc trips[] __counted_by(num_trips);
};
+/*
+ * Default delay after a failing thermal zone temperature check before
+ * attempting to check it again.
+ */
+#define THERMAL_RECHECK_DELAY_MS 250
+
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
#define DEFAULT_THERMAL_GOVERNOR "step_wise"
@@ -240,12 +250,16 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz,
#define trip_to_trip_desc(__trip) \
container_of(__trip, struct thermal_trip_desc, trip)
-void __thermal_zone_set_trips(struct thermal_zone_device *tz);
+const char *thermal_trip_type_name(enum thermal_trip_type trip_type);
+
+void thermal_zone_set_trips(struct thermal_zone_device *tz);
int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip);
void thermal_zone_trip_updated(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
+void thermal_zone_trip_down(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
/* sysfs I/F */
int thermal_zone_create_device_groups(struct thermal_zone_device *tz);
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index 91f9c21235a8..7dd67bf48571 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -91,16 +91,18 @@ struct cdev_record {
*
* @timestamp: the trip crossing timestamp
* @duration: total time when the zone temperature was above the trip point
+ * @trip_temp: trip temperature at mitigation start
+ * @trip_hyst: trip hysteresis at mitigation start
* @count: the number of times the zone temperature was above the trip point
- * @max: maximum recorded temperature above the trip point
* @min: minimum recorded temperature above the trip point
* @avg: average temperature above the trip point
*/
struct trip_stats {
ktime_t timestamp;
ktime_t duration;
+ int trip_temp;
+ int trip_hyst;
int count;
- int max;
int min;
int avg;
};
@@ -118,12 +120,14 @@ struct trip_stats {
* @timestamp: first trip point crossed the way up
* @duration: total duration of the mitigation episode
* @node: a list element to be added to the list of tz events
+ * @max_temp: maximum zone temperature during this episode
* @trip_stats: per trip point statistics, flexible array
*/
struct tz_episode {
ktime_t timestamp;
ktime_t duration;
struct list_head node;
+ int max_temp;
struct trip_stats trip_stats[];
};
@@ -557,10 +561,11 @@ static struct tz_episode *thermal_debugfs_tz_event_alloc(struct thermal_zone_dev
INIT_LIST_HEAD(&tze->node);
tze->timestamp = now;
tze->duration = KTIME_MIN;
+ tze->max_temp = INT_MIN;
for (i = 0; i < tz->num_trips; i++) {
+ tze->trip_stats[i].trip_temp = THERMAL_TEMP_INVALID;
tze->trip_stats[i].min = INT_MAX;
- tze->trip_stats[i].max = INT_MIN;
}
return tze;
@@ -569,19 +574,20 @@ static struct tz_episode *thermal_debugfs_tz_event_alloc(struct thermal_zone_dev
void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
- struct tz_episode *tze;
- struct tz_debugfs *tz_dbg;
struct thermal_debugfs *thermal_dbg = tz->debugfs;
int trip_id = thermal_zone_trip_id(tz, trip);
ktime_t now = ktime_get();
+ struct trip_stats *trip_stats;
+ struct tz_debugfs *tz_dbg;
+ struct tz_episode *tze;
if (!thermal_dbg)
return;
- mutex_lock(&thermal_dbg->lock);
-
tz_dbg = &thermal_dbg->tz_dbg;
+ mutex_lock(&thermal_dbg->lock);
+
/*
* The mitigation is starting. A mitigation can contain
* several episodes where each of them is related to a
@@ -639,29 +645,42 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tz_dbg->trips_crossed[tz_dbg->nr_trips++] = trip_id;
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
- tze->trip_stats[trip_id].timestamp = now;
+ trip_stats = &tze->trip_stats[trip_id];
+ trip_stats->trip_temp = trip->temperature;
+ trip_stats->trip_hyst = trip->hysteresis;
+ trip_stats->timestamp = now;
unlock:
mutex_unlock(&thermal_dbg->lock);
}
+static void tz_episode_close_trip(struct tz_episode *tze, int trip_id, ktime_t now)
+{
+ struct trip_stats *trip_stats = &tze->trip_stats[trip_id];
+ ktime_t delta = ktime_sub(now, trip_stats->timestamp);
+
+ trip_stats->duration = ktime_add(delta, trip_stats->duration);
+ /* Mark the end of mitigation for this trip point. */
+ trip_stats->timestamp = KTIME_MAX;
+}
+
void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ int trip_id = thermal_zone_trip_id(tz, trip);
+ ktime_t now = ktime_get();
struct tz_episode *tze;
struct tz_debugfs *tz_dbg;
- ktime_t delta, now = ktime_get();
- int trip_id = thermal_zone_trip_id(tz, trip);
int i;
if (!thermal_dbg)
return;
- mutex_lock(&thermal_dbg->lock);
-
tz_dbg = &thermal_dbg->tz_dbg;
+ mutex_lock(&thermal_dbg->lock);
+
/*
* The temperature crosses the way down but there was not
* mitigation detected before. That may happen when the
@@ -687,13 +706,7 @@ void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
- delta = ktime_sub(now, tze->trip_stats[trip_id].timestamp);
-
- tze->trip_stats[trip_id].duration =
- ktime_add(delta, tze->trip_stats[trip_id].duration);
-
- /* Mark the end of mitigation for this trip point. */
- tze->trip_stats[trip_id].timestamp = KTIME_MAX;
+ tz_episode_close_trip(tze, trip_id, now);
/*
* This event closes the mitigation as we are crossing the
@@ -716,20 +729,22 @@ void thermal_debug_update_trip_stats(struct thermal_zone_device *tz)
if (!thermal_dbg)
return;
- mutex_lock(&thermal_dbg->lock);
-
tz_dbg = &thermal_dbg->tz_dbg;
+ mutex_lock(&thermal_dbg->lock);
+
if (!tz_dbg->nr_trips)
goto out;
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+ if (tz->temperature > tze->max_temp)
+ tze->max_temp = tz->temperature;
+
for (i = 0; i < tz_dbg->nr_trips; i++) {
int trip_id = tz_dbg->trips_crossed[i];
struct trip_stats *trip_stats = &tze->trip_stats[trip_id];
- trip_stats->max = max(trip_stats->max, tz->temperature);
trip_stats->min = min(trip_stats->min, tz->temperature);
trip_stats->avg += (tz->temperature - trip_stats->avg) /
++trip_stats->count;
@@ -769,7 +784,6 @@ static int tze_seq_show(struct seq_file *s, void *v)
struct thermal_zone_device *tz = thermal_dbg->tz_dbg.tz;
struct thermal_trip_desc *td;
struct tz_episode *tze;
- const char *type;
u64 duration_ms;
int trip_id;
char c;
@@ -785,19 +799,15 @@ static int tze_seq_show(struct seq_file *s, void *v)
c = '=';
}
- seq_printf(s, ",-Mitigation at %lluus, duration%c%llums\n",
- ktime_to_us(tze->timestamp), c, duration_ms);
+ seq_printf(s, ",-Mitigation at %llums, duration%c%llums, max. temp=%dm°C\n",
+ ktime_to_ms(tze->timestamp), c, duration_ms, tze->max_temp);
- seq_printf(s, "| trip | type | temp(°mC) | hyst(°mC) | duration | avg(°mC) | min(°mC) | max(°mC) |\n");
+ seq_printf(s, "| trip | type | temp(m°C) | hyst(m°C) | duration(ms) | avg(m°C) | min(m°C) |\n");
for_each_trip_desc(tz, td) {
const struct thermal_trip *trip = &td->trip;
struct trip_stats *trip_stats;
- /* Skip invalid trips. */
- if (trip->temperature == THERMAL_TEMP_INVALID)
- continue;
-
/*
* There is no possible mitigation happening at the
* critical trip point, so the stats will be always
@@ -810,16 +820,9 @@ static int tze_seq_show(struct seq_file *s, void *v)
trip_stats = &tze->trip_stats[trip_id];
/* Skip trips without any stats. */
- if (trip_stats->min > trip_stats->max)
+ if (trip_stats->trip_temp == THERMAL_TEMP_INVALID)
continue;
- if (trip->type == THERMAL_TRIP_PASSIVE)
- type = "passive";
- else if (trip->type == THERMAL_TRIP_ACTIVE)
- type = "active";
- else
- type = "hot";
-
if (trip_stats->timestamp != KTIME_MAX) {
/* Mitigation in progress. */
ktime_t delta = ktime_sub(ktime_get(),
@@ -833,15 +836,14 @@ static int tze_seq_show(struct seq_file *s, void *v)
c = ' ';
}
- seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d | %*d |\n",
+ seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d |\n",
4 , trip_id,
- 8, type,
- 9, trip->temperature,
- 9, trip->hysteresis,
- c, 10, duration_ms,
+ 8, thermal_trip_type_name(trip->type),
+ 9, trip_stats->trip_temp,
+ 9, trip_stats->trip_hyst,
+ c, 11, duration_ms,
9, trip_stats->avg,
- 9, trip_stats->min,
- 9, trip_stats->max);
+ 9, trip_stats->min);
}
return 0;
@@ -918,3 +920,39 @@ void thermal_debug_tz_remove(struct thermal_zone_device *tz)
thermal_debugfs_remove_id(thermal_dbg);
kfree(trips_crossed);
}
+
+void thermal_debug_tz_resume(struct thermal_zone_device *tz)
+{
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ ktime_t now = ktime_get();
+ struct tz_debugfs *tz_dbg;
+ struct tz_episode *tze;
+ int i;
+
+ if (!thermal_dbg)
+ return;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ tz_dbg = &thermal_dbg->tz_dbg;
+
+ if (!tz_dbg->nr_trips)
+ goto out;
+
+ /*
+ * A mitigation episode was in progress before the preceding system
+ * suspend transition, so close it because the zone handling is starting
+ * over from scratch.
+ */
+ tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+
+ for (i = 0; i < tz_dbg->nr_trips; i++)
+ tz_episode_close_trip(tze, tz_dbg->trips_crossed[i], now);
+
+ tze->duration = ktime_sub(now, tze->timestamp);
+
+ tz_dbg->nr_trips = 0;
+
+out:
+ mutex_unlock(&thermal_dbg->lock);
+}
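
Commentary (not part of the patch): the per-trip statistics above keep an integer running average without storing samples; avg is nudged by (sample - avg) / ++count on each update, and min starts at INT_MAX so the first sample always wins. A minimal user-space sketch of that update rule, with an invented struct name and made-up sample values:

    #include <limits.h>
    #include <stdio.h>

    struct demo_stats {
    	int count;
    	int min;	/* starts at INT_MAX, like trip_stats->min */
    	int avg;	/* integer running average, like trip_stats->avg */
    };

    static void demo_stats_update(struct demo_stats *s, int sample)
    {
    	if (sample < s->min)
    		s->min = sample;
    	/* Incremental mean: avg_n = avg_(n-1) + (x_n - avg_(n-1)) / n */
    	s->avg += (sample - s->avg) / ++s->count;
    }

    int main(void)
    {
    	struct demo_stats s = { .count = 0, .min = INT_MAX, .avg = 0 };
    	int samples[] = { 45000, 47000, 46000, 48000 };	/* zone temps, m°C */
    	unsigned int i;

    	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    		demo_stats_update(&s, samples[i]);

    	printf("count=%d min=%d avg=%d\n", s.count, s.min, s.avg);
    	return 0;
    }
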
diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h
index 74ee65ee82ff..1c957ce2ec8f 100644
--- a/drivers/thermal/thermal_debugfs.h
+++ b/drivers/thermal/thermal_debugfs.h
@@ -7,6 +7,7 @@ void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev);
void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state);
void thermal_debug_tz_add(struct thermal_zone_device *tz);
void thermal_debug_tz_remove(struct thermal_zone_device *tz);
+void thermal_debug_tz_resume(struct thermal_zone_device *tz);
void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
@@ -20,6 +21,7 @@ static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_
int state) {}
static inline void thermal_debug_tz_add(struct thermal_zone_device *tz) {}
static inline void thermal_debug_tz_remove(struct thermal_zone_device *tz) {}
+static inline void thermal_debug_tz_resume(struct thermal_zone_device *tz) {}
static inline void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
const struct thermal_trip *trip) {};
static inline void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index d9f4e26ec125..81e019493557 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -39,30 +39,53 @@ int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip
return trend;
}
+static struct thermal_instance *get_instance(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev,
+ const struct thermal_trip *trip)
+{
+ struct thermal_instance *ti;
+
+ list_for_each_entry(ti, &tz->thermal_instances, tz_node) {
+ if (ti->trip == trip && ti->cdev == cdev)
+ return ti;
+ }
+
+ return NULL;
+}
+
+bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev)
+{
+ bool ret;
+
+ mutex_lock(&tz->lock);
+ mutex_lock(&cdev->lock);
+
+ ret = !!get_instance(tz, cdev, trip);
+
+ mutex_unlock(&cdev->lock);
+ mutex_unlock(&tz->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(thermal_trip_is_bound_to_cdev);
+
struct thermal_instance *
get_thermal_instance(struct thermal_zone_device *tz,
struct thermal_cooling_device *cdev, int trip_index)
{
- struct thermal_instance *pos = NULL;
- struct thermal_instance *target_instance = NULL;
- const struct thermal_trip *trip;
+ struct thermal_instance *ti;
mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
- trip = &tz->trips[trip_index].trip;
-
- list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
- if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
- target_instance = pos;
- break;
- }
- }
+ ti = get_instance(tz, cdev, &tz->trips[trip_index].trip);
mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
- return target_instance;
+ return ti;
}
EXPORT_SYMBOL(get_thermal_instance);
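
Commentary (not part of the patch): the split above follows a common pattern, a raw list lookup that assumes the relevant locks are held plus thin public wrappers that take and release the locks around it. A rough user-space analogue with pthreads (names and types invented; build with -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct binding {
    	const void *trip;
    	const void *cdev;
    	struct binding *next;
    };

    struct zone {
    	pthread_mutex_t lock;
    	struct binding *bindings;
    };

    /* Caller must hold zone->lock, like get_instance() above. */
    static struct binding *get_binding(struct zone *z, const void *trip,
    				   const void *cdev)
    {
    	struct binding *b;

    	for (b = z->bindings; b; b = b->next)
    		if (b->trip == trip && b->cdev == cdev)
    			return b;

    	return NULL;
    }

    /* Public predicate: takes the lock around the raw lookup. */
    static bool zone_trip_is_bound(struct zone *z, const void *trip,
    			       const void *cdev)
    {
    	bool ret;

    	pthread_mutex_lock(&z->lock);
    	ret = !!get_binding(z, trip, cdev);
    	pthread_mutex_unlock(&z->lock);

    	return ret;
    }

    int main(void)
    {
    	int trip0, fan0;
    	struct binding b = { .trip = &trip0, .cdev = &fan0, .next = NULL };
    	struct zone z = { .lock = PTHREAD_MUTEX_INITIALIZER, .bindings = &b };

    	printf("bound: %d\n", zone_trip_is_bound(&z, &trip0, &fan0));	/* 1 */
    	printf("bound: %d\n", zone_trip_is_bound(&z, &trip0, &trip0));	/* 0 */
    	return 0;
    }
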
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 88211ccdfbd6..72b302bf914e 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -88,18 +88,7 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1)
return -EINVAL;
- switch (tz->trips[trip_id].trip.type) {
- case THERMAL_TRIP_CRITICAL:
- return sprintf(buf, "critical\n");
- case THERMAL_TRIP_HOT:
- return sprintf(buf, "hot\n");
- case THERMAL_TRIP_PASSIVE:
- return sprintf(buf, "passive\n");
- case THERMAL_TRIP_ACTIVE:
- return sprintf(buf, "active\n");
- default:
- return sprintf(buf, "unknown\n");
- }
+ return sprintf(buf, "%s\n", thermal_trip_type_name(tz->trips[trip_id].trip.type));
}
static ssize_t
@@ -124,7 +113,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (temp != trip->temperature) {
if (tz->ops.set_trip_temp) {
- ret = tz->ops.set_trip_temp(tz, trip_id, temp);
+ ret = tz->ops.set_trip_temp(tz, trip, temp);
if (ret)
goto unlock;
}
@@ -150,7 +139,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
- return sprintf(buf, "%d\n", tz->trips[trip_id].trip.temperature);
+ return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.temperature));
}
static ssize_t
@@ -174,7 +163,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
trip = &tz->trips[trip_id].trip;
if (hyst != trip->hysteresis) {
- trip->hysteresis = hyst;
+ WRITE_ONCE(trip->hysteresis, hyst);
thermal_zone_trip_updated(tz, trip);
}
@@ -194,7 +183,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- return sprintf(buf, "%d\n", tz->trips[trip_id].trip.hysteresis);
+ return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.hysteresis));
}
static ssize_t
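
Commentary (not part of the patch): the trip temperature and hysteresis reads above become lockless, so the plain accesses are wrapped in READ_ONCE()/WRITE_ONCE() to keep the compiler from tearing or re-reading them. A loose user-space analogue using C11 relaxed atomics (illustration only; the kernel macros are not C11 atomics, and the variable and values below are made up):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int trip_hysteresis = 2000;	/* m°C, example value */

    /* Writer: publish the new value without holding a lock. */
    static void set_hysteresis(int hyst)
    {
    	atomic_store_explicit(&trip_hysteresis, hyst, memory_order_relaxed);
    }

    /* Lockless reader: a single, untorn load of the current value. */
    static int get_hysteresis(void)
    {
    	return atomic_load_explicit(&trip_hysteresis, memory_order_relaxed);
    }

    int main(void)
    {
    	set_hysteresis(2500);
    	printf("%d\n", get_hysteresis());
    	return 0;
    }
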
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index d6a6acc78ddb..c0b679b846b3 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -9,6 +9,21 @@
*/
#include "thermal_core.h"
+static const char *trip_type_names[] = {
+ [THERMAL_TRIP_ACTIVE] = "active",
+ [THERMAL_TRIP_PASSIVE] = "passive",
+ [THERMAL_TRIP_HOT] = "hot",
+ [THERMAL_TRIP_CRITICAL] = "critical",
+};
+
+const char *thermal_trip_type_name(enum thermal_trip_type trip_type)
+{
+ if (trip_type < THERMAL_TRIP_ACTIVE || trip_type > THERMAL_TRIP_CRITICAL)
+ return "unknown";
+
+ return trip_type_names[trip_type];
+}
+
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data)
@@ -47,7 +62,7 @@ int thermal_zone_get_num_trips(struct thermal_zone_device *tz)
EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
/**
- * __thermal_zone_set_trips - Computes the next trip points for the driver
+ * thermal_zone_set_trips - Computes the next trip points for the driver
* @tz: a pointer to a thermal zone device structure
*
* The function computes the next temperature boundaries by browsing
@@ -61,7 +76,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
*
* It does not return a value
*/
-void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+void thermal_zone_set_trips(struct thermal_zone_device *tz)
{
const struct thermal_trip_desc *td;
int low = -INT_MAX, high = INT_MAX;
@@ -73,17 +88,11 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
return;
for_each_trip_desc(tz, td) {
- const struct thermal_trip *trip = &td->trip;
- int trip_low;
-
- trip_low = trip->temperature - trip->hysteresis;
-
- if (trip_low < tz->temperature && trip_low > low)
- low = trip_low;
+ if (td->threshold < tz->temperature && td->threshold > low)
+ low = td->threshold;
- if (trip->temperature > tz->temperature &&
- trip->temperature < high)
- high = trip->temperature;
+ if (td->threshold > tz->temperature && td->threshold < high)
+ high = td->threshold;
}
/* No need to change trip points */
@@ -105,27 +114,17 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
dev_err(&tz->device, "Failed to set trips: %d\n", ret);
}
-int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip)
-{
- if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip)
- return -EINVAL;
-
- *trip = tz->trips[trip_id].trip;
- return 0;
-}
-EXPORT_SYMBOL_GPL(__thermal_zone_get_trip);
-
int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip)
{
- int ret;
+ if (!tz || !trip || trip_id < 0 || trip_id >= tz->num_trips)
+ return -EINVAL;
mutex_lock(&tz->lock);
- ret = __thermal_zone_get_trip(tz, trip_id, trip);
+ *trip = tz->trips[trip_id].trip;
mutex_unlock(&tz->lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_trip);
@@ -152,17 +151,23 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
if (trip->temperature == temp)
return;
+ WRITE_ONCE(trip->temperature, temp);
+ thermal_notify_tz_trip_change(tz, trip);
+
if (temp == THERMAL_TEMP_INVALID) {
struct thermal_trip_desc *td = trip_to_trip_desc(trip);
- if (trip->type == THERMAL_TRIP_PASSIVE &&
- tz->temperature >= td->threshold) {
+ if (tz->temperature >= td->threshold) {
/*
- * The trip has been crossed, so the thermal zone's
- * passive count needs to be adjusted.
+ * The trip has been crossed on the way up, so some
+ * adjustments are needed to compensate for the trip
+ * going away.
*/
- tz->passive--;
- WARN_ON_ONCE(tz->passive < 0);
+ if (trip->type == THERMAL_TRIP_PASSIVE) {
+ tz->passive--;
+ WARN_ON_ONCE(tz->passive < 0);
+ }
+ thermal_zone_trip_down(tz, trip);
}
/*
* Invalidate the threshold to avoid triggering a spurious
@@ -170,7 +175,5 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
*/
td->threshold = INT_MAX;
}
- trip->temperature = temp;
- thermal_notify_tz_trip_change(tz, trip);
}
EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
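
Commentary (not part of the patch): thermal_trip_type_name() above is a designated-initializer lookup table guarded by a range check, so unexpected enum values map to "unknown" instead of indexing past the array. A standalone sketch of the same pattern (user-space; the demo enum is invented):

    #include <stdio.h>

    enum demo_trip_type {
    	DEMO_TRIP_ACTIVE,
    	DEMO_TRIP_PASSIVE,
    	DEMO_TRIP_HOT,
    	DEMO_TRIP_CRITICAL,
    };

    static const char *demo_trip_type_names[] = {
    	[DEMO_TRIP_ACTIVE]	= "active",
    	[DEMO_TRIP_PASSIVE]	= "passive",
    	[DEMO_TRIP_HOT]		= "hot",
    	[DEMO_TRIP_CRITICAL]	= "critical",
    };

    static const char *demo_trip_type_name(enum demo_trip_type type)
    {
    	/* Reject out-of-range values instead of indexing past the table. */
    	if (type < DEMO_TRIP_ACTIVE || type > DEMO_TRIP_CRITICAL)
    		return "unknown";

    	return demo_trip_type_names[type];
    }

    int main(void)
    {
    	printf("%s\n", demo_trip_type_name(DEMO_TRIP_HOT));		/* hot */
    	printf("%s\n", demo_trip_type_name((enum demo_trip_type)42));	/* unknown */
    	return 0;
    }
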
diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c
index 274f36358b21..0325b7195136 100644
--- a/drivers/thermal/uniphier_thermal.c
+++ b/drivers/thermal/uniphier_thermal.c
@@ -239,13 +239,34 @@ static irqreturn_t uniphier_tm_alarm_irq_thread(int irq, void *_tdev)
return IRQ_HANDLED;
}
+struct trip_walk_data {
+ struct uniphier_tm_dev *tdev;
+ int crit_temp;
+ int index;
+};
+
+static int uniphier_tm_trip_walk_cb(struct thermal_trip *trip, void *arg)
+{
+ struct trip_walk_data *twd = arg;
+
+ if (trip->type == THERMAL_TRIP_CRITICAL &&
+ trip->temperature < twd->crit_temp)
+ twd->crit_temp = trip->temperature;
+
+ uniphier_tm_set_alert(twd->tdev, twd->index, trip->temperature);
+ twd->tdev->alert_en[twd->index++] = true;
+
+ return 0;
+}
+
static int uniphier_tm_probe(struct platform_device *pdev)
{
+ struct trip_walk_data twd = { .crit_temp = INT_MAX, .index = 0 };
struct device *dev = &pdev->dev;
struct regmap *regmap;
struct device_node *parent;
struct uniphier_tm_dev *tdev;
- int i, ret, irq, crit_temp = INT_MAX;
+ int ret, irq;
tdev = devm_kzalloc(dev, sizeof(*tdev), GFP_KERNEL);
if (!tdev)
@@ -293,20 +314,10 @@ static int uniphier_tm_probe(struct platform_device *pdev)
}
/* set alert temperatures */
- for (i = 0; i < thermal_zone_get_num_trips(tdev->tz_dev); i++) {
- struct thermal_trip trip;
+ twd.tdev = tdev;
+ thermal_zone_for_each_trip(tdev->tz_dev, uniphier_tm_trip_walk_cb, &twd);
- ret = thermal_zone_get_trip(tdev->tz_dev, i, &trip);
- if (ret)
- return ret;
-
- if (trip.type == THERMAL_TRIP_CRITICAL &&
- trip.temperature < crit_temp)
- crit_temp = trip.temperature;
- uniphier_tm_set_alert(tdev, i, trip.temperature);
- tdev->alert_en[i] = true;
- }
- if (crit_temp > CRITICAL_TEMP_LIMIT) {
+ if (twd.crit_temp > CRITICAL_TEMP_LIMIT) {
dev_err(dev, "critical trip is over limit(>%d), or not set\n",
CRITICAL_TEMP_LIMIT);
return -EINVAL;
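
Commentary (not part of the patch): the uniphier probe above replaces an index-based trip loop with thermal_zone_for_each_trip() and a small context struct carried through the callback. A generic sketch of that callback-plus-cookie pattern (user-space; the arrays stand in for the zone's trips):

    #include <limits.h>
    #include <stdio.h>

    struct walk_data {
    	int crit_temp;	/* lowest "critical" value seen, starts at INT_MAX */
    	int index;	/* how many entries the callback has visited */
    };

    static int walk_cb(int temperature, int is_critical, void *arg)
    {
    	struct walk_data *wd = arg;

    	if (is_critical && temperature < wd->crit_temp)
    		wd->crit_temp = temperature;

    	wd->index++;
    	return 0;	/* a non-zero return would stop the walk */
    }

    static void for_each_entry(const int *temps, const int *crit, int n,
    			   int (*cb)(int, int, void *), void *arg)
    {
    	int i;

    	for (i = 0; i < n; i++)
    		if (cb(temps[i], crit[i], arg))
    			break;
    }

    int main(void)
    {
    	struct walk_data wd = { .crit_temp = INT_MAX, .index = 0 };
    	int temps[] = { 85000, 95000, 110000 };
    	int crit[]  = { 0, 0, 1 };

    	for_each_entry(temps, crit, 3, walk_cb, &wd);
    	printf("visited=%d crit_temp=%d\n", wd.index, wd.crit_temp);
    	return 0;
    }
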
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 193e9dfc983b..70b52aac3d97 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port)
debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
- if (independent_voltage_margins(usb4) ||
- (supports_time(usb4) && independent_time_margins(usb4)))
+ if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
+ (supports_time(usb4) &&
+ independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
}
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 458bb1280ebf..5b97e420a95f 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -288,7 +288,7 @@ struct mxser_board {
enum mxser_must_hwid must_hwid;
speed_t max_baud;
- struct mxser_port ports[] __counted_by(nports);
+ struct mxser_port ports[] /* __counted_by(nports) */;
};
static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index f252d0b5a434..5e9ca4376d68 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
else if (tty->closing && !L_EXTPROC(tty)) {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, count, false);
} else {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
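
Commentary (not part of the patch): the n_tty change above stops re-passing the full buffer for the post-lookahead segment; it advances both the data and flag pointers by la_count and shrinks count before handing over the remainder. A compact user-space sketch of that two-segment hand-off (the consume() helper is an invented stand-in for the receive functions):

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned char u8;

    static void consume(const char *what, const u8 *cp, const u8 *fp, size_t count)
    {
    	printf("%s: %zu bytes starting at '%c', flags %s\n",
    	       what, count, *cp, fp ? "present" : "none");
    }

    static void receive_buf(const u8 *cp, const u8 *fp, size_t count, size_t la_count)
    {
    	if (la_count > 0) {
    		consume("lookahead", cp, fp, la_count);
    		cp += la_count;
    		if (fp)
    			fp += la_count;	/* keep data and flags in step */
    		count -= la_count;
    	}
    	if (count > 0)
    		consume("remainder", cp, fp, count);
    }

    int main(void)
    {
    	u8 data[] = "ABCDEFGH";

    	receive_buf(data, NULL, sizeof(data) - 1, 3);
    	return 0;
    }
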
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index ff15022369e4..b0adafc44747 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -15,7 +15,6 @@
*/
#include <linux/acpi.h>
-#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
@@ -42,8 +41,6 @@
#include <asm/irq.h>
-#include "../serial_base.h" /* For serial_base_add_isa_preferred_console() */
-
#include "8250.h"
/*
@@ -563,8 +560,6 @@ static void __init serial8250_isa_init_ports(void)
port->irqflags |= irqflag;
if (serial8250_isa_config != NULL)
serial8250_isa_config(i, &up->port, &up->capabilities);
-
- serial_base_add_isa_preferred_console(serial8250_reg.dev_name, i);
}
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index ba9f4dc4e71d..fb809e32c6ae 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -55,6 +55,34 @@
#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
#define DW_UART_QUIRK_APMC0D08 BIT(4)
+#define DW_UART_QUIRK_CPR_VALUE BIT(5)
+
+struct dw8250_platform_data {
+ u8 usr_reg;
+ u32 cpr_value;
+ unsigned int quirks;
+};
+
+struct dw8250_data {
+ struct dw8250_port_data data;
+ const struct dw8250_platform_data *pdata;
+
+ int msr_mask_on;
+ int msr_mask_off;
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_notifier;
+ struct work_struct clk_work;
+ struct reset_control *rst;
+
+ unsigned int skip_autocfg:1;
+ unsigned int uart_16550_compatible:1;
+};
+
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+{
+ return container_of(data, struct dw8250_data, data);
+}
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -432,6 +460,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
+ u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
+
+ if (quirks & DW_UART_QUIRK_CPR_VALUE)
+ data->data.cpr_value = cpr_value;
#ifdef CONFIG_64BIT
if (quirks & DW_UART_QUIRK_OCTEON) {
@@ -714,8 +746,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = {
static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
.usr_reg = DW_UART_USR,
- .cpr_val = 0x00012f32,
- .quirks = DW_UART_QUIRK_IS_DMA_FC,
+ .cpr_value = 0x00012f32,
+ .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
};
static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 3e33ddf7bc80..5a2520943dfd 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = {
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
- struct dw8250_data *data = to_dw8250_data(pd);
struct uart_8250_port *up = up_to_u8250p(p);
u32 reg, old_dlf;
@@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p)
reg = dw8250_readl_ext(p, DW_UART_CPR);
if (!reg) {
- reg = data->pdata->cpr_val;
+ reg = pd->cpr_value;
dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
}
if (!reg)
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index f13e91f2cace..7dd2a8e7b780 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -2,15 +2,10 @@
/* Synopsys DesignWare 8250 library header file. */
#include <linux/io.h>
-#include <linux/notifier.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "8250.h"
-struct clk;
-struct reset_control;
-
struct dw8250_port_data {
/* Port properties */
int line;
@@ -19,42 +14,16 @@ struct dw8250_port_data {
struct uart_8250_dma dma;
/* Hardware configuration */
+ u32 cpr_value;
u8 dlf_size;
/* RS485 variables */
bool hw_rs485_support;
};
-struct dw8250_platform_data {
- u8 usr_reg;
- u32 cpr_val;
- unsigned int quirks;
-};
-
-struct dw8250_data {
- struct dw8250_port_data data;
- const struct dw8250_platform_data *pdata;
-
- int msr_mask_on;
- int msr_mask_off;
- struct clk *clk;
- struct clk *pclk;
- struct notifier_block clk_notifier;
- struct work_struct clk_work;
- struct reset_control *rst;
-
- unsigned int skip_autocfg:1;
- unsigned int uart_16550_compatible:1;
-};
-
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
-{
- return container_of(data, struct dw8250_data, data);
-}
-
static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
{
if (p->iotype == UPIO_MEM32BE)
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 170639d12b2a..1af9aed99c65 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -115,6 +115,10 @@
/* RX FIFO occupancy indicator */
#define UART_OMAP_RX_LVL 0x19
+/* Timeout low and High */
+#define UART_OMAP_TO_L 0x26
+#define UART_OMAP_TO_H 0x27
+
/*
* Copy of the genpd flags for the console.
* Only used if console suspend is disabled
@@ -663,13 +667,25 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
/*
* On K3 SoCs, it is observed that RX TIMEOUT is signalled after
- * FIFO has been drained, in which case a dummy read of RX FIFO
- * is required to clear RX TIMEOUT condition.
+ * FIFO has been drained, or even spuriously. So apply the
+ * workaround for Errata i2310 as described in
+ * https://www.ti.com/lit/pdf/sprz536
*/
if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
(iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
- serial_port_in(port, UART_RX);
+ unsigned char efr2, timeout_h, timeout_l;
+
+ efr2 = serial_in(up, UART_OMAP_EFR2);
+ timeout_h = serial_in(up, UART_OMAP_TO_H);
+ timeout_l = serial_in(up, UART_OMAP_TO_L);
+ serial_out(up, UART_OMAP_TO_H, 0xFF);
+ serial_out(up, UART_OMAP_TO_L, 0xFF);
+ serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
+ serial_in(up, UART_IIR);
+ serial_out(up, UART_OMAP_EFR2, efr2);
+ serial_out(up, UART_OMAP_TO_H, timeout_h);
+ serial_out(up, UART_OMAP_TO_L, timeout_l);
}
/* Stop processing interrupts on input overrun */
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 40af74b55933..e1d7aa2fa347 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1985,6 +1985,17 @@ enum {
MOXA_SUPP_RS485 = BIT(2),
};
+static unsigned short moxa_get_nports(unsigned short device)
+{
+ switch (device) {
+ case PCI_DEVICE_ID_MOXA_CP116E_A_A:
+ case PCI_DEVICE_ID_MOXA_CP116E_A_B:
+ return 8;
+ }
+
+ return FIELD_GET(0x00F0, device);
+}
+
static bool pci_moxa_is_mini_pcie(unsigned short device)
{
if (device == PCI_DEVICE_ID_MOXA_CP102N ||
@@ -2038,7 +2049,7 @@ static int pci_moxa_init(struct pci_dev *dev)
{
unsigned short device = dev->device;
resource_size_t iobar_addr = pci_resource_start(dev, 2);
- unsigned int num_ports = (device & 0x00F0) >> 4, i;
+ unsigned int i, num_ports = moxa_get_nports(device);
u8 val, init_mode = MOXA_RS232;
if (!(pci_moxa_supported_rs(dev) & MOXA_SUPP_RS232)) {
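
Commentary (not part of the patch): moxa_get_nports() above keeps decoding the port count from bits 7..4 of the PCI device ID via FIELD_GET(), but special-cases the CP116E parts whose ID nibble does not match their real port count. An open-coded sketch of that decode (user-space; the IDs below are illustrative placeholders, not guaranteed to match the real Moxa defines):

    #include <stdio.h>

    #define DEMO_NPORTS_MASK	0x00F0u
    #define DEMO_ID_CP116E_A	0x1160	/* placeholder: 8 ports, ID nibble says 6 */
    #define DEMO_ID_CP116E_B	0x1161	/* placeholder */

    static unsigned int demo_id_to_nports(unsigned short id)
    {
    	switch (id) {
    	case DEMO_ID_CP116E_A:
    	case DEMO_ID_CP116E_B:
    		return 8;
    	}

    	/* Open-coded equivalent of FIELD_GET(DEMO_NPORTS_MASK, id). */
    	return (id & DEMO_NPORTS_MASK) >> 4;
    }

    int main(void)
    {
    	printf("0x%04X -> %u ports\n", 0x1042, demo_id_to_nports(0x1042));	/* 4 */
    	printf("0x%04X -> %u ports\n", DEMO_ID_CP116E_A,
    	       demo_id_to_nports(DEMO_ID_CP116E_A));				/* 8 */
    	return 0;
    }
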
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index f1a51b00b1b9..ba96fa913e7f 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.fifosize = 64;
+ uart.tx_loadsz = 32;
uart.dl_write = serial_pxa_dl_write;
ret = serial8250_register_8250_port(&uart);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 4fdd7857ef4d..28e4beeabf8f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1023,8 +1023,9 @@ config SERIAL_SCCNXP_CONSOLE
help
Support for console on SCCNXP serial ports.
-config SERIAL_SC16IS7XX_CORE
+config SERIAL_SC16IS7XX
tristate "NXP SC16IS7xx UART support"
+ depends on SPI_MASTER || I2C
select SERIAL_CORE
select SERIAL_SC16IS7XX_SPI if SPI_MASTER
select SERIAL_SC16IS7XX_I2C if I2C
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index faa45f2b8bb0..6ff74f0a9530 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -75,7 +75,7 @@ obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
-obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
+obj-$(CONFIG_SERIAL_SC16IS7XX) += sc16is7xx.o
obj-$(CONFIG_SERIAL_SC16IS7XX_SPI) += sc16is7xx_spi.o
obj-$(CONFIG_SERIAL_SC16IS7XX_I2C) += sc16is7xx_i2c.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 34801a6f300b..b88cc28c94e3 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -308,8 +308,8 @@ static void bcm_uart_do_tx(struct uart_port *port)
val = bcm_uart_readl(port, UART_MCTL_REG);
val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
-
- pending = uart_port_tx_limited(port, ch, port->fifosize - val,
+ pending = uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP,
+ port->fifosize - val,
true,
bcm_uart_writel(port, ch, UART_FIFO_REG),
({}));
@@ -320,6 +320,9 @@ static void bcm_uart_do_tx(struct uart_port *port)
val = bcm_uart_readl(port, UART_IR_REG);
val &= ~UART_TX_INT_MASK;
bcm_uart_writel(port, val, UART_IR_REG);
+
+ if (uart_tx_stopped(port))
+ bcm_uart_stop_tx(port);
}
/*
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2eb22594960f..ff32cd2d2863 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -120,6 +120,7 @@
#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
+#define UFCR_RXTL_MASK 0x3F /* Receiver trigger 6 bits wide */
#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
@@ -1551,6 +1552,7 @@ static void imx_uart_shutdown(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
u32 ucr1, ucr2, ucr4, uts;
+ int loops;
if (sport->dma_is_enabled) {
dmaengine_terminate_sync(sport->dma_chan_tx);
@@ -1613,6 +1615,56 @@ static void imx_uart_shutdown(struct uart_port *port)
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
+ /*
+ * We have to ensure the tx state machine ends up in OFF. This
+ * is especially important for rs485 where we must not leave
+ * the RTS signal high, blocking the bus indefinitely.
+ *
+ * All interrupts are now disabled, so imx_uart_stop_tx() will
+ * no longer be called from imx_uart_transmit_buffer(). It may
+ * still be called via the hrtimers, and if those are in play,
+ * we have to honour the delays.
+ */
+ if (sport->tx_state == WAIT_AFTER_RTS || sport->tx_state == SEND)
+ imx_uart_stop_tx(port);
+
+ /*
+ * In many cases (rs232 mode, or if tx_state was
+ * WAIT_AFTER_RTS, or if tx_state was SEND and there is no
+ * delay_rts_after_send), this will have moved directly to
+ * OFF. In rs485 mode, tx_state might already have been
+ * WAIT_AFTER_SEND and the hrtimer thus already started, or
+ * the above imx_uart_stop_tx() call could have started it. In
+ * those cases, we have to wait for the hrtimer to fire and
+ * complete the transition to OFF.
+ */
+ loops = port->rs485.flags & SER_RS485_ENABLED ?
+ port->rs485.delay_rts_after_send : 0;
+ while (sport->tx_state != OFF && loops--) {
+ uart_port_unlock_irqrestore(&sport->port, flags);
+ msleep(1);
+ uart_port_lock_irqsave(&sport->port, &flags);
+ }
+
+ if (sport->tx_state != OFF) {
+ dev_warn(sport->port.dev, "unexpected tx_state %d\n",
+ sport->tx_state);
+ /*
+ * This machine may be busted, but ensure the RTS
+ * signal is inactive in order not to block other
+ * devices.
+ */
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ ucr2 = imx_uart_readl(sport, UCR2);
+ if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ imx_uart_rts_active(sport, &ucr2);
+ else
+ imx_uart_rts_inactive(sport, &ucr2);
+ imx_uart_writel(sport, ucr2, UCR2);
+ }
+ sport->tx_state = OFF;
+ }
+
uart_port_unlock_irqrestore(&sport->port, flags);
clk_disable_unprepare(sport->clk_per);
@@ -1933,7 +1985,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
struct serial_rs485 *rs485conf)
{
struct imx_port *sport = (struct imx_port *)port;
- u32 ucr2;
+ u32 ucr2, ufcr;
if (rs485conf->flags & SER_RS485_ENABLED) {
/* Enable receiver if low-active RTS signal is requested */
@@ -1952,8 +2004,13 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
/* Make sure Rx is enabled in case Tx is active with Rx disabled */
if (!(rs485conf->flags & SER_RS485_ENABLED) ||
- rs485conf->flags & SER_RS485_RX_DURING_TX)
+ rs485conf->flags & SER_RS485_RX_DURING_TX) {
+ /* If the receiver trigger is 0, set it to a default value */
+ ufcr = imx_uart_readl(sport, UFCR);
+ if ((ufcr & UFCR_RXTL_MASK) == 0)
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
imx_uart_start_rx(port);
+ }
return 0;
}
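
Commentary (not part of the patch): the imx shutdown path above waits for the TX state machine to reach OFF by repeatedly dropping the port lock, sleeping a millisecond and re-checking, bounded by the rs485 delay; if the deadline passes it forces the state and warns. A user-space analogue of that bounded wait with pthreads (names invented; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int state = 1;		/* 0 means "OFF" */

    static void wait_for_off(int max_loops)
    {
    	pthread_mutex_lock(&lock);

    	while (state != 0 && max_loops--) {
    		/* Drop the lock so whoever owns the state can make progress. */
    		pthread_mutex_unlock(&lock);
    		usleep(1000);
    		pthread_mutex_lock(&lock);
    	}

    	if (state != 0) {
    		fprintf(stderr, "unexpected state %d, forcing OFF\n", state);
    		state = 0;	/* last-resort cleanup, as in the patch */
    	}

    	pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
    	wait_for_off(5);
    	printf("state=%d\n", state);
    	return 0;
    }
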
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index 19f0a305cc43..3b4206e815fe 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -688,12 +688,13 @@ static int ma35d1serial_probe(struct platform_device *pdev)
struct uart_ma35d1_port *up;
int ret = 0;
- if (pdev->dev.of_node) {
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
- return ret;
- }
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
+ return ret;
}
up = &ma35d1serial_ports[ret];
up->port.line = ret;
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index b0604d6da025..58858dd352c5 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -462,7 +462,7 @@ static const struct uart_ops mcf_uart_ops = {
.verify_port = mcf_verify_port,
};
-static struct mcf_uart mcf_ports[4];
+static struct mcf_uart mcf_ports[10];
#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 2bd25afe0d92..69a632fefc41 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -649,15 +649,25 @@ static void qcom_geni_serial_start_tx_dma(struct uart_port *uport)
static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport)
{
+ unsigned char c;
u32 irq_en;
- if (qcom_geni_serial_main_active(uport) ||
- !qcom_geni_serial_tx_empty(uport))
- return;
+ /*
+ * Start a new transfer in case the previous command was cancelled and
+ * left data in the FIFO, which may prevent the watermark interrupt
+ * from triggering. Note that the stale data is discarded.
+ */
+ if (!qcom_geni_serial_main_active(uport) &&
+ !qcom_geni_serial_tx_empty(uport)) {
+ if (uart_fifo_out(uport, &c, 1) == 1) {
+ writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ qcom_geni_serial_setup_tx(uport, 1);
+ writel(c, uport->membase + SE_GENI_TX_FIFOn);
+ }
+ }
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
-
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
@@ -665,13 +675,17 @@ static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport)
static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport)
{
u32 irq_en;
- struct qcom_geni_serial_port *port = to_dev_port(uport);
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- /* Possible stop tx is called multiple times. */
+}
+
+static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+
if (!qcom_geni_serial_main_active(uport))
return;
@@ -684,6 +698,8 @@ static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport)
writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+
+ port->tx_remaining = 0;
}
static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop)
@@ -862,7 +878,7 @@ static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
memset(buf, 0, sizeof(buf));
tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
- tx_bytes = uart_fifo_out(uport, buf, tx_bytes);
+ uart_fifo_out(uport, buf, tx_bytes);
iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
@@ -890,13 +906,17 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport,
else
pending = kfifo_len(&tport->xmit_fifo);
- /* All data has been transmitted and acknowledged as received */
- if (!pending && !status && done) {
+ /* All data has been transmitted or command has been cancelled */
+ if (!pending && done) {
qcom_geni_serial_stop_tx_fifo(uport);
goto out_write_wakeup;
}
- avail = port->tx_fifo_depth - (status & TX_FIFO_WC);
+ if (active)
+ avail = port->tx_fifo_depth - (status & TX_FIFO_WC);
+ else
+ avail = port->tx_fifo_depth;
+
avail *= BYTES_PER_FIFO_WORD;
chunk = min(avail, pending);
@@ -1069,11 +1089,15 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
disable_irq(uport->irq);
- if (uart_console(uport))
- return;
-
qcom_geni_serial_stop_tx(uport);
qcom_geni_serial_stop_rx(uport);
+
+ qcom_geni_serial_cancel_tx_cmd(uport);
+}
+
+static void qcom_geni_serial_flush_buffer(struct uart_port *uport)
+{
+ qcom_geni_serial_cancel_tx_cmd(uport);
}
static int qcom_geni_serial_port_setup(struct uart_port *uport)
@@ -1532,6 +1556,7 @@ static const struct uart_ops qcom_geni_console_pops = {
.request_port = qcom_geni_serial_request_port,
.config_port = qcom_geni_serial_config_port,
.shutdown = qcom_geni_serial_shutdown,
+ .flush_buffer = qcom_geni_serial_flush_buffer,
.type = qcom_geni_serial_get_type,
.set_mctrl = qcom_geni_serial_set_mctrl,
.get_mctrl = qcom_geni_serial_get_mctrl,
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index 743a72ac34f3..b6c38d2edfd4 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -49,33 +49,3 @@ void serial_ctrl_unregister_port(struct uart_driver *drv, struct uart_port *port
int serial_core_register_port(struct uart_driver *drv, struct uart_port *port);
void serial_core_unregister_port(struct uart_driver *drv, struct uart_port *port);
-
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port);
-
-#else
-
-static inline
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port)
-{
- return 0;
-}
-
-#endif
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-
-int serial_base_add_isa_preferred_console(const char *name, int idx);
-
-#else
-
-static inline
-int serial_base_add_isa_preferred_console(const char *name, int idx)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index 73c6ee540c83..4df2a4b10445 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -8,7 +8,6 @@
* The serial core bus manages the serial core controller instances.
*/
-#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/idr.h>
@@ -205,134 +204,6 @@ void serial_base_port_device_remove(struct serial_port_device *port_dev)
put_device(&port_dev->dev);
}
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-
-static int serial_base_add_one_prefcon(const char *match, const char *dev_name,
- int port_id)
-{
- int ret;
-
- ret = add_preferred_console_match(match, dev_name, port_id);
- if (ret == -ENOENT)
- return 0;
-
- return ret;
-}
-
-#ifdef __sparc__
-
-/* Handle Sparc ttya and ttyb options as done in console_setup() */
-static int serial_base_add_sparc_console(const char *dev_name, int idx)
-{
- const char *name;
-
- switch (idx) {
- case 0:
- name = "ttya";
- break;
- case 1:
- name = "ttyb";
- break;
- default:
- return 0;
- }
-
- return serial_base_add_one_prefcon(name, dev_name, idx);
-}
-
-#else
-
-static inline int serial_base_add_sparc_console(const char *dev_name, int idx)
-{
- return 0;
-}
-
-#endif
-
-static int serial_base_add_prefcon(const char *name, int idx)
-{
- const char *char_match __free(kfree) = NULL;
- const char *nmbr_match __free(kfree) = NULL;
- int ret;
-
- /* Handle ttyS specific options */
- if (strstarts(name, "ttyS")) {
- /* No name, just a number */
- nmbr_match = kasprintf(GFP_KERNEL, "%i", idx);
- if (!nmbr_match)
- return -ENODEV;
-
- ret = serial_base_add_one_prefcon(nmbr_match, name, idx);
- if (ret)
- return ret;
-
- /* Sparc ttya and ttyb */
- ret = serial_base_add_sparc_console(name, idx);
- if (ret)
- return ret;
- }
-
- /* Handle the traditional character device name style console=ttyS0 */
- char_match = kasprintf(GFP_KERNEL, "%s%i", name, idx);
- if (!char_match)
- return -ENOMEM;
-
- return serial_base_add_one_prefcon(char_match, name, idx);
-}
-
-/**
- * serial_base_add_preferred_console - Adds a preferred console
- * @drv: Serial port device driver
- * @port: Serial port instance
- *
- * Tries to add a preferred console for a serial port if specified in the
- * kernel command line. Supports both the traditional character device such
- * as console=ttyS0, and a hardware addressing based console=DEVNAME:0.0
- * style name.
- *
- * Translates the kernel command line option using a hardware based addressing
- * console=DEVNAME:0.0 to the serial port character device such as ttyS0.
- * Cannot be called early for ISA ports, depends on struct device.
- *
- * Note that duplicates are ignored by add_preferred_console().
- *
- * Return: 0 on success, negative error code on failure.
- */
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port)
-{
- const char *port_match __free(kfree) = NULL;
- int ret;
-
- ret = serial_base_add_prefcon(drv->dev_name, port->line);
- if (ret)
- return ret;
-
- port_match = kasprintf(GFP_KERNEL, "%s:%i.%i", dev_name(port->dev),
- port->ctrl_id, port->port_id);
- if (!port_match)
- return -ENOMEM;
-
- /* Translate a hardware addressing style console=DEVNAME:0.0 */
- return serial_base_add_one_prefcon(port_match, drv->dev_name, port->line);
-}
-
-#endif
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-
-/*
- * Early ISA ports initialize the console before there is no struct device.
- * This should be only called from serial8250_isa_init_preferred_console(),
- * other callers are likely wrong and should rely on earlycon instead.
- */
-int serial_base_add_isa_preferred_console(const char *name, int idx)
-{
- return serial_base_add_prefcon(name, idx);
-}
-
-#endif
-
static int serial_base_init(void)
{
int ret;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2c1a0254d3f4..2a8006e3d687 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -622,7 +622,7 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count)
return -EL3HLT;
port = uart_port_lock(state, flags);
- if (WARN_ON_ONCE(!state->port.xmit_buf)) {
+ if (!state->port.xmit_buf) {
uart_port_unlock(port, flags);
return 0;
}
@@ -3422,10 +3422,6 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
if (ret)
goto err_unregister_ctrl_dev;
- ret = serial_base_add_preferred_console(drv, port);
- if (ret)
- goto err_unregister_port_dev;
-
ret = serial_core_add_one_port(drv, port);
if (ret)
goto err_unregister_port_dev;
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 91a338d3cb34..d35f1d24156c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -64,6 +64,13 @@ static int serial_port_runtime_suspend(struct device *dev)
if (port->flags & UPF_DEAD)
return 0;
+ /*
+ * Nothing to do on pm_runtime_force_suspend(), see
+ * DEFINE_RUNTIME_DEV_PM_OPS.
+ */
+ if (!pm_runtime_enabled(dev))
+ return 0;
+
uart_port_lock_irqsave(port, &flags);
if (!port_dev->tx_enabled) {
uart_port_unlock_irqrestore(port, flags);
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 005d63ab1f44..c532416aec22 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
* @hba: per adapter instance
* @req: pointer to the request to be issued
*
- * Return: the hardware queue instance on which the request would
- * be queued.
+ * Return: the hardware queue instance on which the request will be or has
+ * been queued. %NULL if the request has already been freed.
*/
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req)
{
- u32 utag = blk_mq_unique_tag(req);
- u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+ struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);
- return &hba->uhq[hwq];
+ return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}
/**
@@ -515,6 +514,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (!cmd)
return -EINVAL;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ return 0;
} else {
hwq = hba->dev_cmd_queue;
}
@@ -634,20 +635,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
unsigned long flags;
- int err = FAILED;
+ int err;
if (!ufshcd_cmd_inflight(lrbp->cmd)) {
dev_err(hba->dev,
"%s: skip abort. cmd at tag %d already completed.\n",
__func__, tag);
- goto out;
+ return FAILED;
}
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
__func__, tag);
- goto out;
+ return FAILED;
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
@@ -659,7 +660,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
*/
dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
__func__, hwq->id, tag);
- goto out;
+ return FAILED;
}
/*
@@ -667,18 +668,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
* in the completion queue either. Query the device to see if
* the command is being processed in the device.
*/
- if (ufshcd_try_to_abort_task(hba, tag)) {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
lrbp->req_abort_skip = true;
- goto out;
+ return FAILED;
}
- err = SUCCESS;
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
-out:
- return err;
+ return SUCCESS;
}
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0cf07194bbe8..702fa1996992 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1366,7 +1366,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
- ufshcd_scsi_block_requests(hba);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1375,7 +1375,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
goto out;
}
@@ -1396,7 +1396,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
}
@@ -5193,17 +5193,19 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
}
/**
- * ufshcd_slave_configure - adjust SCSI device configurations
+ * ufshcd_device_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
+ * @lim: queue limits
*
* Return: 0 (success).
*/
-static int ufshcd_slave_configure(struct scsi_device *sdev)
+static int ufshcd_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
- blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1;
/*
* Block runtime-pm until all consumers are added.
@@ -6456,6 +6458,8 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
/* Release cmd in MCQ mode if abort succeeds */
if (is_mcq_enabled(hba) && (*ret == 0)) {
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+ if (!hwq)
+ return 0;
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
@@ -8787,6 +8791,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
(hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
+ ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
@@ -8907,7 +8912,7 @@ static const struct scsi_host_template ufshcd_driver_template = {
.queuecommand = ufshcd_queuecommand,
.mq_poll = ufshcd_poll,
.slave_alloc = ufshcd_slave_alloc,
- .slave_configure = ufshcd_slave_configure,
+ .device_configure = ufshcd_device_configure,
.slave_destroy = ufshcd_slave_destroy,
.change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 3a9a0dd4be70..949eca0adebe 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_FSL_USB2) += host/
obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_MAX3421_HCD) += host/
+obj-$(CONFIG_USB_XEN_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 4ce7cba2b48a..8f3b9a0a38e1 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1131,6 +1131,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
+ struct usb_endpoint_descriptor *in, *out;
int ret;
/* instance init */
@@ -1177,6 +1178,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
goto fail;
}
+ if (usb_endpoint_xfer_int(&cmd_ep->desc))
+ ret = usb_find_common_endpoints(intf->cur_altsetting,
+ NULL, NULL, &in, &out);
+ else
+ ret = usb_find_common_endpoints(intf->cur_altsetting,
+ &in, &out, NULL, NULL);
+
+ if (ret) {
+ usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_INT) {
usb_fill_int_urb(instance->rcv_urb,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index bada13f704b6..835bf2428dc6 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1084,6 +1084,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ ret = ci_ulpi_init(ci);
+ if (ret)
+ return ret;
+
if (ci->platdata->phy) {
ci->phy = ci->platdata->phy;
} else if (ci->platdata->usb_phy) {
@@ -1138,10 +1142,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
goto ulpi_exit;
}
- ret = ci_ulpi_init(ci);
- if (ret)
- return ret;
-
ci->hw_bank.phys = res->start;
ci->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 89fb51e2c3de..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -68,6 +68,11 @@ int ci_ulpi_init(struct ci_hdrc *ci)
if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
return 0;
+ /*
+ * Set PORTSC correctly so we can read/write ULPI registers for
+ * identification purposes
+ */
+ hw_phymode_configure(ci);
ci->ulpi_ops.read = ci_ulpi_read;
ci->ulpi_ops.write = ci_ulpi_write;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c553decb5461..6830be4419e2 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb)
dev_err(&desc->intf->dev, "Stall on int endpoint\n");
goto sw; /* halt is cleared in work */
default:
- dev_err(&desc->intf->dev,
+ dev_err_ratelimited(&desc->intf->dev,
"nonzero urb status received: %d\n", status);
break;
}
}
if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
- dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+ dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
urb->actual_length);
goto exit;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 3362af165ef5..880d52c0949d 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
if (ifp->desc.bNumEndpoints >= num_ep)
goto skip_to_next_endpoint_or_interface_descriptor;
+ /* Save a copy of the descriptor and use it instead of the original */
+ endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+ memcpy(&endpoint->desc, d, n);
+ d = &endpoint->desc;
+
+ /* Clear the reserved bits in bEndpointAddress */
+ i = d->bEndpointAddress &
+ (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK);
+ if (i != d->bEndpointAddress) {
+ dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n",
+ cfgno, inum, asnum, d->bEndpointAddress, i);
+ endpoint->desc.bEndpointAddress = i;
+ }
+
/* Check for duplicate endpoint addresses */
if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
@@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
}
}
- endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+ /* Accept this endpoint */
++ifp->desc.bNumEndpoints;
-
- memcpy(&endpoint->desc, d, n);
INIT_LIST_HEAD(&endpoint->urb_list);
/*
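
Commentary (not part of the patch): usb_parse_endpoint() above now copies the descriptor first and masks bEndpointAddress down to its direction bit and endpoint number, so reserved bits cannot leak into later checks. A small sketch of that sanitising step (user-space; the masks mirror the usual USB layout but are restated here as local defines):

    #include <stdio.h>

    #define DEMO_EP_NUMBER_MASK	0x0f	/* bits 3..0: endpoint number */
    #define DEMO_EP_DIR_MASK	0x80	/* bit 7: direction (IN when set) */

    static unsigned char sanitize_ep_address(unsigned char addr)
    {
    	unsigned char fixed = addr & (DEMO_EP_DIR_MASK | DEMO_EP_NUMBER_MASK);

    	if (fixed != addr)
    		printf("address 0x%02X has reserved bits set, using 0x%02X\n",
    		       addr, fixed);

    	return fixed;
    }

    int main(void)
    {
    	sanitize_ep_address(0x81);	/* valid: IN endpoint 1, unchanged */
    	sanitize_ep_address(0xC1);	/* bit 6 is reserved, cleared to 0x81 */
    	return 0;
    }
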
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e3366f4d82b9..1ff7d901fede 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
+ unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
/*
- * This function can be called in task context inside another remote
- * coverage collection section, but kcov doesn't support that kind of
- * recursion yet. Only collect coverage in softirq context for now.
+ * Only collect coverage in the softirq context and disable interrupts
+ * to avoid scenarios with nested remote coverage collection sections
+ * that KCOV does not support.
+ * See the comment next to kcov_remote_start_usb_softirq() for details.
*/
- kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
+ flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- kcov_remote_stop_softirq();
+ kcov_remote_stop_softirq(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index f1a499ee482c..763e4122ed5b 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -84,9 +84,12 @@ static bool usb_of_has_devices_or_graph(const struct usb_device *hub)
if (of_graph_is_present(np))
return true;
- for_each_child_of_node(np, child)
- if (of_property_present(child, "reg"))
+ for_each_child_of_node(np, child) {
+ if (of_property_present(child, "reg")) {
+ of_node_put(child);
return true;
+ }
+ }
return false;
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index b4783574b8e6..13171454f959 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -506,6 +506,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
+ /* START BP-850k Printer */
+ { USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF },
+
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7ee61a89520b..cb82557678dd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -957,12 +957,16 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
+ unsigned int power_opt;
+ unsigned int hw_mode;
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+ power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
- switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
+ switch (power_opt) {
case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
/**
* WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
@@ -995,6 +999,20 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
break;
}
+ /*
+ * This is a workaround for STAR#4846132, which only affects
+ * DWC_usb31 version2.00a operating in host mode.
+ *
+ * There is a problem in DWC_usb31 version 2.00a operating
+ * in host mode that would cause a CSR read timeout When CSR
+ * read coincides with RAM Clock Gating Entry. By disable
+ * Clock Gating, sacrificing power consumption for normal
+ * operation.
+ */
+ if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
+ hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
+ reg |= DWC3_GCTL_DSBLCLKGTNG;
+
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
dev_info(dwc->dev, "Running with FPGA optimizations\n");
@@ -2250,7 +2268,6 @@ assert_reset:
static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
- unsigned long flags;
u32 reg;
int i;
@@ -2293,9 +2310,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
synchronize_irq(dwc->irq_gadget);
}
@@ -2312,7 +2327,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
- unsigned long flags;
int ret;
u32 reg;
int i;
@@ -2366,9 +2380,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
dwc3_otg_host_init(dwc);
} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
}
break;
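
The dwc3 change above decodes the power option and hardware mode once and then sets the clock-gating disable bit only for the affected DWC_usb31 2.00a host configuration. A compact sketch of that read-modify-write shape, with invented field values and register layout:

/* Sketch only: bit positions and field encodings are made up. */
#include <stdint.h>
#include <stdio.h>

#define GCTL_DSBLCLKGTNG	(1u << 0)
#define MODE_GADGET		1u
#define PWROPT_NO		0u

static uint32_t apply_clk_gating_workaround(uint32_t gctl, unsigned int hw_mode,
					    unsigned int power_opt, int is_ver_200a)
{
	if (power_opt != PWROPT_NO && hw_mode != MODE_GADGET && is_ver_200a)
		gctl |= GCTL_DSBLCLKGTNG;
	return gctl;
}

int main(void)
{
	uint32_t gctl = 0;

	gctl = apply_clk_gating_workaround(gctl, 0 /* host */, 2, 1);
	printf("GCTL = %#x\n", gctl);
	return 0;
}
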
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 9ef821ca2fc7..052852f80146 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -54,6 +54,10 @@
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
+#define PCI_DEVICE_ID_INTEL_PTLH 0xe332
+#define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e
+#define PCI_DEVICE_ID_INTEL_PTLU 0xe432
+#define PCI_DEVICE_ID_INTEL_PTLU_PCH 0xe47e
#define PCI_DEVICE_ID_AMD_MR 0x163a
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
@@ -430,6 +434,10 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, PTLU_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) },
{ PCI_DEVICE_DATA(AMD, MR, &dwc3_pci_amd_mr_swnode) },
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index ce3cfa1f36f5..0e7c1e947c0a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -115,9 +115,12 @@ static int usb_string_copy(const char *s, char **s_copy)
int ret;
char *str;
char *copy = *s_copy;
+
ret = strlen(s);
if (ret > USB_MAX_STRING_LEN)
return -EOVERFLOW;
+ if (ret < 1)
+ return -EINVAL;
if (copy) {
str = copy;
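
The configfs.c hunk above rejects empty gadget strings in addition to over-long ones before making a copy. A userspace sketch of the same validation, assuming a USB_MAX_STRING_LEN value of 126 for illustration:

/* Sketch only: the length limit below is an assumption. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define USB_MAX_STRING_LEN 126

static int string_copy(const char *s, char **out)
{
	size_t len = strlen(s);

	if (len > USB_MAX_STRING_LEN)
		return -EOVERFLOW;
	if (len < 1)
		return -EINVAL;

	*out = strdup(s);
	return *out ? 0 : -ENOMEM;
}

int main(void)
{
	char *copy = NULL;

	printf("empty: %d\n", string_copy("", &copy));
	printf("ok:    %d (%s)\n", string_copy("gadget", &copy), copy);
	free(copy);
	return 0;
}
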
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index ba7d180cc9e6..44e20c6c36d3 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -213,6 +213,7 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
struct usb_endpoint_descriptor *ss)
{
switch (gadget->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
return ss;
case USB_SPEED_HIGH:
@@ -449,11 +450,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
- if (dev->interface < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- mutex_unlock(&dev->lock_printer_io);
- return -ENODEV;
- }
+ if (dev->interface < 0)
+ goto out_disabled;
/* We will use this flag later to check if a printer reset happened
* after we turn interrupts back on.
@@ -461,6 +459,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
dev->reset_printer = 0;
setup_rx_reqs(dev);
+	/* setup_rx_reqs() dropped the lock - need to re-check the interface */
+ if (dev->interface < 0)
+ goto out_disabled;
bytes_copied = 0;
current_rx_req = dev->current_rx_req;
@@ -494,6 +495,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
wait_event_interruptible(dev->rx_wait,
(likely(!list_empty(&dev->rx_buffers))));
spin_lock_irqsave(&dev->lock, flags);
+ if (dev->interface < 0)
+ goto out_disabled;
}
/* We have data to return then copy it to the caller's buffer.*/
@@ -537,6 +540,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
+
 	/* If we are not returning all the data left in this RX request
 	 * buffer, adjust the amount of data left in the buffer.
 	 * Otherwise, if we are done with this RX request buffer, then
@@ -566,6 +572,11 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
return bytes_copied;
else
return -EAGAIN;
+
+out_disabled:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock_printer_io);
+ return -ENODEV;
}
static ssize_t
@@ -586,11 +597,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
- if (dev->interface < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- mutex_unlock(&dev->lock_printer_io);
- return -ENODEV;
- }
+ if (dev->interface < 0)
+ goto out_disabled;
/* Check if a printer reset happens while we have interrupts on */
dev->reset_printer = 0;
@@ -613,6 +621,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
wait_event_interruptible(dev->tx_wait,
(likely(!list_empty(&dev->tx_reqs))));
spin_lock_irqsave(&dev->lock, flags);
+ if (dev->interface < 0)
+ goto out_disabled;
}
while (likely(!list_empty(&dev->tx_reqs)) && len) {
@@ -662,6 +672,9 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
+
list_add(&req->list, &dev->tx_reqs_active);
/* here, we unlock, and only unlock, to avoid deadlock. */
@@ -674,6 +687,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
}
spin_unlock_irqrestore(&dev->lock, flags);
@@ -685,6 +700,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return bytes_copied;
else
return -EAGAIN;
+
+out_disabled:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock_printer_io);
+ return -ENODEV;
}
static int
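
The f_printer.c rework above funnels every failure branch into a single out_disabled label that drops the spinlock and mutex and returns -ENODEV. A minimal userspace sketch of that centralized exit path, using two pthread mutexes in place of the kernel locks:

/* Sketch only: build with cc -pthread. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int interface = -1;	/* simulate a disconnected interface */

static int do_read(void)
{
	pthread_mutex_lock(&io_lock);
	pthread_mutex_lock(&dev_lock);

	if (interface < 0)
		goto out_disabled;

	/* ... normal I/O path would run here ... */
	pthread_mutex_unlock(&dev_lock);
	pthread_mutex_unlock(&io_lock);
	return 0;

out_disabled:
	/* single exit for every "device went away" branch */
	pthread_mutex_unlock(&dev_lock);
	pthread_mutex_unlock(&io_lock);
	return -ENODEV;
}

int main(void)
{
	printf("do_read() = %d\n", do_read());
	return 0;
}
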
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 11dd0b9e847f..95191083b455 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1163,8 +1163,6 @@ struct net_device *gether_connect(struct gether *link)
if (netif_running(dev->net))
eth_start(dev, GFP_ATOMIC);
- netif_device_attach(dev->net);
-
/* on error, disable any endpoints */
} else {
(void) usb_ep_disable(link->out_ep);
@@ -1202,7 +1200,7 @@ void gether_disconnect(struct gether *link)
DBG(dev, "%s\n", __func__);
- netif_device_detach(dev->net);
+ netif_stop_queue(dev->net);
netif_carrier_off(dev->net);
/* disable endpoints, forcing (synchronous) completion
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
index 3916c8e2ba01..821a6ab5da56 100644
--- a/drivers/usb/gadget/udc/aspeed_udc.c
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -66,8 +66,8 @@
#define USB_UPSTREAM_EN BIT(0)
/* Main config reg */
-#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
-#define UDC_CFG_ADDR_MASK (0x3f)
+#define UDC_CFG_SET_ADDR(x) ((x) & UDC_CFG_ADDR_MASK)
+#define UDC_CFG_ADDR_MASK GENMASK(6, 0)
/* Interrupt ctrl & status reg */
#define UDC_IRQ_EP_POOL_NAK BIT(17)
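
The aspeed_udc.c change above widens the device-address mask to seven bits and expresses it with GENMASK(). A small sketch of how such a mask is built and applied, using a simplified userspace GENMASK rather than the kernel macro:

/* Sketch only: simplified 32-bit GENMASK. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define CFG_ADDR_MASK	GENMASK(6, 0)		/* 7-bit device address */
#define CFG_SET_ADDR(x)	((x) & CFG_ADDR_MASK)

int main(void)
{
	printf("mask      = %#x\n", CFG_ADDR_MASK);	 /* 0x7f */
	printf("addr 0x85 = %#x\n", CFG_SET_ADDR(0x85)); /* 0x85 & 0x7f = 0x05 */
	return 0;
}
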
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c040d816e626..05881153883e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -36,6 +36,7 @@
#define PCI_VENDOR_ID_ETRON 0x1b6f
#define PCI_DEVICE_ID_EJ168 0x7023
+#define PCI_DEVICE_ID_EJ188 0x7052
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -395,6 +396,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_EJ188) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0014) {
xhci->quirks |= XHCI_ZERO_64B_REGS;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9e90d2952760..fd0cde3d1569 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1031,13 +1031,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
break;
case TD_DIRTY: /* TD is cached, clear it */
case TD_HALTED:
+ case TD_CLEARING_CACHE_DEFERRED:
+ if (cached_td) {
+ if (cached_td->urb->stream_id != td->urb->stream_id) {
+ /* Multiple streams case, defer move dq */
+ xhci_dbg(xhci,
+ "Move dq deferred: stream %u URB %p\n",
+ td->urb->stream_id, td->urb);
+ td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
+ break;
+ }
+
+ /* Should never happen, but clear the TD if it does */
+ xhci_warn(xhci,
+ "Found multiple active URBs %p and %p in stream %u?\n",
+ td->urb, cached_td->urb,
+ td->urb->stream_id);
+ td_to_noop(xhci, ring, cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
+ }
+
td->cancel_status = TD_CLEARING_CACHE;
- if (cached_td)
- /* FIXME stream case, several stopped rings */
- xhci_dbg(xhci,
- "Move dq past stream %u URB %p instead of stream %u URB %p\n",
- td->urb->stream_id, td->urb,
- cached_td->urb->stream_id, cached_td->urb);
cached_td = td;
break;
}
@@ -1057,10 +1071,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
- if (td->cancel_status != TD_CLEARING_CACHE)
+ /*
+ * Deferred TDs need to have the deq pointer set after the above command
+ * completes, so if that failed we just give up on all of them (and
+ * complain loudly since this could cause issues due to caching).
+ */
+ if (td->cancel_status != TD_CLEARING_CACHE &&
+ td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
continue;
- xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
- td->urb);
+ xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+ td->urb);
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARED;
}
@@ -1346,6 +1366,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_td *td, *tmp_td;
+ bool deferred = false;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1432,6 +1453,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+ } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
+ deferred = true;
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
@@ -1441,8 +1464,17 @@ cleanup:
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
- /* Restart any rings with pending URBs */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+
+ if (deferred) {
+ /* We have more streams to clear */
+ xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+ __func__);
+ xhci_invalidate_cancelled_tds(ep);
+ } else {
+ /* Restart any rings with pending URBs */
+ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -2524,9 +2556,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
- ep_trb_len = 0;
- remaining = 0;
- break;
+ td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
+ goto finish_td;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
(ep->err_count++ > MAX_SOFT_RETRY) ||
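
The xhci-ring.c change above can only move the dequeue pointer for one stream per Set TR Dequeue command, so cancelled TDs on other streams are marked deferred and the invalidation is re-run when the command completes. A simplified sketch of that per-stream batching, with toy data structures:

/* Sketch only: toy TD list, not the xHCI structures. */
#include <stdio.h>

enum td_state { CANCELLED, CLEARING, CLEARING_DEFERRED, CLEARED };

struct td { int stream_id; enum td_state state; };

/* Mark one stream's TDs CLEARING and defer the rest; return that stream
 * id, or -1 when nothing is left to clear. */
static int invalidate_pass(struct td *tds, int n)
{
	int active_stream = -1;

	for (int i = 0; i < n; i++) {
		if (tds[i].state == CLEARED)
			continue;
		if (active_stream == -1)
			active_stream = tds[i].stream_id;
		tds[i].state = (tds[i].stream_id == active_stream) ?
			       CLEARING : CLEARING_DEFERRED;
	}
	return active_stream;
}

int main(void)
{
	struct td tds[] = { { 1, CANCELLED }, { 2, CANCELLED }, { 1, CANCELLED } };
	int n = 3, stream;

	while ((stream = invalidate_pass(tds, n)) != -1) {
		printf("set dequeue pointer for stream %d\n", stream);
		for (int i = 0; i < n; i++)	/* "command completion" */
			if (tds[i].state == CLEARING)
				tds[i].state = CLEARED;
	}
	return 0;
}
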
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 37eb37b0affa..0a8cf6c17f82 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1125,10 +1125,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "Start the secondary HCD\n");
retval = xhci_run(xhci->shared_hcd);
}
-
+ if (retval)
+ return retval;
+ /*
+ * Resume roothubs unconditionally as PORTSC change bits are not
+ * immediately visible after xHC reset
+ */
hcd->state = HC_STATE_SUSPENDED;
- if (xhci->shared_hcd)
+
+ if (xhci->shared_hcd) {
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
+ usb_hcd_resume_root_hub(hcd);
+
goto done;
}
@@ -1152,7 +1162,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbc_resume(xhci);
- done:
if (retval == 0) {
/*
* Resume roothubs only if there are pending events.
@@ -1178,6 +1187,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
usb_hcd_resume_root_hub(hcd);
}
}
+done:
/*
* If system is subject to the Quirk, Compliance Mode Timer needs to
* be re-initialized Always after a system resume. Ports are subject
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 30415158ed3c..78d014c4d884 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status {
TD_DIRTY = 0,
TD_HALTED,
TD_CLEARING_CACHE,
+ TD_CLEARING_CACHE_DEFERRED,
TD_CLEARED,
};
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 8abf3a567e30..108d9a593a80 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -556,7 +556,7 @@ static int da8xx_probe(struct platform_device *pdev)
ret = of_platform_populate(pdev->dev.of_node, NULL,
da8xx_auxdata_lookup, &pdev->dev);
if (ret)
- return ret;
+ goto err_unregister_phy;
pinfo = da8xx_dev_info;
pinfo.parent = &pdev->dev;
@@ -571,9 +571,13 @@ static int da8xx_probe(struct platform_device *pdev)
ret = PTR_ERR_OR_ZERO(glue->musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
- usb_phy_generic_unregister(glue->usb_phy);
+ goto err_unregister_phy;
}
+ return 0;
+
+err_unregister_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
return ret;
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 8b0308d84270..85697466b147 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1737,6 +1737,49 @@ static void mos7840_port_remove(struct usb_serial_port *port)
kfree(mos7840_port);
}
+static int mos7840_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct moschip_port *mos7840_port;
+ struct usb_serial_port *port;
+ int i;
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ if (!tty_port_initialized(&port->port))
+ continue;
+
+ mos7840_port = usb_get_serial_port_data(port);
+
+ usb_kill_urb(mos7840_port->read_urb);
+ mos7840_port->read_urb_busy = false;
+ }
+
+ return 0;
+}
+
+static int mos7840_resume(struct usb_serial *serial)
+{
+ struct moschip_port *mos7840_port;
+ struct usb_serial_port *port;
+ int res;
+ int i;
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ if (!tty_port_initialized(&port->port))
+ continue;
+
+ mos7840_port = usb_get_serial_port_data(port);
+
+ mos7840_port->read_urb_busy = true;
+ res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO);
+ if (res)
+ mos7840_port->read_urb_busy = false;
+ }
+
+ return 0;
+}
+
static struct usb_serial_driver moschip7840_4port_device = {
.driver = {
.owner = THIS_MODULE,
@@ -1764,6 +1807,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
+ .suspend = mos7840_suspend,
+ .resume = mos7840_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
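
The mos7840.c hunk above adds suspend and resume callbacks that kill the read URB on each open port and resubmit it afterwards, tracking the in-flight state in a busy flag. A simplified sketch of that loop, with stand-in types and helpers:

/* Sketch only: port, URB helpers and flags are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct port {
	bool open;
	bool read_urb_busy;
};

static void kill_read_urb(struct port *p) { (void)p; }
static int submit_read_urb(struct port *p) { (void)p; return 0; /* pretend success */ }

static void serial_suspend(struct port *ports, int n)
{
	for (int i = 0; i < n; i++) {
		if (!ports[i].open)
			continue;
		kill_read_urb(&ports[i]);
		ports[i].read_urb_busy = false;
	}
}

static void serial_resume(struct port *ports, int n)
{
	for (int i = 0; i < n; i++) {
		if (!ports[i].open)
			continue;
		ports[i].read_urb_busy = true;
		if (submit_read_urb(&ports[i]) != 0)
			ports[i].read_urb_busy = false;	/* roll back on failure */
	}
}

int main(void)
{
	struct port ports[2] = { { true, true }, { false, false } };

	serial_suspend(ports, 2);
	serial_resume(ports, 2);
	printf("port0 busy after resume: %d\n", ports[0].read_urb_busy);
	return 0;
}
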
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8a5846d4adf6..311040f9b935 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1425,6 +1425,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff), /* Telit FN912 */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff), /* Telit FN912 */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
@@ -1433,6 +1437,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
.driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff), /* Telit generic core-dump device */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
@@ -2224,6 +2230,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00),
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00),
+ .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
.driver_info = RSVD(1) | RSVD(4) },
@@ -2284,6 +2294,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */
.driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */
+ .driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
@@ -2321,6 +2333,32 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
.driver_info = RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff), /* Rolling RW350-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for Global SKU */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for China SKU */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for SA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for EU */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for NA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for China EDU */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Global EDU */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 115f05a6201a..40d34cc28344 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -105,6 +105,8 @@ struct alauda_info {
unsigned char sense_key;
unsigned long sense_asc; /* additional sense code */
unsigned long sense_ascq; /* additional sense code qualifier */
+
+ bool media_initialized;
};
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
@@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us)
}
/* Check for media change */
- if (status[0] & 0x08) {
+ if (status[0] & 0x08 || !info->media_initialized) {
usb_stor_dbg(us, "Media change detected\n");
alauda_free_maps(&MEDIA_INFO(us));
- alauda_init_media(us);
-
+ rc = alauda_init_media(us);
+ if (rc == USB_STOR_TRANSPORT_GOOD)
+ info->media_initialized = true;
info->sense_key = UNIT_ATTENTION;
info->sense_asc = 0x28;
info->sense_ascq = 0x00;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index b31464740f6c..8c8b5e6041cc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -79,6 +79,12 @@ static int slave_alloc (struct scsi_device *sdev)
if (us->protocol == USB_PR_BULK && us->max_lun > 0)
sdev->sdev_bflags |= BLIST_FORCELUN;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
return 0;
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index a48870a87a29..b610a2de4ae5 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -21,6 +21,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev)
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
sdev->hostdata = devinfo;
return 0;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8a1af08f71b6..5d4da962acc8 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
caps.role = TYPEC_SOURCE;
- if (cap)
+ if (cap) {
usb_power_delivery_unregister_capabilities(cap);
+ port->partner_source_caps = NULL;
+ }
cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
if (IS_ERR(cap))
@@ -6172,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
port->tcpc->set_bist_data(port->tcpc, false);
switch (port->state) {
+ case TOGGLING:
case ERROR_RECOVERY:
case PORT_RESET:
case PORT_RESET_WAIT_OFF:
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index cb52e7b0a2c5..2cc7aedd490f 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -153,8 +153,13 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
}
if (cci & UCSI_CCI_ERROR) {
- if (cmd == UCSI_GET_ERROR_STATUS)
+ if (cmd == UCSI_GET_ERROR_STATUS) {
+ ret = ucsi_acknowledge(ucsi, false);
+ if (ret)
+ return ret;
+
return -EIO;
+ }
return ucsi_read_error(ucsi);
}
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 8d112c3edae5..adf32ca0f761 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -25,6 +25,7 @@ struct ucsi_acpi {
unsigned long flags;
#define UCSI_ACPI_COMMAND_PENDING 1
#define UCSI_ACPI_ACK_PENDING 2
+#define UCSI_ACPI_CHECK_BOGUS_EVENT 3
guid_t guid;
u64 cmd;
};
@@ -128,6 +129,58 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
.async_write = ucsi_acpi_async_write
};
+static int ucsi_gram_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
+{
+ u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
+ UCSI_CONSTAT_PDOS_CHANGE;
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ struct ucsi_connector_status *status;
+ int ret;
+
+ ret = ucsi_acpi_read(ucsi, offset, val, val_len);
+ if (ret < 0)
+ return ret;
+
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_CONNECTOR_STATUS &&
+ test_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags) &&
+ offset == UCSI_MESSAGE_IN) {
+ status = (struct ucsi_connector_status *)val;
+
+ /* Clear the bogus change */
+ if (status->change == bogus_change)
+ status->change = 0;
+
+ clear_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags);
+ }
+
+ return ret;
+}
+
+static int ucsi_gram_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+ if (ret < 0)
+ return ret;
+
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_PDOS &&
+ ua->cmd & UCSI_GET_PDOS_PARTNER_PDO(1) &&
+ ua->cmd & UCSI_GET_PDOS_SRC_PDOS)
+ set_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags);
+
+ return ret;
+}
+
+static const struct ucsi_operations ucsi_gram_ops = {
+ .read = ucsi_gram_read,
+ .sync_write = ucsi_gram_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
static const struct dmi_system_id ucsi_acpi_quirks[] = {
{
.matches = {
@@ -136,6 +189,14 @@ static const struct dmi_system_id ucsi_acpi_quirks[] = {
},
.driver_data = (void *)&ucsi_zenbook_ops,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "LG gram PC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "90Q"),
+ },
+ .driver_data = (void *)&ucsi_gram_ops,
+ },
{ }
};
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index f7546bb488c3..2fa973afe4e6 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -14,7 +14,7 @@
#include <linux/soc/qcom/pmic_glink.h>
#include "ucsi.h"
-#define PMIC_GLINK_MAX_PORTS 2
+#define PMIC_GLINK_MAX_PORTS 3
#define UCSI_BUF_SIZE 48
@@ -372,6 +372,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
ret = fwnode_property_read_u32(fwnode, "reg", &port);
if (ret < 0) {
dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ fwnode_handle_put(fwnode);
return ret;
}
@@ -386,9 +387,11 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
if (!desc)
continue;
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ fwnode_handle_put(fwnode);
return dev_err_probe(dev, PTR_ERR(desc),
"unable to acquire orientation gpio\n");
+ }
ucsi->port_orientation[port] = desc;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index ac48b7763114..ac69288e8bb0 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -65,6 +65,7 @@ struct ucsi_stm32g0 {
struct device *dev;
unsigned long flags;
#define COMMAND_PENDING 1
+#define ACK_PENDING 2
const char *fw_name;
struct ucsi *ucsi;
bool suspended;
@@ -396,9 +397,13 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
size_t len)
{
struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
int ret;
- set_bit(COMMAND_PENDING, &g0->flags);
+ if (ack)
+ set_bit(ACK_PENDING, &g0->flags);
+ else
+ set_bit(COMMAND_PENDING, &g0->flags);
ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
if (ret)
@@ -406,9 +411,14 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
ret = -ETIMEDOUT;
+ else
+ return 0;
out_clear_bit:
- clear_bit(COMMAND_PENDING, &g0->flags);
+ if (ack)
+ clear_bit(ACK_PENDING, &g0->flags);
+ else
+ clear_bit(COMMAND_PENDING, &g0->flags);
return ret;
}
@@ -429,8 +439,9 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(COMMAND_PENDING, &g0->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ if (cci & UCSI_CCI_ACK_COMPLETE && test_and_clear_bit(ACK_PENDING, &g0->flags))
+ complete(&g0->complete);
+ if (cci & UCSI_CCI_COMMAND_COMPLETE && test_and_clear_bit(COMMAND_PENDING, &g0->flags))
complete(&g0->complete);
return IRQ_HANDLED;
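
The ucsi_stm32g0.c change above keeps separate pending bits for ACK and command writes so the interrupt handler only completes the waiter that matches the completion flag it observed. A userspace sketch of that test-and-clear dispatch using C11 atomics (bit positions and helper names are illustrative):

/* Sketch only: C11 atomics stand in for the kernel bitops. */
#include <stdatomic.h>
#include <stdio.h>

#define CCI_COMMAND_COMPLETE	(1u << 0)
#define CCI_ACK_COMPLETE	(1u << 1)

#define COMMAND_PENDING		0
#define ACK_PENDING		1

static atomic_uint flags;

static int test_and_clear_bit(int bit, atomic_uint *p)
{
	return (atomic_fetch_and(p, ~(1u << bit)) >> bit) & 1;
}

static void irq_handler(unsigned int cci)
{
	if ((cci & CCI_ACK_COMPLETE) && test_and_clear_bit(ACK_PENDING, &flags))
		printf("complete ACK waiter\n");
	if ((cci & CCI_COMMAND_COMPLETE) && test_and_clear_bit(COMMAND_PENDING, &flags))
		printf("complete command waiter\n");
}

int main(void)
{
	atomic_fetch_or(&flags, 1u << COMMAND_PENDING);	/* sync_write(cmd) */
	irq_handler(CCI_ACK_COMPLETE);		/* stray ack: no completion */
	irq_handler(CCI_COMMAND_COMPLETE);	/* completes the command */
	return 0;
}
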
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
index e75da0a70d1f..bb1817bd4ff3 100644
--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c
@@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
filep->private_data = df;
+ /*
+ * Use the pseudo fs inode on the device to link all mmaps
+ * to the same address space, allowing us to unmap all vmas
+	 * associated with this device using unmap_mapping_range().
+ */
+ filep->f_mapping = device->inode->i_mapping;
+
return 0;
err_put_registration:
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 610a429c6191..ded364588d29 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -286,6 +286,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
*/
filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
+ /*
+ * Use the pseudo fs inode on the device to link all mmaps
+ * to the same address space, allowing us to unmap all vmas
+	 * associated with this device using unmap_mapping_range().
+ */
+ filep->f_mapping = device->inode->i_mapping;
+
if (device->group->type == VFIO_NO_IOMMU)
dev_warn(device->dev, "vfio-noiommu device opened by user "
"(%s:%d)\n", current->comm, task_pid_nr(current));
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 80cae87fff36..ba0ce0075b2f 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1260,7 +1260,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
struct vfio_pci_hot_reset_info hdr;
struct vfio_pci_fill_info fill = {};
bool slot = false;
- int ret, count;
+ int ret, count = 0;
if (copy_from_user(&hdr, arg, minsz))
return -EFAULT;
@@ -1610,100 +1610,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
}
EXPORT_SYMBOL_GPL(vfio_pci_core_write);
-/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
-static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
+static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
{
- struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+ struct vfio_device *core_vdev = &vdev->vdev;
+ loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX);
+ loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX);
+ loff_t len = end - start;
- /*
- * Lock ordering:
- * vma_lock is nested under mmap_lock for vm_ops callback paths.
- * The memory_lock semaphore is used by both code paths calling
- * into this function to zap vmas and the vm_ops.fault callback
- * to protect the memory enable state of the device.
- *
- * When zapping vmas we need to maintain the mmap_lock => vma_lock
- * ordering, which requires using vma_lock to walk vma_list to
- * acquire an mm, then dropping vma_lock to get the mmap_lock and
- * reacquiring vma_lock. This logic is derived from similar
- * requirements in uverbs_user_mmap_disassociate().
- *
- * mmap_lock must always be the top-level lock when it is taken.
- * Therefore we can only hold the memory_lock write lock when
- * vma_list is empty, as we'd need to take mmap_lock to clear
- * entries. vma_list can only be guaranteed empty when holding
- * vma_lock, thus memory_lock is nested under vma_lock.
- *
- * This enables the vm_ops.fault callback to acquire vma_lock,
- * followed by memory_lock read lock, while already holding
- * mmap_lock without risk of deadlock.
- */
- while (1) {
- struct mm_struct *mm = NULL;
-
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock))
- return 0;
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- while (!list_empty(&vdev->vma_list)) {
- mmap_vma = list_first_entry(&vdev->vma_list,
- struct vfio_pci_mmap_vma,
- vma_next);
- mm = mmap_vma->vma->vm_mm;
- if (mmget_not_zero(mm))
- break;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- mm = NULL;
- }
- if (!mm)
- return 1;
- mutex_unlock(&vdev->vma_lock);
-
- if (try) {
- if (!mmap_read_trylock(mm)) {
- mmput(mm);
- return 0;
- }
- } else {
- mmap_read_lock(mm);
- }
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock)) {
- mmap_read_unlock(mm);
- mmput(mm);
- return 0;
- }
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- list_for_each_entry_safe(mmap_vma, tmp,
- &vdev->vma_list, vma_next) {
- struct vm_area_struct *vma = mmap_vma->vma;
-
- if (vma->vm_mm != mm)
- continue;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
-
- zap_vma_ptes(vma, vma->vm_start,
- vma->vm_end - vma->vm_start);
- }
- mutex_unlock(&vdev->vma_lock);
- mmap_read_unlock(mm);
- mmput(mm);
- }
+ unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true);
}
void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
{
- vfio_pci_zap_and_vma_lock(vdev, false);
down_write(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
+ vfio_pci_zap_bars(vdev);
}
u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
@@ -1725,99 +1645,56 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c
up_write(&vdev->memory_lock);
}
-/* Caller holds vma_lock */
-static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
- struct vm_area_struct *vma)
-{
- struct vfio_pci_mmap_vma *mmap_vma;
-
- mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
- if (!mmap_vma)
- return -ENOMEM;
-
- mmap_vma->vma = vma;
- list_add(&mmap_vma->vma_next, &vdev->vma_list);
-
- return 0;
-}
-
-/*
- * Zap mmaps on open so that we can fault them in on access and therefore
- * our vma_list only tracks mappings accessed since last zap.
- */
-static void vfio_pci_mmap_open(struct vm_area_struct *vma)
-{
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
-static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+static unsigned long vma_to_pfn(struct vm_area_struct *vma)
{
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
+ int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ u64 pgoff;
- mutex_lock(&vdev->vma_lock);
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma) {
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- break;
- }
- }
- mutex_unlock(&vdev->vma_lock);
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+
+ return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
}
static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
- vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+ unsigned long addr = vma->vm_start;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
+
+ pfn = vma_to_pfn(vma);
- mutex_lock(&vdev->vma_lock);
down_read(&vdev->memory_lock);
- /*
- * Memory region cannot be accessed if the low power feature is engaged
- * or memory access is disabled.
- */
- if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
- ret = VM_FAULT_SIGBUS;
- goto up_out;
- }
+ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
+ goto out_unlock;
+
+ ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+ if (ret & VM_FAULT_ERROR)
+ goto out_unlock;
/*
- * We populate the whole vma on fault, so we need to test whether
- * the vma has already been mapped, such as for concurrent faults
- * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
- * we ask it to fill the same range again.
+	 * Pre-fault the remainder of the vma, abort further insertions and
+	 * suppress the error if a fault is encountered during the pre-fault.
*/
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma)
- goto up_out;
- }
-
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot)) {
- ret = VM_FAULT_SIGBUS;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
- goto up_out;
- }
+ for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
+ if (addr == vmf->address)
+ continue;
- if (__vfio_pci_add_vma(vdev, vma)) {
- ret = VM_FAULT_OOM;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+ break;
}
-up_out:
+out_unlock:
up_read(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
+
return ret;
}
static const struct vm_operations_struct vfio_pci_mmap_ops = {
- .open = vfio_pci_mmap_open,
- .close = vfio_pci_mmap_close,
.fault = vfio_pci_mmap_fault,
};
@@ -1880,11 +1757,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
vma->vm_private_data = vdev;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
/*
- * See remap_pfn_range(), called from vfio_pci_fault() but we can't
- * change vm_flags within the fault handler. Set them now.
+ * Set vm_flags now, they should not be changed in the fault handler.
+ * We want the same flags and page protection (decrypted above) as
+ * io_remap_pfn_range() would set.
*
* VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64,
* allowing KVM stage 2 device mapping attributes to use Normal-NC
@@ -2202,8 +2080,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->dummy_resources_list);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
- mutex_init(&vdev->vma_lock);
- INIT_LIST_HEAD(&vdev->vma_list);
INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
xa_init(&vdev->ctx);
@@ -2219,7 +2095,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
mutex_destroy(&vdev->igate);
mutex_destroy(&vdev->ioeventfds_lock);
- mutex_destroy(&vdev->vma_lock);
kfree(vdev->region);
kfree(vdev->pm_save);
}
@@ -2497,26 +2372,15 @@ unwind:
return ret;
}
-/*
- * We need to get memory_lock for each device, but devices can share mmap_lock,
- * therefore we need to zap and hold the vma_lock for each device, and only then
- * get each memory_lock.
- */
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
struct vfio_pci_group_info *groups,
struct iommufd_ctx *iommufd_ctx)
{
- struct vfio_pci_core_device *cur_mem;
- struct vfio_pci_core_device *cur_vma;
- struct vfio_pci_core_device *cur;
+ struct vfio_pci_core_device *vdev;
struct pci_dev *pdev;
- bool is_mem = true;
int ret;
mutex_lock(&dev_set->lock);
- cur_mem = list_first_entry(&dev_set->device_list,
- struct vfio_pci_core_device,
- vdev.dev_set_list);
pdev = vfio_pci_dev_set_resettable(dev_set);
if (!pdev) {
@@ -2533,7 +2397,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
if (ret)
goto err_unlock;
- list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
bool owned;
/*
@@ -2557,38 +2421,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
* Otherwise, reset is not allowed.
*/
if (iommufd_ctx) {
- int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
+ int devid = vfio_iommufd_get_dev_id(&vdev->vdev,
iommufd_ctx);
owned = (devid > 0 || devid == -ENOENT);
} else {
- owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
+ owned = vfio_dev_in_groups(&vdev->vdev, groups);
}
if (!owned) {
ret = -EINVAL;
- goto err_undo;
+ break;
}
/*
- * Locking multiple devices is prone to deadlock, runaway and
- * unwind if we hit contention.
+ * Take the memory write lock for each device and zap BAR
+ * mappings to prevent the user accessing the device while in
+ * reset. Locking multiple devices is prone to deadlock,
+ * runaway and unwind if we hit contention.
*/
- if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
+ if (!down_write_trylock(&vdev->memory_lock)) {
ret = -EBUSY;
- goto err_undo;
+ break;
}
+
+ vfio_pci_zap_bars(vdev);
}
- cur_vma = NULL;
- list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
- if (!down_write_trylock(&cur_mem->memory_lock)) {
- ret = -EBUSY;
- goto err_undo;
- }
- mutex_unlock(&cur_mem->vma_lock);
+ if (!list_entry_is_head(vdev,
+ &dev_set->device_list, vdev.dev_set_list)) {
+ vdev = list_prev_entry(vdev, vdev.dev_set_list);
+ goto err_undo;
}
- cur_mem = NULL;
/*
* The pci_reset_bus() will reset all the devices in the bus.
@@ -2599,25 +2463,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
* cause the PCI config space reset without restoring the original
* state (saved locally in 'vdev->pm_save').
*/
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
- vfio_pci_set_power_state(cur, PCI_D0);
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+ vfio_pci_set_power_state(vdev, PCI_D0);
ret = pci_reset_bus(pdev);
+ vdev = list_last_entry(&dev_set->device_list,
+ struct vfio_pci_core_device, vdev.dev_set_list);
+
err_undo:
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
- if (cur == cur_mem)
- is_mem = false;
- if (cur == cur_vma)
- break;
- if (is_mem)
- up_write(&cur->memory_lock);
- else
- mutex_unlock(&cur->vma_lock);
- }
+ list_for_each_entry_from_reverse(vdev, &dev_set->device_list,
+ vdev.dev_set_list)
+ up_write(&vdev->memory_lock);
+
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+ pm_runtime_put(&vdev->pdev->dev);
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
- pm_runtime_put(&cur->pdev->dev);
err_unlock:
mutex_unlock(&dev_set->lock);
return ret;
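
The vfio-pci rework above derives the physical PFN from vm_pgoff at fault time: the high bits of the offset select the region index and the low bits give the page offset into that BAR. A small sketch of the arithmetic, with an assumed 40-bit region shift and made-up BAR addresses:

/* Sketch only: shift width and BAR addresses are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define REGION_OFFSET_SHIFT	40	/* assumed VFIO_PCI_OFFSET_SHIFT */

static const uint64_t bar_start[6] = {
	0xfe000000, 0xfe100000, 0, 0xd0000000, 0, 0,
};

static uint64_t pgoff_to_pfn(uint64_t vm_pgoff)
{
	unsigned int index = vm_pgoff >> (REGION_OFFSET_SHIFT - PAGE_SHIFT);
	uint64_t pgoff = vm_pgoff & ((1ull << (REGION_OFFSET_SHIFT - PAGE_SHIFT)) - 1);

	return (bar_start[index] >> PAGE_SHIFT) + pgoff;
}

int main(void)
{
	/* Page 3 of BAR 1: index 1 in the high bits, offset 3 in the low bits. */
	uint64_t vm_pgoff = (1ull << (REGION_OFFSET_SHIFT - PAGE_SHIFT)) | 3;

	printf("pfn = %#llx\n", (unsigned long long)pgoff_to_pfn(vm_pgoff));
	return 0;
}
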
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index e97d796a54fb..a5a62d9d963f 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -22,8 +22,10 @@
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pseudo_fs.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -43,9 +45,13 @@
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO - User Level meta-driver"
+#define VFIO_MAGIC 0x5646494f /* "VFIO" */
+
static struct vfio {
struct class *device_class;
struct ida device_ida;
+ struct vfsmount *vfs_mount;
+ int fs_count;
} vfio;
#ifdef CONFIG_VFIO_NOIOMMU
@@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev)
if (device->ops->release)
device->ops->release(device);
+ iput(device->inode);
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
kvfree(device);
}
@@ -228,6 +236,34 @@ out_free:
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);
+static int vfio_fs_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type vfio_fs_type = {
+ .name = "vfio",
+ .owner = THIS_MODULE,
+ .init_fs_context = vfio_fs_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *vfio_fs_inode_new(void)
+{
+ struct inode *inode;
+ int ret;
+
+ ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
+ if (ret)
+ return ERR_PTR(ret);
+
+ inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+
+ return inode;
+}
+
/*
* Initialize a vfio_device so it can be registered to vfio core.
*/
@@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
init_completion(&device->comp);
device->dev = dev;
device->ops = ops;
+ device->inode = vfio_fs_inode_new();
+ if (IS_ERR(device->inode)) {
+ ret = PTR_ERR(device->inode);
+ goto out_inode;
+ }
if (ops->init) {
ret = ops->init(device);
@@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
return 0;
out_uninit:
+ iput(device->inode);
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+out_inode:
vfio_release_device_set(device);
ida_free(&vfio.device_ida, device->index);
return ret;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 230bca07b09d..3614a5d29c71 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -373,6 +373,13 @@ config BACKLIGHT_AAT2870
If you have a AnalogicTech AAT2870 say Y to enable the
backlight driver.
+config BACKLIGHT_LM3509
+ tristate "Backlight Driver for LM3509"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  This enables support for the TI LM3509 backlight driver.
+
config BACKLIGHT_LM3630A
tristate "Backlight Driver for LM3630A"
depends on I2C && PWM
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 8d2cb252042d..8fc98f760a8a 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_BACKLIGHT_IPAQ_MICRO) += ipaq_micro_bl.o
obj-$(CONFIG_BACKLIGHT_KTD253) += ktd253-backlight.o
obj-$(CONFIG_BACKLIGHT_KTD2801) += ktd2801-backlight.o
obj-$(CONFIG_BACKLIGHT_KTZ8866) += ktz8866.o
+obj-$(CONFIG_BACKLIGHT_LM3509) += lm3509_bl.o
obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o
obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index b4c3354a1a8a..68d327ee4b2e 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -156,7 +156,7 @@ static int aat2870_bl_probe(struct platform_device *pdev)
bd->props.max_brightness = 255;
aat2870_bl->brightness = 0;
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
bd->props.brightness = bd->props.max_brightness;
ret = aat2870_bl_update_status(bd);
@@ -176,7 +176,7 @@ static void aat2870_bl_remove(struct platform_device *pdev)
struct aat2870_bl_driver_data *aat2870_bl = platform_get_drvdata(pdev);
struct backlight_device *bd = aat2870_bl->bd;
- bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.power = BACKLIGHT_POWER_OFF;
bd->props.brightness = 0;
backlight_update_status(bd);
}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 6bb18dc970e9..ad4bd4c8f441 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -963,7 +963,7 @@ static SIMPLE_DEV_PM_OPS(adp8870_i2c_pm_ops, adp8870_i2c_suspend,
adp8870_i2c_resume);
static const struct i2c_device_id adp8870_id[] = {
- { "adp8870", 0 },
+ { "adp8870" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adp8870_id);
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 57ec205d2bd2..f8442689ac43 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -10,7 +10,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
@@ -300,7 +299,7 @@ static int ams369fg06_ldi_disable(struct ams369fg06 *lcd)
static int ams369fg06_power_is_on(int power)
{
- return power <= FB_BLANK_NORMAL;
+ return power <= BACKLIGHT_POWER_REDUCED;
}
static int ams369fg06_power_on(struct ams369fg06 *lcd)
@@ -396,8 +395,8 @@ static int ams369fg06_set_power(struct lcd_device *ld, int power)
{
struct ams369fg06 *lcd = lcd_get_data(ld);
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
+ if (power != BACKLIGHT_POWER_ON && power != BACKLIGHT_POWER_OFF &&
+ power != BACKLIGHT_POWER_REDUCED) {
dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
return -EINVAL;
}
@@ -492,11 +491,11 @@ static int ams369fg06_probe(struct spi_device *spi)
* current lcd status is powerdown and then
* it enables lcd panel.
*/
- lcd->power = FB_BLANK_POWERDOWN;
+ lcd->power = BACKLIGHT_POWER_OFF;
- ams369fg06_power(lcd, FB_BLANK_UNBLANK);
+ ams369fg06_power(lcd, BACKLIGHT_POWER_ON);
} else {
- lcd->power = FB_BLANK_UNBLANK;
+ lcd->power = BACKLIGHT_POWER_ON;
}
spi_set_drvdata(spi, lcd);
@@ -510,7 +509,7 @@ static void ams369fg06_remove(struct spi_device *spi)
{
struct ams369fg06 *lcd = spi_get_drvdata(spi);
- ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
+ ams369fg06_power(lcd, BACKLIGHT_POWER_OFF);
}
#ifdef CONFIG_PM_SLEEP
@@ -524,16 +523,16 @@ static int ams369fg06_suspend(struct device *dev)
* when lcd panel is suspend, lcd panel becomes off
* regardless of status.
*/
- return ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
+ return ams369fg06_power(lcd, BACKLIGHT_POWER_OFF);
}
static int ams369fg06_resume(struct device *dev)
{
struct ams369fg06 *lcd = dev_get_drvdata(dev);
- lcd->power = FB_BLANK_POWERDOWN;
+ lcd->power = BACKLIGHT_POWER_OFF;
- return ams369fg06_power(lcd, FB_BLANK_UNBLANK);
+ return ams369fg06_power(lcd, BACKLIGHT_POWER_ON);
}
#endif
@@ -544,7 +543,7 @@ static void ams369fg06_shutdown(struct spi_device *spi)
{
struct ams369fg06 *lcd = spi_get_drvdata(spi);
- ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
+ ams369fg06_power(lcd, BACKLIGHT_POWER_OFF);
}
static struct spi_driver ams369fg06_driver = {
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index 6be2c67ba85c..90764f83d2f1 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -180,7 +180,7 @@ static void bd6107_remove(struct i2c_client *client)
}
static const struct i2c_device_id bd6107_ids[] = {
- { "bd6107", 0 },
+ { "bd6107" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bd6107_ids);
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index aad1680c9075..e4fcfbe38dc6 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -526,7 +526,7 @@ static int corgi_lcd_probe(struct spi_device *spi)
return PTR_ERR(lcd->bl_dev);
lcd->bl_dev->props.brightness = pdata->default_intensity;
- lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
+ lcd->bl_dev->props.power = BACKLIGHT_POWER_ON;
ret = setup_gpio_backlight(lcd, pdata);
if (ret)
@@ -547,7 +547,7 @@ static void corgi_lcd_remove(struct spi_device *spi)
{
struct corgi_lcd *lcd = spi_get_drvdata(spi);
- lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
+ lcd->bl_dev->props.power = BACKLIGHT_POWER_ON;
lcd->bl_dev->props.brightness = 0;
backlight_update_status(lcd->bl_dev);
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 4476c317ce29..728a546904b0 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -5,7 +5,6 @@
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -81,12 +80,12 @@ static int gpio_backlight_probe(struct platform_device *pdev)
/* Set the initial power state */
if (!of_node || !of_node->phandle)
/* Not booted with device tree or no phandle link to the node */
- bl->props.power = def_value ? FB_BLANK_UNBLANK
- : FB_BLANK_POWERDOWN;
+ bl->props.power = def_value ? BACKLIGHT_POWER_ON
+ : BACKLIGHT_POWER_OFF;
else if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
- bl->props.power = FB_BLANK_POWERDOWN;
+ bl->props.power = BACKLIGHT_POWER_OFF;
else
- bl->props.power = FB_BLANK_UNBLANK;
+ bl->props.power = BACKLIGHT_POWER_ON;
bl->props.brightness = 1;
diff --git a/drivers/video/backlight/ipaq_micro_bl.c b/drivers/video/backlight/ipaq_micro_bl.c
index f595b8c8cbb2..19ff66e444bc 100644
--- a/drivers/video/backlight/ipaq_micro_bl.c
+++ b/drivers/video/backlight/ipaq_micro_bl.c
@@ -7,7 +7,6 @@
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/init.h>
#include <linux/mfd/ipaq-micro.h>
#include <linux/module.h>
@@ -42,7 +41,7 @@ static const struct backlight_ops micro_bl_ops = {
static const struct backlight_properties micro_bl_props = {
.type = BACKLIGHT_RAW,
.max_brightness = 255,
- .power = FB_BLANK_UNBLANK,
+ .power = BACKLIGHT_POWER_ON,
.brightness = 64,
};
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 066d0dc98f60..e28d2c071798 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -7,7 +7,6 @@
#include <linux/backlight.h>
#include <linux/device.h>
-#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -121,7 +120,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
return ret;
}
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
bd->props.brightness = BL_DEF_BRIGHT;
/*
* note. make sure max brightness is set otherwise
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 55794b239cff..050b5c21f4a8 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -10,9 +10,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/dmi.h>
@@ -151,7 +151,7 @@ static int kb3886bl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, kb3886_backlight_device);
- kb3886_backlight_device->props.power = FB_BLANK_UNBLANK;
+ kb3886_backlight_device->props.power = BACKLIGHT_POWER_ON;
kb3886_backlight_device->props.brightness = machinfo->default_intensity;
backlight_update_status(kb3886_backlight_device);
diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
index d7d43454f64a..327b4ee75254 100644
--- a/drivers/video/backlight/ktd253-backlight.c
+++ b/drivers/video/backlight/ktd253-backlight.c
@@ -7,7 +7,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -190,10 +189,10 @@ static int ktd253_backlight_probe(struct platform_device *pdev)
/* When we just enable the GPIO line we set max brightness */
if (brightness) {
bl->props.brightness = brightness;
- bl->props.power = FB_BLANK_UNBLANK;
+ bl->props.power = BACKLIGHT_POWER_ON;
} else {
bl->props.brightness = 0;
- bl->props.power = FB_BLANK_POWERDOWN;
+ bl->props.power = BACKLIGHT_POWER_OFF;
}
ktd253->bl = bl;
diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c
index 014877b5a984..2e508741c0af 100644
--- a/drivers/video/backlight/ktz8866.c
+++ b/drivers/video/backlight/ktz8866.c
@@ -179,8 +179,8 @@ static void ktz8866_remove(struct i2c_client *client)
}
static const struct i2c_device_id ktz8866_ids[] = {
- { "ktz8866", 0 },
- {},
+ { "ktz8866" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ktz8866_ids);
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
index 032f8bddf872..c7aefcd6e4e3 100644
--- a/drivers/video/backlight/led_bl.c
+++ b/drivers/video/backlight/led_bl.c
@@ -200,8 +200,8 @@ static int led_bl_probe(struct platform_device *pdev)
props.type = BACKLIGHT_RAW;
props.max_brightness = priv->max_brightness;
props.brightness = priv->default_brightness;
- props.power = (priv->default_brightness > 0) ? FB_BLANK_POWERDOWN :
- FB_BLANK_UNBLANK;
+ props.power = (priv->default_brightness > 0) ? BACKLIGHT_POWER_OFF :
+ BACKLIGHT_POWER_ON;
priv->bl_dev = backlight_device_register(dev_name(&pdev->dev),
&pdev->dev, priv, &led_bl_ops, &props);
if (IS_ERR(priv->bl_dev)) {
diff --git a/drivers/video/backlight/lm3509_bl.c b/drivers/video/backlight/lm3509_bl.c
new file mode 100644
index 000000000000..24e1a19ff72d
--- /dev/null
+++ b/drivers/video/backlight/lm3509_bl.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define LM3509_NAME "lm3509_bl"
+
+#define LM3509_SINK_MAIN 0
+#define LM3509_SINK_SUB 1
+#define LM3509_NUM_SINKS 2
+
+#define LM3509_DEF_BRIGHTNESS 0x12
+#define LM3509_MAX_BRIGHTNESS 0x1F
+
+#define REG_GP 0x10
+#define REG_BMAIN 0xA0
+#define REG_BSUB 0xB0
+#define REG_MAX 0xFF
+
+enum {
+ REG_GP_ENM_BIT = 0,
+ REG_GP_ENS_BIT,
+ REG_GP_UNI_BIT,
+ REG_GP_RMP0_BIT,
+ REG_GP_RMP1_BIT,
+ REG_GP_OLED_BIT,
+};
+
+struct lm3509_bl {
+ struct regmap *regmap;
+ struct backlight_device *bl_main;
+ struct backlight_device *bl_sub;
+ struct gpio_desc *reset_gpio;
+};
+
+struct lm3509_bl_led_data {
+ const char *label;
+ int led_sources;
+ u32 brightness;
+ u32 max_brightness;
+};
+
+static void lm3509_reset(struct lm3509_bl *data)
+{
+ if (data->reset_gpio) {
+ gpiod_set_value(data->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value(data->reset_gpio, 0);
+ udelay(10);
+ }
+}
+
+static int lm3509_update_status(struct backlight_device *bl,
+ unsigned int en_mask, unsigned int br_reg)
+{
+ struct lm3509_bl *data = bl_get_data(bl);
+ int ret;
+ bool en;
+
+ ret = regmap_write(data->regmap, br_reg, backlight_get_brightness(bl));
+ if (ret < 0)
+ return ret;
+
+ en = !backlight_is_blank(bl);
+ return regmap_update_bits(data->regmap, REG_GP, en_mask,
+ en ? en_mask : 0);
+}
+
+static int lm3509_main_update_status(struct backlight_device *bl)
+{
+ return lm3509_update_status(bl, BIT(REG_GP_ENM_BIT), REG_BMAIN);
+}
+
+static const struct backlight_ops lm3509_main_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = lm3509_main_update_status,
+};
+
+static int lm3509_sub_update_status(struct backlight_device *bl)
+{
+ return lm3509_update_status(bl, BIT(REG_GP_ENS_BIT), REG_BSUB);
+}
+
+static const struct backlight_ops lm3509_sub_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = lm3509_sub_update_status,
+};
+
+static struct backlight_device *
+lm3509_backlight_register(struct device *dev, const char *name_suffix,
+ struct lm3509_bl *data,
+ const struct backlight_ops *ops,
+ const struct lm3509_bl_led_data *led_data)
+
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ const char *label = led_data->label;
+ char name[64];
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.brightness = led_data->brightness;
+ props.max_brightness = led_data->max_brightness;
+ props.scale = BACKLIGHT_SCALE_NON_LINEAR;
+
+ if (!label) {
+ snprintf(name, sizeof(name), "lm3509-%s-%s", dev_name(dev),
+ name_suffix);
+ label = name;
+ }
+
+ bd = devm_backlight_device_register(dev, label, dev, data, ops, &props);
+ if (IS_ERR(bd))
+ return bd;
+
+ backlight_update_status(bd);
+ return bd;
+}
+
+static const struct regmap_config lm3509_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = REG_MAX,
+};
+
+static int lm3509_parse_led_sources(struct device_node *node,
+ int default_led_sources)
+{
+ u32 sources[LM3509_NUM_SINKS];
+ int ret, num_sources, i;
+
+ num_sources = of_property_count_u32_elems(node, "led-sources");
+ if (num_sources < 0)
+ return default_led_sources;
+ else if (num_sources > ARRAY_SIZE(sources))
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(node, "led-sources", sources,
+ num_sources);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_sources; i++) {
+ if (sources[i] >= LM3509_NUM_SINKS)
+ return -EINVAL;
+
+ ret |= BIT(sources[i]);
+ }
+
+ return ret;
+}
+
+static int lm3509_parse_dt_node(struct device *dev,
+ struct lm3509_bl_led_data *led_data)
+{
+ int seen_led_sources = 0;
+
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ struct lm3509_bl_led_data *ld;
+ int ret;
+ u32 reg;
+ int valid_led_sources;
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret < 0)
+ return ret;
+ if (reg >= LM3509_NUM_SINKS)
+ return -EINVAL;
+ ld = &led_data[reg];
+
+ ld->led_sources = lm3509_parse_led_sources(child, BIT(reg));
+ if (ld->led_sources < 0)
+ return ld->led_sources;
+
+ if (reg == 0)
+ valid_led_sources = BIT(LM3509_SINK_MAIN) |
+ BIT(LM3509_SINK_SUB);
+ else
+ valid_led_sources = BIT(LM3509_SINK_SUB);
+
+ if (ld->led_sources != (ld->led_sources & valid_led_sources))
+ return -EINVAL;
+
+ if (seen_led_sources & ld->led_sources)
+ return -EINVAL;
+
+ seen_led_sources |= ld->led_sources;
+
+ ld->label = NULL;
+ of_property_read_string(child, "label", &ld->label);
+
+ ld->max_brightness = LM3509_MAX_BRIGHTNESS;
+ of_property_read_u32(child, "max-brightness",
+ &ld->max_brightness);
+ ld->max_brightness =
+ min_t(u32, ld->max_brightness, LM3509_MAX_BRIGHTNESS);
+
+ ld->brightness = LM3509_DEF_BRIGHTNESS;
+ of_property_read_u32(child, "default-brightness",
+ &ld->brightness);
+ ld->brightness = min_t(u32, ld->brightness, ld->max_brightness);
+ }
+
+ return 0;
+}
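+
+/*
+ * Illustrative device tree fragment for the parsing above. The property names
+ * come from the of_property_* calls and the compatible string from the match
+ * table below; the child addresses and values are examples only:
+ *
+ * backlight {
+ *         compatible = "ti,lm3509";
+ *         led@0 {
+ *                 reg = <0>;
+ *                 led-sources = <0 1>;  (BMAIN drives both sinks, UNI mode)
+ *                 default-brightness = <0x12>;
+ *                 max-brightness = <0x1f>;
+ *         };
+ * };
+ */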
+
+static int lm3509_probe(struct i2c_client *client)
+{
+ struct lm3509_bl *data;
+ struct device *dev = &client->dev;
+ int ret;
+ bool oled_mode = false;
+ unsigned int reg_gp_val = 0;
+ struct lm3509_bl_led_data led_data[LM3509_NUM_SINKS];
+ u32 rate_of_change = 0;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c functionality check failed\n");
+ return -EOPNOTSUPP;
+ }
+
+ data = devm_kzalloc(dev, sizeof(struct lm3509_bl), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = devm_regmap_init_i2c(client, &lm3509_regmap);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+ i2c_set_clientdata(client, data);
+
+ data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(data->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
+ "Failed to get 'reset' gpio\n");
+
+ lm3509_reset(data);
+
+ memset(led_data, 0, sizeof(led_data));
+ ret = lm3509_parse_dt_node(dev, led_data);
+ if (ret)
+ return ret;
+
+ oled_mode = of_property_read_bool(dev->of_node, "ti,oled-mode");
+
+ if (!of_property_read_u32(dev->of_node,
+ "ti,brightness-rate-of-change-us",
+ &rate_of_change)) {
+ switch (rate_of_change) {
+ case 51:
+ reg_gp_val = 0;
+ break;
+ case 13000:
+ reg_gp_val = BIT(REG_GP_RMP1_BIT);
+ break;
+ case 26000:
+ reg_gp_val = BIT(REG_GP_RMP0_BIT);
+ break;
+ case 52000:
+ reg_gp_val = BIT(REG_GP_RMP0_BIT) |
+ BIT(REG_GP_RMP1_BIT);
+ break;
+ default:
+ dev_warn(dev, "invalid rate of change %u\n",
+ rate_of_change);
+ break;
+ }
+ }
+
+ if (led_data[0].led_sources ==
+ (BIT(LM3509_SINK_MAIN) | BIT(LM3509_SINK_SUB)))
+ reg_gp_val |= BIT(REG_GP_UNI_BIT);
+ if (oled_mode)
+ reg_gp_val |= BIT(REG_GP_OLED_BIT);
+
+ ret = regmap_write(data->regmap, REG_GP, reg_gp_val);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to write register\n");
+
+ if (led_data[0].led_sources) {
+ data->bl_main = lm3509_backlight_register(
+ dev, "main", data, &lm3509_main_ops, &led_data[0]);
+ if (IS_ERR(data->bl_main)) {
+ return dev_err_probe(
+ dev, PTR_ERR(data->bl_main),
+ "failed to register main backlight\n");
+ }
+ }
+
+ if (led_data[1].led_sources) {
+ data->bl_sub = lm3509_backlight_register(
+ dev, "sub", data, &lm3509_sub_ops, &led_data[1]);
+ if (IS_ERR(data->bl_sub)) {
+ return dev_err_probe(
+ dev, PTR_ERR(data->bl_sub),
+ "failed to register secondary backlight\n");
+ }
+ }
+
+ return 0;
+}
+
+static void lm3509_remove(struct i2c_client *client)
+{
+ struct lm3509_bl *data = i2c_get_clientdata(client);
+
+ regmap_write(data->regmap, REG_GP, 0x00);
+}
+
+static const struct i2c_device_id lm3509_id[] = {
+ { LM3509_NAME },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3509_id);
+
+static const struct of_device_id lm3509_match_table[] = {
+ {
+ .compatible = "ti,lm3509",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, lm3509_match_table);
+
+static struct i2c_driver lm3509_i2c_driver = {
+ .driver = {
+ .name = LM3509_NAME,
+ .of_match_table = lm3509_match_table,
+ },
+ .probe = lm3509_probe,
+ .remove = lm3509_remove,
+ .id_table = lm3509_id,
+};
+
+module_i2c_driver(lm3509_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments Backlight driver for LM3509");
+MODULE_AUTHOR("Patrick Gansterer <paroga@paroga.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index 3e10d480cb7f..5d06f8ca976c 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/backlight.h>
-#include <linux/fb.h>
#include <linux/slab.h>
#include <linux/mfd/lm3533.h>
@@ -344,7 +343,7 @@ static void lm3533_bl_remove(struct platform_device *pdev)
dev_dbg(&bd->dev, "%s\n", __func__);
- bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.power = BACKLIGHT_POWER_OFF;
bd->props.brightness = 0;
lm3533_ctrlbank_disable(&bl->cb);
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 76d47e2e8242..37651c2b9393 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -596,7 +596,7 @@ static void lm3630a_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3630a_id[] = {
- {LM3630A_NAME, 0},
+ { LM3630A_NAME },
{}
};
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 564f62acd721..37ccc631c498 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -403,7 +403,7 @@ static void lm3639_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3639_id[] = {
- {LM3639_NAME, 0},
+ { LM3639_NAME },
{}
};
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 0cf00fee0f60..5f60989fa70f 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -132,7 +132,7 @@ static void lv5207lp_remove(struct i2c_client *client)
}
static const struct i2c_device_id lv5207lp_ids[] = {
- { "lv5207lp", 0 },
+ { "lv5207lp" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lv5207lp_ids);
diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c
index a28036c964af..372058e26129 100644
--- a/drivers/video/backlight/mp3309c.c
+++ b/drivers/video/backlight/mp3309c.c
@@ -358,7 +358,7 @@ static int mp3309c_probe(struct i2c_client *client)
props.max_brightness = pdata->max_brightness;
props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
- props.power = FB_BLANK_UNBLANK;
+ props.power = BACKLIGHT_POWER_ON;
chip->bl = devm_backlight_device_register(dev, "mp3309c", dev, chip,
&mp3309c_bl_ops, &props);
if (IS_ERR(chip->bl))
@@ -388,7 +388,7 @@ static void mp3309c_remove(struct i2c_client *client)
struct mp3309c_chip *chip = i2c_get_clientdata(client);
struct backlight_device *bl = chip->bl;
- bl->props.power = FB_BLANK_POWERDOWN;
+ bl->props.power = BACKLIGHT_POWER_OFF;
bl->props.brightness = 0;
backlight_update_status(chip->bl);
}
@@ -400,7 +400,7 @@ static const struct of_device_id mp3309c_match_table[] = {
MODULE_DEVICE_TABLE(of, mp3309c_match_table);
static const struct i2c_device_id mp3309c_id[] = {
- { "mp3309c", 0 },
+ { "mp3309c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mp3309c_id);
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index 51faa889e01f..8a63ded0fa90 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/mfd/twl.h>
#include <linux/err.h>
@@ -43,7 +42,7 @@ static int pandora_backlight_update_status(struct backlight_device *bl)
struct pandora_private *priv = bl_get_data(bl);
u8 r;
- if (bl->props.power != FB_BLANK_UNBLANK)
+ if (bl->props.power != BACKLIGHT_POWER_ON)
brightness = 0;
if (bl->props.state & BL_CORE_FBBLANK)
brightness = 0;
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 540dd3380c81..157be2f366df 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -10,7 +10,6 @@
#include <linux/platform_device.h>
#include <linux/backlight.h>
-#include <linux/fb.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/backlight.h>
@@ -53,7 +52,7 @@ static int pcf50633_bl_update_status(struct backlight_device *bl)
if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK) ||
- bl->props.power != FB_BLANK_UNBLANK)
+ bl->props.power != BACKLIGHT_POWER_ON)
new_brightness = 0;
else if (bl->props.brightness < pcf_bl->brightness_limit)
new_brightness = bl->props.brightness;
@@ -106,7 +105,7 @@ static int pcf50633_bl_probe(struct platform_device *pdev)
memset(&bl_props, 0, sizeof(bl_props));
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 0x3f;
- bl_props.power = FB_BLANK_UNBLANK;
+ bl_props.power = BACKLIGHT_POWER_ON;
if (pdata) {
bl_props.brightness = pdata->default_brightness;
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 76872f5c34c5..b0af612834a7 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -143,5 +143,6 @@ static struct platform_driver platform_lcd_driver = {
module_platform_driver(platform_lcd_driver);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
+MODULE_DESCRIPTION("Generic platform-device LCD power control interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:platform-lcd");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 61d30bc98eea..e942908d1275 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -426,7 +426,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
/* Not booted with device tree or no phandle link to the node */
if (!node || !node->phandle)
- return FB_BLANK_UNBLANK;
+ return BACKLIGHT_POWER_ON;
/*
* If the driver is probed from the device tree and there is a
@@ -434,7 +434,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
* assume that another driver will enable the backlight at the
* appropriate time. Therefore, if it is disabled, keep it so.
*/
- return active ? FB_BLANK_UNBLANK: FB_BLANK_POWERDOWN;
+ return active ? BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
}
static int pwm_backlight_probe(struct platform_device *pdev)
diff --git a/drivers/video/backlight/rave-sp-backlight.c b/drivers/video/backlight/rave-sp-backlight.c
index 05b5f003a3d1..e708a060a6e4 100644
--- a/drivers/video/backlight/rave-sp-backlight.c
+++ b/drivers/video/backlight/rave-sp-backlight.c
@@ -19,7 +19,7 @@ static int rave_sp_backlight_update_status(struct backlight_device *bd)
{
const struct backlight_properties *p = &bd->props;
const u8 intensity =
- (p->power == FB_BLANK_UNBLANK) ? p->brightness : 0;
+ (p->power == BACKLIGHT_POWER_ON) ? p->brightness : 0;
struct rave_sp *sp = dev_get_drvdata(&bd->dev);
u8 cmd[] = {
[0] = RAVE_SP_CMD_SET_BACKLIGHT,
diff --git a/drivers/video/backlight/rt4831-backlight.c b/drivers/video/backlight/rt4831-backlight.c
index 7d1af4c2ca67..c2f6fb29e1d0 100644
--- a/drivers/video/backlight/rt4831-backlight.c
+++ b/drivers/video/backlight/rt4831-backlight.c
@@ -229,4 +229,5 @@ static struct platform_driver rt4831_bl_driver = {
module_platform_driver(rt4831_bl_driver);
MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT4831 Backlight Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 19f9f84a9fd6..935043b67786 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -315,7 +315,7 @@ static void sky81452_bl_remove(struct platform_device *pdev)
sysfs_remove_group(&bd->dev.kobj, &sky81452_bl_attr_group);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
bd->props.brightness = 0;
backlight_update_status(bd);
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 654290a8e1ba..f714009b9ff7 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -2,7 +2,7 @@
/*
* AMD Secure Encrypted Virtualization (SEV) guest driver interface
*
- * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
@@ -23,6 +23,7 @@
#include <linux/sockptr.h>
#include <linux/cleanup.h>
#include <linux/uuid.h>
+#include <linux/configfs.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>
@@ -38,6 +39,8 @@
#define SNP_REQ_MAX_RETRY_DURATION (60*HZ)
#define SNP_REQ_RETRY_DELAY (2*HZ)
+#define SVSM_MAX_RETRIES 3
+
struct snp_guest_crypto {
struct crypto_aead *tfm;
u8 *iv, *authtag;
@@ -70,8 +73,15 @@ struct snp_guest_dev {
u8 *vmpck;
};
-static u32 vmpck_id;
-module_param(vmpck_id, uint, 0444);
+/*
+ * The VMPCK ID represents the key used by the SNP guest to communicate with the
+ * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
+ * used will be the key associated with the VMPL at which the guest is running.
+ * Should the default key be wiped (see snp_disable_vmpck()), this parameter
+ * allows for using one of the remaining VMPCKs.
+ */
+static int vmpck_id = -1;
+module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
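+/*
+ * For example (illustrative only): "modprobe sev-guest vmpck_id=1" selects
+ * VMPCK1, or "sev_guest.vmpck_id=1" can be passed on the kernel command line
+ * when the driver is built in.
+ */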
/* Mutex to serialize the shared buffer access and command handling. */
@@ -783,6 +793,143 @@ struct snp_msg_cert_entry {
u32 length;
};
+static int sev_svsm_report_new(struct tsm_report *report, void *data)
+{
+ unsigned int rep_len, man_len, certs_len;
+ struct tsm_desc *desc = &report->desc;
+ struct svsm_attest_call ac = {};
+ unsigned int retry_count;
+ void *rep, *man, *certs;
+ struct svsm_call call;
+ unsigned int size;
+ bool try_again;
+ void *buffer;
+ u64 call_id;
+ int ret;
+
+ /*
+ * Allocate pages for the request:
+ * - Report blob (4K)
+ * - Manifest blob (4K)
+ * - Certificate blob (16K)
+ *
+ * Above addresses must be 4K aligned
+ */
+ rep_len = SZ_4K;
+ man_len = SZ_4K;
+ certs_len = SEV_FW_BLOB_MAX_SIZE;
+
+ guard(mutex)(&snp_cmd_mutex);
+
+ if (guid_is_null(&desc->service_guid)) {
+ call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES);
+ } else {
+ export_guid(ac.service_guid, &desc->service_guid);
+ ac.service_manifest_ver = desc->service_manifest_version;
+
+ call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE);
+ }
+
+ retry_count = 0;
+
+retry:
+ memset(&call, 0, sizeof(call));
+
+ size = rep_len + man_len + certs_len;
+ buffer = alloc_pages_exact(size, __GFP_ZERO);
+ if (!buffer)
+ return -ENOMEM;
+
+ rep = buffer;
+ ac.report_buf.pa = __pa(rep);
+ ac.report_buf.len = rep_len;
+
+ man = rep + rep_len;
+ ac.manifest_buf.pa = __pa(man);
+ ac.manifest_buf.len = man_len;
+
+ certs = man + man_len;
+ ac.certificates_buf.pa = __pa(certs);
+ ac.certificates_buf.len = certs_len;
+
+ ac.nonce.pa = __pa(desc->inblob);
+ ac.nonce.len = desc->inblob_len;
+
+ ret = snp_issue_svsm_attest_req(call_id, &call, &ac);
+ if (ret) {
+ free_pages_exact(buffer, size);
+
+ switch (call.rax_out) {
+ case SVSM_ERR_INVALID_PARAMETER:
+ try_again = false;
+
+ if (ac.report_buf.len > rep_len) {
+ rep_len = PAGE_ALIGN(ac.report_buf.len);
+ try_again = true;
+ }
+
+ if (ac.manifest_buf.len > man_len) {
+ man_len = PAGE_ALIGN(ac.manifest_buf.len);
+ try_again = true;
+ }
+
+ if (ac.certificates_buf.len > certs_len) {
+ certs_len = PAGE_ALIGN(ac.certificates_buf.len);
+ try_again = true;
+ }
+
+ /* If one of the buffers wasn't large enough, retry the request */
+ if (try_again && retry_count < SVSM_MAX_RETRIES) {
+ retry_count++;
+ goto retry;
+ }
+
+ return -EINVAL;
+ default:
+ pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n",
+ ret, call.rax_out);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Allocate all the blob memory buffers at once so that the cleanup is
+ * done for errors that occur after the first allocation (i.e. before
+ * using no_free_ptr()).
+ */
+ rep_len = ac.report_buf.len;
+ void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL);
+
+ man_len = ac.manifest_buf.len;
+ void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL);
+
+ certs_len = ac.certificates_buf.len;
+ void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL;
+
+ if (!rbuf || !mbuf || (certs_len && !cbuf)) {
+ free_pages_exact(buffer, size);
+ return -ENOMEM;
+ }
+
+ memcpy(rbuf, rep, rep_len);
+ report->outblob = no_free_ptr(rbuf);
+ report->outblob_len = rep_len;
+
+ memcpy(mbuf, man, man_len);
+ report->manifestblob = no_free_ptr(mbuf);
+ report->manifestblob_len = man_len;
+
+ if (certs_len) {
+ memcpy(cbuf, certs, certs_len);
+ report->auxblob = no_free_ptr(cbuf);
+ report->auxblob_len = certs_len;
+ }
+
+ free_pages_exact(buffer, size);
+
+ return 0;
+}
+
static int sev_report_new(struct tsm_report *report, void *data)
{
struct snp_msg_cert_entry *cert_table;
@@ -797,6 +944,13 @@ static int sev_report_new(struct tsm_report *report, void *data)
if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
return -EINVAL;
+ if (desc->service_provider) {
+ if (strcmp(desc->service_provider, "svsm"))
+ return -EINVAL;
+
+ return sev_svsm_report_new(report, data);
+ }
+
void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -885,9 +1039,42 @@ static int sev_report_new(struct tsm_report *report, void *data)
return 0;
}
-static const struct tsm_ops sev_tsm_ops = {
+static bool sev_report_attr_visible(int n)
+{
+ switch (n) {
+ case TSM_REPORT_GENERATION:
+ case TSM_REPORT_PROVIDER:
+ case TSM_REPORT_PRIVLEVEL:
+ case TSM_REPORT_PRIVLEVEL_FLOOR:
+ return true;
+ case TSM_REPORT_SERVICE_PROVIDER:
+ case TSM_REPORT_SERVICE_GUID:
+ case TSM_REPORT_SERVICE_MANIFEST_VER:
+ return snp_vmpl;
+ }
+
+ return false;
+}
+
+static bool sev_report_bin_attr_visible(int n)
+{
+ switch (n) {
+ case TSM_REPORT_INBLOB:
+ case TSM_REPORT_OUTBLOB:
+ case TSM_REPORT_AUXBLOB:
+ return true;
+ case TSM_REPORT_MANIFESTBLOB:
+ return snp_vmpl;
+ }
+
+ return false;
+}
+
+static struct tsm_ops sev_tsm_ops = {
.name = KBUILD_MODNAME,
.report_new = sev_report_new,
+ .report_attr_visible = sev_report_attr_visible,
+ .report_bin_attr_visible = sev_report_bin_attr_visible,
};
static void unregister_sev_tsm(void *data)
@@ -923,6 +1110,10 @@ static int __init sev_guest_probe(struct platform_device *pdev)
if (!snp_dev)
goto e_unmap;
+ /* Adjust the default VMPCK key based on the executing VMPL level */
+ if (vmpck_id == -1)
+ vmpck_id = snp_vmpl;
+
ret = -EINVAL;
snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno);
if (!snp_dev->vmpck) {
@@ -968,7 +1159,10 @@ static int __init sev_guest_probe(struct platform_device *pdev)
snp_dev->input.resp_gpa = __pa(snp_dev->response);
snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
- ret = tsm_register(&sev_tsm_ops, snp_dev, &tsm_report_extra_type);
+ /* Set the privlevel_floor attribute based on the vmpck_id */
+ sev_tsm_ops.privlevel_floor = vmpck_id;
+
+ ret = tsm_register(&sev_tsm_ops, snp_dev);
if (ret)
goto e_free_cert_data;
@@ -1009,8 +1203,13 @@ static void __exit sev_guest_remove(struct platform_device *pdev)
* This driver is meant to be a common SEV guest interface driver and to
* support any SEV guest API. As such, even though it has been introduced
* with the SEV-SNP support, it is named "sev-guest".
+ *
+ * sev_guest_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is fine because they cannot get unbound
+ * at runtime. So mark the driver struct with __refdata to prevent modpost from
+ * triggering a section mismatch warning.
*/
-static struct platform_driver sev_guest_driver = {
+static struct platform_driver sev_guest_driver __refdata = {
.remove_new = __exit_p(sev_guest_remove),
.driver = {
.name = "sev-guest",
diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
index 1253bf76b570..2acba56ad42e 100644
--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
+++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
@@ -249,6 +249,28 @@ done:
return ret;
}
+static bool tdx_report_attr_visible(int n)
+{
+ switch (n) {
+ case TSM_REPORT_GENERATION:
+ case TSM_REPORT_PROVIDER:
+ return true;
+ }
+
+ return false;
+}
+
+static bool tdx_report_bin_attr_visible(int n)
+{
+ switch (n) {
+ case TSM_REPORT_INBLOB:
+ case TSM_REPORT_OUTBLOB:
+ return true;
+ }
+
+ return false;
+}
+
static long tdx_guest_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -281,6 +303,8 @@ MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids);
static const struct tsm_ops tdx_tsm_ops = {
.name = KBUILD_MODNAME,
.report_new = tdx_report_new,
+ .report_attr_visible = tdx_report_attr_visible,
+ .report_bin_attr_visible = tdx_report_bin_attr_visible,
};
static int __init tdx_guest_init(void)
@@ -301,7 +325,7 @@ static int __init tdx_guest_init(void)
goto free_misc;
}
- ret = tsm_register(&tdx_tsm_ops, NULL, NULL);
+ ret = tsm_register(&tdx_tsm_ops, NULL);
if (ret)
goto free_quote;
diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c
index d1c2db83a8ca..9432d4e303f1 100644
--- a/drivers/virt/coco/tsm.c
+++ b/drivers/virt/coco/tsm.c
@@ -14,7 +14,6 @@
static struct tsm_provider {
const struct tsm_ops *ops;
- const struct config_item_type *type;
void *data;
} provider;
static DECLARE_RWSEM(tsm_rwsem);
@@ -35,7 +34,7 @@ static DECLARE_RWSEM(tsm_rwsem);
* The attestation report format is TSM provider specific, when / if a standard
* materializes that can be published instead of the vendor layout. Until then
* the 'provider' attribute indicates the format of 'outblob', and optionally
- * 'auxblob'.
+ * 'auxblob' and 'manifestblob'.
*/
struct tsm_report_state {
@@ -48,6 +47,7 @@ struct tsm_report_state {
enum tsm_data_select {
TSM_REPORT,
TSM_CERTS,
+ TSM_MANIFEST,
};
static struct tsm_report *to_tsm_report(struct config_item *cfg)
@@ -119,6 +119,74 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg,
}
CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor);
+static ssize_t tsm_report_service_provider_store(struct config_item *cfg,
+ const char *buf, size_t len)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+ size_t sp_len;
+ char *sp;
+ int rc;
+
+ guard(rwsem_write)(&tsm_rwsem);
+ rc = try_advance_write_generation(report);
+ if (rc)
+ return rc;
+
+ sp_len = (buf[len - 1] != '\n') ? len : len - 1;
+
+ sp = kstrndup(buf, sp_len, GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+ kfree(report->desc.service_provider);
+
+ report->desc.service_provider = sp;
+
+ return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, service_provider);
+
+static ssize_t tsm_report_service_guid_store(struct config_item *cfg,
+ const char *buf, size_t len)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+ int rc;
+
+ guard(rwsem_write)(&tsm_rwsem);
+ rc = try_advance_write_generation(report);
+ if (rc)
+ return rc;
+
+ report->desc.service_guid = guid_null;
+
+ rc = guid_parse(buf, &report->desc.service_guid);
+ if (rc)
+ return rc;
+
+ return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, service_guid);
+
+static ssize_t tsm_report_service_manifest_version_store(struct config_item *cfg,
+ const char *buf, size_t len)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ guard(rwsem_write)(&tsm_rwsem);
+ rc = try_advance_write_generation(report);
+ if (rc)
+ return rc;
+ report->desc.service_manifest_version = val;
+
+ return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, service_manifest_version);
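+
+/*
+ * Illustrative flow for the attributes above (assumes the usual configfs
+ * mount at /sys/kernel/config and the configfs-tsm report ABI; the inblob
+ * size and the "svsm" provider name are backend specific):
+ *
+ *   report=/sys/kernel/config/tsm/report/report0
+ *   mkdir $report
+ *   echo svsm > $report/service_provider
+ *   dd if=/dev/urandom bs=64 count=1 > $report/inblob
+ *   hexdump -C $report/manifestblob
+ */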
+
static ssize_t tsm_report_inblob_write(struct config_item *cfg,
const void *buf, size_t count)
{
@@ -163,6 +231,9 @@ static ssize_t __read_report(struct tsm_report *report, void *buf, size_t count,
if (select == TSM_REPORT) {
out = report->outblob;
len = report->outblob_len;
+ } else if (select == TSM_MANIFEST) {
+ out = report->manifestblob;
+ len = report->manifestblob_len;
} else {
out = report->auxblob;
len = report->auxblob_len;
@@ -188,7 +259,7 @@ static ssize_t read_cached_report(struct tsm_report *report, void *buf,
/*
* A given TSM backend always fills in ->outblob regardless of
- * whether the report includes an auxblob or not.
+ * whether the report includes an auxblob/manifestblob or not.
*/
if (!report->outblob ||
state->read_generation != state->write_generation)
@@ -224,8 +295,10 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
kvfree(report->outblob);
kvfree(report->auxblob);
+ kvfree(report->manifestblob);
report->outblob = NULL;
report->auxblob = NULL;
+ report->manifestblob = NULL;
rc = ops->report_new(report, provider.data);
if (rc < 0)
return rc;
@@ -252,34 +325,31 @@ static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
}
CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_OUTBLOB_MAX);
-#define TSM_DEFAULT_ATTRS() \
- &tsm_report_attr_generation, \
- &tsm_report_attr_provider
+static ssize_t tsm_report_manifestblob_read(struct config_item *cfg, void *buf,
+ size_t count)
+{
+ struct tsm_report *report = to_tsm_report(cfg);
-static struct configfs_attribute *tsm_report_attrs[] = {
- TSM_DEFAULT_ATTRS(),
- NULL,
-};
+ return tsm_report_read(report, buf, count, TSM_MANIFEST);
+}
+CONFIGFS_BIN_ATTR_RO(tsm_report_, manifestblob, NULL, TSM_OUTBLOB_MAX);
-static struct configfs_attribute *tsm_report_extra_attrs[] = {
- TSM_DEFAULT_ATTRS(),
- &tsm_report_attr_privlevel,
- &tsm_report_attr_privlevel_floor,
+static struct configfs_attribute *tsm_report_attrs[] = {
+ [TSM_REPORT_GENERATION] = &tsm_report_attr_generation,
+ [TSM_REPORT_PROVIDER] = &tsm_report_attr_provider,
+ [TSM_REPORT_PRIVLEVEL] = &tsm_report_attr_privlevel,
+ [TSM_REPORT_PRIVLEVEL_FLOOR] = &tsm_report_attr_privlevel_floor,
+ [TSM_REPORT_SERVICE_PROVIDER] = &tsm_report_attr_service_provider,
+ [TSM_REPORT_SERVICE_GUID] = &tsm_report_attr_service_guid,
+ [TSM_REPORT_SERVICE_MANIFEST_VER] = &tsm_report_attr_service_manifest_version,
NULL,
};
-#define TSM_DEFAULT_BIN_ATTRS() \
- &tsm_report_attr_inblob, \
- &tsm_report_attr_outblob
-
static struct configfs_bin_attribute *tsm_report_bin_attrs[] = {
- TSM_DEFAULT_BIN_ATTRS(),
- NULL,
-};
-
-static struct configfs_bin_attribute *tsm_report_bin_extra_attrs[] = {
- TSM_DEFAULT_BIN_ATTRS(),
- &tsm_report_attr_auxblob,
+ [TSM_REPORT_INBLOB] = &tsm_report_attr_inblob,
+ [TSM_REPORT_OUTBLOB] = &tsm_report_attr_outblob,
+ [TSM_REPORT_AUXBLOB] = &tsm_report_attr_auxblob,
+ [TSM_REPORT_MANIFESTBLOB] = &tsm_report_attr_manifestblob,
NULL,
};
@@ -288,8 +358,10 @@ static void tsm_report_item_release(struct config_item *cfg)
struct tsm_report *report = to_tsm_report(cfg);
struct tsm_report_state *state = to_state(report);
+ kvfree(report->manifestblob);
kvfree(report->auxblob);
kvfree(report->outblob);
+ kfree(report->desc.service_provider);
kfree(state);
}
@@ -297,21 +369,44 @@ static struct configfs_item_operations tsm_report_item_ops = {
.release = tsm_report_item_release,
};
-const struct config_item_type tsm_report_default_type = {
- .ct_owner = THIS_MODULE,
- .ct_bin_attrs = tsm_report_bin_attrs,
- .ct_attrs = tsm_report_attrs,
- .ct_item_ops = &tsm_report_item_ops,
+static bool tsm_report_is_visible(struct config_item *item,
+ struct configfs_attribute *attr, int n)
+{
+ guard(rwsem_read)(&tsm_rwsem);
+ if (!provider.ops)
+ return false;
+
+ if (!provider.ops->report_attr_visible)
+ return true;
+
+ return provider.ops->report_attr_visible(n);
+}
+
+static bool tsm_report_is_bin_visible(struct config_item *item,
+ struct configfs_bin_attribute *attr, int n)
+{
+ guard(rwsem_read)(&tsm_rwsem);
+ if (!provider.ops)
+ return false;
+
+ if (!provider.ops->report_bin_attr_visible)
+ return true;
+
+ return provider.ops->report_bin_attr_visible(n);
+}
+
+static struct configfs_group_operations tsm_report_attr_group_ops = {
+ .is_visible = tsm_report_is_visible,
+ .is_bin_visible = tsm_report_is_bin_visible,
};
-EXPORT_SYMBOL_GPL(tsm_report_default_type);
-const struct config_item_type tsm_report_extra_type = {
+static const struct config_item_type tsm_report_type = {
.ct_owner = THIS_MODULE,
- .ct_bin_attrs = tsm_report_bin_extra_attrs,
- .ct_attrs = tsm_report_extra_attrs,
+ .ct_bin_attrs = tsm_report_bin_attrs,
+ .ct_attrs = tsm_report_attrs,
.ct_item_ops = &tsm_report_item_ops,
+ .ct_group_ops = &tsm_report_attr_group_ops,
};
-EXPORT_SYMBOL_GPL(tsm_report_extra_type);
static struct config_item *tsm_report_make_item(struct config_group *group,
const char *name)
@@ -326,7 +421,7 @@ static struct config_item *tsm_report_make_item(struct config_group *group,
if (!state)
return ERR_PTR(-ENOMEM);
- config_item_init_type_name(&state->cfg, name, provider.type);
+ config_item_init_type_name(&state->cfg, name, &tsm_report_type);
return &state->cfg;
}
@@ -353,16 +448,10 @@ static struct configfs_subsystem tsm_configfs = {
.su_mutex = __MUTEX_INITIALIZER(tsm_configfs.su_mutex),
};
-int tsm_register(const struct tsm_ops *ops, void *priv,
- const struct config_item_type *type)
+int tsm_register(const struct tsm_ops *ops, void *priv)
{
const struct tsm_ops *conflict;
- if (!type)
- type = &tsm_report_default_type;
- if (!(type == &tsm_report_default_type || type == &tsm_report_extra_type))
- return -EINVAL;
-
guard(rwsem_write)(&tsm_rwsem);
conflict = provider.ops;
if (conflict) {
@@ -372,7 +461,6 @@ int tsm_register(const struct tsm_ops *ops, void *priv,
provider.ops = ops;
provider.data = priv;
- provider.type = type;
return 0;
}
EXPORT_SYMBOL_GPL(tsm_register);
@@ -384,7 +472,6 @@ int tsm_unregister(const struct tsm_ops *ops)
return -EBUSY;
provider.ops = NULL;
provider.data = NULL;
- provider.type = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(tsm_unregister);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 85eea38dbdf4..c44918768a97 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -181,6 +181,19 @@ config BD957XMUF_WATCHDOG
watchdog. Alternatively say M to compile the driver as a module,
which will be called bd9576_wdt.
+config BD96801_WATCHDOG
+ tristate "ROHM BD96801 PMIC Watchdog"
+ depends on MFD_ROHM_BD96801
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog in the ROHM BD96801 PMIC. The watchdog can
+ be configured to only generate an IRQ or to trigger a system reset
+ via the reset pin.
+
+ Say Y here to include support for the ROHM BD96801 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called bd96801_wdt.
+
config CROS_EC_WATCHDOG
tristate "ChromeOS EC-based watchdog"
select WATCHDOG_CORE
@@ -257,6 +270,7 @@ config GPIO_WATCHDOG_ARCH_INITCALL
config LENOVO_SE10_WDT
tristate "Lenovo SE10 Watchdog"
depends on (X86 && DMI) || COMPILE_TEST
+ depends on HAS_IOPORT
select WATCHDOG_CORE
help
If you say yes here you get support for the watchdog
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 2d1117564f5b..b51030f035a6 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -218,6 +218,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
obj-$(CONFIG_BD957XMUF_WATCHDOG) += bd9576_wdt.o
+obj-$(CONFIG_BD96801_WATCHDOG) += bd96801_wdt.o
obj-$(CONFIG_CROS_EC_WATCHDOG) += cros_ec_wdt.o
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
diff --git a/drivers/watchdog/bd96801_wdt.c b/drivers/watchdog/bd96801_wdt.c
new file mode 100644
index 000000000000..12b74fd2bc05
--- /dev/null
+++ b/drivers/watchdog/bd96801_wdt.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 ROHM Semiconductors
+ *
+ * ROHM BD96801 watchdog driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd96801.h>
+#include <linux/mfd/rohm-generic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/watchdog.h>
+
+static bool nowayout;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default=\"false\")");
+
+#define BD96801_WD_TMO_SHORT_MASK 0x70
+#define BD96801_WD_RATIO_MASK 0x3
+#define BD96801_WD_TYPE_MASK 0x4
+#define BD96801_WD_TYPE_SLOW 0x4
+#define BD96801_WD_TYPE_WIN 0x0
+
+#define BD96801_WD_EN_MASK 0x3
+#define BD96801_WD_IF_EN 0x1
+#define BD96801_WD_QA_EN 0x2
+#define BD96801_WD_DISABLE 0x0
+
+#define BD96801_WD_ASSERT_MASK 0x8
+#define BD96801_WD_ASSERT_RST 0x8
+#define BD96801_WD_ASSERT_IRQ 0x0
+
+#define BD96801_WD_FEED_MASK 0x1
+#define BD96801_WD_FEED 0x1
+
+/* Shortest SHORT timeout: 1.1 ms, expressed in units of 100 us */
+#define FASTNG_MIN 11
+#define FASTNG_MAX_US (100 * FASTNG_MIN << 7)
+#define SLOWNG_MAX_US (16 * FASTNG_MAX_US)
+
+#define BD96801_WDT_DEFAULT_MARGIN_MS 1843
+/* Unit is seconds */
+#define DEFAULT_TIMEOUT 30
+
+/*
+ * The BD96801 WDG supports a window mode, so the timeout consists of SHORT
+ * and LONG values. SHORT is meaningful only in window mode, where feeding
+ * the watchdog sooner than SHORT is treated as an error. LONG is used to
+ * detect feeding not happening within the given time limit (SoC SW hang).
+ * The LONG timeout is a multiple (2, 4, 8 or 16 times) of the SHORT timeout.
+ */
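+
+/*
+ * Worked example, derived from the driver constants above rather than the
+ * datasheet: SHORT = FASTNG_MIN << sel gives 1.1 ms, 2.2 ms, ... 140.8 ms
+ * for sel 0..7, and LONG = SHORT * {2, 4, 8, 16}, so the longest LONG
+ * timeout the driver can program is 16 * 140.8 ms = 2252.8 ms (SLOWNG_MAX_US).
+ */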
+
+struct wdtbd96801 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct watchdog_device wdt;
+};
+
+static int bd96801_wdt_ping(struct watchdog_device *wdt)
+{
+ struct wdtbd96801 *w = watchdog_get_drvdata(wdt);
+
+ return regmap_update_bits(w->regmap, BD96801_REG_WD_FEED,
+ BD96801_WD_FEED_MASK, BD96801_WD_FEED);
+}
+
+static int bd96801_wdt_start(struct watchdog_device *wdt)
+{
+ struct wdtbd96801 *w = watchdog_get_drvdata(wdt);
+
+ return regmap_update_bits(w->regmap, BD96801_REG_WD_CONF,
+ BD96801_WD_EN_MASK, BD96801_WD_IF_EN);
+}
+
+static int bd96801_wdt_stop(struct watchdog_device *wdt)
+{
+ struct wdtbd96801 *w = watchdog_get_drvdata(wdt);
+
+ return regmap_update_bits(w->regmap, BD96801_REG_WD_CONF,
+ BD96801_WD_EN_MASK, BD96801_WD_DISABLE);
+}
+
+static const struct watchdog_info bd96801_wdt_info = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .identity = "BD96801 Watchdog",
+};
+
+static const struct watchdog_ops bd96801_wdt_ops = {
+ .start = bd96801_wdt_start,
+ .stop = bd96801_wdt_stop,
+ .ping = bd96801_wdt_ping,
+};
+
+static int find_closest_fast(unsigned int target, int *sel, unsigned int *val)
+{
+ unsigned int window = FASTNG_MIN;
+ int i;
+
+ for (i = 0; i < 8 && window < target; i++)
+ window <<= 1;
+
+ if (i == 8)
+ return -EINVAL;
+
+ *val = window;
+ *sel = i;
+
+ return 0;
+}
+
+static int find_closest_slow_by_fast(unsigned int fast_val, unsigned int *target,
+ int *slowsel)
+{
+ static const int multipliers[] = {2, 4, 8, 16};
+ int sel;
+
+ for (sel = 0; sel < ARRAY_SIZE(multipliers) &&
+ multipliers[sel] * fast_val < *target; sel++)
+ ;
+
+ if (sel == ARRAY_SIZE(multipliers))
+ return -EINVAL;
+
+ *slowsel = sel;
+ *target = multipliers[sel] * fast_val;
+
+ return 0;
+}
+
+static int find_closest_slow(unsigned int *target, int *slow_sel, int *fast_sel)
+{
+ static const int multipliers[] = {2, 4, 8, 16};
+ unsigned int window = FASTNG_MIN;
+ unsigned int val = 0;
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < ARRAY_SIZE(multipliers); j++) {
+ unsigned int slow;
+
+ slow = window * multipliers[j];
+ if (slow >= *target && (!val || slow < val)) {
+ val = slow;
+ *fast_sel = i;
+ *slow_sel = j;
+ }
+ }
+ window <<= 1;
+ }
+ if (!val)
+ return -EINVAL;
+
+ *target = val;
+
+ return 0;
+}
+
+static int bd96801_set_wdt_mode(struct wdtbd96801 *w, unsigned int hw_margin,
+ unsigned int hw_margin_min)
+{
+ int fastng, slowng, type, ret, reg, mask;
+ struct device *dev = w->dev;
+
+
+ if (hw_margin_min * 1000 > FASTNG_MAX_US) {
+ dev_err(dev, "Unsupported fast timeout %u uS [max %u]\n",
+ hw_margin_min * 1000, FASTNG_MAX_US);
+
+ return -EINVAL;
+ }
+
+ if (hw_margin * 1000 > SLOWNG_MAX_US) {
+ dev_err(dev, "Unsupported slow timeout %u uS [max %u]\n",
+ hw_margin * 1000, SLOWNG_MAX_US);
+
+ return -EINVAL;
+ }
+
+ /*
+ * Convert to units of 100 us so that reasonable timeouts fit in
+ * 32 bits while still maintaining decent accuracy.
+ */
+ hw_margin *= 10;
+ hw_margin_min *= 10;
+
+ if (hw_margin_min) {
+ unsigned int min;
+
+ type = BD96801_WD_TYPE_WIN;
+ dev_dbg(dev, "Setting type WINDOW 0x%x\n", type);
+ ret = find_closest_fast(hw_margin_min, &fastng, &min);
+ if (ret)
+ return ret;
+
+ ret = find_closest_slow_by_fast(min, &hw_margin, &slowng);
+ if (ret) {
+ dev_err(dev,
+ "can't support slow timeout %u uS using fast %u uS. [max slow %u uS]\n",
+ hw_margin * 100, min * 100, min * 100 * 16);
+
+ return ret;
+ }
+ w->wdt.min_hw_heartbeat_ms = min / 10;
+ } else {
+ type = BD96801_WD_TYPE_SLOW;
+ dev_dbg(dev, "Setting type SLOW 0x%x\n", type);
+ ret = find_closest_slow(&hw_margin, &slowng, &fastng);
+ if (ret)
+ return ret;
+ }
+
+ w->wdt.max_hw_heartbeat_ms = hw_margin / 10;
+
+ fastng = FIELD_PREP(BD96801_WD_TMO_SHORT_MASK, fastng);
+
+ reg = slowng | fastng;
+ mask = BD96801_WD_RATIO_MASK | BD96801_WD_TMO_SHORT_MASK;
+ ret = regmap_update_bits(w->regmap, BD96801_REG_WD_TMO,
+ mask, reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(w->regmap, BD96801_REG_WD_CONF,
+ BD96801_WD_TYPE_MASK, type);
+
+ return ret;
+}
+
+static int bd96801_set_heartbeat_from_hw(struct wdtbd96801 *w,
+ unsigned int conf_reg)
+{
+ int ret;
+ unsigned int val, sel, fast;
+
+ /*
+ * The BD96801 supports a somewhat peculiar Q&A mode, which this driver
+ * does not support. If Q&A mode is enabled, print an error and bail
+ * out.
+ */
+ if ((conf_reg & BD96801_WD_EN_MASK) != BD96801_WD_IF_EN) {
+ dev_err(w->dev, "watchdog set to Q&A mode - exiting\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(w->regmap, BD96801_REG_WD_TMO, &val);
+ if (ret)
+ return ret;
+
+ sel = FIELD_GET(BD96801_WD_TMO_SHORT_MASK, val);
+ fast = FASTNG_MIN << sel;
+
+ sel = (val & BD96801_WD_RATIO_MASK) + 1;
+ w->wdt.max_hw_heartbeat_ms = (fast << sel) / USEC_PER_MSEC;
+
+ if ((conf_reg & BD96801_WD_TYPE_MASK) == BD96801_WD_TYPE_WIN)
+ w->wdt.min_hw_heartbeat_ms = fast / USEC_PER_MSEC;
+
+ return 0;
+}
+
+static int init_wdg_hw(struct wdtbd96801 *w)
+{
+ u32 hw_margin[2];
+ int count, ret;
+ u32 hw_margin_max = BD96801_WDT_DEFAULT_MARGIN_MS, hw_margin_min = 0;
+
+ count = device_property_count_u32(w->dev->parent, "rohm,hw-timeout-ms");
+ if (count < 0 && count != -EINVAL)
+ return count;
+
+ if (count > 0) {
+ if (count > ARRAY_SIZE(hw_margin))
+ return -EINVAL;
+
+ ret = device_property_read_u32_array(w->dev->parent,
+ "rohm,hw-timeout-ms",
+ &hw_margin[0], count);
+ if (ret < 0)
+ return ret;
+
+ if (count == 1)
+ hw_margin_max = hw_margin[0];
+
+ if (count == 2) {
+ if (hw_margin[1] > hw_margin[0]) {
+ hw_margin_max = hw_margin[1];
+ hw_margin_min = hw_margin[0];
+ } else {
+ hw_margin_max = hw_margin[0];
+ hw_margin_min = hw_margin[1];
+ }
+ }
+ }
+
+ ret = bd96801_set_wdt_mode(w, hw_margin_max, hw_margin_min);
+ if (ret)
+ return ret;
+
+ ret = device_property_match_string(w->dev->parent, "rohm,wdg-action",
+ "prstb");
+ if (ret >= 0) {
+ ret = regmap_update_bits(w->regmap, BD96801_REG_WD_CONF,
+ BD96801_WD_ASSERT_MASK,
+ BD96801_WD_ASSERT_RST);
+ return ret;
+ }
+
+ ret = device_property_match_string(w->dev->parent, "rohm,wdg-action",
+ "intb-only");
+ if (ret >= 0) {
+ ret = regmap_update_bits(w->regmap, BD96801_REG_WD_CONF,
+ BD96801_WD_ASSERT_MASK,
+ BD96801_WD_ASSERT_IRQ);
+ return ret;
+ }
+
+ return 0;
+}
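+
+/*
+ * Illustrative parent PMIC properties consumed by init_wdg_hw() above. The
+ * property names come from the device_property_*() calls; the values are
+ * examples only and are rounded up to the nearest step the hardware supports:
+ *
+ * rohm,hw-timeout-ms = <100 1500>;  window mode: min 100 ms, max 1500 ms
+ * rohm,wdg-action = "prstb";        assert the reset pin instead of IRQ only
+ */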
+
+static irqreturn_t bd96801_irq_hnd(int irq, void *data)
+{
+ emergency_restart();
+
+ return IRQ_NONE;
+}
+
+static int bd96801_wdt_probe(struct platform_device *pdev)
+{
+ struct wdtbd96801 *w;
+ int ret, irq;
+ unsigned int val;
+
+ w = devm_kzalloc(&pdev->dev, sizeof(*w), GFP_KERNEL);
+ if (!w)
+ return -ENOMEM;
+
+ w->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ w->dev = &pdev->dev;
+
+ w->wdt.info = &bd96801_wdt_info;
+ w->wdt.ops = &bd96801_wdt_ops;
+ w->wdt.parent = pdev->dev.parent;
+ w->wdt.timeout = DEFAULT_TIMEOUT;
+ watchdog_set_drvdata(&w->wdt, w);
+
+ ret = regmap_read(w->regmap, BD96801_REG_WD_CONF, &val);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get the watchdog state\n");
+
+ /*
+ * If the WDG is already enabled, assume it was configured by the boot
+ * stage. In that case just derive the hw-timeout limits from the values
+ * already programmed into the timeout / mode registers and leave the
+ * hardware configuration untouched.
+ */
+ if ((val & BD96801_WD_EN_MASK) != BD96801_WD_DISABLE) {
+ dev_dbg(&pdev->dev, "watchdog was running during probe\n");
+ ret = bd96801_set_heartbeat_from_hw(w, val);
+ if (ret)
+ return ret;
+
+ set_bit(WDOG_HW_RUNNING, &w->wdt.status);
+ } else {
+ /* The WDG is not running, so initialize it */
+ ret = init_wdg_hw(w);
+ if (ret)
+ return ret;
+ }
+
+ dev_dbg(w->dev, "heartbeat set to %u - %u\n",
+ w->wdt.min_hw_heartbeat_ms, w->wdt.max_hw_heartbeat_ms);
+
+ watchdog_init_timeout(&w->wdt, 0, pdev->dev.parent);
+ watchdog_set_nowayout(&w->wdt, nowayout);
+ watchdog_stop_on_reboot(&w->wdt);
+
+ irq = platform_get_irq_byname(pdev, "bd96801-wdg");
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ bd96801_irq_hnd,
+ IRQF_ONESHOT, "bd96801-wdg",
+ NULL);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register IRQ\n");
+ }
+
+ return devm_watchdog_register_device(&pdev->dev, &w->wdt);
+}
+
+static const struct platform_device_id bd96801_wdt_id[] = {
+ { "bd96801-wdt", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, bd96801_wdt_id);
+
+static struct platform_driver bd96801_wdt = {
+ .driver = {
+ .name = "bd96801-wdt"
+ },
+ .probe = bd96801_wdt_probe,
+ .id_table = bd96801_wdt_id,
+};
+module_platform_driver(bd96801_wdt);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD96801 watchdog driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index c7de30270043..0508a65acfa6 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -161,6 +161,7 @@ static struct mcb_driver men_z069_driver = {
module_mcb_driver(men_z069_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
+MODULE_DESCRIPTION("Watchdog driver for the MEN z069 IP-Core");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z069");
MODULE_IMPORT_NS(MCB);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index a7a12f2fe9de..b6e0236509bb 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -370,5 +370,6 @@ static struct platform_driver omap_wdt_driver = {
module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
+MODULE_DESCRIPTION("Driver for the TI OMAP 16xx/24xx/34xx 32KHz (non-secure) watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_wdt");
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
index cdc1a2e15180..1e91f0a560ff 100644
--- a/drivers/watchdog/simatic-ipc-wdt.c
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -227,6 +227,7 @@ static struct platform_driver simatic_ipc_wdt_driver = {
module_platform_driver(simatic_ipc_wdt_driver);
+MODULE_DESCRIPTION("Siemens SIMATIC IPC driver for Watchdogs");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index 0099403f4992..24b1ad52102e 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -200,5 +200,6 @@ static struct platform_driver ts4800_wdt_driver = {
module_platform_driver(ts4800_wdt_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Watchdog driver for TS-4800 based boards");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ts4800_wdt");
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 09d17e20f4a7..8c80d04811e4 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -118,6 +118,7 @@ static struct platform_driver twl4030_wdt_driver = {
module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("TWL4030 Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_wdt");
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index f6a2216c2c87..9b7fcc7dbb38 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -729,4 +729,5 @@ static void __exit evtchn_cleanup(void)
module_init(evtchn_init);
module_exit(evtchn_cleanup);
+MODULE_DESCRIPTION("Xen /dev/xen/evtchn device driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c16df629907e..b4b4ebed68da 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -208,7 +208,7 @@ static void do_reboot(void)
orderly_reboot();
}
-static struct shutdown_handler shutdown_handlers[] = {
+static const struct shutdown_handler shutdown_handlers[] = {
{ "poweroff", true, do_poweroff },
{ "halt", false, do_poweroff },
{ "reboot", true, do_reboot },
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index 2fa10ca5be14..0f0dad427d7e 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -19,6 +19,7 @@
#include "privcmd.h"
+MODULE_DESCRIPTION("Xen Mmap of hypercall buffers");
MODULE_LICENSE("GPL");
struct privcmd_buf_private {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 67dfa4778864..9563650dfbaf 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -17,6 +17,7 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
@@ -48,6 +49,7 @@
#include "privcmd.h"
+MODULE_DESCRIPTION("Xen hypercall passthrough driver");
MODULE_LICENSE("GPL");
#define PRIV_VMA_LOCKED ((void *)1)
@@ -845,7 +847,8 @@ out:
#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
-static DEFINE_MUTEX(irqfds_lock);
+static DEFINE_SPINLOCK(irqfds_lock);
+DEFINE_STATIC_SRCU(irqfds_srcu);
static LIST_HEAD(irqfds_list);
struct privcmd_kernel_irqfd {
@@ -873,6 +876,9 @@ static void irqfd_shutdown(struct work_struct *work)
container_of(work, struct privcmd_kernel_irqfd, shutdown);
u64 cnt;
+ /* Make sure irqfd has been initialized in assign path */
+ synchronize_srcu(&irqfds_srcu);
+
eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
eventfd_ctx_put(kirqfd->eventfd);
kfree(kirqfd);
@@ -909,9 +915,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
irqfd_inject(kirqfd);
if (flags & EPOLLHUP) {
- mutex_lock(&irqfds_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&irqfds_lock, flags);
irqfd_deactivate(kirqfd);
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
}
return 0;
@@ -929,10 +937,11 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
struct privcmd_kernel_irqfd *kirqfd, *tmp;
+ unsigned long flags;
__poll_t events;
struct fd f;
void *dm_op;
- int ret;
+ int ret, idx;
kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
if (!kirqfd)
@@ -968,18 +977,19 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
- mutex_lock(&irqfds_lock);
+ spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry(tmp, &irqfds_list, list) {
if (kirqfd->eventfd == tmp->eventfd) {
ret = -EBUSY;
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
goto error_eventfd;
}
}
+ idx = srcu_read_lock(&irqfds_srcu);
list_add_tail(&kirqfd->list, &irqfds_list);
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
/*
* Check if there was an event already pending on the eventfd before we
@@ -989,6 +999,8 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
if (events & EPOLLIN)
irqfd_inject(kirqfd);
+ srcu_read_unlock(&irqfds_srcu, idx);
+
/*
* Do not drop the file until the kirqfd is fully initialized, otherwise
* we might race against the EPOLLHUP.
@@ -1011,12 +1023,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
struct privcmd_kernel_irqfd *kirqfd;
struct eventfd_ctx *eventfd;
+ unsigned long flags;
eventfd = eventfd_ctx_fdget(irqfd->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
- mutex_lock(&irqfds_lock);
+ spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry(kirqfd, &irqfds_list, list) {
if (kirqfd->eventfd == eventfd) {
@@ -1025,7 +1038,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
}
}
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
eventfd_ctx_put(eventfd);
@@ -1073,13 +1086,14 @@ static int privcmd_irqfd_init(void)
static void privcmd_irqfd_exit(void)
{
struct privcmd_kernel_irqfd *kirqfd, *tmp;
+ unsigned long flags;
- mutex_lock(&irqfds_lock);
+ spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
irqfd_deactivate(kirqfd);
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
destroy_workqueue(irqfd_cleanup_wq);
}
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index e34b623e4b41..4faebbb84999 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -1708,5 +1708,6 @@ static void __exit xen_pcibk_cleanup(void)
module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);
+MODULE_DESCRIPTION("Xen PCI-device stub driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:pci");
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 2196474ce6ef..4e23d53d269e 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <asm/byteorder.h>
#include <asm/setup.h>
@@ -152,7 +153,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bus);
pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
- zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+ zorro_num_autocon, str_plural(zorro_num_autocon));
/* First identify all devices ... */
for (i = 0; i < zorro_num_autocon; i++) {
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index f16f73581634..01338d4c2d9e 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -48,12 +48,17 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
static void v9fs_dentry_release(struct dentry *dentry)
{
struct hlist_node *p, *n;
+ struct hlist_head head;
p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
dentry, dentry);
- hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
+
+ spin_lock(&dentry->d_lock);
+ hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
+ spin_unlock(&dentry->d_lock);
+
+ hlist_for_each_safe(p, n, &head)
p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
- dentry->d_fsdata = NULL;
}
static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 7a3308d77606..fd72fc38c8f5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -348,6 +348,7 @@ void v9fs_evict_inode(struct inode *inode)
__le32 __maybe_unused version;
if (!is_bad_inode(inode)) {
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
version = cpu_to_le32(v9inode->qid.version);
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index f5693164ca9a..bd2f530e5740 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -176,4 +176,12 @@ config COREDUMP
certainly want to say Y here. Not necessary on systems that never
need debugging or only ever run flawless code.
+config EXEC_KUNIT_TEST
+ bool "Build execve tests" if !KUNIT_ALL_TESTS
+ depends on KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the exec KUnit tests, which test boundary conditions
+ of various aspects of the exec internals.
+
endmenu
diff --git a/fs/affs/amigaffs.h b/fs/affs/amigaffs.h
index 81fb396d4dfa..1b973a669d23 100644
--- a/fs/affs/amigaffs.h
+++ b/fs/affs/amigaffs.h
@@ -80,7 +80,7 @@ struct affs_head {
__be32 spare1;
__be32 first_data;
__be32 checksum;
- __be32 table[1];
+ __be32 table[];
};
struct affs_tail {
@@ -108,7 +108,7 @@ struct slink_front
__be32 key;
__be32 spare1[3];
__be32 checksum;
- u8 symname[1]; /* depends on block size */
+ u8 symname[]; /* depends on block size */
};
struct affs_data_head
@@ -119,7 +119,7 @@ struct affs_data_head
__be32 size;
__be32 next;
__be32 checksum;
- u8 data[1]; /* depends on block size */
+ u8 data[]; /* depends on block size */
};
/* Permission bits */
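
The affs hunks above convert one-element trailing arrays to C99 flexible array members, which lets the compiler and fortify checks see the true object bounds. A standalone sketch of declaring and allocating such a struct; the struct below is illustrative, not the AFFS on-disk layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative header with a flexible array member at the end. */
struct blob {
	unsigned int checksum;
	unsigned int len;
	unsigned char data[];	/* size depends on the block, like affs_data_head */
};

int main(void)
{
	const char payload[] = "hello";
	/* Allocate the header plus exactly as many payload bytes as needed. */
	struct blob *b = malloc(sizeof(*b) + sizeof(payload));

	if (!b)
		return 1;
	b->len = sizeof(payload);
	memcpy(b->data, payload, b->len);
	printf("header %zu bytes, payload %u bytes\n", sizeof(*b), b->len);
	free(b);
	return 0;
}
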
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 94fc049aff58..3acf5e050072 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -512,7 +512,7 @@ static int afs_iget5_set_root(struct inode *inode, void *opaque)
struct afs_vnode *vnode = AFS_FS_I(inode);
vnode->volume = as->volume;
- vnode->fid.vid = as->volume->vid,
+ vnode->fid.vid = as->volume->vid;
vnode->fid.vnode = 1;
vnode->fid.unique = 1;
inode->i_ino = 1;
@@ -545,7 +545,7 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key)
BUG_ON(!(inode->i_state & I_NEW));
vnode = AFS_FS_I(inode);
- vnode->cb_v_check = atomic_read(&as->volume->cb_v_break),
+ vnode->cb_v_check = atomic_read(&as->volume->cb_v_break);
afs_set_netfs_context(vnode);
op = afs_alloc_operation(key, as->volume);
@@ -648,6 +648,7 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
afs_set_cache_aux(vnode, &aux);
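
The two afs one-liners above replace a stray comma operator with the intended semicolon. The comma form still compiles and happens to behave the same because both operands are assignments, but it fuses two statements into one expression. A short demonstration of how the comma operator evaluates (variables are made up):

#include <stdio.h>

int main(void)
{
	int a = 0, b = 0;

	/* Comma operator: both assignments run, and the whole expression
	 * evaluates to the right-hand operand (here 2). */
	int r = (a = 1, b = 2);

	printf("a=%d b=%d r=%d\n", a, b, r);	/* a=1 b=2 r=2 */

	/* The pitfall: a comma where a semicolon was meant quietly fuses
	 * two statements into one expression, so readers and tools can no
	 * longer treat them independently. */
	a = 3, b = 4;
	printf("a=%d b=%d\n", a, b);		/* a=3 b=4 */
	return 0;
}
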
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 97f50e9fd9eb..297487ee8323 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -140,6 +140,11 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
put_page(page);
if (ret < 0)
return ret;
+
+ /* Don't cross a backup volume mountpoint from a backup volume */
+ if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
+ ctx->type == AFSVL_BACKVOL)
+ return -ENODEV;
}
return 0;
diff --git a/fs/aio.c b/fs/aio.c
index 57c9f7c077e6..93ef59d358b3 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1516,7 +1516,7 @@ static void aio_complete_rw(struct kiocb *kiocb, long res)
iocb_put(iocb);
}
-static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
+static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type)
{
int ret;
@@ -1542,7 +1542,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
} else
req->ki_ioprio = get_current_ioprio();
- ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
+ ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags, rw_type);
if (unlikely(ret))
return ret;
@@ -1594,7 +1594,7 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb,
struct file *file;
int ret;
- ret = aio_prep_rw(req, iocb);
+ ret = aio_prep_rw(req, iocb, READ);
if (ret)
return ret;
file = req->ki_filp;
@@ -1621,7 +1621,7 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
struct file *file;
int ret;
- ret = aio_prep_rw(req, iocb);
+ ret = aio_prep_rw(req, iocb, WRITE);
if (ret)
return ret;
file = req->ki_filp;
diff --git a/fs/attr.c b/fs/attr.c
index 960a310581eb..825007d5cda4 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -17,8 +17,6 @@
#include <linux/filelock.h>
#include <linux/security.h>
-#include "internal.h"
-
/**
* setattr_should_drop_sgid - determine whether the setgid bit needs to be
* removed
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
index b5e4dfa04ed0..1d644a35ffa0 100644
--- a/fs/autofs/init.c
+++ b/fs/autofs/init.c
@@ -38,4 +38,5 @@ static void __exit exit_autofs_fs(void)
module_init(init_autofs_fs)
module_exit(exit_autofs_fs)
+MODULE_DESCRIPTION("Kernel automounter support");
MODULE_LICENSE("GPL");
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 1f5db6863663..cf792d4de4f1 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -126,7 +126,7 @@ enum {
const struct fs_parameter_spec autofs_param_specs[] = {
fsparam_flag ("direct", Opt_direct),
fsparam_fd ("fd", Opt_fd),
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_gid ("gid", Opt_gid),
fsparam_flag ("ignore", Opt_ignore),
fsparam_flag ("indirect", Opt_indirect),
fsparam_u32 ("maxproto", Opt_maxproto),
@@ -134,7 +134,7 @@ const struct fs_parameter_spec autofs_param_specs[] = {
fsparam_flag ("offset", Opt_offset),
fsparam_u32 ("pgrp", Opt_pgrp),
fsparam_flag ("strictexpire", Opt_strictexpire),
- fsparam_u32 ("uid", Opt_uid),
+ fsparam_uid ("uid", Opt_uid),
{}
};
@@ -193,8 +193,6 @@ static int autofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
struct autofs_fs_context *ctx = fc->fs_private;
struct autofs_sb_info *sbi = fc->s_fs_info;
struct fs_parse_result result;
- kuid_t uid;
- kgid_t gid;
int opt;
opt = fs_parse(fc, autofs_param_specs, param, &result);
@@ -205,16 +203,10 @@ static int autofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_fd:
return autofs_parse_fd(fc, sbi, param, &result);
case Opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- return invalfc(fc, "Invalid uid");
- ctx->uid = uid;
+ ctx->uid = result.uid;
break;
case Opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- return invalfc(fc, "Invalid gid");
- ctx->gid = gid;
+ ctx->gid = result.gid;
break;
case Opt_pgrp:
ctx->pgrp = result.uint_32;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 346cd91f91f9..658f11aebda1 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -3,6 +3,7 @@
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
+#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
@@ -29,7 +30,7 @@
#include <linux/sched/task.h>
#include <linux/sort.h>
-static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket);
+static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);
/* Persistent alloc info: */
@@ -259,6 +260,14 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
"invalid data type (got %u should be %u)",
a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
+ for (unsigned i = 0; i < 2; i++)
+ bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX,
+ c, err,
+ alloc_key_io_time_bad,
+ "invalid io_time[%s]: %llu, max %llu",
+ i == READ ? "read" : "write",
+ a.v->io_time[i], LRU_TIME_MAX);
+
switch (a.v->data_type) {
case BCH_DATA_free:
case BCH_DATA_need_gc_gens:
@@ -741,6 +750,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
@@ -756,8 +766,8 @@ int bch2_trigger_alloc(struct btree_trans *trans,
alloc_data_type_set(new_a, new_a->data_type);
if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
- new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+ new_a->io_time[READ] = bch2_current_io_time(c, READ);
+ new_a->io_time[WRITE]= bch2_current_io_time(c, WRITE);
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
}
@@ -767,6 +777,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
!bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
new_a->gen++;
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+ alloc_data_type_set(new_a, new_a->data_type);
}
if (old_a->data_type != new_a->data_type ||
@@ -780,7 +791,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
if (new_a->data_type == BCH_DATA_cached &&
!new_a->io_time[READ])
- new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+ new_a->io_time[READ] = bch2_current_io_time(c, READ);
u64 old_lru = alloc_lru_idx_read(*old_a);
u64 new_lru = alloc_lru_idx_read(*new_a);
@@ -860,8 +871,14 @@ int bch2_trigger_alloc(struct btree_trans *trans,
}
percpu_down_read(&c->mark_lock);
- if (new_a->gen != old_a->gen)
- *bucket_gen(ca, new.k->p.offset) = new_a->gen;
+ if (new_a->gen != old_a->gen) {
+ u8 *gen = bucket_gen(ca, new.k->p.offset);
+ if (unlikely(!gen)) {
+ percpu_up_read(&c->mark_lock);
+ goto invalid_bucket;
+ }
+ *gen = new_a->gen;
+ }
bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
percpu_up_read(&c->mark_lock);
@@ -875,14 +892,14 @@ int bch2_trigger_alloc(struct btree_trans *trans,
closure_wake_up(&c->freelist_wait);
if (statechange(a->data_type == BCH_DATA_need_discard) &&
- !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
+ !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
bucket_flushed(new_a))
- bch2_discard_one_bucket_fast(c, new.k->p);
+ bch2_discard_one_bucket_fast(ca, new.k->p.offset);
if (statechange(a->data_type == BCH_DATA_cached) &&
!bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
- bch2_do_invalidates(c);
+ bch2_dev_do_invalidates(ca);
if (statechange(a->data_type == BCH_DATA_need_gc_gens))
bch2_gc_gens_async(c);
@@ -895,6 +912,11 @@ int bch2_trigger_alloc(struct btree_trans *trans,
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, new.k->p.offset);
+ if (unlikely(!g)) {
+ percpu_up_read(&c->mark_lock);
+ goto invalid_bucket;
+ }
+ g->gen_valid = 1;
bucket_lock(g);
@@ -910,8 +932,14 @@ int bch2_trigger_alloc(struct btree_trans *trans,
percpu_up_read(&c->mark_lock);
}
err:
+ printbuf_exit(&buf);
bch2_dev_put(ca);
return ret;
+invalid_bucket:
+ bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
+ (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
+ ret = -EIO;
+ goto err;
}
/*
@@ -1526,13 +1554,13 @@ err:
}
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
- struct btree_iter *alloc_iter)
+ struct btree_iter *alloc_iter,
+ struct bkey_buf *last_flushed)
{
struct bch_fs *c = trans->c;
- struct btree_iter lru_iter;
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a;
- struct bkey_s_c alloc_k, lru_k;
+ struct bkey_s_c alloc_k;
struct printbuf buf = PRINTBUF;
int ret;
@@ -1546,6 +1574,14 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
a = bch2_alloc_to_v4(alloc_k, &a_convert);
+ if (a->fragmentation_lru) {
+ ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
+ a->fragmentation_lru,
+ alloc_k, last_flushed);
+ if (ret)
+ return ret;
+ }
+
if (a->data_type != BCH_DATA_cached)
return 0;
@@ -1561,7 +1597,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
if (ret)
goto err;
- a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+ a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
ret = bch2_trans_update(trans, alloc_iter,
&a_mut->k_i, BTREE_TRIGGER_norun);
if (ret)
@@ -1570,73 +1606,66 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
a = &a_mut->v;
}
- lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
- lru_pos(alloc_k.k->p.inode,
- bucket_to_u64(alloc_k.k->p),
- a->io_time[READ]), 0);
- ret = bkey_err(lru_k);
+ ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ],
+ alloc_k, last_flushed);
if (ret)
- return ret;
-
- if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
- alloc_key_to_missing_lru_entry,
- "missing lru entry\n"
- " %s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
- ret = bch2_lru_set(trans,
- alloc_k.k->p.inode,
- bucket_to_u64(alloc_k.k->p),
- a->io_time[READ]);
- if (ret)
- goto err;
- }
+ goto err;
err:
fsck_err:
- bch2_trans_iter_exit(trans, &lru_iter);
printbuf_exit(&buf);
return ret;
}
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
+ struct bkey_buf last_flushed;
+
+ bch2_bkey_buf_init(&last_flushed);
+ bkey_init(&last_flushed.k->k);
+
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_check_alloc_to_lru_ref(trans, &iter)));
+ bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)));
+
+ bch2_bkey_buf_exit(&last_flushed, c);
bch_err_fn(c, ret);
return ret;
}
-static int discard_in_flight_add(struct bch_fs *c, struct bpos bucket)
+static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
int ret;
- mutex_lock(&c->discard_buckets_in_flight_lock);
- darray_for_each(c->discard_buckets_in_flight, i)
- if (bkey_eq(*i, bucket)) {
- ret = -EEXIST;
+ mutex_lock(&ca->discard_buckets_in_flight_lock);
+ darray_for_each(ca->discard_buckets_in_flight, i)
+ if (i->bucket == bucket) {
+ ret = -BCH_ERR_EEXIST_discard_in_flight_add;
goto out;
}
- ret = darray_push(&c->discard_buckets_in_flight, bucket);
+ ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
+ .in_progress = in_progress,
+ .bucket = bucket,
+ }));
out:
- mutex_unlock(&c->discard_buckets_in_flight_lock);
+ mutex_unlock(&ca->discard_buckets_in_flight_lock);
return ret;
}
-static void discard_in_flight_remove(struct bch_fs *c, struct bpos bucket)
+static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
{
- mutex_lock(&c->discard_buckets_in_flight_lock);
- darray_for_each(c->discard_buckets_in_flight, i)
- if (bkey_eq(*i, bucket)) {
- darray_remove_item(&c->discard_buckets_in_flight, i);
+ mutex_lock(&ca->discard_buckets_in_flight_lock);
+ darray_for_each(ca->discard_buckets_in_flight, i)
+ if (i->bucket == bucket) {
+ BUG_ON(!i->in_progress);
+ darray_remove_item(&ca->discard_buckets_in_flight, i);
goto found;
}
BUG();
found:
- mutex_unlock(&c->discard_buckets_in_flight_lock);
+ mutex_unlock(&ca->discard_buckets_in_flight_lock);
}
struct discard_buckets_state {
@@ -1644,26 +1673,11 @@ struct discard_buckets_state {
u64 open;
u64 need_journal_commit;
u64 discarded;
- struct bch_dev *ca;
u64 need_journal_commit_this_dev;
};
-static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
-{
- if (s->ca == ca)
- return;
-
- if (s->ca && s->need_journal_commit_this_dev >
- bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
- bch2_journal_flush_async(&c->journal, NULL);
-
- if (s->ca)
- percpu_ref_put(&s->ca->io_ref);
- s->ca = ca;
- s->need_journal_commit_this_dev = 0;
-}
-
static int bch2_discard_one_bucket(struct btree_trans *trans,
+ struct bch_dev *ca,
struct btree_iter *need_discard_iter,
struct bpos *discard_pos_done,
struct discard_buckets_state *s)
@@ -1677,16 +1691,6 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
bool discard_locked = false;
int ret = 0;
- struct bch_dev *ca = s->ca && s->ca->dev_idx == pos.inode
- ? s->ca
- : bch2_dev_get_ioref(c, pos.inode, WRITE);
- if (!ca) {
- bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
- return 0;
- }
-
- discard_buckets_next_dev(c, s, ca);
-
if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
s->open++;
goto out;
@@ -1746,7 +1750,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
goto out;
}
- if (discard_in_flight_add(c, SPOS(iter.pos.inode, iter.pos.offset, true)))
+ if (discard_in_flight_add(ca, iter.pos.offset, true))
goto out;
discard_locked = true;
@@ -1770,8 +1774,9 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
}
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
- alloc_data_type_set(&a->v, a->v.data_type);
write:
+ alloc_data_type_set(&a->v, a->v.data_type);
+
ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_WATERMARK_btree|
@@ -1783,7 +1788,7 @@ write:
s->discarded++;
out:
if (discard_locked)
- discard_in_flight_remove(c, iter.pos);
+ discard_in_flight_remove(ca, iter.pos.offset);
s->seen++;
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
@@ -1792,7 +1797,8 @@ out:
static void bch2_do_discards_work(struct work_struct *work)
{
- struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
+ struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
+ struct bch_fs *c = ca->fs;
struct discard_buckets_state s = {};
struct bpos discard_pos_done = POS_MAX;
int ret;
@@ -1803,23 +1809,41 @@ static void bch2_do_discards_work(struct work_struct *work)
* successful commit:
*/
ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter,
- BTREE_ID_need_discard, POS_MIN, 0, k,
- bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
-
- discard_buckets_next_dev(c, &s, NULL);
+ for_each_btree_key_upto(trans, iter,
+ BTREE_ID_need_discard,
+ POS(ca->dev_idx, 0),
+ POS(ca->dev_idx, U64_MAX), 0, k,
+ bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s)));
trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
bch2_err_str(ret));
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+ percpu_ref_put(&ca->io_ref);
+}
+
+void bch2_dev_do_discards(struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ return;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
+ goto put_ioref;
+
+ if (queue_work(c->write_ref_wq, &ca->discard_work))
+ return;
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+put_ioref:
+ percpu_ref_put(&ca->io_ref);
}
void bch2_do_discards(struct bch_fs *c)
{
- if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
- !queue_work(c->write_ref_wq, &c->discard_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+ for_each_member_device(c, ca)
+ bch2_dev_do_discards(ca);
}
static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
@@ -1848,68 +1872,69 @@ err:
static void bch2_do_discards_fast_work(struct work_struct *work)
{
- struct bch_fs *c = container_of(work, struct bch_fs, discard_fast_work);
+ struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
+ struct bch_fs *c = ca->fs;
while (1) {
bool got_bucket = false;
- struct bpos bucket;
- struct bch_dev *ca;
-
- mutex_lock(&c->discard_buckets_in_flight_lock);
- darray_for_each(c->discard_buckets_in_flight, i) {
- if (i->snapshot)
- continue;
+ u64 bucket;
- ca = bch2_dev_get_ioref(c, i->inode, WRITE);
- if (!ca) {
- darray_remove_item(&c->discard_buckets_in_flight, i);
+ mutex_lock(&ca->discard_buckets_in_flight_lock);
+ darray_for_each(ca->discard_buckets_in_flight, i) {
+ if (i->in_progress)
continue;
- }
got_bucket = true;
- bucket = *i;
- i->snapshot = true;
+ bucket = i->bucket;
+ i->in_progress = true;
break;
}
- mutex_unlock(&c->discard_buckets_in_flight_lock);
+ mutex_unlock(&ca->discard_buckets_in_flight_lock);
if (!got_bucket)
break;
if (ca->mi.discard && !c->opts.nochanges)
blkdev_issue_discard(ca->disk_sb.bdev,
- bucket.offset * ca->mi.bucket_size,
+ bucket_to_sector(ca, bucket),
ca->mi.bucket_size,
GFP_KERNEL);
int ret = bch2_trans_do(c, NULL, NULL,
- BCH_WATERMARK_btree|
- BCH_TRANS_COMMIT_no_enospc,
- bch2_clear_bucket_needs_discard(trans, bucket));
+ BCH_WATERMARK_btree|
+ BCH_TRANS_COMMIT_no_enospc,
+ bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket)));
bch_err_fn(c, ret);
- percpu_ref_put(&ca->io_ref);
- discard_in_flight_remove(c, bucket);
+ discard_in_flight_remove(ca, bucket);
if (ret)
break;
}
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
+ percpu_ref_put(&ca->io_ref);
}
-static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
+static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
{
- rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, bucket.inode);
- bool dead = !ca || percpu_ref_is_dying(&ca->io_ref);
- rcu_read_unlock();
+ struct bch_fs *c = ca->fs;
+
+ if (discard_in_flight_add(ca, bucket, false))
+ return;
- if (!dead &&
- !discard_in_flight_add(c, bucket) &&
- bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast) &&
- !queue_work(c->write_ref_wq, &c->discard_fast_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ return;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
+ goto put_ioref;
+
+ if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
+ return;
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
+put_ioref:
+ percpu_ref_put(&ca->io_ref);
}
static int invalidate_one_bucket(struct btree_trans *trans,
@@ -1957,8 +1982,8 @@ static int invalidate_one_bucket(struct btree_trans *trans,
a->v.data_type = 0;
a->v.dirty_sectors = 0;
a->v.cached_sectors = 0;
- a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
- a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
+ a->v.io_time[READ] = bch2_current_io_time(c, READ);
+ a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE);
ret = bch2_trans_commit(trans, NULL, NULL,
BCH_WATERMARK_btree|
@@ -1993,9 +2018,25 @@ err:
goto out;
}
+static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
+ struct bch_dev *ca, bool *wrapped)
+{
+ struct bkey_s_c k;
+again:
+ k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+ if (!k.k && !*wrapped) {
+ bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
+ *wrapped = true;
+ goto again;
+ }
+
+ return k;
+}
+
static void bch2_do_invalidates_work(struct work_struct *work)
{
- struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
+ struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
+ struct bch_fs *c = ca->fs;
struct btree_trans *trans = bch2_trans_get(c);
int ret = 0;
@@ -2003,31 +2044,63 @@ static void bch2_do_invalidates_work(struct work_struct *work)
if (ret)
goto err;
- for_each_member_device(c, ca) {
- s64 nr_to_invalidate =
- should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
+ s64 nr_to_invalidate =
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
+ struct btree_iter iter;
+ bool wrapped = false;
- ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
- lru_pos(ca->dev_idx, 0, 0),
- lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
- BTREE_ITER_intent, k,
- invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
+ lru_pos(ca->dev_idx, 0,
+ ((bch2_current_io_time(c, READ) + U32_MAX) &
+ LRU_TIME_MAX)), 0);
- if (ret < 0) {
- bch2_dev_put(ca);
+ while (true) {
+ bch2_trans_begin(trans);
+
+ struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
+ ret = bkey_err(k);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
break;
- }
+ if (!k.k)
+ break;
+
+ ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
+ if (ret)
+ break;
+
+ bch2_btree_iter_advance(&iter);
}
+ bch2_trans_iter_exit(trans, &iter);
err:
bch2_trans_put(trans);
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+ percpu_ref_put(&ca->io_ref);
+}
+
+void bch2_dev_do_invalidates(struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ return;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
+ goto put_ioref;
+
+ if (queue_work(c->write_ref_wq, &ca->invalidate_work))
+ return;
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+put_ioref:
+ percpu_ref_put(&ca->io_ref);
}
void bch2_do_invalidates(struct bch_fs *c)
{
- if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
- !queue_work(c->write_ref_wq, &c->invalidate_work))
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+ for_each_member_device(c, ca)
+ bch2_dev_do_invalidates(ca);
}
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
@@ -2186,7 +2259,7 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
if (ret)
return ret;
- now = atomic64_read(&c->io_clock[rw].now);
+ now = bch2_current_io_time(c, rw);
if (a->v.io_time[rw] == now)
goto out;
@@ -2343,16 +2416,20 @@ void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
set_bit(ca->dev_idx, c->rw_devs[i].d);
}
-void bch2_fs_allocator_background_exit(struct bch_fs *c)
+void bch2_dev_allocator_background_exit(struct bch_dev *ca)
+{
+ darray_exit(&ca->discard_buckets_in_flight);
+}
+
+void bch2_dev_allocator_background_init(struct bch_dev *ca)
{
- darray_exit(&c->discard_buckets_in_flight);
+ mutex_init(&ca->discard_buckets_in_flight_lock);
+ INIT_WORK(&ca->discard_work, bch2_do_discards_work);
+ INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
+ INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
}
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
spin_lock_init(&c->freelist_lock);
- mutex_init(&c->discard_buckets_in_flight_lock);
- INIT_WORK(&c->discard_work, bch2_do_discards_work);
- INIT_WORK(&c->discard_fast_work, bch2_do_discards_fast_work);
- INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}
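
The new next_lru_key() above starts the per-device LRU scan at a position derived from the current IO clock and, on running off the end, wraps once back to position 0. A self-contained sketch of the same wrap-once scan over a plain array, not the bcachefs iterator API; the data and the match predicate are invented:

#include <stdbool.h>
#include <stdio.h>

/* Scan vals[start..n-1]; if nothing matches, wrap once to index 0 and
 * keep scanning until the end is reached a second time. */
static int find_wrapping(const int *vals, int n, int start, int want)
{
	bool wrapped = false;
	int i = start;

	while (1) {
		if (i >= n) {
			if (wrapped)
				return -1;	/* scanned everything once */
			wrapped = true;
			i = 0;
			continue;
		}
		if (vals[i] == want)
			return i;
		i++;
	}
}

int main(void)
{
	const int lru[] = { 7, 3, 9, 1, 4 };

	printf("found at %d\n", find_wrapping(lru, 5, 3, 9));	/* wraps, prints 2 */
	printf("found at %d\n", find_wrapping(lru, 5, 3, 8));	/* prints -1 */
	return 0;
}
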
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index ae31a94be6f9..ba2c5557a3f0 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -141,7 +141,13 @@ static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
!bch2_bucket_sectors_fragmented(ca, a))
return 0;
- u64 d = bch2_bucket_sectors_dirty(a);
+ /*
+ * avoid overflowing LRU_TIME_BITS on a corrupted fs, when
+ * bucket_sectors_dirty is (much) bigger than bucket_size
+ */
+ u64 d = min(bch2_bucket_sectors_dirty(a),
+ ca->mi.bucket_size);
+
return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
}
@@ -269,6 +275,7 @@ int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
enum btree_iter_update_trigger_flags);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
+void bch2_dev_do_discards(struct bch_dev *);
void bch2_do_discards(struct bch_fs *);
static inline u64 should_invalidate_buckets(struct bch_dev *ca,
@@ -283,6 +290,7 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
}
+void bch2_dev_do_invalidates(struct bch_dev *);
void bch2_do_invalidates(struct bch_fs *);
static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
@@ -306,7 +314,9 @@ u64 bch2_min_rw_member_capacity(struct bch_fs *);
void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
-void bch2_fs_allocator_background_exit(struct bch_fs *);
+void bch2_dev_allocator_background_exit(struct bch_dev *);
+void bch2_dev_allocator_background_init(struct bch_dev *);
+
void bch2_fs_allocator_background_init(struct bch_fs *);
#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
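
The alloc_lru_idx_fragmentation() change clamps dirty sectors to the bucket size before computing d * (1 << 31) / bucket_size, so a corrupted value cannot overflow the 64-bit product or the 48-bit LRU time space. A small arithmetic sketch of the same clamp, not part of the patch, with made-up numbers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LRU_TIME_BITS	48
#define LRU_TIME_MAX	((1ULL << LRU_TIME_BITS) - 1)

/* Fragmentation index in [0, 2^31], scaled by how full the bucket is. */
static uint64_t fragmentation_idx(uint64_t dirty_sectors, uint64_t bucket_size)
{
	/* Clamp first: a corrupt dirty_sectors must not overflow the product. */
	uint64_t d = dirty_sectors < bucket_size ? dirty_sectors : bucket_size;

	return d * (1ULL << 31) / bucket_size;
}

int main(void)
{
	/* Sane bucket: half full -> index of 2^30. */
	printf("%" PRIu64 "\n", fragmentation_idx(256, 512));
	/* Corrupt bucket: dirty sectors wildly larger than the bucket size
	 * still yield at most 2^31, well under LRU_TIME_MAX. */
	printf("%" PRIu64 "\n", fragmentation_idx(UINT64_MAX, 512));
	printf("max lru time %llu\n", LRU_TIME_MAX);
	return 0;
}
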
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 927a5f300b30..27d97c22ae27 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -621,13 +621,13 @@ again:
avail = dev_buckets_free(ca, *usage, watermark);
if (usage->d[BCH_DATA_need_discard].buckets > avail)
- bch2_do_discards(c);
+ bch2_dev_do_discards(ca);
if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
bch2_gc_gens_async(c);
if (should_invalidate_buckets(ca, *usage))
- bch2_do_invalidates(c);
+ bch2_dev_do_invalidates(ca);
if (!avail) {
if (cl && !waiting) {
@@ -1703,6 +1703,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].data_type]++;
+ printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 24);
percpu_down_read(&c->mark_lock);
@@ -1736,6 +1737,7 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].data_type]++;
+ printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 12);
printbuf_tabstop_push(out, 16);
printbuf_tabstop_push(out, 16);
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 692b1c7d5018..6d8b1bc90be0 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -434,13 +434,6 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
return ret;
}
-static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
-{
- return bpos_eq(l.k->p, r.k->p) &&
- bkey_bytes(l.k) == bkey_bytes(r.k) &&
- !memcmp(l.v, r.v, bkey_val_bytes(l.k));
-}
-
struct extents_to_bp_state {
struct bpos bucket_start;
struct bpos bucket_end;
@@ -536,11 +529,8 @@ static int check_bp_exists(struct btree_trans *trans,
struct btree_iter other_extent_iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c bp_k;
- struct bkey_buf tmp;
int ret = 0;
- bch2_bkey_buf_init(&tmp);
-
struct bch_dev *ca = bch2_dev_bucket_tryget(c, bucket);
if (!ca) {
prt_str(&buf, "extent for nonexistent device:bucket ");
@@ -565,22 +555,9 @@ static int check_bp_exists(struct btree_trans *trans,
if (bp_k.k->type != KEY_TYPE_backpointer ||
memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
- bch2_bkey_buf_reassemble(&tmp, c, orig_k);
-
- if (!bkey_and_val_eq(orig_k, bkey_i_to_s_c(s->last_flushed.k))) {
- if (bp.level) {
- bch2_trans_unlock(trans);
- bch2_btree_interior_updates_flush(c);
- }
-
- ret = bch2_btree_write_buffer_flush_sync(trans);
- if (ret)
- goto err;
-
- bch2_bkey_buf_copy(&s->last_flushed, c, tmp.k);
- ret = -BCH_ERR_transaction_restart_write_buffer_flush;
- goto out;
- }
+ ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed);
+ if (ret)
+ goto err;
goto check_existing_bp;
}
@@ -589,7 +566,6 @@ err:
fsck_err:
bch2_trans_iter_exit(trans, &other_extent_iter);
bch2_trans_iter_exit(trans, &bp_iter);
- bch2_bkey_buf_exit(&tmp, c);
bch2_dev_put(ca);
printbuf_exit(&buf);
return ret;
@@ -690,7 +666,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- struct bpos bucket_pos;
+ struct bpos bucket_pos = POS_MIN;
struct bch_backpointer bp;
if (p.ptr.cached)
@@ -794,6 +770,8 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
!((1U << btree) & btree_interior_mask))
continue;
+ bch2_trans_begin(trans);
+
__for_each_btree_node(trans, iter, btree,
btree == start.btree ? start.pos : POS_MIN,
0, depth, BTREE_ITER_prefetch, b, ret) {
@@ -905,7 +883,7 @@ static int check_one_backpointer(struct btree_trans *trans,
struct bbpos start,
struct bbpos end,
struct bkey_s_c_backpointer bp,
- struct bpos *last_flushed_pos)
+ struct bkey_buf *last_flushed)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
@@ -925,20 +903,18 @@ static int check_one_backpointer(struct btree_trans *trans,
if (ret)
return ret;
- if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
- *last_flushed_pos = bp.k->p;
- ret = bch2_btree_write_buffer_flush_sync(trans) ?:
- -BCH_ERR_transaction_restart_write_buffer_flush;
- goto out;
- }
+ if (!k.k) {
+ ret = bch2_btree_write_buffer_maybe_flush(trans, bp.s_c, last_flushed);
+ if (ret)
+ goto out;
- if (fsck_err_on(!k.k, c,
- backpointer_to_missing_ptr,
- "backpointer for missing %s\n %s",
- bp.v->level ? "btree node" : "extent",
- (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
- ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
- goto out;
+ if (fsck_err(c, backpointer_to_missing_ptr,
+ "backpointer for missing %s\n %s",
+ bp.v->level ? "btree node" : "extent",
+ (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
+ ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
+ goto out;
+ }
}
out:
fsck_err:
@@ -951,14 +927,20 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
struct bbpos start,
struct bbpos end)
{
- struct bpos last_flushed_pos = SPOS_MAX;
+ struct bkey_buf last_flushed;
- return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
+ bch2_bkey_buf_init(&last_flushed);
+ bkey_init(&last_flushed.k->k);
+
+ int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_one_backpointer(trans, start, end,
bkey_s_c_to_backpointer(k),
- &last_flushed_pos));
+ &last_flushed));
+
+ bch2_bkey_buf_exit(&last_flushed, trans->c);
+ return ret;
}
int bch2_check_backpointers_to_extents(struct bch_fs *c)
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index bc0ea2c4efef..1106fec6e155 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -457,6 +457,7 @@ enum bch_time_stats {
};
#include "alloc_types.h"
+#include "btree_gc_types.h"
#include "btree_types.h"
#include "btree_node_scan_types.h"
#include "btree_write_buffer_types.h"
@@ -488,53 +489,15 @@ enum bch_time_stats {
struct btree;
-enum gc_phase {
- GC_PHASE_NOT_RUNNING,
- GC_PHASE_START,
- GC_PHASE_SB,
-
- GC_PHASE_BTREE_stripes,
- GC_PHASE_BTREE_extents,
- GC_PHASE_BTREE_inodes,
- GC_PHASE_BTREE_dirents,
- GC_PHASE_BTREE_xattrs,
- GC_PHASE_BTREE_alloc,
- GC_PHASE_BTREE_quotas,
- GC_PHASE_BTREE_reflink,
- GC_PHASE_BTREE_subvolumes,
- GC_PHASE_BTREE_snapshots,
- GC_PHASE_BTREE_lru,
- GC_PHASE_BTREE_freespace,
- GC_PHASE_BTREE_need_discard,
- GC_PHASE_BTREE_backpointers,
- GC_PHASE_BTREE_bucket_gens,
- GC_PHASE_BTREE_snapshot_trees,
- GC_PHASE_BTREE_deleted_inodes,
- GC_PHASE_BTREE_logged_ops,
- GC_PHASE_BTREE_rebalance_work,
- GC_PHASE_BTREE_subvolume_children,
-
- GC_PHASE_PENDING_DELETE,
-};
-
-struct gc_pos {
- enum gc_phase phase;
- u16 level;
- struct bpos pos;
-};
-
-struct reflink_gc {
- u64 offset;
- u32 size;
- u32 refcount;
-};
-
-typedef GENRADIX(struct reflink_gc) reflink_gc_table;
-
struct io_count {
u64 sectors[2][BCH_DATA_NR];
};
+struct discard_in_flight {
+ bool in_progress:1;
+ u64 bucket:63;
+};
+
struct bch_dev {
struct kobject kobj;
#ifdef CONFIG_BCACHEFS_DEBUG
@@ -596,6 +559,12 @@ struct bch_dev {
size_t inc_gen_really_needs_gc;
size_t buckets_waiting_on_journal;
+ struct work_struct invalidate_work;
+ struct work_struct discard_work;
+ struct mutex discard_buckets_in_flight_lock;
+ DARRAY(struct discard_in_flight) discard_buckets_in_flight;
+ struct work_struct discard_fast_work;
+
atomic64_t rebalance_work;
struct journal_device journal;
@@ -832,7 +801,8 @@ struct bch_fs {
/* BTREE CACHE */
struct bio_set btree_bio;
- struct workqueue_struct *io_complete_wq;
+ struct workqueue_struct *btree_read_complete_wq;
+ struct workqueue_struct *btree_write_submit_wq;
struct btree_root btree_roots_known[BTREE_ID_NR];
DARRAY(struct btree_root) btree_roots_extra;
@@ -956,11 +926,6 @@ struct bch_fs {
unsigned write_points_nr;
struct buckets_waiting_for_journal buckets_waiting_for_journal;
- struct work_struct invalidate_work;
- struct work_struct discard_work;
- struct mutex discard_buckets_in_flight_lock;
- DARRAY(struct bpos) discard_buckets_in_flight;
- struct work_struct discard_fast_work;
/* GARBAGE COLLECTION */
struct work_struct gc_gens_work;
@@ -1255,6 +1220,11 @@ static inline s64 bch2_current_time(const struct bch_fs *c)
return timespec_to_bch2_time(c, now);
}
+static inline u64 bch2_current_io_time(const struct bch_fs *c, int rw)
+{
+ return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX);
+}
+
static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
{
struct stdio_redirect *stdio = c->stdio;
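
The new struct discard_in_flight above packs a 1-bit in-progress flag and a 63-bit bucket index into one 64-bit word. A hedged userspace sketch of the same packing; bitfield layout is implementation-defined, so this only illustrates the intent (common 64-bit GCC/Clang targets pack it into 8 bytes):

#include <stdio.h>

struct discard_in_flight {
	unsigned long long in_progress:1;
	unsigned long long bucket:63;
};

int main(void)
{
	struct discard_in_flight d = {
		.in_progress = 1,
		.bucket = (1ULL << 62) + 5,	/* fits in 63 bits */
	};

	printf("sizeof = %zu bytes\n", sizeof(d));	/* typically 8 */
	printf("in_progress=%llu bucket=%llu\n",
	       (unsigned long long)d.in_progress,
	       (unsigned long long)d.bucket);
	return 0;
}
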
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index d801e19cb489..e3b1bde489c3 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -476,6 +476,9 @@ struct bch_lru {
#define LRU_ID_STRIPES (1U << 16)
+#define LRU_TIME_BITS 48
+#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)
+
/* Optional/variable size superblock sections: */
struct bch_sb_field {
@@ -503,16 +506,22 @@ struct bch_sb_field {
#include "alloc_background_format.h"
#include "extents_format.h"
-#include "reflink_format.h"
#include "ec_format.h"
-#include "inode_format.h"
#include "dirent_format.h"
-#include "xattr_format.h"
-#include "quota_format.h"
+#include "disk_groups_format.h"
+#include "inode_format.h"
+#include "journal_seq_blacklist_format.h"
#include "logged_ops_format.h"
+#include "quota_format.h"
+#include "reflink_format.h"
+#include "replicas_format.h"
#include "snapshot_format.h"
#include "subvolume_format.h"
#include "sb-counters_format.h"
+#include "sb-downgrade_format.h"
+#include "sb-errors_format.h"
+#include "sb-members_format.h"
+#include "xattr_format.h"
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
@@ -545,107 +554,6 @@ struct bch_sb_field_journal_v2 {
} d[];
};
-/* BCH_SB_FIELD_members_v1: */
-
-#define BCH_MIN_NR_NBUCKETS (1 << 6)
-
-#define BCH_IOPS_MEASUREMENTS() \
- x(seqread, 0) \
- x(seqwrite, 1) \
- x(randread, 2) \
- x(randwrite, 3)
-
-enum bch_iops_measurement {
-#define x(t, n) BCH_IOPS_##t = n,
- BCH_IOPS_MEASUREMENTS()
-#undef x
- BCH_IOPS_NR
-};
-
-#define BCH_MEMBER_ERROR_TYPES() \
- x(read, 0) \
- x(write, 1) \
- x(checksum, 2)
-
-enum bch_member_error_type {
-#define x(t, n) BCH_MEMBER_ERROR_##t = n,
- BCH_MEMBER_ERROR_TYPES()
-#undef x
- BCH_MEMBER_ERROR_NR
-};
-
-struct bch_member {
- __uuid_t uuid;
- __le64 nbuckets; /* device size */
- __le16 first_bucket; /* index of first bucket used */
- __le16 bucket_size; /* sectors */
- __u8 btree_bitmap_shift;
- __u8 pad[3];
- __le64 last_mount; /* time_t */
-
- __le64 flags;
- __le32 iops[4];
- __le64 errors[BCH_MEMBER_ERROR_NR];
- __le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
- __le64 errors_reset_time;
- __le64 seq;
- __le64 btree_allocated_bitmap;
- /*
- * On recovery from a clean shutdown we don't normally read the journal,
- * but we still want to resume writing from where we left off so we
- * don't overwrite more than is necessary, for list journal debugging:
- */
- __le32 last_journal_bucket;
- __le32 last_journal_bucket_offset;
-};
-
-/*
- * This limit comes from the bucket_gens array - it's a single allocation, and
- * kernel allocation are limited to INT_MAX
- */
-#define BCH_MEMBER_NBUCKETS_MAX (INT_MAX - 64)
-
-#define BCH_MEMBER_V1_BYTES 56
-
-LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
-/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
-LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
-LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags, 15, 20)
-LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
-LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
-LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
- struct bch_member, flags, 30, 31)
-
-#if 0
-LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
-LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
-#endif
-
-#define BCH_MEMBER_STATES() \
- x(rw, 0) \
- x(ro, 1) \
- x(failed, 2) \
- x(spare, 3)
-
-enum bch_member_state {
-#define x(t, n) BCH_MEMBER_STATE_##t = n,
- BCH_MEMBER_STATES()
-#undef x
- BCH_MEMBER_STATE_NR
-};
-
-struct bch_sb_field_members_v1 {
- struct bch_sb_field field;
- struct bch_member _members[]; //Members are now variable size
-};
-
-struct bch_sb_field_members_v2 {
- struct bch_sb_field field;
- __le16 member_bytes; //size of single member entry
- u8 pad[6];
- struct bch_member _members[];
-};
-
/* BCH_SB_FIELD_crypt: */
struct nonce {
@@ -694,8 +602,6 @@ LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
-/* BCH_SB_FIELD_replicas: */
-
#define BCH_DATA_TYPES() \
x(free, 0) \
x(sb, 1) \
@@ -738,50 +644,6 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
}
}
-struct bch_replicas_entry_v0 {
- __u8 data_type;
- __u8 nr_devs;
- __u8 devs[];
-} __packed;
-
-struct bch_sb_field_replicas_v0 {
- struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[];
-} __packed __aligned(8);
-
-struct bch_replicas_entry_v1 {
- __u8 data_type;
- __u8 nr_devs;
- __u8 nr_required;
- __u8 devs[];
-} __packed;
-
-#define replicas_entry_bytes(_i) \
- (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
-
-struct bch_sb_field_replicas {
- struct bch_sb_field field;
- struct bch_replicas_entry_v1 entries[];
-} __packed __aligned(8);
-
-/* BCH_SB_FIELD_disk_groups: */
-
-#define BCH_SB_LABEL_SIZE 32
-
-struct bch_disk_group {
- __u8 label[BCH_SB_LABEL_SIZE];
- __le64 flags[2];
-} __packed __aligned(8);
-
-LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
-LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
-LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
-
-struct bch_sb_field_disk_groups {
- struct bch_sb_field field;
- struct bch_disk_group entries[];
-} __packed __aligned(8);
-
/*
* On clean shutdown, store btree roots and current journal sequence number in
* the superblock:
@@ -809,27 +671,6 @@ struct bch_sb_field_clean {
__u64 _data[];
};
-struct journal_seq_blacklist_entry {
- __le64 start;
- __le64 end;
-};
-
-struct bch_sb_field_journal_seq_blacklist {
- struct bch_sb_field field;
- struct journal_seq_blacklist_entry start[];
-};
-
-struct bch_sb_field_errors {
- struct bch_sb_field field;
- struct bch_sb_field_error_entry {
- __le64 v;
- __le64 last_error_time;
- } entries[];
-};
-
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID, struct bch_sb_field_error_entry, v, 0, 16);
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
-
struct bch_sb_field_ext {
struct bch_sb_field field;
__le64 recovery_passes_required[2];
@@ -837,18 +678,6 @@ struct bch_sb_field_ext {
__le64 btrees_lost_data;
};
-struct bch_sb_field_downgrade_entry {
- __le16 version;
- __le64 recovery_passes[2];
- __le16 nr_errors;
- __le16 errors[] __counted_by(nr_errors);
-} __packed __aligned(2);
-
-struct bch_sb_field_downgrade {
- struct bch_sb_field field;
- struct bch_sb_field_downgrade_entry entries[];
-};
-
/* Superblock: */
/*
@@ -909,7 +738,6 @@ unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_re
#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
#define BCH_SB_SECTOR 8
-#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
#define BCH_SB_LAYOUT_SIZE_BITS_MAX 16 /* 32 MB */
@@ -1162,8 +990,9 @@ enum bch_version_upgrade_opts {
#define BCH_ERROR_ACTIONS() \
x(continue, 0) \
- x(ro, 1) \
- x(panic, 2)
+ x(fix_safe, 1) \
+ x(panic, 2) \
+ x(ro, 3)
enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
@@ -1557,9 +1386,10 @@ enum btree_id {
/*
* Maximum number of btrees that we will _ever_ have under the current scheme,
- * where we refer to them with bitfields
+ * where we refer to them with 64 bit bitfields - and we also need a bit for
+ * the interior btree node type:
*/
-#define BTREE_ID_NR_MAX 64
+#define BTREE_ID_NR_MAX 63
static inline bool btree_id_is_alloc(enum btree_id id)
{
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index f46978e5cb7c..587d7318a2e8 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -660,8 +660,9 @@ int bch2_bkey_format_invalid(struct bch_fs *c,
bch2_bkey_format_field_overflows(f, i)) {
unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
- u64 packed_max = f->bits_per_field[i]
- ? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
+ unsigned packed_bits = min(64, f->bits_per_field[i]);
+ u64 packed_max = packed_bits
+ ? ~((~0ULL << 1) << (packed_bits - 1))
: 0;
prt_printf(err, "field %u too large: %llu + %llu > %llu",
@@ -1064,7 +1065,7 @@ void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
{
const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
u8 *l = k->key_start;
- u8 *h = (u8 *) (k->_data + f->key_u64s) - 1;
+ u8 *h = (u8 *) ((u64 *) k->_data + f->key_u64s) - 1;
while (l < h) {
swap(*l, *h);
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index fcd43915df07..936357149cf0 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -194,6 +194,13 @@ static inline struct bpos bkey_max(struct bpos l, struct bpos r)
return bkey_gt(l, r) ? l : r;
}
+static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
+{
+ return bpos_eq(l.k->p, r.k->p) &&
+ bkey_bytes(l.k) == bkey_bytes(r.k) &&
+ !memcmp(l.v, r.v, bkey_val_bytes(l.k));
+}
+
void bch2_bpos_swab(struct bpos *);
void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index c2c3dae52186..bd32aac05192 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -398,8 +398,12 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
for (i = 0; i < nr_compat; i++)
switch (!write ? i : nr_compat - 1 - i) {
case 0:
- if (big_endian != CPU_BIG_ENDIAN)
+ if (big_endian != CPU_BIG_ENDIAN) {
+ bch2_bkey_swab_key(f, k);
+ } else if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
bch2_bkey_swab_key(f, k);
+ bch2_bkey_swab_key(f, k);
+ }
break;
case 1:
if (version < bcachefs_metadata_version_bkey_renumber)
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 726ef7483763..baef0722f5fb 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -129,7 +129,8 @@ static inline void bch2_bkey_compat(unsigned level, enum btree_id btree_id,
struct bkey_packed *k)
{
if (version < bcachefs_metadata_version_current ||
- big_endian != CPU_BIG_ENDIAN)
+ big_endian != CPU_BIG_ENDIAN ||
+ IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
__bch2_bkey_compat(level, btree_id, version,
big_endian, write, f, k);
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 9e4ed75d3675..4f5e411771ba 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -91,10 +91,11 @@ static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
}
static const struct rhashtable_params bch_btree_cache_params = {
- .head_offset = offsetof(struct btree, hash),
- .key_offset = offsetof(struct btree, hash_val),
- .key_len = sizeof(u64),
- .obj_cmpfn = bch2_btree_cache_cmp_fn,
+ .head_offset = offsetof(struct btree, hash),
+ .key_offset = offsetof(struct btree, hash_val),
+ .key_len = sizeof(u64),
+ .obj_cmpfn = bch2_btree_cache_cmp_fn,
+ .automatic_shrinking = true,
};
static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 8035c8b797ab..a0deb8266011 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -585,16 +585,17 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
bkey_version_in_future,
- "key version number higher than recorded: %llu > %llu",
- k.k->version.lo,
- atomic64_read(&c->key_version)))
+ "key version number higher than recorded %llu\n %s",
+ atomic64_read(&c->key_version),
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
atomic64_set(&c->key_version, k.k->version.lo);
}
if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
c, btree_bitmap_not_marked,
"btree ptr not marked in member info btree allocated bitmap\n %s",
- (bch2_bkey_val_to_text(&buf, c, k),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k),
buf.buf))) {
mutex_lock(&c->sb_lock);
bch2_dev_btree_bitmap_mark(c, k);
@@ -640,16 +641,30 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool in
target_depth = 0;
/* root */
- mutex_lock(&c->btree_root_lock);
- struct btree *b = bch2_btree_id_root(c, btree)->b;
- if (!btree_node_fake(b)) {
+ do {
+retry_root:
+ bch2_trans_begin(trans);
+
+ struct btree_iter iter;
+ bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
+ 0, bch2_btree_id_root(c, btree)->b->c.level, 0);
+ struct btree *b = bch2_btree_iter_peek_node(&iter);
+ ret = PTR_ERR_OR_ZERO(b);
+ if (ret)
+ goto err_root;
+
+ if (b != btree_node_root(c, b)) {
+ bch2_trans_iter_exit(trans, &iter);
+ goto retry_root;
+ }
+
gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX));
- ret = lockrestart_do(trans,
- bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1,
- NULL, NULL, bkey_i_to_s_c(&b->key), initial));
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+ ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial);
level = b->c.level;
- }
- mutex_unlock(&c->btree_root_lock);
+err_root:
+ bch2_trans_iter_exit(trans, &iter);
+ } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
if (ret)
return ret;
@@ -673,8 +688,7 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool in
static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
- return (int) btree_id_to_gc_phase(l) -
- (int) btree_id_to_gc_phase(r);
+ return cmp_int(gc_btree_order(l), gc_btree_order(r));
}
static int bch2_gc_btrees(struct bch_fs *c)
@@ -711,7 +725,7 @@ fsck_err:
static int bch2_mark_superblocks(struct bch_fs *c)
{
mutex_lock(&c->sb_lock);
- gc_pos_set(c, gc_phase(GC_PHASE_SB));
+ gc_pos_set(c, gc_phase(GC_PHASE_sb));
int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
mutex_unlock(&c->sb_lock);
@@ -874,6 +888,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
const struct bch_alloc_v4 *old;
int ret;
+ if (!bucket_valid(ca, k.k->p.offset))
+ return 0;
+
old = bch2_alloc_to_v4(k, &old_convert);
gc = new = *old;
@@ -900,6 +917,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, &old_gc, &gc, 0, true);
percpu_up_read(&c->mark_lock);
+ gc.fragmentation_lru = alloc_lru_idx_fragmentation(gc, ca);
+
if (fsck_err_on(new.data_type != gc.data_type, c,
alloc_key_data_type_wrong,
"bucket %llu:%llu gen %u has wrong data_type"
@@ -913,23 +932,19 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
#define copy_bucket_field(_errtype, _f) \
if (fsck_err_on(new._f != gc._f, c, _errtype, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
- ": got %u, should be %u", \
+ ": got %llu, should be %llu", \
iter->pos.inode, iter->pos.offset, \
gc.gen, \
bch2_data_type_str(gc.data_type), \
- new._f, gc._f)) \
+ (u64) new._f, (u64) gc._f)) \
new._f = gc._f; \
- copy_bucket_field(alloc_key_gen_wrong,
- gen);
- copy_bucket_field(alloc_key_dirty_sectors_wrong,
- dirty_sectors);
- copy_bucket_field(alloc_key_cached_sectors_wrong,
- cached_sectors);
- copy_bucket_field(alloc_key_stripe_wrong,
- stripe);
- copy_bucket_field(alloc_key_stripe_redundancy_wrong,
- stripe_redundancy);
+ copy_bucket_field(alloc_key_gen_wrong, gen);
+ copy_bucket_field(alloc_key_dirty_sectors_wrong, dirty_sectors);
+ copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors);
+ copy_bucket_field(alloc_key_stripe_wrong, stripe);
+ copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy);
+ copy_bucket_field(alloc_key_fragmentation_lru_wrong, fragmentation_lru);
#undef copy_bucket_field
if (!bch2_alloc_v4_cmp(*old, new))
@@ -943,7 +958,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
a->v = new;
/*
- * The trigger normally makes sure this is set, but we're not running
+ * The trigger normally makes sure these are set, but we're not running
* triggers:
*/
if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
@@ -990,6 +1005,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
buckets->first_bucket = ca->mi.first_bucket;
buckets->nbuckets = ca->mi.nbuckets;
+ buckets->nbuckets_minus_first =
+ buckets->nbuckets - buckets->first_bucket;
rcu_assign_pointer(ca->buckets_gc, buckets);
}
@@ -1003,12 +1020,14 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
continue;
}
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+ if (bucket_valid(ca, k.k->p.offset)) {
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
- struct bucket *g = gc_bucket(ca, k.k->p.offset);
- g->gen_valid = 1;
- g->gen = a->gen;
+ struct bucket *g = gc_bucket(ca, k.k->p.offset);
+ g->gen_valid = 1;
+ g->gen = a->gen;
+ }
0;
})));
bch2_dev_put(ca);
@@ -1209,7 +1228,7 @@ int bch2_check_allocations(struct bch_fs *c)
if (ret)
goto out;
- gc_pos_set(c, gc_phase(GC_PHASE_START));
+ gc_pos_set(c, gc_phase(GC_PHASE_start));
ret = bch2_mark_superblocks(c);
BUG_ON(ret);
@@ -1231,7 +1250,7 @@ out:
percpu_down_write(&c->mark_lock);
/* Indicates that gc is no longer in progress: */
- __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+ __gc_pos_set(c, gc_phase(GC_PHASE_not_running));
bch2_gc_free(c);
percpu_up_write(&c->mark_lock);
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index 1b6489d8e0f4..876d81e2017d 100644
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ -3,6 +3,7 @@
#define _BCACHEFS_BTREE_GC_H
#include "bkey.h"
+#include "btree_gc_types.h"
#include "btree_types.h"
int bch2_check_topology(struct bch_fs *);
@@ -32,36 +33,15 @@ int bch2_check_allocations(struct bch_fs *);
/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
- return (struct gc_pos) {
- .phase = phase,
- .level = 0,
- .pos = POS_MIN,
- };
-}
-
-static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
-{
- return cmp_int(l.phase, r.phase) ?:
- -cmp_int(l.level, r.level) ?:
- bpos_cmp(l.pos, r.pos);
-}
-
-static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
-{
- switch (id) {
-#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
- BCH_BTREE_IDS()
-#undef x
- default:
- BUG();
- }
+ return (struct gc_pos) { .phase = phase, };
}
static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
struct bpos pos)
{
return (struct gc_pos) {
- .phase = btree_id_to_gc_phase(btree),
+ .phase = GC_PHASE_btree,
+ .btree = btree,
.level = level,
.pos = pos,
};
@@ -76,6 +56,22 @@ static inline struct gc_pos gc_pos_btree_node(struct btree *b)
return gc_pos_btree(b->c.btree_id, b->c.level, b->key.k.p);
}
+static inline int gc_btree_order(enum btree_id btree)
+{
+ if (btree == BTREE_ID_stripes)
+ return -1;
+ return btree;
+}
+
+static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
+{
+ return cmp_int(l.phase, r.phase) ?:
+ cmp_int(gc_btree_order(l.btree),
+ gc_btree_order(r.btree)) ?:
+ -cmp_int(l.level, r.level) ?:
+ bpos_cmp(l.pos, r.pos);
+}
+
static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
unsigned seq;
diff --git a/fs/bcachefs/btree_gc_types.h b/fs/bcachefs/btree_gc_types.h
new file mode 100644
index 000000000000..b82c24bcc088
--- /dev/null
+++ b/fs/bcachefs/btree_gc_types.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_GC_TYPES_H
+#define _BCACHEFS_BTREE_GC_TYPES_H
+
+#include <linux/generic-radix-tree.h>
+
+enum gc_phase {
+ GC_PHASE_not_running,
+ GC_PHASE_start,
+ GC_PHASE_sb,
+ GC_PHASE_btree,
+};
+
+struct gc_pos {
+ enum gc_phase phase:8;
+ enum btree_id btree:8;
+ u16 level;
+ struct bpos pos;
+};
+
+struct reflink_gc {
+ u64 offset;
+ u32 size;
+ u32 refcount;
+};
+
+typedef GENRADIX(struct reflink_gc) reflink_gc_table;
+
+#endif /* _BCACHEFS_BTREE_GC_TYPES_H */
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index cbf8f5d90602..7bca15c604f5 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -519,7 +519,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
struct bch_dev *ca,
- struct btree *b, struct bset *i,
+ struct btree *b, struct bset *i, struct bkey_packed *k,
unsigned offset, int write)
{
prt_printf(out, bch2_log_msg(c, "%s"),
@@ -537,15 +537,20 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
b->written, btree_ptr_sectors_written(&b->key));
if (i)
prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+ if (k)
+ prt_printf(out, " bset byte offset %lu",
+ (unsigned long)(void *)k -
+ ((unsigned long)(void *)i & ~511UL));
prt_str(out, ": ");
}
-__printf(9, 10)
+__printf(10, 11)
static int __btree_err(int ret,
struct bch_fs *c,
struct bch_dev *ca,
struct btree *b,
struct bset *i,
+ struct bkey_packed *k,
int write,
bool have_retry,
enum bch_sb_error_id err_type,
@@ -555,7 +560,7 @@ static int __btree_err(int ret,
bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
va_list args;
- btree_err_msg(&out, c, ca, b, i, b->written, write);
+ btree_err_msg(&out, c, ca, b, i, k, b->written, write);
va_start(args, fmt);
prt_vprintf(&out, fmt, args);
@@ -611,9 +616,9 @@ fsck_err:
return ret;
}
-#define btree_err(type, c, ca, b, i, _err_type, msg, ...) \
+#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...) \
({ \
- int _ret = __btree_err(type, c, ca, b, i, write, have_retry, \
+ int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry, \
BCH_FSCK_ERR_##_err_type, \
msg, ##__VA_ARGS__); \
\
@@ -690,7 +695,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bch2_version_compatible(version),
-BCH_ERR_btree_node_read_err_incompatible,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_unsupported_version,
"unsupported bset version %u.%u",
BCH_VERSION_MAJOR(version),
@@ -698,7 +703,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
if (btree_err_on(version < c->sb.version_min,
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, NULL,
btree_node_bset_older_than_sb_min,
"bset version %u older than superblock version_min %u",
version, c->sb.version_min)) {
@@ -711,7 +716,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
if (btree_err_on(BCH_VERSION_MAJOR(version) >
BCH_VERSION_MAJOR(c->sb.version),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, NULL,
btree_node_bset_newer_than_sb,
"bset version %u newer than superblock version %u",
version, c->sb.version)) {
@@ -723,13 +728,13 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
-BCH_ERR_btree_node_read_err_incompatible,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_unsupported_version,
"BSET_SEPARATE_WHITEOUTS no longer supported");
if (btree_err_on(offset + sectors > btree_sectors(c),
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_past_end_of_btree_node,
"bset past end of btree node")) {
i->u64s = 0;
@@ -739,13 +744,13 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(offset && !i->u64s,
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_empty,
"empty bset");
btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_wrong_sector_offset,
"bset at wrong sector offset");
@@ -761,20 +766,20 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
/* XXX endianness */
btree_err_on(bp->seq != bn->keys.seq,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
bset_bad_seq,
"incorrect sequence number (wrong btree node)");
}
btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_btree,
"incorrect btree id");
btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_level,
"incorrect level");
@@ -793,7 +798,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_min_key,
"incorrect min_key: got %s should be %s",
(printbuf_reset(&buf1),
@@ -804,7 +809,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_max_key,
"incorrect max key %s",
(printbuf_reset(&buf1),
@@ -816,7 +821,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
-BCH_ERR_btree_node_read_err_bad_node,
- c, ca, b, i,
+ c, ca, b, i, NULL,
btree_node_bad_format,
"invalid bkey format: %s\n %s", buf1.buf,
(printbuf_reset(&buf2),
@@ -883,7 +888,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_past_bset_end,
"key extends past end of bset")) {
i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -892,14 +897,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_bad_format,
"invalid bkey format %u", k->format))
goto drop_this_key;
if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_bad_u64s,
"bad k->u64s %u (min %u max %zu)", k->u64s,
bkeyp_key_u64s(&b->format, k),
@@ -921,7 +926,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
bch2_bkey_val_to_text(&buf, c, u.s_c);
btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bad_bkey,
"invalid bkey: %s", buf.buf);
goto drop_this_key;
@@ -942,7 +947,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
bch2_bkey_to_text(&buf, u.k);
if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bkey_out_of_order,
"%s", buf.buf))
goto drop_this_key;
@@ -1011,13 +1016,13 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
if (bch2_meta_read_fault("btree"))
btree_err(-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_fault_injected,
"dynamic fault");
btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_magic,
"bad magic: want %llx, got %llx",
bset_magic(c), le64_to_cpu(b->data->magic));
@@ -1032,7 +1037,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(b->data->keys.seq != bp->seq,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_seq,
"got wrong btree node: got\n%s",
(printbuf_reset(&buf),
@@ -1041,7 +1046,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
} else {
btree_err_on(!b->data->keys.seq,
-BCH_ERR_btree_node_read_err_must_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bad_seq,
"bad btree header: seq 0\n%s",
(printbuf_reset(&buf),
@@ -1060,7 +1065,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_unknown_csum,
"unknown checksum type %llu", BSET_CSUM_TYPE(i));
@@ -1073,7 +1078,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(csum_bad,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_bad_csum,
"%s",
(printbuf_reset(&buf),
@@ -1088,7 +1093,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
!BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
-BCH_ERR_btree_node_read_err_incompatible,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_unsupported_version,
"btree node does not have NEW_EXTENT_OVERWRITE set");
@@ -1102,7 +1107,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_unknown_csum,
"unknown checksum type %llu", BSET_CSUM_TYPE(i));
@@ -1114,7 +1119,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(csum_bad,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_bad_csum,
"%s",
(printbuf_reset(&buf),
@@ -1152,14 +1157,14 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_err_on(blacklisted && first,
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
bset_blacklisted_journal_seq,
"first btree node bset has blacklisted journal seq (%llu)",
le64_to_cpu(i->journal_seq));
btree_err_on(blacklisted && ptr_written,
-BCH_ERR_btree_node_read_err_fixable,
- c, ca, b, i,
+ c, ca, b, i, NULL,
first_bset_blacklisted_journal_seq,
"found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
le64_to_cpu(i->journal_seq),
@@ -1178,7 +1183,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
if (ptr_written) {
btree_err_on(b->written < ptr_written,
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_data_missing,
"btree node data missing: expected %u sectors, found %u",
ptr_written, b->written);
@@ -1191,7 +1196,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
le64_to_cpu(bne->keys.journal_seq),
true),
-BCH_ERR_btree_node_read_err_want_retry,
- c, ca, b, NULL,
+ c, ca, b, NULL, NULL,
btree_node_bset_after_end,
"found bset signature after last bset");
}
@@ -1235,7 +1240,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
bch2_bkey_val_to_text(&buf, c, u.s_c);
btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i,
+ c, NULL, b, i, k,
btree_node_bad_bkey,
"%s", buf.buf);
@@ -1384,7 +1389,7 @@ static void btree_node_read_endio(struct bio *bio)
bch2_latency_acct(ca, rb->start_time, READ);
}
- queue_work(c->io_complete_wq, &rb->work);
+ queue_work(c->btree_read_complete_wq, &rb->work);
}
struct btree_node_read_all {
@@ -1471,18 +1476,18 @@ static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
written2 = btree_node_sectors_written(c, ra->buf[i]);
if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_replicas_sectors_written_mismatch,
"btree node sectors written mismatch: %u != %u",
written, written2) ||
btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_bset_after_end,
"found bset signature after last bset") ||
btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, NULL,
+ c, NULL, b, NULL, NULL,
btree_node_replicas_data_mismatch,
"btree node replicas content mismatch"))
dump_bset_maps = true;
@@ -1651,7 +1656,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
btree_node_read_all_replicas_done(&ra->cl.work);
} else {
continue_at(&ra->cl, btree_node_read_all_replicas_done,
- c->io_complete_wq);
+ c->btree_read_complete_wq);
}
return 0;
@@ -1732,7 +1737,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
if (sync)
btree_node_read_work(&rb->work);
else
- queue_work(c->io_complete_wq, &rb->work);
+ queue_work(c->btree_read_complete_wq, &rb->work);
}
}
@@ -2224,7 +2229,7 @@ do_write:
atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
INIT_WORK(&wbio->work, btree_write_submit);
- queue_work(c->io_complete_wq, &wbio->work);
+ queue_work(c->btree_write_submit_wq, &wbio->work);
return;
err:
set_btree_node_noevict(b);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index d3bcb4e4e230..19352a08ea20 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -221,11 +221,8 @@ static void bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path)
{
struct bch_fs *c = trans->c;
- unsigned i;
-
- EBUG_ON(path->btree_id >= BTREE_ID_NR);
- for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
+ for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
if (!path->l[i].b) {
BUG_ON(!path->cached &&
bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
@@ -251,8 +248,6 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
{
struct btree_trans *trans = iter->trans;
- BUG_ON(iter->btree_id >= BTREE_ID_NR);
-
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
@@ -1001,7 +996,7 @@ retry_all:
bch2_trans_unlock(trans);
cond_resched();
- trans->locked = true;
+ trans_set_locked(trans);
if (unlikely(trans->memory_allocation_failure)) {
struct closure cl;
@@ -3094,7 +3089,8 @@ u32 bch2_trans_begin(struct btree_trans *trans)
bch2_trans_srcu_unlock(trans);
trans->last_begin_ip = _RET_IP_;
- trans->locked = true;
+
+ trans_set_locked(trans);
if (trans->restarted) {
bch2_btree_path_traverse_all(trans);
@@ -3135,7 +3131,6 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
memset(trans, 0, sizeof(*trans));
- closure_init_stack(&trans->ref);
seqmutex_lock(&c->btree_trans_lock);
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
@@ -3155,22 +3150,16 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
BUG_ON(pos_task &&
pid == pos_task->pid &&
pos->locked);
-
- if (pos_task && pid < pos_task->pid) {
- list_add_tail(&trans->list, &pos->list);
- goto list_add_done;
- }
}
}
- list_add_tail(&trans->list, &c->btree_trans_list);
-list_add_done:
+
+ list_add(&trans->list, &c->btree_trans_list);
seqmutex_unlock(&c->btree_trans_lock);
got_trans:
trans->c = c;
trans->last_begin_time = local_clock();
trans->fn_idx = fn_idx;
trans->locking_wait.task = current;
- trans->locked = true;
trans->journal_replay_not_finished =
unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
atomic_inc_not_zero(&c->journal_keys.ref);
@@ -3204,6 +3193,9 @@ got_trans:
trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
trans->srcu_lock_time = jiffies;
trans->srcu_held = true;
+ trans_set_locked(trans);
+
+ closure_init_stack_release(&trans->ref);
return trans;
}
@@ -3240,7 +3232,6 @@ void bch2_trans_put(struct btree_trans *trans)
trans_for_each_update(trans, i)
__btree_path_put(trans->paths + i->path, true);
trans->nr_updates = 0;
- trans->locking_wait.task = NULL;
check_btree_paths_leaked(trans);
@@ -3261,6 +3252,13 @@ void bch2_trans_put(struct btree_trans *trans)
if (unlikely(trans->journal_replay_not_finished))
bch2_journal_keys_put(c);
+ /*
+	 * trans->ref protects trans->locking_wait.task and the btree_paths
+	 * array; both are used by the cycle detector
+ */
+ closure_return_sync(&trans->ref);
+ trans->locking_wait.task = NULL;
+
unsigned long *paths_allocated = trans->paths_allocated;
trans->paths_allocated = NULL;
trans->paths = NULL;
@@ -3278,8 +3276,6 @@ void bch2_trans_put(struct btree_trans *trans)
trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
if (trans) {
- closure_sync(&trans->ref);
-
seqmutex_lock(&c->btree_trans_lock);
list_del(&trans->list);
seqmutex_unlock(&c->btree_trans_lock);
@@ -3385,8 +3381,6 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
if (trans) {
- closure_sync(&trans->ref);
-
seqmutex_lock(&c->btree_trans_lock);
list_del(&trans->list);
seqmutex_unlock(&c->btree_trans_lock);
@@ -3406,8 +3400,10 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
bch2_time_stats_exit(&s->lock_hold_times);
}
- if (c->btree_trans_barrier_initialized)
+ if (c->btree_trans_barrier_initialized) {
+ synchronize_srcu_expedited(&c->btree_trans_barrier);
cleanup_srcu_struct(&c->btree_trans_barrier);
+ }
mempool_exit(&c->btree_trans_mem_pool);
mempool_exit(&c->btree_trans_pool);
}
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 75f5e6fe4634..2d3c0d45c37f 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -32,10 +32,11 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
}
static const struct rhashtable_params bch2_btree_key_cache_params = {
- .head_offset = offsetof(struct bkey_cached, hash),
- .key_offset = offsetof(struct bkey_cached, key),
- .key_len = sizeof(struct bkey_cached_key),
- .obj_cmpfn = bch2_btree_key_cache_cmp_fn,
+ .head_offset = offsetof(struct bkey_cached, hash),
+ .key_offset = offsetof(struct bkey_cached, key),
+ .key_len = sizeof(struct bkey_cached_key),
+ .obj_cmpfn = bch2_btree_key_cache_cmp_fn,
+ .automatic_shrinking = true,
};
__flatten
@@ -424,16 +425,16 @@ static int btree_key_cache_fill(struct btree_trans *trans,
goto err;
}
- if (!bch2_btree_node_relock(trans, ck_path, 0)) {
+ ret = bch2_trans_relock(trans);
+ if (ret) {
kfree(new_k);
- trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
- ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
goto err;
}
- ret = bch2_trans_relock(trans);
- if (ret) {
+ if (!bch2_btree_node_relock(trans, ck_path, 0)) {
kfree(new_k);
+ trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
goto err;
}
}
@@ -840,7 +841,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
- freed++;
bc->nr_freed_nonpcpu--;
bc->freed++;
}
@@ -854,7 +854,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
- freed++;
bc->nr_freed_pcpu--;
bc->freed++;
}
@@ -876,23 +875,22 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
bc->skipped_dirty++;
- goto next;
} else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
bc->skipped_accessed++;
- goto next;
- } else if (bkey_cached_lock_for_evict(ck)) {
+ } else if (!bkey_cached_lock_for_evict(ck)) {
+ bc->skipped_lock_fail++;
+ } else {
bkey_cached_evict(bc, ck);
bkey_cached_free(bc, ck);
bc->moved_to_freelist++;
- } else {
- bc->skipped_lock_fail++;
+ freed++;
}
scanned++;
if (scanned >= nr)
break;
-next:
+
pos = next;
}
@@ -917,6 +915,14 @@ static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
long nr = atomic_long_read(&bc->nr_keys) -
atomic_long_read(&bc->nr_dirty);
+ /*
+ * Avoid hammering our shrinker too much if it's nearly empty - the
+	 * shrinker code doesn't take into account how big our cache is; if it's
+	 * mostly empty but the system is under memory pressure, it causes nasty
+ * lock contention:
+ */
+ nr -= 128;
+
return max(0L, nr);
}
@@ -1025,9 +1031,10 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
if (!shrink)
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
bc->shrink = shrink;
- shrink->seeks = 0;
shrink->count_objects = bch2_btree_key_cache_count;
shrink->scan_objects = bch2_btree_key_cache_scan;
+ shrink->batch = 1 << 14;
+ shrink->seeks = 0;
shrink->private_data = c;
shrinker_register(shrink);
return 0;
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index c3e9b0cc7bbd..c51826fd557f 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -215,6 +215,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
if (unlikely(!best)) {
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
@@ -230,7 +231,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
prt_newline(&buf);
}
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
printbuf_exit(&buf);
BUG();
}
@@ -791,7 +792,7 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
return bch2_trans_relock_fail(trans, path, &f, trace);
}
- trans->locked = true;
+ trans_set_locked(trans);
out:
bch2_trans_verify_locks(trans);
return 0;
@@ -811,16 +812,14 @@ void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
__bch2_trans_unlock(trans);
- trans->locked = false;
- trans->last_unlock_ip = _RET_IP_;
+ trans_set_unlocked(trans);
}
void bch2_trans_unlock(struct btree_trans *trans)
{
__bch2_trans_unlock(trans);
- trans->locked = false;
- trans->last_unlock_ip = _RET_IP_;
+ trans_set_unlocked(trans);
}
void bch2_trans_unlock_long(struct btree_trans *trans)
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 7f41545b9147..75a6274c7d27 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -193,6 +193,28 @@ int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);
/* lock: */
+static inline void trans_set_locked(struct btree_trans *trans)
+{
+ if (!trans->locked) {
+ trans->locked = true;
+ trans->last_unlock_ip = 0;
+
+ trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
+ current->flags |= PF_MEMALLOC_NOFS;
+ }
+}
+
+static inline void trans_set_unlocked(struct btree_trans *trans)
+{
+ if (trans->locked) {
+ trans->locked = false;
+ trans->last_unlock_ip = _RET_IP_;
+
+ if (!trans->pf_memalloc_nofs)
+ current->flags &= ~PF_MEMALLOC_NOFS;
+ }
+}
+
static inline int __btree_node_lock_nopath(struct btree_trans *trans,
struct btree_bkey_cached_common *b,
enum six_lock_type type,
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 45cb8149d374..2cb0442f6cc9 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -72,10 +72,11 @@ static bool found_btree_node_is_readable(struct btree_trans *trans,
struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false);
bool ret = !IS_ERR_OR_NULL(b);
- if (ret) {
- f->sectors_written = b->written;
- six_unlock_read(&b->c.lock);
- }
+ if (!ret)
+ return ret;
+
+ f->sectors_written = b->written;
+ six_unlock_read(&b->c.lock);
/*
* We might update this node's range; if that happens, we need the node
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index d63db4fefe73..48cb1a7d31c5 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -484,6 +484,7 @@ struct btree_trans {
bool lock_may_not_fail:1;
bool srcu_held:1;
bool locked:1;
+ bool pf_memalloc_nofs:1;
bool write_locked:1;
bool used_mempool:1;
bool in_traverse_all:1;
@@ -761,13 +762,13 @@ static inline bool btree_node_type_needs_gc(enum btree_node_type type)
static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
BCH_BTREE_IDS()
#undef x
;
- return (1U << type) & mask;
+ return BIT_ULL(type) & mask;
}
static inline bool btree_id_is_extents(enum btree_id btree)
@@ -777,35 +778,35 @@ static inline bool btree_id_is_extents(enum btree_id btree)
static inline bool btree_type_has_snapshots(enum btree_id id)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
BCH_BTREE_IDS()
#undef x
;
- return (1U << id) & mask;
+ return BIT_ULL(id) & mask;
}
static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
BCH_BTREE_IDS()
#undef x
;
- return (1U << id) & mask;
+ return BIT_ULL(id) & mask;
}
static inline bool btree_type_has_ptrs(enum btree_id id)
{
- const unsigned mask = 0
+ const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr)
BCH_BTREE_IDS()
#undef x
;
- return (1U << id) & mask;
+ return BIT_ULL(id) & mask;
}
struct btree_root {
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 75c8a196b3f6..d0e92d948002 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
+#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
@@ -492,6 +494,41 @@ int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
return ret;
}
+/**
+ * In check and repair code, when checking references to write buffer btrees we
+ * need to issue a flush before we have a definitive error: this issues a flush
+ * if the key in question hasn't yet been checked.
+ */
+int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
+ struct bkey_s_c referring_k,
+ struct bkey_buf *last_flushed)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_buf tmp;
+ int ret = 0;
+
+ bch2_bkey_buf_init(&tmp);
+
+ if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) {
+ bch2_bkey_buf_reassemble(&tmp, c, referring_k);
+
+ if (bkey_is_btree_ptr(referring_k.k)) {
+ bch2_trans_unlock(trans);
+ bch2_btree_interior_updates_flush(c);
+ }
+
+ ret = bch2_btree_write_buffer_flush_sync(trans);
+ if (ret)
+ goto err;
+
+ bch2_bkey_buf_copy(last_flushed, c, tmp.k);
+ ret = -BCH_ERR_transaction_restart_write_buffer_flush;
+ }
+err:
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
+}
+
static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
diff --git a/fs/bcachefs/btree_write_buffer.h b/fs/bcachefs/btree_write_buffer.h
index eebcd2b15249..dd5e64218b50 100644
--- a/fs/bcachefs/btree_write_buffer.h
+++ b/fs/bcachefs/btree_write_buffer.h
@@ -23,6 +23,9 @@ int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
int bch2_btree_write_buffer_tryflush(struct btree_trans *);
+struct bkey_buf;
+int bch2_btree_write_buffer_maybe_flush(struct btree_trans *, struct bkey_s_c, struct bkey_buf *);
+
struct journal_keys_to_wb {
struct btree_write_buffer_keys *wb;
size_t room;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index b469586517a8..314ee3e0187f 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -465,143 +465,172 @@ int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64
return bch2_update_replicas_list(trans, &r.e, sectors);
}
-int bch2_check_fix_ptrs(struct btree_trans *trans,
- enum btree_id btree, unsigned level, struct bkey_s_c k,
- enum btree_iter_update_trigger_flags flags)
+static int bch2_check_fix_ptr(struct btree_trans *trans,
+ struct bkey_s_c k,
+ struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry,
+ bool *do_update)
{
struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry_c;
- struct extent_ptr_decoded p = { 0 };
- bool do_update = false;
struct printbuf buf = PRINTBUF;
int ret = 0;
- percpu_down_read(&c->mark_lock);
+ struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
+ if (!ca) {
+ if (fsck_err(c, ptr_to_invalid_device,
+ "pointer to missing device %u\n"
+ "while marking %s",
+ p.ptr.dev,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+ return 0;
+ }
- bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
- struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
- if (!ca) {
- if (fsck_err(c, ptr_to_invalid_device,
- "pointer to missing device %u\n"
- "while marking %s",
- p.ptr.dev,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
- continue;
- }
+ struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ if (!g) {
+ if (fsck_err(c, ptr_to_invalid_device,
+ "pointer to invalid bucket on device %u\n"
+ "while marking %s",
+ p.ptr.dev,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+ goto out;
+ }
- struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry_c);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
- if (fsck_err_on(!g->gen_valid,
- c, ptr_to_missing_alloc_key,
- "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (!p.ptr.cached) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- } else {
- do_update = true;
- }
+ if (fsck_err_on(!g->gen_valid,
+ c, ptr_to_missing_alloc_key,
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (!p.ptr.cached) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ } else {
+ *do_update = true;
}
+ }
- if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
- c, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (!p.ptr.cached &&
- (g->data_type != BCH_DATA_btree ||
- data_type == BCH_DATA_btree)) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- g->data_type = 0;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- } else {
- do_update = true;
- }
+ if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
+ c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (!p.ptr.cached &&
+ (g->data_type != BCH_DATA_btree ||
+ data_type == BCH_DATA_btree)) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ } else {
+ *do_update = true;
+ }
+ }
+
+ if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
+ c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+
+ if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
+ c, stale_dirty_ptr,
+ "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ *do_update = true;
+
+ if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
+ goto out;
+
+ if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
+ c, ptr_bucket_data_type_mismatch,
+ "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_type_str(g->data_type),
+ bch2_data_type_str(data_type),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (data_type == BCH_DATA_btree) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ g->data_type = data_type;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ } else {
+ *do_update = true;
}
+ }
+
+ if (p.has_ec) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
- if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
- c, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ if (fsck_err_on(!m || !m->alive,
+ c, ptr_to_missing_stripe,
+ "pointer to nonexistent stripe %llu\n"
"while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen,
+ (u64) p.ec.idx,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
+ *do_update = true;
- if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
- c, stale_dirty_ptr,
- "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
+ c, ptr_to_incorrect_stripe,
+ "pointer does not match stripe %llu\n"
"while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
- p.ptr.gen, g->gen,
+ (u64) p.ec.idx,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
+ *do_update = true;
+ }
+out:
+fsck_err:
+ bch2_dev_put(ca);
+ printbuf_exit(&buf);
+ return ret;
+}
- if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
- goto next;
+int bch2_check_fix_ptrs(struct btree_trans *trans,
+ enum btree_id btree, unsigned level, struct bkey_s_c k,
+ enum btree_iter_update_trigger_flags flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry_c;
+ struct extent_ptr_decoded p = { 0 };
+ bool do_update = false;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
- if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
- c, ptr_bucket_data_type_mismatch,
- "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- if (data_type == BCH_DATA_btree) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- g->data_type = data_type;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- } else {
- do_update = true;
- }
- }
+ percpu_down_read(&c->mark_lock);
- if (p.has_ec) {
- struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
-
- if (fsck_err_on(!m || !m->alive, c,
- ptr_to_missing_stripe,
- "pointer to nonexistent stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
-
- if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
- ptr_to_incorrect_stripe,
- "pointer does not match stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- do_update = true;
- }
-next:
- bch2_dev_put(ca);
+ bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
+ ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
+ if (ret)
+ goto err;
}
if (do_update) {
@@ -716,7 +745,6 @@ found:
bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
}
err:
-fsck_err:
percpu_up_read(&c->mark_lock);
printbuf_exit(&buf);
return ret;
@@ -777,7 +805,7 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
- *bucket_gen(ca, bucket_nr),
+ bucket_gen_get(ca, bucket_nr),
bch2_data_type_str(bucket_data_type ?: ptr_data_type),
ptr->gen,
(printbuf_reset(&buf),
@@ -987,6 +1015,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
enum btree_iter_update_trigger_flags flags)
{
bool insert = !(flags & BTREE_TRIGGER_overwrite);
+ struct printbuf buf = PRINTBUF;
int ret = 0;
struct bch_fs *c = trans->c;
@@ -1019,6 +1048,13 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_gc) {
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, bucket.offset);
+ if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
+ p.ptr.dev,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new);
@@ -1027,10 +1063,12 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, &old, &new, 0, true);
}
bucket_unlock(g);
+err_unlock:
percpu_up_read(&c->mark_lock);
}
err:
bch2_dev_put(ca);
+ printbuf_exit(&buf);
return ret;
}
@@ -1134,7 +1172,7 @@ static int __trigger_extent(struct btree_trans *trans,
r.e.nr_required = 1;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- s64 disk_sectors;
+ s64 disk_sectors = 0;
ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
if (ret < 0)
return ret;
@@ -1318,10 +1356,11 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 b, enum bch_data_type data_type, unsigned sectors,
enum btree_iter_update_trigger_flags flags)
{
- int ret = 0;
-
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, b);
+ if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
+ ca->dev_idx, bch2_data_type_str(data_type)))
+ goto err_unlock;
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
@@ -1330,29 +1369,27 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type))) {
- ret = -EIO;
+ bch2_data_type_str(data_type)))
goto err;
- }
if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
bch2_data_type_str(g->data_type ?: data_type),
- g->dirty_sectors, sectors)) {
- ret = -EIO;
+ g->dirty_sectors, sectors))
goto err;
- }
g->data_type = data_type;
g->dirty_sectors += sectors;
struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
+ bch2_dev_usage_update(c, ca, &old, &new, 0, true);
+ percpu_up_read(&c->mark_lock);
+ return 0;
err:
bucket_unlock(g);
- if (!ret)
- bch2_dev_usage_update(c, ca, &old, &new, 0, true);
+err_unlock:
percpu_up_read(&c->mark_lock);
- return ret;
+ return -EIO;
}
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
@@ -1595,6 +1632,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
bucket_gens->first_bucket = ca->mi.first_bucket;
bucket_gens->nbuckets = nbuckets;
+ bucket_gens->nbuckets_minus_first =
+ bucket_gens->nbuckets - bucket_gens->first_bucket;
if (resize) {
down_write(&c->gc_lock);
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 617ffde2fb7a..8ad4be73860c 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -93,7 +93,8 @@ static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
struct bucket_array *buckets = gc_bucket_array(ca);
- BUG_ON(!bucket_valid(ca, b));
+ if (b - buckets->first_bucket >= buckets->nbuckets_minus_first)
+ return NULL;
return buckets->b + b;
}
@@ -110,10 +111,19 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
struct bucket_gens *gens = bucket_gens(ca);
- BUG_ON(!bucket_valid(ca, b));
+ if (b - gens->first_bucket >= gens->nbuckets_minus_first)
+ return NULL;
return gens->b + b;
}
+static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b)
+{
+ rcu_read_lock();
+ u8 gen = *bucket_gen(ca, b);
+ rcu_read_unlock();
+ return gen;
+}
+
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
const struct bch_extent_ptr *ptr)
{
@@ -170,19 +180,22 @@ static inline int gen_after(u8 a, u8 b)
return r > 0 ? r : 0;
}
-static inline u8 dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
+static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
- return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+ u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr));
+ if (!gen)
+ return -1;
+ return gen_after(*gen, ptr->gen);
}
/**
* dev_ptr_stale() - check if a pointer points into a bucket that has been
* invalidated.
*/
-static inline u8 dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
+static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
rcu_read_lock();
- u8 ret = dev_ptr_stale_rcu(ca, ptr);
+ int ret = dev_ptr_stale_rcu(ca, ptr);
rcu_read_unlock();
return ret;
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 6a31740222a7..f636e17c4caf 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -22,6 +22,7 @@ struct bucket_array {
struct rcu_head rcu;
u16 first_bucket;
size_t nbuckets;
+ size_t nbuckets_minus_first;
struct bucket b[];
};
@@ -29,6 +30,7 @@ struct bucket_gens {
struct rcu_head rcu;
u16 first_bucket;
size_t nbuckets;
+ size_t nbuckets_minus_first;
u8 b[];
};
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 9e54323f0f5f..6d82e1165adc 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -216,7 +216,8 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
ret = PTR_ERR_OR_ZERO(optstr) ?:
bch2_parse_mount_opts(NULL, &thr->opts, optstr);
- kfree(optstr);
+ if (!IS_ERR(optstr))
+ kfree(optstr);
if (ret)
goto err;
@@ -319,7 +320,8 @@ static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
return ret;
ret = bch2_dev_add(c, path);
- kfree(path);
+ if (!IS_ERR(path))
+ kfree(path);
return ret;
}
@@ -850,7 +852,8 @@ static long bch2_ioctl_fsck_online(struct bch_fs *c,
ret = PTR_ERR_OR_ZERO(optstr) ?:
bch2_parse_mount_opts(c, &thr->opts, optstr);
- kfree(optstr);
+ if (!IS_ERR(optstr))
+ kfree(optstr);
if (ret)
goto err;
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 363644451106..0f40b585ce2b 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -132,14 +132,9 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
{
struct io_timer *ret = NULL;
- spin_lock(&clock->timer_lock);
-
if (clock->timers.used &&
time_after_eq(now, clock->timers.data[0]->expire))
heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
-
- spin_unlock(&clock->timer_lock);
-
return ret;
}
@@ -148,8 +143,10 @@ void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
struct io_timer *timer;
unsigned long now = atomic64_add_return(sectors, &clock->now);
+ spin_lock(&clock->timer_lock);
while ((timer = get_expired_timer(clock, now)))
timer->fn(timer);
+ spin_unlock(&clock->timer_lock);
}
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 0d807c2ce9c6..0087b8555ead 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -5,7 +5,9 @@
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
+#include "compress.h"
#include "data_update.h"
+#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
@@ -202,9 +204,8 @@ restart_drop_conflicting_replicas:
bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
/* Now, drop excess replicas: */
-restart_drop_extra_replicas:
-
rcu_read_lock();
+restart_drop_extra_replicas:
bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
@@ -455,6 +456,38 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
}
}
+void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ printbuf_tabstop_push(out, 20);
+ prt_str(out, "rewrite ptrs:\t");
+ bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
+ prt_newline(out);
+
+ prt_str(out, "kill ptrs:\t");
+ bch2_prt_u64_base2(out, data_opts->kill_ptrs);
+ prt_newline(out);
+
+ prt_str(out, "target:\t");
+ bch2_target_to_text(out, c, data_opts->target);
+ prt_newline(out);
+
+ prt_str(out, "compression:\t");
+ bch2_compression_opt_to_text(out, background_compression(*io_opts));
+ prt_newline(out);
+
+ prt_str(out, "extra replicas:\t");
+ prt_u64(out, data_opts->extra_replicas);
+}
+
+void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
+{
+ bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
+ prt_newline(out);
+ bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
+}
+
int bch2_extent_drop_ptrs(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
@@ -644,6 +677,16 @@ int bch2_data_update_init(struct btree_trans *trans,
if (!(durability_have + durability_removing))
m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);
+ if (!m->op.nr_replicas) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_data_update_to_text(&buf, m);
+ WARN(1, "trying to move an extent, but nr_replicas=0\n%s", buf.buf);
+ printbuf_exit(&buf);
+ ret = -BCH_ERR_data_update_done;
+ goto done;
+ }
+
m->op.nr_replicas_required = m->op.nr_replicas;
if (reserve_sectors) {
diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h
index 991095bbd469..8d36365bdea8 100644
--- a/fs/bcachefs/data_update.h
+++ b/fs/bcachefs/data_update.h
@@ -17,6 +17,9 @@ struct data_update_opts {
unsigned write_flags;
};
+void bch2_data_update_opts_to_text(struct printbuf *, struct bch_fs *,
+ struct bch_io_opts *, struct data_update_opts *);
+
struct data_update {
/* extent being updated: */
enum btree_id btree_id;
@@ -27,6 +30,8 @@ struct data_update {
struct bch_write_op op;
};
+void bch2_data_update_to_text(struct printbuf *, struct data_update *);
+
int bch2_data_update_index_update(struct bch_write_op *);
void bch2_data_update_read_done(struct data_update *,
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 51cbf3928361..ebabab171fe5 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -568,6 +568,32 @@ static const struct file_operations cached_btree_nodes_ops = {
.read = bch2_cached_btree_nodes_read,
};
+typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);
+
+static void list_sort(struct list_head *head, list_cmp_fn cmp)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, head)
+ while (!list_is_last(pos, head) &&
+ cmp(pos, pos->next) > 0) {
+ struct list_head *pos2, *next = pos->next;
+
+ list_del(next);
+ list_for_each(pos2, head)
+ if (cmp(next, pos2) < 0)
+ goto pos_found;
+ BUG();
+pos_found:
+ list_add_tail(next, pos2);
+ }
+}
+
+static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
+{
+ return cmp_int(l, r);
+}
+
static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
@@ -575,41 +601,39 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
- u32 seq;
i->ubuf = buf;
i->size = size;
i->ret = 0;
restart:
seqmutex_lock(&c->btree_trans_lock);
- list_for_each_entry(trans, &c->btree_trans_list, list) {
- struct task_struct *task = READ_ONCE(trans->locking_wait.task);
+ list_sort(&c->btree_trans_list, list_ptr_order_cmp);
- if (!task || task->pid <= i->iter)
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ if ((ulong) trans <= i->iter)
continue;
- closure_get(&trans->ref);
- seq = seqmutex_seq(&c->btree_trans_lock);
- seqmutex_unlock(&c->btree_trans_lock);
+ i->iter = (ulong) trans;
- ret = flush_buf(i);
- if (ret) {
- closure_put(&trans->ref);
- goto unlocked;
- }
+ if (!closure_get_not_zero(&trans->ref))
+ continue;
+
+ u32 seq = seqmutex_unlock(&c->btree_trans_lock);
bch2_btree_trans_to_text(&i->buf, trans);
prt_printf(&i->buf, "backtrace:\n");
printbuf_indent_add(&i->buf, 2);
- bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
+ bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL);
printbuf_indent_sub(&i->buf, 2);
prt_newline(&i->buf);
- i->iter = task->pid;
-
closure_put(&trans->ref);
+ ret = flush_buf(i);
+ if (ret)
+ goto unlocked;
+
if (!seqmutex_relock(&c->btree_trans_lock, seq))
goto restart;
}
@@ -804,50 +828,55 @@ static const struct file_operations btree_transaction_stats_op = {
.read = btree_transaction_stats_read,
};
-static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
+/* walk btree transactions until we find a deadlock and print it */
+static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct dump_iter *i = file->private_data;
- struct bch_fs *c = i->c;
struct btree_trans *trans;
- ssize_t ret = 0;
- u32 seq;
-
- i->ubuf = buf;
- i->size = size;
- i->ret = 0;
-
- if (i->iter)
- goto out;
+ ulong iter = 0;
restart:
seqmutex_lock(&c->btree_trans_lock);
- list_for_each_entry(trans, &c->btree_trans_list, list) {
- struct task_struct *task = READ_ONCE(trans->locking_wait.task);
+ list_sort(&c->btree_trans_list, list_ptr_order_cmp);
- if (!task || task->pid <= i->iter)
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ if ((ulong) trans <= iter)
continue;
- closure_get(&trans->ref);
- seq = seqmutex_seq(&c->btree_trans_lock);
- seqmutex_unlock(&c->btree_trans_lock);
+ iter = (ulong) trans;
- ret = flush_buf(i);
- if (ret) {
- closure_put(&trans->ref);
- goto out;
- }
+ if (!closure_get_not_zero(&trans->ref))
+ continue;
- bch2_check_for_deadlock(trans, &i->buf);
+ u32 seq = seqmutex_unlock(&c->btree_trans_lock);
- i->iter = task->pid;
+ bool found = bch2_check_for_deadlock(trans, out) != 0;
closure_put(&trans->ref);
+ if (found)
+ return;
+
if (!seqmutex_relock(&c->btree_trans_lock, seq))
goto restart;
}
seqmutex_unlock(&c->btree_trans_lock);
-out:
+}
+
+static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ ssize_t ret = 0;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ if (!i->iter) {
+ btree_deadlock_to_text(&i->buf, c);
+ i->iter++;
+ }
+
if (i->buf.allocation_failure)
ret = -ENOMEM;
diff --git a/fs/bcachefs/disk_groups_format.h b/fs/bcachefs/disk_groups_format.h
new file mode 100644
index 000000000000..698990bbf1d2
--- /dev/null
+++ b/fs/bcachefs/disk_groups_format.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DISK_GROUPS_FORMAT_H
+#define _BCACHEFS_DISK_GROUPS_FORMAT_H
+
+#define BCH_SB_LABEL_SIZE 32
+
+struct bch_disk_group {
+ __u8 label[BCH_SB_LABEL_SIZE];
+ __le64 flags[2];
+} __packed __aligned(8);
+
+LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
+LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
+LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
+
+struct bch_sb_field_disk_groups {
+ struct bch_sb_field field;
+ struct bch_disk_group entries[];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_DISK_GROUPS_FORMAT_H */
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index b26dc7424662..83e279d41829 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -268,6 +268,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
@@ -289,6 +290,13 @@ static int mark_stripe_bucket(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_gc) {
percpu_down_read(&c->mark_lock);
struct bucket *g = gc_bucket(ca, bucket.offset);
+ if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
+ ptr->dev,
+ (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
@@ -297,10 +305,12 @@ static int mark_stripe_bucket(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, &old, &new, 0, true);
}
bucket_unlock(g);
+err_unlock:
percpu_up_read(&c->mark_lock);
}
err:
bch2_dev_put(ca);
+ printbuf_exit(&buf);
return ret;
}
@@ -714,10 +724,12 @@ static void ec_block_endio(struct bio *bio)
bch2_blk_status_to_str(bio->bi_status)))
clear_bit(ec_bio->idx, ec_bio->buf->valid);
- if (dev_ptr_stale(ca, ptr)) {
+ int stale = dev_ptr_stale(ca, ptr);
+ if (stale) {
bch_err_ratelimited(ca->fs,
- "error %s stripe: stale pointer after io",
- bio_data_dir(bio) == READ ? "reading from" : "writing to");
+ "error %s stripe: stale/invalid pointer (%i) after io",
+ bio_data_dir(bio) == READ ? "reading from" : "writing to",
+ stale);
clear_bit(ec_bio->idx, ec_bio->buf->valid);
}
@@ -743,10 +755,12 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
return;
}
- if (dev_ptr_stale(ca, ptr)) {
+ int stale = dev_ptr_stale(ca, ptr);
+ if (stale) {
bch_err_ratelimited(c,
- "error %s stripe: stale pointer",
- rw == READ ? "reading from" : "writing to");
+ "error %s stripe: stale pointer (%i)",
+ rw == READ ? "reading from" : "writing to",
+ stale);
clear_bit(idx, buf->valid);
return;
}
@@ -908,7 +922,7 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
- if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
+ if (c->gc_pos.phase != GC_PHASE_not_running &&
!genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index dbe35b80bc0b..58612abf7927 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -116,6 +116,9 @@
x(ENOENT, ENOENT_dev_idx_not_found) \
x(ENOTEMPTY, ENOTEMPTY_dir_not_empty) \
x(ENOTEMPTY, ENOTEMPTY_subvol_not_empty) \
+ x(EEXIST, EEXIST_str_hash_set) \
+ x(EEXIST, EEXIST_discard_in_flight_add) \
+ x(EEXIST, EEXIST_subvolume_create) \
x(0, open_buckets_empty) \
x(0, freelist_empty) \
x(BCH_ERR_freelist_empty, no_buckets_found) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index c66eeffcd7f2..d95c40f1b6af 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -15,6 +15,7 @@ bool bch2_inconsistent_error(struct bch_fs *c)
switch (c->opts.errors) {
case BCH_ON_ERROR_continue:
return false;
+ case BCH_ON_ERROR_fix_safe:
case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
@@ -191,6 +192,12 @@ static void prt_actioning(struct printbuf *out, const char *action)
prt_str(out, "ing");
}
+static const u8 fsck_flags_extra[] = {
+#define x(t, n, flags) [BCH_FSCK_ERR_##t] = flags,
+ BCH_SB_ERRS()
+#undef x
+};
+
int bch2_fsck_err(struct bch_fs *c,
enum bch_fsck_flags flags,
enum bch_sb_error_id err,
@@ -203,6 +210,9 @@ int bch2_fsck_err(struct bch_fs *c,
int ret = -BCH_ERR_fsck_ignore;
const char *action_orig = "fix?", *action = action_orig;
+ if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra)))
+ flags |= fsck_flags_extra[err];
+
if ((flags & FSCK_CAN_FIX) &&
test_bit(err, c->sb.errors_silent))
return -BCH_ERR_fsck_fix;
@@ -265,7 +275,14 @@ int bch2_fsck_err(struct bch_fs *c,
prt_printf(out, bch2_log_msg(c, ""));
#endif
- if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
+ if ((flags & FSCK_CAN_FIX) &&
+ (flags & FSCK_AUTOFIX) &&
+ (c->opts.errors == BCH_ON_ERROR_continue ||
+ c->opts.errors == BCH_ON_ERROR_fix_safe)) {
+ prt_str(out, ", ");
+ prt_actioning(out, action);
+ ret = -BCH_ERR_fsck_fix;
+ } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
if (c->opts.errors != BCH_ON_ERROR_continue ||
!(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
prt_str(out, ", shutting down");
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 36caedf72d89..777711504c35 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -108,13 +108,6 @@ struct fsck_err_state {
char *last_msg;
};
-enum bch_fsck_flags {
- FSCK_CAN_FIX = 1 << 0,
- FSCK_CAN_IGNORE = 1 << 1,
- FSCK_NEED_FSCK = 1 << 2,
- FSCK_NO_RATELIMIT = 1 << 3,
-};
-
#define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err)
__printf(4, 5) __cold
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 469037929685..410b8bd81b5a 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -137,7 +137,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
- if (p.ptr.cached && (!ca || dev_ptr_stale(ca, &p.ptr)))
+ if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
continue;
f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
@@ -999,7 +999,7 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
bch2_bkey_drop_ptrs(k, ptr,
ptr->cached &&
(ca = bch2_dev_rcu(c, ptr->dev)) &&
- dev_ptr_stale_rcu(ca, ptr));
+ dev_ptr_stale_rcu(ca, ptr) > 0);
rcu_read_unlock();
return bkey_deleted(k.k);
@@ -1024,8 +1024,11 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
prt_str(out, " cached");
if (ptr->unwritten)
prt_str(out, " unwritten");
- if (bucket_valid(ca, b) && dev_ptr_stale_rcu(ca, ptr))
+ int stale = dev_ptr_stale_rcu(ca, ptr);
+ if (stale > 0)
prt_printf(out, " stale");
+ else if (stale)
+ prt_printf(out, " invalid");
}
rcu_read_unlock();
--out->atomic;
diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h
index 24840aee335c..795f4fc0bab1 100644
--- a/fs/bcachefs/eytzinger.h
+++ b/fs/bcachefs/eytzinger.h
@@ -48,7 +48,7 @@ static inline unsigned eytzinger1_right_child(unsigned i)
static inline unsigned eytzinger1_first(unsigned size)
{
- return rounddown_pow_of_two(size);
+ return size ? rounddown_pow_of_two(size) : 0;
}
static inline unsigned eytzinger1_last(unsigned size)
@@ -101,7 +101,9 @@ static inline unsigned eytzinger1_prev(unsigned i, unsigned size)
static inline unsigned eytzinger1_extra(unsigned size)
{
- return (size + 1 - rounddown_pow_of_two(size)) << 1;
+ return size
+ ? (size + 1 - rounddown_pow_of_two(size)) << 1
+ : 0;
}
static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size,
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 6b69e5cd68dd..54873ecc635c 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -437,8 +437,8 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
*/
/*
- * PageWriteback is effectively our ref on the inode - fixup i_blocks
- * before calling end_page_writeback:
+ * The writeback flag is effectively our ref on the inode -
+ * fixup i_blocks before calling folio_end_writeback:
*/
bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
@@ -898,7 +898,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
darray_for_each(fs, fi) {
f = *fi;
f_len = min(end, folio_end_pos(f)) - f_pos;
- f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
+ f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
if (!f_copied) {
folios_trunc(&fs, fi);
break;
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 09d21aef879a..049b61bc9a5b 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -609,8 +609,10 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
if (unlikely(ret))
goto err_put_write_ref;
- if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
+ if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
+ ret = -EINVAL;
goto err_put_write_ref;
+ }
inode_dio_begin(&inode->v);
bch2_pagecache_block_get(inode);
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 205a323ffc6d..79a0c8732bce 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -308,8 +308,8 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg)
return ret;
}
-static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
- struct bch_ioctl_subvolume arg)
+static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
+ struct bch_ioctl_subvolume arg)
{
struct inode *dir;
struct bch_inode_info *inode;
@@ -373,7 +373,7 @@ retry:
}
if (dst_dentry->d_inode) {
- error = -EEXIST;
+ error = -BCH_ERR_EEXIST_subvolume_create;
goto err3;
}
@@ -406,9 +406,12 @@ retry:
!arg.src_ptr)
snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol;
+ down_write(&c->snapshot_create_lock);
inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir),
dst_dentry, arg.mode|S_IFDIR,
0, snapshot_src, create_flags);
+ up_write(&c->snapshot_create_lock);
+
error = PTR_ERR_OR_ZERO(inode);
if (error)
goto err3;
@@ -429,16 +432,6 @@ err1:
return error;
}
-static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
- struct bch_ioctl_subvolume arg)
-{
- down_write(&c->snapshot_create_lock);
- long ret = __bch2_ioctl_subvolume_create(c, filp, arg);
- up_write(&c->snapshot_create_lock);
-
- return ret;
-}
-
static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
struct bch_ioctl_subvolume arg)
{
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 96040a95cf46..0c7d1bc0548a 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -188,6 +188,18 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
BUG_ON(!old);
if (unlikely(old != inode)) {
+ /*
+ * bcachefs doesn't use I_NEW; we have no use for it since we
+ * only insert fully created inodes in the inode hash table. But
+ * discard_new_inode() expects it to be set...
+ */
+ inode->v.i_flags |= I_NEW;
+ /*
+ * We don't want bch2_evict_inode() to delete the inode on disk;
+ * we just raced and had another inode in cache. Normally new
+ * inodes don't have nlink == 0 - except tmpfiles do...
+ */
+ set_nlink(&inode->v, 1);
discard_new_inode(&inode->v);
inode = old;
} else {
@@ -195,8 +207,10 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
mutex_unlock(&c->vfs_inodes_lock);
/*
- * we really don't want insert_inode_locked2() to be setting
- * I_NEW...
+ * Again, I_NEW makes no sense for bcachefs. This is only needed
+ * for clearing I_NEW, but since the inode was already fully
+ * created and initialized we didn't actually want
+ * inode_insert5() to set it for us.
*/
unlock_new_inode(&inode->v);
}
@@ -227,8 +241,9 @@ static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
mutex_init(&inode->ei_update_lock);
two_state_lock_init(&inode->ei_pagecache_lock);
INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
+ inode->ei_flags = 0;
mutex_init(&inode->ei_quota_lock);
- inode->v.i_state = 0;
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) {
kmem_cache_free(bch2_inode_cache, inode);
@@ -1155,6 +1170,7 @@ static const struct file_operations bch_file_operations = {
.read_iter = bch2_read_iter,
.write_iter = bch2_write_iter,
.mmap = bch2_mmap,
+ .get_unmapped_area = thp_get_unmapped_area,
.fsync = bch2_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
@@ -1486,11 +1502,6 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
bch2_iget5_set(&inode->v, &inum);
bch2_inode_update_after_write(trans, inode, bi, ~0);
- if (BCH_SUBVOLUME_SNAP(subvol))
- set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
- else
- clear_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
-
inode->v.i_blocks = bi->bi_sectors;
inode->v.i_ino = bi->bi_inum;
inode->v.i_rdev = bi->bi_dev;
@@ -1502,6 +1513,9 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
inode->ei_qid = bch_qid(bi);
inode->ei_subvol = inum.subvol;
+ if (BCH_SUBVOLUME_SNAP(subvol))
+ set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
+
inode->v.i_mapping->a_ops = &bch_address_space_operations;
switch (inode->v.i_mode & S_IFMT) {
@@ -1939,8 +1953,7 @@ got_sb:
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
- ret = bch2_err_class(ret);
- return ERR_PTR(ret);
+ goto err;
}
c = sb->s_fs_info;
@@ -1968,6 +1981,7 @@ got_sb:
sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
sb->s_uuid = c->sb.user_uuid;
+ sb->s_shrink->seeks = 0;
c->vfs_sb = sb;
strscpy(sb->s_id, c->name, sizeof(sb->s_id));
@@ -2016,6 +2030,17 @@ out:
err_put_super:
__bch2_fs_stop(c);
deactivate_locked_super(sb);
+err:
+ if (ret)
+ pr_err("error: %s", bch2_err_str(ret));
+ /*
+ * On an inconsistency error in recovery we might see an -EROFS-derived
+ * error code (from the journal), but we don't want to return that to
+ * userspace as that causes util-linux to retry the mount RO - which is
+ * confusing:
+ */
+ if (bch2_err_matches(ret, EROFS) && ret != -EROFS)
+ ret = -EIO;
return ERR_PTR(bch2_err_class(ret));
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index c8f57465131c..921bcdb3e5e4 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -77,21 +77,17 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
struct bkey_s_c k;
int ret;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
- POS(0, inode_nr),
- BTREE_ITER_all_snapshots);
- k = bch2_btree_iter_peek(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
- ret = -BCH_ERR_ENOENT_inode;
- goto err;
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
+ BTREE_ITER_all_snapshots, k, ret) {
+ if (k.k->p.offset != inode_nr)
+ break;
+ if (!bkey_is_inode(k.k))
+ continue;
+ ret = bch2_inode_unpack(k, inode);
+ goto found;
}
-
- ret = bch2_inode_unpack(k, inode);
-err:
+ ret = -BCH_ERR_ENOENT_inode;
+found:
bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -770,25 +766,6 @@ static int get_visible_inodes(struct btree_trans *trans,
return ret;
}
-static int check_key_has_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
- bkey_in_missing_snapshot,
- "key in missing snapshot: %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- ret = bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_internal_snapshot_node) ?: 1;
-fsck_err:
- printbuf_exit(&buf);
- return ret;
-}
-
static int hash_redo_key(struct btree_trans *trans,
const struct bch_hash_desc desc,
struct bch_hash_info *hash_info,
@@ -983,7 +960,7 @@ static int check_inode(struct btree_trans *trans,
bool do_update = false;
int ret;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret < 0)
goto err;
if (ret)
@@ -1487,7 +1464,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct printbuf buf = PRINTBUF;
int ret = 0;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret) {
ret = ret < 0 ? ret : 0;
goto out;
@@ -1700,6 +1677,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
trans_was_restarted(trans, restart_count);
}
+noinline_for_stack
static int check_dirent_inode_dirent(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c_dirent d,
@@ -1796,6 +1774,7 @@ out_noiter:
return ret;
}
+noinline_for_stack
static int check_dirent_target(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c_dirent d,
@@ -1870,6 +1849,7 @@ found:
return ret;
}
+noinline_for_stack
static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c_dirent d)
{
@@ -2010,7 +1990,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
struct printbuf buf = PRINTBUF;
int ret = 0;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret) {
ret = ret < 0 ? ret : 0;
goto out;
@@ -2165,7 +2145,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
struct inode_walker_entry *i;
int ret;
- ret = check_key_has_snapshot(trans, iter, k);
+ ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret < 0)
return ret;
if (ret)
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 4ec979b4b23e..4583c9386e8c 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -125,7 +125,7 @@ err_noprint:
bch2_bkey_buf_exit(&old, c);
if (closure_nr_remaining(&cl) != 1) {
- bch2_trans_unlock(trans);
+ bch2_trans_unlock_long(trans);
closure_sync(&cl);
}
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index f57486794484..ebf39ef72fb2 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -84,9 +84,10 @@ struct promote_op {
};
static const struct rhashtable_params bch_promote_params = {
- .head_offset = offsetof(struct promote_op, hash),
- .key_offset = offsetof(struct promote_op, pos),
- .key_len = sizeof(struct bpos),
+ .head_offset = offsetof(struct promote_op, hash),
+ .key_offset = offsetof(struct promote_op, pos),
+ .key_len = sizeof(struct bpos),
+ .automatic_shrinking = true,
};
static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
@@ -388,7 +389,6 @@ retry:
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
- bch2_trans_unlock(trans);
if (!bch2_bkey_matches_ptr(c, k,
rbio->pick.ptr,
@@ -776,18 +776,32 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
PTR_BUCKET_POS(ca, &ptr),
BTREE_ITER_cached);
- prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
- printbuf_indent_add(&buf, 2);
-
- bch2_bkey_val_to_text(&buf, c, k);
- prt_newline(&buf);
+ u8 *gen = bucket_gen(ca, iter.pos.offset);
+ if (gen) {
- prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+ prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
+ printbuf_indent_add(&buf, 2);
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- if (!ret) {
+ bch2_bkey_val_to_text(&buf, c, k);
prt_newline(&buf);
+
+ prt_printf(&buf, "memory gen: %u", *gen);
+
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ if (!ret) {
+ prt_newline(&buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ }
+ } else {
+ prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n",
+ iter.pos.inode, iter.pos.offset);
+ printbuf_indent_add(&buf, 2);
+
+ prt_printf(&buf, "first bucket %u nbuckets %llu\n",
+ ca->mi.first_bucket, ca->mi.nbuckets);
+
bch2_bkey_val_to_text(&buf, c, k);
+ prt_newline(&buf);
}
bch2_fs_inconsistent(c, "%s", buf.buf);
@@ -989,6 +1003,9 @@ get_bio:
rbio->promote = promote;
INIT_WORK(&rbio->work, NULL);
+ if (flags & BCH_READ_NODECODE)
+ orig->pick = pick;
+
rbio->bio.bi_opf = orig->bio.bi_opf;
rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
rbio->bio.bi_end_io = bch2_read_endio;
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 9401d13e31bb..05e0cbef420b 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -1220,7 +1220,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
u32 snapshot;
struct bucket_to_lock *stale_at;
- int ret;
+ int stale, ret;
if (op->flags & BCH_WRITE_MOVE)
return;
@@ -1299,7 +1299,8 @@ retry:
BUCKET_NOCOW_LOCK_UPDATE);
rcu_read_lock();
- bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
+ u8 *gen = bucket_gen(ca, i->b.offset);
+ stale = !gen ? -1 : gen_after(*gen, i->gen);
rcu_read_unlock();
if (unlikely(stale)) {
@@ -1380,8 +1381,18 @@ err_bucket_stale:
break;
}
- /* We can retry this: */
- ret = -BCH_ERR_transaction_restart;
+ struct printbuf buf = PRINTBUF;
+ if (bch2_fs_inconsistent_on(stale < 0, c,
+ "pointer to invalid bucket in nocow path on device %llu\n %s",
+ stale_at->b.inode,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = -EIO;
+ } else {
+ /* We can retry this: */
+ ret = -BCH_ERR_transaction_restart;
+ }
+ printbuf_exit(&buf);
+
goto err_get_ioref;
}
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index adec8e1ea73e..10b19791ec98 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1095,7 +1095,7 @@ unlock:
return ret;
}
-int bch2_dev_journal_alloc(struct bch_dev *ca)
+int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
{
unsigned nr;
int ret;
@@ -1117,7 +1117,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
- ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+ ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
err:
bch_err_fn(ca, ret);
return ret;
@@ -1129,7 +1129,7 @@ int bch2_fs_journal_alloc(struct bch_fs *c)
if (ca->journal.nr)
continue;
- int ret = bch2_dev_journal_alloc(ca);
+ int ret = bch2_dev_journal_alloc(ca, true);
if (ret) {
percpu_ref_put(&ca->io_ref);
return ret;
@@ -1167,6 +1167,9 @@ void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
void bch2_fs_journal_stop(struct journal *j)
{
+ if (!test_bit(JOURNAL_running, &j->flags))
+ return;
+
bch2_journal_reclaim_stop(j);
bch2_journal_flush_all_pins(j);
@@ -1181,9 +1184,11 @@ void bch2_fs_journal_stop(struct journal *j)
journal_quiesce(j);
cancel_delayed_work_sync(&j->write_work);
- BUG_ON(!bch2_journal_error(j) &&
- test_bit(JOURNAL_replay_done, &j->flags) &&
- j->last_empty_seq != journal_cur_seq(j));
+ WARN(!bch2_journal_error(j) &&
+ test_bit(JOURNAL_replay_done, &j->flags) &&
+ j->last_empty_seq != journal_cur_seq(j),
+ "journal shutdown error: cur seq %llu but last empty seq %llu",
+ journal_cur_seq(j), j->last_empty_seq);
if (!bch2_journal_error(j))
clear_bit(JOURNAL_running, &j->flags);
@@ -1415,8 +1420,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
unsigned long now = jiffies;
u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
- if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 28);
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 28);
out->atomic++;
rcu_read_lock();
@@ -1518,6 +1523,11 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64
struct journal_entry_pin *pin;
spin_lock(&j->lock);
+ if (!test_bit(JOURNAL_running, &j->flags)) {
+ spin_unlock(&j->lock);
+ return true;
+ }
+
*seq = max(*seq, j->pin.front);
if (*seq >= j->pin.back) {
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index fd1f7cdaa8bc..bc6b9c39dcb4 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -433,7 +433,7 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);
int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
unsigned nr);
-int bch2_dev_journal_alloc(struct bch_dev *);
+int bch2_dev_journal_alloc(struct bch_dev *, bool);
int bch2_fs_journal_alloc(struct bch_fs *);
void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index cdcb1ad49af4..2326e2cb9cd2 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -415,6 +415,8 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c,
flags|BCH_VALIDATE_journal);
if (ret == FSCK_DELETED_KEY)
continue;
+ else if (ret)
+ return ret;
k = bkey_next(k);
}
@@ -1677,6 +1679,13 @@ static CLOSURE_CALLBACK(journal_write_done)
mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
}
+ /*
+ * We don't typically trigger journal writes from here - the next journal
+ * write will be triggered immediately after the previous one is
+ * allocated, in bch2_journal_write() - but the journal write error path
+ * is special:
+ */
+ bch2_journal_do_writes(j);
spin_unlock(&j->lock);
}
@@ -1755,11 +1764,13 @@ static CLOSURE_CALLBACK(journal_write_preflush)
if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
spin_lock(&j->lock);
- closure_wait(&j->async_wait, cl);
+ if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
+ closure_wait(&j->async_wait, cl);
+ spin_unlock(&j->lock);
+ continue_at(cl, journal_write_preflush, j->wq);
+ return;
+ }
spin_unlock(&j->lock);
-
- continue_at(cl, journal_write_preflush, j->wq);
- return;
}
if (w->separate_flush) {
@@ -1967,7 +1978,6 @@ CLOSURE_CALLBACK(bch2_journal_write)
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_replicas_padded replicas;
- struct printbuf journal_debug_buf = PRINTBUF;
unsigned nr_rw_members = 0;
int ret;
@@ -2011,11 +2021,15 @@ CLOSURE_CALLBACK(bch2_journal_write)
}
if (ret) {
- __bch2_journal_debug_to_text(&journal_debug_buf, j);
+ struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
+ prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write: %s"),
+ bch2_err_str(ret));
+ __bch2_journal_debug_to_text(&buf, j);
spin_unlock(&j->lock);
- bch_err(c, "Unable to allocate journal write:\n%s",
- journal_debug_buf.buf);
- printbuf_exit(&journal_debug_buf);
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
goto err;
}
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index ed4846709611..1f25c111c54c 100644
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -232,7 +232,7 @@ bool bch2_blacklist_entries_gc(struct bch_fs *c)
BUG_ON(nr != t->nr);
unsigned i;
- for (src = bl->start, i = eytzinger0_first(t->nr);
+ for (src = bl->start, i = t->nr == 0 ? 0 : eytzinger0_first(t->nr);
src < bl->start + nr;
src++, i = eytzinger0_next(i, nr)) {
BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
diff --git a/fs/bcachefs/journal_seq_blacklist_format.h b/fs/bcachefs/journal_seq_blacklist_format.h
new file mode 100644
index 000000000000..2566b12dbc04
--- /dev/null
+++ b/fs/bcachefs/journal_seq_blacklist_format.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
+#define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
+
+struct journal_seq_blacklist_entry {
+ __le64 start;
+ __le64 end;
+};
+
+struct bch_sb_field_journal_seq_blacklist {
+ struct bch_sb_field field;
+ struct journal_seq_blacklist_entry start[];
+};
+
+#endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H */
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index a40d116224ed..b12894ef44f3 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -77,6 +77,45 @@ static const char * const bch2_lru_types[] = {
NULL
};
+int bch2_lru_check_set(struct btree_trans *trans,
+ u16 lru_id, u64 time,
+ struct bkey_s_c referring_k,
+ struct bkey_buf *last_flushed)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ struct btree_iter lru_iter;
+ struct bkey_s_c lru_k =
+ bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
+ lru_pos(lru_id,
+ bucket_to_u64(referring_k.k->p),
+ time), 0);
+ int ret = bkey_err(lru_k);
+ if (ret)
+ return ret;
+
+ if (lru_k.k->type != KEY_TYPE_set) {
+ ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, last_flushed);
+ if (ret)
+ goto err;
+
+ if (fsck_err(c, alloc_key_to_missing_lru_entry,
+ "missing %s lru entry\n"
+ " %s",
+ bch2_lru_types[lru_type(lru_k)],
+ (bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) {
+ ret = bch2_lru_set(trans, lru_id, bucket_to_u64(referring_k.k->p), time);
+ if (ret)
+ goto err;
+ }
+ }
+err:
+fsck_err:
+ bch2_trans_iter_exit(trans, &lru_iter);
+ printbuf_exit(&buf);
+ return ret;
+}
+
static int bch2_check_lru_key(struct btree_trans *trans,
struct btree_iter *lru_iter,
struct bkey_s_c lru_k,
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
index fb11ab0dd00e..ed75bcf59d47 100644
--- a/fs/bcachefs/lru.h
+++ b/fs/bcachefs/lru.h
@@ -2,9 +2,6 @@
#ifndef _BCACHEFS_LRU_H
#define _BCACHEFS_LRU_H
-#define LRU_TIME_BITS 48
-#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)
-
static inline u64 lru_pos_id(struct bpos pos)
{
return pos.inode >> LRU_TIME_BITS;
@@ -64,6 +61,9 @@ int bch2_lru_del(struct btree_trans *, u16, u64, u64);
int bch2_lru_set(struct btree_trans *, u16, u64, u64);
int bch2_lru_change(struct btree_trans *, u16, u64, u64, u64);
+struct bkey_buf;
+int bch2_lru_check_set(struct btree_trans *, u16, u64, struct bkey_s_c, struct bkey_buf *);
+
int bch2_check_lrus(struct bch_fs *);
#endif /* _BCACHEFS_LRU_H */
diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c
index 4c298e74723d..e9d9c0212e44 100644
--- a/fs/bcachefs/mean_and_variance_test.c
+++ b/fs/bcachefs/mean_and_variance_test.c
@@ -217,4 +217,5 @@ static struct kunit_suite mean_and_variance_test_suite = {
kunit_test_suite(mean_and_variance_test_suite);
MODULE_AUTHOR("Daniel B. Hill");
+MODULE_DESCRIPTION("bcachefs filesystem mean and variance unit tests");
MODULE_LICENSE("GPL");
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 8171f947fac8..e714e3bd5bbb 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -36,31 +36,6 @@ const char * const bch2_data_ops_strs[] = {
NULL
};
-static void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- printbuf_tabstop_push(out, 20);
- prt_str(out, "rewrite ptrs:\t");
- bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
- prt_newline(out);
-
- prt_str(out, "kill ptrs:\t");
- bch2_prt_u64_base2(out, data_opts->kill_ptrs);
- prt_newline(out);
-
- prt_str(out, "target:\t");
- bch2_target_to_text(out, c, data_opts->target);
- prt_newline(out);
-
- prt_str(out, "compression:\t");
- bch2_compression_opt_to_text(out, background_compression(*io_opts));
- prt_newline(out);
-
- prt_str(out, "extra replicas:\t");
- prt_u64(out, data_opts->extra_replicas);
-}
-
static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
struct bch_io_opts *io_opts,
struct data_update_opts *data_opts)
@@ -547,6 +522,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
ctxt->stats->pos = BBPOS(btree_id, start);
}
+ bch2_trans_begin(trans);
bch2_trans_iter_init(trans, &iter, btree_id, start,
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots);
@@ -920,7 +896,20 @@ static bool rereplicate_pred(struct bch_fs *c, void *arg,
? c->opts.metadata_replicas
: io_opts->data_replicas;
- if (!nr_good || nr_good >= replicas)
+ rcu_read_lock();
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ unsigned i = 0;
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ if (!ptr->cached &&
+ (!ca || !ca->mi.durability))
+ data_opts->kill_ptrs |= BIT(i);
+ i++;
+ }
+ rcu_read_unlock();
+
+ if (!data_opts->kill_ptrs &&
+ (!nr_good || nr_good >= replicas))
return false;
data_opts->target = 0;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 10bfb31c151b..eb49dd045eff 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -35,9 +35,10 @@ struct buckets_in_flight {
};
static const struct rhashtable_params bch_move_bucket_params = {
- .head_offset = offsetof(struct move_bucket_in_flight, hash),
- .key_offset = offsetof(struct move_bucket_in_flight, bucket.k),
- .key_len = sizeof(struct move_bucket_key),
+ .head_offset = offsetof(struct move_bucket_in_flight, hash),
+ .key_offset = offsetof(struct move_bucket_in_flight, bucket.k),
+ .key_len = sizeof(struct move_bucket_key),
+ .automatic_shrinking = true,
};
static struct move_bucket_in_flight *
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 25530e0bb2f3..b197ec90d4cb 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -137,7 +137,7 @@ enum fsck_err_opts {
x(errors, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_error_actions), \
- BCH_SB_ERROR_ACTION, BCH_ON_ERROR_ro, \
+ BCH_SB_ERROR_ACTION, BCH_ON_ERROR_fix_safe, \
NULL, "Action to take on filesystem error") \
x(metadata_replicas, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index cf513fc79ce4..1f9d044ed920 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -326,6 +326,12 @@ static int journal_replay_entry_early(struct bch_fs *c,
case BCH_JSET_ENTRY_btree_root: {
struct btree_root *r;
+ if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX,
+ c, invalid_btree_id,
+ "invalid btree id %u (max %u)",
+ entry->btree_id, BTREE_ID_NR_MAX))
+ return 0;
+
while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
if (ret)
@@ -415,7 +421,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
}
}
-
+fsck_err:
return ret;
}
@@ -658,10 +664,10 @@ int bch2_fs_recovery(struct bch_fs *c)
if (check_version_upgrade(c))
write_sb = true;
+ c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+
if (write_sb)
bch2_write_super(c);
-
- c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
mutex_unlock(&c->sb_lock);
if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
diff --git a/fs/bcachefs/replicas_format.h b/fs/bcachefs/replicas_format.h
new file mode 100644
index 000000000000..b97208195d06
--- /dev/null
+++ b/fs/bcachefs/replicas_format.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REPLICAS_FORMAT_H
+#define _BCACHEFS_REPLICAS_FORMAT_H
+
+struct bch_replicas_entry_v0 {
+ __u8 data_type;
+ __u8 nr_devs;
+ __u8 devs[];
+} __packed;
+
+struct bch_sb_field_replicas_v0 {
+ struct bch_sb_field field;
+ struct bch_replicas_entry_v0 entries[];
+} __packed __aligned(8);
+
+struct bch_replicas_entry_v1 {
+ __u8 data_type;
+ __u8 nr_devs;
+ __u8 nr_required;
+ __u8 devs[];
+} __packed;
+
+struct bch_sb_field_replicas {
+ struct bch_sb_field field;
+ struct bch_replicas_entry_v1 entries[];
+} __packed __aligned(8);
+
+#define replicas_entry_bytes(_i) \
+ (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
+
+#endif /* _BCACHEFS_REPLICAS_FORMAT_H */
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index 390a1bbd2567..4710b61631f0 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -146,10 +146,17 @@ static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
for (const struct bch_sb_field_downgrade_entry *i = e->entries;
(void *) i < vstruct_end(&e->field);
i = downgrade_entry_next_c(i)) {
+ /*
+ * Careful: sb_field_downgrade_entry is only 2-byte aligned, but
+ * section sizes are 8-byte aligned - an empty entry spanning
+ * the end of the section is allowed (and ignored):
+ */
+ if ((void *) &i->errors[0] > vstruct_end(&e->field))
+ break;
+
if (flags & BCH_VALIDATE_write &&
- ((void *) &i->errors[0] > vstruct_end(&e->field) ||
- (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field))) {
- prt_printf(err, "downgrade entry overruns end of superblock section)");
+ (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field)) {
+ prt_printf(err, "downgrade entry overruns end of superblock section");
return -BCH_ERR_invalid_sb_downgrade;
}
@@ -221,7 +228,7 @@ int bch2_sb_downgrade_update(struct bch_fs *c)
dst = (void *) &darray_top(table);
dst->version = cpu_to_le16(src->version);
- dst->recovery_passes[0] = cpu_to_le64(src->recovery_passes);
+ dst->recovery_passes[0] = cpu_to_le64(bch2_recovery_passes_to_stable(src->recovery_passes));
dst->recovery_passes[1] = 0;
dst->nr_errors = cpu_to_le16(src->nr_errors);
for (unsigned i = 0; i < src->nr_errors; i++)
diff --git a/fs/bcachefs/sb-downgrade_format.h b/fs/bcachefs/sb-downgrade_format.h
new file mode 100644
index 000000000000..cffd932be3ec
--- /dev/null
+++ b/fs/bcachefs/sb-downgrade_format.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_DOWNGRADE_FORMAT_H
+#define _BCACHEFS_SB_DOWNGRADE_FORMAT_H
+
+struct bch_sb_field_downgrade_entry {
+ __le16 version;
+ __le64 recovery_passes[2];
+ __le16 nr_errors;
+ __le16 errors[] __counted_by(nr_errors);
+} __packed __aligned(2);
+
+struct bch_sb_field_downgrade {
+ struct bch_sb_field field;
+ struct bch_sb_field_downgrade_entry entries[];
+};
+
+#endif /* _BCACHEFS_SB_DOWNGRADE_FORMAT_H */
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
index bda33e59e226..c1270d790e43 100644
--- a/fs/bcachefs/sb-errors.c
+++ b/fs/bcachefs/sb-errors.c
@@ -110,19 +110,25 @@ out:
void bch2_sb_errors_from_cpu(struct bch_fs *c)
{
bch_sb_errors_cpu *src = &c->fsck_error_counts;
- struct bch_sb_field_errors *dst =
- bch2_sb_field_resize(&c->disk_sb, errors,
- bch2_sb_field_errors_u64s(src->nr));
+ struct bch_sb_field_errors *dst;
unsigned i;
+ mutex_lock(&c->fsck_error_counts_lock);
+
+ dst = bch2_sb_field_resize(&c->disk_sb, errors,
+ bch2_sb_field_errors_u64s(src->nr));
+
if (!dst)
- return;
+ goto err;
for (i = 0; i < src->nr; i++) {
SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id);
SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[i], src->data[i].nr);
dst->entries[i].last_error_time = cpu_to_le64(src->data[i].last_error_time);
}
+
+err:
+ mutex_unlock(&c->fsck_error_counts_lock);
}
static int bch2_sb_errors_to_cpu(struct bch_fs *c)
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
new file mode 100644
index 000000000000..d54121ec093f
--- /dev/null
+++ b/fs/bcachefs/sb-errors_format.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_ERRORS_FORMAT_H
+#define _BCACHEFS_SB_ERRORS_FORMAT_H
+
+enum bch_fsck_flags {
+ FSCK_CAN_FIX = 1 << 0,
+ FSCK_CAN_IGNORE = 1 << 1,
+ FSCK_NEED_FSCK = 1 << 2,
+ FSCK_NO_RATELIMIT = 1 << 3,
+ FSCK_AUTOFIX = 1 << 4,
+};
+
+#define BCH_SB_ERRS() \
+ x(clean_but_journal_not_empty, 0, 0) \
+ x(dirty_but_no_journal_entries, 1, 0) \
+ x(dirty_but_no_journal_entries_post_drop_nonflushes, 2, 0) \
+ x(sb_clean_journal_seq_mismatch, 3, 0) \
+ x(sb_clean_btree_root_mismatch, 4, 0) \
+ x(sb_clean_missing, 5, 0) \
+ x(jset_unsupported_version, 6, 0) \
+ x(jset_unknown_csum, 7, 0) \
+ x(jset_last_seq_newer_than_seq, 8, 0) \
+ x(jset_past_bucket_end, 9, 0) \
+ x(jset_seq_blacklisted, 10, 0) \
+ x(journal_entries_missing, 11, 0) \
+ x(journal_entry_replicas_not_marked, 12, 0) \
+ x(journal_entry_past_jset_end, 13, 0) \
+ x(journal_entry_replicas_data_mismatch, 14, 0) \
+ x(journal_entry_bkey_u64s_0, 15, 0) \
+ x(journal_entry_bkey_past_end, 16, 0) \
+ x(journal_entry_bkey_bad_format, 17, 0) \
+ x(journal_entry_bkey_invalid, 18, 0) \
+ x(journal_entry_btree_root_bad_size, 19, 0) \
+ x(journal_entry_blacklist_bad_size, 20, 0) \
+ x(journal_entry_blacklist_v2_bad_size, 21, 0) \
+ x(journal_entry_blacklist_v2_start_past_end, 22, 0) \
+ x(journal_entry_usage_bad_size, 23, 0) \
+ x(journal_entry_data_usage_bad_size, 24, 0) \
+ x(journal_entry_clock_bad_size, 25, 0) \
+ x(journal_entry_clock_bad_rw, 26, 0) \
+ x(journal_entry_dev_usage_bad_size, 27, 0) \
+ x(journal_entry_dev_usage_bad_dev, 28, 0) \
+ x(journal_entry_dev_usage_bad_pad, 29, 0) \
+ x(btree_node_unreadable, 30, 0) \
+ x(btree_node_fault_injected, 31, 0) \
+ x(btree_node_bad_magic, 32, 0) \
+ x(btree_node_bad_seq, 33, 0) \
+ x(btree_node_unsupported_version, 34, 0) \
+ x(btree_node_bset_older_than_sb_min, 35, 0) \
+ x(btree_node_bset_newer_than_sb, 36, 0) \
+ x(btree_node_data_missing, 37, 0) \
+ x(btree_node_bset_after_end, 38, 0) \
+ x(btree_node_replicas_sectors_written_mismatch, 39, 0) \
+ x(btree_node_replicas_data_mismatch, 40, 0) \
+ x(bset_unknown_csum, 41, 0) \
+ x(bset_bad_csum, 42, 0) \
+ x(bset_past_end_of_btree_node, 43, 0) \
+ x(bset_wrong_sector_offset, 44, 0) \
+ x(bset_empty, 45, 0) \
+ x(bset_bad_seq, 46, 0) \
+ x(bset_blacklisted_journal_seq, 47, 0) \
+ x(first_bset_blacklisted_journal_seq, 48, 0) \
+ x(btree_node_bad_btree, 49, 0) \
+ x(btree_node_bad_level, 50, 0) \
+ x(btree_node_bad_min_key, 51, 0) \
+ x(btree_node_bad_max_key, 52, 0) \
+ x(btree_node_bad_format, 53, 0) \
+ x(btree_node_bkey_past_bset_end, 54, 0) \
+ x(btree_node_bkey_bad_format, 55, 0) \
+ x(btree_node_bad_bkey, 56, 0) \
+ x(btree_node_bkey_out_of_order, 57, 0) \
+ x(btree_root_bkey_invalid, 58, 0) \
+ x(btree_root_read_error, 59, 0) \
+ x(btree_root_bad_min_key, 60, 0) \
+ x(btree_root_bad_max_key, 61, 0) \
+ x(btree_node_read_error, 62, 0) \
+ x(btree_node_topology_bad_min_key, 63, 0) \
+ x(btree_node_topology_bad_max_key, 64, 0) \
+ x(btree_node_topology_overwritten_by_prev_node, 65, 0) \
+ x(btree_node_topology_overwritten_by_next_node, 66, 0) \
+ x(btree_node_topology_interior_node_empty, 67, 0) \
+ x(fs_usage_hidden_wrong, 68, FSCK_AUTOFIX) \
+ x(fs_usage_btree_wrong, 69, FSCK_AUTOFIX) \
+ x(fs_usage_data_wrong, 70, FSCK_AUTOFIX) \
+ x(fs_usage_cached_wrong, 71, FSCK_AUTOFIX) \
+ x(fs_usage_reserved_wrong, 72, FSCK_AUTOFIX) \
+ x(fs_usage_persistent_reserved_wrong, 73, FSCK_AUTOFIX) \
+ x(fs_usage_nr_inodes_wrong, 74, FSCK_AUTOFIX) \
+ x(fs_usage_replicas_wrong, 75, FSCK_AUTOFIX) \
+ x(dev_usage_buckets_wrong, 76, FSCK_AUTOFIX) \
+ x(dev_usage_sectors_wrong, 77, FSCK_AUTOFIX) \
+ x(dev_usage_fragmented_wrong, 78, FSCK_AUTOFIX) \
+ x(dev_usage_buckets_ec_wrong, 79, FSCK_AUTOFIX) \
+ x(bkey_version_in_future, 80, 0) \
+ x(bkey_u64s_too_small, 81, 0) \
+ x(bkey_invalid_type_for_btree, 82, 0) \
+ x(bkey_extent_size_zero, 83, 0) \
+ x(bkey_extent_size_greater_than_offset, 84, 0) \
+ x(bkey_size_nonzero, 85, 0) \
+ x(bkey_snapshot_nonzero, 86, 0) \
+ x(bkey_snapshot_zero, 87, 0) \
+ x(bkey_at_pos_max, 88, 0) \
+ x(bkey_before_start_of_btree_node, 89, 0) \
+ x(bkey_after_end_of_btree_node, 90, 0) \
+ x(bkey_val_size_nonzero, 91, 0) \
+ x(bkey_val_size_too_small, 92, 0) \
+ x(alloc_v1_val_size_bad, 93, 0) \
+ x(alloc_v2_unpack_error, 94, 0) \
+ x(alloc_v3_unpack_error, 95, 0) \
+ x(alloc_v4_val_size_bad, 96, 0) \
+ x(alloc_v4_backpointers_start_bad, 97, 0) \
+ x(alloc_key_data_type_bad, 98, 0) \
+ x(alloc_key_empty_but_have_data, 99, 0) \
+ x(alloc_key_dirty_sectors_0, 100, 0) \
+ x(alloc_key_data_type_inconsistency, 101, 0) \
+ x(alloc_key_to_missing_dev_bucket, 102, 0) \
+ x(alloc_key_cached_inconsistency, 103, 0) \
+ x(alloc_key_cached_but_read_time_zero, 104, 0) \
+ x(alloc_key_to_missing_lru_entry, 105, 0) \
+ x(alloc_key_data_type_wrong, 106, FSCK_AUTOFIX) \
+ x(alloc_key_gen_wrong, 107, FSCK_AUTOFIX) \
+ x(alloc_key_dirty_sectors_wrong, 108, FSCK_AUTOFIX) \
+ x(alloc_key_cached_sectors_wrong, 109, FSCK_AUTOFIX) \
+ x(alloc_key_stripe_wrong, 110, FSCK_AUTOFIX) \
+ x(alloc_key_stripe_redundancy_wrong, 111, FSCK_AUTOFIX) \
+ x(bucket_sector_count_overflow, 112, 0) \
+ x(bucket_metadata_type_mismatch, 113, 0) \
+ x(need_discard_key_wrong, 114, 0) \
+ x(freespace_key_wrong, 115, 0) \
+ x(freespace_hole_missing, 116, 0) \
+ x(bucket_gens_val_size_bad, 117, 0) \
+ x(bucket_gens_key_wrong, 118, 0) \
+ x(bucket_gens_hole_wrong, 119, 0) \
+ x(bucket_gens_to_invalid_dev, 120, 0) \
+ x(bucket_gens_to_invalid_buckets, 121, 0) \
+ x(bucket_gens_nonzero_for_invalid_buckets, 122, 0) \
+ x(need_discard_freespace_key_to_invalid_dev_bucket, 123, 0) \
+ x(need_discard_freespace_key_bad, 124, 0) \
+ x(backpointer_bucket_offset_wrong, 125, 0) \
+ x(backpointer_to_missing_device, 126, 0) \
+ x(backpointer_to_missing_alloc, 127, 0) \
+ x(backpointer_to_missing_ptr, 128, 0) \
+ x(lru_entry_at_time_0, 129, 0) \
+ x(lru_entry_to_invalid_bucket, 130, 0) \
+ x(lru_entry_bad, 131, 0) \
+ x(btree_ptr_val_too_big, 132, 0) \
+ x(btree_ptr_v2_val_too_big, 133, 0) \
+ x(btree_ptr_has_non_ptr, 134, 0) \
+ x(extent_ptrs_invalid_entry, 135, 0) \
+ x(extent_ptrs_no_ptrs, 136, 0) \
+ x(extent_ptrs_too_many_ptrs, 137, 0) \
+ x(extent_ptrs_redundant_crc, 138, 0) \
+ x(extent_ptrs_redundant_stripe, 139, 0) \
+ x(extent_ptrs_unwritten, 140, 0) \
+ x(extent_ptrs_written_and_unwritten, 141, 0) \
+ x(ptr_to_invalid_device, 142, 0) \
+ x(ptr_to_duplicate_device, 143, 0) \
+ x(ptr_after_last_bucket, 144, 0) \
+ x(ptr_before_first_bucket, 145, 0) \
+ x(ptr_spans_multiple_buckets, 146, 0) \
+ x(ptr_to_missing_backpointer, 147, 0) \
+ x(ptr_to_missing_alloc_key, 148, 0) \
+ x(ptr_to_missing_replicas_entry, 149, 0) \
+ x(ptr_to_missing_stripe, 150, 0) \
+ x(ptr_to_incorrect_stripe, 151, 0) \
+ x(ptr_gen_newer_than_bucket_gen, 152, 0) \
+ x(ptr_too_stale, 153, 0) \
+ x(stale_dirty_ptr, 154, 0) \
+ x(ptr_bucket_data_type_mismatch, 155, 0) \
+ x(ptr_cached_and_erasure_coded, 156, 0) \
+ x(ptr_crc_uncompressed_size_too_small, 157, 0) \
+ x(ptr_crc_csum_type_unknown, 158, 0) \
+ x(ptr_crc_compression_type_unknown, 159, 0) \
+ x(ptr_crc_redundant, 160, 0) \
+ x(ptr_crc_uncompressed_size_too_big, 161, 0) \
+ x(ptr_crc_nonce_mismatch, 162, 0) \
+ x(ptr_stripe_redundant, 163, 0) \
+ x(reservation_key_nr_replicas_invalid, 164, 0) \
+ x(reflink_v_refcount_wrong, 165, 0) \
+ x(reflink_p_to_missing_reflink_v, 166, 0) \
+ x(stripe_pos_bad, 167, 0) \
+ x(stripe_val_size_bad, 168, 0) \
+ x(stripe_sector_count_wrong, 169, 0) \
+ x(snapshot_tree_pos_bad, 170, 0) \
+ x(snapshot_tree_to_missing_snapshot, 171, 0) \
+ x(snapshot_tree_to_missing_subvol, 172, 0) \
+ x(snapshot_tree_to_wrong_subvol, 173, 0) \
+ x(snapshot_tree_to_snapshot_subvol, 174, 0) \
+ x(snapshot_pos_bad, 175, 0) \
+ x(snapshot_parent_bad, 176, 0) \
+ x(snapshot_children_not_normalized, 177, 0) \
+ x(snapshot_child_duplicate, 178, 0) \
+ x(snapshot_child_bad, 179, 0) \
+ x(snapshot_skiplist_not_normalized, 180, 0) \
+ x(snapshot_skiplist_bad, 181, 0) \
+ x(snapshot_should_not_have_subvol, 182, 0) \
+ x(snapshot_to_bad_snapshot_tree, 183, 0) \
+ x(snapshot_bad_depth, 184, 0) \
+ x(snapshot_bad_skiplist, 185, 0) \
+ x(subvol_pos_bad, 186, 0) \
+ x(subvol_not_master_and_not_snapshot, 187, 0) \
+ x(subvol_to_missing_root, 188, 0) \
+ x(subvol_root_wrong_bi_subvol, 189, 0) \
+ x(bkey_in_missing_snapshot, 190, 0) \
+ x(inode_pos_inode_nonzero, 191, 0) \
+ x(inode_pos_blockdev_range, 192, 0) \
+ x(inode_unpack_error, 193, 0) \
+ x(inode_str_hash_invalid, 194, 0) \
+ x(inode_v3_fields_start_bad, 195, 0) \
+ x(inode_snapshot_mismatch, 196, 0) \
+ x(inode_unlinked_but_clean, 197, 0) \
+ x(inode_unlinked_but_nlink_nonzero, 198, 0) \
+ x(inode_checksum_type_invalid, 199, 0) \
+ x(inode_compression_type_invalid, 200, 0) \
+ x(inode_subvol_root_but_not_dir, 201, 0) \
+ x(inode_i_size_dirty_but_clean, 202, 0) \
+ x(inode_i_sectors_dirty_but_clean, 203, 0) \
+ x(inode_i_sectors_wrong, 204, 0) \
+ x(inode_dir_wrong_nlink, 205, 0) \
+ x(inode_dir_multiple_links, 206, 0) \
+ x(inode_multiple_links_but_nlink_0, 207, 0) \
+ x(inode_wrong_backpointer, 208, 0) \
+ x(inode_wrong_nlink, 209, 0) \
+ x(inode_unreachable, 210, 0) \
+ x(deleted_inode_but_clean, 211, 0) \
+ x(deleted_inode_missing, 212, 0) \
+ x(deleted_inode_is_dir, 213, 0) \
+ x(deleted_inode_not_unlinked, 214, 0) \
+ x(extent_overlapping, 215, 0) \
+ x(extent_in_missing_inode, 216, 0) \
+ x(extent_in_non_reg_inode, 217, 0) \
+ x(extent_past_end_of_inode, 218, 0) \
+ x(dirent_empty_name, 219, 0) \
+ x(dirent_val_too_big, 220, 0) \
+ x(dirent_name_too_long, 221, 0) \
+ x(dirent_name_embedded_nul, 222, 0) \
+ x(dirent_name_dot_or_dotdot, 223, 0) \
+ x(dirent_name_has_slash, 224, 0) \
+ x(dirent_d_type_wrong, 225, 0) \
+ x(inode_bi_parent_wrong, 226, 0) \
+ x(dirent_in_missing_dir_inode, 227, 0) \
+ x(dirent_in_non_dir_inode, 228, 0) \
+ x(dirent_to_missing_inode, 229, 0) \
+ x(dirent_to_missing_subvol, 230, 0) \
+ x(dirent_to_itself, 231, 0) \
+ x(quota_type_invalid, 232, 0) \
+ x(xattr_val_size_too_small, 233, 0) \
+ x(xattr_val_size_too_big, 234, 0) \
+ x(xattr_invalid_type, 235, 0) \
+ x(xattr_name_invalid_chars, 236, 0) \
+ x(xattr_in_missing_inode, 237, 0) \
+ x(root_subvol_missing, 238, 0) \
+ x(root_dir_missing, 239, 0) \
+ x(root_inode_not_dir, 240, 0) \
+ x(dir_loop, 241, 0) \
+ x(hash_table_key_duplicate, 242, 0) \
+ x(hash_table_key_wrong_offset, 243, 0) \
+ x(unlinked_inode_not_on_deleted_list, 244, 0) \
+ x(reflink_p_front_pad_bad, 245, 0) \
+ x(journal_entry_dup_same_device, 246, 0) \
+ x(inode_bi_subvol_missing, 247, 0) \
+ x(inode_bi_subvol_wrong, 248, 0) \
+ x(inode_points_to_missing_dirent, 249, 0) \
+ x(inode_points_to_wrong_dirent, 250, 0) \
+ x(inode_bi_parent_nonzero, 251, 0) \
+ x(dirent_to_missing_parent_subvol, 252, 0) \
+ x(dirent_not_visible_in_parent_subvol, 253, 0) \
+ x(subvol_fs_path_parent_wrong, 254, 0) \
+ x(subvol_root_fs_path_parent_nonzero, 255, 0) \
+ x(subvol_children_not_set, 256, 0) \
+ x(subvol_children_bad, 257, 0) \
+ x(subvol_loop, 258, 0) \
+ x(subvol_unreachable, 259, 0) \
+ x(btree_node_bkey_bad_u64s, 260, 0) \
+ x(btree_node_topology_empty_interior_node, 261, 0) \
+ x(btree_ptr_v2_min_key_bad, 262, 0) \
+ x(btree_root_unreadable_and_scan_found_nothing, 263, 0) \
+ x(snapshot_node_missing, 264, 0) \
+ x(dup_backpointer_to_bad_csum_extent, 265, 0) \
+ x(btree_bitmap_not_marked, 266, 0) \
+ x(sb_clean_entry_overrun, 267, 0) \
+ x(btree_ptr_v2_written_0, 268, 0) \
+ x(subvol_snapshot_bad, 269, 0) \
+ x(subvol_inode_bad, 270, 0) \
+ x(alloc_key_stripe_sectors_wrong, 271, 0) \
+ x(accounting_mismatch, 272, 0) \
+ x(accounting_replicas_not_marked, 273, 0) \
+ x(invalid_btree_id, 274, 0) \
+ x(alloc_key_io_time_bad, 275, 0) \
+ x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX)
+
+enum bch_sb_error_id {
+#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
+ BCH_SB_ERRS()
+#undef x
+ BCH_SB_ERR_MAX
+};
+
+struct bch_sb_field_errors {
+ struct bch_sb_field field;
+ struct bch_sb_field_error_entry {
+ __le64 v;
+ __le64 last_error_time;
+ } entries[];
+};
+
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID, struct bch_sb_field_error_entry, v, 0, 16);
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR, struct bch_sb_field_error_entry, v, 16, 64);
+
+#endif /* _BCACHEFS_SB_ERRORS_FORMAT_H */
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
index 666599d3fb9d..40325239c3b0 100644
--- a/fs/bcachefs/sb-errors_types.h
+++ b/fs/bcachefs/sb-errors_types.h
@@ -4,286 +4,6 @@
#include "darray.h"
-#define BCH_SB_ERRS() \
- x(clean_but_journal_not_empty, 0) \
- x(dirty_but_no_journal_entries, 1) \
- x(dirty_but_no_journal_entries_post_drop_nonflushes, 2) \
- x(sb_clean_journal_seq_mismatch, 3) \
- x(sb_clean_btree_root_mismatch, 4) \
- x(sb_clean_missing, 5) \
- x(jset_unsupported_version, 6) \
- x(jset_unknown_csum, 7) \
- x(jset_last_seq_newer_than_seq, 8) \
- x(jset_past_bucket_end, 9) \
- x(jset_seq_blacklisted, 10) \
- x(journal_entries_missing, 11) \
- x(journal_entry_replicas_not_marked, 12) \
- x(journal_entry_past_jset_end, 13) \
- x(journal_entry_replicas_data_mismatch, 14) \
- x(journal_entry_bkey_u64s_0, 15) \
- x(journal_entry_bkey_past_end, 16) \
- x(journal_entry_bkey_bad_format, 17) \
- x(journal_entry_bkey_invalid, 18) \
- x(journal_entry_btree_root_bad_size, 19) \
- x(journal_entry_blacklist_bad_size, 20) \
- x(journal_entry_blacklist_v2_bad_size, 21) \
- x(journal_entry_blacklist_v2_start_past_end, 22) \
- x(journal_entry_usage_bad_size, 23) \
- x(journal_entry_data_usage_bad_size, 24) \
- x(journal_entry_clock_bad_size, 25) \
- x(journal_entry_clock_bad_rw, 26) \
- x(journal_entry_dev_usage_bad_size, 27) \
- x(journal_entry_dev_usage_bad_dev, 28) \
- x(journal_entry_dev_usage_bad_pad, 29) \
- x(btree_node_unreadable, 30) \
- x(btree_node_fault_injected, 31) \
- x(btree_node_bad_magic, 32) \
- x(btree_node_bad_seq, 33) \
- x(btree_node_unsupported_version, 34) \
- x(btree_node_bset_older_than_sb_min, 35) \
- x(btree_node_bset_newer_than_sb, 36) \
- x(btree_node_data_missing, 37) \
- x(btree_node_bset_after_end, 38) \
- x(btree_node_replicas_sectors_written_mismatch, 39) \
- x(btree_node_replicas_data_mismatch, 40) \
- x(bset_unknown_csum, 41) \
- x(bset_bad_csum, 42) \
- x(bset_past_end_of_btree_node, 43) \
- x(bset_wrong_sector_offset, 44) \
- x(bset_empty, 45) \
- x(bset_bad_seq, 46) \
- x(bset_blacklisted_journal_seq, 47) \
- x(first_bset_blacklisted_journal_seq, 48) \
- x(btree_node_bad_btree, 49) \
- x(btree_node_bad_level, 50) \
- x(btree_node_bad_min_key, 51) \
- x(btree_node_bad_max_key, 52) \
- x(btree_node_bad_format, 53) \
- x(btree_node_bkey_past_bset_end, 54) \
- x(btree_node_bkey_bad_format, 55) \
- x(btree_node_bad_bkey, 56) \
- x(btree_node_bkey_out_of_order, 57) \
- x(btree_root_bkey_invalid, 58) \
- x(btree_root_read_error, 59) \
- x(btree_root_bad_min_key, 60) \
- x(btree_root_bad_max_key, 61) \
- x(btree_node_read_error, 62) \
- x(btree_node_topology_bad_min_key, 63) \
- x(btree_node_topology_bad_max_key, 64) \
- x(btree_node_topology_overwritten_by_prev_node, 65) \
- x(btree_node_topology_overwritten_by_next_node, 66) \
- x(btree_node_topology_interior_node_empty, 67) \
- x(fs_usage_hidden_wrong, 68) \
- x(fs_usage_btree_wrong, 69) \
- x(fs_usage_data_wrong, 70) \
- x(fs_usage_cached_wrong, 71) \
- x(fs_usage_reserved_wrong, 72) \
- x(fs_usage_persistent_reserved_wrong, 73) \
- x(fs_usage_nr_inodes_wrong, 74) \
- x(fs_usage_replicas_wrong, 75) \
- x(dev_usage_buckets_wrong, 76) \
- x(dev_usage_sectors_wrong, 77) \
- x(dev_usage_fragmented_wrong, 78) \
- x(dev_usage_buckets_ec_wrong, 79) \
- x(bkey_version_in_future, 80) \
- x(bkey_u64s_too_small, 81) \
- x(bkey_invalid_type_for_btree, 82) \
- x(bkey_extent_size_zero, 83) \
- x(bkey_extent_size_greater_than_offset, 84) \
- x(bkey_size_nonzero, 85) \
- x(bkey_snapshot_nonzero, 86) \
- x(bkey_snapshot_zero, 87) \
- x(bkey_at_pos_max, 88) \
- x(bkey_before_start_of_btree_node, 89) \
- x(bkey_after_end_of_btree_node, 90) \
- x(bkey_val_size_nonzero, 91) \
- x(bkey_val_size_too_small, 92) \
- x(alloc_v1_val_size_bad, 93) \
- x(alloc_v2_unpack_error, 94) \
- x(alloc_v3_unpack_error, 95) \
- x(alloc_v4_val_size_bad, 96) \
- x(alloc_v4_backpointers_start_bad, 97) \
- x(alloc_key_data_type_bad, 98) \
- x(alloc_key_empty_but_have_data, 99) \
- x(alloc_key_dirty_sectors_0, 100) \
- x(alloc_key_data_type_inconsistency, 101) \
- x(alloc_key_to_missing_dev_bucket, 102) \
- x(alloc_key_cached_inconsistency, 103) \
- x(alloc_key_cached_but_read_time_zero, 104) \
- x(alloc_key_to_missing_lru_entry, 105) \
- x(alloc_key_data_type_wrong, 106) \
- x(alloc_key_gen_wrong, 107) \
- x(alloc_key_dirty_sectors_wrong, 108) \
- x(alloc_key_cached_sectors_wrong, 109) \
- x(alloc_key_stripe_wrong, 110) \
- x(alloc_key_stripe_redundancy_wrong, 111) \
- x(bucket_sector_count_overflow, 112) \
- x(bucket_metadata_type_mismatch, 113) \
- x(need_discard_key_wrong, 114) \
- x(freespace_key_wrong, 115) \
- x(freespace_hole_missing, 116) \
- x(bucket_gens_val_size_bad, 117) \
- x(bucket_gens_key_wrong, 118) \
- x(bucket_gens_hole_wrong, 119) \
- x(bucket_gens_to_invalid_dev, 120) \
- x(bucket_gens_to_invalid_buckets, 121) \
- x(bucket_gens_nonzero_for_invalid_buckets, 122) \
- x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \
- x(need_discard_freespace_key_bad, 124) \
- x(backpointer_bucket_offset_wrong, 125) \
- x(backpointer_to_missing_device, 126) \
- x(backpointer_to_missing_alloc, 127) \
- x(backpointer_to_missing_ptr, 128) \
- x(lru_entry_at_time_0, 129) \
- x(lru_entry_to_invalid_bucket, 130) \
- x(lru_entry_bad, 131) \
- x(btree_ptr_val_too_big, 132) \
- x(btree_ptr_v2_val_too_big, 133) \
- x(btree_ptr_has_non_ptr, 134) \
- x(extent_ptrs_invalid_entry, 135) \
- x(extent_ptrs_no_ptrs, 136) \
- x(extent_ptrs_too_many_ptrs, 137) \
- x(extent_ptrs_redundant_crc, 138) \
- x(extent_ptrs_redundant_stripe, 139) \
- x(extent_ptrs_unwritten, 140) \
- x(extent_ptrs_written_and_unwritten, 141) \
- x(ptr_to_invalid_device, 142) \
- x(ptr_to_duplicate_device, 143) \
- x(ptr_after_last_bucket, 144) \
- x(ptr_before_first_bucket, 145) \
- x(ptr_spans_multiple_buckets, 146) \
- x(ptr_to_missing_backpointer, 147) \
- x(ptr_to_missing_alloc_key, 148) \
- x(ptr_to_missing_replicas_entry, 149) \
- x(ptr_to_missing_stripe, 150) \
- x(ptr_to_incorrect_stripe, 151) \
- x(ptr_gen_newer_than_bucket_gen, 152) \
- x(ptr_too_stale, 153) \
- x(stale_dirty_ptr, 154) \
- x(ptr_bucket_data_type_mismatch, 155) \
- x(ptr_cached_and_erasure_coded, 156) \
- x(ptr_crc_uncompressed_size_too_small, 157) \
- x(ptr_crc_csum_type_unknown, 158) \
- x(ptr_crc_compression_type_unknown, 159) \
- x(ptr_crc_redundant, 160) \
- x(ptr_crc_uncompressed_size_too_big, 161) \
- x(ptr_crc_nonce_mismatch, 162) \
- x(ptr_stripe_redundant, 163) \
- x(reservation_key_nr_replicas_invalid, 164) \
- x(reflink_v_refcount_wrong, 165) \
- x(reflink_p_to_missing_reflink_v, 166) \
- x(stripe_pos_bad, 167) \
- x(stripe_val_size_bad, 168) \
- x(stripe_sector_count_wrong, 169) \
- x(snapshot_tree_pos_bad, 170) \
- x(snapshot_tree_to_missing_snapshot, 171) \
- x(snapshot_tree_to_missing_subvol, 172) \
- x(snapshot_tree_to_wrong_subvol, 173) \
- x(snapshot_tree_to_snapshot_subvol, 174) \
- x(snapshot_pos_bad, 175) \
- x(snapshot_parent_bad, 176) \
- x(snapshot_children_not_normalized, 177) \
- x(snapshot_child_duplicate, 178) \
- x(snapshot_child_bad, 179) \
- x(snapshot_skiplist_not_normalized, 180) \
- x(snapshot_skiplist_bad, 181) \
- x(snapshot_should_not_have_subvol, 182) \
- x(snapshot_to_bad_snapshot_tree, 183) \
- x(snapshot_bad_depth, 184) \
- x(snapshot_bad_skiplist, 185) \
- x(subvol_pos_bad, 186) \
- x(subvol_not_master_and_not_snapshot, 187) \
- x(subvol_to_missing_root, 188) \
- x(subvol_root_wrong_bi_subvol, 189) \
- x(bkey_in_missing_snapshot, 190) \
- x(inode_pos_inode_nonzero, 191) \
- x(inode_pos_blockdev_range, 192) \
- x(inode_unpack_error, 193) \
- x(inode_str_hash_invalid, 194) \
- x(inode_v3_fields_start_bad, 195) \
- x(inode_snapshot_mismatch, 196) \
- x(inode_unlinked_but_clean, 197) \
- x(inode_unlinked_but_nlink_nonzero, 198) \
- x(inode_checksum_type_invalid, 199) \
- x(inode_compression_type_invalid, 200) \
- x(inode_subvol_root_but_not_dir, 201) \
- x(inode_i_size_dirty_but_clean, 202) \
- x(inode_i_sectors_dirty_but_clean, 203) \
- x(inode_i_sectors_wrong, 204) \
- x(inode_dir_wrong_nlink, 205) \
- x(inode_dir_multiple_links, 206) \
- x(inode_multiple_links_but_nlink_0, 207) \
- x(inode_wrong_backpointer, 208) \
- x(inode_wrong_nlink, 209) \
- x(inode_unreachable, 210) \
- x(deleted_inode_but_clean, 211) \
- x(deleted_inode_missing, 212) \
- x(deleted_inode_is_dir, 213) \
- x(deleted_inode_not_unlinked, 214) \
- x(extent_overlapping, 215) \
- x(extent_in_missing_inode, 216) \
- x(extent_in_non_reg_inode, 217) \
- x(extent_past_end_of_inode, 218) \
- x(dirent_empty_name, 219) \
- x(dirent_val_too_big, 220) \
- x(dirent_name_too_long, 221) \
- x(dirent_name_embedded_nul, 222) \
- x(dirent_name_dot_or_dotdot, 223) \
- x(dirent_name_has_slash, 224) \
- x(dirent_d_type_wrong, 225) \
- x(inode_bi_parent_wrong, 226) \
- x(dirent_in_missing_dir_inode, 227) \
- x(dirent_in_non_dir_inode, 228) \
- x(dirent_to_missing_inode, 229) \
- x(dirent_to_missing_subvol, 230) \
- x(dirent_to_itself, 231) \
- x(quota_type_invalid, 232) \
- x(xattr_val_size_too_small, 233) \
- x(xattr_val_size_too_big, 234) \
- x(xattr_invalid_type, 235) \
- x(xattr_name_invalid_chars, 236) \
- x(xattr_in_missing_inode, 237) \
- x(root_subvol_missing, 238) \
- x(root_dir_missing, 239) \
- x(root_inode_not_dir, 240) \
- x(dir_loop, 241) \
- x(hash_table_key_duplicate, 242) \
- x(hash_table_key_wrong_offset, 243) \
- x(unlinked_inode_not_on_deleted_list, 244) \
- x(reflink_p_front_pad_bad, 245) \
- x(journal_entry_dup_same_device, 246) \
- x(inode_bi_subvol_missing, 247) \
- x(inode_bi_subvol_wrong, 248) \
- x(inode_points_to_missing_dirent, 249) \
- x(inode_points_to_wrong_dirent, 250) \
- x(inode_bi_parent_nonzero, 251) \
- x(dirent_to_missing_parent_subvol, 252) \
- x(dirent_not_visible_in_parent_subvol, 253) \
- x(subvol_fs_path_parent_wrong, 254) \
- x(subvol_root_fs_path_parent_nonzero, 255) \
- x(subvol_children_not_set, 256) \
- x(subvol_children_bad, 257) \
- x(subvol_loop, 258) \
- x(subvol_unreachable, 259) \
- x(btree_node_bkey_bad_u64s, 260) \
- x(btree_node_topology_empty_interior_node, 261) \
- x(btree_ptr_v2_min_key_bad, 262) \
- x(btree_root_unreadable_and_scan_found_nothing, 263) \
- x(snapshot_node_missing, 264) \
- x(dup_backpointer_to_bad_csum_extent, 265) \
- x(btree_bitmap_not_marked, 266) \
- x(sb_clean_entry_overrun, 267) \
- x(btree_ptr_v2_written_0, 268) \
- x(subvol_snapshot_bad, 269) \
- x(subvol_inode_bad, 270)
-
-enum bch_sb_error_id {
-#define x(t, n) BCH_FSCK_ERR_##t = n,
- BCH_SB_ERRS()
-#undef x
- BCH_SB_ERR_MAX
-};
-
struct bch_sb_error_entry_cpu {
u64 id:16,
nr:48;
@@ -293,4 +13,3 @@ struct bch_sb_error_entry_cpu {
typedef DARRAY(struct bch_sb_error_entry_cpu) bch_sb_errors_cpu;
#endif /* _BCACHEFS_SB_ERRORS_TYPES_H */
-
diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h
new file mode 100644
index 000000000000..e2630548c0f6
--- /dev/null
+++ b/fs/bcachefs/sb-members_format.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_MEMBERS_FORMAT_H
+#define _BCACHEFS_SB_MEMBERS_FORMAT_H
+
+/*
+ * We refer to members with bitmasks in various places - but we need to get rid
+ * of this limit:
+ */
+#define BCH_SB_MEMBERS_MAX 64
+
+#define BCH_MIN_NR_NBUCKETS (1 << 6)
+
+#define BCH_IOPS_MEASUREMENTS() \
+ x(seqread, 0) \
+ x(seqwrite, 1) \
+ x(randread, 2) \
+ x(randwrite, 3)
+
+enum bch_iops_measurement {
+#define x(t, n) BCH_IOPS_##t = n,
+ BCH_IOPS_MEASUREMENTS()
+#undef x
+ BCH_IOPS_NR
+};
+
+#define BCH_MEMBER_ERROR_TYPES() \
+ x(read, 0) \
+ x(write, 1) \
+ x(checksum, 2)
+
+enum bch_member_error_type {
+#define x(t, n) BCH_MEMBER_ERROR_##t = n,
+ BCH_MEMBER_ERROR_TYPES()
+#undef x
+ BCH_MEMBER_ERROR_NR
+};
+
+struct bch_member {
+ __uuid_t uuid;
+ __le64 nbuckets; /* device size */
+ __le16 first_bucket; /* index of first bucket used */
+ __le16 bucket_size; /* sectors */
+ __u8 btree_bitmap_shift;
+ __u8 pad[3];
+ __le64 last_mount; /* time_t */
+
+ __le64 flags;
+ __le32 iops[4];
+ __le64 errors[BCH_MEMBER_ERROR_NR];
+ __le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
+ __le64 errors_reset_time;
+ __le64 seq;
+ __le64 btree_allocated_bitmap;
+ /*
+ * On recovery from a clean shutdown we don't normally read the journal,
+ * but we still want to resume writing from where we left off so we
+ * don't overwrite more than is necessary, for list journal debugging:
+ */
+ __le32 last_journal_bucket;
+ __le32 last_journal_bucket_offset;
+};
+
+/*
+ * This limit comes from the bucket_gens array - it's a single allocation, and
+ * kernel allocations are limited to INT_MAX
+ */
+#define BCH_MEMBER_NBUCKETS_MAX (INT_MAX - 64)
+
+#define BCH_MEMBER_V1_BYTES 56
+
+LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
+/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
+LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
+LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags, 15, 20)
+LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
+LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
+LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
+ struct bch_member, flags, 30, 31)
+
+#if 0
+LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
+LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
+#endif
+
+#define BCH_MEMBER_STATES() \
+ x(rw, 0) \
+ x(ro, 1) \
+ x(failed, 2) \
+ x(spare, 3)
+
+enum bch_member_state {
+#define x(t, n) BCH_MEMBER_STATE_##t = n,
+ BCH_MEMBER_STATES()
+#undef x
+ BCH_MEMBER_STATE_NR
+};
+
+struct bch_sb_field_members_v1 {
+ struct bch_sb_field field;
+ struct bch_member _members[]; // Members are now variable size
+};
+
+struct bch_sb_field_members_v2 {
+ struct bch_sb_field field;
+ __le16 member_bytes; // size of a single member entry
+ u8 pad[6];
+ struct bch_member _members[];
+};
+
+#endif /* _BCACHEFS_SB_MEMBERS_FORMAT_H */
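
The x()-list macros in this new header (BCH_IOPS_MEASUREMENTS(), BCH_MEMBER_ERROR_TYPES(), BCH_MEMBER_STATES()) are classic X-macros: one list is expanded here into an enum and can be re-expanded elsewhere into matching tables. A minimal standalone sketch of the idiom, outside the patch; the EXAMPLE_* names are invented for illustration and are not bcachefs API:

#include <stdio.h>

#define EXAMPLE_MEMBER_STATES()		\
	x(rw,		0)		\
	x(ro,		1)		\
	x(failed,	2)		\
	x(spare,	3)

/* Expand the list once to get the enum values... */
enum example_member_state {
#define x(t, n) EXAMPLE_MEMBER_STATE_##t = n,
	EXAMPLE_MEMBER_STATES()
#undef x
	EXAMPLE_MEMBER_STATE_NR
};

/* ...and once more to get a name table that can never go out of sync. */
static const char * const example_member_states[] = {
#define x(t, n) [n] = #t,
	EXAMPLE_MEMBER_STATES()
#undef x
};

int main(void)
{
	for (int i = 0; i < EXAMPLE_MEMBER_STATE_NR; i++)
		printf("%d -> %s\n", i, example_member_states[i]);
	return 0;
}
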
diff --git a/fs/bcachefs/seqmutex.h b/fs/bcachefs/seqmutex.h
index c1860d8163fb..c4b3d8d3f414 100644
--- a/fs/bcachefs/seqmutex.h
+++ b/fs/bcachefs/seqmutex.h
@@ -19,17 +19,14 @@ static inline bool seqmutex_trylock(struct seqmutex *lock)
static inline void seqmutex_lock(struct seqmutex *lock)
{
mutex_lock(&lock->lock);
-}
-
-static inline void seqmutex_unlock(struct seqmutex *lock)
-{
lock->seq++;
- mutex_unlock(&lock->lock);
}
-static inline u32 seqmutex_seq(struct seqmutex *lock)
+static inline u32 seqmutex_unlock(struct seqmutex *lock)
{
- return lock->seq;
+ u32 seq = lock->seq;
+ mutex_unlock(&lock->lock);
+ return seq;
}
static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
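
With this change seqmutex_unlock() hands back the sequence number, so a caller can drop the lock around blocking work and later use seqmutex_relock() to learn whether anyone else took the lock in the meantime. A hypothetical caller, not from the tree, with msleep() standing in for whatever blocking work real callers do:

static bool example_do_work_outside_lock(struct seqmutex *lock)
{
	u32 seq;

	seqmutex_lock(lock);
	/* ... inspect the structure protected by @lock ... */
	seq = seqmutex_unlock(lock);	/* drop the lock, remember its sequence */

	/* Blocking work that must not run under the mutex. */
	msleep(10);

	if (!seqmutex_relock(lock, seq))
		return false;		/* lock was retaken; cached state is stale */

	/* Still locked, and nothing took the lock while we slept. */
	seqmutex_unlock(lock);
	return true;
}
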
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 629900a5e641..24023d6a9698 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -168,6 +168,9 @@ static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
+ if (unlikely(new_bytes > INT_MAX))
+ return NULL;
+
new = kvzalloc(new_bytes, GFP_KERNEL);
if (!new)
return NULL;
@@ -1042,6 +1045,25 @@ err:
return ret;
}
+int bch2_check_key_has_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
+ bkey_in_missing_snapshot,
+ "key in missing snapshot %s, delete?",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_internal_snapshot_node) ?: 1;
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
/*
* Mark a snapshot as deleted, for future cleanup:
*/
@@ -1351,35 +1373,39 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
* that key to snapshot leaf nodes, where we can mutate it
*/
-static int snapshot_delete_key(struct btree_trans *trans,
+static int delete_dead_snapshots_process_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
snapshot_id_list *deleted,
snapshot_id_list *equiv_seen,
struct bpos *last_pos)
{
+ int ret = bch2_check_key_has_snapshot(trans, iter, k);
+ if (ret)
+ return ret < 0 ? ret : 0;
+
struct bch_fs *c = trans->c;
u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+ if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
+ return 0;
if (!bkey_eq(k.k->p, *last_pos))
equiv_seen->nr = 0;
- *last_pos = k.k->p;
- if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
- snapshot_list_has_id(equiv_seen, equiv)) {
+ if (snapshot_list_has_id(deleted, k.k->p.snapshot))
return bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_internal_snapshot_node);
- } else {
- return snapshot_list_add(c, equiv_seen, equiv);
- }
-}
-static int move_key_to_correct_snapshot(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k)
-{
- struct bch_fs *c = trans->c;
- u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+ if (!bpos_eq(*last_pos, k.k->p) &&
+ snapshot_list_has_id(equiv_seen, equiv))
+ return bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_internal_snapshot_node);
+
+ *last_pos = k.k->p;
+
+ ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
+ if (ret)
+ return ret;
/*
* When we have a linear chain of snapshot nodes, we consider
@@ -1389,21 +1415,20 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
*
* If there are multiple keys in different snapshots at the same
* position, we're only going to keep the one in the newest
- * snapshot - the rest have been overwritten and are redundant,
- * and for the key we're going to keep we need to move it to the
- * equivalance class ID if it's not there already.
+ * snapshot (we delete the others above) - the rest have been
+ * overwritten and are redundant, and for the key we're going to keep we
+ * need to move it to the equivalence class ID if it's not there
+ * already.
*/
if (equiv != k.k->p.snapshot) {
struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
- struct btree_iter new_iter;
- int ret;
-
- ret = PTR_ERR_OR_ZERO(new);
+ int ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
new->k.p.snapshot = equiv;
+ struct btree_iter new_iter;
bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
BTREE_ITER_all_snapshots|
BTREE_ITER_cached|
@@ -1538,19 +1563,11 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
struct btree_trans *trans;
snapshot_id_list deleted = { 0 };
snapshot_id_list deleted_interior = { 0 };
- u32 id;
int ret = 0;
if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
return 0;
- if (!test_bit(BCH_FS_started, &c->flags)) {
- ret = bch2_fs_read_write_early(c);
- bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
- if (ret)
- return ret;
- }
-
trans = bch2_trans_get(c);
/*
@@ -1585,33 +1602,20 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
if (ret)
goto err;
- for (id = 0; id < BTREE_ID_NR; id++) {
+ for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
struct bpos last_pos = POS_MIN;
snapshot_id_list equiv_seen = { 0 };
struct disk_reservation res = { 0 };
- if (!btree_type_has_snapshots(id))
- continue;
-
- /*
- * deleted inodes btree is maintained by a trigger on the inodes
- * btree - no work for us to do here, and it's not safe to scan
- * it because we'll see out of date keys due to the btree write
- * buffer:
- */
- if (id == BTREE_ID_deleted_inodes)
+ if (!btree_type_has_snapshots(btree))
continue;
ret = for_each_btree_key_commit(trans, iter,
- id, POS_MIN,
+ btree, POS_MIN,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
&res, NULL, BCH_TRANS_COMMIT_no_enospc,
- snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
- for_each_btree_key_commit(trans, iter,
- id, POS_MIN,
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- &res, NULL, BCH_TRANS_COMMIT_no_enospc,
- move_key_to_correct_snapshot(trans, &iter, k));
+ delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
+ &equiv_seen, &last_pos));
bch2_disk_reservation_put(c, &res);
darray_exit(&equiv_seen);
@@ -1679,6 +1683,8 @@ void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
+ set_worker_desc("bcachefs-delete-dead-snapshots/%s", c->name);
+
bch2_delete_dead_snapshots(c);
bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index ab13d8f4b41e..31b0ee03e962 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -242,6 +242,7 @@ int bch2_snapshot_node_create(struct btree_trans *, u32,
int bch2_check_snapshot_trees(struct bch_fs *);
int bch2_check_snapshots(struct bch_fs *);
int bch2_reconstruct_snapshots(struct bch_fs *);
+int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c);
int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
void bch2_delete_dead_snapshots_work(struct work_struct *);
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index cbad9b27874f..c8c266cb5797 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -300,7 +300,7 @@ not_found:
if (!found && (flags & STR_HASH_must_replace)) {
ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
} else if (found && (flags & STR_HASH_must_create)) {
- ret = -EEXIST;
+ ret = -BCH_ERR_EEXIST_str_hash_set;
} else {
if (!found && slot.path)
swap(iter, slot);
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index f1bee6c5222d..b156fc85b8a3 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -649,9 +649,10 @@ reread:
bytes = vstruct_bytes(sb->sb);
- if (bytes > 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits)) {
- prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %lu)",
- bytes, 512UL << sb->sb->layout.sb_max_size_bits);
+ u64 sb_size = 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits);
+ if (bytes > sb_size) {
+ prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %llu)",
+ bytes, sb_size);
return -BCH_ERR_invalid_sb_too_big;
}
@@ -1132,18 +1133,12 @@ bool bch2_check_version_downgrade(struct bch_fs *c)
* c->sb will be checked before we write the superblock, so update it as
* well:
*/
- if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) {
+ if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
- c->sb.version_upgrade_complete = bcachefs_metadata_version_current;
- }
- if (c->sb.version > bcachefs_metadata_version_current) {
+ if (c->sb.version > bcachefs_metadata_version_current)
c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
- c->sb.version = bcachefs_metadata_version_current;
- }
- if (c->sb.version_min > bcachefs_metadata_version_current) {
+ if (c->sb.version_min > bcachefs_metadata_version_current)
c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
- c->sb.version_min = bcachefs_metadata_version_current;
- }
c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
return ret;
}
@@ -1316,15 +1311,15 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
prt_printf(out, "Device index:\t%u\n", sb->dev_idx);
- prt_str(out, "Label:\t");
+ prt_printf(out, "Label:\t");
prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label);
prt_newline(out);
- prt_str(out, "Version:\t");
+ prt_printf(out, "Version:\t");
bch2_version_to_text(out, le16_to_cpu(sb->version));
prt_newline(out);
- prt_str(out, "Version upgrade complete:\t");
+ prt_printf(out, "Version upgrade complete:\t");
bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb));
prt_newline(out);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 2206a8dee693..da735608d47c 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -536,7 +536,6 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_find_btree_nodes_exit(&c->found_btree_nodes);
bch2_free_pending_node_rewrites(c);
- bch2_fs_allocator_background_exit(c);
bch2_fs_sb_errors_exit(c);
bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
@@ -564,8 +563,11 @@ static void __bch2_fs_free(struct bch_fs *c)
BUG_ON(atomic_read(&c->journal_keys.ref));
bch2_fs_btree_write_buffer_exit(c);
percpu_free_rwsem(&c->mark_lock);
- EBUG_ON(percpu_u64_get(c->online_reserved));
- free_percpu(c->online_reserved);
+ if (c->online_reserved) {
+ u64 v = percpu_u64_get(c->online_reserved);
+ WARN(v, "online_reserved not 0 at shutdown: %lli", v);
+ free_percpu(c->online_reserved);
+ }
darray_exit(&c->btree_roots_extra);
free_percpu(c->pcpu);
@@ -582,8 +584,10 @@ static void __bch2_fs_free(struct bch_fs *c)
if (c->write_ref_wq)
destroy_workqueue(c->write_ref_wq);
- if (c->io_complete_wq)
- destroy_workqueue(c->io_complete_wq);
+ if (c->btree_write_submit_wq)
+ destroy_workqueue(c->btree_write_submit_wq);
+ if (c->btree_read_complete_wq)
+ destroy_workqueue(c->btree_read_complete_wq);
if (c->copygc_wq)
destroy_workqueue(c->copygc_wq);
if (c->btree_io_complete_wq)
@@ -878,8 +882,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
- !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
+ !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) ||
+ !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_submit",
+ WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
WQ_FREEZABLE, 0)) ||
#ifndef BCH_WRITE_REF_DEBUG
@@ -908,9 +914,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_io_clock_init(&c->io_clock[WRITE]) ?:
bch2_fs_journal_init(&c->journal) ?:
bch2_fs_replicas_init(c) ?:
+ bch2_fs_btree_iter_init(c) ?:
bch2_fs_btree_cache_init(c) ?:
bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
- bch2_fs_btree_iter_init(c) ?:
bch2_fs_btree_interior_update_init(c) ?:
bch2_fs_buckets_waiting_for_journal_init(c) ?:
bch2_fs_btree_write_buffer_init(c) ?:
@@ -927,12 +933,13 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
if (ret)
goto err;
- for (i = 0; i < c->sb.nr_devices; i++)
- if (bch2_member_exists(c->disk_sb.sb, i) &&
- bch2_dev_alloc(c, i)) {
- ret = -EEXIST;
+ for (i = 0; i < c->sb.nr_devices; i++) {
+ if (!bch2_member_exists(c->disk_sb.sb, i))
+ continue;
+ ret = bch2_dev_alloc(c, i);
+ if (ret)
goto err;
- }
+ }
bch2_journal_entry_res_resize(&c->journal,
&c->btree_root_journal_res,
@@ -1190,6 +1197,7 @@ static void bch2_dev_free(struct bch_dev *ca)
kfree(ca->buckets_nouse);
bch2_free_super(&ca->disk_sb);
+ bch2_dev_allocator_background_exit(ca);
bch2_dev_journal_exit(ca);
free_percpu(ca->io_done);
@@ -1312,6 +1320,8 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
atomic_long_set(&ca->ref, 1);
#endif
+ bch2_dev_allocator_background_init(ca);
+
if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
@@ -1524,6 +1534,7 @@ static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
* The allocator thread itself allocates btree nodes, so stop it first:
*/
bch2_dev_allocator_remove(c, ca);
+ bch2_recalc_capacity(c);
bch2_dev_journal_stop(&c->journal, ca);
}
@@ -1535,6 +1546,7 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+ bch2_dev_do_discards(ca);
}
int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
@@ -1760,7 +1772,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
if (ret)
goto err;
- ret = bch2_dev_journal_alloc(ca);
+ ret = bch2_dev_journal_alloc(ca, true);
bch_err_msg(c, ret, "allocating journal");
if (ret)
goto err;
@@ -1920,7 +1932,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
}
if (!ca->journal.nr) {
- ret = bch2_dev_journal_alloc(ca);
+ ret = bch2_dev_journal_alloc(ca, false);
bch_err_msg(ca, ret, "allocating journal");
if (ret)
goto err;
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index de331dec2a99..4ec7e44d6e36 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -252,8 +252,10 @@ void bch2_prt_u64_base2(struct printbuf *out, u64 v)
bch2_prt_u64_base2_nbits(out, v, fls64(v) ?: 1);
}
-void bch2_print_string_as_lines(const char *prefix, const char *lines)
+static void __bch2_print_string_as_lines(const char *prefix, const char *lines,
+ bool nonblocking)
{
+ bool locked = false;
const char *p;
if (!lines) {
@@ -261,7 +263,13 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
return;
}
- console_lock();
+ if (!nonblocking) {
+ console_lock();
+ locked = true;
+ } else {
+ locked = console_trylock();
+ }
+
while (1) {
p = strchrnul(lines, '\n');
printk("%s%.*s\n", prefix, (int) (p - lines), lines);
@@ -269,7 +277,18 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
break;
lines = p + 1;
}
- console_unlock();
+ if (locked)
+ console_unlock();
+}
+
+void bch2_print_string_as_lines(const char *prefix, const char *lines)
+{
+ return __bch2_print_string_as_lines(prefix, lines, false);
+}
+
+void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines)
+{
+ return __bch2_print_string_as_lines(prefix, lines, true);
}
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr,
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 5d2c470a49ac..5b0533ec4c7e 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -315,6 +315,7 @@ void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);
void bch2_print_string_as_lines(const char *prefix, const char *lines);
+void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines);
typedef DARRAY(unsigned long) bch_stacktrace;
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index d76f406d3b2e..f92f108840f5 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -475,6 +475,7 @@ static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
befs_data_stream *data = &befs_ino->i_data.ds;
befs_off_t len = data->size;
char *link = folio_address(folio);
+ int err = -EIO;
if (len == 0 || len > PAGE_SIZE) {
befs_error(sb, "Long symlink with illegal length");
@@ -487,13 +488,10 @@ static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
goto fail;
}
link[len - 1] = '\0';
- folio_mark_uptodate(folio);
- folio_unlock(folio);
- return 0;
+ err = 0;
fail:
- folio_set_error(folio);
- folio_unlock(folio);
- return -EIO;
+ folio_end_read(folio, err == 0);
+ return err;
}
/*
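
folio_end_read() folds the old mark-uptodate/unlock (success) and unlock-only (failure) paths into a single call whose second argument says whether the read worked. A hypothetical read_folio handler using the same pattern; example_fill_folio() is a made-up helper, not befs code:

static int example_read_folio(struct file *file, struct folio *folio)
{
	int err = example_fill_folio(folio);	/* hypothetical: 0 on success */

	/* Marks the folio uptodate only on success, then unlocks it. */
	folio_end_read(folio, err == 0);
	return err;
}
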
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index a43897b03ce9..5ae8045f4df4 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1003,7 +1003,8 @@ out_free_interp:
if (elf_read_implies_exec(*elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
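
The point of snapshotting randomize_va_space with READ_ONCE() is that the sysctl can change while the binary is being loaded; taking a single snapshot keeps the PF_RANDOMIZE decision here and the brk-randomization check further down consistent with each other. A minimal hypothetical sketch of the pattern; example_tunable and the helpers are placeholders:

static void example_apply_tunable(void)
{
	/* One snapshot: later checks cannot disagree if the value changes. */
	const int snapshot = READ_ONCE(example_tunable);

	if (snapshot)
		example_enable_feature();
	if (snapshot > 1)
		example_enable_extra_feature();
}
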
@@ -1061,10 +1062,40 @@ out_free_interp:
* Header for ET_DYN binaries to calculate the
* randomization (load_bias) for all the LOAD
* Program Headers.
+ */
+
+ /*
+ * Calculate the entire size of the ELF mapping
+ * (total_size), used for the initial mapping,
+ * due to load_addr_set which is set to true later
+ * once the initial mapping is performed.
+ *
+ * Note that this is only sensible when the LOAD
+ * segments are contiguous (or overlapping). If
+ * used for LOADs that are far apart, this would
+ * cause the holes between LOADs to be mapped,
+ * running the risk of having the mapping fail,
+ * as it would be larger than the ELF file itself.
*
+ * As a result, only ET_DYN does this, since
+ * some ET_EXEC (e.g. ia64) may have large virtual
+ * memory holes between LOADs.
+ *
+ */
+ total_size = total_mapping_size(elf_phdata,
+ elf_ex->e_phnum);
+ if (!total_size) {
+ retval = -EINVAL;
+ goto out_free_dentry;
+ }
+
+ /* Calculate any requested alignment. */
+ alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+
+ /*
* There are effectively two types of ET_DYN
- * binaries: programs (i.e. PIE: ET_DYN with INTERP)
- * and loaders (ET_DYN without INTERP, since they
+ * binaries: programs (i.e. PIE: ET_DYN with PT_INTERP)
+ * and loaders (ET_DYN without PT_INTERP, since they
* _are_ the ELF interpreter). The loaders must
* be loaded away from programs since the program
* may otherwise collide with the loader (especially
@@ -1084,15 +1115,44 @@ out_free_interp:
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/
if (interpreter) {
+ /* On ET_DYN with PT_INTERP, we do the ASLR. */
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
- alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+ /* Adjust alignment as requested. */
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
- } else
- load_bias = 0;
+ } else {
+ /*
+ * For ET_DYN without PT_INTERP, we rely on
+ * the architecture's (potentially ASLR) mmap
+ * base address (via a load_bias of 0).
+ *
+ * When a large alignment is requested, we
+ * must do the allocation at address "0" right
+ * now to discover where things will load so
+ * that we can adjust the resulting alignment.
+ * In this case (load_bias != 0), we can use
+ * MAP_FIXED_NOREPLACE to make sure the mapping
+ * doesn't collide with anything.
+ */
+ if (alignment > ELF_MIN_ALIGN) {
+ load_bias = elf_load(bprm->file, 0, elf_ppnt,
+ elf_prot, elf_flags, total_size);
+ if (BAD_ADDR(load_bias)) {
+ retval = IS_ERR_VALUE(load_bias) ?
+ PTR_ERR((void*)load_bias) : -EINVAL;
+ goto out_free_dentry;
+ }
+ vm_munmap(load_bias, total_size);
+ /* Adjust alignment as requested. */
+ if (alignment)
+ load_bias &= ~(alignment - 1);
+ elf_flags |= MAP_FIXED_NOREPLACE;
+ } else
+ load_bias = 0;
+ }
/*
* Since load_bias is used for all subsequent loading
@@ -1102,31 +1162,6 @@ out_free_interp:
* is then page aligned.
*/
load_bias = ELF_PAGESTART(load_bias - vaddr);
-
- /*
- * Calculate the entire size of the ELF mapping
- * (total_size), used for the initial mapping,
- * due to load_addr_set which is set to true later
- * once the initial mapping is performed.
- *
- * Note that this is only sensible when the LOAD
- * segments are contiguous (or overlapping). If
- * used for LOADs that are far apart, this would
- * cause the holes between LOADs to be mapped,
- * running the risk of having the mapping fail,
- * as it would be larger than the ELF file itself.
- *
- * As a result, only ET_DYN does this, since
- * some ET_EXEC (e.g. ia64) may have large virtual
- * memory holes between LOADs.
- *
- */
- total_size = total_mapping_size(elf_phdata,
- elf_ex->e_phnum);
- if (!total_size) {
- retval = -EINVAL;
- goto out_free_dentry;
- }
}
error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
@@ -1216,7 +1251,6 @@ out_free_interp:
}
reloc_func_desc = interp_load_addr;
- allow_write_access(interpreter);
fput(interpreter);
kfree(interp_elf_ex);
@@ -1251,7 +1285,7 @@ out_free_interp:
mm->end_data = end_data;
mm->start_stack = bprm->p;
- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
+ if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
/*
* For architectures with ELF randomization, when executing
* a loader directly (i.e. no interpreter listed in ELF
@@ -1308,7 +1342,6 @@ out_free_dentry:
kfree(interp_elf_ex);
kfree(interp_elf_phdata);
out_free_file:
- allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
out_free_ph:
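
The load_bias handling above boils down to two rounding steps: mask the chosen base down to the alignment requested by the PT_LOAD headers, then page-align the difference to the first segment's p_vaddr. A standalone sketch of the arithmetic with made-up numbers; the EXAMPLE_ names are not kernel macros:

#include <stdio.h>
#include <stdint.h>

/* Round down to a page boundary, like ELF_PAGESTART() with 4 KiB pages. */
#define EXAMPLE_PAGE_SIZE	4096ULL
#define example_pagestart(v)	((v) & ~(EXAMPLE_PAGE_SIZE - 1))

int main(void)
{
	uint64_t load_bias = 0x7f3a12345678ULL;	/* base picked by mmap (made up) */
	uint64_t alignment = 0x200000ULL;	/* 2 MiB requested by the PT_LOADs */
	uint64_t vaddr     = 0x1000ULL;		/* p_vaddr of the first PT_LOAD */

	/* Round the base down to the requested alignment... */
	load_bias &= ~(alignment - 1);
	/* ...then turn it into a page-aligned bias relative to p_vaddr. */
	load_bias = example_pagestart(load_bias - vaddr);

	printf("load_bias = %#llx\n", (unsigned long long)load_bias);
	return 0;
}
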
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index b799701454a9..28a3439f163a 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -394,7 +394,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
goto error;
}
- allow_write_access(interpreter);
fput(interpreter);
interpreter = NULL;
}
@@ -466,10 +465,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
retval = 0;
error:
- if (interpreter) {
- allow_write_access(interpreter);
+ if (interpreter)
fput(interpreter);
- }
kfree(interpreter_name);
kfree(exec_params.phdrs);
kfree(exec_params.loadmap);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 68fa225f89e5..31660d8cc2c6 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -247,13 +247,10 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (retval < 0)
goto ret;
- if (fmt->flags & MISC_FMT_OPEN_FILE) {
+ if (fmt->flags & MISC_FMT_OPEN_FILE)
interp_file = file_clone_open(fmt->interp_file);
- if (!IS_ERR(interp_file))
- deny_write_access(interp_file);
- } else {
+ else
interp_file = open_exec(fmt->interpreter);
- }
retval = PTR_ERR(interp_file);
if (IS_ERR(interp_file))
goto ret;
@@ -1086,4 +1083,5 @@ static void __exit exit_misc_binfmt(void)
core_initcall(init_misc_binfmt);
module_exit(exit_misc_binfmt);
+MODULE_DESCRIPTION("Kernel support for miscellaneous binaries");
MODULE_LICENSE("GPL");
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index 1b6625e95958..637daf6e4d45 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -155,4 +155,5 @@ static void __exit exit_script_binfmt(void)
core_initcall(init_script_binfmt);
module_exit(exit_script_binfmt);
+MODULE_DESCRIPTION("Kernel support for scripts starting with #!");
MODULE_LICENSE("GPL");
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 525af975f61c..87617f2968bc 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -33,7 +33,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
subpage.o tree-mod-log.o extent-io-tree.o fs.o messages.o bio.o \
- lru_cache.o raid-stripe-tree.o
+ lru_cache.o raid-stripe-tree.o fiemap.o direct-io.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
index 6fce3e8d3dac..b2eb9cde2c5d 100644
--- a/fs/btrfs/accessors.h
+++ b/fs/btrfs/accessors.h
@@ -34,7 +34,7 @@ void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *e
static inline u8 get_unaligned_le8(const void *p)
{
- return *(u8 *)p;
+ return *(const u8 *)p;
}
static inline void put_unaligned_le8(u8 val, void *p)
@@ -48,8 +48,8 @@ static inline void put_unaligned_le8(u8 val, void *p)
offsetof(type, member), \
sizeof_field(type, member)))
-#define write_eb_member(eb, ptr, type, member, result) (\
- write_extent_buffer(eb, (char *)(result), \
+#define write_eb_member(eb, ptr, type, member, source) ( \
+ write_extent_buffer(eb, (const char *)(source), \
((unsigned long)(ptr)) + \
offsetof(type, member), \
sizeof_field(type, member)))
@@ -315,11 +315,8 @@ BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
-BTRFS_SETGET_FUNCS(stripe_extent_encoding, struct btrfs_stripe_extent, encoding, 8);
BTRFS_SETGET_FUNCS(raid_stride_devid, struct btrfs_raid_stride, devid, 64);
BTRFS_SETGET_FUNCS(raid_stride_physical, struct btrfs_raid_stride, physical, 64);
-BTRFS_SETGET_STACK_FUNCS(stack_stripe_extent_encoding,
- struct btrfs_stripe_extent, encoding, 8);
BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_devid, struct btrfs_raid_stride, devid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_physical, struct btrfs_raid_stride, physical, 64);
@@ -353,7 +350,7 @@ static inline void btrfs_tree_block_key(const struct extent_buffer *eb,
static inline void btrfs_set_tree_block_key(const struct extent_buffer *eb,
struct btrfs_tree_block_info *item,
- struct btrfs_disk_key *key)
+ const struct btrfs_disk_key *key)
{
write_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
}
@@ -446,7 +443,7 @@ void btrfs_node_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr);
static inline void btrfs_set_node_key(const struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int nr)
+ const struct btrfs_disk_key *disk_key, int nr)
{
unsigned long ptr;
@@ -512,7 +509,7 @@ static inline void btrfs_item_key(const struct extent_buffer *eb,
}
static inline void btrfs_set_item_key(struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int nr)
+ const struct btrfs_disk_key *disk_key, int nr)
{
struct btrfs_item *item = btrfs_item_nr(eb, nr);
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 477f350a8bd0..f04d93109960 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -29,7 +29,7 @@ struct btrfs_failed_bio {
/* Is this a data path I/O that needs storage layer checksum and repair? */
static inline bool is_data_bbio(struct btrfs_bio *bbio)
{
- return bbio->inode && is_data_inode(&bbio->inode->vfs_inode);
+ return bbio->inode && is_data_inode(bbio->inode);
}
static bool bbio_has_ordered_extent(struct btrfs_bio *bbio)
@@ -732,7 +732,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
* point, so they are handled as part of the no-checksum case.
*/
if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) &&
- !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
+ !test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state) &&
!btrfs_is_data_reloc_root(inode->root)) {
if (should_async_write(bbio) &&
btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
@@ -741,7 +741,9 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
ret = btrfs_bio_csum(bbio);
if (ret)
goto fail_put_bio;
- } else if (use_append) {
+ } else if (use_append ||
+ (btrfs_is_zoned(fs_info) && inode &&
+ inode->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_alloc_dummy_sum(bbio);
if (ret)
goto fail_put_bio;
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 1e09aeea69c2..498442d0c216 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1022,6 +1022,13 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
}
+static struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
+{
+ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
+ return fs_info->block_group_root;
+ return btrfs_extent_root(fs_info, 0);
+}
+
static int remove_block_group_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_block_group *block_group)
@@ -1757,24 +1764,21 @@ static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
{
- const struct btrfs_space_info *space_info = bg->space_info;
- const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
+ const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
+ u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
const u64 new_val = bg->used;
const u64 old_val = new_val + bytes_freed;
- u64 thresh;
- if (reclaim_thresh == 0)
+ if (thresh_bytes == 0)
return false;
- thresh = mult_perc(bg->length, reclaim_thresh);
-
/*
* If we were below the threshold before don't reclaim, we are likely a
* brand new block group and we don't want to relocate new block groups.
*/
- if (old_val < thresh)
+ if (old_val < thresh_bytes)
return false;
- if (new_val >= thresh)
+ if (new_val >= thresh_bytes)
return false;
return true;
}
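
The reworked should_reclaim_block_group() asks one question: did this free just move the block group's usage from at-or-above the (possibly dynamic) threshold to below it? A standalone sketch of the arithmetic, with example_mult_perc() standing in for btrfs's mult_perc() and made-up sizes:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as btrfs's mult_perc(): a percentage of a byte count. */
static uint64_t example_mult_perc(uint64_t num, uint32_t percent)
{
	return num * percent / 100;
}

int main(void)
{
	/* Hypothetical block group: 1 GiB long, 75% reclaim threshold. */
	uint64_t length  = 1024ULL * 1024 * 1024;
	uint64_t thresh  = example_mult_perc(length, 75);
	uint64_t freed   = 200ULL * 1024 * 1024;
	uint64_t new_val = 700ULL * 1024 * 1024;	/* bytes used now */
	uint64_t old_val = new_val + freed;		/* bytes used before the free */

	/* Reclaim only when usage just crossed from above to below the threshold. */
	int reclaim = thresh && old_val >= thresh && new_val < thresh;

	printf("threshold=%llu reclaim=%d\n", (unsigned long long)thresh, reclaim);
	return 0;
}
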
@@ -1785,6 +1789,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
struct btrfs_block_group *bg;
struct btrfs_space_info *space_info;
+ LIST_HEAD(retry_list);
if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
@@ -1821,6 +1826,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
while (!list_empty(&fs_info->reclaim_bgs)) {
u64 zone_unusable;
+ u64 reclaimed;
int ret = 0;
bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1834,6 +1840,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
/* Don't race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
+ spin_lock(&space_info->lock);
spin_lock(&bg->lock);
if (bg->reserved || bg->pinned || bg->ro) {
/*
@@ -1843,6 +1850,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
* this block group.
*/
spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
up_write(&space_info->groups_sem);
goto next;
}
@@ -1861,6 +1869,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
btrfs_mark_bg_unused(bg);
spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
up_write(&space_info->groups_sem);
goto next;
@@ -1877,10 +1886,12 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
*/
if (!should_reclaim_block_group(bg, bg->length)) {
spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
up_write(&space_info->groups_sem);
goto next;
}
spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
/*
* Get out fast, in case we're read-only or unmounting the
@@ -1913,16 +1924,39 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
div64_u64(bg->used * 100, bg->length),
div64_u64(zone_unusable * 100, bg->length));
trace_btrfs_reclaim_block_group(bg);
+ reclaimed = bg->used;
ret = btrfs_relocate_chunk(fs_info, bg->start);
if (ret) {
btrfs_dec_block_group_ro(bg);
btrfs_err(fs_info, "error relocating chunk %llu",
bg->start);
+ reclaimed = 0;
+ spin_lock(&space_info->lock);
+ space_info->reclaim_errors++;
+ if (READ_ONCE(space_info->periodic_reclaim))
+ space_info->periodic_reclaim_ready = false;
+ spin_unlock(&space_info->lock);
}
+ spin_lock(&space_info->lock);
+ space_info->reclaim_count++;
+ space_info->reclaim_bytes += reclaimed;
+ spin_unlock(&space_info->lock);
next:
- if (ret)
- btrfs_mark_bg_to_reclaim(bg);
+ if (ret && !READ_ONCE(space_info->periodic_reclaim)) {
+ /* Refcount held by the reclaim_bgs list after splice. */
+ spin_lock(&fs_info->unused_bgs_lock);
+ /*
+ * This block group might be added to the unused list
+ * during the above process. Move it back to the
+ * reclaim list otherwise.
+ */
+ if (list_empty(&bg->bg_list)) {
+ btrfs_get_block_group(bg);
+ list_add_tail(&bg->bg_list, &retry_list);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+ }
btrfs_put_block_group(bg);
mutex_unlock(&fs_info->reclaim_bgs_lock);
@@ -1942,12 +1976,16 @@ next:
spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
end:
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
btrfs_exclop_finish(fs_info);
sb_end_write(fs_info->sb);
}
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
{
+ btrfs_reclaim_sweep(fs_info);
spin_lock(&fs_info->unused_bgs_lock);
if (!list_empty(&fs_info->reclaim_bgs))
queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
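
The retry_list handling above is a collect-and-splice pattern: block groups whose relocation failed are parked on a local list while the loop runs, and only spliced back onto fs_info->reclaim_bgs, under unused_bgs_lock, once the loop is finished. A hypothetical kernel-style sketch of the same shape; example_entry and example_process() are placeholders, and the real code additionally checks list_empty() and takes a reference:

struct example_entry {
	struct list_head list;
};

static void example_drain_and_retry(struct list_head *shared, spinlock_t *lock)
{
	LIST_HEAD(retry_list);
	struct example_entry *e;

	spin_lock(lock);
	while (!list_empty(shared)) {
		e = list_first_entry(shared, struct example_entry, list);
		list_del_init(&e->list);
		spin_unlock(lock);

		if (example_process(e))			/* nonzero on failure */
			list_add_tail(&e->list, &retry_list);

		spin_lock(lock);
	}
	/* Put every failed entry back on the shared list in one splice. */
	list_splice_tail(&retry_list, shared);
	spin_unlock(lock);
}
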
@@ -3646,9 +3684,12 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
old_val += num_bytes;
cache->used = old_val;
cache->reserved -= num_bytes;
+ cache->reclaim_mark = 0;
space_info->bytes_reserved -= num_bytes;
space_info->bytes_used += num_bytes;
space_info->disk_used += num_bytes * factor;
+ if (READ_ONCE(space_info->periodic_reclaim))
+ btrfs_space_info_update_reclaimable(space_info, -num_bytes);
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
} else {
@@ -3658,8 +3699,10 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
space_info->bytes_used -= num_bytes;
space_info->disk_used -= num_bytes * factor;
-
- reclaim = should_reclaim_block_group(cache, num_bytes);
+ if (READ_ONCE(space_info->periodic_reclaim))
+ btrfs_space_info_update_reclaimable(space_info, num_bytes);
+ else
+ reclaim = should_reclaim_block_group(cache, num_bytes);
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
@@ -4313,13 +4356,13 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
spin_lock(&block_group->lock);
if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
&block_group->runtime_flags)) {
- struct inode *inode = block_group->inode;
+ struct btrfs_inode *inode = block_group->inode;
block_group->inode = NULL;
spin_unlock(&block_group->lock);
ASSERT(block_group->io_ctl.inode == NULL);
- iput(inode);
+ iput(&inode->vfs_inode);
} else {
spin_unlock(&block_group->lock);
}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 85e2d4cd12dc..915111338fc0 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -115,7 +115,7 @@ struct btrfs_caching_control {
struct btrfs_block_group {
struct btrfs_fs_info *fs_info;
- struct inode *inode;
+ struct btrfs_inode *inode;
spinlock_t lock;
u64 start;
u64 length;
@@ -263,6 +263,7 @@ struct btrfs_block_group {
struct work_struct zone_finish_work;
struct extent_buffer *last_eb;
enum btrfs_block_group_size_class size_class;
+ u64 reclaim_mark;
};
static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 91c994b569f3..3056c8aed8ef 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -19,7 +19,6 @@
#include <uapi/linux/btrfs_tree.h>
#include <trace/events/btrfs.h>
#include "block-rsv.h"
-#include "btrfs_inode.h"
#include "extent_map.h"
#include "extent_io.h"
#include "extent-io-tree.h"
@@ -89,6 +88,39 @@ enum {
BTRFS_INODE_FREE_SPACE_INODE,
/* Set when there are no capabilities in XATTs for the inode. */
BTRFS_INODE_NO_CAP_XATTR,
+ /*
+ * Set if an error happened when doing a COW write before submitting a
+ * bio or during writeback. Used for both buffered writes and direct IO
+ * writes. This is to signal a fast fsync that it has to wait for
+ * ordered extents to complete and therefore not log extent maps that
+ * point to unwritten extents (when an ordered extent completes and it
+ * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its
+ * range).
+ */
+ BTRFS_INODE_COW_WRITE_ERROR,
+ /*
+ * Indicate this is a directory that points to a subvolume for which
+ * there is no root reference item. That's a case like the following:
+ *
+ * $ btrfs subvolume create /mnt/parent
+ * $ btrfs subvolume create /mnt/parent/child
+ * $ btrfs subvolume snapshot /mnt/parent /mnt/snap
+ *
+ * If subvolume "parent" is root 256, subvolume "child" is root 257 and
+ * snapshot "snap" is root 258, then there's no root reference item (key
+ * BTRFS_ROOT_REF_KEY in the root tree) for the subvolume "child"
+ * associated to root 258 (the snapshot) - there's only for the root
+ * of the "parent" subvolume (root 256). In the chunk root we have a
+ * (256 BTRFS_ROOT_REF_KEY 257) key but we don't have a
+ * (258 BTRFS_ROOT_REF_KEY 257) key - the same goes for backrefs, we
+ * have a (257 BTRFS_ROOT_BACKREF_KEY 256) but we don't have a
+ * (257 BTRFS_ROOT_BACKREF_KEY 258) key.
+ *
+ * So when opening the "child" dentry from the snapshot's directory,
+ * we don't find a root ref item and we create a stub inode. This is
+ * done at new_simple_dir(), called from btrfs_lookup_dentry().
+ */
+ BTRFS_INODE_ROOT_STUB,
};
/* in memory btrfs inode */
@@ -96,10 +128,14 @@ struct btrfs_inode {
/* which subvolume this inode belongs to */
struct btrfs_root *root;
- /* key used to find this inode on disk. This is used by the code
- * to read in roots of subvolumes
+#if BITS_PER_LONG == 32
+ /*
+ * The objectid of the corresponding BTRFS_INODE_ITEM_KEY.
+ * On 64 bits platforms we can get it from vfs_inode.i_ino, which is an
+ * unsigned long and therefore 64 bits on such platforms.
*/
- struct btrfs_key location;
+ u64 objectid;
+#endif
/* Cached value of inode property 'compression'. */
u8 prop_compress;
@@ -155,9 +191,6 @@ struct btrfs_inode {
*/
struct list_head delalloc_inodes;
- /* node for the red-black tree that links inodes in subvolume root */
- struct rb_node rb_node;
-
unsigned long runtime_flags;
/* full 64 bit generation number, struct vfs_inode doesn't have a big
@@ -218,11 +251,20 @@ struct btrfs_inode {
u64 last_dir_index_offset;
};
- /*
- * Total number of bytes pending defrag, used by stat to check whether
- * it needs COW. Protected by 'lock'.
- */
- u64 defrag_bytes;
+ union {
+ /*
+ * Total number of bytes pending defrag, used by stat to check whether
+ * it needs COW. Protected by 'lock'.
+ * Used by inodes other than the data relocation inode.
+ */
+ u64 defrag_bytes;
+
+ /*
+ * Logical address of the block group being relocated.
+ * Used only by the data relocation inode.
+ */
+ u64 reloc_block_group_start;
+ };
/*
* The size of the file stored in the metadata on disk. data=ordered
@@ -231,12 +273,21 @@ struct btrfs_inode {
*/
u64 disk_i_size;
- /*
- * If this is a directory then index_cnt is the counter for the index
- * number for new files that are created. For an empty directory, this
- * must be initialized to BTRFS_DIR_START_INDEX.
- */
- u64 index_cnt;
+ union {
+ /*
+ * If this is a directory then index_cnt is the counter for the
+ * index number for new files that are created. For an empty
+ * directory, this must be initialized to BTRFS_DIR_START_INDEX.
+ */
+ u64 index_cnt;
+
+ /*
+ * If this is not a directory, this is the number of bytes
+ * outstanding that are going to need csums. This is used in
+ * ENOSPC accounting. Protected by 'lock'.
+ */
+ u64 csum_bytes;
+ };
/* Cache the directory index number to speed the dir/file remove */
u64 dir_index;
@@ -248,22 +299,25 @@ struct btrfs_inode {
*/
u64 last_unlink_trans;
- /*
- * The id/generation of the last transaction where this inode was
- * either the source or the destination of a clone/dedupe operation.
- * Used when logging an inode to know if there are shared extents that
- * need special care when logging checksum items, to avoid duplicate
- * checksum items in a log (which can lead to a corruption where we end
- * up with missing checksum ranges after log replay).
- * Protected by the vfs inode lock.
- */
- u64 last_reflink_trans;
+ union {
+ /*
+ * The id/generation of the last transaction where this inode
+ * was either the source or the destination of a clone/dedupe
+ * operation. Used when logging an inode to know if there are
+ * shared extents that need special care when logging checksum
+ * items, to avoid duplicate checksum items in a log (which can
+ * lead to a corruption where we end up with missing checksum
+ * ranges after log replay). Protected by the VFS inode lock.
+ * Used for regular files only.
+ */
+ u64 last_reflink_trans;
- /*
- * Number of bytes outstanding that are going to need csums. This is
- * used in ENOSPC accounting. Protected by 'lock'.
- */
- u64 csum_bytes;
+ /*
+ * In case this is a root stub inode (BTRFS_INODE_ROOT_STUB flag set),
+ * the ID of that root.
+ */
+ u64 ref_root_id;
+ };
/* Backwards incompatible flags, lower half of inode_item::flags */
u32 flags;
@@ -321,10 +375,9 @@ static inline unsigned long btrfs_inode_hash(u64 objectid,
*/
static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
- u64 ino = inode->location.objectid;
+ u64 ino = inode->objectid;
- /* type == BTRFS_ROOT_ITEM_KEY: subvol dir */
- if (inode->location.type == BTRFS_ROOT_ITEM_KEY)
+ if (test_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags))
ino = inode->vfs_inode.i_ino;
return ino;
}
@@ -338,20 +391,36 @@ static inline u64 btrfs_ino(const struct btrfs_inode *inode)
#endif
+static inline void btrfs_get_inode_key(const struct btrfs_inode *inode,
+ struct btrfs_key *key)
+{
+ key->objectid = btrfs_ino(inode);
+ key->type = BTRFS_INODE_ITEM_KEY;
+ key->offset = 0;
+}
+
+static inline void btrfs_set_inode_number(struct btrfs_inode *inode, u64 ino)
+{
+#if BITS_PER_LONG == 32
+ inode->objectid = ino;
+#endif
+ inode->vfs_inode.i_ino = ino;
+}
+
static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
i_size_write(&inode->vfs_inode, size);
inode->disk_i_size = size;
}
-static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
+static inline bool btrfs_is_free_space_inode(const struct btrfs_inode *inode)
{
return test_bit(BTRFS_INODE_FREE_SPACE_INODE, &inode->runtime_flags);
}
-static inline bool is_data_inode(struct inode *inode)
+static inline bool is_data_inode(const struct btrfs_inode *inode)
{
- return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
+ return btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID;
}
static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
@@ -445,8 +514,8 @@ int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
u32 bio_offset, struct bio_vec *bv);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
- u64 *orig_start, u64 *orig_block_len,
- u64 *ram_bytes, bool nowait, bool strict);
+ struct btrfs_file_extent *file_extent,
+ bool nowait, bool strict);
void btrfs_del_delalloc_inode(struct btrfs_inode *inode);
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
@@ -505,9 +574,9 @@ void btrfs_free_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
-struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
- struct btrfs_root *root, struct btrfs_path *path);
-struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root);
+struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
+ struct btrfs_path *path);
+struct inode *btrfs_iget(u64 ino, struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page, u64 start, u64 len);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
@@ -541,10 +610,6 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
const struct btrfs_ioctl_encoded_io_args *encoded);
-ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
- size_t done_before);
-struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
- size_t done_before);
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino);
extern const struct dentry_operations btrfs_dentry_operations;
@@ -561,5 +626,10 @@ void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags);
void btrfs_update_inode_bytes(struct btrfs_inode *inode, const u64 add_bytes,
const u64 del_bytes);
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);
+u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
+ u64 num_bytes);
+struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
+ const struct btrfs_file_extent *file_extent,
+ int type);
#endif
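
The new unions in struct btrfs_inode overlay fields that can never be live at the same time, with the kind of inode acting as the discriminator. A hypothetical accessor, not btrfs API, showing the intended discipline for the last_reflink_trans / ref_root_id pair:

static u64 example_inode_ref_root_or_zero(const struct btrfs_inode *inode)
{
	/* Only root stub inodes store the target root's ID in this slot. */
	if (test_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags))
		return inode->ref_root_id;

	/* For regular files the same storage holds last_reflink_trans. */
	return 0;
}
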
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 6441e47d8a5e..a8e2c461aff7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -261,7 +261,7 @@ void btrfs_free_compr_folio(struct folio *folio)
folio_put(folio);
}
-static void end_bbio_comprssed_read(struct btrfs_bio *bbio)
+static void end_bbio_compressed_read(struct btrfs_bio *bbio)
{
struct compressed_bio *cb = to_compressed_bio(bbio);
blk_status_t status = bbio->bio.bi_status;
@@ -334,7 +334,7 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
* This also calls the writeback end hooks for the file pages so that metadata
* and checksums can be updated in the file.
*/
-static void end_bbio_comprssed_write(struct btrfs_bio *bbio)
+static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
struct compressed_bio *cb = to_compressed_bio(bbio);
struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
@@ -374,7 +374,7 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
blk_opf_t write_flags,
bool writeback)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct compressed_bio *cb;
@@ -383,7 +383,7 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
cb = alloc_compressed_bio(inode, ordered->file_offset,
REQ_OP_WRITE | write_flags,
- end_bbio_comprssed_write);
+ end_bbio_compressed_write);
cb->start = ordered->file_offset;
cb->len = ordered->num_bytes;
cb->compressed_folios = compressed_folios;
@@ -507,13 +507,15 @@ static noinline int add_ra_bio_pages(struct inode *inode,
*/
if (!em || cur < em->start ||
(cur + fs_info->sectorsize > extent_map_end(em)) ||
- (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
+ (extent_map_block_start(em) >> SECTOR_SHIFT) !=
+ orig_bio->bi_iter.bi_sector) {
free_extent_map(em);
unlock_extent(tree, cur, page_end, NULL);
unlock_page(page);
put_page(page);
break;
}
+ add_size = min(em->start + em->len, page_end + 1) - cur;
free_extent_map(em);
if (page->index == end_index) {
@@ -526,7 +528,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
}
}
- add_size = min(em->start + em->len, page_end + 1) - cur;
ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
if (ret != add_size) {
unlock_extent(tree, cur, page_end, NULL);
@@ -585,12 +586,12 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
}
ASSERT(extent_map_is_compressed(em));
- compressed_len = em->block_len;
+ compressed_len = em->disk_num_bytes;
cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
- end_bbio_comprssed_read);
+ end_bbio_compressed_read);
- cb->start = em->orig_start;
+ cb->start = em->start - em->offset;
em_len = em->len;
em_start = em->start;
@@ -608,7 +609,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
goto out_free_bio;
}
- ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios, 0);
+ ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios);
if (ret2) {
ret = BLK_STS_RESOURCE;
goto out_free_compressed_pages;
@@ -1506,7 +1507,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
*
* Return non-zero if the compression should be done, 0 otherwise.
*/
-int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
+int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
{
struct list_head *ws_list = get_workspace(0, 0);
struct heuristic_ws *ws;
@@ -1516,7 +1517,7 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
ws = list_entry(ws_list, struct heuristic_ws, list);
- heuristic_collect_sample(inode, start, end, ws);
+ heuristic_collect_sample(&inode->vfs_inode, start, end, ws);
if (sample_repeated_patterns(ws)) {
ret = 1;
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index c20c1a1b09d5..cfdc64319186 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -144,7 +144,7 @@ extern const struct btrfs_compress_op btrfs_zstd_compress;
const char* btrfs_compress_type2str(enum btrfs_compression_type type);
bool btrfs_compress_is_valid_type(const char *str, size_t len);
-int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
+int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end);
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
struct folio **in_folio_ret);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 1a49b9232990..451203055bbf 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -321,7 +321,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- trans->transid != root->last_trans);
+ trans->transid != btrfs_get_root_last_trans(root));
level = btrfs_header_level(buf);
if (level == 0)
@@ -417,7 +417,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
u64 refs;
u64 owner;
u64 flags;
- u64 new_flags = 0;
int ret;
/*
@@ -462,8 +461,16 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
}
owner = btrfs_header_owner(buf);
- BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
- !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
+ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
+ btrfs_crit(fs_info,
+"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
+ buf->start, btrfs_header_level(buf),
+ btrfs_root_id(root), refs, flags);
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
if (refs > 1) {
if ((owner == btrfs_root_id(root) ||
@@ -481,7 +488,10 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (ret)
return ret;
}
- new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ ret = btrfs_set_disk_extent_flags(trans, buf,
+ BTRFS_BLOCK_FLAG_FULL_BACKREF);
+ if (ret)
+ return ret;
} else {
if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
@@ -491,11 +501,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (ret)
return ret;
}
- if (new_flags != 0) {
- ret = btrfs_set_disk_extent_flags(trans, buf, new_flags);
- if (ret)
- return ret;
- }
} else {
if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
@@ -551,7 +556,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- trans->transid != root->last_trans);
+ trans->transid != btrfs_get_root_last_trans(root));
level = btrfs_header_level(buf);
@@ -588,19 +593,15 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
if (ret) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
ret = btrfs_reloc_cow_block(trans, root, buf, cow);
if (ret) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
}
@@ -612,27 +613,27 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
if (ret < 0) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
atomic_inc(&cow->refs);
rcu_assign_pointer(root->node, cow);
- btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
- parent_start, last_ref);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ goto error_unlock_cow;
+ }
} else {
WARN_ON(trans->transid != btrfs_header_generation(parent));
ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
BTRFS_MOD_LOG_KEY_REPLACE);
if (ret) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
btrfs_set_node_blockptr(parent, parent_slot,
cow->start);
@@ -642,14 +643,16 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
if (last_ref) {
ret = btrfs_tree_mod_log_free_eb(buf);
if (ret) {
- btrfs_tree_unlock(cow);
- free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
- return ret;
+ goto error_unlock_cow;
}
}
- btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
- parent_start, last_ref);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ goto error_unlock_cow;
+ }
}
if (unlock_orig)
btrfs_tree_unlock(buf);
@@ -657,6 +660,11 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(trans, cow);
*cow_ret = cow;
return 0;
+
+error_unlock_cow:
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
+ return ret;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
@@ -983,9 +991,13 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
free_extent_buffer(mid);
root_sub_used_bytes(root);
- btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
/* once for the root ptr */
free_extent_buffer_stale(mid);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
return 0;
}
if (btrfs_header_nritems(mid) >
@@ -1053,10 +1065,14 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
root_sub_used_bytes(root);
- btrfs_free_tree_block(trans, btrfs_root_id(root), right,
- 0, 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root),
+ right, 0, 1);
free_extent_buffer_stale(right);
right = NULL;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
} else {
struct btrfs_disk_key right_key;
btrfs_node_key(right, &right_key, 0);
@@ -1111,9 +1127,13 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
root_sub_used_bytes(root);
- btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
free_extent_buffer_stale(mid);
mid = NULL;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
} else {
/* update the parent key to reflect our changes */
struct btrfs_disk_key mid_key;
@@ -1551,12 +1571,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
if (ret) {
free_extent_buffer(tmp);
btrfs_release_path(p);
- return -EIO;
- }
- if (btrfs_check_eb_owner(tmp, btrfs_root_id(root))) {
- free_extent_buffer(tmp);
- btrfs_release_path(p);
- return -EUCLEAN;
+ return ret;
}
if (unlock_up)
@@ -2883,7 +2898,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
old = root->node;
ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
if (ret < 0) {
- btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
+ int ret2;
+
+ ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
+ if (ret2 < 0)
+ btrfs_abort_transaction(trans, ret2);
btrfs_tree_unlock(c);
free_extent_buffer(c);
return ret;
@@ -4452,9 +4471,12 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used_bytes(root);
atomic_inc(&leaf->refs);
- btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
free_extent_buffer_stale(leaf);
- return 0;
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
+
+ return ret;
}
/*
* delete the item at the leaf level in path. If that empties
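
The ctree.c hunks above replace the repeated unlock-and-free exit sequences in btrfs_force_cow_block() with a single error_unlock_cow label, so every failure site (including the now-checked btrfs_free_tree_block() calls) unwinds the same way. Below is a minimal, self-contained sketch of that goto-unwind shape; acquire_buffer(), release_buffer() and do_step() are hypothetical stand-ins, not btrfs APIs.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical resource helpers standing in for the locked buffer. */
    static int *acquire_buffer(void) { return malloc(sizeof(int)); }
    static void release_buffer(int *buf) { free(buf); }

    static int do_step(int step, int fail_at)
    {
        return (step == fail_at) ? -1 : 0;
    }

    /* Every failure after the buffer exists jumps to one cleanup label. */
    static int cow_like_operation(int fail_at)
    {
        int *buf = acquire_buffer();
        int ret;

        if (!buf)
            return -1;

        ret = do_step(1, fail_at);
        if (ret)
            goto error_release;

        ret = do_step(2, fail_at);
        if (ret)
            goto error_release;

        release_buffer(buf);    /* success path does its own cleanup */
        return 0;

    error_release:
        release_buffer(buf);
        return ret;
    }

    int main(void)
    {
        printf("ok=%d fail=%d\n", cow_like_operation(0), cow_like_operation(2));
        return 0;
    }
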
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c03c58246033..c8568b1a61c4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -221,9 +221,11 @@ struct btrfs_root {
struct list_head root_list;
- spinlock_t inode_lock;
- /* red-black tree that keeps track of in-memory inodes */
- struct rb_root inode_tree;
+ /*
+ * Xarray that keeps track of in-memory inodes, protected by the lock
+ * @inode_lock.
+ */
+ struct xarray inodes;
/*
* Xarray that keeps track of delayed nodes of every inode, protected
@@ -354,6 +356,16 @@ static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int c
WRITE_ONCE(root->last_log_commit, commit_id);
}
+static inline u64 btrfs_get_root_last_trans(const struct btrfs_root *root)
+{
+ return READ_ONCE(root->last_trans);
+}
+
+static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transid)
+{
+ WRITE_ONCE(root->last_trans, transid);
+}
+
/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
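
The new btrfs_get_root_last_trans()/btrfs_set_root_last_trans() helpers added to ctree.h wrap the field in READ_ONCE()/WRITE_ONCE() so lockless readers and writers cannot be torn or silently re-read by the compiler. A rough userspace analogue of that accessor pattern, using C11 relaxed atomics instead of the kernel macros, is sketched below; struct demo_root and its helpers are invented for illustration only.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a root object with a lock-free field. */
    struct demo_root {
        _Atomic uint64_t last_trans;
    };

    static inline uint64_t demo_get_last_trans(const struct demo_root *root)
    {
        /* Relaxed load: whole-value access, no ordering implied. */
        return atomic_load_explicit(&root->last_trans, memory_order_relaxed);
    }

    static inline void demo_set_last_trans(struct demo_root *root, uint64_t transid)
    {
        atomic_store_explicit(&root->last_trans, transid, memory_order_relaxed);
    }

    int main(void)
    {
        struct demo_root root = { .last_trans = 0 };

        demo_set_last_trans(&root, 42);
        printf("last_trans=%llu\n", (unsigned long long)demo_get_last_trans(&root));
        return 0;
    }
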
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 407ccec3e57e..f6dbda37a361 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -139,7 +139,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
if (trans)
transid = trans->transid;
else
- transid = inode->root->last_trans;
+ transid = btrfs_get_root_last_trans(root);
defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
if (!defrag)
@@ -255,7 +255,7 @@ again:
goto cleanup;
}
- inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
+ inode = btrfs_iget(defrag->ino, inode_root);
btrfs_put_root(inode_root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
@@ -707,8 +707,10 @@ iterate:
*/
if (key.offset > start) {
em->start = start;
- em->orig_start = start;
- em->block_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
+ em->disk_num_bytes = 0;
+ em->ram_bytes = 0;
+ em->offset = 0;
em->len = key.offset - start;
break;
}
@@ -825,7 +827,7 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
*/
next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
/* No more em or hole */
- if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+ if (!next || next->disk_bytenr >= EXTENT_MAP_LAST_BYTE)
goto out;
if (next->flags & EXTENT_FLAG_PREALLOC)
goto out;
@@ -992,12 +994,12 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
* This is for users who want to convert inline extents to
* regular ones through max_inline= mount option.
*/
- if (em->block_start == EXTENT_MAP_INLINE &&
+ if (em->disk_bytenr == EXTENT_MAP_INLINE &&
em->len <= inode->root->fs_info->max_inline)
goto next;
/* Skip holes and preallocated extents. */
- if (em->block_start == EXTENT_MAP_HOLE ||
+ if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(em->flags & EXTENT_FLAG_PREALLOC))
goto next;
@@ -1062,7 +1064,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
* So if an inline extent passed all above checks, just add it
* for defrag, and be converted to regular extents.
*/
- if (em->block_start == EXTENT_MAP_INLINE)
+ if (em->disk_bytenr == EXTENT_MAP_INLINE)
goto add;
next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
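
The defrag.c hunks above now test em->disk_bytenr against sentinel values such as EXTENT_MAP_HOLE and EXTENT_MAP_INLINE instead of the old block_start field. The standalone sketch below shows the general idea of parking sentinels at the top of a u64 range so ordinary disk byte numbers and special states can share one field; the DEMO_* constants and the struct are made up and do not match the btrfs definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sentinels near the top of the u64 range (not btrfs's values). */
    #define DEMO_MAP_LAST_BYTE  ((uint64_t)-4)
    #define DEMO_MAP_HOLE       ((uint64_t)-3)
    #define DEMO_MAP_INLINE     ((uint64_t)-2)

    struct demo_extent_map {
        uint64_t disk_bytenr;   /* real byte number, or one of the sentinels */
        uint64_t len;
    };

    static const char *demo_classify(const struct demo_extent_map *em)
    {
        if (em->disk_bytenr == DEMO_MAP_HOLE)
            return "hole";
        if (em->disk_bytenr == DEMO_MAP_INLINE)
            return "inline";
        if (em->disk_bytenr >= DEMO_MAP_LAST_BYTE)
            return "special";
        return "regular";
    }

    int main(void)
    {
        struct demo_extent_map hole = { .disk_bytenr = DEMO_MAP_HOLE, .len = 4096 };
        struct demo_extent_map reg  = { .disk_bytenr = 1048576,       .len = 4096 };

        printf("%s %s\n", demo_classify(&hole), demo_classify(&reg));
        return 0;
    }
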
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index b3527efd0b4b..7aa8a395d838 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -111,7 +111,7 @@
* making error handling and cleanup easier.
*/
-int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
+int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
diff --git a/fs/btrfs/delalloc-space.h b/fs/btrfs/delalloc-space.h
index ce4f889e4f17..3f32953c0a80 100644
--- a/fs/btrfs/delalloc-space.h
+++ b/fs/btrfs/delalloc-space.h
@@ -9,7 +9,7 @@ struct extent_changeset;
struct btrfs_inode;
struct btrfs_fs_info;
-int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
+int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes);
int btrfs_check_data_free_space(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len,
bool noflush);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 95a0497fa866..508bdbae29a0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -77,14 +77,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
return node;
}
- spin_lock(&root->inode_lock);
+ xa_lock(&root->delayed_nodes);
node = xa_load(&root->delayed_nodes, ino);
if (node) {
if (btrfs_inode->delayed_node) {
refcount_inc(&node->refs); /* can be accessed */
BUG_ON(btrfs_inode->delayed_node != node);
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return node;
}
@@ -111,10 +111,10 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
node = NULL;
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return node;
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return NULL;
}
@@ -148,21 +148,21 @@ again:
kmem_cache_free(delayed_node_cache, node);
return ERR_PTR(-ENOMEM);
}
- spin_lock(&root->inode_lock);
+ xa_lock(&root->delayed_nodes);
ptr = xa_load(&root->delayed_nodes, ino);
if (ptr) {
/* Somebody inserted it, go back and read it. */
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
kmem_cache_free(delayed_node_cache, node);
node = NULL;
goto again;
}
- ptr = xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
+ ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
ASSERT(xa_err(ptr) != -EINVAL);
ASSERT(xa_err(ptr) != -ENOMEM);
ASSERT(ptr == NULL);
btrfs_inode->delayed_node = node;
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return node;
}
@@ -275,14 +275,12 @@ static void __btrfs_release_delayed_node(
if (refcount_dec_and_test(&delayed_node->refs)) {
struct btrfs_root *root = delayed_node->root;
- spin_lock(&root->inode_lock);
+ xa_erase(&root->delayed_nodes, delayed_node->inode_id);
/*
* Once our refcount goes to zero, nobody is allowed to bump it
* back up. We can delete it now.
*/
ASSERT(refcount_read(&delayed_node->refs) == 0);
- xa_erase(&root->delayed_nodes, delayed_node->inode_id);
- spin_unlock(&root->inode_lock);
kmem_cache_free(delayed_node_cache, delayed_node);
}
}
@@ -1471,7 +1469,7 @@ static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *dir,
- struct btrfs_disk_key *disk_key, u8 flags,
+ const struct btrfs_disk_key *disk_key, u8 flags,
u64 index)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1684,7 +1682,7 @@ int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
return 0;
}
-bool btrfs_readdir_get_delayed_items(struct inode *inode,
+bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
u64 last_index,
struct list_head *ins_list,
struct list_head *del_list)
@@ -1692,7 +1690,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
struct btrfs_delayed_node *delayed_node;
struct btrfs_delayed_item *item;
- delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
+ delayed_node = btrfs_get_delayed_node(inode);
if (!delayed_node)
return false;
@@ -1700,8 +1698,8 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
* We can only do one readdir with delayed items at a time because of
* item->readdir_list.
*/
- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
- btrfs_inode_lock(BTRFS_I(inode), 0);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ btrfs_inode_lock(inode, 0);
mutex_lock(&delayed_node->mutex);
item = __btrfs_first_delayed_insertion_item(delayed_node);
@@ -1732,7 +1730,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
return true;
}
-void btrfs_readdir_put_delayed_items(struct inode *inode,
+void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
struct list_head *ins_list,
struct list_head *del_list)
{
@@ -1754,10 +1752,10 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
* The VFS is going to do up_read(), so we need to downgrade back to a
* read lock.
*/
- downgrade_write(&inode->i_rwsem);
+ downgrade_write(&inode->vfs_inode.i_rwsem);
}
-int btrfs_should_delete_dir_index(struct list_head *del_list,
+int btrfs_should_delete_dir_index(const struct list_head *del_list,
u64 index)
{
struct btrfs_delayed_item *curr;
@@ -1778,7 +1776,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
* Read dir info stored in the delayed tree.
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list)
+ const struct list_head *ins_list)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
@@ -1916,7 +1914,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
inode->i_generation = BTRFS_I(inode)->generation;
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ if (S_ISDIR(inode->i_mode))
+ BTRFS_I(inode)->index_cnt = (u64)-1;
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node);
@@ -2057,9 +2056,9 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
struct btrfs_delayed_node *node;
int count;
- spin_lock(&root->inode_lock);
+ xa_lock(&root->delayed_nodes);
if (xa_empty(&root->delayed_nodes)) {
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
return;
}
@@ -2076,7 +2075,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
if (count >= ARRAY_SIZE(delayed_nodes))
break;
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->delayed_nodes);
index++;
for (int i = 0; i < count; i++) {
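
The delayed-inode.c hunks above drop the external root->inode_lock and instead serialize on the xarray's own lock via xa_lock()/xa_unlock(), switching the insert to __xa_store() since the lock is already held. A rough kernel-style sketch of that lookup-or-insert shape follows; struct demo_root, struct demo_node and demo_get_or_insert() are hypothetical, only the xarray calls mirror the ones used in the patch, and refcounting is omitted.

    #include <linux/slab.h>
    #include <linux/xarray.h>

    struct demo_root {
        struct xarray nodes;            /* xa_init() at setup time */
    };

    struct demo_node {
        unsigned long id;
    };

    static struct demo_node *demo_get_or_insert(struct demo_root *root,
                                                unsigned long id)
    {
        struct demo_node *node, *existing;

        node = kzalloc(sizeof(*node), GFP_NOFS);
        if (!node)
            return NULL;
        node->id = id;

        xa_lock(&root->nodes);
        existing = xa_load(&root->nodes, id);
        if (existing) {
            /* Somebody else inserted first; drop our copy. */
            xa_unlock(&root->nodes);
            kfree(node);
            return existing;
        }
        /* The xarray lock is held, so use the double-underscore store. */
        existing = __xa_store(&root->nodes, id, node, GFP_ATOMIC);
        xa_unlock(&root->nodes);

        if (xa_is_err(existing)) {
            kfree(node);
            return NULL;
        }
        return node;
    }
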
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 64e115d97499..7cfefdfe54ea 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -110,7 +110,7 @@ void btrfs_init_delayed_root(struct btrfs_delayed_root *delayed_root);
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *dir,
- struct btrfs_disk_key *disk_key, u8 flags,
+ const struct btrfs_disk_key *disk_key, u8 flags,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
@@ -143,17 +143,17 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
/* Used for readdir() */
-bool btrfs_readdir_get_delayed_items(struct inode *inode,
+bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
u64 last_index,
struct list_head *ins_list,
struct list_head *del_list);
-void btrfs_readdir_put_delayed_items(struct inode *inode,
+void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
struct list_head *ins_list,
struct list_head *del_list);
-int btrfs_should_delete_dir_index(struct list_head *del_list,
+int btrfs_should_delete_dir_index(const struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list);
+ const struct list_head *ins_list);
/* Used during directory logging. */
void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 6cc80fb10da2..2ac9296edccb 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -195,48 +195,6 @@ void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
}
/*
- * Transfer bytes to our delayed refs rsv.
- *
- * @fs_info: the filesystem
- * @num_bytes: number of bytes to transfer
- *
- * This transfers up to the num_bytes amount, previously reserved, to the
- * delayed_refs_rsv. Any extra bytes are returned to the space info.
- */
-void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- u64 num_bytes)
-{
- struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
- u64 to_free = 0;
-
- spin_lock(&delayed_refs_rsv->lock);
- if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
- u64 delta = delayed_refs_rsv->size -
- delayed_refs_rsv->reserved;
- if (num_bytes > delta) {
- to_free = num_bytes - delta;
- num_bytes = delta;
- }
- } else {
- to_free = num_bytes;
- num_bytes = 0;
- }
-
- if (num_bytes)
- delayed_refs_rsv->reserved += num_bytes;
- if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
- delayed_refs_rsv->full = true;
- spin_unlock(&delayed_refs_rsv->lock);
-
- if (num_bytes)
- trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
- 0, num_bytes, 1);
- if (to_free)
- btrfs_space_info_free_bytes_may_use(fs_info,
- delayed_refs_rsv->space_info, to_free);
-}
-
-/*
* Refill based on our delayed refs usage.
*
* @fs_info: the filesystem
@@ -861,6 +819,12 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
+ /* If not metadata set an impossible level to help debugging. */
+ if (generic_ref->type == BTRFS_REF_METADATA)
+ head_ref->level = generic_ref->tree_ref.level;
+ else
+ head_ref->level = U8_MAX;
+
if (qrecord) {
if (generic_ref->ref_root && reserved) {
qrecord->data_rsv = reserved;
@@ -1114,7 +1078,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
}
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes,
+ u64 bytenr, u64 num_bytes, u8 level,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_delayed_ref_head *head_ref;
@@ -1124,6 +1088,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
.action = BTRFS_UPDATE_DELAYED_HEAD,
.bytenr = bytenr,
.num_bytes = num_bytes,
+ .tree_ref.level = level,
};
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 04b180ebe1fe..ef15e998be03 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -108,7 +108,6 @@ struct btrfs_delayed_ref_node {
struct btrfs_delayed_extent_op {
struct btrfs_disk_key key;
- u8 level;
bool update_key;
bool update_flags;
u64 flags_to_set;
@@ -172,6 +171,9 @@ struct btrfs_delayed_ref_head {
*/
u64 reserved_bytes;
+ /* Tree block level, for metadata only. */
+ u8 level;
+
/*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
@@ -355,7 +357,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes,
+ u64 bytenr, u64 num_bytes, u8 level,
struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
@@ -386,8 +388,6 @@ void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
-void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 7130040d92ab..f638c458d285 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -684,7 +684,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
if (ret)
btrfs_err(fs_info, "kobj add dev failed %d", ret);
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
/*
* Commit dev_replace state and reserve 1 item for it.
@@ -880,7 +880,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;
}
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
/*
* We have to use this loop approach because at this point src_device
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 9c07d5c3e5ad..001c0c2f872c 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -22,7 +22,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
*trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct btrfs_key *cpu_key,
+ const struct btrfs_key *cpu_key,
u32 data_size,
const char *name,
int name_len)
@@ -108,7 +108,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
*/
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
const struct fscrypt_str *name, struct btrfs_inode *dir,
- struct btrfs_key *location, u8 type, u64 index)
+ const struct btrfs_key *location, u8 type, u64 index)
{
int ret = 0;
int ret2 = 0;
@@ -379,7 +379,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
* for a specific name.
*/
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+ const struct btrfs_path *path,
const char *name, int name_len)
{
struct btrfs_dir_item *dir_item;
@@ -417,7 +417,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct btrfs_dir_item *di)
+ const struct btrfs_dir_item *di)
{
struct extent_buffer *leaf;
diff --git a/fs/btrfs/dir-item.h b/fs/btrfs/dir-item.h
index 00b3d83d7569..5f6dfafc91f1 100644
--- a/fs/btrfs/dir-item.h
+++ b/fs/btrfs/dir-item.h
@@ -17,7 +17,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
const struct fscrypt_str *name);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
const struct fscrypt_str *name, struct btrfs_inode *dir,
- struct btrfs_key *location, u8 type, u64 index);
+ const struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
@@ -33,7 +33,7 @@ struct btrfs_dir_item *btrfs_search_dir_index_item(struct btrfs_root *root,
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct btrfs_dir_item *di);
+ const struct btrfs_dir_item *di);
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid,
@@ -45,7 +45,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
const char *name, u16 name_len,
int mod);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+ const struct btrfs_path *path,
const char *name,
int name_len);
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
new file mode 100644
index 000000000000..f9fb2db6a1e4
--- /dev/null
+++ b/fs/btrfs/direct-io.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/fsverity.h>
+#include <linux/iomap.h>
+#include "ctree.h"
+#include "delalloc-space.h"
+#include "direct-io.h"
+#include "extent-tree.h"
+#include "file.h"
+#include "fs.h"
+#include "transaction.h"
+#include "volumes.h"
+
+struct btrfs_dio_data {
+ ssize_t submitted;
+ struct extent_changeset *data_reserved;
+ struct btrfs_ordered_extent *ordered;
+ bool data_space_reserved;
+ bool nocow_done;
+};
+
+struct btrfs_dio_private {
+ /* Range of I/O */
+ u64 file_offset;
+ u32 bytes;
+
+ /* This must be last */
+ struct btrfs_bio bbio;
+};
+
+static struct bio_set btrfs_dio_bioset;
+
+static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ struct extent_state **cached_state,
+ unsigned int iomap_flags)
+{
+ const bool writing = (iomap_flags & IOMAP_WRITE);
+ const bool nowait = (iomap_flags & IOMAP_NOWAIT);
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct btrfs_ordered_extent *ordered;
+ int ret = 0;
+
+ while (1) {
+ if (nowait) {
+ if (!try_lock_extent(io_tree, lockstart, lockend,
+ cached_state))
+ return -EAGAIN;
+ } else {
+ lock_extent(io_tree, lockstart, lockend, cached_state);
+ }
+ /*
+ * We're concerned with the entire range that we're going to be
+ * doing DIO to, so we need to make sure there are no ordered
+ * extents in this range.
+ */
+ ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
+ lockend - lockstart + 1);
+
+ /*
+ * We need to make sure there are no buffered pages in this
+ * range either, we could have raced between the invalidate in
+ * generic_file_direct_write and locking the extent. The
+ * invalidate needs to happen so that reads after a write do not
+ * get stale data.
+ */
+ if (!ordered &&
+ (!writing || !filemap_range_has_page(inode->i_mapping,
+ lockstart, lockend)))
+ break;
+
+ unlock_extent(io_tree, lockstart, lockend, cached_state);
+
+ if (ordered) {
+ if (nowait) {
+ btrfs_put_ordered_extent(ordered);
+ ret = -EAGAIN;
+ break;
+ }
+ /*
+ * If we are doing a DIO read and the ordered extent we
+ * found is for a buffered write, we can not wait for it
+ * to complete and retry, because if we do so we can
+ * deadlock with concurrent buffered writes on page
+ * locks. This happens only if our DIO read covers more
+ * than one extent map, if at this point it has already
+ * created an ordered extent for a previous extent map
+ * and locked its range in the inode's io tree, and a
+ * concurrent write against that previous extent map's
+ * range and this range started (we unlock the ranges
+ * in the io tree only when the bios complete and
+ * buffered writes always lock pages before attempting
+ * to lock range in the io tree).
+ */
+ if (writing ||
+ test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
+ btrfs_start_ordered_extent(ordered);
+ else
+ ret = nowait ? -EAGAIN : -ENOTBLK;
+ btrfs_put_ordered_extent(ordered);
+ } else {
+ /*
+ * We could trigger writeback for this range (and wait
+ * for it to complete) and then invalidate the pages for
+ * this range (through invalidate_inode_pages2_range()),
+ * but that can lead us to a deadlock with a concurrent
+ * call to readahead (a buffered read or a defrag call
+ * triggered a readahead) on a page lock due to an
+ * ordered dio extent we created before but did not have
+ * yet a corresponding bio submitted (whence it can not
+ * complete), which makes readahead wait for that
+ * ordered extent to complete while holding a lock on
+ * that page.
+ */
+ ret = nowait ? -EAGAIN : -ENOTBLK;
+ }
+
+ if (ret)
+ break;
+
+ cond_resched();
+ }
+
+ return ret;
+}
+
+static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
+ struct btrfs_dio_data *dio_data,
+ const u64 start,
+ const struct btrfs_file_extent *file_extent,
+ const int type)
+{
+ struct extent_map *em = NULL;
+ struct btrfs_ordered_extent *ordered;
+
+ if (type != BTRFS_ORDERED_NOCOW) {
+ em = btrfs_create_io_em(inode, start, file_extent, type);
+ if (IS_ERR(em))
+ goto out;
+ }
+
+ ordered = btrfs_alloc_ordered_extent(inode, start, file_extent,
+ (1 << type) |
+ (1 << BTRFS_ORDERED_DIRECT));
+ if (IS_ERR(ordered)) {
+ if (em) {
+ free_extent_map(em);
+ btrfs_drop_extent_map_range(inode, start,
+ start + file_extent->num_bytes - 1, false);
+ }
+ em = ERR_CAST(ordered);
+ } else {
+ ASSERT(!dio_data->ordered);
+ dio_data->ordered = ordered;
+ }
+ out:
+
+ return em;
+}
+
+static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
+ struct btrfs_dio_data *dio_data,
+ u64 start, u64 len)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_file_extent file_extent;
+ struct extent_map *em;
+ struct btrfs_key ins;
+ u64 alloc_hint;
+ int ret;
+
+ alloc_hint = btrfs_get_extent_allocation_hint(inode, start, len);
+again:
+ ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 0, alloc_hint, &ins, 1, 1);
+ if (ret == -EAGAIN) {
+ ASSERT(btrfs_is_zoned(fs_info));
+ wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
+ TASK_UNINTERRUPTIBLE);
+ goto again;
+ }
+ if (ret)
+ return ERR_PTR(ret);
+
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = ins.offset;
+ file_extent.ram_bytes = ins.offset;
+ file_extent.offset = 0;
+ file_extent.compression = BTRFS_COMPRESS_NONE;
+ em = btrfs_create_dio_extent(inode, dio_data, start, &file_extent,
+ BTRFS_ORDERED_REGULAR);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ if (IS_ERR(em))
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
+ 1);
+
+ return em;
+}
+
+static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ struct inode *inode,
+ struct btrfs_dio_data *dio_data,
+ u64 start, u64 *lenp,
+ unsigned int iomap_flags)
+{
+ const bool nowait = (iomap_flags & IOMAP_NOWAIT);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_file_extent file_extent;
+ struct extent_map *em = *map;
+ int type;
+ u64 block_start;
+ struct btrfs_block_group *bg;
+ bool can_nocow = false;
+ bool space_reserved = false;
+ u64 len = *lenp;
+ u64 prev_len;
+ int ret = 0;
+
+ /*
+ * We don't allocate a new extent in the following cases
+ *
+ * 1) The inode is marked as NODATACOW. In this case we'll just use the
+ * existing extent.
+ * 2) The extent is marked as PREALLOC. We're good to go here and can
+ * just use the extent.
+ *
+ */
+ if ((em->flags & EXTENT_FLAG_PREALLOC) ||
+ ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+ em->disk_bytenr != EXTENT_MAP_HOLE)) {
+ if (em->flags & EXTENT_FLAG_PREALLOC)
+ type = BTRFS_ORDERED_PREALLOC;
+ else
+ type = BTRFS_ORDERED_NOCOW;
+ len = min(len, em->len - (start - em->start));
+ block_start = extent_map_block_start(em) + (start - em->start);
+
+ if (can_nocow_extent(inode, start, &len,
+ &file_extent, false, false) == 1) {
+ bg = btrfs_inc_nocow_writers(fs_info, block_start);
+ if (bg)
+ can_nocow = true;
+ }
+ }
+
+ prev_len = len;
+ if (can_nocow) {
+ struct extent_map *em2;
+
+ /* We can NOCOW, so only need to reserve metadata space. */
+ ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
+ nowait);
+ if (ret < 0) {
+ /* Our caller expects us to free the input extent map. */
+ free_extent_map(em);
+ *map = NULL;
+ btrfs_dec_nocow_writers(bg);
+ if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
+ ret = -EAGAIN;
+ goto out;
+ }
+ space_reserved = true;
+
+ em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start,
+ &file_extent, type);
+ btrfs_dec_nocow_writers(bg);
+ if (type == BTRFS_ORDERED_PREALLOC) {
+ free_extent_map(em);
+ *map = em2;
+ em = em2;
+ }
+
+ if (IS_ERR(em2)) {
+ ret = PTR_ERR(em2);
+ goto out;
+ }
+
+ dio_data->nocow_done = true;
+ } else {
+ /* Our caller expects us to free the input extent map. */
+ free_extent_map(em);
+ *map = NULL;
+
+ if (nowait) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /*
+ * If we could not allocate data space before locking the file
+ * range and we can't do a NOCOW write, then we have to fail.
+ */
+ if (!dio_data->data_space_reserved) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /*
+ * We have to COW and we have already reserved data space before,
+ * so now we reserve only metadata.
+ */
+ ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
+ false);
+ if (ret < 0)
+ goto out;
+ space_reserved = true;
+
+ em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+ *map = em;
+ len = min(len, em->len - (start - em->start));
+ if (len < prev_len)
+ btrfs_delalloc_release_metadata(BTRFS_I(inode),
+ prev_len - len, true);
+ }
+
+ /*
+ * We have created our ordered extent, so we can now release our reservation
+ * for an outstanding extent.
+ */
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
+
+ /*
+ * Need to update the i_size under the extent lock so buffered
+ * readers will get the updated i_size when we unlock.
+ */
+ if (start + len > i_size_read(inode))
+ i_size_write(inode, start + len);
+out:
+ if (ret && space_reserved) {
+ btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+ btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
+ }
+ *lenp = len;
+ return ret;
+}
+
+static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
+ loff_t length, unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct extent_map *em;
+ struct extent_state *cached_state = NULL;
+ struct btrfs_dio_data *dio_data = iter->private;
+ u64 lockstart, lockend;
+ const bool write = !!(flags & IOMAP_WRITE);
+ int ret = 0;
+ u64 len = length;
+ const u64 data_alloc_len = length;
+ bool unlock_extents = false;
+
+ /*
+ * We could potentially fault if we have a buffer > PAGE_SIZE, and if
+ * we're NOWAIT we may submit a bio for a partial range and return
+ * EIOCBQUEUED, which would result in an errant short read.
+ *
+ * The best way to handle this would be to allow for partial completions
+ * of iocb's, so we could submit the partial bio, return and fault in
+ * the rest of the pages, and then submit the io for the rest of the
+ * range. However we don't have that currently, so simply return
+ * -EAGAIN at this point so that the normal path is used.
+ */
+ if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
+ return -EAGAIN;
+
+ /*
+ * Cap the size of reads to that usually seen in buffered I/O as we need
+ * to allocate a contiguous array for the checksums.
+ */
+ if (!write)
+ len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
+
+ lockstart = start;
+ lockend = start + len - 1;
+
+ /*
+ * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
+ * enough if we've written compressed pages to this area, so we need to
+ * flush the dirty pages again to make absolutely sure that any
+ * outstanding dirty pages are on disk - the first flush only starts
+ * compression on the data, while keeping the pages locked, so by the
+ * time the second flush returns we know bios for the compressed pages
+ * were submitted and finished, and the pages are no longer under writeback.
+ *
+ * If we have a NOWAIT request and we have any pages in the range that
+ * are locked, likely due to compression still in progress, we don't want
+ * to block on page locks. We also don't want to block on pages marked as
+ * dirty or under writeback (same as for the non-compression case).
+ * iomap_dio_rw() did the same check, but after that and before we got
+ * here, mmap'ed writes may have happened or buffered reads started
+ * (readpage() and readahead(), which lock pages), as we haven't locked
+ * the file range yet.
+ */
+ if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags)) {
+ if (flags & IOMAP_NOWAIT) {
+ if (filemap_range_needs_writeback(inode->i_mapping,
+ lockstart, lockend))
+ return -EAGAIN;
+ } else {
+ ret = filemap_fdatawrite_range(inode->i_mapping, start,
+ start + length - 1);
+ if (ret)
+ return ret;
+ }
+ }
+
+ memset(dio_data, 0, sizeof(*dio_data));
+
+ /*
+ * We always try to allocate data space and must do it before locking
+ * the file range, to avoid deadlocks with concurrent writes to the same
+ * range if the range has several extents and the writes don't expand the
+ * current i_size (the inode lock is taken in shared mode). If we fail to
+ * allocate data space here we continue and later, after locking the
+ * file range, we fail with ENOSPC only if we figure out we can not do a
+ * NOCOW write.
+ */
+ if (write && !(flags & IOMAP_NOWAIT)) {
+ ret = btrfs_check_data_free_space(BTRFS_I(inode),
+ &dio_data->data_reserved,
+ start, data_alloc_len, false);
+ if (!ret)
+ dio_data->data_space_reserved = true;
+ else if (ret && !(BTRFS_I(inode)->flags &
+ (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
+ goto err;
+ }
+
+ /*
+ * If this errors out it's because we couldn't invalidate pagecache for
+ * this range and we need to fallback to buffered IO, or we are doing a
+ * NOWAIT read/write and we need to block.
+ */
+ ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
+ if (ret < 0)
+ goto err;
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto unlock_err;
+ }
+
+ /*
+ * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
+ * io. INLINE is special, and we could probably kludge it in here, but
+ * it's still buffered so for safety lets just fall back to the generic
+ * buffered path.
+ *
+ * For COMPRESSED we _have_ to read the entire extent in so we can
+ * decompress it, so there will be buffering required no matter what we
+ * do, so go ahead and fallback to buffered.
+ *
+ * We return -ENOTBLK because that's what makes DIO go ahead and go back
+ * to buffered IO. Don't blame me, this is the price we pay for using
+ * the generic code.
+ */
+ if (extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) {
+ free_extent_map(em);
+ /*
+ * If we are in a NOWAIT context, return -EAGAIN in order to
+ * fallback to buffered IO. This is not only because we can
+ * block with buffered IO (no support for NOWAIT semantics at
+ * the moment) but also to avoid returning short reads to user
+ * space - this happens if we were able to read some data from
+ * previous non-compressed extents and then when we fallback to
+ * buffered IO, at btrfs_file_read_iter() by calling
+ * filemap_read(), we fail to fault in pages for the read buffer,
+ * in which case filemap_read() returns a short read (the number
+ * of bytes previously read is > 0, so it does not return -EFAULT).
+ */
+ ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
+ goto unlock_err;
+ }
+
+ len = min(len, em->len - (start - em->start));
+
+ /*
+ * If we have a NOWAIT request and the range contains multiple extents
+ * (or a mix of extents and holes), then we return -EAGAIN to make the
+ * caller fallback to a context where it can do a blocking (without
+ * NOWAIT) request. This way we avoid doing partial IO and returning
+ * success to the caller, which is not optimal for writes and for reads
+ * it can result in unexpected behaviour for an application.
+ *
+ * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
+ * iomap_dio_rw(), we can end up returning less data than what the caller
+ * asked for, resulting in an unexpected, and incorrect, short read.
+ * That is, the caller asked to read N bytes and we return less than that,
+ * which is wrong unless we are crossing EOF. This happens if we get a
+ * page fault error when trying to fault in pages for the buffer that is
+ * associated to the struct iov_iter passed to iomap_dio_rw(), and we
+ * have previously submitted bios for other extents in the range, in
+ * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
+ * those bios have completed by the time we get the page fault error,
+ * which we return back to our caller - we should only return EIOCBQUEUED
+ * after we have submitted bios for all the extents in the range.
+ */
+ if ((flags & IOMAP_NOWAIT) && len < length) {
+ free_extent_map(em);
+ ret = -EAGAIN;
+ goto unlock_err;
+ }
+
+ if (write) {
+ ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
+ start, &len, flags);
+ if (ret < 0)
+ goto unlock_err;
+ unlock_extents = true;
+ /* Recalc len in case the new em is smaller than requested */
+ len = min(len, em->len - (start - em->start));
+ if (dio_data->data_space_reserved) {
+ u64 release_offset;
+ u64 release_len = 0;
+
+ if (dio_data->nocow_done) {
+ release_offset = start;
+ release_len = data_alloc_len;
+ } else if (len < data_alloc_len) {
+ release_offset = start + len;
+ release_len = data_alloc_len - len;
+ }
+
+ if (release_len > 0)
+ btrfs_free_reserved_data_space(BTRFS_I(inode),
+ dio_data->data_reserved,
+ release_offset,
+ release_len);
+ }
+ } else {
+ /*
+ * We need to unlock only the end area that we aren't using.
+ * The rest is going to be unlocked by the endio routine.
+ */
+ lockstart = start + len;
+ if (lockstart < lockend)
+ unlock_extents = true;
+ }
+
+ if (unlock_extents)
+ unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
+ else
+ free_extent_state(cached_state);
+
+ /*
+ * Translate extent map information to iomap.
+ * We trim the extents (and move the addr) even though iomap code does
+ * that, since we have locked only the parts we are performing I/O in.
+ */
+ if ((em->disk_bytenr == EXTENT_MAP_HOLE) ||
+ ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ } else {
+ iomap->addr = extent_map_block_start(em) + (start - em->start);
+ iomap->type = IOMAP_MAPPED;
+ }
+ iomap->offset = start;
+ iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
+ iomap->length = len;
+ free_extent_map(em);
+
+ return 0;
+
+unlock_err:
+ unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
+err:
+ if (dio_data->data_space_reserved) {
+ btrfs_free_reserved_data_space(BTRFS_I(inode),
+ dio_data->data_reserved,
+ start, data_alloc_len);
+ extent_changeset_free(dio_data->data_reserved);
+ }
+
+ return ret;
+}
+
+static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned int flags, struct iomap *iomap)
+{
+ struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
+ struct btrfs_dio_data *dio_data = iter->private;
+ size_t submitted = dio_data->submitted;
+ const bool write = !!(flags & IOMAP_WRITE);
+ int ret = 0;
+
+ if (!write && (iomap->type == IOMAP_HOLE)) {
+ /* If reading from a hole, unlock and return */
+ unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
+ NULL);
+ return 0;
+ }
+
+ if (submitted < length) {
+ pos += submitted;
+ length -= submitted;
+ if (write)
+ btrfs_finish_ordered_extent(dio_data->ordered, NULL,
+ pos, length, false);
+ else
+ unlock_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1, NULL);
+ ret = -ENOTBLK;
+ }
+ if (write) {
+ btrfs_put_ordered_extent(dio_data->ordered);
+ dio_data->ordered = NULL;
+ }
+
+ if (write)
+ extent_changeset_free(dio_data->data_reserved);
+ return ret;
+}
+
+static void btrfs_dio_end_io(struct btrfs_bio *bbio)
+{
+ struct btrfs_dio_private *dip =
+ container_of(bbio, struct btrfs_dio_private, bbio);
+ struct btrfs_inode *inode = bbio->inode;
+ struct bio *bio = &bbio->bio;
+
+ if (bio->bi_status) {
+ btrfs_warn(inode->root->fs_info,
+ "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
+ btrfs_ino(inode), bio->bi_opf,
+ dip->file_offset, dip->bytes, bio->bi_status);
+ }
+
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
+ btrfs_finish_ordered_extent(bbio->ordered, NULL,
+ dip->file_offset, dip->bytes,
+ !bio->bi_status);
+ } else {
+ unlock_extent(&inode->io_tree, dip->file_offset,
+ dip->file_offset + dip->bytes - 1, NULL);
+ }
+
+ bbio->bio.bi_private = bbio->private;
+ iomap_dio_bio_end_io(bio);
+}
+
+static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
+ struct btrfs_ordered_extent *ordered)
+{
+ u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 len = bbio->bio.bi_iter.bi_size;
+ struct btrfs_ordered_extent *new;
+ int ret;
+
+ /* Must always be called for the beginning of an ordered extent. */
+ if (WARN_ON_ONCE(start != ordered->disk_bytenr))
+ return -EINVAL;
+
+ /* No need to split if the ordered extent covers the entire bio. */
+ if (ordered->disk_num_bytes == len) {
+ refcount_inc(&ordered->refs);
+ bbio->ordered = ordered;
+ return 0;
+ }
+
+ /*
+ * Don't split the extent_map for NOCOW extents, as we're writing into
+ * a pre-existing one.
+ */
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
+ ret = split_extent_map(bbio->inode, bbio->file_offset,
+ ordered->num_bytes, len,
+ ordered->disk_bytenr);
+ if (ret)
+ return ret;
+ }
+
+ new = btrfs_split_ordered_extent(ordered, len);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+ bbio->ordered = new;
+ return 0;
+}
+
+static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset)
+{
+ struct btrfs_bio *bbio = btrfs_bio(bio);
+ struct btrfs_dio_private *dip =
+ container_of(bbio, struct btrfs_dio_private, bbio);
+ struct btrfs_dio_data *dio_data = iter->private;
+
+ btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
+ btrfs_dio_end_io, bio->bi_private);
+ bbio->inode = BTRFS_I(iter->inode);
+ bbio->file_offset = file_offset;
+
+ dip->file_offset = file_offset;
+ dip->bytes = bio->bi_iter.bi_size;
+
+ dio_data->submitted += bio->bi_iter.bi_size;
+
+ /*
+ * Check if we are doing a partial write. If we are, we need to split
+ * the ordered extent to match the submitted bio. Hang on to the
+ * remaining unfinishable ordered_extent in dio_data so that it can be
+ * cancelled in iomap_end to avoid a deadlock wherein faulting the
+ * remaining pages is blocked on the outstanding ordered extent.
+ */
+ if (iter->flags & IOMAP_WRITE) {
+ int ret;
+
+ ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
+ if (ret) {
+ btrfs_finish_ordered_extent(dio_data->ordered, NULL,
+ file_offset, dip->bytes,
+ !ret);
+ bio->bi_status = errno_to_blk_status(ret);
+ iomap_dio_bio_end_io(bio);
+ return;
+ }
+ }
+
+ btrfs_submit_bio(bbio, 0);
+}
+
+static const struct iomap_ops btrfs_dio_iomap_ops = {
+ .iomap_begin = btrfs_dio_iomap_begin,
+ .iomap_end = btrfs_dio_iomap_end,
+};
+
+static const struct iomap_dio_ops btrfs_dio_ops = {
+ .submit_io = btrfs_dio_submit_io,
+ .bio_set = &btrfs_dio_bioset,
+};
+
+static ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
+ size_t done_before)
+{
+ struct btrfs_dio_data data = { 0 };
+
+ return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+ IOMAP_DIO_PARTIAL, &data, done_before);
+}
+
+static struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
+ size_t done_before)
+{
+ struct btrfs_dio_data data = { 0 };
+
+ return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+ IOMAP_DIO_PARTIAL, &data, done_before);
+}
+
+static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ const u32 blocksize_mask = fs_info->sectorsize - 1;
+
+ if (offset & blocksize_mask)
+ return -EINVAL;
+
+ if (iov_iter_alignment(iter) & blocksize_mask)
+ return -EINVAL;
+
+ return 0;
+}
+
+ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ loff_t pos;
+ ssize_t written = 0;
+ ssize_t written_buffered;
+ size_t prev_left = 0;
+ loff_t endbyte;
+ ssize_t ret;
+ unsigned int ilock_flags = 0;
+ struct iomap_dio *dio;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ ilock_flags |= BTRFS_ILOCK_TRY;
+
+ /*
+ * If the write DIO is within EOF, use a shared lock and also only if
+ * security bits will likely not be dropped by file_remove_privs() called
+ * from btrfs_write_check(). Either will need to be rechecked after the
+ * lock was acquired.
+ */
+ if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
+ ilock_flags |= BTRFS_ILOCK_SHARED;
+
+relock:
+ ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
+ if (ret < 0)
+ return ret;
+
+ /* Shared lock cannot be used with security bits set. */
+ if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ ilock_flags &= ~BTRFS_ILOCK_SHARED;
+ goto relock;
+ }
+
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ return ret;
+ }
+
+ ret = btrfs_write_check(iocb, from, ret);
+ if (ret < 0) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ goto out;
+ }
+
+ pos = iocb->ki_pos;
+ /*
+ * Re-check since file size may have changed just before taking the
+ * lock or pos may have changed because of O_APPEND in generic_write_checks()
+ */
+ if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
+ pos + iov_iter_count(from) > i_size_read(inode)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ ilock_flags &= ~BTRFS_ILOCK_SHARED;
+ goto relock;
+ }
+
+ if (check_direct_IO(fs_info, from, pos)) {
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+ goto buffered;
+ }
+
+ /*
+ * The iov_iter can be mapped to the same file range we are writing to.
+ * If that's the case, then we will deadlock in the iomap code, because
+ * it first calls our callback btrfs_dio_iomap_begin(), which will create
+ * an ordered extent, and after that it will fault in the pages that the
+ * iov_iter refers to. During the fault in we end up in the readahead
+ * pages code (starting at btrfs_readahead()), which will lock the range,
+ * find that ordered extent and then wait for it to complete (at
+ * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
+ * obviously the ordered extent can never complete as we didn't submit
+ * yet the respective bio(s). This always happens when the buffer is
+ * memory mapped to the same file range, since the iomap DIO code always
+ * invalidates pages in the target file range (after starting and waiting
+ * for any writeback).
+ *
+ * So here we disable page faults in the iov_iter and then retry if we
+ * got -EFAULT, faulting in the pages before the retry.
+ */
+ from->nofault = true;
+ dio = btrfs_dio_write(iocb, from, written);
+ from->nofault = false;
+
+ /*
+ * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
+ * iocb, and that needs to lock the inode. So unlock it before calling
+ * iomap_dio_complete() to avoid a deadlock.
+ */
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+
+ if (IS_ERR_OR_NULL(dio))
+ ret = PTR_ERR_OR_ZERO(dio);
+ else
+ ret = iomap_dio_complete(dio);
+
+ /* No increment (+=) because iomap returns a cumulative value. */
+ if (ret > 0)
+ written = ret;
+
+ if (iov_iter_count(from) > 0 && (ret == -EFAULT || ret > 0)) {
+ const size_t left = iov_iter_count(from);
+ /*
+ * We have more data left to write. Try to fault in as many as
+ * possible of the remainder pages and retry. We do this without
+ * releasing and locking again the inode, to prevent races with
+ * truncate.
+ *
+ * Also, in case the iov refers to pages in the file range of the
+ * file we want to write to (due to a mmap), we could enter an
+ * infinite loop if we retry after faulting the pages in, since
+ * iomap will invalidate any pages in the range early on, before
+ * it tries to fault in the pages of the iov. So we keep track of
+ * how much was left of iov in the previous EFAULT and fallback
+ * to buffered IO in case we haven't made any progress.
+ */
+ if (left == prev_left) {
+ ret = -ENOTBLK;
+ } else {
+ fault_in_iov_iter_readable(from, left);
+ prev_left = left;
+ goto relock;
+ }
+ }
+
+ /*
+ * If 'ret' is -ENOTBLK or we have not written all data, then it means
+ * we must fallback to buffered IO.
+ */
+ if ((ret < 0 && ret != -ENOTBLK) || !iov_iter_count(from))
+ goto out;
+
+buffered:
+ /*
+ * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
+ * it must retry the operation in a context where blocking is acceptable,
+ * because even if we end up not blocking during the buffered IO attempt
+ * below, we will block when flushing and waiting for the IO.
+ */
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ pos = iocb->ki_pos;
+ written_buffered = btrfs_buffered_write(iocb, from);
+ if (written_buffered < 0) {
+ ret = written_buffered;
+ goto out;
+ }
+ /*
+ * Ensure all data is persisted. We want the next direct IO read to be
+ * able to read what was just written.
+ */
+ endbyte = pos + written_buffered - 1;
+ ret = btrfs_fdatawrite_range(BTRFS_I(inode), pos, endbyte);
+ if (ret)
+ goto out;
+ ret = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
+ if (ret)
+ goto out;
+ written += written_buffered;
+ iocb->ki_pos = pos + written_buffered;
+ invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+out:
+ return ret < 0 ? ret : written;
+}
+
+static int check_direct_read(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ int ret;
+ int i, seg;
+
+ ret = check_direct_IO(fs_info, iter, offset);
+ if (ret < 0)
+ return ret;
+
+ if (!iter_is_iovec(iter))
+ return 0;
+
+ for (seg = 0; seg < iter->nr_segs; seg++) {
+ for (i = seg + 1; i < iter->nr_segs; i++) {
+ const struct iovec *iov1 = iter_iov(iter) + seg;
+ const struct iovec *iov2 = iter_iov(iter) + i;
+
+ if (iov1->iov_base == iov2->iov_base)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ size_t prev_left = 0;
+ ssize_t read = 0;
+ ssize_t ret;
+
+ if (fsverity_active(inode))
+ return 0;
+
+ if (check_direct_read(inode_to_fs_info(inode), to, iocb->ki_pos))
+ return 0;
+
+ btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
+again:
+ /*
+ * This is similar to what we do for direct IO writes, see the comment
+ * at btrfs_direct_write(), but we also disable page faults in addition
+ * to disabling them only at the iov_iter level. This is because when
+ * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
+ * which can still trigger page fault ins despite having set ->nofault
+ * to true on our 'to' iov_iter.
+ *
+ * The difference to direct IO writes is that we deadlock when trying
+ * to lock the extent range in the inode's tree during the page reads
+ * triggered by the fault in (while for writes it is due to waiting for
+ * our own ordered extent). This is because for direct IO reads,
+ * btrfs_dio_iomap_begin() returns with the extent range locked, which
+ * is only unlocked in the endio callback (end_bio_extent_readpage()).
+ */
+ pagefault_disable();
+ to->nofault = true;
+ ret = btrfs_dio_read(iocb, to, read);
+ to->nofault = false;
+ pagefault_enable();
+
+ /* No increment (+=) because iomap returns a cumulative value. */
+ if (ret > 0)
+ read = ret;
+
+ if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
+ const size_t left = iov_iter_count(to);
+
+ if (left == prev_left) {
+ /*
+ * We didn't make any progress since the last attempt,
+ * fallback to a buffered read for the remainder of the
+ * range. This is just to avoid any possibility of looping
+ * for too long.
+ */
+ ret = read;
+ } else {
+ /*
+ * We made some progress since the last retry or this is
+ * the first time we are retrying. Fault in as many pages
+ * as possible and retry.
+ */
+ fault_in_iov_iter_writeable(to, left);
+ prev_left = left;
+ goto again;
+ }
+ }
+ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
+ return ret < 0 ? ret : read;
+}
+
+int __init btrfs_init_dio(void)
+{
+ if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
+ offsetof(struct btrfs_dio_private, bbio.bio),
+ BIOSET_NEED_BVECS))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __cold btrfs_destroy_dio(void)
+{
+ bioset_exit(&btrfs_dio_bioset);
+}
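
btrfs_direct_write() and btrfs_direct_read() above share one retry shape: run the iomap DIO with page faults disabled, and when it stops early, fault the remaining pages in and try again only if the previous attempt made progress, otherwise give up and let the caller fall back to buffered IO. The self-contained sketch below models just that control flow; do_transfer() and fault_in_pages() are hypothetical stubs, not btrfs or iomap functions, and the progress check is simplified compared to the code above.

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stub: moves half of the remaining bytes, then "faults". */
    static long do_transfer(size_t *remaining)
    {
        if (*remaining == 0)
            return 0;
        if (*remaining > 8) {
            *remaining -= *remaining / 2;
            return -EFAULT;             /* partial progress, then a fault */
        }
        *remaining = 0;
        return 8;
    }

    static void fault_in_pages(size_t len)
    {
        (void)len;                      /* stand-in for fault_in_iov_iter_*() */
    }

    /* Retry only while each attempt makes progress; otherwise fall back. */
    static long transfer_with_retries(size_t remaining)
    {
        size_t prev_left = 0;
        long ret;

        for (;;) {
            ret = do_transfer(&remaining);
            if (ret != -EFAULT || remaining == 0)
                return ret;
            if (remaining == prev_left)
                return -ENOTBLK;        /* no progress: buffered fallback */
            fault_in_pages(remaining);
            prev_left = remaining;
        }
    }

    int main(void)
    {
        printf("result=%ld\n", transfer_with_retries(64));
        return 0;
    }
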
diff --git a/fs/btrfs/direct-io.h b/fs/btrfs/direct-io.h
new file mode 100644
index 000000000000..3dc3ea926afe
--- /dev/null
+++ b/fs/btrfs/direct-io.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_DIRECT_IO_H
+#define BTRFS_DIRECT_IO_H
+
+#include <linux/types.h>
+
+int __init btrfs_init_dio(void);
+void __cold btrfs_destroy_dio(void);
+
+ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from);
+ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to);
+
+#endif /* BTRFS_DIRECT_IO_H */
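
check_direct_IO() in the new direct-io.c accepts a request only when the file offset and the iovec alignment are multiples of the sector size, which it tests by masking against sectorsize - 1 (valid because the sector size is a power of two). A tiny standalone illustration of that mask test follows; the 4096-byte sector size is just an example value.

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 when offset is aligned to blocksize (a power of two), else -1. */
    static int check_alignment(uint64_t offset, uint32_t blocksize)
    {
        const uint32_t mask = blocksize - 1;

        return (offset & mask) ? -1 : 0;
    }

    int main(void)
    {
        printf("%d %d\n", check_alignment(8192, 4096), check_alignment(512, 4096));
        return 0;
    }
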
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1b20b3e390df..a6f5441e62d1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -213,7 +213,7 @@ static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
* structure for details.
*/
int btrfs_read_extent_buffer(struct extent_buffer *eb,
- struct btrfs_tree_parent_check *check)
+ const struct btrfs_tree_parent_check *check)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
int failed = 0;
@@ -358,7 +358,7 @@ static bool check_tree_block_fsid(struct extent_buffer *eb)
/* Do basic extent buffer checks at read time */
int btrfs_validate_extent_buffer(struct extent_buffer *eb,
- struct btrfs_tree_parent_check *check)
+ const struct btrfs_tree_parent_check *check)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
u64 found_start;
@@ -367,6 +367,7 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
u8 result[BTRFS_CSUM_SIZE];
const u8 *header_csum;
int ret = 0;
+ const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);
ASSERT(check);
@@ -399,13 +400,16 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
if (memcmp(result, header_csum, csum_size) != 0) {
btrfs_warn_rl(fs_info,
-"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
+"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s",
eb->start, eb->read_mirror,
CSUM_FMT_VALUE(csum_size, header_csum),
CSUM_FMT_VALUE(csum_size, result),
- btrfs_header_level(eb));
- ret = -EUCLEAN;
- goto out;
+ btrfs_header_level(eb),
+ ignore_csum ? ", ignored" : "");
+ if (!ignore_csum) {
+ ret = -EUCLEAN;
+ goto out;
+ }
}
if (found_level != check->level) {
@@ -425,7 +429,7 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
goto out;
}
if (check->has_first_key) {
- struct btrfs_key *expect_key = &check->first_key;
+ const struct btrfs_key *expect_key = &check->first_key;
struct btrfs_key found_key;
if (found_level)
@@ -635,10 +639,6 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
free_extent_buffer_stale(buf);
return ERR_PTR(ret);
}
- if (btrfs_check_eb_owner(buf, check->owner_root)) {
- free_extent_buffer_stale(buf);
- return ERR_PTR(-EUCLEAN);
- }
return buf;
}
@@ -658,11 +658,11 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->state = 0;
RB_CLEAR_NODE(&root->rb_node);
- root->last_trans = 0;
+ btrfs_set_root_last_trans(root, 0);
root->free_objectid = 0;
root->nr_delalloc_inodes = 0;
root->nr_ordered_extents = 0;
- root->inode_tree = RB_ROOT;
+ xa_init(&root->inodes);
xa_init(&root->delayed_nodes);
btrfs_init_root_block_rsv(root);
@@ -674,7 +674,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
INIT_LIST_HEAD(&root->ordered_extents);
INIT_LIST_HEAD(&root->ordered_root);
INIT_LIST_HEAD(&root->reloc_dirty_list);
- spin_lock_init(&root->inode_lock);
spin_lock_init(&root->delalloc_lock);
spin_lock_init(&root->ordered_extent_lock);
spin_lock_init(&root->accounting_lock);
@@ -847,13 +846,6 @@ struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
return btrfs_global_root(fs_info, &key);
}
-struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
-{
- if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
- return fs_info->block_group_root;
- return btrfs_extent_root(fs_info, 0);
-}
-
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid)
{
@@ -1010,7 +1002,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
return ret;
}
- log_root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(log_root, trans->transid);
log_root->root_key.offset = btrfs_root_id(root);
inode_item = &log_root->root_item.inode;
@@ -1033,7 +1025,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
struct btrfs_path *path,
- struct btrfs_key *key)
+ const struct btrfs_key *key)
{
struct btrfs_root *root;
struct btrfs_tree_parent_check check = { 0 };
@@ -1095,7 +1087,7 @@ fail:
}
struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
- struct btrfs_key *key)
+ const struct btrfs_key *key)
{
struct btrfs_root *root;
struct btrfs_path *path;
@@ -1230,7 +1222,7 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
return ret;
}
-void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
+void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
struct btrfs_root *root;
@@ -1854,7 +1846,8 @@ void btrfs_put_root(struct btrfs_root *root)
return;
if (refcount_dec_and_test(&root->refs)) {
- WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
+ if (WARN_ON(!xa_empty(&root->inodes)))
+ xa_destroy(&root->inodes);
WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
@@ -1928,7 +1921,7 @@ static int btrfs_init_btree_inode(struct super_block *sb)
if (!inode)
return -ENOMEM;
- inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+ btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
set_nlink(inode, 1);
/*
* we set the i_size on the btree inode to the max possible int.
@@ -1939,15 +1932,11 @@ static int btrfs_init_btree_inode(struct super_block *sb)
inode->i_mapping->a_ops = &btree_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
- RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
IO_TREE_BTREE_INODE_IO);
extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
- BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
- BTRFS_I(inode)->location.type = 0;
- BTRFS_I(inode)->location.offset = 0;
set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
__insert_inode_hash(inode, hash);
fs_info->btree_inode = inode;
@@ -2146,7 +2135,7 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root,
/* If we have IGNOREDATACSUMS skip loading these roots. */
if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
- set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
+ set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
return 0;
}
@@ -2199,7 +2188,7 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root,
if (!found || ret) {
if (objectid == BTRFS_CSUM_TREE_OBJECTID)
- set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
+ set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
ret = ret ? ret : -ENOENT;
@@ -2350,21 +2339,29 @@ out:
* 1, 2 2nd and 3rd backup copy
* -1 skip bytenr check
*/
-int btrfs_validate_super(struct btrfs_fs_info *fs_info,
- struct btrfs_super_block *sb, int mirror_num)
+int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *sb, int mirror_num)
{
u64 nodesize = btrfs_super_nodesize(sb);
u64 sectorsize = btrfs_super_sectorsize(sb);
int ret = 0;
+ const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS);
if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
btrfs_err(fs_info, "no valid FS found");
ret = -EINVAL;
}
- if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
- btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
- btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
- ret = -EINVAL;
+ if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) {
+ if (!ignore_flags) {
+ btrfs_err(fs_info,
+ "unrecognized or unsupported super flag 0x%llx",
+ btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+ ret = -EINVAL;
+ } else {
+ btrfs_info(fs_info,
+ "unrecognized or unsupported super flags: 0x%llx, ignored",
+ btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+ }
}
if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
btrfs_err(fs_info, "tree_root level too big: %d >= %d",
@@ -2467,7 +2464,7 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
(!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
!btrfs_fs_incompat(fs_info, NO_HOLES))) {
btrfs_err(fs_info,
- "block-group-tree feature requires fres-space-tree and no-holes");
+ "block-group-tree feature requires free-space-tree and no-holes");
ret = -EINVAL;
}
@@ -2856,6 +2853,8 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (ret)
return ret;
+ spin_lock_init(&fs_info->extent_map_shrinker_lock);
+
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
if (ret)
return ret;
@@ -2880,6 +2879,8 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (sb_rdonly(sb))
set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
+ if (btrfs_test_opt(fs_info, IGNOREMETACSUMS))
+ set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state);
return btrfs_alloc_stripe_hash_table(fs_info);
}
@@ -2925,22 +2926,22 @@ static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
u64 root_objectid = 0;
struct btrfs_root *gang[8];
- int i = 0;
- int err = 0;
- unsigned int ret = 0;
+ int ret = 0;
while (1) {
+ unsigned int found;
+
spin_lock(&fs_info->fs_roots_radix_lock);
- ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+ found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
(void **)gang, root_objectid,
ARRAY_SIZE(gang));
- if (!ret) {
+ if (!found) {
spin_unlock(&fs_info->fs_roots_radix_lock);
break;
}
- root_objectid = btrfs_root_id(gang[ret - 1]) + 1;
+ root_objectid = btrfs_root_id(gang[found - 1]) + 1;
- for (i = 0; i < ret; i++) {
+ for (int i = 0; i < found; i++) {
/* Avoid to grab roots in dead_roots. */
if (btrfs_root_refs(&gang[i]->root_item) == 0) {
gang[i] = NULL;
@@ -2951,24 +2952,25 @@ static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
}
spin_unlock(&fs_info->fs_roots_radix_lock);
- for (i = 0; i < ret; i++) {
+ for (int i = 0; i < found; i++) {
if (!gang[i])
continue;
root_objectid = btrfs_root_id(gang[i]);
- err = btrfs_orphan_cleanup(gang[i]);
- if (err)
- goto out;
+ /*
+ * After the first error, continue to release the remaining roots
+ * without running cleanup on them, and preserve the first error
+ * for the return value.
+ */
+ if (!ret)
+ ret = btrfs_orphan_cleanup(gang[i]);
btrfs_put_root(gang[i]);
}
+ if (ret)
+ break;
+
root_objectid++;
}
-out:
- /* Release the uncleaned roots due to error. */
- for (; i < ret; i++) {
- if (gang[i])
- btrfs_put_root(gang[i]);
- }
- return err;
+ return ret;
}
/*
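The reworked btrfs_cleanup_fs_roots() above records the first btrfs_orphan_cleanup() failure but keeps dropping the remaining root references instead of leaking them. A small standalone sketch of that record-the-first-error-but-keep-releasing pattern, with hypothetical item and cleanup names and malloc standing in for the root references:

#include <stdio.h>
#include <stdlib.h>

/* Pretend per-root cleanup; fails once to exercise the error path. */
static int cleanup_item(int id)
{
	return (id == 2) ? -1 : 0;
}

int main(void)
{
	int ret = 0;

	for (int i = 0; i < 5; i++) {
		int *item = malloc(sizeof(*item));

		if (!item)
			break;
		*item = i;
		/* Only run cleanup until the first error is seen... */
		if (!ret)
			ret = cleanup_item(*item);
		/* ...but always release the reference, so nothing leaks. */
		free(item);
	}
	printf("first error: %d\n", ret);
	return ret ? 1 : 0;
}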
@@ -3202,7 +3204,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
}
int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
- char *options)
+ const char *options)
{
u32 sectorsize;
u32 nodesize;
@@ -4155,9 +4157,6 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *root = fs_info->tree_root;
- struct btrfs_trans_handle *trans;
-
mutex_lock(&fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
@@ -4167,10 +4166,7 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
down_write(&fs_info->cleanup_work_sem);
up_write(&fs_info->cleanup_work_sem);
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- return btrfs_commit_transaction(trans);
+ return btrfs_commit_current_transaction(fs_info->tree_root);
}
static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
@@ -4531,25 +4527,17 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
* extents that haven't had their dirty pages IO start writeout yet
* actually get run and error out properly.
*/
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
}
static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info)
{
struct rb_node *node;
- struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
struct btrfs_delayed_ref_node *ref;
- delayed_refs = &trans->delayed_refs;
-
spin_lock(&delayed_refs->lock);
- if (atomic_read(&delayed_refs->num_entries) == 0) {
- spin_unlock(&delayed_refs->lock);
- btrfs_debug(fs_info, "delayed_refs has NO entry");
- return;
- }
-
while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
struct rb_node *n;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 76eb53fe7a11..99af64d3f277 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -41,7 +41,7 @@ static inline u64 btrfs_sb_offset(int mirror)
return BTRFS_SUPER_INFO_OFFSET;
}
-void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info);
+void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info);
void btrfs_init_fs_info(struct btrfs_fs_info *fs_info);
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
struct btrfs_tree_parent_check *check);
@@ -52,12 +52,11 @@ struct extent_buffer *btrfs_find_create_tree_block(
int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info);
int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
const struct btrfs_super_block *disk_sb);
-int __cold open_ctree(struct super_block *sb,
- struct btrfs_fs_devices *fs_devices,
- char *options);
+int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
+ const char *options);
void __cold close_ctree(struct btrfs_fs_info *fs_info);
-int btrfs_validate_super(struct btrfs_fs_info *fs_info,
- struct btrfs_super_block *sb, int mirror_num);
+int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *sb, int mirror_num);
int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount);
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev);
@@ -65,7 +64,7 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
int copy_num, bool drop_cache);
int btrfs_commit_super(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
- struct btrfs_key *key);
+ const struct btrfs_key *key);
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
@@ -83,7 +82,6 @@ struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
struct btrfs_key *key);
struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr);
struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr);
-struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info);
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
@@ -91,7 +89,7 @@ void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
int btrfs_validate_extent_buffer(struct extent_buffer *eb,
- struct btrfs_tree_parent_check *check);
+ const struct btrfs_tree_parent_check *check);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
#endif
@@ -118,7 +116,7 @@ void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int atomic);
int btrfs_read_extent_buffer(struct extent_buffer *buf,
- struct btrfs_tree_parent_check *check);
+ const struct btrfs_tree_parent_check *check);
blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio);
int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 9e81f89e76d8..e2b22bea348a 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -40,7 +40,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
if (parent) {
u64 parent_root_id;
- fid->parent_objectid = BTRFS_I(parent)->location.objectid;
+ fid->parent_objectid = btrfs_ino(BTRFS_I(parent));
fid->parent_gen = parent->i_generation;
parent_root_id = btrfs_root_id(BTRFS_I(parent)->root);
@@ -84,7 +84,7 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
if (IS_ERR(root))
return ERR_CAST(root);
- inode = btrfs_iget(sb, objectid, root);
+ inode = btrfs_iget(objectid, root);
btrfs_put_root(root);
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -210,7 +210,7 @@ struct dentry *btrfs_get_parent(struct dentry *child)
found_key.offset, 0);
}
- return d_obtain_alias(btrfs_iget(fs_info->sb, key.objectid, root));
+ return d_obtain_alias(btrfs_iget(key.objectid, root));
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index ed2cfc3d5d8a..c54c5d7a5cd5 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -4,6 +4,7 @@
#include <trace/events/btrfs.h>
#include "messages.h"
#include "ctree.h"
+#include "extent_io.h"
#include "extent-io-tree.h"
#include "btrfs_inode.h"
@@ -1084,6 +1085,9 @@ again:
*/
prealloc = alloc_extent_state(mask);
}
+ /* Optimistically preallocate the extent changeset ulist node. */
+ if (changeset)
+ extent_changeset_prealloc(changeset, mask);
spin_lock(&tree->lock);
if (cached_state && *cached_state) {
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3774c191e36d..d77498e7671c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -104,10 +104,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_path *path;
- struct btrfs_extent_item *ei;
- struct extent_buffer *leaf;
struct btrfs_key key;
- u32 item_size;
u64 num_refs;
u64 extent_flags;
u64 owner = 0;
@@ -126,11 +123,6 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- if (!trans) {
- path->skip_locking = 1;
- path->search_commit_root = 1;
- }
-
search_again:
key.objectid = bytenr;
key.offset = offset;
@@ -144,7 +136,7 @@ search_again:
if (ret < 0)
goto out_free;
- if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
+ if (ret > 0 && key.type == BTRFS_METADATA_ITEM_KEY) {
if (path->slots[0]) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
@@ -157,38 +149,37 @@ search_again:
}
if (ret == 0) {
- leaf = path->nodes[0];
- item_size = btrfs_item_size(leaf, path->slots[0]);
- if (item_size >= sizeof(*ei)) {
- ei = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_item);
- num_refs = btrfs_extent_refs(leaf, ei);
- extent_flags = btrfs_extent_flags(leaf, ei);
- owner = btrfs_get_extent_owner_root(fs_info, leaf,
- path->slots[0]);
- } else {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_extent_item *ei;
+ const u32 item_size = btrfs_item_size(leaf, path->slots[0]);
+
+ if (unlikely(item_size < sizeof(*ei))) {
ret = -EUCLEAN;
btrfs_err(fs_info,
"unexpected extent item size, has %u expect >= %zu",
item_size, sizeof(*ei));
- if (trans)
- btrfs_abort_transaction(trans, ret);
- else
- btrfs_handle_fs_error(fs_info, ret, NULL);
-
+ btrfs_abort_transaction(trans, ret);
goto out_free;
}
- BUG_ON(num_refs == 0);
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ num_refs = btrfs_extent_refs(leaf, ei);
+ if (unlikely(num_refs == 0)) {
+ ret = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected zero reference count for extent item (%llu %u %llu)",
+ key.objectid, key.type, key.offset);
+ btrfs_abort_transaction(trans, ret);
+ goto out_free;
+ }
+ extent_flags = btrfs_extent_flags(leaf, ei);
+ owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]);
} else {
num_refs = 0;
extent_flags = 0;
ret = 0;
}
- if (!trans)
- goto out;
-
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
@@ -211,15 +202,13 @@ search_again:
spin_lock(&head->lock);
if (head->extent_op && head->extent_op->update_flags)
extent_flags |= head->extent_op->flags_to_set;
- else
- BUG_ON(num_refs == 0);
num_refs += head->ref_mod;
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
}
spin_unlock(&delayed_refs->lock);
-out:
+
WARN_ON(num_refs == 0);
if (refs)
*refs = num_refs;
@@ -1632,7 +1621,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
if (metadata) {
key.type = BTRFS_METADATA_ITEM_KEY;
- key.offset = extent_op->level;
+ key.offset = head->level;
} else {
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = head->num_bytes;
@@ -1667,7 +1656,7 @@ again:
ret = -EUCLEAN;
btrfs_err(fs_info,
"missing extent item for extent %llu num_bytes %llu level %d",
- head->bytenr, head->num_bytes, extent_op->level);
+ head->bytenr, head->num_bytes, head->level);
goto out;
}
}
@@ -1726,7 +1715,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
.generation = trans->transid,
};
- BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, node, extent_op);
if (!ret)
btrfs_record_squota_delta(fs_info, &delta);
@@ -2233,7 +2221,6 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, u64 flags)
{
struct btrfs_delayed_extent_op *extent_op;
- int level = btrfs_header_level(eb);
int ret;
extent_op = btrfs_alloc_delayed_extent_op();
@@ -2243,9 +2230,9 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
extent_op->flags_to_set = flags;
extent_op->update_flags = true;
extent_op->update_key = false;
- extent_op->level = level;
- ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
+ ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len,
+ btrfs_header_level(eb), extent_op);
if (ret)
btrfs_free_delayed_extent_op(extent_op);
return ret;
@@ -3419,10 +3406,10 @@ out_delayed_unlock:
return 0;
}
-void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- u64 root_id,
- struct extent_buffer *buf,
- u64 parent, int last_ref)
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ u64 root_id,
+ struct extent_buffer *buf,
+ u64 parent, int last_ref)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *bg;
@@ -3449,11 +3436,12 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 0, false);
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret < 0)
+ return ret;
}
if (!last_ref)
- return;
+ return 0;
if (btrfs_header_generation(buf) != trans->transid)
goto out;
@@ -3510,6 +3498,7 @@ out:
* matter anymore.
*/
clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
+ return 0;
}
/* Can return -ENOMEM */
@@ -4864,7 +4853,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct extent_buffer *leaf;
u32 size = sizeof(*extent_item) + sizeof(*iref);
- u64 flags = extent_op->flags_to_set;
+ const u64 flags = (extent_op ? extent_op->flags_to_set : 0);
/* The owner of a tree block is the level. */
int level = btrfs_delayed_ref_owner(node);
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
@@ -5121,7 +5110,6 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_key ins;
struct btrfs_block_rsv *block_rsv;
struct extent_buffer *buf;
- struct btrfs_delayed_extent_op *extent_op;
u64 flags = 0;
int ret;
u32 blocksize = fs_info->nodesize;
@@ -5164,6 +5152,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
BUG_ON(parent > 0);
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
+ struct btrfs_delayed_extent_op *extent_op;
struct btrfs_ref generic_ref = {
.action = BTRFS_ADD_DELAYED_EXTENT,
.bytenr = ins.objectid,
@@ -5172,30 +5161,34 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
.owning_root = owning_root,
.ref_root = root_objectid,
};
- extent_op = btrfs_alloc_delayed_extent_op();
- if (!extent_op) {
- ret = -ENOMEM;
- goto out_free_buf;
+
+ if (!skinny_metadata || flags != 0) {
+ extent_op = btrfs_alloc_delayed_extent_op();
+ if (!extent_op) {
+ ret = -ENOMEM;
+ goto out_free_buf;
+ }
+ if (key)
+ memcpy(&extent_op->key, key, sizeof(extent_op->key));
+ else
+ memset(&extent_op->key, 0, sizeof(extent_op->key));
+ extent_op->flags_to_set = flags;
+ extent_op->update_key = (skinny_metadata ? false : true);
+ extent_op->update_flags = (flags != 0);
+ } else {
+ extent_op = NULL;
}
- if (key)
- memcpy(&extent_op->key, key, sizeof(extent_op->key));
- else
- memset(&extent_op->key, 0, sizeof(extent_op->key));
- extent_op->flags_to_set = flags;
- extent_op->update_key = skinny_metadata ? false : true;
- extent_op->update_flags = true;
- extent_op->level = level;
btrfs_init_tree_ref(&generic_ref, level, btrfs_root_id(root), false);
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
- if (ret)
- goto out_free_delayed;
+ if (ret) {
+ btrfs_free_delayed_extent_op(extent_op);
+ goto out_free_buf;
+ }
}
return buf;
-out_free_delayed:
- btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
btrfs_tree_unlock(buf);
free_extent_buffer(buf);
@@ -5220,11 +5213,99 @@ struct walk_control {
int reada_slot;
int reada_count;
int restarted;
+ /* Indicate that extent info needs to be looked up when walking the tree. */
+ int lookup_info;
};
+/*
+ * This is our normal stage. We are traversing blocks the current snapshot owns
+ * and we are dropping any of our references to any children we are able to, and
+ * then freeing the block once we've processed all of the children.
+ */
#define DROP_REFERENCE 1
+
+/*
+ * We enter this stage when we have to walk into a child block (meaning we can't
+ * simply drop our reference to it from our current parent node) and there is
+ * more than one reference on it. If we are the owner of any of the child
+ * blocks from the current parent node then we have to do the FULL_BACKREF dance
+ * on them in order to drop our normal ref and add the shared ref.
+ */
#define UPDATE_BACKREF 2
+/*
+ * Decide if we need to walk down into this node to adjust the references.
+ *
+ * @root: the root we are currently deleting
+ * @wc: the walk control for this deletion
+ * @eb: the parent eb that we're currently visiting
+ * @refs: the number of refs for wc->level - 1
+ * @flags: the flags for wc->level - 1
+ * @slot: the slot in the eb that we're currently checking
+ *
+ * This is meant to be called when we're evaluating if a node we point to at
+ * wc->level should be read and walked into, or if we can simply delete our
+ * reference to it. We return true if we should walk into the node, false if we
+ * can skip it.
+ *
+ * We have assertions in here to make sure this is called correctly. We assume
+ * that sanity checking on the blocks read to this point has been done, so any
+ * corrupted file systems must have been caught before calling this function.
+ */
+static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *wc,
+ struct extent_buffer *eb, u64 refs, u64 flags, int slot)
+{
+ struct btrfs_key key;
+ u64 generation;
+ int level = wc->level;
+
+ ASSERT(level > 0);
+ ASSERT(wc->refs[level - 1] > 0);
+
+ /*
+ * In the update backref stage we only want to skip if we already have
+ * FULL_BACKREF set; otherwise we need to read.
+ */
+ if (wc->stage == UPDATE_BACKREF) {
+ if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ return false;
+ return true;
+ }
+
+ /*
+ * We're the last ref on this block, we must walk into it and process
+ * any refs it's pointing at.
+ */
+ if (wc->refs[level - 1] == 1)
+ return true;
+
+ /*
+ * If we're already FULL_BACKREF then we know we can just drop our
+ * current reference.
+ */
+ if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ return false;
+
+ /*
+ * This block is older than our creation generation, we can drop our
+ * reference to it.
+ */
+ generation = btrfs_node_ptr_generation(eb, slot);
+ if (!wc->update_ref || generation <= root->root_key.offset)
+ return false;
+
+ /*
+ * This block was processed from a previous snapshot deletion run, we
+ * can skip it.
+ */
+ btrfs_node_key_to_cpu(eb, &key, slot);
+ if (btrfs_comp_cpu_keys(&key, &wc->update_progress) < 0)
+ return false;
+
+ /* All other cases we need to wander into the node. */
+ return true;
+}
+
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct walk_control *wc,
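visit_node_for_delete() above folds the previously scattered skip conditions into one predicate. A condensed standalone model of the same decision table follows; types are simplified, the drop-progress key comparison is omitted, and newer_than_snapshot is a hypothetical flag standing in for the wc->update_ref and generation checks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum stage { DROP_REFERENCE = 1, UPDATE_BACKREF = 2 };

#define FLAG_FULL_BACKREF (1ULL << 0)	/* stands in for BTRFS_BLOCK_FLAG_FULL_BACKREF */

/* Should the walk descend into the child at level - 1? */
static bool should_visit_child(enum stage stage, int level, uint64_t child_refs,
			       uint64_t child_flags, bool newer_than_snapshot)
{
	/* UPDATE_BACKREF only skips children that already carry FULL_BACKREF. */
	if (stage == UPDATE_BACKREF)
		return !(level == 1 && (child_flags & FLAG_FULL_BACKREF));
	/* Last reference: must walk in and drop whatever it points at. */
	if (child_refs == 1)
		return true;
	/* Shared and already FULL_BACKREF: just drop our own reference. */
	if (level == 1 && (child_flags & FLAG_FULL_BACKREF))
		return false;
	/* Shared block that predates the snapshot: drop without visiting. */
	if (!newer_than_snapshot)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", should_visit_child(DROP_REFERENCE, 1, 2, FLAG_FULL_BACKREF, false));
	printf("%d\n", should_visit_child(DROP_REFERENCE, 1, 1, 0, false));
	printf("%d\n", should_visit_child(UPDATE_BACKREF, 2, 3, 0, false));
	return 0;
}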
@@ -5236,7 +5317,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
u64 refs;
u64 flags;
u32 nritems;
- struct btrfs_key key;
struct extent_buffer *eb;
int ret;
int slot;
@@ -5276,28 +5356,19 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
/* We don't care about errors in readahead. */
if (ret < 0)
continue;
- BUG_ON(refs == 0);
- if (wc->stage == DROP_REFERENCE) {
- if (refs == 1)
- goto reada;
+ /*
+ * This could be racy; it's conceivable that we raced and ended
+ * up with a bogus refs count. If that's the case just skip it; if
+ * we are actually corrupt we will notice when we look up
+ * everything again with our locks.
+ */
+ if (refs == 0)
+ continue;
- if (wc->level == 1 &&
- (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- continue;
- if (!wc->update_ref ||
- generation <= root->root_key.offset)
- continue;
- btrfs_node_key_to_cpu(eb, &key, slot);
- ret = btrfs_comp_cpu_keys(&key,
- &wc->update_progress);
- if (ret < 0)
- continue;
- } else {
- if (wc->level == 1 &&
- (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- continue;
- }
+ /* If we don't need to visit this node don't reada. */
+ if (!visit_node_for_delete(root, wc, eb, refs, flags, slot))
+ continue;
reada:
btrfs_readahead_node_child(eb, slot);
nread++;
@@ -5316,7 +5387,7 @@ reada:
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct walk_control *wc, int lookup_info)
+ struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int level = wc->level;
@@ -5331,19 +5402,22 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
* when reference count of tree block is 1, it won't increase
* again. once full backref flag is set, we never clear it.
*/
- if (lookup_info &&
+ if (wc->lookup_info &&
((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
(wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
- BUG_ON(!path->locks[level]);
+ ASSERT(path->locks[level]);
ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
&wc->flags[level],
NULL);
- BUG_ON(ret == -ENOMEM);
if (ret)
return ret;
- BUG_ON(wc->refs[level] == 0);
+ if (unlikely(wc->refs[level] == 0)) {
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ eb->start);
+ return -EUCLEAN;
+ }
}
if (wc->stage == DROP_REFERENCE) {
@@ -5359,13 +5433,22 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
/* wc->stage == UPDATE_BACKREF */
if (!(wc->flags[level] & flag)) {
- BUG_ON(!path->locks[level]);
+ ASSERT(path->locks[level]);
ret = btrfs_inc_ref(trans, root, eb, 1);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
ret = btrfs_dec_ref(trans, root, eb, 0);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
ret = btrfs_set_disk_extent_flags(trans, eb, flag);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
wc->flags[level] |= flag;
}
@@ -5408,6 +5491,132 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
}
/*
+ * We may not have an uptodate block, so if we are going to walk down into this
+ * block we need to drop the lock, read it off the disk, re-lock it and
+ * return to continue dropping the snapshot.
+ */
+static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc,
+ struct extent_buffer *next)
+{
+ struct btrfs_tree_parent_check check = { 0 };
+ u64 generation;
+ int level = wc->level;
+ int ret;
+
+ btrfs_assert_tree_write_locked(next);
+
+ generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]);
+
+ if (btrfs_buffer_uptodate(next, generation, 0))
+ return 0;
+
+ check.level = level - 1;
+ check.transid = generation;
+ check.owner_root = btrfs_root_id(root);
+ check.has_first_key = true;
+ btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, path->slots[level]);
+
+ btrfs_tree_unlock(next);
+ if (level == 1)
+ reada_walk_down(trans, root, wc, path);
+ ret = btrfs_read_extent_buffer(next, &check);
+ if (ret) {
+ free_extent_buffer(next);
+ return ret;
+ }
+ btrfs_tree_lock(next);
+ wc->lookup_info = 1;
+ return 0;
+}
+
+/*
+ * If we determine that we don't have to visit wc->level - 1 then we need to
+ * determine if we can drop our reference.
+ *
+ * If we are UPDATE_BACKREF then we will not, we need to update our backrefs.
+ *
+ * If we are DROP_REFERENCE this will figure out if we need to drop our current
+ * reference, skipping it if we dropped it from a previous incomplete drop, or
+ * dropping it if we still have a reference to it.
+ */
+static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, struct walk_control *wc,
+ struct extent_buffer *next, u64 owner_root)
+{
+ struct btrfs_ref ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = next->start,
+ .num_bytes = root->fs_info->nodesize,
+ .owning_root = owner_root,
+ .ref_root = btrfs_root_id(root),
+ };
+ int level = wc->level;
+ int ret;
+
+ /* We are UPDATE_BACKREF, we're not dropping anything. */
+ if (wc->stage == UPDATE_BACKREF)
+ return 0;
+
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
+ ref.parent = path->nodes[level]->start;
+ } else {
+ ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level]));
+ if (btrfs_root_id(root) != btrfs_header_owner(path->nodes[level])) {
+ btrfs_err(root->fs_info, "mismatched block owner");
+ return -EIO;
+ }
+ }
+
+ /*
+ * If we had a drop_progress we need to verify the refs are set as
+ * expected. If we find our ref then we know that from here on out
+ * everything should be correct, and we can clear the
+ * ->restarted flag.
+ */
+ if (wc->restarted) {
+ ret = check_ref_exists(trans, root, next->start, ref.parent,
+ level - 1);
+ if (ret <= 0)
+ return ret;
+ ret = 0;
+ wc->restarted = 0;
+ }
+
+ /*
+ * Reloc tree doesn't contribute to qgroup numbers, and we have already
+ * accounted them at merge time (replace_path), thus we could skip
+ * expensive subtree trace here.
+ */
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
+ wc->refs[level - 1] > 1) {
+ u64 generation = btrfs_node_ptr_generation(path->nodes[level],
+ path->slots[level]);
+
+ ret = btrfs_qgroup_trace_subtree(trans, next, generation, level - 1);
+ if (ret) {
+ btrfs_err_rl(root->fs_info,
+"error %d accounting shared subtree, quota is out of sync, rescan required",
+ ret);
+ }
+ }
+
+ /*
+ * We need to update the next key in our walk control so we can update
+ * the drop_progress key accordingly. We don't care if find_next_key
+ * doesn't find a key because that means we're at the end and are going
+ * to clean up now.
+ */
+ wc->drop_level = level;
+ find_next_key(path, level, &wc->drop_progress);
+
+ btrfs_init_tree_ref(&ref, level - 1, 0, false);
+ return btrfs_free_extent(trans, &ref);
+}
+
+/*
* helper to process tree block pointer.
*
* when wc->stage == DROP_REFERENCE, this function checks
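check_next_block_uptodate() above isolates the drop-the-lock, read-from-disk, re-lock dance that do_walk_down() used to open-code. A tiny userspace analogue of that pattern, with a pthread mutex standing in for the extent buffer lock and an invented read_block_from_disk() helper:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for reading a tree block from disk: must not hold the lock. */
static int read_block_from_disk(int *block)
{
	*block = 42;
	return 0;
}

/* Drop the lock around the slow read, then take it back. */
static int make_block_uptodate(int *block, int uptodate)
{
	int ret;

	if (uptodate)
		return 0;
	pthread_mutex_unlock(&node_lock);
	ret = read_block_from_disk(block);
	pthread_mutex_lock(&node_lock);
	return ret;
}

int main(void)
{
	int block = 0;

	pthread_mutex_lock(&node_lock);
	make_block_uptodate(&block, 0);
	pthread_mutex_unlock(&node_lock);
	printf("block = %d\n", block);
	return 0;
}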
@@ -5423,19 +5632,15 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- struct walk_control *wc, int *lookup_info)
+ struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 generation;
u64 owner_root = 0;
- struct btrfs_tree_parent_check check = { 0 };
- struct btrfs_key key;
struct extent_buffer *next;
int level = wc->level;
- int reada = 0;
int ret = 0;
- bool need_account = false;
generation = btrfs_node_ptr_generation(path->nodes[level],
path->slots[level]);
@@ -5446,27 +5651,17 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
*/
if (wc->stage == UPDATE_BACKREF &&
generation <= root->root_key.offset) {
- *lookup_info = 1;
+ wc->lookup_info = 1;
return 1;
}
bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
- check.level = level - 1;
- check.transid = generation;
- check.owner_root = btrfs_root_id(root);
- check.has_first_key = true;
- btrfs_node_key_to_cpu(path->nodes[level], &check.first_key,
- path->slots[level]);
+ next = btrfs_find_create_tree_block(fs_info, bytenr, btrfs_root_id(root),
+ level - 1);
+ if (IS_ERR(next))
+ return PTR_ERR(next);
- next = find_extent_buffer(fs_info, bytenr);
- if (!next) {
- next = btrfs_find_create_tree_block(fs_info, bytenr,
- btrfs_root_id(root), level - 1);
- if (IS_ERR(next))
- return PTR_ERR(next);
- reada = 1;
- }
btrfs_tree_lock(next);
ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
@@ -5477,57 +5672,32 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
goto out_unlock;
if (unlikely(wc->refs[level - 1] == 0)) {
- btrfs_err(fs_info, "Missing references.");
- ret = -EIO;
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ bytenr);
+ ret = -EUCLEAN;
goto out_unlock;
}
- *lookup_info = 0;
+ wc->lookup_info = 0;
- if (wc->stage == DROP_REFERENCE) {
- if (wc->refs[level - 1] > 1) {
- need_account = true;
- if (level == 1 &&
- (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- goto skip;
-
- if (!wc->update_ref ||
- generation <= root->root_key.offset)
- goto skip;
-
- btrfs_node_key_to_cpu(path->nodes[level], &key,
- path->slots[level]);
- ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
- if (ret < 0)
- goto skip;
+ /* If we don't have to walk into this node skip it. */
+ if (!visit_node_for_delete(root, wc, path->nodes[level],
+ wc->refs[level - 1], wc->flags[level - 1],
+ path->slots[level]))
+ goto skip;
- wc->stage = UPDATE_BACKREF;
- wc->shared_level = level - 1;
- }
- } else {
- if (level == 1 &&
- (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
- goto skip;
+ /*
+ * We have to walk down into this node, and if we're currently at the
+ * DROP_REFERENCE stage and this block is shared then we need to switch
+ * to the UPDATE_BACKREF stage in order to convert to FULL_BACKREF.
+ */
+ if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) {
+ wc->stage = UPDATE_BACKREF;
+ wc->shared_level = level - 1;
}
- if (!btrfs_buffer_uptodate(next, generation, 0)) {
- btrfs_tree_unlock(next);
- free_extent_buffer(next);
- next = NULL;
- *lookup_info = 1;
- }
-
- if (!next) {
- if (reada && level == 1)
- reada_walk_down(trans, root, wc, path);
- next = read_tree_block(fs_info, bytenr, &check);
- if (IS_ERR(next)) {
- return PTR_ERR(next);
- } else if (!extent_buffer_uptodate(next)) {
- free_extent_buffer(next);
- return -EIO;
- }
- btrfs_tree_lock(next);
- }
+ ret = check_next_block_uptodate(trans, root, path, wc, next);
+ if (ret)
+ return ret;
level--;
ASSERT(level == btrfs_header_level(next));
@@ -5544,78 +5714,12 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
wc->reada_slot = 0;
return 0;
skip:
+ ret = maybe_drop_reference(trans, root, path, wc, next, owner_root);
+ if (ret)
+ goto out_unlock;
wc->refs[level - 1] = 0;
wc->flags[level - 1] = 0;
- if (wc->stage == DROP_REFERENCE) {
- struct btrfs_ref ref = {
- .action = BTRFS_DROP_DELAYED_REF,
- .bytenr = bytenr,
- .num_bytes = fs_info->nodesize,
- .owning_root = owner_root,
- .ref_root = btrfs_root_id(root),
- };
- if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
- ref.parent = path->nodes[level]->start;
- } else {
- ASSERT(btrfs_root_id(root) ==
- btrfs_header_owner(path->nodes[level]));
- if (btrfs_root_id(root) !=
- btrfs_header_owner(path->nodes[level])) {
- btrfs_err(root->fs_info,
- "mismatched block owner");
- ret = -EIO;
- goto out_unlock;
- }
- }
-
- /*
- * If we had a drop_progress we need to verify the refs are set
- * as expected. If we find our ref then we know that from here
- * on out everything should be correct, and we can clear the
- * ->restarted flag.
- */
- if (wc->restarted) {
- ret = check_ref_exists(trans, root, bytenr, ref.parent,
- level - 1);
- if (ret < 0)
- goto out_unlock;
- if (ret == 0)
- goto no_delete;
- ret = 0;
- wc->restarted = 0;
- }
-
- /*
- * Reloc tree doesn't contribute to qgroup numbers, and we have
- * already accounted them at merge time (replace_path),
- * thus we could skip expensive subtree trace here.
- */
- if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID && need_account) {
- ret = btrfs_qgroup_trace_subtree(trans, next,
- generation, level - 1);
- if (ret) {
- btrfs_err_rl(fs_info,
- "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
- ret);
- }
- }
-
- /*
- * We need to update the next key in our walk control so we can
- * update the drop_progress key accordingly. We don't care if
- * find_next_key doesn't find a key because that means we're at
- * the end and are going to clean up now.
- */
- wc->drop_level = level;
- find_next_key(path, level, &wc->drop_progress);
-
- btrfs_init_tree_ref(&ref, level - 1, 0, false);
- ret = btrfs_free_extent(trans, &ref);
- if (ret)
- goto out_unlock;
- }
-no_delete:
- *lookup_info = 1;
+ wc->lookup_info = 1;
ret = 1;
out_unlock:
@@ -5643,13 +5747,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- int ret;
+ int ret = 0;
int level = wc->level;
struct extent_buffer *eb = path->nodes[level];
u64 parent = 0;
if (wc->stage == UPDATE_BACKREF) {
- BUG_ON(wc->shared_level < level);
+ ASSERT(wc->shared_level >= level);
if (level < wc->shared_level)
goto out;
@@ -5667,7 +5771,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
* count is one.
*/
if (!path->locks[level]) {
- BUG_ON(level == 0);
+ ASSERT(level > 0);
btrfs_tree_lock(eb);
path->locks[level] = BTRFS_WRITE_LOCK;
@@ -5681,7 +5785,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
path->locks[level] = 0;
return ret;
}
- BUG_ON(wc->refs[level] == 0);
+ if (unlikely(wc->refs[level] == 0)) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ eb->start);
+ return -EUCLEAN;
+ }
if (wc->refs[level] == 1) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
@@ -5691,7 +5800,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
}
/* wc->stage == DROP_REFERENCE */
- BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
+ ASSERT(path->locks[level] || wc->refs[level] == 1);
if (wc->refs[level] == 1) {
if (level == 0) {
@@ -5699,7 +5808,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
ret = btrfs_dec_ref(trans, root, eb, 1);
else
ret = btrfs_dec_ref(trans, root, eb, 0);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
if (is_fstree(btrfs_root_id(root))) {
ret = btrfs_qgroup_trace_leaf_items(trans, eb);
if (ret) {
@@ -5730,12 +5842,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
goto owner_mismatch;
}
- btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
- wc->refs[level] == 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
+ wc->refs[level] == 1);
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
out:
wc->refs[level] = 0;
wc->flags[level] = 0;
- return 0;
+ return ret;
owner_mismatch:
btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
@@ -5743,17 +5857,38 @@ owner_mismatch:
return -EUCLEAN;
}
+/*
+ * walk_down_tree consists of two steps.
+ *
+ * walk_down_proc(). Look up the reference count and flags of our current
+ * wc->level. At this point path->nodes[wc->level] should be populated and
+ * uptodate, and in most cases should already be locked. If we are in
+ * DROP_REFERENCE and our refcount is > 1 then we've entered a shared node and
+ * we can walk back up the tree. If we are UPDATE_BACKREF we have to set
+ * FULL_BACKREF on this node if it's not already set, and then do the
+ * FULL_BACKREF conversion dance, which is to drop the root reference and add
+ * the shared reference to all of this nodes children.
+ *
+ * do_walk_down(). This is where we actually start iterating on the children of
+ * our current path->nodes[wc->level]. For DROP_REFERENCE that means dropping
+ * our reference to the children that return false from visit_node_for_delete(),
+ * which has various conditions where we know we can just drop our reference
+ * without visiting the node. For UPDATE_BACKREF we will skip any children that
+ * visit_node_for_delete() returns false for, only walking down when necessary.
+ * The bulk of the work for UPDATE_BACKREF occurs in the walk_up_tree() part of
+ * snapshot deletion.
+ */
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct walk_control *wc)
{
int level = wc->level;
- int lookup_info = 1;
int ret = 0;
+ wc->lookup_info = 1;
while (level >= 0) {
- ret = walk_down_proc(trans, root, path, wc, lookup_info);
+ ret = walk_down_proc(trans, root, path, wc);
if (ret)
break;
@@ -5764,7 +5899,7 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
btrfs_header_nritems(path->nodes[level]))
break;
- ret = do_walk_down(trans, root, path, wc, &lookup_info);
+ ret = do_walk_down(trans, root, path, wc);
if (ret > 0) {
path->slots[level]++;
continue;
@@ -5775,6 +5910,23 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
return (ret == 1) ? 0 : ret;
}
+/*
+ * walk_up_tree() is responsible for making sure we visit every slot on our
+ * current node, and if we're at the end of that node then we call
+ * walk_up_proc() on our current node which will do one of a few things based on
+ * our stage.
+ *
+ * UPDATE_BACKREF. If our wc->level is currently less than our wc->shared_level
+ * then we need to walk back up the tree, and then go back down into the
+ * other slots via walk_down_tree to update any other children from our original
+ * wc->shared_level. Once we're at or above our wc->shared_level we can switch
+ * back to DROP_REFERENCE, lookup the current nodes refs and flags, and carry on.
+ *
+ * DROP_REFERENCE. If our refs == 1 then we're going to free this tree block.
+ * If we're level 0 then we need to btrfs_dec_ref() on all of the data extents
+ * in our current leaf. After that we call btrfs_free_tree_block() on the
+ * current node and walk up to the next node to walk down the next slot.
+ */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
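The comments added above describe walk_down_tree() and walk_up_tree() as an iterative, slot-driven traversal: descend while a node still has children to visit, process the node and pop back up once its slots are exhausted. A self-contained structural analogue over a tiny static tree, with no reference counting or locking, only the traversal shape:

#include <stdio.h>

#define MAX_LEVEL 8

struct node {
	const char *name;
	struct node *children[4];
	int nr_children;
};

/* Walk iteratively, keeping a per-level slot like path->slots[level]. */
static void walk(struct node *root)
{
	struct node *path[MAX_LEVEL] = { root };
	int slot[MAX_LEVEL] = { 0 };
	int level = 0;

	while (level >= 0) {
		struct node *cur = path[level];

		if (slot[level] < cur->nr_children) {
			/* "do_walk_down": descend into the next child. */
			path[level + 1] = cur->children[slot[level]];
			slot[level + 1] = 0;
			slot[level]++;
			level++;
		} else {
			/* "walk_up_proc": all slots done, process the node
			 * and move back to the parent's next slot. */
			printf("%s\n", cur->name);
			level--;
		}
	}
}

int main(void)
{
	struct node leaf1 = { .name = "leaf1" };
	struct node leaf2 = { .name = "leaf2" };
	struct node inner = { .name = "inner",
			      .children = { &leaf1, &leaf2 },
			      .nr_children = 2 };
	struct node root  = { .name = "root",
			      .children = { &inner },
			      .nr_children = 1 };

	walk(&root);	/* prints children before parents, like the drop walk frees them */
	return 0;
}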
@@ -5833,8 +5985,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
struct btrfs_root_item *root_item = &root->root_item;
struct walk_control *wc;
struct btrfs_key key;
- int err = 0;
- int ret;
+ const u64 rootid = btrfs_root_id(root);
+ int ret = 0;
int level;
bool root_dropped = false;
bool unfinished_drop = false;
@@ -5843,14 +5995,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
wc = kzalloc(sizeof(*wc), GFP_NOFS);
if (!wc) {
btrfs_free_path(path);
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -5863,12 +6015,12 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
else
trans = btrfs_start_transaction(tree_root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_free;
}
- err = btrfs_run_delayed_items(trans);
- if (err)
+ ret = btrfs_run_delayed_items(trans);
+ if (ret)
goto out_end_trans;
/*
@@ -5899,11 +6051,11 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
path->lowest_level = 0;
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out_end_trans;
- }
+
WARN_ON(ret > 0);
+ ret = 0;
/*
* unlock our path, this is safe because only this
@@ -5916,14 +6068,17 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
btrfs_tree_lock(path->nodes[level]);
path->locks[level] = BTRFS_WRITE_LOCK;
+ /*
+ * btrfs_lookup_extent_info() returns 0 for success,
+ * or < 0 for error.
+ */
ret = btrfs_lookup_extent_info(trans, fs_info,
path->nodes[level]->start,
level, 1, &wc->refs[level],
&wc->flags[level], NULL);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out_end_trans;
- }
+
BUG_ON(wc->refs[level] == 0);
if (level == btrfs_root_drop_level(root_item))
@@ -5949,19 +6104,18 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
ret = walk_down_tree(trans, root, path, wc);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
- err = ret;
break;
}
ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
- err = ret;
break;
}
if (ret > 0) {
BUG_ON(wc->stage != DROP_REFERENCE);
+ ret = 0;
break;
}
@@ -5983,7 +6137,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
root_item);
if (ret) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
}
@@ -5994,7 +6147,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
btrfs_debug(fs_info,
"drop snapshot early exit");
- err = -EAGAIN;
+ ret = -EAGAIN;
goto out_free;
}
@@ -6008,19 +6161,18 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
else
trans = btrfs_start_transaction(tree_root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_free;
}
}
}
btrfs_release_path(path);
- if (err)
+ if (ret)
goto out_end_trans;
ret = btrfs_del_root(trans, &root->root_key);
if (ret) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
}
@@ -6029,10 +6181,11 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
NULL, NULL);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
} else if (ret > 0) {
- /* if we fail to delete the orphan item this time
+ ret = 0;
+ /*
+ * If we fail to delete the orphan item this time
* around, it'll get picked up the next time.
*
* The most common failure here is just -ENOENT.
@@ -6063,11 +6216,19 @@ out_free:
kfree(wc);
btrfs_free_path(path);
out:
+ if (!ret && root_dropped) {
+ ret = btrfs_qgroup_cleanup_dropped_subvolume(fs_info, rootid);
+ if (ret < 0)
+ btrfs_warn_rl(fs_info,
+ "failed to cleanup qgroup 0/%llu: %d",
+ rootid, ret);
+ ret = 0;
+ }
/*
* We were an unfinished drop root, check to see if there are any
* pending, and if not clear and wake up any waiters.
*/
- if (!err && unfinished_drop)
+ if (!ret && unfinished_drop)
btrfs_maybe_wake_unfinished_drop(fs_info);
/*
@@ -6079,7 +6240,7 @@ out:
*/
if (!for_reloc && !root_dropped)
btrfs_add_dead_root(root);
- return err;
+ return ret;
}
/*
diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
index af9f8800d5ac..2ad51130c037 100644
--- a/fs/btrfs/extent-tree.h
+++ b/fs/btrfs/extent-tree.h
@@ -127,10 +127,10 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
u64 empty_size,
u64 reloc_src_root,
enum btrfs_lock_nesting nest);
-void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- u64 root_id,
- struct extent_buffer *buf,
- u64 parent, int last_ref);
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ u64 root_id,
+ struct extent_buffer *buf,
+ u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 owner,
u64 offset, u64 ram_bytes,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 597387e9f040..aa7f8148cd0d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -164,23 +164,8 @@ void __cold extent_buffer_free_cachep(void)
kmem_cache_destroy(extent_buffer_cache);
}
-void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
-{
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- struct page *page;
-
- while (index <= end_index) {
- page = find_get_page(inode->i_mapping, index);
- BUG_ON(!page); /* Pages should be in the extent_io_tree */
- clear_page_dirty_for_io(page);
- put_page(page);
- index++;
- }
-}
-
static void process_one_page(struct btrfs_fs_info *fs_info,
- struct page *page, struct page *locked_page,
+ struct page *page, const struct page *locked_page,
unsigned long page_ops, u64 start, u64 end)
{
struct folio *folio = page_folio(page);
@@ -203,7 +188,7 @@ static void process_one_page(struct btrfs_fs_info *fs_info,
}
static void __process_pages_contig(struct address_space *mapping,
- struct page *locked_page, u64 start, u64 end,
+ const struct page *locked_page, u64 start, u64 end,
unsigned long page_ops)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
@@ -230,8 +215,8 @@ static void __process_pages_contig(struct address_space *mapping,
}
}
-static noinline void __unlock_for_delalloc(struct inode *inode,
- struct page *locked_page,
+static noinline void __unlock_for_delalloc(const struct inode *inode,
+ const struct page *locked_page,
u64 start, u64 end)
{
unsigned long index = start >> PAGE_SHIFT;
@@ -246,7 +231,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
}
static noinline int lock_delalloc_pages(struct inode *inode,
- struct page *locked_page,
+ const struct page *locked_page,
u64 start,
u64 end)
{
@@ -411,7 +396,7 @@ out_failed:
}
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
- struct page *locked_page,
+ const struct page *locked_page,
struct extent_state **cached,
u32 clear_bits, unsigned long page_ops)
{
@@ -667,24 +652,22 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
}
/*
- * Populate every free slot in a provided array with folios.
+ * Populate every free slot in a provided array with folios using GFP_NOFS.
*
* @nr_folios: number of folios to allocate
* @folio_array: the array to fill with folios; any existing non-NULL entries in
* the array will be skipped
- * @extra_gfp: the extra GFP flags for the allocation
*
* Return: 0 if all folios were able to be allocated;
* -ENOMEM otherwise, the partially allocated folios would be freed and
* the array slots zeroed
*/
-int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array,
- gfp_t extra_gfp)
+int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
{
for (int i = 0; i < nr_folios; i++) {
if (folio_array[i])
continue;
- folio_array[i] = folio_alloc(GFP_NOFS | extra_gfp, 0);
+ folio_array[i] = folio_alloc(GFP_NOFS, 0);
if (!folio_array[i])
goto error;
}
@@ -698,21 +681,21 @@ error:
}
/*
- * Populate every free slot in a provided array with pages.
+ * Populate every free slot in a provided array with pages, using GFP_NOFS.
*
* @nr_pages: number of pages to allocate
* @page_array: the array to fill with pages; any existing non-null entries in
- * the array will be skipped
- * @extra_gfp: the extra GFP flags for the allocation.
+ * the array will be skipped
+ * @nofail: whether to use the __GFP_NOFAIL flag
*
* Return: 0 if all pages were able to be allocated;
* -ENOMEM otherwise, the partially allocated pages would be freed and
* the array slots zeroed
*/
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
- gfp_t extra_gfp)
+ bool nofail)
{
- const gfp_t gfp = GFP_NOFS | extra_gfp;
+ const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
unsigned int allocated;
for (allocated = 0; allocated < nr_pages;) {
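btrfs_alloc_page_array() above replaces the open-ended extra_gfp argument with a single nofail flag, since __GFP_NOFAIL was the only extra flag callers passed. A minimal userspace sketch of the same fill-the-array-or-unwind shape, assuming the array starts out all NULL (the kernel helpers also skip pre-populated slots) and with malloc standing in for page allocation:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Fill the array with freshly allocated buffers; on failure free what this
 * call allocated and zero those slots. "nofail" retries instead of giving up.
 */
static int alloc_buffer_array(unsigned int nr, void **bufs, size_t size, bool nofail)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		do {
			bufs[i] = malloc(size);
		} while (!bufs[i] && nofail);
		if (!bufs[i])
			goto error;
	}
	return 0;
error:
	while (i > 0) {
		free(bufs[--i]);
		bufs[i] = NULL;
	}
	return -1;	/* stands in for -ENOMEM */
}

int main(void)
{
	void *bufs[4] = { NULL };

	if (alloc_buffer_array(4, bufs, 64, false))
		return 1;
	for (int i = 0; i < 4; i++)
		free(bufs[i]);
	printf("allocated 4 buffers\n");
	return 0;
}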
@@ -736,13 +719,13 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
*
* For now, the folios populated are always in order 0 (aka, single page).
*/
-static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
+static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
{
struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
int num_pages = num_extent_pages(eb);
int ret;
- ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
+ ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
if (ret < 0)
return ret;
@@ -860,7 +843,7 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
/* Cap to the current ordered extent boundary if there is one. */
if (len > bio_ctrl->len_to_oe_boundary) {
ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
- ASSERT(is_data_inode(&inode->vfs_inode));
+ ASSERT(is_data_inode(inode));
len = bio_ctrl->len_to_oe_boundary;
}
@@ -1083,10 +1066,10 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
iosize = min(extent_map_end(em) - cur, end - cur + 1);
iosize = ALIGN(iosize, blocksize);
if (compress_type != BTRFS_COMPRESS_NONE)
- disk_bytenr = em->block_start;
+ disk_bytenr = em->disk_bytenr;
else
- disk_bytenr = em->block_start + extent_offset;
- block_start = em->block_start;
+ disk_bytenr = extent_map_block_start(em) + extent_offset;
+ block_start = extent_map_block_start(em);
if (em->flags & EXTENT_FLAG_PREALLOC)
block_start = EXTENT_MAP_HOLE;
@@ -1226,13 +1209,23 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
struct page *page, struct writeback_control *wbc)
{
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
+ struct folio *folio = page_folio(page);
+ const bool is_subpage = btrfs_is_subpage(fs_info, page->mapping);
const u64 page_start = page_offset(page);
const u64 page_end = page_start + PAGE_SIZE - 1;
+ /*
+ * Save the last found delalloc end. As the delalloc end can go beyond
+ * page boundary, thus we cannot rely on subpage bitmap to locate the
+ * last delalloc end.
+ */
+ u64 last_delalloc_end = 0;
u64 delalloc_start = page_start;
u64 delalloc_end = page_end;
u64 delalloc_to_write = 0;
int ret = 0;
+ /* Lock all (subpage) delalloc ranges inside the page first. */
while (delalloc_start < page_end) {
delalloc_end = page_end;
if (!find_lock_delalloc_range(&inode->vfs_inode, page,
@@ -1240,15 +1233,95 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
delalloc_start = delalloc_end + 1;
continue;
}
-
- ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
- delalloc_end, wbc);
- if (ret < 0)
- return ret;
-
+ btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start,
+ min(delalloc_end, page_end) + 1 -
+ delalloc_start);
+ last_delalloc_end = delalloc_end;
delalloc_start = delalloc_end + 1;
}
+ delalloc_start = page_start;
+ if (!last_delalloc_end)
+ goto out;
+
+ /* Run the delalloc ranges for the above locked ranges. */
+ while (delalloc_start < page_end) {
+ u64 found_start;
+ u32 found_len;
+ bool found;
+
+ if (!is_subpage) {
+ /*
+ * For non-subpage case, the found delalloc range must
+ * cover this page and there must be only one locked
+ * delalloc range.
+ */
+ found_start = page_start;
+ found_len = last_delalloc_end + 1 - found_start;
+ found = true;
+ } else {
+ found = btrfs_subpage_find_writer_locked(fs_info, folio,
+ delalloc_start, &found_start, &found_len);
+ }
+ if (!found)
+ break;
+ /*
+ * If the subpage range covers the last sector, the delalloc range may
+ * end beyond the page boundary, so use the saved delalloc_end
+ * instead.
+ */
+ if (found_start + found_len >= page_end)
+ found_len = last_delalloc_end + 1 - found_start;
+
+ if (ret >= 0) {
+ /* No errors hit so far, run the current delalloc range. */
+ ret = btrfs_run_delalloc_range(inode, page, found_start,
+ found_start + found_len - 1,
+ wbc);
+ } else {
+ /*
+ * We've hit an error during previous delalloc range,
+ * have to cleanup the remaining locked ranges.
+ */
+ unlock_extent(&inode->io_tree, found_start,
+ found_start + found_len - 1, NULL);
+ __unlock_for_delalloc(&inode->vfs_inode, page, found_start,
+ found_start + found_len - 1);
+ }
+
+ /*
+ * We can hit btrfs_run_delalloc_range() with >0 return value.
+ *
+ * This happens when either the IO is already done and page
+ * unlocked (inline) or the IO submission and page unlock would
+ * be handled as async (compression).
+ *
+ * Inline is only possible for regular sectorsize for now.
+ *
+ * Compression is possible for both subpage and regular cases,
+ * but even for subpage, compression only happens for a page-aligned
+ * range, thus the found delalloc range must go beyond the current
+ * page.
+ */
+ if (ret > 0)
+ ASSERT(!is_subpage || found_start + found_len >= page_end);
+
+ /*
+ * Above btrfs_run_delalloc_range() may have unlocked the page,
+ * thus for the last range, we cannot touch the page anymore.
+ */
+ if (found_start + found_len >= last_delalloc_end + 1)
+ break;
+
+ delalloc_start = found_start + found_len;
+ }
+ if (ret < 0)
+ return ret;
+out:
+ if (last_delalloc_end)
+ delalloc_end = last_delalloc_end;
+ else
+ delalloc_end = page_end;
/*
* delalloc_end is already one less than the total length, so
* we don't subtract one from PAGE_SIZE
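The rewritten writepage_delalloc() above splits the work into two passes: lock every delalloc range touching the page first, then run them one by one, only unlocking whatever remains once an error has been hit. A compact standalone model of that lock-all-then-run-or-unwind flow; ranges are reduced to plain structs and the unlock normally done by a successful run is collapsed into the same flag:

#include <stdbool.h>
#include <stdio.h>

struct range {
	int start;
	int len;
	bool locked;
};

/* Pretend "run delalloc range" step; fails once to exercise the unwind. */
static int run_range(const struct range *r)
{
	return (r->start == 20) ? -1 : 0;
}

int main(void)
{
	struct range ranges[3] = { { 0, 10 }, { 20, 10 }, { 40, 10 } };
	int ret = 0;

	/* Pass 1: lock every range up front. */
	for (int i = 0; i < 3; i++)
		ranges[i].locked = true;

	/* Pass 2: run ranges until the first error, then only unlock. */
	for (int i = 0; i < 3; i++) {
		if (ret >= 0)
			ret = run_range(&ranges[i]);
		/* Cleared either by a successful run or by the error unwind. */
		ranges[i].locked = false;
	}
	printf("ret=%d\n", ret);
	return ret < 0 ? 1 : 0;
}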
@@ -1292,7 +1365,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
* Return the next dirty range in [@start, @end).
* If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
*/
-static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
+static void find_next_dirty_byte(const struct btrfs_fs_info *fs_info,
struct page *page, u64 *start, u64 *end)
{
struct folio *folio = page_folio(page);
@@ -1339,20 +1412,23 @@ static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
* < 0 if there were errors (page still locked)
*/
static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
- struct page *page,
+ struct page *page, u64 start, u32 len,
struct btrfs_bio_ctrl *bio_ctrl,
loff_t i_size,
int *nr_ret)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u64 cur = page_offset(page);
- u64 end = cur + PAGE_SIZE - 1;
+ u64 cur = start;
+ u64 end = start + len - 1;
u64 extent_offset;
u64 block_start;
struct extent_map *em;
int ret = 0;
int nr = 0;
+ ASSERT(start >= page_offset(page) &&
+ start + len <= page_offset(page) + PAGE_SIZE);
+
ret = btrfs_writepage_cow_fixup(page);
if (ret) {
/* Fixup worker will requeue */
@@ -1405,8 +1481,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
- block_start = em->block_start;
- disk_bytenr = em->block_start + extent_offset;
+ block_start = extent_map_block_start(em);
+ disk_bytenr = extent_map_block_start(em) + extent_offset;
ASSERT(!extent_map_is_compressed(em));
ASSERT(block_start != EXTENT_MAP_HOLE);
@@ -1441,7 +1517,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
nr++;
}
- btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
+ btrfs_folio_assert_not_dirty(fs_info, page_folio(page), start, len);
*nr_ret = nr;
return 0;
@@ -1499,7 +1575,8 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
if (ret)
goto done;
- ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
+ ret = __extent_writepage_io(BTRFS_I(inode), page, page_offset(page),
+ PAGE_SIZE, bio_ctrl, i_size, &nr);
if (ret == 1)
return 0;
@@ -1516,7 +1593,8 @@ done:
PAGE_SIZE, !ret);
mapping_set_error(page->mapping, ret);
}
- unlock_page(page);
+
+ btrfs_folio_end_all_writers(inode_to_fs_info(inode), folio);
ASSERT(ret <= 0);
return ret;
}
@@ -1648,7 +1726,7 @@ static void set_btree_ioerr(struct extent_buffer *eb)
* context.
*/
static struct extent_buffer *find_extent_buffer_nolock(
- struct btrfs_fs_info *fs_info, u64 start)
+ const struct btrfs_fs_info *fs_info, u64 start)
{
struct extent_buffer *eb;
@@ -2217,7 +2295,7 @@ retry:
* already been run (aka, ordered extent inserted) and all pages are still
* locked.
*/
-void extent_write_locked_range(struct inode *inode, struct page *locked_page,
+void extent_write_locked_range(struct inode *inode, const struct page *locked_page,
u64 start, u64 end, struct writeback_control *wbc,
bool pages_dirty)
{
@@ -2246,20 +2324,21 @@ void extent_write_locked_range(struct inode *inode, struct page *locked_page,
page = find_get_page(mapping, cur >> PAGE_SHIFT);
ASSERT(PageLocked(page));
- if (pages_dirty && page != locked_page) {
+ if (pages_dirty && page != locked_page)
ASSERT(PageDirty(page));
- clear_page_dirty_for_io(page);
- }
- ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
- i_size, &nr);
+ ret = __extent_writepage_io(BTRFS_I(inode), page, cur, cur_len,
+ &bio_ctrl, i_size, &nr);
if (ret == 1)
goto next_page;
/* Make sure the mapping tag for page dirty gets cleared. */
if (nr == 0) {
- set_page_writeback(page);
- end_page_writeback(page);
+ struct folio *folio;
+
+ folio = page_folio(page);
+ btrfs_folio_set_writeback(fs_info, folio, cur, cur_len);
+ btrfs_folio_clear_writeback(fs_info, folio, cur, cur_len);
}
if (ret) {
btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
@@ -2470,877 +2549,6 @@ next:
return try_release_extent_state(io_tree, page, mask);
}
-struct btrfs_fiemap_entry {
- u64 offset;
- u64 phys;
- u64 len;
- u32 flags;
-};
-
-/*
- * Indicate to the caller of emit_fiemap_extent() that it needs to unlock the file
- * range from the inode's io tree, unlock the subvolume tree search path, flush
- * the fiemap cache and relock the file range and research the subvolume tree.
- * The value here is something negative that can't be confused with a valid
- * errno value and different from 1 because that's also a return value from
- * fiemap_fill_next_extent() and also it's often used to mean some btree search
- * did not find a key, so make it some distinct negative value.
- */
-#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
-
-/*
- * Used to:
- *
- * - Cache the next entry to be emitted to the fiemap buffer, so that we can
- * merge extents that are contiguous and can be grouped as a single one;
- *
- * - Store extents ready to be written to the fiemap buffer in an intermediary
- * buffer. This intermediary buffer is to ensure that in case the fiemap
- * buffer is memory mapped to the fiemap target file, we don't deadlock
- * during btrfs_page_mkwrite(). This is because during fiemap we are locking
- * an extent range in order to prevent races with delalloc flushing and
- * ordered extent completion, which is needed in order to reliably detect
- * delalloc in holes and prealloc extents. And this can lead to a deadlock
- * if the fiemap buffer is memory mapped to the file we are running fiemap
- * against (a silly, useless in practice scenario, but possible) because
- * btrfs_page_mkwrite() will try to lock the same extent range.
- */
-struct fiemap_cache {
- /* An array of ready fiemap entries. */
- struct btrfs_fiemap_entry *entries;
- /* Number of entries in the entries array. */
- int entries_size;
- /* Index of the next entry in the entries array to write to. */
- int entries_pos;
- /*
- * Once the entries array is full, this indicates what's the offset for
- * the next file extent item we must search for in the inode's subvolume
- * tree after unlocking the extent range in the inode's io tree and
- * releasing the search path.
- */
- u64 next_search_offset;
- /*
- * This matches struct fiemap_extent_info::fi_mapped_extents, we use it
- * to count the emitted extents ourselves and stop instead of relying on
- * fiemap_fill_next_extent() because we buffer ready fiemap entries at
- * the @entries array, and we want to stop as soon as we hit the max
- * amount of extents to map, not just to save time but also to make the
- * logic at extent_fiemap() simpler.
- */
- unsigned int extents_mapped;
- /* Fields for the cached extent (unsubmitted, not ready, extent). */
- u64 offset;
- u64 phys;
- u64 len;
- u32 flags;
- bool cached;
-};
-
-static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
- struct fiemap_cache *cache)
-{
- for (int i = 0; i < cache->entries_pos; i++) {
- struct btrfs_fiemap_entry *entry = &cache->entries[i];
- int ret;
-
- ret = fiemap_fill_next_extent(fieinfo, entry->offset,
- entry->phys, entry->len,
- entry->flags);
- /*
- * Ignore 1 (reached max entries) because we keep track of that
- * ourselves in emit_fiemap_extent().
- */
- if (ret < 0)
- return ret;
- }
- cache->entries_pos = 0;
-
- return 0;
-}
-
-/*
- * Helper to submit fiemap extent.
- *
- * Will try to merge current fiemap extent specified by @offset, @phys,
- * @len and @flags with cached one.
- * Only when we fail to merge is the cached one submitted as a
- * fiemap extent.
- *
- * Return value is the same as fiemap_fill_next_extent().
- */
-static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
- struct fiemap_cache *cache,
- u64 offset, u64 phys, u64 len, u32 flags)
-{
- struct btrfs_fiemap_entry *entry;
- u64 cache_end;
-
- /* Set at the end of extent_fiemap(). */
- ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
-
- if (!cache->cached)
- goto assign;
-
- /*
- * When iterating the extents of the inode, at extent_fiemap(), we may
- * find an extent that starts at an offset behind the end offset of the
- * previous extent we processed. This happens if fiemap is called
- * without FIEMAP_FLAG_SYNC and there are ordered extents completing
- * after we had to unlock the file range, release the search path, emit
- * the fiemap extents stored in the buffer (cache->entries array) and
- * then lock the remainder of the range and re-search the btree.
- *
- * For example we are in leaf X processing its last item, which is the
- * file extent item for file range [512K, 1M[, and after
- * btrfs_next_leaf() releases the path, there's an ordered extent that
- * completes for the file range [768K, 2M[, and that results in trimming
- * the file extent item so that it now corresponds to the file range
- * [512K, 768K[ and a new file extent item is inserted for the file
- * range [768K, 2M[, which may end up as the last item of leaf X or as
- * the first item of the next leaf - in either case btrfs_next_leaf()
- * will leave us with a path pointing to the new extent item, for the
- * file range [768K, 2M[, since that's the first key that follows the
- * last one we processed. So in order not to report overlapping extents
- * to user space, we trim the length of the previously cached extent and
- * emit it.
- *
- * Upon calling btrfs_next_leaf() we may also find an extent with an
- * offset smaller than or equal to cache->offset, and this happens
- * when we had a hole or prealloc extent with several delalloc ranges in
- * it, but after btrfs_next_leaf() released the path, delalloc was
- * flushed and the resulting ordered extents were completed, so we can
- * now have found a file extent item for an offset that is smaller than
- * or equal to what we have in cache->offset. We deal with this as
- * described below.
- */
- cache_end = cache->offset + cache->len;
- if (cache_end > offset) {
- if (offset == cache->offset) {
- /*
- * We cached a delalloc range (found in the io tree) for
- * a hole or prealloc extent and we have now found a
- * file extent item for the same offset. What we have
- * now is more recent and up to date, so discard what
- * we had in the cache and use what we have just found.
- */
- goto assign;
- } else if (offset > cache->offset) {
- /*
- * The extent range we previously found ends after the
- * offset of the file extent item we found and that
- * offset falls somewhere in the middle of that previous
- * extent range. So adjust the range we previously found
- * to end at the offset of the file extent item we have
- * just found, since this extent is more up to date.
- * Emit that adjusted range and cache the file extent
- * item we have just found. This corresponds to the case
- * where a previously found file extent item was split
- * due to an ordered extent completing.
- */
- cache->len = offset - cache->offset;
- goto emit;
- } else {
- const u64 range_end = offset + len;
-
- /*
- * The offset of the file extent item we have just found
- * is behind the cached offset. This means we were
- * processing a hole or prealloc extent for which we
- * have found delalloc ranges (in the io tree), so what
- * we have in the cache is the last delalloc range we
- * found while the file extent item we found can be
- * either for a whole delalloc range we previously
- * emitted or only a part of that range.
- *
- * We have two cases here:
- *
- * 1) The file extent item's range ends at or behind the
- * cached extent's end. In this case just ignore the
- * current file extent item because we don't want to
- * overlap with previous ranges that may have been
- * emitted already;
- *
- * 2) The file extent item starts behind the currently
- * cached extent but its end offset goes beyond the
- * end offset of the cached extent. We don't want to
- * overlap with a previous range that may have been
- * emitted already, so we emit the currently cached
- * extent and then partially store the current file
- * extent item's range in the cache, for the subrange
- * going from the cached extent's end to the end of the
- * file extent item.
- */
- if (range_end <= cache_end)
- return 0;
-
- if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
- phys += cache_end - offset;
-
- offset = cache_end;
- len = range_end - cache_end;
- goto emit;
- }
- }
-
- /*
- * Only merges fiemap extents if
- * 1) Their logical addresses are continuous
- *
- * 2) Their physical addresses are continuous
- * So truly compressed (physical size smaller than logical size)
- * extents won't get merged with each other
- *
- * 3) Share same flags
- */
- if (cache->offset + cache->len == offset &&
- cache->phys + cache->len == phys &&
- cache->flags == flags) {
- cache->len += len;
- return 0;
- }
-
-emit:
- /* Not mergeable, need to submit cached one */
-
- if (cache->entries_pos == cache->entries_size) {
- /*
- * We will need to research for the end offset of the last
- * stored extent and not from the current offset, because after
- * unlocking the range and releasing the path, if there's a hole
- * between that end offset and this current offset, a new extent
- * may have been inserted due to a new write, so we don't want
- * to miss it.
- */
- entry = &cache->entries[cache->entries_size - 1];
- cache->next_search_offset = entry->offset + entry->len;
- cache->cached = false;
-
- return BTRFS_FIEMAP_FLUSH_CACHE;
- }
-
- entry = &cache->entries[cache->entries_pos];
- entry->offset = cache->offset;
- entry->phys = cache->phys;
- entry->len = cache->len;
- entry->flags = cache->flags;
- cache->entries_pos++;
- cache->extents_mapped++;
-
- if (cache->extents_mapped == fieinfo->fi_extents_max) {
- cache->cached = false;
- return 1;
- }
-assign:
- cache->cached = true;
- cache->offset = offset;
- cache->phys = phys;
- cache->len = len;
- cache->flags = flags;
-
- return 0;
-}
-
-/*
- * Emit last fiemap cache
- *
- * The last fiemap cache may still be cached in the following case:
- * 0 4k 8k
- * |<- Fiemap range ->|
- * |<------------ First extent ----------->|
- *
- * In this case, the first extent range will be cached but not emitted.
- * So we must emit it before ending extent_fiemap().
- */
-static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
- struct fiemap_cache *cache)
-{
- int ret;
-
- if (!cache->cached)
- return 0;
-
- ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
- cache->len, cache->flags);
- cache->cached = false;
- if (ret > 0)
- ret = 0;
- return ret;
-}
-
-static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
-{
- struct extent_buffer *clone = path->nodes[0];
- struct btrfs_key key;
- int slot;
- int ret;
-
- path->slots[0]++;
- if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
- return 0;
-
- /*
- * Add a temporary extra ref to an already cloned extent buffer to
- * prevent btrfs_next_leaf() freeing it, we want to reuse it to avoid
- * the cost of allocating a new one.
- */
- ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
- atomic_inc(&clone->refs);
-
- ret = btrfs_next_leaf(inode->root, path);
- if (ret != 0)
- goto out;
-
- /*
- * Don't bother with cloning if there are no more file extent items for
- * our inode.
- */
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) {
- ret = 1;
- goto out;
- }
-
- /*
- * Important to preserve the start field, for the optimizations when
- * checking if extents are shared (see extent_fiemap()).
- *
- * We must set ->start before calling copy_extent_buffer_full(). If we
- * are on sub-pagesize blocksize, we use ->start to determine the offset
- * into the folio where our eb exists, and if we update ->start after
- * the fact then any subsequent reads of the eb may read from a
- * different offset in the folio than where we originally copied into.
- */
- clone->start = path->nodes[0]->start;
- /* See the comment at fiemap_search_slot() about why we clone. */
- copy_extent_buffer_full(clone, path->nodes[0]);
-
- slot = path->slots[0];
- btrfs_release_path(path);
- path->nodes[0] = clone;
- path->slots[0] = slot;
-out:
- if (ret)
- free_extent_buffer(clone);
-
- return ret;
-}
-
-/*
- * Search for the first file extent item that starts at a given file offset or
- * the one that starts immediately before that offset.
- * Returns: 0 on success, < 0 on error, 1 if not found.
- */
-static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
- u64 file_offset)
-{
- const u64 ino = btrfs_ino(inode);
- struct btrfs_root *root = inode->root;
- struct extent_buffer *clone;
- struct btrfs_key key;
- int slot;
- int ret;
-
- key.objectid = ino;
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = file_offset;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- return ret;
-
- if (ret > 0 && path->slots[0] > 0) {
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
- if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
- path->slots[0]--;
- }
-
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- ret = btrfs_next_leaf(root, path);
- if (ret != 0)
- return ret;
-
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
- return 1;
- }
-
- /*
- * We clone the leaf and use it during fiemap. This is because while
- * using the leaf we do expensive things like checking if an extent is
- * shared, which can take a long time. In order to prevent blocking
- * other tasks for too long, we use a clone of the leaf. We have locked
- * the file range in the inode's io tree, so we know none of our file
- * extent items can change. This way we avoid blocking other tasks that
- * want to insert items for other inodes in the same leaf or b+tree
- * rebalance operations (triggered for example when someone is trying
- * to push items into this leaf when trying to insert an item in a
- * neighbour leaf).
- * We also need the private clone because holding a read lock on an
- * extent buffer of the subvolume's b+tree will make lockdep unhappy
- * when we check if extents are shared, as backref walking may need to
- * lock the same leaf we are processing.
- */
- clone = btrfs_clone_extent_buffer(path->nodes[0]);
- if (!clone)
- return -ENOMEM;
-
- slot = path->slots[0];
- btrfs_release_path(path);
- path->nodes[0] = clone;
- path->slots[0] = slot;
-
- return 0;
-}
-
-/*
- * Process a range which is a hole or a prealloc extent in the inode's subvolume
- * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
- * extent. The end offset (@end) is inclusive.
- */
-static int fiemap_process_hole(struct btrfs_inode *inode,
- struct fiemap_extent_info *fieinfo,
- struct fiemap_cache *cache,
- struct extent_state **delalloc_cached_state,
- struct btrfs_backref_share_check_ctx *backref_ctx,
- u64 disk_bytenr, u64 extent_offset,
- u64 extent_gen,
- u64 start, u64 end)
-{
- const u64 i_size = i_size_read(&inode->vfs_inode);
- u64 cur_offset = start;
- u64 last_delalloc_end = 0;
- u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
- bool checked_extent_shared = false;
- int ret;
-
- /*
- * There can be no delalloc past i_size, so don't waste time looking for
- * it beyond i_size.
- */
- while (cur_offset < end && cur_offset < i_size) {
- u64 delalloc_start;
- u64 delalloc_end;
- u64 prealloc_start;
- u64 prealloc_len = 0;
- bool delalloc;
-
- delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
- delalloc_cached_state,
- &delalloc_start,
- &delalloc_end);
- if (!delalloc)
- break;
-
- /*
- * If this is a prealloc extent we have to report every section
- * of it that has no delalloc.
- */
- if (disk_bytenr != 0) {
- if (last_delalloc_end == 0) {
- prealloc_start = start;
- prealloc_len = delalloc_start - start;
- } else {
- prealloc_start = last_delalloc_end + 1;
- prealloc_len = delalloc_start - prealloc_start;
- }
- }
-
- if (prealloc_len > 0) {
- if (!checked_extent_shared && fieinfo->fi_extents_max) {
- ret = btrfs_is_data_extent_shared(inode,
- disk_bytenr,
- extent_gen,
- backref_ctx);
- if (ret < 0)
- return ret;
- else if (ret > 0)
- prealloc_flags |= FIEMAP_EXTENT_SHARED;
-
- checked_extent_shared = true;
- }
- ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
- disk_bytenr + extent_offset,
- prealloc_len, prealloc_flags);
- if (ret)
- return ret;
- extent_offset += prealloc_len;
- }
-
- ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
- delalloc_end + 1 - delalloc_start,
- FIEMAP_EXTENT_DELALLOC |
- FIEMAP_EXTENT_UNKNOWN);
- if (ret)
- return ret;
-
- last_delalloc_end = delalloc_end;
- cur_offset = delalloc_end + 1;
- extent_offset += cur_offset - delalloc_start;
- cond_resched();
- }
-
- /*
- * Either we found no delalloc for the whole prealloc extent or we have
- * a prealloc extent that spans i_size or starts at or after i_size.
- */
- if (disk_bytenr != 0 && last_delalloc_end < end) {
- u64 prealloc_start;
- u64 prealloc_len;
-
- if (last_delalloc_end == 0) {
- prealloc_start = start;
- prealloc_len = end + 1 - start;
- } else {
- prealloc_start = last_delalloc_end + 1;
- prealloc_len = end + 1 - prealloc_start;
- }
-
- if (!checked_extent_shared && fieinfo->fi_extents_max) {
- ret = btrfs_is_data_extent_shared(inode,
- disk_bytenr,
- extent_gen,
- backref_ctx);
- if (ret < 0)
- return ret;
- else if (ret > 0)
- prealloc_flags |= FIEMAP_EXTENT_SHARED;
- }
- ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
- disk_bytenr + extent_offset,
- prealloc_len, prealloc_flags);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
- struct btrfs_path *path,
- u64 *last_extent_end_ret)
-{
- const u64 ino = btrfs_ino(inode);
- struct btrfs_root *root = inode->root;
- struct extent_buffer *leaf;
- struct btrfs_file_extent_item *ei;
- struct btrfs_key key;
- u64 disk_bytenr;
- int ret;
-
- /*
- * Lookup the last file extent. We're not using i_size here because
- * there might be preallocation past i_size.
- */
- ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
- /* There can't be a file extent item at offset (u64)-1 */
- ASSERT(ret != 0);
- if (ret < 0)
- return ret;
-
- /*
- * For a non-existing key, btrfs_search_slot() always leaves us at a
- * slot > 0, except if the btree is empty, which is impossible because
- * at least it has the inode item for this inode and all the items for
- * the root inode 256.
- */
- ASSERT(path->slots[0] > 0);
- path->slots[0]--;
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
- /* No file extent items in the subvolume tree. */
- *last_extent_end_ret = 0;
- return 0;
- }
-
- /*
- * For an inline extent, the disk_bytenr is where inline data starts at,
- * so first check if we have an inline extent item before checking if we
- * have an implicit hole (disk_bytenr == 0).
- */
- ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
- *last_extent_end_ret = btrfs_file_extent_end(path);
- return 0;
- }
-
- /*
- * Find the last file extent item that is not a hole (when NO_HOLES is
- * not enabled). This should take at most 2 iterations in the worst
- * case: we have one hole file extent item at slot 0 of a leaf and
- * another hole file extent item as the last item in the previous leaf.
- * This is because we merge file extent items that represent holes.
- */
- disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
- while (disk_bytenr == 0) {
- ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
- if (ret < 0) {
- return ret;
- } else if (ret > 0) {
- /* No file extent items that are not holes. */
- *last_extent_end_ret = 0;
- return 0;
- }
- leaf = path->nodes[0];
- ei = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
- }
-
- *last_extent_end_ret = btrfs_file_extent_end(path);
- return 0;
-}
-
-int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
- u64 start, u64 len)
-{
- const u64 ino = btrfs_ino(inode);
- struct extent_state *cached_state = NULL;
- struct extent_state *delalloc_cached_state = NULL;
- struct btrfs_path *path;
- struct fiemap_cache cache = { 0 };
- struct btrfs_backref_share_check_ctx *backref_ctx;
- u64 last_extent_end;
- u64 prev_extent_end;
- u64 range_start;
- u64 range_end;
- const u64 sectorsize = inode->root->fs_info->sectorsize;
- bool stopped = false;
- int ret;
-
- cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
- cache.entries = kmalloc_array(cache.entries_size,
- sizeof(struct btrfs_fiemap_entry),
- GFP_KERNEL);
- backref_ctx = btrfs_alloc_backref_share_check_ctx();
- path = btrfs_alloc_path();
- if (!cache.entries || !backref_ctx || !path) {
- ret = -ENOMEM;
- goto out;
- }
-
-restart:
- range_start = round_down(start, sectorsize);
- range_end = round_up(start + len, sectorsize);
- prev_extent_end = range_start;
-
- lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
-
- ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
- if (ret < 0)
- goto out_unlock;
- btrfs_release_path(path);
-
- path->reada = READA_FORWARD;
- ret = fiemap_search_slot(inode, path, range_start);
- if (ret < 0) {
- goto out_unlock;
- } else if (ret > 0) {
- /*
- * No file extent item found, but we may have delalloc between
- * the current offset and i_size. So check for that.
- */
- ret = 0;
- goto check_eof_delalloc;
- }
-
- while (prev_extent_end < range_end) {
- struct extent_buffer *leaf = path->nodes[0];
- struct btrfs_file_extent_item *ei;
- struct btrfs_key key;
- u64 extent_end;
- u64 extent_len;
- u64 extent_offset = 0;
- u64 extent_gen;
- u64 disk_bytenr = 0;
- u64 flags = 0;
- int extent_type;
- u8 compression;
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
- break;
-
- extent_end = btrfs_file_extent_end(path);
-
- /*
- * The first iteration can leave us at an extent item that ends
- * before our range's start. Move to the next item.
- */
- if (extent_end <= range_start)
- goto next_item;
-
- backref_ctx->curr_leaf_bytenr = leaf->start;
-
- /* We have an implicit hole (NO_HOLES feature enabled). */
- if (prev_extent_end < key.offset) {
- const u64 hole_end = min(key.offset, range_end) - 1;
-
- ret = fiemap_process_hole(inode, fieinfo, &cache,
- &delalloc_cached_state,
- backref_ctx, 0, 0, 0,
- prev_extent_end, hole_end);
- if (ret < 0) {
- goto out_unlock;
- } else if (ret > 0) {
- /* fiemap_fill_next_extent() told us to stop. */
- stopped = true;
- break;
- }
-
- /* We've reached the end of the fiemap range, stop. */
- if (key.offset >= range_end) {
- stopped = true;
- break;
- }
- }
-
- extent_len = extent_end - key.offset;
- ei = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- compression = btrfs_file_extent_compression(leaf, ei);
- extent_type = btrfs_file_extent_type(leaf, ei);
- extent_gen = btrfs_file_extent_generation(leaf, ei);
-
- if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
- disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
- if (compression == BTRFS_COMPRESS_NONE)
- extent_offset = btrfs_file_extent_offset(leaf, ei);
- }
-
- if (compression != BTRFS_COMPRESS_NONE)
- flags |= FIEMAP_EXTENT_ENCODED;
-
- if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- flags |= FIEMAP_EXTENT_DATA_INLINE;
- flags |= FIEMAP_EXTENT_NOT_ALIGNED;
- ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
- extent_len, flags);
- } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
- ret = fiemap_process_hole(inode, fieinfo, &cache,
- &delalloc_cached_state,
- backref_ctx,
- disk_bytenr, extent_offset,
- extent_gen, key.offset,
- extent_end - 1);
- } else if (disk_bytenr == 0) {
- /* We have an explicit hole. */
- ret = fiemap_process_hole(inode, fieinfo, &cache,
- &delalloc_cached_state,
- backref_ctx, 0, 0, 0,
- key.offset, extent_end - 1);
- } else {
- /* We have a regular extent. */
- if (fieinfo->fi_extents_max) {
- ret = btrfs_is_data_extent_shared(inode,
- disk_bytenr,
- extent_gen,
- backref_ctx);
- if (ret < 0)
- goto out_unlock;
- else if (ret > 0)
- flags |= FIEMAP_EXTENT_SHARED;
- }
-
- ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
- disk_bytenr + extent_offset,
- extent_len, flags);
- }
-
- if (ret < 0) {
- goto out_unlock;
- } else if (ret > 0) {
- /* emit_fiemap_extent() told us to stop. */
- stopped = true;
- break;
- }
-
- prev_extent_end = extent_end;
-next_item:
- if (fatal_signal_pending(current)) {
- ret = -EINTR;
- goto out_unlock;
- }
-
- ret = fiemap_next_leaf_item(inode, path);
- if (ret < 0) {
- goto out_unlock;
- } else if (ret > 0) {
- /* No more file extent items for this inode. */
- break;
- }
- cond_resched();
- }
-
-check_eof_delalloc:
- if (!stopped && prev_extent_end < range_end) {
- ret = fiemap_process_hole(inode, fieinfo, &cache,
- &delalloc_cached_state, backref_ctx,
- 0, 0, 0, prev_extent_end, range_end - 1);
- if (ret < 0)
- goto out_unlock;
- prev_extent_end = range_end;
- }
-
- if (cache.cached && cache.offset + cache.len >= last_extent_end) {
- const u64 i_size = i_size_read(&inode->vfs_inode);
-
- if (prev_extent_end < i_size) {
- u64 delalloc_start;
- u64 delalloc_end;
- bool delalloc;
-
- delalloc = btrfs_find_delalloc_in_range(inode,
- prev_extent_end,
- i_size - 1,
- &delalloc_cached_state,
- &delalloc_start,
- &delalloc_end);
- if (!delalloc)
- cache.flags |= FIEMAP_EXTENT_LAST;
- } else {
- cache.flags |= FIEMAP_EXTENT_LAST;
- }
- }
-
-out_unlock:
- unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
-
- if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
- btrfs_release_path(path);
- ret = flush_fiemap_cache(fieinfo, &cache);
- if (ret)
- goto out;
- len -= cache.next_search_offset - start;
- start = cache.next_search_offset;
- goto restart;
- } else if (ret < 0) {
- goto out;
- }
-
- /*
- * Must free the path before emitting to the fiemap buffer because we
- * may have a non-cloned leaf and if the fiemap buffer is memory mapped
- * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
- * waiting for an ordered extent that in order to complete needs to
- * modify that leaf, therefore leading to a deadlock.
- */
- btrfs_free_path(path);
- path = NULL;
-
- ret = flush_fiemap_cache(fieinfo, &cache);
- if (ret)
- goto out;
-
- ret = emit_last_fiemap_cache(fieinfo, &cache);
-out:
- free_extent_state(delalloc_cached_state);
- kfree(cache.entries);
- btrfs_free_backref_share_ctx(backref_ctx);
- btrfs_free_path(path);
- return ret;
-}
-
static void __free_extent_buffer(struct extent_buffer *eb)
{
kmem_cache_free(extent_buffer_cache, eb);
@@ -3372,7 +2580,7 @@ static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *foli
return false;
}
-static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
+static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
@@ -3433,7 +2641,7 @@ static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *f
}
/* Release all pages attached to the extent buffer */
-static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
+static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
{
ASSERT(!extent_buffer_under_io(eb));
@@ -3499,7 +2707,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
*/
set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
- ret = alloc_eb_folio_array(new, 0);
+ ret = alloc_eb_folio_array(new, false);
if (ret) {
btrfs_release_extent_buffer(new);
return NULL;
@@ -3507,7 +2715,6 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
for (int i = 0; i < num_folios; i++) {
struct folio *folio = new->folios[i];
- int ret;
ret = attach_extent_buffer_folio(new, folio, NULL);
if (ret < 0) {
@@ -3533,7 +2740,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
if (!eb)
return NULL;
- ret = alloc_eb_folio_array(eb, 0);
+ ret = alloc_eb_folio_array(eb, false);
if (ret)
goto err;
@@ -3553,7 +2760,7 @@ err:
for (int i = 0; i < num_folios; i++) {
if (eb->folios[i]) {
detach_extent_buffer_folio(eb, eb->folios[i]);
- __folio_put(eb->folios[i]);
+ folio_put(eb->folios[i]);
}
}
__free_extent_buffer(eb);
@@ -3689,6 +2896,8 @@ static struct extent_buffer *grab_extent_buffer(
struct folio *folio = page_folio(page);
struct extent_buffer *exists;
+ lockdep_assert_held(&page->mapping->i_private_lock);
+
/*
* For subpage case, we completely rely on radix tree to ensure we
* don't try to insert two ebs for the same bytenr. So here we always
@@ -3756,13 +2965,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
* The caller needs to free the existing folios and retry using the same order.
*/
static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ struct btrfs_subpage *prealloc,
struct extent_buffer **found_eb_ret)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
const unsigned long index = eb->start >> PAGE_SHIFT;
- struct folio *existing_folio;
+ struct folio *existing_folio = NULL;
int ret;
ASSERT(found_eb_ret);
@@ -3774,12 +2984,14 @@ retry:
ret = filemap_add_folio(mapping, eb->folios[i], index + i,
GFP_NOFS | __GFP_NOFAIL);
if (!ret)
- return 0;
+ goto finish;
existing_folio = filemap_lock_folio(mapping, index + i);
/* The page cache only exists for a very short time, just retry. */
- if (IS_ERR(existing_folio))
+ if (IS_ERR(existing_folio)) {
+ existing_folio = NULL;
goto retry;
+ }
/* For now, we should only have single-page folios for btree inode. */
ASSERT(folio_nr_pages(existing_folio) == 1);
@@ -3790,14 +3002,13 @@ retry:
return -EAGAIN;
}
- if (fs_info->nodesize < PAGE_SIZE) {
- /*
- * We're going to reuse the existing page, can drop our page
- * and subpage structure now.
- */
+finish:
+ spin_lock(&mapping->i_private_lock);
+ if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
+ /* We're going to reuse the existing page, can drop our folio now. */
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
- } else {
+ } else if (existing_folio) {
struct extent_buffer *existing_eb;
existing_eb = grab_extent_buffer(fs_info,
@@ -3805,6 +3016,7 @@ retry:
if (existing_eb) {
/* The extent buffer still exists, we can use it directly. */
*found_eb_ret = existing_eb;
+ spin_unlock(&mapping->i_private_lock);
folio_unlock(existing_folio);
folio_put(existing_folio);
return 1;
@@ -3813,6 +3025,22 @@ retry:
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
}
+ eb->folio_size = folio_size(eb->folios[i]);
+ eb->folio_shift = folio_shift(eb->folios[i]);
+ /* Should not fail, as we have preallocated the memory. */
+ ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
+ ASSERT(!ret);
+ /*
+ * Indicate that we have an extra eb under allocation, so that
+ * detach_extent_buffer_page() won't release the folio private when the
+ * eb hasn't been inserted into the radix tree yet.
+ *
+ * The ref will be decreased when the eb releases the page, in
+ * detach_extent_buffer_page(), so there is no special handling needed
+ * in the error path.
+ */
+ btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
@@ -3824,7 +3052,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
int attached = 0;
struct extent_buffer *eb;
struct extent_buffer *existing_eb = NULL;
- struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct btrfs_subpage *prealloc = NULL;
u64 lockdep_owner = owner_root;
bool page_contig = true;
@@ -3879,7 +3106,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
reallocate:
/* Allocate all pages first. */
- ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
+ ret = alloc_eb_folio_array(eb, true);
if (ret < 0) {
btrfs_free_subpage(prealloc);
goto out;
@@ -3890,7 +3117,7 @@ reallocate:
for (int i = 0; i < num_folios; i++) {
struct folio *folio;
- ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
+ ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
if (ret > 0) {
ASSERT(existing_eb);
goto out;
@@ -3927,24 +3154,6 @@ reallocate:
* and free the allocated page.
*/
folio = eb->folios[i];
- eb->folio_size = folio_size(folio);
- eb->folio_shift = folio_shift(folio);
- spin_lock(&mapping->i_private_lock);
- /* Should not fail, as we have preallocated the memory */
- ret = attach_extent_buffer_folio(eb, folio, prealloc);
- ASSERT(!ret);
- /*
- * To inform we have extra eb under allocation, so that
- * detach_extent_buffer_page() won't release the folio private
- * when the eb hasn't yet been inserted into radix tree.
- *
- * The ref will be decreased when the eb released the page, in
- * detach_extent_buffer_page().
- * Thus needs no special handling in error path.
- */
- btrfs_folio_inc_eb_refs(fs_info, folio);
- spin_unlock(&mapping->i_private_lock);
-
WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
/*
@@ -4349,7 +3558,7 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
}
int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
- struct btrfs_tree_parent_check *check)
+ const struct btrfs_tree_parent_check *check)
{
struct btrfs_bio *bbio;
bool ret;
@@ -4587,8 +3796,7 @@ static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
return;
if (fs_info->nodesize < PAGE_SIZE) {
- struct folio *folio = eb->folios[0];
-
+ folio = eb->folios[0];
ASSERT(i == 0);
if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
eb->start, eb->len)))
@@ -4606,7 +3814,7 @@ static void __write_extent_buffer(const struct extent_buffer *eb,
size_t cur;
size_t offset;
char *kaddr;
- char *src = (char *)srcv;
+ const char *src = (const char *)srcv;
unsigned long i = get_eb_folio_index(eb, start);
/* For unmapped (dummy) ebs, no need to check their uptodate status. */
const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
@@ -4963,7 +4171,7 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
#define GANG_LOOKUP_SIZE 16
static struct extent_buffer *get_next_extent_buffer(
- struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
+ const struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
{
struct extent_buffer *gang[GANG_LOOKUP_SIZE];
struct extent_buffer *found = NULL;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index dca6b12769ec..dceebd76c7d1 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -215,6 +215,11 @@ static inline struct extent_changeset *extent_changeset_alloc(void)
return ret;
}
+static inline void extent_changeset_prealloc(struct extent_changeset *changeset, gfp_t gfp_mask)
+{
+ ulist_prealloc(&changeset->range_changed, gfp_mask);
+}
+
static inline void extent_changeset_release(struct extent_changeset *changeset)
{
if (!changeset)
@@ -235,15 +240,13 @@ bool try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int btrfs_read_folio(struct file *file, struct folio *folio);
-void extent_write_locked_range(struct inode *inode, struct page *locked_page,
+void extent_write_locked_range(struct inode *inode, const struct page *locked_page,
u64 start, u64 end, struct writeback_control *wbc,
bool pages_dirty);
int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
void btrfs_readahead(struct readahead_control *rac);
-int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
- u64 start, u64 len);
int set_folio_extent_mapped(struct folio *folio);
int set_page_extent_mapped(struct page *page);
void clear_page_extent_mapped(struct page *page);
@@ -263,7 +266,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_COMPLETE 1
#define WAIT_PAGE_LOCK 2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
- struct btrfs_tree_parent_check *parent_check);
+ const struct btrfs_tree_parent_check *parent_check);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root, u64 gen, int level);
@@ -350,9 +353,8 @@ void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
void set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
-void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
- struct page *locked_page,
+ const struct page *locked_page,
struct extent_state **cached,
u32 bits_to_clear, unsigned long page_ops);
int extent_invalidate_folio(struct extent_io_tree *tree,
@@ -361,9 +363,8 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
struct extent_buffer *buf);
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
- gfp_t extra_gfp);
-int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array,
- gfp_t extra_gfp);
+ bool nofail);
+int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 744e8952abb0..81558f90ee80 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -33,7 +33,7 @@ void __cold extent_map_exit(void)
*/
void extent_map_tree_init(struct extent_map_tree *tree)
{
- tree->map = RB_ROOT_CACHED;
+ tree->root = RB_ROOT;
INIT_LIST_HEAD(&tree->modified_extents);
rwlock_init(&tree->lock);
}
@@ -85,27 +85,24 @@ static void dec_evictable_extent_maps(struct btrfs_inode *inode)
percpu_counter_dec(&fs_info->evictable_extent_maps);
}
-static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
+static int tree_insert(struct rb_root *root, struct extent_map *em)
{
- struct rb_node **p = &root->rb_root.rb_node;
+ struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct extent_map *entry = NULL;
struct rb_node *orig_parent = NULL;
u64 end = range_end(em->start, em->len);
- bool leftmost = true;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct extent_map, rb_node);
- if (em->start < entry->start) {
+ if (em->start < entry->start)
p = &(*p)->rb_left;
- } else if (em->start >= extent_map_end(entry)) {
+ else if (em->start >= extent_map_end(entry))
p = &(*p)->rb_right;
- leftmost = false;
- } else {
+ else
return -EEXIST;
- }
}
orig_parent = parent;
@@ -128,7 +125,7 @@ static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
return -EEXIST;
rb_link_node(&em->rb_node, orig_parent, p);
- rb_insert_color_cached(&em->rb_node, root, leftmost);
+ rb_insert_color(&em->rb_node, root);
return 0;
}
@@ -186,11 +183,19 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
return NULL;
}
+static inline u64 extent_map_block_len(const struct extent_map *em)
+{
+ if (extent_map_is_compressed(em))
+ return em->disk_num_bytes;
+ return em->len;
+}
+
static inline u64 extent_map_block_end(const struct extent_map *em)
{
- if (em->block_start + em->block_len < em->block_start)
+ if (extent_map_block_start(em) + extent_map_block_len(em) <
+ extent_map_block_start(em))
return (u64)-1;
- return em->block_start + em->block_len;
+ return extent_map_block_start(em) + extent_map_block_len(em);
}
static bool can_merge_extent_map(const struct extent_map *em)
@@ -225,15 +230,106 @@ static bool mergeable_maps(const struct extent_map *prev, const struct extent_ma
if (prev->flags != next->flags)
return false;
- if (next->block_start < EXTENT_MAP_LAST_BYTE - 1)
- return next->block_start == extent_map_block_end(prev);
+ if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
+ return extent_map_block_start(next) == extent_map_block_end(prev);
/* HOLES and INLINE extents. */
- return next->block_start == prev->block_start;
+ return next->disk_bytenr == prev->disk_bytenr;
+}
+
+/*
+ * Handle merging of the on-disk data extents for @prev and @next.
+ *
+ * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
+ * For now only uncompressed regular extents can be merged.
+ *
+ * Both @prev and @next will be updated to point to the new merged range,
+ * thus one of them should be removed by the caller.
+ */
+static void merge_ondisk_extents(struct extent_map *prev, struct extent_map *next)
+{
+ u64 new_disk_bytenr;
+ u64 new_disk_num_bytes;
+ u64 new_offset;
+
+ /* @prev and @next should not be compressed. */
+ ASSERT(!extent_map_is_compressed(prev));
+ ASSERT(!extent_map_is_compressed(next));
+
+ /*
+ * There are two different cases where @prev and @next can be merged.
+ *
+ * 1) They are referring to the same data extent:
+ *
+ * |<----- data extent A ----->|
+ * |<- prev ->|<- next ->|
+ *
+ * 2) They are referring to different data extents but still adjacent:
+ *
+ * |<-- data extent A -->|<-- data extent B -->|
+ * |<- prev ->|<- next ->|
+ *
+ * The calculation here always merges the data extents first, then updates
+ * @offset using the new data extents.
+ *
+ * For case 1), the merged data extent would be the same.
+ * For case 2), we just merge the two data extents into one.
+ */
+ new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr);
+ new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes,
+ next->disk_bytenr + next->disk_num_bytes) -
+ new_disk_bytenr;
+ new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;
+
+ prev->disk_bytenr = new_disk_bytenr;
+ prev->disk_num_bytes = new_disk_num_bytes;
+ prev->ram_bytes = new_disk_num_bytes;
+ prev->offset = new_offset;
+
+ next->disk_bytenr = new_disk_bytenr;
+ next->disk_num_bytes = new_disk_num_bytes;
+ next->ram_bytes = new_disk_num_bytes;
+ next->offset = new_offset;
+}
+
+static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix,
+ struct extent_map *em)
+{
+ if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
+ return;
+ btrfs_crit(fs_info,
+"%s, start=%llu len=%llu disk_bytenr=%llu disk_num_bytes=%llu ram_bytes=%llu offset=%llu flags=0x%x",
+ prefix, em->start, em->len, em->disk_bytenr, em->disk_num_bytes,
+ em->ram_bytes, em->offset, em->flags);
+ ASSERT(0);
+}
+
+/* Internal sanity checks for btrfs debug builds. */
+static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em)
+{
+ if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
+ return;
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ if (em->disk_num_bytes == 0)
+ dump_extent_map(fs_info, "zero disk_num_bytes", em);
+ if (em->offset + em->len > em->ram_bytes)
+ dump_extent_map(fs_info, "ram_bytes too small", em);
+ if (em->offset + em->len > em->disk_num_bytes &&
+ !extent_map_is_compressed(em))
+ dump_extent_map(fs_info, "disk_num_bytes too small", em);
+ if (!extent_map_is_compressed(em) &&
+ em->ram_bytes != em->disk_num_bytes)
+ dump_extent_map(fs_info,
+ "ram_bytes mismatch with disk_num_bytes for non-compressed em",
+ em);
+ } else if (em->offset) {
+ dump_extent_map(fs_info, "non-zero offset for hole/inline", em);
+ }
}
static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *tree = &inode->extent_tree;
struct extent_map *merge = NULL;
struct rb_node *rb;
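merge_ondisk_extents() above boils down to three lines of arithmetic: take the lower disk_bytenr, stretch disk_num_bytes to cover both ranges, and recompute the offset against the new start. A self-contained sketch of just that calculation (plain C, made-up byte numbers, only a subset of the real struct fields) covering both cases described in the comment; either case ends up describing the full 128K on disk:

#include <stdio.h>

/* Subset of the extent_map fields the merge arithmetic touches. */
struct em { unsigned long long disk_bytenr, disk_num_bytes, offset; };

static void merge_ondisk_sketch(struct em *prev, struct em *next)
{
	unsigned long long start = prev->disk_bytenr < next->disk_bytenr ?
				   prev->disk_bytenr : next->disk_bytenr;
	unsigned long long prev_end = prev->disk_bytenr + prev->disk_num_bytes;
	unsigned long long next_end = next->disk_bytenr + next->disk_num_bytes;
	unsigned long long end = prev_end > next_end ? prev_end : next_end;

	/* Same order as the kernel helper: offset first, against the new start. */
	prev->offset = prev->disk_bytenr + prev->offset - start;
	prev->disk_bytenr = start;
	prev->disk_num_bytes = end - start;
	*next = *prev;
}

int main(void)
{
	/* Case 1: both maps point into the same 128K data extent at 1M. */
	struct em a = { 1048576, 131072, 0 }, b = { 1048576, 131072, 65536 };
	/* Case 2: two adjacent 64K data extents at 1M and 1M + 64K. */
	struct em c = { 1048576, 65536, 0 }, d = { 1114112, 65536, 0 };

	merge_ondisk_sketch(&a, &b);
	merge_ondisk_sketch(&c, &d);
	printf("case 1: bytenr=%llu num_bytes=%llu offset=%llu\n",
	       a.disk_bytenr, a.disk_num_bytes, a.offset);
	printf("case 2: bytenr=%llu num_bytes=%llu offset=%llu\n",
	       c.disk_bytenr, c.disk_num_bytes, c.offset);
	return 0;
}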
@@ -258,14 +354,15 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
em->start = merge->start;
- em->orig_start = merge->orig_start;
em->len += merge->len;
- em->block_len += merge->block_len;
- em->block_start = merge->block_start;
em->generation = max(em->generation, merge->generation);
+
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ merge_ondisk_extents(merge, em);
em->flags |= EXTENT_FLAG_MERGED;
- rb_erase_cached(&merge->rb_node, &tree->map);
+ validate_extent_map(fs_info, em);
+ rb_erase(&merge->rb_node, &tree->root);
RB_CLEAR_NODE(&merge->rb_node);
free_extent_map(merge);
dec_evictable_extent_maps(inode);
@@ -277,8 +374,10 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
em->len += merge->len;
- em->block_len += merge->block_len;
- rb_erase_cached(&merge->rb_node, &tree->map);
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ merge_ondisk_extents(em, merge);
+ validate_extent_map(fs_info, em);
+ rb_erase(&merge->rb_node, &tree->root);
RB_CLEAR_NODE(&merge->rb_node);
em->generation = max(em->generation, merge->generation);
em->flags |= EXTENT_FLAG_MERGED;
@@ -389,7 +488,8 @@ static int add_extent_mapping(struct btrfs_inode *inode,
lockdep_assert_held_write(&tree->lock);
- ret = tree_insert(&tree->map, em);
+ validate_extent_map(fs_info, em);
+ ret = tree_insert(&tree->root, em);
if (ret)
return ret;
@@ -410,7 +510,7 @@ __lookup_extent_mapping(struct extent_map_tree *tree,
struct rb_node *prev_or_next = NULL;
u64 end = range_end(start, len);
- rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
+ rb_node = __tree_search(&tree->root, start, &prev_or_next);
if (!rb_node) {
if (prev_or_next)
rb_node = prev_or_next;
@@ -479,7 +579,7 @@ void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
lockdep_assert_held_write(&tree->lock);
WARN_ON(em->flags & EXTENT_FLAG_PINNED);
- rb_erase_cached(&em->rb_node, &tree->map);
+ rb_erase(&em->rb_node, &tree->root);
if (!(em->flags & EXTENT_FLAG_LOGGING))
list_del_init(&em->list);
RB_CLEAR_NODE(&em->rb_node);
@@ -492,15 +592,18 @@ static void replace_extent_mapping(struct btrfs_inode *inode,
struct extent_map *new,
int modified)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *tree = &inode->extent_tree;
lockdep_assert_held_write(&tree->lock);
+ validate_extent_map(fs_info, new);
+
WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
ASSERT(extent_map_in_tree(cur));
if (!(cur->flags & EXTENT_FLAG_LOGGING))
list_del_init(&cur->list);
- rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
+ rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root);
RB_CLEAR_NODE(&cur->rb_node);
setup_extent_mapping(inode, new, modified);
@@ -561,11 +664,8 @@ static noinline int merge_extent_mapping(struct btrfs_inode *inode,
start_diff = start - em->start;
em->start = start;
em->len = end - start;
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- !extent_map_is_compressed(em)) {
- em->block_start += start_diff;
- em->block_len = em->len;
- }
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE && !extent_map_is_compressed(em))
+ em->offset += start_diff;
return add_extent_mapping(inode, em, 0);
}
@@ -600,7 +700,7 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode,
* Tree-checker should have rejected any inline extent with non-zero
* file offset. Here just do a sanity check.
*/
- if (em->block_start == EXTENT_MAP_INLINE)
+ if (em->disk_bytenr == EXTENT_MAP_INLINE)
ASSERT(em->start == 0);
ret = add_extent_mapping(inode, em, 0);
@@ -657,18 +757,23 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode,
static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
{
struct extent_map_tree *tree = &inode->extent_tree;
+ struct rb_node *node;
write_lock(&tree->lock);
- while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
+ node = rb_first(&tree->root);
+ while (node) {
struct extent_map *em;
- struct rb_node *node;
+ struct rb_node *next = rb_next(node);
- node = rb_first_cached(&tree->map);
em = rb_entry(node, struct extent_map, rb_node);
em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
remove_extent_mapping(inode, em);
free_extent_map(em);
- cond_resched_rwlock_write(&tree->lock);
+
+ if (cond_resched_rwlock_write(&tree->lock))
+ node = rb_first(&tree->root);
+ else
+ node = next;
}
write_unlock(&tree->lock);
}
@@ -729,7 +834,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
u64 gen;
unsigned long flags;
bool modified;
- bool compressed;
if (em_end < end) {
next_em = next_extent_map(em);
@@ -763,7 +867,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
goto remove_em;
gen = em->generation;
- compressed = extent_map_is_compressed(em);
if (em->start < start) {
if (!split) {
@@ -775,22 +878,15 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
split->start = em->start;
split->len = start - em->start;
- if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- split->orig_start = em->orig_start;
- split->block_start = em->block_start;
-
- if (compressed)
- split->block_len = em->block_len;
- else
- split->block_len = split->len;
- split->orig_block_len = max(split->block_len,
- em->orig_block_len);
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ split->disk_bytenr = em->disk_bytenr;
+ split->disk_num_bytes = em->disk_num_bytes;
+ split->offset = em->offset;
split->ram_bytes = em->ram_bytes;
} else {
- split->orig_start = split->start;
- split->block_len = 0;
- split->block_start = em->block_start;
- split->orig_block_len = 0;
+ split->disk_bytenr = em->disk_bytenr;
+ split->disk_num_bytes = 0;
+ split->offset = 0;
split->ram_bytes = split->len;
}
@@ -810,30 +906,18 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
}
split->start = end;
split->len = em_end - end;
- split->block_start = em->block_start;
+ split->disk_bytenr = em->disk_bytenr;
split->flags = flags;
split->generation = gen;
- if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- split->orig_block_len = max(em->block_len,
- em->orig_block_len);
-
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ split->disk_num_bytes = em->disk_num_bytes;
+ split->offset = em->offset + end - em->start;
split->ram_bytes = em->ram_bytes;
- if (compressed) {
- split->block_len = em->block_len;
- split->orig_start = em->orig_start;
- } else {
- const u64 diff = end - em->start;
-
- split->block_len = split->len;
- split->block_start += diff;
- split->orig_start = em->orig_start;
- }
} else {
+ split->disk_num_bytes = 0;
+ split->offset = 0;
split->ram_bytes = split->len;
- split->orig_start = split->start;
- split->block_len = 0;
- split->orig_block_len = 0;
}
if (extent_map_in_tree(em)) {
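The split logic above keeps the surviving tail pointing at the same data extent by bumping @offset by however much of the front was dropped (split->offset = em->offset + end - em->start). A tiny standalone illustration of that bookkeeping, with hypothetical numbers and only the fields involved:

#include <stdio.h>

/* Only the fields involved: file start/len plus the on-disk triplet. */
struct em { unsigned long long start, len, disk_bytenr, disk_num_bytes, offset; };

/* Tail that survives after dropping the file range [em->start, end). */
static struct em tail_split(const struct em *em, unsigned long long end)
{
	struct em split = {
		.start = end,
		.len = em->start + em->len - end,
		.disk_bytenr = em->disk_bytenr,
		.disk_num_bytes = em->disk_num_bytes,
		.offset = em->offset + end - em->start,
	};
	return split;
}

int main(void)
{
	/* 128K extent map at file offset 0, backed by a 128K data extent at 1M. */
	struct em em = { 0, 131072, 1048576, 131072, 0 };
	struct em tail = tail_split(&em, 65536);	/* drop the first 64K */

	printf("tail: start=%llu len=%llu disk_bytenr=%llu offset=%llu\n",
	       tail.start, tail.len, tail.disk_bytenr, tail.offset);
	/* File offset 64K still reads from disk_bytenr + offset = 1M + 64K. */
	return 0;
}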
@@ -976,7 +1060,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
ASSERT(em->len == len);
ASSERT(!extent_map_is_compressed(em));
- ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
+ ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE);
ASSERT(em->flags & EXTENT_FLAG_PINNED);
ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
ASSERT(!list_empty(&em->list));
@@ -987,10 +1071,9 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
/* First, replace the em with a new extent_map starting from * em->start */
split_pre->start = em->start;
split_pre->len = pre;
- split_pre->orig_start = split_pre->start;
- split_pre->block_start = new_logical;
- split_pre->block_len = split_pre->len;
- split_pre->orig_block_len = split_pre->block_len;
+ split_pre->disk_bytenr = new_logical;
+ split_pre->disk_num_bytes = split_pre->len;
+ split_pre->offset = 0;
split_pre->ram_bytes = split_pre->len;
split_pre->flags = flags;
split_pre->generation = em->generation;
@@ -1005,10 +1088,9 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
/* Insert the middle extent_map. */
split_mid->start = em->start + pre;
split_mid->len = em->len - pre;
- split_mid->orig_start = split_mid->start;
- split_mid->block_start = em->block_start + pre;
- split_mid->block_len = split_mid->len;
- split_mid->orig_block_len = split_mid->block_len;
+ split_mid->disk_bytenr = extent_map_block_start(em) + pre;
+ split_mid->disk_num_bytes = split_mid->len;
+ split_mid->offset = 0;
split_mid->ram_bytes = split_mid->len;
split_mid->flags = flags;
split_mid->generation = em->generation;
@@ -1028,7 +1110,14 @@ out_free_pre:
return ret;
}
-static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_to_scan)
+struct btrfs_em_shrink_ctx {
+ long nr_to_scan;
+ long scanned;
+ u64 last_ino;
+ u64 last_root;
+};
+
+static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
{
const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info);
struct extent_map_tree *tree = &inode->extent_tree;
@@ -1057,14 +1146,25 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_t
if (!down_read_trylock(&inode->i_mmap_lock))
return 0;
- write_lock(&tree->lock);
- node = rb_first_cached(&tree->map);
+ /*
+ * We want to be fast because we can be called from any path trying to
+ * allocate memory, so if the lock is busy we don't want to spend time
+ * waiting for it: either some task is about to do IO for the inode, or
+ * another task may already be shrinking extent maps here, so just
+ * skip this inode.
+ */
+ if (!write_trylock(&tree->lock)) {
+ up_read(&inode->i_mmap_lock);
+ return 0;
+ }
+
+ node = rb_first(&tree->root);
while (node) {
+ struct rb_node *next = rb_next(node);
struct extent_map *em;
em = rb_entry(node, struct extent_map, rb_node);
- node = rb_next(node);
- (*scanned)++;
+ ctx->scanned++;
if (em->flags & EXTENT_FLAG_PINNED)
goto next;
@@ -1085,16 +1185,19 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_t
free_extent_map(em);
nr_dropped++;
next:
- if (*scanned >= nr_to_scan)
+ if (ctx->scanned >= ctx->nr_to_scan)
break;
/*
- * Restart if we had to reschedule, and any extent maps that were
- * pinned before may have become unpinned after we released the
- * lock and took it again.
+ * Stop if we need to reschedule or there's contention on the
+ * lock. This is to avoid slowing other tasks trying to take the
+ * lock and because the shrinker might be called during a memory
+ * allocation path and we want to avoid taking a very long time
+ * and slowing down all sorts of tasks.
*/
- if (cond_resched_rwlock_write(&tree->lock))
- node = rb_first_cached(&tree->map);
+ if (need_resched() || rwlock_needbreak(&tree->lock))
+ break;
+ node = next;
}
write_unlock(&tree->lock);
up_read(&inode->i_mmap_lock);
@@ -1102,25 +1205,30 @@ next:
return nr_dropped;
}
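/*
 * A minimal standalone userspace sketch of the "skip if the lock is busy"
 * idea used by btrfs_scan_inode() above: try to take the write lock, give up
 * immediately if someone else holds it, and stop early instead of hogging the
 * CPU. This is not btrfs code - the pthread rwlock and the item list below
 * are stand-ins chosen purely for illustration.
 */
#include <pthread.h>
#include <stddef.h>

struct item {
	struct item *next;
	int evictable;
};

struct tree {
	pthread_rwlock_t lock;
	struct item *head;
};

/* Returns how many items were unlinked, or 0 if the lock was busy. */
static long scan_one_tree(struct tree *t, long *scanned, long nr_to_scan)
{
	long dropped = 0;
	struct item **pp;

	/* Don't wait: another task is using this tree, just skip it. */
	if (pthread_rwlock_trywrlock(&t->lock) != 0)
		return 0;

	for (pp = &t->head; *pp && *scanned < nr_to_scan; ) {
		struct item *it = *pp;

		(*scanned)++;
		if (it->evictable) {
			*pp = it->next;	/* unlink (a real user would also free it) */
			dropped++;
		} else {
			pp = &it->next;
		}
	}
	pthread_rwlock_unlock(&t->lock);
	return dropped;
}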
-static long btrfs_scan_root(struct btrfs_root *root, long *scanned, long nr_to_scan)
+static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_inode *inode;
long nr_dropped = 0;
- u64 min_ino = fs_info->extent_map_shrinker_last_ino + 1;
+ u64 min_ino = ctx->last_ino + 1;
inode = btrfs_find_first_inode(root, min_ino);
while (inode) {
- nr_dropped += btrfs_scan_inode(inode, scanned, nr_to_scan);
+ nr_dropped += btrfs_scan_inode(inode, ctx);
min_ino = btrfs_ino(inode) + 1;
- fs_info->extent_map_shrinker_last_ino = btrfs_ino(inode);
- iput(&inode->vfs_inode);
+ ctx->last_ino = btrfs_ino(inode);
+ btrfs_add_delayed_iput(inode);
- if (*scanned >= nr_to_scan)
+ if (ctx->scanned >= ctx->nr_to_scan)
+ break;
+
+ /*
+ * We may be called from memory allocation paths, so we don't
+ * want to take too much time and slow down tasks.
+ */
+ if (need_resched())
break;
- cond_resched();
inode = btrfs_find_first_inode(root, min_ino);
}
@@ -1132,14 +1240,14 @@ static long btrfs_scan_root(struct btrfs_root *root, long *scanned, long nr_to_s
* inode if there is one or we will find out this was the last
* one and move to the next root.
*/
- fs_info->extent_map_shrinker_last_root = btrfs_root_id(root);
+ ctx->last_root = btrfs_root_id(root);
} else {
/*
* No more inodes in this root, set extent_map_shrinker_last_ino to 0 so
* that when processing the next root we start from its first inode.
*/
- fs_info->extent_map_shrinker_last_ino = 0;
- fs_info->extent_map_shrinker_last_root = btrfs_root_id(root) + 1;
+ ctx->last_ino = 0;
+ ctx->last_root = btrfs_root_id(root) + 1;
}
return nr_dropped;
@@ -1147,19 +1255,41 @@ static long btrfs_scan_root(struct btrfs_root *root, long *scanned, long nr_to_s
long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
{
- const u64 start_root_id = fs_info->extent_map_shrinker_last_root;
- u64 next_root_id = start_root_id;
+ struct btrfs_em_shrink_ctx ctx;
+ u64 start_root_id;
+ u64 next_root_id;
bool cycled = false;
long nr_dropped = 0;
- long scanned = 0;
+
+ ctx.scanned = 0;
+ ctx.nr_to_scan = nr_to_scan;
+
+ /*
+ * In case we have multiple tasks running this shrinker, make the next
+ * one start from the next inode in case it starts before we finish.
+ */
+ spin_lock(&fs_info->extent_map_shrinker_lock);
+ ctx.last_ino = fs_info->extent_map_shrinker_last_ino;
+ fs_info->extent_map_shrinker_last_ino++;
+ ctx.last_root = fs_info->extent_map_shrinker_last_root;
+ spin_unlock(&fs_info->extent_map_shrinker_lock);
+
+ start_root_id = ctx.last_root;
+ next_root_id = ctx.last_root;
if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
- trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan, nr);
+ trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan,
+ nr, ctx.last_root,
+ ctx.last_ino);
}
- while (scanned < nr_to_scan) {
+ /*
+ * We may be called from memory allocation paths, so we don't want to
+ * take too much time and slow down tasks, so stop if we need to reschedule.
+ */
+ while (ctx.scanned < ctx.nr_to_scan && !need_resched()) {
struct btrfs_root *root;
unsigned long count;
@@ -1171,8 +1301,8 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
spin_unlock(&fs_info->fs_roots_radix_lock);
if (start_root_id > 0 && !cycled) {
next_root_id = 0;
- fs_info->extent_map_shrinker_last_root = 0;
- fs_info->extent_map_shrinker_last_ino = 0;
+ ctx.last_root = 0;
+ ctx.last_ino = 0;
cycled = true;
continue;
}
@@ -1186,15 +1316,33 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
continue;
if (is_fstree(btrfs_root_id(root)))
- nr_dropped += btrfs_scan_root(root, &scanned, nr_to_scan);
+ nr_dropped += btrfs_scan_root(root, &ctx);
btrfs_put_root(root);
}
+ /*
+ * In case of multiple tasks running this extent map shrinking code this
+ * isn't perfect but it's simple and silences things like KCSAN. It's
+ * not possible to know which task made more progress because we can
+ * cycle back to the first root and first inode if it's not the first
+ * time the shrinker ran, see the above logic. Also a task that started
+ * later may finish earlier than another task and have made less progress. So
+ * make this simple and update to the progress of the last task that
+ * finished, with the occasional possibility of having two consecutive
+ * runs of the shrinker process the same inodes.
+ */
+ spin_lock(&fs_info->extent_map_shrinker_lock);
+ fs_info->extent_map_shrinker_last_ino = ctx.last_ino;
+ fs_info->extent_map_shrinker_last_root = ctx.last_root;
+ spin_unlock(&fs_info->extent_map_shrinker_lock);
+
if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
- trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
+ trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped,
+ nr, ctx.last_root,
+ ctx.last_ino);
}
return nr_dropped;
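/*
 * The shrinker's resume position is now a plain cursor published under a
 * spinlock rather than fields updated while scanning. A standalone sketch of
 * that pattern follows - not btrfs code; the pthread mutex and the struct
 * names are invented for illustration.
 */
#include <pthread.h>
#include <stdint.h>

struct shared_cursor {
	pthread_mutex_t lock;
	uint64_t last_root;
	uint64_t last_ino;
};

struct scan_ctx {
	uint64_t last_root;
	uint64_t last_ino;
};

static void cursor_begin(struct shared_cursor *c, struct scan_ctx *ctx)
{
	pthread_mutex_lock(&c->lock);
	ctx->last_root = c->last_root;
	ctx->last_ino = c->last_ino;
	/* Nudge the shared cursor so a concurrent run starts one inode later. */
	c->last_ino++;
	pthread_mutex_unlock(&c->lock);
}

static void cursor_end(struct shared_cursor *c, const struct scan_ctx *ctx)
{
	/*
	 * Publish whatever the last finisher saw. Two overlapping runs may
	 * occasionally rescan the same inodes, which is harmless here.
	 */
	pthread_mutex_lock(&c->lock);
	c->last_root = ctx->last_root;
	c->last_ino = ctx->last_ino;
	pthread_mutex_unlock(&c->lock);
}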
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 6d587111f73a..5154a8f1d26c 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -4,12 +4,11 @@
#define BTRFS_EXTENT_MAP_H
#include <linux/compiler_types.h>
-#include <linux/rwlock_types.h>
+#include <linux/spinlock_types.h>
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include "misc.h"
-#include "extent_map.h"
#include "compression.h"
struct btrfs_inode;
@@ -62,46 +61,33 @@ struct extent_map {
u64 len;
/*
- * The file offset of the original file extent before splitting.
+ * The bytenr of the full on-disk extent.
*
- * This is an in-memory only member, matching
- * extent_map::start - btrfs_file_extent_item::offset for
- * regular/preallocated extents. EXTENT_MAP_HOLE otherwise.
+ * For regular extents it's btrfs_file_extent_item::disk_bytenr.
+ * For holes it's EXTENT_MAP_HOLE and for inline extents it's
+ * EXTENT_MAP_INLINE.
*/
- u64 orig_start;
+ u64 disk_bytenr;
/*
* The full on-disk extent length, matching
* btrfs_file_extent_item::disk_num_bytes.
*/
- u64 orig_block_len;
-
- /*
- * The decompressed size of the whole on-disk extent, matching
- * btrfs_file_extent_item::ram_bytes.
- */
- u64 ram_bytes;
+ u64 disk_num_bytes;
/*
- * The on-disk logical bytenr for the file extent.
- *
- * For compressed extents it matches btrfs_file_extent_item::disk_bytenr.
- * For uncompressed extents it matches
- * btrfs_file_extent_item::disk_bytenr + btrfs_file_extent_item::offset
+ * Offset inside the decompressed extent.
*
- * For holes it is EXTENT_MAP_HOLE and for inline extents it is
- * EXTENT_MAP_INLINE.
+ * For regular extents it's btrfs_file_extent_item::offset.
+ * For holes and inline extents it's 0.
*/
- u64 block_start;
+ u64 offset;
/*
- * The on-disk length for the file extent.
- *
- * For compressed extents it matches btrfs_file_extent_item::disk_num_bytes.
- * For uncompressed extents it matches extent_map::len.
- * For holes and inline extents it's -1 and shouldn't be used.
+ * The decompressed size of the whole on-disk extent, matching
+ * btrfs_file_extent_item::ram_bytes.
*/
- u64 block_len;
+ u64 ram_bytes;
/*
* Generation of the extent map, for merged em it's the highest
@@ -115,7 +101,7 @@ struct extent_map {
};
struct extent_map_tree {
- struct rb_root_cached map;
+ struct rb_root root;
struct list_head modified_extents;
rwlock_t lock;
};
@@ -163,6 +149,16 @@ static inline int extent_map_in_tree(const struct extent_map *em)
return !RB_EMPTY_NODE(&em->rb_node);
}
+static inline u64 extent_map_block_start(const struct extent_map *em)
+{
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ if (extent_map_is_compressed(em))
+ return em->disk_bytenr;
+ return em->disk_bytenr + em->offset;
+ }
+ return em->disk_bytenr;
+}
+
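/*
 * With block_start/block_len gone, the old logical block start is derived on
 * demand from the new fields, exactly as extent_map_block_start() above does:
 * disk_bytenr for compressed extents, disk_bytenr + offset otherwise, and the
 * sentinel values (hole/inline) pass through unchanged. A tiny standalone
 * model of that mapping - the constant and the flag below are stand-ins, not
 * the real kernel definitions.
 */
#include <stdint.h>
#include <stdbool.h>

#define SKETCH_LAST_BYTE ((uint64_t)-4)	/* stand-in for EXTENT_MAP_LAST_BYTE */

struct em_sketch {
	uint64_t disk_bytenr;	/* start of the full on-disk extent, or a sentinel */
	uint64_t offset;	/* offset into the decompressed extent */
	bool compressed;
};

static uint64_t em_block_start(const struct em_sketch *em)
{
	/* Holes and inline extents use sentinel values, return them as-is. */
	if (em->disk_bytenr >= SKETCH_LAST_BYTE)
		return em->disk_bytenr;
	/* Compressed extents are always addressed from the extent's start. */
	if (em->compressed)
		return em->disk_bytenr;
	return em->disk_bytenr + em->offset;
}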
static inline u64 extent_map_end(const struct extent_map *em)
{
if (em->start + em->len < em->start)
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
new file mode 100644
index 000000000000..8f95f3e44e99
--- /dev/null
+++ b/fs/btrfs/fiemap.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "backref.h"
+#include "btrfs_inode.h"
+#include "fiemap.h"
+#include "file.h"
+#include "file-item.h"
+
+struct btrfs_fiemap_entry {
+ u64 offset;
+ u64 phys;
+ u64 len;
+ u32 flags;
+};
+
+/*
+ * Indicate to the caller of emit_fiemap_extent() that it needs to unlock the
+ * file range from the inode's io tree, unlock the subvolume tree search path,
+ * flush the fiemap cache and then relock the file range and re-search the
+ * subvolume tree. The value here is something negative that can't be confused
+ * with a valid errno value and different from 1, because 1 is also a return
+ * value from fiemap_fill_next_extent() and is often used to mean that a btree
+ * search did not find a key, so make it some distinct negative value.
+ */
+#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
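/*
 * Why -(MAX_ERRNO + 1): the sentinel must be distinguishable both from any
 * valid negative errno (-1 .. -MAX_ERRNO) and from the value 1 returned by
 * fiemap_fill_next_extent(). A trivial standalone check of that property;
 * the 4095 below is a stand-in for the kernel's MAX_ERRNO.
 */
#include <assert.h>

#define SKETCH_MAX_ERRNO	4095
#define SKETCH_FLUSH_CACHE	(-(SKETCH_MAX_ERRNO + 1))

static_assert(SKETCH_FLUSH_CACHE < -SKETCH_MAX_ERRNO, "below every -errno");
static_assert(SKETCH_FLUSH_CACHE != 1, "distinct from the 'stop' value 1");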
+
+/*
+ * Used to:
+ *
+ * - Cache the next entry to be emitted to the fiemap buffer, so that we can
+ * merge extents that are contiguous and can be grouped as a single one;
+ *
+ * - Store extents ready to be written to the fiemap buffer in an intermediary
+ * buffer. This intermediary buffer is to ensure that in case the fiemap
+ * buffer is memory mapped to the fiemap target file, we don't deadlock
+ * during btrfs_page_mkwrite(). This is because during fiemap we are locking
+ * an extent range in order to prevent races with delalloc flushing and
+ * ordered extent completion, which is needed in order to reliably detect
+ * delalloc in holes and prealloc extents. And this can lead to a deadlock
+ * if the fiemap buffer is memory mapped to the file we are running fiemap
+ * against (a silly, useless in practice scenario, but possible) because
+ * btrfs_page_mkwrite() will try to lock the same extent range.
+ */
+struct fiemap_cache {
+ /* An array of ready fiemap entries. */
+ struct btrfs_fiemap_entry *entries;
+ /* Number of entries in the entries array. */
+ int entries_size;
+ /* Index of the next entry in the entries array to write to. */
+ int entries_pos;
+ /*
+ * Once the entries array is full, this indicates what's the offset for
+ * the next file extent item we must search for in the inode's subvolume
+ * tree after unlocking the extent range in the inode's io tree and
+ * releasing the search path.
+ */
+ u64 next_search_offset;
+ /*
+ * This matches struct fiemap_extent_info::fi_mapped_extents. We use it
+ * to count the extents we have emitted ourselves and stop, instead of
+ * relying on fiemap_fill_next_extent(), because we buffer ready fiemap
+ * entries in the @entries array and we want to stop as soon as we hit
+ * the maximum number of extents to map, not just to save time but also
+ * to make the logic at extent_fiemap() simpler.
+ */
+ unsigned int extents_mapped;
+ /* Fields for the currently cached extent (unsubmitted, not yet ready). */
+ u64 offset;
+ u64 phys;
+ u64 len;
+ u32 flags;
+ bool cached;
+};
+
+static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache)
+{
+ for (int i = 0; i < cache->entries_pos; i++) {
+ struct btrfs_fiemap_entry *entry = &cache->entries[i];
+ int ret;
+
+ ret = fiemap_fill_next_extent(fieinfo, entry->offset,
+ entry->phys, entry->len,
+ entry->flags);
+ /*
+ * Ignore 1 (reached max entries) because we keep track of that
+ * ourselves in emit_fiemap_extent().
+ */
+ if (ret < 0)
+ return ret;
+ }
+ cache->entries_pos = 0;
+
+ return 0;
+}
+
+/*
+ * Helper to submit fiemap extent.
+ *
+ * Try to merge the current fiemap extent, specified by @offset, @phys,
+ * @len and @flags, with the cached one.
+ * Only when merging fails will the cached extent be submitted as a
+ * fiemap extent.
+ *
+ * Return value is the same as fiemap_fill_next_extent().
+ */
+static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache,
+ u64 offset, u64 phys, u64 len, u32 flags)
+{
+ struct btrfs_fiemap_entry *entry;
+ u64 cache_end;
+
+ /* Set at the end of extent_fiemap(). */
+ ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
+
+ if (!cache->cached)
+ goto assign;
+
+ /*
+ * When iterating the extents of the inode, at extent_fiemap(), we may
+ * find an extent that starts at an offset behind the end offset of the
+ * previous extent we processed. This happens if fiemap is called
+ * without FIEMAP_FLAG_SYNC and there are ordered extents completing
+ * after we had to unlock the file range, release the search path, emit
+ * the fiemap extents stored in the buffer (cache->entries array) and
+ * then lock the remainder of the range and re-search the btree.
+ *
+ * For example we are in leaf X processing its last item, which is the
+ * file extent item for file range [512K, 1M[, and after
+ * btrfs_next_leaf() releases the path, there's an ordered extent that
+ * completes for the file range [768K, 2M[, and that results in trimming
+ * the file extent item so that it now corresponds to the file range
+ * [512K, 768K[ and a new file extent item is inserted for the file
+ * range [768K, 2M[, which may end up as the last item of leaf X or as
+ * the first item of the next leaf - in either case btrfs_next_leaf()
+ * will leave us with a path pointing to the new extent item, for the
+ * file range [768K, 2M[, since that's the first key that follows the
+ * last one we processed. So in order not to report overlapping extents
+ * to user space, we trim the length of the previously cached extent and
+ * emit it.
+ *
+ * Upon calling btrfs_next_leaf() we may also find an extent with an
+ * offset smaller than or equal to cache->offset, and this happens
+ * when we had a hole or prealloc extent with several delalloc ranges in
+ * it, but after btrfs_next_leaf() released the path, delalloc was
+ * flushed and the resulting ordered extents were completed, so we can
+ * now have found a file extent item for an offset that is smaller than
+ * or equal to what we have in cache->offset. We deal with this as
+ * described below.
+ */
+ cache_end = cache->offset + cache->len;
+ if (cache_end > offset) {
+ if (offset == cache->offset) {
+ /*
+ * We cached a delalloc range (found in the io tree) for
+ * a hole or prealloc extent and we have now found a
+ * file extent item for the same offset. What we have
+ * now is more recent and up to date, so discard what
+ * we had in the cache and use what we have just found.
+ */
+ goto assign;
+ } else if (offset > cache->offset) {
+ /*
+ * The extent range we previously found ends after the
+ * offset of the file extent item we found and that
+ * offset falls somewhere in the middle of that previous
+ * extent range. So adjust the range we previously found
+ * to end at the offset of the file extent item we have
+ * just found, since this extent is more up to date.
+ * Emit that adjusted range and cache the file extent
+ * item we have just found. This corresponds to the case
+ * where a previously found file extent item was split
+ * due to an ordered extent completing.
+ */
+ cache->len = offset - cache->offset;
+ goto emit;
+ } else {
+ const u64 range_end = offset + len;
+
+ /*
+ * The offset of the file extent item we have just found
+ * is behind the cached offset. This means we were
+ * processing a hole or prealloc extent for which we
+ * have found delalloc ranges (in the io tree), so what
+ * we have in the cache is the last delalloc range we
+ * found while the file extent item we found can be
+ * either for a whole delalloc range we previously
+ * emitted or only a part of that range.
+ *
+ * We have two cases here:
+ *
+ * 1) The file extent item's range ends at or behind the
+ * cached extent's end. In this case just ignore the
+ * current file extent item because we don't want to
+ * overlap with previous ranges that may have been
+ * emitted already;
+ *
+ * 2) The file extent item starts behind the currently
+ * cached extent but its end offset goes beyond the
+ * end offset of the cached extent. We don't want to
+ * overlap with a previous range that may have been
+ * emitted already, so we emit the currently cached
+ * extent and then partially store the current file
+ * extent item's range in the cache, for the subrange
+ * going from the cached extent's end to the end of the
+ * file extent item.
+ */
+ if (range_end <= cache_end)
+ return 0;
+
+ if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
+ phys += cache_end - offset;
+
+ offset = cache_end;
+ len = range_end - cache_end;
+ goto emit;
+ }
+ }
+
+ /*
+ * Only merges fiemap extents if
+ * 1) Their logical addresses are continuous
+ *
+ * 2) Their physical addresses are continuous
+ * So truly compressed (physical size smaller than logical size)
+ * extents won't get merged with each other
+ *
+ * 3) They share the same flags
+ */
+ if (cache->offset + cache->len == offset &&
+ cache->phys + cache->len == phys &&
+ cache->flags == flags) {
+ cache->len += len;
+ return 0;
+ }
+
+emit:
+ /* Not mergeable, need to submit cached one */
+
+ if (cache->entries_pos == cache->entries_size) {
+ /*
+ * We will need to research for the end offset of the last
+ * stored extent and not from the current offset, because after
+ * unlocking the range and releasing the path, if there's a hole
+ * between that end offset and this current offset, a new extent
+ * may have been inserted due to a new write, so we don't want
+ * to miss it.
+ */
+ entry = &cache->entries[cache->entries_size - 1];
+ cache->next_search_offset = entry->offset + entry->len;
+ cache->cached = false;
+
+ return BTRFS_FIEMAP_FLUSH_CACHE;
+ }
+
+ entry = &cache->entries[cache->entries_pos];
+ entry->offset = cache->offset;
+ entry->phys = cache->phys;
+ entry->len = cache->len;
+ entry->flags = cache->flags;
+ cache->entries_pos++;
+ cache->extents_mapped++;
+
+ if (cache->extents_mapped == fieinfo->fi_extents_max) {
+ cache->cached = false;
+ return 1;
+ }
+assign:
+ cache->cached = true;
+ cache->offset = offset;
+ cache->phys = phys;
+ cache->len = len;
+ cache->flags = flags;
+
+ return 0;
+}
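/*
 * The merge rule implemented above, isolated as a standalone sketch: two
 * entries merge only if they are contiguous both logically and physically
 * and carry the same flags, so truly compressed extents (physical size
 * smaller than logical size) never merge. The struct and function names here
 * are invented for illustration and are not part of btrfs.
 */
#include <stdint.h>
#include <stdbool.h>

struct fiemap_ent {
	uint64_t offset;	/* logical file offset */
	uint64_t phys;		/* physical (disk) offset */
	uint64_t len;
	uint32_t flags;
};

/* Grow 'cached' by 'next' when mergeable; otherwise leave both untouched. */
static bool try_merge(struct fiemap_ent *cached, const struct fiemap_ent *next)
{
	if (cached->offset + cached->len == next->offset &&
	    cached->phys + cached->len == next->phys &&
	    cached->flags == next->flags) {
		cached->len += next->len;
		return true;
	}
	return false;
}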
+
+/*
+ * Emit last fiemap cache
+ *
+ * The last fiemap cache may still be cached in the following case:
+ * 0 4k 8k
+ * |<- Fiemap range ->|
+ * |<------------ First extent ----------->|
+ *
+ * In this case, the first extent range will be cached but not emitted.
+ * So we must emit it before ending extent_fiemap().
+ */
+static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache)
+{
+ int ret;
+
+ if (!cache->cached)
+ return 0;
+
+ ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+ cache->len, cache->flags);
+ cache->cached = false;
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
+static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
+{
+ struct extent_buffer *clone = path->nodes[0];
+ struct btrfs_key key;
+ int slot;
+ int ret;
+
+ path->slots[0]++;
+ if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
+ return 0;
+
+ /*
+ * Add a temporary extra ref to an already cloned extent buffer to
+ * prevent btrfs_next_leaf() freeing it, we want to reuse it to avoid
+ * the cost of allocating a new one.
+ */
+ ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
+ atomic_inc(&clone->refs);
+
+ ret = btrfs_next_leaf(inode->root, path);
+ if (ret != 0)
+ goto out;
+
+ /*
+ * Don't bother with cloning if there are no more file extent items for
+ * our inode.
+ */
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) {
+ ret = 1;
+ goto out;
+ }
+
+ /*
+ * Important to preserve the start field, for the optimizations when
+ * checking if extents are shared (see extent_fiemap()).
+ *
+ * We must set ->start before calling copy_extent_buffer_full(). If we
+ * are on sub-pagesize blocksize, we use ->start to determine the offset
+ * into the folio where our eb exists, and if we update ->start after
+ * the fact then any subsequent reads of the eb may read from a
+ * different offset in the folio than where we originally copied into.
+ */
+ clone->start = path->nodes[0]->start;
+ /* See the comment at fiemap_search_slot() about why we clone. */
+ copy_extent_buffer_full(clone, path->nodes[0]);
+
+ slot = path->slots[0];
+ btrfs_release_path(path);
+ path->nodes[0] = clone;
+ path->slots[0] = slot;
+out:
+ if (ret)
+ free_extent_buffer(clone);
+
+ return ret;
+}
+
+/*
+ * Search for the first file extent item that starts at a given file offset or
+ * the one that starts immediately before that offset.
+ * Returns: 0 on success, < 0 on error, 1 if not found.
+ */
+static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
+ u64 file_offset)
+{
+ const u64 ino = btrfs_ino(inode);
+ struct btrfs_root *root = inode->root;
+ struct extent_buffer *clone;
+ struct btrfs_key key;
+ int slot;
+ int ret;
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = file_offset;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ if (ret > 0 && path->slots[0] > 0) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
+ path->slots[0]--;
+ }
+
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret != 0)
+ return ret;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
+ return 1;
+ }
+
+ /*
+ * We clone the leaf and use it during fiemap. This is because while
+ * using the leaf we do expensive things like checking if an extent is
+ * shared, which can take a long time. In order to prevent blocking
+ * other tasks for too long, we use a clone of the leaf. We have locked
+ * the file range in the inode's io tree, so we know none of our file
+ * extent items can change. This way we avoid blocking other tasks that
+ * want to insert items for other inodes in the same leaf or b+tree
+ * rebalance operations (triggered for example when someone is trying
+ * to push items into this leaf when trying to insert an item in a
+ * neighbour leaf).
+ * We also need the private clone because holding a read lock on an
+ * extent buffer of the subvolume's b+tree will make lockdep unhappy
+ * when we check if extents are shared, as backref walking may need to
+ * lock the same leaf we are processing.
+ */
+ clone = btrfs_clone_extent_buffer(path->nodes[0]);
+ if (!clone)
+ return -ENOMEM;
+
+ slot = path->slots[0];
+ btrfs_release_path(path);
+ path->nodes[0] = clone;
+ path->slots[0] = slot;
+
+ return 0;
+}
+
+/*
+ * Process a range which is a hole or a prealloc extent in the inode's subvolume
+ * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
+ * extent. The end offset (@end) is inclusive.
+ */
+static int fiemap_process_hole(struct btrfs_inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache,
+ struct extent_state **delalloc_cached_state,
+ struct btrfs_backref_share_check_ctx *backref_ctx,
+ u64 disk_bytenr, u64 extent_offset,
+ u64 extent_gen,
+ u64 start, u64 end)
+{
+ const u64 i_size = i_size_read(&inode->vfs_inode);
+ u64 cur_offset = start;
+ u64 last_delalloc_end = 0;
+ u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
+ bool checked_extent_shared = false;
+ int ret;
+
+ /*
+ * There can be no delalloc past i_size, so don't waste time looking for
+ * it beyond i_size.
+ */
+ while (cur_offset < end && cur_offset < i_size) {
+ u64 delalloc_start;
+ u64 delalloc_end;
+ u64 prealloc_start;
+ u64 prealloc_len = 0;
+ bool delalloc;
+
+ delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
+ delalloc_cached_state,
+ &delalloc_start,
+ &delalloc_end);
+ if (!delalloc)
+ break;
+
+ /*
+ * If this is a prealloc extent we have to report every section
+ * of it that has no delalloc.
+ */
+ if (disk_bytenr != 0) {
+ if (last_delalloc_end == 0) {
+ prealloc_start = start;
+ prealloc_len = delalloc_start - start;
+ } else {
+ prealloc_start = last_delalloc_end + 1;
+ prealloc_len = delalloc_start - prealloc_start;
+ }
+ }
+
+ if (prealloc_len > 0) {
+ if (!checked_extent_shared && fieinfo->fi_extents_max) {
+ ret = btrfs_is_data_extent_shared(inode,
+ disk_bytenr,
+ extent_gen,
+ backref_ctx);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ prealloc_flags |= FIEMAP_EXTENT_SHARED;
+
+ checked_extent_shared = true;
+ }
+ ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
+ disk_bytenr + extent_offset,
+ prealloc_len, prealloc_flags);
+ if (ret)
+ return ret;
+ extent_offset += prealloc_len;
+ }
+
+ ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
+ delalloc_end + 1 - delalloc_start,
+ FIEMAP_EXTENT_DELALLOC |
+ FIEMAP_EXTENT_UNKNOWN);
+ if (ret)
+ return ret;
+
+ last_delalloc_end = delalloc_end;
+ cur_offset = delalloc_end + 1;
+ extent_offset += cur_offset - delalloc_start;
+ cond_resched();
+ }
+
+ /*
+ * Either we found no delalloc for the whole prealloc extent or we have
+ * a prealloc extent that spans i_size or starts at or after i_size.
+ */
+ if (disk_bytenr != 0 && last_delalloc_end < end) {
+ u64 prealloc_start;
+ u64 prealloc_len;
+
+ if (last_delalloc_end == 0) {
+ prealloc_start = start;
+ prealloc_len = end + 1 - start;
+ } else {
+ prealloc_start = last_delalloc_end + 1;
+ prealloc_len = end + 1 - prealloc_start;
+ }
+
+ if (!checked_extent_shared && fieinfo->fi_extents_max) {
+ ret = btrfs_is_data_extent_shared(inode,
+ disk_bytenr,
+ extent_gen,
+ backref_ctx);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ prealloc_flags |= FIEMAP_EXTENT_SHARED;
+ }
+ ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
+ disk_bytenr + extent_offset,
+ prealloc_len, prealloc_flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
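/*
 * The shape of the loop above, reduced to a standalone sketch: walk a
 * prealloc extent and a sorted list of delalloc sub-ranges inside it,
 * reporting each delalloc range plus every prealloc gap between and after
 * them. The emit callback and the types are hypothetical stand-ins, not
 * btrfs interfaces.
 */
#include <stdint.h>
#include <stddef.h>

struct range {
	uint64_t start;
	uint64_t end;	/* inclusive, like the 'end' parameter above */
};

typedef void (*emit_fn)(uint64_t start, uint64_t len, int is_delalloc);

static void report_prealloc(uint64_t start, uint64_t end,
			    const struct range *delalloc, size_t nr,
			    emit_fn emit)
{
	uint64_t cur = start;

	for (size_t i = 0; i < nr; i++) {
		/* Prealloc gap before this delalloc range, if any. */
		if (delalloc[i].start > cur)
			emit(cur, delalloc[i].start - cur, 0);
		emit(delalloc[i].start,
		     delalloc[i].end + 1 - delalloc[i].start, 1);
		cur = delalloc[i].end + 1;
	}
	/* Trailing prealloc gap after the last delalloc range. */
	if (cur <= end)
		emit(cur, end + 1 - cur, 0);
}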
+
+static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
+ struct btrfs_path *path,
+ u64 *last_extent_end_ret)
+{
+ const u64 ino = btrfs_ino(inode);
+ struct btrfs_root *root = inode->root;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *ei;
+ struct btrfs_key key;
+ u64 disk_bytenr;
+ int ret;
+
+ /*
+ * Lookup the last file extent. We're not using i_size here because
+ * there might be preallocation past i_size.
+ */
+ ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
+ /* There can't be a file extent item at offset (u64)-1 */
+ ASSERT(ret != 0);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * For a non-existing key, btrfs_search_slot() always leaves us at a
+ * slot > 0, except if the btree is empty, which is impossible because
+ * at least it has the inode item for this inode and all the items for
+ * the root inode 256.
+ */
+ ASSERT(path->slots[0] > 0);
+ path->slots[0]--;
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
+ /* No file extent items in the subvolume tree. */
+ *last_extent_end_ret = 0;
+ return 0;
+ }
+
+ /*
+ * For an inline extent, the disk_bytenr is where inline data starts at,
+ * so first check if we have an inline extent item before checking if we
+ * have an implicit hole (disk_bytenr == 0).
+ */
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
+ *last_extent_end_ret = btrfs_file_extent_end(path);
+ return 0;
+ }
+
+ /*
+ * Find the last file extent item that is not a hole (when NO_HOLES is
+ * not enabled). This should take at most 2 iterations in the worst
+ * case: we have one hole file extent item at slot 0 of a leaf and
+ * another hole file extent item as the last item in the previous leaf.
+ * This is because we merge file extent items that represent holes.
+ */
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ while (disk_bytenr == 0) {
+ ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
+ if (ret < 0) {
+ return ret;
+ } else if (ret > 0) {
+ /* No file extent items that are not holes. */
+ *last_extent_end_ret = 0;
+ return 0;
+ }
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ }
+
+ *last_extent_end_ret = btrfs_file_extent_end(path);
+ return 0;
+}
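/*
 * The backwards walk above, modelled over a plain array: step back from the
 * last item and skip hole items (disk_bytenr == 0) until a real extent is
 * found. Standalone sketch only; the item struct is invented for
 * illustration.
 */
#include <stdint.h>
#include <stddef.h>

struct fext_item {
	uint64_t file_offset;
	uint64_t num_bytes;
	uint64_t disk_bytenr;	/* 0 means an explicit hole */
};

/* Return the end offset of the last non-hole extent, or 0 if there is none. */
static uint64_t last_extent_end(const struct fext_item *items, size_t nr)
{
	for (size_t i = nr; i-- > 0; ) {
		if (items[i].disk_bytenr != 0)
			return items[i].file_offset + items[i].num_bytes;
	}
	return 0;
}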
+
+static int extent_fiemap(struct btrfs_inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+{
+ const u64 ino = btrfs_ino(inode);
+ struct extent_state *cached_state = NULL;
+ struct extent_state *delalloc_cached_state = NULL;
+ struct btrfs_path *path;
+ struct fiemap_cache cache = { 0 };
+ struct btrfs_backref_share_check_ctx *backref_ctx;
+ u64 last_extent_end;
+ u64 prev_extent_end;
+ u64 range_start;
+ u64 range_end;
+ const u64 sectorsize = inode->root->fs_info->sectorsize;
+ bool stopped = false;
+ int ret;
+
+ cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
+ cache.entries = kmalloc_array(cache.entries_size,
+ sizeof(struct btrfs_fiemap_entry),
+ GFP_KERNEL);
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ path = btrfs_alloc_path();
+ if (!cache.entries || !backref_ctx || !path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+restart:
+ range_start = round_down(start, sectorsize);
+ range_end = round_up(start + len, sectorsize);
+ prev_extent_end = range_start;
+
+ lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+
+ ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
+ if (ret < 0)
+ goto out_unlock;
+ btrfs_release_path(path);
+
+ path->reada = READA_FORWARD;
+ ret = fiemap_search_slot(inode, path, range_start);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /*
+ * No file extent item found, but we may have delalloc between
+ * the current offset and i_size. So check for that.
+ */
+ ret = 0;
+ goto check_eof_delalloc;
+ }
+
+ while (prev_extent_end < range_end) {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_file_extent_item *ei;
+ struct btrfs_key key;
+ u64 extent_end;
+ u64 extent_len;
+ u64 extent_offset = 0;
+ u64 extent_gen;
+ u64 disk_bytenr = 0;
+ u64 flags = 0;
+ int extent_type;
+ u8 compression;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
+ break;
+
+ extent_end = btrfs_file_extent_end(path);
+
+ /*
+ * The first iteration can leave us at an extent item that ends
+ * before our range's start. Move to the next item.
+ */
+ if (extent_end <= range_start)
+ goto next_item;
+
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /* We have an implicit hole (NO_HOLES feature enabled). */
+ if (prev_extent_end < key.offset) {
+ const u64 hole_end = min(key.offset, range_end) - 1;
+
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx, 0, 0, 0,
+ prev_extent_end, hole_end);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /* fiemap_fill_next_extent() told us to stop. */
+ stopped = true;
+ break;
+ }
+
+ /* We've reached the end of the fiemap range, stop. */
+ if (key.offset >= range_end) {
+ stopped = true;
+ break;
+ }
+ }
+
+ extent_len = extent_end - key.offset;
+ ei = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ compression = btrfs_file_extent_compression(leaf, ei);
+ extent_type = btrfs_file_extent_type(leaf, ei);
+ extent_gen = btrfs_file_extent_generation(leaf, ei);
+
+ if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ if (compression == BTRFS_COMPRESS_NONE)
+ extent_offset = btrfs_file_extent_offset(leaf, ei);
+ }
+
+ if (compression != BTRFS_COMPRESS_NONE)
+ flags |= FIEMAP_EXTENT_ENCODED;
+
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ flags |= FIEMAP_EXTENT_DATA_INLINE;
+ flags |= FIEMAP_EXTENT_NOT_ALIGNED;
+ ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
+ extent_len, flags);
+ } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx,
+ disk_bytenr, extent_offset,
+ extent_gen, key.offset,
+ extent_end - 1);
+ } else if (disk_bytenr == 0) {
+ /* We have an explicit hole. */
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx, 0, 0, 0,
+ key.offset, extent_end - 1);
+ } else {
+ /* We have a regular extent. */
+ if (fieinfo->fi_extents_max) {
+ ret = btrfs_is_data_extent_shared(inode,
+ disk_bytenr,
+ extent_gen,
+ backref_ctx);
+ if (ret < 0)
+ goto out_unlock;
+ else if (ret > 0)
+ flags |= FIEMAP_EXTENT_SHARED;
+ }
+
+ ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
+ disk_bytenr + extent_offset,
+ extent_len, flags);
+ }
+
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /* emit_fiemap_extent() told us to stop. */
+ stopped = true;
+ break;
+ }
+
+ prev_extent_end = extent_end;
+next_item:
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out_unlock;
+ }
+
+ ret = fiemap_next_leaf_item(inode, path);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+ /* No more file extent items for this inode. */
+ break;
+ }
+ cond_resched();
+ }
+
+check_eof_delalloc:
+ if (!stopped && prev_extent_end < range_end) {
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state, backref_ctx,
+ 0, 0, 0, prev_extent_end, range_end - 1);
+ if (ret < 0)
+ goto out_unlock;
+ prev_extent_end = range_end;
+ }
+
+ if (cache.cached && cache.offset + cache.len >= last_extent_end) {
+ const u64 i_size = i_size_read(&inode->vfs_inode);
+
+ if (prev_extent_end < i_size) {
+ u64 delalloc_start;
+ u64 delalloc_end;
+ bool delalloc;
+
+ delalloc = btrfs_find_delalloc_in_range(inode,
+ prev_extent_end,
+ i_size - 1,
+ &delalloc_cached_state,
+ &delalloc_start,
+ &delalloc_end);
+ if (!delalloc)
+ cache.flags |= FIEMAP_EXTENT_LAST;
+ } else {
+ cache.flags |= FIEMAP_EXTENT_LAST;
+ }
+ }
+
+out_unlock:
+ unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+
+ if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
+ btrfs_release_path(path);
+ ret = flush_fiemap_cache(fieinfo, &cache);
+ if (ret)
+ goto out;
+ len -= cache.next_search_offset - start;
+ start = cache.next_search_offset;
+ goto restart;
+ } else if (ret < 0) {
+ goto out;
+ }
+
+ /*
+ * Must free the path before emitting to the fiemap buffer because we
+ * may have a non-cloned leaf and if the fiemap buffer is memory mapped
+ * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
+ * waiting for an ordered extent that in order to complete needs to
+ * modify that leaf, therefore leading to a deadlock.
+ */
+ btrfs_free_path(path);
+ path = NULL;
+
+ ret = flush_fiemap_cache(fieinfo, &cache);
+ if (ret)
+ goto out;
+
+ ret = emit_last_fiemap_cache(fieinfo, &cache);
+out:
+ free_extent_state(delalloc_cached_state);
+ kfree(cache.entries);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ btrfs_free_path(path);
+ return ret;
+}
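/*
 * The restart protocol used above, as a standalone sketch: buffer entries
 * until the batch fills up, then drop the "locks", drain the batch and resume
 * from the recorded next offset. Everything below (the producer, the batch
 * size, the sentinel value) is invented for illustration and is not btrfs
 * code.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NEED_FLUSH	(-4096)	/* stand-in for BTRFS_FIEMAP_FLUSH_CACHE */
#define SKETCH_BATCH_SIZE	4

/* Pretend producer: one entry per 4K block, stops when the batch is full. */
static int produce_some(uint64_t start, uint64_t end, uint64_t *next_start)
{
	int batched = 0;

	for (uint64_t off = start; off < end; off += 4096) {
		if (batched == SKETCH_BATCH_SIZE) {
			*next_start = off;
			return SKETCH_NEED_FLUSH;
		}
		batched++;
	}
	return 0;
}

static int flush_batch(void)
{
	printf("flushed a batch of entries\n");
	return 0;
}

int main(void)
{
	uint64_t start = 0;
	const uint64_t end = 10 * 4096;
	int ret;

	for (;;) {
		uint64_t next_start;

		ret = produce_some(start, end, &next_start);
		if (ret != SKETCH_NEED_FLUSH)
			break;
		if (flush_batch())	/* drain before resuming the search */
			return 1;
		start = next_start;	/* resume where the last batch ended */
	}
	return ret ? 1 : flush_batch();
}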
+
+int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+{
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ int ret;
+
+ ret = fiemap_prep(inode, fieinfo, start, &len, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * fiemap_prep() called filemap_write_and_wait() for the whole possible
+ * file range (0 to LLONG_MAX), but that is not enough if we have
+ * compression enabled. The first filemap_fdatawrite_range() only kicks
+ * in the compression of data (in an async thread) and will return
+ * before the compression is done and writeback is started. A second
+ * filemap_fdatawrite_range() is needed to wait for the compression to
+ * complete and writeback to start. We also need to wait for ordered
+ * extents to complete, because our fiemap implementation uses mainly
+ * file extent items to list the extents, searching for extent maps
+ * only for file ranges with holes or prealloc extents to figure out
+ * if we have delalloc in those ranges.
+ */
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
+ ret = btrfs_wait_ordered_range(btrfs_inode, 0, LLONG_MAX);
+ if (ret)
+ return ret;
+ }
+
+ btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+ /*
+ * We did an initial flush to avoid holding the inode's lock while
+ * triggering writeback and waiting for the completion of IO and ordered
+ * extents. Now after we locked the inode we do it again, because it's
+ * possible a new write may have happened in between those two steps.
+ */
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
+ ret = btrfs_wait_ordered_range(btrfs_inode, 0, LLONG_MAX);
+ if (ret) {
+ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+ return ret;
+ }
+ }
+
+ ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
+ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+ return ret;
+}
diff --git a/fs/btrfs/fiemap.h b/fs/btrfs/fiemap.h
new file mode 100644
index 000000000000..cfd74b35988f
--- /dev/null
+++ b/fs/btrfs/fiemap.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_FIEMAP_H
+#define BTRFS_FIEMAP_H
+
+#include <linux/fiemap.h>
+
+int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
+
+#endif /* BTRFS_FIEMAP_H */
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index bce95f871750..5c342fe1af61 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -45,13 +45,12 @@
*/
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start, end, i_size;
int ret;
spin_lock(&inode->lock);
i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
- if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
+ if (!inode->file_extent_tree) {
inode->disk_i_size = i_size;
goto out_unlock;
}
@@ -84,13 +83,14 @@ out_unlock:
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
u64 len)
{
+ if (!inode->file_extent_tree)
+ return 0;
+
if (len == 0)
return 0;
ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));
- if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
- return 0;
return set_extent_bit(inode->file_extent_tree, start, start + len - 1,
EXTENT_DIRTY, NULL);
}
@@ -112,14 +112,15 @@ int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
u64 len)
{
+ if (!inode->file_extent_tree)
+ return 0;
+
if (len == 0)
return 0;
ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
len == (u64)-1);
- if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
- return 0;
return clear_extent_bit(inode->file_extent_tree, start,
start + len - 1, EXTENT_DIRTY, NULL);
}
@@ -352,7 +353,7 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
u32 bio_offset = 0;
if ((inode->flags & BTRFS_INODE_NODATASUM) ||
- test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
+ test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state))
return BLK_STS_OK;
/*
@@ -1280,7 +1281,6 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const int slot = path->slots[0];
struct btrfs_key key;
u64 extent_start;
- u64 bytenr;
u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi);
@@ -1290,24 +1290,29 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
em->generation = btrfs_file_extent_generation(leaf, fi);
if (type == BTRFS_FILE_EXTENT_REG ||
type == BTRFS_FILE_EXTENT_PREALLOC) {
+ const u64 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+
em->start = extent_start;
em->len = btrfs_file_extent_end(path) - extent_start;
- em->orig_start = extent_start -
- btrfs_file_extent_offset(leaf, fi);
- em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
- bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- if (bytenr == 0) {
- em->block_start = EXTENT_MAP_HOLE;
+ if (disk_bytenr == 0) {
+ em->disk_bytenr = EXTENT_MAP_HOLE;
+ em->disk_num_bytes = 0;
+ em->offset = 0;
return;
}
+ em->disk_bytenr = disk_bytenr;
+ em->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ em->offset = btrfs_file_extent_offset(leaf, fi);
if (compress_type != BTRFS_COMPRESS_NONE) {
extent_map_set_compression(em, compress_type);
- em->block_start = bytenr;
- em->block_len = em->orig_block_len;
} else {
- bytenr += btrfs_file_extent_offset(leaf, fi);
- em->block_start = bytenr;
- em->block_len = em->len;
+ /*
+ * Older kernels can create regular non-hole data
+ * extents with ram_bytes smaller than disk_num_bytes.
+ * Not a big deal, just always use disk_num_bytes
+ * for ram_bytes.
+ */
+ em->ram_bytes = em->disk_num_bytes;
if (type == BTRFS_FILE_EXTENT_PREALLOC)
em->flags |= EXTENT_FLAG_PREALLOC;
}
@@ -1315,15 +1320,10 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
/* Tree-checker has ensured this. */
ASSERT(extent_start == 0);
- em->block_start = EXTENT_MAP_INLINE;
+ em->disk_bytenr = EXTENT_MAP_INLINE;
em->start = 0;
em->len = fs_info->sectorsize;
- /*
- * Initialize orig_start and block_len with the same values
- * as in inode.c:btrfs_get_extent().
- */
- em->orig_start = EXTENT_MAP_HOLE;
- em->block_len = (u64)-1;
+ em->offset = 0;
extent_map_set_compression(em, compress_type);
} else {
btrfs_err(fs_info,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e764ac3f22e2..21381de906f6 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -17,8 +17,8 @@
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
-#include <linux/iomap.h>
#include "ctree.h"
+#include "direct-io.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
@@ -1104,7 +1104,7 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
&cached_state);
}
ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
- NULL, NULL, NULL, nowait, false);
+ NULL, nowait, false);
if (ret <= 0)
btrfs_drew_write_unlock(&root->snapshot_lock);
else
@@ -1140,8 +1140,7 @@ static void update_time_for_write(struct inode *inode)
inode_inc_iversion(inode);
}
-static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
- size_t count)
+int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
@@ -1187,8 +1186,7 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
return 0;
}
-static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
- struct iov_iter *i)
+ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
{
struct file *file = iocb->ki_filp;
loff_t pos;
@@ -1451,194 +1449,6 @@ out:
return num_written ? num_written : ret;
}
-static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
- const struct iov_iter *iter, loff_t offset)
-{
- const u32 blocksize_mask = fs_info->sectorsize - 1;
-
- if (offset & blocksize_mask)
- return -EINVAL;
-
- if (iov_iter_alignment(iter) & blocksize_mask)
- return -EINVAL;
-
- return 0;
-}
-
-static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- loff_t pos;
- ssize_t written = 0;
- ssize_t written_buffered;
- size_t prev_left = 0;
- loff_t endbyte;
- ssize_t ret;
- unsigned int ilock_flags = 0;
- struct iomap_dio *dio;
-
- if (iocb->ki_flags & IOCB_NOWAIT)
- ilock_flags |= BTRFS_ILOCK_TRY;
-
- /*
- * If the write DIO is within EOF, use a shared lock and also only if
- * security bits will likely not be dropped by file_remove_privs() called
- * from btrfs_write_check(). Either will need to be rechecked after the
- * lock was acquired.
- */
- if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
- ilock_flags |= BTRFS_ILOCK_SHARED;
-
-relock:
- ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
- if (ret < 0)
- return ret;
-
- /* Shared lock cannot be used with security bits set. */
- if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
- ilock_flags &= ~BTRFS_ILOCK_SHARED;
- goto relock;
- }
-
- ret = generic_write_checks(iocb, from);
- if (ret <= 0) {
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
- return ret;
- }
-
- ret = btrfs_write_check(iocb, from, ret);
- if (ret < 0) {
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
- goto out;
- }
-
- pos = iocb->ki_pos;
- /*
- * Re-check since file size may have changed just before taking the
- * lock or pos may have changed because of O_APPEND in generic_write_check()
- */
- if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
- pos + iov_iter_count(from) > i_size_read(inode)) {
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
- ilock_flags &= ~BTRFS_ILOCK_SHARED;
- goto relock;
- }
-
- if (check_direct_IO(fs_info, from, pos)) {
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
- goto buffered;
- }
-
- /*
- * The iov_iter can be mapped to the same file range we are writing to.
- * If that's the case, then we will deadlock in the iomap code, because
- * it first calls our callback btrfs_dio_iomap_begin(), which will create
- * an ordered extent, and after that it will fault in the pages that the
- * iov_iter refers to. During the fault in we end up in the readahead
- * pages code (starting at btrfs_readahead()), which will lock the range,
- * find that ordered extent and then wait for it to complete (at
- * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
- * obviously the ordered extent can never complete as we didn't submit
- * yet the respective bio(s). This always happens when the buffer is
- * memory mapped to the same file range, since the iomap DIO code always
- * invalidates pages in the target file range (after starting and waiting
- * for any writeback).
- *
- * So here we disable page faults in the iov_iter and then retry if we
- * got -EFAULT, faulting in the pages before the retry.
- */
- from->nofault = true;
- dio = btrfs_dio_write(iocb, from, written);
- from->nofault = false;
-
- /*
- * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
- * iocb, and that needs to lock the inode. So unlock it before calling
- * iomap_dio_complete() to avoid a deadlock.
- */
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
-
- if (IS_ERR_OR_NULL(dio))
- ret = PTR_ERR_OR_ZERO(dio);
- else
- ret = iomap_dio_complete(dio);
-
- /* No increment (+=) because iomap returns a cumulative value. */
- if (ret > 0)
- written = ret;
-
- if (iov_iter_count(from) > 0 && (ret == -EFAULT || ret > 0)) {
- const size_t left = iov_iter_count(from);
- /*
- * We have more data left to write. Try to fault in as many as
- * possible of the remainder pages and retry. We do this without
- * releasing and locking again the inode, to prevent races with
- * truncate.
- *
- * Also, in case the iov refers to pages in the file range of the
- * file we want to write to (due to a mmap), we could enter an
- * infinite loop if we retry after faulting the pages in, since
- * iomap will invalidate any pages in the range early on, before
- * it tries to fault in the pages of the iov. So we keep track of
- * how much was left of iov in the previous EFAULT and fallback
- * to buffered IO in case we haven't made any progress.
- */
- if (left == prev_left) {
- ret = -ENOTBLK;
- } else {
- fault_in_iov_iter_readable(from, left);
- prev_left = left;
- goto relock;
- }
- }
-
- /*
- * If 'ret' is -ENOTBLK or we have not written all data, then it means
- * we must fallback to buffered IO.
- */
- if ((ret < 0 && ret != -ENOTBLK) || !iov_iter_count(from))
- goto out;
-
-buffered:
- /*
- * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
- * it must retry the operation in a context where blocking is acceptable,
- * because even if we end up not blocking during the buffered IO attempt
- * below, we will block when flushing and waiting for the IO.
- */
- if (iocb->ki_flags & IOCB_NOWAIT) {
- ret = -EAGAIN;
- goto out;
- }
-
- pos = iocb->ki_pos;
- written_buffered = btrfs_buffered_write(iocb, from);
- if (written_buffered < 0) {
- ret = written_buffered;
- goto out;
- }
- /*
- * Ensure all data is persisted. We want the next direct IO read to be
- * able to read what was just written.
- */
- endbyte = pos + written_buffered - 1;
- ret = btrfs_fdatawrite_range(inode, pos, endbyte);
- if (ret)
- goto out;
- ret = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
- if (ret)
- goto out;
- written += written_buffered;
- iocb->ki_pos = pos + written_buffered;
- invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
- endbyte >> PAGE_SHIFT);
-out:
- return ret < 0 ? ret : written;
-}
-
static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
const struct btrfs_ioctl_encoded_io_args *encoded)
{
@@ -1738,7 +1548,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
return 0;
}
-static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
+static int start_ordered_ops(struct btrfs_inode *inode, loff_t start, loff_t end)
{
int ret;
struct blk_plug plug;
@@ -1758,7 +1568,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
{
- struct btrfs_inode *inode = BTRFS_I(ctx->inode);
+ struct btrfs_inode *inode = ctx->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
@@ -1794,9 +1604,9 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file_dentry(file);
- struct inode *inode = d_inode(dentry);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
@@ -1829,7 +1639,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (ret)
goto out;
- btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+ btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
atomic_inc(&root->log_batch);
@@ -1853,7 +1663,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = start_ordered_ops(inode, start, end);
if (ret) {
- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
goto out;
}
@@ -1865,8 +1675,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* running delalloc the full sync flag may be set if we need to drop
* extra extent map ranges due to temporary memory allocation failures.
*/
- full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
/*
* We have to do this here to avoid the priority inversion of waiting on
@@ -1885,15 +1694,29 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
if (full_sync || btrfs_is_zoned(fs_info)) {
ret = btrfs_wait_ordered_range(inode, start, len);
+ clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
} else {
/*
* Get our ordered extents as soon as possible to avoid doing
* checksum lookups in the csum tree, and use instead the
* checksums attached to the ordered extents.
*/
- btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
- &ctx.ordered_extents);
- ret = filemap_fdatawait_range(inode->i_mapping, start, end);
+ btrfs_get_ordered_extents_for_logging(inode, &ctx.ordered_extents);
+ ret = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, end);
+ if (ret)
+ goto out_release_extents;
+
+ /*
+ * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
+ * starting and waiting for writeback, because for buffered IO
+ * it may have been set during the end IO callback
+ * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
+ * case an error happened and we need to wait for ordered
+ * extents to complete so that any extent maps that point to
+ * unwritten locations are dropped and we don't log them.
+ */
+ if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags))
+ ret = btrfs_wait_ordered_range(inode, start, len);
}
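/*
 * The BTRFS_INODE_COW_WRITE_ERROR handling above is a "latch in the IO
 * completion path, consume atomically after waiting for writeback" pattern.
 * A standalone sketch with C11 atomics - the names and the atomic_bool are
 * stand-ins for illustration, not the kernel's bitops.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool cow_write_error;

/* Called from the (hypothetical) end-IO path when a COW write fails. */
static void note_cow_write_error(void)
{
	atomic_store(&cow_write_error, true);
}

/*
 * Called after waiting for writeback: returns true exactly once per latched
 * error, mirroring test_and_clear_bit(), so the caller knows it still has to
 * wait for ordered extents before logging.
 */
static bool consume_cow_write_error(void)
{
	return atomic_exchange(&cow_write_error, false);
}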
if (ret)
@@ -1907,8 +1730,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* modified so clear this flag in case it was set for whatever
* reason, it's no longer relevant.
*/
- clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
/*
* An ordered extent might have started before and completed
* already with io errors, in which case the inode was not
@@ -1916,7 +1738,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* for any errors that might have happened since we last
* called fsync.
*/
- ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
+ ret = filemap_check_wb_err(inode->vfs_inode.i_mapping, file->f_wb_err);
goto out_release_extents;
}
@@ -1966,7 +1788,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
if (ret == BTRFS_NO_LOG_SYNC) {
ret = btrfs_end_transaction(trans);
@@ -2035,7 +1857,7 @@ out:
out_release_extents:
btrfs_release_log_ctx_extents(&ctx);
- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
goto out;
}
@@ -2334,11 +2156,9 @@ out:
hole_em->start = offset;
hole_em->len = end - offset;
hole_em->ram_bytes = hole_em->len;
- hole_em->orig_start = offset;
- hole_em->block_start = EXTENT_MAP_HOLE;
- hole_em->block_len = 0;
- hole_em->orig_block_len = 0;
+ hole_em->disk_bytenr = EXTENT_MAP_HOLE;
+ hole_em->disk_num_bytes = 0;
hole_em->generation = trans->transid;
ret = btrfs_replace_extent_map_range(inode, hole_em, true);
@@ -2369,7 +2189,7 @@ static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
return PTR_ERR(em);
/* Hole or vacuum extent(only exists in no-hole mode) */
- if (em->block_start == EXTENT_MAP_HOLE) {
+ if (em->disk_bytenr == EXTENT_MAP_HOLE) {
ret = 1;
*len = em->start + em->len > *start + *len ?
0 : *start + *len - em->start - em->len;
@@ -2798,7 +2618,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
- ret = btrfs_wait_ordered_range(inode, offset, len);
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), offset, len);
if (ret)
goto out_only_mutex;
@@ -3026,7 +2846,7 @@ static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
if (IS_ERR(em))
return PTR_ERR(em);
- if (em->block_start == EXTENT_MAP_HOLE)
+ if (em->disk_bytenr == EXTENT_MAP_HOLE)
ret = RANGE_BOUNDARY_HOLE;
else if (em->flags & EXTENT_FLAG_PREALLOC)
ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
@@ -3090,7 +2910,7 @@ static int btrfs_zero_range(struct inode *inode,
ASSERT(IS_ALIGNED(alloc_start, sectorsize));
len = offset + len - alloc_start;
offset = alloc_start;
- alloc_hint = em->block_start + em->len;
+ alloc_hint = extent_map_block_start(em) + em->len;
}
free_extent_map(em);
@@ -3108,7 +2928,7 @@ static int btrfs_zero_range(struct inode *inode,
mode);
goto out;
}
- if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
+ if (len < sectorsize && em->disk_bytenr != EXTENT_MAP_HOLE) {
free_extent_map(em);
ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
0);
@@ -3293,7 +3113,7 @@ static long btrfs_fallocate(struct file *file, int mode,
* the file range and, due to the previous locking we did, we know there
* can't be more delalloc or ordered extents in the range.
*/
- ret = btrfs_wait_ordered_range(inode, alloc_start,
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), alloc_start,
alloc_end - alloc_start);
if (ret)
goto out;
@@ -3321,7 +3141,7 @@ static long btrfs_fallocate(struct file *file, int mode,
last_byte = min(extent_map_end(em), alloc_end);
actual_end = min_t(u64, extent_map_end(em), offset + len);
last_byte = ALIGN(last_byte, blocksize);
- if (em->block_start == EXTENT_MAP_HOLE ||
+ if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(cur_offset >= inode->i_size &&
!(em->flags & EXTENT_FLAG_PREALLOC))) {
const u64 range_len = last_byte - cur_offset;
@@ -3904,97 +3724,6 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
return generic_file_open(inode, filp);
}
-static int check_direct_read(struct btrfs_fs_info *fs_info,
- const struct iov_iter *iter, loff_t offset)
-{
- int ret;
- int i, seg;
-
- ret = check_direct_IO(fs_info, iter, offset);
- if (ret < 0)
- return ret;
-
- if (!iter_is_iovec(iter))
- return 0;
-
- for (seg = 0; seg < iter->nr_segs; seg++) {
- for (i = seg + 1; i < iter->nr_segs; i++) {
- const struct iovec *iov1 = iter_iov(iter) + seg;
- const struct iovec *iov2 = iter_iov(iter) + i;
-
- if (iov1->iov_base == iov2->iov_base)
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- size_t prev_left = 0;
- ssize_t read = 0;
- ssize_t ret;
-
- if (fsverity_active(inode))
- return 0;
-
- if (check_direct_read(inode_to_fs_info(inode), to, iocb->ki_pos))
- return 0;
-
- btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
-again:
- /*
- * This is similar to what we do for direct IO writes, see the comment
- * at btrfs_direct_write(), but we also disable page faults in addition
- * to disabling them only at the iov_iter level. This is because when
- * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
- * which can still trigger page fault ins despite having set ->nofault
- * to true for our 'to' iov_iter.
- *
- * The difference to direct IO writes is that we deadlock when trying
- * to lock the extent range in the inode's tree during the page reads
- * triggered by the fault in (while for writes it is due to waiting for
- * our own ordered extent). This is because for direct IO reads,
- * btrfs_dio_iomap_begin() returns with the extent range locked, which
- * is only unlocked in the endio callback (end_bio_extent_readpage()).
- */
- pagefault_disable();
- to->nofault = true;
- ret = btrfs_dio_read(iocb, to, read);
- to->nofault = false;
- pagefault_enable();
-
- /* No increment (+=) because iomap returns a cumulative value. */
- if (ret > 0)
- read = ret;
-
- if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
- const size_t left = iov_iter_count(to);
-
- if (left == prev_left) {
- /*
- * We didn't make any progress since the last attempt,
- * fall back to a buffered read for the remainder of the
- * range. This is just to avoid any possibility of looping
- * for too long.
- */
- ret = read;
- } else {
- /*
- * We made some progress since the last retry or this is
- * the first time we are retrying. Fault in as many pages
- * as possible and retry.
- */
- fault_in_iov_iter_writeable(to, left);
- prev_left = left;
- goto again;
- }
- }
- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
- return ret < 0 ? ret : read;
-}
-
static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
ssize_t ret = 0;
@@ -4029,8 +3758,9 @@ const struct file_operations btrfs_file_operations = {
.fop_flags = FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
};
-int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end)
{
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
int ret;
/*
@@ -4047,10 +3777,9 @@ int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
* know better and pull this out at some point in the future, it is
* right and you are wrong.
*/
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
- if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ ret = filemap_fdatawrite_range(mapping, start, end);
+ if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags))
+ ret = filemap_fdatawrite_range(mapping, start, end);
return ret;
}
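For context on the final hunk of file.c above: when compression is in play, dirty ranges can be handed off to async (compressed) extent workers, and the first writeback pass may return before those workers have submitted their pages, which is why a second filemap_fdatawrite_range() pass is made when BTRFS_INODE_HAS_ASYNC_EXTENT is set (the long comment block partially visible in the hunk is the tree's own explanation). A hedged sketch of the resulting helper shape, reconstructed for readability from the hunk rather than copied from the tree:

int example_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end)
{
        struct address_space *mapping = inode->vfs_inode.i_mapping;
        int ret;

        /* First pass: submit whatever is dirty right now. */
        ret = filemap_fdatawrite_range(mapping, start, end);

        /*
         * If async (compressed) extents are in flight, pages handed to the
         * workers may not have been picked up by the first pass, so kick
         * writeback one more time.
         */
        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags))
                ret = filemap_fdatawrite_range(mapping, start, end);

        return ret;
}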
diff --git a/fs/btrfs/file.h b/fs/btrfs/file.h
index 77aaca208c7b..912254e653cf 100644
--- a/fs/btrfs/file.h
+++ b/fs/btrfs/file.h
@@ -37,12 +37,14 @@ int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached, bool noreserve);
-int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
+int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end);
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes, bool nowait);
void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
struct extent_state **cached_state,
u64 *delalloc_start_ret, u64 *delalloc_end_ret);
+int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count);
+ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i);
#endif
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 3ab8dea5036b..3f9b7507543a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -82,7 +82,6 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_path *path,
u64 offset)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
@@ -116,7 +115,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
+ inode = btrfs_iget_path(location.objectid, root, path);
btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
@@ -138,7 +137,7 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
spin_lock(&block_group->lock);
if (block_group->inode)
- inode = igrab(block_group->inode);
+ inode = igrab(&block_group->inode->vfs_inode);
spin_unlock(&block_group->lock);
if (inode)
return inode;
@@ -157,7 +156,7 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
}
if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
- block_group->inode = igrab(inode);
+ block_group->inode = BTRFS_I(igrab(inode));
spin_unlock(&block_group->lock);
return inode;
@@ -858,6 +857,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
spin_unlock(&ctl->tree_lock);
btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
+ kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap);
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
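The one-line addition above plugs a memory leak on the duplicate-entry path when loading a free space cache: the bitmap attached to the just-built entry was allocated before the insertion was attempted, so it must be released along with the entry itself. A hedged sketch of the unwind, using only names visible in the hunk (the NULL check is a defensive assumption for the non-bitmap case, not something the hunk shows):

static void example_free_space_entry_abort(struct btrfs_free_space *e)
{
        /* Release the (optional) bitmap page first, then the entry. */
        if (e->bitmap)
                kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap);
        kmem_cache_free(btrfs_free_space_cachep, e);
}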
@@ -1268,7 +1268,7 @@ static int flush_dirty_cache(struct inode *inode)
{
int ret;
- ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
EXTENT_DELALLOC, NULL);
@@ -1483,7 +1483,7 @@ static int __btrfs_write_out_cache(struct inode *inode,
io_ctl->entries = entries;
io_ctl->bitmaps = bitmaps;
- ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
+ ret = btrfs_fdatawrite_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
goto out;
@@ -2697,7 +2697,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
u64 offset = bytenr - block_group->start;
u64 to_free, to_unusable;
int bg_reclaim_threshold = 0;
- bool initial = (size == block_group->length);
+ bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
u64 reclaimable_unusable;
WARN_ON(!initial && offset + size > block_group->zone_capacity);
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 90f2938bd743..7ba50e133921 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1300,10 +1300,14 @@ int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_tree_lock(free_space_root->node);
btrfs_clear_buffer_dirty(trans, free_space_root->node);
btrfs_tree_unlock(free_space_root->node);
- btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
- free_space_root->node, 0, 1);
-
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
+ free_space_root->node, 0, 1);
btrfs_put_root(free_space_root);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
return btrfs_commit_transaction(trans);
}
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 89f0650631cd..1b2a7aa0af36 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -29,7 +29,6 @@
#include "extent-io-tree.h"
#include "async-thread.h"
#include "block-rsv.h"
-#include "fs.h"
struct inode;
struct super_block;
@@ -99,7 +98,9 @@ enum {
/* The btrfs_fs_info created for self-tests */
BTRFS_FS_STATE_DUMMY_FS_INFO,
- BTRFS_FS_STATE_NO_CSUMS,
+ /* Checksum errors are ignored. */
+ BTRFS_FS_STATE_NO_DATA_CSUMS,
+ BTRFS_FS_STATE_SKIP_META_CSUMS,
/* Indicates there was an error cleaning up a log tree. */
BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
@@ -225,6 +226,8 @@ enum {
BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 28),
BTRFS_MOUNT_NODISCARD = (1UL << 29),
BTRFS_MOUNT_NOSPACECACHE = (1UL << 30),
+ BTRFS_MOUNT_IGNOREMETACSUMS = (1UL << 31),
+ BTRFS_MOUNT_IGNORESUPERFLAGS = (1ULL << 32),
};
/*
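A small but easy-to-miss detail in the hunk above: BTRFS_MOUNT_IGNORESUPERFLAGS is the first mount option to occupy bit 32, so it needs the 1ULL suffix to avoid overflowing a 32-bit constant, which also implies the mount_opt field consumed by btrfs_test_opt() further down must be 64 bits wide. A hedged illustration of testing such a bit; the helper name is made up:

/* Illustration only: flags at bit 32 and above require 64-bit constants. */
static inline bool example_ignore_super_flags(const struct btrfs_fs_info *fs_info)
{
        return (fs_info->mount_opt & BTRFS_MOUNT_IGNORESUPERFLAGS) != 0;
}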
@@ -630,6 +633,7 @@ struct btrfs_fs_info {
s32 delalloc_batch;
struct percpu_counter evictable_extent_maps;
+ spinlock_t extent_map_shrinker_lock;
u64 extent_map_shrinker_last_root;
u64 extent_map_shrinker_last_ino;
@@ -957,7 +961,7 @@ static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
/*
* Count how many fs_info->max_extent_size cover the @size
*/
-static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
+static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (!fs_info)
@@ -1018,7 +1022,7 @@ void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
BTRFS_MOUNT_##opt)
-static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
+static inline int btrfs_fs_closing(const struct btrfs_fs_info *fs_info)
{
/* Do it this way so we only ever do one test_bit in the normal case. */
if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
@@ -1037,7 +1041,7 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
* since setting and checking for SB_RDONLY in the superblock's flags is not
* atomic.
*/
-static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
+static inline int btrfs_need_cleaner_sleep(const struct btrfs_fs_info *fs_info)
{
return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
btrfs_fs_closing(fs_info);
@@ -1058,7 +1062,7 @@ static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
#define EXPORT_FOR_TESTS
-static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
+static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}
@@ -1069,7 +1073,7 @@ void btrfs_test_destroy_inode(struct inode *inode);
#define EXPORT_FOR_TESTS static
-static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
+static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
return 0;
}
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 84a94d19b22c..316756ff08ac 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -141,8 +141,8 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
ref_objectid, name);
if (!extref) {
- btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL);
- ret = -EROFS;
+ btrfs_abort_transaction(trans, -ENOENT);
+ ret = -ENOENT;
goto out;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 753db965f7c0..01eab6955647 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -70,31 +70,13 @@
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
+#include "fiemap.h"
struct btrfs_iget_args {
u64 ino;
struct btrfs_root *root;
};
-struct btrfs_dio_data {
- ssize_t submitted;
- struct extent_changeset *data_reserved;
- struct btrfs_ordered_extent *ordered;
- bool data_space_reserved;
- bool nocow_done;
-};
-
-struct btrfs_dio_private {
- /* Range of I/O */
- u64 file_offset;
- u32 bytes;
-
- /* This must be last */
- struct btrfs_bio bbio;
-};
-
-static struct bio_set btrfs_dio_bioset;
-
struct btrfs_rename_ctx {
/* Output field. Stores the index number of the old directory entry. */
u64 index;
@@ -137,11 +119,6 @@ static noinline int run_delalloc_cow(struct btrfs_inode *inode,
struct page *locked_page, u64 start,
u64 end, struct writeback_control *wbc,
bool pages_dirty);
-static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
- u64 len, u64 orig_start, u64 block_start,
- u64 block_len, u64 orig_block_len,
- u64 ram_bytes, int compress_type,
- int type);
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
u64 root, void *warn_ctx)
@@ -877,7 +854,7 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
if (btrfs_test_opt(fs_info, COMPRESS) ||
inode->flags & BTRFS_INODE_COMPRESS ||
inode->prop_compress)
- return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
+ return btrfs_compress_heuristic(inode, start, end);
return 0;
}
@@ -890,6 +867,26 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
btrfs_add_inode_defrag(NULL, inode, small_write);
}
+static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+ unsigned long end_index = end >> PAGE_SHIFT;
+ struct page *page;
+ int ret = 0;
+
+ for (unsigned long index = start >> PAGE_SHIFT;
+ index <= end_index; index++) {
+ page = find_get_page(inode->i_mapping, index);
+ if (unlikely(!page)) {
+ if (!ret)
+ ret = -ENOENT;
+ continue;
+ }
+ clear_page_dirty_for_io(page);
+ put_page(page);
+ }
+ return ret;
+}
+
/*
* Work queue callback to start compression on a file and pages.
*
@@ -931,7 +928,16 @@ static void compress_file_range(struct btrfs_work *work)
* Otherwise applications with the file mmap'd can wander in and change
* the page contents while we are compressing them.
*/
- extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+ ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+
+ /*
+ * All the folios should have been locked, thus no failure.
+ *
+ * Even if some folios are missing, btrfs_compress_folios() would
+ * handle them correctly, so just do an ASSERT() here to catch early
+ * logic errors.
+ */
+ ASSERT(ret == 0);
/*
* We need to save i_size before now because it could change in between
@@ -1152,6 +1158,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
struct btrfs_key ins;
struct page *locked_page = NULL;
struct extent_state *cached = NULL;
@@ -1198,29 +1205,22 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
lock_extent(io_tree, start, end, &cached);
/* Here we're doing allocation and writeback of the compressed pages */
- em = create_io_em(inode, start,
- async_extent->ram_size, /* len */
- start, /* orig_start */
- ins.objectid, /* block_start */
- ins.offset, /* block_len */
- ins.offset, /* orig_block_len */
- async_extent->ram_size, /* ram_bytes */
- async_extent->compress_type,
- BTRFS_ORDERED_COMPRESSED);
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.ram_bytes = async_extent->ram_size;
+ file_extent.num_bytes = async_extent->ram_size;
+ file_extent.offset = 0;
+ file_extent.compression = async_extent->compress_type;
+
+ em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_free_reserve;
}
free_extent_map(em);
- ordered = btrfs_alloc_ordered_extent(inode, start, /* file_offset */
- async_extent->ram_size, /* num_bytes */
- async_extent->ram_size, /* ram_bytes */
- ins.objectid, /* disk_bytenr */
- ins.offset, /* disk_num_bytes */
- 0, /* offset */
- 1 << BTRFS_ORDERED_COMPRESSED,
- async_extent->compress_type);
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+ 1 << BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(ordered)) {
btrfs_drop_extent_map_range(inode, start, end, false);
ret = PTR_ERR(ordered);
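The hunk above is a good example of the btrfs_file_extent consolidation: the six positional arguments that used to be passed to create_io_em() and btrfs_alloc_ordered_extent() now travel in one descriptor. For the compressed case the field mapping is worth spelling out; a hedged sketch with illustrative parameter names, mirroring the assignments in the hunk:

static void example_fill_compressed_extent(struct btrfs_file_extent *fex,
                                           u64 disk_start, u64 compressed_size,
                                           u64 uncompressed_size, int compress_type)
{
        fex->disk_bytenr    = disk_start;        /* ins.objectid: start of the allocation */
        fex->disk_num_bytes = compressed_size;   /* ins.offset: on-disk (compressed) size */
        fex->num_bytes      = uncompressed_size; /* bytes of file data covered */
        fex->ram_bytes      = uncompressed_size; /* uncompressed length */
        fex->offset         = 0;                 /* the whole extent is referenced */
        fex->compression    = compress_type;
}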
@@ -1264,8 +1264,8 @@ out_free_reserve:
kfree(async_extent);
}
-static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
- u64 num_bytes)
+u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
+ u64 num_bytes)
{
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
@@ -1279,15 +1279,15 @@ static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* first block in this inode and use that as a hint. If that
* block is also bogus then just don't worry about it.
*/
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
free_extent_map(em);
em = search_extent_mapping(em_tree, 0, 0);
- if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
- alloc_hint = em->block_start;
+ if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ alloc_hint = extent_map_block_start(em);
if (em)
free_extent_map(em);
} else {
- alloc_hint = em->block_start;
+ alloc_hint = extent_map_block_start(em);
free_extent_map(em);
}
}
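Where the old code read em->block_start directly, the new code goes through extent_map_block_start(), since extent maps now carry disk_bytenr, disk_num_bytes and an in-extent offset instead of block_start/block_len. A hedged sketch of what such an accessor computes; the real helper lives in extent_map.h and may differ in detail:

static inline u64 example_block_start(const struct extent_map *em)
{
        if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE)
                return em->disk_bytenr;              /* HOLE / INLINE markers */
        if (extent_map_is_compressed(em))
                return em->disk_bytenr;              /* compressed data starts at disk_bytenr */
        return em->disk_bytenr + em->offset;
}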
@@ -1375,7 +1375,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
}
}
- alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
+ alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
/*
* Relocation relies on the relocated extents to have exactly the same
@@ -1395,6 +1395,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
while (num_bytes > 0) {
struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
cur_alloc_size = num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
@@ -1431,18 +1432,18 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
extent_reserved = true;
ram_size = ins.offset;
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = ins.offset;
+ file_extent.ram_bytes = ins.offset;
+ file_extent.offset = 0;
+ file_extent.compression = BTRFS_COMPRESS_NONE;
lock_extent(&inode->io_tree, start, start + ram_size - 1,
&cached);
- em = create_io_em(inode, start, ins.offset, /* len */
- start, /* orig_start */
- ins.objectid, /* block_start */
- ins.offset, /* block_len */
- ins.offset, /* orig_block_len */
- ram_size, /* ram_bytes */
- BTRFS_COMPRESS_NONE, /* compress_type */
- BTRFS_ORDERED_REGULAR /* type */);
+ em = btrfs_create_io_em(inode, start, &file_extent,
+ BTRFS_ORDERED_REGULAR);
if (IS_ERR(em)) {
unlock_extent(&inode->io_tree, start,
start + ram_size - 1, &cached);
@@ -1451,10 +1452,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
}
free_extent_map(em);
- ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
- ram_size, ins.objectid, cur_alloc_size,
- 0, 1 << BTRFS_ORDERED_REGULAR,
- BTRFS_COMPRESS_NONE);
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+ 1 << BTRFS_ORDERED_REGULAR);
if (IS_ERR(ordered)) {
unlock_extent(&inode->io_tree, start,
start + ram_size - 1, &cached);
@@ -1617,10 +1616,8 @@ static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_
u64 alloc_hint = 0;
if (do_free) {
- struct async_chunk *async_chunk;
struct async_cow *async_cow;
- async_chunk = container_of(work, struct async_chunk, work);
btrfs_add_delayed_iput(async_chunk->inode);
if (async_chunk->blkcg_css)
css_put(async_chunk->blkcg_css);
@@ -1850,13 +1847,11 @@ struct can_nocow_file_extent_args {
*/
bool free_path;
- /* Output fields. Only set when can_nocow_file_extent() returns 1. */
-
- u64 disk_bytenr;
- u64 disk_num_bytes;
- u64 extent_offset;
- /* Number of bytes that can be written to in NOCOW mode. */
- u64 num_bytes;
+ /*
+ * Output fields. Only set when can_nocow_file_extent() returns 1.
+ * The expected file extent for the NOCOW write.
+ */
+ struct btrfs_file_extent file_extent;
};
/*
@@ -1878,6 +1873,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *fi;
struct btrfs_root *csum_root;
+ u64 io_start;
u64 extent_end;
u8 extent_type;
int can_nocow = 0;
@@ -1890,11 +1886,6 @@ static int can_nocow_file_extent(struct btrfs_path *path,
if (extent_type == BTRFS_FILE_EXTENT_INLINE)
goto out;
- /* Can't access these fields unless we know it's not an inline extent. */
- args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
- args->extent_offset = btrfs_file_extent_offset(leaf, fi);
-
if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
extent_type == BTRFS_FILE_EXTENT_REG)
goto out;
@@ -1910,7 +1901,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
goto out;
/* An explicit hole, must COW. */
- if (args->disk_bytenr == 0)
+ if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
goto out;
/* Compressed/encrypted/encoded extents must be COWed. */
@@ -1921,6 +1912,12 @@ static int can_nocow_file_extent(struct btrfs_path *path,
extent_end = btrfs_file_extent_end(path);
+ args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+ args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+ args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
+ args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
+
/*
* The following checks can be expensive, as they need to take other
* locks and do btree or rbtree searches, so release the path to avoid
@@ -1929,8 +1926,8 @@ static int can_nocow_file_extent(struct btrfs_path *path,
btrfs_release_path(path);
ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
- key->offset - args->extent_offset,
- args->disk_bytenr, args->strict, path);
+ key->offset - args->file_extent.offset,
+ args->file_extent.disk_bytenr, args->strict, path);
WARN_ON_ONCE(ret > 0 && is_freespace_inode);
if (ret != 0)
goto out;
@@ -1951,18 +1948,18 @@ static int can_nocow_file_extent(struct btrfs_path *path,
atomic_read(&root->snapshot_force_cow))
goto out;
- args->disk_bytenr += args->extent_offset;
- args->disk_bytenr += args->start - key->offset;
- args->num_bytes = min(args->end + 1, extent_end) - args->start;
+ args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
+ args->file_extent.offset += args->start - key->offset;
+ io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
/*
* Force COW if csums exist in the range. This ensures that csums for a
* given extent are either valid or do not exist.
*/
- csum_root = btrfs_csum_root(root->fs_info, args->disk_bytenr);
- ret = btrfs_lookup_csums_list(csum_root, args->disk_bytenr,
- args->disk_bytenr + args->num_bytes - 1,
+ csum_root = btrfs_csum_root(root->fs_info, io_start);
+ ret = btrfs_lookup_csums_list(csum_root, io_start,
+ io_start + args->file_extent.num_bytes - 1,
NULL, nowait);
WARN_ON_ONCE(ret > 0 && is_freespace_inode);
if (ret != 0)
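In the rewritten block above the physical start of the NOCOW write is no longer computed by mutating disk_bytenr in place; instead io_start is derived from the descriptor: the extent item's disk_bytenr plus file_extent.offset (already advanced by args->start - key->offset), and csums are looked up over the inclusive range ending at io_start + num_bytes - 1. A hedged sketch of the same arithmetic in isolation:

/* Illustrative helper: the physical byte range a NOCOW write would cover. */
static void example_nocow_io_range(const struct btrfs_file_extent *fe,
                                   u64 *io_start, u64 *io_end)
{
        *io_start = fe->disk_bytenr + fe->offset;
        *io_end   = *io_start + fe->num_bytes - 1;   /* inclusive end for the csum lookup */
}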
@@ -2021,7 +2018,6 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
struct extent_buffer *leaf;
struct extent_state *cached_state = NULL;
u64 extent_end;
- u64 ram_bytes;
u64 nocow_end;
int extent_type;
bool is_prealloc;
@@ -2100,7 +2096,6 @@ next_slot:
ret = -EUCLEAN;
goto error;
}
- ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
extent_end = btrfs_file_extent_end(path);
/*
@@ -2120,7 +2115,9 @@ next_slot:
goto must_cow;
ret = 0;
- nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
+ nocow_bg = btrfs_inc_nocow_writers(fs_info,
+ nocow_args.file_extent.disk_bytenr +
+ nocow_args.file_extent.offset);
if (!nocow_bg) {
must_cow:
/*
@@ -2156,21 +2153,16 @@ must_cow:
}
}
- nocow_end = cur_offset + nocow_args.num_bytes - 1;
+ nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);
is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
if (is_prealloc) {
- u64 orig_start = found_key.offset - nocow_args.extent_offset;
struct extent_map *em;
- em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
- orig_start,
- nocow_args.disk_bytenr, /* block_start */
- nocow_args.num_bytes, /* block_len */
- nocow_args.disk_num_bytes, /* orig_block_len */
- ram_bytes, BTRFS_COMPRESS_NONE,
- BTRFS_ORDERED_PREALLOC);
+ em = btrfs_create_io_em(inode, cur_offset,
+ &nocow_args.file_extent,
+ BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
unlock_extent(&inode->io_tree, cur_offset,
nocow_end, &cached_state);
@@ -2182,12 +2174,10 @@ must_cow:
}
ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
- nocow_args.num_bytes, nocow_args.num_bytes,
- nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
+ &nocow_args.file_extent,
is_prealloc
? (1 << BTRFS_ORDERED_PREALLOC)
- : (1 << BTRFS_ORDERED_NOCOW),
- BTRFS_COMPRESS_NONE);
+ : (1 << BTRFS_ORDERED_NOCOW));
btrfs_dec_nocow_writers(nocow_bg);
if (IS_ERR(ordered)) {
if (is_prealloc) {
@@ -2601,44 +2591,6 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
}
}
-static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
- struct btrfs_ordered_extent *ordered)
-{
- u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
- u64 len = bbio->bio.bi_iter.bi_size;
- struct btrfs_ordered_extent *new;
- int ret;
-
- /* Must always be called for the beginning of an ordered extent. */
- if (WARN_ON_ONCE(start != ordered->disk_bytenr))
- return -EINVAL;
-
- /* No need to split if the ordered extent covers the entire bio. */
- if (ordered->disk_num_bytes == len) {
- refcount_inc(&ordered->refs);
- bbio->ordered = ordered;
- return 0;
- }
-
- /*
- * Don't split the extent_map for NOCOW extents, as we're writing into
- * a pre-existing one.
- */
- if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
- ret = split_extent_map(bbio->inode, bbio->file_offset,
- ordered->num_bytes, len,
- ordered->disk_bytenr);
- if (ret)
- return ret;
- }
-
- new = btrfs_split_ordered_extent(ordered, len);
- if (IS_ERR(new))
- return PTR_ERR(new);
- bbio->ordered = new;
- return 0;
-}
-
/*
* Given a list of ordered sums, record them in the inode. This happens
* at IO completion time based on sums calculated at bio submission time.
@@ -2681,7 +2633,7 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
if (IS_ERR(em))
return PTR_ERR(em);
- if (em->block_start != EXTENT_MAP_HOLE)
+ if (em->disk_bytenr != EXTENT_MAP_HOLE)
goto next;
em_len = em->len;
@@ -3037,10 +2989,8 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
oe->disk_num_bytes);
btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
- if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
+ if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
num_bytes = oe->truncated_len;
- ram_bytes = num_bytes;
- }
btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
@@ -3056,7 +3006,7 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
- return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
+ return insert_reserved_file_extent(trans, oe->inode,
oe->file_offset, &stack_fi,
update_inode_bytes, oe->qgroup_rsv);
}
@@ -3068,7 +3018,7 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
*/
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
- struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
+ struct btrfs_inode *inode = ordered_extent->inode;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans = NULL;
@@ -3302,7 +3252,7 @@ out:
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
- if (btrfs_is_zoned(inode_to_fs_info(ordered->inode)) &&
+ if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
!test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
list_empty(&ordered->bioc_list))
btrfs_finish_ordered_zoned(ordered);
@@ -3596,7 +3546,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, last_objectid, root);
+ inode = btrfs_iget(last_objectid, root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
inode = NULL;
@@ -3781,6 +3731,30 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
return 1;
}
+static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+ if (WARN_ON_ONCE(inode->file_extent_tree))
+ return 0;
+ if (btrfs_fs_incompat(fs_info, NO_HOLES))
+ return 0;
+ if (!S_ISREG(inode->vfs_inode.i_mode))
+ return 0;
+ if (btrfs_is_free_space_inode(inode))
+ return 0;
+
+ inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
+ if (!inode->file_extent_tree)
+ return -ENOMEM;
+
+ extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT);
+ /* Lockdep class is set only for the file extent tree. */
+ lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
+
+ return 0;
+}
+
/*
* read an inode from the btree into the in-memory inode
*/
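btrfs_init_file_extent_tree() above makes the per-inode file extent tree an on-demand allocation: it is only needed for regular files on filesystems without the NO_HOLES feature, and never for free space inodes. Both the inode-read and inode-create paths in this patch call it before anything else; a hedged sketch of that call pattern (the surrounding function is illustrative):

static int example_setup_inode(struct btrfs_inode *inode)
{
        int ret;

        /* Allocate the optional file extent tree before any other setup. */
        ret = btrfs_init_file_extent_tree(inode);
        if (ret)
                return ret;     /* -ENOMEM is the only failure mode */

        /* ... the rest of the inode initialisation goes here ... */
        return 0;
}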
@@ -3800,6 +3774,10 @@ static int btrfs_read_locked_inode(struct inode *inode,
bool filled = false;
int first_xattr_slot;
+ ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
+ if (ret)
+ return ret;
+
ret = btrfs_fill_inode(inode, &rdev);
if (!ret)
filled = true;
@@ -3810,7 +3788,7 @@ static int btrfs_read_locked_inode(struct inode *inode,
return -ENOMEM;
}
- memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
+ btrfs_get_inode_key(BTRFS_I(inode), &location);
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
@@ -3856,7 +3834,9 @@ static int btrfs_read_locked_inode(struct inode *inode,
inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ if (S_ISDIR(inode->i_mode))
+ BTRFS_I(inode)->index_cnt = (u64)-1;
+
btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
@@ -4038,13 +4018,15 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
struct extent_buffer *leaf;
+ struct btrfs_key key;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1);
+ btrfs_get_inode_key(inode, &key);
+ ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@@ -4308,7 +4290,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
objectid = btrfs_root_id(inode->root);
} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
- objectid = inode->location.objectid;
+ objectid = inode->ref_root_id;
} else {
WARN_ON(1);
fscrypt_free_filename(&fname);
@@ -4539,11 +4521,6 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
ret = PTR_ERR(trans);
goto out_release;
}
- ret = btrfs_record_root_in_trans(trans, root);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_end_trans;
- }
btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
@@ -4967,11 +4944,9 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
}
hole_em->start = cur_offset;
hole_em->len = hole_size;
- hole_em->orig_start = cur_offset;
- hole_em->block_start = EXTENT_MAP_HOLE;
- hole_em->block_len = 0;
- hole_em->orig_block_len = 0;
+ hole_em->disk_bytenr = EXTENT_MAP_HOLE;
+ hole_em->disk_num_bytes = 0;
hole_em->ram_bytes = hole_size;
hole_em->generation = btrfs_get_fs_generation(fs_info);
@@ -5049,7 +5024,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
if (btrfs_is_zoned(fs_info)) {
- ret = btrfs_wait_ordered_range(inode,
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode),
ALIGN(newsize, fs_info->sectorsize),
(u64)-1);
if (ret)
@@ -5079,7 +5054,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
* wait for disk_i_size to be stable and then update the
* in-memory size to match.
*/
- err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (err)
return err;
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
@@ -5493,59 +5468,52 @@ out:
return err;
}
-static void inode_tree_add(struct btrfs_inode *inode)
+static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
{
struct btrfs_root *root = inode->root;
- struct btrfs_inode *entry;
- struct rb_node **p;
- struct rb_node *parent;
- struct rb_node *new = &inode->rb_node;
- u64 ino = btrfs_ino(inode);
+ struct btrfs_inode *existing;
+ const u64 ino = btrfs_ino(inode);
+ int ret;
if (inode_unhashed(&inode->vfs_inode))
- return;
- parent = NULL;
- spin_lock(&root->inode_lock);
- p = &root->inode_tree.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct btrfs_inode, rb_node);
+ return 0;
- if (ino < btrfs_ino(entry))
- p = &parent->rb_left;
- else if (ino > btrfs_ino(entry))
- p = &parent->rb_right;
- else {
- WARN_ON(!(entry->vfs_inode.i_state &
- (I_WILL_FREE | I_FREEING)));
- rb_replace_node(parent, new, &root->inode_tree);
- RB_CLEAR_NODE(parent);
- spin_unlock(&root->inode_lock);
- return;
- }
+ if (prealloc) {
+ ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
+ if (ret)
+ return ret;
}
- rb_link_node(new, parent, p);
- rb_insert_color(new, &root->inode_tree);
- spin_unlock(&root->inode_lock);
+
+ existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
+
+ if (xa_is_err(existing)) {
+ ret = xa_err(existing);
+ ASSERT(ret != -EINVAL);
+ ASSERT(ret != -ENOMEM);
+ return ret;
+ } else if (existing) {
+ WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
+ }
+
+ return 0;
}
-static void inode_tree_del(struct btrfs_inode *inode)
+static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
- int empty = 0;
+ struct btrfs_inode *entry;
+ bool empty = false;
- spin_lock(&root->inode_lock);
- if (!RB_EMPTY_NODE(&inode->rb_node)) {
- rb_erase(&inode->rb_node, &root->inode_tree);
- RB_CLEAR_NODE(&inode->rb_node);
- empty = RB_EMPTY_ROOT(&root->inode_tree);
- }
- spin_unlock(&root->inode_lock);
+ xa_lock(&root->inodes);
+ entry = __xa_erase(&root->inodes, btrfs_ino(inode));
+ if (entry == inode)
+ empty = xa_empty(&root->inodes);
+ xa_unlock(&root->inodes);
if (empty && btrfs_root_refs(&root->root_item) == 0) {
- spin_lock(&root->inode_lock);
- empty = RB_EMPTY_ROOT(&root->inode_tree);
- spin_unlock(&root->inode_lock);
+ xa_lock(&root->inodes);
+ empty = xa_empty(&root->inodes);
+ xa_unlock(&root->inodes);
if (empty)
btrfs_add_dead_root(root);
}
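The two functions above replace the per-root rbtree of cached inodes (and its root->inode_lock) with an xarray indexed by inode number. Besides simpler insert/erase code, this allows lockless lookups under RCU; a hedged sketch of what a lookup could look like (a real helper must additionally pin the inode, for example with igrab(), before the RCU read section ends):

static struct btrfs_inode *example_lookup_inode(struct btrfs_root *root, u64 ino)
{
        struct btrfs_inode *inode;

        rcu_read_lock();
        inode = xa_load(&root->inodes, ino);
        /* Illustration only: a reference must be taken here before use. */
        rcu_read_unlock();

        return inode;
}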
@@ -5556,10 +5524,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
struct btrfs_iget_args *args = p;
- inode->i_ino = args->ino;
- BTRFS_I(inode)->location.objectid = args->ino;
- BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.offset = 0;
+ btrfs_set_inode_number(BTRFS_I(inode), args->ino);
BTRFS_I(inode)->root = btrfs_grab_root(args->root);
if (args->root && args->root == args->root->fs_info->tree_root &&
@@ -5573,12 +5538,11 @@ static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
- return args->ino == BTRFS_I(inode)->location.objectid &&
+ return args->ino == btrfs_ino(BTRFS_I(inode)) &&
args->root == BTRFS_I(inode)->root;
}
-static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
- struct btrfs_root *root)
+static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
@@ -5587,7 +5551,7 @@ static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
args.ino = ino;
args.root = root;
- inode = iget5_locked(s, hashval, btrfs_find_actor,
+ inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
return inode;
@@ -5599,41 +5563,44 @@ static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
* allocator. NULL is also valid but may require an additional allocation
* later.
*/
-struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
- struct btrfs_root *root, struct btrfs_path *path)
+struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
+ struct btrfs_path *path)
{
struct inode *inode;
+ int ret;
- inode = btrfs_iget_locked(s, ino, root);
+ inode = btrfs_iget_locked(ino, root);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
- int ret;
+ if (!(inode->i_state & I_NEW))
+ return inode;
- ret = btrfs_read_locked_inode(inode, path);
- if (!ret) {
- inode_tree_add(BTRFS_I(inode));
- unlock_new_inode(inode);
- } else {
- iget_failed(inode);
- /*
- * ret > 0 can come from btrfs_search_slot called by
- * btrfs_read_locked_inode, this means the inode item
- * was not found.
- */
- if (ret > 0)
- ret = -ENOENT;
- inode = ERR_PTR(ret);
- }
- }
+ ret = btrfs_read_locked_inode(inode, path);
+ /*
+ * ret > 0 can come from btrfs_search_slot called by
+ * btrfs_read_locked_inode(), which means the inode item was not found.
+ */
+ if (ret > 0)
+ ret = -ENOENT;
+ if (ret < 0)
+ goto error;
+
+ ret = btrfs_add_inode_to_root(BTRFS_I(inode), true);
+ if (ret < 0)
+ goto error;
+
+ unlock_new_inode(inode);
return inode;
+error:
+ iget_failed(inode);
+ return ERR_PTR(ret);
}
-struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
+struct inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
- return btrfs_iget_path(s, ino, root, NULL);
+ return btrfs_iget_path(ino, root, NULL);
}
static struct inode *new_simple_dir(struct inode *dir,
@@ -5647,10 +5614,11 @@ static struct inode *new_simple_dir(struct inode *dir,
return ERR_PTR(-ENOMEM);
BTRFS_I(inode)->root = btrfs_grab_root(root);
- memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
+ BTRFS_I(inode)->ref_root_id = key->objectid;
+ set_bit(BTRFS_INODE_ROOT_STUB, &BTRFS_I(inode)->runtime_flags);
set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
- inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
+ btrfs_set_inode_number(BTRFS_I(inode), BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
/*
* We only need lookup; the rest is read-only and there's no inode
* associated with the dentry.
@@ -5704,7 +5672,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
return ERR_PTR(ret);
if (location.type == BTRFS_INODE_ITEM_KEY) {
- inode = btrfs_iget(dir->i_sb, location.objectid, root);
+ inode = btrfs_iget(location.objectid, root);
if (IS_ERR(inode))
return inode;
@@ -5728,7 +5696,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
else
inode = new_simple_dir(dir, &location, root);
} else {
- inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
+ inode = btrfs_iget(location.objectid, sub_root);
btrfs_put_root(sub_root);
if (IS_ERR(inode))
@@ -5948,7 +5916,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
addr = private->filldir_buf;
path->reada = READA_FORWARD;
- put = btrfs_readdir_get_delayed_items(inode, private->last_index,
+ put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
&ins_list, &del_list);
again:
@@ -6038,7 +6006,7 @@ nopos:
ret = 0;
err:
if (put)
- btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
+ btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
@@ -6123,7 +6091,7 @@ static int btrfs_insert_inode_locked(struct inode *inode)
{
struct btrfs_iget_args args;
- args.ino = BTRFS_I(inode)->location.objectid;
+ args.ino = btrfs_ino(BTRFS_I(inode));
args.root = BTRFS_I(inode)->root;
return insert_inode_locked4(inode,
@@ -6230,7 +6198,6 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
struct btrfs_root *root;
struct btrfs_inode_item *inode_item;
- struct btrfs_key *location;
struct btrfs_path *path;
u64 objectid;
struct btrfs_inode_ref *ref;
@@ -6239,6 +6206,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_item_batch batch;
unsigned long ptr;
int ret;
+ bool xa_reserved = false;
path = btrfs_alloc_path();
if (!path)
@@ -6248,10 +6216,19 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
root = BTRFS_I(inode)->root;
+ ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
+ if (ret)
+ goto out;
+
ret = btrfs_get_free_objectid(root, &objectid);
if (ret)
goto out;
- inode->i_ino = objectid;
+ btrfs_set_inode_number(BTRFS_I(inode), objectid);
+
+ ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
+ if (ret)
+ goto out;
+ xa_reserved = true;
if (args->orphan) {
/*
@@ -6266,8 +6243,10 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
if (ret)
goto out;
}
- /* index_cnt is ignored for everything but a dir. */
- BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
+
+ if (S_ISDIR(inode->i_mode))
+ BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
+
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
@@ -6294,11 +6273,6 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
BTRFS_INODE_NODATASUM;
}
- location = &BTRFS_I(inode)->location;
- location->objectid = objectid;
- location->offset = 0;
- location->type = BTRFS_INODE_ITEM_KEY;
-
ret = btrfs_insert_inode_locked(inode);
if (ret < 0) {
if (!args->orphan)
@@ -6397,8 +6371,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
* Subvolumes inherit properties from their parent subvolume,
* not the directory they were created in.
*/
- parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
- BTRFS_I(dir)->root);
+ parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
if (IS_ERR(parent)) {
ret = PTR_ERR(parent);
} else {
@@ -6426,7 +6399,12 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
}
}
- inode_tree_add(BTRFS_I(inode));
+ ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
+ if (WARN_ON(ret)) {
+ /* Shouldn't happen; we used xa_reserve() before. */
+ btrfs_abort_transaction(trans, ret);
+ goto discard;
+ }
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
@@ -6454,6 +6432,9 @@ discard:
ihold(inode);
discard_new_inode(inode);
out:
+ if (xa_reserved)
+ xa_release(&root->inodes, objectid);
+
btrfs_free_path(path);
return ret;
}
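btrfs_create_new_inode() now reserves the xarray slot with a sleeping GFP_NOFS allocation before the transaction does any irreversible work, so the later GFP_ATOMIC xa_store() in btrfs_add_inode_to_root() cannot fail with -ENOMEM, and the out: label releases the reservation on error. A hedged, self-contained sketch of that reserve-then-store idiom:

static int example_track_inode(struct xarray *inodes, unsigned long ino, void *inode)
{
        void *old;
        int ret;

        ret = xa_reserve(inodes, ino, GFP_NOFS);    /* may sleep, may fail */
        if (ret)
                return ret;

        /* The slot is preallocated, so this cannot fail with -ENOMEM. */
        old = xa_store(inodes, ino, inode, GFP_ATOMIC);
        if (xa_is_err(old)) {
                xa_release(inodes, ino);
                return xa_err(old);
        }

        return 0;
}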
@@ -6818,7 +6799,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
if (em) {
if (em->start > start || em->start + em->len <= start)
free_extent_map(em);
- else if (em->block_start == EXTENT_MAP_INLINE && page)
+ else if (em->disk_bytenr == EXTENT_MAP_INLINE && page)
free_extent_map(em);
else
goto out;
@@ -6829,9 +6810,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
goto out;
}
em->start = EXTENT_MAP_HOLE;
- em->orig_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
em->len = (u64)-1;
- em->block_len = (u64)-1;
path = btrfs_alloc_path();
if (!path) {
@@ -6921,9 +6901,8 @@ next:
/* New extent overlaps with existing one */
em->start = start;
- em->orig_start = start;
em->len = found_key.offset - start;
- em->block_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
goto insert;
}
@@ -6947,7 +6926,7 @@ next:
*
* Other members are not utilized for inline extents.
*/
- ASSERT(em->block_start == EXTENT_MAP_INLINE);
+ ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
ASSERT(em->len == fs_info->sectorsize);
ret = read_inline_extent(inode, path, page);
@@ -6957,9 +6936,8 @@ next:
}
not_found:
em->start = start;
- em->orig_start = start;
em->len = len;
- em->block_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
insert:
ret = 0;
btrfs_release_path(path);
@@ -6986,84 +6964,6 @@ out:
return em;
}
-static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
- struct btrfs_dio_data *dio_data,
- const u64 start,
- const u64 len,
- const u64 orig_start,
- const u64 block_start,
- const u64 block_len,
- const u64 orig_block_len,
- const u64 ram_bytes,
- const int type)
-{
- struct extent_map *em = NULL;
- struct btrfs_ordered_extent *ordered;
-
- if (type != BTRFS_ORDERED_NOCOW) {
- em = create_io_em(inode, start, len, orig_start, block_start,
- block_len, orig_block_len, ram_bytes,
- BTRFS_COMPRESS_NONE, /* compress_type */
- type);
- if (IS_ERR(em))
- goto out;
- }
- ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
- block_start, block_len, 0,
- (1 << type) |
- (1 << BTRFS_ORDERED_DIRECT),
- BTRFS_COMPRESS_NONE);
- if (IS_ERR(ordered)) {
- if (em) {
- free_extent_map(em);
- btrfs_drop_extent_map_range(inode, start,
- start + len - 1, false);
- }
- em = ERR_CAST(ordered);
- } else {
- ASSERT(!dio_data->ordered);
- dio_data->ordered = ordered;
- }
- out:
-
- return em;
-}
-
-static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
- struct btrfs_dio_data *dio_data,
- u64 start, u64 len)
-{
- struct btrfs_root *root = inode->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_map *em;
- struct btrfs_key ins;
- u64 alloc_hint;
- int ret;
-
- alloc_hint = get_extent_allocation_hint(inode, start, len);
-again:
- ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
- 0, alloc_hint, &ins, 1, 1);
- if (ret == -EAGAIN) {
- ASSERT(btrfs_is_zoned(fs_info));
- wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
- TASK_UNINTERRUPTIBLE);
- goto again;
- }
- if (ret)
- return ERR_PTR(ret);
-
- em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
- ins.objectid, ins.offset, ins.offset,
- ins.offset, BTRFS_ORDERED_REGULAR);
- btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- if (IS_ERR(em))
- btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
- 1);
-
- return em;
-}
-
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group *block_group;
@@ -7098,8 +6998,8 @@ static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
* any ordered extents.
*/
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
- u64 *orig_start, u64 *orig_block_len,
- u64 *ram_bytes, bool nowait, bool strict)
+ struct btrfs_file_extent *file_extent,
+ bool nowait, bool strict)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct can_nocow_file_extent_args nocow_args = { 0 };
@@ -7149,8 +7049,6 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
found_type = btrfs_file_extent_type(leaf, fi);
- if (ram_bytes)
- *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
nocow_args.start = offset;
nocow_args.end = offset + *len - 1;
@@ -7168,14 +7066,16 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
}
ret = 0;
- if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
+ if (btrfs_extent_readonly(fs_info,
+ nocow_args.file_extent.disk_bytenr +
+ nocow_args.file_extent.offset))
goto out;
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
u64 range_end;
- range_end = round_up(offset + nocow_args.num_bytes,
+ range_end = round_up(offset + nocow_args.file_extent.num_bytes,
root->fs_info->sectorsize) - 1;
ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
if (ret) {
@@ -7184,117 +7084,20 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
}
}
- if (orig_start)
- *orig_start = key.offset - nocow_args.extent_offset;
- if (orig_block_len)
- *orig_block_len = nocow_args.disk_num_bytes;
+ if (file_extent)
+ memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
- *len = nocow_args.num_bytes;
+ *len = nocow_args.file_extent.num_bytes;
ret = 1;
out:
btrfs_free_path(path);
return ret;
}
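can_nocow_extent() now reports the NOCOW candidate through an optional struct btrfs_file_extent instead of the old orig_start/orig_block_len/ram_bytes out-parameters, with *len taken from file_extent.num_bytes. A hedged sketch of a caller under the new signature (an illustrative wrapper, not a function from the tree):

/* Returns true when [offset, offset + *len) can be written NOCOW; on
 * success *len may have been trimmed and *fext describes the extent. */
static bool example_can_nocow(struct inode *inode, u64 offset, u64 *len,
                              struct btrfs_file_extent *fext)
{
        return can_nocow_extent(inode, offset, len, fext, false, false) == 1;
}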
-static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
- struct extent_state **cached_state,
- unsigned int iomap_flags)
-{
- const bool writing = (iomap_flags & IOMAP_WRITE);
- const bool nowait = (iomap_flags & IOMAP_NOWAIT);
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_ordered_extent *ordered;
- int ret = 0;
-
- while (1) {
- if (nowait) {
- if (!try_lock_extent(io_tree, lockstart, lockend,
- cached_state))
- return -EAGAIN;
- } else {
- lock_extent(io_tree, lockstart, lockend, cached_state);
- }
- /*
- * We're concerned with the entire range that we're going to be
- * doing DIO to, so we need to make sure there are no ordered
- * extents in this range.
- */
- ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
- lockend - lockstart + 1);
-
- /*
- * We need to make sure there are no buffered pages in this
- * range either, we could have raced between the invalidate in
- * generic_file_direct_write and locking the extent. The
- * invalidate needs to happen so that reads after a write do not
- * get stale data.
- */
- if (!ordered &&
- (!writing || !filemap_range_has_page(inode->i_mapping,
- lockstart, lockend)))
- break;
-
- unlock_extent(io_tree, lockstart, lockend, cached_state);
-
- if (ordered) {
- if (nowait) {
- btrfs_put_ordered_extent(ordered);
- ret = -EAGAIN;
- break;
- }
- /*
- * If we are doing a DIO read and the ordered extent we
- * found is for a buffered write, we can not wait for it
- * to complete and retry, because if we do so we can
- * deadlock with concurrent buffered writes on page
- * locks. This happens only if our DIO read covers more
- * than one extent map, if at this point it has already
- * created an ordered extent for a previous extent map
- * and locked its range in the inode's io tree, and a
- * concurrent write against that previous extent map's
- * range and this range started (we unlock the ranges
- * in the io tree only when the bios complete and
- * buffered writes always lock pages before attempting
- * to lock range in the io tree).
- */
- if (writing ||
- test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
- btrfs_start_ordered_extent(ordered);
- else
- ret = nowait ? -EAGAIN : -ENOTBLK;
- btrfs_put_ordered_extent(ordered);
- } else {
- /*
- * We could trigger writeback for this range (and wait
- * for it to complete) and then invalidate the pages for
- * this range (through invalidate_inode_pages2_range()),
- * but that can lead us to a deadlock with a concurrent
- * call to readahead (a buffered read or a defrag call
- * triggered a readahead) on a page lock due to an
- * ordered dio extent we created before but did not have
- * yet a corresponding bio submitted (whence it can not
- * complete), which makes readahead wait for that
- * ordered extent to complete while holding a lock on
- * that page.
- */
- ret = nowait ? -EAGAIN : -ENOTBLK;
- }
-
- if (ret)
- break;
-
- cond_resched();
- }
-
- return ret;
-}
-
/* The callers of this must take lock_extent() */
-static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
- u64 len, u64 orig_start, u64 block_start,
- u64 block_len, u64 orig_block_len,
- u64 ram_bytes, int compress_type,
- int type)
+struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
+ const struct btrfs_file_extent *file_extent,
+ int type)
{
struct extent_map *em;
int ret;
@@ -7313,32 +7116,26 @@ static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
switch (type) {
case BTRFS_ORDERED_PREALLOC:
- /* Uncompressed extents. */
- ASSERT(block_len == len);
-
/* We're only referring to part of a larger preallocated extent. */
- ASSERT(block_len <= ram_bytes);
+ ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
break;
case BTRFS_ORDERED_REGULAR:
- /* Uncompressed extents. */
- ASSERT(block_len == len);
-
/* COW results in a new extent matching our file extent size. */
- ASSERT(orig_block_len == len);
- ASSERT(ram_bytes == len);
+ ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
+ ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
/* Since it's a new extent, we should not have any offset. */
- ASSERT(orig_start == start);
+ ASSERT(file_extent->offset == 0);
break;
case BTRFS_ORDERED_COMPRESSED:
/* Must be compressed. */
- ASSERT(compress_type != BTRFS_COMPRESS_NONE);
+ ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
/*
* Encoded write can make us refer to part of the
* uncompressed extent.
*/
- ASSERT(len <= ram_bytes);
+ ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
break;
}
@@ -7347,16 +7144,15 @@ static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
return ERR_PTR(-ENOMEM);
em->start = start;
- em->orig_start = orig_start;
- em->len = len;
- em->block_len = block_len;
- em->block_start = block_start;
- em->orig_block_len = orig_block_len;
- em->ram_bytes = ram_bytes;
+ em->len = file_extent->num_bytes;
+ em->disk_bytenr = file_extent->disk_bytenr;
+ em->disk_num_bytes = file_extent->disk_num_bytes;
+ em->ram_bytes = file_extent->ram_bytes;
em->generation = -1;
+ em->offset = file_extent->offset;
em->flags |= EXTENT_FLAG_PINNED;
if (type == BTRFS_ORDERED_COMPRESSED)
- extent_map_set_compression(em, compress_type);
+ extent_map_set_compression(em, file_extent->compression);
ret = btrfs_replace_extent_map_range(inode, em, true);
if (ret) {
@@ -7368,580 +7164,6 @@ static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
return em;
}
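The ASSERT()s in btrfs_create_io_em() above encode the per-type invariants of the new descriptor. Spelled out as a standalone predicate (illustrative only, derived directly from those asserts):

static bool example_file_extent_sane(const struct btrfs_file_extent *fe, int type)
{
        switch (type) {
        case BTRFS_ORDERED_PREALLOC:
                /* May refer to part of a larger preallocated extent. */
                return fe->num_bytes <= fe->ram_bytes;
        case BTRFS_ORDERED_REGULAR:
                /* A fresh COW extent: sizes match and there is no offset. */
                return fe->disk_num_bytes == fe->num_bytes &&
                       fe->ram_bytes == fe->num_bytes &&
                       fe->offset == 0;
        case BTRFS_ORDERED_COMPRESSED:
                /* Must be compressed; encoded writes may use a sub-range. */
                return fe->compression != BTRFS_COMPRESS_NONE &&
                       fe->num_bytes <= fe->ram_bytes;
        default:
                return true;
        }
}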
-
-static int btrfs_get_blocks_direct_write(struct extent_map **map,
- struct inode *inode,
- struct btrfs_dio_data *dio_data,
- u64 start, u64 *lenp,
- unsigned int iomap_flags)
-{
- const bool nowait = (iomap_flags & IOMAP_NOWAIT);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct extent_map *em = *map;
- int type;
- u64 block_start, orig_start, orig_block_len, ram_bytes;
- struct btrfs_block_group *bg;
- bool can_nocow = false;
- bool space_reserved = false;
- u64 len = *lenp;
- u64 prev_len;
- int ret = 0;
-
- /*
- * We don't allocate a new extent in the following cases
- *
- * 1) The inode is marked as NODATACOW. In this case we'll just use the
- * existing extent.
- * 2) The extent is marked as PREALLOC. We're good to go here and can
- * just use the extent.
- *
- */
- if ((em->flags & EXTENT_FLAG_PREALLOC) ||
- ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
- em->block_start != EXTENT_MAP_HOLE)) {
- if (em->flags & EXTENT_FLAG_PREALLOC)
- type = BTRFS_ORDERED_PREALLOC;
- else
- type = BTRFS_ORDERED_NOCOW;
- len = min(len, em->len - (start - em->start));
- block_start = em->block_start + (start - em->start);
-
- if (can_nocow_extent(inode, start, &len, &orig_start,
- &orig_block_len, &ram_bytes, false, false) == 1) {
- bg = btrfs_inc_nocow_writers(fs_info, block_start);
- if (bg)
- can_nocow = true;
- }
- }
-
- prev_len = len;
- if (can_nocow) {
- struct extent_map *em2;
-
- /* We can NOCOW, so only need to reserve metadata space. */
- ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
- nowait);
- if (ret < 0) {
- /* Our caller expects us to free the input extent map. */
- free_extent_map(em);
- *map = NULL;
- btrfs_dec_nocow_writers(bg);
- if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
- ret = -EAGAIN;
- goto out;
- }
- space_reserved = true;
-
- em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
- orig_start, block_start,
- len, orig_block_len,
- ram_bytes, type);
- btrfs_dec_nocow_writers(bg);
- if (type == BTRFS_ORDERED_PREALLOC) {
- free_extent_map(em);
- *map = em2;
- em = em2;
- }
-
- if (IS_ERR(em2)) {
- ret = PTR_ERR(em2);
- goto out;
- }
-
- dio_data->nocow_done = true;
- } else {
- /* Our caller expects us to free the input extent map. */
- free_extent_map(em);
- *map = NULL;
-
- if (nowait) {
- ret = -EAGAIN;
- goto out;
- }
-
- /*
- * If we could not allocate data space before locking the file
- * range and we can't do a NOCOW write, then we have to fail.
- */
- if (!dio_data->data_space_reserved) {
- ret = -ENOSPC;
- goto out;
- }
-
- /*
- * We have to COW and we have already reserved data space before,
- * so now we reserve only metadata.
- */
- ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
- false);
- if (ret < 0)
- goto out;
- space_reserved = true;
-
- em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out;
- }
- *map = em;
- len = min(len, em->len - (start - em->start));
- if (len < prev_len)
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- prev_len - len, true);
- }
-
- /*
- * We have created our ordered extent, so we can now release our reservation
- * for an outstanding extent.
- */
- btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
-
- /*
- * Need to update the i_size under the extent lock so buffered
- * readers will get the updated i_size when we unlock.
- */
- if (start + len > i_size_read(inode))
- i_size_write(inode, start + len);
-out:
- if (ret && space_reserved) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), len);
- btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
- }
- *lenp = len;
- return ret;
-}
-
-static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
- loff_t length, unsigned int flags, struct iomap *iomap,
- struct iomap *srcmap)
-{
- struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct extent_map *em;
- struct extent_state *cached_state = NULL;
- struct btrfs_dio_data *dio_data = iter->private;
- u64 lockstart, lockend;
- const bool write = !!(flags & IOMAP_WRITE);
- int ret = 0;
- u64 len = length;
- const u64 data_alloc_len = length;
- bool unlock_extents = false;
-
- /*
- * We could potentially fault if we have a buffer > PAGE_SIZE, and if
- * we're NOWAIT we may submit a bio for a partial range and return
- * EIOCBQUEUED, which would result in an errant short read.
- *
- * The best way to handle this would be to allow for partial completions
- * of iocb's, so we could submit the partial bio, return and fault in
- * the rest of the pages, and then submit the io for the rest of the
- * range. However we don't have that currently, so simply return
- * -EAGAIN at this point so that the normal path is used.
- */
- if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
- return -EAGAIN;
-
- /*
- * Cap the size of reads to that usually seen in buffered I/O as we need
- * to allocate a contiguous array for the checksums.
- */
- if (!write)
- len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
-
- lockstart = start;
- lockend = start + len - 1;
-
- /*
- * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
- * enough if we've written compressed pages to this area, so we need to
- * flush the dirty pages again to make absolutely sure that any
- * outstanding dirty pages are on disk - the first flush only starts
- * compression on the data, while keeping the pages locked, so by the
- * time the second flush returns we know bios for the compressed pages
- * were submitted and finished, and the pages no longer under writeback.
- *
- * If we have a NOWAIT request and we have any pages in the range that
- * are locked, likely due to compression still in progress, we don't want
- * to block on page locks. We also don't want to block on pages marked as
- * dirty or under writeback (same as for the non-compression case).
- * iomap_dio_rw() did the same check, but after that and before we got
- * here, mmap'ed writes may have happened or buffered reads started
- * (readpage() and readahead(), which lock pages), as we haven't locked
- * the file range yet.
- */
- if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags)) {
- if (flags & IOMAP_NOWAIT) {
- if (filemap_range_needs_writeback(inode->i_mapping,
- lockstart, lockend))
- return -EAGAIN;
- } else {
- ret = filemap_fdatawrite_range(inode->i_mapping, start,
- start + length - 1);
- if (ret)
- return ret;
- }
- }
-
- memset(dio_data, 0, sizeof(*dio_data));
-
- /*
- * We always try to allocate data space and must do it before locking
- * the file range, to avoid deadlocks with concurrent writes to the same
- * range if the range has several extents and the writes don't expand the
- * current i_size (the inode lock is taken in shared mode). If we fail to
- * allocate data space here we continue and later, after locking the
- * file range, we fail with ENOSPC only if we figure out we can not do a
- * NOCOW write.
- */
- if (write && !(flags & IOMAP_NOWAIT)) {
- ret = btrfs_check_data_free_space(BTRFS_I(inode),
- &dio_data->data_reserved,
- start, data_alloc_len, false);
- if (!ret)
- dio_data->data_space_reserved = true;
- else if (ret && !(BTRFS_I(inode)->flags &
- (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
- goto err;
- }
-
- /*
- * If this errors out it's because we couldn't invalidate pagecache for
- * this range and we need to fallback to buffered IO, or we are doing a
- * NOWAIT read/write and we need to block.
- */
- ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
- if (ret < 0)
- goto err;
-
- em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto unlock_err;
- }
-
- /*
- * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
- * io. INLINE is special, and we could probably kludge it in here, but
- * it's still buffered so for safety lets just fall back to the generic
- * buffered path.
- *
- * For COMPRESSED we _have_ to read the entire extent in so we can
- * decompress it, so there will be buffering required no matter what we
- * do, so go ahead and fallback to buffered.
- *
- * We return -ENOTBLK because that's what makes DIO go ahead and go back
- * to buffered IO. Don't blame me, this is the price we pay for using
- * the generic code.
- */
- if (extent_map_is_compressed(em) ||
- em->block_start == EXTENT_MAP_INLINE) {
- free_extent_map(em);
- /*
- * If we are in a NOWAIT context, return -EAGAIN in order to
- * fallback to buffered IO. This is not only because we can
- * block with buffered IO (no support for NOWAIT semantics at
- * the moment) but also to avoid returning short reads to user
- * space - this happens if we were able to read some data from
- * previous non-compressed extents and then when we fallback to
- * buffered IO, at btrfs_file_read_iter() by calling
- * filemap_read(), we fail to fault in pages for the read buffer,
- * in which case filemap_read() returns a short read (the number
- * of bytes previously read is > 0, so it does not return -EFAULT).
- */
- ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
- goto unlock_err;
- }
-
- len = min(len, em->len - (start - em->start));
-
- /*
- * If we have a NOWAIT request and the range contains multiple extents
- * (or a mix of extents and holes), then we return -EAGAIN to make the
- * caller fallback to a context where it can do a blocking (without
- * NOWAIT) request. This way we avoid doing partial IO and returning
- * success to the caller, which is not optimal for writes and for reads
- * it can result in unexpected behaviour for an application.
- *
- * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
- * iomap_dio_rw(), we can end up returning less data then what the caller
- * asked for, resulting in an unexpected, and incorrect, short read.
- * That is, the caller asked to read N bytes and we return less than that,
- * which is wrong unless we are crossing EOF. This happens if we get a
- * page fault error when trying to fault in pages for the buffer that is
- * associated to the struct iov_iter passed to iomap_dio_rw(), and we
- * have previously submitted bios for other extents in the range, in
- * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
- * those bios have completed by the time we get the page fault error,
- * which we return back to our caller - we should only return EIOCBQUEUED
- * after we have submitted bios for all the extents in the range.
- */
- if ((flags & IOMAP_NOWAIT) && len < length) {
- free_extent_map(em);
- ret = -EAGAIN;
- goto unlock_err;
- }
-
- if (write) {
- ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
- start, &len, flags);
- if (ret < 0)
- goto unlock_err;
- unlock_extents = true;
- /* Recalc len in case the new em is smaller than requested */
- len = min(len, em->len - (start - em->start));
- if (dio_data->data_space_reserved) {
- u64 release_offset;
- u64 release_len = 0;
-
- if (dio_data->nocow_done) {
- release_offset = start;
- release_len = data_alloc_len;
- } else if (len < data_alloc_len) {
- release_offset = start + len;
- release_len = data_alloc_len - len;
- }
-
- if (release_len > 0)
- btrfs_free_reserved_data_space(BTRFS_I(inode),
- dio_data->data_reserved,
- release_offset,
- release_len);
- }
- } else {
- /*
- * We need to unlock only the end area that we aren't using.
- * The rest is going to be unlocked by the endio routine.
- */
- lockstart = start + len;
- if (lockstart < lockend)
- unlock_extents = true;
- }
-
- if (unlock_extents)
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
- else
- free_extent_state(cached_state);
-
- /*
- * Translate extent map information to iomap.
- * We trim the extents (and move the addr) even though iomap code does
- * that, since we have locked only the parts we are performing I/O in.
- */
- if ((em->block_start == EXTENT_MAP_HOLE) ||
- ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
- iomap->addr = IOMAP_NULL_ADDR;
- iomap->type = IOMAP_HOLE;
- } else {
- iomap->addr = em->block_start + (start - em->start);
- iomap->type = IOMAP_MAPPED;
- }
- iomap->offset = start;
- iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
- iomap->length = len;
- free_extent_map(em);
-
- return 0;
-
-unlock_err:
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
-err:
- if (dio_data->data_space_reserved) {
- btrfs_free_reserved_data_space(BTRFS_I(inode),
- dio_data->data_reserved,
- start, data_alloc_len);
- extent_changeset_free(dio_data->data_reserved);
- }
-
- return ret;
-}
-
-static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
- ssize_t written, unsigned int flags, struct iomap *iomap)
-{
- struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
- struct btrfs_dio_data *dio_data = iter->private;
- size_t submitted = dio_data->submitted;
- const bool write = !!(flags & IOMAP_WRITE);
- int ret = 0;
-
- if (!write && (iomap->type == IOMAP_HOLE)) {
- /* If reading from a hole, unlock and return */
- unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
- NULL);
- return 0;
- }
-
- if (submitted < length) {
- pos += submitted;
- length -= submitted;
- if (write)
- btrfs_finish_ordered_extent(dio_data->ordered, NULL,
- pos, length, false);
- else
- unlock_extent(&BTRFS_I(inode)->io_tree, pos,
- pos + length - 1, NULL);
- ret = -ENOTBLK;
- }
- if (write) {
- btrfs_put_ordered_extent(dio_data->ordered);
- dio_data->ordered = NULL;
- }
-
- if (write)
- extent_changeset_free(dio_data->data_reserved);
- return ret;
-}
-
-static void btrfs_dio_end_io(struct btrfs_bio *bbio)
-{
- struct btrfs_dio_private *dip =
- container_of(bbio, struct btrfs_dio_private, bbio);
- struct btrfs_inode *inode = bbio->inode;
- struct bio *bio = &bbio->bio;
-
- if (bio->bi_status) {
- btrfs_warn(inode->root->fs_info,
- "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
- btrfs_ino(inode), bio->bi_opf,
- dip->file_offset, dip->bytes, bio->bi_status);
- }
-
- if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
- btrfs_finish_ordered_extent(bbio->ordered, NULL,
- dip->file_offset, dip->bytes,
- !bio->bi_status);
- } else {
- unlock_extent(&inode->io_tree, dip->file_offset,
- dip->file_offset + dip->bytes - 1, NULL);
- }
-
- bbio->bio.bi_private = bbio->private;
- iomap_dio_bio_end_io(bio);
-}
-
-static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
- loff_t file_offset)
-{
- struct btrfs_bio *bbio = btrfs_bio(bio);
- struct btrfs_dio_private *dip =
- container_of(bbio, struct btrfs_dio_private, bbio);
- struct btrfs_dio_data *dio_data = iter->private;
-
- btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
- btrfs_dio_end_io, bio->bi_private);
- bbio->inode = BTRFS_I(iter->inode);
- bbio->file_offset = file_offset;
-
- dip->file_offset = file_offset;
- dip->bytes = bio->bi_iter.bi_size;
-
- dio_data->submitted += bio->bi_iter.bi_size;
-
- /*
- * Check if we are doing a partial write. If we are, we need to split
- * the ordered extent to match the submitted bio. Hang on to the
- * remaining unfinishable ordered_extent in dio_data so that it can be
- * cancelled in iomap_end to avoid a deadlock wherein faulting the
- * remaining pages is blocked on the outstanding ordered extent.
- */
- if (iter->flags & IOMAP_WRITE) {
- int ret;
-
- ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
- if (ret) {
- btrfs_finish_ordered_extent(dio_data->ordered, NULL,
- file_offset, dip->bytes,
- !ret);
- bio->bi_status = errno_to_blk_status(ret);
- iomap_dio_bio_end_io(bio);
- return;
- }
- }
-
- btrfs_submit_bio(bbio, 0);
-}
-
-static const struct iomap_ops btrfs_dio_iomap_ops = {
- .iomap_begin = btrfs_dio_iomap_begin,
- .iomap_end = btrfs_dio_iomap_end,
-};
-
-static const struct iomap_dio_ops btrfs_dio_ops = {
- .submit_io = btrfs_dio_submit_io,
- .bio_set = &btrfs_dio_bioset,
-};
-
-ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
-{
- struct btrfs_dio_data data = { 0 };
-
- return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
- IOMAP_DIO_PARTIAL, &data, done_before);
-}
-
-struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
- size_t done_before)
-{
- struct btrfs_dio_data data = { 0 };
-
- return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
- IOMAP_DIO_PARTIAL, &data, done_before);
-}
-
-static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- u64 start, u64 len)
-{
- struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
- int ret;
-
- ret = fiemap_prep(inode, fieinfo, start, &len, 0);
- if (ret)
- return ret;
-
- /*
- * fiemap_prep() called filemap_write_and_wait() for the whole possible
- * file range (0 to LLONG_MAX), but that is not enough if we have
- * compression enabled. The first filemap_fdatawrite_range() only kicks
- * in the compression of data (in an async thread) and will return
- * before the compression is done and writeback is started. A second
- * filemap_fdatawrite_range() is needed to wait for the compression to
- * complete and writeback to start. We also need to wait for ordered
- * extents to complete, because our fiemap implementation uses mainly
- * file extent items to list the extents, searching for extent maps
- * only for file ranges with holes or prealloc extents to figure out
- * if we have delalloc in those ranges.
- */
- if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
- ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
- if (ret)
- return ret;
- }
-
- btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
-
- /*
- * We did an initial flush to avoid holding the inode's lock while
- * triggering writeback and waiting for the completion of IO and ordered
- * extents. Now after we locked the inode we do it again, because it's
- * possible a new write may have happened in between those two steps.
- */
- if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
- ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
- if (ret) {
- btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
- return ret;
- }
- }
-
- ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
- btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
-
- return ret;
-}
-
/*
* For release_folio() and invalidate_folio() we have a race window where
* folio_end_writeback() is called but the subpage spinlock is not yet released.
@@ -8198,7 +7420,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
if (!skip_writeback) {
- ret = btrfs_wait_ordered_range(&inode->vfs_inode,
+ ret = btrfs_wait_ordered_range(inode,
inode->vfs_inode.i_size & (~mask),
(u64)-1);
if (ret)
@@ -8399,20 +7621,10 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_inode *ei;
struct inode *inode;
- struct extent_io_tree *file_extent_tree = NULL;
-
- /* Self tests may pass a NULL fs_info. */
- if (fs_info && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
- file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
- if (!file_extent_tree)
- return NULL;
- }
ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
- if (!ei) {
- kfree(file_extent_tree);
+ if (!ei)
return NULL;
- }
ei->root = NULL;
ei->generation = 0;
@@ -8425,8 +7637,12 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->disk_i_size = 0;
ei->flags = 0;
ei->ro_flags = 0;
+ /*
+ * ->index_cnt will be properly initialized later when creating a new
+ * inode (btrfs_create_new_inode()) or when reading an existing inode
+ * from disk (btrfs_read_locked_inode()).
+ */
ei->csum_bytes = 0;
- ei->index_cnt = (u64)-1;
ei->dir_index = 0;
ei->last_unlink_trans = 0;
ei->last_reflink_trans = 0;
@@ -8453,20 +7669,14 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
ei->io_tree.inode = ei;
- ei->file_extent_tree = file_extent_tree;
- if (file_extent_tree) {
- extent_io_tree_init(fs_info, ei->file_extent_tree,
- IO_TREE_INODE_FILE_EXTENT);
- /* Lockdep class is set only for the file extent tree. */
- lockdep_set_class(&ei->file_extent_tree->lock, &file_extent_tree_class);
- }
+ ei->file_extent_tree = NULL;
+
mutex_init(&ei->log_mutex);
spin_lock_init(&ei->ordered_tree_lock);
ei->ordered_tree = RB_ROOT;
ei->ordered_tree_last = NULL;
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
- RB_CLEAR_NODE(&ei->rb_node);
init_rwsem(&ei->i_mmap_lock);
return inode;
@@ -8502,9 +7712,10 @@ void btrfs_destroy_inode(struct inode *vfs_inode)
if (!S_ISDIR(vfs_inode->i_mode)) {
WARN_ON(inode->delalloc_bytes);
WARN_ON(inode->new_delalloc_bytes);
+ WARN_ON(inode->csum_bytes);
}
- WARN_ON(inode->csum_bytes);
- WARN_ON(inode->defrag_bytes);
+ if (!root || !btrfs_is_data_reloc_root(root))
+ WARN_ON(inode->defrag_bytes);
/*
* This can happen where we create an inode, but somebody else also
@@ -8538,7 +7749,7 @@ void btrfs_destroy_inode(struct inode *vfs_inode)
}
}
btrfs_qgroup_check_reserved_leak(inode);
- inode_tree_del(inode);
+ btrfs_del_inode_from_root(inode);
btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
btrfs_put_root(inode->root);
@@ -8572,7 +7783,6 @@ void __cold btrfs_destroy_cachep(void)
* destroy cache.
*/
rcu_barrier();
- bioset_exit(&btrfs_dio_bioset);
kmem_cache_destroy(btrfs_inode_cachep);
}
@@ -8583,17 +7793,9 @@ int __init btrfs_init_cachep(void)
SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
init_once);
if (!btrfs_inode_cachep)
- goto fail;
-
- if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
- offsetof(struct btrfs_dio_private, bbio.bio),
- BIOSET_NEED_BVECS))
- goto fail;
+ return -ENOMEM;
return 0;
-fail:
- btrfs_destroy_cachep();
- return -ENOMEM;
}
static int btrfs_getattr(struct mnt_idmap *idmap,
@@ -9586,11 +8788,10 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
}
em->start = cur_offset;
- em->orig_start = cur_offset;
em->len = ins.offset;
- em->block_start = ins.objectid;
- em->block_len = ins.offset;
- em->orig_block_len = ins.offset;
+ em->disk_bytenr = ins.objectid;
+ em->offset = 0;
+ em->disk_num_bytes = ins.offset;
em->ram_bytes = ins.offset;
em->flags |= EXTENT_FLAG_PREALLOC;
em->generation = trans->transid;
@@ -9956,7 +9157,7 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages)
return -ENOMEM;
- ret = btrfs_alloc_page_array(nr_pages, pages, 0);
+ ret = btrfs_alloc_page_array(nr_pages, pages, false);
if (ret) {
ret = -ENOMEM;
goto out;
@@ -10033,7 +9234,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
for (;;) {
struct btrfs_ordered_extent *ordered;
- ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
+ ret = btrfs_wait_ordered_range(inode, start,
lockend - start + 1);
if (ret)
goto out_unlock_inode;
@@ -10053,7 +9254,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
goto out_unlock_extent;
}
- if (em->block_start == EXTENT_MAP_INLINE) {
+ if (em->disk_bytenr == EXTENT_MAP_INLINE) {
u64 extent_start = em->start;
/*
@@ -10074,33 +9275,33 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
*/
encoded->len = min_t(u64, extent_map_end(em),
inode->vfs_inode.i_size) - iocb->ki_pos;
- if (em->block_start == EXTENT_MAP_HOLE ||
+ if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(em->flags & EXTENT_FLAG_PREALLOC)) {
disk_bytenr = EXTENT_MAP_HOLE;
count = min_t(u64, count, encoded->len);
encoded->len = count;
encoded->unencoded_len = count;
} else if (extent_map_is_compressed(em)) {
- disk_bytenr = em->block_start;
+ disk_bytenr = em->disk_bytenr;
/*
* Bail if the buffer isn't large enough to return the whole
* compressed extent.
*/
- if (em->block_len > count) {
+ if (em->disk_num_bytes > count) {
ret = -ENOBUFS;
goto out_em;
}
- disk_io_size = em->block_len;
- count = em->block_len;
+ disk_io_size = em->disk_num_bytes;
+ count = em->disk_num_bytes;
encoded->unencoded_len = em->ram_bytes;
- encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
+ encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
ret = btrfs_encoded_io_compression_from_extent(fs_info,
extent_map_compression(em));
if (ret < 0)
goto out_em;
encoded->compression = ret;
} else {
- disk_bytenr = em->block_start + (start - em->start);
+ disk_bytenr = extent_map_block_start(em) + (start - em->start);
if (encoded->len > count)
encoded->len = count;
/*
@@ -10155,6 +9356,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
struct extent_changeset *data_reserved = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
int compression;
size_t orig_count;
u64 start, end;
@@ -10276,7 +9478,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
for (;;) {
struct btrfs_ordered_extent *ordered;
- ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
+ ret = btrfs_wait_ordered_range(inode, start, num_bytes);
if (ret)
goto out_folios;
ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
@@ -10330,22 +9532,22 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
goto out_delalloc_release;
extent_reserved = true;
- em = create_io_em(inode, start, num_bytes,
- start - encoded->unencoded_offset, ins.objectid,
- ins.offset, ins.offset, ram_bytes, compression,
- BTRFS_ORDERED_COMPRESSED);
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = num_bytes;
+ file_extent.ram_bytes = ram_bytes;
+ file_extent.offset = encoded->unencoded_offset;
+ file_extent.compression = compression;
+ em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_free_reserved;
}
free_extent_map(em);
- ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes,
- ins.objectid, ins.offset,
- encoded->unencoded_offset,
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
(1 << BTRFS_ORDERED_ENCODED) |
- (1 << BTRFS_ORDERED_COMPRESSED),
- compression);
+ (1 << BTRFS_ORDERED_COMPRESSED));
if (IS_ERR(ordered)) {
btrfs_drop_extent_map_range(inode, start, end, false);
ret = PTR_ERR(ordered);
@@ -10385,7 +9587,7 @@ out_unlock:
out_folios:
for (i = 0; i < nr_folios; i++) {
if (folios[i])
- __folio_put(folios[i]);
+ folio_put(folios[i]);
}
kvfree(folios);
out:
@@ -10549,7 +9751,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
* file changes again after this, the user is doing something stupid and
* we don't really care.
*/
- ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
return ret;
@@ -10635,12 +9837,12 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
goto out;
}
- if (em->block_start == EXTENT_MAP_HOLE) {
+ if (em->disk_bytenr == EXTENT_MAP_HOLE) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
- if (em->block_start == EXTENT_MAP_INLINE) {
+ if (em->disk_bytenr == EXTENT_MAP_INLINE) {
/*
* It's unlikely we'll ever actually find ourselves
* here, as a file small enough to fit inline won't be
@@ -10658,12 +9860,12 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
goto out;
}
- logical_block_start = em->block_start + (start - em->start);
+ logical_block_start = extent_map_block_start(em) + (start - em->start);
len = min(len, em->len - (start - em->start));
free_extent_map(em);
em = NULL;
- ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
+ ret = can_nocow_extent(inode, start, &len, NULL, false, true);
if (ret < 0) {
goto out;
} else if (ret) {
@@ -10860,52 +10062,23 @@ void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 en
*/
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
{
- struct rb_node *node;
- struct rb_node *prev;
struct btrfs_inode *inode;
+ unsigned long from = min_ino;
- spin_lock(&root->inode_lock);
-again:
- node = root->inode_tree.rb_node;
- prev = NULL;
- while (node) {
- prev = node;
- inode = rb_entry(node, struct btrfs_inode, rb_node);
- if (min_ino < btrfs_ino(inode))
- node = node->rb_left;
- else if (min_ino > btrfs_ino(inode))
- node = node->rb_right;
- else
+ xa_lock(&root->inodes);
+ while (true) {
+ inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
+ if (!inode)
+ break;
+ if (igrab(&inode->vfs_inode))
break;
- }
-
- if (!node) {
- while (prev) {
- inode = rb_entry(prev, struct btrfs_inode, rb_node);
- if (min_ino <= btrfs_ino(inode)) {
- node = prev;
- break;
- }
- prev = rb_next(prev);
- }
- }
-
- while (node) {
- inode = rb_entry(prev, struct btrfs_inode, rb_node);
- if (igrab(&inode->vfs_inode)) {
- spin_unlock(&root->inode_lock);
- return inode;
- }
-
- min_ino = btrfs_ino(inode) + 1;
- if (cond_resched_lock(&root->inode_lock))
- goto again;
- node = rb_next(node);
+ from = btrfs_ino(inode) + 1;
+ cond_resched_lock(&root->inodes.xa_lock);
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->inodes);
- return NULL;
+ return inode;
}
static const struct inode_operations btrfs_dir_inode_operations = {
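For reference, the inode.c hunks above drop the old extent_map fields (block_start, block_len, orig_start, orig_block_len) in favor of disk_bytenr, disk_num_bytes and offset plus helpers. A minimal sketch of the translation pattern used by the converted call sites, with em, start and pos standing for the extent map, file offset and iocb position from those hunks:

	/* Physical start of the mapped range, formerly em->block_start + delta. */
	u64 disk_bytenr = extent_map_block_start(em) + (start - em->start);

	/* Offset into the unencoded extent, formerly pos - em->orig_start. */
	u64 unencoded_offset = pos - (em->start - em->offset);

	/* Holes and inline extents are now detected via em->disk_bytenr. */
	bool is_hole = (em->disk_bytenr == EXTENT_MAP_HOLE);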
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index efd5d6e9589e..e0a664b8a46a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -375,15 +375,15 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
return PTR_ERR(trans);
if (comp) {
- ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
- strlen(comp), 0);
+ ret = btrfs_set_prop(trans, BTRFS_I(inode), "btrfs.compression",
+ comp, strlen(comp), 0);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
} else {
- ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
- 0, 0);
+ ret = btrfs_set_prop(trans, BTRFS_I(inode), "btrfs.compression",
+ NULL, 0, 0);
if (ret && ret != -ENODATA) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
@@ -552,7 +552,7 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
return 0;
}
-int __pure btrfs_is_empty_uuid(u8 *uuid)
+int __pure btrfs_is_empty_uuid(const u8 *uuid)
{
int i;
@@ -658,15 +658,10 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
ret = PTR_ERR(trans);
goto out_release_rsv;
}
- ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
- if (ret)
- goto out;
btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
- /* Tree log can't currently deal with an inode which is a new root. */
- btrfs_set_log_full_commit(trans);
ret = btrfs_qgroup_inherit(trans, 0, objectid, btrfs_root_id(root), inherit);
if (ret)
@@ -719,6 +714,8 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
root_item);
if (ret) {
+ int ret2;
+
/*
* Since we don't abort the transaction in this case, free the
* tree block so that we don't leak space and leave the
@@ -729,7 +726,9 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
btrfs_tree_lock(leaf);
btrfs_clear_buffer_dirty(trans, leaf);
btrfs_tree_unlock(leaf);
- btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
+ ret2 = btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
+ if (ret2 < 0)
+ btrfs_abort_transaction(trans, ret2);
free_extent_buffer(leaf);
goto out;
}
@@ -767,6 +766,8 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
goto out;
}
+ btrfs_record_new_subvolume(trans, BTRFS_I(dir));
+
d_instantiate_new(dentry, new_inode_args.inode);
new_inode_args.inode = NULL;
@@ -854,7 +855,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
- pending_snapshot->dir = dir;
+ pending_snapshot->dir = BTRFS_I(dir);
pending_snapshot->inherit = inherit;
trans = btrfs_start_transaction(root, 0);
@@ -1070,7 +1071,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
atomic_inc(&root->snapshot_force_cow);
snapshot_force_cow = true;
- btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
ret = btrfs_mksubvol(parent, idmap, name, namelen,
root, readonly, inherit);
@@ -1917,8 +1918,7 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
struct btrfs_ioctl_ino_lookup_user_args *args)
{
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
- struct super_block *sb = inode->i_sb;
- struct btrfs_key upper_limit = BTRFS_I(inode)->location;
+ u64 upper_limit = btrfs_ino(BTRFS_I(inode));
u64 treeid = btrfs_root_id(BTRFS_I(inode)->root);
u64 dirid = args->dirid;
unsigned long item_off;
@@ -1944,7 +1944,7 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
* If the bottom subvolume does not exist directly under upper_limit,
* construct the path in from the bottom up.
*/
- if (dirid != upper_limit.objectid) {
+ if (dirid != upper_limit) {
ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];
root = btrfs_get_fs_root(fs_info, treeid, true);
@@ -2006,7 +2006,7 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
* btree and lock the same leaf.
*/
btrfs_release_path(path);
- temp_inode = btrfs_iget(sb, key2.objectid, root);
+ temp_inode = btrfs_iget(key2.objectid, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out_put;
@@ -2019,7 +2019,7 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
goto out_put;
}
- if (key.offset == upper_limit.objectid)
+ if (key.offset == upper_limit)
break;
if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
ret = -EACCES;
@@ -2140,7 +2140,7 @@ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
inode = file_inode(file);
if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
- BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
+ btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
/*
* The subvolume does not exist under fd with which this is
* called
@@ -3807,12 +3807,29 @@ drop_write:
return ret;
}
+/*
+ * Quick check for ioctl handlers whether quotas are enabled. Proper locking must be
+ * done before any operations.
+ */
+static bool qgroup_enabled(struct btrfs_fs_info *fs_info)
+{
+ bool ret = true;
+
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
+ if (!fs_info->quota_root)
+ ret = false;
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
+
+ return ret;
+}
+
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_assign_args *sa;
+ struct btrfs_qgroup_list *prealloc = NULL;
struct btrfs_trans_handle *trans;
int ret;
int err;
@@ -3820,6 +3837,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!qgroup_enabled(root->fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -3830,14 +3850,27 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
goto drop_write;
}
+ if (sa->assign) {
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto drop_write;
+ }
+ }
+
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
+ /*
+ * Prealloc ownership is moved to the relation handler; there it's used
+ * or freed on error.
+ */
if (sa->assign) {
- ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
+ ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc);
+ prealloc = NULL;
} else {
ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
}
@@ -3847,13 +3880,15 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
err = btrfs_run_qgroups(trans);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (err < 0)
- btrfs_handle_fs_error(fs_info, err,
- "failed to update qgroup status and info");
+ btrfs_warn(fs_info,
+ "qgroup status update failed after %s relation, marked as inconsistent",
+ sa->assign ? "adding" : "deleting");
err = btrfs_end_transaction(trans);
if (err && !ret)
ret = err;
out:
+ kfree(prealloc);
kfree(sa);
drop_write:
mnt_drop_write_file(file);
@@ -3872,6 +3907,9 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!qgroup_enabled(root->fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -3928,6 +3966,9 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!qgroup_enabled(root->fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -3973,6 +4014,9 @@ static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!qgroup_enabled(fs_info))
+ return -ENOTCONN;
+
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -4429,7 +4473,7 @@ out_drop_write:
return ret;
}
-static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat)
+static int _btrfs_ioctl_send(struct btrfs_inode *inode, void __user *argp, bool compat)
{
struct btrfs_ioctl_send_args *arg;
int ret;
@@ -4627,7 +4671,7 @@ static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool
goto out_iov;
init_sync_kiocb(&kiocb, file);
- ret = kiocb_set_rw_flags(&kiocb, 0);
+ ret = kiocb_set_rw_flags(&kiocb, 0, WRITE);
if (ret)
goto out_iov;
kiocb.ki_pos = pos;
@@ -4751,10 +4795,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
case BTRFS_IOC_SEND:
- return _btrfs_ioctl_send(inode, argp, false);
+ return _btrfs_ioctl_send(BTRFS_I(inode), argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
case BTRFS_IOC_SEND_32:
- return _btrfs_ioctl_send(inode, argp, true);
+ return _btrfs_ioctl_send(BTRFS_I(inode), argp, true);
#endif
case BTRFS_IOC_GET_DEV_STATS:
return btrfs_ioctl_get_dev_stats(fs_info, argp);
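A condensed sketch of the new qgroup assign flow from the ioctl.c hunks above (copy_from_user, mnt_want_write and transaction teardown omitted): handlers bail out early when quotas are disabled, and the relation list node is preallocated outside the transaction, with ownership handed to btrfs_add_qgroup_relation(), which uses or frees it:

	if (!qgroup_enabled(root->fs_info))
		return -ENOTCONN;

	if (sa->assign) {
		prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
		if (!prealloc)
			return -ENOMEM;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Ownership moves to the handler, which uses or frees the node. */
	if (sa->assign) {
		ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc);
		prealloc = NULL;
	} else {
		ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
	}

out:
	/* Non-NULL only when an error path skipped the handler call. */
	kfree(prealloc);
	return ret;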
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 2c5dc25ec670..19cd26b0244a 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -19,7 +19,7 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
int btrfs_ioctl_get_supported_features(void __user *arg);
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int __pure btrfs_is_empty_uuid(u8 *uuid);
+int __pure btrfs_is_empty_uuid(const u8 *uuid);
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_balance_args *bargs);
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 1bc8e6738879..3c15c75e0582 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -11,7 +11,6 @@
#include <linux/lockdep.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"
-#include "locking.h"
struct extent_buffer;
struct btrfs_path;
diff --git a/fs/btrfs/lru_cache.h b/fs/btrfs/lru_cache.h
index e32906ab6faa..07f1bb1c6aa3 100644
--- a/fs/btrfs/lru_cache.h
+++ b/fs/btrfs/lru_cache.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/maple_tree.h>
#include <linux/list.h>
-#include "lru_cache.h"
/*
* A cache entry. This is meant to be embedded in a structure of a user of
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 1c396ac167aa..1e2a68b8f62d 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -258,8 +258,8 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
workspace->cbuf, &out_len,
workspace->mem);
kunmap_local(data_in);
- if (ret < 0) {
- pr_debug("BTRFS: lzo in loop returned %d\n", ret);
+ if (unlikely(ret < 0)) {
+ /* lzo1x_1_compress never fails. */
ret = -EIO;
goto out;
}
@@ -354,11 +354,14 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
* and all sectors should be used.
* If this happens, it means the compressed extent is corrupted.
*/
- if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
- round_up(len_in, sectorsize) < cb->compressed_len) {
+ if (unlikely(len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
+ round_up(len_in, sectorsize) < cb->compressed_len)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
btrfs_err(fs_info,
- "invalid lzo header, lzo len %u compressed len %u",
- len_in, cb->compressed_len);
+"lzo header invalid, root %llu inode %llu offset %llu lzo len %u compressed len %u",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ cb->start, len_in, cb->compressed_len);
return -EUCLEAN;
}
@@ -383,13 +386,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
kunmap_local(kaddr);
cur_in += LZO_LEN;
- if (seg_len > WORKSPACE_CBUF_LENGTH) {
+ if (unlikely(seg_len > WORKSPACE_CBUF_LENGTH)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
/*
* seg_len shouldn't be larger than we have allocated
* for workspace->cbuf
*/
- btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
- seg_len);
+ btrfs_err(fs_info,
+ "lzo segment too big, root %llu inode %llu offset %llu len %u",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ cb->start, seg_len);
return -EIO;
}
@@ -399,8 +406,13 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* Decompress the data */
ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
workspace->buf, &out_len);
- if (ret != LZO_E_OK) {
- btrfs_err(fs_info, "failed to decompress");
+ if (unlikely(ret != LZO_E_OK)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ btrfs_err(fs_info,
+ "lzo decompression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ cb->start);
return -EIO;
}
@@ -454,8 +466,13 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
out_len = sectorsize;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
- if (ret != LZO_E_OK) {
- pr_warn("BTRFS: decompress failed!\n");
+ if (unlikely(ret != LZO_E_OK)) {
+ struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+
+ btrfs_err(fs_info,
+ "lzo decompression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ page_offset(dest_page));
ret = -EIO;
goto out;
}
diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
index 210d9c82e2ae..77752eec125d 100644
--- a/fs/btrfs/messages.c
+++ b/fs/btrfs/messages.c
@@ -20,7 +20,8 @@ static const char fs_state_chars[] = {
[BTRFS_FS_STATE_TRANS_ABORTED] = 'A',
[BTRFS_FS_STATE_DEV_REPLACING] = 'R',
[BTRFS_FS_STATE_DUMMY_FS_INFO] = 0,
- [BTRFS_FS_STATE_NO_CSUMS] = 'C',
+ [BTRFS_FS_STATE_NO_DATA_CSUMS] = 'C',
+ [BTRFS_FS_STATE_SKIP_META_CSUMS] = 'S',
[BTRFS_FS_STATE_LOG_CLEANUP_ERROR] = 'L',
};
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index dde4904aead9..0d599fd847c9 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -66,7 +66,7 @@ struct rb_simple_node {
u64 bytenr;
};
-static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
+static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
struct rb_node *node = root->rb_node;
struct rb_simple_node *entry;
@@ -93,7 +93,7 @@ static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
* Return the rb_node that starts at or after @bytenr. If there is no entry
* at or after @bytenr, return NULL.
*/
-static inline struct rb_node *rb_simple_search_first(struct rb_root *root,
+static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
u64 bytenr)
{
struct rb_node *node = root->rb_node, *ret = NULL;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index c5bdd674f55c..82a68394a89c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -19,6 +19,7 @@
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
+#include "block-group.h"
static struct kmem_cache *btrfs_ordered_extent_cache;
@@ -179,7 +180,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
entry->disk_num_bytes = disk_num_bytes;
entry->offset = offset;
entry->bytes_left = num_bytes;
- entry->inode = igrab(&inode->vfs_inode);
+ entry->inode = BTRFS_I(igrab(&inode->vfs_inode));
entry->compress_type = compress_type;
entry->truncated_len = (u64)-1;
entry->qgroup_rsv = qgroup_rsv;
@@ -207,7 +208,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
- struct btrfs_inode *inode = BTRFS_I(entry->inode);
+ struct btrfs_inode *inode = entry->inode;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
@@ -223,7 +224,7 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
spin_lock_irq(&inode->ordered_tree_lock);
node = tree_insert(&inode->ordered_tree, entry->file_offset,
&entry->rb_node);
- if (node)
+ if (unlikely(node))
btrfs_panic(fs_info, -EEXIST,
"inconsistency in ordered tree at offset %llu",
entry->file_offset);
@@ -263,17 +264,39 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
*/
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
struct btrfs_inode *inode, u64 file_offset,
- u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
- u64 disk_num_bytes, u64 offset, unsigned long flags,
- int compress_type)
+ const struct btrfs_file_extent *file_extent, unsigned long flags)
{
struct btrfs_ordered_extent *entry;
ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
- entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
- disk_bytenr, disk_num_bytes, offset, flags,
- compress_type);
+ /*
+ * For regular writes, we just use the members in @file_extent.
+ *
+ * For NOCOW, we don't really care about the numbers except @start and
+ * file_extent->num_bytes, as we won't insert a file extent item at all.
+ *
+ * For PREALLOC, we do not use ordered extent members, but
+ * btrfs_mark_extent_written() handles everything.
+ *
+ * So here we always pass 0 as offset for NOCOW/PREALLOC ordered extents,
+ * or btrfs_split_ordered_extent() cannot handle it correctly.
+ */
+ if (flags & ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)))
+ entry = alloc_ordered_extent(inode, file_offset,
+ file_extent->num_bytes,
+ file_extent->num_bytes,
+ file_extent->disk_bytenr + file_extent->offset,
+ file_extent->num_bytes, 0, flags,
+ file_extent->compression);
+ else
+ entry = alloc_ordered_extent(inode, file_offset,
+ file_extent->num_bytes,
+ file_extent->ram_bytes,
+ file_extent->disk_bytenr,
+ file_extent->disk_num_bytes,
+ file_extent->offset, flags,
+ file_extent->compression);
if (!IS_ERR(entry))
insert_ordered_extent(entry);
return entry;
@@ -287,7 +310,7 @@ struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum)
{
- struct btrfs_inode *inode = BTRFS_I(entry->inode);
+ struct btrfs_inode *inode = entry->inode;
spin_lock_irq(&inode->ordered_tree_lock);
list_add_tail(&sum->list, &entry->list);
@@ -297,7 +320,7 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
{
if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
- mapping_set_error(ordered->inode->i_mapping, -EIO);
+ mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO);
}
static void finish_ordered_fn(struct btrfs_work *work)
@@ -312,7 +335,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
struct page *page, u64 file_offset,
u64 len, bool uptodate)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
lockdep_assert_held(&inode->ordered_tree_lock);
@@ -365,7 +388,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
fs_info->endio_freespace_worker : fs_info->endio_write_workers;
@@ -374,11 +397,11 @@ static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
btrfs_queue_work(wq, &ordered->work);
}
-bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
+void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
struct page *page, u64 file_offset, u64 len,
bool uptodate)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_inode *inode = ordered->inode;
unsigned long flags;
bool ret;
@@ -388,9 +411,39 @@ bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ /*
+ * If this is a COW write it means we created new extent maps for the
+ * range and they point to unwritten locations if we got an error either
+ * before submitting a bio or during IO.
+ *
+ * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
+ * are queuing its completion below. During completion, at
+ * btrfs_finish_one_ordered(), we will drop the extent maps for the
+ * unwritten extents.
+ *
+ * However because completion runs in a work queue we can end up having
+ * a fast fsync running before that. In the case of direct IO, once we
+ * unlock the inode the fsync might start, and we queue the completion
+ * before unlocking the inode. In the case of buffered IO when writeback
+ * finishes (end_bbio_data_write()) we queue the completion, so if the
+ * writeback was triggered by a fast fsync, the fsync might start
+ * logging before ordered extent completion runs in the work queue.
+ *
+ * The fast fsync will log file extent items based on the extent maps it
+ * finds, so if by the time it collects extent maps the ordered extent
+ * completion didn't happen yet, it will log file extent items that
+ * point to unwritten extents, resulting in a corruption if a crash
+ * happens and the log tree is replayed. Note that a fast fsync does not
+ * wait for completion of ordered extents in order to reduce latency.
+ *
+ * Set a flag in the inode so that the next fast fsync will wait for
+ * ordered extents to complete before starting to log.
+ */
+ if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
+ set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
+
if (ret)
btrfs_queue_ordered_fn(ordered);
- return ret;
}
/*
@@ -557,14 +610,14 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
struct list_head *cur;
struct btrfs_ordered_sum *sum;
- trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);
+ trace_btrfs_ordered_extent_put(entry->inode, entry);
if (refcount_dec_and_test(&entry->refs)) {
ASSERT(list_empty(&entry->root_extent_list));
ASSERT(list_empty(&entry->log_list));
ASSERT(RB_EMPTY_NODE(&entry->rb_node));
if (entry->inode)
- btrfs_add_delayed_iput(BTRFS_I(entry->inode));
+ btrfs_add_delayed_iput(entry->inode);
while (!list_empty(&entry->list)) {
cur = entry->list.next;
sum = list_entry(cur, struct btrfs_ordered_sum, list);
@@ -595,7 +648,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
freespace_inode = btrfs_is_free_space_inode(btrfs_inode);
btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
- /* This is paired with btrfs_alloc_ordered_extent. */
+ /* This is paired with alloc_ordered_extent(). */
spin_lock(&btrfs_inode->lock);
btrfs_mod_outstanding_extents(btrfs_inode, -1);
spin_unlock(&btrfs_inode->lock);
@@ -681,11 +734,11 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
}
/*
- * wait for all the ordered extents in a root. This is done when balancing
- * space between drives.
+ * Wait for all the ordered extents in a root. Use @bg as the range, or the
+ * whole range if it's NULL.
*/
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
- const u64 range_start, const u64 range_len)
+ const struct btrfs_block_group *bg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(splice);
@@ -693,7 +746,17 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
LIST_HEAD(works);
struct btrfs_ordered_extent *ordered, *next;
u64 count = 0;
- const u64 range_end = range_start + range_len;
+ u64 range_start, range_len;
+ u64 range_end;
+
+ if (bg) {
+ range_start = bg->start;
+ range_len = bg->length;
+ } else {
+ range_start = 0;
+ range_len = U64_MAX;
+ }
+ range_end = range_start + range_len;
mutex_lock(&root->ordered_extent_mutex);
spin_lock(&root->ordered_extent_lock);
@@ -720,10 +783,10 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
cond_resched();
- spin_lock(&root->ordered_extent_lock);
if (nr != U64_MAX)
nr--;
count++;
+ spin_lock(&root->ordered_extent_lock);
}
list_splice_tail(&skipped, &root->ordered_extents);
list_splice_tail(&splice, &root->ordered_extents);
@@ -740,8 +803,12 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
return count;
}
+/*
+ * Wait for @nr ordered extents that intersect @bg, or the whole range of
+ * the filesystem if @bg is NULL.
+ */
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
- const u64 range_start, const u64 range_len)
+ const struct btrfs_block_group *bg)
{
struct btrfs_root *root;
LIST_HEAD(splice);
@@ -759,14 +826,13 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
&fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
- done = btrfs_wait_ordered_extents(root, nr,
- range_start, range_len);
+ done = btrfs_wait_ordered_extents(root, nr, bg);
btrfs_put_root(root);
- spin_lock(&fs_info->ordered_root_lock);
- if (nr != U64_MAX) {
+ if (nr != U64_MAX)
nr -= done;
- }
+
+ spin_lock(&fs_info->ordered_root_lock);
}
list_splice_tail(&splice, &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
@@ -783,7 +849,7 @@ void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
u64 start = entry->file_offset;
u64 end = start + entry->num_bytes - 1;
- struct btrfs_inode *inode = BTRFS_I(entry->inode);
+ struct btrfs_inode *inode = entry->inode;
bool freespace_inode;
trace_btrfs_ordered_extent_start(inode, entry);
@@ -810,7 +876,7 @@ void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
/*
* Used to wait on ordered extents across a large range of bytes.
*/
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len)
{
int ret = 0;
int ret_wb = 0;
@@ -840,11 +906,11 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
* before the ordered extents complete - to avoid failures (-EEXIST)
* when adding the new ordered extents to the ordered tree.
*/
- ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+ ret_wb = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, orig_end);
end = orig_end;
while (1) {
- ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
+ ordered = btrfs_lookup_first_ordered_extent(inode, end);
if (!ordered)
break;
if (ordered->file_offset > orig_end) {
@@ -1142,7 +1208,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
struct btrfs_ordered_extent *ordered, u64 len)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
u64 file_offset = ordered->file_offset;
@@ -1181,15 +1247,32 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
/* One ref for the tree. */
refcount_inc(&new->refs);
+ /*
+ * Take the root's ordered_extent_lock to avoid a race with
+ * btrfs_wait_ordered_extents() when updating the disk_bytenr and
+ * disk_num_bytes fields of the ordered extent below. And we disable
+ * IRQs because the inode's ordered_tree_lock is used in IRQ context
+ * elsewhere.
+ *
+ * There's no concern about a previous caller of
+ * btrfs_wait_ordered_extents() getting the trimmed ordered extent
+ * before we insert the new one, because even if it gets the ordered
+ * extent before it's trimmed and the new one is inserted, the trim can
+ * still happen right before or during its use, in which case it misses
+ * the new ordered extent anyway.
+ * There's no way around this and it's harmless for current use cases,
+ * so we take the root's ordered_extent_lock to fix that race during
+ * trimming and silence tools like KCSAN.
+ */
spin_lock_irq(&root->ordered_extent_lock);
spin_lock(&inode->ordered_tree_lock);
- /* Remove from tree once */
- node = &ordered->rb_node;
- rb_erase(node, &inode->ordered_tree);
- RB_CLEAR_NODE(node);
- if (inode->ordered_tree_last == node)
- inode->ordered_tree_last = NULL;
+ /*
+ * We don't have overlapping ordered extents (that would imply double
+ * allocation of extents) and we checked above that the split length
+ * does not cross the ordered extent's num_bytes field, so there's
+ * no need to remove it and re-insert it in the tree.
+ */
ordered->file_offset += len;
ordered->disk_bytenr += len;
ordered->num_bytes -= len;
@@ -1219,18 +1302,10 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
offset += sum->len;
}
- /* Re-insert the node */
- node = tree_insert(&inode->ordered_tree, ordered->file_offset,
- &ordered->rb_node);
- if (node)
- btrfs_panic(fs_info, -EEXIST,
- "zoned: inconsistency in ordered tree at offset %llu",
- ordered->file_offset);
-
node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
- if (node)
+ if (unlikely(node))
btrfs_panic(fs_info, -EEXIST,
- "zoned: inconsistency in ordered tree at offset %llu",
+ "inconsistency in ordered tree at offset %llu after split",
new->file_offset);
spin_unlock(&inode->ordered_tree_lock);
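With the range_start/range_len pair replaced by a block group pointer, waiters either pass the block group whose range they care about or NULL for the whole filesystem. A short sketch of both forms, where bg stands for a struct btrfs_block_group pointer; only the NULL form appears in the hunks above, the block-group form is assumed from the new prototypes:

	/* Wait for every ordered extent of one root, as btrfs_mksnapshot() does above. */
	btrfs_wait_ordered_extents(root, U64_MAX, NULL);

	/* Wait only for ordered extents intersecting one block group. */
	btrfs_wait_ordered_extents(root, U64_MAX, bg);

	/* Same, but across all roots of the filesystem. */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, bg);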
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index b6f6c6b91732..51b9e81726e2 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -130,7 +130,7 @@ struct btrfs_ordered_extent {
refcount_t refs;
/* the inode we belong to */
- struct inode *inode;
+ struct btrfs_inode *inode;
/* list of checksums for insertion when the extent io is done */
struct list_head list;
@@ -162,7 +162,7 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry);
-bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
+void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
struct page *page, u64 file_offset, u64 len,
bool uptodate);
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
@@ -171,17 +171,28 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);
+
+/*
+ * This represents details about the target file extent item of a write operation.
+ */
+struct btrfs_file_extent {
+ u64 disk_bytenr;
+ u64 disk_num_bytes;
+ u64 num_bytes;
+ u64 ram_bytes;
+ u64 offset;
+ u8 compression;
+};
+
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
struct btrfs_inode *inode, u64 file_offset,
- u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
- u64 disk_num_bytes, u64 offset, unsigned long flags,
- int compress_type);
+ const struct btrfs_file_extent *file_extent, unsigned long flags);
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum);
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
u64 file_offset);
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry);
-int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset);
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
@@ -193,9 +204,9 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct list_head *list);
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
- const u64 range_start, const u64 range_len);
+ const struct btrfs_block_group *bg);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
- const u64 range_start, const u64 range_len);
+ const struct btrfs_block_group *bg);
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end,
struct extent_state **cached_state);
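The new struct btrfs_file_extent above folds six scalar arguments of btrfs_alloc_ordered_extent() into one read-only descriptor. A small standalone sketch of the same parameter-object pattern follows, with hypothetical names.

/* Illustrative sketch (hypothetical names): grouping related scalar
 * parameters into a single descriptor struct, as the diff does for
 * btrfs_alloc_ordered_extent(). */
#include <stdint.h>
#include <stdio.h>

struct file_extent_desc {
	uint64_t disk_bytenr;
	uint64_t disk_num_bytes;
	uint64_t num_bytes;
	uint64_t ram_bytes;
	uint64_t offset;
	uint8_t compression;
};

/* A single const descriptor keeps call sites short and makes it hard to
 * swap two u64 arguments by accident. */
static int alloc_extent(uint64_t file_offset, const struct file_extent_desc *fe,
			unsigned long flags)
{
	printf("extent at %llu: disk %llu+%llu, flags 0x%lx\n",
	       (unsigned long long)file_offset,
	       (unsigned long long)fe->disk_bytenr,
	       (unsigned long long)fe->disk_num_bytes, flags);
	return 0;
}

int main(void)
{
	const struct file_extent_desc fe = {
		.disk_bytenr = 1 << 20, .disk_num_bytes = 65536,
		.num_bytes = 65536, .ram_bytes = 65536,
	};

	return alloc_extent(4096, &fe, 0);
}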
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 7e46aa8a0444..32dcea662da3 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -109,7 +109,7 @@ static void print_extent_item(const struct extent_buffer *eb, int slot, int type
btrfs_err(eb->fs_info,
"unexpected extent item size, has %u expect >= %zu",
item_size, sizeof(*ei));
- btrfs_handle_fs_error(eb->fs_info, -EUCLEAN, NULL);
+ return;
}
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
@@ -208,11 +208,6 @@ static void print_raid_stripe_key(const struct extent_buffer *eb, u32 item_size,
struct btrfs_stripe_extent *stripe)
{
const int num_stripes = btrfs_num_raid_stripes(item_size);
- const u8 encoding = btrfs_stripe_extent_encoding(eb, stripe);
-
- pr_info("\t\t\tencoding: %s\n",
- (encoding && encoding < BTRFS_NR_RAID_TYPES) ?
- btrfs_raid_array[encoding].raid_name : "unknown");
for (int i = 0; i < num_stripes; i++)
pr_info("\t\t\tstride %d devid %llu physical %llu\n",
@@ -310,6 +305,9 @@ void btrfs_print_leaf(const struct extent_buffer *l)
case BTRFS_EXTENT_DATA_KEY:
fi = btrfs_item_ptr(l, i,
struct btrfs_file_extent_item);
+ pr_info("\t\tgeneration %llu type %hhu\n",
+ btrfs_file_extent_generation(l, fi),
+ btrfs_file_extent_type(l, fi));
if (btrfs_file_extent_type(l, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
pr_info("\t\tinline extent data size %llu\n",
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 155570e20f45..b8fa34e16abb 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -27,7 +27,7 @@ struct prop_handler {
int (*validate)(const struct btrfs_inode *inode, const char *value,
size_t len);
int (*apply)(struct inode *inode, const char *value, size_t len);
- const char *(*extract)(struct inode *inode);
+ const char *(*extract)(const struct inode *inode);
bool (*ignore)(const struct btrfs_inode *inode);
int inheritable;
};
@@ -104,7 +104,7 @@ bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name)
return handler->ignore(inode);
}
-int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
+int btrfs_set_prop(struct btrfs_trans_handle *trans, struct btrfs_inode *inode,
const char *name, const char *value, size_t value_len,
int flags)
{
@@ -116,29 +116,29 @@ int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
return -EINVAL;
if (value_len == 0) {
- ret = btrfs_setxattr(trans, inode, handler->xattr_name,
+ ret = btrfs_setxattr(trans, &inode->vfs_inode, handler->xattr_name,
NULL, 0, flags);
if (ret)
return ret;
- ret = handler->apply(inode, NULL, 0);
+ ret = handler->apply(&inode->vfs_inode, NULL, 0);
ASSERT(ret == 0);
return ret;
}
- ret = btrfs_setxattr(trans, inode, handler->xattr_name, value,
+ ret = btrfs_setxattr(trans, &inode->vfs_inode, handler->xattr_name, value,
value_len, flags);
if (ret)
return ret;
- ret = handler->apply(inode, value, value_len);
+ ret = handler->apply(&inode->vfs_inode, value, value_len);
if (ret) {
- btrfs_setxattr(trans, inode, handler->xattr_name, NULL,
+ btrfs_setxattr(trans, &inode->vfs_inode, handler->xattr_name, NULL,
0, flags);
return ret;
}
- set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags);
+ set_bit(BTRFS_INODE_HAS_PROPS, &inode->runtime_flags);
return 0;
}
@@ -359,7 +359,7 @@ static bool prop_compression_ignore(const struct btrfs_inode *inode)
return false;
}
-static const char *prop_compression_extract(struct inode *inode)
+static const char *prop_compression_extract(const struct inode *inode)
{
switch (BTRFS_I(inode)->prop_compress) {
case BTRFS_COMPRESS_ZLIB:
@@ -385,7 +385,7 @@ static struct prop_handler prop_handlers[] = {
};
int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *parent)
+ struct inode *inode, const struct inode *parent)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
diff --git a/fs/btrfs/props.h b/fs/btrfs/props.h
index f60cd89feb29..63546d0a9444 100644
--- a/fs/btrfs/props.h
+++ b/fs/btrfs/props.h
@@ -15,7 +15,7 @@ struct btrfs_trans_handle;
int __init btrfs_props_init(void);
-int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
+int btrfs_set_prop(struct btrfs_trans_handle *trans, struct btrfs_inode *inode,
const char *name, const char *value, size_t value_len,
int flags);
int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name,
@@ -26,6 +26,6 @@ int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path);
int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
struct inode *inode,
- struct inode *dir);
+ const struct inode *dir);
#endif
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index fc2a7ea26354..5d57a285d59b 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -30,7 +30,7 @@
#include "root-tree.h"
#include "tree-checker.h"
-enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
+enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
{
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return BTRFS_QGROUP_MODE_DISABLED;
@@ -39,12 +39,12 @@ enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
return BTRFS_QGROUP_MODE_FULL;
}
-bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info)
+bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
{
return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
}
-bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info)
+bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
{
return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
}
@@ -107,7 +107,7 @@ static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *dest,
- struct btrfs_qgroup *src)
+ const struct btrfs_qgroup *src)
{
int i;
@@ -117,7 +117,7 @@ static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *dest,
- struct btrfs_qgroup *src)
+ const struct btrfs_qgroup *src)
{
int i;
@@ -141,37 +141,27 @@ static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
qg->new_refcnt += mod;
}
-static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
+static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
if (qg->old_refcnt < seq)
return 0;
return qg->old_refcnt - seq;
}
-static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
+static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
if (qg->new_refcnt < seq)
return 0;
return qg->new_refcnt - seq;
}
-/*
- * glue structure to represent the relations between qgroups.
- */
-struct btrfs_qgroup_list {
- struct list_head next_group;
- struct list_head next_member;
- struct btrfs_qgroup *group;
- struct btrfs_qgroup *member;
-};
-
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
-static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
+static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
u64 qgroupid)
{
struct rb_node *n = fs_info->qgroup_tree.rb_node;
@@ -346,7 +336,7 @@ static int del_relation_rb(struct btrfs_fs_info *fs_info,
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
u64 rfer, u64 excl)
{
struct btrfs_qgroup *qgroup;
@@ -608,7 +598,7 @@ out:
* Return false if no reserved space is left.
* Return true if some reserved space is leaked.
*/
-bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
+bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
{
struct rb_node *node;
bool ret = false;
@@ -1334,24 +1324,19 @@ out:
*/
static int flush_reservations(struct btrfs_fs_info *fs_info)
{
- struct btrfs_trans_handle *trans;
int ret;
ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
if (ret)
return ret;
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
- trans = btrfs_join_transaction(fs_info->tree_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
- return ret;
+ return btrfs_commit_current_transaction(fs_info->tree_root);
}
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *quota_root;
+ struct btrfs_root *quota_root = NULL;
struct btrfs_trans_handle *trans = NULL;
int ret = 0;
@@ -1446,12 +1431,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
btrfs_tree_lock(quota_root->node);
btrfs_clear_buffer_dirty(trans, quota_root->node);
btrfs_tree_unlock(quota_root->node);
- btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
- quota_root->node, 0, 1);
+ ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+ quota_root->node, 0, 1);
- btrfs_put_root(quota_root);
+ if (ret < 0)
+ btrfs_abort_transaction(trans, ret);
out:
+ btrfs_put_root(quota_root);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (ret && trans)
btrfs_end_transaction(trans);
@@ -1572,15 +1559,21 @@ out:
return ret;
}
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst)
+/*
+ * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
+ * callers and transferred here (either used or freed on error).
+ */
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
+ struct btrfs_qgroup_list *prealloc)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
- struct btrfs_qgroup_list *prealloc = NULL;
int ret = 0;
+ ASSERT(prealloc);
+
/* Check the level of src and dst first */
if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
return -EINVAL;
@@ -1605,11 +1598,6 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst
}
}
- prealloc = kzalloc(sizeof(*list), GFP_NOFS);
- if (!prealloc) {
- ret = -ENOMEM;
- goto out;
- }
ret = add_qgroup_relation_item(trans, src, dst);
if (ret)
goto out;
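The reworked btrfs_add_qgroup_relation() above takes ownership of a caller-supplied preallocation instead of calling kzalloc() itself, so no allocation happens inside the critical section. A standalone sketch of that ownership-transfer pattern, with hypothetical names:

/* Illustrative sketch (hypothetical names): the caller preallocates a node
 * outside any critical section and transfers ownership; the callee either
 * links it in or frees it on error. */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>

struct relation {
	int src;
	int dst;
	struct relation *next;
};

static struct relation *relations;

static int add_relation(int src, int dst, struct relation *prealloc)
{
	int ret = 0;

	assert(prealloc);
	if (src == dst) {		/* validation failure: free and bail */
		ret = -EINVAL;
		goto out;
	}
	prealloc->src = src;		/* consume the preallocated node */
	prealloc->dst = dst;
	prealloc->next = relations;
	relations = prealloc;
	prealloc = NULL;
out:
	free(prealloc);			/* no-op when it was consumed */
	return ret;
}

int main(void)
{
	struct relation *node = calloc(1, sizeof(*node));

	if (!node)
		return -ENOMEM;
	return add_relation(1, 100, node);
}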
@@ -1748,13 +1736,55 @@ out:
return ret;
}
-static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
+/*
+ * Return 0 if we can not delete the qgroup (not empty or has children etc).
+ * Return >0 if we can delete the qgroup.
+ * Return <0 for other errors during tree search.
+ */
+static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
{
- return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
- qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
- qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ int ret;
+
+ /*
+ * Squota would never be inconsistent, but there can still be a case
+ * where a dropped subvolume still has qgroup numbers, and squota
+ * relies on such a qgroup for future accounting.
+ *
+ * So for squota, do not allow dropping any non-zero qgroup.
+ */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
+ (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
+ return 0;
+
+ /* For higher level qgroup, we can only delete it if it has no child. */
+ if (btrfs_qgroup_level(qgroup->qgroupid)) {
+ if (!list_empty(&qgroup->members))
+ return 0;
+ return 1;
+ }
+
+ /*
+ * For level-0 qgroups, we can only delete one if there is no subvolume
+ * associated with it.
+ * This means that even if a subvolume is unlinked but not yet fully
+ * dropped, we can not delete the qgroup.
+ */
+ key.objectid = qgroup->qgroupid;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = -1ULL;
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
+ btrfs_free_path(path);
+ /*
+ * The @ret from btrfs_find_root() exactly matches our definition for
+ * the return value, thus can be returned directly.
+ */
+ return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
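can_delete_qgroup() above uses a three-way return value (<0 error, 0 cannot delete, >0 can delete), which btrfs_remove_qgroup() then folds into -EBUSY for the "still in use" case. A compact standalone sketch of that convention, with hypothetical names:

/* Illustrative sketch (hypothetical names): the <0 / 0 / >0 convention and
 * how a caller folds it into its own error code. */
#include <errno.h>
#include <stdio.h>

/* <0: lookup error, 0: still referenced (cannot delete), >0: free to delete */
static int can_delete(int refs, int lookup_err)
{
	if (lookup_err)
		return lookup_err;
	return refs == 0 ? 1 : 0;
}

static int remove_object(int refs, int lookup_err)
{
	int ret = can_delete(refs, lookup_err);

	if (ret < 0)
		return ret;	/* propagate the search error */
	if (ret == 0)
		return -EBUSY;	/* caller-visible "not empty" error */
	/* ... actual removal would go here ... */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", remove_object(0, 0), remove_object(2, 0),
	       remove_object(0, -EIO));
	return 0;
}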
@@ -1776,7 +1806,10 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
goto out;
}
- if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
+ ret = can_delete_qgroup(fs_info, qgroup);
+ if (ret < 0)
+ goto out;
+ if (ret == 0) {
ret = -EBUSY;
goto out;
}
@@ -1801,6 +1834,34 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
}
spin_lock(&fs_info->qgroup_lock);
+ /*
+ * Warn on reserved space. The qgroup should have no child nor a
+ * corresponding subvolume.
+ * Thus its reserved space should all be zero, regardless of whether the
+ * qgroup is consistent or which mode is in use.
+ */
+ WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
+ /*
+ * The same for rfer/excl numbers, but that's only if our qgroup is
+ * consistent and if it's in regular qgroup mode.
+ * For simple mode it's not as accurate thus we can hit non-zero values
+ * very frequently.
+ */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
+ !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
+ if (WARN_ON(qgroup->rfer || qgroup->excl ||
+ qgroup->rfer_cmpr || qgroup->excl_cmpr)) {
+ btrfs_warn_rl(fs_info,
+"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
+ btrfs_qgroup_level(qgroup->qgroupid),
+ btrfs_qgroup_subvolid(qgroup->qgroupid),
+ qgroup->rfer, qgroup->rfer_cmpr,
+ qgroup->excl, qgroup->excl_cmpr);
+ qgroup_mark_inconsistent(fs_info);
+ }
+ }
del_qgroup_rb(fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
@@ -1816,6 +1877,41 @@ out:
return ret;
}
+int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
+{
+ struct btrfs_trans_handle *trans;
+ int ret;
+
+ if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root)
+ return 0;
+
+ /*
+ * Commit current transaction to make sure all the rfer/excl numbers
+ * get updated.
+ */
+ trans = btrfs_start_transaction(fs_info->quota_root, 0);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret < 0)
+ return ret;
+
+ /* Start new trans to delete the qgroup info and limit items. */
+ trans = btrfs_start_transaction(fs_info->quota_root, 2);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ ret = btrfs_remove_qgroup(trans, subvolid);
+ btrfs_end_transaction(trans);
+ /*
+ * If it's squota and the subvolume still has numbers needed for future
+ * accounting, we can not delete it in this case. Just skip it.
+ */
+ if (ret == -EBUSY)
+ ret = 0;
+ return ret;
+}
+
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit)
{
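btrfs_qgroup_cleanup_dropped_subvolume() above first commits the current transaction so the rfer/excl numbers settle, then removes the qgroup items, deliberately treating -EBUSY as success for squota. A tiny standalone sketch of that error mapping, with hypothetical names:

/* Illustrative sketch (hypothetical names): mapping an expected "still in
 * use" error to success during best-effort cleanup. */
#include <errno.h>

/* Stand-in for the removal step: -EBUSY means "still has numbers". */
static int remove_qgroup_stub(int still_needed)
{
	return still_needed ? -EBUSY : 0;
}

static int cleanup_dropped_subvolume(int still_needed)
{
	int ret = remove_qgroup_stub(still_needed);

	/* The qgroup is intentionally kept for future accounting. */
	if (ret == -EBUSY)
		ret = 0;
	return ret;
}

int main(void)
{
	return cleanup_dropped_subvolume(1);
}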
@@ -3062,8 +3158,6 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup_inherit *inherit,
size_t size)
{
- if (!btrfs_qgroup_enabled(fs_info))
- return 0;
if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
return -EOPNOTSUPP;
if (size < sizeof(*inherit) || size > PAGE_SIZE)
@@ -3085,6 +3179,14 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
return -EINVAL;
/*
+ * Skip the inherit source qgroups check if qgroups are not enabled.
+ * Qgroups can still be enabled later, causing problems, but in that
+ * case btrfs_qgroup_inherit() would just ignore those invalid ones.
+ */
+ if (!btrfs_qgroup_enabled(fs_info))
+ return 0;
+
+ /*
* Now check all the remaining qgroups, they should all:
*
* - Exist
@@ -3216,7 +3318,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
struct btrfs_qgroup_inherit *inherit)
{
int ret = 0;
- int i;
u64 *i_qgroups;
bool committing = false;
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -3273,7 +3374,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
i_qgroups = (u64 *)(inherit + 1);
nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2 * inherit->num_excl_copies;
- for (i = 0; i < nums; ++i) {
+ for (int i = 0; i < nums; i++) {
srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
/*
@@ -3300,7 +3401,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
*/
if (inherit) {
i_qgroups = (u64 *)(inherit + 1);
- for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
+ for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
if (*i_qgroups == 0)
continue;
ret = add_qgroup_relation_item(trans, objectid,
@@ -3386,7 +3487,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
goto unlock;
i_qgroups = (u64 *)(inherit + 1);
- for (i = 0; i < inherit->num_qgroups; ++i) {
+ for (int i = 0; i < inherit->num_qgroups; i++) {
if (*i_qgroups) {
ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
*i_qgroups);
@@ -3406,7 +3507,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
++i_qgroups;
}
- for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
+ for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
struct btrfs_qgroup *src;
struct btrfs_qgroup *dst;
@@ -3427,7 +3528,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
/* Manually tweaking numbers certainly needs a rescan */
need_rescan = true;
}
- for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
+ for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
struct btrfs_qgroup *src;
struct btrfs_qgroup *dst;
@@ -3912,7 +4013,6 @@ int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
int ret = 0;
- struct btrfs_trans_handle *trans;
ret = qgroup_rescan_init(fs_info, 0, 1);
if (ret)
@@ -3929,16 +4029,10 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
* going to clear all tracking information for a clean start.
*/
- trans = btrfs_attach_transaction_barrier(fs_info->fs_root);
- if (IS_ERR(trans) && trans != ERR_PTR(-ENOENT)) {
+ ret = btrfs_commit_current_transaction(fs_info->fs_root);
+ if (ret) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
- return PTR_ERR(trans);
- } else if (trans != ERR_PTR(-ENOENT)) {
- ret = btrfs_commit_transaction(trans);
- if (ret) {
- fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
- return ret;
- }
+ return ret;
}
qgroup_rescan_zero_tracking(fs_info);
@@ -4074,7 +4168,6 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode,
*/
static int try_flush_qgroup(struct btrfs_root *root)
{
- struct btrfs_trans_handle *trans;
int ret;
/* Can't hold an open transaction or we run the risk of deadlocking. */
@@ -4095,17 +4188,9 @@ static int try_flush_qgroup(struct btrfs_root *root)
ret = btrfs_start_delalloc_snapshot(root, true);
if (ret < 0)
goto out;
- btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
- trans = btrfs_attach_transaction_barrier(root);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- if (ret == -ENOENT)
- ret = 0;
- goto out;
- }
-
- ret = btrfs_commit_transaction(trans);
+ ret = btrfs_commit_current_transaction(root);
out:
clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
wake_up(&root->qgroup_flush_wait);
@@ -4811,7 +4896,7 @@ void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_byte
}
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
- struct btrfs_squota_delta *delta)
+ const struct btrfs_squota_delta *delta)
{
int ret;
struct btrfs_qgroup *qgroup;
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 706640be0ec2..deb479d176a9 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -123,7 +123,6 @@ struct btrfs_inode;
/*
* Record a dirty extent, and info qgroup to update quota on it
- * TODO: Use kmem cache to alloc it.
*/
struct btrfs_qgroup_extent_record {
struct rb_node node;
@@ -279,6 +278,14 @@ struct btrfs_qgroup {
struct kobject kobj;
};
+/* Glue structure to represent the relations between qgroups. */
+struct btrfs_qgroup_list {
+ struct list_head next_group;
+ struct list_head next_member;
+ struct btrfs_qgroup *group;
+ struct btrfs_qgroup *member;
+};
+
struct btrfs_squota_delta {
/* The fstree root this delta counts against. */
u64 root;
@@ -312,9 +319,9 @@ enum btrfs_qgroup_mode {
BTRFS_QGROUP_MODE_SIMPLE
};
-enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info);
-bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info);
-bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info);
+enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info);
+bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info);
+bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info);
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
@@ -322,11 +329,13 @@ int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
bool interruptible);
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst);
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
+ struct btrfs_qgroup_list *prealloc);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
@@ -361,7 +370,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
enum btrfs_qgroup_rsv_type type);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
u64 rfer, u64 excl);
#endif
@@ -431,9 +440,9 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
-bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
+bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info);
void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
- struct btrfs_squota_delta *delta);
+ const struct btrfs_squota_delta *delta);
#endif
diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
index 6af6b4b9a32e..e6f7a234b8f6 100644
--- a/fs/btrfs/raid-stripe-tree.c
+++ b/fs/btrfs/raid-stripe-tree.c
@@ -80,7 +80,6 @@ static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_key stripe_key;
struct btrfs_root *stripe_root = fs_info->stripe_root;
const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type);
- u8 encoding = btrfs_bg_flags_to_raid_index(bioc->map_type);
struct btrfs_stripe_extent *stripe_extent;
const size_t item_size = struct_size(stripe_extent, strides, num_stripes);
int ret;
@@ -94,7 +93,6 @@ static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
trace_btrfs_insert_one_raid_extent(fs_info, bioc->logical, bioc->size,
num_stripes);
- btrfs_set_stack_stripe_extent_encoding(stripe_extent, encoding);
for (int i = 0; i < num_stripes; i++) {
u64 devid = bioc->stripes[i].dev->devid;
u64 physical = bioc->stripes[i].physical;
@@ -159,7 +157,6 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf;
const u64 end = logical + *length;
int num_stripes;
- u8 encoding;
u64 offset;
u64 found_logical;
u64 found_length;
@@ -222,16 +219,6 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
num_stripes = btrfs_num_raid_stripes(btrfs_item_size(leaf, slot));
stripe_extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
- encoding = btrfs_stripe_extent_encoding(leaf, stripe_extent);
-
- if (encoding != btrfs_bg_flags_to_raid_index(map_type)) {
- ret = -EUCLEAN;
- btrfs_handle_fs_error(fs_info, ret,
- "on-disk stripe encoding %d doesn't match RAID index %d",
- encoding,
- btrfs_bg_flags_to_raid_index(map_type));
- goto out;
- }
for (int i = 0; i < num_stripes; i++) {
struct btrfs_raid_stride *stride = &stripe_extent->strides[i];
diff --git a/fs/btrfs/raid-stripe-tree.h b/fs/btrfs/raid-stripe-tree.h
index c9c258f84903..1ac1c21aac2f 100644
--- a/fs/btrfs/raid-stripe-tree.h
+++ b/fs/btrfs/raid-stripe-tree.h
@@ -48,8 +48,7 @@ static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info,
static inline int btrfs_num_raid_stripes(u32 item_size)
{
- return (item_size - offsetof(struct btrfs_stripe_extent, strides)) /
- sizeof(struct btrfs_raid_stride);
+ return item_size / sizeof(struct btrfs_raid_stride);
}
#endif
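With the encoding byte removed from struct btrfs_stripe_extent, the item is nothing but an array of strides, which is why btrfs_num_raid_stripes() above can divide the item size directly instead of subtracting a header offset first. A standalone sketch contrasting the two computations, with hypothetical names:

/* Illustrative sketch (hypothetical names): deriving an element count from a
 * variable-sized item. With a header before the flexible array the header
 * size must be subtracted; once the header goes away a plain division is
 * enough. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stride {
	uint64_t devid;
	uint64_t physical;
};

struct stripe_extent_with_hdr {
	uint8_t encoding;
	uint8_t reserved[7];
	struct stride strides[];	/* flexible array member */
};

static int num_stripes_with_hdr(size_t item_size)
{
	return (item_size - offsetof(struct stripe_extent_with_hdr, strides)) /
	       sizeof(struct stride);
}

static int num_stripes_no_hdr(size_t item_size)
{
	return item_size / sizeof(struct stride);
}

int main(void)
{
	printf("%d %d\n", num_stripes_with_hdr(8 + 3 * sizeof(struct stride)),
	       num_stripes_no_hdr(3 * sizeof(struct stride)));
	return 0;
}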
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 831fac45e70f..39bec672df0c 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -40,6 +40,85 @@
#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+static void dump_bioc(const struct btrfs_fs_info *fs_info, const struct btrfs_io_context *bioc)
+{
+ if (unlikely(!bioc)) {
+ btrfs_crit(fs_info, "bioc=NULL");
+ return;
+ }
+ btrfs_crit(fs_info,
+"bioc logical=%llu full_stripe=%llu size=%llu map_type=0x%llx mirror=%u replace_nr_stripes=%u replace_stripe_src=%d num_stripes=%u",
+ bioc->logical, bioc->full_stripe_logical, bioc->size,
+ bioc->map_type, bioc->mirror_num, bioc->replace_nr_stripes,
+ bioc->replace_stripe_src, bioc->num_stripes);
+ for (int i = 0; i < bioc->num_stripes; i++) {
+ btrfs_crit(fs_info, " nr=%d devid=%llu physical=%llu",
+ i, bioc->stripes[i].dev->devid,
+ bioc->stripes[i].physical);
+ }
+}
+
+static void btrfs_dump_rbio(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_raid_bio *rbio)
+{
+ if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
+ return;
+
+ dump_bioc(fs_info, rbio->bioc);
+ btrfs_crit(fs_info,
+"rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u scrubp=%u dbitmap=0x%lx",
+ rbio->flags, rbio->nr_sectors, rbio->nr_data,
+ rbio->real_stripes, rbio->stripe_nsectors,
+ rbio->scrubp, rbio->dbitmap);
+}
+
+#define ASSERT_RBIO(expr, rbio) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ } \
+ ASSERT((expr)); \
+})
+
+#define ASSERT_RBIO_STRIPE(expr, rbio, stripe_nr) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ btrfs_crit(__fs_info, "stripe_nr=%d", (stripe_nr)); \
+ } \
+ ASSERT((expr)); \
+})
+
+#define ASSERT_RBIO_SECTOR(expr, rbio, sector_nr) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ btrfs_crit(__fs_info, "sector_nr=%d", (sector_nr)); \
+ } \
+ ASSERT((expr)); \
+})
+
+#define ASSERT_RBIO_LOGICAL(expr, rbio, logical) \
+({ \
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \
+ const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
+ (rbio)->bioc->fs_info : NULL; \
+ \
+ btrfs_dump_rbio(__fs_info, (rbio)); \
+ btrfs_crit(__fs_info, "logical=%llu", (logical)); \
+ } \
+ ASSERT((expr)); \
+})
+
/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
struct list_head hash_list;
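The ASSERT_RBIO*() helpers above dump the bioc and rbio state before the assertion fires, so a failure report carries enough context to debug. A standalone userspace sketch of the same dump-then-assert macro shape (GNU C statement expression, hypothetical names):

/* Illustrative sketch (hypothetical names): print extra context when the
 * condition fails, then fall through to the normal assertion. */
#include <assert.h>
#include <stdio.h>

struct rbio_ctx {
	unsigned int real_stripes;
	unsigned int stripe_nsectors;
};

#define ASSERT_CTX_STRIPE(expr, ctx, stripe_nr)				\
({									\
	if (!(expr)) {							\
		fprintf(stderr,						\
			"assert failed: real_stripes=%u nsectors=%u stripe_nr=%u\n", \
			(ctx)->real_stripes, (ctx)->stripe_nsectors,	\
			(unsigned int)(stripe_nr));			\
	}								\
	assert(expr);							\
})

int main(void)
{
	struct rbio_ctx ctx = { .real_stripes = 3, .stripe_nsectors = 16 };
	unsigned int stripe_nr = 2;

	ASSERT_CTX_STRIPE(stripe_nr < ctx.real_stripes, &ctx, stripe_nr);
	return 0;
}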
@@ -592,8 +671,8 @@ static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
unsigned int stripe_nr,
unsigned int sector_nr)
{
- ASSERT(stripe_nr < rbio->real_stripes);
- ASSERT(sector_nr < rbio->stripe_nsectors);
+ ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr);
+ ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr);
return stripe_nr * rbio->stripe_nsectors + sector_nr;
}
@@ -873,8 +952,10 @@ static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
struct sector_ptr *sector;
int index;
- ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
- ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
+ ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->real_stripes,
+ rbio, stripe_nr);
+ ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
+ rbio, sector_nr);
index = stripe_nr * rbio->stripe_nsectors + sector_nr;
ASSERT(index >= 0 && index < rbio->nr_sectors);
@@ -970,7 +1051,7 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
int ret;
- ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, 0);
+ ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, false);
if (ret < 0)
return ret;
/* Mapping all sectors */
@@ -985,7 +1066,7 @@ static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
int ret;
ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
- rbio->stripe_pages + data_pages, 0);
+ rbio->stripe_pages + data_pages, false);
if (ret < 0)
return ret;
@@ -1057,8 +1138,10 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
* thus it can be larger than rbio->real_stripe.
* So here we check against bioc->num_stripes, not rbio->real_stripes.
*/
- ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
- ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
+ ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes,
+ rbio, stripe_nr);
+ ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
+ rbio, sector_nr);
ASSERT(sector->page);
stripe = &rbio->bioc->stripes[stripe_nr];
@@ -1197,14 +1280,14 @@ static void assert_rbio(struct btrfs_raid_bio *rbio)
* At least two stripes (2 disks RAID5), and since real_stripes is U8,
* we won't go beyond 256 disks anyway.
*/
- ASSERT(rbio->real_stripes >= 2);
- ASSERT(rbio->nr_data > 0);
+ ASSERT_RBIO(rbio->real_stripes >= 2, rbio);
+ ASSERT_RBIO(rbio->nr_data > 0, rbio);
/*
* This is another check to make sure nr data stripes is smaller
* than total stripes.
*/
- ASSERT(rbio->nr_data < rbio->real_stripes);
+ ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio);
}
/* Generate PQ for one vertical stripe. */
@@ -1557,7 +1640,7 @@ static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
const int data_pages = rbio->nr_data * rbio->stripe_npages;
int ret;
- ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, 0);
+ ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, false);
if (ret < 0)
return ret;
@@ -1641,9 +1724,10 @@ static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
const u32 sectorsize = fs_info->sectorsize;
u64 cur_logical;
- ASSERT(orig_logical >= full_stripe_start &&
- orig_logical + orig_len <= full_stripe_start +
- rbio->nr_data * BTRFS_STRIPE_LEN);
+ ASSERT_RBIO_LOGICAL(orig_logical >= full_stripe_start &&
+ orig_logical + orig_len <= full_stripe_start +
+ rbio->nr_data * BTRFS_STRIPE_LEN,
+ rbio, orig_logical);
bio_list_add(&rbio->bio_list, orig_bio);
rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
@@ -2389,7 +2473,7 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
break;
}
}
- ASSERT(i < rbio->real_stripes);
+ ASSERT_RBIO_STRIPE(i < rbio->real_stripes, rbio, i);
bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
return rbio;
@@ -2555,7 +2639,7 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
* Replace is running and our parity stripe needs to be duplicated to
* the target device. Check we have a valid source stripe number.
*/
- ASSERT(rbio->bioc->replace_stripe_src >= 0);
+ ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio);
for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
struct sector_ptr *sector;
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index cf531255ab76..9522a8b79d22 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -441,7 +441,8 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
u32 item_size = btrfs_item_size(leaf, slot);
unsigned long end, ptr;
u64 offset, flags, count;
- int type, ret;
+ int type;
+ int ret = 0;
ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(leaf, ei);
@@ -486,7 +487,11 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
key->objectid, key->offset);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
- WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)) {
+ btrfs_err(fs_info,
+ "found extent owner ref without simple quotas enabled");
+ ret = -EINVAL;
+ }
break;
default:
btrfs_err(fs_info, "invalid key type in iref");
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index d0a3fcecc46a..df6b93b927cd 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -733,7 +733,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* we found the previous extent covering eof and before we
* attempted to increment its reference count).
*/
- ret = btrfs_wait_ordered_range(inode, wb_start,
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), wb_start,
destoff - wb_start);
if (ret)
return ret;
@@ -755,7 +755,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* range, so wait for writeback to complete before truncating pages
* from the page cache. This is a rare case.
*/
- wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
+ wb_ret = btrfs_wait_ordered_range(BTRFS_I(inode), destoff, len);
ret = ret ? ret : wb_ret;
/*
* Truncate page cache pages so that future reads will see the cloned
@@ -835,11 +835,11 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
if (ret < 0)
return ret;
- ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode_in), ALIGN_DOWN(pos_in, bs),
wb_len);
if (ret < 0)
return ret;
- ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode_out), ALIGN_DOWN(pos_out, bs),
wb_len);
if (ret < 0)
return ret;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8b24bb5a0aa1..b592fc8cf368 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -817,7 +817,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
goto abort;
}
set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
- reloc_root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(reloc_root, trans->transid);
return reloc_root;
fail:
kfree(root_item);
@@ -864,7 +864,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
*/
if (root->reloc_root) {
reloc_root = root->reloc_root;
- reloc_root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(reloc_root, trans->transid);
return 0;
}
@@ -962,7 +962,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
if (!path)
return -ENOMEM;
- bytenr -= BTRFS_I(reloc_inode)->index_cnt;
+ bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start;
ret = btrfs_lookup_file_extent(NULL, root, path,
btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
if (ret < 0)
@@ -1739,7 +1739,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
* btrfs_update_reloc_root() and update our root item
* appropriately.
*/
- reloc_root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(reloc_root, trans->transid);
trans->block_rsv = rc->block_rsv;
replaced = 0;
@@ -2082,7 +2082,7 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root;
int ret;
- if (reloc_root->last_trans == trans->transid)
+ if (btrfs_get_root_last_trans(reloc_root) == trans->transid)
return 0;
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
@@ -2790,14 +2790,14 @@ out_free_blocks:
return ret;
}
-static noinline_for_stack int prealloc_file_extent_cluster(
- struct btrfs_inode *inode,
- const struct file_extent_cluster *cluster)
+static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc)
{
+ const struct file_extent_cluster *cluster = &rc->cluster;
+ struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
u64 alloc_hint = 0;
u64 start;
u64 end;
- u64 offset = inode->index_cnt;
+ u64 offset = inode->reloc_block_group_start;
u64 num_bytes;
int nr;
int ret = 0;
@@ -2899,11 +2899,14 @@ static noinline_for_stack int prealloc_file_extent_cluster(
return ret;
}
-static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
- u64 start, u64 end, u64 block_start)
+static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc)
{
+ struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
struct extent_map *em;
struct extent_state *cached_state = NULL;
+ u64 offset = inode->reloc_block_group_start;
+ u64 start = rc->cluster.start - offset;
+ u64 end = rc->cluster.end - offset;
int ret = 0;
em = alloc_extent_map();
@@ -2912,13 +2915,14 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inod
em->start = start;
em->len = end + 1 - start;
- em->block_len = em->len;
- em->block_start = block_start;
+ em->disk_bytenr = rc->cluster.start;
+ em->disk_num_bytes = em->len;
+ em->ram_bytes = em->len;
em->flags |= EXTENT_FLAG_PINNED;
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
- ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
+ lock_extent(&inode->io_tree, start, end, &cached_state);
+ ret = btrfs_replace_extent_map_range(inode, em, false);
+ unlock_extent(&inode->io_tree, start, end, &cached_state);
free_extent_map(em);
return ret;
@@ -2946,12 +2950,14 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
return cluster->boundary[cluster_nr + 1] - 1;
}
-static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
- const struct file_extent_cluster *cluster,
+static int relocate_one_folio(struct reloc_control *rc,
+ struct file_ra_state *ra,
int *cluster_nr, unsigned long index)
{
+ const struct file_extent_cluster *cluster = &rc->cluster;
+ struct inode *inode = rc->data_inode;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- u64 offset = BTRFS_I(inode)->index_cnt;
+ u64 offset = BTRFS_I(inode)->reloc_block_group_start;
const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
struct folio *folio;
@@ -3083,10 +3089,11 @@ release_folio:
return ret;
}
-static int relocate_file_extent_cluster(struct inode *inode,
- const struct file_extent_cluster *cluster)
+static int relocate_file_extent_cluster(struct reloc_control *rc)
{
- u64 offset = BTRFS_I(inode)->index_cnt;
+ struct inode *inode = rc->data_inode;
+ const struct file_extent_cluster *cluster = &rc->cluster;
+ u64 offset = BTRFS_I(inode)->reloc_block_group_start;
unsigned long index;
unsigned long last_index;
struct file_ra_state *ra;
@@ -3100,21 +3107,20 @@ static int relocate_file_extent_cluster(struct inode *inode,
if (!ra)
return -ENOMEM;
- ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
+ ret = prealloc_file_extent_cluster(rc);
if (ret)
goto out;
file_ra_state_init(ra, inode->i_mapping);
- ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
- cluster->end - offset, cluster->start);
+ ret = setup_relocation_extent_mapping(rc);
if (ret)
goto out;
last_index = (cluster->end - offset) >> PAGE_SHIFT;
for (index = (cluster->start - offset) >> PAGE_SHIFT;
index <= last_index && !ret; index++)
- ret = relocate_one_folio(inode, ra, cluster, &cluster_nr, index);
+ ret = relocate_one_folio(rc, ra, &cluster_nr, index);
if (ret == 0)
WARN_ON(cluster_nr != cluster->nr);
out:
@@ -3122,15 +3128,16 @@ out:
return ret;
}
-static noinline_for_stack int relocate_data_extent(struct inode *inode,
- const struct btrfs_key *extent_key,
- struct file_extent_cluster *cluster)
+static noinline_for_stack int relocate_data_extent(struct reloc_control *rc,
+ const struct btrfs_key *extent_key)
{
+ struct inode *inode = rc->data_inode;
+ struct file_extent_cluster *cluster = &rc->cluster;
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
- ret = relocate_file_extent_cluster(inode, cluster);
+ ret = relocate_file_extent_cluster(rc);
if (ret)
return ret;
cluster->nr = 0;
@@ -3156,7 +3163,7 @@ static noinline_for_stack int relocate_data_extent(struct inode *inode,
* the cluster we need to relocate.
*/
root->relocation_src_root = cluster->owning_root;
- ret = relocate_file_extent_cluster(inode, cluster);
+ ret = relocate_file_extent_cluster(rc);
if (ret)
return ret;
cluster->nr = 0;
@@ -3175,7 +3182,7 @@ static noinline_for_stack int relocate_data_extent(struct inode *inode,
cluster->nr++;
if (cluster->nr >= MAX_EXTENTS) {
- ret = relocate_file_extent_cluster(inode, cluster);
+ ret = relocate_file_extent_cluster(rc);
if (ret)
return ret;
cluster->nr = 0;
@@ -3369,7 +3376,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
if (inode)
goto truncate;
- inode = btrfs_iget(fs_info->sb, ino, root);
+ inode = btrfs_iget(ino, root);
if (IS_ERR(inode))
return -ENOENT;
@@ -3744,8 +3751,7 @@ restart:
if (rc->stage == MOVE_DATA_EXTENTS &&
(flags & BTRFS_EXTENT_FLAG_DATA)) {
rc->found_file_extent = true;
- ret = relocate_data_extent(rc->data_inode,
- &key, &rc->cluster);
+ ret = relocate_data_extent(rc, &key);
if (ret < 0) {
err = ret;
break;
@@ -3774,8 +3780,7 @@ restart:
}
if (!err) {
- ret = relocate_file_extent_cluster(rc->data_inode,
- &rc->cluster);
+ ret = relocate_file_extent_cluster(rc);
if (ret < 0)
err = ret;
}
@@ -3908,14 +3913,14 @@ static noinline_for_stack struct inode *create_reloc_inode(
if (ret)
goto out;
- inode = btrfs_iget(fs_info->sb, objectid, root);
+ inode = btrfs_iget(objectid, root);
if (IS_ERR(inode)) {
delete_orphan_inode(trans, root, objectid);
ret = PTR_ERR(inode);
inode = NULL;
goto out;
}
- BTRFS_I(inode)->index_cnt = group->start;
+ BTRFS_I(inode)->reloc_block_group_start = group->start;
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
@@ -4002,15 +4007,13 @@ static void free_reloc_control(struct reloc_control *rc)
/*
* Print the block group being relocated
*/
-static void describe_relocation(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group *block_group)
+static void describe_relocation(struct btrfs_block_group *block_group)
{
char buf[128] = {'\0'};
btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
- btrfs_info(fs_info,
- "relocating block group %llu flags %s",
+ btrfs_info(block_group->fs_info, "relocating block group %llu flags %s",
block_group->start, buf);
}
@@ -4118,13 +4121,11 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
goto out;
}
- describe_relocation(fs_info, rc->block_group);
+ describe_relocation(rc->block_group);
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
- btrfs_wait_ordered_roots(fs_info, U64_MAX,
- rc->block_group->start,
- rc->block_group->length);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
ret = btrfs_zone_finish(rc->block_group);
WARN_ON(ret && ret != -EAGAIN);
@@ -4149,7 +4150,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
* out of the loop if we hit an error.
*/
if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
- ret = btrfs_wait_ordered_range(rc->data_inode, 0,
+ ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
(u64)-1);
if (ret)
err = ret;
@@ -4221,8 +4222,8 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
struct extent_buffer *leaf;
struct reloc_control *rc = NULL;
struct btrfs_trans_handle *trans;
- int ret;
- int err = 0;
+ int ret2;
+ int ret = 0;
path = btrfs_alloc_path();
if (!path)
@@ -4236,15 +4237,14 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
while (1) {
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
path, 0, 0);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
if (ret > 0) {
if (path->slots[0] == 0)
break;
path->slots[0]--;
}
+ ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(path);
@@ -4255,7 +4255,7 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
if (IS_ERR(reloc_root)) {
- err = PTR_ERR(reloc_root);
+ ret = PTR_ERR(reloc_root);
goto out;
}
@@ -4267,15 +4267,12 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
reloc_root->root_key.offset, false);
if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root);
- if (ret != -ENOENT) {
- err = ret;
+ if (ret != -ENOENT)
goto out;
- }
ret = mark_garbage_root(reloc_root);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
+ ret = 0;
} else {
btrfs_put_root(fs_root);
}
@@ -4293,15 +4290,13 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
rc = alloc_reloc_control(fs_info);
if (!rc) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
ret = reloc_chunk_start(fs_info);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out_end;
- }
rc->extent_root = btrfs_extent_root(fs_info, 0);
@@ -4309,7 +4304,7 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_unset;
}
@@ -4329,15 +4324,15 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
false);
if (IS_ERR(fs_root)) {
- err = PTR_ERR(fs_root);
+ ret = PTR_ERR(fs_root);
list_add_tail(&reloc_root->root_list, &reloc_roots);
btrfs_end_transaction(trans);
goto out_unset;
}
- err = __add_reloc_root(reloc_root);
- ASSERT(err != -EEXIST);
- if (err) {
+ ret = __add_reloc_root(reloc_root);
+ ASSERT(ret != -EEXIST);
+ if (ret) {
list_add_tail(&reloc_root->root_list, &reloc_roots);
btrfs_put_root(fs_root);
btrfs_end_transaction(trans);
@@ -4347,8 +4342,8 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
btrfs_put_root(fs_root);
}
- err = btrfs_commit_transaction(trans);
- if (err)
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
goto out_unset;
merge_reloc_roots(rc);
@@ -4357,14 +4352,14 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_clean;
}
- err = btrfs_commit_transaction(trans);
+ ret = btrfs_commit_transaction(trans);
out_clean:
- ret = clean_dirty_subvols(rc);
- if (ret < 0 && !err)
- err = ret;
+ ret2 = clean_dirty_subvols(rc);
+ if (ret2 < 0 && !ret)
+ ret = ret2;
out_unset:
unset_reloc_control(rc);
out_end:
@@ -4375,14 +4370,14 @@ out:
btrfs_free_path(path);
- if (err == 0) {
+ if (ret == 0) {
/* cleanup orphan inode in data relocation tree */
fs_root = btrfs_grab_root(fs_info->data_reloc_root);
ASSERT(fs_root);
- err = btrfs_orphan_cleanup(fs_root);
+ ret = btrfs_orphan_cleanup(fs_root);
btrfs_put_root(fs_root);
}
- return err;
+ return ret;
}
/*
@@ -4393,9 +4388,9 @@ out:
*/
int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u64 disk_bytenr = ordered->file_offset + inode->index_cnt;
+ u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start;
struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
LIST_HEAD(list);
int ret;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index afd6932f5e89..14a8d7100018 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -261,7 +261,7 @@ static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
atomic_set(&stripe->pending_io, 0);
spin_lock_init(&stripe->write_error_lock);
- ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, 0);
+ ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, false);
if (ret < 0)
goto error;
@@ -1688,20 +1688,24 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
(i << fs_info->sectorsize_bits);
int err;
- bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
- fs_info, scrub_read_endio, stripe);
- bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
-
io_stripe.is_scrub = true;
+ stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
+ /*
+ * For RST cases, we need to manually split the bbio to
+ * follow the RST boundary.
+ */
err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
- &stripe_len, &bioc, &io_stripe,
- &mirror);
+ &stripe_len, &bioc, &io_stripe, &mirror);
btrfs_put_bioc(bioc);
- if (err) {
- btrfs_bio_end_io(bbio,
- errno_to_blk_status(err));
- return;
+ if (err < 0) {
+ set_bit(i, &stripe->io_error_bitmap);
+ set_bit(i, &stripe->error_bitmap);
+ continue;
}
+
+ bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
+ fs_info, scrub_read_endio, stripe);
+ bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
}
__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
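In the scrub hunk above, a failed btrfs_map_block() now only marks the affected sector in the stripe's error bitmaps and continues with the remaining sectors rather than ending the whole bbio. A standalone sketch of that per-sector bookkeeping, with hypothetical names:

/* Illustrative sketch (hypothetical names): record a mapping failure per
 * sector and keep processing the rest of the stripe. */
#include <errno.h>
#include <stdio.h>

#define NR_SECTORS 16

static void mark_sector_error(unsigned long *io_err, unsigned long *err, int i)
{
	*io_err |= 1UL << i;
	*err |= 1UL << i;
}

int main(void)
{
	unsigned long io_error_bitmap = 0, error_bitmap = 0;

	for (int i = 0; i < NR_SECTORS; i++) {
		int ret = (i == 3) ? -EIO : 0;	/* pretend sector 3 fails to map */

		if (ret < 0) {
			/* Record the failure and keep going with the rest. */
			mark_sector_error(&io_error_bitmap, &error_bitmap, i);
			continue;
		}
		/* ... submit the read for this sector ... */
	}
	printf("io errors: 0x%lx err: 0x%lx\n", io_error_bitmap, error_bitmap);
	return 0;
}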
@@ -2437,19 +2441,15 @@ static int finish_extent_writes_for_zoned(struct btrfs_root *root,
struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
- struct btrfs_trans_handle *trans;
if (!btrfs_is_zoned(fs_info))
return 0;
btrfs_wait_block_group_reservations(cache);
btrfs_wait_nocow_writers(cache);
- btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- return btrfs_commit_transaction(trans);
+ return btrfs_commit_current_transaction(root);
}
static noinline_for_stack
@@ -2680,8 +2680,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
if (sctx->is_dev_replace) {
btrfs_wait_nocow_writers(cache);
- btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
- cache->length);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
}
scrub_pause_off(fs_info);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 3dd4a48479a9..fb3675f5bf50 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5188,11 +5188,10 @@ out:
static int process_verity(struct send_ctx *sctx)
{
int ret = 0;
- struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
struct inode *inode;
struct fs_path *p;
- inode = btrfs_iget(fs_info->sb, sctx->cur_ino, sctx->send_root);
+ inode = btrfs_iget(sctx->cur_ino, sctx->send_root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -5550,7 +5549,7 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
size_t inline_size;
int ret;
- inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
+ inode = btrfs_iget(sctx->cur_ino, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -5617,7 +5616,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
u32 crc;
int ret;
- inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
+ inode = btrfs_iget(sctx->cur_ino, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -5746,7 +5745,7 @@ static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
if (sctx->cur_inode == NULL) {
struct btrfs_root *root = sctx->send_root;
- sctx->cur_inode = btrfs_iget(root->fs_info->sb, sctx->cur_ino, root);
+ sctx->cur_inode = btrfs_iget(sctx->cur_ino, root);
if (IS_ERR(sctx->cur_inode)) {
int err = PTR_ERR(sctx->cur_inode);
@@ -7998,34 +7997,18 @@ out:
*/
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
- int i;
- struct btrfs_trans_handle *trans = NULL;
-
-again:
- if (sctx->parent_root &&
- sctx->parent_root->node != sctx->parent_root->commit_root)
- goto commit_trans;
-
- for (i = 0; i < sctx->clone_roots_cnt; i++)
- if (sctx->clone_roots[i].root->node !=
- sctx->clone_roots[i].root->commit_root)
- goto commit_trans;
-
- if (trans)
- return btrfs_end_transaction(trans);
+ struct btrfs_root *root = sctx->parent_root;
- return 0;
+ if (root && root->node != root->commit_root)
+ return btrfs_commit_current_transaction(root);
-commit_trans:
- /* Use any root, all fs roots will get their commit roots updated. */
- if (!trans) {
- trans = btrfs_join_transaction(sctx->send_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- goto again;
+ for (int i = 0; i < sctx->clone_roots_cnt; i++) {
+ root = sctx->clone_roots[i].root;
+ if (root->node != root->commit_root)
+ return btrfs_commit_current_transaction(root);
}
- return btrfs_commit_transaction(trans);
+ return 0;
}
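The rewritten ensure_commit_roots_uptodate() above commits the current transaction as soon as any relevant root has a node that differs from its commit root, instead of joining a transaction first and re-checking. A standalone sketch of that dirtiness check, with hypothetical names:

/* Illustrative sketch (hypothetical names): commit only when some root still
 * has uncommitted changes, i.e. its current node differs from its commit
 * root. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct root {
	const void *node;
	const void *commit_root;
};

static bool any_root_dirty(const struct root *roots, size_t nr)
{
	for (size_t i = 0; i < nr; i++)
		if (roots[i].node != roots[i].commit_root)
			return true;
	return false;
}

int main(void)
{
	int a, b;
	struct root roots[] = { { &a, &a }, { &b, &a } };

	printf("commit needed: %d\n", any_root_dirty(roots, 2));
	return 0;
}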
/*
@@ -8046,7 +8029,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
ret = btrfs_start_delalloc_snapshot(root, false);
if (ret)
return ret;
- btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
}
for (i = 0; i < sctx->clone_roots_cnt; i++) {
@@ -8054,7 +8037,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
ret = btrfs_start_delalloc_snapshot(root, false);
if (ret)
return ret;
- btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+ btrfs_wait_ordered_extents(root, U64_MAX, NULL);
}
return 0;
@@ -8082,10 +8065,10 @@ static void dedupe_in_progress_warn(const struct btrfs_root *root)
btrfs_root_id(root), root->dedupe_in_progress);
}
-long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_args *arg)
{
int ret = 0;
- struct btrfs_root *send_root = BTRFS_I(inode)->root;
+ struct btrfs_root *send_root = inode->root;
struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root;
struct send_ctx *sctx = NULL;
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index dd1c9f02b011..b07f4aa66878 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -11,7 +11,7 @@
#include <linux/sizes.h>
#include <linux/align.h>
-struct inode;
+struct btrfs_inode;
struct btrfs_ioctl_send_args;
#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
@@ -182,6 +182,6 @@ enum {
__BTRFS_SEND_A_MAX = 35,
};
-long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg);
+long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_args *arg);
#endif
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index d620323d08ea..9ac94d3119e8 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#include "linux/spinlock.h"
+#include <linux/minmax.h>
#include "misc.h"
#include "ctree.h"
#include "space-info.h"
@@ -190,6 +192,8 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
*/
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
+#define BTRFS_UNALLOC_BLOCK_GROUP_TARGET (10ULL)
+
/*
* Calculate chunk size depending on volume type (regular or zoned).
*/
@@ -232,6 +236,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
if (!space_info)
return -ENOMEM;
+ space_info->fs_info = info;
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
INIT_LIST_HEAD(&space_info->block_groups[i]);
init_rwsem(&space_info->groups_sem);
@@ -340,11 +345,32 @@ struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
return NULL;
}
+static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_space_info *data_sinfo;
+ u64 data_chunk_size;
+
+ /*
+ * Calculate the data_chunk_size, space_info->chunk_size is the
+ * "optimal" chunk size based on the fs size. However when we actually
+ * allocate the chunk we will strip this down further, making it no
+ * more than 10% of the disk or 1G, whichever is smaller.
+ *
+ * In zoned mode, we need to use the zone_size (= data_sinfo->chunk_size)
+ * as it is.
+ */
+ data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
+ if (btrfs_is_zoned(fs_info))
+ return data_sinfo->chunk_size;
+ data_chunk_size = min(data_sinfo->chunk_size,
+ mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
+ return min_t(u64, data_chunk_size, SZ_1G);
+}
+
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_space_info *data_sinfo;
u64 profile;
u64 avail;
u64 data_chunk_size;
@@ -368,16 +394,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
if (avail == 0)
return 0;
- /*
- * Calculate the data_chunk_size, space_info->chunk_size is the
- * "optimal" chunk size based on the fs size. However when we actually
- * allocate the chunk we will strip this down further, making it no more
- * than 10% of the disk or 1G, whichever is smaller.
- */
- data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
- data_chunk_size = min(data_sinfo->chunk_size,
- mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
- data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
+ data_chunk_size = calc_effective_data_chunk_size(fs_info);
/*
* Since data allocations immediately use block groups as part of the
@@ -405,6 +422,17 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
avail >>= 3;
else
avail >>= 1;
+
+ /*
+ * In zoned mode, we always allocate one zone as one chunk.
+ * Returning bytes that are not aligned to the zone size here results
+ * in less pressure for the async metadata reclaim process, and it
+ * will over-commit too much, leading to ENOSPC. Align down to the
+ * zone size to avoid that.
+ */
+ if (btrfs_is_zoned(fs_info))
+ avail = ALIGN_DOWN(avail, fs_info->zone_size);
+
return avail;
}
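To make the arithmetic above concrete: the effective data chunk size is a plain min() chain, and on zoned filesystems the over-commit estimate is additionally rounded down to a zone boundary. A standalone userspace sketch with made-up numbers (8 GiB filesystem, 1 GiB "optimal" chunk size, 256 MiB zones); illustrative only, not part of the patch.

/* Illustrative only: mirrors calc_effective_data_chunk_size() and the
 * zone alignment added to calc_available_free_space(). */
#include <stdint.h>
#include <stdio.h>

#define SZ_1M (1024ULL * 1024)
#define SZ_1G (1024ULL * SZ_1M)

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t chunk_size = SZ_1G;          /* space_info->chunk_size */
	uint64_t total_rw_bytes = 8 * SZ_1G;  /* fs_devices->total_rw_bytes */
	uint64_t zone_size = 256 * SZ_1M;     /* fs_info->zone_size (zoned) */

	/* min(chunk_size, 10% of the disk, 1G) -> 819 MiB here. */
	uint64_t eff = min_u64(min_u64(chunk_size, total_rw_bytes / 10), SZ_1G);

	/* Zoned: an estimate of 1000 MiB rounds down to whole zones -> 768 MiB. */
	uint64_t avail = 1000 * SZ_1M;
	uint64_t aligned = avail - (avail % zone_size);

	printf("effective data chunk size: %llu MiB\n",
	       (unsigned long long)(eff / SZ_1M));
	printf("zone-aligned avail: %llu MiB\n",
	       (unsigned long long)(aligned / SZ_1M));
	return 0;
}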
@@ -587,8 +615,6 @@ static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
return nr;
}
-#define EXTENT_SIZE_PER_ITEM SZ_256K
-
/*
* shrink metadata reservation for delalloc
*/
@@ -688,7 +714,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
skip_async:
loops++;
if (wait_ordered && !trans) {
- btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, items, NULL);
} else {
time_left = schedule_timeout_killable(1);
if (time_left)
@@ -807,14 +833,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
* because that does not wait for a transaction to fully commit
* (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
*/
- trans = btrfs_attach_transaction_barrier(root);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- if (ret == -ENOENT)
- ret = 0;
- break;
- }
- ret = btrfs_commit_transaction(trans);
+ ret = btrfs_commit_current_transaction(root);
break;
default:
ret = -ENOSPC;
@@ -1868,3 +1887,209 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
return free_bytes;
}
+
+static u64 calc_pct_ratio(u64 x, u64 y)
+{
+ int err;
+
+ if (!y)
+ return 0;
+again:
+ err = check_mul_overflow(100, x, &x);
+ if (err)
+ goto lose_precision;
+ return div64_u64(x, y);
+lose_precision:
+ x >>= 10;
+ y >>= 10;
+ if (!y)
+ y = 1;
+ goto again;
+}
+
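calc_pct_ratio() above computes 100 * x / y while surviving 100 * x overflowing a u64: on overflow it sheds ten bits of precision from both operands and retries. A standalone sketch of the same idea using the compiler's overflow builtin (what check_mul_overflow() reduces to); illustrative only.

/* Overflow-tolerant percentage, mirroring the retry loop above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pct_ratio(uint64_t x, uint64_t y)
{
	uint64_t prod;

	if (!y)
		return 0;
	while (__builtin_mul_overflow(x, 100ULL, &prod)) {
		/* 100 * x would overflow: drop 10 bits from both sides. */
		x >>= 10;
		y >>= 10;
		if (!y)
			y = 1;
	}
	return prod / y;
}

int main(void)
{
	/* Small case: 3 of 10 -> 30. */
	printf("%llu\n", (unsigned long long)pct_ratio(3, 10));
	/* Huge case: takes the precision-shedding path, still 70. */
	printf("%llu\n", (unsigned long long)pct_ratio(7ULL << 60, 10ULL << 60));
	return 0;
}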
+/*
+ * A reasonable buffer for unallocated space is 10 data block_groups.
+ * If we claw this back repeatedly, we can still achieve efficient
+ * utilization when near full, and not do too much reclaim while
+ * always maintaining a solid buffer for workloads that quickly
+ * allocate and pressure the unallocated space.
+ */
+static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
+{
+ u64 chunk_sz = calc_effective_data_chunk_size(fs_info);
+
+ return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz;
+}
+
+/*
+ * The fundamental goal of automatic reclaim is to protect the filesystem's
+ * unallocated space and thus minimize the probability of the filesystem going
+ * read only when a metadata allocation failure causes a transaction abort.
+ *
+ * However, relocations happen into the space_info's unused space, therefore
+ * automatic reclaim must also back off as that space runs low. There is no
+ * value in doing trivial "relocations" of re-writing the same block group
+ * into a fresh one.
+ *
+ * Furthermore, we want to avoid doing too much reclaim even if there are good
+ * candidates. This is because the allocator is pretty good at filling up the
+ * holes with writes. So we want to do just enough reclaim to try and stay
+ * safe from running out of unallocated space but not be wasteful about it.
+ *
+ * Therefore, the dynamic reclaim threshold is calculated as follows:
+ * - calculate a target unallocated amount of 10 block group sized chunks
+ * - ratchet up the intensity of reclaim depending on how far we are from
+ * that target by using a formula of unalloc / target to set the threshold.
+ *
+ * Typically with 10 block groups as the target, the discrete values this comes
+ * out to are 0, 10, 20, ... , 80, 90, and 99.
+ */
+static int calc_dynamic_reclaim_threshold(struct btrfs_space_info *space_info)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
+ u64 target = calc_unalloc_target(fs_info);
+ u64 alloc = space_info->total_bytes;
+ u64 used = btrfs_space_info_used(space_info, false);
+ u64 unused = alloc - used;
+ u64 want = target > unalloc ? target - unalloc : 0;
+ u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
+
+ /* If we have no unused space, don't bother, it won't work anyway. */
+ if (unused < data_chunk_size)
+ return 0;
+
+ /* Cast to int is OK because want <= target. */
+ return calc_pct_ratio(want, target);
+}
+
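A worked example of the threshold formula above, with a target of ten data-chunk-sized block groups (BTRFS_UNALLOC_BLOCK_GROUP_TARGET) and made-up sizes; illustrative only, not part of the patch.

/* 1 GiB effective chunk size -> 10 GiB unallocated target; being 3 GiB
 * short of that target yields a 30% reclaim threshold. */
#include <stdint.h>
#include <stdio.h>

#define SZ_1G (1024ULL * 1024 * 1024)

int main(void)
{
	uint64_t chunk_size = SZ_1G;       /* calc_effective_data_chunk_size() */
	uint64_t target = 10 * chunk_size; /* calc_unalloc_target() */
	uint64_t unalloc = 7 * SZ_1G;      /* fs_info->free_chunk_space */
	uint64_t want = target > unalloc ? target - unalloc : 0;

	printf("reclaim threshold: %llu%%\n",
	       (unsigned long long)(100 * want / target));
	return 0;
}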
+int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info)
+{
+ lockdep_assert_held(&space_info->lock);
+
+ if (READ_ONCE(space_info->dynamic_reclaim))
+ return calc_dynamic_reclaim_threshold(space_info);
+ return READ_ONCE(space_info->bg_reclaim_threshold);
+}
+
+/*
+ * Under "urgent" reclaim, we will reclaim even fresh block groups that have
+ * recently seen successful allocations, as we are desperate to reclaim
+ * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs.
+ */
+static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
+ u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);
+
+ return unalloc < data_chunk_size;
+}
+
+static int do_reclaim_sweep(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info, int raid)
+{
+ struct btrfs_block_group *bg;
+ int thresh_pct;
+ bool try_again = true;
+ bool urgent;
+
+ spin_lock(&space_info->lock);
+ urgent = is_reclaim_urgent(space_info);
+ thresh_pct = btrfs_calc_reclaim_threshold(space_info);
+ spin_unlock(&space_info->lock);
+
+ down_read(&space_info->groups_sem);
+again:
+ list_for_each_entry(bg, &space_info->block_groups[raid], list) {
+ u64 thresh;
+ bool reclaim = false;
+
+ btrfs_get_block_group(bg);
+ spin_lock(&bg->lock);
+ thresh = mult_perc(bg->length, thresh_pct);
+ if (bg->used < thresh && bg->reclaim_mark) {
+ try_again = false;
+ reclaim = true;
+ }
+ bg->reclaim_mark++;
+ spin_unlock(&bg->lock);
+ if (reclaim)
+ btrfs_mark_bg_to_reclaim(bg);
+ btrfs_put_block_group(bg);
+ }
+
+ /*
+ * In situations where we are very motivated to reclaim (low unallocated
+ * space), use two passes to make the reclaim mark check best effort.
+ *
+ * If there are any stale groups, we don't touch the fresher ones, but if
+ * we really need a block group, do take a fresh one.
+ */
+ if (try_again && urgent) {
+ try_again = false;
+ goto again;
+ }
+
+ up_read(&space_info->groups_sem);
+ return 0;
+}
+
+void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
+{
+ u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info);
+
+ lockdep_assert_held(&space_info->lock);
+ space_info->reclaimable_bytes += bytes;
+
+ if (space_info->reclaimable_bytes >= chunk_sz)
+ btrfs_set_periodic_reclaim_ready(space_info, true);
+}
+
+void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready)
+{
+ lockdep_assert_held(&space_info->lock);
+ if (!READ_ONCE(space_info->periodic_reclaim))
+ return;
+ if (ready != space_info->periodic_reclaim_ready) {
+ space_info->periodic_reclaim_ready = ready;
+ if (!ready)
+ space_info->reclaimable_bytes = 0;
+ }
+}
+
+bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
+{
+ bool ret;
+
+ if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ return false;
+ if (!READ_ONCE(space_info->periodic_reclaim))
+ return false;
+
+ spin_lock(&space_info->lock);
+ ret = space_info->periodic_reclaim_ready;
+ btrfs_set_periodic_reclaim_ready(space_info, false);
+ spin_unlock(&space_info->lock);
+
+ return ret;
+}
+
+int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
+{
+ int ret = 0;
+ int raid;
+ struct btrfs_space_info *space_info;
+
+ list_for_each_entry(space_info, &fs_info->space_info, list) {
+ if (!btrfs_should_periodic_reclaim(space_info))
+ continue;
+ for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
+ ret = do_reclaim_sweep(fs_info, space_info, raid);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
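The hooks above form a small handshake: freeing space accumulates in reclaimable_bytes and arms periodic_reclaim_ready once roughly one data chunk's worth has been freed; btrfs_reclaim_sweep() (run periodically, per the space-info.h comment below) consumes that flag and has do_reclaim_sweep() mark under-threshold block groups for reclaim. A runnable userspace sketch of just the two-pass selection logic, with made-up block groups; not kernel code.

/* Pass 1 only reclaims "stale" groups (reclaim_mark > 0); an urgent
 * second pass may also take a fresh group, mirroring do_reclaim_sweep(). */
#include <stdbool.h>
#include <stdio.h>

struct bg { unsigned long long used, length, reclaim_mark; };

int main(void)
{
	struct bg groups[] = {
		{ .used = 100, .length = 1024, .reclaim_mark = 0 }, /* fresh */
		{ .used = 200, .length = 1024, .reclaim_mark = 2 }, /* stale */
		{ .used = 900, .length = 1024, .reclaim_mark = 3 }, /* too full */
	};
	int thresh_pct = 30;
	bool urgent = false;   /* what is_reclaim_urgent() decides in the patch */
	bool try_again = true;

again:
	for (int i = 0; i < 3; i++) {
		unsigned long long thresh = groups[i].length * thresh_pct / 100;

		if (groups[i].used < thresh && groups[i].reclaim_mark) {
			try_again = false;
			printf("reclaim group %d\n", i); /* prints: group 1 */
		}
		groups[i].reclaim_mark++;
	}
	if (try_again && urgent) {
		try_again = false;
		goto again;
	}
	return 0;
}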
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index a733458fd13b..4db8a0267c16 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -94,6 +94,7 @@ enum btrfs_flush_state {
};
struct btrfs_space_info {
+ struct btrfs_fs_info *fs_info;
spinlock_t lock;
u64 total_bytes; /* total bytes in the space,
@@ -165,6 +166,47 @@ struct btrfs_space_info {
struct kobject kobj;
struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
+
+ /*
+ * Monotonically increasing counter of block group reclaim attempts
+ * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_count
+ */
+ u64 reclaim_count;
+
+ /*
+ * Monotonically increasing counter of reclaimed bytes
+ * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_bytes
+ */
+ u64 reclaim_bytes;
+
+ /*
+ * Monotonically increasing counter of reclaim errors
+ * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_errors
+ */
+ u64 reclaim_errors;
+
+ /*
+ * If true, use the dynamic reclaim threshold instead of the
+ * fixed bg_reclaim_threshold.
+ */
+ bool dynamic_reclaim;
+
+ /*
+ * Periodically check all block groups against the reclaim
+ * threshold in the cleaner thread.
+ */
+ bool periodic_reclaim;
+
+ /*
+ * Periodic reclaim should be a no-op if a space_info hasn't
+ * freed any space since the last time we tried.
+ */
+ bool periodic_reclaim_ready;
+
+ /*
+ * Net bytes freed or allocated since the last reclaim pass.
+ */
+ s64 reclaimable_bytes;
};
struct reserve_ticket {
@@ -247,4 +289,10 @@ void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
+void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
+void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
+bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
+int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info);
+int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
+
#endif /* BTRFS_SPACE_INFO_H */
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 54736f6238e6..8ddd5fcbeb93 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -74,7 +74,7 @@ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space
* mapping. And if page->mapping->host is a data inode, it's subpage.
* As we have ruled out the sectorsize >= PAGE_SIZE case already.
*/
- if (!mapping || !mapping->host || is_data_inode(mapping->host))
+ if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
return true;
/*
@@ -242,12 +242,12 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
#define subpage_calc_start_bit(fs_info, folio, name, start, len) \
({ \
- unsigned int start_bit; \
+ unsigned int __start_bit; \
\
btrfs_subpage_assert(fs_info, folio, start, len); \
- start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
- start_bit += fs_info->subpage_info->name##_offset; \
- start_bit; \
+ __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
+ __start_bit += fs_info->subpage_info->name##_offset; \
+ __start_bit; \
})
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
@@ -283,7 +283,7 @@ void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
bool last;
btrfs_subpage_assert(fs_info, folio, start, len);
- is_data = is_data_inode(folio->mapping->host);
+ is_data = is_data_inode(BTRFS_I(folio->mapping->host));
spin_lock_irqsave(&subpage->lock, flags);
@@ -703,19 +703,29 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
* Make sure not only the page dirty bit is cleared, but also subpage dirty bit
* is cleared.
*/
-void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
+void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage *subpage = folio_get_private(folio);
+ struct btrfs_subpage *subpage;
+ unsigned int start_bit;
+ unsigned int nbits;
+ unsigned long flags;
if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
return;
- ASSERT(!folio_test_dirty(folio));
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+ ASSERT(!folio_test_dirty(folio));
return;
+ }
- ASSERT(folio_test_private(folio) && folio_get_private(folio));
- ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
+ start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
+ nbits = len >> fs_info->sectorsize_bits;
+ subpage = folio_get_private(folio);
+ ASSERT(subpage);
+ spin_lock_irqsave(&subpage->lock, flags);
+ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ spin_unlock_irqrestore(&subpage->lock, flags);
}
/*
@@ -765,6 +775,130 @@ void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}
+/*
+ * This is for folio already locked by plain lock_page()/folio_lock(), which
+ * doesn't have any subpage awareness.
+ *
+ * This populates the involved subpage ranges so that subpage helpers can
+ * properly unlock them.
+ */
+void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage;
+ unsigned long flags;
+ unsigned int start_bit;
+ unsigned int nbits;
+ int ret;
+
+ ASSERT(folio_test_locked(folio));
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
+ return;
+
+ subpage = folio_get_private(folio);
+ start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
+ nbits = len >> fs_info->sectorsize_bits;
+ spin_lock_irqsave(&subpage->lock, flags);
+ /* Target range should not yet be locked. */
+ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ bitmap_set(subpage->bitmaps, start_bit, nbits);
+ ret = atomic_add_return(nbits, &subpage->writers);
+ ASSERT(ret <= fs_info->subpage_info->bitmap_nr_bits);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+/*
+ * Find any subpage writer locked range inside @folio, starting at file offset
+ * @search_start. The caller should ensure the folio is locked.
+ *
+ * Return true and update @found_start_ret and @found_len_ret to the first
+ * writer locked range.
+ * Return false if there is no writer locked range.
+ */
+bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 search_start,
+ u64 *found_start_ret, u32 *found_len_ret)
+{
+ struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
+ struct btrfs_subpage *subpage = folio_get_private(folio);
+ const unsigned int len = PAGE_SIZE - offset_in_page(search_start);
+ const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
+ locked, search_start, len);
+ const unsigned int locked_bitmap_start = subpage_info->locked_offset;
+ const unsigned int locked_bitmap_end = locked_bitmap_start +
+ subpage_info->bitmap_nr_bits;
+ unsigned long flags;
+ int first_zero;
+ int first_set;
+ bool found = false;
+
+ ASSERT(folio_test_locked(folio));
+ spin_lock_irqsave(&subpage->lock, flags);
+ first_set = find_next_bit(subpage->bitmaps, locked_bitmap_end, start_bit);
+ if (first_set >= locked_bitmap_end)
+ goto out;
+
+ found = true;
+
+ *found_start_ret = folio_pos(folio) +
+ ((first_set - locked_bitmap_start) << fs_info->sectorsize_bits);
+ /*
+ * Since @first_set is ensured to be smaller than locked_bitmap_end
+ * here, @found_start_ret should be inside the folio.
+ */
+ ASSERT(*found_start_ret < folio_pos(folio) + PAGE_SIZE);
+
+ first_zero = find_next_zero_bit(subpage->bitmaps, locked_bitmap_end, first_set);
+ *found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits;
+out:
+ spin_unlock_irqrestore(&subpage->lock, flags);
+ return found;
+}
+
+/*
+ * Unlike btrfs_folio_end_writer_lock() which unlocks a specified subpage range,
+ * this ends all writer locked ranges of a page.
+ *
+ * This is for the locked page of __extent_writepage(), as the locked page
+ * can contain several locked subpage ranges.
+ */
+void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio)
+{
+ struct btrfs_subpage *subpage = folio_get_private(folio);
+ u64 folio_start = folio_pos(folio);
+ u64 cur = folio_start;
+
+ ASSERT(folio_test_locked(folio));
+ if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+ folio_unlock(folio);
+ return;
+ }
+
+ /* The page has no new delalloc range locked on it. Just plain unlock. */
+ if (atomic_read(&subpage->writers) == 0) {
+ folio_unlock(folio);
+ return;
+ }
+ while (cur < folio_start + PAGE_SIZE) {
+ u64 found_start;
+ u32 found_len;
+ bool found;
+ bool last;
+
+ found = btrfs_subpage_find_writer_locked(fs_info, folio, cur,
+ &found_start, &found_len);
+ if (!found)
+ break;
+ last = btrfs_subpage_end_and_test_writer(fs_info, folio,
+ found_start, found_len);
+ if (last) {
+ folio_unlock(folio);
+ break;
+ }
+ cur = found_start + found_len;
+ }
+}
+
#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \
bitmap_cut(dst, subpage->bitmaps, 0, \
subpage_info->name##_offset, subpage_info->bitmap_nr_bits)
@@ -775,7 +909,6 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
struct btrfs_subpage *subpage;
unsigned long uptodate_bitmap;
- unsigned long error_bitmap;
unsigned long dirty_bitmap;
unsigned long writeback_bitmap;
unsigned long ordered_bitmap;
@@ -797,10 +930,9 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
dump_page(folio_page(folio, 0), "btrfs subpage dump");
btrfs_warn(fs_info,
-"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl error=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
+"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
start, len, folio_pos(folio),
subpage_info->bitmap_nr_bits, &uptodate_bitmap,
- subpage_info->bitmap_nr_bits, &error_bitmap,
subpage_info->bitmap_nr_bits, &dirty_bitmap,
subpage_info->bitmap_nr_bits, &writeback_bitmap,
subpage_info->bitmap_nr_bits, &ordered_bitmap,
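The writer-lock helpers above all operate on a per-folio "locked" bitmap with one bit per sector; finding a locked range is a find_next_bit()/find_next_zero_bit() pair. A standalone sketch of that lookup, assuming a 64K folio with 4K sectors (16 bits); illustrative only.

/* Mirrors the bit scan in btrfs_subpage_find_writer_locked(). */
#include <stdint.h>
#include <stdio.h>

static int next_bit(uint16_t map, int from, int want_set)
{
	for (int i = from; i < 16; i++)
		if (((map >> i) & 1) == want_set)
			return i;
	return 16;
}

int main(void)
{
	uint16_t locked = 0x03C0;  /* sectors 6-9 are writer locked */
	int first_set = next_bit(locked, 0, 1);

	if (first_set < 16) {
		int first_zero = next_bit(locked, first_set, 0);

		/* Prints: locked range at 24K, length 16K */
		printf("locked range at %dK, length %dK\n",
		       first_set * 4, (first_zero - first_set) * 4);
	}
	return 0;
}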
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index b6dc013b0fdc..249396e118d0 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -112,6 +112,12 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
+void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
+bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 search_start,
+ u64 *found_start_ret, u32 *found_len_ret);
+void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio);
/*
* Template for subpage related operations.
@@ -156,7 +162,8 @@ DECLARE_BTRFS_SUBPAGE_OPS(checked);
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
-void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio);
+void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f05cce7c8b8d..0eda8c21d861 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -34,6 +34,7 @@
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
+#include "direct-io.h"
#include "props.h"
#include "xattr.h"
#include "bio.h"
@@ -125,9 +126,6 @@ enum {
Opt_rescue,
Opt_usebackuproot,
Opt_nologreplay,
- Opt_ignorebadroots,
- Opt_ignoredatacsums,
- Opt_rescue_all,
/* Debugging options */
Opt_enospc_debug,
@@ -178,6 +176,8 @@ enum {
Opt_rescue_nologreplay,
Opt_rescue_ignorebadroots,
Opt_rescue_ignoredatacsums,
+ Opt_rescue_ignoremetacsums,
+ Opt_rescue_ignoresuperflags,
Opt_rescue_parameter_all,
};
@@ -187,7 +187,11 @@ static const struct constant_table btrfs_parameter_rescue[] = {
{ "ignorebadroots", Opt_rescue_ignorebadroots },
{ "ibadroots", Opt_rescue_ignorebadroots },
{ "ignoredatacsums", Opt_rescue_ignoredatacsums },
+ { "ignoremetacsums", Opt_rescue_ignoremetacsums},
+ { "ignoresuperflags", Opt_rescue_ignoresuperflags},
{ "idatacsums", Opt_rescue_ignoredatacsums },
+ { "imetacsums", Opt_rescue_ignoremetacsums},
+ { "isuperflags", Opt_rescue_ignoresuperflags},
{ "all", Opt_rescue_parameter_all },
{}
};
@@ -573,8 +577,16 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_rescue_ignoredatacsums:
btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
break;
+ case Opt_rescue_ignoremetacsums:
+ btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS);
+ break;
+ case Opt_rescue_ignoresuperflags:
+ btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS);
+ break;
case Opt_rescue_parameter_all:
btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
+ btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS);
+ btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS);
btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS);
btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
break;
@@ -629,7 +641,7 @@ static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE);
}
-static bool check_ro_option(struct btrfs_fs_info *fs_info,
+static bool check_ro_option(const struct btrfs_fs_info *fs_info,
unsigned long mount_opt, unsigned long opt,
const char *opt_name)
{
@@ -641,7 +653,7 @@ static bool check_ro_option(struct btrfs_fs_info *fs_info,
return false;
}
-bool btrfs_check_options(struct btrfs_fs_info *info, unsigned long *mount_opt,
+bool btrfs_check_options(const struct btrfs_fs_info *info, unsigned long *mount_opt,
unsigned long flags)
{
bool ret = true;
@@ -649,7 +661,9 @@ bool btrfs_check_options(struct btrfs_fs_info *info, unsigned long *mount_opt,
if (!(flags & SB_RDONLY) &&
(check_ro_option(info, *mount_opt, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
- check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums")))
+ check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums") ||
+ check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREMETACSUMS, "ignoremetacsums") ||
+ check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNORESUPERFLAGS, "ignoresuperflags")))
ret = false;
if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
@@ -949,7 +963,7 @@ static int btrfs_fill_super(struct super_block *sb,
return err;
}
- inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
+ inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
btrfs_handle_fs_error(fs_info, err, NULL);
@@ -983,7 +997,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return 0;
}
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
@@ -1065,6 +1079,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
print_rescue_option(seq, "ignorebadroots", &printed);
if (btrfs_test_opt(info, IGNOREDATACSUMS))
print_rescue_option(seq, "ignoredatacsums", &printed);
+ if (btrfs_test_opt(info, IGNOREMETACSUMS))
+ print_rescue_option(seq, "ignoremetacsums", &printed);
+ if (btrfs_test_opt(info, IGNORESUPERFLAGS))
+ print_rescue_option(seq, "ignoresuperflags", &printed);
if (btrfs_test_opt(info, FLUSHONCOMMIT))
seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD_SYNC))
@@ -1422,6 +1440,8 @@ static void btrfs_emit_options(struct btrfs_fs_info *info,
btrfs_info_if_set(info, old, USEBACKUPROOT, "trying to use backup root at mount time");
btrfs_info_if_set(info, old, IGNOREBADROOTS, "ignoring bad roots");
btrfs_info_if_set(info, old, IGNOREDATACSUMS, "ignoring data csums");
+ btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums");
+ btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags");
btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
@@ -2257,9 +2277,7 @@ out:
static int btrfs_freeze(struct super_block *sb)
{
- struct btrfs_trans_handle *trans;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- struct btrfs_root *root = fs_info->tree_root;
set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
/*
@@ -2268,14 +2286,7 @@ static int btrfs_freeze(struct super_block *sb)
* we want to avoid on a frozen filesystem), or do the commit
* ourselves.
*/
- trans = btrfs_attach_transaction_barrier(root);
- if (IS_ERR(trans)) {
- /* no transaction, don't bother */
- if (PTR_ERR(trans) == -ENOENT)
- return 0;
- return PTR_ERR(trans);
- }
- return btrfs_commit_transaction(trans);
+ return btrfs_commit_current_transaction(fs_info->tree_root);
}
static int check_dev_super(struct btrfs_device *dev)
@@ -2499,6 +2510,9 @@ static const struct init_sequence mod_init_seq[] = {
.init_func = btrfs_init_cachep,
.exit_func = btrfs_destroy_cachep,
}, {
+ .init_func = btrfs_init_dio,
+ .exit_func = btrfs_destroy_dio,
+ }, {
.init_func = btrfs_transaction_init,
.exit_func = btrfs_transaction_exit,
}, {
@@ -2590,6 +2604,7 @@ static int __init init_btrfs_fs(void)
late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)
+MODULE_DESCRIPTION("B-Tree File System (BTRFS)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
MODULE_SOFTDEP("pre: xxhash64");
diff --git a/fs/btrfs/super.h b/fs/btrfs/super.h
index cbcab434b5ec..d2b8ebb46bc6 100644
--- a/fs/btrfs/super.h
+++ b/fs/btrfs/super.h
@@ -10,7 +10,7 @@
struct super_block;
struct btrfs_fs_info;
-bool btrfs_check_options(struct btrfs_fs_info *info, unsigned long *mount_opt,
+bool btrfs_check_options(const struct btrfs_fs_info *info, unsigned long *mount_opt,
unsigned long flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index af545b6b1190..03926ad467c9 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -385,6 +385,8 @@ static const char *rescue_opts[] = {
"nologreplay",
"ignorebadroots",
"ignoredatacsums",
+ "ignoremetacsums",
+ "ignoresuperflags",
"all",
};
@@ -894,6 +896,9 @@ SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(bytes_zone_unusable);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
+SPACE_INFO_ATTR(reclaim_count);
+SPACE_INFO_ATTR(reclaim_bytes);
+SPACE_INFO_ATTR(reclaim_errors);
BTRFS_ATTR_RW(space_info, chunk_size, btrfs_chunk_size_show, btrfs_chunk_size_store);
BTRFS_ATTR(space_info, size_classes, btrfs_size_classes_show);
@@ -902,8 +907,12 @@ static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj,
char *buf)
{
struct btrfs_space_info *space_info = to_space_info(kobj);
+ ssize_t ret;
- return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->bg_reclaim_threshold));
+ spin_lock(&space_info->lock);
+ ret = sysfs_emit(buf, "%d\n", btrfs_calc_reclaim_threshold(space_info));
+ spin_unlock(&space_info->lock);
+ return ret;
}
static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj,
@@ -914,6 +923,9 @@ static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj,
int thresh;
int ret;
+ if (READ_ONCE(space_info->dynamic_reclaim))
+ return -EINVAL;
+
ret = kstrtoint(buf, 10, &thresh);
if (ret)
return ret;
@@ -930,6 +942,72 @@ BTRFS_ATTR_RW(space_info, bg_reclaim_threshold,
btrfs_sinfo_bg_reclaim_threshold_show,
btrfs_sinfo_bg_reclaim_threshold_store);
+static ssize_t btrfs_sinfo_dynamic_reclaim_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+
+ return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->dynamic_reclaim));
+}
+
+static ssize_t btrfs_sinfo_dynamic_reclaim_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ int dynamic_reclaim;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &dynamic_reclaim);
+ if (ret)
+ return ret;
+
+ if (dynamic_reclaim < 0)
+ return -EINVAL;
+
+ WRITE_ONCE(space_info->dynamic_reclaim, dynamic_reclaim != 0);
+
+ return len;
+}
+
+BTRFS_ATTR_RW(space_info, dynamic_reclaim,
+ btrfs_sinfo_dynamic_reclaim_show,
+ btrfs_sinfo_dynamic_reclaim_store);
+
+static ssize_t btrfs_sinfo_periodic_reclaim_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+
+ return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->periodic_reclaim));
+}
+
+static ssize_t btrfs_sinfo_periodic_reclaim_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_space_info *space_info = to_space_info(kobj);
+ int periodic_reclaim;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &periodic_reclaim);
+ if (ret)
+ return ret;
+
+ if (periodic_reclaim < 0)
+ return -EINVAL;
+
+ WRITE_ONCE(space_info->periodic_reclaim, periodic_reclaim != 0);
+
+ return len;
+}
+
+BTRFS_ATTR_RW(space_info, periodic_reclaim,
+ btrfs_sinfo_periodic_reclaim_show,
+ btrfs_sinfo_periodic_reclaim_store);
+
/*
* Allocation information about block group types.
*
@@ -947,8 +1025,13 @@ static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(space_info, disk_used),
BTRFS_ATTR_PTR(space_info, disk_total),
BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold),
+ BTRFS_ATTR_PTR(space_info, dynamic_reclaim),
BTRFS_ATTR_PTR(space_info, chunk_size),
BTRFS_ATTR_PTR(space_info, size_classes),
+ BTRFS_ATTR_PTR(space_info, reclaim_count),
+ BTRFS_ATTR_PTR(space_info, reclaim_bytes),
+ BTRFS_ATTR_PTR(space_info, reclaim_errors),
+ BTRFS_ATTR_PTR(space_info, periodic_reclaim),
#ifdef CONFIG_BTRFS_DEBUG
BTRFS_ATTR_PTR(space_info, force_chunk_alloc),
#endif
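The new space_info attributes registered above surface as plain files under the per-filesystem sysfs directory. A minimal userspace reader for one of them; the UUID component is a placeholder and the "data" type directory is an assumption, with the path layout taken from the comments added in space-info.h.

/* Hedged usage sketch: read one of the new reclaim counters. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/fs/btrfs/<uuid>/allocation/data/reclaim_count";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("reclaim_count: %s", buf);
	fclose(f);
	return 0;
}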
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index dce0387ef155..ce50847e1e01 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -61,10 +61,7 @@ struct inode *btrfs_new_test_inode(void)
return NULL;
inode->i_mode = S_IFREG;
- inode->i_ino = BTRFS_FIRST_FREE_OBJECTID;
- BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
- BTRFS_I(inode)->location.offset = 0;
+ btrfs_set_inode_number(BTRFS_I(inode), BTRFS_FIRST_FREE_OBJECTID);
inode_init_owner(&nop_mnt_idmap, inode, NULL, S_IFREG);
return inode;
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index ba36794ba2d5..ebec4ab361b8 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -19,8 +19,8 @@ static int free_extent_map_tree(struct btrfs_inode *inode)
int ret = 0;
write_lock(&em_tree->lock);
- while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
- node = rb_first_cached(&em_tree->map);
+ while (!RB_EMPTY_ROOT(&em_tree->root)) {
+ node = rb_first(&em_tree->root);
em = rb_entry(node, struct extent_map, rb_node);
remove_extent_mapping(inode, em);
@@ -28,9 +28,10 @@ static int free_extent_map_tree(struct btrfs_inode *inode)
if (refcount_read(&em->refs) != 1) {
ret = -EINVAL;
test_err(
-"em leak: em (start %llu len %llu block_start %llu block_len %llu) refs %d",
- em->start, em->len, em->block_start,
- em->block_len, refcount_read(&em->refs));
+"em leak: em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu offset %llu) refs %d",
+ em->start, em->len, em->disk_bytenr,
+ em->disk_num_bytes, em->offset,
+ refcount_read(&em->refs));
refcount_set(&em->refs, 1);
}
@@ -76,8 +77,9 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
/* Add [0, 16K) */
em->start = 0;
em->len = SZ_16K;
- em->block_start = 0;
- em->block_len = SZ_16K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -97,8 +99,9 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
em->start = SZ_16K;
em->len = SZ_4K;
- em->block_start = SZ_32K; /* avoid merging */
- em->block_len = SZ_4K;
+ em->disk_bytenr = SZ_32K; /* avoid merging */
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_4K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -118,8 +121,9 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
/* Add [0, 8K), should return [0, 16K) instead. */
em->start = start;
em->len = len;
- em->block_start = start;
- em->block_len = len;
+ em->disk_bytenr = start;
+ em->disk_num_bytes = len;
+ em->ram_bytes = len;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -134,11 +138,11 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
goto out;
}
if (em->start != 0 || extent_map_end(em) != SZ_16K ||
- em->block_start != 0 || em->block_len != SZ_16K) {
+ em->disk_bytenr != 0 || em->disk_num_bytes != SZ_16K) {
test_err(
-"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
+"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu",
start, start + len, ret, em->start, em->len,
- em->block_start, em->block_len);
+ em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
free_extent_map(em);
@@ -172,8 +176,9 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
/* Add [0, 1K) */
em->start = 0;
em->len = SZ_1K;
- em->block_start = EXTENT_MAP_INLINE;
- em->block_len = (u64)-1;
+ em->disk_bytenr = EXTENT_MAP_INLINE;
+ em->disk_num_bytes = 0;
+ em->ram_bytes = SZ_1K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -193,8 +198,9 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
em->start = SZ_4K;
em->len = SZ_4K;
- em->block_start = SZ_4K;
- em->block_len = SZ_4K;
+ em->disk_bytenr = SZ_4K;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_4K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -214,8 +220,9 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
/* Add [0, 1K) */
em->start = 0;
em->len = SZ_1K;
- em->block_start = EXTENT_MAP_INLINE;
- em->block_len = (u64)-1;
+ em->disk_bytenr = EXTENT_MAP_INLINE;
+ em->disk_num_bytes = 0;
+ em->ram_bytes = SZ_1K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -229,11 +236,10 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
goto out;
}
if (em->start != 0 || extent_map_end(em) != SZ_1K ||
- em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1) {
+ em->disk_bytenr != EXTENT_MAP_INLINE) {
test_err(
-"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
- ret, em->start, em->len, em->block_start,
- em->block_len);
+"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu",
+ ret, em->start, em->len, em->disk_bytenr);
ret = -EINVAL;
}
free_extent_map(em);
@@ -263,8 +269,9 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
/* Add [4K, 8K) */
em->start = SZ_4K;
em->len = SZ_4K;
- em->block_start = SZ_4K;
- em->block_len = SZ_4K;
+ em->disk_bytenr = SZ_4K;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_4K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -284,8 +291,9 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
/* Add [0, 16K) */
em->start = 0;
em->len = SZ_16K;
- em->block_start = 0;
- em->block_len = SZ_16K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
@@ -305,11 +313,11 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
* em->start.
*/
if (start < em->start || start + len > extent_map_end(em) ||
- em->start != em->block_start || em->len != em->block_len) {
+ em->start != extent_map_block_start(em)) {
test_err(
-"case3 [%llu %llu): ret %d em (start %llu len %llu block_start %llu block_len %llu)",
+"case3 [%llu %llu): ret %d em (start %llu len %llu disk_bytenr %llu block_len %llu)",
start, start + len, ret, em->start, em->len,
- em->block_start, em->block_len);
+ em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
free_extent_map(em);
@@ -370,8 +378,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
/* Add [0K, 8K) */
em->start = 0;
em->len = SZ_8K;
- em->block_start = 0;
- em->block_len = SZ_8K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_8K;
+ em->ram_bytes = SZ_8K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -391,8 +400,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
/* Add [8K, 32K) */
em->start = SZ_8K;
em->len = 24 * SZ_1K;
- em->block_start = SZ_16K; /* avoid merging */
- em->block_len = 24 * SZ_1K;
+ em->disk_bytenr = SZ_16K; /* avoid merging */
+ em->disk_num_bytes = 24 * SZ_1K;
+ em->ram_bytes = 24 * SZ_1K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -411,8 +421,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
/* Add [0K, 32K) */
em->start = 0;
em->len = SZ_32K;
- em->block_start = 0;
- em->block_len = SZ_32K;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_32K;
+ em->ram_bytes = SZ_32K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
@@ -429,9 +440,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
}
if (start < em->start || start + len > extent_map_end(em)) {
test_err(
-"case4 [%llu %llu): ret %d, added wrong em (start %llu len %llu block_start %llu block_len %llu)",
- start, start + len, ret, em->start, em->len, em->block_start,
- em->block_len);
+"case4 [%llu %llu): ret %d, added wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu)",
+ start, start + len, ret, em->start, em->len,
+ em->disk_bytenr, em->disk_num_bytes);
ret = -EINVAL;
}
free_extent_map(em);
@@ -495,8 +506,9 @@ static int add_compressed_extent(struct btrfs_inode *inode,
em->start = start;
em->len = len;
- em->block_start = block_start;
- em->block_len = SZ_4K;
+ em->disk_bytenr = block_start;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = len;
em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
@@ -551,7 +563,7 @@ static int validate_range(struct extent_map_tree *em_tree, int index)
struct rb_node *n;
int i;
- for (i = 0, n = rb_first_cached(&em_tree->map);
+ for (i = 0, n = rb_first(&em_tree->root);
valid_ranges[index][i].len && n;
i++, n = rb_next(n)) {
struct extent_map *entry = rb_entry(n, struct extent_map, rb_node);
@@ -716,8 +728,9 @@ static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
em->start = SZ_4K;
em->len = SZ_4K;
- em->block_start = SZ_16K;
- em->block_len = SZ_16K;
+ em->disk_bytenr = SZ_16K;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, 0, SZ_8K);
write_unlock(&em_tree->lock);
@@ -769,9 +782,10 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
/* [0, 16K), pinned */
em->start = 0;
em->len = SZ_16K;
- em->block_start = 0;
- em->block_len = SZ_4K;
- em->flags |= EXTENT_FLAG_PINNED;
+ em->disk_bytenr = 0;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_16K;
+ em->flags |= (EXTENT_FLAG_PINNED | EXTENT_FLAG_COMPRESS_ZLIB);
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -791,8 +805,9 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
/* [32K, 48K), not pinned */
em->start = SZ_32K;
em->len = SZ_16K;
- em->block_start = SZ_32K;
- em->block_len = SZ_16K;
+ em->disk_bytenr = SZ_32K;
+ em->disk_num_bytes = SZ_16K;
+ em->ram_bytes = SZ_16K;
write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
@@ -855,8 +870,9 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
goto out;
}
- if (em->block_start != SZ_32K + SZ_4K) {
- test_err("em->block_start is %llu, expected 36K", em->block_start);
+ if (extent_map_block_start(em) != SZ_32K + SZ_4K) {
+ test_err("em->block_start is %llu, expected 36K",
+ extent_map_block_start(em));
goto out;
}
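The block_start/block_len conversions above (and throughout inode-tests.c below) all lean on the same relations between the new extent_map fields, which the rewritten assertions spell out: the mapped range's on-disk block start is disk_bytenr + offset, the logical start of the whole file extent (the old orig_start) is start - offset, and ram_bytes carries the uncompressed length. A small sketch with made-up values; illustrative only.

/* Second half of a 32K extent that sits at disk offset 1M and begins at
 * file offset 64K, mapped at file offset 80K. */
#include <stdint.h>
#include <stdio.h>

struct em_sketch {
	uint64_t start;          /* logical offset of this mapping      */
	uint64_t len;            /* logical length of this mapping      */
	uint64_t disk_bytenr;    /* start of the on-disk extent         */
	uint64_t disk_num_bytes; /* on-disk length of the whole extent  */
	uint64_t offset;         /* where this mapping begins inside it */
	uint64_t ram_bytes;      /* uncompressed length of the extent   */
};

int main(void)
{
	struct em_sketch em = {
		.start = 80 * 1024, .len = 16 * 1024,
		.disk_bytenr = 1024 * 1024, .disk_num_bytes = 32 * 1024,
		.offset = 16 * 1024, .ram_bytes = 32 * 1024,
	};

	printf("block start: %llu  (disk_bytenr + offset)\n",
	       (unsigned long long)(em.disk_bytenr + em.offset));
	printf("orig start : %llu  (start - offset)\n",
	       (unsigned long long)(em.start - em.offset));
	return 0;
}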
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 99da9d34b77a..3ea3bc2225fe 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -117,7 +117,7 @@ static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
/* Now for a regular extent */
insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
- disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
+ disk_bytenr, sectorsize - 1, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
disk_bytenr += sectorsize;
offset += sectorsize - 1;
@@ -264,8 +264,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
free_extent_map(em);
@@ -283,8 +283,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_INLINE) {
- test_err("expected an inline, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_INLINE) {
+ test_err("expected an inline, got %llu", em->disk_bytenr);
goto out;
}
@@ -321,8 +321,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 4) {
@@ -344,8 +344,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize - 1) {
@@ -358,9 +358,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
@@ -372,8 +371,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -386,12 +385,11 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- disk_bytenr = em->block_start;
+ disk_bytenr = extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
free_extent_map(em);
@@ -401,8 +399,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -423,8 +421,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -437,15 +435,15 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("wrong orig offset, want %llu, have %llu",
- orig_start, em->orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("wrong offset, em->start=%llu em->offset=%llu orig_start=%llu",
+ em->start, em->offset, orig_start);
goto out;
}
disk_bytenr += (em->start - orig_start);
- if (em->block_start != disk_bytenr) {
+ if (extent_map_block_start(em) != disk_bytenr) {
test_err("wrong block start, want %llu, have %llu",
- disk_bytenr, em->block_start);
+ disk_bytenr, extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
@@ -457,8 +455,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -472,9 +470,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
prealloc_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
@@ -486,8 +483,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -501,12 +498,11 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
prealloc_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
- disk_bytenr = em->block_start;
+ disk_bytenr = extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
free_extent_map(em);
@@ -516,8 +512,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_HOLE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_HOLE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -530,15 +526,14 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("unexpected orig offset, wanted %llu, have %llu",
- orig_start, em->orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("unexpected offset, wanted %llu, have %llu",
+ em->start - orig_start, em->offset);
goto out;
}
- if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+ if (extent_map_block_start(em) != disk_bytenr + em->offset) {
test_err("unexpected block start, wanted %llu, have %llu",
- disk_bytenr + (em->start - em->orig_start),
- em->block_start);
+ disk_bytenr + em->offset, extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
@@ -549,8 +544,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -564,15 +559,14 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
prealloc_only, em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("wrong orig offset, want %llu, have %llu", orig_start,
- em->orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("wrong offset, em->start=%llu em->offset=%llu orig_start=%llu",
+ em->start, em->offset, orig_start);
goto out;
}
- if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+ if (extent_map_block_start(em) != disk_bytenr + em->offset) {
test_err("unexpected block start, wanted %llu, have %llu",
- disk_bytenr + (em->start - em->orig_start),
- em->block_start);
+ disk_bytenr + em->offset, extent_map_block_start(em));
goto out;
}
offset = em->start + em->len;
@@ -584,8 +578,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -599,9 +593,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
compressed_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu",
- em->start, em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
@@ -618,8 +611,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -633,9 +626,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
compressed_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu",
- em->start, em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
@@ -643,7 +635,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
BTRFS_COMPRESS_ZLIB, extent_map_compression(em));
goto out;
}
- disk_bytenr = em->block_start;
+ disk_bytenr = extent_map_block_start(em);
orig_start = em->start;
offset = em->start + em->len;
free_extent_map(em);
@@ -653,8 +645,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -667,9 +659,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
@@ -680,9 +671,9 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != disk_bytenr) {
+ if (extent_map_block_start(em) != disk_bytenr) {
test_err("block start does not match, want %llu got %llu",
- disk_bytenr, em->block_start);
+ disk_bytenr, extent_map_block_start(em));
goto out;
}
if (em->start != offset || em->len != 2 * sectorsize) {
@@ -696,9 +687,9 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
compressed_only, em->flags);
goto out;
}
- if (em->orig_start != orig_start) {
- test_err("wrong orig offset, want %llu, have %llu",
- em->start, orig_start);
+ if (em->start - em->offset != orig_start) {
+ test_err("wrong offset, em->start=%llu em->offset=%llu orig_start=%llu",
+ em->start, em->offset, orig_start);
goto out;
}
if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
@@ -715,8 +706,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -729,9 +720,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
@@ -742,8 +732,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole extent, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole extent, got %llu", em->disk_bytenr);
goto out;
}
/*
@@ -762,9 +752,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
vacancy_only, em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong offset, want 0, have %llu", em->offset);
goto out;
}
offset = em->start + em->len;
@@ -775,8 +764,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != offset || em->len != sectorsize) {
@@ -789,9 +778,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_err("unexpected flags set, want 0 have %u", em->flags);
goto out;
}
- if (em->orig_start != em->start) {
- test_err("wrong orig offset, want %llu, have %llu", em->start,
- em->orig_start);
+ if (em->offset != 0) {
+ test_err("wrong orig offset, want 0, have %llu", em->offset);
goto out;
}
ret = 0;
@@ -855,8 +843,8 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != EXTENT_MAP_HOLE) {
- test_err("expected a hole, got %llu", em->block_start);
+ if (em->disk_bytenr != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->disk_bytenr);
goto out;
}
if (em->start != 0 || em->len != sectorsize) {
@@ -877,8 +865,8 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
test_err("got an error when we shouldn't have");
goto out;
}
- if (em->block_start != sectorsize) {
- test_err("expected a real extent, got %llu", em->block_start);
+ if (extent_map_block_start(em) != sectorsize) {
+ test_err("expected a real extent, got %llu", extent_map_block_start(em));
goto out;
}
if (em->start != sectorsize || em->len != sectorsize) {
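The test changes above follow the extent_map rework: the block_start and orig_start members are gone, and the assertions derive a real extent's block start from disk_bytenr plus the file-extent offset. A minimal sketch of that relationship for an uncompressed, on-disk extent (illustrative only; the real accessor is extent_map_block_start() and also handles compressed extents and sentinel values):

	/*
	 * Illustrative helper, not the kernel's definition: for an
	 * uncompressed extent backed by disk space, the block start is the
	 * start of the on-disk extent plus the offset into it.
	 */
	static u64 example_block_start(const struct extent_map *em)
	{
		/* Holes and inline extents keep their sentinel disk_bytenr. */
		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE)
			return em->disk_bytenr;
		return em->disk_bytenr + em->offset;
	}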
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3388c836b9a5..5e6fff8e1003 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -405,7 +405,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
int ret = 0;
if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- root->last_trans < trans->transid) || force) {
+ btrfs_get_root_last_trans(root) < trans->transid) || force) {
WARN_ON(!force && root->commit_root != root->node);
/*
@@ -421,7 +421,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
smp_wmb();
spin_lock(&fs_info->fs_roots_radix_lock);
- if (root->last_trans == trans->transid && !force) {
+ if (btrfs_get_root_last_trans(root) == trans->transid && !force) {
spin_unlock(&fs_info->fs_roots_radix_lock);
return 0;
}
@@ -429,7 +429,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
(unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&fs_info->fs_roots_radix_lock);
- root->last_trans = trans->transid;
+ btrfs_set_root_last_trans(root, trans->transid);
/* this is pretty tricky. We don't want to
* take the relocation lock in btrfs_record_root_in_trans
@@ -491,7 +491,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
* and barriers
*/
smp_rmb();
- if (root->last_trans == trans->transid &&
+ if (btrfs_get_root_last_trans(root) == trans->transid &&
!test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
return 0;
@@ -1637,7 +1637,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_root *root = pending->root;
struct btrfs_root *parent_root;
struct btrfs_block_rsv *rsv;
- struct inode *parent_inode = pending->dir;
+ struct inode *parent_inode = &pending->dir->vfs_inode;
struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
struct extent_buffer *tmp;
@@ -1989,6 +1989,25 @@ void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
btrfs_put_transaction(cur_trans);
}
+/*
+ * If there is a running transaction, commit it, or if it is already
+ * committing, wait for its commit to complete. Does not start and commit a
+ * new transaction if there isn't one running.
+ */
+int btrfs_commit_current_transaction(struct btrfs_root *root)
+{
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_attach_transaction_barrier(root);
+ if (IS_ERR(trans)) {
+ int ret = PTR_ERR(trans);
+
+ return (ret == -ENOENT) ? 0 : ret;
+ }
+
+ return btrfs_commit_transaction(trans);
+}
+
static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -2110,7 +2129,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
}
/*
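The btrfs_commit_current_transaction() helper added above wraps the attach-then-commit pattern, treating -ENOENT from btrfs_attach_transaction_barrier() (no transaction running) as success. A minimal caller sketch under that assumption; the call site below is hypothetical:

	/* Hypothetical call site: flush the running transaction, if any. */
	static int example_flush_current_transaction(struct btrfs_fs_info *fs_info)
	{
		/*
		 * Returns 0 both when the commit succeeds and when there was
		 * no running transaction to commit.
		 */
		return btrfs_commit_current_transaction(fs_info->tree_root);
	}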
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 4e451ab173b1..98c03ddc760b 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -172,7 +172,7 @@ struct btrfs_trans_handle {
struct btrfs_pending_snapshot {
struct dentry *dentry;
- struct inode *dir;
+ struct btrfs_inode *dir;
struct btrfs_root *root;
struct btrfs_root_item *root_item;
struct btrfs_root *snap;
@@ -229,11 +229,11 @@ bool __cold abort_should_print_stack(int error);
*/
#define btrfs_abort_transaction(trans, error) \
do { \
- bool first = false; \
+ bool __first = false; \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
- first = true; \
+ __first = true; \
if (WARN(abort_should_print_stack(error), \
KERN_ERR \
"BTRFS: Transaction aborted (error %d)\n", \
@@ -246,7 +246,7 @@ do { \
} \
} \
__btrfs_abort_transaction((trans), __func__, \
- __LINE__, (error), first); \
+ __LINE__, (error), __first); \
} while (0)
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
@@ -268,6 +268,7 @@ void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
+int btrfs_commit_current_transaction(struct btrfs_root *root);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
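The rename of the btrfs_abort_transaction() local from first to __first is presumably macro hygiene: a plain name declared inside a macro can shadow a variable at the expansion site. A hedged sketch of the collision the underscore prefix avoids; the caller below is hypothetical:

	/*
	 * Hypothetical caller: if the macro kept a local named "first", the
	 * argument expression below would be evaluated against the macro's
	 * own variable (just initialized to false) rather than the caller's.
	 */
	static void example_abort(struct btrfs_trans_handle *trans, bool first)
	{
		btrfs_abort_transaction(trans, first ? -EIO : -ENOMEM);
	}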
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index a2c3651a3d8f..6388786fd8b5 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -340,6 +340,24 @@ static int check_extent_data_item(struct extent_buffer *leaf,
}
}
+ /*
+ * For non-compressed data extents, ram_bytes should match the extent's
+ * disk_num_bytes.
+ * However ram_bytes is not really used in this case, so the check is
+ * only enabled for DEBUG builds to help developers catch unexpected
+ * behavior.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_DEBUG) &&
+ btrfs_file_extent_compression(leaf, fi) == BTRFS_COMPRESS_NONE &&
+ btrfs_file_extent_disk_bytenr(leaf, fi)) {
+ if (WARN_ON(btrfs_file_extent_ram_bytes(leaf, fi) !=
+ btrfs_file_extent_disk_num_bytes(leaf, fi)))
+ file_extent_err(leaf, slot,
+"mismatch ram_bytes (%llu) and disk_num_bytes (%llu) for non-compressed extent",
+ btrfs_file_extent_ram_bytes(leaf, fi),
+ btrfs_file_extent_disk_num_bytes(leaf, fi));
+ }
+
return 0;
}
@@ -1682,9 +1700,6 @@ static int check_inode_ref(struct extent_buffer *leaf,
static int check_raid_stripe_extent(const struct extent_buffer *leaf,
const struct btrfs_key *key, int slot)
{
- struct btrfs_stripe_extent *stripe_extent =
- btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
-
if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
generic_err(leaf, slot,
"invalid key objectid for raid stripe extent, have %llu expect aligned to %u",
@@ -1698,22 +1713,6 @@ static int check_raid_stripe_extent(const struct extent_buffer *leaf,
return -EUCLEAN;
}
- switch (btrfs_stripe_extent_encoding(leaf, stripe_extent)) {
- case BTRFS_STRIPE_RAID0:
- case BTRFS_STRIPE_RAID1:
- case BTRFS_STRIPE_DUP:
- case BTRFS_STRIPE_RAID10:
- case BTRFS_STRIPE_RAID5:
- case BTRFS_STRIPE_RAID6:
- case BTRFS_STRIPE_RAID1C3:
- case BTRFS_STRIPE_RAID1C4:
- break;
- default:
- generic_err(leaf, slot, "invalid raid stripe encoding %u",
- btrfs_stripe_extent_encoding(leaf, stripe_extent));
- return -EUCLEAN;
- }
-
return 0;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5146387b416b..f0cf8ce26f01 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -138,6 +138,25 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
* and once to do all the other items.
*/
+static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
+{
+ unsigned int nofs_flag;
+ struct inode *inode;
+
+ /*
+ * We're holding a transaction handle whether we are logging or
+ * replaying a log tree, so we must make sure NOFS semantics apply
+ * because btrfs_alloc_inode() may be triggered and it uses GFP_KERNEL
+ * to allocate an inode, which can recurse back into the filesystem and
+ * attempt a transaction commit, resulting in a deadlock.
+ */
+ nofs_flag = memalloc_nofs_save();
+ inode = btrfs_iget(objectid, root);
+ memalloc_nofs_restore(nofs_flag);
+
+ return inode;
+}
+
/*
* start a sub transaction and setup the log tree
* this increments the log tree writer count to make the people
@@ -600,7 +619,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
{
struct inode *inode;
- inode = btrfs_iget(root->fs_info->sb, objectid, root);
+ inode = btrfs_iget_logging(objectid, root);
if (IS_ERR(inode))
inode = NULL;
return inode;
@@ -1625,7 +1644,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (ret)
goto out;
}
- BTRFS_I(inode)->index_cnt = (u64)-1;
+ if (S_ISDIR(inode->i_mode))
+ BTRFS_I(inode)->index_cnt = (u64)-1;
if (inode->i_nlink == 0) {
if (S_ISDIR(inode->i_mode)) {
@@ -2820,7 +2840,7 @@ static void wait_for_writer(struct btrfs_root *root)
finish_wait(&root->log_writer_wait, &wait);
}
-void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode)
+void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct btrfs_inode *inode)
{
ctx->log_ret = 0;
ctx->log_transid = 0;
@@ -2839,7 +2859,7 @@ void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode)
void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx)
{
- struct btrfs_inode *inode = BTRFS_I(ctx->inode);
+ struct btrfs_inode *inode = ctx->inode;
if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
!test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
@@ -2857,7 +2877,7 @@ void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_extent *tmp;
- ASSERT(inode_is_locked(ctx->inode));
+ ASSERT(inode_is_locked(&ctx->inode->vfs_inode));
list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
list_del_init(&ordered->log_list);
@@ -4234,8 +4254,10 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, bool inode_item_dropped)
{
struct btrfs_inode_item *inode_item;
+ struct btrfs_key key;
int ret;
+ btrfs_get_inode_key(inode, &key);
/*
* If we are doing a fast fsync and the inode was logged before in the
* current transaction, then we know the inode was previously logged and
@@ -4247,7 +4269,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
* already exists can also result in unnecessarily splitting a leaf.
*/
if (!inode_item_dropped && inode->logged_trans == trans->transid) {
- ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
+ ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
ASSERT(ret <= 0);
if (ret > 0)
ret = -ENOENT;
@@ -4261,7 +4283,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
* the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime
* flags and set ->logged_trans to 0.
*/
- ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
+ ret = btrfs_insert_empty_item(trans, log, path, &key,
sizeof(*inode_item));
ASSERT(ret != -EEXIST);
}
@@ -4575,6 +4597,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
{
struct btrfs_ordered_extent *ordered;
struct btrfs_root *csum_root;
+ u64 block_start;
u64 csum_offset;
u64 csum_len;
u64 mod_start = em->start;
@@ -4584,7 +4607,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
if (inode->flags & BTRFS_INODE_NODATASUM ||
(em->flags & EXTENT_FLAG_PREALLOC) ||
- em->block_start == EXTENT_MAP_HOLE)
+ em->disk_bytenr == EXTENT_MAP_HOLE)
return 0;
list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
@@ -4648,17 +4671,18 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
/* If we're compressed we have to save the entire range of csums. */
if (extent_map_is_compressed(em)) {
csum_offset = 0;
- csum_len = max(em->block_len, em->orig_block_len);
+ csum_len = em->disk_num_bytes;
} else {
csum_offset = mod_start - em->start;
csum_len = mod_len;
}
/* block start is already adjusted for the file extent offset. */
- csum_root = btrfs_csum_root(trans->fs_info, em->block_start);
- ret = btrfs_lookup_csums_list(csum_root, em->block_start + csum_offset,
- em->block_start + csum_offset +
- csum_len - 1, &ordered_sums, false);
+ block_start = extent_map_block_start(em);
+ csum_root = btrfs_csum_root(trans->fs_info, block_start);
+ ret = btrfs_lookup_csums_list(csum_root, block_start + csum_offset,
+ block_start + csum_offset + csum_len - 1,
+ &ordered_sums, false);
if (ret < 0)
return ret;
ret = 0;
@@ -4688,7 +4712,8 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
enum btrfs_compression_type compress_type;
- u64 extent_offset = em->start - em->orig_start;
+ u64 extent_offset = em->offset;
+ u64 block_start = extent_map_block_start(em);
u64 block_len;
int ret;
@@ -4698,14 +4723,13 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
else
btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG);
- block_len = max(em->block_len, em->orig_block_len);
+ block_len = em->disk_num_bytes;
compress_type = extent_map_compression(em);
if (compress_type != BTRFS_COMPRESS_NONE) {
- btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start);
+ btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start);
btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
- } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start -
- extent_offset);
+ } else if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
+ btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start - extent_offset);
btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
}
@@ -4860,18 +4884,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
path->slots[0]++;
continue;
}
- if (!dropped_extents) {
- /*
- * Avoid logging extent items logged in past fsync calls
- * and leading to duplicate keys in the log tree.
- */
+ /*
+ * Avoid overlapping items in the log tree. The first time we
+ * get here, get rid of everything from a past fsync. After
+ * that, if the current extent starts before the end of the last
+ * extent we copied, truncate the last one. This can happen if
+ * an ordered extent completion modifies the subvolume tree
+ * while btrfs_next_leaf() has the tree unlocked.
+ */
+ if (!dropped_extents || key.offset < truncate_offset) {
ret = truncate_inode_items(trans, root->log_root, inode,
- truncate_offset,
+ min(key.offset, truncate_offset),
BTRFS_EXTENT_DATA_KEY);
if (ret)
goto out;
dropped_extents = true;
}
+ truncate_offset = btrfs_file_extent_end(path);
if (ins_nr == 0)
start_slot = slot;
ins_nr++;
@@ -5433,7 +5462,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
struct btrfs_root *root = start_inode->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
LIST_HEAD(dir_list);
struct btrfs_dir_list *dir_elem;
@@ -5494,7 +5522,7 @@ again:
continue;
btrfs_release_path(path);
- di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
+ di_inode = btrfs_iget_logging(di_key.objectid, root);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto out;
@@ -5554,7 +5582,7 @@ again:
btrfs_add_delayed_iput(curr_inode);
curr_inode = NULL;
- vfs_inode = btrfs_iget(fs_info->sb, ino, root);
+ vfs_inode = btrfs_iget_logging(ino, root);
if (IS_ERR(vfs_inode)) {
ret = PTR_ERR(vfs_inode);
break;
@@ -5649,7 +5677,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
return BTRFS_LOG_FORCE_COMMIT;
- inode = btrfs_iget(root->fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
/*
* If the other inode that had a conflicting dir entry was deleted in
* the current transaction then we either:
@@ -5750,7 +5778,6 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
/*
@@ -5781,7 +5808,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
list_del(&curr->list);
kfree(curr);
- inode = btrfs_iget(fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
/*
* If the other inode that had a conflicting dir entry was
* deleted in the current transaction, we need to log its parent
@@ -5792,7 +5819,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
if (ret != -ENOENT)
break;
- inode = btrfs_iget(fs_info->sb, parent, root);
+ inode = btrfs_iget_logging(parent, root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
break;
@@ -5905,7 +5932,7 @@ again:
if (ret < 0) {
return ret;
} else if (ret > 0 &&
- other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
+ other_ino != btrfs_ino(ctx->inode)) {
if (ins_nr > 0) {
ins_nr++;
} else {
@@ -6314,7 +6341,6 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
const bool orig_log_new_dentries = ctx->log_new_dentries;
- struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_item *item;
int ret = 0;
@@ -6340,7 +6366,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
if (key.type == BTRFS_ROOT_ITEM_KEY)
continue;
- di_inode = btrfs_iget(fs_info->sb, key.objectid, inode->root);
+ di_inode = btrfs_iget_logging(key.objectid, inode->root);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
break;
@@ -6724,7 +6750,6 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_path *path;
struct btrfs_key key;
@@ -6789,8 +6814,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size;
}
- dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
- root);
+ dir_inode = btrfs_iget_logging(inode_key.objectid, root);
/*
* If the parent inode was deleted, return an error to
* fallback to a transaction commit. This is to prevent
@@ -6852,7 +6876,6 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
while (true) {
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
int slot;
struct btrfs_key search_key;
@@ -6867,7 +6890,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
search_key.objectid = found_key.offset;
search_key.type = BTRFS_INODE_ITEM_KEY;
search_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, ino, root);
+ inode = btrfs_iget_logging(ino, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -7056,6 +7079,15 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
}
/*
+ * If we're logging an inode from a subvolume created in the current
+ * transaction we must force a commit since the root is not persisted.
+ */
+ if (btrfs_root_generation(&root->root_item) == trans->transid) {
+ ret = BTRFS_LOG_FORCE_COMMIT;
+ goto end_no_trans;
+ }
+
+ /*
* Skip already logged inodes or inodes corresponding to tmpfiles
* (since logging them is pointless, a link count of 0 means they
* will never be accessible).
@@ -7436,6 +7468,24 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
}
/*
+ * Call this when creating a subvolume in a directory.
+ * Because we don't commit a transaction when creating a subvolume, we can't
+ * allow the directory pointing to the subvolume to be logged with an entry that
+ * points to an unpersisted root if we are still in the transaction used to
+ * create the subvolume, so make any attempt to log the directory result in a
+ * full log sync.
+ * Also we don't need to worry about renames, since btrfs_rename() marks the
+ * log for a full commit when renaming a subvolume.
+ */
+void btrfs_record_new_subvolume(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *dir)
+{
+ mutex_lock(&dir->log_mutex);
+ dir->last_unlink_trans = trans->transid;
+ mutex_unlock(&dir->log_mutex);
+}
+
+/*
* Update the log after adding a new name for an inode.
*
* @trans: Transaction handle.
@@ -7567,7 +7617,7 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
goto out;
}
- btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
+ btrfs_init_log_ctx(&ctx, inode);
ctx.logging_new_name = true;
btrfs_init_log_ctx_scratch_eb(&ctx);
/*
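btrfs_iget_logging() above funnels every inode lookup done under a transaction handle through a memalloc_nofs_save()/memalloc_nofs_restore() scope, so GFP_KERNEL allocations inside btrfs_iget() cannot recurse back into the filesystem and deadlock on a transaction commit. A generic sketch of the same scoped-NOFS idiom; apart from the memalloc helpers, the names below are hypothetical:

	#include <linux/sched/mm.h>

	/* Hypothetical helper: run an allocation-heavy lookup with implicit
	 * GFP_NOFS semantics while a transaction handle is held. */
	static int example_lookup_nofs(struct example_ctx *ctx)
	{
		unsigned int nofs_flag;
		int ret;

		nofs_flag = memalloc_nofs_save(); /* GFP_KERNEL now acts as GFP_NOFS */
		ret = example_lookup(ctx);        /* may allocate with GFP_KERNEL */
		memalloc_nofs_restore(nofs_flag);
		return ret;
	}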
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 22e9cbc81577..dc313e6bb2fa 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -37,7 +37,7 @@ struct btrfs_log_ctx {
bool logging_new_delayed_dentries;
/* Indicate if the inode being logged was logged before. */
bool logged_before;
- struct inode *inode;
+ struct btrfs_inode *inode;
struct list_head list;
/* Only used for fast fsyncs. */
struct list_head ordered_extents;
@@ -55,7 +55,7 @@ struct btrfs_log_ctx {
struct extent_buffer *scratch_eb;
};
-void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode);
+void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct btrfs_inode *inode);
void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx);
void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx);
@@ -94,6 +94,8 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
bool for_rename);
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir);
+void btrfs_record_new_subvolume(const struct btrfs_trans_handle *trans,
+ struct btrfs_inode *dir);
void btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct dentry *old_dentry, struct btrfs_inode *old_dir,
u64 old_dir_index, struct dentry *parent);
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 183863f4bfa4..fc59b57257d6 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -50,6 +50,7 @@ void ulist_init(struct ulist *ulist)
INIT_LIST_HEAD(&ulist->nodes);
ulist->root = RB_ROOT;
ulist->nnodes = 0;
+ ulist->prealloc = NULL;
}
/*
@@ -68,6 +69,8 @@ void ulist_release(struct ulist *ulist)
list_for_each_entry_safe(node, next, &ulist->nodes, list) {
kfree(node);
}
+ kfree(ulist->prealloc);
+ ulist->prealloc = NULL;
ulist->root = RB_ROOT;
INIT_LIST_HEAD(&ulist->nodes);
}
@@ -105,6 +108,12 @@ struct ulist *ulist_alloc(gfp_t gfp_mask)
return ulist;
}
+void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)
+{
+ if (!ulist->prealloc)
+ ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);
+}
+
/*
* Free dynamically allocated ulist.
*
@@ -206,9 +215,15 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
*old_aux = node->aux;
return 0;
}
- node = kmalloc(sizeof(*node), gfp_mask);
- if (!node)
- return -ENOMEM;
+
+ if (ulist->prealloc) {
+ node = ulist->prealloc;
+ ulist->prealloc = NULL;
+ } else {
+ node = kmalloc(sizeof(*node), gfp_mask);
+ if (!node)
+ return -ENOMEM;
+ }
node->val = val;
node->aux = aux;
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 8e200fe1a2dd..c62a372f1462 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -41,12 +41,14 @@ struct ulist {
struct list_head nodes;
struct rb_root root;
+ struct ulist_node *prealloc;
};
void ulist_init(struct ulist *ulist);
void ulist_release(struct ulist *ulist);
void ulist_reinit(struct ulist *ulist);
struct ulist *ulist_alloc(gfp_t gfp_mask);
+void ulist_prealloc(struct ulist *ulist, gfp_t mask);
void ulist_free(struct ulist *ulist);
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
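The prealloc slot added to struct ulist lets a caller allocate the node with a blocking GFP mask before entering a context where only atomic allocation is safe; ulist_add()/ulist_add_merge() then consume the preallocated node instead of calling kmalloc(). A hedged usage sketch; the lock and values below are hypothetical:

	/* Hypothetical caller: preallocate outside the critical section, then
	 * add under a spinlock without depending on an atomic allocation. */
	static int example_record_ref(struct ulist *ulist, spinlock_t *lock, u64 bytenr)
	{
		int ret;

		ulist_prealloc(ulist, GFP_KERNEL);
		spin_lock(lock);
		ret = ulist_add(ulist, bytenr, 0, GFP_ATOMIC);
		spin_unlock(lock);
		return ret;
	}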
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index b0aff297d67d..eae75bb572b9 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -13,7 +13,7 @@
#include "accessors.h"
#include "uuid-tree.h"
-static void btrfs_uuid_to_key(u8 *uuid, u8 type, struct btrfs_key *key)
+static void btrfs_uuid_to_key(const u8 *uuid, u8 type, struct btrfs_key *key)
{
key->type = type;
key->objectid = get_unaligned_le64(uuid);
@@ -21,7 +21,7 @@ static void btrfs_uuid_to_key(u8 *uuid, u8 type, struct btrfs_key *key)
}
/* return -ENOENT for !found, < 0 for errors, or 0 if an item was found */
-static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
+static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, const u8 *uuid,
u8 type, u64 subid)
{
int ret;
@@ -81,7 +81,7 @@ out:
return ret;
}
-int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid_cpu)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -145,7 +145,7 @@ out:
return ret;
}
-int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -256,7 +256,7 @@ out:
* < 0 if an error occurred
*/
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
- u8 *uuid, u8 type, u64 subvolid)
+ const u8 *uuid, u8 type, u64 subvolid)
{
int ret = 0;
struct btrfs_root *subvol_root;
diff --git a/fs/btrfs/uuid-tree.h b/fs/btrfs/uuid-tree.h
index 080ede0227ae..a3f5757cc7cf 100644
--- a/fs/btrfs/uuid-tree.h
+++ b/fs/btrfs/uuid-tree.h
@@ -8,9 +8,9 @@
struct btrfs_trans_handle;
struct btrfs_fs_info;
-int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid);
-int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c39145e8c4ad..fcedc43ef291 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -722,7 +722,7 @@ error_free_page:
return -EINVAL;
}
-u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
+const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{
bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
@@ -1380,20 +1380,13 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
bool new_device_added = false;
struct btrfs_device *device = NULL;
struct file *bdev_file;
- u64 bytenr, bytenr_orig;
+ u64 bytenr;
dev_t devt;
int ret;
lockdep_assert_held(&uuid_mutex);
/*
- * we would like to check all the supers, but that would make
- * a btrfs mount succeed after a mkfs from a different FS.
- * So, we need to add a special mount option to scan for
- * later supers, using BTRFS_SUPER_MIRROR_MAX instead
- */
-
- /*
* Avoid an exclusive open here, as the systemd-udev may initiate the
* device scan which may race with the user's mount or mkfs command,
* resulting in failure.
@@ -1407,7 +1400,12 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
if (IS_ERR(bdev_file))
return ERR_CAST(bdev_file);
- bytenr_orig = btrfs_sb_offset(0);
+ /*
+ * We would like to check all the super blocks, but doing so would
+ * allow a mount to succeed after a mkfs from a different filesystem.
+ * Currently, recovery from a bad primary btrfs superblock is done
+ * using the userspace command 'btrfs check --super'.
+ */
ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
if (ret) {
device = ERR_PTR(ret);
@@ -1415,7 +1413,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
}
disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
- bytenr_orig);
+ btrfs_sb_offset(0));
if (IS_ERR(disk_super)) {
device = ERR_CAST(disk_super);
goto error_bdev_put;
@@ -2991,16 +2989,19 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
if (ret < 0)
goto out;
else if (ret > 0) { /* Logic error or corruption */
- btrfs_handle_fs_error(fs_info, -ENOENT,
- "Failed lookup while freeing chunk.");
- ret = -ENOENT;
+ btrfs_err(fs_info, "failed to lookup chunk %llu when freeing",
+ chunk_offset);
+ btrfs_abort_transaction(trans, -ENOENT);
+ ret = -EUCLEAN;
goto out;
}
ret = btrfs_del_item(trans, root, path);
- if (ret < 0)
- btrfs_handle_fs_error(fs_info, ret,
- "Failed to delete chunk item.");
+ if (ret < 0) {
+ btrfs_err(fs_info, "failed to delete chunk %llu item", chunk_offset);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
out:
btrfs_free_path(path);
return ret;
@@ -5628,8 +5629,6 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
u64 start = ctl->start;
u64 type = ctl->type;
int ret;
- int i;
- int j;
map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS);
if (!map)
@@ -5644,8 +5643,8 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
map->sub_stripes = ctl->sub_stripes;
map->num_stripes = ctl->num_stripes;
- for (i = 0; i < ctl->ndevs; ++i) {
- for (j = 0; j < ctl->dev_stripes; ++j) {
+ for (int i = 0; i < ctl->ndevs; i++) {
+ for (int j = 0; j < ctl->dev_stripes; j++) {
int s = i * ctl->dev_stripes + j;
map->stripes[s].dev = devices_info[i].dev;
map->stripes[s].physical = devices_info[i].dev_offset +
@@ -6288,20 +6287,19 @@ static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
return ret;
}
-static void handle_ops_on_dev_replace(enum btrfs_map_op op,
- struct btrfs_io_context *bioc,
+static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc,
struct btrfs_dev_replace *dev_replace,
u64 logical,
- int *num_stripes_ret, int *max_errors_ret)
+ struct btrfs_io_geometry *io_geom)
{
u64 srcdev_devid = dev_replace->srcdev->devid;
/*
* At this stage, num_stripes is still the real number of stripes,
* excluding the duplicated stripes.
*/
- int num_stripes = *num_stripes_ret;
+ int num_stripes = io_geom->num_stripes;
+ int max_errors = io_geom->max_errors;
int nr_extra_stripes = 0;
- int max_errors = *max_errors_ret;
int i;
/*
@@ -6342,7 +6340,7 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
* replace.
* If we have 2 extra stripes, only choose the one with smaller physical.
*/
- if (op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
+ if (io_geom->op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];
@@ -6360,8 +6358,8 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
}
}
- *num_stripes_ret = num_stripes + nr_extra_stripes;
- *max_errors_ret = max_errors + nr_extra_stripes;
+ io_geom->num_stripes = num_stripes + nr_extra_stripes;
+ io_geom->max_errors = max_errors + nr_extra_stripes;
bioc->replace_nr_stripes = nr_extra_stripes;
}
@@ -6624,7 +6622,6 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
struct btrfs_chunk_map *map;
struct btrfs_io_geometry io_geom = { 0 };
u64 map_offset;
- int i;
int ret = 0;
int num_copies;
struct btrfs_io_context *bioc = NULL;
@@ -6770,7 +6767,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* For all other non-RAID56 profiles, just copy the target
* stripe into the bioc.
*/
- for (i = 0; i < io_geom.num_stripes; i++) {
+ for (int i = 0; i < io_geom.num_stripes; i++) {
ret = set_io_stripe(fs_info, logical, length,
&bioc->stripes[i], map, &io_geom);
if (ret < 0)
@@ -6790,8 +6787,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
op != BTRFS_MAP_READ) {
- handle_ops_on_dev_replace(op, bioc, dev_replace, logical,
- &io_geom.num_stripes, &io_geom.max_errors);
+ handle_ops_on_dev_replace(bioc, dev_replace, logical, &io_geom);
}
*bioc_ret = bioc;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 66e6fc481ecd..37a09ebb34dd 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -834,6 +834,6 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
-u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb);
+const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb);
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 15d0999e340e..738c7bb8ea7c 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -24,7 +24,7 @@
#include "accessors.h"
#include "dir-item.h"
-int btrfs_getxattr(struct inode *inode, const char *name,
+int btrfs_getxattr(const struct inode *inode, const char *name,
void *buffer, size_t size)
{
struct btrfs_dir_item *di;
@@ -451,7 +451,7 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_set_prop(trans, inode, name, value, size, flags);
+ ret = btrfs_set_prop(trans, BTRFS_I(inode), name, value, size, flags);
if (!ret) {
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index b9376ea258ff..8dc4cf49f6f0 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -14,7 +14,7 @@ struct btrfs_trans_handle;
extern const struct xattr_handler * const btrfs_xattr_handlers[];
-int btrfs_getxattr(struct inode *inode, const char *name,
+int btrfs_getxattr(const struct inode *inode, const char *name,
void *buffer, size_t size);
int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
const char *name, const void *value, size_t size, int flags);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index d9e5c88a0f85..30971dd741e2 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -18,6 +18,7 @@
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
+#include "btrfs_inode.h"
#include "compression.h"
/* workspace buffer size for s390 zlib hardware support */
@@ -112,8 +113,13 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
*total_out = 0;
*total_in = 0;
- if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
- pr_warn("BTRFS: deflateInit failed\n");
+ ret = zlib_deflateInit(&workspace->strm, workspace->level);
+ if (unlikely(ret != Z_OK)) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_err(inode->root->fs_info,
+ "zlib compression init failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode), start);
ret = -EIO;
goto out;
}
@@ -182,9 +188,13 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
}
ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
- if (ret != Z_OK) {
- pr_debug("BTRFS: deflate in loop returned %d\n",
- ret);
+ if (unlikely(ret != Z_OK)) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_warn(inode->root->fs_info,
+ "zlib compression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ start);
zlib_deflateEnd(&workspace->strm);
ret = -EIO;
goto out;
@@ -307,9 +317,14 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
workspace->strm.avail_in -= 2;
}
- if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
- pr_warn("BTRFS: inflateInit failed\n");
+ ret = zlib_inflateInit2(&workspace->strm, wbits);
+ if (unlikely(ret != Z_OK)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
kunmap_local(data_in);
+ btrfs_err(inode->root->fs_info,
+ "zlib decompression init failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
return -EIO;
}
while (workspace->strm.total_in < srclen) {
@@ -348,10 +363,15 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
workspace->strm.avail_in = min(tmp, PAGE_SIZE);
}
}
- if (ret != Z_STREAM_END)
+ if (unlikely(ret != Z_STREAM_END)) {
+ btrfs_err(cb->bbio.inode->root->fs_info,
+ "zlib decompression failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(cb->bbio.inode->root),
+ btrfs_ino(cb->bbio.inode), cb->start);
ret = -EIO;
- else
+ } else {
ret = 0;
+ }
done:
zlib_inflateEnd(&workspace->strm);
if (data_in)
@@ -386,8 +406,14 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
workspace->strm.avail_in -= 2;
}
- if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
- pr_warn("BTRFS: inflateInit failed\n");
+ ret = zlib_inflateInit2(&workspace->strm, wbits);
+ if (unlikely(ret != Z_OK)) {
+ struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+
+ btrfs_err(inode->root->fs_info,
+ "zlib decompression init failed, error %d root %llu inode %llu offset %llu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ page_offset(dest_page));
return -EIO;
}
@@ -404,8 +430,12 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
out:
if (unlikely(to_copy != destlen)) {
- pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
- to_copy, destlen);
+ struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+
+ btrfs_err(inode->root->fs_info,
+"zlib decompression failed, error %d root %llu inode %llu offset %llu decompressed %lu expected %zu",
+ ret, btrfs_root_id(inode->root), btrfs_ino(inode),
+ page_offset(dest_page), to_copy, destlen);
ret = -EIO;
} else {
ret = 0;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 947a87576f6c..df7733044f7e 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -87,9 +87,8 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
bool empty[BTRFS_NR_SB_LOG_ZONES];
bool full[BTRFS_NR_SB_LOG_ZONES];
sector_t sector;
- int i;
- for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
+ for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
full[i] = sb_zone_is_full(&zones[i]);
@@ -121,9 +120,8 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
struct address_space *mapping = bdev->bd_mapping;
struct page *page[BTRFS_NR_SB_LOG_ZONES];
struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
- int i;
- for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
+ for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
BTRFS_SUPER_INFO_SIZE;
@@ -144,7 +142,7 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
else
sector = zones[0].start;
- for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
+ for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
btrfs_release_disk_super(super[i]);
} else if (!full[0] && (empty[1] || full[1])) {
sector = zones[0].wp;
@@ -652,8 +650,7 @@ out:
return NULL;
}
-int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
- struct blk_zone *zone)
+static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
{
unsigned int nr_zones = 1;
int ret;
@@ -770,7 +767,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
return 0;
}
-int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info, unsigned long *mount_opt)
+int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info, unsigned long *mount_opt)
{
if (!btrfs_is_zoned(info))
return 0;
@@ -1726,7 +1723,7 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio)
if (!btrfs_is_zoned(fs_info))
return false;
- if (!inode || !is_data_inode(&inode->vfs_inode))
+ if (!inode || !is_data_inode(inode))
return false;
if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
@@ -1768,7 +1765,7 @@ void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
u64 logical)
{
- struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree;
+ struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
struct extent_map *em;
ordered->disk_bytenr = logical;
@@ -1776,7 +1773,9 @@ static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
write_lock(&em_tree->lock);
em = search_extent_mapping(em_tree, ordered->file_offset,
ordered->num_bytes);
- em->block_start = logical;
+ /* The em should be a new COW extent, thus it should not have an offset. */
+ ASSERT(em->offset == 0);
+ em->disk_bytenr = logical;
free_extent_map(em);
write_unlock(&em_tree->lock);
}
@@ -1787,7 +1786,7 @@ static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
struct btrfs_ordered_extent *new;
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
- split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset,
+ split_extent_map(ordered->inode, ordered->file_offset,
ordered->num_bytes, len, logical))
return false;
@@ -1801,7 +1800,7 @@ static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
{
- struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_ordered_sum *sum;
u64 logical, len;
@@ -1845,7 +1844,7 @@ out:
* here so that we don't attempt to log the csums later.
*/
if ((inode->flags & BTRFS_INODE_NODATASUM) ||
- test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
+ test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) {
while ((sum = list_first_entry_or_null(&ordered->list,
typeof(*sum), list))) {
list_del(&sum->list);
@@ -2215,8 +2214,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
/* Ensure all writes in this block group finish */
btrfs_wait_block_group_reservations(block_group);
/* No need to wait for NOCOW writers. Zoned mode does not allow that */
- btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
- block_group->length);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
/* Wait for extent buffers to be written. */
if (is_metadata)
wait_eb_writebacks(block_group);
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 77c4321e331f..d66d00c08001 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -53,14 +53,12 @@ struct btrfs_zoned_device_info {
void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered);
#ifdef CONFIG_BLK_DEV_ZONED
-int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
- struct blk_zone *zone);
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache);
void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
-int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info, unsigned long *mount_opt);
+int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info, unsigned long *mount_opt);
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
u64 *bytenr_ret);
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
@@ -98,11 +96,6 @@ int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, bool do_finish);
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
#else /* CONFIG_BLK_DEV_ZONED */
-static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
- struct blk_zone *zone)
-{
- return 0;
-}
static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
@@ -136,7 +129,7 @@ static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
return -EOPNOTSUPP;
}
-static inline int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info,
+static inline int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info,
unsigned long *mount_opt)
{
return 0;
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 2b232b82c3a8..2a079561b2b1 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -19,6 +19,7 @@
#include <linux/zstd.h>
#include "misc.h"
#include "fs.h"
+#include "btrfs_inode.h"
#include "compression.h"
#include "super.h"
@@ -399,8 +400,13 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
/* Initialize the stream */
stream = zstd_init_cstream(&params, len, workspace->mem,
workspace->size);
- if (!stream) {
- pr_warn("BTRFS: zstd_init_cstream failed\n");
+ if (unlikely(!stream)) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_err(inode->root->fs_info,
+ "zstd compression init level %d failed, root %llu inode %llu offset %llu",
+ workspace->req_level, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start);
ret = -EIO;
goto out;
}
@@ -429,9 +435,14 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
ret2 = zstd_compress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (zstd_is_error(ret2)) {
- pr_debug("BTRFS: zstd_compress_stream returned %d\n",
- zstd_get_error_code(ret2));
+ if (unlikely(zstd_is_error(ret2))) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_warn(inode->root->fs_info,
+"zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
+ workspace->req_level, zstd_get_error_code(ret2),
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ start);
ret = -EIO;
goto out;
}
@@ -497,9 +508,14 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
size_t ret2;
ret2 = zstd_end_stream(stream, &workspace->out_buf);
- if (zstd_is_error(ret2)) {
- pr_debug("BTRFS: zstd_end_stream returned %d\n",
- zstd_get_error_code(ret2));
+ if (unlikely(zstd_is_error(ret2))) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_err(inode->root->fs_info,
+"zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
+ workspace->req_level, zstd_get_error_code(ret2),
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ start);
ret = -EIO;
goto out;
}
@@ -561,8 +577,12 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
- if (!stream) {
- pr_debug("BTRFS: zstd_init_dstream failed\n");
+ if (unlikely(!stream)) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression init failed, root %llu inode %llu offset %llu",
+ btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
ret = -EIO;
goto done;
}
@@ -580,9 +600,13 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (zstd_is_error(ret2)) {
- pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
- zstd_get_error_code(ret2));
+ if (unlikely(zstd_is_error(ret2))) {
+ struct btrfs_inode *inode = cb->bbio.inode;
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression failed, error %d root %llu inode %llu offset %llu",
+ zstd_get_error_code(ret2), btrfs_root_id(inode->root),
+ btrfs_ino(inode), cb->start);
ret = -EIO;
goto done;
}
@@ -637,8 +661,14 @@ int zstd_decompress(struct list_head *ws, const u8 *data_in,
stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
- if (!stream) {
- pr_warn("BTRFS: zstd_init_dstream failed\n");
+ if (unlikely(!stream)) {
+ struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression init failed, root %llu inode %llu offset %llu",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ page_offset(dest_page));
+ ret = -EIO;
goto finish;
}
@@ -655,9 +685,13 @@ int zstd_decompress(struct list_head *ws, const u8 *data_in,
* one call should end the decompression.
*/
ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
- if (zstd_is_error(ret)) {
- pr_warn_ratelimited("BTRFS: zstd_decompress_stream return %d\n",
- zstd_get_error_code(ret));
+ if (unlikely(zstd_is_error(ret))) {
+ struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+
+ btrfs_err(inode->root->fs_info,
+ "zstd decompression failed, error %d root %llu inode %llu offset %llu",
+ zstd_get_error_code(ret), btrfs_root_id(inode->root),
+ btrfs_ino(inode), page_offset(dest_page));
goto finish;
}
to_copy = workspace->out_buf.pos;
diff --git a/fs/buffer.c b/fs/buffer.c
index 8c19e705b9c3..dbe8f411ce52 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -258,7 +258,6 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
} else {
clear_buffer_uptodate(bh);
buffer_io_error(bh, ", async page read");
- folio_set_error(folio);
}
/*
@@ -391,7 +390,6 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
buffer_io_error(bh, ", lost async page write");
mark_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
- folio_set_error(folio);
}
first = folio_buffers(folio);
@@ -1960,7 +1958,6 @@ recover:
clear_buffer_dirty(bh);
}
} while ((bh = bh->b_this_page) != head);
- folio_set_error(folio);
BUG_ON(folio_test_writeback(folio));
mapping_set_error(folio->mapping, err);
folio_start_writeback(folio);
@@ -2405,10 +2402,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
if (iblock < lblock) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
- if (err) {
- folio_set_error(folio);
+ if (err)
page_error = true;
- }
}
if (!buffer_mapped(bh)) {
folio_zero_range(folio, i * blocksize,
diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
index f449f7340aad..9fb06dc16520 100644
--- a/fs/cachefiles/cache.c
+++ b/fs/cachefiles/cache.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/namei.h>
+#include <trace/events/fscache.h>
#include "internal.h"
/*
@@ -312,19 +313,59 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
}
/*
- * Withdraw volumes.
+ * Withdraw fscache volumes.
+ */
+static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
+{
+ struct list_head *cur;
+ struct cachefiles_volume *volume;
+ struct fscache_volume *vcookie;
+
+ _enter("");
+retry:
+ spin_lock(&cache->object_list_lock);
+ list_for_each(cur, &cache->volumes) {
+ volume = list_entry(cur, struct cachefiles_volume, cache_link);
+
+ if (atomic_read(&volume->vcookie->n_accesses) == 0)
+ continue;
+
+ vcookie = fscache_try_get_volume(volume->vcookie,
+ fscache_volume_get_withdraw);
+ if (vcookie) {
+ spin_unlock(&cache->object_list_lock);
+ fscache_withdraw_volume(vcookie);
+ fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+ goto retry;
+ }
+ }
+ spin_unlock(&cache->object_list_lock);
+
+ _leave("");
+}
+
+/*
+ * Withdraw cachefiles volumes.
*/
static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
{
_enter("");
for (;;) {
+ struct fscache_volume *vcookie = NULL;
struct cachefiles_volume *volume = NULL;
spin_lock(&cache->object_list_lock);
if (!list_empty(&cache->volumes)) {
volume = list_first_entry(&cache->volumes,
struct cachefiles_volume, cache_link);
+ vcookie = fscache_try_get_volume(volume->vcookie,
+ fscache_volume_get_withdraw);
+ if (!vcookie) {
+ spin_unlock(&cache->object_list_lock);
+ cpu_relax();
+ continue;
+ }
list_del_init(&volume->cache_link);
}
spin_unlock(&cache->object_list_lock);
@@ -332,6 +373,7 @@ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
break;
cachefiles_withdraw_volume(volume);
+ fscache_put_volume(vcookie, fscache_volume_put_withdraw);
}
_leave("");
@@ -371,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
pr_info("File cache on %s unregistering\n", fscache->name);
fscache_withdraw_cache(fscache);
+ cachefiles_withdraw_fscache_volumes(cache);
/* we now have to destroy all the active objects pertaining to this
* cache - which we do by passing them off to thread pool to be
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 6465e2574230..89b11336a836 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -133,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
return 0;
}
-static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
struct xarray *xa = &cache->reqs;
struct cachefiles_req *req;
@@ -159,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
xa_for_each(xa, index, req) {
req->error = -EIO;
complete(&req->done);
+ __xa_erase(xa, index);
}
xa_unlock(xa);
@@ -365,14 +366,14 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
if (cachefiles_in_ondemand_mode(cache)) {
if (!xa_empty(&cache->reqs)) {
- rcu_read_lock();
+ xas_lock(&xas);
xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
if (!cachefiles_ondemand_is_reopening_read(req)) {
mask |= EPOLLIN;
break;
}
}
- rcu_read_unlock();
+ xas_unlock(&xas);
}
} else {
if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index d33169f0018b..7b99bd98de75 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -48,6 +48,7 @@ enum cachefiles_object_state {
CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
+ CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. */
};
struct cachefiles_ondemand_info {
@@ -55,6 +56,7 @@ struct cachefiles_ondemand_info {
int ondemand_id;
enum cachefiles_object_state state;
struct cachefiles_object *object;
+ spinlock_t lock;
};
/*
@@ -127,6 +129,7 @@ struct cachefiles_cache {
unsigned long req_id_next;
struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
u32 ondemand_id_next;
+ u32 msg_id_next;
};
static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
@@ -138,6 +141,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
struct cachefiles_req {
struct cachefiles_object *object;
struct completion done;
+ refcount_t ref;
int error;
struct cachefiles_msg msg;
};
@@ -186,6 +190,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
* daemon.c
*/
extern const struct file_operations cachefiles_daemon_fops;
+extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
@@ -332,6 +337,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
+CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING);
static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
{
@@ -424,6 +430,8 @@ do { \
pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
fscache_io_error((___cache)->cache); \
set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
+ if (cachefiles_in_ondemand_mode(___cache)) \
+ cachefiles_flush_reqs(___cache); \
} while (0)
#define cachefiles_io_error_obj(object, FMT, ...) \
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 4ba42f1fa3b4..470c96658385 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -1,22 +1,42 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"
+struct ondemand_anon_file {
+ struct file *file;
+ int fd;
+};
+
+static inline void cachefiles_req_put(struct cachefiles_req *req)
+{
+ if (refcount_dec_and_test(&req->ref))
+ kfree(req);
+}
+
static int cachefiles_ondemand_fd_release(struct inode *inode,
struct file *file)
{
struct cachefiles_object *object = file->private_data;
- struct cachefiles_cache *cache = object->volume->cache;
- struct cachefiles_ondemand_info *info = object->ondemand;
- int object_id = info->ondemand_id;
+ struct cachefiles_cache *cache;
+ struct cachefiles_ondemand_info *info;
+ int object_id;
struct cachefiles_req *req;
- XA_STATE(xas, &cache->reqs, 0);
+ XA_STATE(xas, NULL, 0);
+
+ if (!object)
+ return 0;
+
+ info = object->ondemand;
+ cache = object->volume->cache;
+ xas.xa = &cache->reqs;
xa_lock(&cache->reqs);
+ spin_lock(&info->lock);
+ object_id = info->ondemand_id;
info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&info->lock);
/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
@@ -76,12 +96,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
}
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
- unsigned long arg)
+ unsigned long id)
{
struct cachefiles_object *object = filp->private_data;
struct cachefiles_cache *cache = object->volume->cache;
struct cachefiles_req *req;
- unsigned long id;
+ XA_STATE(xas, &cache->reqs, id);
if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
return -EINVAL;
@@ -89,10 +109,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
- id = arg;
- req = xa_erase(&cache->reqs, id);
- if (!req)
+ xa_lock(&cache->reqs);
+ req = xas_load(&xas);
+ if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
+ req->object != object) {
+ xa_unlock(&cache->reqs);
return -EINVAL;
+ }
+ xas_store(&xas, NULL);
+ xa_unlock(&cache->reqs);
trace_cachefiles_ondemand_cread(object, id);
complete(&req->done);
@@ -116,10 +141,12 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
struct cachefiles_req *req;
struct fscache_cookie *cookie;
+ struct cachefiles_ondemand_info *info;
char *pid, *psize;
unsigned long id;
long size;
int ret;
+ XA_STATE(xas, &cache->reqs, 0);
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
@@ -143,10 +170,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
if (ret)
return ret;
- req = xa_erase(&cache->reqs, id);
- if (!req)
+ xa_lock(&cache->reqs);
+ xas.xa_index = id;
+ req = xas_load(&xas);
+ if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
+ !req->object->ondemand->ondemand_id) {
+ xa_unlock(&cache->reqs);
return -EINVAL;
+ }
+ xas_store(&xas, NULL);
+ xa_unlock(&cache->reqs);
+ info = req->object->ondemand;
/* fail OPEN request if copen format is invalid */
ret = kstrtol(psize, 0, &size);
if (ret) {
@@ -166,6 +201,32 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
goto out;
}
+ spin_lock(&info->lock);
+ /*
+ * The anonymous fd was closed before copen? Fail the request.
+ *
+ * t1 | t2
+ * ---------------------------------------------------------
+ * cachefiles_ondemand_copen
+ * req = xa_erase(&cache->reqs, id)
+ * // Anon fd is maliciously closed.
+ * cachefiles_ondemand_fd_release
+ * xa_lock(&cache->reqs)
+ * cachefiles_ondemand_set_object_close(object)
+ * xa_unlock(&cache->reqs)
+ * cachefiles_ondemand_set_object_open
+ * // No one will ever close it again.
+ * cachefiles_ondemand_daemon_read
+ * cachefiles_ondemand_select_req
+ *
+ * Get a read req but its fd is already closed. The daemon can't
+ * issue a cread ioctl with a closed fd, so it hangs.
+ */
+ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
+ spin_unlock(&info->lock);
+ req->error = -EBADFD;
+ goto out;
+ }
cookie = req->object->cookie;
cookie->object_size = size;
if (size)
@@ -175,9 +236,15 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
trace_cachefiles_ondemand_copen(req->object, id, size);
cachefiles_ondemand_set_object_open(req->object);
+ spin_unlock(&info->lock);
wake_up_all(&cache->daemon_pollwq);
out:
+ spin_lock(&info->lock);
+ /* Set the object to close so it does not linger in reopening state */
+ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
+ cachefiles_ondemand_set_object_close(req->object);
+ spin_unlock(&info->lock);
complete(&req->done);
return ret;
}
@@ -205,14 +272,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
return 0;
}
-static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
+ struct ondemand_anon_file *anon_file)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
struct cachefiles_open *load;
- struct file *file;
u32 object_id;
- int ret, fd;
+ int ret;
object = cachefiles_grab_object(req->object,
cachefiles_obj_get_ondemand_fd);
@@ -224,35 +291,53 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
if (ret < 0)
goto err;
- fd = get_unused_fd_flags(O_WRONLY);
- if (fd < 0) {
- ret = fd;
+ anon_file->fd = get_unused_fd_flags(O_WRONLY);
+ if (anon_file->fd < 0) {
+ ret = anon_file->fd;
goto err_free_id;
}
- file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
- object, O_WRONLY);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
+ anon_file->file = anon_inode_getfile("[cachefiles]",
+ &cachefiles_ondemand_fd_fops, object, O_WRONLY);
+ if (IS_ERR(anon_file->file)) {
+ ret = PTR_ERR(anon_file->file);
goto err_put_fd;
}
- file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
- fd_install(fd, file);
+ spin_lock(&object->ondemand->lock);
+ if (object->ondemand->ondemand_id > 0) {
+ spin_unlock(&object->ondemand->lock);
+ /* Pair with check in cachefiles_ondemand_fd_release(). */
+ anon_file->file->private_data = NULL;
+ ret = -EEXIST;
+ goto err_put_file;
+ }
+
+ anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
load = (void *)req->msg.data;
- load->fd = fd;
+ load->fd = anon_file->fd;
object->ondemand->ondemand_id = object_id;
+ spin_unlock(&object->ondemand->lock);
cachefiles_get_unbind_pincount(cache);
trace_cachefiles_ondemand_open(object, &req->msg, load);
return 0;
+err_put_file:
+ fput(anon_file->file);
+ anon_file->file = NULL;
err_put_fd:
- put_unused_fd(fd);
+ put_unused_fd(anon_file->fd);
+ anon_file->fd = ret;
err_free_id:
xa_erase(&cache->ondemand_ids, object_id);
err:
+ spin_lock(&object->ondemand->lock);
+ /* Avoid marking an opened object as closed. */
+ if (object->ondemand->ondemand_id <= 0)
+ cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&object->ondemand->lock);
cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
return ret;
}
@@ -294,14 +379,28 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa
return NULL;
}
+static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
+ struct xa_state *xas, int err)
+{
+ if (unlikely(!xas || !req))
+ return false;
+
+ if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
+ return false;
+
+ req->error = err;
+ complete(&req->done);
+ return true;
+}
+
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
char __user *_buffer, size_t buflen)
{
struct cachefiles_req *req;
struct cachefiles_msg *msg;
- unsigned long id = 0;
size_t n;
int ret = 0;
+ struct ondemand_anon_file anon_file;
XA_STATE(xas, &cache->reqs, cache->req_id_next);
xa_lock(&cache->reqs);
@@ -330,42 +429,37 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
cache->req_id_next = xas.xa_index + 1;
+ refcount_inc(&req->ref);
+ cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
xa_unlock(&cache->reqs);
- id = xas.xa_index;
-
if (msg->opcode == CACHEFILES_OP_OPEN) {
- ret = cachefiles_ondemand_get_fd(req);
- if (ret) {
- cachefiles_ondemand_set_object_close(req->object);
- goto error;
- }
+ ret = cachefiles_ondemand_get_fd(req, &anon_file);
+ if (ret)
+ goto out;
}
- msg->msg_id = id;
+ msg->msg_id = xas.xa_index;
msg->object_id = req->object->ondemand->ondemand_id;
- if (copy_to_user(_buffer, msg, n) != 0) {
+ if (copy_to_user(_buffer, msg, n) != 0)
ret = -EFAULT;
- goto err_put_fd;
- }
- /* CLOSE request has no reply */
- if (msg->opcode == CACHEFILES_OP_CLOSE) {
- xa_erase(&cache->reqs, id);
- complete(&req->done);
+ if (msg->opcode == CACHEFILES_OP_OPEN) {
+ if (ret < 0) {
+ fput(anon_file.file);
+ put_unused_fd(anon_file.fd);
+ goto out;
+ }
+ fd_install(anon_file.fd, anon_file.file);
}
-
- return n;
-
-err_put_fd:
- if (msg->opcode == CACHEFILES_OP_OPEN)
- close_fd(((struct cachefiles_open *)msg->data)->fd);
-error:
- xa_erase(&cache->reqs, id);
- req->error = ret;
- complete(&req->done);
- return ret;
+out:
+ cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
+ /* Remove the request on error; a CLOSE request has no reply */
+ if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
+ cachefiles_ondemand_finish_req(req, &xas, ret);
+ cachefiles_req_put(req);
+ return ret ? ret : n;
}
typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
@@ -395,6 +489,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
goto out;
}
+ refcount_set(&req->ref, 1);
req->object = object;
init_completion(&req->done);
req->msg.opcode = opcode;
@@ -422,7 +517,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
*/
xas_lock(&xas);
- if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+ if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
+ cachefiles_ondemand_object_is_dropping(object)) {
xas_unlock(&xas);
ret = -EIO;
goto out;
@@ -432,20 +528,32 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
smp_mb();
if (opcode == CACHEFILES_OP_CLOSE &&
- !cachefiles_ondemand_object_is_open(object)) {
+ !cachefiles_ondemand_object_is_open(object)) {
WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
xas_unlock(&xas);
ret = -EIO;
goto out;
}
- xas.xa_index = 0;
+ /*
+ * Cyclically find a free xas to avoid msg_id reuse that would
+ * cause the daemon to successfully copen a stale msg_id.
+ */
+ xas.xa_index = cache->msg_id_next;
xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART) {
+ xas.xa_index = 0;
+ xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
+ }
if (xas.xa_node == XAS_RESTART)
xas_set_err(&xas, -EBUSY);
+
xas_store(&xas, req);
- xas_clear_mark(&xas, XA_FREE_MARK);
- xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+ if (xas_valid(&xas)) {
+ cache->msg_id_next = xas.xa_index + 1;
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+ }
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));
@@ -454,16 +562,27 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
goto out;
wake_up_all(&cache->daemon_pollwq);
- wait_for_completion(&req->done);
- ret = req->error;
- kfree(req);
+wait:
+ ret = wait_for_completion_killable(&req->done);
+ if (!ret) {
+ ret = req->error;
+ } else {
+ ret = -EINTR;
+ if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
+ /* Someone will complete it soon. */
+ cpu_relax();
+ goto wait;
+ }
+ }
+ cachefiles_req_put(req);
return ret;
out:
/* Reset the object to close state in the error handling path.
* If an error occurs after creating the anonymous fd,
* cachefiles_ondemand_fd_release() will set the object to close.
*/
- if (opcode == CACHEFILES_OP_OPEN)
+ if (opcode == CACHEFILES_OP_OPEN &&
+ !cachefiles_ondemand_object_is_dropping(object))
cachefiles_ondemand_set_object_close(object);
kfree(req);
return ret;
@@ -562,8 +681,34 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
+ unsigned long index;
+ struct cachefiles_req *req;
+ struct cachefiles_cache *cache;
+
+ if (!object->ondemand)
+ return;
+
cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
cachefiles_ondemand_init_close_req, NULL);
+
+ if (!object->ondemand->ondemand_id)
+ return;
+
+ /* Cancel all requests for the object that is being dropped. */
+ cache = object->volume->cache;
+ xa_lock(&cache->reqs);
+ cachefiles_ondemand_set_object_dropping(object);
+ xa_for_each(&cache->reqs, index, req) {
+ if (req->object == object) {
+ req->error = -EIO;
+ complete(&req->done);
+ __xa_erase(&cache->reqs, index);
+ }
+ }
+ xa_unlock(&cache->reqs);
+
+ /* Wait for ondemand_object_worker() to finish to avoid UAF. */
+ cancel_work_sync(&object->ondemand->ondemand_work);
}
int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
@@ -578,6 +723,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
return -ENOMEM;
object->ondemand->object = object;
+ spin_lock_init(&object->ondemand->lock);
INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
return 0;
}
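
The msg_id hunk above switches from always probing index 0 to a cyclic search that starts at cache->msg_id_next and wraps around once, so an id that was just freed is not handed back to the daemon immediately. A minimal user-space sketch of the same idea (made-up names, a plain array standing in for the xarray and its free-mark search):

#include <stddef.h>

#define NSLOTS 64

static void *slots[NSLOTS];
static size_t next_id;		/* index at which the next search resumes */

/* Allocate the next free slot cyclically; return its index, or -1 if full. */
static int alloc_id_cyclic(void *entry)
{
	for (size_t n = 0; n < NSLOTS; n++) {
		size_t id = (next_id + n) % NSLOTS;

		if (!slots[id]) {
			slots[id] = entry;
			next_id = id + 1;	/* defer reuse of freshly freed ids */
			return (int)id;
		}
	}
	return -1;			/* every slot in use */
}

static void free_id(int id)
{
	slots[id] = NULL;	/* stays unused until the search wraps back around */
}

Because allocation resumes after the most recently used index, a freed id is only reused once the whole range has been walked, which is the property the copen path above depends on.
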
diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c
index 89df0ba8ba5e..781aac4ef274 100644
--- a/fs/cachefiles/volume.c
+++ b/fs/cachefiles/volume.c
@@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie)
void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
{
- fscache_withdraw_volume(volume->vcookie);
cachefiles_set_volume_xattr(volume);
__cachefiles_free_volume(volume);
}
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index bcb6173943ee..4dd8a993c60a 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -110,9 +110,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
if (xlen == 0)
xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen);
if (xlen != tlen) {
- if (xlen < 0)
+ if (xlen < 0) {
+ ret = xlen;
trace_cachefiles_vfs_error(object, file_inode(file), xlen,
cachefiles_trace_getxattr_error);
+ }
if (xlen == -EIO)
cachefiles_io_error_obj(
object,
@@ -252,6 +254,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len);
if (xlen != len) {
if (xlen < 0) {
+ ret = xlen;
trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
cachefiles_trace_getxattr_error);
if (xlen == -EIO)
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index ccdbec388091..40f84d014524 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -31,15 +31,7 @@ static int coda_symlink_filler(struct file *file, struct folio *folio)
cii = ITOC(inode);
error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
- if (error)
- goto fail;
- folio_mark_uptodate(folio);
- folio_unlock(folio);
- return 0;
-
-fail:
- folio_set_error(folio);
- folio_unlock(folio);
+ folio_end_read(folio, error == 0);
return error;
}
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 18677cd4e62f..43d6bde1adcc 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -580,6 +580,7 @@ static void detach_attrs(struct config_item * item)
static int populate_attrs(struct config_item *item)
{
const struct config_item_type *t = item->ci_type;
+ struct configfs_group_operations *ops;
struct configfs_attribute *attr;
struct configfs_bin_attribute *bin_attr;
int error = 0;
@@ -587,14 +588,23 @@ static int populate_attrs(struct config_item *item)
if (!t)
return -EINVAL;
+
+ ops = t->ct_group_ops;
+
if (t->ct_attrs) {
for (i = 0; (attr = t->ct_attrs[i]) != NULL; i++) {
+ if (ops && ops->is_visible && !ops->is_visible(item, attr, i))
+ continue;
+
if ((error = configfs_create_file(item, attr)))
break;
}
}
if (t->ct_bin_attrs) {
for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
+ if (ops && ops->is_bin_visible && !ops->is_bin_visible(item, bin_attr, i))
+ continue;
+
error = configfs_create_bin_file(item, bin_attr);
if (error)
break;
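
The populate_attrs() hunks above let a group's ct_group_ops filter which default attributes are created at mkdir time. A hypothetical provider might wire this up roughly as below; the hook signature and bool return type are assumed from the call sites above, and all example_* names are invented:

#include <linux/configfs.h>
#include <linux/module.h>

static struct configfs_attribute example_attr_optional;	/* normally defined via CONFIGFS_ATTR() */

static struct configfs_attribute *example_attrs[] = {
	&example_attr_optional,
	NULL,
};

static bool example_feature_enabled(struct config_item *item)
{
	return false;	/* placeholder policy for the sketch */
}

static bool example_is_visible(struct config_item *item,
			       struct configfs_attribute *attr, int n)
{
	/* Hide the optional attribute when the feature is not available. */
	if (attr == &example_attr_optional)
		return example_feature_enabled(item);
	return true;
}

static struct configfs_group_operations example_group_ops = {
	.is_visible	= example_is_visible,
};

static const struct config_item_type example_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= example_attrs,
	.ct_group_ops	= &example_group_ops,
};
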
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 460690ca0174..b84d1747a020 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -811,19 +811,19 @@ out:
static int cramfs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
u32 maxblock;
int bytes_filled;
void *pgdata;
+ bool success = false;
maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
bytes_filled = 0;
- pgdata = kmap_local_page(page);
+ pgdata = kmap_local_folio(folio, 0);
- if (page->index < maxblock) {
+ if (folio->index < maxblock) {
struct super_block *sb = inode->i_sb;
- u32 blkptr_offset = OFFSET(inode) + page->index * 4;
+ u32 blkptr_offset = OFFSET(inode) + folio->index * 4;
u32 block_ptr, block_start, block_len;
bool uncompressed, direct;
@@ -844,7 +844,7 @@ static int cramfs_read_folio(struct file *file, struct folio *folio)
if (uncompressed) {
block_len = PAGE_SIZE;
/* if last block: cap to file length */
- if (page->index == maxblock - 1)
+ if (folio->index == maxblock - 1)
block_len =
offset_in_page(inode->i_size);
} else {
@@ -861,7 +861,7 @@ static int cramfs_read_folio(struct file *file, struct folio *folio)
* from the previous block's pointer.
*/
block_start = OFFSET(inode) + maxblock * 4;
- if (page->index)
+ if (folio->index)
block_start = *(u32 *)
cramfs_read(sb, blkptr_offset - 4, 4);
/* Beware... previous ptr might be a direct ptr */
@@ -906,17 +906,12 @@ static int cramfs_read_folio(struct file *file, struct folio *folio)
}
memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
- flush_dcache_page(page);
- kunmap_local(pgdata);
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+ flush_dcache_folio(folio);
+ success = true;
err:
kunmap_local(pgdata);
- ClearPageUptodate(page);
- SetPageError(page);
- unlock_page(page);
+ folio_end_read(folio, success);
return 0;
}
@@ -1003,4 +998,5 @@ static void __exit exit_cramfs_fs(void)
module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
+MODULE_DESCRIPTION("Compressed ROM file system support");
MODULE_LICENSE("GPL");
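
Both the coda and cramfs conversions above fold the old "mark uptodate on success, then unlock" tail into a single folio_end_read() call. The rough equivalence being relied on is (sketch only, not the actual implementation, which also avoids the error page flag and does the flag updates more efficiently):

/* tail the callers used to open-code */
if (success)
	folio_mark_uptodate(folio);
folio_unlock(folio);

/* single call used now */
folio_end_read(folio, success);
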
diff --git a/fs/dcache.c b/fs/dcache.c
index 1ee6404b430b..8bdc278a0205 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -35,6 +35,8 @@
#include "internal.h"
#include "mount.h"
+#include <asm/runtime-const.h>
+
/*
* Usage:
* dcache->d_inode->i_lock protects:
@@ -100,9 +102,10 @@ static unsigned int d_hash_shift __ro_after_init;
static struct hlist_bl_head *dentry_hashtable __ro_after_init;
-static inline struct hlist_bl_head *d_hash(unsigned int hash)
+static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
{
- return dentry_hashtable + (hash >> d_hash_shift);
+ return runtime_const_ptr(dentry_hashtable) +
+ runtime_const_shift_right_32(hashlen, d_hash_shift);
}
#define IN_LOOKUP_SHIFT 10
@@ -355,7 +358,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
flags &= ~DCACHE_ENTRY_TYPE;
WRITE_ONCE(dentry->d_flags, flags);
dentry->d_inode = NULL;
- if (flags & DCACHE_LRU_LIST)
+ /*
+ * The negative counter only tracks dentries on the LRU. Don't inc if
+ * d_lru is on another list.
+ */
+ if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_inc(nr_dentry_negative);
}
@@ -1548,7 +1555,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
{
struct dentry *dentry;
- WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
+ rwsem_assert_held_write(&sb->s_umount);
dentry = sb->s_root;
sb->s_root = NULL;
@@ -1844,9 +1851,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
spin_lock(&dentry->d_lock);
/*
- * Decrement negative dentry count if it was in the LRU list.
+ * The negative counter only tracks dentries on the LRU. Don't dec if
+ * d_lru is on another list.
*/
- if (dentry->d_flags & DCACHE_LRU_LIST)
+ if ((dentry->d_flags &
+ (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_dec(nr_dentry_negative);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
@@ -2104,7 +2113,7 @@ static noinline struct dentry *__d_lookup_rcu_op_compare(
unsigned *seqp)
{
u64 hashlen = name->hash_len;
- struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
+ struct hlist_bl_head *b = d_hash(hashlen);
struct hlist_bl_node *node;
struct dentry *dentry;
@@ -2171,7 +2180,7 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
{
u64 hashlen = name->hash_len;
const unsigned char *str = name->name;
- struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
+ struct hlist_bl_head *b = d_hash(hashlen);
struct hlist_bl_node *node;
struct dentry *dentry;
@@ -2360,17 +2369,19 @@ EXPORT_SYMBOL(d_hash_and_lookup);
* - unhash this dentry and free it.
*
* Usually, we want to just turn this into
- * a negative dentry, but certain workloads can
- * generate a large number of negative dentries.
- * Therefore, it would be better to simply
- * unhash it.
+ * a negative dentry, but if anybody else is
+ * currently using the dentry or the inode
+ * we can't do that and we fall back on removing
+ * it from the hash queues and waiting for
+ * it to be deleted later when it has no users
*/
/**
* d_delete - delete a dentry
* @dentry: The dentry to delete
*
- * Remove the dentry from the hash queues so it can be deleted later.
+ * Turn the dentry into a negative dentry if possible, otherwise
+ * remove it from the hash queues so it can be deleted later
*/
void d_delete(struct dentry * dentry)
@@ -2379,8 +2390,6 @@ void d_delete(struct dentry * dentry)
spin_lock(&inode->i_lock);
spin_lock(&dentry->d_lock);
- __d_drop(dentry);
-
/*
* Are we the only user?
*/
@@ -2388,6 +2397,7 @@ void d_delete(struct dentry * dentry)
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_unlink_inode(dentry);
} else {
+ __d_drop(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
}
@@ -3028,28 +3038,25 @@ EXPORT_SYMBOL(d_splice_alias);
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
- bool result;
+ bool subdir;
unsigned seq;
if (new_dentry == old_dentry)
return true;
- do {
- /* for restarting inner loop in case of seq retry */
- seq = read_seqbegin(&rename_lock);
- /*
- * Need rcu_readlock to protect against the d_parent trashing
- * due to d_move
- */
- rcu_read_lock();
- if (d_ancestor(old_dentry, new_dentry))
- result = true;
- else
- result = false;
- rcu_read_unlock();
- } while (read_seqretry(&rename_lock, seq));
-
- return result;
+ /* Access d_parent under rcu as d_move() may change it. */
+ rcu_read_lock();
+ seq = read_seqbegin(&rename_lock);
+ subdir = d_ancestor(old_dentry, new_dentry);
+ /* Try lockless once... */
+ if (read_seqretry(&rename_lock, seq)) {
+ /* ...else acquire lock for progress even on deep chains. */
+ read_seqlock_excl(&rename_lock);
+ subdir = d_ancestor(old_dentry, new_dentry);
+ read_sequnlock_excl(&rename_lock);
+ }
+ rcu_read_unlock();
+ return subdir;
}
EXPORT_SYMBOL(is_subdir);
@@ -3099,6 +3106,34 @@ void d_tmpfile(struct file *file, struct inode *inode)
}
EXPORT_SYMBOL(d_tmpfile);
+/*
+ * Obtain inode number of the parent dentry.
+ */
+ino_t d_parent_ino(struct dentry *dentry)
+{
+ struct dentry *parent;
+ struct inode *iparent;
+ unsigned seq;
+ ino_t ret;
+
+ scoped_guard(rcu) {
+ seq = raw_seqcount_begin(&dentry->d_seq);
+ parent = READ_ONCE(dentry->d_parent);
+ iparent = d_inode_rcu(parent);
+ if (likely(iparent)) {
+ ret = iparent->i_ino;
+ if (!read_seqcount_retry(&dentry->d_seq, seq))
+ return ret;
+ }
+ }
+
+ spin_lock(&dentry->d_lock);
+ ret = dentry->d_parent->d_inode->i_ino;
+ spin_unlock(&dentry->d_lock);
+ return ret;
+}
+EXPORT_SYMBOL(d_parent_ino);
+
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
@@ -3128,6 +3163,9 @@ static void __init dcache_init_early(void)
0,
0);
d_hash_shift = 32 - d_hash_shift;
+
+ runtime_const_init(shift, d_hash_shift);
+ runtime_const_init(ptr, dentry_hashtable);
}
static void __init dcache_init(void)
@@ -3156,6 +3194,9 @@ static void __init dcache_init(void)
0,
0);
d_hash_shift = 32 - d_hash_shift;
+
+ runtime_const_init(shift, d_hash_shift);
+ runtime_const_init(ptr, dentry_hashtable);
}
/* SLAB cache for __getname() consumers */
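
is_subdir() and d_parent_ino() above share one shape: take a single lockless snapshot under the sequence count, and fall back to the locked (or exclusive-seqlock) path only if that snapshot raced with a writer, so a rename storm cannot livelock the reader. A generic kernel-style sketch of that shape, using a hypothetical seqlock-protected value rather than dcache internals:

#include <linux/seqlock.h>

struct my_state {
	seqlock_t	lock;	/* taken by writers via write_seqlock() */
	u64		value;
};

/* Read value with one lockless attempt, then fall back to the lock. */
static u64 my_state_read(struct my_state *s)
{
	unsigned int seq;
	u64 v;

	/* Try lockless once... */
	seq = read_seqbegin(&s->lock);
	v = s->value;
	if (!read_seqretry(&s->lock, seq))
		return v;

	/* ...else serialize with writers so repeated retries cannot starve us. */
	read_seqlock_excl(&s->lock);
	v = s->value;
	read_sequnlock_excl(&s->lock);
	return v;
}
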
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index dc51df0b118d..91521576f500 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -92,9 +92,9 @@ enum {
};
static const struct fs_parameter_spec debugfs_param_specs[] = {
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_gid ("gid", Opt_gid),
fsparam_u32oct ("mode", Opt_mode),
- fsparam_u32 ("uid", Opt_uid),
+ fsparam_uid ("uid", Opt_uid),
{}
};
@@ -102,26 +102,26 @@ static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param
{
struct debugfs_fs_info *opts = fc->s_fs_info;
struct fs_parse_result result;
- kuid_t uid;
- kgid_t gid;
int opt;
opt = fs_parse(fc, debugfs_param_specs, param, &result);
- if (opt < 0)
+ if (opt < 0) {
+ /*
+ * We might like to report bad mount options here; but
+ * traditionally debugfs has ignored all mount options
+ */
+ if (opt == -ENOPARAM)
+ return 0;
+
return opt;
+ }
switch (opt) {
case Opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- return invalf(fc, "Unknown uid");
- opts->uid = uid;
+ opts->uid = result.uid;
break;
case Opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- return invalf(fc, "Unknown gid");
- opts->gid = gid;
+ opts->gid = result.gid;
break;
case Opt_mode:
opts->mode = result.uint_32 & S_IALLUGO;
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 59711486d801..742b30b61c19 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -18,35 +18,52 @@
#include "user.h"
#include "ast.h"
-static void dlm_callback_work(struct work_struct *work)
+static void dlm_run_callback(uint32_t ls_id, uint32_t lkb_id, int8_t mode,
+ uint32_t flags, uint8_t sb_flags, int sb_status,
+ struct dlm_lksb *lksb,
+ void (*astfn)(void *astparam),
+ void (*bastfn)(void *astparam, int mode),
+ void *astparam, const char *res_name,
+ size_t res_length)
{
- struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
-
- if (cb->flags & DLM_CB_BAST) {
- trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
- cb->res_length);
- cb->bastfn(cb->astparam, cb->mode);
- } else if (cb->flags & DLM_CB_CAST) {
- trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
- cb->sb_flags, cb->res_name, cb->res_length);
- cb->lkb_lksb->sb_status = cb->sb_status;
- cb->lkb_lksb->sb_flags = cb->sb_flags;
- cb->astfn(cb->astparam);
+ if (flags & DLM_CB_BAST) {
+ trace_dlm_bast(ls_id, lkb_id, mode, res_name, res_length);
+ bastfn(astparam, mode);
+ } else if (flags & DLM_CB_CAST) {
+ trace_dlm_ast(ls_id, lkb_id, sb_status, sb_flags, res_name,
+ res_length);
+ lksb->sb_status = sb_status;
+ lksb->sb_flags = sb_flags;
+ astfn(astparam);
}
+}
+static void dlm_do_callback(struct dlm_callback *cb)
+{
+ dlm_run_callback(cb->ls_id, cb->lkb_id, cb->mode, cb->flags,
+ cb->sb_flags, cb->sb_status, cb->lkb_lksb,
+ cb->astfn, cb->bastfn, cb->astparam,
+ cb->res_name, cb->res_length);
dlm_free_cb(cb);
}
-int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags,
- struct dlm_callback **cb)
+static void dlm_callback_work(struct work_struct *work)
+{
+ struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
+
+ dlm_do_callback(cb);
+}
+
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags, int *copy_lvb)
{
struct dlm_rsb *rsb = lkb->lkb_resource;
- int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
struct dlm_ls *ls = rsb->res_ls;
- int copy_lvb = 0;
int prev_mode;
+ if (copy_lvb)
+ *copy_lvb = 0;
+
if (flags & DLM_CB_BAST) {
/* if cb is a bast, it should be skipped if the blocking mode is
* compatible with the last granted mode
@@ -56,7 +73,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
log_debug(ls, "skip %x bast mode %d for cast mode %d",
lkb->lkb_id, mode,
lkb->lkb_last_cast_cb_mode);
- goto out;
+ return true;
}
}
@@ -74,7 +91,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
(prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
log_debug(ls, "skip %x add bast mode %d for bast mode %d",
lkb->lkb_id, mode, prev_mode);
- goto out;
+ return true;
}
}
@@ -85,8 +102,10 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
prev_mode = lkb->lkb_last_cast_cb_mode;
if (!status && lkb->lkb_lksb->sb_lvbptr &&
- dlm_lvb_operations[prev_mode + 1][mode + 1])
- copy_lvb = 1;
+ dlm_lvb_operations[prev_mode + 1][mode + 1]) {
+ if (copy_lvb)
+ *copy_lvb = 1;
+ }
}
lkb->lkb_last_cast_cb_mode = mode;
@@ -96,11 +115,19 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
lkb->lkb_last_cb_mode = mode;
lkb->lkb_last_cb_flags = flags;
+ return false;
+}
+
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb)
+{
+ struct dlm_rsb *rsb = lkb->lkb_resource;
+ struct dlm_ls *ls = rsb->res_ls;
+
*cb = dlm_allocate_cb();
- if (!*cb) {
- rv = DLM_ENQUEUE_CALLBACK_FAILURE;
- goto out;
- }
+ if (WARN_ON_ONCE(!*cb))
+ return -ENOMEM;
/* for tracing */
(*cb)->lkb_id = lkb->lkb_id;
@@ -112,19 +139,34 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
(*cb)->mode = mode;
(*cb)->sb_status = status;
(*cb)->sb_flags = (sbflags & 0x000000FF);
- (*cb)->copy_lvb = copy_lvb;
(*cb)->lkb_lksb = lkb->lkb_lksb;
- rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
+ return 0;
+}
+
+static int dlm_get_queue_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb)
+{
+ int rv;
+
+ rv = dlm_get_cb(lkb, flags, mode, status, sbflags, cb);
+ if (rv)
+ return rv;
-out:
- return rv;
+ (*cb)->astfn = lkb->lkb_astfn;
+ (*cb)->bastfn = lkb->lkb_bastfn;
+ (*cb)->astparam = lkb->lkb_astparam;
+ INIT_WORK(&(*cb)->work, dlm_callback_work);
+
+ return 0;
}
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
- uint32_t sbflags)
+ uint32_t sbflags)
{
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+ struct dlm_rsb *rsb = lkb->lkb_resource;
+ struct dlm_ls *ls = rsb->res_ls;
struct dlm_callback *cb;
int rv;
@@ -133,34 +175,36 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
return;
}
- rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags,
- &cb);
- switch (rv) {
- case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
- cb->astfn = lkb->lkb_astfn;
- cb->bastfn = lkb->lkb_bastfn;
- cb->astparam = lkb->lkb_astparam;
- INIT_WORK(&cb->work, dlm_callback_work);
-
- spin_lock_bh(&ls->ls_cb_lock);
- if (test_bit(LSFL_CB_DELAY, &ls->ls_flags))
+ if (dlm_may_skip_callback(lkb, flags, mode, status, sbflags, NULL))
+ return;
+
+ spin_lock_bh(&ls->ls_cb_lock);
+ if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
+ rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+ if (!rv)
list_add(&cb->list, &ls->ls_cb_delay);
- else
- queue_work(ls->ls_callback_wq, &cb->work);
- spin_unlock_bh(&ls->ls_cb_lock);
- break;
- case DLM_ENQUEUE_CALLBACK_SUCCESS:
- break;
- case DLM_ENQUEUE_CALLBACK_FAILURE:
- fallthrough;
- default:
- WARN_ON_ONCE(1);
- break;
+ } else {
+ if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags)) {
+ dlm_run_callback(ls->ls_global_id, lkb->lkb_id, mode, flags,
+ sbflags, status, lkb->lkb_lksb,
+ lkb->lkb_astfn, lkb->lkb_bastfn,
+ lkb->lkb_astparam, rsb->res_name,
+ rsb->res_length);
+ } else {
+ rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+ if (!rv)
+ queue_work(ls->ls_callback_wq, &cb->work);
+ }
}
+ spin_unlock_bh(&ls->ls_cb_lock);
}
int dlm_callback_start(struct dlm_ls *ls)
{
+ if (!test_bit(LSFL_FS, &ls->ls_flags) ||
+ test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+ return 0;
+
ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback",
WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!ls->ls_callback_wq) {
@@ -178,13 +222,15 @@ void dlm_callback_stop(struct dlm_ls *ls)
void dlm_callback_suspend(struct dlm_ls *ls)
{
- if (ls->ls_callback_wq) {
- spin_lock_bh(&ls->ls_cb_lock);
- set_bit(LSFL_CB_DELAY, &ls->ls_flags);
- spin_unlock_bh(&ls->ls_cb_lock);
+ if (!test_bit(LSFL_FS, &ls->ls_flags))
+ return;
+
+ spin_lock_bh(&ls->ls_cb_lock);
+ set_bit(LSFL_CB_DELAY, &ls->ls_flags);
+ spin_unlock_bh(&ls->ls_cb_lock);
+ if (ls->ls_callback_wq)
flush_workqueue(ls->ls_callback_wq);
- }
}
#define MAX_CB_QUEUE 25
@@ -195,14 +241,18 @@ void dlm_callback_resume(struct dlm_ls *ls)
int count = 0, sum = 0;
bool empty;
- if (!ls->ls_callback_wq)
+ if (!test_bit(LSFL_FS, &ls->ls_flags))
return;
more:
spin_lock_bh(&ls->ls_cb_lock);
list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
list_del(&cb->list);
- queue_work(ls->ls_callback_wq, &cb->work);
+ if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+ dlm_do_callback(cb);
+ else
+ queue_work(ls->ls_callback_wq, &cb->work);
+
count++;
if (count == MAX_CB_QUEUE)
break;
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index 9093ff043bee..e2b86845d331 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -11,12 +11,11 @@
#ifndef __ASTD_DOT_H__
#define __ASTD_DOT_H__
-#define DLM_ENQUEUE_CALLBACK_NEED_SCHED 1
-#define DLM_ENQUEUE_CALLBACK_SUCCESS 0
-#define DLM_ENQUEUE_CALLBACK_FAILURE -1
-int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags,
- struct dlm_callback **cb);
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags, int *copy_lvb);
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb);
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
uint32_t sbflags);
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 517fa975dc5a..99952234799e 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -672,7 +672,7 @@ static ssize_t comm_addr_store(struct config_item *item, const char *buf,
memcpy(addr, buf, len);
- rv = dlm_midcomms_addr(cm->nodeid, addr, len);
+ rv = dlm_midcomms_addr(cm->nodeid, addr);
if (rv) {
kfree(addr);
return rv;
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 6ab3ed4074c6..7112958c2e5b 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -380,7 +380,7 @@ static const struct seq_operations format4_seq_ops;
static int table_seq_show(struct seq_file *seq, void *iter_ptr)
{
- struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_rsbs_list);
+ struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_slow_list);
if (seq->op == &format1_seq_ops)
print_format1(rsb, seq);
@@ -409,9 +409,9 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
}
if (seq->op == &format4_seq_ops)
- list = &ls->ls_toss;
+ list = &ls->ls_slow_inactive;
else
- list = &ls->ls_keep;
+ list = &ls->ls_slow_active;
read_lock_bh(&ls->ls_rsbtbl_lock);
return seq_list_start(list, *pos);
@@ -423,9 +423,9 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
struct list_head *list;
if (seq->op == &format4_seq_ops)
- list = &ls->ls_toss;
+ list = &ls->ls_slow_inactive;
else
- list = &ls->ls_keep;
+ list = &ls->ls_slow_active;
return seq_list_next(iter_ptr, list, pos);
}
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 9085ba3b2f20..32d98e63d25e 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -36,7 +36,7 @@
#include <linux/miscdevice.h>
#include <linux/rhashtable.h>
#include <linux/mutex.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
@@ -316,26 +316,24 @@ struct dlm_rsb {
int res_nodeid;
int res_master_nodeid;
int res_dir_nodeid;
- int res_id; /* for ls_recover_idr */
+ unsigned long res_id; /* for ls_recover_xa */
uint32_t res_lvbseq;
uint32_t res_hash;
unsigned long res_toss_time;
uint32_t res_first_lkid;
struct list_head res_lookup; /* lkbs waiting on first */
- union {
- struct list_head res_hashchain;
- struct rhash_head res_node; /* rsbtbl */
- };
+ struct rhash_head res_node; /* rsbtbl */
struct list_head res_grantqueue;
struct list_head res_convertqueue;
struct list_head res_waitqueue;
- struct list_head res_rsbs_list;
+ struct list_head res_slow_list; /* ls_slow_* */
+ struct list_head res_scan_list;
struct list_head res_root_list; /* used for recovery */
struct list_head res_masters_list; /* used for recovery */
struct list_head res_recover_list; /* used for recovery */
- struct list_head res_toss_q_list;
int res_recover_locks_count;
+ struct rcu_head rcu;
char *res_lvbptr;
char res_name[DLM_RESNAME_MAXLEN+1];
@@ -368,7 +366,8 @@ enum rsb_flags {
RSB_RECOVER_CONVERT,
RSB_RECOVER_GRANT,
RSB_RECOVER_LVB_INVAL,
- RSB_TOSS,
+ RSB_INACTIVE,
+ RSB_HASHED, /* set while rsb is on ls_rsbtbl */
};
static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
@@ -559,16 +558,8 @@ struct rcom_lock {
char rl_lvb[];
};
-/*
- * The max number of resources per rsbtbl bucket that shrink will attempt
- * to remove in each iteration.
- */
-
-#define DLM_REMOVE_NAMES_MAX 8
-
struct dlm_ls {
struct list_head ls_list; /* list of lockspaces */
- dlm_lockspace_t *ls_local_handle;
uint32_t ls_global_id; /* global unique lockspace ID */
uint32_t ls_generation;
uint32_t ls_exflags;
@@ -578,26 +569,21 @@ struct dlm_ls {
wait_queue_head_t ls_count_wait;
int ls_create_count; /* create/release refcount */
unsigned long ls_flags; /* LSFL_ */
- unsigned long ls_scan_time;
struct kobject ls_kobj;
- struct idr ls_lkbidr;
- rwlock_t ls_lkbidr_lock;
+ struct xarray ls_lkbxa;
+ rwlock_t ls_lkbxa_lock;
+ /* an rsb is on rsbtbl for primary locking functions,
+ and on a slow list for recovery/dump iteration */
struct rhashtable ls_rsbtbl;
- rwlock_t ls_rsbtbl_lock;
+ rwlock_t ls_rsbtbl_lock; /* for ls_rsbtbl and ls_slow */
+ struct list_head ls_slow_inactive; /* to iterate rsbtbl */
+ struct list_head ls_slow_active; /* to iterate rsbtbl */
- struct list_head ls_toss;
- struct list_head ls_keep;
-
- struct timer_list ls_timer;
- /* this queue is ordered according the
- * absolute res_toss_time jiffies time
- * to mod_timer() with the first element
- * if necessary.
- */
- struct list_head ls_toss_q;
- spinlock_t ls_toss_q_lock;
+ struct timer_list ls_scan_timer; /* based on first scan_list rsb toss_time */
+ struct list_head ls_scan_list; /* rsbs ordered by res_toss_time */
+ spinlock_t ls_scan_lock;
spinlock_t ls_waiters_lock;
struct list_head ls_waiters; /* lkbs needing a reply */
@@ -605,10 +591,6 @@ struct dlm_ls {
spinlock_t ls_orphans_lock;
struct list_head ls_orphans;
- spinlock_t ls_new_rsb_spin;
- int ls_new_rsb_count;
- struct list_head ls_new_rsb; /* new rsb structs */
-
struct list_head ls_nodes; /* current nodes in ls */
struct list_head ls_nodes_gone; /* dead node list, recovery */
int ls_num_nodes; /* number of nodes in ls */
@@ -664,8 +646,8 @@ struct dlm_ls {
struct list_head ls_recover_list;
spinlock_t ls_recover_list_lock;
int ls_recover_list_count;
- struct idr ls_recover_idr;
- spinlock_t ls_recover_idr_lock;
+ struct xarray ls_recover_xa;
+ spinlock_t ls_recover_xa_lock;
wait_queue_head_t ls_wait_general;
wait_queue_head_t ls_recover_lock_wait;
spinlock_t ls_clear_proc_locks;
@@ -716,6 +698,8 @@ struct dlm_ls {
#define LSFL_CB_DELAY 9
#define LSFL_NODIR 10
#define LSFL_RECV_MSG_BLOCKED 11
+#define LSFL_FS 12
+#define LSFL_SOFTIRQ 13
#define DLM_PROC_FLAGS_CLOSING 1
#define DLM_PROC_FLAGS_COMPAT 2
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index f103b8c30592..8bee4f444afd 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -89,7 +89,7 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
const struct dlm_message *ms, bool local);
static int receive_extralen(const struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
-static void toss_rsb(struct kref *kref);
+static void deactivate_rsb(struct kref *kref);
/*
* Lock compatibility matrix - thanks Steve
@@ -330,8 +330,8 @@ static inline unsigned long rsb_toss_jiffies(void)
static inline void hold_rsb(struct dlm_rsb *r)
{
- /* rsbs in toss state never get referenced */
- WARN_ON(rsb_flag(r, RSB_TOSS));
+ /* inactive rsbs are not ref counted */
+ WARN_ON(rsb_flag(r, RSB_INACTIVE));
kref_get(&r->res_ref);
}
@@ -370,15 +370,12 @@ static inline int dlm_kref_put_write_lock_bh(struct kref *kref,
return 0;
}
-/* When all references to the rsb are gone it's transferred to
- the tossed list for later disposal. */
-
static void put_rsb(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
int rv;
- rv = dlm_kref_put_write_lock_bh(&r->res_ref, toss_rsb,
+ rv = dlm_kref_put_write_lock_bh(&r->res_ref, deactivate_rsb,
&ls->ls_rsbtbl_lock);
if (rv)
write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -389,82 +386,54 @@ void dlm_put_rsb(struct dlm_rsb *r)
put_rsb(r);
}
-static int pre_rsb_struct(struct dlm_ls *ls)
-{
- struct dlm_rsb *r1, *r2;
- int count = 0;
-
- spin_lock_bh(&ls->ls_new_rsb_spin);
- if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
- spin_unlock_bh(&ls->ls_new_rsb_spin);
- return 0;
- }
- spin_unlock_bh(&ls->ls_new_rsb_spin);
-
- r1 = dlm_allocate_rsb(ls);
- r2 = dlm_allocate_rsb(ls);
-
- spin_lock_bh(&ls->ls_new_rsb_spin);
- if (r1) {
- list_add(&r1->res_hashchain, &ls->ls_new_rsb);
- ls->ls_new_rsb_count++;
- }
- if (r2) {
- list_add(&r2->res_hashchain, &ls->ls_new_rsb);
- ls->ls_new_rsb_count++;
- }
- count = ls->ls_new_rsb_count;
- spin_unlock_bh(&ls->ls_new_rsb_spin);
-
- if (!count)
- return -ENOMEM;
- return 0;
-}
-
/* connected with timer_delete_sync() in dlm_ls_stop() to stop
* new timers when recovery is triggered and don't run them
- * again until a dlm_timer_resume() tries it again.
+ * again until a resume_scan_timer() tries it again.
*/
-static void __rsb_mod_timer(struct dlm_ls *ls, unsigned long jiffies)
+static void enable_scan_timer(struct dlm_ls *ls, unsigned long jiffies)
{
if (!dlm_locking_stopped(ls))
- mod_timer(&ls->ls_timer, jiffies);
+ mod_timer(&ls->ls_scan_timer, jiffies);
}
/* This function tries to resume the timer callback if a rsb
- * is on the toss list and no timer is pending. It might that
+ * is on the scan list and no timer is pending. It might be that
* the first entry is currently being executed as the timer callback,
* but we don't care if a timer is queued up again and does
* nothing. Should be a rare case.
*/
-void dlm_timer_resume(struct dlm_ls *ls)
+void resume_scan_timer(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- spin_lock_bh(&ls->ls_toss_q_lock);
- r = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
- res_toss_q_list);
- if (r && !timer_pending(&ls->ls_timer))
- __rsb_mod_timer(ls, r->res_toss_time);
- spin_unlock_bh(&ls->ls_toss_q_lock);
+ spin_lock_bh(&ls->ls_scan_lock);
+ r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ if (r && !timer_pending(&ls->ls_scan_timer))
+ enable_scan_timer(ls, r->res_toss_time);
+ spin_unlock_bh(&ls->ls_scan_lock);
}
-/* ls_rsbtbl_lock must be held and being sure the rsb is in toss state */
-static void rsb_delete_toss_timer(struct dlm_ls *ls, struct dlm_rsb *r)
+/* ls_rsbtbl_lock must be held */
+
+static void del_scan(struct dlm_ls *ls, struct dlm_rsb *r)
{
struct dlm_rsb *first;
- spin_lock_bh(&ls->ls_toss_q_lock);
+ /* active rsbs should never be on the scan list */
+ WARN_ON(!rsb_flag(r, RSB_INACTIVE));
+
+ spin_lock_bh(&ls->ls_scan_lock);
r->res_toss_time = 0;
/* if the rsb is not queued do nothing */
- if (list_empty(&r->res_toss_q_list))
+ if (list_empty(&r->res_scan_list))
goto out;
/* get the first element before delete */
- first = list_first_entry(&ls->ls_toss_q, struct dlm_rsb,
- res_toss_q_list);
- list_del_init(&r->res_toss_q_list);
+ first = list_first_entry(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ list_del_init(&r->res_scan_list);
/* check if the first element was the rsb we deleted */
if (first == r) {
/* try to get the new first element, if the list
@@ -474,70 +443,59 @@ static void rsb_delete_toss_timer(struct dlm_ls *ls, struct dlm_rsb *r)
* if the list isn't empty and a new first element got
* in place, set the new timer expire time.
*/
- first = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
- res_toss_q_list);
+ first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
if (!first)
- timer_delete(&ls->ls_timer);
+ timer_delete(&ls->ls_scan_timer);
else
- __rsb_mod_timer(ls, first->res_toss_time);
+ enable_scan_timer(ls, first->res_toss_time);
}
out:
- spin_unlock_bh(&ls->ls_toss_q_lock);
+ spin_unlock_bh(&ls->ls_scan_lock);
}
-/* Caller must held ls_rsbtbl_lock and need to be called every time
- * when either the rsb enters toss state or the toss state changes
- * the dir/master nodeid.
- */
-static void rsb_mod_timer(struct dlm_ls *ls, struct dlm_rsb *r)
+static void add_scan(struct dlm_ls *ls, struct dlm_rsb *r)
{
int our_nodeid = dlm_our_nodeid();
struct dlm_rsb *first;
- /* If we're the directory record for this rsb, and
- * we're not the master of it, then we need to wait
- * for the master node to send us a dir remove for
- * before removing the dir record.
- */
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid != our_nodeid) &&
- (dlm_dir_nodeid(r) == our_nodeid)) {
- rsb_delete_toss_timer(ls, r);
- return;
- }
+ /* A dir record for a remote master rsb should never be on the scan list. */
+ WARN_ON(!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid));
+
+ /* An active rsb should never be on the scan list. */
+ WARN_ON(!rsb_flag(r, RSB_INACTIVE));
- spin_lock_bh(&ls->ls_toss_q_lock);
+ /* An rsb should not already be on the scan list. */
+ WARN_ON(!list_empty(&r->res_scan_list));
+
+ spin_lock_bh(&ls->ls_scan_lock);
/* set the new rsb absolute expire time in the rsb */
r->res_toss_time = rsb_toss_jiffies();
- if (list_empty(&ls->ls_toss_q)) {
+ if (list_empty(&ls->ls_scan_list)) {
/* if the queue is empty add the element and it's
* our new expire time
*/
- list_add_tail(&r->res_toss_q_list, &ls->ls_toss_q);
- __rsb_mod_timer(ls, r->res_toss_time);
+ list_add_tail(&r->res_scan_list, &ls->ls_scan_list);
+ enable_scan_timer(ls, r->res_toss_time);
} else {
- /* check if the rsb was already queued, if so delete
- * it from the toss queue
- */
- if (!list_empty(&r->res_toss_q_list))
- list_del(&r->res_toss_q_list);
-
/* get the possibly new first element, then add this rsb to
* the end of the queue. If the list was empty before, this
* rsb's expire time is our next expiration; otherwise the
* current first element's expire time is used.
*/
- first = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
- res_toss_q_list);
- list_add_tail(&r->res_toss_q_list, &ls->ls_toss_q);
+ first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
+ list_add_tail(&r->res_scan_list, &ls->ls_scan_list);
if (!first)
- __rsb_mod_timer(ls, r->res_toss_time);
+ enable_scan_timer(ls, r->res_toss_time);
else
- __rsb_mod_timer(ls, first->res_toss_time);
+ enable_scan_timer(ls, first->res_toss_time);
}
- spin_unlock_bh(&ls->ls_toss_q_lock);
+ spin_unlock_bh(&ls->ls_scan_lock);
}
/* if we hit contention we retry the trylock after 250 ms.
@@ -547,9 +505,11 @@ static void rsb_mod_timer(struct dlm_ls *ls, struct dlm_rsb *r)
*/
#define DLM_TOSS_TIMER_RETRY (jiffies + msecs_to_jiffies(250))
-void dlm_rsb_toss_timer(struct timer_list *timer)
+/* Called by lockspace scan_timer to free unused rsb's. */
+
+void dlm_rsb_scan(struct timer_list *timer)
{
- struct dlm_ls *ls = from_timer(ls, timer, ls_timer);
+ struct dlm_ls *ls = from_timer(ls, timer, ls_scan_timer);
int our_nodeid = dlm_our_nodeid();
struct dlm_rsb *r;
int rv;
@@ -557,76 +517,63 @@ void dlm_rsb_toss_timer(struct timer_list *timer)
while (1) {
/* interrupting point to leave iteration when
* recovery waits for timer_delete_sync(), recovery
- * will take care to delete everything in toss queue.
+ * will take care to delete everything in scan list.
*/
if (dlm_locking_stopped(ls))
break;
- rv = spin_trylock(&ls->ls_toss_q_lock);
+ rv = spin_trylock(&ls->ls_scan_lock);
if (!rv) {
/* rearm the retry timer again */
- __rsb_mod_timer(ls, DLM_TOSS_TIMER_RETRY);
+ enable_scan_timer(ls, DLM_TOSS_TIMER_RETRY);
break;
}
- r = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
- res_toss_q_list);
+ r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb,
+ res_scan_list);
if (!r) {
- /* nothing to do anymore next rsb queue will
- * set next mod_timer() expire.
- */
- spin_unlock(&ls->ls_toss_q_lock);
+ /* the next add_scan will enable the timer again */
+ spin_unlock(&ls->ls_scan_lock);
break;
}
- /* test if the first rsb isn't expired yet, if
- * so we stop freeing rsb from toss queue as
- * the order in queue is ascending to the
- * absolute res_toss_time jiffies
+ /*
+ * If the first rsb is not yet expired, then stop because the
+ * list is sorted with nearest expiration first.
*/
if (time_before(jiffies, r->res_toss_time)) {
/* rearm with the next rsb to expire in the future */
- __rsb_mod_timer(ls, r->res_toss_time);
- spin_unlock(&ls->ls_toss_q_lock);
+ enable_scan_timer(ls, r->res_toss_time);
+ spin_unlock(&ls->ls_scan_lock);
break;
}
/* in find_rsb_dir/nodir there is a reverse order of this
* lock, however this is only a trylock if we hit some
* possible contention we try it again.
- *
- * This lock synchronized while holding ls_toss_q_lock
- * synchronize everything that rsb_delete_toss_timer()
- * or rsb_mod_timer() can't run after this timer callback
- * deletes the rsb from the ls_toss_q. Whereas the other
- * holders have always a priority to run as this is only
- * a caching handling and the other holders might to put
- * this rsb out of the toss state.
*/
rv = write_trylock(&ls->ls_rsbtbl_lock);
if (!rv) {
- spin_unlock(&ls->ls_toss_q_lock);
+ spin_unlock(&ls->ls_scan_lock);
/* rearm the retry timer again */
- __rsb_mod_timer(ls, DLM_TOSS_TIMER_RETRY);
+ enable_scan_timer(ls, DLM_TOSS_TIMER_RETRY);
break;
}
- list_del(&r->res_rsbs_list);
+ list_del(&r->res_slow_list);
rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
dlm_rhash_rsb_params);
+ rsb_clear_flag(r, RSB_HASHED);
- /* not necessary to held the ls_rsbtbl_lock when
- * calling send_remove()
- */
+ /* ls_rsbtbl_lock is not needed when calling send_remove() */
write_unlock(&ls->ls_rsbtbl_lock);
- /* remove the rsb out of the toss queue its gone
- * drom DLM now
- */
- list_del_init(&r->res_toss_q_list);
- spin_unlock(&ls->ls_toss_q_lock);
+ list_del_init(&r->res_scan_list);
+ spin_unlock(&ls->ls_scan_lock);
- /* no rsb in this state should ever run a timer */
+ /* An rsb that is a dir record for a remote master rsb
+ * cannot be removed, and should not have a timer enabled.
+ */
WARN_ON(!dlm_no_directory(ls) &&
(r->res_master_nodeid != our_nodeid) &&
(dlm_dir_nodeid(r) == our_nodeid));
@@ -640,7 +587,7 @@ void dlm_rsb_toss_timer(struct timer_list *timer)
(dlm_dir_nodeid(r) != our_nodeid))
send_remove(r);
- free_toss_rsb(r);
+ free_inactive_rsb(r);
}
}
@@ -652,22 +599,10 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
struct dlm_rsb **r_ret)
{
struct dlm_rsb *r;
- int count;
- spin_lock_bh(&ls->ls_new_rsb_spin);
- if (list_empty(&ls->ls_new_rsb)) {
- count = ls->ls_new_rsb_count;
- spin_unlock_bh(&ls->ls_new_rsb_spin);
- log_debug(ls, "find_rsb retry %d %d %s",
- count, dlm_config.ci_new_rsb_count,
- (const char *)name);
- return -EAGAIN;
- }
-
- r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
- list_del(&r->res_hashchain);
- ls->ls_new_rsb_count--;
- spin_unlock_bh(&ls->ls_new_rsb_spin);
+ r = dlm_allocate_rsb(ls);
+ if (!r)
+ return -ENOMEM;
r->res_ls = ls;
r->res_length = len;
@@ -679,7 +614,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
INIT_LIST_HEAD(&r->res_convertqueue);
INIT_LIST_HEAD(&r->res_waitqueue);
INIT_LIST_HEAD(&r->res_root_list);
- INIT_LIST_HEAD(&r->res_toss_q_list);
+ INIT_LIST_HEAD(&r->res_scan_list);
INIT_LIST_HEAD(&r->res_recover_list);
INIT_LIST_HEAD(&r->res_masters_list);
@@ -702,8 +637,14 @@ int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len,
static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash)
{
- return rhashtable_insert_fast(rhash, &rsb->res_node,
- dlm_rhash_rsb_params);
+ int rv;
+
+ rv = rhashtable_insert_fast(rhash, &rsb->res_node,
+ dlm_rhash_rsb_params);
+ if (!rv)
+ rsb_set_flag(rsb, RSB_HASHED);
+
+ return rv;
}
/*
@@ -733,7 +674,7 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash)
* So, if the given rsb is on the toss list, it is moved to the keep list
* before being returned.
*
- * toss_rsb() happens when all local usage of the rsb is done, i.e. no
+ * deactivate_rsb() happens when all local usage of the rsb is done, i.e. no
* more refcounts exist, so the rsb is moved from the keep list to the
* toss list.
*
@@ -781,9 +722,9 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
*
* If someone sends us a request, we are the dir node, and we do
* not find the rsb anywhere, then recreate it. This happens if
- * someone sends us a request after we have removed/freed an rsb
- * from our toss list. (They sent a request instead of lookup
- * because they are using an rsb from their toss list.)
+ * someone sends us a request after we have removed/freed an rsb.
+ * (They sent a request instead of lookup because they are using
+ * an rsb taken from their scan list.)
*/
if (from_local || from_dir ||
@@ -792,15 +733,8 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
}
retry:
- if (create) {
- error = pre_rsb_struct(ls);
- if (error < 0)
- goto out;
- }
- retry_lookup:
-
- /* check if the rsb is in keep state under read lock - likely path */
+ /* check if the rsb is active under read lock - likely path */
read_lock_bh(&ls->ls_rsbtbl_lock);
error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (error) {
@@ -812,9 +746,9 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
* rsb is active, so we can't check master_nodeid without lock_rsb.
*/
- if (rsb_flag(r, RSB_TOSS)) {
+ if (rsb_flag(r, RSB_INACTIVE)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
- goto do_toss;
+ goto do_inactive;
}
kref_get(&r->res_ref);
@@ -822,17 +756,29 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
goto out;
- do_toss:
+ do_inactive:
write_lock_bh(&ls->ls_rsbtbl_lock);
- /* retry lookup under write lock to see if its still in toss state
- * if not it's in keep state and we relookup - unlikely path.
+ /*
+ * The expectation here is that the rsb will have HASHED and
+ * INACTIVE flags set, and that the rsb can be moved from
+ * inactive back to active again. However, between releasing
+ * the read lock and acquiring the write lock, this rsb could
+ * have been removed from rsbtbl, and had HASHED cleared, to
+ * be freed. To deal with this case, we would normally need
+ * to repeat dlm_search_rsb_tree while holding the write lock,
+ * but rcu allows us to simply check the HASHED flag, because
+ * the rcu read lock means the rsb will not be freed yet.
+ * If the HASHED flag is not set, then the rsb is being freed,
+ * so we add a new rsb struct. If the HASHED flag is set,
+ * and INACTIVE is not set, it means another thread has
+ * made the rsb active, as we're expecting to do here, and
+ * we just repeat the lookup (this will be very unlikely).
*/
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- if (!error) {
- if (!rsb_flag(r, RSB_TOSS)) {
+ if (rsb_flag(r, RSB_HASHED)) {
+ if (!rsb_flag(r, RSB_INACTIVE)) {
write_unlock_bh(&ls->ls_rsbtbl_lock);
- goto retry_lookup;
+ goto retry;
}
} else {
write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -842,14 +788,14 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
/*
* rsb found inactive (master_nodeid may be out of date unless
* we are the dir_nodeid or were the master) No other thread
- * is using this rsb because it's on the toss list, so we can
+ * is using this rsb because it's inactive, so we can
* look at or update res_master_nodeid without lock_rsb.
*/
if ((r->res_master_nodeid != our_nodeid) && from_other) {
/* our rsb was not master, and another node (not the dir node)
has sent us a request */
- log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
+ log_debug(ls, "find_rsb inactive from_other %d master %d dir %d %s",
from_nodeid, r->res_master_nodeid, dir_nodeid,
r->res_name);
write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -859,7 +805,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
if ((r->res_master_nodeid != our_nodeid) && from_dir) {
/* don't think this should ever happen */
- log_error(ls, "find_rsb toss from_dir %d master %d",
+ log_error(ls, "find_rsb inactive from_dir %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
/* fix it and go on */
@@ -876,14 +822,12 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
r->res_first_lkid = 0;
}
- list_move(&r->res_rsbs_list, &ls->ls_keep);
- rsb_clear_flag(r, RSB_TOSS);
- /* rsb got out of toss state, it becomes alive again
- * and we reinit the reference counter that is only
- * valid for keep state rsbs
- */
- kref_init(&r->res_ref);
- rsb_delete_toss_timer(ls, r);
+ /* A dir record will not be on the scan list. */
+ if (r->res_dir_nodeid != our_nodeid)
+ del_scan(ls, r);
+ list_move(&r->res_slow_list, &ls->ls_slow_active);
+ rsb_clear_flag(r, RSB_INACTIVE);
+ kref_init(&r->res_ref); /* ref is now used in active state */
write_unlock_bh(&ls->ls_rsbtbl_lock);
goto out;
@@ -898,9 +842,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
goto out;
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN)
- goto retry;
- if (error)
+ if (WARN_ON_ONCE(error))
goto out;
r->res_hash = hash;
@@ -952,9 +894,9 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
*/
write_unlock_bh(&ls->ls_rsbtbl_lock);
dlm_free_rsb(r);
- goto retry_lookup;
+ goto retry;
} else if (!error) {
- list_add(&r->res_rsbs_list, &ls->ls_keep);
+ list_add(&r->res_slow_list, &ls->ls_slow_active);
}
write_unlock_bh(&ls->ls_rsbtbl_lock);
out:
@@ -976,13 +918,8 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
int error;
retry:
- error = pre_rsb_struct(ls);
- if (error < 0)
- goto out;
- retry_lookup:
-
- /* check if the rsb is in keep state under read lock - likely path */
+ /* check if the rsb is in active state under read lock - likely path */
read_lock_bh(&ls->ls_rsbtbl_lock);
error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (error) {
@@ -990,9 +927,9 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
goto do_new;
}
- if (rsb_flag(r, RSB_TOSS)) {
+ if (rsb_flag(r, RSB_INACTIVE)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
- goto do_toss;
+ goto do_inactive;
}
/*
@@ -1005,17 +942,14 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
goto out;
- do_toss:
+ do_inactive:
write_lock_bh(&ls->ls_rsbtbl_lock);
- /* retry lookup under write lock to see if its still in toss state
- * if not it's in keep state and we relookup - unlikely path.
- */
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- if (!error) {
- if (!rsb_flag(r, RSB_TOSS)) {
+ /* See comment in find_rsb_dir. */
+ if (rsb_flag(r, RSB_HASHED)) {
+ if (!rsb_flag(r, RSB_INACTIVE)) {
write_unlock_bh(&ls->ls_rsbtbl_lock);
- goto retry_lookup;
+ goto retry;
}
} else {
write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -1025,14 +959,14 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
/*
* rsb found inactive. No other thread is using this rsb because
- * it's on the toss list, so we can look at or update
- * res_master_nodeid without lock_rsb.
+ * it's inactive, so we can look at or update res_master_nodeid
+ * without lock_rsb.
*/
if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
/* our rsb is not master, and another node has sent us a
request; this should never happen */
- log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
+ log_error(ls, "find_rsb inactive from_nodeid %d master %d dir %d",
from_nodeid, r->res_master_nodeid, dir_nodeid);
dlm_print_rsb(r);
write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -1044,21 +978,17 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
(dir_nodeid == our_nodeid)) {
/* our rsb is not master, and we are dir; may as well fix it;
this should never happen */
- log_error(ls, "find_rsb toss our %d master %d dir %d",
+ log_error(ls, "find_rsb inactive our %d master %d dir %d",
our_nodeid, r->res_master_nodeid, dir_nodeid);
dlm_print_rsb(r);
r->res_master_nodeid = our_nodeid;
r->res_nodeid = 0;
}
- list_move(&r->res_rsbs_list, &ls->ls_keep);
- rsb_clear_flag(r, RSB_TOSS);
- /* rsb got out of toss state, it becomes alive again
- * and we reinit the reference counter that is only
- * valid for keep state rsbs
- */
+ list_move(&r->res_slow_list, &ls->ls_slow_active);
+ rsb_clear_flag(r, RSB_INACTIVE);
kref_init(&r->res_ref);
- rsb_delete_toss_timer(ls, r);
+ del_scan(ls, r);
write_unlock_bh(&ls->ls_rsbtbl_lock);
goto out;
@@ -1070,10 +1000,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
*/
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN) {
- goto retry;
- }
- if (error)
+ if (WARN_ON_ONCE(error))
goto out;
r->res_hash = hash;
@@ -1090,9 +1017,9 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
*/
write_unlock_bh(&ls->ls_rsbtbl_lock);
dlm_free_rsb(r);
- goto retry_lookup;
+ goto retry;
} else if (!error) {
- list_add(&r->res_rsbs_list, &ls->ls_keep);
+ list_add(&r->res_slow_list, &ls->ls_slow_active);
}
write_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -1101,12 +1028,54 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
return error;
}
+/*
+ * rsb rcu usage
+ *
+ * While rcu read lock is held, the rsb cannot be freed,
+ * which allows a lookup optimization.
+ *
+ * Two threads are accessing the same rsb concurrently,
+ * the first (A) is trying to use the rsb, the second (B)
+ * is trying to free the rsb.
+ *
+ * thread A thread B
+ * (trying to use rsb) (trying to free rsb)
+ *
+ * A1. rcu read lock
+ * A2. rsbtbl read lock
+ * A3. look up rsb in rsbtbl
+ * A4. rsbtbl read unlock
+ * B1. rsbtbl write lock
+ * B2. look up rsb in rsbtbl
+ * B3. remove rsb from rsbtbl
+ * B4. clear rsb HASHED flag
+ * B5. rsbtbl write unlock
+ * B6. begin freeing rsb using rcu...
+ *
+ * (rsb is inactive, so try to make it active again)
+ * A5. read rsb HASHED flag (safe because rsb is not freed yet)
+ * A6. the rsb HASHED flag is not set, which means the rsb
+ * is being removed from rsbtbl and freed, so don't use it.
+ * A7. rcu read unlock
+ *
+ * B7. ...finish freeing rsb using rcu
+ * A8. create a new rsb
+ *
+ * Without the rcu optimization, steps A5-8 would need to do
+ * an extra rsbtbl lookup:
+ * A5. rsbtbl write lock
+ * A6. look up rsb in rsbtbl, not found
+ * A7. rsbtbl write unlock
+ * A8. create a new rsb
+ */
+
static int find_rsb(struct dlm_ls *ls, const void *name, int len,
int from_nodeid, unsigned int flags,
struct dlm_rsb **r_ret)
{
int dir_nodeid;
uint32_t hash;
+ int rv;
if (len > DLM_RESNAME_MAXLEN)
return -EINVAL;
@@ -1114,12 +1083,15 @@ static int find_rsb(struct dlm_ls *ls, const void *name, int len,
hash = jhash(name, len, 0);
dir_nodeid = dlm_hash2nodeid(ls, hash);
+ rcu_read_lock();
if (dlm_no_directory(ls))
- return find_rsb_nodir(ls, name, len, hash, dir_nodeid,
+ rv = find_rsb_nodir(ls, name, len, hash, dir_nodeid,
from_nodeid, flags, r_ret);
else
- return find_rsb_dir(ls, name, len, hash, dir_nodeid,
+ rv = find_rsb_dir(ls, name, len, hash, dir_nodeid,
from_nodeid, flags, r_ret);
+ rcu_read_unlock();
+ return rv;
}
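
As a reading aid for the "rsb rcu usage" comment above, here is a minimal sketch of the lookup pattern, reduced to its essentials. It is a fragment, not compilable on its own, and every name in it (obj, obj_tbl, tbl_lock, OBJ_HASHED, OBJ_INACTIVE, reactivate()) is illustrative rather than taken from this patch; the only point is that the write-side recheck can test the HASHED flag instead of repeating the hash lookup, because the rcu read-side critical section keeps the object from being freed underneath us.

	rcu_read_lock();

	read_lock_bh(&tbl_lock);
	obj = rhashtable_lookup_fast(&obj_tbl, &key, obj_params);
	read_unlock_bh(&tbl_lock);
	if (!obj)
		goto create_new;	/* not hashed at all */

	if (test_bit(OBJ_INACTIVE, &obj->flags)) {
		write_lock_bh(&tbl_lock);
		/*
		 * No second rhashtable lookup: obj cannot have been freed
		 * while the rcu read lock is held.  If HASHED was cleared,
		 * a remover won the race and obj is already on its way to
		 * a call_rcu() free, so fall back to creating a new object.
		 */
		if (test_bit(OBJ_HASHED, &obj->flags) &&
		    test_bit(OBJ_INACTIVE, &obj->flags))
			reactivate(obj);	/* move back to the active list */
		write_unlock_bh(&tbl_lock);
	}

	rcu_read_unlock();
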
/* we have received a request and found that res_master_nodeid != our_nodeid,
@@ -1166,7 +1138,7 @@ static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
}
static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid,
- int from_nodeid, bool toss_list, unsigned int flags,
+ int from_nodeid, bool is_inactive, unsigned int flags,
int *r_nodeid, int *result)
{
int fix_master = (flags & DLM_LU_RECOVER_MASTER);
@@ -1190,9 +1162,9 @@ static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_no
r->res_nodeid = from_nodeid;
rsb_set_flag(r, RSB_NEW_MASTER);
- if (toss_list) {
- /* I don't think we should ever find it on toss list. */
- log_error(ls, "%s fix_master on toss", __func__);
+ if (is_inactive) {
+ /* I don't think we should ever find it inactive. */
+ log_error(ls, "%s fix_master inactive", __func__);
dlm_dump_rsb(r);
}
}
@@ -1232,7 +1204,7 @@ static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_no
if (!from_master && !fix_master &&
(r->res_master_nodeid == from_nodeid)) {
/* this can happen when the master sends remove, the dir node
- * finds the rsb on the keep list and ignores the remove,
+ * finds the rsb on the active list and ignores the remove,
* and the former master sends a lookup
*/
@@ -1276,8 +1248,8 @@ static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_no
* . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
*/
-int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
- int len, unsigned int flags, int *r_nodeid, int *result)
+static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
+ int len, unsigned int flags, int *r_nodeid, int *result)
{
struct dlm_rsb *r = NULL;
uint32_t hash;
@@ -1304,19 +1276,14 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
}
retry:
- error = pre_rsb_struct(ls);
- if (error < 0)
- return error;
-
- retry_lookup:
- /* check if the rsb is in keep state under read lock - likely path */
+ /* check if the rsb is active under read lock - likely path */
read_lock_bh(&ls->ls_rsbtbl_lock);
error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (!error) {
- if (rsb_flag(r, RSB_TOSS)) {
+ if (rsb_flag(r, RSB_INACTIVE)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
- goto do_toss;
+ goto do_inactive;
}
/* because the rsb is active, we need to lock_rsb before
@@ -1340,53 +1307,48 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
goto not_found;
}
- do_toss:
+ do_inactive:
/* unlikely path - relookup under write */
write_lock_bh(&ls->ls_rsbtbl_lock);
- /* rsb_mod_timer() requires to held ls_rsbtbl_lock in write lock
- * check if the rsb is still in toss state, if not relookup
- */
error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (!error) {
- if (!rsb_flag(r, RSB_TOSS)) {
+ if (!rsb_flag(r, RSB_INACTIVE)) {
write_unlock_bh(&ls->ls_rsbtbl_lock);
/* something has changed, very unlikely but
* try again
*/
- goto retry_lookup;
+ goto retry;
}
} else {
write_unlock_bh(&ls->ls_rsbtbl_lock);
goto not_found;
}
- /* because the rsb is inactive (on toss list), it's not refcounted
- * and lock_rsb is not used, but is protected by the rsbtbl lock
- */
+ /* because the rsb is inactive, it's not refcounted and lock_rsb
+ is not used; the rsb is protected by the rsbtbl lock instead */
__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
r_nodeid, result);
- rsb_mod_timer(ls, r);
- /* the rsb was inactive (on toss list) */
+ /* A dir record rsb should never be on scan list. */
+ /* Try to fix this with del_scan? */
+ WARN_ON(!list_empty(&r->res_scan_list));
+
write_unlock_bh(&ls->ls_rsbtbl_lock);
return 0;
not_found:
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN)
- goto retry;
- if (error)
+ if (WARN_ON_ONCE(error))
goto out;
r->res_hash = hash;
r->res_dir_nodeid = our_nodeid;
r->res_master_nodeid = from_nodeid;
r->res_nodeid = from_nodeid;
- kref_init(&r->res_ref);
- rsb_set_flag(r, RSB_TOSS);
+ rsb_set_flag(r, RSB_INACTIVE);
write_lock_bh(&ls->ls_rsbtbl_lock);
error = rsb_insert(r, &ls->ls_rsbtbl);
@@ -1396,7 +1358,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
*/
write_unlock_bh(&ls->ls_rsbtbl_lock);
dlm_free_rsb(r);
- goto retry_lookup;
+ goto retry;
} else if (error) {
write_unlock_bh(&ls->ls_rsbtbl_lock);
/* should never happen */
@@ -1404,8 +1366,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
goto retry;
}
- list_add(&r->res_rsbs_list, &ls->ls_toss);
- rsb_mod_timer(ls, r);
+ list_add(&r->res_slow_list, &ls->ls_slow_inactive);
write_unlock_bh(&ls->ls_rsbtbl_lock);
if (result)
@@ -1415,12 +1376,22 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
return error;
}
+int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
+ int len, unsigned int flags, int *r_nodeid, int *result)
+{
+ int rv;
+ rcu_read_lock();
+ rv = _dlm_master_lookup(ls, from_nodeid, name, len, flags, r_nodeid, result);
+ rcu_read_unlock();
+ return rv;
+}
+
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
struct dlm_rsb *r;
read_lock_bh(&ls->ls_rsbtbl_lock);
- list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
if (r->res_hash == hash)
dlm_dump_rsb(r);
}
@@ -1442,15 +1413,28 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len)
read_unlock_bh(&ls->ls_rsbtbl_lock);
}
-static void toss_rsb(struct kref *kref)
+static void deactivate_rsb(struct kref *kref)
{
struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
struct dlm_ls *ls = r->res_ls;
+ int our_nodeid = dlm_our_nodeid();
DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
- rsb_set_flag(r, RSB_TOSS);
- list_move(&r->res_rsbs_list, &ls->ls_toss);
- rsb_mod_timer(ls, r);
+ rsb_set_flag(r, RSB_INACTIVE);
+ list_move(&r->res_slow_list, &ls->ls_slow_inactive);
+
+ /*
+ * When the rsb becomes unused:
+ * - If it's not a dir record for a remote master rsb,
+ * then it is put on the scan list to be freed.
+ * - If it's a dir record for a remote master rsb,
+ * then it is kept in the inactive state until
+ * receive_remove() from the master node.
+ */
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) != our_nodeid))
+ add_scan(ls, r);
if (r->res_lvbptr) {
dlm_free_lvb(r->res_lvbptr);
@@ -1464,22 +1448,22 @@ static void unhold_rsb(struct dlm_rsb *r)
{
int rv;
- /* rsbs in toss state never get referenced */
- WARN_ON(rsb_flag(r, RSB_TOSS));
- rv = kref_put(&r->res_ref, toss_rsb);
+ /* inactive rsbs are not ref counted */
+ WARN_ON(rsb_flag(r, RSB_INACTIVE));
+ rv = kref_put(&r->res_ref, deactivate_rsb);
DLM_ASSERT(!rv, dlm_dump_rsb(r););
}
-void free_toss_rsb(struct dlm_rsb *r)
+void free_inactive_rsb(struct dlm_rsb *r)
{
- WARN_ON_ONCE(!rsb_flag(r, RSB_TOSS));
+ WARN_ON_ONCE(!rsb_flag(r, RSB_INACTIVE));
DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
- DLM_ASSERT(list_empty(&r->res_toss_q_list), dlm_dump_rsb(r););
+ DLM_ASSERT(list_empty(&r->res_scan_list), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r););
@@ -1504,11 +1488,15 @@ static void detach_lkb(struct dlm_lkb *lkb)
}
static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
- int start, int end)
+ unsigned long start, unsigned long end)
{
+ struct xa_limit limit;
struct dlm_lkb *lkb;
int rv;
+ limit.max = end;
+ limit.min = start;
+
lkb = dlm_allocate_lkb(ls);
if (!lkb)
return -ENOMEM;
@@ -1522,14 +1510,12 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
INIT_LIST_HEAD(&lkb->lkb_ownqueue);
INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
- write_lock_bh(&ls->ls_lkbidr_lock);
- rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
- if (rv >= 0)
- lkb->lkb_id = rv;
- write_unlock_bh(&ls->ls_lkbidr_lock);
+ write_lock_bh(&ls->ls_lkbxa_lock);
+ rv = xa_alloc(&ls->ls_lkbxa, &lkb->lkb_id, lkb, limit, GFP_ATOMIC);
+ write_unlock_bh(&ls->ls_lkbxa_lock);
if (rv < 0) {
- log_error(ls, "create_lkb idr error %d", rv);
+ log_error(ls, "create_lkb xa error %d", rv);
dlm_free_lkb(lkb);
return rv;
}
@@ -1540,18 +1526,18 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
- return _create_lkb(ls, lkb_ret, 1, 0);
+ return _create_lkb(ls, lkb_ret, 1, ULONG_MAX);
}
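
The lkb id handling above is the idr -> xarray conversion in its simplest form. Below is a hedged, self-contained sketch of the same xa_alloc()/xa_load()/xa_erase() lifecycle; struct item and item_xa are illustrative names rather than dlm symbols, and XA_LIMIT(1, UINT_MAX) mirrors the old idr_alloc(..., 1, 0, ...) convention of never handing out id 0. GFP_ATOMIC matches an allocation done under a bh write lock, where sleeping is not allowed.

#include <linux/xarray.h>

struct item {
	u32 id;
	/* ... payload ... */
};

static DEFINE_XARRAY_FLAGS(item_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);

static int item_get_id(struct item *it)
{
	/* fills it->id with the allocated index; returns 0 on success */
	return xa_alloc(&item_xa, &it->id, it, XA_LIMIT(1, UINT_MAX),
			GFP_ATOMIC);
}

static struct item *item_lookup(u32 id)
{
	return xa_load(&item_xa, id);	/* NULL if no such id */
}

static void item_put_id(struct item *it)
{
	xa_erase(&item_xa, it->id);
}
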
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb;
- read_lock_bh(&ls->ls_lkbidr_lock);
- lkb = idr_find(&ls->ls_lkbidr, lkid);
+ read_lock_bh(&ls->ls_lkbxa_lock);
+ lkb = xa_load(&ls->ls_lkbxa, lkid);
if (lkb)
kref_get(&lkb->lkb_ref);
- read_unlock_bh(&ls->ls_lkbidr_lock);
+ read_unlock_bh(&ls->ls_lkbxa_lock);
*lkb_ret = lkb;
return lkb ? 0 : -ENOENT;
@@ -1576,10 +1562,10 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
int rv;
rv = dlm_kref_put_write_lock_bh(&lkb->lkb_ref, kill_lkb,
- &ls->ls_lkbidr_lock);
+ &ls->ls_lkbxa_lock);
if (rv) {
- idr_remove(&ls->ls_lkbidr, lkid);
- write_unlock_bh(&ls->ls_lkbidr_lock);
+ xa_erase(&ls->ls_lkbxa, lkid);
+ write_unlock_bh(&ls->ls_lkbxa_lock);
detach_lkb(lkb);
@@ -4323,8 +4309,9 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
return;
}
- /* Look for name in rsb toss state, if it's there, kill it.
- * If it's in non toss state, it's being used, and we should ignore this
+ /*
+ * Look for an inactive rsb; if it's there, free it.
+ * If the rsb is active, it's being used, and we should ignore this
* message. This is an expected race between the dir node sending a
* request to the master node at the same time as the master node sends
* a remove to the dir node. The resolution to that race is for the
@@ -4347,16 +4334,18 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
return;
}
- if (!rsb_flag(r, RSB_TOSS)) {
+ if (!rsb_flag(r, RSB_INACTIVE)) {
if (r->res_master_nodeid != from_nodeid) {
/* should not happen */
- log_error(ls, "receive_remove keep from %d master %d",
+ log_error(ls, "receive_remove on active rsb from %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
+ /* Ignore the remove message, see race comment above. */
+
log_debug(ls, "receive_remove from %d master %d first %x %s",
from_nodeid, r->res_master_nodeid, r->res_first_lkid,
name);
@@ -4365,19 +4354,20 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
}
if (r->res_master_nodeid != from_nodeid) {
- log_error(ls, "receive_remove toss from %d master %d",
+ log_error(ls, "receive_remove inactive from %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
- list_del(&r->res_rsbs_list);
+ list_del(&r->res_slow_list);
rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
dlm_rhash_rsb_params);
+ rsb_clear_flag(r, RSB_HASHED);
write_unlock_bh(&ls->ls_rsbtbl_lock);
- free_toss_rsb(r);
+ free_inactive_rsb(r);
}
static void receive_purge(struct dlm_ls *ls, const struct dlm_message *ms)
@@ -5444,7 +5434,7 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls)
struct dlm_rsb *r;
read_lock_bh(&ls->ls_rsbtbl_lock);
- list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
if (!rsb_flag(r, RSB_RECOVER_GRANT))
continue;
if (!is_master(r)) {
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 8de9dee4c058..4ed8d36f9c6d 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -11,7 +11,6 @@
#ifndef __LOCK_DOT_H__
#define __LOCK_DOT_H__
-void dlm_rsb_toss_timer(struct timer_list *timer);
void dlm_dump_rsb(struct dlm_rsb *r);
void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len);
void dlm_print_lkb(struct dlm_lkb *lkb);
@@ -19,15 +18,15 @@ void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms,
uint32_t saved_seq);
void dlm_receive_buffer(const union dlm_packet *p, int nodeid);
int dlm_modes_compat(int mode1, int mode2);
-void free_toss_rsb(struct dlm_rsb *r);
+void free_inactive_rsb(struct dlm_rsb *r);
void dlm_put_rsb(struct dlm_rsb *r);
void dlm_hold_rsb(struct dlm_rsb *r);
int dlm_put_lkb(struct dlm_lkb *lkb);
-void dlm_scan_rsbs(struct dlm_ls *ls);
int dlm_lock_recovery_try(struct dlm_ls *ls);
void dlm_lock_recovery(struct dlm_ls *ls);
void dlm_unlock_recovery(struct dlm_ls *ls);
-void dlm_timer_resume(struct dlm_ls *ls);
+void dlm_rsb_scan(struct timer_list *timer);
+void resume_scan_timer(struct dlm_ls *ls);
int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
int len, unsigned int flags, int *r_nodeid, int *result);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 475ab4370dda..1848cbbc96a9 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -38,7 +38,7 @@ static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
if (rc)
return rc;
- ls = dlm_find_lockspace_local(ls->ls_local_handle);
+ ls = dlm_find_lockspace_local(ls);
if (!ls)
return -EINVAL;
@@ -265,18 +265,9 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
- struct dlm_ls *ls;
+ struct dlm_ls *ls = lockspace;
- spin_lock_bh(&lslist_lock);
- list_for_each_entry(ls, &lslist, ls_list) {
- if (ls->ls_local_handle == lockspace) {
- atomic_inc(&ls->ls_count);
- goto out;
- }
- }
- ls = NULL;
- out:
- spin_unlock_bh(&lslist_lock);
+ atomic_inc(&ls->ls_count);
return ls;
}
@@ -410,37 +401,37 @@ static int new_lockspace(const char *name, const char *cluster,
atomic_set(&ls->ls_count, 0);
init_waitqueue_head(&ls->ls_count_wait);
ls->ls_flags = 0;
- ls->ls_scan_time = jiffies;
if (ops && dlm_config.ci_recover_callbacks) {
ls->ls_ops = ops;
ls->ls_ops_arg = ops_arg;
}
+ if (flags & DLM_LSFL_SOFTIRQ)
+ set_bit(LSFL_SOFTIRQ, &ls->ls_flags);
+
/* ls_exflags are forced to match among nodes, and we don't
* need to require all nodes to have some flags set
*/
- ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL));
+ ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
+ DLM_LSFL_SOFTIRQ));
- INIT_LIST_HEAD(&ls->ls_toss);
- INIT_LIST_HEAD(&ls->ls_keep);
+ INIT_LIST_HEAD(&ls->ls_slow_inactive);
+ INIT_LIST_HEAD(&ls->ls_slow_active);
rwlock_init(&ls->ls_rsbtbl_lock);
error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params);
if (error)
goto out_lsfree;
- idr_init(&ls->ls_lkbidr);
- rwlock_init(&ls->ls_lkbidr_lock);
+ xa_init_flags(&ls->ls_lkbxa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);
+ rwlock_init(&ls->ls_lkbxa_lock);
INIT_LIST_HEAD(&ls->ls_waiters);
spin_lock_init(&ls->ls_waiters_lock);
INIT_LIST_HEAD(&ls->ls_orphans);
spin_lock_init(&ls->ls_orphans_lock);
- INIT_LIST_HEAD(&ls->ls_new_rsb);
- spin_lock_init(&ls->ls_new_rsb_spin);
-
INIT_LIST_HEAD(&ls->ls_nodes);
INIT_LIST_HEAD(&ls->ls_nodes_gone);
ls->ls_num_nodes = 0;
@@ -484,7 +475,7 @@ static int new_lockspace(const char *name, const char *cluster,
ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
if (!ls->ls_recover_buf) {
error = -ENOMEM;
- goto out_lkbidr;
+ goto out_lkbxa;
}
ls->ls_slot = 0;
@@ -494,32 +485,31 @@ static int new_lockspace(const char *name, const char *cluster,
INIT_LIST_HEAD(&ls->ls_recover_list);
spin_lock_init(&ls->ls_recover_list_lock);
- idr_init(&ls->ls_recover_idr);
- spin_lock_init(&ls->ls_recover_idr_lock);
+ xa_init_flags(&ls->ls_recover_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);
+ spin_lock_init(&ls->ls_recover_xa_lock);
ls->ls_recover_list_count = 0;
- ls->ls_local_handle = ls;
init_waitqueue_head(&ls->ls_wait_general);
INIT_LIST_HEAD(&ls->ls_masters_list);
rwlock_init(&ls->ls_masters_lock);
INIT_LIST_HEAD(&ls->ls_dir_dump_list);
rwlock_init(&ls->ls_dir_dump_lock);
- INIT_LIST_HEAD(&ls->ls_toss_q);
- spin_lock_init(&ls->ls_toss_q_lock);
- timer_setup(&ls->ls_timer, dlm_rsb_toss_timer,
- TIMER_DEFERRABLE);
+ INIT_LIST_HEAD(&ls->ls_scan_list);
+ spin_lock_init(&ls->ls_scan_lock);
+ timer_setup(&ls->ls_scan_timer, dlm_rsb_scan, TIMER_DEFERRABLE);
spin_lock_bh(&lslist_lock);
ls->ls_create_count = 1;
list_add(&ls->ls_list, &lslist);
spin_unlock_bh(&lslist_lock);
- if (flags & DLM_LSFL_FS) {
- error = dlm_callback_start(ls);
- if (error) {
- log_error(ls, "can't start dlm_callback %d", error);
- goto out_delist;
- }
+ if (flags & DLM_LSFL_FS)
+ set_bit(LSFL_FS, &ls->ls_flags);
+
+ error = dlm_callback_start(ls);
+ if (error) {
+ log_error(ls, "can't start dlm_callback %d", error);
+ goto out_delist;
}
init_waitqueue_head(&ls->ls_recover_lock_wait);
@@ -584,10 +574,10 @@ static int new_lockspace(const char *name, const char *cluster,
spin_lock_bh(&lslist_lock);
list_del(&ls->ls_list);
spin_unlock_bh(&lslist_lock);
- idr_destroy(&ls->ls_recover_idr);
+ xa_destroy(&ls->ls_recover_xa);
kfree(ls->ls_recover_buf);
- out_lkbidr:
- idr_destroy(&ls->ls_lkbidr);
+ out_lkbxa:
+ xa_destroy(&ls->ls_lkbxa);
rhashtable_destroy(&ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
@@ -643,26 +633,15 @@ int dlm_new_user_lockspace(const char *name, const char *cluster,
void *ops_arg, int *ops_result,
dlm_lockspace_t **lockspace)
{
+ if (flags & DLM_LSFL_SOFTIRQ)
+ return -EINVAL;
+
return __dlm_new_lockspace(name, cluster, flags, lvblen, ops,
ops_arg, ops_result, lockspace);
}
-static int lkb_idr_is_local(int id, void *p, void *data)
-{
- struct dlm_lkb *lkb = p;
-
- return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
-}
-
-static int lkb_idr_is_any(int id, void *p, void *data)
-{
- return 1;
-}
-
-static int lkb_idr_free(int id, void *p, void *data)
+static int lkb_idr_free(struct dlm_lkb *lkb)
{
- struct dlm_lkb *lkb = p;
-
if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
dlm_free_lvb(lkb->lkb_lvbptr);
@@ -670,23 +649,34 @@ static int lkb_idr_free(int id, void *p, void *data)
return 0;
}
-/* NOTE: We check the lkbidr here rather than the resource table.
+/* NOTE: We check the lkbxa here rather than the resource table.
This is because there may be LKBs queued as ASTs that have been unlinked
from their RSBs and are pending deletion once the AST has been delivered */
static int lockspace_busy(struct dlm_ls *ls, int force)
{
- int rv;
+ struct dlm_lkb *lkb;
+ unsigned long id;
+ int rv = 0;
- read_lock_bh(&ls->ls_lkbidr_lock);
+ read_lock_bh(&ls->ls_lkbxa_lock);
if (force == 0) {
- rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
+ xa_for_each(&ls->ls_lkbxa, id, lkb) {
+ rv = 1;
+ break;
+ }
} else if (force == 1) {
- rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
+ xa_for_each(&ls->ls_lkbxa, id, lkb) {
+ if (lkb->lkb_nodeid == 0 &&
+ lkb->lkb_grmode != DLM_LOCK_IV) {
+ rv = 1;
+ break;
+ }
+ }
} else {
rv = 0;
}
- read_unlock_bh(&ls->ls_lkbidr_lock);
+ read_unlock_bh(&ls->ls_lkbxa_lock);
return rv;
}
@@ -699,7 +689,8 @@ static void rhash_free_rsb(void *ptr, void *arg)
static int release_lockspace(struct dlm_ls *ls, int force)
{
- struct dlm_rsb *rsb;
+ struct dlm_lkb *lkb;
+ unsigned long id;
int busy, rv;
busy = lockspace_busy(ls, force);
@@ -739,7 +730,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
* time_shutdown_sync(), we don't care anymore
*/
clear_bit(LSFL_RUNNING, &ls->ls_flags);
- timer_shutdown_sync(&ls->ls_timer);
+ timer_shutdown_sync(&ls->ls_scan_timer);
if (ls_count == 1) {
dlm_clear_members(ls);
@@ -752,28 +743,22 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_delete_debug_file(ls);
- idr_destroy(&ls->ls_recover_idr);
+ xa_destroy(&ls->ls_recover_xa);
kfree(ls->ls_recover_buf);
/*
- * Free all lkb's in idr
+ * Free all lkb's in xa
*/
-
- idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
- idr_destroy(&ls->ls_lkbidr);
+ xa_for_each(&ls->ls_lkbxa, id, lkb) {
+ lkb_idr_free(lkb);
+ }
+ xa_destroy(&ls->ls_lkbxa);
/*
* Free all rsb's on rsbtbl
*/
rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
- while (!list_empty(&ls->ls_new_rsb)) {
- rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
- res_hashchain);
- list_del(&rsb->res_hashchain);
- dlm_free_rsb(rsb);
- }
-
/*
* Free structures on any other lists
*/
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 6b8078085e56..2e3e269d820e 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -461,7 +461,7 @@ static bool dlm_lowcomms_con_has_addr(const struct connection *con,
return false;
}
-int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr)
{
struct connection *con;
bool ret, idx;
@@ -858,12 +858,6 @@ static void free_processqueue_entry(struct processqueue_entry *pentry)
kfree(pentry);
}
-struct dlm_processed_nodes {
- int nodeid;
-
- struct list_head list;
-};
-
static void process_dlm_messages(struct work_struct *work)
{
struct processqueue_entry *pentry;
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 8deb16f8f620..fd0df604eb93 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -46,7 +46,7 @@ void dlm_lowcomms_put_msg(struct dlm_msg *msg);
int dlm_lowcomms_resend_msg(struct dlm_msg *msg);
int dlm_lowcomms_connect_node(int nodeid);
int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
-int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
+int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr);
void dlm_midcomms_receive_done(int nodeid);
struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void);
struct kmem_cache *dlm_lowcomms_msg_cache_create(void);
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index c46e306f2e5c..a7ee7fd2b9d3 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -642,7 +642,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
if (new)
- timer_delete_sync(&ls->ls_timer);
+ timer_delete_sync(&ls->ls_scan_timer);
ls->ls_recover_seq++;
/* activate requestqueue and stop processing */
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 15a8b1cee433..8c44b954c166 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -72,6 +72,8 @@ out:
void dlm_memory_exit(void)
{
+ rcu_barrier();
+
kmem_cache_destroy(writequeue_cache);
kmem_cache_destroy(mhandle_cache);
kmem_cache_destroy(msg_cache);
@@ -101,13 +103,19 @@ struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls)
return r;
}
-void dlm_free_rsb(struct dlm_rsb *r)
+static void __free_rsb_rcu(struct rcu_head *rcu)
{
+ struct dlm_rsb *r = container_of(rcu, struct dlm_rsb, rcu);
if (r->res_lvbptr)
dlm_free_lvb(r->res_lvbptr);
kmem_cache_free(rsb_cache, r);
}
+void dlm_free_rsb(struct dlm_rsb *r)
+{
+ call_rcu(&r->rcu, __free_rsb_rcu);
+}
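
A minimal, self-contained sketch of the call_rcu()/rcu_barrier() pairing the two memory.c hunks above rely on: frees are deferred until all current rcu readers are done, and module exit flushes the pending callbacks before destroying the cache. struct thing and thing_cache are illustrative names only.

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct thing {
	struct rcu_head rcu;
	/* ... payload that readers may still touch under rcu_read_lock() ... */
};

static struct kmem_cache *thing_cache;

static void thing_free_rcu(struct rcu_head *rcu)
{
	struct thing *t = container_of(rcu, struct thing, rcu);

	kmem_cache_free(thing_cache, t);
}

static void thing_free(struct thing *t)
{
	/* defer the real free until every current rcu reader has finished */
	call_rcu(&t->rcu, thing_free_rcu);
}

static void thing_exit(void)
{
	/* wait for all queued thing_free_rcu() callbacks ... */
	rcu_barrier();
	/* ... so nothing is still pending when the cache is destroyed */
	kmem_cache_destroy(thing_cache);
}
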
+
struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
{
struct dlm_lkb *lkb;
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index c34f38e9ee5c..2c101bbe261a 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -334,12 +334,12 @@ static struct midcomms_node *nodeid2node(int nodeid)
return __find_node(nodeid, nodeid_hash(nodeid));
}
-int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr)
{
int ret, idx, r = nodeid_hash(nodeid);
struct midcomms_node *node;
- ret = dlm_lowcomms_addr(nodeid, addr, len);
+ ret = dlm_lowcomms_addr(nodeid, addr);
if (ret)
return ret;
diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
index 278d26fdeb2c..7fad1d170bba 100644
--- a/fs/dlm/midcomms.h
+++ b/fs/dlm/midcomms.h
@@ -19,7 +19,7 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen);
struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc);
void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name,
int namelen);
-int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
+int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr);
void dlm_midcomms_version_wait(void);
int dlm_midcomms_close(int nodeid);
int dlm_midcomms_start(void);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index f493d5f30c58..c7afb428a2b4 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -293,73 +293,78 @@ static void recover_list_clear(struct dlm_ls *ls)
spin_unlock_bh(&ls->ls_recover_list_lock);
}
-static int recover_idr_empty(struct dlm_ls *ls)
+static int recover_xa_empty(struct dlm_ls *ls)
{
int empty = 1;
- spin_lock_bh(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
if (ls->ls_recover_list_count)
empty = 0;
- spin_unlock_bh(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
return empty;
}
-static int recover_idr_add(struct dlm_rsb *r)
+static int recover_xa_add(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
+ struct xa_limit limit = {
+ .min = 1,
+ .max = UINT_MAX,
+ };
+ uint32_t id;
int rv;
- spin_lock_bh(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
if (r->res_id) {
rv = -1;
goto out_unlock;
}
- rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
+ rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC);
if (rv < 0)
goto out_unlock;
- r->res_id = rv;
+ r->res_id = id;
ls->ls_recover_list_count++;
dlm_hold_rsb(r);
rv = 0;
out_unlock:
- spin_unlock_bh(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
return rv;
}
-static void recover_idr_del(struct dlm_rsb *r)
+static void recover_xa_del(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- spin_lock_bh(&ls->ls_recover_idr_lock);
- idr_remove(&ls->ls_recover_idr, r->res_id);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
+ xa_erase_bh(&ls->ls_recover_xa, r->res_id);
r->res_id = 0;
ls->ls_recover_list_count--;
- spin_unlock_bh(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
dlm_put_rsb(r);
}
-static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
+static struct dlm_rsb *recover_xa_find(struct dlm_ls *ls, uint64_t id)
{
struct dlm_rsb *r;
- spin_lock_bh(&ls->ls_recover_idr_lock);
- r = idr_find(&ls->ls_recover_idr, (int)id);
- spin_unlock_bh(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
+ r = xa_load(&ls->ls_recover_xa, (int)id);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
return r;
}
-static void recover_idr_clear(struct dlm_ls *ls)
+static void recover_xa_clear(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- int id;
+ unsigned long id;
- spin_lock_bh(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_xa_lock);
- idr_for_each_entry(&ls->ls_recover_idr, r, id) {
- idr_remove(&ls->ls_recover_idr, id);
+ xa_for_each(&ls->ls_recover_xa, id, r) {
+ xa_erase_bh(&ls->ls_recover_xa, id);
r->res_id = 0;
r->res_recover_locks_count = 0;
ls->ls_recover_list_count--;
@@ -372,7 +377,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
ls->ls_recover_list_count);
ls->ls_recover_list_count = 0;
}
- spin_unlock_bh(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_xa_lock);
}
@@ -470,7 +475,7 @@ static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
set_new_master(r);
error = 0;
} else {
- recover_idr_add(r);
+ recover_xa_add(r);
error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
}
@@ -551,10 +556,10 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
- error = dlm_wait_function(ls, &recover_idr_empty);
+ error = dlm_wait_function(ls, &recover_xa_empty);
out:
if (error)
- recover_idr_clear(ls);
+ recover_xa_clear(ls);
return error;
}
@@ -563,7 +568,7 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
struct dlm_rsb *r;
int ret_nodeid, new_master;
- r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
+ r = recover_xa_find(ls, le64_to_cpu(rc->rc_id));
if (!r) {
log_error(ls, "dlm_recover_master_reply no id %llx",
(unsigned long long)le64_to_cpu(rc->rc_id));
@@ -582,9 +587,9 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
r->res_nodeid = new_master;
set_new_master(r);
unlock_rsb(r);
- recover_idr_del(r);
+ recover_xa_del(r);
- if (recover_idr_empty(ls))
+ if (recover_xa_empty(ls))
wake_up(&ls->ls_wait_general);
out:
return 0;
@@ -877,29 +882,26 @@ void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list)
log_rinfo(ls, "dlm_recover_rsbs %d done", count);
}
-/* Create a single list of all root rsb's to be used during recovery */
-
-void dlm_clear_toss(struct dlm_ls *ls)
+void dlm_clear_inactive(struct dlm_ls *ls)
{
struct dlm_rsb *r, *safe;
unsigned int count = 0;
write_lock_bh(&ls->ls_rsbtbl_lock);
- list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
- list_del(&r->res_rsbs_list);
+ list_for_each_entry_safe(r, safe, &ls->ls_slow_inactive, res_slow_list) {
+ list_del(&r->res_slow_list);
rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
dlm_rhash_rsb_params);
- /* remove it from the toss queue if its part of it */
- if (!list_empty(&r->res_toss_q_list))
- list_del_init(&r->res_toss_q_list);
+ if (!list_empty(&r->res_scan_list))
+ list_del_init(&r->res_scan_list);
- free_toss_rsb(r);
+ free_inactive_rsb(r);
count++;
}
write_unlock_bh(&ls->ls_rsbtbl_lock);
if (count)
- log_rinfo(ls, "dlm_clear_toss %u done", count);
+ log_rinfo(ls, "dlm_clear_inactive %u done", count);
}
diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h
index efc79a6e577d..ec69896462fb 100644
--- a/fs/dlm/recover.h
+++ b/fs/dlm/recover.h
@@ -25,7 +25,7 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc);
int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
const struct list_head *root_list);
void dlm_recovered_lock(struct dlm_rsb *r);
-void dlm_clear_toss(struct dlm_ls *ls);
+void dlm_clear_inactive(struct dlm_ls *ls);
void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list);
#endif /* __RECOVER_DOT_H__ */
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 17a40d1e6036..34f4f9f49a6c 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -33,7 +33,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
}
read_lock_bh(&ls->ls_rsbtbl_lock);
- list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
if (r->res_nodeid)
continue;
@@ -63,12 +63,12 @@ static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
struct dlm_rsb *r;
read_lock_bh(&ls->ls_rsbtbl_lock);
- list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+ list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
list_add(&r->res_root_list, root_list);
dlm_hold_rsb(r);
}
- WARN_ON_ONCE(!list_empty(&ls->ls_toss));
+ WARN_ON_ONCE(!list_empty(&ls->ls_slow_inactive));
read_unlock_bh(&ls->ls_rsbtbl_lock);
}
@@ -98,16 +98,16 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags);
- /* Schedule next timer if recovery put something on toss.
+ /* Schedule next timer if recovery put something on the inactive list.
*
* The rsbs that were queued on toss while recovery hasn't
* started yet because LSFL_RUNNING was set; everything
* else in recovery hasn't started either because ls_in_recovery
* is still held. So we should not run into the case that
- * dlm_timer_resume() queues a timer that can occur in
+ * resume_scan_timer() queues a timer that can occur in
* a no op.
*/
- dlm_timer_resume(ls);
+ resume_scan_timer(ls);
/* unblocks processes waiting to enter the dlm */
up_write(&ls->ls_in_recovery);
clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
@@ -131,7 +131,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_callback_suspend(ls);
- dlm_clear_toss(ls);
+ dlm_clear_inactive(ls);
/*
* This list of root rsb's will be the basis of most of the recovery
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 3173b974e8c8..5cb3896be826 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -182,7 +182,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
struct dlm_user_args *ua;
struct dlm_user_proc *proc;
struct dlm_callback *cb;
- int rv;
+ int rv, copy_lvb;
if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
@@ -213,28 +213,22 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
spin_lock_bh(&proc->asts_spin);
- rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb);
- switch (rv) {
- case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
- cb->ua = *ua;
- cb->lkb_lksb = &cb->ua.lksb;
- if (cb->copy_lvb) {
- memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
- DLM_USER_LVB_LEN);
- cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
+ if (!dlm_may_skip_callback(lkb, flags, mode, status, sbflags,
+ &copy_lvb)) {
+ rv = dlm_get_cb(lkb, flags, mode, status, sbflags, &cb);
+ if (!rv) {
+ cb->copy_lvb = copy_lvb;
+ cb->ua = *ua;
+ cb->lkb_lksb = &cb->ua.lksb;
+ if (copy_lvb) {
+ memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
+ DLM_USER_LVB_LEN);
+ cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
+ }
+
+ list_add_tail(&cb->list, &proc->asts);
+ wake_up_interruptible(&proc->wait);
}
-
- list_add_tail(&cb->list, &proc->asts);
- wake_up_interruptible(&proc->wait);
- break;
- case DLM_ENQUEUE_CALLBACK_SUCCESS:
- break;
- case DLM_ENQUEUE_CALLBACK_FAILURE:
- fallthrough;
- default:
- spin_unlock_bh(&proc->asts_spin);
- WARN_ON_ONCE(1);
- goto out;
}
spin_unlock_bh(&proc->asts_spin);
@@ -454,7 +448,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
if (params->flags & DLM_USER_LSFLG_FORCEFREE)
force = 2;
- lockspace = ls->ls_local_handle;
+ lockspace = ls;
dlm_put_lockspace(ls);
/* The final dlm_release_lockspace waits for references to go to
@@ -657,7 +651,7 @@ static int device_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- proc->lockspace = ls->ls_local_handle;
+ proc->lockspace = ls;
INIT_LIST_HEAD(&proc->asts);
INIT_LIST_HEAD(&proc->locks);
INIT_LIST_HEAD(&proc->unlocking);
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index bb14462f6d99..a929f1b613be 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -275,8 +275,8 @@ enum {
};
static const struct fs_parameter_spec efivarfs_parameters[] = {
- fsparam_u32("uid", Opt_uid),
- fsparam_u32("gid", Opt_gid),
+ fsparam_uid("uid", Opt_uid),
+ fsparam_gid("gid", Opt_gid),
{},
};
@@ -293,14 +293,10 @@ static int efivarfs_parse_param(struct fs_context *fc, struct fs_parameter *para
switch (opt) {
case Opt_uid:
- opts->uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(opts->uid))
- return -EINVAL;
+ opts->uid = result.uid;
break;
case Opt_gid:
- opts->gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(opts->gid))
- return -EINVAL;
+ opts->gid = result.gid;
break;
default:
return -EINVAL;
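
For context on the efivarfs hunks above: fsparam_uid()/fsparam_gid() make fs_parse() return an already-validated kuid_t/kgid_t in result.uid/result.gid, so the per-option make_kuid()/uid_valid() boilerplate disappears. A hedged, self-contained sketch of the pattern follows; the myfs_* names are illustrative, not part of efivarfs.

#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/uidgid.h>

enum { Opt_uid, Opt_gid };

static const struct fs_parameter_spec myfs_parameters[] = {
	fsparam_uid("uid", Opt_uid),
	fsparam_gid("gid", Opt_gid),
	{}
};

struct myfs_ctx {
	kuid_t uid;
	kgid_t gid;
};

static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct myfs_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, myfs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = result.uid;	/* already mapped and validated */
		break;
	case Opt_gid:
		ctx->gid = result.gid;
		break;
	}
	return 0;
}
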
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 7844ab24b813..462619e59766 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -311,4 +311,5 @@ efs_block_t efs_map_block(struct inode *inode, efs_block_t block) {
return 0;
}
+MODULE_DESCRIPTION("Extent File System (efs)");
MODULE_LICENSE("GPL");
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
index 3b03a573cb1a..7749feded722 100644
--- a/fs/efs/symlink.c
+++ b/fs/efs/symlink.c
@@ -14,10 +14,9 @@
static int efs_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- char *link = page_address(page);
- struct buffer_head * bh;
- struct inode * inode = page->mapping->host;
+ char *link = folio_address(folio);
+ struct buffer_head *bh;
+ struct inode *inode = folio->mapping->host;
efs_block_t size = inode->i_size;
int err;
@@ -40,12 +39,9 @@ static int efs_symlink_read_folio(struct file *file, struct folio *folio)
brelse(bh);
}
link[size] = '\0';
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+ err = 0;
fail:
- SetPageError(page);
- unlock_page(page);
+ folio_end_read(folio, err == 0);
return err;
}
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 19d53c30c8af..7bfe251680ec 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -24,6 +24,8 @@ struct z_erofs_decompressor {
void *data, int size);
int (*decompress)(struct z_erofs_decompress_req *rq,
struct page **pagepool);
+ int (*init)(void);
+ void (*exit)(void);
char *name;
};
@@ -52,17 +54,14 @@ struct z_erofs_decompressor {
*/
/*
- * short-lived pages are pages directly from buddy system with specific
- * page->private (no need to set PagePrivate since these are non-LRU /
- * non-movable pages and bypass reclaim / migration code).
+ * Currently, short-lived pages are pages directly from the buddy system
+ * with specific page->private (Z_EROFS_SHORTLIVED_PAGE).
+ * In the future world of Memdescs, it should be type 0 (Misc) memory,
+ * whose type can be checked with a new helper.
*/
static inline bool z_erofs_is_shortlived_page(struct page *page)
{
- if (page->private != Z_EROFS_SHORTLIVED_PAGE)
- return false;
-
- DBG_BUGON(page->mapping);
- return true;
+ return page->private == Z_EROFS_SHORTLIVED_PAGE;
}
static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
@@ -70,32 +69,32 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
{
if (!z_erofs_is_shortlived_page(page))
return false;
-
- /* short-lived pages should not be used by others at the same time */
- if (page_ref_count(page) > 1) {
- put_page(page);
- } else {
- /* follow the pcluster rule above. */
- erofs_pagepool_add(pagepool, page);
- }
+ erofs_pagepool_add(pagepool, page);
return true;
}
+extern const struct z_erofs_decompressor z_erofs_lzma_decomp;
+extern const struct z_erofs_decompressor z_erofs_deflate_decomp;
+extern const struct z_erofs_decompressor z_erofs_zstd_decomp;
+extern const struct z_erofs_decompressor *z_erofs_decomp[];
+
+struct z_erofs_stream_dctx {
+ struct z_erofs_decompress_req *rq;
+ unsigned int inpages, outpages; /* # of {en,de}coded pages */
+ int no, ni; /* the current {en,de}coded page # */
+
+ unsigned int avail_out; /* remaining bytes in the decoded buffer */
+ unsigned int inbuf_pos, inbuf_sz;
+ /* current status of the encoded buffer */
+ u8 *kin, *kout; /* buffer mapped pointers */
+ void *bounce; /* bounce buffer for inplace I/Os */
+ bool bounced; /* is the bounce buffer used now? */
+};
+
+int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
+ void **src, struct page **pgpl);
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
unsigned int padbufsize);
-extern const struct z_erofs_decompressor erofs_decompressors[];
-
-/* prototypes for specific algorithms */
-int z_erofs_load_lzma_config(struct super_block *sb,
- struct erofs_super_block *dsb, void *data, int size);
-int z_erofs_load_deflate_config(struct super_block *sb,
- struct erofs_super_block *dsb, void *data, int size);
-int z_erofs_load_zstd_config(struct super_block *sb,
- struct erofs_super_block *dsb, void *data, int size);
-int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool);
-int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool);
-int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
- struct page **pgpl);
+int __init z_erofs_init_decompressor(void);
+void z_erofs_exit_decompressor(void);
#endif
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 9d85b6c11c6b..c2253b6a5416 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2019 HUAWEI, Inc.
* https://www.huawei.com/
+ * Copyright (C) 2024 Alibaba Cloud
*/
#include "compress.h"
#include <linux/lz4.h>
@@ -109,7 +110,6 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
if (top) {
victim = availables[--top];
- get_page(victim);
} else {
victim = __erofs_allocpage(pagepool, rq->gfp, true);
if (!victim)
@@ -371,40 +371,113 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
return 0;
}
-const struct z_erofs_decompressor erofs_decompressors[] = {
- [Z_EROFS_COMPRESSION_SHIFTED] = {
+int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
+ void **src, struct page **pgpl)
+{
+ struct z_erofs_decompress_req *rq = dctx->rq;
+ struct super_block *sb = rq->sb;
+ struct page **pgo, *tmppage;
+ unsigned int j;
+
+ if (!dctx->avail_out) {
+ if (++dctx->no >= dctx->outpages || !rq->outputsize) {
+ erofs_err(sb, "insufficient space for decompressed data");
+ return -EFSCORRUPTED;
+ }
+
+ if (dctx->kout)
+ kunmap_local(dctx->kout);
+ dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
+ rq->outputsize -= dctx->avail_out;
+ pgo = &rq->out[dctx->no];
+ if (!*pgo && rq->fillgaps) { /* deduped */
+ *pgo = erofs_allocpage(pgpl, rq->gfp);
+ if (!*pgo) {
+ dctx->kout = NULL;
+ return -ENOMEM;
+ }
+ set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
+ }
+ if (*pgo) {
+ dctx->kout = kmap_local_page(*pgo);
+ *dst = dctx->kout + rq->pageofs_out;
+ } else {
+ *dst = dctx->kout = NULL;
+ }
+ rq->pageofs_out = 0;
+ }
+
+ if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
+ if (++dctx->ni >= dctx->inpages) {
+ erofs_err(sb, "invalid compressed data");
+ return -EFSCORRUPTED;
+ }
+ if (dctx->kout) /* unlike kmap(), take care of the orders */
+ kunmap_local(dctx->kout);
+ kunmap_local(dctx->kin);
+
+ dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
+ rq->inputsize -= dctx->inbuf_sz;
+ dctx->kin = kmap_local_page(rq->in[dctx->ni]);
+ *src = dctx->kin;
+ dctx->bounced = false;
+ if (dctx->kout) {
+ j = (u8 *)*dst - dctx->kout;
+ dctx->kout = kmap_local_page(rq->out[dctx->no]);
+ *dst = dctx->kout + j;
+ }
+ dctx->inbuf_pos = 0;
+ }
+
+ /*
+ * Handle overlapping: use the given bounce buffer if the input data is
+ * still being processed, or use short-lived pages from the on-stack page
+ * pool, which are shared within the same request. Note that only
+ * a few inplace I/O pages need to be doubled.
+ */
+ if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
+ memcpy(dctx->bounce, *src, dctx->inbuf_sz);
+ *src = dctx->bounce;
+ dctx->bounced = true;
+ }
+
+ for (j = dctx->ni + 1; j < dctx->inpages; ++j) {
+ if (rq->out[dctx->no] != rq->in[j])
+ continue;
+ tmppage = erofs_allocpage(pgpl, rq->gfp);
+ if (!tmppage)
+ return -ENOMEM;
+ set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
+ copy_highpage(tmppage, rq->in[j]);
+ rq->in[j] = tmppage;
+ }
+ return 0;
+}
+
+const struct z_erofs_decompressor *z_erofs_decomp[] = {
+ [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
.decompress = z_erofs_transform_plain,
.name = "shifted"
},
- [Z_EROFS_COMPRESSION_INTERLACED] = {
+ [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
.decompress = z_erofs_transform_plain,
.name = "interlaced"
},
- [Z_EROFS_COMPRESSION_LZ4] = {
+ [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
.config = z_erofs_load_lz4_config,
.decompress = z_erofs_lz4_decompress,
+ .init = z_erofs_gbuf_init,
+ .exit = z_erofs_gbuf_exit,
.name = "lz4"
},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
- [Z_EROFS_COMPRESSION_LZMA] = {
- .config = z_erofs_load_lzma_config,
- .decompress = z_erofs_lzma_decompress,
- .name = "lzma"
- },
+ [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
- [Z_EROFS_COMPRESSION_DEFLATE] = {
- .config = z_erofs_load_deflate_config,
- .decompress = z_erofs_deflate_decompress,
- .name = "deflate"
- },
+ [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
- [Z_EROFS_COMPRESSION_ZSTD] = {
- .config = z_erofs_load_zstd_config,
- .decompress = z_erofs_zstd_decompress,
- .name = "zstd"
- },
+ [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};
@@ -432,6 +505,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
offset = EROFS_SUPER_OFFSET + sbi->sb_size;
alg = 0;
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
+ const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
void *data;
if (!(algs & 1))
@@ -443,16 +517,13 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
break;
}
- if (alg >= ARRAY_SIZE(erofs_decompressors) ||
- !erofs_decompressors[alg].config) {
+ if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
+ ret = dec->config(sb, dsb, data, size);
+ } else {
erofs_err(sb, "algorithm %d isn't enabled on this kernel",
alg);
ret = -EOPNOTSUPP;
- } else {
- ret = erofs_decompressors[alg].config(sb,
- dsb, data, size);
}
-
kfree(data);
if (ret)
break;
@@ -460,3 +531,28 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
erofs_put_metabuf(&buf);
return ret;
}
+
+int __init z_erofs_init_decompressor(void)
+{
+ int i, err;
+
+ for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
+ err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
+ if (err) {
+ while (--i)
+ if (z_erofs_decomp[i])
+ z_erofs_decomp[i]->exit();
+ return err;
+ }
+ }
+ return 0;
+}
+
+void z_erofs_exit_decompressor(void)
+{
+ int i;
+
+ for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
+ if (z_erofs_decomp[i])
+ z_erofs_decomp[i]->exit();
+}
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
index 3a3461561a3c..5070d2fcc737 100644
--- a/fs/erofs/decompressor_deflate.c
+++ b/fs/erofs/decompressor_deflate.c
@@ -15,7 +15,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq);
module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444);
-void z_erofs_deflate_exit(void)
+static void z_erofs_deflate_exit(void)
{
/* there should be no running fs instance */
while (z_erofs_deflate_avail_strms) {
@@ -41,7 +41,7 @@ void z_erofs_deflate_exit(void)
}
}
-int __init z_erofs_deflate_init(void)
+static int __init z_erofs_deflate_init(void)
{
/* by default, use # of possible CPUs instead */
if (!z_erofs_deflate_nstrms)
@@ -49,7 +49,7 @@ int __init z_erofs_deflate_init(void)
return 0;
}
-int z_erofs_load_deflate_config(struct super_block *sb,
+static int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size)
{
struct z_erofs_deflate_cfgs *dfl = data;
@@ -97,27 +97,26 @@ failed:
return -ENOMEM;
}
-int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
- struct page **pgpl)
+static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const unsigned int nrpages_in =
- PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
struct super_block *sb = rq->sb;
- unsigned int insz, outsz, pofs;
+ struct z_erofs_stream_dctx dctx = {
+ .rq = rq,
+ .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
+ .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
+ >> PAGE_SHIFT,
+ .no = -1, .ni = 0,
+ };
struct z_erofs_deflate *strm;
- u8 *kin, *kout = NULL;
- bool bounced = false;
- int no = -1, ni = 0, j = 0, zerr, err;
+ int zerr, err;
/* 1. get the exact DEFLATE compressed size */
- kin = kmap_local_page(*rq->in);
- err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
- min_t(unsigned int, rq->inputsize,
- sb->s_blocksize - rq->pageofs_in));
+ dctx.kin = kmap_local_page(*rq->in);
+ err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
if (err) {
- kunmap_local(kin);
+ kunmap_local(dctx.kin);
return err;
}
@@ -134,102 +133,35 @@ again:
spin_unlock(&z_erofs_deflate_lock);
/* 3. multi-call decompress */
- insz = rq->inputsize;
- outsz = rq->outputsize;
zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
if (zerr != Z_OK) {
err = -EIO;
goto failed_zinit;
}
- pofs = rq->pageofs_out;
- strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in);
- insz -= strm->z.avail_in;
- strm->z.next_in = kin + rq->pageofs_in;
+ rq->fillgaps = true; /* DEFLATE doesn't support NULL output buffer */
+ strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
+ rq->inputsize -= strm->z.avail_in;
+ strm->z.next_in = dctx.kin + rq->pageofs_in;
strm->z.avail_out = 0;
+ dctx.bounce = strm->bounce;
while (1) {
- if (!strm->z.avail_out) {
- if (++no >= nrpages_out || !outsz) {
- erofs_err(sb, "insufficient space for decompressed data");
- err = -EFSCORRUPTED;
- break;
- }
-
- if (kout)
- kunmap_local(kout);
- strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
- outsz -= strm->z.avail_out;
- if (!rq->out[no]) {
- rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
- if (!rq->out[no]) {
- kout = NULL;
- err = -ENOMEM;
- break;
- }
- set_page_private(rq->out[no],
- Z_EROFS_SHORTLIVED_PAGE);
- }
- kout = kmap_local_page(rq->out[no]);
- strm->z.next_out = kout + pofs;
- pofs = 0;
- }
-
- if (!strm->z.avail_in && insz) {
- if (++ni >= nrpages_in) {
- erofs_err(sb, "invalid compressed data");
- err = -EFSCORRUPTED;
- break;
- }
-
- if (kout) { /* unlike kmap(), take care of the orders */
- j = strm->z.next_out - kout;
- kunmap_local(kout);
- }
- kunmap_local(kin);
- strm->z.avail_in = min_t(u32, insz, PAGE_SIZE);
- insz -= strm->z.avail_in;
- kin = kmap_local_page(rq->in[ni]);
- strm->z.next_in = kin;
- bounced = false;
- if (kout) {
- kout = kmap_local_page(rq->out[no]);
- strm->z.next_out = kout + j;
- }
- }
-
- /*
- * Handle overlapping: Use bounced buffer if the compressed
- * data is under processing; Or use short-lived pages from the
- * on-stack pagepool where pages share among the same request
- * and not _all_ inplace I/O pages are needed to be doubled.
- */
- if (!bounced && rq->out[no] == rq->in[ni]) {
- memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in);
- strm->z.next_in = strm->bounce;
- bounced = true;
- }
-
- for (j = ni + 1; j < nrpages_in; ++j) {
- struct page *tmppage;
-
- if (rq->out[no] != rq->in[j])
- continue;
- tmppage = erofs_allocpage(pgpl, rq->gfp);
- if (!tmppage) {
- err = -ENOMEM;
- goto failed;
- }
- set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
- copy_highpage(tmppage, rq->in[j]);
- rq->in[j] = tmppage;
- }
+ dctx.avail_out = strm->z.avail_out;
+ dctx.inbuf_sz = strm->z.avail_in;
+ err = z_erofs_stream_switch_bufs(&dctx,
+ (void **)&strm->z.next_out,
+ (void **)&strm->z.next_in, pgpl);
+ if (err)
+ break;
+ strm->z.avail_out = dctx.avail_out;
+ strm->z.avail_in = dctx.inbuf_sz;
zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH);
- if (zerr != Z_OK || !(outsz + strm->z.avail_out)) {
+ if (zerr != Z_OK || !(rq->outputsize + strm->z.avail_out)) {
if (zerr == Z_OK && rq->partial_decoding)
break;
- if (zerr == Z_STREAM_END && !outsz)
+ if (zerr == Z_STREAM_END && !rq->outputsize)
break;
erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
zerr, rq->inputsize, rq->outputsize);
@@ -237,13 +169,12 @@ again:
break;
}
}
-failed:
if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
err = -EIO;
- if (kout)
- kunmap_local(kout);
+ if (dctx.kout)
+ kunmap_local(dctx.kout);
failed_zinit:
- kunmap_local(kin);
+ kunmap_local(dctx.kin);
/* 4. push back DEFLATE stream context to the global list */
spin_lock(&z_erofs_deflate_lock);
strm->next = z_erofs_deflate_head;
@@ -252,3 +183,11 @@ failed_zinit:
wake_up(&z_erofs_deflate_wq);
return err;
}
+
+const struct z_erofs_decompressor z_erofs_deflate_decomp = {
+ .config = z_erofs_load_deflate_config,
+ .decompress = z_erofs_deflate_decompress,
+ .init = z_erofs_deflate_init,
+ .exit = z_erofs_deflate_exit,
+ .name = "deflate",
+};
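
The rewritten deflate backend keeps only the zlib calls and hands all page-window bookkeeping to struct z_erofs_stream_dctx / z_erofs_stream_switch_bufs(). For readers unfamiliar with multi-call raw DEFLATE, here is a hedged user-space analogue of the same loop using stock zlib (WIN and inflate_raw are made-up names; fixed-size windows stand in for pages, and none of the erofs helpers are involved):

    #include <string.h>
    #include <zlib.h>

    #define WIN 4096                        /* stand-in for PAGE_SIZE */

    static int inflate_raw(const unsigned char *in, size_t insz,
                           unsigned char *out, size_t outsz)
    {
            z_stream z = { 0 };
            int zerr, err = 0;

            if (inflateInit2(&z, -MAX_WBITS) != Z_OK)       /* raw DEFLATE */
                    return -1;
            z.next_in = (unsigned char *)in;
            while (1) {
                    if (!z.avail_out) {                     /* next output window */
                            if (z.total_out >= outsz)
                                    break;
                            z.next_out = out + z.total_out;
                            z.avail_out = outsz - z.total_out < WIN ?
                                          outsz - z.total_out : WIN;
                    }
                    if (!z.avail_in && z.total_in < insz)   /* next input window */
                            z.avail_in = insz - z.total_in < WIN ?
                                         insz - z.total_in : WIN;
                    zerr = inflate(&z, Z_SYNC_FLUSH);
                    if (zerr == Z_STREAM_END)
                            break;
                    if (zerr != Z_OK) {
                            err = -1;
                            break;
                    }
            }
            inflateEnd(&z);
            return err;
    }
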
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 4b28dc130c9f..06a722b85a45 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -5,7 +5,6 @@
struct z_erofs_lzma {
struct z_erofs_lzma *next;
struct xz_dec_microlzma *state;
- struct xz_buf buf;
u8 bounce[PAGE_SIZE];
};
@@ -18,7 +17,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq);
module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444);
-void z_erofs_lzma_exit(void)
+static void z_erofs_lzma_exit(void)
{
/* there should be no running fs instance */
while (z_erofs_lzma_avail_strms) {
@@ -46,7 +45,7 @@ void z_erofs_lzma_exit(void)
}
}
-int __init z_erofs_lzma_init(void)
+static int __init z_erofs_lzma_init(void)
{
unsigned int i;
@@ -70,7 +69,7 @@ int __init z_erofs_lzma_init(void)
return 0;
}
-int z_erofs_load_lzma_config(struct super_block *sb,
+static int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size)
{
static DEFINE_MUTEX(lzma_resize_mutex);
@@ -147,26 +146,28 @@ again:
return err;
}
-int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
- struct page **pgpl)
+static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const unsigned int nrpages_in =
- PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
- unsigned int inlen, outlen, pageofs;
+ struct super_block *sb = rq->sb;
+ struct z_erofs_stream_dctx dctx = {
+ .rq = rq,
+ .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
+ .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
+ >> PAGE_SHIFT,
+ .no = -1, .ni = 0,
+ };
+ struct xz_buf buf = {};
struct z_erofs_lzma *strm;
- u8 *kin;
- bool bounced = false;
- int no, ni, j, err = 0;
+ enum xz_ret xz_err;
+ int err;
/* 1. get the exact LZMA compressed size */
- kin = kmap(*rq->in);
- err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
- min_t(unsigned int, rq->inputsize,
- rq->sb->s_blocksize - rq->pageofs_in));
+ dctx.kin = kmap_local_page(*rq->in);
+ err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
if (err) {
- kunmap(*rq->in);
+ kunmap_local(dctx.kin);
return err;
}
@@ -183,108 +184,45 @@ again:
spin_unlock(&z_erofs_lzma_lock);
/* 3. multi-call decompress */
- inlen = rq->inputsize;
- outlen = rq->outputsize;
- xz_dec_microlzma_reset(strm->state, inlen, outlen,
+ xz_dec_microlzma_reset(strm->state, rq->inputsize, rq->outputsize,
!rq->partial_decoding);
- pageofs = rq->pageofs_out;
- strm->buf.in = kin + rq->pageofs_in;
- strm->buf.in_pos = 0;
- strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in);
- inlen -= strm->buf.in_size;
- strm->buf.out = NULL;
- strm->buf.out_pos = 0;
- strm->buf.out_size = 0;
-
- for (ni = 0, no = -1;;) {
- enum xz_ret xz_err;
-
- if (strm->buf.out_pos == strm->buf.out_size) {
- if (strm->buf.out) {
- kunmap(rq->out[no]);
- strm->buf.out = NULL;
- }
-
- if (++no >= nrpages_out || !outlen) {
- erofs_err(rq->sb, "decompressed buf out of bound");
- err = -EFSCORRUPTED;
- break;
- }
- strm->buf.out_pos = 0;
- strm->buf.out_size = min_t(u32, outlen,
- PAGE_SIZE - pageofs);
- outlen -= strm->buf.out_size;
- if (!rq->out[no] && rq->fillgaps) { /* deduped */
- rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
- if (!rq->out[no]) {
- err = -ENOMEM;
- break;
- }
- set_page_private(rq->out[no],
- Z_EROFS_SHORTLIVED_PAGE);
- }
- if (rq->out[no])
- strm->buf.out = kmap(rq->out[no]) + pageofs;
- pageofs = 0;
- } else if (strm->buf.in_pos == strm->buf.in_size) {
- kunmap(rq->in[ni]);
-
- if (++ni >= nrpages_in || !inlen) {
- erofs_err(rq->sb, "compressed buf out of bound");
- err = -EFSCORRUPTED;
- break;
- }
- strm->buf.in_pos = 0;
- strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE);
- inlen -= strm->buf.in_size;
- kin = kmap(rq->in[ni]);
- strm->buf.in = kin;
- bounced = false;
- }
+ buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
+ rq->inputsize -= buf.in_size;
+ buf.in = dctx.kin + rq->pageofs_in;
+ dctx.bounce = strm->bounce;
+ do {
+ dctx.avail_out = buf.out_size - buf.out_pos;
+ dctx.inbuf_sz = buf.in_size;
+ dctx.inbuf_pos = buf.in_pos;
+ err = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out,
+ (void **)&buf.in, pgpl);
+ if (err)
+ break;
- /*
- * Handle overlapping: Use bounced buffer if the compressed
- * data is under processing; Otherwise, Use short-lived pages
- * from the on-stack pagepool where pages share with the same
- * request.
- */
- if (!bounced && rq->out[no] == rq->in[ni]) {
- memcpy(strm->bounce, strm->buf.in, strm->buf.in_size);
- strm->buf.in = strm->bounce;
- bounced = true;
+ if (buf.out_size == buf.out_pos) {
+ buf.out_size = dctx.avail_out;
+ buf.out_pos = 0;
}
- for (j = ni + 1; j < nrpages_in; ++j) {
- struct page *tmppage;
+ buf.in_size = dctx.inbuf_sz;
+ buf.in_pos = dctx.inbuf_pos;
- if (rq->out[no] != rq->in[j])
- continue;
- tmppage = erofs_allocpage(pgpl, rq->gfp);
- if (!tmppage) {
- err = -ENOMEM;
- goto failed;
- }
- set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
- copy_highpage(tmppage, rq->in[j]);
- rq->in[j] = tmppage;
- }
- xz_err = xz_dec_microlzma_run(strm->state, &strm->buf);
- DBG_BUGON(strm->buf.out_pos > strm->buf.out_size);
- DBG_BUGON(strm->buf.in_pos > strm->buf.in_size);
+ xz_err = xz_dec_microlzma_run(strm->state, &buf);
+ DBG_BUGON(buf.out_pos > buf.out_size);
+ DBG_BUGON(buf.in_pos > buf.in_size);
if (xz_err != XZ_OK) {
- if (xz_err == XZ_STREAM_END && !outlen)
+ if (xz_err == XZ_STREAM_END && !rq->outputsize)
break;
- erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]",
+ erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
xz_err, rq->inputsize, rq->outputsize);
err = -EFSCORRUPTED;
break;
}
- }
-failed:
- if (no < nrpages_out && strm->buf.out)
- kunmap(rq->out[no]);
- if (ni < nrpages_in)
- kunmap(rq->in[ni]);
+ } while (1);
+
+ if (dctx.kout)
+ kunmap_local(dctx.kout);
+ kunmap_local(dctx.kin);
/* 4. push back LZMA stream context to the global list */
spin_lock(&z_erofs_lzma_lock);
strm->next = z_erofs_lzma_head;
@@ -293,3 +231,11 @@ failed:
wake_up(&z_erofs_lzma_wq);
return err;
}
+
+const struct z_erofs_decompressor z_erofs_lzma_decomp = {
+ .config = z_erofs_load_lzma_config,
+ .decompress = z_erofs_lzma_decompress,
+ .init = z_erofs_lzma_init,
+ .exit = z_erofs_lzma_exit,
+ .name = "lzma"
+};
diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c
index 63a23cac3af4..7e177304967e 100644
--- a/fs/erofs/decompressor_zstd.c
+++ b/fs/erofs/decompressor_zstd.c
@@ -34,7 +34,7 @@ again:
return strm;
}
-void z_erofs_zstd_exit(void)
+static void z_erofs_zstd_exit(void)
{
while (z_erofs_zstd_avail_strms) {
struct z_erofs_zstd *strm, *n;
@@ -49,7 +49,7 @@ void z_erofs_zstd_exit(void)
}
}
-int __init z_erofs_zstd_init(void)
+static int __init z_erofs_zstd_init(void)
{
/* by default, use # of possible CPUs instead */
if (!z_erofs_zstd_nstrms)
@@ -72,7 +72,7 @@ int __init z_erofs_zstd_init(void)
return 0;
}
-int z_erofs_load_zstd_config(struct super_block *sb,
+static int z_erofs_load_zstd_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size)
{
static DEFINE_MUTEX(zstd_resize_mutex);
@@ -135,30 +135,29 @@ int z_erofs_load_zstd_config(struct super_block *sb,
return strm ? -ENOMEM : 0;
}
-int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
- struct page **pgpl)
+static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const unsigned int nrpages_in =
- PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
- zstd_dstream *stream;
struct super_block *sb = rq->sb;
- unsigned int insz, outsz, pofs;
- struct z_erofs_zstd *strm;
+ struct z_erofs_stream_dctx dctx = {
+ .rq = rq,
+ .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
+ .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
+ >> PAGE_SHIFT,
+ .no = -1, .ni = 0,
+ };
zstd_in_buffer in_buf = { NULL, 0, 0 };
zstd_out_buffer out_buf = { NULL, 0, 0 };
- u8 *kin, *kout = NULL;
- bool bounced = false;
- int no = -1, ni = 0, j = 0, zerr, err;
+ struct z_erofs_zstd *strm;
+ zstd_dstream *stream;
+ int zerr, err;
/* 1. get the exact compressed size */
- kin = kmap_local_page(*rq->in);
- err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
- min_t(unsigned int, rq->inputsize,
- sb->s_blocksize - rq->pageofs_in));
+ dctx.kin = kmap_local_page(*rq->in);
+ err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
+ min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
if (err) {
- kunmap_local(kin);
+ kunmap_local(dctx.kin);
return err;
}
@@ -166,109 +165,48 @@ int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
strm = z_erofs_isolate_strms(false);
/* 3. multi-call decompress */
- insz = rq->inputsize;
- outsz = rq->outputsize;
stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz);
if (!stream) {
err = -EIO;
goto failed_zinit;
}
- pofs = rq->pageofs_out;
- in_buf.size = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in);
- insz -= in_buf.size;
- in_buf.src = kin + rq->pageofs_in;
+ rq->fillgaps = true; /* ZSTD doesn't support NULL output buffer */
+ in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in);
+ rq->inputsize -= in_buf.size;
+ in_buf.src = dctx.kin + rq->pageofs_in;
+ dctx.bounce = strm->bounce;
+
do {
- if (out_buf.size == out_buf.pos) {
- if (++no >= nrpages_out || !outsz) {
- erofs_err(sb, "insufficient space for decompressed data");
- err = -EFSCORRUPTED;
- break;
- }
+ dctx.avail_out = out_buf.size - out_buf.pos;
+ dctx.inbuf_sz = in_buf.size;
+ dctx.inbuf_pos = in_buf.pos;
+ err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
+ (void **)&in_buf.src, pgpl);
+ if (err)
+ break;
- if (kout)
- kunmap_local(kout);
- out_buf.size = min_t(u32, outsz, PAGE_SIZE - pofs);
- outsz -= out_buf.size;
- if (!rq->out[no]) {
- rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
- if (!rq->out[no]) {
- kout = NULL;
- err = -ENOMEM;
- break;
- }
- set_page_private(rq->out[no],
- Z_EROFS_SHORTLIVED_PAGE);
- }
- kout = kmap_local_page(rq->out[no]);
- out_buf.dst = kout + pofs;
+ if (out_buf.size == out_buf.pos) {
+ out_buf.size = dctx.avail_out;
out_buf.pos = 0;
- pofs = 0;
- }
-
- if (in_buf.size == in_buf.pos && insz) {
- if (++ni >= nrpages_in) {
- erofs_err(sb, "invalid compressed data");
- err = -EFSCORRUPTED;
- break;
- }
-
- if (kout) /* unlike kmap(), take care of the orders */
- kunmap_local(kout);
- kunmap_local(kin);
- in_buf.size = min_t(u32, insz, PAGE_SIZE);
- insz -= in_buf.size;
- kin = kmap_local_page(rq->in[ni]);
- in_buf.src = kin;
- in_buf.pos = 0;
- bounced = false;
- if (kout) {
- j = (u8 *)out_buf.dst - kout;
- kout = kmap_local_page(rq->out[no]);
- out_buf.dst = kout + j;
- }
}
+ in_buf.size = dctx.inbuf_sz;
+ in_buf.pos = dctx.inbuf_pos;
- /*
- * Handle overlapping: Use bounced buffer if the compressed
- * data is under processing; Or use short-lived pages from the
- * on-stack pagepool where pages share among the same request
- * and not _all_ inplace I/O pages are needed to be doubled.
- */
- if (!bounced && rq->out[no] == rq->in[ni]) {
- memcpy(strm->bounce, in_buf.src, in_buf.size);
- in_buf.src = strm->bounce;
- bounced = true;
- }
-
- for (j = ni + 1; j < nrpages_in; ++j) {
- struct page *tmppage;
-
- if (rq->out[no] != rq->in[j])
- continue;
- tmppage = erofs_allocpage(pgpl, rq->gfp);
- if (!tmppage) {
- err = -ENOMEM;
- goto failed;
- }
- set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
- copy_highpage(tmppage, rq->in[j]);
- rq->in[j] = tmppage;
- }
zerr = zstd_decompress_stream(stream, &out_buf, &in_buf);
- if (zstd_is_error(zerr) || (!zerr && outsz)) {
+ if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) {
erofs_err(sb, "failed to decompress in[%u] out[%u]: %s",
rq->inputsize, rq->outputsize,
zerr ? zstd_get_error_name(zerr) : "unexpected end of stream");
err = -EFSCORRUPTED;
break;
}
- } while (outsz || out_buf.pos < out_buf.size);
-failed:
- if (kout)
- kunmap_local(kout);
+ } while (rq->outputsize || out_buf.pos < out_buf.size);
+
+ if (dctx.kout)
+ kunmap_local(dctx.kout);
failed_zinit:
- kunmap_local(kin);
+ kunmap_local(dctx.kin);
/* 4. push back ZSTD stream context to the global list */
spin_lock(&z_erofs_zstd_lock);
strm->next = z_erofs_zstd_head;
@@ -277,3 +215,11 @@ failed_zinit:
wake_up(&z_erofs_zstd_wq);
return err;
}
+
+const struct z_erofs_decompressor z_erofs_zstd_decomp = {
+ .config = z_erofs_load_zstd_config,
+ .decompress = z_erofs_zstd_decompress,
+ .init = z_erofs_zstd_init,
+ .exit = z_erofs_zstd_exit,
+ .name = "zstd",
+};
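
The zstd backend follows the same structure, built on the linux/zstd.h streaming wrappers (zstd_init_dstream()/zstd_decompress_stream()). A hedged sketch of those calls on single contiguous buffers, without the per-page windowing above, might look like the following; the helper name is made up and this is not meant to build outside a kernel tree:

    #include <linux/zstd.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static int zstd_one_shot_stream(const void *src, size_t srclen,
                                    void *dst, size_t dstlen, size_t max_window)
    {
            size_t wkspsz = zstd_dstream_workspace_bound(max_window);
            void *wksp = kvmalloc(wkspsz, GFP_KERNEL);
            zstd_in_buffer in = { .src = src, .size = srclen };
            zstd_out_buffer out = { .dst = dst, .size = dstlen };
            zstd_dstream *ds;
            size_t zerr;
            int err = 0;

            if (!wksp)
                    return -ENOMEM;
            ds = zstd_init_dstream(max_window, wksp, wkspsz);
            if (!ds) {
                    err = -EIO;
                    goto out;
            }
            do {
                    zerr = zstd_decompress_stream(ds, &out, &in);
                    if (zstd_is_error(zerr)) {      /* corrupted input */
                            err = -EIO;
                            break;
                    }
            } while (zerr != 0 && out.pos < out.size);
    out:
            kvfree(wksp);
            return err;
    }
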
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 0c1b44ac9524..736607675396 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -312,17 +312,13 @@ static inline unsigned int erofs_inode_datalayout(unsigned int ifmt)
return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK;
}
-/*
- * Different from grab_cache_page_nowait(), reclaiming is never triggered
- * when allocating new pages.
- */
-static inline
-struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
- pgoff_t index)
+/* reclaiming is never triggered when allocating new folios. */
+static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
+ pgoff_t index)
{
- return pagecache_get_page(mapping, index,
+ return __filemap_get_folio(as, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
- readahead_gfp_mask(mapping) & ~__GFP_RECLAIM);
+ readahead_gfp_mask(as) & ~__GFP_RECLAIM);
}
/* Has a disk mapping */
@@ -458,8 +454,8 @@ void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
-int __init z_erofs_init_zip_subsystem(void);
-void z_erofs_exit_zip_subsystem(void);
+int __init z_erofs_init_subsystem(void);
+void z_erofs_exit_subsystem(void);
int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
struct erofs_workgroup *egrp);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
@@ -476,37 +472,11 @@ static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
-static inline int z_erofs_init_zip_subsystem(void) { return 0; }
-static inline void z_erofs_exit_zip_subsystem(void) {}
-static inline int z_erofs_gbuf_init(void) { return 0; }
-static inline void z_erofs_gbuf_exit(void) {}
+static inline int z_erofs_init_subsystem(void) { return 0; }
+static inline void z_erofs_exit_subsystem(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */
-#ifdef CONFIG_EROFS_FS_ZIP_LZMA
-int __init z_erofs_lzma_init(void);
-void z_erofs_lzma_exit(void);
-#else
-static inline int z_erofs_lzma_init(void) { return 0; }
-static inline int z_erofs_lzma_exit(void) { return 0; }
-#endif /* !CONFIG_EROFS_FS_ZIP_LZMA */
-
-#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
-int __init z_erofs_deflate_init(void);
-void z_erofs_deflate_exit(void);
-#else
-static inline int z_erofs_deflate_init(void) { return 0; }
-static inline int z_erofs_deflate_exit(void) { return 0; }
-#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */
-
-#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
-int __init z_erofs_zstd_init(void);
-void z_erofs_zstd_exit(void);
-#else
-static inline int z_erofs_zstd_init(void) { return 0; }
-static inline int z_erofs_zstd_exit(void) { return 0; }
-#endif /* !CONFIG_EROFS_FS_ZIP_ZSTD */
-
#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);
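
erofs_grab_cache_page_nowait() becomes erofs_grab_folio_nowait() on top of __filemap_get_folio(). One behavioral detail worth keeping in mind, and the reason the zdata.c caller below checks IS_ERR_OR_NULL(): __filemap_get_folio() reports failure as an ERR_PTR(), not as NULL. A minimal hedged sketch of such a caller (hypothetical wrapper name):

    #include <linux/pagemap.h>

    static struct folio *grab_nowait(struct address_space *mapping, pgoff_t index)
    {
            struct folio *folio;

            folio = __filemap_get_folio(mapping, index,
                            FGP_LOCK | FGP_CREAT | FGP_NOFS | FGP_NOWAIT,
                            readahead_gfp_mask(mapping) & ~__GFP_RECLAIM);
            if (IS_ERR(folio))
                    return NULL;    /* caller treats "not grabbed" as a miss */
            return folio;           /* locked; folio_unlock()/folio_put() later */
    }
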
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c93bd24d2771..35268263aaed 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -343,7 +343,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi->build_time = le64_to_cpu(dsb->build_time);
sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
- memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
+ super_set_uuid(sb, (void *)dsb->uuid, sizeof(dsb->uuid));
ret = strscpy(sbi->volume_name, dsb->volume_name,
sizeof(dsb->volume_name));
@@ -849,23 +849,7 @@ static int __init erofs_module_init(void)
if (err)
goto shrinker_err;
- err = z_erofs_lzma_init();
- if (err)
- goto lzma_err;
-
- err = z_erofs_deflate_init();
- if (err)
- goto deflate_err;
-
- err = z_erofs_zstd_init();
- if (err)
- goto zstd_err;
-
- err = z_erofs_gbuf_init();
- if (err)
- goto gbuf_err;
-
- err = z_erofs_init_zip_subsystem();
+ err = z_erofs_init_subsystem();
if (err)
goto zip_err;
@@ -882,16 +866,8 @@ static int __init erofs_module_init(void)
fs_err:
erofs_exit_sysfs();
sysfs_err:
- z_erofs_exit_zip_subsystem();
+ z_erofs_exit_subsystem();
zip_err:
- z_erofs_gbuf_exit();
-gbuf_err:
- z_erofs_zstd_exit();
-zstd_err:
- z_erofs_deflate_exit();
-deflate_err:
- z_erofs_lzma_exit();
-lzma_err:
erofs_exit_shrinker();
shrinker_err:
kmem_cache_destroy(erofs_inode_cachep);
@@ -906,13 +882,9 @@ static void __exit erofs_module_exit(void)
rcu_barrier();
erofs_exit_sysfs();
- z_erofs_exit_zip_subsystem();
- z_erofs_zstd_exit();
- z_erofs_deflate_exit();
- z_erofs_lzma_exit();
+ z_erofs_exit_subsystem();
erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep);
- z_erofs_gbuf_exit();
}
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index d6fe002a4a71..424f656cd765 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -19,10 +19,7 @@
typedef void *z_erofs_next_pcluster_t;
struct z_erofs_bvec {
- union {
- struct page *page;
- struct folio *folio;
- };
+ struct page *page;
int offset;
unsigned int end;
};
@@ -449,44 +446,51 @@ static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
#endif
-void z_erofs_exit_zip_subsystem(void)
+void z_erofs_exit_subsystem(void)
{
erofs_cpu_hotplug_destroy();
erofs_destroy_percpu_workers();
destroy_workqueue(z_erofs_workqueue);
z_erofs_destroy_pcluster_pool();
+ z_erofs_exit_decompressor();
}
-int __init z_erofs_init_zip_subsystem(void)
+int __init z_erofs_init_subsystem(void)
{
- int err = z_erofs_create_pcluster_pool();
+ int err = z_erofs_init_decompressor();
if (err)
- goto out_error_pcluster_pool;
+ goto err_decompressor;
+
+ err = z_erofs_create_pcluster_pool();
+ if (err)
+ goto err_pcluster_pool;
z_erofs_workqueue = alloc_workqueue("erofs_worker",
WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
if (!z_erofs_workqueue) {
err = -ENOMEM;
- goto out_error_workqueue_init;
+ goto err_workqueue_init;
}
err = erofs_init_percpu_workers();
if (err)
- goto out_error_pcpu_worker;
+ goto err_pcpu_worker;
err = erofs_cpu_hotplug_init();
if (err < 0)
- goto out_error_cpuhp_init;
+ goto err_cpuhp_init;
return err;
-out_error_cpuhp_init:
+err_cpuhp_init:
erofs_destroy_percpu_workers();
-out_error_pcpu_worker:
+err_pcpu_worker:
destroy_workqueue(z_erofs_workqueue);
-out_error_workqueue_init:
+err_workqueue_init:
z_erofs_destroy_pcluster_pool();
-out_error_pcluster_pool:
+err_pcluster_pool:
+ z_erofs_exit_decompressor();
+err_decompressor:
return err;
}
@@ -617,32 +621,31 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
-/* called by erofs_shrinker to get rid of all cached compressed bvecs */
+/* (erofs_shrinker) disconnect cached encoded data from pclusters */
int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp)
{
struct z_erofs_pcluster *const pcl =
container_of(grp, struct z_erofs_pcluster, obj);
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ struct folio *folio;
int i;
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
- /* There is no actice user since the pcluster is now freezed */
+ /* Each cached folio contains one page unless bs > ps is supported */
for (i = 0; i < pclusterpages; ++i) {
- struct folio *folio = pcl->compressed_bvecs[i].folio;
-
- if (!folio)
- continue;
+ if (pcl->compressed_bvecs[i].page) {
+ folio = page_folio(pcl->compressed_bvecs[i].page);
+ /* Avoid reclaiming or migrating this folio */
+ if (!folio_trylock(folio))
+ return -EBUSY;
- /* Avoid reclaiming or migrating this folio */
- if (!folio_trylock(folio))
- return -EBUSY;
-
- if (!erofs_folio_is_managed(sbi, folio))
- continue;
- pcl->compressed_bvecs[i].folio = NULL;
- folio_detach_private(folio);
- folio_unlock(folio);
+ if (!erofs_folio_is_managed(sbi, folio))
+ continue;
+ pcl->compressed_bvecs[i].page = NULL;
+ folio_detach_private(folio);
+ folio_unlock(folio);
+ }
}
return 0;
}
@@ -650,9 +653,9 @@ int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
struct z_erofs_pcluster *pcl = folio_get_private(folio);
- unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
+ struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
bool ret;
- int i;
if (!folio_test_private(folio))
return true;
@@ -661,9 +664,9 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
spin_lock(&pcl->obj.lockref.lock);
if (pcl->obj.lockref.count <= 0) {
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
- for (i = 0; i < pclusterpages; ++i) {
- if (pcl->compressed_bvecs[i].folio == folio) {
- pcl->compressed_bvecs[i].folio = NULL;
+ for (; bvec < end; ++bvec) {
+ if (bvec->page && page_folio(bvec->page) == folio) {
+ bvec->page = NULL;
folio_detach_private(folio);
ret = true;
break;
@@ -925,7 +928,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
fe->pcl = NULL;
}
-static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
+static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
unsigned int cur, unsigned int end, erofs_off_t pos)
{
struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
@@ -938,113 +941,109 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
buf.mapping = packed_inode->i_mapping;
for (; cur < end; cur += cnt, pos += cnt) {
- cnt = min_t(unsigned int, end - cur,
- sb->s_blocksize - erofs_blkoff(sb, pos));
+ cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
src = erofs_bread(&buf, pos, EROFS_KMAP);
if (IS_ERR(src)) {
erofs_put_metabuf(&buf);
return PTR_ERR(src);
}
- memcpy_to_page(page, cur, src, cnt);
+ memcpy_to_folio(folio, cur, src, cnt);
}
erofs_put_metabuf(&buf);
return 0;
}
-static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *fe,
+static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
struct folio *folio, bool ra)
{
- struct inode *const inode = fe->inode;
- struct erofs_map_blocks *const map = &fe->map;
+ struct inode *const inode = f->inode;
+ struct erofs_map_blocks *const map = &f->map;
const loff_t offset = folio_pos(folio);
- const unsigned int bs = i_blocksize(inode), fs = folio_size(folio);
- bool tight = true, exclusive;
- unsigned int cur, end, len, split;
+ const unsigned int bs = i_blocksize(inode);
+ unsigned int end = folio_size(folio), split = 0, cur, pgs;
+ bool tight, excl;
int err = 0;
+ tight = (bs == PAGE_SIZE);
z_erofs_onlinefolio_init(folio);
- split = 0;
- end = fs;
-repeat:
- if (offset + end - 1 < map->m_la ||
- offset + end - 1 >= map->m_la + map->m_llen) {
- z_erofs_pcluster_end(fe);
- map->m_la = offset + end - 1;
- map->m_llen = 0;
- err = z_erofs_map_blocks_iter(inode, map, 0);
- if (err)
- goto out;
- }
-
- cur = offset > map->m_la ? 0 : map->m_la - offset;
- /* bump split parts first to avoid several separate cases */
- ++split;
-
- if (!(map->m_flags & EROFS_MAP_MAPPED)) {
- folio_zero_segment(folio, cur, end);
- tight = false;
- goto next_part;
- }
-
- if (map->m_flags & EROFS_MAP_FRAGMENT) {
- erofs_off_t fpos = offset + cur - map->m_la;
+ do {
+ if (offset + end - 1 < map->m_la ||
+ offset + end - 1 >= map->m_la + map->m_llen) {
+ z_erofs_pcluster_end(f);
+ map->m_la = offset + end - 1;
+ map->m_llen = 0;
+ err = z_erofs_map_blocks_iter(inode, map, 0);
+ if (err)
+ break;
+ }
- len = min_t(unsigned int, map->m_llen - fpos, end - cur);
- err = z_erofs_read_fragment(inode->i_sb, &folio->page, cur,
- cur + len, EROFS_I(inode)->z_fragmentoff + fpos);
- if (err)
- goto out;
- tight = false;
- goto next_part;
- }
+ cur = offset > map->m_la ? 0 : map->m_la - offset;
+ pgs = round_down(cur, PAGE_SIZE);
+ /* bump split parts first to avoid several separate cases */
+ ++split;
+
+ if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ folio_zero_segment(folio, cur, end);
+ tight = false;
+ } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
+ erofs_off_t fpos = offset + cur - map->m_la;
+
+ err = z_erofs_read_fragment(inode->i_sb, folio, cur,
+ cur + min(map->m_llen - fpos, end - cur),
+ EROFS_I(inode)->z_fragmentoff + fpos);
+ if (err)
+ break;
+ tight = false;
+ } else {
+ if (!f->pcl) {
+ err = z_erofs_pcluster_begin(f);
+ if (err)
+ break;
+ f->pcl->besteffort |= !ra;
+ }
- if (!fe->pcl) {
- err = z_erofs_pcluster_begin(fe);
- if (err)
- goto out;
- fe->pcl->besteffort |= !ra;
- }
+ pgs = round_down(end - 1, PAGE_SIZE);
+ /*
+ * Ensure this partial page belongs to this submit chain
+ * rather than other concurrent submit chains or
+ * noio(bypass) chains, since those chains are handled
+ * asynchronously; the page therefore cannot be used for
+ * inplace I/O or bvpage (it must be processed in strict order).
+ */
+ tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
+ excl = false;
+ if (cur <= pgs) {
+ excl = (split <= 1) || tight;
+ cur = pgs;
+ }
- /*
- * Ensure the current partial folio belongs to this submit chain rather
- * than other concurrent submit chains or the noio(bypass) chain since
- * those chains are handled asynchronously thus the folio cannot be used
- * for inplace I/O or bvpage (should be processed in a strict order.)
- */
- tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
- exclusive = (!cur && ((split <= 1) || (tight && bs == fs)));
- if (cur)
- tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
-
- err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
- .page = &folio->page,
- .offset = offset - map->m_la,
- .end = end,
- }), exclusive);
- if (err)
- goto out;
-
- z_erofs_onlinefolio_split(folio);
- if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
- fe->pcl->multibases = true;
- if (fe->pcl->length < offset + end - map->m_la) {
- fe->pcl->length = offset + end - map->m_la;
- fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
- }
- if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
- !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
- fe->pcl->length == map->m_llen)
- fe->pcl->partial = false;
-next_part:
- /* shorten the remaining extent to update progress */
- map->m_llen = offset + cur - map->m_la;
- map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
-
- end = cur;
- if (end > 0)
- goto repeat;
+ err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
+ .page = folio_page(folio, pgs >> PAGE_SHIFT),
+ .offset = offset + pgs - map->m_la,
+ .end = end - pgs, }), excl);
+ if (err)
+ break;
-out:
+ z_erofs_onlinefolio_split(folio);
+ if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
+ f->pcl->multibases = true;
+ if (f->pcl->length < offset + end - map->m_la) {
+ f->pcl->length = offset + end - map->m_la;
+ f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+ }
+ if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
+ !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
+ f->pcl->length == map->m_llen)
+ f->pcl->partial = false;
+ }
+ /* shorten the remaining extent to update progress */
+ map->m_llen = offset + cur - map->m_la;
+ map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
+ if (cur <= pgs) {
+ split = cur < pgs;
+ tight = (bs == PAGE_SIZE);
+ }
+ } while ((end = cur) > 0);
z_erofs_onlinefolio_end(folio, err);
return err;
}
@@ -1066,7 +1065,7 @@ static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
static bool z_erofs_page_is_invalidated(struct page *page)
{
- return !page->mapping && !z_erofs_is_shortlived_page(page);
+ return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
}
struct z_erofs_decompress_backend {
@@ -1221,8 +1220,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
const struct z_erofs_decompressor *decomp =
- &erofs_decompressors[pcl->algorithmformat];
- int i, err2;
+ z_erofs_decomp[pcl->algorithmformat];
+ int i, j, jtop, err2;
struct page *page;
bool overlapped;
@@ -1280,10 +1279,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
put_page(page);
} else {
+ /* managed folios are still left in compressed_bvecs[] */
for (i = 0; i < pclusterpages; ++i) {
- /* consider shortlived pages added when decompressing */
page = be->compressed_pages[i];
-
if (!page ||
erofs_folio_is_managed(sbi, page_folio(page)))
continue;
@@ -1294,21 +1292,31 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
if (be->compressed_pages < be->onstack_pages ||
be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
kvfree(be->compressed_pages);
- z_erofs_fill_other_copies(be, err);
+ jtop = 0;
+ z_erofs_fill_other_copies(be, err);
for (i = 0; i < be->nr_pages; ++i) {
page = be->decompressed_pages[i];
if (!page)
continue;
DBG_BUGON(z_erofs_page_is_invalidated(page));
-
- /* recycle all individual short-lived pages */
- if (z_erofs_put_shortlivedpage(be->pagepool, page))
+ if (!z_erofs_is_shortlived_page(page)) {
+ z_erofs_onlinefolio_end(page_folio(page), err);
continue;
- z_erofs_onlinefolio_end(page_folio(page), err);
+ }
+ if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
+ erofs_pagepool_add(be->pagepool, page);
+ continue;
+ }
+ for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
+ ;
+ if (j >= jtop) /* this bounce page is newly detected */
+ be->decompressed_pages[jtop++] = page;
}
-
+ while (jtop)
+ erofs_pagepool_add(be->pagepool,
+ be->decompressed_pages[--jtop]);
if (be->decompressed_pages != be->onstack_pages)
kvfree(be->decompressed_pages);
@@ -1419,7 +1427,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
bool tocache = false;
struct z_erofs_bvec zbv;
struct address_space *mapping;
- struct page *page;
+ struct folio *folio;
int bs = i_blocksize(f->inode);
/* Except for inplace folios, the entire folio can be used for I/Os */
@@ -1429,23 +1437,25 @@ repeat:
spin_lock(&pcl->obj.lockref.lock);
zbv = pcl->compressed_bvecs[nr];
spin_unlock(&pcl->obj.lockref.lock);
- if (!zbv.folio)
+ if (!zbv.page)
goto out_allocfolio;
- bvec->bv_page = &zbv.folio->page;
+ bvec->bv_page = zbv.page;
DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
+
+ folio = page_folio(zbv.page);
/*
* Handle preallocated cached folios. We tried to allocate such folios
* without triggering direct reclaim. If allocation failed, inplace
* file-backed folios will be used instead.
*/
- if (zbv.folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
- zbv.folio->private = 0;
+ if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
+ folio->private = 0;
tocache = true;
goto out_tocache;
}
- mapping = READ_ONCE(zbv.folio->mapping);
+ mapping = READ_ONCE(folio->mapping);
/*
* File-backed folios for inplace I/Os are all locked steady,
* therefore it is impossible for `mapping` to be NULL.
@@ -1457,21 +1467,21 @@ repeat:
return;
}
- folio_lock(zbv.folio);
- if (zbv.folio->mapping == mc) {
+ folio_lock(folio);
+ if (folio->mapping == mc) {
/*
* The cached folio is still in managed cache but without
* a valid `->private` pcluster hint. Let's reconnect them.
*/
- if (!folio_test_private(zbv.folio)) {
- folio_attach_private(zbv.folio, pcl);
+ if (!folio_test_private(folio)) {
+ folio_attach_private(folio, pcl);
/* compressed_bvecs[] already takes a ref before */
- folio_put(zbv.folio);
+ folio_put(folio);
}
/* no need to submit if it is already up-to-date */
- if (folio_test_uptodate(zbv.folio)) {
- folio_unlock(zbv.folio);
+ if (folio_test_uptodate(folio)) {
+ folio_unlock(folio);
bvec->bv_page = NULL;
}
return;
@@ -1481,32 +1491,31 @@ repeat:
* It has been truncated, so it's unsafe to reuse this one. Let's
* allocate a new page for compressed data.
*/
- DBG_BUGON(zbv.folio->mapping);
+ DBG_BUGON(folio->mapping);
tocache = true;
- folio_unlock(zbv.folio);
- folio_put(zbv.folio);
+ folio_unlock(folio);
+ folio_put(folio);
out_allocfolio:
- page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
+ zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
spin_lock(&pcl->obj.lockref.lock);
- if (pcl->compressed_bvecs[nr].folio) {
- erofs_pagepool_add(&f->pagepool, page);
+ if (pcl->compressed_bvecs[nr].page) {
+ erofs_pagepool_add(&f->pagepool, zbv.page);
spin_unlock(&pcl->obj.lockref.lock);
cond_resched();
goto repeat;
}
- pcl->compressed_bvecs[nr].folio = zbv.folio = page_folio(page);
+ bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page;
+ folio = page_folio(zbv.page);
+ /* first mark it as a temporary shortlived folio (now 1 ref) */
+ folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
spin_unlock(&pcl->obj.lockref.lock);
- bvec->bv_page = page;
out_tocache:
if (!tocache || bs != PAGE_SIZE ||
- filemap_add_folio(mc, zbv.folio, pcl->obj.index + nr, gfp)) {
- /* turn into a temporary shortlived folio (1 ref) */
- zbv.folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
+ filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp))
return;
- }
- folio_attach_private(zbv.folio, pcl);
+ folio_attach_private(folio, pcl);
/* drop a refcount added by allocpage (then 2 refs in total here) */
- folio_put(zbv.folio);
+ folio_put(folio);
}
static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
@@ -1767,7 +1776,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
end = round_up(end, PAGE_SIZE);
} else {
end = round_up(map->m_la, PAGE_SIZE);
-
if (!map->m_llen)
return;
}
@@ -1775,15 +1783,15 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
cur = map->m_la + map->m_llen - 1;
while ((cur >= end) && (cur < i_size_read(inode))) {
pgoff_t index = cur >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
- page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
- if (page) {
- if (PageUptodate(page))
- unlock_page(page);
+ folio = erofs_grab_folio_nowait(inode->i_mapping, index);
+ if (!IS_ERR_OR_NULL(folio)) {
+ if (folio_test_uptodate(folio))
+ folio_unlock(folio);
else
- z_erofs_scan_folio(f, page_folio(page), !!rac);
- put_page(page);
+ z_erofs_scan_folio(f, folio, !!rac);
+ folio_put(folio);
}
if (cur < PAGE_SIZE)
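
Much of the zdata.c churn is the bvec switch from struct folio back to struct page plus page_folio() at the use sites. The managed-cache release path keeps the same discipline as before; a hedged, stand-alone sketch of that "trylock, detach ->private under the lock, unlock" sequence (hypothetical function name):

    #include <linux/pagemap.h>

    static int drop_cached_folio(struct page *page)
    {
            struct folio *folio;

            if (!page)
                    return 0;
            folio = page_folio(page);
            if (!folio_trylock(folio))              /* never block the shrinker */
                    return -EBUSY;
            if (folio_test_private(folio))
                    folio_detach_private(folio);    /* also drops the private ref */
            folio_unlock(folio);
            return 0;
    }
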
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 9b248ee5fef2..403af6e31d5b 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -686,7 +686,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
struct erofs_inode *const vi = EROFS_I(inode);
int err = 0;
- trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
+ trace_erofs_map_blocks_enter(inode, map, flags);
/* when trying to read beyond EOF, leave it unmapped */
if (map->m_la >= inode->i_size) {
@@ -711,7 +711,9 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
err = z_erofs_do_map_blocks(inode, map, flags);
out:
- trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+ if (err)
+ map->m_llen = 0;
+ trace_erofs_map_blocks_exit(inode, map, flags, err);
return err;
}
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 036024bce9f7..b80f612867c2 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -148,7 +148,7 @@ int __init z_erofs_gbuf_init(void)
void z_erofs_gbuf_exit(void)
{
- int i;
+ int i, j;
for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
@@ -161,9 +161,9 @@ void z_erofs_gbuf_exit(void)
if (!gbuf->pages)
continue;
- for (i = 0; i < gbuf->nrpages; ++i)
- if (gbuf->pages[i])
- put_page(gbuf->pages[i]);
+ for (j = 0; j < gbuf->nrpages; ++j)
+ if (gbuf->pages[j])
+ put_page(gbuf->pages[j]);
kfree(gbuf->pages);
gbuf->pages = NULL;
}
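
The zutil.c hunk is a plain loop-variable reuse fix: the inner put_page() loop reused i and terminated the outer gbuf walk early. A tiny user-space reproduction of the bug class, for illustration only:

    #include <stdio.h>

    int main(void)
    {
            int i, hits = 0;

            for (i = 0; i < 4; ++i) {
                    for (i = 0; i < 3; ++i)   /* BUG: reuses 'i'; use a separate 'j' */
                            ;
                    hits++;                   /* runs once, not four times */
            }
            printf("outer iterations: %d\n", hits);   /* prints 1 */
            return 0;
    }
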
diff --git a/fs/exec.c b/fs/exec.c
index 40073142288f..a47d0e4c54f6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -486,6 +486,35 @@ static int count_strings_kernel(const char *const *argv)
return i;
}
+static inline int bprm_set_stack_limit(struct linux_binprm *bprm,
+ unsigned long limit)
+{
+#ifdef CONFIG_MMU
+ /* Avoid a pathological bprm->p. */
+ if (bprm->p < limit)
+ return -E2BIG;
+ bprm->argmin = bprm->p - limit;
+#endif
+ return 0;
+}
+static inline bool bprm_hit_stack_limit(struct linux_binprm *bprm)
+{
+#ifdef CONFIG_MMU
+ return bprm->p < bprm->argmin;
+#else
+ return false;
+#endif
+}
+
+/*
+ * Calculate bprm->argmin from:
+ * - _STK_LIM
+ * - ARG_MAX
+ * - bprm->rlim_stack.rlim_cur
+ * - bprm->argc
+ * - bprm->envc
+ * - bprm->p
+ */
static int bprm_stack_limits(struct linux_binprm *bprm)
{
unsigned long limit, ptr_size;
@@ -505,6 +534,9 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
* of argument strings even with small stacks
*/
limit = max_t(unsigned long, limit, ARG_MAX);
+ /* Reject totally pathological counts. */
+ if (bprm->argc < 0 || bprm->envc < 0)
+ return -E2BIG;
/*
* We must account for the size of all the argv and envp pointers to
* the argv and envp strings, since they will also take up space in
@@ -518,13 +550,14 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
* argc can never be 0, to keep them from walking envp by accident.
* See do_execveat_common().
*/
- ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
+ if (check_add_overflow(max(bprm->argc, 1), bprm->envc, &ptr_size) ||
+ check_mul_overflow(ptr_size, sizeof(void *), &ptr_size))
+ return -E2BIG;
if (limit <= ptr_size)
return -E2BIG;
limit -= ptr_size;
- bprm->argmin = bprm->p - limit;
- return 0;
+ return bprm_set_stack_limit(bprm, limit);
}
/*
@@ -562,10 +595,8 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
pos = bprm->p;
str += len;
bprm->p -= len;
-#ifdef CONFIG_MMU
- if (bprm->p < bprm->argmin)
+ if (bprm_hit_stack_limit(bprm))
goto out;
-#endif
while (len > 0) {
int offset, bytes_to_copy;
@@ -640,7 +671,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
/* We're going to work our way backwards. */
arg += len;
bprm->p -= len;
- if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
+ if (bprm_hit_stack_limit(bprm))
return -E2BIG;
while (len > 0) {
@@ -952,10 +983,6 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
path_noexec(&file->f_path)))
goto exit;
- err = deny_write_access(file);
- if (err)
- goto exit;
-
out:
return file;
@@ -971,8 +998,7 @@ exit:
*
* Returns ERR_PTR on failure or allocated struct file on success.
*
- * As this is a wrapper for the internal do_open_execat(), callers
- * must call allow_write_access() before fput() on release. Also see
+ * This is a wrapper for the internal do_open_execat(); also see
* do_close_execat().
*/
struct file *open_exec(const char *name)
@@ -1524,10 +1550,8 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
/* Matches do_open_execat() */
static void do_close_execat(struct file *file)
{
- if (!file)
- return;
- allow_write_access(file);
- fput(file);
+ if (file)
+ fput(file);
}
static void free_bprm(struct linux_binprm *bprm)
@@ -1846,7 +1870,6 @@ static int exec_binprm(struct linux_binprm *bprm)
bprm->file = bprm->interpreter;
bprm->interpreter = NULL;
- allow_write_access(exec);
if (unlikely(bprm->have_execfd)) {
if (bprm->executable) {
fput(exec);
@@ -2211,3 +2234,7 @@ static int __init init_fs_exec_sysctls(void)
fs_initcall(init_fs_exec_sysctls);
#endif /* CONFIG_SYSCTL */
+
+#ifdef CONFIG_EXEC_KUNIT_TEST
+#include "exec_test.c"
+#endif
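
bprm_stack_limits() now rejects negative argc/envc and computes the argv/envp pointer-table size with the linux/overflow.h helpers, which return true when the operation would wrap. A hedged sketch of that checked computation (hypothetical helper, not the exec.c code itself):

    #include <linux/overflow.h>
    #include <linux/errno.h>

    static int pointer_table_size(int argc, int envc, unsigned long *out)
    {
            unsigned long n;

            if (argc < 0 || envc < 0)
                    return -E2BIG;
            /* (max(argc, 1) + envc) * sizeof(void *), refusing any wraparound */
            if (check_add_overflow((unsigned long)(argc ?: 1),
                                   (unsigned long)envc, &n) ||
                check_mul_overflow(n, sizeof(void *), &n))
                    return -E2BIG;
            *out = n;
            return 0;
    }
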
diff --git a/fs/exec_test.c b/fs/exec_test.c
new file mode 100644
index 000000000000..7c77d039680b
--- /dev/null
+++ b/fs/exec_test.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <kunit/test.h>
+
+struct bprm_stack_limits_result {
+ struct linux_binprm bprm;
+ int expected_rc;
+ unsigned long expected_argmin;
+};
+
+static const struct bprm_stack_limits_result bprm_stack_limits_results[] = {
+ /* Negative argc/envc counts produce -E2BIG */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = INT_MIN, .envc = INT_MIN }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = 5, .envc = -1 }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = -1, .envc = 10 }, .expected_rc = -E2BIG },
+ /* The max value of argc or envc is MAX_ARG_STRINGS. */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = INT_MAX, .envc = INT_MAX }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = MAX_ARG_STRINGS, .envc = MAX_ARG_STRINGS }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = 0, .envc = MAX_ARG_STRINGS }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = MAX_ARG_STRINGS, .envc = 0 }, .expected_rc = -E2BIG },
+ /*
+ * On 32-bit system these argc and envc counts, while likely impossible
+ * to represent within the associated TASK_SIZE, could overflow the
+ * limit calculation, and bypass the ptr_size <= limit check.
+ */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = 0x20000001, .envc = 0x20000001 }, .expected_rc = -E2BIG },
+#ifdef CONFIG_MMU
+ /* Make sure a pathological bprm->p doesn't cause an overflow. */
+ { { .p = sizeof(void *), .rlim_stack.rlim_cur = ULONG_MAX,
+ .argc = 10, .envc = 10 }, .expected_rc = -E2BIG },
+#endif
+ /*
+ * 0 rlim_stack will get raised to ARG_MAX. With 1 string pointer,
+ * we should see p - ARG_MAX + sizeof(void *).
+ */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = 1, .envc = 0 }, .expected_argmin = ULONG_MAX - ARG_MAX + sizeof(void *)},
+ /* Validate that argc is always raised to a minimum of 1. */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = 0, .envc = 0 }, .expected_argmin = ULONG_MAX - ARG_MAX + sizeof(void *)},
+ /*
+ * 0 rlim_stack will get raised to ARG_MAX. With pointers filling ARG_MAX,
+ * we should see -E2BIG. (Note argc is always raised to at least 1.)
+ */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = ARG_MAX / sizeof(void *), .envc = 0 }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = ARG_MAX / sizeof(void *) + 1, .envc = 0 }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = 0, .envc = ARG_MAX / sizeof(void *) }, .expected_rc = -E2BIG },
+ /* And with one less, we see space for exactly 1 pointer. */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = (ARG_MAX / sizeof(void *)) - 1, .envc = 0 },
+ .expected_argmin = ULONG_MAX - sizeof(void *) },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
+ .argc = 0, .envc = (ARG_MAX / sizeof(void *)) - 2, },
+ .expected_argmin = ULONG_MAX - sizeof(void *) },
+ /* If we raise rlim_stack / 4 to exactly ARG_MAX, nothing changes. */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
+ .argc = ARG_MAX / sizeof(void *), .envc = 0 }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
+ .argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
+ .argc = ARG_MAX / sizeof(void *) + 1, .envc = 0 }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
+ .argc = 0, .envc = ARG_MAX / sizeof(void *) }, .expected_rc = -E2BIG },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
+ .argc = (ARG_MAX / sizeof(void *)) - 1, .envc = 0 },
+ .expected_argmin = ULONG_MAX - sizeof(void *) },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
+ .argc = 0, .envc = (ARG_MAX / sizeof(void *)) - 2, },
+ .expected_argmin = ULONG_MAX - sizeof(void *) },
+ /* But raising it another pointer * 4 will provide space for 1 more pointer. */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = (ARG_MAX + sizeof(void *)) * 4,
+ .argc = ARG_MAX / sizeof(void *), .envc = 0 },
+ .expected_argmin = ULONG_MAX - sizeof(void *) },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = (ARG_MAX + sizeof(void *)) * 4,
+ .argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 },
+ .expected_argmin = ULONG_MAX - sizeof(void *) },
+ /* Raising rlim_stack / 4 to _STK_LIM / 4 * 3 will see more space. */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3),
+ .argc = 0, .envc = 0 },
+ .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3),
+ .argc = 0, .envc = 0 },
+ .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
+ /* But raising it any further will see no increase. */
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)),
+ .argc = 0, .envc = 0 },
+ .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)),
+ .argc = 0, .envc = 0 },
+ .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM,
+ .argc = 0, .envc = 0 },
+ .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
+ { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM,
+ .argc = 0, .envc = 0 },
+ .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
+};
+
+static void exec_test_bprm_stack_limits(struct kunit *test)
+{
+ /* Double-check the constants. */
+ KUNIT_EXPECT_EQ(test, _STK_LIM, SZ_8M);
+ KUNIT_EXPECT_EQ(test, ARG_MAX, 32 * SZ_4K);
+ KUNIT_EXPECT_EQ(test, MAX_ARG_STRINGS, 0x7FFFFFFF);
+
+ for (int i = 0; i < ARRAY_SIZE(bprm_stack_limits_results); i++) {
+ const struct bprm_stack_limits_result *result = &bprm_stack_limits_results[i];
+ struct linux_binprm bprm = result->bprm;
+ int rc;
+
+ rc = bprm_stack_limits(&bprm);
+ KUNIT_EXPECT_EQ_MSG(test, rc, result->expected_rc, "on loop %d", i);
+#ifdef CONFIG_MMU
+ KUNIT_EXPECT_EQ_MSG(test, bprm.argmin, result->expected_argmin, "on loop %d", i);
+#endif
+ }
+}
+
+static struct kunit_case exec_test_cases[] = {
+ KUNIT_CASE(exec_test_bprm_stack_limits),
+ {},
+};
+
+static struct kunit_suite exec_test_suite = {
+ .name = "exec",
+ .test_cases = exec_test_cases,
+};
+
+kunit_test_suite(exec_test_suite);
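
The test is pulled into fs/exec.c itself via the #include at the end of the previous file, so it can exercise static functions such as bprm_stack_limits() directly. A minimal sketch of the same trick with hypothetical names (two files shown together; not meant to compile as one unit):

    /* foo.c */
    static int add2(int a, int b) { return a + b; }   /* static: not visible elsewhere */
    #ifdef CONFIG_FOO_KUNIT_TEST
    #include "foo_test.c"                             /* test compiles in the same TU */
    #endif

    /* foo_test.c (hypothetical) */
    #include <kunit/test.h>

    static void add2_test(struct kunit *test)
    {
            KUNIT_EXPECT_EQ(test, add2(2, 2), 4);
    }
    static struct kunit_case foo_cases[] = { KUNIT_CASE(add2_test), {} };
    static struct kunit_suite foo_suite = {
            .name = "foo",
            .test_cases = foo_cases,
    };
    kunit_test_suite(foo_suite);
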
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index 84572e11cc05..7446bf09a04a 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -813,7 +813,7 @@ static int __exfat_get_dentry_set(struct exfat_entry_set_cache *es,
num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb);
if (num_bh > ARRAY_SIZE(es->__bh)) {
- es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_KERNEL);
+ es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_NOFS);
if (!es->bh) {
brelse(bh);
return -ENOMEM;
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 9adfc38ca7da..64c31867bc76 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -89,12 +89,14 @@ free_clu:
return -EIO;
}
-static bool exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode)
+static bool exfat_allow_set_time(struct mnt_idmap *idmap,
+ struct exfat_sb_info *sbi, struct inode *inode)
{
mode_t allow_utime = sbi->options.allow_utime;
- if (!uid_eq(current_fsuid(), inode->i_uid)) {
- if (in_group_p(inode->i_gid))
+ if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode),
+ current_fsuid())) {
+ if (vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
allow_utime >>= 3;
if (allow_utime & MAY_WRITE)
return true;
@@ -283,7 +285,7 @@ int exfat_getattr(struct mnt_idmap *idmap, const struct path *path,
struct inode *inode = d_backing_inode(path->dentry);
struct exfat_inode_info *ei = EXFAT_I(inode);
- generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
exfat_truncate_atime(&stat->atime);
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = ei->i_crtime.tv_sec;
@@ -311,20 +313,22 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
/* Check for setting the inode time. */
ia_valid = attr->ia_valid;
if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
- exfat_allow_set_time(sbi, inode)) {
+ exfat_allow_set_time(idmap, sbi, inode)) {
attr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET |
ATTR_TIMES_SET);
}
- error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
+ error = setattr_prepare(idmap, dentry, attr);
attr->ia_valid = ia_valid;
if (error)
goto out;
if (((attr->ia_valid & ATTR_UID) &&
- !uid_eq(attr->ia_uid, sbi->options.fs_uid)) ||
+ (!uid_eq(from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid),
+ sbi->options.fs_uid))) ||
((attr->ia_valid & ATTR_GID) &&
- !gid_eq(attr->ia_gid, sbi->options.fs_gid)) ||
+ (!gid_eq(from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid),
+ sbi->options.fs_gid))) ||
((attr->ia_valid & ATTR_MODE) &&
(attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) {
error = -EPERM;
@@ -343,7 +347,7 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (attr->ia_valid & ATTR_SIZE)
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- setattr_copy(&nop_mnt_idmap, inode, attr);
+ setattr_copy(idmap, inode, attr);
exfat_truncate_inode_atime(inode);
if (attr->ia_valid & ATTR_SIZE) {
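
The exfat changes make utime/setattr idmapped-mount aware: ownership is compared through the mount's idmapping rather than against raw i_uid/i_gid. A hedged sketch of that style of check using the vfsuid/vfsgid helpers (hypothetical wrapper name):

    #include <linux/fs.h>
    #include <linux/mnt_idmap.h>
    #include <linux/cred.h>

    static bool caller_owns_or_in_group(struct mnt_idmap *idmap,
                                        const struct inode *inode)
    {
            /* uid as the caller sees it through this mount's idmapping */
            if (vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid()))
                    return true;
            return vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode));
    }
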
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 3d5ea2cfad66..323ecebe6f0e 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -225,8 +225,8 @@ static const struct constant_table exfat_param_enums[] = {
};
static const struct fs_parameter_spec exfat_parameters[] = {
- fsparam_u32("uid", Opt_uid),
- fsparam_u32("gid", Opt_gid),
+ fsparam_uid("uid", Opt_uid),
+ fsparam_gid("gid", Opt_gid),
fsparam_u32oct("umask", Opt_umask),
fsparam_u32oct("dmask", Opt_dmask),
fsparam_u32oct("fmask", Opt_fmask),
@@ -262,10 +262,10 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
switch (opt) {
case Opt_uid:
- opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
+ opts->fs_uid = result.uid;
break;
case Opt_gid:
- opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
+ opts->fs_gid = result.gid;
break;
case Opt_umask:
opts->fs_fmask = result.uint_32;
@@ -788,7 +788,7 @@ static struct file_system_type exfat_fs_type = {
.init_fs_context = exfat_init_fs_context,
.parameters = exfat_parameters,
.kill_sb = exfat_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
static void exfat_inode_init_once(void *foo)
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 07ea3d62b298..4f2dd4ab4486 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -427,7 +427,7 @@ EXPORT_SYMBOL_GPL(exportfs_encode_fh);
struct dentry *
exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
- int fileid_type,
+ int fileid_type, unsigned int flags,
int (*acceptable)(void *, struct dentry *),
void *context)
{
@@ -445,6 +445,11 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
if (IS_ERR_OR_NULL(result))
return result;
+ if ((flags & EXPORT_FH_DIR_ONLY) && !d_is_dir(result)) {
+ err = -ENOTDIR;
+ goto err_result;
+ }
+
/*
* If no acceptance criteria was specified by caller, a disconnected
* dentry is also acceptable. Callers may use this mode to query if
@@ -581,7 +586,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
{
struct dentry *ret;
- ret = exportfs_decode_fh_raw(mnt, fid, fh_len, fileid_type,
+ ret = exportfs_decode_fh_raw(mnt, fid, fh_len, fileid_type, 0,
acceptable, context);
if (IS_ERR_OR_NULL(ret)) {
if (ret == ERR_PTR(-ENOMEM))
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 1bfd6ab11038..b8cfab8f98b9 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -77,26 +77,33 @@ static int ext2_valid_block_bitmap(struct super_block *sb,
ext2_grpblk_t next_zero_bit;
ext2_fsblk_t bitmap_blk;
ext2_fsblk_t group_first_block;
+ ext2_grpblk_t max_bit;
group_first_block = ext2_group_first_block_no(sb, block_group);
+ max_bit = ext2_group_last_block_no(sb, block_group) - group_first_block;
/* check whether block bitmap block number is set */
bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
offset = bitmap_blk - group_first_block;
- if (!ext2_test_bit(offset, bh->b_data))
+ if (offset < 0 || offset > max_bit ||
+ !ext2_test_bit(offset, bh->b_data))
/* bad block bitmap */
goto err_out;
/* check whether the inode bitmap block number is set */
bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
offset = bitmap_blk - group_first_block;
- if (!ext2_test_bit(offset, bh->b_data))
+ if (offset < 0 || offset > max_bit ||
+ !ext2_test_bit(offset, bh->b_data))
/* bad block bitmap */
goto err_out;
/* check whether the inode table block number is set */
bitmap_blk = le32_to_cpu(desc->bg_inode_table);
offset = bitmap_blk - group_first_block;
+ if (offset < 0 || offset > max_bit ||
+ offset + EXT2_SB(sb)->s_itb_per_group - 1 > max_bit)
+ goto err_out;
next_zero_bit = ext2_find_next_zero_bit(bh->b_data,
offset + EXT2_SB(sb)->s_itb_per_group,
offset);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 7ae0b61258a7..0a056d97e640 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -31,11 +31,10 @@ int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
ext4_fname_from_fscrypt_name(fname, &name);
-#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, iname, fname);
if (err)
ext4_fname_free_filename(fname);
-#endif
+
return err;
}
@@ -51,11 +50,9 @@ int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
ext4_fname_from_fscrypt_name(fname, &name);
-#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
if (err)
ext4_fname_free_filename(fname);
-#endif
return err;
}
@@ -70,10 +67,7 @@ void ext4_fname_free_filename(struct ext4_filename *fname)
fname->usr_fname = NULL;
fname->disk_name.name = NULL;
-#if IS_ENABLED(CONFIG_UNICODE)
- kfree(fname->cf_name.name);
- fname->cf_name.name = NULL;
-#endif
+ ext4_fname_free_ci_filename(fname);
}
static bool uuid_is_zero(__u8 u[16])
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 983dad8c07ec..8007abd4972d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2511,7 +2511,7 @@ struct ext4_filename {
struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
- struct fscrypt_str cf_name;
+ struct qstr cf_name;
#endif
};
@@ -2745,8 +2745,25 @@ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
#if IS_ENABLED(CONFIG_UNICODE)
extern int ext4_fname_setup_ci_filename(struct inode *dir,
- const struct qstr *iname,
- struct ext4_filename *fname);
+ const struct qstr *iname,
+ struct ext4_filename *fname);
+
+static inline void ext4_fname_free_ci_filename(struct ext4_filename *fname)
+{
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
+}
+#else
+static inline int ext4_fname_setup_ci_filename(struct inode *dir,
+ const struct qstr *iname,
+ struct ext4_filename *fname)
+{
+ return 0;
+}
+
+static inline void ext4_fname_free_ci_filename(struct ext4_filename *fname)
+{
+}
#endif
/* ext4 encryption related stuff goes here crypto.c */
@@ -2769,16 +2786,11 @@ static inline int ext4_fname_setup_filename(struct inode *dir,
int lookup,
struct ext4_filename *fname)
{
- int err = 0;
fname->usr_fname = iname;
fname->disk_name.name = (unsigned char *) iname->name;
fname->disk_name.len = iname->len;
-#if IS_ENABLED(CONFIG_UNICODE)
- err = ext4_fname_setup_ci_filename(dir, iname, fname);
-#endif
-
- return err;
+ return ext4_fname_setup_ci_filename(dir, iname, fname);
}
static inline int ext4_fname_prepare_lookup(struct inode *dir,
@@ -2790,10 +2802,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir,
static inline void ext4_fname_free_filename(struct ext4_filename *fname)
{
-#if IS_ENABLED(CONFIG_UNICODE)
- kfree(fname->cf_name.name);
- fname->cf_name.name = NULL;
-#endif
+ ext4_fname_free_ci_filename(fname);
}
static inline int ext4_ioctl_get_encryption_pwsalt(struct file *filp,
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a630b27a4cc6..e6769b97a970 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1390,62 +1390,11 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
}
#if IS_ENABLED(CONFIG_UNICODE)
-/*
- * Test whether a case-insensitive directory entry matches the filename
- * being searched for. If quick is set, assume the name being looked up
- * is already in the casefolded form.
- *
- * Returns: 0 if the directory entry matches, more than 0 if it
- * doesn't match or less than zero on error.
- */
-static int ext4_ci_compare(const struct inode *parent, const struct qstr *name,
- u8 *de_name, size_t de_name_len, bool quick)
-{
- const struct super_block *sb = parent->i_sb;
- const struct unicode_map *um = sb->s_encoding;
- struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
- struct qstr entry = QSTR_INIT(de_name, de_name_len);
- int ret;
-
- if (IS_ENCRYPTED(parent)) {
- const struct fscrypt_str encrypted_name =
- FSTR_INIT(de_name, de_name_len);
-
- decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
- if (!decrypted_name.name)
- return -ENOMEM;
- ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name,
- &decrypted_name);
- if (ret < 0)
- goto out;
- entry.name = decrypted_name.name;
- entry.len = decrypted_name.len;
- }
-
- if (quick)
- ret = utf8_strncasecmp_folded(um, name, &entry);
- else
- ret = utf8_strncasecmp(um, name, &entry);
- if (ret < 0) {
- /* Handle invalid character sequence as either an error
- * or as an opaque byte sequence.
- */
- if (sb_has_strict_encoding(sb))
- ret = -EINVAL;
- else if (name->len != entry.len)
- ret = 1;
- else
- ret = !!memcmp(name->name, entry.name, entry.len);
- }
-out:
- kfree(decrypted_name.name);
- return ret;
-}
-
int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
struct ext4_filename *name)
{
- struct fscrypt_str *cf_name = &name->cf_name;
+ struct qstr *cf_name = &name->cf_name;
+ unsigned char *buf;
struct dx_hash_info *hinfo = &name->hinfo;
int len;
@@ -1455,18 +1404,18 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
return 0;
}
- cf_name->name = kmalloc(EXT4_NAME_LEN, GFP_NOFS);
- if (!cf_name->name)
+ buf = kmalloc(EXT4_NAME_LEN, GFP_NOFS);
+ if (!buf)
return -ENOMEM;
- len = utf8_casefold(dir->i_sb->s_encoding,
- iname, cf_name->name,
- EXT4_NAME_LEN);
+ len = utf8_casefold(dir->i_sb->s_encoding, iname, buf, EXT4_NAME_LEN);
if (len <= 0) {
- kfree(cf_name->name);
- cf_name->name = NULL;
+ kfree(buf);
+ buf = NULL;
}
+ cf_name->name = buf;
cf_name->len = (unsigned) len;
+
if (!IS_ENCRYPTED(dir))
return 0;
@@ -1502,22 +1451,29 @@ static bool ext4_match(struct inode *parent,
#if IS_ENABLED(CONFIG_UNICODE)
if (IS_CASEFOLDED(parent) &&
(!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
- if (fname->cf_name.name) {
- struct qstr cf = {.name = fname->cf_name.name,
- .len = fname->cf_name.len};
- if (IS_ENCRYPTED(parent)) {
- if (fname->hinfo.hash != EXT4_DIRENT_HASH(de) ||
- fname->hinfo.minor_hash !=
- EXT4_DIRENT_MINOR_HASH(de)) {
-
- return false;
- }
- }
- return !ext4_ci_compare(parent, &cf, de->name,
- de->name_len, true);
- }
- return !ext4_ci_compare(parent, fname->usr_fname, de->name,
- de->name_len, false);
+ /*
+ * Just checking IS_ENCRYPTED(parent) below is not
+ * sufficient to decide whether one can use the hash for
+ * skipping the string comparison, because the key might
+ * have been added right after
+ * ext4_fname_setup_ci_filename(). In this case, a hash
+ * mismatch will be a false negative. Therefore, make
+ * sure cf_name was properly initialized before
+ * considering the calculated hash.
+ */
+ if (IS_ENCRYPTED(parent) && fname->cf_name.name &&
+ (fname->hinfo.hash != EXT4_DIRENT_HASH(de) ||
+ fname->hinfo.minor_hash != EXT4_DIRENT_MINOR_HASH(de)))
+ return false;
+ /*
+ * Treat comparison errors as not a match. The
+			 * only cases where this happens are disk
+			 * corruption or -ENOMEM.
+ */
+
+ return generic_ci_match(parent, fname->usr_fname,
+ &fname->cf_name, de->name,
+ de->name_len) > 0;
}
#endif
@@ -1869,8 +1825,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
}
}
-#if IS_ENABLED(CONFIG_UNICODE)
- if (!inode && IS_CASEFOLDED(dir)) {
+ if (IS_ENABLED(CONFIG_UNICODE) && !inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
* well. For now, prevent the negative dentry
@@ -1878,7 +1833,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
*/
return NULL;
}
-#endif
+
return d_splice_alias(inode, dentry);
}
@@ -3208,16 +3163,14 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
ext4_fc_track_unlink(handle, dentry);
retval = ext4_mark_inode_dirty(handle, dir);
-#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
* negative dentries at ext4_lookup(), when it is better
* supported by the VFS for the CI case.
*/
- if (IS_CASEFOLDED(dir))
+ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
d_invalidate(dentry);
-#endif
end_rmdir:
brelse(bh);
@@ -3319,16 +3272,15 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
goto out_trace;
retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry), dentry);
-#if IS_ENABLED(CONFIG_UNICODE)
+
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
* negative dentries at ext4_lookup(), when it is better
* supported by the VFS for the CI case.
*/
- if (IS_CASEFOLDED(dir))
+ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
d_invalidate(dentry);
-#endif
out_trace:
trace_ext4_unlink_exit(dentry, retval);
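
Both ext4_match() above and f2fs_match_name() later in this diff now funnel case-insensitive comparisons through generic_ci_match(), whose calling convention is: greater than zero on a match, zero on a mismatch, negative errno on failure. An illustrative fragment (demo_ci_match is not a real function; it just restates how ext4_match() consumes the result, folding errors into "no match"):

static bool demo_ci_match(struct inode *parent, struct ext4_filename *fname,
			  struct ext4_dir_entry_2 *de)
{
	int ret = generic_ci_match(parent, fname->usr_fname, &fname->cf_name,
				   de->name, de->name_len);

	/* ret < 0 (corruption or -ENOMEM) and ret == 0 both mean no match. */
	return ret > 0;
}
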
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c682fb927b64..eb899628e121 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1721,8 +1721,8 @@ static const struct fs_parameter_spec ext4_param_specs[] = {
fsparam_flag ("bsdgroups", Opt_grpid),
fsparam_flag ("nogrpid", Opt_nogrpid),
fsparam_flag ("sysvgroups", Opt_nogrpid),
- fsparam_u32 ("resgid", Opt_resgid),
- fsparam_u32 ("resuid", Opt_resuid),
+ fsparam_gid ("resgid", Opt_resgid),
+ fsparam_uid ("resuid", Opt_resuid),
fsparam_u32 ("sb", Opt_sb),
fsparam_enum ("errors", Opt_errors, ext4_param_errors),
fsparam_flag ("nouid32", Opt_nouid32),
@@ -2127,8 +2127,6 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
struct fs_parse_result result;
const struct mount_opts *m;
int is_remount;
- kuid_t uid;
- kgid_t gid;
int token;
token = fs_parse(fc, ext4_param_specs, param, &result);
@@ -2270,23 +2268,11 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
ctx->spec |= EXT4_SPEC_s_stripe;
return 0;
case Opt_resuid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid)) {
- ext4_msg(NULL, KERN_ERR, "Invalid uid value %d",
- result.uint_32);
- return -EINVAL;
- }
- ctx->s_resuid = uid;
+ ctx->s_resuid = result.uid;
ctx->spec |= EXT4_SPEC_s_resuid;
return 0;
case Opt_resgid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid)) {
- ext4_msg(NULL, KERN_ERR, "Invalid gid value %d",
- result.uint_32);
- return -EINVAL;
- }
- ctx->s_resgid = gid;
+ ctx->s_resgid = result.gid;
ctx->spec |= EXT4_SPEC_s_resgid;
return 0;
case Opt_journal_dev:
@@ -3586,14 +3572,12 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0;
}
-#if !IS_ENABLED(CONFIG_UNICODE)
- if (ext4_has_feature_casefold(sb)) {
+ if (!IS_ENABLED(CONFIG_UNICODE) && ext4_has_feature_casefold(sb)) {
ext4_msg(sb, KERN_ERR,
"Filesystem with casefold feature cannot be "
"mounted without CONFIG_UNICODE");
return 0;
}
-#endif
if (readonly)
return 1;
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index ec2aeccb69a3..8bffdeccdbc3 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -219,8 +219,7 @@ static int f2fs_acl_update_mode(struct mnt_idmap *idmap,
return error;
if (error == 0)
*acl = NULL;
- if (!vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)) &&
- !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
+ if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
*mode_p = mode;
return 0;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 02c9355176d3..cbd7a5e96a37 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -42,35 +42,49 @@ static unsigned int bucket_blocks(unsigned int level)
return 4;
}
+#if IS_ENABLED(CONFIG_UNICODE)
/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */
int f2fs_init_casefolded_name(const struct inode *dir,
struct f2fs_filename *fname)
{
-#if IS_ENABLED(CONFIG_UNICODE)
struct super_block *sb = dir->i_sb;
+ unsigned char *buf;
+ int len;
if (IS_CASEFOLDED(dir) &&
!is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) {
- fname->cf_name.name = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
- GFP_NOFS, false, F2FS_SB(sb));
- if (!fname->cf_name.name)
+ buf = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
+ GFP_NOFS, false, F2FS_SB(sb));
+ if (!buf)
return -ENOMEM;
- fname->cf_name.len = utf8_casefold(sb->s_encoding,
- fname->usr_fname,
- fname->cf_name.name,
- F2FS_NAME_LEN);
- if ((int)fname->cf_name.len <= 0) {
- kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
- fname->cf_name.name = NULL;
+
+ len = utf8_casefold(sb->s_encoding, fname->usr_fname,
+ buf, F2FS_NAME_LEN);
+ if (len <= 0) {
+ kmem_cache_free(f2fs_cf_name_slab, buf);
if (sb_has_strict_encoding(sb))
return -EINVAL;
/* fall back to treating name as opaque byte sequence */
+ return 0;
}
+ fname->cf_name.name = buf;
+ fname->cf_name.len = len;
}
-#endif
+
return 0;
}
+void f2fs_free_casefolded_name(struct f2fs_filename *fname)
+{
+ unsigned char *buf = (unsigned char *)fname->cf_name.name;
+
+ if (buf) {
+ kmem_cache_free(f2fs_cf_name_slab, buf);
+ fname->cf_name.name = NULL;
+ }
+}
+#endif /* CONFIG_UNICODE */
+
static int __f2fs_setup_filename(const struct inode *dir,
const struct fscrypt_name *crypt_name,
struct f2fs_filename *fname)
@@ -142,12 +156,7 @@ void f2fs_free_filename(struct f2fs_filename *fname)
kfree(fname->crypto_buf.name);
fname->crypto_buf.name = NULL;
#endif
-#if IS_ENABLED(CONFIG_UNICODE)
- if (fname->cf_name.name) {
- kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
- fname->cf_name.name = NULL;
- }
-#endif
+ f2fs_free_casefolded_name(fname);
}
static unsigned long dir_block_index(unsigned int level,
@@ -176,58 +185,6 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
return f2fs_find_target_dentry(&d, fname, max_slots);
}
-#if IS_ENABLED(CONFIG_UNICODE)
-/*
- * Test whether a case-insensitive directory entry matches the filename
- * being searched for.
- *
- * Returns 1 for a match, 0 for no match, and -errno on an error.
- */
-static int f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
- const u8 *de_name, u32 de_name_len)
-{
- const struct super_block *sb = dir->i_sb;
- const struct unicode_map *um = sb->s_encoding;
- struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
- struct qstr entry = QSTR_INIT(de_name, de_name_len);
- int res;
-
- if (IS_ENCRYPTED(dir)) {
- const struct fscrypt_str encrypted_name =
- FSTR_INIT((u8 *)de_name, de_name_len);
-
- if (WARN_ON_ONCE(!fscrypt_has_encryption_key(dir)))
- return -EINVAL;
-
- decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
- if (!decrypted_name.name)
- return -ENOMEM;
- res = fscrypt_fname_disk_to_usr(dir, 0, 0, &encrypted_name,
- &decrypted_name);
- if (res < 0)
- goto out;
- entry.name = decrypted_name.name;
- entry.len = decrypted_name.len;
- }
-
- res = utf8_strncasecmp_folded(um, name, &entry);
- /*
- * In strict mode, ignore invalid names. In non-strict mode,
- * fall back to treating them as opaque byte sequences.
- */
- if (res < 0 && !sb_has_strict_encoding(sb)) {
- res = name->len == entry.len &&
- memcmp(name->name, entry.name, name->len) == 0;
- } else {
- /* utf8_strncasecmp_folded returns 0 on match */
- res = (res == 0);
- }
-out:
- kfree(decrypted_name.name);
- return res;
-}
-#endif /* CONFIG_UNICODE */
-
static inline int f2fs_match_name(const struct inode *dir,
const struct f2fs_filename *fname,
const u8 *de_name, u32 de_name_len)
@@ -235,11 +192,11 @@ static inline int f2fs_match_name(const struct inode *dir,
struct fscrypt_name f;
#if IS_ENABLED(CONFIG_UNICODE)
- if (fname->cf_name.name) {
- struct qstr cf = FSTR_TO_QSTR(&fname->cf_name);
+ if (fname->cf_name.name)
+ return generic_ci_match(dir, fname->usr_fname,
+ &fname->cf_name,
+ de_name, de_name_len);
- return f2fs_match_ci_name(dir, &cf, de_name, de_name_len);
- }
#endif
f.usr_fname = fname->usr_fname;
f.disk_name = fname->disk_name;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 1974b6aff397..8a9d910aa552 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -531,7 +531,7 @@ struct f2fs_filename {
* internal operation where usr_fname is also NULL. In all these cases
* we fall back to treating the name as an opaque byte sequence.
*/
- struct fscrypt_str cf_name;
+ struct qstr cf_name;
#endif
};
@@ -3533,8 +3533,22 @@ int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
/*
* dir.c
*/
+#if IS_ENABLED(CONFIG_UNICODE)
int f2fs_init_casefolded_name(const struct inode *dir,
struct f2fs_filename *fname);
+void f2fs_free_casefolded_name(struct f2fs_filename *fname);
+#else
+static inline int f2fs_init_casefolded_name(const struct inode *dir,
+ struct f2fs_filename *fname)
+{
+ return 0;
+}
+
+static inline void f2fs_free_casefolded_name(struct f2fs_filename *fname)
+{
+}
+#endif /* CONFIG_UNICODE */
+
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 5c0b281a70f3..c1ad9b278c47 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -185,7 +185,7 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
if (!dentry)
return 0;
- *pino = parent_ino(dentry);
+ *pino = d_parent_ino(dentry);
dput(dentry);
return 1;
}
@@ -923,10 +923,8 @@ static void __setattr_copy(struct mnt_idmap *idmap,
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
- vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
- if (!vfsgid_in_group_p(vfsgid) &&
- !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
+ if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
set_acl_inode(inode, mode);
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index e54f8c08bda8..1ecde2b45e99 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -576,8 +576,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out_iput;
}
out_splice:
-#if IS_ENABLED(CONFIG_UNICODE)
- if (!inode && IS_CASEFOLDED(dir)) {
+ if (IS_ENABLED(CONFIG_UNICODE) && !inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
* well. For now, prevent the negative dentry
@@ -586,7 +585,7 @@ out_splice:
trace_f2fs_lookup_end(dir, dentry, ino, err);
return NULL;
}
-#endif
+
new = d_splice_alias(inode, dentry);
trace_f2fs_lookup_end(dir, !IS_ERR_OR_NULL(new) ? new : dentry,
ino, IS_ERR(new) ? PTR_ERR(new) : err);
@@ -639,16 +638,15 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
f2fs_delete_entry(de, page, dir, inode);
f2fs_unlock_op(sbi);
-#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
* negative dentries at f2fs_lookup(), when it is better
* supported by the VFS for the CI case.
*/
- if (IS_CASEFOLDED(dir))
+ if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
d_invalidate(dentry);
-#endif
+
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
fail:
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 496aee53c38a..8712e264071f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -46,10 +46,6 @@
static struct kmem_cache *fsync_entry_slab;
-#if IS_ENABLED(CONFIG_UNICODE)
-extern struct kmem_cache *f2fs_cf_name_slab;
-#endif
-
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
@@ -153,11 +149,8 @@ static int init_recovered_filename(const struct inode *dir,
if (err)
return err;
f2fs_hash_filename(dir, fname);
-#if IS_ENABLED(CONFIG_UNICODE)
/* Case-sensitive match is fine for recovery */
- kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
- fname->cf_name.name = NULL;
-#endif
+ f2fs_free_casefolded_name(fname);
} else {
f2fs_hash_filename(dir, fname);
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 1f1b3647a998..df4cf31f93df 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -321,7 +321,7 @@ struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
- F2FS_NAME_LEN);
+ F2FS_NAME_LEN);
return f2fs_cf_name_slab ? 0 : -ENOMEM;
}
@@ -1326,13 +1326,13 @@ default_check:
return -EINVAL;
}
#endif
-#if !IS_ENABLED(CONFIG_UNICODE)
- if (f2fs_sb_has_casefold(sbi)) {
+
+ if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
f2fs_err(sbi,
"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
return -EINVAL;
}
-#endif
+
/*
* The BLKZONED feature indicates that the drive was formatted with
* zone alignment optimization. This is optional for host-aware
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 66cf4778cf3b..d3e426de5f01 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -7,6 +7,8 @@
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/msdos_fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
/*
* vfat shortname flags
@@ -51,7 +53,8 @@ struct fat_mount_options {
tz_set:1, /* Filesystem timestamps' offset set */
rodir:1, /* allow ATTR_RO for directory */
discard:1, /* Issue discard requests on deletions */
- dos1xfloppy:1; /* Assume default BPB for DOS 1.x floppies */
+ dos1xfloppy:1, /* Assume default BPB for DOS 1.x floppies */
+ debug:1; /* Not currently used */
};
#define FAT_HASH_BITS 8
@@ -415,12 +418,21 @@ extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos);
extern struct inode *fat_build_inode(struct super_block *sb,
struct msdos_dir_entry *de, loff_t i_pos);
extern int fat_sync_inode(struct inode *inode);
-extern int fat_fill_super(struct super_block *sb, void *data, int silent,
- int isvfat, void (*setup)(struct super_block *));
+extern int fat_fill_super(struct super_block *sb, struct fs_context *fc,
+ void (*setup)(struct super_block *));
extern int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de);
extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
struct inode *i2);
+
+extern const struct fs_parameter_spec fat_param_spec[];
+int fat_init_fs_context(struct fs_context *fc, bool is_vfat);
+void fat_free_fc(struct fs_context *fc);
+
+int fat_parse_param(struct fs_context *fc, struct fs_parameter *param,
+ bool is_vfat);
+int fat_reconfigure(struct fs_context *fc);
+
static inline unsigned long fat_dir_hash(int logstart)
{
return hash_32(logstart, FAT_HASH_BITS);
diff --git a/fs/fat/fat_test.c b/fs/fat/fat_test.c
index 2dab4ca1d0d8..1f0062659067 100644
--- a/fs/fat/fat_test.c
+++ b/fs/fat/fat_test.c
@@ -193,4 +193,5 @@ static struct kunit_suite fat_test_suite = {
kunit_test_suites(&fat_test_suite);
+MODULE_DESCRIPTION("KUnit tests for FAT filesystems");
MODULE_LICENSE("GPL v2");
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index d9e6fbb6f246..19115fd2d2a4 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -16,7 +16,6 @@
#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
-#include <linux/parser.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
@@ -804,16 +803,17 @@ static void __exit fat_destroy_inodecache(void)
kmem_cache_destroy(fat_inode_cachep);
}
-static int fat_remount(struct super_block *sb, int *flags, char *data)
+int fat_reconfigure(struct fs_context *fc)
{
bool new_rdonly;
+ struct super_block *sb = fc->root->d_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
+ fc->sb_flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
sync_filesystem(sb);
/* make sure we update state on remount. */
- new_rdonly = *flags & SB_RDONLY;
+ new_rdonly = fc->sb_flags & SB_RDONLY;
if (new_rdonly != sb_rdonly(sb)) {
if (new_rdonly)
fat_set_state(sb, 0, 0);
@@ -822,6 +822,7 @@ static int fat_remount(struct super_block *sb, int *flags, char *data)
}
return 0;
}
+EXPORT_SYMBOL_GPL(fat_reconfigure);
static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -939,8 +940,6 @@ static const struct super_operations fat_sops = {
.evict_inode = fat_evict_inode,
.put_super = fat_put_super,
.statfs = fat_statfs,
- .remount_fs = fat_remount,
-
.show_options = fat_show_options,
};
@@ -1037,355 +1036,282 @@ static int fat_show_options(struct seq_file *m, struct dentry *root)
}
enum {
- Opt_check_n, Opt_check_r, Opt_check_s, Opt_uid, Opt_gid,
- Opt_umask, Opt_dmask, Opt_fmask, Opt_allow_utime, Opt_codepage,
- Opt_usefree, Opt_nocase, Opt_quiet, Opt_showexec, Opt_debug,
- Opt_immutable, Opt_dots, Opt_nodots,
- Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
- Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
- Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
- Opt_obsolete, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
- Opt_err_panic, Opt_err_ro, Opt_discard, Opt_nfs, Opt_time_offset,
- Opt_nfs_stale_rw, Opt_nfs_nostale_ro, Opt_err, Opt_dos1xfloppy,
+ Opt_check, Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask,
+ Opt_allow_utime, Opt_codepage, Opt_usefree, Opt_nocase, Opt_quiet,
+ Opt_showexec, Opt_debug, Opt_immutable, Opt_dots, Opt_dotsOK,
+ Opt_charset, Opt_shortname, Opt_utf8, Opt_utf8_bool,
+ Opt_uni_xl, Opt_uni_xl_bool, Opt_nonumtail, Opt_nonumtail_bool,
+ Opt_obsolete, Opt_flush, Opt_tz, Opt_rodir, Opt_errors, Opt_discard,
+ Opt_nfs, Opt_nfs_enum, Opt_time_offset, Opt_dos1xfloppy,
};
-static const match_table_t fat_tokens = {
- {Opt_check_r, "check=relaxed"},
- {Opt_check_s, "check=strict"},
- {Opt_check_n, "check=normal"},
- {Opt_check_r, "check=r"},
- {Opt_check_s, "check=s"},
- {Opt_check_n, "check=n"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_umask, "umask=%o"},
- {Opt_dmask, "dmask=%o"},
- {Opt_fmask, "fmask=%o"},
- {Opt_allow_utime, "allow_utime=%o"},
- {Opt_codepage, "codepage=%u"},
- {Opt_usefree, "usefree"},
- {Opt_nocase, "nocase"},
- {Opt_quiet, "quiet"},
- {Opt_showexec, "showexec"},
- {Opt_debug, "debug"},
- {Opt_immutable, "sys_immutable"},
- {Opt_flush, "flush"},
- {Opt_tz_utc, "tz=UTC"},
- {Opt_time_offset, "time_offset=%d"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_discard, "discard"},
- {Opt_nfs_stale_rw, "nfs"},
- {Opt_nfs_stale_rw, "nfs=stale_rw"},
- {Opt_nfs_nostale_ro, "nfs=nostale_ro"},
- {Opt_dos1xfloppy, "dos1xfloppy"},
- {Opt_obsolete, "conv=binary"},
- {Opt_obsolete, "conv=text"},
- {Opt_obsolete, "conv=auto"},
- {Opt_obsolete, "conv=b"},
- {Opt_obsolete, "conv=t"},
- {Opt_obsolete, "conv=a"},
- {Opt_obsolete, "fat=%u"},
- {Opt_obsolete, "blocksize=%u"},
- {Opt_obsolete, "cvf_format=%20s"},
- {Opt_obsolete, "cvf_options=%100s"},
- {Opt_obsolete, "posix"},
- {Opt_err, NULL},
-};
-static const match_table_t msdos_tokens = {
- {Opt_nodots, "nodots"},
- {Opt_nodots, "dotsOK=no"},
- {Opt_dots, "dots"},
- {Opt_dots, "dotsOK=yes"},
- {Opt_err, NULL}
+static const struct constant_table fat_param_check[] = {
+ {"relaxed", 'r'},
+ {"r", 'r'},
+ {"strict", 's'},
+ {"s", 's'},
+ {"normal", 'n'},
+ {"n", 'n'},
+ {}
};
-static const match_table_t vfat_tokens = {
- {Opt_charset, "iocharset=%s"},
- {Opt_shortname_lower, "shortname=lower"},
- {Opt_shortname_win95, "shortname=win95"},
- {Opt_shortname_winnt, "shortname=winnt"},
- {Opt_shortname_mixed, "shortname=mixed"},
- {Opt_utf8_no, "utf8=0"}, /* 0 or no or false */
- {Opt_utf8_no, "utf8=no"},
- {Opt_utf8_no, "utf8=false"},
- {Opt_utf8_yes, "utf8=1"}, /* empty or 1 or yes or true */
- {Opt_utf8_yes, "utf8=yes"},
- {Opt_utf8_yes, "utf8=true"},
- {Opt_utf8_yes, "utf8"},
- {Opt_uni_xl_no, "uni_xlate=0"}, /* 0 or no or false */
- {Opt_uni_xl_no, "uni_xlate=no"},
- {Opt_uni_xl_no, "uni_xlate=false"},
- {Opt_uni_xl_yes, "uni_xlate=1"}, /* empty or 1 or yes or true */
- {Opt_uni_xl_yes, "uni_xlate=yes"},
- {Opt_uni_xl_yes, "uni_xlate=true"},
- {Opt_uni_xl_yes, "uni_xlate"},
- {Opt_nonumtail_no, "nonumtail=0"}, /* 0 or no or false */
- {Opt_nonumtail_no, "nonumtail=no"},
- {Opt_nonumtail_no, "nonumtail=false"},
- {Opt_nonumtail_yes, "nonumtail=1"}, /* empty or 1 or yes or true */
- {Opt_nonumtail_yes, "nonumtail=yes"},
- {Opt_nonumtail_yes, "nonumtail=true"},
- {Opt_nonumtail_yes, "nonumtail"},
- {Opt_rodir, "rodir"},
- {Opt_err, NULL}
+
+static const struct constant_table fat_param_tz[] = {
+ {"UTC", 0},
+ {}
};
-static int parse_options(struct super_block *sb, char *options, int is_vfat,
- int silent, int *debug, struct fat_mount_options *opts)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- char *iocharset;
+static const struct constant_table fat_param_errors[] = {
+ {"continue", FAT_ERRORS_CONT},
+ {"panic", FAT_ERRORS_PANIC},
+ {"remount-ro", FAT_ERRORS_RO},
+ {}
+};
- opts->isvfat = is_vfat;
- opts->fs_uid = current_uid();
- opts->fs_gid = current_gid();
- opts->fs_fmask = opts->fs_dmask = current_umask();
- opts->allow_utime = -1;
- opts->codepage = fat_default_codepage;
- fat_reset_iocharset(opts);
- if (is_vfat) {
- opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
- opts->rodir = 0;
- } else {
- opts->shortname = 0;
- opts->rodir = 1;
- }
- opts->name_check = 'n';
- opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0;
- opts->unicode_xlate = 0;
- opts->numtail = 1;
- opts->usefree = opts->nocase = 0;
- opts->tz_set = 0;
- opts->nfs = 0;
- opts->errors = FAT_ERRORS_RO;
- *debug = 0;
+static const struct constant_table fat_param_nfs[] = {
+ {"stale_rw", FAT_NFS_STALE_RW},
+ {"nostale_ro", FAT_NFS_NOSTALE_RO},
+ {}
+};
- opts->utf8 = IS_ENABLED(CONFIG_FAT_DEFAULT_UTF8) && is_vfat;
+/*
+ * These are all obsolete but we still reject invalid options.
+ * The corresponding values are therefore meaningless.
+ */
+static const struct constant_table fat_param_conv[] = {
+ {"binary", 0},
+ {"text", 0},
+ {"auto", 0},
+ {"b", 0},
+ {"t", 0},
+ {"a", 0},
+ {}
+};
- if (!options)
- goto out;
+/* Core options. See below for vfat and msdos extras */
+const struct fs_parameter_spec fat_param_spec[] = {
+ fsparam_enum ("check", Opt_check, fat_param_check),
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
+ fsparam_u32oct ("umask", Opt_umask),
+ fsparam_u32oct ("dmask", Opt_dmask),
+ fsparam_u32oct ("fmask", Opt_fmask),
+ fsparam_u32oct ("allow_utime", Opt_allow_utime),
+ fsparam_u32 ("codepage", Opt_codepage),
+ fsparam_flag ("usefree", Opt_usefree),
+ fsparam_flag ("nocase", Opt_nocase),
+ fsparam_flag ("quiet", Opt_quiet),
+ fsparam_flag ("showexec", Opt_showexec),
+ fsparam_flag ("debug", Opt_debug),
+ fsparam_flag ("sys_immutable", Opt_immutable),
+ fsparam_flag ("flush", Opt_flush),
+ fsparam_enum ("tz", Opt_tz, fat_param_tz),
+ fsparam_s32 ("time_offset", Opt_time_offset),
+ fsparam_enum ("errors", Opt_errors, fat_param_errors),
+ fsparam_flag ("discard", Opt_discard),
+ fsparam_flag ("nfs", Opt_nfs),
+ fsparam_enum ("nfs", Opt_nfs_enum, fat_param_nfs),
+ fsparam_flag ("dos1xfloppy", Opt_dos1xfloppy),
+ __fsparam(fs_param_is_enum, "conv",
+ Opt_obsolete, fs_param_deprecated, fat_param_conv),
+ __fsparam(fs_param_is_u32, "fat",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(fs_param_is_u32, "blocksize",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(fs_param_is_string, "cvf_format",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(fs_param_is_string, "cvf_options",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ __fsparam(NULL, "posix",
+ Opt_obsolete, fs_param_deprecated, NULL),
+ {}
+};
+EXPORT_SYMBOL_GPL(fat_param_spec);
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
+static const struct fs_parameter_spec msdos_param_spec[] = {
+ fsparam_flag_no ("dots", Opt_dots),
+ fsparam_bool ("dotsOK", Opt_dotsOK),
+ {}
+};
- token = match_token(p, fat_tokens, args);
- if (token == Opt_err) {
- if (is_vfat)
- token = match_token(p, vfat_tokens, args);
- else
- token = match_token(p, msdos_tokens, args);
- }
- switch (token) {
- case Opt_check_s:
- opts->name_check = 's';
- break;
- case Opt_check_r:
- opts->name_check = 'r';
- break;
- case Opt_check_n:
- opts->name_check = 'n';
- break;
- case Opt_usefree:
- opts->usefree = 1;
- break;
- case Opt_nocase:
- if (!is_vfat)
- opts->nocase = 1;
- else {
- /* for backward compatibility */
- opts->shortname = VFAT_SFN_DISPLAY_WIN95
- | VFAT_SFN_CREATE_WIN95;
- }
- break;
- case Opt_quiet:
- opts->quiet = 1;
- break;
- case Opt_showexec:
- opts->showexec = 1;
- break;
- case Opt_debug:
- *debug = 1;
- break;
- case Opt_immutable:
- opts->sys_immutable = 1;
- break;
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->fs_uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(opts->fs_uid))
- return -EINVAL;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->fs_gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(opts->fs_gid))
- return -EINVAL;
- break;
- case Opt_umask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_fmask = opts->fs_dmask = option;
- break;
- case Opt_dmask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_dmask = option;
- break;
- case Opt_fmask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_fmask = option;
- break;
- case Opt_allow_utime:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->allow_utime = option & (S_IWGRP | S_IWOTH);
- break;
- case Opt_codepage:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->codepage = option;
- break;
- case Opt_flush:
- opts->flush = 1;
- break;
- case Opt_time_offset:
- if (match_int(&args[0], &option))
- return -EINVAL;
- /*
- * GMT+-12 zones may have DST corrections so at least
- * 13 hours difference is needed. Make the limit 24
- * just in case someone invents something unusual.
- */
- if (option < -24 * 60 || option > 24 * 60)
- return -EINVAL;
- opts->tz_set = 1;
- opts->time_offset = option;
- break;
- case Opt_tz_utc:
- opts->tz_set = 1;
- opts->time_offset = 0;
- break;
- case Opt_err_cont:
- opts->errors = FAT_ERRORS_CONT;
- break;
- case Opt_err_panic:
- opts->errors = FAT_ERRORS_PANIC;
- break;
- case Opt_err_ro:
- opts->errors = FAT_ERRORS_RO;
- break;
- case Opt_nfs_stale_rw:
- opts->nfs = FAT_NFS_STALE_RW;
- break;
- case Opt_nfs_nostale_ro:
- opts->nfs = FAT_NFS_NOSTALE_RO;
- break;
- case Opt_dos1xfloppy:
- opts->dos1xfloppy = 1;
- break;
+static const struct constant_table fat_param_shortname[] = {
+ {"lower", VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95},
+ {"win95", VFAT_SFN_DISPLAY_WIN95 | VFAT_SFN_CREATE_WIN95},
+ {"winnt", VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WINNT},
+ {"mixed", VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WIN95},
+ {}
+};
- /* msdos specific */
- case Opt_dots:
- opts->dotsOK = 1;
- break;
- case Opt_nodots:
- opts->dotsOK = 0;
- break;
+static const struct fs_parameter_spec vfat_param_spec[] = {
+ fsparam_string ("iocharset", Opt_charset),
+ fsparam_enum ("shortname", Opt_shortname, fat_param_shortname),
+ fsparam_flag ("utf8", Opt_utf8),
+ fsparam_bool ("utf8", Opt_utf8_bool),
+ fsparam_flag ("uni_xlate", Opt_uni_xl),
+ fsparam_bool ("uni_xlate", Opt_uni_xl_bool),
+ fsparam_flag ("nonumtail", Opt_nonumtail),
+ fsparam_bool ("nonumtail", Opt_nonumtail_bool),
+ fsparam_flag ("rodir", Opt_rodir),
+ {}
+};
- /* vfat specific */
- case Opt_charset:
- fat_reset_iocharset(opts);
- iocharset = match_strdup(&args[0]);
- if (!iocharset)
- return -ENOMEM;
- opts->iocharset = iocharset;
- break;
- case Opt_shortname_lower:
- opts->shortname = VFAT_SFN_DISPLAY_LOWER
- | VFAT_SFN_CREATE_WIN95;
- break;
- case Opt_shortname_win95:
- opts->shortname = VFAT_SFN_DISPLAY_WIN95
- | VFAT_SFN_CREATE_WIN95;
- break;
- case Opt_shortname_winnt:
- opts->shortname = VFAT_SFN_DISPLAY_WINNT
- | VFAT_SFN_CREATE_WINNT;
- break;
- case Opt_shortname_mixed:
- opts->shortname = VFAT_SFN_DISPLAY_WINNT
- | VFAT_SFN_CREATE_WIN95;
- break;
- case Opt_utf8_no: /* 0 or no or false */
- opts->utf8 = 0;
- break;
- case Opt_utf8_yes: /* empty or 1 or yes or true */
- opts->utf8 = 1;
- break;
- case Opt_uni_xl_no: /* 0 or no or false */
- opts->unicode_xlate = 0;
- break;
- case Opt_uni_xl_yes: /* empty or 1 or yes or true */
- opts->unicode_xlate = 1;
- break;
- case Opt_nonumtail_no: /* 0 or no or false */
- opts->numtail = 1; /* negated option */
- break;
- case Opt_nonumtail_yes: /* empty or 1 or yes or true */
- opts->numtail = 0; /* negated option */
- break;
- case Opt_rodir:
- opts->rodir = 1;
- break;
- case Opt_discard:
- opts->discard = 1;
- break;
+int fat_parse_param(struct fs_context *fc, struct fs_parameter *param,
+ bool is_vfat)
+{
+ struct fat_mount_options *opts = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- /* obsolete mount options */
- case Opt_obsolete:
- fat_msg(sb, KERN_INFO, "\"%s\" option is obsolete, "
- "not supported now", p);
- break;
- /* unknown option */
- default:
- if (!silent) {
- fat_msg(sb, KERN_ERR,
- "Unrecognized mount option \"%s\" "
- "or missing value", p);
- }
- return -EINVAL;
- }
- }
+ /* remount options have traditionally been ignored */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+ return 0;
-out:
- /* UTF-8 doesn't provide FAT semantics */
- if (!strcmp(opts->iocharset, "utf8")) {
- fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset"
- " for FAT filesystems, filesystem will be "
- "case sensitive!");
+ opt = fs_parse(fc, fat_param_spec, param, &result);
+ /* If option not found in fat_param_spec, try vfat/msdos options */
+ if (opt == -ENOPARAM) {
+ if (is_vfat)
+ opt = fs_parse(fc, vfat_param_spec, param, &result);
+ else
+ opt = fs_parse(fc, msdos_param_spec, param, &result);
}
- /* If user doesn't specify allow_utime, it's initialized from dmask. */
- if (opts->allow_utime == (unsigned short)-1)
- opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
- if (opts->unicode_xlate)
- opts->utf8 = 0;
- if (opts->nfs == FAT_NFS_NOSTALE_RO) {
- sb->s_flags |= SB_RDONLY;
- sb->s_export_op = &fat_export_ops_nostale;
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_check:
+ opts->name_check = result.uint_32;
+ break;
+ case Opt_usefree:
+ opts->usefree = 1;
+ break;
+ case Opt_nocase:
+ if (!is_vfat)
+ opts->nocase = 1;
+ else {
+ /* for backward compatibility */
+ opts->shortname = VFAT_SFN_DISPLAY_WIN95
+ | VFAT_SFN_CREATE_WIN95;
+ }
+ break;
+ case Opt_quiet:
+ opts->quiet = 1;
+ break;
+ case Opt_showexec:
+ opts->showexec = 1;
+ break;
+ case Opt_debug:
+ opts->debug = 1;
+ break;
+ case Opt_immutable:
+ opts->sys_immutable = 1;
+ break;
+ case Opt_uid:
+ opts->fs_uid = result.uid;
+ break;
+ case Opt_gid:
+ opts->fs_gid = result.gid;
+ break;
+ case Opt_umask:
+ opts->fs_fmask = opts->fs_dmask = result.uint_32;
+ break;
+ case Opt_dmask:
+ opts->fs_dmask = result.uint_32;
+ break;
+ case Opt_fmask:
+ opts->fs_fmask = result.uint_32;
+ break;
+ case Opt_allow_utime:
+ opts->allow_utime = result.uint_32 & (S_IWGRP | S_IWOTH);
+ break;
+ case Opt_codepage:
+ opts->codepage = result.uint_32;
+ break;
+ case Opt_flush:
+ opts->flush = 1;
+ break;
+ case Opt_time_offset:
+ /*
+ * GMT+-12 zones may have DST corrections so at least
+ * 13 hours difference is needed. Make the limit 24
+ * just in case someone invents something unusual.
+ */
+ if (result.int_32 < -24 * 60 || result.int_32 > 24 * 60)
+ return -EINVAL;
+ opts->tz_set = 1;
+ opts->time_offset = result.int_32;
+ break;
+ case Opt_tz:
+ opts->tz_set = 1;
+ opts->time_offset = result.uint_32;
+ break;
+ case Opt_errors:
+ opts->errors = result.uint_32;
+ break;
+ case Opt_nfs:
+ opts->nfs = FAT_NFS_STALE_RW;
+ break;
+ case Opt_nfs_enum:
+ opts->nfs = result.uint_32;
+ break;
+ case Opt_dos1xfloppy:
+ opts->dos1xfloppy = 1;
+ break;
+
+ /* msdos specific */
+ case Opt_dots: /* dots / nodots */
+ opts->dotsOK = !result.negated;
+ break;
+ case Opt_dotsOK: /* dotsOK = yes/no */
+ opts->dotsOK = result.boolean;
+ break;
+
+ /* vfat specific */
+ case Opt_charset:
+ fat_reset_iocharset(opts);
+ opts->iocharset = param->string;
+ param->string = NULL; /* Steal string */
+ break;
+ case Opt_shortname:
+ opts->shortname = result.uint_32;
+ break;
+ case Opt_utf8:
+ opts->utf8 = 1;
+ break;
+ case Opt_utf8_bool:
+ opts->utf8 = result.boolean;
+ break;
+ case Opt_uni_xl:
+ opts->unicode_xlate = 1;
+ break;
+ case Opt_uni_xl_bool:
+ opts->unicode_xlate = result.boolean;
+ break;
+ case Opt_nonumtail:
+ opts->numtail = 0; /* negated option */
+ break;
+ case Opt_nonumtail_bool:
+ opts->numtail = !result.boolean; /* negated option */
+ break;
+ case Opt_rodir:
+ opts->rodir = 1;
+ break;
+ case Opt_discard:
+ opts->discard = 1;
+ break;
+
+ /* obsolete mount options */
+ case Opt_obsolete:
+ printk(KERN_INFO "FAT-fs: \"%s\" option is obsolete, "
+ "not supported now", param->key);
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL_GPL(fat_parse_param);
static int fat_read_root(struct inode *inode)
{
@@ -1604,9 +1530,11 @@ out:
/*
* Read the super block of an MS-DOS FS.
*/
-int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+int fat_fill_super(struct super_block *sb, struct fs_context *fc,
void (*setup)(struct super_block *))
{
+ struct fat_mount_options *opts = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct inode *root_inode = NULL, *fat_inode = NULL;
struct inode *fsinfo_inode = NULL;
struct buffer_head *bh;
@@ -1614,7 +1542,6 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
struct msdos_sb_info *sbi;
u16 logical_sector_size;
u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
- int debug;
long error;
char buf[50];
struct timespec64 ts;
@@ -1643,9 +1570,27 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- error = parse_options(sb, data, isvfat, silent, &debug, &sbi->options);
- if (error)
- goto out_fail;
+ /* UTF-8 doesn't provide FAT semantics */
+ if (!strcmp(opts->iocharset, "utf8")) {
+ fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset"
+ " for FAT filesystems, filesystem will be"
+ " case sensitive!");
+ }
+
+ /* If user doesn't specify allow_utime, it's initialized from dmask. */
+ if (opts->allow_utime == (unsigned short)-1)
+ opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
+ if (opts->unicode_xlate)
+ opts->utf8 = 0;
+ if (opts->nfs == FAT_NFS_NOSTALE_RO) {
+ sb->s_flags |= SB_RDONLY;
+ sb->s_export_op = &fat_export_ops_nostale;
+ }
+
+ /* Apply parsed options to sbi (structure copy) */
+ sbi->options = *opts;
+ /* Transfer ownership of iocharset to sbi->options */
+ opts->iocharset = NULL;
setup(sb); /* flavour-specific stuff that needs options */
@@ -1950,6 +1895,57 @@ int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2)
}
EXPORT_SYMBOL_GPL(fat_flush_inodes);
+int fat_init_fs_context(struct fs_context *fc, bool is_vfat)
+{
+ struct fat_mount_options *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ opts->isvfat = is_vfat;
+ opts->fs_uid = current_uid();
+ opts->fs_gid = current_gid();
+ opts->fs_fmask = opts->fs_dmask = current_umask();
+ opts->allow_utime = -1;
+ opts->codepage = fat_default_codepage;
+ fat_reset_iocharset(opts);
+ if (is_vfat) {
+ opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
+ opts->rodir = 0;
+ } else {
+ opts->shortname = 0;
+ opts->rodir = 1;
+ }
+ opts->name_check = 'n';
+ opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0;
+ opts->unicode_xlate = 0;
+ opts->numtail = 1;
+ opts->usefree = opts->nocase = 0;
+ opts->tz_set = 0;
+ opts->nfs = 0;
+ opts->errors = FAT_ERRORS_RO;
+ opts->debug = 0;
+
+ opts->utf8 = IS_ENABLED(CONFIG_FAT_DEFAULT_UTF8) && is_vfat;
+
+ fc->fs_private = opts;
+ /* fc->ops assigned by caller */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_init_fs_context);
+
+void fat_free_fc(struct fs_context *fc)
+{
+ struct fat_mount_options *opts = fc->fs_private;
+
+ if (opts->iocharset != fat_default_iocharset)
+ kfree(opts->iocharset);
+ kfree(fc->fs_private);
+}
+EXPORT_SYMBOL_GPL(fat_free_fc);
+
static int __init init_fat_fs(void)
{
int err;
@@ -1978,4 +1974,5 @@ static void __exit exit_fat_fs(void)
module_init(init_fat_fs)
module_exit(exit_fat_fs)
+MODULE_DESCRIPTION("Core FAT filesystem support");
MODULE_LICENSE("GPL");
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 2116c486843b..f06f6ba643cc 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -650,24 +650,48 @@ static void setup(struct super_block *sb)
sb->s_flags |= SB_NOATIME;
}
-static int msdos_fill_super(struct super_block *sb, void *data, int silent)
+static int msdos_fill_super(struct super_block *sb, struct fs_context *fc)
{
- return fat_fill_super(sb, data, silent, 0, setup);
+ return fat_fill_super(sb, fc, setup);
}
-static struct dentry *msdos_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int msdos_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, msdos_fill_super);
+ return get_tree_bdev(fc, msdos_fill_super);
+}
+
+static int msdos_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ return fat_parse_param(fc, param, false);
+}
+
+static const struct fs_context_operations msdos_context_ops = {
+ .parse_param = msdos_parse_param,
+ .get_tree = msdos_get_tree,
+ .reconfigure = fat_reconfigure,
+ .free = fat_free_fc,
+};
+
+static int msdos_init_fs_context(struct fs_context *fc)
+{
+ int err;
+
+ /* Initialize with is_vfat == false */
+ err = fat_init_fs_context(fc, false);
+ if (err)
+ return err;
+
+ fc->ops = &msdos_context_ops;
+ return 0;
}
static struct file_system_type msdos_fs_type = {
.owner = THIS_MODULE,
.name = "msdos",
- .mount = msdos_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .init_fs_context = msdos_init_fs_context,
+ .parameters = fat_param_spec,
};
MODULE_ALIAS_FS("msdos");
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c4d00999a433..6423e1dedf14 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -1195,24 +1195,48 @@ static void setup(struct super_block *sb)
sb->s_d_op = &vfat_dentry_ops;
}
-static int vfat_fill_super(struct super_block *sb, void *data, int silent)
+static int vfat_fill_super(struct super_block *sb, struct fs_context *fc)
{
- return fat_fill_super(sb, data, silent, 1, setup);
+ return fat_fill_super(sb, fc, setup);
}
-static struct dentry *vfat_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int vfat_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
+ return get_tree_bdev(fc, vfat_fill_super);
+}
+
+static int vfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ return fat_parse_param(fc, param, true);
+}
+
+static const struct fs_context_operations vfat_context_ops = {
+ .parse_param = vfat_parse_param,
+ .get_tree = vfat_get_tree,
+ .reconfigure = fat_reconfigure,
+ .free = fat_free_fc,
+};
+
+static int vfat_init_fs_context(struct fs_context *fc)
+{
+ int err;
+
+ /* Initialize with is_vfat == true */
+ err = fat_init_fs_context(fc, true);
+ if (err)
+ return err;
+
+ fc->ops = &vfat_context_ops;
+ return 0;
}
static struct file_system_type vfat_fs_type = {
.owner = THIS_MODULE,
.name = "vfat",
- .mount = vfat_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .init_fs_context = vfat_init_fs_context,
+ .parameters = fat_param_spec,
};
MODULE_ALIAS_FS("vfat");
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 8a7f86c2139a..6e8cea16790e 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -115,88 +115,188 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
return err;
}
-static struct vfsmount *get_vfsmount_from_fd(int fd)
+static int get_path_from_fd(int fd, struct path *root)
{
- struct vfsmount *mnt;
-
if (fd == AT_FDCWD) {
struct fs_struct *fs = current->fs;
spin_lock(&fs->lock);
- mnt = mntget(fs->pwd.mnt);
+ *root = fs->pwd;
+ path_get(root);
spin_unlock(&fs->lock);
} else {
struct fd f = fdget(fd);
if (!f.file)
- return ERR_PTR(-EBADF);
- mnt = mntget(f.file->f_path.mnt);
+ return -EBADF;
+ *root = f.file->f_path;
+ path_get(root);
fdput(f);
}
- return mnt;
+
+ return 0;
}
+enum handle_to_path_flags {
+ HANDLE_CHECK_PERMS = (1 << 0),
+ HANDLE_CHECK_SUBTREE = (1 << 1),
+};
+
+struct handle_to_path_ctx {
+ struct path root;
+ enum handle_to_path_flags flags;
+ unsigned int fh_flags;
+};
+
static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
{
- return 1;
+ struct handle_to_path_ctx *ctx = context;
+ struct user_namespace *user_ns = current_user_ns();
+ struct dentry *d, *root = ctx->root.dentry;
+ struct mnt_idmap *idmap = mnt_idmap(ctx->root.mnt);
+ int retval = 0;
+
+ if (!root)
+ return 1;
+
+ /* Old permission model with global CAP_DAC_READ_SEARCH. */
+ if (!ctx->flags)
+ return 1;
+
+ /*
+ * It's racy as we're not taking rename_lock but we're able to ignore
+	 * permissions and we just need an approximation of whether we were able
+ * to follow a path to the file.
+ *
+ * It's also potentially expensive on some filesystems especially if
+ * there is a deep path.
+ */
+ d = dget(dentry);
+ while (d != root && !IS_ROOT(d)) {
+ struct dentry *parent = dget_parent(d);
+
+ /*
+ * We know that we have the ability to override DAC permissions
+ * as we've verified this earlier via CAP_DAC_READ_SEARCH. But
+ * we also need to make sure that there aren't any unmapped
+ * inodes in the path that would prevent us from reaching the
+ * file.
+ */
+ if (!privileged_wrt_inode_uidgid(user_ns, idmap,
+ d_inode(parent))) {
+ dput(d);
+ dput(parent);
+ return retval;
+ }
+
+ dput(d);
+ d = parent;
+ }
+
+ if (!(ctx->flags & HANDLE_CHECK_SUBTREE) || d == root)
+ retval = 1;
+ WARN_ON_ONCE(d != root && d != root->d_sb->s_root);
+ dput(d);
+ return retval;
}
-static int do_handle_to_path(int mountdirfd, struct file_handle *handle,
- struct path *path)
+static int do_handle_to_path(struct file_handle *handle, struct path *path,
+ struct handle_to_path_ctx *ctx)
{
- int retval = 0;
int handle_dwords;
+ struct vfsmount *mnt = ctx->root.mnt;
- path->mnt = get_vfsmount_from_fd(mountdirfd);
- if (IS_ERR(path->mnt)) {
- retval = PTR_ERR(path->mnt);
- goto out_err;
- }
/* change the handle size to multiple of sizeof(u32) */
handle_dwords = handle->handle_bytes >> 2;
- path->dentry = exportfs_decode_fh(path->mnt,
+ path->dentry = exportfs_decode_fh_raw(mnt,
(struct fid *)handle->f_handle,
handle_dwords, handle->handle_type,
- vfs_dentry_acceptable, NULL);
- if (IS_ERR(path->dentry)) {
- retval = PTR_ERR(path->dentry);
- goto out_mnt;
+ ctx->fh_flags,
+ vfs_dentry_acceptable, ctx);
+ if (IS_ERR_OR_NULL(path->dentry)) {
+ if (path->dentry == ERR_PTR(-ENOMEM))
+ return -ENOMEM;
+ return -ESTALE;
}
+ path->mnt = mntget(mnt);
return 0;
-out_mnt:
- mntput(path->mnt);
-out_err:
- return retval;
+}
+
+/*
+ * Allow relaxed permissions of file handles if the caller has the
+ * ability to mount the filesystem or create a bind-mount of the
+ * provided @mountdirfd.
+ *
+ * In both cases the caller may be able to get an unobstructed way to
+ * the encoded file handle. If the caller is only able to create a
+ * bind-mount we need to verify that there are no locked mounts on top
+ * of it that could prevent us from getting to the encoded file.
+ *
+ * In principle, locked mounts can prevent the caller from mounting the
+ * filesystem, but that only applies to procfs and sysfs, neither of which
+ * support decoding file handles.
+ */
+static inline bool may_decode_fh(struct handle_to_path_ctx *ctx,
+ unsigned int o_flags)
+{
+ struct path *root = &ctx->root;
+
+ /*
+	 * Restrict to O_DIRECTORY to provide a deterministic API that avoids
+	 * confusing behaviour in the face of disconnected non-dir dentries.
+ *
+ * There's only one dentry for each directory inode (VFS rule)...
+ */
+ if (!(o_flags & O_DIRECTORY))
+ return false;
+
+ if (ns_capable(root->mnt->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
+ ctx->flags = HANDLE_CHECK_PERMS;
+ else if (is_mounted(root->mnt) &&
+ ns_capable(real_mount(root->mnt)->mnt_ns->user_ns,
+ CAP_SYS_ADMIN) &&
+ !has_locked_children(real_mount(root->mnt), root->dentry))
+ ctx->flags = HANDLE_CHECK_PERMS | HANDLE_CHECK_SUBTREE;
+ else
+ return false;
+
+ /* Are we able to override DAC permissions? */
+ if (!ns_capable(current_user_ns(), CAP_DAC_READ_SEARCH))
+ return false;
+
+ ctx->fh_flags = EXPORT_FH_DIR_ONLY;
+ return true;
}
static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
- struct path *path)
+ struct path *path, unsigned int o_flags)
{
int retval = 0;
struct file_handle f_handle;
struct file_handle *handle = NULL;
+ struct handle_to_path_ctx ctx = {};
- /*
- * With handle we don't look at the execute bit on the
- * directory. Ideally we would like CAP_DAC_SEARCH.
- * But we don't have that
- */
- if (!capable(CAP_DAC_READ_SEARCH)) {
- retval = -EPERM;
+ retval = get_path_from_fd(mountdirfd, &ctx.root);
+ if (retval)
goto out_err;
+
+ if (!capable(CAP_DAC_READ_SEARCH) && !may_decode_fh(&ctx, o_flags)) {
+ retval = -EPERM;
+ goto out_path;
}
+
if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) {
retval = -EFAULT;
- goto out_err;
+ goto out_path;
}
if ((f_handle.handle_bytes > MAX_HANDLE_SZ) ||
(f_handle.handle_bytes == 0)) {
retval = -EINVAL;
- goto out_err;
+ goto out_path;
}
handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
GFP_KERNEL);
if (!handle) {
retval = -ENOMEM;
- goto out_err;
+ goto out_path;
}
/* copy the full handle */
*handle = f_handle;
@@ -207,10 +307,12 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
goto out_handle;
}
- retval = do_handle_to_path(mountdirfd, handle, path);
+ retval = do_handle_to_path(handle, path, &ctx);
out_handle:
kfree(handle);
+out_path:
+ path_put(&ctx.root);
out_err:
return retval;
}
@@ -223,7 +325,7 @@ static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
struct file *file;
int fd;
- retval = handle_to_path(mountdirfd, ufh, &path);
+ retval = handle_to_path(mountdirfd, ufh, &path, open_flag);
if (retval)
return retval;
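
Under the relaxed check above, a caller that is CAP_SYS_ADMIN over the mount's namespace (and CAP_DAC_READ_SEARCH in its own user namespace) can decode directory handles without global CAP_DAC_READ_SEARCH, provided it opens with O_DIRECTORY. A hypothetical round-trip using the glibc wrappers (reopen_dir_by_handle is illustrative; mount_fd is any fd on the target mount):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>

static int reopen_dir_by_handle(int mount_fd, const char *path)
{
	struct file_handle *fh;
	int mount_id, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* Encode a handle for @path ... */
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0)) {
		free(fh);
		return -1;
	}

	/* ... and reopen it; O_DIRECTORY is what may_decode_fh() insists on. */
	fd = open_by_handle_at(mount_fd, fh, O_RDONLY | O_DIRECTORY);
	free(fh);
	return fd;
}
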
diff --git a/fs/file.c b/fs/file.c
index 8076aef9c210..a3b72aa64f11 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -486,12 +486,12 @@ struct files_struct init_files = {
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
- unsigned int maxfd = fdt->max_fds;
+ unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
unsigned int maxbit = maxfd / BITS_PER_LONG;
unsigned int bitbit = start / BITS_PER_LONG;
bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
- if (bitbit > maxfd)
+ if (bitbit >= maxfd)
return maxfd;
if (bitbit > start)
start = bitbit;
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index a4d6ca0b8971..24727ec34e5a 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -308,6 +308,40 @@ int fs_param_is_fd(struct p_log *log, const struct fs_parameter_spec *p,
}
EXPORT_SYMBOL(fs_param_is_fd);
+int fs_param_is_uid(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ kuid_t uid;
+
+ if (fs_param_is_u32(log, p, param, result) != 0)
+ return fs_param_bad_value(log, param);
+
+ uid = make_kuid(current_user_ns(), result->uint_32);
+ if (!uid_valid(uid))
+ return inval_plog(log, "Invalid uid '%s'", param->string);
+
+ result->uid = uid;
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_uid);
+
+int fs_param_is_gid(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ kgid_t gid;
+
+ if (fs_param_is_u32(log, p, param, result) != 0)
+ return fs_param_bad_value(log, param);
+
+ gid = make_kgid(current_user_ns(), result->uint_32);
+ if (!gid_valid(gid))
+ return inval_plog(log, "Invalid gid '%s'", param->string);
+
+ result->gid = gid;
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_gid);
+
int fs_param_is_blockdev(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
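
Filesystems opt into these validators through the fsparam_uid()/fsparam_gid() spec helpers, and the already-validated IDs arrive in result.uid / result.gid, as the ext4, fuse, fat and exfat conversions above show. A minimal sketch of the pattern (all demo_* names are illustrative, not part of any real filesystem):

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

struct demo_fs_context {
	kuid_t uid;
	kgid_t gid;
};

enum { Opt_uid, Opt_gid };

static const struct fs_parameter_spec demo_param_spec[] = {
	fsparam_uid("uid", Opt_uid),
	fsparam_gid("gid", Opt_gid),
	{}
};

static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct demo_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, demo_param_spec, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		/* Already validated against current_user_ns() by fs_param_is_uid(). */
		ctx->uid = result.uid;
		break;
	case Opt_gid:
		ctx->gid = result.gid;
		break;
	}
	return 0;
}
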
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 6593ae518115..ed2dd000622e 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -220,10 +220,6 @@ static int vfs_cmd_create(struct fs_context *fc, bool exclusive)
if (!mount_capable(fc))
return -EPERM;
- /* require the new mount api */
- if (exclusive && fc->ops == &legacy_fs_context_ops)
- return -EOPNOTSUPP;
-
fc->phase = FS_CONTEXT_CREATING;
fc->exclusive = exclusive;
@@ -411,6 +407,7 @@ SYSCALL_DEFINE5(fsconfig,
case FSCONFIG_SET_PATH:
case FSCONFIG_SET_PATH_EMPTY:
case FSCONFIG_SET_FD:
+ case FSCONFIG_CMD_CREATE_EXCL:
ret = -EOPNOTSUPP;
goto out_f;
}
@@ -451,7 +448,7 @@ SYSCALL_DEFINE5(fsconfig,
fallthrough;
case FSCONFIG_SET_PATH:
param.type = fs_value_is_filename;
- param.name = getname_flags(_value, lookup_flags, NULL);
+ param.name = getname_flags(_value, lookup_flags);
if (IS_ERR(param.name)) {
ret = PTR_ERR(param.name);
goto out_key;
diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
index 3d192b80a561..04cfd8fee992 100644
--- a/fs/fuse/acl.c
+++ b/fs/fuse/acl.c
@@ -146,8 +146,8 @@ int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
* be stripped.
*/
if (fc->posix_acl &&
- !vfsgid_in_group_p(i_gid_into_vfsgid(&nop_mnt_idmap, inode)) &&
- !capable_wrt_inode_uidgid(&nop_mnt_idmap, inode, CAP_FSETID))
+ !in_group_or_capable(&nop_mnt_idmap, inode,
+ i_gid_into_vfsgid(&nop_mnt_idmap, inode)))
extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
ret = fuse_setxattr(inode, name, value, size, 0, extra_flags);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 99e44ea7d875..d8ab4e93916f 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -740,8 +740,8 @@ static const struct fs_parameter_spec fuse_fs_parameters[] = {
fsparam_string ("source", OPT_SOURCE),
fsparam_u32 ("fd", OPT_FD),
fsparam_u32oct ("rootmode", OPT_ROOTMODE),
- fsparam_u32 ("user_id", OPT_USER_ID),
- fsparam_u32 ("group_id", OPT_GROUP_ID),
+ fsparam_uid ("user_id", OPT_USER_ID),
+ fsparam_gid ("group_id", OPT_GROUP_ID),
fsparam_flag ("default_permissions", OPT_DEFAULT_PERMISSIONS),
fsparam_flag ("allow_other", OPT_ALLOW_OTHER),
fsparam_u32 ("max_read", OPT_MAX_READ),
@@ -755,6 +755,8 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
struct fs_parse_result result;
struct fuse_fs_context *ctx = fsc->fs_private;
int opt;
+ kuid_t kuid;
+ kgid_t kgid;
if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
/*
@@ -799,16 +801,26 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
break;
case OPT_USER_ID:
- ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
- if (!uid_valid(ctx->user_id))
+ kuid = result.uid;
+ /*
+ * The requested uid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kuid_has_mapping(fsc->user_ns, kuid))
return invalfc(fsc, "Invalid user_id");
+ ctx->user_id = kuid;
ctx->user_id_present = true;
break;
case OPT_GROUP_ID:
- ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
- if (!gid_valid(ctx->group_id))
+ kgid = result.gid;
+ /*
+ * The requested gid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kgid_has_mapping(fsc->user_ns, kgid))
return invalfc(fsc, "Invalid group_id");
+ ctx->group_id = kgid;
ctx->group_id_present = true;
break;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4ea6c8bfb4e6..12a769077ea0 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -61,12 +61,10 @@ struct gfs2_glock_iter {
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static void __gfs2_glock_dq(struct gfs2_holder *gh);
-static void handle_callback(struct gfs2_glock *gl, unsigned int state,
- unsigned long delay, bool remote);
+static void request_demote(struct gfs2_glock *gl, unsigned int state,
+ unsigned long delay, bool remote);
static struct dentry *gfs2_root;
-static struct workqueue_struct *glock_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);
@@ -218,34 +216,9 @@ struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
return gl;
}
-/**
- * demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int demote_ok(const struct gfs2_glock *gl)
-{
- const struct gfs2_glock_operations *glops = gl->gl_ops;
-
- if (gl->gl_state == LM_ST_UNLOCKED)
- return 0;
- if (!list_empty(&gl->gl_holders))
- return 0;
- if (glops->go_demote_ok)
- return glops->go_demote_ok(gl);
- return 1;
-}
-
-
-void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
+static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
- if (!(gl->gl_ops->go_flags & GLOF_LRU))
- return;
-
spin_lock(&lru_lock);
-
list_move_tail(&gl->gl_lru, &lru_list);
if (!test_bit(GLF_LRU, &gl->gl_flags)) {
@@ -258,9 +231,6 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
- if (!(gl->gl_ops->go_flags & GLOF_LRU))
- return;
-
spin_lock(&lru_lock);
if (test_bit(GLF_LRU, &gl->gl_flags)) {
list_del_init(&gl->gl_lru);
@@ -275,7 +245,9 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
* work queue.
*/
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
- if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
/*
* We are holding the lockref spinlock, and the work was still
* queued above. The queued work (glock_work_func) takes that
@@ -305,6 +277,20 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
+static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
+{
+ if (lockref_put_or_lock(&gl->gl_lockref))
+ return true;
+ GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+ if (gl->gl_state != LM_ST_UNLOCKED) {
+ gl->gl_lockref.count--;
+ gfs2_glock_add_to_lru(gl);
+ spin_unlock(&gl->gl_lockref.lock);
+ return true;
+ }
+ return false;
+}
+
/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
@@ -313,7 +299,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
void gfs2_glock_put(struct gfs2_glock *gl)
{
- if (lockref_put_or_lock(&gl->gl_lockref))
+ if (__gfs2_glock_put_or_lock(gl))
return;
__gfs2_glock_put(gl);
@@ -328,10 +314,9 @@ void gfs2_glock_put(struct gfs2_glock *gl)
*/
void gfs2_glock_put_async(struct gfs2_glock *gl)
{
- if (lockref_put_or_lock(&gl->gl_lockref))
+ if (__gfs2_glock_put_or_lock(gl))
return;
- GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -570,18 +555,6 @@ static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
- int held1, held2;
-
- held1 = (gl->gl_state != LM_ST_UNLOCKED);
- held2 = (new_state != LM_ST_UNLOCKED);
-
- if (held1 != held2) {
- GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
- if (held2)
- gl->gl_lockref.count++;
- else
- gl->gl_lockref.count--;
- }
if (new_state != gl->gl_target)
/* shorten our minimum hold time */
gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
@@ -812,7 +785,7 @@ skip_inval:
(target != LM_ST_UNLOCKED ||
test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
if (!is_system_glock(gl)) {
- handle_callback(gl, LM_ST_UNLOCKED, 0, false); /* sets demote */
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
/*
* Ordinarily, we would call dlm and its callback would call
* finish_xmote, which would call state_change() to the new state.
@@ -910,7 +883,6 @@ out_sched:
out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_atomic();
- return;
}
/**
@@ -1111,19 +1083,21 @@ static void glock_work_func(struct work_struct *work)
unsigned int drop_refs = 1;
spin_lock(&gl->gl_lockref.lock);
- if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
- clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) {
+ clear_bit(GLF_HAVE_REPLY, &gl->gl_flags);
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
- unsigned long holdtime, now = jiffies;
+ if (gl->gl_name.ln_type == LM_TYPE_INODE) {
+ unsigned long holdtime, now = jiffies;
- holdtime = gl->gl_tchange + gl->gl_hold_time;
- if (time_before(now, holdtime))
- delay = holdtime - now;
+ holdtime = gl->gl_tchange + gl->gl_hold_time;
+ if (time_before(now, holdtime))
+ delay = holdtime - now;
+ }
if (!delay) {
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
@@ -1134,20 +1108,18 @@ static void glock_work_func(struct work_struct *work)
if (delay) {
/* Keep one glock reference for the work we requeue. */
drop_refs--;
- if (gl->gl_name.ln_type != LM_TYPE_INODE)
- delay = 0;
gfs2_glock_queue_work(gl, delay);
}
- /*
- * Drop the remaining glock references manually here. (Mind that
- * gfs2_glock_queue_work depends on the lockref spinlock begin held
- * here as well.)
- */
+ /* Drop the remaining glock references manually. */
+ GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
gl->gl_lockref.count -= drop_refs;
if (!gl->gl_lockref.count) {
- __gfs2_glock_put(gl);
- return;
+ if (gl->gl_state == LM_ST_UNLOCKED) {
+ __gfs2_glock_put(gl);
+ return;
+ }
+ gfs2_glock_add_to_lru(gl);
}
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1183,6 +1155,8 @@ again:
out:
rcu_read_unlock();
finish_wait(wq, &wait.wait);
+ if (gl)
+ gfs2_glock_remove_from_lru(gl);
return gl;
}
@@ -1209,13 +1183,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
.ln_sbd = sdp };
struct gfs2_glock *gl, *tmp;
struct address_space *mapping;
- int ret = 0;
gl = find_insert_glock(&name, NULL);
- if (gl) {
- *glp = gl;
- return 0;
- }
+ if (gl)
+ goto found;
if (!create)
return -ENOENT;
@@ -1243,7 +1214,9 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
atomic_inc(&sdp->sd_glock_disposal);
gl->gl_node.next = NULL;
- gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0;
+ gl->gl_flags = BIT(GLF_INITIAL);
+ if (glops->go_instantiate)
+ gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
gl->gl_name = name;
lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
gl->gl_lockref.count = 1;
@@ -1275,23 +1248,19 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
}
tmp = find_insert_glock(&name, gl);
- if (!tmp) {
- *glp = gl;
- goto out;
- }
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto out_free;
- }
- *glp = tmp;
+ if (tmp) {
+ gfs2_glock_dealloc(&gl->gl_rcu);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_kill_wait);
-out_free:
- gfs2_glock_dealloc(&gl->gl_rcu);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_kill_wait);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ gl = tmp;
+ }
-out:
- return ret;
+found:
+ *glp = gl;
+ return 0;
}
/**
@@ -1461,7 +1430,7 @@ out:
}
/**
- * handle_callback - process a demote request
+ * request_demote - process a demote request
* @gl: the glock
* @state: the state the caller wants us to change to
* @delay: zero to demote immediately; otherwise pending demote
@@ -1471,8 +1440,8 @@ out:
* practise: LM_ST_SHARED and LM_ST_UNLOCKED
*/
-static void handle_callback(struct gfs2_glock *gl, unsigned int state,
- unsigned long delay, bool remote)
+static void request_demote(struct gfs2_glock *gl, unsigned int state,
+ unsigned long delay, bool remote)
{
if (delay)
set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
@@ -1636,15 +1605,12 @@ unlock:
return error;
}
- if (test_bit(GLF_LRU, &gl->gl_flags))
- gfs2_glock_remove_from_lru(gl);
-
gh->gh_error = 0;
spin_lock(&gl->gl_lockref.lock);
add_to_queue(gh);
if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
- test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
+ set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
gl->gl_lockref.count++;
gfs2_glock_queue_work(gl, 0);
}
@@ -1688,7 +1654,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
* below.
*/
if (gh->gh_flags & GL_NOCACHE)
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
list_del_init(&gh->gh_list);
clear_bit(HIF_HOLDER, &gh->gh_iflags);
@@ -1703,9 +1669,6 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
fast_path = 1;
}
- if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
- gfs2_glock_add_to_lru(gl);
-
if (unlikely(!fast_path)) {
gl->gl_lockref.count++;
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -1932,10 +1895,10 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
gl->gl_name.ln_type == LM_TYPE_INODE) {
if (time_before(now, holdtime))
delay = holdtime - now;
- if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+ if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
delay = gl->gl_hold_time;
}
- handle_callback(gl, state, delay, true);
+ request_demote(gl, state, delay, true);
gfs2_glock_queue_work(gl, delay);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1988,14 +1951,14 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
if (gfs2_should_freeze(gl)) {
- set_bit(GLF_FROZEN, &gl->gl_flags);
+ set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
return;
}
}
gl->gl_lockref.count++;
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -2018,10 +1981,12 @@ static int glock_cmp(void *priv, const struct list_head *a,
static bool can_free_glock(struct gfs2_glock *gl)
{
- bool held = gl->gl_state != LM_ST_UNLOCKED;
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
return !test_bit(GLF_LOCK, &gl->gl_flags) &&
- gl->gl_lockref.count == held;
+ !gl->gl_lockref.count &&
+ (!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
+ test_bit(SDF_KILL, &sdp->sd_flags));
}
/**
@@ -2063,8 +2028,8 @@ add_back_to_lru:
clear_bit(GLF_LRU, &gl->gl_flags);
freed++;
gl->gl_lockref.count++;
- if (demote_ok(gl))
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+ if (gl->gl_state != LM_ST_UNLOCKED)
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
cond_resched_lock(&lru_lock);
@@ -2182,13 +2147,14 @@ void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
static void thaw_glock(struct gfs2_glock *gl)
{
- if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+ if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
+ gfs2_glock_remove_from_lru(gl);
spin_lock(&gl->gl_lockref.lock);
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -2207,7 +2173,7 @@ static void clear_glock(struct gfs2_glock *gl)
if (!__lockref_is_dead(&gl->gl_lockref)) {
gl->gl_lockref.count++;
if (gl->gl_state != LM_ST_UNLOCKED)
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
gfs2_glock_queue_work(gl, 0);
}
spin_unlock(&gl->gl_lockref.lock);
@@ -2259,16 +2225,30 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
+ unsigned long start = jiffies;
+ bool timed_out = false;
+
set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
- flush_workqueue(glock_workqueue);
+ flush_workqueue(sdp->sd_glock_wq);
glock_hash_walk(clear_glock, sdp);
- flush_workqueue(glock_workqueue);
- wait_event_timeout(sdp->sd_kill_wait,
- atomic_read(&sdp->sd_glock_disposal) == 0,
- HZ * 600);
+ flush_workqueue(sdp->sd_glock_wq);
+
+ while (!timed_out) {
+ wait_event_timeout(sdp->sd_kill_wait,
+ !atomic_read(&sdp->sd_glock_disposal),
+ HZ * 60);
+ if (!atomic_read(&sdp->sd_glock_disposal))
+ break;
+ timed_out = time_after(jiffies, start + (HZ * 600));
+ fs_warn(sdp, "%u glocks left after %u seconds%s\n",
+ atomic_read(&sdp->sd_glock_disposal),
+ jiffies_to_msecs(jiffies - start) / 1000,
+ timed_out ? ":" : "; still waiting");
+ }
gfs2_lm_unmount(sdp);
gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
+ destroy_workqueue(sdp->sd_glock_wq);
}
static const char *state2str(unsigned state)
@@ -2366,11 +2346,11 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'f';
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
*p++ = 'i';
- if (test_bit(GLF_REPLY_PENDING, gflags))
+ if (test_bit(GLF_HAVE_REPLY, gflags))
*p++ = 'r';
if (test_bit(GLF_INITIAL, gflags))
- *p++ = 'I';
- if (test_bit(GLF_FROZEN, gflags))
+ *p++ = 'a';
+ if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags))
*p++ = 'F';
if (!list_empty(&gl->gl_holders))
*p++ = 'q';
@@ -2380,7 +2360,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'o';
if (test_bit(GLF_BLOCKING, gflags))
*p++ = 'b';
- if (test_bit(GLF_FREEING, gflags))
+ if (test_bit(GLF_UNLOCKED, gflags))
*p++ = 'x';
if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
*p++ = 'n';
@@ -2533,16 +2513,8 @@ int __init gfs2_glock_init(void)
if (ret < 0)
return ret;
- glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
- WQ_HIGHPRI | WQ_FREEZABLE, 0);
- if (!glock_workqueue) {
- rhashtable_destroy(&gl_hash_table);
- return -ENOMEM;
- }
-
glock_shrinker = shrinker_alloc(0, "gfs2-glock");
if (!glock_shrinker) {
- destroy_workqueue(glock_workqueue);
rhashtable_destroy(&gl_hash_table);
return -ENOMEM;
}
@@ -2562,7 +2534,6 @@ void gfs2_glock_exit(void)
{
shrinker_free(glock_shrinker);
rhashtable_destroy(&gl_hash_table);
- destroy_workqueue(glock_workqueue);
}
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 19aef6d53267..adf0091cc98f 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -250,7 +250,6 @@ void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
void gfs2_glock_free(struct gfs2_glock *gl);
void gfs2_glock_free_later(struct gfs2_glock *gl);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 68677fb69a73..95d8081681dc 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -385,23 +385,6 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
gfs2_clear_glop_pending(ip);
}
-/**
- * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int inode_go_demote_ok(const struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
- return 0;
-
- return 1;
-}
-
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -648,21 +631,21 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
}
/**
- * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
- * @gl: glock being freed
+ * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
+ * @gl: glock being unlocked
*
* For now, this is only used for the journal inode glock. In withdraw
- * situations, we need to wait for the glock to be freed so that we know
+ * situations, we need to wait for the glock to be unlocked so that we know
* other nodes may proceed with recovery / journal replay.
*/
-static void inode_go_free(struct gfs2_glock *gl)
+static void inode_go_unlocked(struct gfs2_glock *gl)
{
/* Note that we cannot reference gl_object because it's already set
* to NULL by this point in its lifecycle. */
- if (!test_bit(GLF_FREEING, &gl->gl_flags))
+ if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
return;
- clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
- wake_up_bit(&gl->gl_flags, GLF_FREEING);
+ clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
+ wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
}
/**
@@ -722,13 +705,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
const struct gfs2_glock_operations gfs2_inode_glops = {
.go_sync = inode_go_sync,
.go_inval = inode_go_inval,
- .go_demote_ok = inode_go_demote_ok,
.go_instantiate = inode_go_instantiate,
.go_held = inode_go_held,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
- .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
- .go_free = inode_go_free,
+ .go_flags = GLOF_ASPACE | GLOF_LVB,
+ .go_unlocked = inode_go_unlocked,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -751,13 +733,13 @@ const struct gfs2_glock_operations gfs2_iopen_glops = {
.go_type = LM_TYPE_IOPEN,
.go_callback = iopen_go_callback,
.go_dump = inode_go_dump,
- .go_flags = GLOF_LRU | GLOF_NONDISK,
+ .go_flags = GLOF_NONDISK,
.go_subclass = 1,
};
const struct gfs2_glock_operations gfs2_flock_glops = {
.go_type = LM_TYPE_FLOCK,
- .go_flags = GLOF_LRU | GLOF_NONDISK,
+ .go_flags = GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_nondisk_glops = {
@@ -768,7 +750,7 @@ const struct gfs2_glock_operations gfs2_nondisk_glops = {
const struct gfs2_glock_operations gfs2_quota_glops = {
.go_type = LM_TYPE_QUOTA,
- .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
+ .go_flags = GLOF_LVB | GLOF_NONDISK,
};
const struct gfs2_glock_operations gfs2_journal_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 60abd7050c99..aa4ef67a34e0 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -218,19 +218,17 @@ struct gfs2_glock_operations {
int (*go_sync) (struct gfs2_glock *gl);
int (*go_xmote_bh)(struct gfs2_glock *gl);
void (*go_inval) (struct gfs2_glock *gl, int flags);
- int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_instantiate) (struct gfs2_glock *gl);
int (*go_held)(struct gfs2_holder *gh);
void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
- void (*go_free)(struct gfs2_glock *gl);
+ void (*go_unlocked)(struct gfs2_glock *gl);
const int go_subclass;
const int go_type;
const unsigned long go_flags;
#define GLOF_ASPACE 1 /* address space attached */
#define GLOF_LVB 2 /* Lock Value Block attached */
-#define GLOF_LRU 4 /* LRU managed */
#define GLOF_NONDISK 8 /* not I/O related */
};
@@ -322,14 +320,14 @@ enum {
GLF_DIRTY = 6,
GLF_LFLUSH = 7,
GLF_INVALIDATE_IN_PROGRESS = 8,
- GLF_REPLY_PENDING = 9,
+ GLF_HAVE_REPLY = 9,
GLF_INITIAL = 10,
- GLF_FROZEN = 11,
+ GLF_HAVE_FROZEN_REPLY = 11,
GLF_INSTANTIATE_IN_PROG = 12, /* instantiate happening now */
GLF_LRU = 13,
GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15,
- GLF_FREEING = 16, /* Wait for glock to be freed */
+ GLF_UNLOCKED = 16, /* Wait for glock to be unlocked */
GLF_TRY_TO_EVICT = 17, /* iopen glocks only */
GLF_VERIFY_EVICT = 18, /* iopen glocks only */
};
@@ -772,6 +770,7 @@ struct gfs2_sbd {
/* Workqueue stuff */
+ struct workqueue_struct *sd_glock_wq;
struct workqueue_struct *sd_delete_wq;
/* Daemon stuff */
@@ -783,7 +782,6 @@ struct gfs2_sbd {
struct list_head sd_quota_list;
atomic_t sd_quota_count;
- struct mutex sd_quota_mutex;
struct mutex sd_quota_sync_mutex;
wait_queue_head_t sd_quota_wait;
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 49059274a528..fa5134df985f 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -134,8 +134,8 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- if (gl->gl_ops->go_free)
- gl->gl_ops->go_free(gl);
+ if (gl->gl_ops->go_unlocked)
+ gl->gl_ops->go_unlocked(gl);
gfs2_glock_free(gl);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
@@ -163,11 +163,21 @@ static void gdlm_ast(void *arg)
BUG();
}
- set_bit(GLF_INITIAL, &gl->gl_flags);
+ /*
+ * The GLF_INITIAL flag is initially set for new glocks. Upon the
+ * first successful new (non-conversion) request, we clear this flag to
+ * indicate that a DLM lock exists and that gl->gl_lksb.sb_lkid is the
+ * identifier to use for identifying it.
+ *
+ * Any failed initial requests do not create a DLM lock, so we ignore
+ * the gl->gl_lksb.sb_lkid values that come with such requests.
+ */
+
+ clear_bit(GLF_INITIAL, &gl->gl_flags);
gfs2_glock_complete(gl, ret);
return;
out:
- if (!test_bit(GLF_INITIAL, &gl->gl_flags))
+ if (test_bit(GLF_INITIAL, &gl->gl_flags))
gl->gl_lksb.sb_lkid = 0;
gfs2_glock_complete(gl, ret);
}
@@ -239,7 +249,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
BUG();
}
- if (gl->gl_lksb.sb_lkid != 0) {
+ if (!test_bit(GLF_INITIAL, &gl->gl_flags)) {
lkf |= DLM_LKF_CONVERT;
if (test_bit(GLF_BLOCKING, &gl->gl_flags))
lkf |= DLM_LKF_QUECVT;
@@ -270,14 +280,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
lkf = make_flags(gl, flags, req);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
- if (gl->gl_lksb.sb_lkid) {
- gfs2_update_request_times(gl);
- } else {
+ if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
strname[GDLM_STRNAME_BYTES - 1] = '\0';
gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
gl->gl_dstamp = ktime_get_real();
+ } else {
+ gfs2_update_request_times(gl);
}
/*
* Submit the actual lock request.
@@ -301,7 +311,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
- if (gl->gl_lksb.sb_lkid == 0) {
+ if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
gfs2_glock_free(gl);
return;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 05975ec76d35..ff1f3e3dc65c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -103,7 +103,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_completion(&sdp->sd_journal_ready);
INIT_LIST_HEAD(&sdp->sd_quota_list);
- mutex_init(&sdp->sd_quota_mutex);
mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
spin_lock_init(&sdp->sd_bitmap_lock);
@@ -1188,11 +1187,17 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
+ error = -ENOMEM;
+ sdp->sd_glock_wq = alloc_workqueue("gfs2-glock/%s",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0,
+ sdp->sd_fsname);
+ if (!sdp->sd_glock_wq)
+ goto fail_free;
+
sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s",
WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname);
- error = -ENOMEM;
if (!sdp->sd_delete_wq)
- goto fail_free;
+ goto fail_glock_wq;
error = gfs2_sys_fs_add(sdp);
if (error)
@@ -1301,6 +1306,8 @@ fail_debug:
gfs2_sys_fs_del(sdp);
fail_delete_wq:
destroy_workqueue(sdp->sd_delete_wq);
+fail_glock_wq:
+ destroy_workqueue(sdp->sd_glock_wq);
fail_free:
free_sbd(sdp);
sb->s_fs_info = NULL;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index aa9cf0102848..2e6bc77f4f81 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -75,9 +75,6 @@
#define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
-#define QC_CHANGE 0
-#define QC_SYNC 1
-
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/* -> sd_bitmap_lock */
static DEFINE_SPINLOCK(qd_lock);
@@ -319,11 +316,11 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
}
-static void qd_hold(struct gfs2_quota_data *qd)
+static void __qd_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_sbd;
- gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
- lockref_get(&qd->qd_lockref);
+ gfs2_assert(sdp, qd->qd_lockref.count > 0);
+ qd->qd_lockref.count++;
}
static void qd_put(struct gfs2_quota_data *qd)
@@ -400,16 +397,17 @@ static int bh_get(struct gfs2_quota_data *qd)
struct inode *inode = sdp->sd_qc_inode;
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int block, offset;
- struct buffer_head *bh;
+ struct buffer_head *bh = NULL;
struct iomap iomap = { };
int error;
- mutex_lock(&sdp->sd_quota_mutex);
-
- if (qd->qd_bh_count++) {
- mutex_unlock(&sdp->sd_quota_mutex);
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_bh_count) {
+ qd->qd_bh_count++;
+ spin_unlock(&qd->qd_lockref.lock);
return 0;
}
+ spin_unlock(&qd->qd_lockref.lock);
block = qd->qd_slot / sdp->sd_qc_per_block;
offset = qd->qd_slot % sdp->sd_qc_per_block;
@@ -418,122 +416,83 @@ static int bh_get(struct gfs2_quota_data *qd)
(loff_t)block << inode->i_blkbits,
i_blocksize(inode), &iomap);
if (error)
- goto fail;
+ return error;
error = -ENOENT;
if (iomap.type != IOMAP_MAPPED)
- goto fail;
+ return error;
error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
DIO_WAIT, 0, &bh);
if (error)
- goto fail;
+ return error;
error = -EIO;
if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
- goto fail_brelse;
-
- qd->qd_bh = bh;
- qd->qd_bh_qc = (struct gfs2_quota_change *)
- (bh->b_data + sizeof(struct gfs2_meta_header) +
- offset * sizeof(struct gfs2_quota_change));
-
- mutex_unlock(&sdp->sd_quota_mutex);
+ goto out;
- return 0;
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_bh == NULL) {
+ qd->qd_bh = bh;
+ qd->qd_bh_qc = (struct gfs2_quota_change *)
+ (bh->b_data + sizeof(struct gfs2_meta_header) +
+ offset * sizeof(struct gfs2_quota_change));
+ bh = NULL;
+ }
+ qd->qd_bh_count++;
+ spin_unlock(&qd->qd_lockref.lock);
+ error = 0;
-fail_brelse:
+out:
brelse(bh);
-fail:
- qd->qd_bh_count--;
- mutex_unlock(&sdp->sd_quota_mutex);
return error;
}
static void bh_put(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_sbd;
+ struct buffer_head *bh = NULL;
- mutex_lock(&sdp->sd_quota_mutex);
+ spin_lock(&qd->qd_lockref.lock);
gfs2_assert(sdp, qd->qd_bh_count);
if (!--qd->qd_bh_count) {
- brelse(qd->qd_bh);
+ bh = qd->qd_bh;
qd->qd_bh = NULL;
qd->qd_bh_qc = NULL;
}
- mutex_unlock(&sdp->sd_quota_mutex);
+ spin_unlock(&qd->qd_lockref.lock);
+ brelse(bh);
}
-static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
- u64 *sync_gen)
+static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ u64 sync_gen)
{
+ bool ret = false;
+
+ spin_lock(&qd->qd_lockref.lock);
if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
!test_bit(QDF_CHANGE, &qd->qd_flags) ||
- (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
- return 0;
-
- /*
- * If qd_change is 0 it means a pending quota change was negated.
- * We should not sync it, but we still have a qd reference and slot
- * reference taken by gfs2_quota_change -> do_qc that need to be put.
- */
- if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
- slot_put(qd);
- qd_put(qd);
- return 0;
- }
+ qd->qd_sync_gen >= sync_gen)
+ goto out;
- if (!lockref_get_not_dead(&qd->qd_lockref))
- return 0;
+ if (__lockref_is_dead(&qd->qd_lockref))
+ goto out;
+ qd->qd_lockref.count++;
list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
set_bit(QDF_LOCKED, &qd->qd_flags);
qd->qd_change_sync = qd->qd_change;
slot_hold(qd);
- return 1;
+ ret = true;
+
+out:
+ spin_unlock(&qd->qd_lockref.lock);
+ return ret;
}
-static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
+static void qd_ungrab_sync(struct gfs2_quota_data *qd)
{
- int error;
-
- error = bh_get(qd);
- if (!error)
- return 0;
-
clear_bit(QDF_LOCKED, &qd->qd_flags);
slot_put(qd);
qd_put(qd);
- return error;
-}
-
-static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
-{
- struct gfs2_quota_data *qd = NULL, *iter;
- int error;
-
- *qdp = NULL;
-
- if (sb_rdonly(sdp->sd_vfs))
- return 0;
-
- spin_lock(&qd_lock);
-
- list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
- if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
- qd = iter;
- break;
- }
- }
-
- spin_unlock(&qd_lock);
-
- if (qd) {
- error = qd_bh_get_or_undo(sdp, qd);
- if (error)
- return error;
- *qdp = qd;
- }
-
- return 0;
}
static void qdsb_put(struct gfs2_quota_data *qd)
@@ -545,8 +504,10 @@ static void qdsb_put(struct gfs2_quota_data *qd)
static void qd_unlock(struct gfs2_quota_data *qd)
{
+ spin_lock(&qd->qd_lockref.lock);
gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
clear_bit(QDF_LOCKED, &qd->qd_flags);
+ spin_unlock(&qd->qd_lockref.lock);
qdsb_put(qd);
}
@@ -710,48 +671,57 @@ static int sort_qd(const void *a, const void *b)
return 0;
}
-static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
+static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
struct gfs2_quota_change *qc = qd->qd_bh_qc;
+ bool needs_put = false;
s64 x;
- mutex_lock(&sdp->sd_quota_mutex);
gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
- if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
- qc->qc_change = 0;
+ /*
+ * The QDF_CHANGE flag indicates that the slot in the quota change file
+ * is used. Here, we use the value of qc->qc_change when the slot is
+ * used, and we assume a value of 0 otherwise.
+ */
+
+ spin_lock(&qd->qd_lockref.lock);
+
+ x = 0;
+ if (test_bit(QDF_CHANGE, &qd->qd_flags))
+ x = be64_to_cpu(qc->qc_change);
+ x += change;
+ qd->qd_change += change;
+
+ if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) {
+ /* The slot in the quota change file becomes unused. */
+ clear_bit(QDF_CHANGE, &qd->qd_flags);
+ qc->qc_flags = 0;
+ qc->qc_id = 0;
+ needs_put = true;
+ } else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) {
+ /* The slot in the quota change file becomes used. */
+ set_bit(QDF_CHANGE, &qd->qd_flags);
+ __qd_hold(qd);
+ slot_hold(qd);
+
qc->qc_flags = 0;
if (qd->qd_id.type == USRQUOTA)
qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
}
-
- x = be64_to_cpu(qc->qc_change) + change;
qc->qc_change = cpu_to_be64(x);
- spin_lock(&qd_lock);
- qd->qd_change = x;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd->qd_lockref.lock);
- if (qc_type == QC_CHANGE) {
- if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
- qd_hold(qd);
- slot_hold(qd);
- }
- } else {
- gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
- clear_bit(QDF_CHANGE, &qd->qd_flags);
- qc->qc_flags = 0;
- qc->qc_id = 0;
+ if (needs_put) {
slot_put(qd);
qd_put(qd);
}
-
if (change < 0) /* Reset quiet flag if we freed some blocks */
clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
- mutex_unlock(&sdp->sd_quota_mutex);
}
static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
@@ -890,6 +860,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
be64_add_cpu(&q.qu_value, change);
if (((s64)be64_to_cpu(q.qu_value)) < 0)
q.qu_value = 0; /* Never go negative on quota usage */
+ spin_lock(&qd->qd_lockref.lock);
qd->qd_qb.qb_value = q.qu_value;
if (fdq) {
if (fdq->d_fieldmask & QC_SPC_SOFT) {
@@ -905,6 +876,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
qd->qd_qb.qb_value = q.qu_value;
}
}
+ spin_unlock(&qd->qd_lockref.lock);
err = gfs2_write_disk_quota(sdp, &q, loc);
if (!err) {
@@ -919,7 +891,8 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
return err;
}
-static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
+static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda,
+ u64 sync_gen)
{
struct gfs2_sbd *sdp = (*qda)->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -992,7 +965,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
if (error)
goto out_end_trans;
- do_qc(qd, -qd->qd_change_sync, QC_SYNC);
+ do_qc(qd, -qd->qd_change_sync);
set_bit(QDF_REFRESH, &qd->qd_flags);
}
@@ -1010,8 +983,13 @@ out_dq:
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
if (!error) {
- for (x = 0; x < num_qd; x++)
- qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;
+ for (x = 0; x < num_qd; x++) {
+ qd = qda[x];
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_sync_gen < sync_gen)
+ qd->qd_sync_gen = sync_gen;
+ spin_unlock(&qd->qd_lockref.lock);
+ }
}
return error;
}
@@ -1036,7 +1014,9 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
qlvb->qb_limit = q.qu_limit;
qlvb->qb_warn = q.qu_warn;
qlvb->qb_value = q.qu_value;
+ spin_lock(&qd->qd_lockref.lock);
qd->qd_qb = *qlvb;
+ spin_unlock(&qd->qd_lockref.lock);
return 0;
}
@@ -1058,7 +1038,9 @@ restart:
if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
force_refresh = FORCE;
+ spin_lock(&qd->qd_lockref.lock);
qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
+ spin_unlock(&qd->qd_lockref.lock);
if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
gfs2_glock_dq_uninit(q_gh);
@@ -1129,35 +1111,36 @@ static bool need_sync(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_tune *gt = &sdp->sd_tune;
- s64 value;
+ s64 value, change, limit;
unsigned int num, den;
+ int ret = false;
+ spin_lock(&qd->qd_lockref.lock);
if (!qd->qd_qb.qb_limit)
- return false;
+ goto out;
- spin_lock(&qd_lock);
- value = qd->qd_change;
- spin_unlock(&qd_lock);
+ change = qd->qd_change;
+ if (change <= 0)
+ goto out;
+ value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
+ limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
+ if (value >= limit)
+ goto out;
spin_lock(&gt->gt_spin);
num = gt->gt_quota_scale_num;
den = gt->gt_quota_scale_den;
spin_unlock(&gt->gt_spin);
- if (value <= 0)
- return false;
- else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
- (s64)be64_to_cpu(qd->qd_qb.qb_limit))
- return false;
- else {
- value *= gfs2_jindex_size(sdp) * num;
- value = div_s64(value, den);
- value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
- if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
- return false;
- }
+ change *= gfs2_jindex_size(sdp) * num;
+ change = div_s64(change, den);
+ if (value + change < limit)
+ goto out;
- return true;
+ ret = true;
+out:
+ spin_unlock(&qd->qd_lockref.lock);
+ return ret;
}
void gfs2_quota_unlock(struct gfs2_inode *ip)
@@ -1166,7 +1149,6 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
unsigned int count = 0;
u32 x;
- int found;
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
return;
@@ -1174,6 +1156,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
struct gfs2_quota_data *qd;
bool sync;
+ int error;
qd = ip->i_qadata->qa_qd[x];
sync = need_sync(qd);
@@ -1183,18 +1166,26 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
continue;
spin_lock(&qd_lock);
- found = qd_check_sync(sdp, qd, NULL);
+ sync = qd_grab_sync(sdp, qd, U64_MAX);
spin_unlock(&qd_lock);
- if (!found)
+ if (!sync)
continue;
- if (!qd_bh_get_or_undo(sdp, qd))
- qda[count++] = qd;
+ gfs2_assert_warn(sdp, qd->qd_change_sync);
+ error = bh_get(qd);
+ if (error) {
+ qd_ungrab_sync(qd);
+ continue;
+ }
+
+ qda[count++] = qd;
}
if (count) {
- do_sync(count, qda);
+ u64 sync_gen = READ_ONCE(sdp->sd_quota_sync_gen);
+
+ do_sync(count, qda, sync_gen);
for (x = 0; x < count; x++)
qd_unlock(qda[x]);
}
@@ -1253,12 +1244,12 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
qid_eq(qd->qd_id, make_kqid_gid(gid))))
continue;
+ spin_lock(&qd->qd_lockref.lock);
warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
- spin_lock(&qd_lock);
value += qd->qd_change;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd->qd_lockref.lock);
if (limit > 0 && (limit - value) < ap->allowed)
ap->allowed = limit - value;
@@ -1312,39 +1303,20 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
qid_eq(qd->qd_id, make_kqid_gid(gid))) {
- do_qc(qd, change, QC_CHANGE);
+ do_qc(qd, change);
}
}
}
-static bool qd_changed(struct gfs2_sbd *sdp)
-{
- struct gfs2_quota_data *qd;
- bool changed = false;
-
- spin_lock(&qd_lock);
- list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
- !test_bit(QDF_CHANGE, &qd->qd_flags))
- continue;
-
- changed = true;
- break;
- }
- spin_unlock(&qd_lock);
- return changed;
-}
-
int gfs2_quota_sync(struct super_block *sb, int type)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
- unsigned int num_qd;
- unsigned int x;
+ u64 sync_gen;
int error = 0;
- if (!qd_changed(sdp))
+ if (sb_rdonly(sdp->sd_vfs))
return 0;
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
@@ -1352,27 +1324,44 @@ int gfs2_quota_sync(struct super_block *sb, int type)
return -ENOMEM;
mutex_lock(&sdp->sd_quota_sync_mutex);
- sdp->sd_quota_sync_gen++;
+ sync_gen = sdp->sd_quota_sync_gen + 1;
do {
- num_qd = 0;
+ struct gfs2_quota_data *iter;
+ unsigned int num_qd = 0;
+ unsigned int x;
- for (;;) {
- error = qd_fish(sdp, qda + num_qd);
- if (error || !qda[num_qd])
- break;
- if (++num_qd == max_qd)
- break;
+ spin_lock(&qd_lock);
+ list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
+ if (qd_grab_sync(sdp, iter, sync_gen)) {
+ qda[num_qd++] = iter;
+ if (num_qd == max_qd)
+ break;
+ }
}
+ spin_unlock(&qd_lock);
- if (num_qd) {
+ if (!num_qd)
+ break;
+
+ for (x = 0; x < num_qd; x++) {
+ error = bh_get(qda[x]);
if (!error)
- error = do_sync(num_qd, qda);
+ continue;
+
+ while (x < num_qd)
+ qd_ungrab_sync(qda[--num_qd]);
+ break;
+ }
- for (x = 0; x < num_qd; x++)
- qd_unlock(qda[x]);
+ if (!error) {
+ WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen);
+ error = do_sync(num_qd, qda, sync_gen);
}
- } while (!error && num_qd == max_qd);
+
+ for (x = 0; x < num_qd; x++)
+ qd_unlock(qda[x]);
+ } while (!error);
mutex_unlock(&sdp->sd_quota_sync_mutex);
kfree(qda);
@@ -1407,6 +1396,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
unsigned int found = 0;
unsigned int hash;
unsigned int bm_size;
+ struct buffer_head *bh;
u64 dblock;
u32 extlen = 0;
int error;
@@ -1426,8 +1416,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
return error;
for (x = 0; x < blocks; x++) {
- struct buffer_head *bh;
- const struct gfs2_quota_change *qc;
+ struct gfs2_quota_change *qc;
unsigned int y;
if (!extlen) {
@@ -1440,15 +1429,13 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
if (!bh)
goto fail;
- if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
- brelse(bh);
- goto fail;
- }
+ if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
+ goto fail_brelse;
- qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
+ qc = (struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
y++, slot++) {
- struct gfs2_quota_data *qd;
+ struct gfs2_quota_data *old_qd, *qd;
s64 qc_change = be64_to_cpu(qc->qc_change);
u32 qc_flags = be32_to_cpu(qc->qc_flags);
enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
@@ -1461,10 +1448,8 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
hash = gfs2_qd_hash(sdp, qc_id);
qd = qd_alloc(hash, sdp, qc_id);
- if (qd == NULL) {
- brelse(bh);
- goto fail;
- }
+ if (qd == NULL)
+ goto fail_brelse;
set_bit(QDF_CHANGE, &qd->qd_flags);
qd->qd_change = qc_change;
@@ -1472,18 +1457,41 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
qd->qd_slot_ref = 1;
spin_lock(&qd_lock);
+ spin_lock_bucket(hash);
+ old_qd = gfs2_qd_search_bucket(hash, sdp, qc_id);
+ if (old_qd) {
+ fs_err(sdp, "Corruption found in quota_change%u "

+ "file: duplicate identifier in "
+ "slot %u\n",
+ sdp->sd_jdesc->jd_jid, slot);
+
+ spin_unlock_bucket(hash);
+ spin_unlock(&qd_lock);
+ qd_put(old_qd);
+
+ gfs2_glock_put(qd->qd_gl);
+ kmem_cache_free(gfs2_quotad_cachep, qd);
+
+ /* zero out the duplicate slot */
+ lock_buffer(bh);
+ memset(qc, 0, sizeof(*qc));
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+
+ continue;
+ }
BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
list_add(&qd->qd_list, &sdp->sd_quota_list);
atomic_inc(&sdp->sd_quota_count);
- spin_unlock(&qd_lock);
-
- spin_lock_bucket(hash);
hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
spin_unlock_bucket(hash);
+ spin_unlock(&qd_lock);
found++;
}
+ if (buffer_dirty(bh))
+ sync_dirty_buffer(bh);
brelse(bh);
dblock++;
extlen--;
@@ -1494,6 +1502,10 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
return 0;
+fail_brelse:
+ if (buffer_dirty(bh))
+ sync_dirty_buffer(bh);
+ brelse(bh);
fail:
gfs2_quota_cleanup(sdp);
return error;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 7a5aedfcd52a..6678060ed4d2 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1524,7 +1524,6 @@ out:
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
- gfs2_glock_add_to_lru(ip->i_gl);
gfs2_glock_put_eventually(ip->i_gl);
rcu_assign_pointer(ip->i_gl, NULL);
}
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index a5deb9f86831..8eae8d62a413 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -53,9 +53,9 @@
{(1UL << GLF_DIRTY), "y" }, \
{(1UL << GLF_LFLUSH), "f" }, \
{(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
- {(1UL << GLF_REPLY_PENDING), "r" }, \
- {(1UL << GLF_INITIAL), "I" }, \
- {(1UL << GLF_FROZEN), "F" }, \
+ {(1UL << GLF_HAVE_REPLY), "r" }, \
+ {(1UL << GLF_INITIAL), "a" }, \
+ {(1UL << GLF_HAVE_FROZEN_REPLY), "F" }, \
{(1UL << GLF_LRU), "L" }, \
{(1UL << GLF_OBJECT), "o" }, \
{(1UL << GLF_BLOCKING), "b" })
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index af4758d8d894..13be8d1d228b 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -99,12 +99,12 @@ out_unlock:
*/
int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
{
+ int flags = LM_FLAG_NOEXP | GL_EXACT;
int error;
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT,
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
&sdp->sd_freeze_gh);
- if (error)
+ if (error && error != GLR_TRYFAILED)
fs_err(sdp, "can't lock the freeze glock: %d\n", error);
return error;
}
@@ -206,9 +206,9 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
* on other nodes to be successful, otherwise we remain the owner of
* the glock as far as dlm is concerned.
*/
- if (i_gl->gl_ops->go_free) {
- set_bit(GLF_FREEING, &i_gl->gl_flags);
- wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
+ if (i_gl->gl_ops->go_unlocked) {
+ set_bit(GLF_UNLOCKED, &i_gl->gl_flags);
+ wait_on_bit(&i_gl->gl_flags, GLF_UNLOCKED, TASK_UNINTERRUPTIBLE);
}
/*
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 8c34798a0715..744e10b46904 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -200,6 +200,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
+ HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
if (S_ISDIR(mode)) {
inode->i_size = 2;
HFS_SB(sb)->folder_count++;
@@ -275,6 +276,8 @@ void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
for (count = 0, i = 0; i < 3; i++)
count += be16_to_cpu(ext[i].count);
HFS_I(inode)->first_blocks = count;
+ HFS_I(inode)->cached_start = 0;
+ HFS_I(inode)->cached_blocks = 0;
inode->i_size = HFS_I(inode)->phys_size = log_size;
HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 6764afa98a6f..eeac99765f0d 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -28,6 +28,7 @@
static struct kmem_cache *hfs_inode_cachep;
+MODULE_DESCRIPTION("Apple Macintosh file system support");
MODULE_LICENSE("GPL");
static int hfs_sync_fs(struct super_block *sb, int wait)
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index ca2ba8c9f82e..901e83d65d20 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -25,19 +25,8 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
fd->key = ptr + tree->max_key_len + 2;
hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
tree->cnid, __builtin_return_address(0));
- switch (tree->cnid) {
- case HFSPLUS_CAT_CNID:
- mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
- break;
- case HFSPLUS_EXT_CNID:
- mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
- break;
- case HFSPLUS_ATTR_CNID:
- mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
- break;
- default:
- BUG();
- }
+ mutex_lock_nested(&tree->tree_lock,
+ hfsplus_btree_lock_class(tree));
return 0;
}
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 3c572e44f2ad..9c51867dddc5 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -430,7 +430,8 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid,
hfsplus_free_extents(sb, ext_entry, total_blocks - start,
total_blocks);
total_blocks = start;
- mutex_lock(&fd.tree->tree_lock);
+ mutex_lock_nested(&fd.tree->tree_lock,
+ hfsplus_btree_lock_class(fd.tree));
} while (total_blocks > blocks);
hfs_find_exit(&fd);
@@ -592,7 +593,8 @@ void hfsplus_file_truncate(struct inode *inode)
alloc_cnt, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->first_extents);
hip->first_blocks = blk_cnt;
- mutex_lock(&fd.tree->tree_lock);
+ mutex_lock_nested(&fd.tree->tree_lock,
+ hfsplus_btree_lock_class(fd.tree));
break;
}
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
@@ -606,7 +608,8 @@ void hfsplus_file_truncate(struct inode *inode)
hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->cached_extents);
- mutex_lock(&fd.tree->tree_lock);
+ mutex_lock_nested(&fd.tree->tree_lock,
+ hfsplus_btree_lock_class(fd.tree));
if (blk_cnt > start) {
hip->extent_state |= HFSPLUS_EXT_DIRTY;
break;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 012a3d003fbe..9e78f181c24f 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -553,6 +553,27 @@ static inline __be32 __hfsp_ut2mt(time64_t ut)
return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET);
}
+static inline enum hfsplus_btree_mutex_classes
+hfsplus_btree_lock_class(struct hfs_btree *tree)
+{
+ enum hfsplus_btree_mutex_classes class;
+
+ switch (tree->cnid) {
+ case HFSPLUS_CAT_CNID:
+ class = CATALOG_BTREE_MUTEX;
+ break;
+ case HFSPLUS_EXT_CNID:
+ class = EXTENTS_BTREE_MUTEX;
+ break;
+ case HFSPLUS_ATTR_CNID:
+ class = ATTR_BTREE_MUTEX;
+ break;
+ default:
+ BUG();
+ }
+ return class;
+}
+
/* compatibility */
#define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 5661a2e24d03..40d04dba13ac 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -40,7 +40,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
/* Directory containing the bootable system */
vh->finder_info[0] = bvh->finder_info[0] =
- cpu_to_be32(parent_ino(dentry));
+ cpu_to_be32(d_parent_ino(dentry));
/*
* Bootloader. Just using the inode here breaks in the case of
@@ -51,7 +51,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
/* Per spec, the OS X system folder - same as finder_info[0] here */
vh->finder_info[5] = bvh->finder_info[5] =
- cpu_to_be32(parent_ino(dentry));
+ cpu_to_be32(d_parent_ino(dentry));
mutex_unlock(&sbi->vh_mutex);
return 0;
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 5a400259ae74..9a1a93e3888b 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -696,7 +696,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
return err;
}
- strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
+ strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
if (!strbuf) {
res = -ENOMEM;
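
The kmalloc()-to-kzalloc() switch above presumably keeps stale heap bytes out of the attribute-name buffer, which is only partially overwritten before its contents are copied out. The same zero-before-partial-fill pattern in plain C (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t len = 64;
        char *buf = calloc(1, len);   /* zero-filled, like kzalloc() */

        if (!buf)
            return 1;
        strcpy(buf, "com.apple.FinderInfo");
        /* the unused tail is guaranteed to be 0, not leftover heap data */
        printf("%zu trailing zero bytes\n", len - strlen(buf) - 1);
        free(buf);
        return 0;
    }
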
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index a73d27c4dd58..3eb747d26924 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -16,11 +16,16 @@
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include <linux/namei.h>
#include "hostfs.h"
#include <init.h>
#include <kern.h>
+struct hostfs_fs_info {
+ char *host_root_path;
+};
+
struct hostfs_inode_info {
int fd;
fmode_t mode;
@@ -90,8 +95,10 @@ static char *__dentry_name(struct dentry *dentry, char *name)
char *p = dentry_path_raw(dentry, name, PATH_MAX);
char *root;
size_t len;
+ struct hostfs_fs_info *fsi;
- root = dentry->d_sb->s_fs_info;
+ fsi = dentry->d_sb->s_fs_info;
+ root = fsi->host_root_path;
len = strlen(root);
if (IS_ERR(p)) {
__putname(name);
@@ -196,8 +203,10 @@ static int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
long long f_bavail;
long long f_files;
long long f_ffree;
+ struct hostfs_fs_info *fsi;
- err = do_statfs(dentry->d_sb->s_fs_info,
+ fsi = dentry->d_sb->s_fs_info;
+ err = do_statfs(fsi->host_root_path,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
&sf->f_namelen);
@@ -245,7 +254,11 @@ static void hostfs_free_inode(struct inode *inode)
static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
{
- const char *root_path = root->d_sb->s_fs_info;
+ struct hostfs_fs_info *fsi;
+ const char *root_path;
+
+ fsi = root->d_sb->s_fs_info;
+ root_path = fsi->host_root_path;
size_t offset = strlen(root_ino) + 1;
if (strlen(root_path) > offset)
@@ -432,31 +445,20 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
static int hostfs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
char *buffer;
- loff_t start = page_offset(page);
+ loff_t start = folio_pos(folio);
int bytes_read, ret = 0;
- buffer = kmap_local_page(page);
+ buffer = kmap_local_folio(folio, 0);
bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
PAGE_SIZE);
- if (bytes_read < 0) {
- ClearPageUptodate(page);
- SetPageError(page);
+ if (bytes_read < 0)
ret = bytes_read;
- goto out;
- }
-
- memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
-
- ClearPageError(page);
- SetPageUptodate(page);
-
- out:
- flush_dcache_page(page);
+ else
+ buffer = folio_zero_tail(folio, bytes_read, buffer);
kunmap_local(buffer);
- unlock_page(page);
+ folio_end_read(folio, ret == 0);
return ret;
}
@@ -922,10 +924,11 @@ static const struct inode_operations hostfs_link_iops = {
.get_link = hostfs_get_link,
};
-static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
+static int hostfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct hostfs_fs_info *fsi = sb->s_fs_info;
+ const char *host_root = fc->source;
struct inode *root_inode;
- char *host_root_path, *req_root = d;
int err;
sb->s_blocksize = 1024;
@@ -939,15 +942,15 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
return err;
/* NULL is printed as '(null)' by printf(): avoid that. */
- if (req_root == NULL)
- req_root = "";
+ if (fc->source == NULL)
+ host_root = "";
- sb->s_fs_info = host_root_path =
- kasprintf(GFP_KERNEL, "%s/%s", root_ino, req_root);
- if (host_root_path == NULL)
+ fsi->host_root_path =
+ kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
+ if (fsi->host_root_path == NULL)
return -ENOMEM;
- root_inode = hostfs_iget(sb, host_root_path);
+ root_inode = hostfs_iget(sb, fsi->host_root_path);
if (IS_ERR(root_inode))
return PTR_ERR(root_inode);
@@ -955,7 +958,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
char *name;
iput(root_inode);
- name = follow_link(host_root_path);
+ name = follow_link(fsi->host_root_path);
if (IS_ERR(name))
return PTR_ERR(name);
@@ -972,11 +975,38 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
return 0;
}
-static struct dentry *hostfs_read_sb(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
+static int hostfs_fc_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, hostfs_fill_super);
+}
+
+static void hostfs_fc_free(struct fs_context *fc)
{
- return mount_nodev(type, flags, data, hostfs_fill_sb_common);
+ struct hostfs_fs_info *fsi = fc->s_fs_info;
+
+ if (!fsi)
+ return;
+
+ kfree(fsi->host_root_path);
+ kfree(fsi);
+}
+
+static const struct fs_context_operations hostfs_context_ops = {
+ .get_tree = hostfs_fc_get_tree,
+ .free = hostfs_fc_free,
+};
+
+static int hostfs_init_fs_context(struct fs_context *fc)
+{
+ struct hostfs_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ fc->s_fs_info = fsi;
+ fc->ops = &hostfs_context_ops;
+ return 0;
}
static void hostfs_kill_sb(struct super_block *s)
@@ -986,11 +1016,11 @@ static void hostfs_kill_sb(struct super_block *s)
}
static struct file_system_type hostfs_type = {
- .owner = THIS_MODULE,
- .name = "hostfs",
- .mount = hostfs_read_sb,
- .kill_sb = hostfs_kill_sb,
- .fs_flags = 0,
+ .owner = THIS_MODULE,
+ .name = "hostfs",
+ .init_fs_context = hostfs_init_fs_context,
+ .kill_sb = hostfs_kill_sb,
+ .fs_flags = 0,
};
MODULE_ALIAS_FS("hostfs");
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 9184b4584b01..d0edf9ed33b6 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -472,9 +472,8 @@ out:
static int hpfs_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- char *link = page_address(page);
- struct inode *i = page->mapping->host;
+ char *link = folio_address(folio);
+ struct inode *i = folio->mapping->host;
struct fnode *fnode;
struct buffer_head *bh;
int err;
@@ -485,17 +484,9 @@ static int hpfs_symlink_read_folio(struct file *file, struct folio *folio)
goto fail;
err = hpfs_read_ea(i->i_sb, fnode, "SYMLINK", link, PAGE_SIZE);
brelse(bh);
- if (err)
- goto fail;
- hpfs_unlock(i->i_sb);
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
-
fail:
hpfs_unlock(i->i_sb);
- SetPageError(page);
- unlock_page(page);
+ folio_end_read(folio, err == 0);
return err;
}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 314834a078e9..e73717daa5f9 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -793,4 +793,5 @@ static void __exit exit_hpfs_fs(void)
module_init(init_hpfs_fs)
module_exit(exit_hpfs_fs)
+MODULE_DESCRIPTION("OS/2 HPFS file system support");
MODULE_LICENSE("GPL");
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 412f295acebe..81dab95f67ed 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -73,13 +73,13 @@ enum hugetlb_param {
};
static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_gid ("gid", Opt_gid),
fsparam_string("min_size", Opt_min_size),
fsparam_u32oct("mode", Opt_mode),
fsparam_string("nr_inodes", Opt_nr_inodes),
fsparam_string("pagesize", Opt_pagesize),
fsparam_string("size", Opt_size),
- fsparam_u32 ("uid", Opt_uid),
+ fsparam_uid ("uid", Opt_uid),
{}
};
@@ -1376,15 +1376,11 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
switch (opt) {
case Opt_uid:
- ctx->uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(ctx->uid))
- goto bad_val;
+ ctx->uid = result.uid;
return 0;
case Opt_gid:
- ctx->gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(ctx->gid))
- goto bad_val;
+ ctx->gid = result.gid;
return 0;
case Opt_mode:
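The fsparam_uid()/fsparam_gid() conversion here (and the matching isofs change further down) pushes the make_kuid()/make_kgid() validation into fs_parse() itself, so a parser just copies result.uid/result.gid and can never see an unmapped ID. A condensed sketch of the pattern, with made-up "example_*" names and Opt_* values rather than anything from an in-tree filesystem:

/* Sketch: fs_parse() option table using the uid/gid parameter types.
 * All "example_*" identifiers below are illustrative.
 */
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_uid, Opt_gid };

static const struct fs_parameter_spec example_param_specs[] = {
	fsparam_uid("uid", Opt_uid),
	fsparam_gid("gid", Opt_gid),
	{}
};

struct example_ctx {
	kuid_t	uid;
	kgid_t	gid;
};

static int example_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct example_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, example_param_specs, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		/* fs_parse() already mapped the value into the caller's
		 * user namespace and rejected invalid IDs. */
		ctx->uid = result.uid;
		break;
	case Opt_gid:
		ctx->gid = result.gid;
		break;
	}
	return 0;
}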
diff --git a/fs/inode.c b/fs/inode.c
index 3a41f83a4ba5..f356fe2ec2b6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -162,6 +162,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
+ inode->i_state = 0;
atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
@@ -231,6 +232,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
if (unlikely(security_inode_alloc(inode)))
return -ENOMEM;
+
this_cpu_inc(nr_inodes);
return 0;
@@ -886,36 +888,45 @@ long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
return freed;
}
-static void __wait_on_freeing_inode(struct inode *inode);
+static void __wait_on_freeing_inode(struct inode *inode, bool locked);
/*
* Called with the inode lock held.
*/
static struct inode *find_inode(struct super_block *sb,
struct hlist_head *head,
int (*test)(struct inode *, void *),
- void *data)
+ void *data, bool locked)
{
struct inode *inode = NULL;
+ if (locked)
+ lockdep_assert_held(&inode_hash_lock);
+ else
+ lockdep_assert_not_held(&inode_hash_lock);
+
+ rcu_read_lock();
repeat:
- hlist_for_each_entry(inode, head, i_hash) {
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_sb != sb)
continue;
if (!test(inode, data))
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
- __wait_on_freeing_inode(inode);
+ __wait_on_freeing_inode(inode, locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return inode;
}
+ rcu_read_unlock();
return NULL;
}
@@ -924,29 +935,39 @@ repeat:
* iget_locked for details.
*/
static struct inode *find_inode_fast(struct super_block *sb,
- struct hlist_head *head, unsigned long ino)
+ struct hlist_head *head, unsigned long ino,
+ bool locked)
{
struct inode *inode = NULL;
+ if (locked)
+ lockdep_assert_held(&inode_hash_lock);
+ else
+ lockdep_assert_not_held(&inode_hash_lock);
+
+ rcu_read_lock();
repeat:
- hlist_for_each_entry(inode, head, i_hash) {
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_ino != ino)
continue;
if (inode->i_sb != sb)
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
- __wait_on_freeing_inode(inode);
+ __wait_on_freeing_inode(inode, locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return inode;
}
+ rcu_read_unlock();
return NULL;
}
@@ -1004,14 +1025,7 @@ EXPORT_SYMBOL(get_next_ino);
*/
struct inode *new_inode_pseudo(struct super_block *sb)
{
- struct inode *inode = alloc_inode(sb);
-
- if (inode) {
- spin_lock(&inode->i_lock);
- inode->i_state = 0;
- spin_unlock(&inode->i_lock);
- }
- return inode;
+ return alloc_inode(sb);
}
/**
@@ -1161,7 +1175,7 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
again:
spin_lock(&inode_hash_lock);
- old = find_inode(inode->i_sb, head, test, data);
+ old = find_inode(inode->i_sb, head, test, data, true);
if (unlikely(old)) {
/*
* Uhhuh, somebody else created the same inode under us.
@@ -1235,7 +1249,6 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
struct inode *new = alloc_inode(sb);
if (new) {
- new->i_state = 0;
inode = inode_insert5(new, hashval, test, set, data);
if (unlikely(inode != new))
destroy_inode(new);
@@ -1246,6 +1259,47 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
EXPORT_SYMBOL(iget5_locked);
/**
+ * iget5_locked_rcu - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @hashval: hash value (usually inode number) to get
+ * @test: callback used for comparisons between inodes
+ * @set: callback used to initialize a new struct inode
+ * @data: opaque data pointer to pass to @test and @set
+ *
+ * This is equivalent to iget5_locked, except the @test callback must
+ * tolerate the inode not being stable, including being mid-teardown.
+ */
+struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode, *new;
+
+again:
+ inode = find_inode(sb, head, test, data, false);
+ if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
+ wait_on_inode(inode);
+ if (unlikely(inode_unhashed(inode))) {
+ iput(inode);
+ goto again;
+ }
+ return inode;
+ }
+
+ new = alloc_inode(sb);
+ if (new) {
+ inode = inode_insert5(new, hashval, test, set, data);
+ if (unlikely(inode != new))
+ destroy_inode(new);
+ }
+ return inode;
+}
+EXPORT_SYMBOL_GPL(iget5_locked_rcu);
+
+/**
* iget_locked - obtain an inode from a mounted file system
* @sb: super block of file system
* @ino: inode number to get
@@ -1263,9 +1317,7 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
again:
- spin_lock(&inode_hash_lock);
- inode = find_inode_fast(sb, head, ino);
- spin_unlock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino, false);
if (inode) {
if (IS_ERR(inode))
return NULL;
@@ -1283,7 +1335,7 @@ again:
spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
- old = find_inode_fast(sb, head, ino);
+ old = find_inode_fast(sb, head, ino, true);
if (!old) {
inode->i_ino = ino;
spin_lock(&inode->i_lock);
@@ -1419,7 +1471,7 @@ struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
struct inode *inode;
spin_lock(&inode_hash_lock);
- inode = find_inode(sb, head, test, data);
+ inode = find_inode(sb, head, test, data, true);
spin_unlock(&inode_hash_lock);
return IS_ERR(inode) ? NULL : inode;
@@ -1474,7 +1526,7 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino)
struct inode *inode;
again:
spin_lock(&inode_hash_lock);
- inode = find_inode_fast(sb, head, ino);
+ inode = find_inode_fast(sb, head, ino, true);
spin_unlock(&inode_hash_lock);
if (inode) {
@@ -2235,17 +2287,21 @@ EXPORT_SYMBOL(inode_needs_sync);
* wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
* will DTRT.
*/
-static void __wait_on_freeing_inode(struct inode *inode)
+static void __wait_on_freeing_inode(struct inode *inode, bool locked)
{
wait_queue_head_t *wq;
DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
wq = bit_waitqueue(&inode->i_state, __I_NEW);
prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode->i_lock);
- spin_unlock(&inode_hash_lock);
+ rcu_read_unlock();
+ if (locked)
+ spin_unlock(&inode_hash_lock);
schedule();
finish_wait(wq, &wait.wq_entry);
- spin_lock(&inode_hash_lock);
+ if (locked)
+ spin_lock(&inode_hash_lock);
+ rcu_read_lock();
}
static __initdata unsigned long ihash_entries;
@@ -2538,6 +2594,7 @@ bool in_group_or_capable(struct mnt_idmap *idmap,
return true;
return false;
}
+EXPORT_SYMBOL(in_group_or_capable);
/**
* mode_strip_sgid - handle the sgid bit for non-directories
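The iget5_locked_rcu() kerneldoc above is terse about what "tolerate the inode not being stable" means in practice: @test now runs under rcu_read_lock() against hash-chain entries that may be mid-teardown, so it must not block and may only look at fields that stay valid until the inode is actually freed after an RCU grace period. A rough caller sketch follows; struct example_inode_info and example_iget() are invented for illustration, not taken from an in-tree filesystem.

/* Sketch: an RCU-tolerant @test callback for iget5_locked_rcu().
 * "example_*" identifiers are illustrative only.
 */
#include <linux/fs.h>

struct example_inode_info {
	u64		object_id;	/* set once at creation, then stable */
	struct inode	vfs_inode;	/* containing object freed RCU-delayed */
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode_info, vfs_inode);
}

/* Called under rcu_read_lock(), possibly on an inode being torn down:
 * no locks, no blocking, only a comparison of a stable field. */
static int example_test(struct inode *inode, void *data)
{
	return EXAMPLE_I(inode)->object_id == *(u64 *)data;
}

static int example_set(struct inode *inode, void *data)
{
	EXAMPLE_I(inode)->object_id = *(u64 *)data;
	return 0;
}

static struct inode *example_iget(struct super_block *sb, u64 object_id)
{
	struct inode *inode;

	inode = iget5_locked_rcu(sb, (unsigned long)object_id,
				 example_test, example_set, &object_id);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* ... fill the inode from backing store here ... */
		unlock_new_inode(inode);
	}
	return inode;
}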
diff --git a/fs/internal.h b/fs/internal.h
index ab2225136f60..cdd73209eecb 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -17,6 +17,7 @@ struct fs_context;
struct pipe_inode_info;
struct iov_iter;
struct mnt_idmap;
+struct ns_common;
/*
* block/bdev.c
@@ -239,6 +240,7 @@ extern void mnt_pin_kill(struct mount *m);
* fs/nsfs.c
*/
extern const struct dentry_operations ns_dentry_operations;
+int open_namespace(struct ns_common *ns);
/*
* fs/stat.c:
@@ -247,6 +249,8 @@ extern const struct dentry_operations ns_dentry_operations;
int getname_statx_lookup_flags(int flags);
int do_statx(int dfd, struct filename *filename, unsigned int flags,
unsigned int mask, struct statx __user *buffer);
+int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
+ struct statx __user *buffer);
/*
* fs/splice.c:
@@ -321,3 +325,15 @@ struct stashed_operations {
int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
struct path *path);
void stashed_dentry_prune(struct dentry *dentry);
+/**
+ * path_mounted - check whether path is mounted
+ * @path: path to check
+ *
+ * Determine whether @path refers to the root of a mount.
+ *
+ * Return: true if @path is the root of a mount, false if not.
+ */
+static inline bool path_mounted(const struct path *path)
+{
+ return path->mnt->mnt_root == path->dentry;
+}
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 41c8f0c68ef5..f420c53d86ac 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -241,6 +241,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
unsigned block_size = (1 << block_bits);
size_t poff = offset_in_folio(folio, *pos);
size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
+ size_t orig_plen = plen;
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
@@ -277,7 +278,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
* handle both halves separately so that we properly zero data in the
* page cache for blocks that are entirely outside of i_size.
*/
- if (orig_pos <= isize && orig_pos + length > isize) {
+ if (orig_pos <= isize && orig_pos + orig_plen > isize) {
unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
@@ -306,8 +307,6 @@ static void iomap_finish_folio_read(struct folio *folio, size_t off,
spin_unlock_irqrestore(&ifs->state_lock, flags);
}
- if (error)
- folio_set_error(folio);
if (finished)
folio_end_read(folio, uptodate);
}
@@ -443,6 +442,24 @@ done:
return pos - orig_pos + plen;
}
+static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
+ struct iomap_readpage_ctx *ctx)
+{
+ struct folio *folio = ctx->cur_folio;
+ size_t offset = offset_in_folio(folio, iter->pos);
+ loff_t length = min_t(loff_t, folio_size(folio) - offset,
+ iomap_length(iter));
+ loff_t done, ret;
+
+ for (done = 0; done < length; done += ret) {
+ ret = iomap_readpage_iter(iter, ctx, done);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return done;
+}
+
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
struct iomap_iter iter = {
@@ -458,10 +475,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
-
- if (ret < 0)
- folio_set_error(folio);
+ iter.processed = iomap_read_folio_iter(&iter, &ctx);
if (ctx.bio) {
submit_bio(ctx.bio);
@@ -697,7 +711,6 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (folio_test_uptodate(folio))
return 0;
- folio_clear_error(folio);
do {
iomap_adjust_read_range(iter->inode, folio, &block_start,
@@ -898,11 +911,11 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
loff_t length = iomap_length(iter);
- size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
loff_t pos = iter->pos;
ssize_t total_written = 0;
long status = 0;
struct address_space *mapping = iter->inode->i_mapping;
+ size_t chunk = mapping_max_folio_size(mapping);
unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
do {
@@ -1543,8 +1556,6 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
/* walk all folios in bio, ending page IO on them */
bio_for_each_folio_all(fi, bio) {
- if (error)
- folio_set_error(fi.folio);
iomap_finish_folio_write(inode, fi.folio, fi.length);
folio_count++;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 93b1077a380a..f50311a6b429 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -326,8 +326,8 @@ static const struct fs_parameter_spec isofs_param_spec[] = {
fsparam_u32 ("session", Opt_session),
fsparam_u32 ("sbsector", Opt_sb),
fsparam_enum ("check", Opt_check, isofs_param_check),
- fsparam_u32 ("uid", Opt_uid),
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_uid ("uid", Opt_uid),
+ fsparam_gid ("gid", Opt_gid),
/* Note: mode/dmode historically accepted %u not strictly %o */
fsparam_u32 ("mode", Opt_mode),
fsparam_u32 ("dmode", Opt_dmode),
@@ -344,8 +344,6 @@ static int isofs_parse_param(struct fs_context *fc,
struct isofs_options *popt = fc->fs_private;
struct fs_parse_result result;
int opt;
- kuid_t uid;
- kgid_t gid;
unsigned int n;
/* There are no remountable options */
@@ -409,17 +407,11 @@ static int isofs_parse_param(struct fs_context *fc,
case Opt_ignore:
break;
case Opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- return -EINVAL;
- popt->uid = uid;
+ popt->uid = result.uid;
popt->uid_set = 1;
break;
case Opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- return -EINVAL;
- popt->gid = gid;
+ popt->gid = result.gid;
popt->gid_set = 1;
break;
case Opt_mode:
@@ -1625,4 +1617,5 @@ static void __exit exit_iso9660_fs(void)
module_init(init_iso9660_fs)
module_exit(exit_iso9660_fs)
+MODULE_DESCRIPTION("ISO 9660 CDROM file system support");
MODULE_LICENSE("GPL");
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index d6c17ad69dee..dbf911126e61 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -688,11 +688,10 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
*/
static int rock_ridge_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
- char *link = page_address(page);
+ char *link = folio_address(folio);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
struct buffer_head *bh;
char *rpnt = link;
@@ -779,9 +778,10 @@ repeat:
goto fail;
brelse(bh);
*rpnt = '\0';
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+ ret = 0;
+end:
+ folio_end_read(folio, ret == 0);
+ return ret;
/* error exit from macro */
out:
@@ -795,9 +795,8 @@ out_bad_span:
fail:
brelse(bh);
error:
- SetPageError(page);
- unlock_page(page);
- return -EIO;
+ ret = -EIO;
+ goto end;
}
const struct address_space_operations isofs_symlink_aops = {
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 03c4b9214f56..0d2825c131cc 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -3181,6 +3181,7 @@ static void __exit journal_exit(void)
jbd2_journal_destroy_caches();
}
+MODULE_DESCRIPTION("Generic filesystem journal-writing module");
MODULE_LICENSE("GPL");
module_init(journal_init);
module_exit(journal_exit);
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 62ea76da7fdf..e12cb145147e 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -95,13 +95,8 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
PAGE_SIZE);
- if (ret) {
- ClearPageUptodate(pg);
- SetPageError(pg);
- } else {
+ if (!ret)
SetPageUptodate(pg);
- ClearPageError(pg);
- }
flush_dcache_page(pg);
kunmap(pg);
@@ -304,10 +299,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
kunmap(pg);
- if (ret) {
- /* There was an error writing. */
- SetPageError(pg);
- }
+ if (ret)
+ mapping_set_error(mapping, ret);
/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
writtenlen -= min(writtenlen, (start - aligned_start));
@@ -330,7 +323,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
it gets reread */
jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
__func__);
- SetPageError(pg);
ClearPageUptodate(pg);
}
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 0fb7afac298e..9987055293b3 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -557,9 +557,11 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
size_check:
if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
+ int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
+
printk(KERN_ERR "ea_get: invalid extended attribute\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
- ea_buf->xattr, ea_size, 1);
+ ea_buf->xattr, size, 1);
ea_release(inode, ea_buf);
rc = -EIO;
goto clean_up;
diff --git a/fs/libfs.c b/fs/libfs.c
index b635ee5adbcc..8aa34870449f 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1854,6 +1854,80 @@ static const struct dentry_operations generic_ci_dentry_ops = {
.d_revalidate = fscrypt_d_revalidate,
#endif
};
+
+/**
+ * generic_ci_match() - Match a name (case-insensitively) with a dirent.
+ * This is a filesystem helper for comparison with directory entries.
+ * generic_ci_d_compare should be used in VFS' ->d_compare instead.
+ *
+ * @parent: Inode of the parent of the dirent under comparison
+ * @name: name under lookup.
+ * @folded_name: Optional pre-folded name under lookup
+ * @de_name: Dirent name.
+ * @de_name_len: dirent name length.
+ *
+ * Test whether a case-insensitive directory entry matches the filename
+ * being searched. If @folded_name is provided, it is used instead of
+ * recalculating the casefold of @name.
+ *
+ * Return: > 0 if the directory entry matches, 0 if it doesn't match, or
+ * < 0 on error.
+ */
+int generic_ci_match(const struct inode *parent,
+ const struct qstr *name,
+ const struct qstr *folded_name,
+ const u8 *de_name, u32 de_name_len)
+{
+ const struct super_block *sb = parent->i_sb;
+ const struct unicode_map *um = sb->s_encoding;
+ struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
+ struct qstr dirent = QSTR_INIT(de_name, de_name_len);
+ int res = 0;
+
+ if (IS_ENCRYPTED(parent)) {
+ const struct fscrypt_str encrypted_name =
+ FSTR_INIT((u8 *) de_name, de_name_len);
+
+ if (WARN_ON_ONCE(!fscrypt_has_encryption_key(parent)))
+ return -EINVAL;
+
+ decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
+ if (!decrypted_name.name)
+ return -ENOMEM;
+ res = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name,
+ &decrypted_name);
+ if (res < 0) {
+ kfree(decrypted_name.name);
+ return res;
+ }
+ dirent.name = decrypted_name.name;
+ dirent.len = decrypted_name.len;
+ }
+
+ /*
+ * Attempt a case-sensitive match first. It is cheaper and
+ * should cover most lookups, including all the sane
+ * applications that expect a case-sensitive filesystem.
+ */
+
+ if (dirent.len == name->len &&
+ !memcmp(name->name, dirent.name, dirent.len))
+ goto out;
+
+ if (folded_name->name)
+ res = utf8_strncasecmp_folded(um, folded_name, &dirent);
+ else
+ res = utf8_strncasecmp(um, name, &dirent);
+
+out:
+ kfree(decrypted_name.name);
+ if (res < 0 && sb_has_strict_encoding(sb)) {
+ pr_err_ratelimited("Directory contains filename that is invalid UTF-8");
+ return 0;
+ }
+ return !res;
+}
+EXPORT_SYMBOL(generic_ci_match);
#endif
#ifdef CONFIG_FS_ENCRYPTION
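generic_ci_match() returns > 0 on a match, 0 on a mismatch and < 0 on error, and the optional @folded_name lets a filesystem casefold the lookup name once per directory scan instead of once per entry. A short sketch of how a directory walk might call it; the dirent structure and helper name are hypothetical, since every filesystem iterates its own on-disk format.

/* Sketch: comparing raw directory entries with generic_ci_match().
 * "example_dirent" and "example_entry_matches" are illustrative.
 */
#include <linux/fs.h>

struct example_dirent {
	const u8	*name;
	u32		name_len;
};

static bool example_entry_matches(struct inode *dir,
				  const struct qstr *name,
				  const struct qstr *folded_name,
				  const struct example_dirent *de)
{
	int ret = generic_ci_match(dir, name, folded_name,
				   de->name, de->name_len);

	/* A negative return (e.g. an undecryptable name) is treated as
	 * "no match" here; a real caller may prefer to propagate it. */
	return ret > 0;
}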
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index ac9f9d84510e..fe3e23dd29c3 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -7,8 +7,7 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_LOCKD) += lockd.o
-lockd-objs-y += clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
- svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o
-lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
-lockd-objs-$(CONFIG_PROC_FS) += procfs.o
-lockd-objs := $(lockd-objs-y)
+lockd-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
+ svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o
+lockd-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
+lockd-$(CONFIG_PROC_FS) += procfs.o
diff --git a/fs/locks.c b/fs/locks.c
index 90c8746874de..bdd94c32256f 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1367,9 +1367,9 @@ retry:
locks_wake_up_blocks(&left->c);
}
out:
+ trace_posix_lock_inode(inode, request, error);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
- trace_posix_lock_inode(inode, request, error);
/*
* Free any unused locks.
*/
@@ -2448,8 +2448,9 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
error = do_lock_file_wait(filp, cmd, file_lock);
/*
- * Attempt to detect a close/fcntl race and recover by releasing the
- * lock that was just acquired. There is no need to do that when we're
+ * Detect close/fcntl races and recover by zapping all POSIX locks
+ * associated with this file and our files_struct, just like on
+ * filp_flush(). There is no need to do that when we're
* unlocking though, or for OFD locks.
*/
if (!error && file_lock->c.flc_type != F_UNLCK &&
@@ -2464,9 +2465,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
f = files_lookup_fd_locked(files, fd);
spin_unlock(&files->file_lock);
if (f != filp) {
- file_lock->c.flc_type = F_UNLCK;
- error = do_lock_file_wait(filp, cmd, file_lock);
- WARN_ON_ONCE(error);
+ locks_remove_posix(filp, files);
error = -EBADF;
}
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 7f9a2d8aa420..1c3df63162ef 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -730,5 +730,6 @@ static void __exit exit_minix_fs(void)
module_init(init_minix_fs)
module_exit(exit_minix_fs)
+MODULE_DESCRIPTION("Minix file system");
MODULE_LICENSE("GPL");
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index d6031acc34f0..a944a0f17b53 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -213,8 +213,7 @@ static int minix_rename(struct mnt_idmap *idmap,
if (!new_de)
goto out_dir;
err = minix_set_link(new_de, new_page, old_inode);
- kunmap(new_page);
- put_page(new_page);
+ unmap_and_put_page(new_page, new_de);
if (err)
goto out_dir;
inode_set_ctime_current(new_inode);
diff --git a/fs/mount.h b/fs/mount.h
index 4a42fc68f4cc..ad4b1ddebb54 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -16,6 +16,8 @@ struct mnt_namespace {
u64 event;
unsigned int nr_mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
+ struct rb_node mnt_ns_tree_node; /* node in the mnt_ns_tree */
+ refcount_t passive; /* number references not pinning @mounts */
} __randomize_layout;
struct mnt_pcp {
@@ -152,3 +154,4 @@ static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
}
extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor);
+bool has_locked_children(struct mount *mnt, struct dentry *dentry);
diff --git a/fs/mpage.c b/fs/mpage.c
index fa8b99a199fa..b5b5ddf9d513 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -48,13 +48,8 @@ static void mpage_read_end_io(struct bio *bio)
struct folio_iter fi;
int err = blk_status_to_errno(bio->bi_status);
- bio_for_each_folio_all(fi, bio) {
- if (err)
- folio_set_error(fi.folio);
- else
- folio_mark_uptodate(fi.folio);
- folio_unlock(fi.folio);
- }
+ bio_for_each_folio_all(fi, bio)
+ folio_end_read(fi.folio, err == 0);
bio_put(bio);
}
@@ -65,10 +60,8 @@ static void mpage_write_end_io(struct bio *bio)
int err = blk_status_to_errno(bio->bi_status);
bio_for_each_folio_all(fi, bio) {
- if (err) {
- folio_set_error(fi.folio);
+ if (err)
mapping_set_error(fi.folio->mapping, err);
- }
folio_end_writeback(fi.folio);
}
diff --git a/fs/namei.c b/fs/namei.c
index 37fb0a8aa09a..3a4c40e12f78 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -126,7 +126,7 @@
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
struct filename *
-getname_flags(const char __user *filename, int flags, int *empty)
+getname_flags(const char __user *filename, int flags)
{
struct filename *result;
char *kname;
@@ -148,9 +148,20 @@ getname_flags(const char __user *filename, int flags, int *empty)
result->name = kname;
len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
- if (unlikely(len < 0)) {
- __putname(result);
- return ERR_PTR(len);
+ /*
+ * Handle both empty path and copy failure in one go.
+ */
+ if (unlikely(len <= 0)) {
+ if (unlikely(len < 0)) {
+ __putname(result);
+ return ERR_PTR(len);
+ }
+
+ /* The empty path is special. */
+ if (!(flags & LOOKUP_EMPTY)) {
+ __putname(result);
+ return ERR_PTR(-ENOENT);
+ }
}
/*
@@ -180,6 +191,12 @@ getname_flags(const char __user *filename, int flags, int *empty)
kfree(result);
return ERR_PTR(len);
}
+ /* The empty path is special. */
+ if (unlikely(!len) && !(flags & LOOKUP_EMPTY)) {
+ __putname(kname);
+ kfree(result);
+ return ERR_PTR(-ENOENT);
+ }
if (unlikely(len == PATH_MAX)) {
__putname(kname);
kfree(result);
@@ -188,16 +205,6 @@ getname_flags(const char __user *filename, int flags, int *empty)
}
atomic_set(&result->refcnt, 1);
- /* The empty path is special. */
- if (unlikely(!len)) {
- if (empty)
- *empty = 1;
- if (!(flags & LOOKUP_EMPTY)) {
- putname(result);
- return ERR_PTR(-ENOENT);
- }
- }
-
result->uptr = filename;
result->aname = NULL;
audit_getname(result);
@@ -209,13 +216,13 @@ getname_uflags(const char __user *filename, int uflags)
{
int flags = (uflags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
- return getname_flags(filename, flags, NULL);
+ return getname_flags(filename, flags);
}
struct filename *
getname(const char __user * filename)
{
- return getname_flags(filename, 0, NULL);
+ return getname_flags(filename, 0);
}
struct filename *
@@ -1233,29 +1240,48 @@ int may_linkat(struct mnt_idmap *idmap, const struct path *link)
*
* Returns 0 if the open is allowed, -ve on error.
*/
-static int may_create_in_sticky(struct mnt_idmap *idmap,
- struct nameidata *nd, struct inode *const inode)
+static int may_create_in_sticky(struct mnt_idmap *idmap, struct nameidata *nd,
+ struct inode *const inode)
{
umode_t dir_mode = nd->dir_mode;
- vfsuid_t dir_vfsuid = nd->dir_vfsuid;
+ vfsuid_t dir_vfsuid = nd->dir_vfsuid, i_vfsuid;
- if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
- (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
- likely(!(dir_mode & S_ISVTX)) ||
- vfsuid_eq(i_uid_into_vfsuid(idmap, inode), dir_vfsuid) ||
- vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid()))
+ if (likely(!(dir_mode & S_ISVTX)))
return 0;
- if (likely(dir_mode & 0002) ||
- (dir_mode & 0020 &&
- ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
- (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
- const char *operation = S_ISFIFO(inode->i_mode) ?
- "sticky_create_fifo" :
- "sticky_create_regular";
- audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
+ if (S_ISREG(inode->i_mode) && !sysctl_protected_regular)
+ return 0;
+
+ if (S_ISFIFO(inode->i_mode) && !sysctl_protected_fifos)
+ return 0;
+
+ i_vfsuid = i_uid_into_vfsuid(idmap, inode);
+
+ if (vfsuid_eq(i_vfsuid, dir_vfsuid))
+ return 0;
+
+ if (vfsuid_eq_kuid(i_vfsuid, current_fsuid()))
+ return 0;
+
+ if (likely(dir_mode & 0002)) {
+ audit_log_path_denied(AUDIT_ANOM_CREAT, "sticky_create");
return -EACCES;
}
+
+ if (dir_mode & 0020) {
+ if (sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) {
+ audit_log_path_denied(AUDIT_ANOM_CREAT,
+ "sticky_create_fifo");
+ return -EACCES;
+ }
+
+ if (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode)) {
+ audit_log_path_denied(AUDIT_ANOM_CREAT,
+ "sticky_create_regular");
+ return -EACCES;
+ }
+ }
+
return 0;
}
@@ -1712,17 +1738,26 @@ static struct dentry *lookup_slow(const struct qstr *name,
}
static inline int may_lookup(struct mnt_idmap *idmap,
- struct nameidata *nd)
+ struct nameidata *restrict nd)
{
- if (nd->flags & LOOKUP_RCU) {
- int err = inode_permission(idmap, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
- if (!err) // success, keep going
- return 0;
- if (!try_to_unlazy(nd))
- return -ECHILD; // redo it all non-lazy
- if (err != -ECHILD) // hard error
- return err;
- }
+ int err, mask;
+
+ mask = nd->flags & LOOKUP_RCU ? MAY_NOT_BLOCK : 0;
+ err = inode_permission(idmap, nd->inode, mask | MAY_EXEC);
+ if (likely(!err))
+ return 0;
+
+ // If we failed, and we weren't in LOOKUP_RCU, it's final
+ if (!(nd->flags & LOOKUP_RCU))
+ return err;
+
+ // Drop out of RCU mode to make sure it wasn't transient
+ if (!try_to_unlazy(nd))
+ return -ECHILD; // redo it all non-lazy
+
+ if (err != -ECHILD) // hard error
+ return err;
+
return inode_permission(idmap, nd->inode, MAY_EXEC);
}
@@ -2163,21 +2198,39 @@ EXPORT_SYMBOL(hashlen_string);
/*
* Calculate the length and hash of the path component, and
- * return the "hash_len" as the result.
+ * return the length as the result.
*/
-static inline u64 hash_name(const void *salt, const char *name)
+static inline const char *hash_name(struct nameidata *nd,
+ const char *name,
+ unsigned long *lastword)
{
- unsigned long a = 0, b, x = 0, y = (unsigned long)salt;
+ unsigned long a, b, x, y = (unsigned long)nd->path.dentry;
unsigned long adata, bdata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- len = 0;
- goto inside;
+ /*
+ * The first iteration is special, because it can result in
+ * '.' and '..' and has no mixing other than the final fold.
+ */
+ a = load_unaligned_zeropad(name);
+ b = a ^ REPEAT_BYTE('/');
+ if (has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)) {
+ adata = prep_zero_mask(a, adata, &constants);
+ bdata = prep_zero_mask(b, bdata, &constants);
+ mask = create_zero_mask(adata | bdata);
+ a &= zero_bytemask(mask);
+ *lastword = a;
+ len = find_zero(mask);
+ nd->last.hash = fold_hash(a, y);
+ nd->last.len = len;
+ return name + len;
+ }
+ len = 0;
+ x = 0;
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
-inside:
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
@@ -2185,11 +2238,25 @@ inside:
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
mask = create_zero_mask(adata | bdata);
- x ^= a & zero_bytemask(mask);
+ a &= zero_bytemask(mask);
+ x ^= a;
+ len += find_zero(mask);
+ *lastword = 0; // Multi-word components cannot be DOT or DOTDOT
- return hashlen_create(fold_hash(x, y), len + find_zero(mask));
+ nd->last.hash = fold_hash(x, y);
+ nd->last.len = len;
+ return name + len;
}
+/*
+ * Note that the 'last' word is always zero-masked, but
+ * was loaded as a possibly big-endian word.
+ */
+#ifdef __BIG_ENDIAN
+ #define LAST_WORD_IS_DOT (0x2eul << (BITS_PER_LONG-8))
+ #define LAST_WORD_IS_DOTDOT (0x2e2eul << (BITS_PER_LONG-16))
+#endif
+
#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
/* Return the hash of a string of known length */
@@ -2222,22 +2289,35 @@ EXPORT_SYMBOL(hashlen_string);
* We know there's a real path component here of at least
* one character.
*/
-static inline u64 hash_name(const void *salt, const char *name)
+static inline const char *hash_name(struct nameidata *nd, const char *name, unsigned long *lastword)
{
- unsigned long hash = init_name_hash(salt);
- unsigned long len = 0, c;
+ unsigned long hash = init_name_hash(nd->path.dentry);
+ unsigned long len = 0, c, last = 0;
c = (unsigned char)*name;
do {
+ last = (last << 8) + c;
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
- return hashlen_create(end_name_hash(hash), len);
+
+ // This is reliable for DOT or DOTDOT, since the component
+ // cannot contain NUL characters - top bits being zero means
+ // we cannot have had any other pathnames.
+ *lastword = last;
+ nd->last.hash = end_name_hash(hash);
+ nd->last.len = len;
+ return name + len;
}
#endif
+#ifndef LAST_WORD_IS_DOT
+ #define LAST_WORD_IS_DOT 0x2e
+ #define LAST_WORD_IS_DOTDOT 0x2e2e
+#endif
+
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
@@ -2266,45 +2346,38 @@ static int link_path_walk(const char *name, struct nameidata *nd)
for(;;) {
struct mnt_idmap *idmap;
const char *link;
- u64 hash_len;
- int type;
+ unsigned long lastword;
idmap = mnt_idmap(nd->path.mnt);
err = may_lookup(idmap, nd);
if (err)
return err;
- hash_len = hash_name(nd->path.dentry, name);
+ nd->last.name = name;
+ name = hash_name(nd, name, &lastword);
- type = LAST_NORM;
- if (name[0] == '.') switch (hashlen_len(hash_len)) {
- case 2:
- if (name[1] == '.') {
- type = LAST_DOTDOT;
- nd->state |= ND_JUMPED;
- }
- break;
- case 1:
- type = LAST_DOT;
- }
- if (likely(type == LAST_NORM)) {
- struct dentry *parent = nd->path.dentry;
+ switch(lastword) {
+ case LAST_WORD_IS_DOTDOT:
+ nd->last_type = LAST_DOTDOT;
+ nd->state |= ND_JUMPED;
+ break;
+
+ case LAST_WORD_IS_DOT:
+ nd->last_type = LAST_DOT;
+ break;
+
+ default:
+ nd->last_type = LAST_NORM;
nd->state &= ~ND_JUMPED;
+
+ struct dentry *parent = nd->path.dentry;
if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
- struct qstr this = { { .hash_len = hash_len }, .name = name };
- err = parent->d_op->d_hash(parent, &this);
+ err = parent->d_op->d_hash(parent, &nd->last);
if (err < 0)
return err;
- hash_len = this.hash_len;
- name = this.name;
}
}
- nd->last.hash_len = hash_len;
- nd->last.name = name;
- nd->last_type = type;
-
- name += hashlen_len(hash_len);
if (!*name)
goto OK;
/*
@@ -2922,16 +2995,16 @@ int path_pts(struct path *path)
}
#endif
-int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
- struct path *path, int *empty)
+int user_path_at(int dfd, const char __user *name, unsigned flags,
+ struct path *path)
{
- struct filename *filename = getname_flags(name, flags, empty);
+ struct filename *filename = getname_flags(name, flags);
int ret = filename_lookup(dfd, filename, flags, path, NULL);
putname(filename);
return ret;
}
-EXPORT_SYMBOL(user_path_at_empty);
+EXPORT_SYMBOL(user_path_at);
int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode)
@@ -3572,8 +3645,12 @@ static const char *open_last_lookups(struct nameidata *nd,
else
inode_lock_shared(dir->d_inode);
dentry = lookup_open(nd, file, op, got_write);
- if (!IS_ERR(dentry) && (file->f_mode & FMODE_CREATED))
- fsnotify_create(dir->d_inode, dentry);
+ if (!IS_ERR(dentry)) {
+ if (file->f_mode & FMODE_CREATED)
+ fsnotify_create(dir->d_inode, dentry);
+ if (file->f_mode & FMODE_OPENED)
+ fsnotify_open(file);
+ }
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
@@ -3700,6 +3777,8 @@ int vfs_tmpfile(struct mnt_idmap *idmap,
mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
error = dir->i_op->tmpfile(idmap, dir, file, mode);
dput(child);
+ if (file->f_mode & FMODE_OPENED)
+ fsnotify_open(file);
if (error)
return error;
/* Don't check for other permissions, the inode was just created */
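The LAST_WORD_IS_DOT / LAST_WORD_IS_DOTDOT comparison above is sound because a path component can never contain a NUL byte: once the loaded word has its bytes past the terminator zero-masked, "." and ".." are the only components whose word value collapses to exactly 0x2e or 0x2e2e (shifted into the top bytes on big-endian, as the #ifdef handles). A little-endian userspace check of that property, purely illustrative:

/* Sketch: the zero-masked last word uniquely identifies "." and "..".
 * Little-endian layout assumed; the kernel's big-endian constants are
 * the same values shifted to the top of the word.
 */
#include <assert.h>
#include <string.h>

int main(void)
{
	unsigned long w;

	w = 0;
	memcpy(&w, ".", 1);		/* bytes after '.' stay zero */
	assert(w == 0x2eUL);		/* LAST_WORD_IS_DOT */

	w = 0;
	memcpy(&w, "..", 2);
	assert(w == 0x2e2eUL);		/* LAST_WORD_IS_DOTDOT */

	w = 0;
	memcpy(&w, "a", 1);		/* any other component differs */
	assert(w != 0x2eUL && w != 0x2e2eUL);
	return 0;
}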
diff --git a/fs/namespace.c b/fs/namespace.c
index 5a51315c6678..221db9de4729 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -70,7 +70,8 @@ static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
/* Don't allow confusion with old 32bit mount ID */
-static atomic64_t mnt_id_ctr = ATOMIC64_INIT(1ULL << 32);
+#define MNT_UNIQUE_ID_OFFSET (1ULL << 32)
+static atomic64_t mnt_id_ctr = ATOMIC64_INIT(MNT_UNIQUE_ID_OFFSET);
static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
@@ -78,6 +79,8 @@ static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
+static DEFINE_RWLOCK(mnt_ns_tree_lock);
+static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
struct mount_kattr {
unsigned int attr_set;
@@ -103,6 +106,109 @@ EXPORT_SYMBOL_GPL(fs_kobj);
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
+static int mnt_ns_cmp(u64 seq, const struct mnt_namespace *ns)
+{
+ u64 seq_b = ns->seq;
+
+ if (seq < seq_b)
+ return -1;
+ if (seq > seq_b)
+ return 1;
+ return 0;
+}
+
+static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
+{
+ if (!node)
+ return NULL;
+ return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node);
+}
+
+static bool mnt_ns_less(struct rb_node *a, const struct rb_node *b)
+{
+ struct mnt_namespace *ns_a = node_to_mnt_ns(a);
+ struct mnt_namespace *ns_b = node_to_mnt_ns(b);
+ u64 seq_a = ns_a->seq;
+
+ return mnt_ns_cmp(seq_a, ns_b) < 0;
+}
+
+static void mnt_ns_tree_add(struct mnt_namespace *ns)
+{
+ guard(write_lock)(&mnt_ns_tree_lock);
+ rb_add(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_less);
+}
+
+static void mnt_ns_release(struct mnt_namespace *ns)
+{
+ lockdep_assert_not_held(&mnt_ns_tree_lock);
+
+ /* keep alive for {list,stat}mount() */
+ if (refcount_dec_and_test(&ns->passive)) {
+ put_user_ns(ns->user_ns);
+ kfree(ns);
+ }
+}
+DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))
+
+static void mnt_ns_tree_remove(struct mnt_namespace *ns)
+{
+ /* remove from global mount namespace list */
+ if (!is_anon_ns(ns)) {
+ guard(write_lock)(&mnt_ns_tree_lock);
+ rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
+ }
+
+ mnt_ns_release(ns);
+}
+
+/*
+ * Returns the mount namespace which either has the specified id, or has the
+ * next smallest id after the specified one.

+ */
+static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id)
+{
+ struct rb_node *node = mnt_ns_tree.rb_node;
+ struct mnt_namespace *ret = NULL;
+
+ lockdep_assert_held(&mnt_ns_tree_lock);
+
+ while (node) {
+ struct mnt_namespace *n = node_to_mnt_ns(node);
+
+ if (mnt_ns_id <= n->seq) {
+ ret = node_to_mnt_ns(node);
+ if (mnt_ns_id == n->seq)
+ break;
+ node = node->rb_left;
+ } else {
+ node = node->rb_right;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Lookup a mount namespace by id and take a passive reference count. Taking a
+ * passive reference means the mount namespace can be emptied if e.g., the last
+ * task holding an active reference exits. To access the mounts of the
+ * namespace the @namespace_sem must first be acquired. If the namespace has
+ * already shut down before acquiring @namespace_sem, {list,stat}mount() will
+ * see that the mount rbtree of the namespace is empty.
+ */
+static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
+{
+ struct mnt_namespace *ns;
+
+ guard(read_lock)(&mnt_ns_tree_lock);
+ ns = mnt_ns_find_id_at(mnt_ns_id);
+ if (!ns || ns->seq != mnt_ns_id)
+ return NULL;
+
+ refcount_inc(&ns->passive);
+ return ns;
+}
+
static inline void lock_mount_hash(void)
{
write_seqlock(&mount_lock);
@@ -1448,6 +1554,30 @@ static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
return ret;
}
+/*
+ * Returns the mount which either has the specified mnt_id, or has the next
+ * smaller id before the specified one.
+ */
+static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
+{
+ struct rb_node *node = ns->mounts.rb_node;
+ struct mount *ret = NULL;
+
+ while (node) {
+ struct mount *m = node_to_mount(node);
+
+ if (mnt_id >= m->mnt_id_unique) {
+ ret = node_to_mount(node);
+ if (mnt_id == m->mnt_id_unique)
+ break;
+ node = node->rb_right;
+ } else {
+ node = node->rb_left;
+ }
+ }
+ return ret;
+}
+
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
@@ -1846,19 +1976,6 @@ bool may_mount(void)
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
-/**
- * path_mounted - check whether path is mounted
- * @path: path to check
- *
- * Determine whether @path refers to the root of a mount.
- *
- * Return: true if @path is the root of a mount, false if not.
- */
-static inline bool path_mounted(const struct path *path)
-{
- return path->mnt->mnt_root == path->dentry;
-}
-
static void warn_mandlock(void)
{
pr_warn_once("=======================================================\n"
@@ -1966,69 +2083,72 @@ static bool mnt_ns_loop(struct dentry *dentry)
return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
-struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
int flag)
{
- struct mount *res, *p, *q, *r, *parent;
+ struct mount *res, *src_parent, *src_root_child, *src_mnt,
+ *dst_parent, *dst_mnt;
- if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
+ if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
return ERR_PTR(-EINVAL);
if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
return ERR_PTR(-EINVAL);
- res = q = clone_mnt(mnt, dentry, flag);
- if (IS_ERR(q))
- return q;
+ res = dst_mnt = clone_mnt(src_root, dentry, flag);
+ if (IS_ERR(dst_mnt))
+ return dst_mnt;
- q->mnt_mountpoint = mnt->mnt_mountpoint;
+ src_parent = src_root;
+ dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint;
- p = mnt;
- list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
- struct mount *s;
- if (!is_subdir(r->mnt_mountpoint, dentry))
+ list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
+ if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
continue;
- for (s = r; s; s = next_mnt(s, r)) {
+ for (src_mnt = src_root_child; src_mnt;
+ src_mnt = next_mnt(src_mnt, src_root_child)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
- IS_MNT_UNBINDABLE(s)) {
- if (s->mnt.mnt_flags & MNT_LOCKED) {
+ IS_MNT_UNBINDABLE(src_mnt)) {
+ if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
/* Both unbindable and locked. */
- q = ERR_PTR(-EPERM);
+ dst_mnt = ERR_PTR(-EPERM);
goto out;
} else {
- s = skip_mnt_tree(s);
+ src_mnt = skip_mnt_tree(src_mnt);
continue;
}
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
- is_mnt_ns_file(s->mnt.mnt_root)) {
- s = skip_mnt_tree(s);
+ is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
+ src_mnt = skip_mnt_tree(src_mnt);
continue;
}
- while (p != s->mnt_parent) {
- p = p->mnt_parent;
- q = q->mnt_parent;
+ while (src_parent != src_mnt->mnt_parent) {
+ src_parent = src_parent->mnt_parent;
+ dst_mnt = dst_mnt->mnt_parent;
}
- p = s;
- parent = q;
- q = clone_mnt(p, p->mnt.mnt_root, flag);
- if (IS_ERR(q))
+
+ src_parent = src_mnt;
+ dst_parent = dst_mnt;
+ dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
+ if (IS_ERR(dst_mnt))
goto out;
lock_mount_hash();
- list_add_tail(&q->mnt_list, &res->mnt_list);
- attach_mnt(q, parent, p->mnt_mp, false);
+ list_add_tail(&dst_mnt->mnt_list, &res->mnt_list);
+ attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false);
unlock_mount_hash();
}
}
return res;
+
out:
if (res) {
lock_mount_hash();
umount_tree(res, UMOUNT_SYNC);
unlock_mount_hash();
}
- return q;
+ return dst_mnt;
}
/* Caller should check returned pointer for errors */
@@ -2078,7 +2198,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
namespace_unlock();
}
-static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;
@@ -3709,8 +3829,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
if (!is_anon_ns(ns))
ns_free_inum(&ns->ns);
dec_mnt_namespaces(ns->ucounts);
- put_user_ns(ns->user_ns);
- kfree(ns);
+ mnt_ns_tree_remove(ns);
}
/*
@@ -3749,7 +3868,9 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
if (!anon)
new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
refcount_set(&new_ns->ns.count, 1);
+ refcount_set(&new_ns->passive, 1);
new_ns->mounts = RB_ROOT;
+ RB_CLEAR_NODE(&new_ns->mnt_ns_tree_node);
init_waitqueue_head(&new_ns->poll);
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
@@ -3826,6 +3947,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
while (p->mnt.mnt_root != q->mnt.mnt_root)
p = next_mnt(skip_mnt_tree(p), old);
}
+ mnt_ns_tree_add(new_ns);
namespace_unlock();
if (rootmnt)
@@ -4843,6 +4965,40 @@ static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
return 0;
}
+static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
+{
+ s->sm.mask |= STATMOUNT_MNT_NS_ID;
+ s->sm.mnt_ns_id = ns->seq;
+}
+
+static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
+{
+ struct vfsmount *mnt = s->mnt;
+ struct super_block *sb = mnt->mnt_sb;
+ int err;
+
+ if (sb->s_op->show_options) {
+ size_t start = seq->count;
+
+ err = sb->s_op->show_options(seq, mnt->mnt_root);
+ if (err)
+ return err;
+
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
+
+ if (seq->count == start)
+ return 0;
+
+ /* skip leading comma */
+ memmove(seq->buf + start, seq->buf + start + 1,
+ seq->count - start - 1);
+ seq->count--;
+ }
+
+ return 0;
+}
+
static int statmount_string(struct kstatmount *s, u64 flag)
{
int ret;
@@ -4863,6 +5019,10 @@ static int statmount_string(struct kstatmount *s, u64 flag)
sm->mnt_point = seq->count;
ret = statmount_mnt_point(s, seq);
break;
+ case STATMOUNT_MNT_OPTS:
+ sm->mnt_opts = seq->count;
+ ret = statmount_mnt_opts(s, seq);
+ break;
default:
WARN_ON_ONCE(true);
return -EINVAL;
@@ -4903,23 +5063,84 @@ static int copy_statmount_to_user(struct kstatmount *s)
return 0;
}
-static int do_statmount(struct kstatmount *s)
+static struct mount *listmnt_next(struct mount *curr, bool reverse)
{
- struct mount *m = real_mount(s->mnt);
+ struct rb_node *node;
+
+ if (reverse)
+ node = rb_prev(&curr->mnt_node);
+ else
+ node = rb_next(&curr->mnt_node);
+
+ return node_to_mount(node);
+}
+
+static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
+{
+ struct mount *first, *child;
+
+ rwsem_assert_held(&namespace_sem);
+
+ /* We're looking at our own ns, just use get_fs_root. */
+ if (ns == current->nsproxy->mnt_ns) {
+ get_fs_root(current->fs, root);
+ return 0;
+ }
+
+ /*
+ * We have to find the first mount in our ns and use that, however it
+ * may not exist, so handle that properly.
+ */
+ if (RB_EMPTY_ROOT(&ns->mounts))
+ return -ENOENT;
+
+ first = child = ns->root;
+ for (;;) {
+ child = listmnt_next(child, false);
+ if (!child)
+ return -ENOENT;
+ if (child->mnt_parent == first)
+ break;
+ }
+
+ root->mnt = mntget(&child->mnt);
+ root->dentry = dget(root->mnt->mnt_root);
+ return 0;
+}
+
+static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
+ struct mnt_namespace *ns)
+{
+ struct path root __free(path_put) = {};
+ struct mount *m;
int err;
+ /* Has the namespace already been emptied? */
+ if (mnt_ns_id && RB_EMPTY_ROOT(&ns->mounts))
+ return -ENOENT;
+
+ s->mnt = lookup_mnt_in_ns(mnt_id, ns);
+ if (!s->mnt)
+ return -ENOENT;
+
+ err = grab_requested_root(ns, &root);
+ if (err)
+ return err;
+
/*
* Don't trigger audit denials. We just want to determine what
* mounts to show users.
*/
- if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
- !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
+ m = real_mount(s->mnt);
+ if (!is_path_reachable(m, m->mnt.mnt_root, &root) &&
+ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
err = security_sb_statfs(s->mnt->mnt_root);
if (err)
return err;
+ s->root = root;
if (s->mask & STATMOUNT_SB_BASIC)
statmount_sb_basic(s);
@@ -4938,6 +5159,12 @@ static int do_statmount(struct kstatmount *s)
if (!err && s->mask & STATMOUNT_MNT_POINT)
err = statmount_string(s, STATMOUNT_MNT_POINT);
+ if (!err && s->mask & STATMOUNT_MNT_OPTS)
+ err = statmount_string(s, STATMOUNT_MNT_OPTS);
+
+ if (!err && s->mask & STATMOUNT_MNT_NS_ID)
+ statmount_mnt_ns_id(s, ns);
+
if (err)
return err;
@@ -4955,6 +5182,9 @@ static inline bool retry_statmount(const long ret, size_t *seq_size)
return true;
}
+#define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
+ STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS)
+
static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
struct statmount __user *buf, size_t bufsize,
size_t seq_size)
@@ -4966,10 +5196,18 @@ static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
ks->mask = kreq->param;
ks->buf = buf;
ks->bufsize = bufsize;
- ks->seq.size = seq_size;
- ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
- if (!ks->seq.buf)
- return -ENOMEM;
+
+ if (ks->mask & STATMOUNT_STRING_REQ) {
+ if (bufsize == sizeof(ks->sm))
+ return -EOVERFLOW;
+
+ ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
+ if (!ks->seq.buf)
+ return -ENOMEM;
+
+ ks->seq.size = seq_size;
+ }
+
return 0;
}
@@ -4979,7 +5217,7 @@ static int copy_mnt_id_req(const struct mnt_id_req __user *req,
int ret;
size_t usize;
- BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER0);
+ BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);
ret = get_user(usize, &req->size);
if (ret)
@@ -4994,16 +5232,32 @@ static int copy_mnt_id_req(const struct mnt_id_req __user *req,
return ret;
if (kreq->spare != 0)
return -EINVAL;
+ /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
+ if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
+ return -EINVAL;
return 0;
}
+/*
+ * If the user requested a specific mount namespace id, look that up and return
+ * that, or if not simply grab a passive reference on our mount namespace and
+ * return that.
+ */
+static struct mnt_namespace *grab_requested_mnt_ns(u64 mnt_ns_id)
+{
+ if (mnt_ns_id)
+ return lookup_mnt_ns(mnt_ns_id);
+ refcount_inc(&current->nsproxy->mnt_ns->passive);
+ return current->nsproxy->mnt_ns;
+}
+
SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
struct statmount __user *, buf, size_t, bufsize,
unsigned int, flags)
{
- struct vfsmount *mnt;
+ struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
+ struct kstatmount *ks __free(kfree) = NULL;
struct mnt_id_req kreq;
- struct kstatmount ks;
/* We currently support retrieval of 3 strings. */
size_t seq_size = 3 * PATH_MAX;
int ret;
@@ -5015,64 +5269,88 @@ SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
if (ret)
return ret;
+ ns = grab_requested_mnt_ns(kreq.mnt_ns_id);
+ if (!ns)
+ return -ENOENT;
+
+ if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
+ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ return -ENOENT;
+
+ ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
+ if (!ks)
+ return -ENOMEM;
+
retry:
- ret = prepare_kstatmount(&ks, &kreq, buf, bufsize, seq_size);
+ ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
if (ret)
return ret;
- down_read(&namespace_sem);
- mnt = lookup_mnt_in_ns(kreq.mnt_id, current->nsproxy->mnt_ns);
- if (!mnt) {
- up_read(&namespace_sem);
- kvfree(ks.seq.buf);
- return -ENOENT;
- }
-
- ks.mnt = mnt;
- get_fs_root(current->fs, &ks.root);
- ret = do_statmount(&ks);
- path_put(&ks.root);
- up_read(&namespace_sem);
+ scoped_guard(rwsem_read, &namespace_sem)
+ ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns);
if (!ret)
- ret = copy_statmount_to_user(&ks);
- kvfree(ks.seq.buf);
+ ret = copy_statmount_to_user(ks);
+ kvfree(ks->seq.buf);
if (retry_statmount(ret, &seq_size))
goto retry;
return ret;
}
-static struct mount *listmnt_next(struct mount *curr)
+static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
+ u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids,
+ bool reverse)
{
- return node_to_mount(rb_next(&curr->mnt_node));
-}
-
-static ssize_t do_listmount(struct mount *first, struct path *orig,
- u64 mnt_parent_id, u64 __user *mnt_ids,
- size_t nr_mnt_ids, const struct path *root)
-{
- struct mount *r;
+ struct path root __free(path_put) = {};
+ struct path orig;
+ struct mount *r, *first;
ssize_t ret;
+ rwsem_assert_held(&namespace_sem);
+
+ ret = grab_requested_root(ns, &root);
+ if (ret)
+ return ret;
+
+ if (mnt_parent_id == LSMT_ROOT) {
+ orig = root;
+ } else {
+ orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
+ if (!orig.mnt)
+ return -ENOENT;
+ orig.dentry = orig.mnt->mnt_root;
+ }
+
/*
* Don't trigger audit denials. We just want to determine what
* mounts to show users.
*/
- if (!is_path_reachable(real_mount(orig->mnt), orig->dentry, root) &&
- !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
+ if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) &&
+ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
- ret = security_sb_statfs(orig->dentry);
+ ret = security_sb_statfs(orig.dentry);
if (ret)
return ret;
- for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r)) {
+ if (!last_mnt_id) {
+ if (reverse)
+ first = node_to_mount(rb_last(&ns->mounts));
+ else
+ first = node_to_mount(rb_first(&ns->mounts));
+ } else {
+ if (reverse)
+ first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
+ else
+ first = mnt_find_id_at(ns, last_mnt_id + 1);
+ }
+
+ for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
if (r->mnt_id_unique == mnt_parent_id)
continue;
- if (!is_path_reachable(r, r->mnt.mnt_root, orig))
+ if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
continue;
- if (put_user(r->mnt_id_unique, mnt_ids))
- return -EFAULT;
+ *mnt_ids = r->mnt_id_unique;
mnt_ids++;
nr_mnt_ids--;
ret++;
@@ -5080,22 +5358,26 @@ static ssize_t do_listmount(struct mount *first, struct path *orig,
return ret;
}
-SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *,
- mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
+SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
+ u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
{
- struct mnt_namespace *ns = current->nsproxy->mnt_ns;
+ u64 *kmnt_ids __free(kvfree) = NULL;
+ const size_t maxcount = 1000000;
+ struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
struct mnt_id_req kreq;
- struct mount *first;
- struct path root, orig;
- u64 mnt_parent_id, last_mnt_id;
- const size_t maxcount = (size_t)-1 >> 3;
+ u64 last_mnt_id;
ssize_t ret;
- if (flags)
+ if (flags & ~LISTMOUNT_REVERSE)
return -EINVAL;
+ /*
+ * If the mount namespace really has more than 1 million mounts the
+ * caller must iterate over the mount namespace (and reconsider their
+ * system design...).
+ */
if (unlikely(nr_mnt_ids > maxcount))
- return -EFAULT;
+ return -EOVERFLOW;
if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
return -EFAULT;
@@ -5103,33 +5385,37 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *,
ret = copy_mnt_id_req(req, &kreq);
if (ret)
return ret;
- mnt_parent_id = kreq.mnt_id;
+
last_mnt_id = kreq.param;
+ /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
+ if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
+ return -EINVAL;
- down_read(&namespace_sem);
- get_fs_root(current->fs, &root);
- if (mnt_parent_id == LSMT_ROOT) {
- orig = root;
- } else {
- ret = -ENOENT;
- orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
- if (!orig.mnt)
- goto err;
- orig.dentry = orig.mnt->mnt_root;
- }
- if (!last_mnt_id)
- first = node_to_mount(rb_first(&ns->mounts));
- else
- first = mnt_find_id_at(ns, last_mnt_id + 1);
+ kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids),
+ GFP_KERNEL_ACCOUNT);
+ if (!kmnt_ids)
+ return -ENOMEM;
+
+ ns = grab_requested_mnt_ns(kreq.mnt_ns_id);
+ if (!ns)
+ return -ENOENT;
+
+ if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
+ !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ return -ENOENT;
+
+ scoped_guard(rwsem_read, &namespace_sem)
+ ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids,
+ nr_mnt_ids, (flags & LISTMOUNT_REVERSE));
+ if (ret <= 0)
+ return ret;
+
+ if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids)))
+ return -EFAULT;
- ret = do_listmount(first, &orig, mnt_parent_id, mnt_ids, nr_mnt_ids, &root);
-err:
- path_put(&root);
- up_read(&namespace_sem);
return ret;
}
-
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
@@ -5157,6 +5443,8 @@ static void __init init_mount_tree(void)
set_fs_pwd(current->fs, &root);
set_fs_root(current->fs, &root);
+
+ mnt_ns_tree_add(ns);
}
void __init mnt_init(void)
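The statmount()/listmount() additions above (mnt_ns_id in struct mnt_id_req, STATMOUNT_MNT_OPTS, STATMOUNT_MNT_NS_ID, LISTMOUNT_REVERSE) are userspace-visible, so a quick sketch of how they combine may help. It assumes the uapi <linux/mount.h> and __NR_statmount/__NR_listmount from a kernel carrying this series; there were no libc wrappers at the time, hence the raw syscall() helpers.

/* Sketch: walking the current mount namespace newest-first and printing
 * per-mount details, including the new options string and namespace id.
 */
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int sys_statmount(const struct mnt_id_req *req, struct statmount *buf,
			 size_t bufsize, unsigned int flags)
{
	return syscall(__NR_statmount, req, buf, bufsize, flags);
}

static ssize_t sys_listmount(const struct mnt_id_req *req, uint64_t *ids,
			     size_t nr_ids, unsigned int flags)
{
	return syscall(__NR_listmount, req, ids, nr_ids, flags);
}

int main(void)
{
	struct statmount *sm;
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER1,
		.mnt_id	= LSMT_ROOT,	/* list the whole namespace */
	};
	uint64_t ids[64];
	size_t bufsize = 1 << 16;
	ssize_t n;

	sm = malloc(bufsize);
	if (!sm)
		return 1;

	/* Newest mounts first, courtesy of LISTMOUNT_REVERSE. */
	n = sys_listmount(&req, ids, 64, LISTMOUNT_REVERSE);
	if (n < 0) {
		perror("listmount");
		return 1;
	}

	for (ssize_t i = 0; i < n; i++) {
		memset(sm, 0, bufsize);
		req.mnt_id = ids[i];
		req.param = STATMOUNT_FS_TYPE | STATMOUNT_MNT_POINT |
			    STATMOUNT_MNT_OPTS | STATMOUNT_MNT_NS_ID;
		if (sys_statmount(&req, sm, bufsize, 0) < 0)
			continue;
		printf("mount %llu", (unsigned long long)ids[i]);
		if (sm->mask & STATMOUNT_FS_TYPE)
			printf(" type %s", sm->str + sm->fs_type);
		if (sm->mask & STATMOUNT_MNT_POINT)
			printf(" on %s", sm->str + sm->mnt_point);
		if (sm->mask & STATMOUNT_MNT_OPTS)
			printf(" opts %s", sm->str + sm->mnt_opts);
		if (sm->mask & STATMOUNT_MNT_NS_ID)
			printf(" ns %llu", (unsigned long long)sm->mnt_ns_id);
		printf("\n");
	}
	return 0;
}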
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index a6bb03bea920..4c0401dbbfcf 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -117,7 +117,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
if (folio->index == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
- _debug("no unlock");
+ kdebug("no unlock");
else
folio_unlock(folio);
}
@@ -204,7 +204,7 @@ void netfs_readahead(struct readahead_control *ractl)
struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
int ret;
- _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+ kenter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
if (readahead_count(ractl) == 0)
return;
@@ -268,7 +268,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
struct folio *sink = NULL;
int ret;
- _enter("%lx", folio->index);
+ kenter("%lx", folio->index);
rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
@@ -508,7 +508,7 @@ retry:
have_folio:
*_folio = folio;
- _leave(" = 0");
+ kleave(" = 0");
return 0;
error_put:
@@ -518,7 +518,7 @@ error:
folio_unlock(folio);
folio_put(folio);
}
- _leave(" = %d", ret);
+ kleave(" = %d", ret);
return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
@@ -536,7 +536,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t flen = folio_size(folio);
int ret;
- _enter("%zx @%llx", flen, start);
+ kenter("%zx @%llx", flen, start);
ret = -ENOMEM;
@@ -567,7 +567,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
error:
- _leave(" = %d", ret);
+ kleave(" = %d", ret);
return ret;
}
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 1121601536d1..ecbc99ec7d36 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -56,7 +56,7 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
struct netfs_group *group = netfs_folio_group(folio);
loff_t pos = folio_file_pos(folio);
- _enter("");
+ kenter("");
if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
return NETFS_FLUSH_CONTENT;
@@ -181,7 +181,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
struct folio *folio, *writethrough = NULL;
enum netfs_how_to_modify howto;
enum netfs_folio_trace trace;
- unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
+ unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
ssize_t written = 0, ret, ret2;
loff_t i_size, pos = iocb->ki_pos, from, to;
size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
@@ -272,12 +272,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
*/
howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
flen, offset, part, maybe_trouble);
- _debug("howto %u", howto);
+ kdebug("howto %u", howto);
switch (howto) {
case NETFS_JUST_PREFETCH:
ret = netfs_prefetch_for_write(file, folio, offset, part);
if (ret < 0) {
- _debug("prefetch = %zd", ret);
+ kdebug("prefetch = %zd", ret);
goto error_folio_unlock;
}
break;
@@ -418,7 +418,7 @@ out:
}
iocb->ki_pos += written;
- _leave(" = %zd [%zd]", written, ret);
+ kleave(" = %zd [%zd]", written, ret);
return written ? written : ret;
error_folio_unlock:
@@ -491,7 +491,7 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct netfs_inode *ictx = netfs_inode(inode);
ssize_t ret;
- _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+ kenter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
@@ -523,17 +523,23 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
struct netfs_group *group;
struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
struct inode *inode = file_inode(file);
struct netfs_inode *ictx = netfs_inode(inode);
vm_fault_t ret = VM_FAULT_RETRY;
int err;
- _enter("%lx", folio->index);
+ kenter("%lx", folio->index);
sb_start_pagefault(inode->i_sb);
if (folio_lock_killable(folio) < 0)
goto out;
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ ret = VM_FAULT_NOPAGE;
+ goto out;
+ }
if (folio_wait_writeback_killable(folio)) {
ret = VM_FAULT_LOCKED;
@@ -549,9 +555,9 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
group = netfs_folio_group(folio);
if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
folio_unlock(folio);
- err = filemap_fdatawait_range(inode->i_mapping,
- folio_pos(folio),
- folio_pos(folio) + folio_size(folio));
+ err = filemap_fdatawrite_range(mapping,
+ folio_pos(folio),
+ folio_pos(folio) + folio_size(folio));
switch (err) {
case 0:
ret = VM_FAULT_RETRY;
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 10a1e4da6bda..b6debac6205f 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -33,7 +33,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
size_t orig_count = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
- _enter("");
+ kenter("");
if (!orig_count)
return 0; /* Don't update atime */
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index f516460e994e..792ef17bae21 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -12,7 +12,7 @@
static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
{
struct inode *inode = wreq->inode;
- unsigned long long end = wreq->start + wreq->len;
+ unsigned long long end = wreq->start + wreq->transferred;
if (!wreq->error &&
i_size_read(inode) < end) {
@@ -37,7 +37,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
size_t len = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
- _enter("");
+ kenter("");
/* We're going to need a bounce buffer if what we transmit is going to
* be different in some way to the source buffer, e.g. because it gets
@@ -45,7 +45,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
*/
// TODO
- _debug("uw %llx-%llx", start, end);
+ kdebug("uw %llx-%llx", start, end);
wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
iocb->ki_flags & IOCB_DIRECT ?
@@ -92,10 +92,11 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
if (async)
wreq->iocb = iocb;
+ wreq->len = iov_iter_count(&wreq->io_iter);
wreq->cleanup = netfs_cleanup_dio_write;
- ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
+ ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
if (ret < 0) {
- _debug("begin = %zd", ret);
+ kdebug("begin = %zd", ret);
goto out;
}
@@ -142,7 +143,7 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
loff_t pos = iocb->ki_pos;
unsigned long long end = pos + iov_iter_count(from) - 1;
- _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
+ kenter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
diff --git a/fs/netfs/fscache_cache.c b/fs/netfs/fscache_cache.c
index 9397ed39b0b4..288a73c3072d 100644
--- a/fs/netfs/fscache_cache.c
+++ b/fs/netfs/fscache_cache.c
@@ -237,7 +237,7 @@ int fscache_add_cache(struct fscache_cache *cache,
{
int n_accesses;
- _enter("{%s,%s}", ops->name, cache->name);
+ kenter("{%s,%s}", ops->name, cache->name);
BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
@@ -257,7 +257,7 @@ int fscache_add_cache(struct fscache_cache *cache,
up_write(&fscache_addremove_sem);
pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
- _leave(" = 0 [%s]", cache->name);
+ kleave(" = 0 [%s]", cache->name);
return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
diff --git a/fs/netfs/fscache_cookie.c b/fs/netfs/fscache_cookie.c
index bce2492186d0..4d1e8bf4c615 100644
--- a/fs/netfs/fscache_cookie.c
+++ b/fs/netfs/fscache_cookie.c
@@ -456,7 +456,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
{
struct fscache_cookie *cookie;
- _enter("V=%x", volume->debug_id);
+ kenter("V=%x", volume->debug_id);
if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
return NULL;
@@ -484,7 +484,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
trace_fscache_acquire(cookie);
fscache_stat(&fscache_n_acquires_ok);
- _leave(" = c=%08x", cookie->debug_id);
+ kleave(" = c=%08x", cookie->debug_id);
return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
@@ -505,7 +505,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
bool need_withdraw = false;
- _enter("");
+ kenter("");
if (!cookie->volume->cache_priv) {
fscache_create_volume(cookie->volume, true);
@@ -519,7 +519,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
need_withdraw = true;
- _leave(" [fail]");
+ kleave(" [fail]");
goto out;
}
@@ -572,7 +572,7 @@ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
bool queue = false;
int n_active;
- _enter("c=%08x", cookie->debug_id);
+ kenter("c=%08x", cookie->debug_id);
if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
"Trying to use relinquished cookie\n"))
@@ -636,7 +636,7 @@ again:
spin_unlock(&cookie->lock);
if (queue)
fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
- _leave("");
+ kleave("");
}
EXPORT_SYMBOL(__fscache_use_cookie);
@@ -702,7 +702,7 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
enum fscache_cookie_state state;
bool wake = false;
- _enter("c=%x", cookie->debug_id);
+ kenter("c=%x", cookie->debug_id);
again:
spin_lock(&cookie->lock);
@@ -820,7 +820,7 @@ out:
spin_unlock(&cookie->lock);
if (wake)
wake_up_cookie_state(cookie);
- _leave("");
+ kleave("");
}
static void fscache_cookie_worker(struct work_struct *work)
@@ -867,7 +867,7 @@ static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
spin_unlock(&cookie->lock);
fscache_stat(&fscache_n_cookies_lru_expired);
- _debug("lru c=%x", cookie->debug_id);
+ kdebug("lru c=%x", cookie->debug_id);
__fscache_withdraw_cookie(cookie);
}
@@ -971,7 +971,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
if (retire)
fscache_stat(&fscache_n_relinquishes_retire);
- _enter("c=%08x{%d},%d",
+ kenter("c=%08x{%d},%d",
cookie->debug_id, atomic_read(&cookie->n_active), retire);
if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
@@ -1050,7 +1050,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
{
bool is_caching;
- _enter("c=%x", cookie->debug_id);
+ kenter("c=%x", cookie->debug_id);
fscache_stat(&fscache_n_invalidates);
@@ -1072,7 +1072,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
default:
spin_unlock(&cookie->lock);
- _leave(" [no %u]", cookie->state);
+ kleave(" [no %u]", cookie->state);
return;
case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -1081,7 +1081,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
fallthrough;
case FSCACHE_COOKIE_STATE_CREATING:
spin_unlock(&cookie->lock);
- _leave(" [look %x]", cookie->inval_counter);
+ kleave(" [look %x]", cookie->inval_counter);
return;
case FSCACHE_COOKIE_STATE_ACTIVE:
@@ -1094,7 +1094,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
if (is_caching)
fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
- _leave(" [inv]");
+ kleave(" [inv]");
return;
}
}
diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c
index 38637e5c9b57..bf4eaeec44fb 100644
--- a/fs/netfs/fscache_io.c
+++ b/fs/netfs/fscache_io.c
@@ -28,12 +28,12 @@ bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
again:
if (!fscache_cache_is_live(cookie->volume->cache)) {
- _leave(" [broken]");
+ kleave(" [broken]");
return false;
}
state = fscache_cookie_state(cookie);
- _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+ kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
switch (state) {
case FSCACHE_COOKIE_STATE_CREATING:
@@ -52,7 +52,7 @@ again:
case FSCACHE_COOKIE_STATE_DROPPED:
case FSCACHE_COOKIE_STATE_RELINQUISHING:
default:
- _leave(" [not live]");
+ kleave(" [not live]");
return false;
}
@@ -92,7 +92,7 @@ again:
spin_lock(&cookie->lock);
state = fscache_cookie_state(cookie);
- _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+ kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
switch (state) {
case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -140,7 +140,7 @@ failed:
cres->cache_priv = NULL;
cres->ops = NULL;
fscache_end_cookie_access(cookie, fscache_access_io_not_live);
- _leave(" = -ENOBUFS");
+ kleave(" = -ENOBUFS");
return -ENOBUFS;
}
@@ -224,7 +224,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
if (len == 0)
goto abandon;
- _enter("%llx,%zx", start, len);
+ kenter("%llx,%zx", start, len);
wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
if (!wreq)
diff --git a/fs/netfs/fscache_main.c b/fs/netfs/fscache_main.c
index 42e98bb523e3..bf9b33d26e31 100644
--- a/fs/netfs/fscache_main.c
+++ b/fs/netfs/fscache_main.c
@@ -99,7 +99,7 @@ error_wq:
*/
void __exit fscache_exit(void)
{
- _enter("");
+ kenter("");
kmem_cache_destroy(fscache_cookie_jar);
fscache_proc_cleanup();
diff --git a/fs/netfs/fscache_volume.c b/fs/netfs/fscache_volume.c
index cdf991bdd9de..2e2a405ca9b0 100644
--- a/fs/netfs/fscache_volume.c
+++ b/fs/netfs/fscache_volume.c
@@ -27,6 +27,19 @@ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
return volume;
}
+struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref;
+
+ if (!__refcount_inc_not_zero(&volume->ref, &ref))
+ return NULL;
+
+ trace_fscache_volume(volume->debug_id, ref + 1, where);
+ return volume;
+}
+EXPORT_SYMBOL(fscache_try_get_volume);
+
static void fscache_see_volume(struct fscache_volume *volume,
enum fscache_volume_trace where)
{
@@ -251,7 +264,7 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
fscache_see_volume(volume, fscache_volume_new_acquire);
fscache_stat(&fscache_n_volumes);
up_write(&fscache_addremove_sem);
- _leave(" = v=%x", volume->debug_id);
+ kleave(" = v=%x", volume->debug_id);
return volume;
err_vol:
@@ -420,6 +433,7 @@ void fscache_put_volume(struct fscache_volume *volume,
fscache_free_volume(volume);
}
}
+EXPORT_SYMBOL(fscache_put_volume);
/*
* Relinquish a volume representation cookie.
@@ -452,7 +466,7 @@ void fscache_withdraw_volume(struct fscache_volume *volume)
{
int n_accesses;
- _debug("withdraw V=%x", volume->debug_id);
+ kdebug("withdraw V=%x", volume->debug_id);
/* Allow wakeups on dec-to-0 */
n_accesses = atomic_dec_return(&volume->n_accesses);
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 95e281a8af78..21e46bc9aa49 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -34,7 +34,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
/*
* main.c
*/
-extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
@@ -63,15 +62,6 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
-#define NETFS_FLAG_PUT_MARK BIT(0)
-#define NETFS_FLAG_PAGECACHE_MARK BIT(1)
-int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
- struct folio *folio, unsigned int flags,
- gfp_t gfp_mask);
-int netfs_add_folios_to_buffer(struct xarray *buffer,
- struct address_space *mapping,
- pgoff_t index, pgoff_t to, gfp_t gfp_mask);
-void netfs_clear_buffer(struct xarray *buffer);
/*
* objects.c
@@ -353,8 +343,6 @@ extern const struct seq_operations fscache_volumes_seq_ops;
struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
enum fscache_volume_trace where);
-void fscache_put_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
struct fscache_cookie *cookie,
enum fscache_access_trace why);
@@ -365,42 +353,12 @@ void fscache_create_volume(struct fscache_volume *volume, bool wait);
* debug tracing
*/
#define dbgprintk(FMT, ...) \
- printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+ pr_debug("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
-#ifdef __KDEBUG
-#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
-#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
-#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
-
-#elif defined(CONFIG_NETFS_DEBUG)
-#define _enter(FMT, ...) \
-do { \
- if (netfs_debug) \
- kenter(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _leave(FMT, ...) \
-do { \
- if (netfs_debug) \
- kleave(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _debug(FMT, ...) \
-do { \
- if (netfs_debug) \
- kdebug(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#else
-#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-#endif
-
/*
* assertions
*/
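For reference, what the reworked debug helpers above boil down to; this is read directly off the macro definitions in this hunk, the only assumption being the usual dynamic-debug behaviour of pr_debug():

	/* kenter("R=%x", rreq->debug_id) now expands, via dbgprintk(), to: */
	pr_debug("[%-6.6s] ==> %s(R=%x)\n", current->comm, __func__, rreq->debug_id);

	/*
	 * The __KDEBUG/CONFIG_NETFS_DEBUG wrappers and the netfs_debug module
	 * parameter are gone; every call site is an ordinary pr_debug() that can
	 * be enabled per-site at runtime with CONFIG_DYNAMIC_DEBUG.
	 */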
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index c93851b98368..c7576481c321 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -130,7 +130,7 @@ static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
if (count == remaining)
return;
- _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+ kdebug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
rreq->debug_id, subreq->debug_index,
iov_iter_count(&subreq->io_iter), subreq->transferred,
subreq->len, rreq->i_size,
@@ -326,7 +326,7 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
struct netfs_io_request *rreq = subreq->rreq;
int u;
- _enter("R=%x[%x]{%llx,%lx},%zd",
+ kenter("R=%x[%x]{%llx,%lx},%zd",
rreq->debug_id, subreq->debug_index,
subreq->start, subreq->flags, transferred_or_error);
@@ -435,7 +435,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
struct netfs_inode *ictx = netfs_inode(rreq->inode);
size_t lsize;
- _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+ kenter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
if (rreq->origin != NETFS_DIO_READ) {
source = netfs_cache_prepare_read(subreq, rreq->i_size);
@@ -518,7 +518,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
subreq->start = rreq->start + rreq->submitted;
subreq->len = io_iter->count;
- _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
+ kdebug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
/* Call out to the cache to find out what it can do with the remaining
@@ -570,7 +570,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
struct iov_iter io_iter;
int ret;
- _enter("R=%x %llx-%llx",
+ kenter("R=%x %llx-%llx",
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
if (rreq->len == 0) {
@@ -593,7 +593,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
atomic_set(&rreq->nr_outstanding, 1);
io_iter = rreq->io_iter;
do {
- _debug("submit %llx + %llx >= %llx",
+ kdebug("submit %llx + %llx >= %llx",
rreq->start, rreq->submitted, rreq->i_size);
if (rreq->origin == NETFS_DIO_READ &&
rreq->start + rreq->submitted >= rreq->i_size)
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 5f0f438e5d21..db824c372842 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -20,10 +20,6 @@ MODULE_LICENSE("GPL");
EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
-unsigned netfs_debug;
-module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
-
static struct kmem_cache *netfs_request_slab;
static struct kmem_cache *netfs_subrequest_slab;
mempool_t netfs_request_pool;
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index bc1fc54fb724..172808e83ca8 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,87 +8,6 @@
#include <linux/swap.h>
#include "internal.h"
-/*
- * Attach a folio to the buffer and maybe set marks on it to say that we need
- * to put the folio later and twiddle the pagecache flags.
- */
-int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
- struct folio *folio, unsigned int flags,
- gfp_t gfp_mask)
-{
- XA_STATE_ORDER(xas, xa, index, folio_order(folio));
-
-retry:
- xas_lock(&xas);
- for (;;) {
- xas_store(&xas, folio);
- if (!xas_error(&xas))
- break;
- xas_unlock(&xas);
- if (!xas_nomem(&xas, gfp_mask))
- return xas_error(&xas);
- goto retry;
- }
-
- if (flags & NETFS_FLAG_PUT_MARK)
- xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
- if (flags & NETFS_FLAG_PAGECACHE_MARK)
- xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
- xas_unlock(&xas);
- return xas_error(&xas);
-}
-
-/*
- * Create the specified range of folios in the buffer attached to the read
- * request. The folios are marked with NETFS_BUF_PUT_MARK so that we know that
- * these need freeing later.
- */
-int netfs_add_folios_to_buffer(struct xarray *buffer,
- struct address_space *mapping,
- pgoff_t index, pgoff_t to, gfp_t gfp_mask)
-{
- struct folio *folio;
- int ret;
-
- if (to + 1 == index) /* Page range is inclusive */
- return 0;
-
- do {
- /* TODO: Figure out what order folio can be allocated here */
- folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
- if (!folio)
- return -ENOMEM;
- folio->index = index;
- ret = netfs_xa_store_and_mark(buffer, index, folio,
- NETFS_FLAG_PUT_MARK, gfp_mask);
- if (ret < 0) {
- folio_put(folio);
- return ret;
- }
-
- index += folio_nr_pages(folio);
- } while (index <= to && index != 0);
-
- return 0;
-}
-
-/*
- * Clear an xarray buffer, putting a ref on the folios that have
- * NETFS_BUF_PUT_MARK set.
- */
-void netfs_clear_buffer(struct xarray *buffer)
-{
- struct folio *folio;
- XA_STATE(xas, buffer, 0);
-
- rcu_read_lock();
- xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
- folio_put(folio);
- }
- rcu_read_unlock();
- xa_destroy(buffer);
-}
-
/**
* netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
* @mapping: The mapping the folio belongs to.
@@ -107,7 +26,7 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
struct fscache_cookie *cookie = netfs_i_cookie(ictx);
bool need_use = false;
- _enter("");
+ kenter("");
if (!filemap_dirty_folio(mapping, folio))
return false;
@@ -180,7 +99,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
struct netfs_folio *finfo;
size_t flen = folio_size(folio);
- _enter("{%lx},%zx,%zx", folio->index, offset, length);
+ kenter("{%lx},%zx,%zx", folio->index, offset, length);
if (!folio_test_private(folio))
return;
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index c90d482b1650..f4a642727479 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -72,6 +72,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
}
}
+ atomic_inc(&ctx->io_count);
trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
netfs_proc_add_rreq(rreq);
netfs_stat(&netfs_n_rh_rreq);
@@ -124,6 +125,7 @@ static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned int i;
trace_netfs_rreq(rreq, netfs_rreq_trace_free);
@@ -142,6 +144,9 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
+
+ if (atomic_dec_and_test(&ictx->io_count))
+ wake_up_var(&ictx->io_count);
call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 60112e4b2c5e..488147439fe0 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -161,7 +161,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
{
struct list_head *next;
- _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+ kenter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
if (list_empty(&stream->subrequests))
return;
@@ -374,7 +374,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
unsigned int notes;
int s;
- _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+ kenter("%llx-%llx", wreq->start, wreq->start + wreq->len);
trace_netfs_collect(wreq);
trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
@@ -409,7 +409,7 @@ reassess_streams:
front = stream->front;
while (front) {
trace_netfs_collect_sreq(wreq, front);
- //_debug("sreq [%x] %llx %zx/%zx",
+ //kdebug("sreq [%x] %llx %zx/%zx",
// front->debug_index, front->start, front->transferred, front->len);
/* Stall if there may be a discontinuity. */
@@ -510,7 +510,7 @@ reassess_streams:
* stream has a gap that can be jumped.
*/
if (notes & SOME_EMPTY) {
- unsigned long long jump_to = wreq->start + wreq->len;
+ unsigned long long jump_to = wreq->start + READ_ONCE(wreq->submitted);
for (s = 0; s < NR_IO_STREAMS; s++) {
stream = &wreq->io_streams[s];
@@ -598,7 +598,7 @@ reassess_streams:
out:
netfs_put_group_many(wreq->group, wreq->nr_group_rel);
wreq->nr_group_rel = 0;
- _leave(" = %x", notes);
+ kleave(" = %x", notes);
return;
need_retry:
@@ -606,7 +606,7 @@ need_retry:
* that any partially completed op will have had any wholly transferred
* folios removed from it.
*/
- _debug("retry");
+ kdebug("retry");
netfs_retry_writes(wreq);
goto out;
}
@@ -621,7 +621,7 @@ void netfs_write_collection_worker(struct work_struct *work)
size_t transferred;
int s;
- _enter("R=%x", wreq->debug_id);
+ kenter("R=%x", wreq->debug_id);
netfs_see_request(wreq, netfs_rreq_trace_see_work);
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
@@ -684,16 +684,17 @@ void netfs_write_collection_worker(struct work_struct *work)
if (wreq->origin == NETFS_DIO_WRITE)
inode_dio_end(wreq->inode);
- _debug("finished");
+ kdebug("finished");
trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
if (wreq->iocb) {
- wreq->iocb->ki_pos += wreq->transferred;
+ size_t written = min(wreq->transferred, wreq->len);
+ wreq->iocb->ki_pos += written;
if (wreq->iocb->ki_complete)
wreq->iocb->ki_complete(
- wreq->iocb, wreq->error ? wreq->error : wreq->transferred);
+ wreq->iocb, wreq->error ? wreq->error : written);
wreq->iocb = VFS_PTR_POISON;
}
@@ -743,7 +744,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
struct netfs_io_request *wreq = subreq->rreq;
struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
- _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+ kenter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
switch (subreq->source) {
case NETFS_UPLOAD_TO_SERVER:
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index e190043bc0da..d7c971df8866 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -99,7 +99,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
if (IS_ERR(wreq))
return wreq;
- _enter("R=%x", wreq->debug_id);
+ kenter("R=%x", wreq->debug_id);
ictx = netfs_inode(wreq->inode);
if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
@@ -159,7 +159,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
subreq->max_nr_segs = INT_MAX;
subreq->stream_nr = stream->stream_nr;
- _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
+ kenter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
refcount_read(&subreq->ref),
@@ -215,7 +215,7 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream,
{
struct netfs_io_request *wreq = subreq->rreq;
- _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
+ kenter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
return netfs_write_subrequest_terminated(subreq, subreq->error, false);
@@ -254,7 +254,7 @@ static void netfs_issue_write(struct netfs_io_request *wreq,
stream->construct = NULL;
if (subreq->start + subreq->len > wreq->start + wreq->submitted)
- wreq->len = wreq->submitted = subreq->start + subreq->len - wreq->start;
+ WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
netfs_do_issue_write(stream, subreq);
}
@@ -272,11 +272,11 @@ int netfs_advance_write(struct netfs_io_request *wreq,
size_t part;
if (!stream->avail) {
- _leave("no write");
+ kleave("no write");
return len;
}
- _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
+ kenter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
if (subreq && start != subreq->start + subreq->len) {
netfs_issue_write(wreq, stream);
@@ -288,7 +288,7 @@ int netfs_advance_write(struct netfs_io_request *wreq,
subreq = stream->construct;
part = min(subreq->max_len - subreq->len, len);
- _debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
+ kdebug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
subreq->len += part;
subreq->nr_segs++;
@@ -319,7 +319,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
bool to_eof = false, streamw = false;
bool debug = false;
- _enter("");
+ kenter("");
/* netfs_perform_write() may shift i_size around the page or from out
* of the page to beyond it, but cannot move i_size into or through the
@@ -329,7 +329,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (fpos >= i_size) {
/* mmap beyond eof. */
- _debug("beyond eof");
+ kdebug("beyond eof");
folio_start_writeback(folio);
folio_unlock(folio);
wreq->nr_group_rel += netfs_folio_written_back(folio);
@@ -363,7 +363,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
}
flen -= foff;
- _debug("folio %zx %zx %zx", foff, flen, fsize);
+ kdebug("folio %zx %zx %zx", foff, flen, fsize);
/* Deal with discontinuities in the stream of dirty pages. These can
* arise from a number of sources:
@@ -483,11 +483,11 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (!debug)
kdebug("R=%x: No submit", wreq->debug_id);
- if (flen < fsize)
+ if (foff + flen < fsize)
for (int s = 0; s < NR_IO_STREAMS; s++)
netfs_issue_write(wreq, &wreq->io_streams[s]);
- _leave(" = 0");
+ kleave(" = 0");
return 0;
}
@@ -522,7 +522,7 @@ int netfs_writepages(struct address_space *mapping,
netfs_stat(&netfs_n_wh_writepages);
do {
- _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+ kdebug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
/* It appears we don't have to handle cyclic writeback wrapping. */
WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
@@ -546,14 +546,14 @@ int netfs_writepages(struct address_space *mapping,
mutex_unlock(&ictx->wb_lock);
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- _leave(" = %d", error);
+ kleave(" = %d", error);
return error;
couldnt_start:
netfs_kill_dirty_pages(mapping, wbc, folio);
out:
mutex_unlock(&ictx->wb_lock);
- _leave(" = %d", error);
+ kleave(" = %d", error);
return error;
}
EXPORT_SYMBOL(netfs_writepages);
@@ -590,7 +590,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
struct folio *folio, size_t copied, bool to_page_end,
struct folio **writethrough_cache)
{
- _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
+ kenter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
if (!*writethrough_cache) {
@@ -624,7 +624,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
struct netfs_inode *ictx = netfs_inode(wreq->inode);
int ret;
- _enter("R=%x", wreq->debug_id);
+ kenter("R=%x", wreq->debug_id);
if (writethrough_cache)
netfs_write_folio(wreq, wbc, writethrough_cache);
@@ -636,7 +636,12 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
mutex_unlock(&ictx->wb_lock);
- ret = wreq->error;
+ if (wreq->iocb) {
+ ret = -EIOCBQUEUED;
+ } else {
+ wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
+ ret = wreq->error;
+ }
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
return ret;
}
@@ -652,7 +657,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
loff_t start = wreq->start;
int error = 0;
- _enter("%zx", len);
+ kenter("%zx", len);
if (wreq->origin == NETFS_DIO_WRITE)
inode_dio_begin(wreq->inode);
@@ -660,7 +665,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
while (len) {
// TODO: Prepare content encryption
- _debug("unbuffered %zx", len);
+ kdebug("unbuffered %zx", len);
part = netfs_advance_write(wreq, upload, start, len, false);
start += part;
len -= part;
@@ -679,6 +684,6 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
if (list_empty(&upload->subrequests))
netfs_wake_write_collector(wreq, false);
- _leave(" = %d", error);
+ kleave(" = %d", error);
return error;
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 342930996226..07a7be27182e 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1627,7 +1627,16 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
switch (error) {
case 1:
break;
- case 0:
+ case -ETIMEDOUT:
+ if (inode && (IS_ROOT(dentry) ||
+ NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL))
+ error = 1;
+ break;
+ case -ESTALE:
+ case -ENOENT:
+ error = 0;
+ fallthrough;
+ default:
/*
* We can't d_drop the root of a disconnected tree:
* its d_hash is on the s_anon list and d_drop() would hide
@@ -1682,18 +1691,8 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,
dir_verifier = nfs_save_change_attribute(dir);
ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
- if (ret < 0) {
- switch (ret) {
- case -ESTALE:
- case -ENOENT:
- ret = 0;
- break;
- case -ETIMEDOUT:
- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
- ret = 1;
- }
+ if (ret < 0)
goto out;
- }
/* Request help from readdirplus */
nfs_lookup_advise_force_readdirplus(dir, flags);
@@ -1737,7 +1736,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode;
- int error;
+ int error = 0;
nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
inode = d_inode(dentry);
@@ -1782,7 +1781,7 @@ out_valid:
out_bad:
if (flags & LOOKUP_RCU)
return -ECHILD;
- return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
+ return nfs_lookup_revalidate_done(dir, dentry, inode, error);
}
static int
@@ -1804,9 +1803,10 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
} else {
- /* Wait for unlink to complete */
+ /* Wait for unlink to complete - see unblock_revalidate() */
wait_var_event(&dentry->d_fsdata,
- dentry->d_fsdata != NFS_FSDATA_BLOCKED);
+ smp_load_acquire(&dentry->d_fsdata)
+ != NFS_FSDATA_BLOCKED);
parent = dget_parent(dentry);
ret = reval(d_inode(parent), dentry, flags);
dput(parent);
@@ -1819,6 +1819,29 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
}
+static void block_revalidate(struct dentry *dentry)
+{
+ /* old devname - just in case */
+ kfree(dentry->d_fsdata);
+
+ /* Any new reference that could lead to an open
+ * will take ->d_lock in lookup_open() -> d_lookup().
+ * Holding this lock ensures we cannot race with
+ * __nfs_lookup_revalidate() and removes and need
+ * for further barriers.
+ */
+ lockdep_assert_held(&dentry->d_lock);
+
+ dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+}
+
+static void unblock_revalidate(struct dentry *dentry)
+{
+ /* store_release ensures wait_var_event() sees the update */
+ smp_store_release(&dentry->d_fsdata, NULL);
+ wake_up_var(&dentry->d_fsdata);
+}
+
/*
* A weaker form of d_revalidate for revalidating just the d_inode(dentry)
* when we don't really care about the dentry name. This is called when a
@@ -2255,6 +2278,9 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
*/
int error = 0;
+ if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
+ return -ENAMETOOLONG;
+
if (open_flags & O_CREAT) {
file->f_mode |= FMODE_CREATED;
error = nfs_do_create(dir, dentry, mode, open_flags);
@@ -2549,15 +2575,12 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
spin_unlock(&dentry->d_lock);
goto out;
}
- /* old devname */
- kfree(dentry->d_fsdata);
- dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+ block_revalidate(dentry);
spin_unlock(&dentry->d_lock);
error = nfs_safe_remove(dentry);
nfs_dentry_remove_handle_error(dir, dentry, error);
- dentry->d_fsdata = NULL;
- wake_up_var(&dentry->d_fsdata);
+ unblock_revalidate(dentry);
out:
trace_nfs_unlink_exit(dir, dentry, error);
return error;
@@ -2664,8 +2687,7 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
{
struct dentry *new_dentry = data->new_dentry;
- new_dentry->d_fsdata = NULL;
- wake_up_var(&new_dentry->d_fsdata);
+ unblock_revalidate(new_dentry);
}
/*
@@ -2727,11 +2749,6 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
goto out;
- if (new_dentry->d_fsdata) {
- /* old devname */
- kfree(new_dentry->d_fsdata);
- new_dentry->d_fsdata = NULL;
- }
spin_lock(&new_dentry->d_lock);
if (d_count(new_dentry) > 2) {
@@ -2753,7 +2770,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
new_dentry = dentry;
new_inode = NULL;
} else {
- new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+ block_revalidate(new_dentry);
must_unblock = true;
spin_unlock(&new_dentry->d_lock);
}
@@ -2765,6 +2782,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
must_unblock ? nfs_unblock_rename : NULL);
if (IS_ERR(task)) {
+ if (must_unblock)
+ unblock_revalidate(new_dentry);
error = PTR_ERR(task);
goto out;
}
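The ordering the comments above rely on, condensed into one place. This is only a restatement of the pattern used by block_revalidate(), unblock_revalidate() and __nfs_lookup_revalidate() in this hunk, not additional NFS API, and is not compilable on its own:

	/* Blocker, called with dentry->d_lock held: */
	dentry->d_fsdata = NFS_FSDATA_BLOCKED;

	/* Unblocker: publish NULL before waking any waiter. */
	smp_store_release(&dentry->d_fsdata, NULL);
	wake_up_var(&dentry->d_fsdata);

	/* Waiter: the acquire load pairs with the store_release above. */
	wait_var_event(&dentry->d_fsdata,
		       smp_load_acquire(&dentry->d_fsdata) != NFS_FSDATA_BLOCKED);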
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index bb2f583eb28b..90079ca134dd 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -141,8 +141,6 @@ int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t ret;
- VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
-
if (iov_iter_rw(iter) == READ)
ret = nfs_file_direct_read(iocb, iter, true);
else
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c93c12063b3a..a691fa10b3e9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4023,6 +4023,23 @@ static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
}
}
+static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
+ struct nfs4_pathname *path2)
+{
+ int i;
+
+ if (path1->ncomponents != path2->ncomponents)
+ return false;
+ for (i = 0; i < path1->ncomponents; i++) {
+ if (path1->components[i].len != path2->components[i].len)
+ return false;
+ if (memcmp(path1->components[i].data, path2->components[i].data,
+ path1->components[i].len))
+ return false;
+ }
+ return true;
+}
+
static int _nfs4_discover_trunking(struct nfs_server *server,
struct nfs_fh *fhandle)
{
@@ -4056,9 +4073,13 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
if (status)
goto out_free_3;
- for (i = 0; i < locations->nlocations; i++)
+ for (i = 0; i < locations->nlocations; i++) {
+ if (!_is_same_nfs4_pathname(&locations->fs_path,
+ &locations->locations[i].rootpath))
+ continue;
test_fs_location_for_trunking(&locations->locations[i], clp,
server);
+ }
out_free_3:
kfree(locations->fattr);
out_free_2:
@@ -6268,6 +6289,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
if (status == 0)
nfs_setsecurity(inode, fattr);
+ nfs_free_fattr(fattr);
return status;
}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 6efb5068c116..040b6b79c75e 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1545,6 +1545,11 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
continue;
} else if (index == prev->wb_index + 1)
continue;
+ /*
+ * We will submit more requests after these. Indicate
+ * this to the underlying layers.
+ */
+ desc->pg_moreio = 1;
nfs_pageio_complete(desc);
break;
}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a142287d86f6..cca80b5f54e0 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -122,8 +122,6 @@ static void nfs_readpage_release(struct nfs_page *req, int error)
{
struct folio *folio = nfs_page_to_folio(req);
- if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
- folio_set_error(folio);
if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
if (nfs_netfs_folio_unlock(folio))
folio_unlock(folio);
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 0e27a2e4e68b..1c62a5a9f51d 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -32,16 +32,8 @@ static int nfs_symlink_filler(struct file *file, struct folio *folio)
int error;
error = NFS_PROTO(inode)->readlink(inode, &folio->page, 0, PAGE_SIZE);
- if (error < 0)
- goto error;
- folio_mark_uptodate(folio);
- folio_unlock(folio);
- return 0;
-
-error:
- folio_set_error(folio);
- folio_unlock(folio);
- return -EIO;
+ folio_end_read(folio, error == 0);
+ return error;
}
static const char *nfs_get_link(struct dentry *dentry,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 2329cbb0e446..a91463ab87a0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -311,7 +311,6 @@ static void nfs_mapping_set_error(struct folio *folio, int error)
{
struct address_space *mapping = folio_file_mapping(folio);
- folio_set_error(folio);
filemap_set_wb_err(mapping, error);
if (mapping->host)
errseq_set(&mapping->host->i_sb->s_wb_err,
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 272ab8d5c4d7..ec2ab6429e00 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -162,7 +162,7 @@ config NFSD_V4_SECURITY_LABEL
config NFSD_LEGACY_CLIENT_TRACKING
bool "Support legacy NFSv4 client tracking methods (DEPRECATED)"
depends on NFSD_V4
- default n
+ default y
help
The NFSv4 server needs to store a small amount of information on
stable storage in order to handle state recovery after reboot. Most
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index ad9083ca144b..f4704f5d4086 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -664,7 +664,7 @@ static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
void *data)
{
- struct file_lock *fl = data;
+ struct file_lease *fl = data;
/* Only close files for F_SETLEASE leases */
if (fl->c.flc_flags & FL_LEASE)
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
index 62d2586d9902..ca54aa583530 100644
--- a/fs/nfsd/netlink.c
+++ b/fs/nfsd/netlink.c
@@ -40,13 +40,16 @@ static const struct nla_policy nfsd_listener_set_nl_policy[NFSD_A_SERVER_SOCK_AD
[NFSD_A_SERVER_SOCK_ADDR] = NLA_POLICY_NESTED(nfsd_sock_nl_policy),
};
+/* NFSD_CMD_POOL_MODE_SET - do */
+static const struct nla_policy nfsd_pool_mode_set_nl_policy[NFSD_A_POOL_MODE_MODE + 1] = {
+ [NFSD_A_POOL_MODE_MODE] = { .type = NLA_NUL_STRING, },
+};
+
/* Ops table for nfsd */
static const struct genl_split_ops nfsd_nl_ops[] = {
{
.cmd = NFSD_CMD_RPC_STATUS_GET,
- .start = nfsd_nl_rpc_status_get_start,
.dumpit = nfsd_nl_rpc_status_get_dumpit,
- .done = nfsd_nl_rpc_status_get_done,
.flags = GENL_CMD_CAP_DUMP,
},
{
@@ -85,6 +88,18 @@ static const struct genl_split_ops nfsd_nl_ops[] = {
.doit = nfsd_nl_listener_get_doit,
.flags = GENL_CMD_CAP_DO,
},
+ {
+ .cmd = NFSD_CMD_POOL_MODE_SET,
+ .doit = nfsd_nl_pool_mode_set_doit,
+ .policy = nfsd_pool_mode_set_nl_policy,
+ .maxattr = NFSD_A_POOL_MODE_MODE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_POOL_MODE_GET,
+ .doit = nfsd_nl_pool_mode_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
};
struct genl_family nfsd_nl_family __ro_after_init = {
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
index e3724637d64d..8eb903f24c41 100644
--- a/fs/nfsd/netlink.h
+++ b/fs/nfsd/netlink.h
@@ -15,9 +15,6 @@
extern const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1];
extern const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1];
-int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb);
-int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb);
-
int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info);
@@ -26,6 +23,8 @@ int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_pool_mode_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_pool_mode_get_doit(struct sk_buff *skb, struct genl_info *info);
extern struct genl_family nfsd_nl_family;
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 12b2b9bc07bf..4e3be7201b1c 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -308,8 +308,6 @@ static void nfsaclsvc_release_access(struct svc_rqst *rqstp)
fh_put(&resp->fh);
}
-struct nfsd3_voidargs { int dummy; };
-
#define ST 1 /* status*/
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 73adca47d373..5e34e98db969 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -221,8 +221,6 @@ static void nfs3svc_release_getacl(struct svc_rqst *rqstp)
posix_acl_release(resp->acl_default);
}
-struct nfsd3_voidargs { int dummy; };
-
#define ST 1 /* status*/
#define AT 21 /* attributes */
#define pAT (1+AT) /* post attributes - conditional */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 46bd20fe5c0f..2e39cf2e502a 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -2269,7 +2269,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
- int accmode = NFSD_MAY_READ_IF_EXEC;
+ int accmode = NFSD_MAY_READ_IF_EXEC | NFSD_MAY_OWNER_OVERRIDE;
switch (lgp->lg_seg.iomode) {
case IOMODE_READ:
@@ -2359,7 +2359,8 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
struct nfs4_layout_stateid *ls;
__be32 nfserr;
- nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
+ nfserr = fh_verify(rqstp, current_fh, 0,
+ NFSD_MAY_WRITE | NFSD_MAY_OWNER_OVERRIDE);
if (nfserr)
goto out;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 2c060e0b1604..67d8673a9391 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -2086,8 +2086,8 @@ do_init:
status = nn->client_tracking_ops->init(net);
out:
if (status) {
- printk(KERN_WARNING "NFSD: Unable to initialize client "
- "recovery tracking! (%d)\n", status);
+ pr_warn("NFSD: Unable to initialize client recovery tracking! (%d)\n", status);
+ pr_warn("NFSD: Is nfsdcld running? If not, enable CONFIG_NFSD_LEGACY_CLIENT_TRACKING.\n");
nn->client_tracking_ops = NULL;
}
return status;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c7bfd2180e3f..42b41d55d4ed 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -118,11 +118,11 @@ static int zero_clientid(clientid_t *clid)
* operation described in @argp finishes.
*/
static void *
-svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
+svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, size_t len)
{
struct svcxdr_tmpbuf *tb;
- tb = kmalloc(sizeof(*tb) + len, GFP_KERNEL);
+ tb = kmalloc(struct_size(tb, buf, len), GFP_KERNEL);
if (!tb)
return NULL;
tb->next = argp->to_free;
@@ -138,9 +138,9 @@ svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len)
* buffer might end on a page boundary.
*/
static char *
-svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
+svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, size_t len)
{
- char *p = svcxdr_tmpalloc(argp, len + 1);
+ char *p = svcxdr_tmpalloc(argp, size_add(len, 1));
if (!p)
return NULL;
@@ -150,7 +150,7 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
}
static void *
-svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
+svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, size_t len)
{
__be32 *tmp;
@@ -2146,7 +2146,7 @@ nfsd4_decode_clone(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
*/
static __be32
nfsd4_vbuf_from_vector(struct nfsd4_compoundargs *argp, struct xdr_buf *xdr,
- char **bufp, u32 buflen)
+ char **bufp, size_t buflen)
{
struct page **pages = xdr->pages;
struct kvec *head = xdr->head;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 202140df8f82..9e0ea6fc2aa3 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -406,7 +406,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return -EINVAL;
trace_nfsd_ctl_threads(net, newthreads);
mutex_lock(&nfsd_mutex);
- rv = nfsd_svc(newthreads, net, file->f_cred, NULL);
+ rv = nfsd_svc(1, &newthreads, net, file->f_cred, NULL);
mutex_unlock(&nfsd_mutex);
if (rv < 0)
return rv;
@@ -481,6 +481,14 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
goto out_free;
trace_nfsd_ctl_pool_threads(net, i, nthreads[i]);
}
+
+ /*
+ * There must always be a thread in pool 0; the admin
+ * can't shut down NFS completely using pool_threads.
+ */
+ if (nthreads[0] == 0)
+ nthreads[0] = 1;
+
rv = nfsd_set_nrthreads(i, nthreads, net);
if (rv)
goto out_free;
@@ -1460,28 +1468,6 @@ static int create_proc_exports_entry(void)
unsigned int nfsd_net_id;
-/**
- * nfsd_nl_rpc_status_get_start - Prepare rpc_status_get dumpit
- * @cb: netlink metadata and command arguments
- *
- * Return values:
- * %0: The rpc_status_get command may proceed
- * %-ENODEV: There is no NFSD running in this namespace
- */
-int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb)
-{
- struct nfsd_net *nn = net_generic(sock_net(cb->skb->sk), nfsd_net_id);
- int ret = -ENODEV;
-
- mutex_lock(&nfsd_mutex);
- if (nn->nfsd_serv)
- ret = 0;
- else
- mutex_unlock(&nfsd_mutex);
-
- return ret;
-}
-
static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
struct netlink_callback *cb,
struct nfsd_genl_rqstp *rqstp)
@@ -1558,8 +1544,16 @@ static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct nfsd_net *nn = net_generic(sock_net(skb->sk), nfsd_net_id);
int i, ret, rqstp_index = 0;
+ struct nfsd_net *nn;
+
+ mutex_lock(&nfsd_mutex);
+
+ nn = net_generic(sock_net(skb->sk), nfsd_net_id);
+ if (!nn->nfsd_serv) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
rcu_read_lock();
@@ -1636,22 +1630,10 @@ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
ret = skb->len;
out:
rcu_read_unlock();
-
- return ret;
-}
-
-/**
- * nfsd_nl_rpc_status_get_done - rpc_status_get dumpit post-processing
- * @cb: netlink metadata and command arguments
- *
- * Return values:
- * %0: Success
- */
-int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
-{
+out_unlock:
mutex_unlock(&nfsd_mutex);
- return 0;
+ return ret;
}
/**
@@ -1663,7 +1645,7 @@ int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
*/
int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
{
- int nthreads = 0, count = 0, nrpools, ret = -EOPNOTSUPP, rem;
+ int *nthreads, count = 0, nrpools, i, ret = -EOPNOTSUPP, rem;
struct net *net = genl_info_net(info);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
const struct nlattr *attr;
@@ -1680,15 +1662,22 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&nfsd_mutex);
- nrpools = nfsd_nrpools(net);
- if (nrpools && count > nrpools)
- count = nrpools;
-
- /* XXX: make this handle non-global pool-modes */
- if (count > 1)
+ nrpools = max(count, nfsd_nrpools(net));
+ nthreads = kcalloc(nrpools, sizeof(int), GFP_KERNEL);
+ if (!nthreads) {
+ ret = -ENOMEM;
goto out_unlock;
+ }
+
+ i = 0;
+ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ if (nla_type(attr) == NFSD_A_SERVER_THREADS) {
+ nthreads[i++] = nla_get_u32(attr);
+ if (i >= nrpools)
+ break;
+ }
+ }
- nthreads = nla_get_u32(info->attrs[NFSD_A_SERVER_THREADS]);
if (info->attrs[NFSD_A_SERVER_GRACETIME] ||
info->attrs[NFSD_A_SERVER_LEASETIME] ||
info->attrs[NFSD_A_SERVER_SCOPE]) {
@@ -1722,12 +1711,13 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
scope = nla_data(attr);
}
- ret = nfsd_svc(nthreads, net, get_current_cred(), scope);
-
+ ret = nfsd_svc(nrpools, nthreads, net, get_current_cred(), scope);
+ if (ret > 0)
+ ret = 0;
out_unlock:
mutex_unlock(&nfsd_mutex);
-
- return ret == nthreads ? 0 : ret;
+ kfree(nthreads);
+ return ret;
}
/**
@@ -2167,6 +2157,63 @@ err_free_msg:
}
/**
+ * nfsd_nl_pool_mode_set_doit - set the sunrpc pool mode
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_pool_mode_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ const struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NFSD_A_POOL_MODE_MODE))
+ return -EINVAL;
+
+ attr = info->attrs[NFSD_A_POOL_MODE_MODE];
+ return sunrpc_set_pool_mode(nla_data(attr));
+}
+
+/**
+ * nfsd_nl_pool_mode_get_doit - get info about pool_mode
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_pool_mode_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ char buf[16];
+ void *hdr;
+ int err;
+
+ if (sunrpc_get_pool_mode(buf, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+ return -ERANGE;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = -EMSGSIZE;
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr)
+ goto err_free_msg;
+
+ err = nla_put_string(skb, NFSD_A_POOL_MODE_MODE, buf) |
+ nla_put_u32(skb, NFSD_A_POOL_MODE_NPOOLS, nfsd_nrpools(net));
+ if (err)
+ goto err_free_msg;
+
+ genlmsg_end(skb, hdr);
+ return genlmsg_reply(skb, info);
+
+err_free_msg:
+ nlmsg_free(skb);
+ return err;
+}
+
+/**
* nfsd_net_init - Prepare the nfsd_net portion of a new net namespace
* @net: a freshly-created network namespace
*
@@ -2195,6 +2242,8 @@ static __net_init int nfsd_net_init(struct net *net)
nn->nfsd_svcstats.program = &nfsd_program;
nn->nfsd_versions = NULL;
nn->nfsd4_minorversions = NULL;
+ nn->nfsd_info.mutex = &nfsd_mutex;
+ nn->nfsd_serv = NULL;
nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 8f4f239d9f8a..cec8697b1cd6 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -103,7 +103,8 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp,
/*
* Function prototypes.
*/
-int nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scope);
+int nfsd_svc(int n, int *nservers, struct net *net,
+ const struct cred *cred, const char *scope);
int nfsd_dispatch(struct svc_rqst *rqstp);
int nfsd_nrthreads(struct net *);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 0b75305fb5f5..dd4e11a703aa 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -247,7 +247,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
dentry = dget(exp->ex_path.dentry);
else {
dentry = exportfs_decode_fh_raw(exp->ex_path.mnt, fid,
- data_left, fileid_type,
+ data_left, fileid_type, 0,
nfsd_acceptable, exp);
if (IS_ERR_OR_NULL(dentry)) {
trace_nfsd_set_fh_dentry_badhandle(rqstp, fhp,
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index cd9a6a1a9fc8..0bc8eaa5e009 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -672,7 +672,6 @@ int nfsd_create_serv(struct net *net)
return error;
}
spin_lock(&nfsd_notifier_lock);
- nn->nfsd_info.mutex = &nfsd_mutex;
nn->nfsd_serv = serv;
spin_unlock(&nfsd_notifier_lock);
@@ -710,6 +709,19 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
return 0;
}
+/**
+ * nfsd_set_nrthreads - set the number of running threads in the net's service
+ * @n: number of array members in @nthreads
+ * @nthreads: array of thread counts for each pool
+ * @net: network namespace to operate within
+ *
+ * This function alters the number of running threads for the given network
+ * namespace in each pool. If passed an array longer than the number of pools,
+ * the extra pool settings are ignored. If passed an array shorter than the
+ * number of pools, the missing values are interpreted as 0s.
+ *
+ * Returns 0 on success or a negative errno on error.
+ */
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
int i = 0;
@@ -717,11 +729,18 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
int err = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- WARN_ON(!mutex_is_locked(&nfsd_mutex));
+ lockdep_assert_held(&nfsd_mutex);
if (nn->nfsd_serv == NULL || n <= 0)
return 0;
+ /*
+ * Special case: When n == 1, pass in NULL for the pool, so that the
+ * change is distributed equally among all pools.
+ */
+ if (n == 1)
+ return svc_set_num_threads(nn->nfsd_serv, NULL, nthreads[0]);
+
if (n > nn->nfsd_serv->sv_nrpools)
n = nn->nfsd_serv->sv_nrpools;
@@ -744,31 +763,40 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
}
}
- /*
- * There must always be a thread in pool 0; the admin
- * can't shut down NFS completely using pool_threads.
- */
- if (nthreads[0] == 0)
- nthreads[0] = 1;
-
/* apply the new numbers */
for (i = 0; i < n; i++) {
err = svc_set_num_threads(nn->nfsd_serv,
&nn->nfsd_serv->sv_pools[i],
nthreads[i]);
if (err)
- break;
+ goto out;
}
+
+ /* Anything undefined in the array is considered to be 0 */
+ for (i = n; i < nn->nfsd_serv->sv_nrpools; ++i) {
+ err = svc_set_num_threads(nn->nfsd_serv,
+ &nn->nfsd_serv->sv_pools[i],
+ 0);
+ if (err)
+ goto out;
+ }
+out:
return err;
}
-/*
- * Adjust the number of threads and return the new number of threads.
- * This is also the function that starts the server if necessary, if
- * this is the first time nrservs is nonzero.
+/**
+ * nfsd_svc - start up or shut down the nfsd server
+ * @n: number of array members in @nthreads
+ * @nthreads: array of thread counts for each pool
+ * @net: network namespace to operate within
+ * @cred: credentials to use for xprt creation
+ * @scope: server scope value (defaults to nodename)
+ *
+ * Adjust the number of threads in each pool and return the new
+ * total number of threads in the service.
*/
int
-nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scope)
+nfsd_svc(int n, int *nthreads, struct net *net, const struct cred *cred, const char *scope)
{
int error;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -778,13 +806,6 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scop
dprintk("nfsd: creating service\n");
- nrservs = max(nrservs, 0);
- nrservs = min(nrservs, NFSD_MAXSERVS);
- error = 0;
-
- if (nrservs == 0 && nn->nfsd_serv == NULL)
- goto out;
-
strscpy(nn->nfsd_name, scope ? scope : utsname()->nodename,
sizeof(nn->nfsd_name));
@@ -796,7 +817,7 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scop
error = nfsd_startup_net(net, cred);
if (error)
goto out_put;
- error = svc_set_num_threads(serv, NULL, nrservs);
+ error = nfsd_set_nrthreads(n, nthreads, net);
if (error)
goto out_put;
error = serv->sv_nrthreads;
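
The per-pool interface above has three behaviours worth spelling out: a one-element array is spread across every pool, entries beyond the pool count are ignored, and pools without an entry are set to zero threads. A minimal user-space sketch of that policy (illustrative only: set_pool() is a hypothetical stand-in for svc_set_num_threads(), and the even split in the n == 1 case is an assumption about how the NULL-pool path divides the total):

    #include <stdio.h>

    /* Hypothetical stand-in for svc_set_num_threads() on one pool. */
    static int set_pool(int pool, int count)
    {
        printf("pool %d -> %d threads\n", pool, count);
        return 0;
    }

    /* Mirrors the nfsd_set_nrthreads() policy described above. */
    static int set_nrthreads(int n, const int *nthreads, int nrpools)
    {
        int i, err;

        if (n <= 0)
            return 0;

        if (n == 1) {
            /* One value: spread it over every pool. */
            int per = nthreads[0] / nrpools;
            int rem = nthreads[0] % nrpools;

            for (i = 0; i < nrpools; i++)
                if ((err = set_pool(i, per + (i < rem))))
                    return err;
            return 0;
        }

        if (n > nrpools)
            n = nrpools;               /* extra entries are ignored */

        for (i = 0; i < n; i++)
            if ((err = set_pool(i, nthreads[i])))
                return err;

        for (i = n; i < nrpools; i++)  /* missing entries mean zero */
            if ((err = set_pool(i, 0)))
                return err;
        return 0;
    }

    int main(void)
    {
        int counts[] = { 8, 4 };

        return set_nrthreads(2, counts, 4);  /* pools 2 and 3 end up at 0 */
    }
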
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 89caef7513db..ba50388ee4bf 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
* @target: offset number of an entry in the group (start point)
* @bsize: size in bits
* @lock: spin lock protecting @bitmap
+ * @wrap: whether to wrap around
*/
static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
unsigned long target,
unsigned int bsize,
- spinlock_t *lock)
+ spinlock_t *lock, bool wrap)
{
int pos, end = bsize;
@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
end = target;
}
+ if (!wrap)
+ return -ENOSPC;
/* wrap around */
for (pos = 0; pos < end; pos++) {
@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
* nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
* @inode: inode of metadata file using this allocator
* @req: nilfs_palloc_req structure exchanged for the allocation
+ * @wrap: whether to wrap around
*/
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
- struct nilfs_palloc_req *req)
+ struct nilfs_palloc_req *req, bool wrap)
{
struct buffer_head *desc_bh, *bitmap_bh;
struct nilfs_palloc_group_desc *desc;
@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
entries_per_group = nilfs_palloc_entries_per_group(inode);
for (i = 0; i < ngroups; i += n) {
- if (group >= ngroups) {
+ if (group >= ngroups && wrap) {
/* wrap around */
group = 0;
maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
@@ -550,7 +554,14 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
pos = nilfs_palloc_find_available_slot(
- bitmap, group_offset, entries_per_group, lock);
+ bitmap, group_offset, entries_per_group, lock,
+ wrap);
+ /*
+ * Since the search for a free slot in the second and
+ * subsequent bitmap blocks always starts from the
+ * beginning, the wrap flag only has an effect on the
+ * first search.
+ */
kunmap_local(bitmap_kaddr);
if (pos >= 0)
goto found;
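
The new wrap argument only controls whether the scan may come back to the start of the group after reaching the end; disabling it is what keeps ifile allocations from dipping below the first non-reserved inode number. A self-contained sketch of that search over a byte-per-slot array (plain C; the real nilfs_palloc_find_available_slot() works on a bit-level bitmap under a spinlock):

    #include <errno.h>
    #include <stdio.h>

    /*
     * Find a free slot at or after 'target'; fall back to [0, target)
     * only when 'wrap' is set.  Shape only, not the bit-level details.
     */
    static int find_available_slot(const unsigned char *used, int target,
                                   int bsize, int wrap)
    {
        int pos, end = bsize;

        for (pos = target; pos < end; pos++)
            if (!used[pos])
                return pos;

        if (!wrap)
            return -ENOSPC;

        /* wrap around */
        for (pos = 0, end = target; pos < end; pos++)
            if (!used[pos])
                return pos;

        return -ENOSPC;
    }

    int main(void)
    {
        unsigned char used[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };

        printf("wrap=1: %d\n", find_available_slot(used, 4, 8, 1)); /* 0 */
        printf("wrap=0: %d\n", find_available_slot(used, 4, 8, 0)); /* -ENOSPC */
        return 0;
    }
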
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index b667e869ac07..d825a9faca6d 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
struct buffer_head *pr_entry_bh;
};
-int nilfs_palloc_prepare_alloc_entry(struct inode *,
- struct nilfs_palloc_req *);
+int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ struct nilfs_palloc_req *req, bool wrap);
void nilfs_palloc_commit_alloc_entry(struct inode *,
struct nilfs_palloc_req *);
void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 180fc8d36213..fc1caf63a42a 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
int ret;
- ret = nilfs_palloc_prepare_alloc_entry(dat, req);
+ ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
if (ret < 0)
return ret;
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index a002a44ff161..4a29b0138d75 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -135,6 +135,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
goto Enamelen;
if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
goto Espan;
+ if (unlikely(p->inode &&
+ NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
+ goto Einumber;
}
if (offs != limit)
goto Eend;
@@ -160,6 +163,9 @@ Enamelen:
goto bad_entry;
Espan:
error = "directory entry across blocks";
+ goto bad_entry;
+Einumber:
+ error = "disallowed inode number";
bad_entry:
nilfs_error(sb,
"bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d",
@@ -377,11 +383,39 @@ found:
struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop)
{
- struct nilfs_dir_entry *de = nilfs_get_folio(dir, 0, foliop);
+ struct folio *folio;
+ struct nilfs_dir_entry *de, *next_de;
+ size_t limit;
+ char *msg;
+ de = nilfs_get_folio(dir, 0, &folio);
if (IS_ERR(de))
return NULL;
- return nilfs_next_entry(de);
+
+ limit = nilfs_last_byte(dir, 0); /* is a multiple of chunk size */
+ if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
+ !nilfs_match(1, ".", de))) {
+ msg = "missing '.'";
+ goto fail;
+ }
+
+ next_de = nilfs_next_entry(de);
+ /*
+ * If "next_de" has not reached the end of the chunk, there is
+ * at least one more record. Check whether it matches "..".
+ */
+ if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
+ !nilfs_match(2, "..", next_de))) {
+ msg = "missing '..'";
+ goto fail;
+ }
+ *foliop = folio;
+ return next_de;
+
+fail:
+ nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
+ folio_release_kmap(folio, de);
+ return NULL;
}
ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
@@ -607,7 +641,7 @@ int nilfs_empty_dir(struct inode *inode)
kaddr = nilfs_get_folio(inode, i, &folio);
if (IS_ERR(kaddr))
- continue;
+ return 0;
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 612e609158b5..1e86b9303b7c 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -56,13 +56,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
struct nilfs_palloc_req req;
int ret;
- req.pr_entry_nr = 0; /*
- * 0 says find free inode from beginning
- * of a group. dull code!!
- */
+ req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
req.pr_entry_bh = NULL;
- ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
+ ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
if (!ret) {
ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
&req.pr_entry_bh);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 728e90be3570..4017f7856440 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -116,9 +116,15 @@ enum {
#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
#define NILFS_MDT_INODE(sb, ino) \
- ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
+ ((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
#define NILFS_VALID_INODE(sb, ino) \
- ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
+ ((ino) >= NILFS_FIRST_INO(sb) || \
+ ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
+
+#define NILFS_PRIVATE_INODE(ino) ({ \
+ ino_t __ino = (ino); \
+ ((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO && \
+ (__ino) != NILFS_SKETCH_INO); })
/**
* struct nilfs_transaction_info: context information for synchronization
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 60d4f59f7665..6ea81f1d5094 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1652,6 +1652,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
if (bh->b_folio != bd_folio) {
if (bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
@@ -1665,6 +1666,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
if (bh == segbuf->sb_super_root) {
if (bh->b_folio != bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
@@ -1681,6 +1683,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
}
if (bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index f41d7b6d432c..e44dde57ab65 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
}
nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
+ if (nilfs->ns_first_ino < NILFS_USER_INO) {
+ nilfs_err(nilfs->ns_sb,
+ "too small lower limit for non-reserved inode numbers: %u",
+ nilfs->ns_first_ino);
+ return -EINVAL;
+ }
nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 85da0629415d..1e829ed7b0ef 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -182,7 +182,7 @@ struct the_nilfs {
unsigned long ns_nrsvsegs;
unsigned long ns_first_data_block;
int ns_inode_size;
- int ns_first_ino;
+ unsigned int ns_first_ino;
u32 ns_crc_seed;
/* /sys/fs/<nilfs>/<device> */
diff --git a/fs/nls/mac-celtic.c b/fs/nls/mac-celtic.c
index 266c2d7d50bd..2963f3299d7e 100644
--- a/fs/nls/mac-celtic.c
+++ b/fs/nls/mac-celtic.c
@@ -598,4 +598,5 @@ static void __exit exit_nls_macceltic(void)
module_init(init_nls_macceltic)
module_exit(exit_nls_macceltic)
+MODULE_DESCRIPTION("NLS Codepage macceltic");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-centeuro.c b/fs/nls/mac-centeuro.c
index 9789c6057551..43b20f4bdb67 100644
--- a/fs/nls/mac-centeuro.c
+++ b/fs/nls/mac-centeuro.c
@@ -528,4 +528,5 @@ static void __exit exit_nls_maccenteuro(void)
module_init(init_nls_maccenteuro)
module_exit(exit_nls_maccenteuro)
+MODULE_DESCRIPTION("NLS Codepage maccenteuro");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-croatian.c b/fs/nls/mac-croatian.c
index bb19e7a07d43..62730d6a64e5 100644
--- a/fs/nls/mac-croatian.c
+++ b/fs/nls/mac-croatian.c
@@ -598,4 +598,5 @@ static void __exit exit_nls_maccroatian(void)
module_init(init_nls_maccroatian)
module_exit(exit_nls_maccroatian)
+MODULE_DESCRIPTION("NLS Codepage maccroatian");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-cyrillic.c b/fs/nls/mac-cyrillic.c
index 2a7dea36acba..7a5c4d16aac8 100644
--- a/fs/nls/mac-cyrillic.c
+++ b/fs/nls/mac-cyrillic.c
@@ -493,4 +493,5 @@ static void __exit exit_nls_maccyrillic(void)
module_init(init_nls_maccyrillic)
module_exit(exit_nls_maccyrillic)
+MODULE_DESCRIPTION("NLS Codepage maccyrillic");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-gaelic.c b/fs/nls/mac-gaelic.c
index 77b001653588..3d22f03a90b6 100644
--- a/fs/nls/mac-gaelic.c
+++ b/fs/nls/mac-gaelic.c
@@ -563,4 +563,5 @@ static void __exit exit_nls_macgaelic(void)
module_init(init_nls_macgaelic)
module_exit(exit_nls_macgaelic)
+MODULE_DESCRIPTION("NLS Codepage macgaelic");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-greek.c b/fs/nls/mac-greek.c
index 1eccf499e2eb..de3aa9ddb5b1 100644
--- a/fs/nls/mac-greek.c
+++ b/fs/nls/mac-greek.c
@@ -493,4 +493,5 @@ static void __exit exit_nls_macgreek(void)
module_init(init_nls_macgreek)
module_exit(exit_nls_macgreek)
+MODULE_DESCRIPTION("NLS Codepage macgreek");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-iceland.c b/fs/nls/mac-iceland.c
index cbd0875c6d69..0bba83f9d415 100644
--- a/fs/nls/mac-iceland.c
+++ b/fs/nls/mac-iceland.c
@@ -598,4 +598,5 @@ static void __exit exit_nls_maciceland(void)
module_init(init_nls_maciceland)
module_exit(exit_nls_maciceland)
+MODULE_DESCRIPTION("NLS Codepage maciceland");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-inuit.c b/fs/nls/mac-inuit.c
index fba8357aaf03..493386832dfd 100644
--- a/fs/nls/mac-inuit.c
+++ b/fs/nls/mac-inuit.c
@@ -528,4 +528,5 @@ static void __exit exit_nls_macinuit(void)
module_init(init_nls_macinuit)
module_exit(exit_nls_macinuit)
+MODULE_DESCRIPTION("NLS Codepage macinuit");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-roman.c b/fs/nls/mac-roman.c
index b6a98a5208cd..d3c082173c20 100644
--- a/fs/nls/mac-roman.c
+++ b/fs/nls/mac-roman.c
@@ -633,4 +633,5 @@ static void __exit exit_nls_macroman(void)
module_init(init_nls_macroman)
module_exit(exit_nls_macroman)
+MODULE_DESCRIPTION("NLS Codepage macroman");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-romanian.c b/fs/nls/mac-romanian.c
index 25547f023638..a7735852f2d5 100644
--- a/fs/nls/mac-romanian.c
+++ b/fs/nls/mac-romanian.c
@@ -598,4 +598,5 @@ static void __exit exit_nls_macromanian(void)
module_init(init_nls_macromanian)
module_exit(exit_nls_macromanian)
+MODULE_DESCRIPTION("NLS Codepage macromanian");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-turkish.c b/fs/nls/mac-turkish.c
index b5454bc7b7fa..d77e9b6b7d7c 100644
--- a/fs/nls/mac-turkish.c
+++ b/fs/nls/mac-turkish.c
@@ -598,4 +598,5 @@ static void __exit exit_nls_macturkish(void)
module_init(init_nls_macturkish)
module_exit(exit_nls_macturkish)
+MODULE_DESCRIPTION("NLS Codepage macturkish");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_ascii.c b/fs/nls/nls_ascii.c
index a2620650d5e4..068143d71284 100644
--- a/fs/nls/nls_ascii.c
+++ b/fs/nls/nls_ascii.c
@@ -163,4 +163,5 @@ static void __exit exit_nls_ascii(void)
module_init(init_nls_ascii)
module_exit(exit_nls_ascii)
+MODULE_DESCRIPTION("NLS ASCII (United States)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index a026dbd3593f..18d597e49a19 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -545,4 +545,5 @@ EXPORT_SYMBOL(unload_nls);
EXPORT_SYMBOL(load_nls);
EXPORT_SYMBOL(load_nls_default);
+MODULE_DESCRIPTION("Base file system native language support");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp1250.c b/fs/nls/nls_cp1250.c
index ace3e19d3407..e22a57a4b828 100644
--- a/fs/nls/nls_cp1250.c
+++ b/fs/nls/nls_cp1250.c
@@ -343,4 +343,5 @@ static void __exit exit_nls_cp1250(void)
module_init(init_nls_cp1250)
module_exit(exit_nls_cp1250)
+MODULE_DESCRIPTION("NLS Windows CP1250 (Slavic/Central European Languages)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp1251.c b/fs/nls/nls_cp1251.c
index 9273ddfd08a1..6f46d339f23c 100644
--- a/fs/nls/nls_cp1251.c
+++ b/fs/nls/nls_cp1251.c
@@ -298,4 +298,5 @@ static void __exit exit_nls_cp1251(void)
module_init(init_nls_cp1251)
module_exit(exit_nls_cp1251)
+MODULE_DESCRIPTION("NLS Windows CP1251 (Bulgarian, Belarusian)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp1255.c b/fs/nls/nls_cp1255.c
index 1caf5dfed85b..299e089d4301 100644
--- a/fs/nls/nls_cp1255.c
+++ b/fs/nls/nls_cp1255.c
@@ -380,5 +380,6 @@ static void __exit exit_nls_cp1255(void)
module_init(init_nls_cp1255)
module_exit(exit_nls_cp1255)
+MODULE_DESCRIPTION("NLS Hebrew charsets (ISO-8859-8, CP1255)");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NLS(iso8859-8);
diff --git a/fs/nls/nls_cp437.c b/fs/nls/nls_cp437.c
index 7ddb830da3fd..ab880499ea32 100644
--- a/fs/nls/nls_cp437.c
+++ b/fs/nls/nls_cp437.c
@@ -384,4 +384,5 @@ static void __exit exit_nls_cp437(void)
module_init(init_nls_cp437)
module_exit(exit_nls_cp437)
+MODULE_DESCRIPTION("NLS Codepage 437 (United States, Canada)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp737.c b/fs/nls/nls_cp737.c
index c593f683a0cd..5c37618296e9 100644
--- a/fs/nls/nls_cp737.c
+++ b/fs/nls/nls_cp737.c
@@ -347,4 +347,5 @@ static void __exit exit_nls_cp737(void)
module_init(init_nls_cp737)
module_exit(exit_nls_cp737)
+MODULE_DESCRIPTION("NLS Codepage 737 (Greek)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp775.c b/fs/nls/nls_cp775.c
index 554c863745f2..51ccc908901f 100644
--- a/fs/nls/nls_cp775.c
+++ b/fs/nls/nls_cp775.c
@@ -316,4 +316,5 @@ static void __exit exit_nls_cp775(void)
module_init(init_nls_cp775)
module_exit(exit_nls_cp775)
+MODULE_DESCRIPTION("NLS Codepage 775 (Baltic Rim)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp850.c b/fs/nls/nls_cp850.c
index 56cccd14b40b..5f9b9507a8b6 100644
--- a/fs/nls/nls_cp850.c
+++ b/fs/nls/nls_cp850.c
@@ -312,4 +312,5 @@ static void __exit exit_nls_cp850(void)
module_init(init_nls_cp850)
module_exit(exit_nls_cp850)
+MODULE_DESCRIPTION("NLS Codepage 850 (Europe)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp852.c b/fs/nls/nls_cp852.c
index 7cdc05ac1d40..fc513a5e8358 100644
--- a/fs/nls/nls_cp852.c
+++ b/fs/nls/nls_cp852.c
@@ -334,4 +334,5 @@ static void __exit exit_nls_cp852(void)
module_init(init_nls_cp852)
module_exit(exit_nls_cp852)
+MODULE_DESCRIPTION("NLS Codepage 852 (Central/Eastern Europe)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp855.c b/fs/nls/nls_cp855.c
index 7426eea05663..a43be58adb36 100644
--- a/fs/nls/nls_cp855.c
+++ b/fs/nls/nls_cp855.c
@@ -296,4 +296,5 @@ static void __exit exit_nls_cp855(void)
module_init(init_nls_cp855)
module_exit(exit_nls_cp855)
+MODULE_DESCRIPTION("NLS Codepage 855 (Cyrillic)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp857.c b/fs/nls/nls_cp857.c
index 098309733ebd..772cd4195bad 100644
--- a/fs/nls/nls_cp857.c
+++ b/fs/nls/nls_cp857.c
@@ -298,4 +298,5 @@ static void __exit exit_nls_cp857(void)
module_init(init_nls_cp857)
module_exit(exit_nls_cp857)
+MODULE_DESCRIPTION("NLS Codepage 857 (Turkish)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp860.c b/fs/nls/nls_cp860.c
index 84224478e731..36cf4ca11966 100644
--- a/fs/nls/nls_cp860.c
+++ b/fs/nls/nls_cp860.c
@@ -361,4 +361,5 @@ static void __exit exit_nls_cp860(void)
module_init(init_nls_cp860)
module_exit(exit_nls_cp860)
+MODULE_DESCRIPTION("NLS Codepage 860 (Portuguese)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp861.c b/fs/nls/nls_cp861.c
index dc873e4be092..b7397d079f8f 100644
--- a/fs/nls/nls_cp861.c
+++ b/fs/nls/nls_cp861.c
@@ -384,4 +384,5 @@ static void __exit exit_nls_cp861(void)
module_init(init_nls_cp861)
module_exit(exit_nls_cp861)
+MODULE_DESCRIPTION("NLS Codepage 861 (Icelandic)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp862.c b/fs/nls/nls_cp862.c
index d5263e3c5566..fd3b95d1e95d 100644
--- a/fs/nls/nls_cp862.c
+++ b/fs/nls/nls_cp862.c
@@ -418,4 +418,5 @@ static void __exit exit_nls_cp862(void)
module_init(init_nls_cp862)
module_exit(exit_nls_cp862)
+MODULE_DESCRIPTION("NLS Codepage 862 (Hebrew)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp863.c b/fs/nls/nls_cp863.c
index 051c9832e36a..813ae7944249 100644
--- a/fs/nls/nls_cp863.c
+++ b/fs/nls/nls_cp863.c
@@ -378,4 +378,5 @@ static void __exit exit_nls_cp863(void)
module_init(init_nls_cp863)
module_exit(exit_nls_cp863)
+MODULE_DESCRIPTION("NLS Codepage 863 (Canadian French)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp864.c b/fs/nls/nls_cp864.c
index 97eb1273b2f7..d9eb6d5cd47a 100644
--- a/fs/nls/nls_cp864.c
+++ b/fs/nls/nls_cp864.c
@@ -404,4 +404,5 @@ static void __exit exit_nls_cp864(void)
module_init(init_nls_cp864)
module_exit(exit_nls_cp864)
+MODULE_DESCRIPTION("NLS Codepage 864 (Arabic)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp865.c b/fs/nls/nls_cp865.c
index 111214228525..2678ffd98bb6 100644
--- a/fs/nls/nls_cp865.c
+++ b/fs/nls/nls_cp865.c
@@ -384,4 +384,5 @@ static void __exit exit_nls_cp865(void)
module_init(init_nls_cp865)
module_exit(exit_nls_cp865)
+MODULE_DESCRIPTION("NLS Codepage 865 (Norwegian, Danish)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp866.c b/fs/nls/nls_cp866.c
index ffdcbc3fc38d..7e93d0a3802a 100644
--- a/fs/nls/nls_cp866.c
+++ b/fs/nls/nls_cp866.c
@@ -302,4 +302,5 @@ static void __exit exit_nls_cp866(void)
module_init(init_nls_cp866)
module_exit(exit_nls_cp866)
+MODULE_DESCRIPTION("NLS Codepage 866 (Cyrillic/Russian)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp869.c b/fs/nls/nls_cp869.c
index 3b5a34589354..4491737dd5cb 100644
--- a/fs/nls/nls_cp869.c
+++ b/fs/nls/nls_cp869.c
@@ -312,4 +312,5 @@ static void __exit exit_nls_cp869(void)
module_init(init_nls_cp869)
module_exit(exit_nls_cp869)
+MODULE_DESCRIPTION("NLS Codepage 869 (Greek)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_cp874.c b/fs/nls/nls_cp874.c
index 8dfaa10710fa..4fcfbf8ca72c 100644
--- a/fs/nls/nls_cp874.c
+++ b/fs/nls/nls_cp874.c
@@ -271,5 +271,6 @@ static void __exit exit_nls_cp874(void)
module_init(init_nls_cp874)
module_exit(exit_nls_cp874)
+MODULE_DESCRIPTION("NLS Thai charset (CP874, TIS-620)");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NLS(tis-620);
diff --git a/fs/nls/nls_cp932.c b/fs/nls/nls_cp932.c
index 67b7398e8483..e5e6270fcca6 100644
--- a/fs/nls/nls_cp932.c
+++ b/fs/nls/nls_cp932.c
@@ -7929,5 +7929,6 @@ static void __exit exit_nls_cp932(void)
module_init(init_nls_cp932)
module_exit(exit_nls_cp932)
+MODULE_DESCRIPTION("NLS Japanese charset (Shift-JIS)");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NLS(sjis);
diff --git a/fs/nls/nls_cp936.c b/fs/nls/nls_cp936.c
index c96546cfec9f..91d0a15fd7f9 100644
--- a/fs/nls/nls_cp936.c
+++ b/fs/nls/nls_cp936.c
@@ -11107,5 +11107,6 @@ static void __exit exit_nls_cp936(void)
module_init(init_nls_cp936)
module_exit(exit_nls_cp936)
+MODULE_DESCRIPTION("NLS Simplified Chinese charset (CP936, GB2312)");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NLS(gb2312);
diff --git a/fs/nls/nls_cp949.c b/fs/nls/nls_cp949.c
index 199171e97aa4..3ae03c76d59c 100644
--- a/fs/nls/nls_cp949.c
+++ b/fs/nls/nls_cp949.c
@@ -13942,5 +13942,6 @@ static void __exit exit_nls_cp949(void)
module_init(init_nls_cp949)
module_exit(exit_nls_cp949)
+MODULE_DESCRIPTION("NLS Korean charset (CP949, EUC-KR)");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NLS(euc-kr);
diff --git a/fs/nls/nls_cp950.c b/fs/nls/nls_cp950.c
index 8e1418708209..e968aa80198d 100644
--- a/fs/nls/nls_cp950.c
+++ b/fs/nls/nls_cp950.c
@@ -9478,5 +9478,6 @@ static void __exit exit_nls_cp950(void)
module_init(init_nls_cp950)
module_exit(exit_nls_cp950)
+MODULE_DESCRIPTION("NLS Traditional Chinese charset (Big5)");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NLS(big5);
diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
index 162b3f160353..0191cc9d955e 100644
--- a/fs/nls/nls_euc-jp.c
+++ b/fs/nls/nls_euc-jp.c
@@ -577,4 +577,5 @@ static void __exit exit_nls_euc_jp(void)
module_init(init_nls_euc_jp)
module_exit(exit_nls_euc_jp)
+MODULE_DESCRIPTION("NLS Japanese charset (EUC-JP)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-1.c b/fs/nls/nls_iso8859-1.c
index 69ac020d43b1..a181be488f7d 100644
--- a/fs/nls/nls_iso8859-1.c
+++ b/fs/nls/nls_iso8859-1.c
@@ -254,4 +254,5 @@ static void __exit exit_nls_iso8859_1(void)
module_init(init_nls_iso8859_1)
module_exit(exit_nls_iso8859_1)
+MODULE_DESCRIPTION("NLS ISO 8859-1 (Latin 1; Western European Languages)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-13.c b/fs/nls/nls_iso8859-13.c
index afb3f8f275f0..8e2be5bfeaf1 100644
--- a/fs/nls/nls_iso8859-13.c
+++ b/fs/nls/nls_iso8859-13.c
@@ -282,4 +282,5 @@ static void __exit exit_nls_iso8859_13(void)
module_init(init_nls_iso8859_13)
module_exit(exit_nls_iso8859_13)
+MODULE_DESCRIPTION("NLS ISO 8859-13 (Latin 7; Baltic)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-14.c b/fs/nls/nls_iso8859-14.c
index 046370f0b6f0..c789eccb8a69 100644
--- a/fs/nls/nls_iso8859-14.c
+++ b/fs/nls/nls_iso8859-14.c
@@ -338,4 +338,5 @@ static void __exit exit_nls_iso8859_14(void)
module_init(init_nls_iso8859_14)
module_exit(exit_nls_iso8859_14)
+MODULE_DESCRIPTION("NLS ISO 8859-14 (Latin 8; Celtic)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-15.c b/fs/nls/nls_iso8859-15.c
index 7e34a841a056..ffec649176fb 100644
--- a/fs/nls/nls_iso8859-15.c
+++ b/fs/nls/nls_iso8859-15.c
@@ -304,4 +304,5 @@ static void __exit exit_nls_iso8859_15(void)
module_init(init_nls_iso8859_15)
module_exit(exit_nls_iso8859_15)
+MODULE_DESCRIPTION("NLS ISO 8859-15 (Latin 9; Western European Languages with Euro)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-2.c b/fs/nls/nls_iso8859-2.c
index 7dd571181741..d352334d0314 100644
--- a/fs/nls/nls_iso8859-2.c
+++ b/fs/nls/nls_iso8859-2.c
@@ -305,4 +305,5 @@ static void __exit exit_nls_iso8859_2(void)
module_init(init_nls_iso8859_2)
module_exit(exit_nls_iso8859_2)
+MODULE_DESCRIPTION("NLS ISO 8859-2 (Latin 2; Slavic/Central European Languages)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-3.c b/fs/nls/nls_iso8859-3.c
index 740b75ec4493..09990e6634d2 100644
--- a/fs/nls/nls_iso8859-3.c
+++ b/fs/nls/nls_iso8859-3.c
@@ -305,4 +305,5 @@ static void __exit exit_nls_iso8859_3(void)
module_init(init_nls_iso8859_3)
module_exit(exit_nls_iso8859_3)
+MODULE_DESCRIPTION("NLS ISO 8859-3 (Latin 3; Esperanto, Galician, Maltese, Turkish)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-4.c b/fs/nls/nls_iso8859-4.c
index 8826021e32f5..92795224912e 100644
--- a/fs/nls/nls_iso8859-4.c
+++ b/fs/nls/nls_iso8859-4.c
@@ -305,4 +305,5 @@ static void __exit exit_nls_iso8859_4(void)
module_init(init_nls_iso8859_4)
module_exit(exit_nls_iso8859_4)
+MODULE_DESCRIPTION("NLS ISO 8859-4 (Latin 4; old Baltic charset)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-5.c b/fs/nls/nls_iso8859-5.c
index 7c04057a1ad8..32309315307a 100644
--- a/fs/nls/nls_iso8859-5.c
+++ b/fs/nls/nls_iso8859-5.c
@@ -269,4 +269,5 @@ static void __exit exit_nls_iso8859_5(void)
module_init(init_nls_iso8859_5)
module_exit(exit_nls_iso8859_5)
+MODULE_DESCRIPTION("NLS ISO 8859-5 (Cyrillic)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-6.c b/fs/nls/nls_iso8859-6.c
index d4a881400d74..c18183469d2a 100644
--- a/fs/nls/nls_iso8859-6.c
+++ b/fs/nls/nls_iso8859-6.c
@@ -260,4 +260,5 @@ static void __exit exit_nls_iso8859_6(void)
module_init(init_nls_iso8859_6)
module_exit(exit_nls_iso8859_6)
+MODULE_DESCRIPTION("NLS ISO 8859-6 (Arabic)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-7.c b/fs/nls/nls_iso8859-7.c
index 37b75d825a75..3652d6832864 100644
--- a/fs/nls/nls_iso8859-7.c
+++ b/fs/nls/nls_iso8859-7.c
@@ -314,4 +314,5 @@ static void __exit exit_nls_iso8859_7(void)
module_init(init_nls_iso8859_7)
module_exit(exit_nls_iso8859_7)
+MODULE_DESCRIPTION("NLS ISO 8859-7 (Modern Greek)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_iso8859-9.c b/fs/nls/nls_iso8859-9.c
index 557b98250d37..11a67834b855 100644
--- a/fs/nls/nls_iso8859-9.c
+++ b/fs/nls/nls_iso8859-9.c
@@ -269,4 +269,5 @@ static void __exit exit_nls_iso8859_9(void)
module_init(init_nls_iso8859_9)
module_exit(exit_nls_iso8859_9)
+MODULE_DESCRIPTION("NLS ISO 8859-9 (Latin 5; Turkish)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_koi8-r.c b/fs/nls/nls_koi8-r.c
index 811f232fccfb..e3dca27a3803 100644
--- a/fs/nls/nls_koi8-r.c
+++ b/fs/nls/nls_koi8-r.c
@@ -320,4 +320,5 @@ static void __exit exit_nls_koi8_r(void)
module_init(init_nls_koi8_r)
module_exit(exit_nls_koi8_r)
+MODULE_DESCRIPTION("NLS KOI8-R (Russian)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
index a80a741a8676..07afcd9e58c0 100644
--- a/fs/nls/nls_koi8-ru.c
+++ b/fs/nls/nls_koi8-ru.c
@@ -79,4 +79,5 @@ static void __exit exit_nls_koi8_ru(void)
module_init(init_nls_koi8_ru)
module_exit(exit_nls_koi8_ru)
+MODULE_DESCRIPTION("NLS KOI8-RU (Belarusian)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_koi8-u.c b/fs/nls/nls_koi8-u.c
index 7e029e4c188a..f60645758c1a 100644
--- a/fs/nls/nls_koi8-u.c
+++ b/fs/nls/nls_koi8-u.c
@@ -327,4 +327,5 @@ static void __exit exit_nls_koi8_u(void)
module_init(init_nls_koi8_u)
module_exit(exit_nls_koi8_u)
+MODULE_DESCRIPTION("NLS KOI8-U (Ukrainian)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_ucs2_utils.c b/fs/nls/nls_ucs2_utils.c
index a69781c54dd8..d4564b79d7bf 100644
--- a/fs/nls/nls_ucs2_utils.c
+++ b/fs/nls/nls_ucs2_utils.c
@@ -16,6 +16,7 @@
#include <asm/unaligned.h>
#include "nls_ucs2_utils.h"
+MODULE_DESCRIPTION("NLS UCS-2");
MODULE_LICENSE("GPL");
/*
diff --git a/fs/nls/nls_utf8.c b/fs/nls/nls_utf8.c
index afcfbc4a14db..a0fa0610eaac 100644
--- a/fs/nls/nls_utf8.c
+++ b/fs/nls/nls_utf8.c
@@ -64,4 +64,5 @@ static void __exit exit_nls_utf8(void)
module_init(init_nls_utf8)
module_exit(exit_nls_utf8)
+MODULE_DESCRIPTION("NLS UTF-8");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ff69ae24c4e8..272c8a1dab3c 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -117,17 +117,13 @@ void fsnotify_sb_free(struct super_block *sb)
* parent cares. Thus when an event happens on a child it can quickly tell
* if there is a need to find a parent and send the event to the parent.
*/
-void __fsnotify_update_child_dentry_flags(struct inode *inode)
+void fsnotify_set_children_dentry_flags(struct inode *inode)
{
struct dentry *alias;
- int watched;
if (!S_ISDIR(inode->i_mode))
return;
- /* determine if the children should tell inode about their events */
- watched = fsnotify_inode_watches_children(inode);
-
spin_lock(&inode->i_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
@@ -143,10 +139,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
continue;
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
- if (watched)
- child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
- else
- child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
+ child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
spin_unlock(&child->d_lock);
}
spin_unlock(&alias->d_lock);
@@ -154,6 +147,24 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
spin_unlock(&inode->i_lock);
}
+/*
+ * Lazily clear the false positive PARENT_WATCHED flag for a child whose
+ * parent has stopped watching children.
+ */
+static void fsnotify_clear_child_dentry_flag(struct inode *pinode,
+ struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ /*
+ * d_lock is a sufficient barrier to prevent observing a non-watched
+ * parent state from before the fsnotify_set_children_dentry_flags()
+ * or fsnotify_update_flags() call that had set PARENT_WATCHED.
+ */
+ if (!fsnotify_inode_watches_children(pinode))
+ dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
+ spin_unlock(&dentry->d_lock);
+}
+
/* Are inode/sb/mount interested in parent and name info with this event? */
static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
__u32 mask)
@@ -228,7 +239,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
p_inode = parent->d_inode;
p_mask = fsnotify_inode_watches_children(p_inode);
if (unlikely(parent_watched && !p_mask))
- __fsnotify_update_child_dentry_flags(p_inode);
+ fsnotify_clear_child_dentry_flag(p_inode, dentry);
/*
* Include parent/name in notification either if some notification
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 2d059f789ee3..663759ed6fbc 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -93,7 +93,7 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
*/
-extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
+extern void fsnotify_set_children_dentry_flags(struct inode *inode);
extern struct kmem_cache *fsnotify_mark_connector_cachep;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index c3eefa70633c..5e170e713088 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -250,6 +250,24 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
return fsnotify_update_iref(conn, want_iref);
}
+static bool fsnotify_conn_watches_children(
+ struct fsnotify_mark_connector *conn)
+{
+ if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
+ return false;
+
+ return fsnotify_inode_watches_children(fsnotify_conn_inode(conn));
+}
+
+static void fsnotify_conn_set_children_dentry_flags(
+ struct fsnotify_mark_connector *conn)
+{
+ if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
+ return;
+
+ fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn));
+}
+
/*
* Calculate mask of events for a list of marks. The caller must make sure
* connector and connector->obj cannot disappear under us. Callers achieve
@@ -258,15 +276,23 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
*/
void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
+ bool update_children;
+
if (!conn)
return;
spin_lock(&conn->lock);
+ update_children = !fsnotify_conn_watches_children(conn);
__fsnotify_recalc_mask(conn);
+ update_children &= fsnotify_conn_watches_children(conn);
spin_unlock(&conn->lock);
- if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
- __fsnotify_update_child_dentry_flags(
- fsnotify_conn_inode(conn));
+ /*
+ * Set children's PARENT_WATCHED flags only if parent started watching.
+ * When parent stops watching, we clear false positive PARENT_WATCHED
+ * flags lazily in __fsnotify_parent().
+ */
+ if (update_children)
+ fsnotify_conn_set_children_dentry_flags(conn);
}
/* Free all connectors queued for freeing once SRCU period ends */
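
The recalc-mask change above switches from walking all children on every mask update to setting the child flag eagerly only when a parent starts watching, and clearing stale flags lazily on the event path. A toy single-threaded model of that control flow (no locking, so it ignores the d_lock ordering argument in the comments; all names here are made up for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct parent { bool watches_children; };
    struct child  { struct parent *parent; bool parent_watched; /* cached */ };

    /* Called only when the parent's mask gains child-watching bits. */
    static void set_children_flags(struct parent *p, struct child *kids, int n)
    {
        for (int i = 0; i < n; i++)
            kids[i].parent_watched = true;
    }

    /* Event path: trust the cached flag, repair it lazily if stale. */
    static void child_event(struct child *c, const char *name)
    {
        if (c->parent_watched && !c->parent->watches_children) {
            c->parent_watched = false;              /* lazy clear */
            printf("%s: cleared stale flag\n", name);
            return;
        }
        printf("%s: %s\n", name,
               c->parent_watched ? "report to parent" : "parent not interested");
    }

    int main(void)
    {
        struct parent p = { false };
        struct child kids[2] = { { &p, false }, { &p, false } };

        p.watches_children = true;
        set_children_flags(&p, kids, 2);   /* eager set when watching starts */
        child_event(&kids[0], "a");

        p.watches_children = false;        /* stop watching: no child walk */
        child_event(&kids[1], "b");        /* flag is cleared lazily here */
        return 0;
    }
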
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 07e22a15ef02..a4a925dce331 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -8,10 +8,12 @@
#include <linux/magic.h>
#include <linux/ktime.h>
#include <linux/seq_file.h>
+#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/nsfs.h>
#include <linux/uaccess.h>
+#include "mount.h"
#include "internal.h"
static struct vfsmount *nsfs_mnt;
@@ -82,40 +84,47 @@ int ns_get_path(struct path *path, struct task_struct *task,
return ns_get_path_cb(path, ns_get_path_task, &args);
}
-int open_related_ns(struct ns_common *ns,
- struct ns_common *(*get_ns)(struct ns_common *ns))
+/**
+ * open_namespace - open a namespace
+ * @ns: the namespace to open
+ *
+ * This will consume a reference to @ns regardless of success or failure.
+ *
+ * Return: A file descriptor on success or a negative error code on failure.
+ */
+int open_namespace(struct ns_common *ns)
{
- struct path path = {};
- struct ns_common *relative;
+ struct path path __free(path_put) = {};
struct file *f;
int err;
- int fd;
- fd = get_unused_fd_flags(O_CLOEXEC);
+ /* call first to consume reference */
+ err = path_from_stashed(&ns->stashed, nsfs_mnt, ns, &path);
+ if (err < 0)
+ return err;
+
+ CLASS(get_unused_fd, fd)(O_CLOEXEC);
if (fd < 0)
return fd;
- relative = get_ns(ns);
- if (IS_ERR(relative)) {
- put_unused_fd(fd);
- return PTR_ERR(relative);
- }
+ f = dentry_open(&path, O_RDONLY, current_cred());
+ if (IS_ERR(f))
+ return PTR_ERR(f);
- err = path_from_stashed(&relative->stashed, nsfs_mnt, relative, &path);
- if (err < 0) {
- put_unused_fd(fd);
- return err;
- }
+ fd_install(fd, f);
+ return take_fd(fd);
+}
- f = dentry_open(&path, O_RDONLY, current_cred());
- path_put(&path);
- if (IS_ERR(f)) {
- put_unused_fd(fd);
- fd = PTR_ERR(f);
- } else
- fd_install(fd, f);
+int open_related_ns(struct ns_common *ns,
+ struct ns_common *(*get_ns)(struct ns_common *ns))
+{
+ struct ns_common *relative;
+
+ relative = get_ns(ns);
+ if (IS_ERR(relative))
+ return PTR_ERR(relative);
- return fd;
+ return open_namespace(relative);
}
EXPORT_SYMBOL_GPL(open_related_ns);
@@ -123,9 +132,12 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct user_namespace *user_ns;
+ struct pid_namespace *pid_ns;
+ struct task_struct *tsk;
struct ns_common *ns = get_proc_ns(file_inode(filp));
uid_t __user *argp;
uid_t uid;
+ int ret;
switch (ioctl) {
case NS_GET_USERNS:
@@ -143,9 +155,69 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
argp = (uid_t __user *) arg;
uid = from_kuid_munged(current_user_ns(), user_ns->owner);
return put_user(uid, argp);
+ case NS_GET_MNTNS_ID: {
+ struct mnt_namespace *mnt_ns;
+ __u64 __user *idp;
+ __u64 id;
+
+ if (ns->ops->type != CLONE_NEWNS)
+ return -EINVAL;
+
+ mnt_ns = container_of(ns, struct mnt_namespace, ns);
+ idp = (__u64 __user *)arg;
+ id = mnt_ns->seq;
+ return put_user(id, idp);
+ }
+ case NS_GET_PID_FROM_PIDNS:
+ fallthrough;
+ case NS_GET_TGID_FROM_PIDNS:
+ fallthrough;
+ case NS_GET_PID_IN_PIDNS:
+ fallthrough;
+ case NS_GET_TGID_IN_PIDNS:
+ if (ns->ops->type != CLONE_NEWPID)
+ return -EINVAL;
+
+ ret = -ESRCH;
+ pid_ns = container_of(ns, struct pid_namespace, ns);
+
+ rcu_read_lock();
+
+ if (ioctl == NS_GET_PID_IN_PIDNS ||
+ ioctl == NS_GET_TGID_IN_PIDNS)
+ tsk = find_task_by_vpid(arg);
+ else
+ tsk = find_task_by_pid_ns(arg, pid_ns);
+ if (!tsk)
+ break;
+
+ switch (ioctl) {
+ case NS_GET_PID_FROM_PIDNS:
+ ret = task_pid_vnr(tsk);
+ break;
+ case NS_GET_TGID_FROM_PIDNS:
+ ret = task_tgid_vnr(tsk);
+ break;
+ case NS_GET_PID_IN_PIDNS:
+ ret = task_pid_nr_ns(tsk, pid_ns);
+ break;
+ case NS_GET_TGID_IN_PIDNS:
+ ret = task_tgid_nr_ns(tsk, pid_ns);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ rcu_read_unlock();
+
+ if (!ret)
+ ret = -ESRCH;
+ break;
default:
- return -ENOTTY;
+ ret = -ENOTTY;
}
+
+ return ret;
}
int ns_get_name(char *buf, size_t size, struct task_struct *task,
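
These ioctls are user-visible, so a short user-space example may help: it reads the mount-namespace ID and translates the caller's own PID through its PID namespace. It assumes a kernel and UAPI headers new enough to carry NS_GET_MNTNS_ID and NS_GET_PID_IN_PIDNS; error handling is kept minimal:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/nsfs.h>   /* NS_GET_MNTNS_ID, NS_GET_PID_IN_PIDNS */

    int main(void)
    {
        __u64 mntns_id;
        int fd, pid;

        fd = open("/proc/self/ns/mnt", O_RDONLY);
        if (fd < 0 || ioctl(fd, NS_GET_MNTNS_ID, &mntns_id) < 0)
            return 1;
        printf("mount namespace id: %llu\n", (unsigned long long)mntns_id);
        close(fd);

        fd = open("/proc/self/ns/pid", O_RDONLY);
        if (fd < 0)
            return 1;
        /* The translated PID comes back as the ioctl return value. */
        pid = ioctl(fd, NS_GET_PID_IN_PIDNS, (unsigned long)getpid());
        if (pid < 0)
            return 1;
        printf("pid in that namespace: %d\n", pid);
        close(fd);
        return 0;
    }
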
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 27fbde2701b6..c5b688c5f984 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -259,8 +259,8 @@ enum Opt {
// clang-format off
static const struct fs_parameter_spec ntfs_fs_parameters[] = {
- fsparam_u32("uid", Opt_uid),
- fsparam_u32("gid", Opt_gid),
+ fsparam_uid("uid", Opt_uid),
+ fsparam_gid("gid", Opt_gid),
fsparam_u32oct("umask", Opt_umask),
fsparam_u32oct("dmask", Opt_dmask),
fsparam_u32oct("fmask", Opt_fmask),
@@ -319,14 +319,10 @@ static int ntfs_fs_parse_param(struct fs_context *fc,
switch (opt) {
case Opt_uid:
- opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(opts->fs_uid))
- return invalf(fc, "ntfs3: Invalid value for uid.");
+ opts->fs_uid = result.uid;
break;
case Opt_gid:
- opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(opts->fs_gid))
- return invalf(fc, "ntfs3: Invalid value for gid.");
+ opts->fs_gid = result.gid;
break;
case Opt_umask:
if (result.uint_32 & ~07777)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f0467d3b3c88..6be175a1ab3c 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2366,6 +2366,11 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
}
list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
+ ret = ocfs2_assure_trans_credits(handle, credits);
+ if (ret < 0) {
+ mlog_errno(ret);
+ break;
+ }
ret = ocfs2_mark_extent_written(inode, &et, handle,
ue->ue_cpos, 1,
ue->ue_phys,
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 604fea3a26ff..530fba34f6d3 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -446,6 +446,23 @@ bail:
}
/*
+ * Make sure handle has at least 'nblocks' credits available. If it does not
+ * have that many credits available, we will try to extend the handle to have
+ * enough credits. If that fails, we will restart the transaction to have enough
+ * credits. Similar notes regarding data consistency and locking implications
+ * as for ocfs2_extend_trans() apply here.
+ */
+int ocfs2_assure_trans_credits(handle_t *handle, int nblocks)
+{
+ int old_nblks = jbd2_handle_buffer_credits(handle);
+
+ trace_ocfs2_assure_trans_credits(old_nblks);
+ if (old_nblks >= nblocks)
+ return 0;
+ return ocfs2_extend_trans(handle, nblocks - old_nblks);
+}
+
+/*
* If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
* If that fails, restart the transaction & regain write access for the
* buffer head which is used for metadata modifications.
@@ -479,12 +496,6 @@ bail:
return status;
}
-
-struct ocfs2_triggers {
- struct jbd2_buffer_trigger_type ot_triggers;
- int ot_offset;
-};
-
static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
return container_of(triggers, struct ocfs2_triggers, ot_triggers);
@@ -548,85 +559,76 @@ static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh)
{
+ struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
+
mlog(ML_ERROR,
"ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
"bh->b_blocknr = %llu\n",
(unsigned long)bh,
(unsigned long long)bh->b_blocknr);
- ocfs2_error(bh->b_assoc_map->host->i_sb,
+ ocfs2_error(ot->sb,
"JBD2 has aborted our journal, ocfs2 cannot continue\n");
}
-static struct ocfs2_triggers di_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dinode, i_check),
-};
-
-static struct ocfs2_triggers eb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
-};
-
-static struct ocfs2_triggers rb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
-};
-
-static struct ocfs2_triggers gd_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
-};
-
-static struct ocfs2_triggers db_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_db_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
-};
+static void ocfs2_setup_csum_triggers(struct super_block *sb,
+ enum ocfs2_journal_trigger_type type,
+ struct ocfs2_triggers *ot)
+{
+ BUG_ON(type >= OCFS2_JOURNAL_TRIGGER_COUNT);
-static struct ocfs2_triggers xb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
-};
+ switch (type) {
+ case OCFS2_JTR_DI:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dinode, i_check);
+ break;
+ case OCFS2_JTR_EB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_extent_block, h_check);
+ break;
+ case OCFS2_JTR_RB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_refcount_block, rf_check);
+ break;
+ case OCFS2_JTR_GD:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_group_desc, bg_check);
+ break;
+ case OCFS2_JTR_DB:
+ ot->ot_triggers.t_frozen = ocfs2_db_frozen_trigger;
+ break;
+ case OCFS2_JTR_XB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_xattr_block, xb_check);
+ break;
+ case OCFS2_JTR_DQ:
+ ot->ot_triggers.t_frozen = ocfs2_dq_frozen_trigger;
+ break;
+ case OCFS2_JTR_DR:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check);
+ break;
+ case OCFS2_JTR_DL:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check);
+ break;
+ case OCFS2_JTR_NONE:
+ /* To make compiler happy... */
+ return;
+ }
-static struct ocfs2_triggers dq_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_dq_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
-};
+ ot->ot_triggers.t_abort = ocfs2_abort_trigger;
+ ot->sb = sb;
+}
-static struct ocfs2_triggers dr_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
-};
+void ocfs2_initialize_journal_triggers(struct super_block *sb,
+ struct ocfs2_triggers triggers[])
+{
+ enum ocfs2_journal_trigger_type type;
-static struct ocfs2_triggers dl_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
-};
+ for (type = OCFS2_JTR_DI; type < OCFS2_JOURNAL_TRIGGER_COUNT; type++)
+ ocfs2_setup_csum_triggers(sb, type, &triggers[type]);
+}
static int __ocfs2_journal_access(handle_t *handle,
struct ocfs2_caching_info *ci,
@@ -708,56 +710,91 @@ static int __ocfs2_journal_access(handle_t *handle,
int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DI],
+ type);
}
int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_EB],
+ type);
}
int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_RB],
type);
}
int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_GD],
+ type);
}
int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DB],
+ type);
}
int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_XB],
+ type);
}
int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DQ],
+ type);
}
int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DR],
+ type);
}
int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DL],
+ type);
}
int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
@@ -778,13 +815,15 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
if (!is_handle_aborted(handle)) {
journal_t *journal = handle->h_transaction->t_journal;
- mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
- "Aborting transaction and journal.\n");
+ mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed: "
+ "handle type %u started at line %u, credits %u/%u "
+ "errcode %d. Aborting transaction and journal.\n",
+ handle->h_type, handle->h_line_no,
+ handle->h_requested_credits,
+ jbd2_handle_buffer_credits(handle), status);
handle->h_err = status;
jbd2_journal_abort_handle(handle);
jbd2_journal_abort(journal, status);
- ocfs2_abort(bh->b_assoc_map->host->i_sb,
- "Journal already aborted.\n");
}
}
}
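
ocfs2_assure_trans_credits() only tops up the shortfall, which is what makes it cheap enough to call on every iteration of the per-extent loop in ocfs2_dio_end_io_write(). A stripped-down model of that calling pattern (plain C; extend_credits() and the numbers are made-up stand-ins for the jbd2 machinery):

    #include <stdio.h>

    static int have_credits = 3;

    /* Hypothetical stand-in for extending/restarting the jbd2 handle. */
    static int extend_credits(int more)
    {
        have_credits += more;
        printf("extended by %d, now %d\n", more, have_credits);
        return 0;
    }

    /* Same shape as ocfs2_assure_trans_credits(): extend only the shortfall. */
    static int assure_credits(int needed)
    {
        if (have_credits >= needed)
            return 0;
        return extend_credits(needed - have_credits);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            if (assure_credits(4))
                return 1;
            have_credits -= 2;   /* pretend this extent update used some */
            printf("extent %d written, %d credits left\n", i, have_credits);
        }
        return 0;
    }
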
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 41c9fe7e62f9..e3c3a35dc5e0 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -243,6 +243,8 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb,
int ocfs2_commit_trans(struct ocfs2_super *osb,
handle_t *handle);
int ocfs2_extend_trans(handle_t *handle, int nblocks);
+int ocfs2_assure_trans_credits(handle_t *handle,
+ int nblocks);
int ocfs2_allocate_extend_trans(handle_t *handle,
int thresh);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index a503c553bab2..8fe826143d7b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -284,6 +284,30 @@ enum ocfs2_mount_options
#define OCFS2_OSB_ERROR_FS 0x0004
#define OCFS2_DEFAULT_ATIME_QUANTUM 60
+struct ocfs2_triggers {
+ struct jbd2_buffer_trigger_type ot_triggers;
+ int ot_offset;
+ struct super_block *sb;
+};
+
+enum ocfs2_journal_trigger_type {
+ OCFS2_JTR_DI,
+ OCFS2_JTR_EB,
+ OCFS2_JTR_RB,
+ OCFS2_JTR_GD,
+ OCFS2_JTR_DB,
+ OCFS2_JTR_XB,
+ OCFS2_JTR_DQ,
+ OCFS2_JTR_DR,
+ OCFS2_JTR_DL,
+ OCFS2_JTR_NONE /* This must be the last entry */
+};
+
+#define OCFS2_JOURNAL_TRIGGER_COUNT OCFS2_JTR_NONE
+
+void ocfs2_initialize_journal_triggers(struct super_block *sb,
+ struct ocfs2_triggers triggers[]);
+
struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
@@ -351,6 +375,9 @@ struct ocfs2_super
struct ocfs2_journal *journal;
unsigned long osb_commit_interval;
+ /* Journal triggers for checksum */
+ struct ocfs2_triggers s_journal_triggers[OCFS2_JOURNAL_TRIGGER_COUNT];
+
struct delayed_work la_enable_wq;
/*
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 60e208b01c8d..0511c69c9fde 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -2577,6 +2577,8 @@ DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
+DEFINE_OCFS2_INT_EVENT(ocfs2_assure_trans_credits);
+
DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8aabaed2c1cb..afee70125ae3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1075,9 +1075,11 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
osb, &ocfs2_osb_debug_fops);
- if (ocfs2_meta_ecc(osb))
+ if (ocfs2_meta_ecc(osb)) {
+ ocfs2_initialize_journal_triggers(sb, osb->s_journal_triggers);
ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
osb->osb_debug_root);
+ }
status = ocfs2_mount_volume(sb);
if (status < 0)
diff --git a/fs/open.c b/fs/open.c
index 89cafb572061..22adbef7ecc2 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -202,13 +202,13 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
return error;
}
-SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
+SYSCALL_DEFINE2(ftruncate, unsigned int, fd, off_t, length)
{
return do_sys_ftruncate(fd, length, 1);
}
#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
+COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_off_t, length)
{
return do_sys_ftruncate(fd, length, 1);
}
@@ -247,6 +247,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
long ret;
+ loff_t sum;
if (offset < 0 || len <= 0)
return -EINVAL;
@@ -319,8 +320,11 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
return -ENODEV;
- /* Check for wrap through zero too */
- if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
+ /* Check for wraparound */
+ if (check_add_overflow(offset, len, &sum))
+ return -EFBIG;
+
+ if (sum > inode->i_sb->s_maxbytes)
return -EFBIG;
if (!file->f_op->fallocate)
@@ -982,12 +986,11 @@ static int do_dentry_open(struct file *f,
*/
if (f->f_mode & FMODE_WRITE) {
/*
- * Paired with smp_mb() in collapse_file() to ensure nr_thps
- * is up to date and the update to i_writecount by
- * get_write_access() is visible. Ensures subsequent insertion
- * of THPs into the page cache will fail.
+ * Depends on full fence from get_write_access() to synchronize
+ * against collapse_file() regarding i_writecount and nr_thps
+ * updates. Ensures subsequent insertion of THPs into the page
+ * cache will fail.
*/
- smp_mb();
if (filemap_nr_thps(inode->i_mapping)) {
struct address_space *mapping = inode->i_mapping;
@@ -1004,11 +1007,6 @@ static int do_dentry_open(struct file *f,
}
}
- /*
- * Once we return a file with FMODE_OPENED, __fput() will call
- * fsnotify_close(), so we need fsnotify_open() here for symmetry.
- */
- fsnotify_open(f);
return 0;
cleanup_all:
@@ -1085,8 +1083,19 @@ EXPORT_SYMBOL(file_path);
*/
int vfs_open(const struct path *path, struct file *file)
{
+ int ret;
+
file->f_path = *path;
- return do_dentry_open(file, NULL);
+ ret = do_dentry_open(file, NULL);
+ if (!ret) {
+ /*
+ * Once we return a file with FMODE_OPENED, __fput() will call
+ * fsnotify_close(), so we need fsnotify_open() here for
+ * symmetry.
+ */
+ fsnotify_open(file);
+ }
+ return ret;
}
struct file *dentry_open(const struct path *path, int flags,
@@ -1177,8 +1186,10 @@ struct file *kernel_file_open(const struct path *path, int flags,
error = do_dentry_open(f, NULL);
if (error) {
fput(f);
- f = ERR_PTR(error);
+ return ERR_PTR(error);
}
+
+ fsnotify_open(f);
return f;
}
EXPORT_SYMBOL_GPL(kernel_file_open);
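
The fallocate change replaces the hand-rolled wrap test with check_add_overflow(). In user space the same idea is available through the compiler builtin that the kernel macro wraps; a small sketch with loff_t modelled as long long:

    #include <limits.h>
    #include <stdio.h>

    /* Reject (offset, len) pairs whose sum cannot be represented, the way
     * vfs_fallocate() now does with check_add_overflow(). */
    static int check_range(long long offset, long long len, long long maxbytes)
    {
        long long sum;

        if (offset < 0 || len <= 0)
            return -1;                         /* -EINVAL in the kernel */
        if (__builtin_add_overflow(offset, len, &sum))
            return -2;                         /* -EFBIG: sum wrapped */
        if (sum > maxbytes)
            return -2;                         /* -EFBIG: past s_maxbytes */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_range(100, 4096, LLONG_MAX));            /* 0 */
        printf("%d\n", check_range(LLONG_MAX - 1, 4096, LLONG_MAX));  /* -2 */
        return 0;
    }
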
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index a7b527ea50d3..26ecda0e4d19 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -471,4 +471,5 @@ static void __exit exit_openprom_fs(void)
module_init(init_openprom_fs)
module_exit(exit_openprom_fs)
+MODULE_DESCRIPTION("OpenPROM filesystem support");
MODULE_LICENSE("GPL");
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 085912268442..fdb9b65db1de 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -56,7 +56,6 @@ static int orangefs_writepage_locked(struct page *page,
ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
len, wr, NULL, NULL);
if (ret < 0) {
- SetPageError(page);
mapping_set_error(page->mapping, ret);
} else {
ret = 0;
@@ -119,7 +118,6 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow,
0, &wr, NULL, NULL);
if (ret < 0) {
for (i = 0; i < ow->npages; i++) {
- SetPageError(ow->pages[i]);
mapping_set_error(ow->pages[i]->mapping, ret);
if (PagePrivate(ow->pages[i])) {
wrp = (struct orangefs_write_range *)
@@ -303,15 +301,10 @@ static int orangefs_read_folio(struct file *file, struct folio *folio)
iov_iter_zero(~0U, &iter);
/* takes care of potential aliasing */
flush_dcache_folio(folio);
- if (ret < 0) {
- folio_set_error(folio);
- } else {
- folio_mark_uptodate(folio);
+ if (ret > 0)
ret = 0;
- }
- /* unlock the folio after the ->read_folio() routine completes */
- folio_unlock(folio);
- return ret;
+ folio_end_read(folio, ret == 0);
+ return ret;
}
static int orangefs_write_begin(struct file *file,
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index b501dc07f922..edcca4beb765 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -274,10 +274,8 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
gossip_err("orangefs error: asked for %d pages, only got %d.\n",
bufmap->page_count, ret);
- for (i = 0; i < ret; i++) {
- SetPageError(bufmap->page_array[i]);
+ for (i = 0; i < ret; i++)
unpin_user_page(bufmap->page_array[i]);
- }
return -ENOMEM;
}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 116f542442dd..ab65e98a1def 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -1314,10 +1314,6 @@ static int ovl_create_tmpfile(struct file *file, struct dentry *dentry,
int flags = file->f_flags | OVL_OPEN_FLAGS;
int err;
- err = ovl_copy_up(dentry->d_parent);
- if (err)
- return err;
-
old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_setup_cred_for_create(dentry, inode, mode, old_cred);
if (err)
@@ -1360,6 +1356,10 @@ static int ovl_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
if (!OVL_FS(dentry->d_sb)->tmpfile)
return -EOPNOTSUPP;
+ err = ovl_copy_up(dentry->d_parent);
+ if (err)
+ return err;
+
err = ovl_want_write(dentry);
if (err)
return err;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 063409069f56..5868cb222955 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -181,6 +181,10 @@ static int ovl_check_encode_origin(struct dentry *dentry)
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
bool decodable = ofs->config.nfs_export;
+ /* No upper layer? */
+ if (!ovl_upper_mnt(ofs))
+ return 1;
+
/* Lower file handle for non-upper non-decodable */
if (!ovl_dentry_upper(dentry) && !decodable)
return 1;
@@ -209,7 +213,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor.
*/
- if (d_is_dir(dentry) && ovl_upper_mnt(ofs) && decodable)
+ if (d_is_dir(dentry) && decodable)
return ovl_connect_layer(dentry);
/* Lower file handle for indexed and non-upper dir/non-dir */
diff --git a/fs/pidfs.c b/fs/pidfs.c
index dbb9d854d1c5..c9cb14181def 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -11,10 +11,16 @@
#include <linux/proc_fs.h>
#include <linux/proc_ns.h>
#include <linux/pseudo_fs.h>
+#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <uapi/linux/pidfd.h>
+#include <linux/ipc_namespace.h>
+#include <linux/time_namespace.h>
+#include <linux/utsname.h>
+#include <net/net_namespace.h>
#include "internal.h"
+#include "mount.h"
#ifdef CONFIG_PROC_FS
/**
@@ -108,11 +114,95 @@ static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
return poll_flags;
}
+static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct task_struct *task __free(put_task) = NULL;
+ struct nsproxy *nsp __free(put_nsproxy) = NULL;
+ struct pid *pid = pidfd_pid(file);
+ struct ns_common *ns_common;
+
+ if (arg)
+ return -EINVAL;
+
+ task = get_pid_task(pid, PIDTYPE_PID);
+ if (!task)
+ return -ESRCH;
+
+ scoped_guard(task_lock, task) {
+ nsp = task->nsproxy;
+ if (nsp)
+ get_nsproxy(nsp);
+ }
+ if (!nsp)
+ return -ESRCH; /* just pretend it didn't exist */
+
+ /*
+ * We're trying to open a file descriptor to the namespace so perform a
+ * filesystem cred ptrace check. Also, we mirror nsfs behavior.
+ */
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ return -EACCES;
+
+ switch (cmd) {
+ /* Namespaces that hang off nsproxy. */
+ case PIDFD_GET_CGROUP_NAMESPACE:
+ get_cgroup_ns(nsp->cgroup_ns);
+ ns_common = to_ns_common(nsp->cgroup_ns);
+ break;
+ case PIDFD_GET_IPC_NAMESPACE:
+ get_ipc_ns(nsp->ipc_ns);
+ ns_common = to_ns_common(nsp->ipc_ns);
+ break;
+ case PIDFD_GET_MNT_NAMESPACE:
+ get_mnt_ns(nsp->mnt_ns);
+ ns_common = to_ns_common(nsp->mnt_ns);
+ break;
+ case PIDFD_GET_NET_NAMESPACE:
+ ns_common = to_ns_common(nsp->net_ns);
+ get_net_ns(ns_common);
+ break;
+ case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
+ get_pid_ns(nsp->pid_ns_for_children);
+ ns_common = to_ns_common(nsp->pid_ns_for_children);
+ break;
+ case PIDFD_GET_TIME_NAMESPACE:
+ get_time_ns(nsp->time_ns);
+ ns_common = to_ns_common(nsp->time_ns);
+ break;
+ case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
+ get_time_ns(nsp->time_ns_for_children);
+ ns_common = to_ns_common(nsp->time_ns_for_children);
+ break;
+ case PIDFD_GET_UTS_NAMESPACE:
+ get_uts_ns(nsp->uts_ns);
+ ns_common = to_ns_common(nsp->uts_ns);
+ break;
+ /* Namespaces that don't hang off nsproxy. */
+ case PIDFD_GET_USER_NAMESPACE:
+ rcu_read_lock();
+ ns_common = to_ns_common(get_user_ns(task_cred_xxx(task, user_ns)));
+ rcu_read_unlock();
+ break;
+ case PIDFD_GET_PID_NAMESPACE:
+ rcu_read_lock();
+ ns_common = to_ns_common(get_pid_ns(task_active_pid_ns(task)));
+ rcu_read_unlock();
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ /* open_namespace() unconditionally consumes the reference */
+ return open_namespace(ns_common);
+}
+
static const struct file_operations pidfs_file_operations = {
.poll = pidfd_poll,
#ifdef CONFIG_PROC_FS
.show_fdinfo = pidfd_show_fdinfo,
#endif
+ .unlocked_ioctl = pidfd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
struct pid *pidfd_pid(const struct file *file)
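For a sense of how these ioctls are meant to be driven from userspace, here is a minimal, illustrative sketch (not part of the patch); it assumes a kernel carrying this change and a <linux/pidfd.h> that exports the PIDFD_GET_* constants:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/pidfd.h>

int main(int argc, char **argv)
{
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();
	int pidfd, nsfd;

	pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* arg must be 0; on success a new namespace fd is returned */
	nsfd = ioctl(pidfd, PIDFD_GET_NET_NAMESPACE, 0);
	if (nsfd < 0) {
		perror("PIDFD_GET_NET_NAMESPACE");
		return 1;
	}
	printf("net namespace fd: %d (usable with setns(2))\n", nsfd);
	close(nsfd);
	close(pidfd);
	return 0;
}

Access is gated by the same PTRACE_MODE_READ_FSCREDS rule as nsfs, per the ptrace_may_access() check above.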
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18550c071d71..72a1acd03675 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
- seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
+ seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
mmput(mm);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 775ce0bcf08c..c02f1e63f82d 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -202,8 +202,8 @@ int proc_alloc_inum(unsigned int *inum)
{
int i;
- i = ida_simple_get(&proc_inum_ida, 0, UINT_MAX - PROC_DYNAMIC_FIRST + 1,
- GFP_KERNEL);
+ i = ida_alloc_max(&proc_inum_ida, UINT_MAX - PROC_DYNAMIC_FIRST,
+ GFP_KERNEL);
if (i < 0)
return i;
@@ -213,7 +213,7 @@ int proc_alloc_inum(unsigned int *inum)
void proc_free_inum(unsigned int inum)
{
- ida_simple_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
+ ida_free(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
}
static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags)
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index b1c2c0b82116..9553e77c9d31 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -21,7 +21,7 @@
#define list_for_each_table_entry(entry, header) \
entry = header->ctl_table; \
- for (size_t i = 0 ; i < header->ctl_table_size && entry->procname; ++i, entry++)
+ for (size_t i = 0 ; i < header->ctl_table_size; ++i, entry++)
static const struct dentry_operations proc_sys_dentry_operations;
static const struct file_operations proc_sys_file_operations;
@@ -476,12 +476,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
make_empty_dir_inode(inode);
}
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
if (root->set_ownership)
root->set_ownership(head, &inode->i_uid, &inode->i_gid);
- else {
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- }
return inode;
}
@@ -951,14 +949,14 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
char *new_name;
new = kzalloc(sizeof(*new) + sizeof(struct ctl_node) +
- sizeof(struct ctl_table)*2 + namelen + 1,
+ sizeof(struct ctl_table) + namelen + 1,
GFP_KERNEL);
if (!new)
return NULL;
node = (struct ctl_node *)(new + 1);
table = (struct ctl_table *)(node + 1);
- new_name = (char *)(table + 2);
+ new_name = (char *)(table + 1);
memcpy(new_name, name, namelen);
table[0].procname = new_name;
table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO;
@@ -1093,6 +1091,7 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
static int sysctl_check_table_array(const char *path, struct ctl_table *table)
{
+ unsigned int extra;
int err = 0;
if ((table->proc_handler == proc_douintvec) ||
@@ -1104,6 +1103,19 @@ static int sysctl_check_table_array(const char *path, struct ctl_table *table)
if (table->proc_handler == proc_dou8vec_minmax) {
if (table->maxlen != sizeof(u8))
err |= sysctl_err(path, table, "array not allowed");
+
+ if (table->extra1) {
+ extra = *(unsigned int *) table->extra1;
+ if (extra > 255U)
+ err |= sysctl_err(path, table,
+ "range value too large for proc_dou8vec_minmax");
+ }
+ if (table->extra2) {
+ extra = *(unsigned int *) table->extra2;
+ if (extra > 255U)
+ err |= sysctl_err(path, table,
+ "range value too large for proc_dou8vec_minmax");
+ }
}
if (table->proc_handler == proc_dobool) {
@@ -1119,6 +1131,8 @@ static int sysctl_check_table(const char *path, struct ctl_table_header *header)
struct ctl_table *entry;
int err = 0;
list_for_each_table_entry(entry, header) {
+ if (!entry->procname)
+ err |= sysctl_err(path, entry, "procname is null");
if ((entry->proc_handler == proc_dostring) ||
(entry->proc_handler == proc_dobool) ||
(entry->proc_handler == proc_dointvec) ||
@@ -1154,18 +1168,16 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table_
struct ctl_table_header *links;
struct ctl_node *node;
char *link_name;
- int nr_entries, name_bytes;
+ int name_bytes;
name_bytes = 0;
- nr_entries = 0;
list_for_each_table_entry(entry, head) {
- nr_entries++;
name_bytes += strlen(entry->procname) + 1;
}
links = kzalloc(sizeof(struct ctl_table_header) +
- sizeof(struct ctl_node)*nr_entries +
- sizeof(struct ctl_table)*(nr_entries + 1) +
+ sizeof(struct ctl_node)*head->ctl_table_size +
+ sizeof(struct ctl_table)*head->ctl_table_size +
name_bytes,
GFP_KERNEL);
@@ -1173,8 +1185,8 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table_
return NULL;
node = (struct ctl_node *)(links + 1);
- link_table = (struct ctl_table *)(node + nr_entries);
- link_name = (char *)&link_table[nr_entries + 1];
+ link_table = (struct ctl_table *)(node + head->ctl_table_size);
+ link_name = (char *)(link_table + head->ctl_table_size);
link = link_table;
list_for_each_table_entry(entry, head) {
@@ -1188,7 +1200,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table_
}
init_header(links, dir->header.root, dir->header.set, node, link_table,
head->ctl_table_size);
- links->nreg = nr_entries;
+ links->nreg = head->ctl_table_size;
return links;
}
@@ -1300,28 +1312,23 @@ static struct ctl_dir *sysctl_mkdir_p(struct ctl_dir *dir, const char *path)
* __register_sysctl_table - register a leaf sysctl table
* @set: Sysctl tree to register on
* @path: The path to the directory the sysctl table is in.
- * @table: the top-level table structure without any child. This table
- * should not be free'd after registration. So it should not be
- * used on stack. It can either be a global or dynamically allocated
- * by the caller and free'd later after sysctl unregistration.
+ *
+ * @table: the top-level table structure. This table should not be freed
+ * after registration, so it should not live on the stack. It can either
+ * be global or dynamically allocated by the caller and freed later,
+ * after sysctl unregistration.
* @table_size : The number of elements in table
*
* Register a sysctl table hierarchy. @table should be a filled in ctl_table
- * array. A completely 0 filled entry terminates the table.
+ * array.
*
* The members of the &struct ctl_table structure are used as follows:
- *
* procname - the name of the sysctl file under /proc/sys. Set to %NULL to not
* enter a sysctl file
- *
- * data - a pointer to data for use by proc_handler
- *
- * maxlen - the maximum size in bytes of the data
- *
- * mode - the file permissions for the /proc/sys file
- *
- * child - must be %NULL.
- *
+ * data - a pointer to data for use by proc_handler
+ * maxlen - the maximum size in bytes of the data
+ * mode - the file permissions for the /proc/sys file
+ * type - defines the target type (described in the struct definition)
* proc_handler - the text handler routine (described below)
*
* extra1, extra2 - extra pointers usable by the proc handler routines
@@ -1329,8 +1336,7 @@ static struct ctl_dir *sysctl_mkdir_p(struct ctl_dir *dir, const char *path)
* [0] https://lkml.kernel.org/87zgpte9o4.fsf@email.froward.int.ebiederm.org
*
* Leaf nodes in the sysctl tree will be represented by a single file
- * under /proc; non-leaf nodes (where child is not NULL) are not allowed,
- * sysctl_check_table() verifies this.
+ * under /proc; non-leaf nodes are not allowed.
*
* There must be a proc_handler routine for any terminal nodes.
* Several default handlers are available to cover common cases -
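To illustrate what the new proc_dou8vec_minmax range check accepts, a hypothetical u8 sysctl could be declared as below; extra1/extra2 values above 255 would now be flagged at registration time (all demo_* names are invented):

#include <linux/sysctl.h>

/* Hypothetical example: bounds fit in a byte, so the new check passes. */
static unsigned int demo_min = 1;
static unsigned int demo_max = 200;	/* anything > 255 would trip sysctl_err() */
static u8 demo_value = 16;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_u8",
		.data		= &demo_value,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.proc_handler	= proc_dou8vec_minmax,
		.extra1		= &demo_min,
		.extra2		= &demo_max,
	},
};

Such a table would be registered with register_sysctl("kernel", demo_table); with the NULL-procname check added above, tables are sized purely by ctl_table_size and no longer rely on an empty sentinel entry.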
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f8d35f993fe5..71e5039d940d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -707,6 +707,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
#ifdef CONFIG_X86_USER_SHADOW_STACK
[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
+#ifdef CONFIG_64BIT
+ [ilog2(VM_SEALED)] = "sl",
+#endif
};
size_t i;
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 0a808951b7d3..e133b507ddf3 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -61,7 +61,7 @@ static int show_sb_opts(struct seq_file *m, struct super_block *sb)
return security_sb_show_options(m, sb);
}
-static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
+static void show_vfsmnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
static const struct proc_fs_opts mnt_opts[] = {
{ MNT_NOSUID, ",nosuid" },
@@ -124,7 +124,7 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
err = show_sb_opts(m, sb);
if (err)
goto out;
- show_mnt_opts(m, mnt);
+ show_vfsmnt_opts(m, mnt);
if (sb->s_op->show_options)
err = sb->s_op->show_options(m, mnt_path.dentry);
seq_puts(m, " 0 0\n");
@@ -153,7 +153,7 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
goto out;
seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
- show_mnt_opts(m, mnt);
+ show_vfsmnt_opts(m, mnt);
/* Tagged fields ("foo:X" or "bar") */
if (IS_MNT_SHARED(r))
diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c
index de8cf5d75f34..65b2473e22ff 100644
--- a/fs/pstore/blk.c
+++ b/fs/pstore/blk.c
@@ -241,7 +241,7 @@ err:
/* get information of pstore/blk */
int pstore_blk_get_config(struct pstore_blk_config *info)
{
- strncpy(info->device, blkdev, 80);
+ strscpy(info->device, blkdev);
info->max_reason = max_reason;
info->kmsg_size = check_size(kmsg_size, 4096);
info->pmsg_size = check_size(pmsg_size, 4096);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 03425928d2fb..3497ede88aa0 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -761,4 +761,5 @@ static void __exit pstore_exit(void)
module_exit(pstore_exit)
MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_DESCRIPTION("Persistent Storage - platform driver interface");
MODULE_LICENSE("GPL");
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index d79841e94428..e399e2dd3a12 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -430,5 +430,6 @@ static void __exit exit_qnx4_fs(void)
module_init(init_qnx4_fs)
module_exit(exit_qnx4_fs)
+MODULE_DESCRIPTION("QNX4 file system");
MODULE_LICENSE("GPL");
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index d62fbef838b6..4f1735b882b1 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -694,4 +694,5 @@ static void __exit exit_qnx6_fs(void)
module_init(init_qnx6_fs)
module_exit(exit_qnx6_fs)
+MODULE_DESCRIPTION("QNX6 file system");
MODULE_LICENSE("GPL");
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 627eb2f72ef3..a2b256dac36e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2246,9 +2246,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
int cnt;
struct quota_info *dqopt = sb_dqopt(sb);
- /* s_umount should be held in exclusive mode */
- if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
- up_read(&sb->s_umount);
+ rwsem_assert_held_write(&sb->s_umount);
/* Cannot turn off usage accounting without turning off limits, or
* suspend quotas and simultaneously turn quotas off. */
@@ -2510,9 +2508,7 @@ int dquot_resume(struct super_block *sb, int type)
int ret = 0, cnt;
unsigned int flags;
- /* s_umount should be held in exclusive mode */
- if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
- up_read(&sb->s_umount);
+ rwsem_assert_held_write(&sb->s_umount);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
diff --git a/fs/read_write.c b/fs/read_write.c
index ef6339391351..90e283b31ca1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -730,7 +730,7 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
ssize_t ret;
init_sync_kiocb(&kiocb, filp);
- ret = kiocb_set_rw_flags(&kiocb, flags);
+ ret = kiocb_set_rw_flags(&kiocb, flags, type);
if (ret)
return ret;
kiocb.ki_pos = (ppos ? *ppos : 0);
@@ -1736,3 +1736,19 @@ int generic_file_rw_checks(struct file *file_in, struct file *file_out)
return 0;
}
+
+bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos)
+{
+ size_t len = iov_iter_count(iter);
+
+ if (!iter_is_ubuf(iter))
+ return false;
+
+ if (!is_power_of_2(len))
+ return false;
+
+ if (!IS_ALIGNED(pos, len))
+ return false;
+
+ return true;
+}
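As a rough sketch of how a write path could consume this helper (the wrapper below is hypothetical and assumes the IOCB_ATOMIC flag from the same atomic-write series; only generic_atomic_write_valid() is defined by this hunk):

#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical ->write_iter wrapper gating RWF_ATOMIC requests. */
static ssize_t demo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	if (iocb->ki_flags & IOCB_ATOMIC) {
		/* single user buffer, power-of-two length, naturally aligned */
		if (!generic_atomic_write_valid(from, iocb->ki_pos))
			return -EINVAL;
	}
	return generic_file_write_iter(iocb, from);
}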
diff --git a/fs/readdir.c b/fs/readdir.c
index 278bc0254732..d6c82421902a 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -22,8 +22,6 @@
#include <linux/compat.h>
#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-
/*
* Some filesystems were never converted to '->iterate_shared()'
* and their directory iterators want the inode lock held for
@@ -72,7 +70,7 @@ int wrap_directory_iterator(struct file *file,
EXPORT_SYMBOL(wrap_directory_iterator);
/*
- * Note the "unsafe_put_user() semantics: we goto a
+ * Note the "unsafe_put_user()" semantics: we goto a
* label for errors.
*/
#define unsafe_copy_dirent_name(_dst, _src, _len, label) do { \
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index c1daedc50f4c..9b43a81a6488 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2699,7 +2699,6 @@ fail:
}
bh = bh->b_this_page;
} while (bh != head);
- folio_set_error(folio);
BUG_ON(folio_test_writeback(folio));
folio_start_writeback(folio);
folio_unlock(folio);
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 2cbb92462074..68758b6fed94 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -101,19 +101,15 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos);
*/
static int romfs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
loff_t offset, size;
unsigned long fillsize, pos;
void *buf;
int ret;
- buf = kmap(page);
- if (!buf)
- return -ENOMEM;
+ buf = kmap_local_folio(folio, 0);
- /* 32 bit warning -- but not for us :) */
- offset = page_offset(page);
+ offset = folio_pos(folio);
size = i_size_read(inode);
fillsize = 0;
ret = 0;
@@ -125,20 +121,14 @@ static int romfs_read_folio(struct file *file, struct folio *folio)
ret = romfs_dev_read(inode->i_sb, pos, buf, fillsize);
if (ret < 0) {
- SetPageError(page);
fillsize = 0;
ret = -EIO;
}
}
- if (fillsize < PAGE_SIZE)
- memset(buf + fillsize, 0, PAGE_SIZE - fillsize);
- if (ret == 0)
- SetPageUptodate(page);
-
- flush_dcache_page(page);
- kunmap(page);
- unlock_page(page);
+ buf = folio_zero_tail(folio, fillsize, buf);
+ kunmap_local(buf);
+ folio_end_read(folio, ret == 0);
return ret;
}
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 4a5614442dbf..ec7b2da2477a 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -282,14 +282,10 @@ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
if (IS_ERR(file)) {
put_unused_fd(ufd);
kfree(ctx);
- return ufd;
+ return PTR_ERR(file);
}
file->f_mode |= FMODE_NOWAIT;
- /*
- * When we call this, the initialization must be complete, since
- * anon_inode_getfd() will install the fd.
- */
fd_install(ufd, file);
} else {
struct fd f = fdget(ufd);
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index a665aac9be9f..6397fdefd876 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -134,7 +134,7 @@ module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
module_param(enable_gcm_256, bool, 0644);
-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
+MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
@@ -431,6 +431,7 @@ cifs_free_inode(struct inode *inode)
static void
cifs_evict_inode(struct inode *inode)
{
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
if (inode->i_state & I_PINNING_NETFS_WB)
cifs_fscache_unuse_inode_cookie(inode, true);
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 73482734a8d8..a865941724c0 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -1494,6 +1494,8 @@ struct cifs_aio_ctx {
struct cifs_io_request {
struct netfs_io_request rreq;
struct cifsFileInfo *cfile;
+ struct TCP_Server_Info *server;
+ pid_t pid;
};
/* asynchronous read support */
@@ -1504,7 +1506,6 @@ struct cifs_io_subrequest {
struct cifs_io_request *req;
};
ssize_t got_bytes;
- pid_t pid;
unsigned int xid;
int result;
bool have_xid;
@@ -1917,8 +1918,8 @@ require use of the stronger protocol */
#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
#define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
-#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP)
-#define CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2)
+#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
+#define CIFSSEC_MAX (CIFSSEC_MAY_SIGN | CIFSSEC_MUST_KRB5 | CIFSSEC_MAY_SEAL)
#define CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
/*
*****************************************************************
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index c46d418c1c0c..a2072ab9e586 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2574,7 +2574,7 @@ typedef struct {
struct win_dev {
- unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO*/
+ unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO or LnxSOCK */
__le64 major;
__le64 minor;
} __attribute__((packed));
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 25e9ab947c17..595c4b673707 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1345,8 +1345,8 @@ cifs_async_readv(struct cifs_io_subrequest *rdata)
if (rc)
return rc;
- smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
- smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
+ smb->hdr.Pid = cpu_to_le16((__u16)rdata->req->pid);
+ smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->req->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = rdata->req->cfile->fid.netfid;
@@ -1689,8 +1689,8 @@ cifs_async_writev(struct cifs_io_subrequest *wdata)
if (rc)
goto async_writev_out;
- smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
- smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
+ smb->hdr.Pid = cpu_to_le16((__u16)wdata->req->pid);
+ smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->req->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = wdata->req->cfile->fid.netfid;
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 9d5c2440abfc..1374635e89fa 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -134,17 +134,15 @@ fail:
static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
- struct TCP_Server_Info *server;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+ struct TCP_Server_Info *server = req->server;
struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
size_t rsize = 0;
int rc;
rdata->xid = get_xid();
rdata->have_xid = true;
-
- server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
rdata->server = server;
if (cifs_sb->ctx->rsize == 0)
@@ -179,15 +177,8 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
struct netfs_io_request *rreq = subreq->rreq;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
- struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
- pid_t pid;
int rc = 0;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = req->cfile->pid;
- else
- pid = current->tgid; // Ummm... This may be a workqueue
-
cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
__func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
subreq->transferred, subreq->len);
@@ -201,16 +192,8 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
}
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- rdata->pid = pid;
-
- rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len);
- if (!rc) {
- if (rdata->req->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = rdata->server->ops->async_readv(rdata);
- }
+ rc = rdata->server->ops->async_readv(rdata);
out:
if (rc)
netfs_subreq_terminated(subreq, rc, false);
@@ -245,11 +228,15 @@ static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
rreq->rsize = cifs_sb->ctx->rsize;
rreq->wsize = cifs_sb->ctx->wsize;
+ req->pid = current->tgid; // may run from a workqueue; this is only a default
if (file) {
open_file = file->private_data;
rreq->netfs_priv = file->private_data;
req->cfile = cifsFileInfo_get(open_file);
+ req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ req->pid = req->cfile->pid;
} else if (rreq->origin != NETFS_WRITEBACK) {
WARN_ON_ONCE(1);
return -EIO;
@@ -259,35 +246,6 @@ static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
}
/*
- * Expand the size of a readahead to the size of the rsize, if at least as
- * large as a page, allowing for the possibility that rsize is not pow-2
- * aligned.
- */
-static void cifs_expand_readahead(struct netfs_io_request *rreq)
-{
- unsigned int rsize = rreq->rsize;
- loff_t misalignment, i_size = i_size_read(rreq->inode);
-
- if (rsize < PAGE_SIZE)
- return;
-
- if (rsize < INT_MAX)
- rsize = roundup_pow_of_two(rsize);
- else
- rsize = ((unsigned int)INT_MAX + 1) / 2;
-
- misalignment = rreq->start & (rsize - 1);
- if (misalignment) {
- rreq->start -= misalignment;
- rreq->len += misalignment;
- }
-
- rreq->len = round_up(rreq->len, rsize);
- if (rreq->start < i_size && rreq->len > i_size - rreq->start)
- rreq->len = i_size - rreq->start;
-}
-
-/*
* Completion of a request operation.
*/
static void cifs_rreq_done(struct netfs_io_request *rreq)
@@ -342,7 +300,6 @@ const struct netfs_request_ops cifs_req_ops = {
.init_request = cifs_init_request,
.free_request = cifs_free_request,
.free_subrequest = cifs_free_subrequest,
- .expand_readahead = cifs_expand_readahead,
.clamp_length = cifs_clamp_length,
.issue_read = cifs_req_issue_read,
.done = cifs_rreq_done,
@@ -3200,8 +3157,6 @@ static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t ret;
- WARN_ON_ONCE(iov_iter_count(iter) != PAGE_SIZE);
-
if (iov_iter_rw(iter) == READ)
ret = netfs_unbuffered_read_iter_locked(iocb, iter);
else
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 3bbac925d076..bc926ab2555b 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -128,12 +128,14 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_flag("compress", Opt_compress),
fsparam_flag("witness", Opt_witness),
+ /* Mount options which take uid or gid */
+ fsparam_uid("backupuid", Opt_backupuid),
+ fsparam_gid("backupgid", Opt_backupgid),
+ fsparam_uid("uid", Opt_uid),
+ fsparam_uid("cruid", Opt_cruid),
+ fsparam_gid("gid", Opt_gid),
+
/* Mount options which take numeric value */
- fsparam_u32("backupuid", Opt_backupuid),
- fsparam_u32("backupgid", Opt_backupgid),
- fsparam_u32("uid", Opt_uid),
- fsparam_u32("cruid", Opt_cruid),
- fsparam_u32("gid", Opt_gid),
fsparam_u32("file_mode", Opt_file_mode),
fsparam_u32("dirmode", Opt_dirmode),
fsparam_u32("dir_mode", Opt_dirmode),
@@ -951,8 +953,6 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
int i, opt;
bool is_smb3 = !strcmp(fc->fs_type->name, "smb3");
bool skip_parsing = false;
- kuid_t uid;
- kgid_t gid;
cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
@@ -1083,38 +1083,23 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
}
break;
case Opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- goto cifs_parse_mount_err;
- ctx->linux_uid = uid;
+ ctx->linux_uid = result.uid;
ctx->uid_specified = true;
break;
case Opt_cruid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- goto cifs_parse_mount_err;
- ctx->cred_uid = uid;
+ ctx->cred_uid = result.uid;
ctx->cruid_specified = true;
break;
case Opt_backupuid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- goto cifs_parse_mount_err;
- ctx->backupuid = uid;
+ ctx->backupuid = result.uid;
ctx->backupuid_specified = true;
break;
case Opt_backupgid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- goto cifs_parse_mount_err;
- ctx->backupgid = gid;
+ ctx->backupgid = result.gid;
ctx->backupgid_specified = true;
break;
case Opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- goto cifs_parse_mount_err;
- ctx->linux_gid = gid;
+ ctx->linux_gid = result.gid;
ctx->gid_specified = true;
break;
case Opt_port:
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 262576573eb5..4a8aa1de9522 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -606,6 +606,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
fattr->cf_rdev = MKDEV(mjr, mnr);
}
+ } else if (memcmp("LnxSOCK", pbuf, 8) == 0) {
+ cifs_dbg(FYI, "Socket\n");
+ fattr->cf_mode |= S_IFSOCK;
+ fattr->cf_dtype = DT_SOCK;
} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
cifs_dbg(FYI, "Symlink\n");
fattr->cf_mode |= S_IFLNK;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 4ce6c3121a7e..c8e536540895 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4997,6 +4997,9 @@ static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
pdev.major = cpu_to_le64(MAJOR(dev));
pdev.minor = cpu_to_le64(MINOR(dev));
break;
+ case S_IFSOCK:
+ strscpy(pdev.type, "LnxSOCK");
+ break;
case S_IFIFO:
strscpy(pdev.type, "LnxFIFO");
break;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 993ac36c3d58..2ae2dbb6202b 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4484,6 +4484,16 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
return rc;
}
+static void smb2_readv_worker(struct work_struct *work)
+{
+ struct cifs_io_subrequest *rdata =
+ container_of(work, struct cifs_io_subrequest, subreq.work);
+
+ netfs_subreq_terminated(&rdata->subreq,
+ (rdata->result == 0 || rdata->result == -EAGAIN) ?
+ rdata->got_bytes : rdata->result, true);
+}
+
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
@@ -4577,12 +4587,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
if (rdata->subreq.start < rdata->subreq.rreq->i_size)
rdata->result = 0;
}
- if (rdata->result == 0 || rdata->result == -EAGAIN)
- iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes);
rdata->credits.value = 0;
- netfs_subreq_terminated(&rdata->subreq,
- (rdata->result == 0 || rdata->result == -EAGAIN) ?
- rdata->got_bytes : rdata->result, true);
+ INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
+ queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
add_credits(server, &credits, 0);
}
@@ -4614,7 +4621,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
io_parms.length = rdata->subreq.len;
io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
- io_parms.pid = rdata->pid;
+ io_parms.pid = rdata->req->pid;
rc = smb2_new_read_req(
(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
@@ -4789,7 +4796,6 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->result = -ENOSPC;
else
wdata->subreq.len = written;
- iov_iter_advance(&wdata->subreq.io_iter, written);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
@@ -4867,7 +4873,7 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
.length = wdata->subreq.len,
.persistent_fid = wdata->req->cfile->fid.persistent_fid,
.volatile_fid = wdata->req->cfile->fid.volatile_fid,
- .pid = wdata->pid,
+ .pid = wdata->req->pid,
};
io_parms = &_io_parms;
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 02135a605305..1476c445cadc 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -216,8 +216,8 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
}
tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
if (!tcon) {
- cifs_put_smb_ses(ses);
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_put_smb_ses(ses);
return NULL;
}
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/smb/common/cifs_arc4.c b/fs/smb/common/cifs_arc4.c
index 043e4cb839fa..df360ca47826 100644
--- a/fs/smb/common/cifs_arc4.c
+++ b/fs/smb/common/cifs_arc4.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include "arc4.h"
+MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
MODULE_LICENSE("GPL");
int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
diff --git a/fs/smb/common/cifs_md4.c b/fs/smb/common/cifs_md4.c
index 50f78cfc6ce9..7ee7f4dad90c 100644
--- a/fs/smb/common/cifs_md4.c
+++ b/fs/smb/common/cifs_md4.c
@@ -24,6 +24,7 @@
#include <asm/byteorder.h>
#include "md4.h"
+MODULE_DESCRIPTION("MD4 Message Digest Algorithm (RFC1320)");
MODULE_LICENSE("GPL");
static inline u32 lshift(u32 x, unsigned int s)
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 8d10be1fe18a..c3ee42188d25 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -917,6 +917,40 @@ struct smb2_query_directory_rsp {
__u8 Buffer[];
} __packed;
+/* DeviceType Flags */
+#define FILE_DEVICE_CD_ROM 0x00000002
+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
+#define FILE_DEVICE_DFS 0x00000006
+#define FILE_DEVICE_DISK 0x00000007
+#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
+#define FILE_DEVICE_FILE_SYSTEM 0x00000009
+#define FILE_DEVICE_NAMED_PIPE 0x00000011
+#define FILE_DEVICE_NETWORK 0x00000012
+#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
+#define FILE_DEVICE_NULL 0x00000015
+#define FILE_DEVICE_PARALLEL_PORT 0x00000016
+#define FILE_DEVICE_PRINTER 0x00000018
+#define FILE_DEVICE_SERIAL_PORT 0x0000001b
+#define FILE_DEVICE_STREAMS 0x0000001e
+#define FILE_DEVICE_TAPE 0x0000001f
+#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
+#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
+#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
+
+/* Device Characteristics */
+#define FILE_REMOVABLE_MEDIA 0x00000001
+#define FILE_READ_ONLY_DEVICE 0x00000002
+#define FILE_FLOPPY_DISKETTE 0x00000004
+#define FILE_WRITE_ONCE_MEDIA 0x00000008
+#define FILE_REMOTE_DEVICE 0x00000010
+#define FILE_DEVICE_IS_MOUNTED 0x00000020
+#define FILE_VIRTUAL_VOLUME 0x00000040
+#define FILE_DEVICE_SECURE_OPEN 0x00000100
+#define FILE_CHARACTERISTIC_TS_DEVICE 0x00001000
+#define FILE_CHARACTERISTIC_WEBDAV_DEVICE 0x00002000
+#define FILE_PORTABLE_DEVICE 0x00004000
+#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
+
/*
* Maximum number of iovs we need for a set-info request.
* The largest one is rename/hardlink
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index b6c5a8ea3887..840c71c66b30 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -630,6 +630,12 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
return name;
}
+ if (*name == '\\') {
+ pr_err("not allow directory name included leading slash\n");
+ kfree(name);
+ return ERR_PTR(-EINVAL);
+ }
+
ksmbd_conv_path_to_unix(name);
ksmbd_strip_last_slash(name);
return name;
@@ -2045,15 +2051,22 @@ out_err1:
* @access: file access flags
* @disposition: file disposition flags
* @may_flags: set with MAY_ flags
+ * @is_dir: is creating open flags for directory
*
* Return: file open flags
*/
static int smb2_create_open_flags(bool file_present, __le32 access,
__le32 disposition,
- int *may_flags)
+ int *may_flags,
+ bool is_dir)
{
int oflags = O_NONBLOCK | O_LARGEFILE;
+ if (is_dir) {
+ access &= ~FILE_WRITE_DESIRE_ACCESS_LE;
+ ksmbd_debug(SMB, "Discard write access to a directory\n");
+ }
+
if (access & FILE_READ_DESIRED_ACCESS_LE &&
access & FILE_WRITE_DESIRE_ACCESS_LE) {
oflags |= O_RDWR;
@@ -2361,7 +2374,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
if (rc > 0) {
rc = ksmbd_vfs_remove_xattr(idmap,
path,
- attr_name);
+ attr_name,
+ get_write);
if (rc < 0) {
ksmbd_debug(SMB,
@@ -2376,7 +2390,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
} else {
rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
le16_to_cpu(eabuf->EaValueLength),
- 0, true);
+ 0, get_write);
if (rc < 0) {
ksmbd_debug(SMB,
"ksmbd_vfs_setxattr is failed(%d)\n",
@@ -2468,7 +2482,7 @@ static int smb2_remove_smb_xattrs(const struct path *path)
!strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
STREAM_PREFIX_LEN)) {
err = ksmbd_vfs_remove_xattr(idmap, path,
- name);
+ name, true);
if (err)
ksmbd_debug(SMB, "remove xattr failed : %s\n",
name);
@@ -2842,20 +2856,11 @@ int smb2_open(struct ksmbd_work *work)
}
if (req->NameLength) {
- if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
- *(char *)req->Buffer == '\\') {
- pr_err("not allow directory name included leading slash\n");
- rc = -EINVAL;
- goto err_out2;
- }
-
name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
le16_to_cpu(req->NameLength),
work->conn->local_nls);
if (IS_ERR(name)) {
rc = PTR_ERR(name);
- if (rc != -ENOMEM)
- rc = -ENOENT;
name = NULL;
goto err_out2;
}
@@ -3169,7 +3174,9 @@ int smb2_open(struct ksmbd_work *work)
open_flags = smb2_create_open_flags(file_present, daccess,
req->CreateDisposition,
- &may_flags);
+ &may_flags,
+ req->CreateOptions & FILE_DIRECTORY_FILE_LE ||
+ (file_present && S_ISDIR(d_inode(path.dentry)->i_mode)));
if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
if (open_flags & (O_CREAT | O_TRUNC)) {
@@ -5316,8 +5323,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info = (struct filesystem_device_info *)rsp->Buffer;
- info->DeviceType = cpu_to_le32(stfs.f_type);
- info->DeviceCharacteristics = cpu_to_le32(0x00000020);
+ info->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
+ info->DeviceCharacteristics =
+ cpu_to_le32(FILE_DEVICE_IS_MOUNTED);
+ if (!test_tree_conn_flag(work->tcon,
+ KSMBD_TREE_CONN_FLAG_WRITABLE))
+ info->DeviceCharacteristics |=
+ cpu_to_le32(FILE_READ_ONLY_DEVICE);
rsp->OutputBufferLength = cpu_to_le32(8);
break;
}
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 51b1b0bed616..9e859ba010cf 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1058,16 +1058,21 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
}
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
- const struct path *path, char *attr_name)
+ const struct path *path, char *attr_name,
+ bool get_write)
{
int err;
- err = mnt_want_write(path->mnt);
- if (err)
- return err;
+ if (get_write) {
+ err = mnt_want_write(path->mnt);
+ if (err)
+ return err;
+ }
err = vfs_removexattr(idmap, path->dentry, attr_name);
- mnt_drop_write(path->mnt);
+
+ if (get_write)
+ mnt_drop_write(path->mnt);
return err;
}
@@ -1380,7 +1385,7 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
- err = ksmbd_vfs_remove_xattr(idmap, path, name);
+ err = ksmbd_vfs_remove_xattr(idmap, path, name, true);
if (err)
ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
}
diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
index cfe1c8092f23..cb76f4b5bafe 100644
--- a/fs/smb/server/vfs.h
+++ b/fs/smb/server/vfs.h
@@ -114,7 +114,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
size_t *xattr_stream_name_size, int s_type);
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
- const struct path *path, char *attr_name);
+ const struct path *path, char *attr_name,
+ bool get_write);
int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
unsigned int flags, struct path *parent_path,
struct path *path, bool caseless);
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 6cb599cd287e..8b2e37c8716e 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -254,7 +254,8 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
&filp->f_path,
- fp->stream.name);
+ fp->stream.name,
+ true);
if (err)
pr_err("remove xattr failed : %s\n",
fp->stream.name);
diff --git a/fs/stat.c b/fs/stat.c
index 70bd3e888cfa..89ce1be56310 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -90,6 +90,37 @@ void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
EXPORT_SYMBOL(generic_fill_statx_attr);
/**
+ * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes
+ * @stat: Where to fill in the attribute flags
+ * @unit_min: Minimum supported atomic write length in bytes
+ * @unit_max: Maximum supported atomic write length in bytes
+ *
+ * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
+ * atomic write unit_min and unit_max values.
+ */
+void generic_fill_statx_atomic_writes(struct kstat *stat,
+ unsigned int unit_min,
+ unsigned int unit_max)
+{
+ /* Confirm that the request type is known */
+ stat->result_mask |= STATX_WRITE_ATOMIC;
+
+ /* Confirm that the file attribute type is known */
+ stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC;
+
+ if (unit_min) {
+ stat->atomic_write_unit_min = unit_min;
+ stat->atomic_write_unit_max = unit_max;
+ /* Initially only allow 1x segment */
+ stat->atomic_write_segments_max = 1;
+
+ /* Confirm atomic writes are actually supported */
+ stat->attributes |= STATX_ATTR_WRITE_ATOMIC;
+ }
+}
+EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes);
+
+/**
* vfs_getattr_nosec - getattr without security checks
* @path: file to get attributes from
* @stat: structure to return attributes in
@@ -214,6 +245,43 @@ int getname_statx_lookup_flags(int flags)
return lookup_flags;
}
+static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
+ u32 request_mask)
+{
+ int error = vfs_getattr(path, stat, request_mask, flags);
+
+ if (request_mask & STATX_MNT_ID_UNIQUE) {
+ stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
+ stat->result_mask |= STATX_MNT_ID_UNIQUE;
+ } else {
+ stat->mnt_id = real_mount(path->mnt)->mnt_id;
+ stat->result_mask |= STATX_MNT_ID;
+ }
+
+ if (path_mounted(path))
+ stat->attributes |= STATX_ATTR_MOUNT_ROOT;
+ stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
+
+ /*
+ * If this is a block device inode, override the filesystem
+ * attributes with the block device specific parameters that need to be
+ * obtained from the bdev backing inode.
+ */
+ if (S_ISBLK(stat->mode))
+ bdev_statx(path, stat, request_mask);
+
+ return error;
+}
+
+static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
+ u32 request_mask)
+{
+ CLASS(fd_raw, f)(fd);
+ if (!f.file)
+ return -EBADF;
+ return vfs_statx_path(&f.file->f_path, flags, stat, request_mask);
+}
+
/**
* vfs_statx - Get basic and extra attributes by filename
* @dfd: A file descriptor representing the base dir for a relative filename
@@ -243,36 +311,13 @@ static int vfs_statx(int dfd, struct filename *filename, int flags,
retry:
error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
if (error)
- goto out;
-
- error = vfs_getattr(&path, stat, request_mask, flags);
-
- if (request_mask & STATX_MNT_ID_UNIQUE) {
- stat->mnt_id = real_mount(path.mnt)->mnt_id_unique;
- stat->result_mask |= STATX_MNT_ID_UNIQUE;
- } else {
- stat->mnt_id = real_mount(path.mnt)->mnt_id;
- stat->result_mask |= STATX_MNT_ID;
- }
-
- if (path.mnt->mnt_root == path.dentry)
- stat->attributes |= STATX_ATTR_MOUNT_ROOT;
- stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
-
- /* Handle STATX_DIOALIGN for block devices. */
- if (request_mask & STATX_DIOALIGN) {
- struct inode *inode = d_backing_inode(path.dentry);
-
- if (S_ISBLK(inode->i_mode))
- bdev_statx_dioalign(inode, stat);
- }
-
+ return error;
+ error = vfs_statx_path(&path, flags, stat, request_mask);
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
-out:
return error;
}
@@ -289,18 +334,10 @@ int vfs_fstatat(int dfd, const char __user *filename,
* If AT_EMPTY_PATH is set, we expect the common case to be that
* empty path, and avoid doing all the extra pathname work.
*/
- if (dfd >= 0 && flags == AT_EMPTY_PATH) {
- char c;
-
- ret = get_user(c, filename);
- if (unlikely(ret))
- return ret;
+ if (flags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
+ return vfs_fstat(dfd, stat);
- if (likely(!c))
- return vfs_fstat(dfd, stat);
- }
-
- name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
+ name = getname_flags(filename, getname_statx_lookup_flags(statx_flags));
ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
putname(name);
@@ -488,34 +525,39 @@ static int do_readlinkat(int dfd, const char __user *pathname,
char __user *buf, int bufsiz)
{
struct path path;
+ struct filename *name;
int error;
- int empty = 0;
unsigned int lookup_flags = LOOKUP_EMPTY;
if (bufsiz <= 0)
return -EINVAL;
retry:
- error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
- if (!error) {
- struct inode *inode = d_backing_inode(path.dentry);
-
- error = empty ? -ENOENT : -EINVAL;
- /*
- * AFS mountpoints allow readlink(2) but are not symlinks
- */
- if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
- error = security_inode_readlink(path.dentry);
- if (!error) {
- touch_atime(&path);
- error = vfs_readlink(path.dentry, buf, bufsiz);
- }
- }
- path_put(&path);
- if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
+ name = getname_flags(pathname, lookup_flags);
+ error = filename_lookup(dfd, name, lookup_flags, &path, NULL);
+ if (unlikely(error)) {
+ putname(name);
+ return error;
+ }
+
+ /*
+ * AFS mountpoints allow readlink(2) but are not symlinks
+ */
+ if (d_is_symlink(path.dentry) ||
+ d_backing_inode(path.dentry)->i_op->readlink) {
+ error = security_inode_readlink(path.dentry);
+ if (!error) {
+ touch_atime(&path);
+ error = vfs_readlink(path.dentry, buf, bufsiz);
}
+ } else {
+ error = (name->name[0] == '\0') ? -ENOENT : -EINVAL;
+ }
+ path_put(&path);
+ putname(name);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
}
return error;
}
@@ -659,6 +701,9 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer)
tmp.stx_dio_mem_align = stat->dio_mem_align;
tmp.stx_dio_offset_align = stat->dio_offset_align;
tmp.stx_subvol = stat->subvol;
+ tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
+ tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
+ tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max;
return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
@@ -674,7 +719,8 @@ int do_statx(int dfd, struct filename *filename, unsigned int flags,
if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
return -EINVAL;
- /* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
+ /*
+ * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
* from userland.
*/
mask &= ~STATX_CHANGE_COOKIE;
@@ -686,16 +732,41 @@ int do_statx(int dfd, struct filename *filename, unsigned int flags,
return cp_statx(&stat, buffer);
}
+int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
+ struct statx __user *buffer)
+{
+ struct kstat stat;
+ int error;
+
+ if (mask & STATX__RESERVED)
+ return -EINVAL;
+ if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
+ return -EINVAL;
+
+ /*
+ * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
+ * from userland.
+ */
+ mask &= ~STATX_CHANGE_COOKIE;
+
+ error = vfs_statx_fd(fd, flags, &stat, mask);
+ if (error)
+ return error;
+
+ return cp_statx(&stat, buffer);
+}
+
/**
* sys_statx - System call to get enhanced stats
* @dfd: Base directory to pathwalk from *or* fd to stat.
- * @filename: File to stat or "" with AT_EMPTY_PATH
+ * @filename: File to stat, or NULL or "" with AT_EMPTY_PATH
* @flags: AT_* flags to control pathwalk.
* @mask: Parts of statx struct actually required.
* @buffer: Result buffer.
*
* Note that fstat() can be emulated by setting dfd to the fd of interest,
- * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
+ * supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH
+ * in the flags.
*/
SYSCALL_DEFINE5(statx,
int, dfd, const char __user *, filename, unsigned, flags,
@@ -703,9 +774,24 @@ SYSCALL_DEFINE5(statx,
struct statx __user *, buffer)
{
int ret;
+ unsigned lflags;
struct filename *name;
- name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
+ /*
+ * Short-circuit handling of NULL and "" paths.
+ *
+ * For a NULL path we require and accept only the AT_EMPTY_PATH flag
+ * (possibly |'d with AT_STATX flags).
+ *
+ * However, glibc on 32-bit architectures implements fstatat as statx
+ * with the "" pathname and AT_NO_AUTOMOUNT | AT_EMPTY_PATH flags.
+ * Supporting this results in the uglification below.
+ */
+ lflags = flags & ~(AT_NO_AUTOMOUNT | AT_STATX_SYNC_TYPE);
+ if (lflags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
+ return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer);
+
+ name = getname_flags(filename, getname_statx_lookup_flags(flags));
ret = do_statx(dfd, name, flags, mask, buffer);
putname(name);
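A small userspace sketch tying these statx() changes together, the NULL-path AT_EMPTY_PATH fast path and the new atomic-write fields (it assumes userspace headers that already define STATX_WRITE_ATOMIC and the stx_atomic_write_* members):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct statx stx;
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0)
		return 1;
	/* NULL pathname + AT_EMPTY_PATH now short-circuits to the fd-only path */
	if (statx(fd, NULL, AT_EMPTY_PATH, STATX_WRITE_ATOMIC, &stx)) {
		perror("statx");
		return 1;
	}

	if (stx.stx_attributes & STATX_ATTR_WRITE_ATOMIC)
		printf("atomic write units: %u..%u bytes, %u segment(s) max\n",
		       stx.stx_atomic_write_unit_min,
		       stx.stx_atomic_write_unit_max,
		       stx.stx_atomic_write_segments_max);
	else
		printf("atomic writes not advertised\n");
	close(fd);
	return 0;
}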
diff --git a/fs/super.c b/fs/super.c
index b72f1d288e95..095ba793e10c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1502,8 +1502,17 @@ static int fs_bdev_thaw(struct block_device *bdev)
lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
+ /*
+ * The block device may have been frozen before it was claimed by a
+ * filesystem. Concurrently, another process might try to mount that
+ * frozen block device and temporarily claim it for that purpose,
+ * causing a concurrent fs_bdev_thaw() to end up here. That mounter is
+ * already about to abort mounting because it still saw an elevated
+ * bdev->bd_fsfreeze_count, so get_bdev_super() will return NULL in
+ * that case.
+ */
sb = get_bdev_super(bdev);
- if (WARN_ON_ONCE(!sb))
+ if (!sb)
return -EINVAL;
if (sb->s_op->thaw_super)
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 3365a30dc1e0..5c0d07ddbda2 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -591,4 +591,5 @@ static void __exit exit_sysv_fs(void)
module_init(init_sysv_fs)
module_exit(exit_sysv_fs)
+MODULE_DESCRIPTION("SystemV Filesystem");
MODULE_LICENSE("GPL");
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 7c29f4afc23d..1028ab6d9a74 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -296,9 +296,9 @@ enum {
};
static const struct fs_parameter_spec tracefs_param_specs[] = {
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_gid ("gid", Opt_gid),
fsparam_u32oct ("mode", Opt_mode),
- fsparam_u32 ("uid", Opt_uid),
+ fsparam_uid ("uid", Opt_uid),
{}
};
@@ -306,8 +306,6 @@ static int tracefs_parse_param(struct fs_context *fc, struct fs_parameter *param
{
struct tracefs_fs_info *opts = fc->s_fs_info;
struct fs_parse_result result;
- kuid_t uid;
- kgid_t gid;
int opt;
opt = fs_parse(fc, tracefs_param_specs, param, &result);
@@ -316,16 +314,10 @@ static int tracefs_parse_param(struct fs_context *fc, struct fs_parameter *param
switch (opt) {
case Opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- return invalf(fc, "Unknown uid");
- opts->uid = uid;
+ opts->uid = result.uid;
break;
case Opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- return invalf(fc, "Unknown gid");
- opts->gid = gid;
+ opts->gid = result.gid;
break;
case Opt_mode:
opts->mode = result.uint_32 & S_IALLUGO;
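The fsparam_uid()/fsparam_gid() pattern, sketched for a made-up filesystem so the shape is easier to see in isolation (every demofs_* name is invented):

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

struct demofs_options {
	kuid_t uid;
	kgid_t gid;
};

enum { Opt_demo_uid, Opt_demo_gid };

static const struct fs_parameter_spec demofs_param_specs[] = {
	fsparam_uid("uid", Opt_demo_uid),
	fsparam_gid("gid", Opt_demo_gid),
	{}
};

static int demofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct demofs_options *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, demofs_param_specs, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_demo_uid:
		opts->uid = result.uid;	/* already validated by fs_parse() */
		break;
	case Opt_demo_gid:
		opts->gid = result.gid;
		break;
	}
	return 0;
}

The make_kuid()/uid_valid() validation now happens once inside fs_parse(), which is why the per-filesystem error paths removed above are no longer needed.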
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index ab3ffc355949..d8fc11765d61 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -18,6 +18,7 @@
#include "udfdecl.h"
#include <linux/bitops.h>
+#include <linux/overflow.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -64,14 +65,18 @@ static int read_block_bitmap(struct super_block *sb,
}
for (i = 0; i < count; i++)
- if (udf_test_bit(i + off, bh->b_data))
+ if (udf_test_bit(i + off, bh->b_data)) {
+ bitmap->s_block_bitmap[bitmap_nr] =
+ ERR_PTR(-EFSCORRUPTED);
+ brelse(bh);
return -EFSCORRUPTED;
+ }
return 0;
}
-static int __load_block_bitmap(struct super_block *sb,
- struct udf_bitmap *bitmap,
- unsigned int block_group)
+static int load_block_bitmap(struct super_block *sb,
+ struct udf_bitmap *bitmap,
+ unsigned int block_group)
{
int retval = 0;
int nr_groups = bitmap->s_nr_groups;
@@ -81,8 +86,15 @@ static int __load_block_bitmap(struct super_block *sb,
block_group, nr_groups);
}
- if (bitmap->s_block_bitmap[block_group])
+ if (bitmap->s_block_bitmap[block_group]) {
+ /*
+ * The bitmap failed verification in the past. No point in
+ * trying again.
+ */
+ if (IS_ERR(bitmap->s_block_bitmap[block_group]))
+ return PTR_ERR(bitmap->s_block_bitmap[block_group]);
return block_group;
+ }
retval = read_block_bitmap(sb, bitmap, block_group, block_group);
if (retval < 0)
@@ -91,23 +103,6 @@ static int __load_block_bitmap(struct super_block *sb,
return block_group;
}
-static inline int load_block_bitmap(struct super_block *sb,
- struct udf_bitmap *bitmap,
- unsigned int block_group)
-{
- int slot;
-
- slot = __load_block_bitmap(sb, bitmap, block_group);
-
- if (slot < 0)
- return slot;
-
- if (!bitmap->s_block_bitmap[slot])
- return -EIO;
-
- return slot;
-}
-
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
struct udf_sb_info *sbi = UDF_SB(sb);
@@ -129,7 +124,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct buffer_head *bh = NULL;
- struct udf_part_map *partmap;
unsigned long block;
unsigned long block_group;
unsigned long bit;
@@ -138,19 +132,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
unsigned long overflow;
mutex_lock(&sbi->s_alloc_mutex);
- partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
- if (bloc->logicalBlockNum + count < count ||
- (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
- udf_debug("%u < %d || %u + %u > %u\n",
- bloc->logicalBlockNum, 0,
- bloc->logicalBlockNum, count,
- partmap->s_partition_len);
- goto error_return;
- }
-
+ /* We make sure this cannot overflow when mounting the filesystem */
block = bloc->logicalBlockNum + offset +
(sizeof(struct spaceBitmapDesc) << 3);
-
do {
overflow = 0;
block_group = block >> (sb->s_blocksize_bits + 3);
@@ -380,7 +364,6 @@ static void udf_table_free_blocks(struct super_block *sb,
uint32_t count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
- struct udf_part_map *partmap;
uint32_t start, end;
uint32_t elen;
struct kernel_lb_addr eloc;
@@ -389,16 +372,6 @@ static void udf_table_free_blocks(struct super_block *sb,
struct udf_inode_info *iinfo;
mutex_lock(&sbi->s_alloc_mutex);
- partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
- if (bloc->logicalBlockNum + count < count ||
- (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
- udf_debug("%u < %d || %u + %u > %u\n",
- bloc->logicalBlockNum, 0,
- bloc->logicalBlockNum, count,
- partmap->s_partition_len);
- goto error_return;
- }
-
iinfo = UDF_I(table);
udf_add_free_space(sb, sbi->s_partition, count);
@@ -673,6 +646,17 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode,
{
uint16_t partition = bloc->partitionReferenceNum;
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
+ uint32_t blk;
+
+ if (check_add_overflow(bloc->logicalBlockNum, offset, &blk) ||
+ check_add_overflow(blk, count, &blk) ||
+ bloc->logicalBlockNum + count > map->s_partition_len) {
+ udf_debug("Invalid request to free blocks: (%d, %u), off %u, "
+ "len %u, partition len %u\n",
+ partition, bloc->logicalBlockNum, offset, count,
+ map->s_partition_len);
+ return;
+ }
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
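
The rejection added to udf_free_blocks() relies on check_add_overflow() so that a crafted (block, offset, count) triple cannot wrap around before the partition-length comparison. A standalone sketch of the same pattern, built on the compiler builtin that the kernel helper wraps; the function names here are illustrative only:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's check_add_overflow(). */
static bool add_overflows(uint32_t a, uint32_t b, uint32_t *sum)
{
	return __builtin_add_overflow(a, b, sum);
}

/* Mirror of the udf_free_blocks() sanity check above. */
static bool free_request_valid(uint32_t block, uint32_t offset,
			       uint32_t count, uint32_t partition_len)
{
	uint32_t blk;

	if (add_overflows(block, offset, &blk))
		return false;		/* block + offset wrapped */
	if (add_overflows(blk, count, &blk))
		return false;		/* block + offset + count wrapped */
	return block + count <= partition_len;
}
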
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 97c59585208c..3a4179de316b 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -232,7 +232,9 @@ static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
+ filemap_invalidate_lock(inode->i_mapping);
error = udf_setsize(inode, attr->ia_size);
+ filemap_invalidate_unlock(inode->i_mapping);
if (error)
return error;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 2fb21c5ffccf..4726a4d014b6 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1247,10 +1247,7 @@ int udf_setsize(struct inode *inode, loff_t newsize)
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return -EINVAL;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
- return -EPERM;
- filemap_invalidate_lock(inode->i_mapping);
iinfo = UDF_I(inode);
if (newsize > inode->i_size) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
@@ -1263,11 +1260,11 @@ int udf_setsize(struct inode *inode, loff_t newsize)
}
err = udf_expand_file_adinicb(inode);
if (err)
- goto out_unlock;
+ return err;
}
err = udf_extend_file(inode, newsize);
if (err)
- goto out_unlock;
+ return err;
set_size:
truncate_setsize(inode, newsize);
} else {
@@ -1285,14 +1282,14 @@ set_size:
err = block_truncate_page(inode->i_mapping, newsize,
udf_get_block);
if (err)
- goto out_unlock;
+ return err;
truncate_setsize(inode, newsize);
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
err = udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
if (err)
- goto out_unlock;
+ return err;
}
update_time:
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
@@ -1300,8 +1297,6 @@ update_time:
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
-out_unlock:
- filemap_invalidate_unlock(inode->i_mapping);
return err;
}
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 1308109fd42d..78a603129dd5 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -876,8 +876,6 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (has_diriter) {
diriter.fi.icb.extLocation =
cpu_to_lelb(UDF_I(new_dir)->i_location);
- udf_update_tag((char *)&diriter.fi,
- udf_dir_entry_len(&diriter.fi));
udf_fiiter_write_fi(&diriter, NULL);
udf_fiiter_release(&diriter);
}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9381a66c6ce5..3460ecc826d1 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -336,7 +336,8 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
int nr_groups = bitmap->s_nr_groups;
for (i = 0; i < nr_groups; i++)
- brelse(bitmap->s_block_bitmap[i]);
+ if (!IS_ERR_OR_NULL(bitmap->s_block_bitmap[i]))
+ brelse(bitmap->s_block_bitmap[i]);
kvfree(bitmap);
}
@@ -1110,12 +1111,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
struct udf_part_map *map;
struct udf_sb_info *sbi = UDF_SB(sb);
struct partitionHeaderDesc *phd;
+ u32 sum;
int err;
map = &sbi->s_partmaps[p_index];
map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
+ if (check_add_overflow(map->s_partition_root, map->s_partition_len,
+ &sum)) {
+ udf_err(sb, "Partition %d has invalid location %u + %u\n",
+ p_index, map->s_partition_root, map->s_partition_len);
+ return -EFSCORRUPTED;
+ }
if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
@@ -1171,6 +1179,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
+ /* Check that arithmetic over the bitmap cannot overflow. */
+ if (check_add_overflow(map->s_partition_len,
+ sizeof(struct spaceBitmapDesc) << 3,
+ &sum)) {
+ udf_err(sb, "Partition %d is too long (%u)\n", p_index,
+ map->s_partition_len);
+ return -EFSCORRUPTED;
+ }
udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
p_index, bitmap->s_extPosition);
}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 27c85d92d1dc..61f25d3cf3f7 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -188,7 +188,6 @@ Eend:
"offset=%lu",
dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
fail:
- SetPageError(page);
return false;
}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index eee7320ab0b0..17e409ceaa33 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -2057,7 +2057,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
goto out;
features = uffdio_api.features;
ret = -EINVAL;
- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+ if (uffdio_api.api != UFFD_API)
goto err_out;
ret = -EPERM;
if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
@@ -2081,6 +2081,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
#endif
+
+ ret = -EINVAL;
+ if (features & ~uffdio_api.features)
+ goto err_out;
+
uffdio_api.ioctls = UFFD_API_IOCTLS;
ret = -EFAULT;
if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
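
The relocated -EINVAL check only accepts feature bits that survive the CONFIG-dependent masking above, so requesting a compiled-out feature now fails instead of being silently dropped. A minimal sketch of that subset test (names are illustrative, not part of the uffd ABI):

#include <stdbool.h>
#include <stdint.h>

/* True iff every requested feature bit is also advertised as supported. */
static bool features_supported(uint64_t requested, uint64_t supported)
{
	return (requested & ~supported) == 0;
}

With "supported" taken as uffdio_api.features after the #ifdef clamping, the check fails for feature bits that the kernel configuration cleared above.
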
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
index 118dedef8ebe..fdb4da24d662 100644
--- a/fs/vboxsf/file.c
+++ b/fs/vboxsf/file.c
@@ -228,26 +228,19 @@ const struct inode_operations vboxsf_reg_iops = {
static int vboxsf_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
struct vboxsf_handle *sf_handle = file->private_data;
- loff_t off = page_offset(page);
+ loff_t off = folio_pos(folio);
u32 nread = PAGE_SIZE;
u8 *buf;
int err;
- buf = kmap(page);
+ buf = kmap_local_folio(folio, 0);
err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
- if (err == 0) {
- memset(&buf[nread], 0, PAGE_SIZE - nread);
- flush_dcache_page(page);
- SetPageUptodate(page);
- } else {
- SetPageError(page);
- }
+ buf = folio_zero_tail(folio, nread, buf + nread);
- kunmap(page);
- unlock_page(page);
+ kunmap_local(buf);
+ folio_end_read(folio, err == 0);
return err;
}
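
folio_zero_tail() replaces the open-coded memset of everything past nread, so a short read never leaves stale data in the folio. A rough userspace analog of just that zeroing step (illustrative only, single-page folio assumed; this is not the kernel implementation):

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Zero the tail of a page-sized buffer beyond the bytes actually read. */
static void zero_tail(char *buf, size_t nread)
{
	if (nread < PAGE_SIZE)
		memset(buf + nread, 0, PAGE_SIZE - nread);
}
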
@@ -295,7 +288,6 @@ static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
kref_put(&sf_handle->refcount, vboxsf_handle_release);
if (err == 0) {
- ClearPageError(page);
/* mtime changed */
sf_i->force_restat = 1;
} else {
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index ffb1d565da39..e95b8a48d8a0 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -41,8 +41,8 @@ enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
static const struct fs_parameter_spec vboxsf_fs_parameters[] = {
fsparam_string ("nls", opt_nls),
- fsparam_u32 ("uid", opt_uid),
- fsparam_u32 ("gid", opt_gid),
+ fsparam_uid ("uid", opt_uid),
+ fsparam_gid ("gid", opt_gid),
fsparam_u32 ("ttl", opt_ttl),
fsparam_u32oct ("dmode", opt_dmode),
fsparam_u32oct ("fmode", opt_fmode),
@@ -55,8 +55,6 @@ static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct vboxsf_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
- kuid_t uid;
- kgid_t gid;
int opt;
opt = fs_parse(fc, vboxsf_fs_parameters, param, &result);
@@ -73,16 +71,10 @@ static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
param->string = NULL;
break;
case opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- return -EINVAL;
- ctx->o.uid = uid;
+ ctx->o.uid = result.uid;
break;
case opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- return -EINVAL;
- ctx->o.gid = gid;
+ ctx->o.gid = result.gid;
break;
case opt_ttl:
ctx->o.ttl = msecs_to_jiffies(result.uint_32);
diff --git a/fs/verity/measure.c b/fs/verity/measure.c
index 3969d54158d1..175d2f1bc089 100644
--- a/fs/verity/measure.c
+++ b/fs/verity/measure.c
@@ -111,14 +111,15 @@ __bpf_kfunc_start_defs();
/**
* bpf_get_fsverity_digest: read fsverity digest of file
* @file: file to get digest from
- * @digest_ptr: (out) dynptr for struct fsverity_digest
+ * @digest_p: (out) dynptr for struct fsverity_digest
*
* Read fsverity_digest of *file* into *digest_ptr*.
*
* Return: 0 on success, a negative value on error.
*/
-__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_kern *digest_ptr)
+__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_p)
{
+ struct bpf_dynptr_kern *digest_ptr = (struct bpf_dynptr_kern *)digest_p;
const struct inode *inode = file_inode(file);
u32 dynptr_sz = __bpf_dynptr_size(digest_ptr);
struct fsverity_digest *arg;
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index d41edd30388b..fffd6fffdce0 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -217,6 +217,18 @@ config XFS_DEBUG
Say N unless you are an XFS developer, or you play one on TV.
+config XFS_DEBUG_EXPENSIVE
+ bool "XFS expensive debugging checks"
+ depends on XFS_FS && XFS_DEBUG
+ help
+ Say Y here to get an XFS build with expensive debugging checks
+ enabled. These checks may affect performance significantly.
+
+ Note that the resulting code will be HUGER and SLOWER, and probably
+ not useful unless you are debugging a particular problem.
+
+ Say N unless you are an XFS developer, or you play one on TV.
+
config XFS_ASSERT_FATAL
bool "XFS fatal asserts"
default y
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index c50447548d65..dd692619bed5 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -40,6 +40,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_iext_tree.o \
xfs_inode_fork.o \
xfs_inode_buf.o \
+ xfs_inode_util.o \
xfs_log_rlimit.o \
xfs_ag_resv.o \
xfs_parent.o \
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 240e079cb3fb..7e80732cb547 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -1008,7 +1008,7 @@ xfs_ag_shrink_space(
goto resv_err;
err2 = xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
- XFS_AG_RESV_NONE, true);
+ XFS_AG_RESV_NONE, XFS_FREE_EXTENT_SKIP_DISCARD);
if (err2)
goto resv_err;
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index ff20ed93de77..f247eeff7358 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -33,23 +33,4 @@ xfs_perag_resv(
}
}
-/*
- * RMAPBT reservation accounting wrappers. Since rmapbt blocks are sourced from
- * the AGFL, they are allocated one at a time and the reservation updates don't
- * require a transaction.
- */
-static inline void
-xfs_ag_resv_rmapbt_alloc(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
-{
- struct xfs_alloc_arg args = { NULL };
- struct xfs_perag *pag;
-
- args.len = 1;
- pag = xfs_perag_get(mp, agno);
- xfs_ag_resv_alloc_extent(pag, XFS_AG_RESV_RMAPBT, &args);
- xfs_perag_put(pag);
-}
-
#endif /* __XFS_AG_RESV_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 6cb8b2ddc541..59326f84f6a5 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -27,6 +27,7 @@
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"
#include "xfs_health.h"
+#include "xfs_extfree_item.h"
struct kmem_cache *xfs_extfree_item_cache;
@@ -466,6 +467,97 @@ xfs_alloc_fix_len(
}
/*
+ * Determine if the cursor points to the block that contains the right-most
+ * block of records in the by-count btree. This block contains the largest
+ * contiguous free extent in the AG, so if we modify a record in this block we
+ * need to call xfs_alloc_fixup_longest() once the modifications are done to
+ * ensure the agf->agf_longest field is kept up to date with the longest free
+ * extent tracked by the by-count btree.
+ */
+static bool
+xfs_alloc_cursor_at_lastrec(
+ struct xfs_btree_cur *cnt_cur)
+{
+ struct xfs_btree_block *block;
+ union xfs_btree_ptr ptr;
+ struct xfs_buf *bp;
+
+ block = xfs_btree_get_block(cnt_cur, 0, &bp);
+
+ xfs_btree_get_sibling(cnt_cur, block, &ptr, XFS_BB_RIGHTSIB);
+ return xfs_btree_ptr_is_null(cnt_cur, &ptr);
+}
+
+/*
+ * Find the rightmost record of the cntbt, and return the longest free space
+ * recorded in it. Simply set both the block number and the length to their
+ * maximum values before searching.
+ */
+static int
+xfs_cntbt_longest(
+ struct xfs_btree_cur *cnt_cur,
+ xfs_extlen_t *longest)
+{
+ struct xfs_alloc_rec_incore irec;
+ union xfs_btree_rec *rec;
+ int stat = 0;
+ int error;
+
+ memset(&cnt_cur->bc_rec, 0xFF, sizeof(cnt_cur->bc_rec));
+ error = xfs_btree_lookup(cnt_cur, XFS_LOOKUP_LE, &stat);
+ if (error)
+ return error;
+ if (!stat) {
+ /* totally empty tree */
+ *longest = 0;
+ return 0;
+ }
+
+ error = xfs_btree_get_rec(cnt_cur, &rec, &stat);
+ if (error)
+ return error;
+ if (XFS_IS_CORRUPT(cnt_cur->bc_mp, !stat)) {
+ xfs_btree_mark_sick(cnt_cur);
+ return -EFSCORRUPTED;
+ }
+
+ xfs_alloc_btrec_to_irec(rec, &irec);
+ *longest = irec.ar_blockcount;
+ return 0;
+}
+
+/*
+ * Update the longest contiguous free extent in the AG from the by-count cursor
+ * that is passed to us. This should be done at the end of any allocation or
+ * freeing operation that touches the longest extent in the btree.
+ *
+ * Needing to update the longest extent can be determined by calling
+ * xfs_alloc_cursor_at_lastrec() after the cursor is positioned for record
+ * modification but before the modification begins.
+ */
+static int
+xfs_alloc_fixup_longest(
+ struct xfs_btree_cur *cnt_cur)
+{
+ struct xfs_perag *pag = cnt_cur->bc_ag.pag;
+ struct xfs_buf *bp = cnt_cur->bc_ag.agbp;
+ struct xfs_agf *agf = bp->b_addr;
+ xfs_extlen_t longest = 0;
+ int error;
+
+ /* Lookup last rec in order to update AGF. */
+ error = xfs_cntbt_longest(cnt_cur, &longest);
+ if (error)
+ return error;
+
+ pag->pagf_longest = longest;
+ agf->agf_longest = cpu_to_be32(pag->pagf_longest);
+ xfs_alloc_log_agf(cnt_cur->bc_tp, bp, XFS_AGF_LONGEST);
+
+ return 0;
+}
+
+/*
* Update the two btrees, logically removing from freespace the extent
* starting at rbno, rlen blocks. The extent is contained within the
* actual (current) free extent fbno for flen blocks.
@@ -489,6 +581,7 @@ xfs_alloc_fixup_trees(
xfs_extlen_t nflen1=0; /* first new free length */
xfs_extlen_t nflen2=0; /* second new free length */
struct xfs_mount *mp;
+ bool fixup_longest = false;
mp = cnt_cur->bc_mp;
@@ -577,6 +670,10 @@ xfs_alloc_fixup_trees(
nfbno2 = rbno + rlen;
nflen2 = (fbno + flen) - nfbno2;
}
+
+ if (xfs_alloc_cursor_at_lastrec(cnt_cur))
+ fixup_longest = true;
+
/*
* Delete the entry from the by-size btree.
*/
@@ -654,6 +751,10 @@ xfs_alloc_fixup_trees(
return -EFSCORRUPTED;
}
}
+
+ if (fixup_longest)
+ return xfs_alloc_fixup_longest(cnt_cur);
+
return 0;
}
@@ -1008,13 +1109,12 @@ xfs_alloc_cur_finish(
struct xfs_alloc_arg *args,
struct xfs_alloc_cur *acur)
{
- struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
int error;
ASSERT(acur->cnt && acur->bnolt);
ASSERT(acur->bno >= acur->rec_bno);
ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
- ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
+ ASSERT(xfs_verify_agbext(args->pag, acur->rec_bno, acur->rec_len));
error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
acur->rec_len, acur->bno, acur->len, 0);
@@ -1217,7 +1317,6 @@ STATIC int /* error */
xfs_alloc_ag_vextent_exact(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
- struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
int error;
@@ -1297,7 +1396,7 @@ xfs_alloc_ag_vextent_exact(
*/
cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag);
- ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
+ ASSERT(xfs_verify_agbext(args->pag, args->agbno, args->len));
error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
args->len, XFSA_FIXUP_BNO_OK);
if (error) {
@@ -1934,7 +2033,7 @@ out_nominleft:
/*
* Free the extent starting at agno/bno for length.
*/
-STATIC int
+int
xfs_free_ag_extent(
struct xfs_trans *tp,
struct xfs_buf *agbp,
@@ -1958,6 +2057,7 @@ xfs_free_ag_extent(
int i;
int error;
struct xfs_perag *pag = agbp->b_pag;
+ bool fixup_longest = false;
bno_cur = cnt_cur = NULL;
mp = tp->t_mountp;
@@ -2221,8 +2321,13 @@ xfs_free_ag_extent(
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
bno_cur = NULL;
+
/*
* In all cases we need to insert the new freespace in the by-size tree.
+ *
+ * If this new freespace is being inserted in the block that contains
+ * the largest free space in the btree, make sure we also fix up the
+ * agf->agf_longest tracker field.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
goto error0;
@@ -2231,6 +2336,8 @@ xfs_free_ag_extent(
error = -EFSCORRUPTED;
goto error0;
}
+ if (xfs_alloc_cursor_at_lastrec(cnt_cur))
+ fixup_longest = true;
if ((error = xfs_btree_insert(cnt_cur, &i)))
goto error0;
if (XFS_IS_CORRUPT(mp, i != 1)) {
@@ -2238,6 +2345,12 @@ xfs_free_ag_extent(
error = -EFSCORRUPTED;
goto error0;
}
+ if (fixup_longest) {
+ error = xfs_alloc_fixup_longest(cnt_cur);
+ if (error)
+ goto error0;
+ }
+
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
cnt_cur = NULL;
@@ -2424,32 +2537,6 @@ xfs_alloc_space_available(
return true;
}
-int
-xfs_free_agfl_block(
- struct xfs_trans *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t agbno,
- struct xfs_buf *agbp,
- struct xfs_owner_info *oinfo)
-{
- int error;
- struct xfs_buf *bp;
-
- error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
- XFS_AG_RESV_AGFL);
- if (error)
- return error;
-
- error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
- XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
- tp->t_mountp->m_bsize, 0, &bp);
- if (error)
- return error;
- xfs_trans_binval(tp, bp);
-
- return 0;
-}
-
/*
* Check the agfl fields of the agf for inconsistency or corruption.
*
@@ -2538,48 +2625,6 @@ xfs_agfl_reset(
}
/*
- * Defer an AGFL block free. This is effectively equivalent to
- * xfs_free_extent_later() with some special handling particular to AGFL blocks.
- *
- * Deferring AGFL frees helps prevent log reservation overruns due to too many
- * allocation operations in a transaction. AGFL frees are prone to this problem
- * because for one they are always freed one at a time. Further, an immediate
- * AGFL block free can cause a btree join and require another block free before
- * the real allocation can proceed. Deferring the free disconnects freeing up
- * the AGFL slot from freeing the block.
- */
-static int
-xfs_defer_agfl_block(
- struct xfs_trans *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t agbno,
- struct xfs_owner_info *oinfo)
-{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_extent_free_item *xefi;
- xfs_fsblock_t fsbno = XFS_AGB_TO_FSB(mp, agno, agbno);
-
- ASSERT(xfs_extfree_item_cache != NULL);
- ASSERT(oinfo != NULL);
-
- if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, fsbno)))
- return -EFSCORRUPTED;
-
- xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
- GFP_KERNEL | __GFP_NOFAIL);
- xefi->xefi_startblock = fsbno;
- xefi->xefi_blockcount = 1;
- xefi->xefi_owner = oinfo->oi_owner;
- xefi->xefi_agresv = XFS_AG_RESV_AGFL;
-
- trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
-
- xfs_extent_free_get_group(mp, xefi);
- xfs_defer_add(tp, &xefi->xefi_list, &xfs_agfl_free_defer_type);
- return 0;
-}
-
-/*
* Add the extent to the list of extents to be free at transaction end.
* The list is maintained sorted (by block number).
*/
@@ -2590,28 +2635,15 @@ xfs_defer_extent_free(
xfs_filblks_t len,
const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type,
- bool skip_discard,
+ unsigned int free_flags,
struct xfs_defer_pending **dfpp)
{
struct xfs_extent_free_item *xefi;
struct xfs_mount *mp = tp->t_mountp;
-#ifdef DEBUG
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- ASSERT(bno != NULLFSBLOCK);
- ASSERT(len > 0);
ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
ASSERT(!isnullstartblock(bno));
- agno = XFS_FSB_TO_AGNO(mp, bno);
- agbno = XFS_FSB_TO_AGBNO(mp, bno);
- ASSERT(agno < mp->m_sb.sb_agcount);
- ASSERT(agbno < mp->m_sb.sb_agblocks);
- ASSERT(len < mp->m_sb.sb_agblocks);
- ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
-#endif
- ASSERT(xfs_extfree_item_cache != NULL);
- ASSERT(type != XFS_AG_RESV_AGFL);
+ ASSERT(!(free_flags & ~XFS_FREE_EXTENT_ALL_FLAGS));
if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
return -EFSCORRUPTED;
@@ -2621,7 +2653,7 @@ xfs_defer_extent_free(
xefi->xefi_startblock = bno;
xefi->xefi_blockcount = (xfs_extlen_t)len;
xefi->xefi_agresv = type;
- if (skip_discard)
+ if (free_flags & XFS_FREE_EXTENT_SKIP_DISCARD)
xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
if (oinfo) {
ASSERT(oinfo->oi_offset == 0);
@@ -2634,12 +2666,8 @@ xfs_defer_extent_free(
} else {
xefi->xefi_owner = XFS_RMAP_OWN_NULL;
}
- trace_xfs_bmap_free_defer(mp,
- XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
- XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
- xfs_extent_free_get_group(mp, xefi);
- *dfpp = xfs_defer_add(tp, &xefi->xefi_list, &xfs_extent_free_defer_type);
+ xfs_extent_free_defer_add(tp, xefi, dfpp);
return 0;
}
@@ -2650,11 +2678,11 @@ xfs_free_extent_later(
xfs_filblks_t len,
const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type,
- bool skip_discard)
+ unsigned int free_flags)
{
struct xfs_defer_pending *dontcare = NULL;
- return xfs_defer_extent_free(tp, bno, len, oinfo, type, skip_discard,
+ return xfs_defer_extent_free(tp, bno, len, oinfo, type, free_flags,
&dontcare);
}
@@ -2679,13 +2707,13 @@ xfs_free_extent_later(
int
xfs_alloc_schedule_autoreap(
const struct xfs_alloc_arg *args,
- bool skip_discard,
+ unsigned int free_flags,
struct xfs_alloc_autoreap *aarp)
{
int error;
error = xfs_defer_extent_free(args->tp, args->fsbno, args->len,
- &args->oinfo, args->resv, skip_discard, &aarp->dfp);
+ &args->oinfo, args->resv, free_flags, &aarp->dfp);
if (error)
return error;
@@ -2897,8 +2925,21 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
- /* defer agfl frees */
- error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
+ /*
+ * Defer the AGFL block free.
+ *
+ * This helps to prevent log reservation overruns due to too
+ * many allocation operations in a transaction. AGFL frees are
+ * prone to this problem because for one they are always freed
+ * one at a time. Further, an immediate AGFL block free can
+ * cause a btree join and require another block free before the
+ * real allocation can proceed.
+ * Deferring the free disconnects freeing up the AGFL slot from
+ * freeing the block.
+ */
+ error = xfs_free_extent_later(tp,
+ XFS_AGB_TO_FSB(mp, args->agno, bno), 1,
+ &targs.oinfo, XFS_AG_RESV_AGFL, 0);
if (error)
goto out_agbp_relse;
}
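
xfs_cntbt_longest() above finds the right-most cntbt record by filling the lookup key with 0xFF bytes and searching with XFS_LOOKUP_LE, so the less-than-or-equal match always lands on the last record. A self-contained sketch of the same idiom against a sorted array (no XFS APIs involved; purely illustrative):

#include <limits.h>
#include <stdio.h>

/* LE lookup: index of the last element <= key, or -1 if none ("empty tree"). */
static int lookup_le(const unsigned int *recs, int nrecs, unsigned int key)
{
	int i, found = -1;

	for (i = 0; i < nrecs && recs[i] <= key; i++)
		found = i;
	return found;
}

int main(void)
{
	/* by-count records, sorted by free extent length */
	unsigned int lengths[] = { 1, 4, 9, 57 };
	int idx = lookup_le(lengths, 4, UINT_MAX);	/* maximal key */

	/* With a maximal key, the LE lookup returns the right-most record. */
	printf("longest free extent: %u blocks\n", idx < 0 ? 0 : lengths[idx]);
	return 0;
}
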
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 0b956f8b9d5a..fae170825be0 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -80,6 +80,10 @@ int xfs_alloc_get_freelist(struct xfs_perag *pag, struct xfs_trans *tp,
int xfs_alloc_put_freelist(struct xfs_perag *pag, struct xfs_trans *tp,
struct xfs_buf *agfbp, struct xfs_buf *agflbp,
xfs_agblock_t bno, int btreeblk);
+int xfs_free_ag_extent(struct xfs_trans *tp, struct xfs_buf *agbp,
+ xfs_agnumber_t agno, xfs_agblock_t bno,
+ xfs_extlen_t len, const struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type);
/*
* Compute and fill in value of m_alloc_maxlevels.
@@ -194,8 +198,6 @@ int xfs_alloc_read_agf(struct xfs_perag *pag, struct xfs_trans *tp, int flags,
struct xfs_buf **agfbpp);
int xfs_alloc_read_agfl(struct xfs_perag *pag, struct xfs_trans *tp,
struct xfs_buf **bpp);
-int xfs_free_agfl_block(struct xfs_trans *, xfs_agnumber_t, xfs_agblock_t,
- struct xfs_buf *, struct xfs_owner_info *);
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, uint32_t alloc_flags);
int xfs_free_extent_fix_freelist(struct xfs_trans *tp, struct xfs_perag *pag,
struct xfs_buf **agbp);
@@ -233,7 +235,12 @@ xfs_buf_to_agfl_bno(
int xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, const struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type type, bool skip_discard);
+ enum xfs_ag_resv_type type, unsigned int free_flags);
+
+/* Don't issue a discard for the blocks freed. */
+#define XFS_FREE_EXTENT_SKIP_DISCARD (1U << 0)
+
+#define XFS_FREE_EXTENT_ALL_FLAGS (XFS_FREE_EXTENT_SKIP_DISCARD)
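
XFS_FREE_EXTENT_SKIP_DISCARD replaces the old skip_discard bool, leaving room for more flags behind XFS_FREE_EXTENT_ALL_FLAGS. A hedged fragment showing the two call styles against the declaration above; the transaction, extent and owner-info variables are assumed to exist in the caller:

/* Default behaviour: the freed blocks may be discarded later. */
error = xfs_free_extent_later(tp, fsbno, len, &oinfo, XFS_AG_RESV_NONE, 0);

/* Frees where issuing a discard is pointless (e.g. unwritten extents). */
error = xfs_free_extent_later(tp, fsbno, len, &oinfo, XFS_AG_RESV_NONE,
		XFS_FREE_EXTENT_SKIP_DISCARD);
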
/*
* List of extents to be free "later".
@@ -249,9 +256,6 @@ struct xfs_extent_free_item {
enum xfs_ag_resv_type xefi_agresv;
};
-void xfs_extent_free_get_group(struct xfs_mount *mp,
- struct xfs_extent_free_item *xefi);
-
#define XFS_EFI_SKIP_DISCARD (1U << 0) /* don't issue discard */
#define XFS_EFI_ATTR_FORK (1U << 1) /* freeing attr fork block */
#define XFS_EFI_BMBT_BLOCK (1U << 2) /* freeing bmap btree block */
@@ -262,7 +266,7 @@ struct xfs_alloc_autoreap {
};
int xfs_alloc_schedule_autoreap(const struct xfs_alloc_arg *args,
- bool skip_discard, struct xfs_alloc_autoreap *aarp);
+ unsigned int free_flags, struct xfs_alloc_autoreap *aarp);
void xfs_alloc_cancel_autoreap(struct xfs_trans *tp,
struct xfs_alloc_autoreap *aarp);
void xfs_alloc_commit_autoreap(struct xfs_trans *tp,
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 6ef5ddd89600..585e98e87ef9 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -115,67 +115,6 @@ xfs_allocbt_free_block(
return 0;
}
-/*
- * Update the longest extent in the AGF
- */
-STATIC void
-xfs_allocbt_update_lastrec(
- struct xfs_btree_cur *cur,
- const struct xfs_btree_block *block,
- const union xfs_btree_rec *rec,
- int ptr,
- int reason)
-{
- struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- struct xfs_perag *pag;
- __be32 len;
- int numrecs;
-
- ASSERT(!xfs_btree_is_bno(cur->bc_ops));
-
- switch (reason) {
- case LASTREC_UPDATE:
- /*
- * If this is the last leaf block and it's the last record,
- * then update the size of the longest extent in the AG.
- */
- if (ptr != xfs_btree_get_numrecs(block))
- return;
- len = rec->alloc.ar_blockcount;
- break;
- case LASTREC_INSREC:
- if (be32_to_cpu(rec->alloc.ar_blockcount) <=
- be32_to_cpu(agf->agf_longest))
- return;
- len = rec->alloc.ar_blockcount;
- break;
- case LASTREC_DELREC:
- numrecs = xfs_btree_get_numrecs(block);
- if (ptr <= numrecs)
- return;
- ASSERT(ptr == numrecs + 1);
-
- if (numrecs) {
- xfs_alloc_rec_t *rrp;
-
- rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
- len = rrp->ar_blockcount;
- } else {
- len = 0;
- }
-
- break;
- default:
- ASSERT(0);
- return;
- }
-
- agf->agf_longest = len;
- pag = cur->bc_ag.agbp->b_pag;
- pag->pagf_longest = be32_to_cpu(len);
- xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
-}
-
STATIC int
xfs_allocbt_get_minrecs(
struct xfs_btree_cur *cur,
@@ -493,7 +432,6 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
.set_root = xfs_allocbt_set_root,
.alloc_block = xfs_allocbt_alloc_block,
.free_block = xfs_allocbt_free_block,
- .update_lastrec = xfs_allocbt_update_lastrec,
.get_minrecs = xfs_allocbt_get_minrecs,
.get_maxrecs = xfs_allocbt_get_maxrecs,
.init_key_from_rec = xfs_allocbt_init_key_from_rec,
@@ -511,7 +449,6 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
const struct xfs_btree_ops xfs_cntbt_ops = {
.name = "cnt",
.type = XFS_BTREE_TYPE_AG,
- .geom_flags = XFS_BTGEO_LASTREC_UPDATE,
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
@@ -525,7 +462,6 @@ const struct xfs_btree_ops xfs_cntbt_ops = {
.set_root = xfs_allocbt_set_root,
.alloc_block = xfs_allocbt_alloc_block,
.free_block = xfs_allocbt_free_block,
- .update_lastrec = xfs_allocbt_update_lastrec,
.get_minrecs = xfs_allocbt_get_minrecs,
.get_maxrecs = xfs_allocbt_get_maxrecs,
.init_key_from_rec = xfs_allocbt_init_key_from_rec,
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 430cd3244c14..f30bcc64100d 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -329,26 +329,20 @@ xfs_attr_calc_size(
return nblks;
}
-/* Initialize transaction reservation for attr operations */
-void
-xfs_init_attr_trans(
- struct xfs_da_args *args,
- struct xfs_trans_res *tres,
- unsigned int *total)
+/* Initialize transaction reservation for an xattr set/replace/upsert */
+inline struct xfs_trans_res
+xfs_attr_set_resv(
+ const struct xfs_da_args *args)
{
- struct xfs_mount *mp = args->dp->i_mount;
-
- if (args->value) {
- tres->tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
- M_RES(mp)->tr_attrsetrt.tr_logres *
- args->total;
- tres->tr_logcount = XFS_ATTRSET_LOG_COUNT;
- tres->tr_logflags = XFS_TRANS_PERM_LOG_RES;
- *total = args->total;
- } else {
- *tres = M_RES(mp)->tr_attrrm;
- *total = XFS_ATTRRM_SPACE_RES(mp);
- }
+ struct xfs_mount *mp = args->dp->i_mount;
+ struct xfs_trans_res ret = {
+ .tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+ M_RES(mp)->tr_attrsetrt.tr_logres * args->total,
+ .tr_logcount = XFS_ATTRSET_LOG_COUNT,
+ .tr_logflags = XFS_TRANS_PERM_LOG_RES,
+ };
+
+ return ret;
}
/*
@@ -1006,7 +1000,7 @@ xfs_attr_set(
struct xfs_trans_res tres;
int error, local;
int rmt_blks = 0;
- unsigned int total;
+ unsigned int total = 0;
ASSERT(!args->trans);
@@ -1033,10 +1027,15 @@ xfs_attr_set(
if (!local)
rmt_blks = xfs_attr3_rmt_blocks(mp, args->valuelen);
+
+ tres = xfs_attr_set_resv(args);
+ total = args->total;
break;
case XFS_ATTRUPDATE_REMOVE:
XFS_STATS_INC(mp, xs_attr_remove);
rmt_blks = xfs_attr3_max_rmt_blocks(mp);
+ tres = M_RES(mp)->tr_attrrm;
+ total = XFS_ATTRRM_SPACE_RES(mp);
break;
}
@@ -1044,7 +1043,6 @@ xfs_attr_set(
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
- xfs_init_attr_trans(args, &tres, &total);
error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 088cb7b30168..0e51d0723f9a 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -565,8 +565,7 @@ bool xfs_attr_check_namespace(unsigned int attr_flags);
bool xfs_attr_namecheck(unsigned int attr_flags, const void *name,
size_t length);
int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
-void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
- unsigned int *total);
+struct xfs_trans_res xfs_attr_set_resv(const struct xfs_da_args *args);
/*
* Check to see if the attr should be upgraded from non-existent or shortform to
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 3b3206d312d6..7df74c35d9f9 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -39,6 +39,7 @@
#include "xfs_health.h"
#include "xfs_bmap_item.h"
#include "xfs_symlink_remote.h"
+#include "xfs_inode_util.h"
struct kmem_cache *xfs_bmap_intent_cache;
@@ -604,7 +605,7 @@ xfs_bmap_btree_to_extents(
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
- XFS_AG_RESV_NONE, false);
+ XFS_AG_RESV_NONE, 0);
if (error)
return error;
@@ -4058,20 +4059,32 @@ xfs_bmapi_reserve_delalloc(
xfs_extlen_t indlen;
uint64_t fdblocks;
int error;
- xfs_fileoff_t aoff = off;
+ xfs_fileoff_t aoff;
+ bool use_cowextszhint =
+ whichfork == XFS_COW_FORK && !prealloc;
+retry:
/*
* Cap the alloc length. Keep track of prealloc so we know whether to
* tag the inode before we return.
*/
+ aoff = off;
alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
if (!eof)
alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
if (prealloc && alen >= len)
prealloc = alen - len;
- /* Figure out the extent size, adjust alen */
- if (whichfork == XFS_COW_FORK) {
+ /*
+ * If we're targeting the COW fork but aren't creating a speculative
+ * posteof preallocation, try to expand the reservation to align with
+ * the COW extent size hint if there's sufficient free space.
+ *
+ * Unlike the data fork, the CoW cancellation functions will free all
+ * the reservations at inactivation, so we don't require that every
+ * delalloc reservation have a dirty pagecache.
+ */
+ if (use_cowextszhint) {
struct xfs_bmbt_irec prev;
xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
@@ -4090,7 +4103,7 @@ xfs_bmapi_reserve_delalloc(
*/
error = xfs_quota_reserve_blkres(ip, alen);
if (error)
- return error;
+ goto out;
/*
* Split changing sb for alen and indlen since they could be coming
@@ -4140,6 +4153,17 @@ out_unreserve_frextents:
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
xfs_quota_unreserve_blkres(ip, alen);
+out:
+ if (error == -ENOSPC || error == -EDQUOT) {
+ trace_xfs_delalloc_enospc(ip, off, len);
+
+ if (prealloc || use_cowextszhint) {
+ /* retry without any preallocation */
+ use_cowextszhint = false;
+ prealloc = 0;
+ goto retry;
+ }
+ }
return error;
}
@@ -5357,11 +5381,15 @@ xfs_bmap_del_extent_real(
error = xfs_rtfree_blocks(tp, del->br_startblock,
del->br_blockcount);
} else {
+ unsigned int efi_flags = 0;
+
+ if ((bflags & XFS_BMAPI_NODISCARD) ||
+ del->br_state == XFS_EXT_UNWRITTEN)
+ efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;
+
error = xfs_free_extent_later(tp, del->br_startblock,
del->br_blockcount, NULL,
- XFS_AG_RESV_NONE,
- ((bflags & XFS_BMAPI_NODISCARD) ||
- del->br_state == XFS_EXT_UNWRITTEN));
+ XFS_AG_RESV_NONE, efi_flags);
}
if (error)
return error;
@@ -6383,6 +6411,7 @@ xfs_bunmapi_range(
error = xfs_defer_finish(tpp);
if (error)
goto out;
+ cond_resched();
}
out:
return error;
@@ -6430,3 +6459,45 @@ xfs_bmap_query_all(
return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
}
+
+/* Helper function to extract extent size hint from inode */
+xfs_extlen_t
+xfs_get_extsz_hint(
+ struct xfs_inode *ip)
+{
+ /*
+ * No point in aligning allocations if we need to COW to actually
+ * write to them.
+ */
+ if (xfs_is_always_cow_inode(ip))
+ return 0;
+ if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
+ return ip->i_extsize;
+ if (XFS_IS_REALTIME_INODE(ip) &&
+ ip->i_mount->m_sb.sb_rextsize > 1)
+ return ip->i_mount->m_sb.sb_rextsize;
+ return 0;
+}
+
+/*
+ * Helper function to extract CoW extent size hint from inode.
+ * Between the extent size hint and the CoW extent size hint, we
+ * return the greater of the two. If the value is zero (automatic),
+ * use the default size.
+ */
+xfs_extlen_t
+xfs_get_cowextsz_hint(
+ struct xfs_inode *ip)
+{
+ xfs_extlen_t a, b;
+
+ a = 0;
+ if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
+ a = ip->i_cowextsize;
+ b = xfs_get_extsz_hint(ip);
+
+ a = max(a, b);
+ if (a == 0)
+ return XFS_DEFAULT_COWEXTSZ_HINT;
+ return a;
+}
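
xfs_get_cowextsz_hint() reduces to "take the larger of the CoW and regular extent size hints, and fall back to the default when both are zero (automatic)". A detached sketch of that selection, with the default passed in rather than hard-coding XFS_DEFAULT_COWEXTSZ_HINT:

/* Illustrative only; detached from struct xfs_inode. */
static unsigned int cow_hint(unsigned int cowextsize, unsigned int extsz,
			     unsigned int default_hint)
{
	unsigned int hint = cowextsize > extsz ? cowextsize : extsz;

	return hint ? hint : default_hint;	/* 0 means "automatic" */
}
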
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 667b0c2b33d1..7592d46e97c6 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -296,4 +296,7 @@ typedef int (*xfs_bmap_query_range_fn)(
int xfs_bmap_query_all(struct xfs_btree_cur *cur, xfs_bmap_query_range_fn fn,
void *priv);
+xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
+xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
+
#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index f5d84dcb58da..d1b06ccde19e 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -282,7 +282,7 @@ xfs_bmbt_free_block(
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo,
- XFS_AG_RESV_NONE, false);
+ XFS_AG_RESV_NONE, 0);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index d29547572a68..a5c4af148853 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -1331,30 +1331,6 @@ xfs_btree_init_block_cur(
xfs_btree_owner(cur));
}
-/*
- * Return true if ptr is the last record in the btree and
- * we need to track updates to this record. The decision
- * will be further refined in the update_lastrec method.
- */
-STATIC int
-xfs_btree_is_lastrec(
- struct xfs_btree_cur *cur,
- struct xfs_btree_block *block,
- int level)
-{
- union xfs_btree_ptr ptr;
-
- if (level > 0)
- return 0;
- if (!(cur->bc_ops->geom_flags & XFS_BTGEO_LASTREC_UPDATE))
- return 0;
-
- xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
- if (!xfs_btree_ptr_is_null(cur, &ptr))
- return 0;
- return 1;
-}
-
STATIC void
xfs_btree_buf_to_ptr(
struct xfs_btree_cur *cur,
@@ -2420,15 +2396,6 @@ xfs_btree_update(
xfs_btree_copy_recs(cur, rp, rec, 1);
xfs_btree_log_recs(cur, bp, ptr, ptr);
- /*
- * If we are tracking the last record in the tree and
- * we are at the far right edge of the tree, update it.
- */
- if (xfs_btree_is_lastrec(cur, block, 0)) {
- cur->bc_ops->update_lastrec(cur, block, rec,
- ptr, LASTREC_UPDATE);
- }
-
/* Pass new key value up to our parent. */
if (xfs_btree_needs_key_update(cur, ptr)) {
error = xfs_btree_update_keys(cur, 0);
@@ -3618,15 +3585,6 @@ xfs_btree_insrec(
}
/*
- * If we are tracking the last record in the tree and
- * we are at the far right edge of the tree, update it.
- */
- if (xfs_btree_is_lastrec(cur, block, level)) {
- cur->bc_ops->update_lastrec(cur, block, rec,
- ptr, LASTREC_INSREC);
- }
-
- /*
* Return the new block number, if any.
* If there is one, give back a record value and a cursor too.
*/
@@ -3984,15 +3942,6 @@ xfs_btree_delrec(
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
/*
- * If we are tracking the last record in the tree and
- * we are at the far right edge of the tree, update it.
- */
- if (xfs_btree_is_lastrec(cur, block, level)) {
- cur->bc_ops->update_lastrec(cur, block, NULL,
- ptr, LASTREC_DELREC);
- }
-
- /*
* We're at the root level. First, shrink the root block in-memory.
* Try to get rid of the next level down. If we can't then there's
* nothing left to do.
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index f93374278aa1..10b7ddc3b2b3 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -154,12 +154,6 @@ struct xfs_btree_ops {
int *stat);
int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);
- /* update last record information */
- void (*update_lastrec)(struct xfs_btree_cur *cur,
- const struct xfs_btree_block *block,
- const union xfs_btree_rec *rec,
- int ptr, int reason);
-
/* records in block/level */
int (*get_minrecs)(struct xfs_btree_cur *cur, int level);
int (*get_maxrecs)(struct xfs_btree_cur *cur, int level);
@@ -222,15 +216,7 @@ struct xfs_btree_ops {
};
/* btree geometry flags */
-#define XFS_BTGEO_LASTREC_UPDATE (1U << 0) /* track last rec externally */
-#define XFS_BTGEO_OVERLAPPING (1U << 1) /* overlapping intervals */
-
-/*
- * Reasons for the update_lastrec method to be called.
- */
-#define LASTREC_UPDATE 0
-#define LASTREC_INSREC 1
-#define LASTREC_DELREC 2
+#define XFS_BTGEO_OVERLAPPING (1U << 0) /* overlapping intervals */
union xfs_btree_irec {
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 4a078e07e1a0..40021849b42f 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -12,12 +12,14 @@
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
+#include "xfs_log_priv.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
@@ -556,7 +558,7 @@ xfs_defer_relog(
* the log threshold once per call.
*/
if (threshold_lsn == NULLCOMMITLSN) {
- threshold_lsn = xlog_grant_push_threshold(log, 0);
+ threshold_lsn = xfs_ail_get_push_target(log->l_ailp);
if (threshold_lsn == NULLCOMMITLSN)
break;
}
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 457f9a38f850..202468223bf9 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -19,6 +19,11 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_health.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans_space.h"
+#include "xfs_parent.h"
+#include "xfs_ag.h"
+#include "xfs_ialloc.h"
const struct xfs_name xfs_name_dotdot = {
.name = (const unsigned char *)"..",
@@ -584,9 +589,9 @@ xfs_dir_replace(
*/
int
xfs_dir_canenter(
- xfs_trans_t *tp,
- xfs_inode_t *dp,
- struct xfs_name *name) /* name of entry to add */
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
+ const struct xfs_name *name) /* name of entry to add */
{
return xfs_dir_createname(tp, dp, name, 0, 0);
}
@@ -756,3 +761,653 @@ xfs_dir2_compname(
return xfs_ascii_ci_compname(args, name, len);
return xfs_da_compname(args, name, len);
}
+
+#ifdef CONFIG_XFS_LIVE_HOOKS
+/*
+ * Use a static key here to reduce the overhead of directory live update hooks.
+ * If the compiler supports jump labels, the static branch will be replaced by
+ * a nop sled when there are no hook users. Online fsck is currently the only
+ * caller, so this is a reasonable tradeoff.
+ *
+ * Note: Patching the kernel code requires taking the cpu hotplug lock. Other
+ * parts of the kernel allocate memory with that lock held, which means that
+ * XFS callers cannot hold any locks that might be used by memory reclaim or
+ * writeback when calling the static_branch_{inc,dec} functions.
+ */
+DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_dir_hooks_switch);
+
+void
+xfs_dir_hook_disable(void)
+{
+ xfs_hooks_switch_off(&xfs_dir_hooks_switch);
+}
+
+void
+xfs_dir_hook_enable(void)
+{
+ xfs_hooks_switch_on(&xfs_dir_hooks_switch);
+}
+
+/* Call hooks for a directory update relating to a child dirent update. */
+inline void
+xfs_dir_update_hook(
+ struct xfs_inode *dp,
+ struct xfs_inode *ip,
+ int delta,
+ const struct xfs_name *name)
+{
+ if (xfs_hooks_switched_on(&xfs_dir_hooks_switch)) {
+ struct xfs_dir_update_params p = {
+ .dp = dp,
+ .ip = ip,
+ .delta = delta,
+ .name = name,
+ };
+ struct xfs_mount *mp = ip->i_mount;
+
+ xfs_hooks_call(&mp->m_dir_update_hooks, 0, &p);
+ }
+}
+
+/* Call the specified function during a directory update. */
+int
+xfs_dir_hook_add(
+ struct xfs_mount *mp,
+ struct xfs_dir_hook *hook)
+{
+ return xfs_hooks_add(&mp->m_dir_update_hooks, &hook->dirent_hook);
+}
+
+/* Stop calling the specified function during a directory update. */
+void
+xfs_dir_hook_del(
+ struct xfs_mount *mp,
+ struct xfs_dir_hook *hook)
+{
+ xfs_hooks_del(&mp->m_dir_update_hooks, &hook->dirent_hook);
+}
+
+/* Configure directory update hook functions. */
+void
+xfs_dir_hook_setup(
+ struct xfs_dir_hook *hook,
+ notifier_fn_t mod_fn)
+{
+ xfs_hook_setup(&hook->dirent_hook, mod_fn);
+}
+#endif /* CONFIG_XFS_LIVE_HOOKS */
+
+/*
+ * Given a directory @dp, a newly allocated inode @ip, and a @name, link @ip
+ * into @dp under the given @name. If @ip is a directory, it will be
+ * initialized. Both inodes must have the ILOCK held and the transaction must
+ * have sufficient blocks reserved.
+ */
+int
+xfs_dir_create_child(
+ struct xfs_trans *tp,
+ unsigned int resblks,
+ struct xfs_dir_update *du)
+{
+ struct xfs_inode *dp = du->dp;
+ const struct xfs_name *name = du->name;
+ struct xfs_inode *ip = du->ip;
+ int error;
+
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+ xfs_assert_ilocked(dp, XFS_ILOCK_EXCL);
+
+ error = xfs_dir_createname(tp, dp, name, ip->i_ino, resblks);
+ if (error) {
+ ASSERT(error != -ENOSPC);
+ return error;
+ }
+
+ xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+
+ if (S_ISDIR(VFS_I(ip)->i_mode)) {
+ error = xfs_dir_init(tp, ip, dp);
+ if (error)
+ return error;
+
+ xfs_bumplink(tp, dp);
+ }
+
+ /*
+ * If we have parent pointers, we need to add the attribute containing
+ * the parent information now.
+ */
+ if (du->ppargs) {
+ error = xfs_parent_addname(tp, du->ppargs, dp, name, ip);
+ if (error)
+ return error;
+ }
+
+ xfs_dir_update_hook(dp, ip, 1, name);
+ return 0;
+}
+
+/*
+ * Given a directory @dp, an existing non-directory inode @ip, and a @name,
+ * link @ip into @dp under the given @name. Both inodes must have the ILOCK
+ * held.
+ */
+int
+xfs_dir_add_child(
+ struct xfs_trans *tp,
+ unsigned int resblks,
+ struct xfs_dir_update *du)
+{
+ struct xfs_inode *dp = du->dp;
+ const struct xfs_name *name = du->name;
+ struct xfs_inode *ip = du->ip;
+ struct xfs_mount *mp = tp->t_mountp;
+ int error;
+
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+ xfs_assert_ilocked(dp, XFS_ILOCK_EXCL);
+ ASSERT(!S_ISDIR(VFS_I(ip)->i_mode));
+
+ if (!resblks) {
+ error = xfs_dir_canenter(tp, dp, name);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Handle initial link state of O_TMPFILE inode
+ */
+ if (VFS_I(ip)->i_nlink == 0) {
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+ error = xfs_iunlink_remove(tp, pag, ip);
+ xfs_perag_put(pag);
+ if (error)
+ return error;
+ }
+
+ error = xfs_dir_createname(tp, dp, name, ip->i_ino, resblks);
+ if (error)
+ return error;
+
+ xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+
+ xfs_bumplink(tp, ip);
+
+ /*
+ * If we have parent pointers, we now need to add the parent record to
+ * the attribute fork of the inode. If this is the initial parent
+ * attribute, we need to create it correctly, otherwise we can just add
+ * the parent to the inode.
+ */
+ if (du->ppargs) {
+ error = xfs_parent_addname(tp, du->ppargs, dp, name, ip);
+ if (error)
+ return error;
+ }
+
+ xfs_dir_update_hook(dp, ip, 1, name);
+ return 0;
+}
+
+/*
+ * Given a directory @dp, a child @ip, and a @name, remove the (@name, @ip)
+ * entry from the directory. Both inodes must have the ILOCK held.
+ */
+int
+xfs_dir_remove_child(
+ struct xfs_trans *tp,
+ unsigned int resblks,
+ struct xfs_dir_update *du)
+{
+ struct xfs_inode *dp = du->dp;
+ const struct xfs_name *name = du->name;
+ struct xfs_inode *ip = du->ip;
+ int error;
+
+ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
+ xfs_assert_ilocked(dp, XFS_ILOCK_EXCL);
+
+ /*
+ * If we're removing a directory perform some additional validation.
+ */
+ if (S_ISDIR(VFS_I(ip)->i_mode)) {
+ ASSERT(VFS_I(ip)->i_nlink >= 2);
+ if (VFS_I(ip)->i_nlink != 2)
+ return -ENOTEMPTY;
+ if (!xfs_dir_isempty(ip))
+ return -ENOTEMPTY;
+
+ /* Drop the link from ip's "..". */
+ error = xfs_droplink(tp, dp);
+ if (error)
+ return error;
+
+ /* Drop the "." link from ip to self. */
+ error = xfs_droplink(tp, ip);
+ if (error)
+ return error;
+
+ /*
+ * Point the unlinked child directory's ".." entry to the root
+ * directory to eliminate back-references to inodes that may
+ * get freed before the child directory is closed. If the fs
+ * gets shrunk, this can lead to dirent inode validation errors.
+ */
+ if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
+ error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
+ tp->t_mountp->m_sb.sb_rootino, 0);
+ if (error)
+ return error;
+ }
+ } else {
+ /*
+ * When removing a non-directory we need to log the parent
+ * inode here. For a directory this is done implicitly
+ * by the xfs_droplink call for the ".." entry.
+ */
+ xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+ }
+ xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+ /* Drop the link from dp to ip. */
+ error = xfs_droplink(tp, ip);
+ if (error)
+ return error;
+
+ error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
+ if (error) {
+ ASSERT(error != -ENOENT);
+ return error;
+ }
+
+ /* Remove parent pointer. */
+ if (du->ppargs) {
+ error = xfs_parent_removename(tp, du->ppargs, dp, name, ip);
+ if (error)
+ return error;
+ }
+
+ xfs_dir_update_hook(dp, ip, -1, name);
+ return 0;
+}
+
+/*
+ * Exchange the entry (@name1, @ip1) in directory @dp1 with the entry (@name2,
+ * @ip2) in directory @dp2, and update '..' @ip1 and @ip2's entries as needed.
+ * @ip1 and @ip2 need not be of the same type.
+ *
+ * All inodes must have the ILOCK held, and both entries must already exist.
+ */
+int
+xfs_dir_exchange_children(
+ struct xfs_trans *tp,
+ struct xfs_dir_update *du1,
+ struct xfs_dir_update *du2,
+ unsigned int spaceres)
+{
+ struct xfs_inode *dp1 = du1->dp;
+ const struct xfs_name *name1 = du1->name;
+ struct xfs_inode *ip1 = du1->ip;
+ struct xfs_inode *dp2 = du2->dp;
+ const struct xfs_name *name2 = du2->name;
+ struct xfs_inode *ip2 = du2->ip;
+ int ip1_flags = 0;
+ int ip2_flags = 0;
+ int dp2_flags = 0;
+ int error;
+
+ /* Swap inode number for dirent in first parent */
+ error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
+ if (error)
+ return error;
+
+ /* Swap inode number for dirent in second parent */
+ error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
+ if (error)
+ return error;
+
+ /*
+ * If we're renaming one or more directories across different parents,
+ * update the respective ".." entries (and link counts) to match the new
+ * parents.
+ */
+ if (dp1 != dp2) {
+ dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+
+ if (S_ISDIR(VFS_I(ip2)->i_mode)) {
+ error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
+ dp1->i_ino, spaceres);
+ if (error)
+ return error;
+
+ /* transfer ip2 ".." reference to dp1 */
+ if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
+ error = xfs_droplink(tp, dp2);
+ if (error)
+ return error;
+ xfs_bumplink(tp, dp1);
+ }
+
+ /*
+ * Although ip1 isn't changed here, userspace needs
+ * to be notified of the change, so that applications
+ * relying on it (such as backup tools) can pick it
+ * up properly
+ */
+ ip1_flags |= XFS_ICHGTIME_CHG;
+ ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ }
+
+ if (S_ISDIR(VFS_I(ip1)->i_mode)) {
+ error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
+ dp2->i_ino, spaceres);
+ if (error)
+ return error;
+
+ /* transfer ip1 ".." reference to dp2 */
+ if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
+ error = xfs_droplink(tp, dp1);
+ if (error)
+ return error;
+ xfs_bumplink(tp, dp2);
+ }
+
+ /*
+ * Although ip2 isn't changed here, userspace needs
+ * to be notified of the change, so that applications
+ * relying on it (such as backup tools) can pick it
+ * up properly
+ */
+ ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ ip2_flags |= XFS_ICHGTIME_CHG;
+ }
+ }
+
+ if (ip1_flags) {
+ xfs_trans_ichgtime(tp, ip1, ip1_flags);
+ xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
+ }
+ if (ip2_flags) {
+ xfs_trans_ichgtime(tp, ip2, ip2_flags);
+ xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
+ }
+ if (dp2_flags) {
+ xfs_trans_ichgtime(tp, dp2, dp2_flags);
+ xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
+ }
+ xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
+
+ /* Schedule parent pointer replacements */
+ if (du1->ppargs) {
+ error = xfs_parent_replacename(tp, du1->ppargs, dp1, name1,
+ dp2, name2, ip1);
+ if (error)
+ return error;
+ }
+
+ if (du2->ppargs) {
+ error = xfs_parent_replacename(tp, du2->ppargs, dp2, name2,
+ dp1, name1, ip2);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Inform our hook clients that we've finished an exchange operation as
+ * follows: removed the source and target files from their directories;
+ * added the target to the source directory; and added the source to
+ * the target directory. All inodes are locked, so it's ok to model a
+ * rename this way so long as we say we deleted entries before we add
+ * new ones.
+ */
+ xfs_dir_update_hook(dp1, ip1, -1, name1);
+ xfs_dir_update_hook(dp2, ip2, -1, name2);
+ xfs_dir_update_hook(dp1, ip2, 1, name1);
+ xfs_dir_update_hook(dp2, ip1, 1, name2);
+ return 0;
+}
+
+/*
+ * Given an entry (@src_name, @src_ip) in directory @src_dp, make the entry
+ * @target_name in directory @target_dp point to @src_ip and remove the
+ * original entry, cleaning up everything left behind.
+ *
+ * Cleanup involves dropping a link count on @target_ip, and either removing
+ * the (@src_name, @src_ip) entry from @src_dp or simply replacing the entry
+ * with (@src_name, @wip) if a whiteout inode @wip is supplied.
+ *
+ * All inodes must have the ILOCK held. We assume that if @src_ip is a
+ * directory then its '..' doesn't already point to @target_dp, and that @wip
+ * is a freshly allocated whiteout.
+ */
+int
+xfs_dir_rename_children(
+ struct xfs_trans *tp,
+ struct xfs_dir_update *du_src,
+ struct xfs_dir_update *du_tgt,
+ unsigned int spaceres,
+ struct xfs_dir_update *du_wip)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_inode *src_dp = du_src->dp;
+ const struct xfs_name *src_name = du_src->name;
+ struct xfs_inode *src_ip = du_src->ip;
+ struct xfs_inode *target_dp = du_tgt->dp;
+ const struct xfs_name *target_name = du_tgt->name;
+ struct xfs_inode *target_ip = du_tgt->ip;
+ bool new_parent = (src_dp != target_dp);
+ bool src_is_directory;
+ int error;
+
+ src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
+
+ /*
+ * Check for expected errors before we dirty the transaction
+ * so we can return an error without a transaction abort.
+ */
+ if (target_ip == NULL) {
+ /*
+ * If there's no space reservation, check the entry will
+ * fit before actually inserting it.
+ */
+ if (!spaceres) {
+ error = xfs_dir_canenter(tp, target_dp, target_name);
+ if (error)
+ return error;
+ }
+ } else {
+ /*
+ * If the target exists and is a directory, check whether
+ * it can be destroyed.
+ */
+ if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
+ (!xfs_dir_isempty(target_ip) ||
+ (VFS_I(target_ip)->i_nlink > 2)))
+ return -EEXIST;
+ }
+
+ /*
+ * Directory entry creation below may acquire the AGF. Remove
+ * the whiteout from the unlinked list first to preserve correct
+ * AGI/AGF locking order. This dirties the transaction so failures
+ * after this point will abort and log recovery will clean up the
+ * mess.
+ *
+ * For whiteouts, we need to bump the link count on the whiteout
+ * inode. After this point we have a real link, so clear the tmpfile
+ * state flag from the inode to keep it from accidentally being
+ * misused in the future.
+ */
+ if (du_wip->ip) {
+ struct xfs_perag *pag;
+
+ ASSERT(VFS_I(du_wip->ip)->i_nlink == 0);
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, du_wip->ip->i_ino));
+ error = xfs_iunlink_remove(tp, pag, du_wip->ip);
+ xfs_perag_put(pag);
+ if (error)
+ return error;
+
+ xfs_bumplink(tp, du_wip->ip);
+ }
+
+ /*
+ * Set up the target.
+ */
+ if (target_ip == NULL) {
+ /*
+ * If target does not exist and the rename crosses
+ * directories, adjust the target directory link count
+ * to account for the ".." reference from the new entry.
+ */
+ error = xfs_dir_createname(tp, target_dp, target_name,
+ src_ip->i_ino, spaceres);
+ if (error)
+ return error;
+
+ xfs_trans_ichgtime(tp, target_dp,
+ XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+ if (new_parent && src_is_directory) {
+ xfs_bumplink(tp, target_dp);
+ }
+ } else { /* target_ip != NULL */
+ /*
+ * Link the source inode under the target name.
+ * If the source inode is a directory and we are moving
+ * it across directories, its ".." entry will be
+ * inconsistent until we replace that down below.
+ *
+ * In case there is already an entry with the same
+ * name at the destination directory, remove it first.
+ */
+ error = xfs_dir_replace(tp, target_dp, target_name,
+ src_ip->i_ino, spaceres);
+ if (error)
+ return error;
+
+ xfs_trans_ichgtime(tp, target_dp,
+ XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+ /*
+ * Decrement the link count on the target since the target
+ * dir no longer points to it.
+ */
+ error = xfs_droplink(tp, target_ip);
+ if (error)
+ return error;
+
+ if (src_is_directory) {
+ /*
+ * Drop the link from the old "." entry.
+ */
+ error = xfs_droplink(tp, target_ip);
+ if (error)
+ return error;
+ }
+ } /* target_ip != NULL */
+
+ /*
+ * Remove the source.
+ */
+ if (new_parent && src_is_directory) {
+ /*
+ * Rewrite the ".." entry to point to the new
+ * directory.
+ */
+ error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
+ target_dp->i_ino, spaceres);
+ ASSERT(error != -EEXIST);
+ if (error)
+ return error;
+ }
+
+ /*
+ * We always want to hit the ctime on the source inode.
+ *
+ * This isn't strictly required by the standards since the source
+ * inode isn't really being changed, but old unix file systems did
+ * it and some incremental backup programs won't work without it.
+ */
+ xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
+
+ /*
+ * Adjust the link count on src_dp. This is necessary when
+ * renaming a directory, either within one parent when
+ * the target existed, or across two parent directories.
+ */
+ if (src_is_directory && (new_parent || target_ip != NULL)) {
+
+ /*
+ * Decrement the link count on src_dp since the
+ * entry that's moved no longer points to it.
+ */
+ error = xfs_droplink(tp, src_dp);
+ if (error)
+ return error;
+ }
+
+ /*
+ * For whiteouts, we only need to update the source dirent with the
+ * inode number of the whiteout inode rather than removing it
+ * altogether.
+ */
+ if (du_wip->ip)
+ error = xfs_dir_replace(tp, src_dp, src_name, du_wip->ip->i_ino,
+ spaceres);
+ else
+ error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
+ spaceres);
+ if (error)
+ return error;
+
+ xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
+ if (new_parent)
+ xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
+
+ /* Schedule parent pointer updates. */
+ if (du_wip->ppargs) {
+ error = xfs_parent_addname(tp, du_wip->ppargs, src_dp,
+ src_name, du_wip->ip);
+ if (error)
+ return error;
+ }
+
+ if (du_src->ppargs) {
+ error = xfs_parent_replacename(tp, du_src->ppargs, src_dp,
+ src_name, target_dp, target_name, src_ip);
+ if (error)
+ return error;
+ }
+
+ if (du_tgt->ppargs) {
+ error = xfs_parent_removename(tp, du_tgt->ppargs, target_dp,
+ target_name, target_ip);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Inform our hook clients that we've finished a rename operation as
+ * follows: that we've removed the source and target files from their
+ * directories; that we've added the source to the target directory; and
+ * finally that we've added the whiteout, if there was one. All inodes
+ * are locked, so it's ok to model a rename this way so long as we say
+ * we deleted entries before we add new ones.
+ */
+ if (target_ip)
+ xfs_dir_update_hook(target_dp, target_ip, -1, target_name);
+ xfs_dir_update_hook(src_dp, src_ip, -1, src_name);
+ xfs_dir_update_hook(target_dp, src_ip, 1, target_name);
+ if (du_wip->ip)
+ xfs_dir_update_hook(src_dp, du_wip->ip, 1, src_name);
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 6dbe6e9ecb49..576068ed81fa 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -74,7 +74,7 @@ extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
const struct xfs_name *name, xfs_ino_t inum,
xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
- struct xfs_name *name);
+ const struct xfs_name *name);
int xfs_dir_lookup_args(struct xfs_da_args *args);
int xfs_dir_createname_args(struct xfs_da_args *args);
@@ -309,4 +309,51 @@ static inline unsigned char xfs_ascii_ci_xfrm(unsigned char c)
return c;
}
+struct xfs_dir_update_params {
+ const struct xfs_inode *dp;
+ const struct xfs_inode *ip;
+ const struct xfs_name *name;
+ int delta;
+};
+
+#ifdef CONFIG_XFS_LIVE_HOOKS
+void xfs_dir_update_hook(struct xfs_inode *dp, struct xfs_inode *ip,
+ int delta, const struct xfs_name *name);
+
+struct xfs_dir_hook {
+ struct xfs_hook dirent_hook;
+};
+
+void xfs_dir_hook_disable(void);
+void xfs_dir_hook_enable(void);
+
+int xfs_dir_hook_add(struct xfs_mount *mp, struct xfs_dir_hook *hook);
+void xfs_dir_hook_del(struct xfs_mount *mp, struct xfs_dir_hook *hook);
+void xfs_dir_hook_setup(struct xfs_dir_hook *hook, notifier_fn_t mod_fn);
+#else
+# define xfs_dir_update_hook(dp, ip, delta, name) ((void)0)
+#endif /* CONFIG_XFS_LIVE_HOOKS */
+
+struct xfs_parent_args;
+
+struct xfs_dir_update {
+ struct xfs_inode *dp;
+ const struct xfs_name *name;
+ struct xfs_inode *ip;
+ struct xfs_parent_args *ppargs;
+};
+
+int xfs_dir_create_child(struct xfs_trans *tp, unsigned int resblks,
+ struct xfs_dir_update *du);
+int xfs_dir_add_child(struct xfs_trans *tp, unsigned int resblks,
+ struct xfs_dir_update *du);
+int xfs_dir_remove_child(struct xfs_trans *tp, unsigned int resblks,
+ struct xfs_dir_update *du);
+
+int xfs_dir_exchange_children(struct xfs_trans *tp, struct xfs_dir_update *du1,
+ struct xfs_dir_update *du2, unsigned int spaceres);
+int xfs_dir_rename_children(struct xfs_trans *tp, struct xfs_dir_update *du_src,
+ struct xfs_dir_update *du_tgt, unsigned int spaceres,
+ struct xfs_dir_update *du_wip);
+
#endif /* __XFS_DIR2_H__ */
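/*
 * Illustrative aside, not part of the patch: a minimal sketch of how a hook
 * client might consume the directory update notifications declared above.
 * The client structure, callback, and attach helper names are hypothetical;
 * the sketch assumes the notifier data pointer carries a
 * struct xfs_dir_update_params, as xfs_dir_update_hook() implies.
 */
struct example_dir_client {
	struct xfs_dir_hook	dhook;
};

static int
example_dirent_update_fn(
	struct notifier_block		*nb,
	unsigned long			action,
	void				*data)
{
	const struct xfs_dir_update_params *p = data;

	if (p->delta < 0) {
		/* a directory entry went away */
	} else if (p->delta > 0) {
		/* a directory entry was added */
	}
	return NOTIFY_DONE;
}

static int
example_attach_dir_hook(
	struct xfs_mount		*mp,
	struct example_dir_client	*ec)
{
	xfs_dir_hook_setup(&ec->dhook, example_dirent_update_fn);
	return xfs_dir_hook_add(mp, &ec->dhook);
}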
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index ea0b9628df18..a16b05c43e2e 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -178,6 +178,14 @@ __xfs_dir3_data_check(
while (offset < end) {
struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
+ unsigned int reclen;
+
+ /*
+ * Are the remaining bytes large enough to hold an
+ * unused entry?
+ */
+ if (offset > end - xfs_dir2_data_unusedsize(1))
+ return __this_address;
/*
* If it's unused, look for the space in the bestfree table.
@@ -187,9 +195,13 @@ __xfs_dir3_data_check(
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
xfs_failaddr_t fa;
+ reclen = xfs_dir2_data_unusedsize(
+ be16_to_cpu(dup->length));
if (lastfree != 0)
return __this_address;
- if (offset + be16_to_cpu(dup->length) > end)
+ if (be16_to_cpu(dup->length) != reclen)
+ return __this_address;
+ if (offset + reclen > end)
return __this_address;
if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) !=
offset)
@@ -207,10 +219,18 @@ __xfs_dir3_data_check(
be16_to_cpu(bf[2].length))
return __this_address;
}
- offset += be16_to_cpu(dup->length);
+ offset += reclen;
lastfree = 1;
continue;
}
+
+ /*
+ * This is not an unused entry. Are the remaining bytes
+ * large enough for a dirent with a single-byte name?
+ */
+ if (offset > end - xfs_dir2_data_entsize(mp, 1))
+ return __this_address;
+
/*
* It's a real entry. Validate the fields.
* If this is a block directory then make sure it's
@@ -219,9 +239,10 @@ __xfs_dir3_data_check(
*/
if (dep->namelen == 0)
return __this_address;
- if (!xfs_verify_dir_ino(mp, be64_to_cpu(dep->inumber)))
+ reclen = xfs_dir2_data_entsize(mp, dep->namelen);
+ if (offset + reclen > end)
return __this_address;
- if (offset + xfs_dir2_data_entsize(mp, dep->namelen) > end)
+ if (!xfs_verify_dir_ino(mp, be64_to_cpu(dep->inumber)))
return __this_address;
if (be16_to_cpu(*xfs_dir2_data_entry_tag_p(mp, dep)) != offset)
return __this_address;
@@ -245,7 +266,7 @@ __xfs_dir3_data_check(
if (i >= be32_to_cpu(btp->count))
return __this_address;
}
- offset += xfs_dir2_data_entsize(mp, dep->namelen);
+ offset += reclen;
}
/*
* Need to have seen all the entries and all the bestfree slots.
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 3befb32509fa..10041350274a 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -190,6 +190,13 @@ extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp,
struct dir_context *ctx, size_t bufsize);
static inline unsigned int
+xfs_dir2_data_unusedsize(
+ unsigned int len)
+{
+ return round_up(len, XFS_DIR2_DATA_ALIGN);
+}
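/*
 * Illustrative aside, not part of the patch, assuming XFS_DIR2_DATA_ALIGN is
 * 8 bytes: the helper above rounds the stored length up to the directory
 * data alignment, so the smallest possible unused entry occupies
 * round_up(1, 8) == 8 bytes.  That is the value behind the new minimum-size
 * guard added to __xfs_dir3_data_check() earlier in this patch:
 *
 *	if (offset > end - xfs_dir2_data_unusedsize(1))
 *		return __this_address;
 *
 * which rejects the buffer before dup->freetag or dup->length is ever read
 * past the end of the block.
 */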
+
+static inline unsigned int
xfs_dir2_data_entsize(
struct xfs_mount *mp,
unsigned int namelen)
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 61f51becff4f..e1bfee0c3b1a 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -90,8 +90,7 @@ struct xfs_ifork;
#define XFSLABEL_MAX 12
/*
- * Superblock - in core version. Must match the ondisk version below.
- * Must be padded to 64 bit alignment.
+ * Superblock - in core version. Must be padded to 64 bit alignment.
*/
typedef struct xfs_sb {
uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
@@ -178,10 +177,8 @@ typedef struct xfs_sb {
/* must be padded to 64 bit alignment */
} xfs_sb_t;
-#define XFS_SB_CRC_OFF offsetof(struct xfs_sb, sb_crc)
-
/*
- * Superblock - on disk version. Must match the in core version above.
+ * Superblock - on disk version.
* Must be padded to 64 bit alignment.
*/
struct xfs_dsb {
@@ -265,6 +262,8 @@ struct xfs_dsb {
/* must be padded to 64 bit alignment */
};
+#define XFS_SB_CRC_OFF offsetof(struct xfs_dsb, sb_crc)
+
/*
* Misc. Flags - warning - these will be cleared by xfs_repair unless
* a feature bit is set when the flag is used.
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 97996cb79aaa..454b63ef7201 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -996,7 +996,7 @@ struct xfs_getparents_by_handle {
#define XFS_IOC_FSGEOMETRY _IOR ('X', 126, struct xfs_fsop_geom)
#define XFS_IOC_BULKSTAT _IOR ('X', 127, struct xfs_bulkstat_req)
#define XFS_IOC_INUMBERS _IOR ('X', 128, struct xfs_inumbers_req)
-#define XFS_IOC_EXCHANGE_RANGE _IOWR('X', 129, struct xfs_exchange_range)
+#define XFS_IOC_EXCHANGE_RANGE _IOW ('X', 129, struct xfs_exchange_range)
/* XFS_IOC_GETFSUUID ---------- deprecated 140 */
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 14c81f227c5b..0af5b7a33d05 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1946,6 +1946,21 @@ retry:
}
return -ENOSPC;
}
+
+ /*
+ * Protect against obviously corrupt allocation btree records. Later
+ * xfs_iget checks will catch re-allocation of other active in-memory
+ * and on-disk inodes. If we don't catch reallocating the parent inode
+ * here we will deadlock in xfs_iget() so we have to do these checks
+ * first.
+ */
+ if (ino == parent || !xfs_verify_dir_ino(mp, ino)) {
+ xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
+ xfs_agno_mark_sick(mp, XFS_INO_TO_AGNO(mp, ino),
+ XFS_SICK_AG_INOBT);
+ return -EFSCORRUPTED;
+ }
+
*new_ino = ino;
return 0;
}
@@ -1975,7 +1990,7 @@ xfs_difree_inode_chunk(
return xfs_free_extent_later(tp,
XFS_AGB_TO_FSB(mp, agno, sagbno),
M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES,
- XFS_AG_RESV_NONE, false);
+ XFS_AG_RESV_NONE, 0);
}
/* holemask is only 16-bits (fits in an unsigned long) */
@@ -2021,8 +2036,7 @@ xfs_difree_inode_chunk(
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
error = xfs_free_extent_later(tp,
XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
- &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE,
- false);
+ &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE, 0);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 42e9fd47f6c7..496e2f72a85b 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -170,7 +170,7 @@ __xfs_inobt_free_block(
xfs_inobt_mod_blockcount(cur, -1);
fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
- &XFS_RMAP_OINFO_INOBT, resv, false);
+ &XFS_RMAP_OINFO_INOBT, resv, 0);
}
STATIC int
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index d79002343d0b..513b50da6215 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -374,17 +374,40 @@ xfs_dinode_verify_fork(
/*
* For fork types that can contain local data, check that the fork
* format matches the size of local data contained within the fork.
- *
- * For all types, check that when the size says the should be in extent
- * or btree format, the inode isn't claiming it is in local format.
*/
if (whichfork == XFS_DATA_FORK) {
- if (S_ISDIR(mode) || S_ISLNK(mode)) {
+ /*
+ * A directory small enough to fit in the inode must be stored
+ * in local format. The directory sf <-> extents conversion
+ * code updates the directory size accordingly. Directories
+ * being truncated have zero size and are not subject to this
+ * check.
+ */
+ if (S_ISDIR(mode)) {
+ if (dip->di_size &&
+ be64_to_cpu(dip->di_size) <= fork_size &&
+ fork_format != XFS_DINODE_FMT_LOCAL)
+ return __this_address;
+ }
+
+ /*
+ * A symlink with a target small enough to fit in the inode can
+ * be stored in extents format if xattrs were added (thus
+ * converting the data fork from shortform to remote format)
+ * and then removed.
+ */
+ if (S_ISLNK(mode)) {
if (be64_to_cpu(dip->di_size) <= fork_size &&
+ fork_format != XFS_DINODE_FMT_EXTENTS &&
fork_format != XFS_DINODE_FMT_LOCAL)
return __this_address;
}
+ /*
+ * For all types, check that when the size says the fork should
+ * be in extent or btree format, the inode isn't claiming to be
+ * in local format.
+ */
if (be64_to_cpu(dip->di_size) > fork_size &&
fork_format == XFS_DINODE_FMT_LOCAL)
return __this_address;
@@ -508,9 +531,19 @@ xfs_dinode_verify(
if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
return __this_address;
- /* No zero-length symlinks/dirs. */
- if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
- return __this_address;
+ /*
+ * No zero-length symlinks/dirs unless they're unlinked and hence being
+ * inactivated.
+ */
+ if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) {
+ if (dip->di_version > 1) {
+ if (dip->di_nlink)
+ return __this_address;
+ } else {
+ if (dip->di_onlink)
+ return __this_address;
+ }
+ }
fa = xfs_dinode_verify_nrext64(mp, dip);
if (fa)
diff --git a/fs/xfs/libxfs/xfs_inode_util.c b/fs/xfs/libxfs/xfs_inode_util.c
new file mode 100644
index 000000000000..032333289113
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_inode_util.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#include <linux/iversion.h>
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_inode_util.h"
+#include "xfs_trans.h"
+#include "xfs_ialloc.h"
+#include "xfs_health.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_ag.h"
+#include "xfs_iunlink_item.h"
+#include "xfs_inode_item.h"
+
+uint16_t
+xfs_flags2diflags(
+ struct xfs_inode *ip,
+ unsigned int xflags)
+{
+ /* can't set PREALLOC this way, just preserve it */
+ uint16_t di_flags =
+ (ip->i_diflags & XFS_DIFLAG_PREALLOC);
+
+ if (xflags & FS_XFLAG_IMMUTABLE)
+ di_flags |= XFS_DIFLAG_IMMUTABLE;
+ if (xflags & FS_XFLAG_APPEND)
+ di_flags |= XFS_DIFLAG_APPEND;
+ if (xflags & FS_XFLAG_SYNC)
+ di_flags |= XFS_DIFLAG_SYNC;
+ if (xflags & FS_XFLAG_NOATIME)
+ di_flags |= XFS_DIFLAG_NOATIME;
+ if (xflags & FS_XFLAG_NODUMP)
+ di_flags |= XFS_DIFLAG_NODUMP;
+ if (xflags & FS_XFLAG_NODEFRAG)
+ di_flags |= XFS_DIFLAG_NODEFRAG;
+ if (xflags & FS_XFLAG_FILESTREAM)
+ di_flags |= XFS_DIFLAG_FILESTREAM;
+ if (S_ISDIR(VFS_I(ip)->i_mode)) {
+ if (xflags & FS_XFLAG_RTINHERIT)
+ di_flags |= XFS_DIFLAG_RTINHERIT;
+ if (xflags & FS_XFLAG_NOSYMLINKS)
+ di_flags |= XFS_DIFLAG_NOSYMLINKS;
+ if (xflags & FS_XFLAG_EXTSZINHERIT)
+ di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+ if (xflags & FS_XFLAG_PROJINHERIT)
+ di_flags |= XFS_DIFLAG_PROJINHERIT;
+ } else if (S_ISREG(VFS_I(ip)->i_mode)) {
+ if (xflags & FS_XFLAG_REALTIME)
+ di_flags |= XFS_DIFLAG_REALTIME;
+ if (xflags & FS_XFLAG_EXTSIZE)
+ di_flags |= XFS_DIFLAG_EXTSIZE;
+ }
+
+ return di_flags;
+}
+
+uint64_t
+xfs_flags2diflags2(
+ struct xfs_inode *ip,
+ unsigned int xflags)
+{
+ uint64_t di_flags2 =
+ (ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
+ XFS_DIFLAG2_BIGTIME |
+ XFS_DIFLAG2_NREXT64));
+
+ if (xflags & FS_XFLAG_DAX)
+ di_flags2 |= XFS_DIFLAG2_DAX;
+ if (xflags & FS_XFLAG_COWEXTSIZE)
+ di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+
+ return di_flags2;
+}
+
+uint32_t
+xfs_ip2xflags(
+ struct xfs_inode *ip)
+{
+ uint32_t flags = 0;
+
+ if (ip->i_diflags & XFS_DIFLAG_ANY) {
+ if (ip->i_diflags & XFS_DIFLAG_REALTIME)
+ flags |= FS_XFLAG_REALTIME;
+ if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
+ flags |= FS_XFLAG_PREALLOC;
+ if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
+ flags |= FS_XFLAG_IMMUTABLE;
+ if (ip->i_diflags & XFS_DIFLAG_APPEND)
+ flags |= FS_XFLAG_APPEND;
+ if (ip->i_diflags & XFS_DIFLAG_SYNC)
+ flags |= FS_XFLAG_SYNC;
+ if (ip->i_diflags & XFS_DIFLAG_NOATIME)
+ flags |= FS_XFLAG_NOATIME;
+ if (ip->i_diflags & XFS_DIFLAG_NODUMP)
+ flags |= FS_XFLAG_NODUMP;
+ if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
+ flags |= FS_XFLAG_RTINHERIT;
+ if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
+ flags |= FS_XFLAG_PROJINHERIT;
+ if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
+ flags |= FS_XFLAG_NOSYMLINKS;
+ if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
+ flags |= FS_XFLAG_EXTSIZE;
+ if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
+ flags |= FS_XFLAG_EXTSZINHERIT;
+ if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
+ flags |= FS_XFLAG_NODEFRAG;
+ if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
+ flags |= FS_XFLAG_FILESTREAM;
+ }
+
+ if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
+ if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
+ flags |= FS_XFLAG_DAX;
+ if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
+ flags |= FS_XFLAG_COWEXTSIZE;
+ }
+
+ if (xfs_inode_has_attr_fork(ip))
+ flags |= FS_XFLAG_HASATTR;
+ return flags;
+}
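/*
 * Illustrative aside, not part of the patch: the xfs_flags2diflags() and
 * xfs_flags2diflags2() helpers above are meant to be applied as a pair when
 * pushing user-visible FS_XFLAG_* bits into an inode.  The wrapper name is
 * hypothetical; validation, locking, and transaction logging are omitted.
 */
static void
example_apply_xflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* i_diflags holds the original 16-bit flags... */
	ip->i_diflags = xfs_flags2diflags(ip, xflags);
	/* ...i_diflags2 the extended 64-bit flags (DAX, COW extent size). */
	ip->i_diflags2 = xfs_flags2diflags2(ip, xflags);
}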
+
+prid_t
+xfs_get_initial_prid(struct xfs_inode *dp)
+{
+ if (dp->i_diflags & XFS_DIFLAG_PROJINHERIT)
+ return dp->i_projid;
+
+ /* Assign to the root project by default. */
+ return 0;
+}
+
+/* Propagate di_flags from a parent inode to a child inode. */
+static inline void
+xfs_inode_inherit_flags(
+ struct xfs_inode *ip,
+ const struct xfs_inode *pip)
+{
+ unsigned int di_flags = 0;
+ xfs_failaddr_t failaddr;
+ umode_t mode = VFS_I(ip)->i_mode;
+
+ if (S_ISDIR(mode)) {
+ if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
+ di_flags |= XFS_DIFLAG_RTINHERIT;
+ if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
+ di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+ ip->i_extsize = pip->i_extsize;
+ }
+ if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
+ di_flags |= XFS_DIFLAG_PROJINHERIT;
+ } else if (S_ISREG(mode)) {
+ if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ xfs_has_realtime(ip->i_mount))
+ di_flags |= XFS_DIFLAG_REALTIME;
+ if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
+ di_flags |= XFS_DIFLAG_EXTSIZE;
+ ip->i_extsize = pip->i_extsize;
+ }
+ }
+ if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
+ xfs_inherit_noatime)
+ di_flags |= XFS_DIFLAG_NOATIME;
+ if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
+ xfs_inherit_nodump)
+ di_flags |= XFS_DIFLAG_NODUMP;
+ if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
+ xfs_inherit_sync)
+ di_flags |= XFS_DIFLAG_SYNC;
+ if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
+ xfs_inherit_nosymlinks)
+ di_flags |= XFS_DIFLAG_NOSYMLINKS;
+ if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
+ xfs_inherit_nodefrag)
+ di_flags |= XFS_DIFLAG_NODEFRAG;
+ if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
+ di_flags |= XFS_DIFLAG_FILESTREAM;
+
+ ip->i_diflags |= di_flags;
+
+ /*
+ * Inode verifiers on older kernels only check that the extent size
+ * hint is an integer multiple of the rt extent size on realtime files.
+ * They did not check the hint alignment on a directory with both
+ * rtinherit and extszinherit flags set. If the misaligned hint is
+ * propagated from a directory into a new realtime file, new file
+ * allocations will fail due to math errors in the rt allocator and/or
+ * trip the verifiers. Validate the hint settings in the new file so
+ * that we don't let broken hints propagate.
+ */
+ failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
+ VFS_I(ip)->i_mode, ip->i_diflags);
+ if (failaddr) {
+ ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+ XFS_DIFLAG_EXTSZINHERIT);
+ ip->i_extsize = 0;
+ }
+}
+
+/* Propagate di_flags2 from a parent inode to a child inode. */
+static inline void
+xfs_inode_inherit_flags2(
+ struct xfs_inode *ip,
+ const struct xfs_inode *pip)
+{
+ xfs_failaddr_t failaddr;
+
+ if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
+ ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
+ ip->i_cowextsize = pip->i_cowextsize;
+ }
+ if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
+ ip->i_diflags2 |= XFS_DIFLAG2_DAX;
+
+ /* Don't let invalid cowextsize hints propagate. */
+ failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
+ VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
+ if (failaddr) {
+ ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
+ ip->i_cowextsize = 0;
+ }
+}
+
+/*
+ * If we need to create attributes immediately after allocating the inode,
+ * initialise an empty attribute fork right now. We use the default fork offset
+ * for attributes here as we don't know exactly what size or how many
+ * attributes we might be adding. We can do this safely here because we know
+ * the data fork is completely empty and this saves us from needing to run a
+ * separate transaction to set the fork offset in the immediate future.
+ *
+ * If we have parent pointers and the caller hasn't told us that the file will
+ * never be linked into a directory tree, we /must/ create the attr fork.
+ */
+static inline bool
+xfs_icreate_want_attrfork(
+ struct xfs_mount *mp,
+ const struct xfs_icreate_args *args)
+{
+ if (args->flags & XFS_ICREATE_INIT_XATTRS)
+ return true;
+
+ if (!(args->flags & XFS_ICREATE_UNLINKABLE) && xfs_has_parent(mp))
+ return true;
+
+ return false;
+}
+
+/* Initialise an inode's attributes. */
+void
+xfs_inode_init(
+ struct xfs_trans *tp,
+ const struct xfs_icreate_args *args,
+ struct xfs_inode *ip)
+{
+ struct xfs_inode *pip = args->pip;
+ struct inode *dir = pip ? VFS_I(pip) : NULL;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct inode *inode = VFS_I(ip);
+ unsigned int flags;
+ int times = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG |
+ XFS_ICHGTIME_ACCESS;
+
+ if (args->flags & XFS_ICREATE_TMPFILE)
+ set_nlink(inode, 0);
+ else if (S_ISDIR(args->mode))
+ set_nlink(inode, 2);
+ else
+ set_nlink(inode, 1);
+ inode->i_rdev = args->rdev;
+
+ if (!args->idmap || pip == NULL) {
+ /* creating a tree root, sb rooted, or detached file */
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ ip->i_projid = 0;
+ inode->i_mode = args->mode;
+ } else {
+ /* creating a child in the directory tree */
+ if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
+ inode_fsuid_set(inode, args->idmap);
+ inode->i_gid = dir->i_gid;
+ inode->i_mode = args->mode;
+ } else {
+ inode_init_owner(args->idmap, inode, dir, args->mode);
+ }
+
+ /*
+ * If the group ID of the new file does not match the effective
+ * group ID or one of the supplementary group IDs, the S_ISGID
+ * bit is cleared (and only if the irix_sgid_inherit
+ * compatibility variable is set).
+ */
+ if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
+ !vfsgid_in_group_p(i_gid_into_vfsgid(args->idmap, inode)))
+ inode->i_mode &= ~S_ISGID;
+
+ ip->i_projid = pip ? xfs_get_initial_prid(pip) : 0;
+ }
+
+ ip->i_disk_size = 0;
+ ip->i_df.if_nextents = 0;
+ ASSERT(ip->i_nblocks == 0);
+
+ ip->i_extsize = 0;
+ ip->i_diflags = 0;
+
+ if (xfs_has_v3inodes(mp)) {
+ inode_set_iversion(inode, 1);
+ ip->i_cowextsize = 0;
+ times |= XFS_ICHGTIME_CREATE;
+ }
+
+ xfs_trans_ichgtime(tp, ip, times);
+
+ flags = XFS_ILOG_CORE;
+ switch (args->mode & S_IFMT) {
+ case S_IFIFO:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFSOCK:
+ ip->i_df.if_format = XFS_DINODE_FMT_DEV;
+ flags |= XFS_ILOG_DEV;
+ break;
+ case S_IFREG:
+ case S_IFDIR:
+ if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
+ xfs_inode_inherit_flags(ip, pip);
+ if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
+ xfs_inode_inherit_flags2(ip, pip);
+ fallthrough;
+ case S_IFLNK:
+ ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
+ ip->i_df.if_bytes = 0;
+ ip->i_df.if_data = NULL;
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ if (xfs_icreate_want_attrfork(mp, args)) {
+ ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
+ xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
+
+ if (!xfs_has_attr(mp)) {
+ spin_lock(&mp->m_sb_lock);
+ xfs_add_attr(mp);
+ spin_unlock(&mp->m_sb_lock);
+ xfs_log_sb(tp);
+ }
+ }
+
+ xfs_trans_log_inode(tp, ip, flags);
+}
+
+/*
+ * In-Core Unlinked List Lookups
+ * =============================
+ *
+ * Every inode is supposed to be reachable from some other piece of metadata
+ * with the exception of the root directory. Inodes with a connection to a
+ * file descriptor but not linked from anywhere in the on-disk directory tree
+ * are collectively known as unlinked inodes, though the filesystem itself
+ * maintains links to these inodes so that on-disk metadata are consistent.
+ *
+ * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI
+ * header contains a number of buckets that point to an inode, and each inode
+ * record has a pointer to the next inode in the hash chain. This
+ * singly-linked list causes scaling problems in the iunlink remove function
+ * because we must walk that list to find the inode that points to the inode
+ * being removed from the unlinked hash bucket list.
+ *
+ * Hence we keep an in-memory doubly linked list to link each inode on an
+ * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
+ * based lists would require having 64 list heads in the perag, one for each
+ * list. This is expensive in terms of memory (think millions of AGs) and cache
+ * misses on lookups. Instead, use the fact that inodes on the unlinked list
+ * must be referenced at the VFS level to keep them on the list and hence we
+ * have an existence guarantee for inodes on the unlinked list.
+ *
+ * Given we have an existence guarantee, we can use lockless inode cache lookups
+ * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
+ * for the doubly linked unlinked list, and we don't need any extra locking to
+ * keep the list safe as all manipulations are done under the AGI buffer lock.
+ * Keeping the list up to date does not require memory allocation, just finding
+ * the XFS inode and updating the next/prev unlinked list aginos.
+ */
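/*
 * Editorial illustration, not part of the patch: the shape of one unlinked
 * bucket with the incore back pointers described above.
 *
 *   agi_unlinked[N] -> A <-> B <-> C -> NULLAGINO
 *
 * The forward links (A -> B -> C) are the persistent on-disk next pointers;
 * the backward links (C -> B -> A) are the purely incore i_prev_unlinked
 * fields, with A->i_prev_unlinked == NULLAGINO marking the head of the list.
 */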
+
+/*
+ * Update the prev pointer of the next agino. Returns -ENOLINK if the inode
+ * is not in cache.
+ */
+static int
+xfs_iunlink_update_backref(
+ struct xfs_perag *pag,
+ xfs_agino_t prev_agino,
+ xfs_agino_t next_agino)
+{
+ struct xfs_inode *ip;
+
+ /* No update necessary if we are at the end of the list. */
+ if (next_agino == NULLAGINO)
+ return 0;
+
+ ip = xfs_iunlink_lookup(pag, next_agino);
+ if (!ip)
+ return -ENOLINK;
+
+ ip->i_prev_unlinked = prev_agino;
+ return 0;
+}
+
+/*
+ * Point the AGI unlinked bucket at an inode and log the results. The caller
+ * is responsible for validating the old value.
+ */
+STATIC int
+xfs_iunlink_update_bucket(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_buf *agibp,
+ unsigned int bucket_index,
+ xfs_agino_t new_agino)
+{
+ struct xfs_agi *agi = agibp->b_addr;
+ xfs_agino_t old_value;
+ int offset;
+
+ ASSERT(xfs_verify_agino_or_null(pag, new_agino));
+
+ old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
+ trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
+ old_value, new_agino);
+
+ /*
+ * We should never find the head of the list already set to the value
+ * passed in because either we're adding or removing ourselves from the
+ * head of the list.
+ */
+ if (old_value == new_agino) {
+ xfs_buf_mark_corrupt(agibp);
+ xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
+ return -EFSCORRUPTED;
+ }
+
+ agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
+ offset = offsetof(struct xfs_agi, agi_unlinked) +
+ (sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
+ return 0;
+}
+
+static int
+xfs_iunlink_insert_inode(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_buf *agibp,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_agi *agi = agibp->b_addr;
+ xfs_agino_t next_agino;
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+ short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
+ int error;
+
+ /*
+ * Get the index into the agi hash table for the list this inode will
+ * go on. Make sure the pointer isn't garbage and that this inode
+ * isn't already on the list.
+ */
+ next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
+ if (next_agino == agino ||
+ !xfs_verify_agino_or_null(pag, next_agino)) {
+ xfs_buf_mark_corrupt(agibp);
+ xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * Update the prev pointer in the next inode to point back to this
+ * inode.
+ */
+ error = xfs_iunlink_update_backref(pag, agino, next_agino);
+ if (error == -ENOLINK)
+ error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
+ if (error)
+ return error;
+
+ if (next_agino != NULLAGINO) {
+ /*
+ * There is already another inode in the bucket, so point this
+ * inode to the current head of the list.
+ */
+ error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
+ if (error)
+ return error;
+ ip->i_next_unlinked = next_agino;
+ }
+
+ /* Point the head of the list to point to this inode. */
+ ip->i_prev_unlinked = NULLAGINO;
+ return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
+}
+
+/*
+ * This is called when the inode's link count has gone to 0 or we are creating
+ * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
+ *
+ * We place the on-disk inode on a list in the AGI. It will be pulled from this
+ * list when the inode is freed.
+ */
+int
+xfs_iunlink(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
+ struct xfs_buf *agibp;
+ int error;
+
+ ASSERT(VFS_I(ip)->i_nlink == 0);
+ ASSERT(VFS_I(ip)->i_mode != 0);
+ trace_xfs_iunlink(ip);
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+
+ /* Get the agi buffer first. It ensures lock ordering on the list. */
+ error = xfs_read_agi(pag, tp, 0, &agibp);
+ if (error)
+ goto out;
+
+ error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
+out:
+ xfs_perag_put(pag);
+ return error;
+}
+
+static int
+xfs_iunlink_remove_inode(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_buf *agibp,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_agi *agi = agibp->b_addr;
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+ xfs_agino_t head_agino;
+ short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
+ int error;
+
+ trace_xfs_iunlink_remove(ip);
+
+ /*
+ * Get the index into the agi hash table for the list this inode will
+ * go on. Make sure the head pointer isn't garbage.
+ */
+ head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
+ if (!xfs_verify_agino(pag, head_agino)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ agi, sizeof(*agi));
+ xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * Set our inode's next_unlinked pointer to NULLAGINO and then return
+ * the old pointer value so that we can update whatever was previous
+ * to us in the list to point to whatever was next in the list.
+ */
+ error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
+ if (error)
+ return error;
+
+ /*
+ * Update the prev pointer in the next inode to point back to previous
+ * inode in the chain.
+ */
+ error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
+ ip->i_next_unlinked);
+ if (error == -ENOLINK)
+ error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
+ ip->i_next_unlinked);
+ if (error)
+ return error;
+
+ if (head_agino != agino) {
+ struct xfs_inode *prev_ip;
+
+ prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
+ if (!prev_ip) {
+ xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
+ return -EFSCORRUPTED;
+ }
+
+ error = xfs_iunlink_log_inode(tp, prev_ip, pag,
+ ip->i_next_unlinked);
+ prev_ip->i_next_unlinked = ip->i_next_unlinked;
+ } else {
+ /* Point the head of the list to the next unlinked inode. */
+ error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
+ ip->i_next_unlinked);
+ }
+
+ ip->i_next_unlinked = NULLAGINO;
+ ip->i_prev_unlinked = 0;
+ return error;
+}
+
+/*
+ * Pull the on-disk inode from the AGI unlinked list.
+ */
+int
+xfs_iunlink_remove(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_inode *ip)
+{
+ struct xfs_buf *agibp;
+ int error;
+
+ trace_xfs_iunlink_remove(ip);
+
+ /* Get the agi buffer first. It ensures lock ordering on the list. */
+ error = xfs_read_agi(pag, tp, 0, &agibp);
+ if (error)
+ return error;
+
+ return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
+}
+
+/*
+ * Decrement the link count on an inode & log the change. If this causes the
+ * link count to go to zero, move the inode to AGI unlinked list so that it can
+ * be freed when the last active reference goes away via xfs_inactive().
+ */
+int
+xfs_droplink(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
+{
+ struct inode *inode = VFS_I(ip);
+
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+
+ if (inode->i_nlink == 0) {
+ xfs_info_ratelimited(tp->t_mountp,
+ "Inode 0x%llx link count dropped below zero. Pinning link count.",
+ ip->i_ino);
+ set_nlink(inode, XFS_NLINK_PINNED);
+ }
+ if (inode->i_nlink != XFS_NLINK_PINNED)
+ drop_nlink(inode);
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ if (inode->i_nlink)
+ return 0;
+
+ return xfs_iunlink(tp, ip);
+}
+
+/*
+ * Increment the link count on an inode & log the change.
+ */
+void
+xfs_bumplink(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
+{
+ struct inode *inode = VFS_I(ip);
+
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+
+ if (inode->i_nlink == XFS_NLINK_PINNED - 1)
+ xfs_info_ratelimited(tp->t_mountp,
+ "Inode 0x%llx link count exceeded maximum. Pinning link count.",
+ ip->i_ino);
+ if (inode->i_nlink != XFS_NLINK_PINNED)
+ inc_nlink(inode);
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+}
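/*
 * Editorial note, not part of the patch: once either helper above has pinned
 * i_nlink at XFS_NLINK_PINNED, neither helper will move it again.  Both
 * xfs_droplink() and xfs_bumplink() test for the sentinel before calling
 * drop_nlink()/inc_nlink(), so a corrupt link count can neither underflow
 * nor wrap while the inode stays in memory; it is logged and then frozen.
 */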
+
+/* Free an inode in the ondisk index and zero it out. */
+int
+xfs_inode_uninit(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_inode *ip,
+ struct xfs_icluster *xic)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ int error;
+
+ /*
+ * Free the inode first so that we guarantee that the AGI lock is going
+ * to be taken before we remove the inode from the unlinked list. This
+ * makes the AGI lock -> unlinked list modification order the same as
+ * used in O_TMPFILE creation.
+ */
+ error = xfs_difree(tp, pag, ip->i_ino, xic);
+ if (error)
+ return error;
+
+ error = xfs_iunlink_remove(tp, pag, ip);
+ if (error)
+ return error;
+
+ /*
+ * Free any local-format data sitting around before we reset the
+ * data fork to extents format. Note that the attr fork data has
+ * already been freed by xfs_attr_inactive.
+ */
+ if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
+ kfree(ip->i_df.if_data);
+ ip->i_df.if_data = NULL;
+ ip->i_df.if_bytes = 0;
+ }
+
+ VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
+ ip->i_diflags = 0;
+ ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
+ ip->i_forkoff = 0; /* mark the attr fork not in use */
+ ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
+
+ /*
+ * Bump the generation count so no one will be confused
+ * by reincarnations of this inode.
+ */
+ VFS_I(ip)->i_generation++;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_inode_util.h b/fs/xfs/libxfs/xfs_inode_util.h
new file mode 100644
index 000000000000..060242998a23
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_inode_util.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#ifndef __XFS_INODE_UTIL_H__
+#define __XFS_INODE_UTIL_H__
+
+struct xfs_icluster;
+
+uint16_t xfs_flags2diflags(struct xfs_inode *ip, unsigned int xflags);
+uint64_t xfs_flags2diflags2(struct xfs_inode *ip, unsigned int xflags);
+uint32_t xfs_dic2xflags(struct xfs_inode *ip);
+uint32_t xfs_ip2xflags(struct xfs_inode *ip);
+
+prid_t xfs_get_initial_prid(struct xfs_inode *dp);
+
+/*
+ * File creation context.
+ *
+ * Due to our only partial reliance on the VFS to propagate uid and gid values
+ * according to accepted Unix behaviors, callers must initialize idmap to the
+ * correct idmapping structure to get the correct inheritance behaviors when
+ * XFS_MOUNT_GRPID is set.
+ *
+ * To create files detached from the directory tree (e.g. quota inodes), set
+ * idmap to NULL. To create a tree root, set pip to NULL.
+ */
+struct xfs_icreate_args {
+ struct mnt_idmap *idmap;
+ struct xfs_inode *pip; /* parent inode or null */
+ dev_t rdev;
+ umode_t mode;
+
+#define XFS_ICREATE_TMPFILE (1U << 0) /* create an unlinked file */
+#define XFS_ICREATE_INIT_XATTRS (1U << 1) /* will set xattrs immediately */
+#define XFS_ICREATE_UNLINKABLE (1U << 2) /* cannot link into dir tree */
+ uint16_t flags;
+};
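/*
 * Illustrative aside, not part of the patch: one way a caller might populate
 * the creation context above before handing it to xfs_inode_init().  The
 * helper name is hypothetical; quota reservation and transaction setup are
 * deliberately omitted.
 */
static void
example_init_icreate_args(
	struct xfs_icreate_args	*args,
	struct mnt_idmap	*idmap,
	struct xfs_inode	*dp,
	umode_t			mode)
{
	memset(args, 0, sizeof(*args));
	args->idmap = idmap;	/* NULL => detached file, e.g. a quota inode */
	args->pip = dp;		/* NULL => creating a tree root */
	args->rdev = 0;
	args->mode = mode;
	args->flags = 0;	/* or XFS_ICREATE_TMPFILE for O_TMPFILE */
}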
+
+/*
+ * Flags for xfs_trans_ichgtime().
+ */
+#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */
+#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */
+#define XFS_ICHGTIME_CREATE 0x4 /* inode create timestamp */
+#define XFS_ICHGTIME_ACCESS 0x8 /* last access timestamp */
+void xfs_trans_ichgtime(struct xfs_trans *tp, struct xfs_inode *ip, int flags);
+
+void xfs_inode_init(struct xfs_trans *tp, const struct xfs_icreate_args *args,
+ struct xfs_inode *ip);
+
+int xfs_inode_uninit(struct xfs_trans *tp, struct xfs_perag *pag,
+ struct xfs_inode *ip, struct xfs_icluster *xic);
+
+int xfs_iunlink(struct xfs_trans *tp, struct xfs_inode *ip);
+int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
+ struct xfs_inode *ip);
+int xfs_droplink(struct xfs_trans *tp, struct xfs_inode *ip);
+void xfs_bumplink(struct xfs_trans *tp, struct xfs_inode *ip);
+
+#endif /* __XFS_INODE_UTIL_H__ */
diff --git a/fs/xfs/libxfs/xfs_ondisk.h b/fs/xfs/libxfs/xfs_ondisk.h
index e8cdd77d03fa..23c133fd36f5 100644
--- a/fs/xfs/libxfs/xfs_ondisk.h
+++ b/fs/xfs/libxfs/xfs_ondisk.h
@@ -85,6 +85,7 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(xfs_attr_leaf_name_remote_t, 12);
*/
+ XFS_CHECK_OFFSET(struct xfs_dsb, sb_crc, 224);
XFS_CHECK_OFFSET(xfs_attr_leaf_name_local_t, valuelen, 0);
XFS_CHECK_OFFSET(xfs_attr_leaf_name_local_t, namelen, 2);
XFS_CHECK_OFFSET(xfs_attr_leaf_name_local_t, nameval, 3);
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 511c912d515c..198b84117df1 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -24,6 +24,7 @@
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_health.h"
+#include "xfs_refcount_item.h"
struct kmem_cache *xfs_refcount_intent_cache;
@@ -51,7 +52,7 @@ xfs_refcount_lookup_le(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno,
+ trace_xfs_refcount_lookup(cur,
xfs_refcount_encode_startblock(bno, domain),
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
@@ -71,7 +72,7 @@ xfs_refcount_lookup_ge(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno,
+ trace_xfs_refcount_lookup(cur,
xfs_refcount_encode_startblock(bno, domain),
XFS_LOOKUP_GE);
cur->bc_rec.rc.rc_startblock = bno;
@@ -91,7 +92,7 @@ xfs_refcount_lookup_eq(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno,
+ trace_xfs_refcount_lookup(cur,
xfs_refcount_encode_startblock(bno, domain),
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
@@ -183,7 +184,7 @@ xfs_refcount_get_rec(
if (fa)
return xfs_refcount_complain_bad_rec(cur, fa, irec);
- trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
+ trace_xfs_refcount_get(cur, irec);
return 0;
}
@@ -201,7 +202,7 @@ xfs_refcount_update(
uint32_t start;
int error;
- trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
+ trace_xfs_refcount_update(cur, irec);
start = xfs_refcount_encode_startblock(irec->rc_startblock,
irec->rc_domain);
@@ -211,8 +212,7 @@ xfs_refcount_update(
error = xfs_btree_update(cur, &rec);
if (error)
- trace_xfs_refcount_update_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_update_error(cur, error, _RET_IP_);
return error;
}
@@ -229,7 +229,7 @@ xfs_refcount_insert(
{
int error;
- trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
+ trace_xfs_refcount_insert(cur, irec);
cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
@@ -247,8 +247,7 @@ xfs_refcount_insert(
out_error:
if (error)
- trace_xfs_refcount_insert_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_insert_error(cur, error, _RET_IP_);
return error;
}
@@ -275,7 +274,7 @@ xfs_refcount_delete(
error = -EFSCORRUPTED;
goto out_error;
}
- trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.pag->pag_agno, &irec);
+ trace_xfs_refcount_delete(cur, &irec);
error = xfs_btree_delete(cur, i);
if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
xfs_btree_mark_sick(cur);
@@ -288,8 +287,7 @@ xfs_refcount_delete(
&found_rec);
out_error:
if (error)
- trace_xfs_refcount_delete_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_delete_error(cur, error, _RET_IP_);
return error;
}
@@ -413,8 +411,7 @@ xfs_refcount_split_extent(
return 0;
*shape_changed = true;
- trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- &rcext, agbno);
+ trace_xfs_refcount_split_extent(cur, &rcext, agbno);
/* Establish the right extent. */
tmp = rcext;
@@ -438,8 +435,7 @@ xfs_refcount_split_extent(
return error;
out_error:
- trace_xfs_refcount_split_extent_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_split_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -458,8 +454,7 @@ xfs_refcount_merge_center_extents(
int error;
int found_rec;
- trace_xfs_refcount_merge_center_extents(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, left, center, right);
+ trace_xfs_refcount_merge_center_extents(cur, left, center, right);
ASSERT(left->rc_domain == center->rc_domain);
ASSERT(right->rc_domain == center->rc_domain);
@@ -522,8 +517,7 @@ xfs_refcount_merge_center_extents(
return error;
out_error:
- trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_merge_center_extents_error(cur, error, _RET_IP_);
return error;
}
@@ -541,8 +535,7 @@ xfs_refcount_merge_left_extent(
int error;
int found_rec;
- trace_xfs_refcount_merge_left_extent(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, left, cleft);
+ trace_xfs_refcount_merge_left_extent(cur, left, cleft);
ASSERT(left->rc_domain == cleft->rc_domain);
@@ -589,8 +582,7 @@ xfs_refcount_merge_left_extent(
return error;
out_error:
- trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_merge_left_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -607,8 +599,7 @@ xfs_refcount_merge_right_extent(
int error;
int found_rec;
- trace_xfs_refcount_merge_right_extent(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, cright, right);
+ trace_xfs_refcount_merge_right_extent(cur, cright, right);
ASSERT(right->rc_domain == cright->rc_domain);
@@ -658,8 +649,7 @@ xfs_refcount_merge_right_extent(
return error;
out_error:
- trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_merge_right_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -748,13 +738,11 @@ not_found:
cleft->rc_refcount = 1;
cleft->rc_domain = domain;
}
- trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- left, cleft, agbno);
+ trace_xfs_refcount_find_left_extent(cur, left, cleft, agbno);
return error;
out_error:
- trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_find_left_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -843,13 +831,12 @@ not_found:
cright->rc_refcount = 1;
cright->rc_domain = domain;
}
- trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- cright, right, agbno + aglen);
+ trace_xfs_refcount_find_right_extent(cur, cright, right,
+ agbno + aglen);
return error;
out_error:
- trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_find_right_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -1148,8 +1135,7 @@ xfs_refcount_adjust_extents(
tmp.rc_refcount = 1 + adj;
tmp.rc_domain = XFS_REFC_DOMAIN_SHARED;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, &tmp);
+ trace_xfs_refcount_modify_extent(cur, &tmp);
/*
* Either cover the hole (increment) or
@@ -1173,7 +1159,7 @@ xfs_refcount_adjust_extents(
tmp.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
tmp.rc_blockcount, NULL,
- XFS_AG_RESV_NONE, false);
+ XFS_AG_RESV_NONE, 0);
if (error)
goto out_error;
}
@@ -1214,8 +1200,7 @@ xfs_refcount_adjust_extents(
if (ext.rc_refcount == MAXREFCOUNT)
goto skip;
ext.rc_refcount += adj;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, &ext);
+ trace_xfs_refcount_modify_extent(cur, &ext);
cur->bc_refc.nr_ops++;
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
@@ -1237,7 +1222,7 @@ xfs_refcount_adjust_extents(
ext.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
ext.rc_blockcount, NULL,
- XFS_AG_RESV_NONE, false);
+ XFS_AG_RESV_NONE, 0);
if (error)
goto out_error;
}
@@ -1254,8 +1239,7 @@ advloop:
return error;
out_error:
- trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -1272,11 +1256,9 @@ xfs_refcount_adjust(
int error;
if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
- trace_xfs_refcount_increase(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, *agbno, *aglen);
+ trace_xfs_refcount_increase(cur, *agbno, *aglen);
else
- trace_xfs_refcount_decrease(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, *agbno, *aglen);
+ trace_xfs_refcount_decrease(cur, *agbno, *aglen);
/*
* Ensure that no rcextents cross the boundary of the adjustment range.
@@ -1315,28 +1297,10 @@ xfs_refcount_adjust(
return 0;
out_error:
- trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- error, _RET_IP_);
+ trace_xfs_refcount_adjust_error(cur, error, _RET_IP_);
return error;
}
-/* Clean up after calling xfs_refcount_finish_one. */
-void
-xfs_refcount_finish_one_cleanup(
- struct xfs_trans *tp,
- struct xfs_btree_cur *rcur,
- int error)
-{
- struct xfs_buf *agbp;
-
- if (rcur == NULL)
- return;
- agbp = rcur->bc_ag.agbp;
- xfs_btree_del_cursor(rcur, error);
- if (error)
- xfs_trans_brelse(tp, agbp);
-}
-
/*
* Set up a continuation a deferred refcount operation by updating the intent.
* Checks to make sure we're not going to run off the end of the AG.
@@ -1378,7 +1342,7 @@ xfs_refcount_finish_one(
struct xfs_btree_cur **pcur)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_btree_cur *rcur;
+ struct xfs_btree_cur *rcur = *pcur;
struct xfs_buf *agbp = NULL;
int error = 0;
xfs_agblock_t bno;
@@ -1387,9 +1351,7 @@ xfs_refcount_finish_one(
bno = XFS_FSB_TO_AGBNO(mp, ri->ri_startblock);
- trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, ri->ri_startblock),
- ri->ri_type, XFS_FSB_TO_AGBNO(mp, ri->ri_startblock),
- ri->ri_blockcount);
+ trace_xfs_refcount_deferred(mp, ri);
if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
return -EIO;
@@ -1398,11 +1360,10 @@ xfs_refcount_finish_one(
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
- rcur = *pcur;
if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
nr_ops = rcur->bc_refc.nr_ops;
shape_changes = rcur->bc_refc.shape_changes;
- xfs_refcount_finish_one_cleanup(tp, rcur, 0);
+ xfs_btree_del_cursor(rcur, 0);
rcur = NULL;
*pcur = NULL;
}
@@ -1412,11 +1373,11 @@ xfs_refcount_finish_one(
if (error)
return error;
- rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, ri->ri_pag);
+ *pcur = rcur = xfs_refcountbt_init_cursor(mp, tp, agbp,
+ ri->ri_pag);
rcur->bc_refc.nr_ops = nr_ops;
rcur->bc_refc.shape_changes = shape_changes;
}
- *pcur = rcur;
switch (ri->ri_type) {
case XFS_REFCOUNT_INCREASE:
@@ -1452,8 +1413,7 @@ xfs_refcount_finish_one(
return -EFSCORRUPTED;
}
if (!error && ri->ri_blockcount > 0)
- trace_xfs_refcount_finish_one_leftover(mp, ri->ri_pag->pag_agno,
- ri->ri_type, bno, ri->ri_blockcount);
+ trace_xfs_refcount_finish_one_leftover(mp, ri);
return error;
}
@@ -1469,11 +1429,6 @@ __xfs_refcount_add(
{
struct xfs_refcount_intent *ri;
- trace_xfs_refcount_defer(tp->t_mountp,
- XFS_FSB_TO_AGNO(tp->t_mountp, startblock),
- type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
- blockcount);
-
ri = kmem_cache_alloc(xfs_refcount_intent_cache,
GFP_KERNEL | __GFP_NOFAIL);
INIT_LIST_HEAD(&ri->ri_list);
@@ -1481,8 +1436,7 @@ __xfs_refcount_add(
ri->ri_startblock = startblock;
ri->ri_blockcount = blockcount;
- xfs_refcount_update_get_group(tp->t_mountp, ri);
- xfs_defer_add(tp, &ri->ri_list, &xfs_refcount_update_defer_type);
+ xfs_refcount_defer_add(tp, ri);
}
/*
@@ -1537,8 +1491,7 @@ xfs_refcount_find_shared(
int have;
int error;
- trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- agbno, aglen);
+ trace_xfs_refcount_find_shared(cur, agbno, aglen);
/* By default, skip the whole range */
*fbno = NULLAGBLOCK;
@@ -1625,13 +1578,11 @@ xfs_refcount_find_shared(
}
done:
- trace_xfs_refcount_find_shared_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, *fbno, *flen);
+ trace_xfs_refcount_find_shared_result(cur, *fbno, *flen);
out_error:
if (error)
- trace_xfs_refcount_find_shared_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_find_shared_error(cur, error, _RET_IP_);
return error;
}
@@ -1737,8 +1688,7 @@ xfs_refcount_adjust_cow_extents(
tmp.rc_refcount = 1;
tmp.rc_domain = XFS_REFC_DOMAIN_COW;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, &tmp);
+ trace_xfs_refcount_modify_extent(cur, &tmp);
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
@@ -1769,8 +1719,7 @@ xfs_refcount_adjust_cow_extents(
}
ext.rc_refcount = 0;
- trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, &ext);
+ trace_xfs_refcount_modify_extent(cur, &ext);
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
@@ -1786,8 +1735,7 @@ xfs_refcount_adjust_cow_extents(
return error;
out_error:
- trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
return error;
}
@@ -1833,8 +1781,7 @@ xfs_refcount_adjust_cow(
return 0;
out_error:
- trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- error, _RET_IP_);
+ trace_xfs_refcount_adjust_cow_error(cur, error, _RET_IP_);
return error;
}
@@ -1847,8 +1794,7 @@ __xfs_refcount_cow_alloc(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
- agbno, aglen);
+ trace_xfs_refcount_cow_increase(rcur, agbno, aglen);
/* Add refcount btree reservation */
return xfs_refcount_adjust_cow(rcur, agbno, aglen,
@@ -1864,8 +1810,7 @@ __xfs_refcount_cow_free(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
- agbno, aglen);
+ trace_xfs_refcount_cow_decrease(rcur, agbno, aglen);
/* Remove refcount btree reservation */
return xfs_refcount_adjust_cow(rcur, agbno, aglen,
@@ -2010,9 +1955,6 @@ xfs_refcount_recover_cow_leftovers(
if (error)
goto out_free;
- trace_xfs_refcount_recover_extent(mp, pag->pag_agno,
- &rr->rr_rrec);
-
/* Free the orphan record */
fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno,
rr->rr_rrec.rc_startblock);
@@ -2022,7 +1964,7 @@ xfs_refcount_recover_cow_leftovers(
/* Free the block. */
error = xfs_free_extent_later(tp, fsb,
rr->rr_rrec.rc_blockcount, NULL,
- XFS_AG_RESV_NONE, false);
+ XFS_AG_RESV_NONE, 0);
if (error)
goto out_trans;
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 9b56768a590c..68acb0b1b4a8 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -48,6 +48,12 @@ enum xfs_refcount_intent_type {
XFS_REFCOUNT_FREE_COW,
};
+#define XFS_REFCOUNT_INTENT_STRINGS \
+ { XFS_REFCOUNT_INCREASE, "incr" }, \
+ { XFS_REFCOUNT_DECREASE, "decr" }, \
+ { XFS_REFCOUNT_ALLOC_COW, "alloc_cow" }, \
+ { XFS_REFCOUNT_FREE_COW, "free_cow" }
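/*
 * Editorial note, not part of the patch: the value/name pairs above follow
 * the convention used by the tracing helper __print_symbolic(), e.g. in a
 * TP_printk() format string:
 *
 *	__print_symbolic(__entry->type, XFS_REFCOUNT_INTENT_STRINGS)
 */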
+
struct xfs_refcount_intent {
struct list_head ri_list;
struct xfs_perag *ri_pag;
@@ -68,16 +74,11 @@ xfs_refcount_check_domain(
return true;
}
-void xfs_refcount_update_get_group(struct xfs_mount *mp,
- struct xfs_refcount_intent *ri);
-
void xfs_refcount_increase_extent(struct xfs_trans *tp,
struct xfs_bmbt_irec *irec);
void xfs_refcount_decrease_extent(struct xfs_trans *tp,
struct xfs_bmbt_irec *irec);
-extern void xfs_refcount_finish_one_cleanup(struct xfs_trans *tp,
- struct xfs_btree_cur *rcur, int error);
extern int xfs_refcount_finish_one(struct xfs_trans *tp,
struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index ca59f6c89f3e..cb3b1d42ae9a 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -109,7 +109,7 @@ xfs_refcountbt_free_block(
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
- &XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA, false);
+ &XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA, 0);
}
STATIC int
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index ef16f6f9cef6..6ef4687b3aba 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -24,6 +24,7 @@
#include "xfs_inode.h"
#include "xfs_ag.h"
#include "xfs_health.h"
+#include "xfs_rmap_item.h"
struct kmem_cache *xfs_rmap_intent_cache;
@@ -100,8 +101,7 @@ xfs_rmap_update(
union xfs_btree_rec rec;
int error;
- trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- irec->rm_startblock, irec->rm_blockcount,
+ trace_xfs_rmap_update(cur, irec->rm_startblock, irec->rm_blockcount,
irec->rm_owner, irec->rm_offset, irec->rm_flags);
rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
@@ -111,8 +111,7 @@ xfs_rmap_update(
xfs_rmap_irec_offset_pack(irec));
error = xfs_btree_update(cur, &rec);
if (error)
- trace_xfs_rmap_update_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_rmap_update_error(cur, error, _RET_IP_);
return error;
}
@@ -128,8 +127,7 @@ xfs_rmap_insert(
int i;
int error;
- trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
- len, owner, offset, flags);
+ trace_xfs_rmap_insert(rcur, agbno, len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
if (error)
@@ -155,8 +153,7 @@ xfs_rmap_insert(
}
done:
if (error)
- trace_xfs_rmap_insert_error(rcur->bc_mp,
- rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_rmap_insert_error(rcur, error, _RET_IP_);
return error;
}
@@ -172,8 +169,7 @@ xfs_rmap_delete(
int i;
int error;
- trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
- len, owner, offset, flags);
+ trace_xfs_rmap_delete(rcur, agbno, len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
if (error)
@@ -194,8 +190,7 @@ xfs_rmap_delete(
}
done:
if (error)
- trace_xfs_rmap_delete_error(rcur->bc_mp,
- rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_rmap_delete_error(rcur, error, _RET_IP_);
return error;
}
@@ -342,8 +337,7 @@ xfs_rmap_find_left_neighbor_helper(
{
struct xfs_find_left_neighbor_info *info = priv;
- trace_xfs_rmap_find_left_neighbor_candidate(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, rec->rm_startblock,
+ trace_xfs_rmap_find_left_neighbor_candidate(cur, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -393,8 +387,8 @@ xfs_rmap_find_left_neighbor(
info.high.rm_blockcount = 0;
info.irec = irec;
- trace_xfs_rmap_find_left_neighbor_query(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, bno, 0, owner, offset, flags);
+ trace_xfs_rmap_find_left_neighbor_query(cur, bno, 0, owner, offset,
+ flags);
/*
* Historically, we always used the range query to walk every reverse
@@ -425,8 +419,7 @@ xfs_rmap_find_left_neighbor(
return error;
*stat = 1;
- trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, irec->rm_startblock,
+ trace_xfs_rmap_find_left_neighbor_result(cur, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
irec->rm_flags);
return 0;
@@ -441,8 +434,7 @@ xfs_rmap_lookup_le_range_helper(
{
struct xfs_find_left_neighbor_info *info = priv;
- trace_xfs_rmap_lookup_le_range_candidate(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, rec->rm_startblock,
+ trace_xfs_rmap_lookup_le_range_candidate(cur, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -489,8 +481,7 @@ xfs_rmap_lookup_le_range(
*stat = 0;
info.irec = irec;
- trace_xfs_rmap_lookup_le_range(cur->bc_mp, cur->bc_ag.pag->pag_agno,
- bno, 0, owner, offset, flags);
+ trace_xfs_rmap_lookup_le_range(cur, bno, 0, owner, offset, flags);
/*
* Historically, we always used the range query to walk every reverse
@@ -521,8 +512,7 @@ xfs_rmap_lookup_le_range(
return error;
*stat = 1;
- trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, irec->rm_startblock,
+ trace_xfs_rmap_lookup_le_range_result(cur, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
irec->rm_flags);
return 0;
@@ -634,8 +624,7 @@ xfs_rmap_unmap(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
/*
* We should always have a left record because there's a static record
@@ -651,10 +640,9 @@ xfs_rmap_unmap(
goto out_error;
}
- trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
- ltrec.rm_blockcount, ltrec.rm_owner,
- ltrec.rm_offset, ltrec.rm_flags);
+ trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner, ltrec.rm_offset,
+ ltrec.rm_flags);
ltoff = ltrec.rm_offset;
/*
@@ -721,10 +709,9 @@ xfs_rmap_unmap(
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
/* exact match, simply remove the record from rmap tree */
- trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
- ltrec.rm_startblock, ltrec.rm_blockcount,
- ltrec.rm_owner, ltrec.rm_offset,
- ltrec.rm_flags);
+ trace_xfs_rmap_delete(cur, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner,
+ ltrec.rm_offset, ltrec.rm_flags);
error = xfs_btree_delete(cur, &i);
if (error)
goto out_error;
@@ -800,8 +787,7 @@ xfs_rmap_unmap(
else
cur->bc_rec.r.rm_offset = offset + len;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
- cur->bc_rec.r.rm_startblock,
+ trace_xfs_rmap_insert(cur, cur->bc_rec.r.rm_startblock,
cur->bc_rec.r.rm_blockcount,
cur->bc_rec.r.rm_owner,
cur->bc_rec.r.rm_offset,
@@ -812,12 +798,10 @@ xfs_rmap_unmap(
}
out_done:
- trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_unmap_error(mp, cur->bc_ag.pag->pag_agno,
- error, _RET_IP_);
+ trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
return error;
}
@@ -987,8 +971,7 @@ xfs_rmap_map(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
/*
@@ -1001,8 +984,7 @@ xfs_rmap_map(
if (error)
goto out_error;
if (have_lt) {
- trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
+ trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
@@ -1040,10 +1022,10 @@ xfs_rmap_map(
error = -EFSCORRUPTED;
goto out_error;
}
- trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
- gtrec.rm_blockcount, gtrec.rm_owner,
- gtrec.rm_offset, gtrec.rm_flags);
+ trace_xfs_rmap_find_right_neighbor_result(cur,
+ gtrec.rm_startblock, gtrec.rm_blockcount,
+ gtrec.rm_owner, gtrec.rm_offset,
+ gtrec.rm_flags);
if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
have_gt = 0;
}
@@ -1080,12 +1062,9 @@ xfs_rmap_map(
* result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
*/
ltrec.rm_blockcount += gtrec.rm_blockcount;
- trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
- gtrec.rm_startblock,
- gtrec.rm_blockcount,
- gtrec.rm_owner,
- gtrec.rm_offset,
- gtrec.rm_flags);
+ trace_xfs_rmap_delete(cur, gtrec.rm_startblock,
+ gtrec.rm_blockcount, gtrec.rm_owner,
+ gtrec.rm_offset, gtrec.rm_flags);
error = xfs_btree_delete(cur, &i);
if (error)
goto out_error;
@@ -1132,8 +1111,7 @@ xfs_rmap_map(
cur->bc_rec.r.rm_owner = owner;
cur->bc_rec.r.rm_offset = offset;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
- owner, offset, flags);
+ trace_xfs_rmap_insert(cur, bno, len, owner, offset, flags);
error = xfs_btree_insert(cur, &i);
if (error)
goto out_error;
@@ -1144,12 +1122,10 @@ xfs_rmap_map(
}
}
- trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_map_error(mp, cur->bc_ag.pag->pag_agno,
- error, _RET_IP_);
+ trace_xfs_rmap_map_error(cur, error, _RET_IP_);
return error;
}
@@ -1223,8 +1199,7 @@ xfs_rmap_convert(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
/*
* For the initial lookup, look for an exact match or the left-adjacent
@@ -1240,10 +1215,9 @@ xfs_rmap_convert(
goto done;
}
- trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, PREV.rm_startblock,
- PREV.rm_blockcount, PREV.rm_owner,
- PREV.rm_offset, PREV.rm_flags);
+ trace_xfs_rmap_lookup_le_range_result(cur, PREV.rm_startblock,
+ PREV.rm_blockcount, PREV.rm_owner, PREV.rm_offset,
+ PREV.rm_flags);
ASSERT(PREV.rm_offset <= offset);
ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
@@ -1284,10 +1258,9 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, LEFT.rm_startblock,
- LEFT.rm_blockcount, LEFT.rm_owner,
- LEFT.rm_offset, LEFT.rm_flags);
+ trace_xfs_rmap_find_left_neighbor_result(cur,
+ LEFT.rm_startblock, LEFT.rm_blockcount,
+ LEFT.rm_owner, LEFT.rm_offset, LEFT.rm_flags);
if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
LEFT.rm_offset + LEFT.rm_blockcount == offset &&
xfs_rmap_is_mergeable(&LEFT, owner, newext))
@@ -1325,10 +1298,10 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
- RIGHT.rm_blockcount, RIGHT.rm_owner,
- RIGHT.rm_offset, RIGHT.rm_flags);
+ trace_xfs_rmap_find_right_neighbor_result(cur,
+ RIGHT.rm_startblock, RIGHT.rm_blockcount,
+ RIGHT.rm_owner, RIGHT.rm_offset,
+ RIGHT.rm_flags);
if (bno + len == RIGHT.rm_startblock &&
offset + len == RIGHT.rm_offset &&
xfs_rmap_is_mergeable(&RIGHT, owner, newext))
@@ -1344,8 +1317,7 @@ xfs_rmap_convert(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
- _RET_IP_);
+ trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
/* reset the cursor back to PREV */
error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, NULL, &i);
@@ -1376,10 +1348,9 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
- RIGHT.rm_startblock, RIGHT.rm_blockcount,
- RIGHT.rm_owner, RIGHT.rm_offset,
- RIGHT.rm_flags);
+ trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
+ RIGHT.rm_blockcount, RIGHT.rm_owner,
+ RIGHT.rm_offset, RIGHT.rm_flags);
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
@@ -1396,10 +1367,9 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
- PREV.rm_startblock, PREV.rm_blockcount,
- PREV.rm_owner, PREV.rm_offset,
- PREV.rm_flags);
+ trace_xfs_rmap_delete(cur, PREV.rm_startblock,
+ PREV.rm_blockcount, PREV.rm_owner,
+ PREV.rm_offset, PREV.rm_flags);
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
@@ -1428,10 +1398,9 @@ xfs_rmap_convert(
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
- PREV.rm_startblock, PREV.rm_blockcount,
- PREV.rm_owner, PREV.rm_offset,
- PREV.rm_flags);
+ trace_xfs_rmap_delete(cur, PREV.rm_startblock,
+ PREV.rm_blockcount, PREV.rm_owner,
+ PREV.rm_offset, PREV.rm_flags);
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
@@ -1468,10 +1437,9 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
- RIGHT.rm_startblock, RIGHT.rm_blockcount,
- RIGHT.rm_owner, RIGHT.rm_offset,
- RIGHT.rm_flags);
+ trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
+ RIGHT.rm_blockcount, RIGHT.rm_owner,
+ RIGHT.rm_offset, RIGHT.rm_flags);
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
@@ -1549,8 +1517,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
- len, owner, offset, newext);
+ trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
@@ -1608,8 +1575,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
- len, owner, offset, newext);
+ trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
@@ -1640,9 +1606,8 @@ xfs_rmap_convert(
NEW = PREV;
NEW.rm_blockcount = offset - PREV.rm_offset;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
- NEW.rm_startblock, NEW.rm_blockcount,
- NEW.rm_owner, NEW.rm_offset,
+ trace_xfs_rmap_insert(cur, NEW.rm_startblock,
+ NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
NEW.rm_flags);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1669,8 +1634,7 @@ xfs_rmap_convert(
/* new middle extent - newext */
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
cur->bc_rec.r.rm_flags |= newext;
- trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
- owner, offset, newext);
+ trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
@@ -1694,12 +1658,10 @@ xfs_rmap_convert(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
done:
if (error)
- trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
return error;
}
@@ -1735,8 +1697,7 @@ xfs_rmap_convert_shared(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
/*
 * For the initial lookup, look for an exact match or the left-adjacent
@@ -1805,10 +1766,10 @@ xfs_rmap_convert_shared(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
- RIGHT.rm_blockcount, RIGHT.rm_owner,
- RIGHT.rm_offset, RIGHT.rm_flags);
+ trace_xfs_rmap_find_right_neighbor_result(cur,
+ RIGHT.rm_startblock, RIGHT.rm_blockcount,
+ RIGHT.rm_owner, RIGHT.rm_offset,
+ RIGHT.rm_flags);
if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
state |= RMAP_RIGHT_CONTIG;
}
@@ -1822,8 +1783,7 @@ xfs_rmap_convert_shared(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
- _RET_IP_);
+ trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
/*
* Switch out based on the FILLING and CONTIG state bits.
*/
@@ -2121,12 +2081,10 @@ xfs_rmap_convert_shared(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
done:
if (error)
- trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
return error;
}
@@ -2164,8 +2122,7 @@ xfs_rmap_unmap_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
/*
* We should always have a left record because there's a static record
@@ -2321,12 +2278,10 @@ xfs_rmap_unmap_shared(
goto out_error;
}
- trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_unmap_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
return error;
}
@@ -2361,8 +2316,7 @@ xfs_rmap_map_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
/* Is there a left record that abuts our range? */
error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, flags,
@@ -2387,10 +2341,10 @@ xfs_rmap_map_shared(
error = -EFSCORRUPTED;
goto out_error;
}
- trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
- gtrec.rm_blockcount, gtrec.rm_owner,
- gtrec.rm_offset, gtrec.rm_flags);
+ trace_xfs_rmap_find_right_neighbor_result(cur,
+ gtrec.rm_startblock, gtrec.rm_blockcount,
+ gtrec.rm_owner, gtrec.rm_offset,
+ gtrec.rm_flags);
if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
have_gt = 0;
@@ -2482,12 +2436,10 @@ xfs_rmap_map_shared(
goto out_error;
}
- trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
- unwritten, oinfo);
+ trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_map_error(cur->bc_mp,
- cur->bc_ag.pag->pag_agno, error, _RET_IP_);
+ trace_xfs_rmap_map_error(cur, error, _RET_IP_);
return error;
}
@@ -2572,23 +2524,6 @@ xfs_rmap_query_all(
return xfs_btree_query_all(cur, xfs_rmap_query_range_helper, &query);
}
-/* Clean up after calling xfs_rmap_finish_one. */
-void
-xfs_rmap_finish_one_cleanup(
- struct xfs_trans *tp,
- struct xfs_btree_cur *rcur,
- int error)
-{
- struct xfs_buf *agbp;
-
- if (rcur == NULL)
- return;
- agbp = rcur->bc_ag.agbp;
- xfs_btree_del_cursor(rcur, error);
- if (error)
- xfs_trans_brelse(tp, agbp);
-}
-
/* Commit an rmap operation into the ondisk tree. */
int
__xfs_rmap_finish_intent(
@@ -2634,20 +2569,15 @@ xfs_rmap_finish_one(
struct xfs_rmap_intent *ri,
struct xfs_btree_cur **pcur)
{
+ struct xfs_owner_info oinfo;
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_btree_cur *rcur;
+ struct xfs_btree_cur *rcur = *pcur;
struct xfs_buf *agbp = NULL;
- int error = 0;
- struct xfs_owner_info oinfo;
xfs_agblock_t bno;
bool unwritten;
+ int error = 0;
- bno = XFS_FSB_TO_AGBNO(mp, ri->ri_bmap.br_startblock);
-
- trace_xfs_rmap_deferred(mp, ri->ri_pag->pag_agno, ri->ri_type, bno,
- ri->ri_owner, ri->ri_whichfork,
- ri->ri_bmap.br_startoff, ri->ri_bmap.br_blockcount,
- ri->ri_bmap.br_state);
+ trace_xfs_rmap_deferred(mp, ri);
if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE))
return -EIO;
@@ -2656,9 +2586,8 @@ xfs_rmap_finish_one(
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
- rcur = *pcur;
if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
- xfs_rmap_finish_one_cleanup(tp, rcur, 0);
+ xfs_btree_del_cursor(rcur, 0);
rcur = NULL;
*pcur = NULL;
}
@@ -2678,9 +2607,8 @@ xfs_rmap_finish_one(
return -EFSCORRUPTED;
}
- rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag);
+ *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag);
}
- *pcur = rcur;
xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
ri->ri_bmap.br_startoff);
@@ -2722,15 +2650,6 @@ __xfs_rmap_add(
{
struct xfs_rmap_intent *ri;
- trace_xfs_rmap_defer(tp->t_mountp,
- XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
- type,
- XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
- owner, whichfork,
- bmap->br_startoff,
- bmap->br_blockcount,
- bmap->br_state);
-
ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
@@ -2738,8 +2657,7 @@ __xfs_rmap_add(
ri->ri_whichfork = whichfork;
ri->ri_bmap = *bmap;
- xfs_rmap_update_get_group(tp->t_mountp, ri);
- xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
+ xfs_rmap_defer_add(tp, ri);
}
/* Map an extent into a file. */
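
The tracepoint conversions throughout this file all hinge on the trace event classes deriving the mount and AG number from the btree cursor themselves, which is why every call site above shrinks to trace_xfs_rmap_*(cur, ...). A minimal sketch of what such a class could look like, assuming the cursor layout used in these hunks; the class name and field set are illustrative, not the patch's actual trace.h change:

DECLARE_EVENT_CLASS(xfs_rmap_cur_class_sketch,
	TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno,
		 xfs_extlen_t len),
	TP_ARGS(cur, agbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		/* derive dev and agno from the cursor instead of the caller */
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->agbno = agbno;
		__entry->len = len;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno, __entry->agbno, __entry->len)
);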
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 9d01fe689497..b783dd4dd95d 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -157,6 +157,16 @@ enum xfs_rmap_intent_type {
XFS_RMAP_FREE,
};
+#define XFS_RMAP_INTENT_STRINGS \
+ { XFS_RMAP_MAP, "map" }, \
+ { XFS_RMAP_MAP_SHARED, "map_shared" }, \
+ { XFS_RMAP_UNMAP, "unmap" }, \
+ { XFS_RMAP_UNMAP_SHARED, "unmap_shared" }, \
+ { XFS_RMAP_CONVERT, "cvt" }, \
+ { XFS_RMAP_CONVERT_SHARED, "cvt_shared" }, \
+ { XFS_RMAP_ALLOC, "alloc" }, \
+ { XFS_RMAP_FREE, "free" }
+
struct xfs_rmap_intent {
struct list_head ri_list;
enum xfs_rmap_intent_type ri_type;
@@ -166,9 +176,6 @@ struct xfs_rmap_intent {
struct xfs_perag *ri_pag;
};
-void xfs_rmap_update_get_group(struct xfs_mount *mp,
- struct xfs_rmap_intent *ri);
-
/* functions for updating the rmapbt based on bmbt map/unmap operations */
void xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, struct xfs_bmbt_irec *imap);
@@ -182,8 +189,6 @@ void xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
void xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
-void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
- struct xfs_btree_cur *rcur, int error);
int xfs_rmap_finish_one(struct xfs_trans *tp, struct xfs_rmap_intent *ri,
struct xfs_btree_cur **pcur);
int __xfs_rmap_finish_intent(struct xfs_btree_cur *rcur,
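
XFS_RMAP_INTENT_STRINGS above is shaped for __print_symbolic() decoding in tracepoints, but the same initializer list can back a trivial lookup table. A hedged sketch; xfs_rmap_intent_name() is a hypothetical helper, not something this patch adds:

static const char *
xfs_rmap_intent_name(
	enum xfs_rmap_intent_type	type)
{
	static const struct {
		enum xfs_rmap_intent_type	type;
		const char			*name;
	} strings[] = { XFS_RMAP_INTENT_STRINGS };
	unsigned int			i;

	for (i = 0; i < ARRAY_SIZE(strings); i++)
		if (strings[i].type == type)
			return strings[i].name;
	return "unknown";
}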
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 9e759efa81cc..56fd6c4bd8b4 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -88,6 +88,7 @@ xfs_rmapbt_alloc_block(
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
struct xfs_perag *pag = cur->bc_ag.pag;
+ struct xfs_alloc_arg args = { .len = 1 };
int error;
xfs_agblock_t bno;
@@ -107,7 +108,11 @@ xfs_rmapbt_alloc_block(
be32_add_cpu(&agf->agf_rmap_blocks, 1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
- xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);
+ /*
+ * Since rmapbt blocks are sourced from the AGFL, they are allocated one
+ * at a time and the reservation updates don't require a transaction.
+ */
+ xfs_ag_resv_alloc_extent(pag, XFS_AG_RESV_RMAPBT, &args);
*stat = 1;
return 0;
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 09e4bf949bf8..6b56f0f6d4c1 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1038,11 +1038,12 @@ xfs_log_sb(
* and hence we don't need have to update it here.
*/
if (xfs_has_lazysbcount(mp)) {
- mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
+ mp->m_sb.sb_icount = percpu_counter_sum_positive(&mp->m_icount);
mp->m_sb.sb_ifree = min_t(uint64_t,
- percpu_counter_sum(&mp->m_ifree),
+ percpu_counter_sum_positive(&mp->m_ifree),
mp->m_sb.sb_icount);
- mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
+ mp->m_sb.sb_fdblocks =
+ percpu_counter_sum_positive(&mp->m_fdblocks);
}
xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
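
A brief illustration of the semantics the hunk above relies on: percpu_counter_sum() can transiently return a negative value while per-CPU deltas are in flight, whereas the _positive variant from <linux/percpu_counter.h> clamps at zero, which is what the unsigned on-disk superblock fields want. The helper name is illustrative:

static inline uint64_t
example_sb_counter_snapshot(
	struct percpu_counter	*counter)
{
	/* never returns a negative sum, unlike percpu_counter_sum() */
	return percpu_counter_sum_positive(counter);
}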
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 34f104ed372c..2f7413afbf46 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -177,13 +177,6 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp,
#define XFS_REFC_BTREE_REF 1
#define XFS_SSB_REF 0
-/*
- * Flags for xfs_trans_ichgtime().
- */
-#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */
-#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */
-#define XFS_ICHGTIME_CREATE 0x4 /* inode create timestamp */
-
/* Computed inode geometry for the filesystem. */
struct xfs_ino_geometry {
/* Maximum inode count in this filesystem. */
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 69fc5b981352..3c40f37e82c7 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -68,6 +68,8 @@ xfs_trans_ichgtime(
inode_set_mtime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CHG)
inode_set_ctime_to_ts(inode, tv);
+ if (flags & XFS_ICHGTIME_ACCESS)
+ inode_set_atime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CREATE)
ip->i_crtime = tv;
}
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 6dbe6e7251e7..3dc8f785bf29 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -22,7 +22,6 @@
#include "xfs_rtbitmap.h"
#include "xfs_attr_item.h"
#include "xfs_log.h"
-#include "xfs_da_format.h"
#define _ALLOC true
#define _FREE false
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 1ad8ec63a7f4..22f5f1a9d3f0 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -26,6 +26,7 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
+#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/scrub/newbt.c b/fs/xfs/scrub/newbt.c
index 4a0271123d94..2aa14b7ab630 100644
--- a/fs/xfs/scrub/newbt.c
+++ b/fs/xfs/scrub/newbt.c
@@ -160,7 +160,8 @@ xrep_newbt_add_blocks(
if (args->tp) {
ASSERT(xnr->oinfo.oi_offset == 0);
- error = xfs_alloc_schedule_autoreap(args, true, &resv->autoreap);
+ error = xfs_alloc_schedule_autoreap(args,
+ XFS_FREE_EXTENT_SKIP_DISCARD, &resv->autoreap);
if (error)
goto out_pag;
}
@@ -414,7 +415,7 @@ xrep_newbt_free_extent(
*/
fsbno = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno, free_agbno);
error = xfs_free_extent_later(sc->tp, fsbno, free_aglen, &xnr->oinfo,
- xnr->resv, true);
+ xnr->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
if (error)
return error;
diff --git a/fs/xfs/scrub/quota_repair.c b/fs/xfs/scrub/quota_repair.c
index 90cd1512bba9..cd51f10f2920 100644
--- a/fs/xfs/scrub/quota_repair.c
+++ b/fs/xfs/scrub/quota_repair.c
@@ -12,7 +12,6 @@
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
-#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c
index be283153c254..53697f3c5e1b 100644
--- a/fs/xfs/scrub/reap.c
+++ b/fs/xfs/scrub/reap.c
@@ -451,7 +451,7 @@ xreap_agextent_iter(
xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, NULL,
- rs->resv, true);
+ rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
if (error)
return error;
@@ -477,7 +477,7 @@ xreap_agextent_iter(
* system with large EFIs.
*/
error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, rs->oinfo,
- rs->resv, true);
+ rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
if (error)
return error;
@@ -943,7 +943,8 @@ xrep_reap_bmapi_iter(
xfs_trans_mod_dquot_byino(sc->tp, ip, XFS_TRANS_DQ_BCOUNT,
-(int64_t)imap->br_blockcount);
return xfs_free_extent_later(sc->tp, imap->br_startblock,
- imap->br_blockcount, NULL, XFS_AG_RESV_NONE, true);
+ imap->br_blockcount, NULL, XFS_AG_RESV_NONE,
+ XFS_FREE_EXTENT_SKIP_DISCARD);
}
/*
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index c013f0ba4f36..4cbcf7a86dbe 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -856,7 +856,7 @@ xfs_ioc_scrubv_metadata(
if (vec_bytes > PAGE_SIZE)
return -ENOMEM;
- uvectors = (void __user *)(uintptr_t)head.svh_vectors;
+ uvectors = u64_to_user_ptr(head.svh_vectors);
vectors = memdup_user(uvectors, vec_bytes);
if (IS_ERR(vectors))
return PTR_ERR(vectors);
diff --git a/fs/xfs/scrub/tempfile.c b/fs/xfs/scrub/tempfile.c
index b747b625c5ee..d390d56cd875 100644
--- a/fs/xfs/scrub/tempfile.c
+++ b/fs/xfs/scrub/tempfile.c
@@ -40,11 +40,16 @@ xrep_tempfile_create(
struct xfs_scrub *sc,
uint16_t mode)
{
+ struct xfs_icreate_args args = {
+ .pip = sc->mp->m_rootip,
+ .mode = mode,
+ .flags = XFS_ICREATE_TMPFILE | XFS_ICREATE_UNLINKABLE,
+ };
struct xfs_mount *mp = sc->mp;
struct xfs_trans *tp = NULL;
- struct xfs_dquot *udqp = NULL;
- struct xfs_dquot *gdqp = NULL;
- struct xfs_dquot *pdqp = NULL;
+ struct xfs_dquot *udqp;
+ struct xfs_dquot *gdqp;
+ struct xfs_dquot *pdqp;
struct xfs_trans_res *tres;
struct xfs_inode *dp = mp->m_rootip;
xfs_ino_t ino;
@@ -65,8 +70,7 @@ xrep_tempfile_create(
* inode should be completely root owned so that we don't fail due to
* quota limits.
*/
- error = xfs_qm_vop_dqalloc(dp, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
- XFS_QMOPT_QUOTALL, &udqp, &gdqp, &pdqp);
+ error = xfs_icreate_dqalloc(&args, &udqp, &gdqp, &pdqp);
if (error)
return error;
@@ -87,14 +91,11 @@ xrep_tempfile_create(
error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
if (error)
goto out_trans_cancel;
- error = xfs_init_new_inode(&nop_mnt_idmap, tp, dp, ino, mode, 0, 0,
- 0, false, &sc->tempip);
+ error = xfs_icreate(tp, ino, &args, &sc->tempip);
if (error)
goto out_trans_cancel;
- /* Change the ownership of the inode to root. */
- VFS_I(sc->tempip)->i_uid = GLOBAL_ROOT_UID;
- VFS_I(sc->tempip)->i_gid = GLOBAL_ROOT_GID;
+ /* We don't touch file data, so drop the realtime flags. */
sc->tempip->i_diflags &= ~(XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT);
xfs_trans_log_inode(tp, sc->tempip, XFS_ILOG_CORE);
diff --git a/fs/xfs/scrub/xfarray.c b/fs/xfs/scrub/xfarray.c
index 9185ae7088d4..cdd13ed9c569 100644
--- a/fs/xfs/scrub/xfarray.c
+++ b/fs/xfs/scrub/xfarray.c
@@ -822,12 +822,14 @@ xfarray_sort_scan(
/* Grab the first folio that backs this array element. */
if (!si->folio) {
+ struct folio *folio;
loff_t next_pos;
- si->folio = xfile_get_folio(si->array->xfile, idx_pos,
+ folio = xfile_get_folio(si->array->xfile, idx_pos,
si->array->obj_size, XFILE_ALLOC);
- if (IS_ERR(si->folio))
- return PTR_ERR(si->folio);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ si->folio = folio;
si->first_folio_idx = xfarray_idx(si->array,
folio_pos(si->folio) + si->array->obj_size - 1);
@@ -1048,6 +1050,7 @@ xfarray_sort(
out_free:
trace_xfarray_sort_stats(si, error);
+ xfarray_sort_scan_done(si);
kvfree(si);
return error;
}
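
The xfarray fix above is an instance of a general pattern: check IS_ERR() on a local before publishing the pointer into longer-lived state, so a cached field never ends up holding an ERR_PTR value. A generic, hypothetical sketch (names invented, relies only on <linux/err.h>):

struct example_cache {
	struct folio		*folio;
};

static int
example_cache_fill(
	struct example_cache	*cache,
	struct folio		*(*get_folio)(void))
{
	struct folio		*folio = get_folio();

	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* cache->folio stays untouched */
	cache->folio = folio;
	return 0;
}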
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index f6ffb4f248f7..9355ccad9503 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -10,6 +10,10 @@
#define DEBUG 1
#endif
+#ifdef CONFIG_XFS_DEBUG_EXPENSIVE
+#define DEBUG_EXPENSIVE 1
+#endif
+
#ifdef CONFIG_XFS_ASSERT_FATAL
#define XFS_ASSERT_FATAL 1
#endif
diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c
index 2b10ac4c5fce..f683b7a9323f 100644
--- a/fs/xfs/xfs_attr_item.c
+++ b/fs/xfs/xfs_attr_item.c
@@ -746,7 +746,7 @@ xfs_attr_recover_work(
struct xfs_attri_log_format *attrp;
struct xfs_attri_log_nameval *nv = attrip->attri_nameval;
int error;
- int total;
+ unsigned int total = 0;
/*
* First check the validity of the attr described by the ATTRI. If any
@@ -763,7 +763,20 @@ xfs_attr_recover_work(
return PTR_ERR(attr);
args = attr->xattri_da_args;
- xfs_init_attr_trans(args, &resv, &total);
+ switch (xfs_attr_intent_op(attr)) {
+ case XFS_ATTRI_OP_FLAGS_PPTR_SET:
+ case XFS_ATTRI_OP_FLAGS_PPTR_REPLACE:
+ case XFS_ATTRI_OP_FLAGS_SET:
+ case XFS_ATTRI_OP_FLAGS_REPLACE:
+ resv = xfs_attr_set_resv(args);
+ total = args->total;
+ break;
+ case XFS_ATTRI_OP_FLAGS_PPTR_REMOVE:
+ case XFS_ATTRI_OP_FLAGS_REMOVE:
+ resv = M_RES(mp)->tr_attrrm;
+ total = XFS_ATTRRM_SPACE_RES(mp);
+ break;
+ }
resv = xlog_recover_resv(&resv);
error = xfs_trans_alloc(mp, &resv, total, 0, XFS_TRANS_RESERVE, &tp);
if (error)
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index a19d62e78aa1..e224b49b7cff 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -324,13 +324,9 @@ xfs_bmap_update_get_group(
struct xfs_mount *mp,
struct xfs_bmap_intent *bi)
{
- xfs_agnumber_t agno;
-
if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork))
return;
- agno = XFS_FSB_TO_AGNO(mp, bi->bi_bmap.br_startblock);
-
/*
* Bump the intent count on behalf of the deferred rmap and refcount
 * intent items that we can queue when we finish this bmap work. This
@@ -338,7 +334,7 @@ xfs_bmap_update_get_group(
* intent drops the intent count, ensuring that the intent count
* remains nonzero across the transaction roll.
*/
- bi->bi_pag = xfs_perag_intent_get(mp, agno);
+ bi->bi_pag = xfs_perag_intent_get(mp, bi->bi_bmap.br_startblock);
}
/* Add this deferred BUI to the transaction. */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index ac2e77ebb54c..fe2e2c930975 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -486,13 +486,11 @@ out_unlock:
/*
* Test whether it is appropriate to check an inode for and free post EOF
- * blocks. The 'force' parameter determines whether we should also consider
- * regular files that are marked preallocated or append-only.
+ * blocks.
*/
bool
xfs_can_free_eofblocks(
- struct xfs_inode *ip,
- bool force)
+ struct xfs_inode *ip)
{
struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
@@ -526,11 +524,11 @@ xfs_can_free_eofblocks(
return false;
/*
- * Do not free real preallocated or append-only files unless the file
- * has delalloc blocks and we are forced to remove them.
+ * Only free real extents for inodes with persistent preallocations or
+ * the append-only flag.
*/
if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
- if (!force || ip->i_delayed_blks == 0)
+ if (ip->i_delayed_blks == 0)
return false;
/*
@@ -584,6 +582,22 @@ xfs_free_eofblocks(
/* Wait on dio to ensure i_size has settled. */
inode_dio_wait(VFS_I(ip));
+ /*
+ * For preallocated files only free delayed allocations.
+ *
+ * Note that this means we also leave speculative preallocations in
+ * place for preallocated files.
+ */
+ if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
+ if (ip->i_delayed_blks) {
+ xfs_bmap_punch_delalloc_range(ip,
+ round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
+ LLONG_MAX);
+ }
+ xfs_inode_clear_eofblocks_tag(ip);
+ return 0;
+ }
+
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error) {
ASSERT(xfs_is_shutdown(mp));
@@ -794,14 +808,18 @@ xfs_flush_unmap_range(
xfs_off_t offset,
xfs_off_t len)
{
- struct xfs_mount *mp = ip->i_mount;
struct inode *inode = VFS_I(ip);
xfs_off_t rounding, start, end;
int error;
- rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
- start = round_down(offset, rounding);
- end = round_up(offset + len, rounding) - 1;
+ /*
+ * Make sure we extend the flush out to extent alignment
+ * boundaries so any extent range overlapping the start/end
+ * of the modification we are about to do is clean and idle.
+ */
+ rounding = max_t(xfs_off_t, xfs_inode_alloc_unitsize(ip), PAGE_SIZE);
+ start = rounddown_64(offset, rounding);
+ end = roundup_64(offset + len, rounding) - 1;
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (error)
@@ -884,14 +902,14 @@ xfs_prepare_shift(
struct xfs_inode *ip,
loff_t offset)
{
- struct xfs_mount *mp = ip->i_mount;
+ unsigned int rounding;
int error;
/*
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
* into the accessible region of the file.
*/
- if (xfs_can_free_eofblocks(ip, true)) {
+ if (xfs_can_free_eofblocks(ip)) {
error = xfs_free_eofblocks(ip);
if (error)
return error;
@@ -902,11 +920,13 @@ xfs_prepare_shift(
* with the full range of the operation. If we don't, a COW writeback
* completion could race with an insert, front merge with the start
* extent (after split) during the shift and corrupt the file. Start
- * with the block just prior to the start to stabilize the boundary.
+ * with the allocation unit just prior to the start to stabilize the
+ * boundary.
*/
- offset = round_down(offset, mp->m_sb.sb_blocksize);
+ rounding = xfs_inode_alloc_unitsize(ip);
+ offset = rounddown_64(offset, rounding);
if (offset)
- offset -= mp->m_sb.sb_blocksize;
+ offset -= rounding;
/*
* Writeback and invalidate cache for the remainder of the file as we're
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 51f84d8ff372..eb0895bfb9da 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -63,7 +63,7 @@ int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset,
xfs_off_t len);
/* EOF block manipulation functions */
-bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
+bool xfs_can_free_eofblocks(struct xfs_inode *ip);
int xfs_free_eofblocks(struct xfs_inode *ip);
int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 43031842341a..47549cfa61cd 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -22,6 +22,7 @@
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
+#include "xfs_error.h"
struct kmem_cache *xfs_buf_item_cache;
@@ -781,8 +782,39 @@ xfs_buf_item_committed(
return lsn;
}
+#ifdef DEBUG_EXPENSIVE
+static int
+xfs_buf_item_precommit(
+ struct xfs_trans *tp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
+ struct xfs_mount *mp = bp->b_mount;
+ xfs_failaddr_t fa;
+
+ if (!bp->b_ops || !bp->b_ops->verify_struct)
+ return 0;
+ if (bip->bli_flags & XFS_BLI_STALE)
+ return 0;
+
+ fa = bp->b_ops->verify_struct(bp);
+ if (fa) {
+ xfs_buf_verifier_error(bp, -EFSCORRUPTED, bp->b_ops->name,
+ bp->b_addr, BBTOB(bp->b_length), fa);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ ASSERT(fa == NULL);
+ }
+
+ return 0;
+}
+#else
+# define xfs_buf_item_precommit NULL
+#endif
+
static const struct xfs_item_ops xfs_buf_item_ops = {
.iop_size = xfs_buf_item_size,
+ .iop_precommit = xfs_buf_item_precommit,
.iop_format = xfs_buf_item_format,
.iop_pin = xfs_buf_item_pin,
.iop_unpin = xfs_buf_item_unpin,
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 25fe3b932b5a..6f0fc7fe1f2b 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -20,6 +20,7 @@
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_health.h"
+#include "xfs_rtbitmap.h"
/*
* Notes on an efficient, low latency fstrim algorithm
@@ -322,7 +323,7 @@ xfs_trim_should_stop(void)
* we found in the last batch as the key to start the next.
*/
static int
-xfs_trim_extents(
+xfs_trim_perag_extents(
struct xfs_perag *pag,
xfs_agblock_t start,
xfs_agblock_t end,
@@ -383,6 +384,259 @@ xfs_trim_extents(
}
+static int
+xfs_trim_datadev_extents(
+ struct xfs_mount *mp,
+ xfs_daddr_t start,
+ xfs_daddr_t end,
+ xfs_extlen_t minlen,
+ uint64_t *blocks_trimmed)
+{
+ xfs_agnumber_t start_agno, end_agno;
+ xfs_agblock_t start_agbno, end_agbno;
+ xfs_daddr_t ddev_end;
+ struct xfs_perag *pag;
+ int last_error = 0, error;
+
+ ddev_end = min_t(xfs_daddr_t, end,
+ XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1);
+
+ start_agno = xfs_daddr_to_agno(mp, start);
+ start_agbno = xfs_daddr_to_agbno(mp, start);
+ end_agno = xfs_daddr_to_agno(mp, ddev_end);
+ end_agbno = xfs_daddr_to_agbno(mp, ddev_end);
+
+ for_each_perag_range(mp, start_agno, end_agno, pag) {
+ xfs_agblock_t agend = pag->block_count;
+
+ if (start_agno == end_agno)
+ agend = end_agbno;
+ error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen,
+ blocks_trimmed);
+ if (error)
+ last_error = error;
+
+ if (xfs_trim_should_stop()) {
+ xfs_perag_rele(pag);
+ break;
+ }
+ start_agbno = 0;
+ }
+
+ return last_error;
+}
+
+#ifdef CONFIG_XFS_RT
+struct xfs_trim_rtdev {
+ /* list of rt extents to free */
+ struct list_head extent_list;
+
+ /* pointer to count of blocks trimmed */
+ uint64_t *blocks_trimmed;
+
+ /* minimum length that caller allows us to trim */
+ xfs_rtblock_t minlen_fsb;
+
+ /* restart point for the rtbitmap walk */
+ xfs_rtxnum_t restart_rtx;
+
+ /* stopping point for the current rtbitmap walk */
+ xfs_rtxnum_t stop_rtx;
+};
+
+struct xfs_rtx_busy {
+ struct list_head list;
+ xfs_rtblock_t bno;
+ xfs_rtblock_t length;
+};
+
+static void
+xfs_discard_free_rtdev_extents(
+ struct xfs_trim_rtdev *tr)
+{
+ struct xfs_rtx_busy *busyp, *n;
+
+ list_for_each_entry_safe(busyp, n, &tr->extent_list, list) {
+ list_del_init(&busyp->list);
+ kfree(busyp);
+ }
+}
+
+/*
+ * Walk the discard list and issue discards on all the busy extents in the
+ * list. We plug and chain the bios so that we only need a single completion
+ * call to clear all the busy extents once the discards are complete.
+ */
+static int
+xfs_discard_rtdev_extents(
+ struct xfs_mount *mp,
+ struct xfs_trim_rtdev *tr)
+{
+ struct block_device *bdev = mp->m_rtdev_targp->bt_bdev;
+ struct xfs_rtx_busy *busyp;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ xfs_rtblock_t start = NULLRTBLOCK, length = 0;
+ int error = 0;
+
+ blk_start_plug(&plug);
+ list_for_each_entry(busyp, &tr->extent_list, list) {
+ if (start == NULLRTBLOCK)
+ start = busyp->bno;
+ length += busyp->length;
+
+ trace_xfs_discard_rtextent(mp, busyp->bno, busyp->length);
+
+ error = __blkdev_issue_discard(bdev,
+ XFS_FSB_TO_BB(mp, busyp->bno),
+ XFS_FSB_TO_BB(mp, busyp->length),
+ GFP_NOFS, &bio);
+ if (error)
+ break;
+ }
+ xfs_discard_free_rtdev_extents(tr);
+
+ if (bio) {
+ error = submit_bio_wait(bio);
+ if (error == -EOPNOTSUPP)
+ error = 0;
+ if (error)
+ xfs_info(mp,
+ "discard failed for rtextent [0x%llx,%llu], error %d",
+ (unsigned long long)start,
+ (unsigned long long)length,
+ error);
+ bio_put(bio);
+ }
+ blk_finish_plug(&plug);
+
+ return error;
+}
+
+static int
+xfs_trim_gather_rtextent(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ const struct xfs_rtalloc_rec *rec,
+ void *priv)
+{
+ struct xfs_trim_rtdev *tr = priv;
+ struct xfs_rtx_busy *busyp;
+ xfs_rtblock_t rbno, rlen;
+
+ if (rec->ar_startext > tr->stop_rtx) {
+ /*
+ * If we've scanned a large number of rtbitmap blocks, update
+ * the cursor to point at this extent so we restart the next
+ * batch from this extent.
+ */
+ tr->restart_rtx = rec->ar_startext;
+ return -ECANCELED;
+ }
+
+ rbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
+ rlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);
+
+ /* Ignore too small. */
+ if (rlen < tr->minlen_fsb) {
+ trace_xfs_discard_rttoosmall(mp, rbno, rlen);
+ return 0;
+ }
+
+ busyp = kzalloc(sizeof(struct xfs_rtx_busy), GFP_KERNEL);
+ if (!busyp)
+ return -ENOMEM;
+
+ busyp->bno = rbno;
+ busyp->length = rlen;
+ INIT_LIST_HEAD(&busyp->list);
+ list_add_tail(&busyp->list, &tr->extent_list);
+ *tr->blocks_trimmed += rlen;
+
+ tr->restart_rtx = rec->ar_startext + rec->ar_extcount;
+ return 0;
+}
+
+static int
+xfs_trim_rtdev_extents(
+ struct xfs_mount *mp,
+ xfs_daddr_t start,
+ xfs_daddr_t end,
+ xfs_daddr_t minlen,
+ uint64_t *blocks_trimmed)
+{
+ struct xfs_rtalloc_rec low = { };
+ struct xfs_rtalloc_rec high = { };
+ struct xfs_trim_rtdev tr = {
+ .blocks_trimmed = blocks_trimmed,
+ .minlen_fsb = XFS_BB_TO_FSB(mp, minlen),
+ };
+ struct xfs_trans *tp;
+ xfs_daddr_t rtdev_daddr;
+ int error;
+
+ INIT_LIST_HEAD(&tr.extent_list);
+
+ /* Shift the start and end downwards to match the rt device. */
+ rtdev_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
+ if (start > rtdev_daddr)
+ start -= rtdev_daddr;
+ else
+ start = 0;
+
+ if (end <= rtdev_daddr)
+ return 0;
+ end -= rtdev_daddr;
+
+ error = xfs_trans_alloc_empty(mp, &tp);
+ if (error)
+ return error;
+
+ end = min_t(xfs_daddr_t, end,
+ XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);
+
+ /* Convert the rt blocks to rt extents */
+ low.ar_startext = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
+ high.ar_startext = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));
+
+ /*
+ * Walk the free ranges between low and high. The query_range function
+ * trims the extents returned.
+ */
+ do {
+ tr.stop_rtx = low.ar_startext + (mp->m_sb.sb_blocksize * NBBY);
+ xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
+ error = xfs_rtalloc_query_range(mp, tp, &low, &high,
+ xfs_trim_gather_rtextent, &tr);
+
+ if (error == -ECANCELED)
+ error = 0;
+ if (error) {
+ xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+ xfs_discard_free_rtdev_extents(&tr);
+ break;
+ }
+
+ if (list_empty(&tr.extent_list)) {
+ xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+ break;
+ }
+
+ error = xfs_discard_rtdev_extents(mp, &tr);
+ xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+ if (error)
+ break;
+
+ low.ar_startext = tr.restart_rtx;
+ } while (!xfs_trim_should_stop() && low.ar_startext <= high.ar_startext);
+
+ xfs_trans_cancel(tp);
+ return error;
+}
+#else
+# define xfs_trim_rtdev_extents(m,s,e,n,b) (-EOPNOTSUPP)
+#endif /* CONFIG_XFS_RT */
+
/*
* trim a range of the filesystem.
*
@@ -391,28 +645,37 @@ xfs_trim_extents(
* addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
* is a linear address range. Hence we need to use DADDR based conversions and
* comparisons for determining the correct offset and regions to trim.
+ *
+ * The realtime device is mapped into the FITRIM "address space" immediately
+ * after the data device.
*/
int
xfs_ioc_trim(
struct xfs_mount *mp,
struct fstrim_range __user *urange)
{
- struct xfs_perag *pag;
unsigned int granularity =
bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
+ struct block_device *rt_bdev = NULL;
struct fstrim_range range;
xfs_daddr_t start, end;
xfs_extlen_t minlen;
- xfs_agnumber_t start_agno, end_agno;
- xfs_agblock_t start_agbno, end_agbno;
+ xfs_rfsblock_t max_blocks;
uint64_t blocks_trimmed = 0;
int error, last_error = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev))
+ if (mp->m_rtdev_targp &&
+ bdev_max_discard_sectors(mp->m_rtdev_targp->bt_bdev))
+ rt_bdev = mp->m_rtdev_targp->bt_bdev;
+ if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev) && !rt_bdev)
return -EOPNOTSUPP;
+ if (rt_bdev)
+ granularity = max(granularity,
+ bdev_discard_granularity(rt_bdev));
+
/*
* We haven't recovered the log, so we cannot use our bnobt-guided
* storage zapping commands.
@@ -433,35 +696,27 @@ xfs_ioc_trim(
* used by the fstrim application. In the end it really doesn't
* matter as trimming blocks is an advisory interface.
*/
- if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
+ max_blocks = mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks;
+ if (range.start >= XFS_FSB_TO_B(mp, max_blocks) ||
range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
range.len < mp->m_sb.sb_blocksize)
return -EINVAL;
start = BTOBB(range.start);
- end = min_t(xfs_daddr_t, start + BTOBBT(range.len),
- XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) - 1;
+ end = start + BTOBBT(range.len) - 1;
- start_agno = xfs_daddr_to_agno(mp, start);
- start_agbno = xfs_daddr_to_agbno(mp, start);
- end_agno = xfs_daddr_to_agno(mp, end);
- end_agbno = xfs_daddr_to_agbno(mp, end);
-
- for_each_perag_range(mp, start_agno, end_agno, pag) {
- xfs_agblock_t agend = pag->block_count;
-
- if (start_agno == end_agno)
- agend = end_agbno;
- error = xfs_trim_extents(pag, start_agbno, agend, minlen,
+ if (bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev)) {
+ error = xfs_trim_datadev_extents(mp, start, end, minlen,
&blocks_trimmed);
if (error)
last_error = error;
+ }
- if (xfs_trim_should_stop()) {
- xfs_perag_rele(pag);
- break;
- }
- start_agbno = 0;
+ if (rt_bdev && !xfs_trim_should_stop()) {
+ error = xfs_trim_rtdev_extents(mp, start, end, minlen,
+ &blocks_trimmed);
+ if (error)
+ last_error = error;
}
if (last_error)
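
A small sketch of the linear FITRIM address space described in the comment above, assuming the realtime device begins immediately after sb_dblocks worth of data-device sectors; the helper name is illustrative, not part of this patch:

static bool
example_fitrim_daddr_is_rt(
	struct xfs_mount	*mp,
	xfs_daddr_t		daddr,
	xfs_daddr_t		*rt_daddr)
{
	xfs_daddr_t		ddev_size = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);

	if (daddr < ddev_size)
		return false;			/* data device range */
	*rt_daddr = daddr - ddev_size;		/* shift down onto the rt device */
	return true;
}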
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 6a1aae799cf1..7d19091215b0 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -17,6 +17,7 @@
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_log.h"
+#include "xfs_error.h"
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
@@ -193,8 +194,38 @@ xfs_qm_dquot_logitem_committing(
return xfs_qm_dquot_logitem_release(lip);
}
+#ifdef DEBUG_EXPENSIVE
+static int
+xfs_qm_dquot_logitem_precommit(
+ struct xfs_trans *tp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
+ struct xfs_mount *mp = dqp->q_mount;
+ struct xfs_disk_dquot ddq = { };
+ xfs_failaddr_t fa;
+
+ xfs_dquot_to_disk(&ddq, dqp);
+ fa = xfs_dquot_verify(mp, &ddq, dqp->q_id);
+ if (fa) {
+ XFS_CORRUPTION_ERROR("Bad dquot during logging",
+ XFS_ERRLEVEL_LOW, mp, &ddq, sizeof(ddq));
+ xfs_alert(mp,
+ "Metadata corruption detected at %pS, dquot 0x%x",
+ fa, dqp->q_id);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ ASSERT(fa == NULL);
+ }
+
+ return 0;
+}
+#else
+# define xfs_qm_dquot_logitem_precommit NULL
+#endif
+
static const struct xfs_item_ops xfs_dquot_item_ops = {
.iop_size = xfs_qm_dquot_logitem_size,
+ .iop_precommit = xfs_qm_dquot_logitem_precommit,
.iop_format = xfs_qm_dquot_logitem_format,
.iop_pin = xfs_qm_dquot_logitem_pin,
.iop_unpin = xfs_qm_dquot_logitem_unpin,
diff --git a/fs/xfs/xfs_drain.c b/fs/xfs/xfs_drain.c
index 005a66be44a2..7bdb9688c0f5 100644
--- a/fs/xfs/xfs_drain.c
+++ b/fs/xfs/xfs_drain.c
@@ -94,17 +94,17 @@ static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr)
}
/*
- * Get a passive reference to an AG and declare an intent to update its
- * metadata.
+ * Get a passive reference to the AG that contains a fsbno and declare an intent
+ * to update its metadata.
*/
struct xfs_perag *
xfs_perag_intent_get(
struct xfs_mount *mp,
- xfs_agnumber_t agno)
+ xfs_fsblock_t fsbno)
{
struct xfs_perag *pag;
- pag = xfs_perag_get(mp, agno);
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsbno));
if (!pag)
return NULL;
diff --git a/fs/xfs/xfs_drain.h b/fs/xfs/xfs_drain.h
index 50a5772a8296..775164f54ea6 100644
--- a/fs/xfs/xfs_drain.h
+++ b/fs/xfs/xfs_drain.h
@@ -62,7 +62,7 @@ void xfs_drain_wait_enable(void);
* until the item is finished or cancelled.
*/
struct xfs_perag *xfs_perag_intent_get(struct xfs_mount *mp,
- xfs_agnumber_t agno);
+ xfs_fsblock_t fsbno);
void xfs_perag_intent_put(struct xfs_perag *pag);
void xfs_perag_intent_hold(struct xfs_perag *pag);
@@ -76,7 +76,8 @@ struct xfs_defer_drain { /* empty */ };
#define xfs_defer_drain_free(dr) ((void)0)
#define xfs_defer_drain_init(dr) ((void)0)
-#define xfs_perag_intent_get(mp, agno) xfs_perag_get((mp), (agno))
+#define xfs_perag_intent_get(mp, fsbno) \
+ xfs_perag_get((mp), XFS_FSB_TO_AGNO(mp, fsbno))
#define xfs_perag_intent_put(pag) xfs_perag_put(pag)
static inline void xfs_perag_intent_hold(struct xfs_perag *pag) { }
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 8c382f092332..abffc74a924f 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -303,6 +303,11 @@ static const struct xfs_item_ops xfs_efd_item_ops = {
.iop_intent = xfs_efd_item_intent,
};
+static inline struct xfs_extent_free_item *xefi_entry(const struct list_head *e)
+{
+ return list_entry(e, struct xfs_extent_free_item, xefi_list);
+}
+
/*
* Fill the EFD with all extents from the EFI when we need to roll the
* transaction and continue with a new EFI.
@@ -331,6 +336,22 @@ xfs_efd_from_efi(
efdp->efd_next_extent = efip->efi_format.efi_nextents;
}
+static void
+xfs_efd_add_extent(
+ struct xfs_efd_log_item *efdp,
+ struct xfs_extent_free_item *xefi)
+{
+ struct xfs_extent *extp;
+
+ ASSERT(efdp->efd_next_extent < efdp->efd_format.efd_nextents);
+
+ extp = &efdp->efd_format.efd_extents[efdp->efd_next_extent];
+ extp->ext_start = xefi->xefi_startblock;
+ extp->ext_len = xefi->xefi_blockcount;
+
+ efdp->efd_next_extent++;
+}
+
/* Sort bmap items by AG. */
static int
xfs_extent_free_diff_items(
@@ -338,11 +359,8 @@ xfs_extent_free_diff_items(
const struct list_head *a,
const struct list_head *b)
{
- struct xfs_extent_free_item *ra;
- struct xfs_extent_free_item *rb;
-
- ra = container_of(a, struct xfs_extent_free_item, xefi_list);
- rb = container_of(b, struct xfs_extent_free_item, xefi_list);
+ struct xfs_extent_free_item *ra = xefi_entry(a);
+ struct xfs_extent_free_item *rb = xefi_entry(b);
return ra->xefi_pag->pag_agno - rb->xefi_pag->pag_agno;
}
@@ -418,24 +436,35 @@ xfs_extent_free_create_done(
return &efdp->efd_item;
}
-/* Take a passive ref to the AG containing the space we're freeing. */
+/* Add this deferred EFI to the transaction. */
void
-xfs_extent_free_get_group(
- struct xfs_mount *mp,
- struct xfs_extent_free_item *xefi)
+xfs_extent_free_defer_add(
+ struct xfs_trans *tp,
+ struct xfs_extent_free_item *xefi,
+ struct xfs_defer_pending **dfpp)
{
- xfs_agnumber_t agno;
+ struct xfs_mount *mp = tp->t_mountp;
+
+ trace_xfs_extent_free_defer(mp, xefi);
- agno = XFS_FSB_TO_AGNO(mp, xefi->xefi_startblock);
- xefi->xefi_pag = xfs_perag_intent_get(mp, agno);
+ xefi->xefi_pag = xfs_perag_intent_get(mp, xefi->xefi_startblock);
+ if (xefi->xefi_agresv == XFS_AG_RESV_AGFL)
+ *dfpp = xfs_defer_add(tp, &xefi->xefi_list,
+ &xfs_agfl_free_defer_type);
+ else
+ *dfpp = xfs_defer_add(tp, &xefi->xefi_list,
+ &xfs_extent_free_defer_type);
}
-/* Release a passive AG ref after some freeing work. */
-static inline void
-xfs_extent_free_put_group(
- struct xfs_extent_free_item *xefi)
+/* Cancel a free extent. */
+STATIC void
+xfs_extent_free_cancel_item(
+ struct list_head *item)
{
+ struct xfs_extent_free_item *xefi = xefi_entry(item);
+
xfs_perag_intent_put(xefi->xefi_pag);
+ kmem_cache_free(xfs_extfree_item_cache, xefi);
}
/* Process a free extent. */
@@ -447,15 +476,12 @@ xfs_extent_free_finish_item(
struct xfs_btree_cur **state)
{
struct xfs_owner_info oinfo = { };
- struct xfs_extent_free_item *xefi;
+ struct xfs_extent_free_item *xefi = xefi_entry(item);
struct xfs_efd_log_item *efdp = EFD_ITEM(done);
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_extent *extp;
- uint next_extent;
xfs_agblock_t agbno;
int error = 0;
- xefi = container_of(item, struct xfs_extent_free_item, xefi_list);
agbno = XFS_FSB_TO_AGBNO(mp, xefi->xefi_startblock);
oinfo.oi_owner = xefi->xefi_owner;
@@ -464,8 +490,7 @@ xfs_extent_free_finish_item(
if (xefi->xefi_flags & XFS_EFI_BMBT_BLOCK)
oinfo.oi_flags |= XFS_OWNER_INFO_BMBT_BLOCK;
- trace_xfs_bmap_free_deferred(tp->t_mountp, xefi->xefi_pag->pag_agno, 0,
- agbno, xefi->xefi_blockcount);
+ trace_xfs_extent_free_deferred(mp, xefi);
/*
* If we need a new transaction to make progress, the caller will log a
@@ -482,16 +507,8 @@ xfs_extent_free_finish_item(
return error;
}
- /* Add the work we finished to the EFD, even though nobody uses that */
- next_extent = efdp->efd_next_extent;
- ASSERT(next_extent < efdp->efd_format.efd_nextents);
- extp = &(efdp->efd_format.efd_extents[next_extent]);
- extp->ext_start = xefi->xefi_startblock;
- extp->ext_len = xefi->xefi_blockcount;
- efdp->efd_next_extent++;
-
- xfs_extent_free_put_group(xefi);
- kmem_cache_free(xfs_extfree_item_cache, xefi);
+ xfs_efd_add_extent(efdp, xefi);
+ xfs_extent_free_cancel_item(item);
return error;
}
@@ -503,19 +520,6 @@ xfs_extent_free_abort_intent(
xfs_efi_release(EFI_ITEM(intent));
}
-/* Cancel a free extent. */
-STATIC void
-xfs_extent_free_cancel_item(
- struct list_head *item)
-{
- struct xfs_extent_free_item *xefi;
-
- xefi = container_of(item, struct xfs_extent_free_item, xefi_list);
-
- xfs_extent_free_put_group(xefi);
- kmem_cache_free(xfs_extfree_item_cache, xefi);
-}
-
/*
* AGFL blocks are accounted differently in the reserve pools and are not
* inserted into the busy extent list.
@@ -530,35 +534,24 @@ xfs_agfl_free_finish_item(
struct xfs_owner_info oinfo = { };
struct xfs_mount *mp = tp->t_mountp;
struct xfs_efd_log_item *efdp = EFD_ITEM(done);
- struct xfs_extent_free_item *xefi;
- struct xfs_extent *extp;
+ struct xfs_extent_free_item *xefi = xefi_entry(item);
struct xfs_buf *agbp;
int error;
xfs_agblock_t agbno;
- uint next_extent;
- xefi = container_of(item, struct xfs_extent_free_item, xefi_list);
ASSERT(xefi->xefi_blockcount == 1);
agbno = XFS_FSB_TO_AGBNO(mp, xefi->xefi_startblock);
oinfo.oi_owner = xefi->xefi_owner;
- trace_xfs_agfl_free_deferred(mp, xefi->xefi_pag->pag_agno, 0, agbno,
- xefi->xefi_blockcount);
+ trace_xfs_agfl_free_deferred(mp, xefi);
error = xfs_alloc_read_agf(xefi->xefi_pag, tp, 0, &agbp);
if (!error)
- error = xfs_free_agfl_block(tp, xefi->xefi_pag->pag_agno,
- agbno, agbp, &oinfo);
+ error = xfs_free_ag_extent(tp, agbp, xefi->xefi_pag->pag_agno,
+ agbno, 1, &oinfo, XFS_AG_RESV_AGFL);
- next_extent = efdp->efd_next_extent;
- ASSERT(next_extent < efdp->efd_format.efd_nextents);
- extp = &(efdp->efd_format.efd_extents[next_extent]);
- extp->ext_start = xefi->xefi_startblock;
- extp->ext_len = xefi->xefi_blockcount;
- efdp->efd_next_extent++;
-
- xfs_extent_free_put_group(xefi);
- kmem_cache_free(xfs_extfree_item_cache, xefi);
+ xfs_efd_add_extent(efdp, xefi);
+ xfs_extent_free_cancel_item(&xefi->xefi_list);
return error;
}
@@ -585,7 +578,7 @@ xfs_efi_recover_work(
xefi->xefi_blockcount = extp->ext_len;
xefi->xefi_agresv = XFS_AG_RESV_NONE;
xefi->xefi_owner = XFS_RMAP_OWN_UNKNOWN;
- xfs_extent_free_get_group(mp, xefi);
+ xefi->xefi_pag = xfs_perag_intent_get(mp, extp->ext_start);
xfs_defer_add_item(dfp, &xefi->xefi_list);
}
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index da6a5afa607c..41b7c4306079 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -88,4 +88,10 @@ xfs_efd_log_item_sizeof(
extern struct kmem_cache *xfs_efi_cache;
extern struct kmem_cache *xfs_efd_cache;
+struct xfs_extent_free_item;
+
+void xfs_extent_free_defer_add(struct xfs_trans *tp,
+ struct xfs_extent_free_item *xefi,
+ struct xfs_defer_pending **dfpp);
+
#endif /* __XFS_EXTFREE_ITEM_H__ */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index b240ea5241dc..4cdc54dc9686 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -213,29 +213,18 @@ xfs_ilock_iocb_for_write(
if (ret)
return ret;
- if (*lock_mode == XFS_IOLOCK_EXCL)
- return 0;
- if (!xfs_iflags_test(ip, XFS_IREMAPPING))
- return 0;
-
- xfs_iunlock(ip, *lock_mode);
- *lock_mode = XFS_IOLOCK_EXCL;
- return xfs_ilock_iocb(iocb, *lock_mode);
-}
-
-static unsigned int
-xfs_ilock_for_write_fault(
- struct xfs_inode *ip)
-{
- /* get a shared lock if no remapping in progress */
- xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
- if (!xfs_iflags_test(ip, XFS_IREMAPPING))
- return XFS_MMAPLOCK_SHARED;
+ /*
+ * If a reflink remap is in progress we always need to take the iolock
+ * exclusively to wait for it to finish.
+ */
+ if (*lock_mode == XFS_IOLOCK_SHARED &&
+ xfs_iflags_test(ip, XFS_IREMAPPING)) {
+ xfs_iunlock(ip, *lock_mode);
+ *lock_mode = XFS_IOLOCK_EXCL;
+ return xfs_ilock_iocb(iocb, *lock_mode);
+ }
- /* wait for remapping to complete */
- xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- return XFS_MMAPLOCK_EXCL;
+ return 0;
}
STATIC ssize_t
@@ -1247,31 +1236,77 @@ xfs_file_llseek(
return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
-#ifdef CONFIG_FS_DAX
static inline vm_fault_t
-xfs_dax_fault(
+xfs_dax_fault_locked(
struct vm_fault *vmf,
unsigned int order,
- bool write_fault,
- pfn_t *pfn)
+ bool write_fault)
{
- return dax_iomap_fault(vmf, order, pfn, NULL,
+ vm_fault_t ret;
+ pfn_t pfn;
+
+ if (!IS_ENABLED(CONFIG_FS_DAX)) {
+ ASSERT(0);
+ return VM_FAULT_SIGBUS;
+ }
+ ret = dax_iomap_fault(vmf, order, &pfn, NULL,
(write_fault && !vmf->cow_page) ?
&xfs_dax_write_iomap_ops :
&xfs_read_iomap_ops);
+ if (ret & VM_FAULT_NEEDDSYNC)
+ ret = dax_finish_sync_fault(vmf, order, pfn);
+ return ret;
}
-#else
-static inline vm_fault_t
-xfs_dax_fault(
+
+static vm_fault_t
+xfs_dax_read_fault(
struct vm_fault *vmf,
- unsigned int order,
- bool write_fault,
- pfn_t *pfn)
+ unsigned int order)
{
- ASSERT(0);
- return VM_FAULT_SIGBUS;
+ struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file));
+ vm_fault_t ret;
+
+ xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+ ret = xfs_dax_fault_locked(vmf, order, false);
+ xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+
+ return ret;
+}
+
+static vm_fault_t
+xfs_write_fault(
+ struct vm_fault *vmf,
+ unsigned int order)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct xfs_inode *ip = XFS_I(inode);
+ unsigned int lock_mode = XFS_MMAPLOCK_SHARED;
+ vm_fault_t ret;
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vmf->vma->vm_file);
+
+ /*
+ * Normally we only need the shared mmaplock, but if a reflink remap is
+ * in progress we take the exclusive lock to wait for the remap to
+ * finish before taking a write fault.
+ */
+ xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+ if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
+ xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ lock_mode = XFS_MMAPLOCK_EXCL;
+ }
+
+ if (IS_DAX(inode))
+ ret = xfs_dax_fault_locked(vmf, order, true);
+ else
+ ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
+ xfs_iunlock(ip, lock_mode);
+
+ sb_end_pagefault(inode->i_sb);
+ return ret;
}
-#endif
/*
* Locking for serialisation of IO during page faults. This results in a lock
@@ -1290,38 +1325,14 @@ __xfs_filemap_fault(
bool write_fault)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
- struct xfs_inode *ip = XFS_I(inode);
- vm_fault_t ret;
- unsigned int lock_mode = 0;
-
- trace_xfs_filemap_fault(ip, order, write_fault);
-
- if (write_fault) {
- sb_start_pagefault(inode->i_sb);
- file_update_time(vmf->vma->vm_file);
- }
-
- if (IS_DAX(inode) || write_fault)
- lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
- if (IS_DAX(inode)) {
- pfn_t pfn;
-
- ret = xfs_dax_fault(vmf, order, write_fault, &pfn);
- if (ret & VM_FAULT_NEEDDSYNC)
- ret = dax_finish_sync_fault(vmf, order, pfn);
- } else if (write_fault) {
- ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
- } else {
- ret = filemap_fault(vmf);
- }
-
- if (lock_mode)
- xfs_iunlock(XFS_I(inode), lock_mode);
+ trace_xfs_filemap_fault(XFS_I(inode), order, write_fault);
if (write_fault)
- sb_end_pagefault(inode->i_sb);
- return ret;
+ return xfs_write_fault(vmf, order);
+ if (IS_DAX(inode))
+ return xfs_dax_read_fault(vmf, order);
+ return filemap_fault(vmf);
}
static inline bool
diff --git a/fs/xfs/xfs_handle.c b/fs/xfs/xfs_handle.c
index c8785ed59543..cf5acbd3c7ca 100644
--- a/fs/xfs/xfs_handle.c
+++ b/fs/xfs/xfs_handle.c
@@ -21,7 +21,6 @@
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_parent.h"
-#include "xfs_da_btree.h"
#include "xfs_handle.h"
#include "xfs_health.h"
#include "xfs_icache.h"
@@ -773,11 +772,6 @@ xfs_getparents_expand_lastrec(
trace_xfs_getparents_expand_lastrec(gpx->ip, gp, &gpx->context, gpr);
}
-static inline void __user *u64_to_uptr(u64 val)
-{
- return (void __user *)(uintptr_t)val;
-}
-
/* Retrieve the parent pointers for a given inode. */
STATIC int
xfs_getparents(
@@ -862,7 +856,7 @@ xfs_getparents(
ASSERT(gpx->context.firstu <= gpx->gph.gph_request.gp_bufsize);
/* Copy the records to userspace. */
- if (copy_to_user(u64_to_uptr(gpx->gph.gph_request.gp_buffer),
+ if (copy_to_user(u64_to_user_ptr(gpx->gph.gph_request.gp_buffer),
gpx->krecords, gpx->context.firstu))
error = -EFAULT;
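
The xfs_handle.c hunk drops the local u64_to_uptr() helper in favour of the kernel's generic u64_to_user_ptr(), which performs the same cast to a user pointer through uintptr_t (the in-kernel macro additionally sanity-checks that it was handed a u64). A plain user-space analogue of that cast pattern, with the ioctl plumbing replaced by a stack variable, might look like this; it is a sketch, not the kernel macro:

    #include <stdint.h>
    #include <stdio.h>

    static void *u64_to_ptr(uint64_t val)
    {
        /* Cast through uintptr_t so the integer-to-pointer conversion is well defined. */
        return (void *)(uintptr_t)val;
    }

    int main(void)
    {
        int x = 42;
        uint64_t cookie = (uintptr_t)&x;   /* as if received in a 64-bit ioctl field */

        printf("%d\n", *(int *)u64_to_ptr(cookie));
        return 0;
    }
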
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 0953163a2d84..cf629302d48e 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -86,9 +86,8 @@ xfs_inode_alloc(
return NULL;
}
- /* VFS doesn't initialise i_mode or i_state! */
+ /* VFS doesn't initialise i_mode! */
VFS_I(ip)->i_mode = 0;
- VFS_I(ip)->i_state = 0;
mapping_set_large_folios(VFS_I(ip)->i_mapping);
XFS_STATS_INC(mp, vn_active);
@@ -314,6 +313,7 @@ xfs_reinit_inode(
dev_t dev = inode->i_rdev;
kuid_t uid = inode->i_uid;
kgid_t gid = inode->i_gid;
+ unsigned long state = inode->i_state;
error = inode_init_always(mp->m_super, inode);
@@ -324,6 +324,7 @@ xfs_reinit_inode(
inode->i_rdev = dev;
inode->i_uid = uid;
inode->i_gid = gid;
+ inode->i_state = state;
mapping_set_large_folios(inode->i_mapping);
return error;
}
@@ -1155,7 +1156,7 @@ xfs_inode_free_eofblocks(
}
*lockflags |= XFS_IOLOCK_EXCL;
- if (xfs_can_free_eofblocks(ip, false))
+ if (xfs_can_free_eofblocks(ip))
return xfs_free_eofblocks(ip);
/* inode could be preallocated or append-only */
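
The xfs_icache.c hunks stop clearing inode->i_state at allocation and instead preserve it across inode_init_always() in xfs_reinit_inode(), the same way i_rdev, i_uid and i_gid already were. A generic user-space sketch of that save/reinitialise/restore pattern follows; the structure, the init stand-in and the I_NEW-style bit value are illustrative assumptions, not the VFS definitions.

    #include <assert.h>
    #include <string.h>

    struct fake_inode {
        unsigned long i_state;   /* flags that must survive reinitialisation */
        unsigned int  i_uid;
        unsigned int  i_gid;
    };

    /* Stand-in for inode_init_always(): wipes the whole structure. */
    static void inode_init_always_model(struct fake_inode *ino)
    {
        memset(ino, 0, sizeof(*ino));
    }

    /* Save the fields the caller still needs, reinitialise, then restore them. */
    static void reinit_inode_model(struct fake_inode *ino)
    {
        unsigned long state = ino->i_state;
        unsigned int uid = ino->i_uid;
        unsigned int gid = ino->i_gid;

        inode_init_always_model(ino);

        ino->i_state = state;
        ino->i_uid = uid;
        ino->i_gid = gid;
    }

    int main(void)
    {
        struct fake_inode ino = { .i_state = (1UL << 3), .i_uid = 1000, .i_gid = 1000 };

        reinit_inode_model(&ino);
        assert(ino.i_state == (1UL << 3) && ino.i_uid == 1000);
        return 0;
    }
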
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 58fb7a5062e1..7dc6f326936c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -42,54 +42,11 @@
#include "xfs_pnfs.h"
#include "xfs_parent.h"
#include "xfs_xattr.h"
+#include "xfs_inode_util.h"
struct kmem_cache *xfs_inode_cache;
/*
- * helper function to extract extent size hint from inode
- */
-xfs_extlen_t
-xfs_get_extsz_hint(
- struct xfs_inode *ip)
-{
- /*
- * No point in aligning allocations if we need to COW to actually
- * write to them.
- */
- if (xfs_is_always_cow_inode(ip))
- return 0;
- if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
- return ip->i_extsize;
- if (XFS_IS_REALTIME_INODE(ip) &&
- ip->i_mount->m_sb.sb_rextsize > 1)
- return ip->i_mount->m_sb.sb_rextsize;
- return 0;
-}
-
-/*
- * Helper function to extract CoW extent size hint from inode.
- * Between the extent size hint and the CoW extent size hint, we
- * return the greater of the two. If the value is zero (automatic),
- * use the default size.
- */
-xfs_extlen_t
-xfs_get_cowextsz_hint(
- struct xfs_inode *ip)
-{
- xfs_extlen_t a, b;
-
- a = 0;
- if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
- a = ip->i_cowextsize;
- b = xfs_get_extsz_hint(ip);
-
- a = max(a, b);
- if (a == 0)
- return XFS_DEFAULT_COWEXTSZ_HINT;
- return a;
-}
-
-/*
* These two are wrapper routines around the xfs_ilock() routine used to
* centralize some grungy code. They are used in places that wish to lock the
* inode solely for reading the extents. The reason these places can't just
@@ -566,55 +523,6 @@ xfs_lock_two_inodes(
}
}
-uint
-xfs_ip2xflags(
- struct xfs_inode *ip)
-{
- uint flags = 0;
-
- if (ip->i_diflags & XFS_DIFLAG_ANY) {
- if (ip->i_diflags & XFS_DIFLAG_REALTIME)
- flags |= FS_XFLAG_REALTIME;
- if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
- flags |= FS_XFLAG_PREALLOC;
- if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
- flags |= FS_XFLAG_IMMUTABLE;
- if (ip->i_diflags & XFS_DIFLAG_APPEND)
- flags |= FS_XFLAG_APPEND;
- if (ip->i_diflags & XFS_DIFLAG_SYNC)
- flags |= FS_XFLAG_SYNC;
- if (ip->i_diflags & XFS_DIFLAG_NOATIME)
- flags |= FS_XFLAG_NOATIME;
- if (ip->i_diflags & XFS_DIFLAG_NODUMP)
- flags |= FS_XFLAG_NODUMP;
- if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
- flags |= FS_XFLAG_RTINHERIT;
- if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
- flags |= FS_XFLAG_PROJINHERIT;
- if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
- flags |= FS_XFLAG_NOSYMLINKS;
- if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
- flags |= FS_XFLAG_EXTSIZE;
- if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
- flags |= FS_XFLAG_EXTSZINHERIT;
- if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
- flags |= FS_XFLAG_NODEFRAG;
- if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
- flags |= FS_XFLAG_FILESTREAM;
- }
-
- if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
- if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
- flags |= FS_XFLAG_DAX;
- if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
- flags |= FS_XFLAG_COWEXTSIZE;
- }
-
- if (xfs_inode_has_attr_fork(ip))
- flags |= FS_XFLAG_HASATTR;
- return flags;
-}
-
/*
* Lookups up an inode from "name". If ci_name is not NULL, then a CI match
* is allowed, otherwise it has to be an exact match. If a CI match is found,
@@ -656,97 +564,6 @@ out_unlock:
return error;
}
-/* Propagate di_flags from a parent inode to a child inode. */
-static void
-xfs_inode_inherit_flags(
- struct xfs_inode *ip,
- const struct xfs_inode *pip)
-{
- unsigned int di_flags = 0;
- xfs_failaddr_t failaddr;
- umode_t mode = VFS_I(ip)->i_mode;
-
- if (S_ISDIR(mode)) {
- if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
- di_flags |= XFS_DIFLAG_RTINHERIT;
- if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
- di_flags |= XFS_DIFLAG_EXTSZINHERIT;
- ip->i_extsize = pip->i_extsize;
- }
- if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
- di_flags |= XFS_DIFLAG_PROJINHERIT;
- } else if (S_ISREG(mode)) {
- if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
- xfs_has_realtime(ip->i_mount))
- di_flags |= XFS_DIFLAG_REALTIME;
- if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
- di_flags |= XFS_DIFLAG_EXTSIZE;
- ip->i_extsize = pip->i_extsize;
- }
- }
- if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
- xfs_inherit_noatime)
- di_flags |= XFS_DIFLAG_NOATIME;
- if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
- xfs_inherit_nodump)
- di_flags |= XFS_DIFLAG_NODUMP;
- if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
- xfs_inherit_sync)
- di_flags |= XFS_DIFLAG_SYNC;
- if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
- xfs_inherit_nosymlinks)
- di_flags |= XFS_DIFLAG_NOSYMLINKS;
- if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
- xfs_inherit_nodefrag)
- di_flags |= XFS_DIFLAG_NODEFRAG;
- if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
- di_flags |= XFS_DIFLAG_FILESTREAM;
-
- ip->i_diflags |= di_flags;
-
- /*
- * Inode verifiers on older kernels only check that the extent size
- * hint is an integer multiple of the rt extent size on realtime files.
- * They did not check the hint alignment on a directory with both
- * rtinherit and extszinherit flags set. If the misaligned hint is
- * propagated from a directory into a new realtime file, new file
- * allocations will fail due to math errors in the rt allocator and/or
- * trip the verifiers. Validate the hint settings in the new file so
- * that we don't let broken hints propagate.
- */
- failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
- VFS_I(ip)->i_mode, ip->i_diflags);
- if (failaddr) {
- ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
- XFS_DIFLAG_EXTSZINHERIT);
- ip->i_extsize = 0;
- }
-}
-
-/* Propagate di_flags2 from a parent inode to a child inode. */
-static void
-xfs_inode_inherit_flags2(
- struct xfs_inode *ip,
- const struct xfs_inode *pip)
-{
- xfs_failaddr_t failaddr;
-
- if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
- ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
- ip->i_cowextsize = pip->i_cowextsize;
- }
- if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
- ip->i_diflags2 |= XFS_DIFLAG2_DAX;
-
- /* Don't let invalid cowextsize hints propagate. */
- failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
- VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
- if (failaddr) {
- ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
- ip->i_cowextsize = 0;
- }
-}
-
/*
* Initialise a newly allocated inode and return the in-core inode to the
* caller locked exclusively.
@@ -754,39 +571,15 @@ xfs_inode_inherit_flags2(
* Caller is responsible for unlocking the inode manually upon return
*/
int
-xfs_init_new_inode(
- struct mnt_idmap *idmap,
+xfs_icreate(
struct xfs_trans *tp,
- struct xfs_inode *pip,
xfs_ino_t ino,
- umode_t mode,
- xfs_nlink_t nlink,
- dev_t rdev,
- prid_t prid,
- bool init_xattrs,
+ const struct xfs_icreate_args *args,
struct xfs_inode **ipp)
{
- struct inode *dir = pip ? VFS_I(pip) : NULL;
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_inode *ip;
- unsigned int flags;
+ struct xfs_inode *ip = NULL;
int error;
- struct timespec64 tv;
- struct inode *inode;
-
- /*
- * Protect against obviously corrupt allocation btree records. Later
- * xfs_iget checks will catch re-allocation of other active in-memory
- * and on-disk inodes. If we don't catch reallocating the parent inode
- * here we will deadlock in xfs_iget() so we have to do these checks
- * first.
- */
- if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
- xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
- xfs_agno_mark_sick(mp, XFS_INO_TO_AGNO(mp, ino),
- XFS_SICK_AG_INOBT);
- return -EFSCORRUPTED;
- }
/*
* Get the in-core inode with the lock held exclusively to prevent
@@ -797,89 +590,8 @@ xfs_init_new_inode(
return error;
ASSERT(ip != NULL);
- inode = VFS_I(ip);
- set_nlink(inode, nlink);
- inode->i_rdev = rdev;
- ip->i_projid = prid;
-
- if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
- inode_fsuid_set(inode, idmap);
- inode->i_gid = dir->i_gid;
- inode->i_mode = mode;
- } else {
- inode_init_owner(idmap, inode, dir, mode);
- }
-
- /*
- * If the group ID of the new file does not match the effective group
- * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
- * (and only if the irix_sgid_inherit compatibility variable is set).
- */
- if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
- !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
- inode->i_mode &= ~S_ISGID;
-
- ip->i_disk_size = 0;
- ip->i_df.if_nextents = 0;
- ASSERT(ip->i_nblocks == 0);
-
- tv = inode_set_ctime_current(inode);
- inode_set_mtime_to_ts(inode, tv);
- inode_set_atime_to_ts(inode, tv);
-
- ip->i_extsize = 0;
- ip->i_diflags = 0;
-
- if (xfs_has_v3inodes(mp)) {
- inode_set_iversion(inode, 1);
- ip->i_cowextsize = 0;
- ip->i_crtime = tv;
- }
-
- flags = XFS_ILOG_CORE;
- switch (mode & S_IFMT) {
- case S_IFIFO:
- case S_IFCHR:
- case S_IFBLK:
- case S_IFSOCK:
- ip->i_df.if_format = XFS_DINODE_FMT_DEV;
- flags |= XFS_ILOG_DEV;
- break;
- case S_IFREG:
- case S_IFDIR:
- if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
- xfs_inode_inherit_flags(ip, pip);
- if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
- xfs_inode_inherit_flags2(ip, pip);
- fallthrough;
- case S_IFLNK:
- ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
- ip->i_df.if_bytes = 0;
- ip->i_df.if_data = NULL;
- break;
- default:
- ASSERT(0);
- }
-
- /*
- * If we need to create attributes immediately after allocating the
- * inode, initialise an empty attribute fork right now. We use the
- * default fork offset for attributes here as we don't know exactly what
- * size or how many attributes we might be adding. We can do this
- * safely here because we know the data fork is completely empty and
- * this saves us from needing to run a separate transaction to set the
- * fork offset in the immediate future.
- */
- if (init_xattrs && xfs_has_attr(mp)) {
- ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
- xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
- }
-
- /*
- * Log the new values stuffed into the inode.
- */
xfs_trans_ijoin(tp, ip, 0);
- xfs_trans_log_inode(tp, ip, flags);
+ xfs_inode_init(tp, args, ip);
/* now that we have an i_mode we can setup the inode structure */
xfs_setup_inode(ip);
@@ -888,158 +600,60 @@ xfs_init_new_inode(
return 0;
}
-/*
- * Decrement the link count on an inode & log the change. If this causes the
- * link count to go to zero, move the inode to AGI unlinked list so that it can
- * be freed when the last active reference goes away via xfs_inactive().
- */
+/* Return dquots for the ids that will be assigned to a new file. */
int
-xfs_droplink(
- struct xfs_trans *tp,
- struct xfs_inode *ip)
-{
- struct inode *inode = VFS_I(ip);
-
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
-
- if (inode->i_nlink == 0) {
- xfs_info_ratelimited(tp->t_mountp,
- "Inode 0x%llx link count dropped below zero. Pinning link count.",
- ip->i_ino);
- set_nlink(inode, XFS_NLINK_PINNED);
- }
- if (inode->i_nlink != XFS_NLINK_PINNED)
- drop_nlink(inode);
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- if (inode->i_nlink)
- return 0;
-
- return xfs_iunlink(tp, ip);
-}
-
-/*
- * Increment the link count on an inode & log the change.
- */
-void
-xfs_bumplink(
- struct xfs_trans *tp,
- struct xfs_inode *ip)
-{
- struct inode *inode = VFS_I(ip);
-
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
-
- if (inode->i_nlink == XFS_NLINK_PINNED - 1)
- xfs_info_ratelimited(tp->t_mountp,
- "Inode 0x%llx link count exceeded maximum. Pinning link count.",
- ip->i_ino);
- if (inode->i_nlink != XFS_NLINK_PINNED)
- inc_nlink(inode);
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-}
-
-#ifdef CONFIG_XFS_LIVE_HOOKS
-/*
- * Use a static key here to reduce the overhead of directory live update hooks.
- * If the compiler supports jump labels, the static branch will be replaced by
- * a nop sled when there are no hook users. Online fsck is currently the only
- * caller, so this is a reasonable tradeoff.
- *
- * Note: Patching the kernel code requires taking the cpu hotplug lock. Other
- * parts of the kernel allocate memory with that lock held, which means that
- * XFS callers cannot hold any locks that might be used by memory reclaim or
- * writeback when calling the static_branch_{inc,dec} functions.
- */
-DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_dir_hooks_switch);
-
-void
-xfs_dir_hook_disable(void)
-{
- xfs_hooks_switch_off(&xfs_dir_hooks_switch);
-}
-
-void
-xfs_dir_hook_enable(void)
-{
- xfs_hooks_switch_on(&xfs_dir_hooks_switch);
-}
-
-/* Call hooks for a directory update relating to a child dirent update. */
-inline void
-xfs_dir_update_hook(
- struct xfs_inode *dp,
- struct xfs_inode *ip,
- int delta,
- const struct xfs_name *name)
-{
- if (xfs_hooks_switched_on(&xfs_dir_hooks_switch)) {
- struct xfs_dir_update_params p = {
- .dp = dp,
- .ip = ip,
- .delta = delta,
- .name = name,
- };
- struct xfs_mount *mp = ip->i_mount;
-
- xfs_hooks_call(&mp->m_dir_update_hooks, 0, &p);
+xfs_icreate_dqalloc(
+ const struct xfs_icreate_args *args,
+ struct xfs_dquot **udqpp,
+ struct xfs_dquot **gdqpp,
+ struct xfs_dquot **pdqpp)
+{
+ struct inode *dir = VFS_I(args->pip);
+ kuid_t uid = GLOBAL_ROOT_UID;
+ kgid_t gid = GLOBAL_ROOT_GID;
+ prid_t prid = 0;
+ unsigned int flags = XFS_QMOPT_QUOTALL;
+
+ if (args->idmap) {
+ /*
+ * The uid/gid computation code must match what the VFS uses to
+ * assign i_[ug]id. INHERIT adjusts the gid computation for
+ * setgid/grpid systems.
+ */
+ uid = mapped_fsuid(args->idmap, i_user_ns(dir));
+ gid = mapped_fsgid(args->idmap, i_user_ns(dir));
+ prid = xfs_get_initial_prid(args->pip);
+ flags |= XFS_QMOPT_INHERIT;
}
-}
-/* Call the specified function during a directory update. */
-int
-xfs_dir_hook_add(
- struct xfs_mount *mp,
- struct xfs_dir_hook *hook)
-{
- return xfs_hooks_add(&mp->m_dir_update_hooks, &hook->dirent_hook);
-}
+ *udqpp = *gdqpp = *pdqpp = NULL;
-/* Stop calling the specified function during a directory update. */
-void
-xfs_dir_hook_del(
- struct xfs_mount *mp,
- struct xfs_dir_hook *hook)
-{
- xfs_hooks_del(&mp->m_dir_update_hooks, &hook->dirent_hook);
+ return xfs_qm_vop_dqalloc(args->pip, uid, gid, prid, flags, udqpp,
+ gdqpp, pdqpp);
}
-/* Configure directory update hook functions. */
-void
-xfs_dir_hook_setup(
- struct xfs_dir_hook *hook,
- notifier_fn_t mod_fn)
-{
- xfs_hook_setup(&hook->dirent_hook, mod_fn);
-}
-#endif /* CONFIG_XFS_LIVE_HOOKS */
-
int
xfs_create(
- struct mnt_idmap *idmap,
- struct xfs_inode *dp,
+ const struct xfs_icreate_args *args,
struct xfs_name *name,
- umode_t mode,
- dev_t rdev,
- bool init_xattrs,
- xfs_inode_t **ipp)
+ struct xfs_inode **ipp)
{
- int is_dir = S_ISDIR(mode);
+ struct xfs_inode *dp = args->pip;
+ struct xfs_dir_update du = {
+ .dp = dp,
+ .name = name,
+ };
struct xfs_mount *mp = dp->i_mount;
- struct xfs_inode *ip = NULL;
struct xfs_trans *tp = NULL;
- int error;
- bool unlock_dp_on_error = false;
- prid_t prid;
- struct xfs_dquot *udqp = NULL;
- struct xfs_dquot *gdqp = NULL;
- struct xfs_dquot *pdqp = NULL;
+ struct xfs_dquot *udqp;
+ struct xfs_dquot *gdqp;
+ struct xfs_dquot *pdqp;
struct xfs_trans_res *tres;
- uint resblks;
xfs_ino_t ino;
- struct xfs_parent_args *ppargs;
+ bool unlock_dp_on_error = false;
+ bool is_dir = S_ISDIR(args->mode);
+ uint resblks;
+ int error;
trace_xfs_create(dp, name);
@@ -1048,15 +662,8 @@ xfs_create(
if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
return -EIO;
- prid = xfs_get_initial_prid(dp);
-
- /*
- * Make sure that we have allocated dquot(s) on disk.
- */
- error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
- mapped_fsgid(idmap, &init_user_ns), prid,
- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
- &udqp, &gdqp, &pdqp);
+ /* Make sure that we have allocated dquot(s) on disk. */
+ error = xfs_icreate_dqalloc(args, &udqp, &gdqp, &pdqp);
if (error)
return error;
@@ -1068,7 +675,7 @@ xfs_create(
tres = &M_RES(mp)->tr_create;
}
- error = xfs_parent_start(mp, &ppargs);
+ error = xfs_parent_start(mp, &du.ppargs);
if (error)
goto out_release_dquots;
@@ -1097,10 +704,9 @@ xfs_create(
* entry pointing to them, but a directory also the "." entry
* pointing to itself.
*/
- error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
+ error = xfs_dialloc(&tp, dp->i_ino, args->mode, &ino);
if (!error)
- error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
- is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
+ error = xfs_icreate(tp, ino, args, &du.ip);
if (error)
goto out_trans_cancel;
@@ -1113,38 +719,9 @@ xfs_create(
*/
xfs_trans_ijoin(tp, dp, 0);
- error = xfs_dir_createname(tp, dp, name, ip->i_ino,
- resblks - XFS_IALLOC_SPACE_RES(mp));
- if (error) {
- ASSERT(error != -ENOSPC);
+ error = xfs_dir_create_child(tp, resblks, &du);
+ if (error)
goto out_trans_cancel;
- }
- xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
-
- if (is_dir) {
- error = xfs_dir_init(tp, ip, dp);
- if (error)
- goto out_trans_cancel;
-
- xfs_bumplink(tp, dp);
- }
-
- /*
- * If we have parent pointers, we need to add the attribute containing
- * the parent information now.
- */
- if (ppargs) {
- error = xfs_parent_addname(tp, ppargs, dp, name, ip);
- if (error)
- goto out_trans_cancel;
- }
-
- /*
- * Create ip with a reference from dp, and add '.' and '..' references
- * if it's a directory.
- */
- xfs_dir_update_hook(dp, ip, 1, name);
/*
* If this is a synchronous mount, make sure that the
@@ -1159,7 +736,7 @@ xfs_create(
* These ids of the inode couldn't have changed since the new
* inode has been locked ever since it was created.
*/
- xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
+ xfs_qm_vop_create_dqattach(tp, du.ip, udqp, gdqp, pdqp);
error = xfs_trans_commit(tp);
if (error)
@@ -1169,10 +746,10 @@ xfs_create(
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
- *ipp = ip;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ *ipp = du.ip;
+ xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
return 0;
out_trans_cancel:
@@ -1183,13 +760,13 @@ xfs_create(
* setup of the inode and release the inode. This prevents recursive
* transactions and deadlocks from xfs_inactive.
*/
- if (ip) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_finish_inode_setup(ip);
- xfs_irele(ip);
+ if (du.ip) {
+ xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
+ xfs_finish_inode_setup(du.ip);
+ xfs_irele(du.ip);
}
out_parent:
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
out_release_dquots:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -1202,36 +779,28 @@ xfs_create(
int
xfs_create_tmpfile(
- struct mnt_idmap *idmap,
- struct xfs_inode *dp,
- umode_t mode,
- bool init_xattrs,
+ const struct xfs_icreate_args *args,
struct xfs_inode **ipp)
{
+ struct xfs_inode *dp = args->pip;
struct xfs_mount *mp = dp->i_mount;
struct xfs_inode *ip = NULL;
struct xfs_trans *tp = NULL;
- int error;
- prid_t prid;
- struct xfs_dquot *udqp = NULL;
- struct xfs_dquot *gdqp = NULL;
- struct xfs_dquot *pdqp = NULL;
+ struct xfs_dquot *udqp;
+ struct xfs_dquot *gdqp;
+ struct xfs_dquot *pdqp;
struct xfs_trans_res *tres;
- uint resblks;
xfs_ino_t ino;
+ uint resblks;
+ int error;
+
+ ASSERT(args->flags & XFS_ICREATE_TMPFILE);
if (xfs_is_shutdown(mp))
return -EIO;
- prid = xfs_get_initial_prid(dp);
-
- /*
- * Make sure that we have allocated dquot(s) on disk.
- */
- error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
- mapped_fsgid(idmap, &init_user_ns), prid,
- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
- &udqp, &gdqp, &pdqp);
+ /* Make sure that we have allocated dquot(s) on disk. */
+ error = xfs_icreate_dqalloc(args, &udqp, &gdqp, &pdqp);
if (error)
return error;
@@ -1243,10 +812,9 @@ xfs_create_tmpfile(
if (error)
goto out_release_dquots;
- error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
+ error = xfs_dialloc(&tp, dp->i_ino, args->mode, &ino);
if (!error)
- error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
- 0, 0, prid, init_xattrs, &ip);
+ error = xfs_icreate(tp, ino, args, &ip);
if (error)
goto out_trans_cancel;
@@ -1303,11 +871,15 @@ xfs_link(
struct xfs_inode *sip,
struct xfs_name *target_name)
{
+ struct xfs_dir_update du = {
+ .dp = tdp,
+ .name = target_name,
+ .ip = sip,
+ };
struct xfs_mount *mp = tdp->i_mount;
struct xfs_trans *tp;
int error, nospace_error = 0;
int resblks;
- struct xfs_parent_args *ppargs;
trace_xfs_link(tdp, target_name);
@@ -1326,7 +898,7 @@ xfs_link(
if (error)
goto std_return;
- error = xfs_parent_start(mp, &ppargs);
+ error = xfs_parent_start(mp, &du.ppargs);
if (error)
goto std_return;
@@ -1341,7 +913,7 @@ xfs_link(
* pointers are enabled because we can't back out if the xattrs must
* grow.
*/
- if (ppargs && nospace_error) {
+ if (du.ppargs && nospace_error) {
error = nospace_error;
goto error_return;
}
@@ -1368,47 +940,9 @@ xfs_link(
}
}
- if (!resblks) {
- error = xfs_dir_canenter(tp, tdp, target_name);
- if (error)
- goto error_return;
- }
-
- /*
- * Handle initial link state of O_TMPFILE inode
- */
- if (VFS_I(sip)->i_nlink == 0) {
- struct xfs_perag *pag;
-
- pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
- error = xfs_iunlink_remove(tp, pag, sip);
- xfs_perag_put(pag);
- if (error)
- goto error_return;
- }
-
- error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
- resblks);
+ error = xfs_dir_add_child(tp, resblks, &du);
if (error)
goto error_return;
- xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
-
- xfs_bumplink(tp, sip);
-
- /*
- * If we have parent pointers, we now need to add the parent record to
- * the attribute fork of the inode. If this is the initial parent
- * attribute, we need to create it correctly, otherwise we can just add
- * the parent to the inode.
- */
- if (ppargs) {
- error = xfs_parent_addname(tp, ppargs, tdp, target_name, sip);
- if (error)
- goto error_return;
- }
-
- xfs_dir_update_hook(tdp, sip, 1, target_name);
/*
* If this is a synchronous mount, make sure that the
@@ -1421,7 +955,7 @@ xfs_link(
error = xfs_trans_commit(tp);
xfs_iunlock(tdp, XFS_ILOCK_EXCL);
xfs_iunlock(sip, XFS_ILOCK_EXCL);
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
return error;
error_return:
@@ -1429,7 +963,7 @@ xfs_link(
xfs_iunlock(tdp, XFS_ILOCK_EXCL);
xfs_iunlock(sip, XFS_ILOCK_EXCL);
out_parent:
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
std_return:
if (error == -ENOSPC && nospace_error)
error = nospace_error;
@@ -1595,7 +1129,7 @@ xfs_release(
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
return 0;
- if (xfs_can_free_eofblocks(ip, false)) {
+ if (xfs_can_free_eofblocks(ip)) {
/*
* Check if the inode is being opened, written and closed
* frequently and we have delayed allocation blocks outstanding
@@ -1856,15 +1390,13 @@ xfs_inode_needs_inactive(
/*
* This file isn't being freed, so check if there are post-eof blocks
- * to free. @force is true because we are evicting an inode from the
- * cache. Post-eof blocks must be freed, lest we end up with broken
- * free space accounting.
+ * to free.
*
* Note: don't bother with iolock here since lockdep complains about
* acquiring it in reclaim context. We have the only reference to the
* inode at this point anyways.
*/
- return xfs_can_free_eofblocks(ip, true);
+ return xfs_can_free_eofblocks(ip);
}
/*
@@ -1947,15 +1479,11 @@ xfs_inactive(
if (VFS_I(ip)->i_nlink != 0) {
/*
- * force is true because we are evicting an inode from the
- * cache. Post-eof blocks must be freed, lest we end up with
- * broken free space accounting.
- *
* Note: don't bother with iolock here since lockdep complains
* about acquiring it in reclaim context. We have the only
* reference to the inode at this point anyways.
*/
- if (xfs_can_free_eofblocks(ip, true))
+ if (xfs_can_free_eofblocks(ip))
error = xfs_free_eofblocks(ip);
goto out;
@@ -2022,39 +1550,6 @@ out:
}
/*
- * In-Core Unlinked List Lookups
- * =============================
- *
- * Every inode is supposed to be reachable from some other piece of metadata
- * with the exception of the root directory. Inodes with a connection to a
- * file descriptor but not linked from anywhere in the on-disk directory tree
- * are collectively known as unlinked inodes, though the filesystem itself
- * maintains links to these inodes so that on-disk metadata are consistent.
- *
- * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI
- * header contains a number of buckets that point to an inode, and each inode
- * record has a pointer to the next inode in the hash chain. This
- * singly-linked list causes scaling problems in the iunlink remove function
- * because we must walk that list to find the inode that points to the inode
- * being removed from the unlinked hash bucket list.
- *
- * Hence we keep an in-memory double linked list to link each inode on an
- * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
- * based lists would require having 64 list heads in the perag, one for each
- * list. This is expensive in terms of memory (think millions of AGs) and cache
- * misses on lookups. Instead, use the fact that inodes on the unlinked list
- * must be referenced at the VFS level to keep them on the list and hence we
- * have an existence guarantee for inodes on the unlinked list.
- *
- * Given we have an existence guarantee, we can use lockless inode cache lookups
- * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
- * for the double linked unlinked list, and we don't need any extra locking to
- * keep the list safe as all manipulations are done under the AGI buffer lock.
- * Keeping the list up to date does not require memory allocation, just finding
- * the XFS inode and updating the next/prev unlinked list aginos.
- */
-
-/*
* Find an inode on the unlinked list. This does not take references to the
* inode as we have existence guarantees by holding the AGI buffer lock and that
* only unlinked, referenced inodes can be on the unlinked inode list. If we
@@ -2089,75 +1584,11 @@ xfs_iunlink_lookup(
}
/*
- * Update the prev pointer of the next agino. Returns -ENOLINK if the inode
- * is not in cache.
- */
-static int
-xfs_iunlink_update_backref(
- struct xfs_perag *pag,
- xfs_agino_t prev_agino,
- xfs_agino_t next_agino)
-{
- struct xfs_inode *ip;
-
- /* No update necessary if we are at the end of the list. */
- if (next_agino == NULLAGINO)
- return 0;
-
- ip = xfs_iunlink_lookup(pag, next_agino);
- if (!ip)
- return -ENOLINK;
-
- ip->i_prev_unlinked = prev_agino;
- return 0;
-}
-
-/*
- * Point the AGI unlinked bucket at an inode and log the results. The caller
- * is responsible for validating the old value.
- */
-STATIC int
-xfs_iunlink_update_bucket(
- struct xfs_trans *tp,
- struct xfs_perag *pag,
- struct xfs_buf *agibp,
- unsigned int bucket_index,
- xfs_agino_t new_agino)
-{
- struct xfs_agi *agi = agibp->b_addr;
- xfs_agino_t old_value;
- int offset;
-
- ASSERT(xfs_verify_agino_or_null(pag, new_agino));
-
- old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
- old_value, new_agino);
-
- /*
- * We should never find the head of the list already set to the value
- * passed in because either we're adding or removing ourselves from the
- * head of the list.
- */
- if (old_value == new_agino) {
- xfs_buf_mark_corrupt(agibp);
- xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
- return -EFSCORRUPTED;
- }
-
- agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
- offset = offsetof(struct xfs_agi, agi_unlinked) +
- (sizeof(xfs_agino_t) * bucket_index);
- xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
- return 0;
-}
-
-/*
* Load the inode @next_agino into the cache and set its prev_unlinked pointer
* to @prev_agino. Caller must hold the AGI to synchronize with other changes
* to the unlinked list.
*/
-STATIC int
+int
xfs_iunlink_reload_next(
struct xfs_trans *tp,
struct xfs_buf *agibp,
@@ -2213,187 +1644,6 @@ rele:
return error;
}
-static int
-xfs_iunlink_insert_inode(
- struct xfs_trans *tp,
- struct xfs_perag *pag,
- struct xfs_buf *agibp,
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_agi *agi = agibp->b_addr;
- xfs_agino_t next_agino;
- xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
- short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
- int error;
-
- /*
- * Get the index into the agi hash table for the list this inode will
- * go on. Make sure the pointer isn't garbage and that this inode
- * isn't already on the list.
- */
- next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- if (next_agino == agino ||
- !xfs_verify_agino_or_null(pag, next_agino)) {
- xfs_buf_mark_corrupt(agibp);
- xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
- return -EFSCORRUPTED;
- }
-
- /*
- * Update the prev pointer in the next inode to point back to this
- * inode.
- */
- error = xfs_iunlink_update_backref(pag, agino, next_agino);
- if (error == -ENOLINK)
- error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
- if (error)
- return error;
-
- if (next_agino != NULLAGINO) {
- /*
- * There is already another inode in the bucket, so point this
- * inode to the current head of the list.
- */
- error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
- if (error)
- return error;
- ip->i_next_unlinked = next_agino;
- }
-
- /* Point the head of the list to point to this inode. */
- ip->i_prev_unlinked = NULLAGINO;
- return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
-}
-
-/*
- * This is called when the inode's link count has gone to 0 or we are creating
- * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
- *
- * We place the on-disk inode on a list in the AGI. It will be pulled from this
- * list when the inode is freed.
- */
-int
-xfs_iunlink(
- struct xfs_trans *tp,
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_perag *pag;
- struct xfs_buf *agibp;
- int error;
-
- ASSERT(VFS_I(ip)->i_nlink == 0);
- ASSERT(VFS_I(ip)->i_mode != 0);
- trace_xfs_iunlink(ip);
-
- pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
-
- /* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(pag, tp, 0, &agibp);
- if (error)
- goto out;
-
- error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
-out:
- xfs_perag_put(pag);
- return error;
-}
-
-static int
-xfs_iunlink_remove_inode(
- struct xfs_trans *tp,
- struct xfs_perag *pag,
- struct xfs_buf *agibp,
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_agi *agi = agibp->b_addr;
- xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
- xfs_agino_t head_agino;
- short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
- int error;
-
- trace_xfs_iunlink_remove(ip);
-
- /*
- * Get the index into the agi hash table for the list this inode will
- * go on. Make sure the head pointer isn't garbage.
- */
- head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- if (!xfs_verify_agino(pag, head_agino)) {
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
- agi, sizeof(*agi));
- xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
- return -EFSCORRUPTED;
- }
-
- /*
- * Set our inode's next_unlinked pointer to NULL and then return
- * the old pointer value so that we can update whatever was previous
- * to us in the list to point to whatever was next in the list.
- */
- error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
- if (error)
- return error;
-
- /*
- * Update the prev pointer in the next inode to point back to previous
- * inode in the chain.
- */
- error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
- ip->i_next_unlinked);
- if (error == -ENOLINK)
- error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
- ip->i_next_unlinked);
- if (error)
- return error;
-
- if (head_agino != agino) {
- struct xfs_inode *prev_ip;
-
- prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
- if (!prev_ip) {
- xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
- return -EFSCORRUPTED;
- }
-
- error = xfs_iunlink_log_inode(tp, prev_ip, pag,
- ip->i_next_unlinked);
- prev_ip->i_next_unlinked = ip->i_next_unlinked;
- } else {
- /* Point the head of the list to the next unlinked inode. */
- error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
- ip->i_next_unlinked);
- }
-
- ip->i_next_unlinked = NULLAGINO;
- ip->i_prev_unlinked = 0;
- return error;
-}
-
-/*
- * Pull the on-disk inode from the AGI unlinked list.
- */
-int
-xfs_iunlink_remove(
- struct xfs_trans *tp,
- struct xfs_perag *pag,
- struct xfs_inode *ip)
-{
- struct xfs_buf *agibp;
- int error;
-
- trace_xfs_iunlink_remove(ip);
-
- /* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(pag, tp, 0, &agibp);
- if (error)
- return error;
-
- return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
-}
-
/*
* Look up the inode number specified and if it is not already marked XFS_ISTALE
* mark it stale. We should only find clean inodes in this lookup that aren't
@@ -2548,11 +1798,26 @@ xfs_ifree_cluster(
* This buffer may not have been correctly initialised as we
* didn't read it from disk. That's not important because we are
* only using to mark the buffer as stale in the log, and to
- * attach stale cached inodes on it. That means it will never be
- * dispatched for IO. If it is, we want to know about it, and we
- * want it to fail. We can acheive this by adding a write
- * verifier to the buffer.
+ * attach stale cached inodes on it.
+ *
+ * For the inode that triggered the cluster freeing, this
+ * attachment may occur in xfs_inode_item_precommit() after we
+ * have marked this buffer stale. If this buffer was not in
+ * memory before xfs_ifree_cluster() started, it will not be
+ * marked XBF_DONE and this will cause problems later in
+ * xfs_inode_item_precommit() when we trip over a (stale, !done)
+ * buffer to attached to the transaction.
+ *
+ * Hence we have to mark the buffer as XFS_DONE here. This is
+ * safe because we are also marking the buffer as XBF_STALE and
+ * XFS_BLI_STALE. That means it will never be dispatched for
+ * IO and it won't be unlocked until the cluster freeing has
+ * been committed to the journal and the buffer unpinned. If it
+ * is written, we want to know about it, and we want it to
+ * fail. We can acheive this by adding a write verifier to the
+ * buffer.
*/
+ bp->b_flags |= XBF_DONE;
bp->b_ops = &xfs_inode_buf_ops;
/*
@@ -2597,36 +1862,10 @@ xfs_ifree(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
- /*
- * Free the inode first so that we guarantee that the AGI lock is going
- * to be taken before we remove the inode from the unlinked list. This
- * makes the AGI lock -> unlinked list modification order the same as
- * used in O_TMPFILE creation.
- */
- error = xfs_difree(tp, pag, ip->i_ino, &xic);
- if (error)
- goto out;
-
- error = xfs_iunlink_remove(tp, pag, ip);
+ error = xfs_inode_uninit(tp, pag, ip, &xic);
if (error)
goto out;
- /*
- * Free any local-format data sitting around before we reset the
- * data fork to extents format. Note that the attr fork data has
- * already been freed by xfs_attr_inactive.
- */
- if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
- kfree(ip->i_df.if_data);
- ip->i_df.if_data = NULL;
- ip->i_df.if_bytes = 0;
- }
-
- VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
- ip->i_diflags = 0;
- ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
- ip->i_forkoff = 0; /* mark the attr fork not in use */
- ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
@@ -2635,13 +1874,6 @@ xfs_ifree(
iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
spin_unlock(&iip->ili_lock);
- /*
- * Bump the generation count so no one will be confused
- * by reincarnations of this inode.
- */
- VFS_I(ip)->i_generation++;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
if (xic.deleted)
error = xfs_ifree_cluster(tp, pag, ip, &xic);
out:
@@ -2725,13 +1957,17 @@ xfs_remove(
struct xfs_name *name,
struct xfs_inode *ip)
{
+ struct xfs_dir_update du = {
+ .dp = dp,
+ .name = name,
+ .ip = ip,
+ };
struct xfs_mount *mp = dp->i_mount;
struct xfs_trans *tp = NULL;
int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
int dontcare;
int error = 0;
uint resblks;
- struct xfs_parent_args *ppargs;
trace_xfs_remove(dp, name);
@@ -2748,7 +1984,7 @@ xfs_remove(
if (error)
goto std_return;
- error = xfs_parent_start(mp, &ppargs);
+ error = xfs_parent_start(mp, &du.ppargs);
if (error)
goto std_return;
@@ -2771,76 +2007,10 @@ xfs_remove(
goto out_parent;
}
- /*
- * If we're removing a directory perform some additional validation.
- */
- if (is_dir) {
- ASSERT(VFS_I(ip)->i_nlink >= 2);
- if (VFS_I(ip)->i_nlink != 2) {
- error = -ENOTEMPTY;
- goto out_trans_cancel;
- }
- if (!xfs_dir_isempty(ip)) {
- error = -ENOTEMPTY;
- goto out_trans_cancel;
- }
-
- /* Drop the link from ip's "..". */
- error = xfs_droplink(tp, dp);
- if (error)
- goto out_trans_cancel;
-
- /* Drop the "." link from ip to self. */
- error = xfs_droplink(tp, ip);
- if (error)
- goto out_trans_cancel;
-
- /*
- * Point the unlinked child directory's ".." entry to the root
- * directory to eliminate back-references to inodes that may
- * get freed before the child directory is closed. If the fs
- * gets shrunk, this can lead to dirent inode validation errors.
- */
- if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
- error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
- tp->t_mountp->m_sb.sb_rootino, 0);
- if (error)
- goto out_trans_cancel;
- }
- } else {
- /*
- * When removing a non-directory we need to log the parent
- * inode here. For a directory this is done implicitly
- * by the xfs_droplink call for the ".." entry.
- */
- xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
- }
- xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-
- /* Drop the link from dp to ip. */
- error = xfs_droplink(tp, ip);
+ error = xfs_dir_remove_child(tp, resblks, &du);
if (error)
goto out_trans_cancel;
- error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
- if (error) {
- ASSERT(error != -ENOENT);
- goto out_trans_cancel;
- }
-
- /* Remove parent pointer. */
- if (ppargs) {
- error = xfs_parent_removename(tp, ppargs, dp, name, ip);
- if (error)
- goto out_trans_cancel;
- }
-
- /*
- * Drop the link from dp to ip, and if ip was a directory, remove the
- * '.' and '..' references since we freed the directory.
- */
- xfs_dir_update_hook(dp, ip, -1, name);
-
/*
* If this is a synchronous mount, make sure that the
* remove transaction goes to disk before returning to
@@ -2858,7 +2028,7 @@ xfs_remove(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
return 0;
out_trans_cancel:
@@ -2867,7 +2037,7 @@ xfs_remove(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
out_parent:
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
std_return:
return error;
}
@@ -2947,160 +2117,6 @@ xfs_sort_inodes(
}
}
-static int
-xfs_finish_rename(
- struct xfs_trans *tp)
-{
- /*
- * If this is a synchronous mount, make sure that the rename transaction
- * goes to disk before returning to the user.
- */
- if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
- xfs_trans_set_sync(tp);
-
- return xfs_trans_commit(tp);
-}
-
-/*
- * xfs_cross_rename()
- *
- * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
- */
-STATIC int
-xfs_cross_rename(
- struct xfs_trans *tp,
- struct xfs_inode *dp1,
- struct xfs_name *name1,
- struct xfs_inode *ip1,
- struct xfs_parent_args *ip1_ppargs,
- struct xfs_inode *dp2,
- struct xfs_name *name2,
- struct xfs_inode *ip2,
- struct xfs_parent_args *ip2_ppargs,
- int spaceres)
-{
- int error = 0;
- int ip1_flags = 0;
- int ip2_flags = 0;
- int dp2_flags = 0;
-
- /* Swap inode number for dirent in first parent */
- error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
- if (error)
- goto out_trans_abort;
-
- /* Swap inode number for dirent in second parent */
- error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
- if (error)
- goto out_trans_abort;
-
- /*
- * If we're renaming one or more directories across different parents,
- * update the respective ".." entries (and link counts) to match the new
- * parents.
- */
- if (dp1 != dp2) {
- dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
-
- if (S_ISDIR(VFS_I(ip2)->i_mode)) {
- error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
- dp1->i_ino, spaceres);
- if (error)
- goto out_trans_abort;
-
- /* transfer ip2 ".." reference to dp1 */
- if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
- error = xfs_droplink(tp, dp2);
- if (error)
- goto out_trans_abort;
- xfs_bumplink(tp, dp1);
- }
-
- /*
- * Although ip1 isn't changed here, userspace needs
- * to be warned about the change, so that applications
- * relying on it (like backup ones), will properly
- * notify the change
- */
- ip1_flags |= XFS_ICHGTIME_CHG;
- ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
- }
-
- if (S_ISDIR(VFS_I(ip1)->i_mode)) {
- error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
- dp2->i_ino, spaceres);
- if (error)
- goto out_trans_abort;
-
- /* transfer ip1 ".." reference to dp2 */
- if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
- error = xfs_droplink(tp, dp1);
- if (error)
- goto out_trans_abort;
- xfs_bumplink(tp, dp2);
- }
-
- /*
- * Although ip2 isn't changed here, userspace needs
- * to be warned about the change, so that applications
- * relying on it (like backup ones), will properly
- * notify the change
- */
- ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
- ip2_flags |= XFS_ICHGTIME_CHG;
- }
- }
-
- /* Schedule parent pointer replacements */
- if (ip1_ppargs) {
- error = xfs_parent_replacename(tp, ip1_ppargs, dp1, name1, dp2,
- name2, ip1);
- if (error)
- goto out_trans_abort;
- }
-
- if (ip2_ppargs) {
- error = xfs_parent_replacename(tp, ip2_ppargs, dp2, name2, dp1,
- name1, ip2);
- if (error)
- goto out_trans_abort;
- }
-
- if (ip1_flags) {
- xfs_trans_ichgtime(tp, ip1, ip1_flags);
- xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
- }
- if (ip2_flags) {
- xfs_trans_ichgtime(tp, ip2, ip2_flags);
- xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
- }
- if (dp2_flags) {
- xfs_trans_ichgtime(tp, dp2, dp2_flags);
- xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
- }
- xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
-
- /*
- * Inform our hook clients that we've finished an exchange operation as
- * follows: removed the source and target files from their directories;
- * added the target to the source directory; and added the source to
- * the target directory. All inodes are locked, so it's ok to model a
- * rename this way so long as we say we deleted entries before we add
- * new ones.
- */
- xfs_dir_update_hook(dp1, ip1, -1, name1);
- xfs_dir_update_hook(dp2, ip2, -1, name2);
- xfs_dir_update_hook(dp1, ip2, 1, name1);
- xfs_dir_update_hook(dp2, ip1, 1, name2);
-
- return xfs_finish_rename(tp);
-
-out_trans_abort:
- xfs_trans_cancel(tp);
- return error;
-}
-
/*
* xfs_rename_alloc_whiteout()
*
@@ -3116,12 +2132,17 @@ xfs_rename_alloc_whiteout(
struct xfs_inode *dp,
struct xfs_inode **wip)
{
+ struct xfs_icreate_args args = {
+ .idmap = idmap,
+ .pip = dp,
+ .mode = S_IFCHR | WHITEOUT_MODE,
+ .flags = XFS_ICREATE_TMPFILE,
+ };
struct xfs_inode *tmpfile;
struct qstr name;
int error;
- error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
- xfs_has_parent(dp->i_mount), &tmpfile);
+ error = xfs_create_tmpfile(&args, &tmpfile);
if (error)
return error;
@@ -3161,13 +2182,20 @@ xfs_rename(
struct xfs_inode *target_ip,
unsigned int flags)
{
+ struct xfs_dir_update du_src = {
+ .dp = src_dp,
+ .name = src_name,
+ .ip = src_ip,
+ };
+ struct xfs_dir_update du_tgt = {
+ .dp = target_dp,
+ .name = target_name,
+ .ip = target_ip,
+ };
+ struct xfs_dir_update du_wip = { };
struct xfs_mount *mp = src_dp->i_mount;
struct xfs_trans *tp;
- struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
- struct xfs_parent_args *src_ppargs = NULL;
- struct xfs_parent_args *tgt_ppargs = NULL;
- struct xfs_parent_args *wip_ppargs = NULL;
int i;
int num_inodes = __XFS_SORT_INODES;
bool new_parent = (src_dp != target_dp);
@@ -3187,8 +2215,8 @@ xfs_rename(
* appropriately.
*/
if (flags & RENAME_WHITEOUT) {
- error = xfs_rename_alloc_whiteout(idmap, src_name,
- target_dp, &wip);
+ error = xfs_rename_alloc_whiteout(idmap, src_name, target_dp,
+ &du_wip.ip);
if (error)
return error;
@@ -3196,21 +2224,21 @@ xfs_rename(
src_name->type = XFS_DIR3_FT_CHRDEV;
}
- xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
- inodes, &num_inodes);
+ xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, du_wip.ip,
+ inodes, &num_inodes);
- error = xfs_parent_start(mp, &src_ppargs);
+ error = xfs_parent_start(mp, &du_src.ppargs);
if (error)
goto out_release_wip;
- if (wip) {
- error = xfs_parent_start(mp, &wip_ppargs);
+ if (du_wip.ip) {
+ error = xfs_parent_start(mp, &du_wip.ppargs);
if (error)
goto out_src_ppargs;
}
if (target_ip) {
- error = xfs_parent_start(mp, &tgt_ppargs);
+ error = xfs_parent_start(mp, &du_tgt.ppargs);
if (error)
goto out_wip_ppargs;
}
@@ -3218,7 +2246,7 @@ xfs_rename(
retry:
nospace_error = 0;
spaceres = xfs_rename_space_res(mp, src_name->len, target_ip != NULL,
- target_name->len, wip != NULL);
+ target_name->len, du_wip.ip != NULL);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
if (error == -ENOSPC) {
nospace_error = error;
@@ -3233,7 +2261,7 @@ retry:
* We don't allow reservationless renaming when parent pointers are
* enabled because we can't back out if the xattrs must grow.
*/
- if (src_ppargs && nospace_error) {
+ if (du_src.ppargs && nospace_error) {
error = nospace_error;
xfs_trans_cancel(tp);
goto out_tgt_ppargs;
@@ -3265,8 +2293,8 @@ retry:
xfs_trans_ijoin(tp, src_ip, 0);
if (target_ip)
xfs_trans_ijoin(tp, target_ip, 0);
- if (wip)
- xfs_trans_ijoin(tp, wip, 0);
+ if (du_wip.ip)
+ xfs_trans_ijoin(tp, du_wip.ip, 0);
/*
* If we are using project inheritance, we only allow renames
@@ -3281,11 +2309,11 @@ retry:
/* RENAME_EXCHANGE is unique from here on. */
if (flags & RENAME_EXCHANGE) {
- error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
- src_ppargs, target_dp, target_name, target_ip,
- tgt_ppargs, spaceres);
- nospace_error = 0;
- goto out_unlock;
+ error = xfs_dir_exchange_children(tp, &du_src, &du_tgt,
+ spaceres);
+ if (error)
+ goto out_trans_cancel;
+ goto out_commit;
}
/*
@@ -3318,39 +2346,12 @@ retry:
* We don't allow quotaless renaming when parent pointers are enabled
* because we can't back out if the xattrs must grow.
*/
- if (src_ppargs && nospace_error) {
+ if (du_src.ppargs && nospace_error) {
error = nospace_error;
goto out_trans_cancel;
}
/*
- * Check for expected errors before we dirty the transaction
- * so we can return an error without a transaction abort.
- */
- if (target_ip == NULL) {
- /*
- * If there's no space reservation, check the entry will
- * fit before actually inserting it.
- */
- if (!spaceres) {
- error = xfs_dir_canenter(tp, target_dp, target_name);
- if (error)
- goto out_trans_cancel;
- }
- } else {
- /*
- * If target exists and it's a directory, check that whether
- * it can be destroyed.
- */
- if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
- (!xfs_dir_isempty(target_ip) ||
- (VFS_I(target_ip)->i_nlink > 2))) {
- error = -EEXIST;
- goto out_trans_cancel;
- }
- }
-
- /*
* Lock the AGI buffers we need to handle bumping the nlink of the
* whiteout inode off the unlinked list and to handle dropping the
* nlink of the target inode. Per locking order rules, do this in
@@ -3361,7 +2362,7 @@ retry:
* target_ip is either null or an empty directory.
*/
for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
- if (inodes[i] == wip ||
+ if (inodes[i] == du_wip.ip ||
(inodes[i] == target_ip &&
(VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
struct xfs_perag *pag;
@@ -3376,188 +2377,29 @@ retry:
}
}
- /*
- * Directory entry creation below may acquire the AGF. Remove
- * the whiteout from the unlinked list first to preserve correct
- * AGI/AGF locking order. This dirties the transaction so failures
- * after this point will abort and log recovery will clean up the
- * mess.
- *
- * For whiteouts, we need to bump the link count on the whiteout
- * inode. After this point, we have a real link, clear the tmpfile
- * state flag from the inode so it doesn't accidentally get misused
- * in future.
- */
- if (wip) {
- struct xfs_perag *pag;
-
- ASSERT(VFS_I(wip)->i_nlink == 0);
-
- pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
- error = xfs_iunlink_remove(tp, pag, wip);
- xfs_perag_put(pag);
- if (error)
- goto out_trans_cancel;
-
- xfs_bumplink(tp, wip);
- VFS_I(wip)->i_state &= ~I_LINKABLE;
- }
-
- /*
- * Set up the target.
- */
- if (target_ip == NULL) {
- /*
- * If target does not exist and the rename crosses
- * directories, adjust the target directory link count
- * to account for the ".." reference from the new entry.
- */
- error = xfs_dir_createname(tp, target_dp, target_name,
- src_ip->i_ino, spaceres);
- if (error)
- goto out_trans_cancel;
-
- xfs_trans_ichgtime(tp, target_dp,
- XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-
- if (new_parent && src_is_directory) {
- xfs_bumplink(tp, target_dp);
- }
- } else { /* target_ip != NULL */
- /*
- * Link the source inode under the target name.
- * If the source inode is a directory and we are moving
- * it across directories, its ".." entry will be
- * inconsistent until we replace that down below.
- *
- * In case there is already an entry with the same
- * name at the destination directory, remove it first.
- */
- error = xfs_dir_replace(tp, target_dp, target_name,
- src_ip->i_ino, spaceres);
- if (error)
- goto out_trans_cancel;
-
- xfs_trans_ichgtime(tp, target_dp,
- XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-
- /*
- * Decrement the link count on the target since the target
- * dir no longer points to it.
- */
- error = xfs_droplink(tp, target_ip);
- if (error)
- goto out_trans_cancel;
-
- if (src_is_directory) {
- /*
- * Drop the link from the old "." entry.
- */
- error = xfs_droplink(tp, target_ip);
- if (error)
- goto out_trans_cancel;
- }
- } /* target_ip != NULL */
-
- /*
- * Remove the source.
- */
- if (new_parent && src_is_directory) {
- /*
- * Rewrite the ".." entry to point to the new
- * directory.
- */
- error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
- target_dp->i_ino, spaceres);
- ASSERT(error != -EEXIST);
- if (error)
- goto out_trans_cancel;
- }
-
- /*
- * We always want to hit the ctime on the source inode.
- *
- * This isn't strictly required by the standards since the source
- * inode isn't really being changed, but old unix file systems did
- * it and some incremental backup programs won't work without it.
- */
- xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
-
- /*
- * Adjust the link count on src_dp. This is necessary when
- * renaming a directory, either within one parent when
- * the target existed, or across two parent directories.
- */
- if (src_is_directory && (new_parent || target_ip != NULL)) {
-
- /*
- * Decrement link count on src_directory since the
- * entry that's moved no longer points to it.
- */
- error = xfs_droplink(tp, src_dp);
- if (error)
- goto out_trans_cancel;
- }
-
- /*
- * For whiteouts, we only need to update the source dirent with the
- * inode number of the whiteout inode rather than removing it
- * altogether.
- */
- if (wip)
- error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
- spaceres);
- else
- error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
- spaceres);
-
+ error = xfs_dir_rename_children(tp, &du_src, &du_tgt, spaceres,
+ &du_wip);
if (error)
goto out_trans_cancel;
- /* Schedule parent pointer updates. */
- if (wip_ppargs) {
- error = xfs_parent_addname(tp, wip_ppargs, src_dp, src_name,
- wip);
- if (error)
- goto out_trans_cancel;
- }
-
- if (src_ppargs) {
- error = xfs_parent_replacename(tp, src_ppargs, src_dp,
- src_name, target_dp, target_name, src_ip);
- if (error)
- goto out_trans_cancel;
- }
-
- if (tgt_ppargs) {
- error = xfs_parent_removename(tp, tgt_ppargs, target_dp,
- target_name, target_ip);
- if (error)
- goto out_trans_cancel;
+ if (du_wip.ip) {
+ /*
+ * Now we have a real link, clear the "I'm a tmpfile" state
+ * flag from the inode so it doesn't accidentally get misused in
+ * future.
+ */
+ VFS_I(du_wip.ip)->i_state &= ~I_LINKABLE;
}
- xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
- if (new_parent)
- xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
-
+out_commit:
/*
- * Inform our hook clients that we've finished a rename operation as
- * follows: removed the source and target files from their directories;
- * that we've added the source to the target directory; and finally
- * that we've added the whiteout, if there was one. All inodes are
- * locked, so it's ok to model a rename this way so long as we say we
- * deleted entries before we add new ones.
+ * If this is a synchronous mount, make sure that the rename
+ * transaction goes to disk before returning to the user.
*/
- if (target_ip)
- xfs_dir_update_hook(target_dp, target_ip, -1, target_name);
- xfs_dir_update_hook(src_dp, src_ip, -1, src_name);
- xfs_dir_update_hook(target_dp, src_ip, 1, target_name);
- if (wip)
- xfs_dir_update_hook(src_dp, wip, 1, src_name);
+ if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
+ xfs_trans_set_sync(tp);
- error = xfs_finish_rename(tp);
+ error = xfs_trans_commit(tp);
nospace_error = 0;
goto out_unlock;
@@ -3566,14 +2408,14 @@ out_trans_cancel:
out_unlock:
xfs_iunlock_rename(inodes, num_inodes);
out_tgt_ppargs:
- xfs_parent_finish(mp, tgt_ppargs);
+ xfs_parent_finish(mp, du_tgt.ppargs);
out_wip_ppargs:
- xfs_parent_finish(mp, wip_ppargs);
+ xfs_parent_finish(mp, du_wip.ppargs);
out_src_ppargs:
- xfs_parent_finish(mp, src_ppargs);
+ xfs_parent_finish(mp, du_src.ppargs);
out_release_wip:
- if (wip)
- xfs_irele(wip);
+ if (du_wip.ip)
+ xfs_irele(du_wip.ip);
if (error == -ENOSPC && nospace_error)
error = nospace_error;
return error;
@@ -3713,6 +2555,7 @@ flush_out:
iip->ili_last_fields = iip->ili_fields;
iip->ili_fields = 0;
iip->ili_fsync_fields = 0;
+ set_bit(XFS_LI_FLUSHING, &iip->ili_item.li_flags);
spin_unlock(&iip->ili_lock);
/*
@@ -4276,3 +3119,11 @@ xfs_inode_alloc_unitsize(
return XFS_FSB_TO_B(ip->i_mount, blocks);
}
+
+/* Should we always be using copy on write for file writes? */
+bool
+xfs_is_always_cow_inode(
+ struct xfs_inode *ip)
+{
+ return ip->i_mount->m_always_cow && xfs_has_reflink(ip->i_mount);
+}
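
The xfs_inode.c hunks replace the long positional parameter list of xfs_init_new_inode() with a struct xfs_icreate_args that callers fill via a designated initialiser (as xfs_rename_alloc_whiteout() now does above) and hand to xfs_icreate() and xfs_icreate_dqalloc(). A minimal user-space sketch of that parameter-object pattern; the field names follow the diff, while the types, flag value and printf harness are stand-ins rather than the real XFS definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define ICREATE_TMPFILE_MODEL (1u << 0)

    struct icreate_args_model {
        const void   *idmap;   /* idmapped-mount context, may be NULL */
        const void   *pip;     /* parent inode, may be NULL for tmpfiles */
        unsigned int  mode;
        unsigned int  flags;   /* e.g. the tmpfile flag */
    };

    static int icreate_model(const struct icreate_args_model *args)
    {
        bool tmpfile = args->flags & ICREATE_TMPFILE_MODEL;

        printf("create mode=%o tmpfile=%d\n", args->mode, (int)tmpfile);
        return 0;
    }

    int main(void)
    {
        /* One designated initialiser replaces a long positional argument list. */
        struct icreate_args_model args = {
            .mode  = 0100644,   /* S_IFREG | 0644 */
            .flags = ICREATE_TMPFILE_MODEL,
        };

        return icreate_model(&args);
    }
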
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 292b90b5f2ac..51defdebef30 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -8,6 +8,7 @@
#include "xfs_inode_buf.h"
#include "xfs_inode_fork.h"
+#include "xfs_inode_util.h"
/*
* Kernel only inode definitions
@@ -270,15 +271,6 @@ xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned long flags)
return ret;
}
-static inline prid_t
-xfs_get_initial_prid(struct xfs_inode *dp)
-{
- if (dp->i_diflags & XFS_DIFLAG_PROJINHERIT)
- return dp->i_projid;
-
- return XFS_PROJID_DEFAULT;
-}
-
static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
{
return ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
@@ -292,6 +284,13 @@ static inline bool xfs_is_metadata_inode(struct xfs_inode *ip)
xfs_is_quota_inode(&mp->m_sb, ip->i_ino);
}
+bool xfs_is_always_cow_inode(struct xfs_inode *ip);
+
+static inline bool xfs_is_cow_inode(struct xfs_inode *ip)
+{
+ return xfs_is_reflink_inode(ip) || xfs_is_always_cow_inode(ip);
+}
+
/*
* Check if an inode has any data in the COW fork. This might be often false
* even for inodes with the reflink flag when there is no pending COW operation.
@@ -517,12 +516,9 @@ int xfs_release(struct xfs_inode *ip);
int xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, const struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
-int xfs_create(struct mnt_idmap *idmap,
- struct xfs_inode *dp, struct xfs_name *name,
- umode_t mode, dev_t rdev, bool need_xattr,
- struct xfs_inode **ipp);
-int xfs_create_tmpfile(struct mnt_idmap *idmap,
- struct xfs_inode *dp, umode_t mode, bool init_xattrs,
+int xfs_create(const struct xfs_icreate_args *iargs,
+ struct xfs_name *name, struct xfs_inode **ipp);
+int xfs_create_tmpfile(const struct xfs_icreate_args *iargs,
struct xfs_inode **ipp);
int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode *ip);
@@ -542,7 +538,6 @@ void xfs_assert_ilocked(struct xfs_inode *, uint);
uint xfs_ilock_data_map_shared(struct xfs_inode *);
uint xfs_ilock_attr_map_shared(struct xfs_inode *);
-uint xfs_ip2xflags(struct xfs_inode *);
int xfs_ifree(struct xfs_trans *, struct xfs_inode *);
int xfs_itruncate_extents_flags(struct xfs_trans **,
struct xfs_inode *, int, xfs_fsize_t, int);
@@ -556,13 +551,8 @@ int xfs_iflush_cluster(struct xfs_buf *);
void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
struct xfs_inode *ip1, uint ip1_mode);
-xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
-xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
-
-int xfs_init_new_inode(struct mnt_idmap *idmap, struct xfs_trans *tp,
- struct xfs_inode *pip, xfs_ino_t ino, umode_t mode,
- xfs_nlink_t nlink, dev_t rdev, prid_t prid, bool init_xattrs,
- struct xfs_inode **ipp);
+int xfs_icreate(struct xfs_trans *tp, xfs_ino_t ino,
+ const struct xfs_icreate_args *args, struct xfs_inode **ipp);
static inline int
xfs_itruncate_extents(
@@ -616,18 +606,15 @@ extern struct kmem_cache *xfs_inode_cache;
bool xfs_inode_needs_inactive(struct xfs_inode *ip);
-int xfs_iunlink(struct xfs_trans *tp, struct xfs_inode *ip);
-int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
- struct xfs_inode *ip);
struct xfs_inode *xfs_iunlink_lookup(struct xfs_perag *pag, xfs_agino_t agino);
+int xfs_iunlink_reload_next(struct xfs_trans *tp, struct xfs_buf *agibp,
+ xfs_agino_t prev_agino, xfs_agino_t next_agino);
void xfs_end_io(struct work_struct *work);
int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
void xfs_iunlock2_remapping(struct xfs_inode *ip1, struct xfs_inode *ip2);
-int xfs_droplink(struct xfs_trans *tp, struct xfs_inode *ip);
-void xfs_bumplink(struct xfs_trans *tp, struct xfs_inode *ip);
void xfs_lock_inodes(struct xfs_inode **ips, int inodes, uint lock_mode);
void xfs_sort_inodes(struct xfs_inode **i_tab, unsigned int num_inodes);
@@ -645,29 +632,8 @@ void xfs_inode_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_filblks_t *dblocks, xfs_filblks_t *rblocks);
unsigned int xfs_inode_alloc_unitsize(struct xfs_inode *ip);
-struct xfs_dir_update_params {
- const struct xfs_inode *dp;
- const struct xfs_inode *ip;
- const struct xfs_name *name;
- int delta;
-};
-
-#ifdef CONFIG_XFS_LIVE_HOOKS
-void xfs_dir_update_hook(struct xfs_inode *dp, struct xfs_inode *ip,
- int delta, const struct xfs_name *name);
-
-struct xfs_dir_hook {
- struct xfs_hook dirent_hook;
-};
-
-void xfs_dir_hook_disable(void);
-void xfs_dir_hook_enable(void);
-
-int xfs_dir_hook_add(struct xfs_mount *mp, struct xfs_dir_hook *hook);
-void xfs_dir_hook_del(struct xfs_mount *mp, struct xfs_dir_hook *hook);
-void xfs_dir_hook_setup(struct xfs_dir_hook *hook, notifier_fn_t mod_fn);
-#else
-# define xfs_dir_update_hook(dp, ip, delta, name) ((void)0)
-#endif /* CONFIG_XFS_LIVE_HOOKS */
+int xfs_icreate_dqalloc(const struct xfs_icreate_args *args,
+ struct xfs_dquot **udqpp, struct xfs_dquot **gdqpp,
+ struct xfs_dquot **pdqpp);
#endif /* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index f28d653300d1..b509cbd191f4 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -37,6 +37,36 @@ xfs_inode_item_sort(
return INODE_ITEM(lip)->ili_inode->i_ino;
}
+#ifdef DEBUG_EXPENSIVE
+static void
+xfs_inode_item_precommit_check(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dinode *dip;
+ xfs_failaddr_t fa;
+
+ dip = kzalloc(mp->m_sb.sb_inodesize, GFP_KERNEL | GFP_NOFS);
+ if (!dip) {
+ ASSERT(dip != NULL);
+ return;
+ }
+
+ xfs_inode_to_disk(ip, dip, 0);
+ xfs_dinode_calc_crc(mp, dip);
+ fa = xfs_dinode_verify(mp, ip->i_ino, dip);
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), fa);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ ASSERT(fa == NULL);
+ }
+ kfree(dip);
+}
+#else
+# define xfs_inode_item_precommit_check(ip) ((void)0)
+#endif
+
/*
* Prior to finally logging the inode, we have to ensure that all the
* per-modification inode state changes are applied. This includes VFS inode
@@ -169,6 +199,8 @@ xfs_inode_item_precommit(
iip->ili_fields |= (flags | iip->ili_last_fields);
spin_unlock(&iip->ili_lock);
+ xfs_inode_item_precommit_check(ip);
+
/*
* We are done with the log item transaction dirty state, so clear it so
* that it doesn't pollute future transactions.
@@ -933,6 +965,7 @@ xfs_iflush_finish(
}
iip->ili_last_fields = 0;
iip->ili_flush_lsn = 0;
+ clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
spin_unlock(&iip->ili_lock);
xfs_iflags_clear(iip->ili_inode, XFS_IFLUSHING);
if (drop_buffer)
@@ -991,8 +1024,10 @@ xfs_buf_inode_io_fail(
{
struct xfs_log_item *lip;
- list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
set_bit(XFS_LI_FAILED, &lip->li_flags);
+ clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
+ }
}
/*
@@ -1011,6 +1046,7 @@ xfs_iflush_abort_clean(
iip->ili_flush_lsn = 0;
iip->ili_item.li_buf = NULL;
list_del_init(&iip->ili_item.li_bio_list);
+ clear_bit(XFS_LI_FLUSHING, &iip->ili_item.li_flags);
}
/*
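The DEBUG_EXPENSIVE hunk above serializes the in-core inode into a scratch on-disk image, recalculates the CRC and runs the write verifier before the item is logged, so corruption is caught where it is introduced rather than at writeback time. Below is a minimal standalone sketch of that serialize-then-verify pattern; every name in it (incore_obj, obj_verify and so on) is invented for illustration and it is not kernel code.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct incore_obj { unsigned int magic; unsigned int size; };
struct ondisk_obj { unsigned int magic; unsigned int size; unsigned int crc; };

/* Toy checksum standing in for the on-disk CRC. */
static unsigned int toy_crc(const void *p, size_t len)
{
	const unsigned char *b = p;
	unsigned int crc = 5381;

	while (len--)
		crc = crc * 33 + *b++;
	return crc;
}

/* Serialize the in-core form into a scratch on-disk image. */
static void obj_to_disk(const struct incore_obj *ip, struct ondisk_obj *dp)
{
	memset(dp, 0, sizeof(*dp));
	dp->magic = ip->magic;
	dp->size = ip->size;
	dp->crc = toy_crc(dp, offsetof(struct ondisk_obj, crc));
}

/* Write verifier: reject images that would be corrupt on disk. */
static bool obj_verify(const struct ondisk_obj *dp)
{
	return dp->magic == 0x58465321 &&
	       dp->crc == toy_crc(dp, offsetof(struct ondisk_obj, crc));
}

/* Precommit check: fail loudly before a bad image reaches the log. */
static int precommit_check(const struct incore_obj *ip)
{
	struct ondisk_obj scratch;

	obj_to_disk(ip, &scratch);
	if (!obj_verify(&scratch)) {
		fprintf(stderr, "precommit: corrupt in-core object\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct incore_obj good = { .magic = 0x58465321, .size = 512 };
	struct incore_obj bad  = { .magic = 0xdeadbeef, .size = 512 };

	printf("good: %d\nbad: %d\n", precommit_check(&good), precommit_check(&bad));
	return 0;
}
```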
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index f0117188f302..4e933db75b12 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -469,66 +469,6 @@ xfs_fileattr_get(
return 0;
}
-STATIC uint16_t
-xfs_flags2diflags(
- struct xfs_inode *ip,
- unsigned int xflags)
-{
- /* can't set PREALLOC this way, just preserve it */
- uint16_t di_flags =
- (ip->i_diflags & XFS_DIFLAG_PREALLOC);
-
- if (xflags & FS_XFLAG_IMMUTABLE)
- di_flags |= XFS_DIFLAG_IMMUTABLE;
- if (xflags & FS_XFLAG_APPEND)
- di_flags |= XFS_DIFLAG_APPEND;
- if (xflags & FS_XFLAG_SYNC)
- di_flags |= XFS_DIFLAG_SYNC;
- if (xflags & FS_XFLAG_NOATIME)
- di_flags |= XFS_DIFLAG_NOATIME;
- if (xflags & FS_XFLAG_NODUMP)
- di_flags |= XFS_DIFLAG_NODUMP;
- if (xflags & FS_XFLAG_NODEFRAG)
- di_flags |= XFS_DIFLAG_NODEFRAG;
- if (xflags & FS_XFLAG_FILESTREAM)
- di_flags |= XFS_DIFLAG_FILESTREAM;
- if (S_ISDIR(VFS_I(ip)->i_mode)) {
- if (xflags & FS_XFLAG_RTINHERIT)
- di_flags |= XFS_DIFLAG_RTINHERIT;
- if (xflags & FS_XFLAG_NOSYMLINKS)
- di_flags |= XFS_DIFLAG_NOSYMLINKS;
- if (xflags & FS_XFLAG_EXTSZINHERIT)
- di_flags |= XFS_DIFLAG_EXTSZINHERIT;
- if (xflags & FS_XFLAG_PROJINHERIT)
- di_flags |= XFS_DIFLAG_PROJINHERIT;
- } else if (S_ISREG(VFS_I(ip)->i_mode)) {
- if (xflags & FS_XFLAG_REALTIME)
- di_flags |= XFS_DIFLAG_REALTIME;
- if (xflags & FS_XFLAG_EXTSIZE)
- di_flags |= XFS_DIFLAG_EXTSIZE;
- }
-
- return di_flags;
-}
-
-STATIC uint64_t
-xfs_flags2diflags2(
- struct xfs_inode *ip,
- unsigned int xflags)
-{
- uint64_t di_flags2 =
- (ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
- XFS_DIFLAG2_BIGTIME |
- XFS_DIFLAG2_NREXT64));
-
- if (xflags & FS_XFLAG_DAX)
- di_flags2 |= XFS_DIFLAG2_DAX;
- if (xflags & FS_XFLAG_COWEXTSIZE)
- di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
-
- return di_flags2;
-}
-
static int
xfs_ioctl_setattr_xflags(
struct xfs_trans *tp,
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 378342673925..72c981e3dc92 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -717,53 +717,30 @@ imap_needs_cow(
return true;
}
+/*
+ * Extents not yet cached require exclusive access; don't block for
+ * IOMAP_NOWAIT.
+ *
+ * This is basically an opencoded xfs_ilock_data_map_shared() call, but with
+ * support for IOMAP_NOWAIT.
+ */
static int
xfs_ilock_for_iomap(
struct xfs_inode *ip,
unsigned flags,
unsigned *lockmode)
{
- unsigned int mode = *lockmode;
- bool is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
-
- /*
- * COW writes may allocate delalloc space or convert unwritten COW
- * extents, so we need to make sure to take the lock exclusively here.
- */
- if (xfs_is_cow_inode(ip) && is_write)
- mode = XFS_ILOCK_EXCL;
-
- /*
- * Extents not yet cached requires exclusive access, don't block. This
- * is an opencoded xfs_ilock_data_map_shared() call but with
- * non-blocking behaviour.
- */
- if (xfs_need_iread_extents(&ip->i_df)) {
- if (flags & IOMAP_NOWAIT)
- return -EAGAIN;
- mode = XFS_ILOCK_EXCL;
- }
-
-relock:
if (flags & IOMAP_NOWAIT) {
- if (!xfs_ilock_nowait(ip, mode))
+ if (xfs_need_iread_extents(&ip->i_df))
+ return -EAGAIN;
+ if (!xfs_ilock_nowait(ip, *lockmode))
return -EAGAIN;
} else {
- xfs_ilock(ip, mode);
- }
-
- /*
- * The reflink iflag could have changed since the earlier unlocked
- * check, so if we got ILOCK_SHARED for a write and but we're now a
- * reflink inode we have to switch to ILOCK_EXCL and relock.
- */
- if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
- xfs_iunlock(ip, mode);
- mode = XFS_ILOCK_EXCL;
- goto relock;
+ if (xfs_need_iread_extents(&ip->i_df))
+ *lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, *lockmode);
}
- *lockmode = mode;
return 0;
}
@@ -801,7 +778,7 @@ xfs_direct_write_iomap_begin(
int nimaps = 1, error = 0;
bool shared = false;
u16 iomap_flags = 0;
- unsigned int lockmode = XFS_ILOCK_SHARED;
+ unsigned int lockmode;
u64 seq;
ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
@@ -817,10 +794,30 @@ xfs_direct_write_iomap_begin(
if (offset + length > i_size_read(inode))
iomap_flags |= IOMAP_F_DIRTY;
+ /*
+ * COW writes may allocate delalloc space or convert unwritten COW
+ * extents, so we need to make sure to take the lock exclusively here.
+ */
+ if (xfs_is_cow_inode(ip))
+ lockmode = XFS_ILOCK_EXCL;
+ else
+ lockmode = XFS_ILOCK_SHARED;
+
+relock:
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
if (error)
return error;
+ /*
+ * The reflink iflag could have changed since the earlier unlocked
+ * check, so check again and relock if needed.
+ */
+ if (xfs_is_cow_inode(ip) && lockmode == XFS_ILOCK_SHARED) {
+ xfs_iunlock(ip, lockmode);
+ lockmode = XFS_ILOCK_EXCL;
+ goto relock;
+ }
+
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, 0);
if (error)
@@ -1148,33 +1145,23 @@ xfs_buffered_write_iomap_begin(
}
}
-retry:
- error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
- end_fsb - offset_fsb, prealloc_blocks,
- allocfork == XFS_DATA_FORK ? &imap : &cmap,
- allocfork == XFS_DATA_FORK ? &icur : &ccur,
- allocfork == XFS_DATA_FORK ? eof : cow_eof);
- switch (error) {
- case 0:
- break;
- case -ENOSPC:
- case -EDQUOT:
- /* retry without any preallocation */
- trace_xfs_delalloc_enospc(ip, offset, count);
- if (prealloc_blocks) {
- prealloc_blocks = 0;
- goto retry;
- }
- fallthrough;
- default:
- goto out_unlock;
- }
-
if (allocfork == XFS_COW_FORK) {
+ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
+ end_fsb - offset_fsb, prealloc_blocks, &cmap,
+ &ccur, cow_eof);
+ if (error)
+ goto out_unlock;
+
trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
goto found_cow;
}
+ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
+ end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
+ eof);
+ if (error)
+ goto out_unlock;
+
/*
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
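The xfs_direct_write_iomap_begin() changes above move the COW check out of the locking helper: the caller guesses the lock mode from an unlocked check, then rechecks under the lock and upgrades if the inode became a COW inode in the meantime. A hedged userspace sketch of that optimistic-shared/recheck/upgrade pattern follows, using a pthread rwlock as a stand-in for the XFS ilock and needs_exclusive() as an invented stand-in for xfs_is_cow_inode().

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t ilock = PTHREAD_RWLOCK_INITIALIZER;
static bool cow_state;			/* may change while unlocked */

static bool needs_exclusive(void)
{
	return cow_state;
}

/* Take shared if possible, recheck under the lock, upgrade and retry if not. */
static void lock_for_write(bool *got_exclusive)
{
	bool excl = needs_exclusive();	/* unlocked guess */

relock:
	if (excl)
		pthread_rwlock_wrlock(&ilock);
	else
		pthread_rwlock_rdlock(&ilock);

	if (!excl && needs_exclusive()) {
		/* State changed since the unlocked check: upgrade and retry. */
		pthread_rwlock_unlock(&ilock);
		excl = true;
		goto relock;
	}
	*got_exclusive = excl;
}

int main(void)
{
	bool excl;

	cow_state = true;
	lock_for_write(&excl);
	printf("exclusive: %d\n", excl);
	pthread_rwlock_unlock(&ilock);
	return 0;
}
```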
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff222827e550..1cdc8034f54d 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -17,6 +17,8 @@
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_trans.h"
+#include "xfs_trans_space.h"
+#include "xfs_bmap_btree.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
@@ -26,6 +28,7 @@
#include "xfs_ioctl.h"
#include "xfs_xattr.h"
#include "xfs_file.h"
+#include "xfs_bmap.h"
#include <linux/posix_acl.h>
#include <linux/security.h>
@@ -157,8 +160,6 @@ xfs_create_need_xattr(
if (dir->i_sb->s_security)
return true;
#endif
- if (xfs_has_parent(XFS_I(dir)->i_mount))
- return true;
return false;
}
@@ -172,49 +173,55 @@ xfs_generic_create(
dev_t rdev,
struct file *tmpfile) /* unnamed file */
{
- struct inode *inode;
- struct xfs_inode *ip = NULL;
- struct posix_acl *default_acl, *acl;
- struct xfs_name name;
- int error;
+ struct xfs_icreate_args args = {
+ .idmap = idmap,
+ .pip = XFS_I(dir),
+ .rdev = rdev,
+ .mode = mode,
+ };
+ struct inode *inode;
+ struct xfs_inode *ip = NULL;
+ struct posix_acl *default_acl, *acl;
+ struct xfs_name name;
+ int error;
/*
* Irix uses Missed'em'V split, but doesn't want to see
* the upper 5 bits of (14bit) major.
*/
- if (S_ISCHR(mode) || S_ISBLK(mode)) {
- if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
+ if (S_ISCHR(args.mode) || S_ISBLK(args.mode)) {
+ if (unlikely(!sysv_valid_dev(args.rdev) ||
+ MAJOR(args.rdev) & ~0x1ff))
return -EINVAL;
} else {
- rdev = 0;
+ args.rdev = 0;
}
- error = posix_acl_create(dir, &mode, &default_acl, &acl);
+ error = posix_acl_create(dir, &args.mode, &default_acl, &acl);
if (error)
return error;
/* Verify mode is valid also for tmpfile case */
- error = xfs_dentry_mode_to_name(&name, dentry, mode);
+ error = xfs_dentry_mode_to_name(&name, dentry, args.mode);
if (unlikely(error))
goto out_free_acl;
if (!tmpfile) {
- error = xfs_create(idmap, XFS_I(dir), &name, mode, rdev,
- xfs_create_need_xattr(dir, default_acl, acl),
- &ip);
+ if (xfs_create_need_xattr(dir, default_acl, acl))
+ args.flags |= XFS_ICREATE_INIT_XATTRS;
+
+ error = xfs_create(&args, &name, &ip);
} else {
- bool init_xattrs = false;
+ args.flags |= XFS_ICREATE_TMPFILE;
/*
- * If this temporary file will be linkable, set up the file
- * with an attr fork to receive a parent pointer.
+ * If this temporary file will not be linkable, don't bother
+ * creating an attr fork to receive a parent pointer.
*/
- if (!(tmpfile->f_flags & O_EXCL) &&
- xfs_has_parent(XFS_I(dir)->i_mount))
- init_xattrs = true;
+ if (tmpfile->f_flags & O_EXCL)
+ args.flags |= XFS_ICREATE_UNLINKABLE;
- error = xfs_create_tmpfile(idmap, XFS_I(dir), mode,
- init_xattrs, &ip);
+ error = xfs_create_tmpfile(&args, &ip);
}
if (unlikely(error))
goto out_free_acl;
@@ -811,6 +818,7 @@ xfs_setattr_size(
struct xfs_trans *tp;
int error;
uint lock_flags = 0;
+ uint resblks = 0;
bool did_zeroing = false;
xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
@@ -917,7 +925,17 @@ xfs_setattr_size(
return error;
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ /*
+ * For a realtime inode with an rtextsize larger than one block, we need
+ * the block reservation for the bmap btree block allocations/splits that
+ * can happen, since truncation could split the tail written extent and
+ * convert the one right beyond EOF to unwritten.
+ */
+ if (xfs_inode_has_bigrtalloc(ip))
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
+ 0, 0, &tp);
if (error)
return error;
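The xfs_generic_create() rework above folds the long positional argument list into a single xfs_icreate_args structure whose flags are ORed in as the policy decisions are made. A small illustrative sketch of that args-struct style follows; create_args, do_create and the flag names are invented for the example, not XFS API.

```c
#include <stdio.h>

#define CREATE_TMPFILE		(1u << 0)
#define CREATE_INIT_XATTRS	(1u << 1)

struct create_args {
	unsigned int	mode;
	unsigned int	rdev;
	unsigned int	flags;
};

static int do_create(const struct create_args *args, const char *name)
{
	printf("create %s mode=%o rdev=%u flags=%#x\n",
	       name, args->mode, args->rdev, args->flags);
	return 0;
}

int main(void)
{
	struct create_args args = {
		.mode = 0100644,	/* regular file, 0644 */
	};

	/* Flags are ORed in as policy decisions are made, as in the hunk above. */
	args.flags |= CREATE_INIT_XATTRS;
	return do_create(&args, "example");
}
```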
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 730c8d48da28..86f14ec7c31f 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -351,7 +351,6 @@ xfs_iwalk_run_callbacks(
int *has_more)
{
struct xfs_mount *mp = iwag->mp;
- struct xfs_inobt_rec_incore *irec;
xfs_agino_t next_agino;
int error;
@@ -361,8 +360,8 @@ xfs_iwalk_run_callbacks(
/* Delete cursor but remember the last record we cached... */
xfs_iwalk_del_inobt(iwag->tp, curpp, agi_bpp, 0);
- irec = &iwag->recs[iwag->nr_recs - 1];
- ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);
+ ASSERT(next_agino >= iwag->recs[iwag->nr_recs - 1].ir_startino +
+ XFS_INODES_PER_CHUNK);
if (iwag->drop_trans) {
xfs_trans_cancel(iwag->tp);
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ac355328121a..54a098fe7285 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -135,8 +135,6 @@ typedef __u32 xfs_nlink_t;
*/
#define __this_address ({ __label__ __here; __here: barrier(); &&__here; })
-#define XFS_PROJID_DEFAULT 0
-
#define howmany(x, y) (((x)+((y)-1))/(y))
static inline void delay(long ticks)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 416c15494983..817ea7e0a8ab 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -30,10 +30,6 @@ xlog_alloc_log(
struct xfs_buftarg *log_target,
xfs_daddr_t blk_offset,
int num_bblks);
-STATIC int
-xlog_space_left(
- struct xlog *log,
- atomic64_t *head);
STATIC void
xlog_dealloc_log(
struct xlog *log);
@@ -51,19 +47,12 @@ xlog_state_get_iclog_space(
struct xlog_ticket *ticket,
int *logoffsetp);
STATIC void
-xlog_grant_push_ail(
- struct xlog *log,
- int need_bytes);
-STATIC void
xlog_sync(
struct xlog *log,
struct xlog_in_core *iclog,
struct xlog_ticket *ticket);
#if defined(DEBUG)
STATIC void
-xlog_verify_grant_tail(
- struct xlog *log);
-STATIC void
xlog_verify_iclog(
struct xlog *log,
struct xlog_in_core *iclog,
@@ -73,7 +62,6 @@ xlog_verify_tail_lsn(
struct xlog *log,
struct xlog_in_core *iclog);
#else
-#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b)
#endif
@@ -141,70 +129,66 @@ xlog_prepare_iovec(
return buf;
}
-static void
+static inline void
xlog_grant_sub_space(
- struct xlog *log,
- atomic64_t *head,
- int bytes)
+ struct xlog_grant_head *head,
+ int64_t bytes)
{
- int64_t head_val = atomic64_read(head);
- int64_t new, old;
-
- do {
- int cycle, space;
-
- xlog_crack_grant_head_val(head_val, &cycle, &space);
-
- space -= bytes;
- if (space < 0) {
- space += log->l_logsize;
- cycle--;
- }
-
- old = head_val;
- new = xlog_assign_grant_head_val(cycle, space);
- head_val = atomic64_cmpxchg(head, old, new);
- } while (head_val != old);
+ atomic64_sub(bytes, &head->grant);
}
-static void
+static inline void
xlog_grant_add_space(
- struct xlog *log,
- atomic64_t *head,
- int bytes)
+ struct xlog_grant_head *head,
+ int64_t bytes)
{
- int64_t head_val = atomic64_read(head);
- int64_t new, old;
-
- do {
- int tmp;
- int cycle, space;
-
- xlog_crack_grant_head_val(head_val, &cycle, &space);
-
- tmp = log->l_logsize - space;
- if (tmp > bytes)
- space += bytes;
- else {
- space = bytes - tmp;
- cycle++;
- }
-
- old = head_val;
- new = xlog_assign_grant_head_val(cycle, space);
- head_val = atomic64_cmpxchg(head, old, new);
- } while (head_val != old);
+ atomic64_add(bytes, &head->grant);
}
-STATIC void
+static void
xlog_grant_head_init(
struct xlog_grant_head *head)
{
- xlog_assign_grant_head(&head->grant, 1, 0);
+ atomic64_set(&head->grant, 0);
INIT_LIST_HEAD(&head->waiters);
spin_lock_init(&head->lock);
}
+void
+xlog_grant_return_space(
+ struct xlog *log,
+ xfs_lsn_t old_head,
+ xfs_lsn_t new_head)
+{
+ int64_t diff = xlog_lsn_sub(log, new_head, old_head);
+
+ xlog_grant_sub_space(&log->l_reserve_head, diff);
+ xlog_grant_sub_space(&log->l_write_head, diff);
+}
+
+/*
+ * Return the space in the log between the tail and the head. In the case where
+ * we have overrun available reservation space, return 0. The memory barrier
+ * pairs with the smp_wmb() in xlog_cil_ail_insert() to ensure that grant head
+ * vs tail space updates are seen in the correct order and hence avoid
+ * transients as space is transferred from the grant heads to the AIL on commit
+ * completion.
+ */
+static uint64_t
+xlog_grant_space_left(
+ struct xlog *log,
+ struct xlog_grant_head *head)
+{
+ int64_t free_bytes;
+
+ smp_rmb(); /* paired with smp_wmb in xlog_cil_ail_insert() */
+ free_bytes = log->l_logsize - READ_ONCE(log->l_tail_space) -
+ atomic64_read(&head->grant);
+ if (free_bytes > 0)
+ return free_bytes;
+ return 0;
+}
+
STATIC void
xlog_grant_head_wake_all(
struct xlog_grant_head *head)
@@ -242,42 +226,15 @@ xlog_grant_head_wake(
{
struct xlog_ticket *tic;
int need_bytes;
- bool woken_task = false;
list_for_each_entry(tic, &head->waiters, t_queue) {
-
- /*
- * There is a chance that the size of the CIL checkpoints in
- * progress at the last AIL push target calculation resulted in
- * limiting the target to the log head (l_last_sync_lsn) at the
- * time. This may not reflect where the log head is now as the
- * CIL checkpoints may have completed.
- *
- * Hence when we are woken here, it may be that the head of the
- * log that has moved rather than the tail. As the tail didn't
- * move, there still won't be space available for the
- * reservation we require. However, if the AIL has already
- * pushed to the target defined by the old log head location, we
- * will hang here waiting for something else to update the AIL
- * push target.
- *
- * Therefore, if there isn't space to wake the first waiter on
- * the grant head, we need to push the AIL again to ensure the
- * target reflects both the current log tail and log head
- * position before we wait for the tail to move again.
- */
-
need_bytes = xlog_ticket_reservation(log, head, tic);
- if (*free_bytes < need_bytes) {
- if (!woken_task)
- xlog_grant_push_ail(log, need_bytes);
+ if (*free_bytes < need_bytes)
return false;
- }
*free_bytes -= need_bytes;
trace_xfs_log_grant_wake_up(log, tic);
wake_up_process(tic->t_task);
- woken_task = true;
}
return true;
@@ -296,13 +253,15 @@ xlog_grant_head_wait(
do {
if (xlog_is_shutdown(log))
goto shutdown;
- xlog_grant_push_ail(log, need_bytes);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&head->lock);
XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
+ /* Push on the AIL to free up all the log space. */
+ xfs_ail_push_all(log->l_ailp);
+
trace_xfs_log_grant_sleep(log, tic);
schedule();
trace_xfs_log_grant_wake(log, tic);
@@ -310,7 +269,7 @@ xlog_grant_head_wait(
spin_lock(&head->lock);
if (xlog_is_shutdown(log))
goto shutdown;
- } while (xlog_space_left(log, &head->grant) < need_bytes);
+ } while (xlog_grant_space_left(log, head) < need_bytes);
list_del_init(&tic->t_queue);
return 0;
@@ -355,7 +314,7 @@ xlog_grant_head_check(
* otherwise try to get some space for this transaction.
*/
*need_bytes = xlog_ticket_reservation(log, head, tic);
- free_bytes = xlog_space_left(log, &head->grant);
+ free_bytes = xlog_grant_space_left(log, head);
if (!list_empty_careful(&head->waiters)) {
spin_lock(&head->lock);
if (!xlog_grant_head_wake(log, head, &free_bytes) ||
@@ -418,9 +377,6 @@ xfs_log_regrant(
* of rolling transactions in the log easily.
*/
tic->t_tid++;
-
- xlog_grant_push_ail(log, tic->t_unit_res);
-
tic->t_curr_res = tic->t_unit_res;
if (tic->t_cnt > 0)
return 0;
@@ -432,9 +388,8 @@ xfs_log_regrant(
if (error)
goto out_error;
- xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
+ xlog_grant_add_space(&log->l_write_head, need_bytes);
trace_xfs_log_regrant_exit(log, tic);
- xlog_verify_grant_tail(log);
return 0;
out_error:
@@ -477,21 +432,15 @@ xfs_log_reserve(
ASSERT(*ticp == NULL);
tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
*ticp = tic;
-
- xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
- : tic->t_unit_res);
-
trace_xfs_log_reserve(log, tic);
-
error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
&need_bytes);
if (error)
goto out_error;
- xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
- xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
+ xlog_grant_add_space(&log->l_reserve_head, need_bytes);
+ xlog_grant_add_space(&log->l_write_head, need_bytes);
trace_xfs_log_reserve_exit(log, tic);
- xlog_verify_grant_tail(log);
return 0;
out_error:
@@ -571,7 +520,6 @@ xlog_state_release_iclog(
struct xlog_in_core *iclog,
struct xlog_ticket *ticket)
{
- xfs_lsn_t tail_lsn;
bool last_ref;
lockdep_assert_held(&log->l_icloglock);
@@ -586,8 +534,8 @@ xlog_state_release_iclog(
if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
(iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
!iclog->ic_header.h_tail_lsn) {
- tail_lsn = xlog_assign_tail_lsn(log->l_mp);
- iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ iclog->ic_header.h_tail_lsn =
+ cpu_to_be64(atomic64_read(&log->l_tail_lsn));
}
last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
@@ -1149,7 +1097,7 @@ xfs_log_space_wake(
ASSERT(!xlog_in_recovery(log));
spin_lock(&log->l_write_head.lock);
- free_bytes = xlog_space_left(log, &log->l_write_head.grant);
+ free_bytes = xlog_grant_space_left(log, &log->l_write_head);
xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
spin_unlock(&log->l_write_head.lock);
}
@@ -1158,7 +1106,7 @@ xfs_log_space_wake(
ASSERT(!xlog_in_recovery(log));
spin_lock(&log->l_reserve_head.lock);
- free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
+ free_bytes = xlog_grant_space_left(log, &log->l_reserve_head);
xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
spin_unlock(&log->l_reserve_head.lock);
}
@@ -1272,105 +1220,6 @@ xfs_log_cover(
return error;
}
-/*
- * We may be holding the log iclog lock upon entering this routine.
- */
-xfs_lsn_t
-xlog_assign_tail_lsn_locked(
- struct xfs_mount *mp)
-{
- struct xlog *log = mp->m_log;
- struct xfs_log_item *lip;
- xfs_lsn_t tail_lsn;
-
- assert_spin_locked(&mp->m_ail->ail_lock);
-
- /*
- * To make sure we always have a valid LSN for the log tail we keep
- * track of the last LSN which was committed in log->l_last_sync_lsn,
- * and use that when the AIL was empty.
- */
- lip = xfs_ail_min(mp->m_ail);
- if (lip)
- tail_lsn = lip->li_lsn;
- else
- tail_lsn = atomic64_read(&log->l_last_sync_lsn);
- trace_xfs_log_assign_tail_lsn(log, tail_lsn);
- atomic64_set(&log->l_tail_lsn, tail_lsn);
- return tail_lsn;
-}
-
-xfs_lsn_t
-xlog_assign_tail_lsn(
- struct xfs_mount *mp)
-{
- xfs_lsn_t tail_lsn;
-
- spin_lock(&mp->m_ail->ail_lock);
- tail_lsn = xlog_assign_tail_lsn_locked(mp);
- spin_unlock(&mp->m_ail->ail_lock);
-
- return tail_lsn;
-}
-
-/*
- * Return the space in the log between the tail and the head. The head
- * is passed in the cycle/bytes formal parms. In the special case where
- * the reserve head has wrapped passed the tail, this calculation is no
- * longer valid. In this case, just return 0 which means there is no space
- * in the log. This works for all places where this function is called
- * with the reserve head. Of course, if the write head were to ever
- * wrap the tail, we should blow up. Rather than catch this case here,
- * we depend on other ASSERTions in other parts of the code. XXXmiken
- *
- * If reservation head is behind the tail, we have a problem. Warn about it,
- * but then treat it as if the log is empty.
- *
- * If the log is shut down, the head and tail may be invalid or out of whack, so
- * shortcut invalidity asserts in this case so that we don't trigger them
- * falsely.
- */
-STATIC int
-xlog_space_left(
- struct xlog *log,
- atomic64_t *head)
-{
- int tail_bytes;
- int tail_cycle;
- int head_cycle;
- int head_bytes;
-
- xlog_crack_grant_head(head, &head_cycle, &head_bytes);
- xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
- tail_bytes = BBTOB(tail_bytes);
- if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
- return log->l_logsize - (head_bytes - tail_bytes);
- if (tail_cycle + 1 < head_cycle)
- return 0;
-
- /* Ignore potential inconsistency when shutdown. */
- if (xlog_is_shutdown(log))
- return log->l_logsize;
-
- if (tail_cycle < head_cycle) {
- ASSERT(tail_cycle == (head_cycle - 1));
- return tail_bytes - head_bytes;
- }
-
- /*
- * The reservation head is behind the tail. In this case we just want to
- * return the size of the log as the amount of space left.
- */
- xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
- xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d",
- tail_cycle, tail_bytes);
- xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d",
- head_cycle, head_bytes);
- ASSERT(0);
- return log->l_logsize;
-}
-
-
static void
xlog_ioend_work(
struct work_struct *work)
@@ -1543,7 +1392,6 @@ xlog_alloc_log(
log->l_prev_block = -1;
/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
- xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
@@ -1668,89 +1516,6 @@ out:
} /* xlog_alloc_log */
/*
- * Compute the LSN that we'd need to push the log tail towards in order to have
- * (a) enough on-disk log space to log the number of bytes specified, (b) at
- * least 25% of the log space free, and (c) at least 256 blocks free. If the
- * log free space already meets all three thresholds, this function returns
- * NULLCOMMITLSN.
- */
-xfs_lsn_t
-xlog_grant_push_threshold(
- struct xlog *log,
- int need_bytes)
-{
- xfs_lsn_t threshold_lsn = 0;
- xfs_lsn_t last_sync_lsn;
- int free_blocks;
- int free_bytes;
- int threshold_block;
- int threshold_cycle;
- int free_threshold;
-
- ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
-
- free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
- free_blocks = BTOBBT(free_bytes);
-
- /*
- * Set the threshold for the minimum number of free blocks in the
- * log to the maximum of what the caller needs, one quarter of the
- * log, and 256 blocks.
- */
- free_threshold = BTOBB(need_bytes);
- free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
- free_threshold = max(free_threshold, 256);
- if (free_blocks >= free_threshold)
- return NULLCOMMITLSN;
-
- xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
- &threshold_block);
- threshold_block += free_threshold;
- if (threshold_block >= log->l_logBBsize) {
- threshold_block -= log->l_logBBsize;
- threshold_cycle += 1;
- }
- threshold_lsn = xlog_assign_lsn(threshold_cycle,
- threshold_block);
- /*
- * Don't pass in an lsn greater than the lsn of the last
- * log record known to be on disk. Use a snapshot of the last sync lsn
- * so that it doesn't change between the compare and the set.
- */
- last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
- if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
- threshold_lsn = last_sync_lsn;
-
- return threshold_lsn;
-}
-
-/*
- * Push the tail of the log if we need to do so to maintain the free log space
- * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
- * policy which pushes on an lsn which is further along in the log once we
- * reach the high water mark. In this manner, we would be creating a low water
- * mark.
- */
-STATIC void
-xlog_grant_push_ail(
- struct xlog *log,
- int need_bytes)
-{
- xfs_lsn_t threshold_lsn;
-
- threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
- if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
- return;
-
- /*
- * Get the transaction layer to kick the dirty buffers out to
- * disk asynchronously. No point in trying to do this if
- * the filesystem is shutting down.
- */
- xfs_ail_push(log->l_ailp, threshold_lsn);
-}
-
-/*
* Stamp cycle number in every block
*/
STATIC void
@@ -2048,8 +1813,8 @@ xlog_sync(
if (ticket) {
ticket->t_curr_res -= roundoff;
} else {
- xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
- xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
+ xlog_grant_add_space(&log->l_reserve_head, roundoff);
+ xlog_grant_add_space(&log->l_write_head, roundoff);
}
/* put cycle number in every block */
@@ -2675,47 +2440,6 @@ xlog_get_lowest_lsn(
}
/*
- * Completion of a iclog IO does not imply that a transaction has completed, as
- * transactions can be large enough to span many iclogs. We cannot change the
- * tail of the log half way through a transaction as this may be the only
- * transaction in the log and moving the tail to point to the middle of it
- * will prevent recovery from finding the start of the transaction. Hence we
- * should only update the last_sync_lsn if this iclog contains transaction
- * completion callbacks on it.
- *
- * We have to do this before we drop the icloglock to ensure we are the only one
- * that can update it.
- *
- * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
- * the reservation grant head pushing. This is due to the fact that the push
- * target is bound by the current last_sync_lsn value. Hence if we have a large
- * amount of log space bound up in this committing transaction then the
- * last_sync_lsn value may be the limiting factor preventing tail pushing from
- * freeing space in the log. Hence once we've updated the last_sync_lsn we
- * should push the AIL to ensure the push target (and hence the grant head) is
- * no longer bound by the old log head location and can move forwards and make
- * progress again.
- */
-static void
-xlog_state_set_callback(
- struct xlog *log,
- struct xlog_in_core *iclog,
- xfs_lsn_t header_lsn)
-{
- trace_xlog_iclog_callback(iclog, _RET_IP_);
- iclog->ic_state = XLOG_STATE_CALLBACK;
-
- ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
- header_lsn) <= 0);
-
- if (list_empty_careful(&iclog->ic_callbacks))
- return;
-
- atomic64_set(&log->l_last_sync_lsn, header_lsn);
- xlog_grant_push_ail(log, 0);
-}
-
-/*
* Return true if we need to stop processing, false to continue to the next
* iclog. The caller will need to run callbacks if the iclog is returned in the
* XLOG_STATE_CALLBACK state.
@@ -2746,7 +2470,17 @@ xlog_state_iodone_process_iclog(
lowest_lsn = xlog_get_lowest_lsn(log);
if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
return false;
- xlog_state_set_callback(log, iclog, header_lsn);
+ /*
+ * If there are no callbacks on this iclog, we can mark it clean
+ * immediately and return. Otherwise we need to run the
+ * callbacks.
+ */
+ if (list_empty(&iclog->ic_callbacks)) {
+ xlog_state_clean_iclog(log, iclog);
+ return false;
+ }
+ trace_xlog_iclog_callback(iclog, _RET_IP_);
+ iclog->ic_state = XLOG_STATE_CALLBACK;
return false;
default:
/*
@@ -3000,18 +2734,15 @@ xfs_log_ticket_regrant(
if (ticket->t_cnt > 0)
ticket->t_cnt--;
- xlog_grant_sub_space(log, &log->l_reserve_head.grant,
- ticket->t_curr_res);
- xlog_grant_sub_space(log, &log->l_write_head.grant,
- ticket->t_curr_res);
+ xlog_grant_sub_space(&log->l_reserve_head, ticket->t_curr_res);
+ xlog_grant_sub_space(&log->l_write_head, ticket->t_curr_res);
ticket->t_curr_res = ticket->t_unit_res;
trace_xfs_log_ticket_regrant_sub(log, ticket);
/* just return if we still have some of the pre-reserved space */
if (!ticket->t_cnt) {
- xlog_grant_add_space(log, &log->l_reserve_head.grant,
- ticket->t_unit_res);
+ xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res);
trace_xfs_log_ticket_regrant_exit(log, ticket);
ticket->t_curr_res = ticket->t_unit_res;
@@ -3058,8 +2789,8 @@ xfs_log_ticket_ungrant(
bytes += ticket->t_unit_res*ticket->t_cnt;
}
- xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
- xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
+ xlog_grant_sub_space(&log->l_reserve_head, bytes);
+ xlog_grant_sub_space(&log->l_write_head, bytes);
trace_xfs_log_ticket_ungrant_exit(log, ticket);
@@ -3532,42 +3263,27 @@ xlog_ticket_alloc(
}
#if defined(DEBUG)
-/*
- * Check to make sure the grant write head didn't just over lap the tail. If
- * the cycles are the same, we can't be overlapping. Otherwise, make sure that
- * the cycles differ by exactly one and check the byte count.
- *
- * This check is run unlocked, so can give false positives. Rather than assert
- * on failures, use a warn-once flag and a panic tag to allow the admin to
- * determine if they want to panic the machine when such an error occurs. For
- * debug kernels this will have the same effect as using an assert but, unlinke
- * an assert, it can be turned off at runtime.
- */
-STATIC void
-xlog_verify_grant_tail(
- struct xlog *log)
+static void
+xlog_verify_dump_tail(
+ struct xlog *log,
+ struct xlog_in_core *iclog)
{
- int tail_cycle, tail_blocks;
- int cycle, space;
-
- xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
- xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
- if (tail_cycle != cycle) {
- if (cycle - 1 != tail_cycle &&
- !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
- xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
- "%s: cycle - 1 != tail_cycle", __func__);
- }
-
- if (space > BBTOB(tail_blocks) &&
- !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
- xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
- "%s: space > BBTOB(tail_blocks)", __func__);
- }
- }
-}
-
-/* check if it will fit */
+ xfs_alert(log->l_mp,
+"ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x",
+ iclog ? be64_to_cpu(iclog->ic_header.h_tail_lsn) : -1,
+ atomic64_read(&log->l_tail_lsn),
+ log->l_ailp->ail_head_lsn,
+ log->l_curr_cycle, log->l_curr_block,
+ log->l_prev_cycle, log->l_prev_block);
+ xfs_alert(log->l_mp,
+"write grant 0x%llx, reserve grant 0x%llx, tail_space 0x%llx, size 0x%x, iclog flags 0x%x",
+ atomic64_read(&log->l_write_head.grant),
+ atomic64_read(&log->l_reserve_head.grant),
+ log->l_tail_space, log->l_logsize,
+ iclog ? iclog->ic_flags : -1);
+}
+
+/* Check if the new iclog will fit in the log. */
STATIC void
xlog_verify_tail_lsn(
struct xlog *log,
@@ -3576,21 +3292,34 @@ xlog_verify_tail_lsn(
xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
int blocks;
- if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
- blocks =
- log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
- if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
- xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
- } else {
- ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
+ if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
+ blocks = log->l_logBBsize -
+ (log->l_prev_block - BLOCK_LSN(tail_lsn));
+ if (blocks < BTOBB(iclog->ic_offset) +
+ BTOBB(log->l_iclog_hsize)) {
+ xfs_emerg(log->l_mp,
+ "%s: ran out of log space", __func__);
+ xlog_verify_dump_tail(log, iclog);
+ }
+ return;
+ }
- if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
+ if (CYCLE_LSN(tail_lsn) + 1 != log->l_prev_cycle) {
+ xfs_emerg(log->l_mp, "%s: head has wrapped tail.", __func__);
+ xlog_verify_dump_tail(log, iclog);
+ return;
+ }
+ if (BLOCK_LSN(tail_lsn) == log->l_prev_block) {
xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
+ xlog_verify_dump_tail(log, iclog);
+ return;
+ }
blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
- if (blocks < BTOBB(iclog->ic_offset) + 1)
- xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
- }
+ if (blocks < BTOBB(iclog->ic_offset) + 1) {
+ xfs_emerg(log->l_mp, "%s: ran out of iclog space", __func__);
+ xlog_verify_dump_tail(log, iclog);
+ }
}
/*
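Taken together, the xfs_log.c hunks above drop the packed cycle/space grant-head encoding and the AIL push heuristics: each grant head becomes a plain 64-bit byte counter, and the free-space check is just the log size minus the AIL-owned tail space minus the grant, clamped at zero. A standalone sketch of that accounting under those assumptions follows; toy_log, grant_head and the sizes are invented for illustration.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct grant_head {
	atomic_int_least64_t	grant;		/* bytes currently reserved */
};

struct toy_log {
	int64_t			logsize;	/* bytes in the log */
	int64_t			tail_space;	/* bytes pinned by the AIL */
	struct grant_head	reserve_head;
};

static void grant_add(struct grant_head *head, int64_t bytes)
{
	atomic_fetch_add(&head->grant, bytes);
}

static void grant_sub(struct grant_head *head, int64_t bytes)
{
	atomic_fetch_sub(&head->grant, bytes);
}

/* Free space is log size minus tail space minus grant, clamped at zero. */
static int64_t grant_space_left(struct toy_log *log, struct grant_head *head)
{
	int64_t free_bytes = log->logsize - log->tail_space -
			     atomic_load(&head->grant);

	return free_bytes > 0 ? free_bytes : 0;
}

int main(void)
{
	struct toy_log log = { .logsize = 1 << 20, .tail_space = 1 << 18 };

	atomic_init(&log.reserve_head.grant, 0);
	grant_add(&log.reserve_head, 64 * 1024);
	printf("free after reserve: %lld\n",
	       (long long)grant_space_left(&log, &log.reserve_head));
	grant_sub(&log.reserve_head, 64 * 1024);
	printf("free after release: %lld\n",
	       (long long)grant_space_left(&log, &log.reserve_head));
	return 0;
}
```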
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index d69acf881153..67c539cc9305 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -156,7 +156,6 @@ int xfs_log_quiesce(struct xfs_mount *mp);
void xfs_log_clean(struct xfs_mount *mp);
bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
-xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
bool xlog_force_shutdown(struct xlog *log, uint32_t shutdown_flags);
int xfs_attr_use_log_assist(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index f51cbc6405c1..391a938d690c 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -694,6 +694,180 @@ xlog_cil_insert_items(
}
}
+static inline void
+xlog_cil_ail_insert_batch(
+ struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
+ struct xfs_log_item **log_items,
+ int nr_items,
+ xfs_lsn_t commit_lsn)
+{
+ int i;
+
+ spin_lock(&ailp->ail_lock);
+ /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
+ xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
+
+ for (i = 0; i < nr_items; i++) {
+ struct xfs_log_item *lip = log_items[i];
+
+ if (lip->li_ops->iop_unpin)
+ lip->li_ops->iop_unpin(lip, 0);
+ }
+}
+
+/*
+ * Take the checkpoint's log vector chain of items and insert the attached log
+ * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
+ * traffic.
+ *
+ * The AIL tracks log items via the start record LSN of the checkpoint,
+ * not the commit record LSN. This is because we can pipeline multiple
+ * checkpoints, and so the start record of checkpoint N+1 can be
+ * written before the commit record of checkpoint N. i.e:
+ *
+ * start N commit N
+ * +-------------+------------+----------------+
+ * start N+1 commit N+1
+ *
+ * The tail of the log cannot be moved to the LSN of commit N when all
+ * the items of that checkpoint are written back, because then the
+ * start record for N+1 is no longer in the active portion of the log
+ * and recovery will fail/corrupt the filesystem.
+ *
+ * Hence when all the log items in checkpoint N are written back, the
+ * tail of the log must now only move as far forwards as the start LSN
+ * of checkpoint N+1.
+ *
+ * If we are called with the aborted flag set, it is because a log write during
+ * a CIL checkpoint commit has failed. In this case, all the items in the
+ * checkpoint have already gone through iop_committed and iop_committing, which
+ * means that checkpoint commit abort handling is treated exactly the same as an
+ * iclog write error even though we haven't started any IO yet. Hence in this
+ * case all we need to do is iop_committed processing, followed by an
+ * iop_unpin(aborted) call.
+ *
+ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
+ * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
+ * find the insertion point on every xfs_log_item_batch_insert() call. This
+ * saves a lot of needless list walking and is a net win, even though it
+ * slightly increases the amount of AIL lock traffic to set it up and tear it
+ * down.
+ */
+static void
+xlog_cil_ail_insert(
+ struct xfs_cil_ctx *ctx,
+ bool aborted)
+{
+#define LOG_ITEM_BATCH_SIZE 32
+ struct xfs_ail *ailp = ctx->cil->xc_log->l_ailp;
+ struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
+ struct xfs_log_vec *lv;
+ struct xfs_ail_cursor cur;
+ xfs_lsn_t old_head;
+ int i = 0;
+
+ /*
+ * Update the AIL head LSN with the commit record LSN of this
+ * checkpoint. As iclogs are always completed in order, this should
+ * always be the same (as iclogs can contain multiple commit records) or
+ * higher LSN than the current head. We do this before insertion of the
+ * items so that log space checks during insertion will reflect the
+ * space that this checkpoint has already consumed. We call
+ * xfs_ail_update_finish() so that tail space and space-based wakeups
+ * will be recalculated appropriately.
+ */
+ ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 ||
+ aborted);
+ spin_lock(&ailp->ail_lock);
+ xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn);
+ old_head = ailp->ail_head_lsn;
+ ailp->ail_head_lsn = ctx->commit_lsn;
+ /* xfs_ail_update_finish() drops the ail_lock */
+ xfs_ail_update_finish(ailp, NULLCOMMITLSN);
+
+ /*
+ * We move the AIL head forwards to account for the space used in the
+ * log before we remove that space from the grant heads. This prevents a
+ * transient condition where reservation space appears to become
+ * available on return, only for it to disappear again immediately as
+ * the AIL head update accounts for it in the log tail space.
+ */
+ smp_wmb(); /* paired with smp_rmb in xlog_grant_space_left */
+ xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn);
+
+ /* unpin all the log items */
+ list_for_each_entry(lv, &ctx->lv_chain, lv_list) {
+ struct xfs_log_item *lip = lv->lv_item;
+ xfs_lsn_t item_lsn;
+
+ if (aborted)
+ set_bit(XFS_LI_ABORTED, &lip->li_flags);
+
+ if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
+ lip->li_ops->iop_release(lip);
+ continue;
+ }
+
+ if (lip->li_ops->iop_committed)
+ item_lsn = lip->li_ops->iop_committed(lip,
+ ctx->start_lsn);
+ else
+ item_lsn = ctx->start_lsn;
+
+ /* item_lsn of -1 means the item needs no further processing */
+ if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+ continue;
+
+ /*
+ * if we are aborting the operation, no point in inserting the
+ * object into the AIL as we are in a shutdown situation.
+ */
+ if (aborted) {
+ ASSERT(xlog_is_shutdown(ailp->ail_log));
+ if (lip->li_ops->iop_unpin)
+ lip->li_ops->iop_unpin(lip, 1);
+ continue;
+ }
+
+ if (item_lsn != ctx->start_lsn) {
+
+ /*
+ * Not a bulk update option due to unusual item_lsn.
+ * Push into AIL immediately, rechecking the lsn once
+ * we have the ail lock. Then unpin the item. This does
+ * not affect the AIL cursor the bulk insert path is
+ * using.
+ */
+ spin_lock(&ailp->ail_lock);
+ if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+ xfs_trans_ail_update(ailp, lip, item_lsn);
+ else
+ spin_unlock(&ailp->ail_lock);
+ if (lip->li_ops->iop_unpin)
+ lip->li_ops->iop_unpin(lip, 0);
+ continue;
+ }
+
+ /* Item is a candidate for bulk AIL insert. */
+ log_items[i++] = lv->lv_item;
+ if (i >= LOG_ITEM_BATCH_SIZE) {
+ xlog_cil_ail_insert_batch(ailp, &cur, log_items,
+ LOG_ITEM_BATCH_SIZE, ctx->start_lsn);
+ i = 0;
+ }
+ }
+
+ /* make sure we insert the remainder! */
+ if (i)
+ xlog_cil_ail_insert_batch(ailp, &cur, log_items, i,
+ ctx->start_lsn);
+
+ spin_lock(&ailp->ail_lock);
+ xfs_trans_ail_cursor_done(&cur);
+ spin_unlock(&ailp->ail_lock);
+}
+
static void
xlog_cil_free_logvec(
struct list_head *lv_chain)
@@ -733,8 +907,7 @@ xlog_cil_committed(
spin_unlock(&ctx->cil->xc_push_lock);
}
- xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
- ctx->start_lsn, abort);
+ xlog_cil_ail_insert(ctx, abort);
xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
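xlog_cil_ail_insert() above gathers log items into a small on-stack array and inserts them into the AIL a batch at a time so one round of lock traffic covers many items, flushing the remainder at the end. A minimal sketch of that batching shape follows; ail_insert_many() and BATCH_SIZE are invented stand-ins, not kernel interfaces.

```c
#include <stdio.h>

#define BATCH_SIZE 32

/* One "lock round trip" covers the whole batch. */
static void ail_insert_many(const int *items, int nr)
{
	(void)items;
	printf("insert batch of %d items\n", nr);
}

int main(void)
{
	int batch[BATCH_SIZE];
	int i = 0;

	for (int item = 0; item < 100; item++) {
		batch[i++] = item;
		if (i >= BATCH_SIZE) {
			ail_insert_many(batch, i);
			i = 0;
		}
	}
	if (i)		/* make sure we insert the remainder */
		ail_insert_many(batch, i);
	return 0;
}
```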
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 40e22ec0fbe6..b8778a4fd6b6 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -431,18 +431,16 @@ struct xlog {
int l_prev_block; /* previous logical log block */
/*
- * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
- * read without needing to hold specific locks. To avoid operations
- * contending with other hot objects, place each of them on a separate
- * cacheline.
+ * l_tail_lsn is atomic so it can be set and read without needing to
+ * hold specific locks. To avoid operations contending with other hot
+ * objects, place it on a separate cacheline.
*/
- /* lsn of last LR on disk */
- atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp;
/* lsn of 1st LR with unflushed * buffers */
atomic64_t l_tail_lsn ____cacheline_aligned_in_smp;
struct xlog_grant_head l_reserve_head;
struct xlog_grant_head l_write_head;
+ uint64_t l_tail_space;
struct xfs_kobj l_kobj;
@@ -546,36 +544,6 @@ xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
}
/*
- * When we crack the grant head, we sample it first so that the value will not
- * change while we are cracking it into the component values. This means we
- * will always get consistent component values to work from.
- */
-static inline void
-xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
-{
- *cycle = val >> 32;
- *space = val & 0xffffffff;
-}
-
-static inline void
-xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
-{
- xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
-}
-
-static inline int64_t
-xlog_assign_grant_head_val(int cycle, int space)
-{
- return ((int64_t)cycle << 32) | space;
-}
-
-static inline void
-xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
-{
- atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
-}
-
-/*
* Committed Item List interfaces
*/
int xlog_cil_init(struct xlog *log);
@@ -623,6 +591,27 @@ xlog_wait(
int xlog_wait_on_iclog(struct xlog_in_core *iclog)
__releases(iclog->ic_log->l_icloglock);
+/* Calculate the distance between two LSNs in bytes */
+static inline uint64_t
+xlog_lsn_sub(
+ struct xlog *log,
+ xfs_lsn_t high,
+ xfs_lsn_t low)
+{
+ uint32_t hi_cycle = CYCLE_LSN(high);
+ uint32_t hi_block = BLOCK_LSN(high);
+ uint32_t lo_cycle = CYCLE_LSN(low);
+ uint32_t lo_block = BLOCK_LSN(low);
+
+ if (hi_cycle == lo_cycle)
+ return BBTOB(hi_block - lo_block);
+ ASSERT((hi_cycle == lo_cycle + 1) || xlog_is_shutdown(log));
+ return (uint64_t)log->l_logsize - BBTOB(lo_block - hi_block);
+}
+
+void xlog_grant_return_space(struct xlog *log, xfs_lsn_t old_head,
+ xfs_lsn_t new_head);
+
/*
* The LSN is valid so long as it is behind the current LSN. If it isn't, this
* means that the next log record that includes this metadata could have a
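The xlog_lsn_sub() helper added above computes the byte distance between two LSNs that are at most one cycle apart: the same cycle means a straight block difference, adjacent cycles mean the distance wraps once through the log. A standalone sketch of the same arithmetic follows, with invented example constants (BLOCK_BYTES, LOG_SIZE) standing in for the log geometry.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_BYTES	512u			/* bytes per basic block */
#define LOG_SIZE	(4096u * BLOCK_BYTES)	/* bytes in the example log */

static inline uint32_t lsn_cycle(uint64_t lsn) { return lsn >> 32; }
static inline uint32_t lsn_block(uint64_t lsn) { return (uint32_t)lsn; }

static inline uint64_t make_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

/* Byte distance from low to high, where high is at most one cycle ahead. */
static uint64_t lsn_sub(uint64_t high, uint64_t low)
{
	if (lsn_cycle(high) == lsn_cycle(low))
		return (uint64_t)(lsn_block(high) - lsn_block(low)) * BLOCK_BYTES;

	assert(lsn_cycle(high) == lsn_cycle(low) + 1);
	return LOG_SIZE - (uint64_t)(lsn_block(low) - lsn_block(high)) * BLOCK_BYTES;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)lsn_sub(make_lsn(7, 100), make_lsn(7, 60)));
	printf("%llu\n", (unsigned long long)lsn_sub(make_lsn(8, 10), make_lsn(7, 4000)));
	return 0;
}
```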
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 4fe627991e86..4423dd344239 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1177,8 +1177,8 @@ xlog_check_unmount_rec(
*/
xlog_assign_atomic_lsn(&log->l_tail_lsn,
log->l_curr_cycle, after_umount_blk);
- xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
- log->l_curr_cycle, after_umount_blk);
+ log->l_ailp->ail_head_lsn =
+ atomic64_read(&log->l_tail_lsn);
*tail_blk = after_umount_blk;
*clean = true;
@@ -1212,11 +1212,7 @@ xlog_set_state(
if (bump_cycle)
log->l_curr_cycle++;
atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
- atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
- xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
- BBTOB(log->l_curr_block));
- xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
- BBTOB(log->l_curr_block));
+ log->l_ailp->ail_head_lsn = be64_to_cpu(rhead->h_lsn);
}
/*
@@ -2489,7 +2485,10 @@ xlog_recover_process_data(
ohead = (struct xlog_op_header *)dp;
dp += sizeof(*ohead);
- ASSERT(dp <= end);
+ if (dp > end) {
+ xfs_warn(log->l_mp, "%s: op header overrun", __func__);
+ return -EFSCORRUPTED;
+ }
/* errors will abort recovery */
error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
@@ -3363,14 +3362,13 @@ xlog_do_recover(
/*
* We now update the tail_lsn since much of the recovery has completed
- * and there may be space available to use. If there were no extent
- * or iunlinks, we can free up the entire log and set the tail_lsn to
- * be the last_sync_lsn. This was set in xlog_find_tail to be the
- * lsn of the last known good LR on disk. If there are extent frees
- * or iunlinks they will have some entries in the AIL; so we look at
- * the AIL to determine how to set the tail_lsn.
+ * and there may be space available to use. If there were no extent or
+ * iunlinks, we can free up the entire log. This was set in
+ * xlog_find_tail to be the lsn of the last known good LR on disk. If
+ * there are extent frees or iunlinks they will have some entries in the
+ * AIL; so we look at the AIL to determine how to set the tail_lsn.
*/
- xlog_assign_tail_lsn(mp);
+ xfs_ail_assign_tail_lsn(log->l_ailp);
/*
* Now that we've finished replaying all buffer and inode updates,
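The xlog_recover_process_data() change above replaces a debug-only ASSERT with a hard bounds check that fails recovery instead of trusting on-disk data to be well formed. A hedged sketch of that defensive parsing loop follows; op_header, process_record and the error constant are invented for the example.

```c
#include <stdio.h>
#include <string.h>

#define EFSCORRUPTED_STANDIN	117	/* stand-in for the kernel's -EFSCORRUPTED */

struct op_header { unsigned int len; };

/* Walk op headers in a record, refusing to read past the end of the buffer. */
static int process_record(const unsigned char *dp, const unsigned char *end)
{
	while (dp < end) {
		struct op_header ohead;

		if ((size_t)(end - dp) < sizeof(ohead))
			return -EFSCORRUPTED_STANDIN;	/* header overruns record */
		memcpy(&ohead, dp, sizeof(ohead));
		dp += sizeof(ohead);

		if ((size_t)(end - dp) < ohead.len)
			return -EFSCORRUPTED_STANDIN;	/* payload overruns record */
		dp += ohead.len;			/* payload would be processed here */
	}
	return 0;
}

int main(void)
{
	unsigned char rec[16] = { 0 };

	printf("%d\n", process_record(rec, rec + sizeof(rec)));
	return 0;
}
```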
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 47120b745c47..9490b913a4ab 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -793,12 +793,15 @@ xfs_qm_qino_alloc(
return error;
if (need_alloc) {
+ struct xfs_icreate_args args = {
+ .mode = S_IFREG,
+ .flags = XFS_ICREATE_UNLINKABLE,
+ };
xfs_ino_t ino;
error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
if (!error)
- error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
- S_IFREG, 1, 0, 0, false, ipp);
+ error = xfs_icreate(tp, ino, &args, ipp);
if (error) {
xfs_trans_cancel(tp);
return error;
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 271c1021c733..a11436579877 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -11,7 +11,6 @@
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
-#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 14919b33e4fe..27398512b179 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -21,6 +21,8 @@
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
+#include "xfs_btree.h"
+#include "xfs_trace.h"
struct kmem_cache *xfs_cui_cache;
struct kmem_cache *xfs_cud_cache;
@@ -227,6 +229,11 @@ static const struct xfs_item_ops xfs_cud_item_ops = {
.iop_intent = xfs_cud_item_intent,
};
+static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e)
+{
+ return list_entry(e, struct xfs_refcount_intent, ri_list);
+}
+
/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
@@ -234,34 +241,12 @@ xfs_refcount_update_diff_items(
const struct list_head *a,
const struct list_head *b)
{
- struct xfs_refcount_intent *ra;
- struct xfs_refcount_intent *rb;
-
- ra = container_of(a, struct xfs_refcount_intent, ri_list);
- rb = container_of(b, struct xfs_refcount_intent, ri_list);
+ struct xfs_refcount_intent *ra = ci_entry(a);
+ struct xfs_refcount_intent *rb = ci_entry(b);
return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
}
-/* Set the phys extent flags for this reverse mapping. */
-static void
-xfs_trans_set_refcount_flags(
- struct xfs_phys_extent *pmap,
- enum xfs_refcount_intent_type type)
-{
- pmap->pe_flags = 0;
- switch (type) {
- case XFS_REFCOUNT_INCREASE:
- case XFS_REFCOUNT_DECREASE:
- case XFS_REFCOUNT_ALLOC_COW:
- case XFS_REFCOUNT_FREE_COW:
- pmap->pe_flags |= type;
- break;
- default:
- ASSERT(0);
- }
-}
-
/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
@@ -282,7 +267,18 @@ xfs_refcount_update_log_item(
pmap = &cuip->cui_format.cui_extents[next_extent];
pmap->pe_startblock = ri->ri_startblock;
pmap->pe_len = ri->ri_blockcount;
- xfs_trans_set_refcount_flags(pmap, ri->ri_type);
+
+ pmap->pe_flags = 0;
+ switch (ri->ri_type) {
+ case XFS_REFCOUNT_INCREASE:
+ case XFS_REFCOUNT_DECREASE:
+ case XFS_REFCOUNT_ALLOC_COW:
+ case XFS_REFCOUNT_FREE_COW:
+ pmap->pe_flags |= ri->ri_type;
+ break;
+ default:
+ ASSERT(0);
+ }
}
static struct xfs_log_item *
@@ -324,24 +320,29 @@ xfs_refcount_update_create_done(
return &cudp->cud_item;
}
-/* Take a passive ref to the AG containing the space we're refcounting. */
+/* Add this deferred CUI to the transaction. */
void
-xfs_refcount_update_get_group(
- struct xfs_mount *mp,
+xfs_refcount_defer_add(
+ struct xfs_trans *tp,
struct xfs_refcount_intent *ri)
{
- xfs_agnumber_t agno;
+ struct xfs_mount *mp = tp->t_mountp;
+
+ trace_xfs_refcount_defer(mp, ri);
- agno = XFS_FSB_TO_AGNO(mp, ri->ri_startblock);
- ri->ri_pag = xfs_perag_intent_get(mp, agno);
+ ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_startblock);
+ xfs_defer_add(tp, &ri->ri_list, &xfs_refcount_update_defer_type);
}
-/* Release a passive AG ref after finishing refcounting work. */
-static inline void
-xfs_refcount_update_put_group(
- struct xfs_refcount_intent *ri)
+/* Cancel a deferred refcount update. */
+STATIC void
+xfs_refcount_update_cancel_item(
+ struct list_head *item)
{
+ struct xfs_refcount_intent *ri = ci_entry(item);
+
xfs_perag_intent_put(ri->ri_pag);
+ kmem_cache_free(xfs_refcount_intent_cache, ri);
}
/* Process a deferred refcount update. */
@@ -352,11 +353,9 @@ xfs_refcount_update_finish_item(
struct list_head *item,
struct xfs_btree_cur **state)
{
- struct xfs_refcount_intent *ri;
+ struct xfs_refcount_intent *ri = ci_entry(item);
int error;
- ri = container_of(item, struct xfs_refcount_intent, ri_list);
-
/* Did we run out of reservation? Requeue what we didn't finish. */
error = xfs_refcount_finish_one(tp, ri, state);
if (!error && ri->ri_blockcount > 0) {
@@ -365,30 +364,33 @@ xfs_refcount_update_finish_item(
return -EAGAIN;
}
- xfs_refcount_update_put_group(ri);
- kmem_cache_free(xfs_refcount_intent_cache, ri);
+ xfs_refcount_update_cancel_item(item);
return error;
}
-/* Abort all pending CUIs. */
+/* Clean up after calling xfs_refcount_finish_one. */
STATIC void
-xfs_refcount_update_abort_intent(
- struct xfs_log_item *intent)
+xfs_refcount_finish_one_cleanup(
+ struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur,
+ int error)
{
- xfs_cui_release(CUI_ITEM(intent));
+ struct xfs_buf *agbp;
+
+ if (rcur == NULL)
+ return;
+ agbp = rcur->bc_ag.agbp;
+ xfs_btree_del_cursor(rcur, error);
+ if (error)
+ xfs_trans_brelse(tp, agbp);
}
-/* Cancel a deferred refcount update. */
+/* Abort all pending CUIs. */
STATIC void
-xfs_refcount_update_cancel_item(
- struct list_head *item)
+xfs_refcount_update_abort_intent(
+ struct xfs_log_item *intent)
{
- struct xfs_refcount_intent *ri;
-
- ri = container_of(item, struct xfs_refcount_intent, ri_list);
-
- xfs_refcount_update_put_group(ri);
- kmem_cache_free(xfs_refcount_intent_cache, ri);
+ xfs_cui_release(CUI_ITEM(intent));
}
/* Is this recovered CUI ok? */
@@ -429,7 +431,7 @@ xfs_cui_recover_work(
ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
ri->ri_startblock = pmap->pe_startblock;
ri->ri_blockcount = pmap->pe_len;
- xfs_refcount_update_get_group(mp, ri);
+ ri->ri_pag = xfs_perag_intent_get(mp, pmap->pe_startblock);
xfs_defer_add_item(dfp, &ri->ri_list);
}
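
Note on the new call pattern: with the get_group/put_group helpers folded away, a caller builds the xfs_refcount_intent and hands it straight to xfs_refcount_defer_add(), which takes the passive perag intent reference and queues the CUI on the transaction in one step. A minimal sketch of that pattern, assuming the intent cache shown above (the wrapper name and GFP flags here are illustrative, not part of this patch):

static void
example_defer_refcount_increase(
	struct xfs_trans	*tp,
	xfs_fsblock_t		startblock,
	xfs_extlen_t		blockcount)
{
	struct xfs_refcount_intent	*ri;

	/* Intent allocation; __GFP_NOFAIL mirrors the usual intent style. */
	ri = kmem_cache_alloc(xfs_refcount_intent_cache,
			GFP_KERNEL | __GFP_NOFAIL);
	ri->ri_type = XFS_REFCOUNT_INCREASE;
	ri->ri_startblock = startblock;
	ri->ri_blockcount = blockcount;

	/* Grabs the perag intent ref and defers the CUI. */
	xfs_refcount_defer_add(tp, ri);
}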
diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
index eb0ab13682d0..bfee8f30c63c 100644
--- a/fs/xfs/xfs_refcount_item.h
+++ b/fs/xfs/xfs_refcount_item.h
@@ -71,4 +71,9 @@ struct xfs_cud_log_item {
extern struct kmem_cache *xfs_cui_cache;
extern struct kmem_cache *xfs_cud_cache;
+struct xfs_refcount_intent;
+
+void xfs_refcount_defer_add(struct xfs_trans *tp,
+ struct xfs_refcount_intent *ri);
+
#endif /* __XFS_REFCOUNT_ITEM_H__ */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 063a2e00d169..6fde6ec8092f 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -603,7 +603,7 @@ xfs_reflink_cancel_cow_blocks(
error = xfs_free_extent_later(*tpp, del.br_startblock,
del.br_blockcount, NULL,
- XFS_AG_RESV_NONE, false);
+ XFS_AG_RESV_NONE, 0);
if (error)
break;
@@ -1387,6 +1387,7 @@ xfs_reflink_remap_blocks(
destoff += imap.br_blockcount;
len -= imap.br_blockcount;
remapped_len += imap.br_blockcount;
+ cond_resched();
}
if (error)
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 65c5dfe17ecf..fb55e4ce49fa 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -6,16 +6,6 @@
#ifndef __XFS_REFLINK_H
#define __XFS_REFLINK_H 1
-static inline bool xfs_is_always_cow_inode(struct xfs_inode *ip)
-{
- return ip->i_mount->m_always_cow && xfs_has_reflink(ip->i_mount);
-}
-
-static inline bool xfs_is_cow_inode(struct xfs_inode *ip)
-{
- return xfs_is_reflink_inode(ip) || xfs_is_always_cow_inode(ip);
-}
-
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared);
int xfs_bmap_trim_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index e473124e29cc..88b5580e1e19 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -21,6 +21,8 @@
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
+#include "xfs_btree.h"
+#include "xfs_trace.h"
struct kmem_cache *xfs_rui_cache;
struct kmem_cache *xfs_rud_cache;
@@ -226,47 +228,9 @@ static const struct xfs_item_ops xfs_rud_item_ops = {
.iop_intent = xfs_rud_item_intent,
};
-/* Set the map extent flags for this reverse mapping. */
-static void
-xfs_trans_set_rmap_flags(
- struct xfs_map_extent *map,
- enum xfs_rmap_intent_type type,
- int whichfork,
- xfs_exntst_t state)
+static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e)
{
- map->me_flags = 0;
- if (state == XFS_EXT_UNWRITTEN)
- map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
- if (whichfork == XFS_ATTR_FORK)
- map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
- switch (type) {
- case XFS_RMAP_MAP:
- map->me_flags |= XFS_RMAP_EXTENT_MAP;
- break;
- case XFS_RMAP_MAP_SHARED:
- map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
- break;
- case XFS_RMAP_UNMAP:
- map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
- break;
- case XFS_RMAP_UNMAP_SHARED:
- map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
- break;
- case XFS_RMAP_CONVERT:
- map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
- break;
- case XFS_RMAP_CONVERT_SHARED:
- map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
- break;
- case XFS_RMAP_ALLOC:
- map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
- break;
- case XFS_RMAP_FREE:
- map->me_flags |= XFS_RMAP_EXTENT_FREE;
- break;
- default:
- ASSERT(0);
- }
+ return list_entry(e, struct xfs_rmap_intent, ri_list);
}
/* Sort rmap intents by AG. */
@@ -276,11 +240,8 @@ xfs_rmap_update_diff_items(
const struct list_head *a,
const struct list_head *b)
{
- struct xfs_rmap_intent *ra;
- struct xfs_rmap_intent *rb;
-
- ra = container_of(a, struct xfs_rmap_intent, ri_list);
- rb = container_of(b, struct xfs_rmap_intent, ri_list);
+ struct xfs_rmap_intent *ra = ri_entry(a);
+ struct xfs_rmap_intent *rb = ri_entry(b);
return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
}
@@ -307,8 +268,40 @@ xfs_rmap_update_log_item(
map->me_startblock = ri->ri_bmap.br_startblock;
map->me_startoff = ri->ri_bmap.br_startoff;
map->me_len = ri->ri_bmap.br_blockcount;
- xfs_trans_set_rmap_flags(map, ri->ri_type, ri->ri_whichfork,
- ri->ri_bmap.br_state);
+
+ map->me_flags = 0;
+ if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
+ map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
+ if (ri->ri_whichfork == XFS_ATTR_FORK)
+ map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
+ switch (ri->ri_type) {
+ case XFS_RMAP_MAP:
+ map->me_flags |= XFS_RMAP_EXTENT_MAP;
+ break;
+ case XFS_RMAP_MAP_SHARED:
+ map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
+ break;
+ case XFS_RMAP_UNMAP:
+ map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
+ break;
+ case XFS_RMAP_UNMAP_SHARED:
+ map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
+ break;
+ case XFS_RMAP_CONVERT:
+ map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
+ break;
+ case XFS_RMAP_CONVERT_SHARED:
+ map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
+ break;
+ case XFS_RMAP_ALLOC:
+ map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
+ break;
+ case XFS_RMAP_FREE:
+ map->me_flags |= XFS_RMAP_EXTENT_FREE;
+ break;
+ default:
+ ASSERT(0);
+ }
}
static struct xfs_log_item *
@@ -350,24 +343,29 @@ xfs_rmap_update_create_done(
return &rudp->rud_item;
}
-/* Take a passive ref to the AG containing the space we're rmapping. */
+/* Add this deferred RUI to the transaction. */
void
-xfs_rmap_update_get_group(
- struct xfs_mount *mp,
+xfs_rmap_defer_add(
+ struct xfs_trans *tp,
struct xfs_rmap_intent *ri)
{
- xfs_agnumber_t agno;
+ struct xfs_mount *mp = tp->t_mountp;
+
+ trace_xfs_rmap_defer(mp, ri);
- agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
- ri->ri_pag = xfs_perag_intent_get(mp, agno);
+ ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_bmap.br_startblock);
+ xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
}
-/* Release a passive AG ref after finishing rmapping work. */
-static inline void
-xfs_rmap_update_put_group(
- struct xfs_rmap_intent *ri)
+/* Cancel a deferred rmap update. */
+STATIC void
+xfs_rmap_update_cancel_item(
+ struct list_head *item)
{
+ struct xfs_rmap_intent *ri = ri_entry(item);
+
xfs_perag_intent_put(ri->ri_pag);
+ kmem_cache_free(xfs_rmap_intent_cache, ri);
}
/* Process a deferred rmap update. */
@@ -378,37 +376,38 @@ xfs_rmap_update_finish_item(
struct list_head *item,
struct xfs_btree_cur **state)
{
- struct xfs_rmap_intent *ri;
+ struct xfs_rmap_intent *ri = ri_entry(item);
int error;
- ri = container_of(item, struct xfs_rmap_intent, ri_list);
-
error = xfs_rmap_finish_one(tp, ri, state);
- xfs_rmap_update_put_group(ri);
- kmem_cache_free(xfs_rmap_intent_cache, ri);
+ xfs_rmap_update_cancel_item(item);
return error;
}
-/* Abort all pending RUIs. */
+/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
-xfs_rmap_update_abort_intent(
- struct xfs_log_item *intent)
+xfs_rmap_finish_one_cleanup(
+ struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur,
+ int error)
{
- xfs_rui_release(RUI_ITEM(intent));
+ struct xfs_buf *agbp = NULL;
+
+ if (rcur == NULL)
+ return;
+ agbp = rcur->bc_ag.agbp;
+ xfs_btree_del_cursor(rcur, error);
+ if (error && agbp)
+ xfs_trans_brelse(tp, agbp);
}
-/* Cancel a deferred rmap update. */
+/* Abort all pending RUIs. */
STATIC void
-xfs_rmap_update_cancel_item(
- struct list_head *item)
+xfs_rmap_update_abort_intent(
+ struct xfs_log_item *intent)
{
- struct xfs_rmap_intent *ri;
-
- ri = container_of(item, struct xfs_rmap_intent, ri_list);
-
- xfs_rmap_update_put_group(ri);
- kmem_cache_free(xfs_rmap_intent_cache, ri);
+ xfs_rui_release(RUI_ITEM(intent));
}
/* Is this recovered RUI ok? */
@@ -495,7 +494,7 @@ xfs_rui_recover_work(
ri->ri_bmap.br_blockcount = map->me_len;
ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
- xfs_rmap_update_get_group(mp, ri);
+ ri->ri_pag = xfs_perag_intent_get(mp, map->me_startblock);
xfs_defer_add_item(dfp, &ri->ri_list);
}
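
The rmap conversion follows the same shape: fill in a struct xfs_rmap_intent and let xfs_rmap_defer_add() take the perag intent reference and queue the RUI. A hedged sketch (the wrapper name is invented for illustration):

static void
example_defer_rmap_map(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	int				whichfork,
	const struct xfs_bmbt_irec	*bmap)
{
	struct xfs_rmap_intent	*ri;

	ri = kmem_cache_alloc(xfs_rmap_intent_cache,
			GFP_KERNEL | __GFP_NOFAIL);
	ri->ri_type = XFS_RMAP_MAP;
	ri->ri_owner = ip->i_ino;
	ri->ri_whichfork = whichfork;
	ri->ri_bmap = *bmap;

	/* Grabs the perag intent ref and defers the RUI. */
	xfs_rmap_defer_add(tp, ri);
}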
diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h
index 802e5119eaca..40d331555675 100644
--- a/fs/xfs/xfs_rmap_item.h
+++ b/fs/xfs/xfs_rmap_item.h
@@ -71,4 +71,8 @@ struct xfs_rud_log_item {
extern struct kmem_cache *xfs_rui_cache;
extern struct kmem_cache *xfs_rud_cache;
+struct xfs_rmap_intent;
+
+void xfs_rmap_defer_add(struct xfs_trans *tp, struct xfs_rmap_intent *ri);
+
#endif /* __XFS_RMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 5a7ddfed1bb8..0c3e96c621a6 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -12,6 +12,7 @@
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
+#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap_util.h"
@@ -1382,7 +1383,7 @@ retry:
start = 0;
} else if (xfs_bmap_adjacent(ap)) {
start = xfs_rtb_to_rtx(mp, ap->blkno);
- } else if (ap->eof && ap->offset == 0) {
+ } else if (ap->datatype & XFS_ALLOC_INITIAL_USER_DATA) {
/*
* If it's an allocation to an empty file at offset 0, pick an
* extent that will space things out in the rt area.
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 17aee806ec2e..77f19e2f66e0 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -90,19 +90,25 @@ xfs_symlink(
struct xfs_inode **ipp)
{
struct xfs_mount *mp = dp->i_mount;
+ struct xfs_icreate_args args = {
+ .idmap = idmap,
+ .pip = dp,
+ .mode = S_IFLNK | (mode & ~S_IFMT),
+ };
+ struct xfs_dir_update du = {
+ .dp = dp,
+ .name = link_name,
+ };
struct xfs_trans *tp = NULL;
- struct xfs_inode *ip = NULL;
int error = 0;
int pathlen;
bool unlock_dp_on_error = false;
xfs_filblks_t fs_blocks;
- prid_t prid;
- struct xfs_dquot *udqp = NULL;
- struct xfs_dquot *gdqp = NULL;
- struct xfs_dquot *pdqp = NULL;
+ struct xfs_dquot *udqp;
+ struct xfs_dquot *gdqp;
+ struct xfs_dquot *pdqp;
uint resblks;
xfs_ino_t ino;
- struct xfs_parent_args *ppargs;
*ipp = NULL;
@@ -119,15 +125,8 @@ xfs_symlink(
return -ENAMETOOLONG;
ASSERT(pathlen > 0);
- prid = xfs_get_initial_prid(dp);
-
- /*
- * Make sure that we have allocated dquot(s) on disk.
- */
- error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
- mapped_fsgid(idmap, &init_user_ns), prid,
- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
- &udqp, &gdqp, &pdqp);
+ /* Make sure that we have allocated dquot(s) on disk. */
+ error = xfs_icreate_dqalloc(&args, &udqp, &gdqp, &pdqp);
if (error)
return error;
@@ -143,7 +142,7 @@ xfs_symlink(
fs_blocks = xfs_symlink_blocks(mp, pathlen);
resblks = xfs_symlink_space_res(mp, link_name->len, fs_blocks);
- error = xfs_parent_start(mp, &ppargs);
+ error = xfs_parent_start(mp, &du.ppargs);
if (error)
goto out_release_dquots;
@@ -168,9 +167,7 @@ xfs_symlink(
*/
error = xfs_dialloc(&tp, dp->i_ino, S_IFLNK, &ino);
if (!error)
- error = xfs_init_new_inode(idmap, tp, dp, ino,
- S_IFLNK | (mode & ~S_IFMT), 1, 0, prid,
- xfs_has_parent(mp), &ip);
+ error = xfs_icreate(tp, ino, &args, &du.ip);
if (error)
goto out_trans_cancel;
@@ -186,33 +183,22 @@ xfs_symlink(
/*
* Also attach the dquot(s) to it, if applicable.
*/
- xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
+ xfs_qm_vop_create_dqattach(tp, du.ip, udqp, gdqp, pdqp);
resblks -= XFS_IALLOC_SPACE_RES(mp);
- error = xfs_symlink_write_target(tp, ip, ip->i_ino, target_path,
+ error = xfs_symlink_write_target(tp, du.ip, du.ip->i_ino, target_path,
pathlen, fs_blocks, resblks);
if (error)
goto out_trans_cancel;
resblks -= fs_blocks;
- i_size_write(VFS_I(ip), ip->i_disk_size);
+ i_size_write(VFS_I(du.ip), du.ip->i_disk_size);
/*
* Create the directory entry for the symlink.
*/
- error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, resblks);
+ error = xfs_dir_create_child(tp, resblks, &du);
if (error)
goto out_trans_cancel;
- xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
-
- /* Add parent pointer for the new symlink. */
- if (ppargs) {
- error = xfs_parent_addname(tp, ppargs, dp, link_name, ip);
- if (error)
- goto out_trans_cancel;
- }
-
- xfs_dir_update_hook(dp, ip, 1, link_name);
/*
* If this is a synchronous mount, make sure that the
@@ -230,10 +216,10 @@ xfs_symlink(
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
- *ipp = ip;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ *ipp = du.ip;
+ xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
return 0;
out_trans_cancel:
@@ -244,13 +230,13 @@ out_release_inode:
* setup of the inode and release the inode. This prevents recursive
* transactions and deadlocks from xfs_inactive.
*/
- if (ip) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_finish_inode_setup(ip);
- xfs_irele(ip);
+ if (du.ip) {
+ xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
+ xfs_finish_inode_setup(du.ip);
+ xfs_irele(du.ip);
}
out_parent:
- xfs_parent_finish(mp, ppargs);
+ xfs_parent_finish(mp, du.ppargs);
out_release_dquots:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index d2391eec37fe..60cb5318fdae 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -432,39 +432,30 @@ log_tail_lsn_show(
XFS_SYSFS_ATTR_RO(log_tail_lsn);
STATIC ssize_t
-reserve_grant_head_show(
+reserve_grant_head_bytes_show(
struct kobject *kobject,
char *buf)
-
{
- int cycle;
- int bytes;
- struct xlog *log = to_xlog(kobject);
-
- xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
- return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
+ return sysfs_emit(buf, "%lld\n",
+ atomic64_read(&to_xlog(kobject)->l_reserve_head.grant));
}
-XFS_SYSFS_ATTR_RO(reserve_grant_head);
+XFS_SYSFS_ATTR_RO(reserve_grant_head_bytes);
STATIC ssize_t
-write_grant_head_show(
+write_grant_head_bytes_show(
struct kobject *kobject,
char *buf)
{
- int cycle;
- int bytes;
- struct xlog *log = to_xlog(kobject);
-
- xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
- return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
+ return sysfs_emit(buf, "%lld\n",
+ atomic64_read(&to_xlog(kobject)->l_write_head.grant));
}
-XFS_SYSFS_ATTR_RO(write_grant_head);
+XFS_SYSFS_ATTR_RO(write_grant_head_bytes);
static struct attribute *xfs_log_attrs[] = {
ATTR_LIST(log_head_lsn),
ATTR_LIST(log_tail_lsn),
- ATTR_LIST(reserve_grant_head),
- ATTR_LIST(write_grant_head),
+ ATTR_LIST(reserve_grant_head_bytes),
+ ATTR_LIST(write_grant_head_bytes),
NULL,
};
ATTRIBUTE_GROUPS(xfs_log);
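
Because these attributes now emit a single decimal byte count rather than the old cycle:bytes pair, userspace can read them with a plain integer parse. A small sketch; the sysfs path is an assumption about where the per-log kobject is usually exposed (something like /sys/fs/xfs/<dev>/log/reserve_grant_head_bytes):

#include <stdio.h>
#include <stdint.h>

/* Read one of the byte-valued grant head attributes from sysfs. */
static int
read_grant_head_bytes(const char *path, int64_t *bytes)
{
	FILE		*f = fopen(path, "r");
	long long	v;

	if (!f)
		return -1;
	if (fscanf(f, "%lld", &v) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	*bytes = v;
	return 0;
}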
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 9c7fbaae2717..2af9f274e872 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -22,6 +22,7 @@
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
+#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
@@ -38,10 +39,11 @@
#include "xfs_iomap.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
-#include "xfs_bmap.h"
#include "xfs_exchmaps.h"
#include "xfs_exchrange.h"
#include "xfs_parent.h"
+#include "xfs_rmap.h"
+#include "xfs_refcount.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 25ff6fe1eb6c..5646d300b286 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -90,6 +90,9 @@ struct xfs_exchrange;
struct xfs_getparents;
struct xfs_parent_irec;
struct xfs_attrlist_cursor_kern;
+struct xfs_extent_free_item;
+struct xfs_rmap_intent;
+struct xfs_refcount_intent;
#define XFS_ATTR_FILTER_FLAGS \
{ XFS_ATTR_ROOT, "ROOT" }, \
@@ -1227,6 +1230,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
TP_ARGS(log, tic),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(unsigned long, tic)
__field(char, ocnt)
__field(char, cnt)
__field(int, curr_res)
@@ -1234,16 +1238,16 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
__field(unsigned int, flags)
__field(int, reserveq)
__field(int, writeq)
- __field(int, grant_reserve_cycle)
- __field(int, grant_reserve_bytes)
- __field(int, grant_write_cycle)
- __field(int, grant_write_bytes)
+ __field(uint64_t, grant_reserve_bytes)
+ __field(uint64_t, grant_write_bytes)
+ __field(uint64_t, tail_space)
__field(int, curr_cycle)
__field(int, curr_block)
__field(xfs_lsn_t, tail_lsn)
),
TP_fast_assign(
__entry->dev = log->l_mp->m_super->s_dev;
+ __entry->tic = (unsigned long)tic;
__entry->ocnt = tic->t_ocnt;
__entry->cnt = tic->t_cnt;
__entry->curr_res = tic->t_curr_res;
@@ -1251,23 +1255,22 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
__entry->flags = tic->t_flags;
__entry->reserveq = list_empty(&log->l_reserve_head.waiters);
__entry->writeq = list_empty(&log->l_write_head.waiters);
- xlog_crack_grant_head(&log->l_reserve_head.grant,
- &__entry->grant_reserve_cycle,
- &__entry->grant_reserve_bytes);
- xlog_crack_grant_head(&log->l_write_head.grant,
- &__entry->grant_write_cycle,
- &__entry->grant_write_bytes);
+ __entry->tail_space = READ_ONCE(log->l_tail_space);
+ __entry->grant_reserve_bytes = __entry->tail_space +
+ atomic64_read(&log->l_reserve_head.grant);
+ __entry->grant_write_bytes = __entry->tail_space +
+ atomic64_read(&log->l_write_head.grant);
__entry->curr_cycle = log->l_curr_cycle;
__entry->curr_block = log->l_curr_block;
__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
),
- TP_printk("dev %d:%d t_ocnt %u t_cnt %u t_curr_res %u "
- "t_unit_res %u t_flags %s reserveq %s "
- "writeq %s grant_reserve_cycle %d "
- "grant_reserve_bytes %d grant_write_cycle %d "
- "grant_write_bytes %d curr_cycle %d curr_block %d "
+ TP_printk("dev %d:%d tic 0x%lx t_ocnt %u t_cnt %u t_curr_res %u "
+ "t_unit_res %u t_flags %s reserveq %s writeq %s "
+ "tail space %llu grant_reserve_bytes %llu "
+ "grant_write_bytes %llu curr_cycle %d curr_block %d "
"tail_cycle %d tail_block %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tic,
__entry->ocnt,
__entry->cnt,
__entry->curr_res,
@@ -1275,9 +1278,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
__print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
__entry->reserveq ? "empty" : "active",
__entry->writeq ? "empty" : "active",
- __entry->grant_reserve_cycle,
+ __entry->tail_space,
__entry->grant_reserve_bytes,
- __entry->grant_write_cycle,
__entry->grant_write_bytes,
__entry->curr_cycle,
__entry->curr_block,
@@ -1305,6 +1307,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_cil_wait);
+DEFINE_LOGGRANT_EVENT(xfs_log_cil_return);
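
In the event class above, each traced grant value is now the tail space plus the byte-based grant head, so the traced numbers and l_logsize are enough to estimate free reservation space. A rough sketch of that relationship, under the assumption that free space is simply whatever the reserve head has not consumed:

/* Illustration only: estimate free reservation space from the same
 * quantities this tracepoint records. */
static inline uint64_t
example_log_free_bytes(struct xlog *log)
{
	uint64_t	used = READ_ONCE(log->l_tail_space) +
			       atomic64_read(&log->l_reserve_head.grant);

	return used < log->l_logsize ? log->l_logsize - used : 0;
}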
DECLARE_EVENT_CLASS(xfs_log_item_class,
TP_PROTO(struct xfs_log_item *lip),
@@ -1404,19 +1407,19 @@ TRACE_EVENT(xfs_log_assign_tail_lsn,
__field(dev_t, dev)
__field(xfs_lsn_t, new_lsn)
__field(xfs_lsn_t, old_lsn)
- __field(xfs_lsn_t, last_sync_lsn)
+ __field(xfs_lsn_t, head_lsn)
),
TP_fast_assign(
__entry->dev = log->l_mp->m_super->s_dev;
__entry->new_lsn = new_lsn;
__entry->old_lsn = atomic64_read(&log->l_tail_lsn);
- __entry->last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+ __entry->head_lsn = log->l_ailp->ail_head_lsn;
),
- TP_printk("dev %d:%d new tail lsn %d/%d, old lsn %d/%d, last sync %d/%d",
+ TP_printk("dev %d:%d new tail lsn %d/%d, old lsn %d/%d, head lsn %d/%d",
MAJOR(__entry->dev), MINOR(__entry->dev),
CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
- CYCLE_LSN(__entry->last_sync_lsn), BLOCK_LSN(__entry->last_sync_lsn))
+ CYCLE_LSN(__entry->head_lsn), BLOCK_LSN(__entry->head_lsn))
)
DECLARE_EVENT_CLASS(xfs_file_class,
@@ -2460,6 +2463,35 @@ DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);
+DECLARE_EVENT_CLASS(xfs_rtdiscard_class,
+ TP_PROTO(struct xfs_mount *mp,
+ xfs_rtblock_t rtbno, xfs_rtblock_t len),
+ TP_ARGS(mp, rtbno, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_rtblock_t, rtbno)
+ __field(xfs_rtblock_t, len)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_rtdev_targp->bt_dev;
+ __entry->rtbno = rtbno;
+ __entry->len = len;
+ ),
+ TP_printk("dev %d:%d rtbno 0x%llx rtbcount 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rtbno,
+ __entry->len)
+)
+
+#define DEFINE_RTDISCARD_EVENT(name) \
+DEFINE_EVENT(xfs_rtdiscard_class, name, \
+ TP_PROTO(struct xfs_mount *mp, \
+ xfs_rtblock_t rtbno, xfs_rtblock_t len), \
+ TP_ARGS(mp, rtbno, len))
+DEFINE_RTDISCARD_EVENT(xfs_discard_rtextent);
+DEFINE_RTDISCARD_EVENT(xfs_discard_rttoosmall);
+DEFINE_RTDISCARD_EVENT(xfs_discard_rtrelax);
+
DECLARE_EVENT_CLASS(xfs_btree_cur_class,
TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
TP_ARGS(cur, level, bp),
@@ -2681,41 +2713,37 @@ DEFINE_DEFER_PENDING_EVENT(xfs_defer_item_pause);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_item_unpause);
DECLARE_EVENT_CLASS(xfs_free_extent_deferred_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- int type, xfs_agblock_t agbno, xfs_extlen_t len),
- TP_ARGS(mp, agno, type, agbno, len),
+ TP_PROTO(struct xfs_mount *mp, struct xfs_extent_free_item *free),
+ TP_ARGS(mp, free),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
- __field(int, type)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
+ __field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
- __entry->type = type;
- __entry->agbno = agbno;
- __entry->len = len;
+ __entry->agno = XFS_FSB_TO_AGNO(mp, free->xefi_startblock);
+ __entry->agbno = XFS_FSB_TO_AGBNO(mp, free->xefi_startblock);
+ __entry->len = free->xefi_blockcount;
+ __entry->flags = free->xefi_flags;
),
- TP_printk("dev %d:%d op %d agno 0x%x agbno 0x%x fsbcount 0x%x",
+ TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
__entry->agno,
__entry->agbno,
- __entry->len)
+ __entry->len,
+ __entry->flags)
);
#define DEFINE_FREE_EXTENT_DEFERRED_EVENT(name) \
DEFINE_EVENT(xfs_free_extent_deferred_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- int type, \
- xfs_agblock_t bno, \
- xfs_extlen_t len), \
- TP_ARGS(mp, agno, type, bno, len))
-DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_bmap_free_defer);
-DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_bmap_free_deferred);
+ TP_PROTO(struct xfs_mount *mp, struct xfs_extent_free_item *free), \
+ TP_ARGS(mp, free))
DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_agfl_free_defer);
DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_agfl_free_deferred);
+DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_extent_free_defer);
+DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_extent_free_deferred);
DECLARE_EVENT_CLASS(xfs_defer_pending_item_class,
TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp,
@@ -2760,10 +2788,10 @@ DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_finish_item);
/* rmap tracepoints */
DECLARE_EVENT_CLASS(xfs_rmap_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ TP_PROTO(struct xfs_btree_cur *cur,
xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten,
const struct xfs_owner_info *oinfo),
- TP_ARGS(mp, agno, agbno, len, unwritten, oinfo),
+ TP_ARGS(cur, agbno, len, unwritten, oinfo),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -2774,8 +2802,8 @@ DECLARE_EVENT_CLASS(xfs_rmap_class,
__field(unsigned long, flags)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = agbno;
__entry->len = len;
__entry->owner = oinfo->oi_owner;
@@ -2795,57 +2823,109 @@ DECLARE_EVENT_CLASS(xfs_rmap_class,
);
#define DEFINE_RMAP_EVENT(name) \
DEFINE_EVENT(xfs_rmap_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ TP_PROTO(struct xfs_btree_cur *cur, \
xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \
const struct xfs_owner_info *oinfo), \
- TP_ARGS(mp, agno, agbno, len, unwritten, oinfo))
+ TP_ARGS(cur, agbno, len, unwritten, oinfo))
-/* simple AG-based error/%ip tracepoint class */
-DECLARE_EVENT_CLASS(xfs_ag_error_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error,
+/* btree cursor error/%ip tracepoint class */
+DECLARE_EVENT_CLASS(xfs_btree_error_class,
+ TP_PROTO(struct xfs_btree_cur *cur, int error,
unsigned long caller_ip),
- TP_ARGS(mp, agno, error, caller_ip),
+ TP_ARGS(cur, error, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
+ __field(xfs_ino_t, ino)
__field(int, error)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ switch (cur->bc_ops->type) {
+ case XFS_BTREE_TYPE_INODE:
+ __entry->agno = 0;
+ __entry->ino = cur->bc_ino.ip->i_ino;
+ break;
+ case XFS_BTREE_TYPE_AG:
+ __entry->agno = cur->bc_ag.pag->pag_agno;
+ __entry->ino = 0;
+ break;
+ case XFS_BTREE_TYPE_MEM:
+ __entry->agno = 0;
+ __entry->ino = 0;
+ break;
+ }
__entry->error = error;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d agno 0x%x error %d caller %pS",
+ TP_printk("dev %d:%d agno 0x%x ino 0x%llx error %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
+ __entry->ino,
__entry->error,
(char *)__entry->caller_ip)
);
-#define DEFINE_AG_ERROR_EVENT(name) \
-DEFINE_EVENT(xfs_ag_error_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error, \
+#define DEFINE_BTREE_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_btree_error_class, name, \
+ TP_PROTO(struct xfs_btree_cur *cur, int error, \
unsigned long caller_ip), \
- TP_ARGS(mp, agno, error, caller_ip))
+ TP_ARGS(cur, error, caller_ip))
DEFINE_RMAP_EVENT(xfs_rmap_unmap);
DEFINE_RMAP_EVENT(xfs_rmap_unmap_done);
-DEFINE_AG_ERROR_EVENT(xfs_rmap_unmap_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_rmap_unmap_error);
DEFINE_RMAP_EVENT(xfs_rmap_map);
DEFINE_RMAP_EVENT(xfs_rmap_map_done);
-DEFINE_AG_ERROR_EVENT(xfs_rmap_map_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_rmap_map_error);
DEFINE_RMAP_EVENT(xfs_rmap_convert);
DEFINE_RMAP_EVENT(xfs_rmap_convert_done);
-DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_error);
-DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_state);
+DEFINE_BTREE_ERROR_EVENT(xfs_rmap_convert_error);
+
+TRACE_EVENT(xfs_rmap_convert_state,
+ TP_PROTO(struct xfs_btree_cur *cur, int state,
+ unsigned long caller_ip),
+ TP_ARGS(cur, state, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_ino_t, ino)
+ __field(int, state)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ switch (cur->bc_ops->type) {
+ case XFS_BTREE_TYPE_INODE:
+ __entry->agno = 0;
+ __entry->ino = cur->bc_ino.ip->i_ino;
+ break;
+ case XFS_BTREE_TYPE_AG:
+ __entry->agno = cur->bc_ag.pag->pag_agno;
+ __entry->ino = 0;
+ break;
+ case XFS_BTREE_TYPE_MEM:
+ __entry->agno = 0;
+ __entry->ino = 0;
+ break;
+ }
+ __entry->state = state;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d agno 0x%x ino 0x%llx state %d caller %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->ino,
+ __entry->state,
+ (char *)__entry->caller_ip)
+);
DECLARE_EVENT_CLASS(xfs_rmapbt_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ TP_PROTO(struct xfs_btree_cur *cur,
xfs_agblock_t agbno, xfs_extlen_t len,
uint64_t owner, uint64_t offset, unsigned int flags),
- TP_ARGS(mp, agno, agbno, len, owner, offset, flags),
+ TP_ARGS(cur, agbno, len, owner, offset, flags),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -2856,8 +2936,8 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class,
__field(unsigned int, flags)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = agbno;
__entry->len = len;
__entry->owner = owner;
@@ -2875,25 +2955,27 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class,
);
#define DEFINE_RMAPBT_EVENT(name) \
DEFINE_EVENT(xfs_rmapbt_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ TP_PROTO(struct xfs_btree_cur *cur, \
xfs_agblock_t agbno, xfs_extlen_t len, \
uint64_t owner, uint64_t offset, unsigned int flags), \
- TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
+ TP_ARGS(cur, agbno, len, owner, offset, flags))
+
+TRACE_DEFINE_ENUM(XFS_RMAP_MAP);
+TRACE_DEFINE_ENUM(XFS_RMAP_MAP_SHARED);
+TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP);
+TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP_SHARED);
+TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT);
+TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT_SHARED);
+TRACE_DEFINE_ENUM(XFS_RMAP_ALLOC);
+TRACE_DEFINE_ENUM(XFS_RMAP_FREE);
DECLARE_EVENT_CLASS(xfs_rmap_deferred_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- int op,
- xfs_agblock_t agbno,
- xfs_ino_t ino,
- int whichfork,
- xfs_fileoff_t offset,
- xfs_filblks_t len,
- xfs_exntst_t state),
- TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state),
+ TP_PROTO(struct xfs_mount *mp, struct xfs_rmap_intent *ri),
+ TP_ARGS(mp, ri),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(unsigned long long, owner)
__field(xfs_agnumber_t, agno)
- __field(xfs_ino_t, ino)
__field(xfs_agblock_t, agbno)
__field(int, whichfork)
__field(xfs_fileoff_t, l_loff)
@@ -2903,21 +2985,22 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class,
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
- __entry->ino = ino;
- __entry->agbno = agbno;
- __entry->whichfork = whichfork;
- __entry->l_loff = offset;
- __entry->l_len = len;
- __entry->l_state = state;
- __entry->op = op;
- ),
- TP_printk("dev %d:%d op %d agno 0x%x agbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d",
+ __entry->agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
+ __entry->agbno = XFS_FSB_TO_AGBNO(mp,
+ ri->ri_bmap.br_startblock);
+ __entry->owner = ri->ri_owner;
+ __entry->whichfork = ri->ri_whichfork;
+ __entry->l_loff = ri->ri_bmap.br_startoff;
+ __entry->l_len = ri->ri_bmap.br_blockcount;
+ __entry->l_state = ri->ri_bmap.br_state;
+ __entry->op = ri->ri_type;
+ ),
+ TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->op,
+ __print_symbolic(__entry->op, XFS_RMAP_INTENT_STRINGS),
__entry->agno,
__entry->agbno,
- __entry->ino,
+ __entry->owner,
__print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
__entry->l_loff,
__entry->l_len,
@@ -2925,24 +3008,17 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class,
);
#define DEFINE_RMAP_DEFERRED_EVENT(name) \
DEFINE_EVENT(xfs_rmap_deferred_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- int op, \
- xfs_agblock_t agbno, \
- xfs_ino_t ino, \
- int whichfork, \
- xfs_fileoff_t offset, \
- xfs_filblks_t len, \
- xfs_exntst_t state), \
- TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state))
+ TP_PROTO(struct xfs_mount *mp, struct xfs_rmap_intent *ri), \
+ TP_ARGS(mp, ri))
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer);
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred);
DEFINE_RMAPBT_EVENT(xfs_rmap_update);
DEFINE_RMAPBT_EVENT(xfs_rmap_insert);
DEFINE_RMAPBT_EVENT(xfs_rmap_delete);
-DEFINE_AG_ERROR_EVENT(xfs_rmap_insert_error);
-DEFINE_AG_ERROR_EVENT(xfs_rmap_delete_error);
-DEFINE_AG_ERROR_EVENT(xfs_rmap_update_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_rmap_insert_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_rmap_delete_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_rmap_update_error);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_candidate);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_query);
@@ -3068,21 +3144,74 @@ DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
+/* simple AG-based error/%ip tracepoint class */
+DECLARE_EVENT_CLASS(xfs_ag_error_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error,
+ unsigned long caller_ip),
+ TP_ARGS(mp, agno, error, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, error)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->error = error;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d agno 0x%x error %d caller %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->error,
+ (char *)__entry->caller_ip)
+);
+
+#define DEFINE_AG_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_ag_error_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, agno, error, caller_ip))
DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
/* refcount tracepoint classes */
-/* reuse the discard trace class for agbno/aglen-based traces */
-#define DEFINE_AG_EXTENT_EVENT(name) DEFINE_DISCARD_EVENT(name)
+DECLARE_EVENT_CLASS(xfs_refcount_class,
+ TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno,
+ xfs_extlen_t len),
+ TP_ARGS(cur, agbno, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ ),
+ TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len)
+);
+#define DEFINE_REFCOUNT_EVENT(name) \
+DEFINE_EVENT(xfs_refcount_class, name, \
+ TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno, \
+ xfs_extlen_t len), \
+ TP_ARGS(cur, agbno, len))
-/* ag btree lookup tracepoint class */
TRACE_DEFINE_ENUM(XFS_LOOKUP_EQi);
TRACE_DEFINE_ENUM(XFS_LOOKUP_LEi);
TRACE_DEFINE_ENUM(XFS_LOOKUP_GEi);
-DECLARE_EVENT_CLASS(xfs_ag_btree_lookup_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_agblock_t agbno, xfs_lookup_t dir),
- TP_ARGS(mp, agno, agbno, dir),
+TRACE_EVENT(xfs_refcount_lookup,
+ TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno,
+ xfs_lookup_t dir),
+ TP_ARGS(cur, agbno, dir),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -3090,8 +3219,8 @@ DECLARE_EVENT_CLASS(xfs_ag_btree_lookup_class,
__field(xfs_lookup_t, dir)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = agbno;
__entry->dir = dir;
),
@@ -3103,17 +3232,10 @@ DECLARE_EVENT_CLASS(xfs_ag_btree_lookup_class,
__entry->dir)
)
-#define DEFINE_AG_BTREE_LOOKUP_EVENT(name) \
-DEFINE_EVENT(xfs_ag_btree_lookup_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- xfs_agblock_t agbno, xfs_lookup_t dir), \
- TP_ARGS(mp, agno, agbno, dir))
-
/* single-rcext tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- struct xfs_refcount_irec *irec),
- TP_ARGS(mp, agno, irec),
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec),
+ TP_ARGS(cur, irec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -3123,8 +3245,8 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
__field(xfs_nlink_t, refcount)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->domain = irec->rc_domain;
__entry->startblock = irec->rc_startblock;
__entry->blockcount = irec->rc_blockcount;
@@ -3141,15 +3263,14 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
#define DEFINE_REFCOUNT_EXTENT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_extent_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- struct xfs_refcount_irec *irec), \
- TP_ARGS(mp, agno, irec))
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec), \
+ TP_ARGS(cur, irec))
/* single-rcext and an agbno tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- struct xfs_refcount_irec *irec, xfs_agblock_t agbno),
- TP_ARGS(mp, agno, irec, agbno),
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec,
+ xfs_agblock_t agbno),
+ TP_ARGS(cur, irec, agbno),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -3160,8 +3281,8 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
__field(xfs_agblock_t, agbno)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->domain = irec->rc_domain;
__entry->startblock = irec->rc_startblock;
__entry->blockcount = irec->rc_blockcount;
@@ -3180,15 +3301,15 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
#define DEFINE_REFCOUNT_EXTENT_AT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_extent_at_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- struct xfs_refcount_irec *irec, xfs_agblock_t agbno), \
- TP_ARGS(mp, agno, irec, agbno))
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, \
+ xfs_agblock_t agbno), \
+ TP_ARGS(cur, irec, agbno))
/* double-rcext tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2),
- TP_ARGS(mp, agno, i1, i2),
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
+ struct xfs_refcount_irec *i2),
+ TP_ARGS(cur, i1, i2),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -3202,8 +3323,8 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
__field(xfs_nlink_t, i2_refcount)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
__entry->i1_blockcount = i1->rc_blockcount;
@@ -3229,16 +3350,15 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
#define DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_double_extent_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2), \
- TP_ARGS(mp, agno, i1, i2))
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
+ struct xfs_refcount_irec *i2), \
+ TP_ARGS(cur, i1, i2))
/* double-rcext and an agbno tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2,
- xfs_agblock_t agbno),
- TP_ARGS(mp, agno, i1, i2, agbno),
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
+ struct xfs_refcount_irec *i2, xfs_agblock_t agbno),
+ TP_ARGS(cur, i1, i2, agbno),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -3253,8 +3373,8 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
__field(xfs_agblock_t, agbno)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
__entry->i1_blockcount = i1->rc_blockcount;
@@ -3282,17 +3402,15 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
#define DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_double_extent_at_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, \
- xfs_agblock_t agbno), \
- TP_ARGS(mp, agno, i1, i2, agbno))
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
+ struct xfs_refcount_irec *i2, xfs_agblock_t agbno), \
+ TP_ARGS(cur, i1, i2, agbno))
/* triple-rcext tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2,
- struct xfs_refcount_irec *i3),
- TP_ARGS(mp, agno, i1, i2, i3),
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
+ struct xfs_refcount_irec *i2, struct xfs_refcount_irec *i3),
+ TP_ARGS(cur, i1, i2, i3),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@@ -3310,8 +3428,8 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
__field(xfs_nlink_t, i3_refcount)
),
TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
__entry->i1_blockcount = i1->rc_blockcount;
@@ -3346,109 +3464,82 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
#define DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_triple_extent_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, \
- struct xfs_refcount_irec *i3), \
- TP_ARGS(mp, agno, i1, i2, i3))
+ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
+ struct xfs_refcount_irec *i2, struct xfs_refcount_irec *i3), \
+ TP_ARGS(cur, i1, i2, i3))
/* refcount btree tracepoints */
-DEFINE_AG_BTREE_LOOKUP_EVENT(xfs_refcount_lookup);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_get);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_update);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_insert);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_delete);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_insert_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_delete_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_update_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_insert_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_delete_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_update_error);
/* refcount adjustment tracepoints */
-DEFINE_AG_EXTENT_EVENT(xfs_refcount_increase);
-DEFINE_AG_EXTENT_EVENT(xfs_refcount_decrease);
-DEFINE_AG_EXTENT_EVENT(xfs_refcount_cow_increase);
-DEFINE_AG_EXTENT_EVENT(xfs_refcount_cow_decrease);
+DEFINE_REFCOUNT_EVENT(xfs_refcount_increase);
+DEFINE_REFCOUNT_EVENT(xfs_refcount_decrease);
+DEFINE_REFCOUNT_EVENT(xfs_refcount_cow_increase);
+DEFINE_REFCOUNT_EVENT(xfs_refcount_cow_decrease);
DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(xfs_refcount_merge_center_extents);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_modify_extent);
-DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_recover_extent);
DEFINE_REFCOUNT_EXTENT_AT_EVENT(xfs_refcount_split_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_left_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_right_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_left_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_right_extent);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_adjust_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_adjust_cow_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_center_extents_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_modify_extent_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_split_extent_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_left_extent_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_right_extent_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_find_left_extent_error);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_find_right_extent_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_adjust_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_adjust_cow_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_center_extents_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_modify_extent_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_split_extent_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_left_extent_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_right_extent_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_left_extent_error);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_right_extent_error);
/* reflink helpers */
-DEFINE_AG_EXTENT_EVENT(xfs_refcount_find_shared);
-DEFINE_AG_EXTENT_EVENT(xfs_refcount_find_shared_result);
-DEFINE_AG_ERROR_EVENT(xfs_refcount_find_shared_error);
+DEFINE_REFCOUNT_EVENT(xfs_refcount_find_shared);
+DEFINE_REFCOUNT_EVENT(xfs_refcount_find_shared_result);
+DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_shared_error);
+
+TRACE_DEFINE_ENUM(XFS_REFCOUNT_INCREASE);
+TRACE_DEFINE_ENUM(XFS_REFCOUNT_DECREASE);
+TRACE_DEFINE_ENUM(XFS_REFCOUNT_ALLOC_COW);
+TRACE_DEFINE_ENUM(XFS_REFCOUNT_FREE_COW);
DECLARE_EVENT_CLASS(xfs_refcount_deferred_class,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- int type, xfs_agblock_t agbno, xfs_extlen_t len),
- TP_ARGS(mp, agno, type, agbno, len),
+ TP_PROTO(struct xfs_mount *mp, struct xfs_refcount_intent *refc),
+ TP_ARGS(mp, refc),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
- __field(int, type)
+ __field(int, op)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
- __entry->type = type;
- __entry->agbno = agbno;
- __entry->len = len;
+ __entry->agno = XFS_FSB_TO_AGNO(mp, refc->ri_startblock);
+ __entry->op = refc->ri_type;
+ __entry->agbno = XFS_FSB_TO_AGBNO(mp, refc->ri_startblock);
+ __entry->len = refc->ri_blockcount;
),
- TP_printk("dev %d:%d op %d agno 0x%x agbno 0x%x fsbcount 0x%x",
+ TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x fsbcount 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
+ __print_symbolic(__entry->op, XFS_REFCOUNT_INTENT_STRINGS),
__entry->agno,
__entry->agbno,
__entry->len)
);
#define DEFINE_REFCOUNT_DEFERRED_EVENT(name) \
DEFINE_EVENT(xfs_refcount_deferred_class, name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
- int type, \
- xfs_agblock_t bno, \
- xfs_extlen_t len), \
- TP_ARGS(mp, agno, type, bno, len))
+ TP_PROTO(struct xfs_mount *mp, struct xfs_refcount_intent *refc), \
+ TP_ARGS(mp, refc))
DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_defer);
DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_deferred);
-
-TRACE_EVENT(xfs_refcount_finish_one_leftover,
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
- int type, xfs_agblock_t agbno, xfs_extlen_t len),
- TP_ARGS(mp, agno, type, agbno, len),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_agnumber_t, agno)
- __field(int, type)
- __field(xfs_agblock_t, agbno)
- __field(xfs_extlen_t, len)
- ),
- TP_fast_assign(
- __entry->dev = mp->m_super->s_dev;
- __entry->agno = agno;
- __entry->type = type;
- __entry->agbno = agbno;
- __entry->len = len;
- ),
- TP_printk("dev %d:%d type %d agno 0x%x agbno 0x%x fsbcount 0x%x",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
- __entry->agno,
- __entry->agbno,
- __entry->len)
-);
+DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_finish_one_leftover);
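
__print_symbolic() in the class above needs both the TRACE_DEFINE_ENUM registrations and a value/string table. XFS_REFCOUNT_INTENT_STRINGS is defined elsewhere (presumably alongside the intent enum) and likely expands to a table of this shape; the exact strings are an assumption, only the form matters here:

/* Assumed shape of the table consumed by __print_symbolic(). */
#define XFS_REFCOUNT_INTENT_STRINGS \
	{ XFS_REFCOUNT_INCREASE,	"increase" }, \
	{ XFS_REFCOUNT_DECREASE,	"decrease" }, \
	{ XFS_REFCOUNT_ALLOC_COW,	"alloc_cow" }, \
	{ XFS_REFCOUNT_FREE_COW,	"free_cow" }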
/* simple inode-based error/%ip tracepoint class */
DECLARE_EVENT_CLASS(xfs_inode_error_class,
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 828da4ac4316..bdf3704dc301 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -725,135 +725,6 @@ xfs_trans_free_items(
}
}
-static inline void
-xfs_log_item_batch_insert(
- struct xfs_ail *ailp,
- struct xfs_ail_cursor *cur,
- struct xfs_log_item **log_items,
- int nr_items,
- xfs_lsn_t commit_lsn)
-{
- int i;
-
- spin_lock(&ailp->ail_lock);
- /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
- xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
-
- for (i = 0; i < nr_items; i++) {
- struct xfs_log_item *lip = log_items[i];
-
- if (lip->li_ops->iop_unpin)
- lip->li_ops->iop_unpin(lip, 0);
- }
-}
-
-/*
- * Bulk operation version of xfs_trans_committed that takes a log vector of
- * items to insert into the AIL. This uses bulk AIL insertion techniques to
- * minimise lock traffic.
- *
- * If we are called with the aborted flag set, it is because a log write during
- * a CIL checkpoint commit has failed. In this case, all the items in the
- * checkpoint have already gone through iop_committed and iop_committing, which
- * means that checkpoint commit abort handling is treated exactly the same
- * as an iclog write error even though we haven't started any IO yet. Hence in
- * this case all we need to do is iop_committed processing, followed by an
- * iop_unpin(aborted) call.
- *
- * The AIL cursor is used to optimise the insert process. If commit_lsn is not
- * at the end of the AIL, the insert cursor avoids the need to walk
- * the AIL to find the insertion point on every xfs_log_item_batch_insert()
- * call. This saves a lot of needless list walking and is a net win, even
- * though it slightly increases that amount of AIL lock traffic to set it up
- * and tear it down.
- */
-void
-xfs_trans_committed_bulk(
- struct xfs_ail *ailp,
- struct list_head *lv_chain,
- xfs_lsn_t commit_lsn,
- bool aborted)
-{
-#define LOG_ITEM_BATCH_SIZE 32
- struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
- struct xfs_log_vec *lv;
- struct xfs_ail_cursor cur;
- int i = 0;
-
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
- spin_unlock(&ailp->ail_lock);
-
- /* unpin all the log items */
- list_for_each_entry(lv, lv_chain, lv_list) {
- struct xfs_log_item *lip = lv->lv_item;
- xfs_lsn_t item_lsn;
-
- if (aborted)
- set_bit(XFS_LI_ABORTED, &lip->li_flags);
-
- if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
- lip->li_ops->iop_release(lip);
- continue;
- }
-
- if (lip->li_ops->iop_committed)
- item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
- else
- item_lsn = commit_lsn;
-
- /* item_lsn of -1 means the item needs no further processing */
- if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
- continue;
-
- /*
- * if we are aborting the operation, no point in inserting the
- * object into the AIL as we are in a shutdown situation.
- */
- if (aborted) {
- ASSERT(xlog_is_shutdown(ailp->ail_log));
- if (lip->li_ops->iop_unpin)
- lip->li_ops->iop_unpin(lip, 1);
- continue;
- }
-
- if (item_lsn != commit_lsn) {
-
- /*
- * Not a bulk update option due to unusual item_lsn.
- * Push into AIL immediately, rechecking the lsn once
- * we have the ail lock. Then unpin the item. This does
- * not affect the AIL cursor the bulk insert path is
- * using.
- */
- spin_lock(&ailp->ail_lock);
- if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
- xfs_trans_ail_update(ailp, lip, item_lsn);
- else
- spin_unlock(&ailp->ail_lock);
- if (lip->li_ops->iop_unpin)
- lip->li_ops->iop_unpin(lip, 0);
- continue;
- }
-
- /* Item is a candidate for bulk AIL insert. */
- log_items[i++] = lv->lv_item;
- if (i >= LOG_ITEM_BATCH_SIZE) {
- xfs_log_item_batch_insert(ailp, &cur, log_items,
- LOG_ITEM_BATCH_SIZE, commit_lsn);
- i = 0;
- }
- }
-
- /* make sure we insert the remainder! */
- if (i)
- xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
-
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_cursor_done(&cur);
- spin_unlock(&ailp->ail_lock);
-}
-
/*
* Sort transaction items prior to running precommit operations. This will
* attempt to order the items such that they will always be locked in the same
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 1636663707dc..f06cc0f41665 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -58,13 +58,15 @@ struct xfs_log_item {
#define XFS_LI_FAILED 2
#define XFS_LI_DIRTY 3
#define XFS_LI_WHITEOUT 4
+#define XFS_LI_FLUSHING 5
#define XFS_LI_FLAGS \
{ (1u << XFS_LI_IN_AIL), "IN_AIL" }, \
{ (1u << XFS_LI_ABORTED), "ABORTED" }, \
{ (1u << XFS_LI_FAILED), "FAILED" }, \
{ (1u << XFS_LI_DIRTY), "DIRTY" }, \
- { (1u << XFS_LI_WHITEOUT), "WHITEOUT" }
+ { (1u << XFS_LI_WHITEOUT), "WHITEOUT" }, \
+ { (1u << XFS_LI_FLUSHING), "FLUSHING" }
struct xfs_item_ops {
unsigned flags;
@@ -224,7 +226,6 @@ void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
bool xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
-void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_log_buf(struct xfs_trans *, struct xfs_buf *, uint,
uint);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index e4c343096f95..0fafcc9f3dbe 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -135,25 +135,6 @@ xfs_ail_min_lsn(
}
/*
- * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
- */
-static xfs_lsn_t
-xfs_ail_max_lsn(
- struct xfs_ail *ailp)
-{
- xfs_lsn_t lsn = 0;
- struct xfs_log_item *lip;
-
- spin_lock(&ailp->ail_lock);
- lip = xfs_ail_max(ailp);
- if (lip)
- lsn = lip->li_lsn;
- spin_unlock(&ailp->ail_lock);
-
- return lsn;
-}
-
-/*
* The cursor keeps track of where our current traversal is up to by tracking
* the next item in the list for us. However, for this to be safe, removing an
* object from the AIL needs to invalidate any cursor that points to it. hence
 * object from the AIL needs to invalidate any cursor that points to it. Hence
@@ -414,6 +395,74 @@ xfsaild_push_item(
return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}
+/*
+ * Compute the LSN that we'd need to push the log tail towards in order to have
+ * at least 25% of the log space free. If the log free space already meets this
+ * threshold, this function returns the lowest LSN in the AIL to slowly keep
+ * writeback ticking over and the tail of the log moving forward.
+ */
+static xfs_lsn_t
+xfs_ail_calc_push_target(
+ struct xfs_ail *ailp)
+{
+ struct xlog *log = ailp->ail_log;
+ struct xfs_log_item *lip;
+ xfs_lsn_t target_lsn;
+ xfs_lsn_t max_lsn;
+ xfs_lsn_t min_lsn;
+ int32_t free_bytes;
+ uint32_t target_block;
+ uint32_t target_cycle;
+
+ lockdep_assert_held(&ailp->ail_lock);
+
+ lip = xfs_ail_max(ailp);
+ if (!lip)
+ return NULLCOMMITLSN;
+
+ max_lsn = lip->li_lsn;
+ min_lsn = __xfs_ail_min_lsn(ailp);
+
+ /*
+ * If we are supposed to push all the items in the AIL, we want to push
+ * to the current head. We then clear the push flag so that we don't
+ * keep pushing newly queued items beyond where the push all command was
+ * run. If the push waiter wants to empty the ail, it should queue
+ * itself on the ail_empty wait queue.
+ */
+ if (test_and_clear_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate))
+ return max_lsn;
+
+ /* If someone wants the AIL empty, keep pushing everything we have. */
+ if (waitqueue_active(&ailp->ail_empty))
+ return max_lsn;
+
+ /*
+ * Background pushing - attempt to keep 25% of the log free and if we
+ * have that much free retain the existing target.
+ */
+ free_bytes = log->l_logsize - xlog_lsn_sub(log, max_lsn, min_lsn);
+ if (free_bytes >= log->l_logsize >> 2)
+ return ailp->ail_target;
+
+ target_cycle = CYCLE_LSN(min_lsn);
+ target_block = BLOCK_LSN(min_lsn) + (log->l_logBBsize >> 2);
+ if (target_block >= log->l_logBBsize) {
+ target_block -= log->l_logBBsize;
+ target_cycle += 1;
+ }
+ target_lsn = xlog_assign_lsn(target_cycle, target_block);
+
+ /* Cap the target to the highest LSN known to be in the AIL. */
+ if (XFS_LSN_CMP(target_lsn, max_lsn) > 0)
+ return max_lsn;
+
+ /* If the existing target is higher than the new target, keep it. */
+ if (XFS_LSN_CMP(ailp->ail_target, target_lsn) >= 0)
+ return ailp->ail_target;
+ return target_lsn;
+}
+
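
To make the wrap-around arithmetic in xfs_ail_calc_push_target() concrete, a worked example with invented numbers:

/*
 * Suppose l_logBBsize = 0x10000 basic blocks, so the 25% step is
 * l_logBBsize >> 2 = 0x4000 blocks.  With the AIL minimum at cycle 7,
 * block 0xe000:
 *
 *	target_block = 0xe000 + 0x4000 = 0x12000   (>= l_logBBsize)
 *	             -> wraps to block 0x2000, target_cycle = 8
 *	target_lsn   = xlog_assign_lsn(8, 0x2000)
 *
 * The result is then capped at max_lsn and never moved backwards past
 * the previously published ail_target.
 */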
static long
xfsaild_push(
struct xfs_ail *ailp)
@@ -422,7 +471,6 @@ xfsaild_push(
struct xfs_ail_cursor cur;
struct xfs_log_item *lip;
xfs_lsn_t lsn;
- xfs_lsn_t target = NULLCOMMITLSN;
long tout;
int stuck = 0;
int flushing = 0;
@@ -447,37 +495,26 @@ xfsaild_push(
}
spin_lock(&ailp->ail_lock);
-
- /*
- * If we have a sync push waiter, we always have to push till the AIL is
- * empty. Update the target to point to the end of the AIL so that
- * capture updates that occur after the sync push waiter has gone to
- * sleep.
- */
- if (waitqueue_active(&ailp->ail_empty)) {
- lip = xfs_ail_max(ailp);
- if (lip)
- target = lip->li_lsn;
- } else {
- /* barrier matches the ail_target update in xfs_ail_push() */
- smp_rmb();
- target = ailp->ail_target;
- ailp->ail_target_prev = target;
- }
+ WRITE_ONCE(ailp->ail_target, xfs_ail_calc_push_target(ailp));
+ if (ailp->ail_target == NULLCOMMITLSN)
+ goto out_done;
/* we're done if the AIL is empty or our push has reached the end */
lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
if (!lip)
- goto out_done;
+ goto out_done_cursor;
XFS_STATS_INC(mp, xs_push_ail);
- ASSERT(target != NULLCOMMITLSN);
+ ASSERT(ailp->ail_target != NULLCOMMITLSN);
lsn = lip->li_lsn;
- while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
+ while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
int lock_result;
+ if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
+ goto next_item;
+
/*
* Note that iop_push may unlock and reacquire the AIL lock. We
* rely on the AIL cursor implementation to be able to deal with
@@ -547,20 +584,24 @@ xfsaild_push(
if (stuck > 100)
break;
+next_item:
lip = xfs_trans_ail_cursor_next(ailp, &cur);
if (lip == NULL)
break;
+ if (lip->li_lsn != lsn && count > 1000)
+ break;
lsn = lip->li_lsn;
}
-out_done:
+out_done_cursor:
xfs_trans_ail_cursor_done(&cur);
+out_done:
spin_unlock(&ailp->ail_lock);
if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
ailp->ail_log_flush++;
- if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
+ if (!count || XFS_LSN_CMP(lsn, ailp->ail_target) >= 0) {
/*
* We reached the target or the AIL is empty, so wait a bit
* longer for I/O to complete and remove pushed items from the
@@ -585,7 +626,7 @@ out_done:
/*
* Assume we have more work to do in a short while.
*/
- tout = 10;
+ tout = 0;
}
return tout;
@@ -603,7 +644,7 @@ xfsaild(
set_freezable();
while (1) {
- if (tout && tout <= 20)
+ if (tout)
set_current_state(TASK_KILLABLE|TASK_FREEZABLE);
else
set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
@@ -639,21 +680,9 @@ xfsaild(
break;
}
+ /* Idle if the AIL is empty. */
spin_lock(&ailp->ail_lock);
-
- /*
- * Idle if the AIL is empty and we are not racing with a target
- * update. We check the AIL after we set the task to a sleep
- * state to guarantee that we either catch an ail_target update
- * or that a wake_up resets the state to TASK_RUNNING.
- * Otherwise, we run the risk of sleeping indefinitely.
- *
- * The barrier matches the ail_target update in xfs_ail_push().
- */
- smp_rmb();
- if (!xfs_ail_min(ailp) &&
- ailp->ail_target == ailp->ail_target_prev &&
- list_empty(&ailp->ail_buf_list)) {
+ if (!xfs_ail_min(ailp) && list_empty(&ailp->ail_buf_list)) {
spin_unlock(&ailp->ail_lock);
schedule();
tout = 0;
@@ -676,56 +705,6 @@ xfsaild(
}
/*
- * This routine is called to move the tail of the AIL forward. It does this by
- * trying to flush items in the AIL whose lsns are below the given
- * threshold_lsn.
- *
- * The push is run asynchronously in a workqueue, which means the caller needs
- * to handle waiting on the async flush for space to become available.
- * We don't want to interrupt any push that is in progress, hence we only queue
- * work if we set the pushing bit appropriately.
- *
- * We do this unlocked - we only need to know whether there is anything in the
- * AIL at the time we are called. We don't need to access the contents of
- * any of the objects, so the lock is not needed.
- */
-void
-xfs_ail_push(
- struct xfs_ail *ailp,
- xfs_lsn_t threshold_lsn)
-{
- struct xfs_log_item *lip;
-
- lip = xfs_ail_min(ailp);
- if (!lip || xlog_is_shutdown(ailp->ail_log) ||
- XFS_LSN_CMP(threshold_lsn, ailp->ail_target) <= 0)
- return;
-
- /*
- * Ensure that the new target is noticed in push code before it clears
- * the XFS_AIL_PUSHING_BIT.
- */
- smp_wmb();
- xfs_trans_ail_copy_lsn(ailp, &ailp->ail_target, &threshold_lsn);
- smp_wmb();
-
- wake_up_process(ailp->ail_task);
-}
-
-/*
- * Push out all items in the AIL immediately
- */
-void
-xfs_ail_push_all(
- struct xfs_ail *ailp)
-{
- xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp);
-
- if (threshold_lsn)
- xfs_ail_push(ailp, threshold_lsn);
-}
-
-/*
* Push out all items in the AIL immediately and wait until the AIL is empty.
*/
void
@@ -748,21 +727,49 @@ xfs_ail_push_all_sync(
}
void
+__xfs_ail_assign_tail_lsn(
+ struct xfs_ail *ailp)
+{
+ struct xlog *log = ailp->ail_log;
+ xfs_lsn_t tail_lsn;
+
+ assert_spin_locked(&ailp->ail_lock);
+
+ if (xlog_is_shutdown(log))
+ return;
+
+ tail_lsn = __xfs_ail_min_lsn(ailp);
+ if (!tail_lsn)
+ tail_lsn = ailp->ail_head_lsn;
+
+ WRITE_ONCE(log->l_tail_space,
+ xlog_lsn_sub(log, ailp->ail_head_lsn, tail_lsn));
+ trace_xfs_log_assign_tail_lsn(log, tail_lsn);
+ atomic64_set(&log->l_tail_lsn, tail_lsn);
+}
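In words, the update above publishes the amount of log space between the current AIL head and the log tail; a hedged restatement of the relationship:

	/*
	 * Hedged restatement (no new behaviour, just the relationship):
	 *
	 *	l_tail_space = xlog_lsn_sub(log, ail_head_lsn, tail_lsn)
	 *
	 * where tail_lsn is the minimum LSN in the AIL, or ail_head_lsn
	 * itself when the AIL is empty, in which case the published tail
	 * space is zero.
	 */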
+
+/*
+ * Callers should pass the original tail lsn so that we can detect if the tail
+ * has moved as a result of the operation that was performed. If the caller
+ * needs to force a tail space update, it should pass NULLCOMMITLSN to bypass
+ * the "did the tail LSN change?" checks. If the caller wants to avoid a tail
+ * update (e.g. it knows the tail did not change) it should pass an @old_lsn of
+ * 0.
+ */
+void
xfs_ail_update_finish(
struct xfs_ail *ailp,
xfs_lsn_t old_lsn) __releases(ailp->ail_lock)
{
struct xlog *log = ailp->ail_log;
- /* if the tail lsn hasn't changed, don't do updates or wakeups. */
+ /* If the tail lsn hasn't changed, don't do updates or wakeups. */
if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
spin_unlock(&ailp->ail_lock);
return;
}
- if (!xlog_is_shutdown(log))
- xlog_assign_tail_lsn_locked(log->l_mp);
-
+ __xfs_ail_assign_tail_lsn(ailp);
if (list_empty(&ailp->ail_head))
wake_up_all(&ailp->ail_empty);
spin_unlock(&ailp->ail_lock);
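A minimal caller sketch of the @old_lsn contract described above (the item-manipulation step is a placeholder, not code from this patch):

	xfs_lsn_t	tail_lsn;

	spin_lock(&ailp->ail_lock);
	tail_lsn = __xfs_ail_min_lsn(ailp);	/* sample the tail before changing the AIL */
	/* ... delete or reposition log items here ... */
	xfs_ail_update_finish(ailp, tail_lsn);	/* drops ail_lock; updates the tail if it moved */

Passing NULLCOMMITLSN instead forces the tail update, and passing 0 skips it, exactly as the comment above spells out.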
@@ -829,6 +836,19 @@ xfs_trans_ail_update_bulk(
if (!list_empty(&tmp))
xfs_ail_splice(ailp, cur, &tmp, lsn);
+ /*
+ * If this is the first insert, wake up the push daemon so it can
+ * actively scan for items to push. We also need a log tail LSN update
+ * so that the log tracks the tail correctly; set tail_lsn to
+ * NULLCOMMITLSN so that xfs_ail_update_finish() treats the tail as
+ * having changed and updates it appropriately.
+ */
+ if (!mlip) {
+ wake_up_process(ailp->ail_task);
+ tail_lsn = NULLCOMMITLSN;
+ }
+
xfs_ail_update_finish(ailp, tail_lsn);
}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index d5400150358e..bd841df93021 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -19,9 +19,6 @@ void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
void xfs_trans_del_item(struct xfs_log_item *);
void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
-void xfs_trans_committed_bulk(struct xfs_ail *ailp,
- struct list_head *lv_chain,
- xfs_lsn_t commit_lsn, bool aborted);
/*
* AIL traversal cursor.
*
@@ -55,16 +52,20 @@ struct xfs_ail {
struct xlog *ail_log;
struct task_struct *ail_task;
struct list_head ail_head;
- xfs_lsn_t ail_target;
- xfs_lsn_t ail_target_prev;
struct list_head ail_cursors;
spinlock_t ail_lock;
xfs_lsn_t ail_last_pushed_lsn;
+ xfs_lsn_t ail_head_lsn;
int ail_log_flush;
+ unsigned long ail_opstate;
struct list_head ail_buf_list;
wait_queue_head_t ail_empty;
+ xfs_lsn_t ail_target;
};
+/* Push all items out of the AIL immediately. */
+#define XFS_AIL_OPSTATE_PUSH_ALL 0u
+
/*
* From xfs_trans_ail.c
*/
@@ -101,10 +102,23 @@ void xfs_ail_update_finish(struct xfs_ail *ailp, xfs_lsn_t old_lsn)
__releases(ailp->ail_lock);
void xfs_trans_ail_delete(struct xfs_log_item *lip, int shutdown_type);
-void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
-void xfs_ail_push_all(struct xfs_ail *);
-void xfs_ail_push_all_sync(struct xfs_ail *);
-struct xfs_log_item *xfs_ail_min(struct xfs_ail *ailp);
+static inline void xfs_ail_push(struct xfs_ail *ailp)
+{
+ wake_up_process(ailp->ail_task);
+}
+
+static inline void xfs_ail_push_all(struct xfs_ail *ailp)
+{
+ if (!test_and_set_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate))
+ xfs_ail_push(ailp);
+}
+
+static inline xfs_lsn_t xfs_ail_get_push_target(struct xfs_ail *ailp)
+{
+ return READ_ONCE(ailp->ail_target);
+}
+
+void xfs_ail_push_all_sync(struct xfs_ail *ailp);
xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
@@ -117,6 +131,18 @@ struct xfs_log_item * xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur);
void xfs_trans_ail_cursor_done(struct xfs_ail_cursor *cur);
+void __xfs_ail_assign_tail_lsn(struct xfs_ail *ailp);
+
+static inline void
+xfs_ail_assign_tail_lsn(
+ struct xfs_ail *ailp)
+{
+ spin_lock(&ailp->ail_lock);
+ __xfs_ail_assign_tail_lsn(ailp);
+ spin_unlock(&ailp->ail_lock);
+}
+
#if BITS_PER_LONG != 64
static inline void
xfs_trans_ail_copy_lsn(
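Taken together, the xfs_trans_priv.h changes above replace the old target-passing xfs_ail_push() interface: callers now just wake xfsaild (optionally setting the push-all bit) and the daemon computes its own target. A minimal, hedged caller sketch; the condition is a placeholder:

	if (need_everything_pushed)		/* placeholder condition */
		xfs_ail_push_all(ailp);		/* set XFS_AIL_OPSTATE_PUSH_ALL and wake xfsaild */
	else
		xfs_ail_push(ailp);		/* just wake xfsaild; it recalculates its own target */

	xfs_lsn_t target = xfs_ail_get_push_target(ailp);	/* lockless READ_ONCE() snapshot */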
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 964fa7f24003..faf1eb87895d 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -662,6 +662,7 @@ static struct inode *zonefs_get_file_inode(struct inode *dir,
inode->i_op = &zonefs_file_inode_operations;
inode->i_fop = &zonefs_file_operations;
inode->i_mapping->a_ops = &zonefs_file_aops;
+ mapping_set_large_folios(inode->i_mapping);
/* Update the inode access rights depending on the zone condition */
zonefs_inode_update_mode(inode);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1a4dfd7a1c4a..05ca49478537 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -134,6 +134,7 @@ struct acpi_scan_handler {
bool (*match)(const char *idstr, const struct acpi_device_id **matchid);
int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
void (*detach)(struct acpi_device *dev);
+ void (*post_eject)(struct acpi_device *dev);
void (*bind)(struct device *phys_dev);
void (*unbind)(struct device *phys_dev);
struct acpi_hotplug_profile hotplug;
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 94d0fc3bd412..80dc36f9d527 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -663,6 +663,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_adr_space_type
space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_execute_orphan_reg_method(acpi_handle device,
+ acpi_adr_space_type
+ space_id))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_remove_address_space_handler(acpi_handle
device,
acpi_adr_space_type
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ae747c89d92c..e27958ef8264 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1194,11 +1194,23 @@ struct acpi_madt_generic_translator {
struct acpi_madt_multiproc_wakeup {
struct acpi_subtable_header header;
- u16 mailbox_version;
+ u16 version;
u32 reserved; /* reserved - must be zero */
- u64 base_address;
+ u64 mailbox_address;
+ u64 reset_vector;
};
+/* Values for Version field above */
+
+enum acpi_madt_multiproc_wakeup_version {
+ ACPI_MADT_MP_WAKEUP_VERSION_NONE = 0,
+ ACPI_MADT_MP_WAKEUP_VERSION_V1 = 1,
+ ACPI_MADT_MP_WAKEUP_VERSION_RESERVED = 2, /* 2 and greater are reserved */
+};
+
+#define ACPI_MADT_MP_WAKEUP_SIZE_V0 16
+#define ACPI_MADT_MP_WAKEUP_SIZE_V1 24
+
#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032
#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048
@@ -1211,7 +1223,8 @@ struct acpi_madt_multiproc_wakeup_mailbox {
u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */
};
-#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+#define ACPI_MP_WAKE_COMMAND_TEST 2
/* 17: CPU Core Interrupt Controller (ACPI 6.5) */
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
index 611a2561a014..c93f16dfb944 100644
--- a/include/acpi/battery.h
+++ b/include/acpi/battery.h
@@ -2,6 +2,7 @@
#ifndef __ACPI_BATTERY_H
#define __ACPI_BATTERY_H
+#include <linux/device.h>
#include <linux/power_supply.h>
#define ACPI_BATTERY_CLASS "battery"
@@ -19,5 +20,6 @@ struct acpi_battery_hook {
void battery_hook_register(struct acpi_battery_hook *hook);
void battery_hook_unregister(struct acpi_battery_hook *hook);
+int devm_battery_hook_register(struct device *dev, struct acpi_battery_hook *hook);
#endif
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3f34ebb27525..e6f6074eadbf 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -217,7 +217,7 @@ struct acpi_processor_flags {
u8 has_lpi:1;
u8 power_setup_done:1;
u8 bm_rld_set:1;
- u8 need_hotplug_init:1;
+ u8 previously_online:1;
};
struct acpi_processor {
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index b20fa25a7e8d..620b6da429d4 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -9,7 +9,6 @@ mandatory-y += archrandom.h
mandatory-y += barrier.h
mandatory-y += bitops.h
mandatory-y += bug.h
-mandatory-y += bugs.h
mandatory-y += cacheflush.h
mandatory-y += cfi.h
mandatory-y += checksum.h
@@ -46,6 +45,7 @@ mandatory-y += pci.h
mandatory-y += percpu.h
mandatory-y += pgalloc.h
mandatory-y += preempt.h
+mandatory-y += runtime-const.h
mandatory-y += rwonce.h
mandatory-y += sections.h
mandatory-y += serial.h
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 8cc7b09c1bc7..29cab7947980 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -97,8 +97,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define set_fixmap_io(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
-#define set_fixmap_offset_io(idx, phys) \
- __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_FIXMAP_H */
diff --git a/include/asm-generic/runtime-const.h b/include/asm-generic/runtime-const.h
new file mode 100644
index 000000000000..670499459514
--- /dev/null
+++ b/include/asm-generic/runtime-const.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+/*
+ * This is the fallback for when the architecture doesn't
+ * support the runtime const operations.
+ *
+ * We just use the actual symbols as-is.
+ */
+#define runtime_const_ptr(sym) (sym)
+#define runtime_const_shift_right_32(val, sym) ((u32)(val)>>(sym))
+#define runtime_const_init(type,sym) do { } while (0)
+
+#endif
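A hedged sketch of how architecture-neutral code might consume these helpers; the symbols below are placeholders, and with the fallback definitions above the whole thing reduces to ordinary loads:

	extern unsigned long *example_table;	/* placeholder symbols, not real kernel objects */
	extern unsigned int example_shift;

	static unsigned long *example_lookup(u32 hash)
	{
		/* With this fallback header, these are plain reads of the symbols. */
		return runtime_const_ptr(example_table) +
			runtime_const_shift_right_32(hash, example_shift);
	}

Architectures that do support constant patching would additionally run runtime_const_init() once the final values are known; with this header it is a no-op.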
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index 933ca6581aba..fabcefe8a80a 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -19,7 +19,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
#ifndef sys_mmap
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
- unsigned long fd, off_t pgoff);
+ unsigned long fd, unsigned long off);
#endif
#ifndef sys_rt_sigreturn
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 5703526d6ebf..35245e9225a5 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -103,7 +103,7 @@
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
@@ -944,6 +944,14 @@
#define CON_INITCALL \
BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end)
+#define RUNTIME_NAME(t,x) runtime_##t##_##x
+
+#define RUNTIME_CONST(t,x) \
+ . = ALIGN(8); \
+ RUNTIME_NAME(t,x) : AT(ADDR(RUNTIME_NAME(t,x)) - LOAD_OFFSET) { \
+ *(RUNTIME_NAME(t,x)); \
+ }
+
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
. = ALIGN(8); \
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
index c0f56fe6d22a..d116f18de899 100644
--- a/include/clocksource/timer-xilinx.h
+++ b/include/clocksource/timer-xilinx.h
@@ -41,7 +41,7 @@ struct regmap;
struct xilinx_timer_priv {
struct regmap *map;
struct clk *clk;
- u32 max;
+ u64 max;
};
/**
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 82570f77e817..2a74fa9d0ce5 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -56,8 +56,8 @@ struct drm_buddy_block {
struct list_head tmp_link;
};
-/* Order-zero must be at least PAGE_SIZE */
-#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+/* Order-zero must be at least SZ_4K */
+#define DRM_BUDDY_MAX_ORDER (63 - 12)
/*
* Binary Buddy System.
@@ -85,7 +85,7 @@ struct drm_buddy {
unsigned int n_roots;
unsigned int max_order;
- /* Must be at least PAGE_SIZE */
+ /* Must be at least SZ_4K */
u64 chunk_size;
u64 size;
u64 avail;
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index d040033dc8ee..d6c9e9472121 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -175,6 +175,7 @@
#define QCOM_ID_SDA630 327
#define QCOM_ID_MSM8905 331
#define QCOM_ID_SDX202 333
+#define QCOM_ID_SDM670 336
#define QCOM_ID_SDM450 338
#define QCOM_ID_SM8150 339
#define QCOM_ID_SDA845 341
@@ -272,6 +273,7 @@
#define QCOM_ID_QCS8550 603
#define QCOM_ID_QCM8550 604
#define QCOM_ID_IPQ5300 624
+#define QCOM_ID_IPQ5321 650
/*
* The board type and revision information, used by Qualcomm bootloaders and
diff --git a/include/dt-bindings/clock/qcom,qcm2290-gpucc.h b/include/dt-bindings/clock/qcom,qcm2290-gpucc.h
new file mode 100644
index 000000000000..7c76dd05278f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcm2290-gpucc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCM2290_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_GFX3D_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GFX3D_CLK 8
+#define GPU_CC_GX_GFX3D_CLK_SRC 9
+#define GPU_CC_PLL0 10
+#define GPU_CC_SLEEP_CLK 11
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 12
+
+/* Resets */
+#define GPU_GX_BCR 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-camcc.h b/include/dt-bindings/clock/qcom,sm8650-camcc.h
new file mode 100644
index 000000000000..df73bf35f4bf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-camcc.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8650_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_BPS_SHIFT_CLK 4
+#define CAM_CC_CAMNOC_AXI_NRT_CLK 5
+#define CAM_CC_CAMNOC_AXI_RT_CLK 6
+#define CAM_CC_CAMNOC_AXI_RT_CLK_SRC 7
+#define CAM_CC_CAMNOC_DCD_XO_CLK 8
+#define CAM_CC_CAMNOC_XO_CLK 9
+#define CAM_CC_CCI_0_CLK 10
+#define CAM_CC_CCI_0_CLK_SRC 11
+#define CAM_CC_CCI_1_CLK 12
+#define CAM_CC_CCI_1_CLK_SRC 13
+#define CAM_CC_CCI_2_CLK 14
+#define CAM_CC_CCI_2_CLK_SRC 15
+#define CAM_CC_CORE_AHB_CLK 16
+#define CAM_CC_CPAS_AHB_CLK 17
+#define CAM_CC_CPAS_BPS_CLK 18
+#define CAM_CC_CPAS_CRE_CLK 19
+#define CAM_CC_CPAS_FAST_AHB_CLK 20
+#define CAM_CC_CPAS_IFE_0_CLK 21
+#define CAM_CC_CPAS_IFE_1_CLK 22
+#define CAM_CC_CPAS_IFE_2_CLK 23
+#define CAM_CC_CPAS_IFE_LITE_CLK 24
+#define CAM_CC_CPAS_IPE_NPS_CLK 25
+#define CAM_CC_CPAS_SBI_CLK 26
+#define CAM_CC_CPAS_SFE_0_CLK 27
+#define CAM_CC_CPAS_SFE_1_CLK 28
+#define CAM_CC_CPAS_SFE_2_CLK 29
+#define CAM_CC_CPHY_RX_CLK_SRC 30
+#define CAM_CC_CRE_AHB_CLK 31
+#define CAM_CC_CRE_CLK 32
+#define CAM_CC_CRE_CLK_SRC 33
+#define CAM_CC_CSI0PHYTIMER_CLK 34
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSI1PHYTIMER_CLK 36
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 37
+#define CAM_CC_CSI2PHYTIMER_CLK 38
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 39
+#define CAM_CC_CSI3PHYTIMER_CLK 40
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 41
+#define CAM_CC_CSI4PHYTIMER_CLK 42
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 43
+#define CAM_CC_CSI5PHYTIMER_CLK 44
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 45
+#define CAM_CC_CSI6PHYTIMER_CLK 46
+#define CAM_CC_CSI6PHYTIMER_CLK_SRC 47
+#define CAM_CC_CSI7PHYTIMER_CLK 48
+#define CAM_CC_CSI7PHYTIMER_CLK_SRC 49
+#define CAM_CC_CSID_CLK 50
+#define CAM_CC_CSID_CLK_SRC 51
+#define CAM_CC_CSID_CSIPHY_RX_CLK 52
+#define CAM_CC_CSIPHY0_CLK 53
+#define CAM_CC_CSIPHY1_CLK 54
+#define CAM_CC_CSIPHY2_CLK 55
+#define CAM_CC_CSIPHY3_CLK 56
+#define CAM_CC_CSIPHY4_CLK 57
+#define CAM_CC_CSIPHY5_CLK 58
+#define CAM_CC_CSIPHY6_CLK 59
+#define CAM_CC_CSIPHY7_CLK 60
+#define CAM_CC_DRV_AHB_CLK 61
+#define CAM_CC_DRV_XO_CLK 62
+#define CAM_CC_FAST_AHB_CLK_SRC 63
+#define CAM_CC_GDSC_CLK 64
+#define CAM_CC_ICP_AHB_CLK 65
+#define CAM_CC_ICP_CLK 66
+#define CAM_CC_ICP_CLK_SRC 67
+#define CAM_CC_IFE_0_CLK 68
+#define CAM_CC_IFE_0_CLK_SRC 69
+#define CAM_CC_IFE_0_FAST_AHB_CLK 70
+#define CAM_CC_IFE_0_SHIFT_CLK 71
+#define CAM_CC_IFE_1_CLK 72
+#define CAM_CC_IFE_1_CLK_SRC 73
+#define CAM_CC_IFE_1_FAST_AHB_CLK 74
+#define CAM_CC_IFE_1_SHIFT_CLK 75
+#define CAM_CC_IFE_2_CLK 76
+#define CAM_CC_IFE_2_CLK_SRC 77
+#define CAM_CC_IFE_2_FAST_AHB_CLK 78
+#define CAM_CC_IFE_2_SHIFT_CLK 79
+#define CAM_CC_IFE_LITE_AHB_CLK 80
+#define CAM_CC_IFE_LITE_CLK 81
+#define CAM_CC_IFE_LITE_CLK_SRC 82
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_CSID_CLK 84
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85
+#define CAM_CC_IPE_NPS_AHB_CLK 86
+#define CAM_CC_IPE_NPS_CLK 87
+#define CAM_CC_IPE_NPS_CLK_SRC 88
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89
+#define CAM_CC_IPE_PPS_CLK 90
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91
+#define CAM_CC_IPE_SHIFT_CLK 92
+#define CAM_CC_JPEG_1_CLK 93
+#define CAM_CC_JPEG_CLK 94
+#define CAM_CC_JPEG_CLK_SRC 95
+#define CAM_CC_MCLK0_CLK 96
+#define CAM_CC_MCLK0_CLK_SRC 97
+#define CAM_CC_MCLK1_CLK 98
+#define CAM_CC_MCLK1_CLK_SRC 99
+#define CAM_CC_MCLK2_CLK 100
+#define CAM_CC_MCLK2_CLK_SRC 101
+#define CAM_CC_MCLK3_CLK 102
+#define CAM_CC_MCLK3_CLK_SRC 103
+#define CAM_CC_MCLK4_CLK 104
+#define CAM_CC_MCLK4_CLK_SRC 105
+#define CAM_CC_MCLK5_CLK 106
+#define CAM_CC_MCLK5_CLK_SRC 107
+#define CAM_CC_MCLK6_CLK 108
+#define CAM_CC_MCLK6_CLK_SRC 109
+#define CAM_CC_MCLK7_CLK 110
+#define CAM_CC_MCLK7_CLK_SRC 111
+#define CAM_CC_PLL0 112
+#define CAM_CC_PLL0_OUT_EVEN 113
+#define CAM_CC_PLL0_OUT_ODD 114
+#define CAM_CC_PLL1 115
+#define CAM_CC_PLL1_OUT_EVEN 116
+#define CAM_CC_PLL2 117
+#define CAM_CC_PLL3 118
+#define CAM_CC_PLL3_OUT_EVEN 119
+#define CAM_CC_PLL4 120
+#define CAM_CC_PLL4_OUT_EVEN 121
+#define CAM_CC_PLL5 122
+#define CAM_CC_PLL5_OUT_EVEN 123
+#define CAM_CC_PLL6 124
+#define CAM_CC_PLL6_OUT_EVEN 125
+#define CAM_CC_PLL7 126
+#define CAM_CC_PLL7_OUT_EVEN 127
+#define CAM_CC_PLL8 128
+#define CAM_CC_PLL8_OUT_EVEN 129
+#define CAM_CC_PLL9 130
+#define CAM_CC_PLL9_OUT_EVEN 131
+#define CAM_CC_PLL9_OUT_ODD 132
+#define CAM_CC_PLL10 133
+#define CAM_CC_PLL10_OUT_EVEN 134
+#define CAM_CC_QDSS_DEBUG_CLK 135
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 136
+#define CAM_CC_QDSS_DEBUG_XO_CLK 137
+#define CAM_CC_SBI_CLK 138
+#define CAM_CC_SBI_FAST_AHB_CLK 139
+#define CAM_CC_SBI_SHIFT_CLK 140
+#define CAM_CC_SFE_0_CLK 141
+#define CAM_CC_SFE_0_CLK_SRC 142
+#define CAM_CC_SFE_0_FAST_AHB_CLK 143
+#define CAM_CC_SFE_0_SHIFT_CLK 144
+#define CAM_CC_SFE_1_CLK 145
+#define CAM_CC_SFE_1_CLK_SRC 146
+#define CAM_CC_SFE_1_FAST_AHB_CLK 147
+#define CAM_CC_SFE_1_SHIFT_CLK 148
+#define CAM_CC_SFE_2_CLK 149
+#define CAM_CC_SFE_2_CLK_SRC 150
+#define CAM_CC_SFE_2_FAST_AHB_CLK 151
+#define CAM_CC_SFE_2_SHIFT_CLK 152
+#define CAM_CC_SLEEP_CLK 153
+#define CAM_CC_SLEEP_CLK_SRC 154
+#define CAM_CC_SLOW_AHB_CLK_SRC 155
+#define CAM_CC_TITAN_TOP_SHIFT_CLK 156
+#define CAM_CC_XO_CLK_SRC 157
+
+/* CAM_CC power domains */
+#define CAM_CC_TITAN_TOP_GDSC 0
+#define CAM_CC_BPS_GDSC 1
+#define CAM_CC_IFE_0_GDSC 2
+#define CAM_CC_IFE_1_GDSC 3
+#define CAM_CC_IFE_2_GDSC 4
+#define CAM_CC_IPE_0_GDSC 5
+#define CAM_CC_SBI_GDSC 6
+#define CAM_CC_SFE_0_GDSC 7
+#define CAM_CC_SFE_1_GDSC 8
+#define CAM_CC_SFE_2_GDSC 9
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_DRV_BCR 1
+#define CAM_CC_ICP_BCR 2
+#define CAM_CC_IFE_0_BCR 3
+#define CAM_CC_IFE_1_BCR 4
+#define CAM_CC_IFE_2_BCR 5
+#define CAM_CC_IPE_0_BCR 6
+#define CAM_CC_QDSS_DEBUG_BCR 7
+#define CAM_CC_SBI_BCR 8
+#define CAM_CC_SFE_0_BCR 9
+#define CAM_CC_SFE_1_BCR 10
+#define CAM_CC_SFE_2_BCR 11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-videocc.h b/include/dt-bindings/clock/qcom,sm8650-videocc.h
new file mode 100644
index 000000000000..4e3c2d87280f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-videocc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8650_H
+
+#include "qcom,sm8450-videocc.h"
+
+/* SM8650 adds the clocks and resets below on top of those of SM8450 */
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_SHIFT_CLK 12
+#define VIDEO_CC_MVS0C_SHIFT_CLK 13
+#define VIDEO_CC_MVS1_SHIFT_CLK 14
+#define VIDEO_CC_MVS1C_SHIFT_CLK 15
+#define VIDEO_CC_XO_CLK_SRC 16
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_XO_CLK_ARES 7
+
+#endif
diff --git a/include/dt-bindings/clock/rk3128-cru.h b/include/dt-bindings/clock/rk3128-cru.h
index 6a47825dac5d..1be455ba4985 100644
--- a/include/dt-bindings/clock/rk3128-cru.h
+++ b/include/dt-bindings/clock/rk3128-cru.h
@@ -116,6 +116,7 @@
#define PCLK_GMAC 367
#define PCLK_PMU_PRE 368
#define PCLK_SIM_CARD 369
+#define PCLK_MIPIPHY 370
/* hclk gates */
#define HCLK_SPDIF 440
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
index 6f8f01e67628..ebb146ab7f8c 100644
--- a/include/dt-bindings/clock/sun50i-h616-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -112,5 +112,6 @@
#define CLK_HDCP 126
#define CLK_BUS_HDCP 127
#define CLK_PLL_SYSTEM_32K 128
+#define CLK_BUS_GPADC 129
#endif /* _DT_BINDINGS_CLK_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/input/cros-ec-keyboard.h b/include/dt-bindings/input/cros-ec-keyboard.h
index f0ae03634a96..afc12f6aa642 100644
--- a/include/dt-bindings/input/cros-ec-keyboard.h
+++ b/include/dt-bindings/input/cros-ec-keyboard.h
@@ -100,4 +100,108 @@
MATRIX_KEY(0x07, 0x0b, KEY_UP) \
MATRIX_KEY(0x07, 0x0c, KEY_LEFT)
+/* No numpad */
+#define CROS_TOP_ROW_KEYMAP_V30 \
+ MATRIX_KEY(0x00, 0x01, KEY_F11) /* T11 */ \
+ MATRIX_KEY(0x00, 0x02, KEY_F1) /* T1 */ \
+ MATRIX_KEY(0x00, 0x04, KEY_F10) /* T10 */ \
+ MATRIX_KEY(0x00, 0x0b, KEY_F14) /* T14 */ \
+ MATRIX_KEY(0x00, 0x0c, KEY_F15) /* T15 */ \
+ MATRIX_KEY(0x01, 0x02, KEY_F4) /* T4 */ \
+ MATRIX_KEY(0x01, 0x04, KEY_F7) /* T7 */ \
+ MATRIX_KEY(0x01, 0x05, KEY_F12) /* T12 */ \
+ MATRIX_KEY(0x01, 0x09, KEY_F9) /* T9 */ \
+ MATRIX_KEY(0x02, 0x02, KEY_F3) /* T3 */ \
+ MATRIX_KEY(0x02, 0x04, KEY_F6) /* T6 */ \
+ MATRIX_KEY(0x02, 0x0b, KEY_F8) /* T8 */ \
+ MATRIX_KEY(0x03, 0x02, KEY_F2) /* T2 */ \
+ MATRIX_KEY(0x03, 0x05, KEY_F13) /* T13 */ \
+ MATRIX_KEY(0x04, 0x04, KEY_F5) /* T5 */
+
+#define CROS_MAIN_KEYMAP_V30 /* Keycode */ \
+ MATRIX_KEY(0x00, 0x03, KEY_B) /* 50 */ \
+ MATRIX_KEY(0x00, 0x05, KEY_N) /* 51 */ \
+ MATRIX_KEY(0x00, 0x06, KEY_RO) /* 56 (JIS) */ \
+ MATRIX_KEY(0x00, 0x08, KEY_EQUAL) /* 13 */ \
+ MATRIX_KEY(0x00, 0x09, KEY_HOME) /* 80 (Numpad) */ \
+ MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) /* 62 */ \
+ MATRIX_KEY(0x00, 0x10, KEY_FN) /* 127 */ \
+ \
+ MATRIX_KEY(0x01, 0x01, KEY_ESC) /* 110 */ \
+ MATRIX_KEY(0x01, 0x03, KEY_G) /* 35 */ \
+ MATRIX_KEY(0x01, 0x06, KEY_H) /* 36 */ \
+ MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) /* 41 */ \
+ MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) /* 15 */ \
+ MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) /* 65 (JIS) */ \
+ MATRIX_KEY(0x01, 0x0e, KEY_LEFTCTRL) /* 58 */ \
+ \
+ MATRIX_KEY(0x02, 0x01, KEY_TAB) /* 16 */ \
+ MATRIX_KEY(0x02, 0x03, KEY_T) /* 21 */ \
+ MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) /* 28 */ \
+ MATRIX_KEY(0x02, 0x06, KEY_Y) /* 22 */ \
+ MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) /* 27 */ \
+ MATRIX_KEY(0x02, 0x09, KEY_DELETE) /* 76 (Numpad) */ \
+ MATRIX_KEY(0x02, 0x0c, KEY_PAGEUP) /* 85 (Numpad) */ \
+ MATRIX_KEY(0x02, 0x11, KEY_YEN) /* 14 (JIS) */ \
+ \
+ MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) /* Launcher */ \
+ MATRIX_KEY(0x03, 0x01, KEY_GRAVE) /* 1 */ \
+ MATRIX_KEY(0x03, 0x03, KEY_5) /* 6 */ \
+ MATRIX_KEY(0x03, 0x04, KEY_S) /* 32 */ \
+ MATRIX_KEY(0x03, 0x06, KEY_MINUS) /* 12 */ \
+ MATRIX_KEY(0x03, 0x08, KEY_6) /* 7 */ \
+ MATRIX_KEY(0x03, 0x09, KEY_SLEEP) /* Lock */ \
+ MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) /* 29 */ \
+ MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) /* 63 (JIS) */ \
+ MATRIX_KEY(0x03, 0x0e, KEY_RIGHTCTRL) /* 64 */ \
+ \
+ MATRIX_KEY(0x04, 0x01, KEY_A) /* 31 */ \
+ MATRIX_KEY(0x04, 0x02, KEY_D) /* 33 */ \
+ MATRIX_KEY(0x04, 0x03, KEY_F) /* 34 */ \
+ MATRIX_KEY(0x04, 0x05, KEY_K) /* 38 */ \
+ MATRIX_KEY(0x04, 0x06, KEY_J) /* 37 */ \
+ MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) /* 40 */ \
+ MATRIX_KEY(0x04, 0x09, KEY_L) /* 39 */ \
+ MATRIX_KEY(0x04, 0x0b, KEY_ENTER) /* 43 */ \
+ MATRIX_KEY(0x04, 0x0c, KEY_END) /* 81 (Numpad) */ \
+ \
+ MATRIX_KEY(0x05, 0x01, KEY_1) /* 2 */ \
+ MATRIX_KEY(0x05, 0x02, KEY_COMMA) /* 53 */ \
+ MATRIX_KEY(0x05, 0x03, KEY_DOT) /* 54 */ \
+ MATRIX_KEY(0x05, 0x04, KEY_SLASH) /* 55 */ \
+ MATRIX_KEY(0x05, 0x05, KEY_C) /* 48 */ \
+ MATRIX_KEY(0x05, 0x06, KEY_SPACE) /* 61 */ \
+ MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) /* 44 */ \
+ MATRIX_KEY(0x05, 0x08, KEY_X) /* 47 */ \
+ MATRIX_KEY(0x05, 0x09, KEY_V) /* 49 */ \
+ MATRIX_KEY(0x05, 0x0b, KEY_M) /* 52 */ \
+ MATRIX_KEY(0x05, 0x0c, KEY_PAGEDOWN) /* 86 (Numpad) */ \
+ \
+ MATRIX_KEY(0x06, 0x01, KEY_Z) /* 46 */ \
+ MATRIX_KEY(0x06, 0x02, KEY_3) /* 4 */ \
+ MATRIX_KEY(0x06, 0x03, KEY_4) /* 5 */ \
+ MATRIX_KEY(0x06, 0x04, KEY_2) /* 3 */ \
+ MATRIX_KEY(0x06, 0x05, KEY_8) /* 9 */ \
+ MATRIX_KEY(0x06, 0x06, KEY_0) /* 11 */ \
+ MATRIX_KEY(0x06, 0x08, KEY_7) /* 8 */ \
+ MATRIX_KEY(0x06, 0x09, KEY_9) /* 10 */ \
+ MATRIX_KEY(0x06, 0x0b, KEY_DOWN) /* 84 */ \
+ MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) /* 89 */ \
+ MATRIX_KEY(0x06, 0x0d, KEY_LEFTALT) /* 60 */ \
+ MATRIX_KEY(0x06, 0x0f, KEY_ASSISTANT) /* 128 */ \
+ MATRIX_KEY(0x06, 0x11, KEY_BACKSLASH) /* 42 (JIS, ISO) */ \
+ \
+ MATRIX_KEY(0x07, 0x01, KEY_U) /* 23 */ \
+ MATRIX_KEY(0x07, 0x02, KEY_I) /* 24 */ \
+ MATRIX_KEY(0x07, 0x03, KEY_O) /* 25 */ \
+ MATRIX_KEY(0x07, 0x04, KEY_P) /* 26 */ \
+ MATRIX_KEY(0x07, 0x05, KEY_Q) /* 17 */ \
+ MATRIX_KEY(0x07, 0x06, KEY_W) /* 18 */ \
+ MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT) /* 57 */ \
+ MATRIX_KEY(0x07, 0x08, KEY_E) /* 19 */ \
+ MATRIX_KEY(0x07, 0x09, KEY_R) /* 20 */ \
+ MATRIX_KEY(0x07, 0x0b, KEY_UP) /* 83 */ \
+ MATRIX_KEY(0x07, 0x0c, KEY_LEFT) /* 79 */ \
+ MATRIX_KEY(0x07, 0x11, KEY_102ND) /* 45 (ISO) */
+
#endif /* _CROS_EC_KEYBOARD_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq9574.h b/include/dt-bindings/interconnect/qcom,ipq9574.h
new file mode 100644
index 000000000000..42019335c7dd
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq9574.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ9574_H
+#define INTERCONNECT_QCOM_IPQ9574_H
+
+#define MASTER_ANOC_PCIE0 0
+#define SLAVE_ANOC_PCIE0 1
+#define MASTER_SNOC_PCIE0 2
+#define SLAVE_SNOC_PCIE0 3
+#define MASTER_ANOC_PCIE1 4
+#define SLAVE_ANOC_PCIE1 5
+#define MASTER_SNOC_PCIE1 6
+#define SLAVE_SNOC_PCIE1 7
+#define MASTER_ANOC_PCIE2 8
+#define SLAVE_ANOC_PCIE2 9
+#define MASTER_SNOC_PCIE2 10
+#define SLAVE_SNOC_PCIE2 11
+#define MASTER_ANOC_PCIE3 12
+#define SLAVE_ANOC_PCIE3 13
+#define MASTER_SNOC_PCIE3 14
+#define SLAVE_SNOC_PCIE3 15
+#define MASTER_USB 16
+#define SLAVE_USB 17
+#define MASTER_USB_AXI 18
+#define SLAVE_USB_AXI 19
+#define MASTER_NSSNOC_NSSCC 20
+#define SLAVE_NSSNOC_NSSCC 21
+#define MASTER_NSSNOC_SNOC_0 22
+#define SLAVE_NSSNOC_SNOC_0 23
+#define MASTER_NSSNOC_SNOC_1 24
+#define SLAVE_NSSNOC_SNOC_1 25
+#define MASTER_NSSNOC_PCNOC_1 26
+#define SLAVE_NSSNOC_PCNOC_1 27
+#define MASTER_NSSNOC_QOSGEN_REF 28
+#define SLAVE_NSSNOC_QOSGEN_REF 29
+#define MASTER_NSSNOC_TIMEOUT_REF 30
+#define SLAVE_NSSNOC_TIMEOUT_REF 31
+#define MASTER_NSSNOC_XO_DCD 32
+#define SLAVE_NSSNOC_XO_DCD 33
+#define MASTER_NSSNOC_ATB 34
+#define SLAVE_NSSNOC_ATB 35
+#define MASTER_MEM_NOC_NSSNOC 36
+#define SLAVE_MEM_NOC_NSSNOC 37
+#define MASTER_NSSNOC_MEMNOC 38
+#define SLAVE_NSSNOC_MEMNOC 39
+#define MASTER_NSSNOC_MEM_NOC_1 40
+#define SLAVE_NSSNOC_MEM_NOC_1 41
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_IMEM_QSB 6
+#define SLAVE_NSSNOC_IMEM_QSB 7
+#define MASTER_NSSNOC_IMEM_AHB 8
+#define SLAVE_NSSNOC_IMEM_AHB 9
+
+#endif /* INTERCONNECT_QCOM_IPQ9574_H */
diff --git a/include/dt-bindings/mfd/qcom-pm8008.h b/include/dt-bindings/mfd/qcom-pm8008.h
deleted file mode 100644
index eca9448df228..000000000000
--- a/include/dt-bindings/mfd/qcom-pm8008.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2021 The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DT_BINDINGS_MFD_QCOM_PM8008_H
-#define __DT_BINDINGS_MFD_QCOM_PM8008_H
-
-/* PM8008 IRQ numbers */
-#define PM8008_IRQ_MISC_UVLO 0
-#define PM8008_IRQ_MISC_OVLO 1
-#define PM8008_IRQ_MISC_OTST2 2
-#define PM8008_IRQ_MISC_OTST3 3
-#define PM8008_IRQ_MISC_LDO_OCP 4
-#define PM8008_IRQ_TEMP_ALARM 5
-#define PM8008_IRQ_GPIO1 6
-#define PM8008_IRQ_GPIO2 7
-
-#endif
diff --git a/include/dt-bindings/mfd/st,stpmic1.h b/include/dt-bindings/mfd/st,stpmic1.h
index 321cd08797d9..9dd15b9c743e 100644
--- a/include/dt-bindings/mfd/st,stpmic1.h
+++ b/include/dt-bindings/mfd/st,stpmic1.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Philippe Peurichard <philippe.peurichard@st.com>,
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
index 6fc4b445d3a1..b8a4f3ff4a3b 100644
--- a/include/dt-bindings/net/ti-dp83867.h
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83867 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2015 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83867_H
diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h
index 218b1a64e975..917114aad7d0 100644
--- a/include/dt-bindings/net/ti-dp83869.h
+++ b/include/dt-bindings/net/ti-dp83869.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83869 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2019 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83869_H
diff --git a/include/dt-bindings/power/amlogic,a4-pwrc.h b/include/dt-bindings/power/amlogic,a4-pwrc.h
new file mode 100644
index 000000000000..bd2f9c558d22
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,a4-pwrc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_A4_POWER_H
+#define _DT_BINDINGS_AMLOGIC_A4_POWER_H
+
+#define PWRC_A4_AUDIO_ID 0
+#define PWRC_A4_SDIOA_ID 1
+#define PWRC_A4_EMMC_ID 2
+#define PWRC_A4_USB_COMB_ID 3
+#define PWRC_A4_ETH_ID 4
+#define PWRC_A4_VOUT_ID 5
+#define PWRC_A4_AUDIO_PDM_ID 6
+#define PWRC_A4_DMC_ID 7
+#define PWRC_A4_SYS_WRAP_ID 8
+#define PWRC_A4_AO_I2C_S_ID 9
+#define PWRC_A4_AO_UART_ID 10
+#define PWRC_A4_AO_IR_ID 11
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,a5-pwrc.h b/include/dt-bindings/power/amlogic,a5-pwrc.h
new file mode 100644
index 000000000000..3a6f53eb959f
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,a5-pwrc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_A5_POWER_H
+#define _DT_BINDINGS_AMLOGIC_A5_POWER_H
+
+#define PWRC_A5_NNA_ID 0
+#define PWRC_A5_AUDIO_ID 1
+#define PWRC_A5_SDIOA_ID 2
+#define PWRC_A5_EMMC_ID 3
+#define PWRC_A5_USB_COMB_ID 4
+#define PWRC_A5_ETH_ID 5
+#define PWRC_A5_RSA_ID 6
+#define PWRC_A5_AUDIO_PDM_ID 7
+#define PWRC_A5_DMC_ID 8
+#define PWRC_A5_SYS_WRAP_ID 9
+#define PWRC_A5_DSPA_ID 10
+
+#endif
diff --git a/include/dt-bindings/regulator/st,stm32mp25-regulator.h b/include/dt-bindings/regulator/st,stm32mp25-regulator.h
new file mode 100644
index 000000000000..3c3d30911dd0
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp25-regulator.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H
+
+/* SCMI voltage domains identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_VDDIO1 0
+#define VOLTD_SCMI_VDDIO2 1
+#define VOLTD_SCMI_VDDIO3 2
+#define VOLTD_SCMI_VDDIO4 3
+#define VOLTD_SCMI_VDDIO 4
+#define VOLTD_SCMI_UCPD 5
+#define VOLTD_SCMI_USB33 6
+#define VOLTD_SCMI_ADC 7
+#define VOLTD_SCMI_GPU 8
+#define VOLTD_SCMI_VREFBUF 9
+
+/* STPMIC2 regulators */
+#define VOLTD_SCMI_STPMIC2_BUCK1 10
+#define VOLTD_SCMI_STPMIC2_BUCK2 11
+#define VOLTD_SCMI_STPMIC2_BUCK3 12
+#define VOLTD_SCMI_STPMIC2_BUCK4 13
+#define VOLTD_SCMI_STPMIC2_BUCK5 14
+#define VOLTD_SCMI_STPMIC2_BUCK6 15
+#define VOLTD_SCMI_STPMIC2_BUCK7 16
+#define VOLTD_SCMI_STPMIC2_LDO1 17
+#define VOLTD_SCMI_STPMIC2_LDO2 18
+#define VOLTD_SCMI_STPMIC2_LDO3 19
+#define VOLTD_SCMI_STPMIC2_LDO4 20
+#define VOLTD_SCMI_STPMIC2_LDO5 21
+#define VOLTD_SCMI_STPMIC2_LDO6 22
+#define VOLTD_SCMI_STPMIC2_LDO7 23
+#define VOLTD_SCMI_STPMIC2_LDO8 24
+#define VOLTD_SCMI_STPMIC2_REFDDR 25
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 26
+#define VOLTD_SCMI_REGU1 27
+#define VOLTD_SCMI_REGU2 28
+#define VOLTD_SCMI_REGU3 29
+#define VOLTD_SCMI_REGU4 30
+
+#endif /*__DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H */
diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h
index 1bd8bb0a11be..ed177c04afdd 100644
--- a/include/dt-bindings/reset/sun50i-h616-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h616-ccu.h
@@ -66,5 +66,6 @@
#define RST_BUS_TVE0 57
#define RST_BUS_HDCP 58
#define RST_BUS_KEYADC 59
+#define RST_BUS_GPADC 60
#endif /* _DT_BINDINGS_RESET_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
index bf95309d2525..ddc7302a510a 100644
--- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h
+++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
@@ -24,7 +24,7 @@
#define MT8186_BIG_CPU1 5
#define MT8186_NNA 6
#define MT8186_ADSP 7
-#define MT8186_MFG 8
+#define MT8186_GPU 8
#define MT8188_MCU_LITTLE_CPU0 0
#define MT8188_MCU_LITTLE_CPU1 1
@@ -34,11 +34,11 @@
#define MT8188_MCU_BIG_CPU1 5
#define MT8188_AP_APU 0
-#define MT8188_AP_GPU1 1
-#define MT8188_AP_GPU2 2
-#define MT8188_AP_SOC1 3
-#define MT8188_AP_SOC2 4
-#define MT8188_AP_SOC3 5
+#define MT8188_AP_GPU0 1
+#define MT8188_AP_GPU1 2
+#define MT8188_AP_ADSP 3
+#define MT8188_AP_VDO 4
+#define MT8188_AP_INFRA 5
#define MT8188_AP_CAM1 6
#define MT8188_AP_CAM2 7
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index 24c2b9fa61e8..bb879389f11d 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -60,7 +60,7 @@ void kunit_assert_prologue(const struct kunit_loc *loc,
* struct kunit_fail_assert - Represents a plain fail expectation/assertion.
* @assert: The parent of this type.
*
- * Represents a simple KUNIT_FAIL/KUNIT_ASSERT_FAILURE that always fails.
+ * Represents a simple KUNIT_FAIL/KUNIT_FAIL_AND_ABORT that always fails.
*/
struct kunit_fail_assert {
struct kunit_assert assert;
@@ -218,4 +218,15 @@ void kunit_mem_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream);
+#if IS_ENABLED(CONFIG_KUNIT)
+void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream);
+bool is_literal(const char *text, long long value);
+bool is_str_literal(const char *text, const char *value);
+void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len);
+#endif
+
#endif /* _KUNIT_ASSERT_H */
diff --git a/include/kunit/test.h b/include/kunit/test.h
index e32b4cb7afa2..e2a1f0928e8b 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -480,6 +480,23 @@ static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp
return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
}
+/**
+ * kunit_vm_mmap() - Allocate KUnit-tracked vm_mmap() area
+ * @test: The test context object.
+ * @file: struct file pointer to map from, if any
+ * @addr: desired address, if any
+ * @len: how many bytes to allocate
+ * @prot: mmap PROT_* bits
+ * @flag: mmap flags
+ * @offset: offset into @file to start mapping from.
+ *
+ * See vm_mmap() for more information.
+ */
+unsigned long kunit_vm_mmap(struct kunit *test, struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flag,
+ unsigned long offset);
+
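A hedged example of a test using kunit_vm_mmap() for an anonymous mapping; the test body and the failure check are illustrative only:

	static void vm_mmap_example_test(struct kunit *test)
	{
		unsigned long addr;

		addr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
				     PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, 0);
		KUNIT_ASSERT_NE(test, addr, 0);

		/* The area is KUnit-tracked, so no explicit teardown is needed here. */
	}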
void kunit_cleanup(struct kunit *test);
void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...);
@@ -1211,7 +1228,18 @@ do { \
fmt, \
##__VA_ARGS__)
-#define KUNIT_ASSERT_FAILURE(test, fmt, ...) \
+/**
+ * KUNIT_FAIL_AND_ABORT() - Always causes a test to fail and abort when evaluated.
+ * @test: The test context object.
+ * @fmt: an informational message to be printed when the assertion is made.
+ * @...: string format arguments.
+ *
+ * The opposite of KUNIT_SUCCEED(): an assertion that always fails and,
+ * consequently, always causes the test case to fail and abort when
+ * evaluated.
+ * See KUNIT_ASSERT_TRUE() for more information.
+ */
+#define KUNIT_FAIL_AND_ABORT(test, fmt, ...) \
KUNIT_FAIL_ASSERTION(test, KUNIT_ASSERTION, fmt, ##__VA_ARGS__)
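A short, hedged example of the new name in use; the condition is a placeholder:

	if (!test_resource_ready)	/* placeholder precondition */
		KUNIT_FAIL_AND_ABORT(test, "resource not ready, aborting test case");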
/**
@@ -1438,12 +1466,12 @@ do { \
##__VA_ARGS__)
/**
- * KUNIT_ASSERT_STRNEQ() - Expects that strings @left and @right are not equal.
+ * KUNIT_ASSERT_STRNEQ() - An assertion that strings @left and @right are not equal.
* @test: The test context object.
* @left: an arbitrary expression that evaluates to a null terminated string.
* @right: an arbitrary expression that evaluates to a null terminated string.
*
- * Sets an expectation that the values that @left and @right evaluate to are
+ * Sets an assertion that the values that @left and @right evaluate to are
* not equal. This is semantically equivalent to
* KUNIT_ASSERT_TRUE(@test, strcmp((@left), (@right))). See KUNIT_ASSERT_TRUE()
* for more information.
@@ -1459,6 +1487,60 @@ do { \
##__VA_ARGS__)
/**
+ * KUNIT_ASSERT_MEMEQ() - Asserts that the first @size bytes of @left and @right are equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to a buffer of the specified size.
+ * @right: An arbitrary expression that evaluates to a buffer of the specified size.
+ * @size: Number of bytes compared.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_ASSERT_TRUE(@test, !memcmp((@left), (@right), (@size))). See
+ * KUNIT_ASSERT_TRUE() for more information.
+ *
+ * Although this assertion works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This assertion is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_ASSERT_MEMEQ(test, left, right, size) \
+ KUNIT_ASSERT_MEMEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_ASSERT_MEMEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_MEMNEQ() - Asserts that the first @size bytes of @left and @right are not equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to the specified size.
+ * @right: An arbitrary expression that evaluates to the specified size.
+ * @size: Number of bytes compared.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_ASSERT_TRUE(@test, memcmp((@left), (@right), (@size))). See
+ * KUNIT_ASSERT_TRUE() for more information.
+ *
+ * Although this assertion works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This assertion is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_ASSERT_MEMNEQ(test, left, right, size) \
+ KUNIT_ASSERT_MEMNEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_ASSERT_MEMNEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
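A hedged usage example for the two new assertions; the buffers and the produce_output() helper are illustrative only:

	const u8 expected[4] = { 0x01, 0x02, 0x03, 0x04 };
	const u8 zeroes[4] = { };
	u8 got[4];

	produce_output(got);				/* placeholder for the code under test */
	KUNIT_ASSERT_MEMEQ(test, got, expected, sizeof(expected));
	KUNIT_ASSERT_MEMNEQ(test, got, zeroes, sizeof(zeroes));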
+/**
 * KUNIT_ASSERT_NULL() - Asserts that pointer @ptr is null.
* @test: The test context object.
* @ptr: an arbitrary pointer.
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 28c3fb2bef0d..170f5f8b0563 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -24,6 +24,7 @@ struct irq_domain_ops;
#define _LINUX
#endif
#include <acpi/acpi.h>
+#include <acpi/acpi_numa.h>
#ifdef CONFIG_ACPI
@@ -35,7 +36,6 @@ struct irq_domain_ops;
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_numa.h>
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
@@ -237,11 +237,6 @@ acpi_table_parse_cedt(enum acpi_cedt_type id,
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc)
-{
- return gicc->flags & ACPI_MADT_ENABLED;
-}
-
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
@@ -304,6 +299,8 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+acpi_handle acpi_get_processor_handle(int cpu);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -576,6 +573,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000
#define OSC_SB_PRM_SUPPORT 0x00200000
#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
@@ -777,8 +775,6 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
#define acpi_dev_uid_match(adev, uid2) (adev && false)
#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
-#include <acpi/acpi_numa.h>
-
struct fwnode_handle;
static inline bool acpi_dev_found(const char *hid)
@@ -1076,6 +1072,11 @@ static inline bool acpi_sleep_state_supported(u8 sleep_state)
return false;
}
+static inline acpi_handle acpi_get_processor_handle(int cpu)
+{
+ return NULL;
+}
+
#endif /* !CONFIG_ACPI */
extern void arch_post_acpi_subsys_init(void);
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 956bcba5dbf2..2f9d36b72bd8 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2242,7 +2242,7 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
/**
* raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4368,7 +4368,7 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
/**
* raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
+// b565db590afeeff0d7c9485ccbca5bb6e155749f
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index debd487fe971..9409a6ddf3e0 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1349,7 +1349,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
/**
* atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -2927,7 +2927,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
/**
* atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4505,7 +4505,7 @@ atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
/**
* atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// ce5b65e0f1f8a276268b667194581d24bed219d4
+// 8829b337928e9508259079d32581775ececd415b
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index 3ef844b3ab8a..f86b29d90877 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1535,7 +1535,7 @@ raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
/**
* raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
+// eadf183c3600b8b92b91839dd3be6bcc560c752d
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index de21d9d24a95..3ba4487c9cd9 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -58,6 +58,9 @@
* in
* @name: Match name found by the auxiliary device driver,
 * @id: unique identifier if multiple devices of the same name are exported,
+ * @irqs: xarray of irq indices that are used by the device,
+ * @lock: Synchronize irq sysfs creation,
+ * @irq_dir_exists: whether "irqs" directory exists,
*
* An auxiliary_device represents a part of its parent device's functionality.
* It is given a name that, combined with the registering drivers
@@ -139,6 +142,11 @@ struct auxiliary_device {
struct device dev;
const char *name;
u32 id;
+ struct {
+ struct xarray irqs;
+ struct mutex lock; /* Synchronize irq sysfs creation */
+ bool irq_dir_exists;
+ } sysfs;
};
/**
@@ -212,8 +220,24 @@ int auxiliary_device_init(struct auxiliary_device *auxdev);
int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+#ifdef CONFIG_SYSFS
+int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq);
+void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev,
+ int irq);
+#else /* CONFIG_SYSFS */
+static inline int
+auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq)
+{
+ return 0;
+}
+
+static inline void
+auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq) {}
+#endif
+
static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
{
+ mutex_destroy(&auxdev->sysfs.lock);
put_device(&auxdev->dev);
}
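A hedged sketch of the new sysfs IRQ helpers from an auxiliary driver's point of view; adev and irq are placeholders for whatever the driver obtained in its probe path:

	int ret = auxiliary_device_sysfs_irq_add(adev, irq);

	if (ret)
		return ret;

	/* ... and on the teardown path: */
	auxiliary_device_sysfs_irq_remove(adev, irq);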
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 19a1c0e22629..ea9c1bc8148e 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -209,15 +209,19 @@ struct backlight_properties {
* attribute: /sys/class/backlight/<backlight>/bl_power
* When the power property is updated update_status() is called.
*
- * The possible values are: (0: full on, 1 to 3: power saving
- * modes; 4: full off), see FB_BLANK_XXX.
+ * The possible values are: (0: full on, 4: full off), see
+ * BACKLIGHT_POWER constants.
*
- * When the backlight device is enabled @power is set
- * to FB_BLANK_UNBLANK. When the backlight device is disabled
- * @power is set to FB_BLANK_POWERDOWN.
+ * When the backlight device is enabled, @power is set to
+ * BACKLIGHT_POWER_ON. When the backlight device is disabled,
+ * @power is set to BACKLIGHT_POWER_OFF.
*/
int power;
+#define BACKLIGHT_POWER_ON (0)
+#define BACKLIGHT_POWER_OFF (4)
+#define BACKLIGHT_POWER_REDUCED (1) // deprecated; don't use in new code
+
/**
* @type: The type of backlight supported.
*
@@ -346,7 +350,7 @@ static inline int backlight_enable(struct backlight_device *bd)
if (!bd)
return 0;
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
bd->props.state &= ~BL_CORE_FBBLANK;
return backlight_update_status(bd);
@@ -361,7 +365,7 @@ static inline int backlight_disable(struct backlight_device *bd)
if (!bd)
return 0;
- bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.power = BACKLIGHT_POWER_OFF;
bd->props.state |= BL_CORE_FBBLANK;
return backlight_update_status(bd);
@@ -380,7 +384,7 @@ static inline int backlight_disable(struct backlight_device *bd)
*/
static inline bool backlight_is_blank(const struct backlight_device *bd)
{
- return bd->props.power != FB_BLANK_UNBLANK ||
+ return bd->props.power != BACKLIGHT_POWER_ON ||
bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
}
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 70f97f685bff..e6c00e860951 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -19,13 +19,13 @@ struct linux_binprm {
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
unsigned long vma_pages;
+ unsigned long argmin; /* rlimit marker for copy_strings() */
#else
# define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES];
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
- unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
/* Should an execfd be passed to userspace? */
have_execfd:1,
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d5379548d684..818e93612947 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -731,6 +731,7 @@ static inline bool bioset_initialized(struct bio_set *bs)
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
+void bio_integrity_unmap_free_user(struct bio *bio);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
@@ -807,6 +808,9 @@ static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
{
return -EINVAL;
}
+static inline void bio_integrity_unmap_free_user(struct bio *bio)
+{
+}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index e253e7bd0d17..804f856ed3e5 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -7,51 +7,38 @@
struct request;
enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_NOVERIFY = 1 << 0,
+ BLK_INTEGRITY_NOGENERATE = 1 << 1,
BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+ BLK_INTEGRITY_REF_TAG = 1 << 3,
+ BLK_INTEGRITY_STACKED = 1 << 4,
};
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- unsigned char tuple_size;
- unsigned char pi_offset;
- const char *disk_name;
-};
-
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
-typedef void (integrity_prepare_fn) (struct request *);
-typedef void (integrity_complete_fn) (struct request *, unsigned int);
-
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- integrity_prepare_fn *prepare_fn;
- integrity_complete_fn *complete_fn;
- const char *name;
-};
+const char *blk_integrity_profile_name(struct blk_integrity *bi);
+bool queue_limits_stack_integrity(struct queue_limits *t,
+ struct queue_limits *b);
+static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
+ struct block_device *bdev)
+{
+ return queue_limits_stack_integrity(t, &bdev->bd_disk->queue->limits);
+}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-void blk_integrity_unregister(struct gendisk *);
-int blk_integrity_compare(struct gendisk *, struct gendisk *);
int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
struct scatterlist *);
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
{
- struct blk_integrity *bi = &disk->queue->integrity;
+ return q->limits.integrity.tuple_size;
+}
- if (!bi->profile)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ if (!blk_integrity_queue_supports_integrity(disk->queue))
return NULL;
-
- return bi;
+ return &disk->queue->limits.integrity;
}
static inline struct blk_integrity *
@@ -60,18 +47,6 @@ bdev_get_integrity(struct block_device *bdev)
return blk_get_integrity(bdev->bd_disk);
}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
-{
- return q->integrity.profile;
-}
-
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
- q->limits.max_integrity_segments = segs;
-}
-
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
@@ -106,14 +81,13 @@ static inline bool blk_integrity_rq(struct request *rq)
}
/*
- * Return the first bvec that contains integrity data. Only drivers that are
- * limited to a single integrity segment should use this helper.
+ * Return the current bvec that contains the integrity data. bip_iter may be
+ * advanced to iterate over the integrity data.
*/
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
{
- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
- return NULL;
- return rq->bio->bi_integrity->bip_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
+ rq->bio->bi_integrity->bip_iter);
}
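
A hedged sketch of how a caller adapts to the by-value return above: the function name and the pr_debug() message are illustrative, and bvec_virt() is the existing helper from <linux/bvec.h> (also touched later in this patch).

static void example_show_pi_buffer(struct request *rq)
{
	struct bio_vec iv;

	if (!blk_integrity_rq(rq))
		return;

	/* rq_integrity_vec() now hands back the current bvec by value */
	iv = rq_integrity_vec(rq);
	pr_debug("PI segment: %u bytes at %p\n", iv.bv_len, bvec_virt(&iv));
}
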
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
@@ -140,21 +114,6 @@ blk_integrity_queue_supports_integrity(struct request_queue *q)
{
return false;
}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
-{
- return 0;
-}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
-{
-}
-static inline void blk_integrity_unregister(struct gendisk *d)
-{
-}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
-}
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
@@ -177,9 +136,11 @@ static inline int blk_integrity_rq(struct request *rq)
return 0;
}
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
{
- return NULL;
+ /* the optimizer will remove all calls to this function */
+ return (struct bio_vec){ };
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#endif /* _LINUX_BLK_INTEGRITY_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 781c4500491b..632edd71f8c6 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -162,6 +162,11 @@ typedef u16 blk_short_t;
*/
#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17)
+/*
+ * Invalid size or alignment.
+ */
+#define BLK_STS_INVAL ((__force blk_status_t)19)
+
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with
@@ -370,7 +375,7 @@ enum req_flag_bits {
__REQ_SWAP, /* swap I/O */
__REQ_DRV, /* for driver use */
__REQ_FS_PRIVATE, /* for file system (submitter) use */
-
+ __REQ_ATOMIC, /* for atomic write operations */
/*
* Command specific flags, keep last:
*/
@@ -402,6 +407,7 @@ enum req_flag_bits {
#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
+#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aefdda9f4ec7..b8196e219ac2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -105,9 +105,16 @@ enum {
struct disk_events;
struct badblocks;
+enum blk_integrity_checksum {
+ BLK_INTEGRITY_CSUM_NONE = 0,
+ BLK_INTEGRITY_CSUM_IP = 1,
+ BLK_INTEGRITY_CSUM_CRC = 2,
+ BLK_INTEGRITY_CSUM_CRC64 = 3,
+} __packed;
+
struct blk_integrity {
- const struct blk_integrity_profile *profile;
unsigned char flags;
+ enum blk_integrity_checksum csum_type;
unsigned char tuple_size;
unsigned char pi_offset;
unsigned char interval_exp;
@@ -186,6 +193,7 @@ struct gendisk {
*/
unsigned int nr_zones;
unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
unsigned long *conv_zones_bitmap;
unsigned int zone_wplugs_hash_bits;
spinlock_t zone_wplugs_lock;
@@ -260,6 +268,7 @@ static inline dev_t disk_devt(struct gendisk *disk)
return MKDEV(disk->major, disk->first_minor);
}
+/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
@@ -274,17 +283,75 @@ static inline bool blk_op_is_passthrough(blk_opf_t op)
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
+/* flags set by the driver in queue_limits.features */
+typedef unsigned int __bitwise blk_features_t;
+
+/* supports a volatile write cache */
+#define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0))
+
+/* supports passing on the FUA bit */
+#define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1))
+
+/* rotational device (hard drive or floppy) */
+#define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2))
+
+/* contributes to the random number pool */
+#define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3))
+
+/* do disk/partitions IO accounting */
+#define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4))
+
+/* don't modify data until writeback is done */
+#define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5))
+
+/* always completes in submit context */
+#define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6))
+
+/* supports REQ_NOWAIT */
+#define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7))
+
+/* supports DAX */
+#define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8))
+
+/* supports I/O polling */
+#define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9))
+
+/* is a zoned device */
+#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10))
+
+/* supports PCI(e) p2p requests */
+#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12))
+
+/* skip this queue in blk_mq_(un)quiesce_tagset */
+#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
+
+/* bounce all highmem pages */
+#define BLK_FEAT_BOUNCE_HIGH ((__force blk_features_t)(1u << 14))
+
+/* undocumented magic for bcache */
+#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
+ ((__force blk_features_t)(1u << 15))
+
/*
- * BLK_BOUNCE_NONE: never bounce (default)
- * BLK_BOUNCE_HIGH: bounce all highmem pages
+ * Flags automatically inherited when stacking limits.
*/
-enum blk_bounce {
- BLK_BOUNCE_NONE,
- BLK_BOUNCE_HIGH,
-};
+#define BLK_FEAT_INHERIT_MASK \
+ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
+ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
+ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
+
+/* internal flags in queue_limits.flags */
+typedef unsigned int __bitwise blk_flags_t;
+
+/* do not send FLUSH/FUA commands despite advertising a write cache */
+#define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0))
+
+/* I/O topology is misaligned */
+#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1))
struct queue_limits {
- enum blk_bounce bounce;
+ blk_features_t features;
+ blk_flags_t flags;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@@ -309,14 +376,20 @@ struct queue_limits {
unsigned int discard_alignment;
unsigned int zone_write_granularity;
+ /* atomic write limits */
+ unsigned int atomic_write_hw_max;
+ unsigned int atomic_write_max_sectors;
+ unsigned int atomic_write_hw_boundary;
+ unsigned int atomic_write_boundary_sectors;
+ unsigned int atomic_write_hw_unit_min;
+ unsigned int atomic_write_unit_min;
+ unsigned int atomic_write_hw_unit_max;
+ unsigned int atomic_write_unit_max;
+
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
- unsigned char misaligned;
- unsigned char discard_misaligned;
- unsigned char raid_partial_stripes_expensive;
- bool zoned;
unsigned int max_open_zones;
unsigned int max_active_zones;
@@ -326,13 +399,14 @@ struct queue_limits {
* due to possible offsets.
*/
unsigned int dma_alignment;
+ unsigned int dma_pad_mask;
+
+ struct blk_integrity integrity;
};
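
A hedged sketch of the new model: instead of setting QUEUE_FLAG_* bits after allocation, a driver describes the device up front in queue_limits.features. The field values are made up, and the blk_alloc_disk(&lim, node) form is assumed from the limits-based allocation API; real drivers fill in many more limits.

static struct gendisk *example_alloc_disk(void)
{
	struct queue_limits lim = {
		.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
					  BLK_FEAT_ROTATIONAL,
		.logical_block_size	= 4096,
	};

	/* assumed interface: allocation validates and applies the limits */
	return blk_alloc_disk(&lim, NUMA_NO_NODE);
}
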
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-void disk_set_zoned(struct gendisk *disk);
-
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
@@ -413,10 +487,6 @@ struct request_queue {
struct queue_limits limits;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct blk_integrity integrity;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
#ifdef CONFIG_PM
struct device *dev;
enum rpm_status rpm_status;
@@ -438,8 +508,6 @@ struct request_queue {
*/
int id;
- unsigned int dma_pad_mask;
-
/*
* queue settings
*/
@@ -525,38 +593,20 @@ struct request_queue {
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
-#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
-#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
-#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
-#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
-#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 17 /* Write back caching */
-#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
-#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
-#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
-#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
-#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
-#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE 31 /* quiesce_tagset skip the queue*/
-#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
- (1UL << QUEUE_FLAG_SAME_COMP) | \
- (1UL << QUEUE_FLAG_NOWAIT))
+#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP)
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
@@ -564,16 +614,10 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
-#define blk_queue_stable_writes(q) \
- test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
-#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
-#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_zone_resetall(q) \
- test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_pci_p2pdma(q) \
- test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
+#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
+#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
+#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) \
test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
@@ -589,7 +633,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
- test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
+ ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
@@ -619,16 +663,26 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q)
static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (q->limits.features & BLK_FEAT_ZONED);
}
#ifdef CONFIG_BLK_DEV_ZONED
-unsigned int bdev_nr_zones(struct block_device *bdev);
-
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
- return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
+ return disk->nr_zones;
+}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
@@ -637,16 +691,9 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline void disk_set_max_open_zones(struct gendisk *disk,
- unsigned int max_open_zones)
-{
- disk->queue->limits.max_open_zones = max_open_zones;
-}
-
-static inline void disk_set_max_active_zones(struct gendisk *disk,
- unsigned int max_active_zones)
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
- disk->queue->limits.max_active_zones = max_active_zones;
+ return disk_nr_zones(bdev->bd_disk);
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
@@ -659,36 +706,6 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
return bdev->bd_disk->queue->limits.max_active_zones;
}
-bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- return 0;
-}
-
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return 0;
-}
-static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
-{
- return 0;
-}
-static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
-{
- return 0;
-}
-
-static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
-{
- return 0;
-}
-static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
-{
- return false;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
if (q->queue_depth)
@@ -879,14 +896,15 @@ static inline bool bio_straddles_zones(struct bio *bio)
}
/*
- * Return how much of the chunk is left to be used for I/O at a given offset.
+ * Return how much within the boundary is left to be used for I/O at a given
+ * offset.
*/
-static inline unsigned int blk_chunk_sectors_left(sector_t offset,
- unsigned int chunk_sectors)
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+ unsigned int boundary_sectors)
{
- if (unlikely(!is_power_of_2(chunk_sectors)))
- return chunk_sectors - sector_div(offset, chunk_sectors);
- return chunk_sectors - (offset & (chunk_sectors - 1));
+ if (unlikely(!is_power_of_2(boundary_sectors)))
+ return boundary_sectors - sector_div(offset, boundary_sectors);
+ return boundary_sectors - (offset & (boundary_sectors - 1));
}
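
For illustration (the numbers are made up), both branches return the distance from the offset to the next boundary:

unsigned int a = blk_boundary_sectors_left(300, 256);	/* 256 - (300 & 255) = 212 */
unsigned int b = blk_boundary_sectors_left(300, 192);	/* 192 - (300 % 192) = 84  */
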
/**
@@ -903,7 +921,6 @@ static inline unsigned int blk_chunk_sectors_left(sector_t offset,
*/
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
- __acquires(q->limits_lock)
{
mutex_lock(&q->limits_lock);
return q->limits;
@@ -926,26 +943,31 @@ static inline void queue_limits_cancel_update(struct request_queue *q)
}
/*
+ * These helpers are for drivers that have sloppy feature negotiation and might
+ * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
+ * completion handler when the device returned an indicator that the respective
+ * feature is not actually supported. They are racy and the driver needs to
+ * cope with that. Try to avoid this scheme if you can.
+ */
+static inline void blk_queue_disable_discard(struct request_queue *q)
+{
+ q->limits.max_discard_sectors = 0;
+}
+
+static inline void blk_queue_disable_secure_erase(struct request_queue *q)
+{
+ q->limits.max_secure_erase_sectors = 0;
+}
+
+static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
+{
+ q->limits.max_write_zeroes_sectors = 0;
+}
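
A hedged sketch of the scheme the comment above describes: a driver that only learns at completion time that the device rejects discards turns the feature off from its completion handler. The handler name and the status check are illustrative only.

static void example_complete_rq(struct request *rq, blk_status_t status)
{
	/* racy by design, as noted above; the driver must tolerate stragglers */
	if (status == BLK_STS_NOTSUPP && req_op(rq) == REQ_OP_DISCARD)
		blk_queue_disable_discard(rq->q);
}
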
+
+/*
* Access functions for manipulating queue properties
*/
-extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-void blk_queue_max_secure_erase_sectors(struct request_queue *q,
- unsigned int max_sectors);
-extern void blk_queue_max_discard_sectors(struct request_queue *q,
- unsigned int max_discard_sectors);
-extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
-extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
- unsigned int max_zone_append_sectors);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
-void blk_queue_zone_write_granularity(struct request_queue *q,
- unsigned int size);
-extern void blk_queue_alignment_offset(struct request_queue *q,
- unsigned int alignment);
-void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
@@ -953,9 +975,7 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx);
-extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
@@ -1076,6 +1096,7 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
@@ -1203,12 +1224,7 @@ static inline unsigned int bdev_max_segments(struct block_device *bdev)
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
- int retval = 512;
-
- if (q && q->limits.logical_block_size)
- retval = q->limits.logical_block_size;
-
- return retval;
+ return q->limits.logical_block_size;
}
static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
@@ -1294,29 +1310,38 @@ static inline bool bdev_nonrot(struct block_device *bdev)
static inline bool bdev_synchronous(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_SYNCHRONOUS,
- &bdev_get_queue(bdev)->queue_flags);
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}
static inline bool bdev_stable_writes(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_STABLE_WRITES,
- &bdev_get_queue(bdev)->queue_flags);
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
+ return true;
+ return q->limits.features & BLK_FEAT_STABLE_WRITES;
+}
+
+static inline bool blk_queue_write_cache(struct request_queue *q)
+{
+ return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
+ !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}
static inline bool bdev_write_cache(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
+ return blk_queue_write_cache(bdev_get_queue(bdev));
}
static inline bool bdev_fua(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
+ return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA;
}
static inline bool bdev_nowait(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}
static inline bool bdev_is_zoned(struct block_device *bdev)
@@ -1358,7 +1383,31 @@ static inline bool bdev_is_zone_start(struct block_device *bdev,
static inline int queue_dma_alignment(const struct request_queue *q)
{
- return q ? q->limits.dma_alignment : 511;
+ return q->limits.dma_alignment;
+}
+
+static inline unsigned int
+queue_atomic_write_unit_max_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_max;
+}
+
+static inline unsigned int
+queue_atomic_write_unit_min_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_min;
+}
+
+static inline unsigned int
+queue_atomic_write_boundary_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
+}
+
+static inline unsigned int
+queue_atomic_write_max_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
@@ -1373,10 +1422,16 @@ static inline bool bdev_iter_is_aligned(struct block_device *bdev,
bdev_logical_block_size(bdev) - 1);
}
+static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
+{
+ return lim->dma_alignment | lim->dma_pad_mask;
+}
+
static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
unsigned int len)
{
- unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
+
return !(addr & alignment) && !(len & alignment);
}
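
Illustrative numbers for the combined mask (not taken from any real driver):

struct queue_limits lim = { .dma_alignment = 3, .dma_pad_mask = 511 };
int mask = blk_lim_dma_alignment_and_pad(&lim);	/* 3 | 511 == 511 */
/* blk_rq_aligned() then requires both addr and len to be 512-byte aligned */
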
@@ -1562,7 +1617,7 @@ int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
-void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
+void bdev_statx(struct path *, struct kstat *, u32);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
@@ -1580,7 +1635,8 @@ static inline int sync_blockdev_nowait(struct block_device *bdev)
static inline void sync_bdevs(bool wait)
{
}
-static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
+static inline void bdev_statx(struct path *path, struct kstat *stat,
+ u32 request_mask)
{
}
static inline void printk_all_partitions(void)
@@ -1602,6 +1658,27 @@ struct io_comp_batch {
void (*complete)(struct io_comp_batch *);
};
+static inline bool bdev_can_atomic_write(struct block_device *bdev)
+{
+ struct request_queue *bd_queue = bdev->bd_queue;
+ struct queue_limits *limits = &bd_queue->limits;
+
+ if (!limits->atomic_write_unit_min)
+ return false;
+
+ if (bdev_is_partition(bdev)) {
+ sector_t bd_start_sect = bdev->bd_start_sect;
+ unsigned int alignment =
+ max(limits->atomic_write_unit_min,
+ limits->atomic_write_hw_boundary);
+
+ if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
+ return false;
+ }
+
+ return true;
+}
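
A hedged sketch of a submitter using the helper together with the unit limits added to queue_limits above, before tagging a write bio with REQ_ATOMIC. Real callers also check power-of-two sizing and offset alignment; the function name is illustrative.

static bool example_can_issue_atomic(struct block_device *bdev, unsigned int len)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return bdev_can_atomic_write(bdev) &&
	       len >= queue_atomic_write_unit_min_bytes(q) &&
	       len <= queue_atomic_write_unit_max_bytes(q);
}
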
+
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5e694a308081..4f1d4a97b9d1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1612,6 +1612,7 @@ struct bpf_link_ops {
struct bpf_link_info *info);
int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
struct bpf_map *old_map);
+ __poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
};
struct bpf_tramp_link {
@@ -1730,9 +1731,9 @@ struct bpf_struct_ops {
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
- int (*reg)(void *kdata);
- void (*unreg)(void *kdata);
- int (*update)(void *kdata, void *old_kdata);
+ int (*reg)(void *kdata, struct bpf_link *link);
+ void (*unreg)(void *kdata, struct bpf_link *link);
+ int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
int (*validate)(void *kdata);
void *cfi_stubs;
struct module *owner;
@@ -2333,6 +2334,7 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
@@ -2492,7 +2494,7 @@ struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;
-void __dev_flush(void);
+void __dev_flush(struct list_head *flush_list);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
@@ -2505,7 +2507,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog, struct bpf_map *map,
bool exclude_ingress);
-void __cpu_map_flush(void);
+void __cpu_map_flush(struct list_head *flush_list);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
@@ -2642,8 +2644,6 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
-bool dev_check_flush(void);
-bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -2704,6 +2704,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
{
}
+static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+{
+ return NULL;
+}
+
static inline void bpf_link_put(struct bpf_link *link)
{
}
@@ -2731,7 +2736,7 @@ static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void __dev_flush(void)
+static inline void __dev_flush(struct list_head *flush_list)
{
}
@@ -2777,7 +2782,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static inline void __cpu_map_flush(void)
+static inline void __cpu_map_flush(struct list_head *flush_list)
{
}
@@ -2926,8 +2931,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
return ret;
}
-void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
- struct btf_mod_pair *used_btfs, u32 len);
+void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len);
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
@@ -3258,8 +3262,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
-int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr);
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -3281,8 +3285,8 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
{
return 0;
}
-static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr)
+static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 50aa87f8d77f..6503c85b10a3 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -73,7 +73,10 @@ enum bpf_iter_state {
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
- /* Fixed part of pointer offset, pointer types only */
+ /*
+ * Fixed part of pointer offset, pointer types only.
+ * Or constant delta between "linked" scalars with the same ID.
+ */
s32 off;
union {
/* valid when type == PTR_TO_PACKET */
@@ -167,6 +170,13 @@ struct bpf_reg_state {
* Similarly to dynptrs, we use ID to track "belonging" of a reference
* to a specific instance of bpf_iter.
*/
+ /*
+ * Upper bit of ID is used to remember relationship between "linked"
+ * registers. Example:
+ * r1 = r2; both will have r1->id == r2->id == N
+ * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ */
+#define BPF_ADD_CONST (1U << 31)
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
@@ -746,6 +756,8 @@ struct bpf_verifier_env {
/* Same as scratched_regs but for stack slots */
u64 scratched_stack_slots;
u64 prev_log_pos, prev_insn_print_pos;
+ /* buffer used to temporary hold constants as scalar registers */
+ struct bpf_reg_state fake_reg[2];
/* buffer used to generate temporary string representations,
* e.g., in reg_type_str() to generate reg_type string
*/
@@ -844,7 +856,7 @@ static inline u32 type_flag(u32 type)
/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
- return prog->type == BPF_PROG_TYPE_EXT ?
+ return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
prog->aux->dst_prog->type : prog->type;
}
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 1394ba302367..028b3e00378e 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -271,12 +271,100 @@
#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+/* BroadR-Reach LRE Registers. */
+#define MII_BCM54XX_LRECR 0x00 /* LRE Control Register */
+#define MII_BCM54XX_LRESR 0x01 /* LRE Status Register */
+#define MII_BCM54XX_LREPHYSID1 0x02 /* LRE PHYS ID 1 */
+#define MII_BCM54XX_LREPHYSID2 0x03 /* LRE PHYS ID 2 */
+#define MII_BCM54XX_LREANAA 0x04 /* LDS Auto-Negotiation Advertised Ability */
+#define MII_BCM54XX_LREANAC 0x05 /* LDS Auto-Negotiation Advertised Control */
+#define MII_BCM54XX_LREANPT 0x06 /* LDS Ability Next Page Transmit */
+#define MII_BCM54XX_LRELPA 0x07 /* LDS Link Partner Ability */
+#define MII_BCM54XX_LRELPNPM 0x08 /* LDS Link Partner Next Page Message */
+#define MII_BCM54XX_LRELPNPC 0x09 /* LDS Link Partner Next Page Control */
+#define MII_BCM54XX_LRELDSE 0x0a /* LDS Expansion Register */
+#define MII_BCM54XX_LREES 0x0f /* LRE Extended Status */
+
+/* LRE control register. */
+#define LRECR_RESET 0x8000 /* Reset to default state */
+#define LRECR_LOOPBACK 0x4000 /* Internal Loopback */
+#define LRECR_LDSRES 0x2000 /* Restart LDS Process */
+#define LRECR_LDSEN 0x1000 /* LDS Enable */
+#define LRECR_PDOWN 0x0800 /* Enable low power state */
+#define LRECR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define LRECR_SPEED100 0x0200 /* Select 100 Mbps */
+#define LRECR_SPEED10 0x0000 /* Select 10 Mbps */
+#define LRECR_4PAIRS 0x0020 /* Select 4 Pairs */
+#define LRECR_2PAIRS 0x0010 /* Select 2 Pairs */
+#define LRECR_1PAIR 0x0000 /* Select 1 Pair */
+#define LRECR_MASTER 0x0008 /* Force Master when LDS disabled */
+#define LRECR_SLAVE 0x0000 /* Force Slave when LDS disabled */
+
+/* LRE status register. */
+#define LRESR_100_1PAIR 0x2000 /* Can do 100Mbps 1 Pair */
+#define LRESR_100_4PAIR 0x1000 /* Can do 100Mbps 4 Pairs */
+#define LRESR_100_2PAIR 0x0800 /* Can do 100Mbps 2 Pairs */
+#define LRESR_10_2PAIR 0x0400 /* Can do 10Mbps 2 Pairs */
+#define LRESR_10_1PAIR 0x0200 /* Can do 10Mbps 1 Pair */
+#define LRESR_ESTATEN 0x0100 /* Extended Status in R15 */
+#define LRESR_RESV 0x0080 /* Unused... */
+#define LRESR_MFPS 0x0040 /* Can suppress Management Frames Preamble */
+#define LRESR_LDSCOMPLETE 0x0020 /* LDS Auto-negotiation complete */
+#define LRESR_8023 0x0010 /* Has IEEE 802.3 Support */
+#define LRESR_LDSABILITY 0x0008 /* LDS auto-negotiation capable */
+#define LRESR_LSTATUS 0x0004 /* Link status */
+#define LRESR_JCD 0x0002 /* Jabber detected */
+#define LRESR_ERCAP 0x0001 /* Ext-reg capability */
+
+/* LDS Auto-Negotiation Advertised Ability. */
+#define LREANAA_PAUSE_ASYM 0x8000 /* Can pause asymmetrically */
+#define LREANAA_PAUSE 0x4000 /* Can pause */
+#define LREANAA_100_1PAIR 0x0020 /* Can do 100Mbps 1 Pair */
+#define LREANAA_100_4PAIR 0x0010 /* Can do 100Mbps 4 Pair */
+#define LREANAA_100_2PAIR 0x0008 /* Can do 100Mbps 2 Pair */
+#define LREANAA_10_2PAIR 0x0004 /* Can do 10Mbps 2 Pair */
+#define LREANAA_10_1PAIR 0x0002 /* Can do 10Mbps 1 Pair */
+
+#define LRE_ADVERTISE_FULL (LREANAA_100_1PAIR | LREANAA_100_4PAIR | \
+ LREANAA_100_2PAIR | LREANAA_10_2PAIR | \
+ LREANAA_10_1PAIR)
+
+#define LRE_ADVERTISE_ALL LRE_ADVERTISE_FULL
+
+/* LDS Link Partner Ability. */
+#define LRELPA_PAUSE_ASYM 0x8000 /* Supports asymmetric pause */
+#define LRELPA_PAUSE 0x4000 /* Supports pause capability */
+#define LRELPA_100_1PAIR 0x0020 /* 100Mbps 1 Pair capable */
+#define LRELPA_100_4PAIR 0x0010 /* 100Mbps 4 Pair capable */
+#define LRELPA_100_2PAIR 0x0008 /* 100Mbps 2 Pair capable */
+#define LRELPA_10_2PAIR 0x0004 /* 10Mbps 2 Pair capable */
+#define LRELPA_10_1PAIR 0x0002 /* 10Mbps 1 Pair capable */
+
+/* LDS Expansion register. */
+#define LDSE_DOWNGRADE 0x8000 /* Can do LDS Speed Downgrade */
+#define LDSE_MASTER 0x4000 /* Master / Slave */
+#define LDSE_PAIRS_MASK 0x3000 /* Pair Count Mask */
+#define LDSE_PAIRS_SHIFT 12
+#define LDSE_4PAIRS (2 << LDSE_PAIRS_SHIFT) /* 4 Pairs Connection */
+#define LDSE_2PAIRS (1 << LDSE_PAIRS_SHIFT) /* 2 Pairs Connection */
+#define LDSE_1PAIR (0 << LDSE_PAIRS_SHIFT) /* 1 Pair Connection */
+#define LDSE_CABLEN_MASK 0x0FFF /* Cable Length Mask */
+
/* BCM54810 Registers */
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90)
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+/* BCM54811 Registers */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL (MII_BCM54XX_EXP_SEL_ER + 0x9A)
+/* Access Control Override Enable */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN BIT(15)
+/* Access Control Override Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL BIT(14)
+/* Access Control Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_VAL BIT(13)
+
/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
diff --git a/include/linux/btf.h b/include/linux/btf.h
index f9e56fd12a9f..cffb43133c68 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -82,7 +82,7 @@
* as to avoid issues such as the compiler inlining or eliding either a static
* kfunc, or a global kfunc in an LTO build.
*/
-#define __bpf_kfunc __used noinline
+#define __bpf_kfunc __used __retain noinline
#define __bpf_kfunc_start_defs() \
__diag_push(); \
@@ -140,6 +140,7 @@ extern const struct file_operations btf_fops;
const char *btf_get_name(const struct btf *btf);
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
+const struct btf_header *btf_header(const struct btf *btf);
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
@@ -212,8 +213,10 @@ int btf_get_fd_by_id(u32 id);
u32 btf_obj_id(const struct btf *btf);
bool btf_is_kernel(const struct btf *btf);
bool btf_is_module(const struct btf *btf);
+bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
+struct btf *btf_base_btf(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
@@ -339,6 +342,11 @@ static inline u8 btf_int_offset(const struct btf_type *t)
return BTF_INT_OFFSET(*(u32 *)(t + 1));
}
+static inline __u8 btf_int_bits(const struct btf_type *t)
+{
+ return BTF_INT_BITS(*(__u32 *)(t + 1));
+}
+
static inline bool btf_type_is_scalar(const struct btf_type *t)
{
return btf_type_is_int(t) || btf_type_is_enum(t);
@@ -478,6 +486,11 @@ static inline struct btf_param *btf_params(const struct btf_type *t)
return (struct btf_param *)(t + 1);
}
+static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
+{
+ return (struct btf_decl_tag *)(t + 1);
+}
+
static inline int btf_id_cmp_func(const void *a, const void *b)
{
const int *pa = a, *pb = b;
@@ -515,9 +528,38 @@ static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *
}
#endif
+enum btf_field_iter_kind {
+ BTF_FIELD_ITER_IDS,
+ BTF_FIELD_ITER_STRS,
+};
+
+struct btf_field_desc {
+ /* once-per-type offsets */
+ int t_off_cnt, t_offs[2];
+ /* member struct size, or zero, if no members */
+ int m_sz;
+ /* repeated per-member offsets */
+ int m_off_cnt, m_offs[1];
+};
+
+struct btf_field_iter {
+ struct btf_field_desc desc;
+ void *p;
+ int m_idx;
+ int off_idx;
+ int vlen;
+};
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids);
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind);
+__u32 *btf_field_iter_next(struct btf_field_iter *it);
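
A hedged sketch of the iterator API declared here: walk every type-ID field of a BTF type and remap it through a caller-provided table. The id_map source and error handling are illustrative.

static int example_remap_ids(struct btf_type *t, const __u32 *id_map)
{
	struct btf_field_iter it;
	__u32 *id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	/* each returned pointer addresses one embedded type ID */
	while ((id = btf_field_iter_next(&it)))
		*id = id_map[*id];

	return 0;
}
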
+
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+const char *btf_str_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
@@ -531,6 +573,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
struct module *owner);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
+bool btf_is_projection_of(const char *pname, const char *tname);
bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, enum bpf_prog_type prog_type,
int arg);
@@ -543,6 +586,28 @@ static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
{
return NULL;
}
+
+static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
+{
+}
+
+static inline int btf_relocate(void *log, struct btf *btf, const struct btf *base_btf,
+ __u32 **map_ids)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline __u32 *btf_field_iter_next(struct btf_field_iter *it)
+{
+ return NULL;
+}
+
static inline const char *btf_name_by_offset(const struct btf *btf,
u32 offset)
{
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index bd1e361b351c..f41c7f0ef91e 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -280,4 +280,18 @@ static inline void *bvec_virt(struct bio_vec *bvec)
return page_address(bvec->bv_page) + bvec->bv_offset;
}
+/**
+ * bvec_phys - return the physical address for a bvec
+ * @bvec: bvec to return the physical address for
+ */
+static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
+{
+ /*
+ * Note this open codes page_to_phys because page_to_phys is defined in
+ * <asm/io.h>, which we don't want to pull in here. If it ever moves to
+ * a sensible place we should start using it.
+ */
+ return PFN_PHYS(page_to_pfn(bvec->bv_page)) + bvec->bv_offset;
+}
+
#endif /* __LINUX_BVEC_H */
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 0ecb17bb6883..ca2a05682a54 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,6 +13,32 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
+/**
+ * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
+ * @x: value to align
+ *
+ * On some architectures, the L2 ("SMP") cacheline size is bigger than the
+ * L1 cacheline size, and sometimes this needs to be accounted for.
+ *
+ * Return: aligned value.
+ */
+#ifndef SMP_CACHE_ALIGN
+#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES)
+#endif
+
+/*
+ * ``__aligned_largest`` aligns a field to the alignment that is most optimal
+ * for the target architecture's memory operations. Expose the actual value
+ * so that it can be used anywhere else.
+ */
+#ifndef __LARGEST_ALIGN
+#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest)
+#endif
+
+#ifndef LARGEST_ALIGN
+#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN)
+#endif
+
/*
* __read_mostly is used to keep rarely changing variables out of frequently
* updated cachelines. Its use should be reserved for data that is used
@@ -95,6 +121,39 @@
__u8 __cacheline_group_end__##GROUP[0]
#endif
+/**
+ * __cacheline_group_begin_aligned - declare an aligned group start
+ * @GROUP: name of the group
+ * @...: optional group alignment
+ *
+ * The following block inside a struct:
+ *
+ * __cacheline_group_begin_aligned(grp);
+ * field a;
+ * field b;
+ * __cacheline_group_end_aligned(grp);
+ *
+ * will always be aligned to either the specified alignment or
+ * ``SMP_CACHE_BYTES``.
+ */
+#define __cacheline_group_begin_aligned(GROUP, ...) \
+ __cacheline_group_begin(GROUP) \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
+/**
+ * __cacheline_group_end_aligned - declare an aligned group end
+ * @GROUP: name of the group
+ * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
+ *
+ * Note that the end marker is aligned to sizeof(long) to allow more precise
+ * size assertions. It also declares padding at the end to avoid the next
+ * field falling into this cacheline.
+ */
+#define __cacheline_group_end_aligned(GROUP, ...) \
+ __cacheline_group_end(GROUP) __aligned(sizeof(long)); \
+ struct { } __cacheline_group_pad__##GROUP \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2cb15fe4fe12..3dde175f4108 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -3,6 +3,7 @@
#define _LINUX_CACHEINFO_H
#include <linux/bitops.h>
+#include <linux/cpuhplock.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
@@ -113,23 +114,37 @@ int acpi_get_cache_info(unsigned int cpu,
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
/*
- * Get the id of the cache associated with @cpu at level @level.
+ * Get the cacheinfo structure for the cache associated with @cpu at
+ * level @level.
* cpuhp lock must be held.
*/
-static inline int get_cpu_cacheinfo_id(int cpu, int level)
+static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
int i;
+ lockdep_assert_cpus_held();
+
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == level) {
if (ci->info_list[i].attributes & CACHE_ID)
- return ci->info_list[i].id;
- return -1;
+ return &ci->info_list[i];
+ return NULL;
}
}
- return -1;
+ return NULL;
+}
+
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+ struct cacheinfo *ci = get_cpu_cacheinfo_level(cpu, level);
+
+ return ci ? ci->id : -1;
}
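
A hedged sketch of a caller honoring the cpuhp-lock requirement that the new lockdep assertion enforces; the choice of level 3 and the use of ->id are illustrative.

static int example_l3_cache_id(int cpu)
{
	struct cacheinfo *ci;
	int id = -1;

	cpus_read_lock();
	ci = get_cpu_cacheinfo_level(cpu, 3);
	if (ci)
		id = ci->id;
	cpus_read_unlock();

	return id;
}
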
#ifdef CONFIG_ARM64
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 1b92aed49363..23492213ea35 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -186,7 +186,7 @@ void close_candev(struct net_device *dev);
int can_change_mtu(struct net_device *dev, int new_mtu);
int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index 60693a145894..caa4b4430634 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -82,16 +82,6 @@ enum cc_attr {
CC_ATTR_GUEST_SEV_SNP,
/**
- * @CC_ATTR_HOTPLUG_DISABLED: Hotplug is not supported or disabled.
- *
- * The platform/OS is running as a guest/virtual machine does not
- * support CPU hotplug feature.
- *
- * Examples include TDX Guest.
- */
- CC_ATTR_HOTPLUG_DISABLED,
-
- /**
* @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
*
* The host kernel is running with the necessary features
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 98c6fd0b39b6..fdfb61ccf55a 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -77,7 +77,7 @@ struct cdrom_device_ops {
unsigned int clearing, int slot);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
- int (*select_speed) (struct cdrom_device_info *, int);
+ int (*select_speed) (struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ea48c861cd36..b36690ca0d3f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -119,7 +119,12 @@ enum {
/*
* Enable hugetlb accounting for the memory controller.
*/
- CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
+ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
+
+ /*
+ * Enable legacy local pids.events.
+ */
+ CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};
/* cftype->flags */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index c2d09bc4f976..d9e613803df1 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_GUARDS_H
-#define __LINUX_GUARDS_H
+#ifndef _LINUX_CLEANUP_H
+#define _LINUX_CLEANUP_H
#include <linux/compiler.h>
@@ -63,17 +63,20 @@
#define __free(_name) __cleanup(__free_##_name)
-#define __get_and_null_ptr(p) \
- ({ __auto_type __ptr = &(p); \
- __auto_type __val = *__ptr; \
- *__ptr = NULL; __val; })
+#define __get_and_null(p, nullvalue) \
+ ({ \
+ __auto_type __ptr = &(p); \
+ __auto_type __val = *__ptr; \
+ *__ptr = nullvalue; \
+ __val; \
+ })
static inline __must_check
const volatile void * __must_check_fn(const volatile void *val)
{ return val; }
#define no_free_ptr(p) \
- ((typeof(p)) __must_check_fn(__get_and_null_ptr(p)))
+ ((typeof(p)) __must_check_fn(__get_and_null(p, NULL)))
#define return_ptr(p) return no_free_ptr(p)
@@ -247,4 +250,4 @@ __DEFINE_LOCK_GUARD_0(_name, _lock)
{ return class_##_name##_lock_ptr(_T); }
-#endif /* __LINUX_GUARDS_H */
+#endif /* _LINUX_CLEANUP_H */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 0ad8b550bb4b..d35b677b08fe 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -21,6 +21,7 @@
#include <asm/div64.h>
#include <asm/io.h>
+struct clocksource_base;
struct clocksource;
struct module;
@@ -50,6 +51,7 @@ struct module;
* multiplication
* @name: Pointer to clocksource name
* @list: List head for registration (internal)
+ * @freq_khz: Clocksource frequency in khz.
* @rating: Rating value for selection (higher is better)
* To avoid rating inflation the following
* list should give you a guide as to how
@@ -70,6 +72,8 @@ struct module;
* validate the clocksource from which the snapshot was
* taken.
* @flags: Flags describing special properties
+ * @base: Hardware abstraction for clock on which a clocksource
+ * is based
* @enable: Optional function to enable the clocksource
* @disable: Optional function to disable the clocksource
* @suspend: Optional suspend function for the clocksource
@@ -107,10 +111,12 @@ struct clocksource {
u64 max_cycles;
const char *name;
struct list_head list;
+ u32 freq_khz;
int rating;
enum clocksource_ids id;
enum vdso_clock_mode vdso_clock_mode;
unsigned long flags;
+ struct clocksource_base *base;
int (*enable)(struct clocksource *cs);
void (*disable)(struct clocksource *cs);
@@ -306,4 +312,25 @@ static inline unsigned int clocksource_get_max_watchdog_retry(void)
void clocksource_verify_percpu(struct clocksource *cs);
+/**
+ * struct clocksource_base - hardware abstraction for clock on which a clocksource
+ * is based
+ * @id: Defaults to CSID_GENERIC. Certain snapshot functions require that
+ * the current clocksource is based on a clocksource_base with a
+ * particular ID; the id value allows those callers to validate the
+ * clocksource from which the snapshot was taken.
+ * @freq_khz: Nominal frequency of the base clock in kHz
+ * @offset: Offset between the base clock and the clocksource
+ * @numerator: Numerator of the clock ratio between base clock and the clocksource
+ * @denominator: Denominator of the clock ratio between base clock and the clocksource
+ */
+struct clocksource_base {
+ enum clocksource_ids id;
+ u32 freq_khz;
+ u64 offset;
+ u32 numerator;
+ u32 denominator;
+};
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
index a4fa3436940c..2bb4d8c2f1b0 100644
--- a/include/linux/clocksource_ids.h
+++ b/include/linux/clocksource_ids.h
@@ -9,6 +9,7 @@ enum clocksource_ids {
CSID_X86_TSC_EARLY,
CSID_X86_TSC,
CSID_X86_KVM_CLK,
+ CSID_X86_ART,
CSID_MAX,
};
diff --git a/include/linux/closure.h b/include/linux/closure.h
index 99155df162d0..2af44427107d 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -159,6 +159,7 @@ struct closure {
#ifdef CONFIG_DEBUG_CLOSURES
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+#define CLOSURE_MAGIC_STACK 0xc05451cc
unsigned int magic;
struct list_head all;
@@ -285,6 +286,21 @@ static inline void closure_get(struct closure *cl)
}
/**
+ * closure_get_not_zero - increment the refcount unless it has already hit zero
+ * @cl: closure to take a reference on
+ *
+ * Return: true if a reference was taken, false if @cl was already finished.
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+ unsigned old = atomic_read(&cl->remaining);
+ do {
+ if (!(old & CLOSURE_REMAINING_MASK))
+ return false;
+
+ } while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+ return true;
+}
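
A hedged usage sketch: take a reference only while the closure is still live, e.g. from a lookup path racing with the final put. The function name is illustrative; any real work would go between the get and the put.

static bool example_use_if_alive(struct closure *cl)
{
	if (!closure_get_not_zero(cl))
		return false;
	/* cl cannot be freed until the matching closure_put() */
	closure_put(cl);
	return true;
}
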
+
+/**
* closure_init - Initialize a closure, setting the refcount to 1
* @cl: closure to initialize
* @parent: parent of the new closure. cl will take a refcount on it for its
@@ -308,6 +324,18 @@ static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+static inline void closure_init_stack_release(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
}
/**
@@ -355,6 +383,8 @@ do { \
*/
#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+void closure_return_sync(struct closure *cl);
+
/**
* continue_at_nobarrier - jump to another function without barrier
*
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 233f61ec8afc..56cebaff0c91 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -608,7 +608,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd,
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
/* No generic prototype for truncate64, ftruncate64, fallocate */
asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
int flags, umode_t mode);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8c252e073bd8..68a24a3a6979 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -194,9 +194,17 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* This data_race() macro is useful for situations in which data races
* should be forgiven. One example is diagnostic code that accesses
* shared variables but is not a part of the core synchronization design.
+ * For example, if accesses to a given variable are protected by a lock,
+ * except for diagnostic code, then the accesses under the lock should
+ * be plain C-language accesses and those in the diagnostic code should
+ * use data_race(). This way, KCSAN will complain if buggy lockless
+ * accesses to that variable are introduced, even if the buggy accesses
+ * are protected by READ_ONCE() or WRITE_ONCE().
*
* This macro *does not* affect normal code generation, but is a hint
- * to tooling that data races here are to be ignored.
+ * to tooling that data races here are to be ignored. If the access must
+ * be atomic *and* KCSAN should ignore the access, use both data_race()
+ * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
*/
#define data_race(expr) \
({ \
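
A hedged sketch of the pattern the comment describes: updates are protected by a lock and use plain accesses, while a diagnostics-only reader wraps its lockless access in data_race() so KCSAN ignores just that read. The variable and function names are illustrative.

static DEFINE_SPINLOCK(stats_lock);
static unsigned long total_len;

static void stats_add(unsigned long len)
{
	spin_lock(&stats_lock);
	total_len += len;		/* plain access under the lock */
	spin_unlock(&stats_lock);
}

static unsigned long stats_peek(void)
{
	return data_race(total_len);	/* diagnostic, racy on purpose */
}
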
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 93600de3800b..f14c275950b5 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -143,6 +143,29 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __preserve_most
#endif
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+ (defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+ defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
+#endif
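
A minimal, illustrative use of the new attribute: pairing __used with __retain keeps the symbol's section alive through linker garbage collection and LTO, which is exactly why the __bpf_kfunc definition in the btf.h hunk above gains __retain. The function name is made up.

static int __used __retain example_keep_me(void)
{
	return 0;
}
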
+
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 2606711adb18..c771e9d0d0b9 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -216,6 +216,9 @@ struct configfs_group_operations {
struct config_group *(*make_group)(struct config_group *group, const char *name);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
+ bool (*is_visible)(struct config_item *item, struct configfs_attribute *attr, int n);
+ bool (*is_bin_visible)(struct config_item *item, struct configfs_bin_attribute *attr,
+ int n);
};
struct configfs_subsystem {
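A minimal sketch of how a subsystem might use the new hook (the callback and attribute names are hypothetical); returning false hides the attribute from the item's directory:

  /* Hide the "debug" attribute on kernels built without debugging. */
  static bool my_attr_is_visible(struct config_item *item,
                                 struct configfs_attribute *attr, int n)
  {
          if (!IS_ENABLED(CONFIG_DEBUG_KERNEL) &&
              !strcmp(attr->ca_name, "debug"))
                  return false;
          return true;
  }

  static struct configfs_group_operations my_group_ops = {
          .is_visible     = my_attr_is_visible,
  };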
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 861c3bfc5f17..a8926d0a28cd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpuhplock.h>
#include <linux/cpu_smt.h>
struct device;
@@ -132,38 +133,6 @@ static inline int add_cpu(unsigned int cpu) { return 0;}
#endif /* CONFIG_SMP */
extern const struct bus_type cpu_subsys;
-extern int lockdep_is_cpus_held(void);
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpus_write_lock(void);
-extern void cpus_write_unlock(void);
-extern void cpus_read_lock(void);
-extern void cpus_read_unlock(void);
-extern int cpus_read_trylock(void);
-extern void lockdep_assert_cpus_held(void);
-extern void cpu_hotplug_disable(void);
-extern void cpu_hotplug_enable(void);
-void clear_tasks_mm_cpumask(int cpu);
-int remove_cpu(unsigned int cpu);
-int cpu_device_down(struct device *dev);
-extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
-
-#else /* CONFIG_HOTPLUG_CPU */
-
-static inline void cpus_write_lock(void) { }
-static inline void cpus_write_unlock(void) { }
-static inline void cpus_read_lock(void) { }
-static inline void cpus_read_unlock(void) { }
-static inline int cpus_read_trylock(void) { return true; }
-static inline void lockdep_assert_cpus_held(void) { }
-static inline void cpu_hotplug_disable(void) { }
-static inline void cpu_hotplug_enable(void) { }
-static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
-static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
-#endif /* !CONFIG_HOTPLUG_CPU */
-
-DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
-
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 20f7e98ee8af..d4d2f4d1d7cb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -396,7 +396,7 @@ struct cpufreq_driver {
int (*online)(struct cpufreq_policy *policy);
int (*offline)(struct cpufreq_policy *policy);
- int (*exit)(struct cpufreq_policy *policy);
+ void (*exit)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -785,7 +785,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
-int cpufreq_boost_enabled(void);
+bool cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
@@ -1164,9 +1164,9 @@ static inline int cpufreq_boost_trigger_state(int state)
{
return 0;
}
-static inline int cpufreq_boost_enabled(void)
+static inline bool cpufreq_boost_enabled(void)
{
- return 0;
+ return false;
}
static inline int cpufreq_enable_boost_support(void)
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 7a5785f405b6..89f5c34ce4df 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -27,7 +27,7 @@
* startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
* during a CPU online operation. During a CPU offline operation the
* installed teardown callbacks are invoked in the reverse order from
- * CPU_ONLINE - 1 down to CPUHP_OFFLINE.
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
*
* The state space has three sections: PREPARE, STARTING and ONLINE.
*
@@ -171,6 +171,7 @@ enum cpuhp_state {
CPUHP_AP_ARMADA_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
+ CPUHP_AP_REALTEK_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
new file mode 100644
index 000000000000..f7aa20f62b87
--- /dev/null
+++ b/include/linux/cpuhplock.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/cpuhplock.h - CPU hotplug locking
+ *
+ * Locking functions for CPU hotplug.
+ */
+#ifndef _LINUX_CPUHPLOCK_H_
+#define _LINUX_CPUHPLOCK_H_
+
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+
+struct device;
+
+extern int lockdep_is_cpus_held(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void cpus_write_lock(void);
+void cpus_write_unlock(void);
+void cpus_read_lock(void);
+void cpus_read_unlock(void);
+int cpus_read_trylock(void);
+void lockdep_assert_cpus_held(void);
+void cpu_hotplug_disable_offlining(void);
+void cpu_hotplug_disable(void);
+void cpu_hotplug_enable(void);
+void clear_tasks_mm_cpumask(int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpus_write_lock(void) { }
+static inline void cpus_write_unlock(void) { }
+static inline void cpus_read_lock(void) { }
+static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
+static inline void lockdep_assert_cpus_held(void) { }
+static inline void cpu_hotplug_disable_offlining(void) { }
+static inline void cpu_hotplug_disable(void) { }
+static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
+#endif /* _LINUX_CPUHPLOCK_H_ */
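Since DEFINE_LOCK_GUARD_0() moves along with the declarations, callers can keep holding the hotplug read lock over a scope with guard(); a minimal sketch:

  #include <linux/cpuhplock.h>
  #include <linux/cpumask.h>
  #include <linux/printk.h>

  static void walk_online_cpus(void)
  {
          unsigned int cpu;

          /* cpus_read_unlock() runs automatically when the scope ends. */
          guard(cpus_read_lock)();

          for_each_online_cpu(cpu)
                  pr_info("cpu%u is online\n", cpu);
  }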
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 23686bed441d..954d4adc8f81 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -93,6 +93,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
*
* cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
* cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ * cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration
*
@@ -125,11 +126,13 @@ static inline void set_nr_cpu_ids(unsigned int nr)
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
+extern struct cpumask __cpu_enabled_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
+#define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
@@ -1075,6 +1078,7 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
+#define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
#endif
@@ -1093,6 +1097,15 @@ set_cpu_possible(unsigned int cpu, bool possible)
}
static inline void
+set_cpu_enabled(unsigned int cpu, bool can_be_onlined)
+{
+ if (can_be_onlined)
+ cpumask_set_cpu(cpu, &__cpu_enabled_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_enabled_mask);
+}
+
+static inline void
set_cpu_present(unsigned int cpu, bool present)
{
if (present)
@@ -1173,6 +1186,7 @@ static __always_inline unsigned int num_online_cpus(void)
return raw_atomic_read(&__num_online_cpus);
}
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+#define num_enabled_cpus() cpumask_weight(cpu_enabled_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
@@ -1181,6 +1195,11 @@ static inline bool cpu_online(unsigned int cpu)
return cpumask_test_cpu(cpu, cpu_online_mask);
}
+static inline bool cpu_enabled(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_enabled_mask);
+}
+
static inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
@@ -1205,6 +1224,7 @@ static inline bool cpu_dying(unsigned int cpu)
#define num_online_cpus() 1U
#define num_possible_cpus() 1U
+#define num_enabled_cpus() 1U
#define num_present_cpus() 1U
#define num_active_cpus() 1U
@@ -1218,6 +1238,11 @@ static inline bool cpu_possible(unsigned int cpu)
return cpu == 0;
}
+static inline bool cpu_enabled(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
static inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
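A brief, hypothetical use of the new mask: report CPUs that may be brought online but are not currently online (a real caller would typically do this under cpus_read_lock()):

  static void report_enabled_cpus(void)
  {
          unsigned int cpu;

          pr_info("%u enabled CPUs, %u online\n",
                  num_enabled_cpus(), num_online_cpus());

          for_each_enabled_cpu(cpu)
                  if (!cpu_online(cpu))
                          pr_info("cpu%u can be brought online\n", cpu);
  }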
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index bf53e3894aae..bff956f7b2b9 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -71,7 +71,7 @@ extern const struct qstr dotdot_name;
# define DNAME_INLINE_LEN 40 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# define DNAME_INLINE_LEN 36 /* 128 bytes */
# else
# define DNAME_INLINE_LEN 44 /* 128 bytes */
# endif
@@ -89,13 +89,18 @@ struct dentry {
struct inode *d_inode; /* Where the name belongs to - NULL is
* negative */
unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
+ /* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */
/* Ref lookup also touches following */
- struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
void *d_fsdata; /* fs-specific data */
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ struct lockref d_lockref; /* per-dentry lock and refcount
+ * keep separate from RCU lookup area if
+ * possible!
+ */
union {
struct list_head d_lru; /* LRU list */
@@ -278,6 +283,8 @@ static inline unsigned d_count(const struct dentry *dentry)
return dentry->d_lockref.count;
}
+ino_t d_parent_ino(struct dentry *dentry);
+
/*
* helper function for dentry_operations.d_dname() members
*/
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 82b2195efaca..15d28164bbbd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -358,6 +358,13 @@ struct dm_target {
bool discards_supported:1;
/*
+ * Automatically set by dm-core if this target supports
+ * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
+ * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
+ */
+ bool zone_reset_all_supported:1;
+
+ /*
* Set if this target requires that discards be split on
* 'max_discard_sectors' boundaries.
*/
diff --git a/include/linux/device.h b/include/linux/device.h
index fc3bd7116ab9..ace039151cb8 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1220,8 +1220,6 @@ static inline void device_remove_group(struct device *dev,
return device_remove_groups(dev, groups);
}
-int __must_check devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups);
int __must_check devm_device_add_group(struct device *dev,
const struct attribute_group *grp);
diff --git a/include/linux/dim.h b/include/linux/dim.h
index f343bc9aa2ec..1b581ff25a15 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -10,6 +10,15 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+struct net_device;
+
+/* Number of DIM profiles and period mode. */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
/*
* Number of events between DIM iterations.
* Causes a moderation of the algorithm run.
@@ -38,12 +47,45 @@
* @pkts: CQ packet counter suggestion (by DIM)
* @comps: Completion counter
* @cq_period_mode: CQ period count mode (from CQE/EQE)
+ * @rcu: for asynchronous kfree_rcu
*/
struct dim_cq_moder {
u16 usec;
u16 pkts;
u16 comps;
u8 cq_period_mode;
+ struct rcu_head rcu;
+};
+
+#define DIM_PROFILE_RX BIT(0) /* support rx profile modification */
+#define DIM_PROFILE_TX BIT(1) /* support tx profile modification */
+
+#define DIM_COALESCE_USEC BIT(0) /* support usec field modification */
+#define DIM_COALESCE_PKTS BIT(1) /* support pkts field modification */
+#define DIM_COALESCE_COMPS BIT(2) /* support comps field modification */
+
+/**
+ * struct dim_irq_moder - Structure for irq moderation information.
+ * Used to collect irq moderation related information.
+ *
+ * @profile_flags: DIM_PROFILE_*
+ * @coal_flags: DIM_COALESCE_* for Rx and Tx
+ * @dim_rx_mode: Rx DIM period count mode: CQE or EQE
+ * @dim_tx_mode: Tx DIM period count mode: CQE or EQE
+ * @rx_profile: DIM profile list for Rx
+ * @tx_profile: DIM profile list for Tx
+ * @rx_dim_work: Rx DIM worker scheduled by net_dim()
+ * @tx_dim_work: Tx DIM worker scheduled by net_dim()
+ */
+struct dim_irq_moder {
+ u8 profile_flags;
+ u8 coal_flags;
+ u8 dim_rx_mode;
+ u8 dim_tx_mode;
+ struct dim_cq_moder __rcu *rx_profile;
+ struct dim_cq_moder __rcu *tx_profile;
+ void (*rx_dim_work)(struct work_struct *work);
+ void (*tx_dim_work)(struct work_struct *work);
};
/**
@@ -192,6 +234,77 @@ enum dim_step_result {
};
/**
+ * net_dim_init_irq_moder - collect information to initialize irq moderation
+ * @dev: target network device
+ * @profile_flags: Rx or Tx profile modification capability
+ * @coal_flags: irq moderation params flags
+ * @rx_mode: CQ period mode for Rx
+ * @tx_mode: CQ period mode for Tx
+ * @rx_dim_work: Rx worker called after dim decision
+ * @tx_dim_work: Tx worker called after dim decision
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
+ u8 coal_flags, u8 rx_mode, u8 tx_mode,
+ void (*rx_dim_work)(struct work_struct *work),
+ void (*tx_dim_work)(struct work_struct *work));
+
+/**
+ * net_dim_free_irq_moder - free fields for irq moderation
+ * @dev: target network device
+ */
+void net_dim_free_irq_moder(struct net_device *dev);
+
+/**
+ * net_dim_setting - initialize DIM's cq mode and schedule worker
+ * @dev: target network device
+ * @dim: DIM context
+ * @is_tx: true indicates the tx direction, false indicates the rx direction
+ */
+void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx);
+
+/**
+ * net_dim_work_cancel - synchronously cancel dim's worker
+ * @dim: DIM context
+ */
+void net_dim_work_cancel(struct dim *dim);
+
+/**
+ * net_dim_get_rx_irq_moder - get DIM rx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_rx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_get_tx_irq_moder - get DIM tx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_tx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_set_rx_mode - set DIM rx cq mode
+ * @dev: target network device
+ * @rx_mode: target rx cq mode
+ */
+void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode);
+
+/**
+ * net_dim_set_tx_mode - set DIM tx cq mode
+ * @dev: target network device
+ * @tx_mode: target tx cq mode
+ */
+void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode);
+
+/**
* dim_on_top - check if current state is a good place to stop (top location)
* @dim: DIM context
*
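A sketch of how a driver might adopt the new helpers at probe time; the worker functions are placeholders and their hardware programming is elided:

  static void my_rx_dim_work(struct work_struct *work)
  {
          /* Apply the newly chosen Rx moderation profile to the HW. */
  }

  static void my_tx_dim_work(struct work_struct *work)
  {
          /* Apply the newly chosen Tx moderation profile to the HW. */
  }

  static int my_probe_dim(struct net_device *netdev)
  {
          /* Let userspace tune usec/pkts for both Rx and Tx profiles. */
          return net_dim_init_irq_moder(netdev,
                                        DIM_PROFILE_RX | DIM_PROFILE_TX,
                                        DIM_COALESCE_USEC | DIM_COALESCE_PKTS,
                                        DIM_CQ_PERIOD_MODE_START_FROM_EQE,
                                        DIM_CQ_PERIOD_MODE_START_FROM_EQE,
                                        my_rx_dim_work, my_tx_dim_work);
  }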
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index c58c4f790c04..bacda9898f2b 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -35,6 +35,9 @@ struct dlm_lockspace_ops {
int num_slots, int our_slot, uint32_t generation);
};
+/* only relevant for kernel lockspaces, will be removed in future */
+#define DLM_LSFL_SOFTIRQ __DLM_LSFL_RESERVED0
+
/*
* dlm_new_lockspace
*
@@ -55,6 +58,11 @@ struct dlm_lockspace_ops {
* used to select the directory node. Must be the same on all nodes.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ * DLM_LSFL_SOFTIRQ
+ * dlm request callbacks (ast, bast) are softirq safe. Users should
+ * prefer this flag; it will become the default in the future. If set,
+ * the strongest context in which the ast and bast callbacks run is
+ * softirq, which avoids an additional context switch.
*
* lvblen: length of lvb in bytes. Must be multiple of 8.
* dlm_new_lockspace() returns an error if this does not match
@@ -121,7 +129,14 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
* call.
*
* AST routines should not block (at least not for long), but may make
- * any locking calls they please.
+ * any locking calls they please. If DLM_LSFL_SOFTIRQ for kernel
+ * users of dlm_new_lockspace() is passed the ast and bast callbacks
+ * can be processed in softirq context. Also some of the callback
+ * contexts are in the same context as the DLM lock request API, users
+ * must not hold locks while calling dlm lock request API and trying
+ * to acquire this lock in the callback again, this will end in a
+ * lock recursion. For newer implementation the DLM_LSFL_SOFTIRQ
+ * should be used.
*/
int dlm_lock(dlm_lockspace_t *lockspace,
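A hedged sketch of a kernel user opting in to softirq-safe callbacks when creating a lockspace; the names and the 32-byte LVB length are illustrative, and the usual dlm_new_lockspace() argument order is assumed:

  static dlm_lockspace_t *my_ls;

  static int my_create_lockspace(void)
  {
          /* Callbacks registered later may now run in softirq context. */
          return dlm_new_lockspace("my_fs", "my_cluster",
                                   DLM_LSFL_NEWEXCL | DLM_LSFL_SOFTIRQ,
                                   32 /* lvblen, multiple of 8 */,
                                   NULL, NULL, NULL, &my_ls);
  }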
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index f3664ee12170..d13aabdeb4b2 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -8,12 +8,18 @@
#include <net/dsa.h>
#include <linux/types.h>
+/* VBID is limited to three bits and the value zero is reserved,
+ * so only 7 bridges can be enumerated.
+ */
+#define DSA_TAG_8021Q_MAX_NUM_BRIDGES 7
+
int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
void dsa_tag_8021q_unregister(struct dsa_switch *ds);
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge);
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index b4f22112ba75..3ce7cbcc37a3 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -5,8 +5,8 @@ struct lan9303;
struct lan9303_phy_ops {
/* PHY 1 and 2 access*/
- int (*phy_read)(struct lan9303 *chip, int port, int regnum);
- int (*phy_write)(struct lan9303 *chip, int port,
+ int (*phy_read)(struct lan9303 *chip, int addr, int regnum);
+ int (*phy_write)(struct lan9303 *chip, int addr,
int regnum, u16 val);
};
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 418e555459da..6bf3c4fe8511 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -74,10 +74,10 @@ typedef void *efi_handle_t;
*/
typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
+#define EFI_GUID(a, b, c, d...) ((efi_guid_t){ { \
(a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
- (c) & 0xff, ((c) >> 8) & 0xff, d } }
+ (c) & 0xff, ((c) >> 8) & 0xff, d } })
/*
* Generic EFI table header
@@ -385,6 +385,7 @@ void efi_native_runtime_setup(void);
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define APPLE_SET_OS_PROTOCOL_GUID EFI_GUID(0xc5c5da95, 0x7d5c, 0x45e6, 0xb2, 0xf1, 0x3f, 0xd5, 0x2b, 0xb1, 0x00, 0x77)
#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
#define EFI_TCG2_FINAL_EVENTS_TABLE_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
@@ -607,7 +608,11 @@ typedef struct {
u32 num_entries;
u32 desc_size;
u32 flags;
- efi_memory_desc_t entry[0];
+ /*
+ * There are @num_entries following, each of size @desc_size bytes,
+ * including an efi_memory_desc_t header. See efi_memdesc_ptr().
+ */
+ efi_memory_desc_t entry[];
} efi_memory_attributes_table_t;
typedef struct {
@@ -783,7 +788,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn);
/*
- * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor
+ * efi_memdesc_ptr - get the n-th EFI memmap descriptor
* @map: the start of efi memmap
* @desc_size: the size of space for each EFI memmap descriptor
* @n: the index of efi memmap descriptor
@@ -801,7 +806,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
* during bootup since for_each_efi_memory_desc_xxx() is available after the
* kernel initializes the EFI subsystem to set up struct efi_memory_map.
*/
-#define efi_early_memdesc_ptr(map, desc_size, n) \
+#define efi_memdesc_ptr(map, desc_size, n) \
(efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
/* Iterate through an efi_memory_map */
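For illustration, early boot code could walk a raw memory map with the renamed helper; the map pointer, descriptor size and entry count stand in for whatever the caller obtained from the firmware:

  static u64 count_conventional_pages(void *map, unsigned long desc_size,
                                      unsigned int num_entries)
  {
          u64 pages = 0;
          unsigned int i;

          for (i = 0; i < num_entries; i++) {
                  efi_memory_desc_t *md = efi_memdesc_ptr(map, desc_size, i);

                  if (md->type == EFI_CONVENTIONAL_MEMORY)
                          pages += md->num_pages;
          }
          return pages;
  }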
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2ad1ffa4ccb9..0ed47d00549b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -636,6 +636,14 @@ static inline void eth_skb_pkt_type(struct sk_buff *skb,
}
}
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ skb_pull_inline(skb, ETH_HLEN);
+ return eth;
+}
+
/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 6fd9107d3cc0..303fda54ef17 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -18,6 +18,7 @@
#include <linux/if_ether.h>
#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/net_tstamp.h>
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
@@ -159,6 +160,49 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
+/**
+ * struct ethtool_rxfh_context - a custom RSS context configuration
+ * @indir_size: Number of u32 entries in indirection table
+ * @key_size: Size of hash key, in bytes
+ * @priv_size: Size of driver private data, in bytes
+ * @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_*
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ * @indir_configured: indir has been specified (at create time or subsequently)
+ * @key_configured: hkey has been specified (at create time or subsequently)
+ */
+struct ethtool_rxfh_context {
+ u32 indir_size;
+ u32 key_size;
+ u16 priv_size;
+ u8 hfunc;
+ u8 input_xfrm;
+ u8 indir_configured:1;
+ u8 key_configured:1;
+ /* private: driver private data, indirection table, and hash key are
+ * stored sequentially in @data area. Use below helpers to access.
+ */
+ u32 key_off;
+ u8 data[] __aligned(sizeof(void *));
+};
+
+static inline void *ethtool_rxfh_context_priv(struct ethtool_rxfh_context *ctx)
+{
+ return ctx->data;
+}
+
+static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx)
+{
+ return (u32 *)(ctx->data + ALIGN(ctx->priv_size, sizeof(u32)));
+}
+
+static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
+{
+ return &ctx->data[ctx->key_off];
+}
+
+void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
+
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
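A sketch of how a driver might consume a context using the accessors above; the per-context private struct and the debug print are hypothetical:

  struct my_rss_ctx_priv {
          u32 hw_handle;          /* driver-assigned hardware context id */
  };

  static void my_dump_rxfh_context(struct ethtool_rxfh_context *ctx)
  {
          struct my_rss_ctx_priv *priv = ethtool_rxfh_context_priv(ctx);
          u32 *indir = ethtool_rxfh_context_indir(ctx);
          u8 *key = ethtool_rxfh_context_key(ctx);

          pr_debug("hw ctx %u: indir[0]=%u key[0]=%02x (%u entries, %u key bytes)\n",
                   priv->hw_handle, indir[0], key[0],
                   ctx->indir_size, ctx->key_size);
  }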
@@ -284,7 +328,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
-#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0)
+#define ETHTOOL_COALESCE_RX_PROFILE BIT(27)
+#define ETHTOOL_COALESCE_TX_PROFILE BIT(28)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(28, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -504,17 +550,16 @@ struct ethtool_ts_stats {
#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
/**
- * struct ethtool_module_eeprom - EEPROM dump from specified page
- * @offset: Offset within the specified EEPROM page to begin read, in bytes.
- * @length: Number of bytes to read.
- * @page: Page number to read from.
- * @bank: Page bank number to read from, if applicable by EEPROM spec.
+ * struct ethtool_module_eeprom - plug-in module EEPROM read / write parameters
+ * @offset: When @offset is 0-127, it is used as an address to the Lower Memory
+ * (@page must be 0). Otherwise, it is used as an address to the
+ * Upper Memory.
+ * @length: Number of bytes to read / write.
+ * @page: Page number.
+ * @bank: Bank number, if supported by EEPROM spec.
* @i2c_address: I2C address of a page. Value less than 0x7f expected. Most
* EEPROMs use 0x50 or 0x51.
* @data: Pointer to buffer with EEPROM data of @length size.
- *
- * This can be used to manage pages during EEPROM dump in ethtool and pass
- * required information to the driver.
*/
struct ethtool_module_eeprom {
u32 offset;
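As a hedged example of the parameters described above, reading from upper-memory page 3 of a module at the common 0x50 address might look like this (buffer and length come from the caller, and the driver is assumed to implement the op):

  static int my_read_module_page(struct net_device *dev, u8 *buf, u32 len,
                                 struct netlink_ext_ack *extack)
  {
          const struct ethtool_module_eeprom page = {
                  .offset         = 128,  /* >= 128 addresses the Upper Memory */
                  .length         = len,
                  .page           = 3,
                  .bank           = 0,
                  .i2c_address    = 0x50,
                  .data           = buf,
          };

          return dev->ethtool_ops->get_module_eeprom_by_page(dev, &page, extack);
  }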
@@ -662,6 +707,22 @@ struct ethtool_rxfh_param {
};
/**
+ * struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info
+ * @cmd: command number = %ETHTOOL_GET_TS_INFO
+ * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
+ * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ */
+struct kernel_ethtool_ts_info {
+ u32 cmd;
+ u32 so_timestamping;
+ int phc_index;
+ enum hwtstamp_tx_types tx_types;
+ enum hwtstamp_rx_filters rx_filters;
+};
+
+/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
@@ -669,6 +730,16 @@ struct ethtool_rxfh_param {
* contexts.
* @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
* RSS.
+ * @rxfh_indir_space: max size of RSS indirection tables, if indirection table
+ * size as returned by @get_rxfh_indir_size may change during lifetime
+ * of the device. Leave as 0 if the table size is constant.
+ * @rxfh_key_space: same as @rxfh_indir_space, but for the key.
+ * @rxfh_priv_size: size of the driver private data area the core should
+ * allocate for an RSS context (in &struct ethtool_rxfh_context).
+ * @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this
+ * is zero then the core may choose any (nonzero) ID, otherwise the core
+ * will only use IDs strictly less than this value, as the @rss_context
+ * argument to @create_rxfh_context and friends.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @supported_ring_params: supported ring params.
* @get_drvinfo: Report driver/device information. Modern drivers no
@@ -765,6 +836,32 @@ struct ethtool_rxfh_param {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
+ * @create_rxfh_context: Create a new RSS context with the specified RX flow
+ * hash indirection table, hash key, and hash function.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that the indir table, hkey and hfunc are not yet populated as
+ * of this call. The driver does not need to update these; the core
+ * will do so if this op succeeds.
+ * However, if @rxfh.indir is set to %NULL, the driver must update the
+ * indir table in @ctx with the (default or inherited) table actually in
+ * use; similarly, if @rxfh.key is %NULL, @rxfh.hfunc is
+ * %ETH_RSS_HASH_NO_CHANGE, or @rxfh.input_xfrm is %RXH_XFRM_NO_CHANGE,
+ * the driver should update the corresponding information in @ctx.
+ * If the driver provides this method, it must also provide
+ * @modify_rxfh_context and @remove_rxfh_context.
+ * Returns a negative error code or zero.
+ * @modify_rxfh_context: Reconfigure the specified RSS context. Allows setting
+ * the contents of the RX flow hash indirection table, hash key, and/or
+ * hash function associated with the given context.
+ * Parameters which are set to %NULL or zero will remain unchanged.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that it will still contain the *old* settings. The driver does
+ * not need to update these; the core will do so if this op succeeds.
+ * Returns a negative error code or zero. An error code must be returned
+ * if at least one unsupported change was requested.
+ * @remove_rxfh_context: Remove the specified RSS context.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx.
+ * Returns a negative error code or zero.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -822,6 +919,8 @@ struct ethtool_rxfh_param {
* @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from
* specified page. Returns a negative error code or the amount of bytes
* read.
+ * @set_module_eeprom_by_page: Write to a region of plug-in module EEPROM,
+ * from kernel space only. Returns a negative error code or zero.
* @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics.
* @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics.
* @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
@@ -852,6 +951,10 @@ struct ethtool_ops {
u32 cap_link_lanes_supported:1;
u32 cap_rss_ctx_supported:1;
u32 cap_rss_sym_xor_supported:1;
+ u32 rxfh_indir_space;
+ u16 rxfh_key_space;
+ u16 rxfh_priv_size;
+ u32 rxfh_max_context_id;
u32 supported_coalesce_params;
u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
@@ -914,13 +1017,25 @@ struct ethtool_ops {
int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
struct netlink_ext_ack *extack);
+ int (*create_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*modify_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*remove_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
int (*get_dump_data)(struct net_device *,
struct ethtool_dump *, void *);
int (*set_dump)(struct net_device *, struct ethtool_dump *);
- int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+ int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *);
void (*get_ts_stats)(struct net_device *dev,
struct ethtool_ts_stats *ts_stats);
int (*get_module_info)(struct net_device *,
@@ -956,6 +1071,9 @@ struct ethtool_ops {
int (*get_module_eeprom_by_page)(struct net_device *dev,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+ int (*set_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
void (*get_eth_phy_stats)(struct net_device *dev,
struct ethtool_eth_phy_stats *phy_stats);
void (*get_eth_mac_stats)(struct net_device *dev,
@@ -998,6 +1116,21 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd,
u32 *dev_speed, u8 *dev_duplex);
+/**
+ * struct ethtool_netdev_state - per-netdevice state for ethtool features
+ * @rss_ctx: XArray of custom RSS contexts
+ * @rss_lock: Protects entries in @rss_ctx. May be taken from
+ * within RTNL.
+ * @wol_enabled: Wake-on-LAN is enabled
+ * @module_fw_flash_in_progress: Module firmware flashing is in progress.
+ */
+struct ethtool_netdev_state {
+ struct xarray rss_ctx;
+ struct mutex rss_lock;
+ unsigned wol_enabled:1;
+ unsigned module_fw_flash_in_progress:1;
+};
+
struct phy_device;
struct phy_tdr_config;
struct phy_plca_cfg;
@@ -1063,7 +1196,8 @@ int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
/* Some generic methods drivers may use in their ethtool_ops */
u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+int ethtool_op_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *eti);
/**
* ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
@@ -1112,7 +1246,8 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
* @info: buffer to hold the result
* Returns zero on success, non-zero otherwise.
*/
-int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info);
+int ethtool_get_ts_info_by_layer(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
/**
* ethtool_sprintf - Write formatted string to ethtool string data
@@ -1155,4 +1290,24 @@ struct ethtool_forced_speed_map {
void
ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size);
+
+/* C33 PSE extended state and substate. */
+struct ethtool_c33_pse_ext_state_info {
+ enum ethtool_c33_pse_ext_state c33_pse_ext_state;
+ union {
+ enum ethtool_c33_pse_ext_substate_error_condition error_condition;
+ enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable;
+ enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted;
+ enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim;
+ enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected;
+ enum ethtool_c33_pse_ext_substate_power_not_available power_not_available;
+ enum ethtool_c33_pse_ext_substate_short_detected short_detected;
+ u32 __c33_pse_ext_substate;
+ };
+};
+
+struct ethtool_c33_pse_pw_limit_range {
+ u32 min;
+ u32 max;
+};
#endif /* _LINUX_ETHTOOL_H */
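With get_ts_info() now taking the kernel-internal structure, a driver handler is unchanged apart from the type; a sketch for hardware that stamps all packets (PHC lookup is elided and would normally fill @phc_index):

  static int my_get_ts_info(struct net_device *dev,
                            struct kernel_ethtool_ts_info *info)
  {
          info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                  SOF_TIMESTAMPING_RX_HARDWARE |
                                  SOF_TIMESTAMPING_RAW_HARDWARE;
          info->phc_index = -1;   /* or ptp_clock_index() of the device PHC */
          info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
          info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
                             BIT(HWTSTAMP_FILTER_ALL);
          return 0;
  }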
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index bb37ad5cc954..893a1d21dc1c 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -158,6 +158,7 @@ struct fid {
#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
+#define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */
/**
* struct export_operations - for nfsd to communicate with file systems
@@ -305,6 +306,7 @@ static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
struct fid *fid, int fh_len,
int fileid_type,
+ unsigned int flags,
int (*acceptable)(void *, struct dentry *),
void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
diff --git a/include/linux/file.h b/include/linux/file.h
index 45d0f4800abd..237931f20739 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -97,6 +97,26 @@ extern void put_unused_fd(unsigned int fd);
DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
get_unused_fd_flags(flags), unsigned flags)
+/*
+ * take_fd() will take care to set @fd to -EBADF ensuring that
+ * CLASS(get_unused_fd) won't call put_unused_fd(). This makes it
+ * easier to rely on CLASS(get_unused_fd):
+ *
+ * struct file *f;
+ *
+ * CLASS(get_unused_fd, fd)(O_CLOEXEC);
+ * if (fd < 0)
+ * return fd;
+ *
+ * f = dentry_open(&path, O_RDONLY, current_cred());
+ * if (IS_ERR(f))
+ *		return PTR_ERR(f);
+ *
+ * fd_install(fd, f);
+ * return take_fd(fd);
+ */
+#define take_fd(fd) __get_and_null(fd, -EBADF)
+
extern void fd_install(unsigned int fd, struct file *file);
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0f12cf01070e..b6672ff61407 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -733,21 +733,128 @@ struct bpf_nh_params {
};
};
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+#define BPF_RI_F_RI_INIT BIT(1)
+#define BPF_RI_F_CPU_MAP_INIT BIT(2)
+#define BPF_RI_F_DEV_MAP_INIT BIT(3)
+#define BPF_RI_F_XSK_MAP_INIT BIT(4)
+
struct bpf_redirect_info {
u64 tgt_index;
void *tgt_value;
struct bpf_map *map;
u32 flags;
- u32 kern_flags;
u32 map_id;
enum bpf_map_type map_type;
struct bpf_nh_params nh;
+ u32 kern_flags;
};
-DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+struct bpf_net_context {
+ struct bpf_redirect_info ri;
+ struct list_head cpu_map_flush_list;
+ struct list_head dev_map_flush_list;
+ struct list_head xskmap_map_flush_list;
+};
-/* flags for bpf_redirect_info kern_flags */
-#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk->bpf_net_context != NULL)
+ return NULL;
+ bpf_net_ctx->ri.kern_flags = 0;
+
+ tsk->bpf_net_context = bpf_net_ctx;
+ return bpf_net_ctx;
+}
+
+static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
+{
+ if (bpf_net_ctx)
+ current->bpf_net_context = NULL;
+}
+
+static inline struct bpf_net_context *bpf_net_ctx_get(void)
+{
+ return current->bpf_net_context;
+}
+
+static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
+ memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
+ }
+
+ return &bpf_net_ctx->ri;
+}
+
+static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->cpu_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->dev_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->xskmap_map_flush_list;
+}
+
+static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
+ struct list_head **lh_dev,
+ struct list_head **lh_xsk)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+ u32 kern_flags = bpf_net_ctx->ri.kern_flags;
+ struct list_head *lh;
+
+ *lh_map = *lh_dev = *lh_xsk = NULL;
+
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return;
+
+ lh = &bpf_net_ctx->dev_map_flush_list;
+ if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
+ *lh_dev = lh;
+
+ lh = &bpf_net_ctx->cpu_map_flush_list;
+ if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
+ *lh_map = lh;
+
+ lh = &bpf_net_ctx->xskmap_map_flush_list;
+ if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
+ kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
+ *lh_xsk = lh;
+}
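A minimal sketch of the intended calling convention: the context lives on the caller's stack around a program invocation, and clearing is a no-op when an outer caller already installed one (the run function is a placeholder):

  static u32 my_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp)
  {
          struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
          u32 act;

          /* Returns NULL if a context is already installed on this task. */
          bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

          act = bpf_prog_run_xdp(prog, xdp);

          /* Only clears if we installed the context above. */
          bpf_net_ctx_clear(bpf_net_ctx);
          return act;
  }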
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
@@ -1018,25 +1125,23 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
-void bpf_clear_redirect_map(struct bpf_map *map);
-
static inline bool xdp_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}
static inline void xdp_set_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}
static inline void xdp_clear_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}
@@ -1129,8 +1234,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
struct bpf_binary_header **rw_hdr,
u8 **rw_image,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
-int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
- struct bpf_binary_header *ro_header,
+int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header);
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header);
@@ -1208,18 +1312,18 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- const char *ret = __bpf_address_lookup(addr, size, off, sym);
+ int ret = __bpf_address_lookup(addr, size, off, sym);
if (ret && modname)
*modname = NULL;
@@ -1263,11 +1367,11 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-static inline const char *
+static inline int
__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
- return NULL;
+ return 0;
}
static inline bool is_bpf_text_address(unsigned long addr)
@@ -1286,11 +1390,11 @@ static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
return NULL;
}
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
@@ -1406,7 +1510,7 @@ struct bpf_sock_ops_kern {
struct bpf_sysctl_kern {
struct ctl_table_header *head;
- struct ctl_table *table;
+ const struct ctl_table *table;
void *cur_val;
size_t cur_len;
void *new_val;
@@ -1592,7 +1696,7 @@ static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 inde
u64 flags, const u64 flag_mask,
void *lookup_elem(struct bpf_map *map, u32 key))
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
/* Lower bits of the flags are used as return code on lookup failure */
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
index 82687e07a7c2..c28a6b9c3b2d 100644
--- a/include/linux/firmware/cirrus/cs_dsp.h
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -42,6 +42,16 @@
#define CS_DSP_ACKED_CTL_MIN_VALUE 0
#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+/*
+ * Write sequence operation codes
+ */
+#define CS_DSP_WSEQ_FULL 0x00
+#define CS_DSP_WSEQ_ADDR8 0x02
+#define CS_DSP_WSEQ_L16 0x04
+#define CS_DSP_WSEQ_H16 0x05
+#define CS_DSP_WSEQ_UNLOCK 0xFD
+#define CS_DSP_WSEQ_END 0xFF
+
/**
* struct cs_dsp_region - Describes a logical memory region in DSP address space
* @type: Memory region type
@@ -259,6 +269,23 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
const char *cs_dsp_mem_region_name(unsigned int type);
/**
+ * struct cs_dsp_wseq - Describes a write sequence
+ * @ctl: Write sequence cs_dsp control
+ * @ops: Operations contained within
+ */
+struct cs_dsp_wseq {
+ struct cs_dsp_coeff_ctl *ctl;
+ struct list_head ops;
+};
+
+int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int num_wseqs);
+int cs_dsp_wseq_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, u32 addr, u32 data,
+ u8 op_code, bool update);
+int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
+ const struct reg_sequence *reg_seq, int num_regs,
+ u8 op_code, bool update);
+
+/**
* struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
* @data: Pointer to underlying buffer memory
* @max: Pointer to end of the buffer memory
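A hedged sketch of driver usage, assuming the caller has already located the firmware's write-sequence coefficient control and stored it in @ctl as documented above; the register address and value are invented:

  static struct cs_dsp_wseq my_wseq;

  static int my_init_wseq(struct cs_dsp *dsp, struct cs_dsp_coeff_ctl *ctl)
  {
          my_wseq.ctl = ctl;      /* the firmware's write-sequence control */

          return cs_dsp_wseq_init(dsp, &my_wseq, 1);
  }

  static int my_patch_wseq(struct cs_dsp *dsp)
  {
          /* Add or update a full 32-bit write of 0x1 to register 0x2018. */
          return cs_dsp_wseq_write(dsp, &my_wseq, 0x2018, 0x1,
                                   CS_DSP_WSEQ_FULL, true);
  }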
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 366243ee9609..1dc5b3b50aa9 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -73,9 +73,9 @@ static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
- * @req: DMA address of the request buffer sent to the app.
+ * @req: Request buffer sent to the app (must be TZ memory).
* @req_size: Size of the request buffer.
- * @rsp: DMA address of the response buffer, written to by the app.
+ * @rsp: Response buffer, written to by the app (must be TZ memory).
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given client and read
@@ -90,8 +90,8 @@ static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
* Return: Zero on success, nonzero on failure.
*/
static inline int qcom_qseecom_app_send(struct qseecom_client *client,
- dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size)
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
{
return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
}
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index aaa19f93ac43..9f14976399ab 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -115,11 +115,40 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
int qcom_scm_lmh_profile_change(u32 profile_id);
bool qcom_scm_lmh_dcvsh_available(void);
+/*
+ * Request TZ to program set of access controlled registers necessary
+ * irrespective of any features
+ */
+#define QCOM_SCM_GPU_ALWAYS_EN_REQ BIT(0)
+/*
+ * Request TZ to program BCL id to access controlled register when BCL is
+ * enabled
+ */
+#define QCOM_SCM_GPU_BCL_EN_REQ BIT(1)
+/*
+ * Request TZ to program set of access controlled register for CLX feature
+ * when enabled
+ */
+#define QCOM_SCM_GPU_CLX_EN_REQ BIT(2)
+/*
+ * Request TZ to program tsense ids to access controlled registers for reading
+ * gpu temperature sensors
+ */
+#define QCOM_SCM_GPU_TSENSE_EN_REQ BIT(3)
+
+int qcom_scm_gpu_init_regs(u32 gpu_req);
+
+int qcom_scm_shm_bridge_enable(void);
+int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
+ u64 ipfn_and_s_perm_flags, u64 size_and_flags,
+ u64 ns_vmids, u64 *handle);
+int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle);
+
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
-int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size);
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
+ void *rsp, size_t rsp_size);
#else /* CONFIG_QCOM_QSEECOM */
@@ -129,8 +158,8 @@ static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
}
static inline int qcom_scm_qseecom_app_send(u32 app_id,
- dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size)
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
{
return -EINVAL;
}
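For illustration, a GPU driver could request the always-required register programming plus the BCL id in one call; which flags a given platform needs is device-specific:

  static int my_gpu_tz_init(struct device *dev)
  {
          int ret;

          ret = qcom_scm_gpu_init_regs(QCOM_SCM_GPU_ALWAYS_EN_REQ |
                                       QCOM_SCM_GPU_BCL_EN_REQ);
          if (ret)
                  dev_err(dev, "TZ refused GPU register init: %d\n", ret);

          return ret;
  }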
diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..b83b63a0c049
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_H
+#define __QCOM_TZMEM_H
+
+#include <linux/cleanup.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+struct device;
+struct qcom_tzmem_pool;
+
+/**
+ * enum qcom_tzmem_policy - Policy for pool growth.
+ */
+enum qcom_tzmem_policy {
+ /**< Static pool, never grow above initial size. */
+ QCOM_TZMEM_POLICY_STATIC = 1,
+ /**< When out of memory, add increment * current size of memory. */
+ QCOM_TZMEM_POLICY_MULTIPLIER,
+ /**< When out of memory add as much as is needed until max_size. */
+ QCOM_TZMEM_POLICY_ON_DEMAND,
+};
+
+/**
+ * struct qcom_tzmem_pool_config - TZ memory pool configuration.
+ * @initial_size: Number of bytes to allocate for the pool during its creation.
+ * @policy: Pool size growth policy.
+ * @increment: Used with policies that allow pool growth.
+ * @max_size: Size above which the pool will never grow.
+ */
+struct qcom_tzmem_pool_config {
+ size_t initial_size;
+ enum qcom_tzmem_policy policy;
+ size_t increment;
+ size_t max_size;
+};
+
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config);
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool);
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+ const struct qcom_tzmem_pool_config *config);
+
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp);
+void qcom_tzmem_free(void *ptr);
+
+DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T))
+
+phys_addr_t qcom_tzmem_to_phys(void *ptr);
+
+#endif /* __QCOM_TZMEM */
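Tying this into the qseecom change above, a hedged sketch: allocate the request and response buffers from a TZ memory pool created elsewhere and hand them straight to qcom_qseecom_app_send(); the 64-byte sizes and the pool pointer are placeholders:

  static int my_query_app(struct qseecom_client *client,
                          struct qcom_tzmem_pool *pool)
  {
          void *req __free(qcom_tzmem) = qcom_tzmem_alloc(pool, 64, GFP_KERNEL);
          void *rsp __free(qcom_tzmem) = qcom_tzmem_alloc(pool, 64, GFP_KERNEL);

          if (!req || !rsp)
                  return -ENOMEM;

          /* Fill *req with the app-specific command here. */

          return qcom_qseecom_app_send(client, req, 64, rsp, 64);
  }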
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
index 82e8254b0f80..645dd34155e6 100644
--- a/include/linux/firmware/xlnx-event-manager.h
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
@@ -7,6 +12,11 @@
#define CB_MAX_PAYLOAD_SIZE (4U) /*In payload maximum 32bytes */
+#define EVENT_SUBSYSTEM_RESTART (4U)
+
+#define PM_DEV_ACPU_0_0 (0x1810c0afU)
+#define PM_DEV_ACPU_0 (0x1810c003U)
+
/************************** Exported Function *****************************/
typedef void (*event_cb_func_t)(const u32 *payload, void *data);
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 1a069a56c961..d7d07afc0532 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -52,6 +52,9 @@
#define API_ID_MASK GENMASK(7, 0)
#define MODULE_ID_MASK GENMASK(11, 8)
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK 0xFFFFU
+
/* ATF only commands */
#define TF_A_PM_REGISTER_SGI 0xa04
#define PM_GET_TRUSTZONE_VERSION 0xa03
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 7e0f340bf363..0d99bf11d260 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -601,11 +601,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
/*
* Warn when writing beyond destination field size.
*
- * We must ignore p_size_field == 0 for existing 0-element
- * fake flexible arrays, until they are all converted to
- * proper flexible arrays.
- *
- * The implementation of __builtin_*object_size() behaves
+ * Note the implementation of __builtin_*object_size() behaves
* like sizeof() when not directly referencing a flexible
* array member, which means there will be many bounds checks
* that will appear at run-time, without a way for them to be
@@ -613,7 +609,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
* is specifically the flexible array member).
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
*/
- if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+ if (p_size_field != SIZE_MAX &&
p_size != p_size_field && p_size_field < size)
return true;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0283cf366c2a..fd34b5755c0b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -125,8 +125,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_EXEC ((__force fmode_t)(1 << 5))
/* File writes are restricted (block device specific) */
#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6))
+/* File supports atomic writes */
+#define FMODE_CAN_ATOMIC_WRITE ((__force fmode_t)(1 << 7))
-/* FMODE_* bits 7 to 8 */
+/* FMODE_* bit 8 */
/* 32bit hashes as llseek() offset (for directories) */
#define FMODE_32BITHASH ((__force fmode_t)(1 << 9))
@@ -317,6 +319,7 @@ struct readahead_control;
#define IOCB_SYNC (__force int) RWF_SYNC
#define IOCB_NOWAIT (__force int) RWF_NOWAIT
#define IOCB_APPEND (__force int) RWF_APPEND
+#define IOCB_ATOMIC (__force int) RWF_ATOMIC
/* non-RWF related bits - start at 16 */
#define IOCB_EVENTFD (1 << 16)
@@ -351,6 +354,7 @@ struct readahead_control;
{ IOCB_SYNC, "SYNC" }, \
{ IOCB_NOWAIT, "NOWAIT" }, \
{ IOCB_APPEND, "APPEND" }, \
+ { IOCB_ATOMIC, "ATOMIC"}, \
{ IOCB_EVENTFD, "EVENTFD"}, \
{ IOCB_DIRECT, "DIRECT" }, \
{ IOCB_WRITE, "WRITE" }, \
@@ -660,9 +664,13 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 __i_atime;
- struct timespec64 __i_mtime;
- struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
+ time64_t i_atime_sec;
+ time64_t i_mtime_sec;
+ time64_t i_ctime_sec;
+ u32 i_atime_nsec;
+ u32 i_mtime_nsec;
+ u32 i_ctime_nsec;
+ u32 i_generation;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
@@ -719,10 +727,10 @@ struct inode {
unsigned i_dir_seq;
};
- __u32 i_generation;
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
+ /* 32-bit hole reserved for expanding i_fsnotify_mask */
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
@@ -1538,23 +1546,27 @@ struct timespec64 inode_set_ctime_current(struct inode *inode);
static inline time64_t inode_get_atime_sec(const struct inode *inode)
{
- return inode->__i_atime.tv_sec;
+ return inode->i_atime_sec;
}
static inline long inode_get_atime_nsec(const struct inode *inode)
{
- return inode->__i_atime.tv_nsec;
+ return inode->i_atime_nsec;
}
static inline struct timespec64 inode_get_atime(const struct inode *inode)
{
- return inode->__i_atime;
+ struct timespec64 ts = { .tv_sec = inode_get_atime_sec(inode),
+ .tv_nsec = inode_get_atime_nsec(inode) };
+
+ return ts;
}
static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
struct timespec64 ts)
{
- inode->__i_atime = ts;
+ inode->i_atime_sec = ts.tv_sec;
+ inode->i_atime_nsec = ts.tv_nsec;
return ts;
}
@@ -1563,28 +1575,32 @@ static inline struct timespec64 inode_set_atime(struct inode *inode,
{
struct timespec64 ts = { .tv_sec = sec,
.tv_nsec = nsec };
+
return inode_set_atime_to_ts(inode, ts);
}
static inline time64_t inode_get_mtime_sec(const struct inode *inode)
{
- return inode->__i_mtime.tv_sec;
+ return inode->i_mtime_sec;
}
static inline long inode_get_mtime_nsec(const struct inode *inode)
{
- return inode->__i_mtime.tv_nsec;
+ return inode->i_mtime_nsec;
}
static inline struct timespec64 inode_get_mtime(const struct inode *inode)
{
- return inode->__i_mtime;
+ struct timespec64 ts = { .tv_sec = inode_get_mtime_sec(inode),
+ .tv_nsec = inode_get_mtime_nsec(inode) };
+ return ts;
}
static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
struct timespec64 ts)
{
- inode->__i_mtime = ts;
+ inode->i_mtime_sec = ts.tv_sec;
+ inode->i_mtime_nsec = ts.tv_nsec;
return ts;
}
@@ -1598,23 +1614,27 @@ static inline struct timespec64 inode_set_mtime(struct inode *inode,
static inline time64_t inode_get_ctime_sec(const struct inode *inode)
{
- return inode->__i_ctime.tv_sec;
+ return inode->i_ctime_sec;
}
static inline long inode_get_ctime_nsec(const struct inode *inode)
{
- return inode->__i_ctime.tv_nsec;
+ return inode->i_ctime_nsec;
}
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
- return inode->__i_ctime;
+ struct timespec64 ts = { .tv_sec = inode_get_ctime_sec(inode),
+ .tv_nsec = inode_get_ctime_nsec(inode) };
+
+ return ts;
}
static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
struct timespec64 ts)
{
- inode->__i_ctime = ts;
+ inode->i_ctime_sec = ts.tv_sec;
+ inode->i_ctime_nsec = ts.tv_nsec;
return ts;
}
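
The hunks above replace the embedded struct timespec64 members with split i_*_sec/i_*_nsec fields, so filesystem code is expected to go through the accessors exclusively. A minimal, hedged sketch of that usage follows; it relies only on the helpers visible in this patch plus the pre-existing current_time() helper, and the function name is illustrative:

#include <linux/fs.h>
#include <linux/printk.h>

/* Hedged sketch: touch mtime/ctime via the accessors only, never raw fields. */
static void examplefs_touch(struct inode *inode)
{
	struct timespec64 now = current_time(inode);

	inode_set_mtime_to_ts(inode, now);
	inode_set_ctime_to_ts(inode, now);

	/* Reads also go through the accessors. */
	pr_debug("mtime=%lld.%09ld\n",
		 (long long)inode_get_mtime_sec(inode),
		 inode_get_mtime_nsec(inode));
}
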
@@ -1926,6 +1946,8 @@ void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
extern bool may_open_dev(const struct path *path);
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
const struct inode *dir, umode_t mode);
+bool in_group_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsgid_t vfsgid);
/*
* This is the "filldir" function type, used by readdir() to let
@@ -2685,7 +2707,7 @@ static inline struct file *file_clone_open(struct file *file)
}
extern int filp_close(struct file *, fl_owner_t id);
-extern struct filename *getname_flags(const char __user *, int, int *);
+extern struct filename *getname_flags(const char __user *, int);
extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
@@ -3029,7 +3051,12 @@ extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *),
void *data);
-extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+struct inode *iget5_locked(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
+struct inode *iget5_locked_rcu(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
unsigned long,
@@ -3231,6 +3258,9 @@ extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
+void generic_fill_statx_atomic_writes(struct kstat *stat,
+ unsigned int unit_min,
+ unsigned int unit_max);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3351,6 +3381,10 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
extern void generic_set_sb_d_ops(struct super_block *sb);
+extern int generic_ci_match(const struct inode *parent,
+ const struct qstr *name,
+ const struct qstr *folded_name,
+ const u8 *de_name, u32 de_name_len);
static inline bool sb_has_encoding(const struct super_block *sb)
{
@@ -3403,7 +3437,8 @@ static inline int iocb_flags(struct file *file)
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
+ int rw_type)
{
int kiocb_flags = 0;
@@ -3422,6 +3457,12 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
return -EOPNOTSUPP;
kiocb_flags |= IOCB_NOIO;
}
+ if (flags & RWF_ATOMIC) {
+ if (rw_type != WRITE)
+ return -EOPNOTSUPP;
+ if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE))
+ return -EOPNOTSUPP;
+ }
kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
kiocb_flags |= IOCB_DSYNC;
@@ -3436,20 +3477,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
return 0;
}
-static inline ino_t parent_ino(struct dentry *dentry)
-{
- ino_t res;
-
- /*
- * Don't strictly need d_lock here? If the parent ino could change
- * then surely we'd have a deeper race in the caller?
- */
- spin_lock(&dentry->d_lock);
- res = dentry->d_parent->d_inode->i_ino;
- spin_unlock(&dentry->d_lock);
- return res;
-}
-
/* Transaction based IO helpers */
/*
@@ -3574,7 +3601,7 @@ static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR);
+ d_parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
@@ -3613,4 +3640,23 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
+static inline bool vfs_empty_path(int dfd, const char __user *path)
+{
+ char c;
+
+ if (dfd < 0)
+ return false;
+
+ /* We now allow NULL to be used for empty path. */
+ if (!path)
+ return true;
+
+ if (unlikely(get_user(c, path)))
+ return false;
+
+ return !c;
+}
+
+bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos);
+
#endif /* _LINUX_FS_H */
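
Tying the fs.h changes together (FMODE_CAN_ATOMIC_WRITE, IOCB_ATOMIC and the RWF_ATOMIC check in kiocb_set_rw_flags()), here is a hedged userspace sketch of issuing a torn-write-protected write. It assumes a kernel and filesystem that actually advertise atomic writes for the file, and it falls back to a local RWF_ATOMIC definition when the installed uapi headers predate the flag; the fallback value, alignment and size constraints should be checked against the statx()-reported atomic write unit limits.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_ATOMIC
/* Assumption: value as proposed for the new uapi <linux/fs.h>; verify locally. */
#define RWF_ATOMIC	0x00000040
#endif

/*
 * Hedged sketch: one aligned, single-iovec write no larger than the
 * atomic write unit reported via statx() for this file descriptor.
 */
static int atomic_write(int fd, const void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	ssize_t ret = pwritev2(fd, &iov, 1, off, RWF_ATOMIC);

	return ret == (ssize_t)len ? 0 : -1;
}
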
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index d3350979115f..6cf713a7e6c6 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -28,7 +28,7 @@ typedef int fs_param_type(struct p_log *,
*/
fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
- fs_param_is_path, fs_param_is_fd;
+ fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid;
/*
* Specification of the type of value a parameter wants.
@@ -57,6 +57,8 @@ struct fs_parse_result {
int int_32; /* For spec_s32/spec_enum */
unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */
u64 uint_64; /* For spec_u64 */
+ kuid_t uid;
+ kgid_t gid;
};
};
@@ -131,6 +133,8 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+#define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL)
+#define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL)
/* String parameter that allows empty argument */
#define fsparam_string_empty(NAME, OPT) \
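
A hedged sketch of how the new fsparam_uid()/fsparam_gid() specifiers and the uid/gid members of struct fs_parse_result are meant to be consumed by a filesystem's mount-option parser; "examplefs", its option enum and the commented-out context fields are purely illustrative. The idea is that fs_parse() hands back an already-validated kuid_t/kgid_t instead of a raw u32.

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_uid, Opt_gid };

static const struct fs_parameter_spec examplefs_param_specs[] = {
	fsparam_uid("uid", Opt_uid),
	fsparam_gid("gid", Opt_gid),
	{}
};

static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, examplefs_param_specs, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		/* result.uid already went through the fs_param_is_uid type */
		/* ctx->uid = result.uid;  (ctx being fc->fs_private, illustrative) */
		break;
	case Opt_gid:
		/* ctx->gid = result.gid; */
		break;
	}
	return 0;
}
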
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index bdf7f3eddf0a..4c91a019972b 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -19,6 +19,7 @@
enum fscache_cache_trace;
enum fscache_cookie_trace;
enum fscache_access_trace;
+enum fscache_volume_trace;
enum fscache_cache_state {
FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
@@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
extern void fscache_io_error(struct fscache_cache *cache);
+extern struct fscache_volume *
+fscache_try_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+extern void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
extern void fscache_end_volume_access(struct fscache_volume *volume,
struct fscache_cookie *cookie,
enum fscache_access_trace why);
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 4da80e92f804..278620e063ab 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -112,7 +112,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
{
const struct path *path;
- if (file->f_mode & FMODE_NONOTIFY)
+ /*
+ * FMODE_NONOTIFY fds are generated by fanotify itself and should not
+ * generate new events. We also don't want to generate events for
+ * FMODE_PATH fds (that would only be open & close events) as they are
+ * just handle creation / destruction events and not "real" file events.
+ */
+ if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
return 0;
path = &file->f_path;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 4dd6143db271..8be029bc50b1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -594,12 +594,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
+ __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
+
/* FS_EVENT_ON_CHILD is set if the inode may care */
- if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
+ if (!(parent_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
- return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
+ return parent_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
@@ -613,7 +615,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/*
* Serialisation of setting PARENT_WATCHED on the dentries is provided
* by d_lock. If inotify_inode_watched changes after we have taken
- * d_lock, the following __fsnotify_update_child_dentry_flags call will
+ * d_lock, the following fsnotify_set_children_dentry_flags call will
* find our entry, so it will spin until we complete here, and update
* us with the new state.
*/
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 800995c425e0..b792274189a3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -86,15 +86,15 @@ struct ftrace_hash;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
-const char *
+int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
#else
-static inline const char *
+static inline int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
#endif
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 56ac7e7a2889..063f71b18a7c 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * <linux/gpio.h>
+ * NOTE: This header *must not* be included.
*
* This is the LEGACY GPIO bulk include file, including legacy APIs. It is
* used for GPIO drivers still referencing the global GPIO numberspace,
@@ -16,8 +16,6 @@
struct device;
-/* see Documentation/driver-api/gpio/legacy.rst */
-
/* make these flag values available regardless of GPIO kconfig options */
#define GPIOF_DIR_OUT (0 << 0)
#define GPIOF_DIR_IN (1 << 0)
@@ -121,8 +119,6 @@ static inline int gpio_to_irq(unsigned gpio)
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
-
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
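
Given the stronger "must not be included" wording for the legacy <linux/gpio.h> above, a hedged sketch of the descriptor-based consumer API that new code is expected to use instead; the "reset" line name and the probe function are illustrative placeholders.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* Hedged sketch: descriptor API instead of legacy gpio_request_one()/gpio_set_value(). */
static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);	/* assert reset */
	return 0;
}
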
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 0032bb6e7d8f..2dd7cb9cc270 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -632,10 +632,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL)
#endif /* CONFIG_LOCKDEP */
-static inline int gpiochip_add(struct gpio_chip *gc)
-{
- return gpiochip_add_data(gc, NULL);
-}
void gpiochip_remove(struct gpio_chip *gc);
int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
void *data, struct lock_class_key *lock_key,
@@ -791,7 +787,6 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
struct gpio_desc *
gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum);
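
With the gpiochip_add() convenience wrapper removed above, providers call gpiochip_add_data() (or the devm_ variant) directly and pass NULL when they have no driver data. A minimal hedged sketch, with illustrative chip fields:

#include <linux/gpio/driver.h>

static int example_register_chip(struct device *dev, struct gpio_chip *gc)
{
	gc->label  = "example-gpio";
	gc->parent = dev;
	gc->base   = -1;	/* ask for dynamic GPIO base allocation */
	gc->ngpio  = 8;

	/* Previously gpiochip_add(gc); now driver data is passed explicitly. */
	return gpiochip_add_data(gc, NULL);
}
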
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8e06d89698e6..1533c9dcd3a6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1125,6 +1125,13 @@ int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
void hid_hw_request(struct hid_device *hdev,
struct hid_report *report, enum hid_class_request reqtype);
+int __hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ __u64 source, bool from_bpf);
+int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, __u64 source,
+ bool from_bpf);
int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype,
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index eec2592dec12..9ca96fc90449 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -4,7 +4,8 @@
#define __HID_BPF_H
#include <linux/bpf.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/srcu.h>
#include <uapi/linux/hid.h>
struct hid_device;
@@ -20,15 +21,9 @@ struct hid_device;
* struct hid_bpf_ctx - User accessible data for all HID programs
*
* ``data`` is not directly accessible from the context. We need to issue
- * a call to ``hid_bpf_get_data()`` in order to get a pointer to that field.
+ * a call to hid_bpf_get_data() in order to get a pointer to that field.
*
- * All of these fields are currently read-only.
- *
- * @index: program index in the jump table. No special meaning (a smaller index
- * doesn't mean the program will be executed before another program with
- * a bigger index).
- * @hid: the ``struct hid_device`` representing the device itself
- * @report_type: used for ``hid_bpf_device_event()``
+ * @hid: the &struct hid_device representing the device itself
* @allocated_size: Allocated size of data.
*
* This is how much memory is available and can be requested
@@ -45,76 +40,147 @@ struct hid_device;
* ``size`` must always be less than or equal to ``allocated_size`` (it is enforced
* once all BPF programs have been run).
* @retval: Return value of the previous program.
+ *
+ * ``hid`` and ``allocated_size`` are read-only, ``size`` and ``retval`` are read-write.
*/
struct hid_bpf_ctx {
- __u32 index;
- const struct hid_device *hid;
+ struct hid_device *hid;
__u32 allocated_size;
- enum hid_report_type report_type;
union {
__s32 retval;
__s32 size;
};
};
-/**
- * enum hid_bpf_attach_flags - flags used when attaching a HIF-BPF program
- *
- * @HID_BPF_FLAG_NONE: no specific flag is used, the kernel choses where to
- * insert the program
- * @HID_BPF_FLAG_INSERT_HEAD: insert the given program before any other program
- * currently attached to the device. This doesn't
- * guarantee that this program will always be first
- * @HID_BPF_FLAG_MAX: sentinel value, not to be used by the callers
- */
-enum hid_bpf_attach_flags {
- HID_BPF_FLAG_NONE = 0,
- HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
- HID_BPF_FLAG_MAX,
-};
-
-/* Following functions are tracepoints that BPF programs can attach to */
-int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
-int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
-
/*
* Below is HID internal
*/
-/* internal function to call eBPF programs, not to be used by anybody */
-int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx);
-
#define HID_BPF_MAX_PROGS_PER_DEV 64
#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
-/* types of HID programs to attach to */
-enum hid_bpf_prog_type {
- HID_BPF_PROG_TYPE_UNDEF = -1,
- HID_BPF_PROG_TYPE_DEVICE_EVENT, /* an event is emitted from the device */
- HID_BPF_PROG_TYPE_RDESC_FIXUP,
- HID_BPF_PROG_TYPE_MAX,
-};
struct hid_report_enum;
-struct hid_bpf_ops {
+struct hid_ops {
struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
int (*hid_hw_raw_request)(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype,
- enum hid_class_request reqtype);
- int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len);
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+ int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len,
+ u64 source, bool from_bpf);
int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type,
- u8 *data, u32 size, int interrupt);
+ u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
+ bool lock_already_taken);
struct module *owner;
const struct bus_type *bus_type;
};
-extern struct hid_bpf_ops *hid_bpf_ops;
+extern struct hid_ops *hid_ops;
-struct hid_bpf_prog_list {
- u16 prog_idx[HID_BPF_MAX_PROGS_PER_DEV];
- u8 prog_cnt;
+/**
+ * struct hid_bpf_ops - A BPF struct_ops of callbacks allowing to attach HID-BPF
+ * programs to a HID device
+ * @hid_id: the HID unique ID to attach to. This is writable before ``load()``, and
+ * cannot be changed afterwards
+ * @flags: flags used while attaching the struct_ops to the device. Currently the
+ * only available values are %0 and ``BPF_F_BEFORE``.
+ * Writable only before ``load()``
+ */
+struct hid_bpf_ops {
+ /* hid_id needs to stay first so we can easily change it
+ * from userspace.
+ */
+ int hid_id;
+ u32 flags;
+
+ /* private: do not show up in the docs */
+ struct list_head list;
+
+ /* public: rest should show up in the docs */
+
+ /**
+ * @hid_device_event: called whenever an event is coming in from the device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * Return: %0 on success and keep processing; a positive
+ * value to change the size of the incoming buffer; a negative
+ * error code to interrupt the processing of this event
+ *
+ * Context: Interrupt context.
+ */
+ int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+ u64 source);
+
+ /**
+ * @hid_rdesc_fixup: called when the probe function parses the report descriptor
+ * of the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * Return: %0 on success and keep processing; a positive
+ * value to change the size of the incoming buffer; a negative
+ * error code to interrupt the processing of this device
+ */
+ int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+
+ /**
+ * @hid_hw_request: called whenever a hid_hw_raw_request() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * ``reportnum``: the report number, as in hid_hw_raw_request()
+ *
+ * ``rtype``: the report type (``HID_INPUT_REPORT``, ``HID_FEATURE_REPORT``,
+ * ``HID_OUTPUT_REPORT``)
+ *
+ * ``reqtype``: the request
+ *
+ * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+ * kernel itself emitted that call. For hidraw, ``source`` is set
+ * to the associated ``struct file *``.
+ *
+ * Return: %0 to let hid-core keep processing the request; any other value
+ * stops hid-core from processing that request. A positive value should be
+ * the number of bytes returned in the incoming buffer; a negative error
+ * code interrupts the processing of this call.
+ */
+ int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype,
+ u64 source);
+
+ /**
+ * @hid_hw_output_report: called whenever a hid_hw_output_report() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+ * kernel itself emitted that call. For hidraw, ``source`` is set
+ * to the associated ``struct file *``.
+ *
+ * Return: %0 to let hid-core keep processing the request; any other value
+ * stops hid-core from processing that request. A positive value should be
+ * the number of bytes written to the device; a negative error code
+ * interrupts the processing of this call.
+ */
+ int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source);
+
+
+ /* private: do not show up in the docs */
+ struct hid_device *hdev;
};
/* stored in each device */
@@ -124,34 +190,44 @@ struct hid_bpf {
* to this HID device
*/
u32 allocated_data;
-
- struct hid_bpf_prog_list __rcu *progs[HID_BPF_PROG_TYPE_MAX]; /* attached BPF progs */
bool destroyed; /* prevents the assignment of any progs */
- spinlock_t progs_lock; /* protects RCU update of progs */
-};
-
-/* specific HID-BPF link when a program is attached to a device */
-struct hid_bpf_link {
- struct bpf_link link;
- int hid_table_index;
+ struct hid_bpf_ops *rdesc_ops;
+ struct list_head prog_list;
+ struct mutex prog_list_lock; /* protects prog_list update */
+ struct srcu_struct srcu; /* protects prog_list read-only access */
};
#ifdef CONFIG_HID_BPF
u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
- u32 *size, int interrupt);
+ u32 *size, int interrupt, u64 source, bool from_bpf);
+int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf);
int hid_bpf_connect_device(struct hid_device *hdev);
void hid_bpf_disconnect_device(struct hid_device *hdev);
void hid_bpf_destroy_device(struct hid_device *hid);
-void hid_bpf_device_init(struct hid_device *hid);
+int hid_bpf_device_init(struct hid_device *hid);
u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
#else /* CONFIG_HID_BPF */
static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
- u8 *data, u32 *size, int interrupt) { return data; }
+ u8 *data, u32 *size, int interrupt,
+ u64 source, bool from_bpf) { return data; }
+static inline int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf) { return 0; }
+static inline int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf) { return 0; }
static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
-static inline void hid_bpf_device_init(struct hid_device *hid) {}
+static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; }
#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \
((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL))
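
Since struct hid_bpf_ops is now a BPF struct_ops rather than a set of tracing hooks, the BPF side looks roughly like the sketch below. The SEC() names, the hid_bpf_get_data() kfunc prototype and the pattern of filling hid_id from userspace before load() follow my reading of the in-tree selftests, so treat them as assumptions to be verified against the tree.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* kfunc exported by hid-bpf; signature assumed from the selftests. */
extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
			      unsigned int offset, const size_t size) __ksym;

SEC("struct_ops/hid_device_event")
int BPF_PROG(example_event, struct hid_bpf_ctx *hctx,
	     enum hid_report_type type, __u64 source)
{
	__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4 /* size */);

	if (!data)
		return 0;	/* let hid-core process the event untouched */

	data[1] = 255 - data[1];	/* illustrative tweak of one byte */
	return 0;
}

SEC(".struct_ops.link")
struct hid_bpf_ops example_ops = {
	/* .hid_id is written by the loader before load(), as documented above */
	.hid_device_event = (void *)example_event,
};

char _license[] SEC("license") = "GPL";
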
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c8d3ec116e29..2aa986a5cd1b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -269,8 +269,8 @@ enum mthp_stat_item {
MTHP_STAT_ANON_FAULT_ALLOC,
MTHP_STAT_ANON_FAULT_FALLBACK,
MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
- MTHP_STAT_ANON_SWPOUT,
- MTHP_STAT_ANON_SWPOUT_FALLBACK,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
__MTHP_STAT_COUNT
};
@@ -278,6 +278,7 @@ struct mthp_stat {
unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};
+#ifdef CONFIG_SYSFS
DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
@@ -287,6 +288,11 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
this_cpu_inc(mthp_stats.stats[order][item]);
}
+#else
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index edf96f249eb5..e94314760aab 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -45,6 +45,7 @@ enum hwmon_chip_attributes {
hwmon_chip_power_samples,
hwmon_chip_temp_samples,
hwmon_chip_beep_enable,
+ hwmon_chip_pec,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -60,6 +61,7 @@ enum hwmon_chip_attributes {
#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
#define HWMON_C_BEEP_ENABLE BIT(hwmon_chip_beep_enable)
+#define HWMON_C_PEC BIT(hwmon_chip_pec)
enum hwmon_temp_attributes {
hwmon_temp_enable,
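
A hedged sketch of a driver advertising the new chip-level PEC attribute via HWMON_C_PEC in its channel info table; only the table is shown and the surrounding driver is assumed.

#include <linux/hwmon.h>

static const struct hwmon_channel_info * const example_hwmon_info[] = {
	/* chip-level attributes, including the new "pec" knob */
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_PEC),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};
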
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 5e6cd43a6dbd..424acb98c7c2 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -852,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
@@ -961,8 +960,6 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#define builtin_i2c_driver(__i2c_driver) \
builtin_driver(__i2c_driver, i2c_add_driver)
-#endif /* I2C */
-
/* must call put_device() when done with returned i2c_client device */
struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
@@ -972,6 +969,28 @@ struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+#else /* I2C */
+
+static inline struct i2c_client *
+i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+#endif /* !I2C */
+
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index de2dce743ee2..30cef3b940eb 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -373,6 +373,7 @@ struct ieee80211_trigger {
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has to-DS set
*/
static inline bool ieee80211_has_tods(__le16 fc)
{
@@ -382,6 +383,7 @@ static inline bool ieee80211_has_tods(__le16 fc)
/**
* ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has from-DS set
*/
static inline bool ieee80211_has_fromds(__le16 fc)
{
@@ -391,6 +393,7 @@ static inline bool ieee80211_has_fromds(__le16 fc)
/**
* ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not it's a 4-address frame (from-DS and to-DS set)
*/
static inline bool ieee80211_has_a4(__le16 fc)
{
@@ -401,6 +404,7 @@ static inline bool ieee80211_has_a4(__le16 fc)
/**
* ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has more fragments (more frags bit set)
*/
static inline bool ieee80211_has_morefrags(__le16 fc)
{
@@ -410,6 +414,7 @@ static inline bool ieee80211_has_morefrags(__le16 fc)
/**
* ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the retry flag is set
*/
static inline bool ieee80211_has_retry(__le16 fc)
{
@@ -419,6 +424,7 @@ static inline bool ieee80211_has_retry(__le16 fc)
/**
* ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the power management flag is set
*/
static inline bool ieee80211_has_pm(__le16 fc)
{
@@ -428,6 +434,7 @@ static inline bool ieee80211_has_pm(__le16 fc)
/**
* ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the more data flag is set
*/
static inline bool ieee80211_has_moredata(__le16 fc)
{
@@ -437,6 +444,7 @@ static inline bool ieee80211_has_moredata(__le16 fc)
/**
* ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the protected flag is set
*/
static inline bool ieee80211_has_protected(__le16 fc)
{
@@ -446,6 +454,7 @@ static inline bool ieee80211_has_protected(__le16 fc)
/**
* ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the order flag is set
*/
static inline bool ieee80211_has_order(__le16 fc)
{
@@ -455,6 +464,7 @@ static inline bool ieee80211_has_order(__le16 fc)
/**
* ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is management
*/
static inline bool ieee80211_is_mgmt(__le16 fc)
{
@@ -465,6 +475,7 @@ static inline bool ieee80211_is_mgmt(__le16 fc)
/**
* ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is control
*/
static inline bool ieee80211_is_ctl(__le16 fc)
{
@@ -475,6 +486,7 @@ static inline bool ieee80211_is_ctl(__le16 fc)
/**
* ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a data frame
*/
static inline bool ieee80211_is_data(__le16 fc)
{
@@ -485,6 +497,7 @@ static inline bool ieee80211_is_data(__le16 fc)
/**
* ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is extended
*/
static inline bool ieee80211_is_ext(__le16 fc)
{
@@ -496,6 +509,7 @@ static inline bool ieee80211_is_ext(__le16 fc)
/**
* ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame
*/
static inline bool ieee80211_is_data_qos(__le16 fc)
{
@@ -510,6 +524,8 @@ static inline bool ieee80211_is_data_qos(__le16 fc)
/**
* ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame that has data
+ * (i.e. is not null data)
*/
static inline bool ieee80211_is_data_present(__le16 fc)
{
@@ -524,6 +540,7 @@ static inline bool ieee80211_is_data_present(__le16 fc)
/**
* ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association request
*/
static inline bool ieee80211_is_assoc_req(__le16 fc)
{
@@ -534,6 +551,7 @@ static inline bool ieee80211_is_assoc_req(__le16 fc)
/**
* ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association response
*/
static inline bool ieee80211_is_assoc_resp(__le16 fc)
{
@@ -544,6 +562,7 @@ static inline bool ieee80211_is_assoc_resp(__le16 fc)
/**
* ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation request
*/
static inline bool ieee80211_is_reassoc_req(__le16 fc)
{
@@ -554,6 +573,7 @@ static inline bool ieee80211_is_reassoc_req(__le16 fc)
/**
* ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation response
*/
static inline bool ieee80211_is_reassoc_resp(__le16 fc)
{
@@ -564,6 +584,7 @@ static inline bool ieee80211_is_reassoc_resp(__le16 fc)
/**
* ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe request
*/
static inline bool ieee80211_is_probe_req(__le16 fc)
{
@@ -574,6 +595,7 @@ static inline bool ieee80211_is_probe_req(__le16 fc)
/**
* ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe response
*/
static inline bool ieee80211_is_probe_resp(__le16 fc)
{
@@ -584,6 +606,7 @@ static inline bool ieee80211_is_probe_resp(__le16 fc)
/**
* ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a (regular, not S1G) beacon
*/
static inline bool ieee80211_is_beacon(__le16 fc)
{
@@ -595,6 +618,7 @@ static inline bool ieee80211_is_beacon(__le16 fc)
* ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
* IEEE80211_STYPE_S1G_BEACON
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an S1G beacon
*/
static inline bool ieee80211_is_s1g_beacon(__le16 fc)
{
@@ -604,30 +628,21 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc)
}
/**
- * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT &&
- * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT
- * @fc: frame control bytes in little-endian byteorder
- */
-static inline bool ieee80211_next_tbtt_present(__le16 fc)
-{
- return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
- cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) &&
- fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT);
-}
-
-/**
- * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only
- * true for S1G beacons when they're short.
+ * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an S1G short beacon,
+ * i.e. it is an S1G beacon with 'next TBTT' flag set
*/
static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
{
- return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc);
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
}
/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ATIM frame
*/
static inline bool ieee80211_is_atim(__le16 fc)
{
@@ -638,6 +653,7 @@ static inline bool ieee80211_is_atim(__le16 fc)
/**
* ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a disassociation frame
*/
static inline bool ieee80211_is_disassoc(__le16 fc)
{
@@ -648,6 +664,7 @@ static inline bool ieee80211_is_disassoc(__le16 fc)
/**
* ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an authentication frame
*/
static inline bool ieee80211_is_auth(__le16 fc)
{
@@ -658,6 +675,7 @@ static inline bool ieee80211_is_auth(__le16 fc)
/**
* ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a deauthentication frame
*/
static inline bool ieee80211_is_deauth(__le16 fc)
{
@@ -668,6 +686,7 @@ static inline bool ieee80211_is_deauth(__le16 fc)
/**
* ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an action frame
*/
static inline bool ieee80211_is_action(__le16 fc)
{
@@ -678,6 +697,7 @@ static inline bool ieee80211_is_action(__le16 fc)
/**
* ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK request frame
*/
static inline bool ieee80211_is_back_req(__le16 fc)
{
@@ -688,6 +708,7 @@ static inline bool ieee80211_is_back_req(__le16 fc)
/**
* ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK frame
*/
static inline bool ieee80211_is_back(__le16 fc)
{
@@ -698,6 +719,7 @@ static inline bool ieee80211_is_back(__le16 fc)
/**
* ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a PS-poll frame
*/
static inline bool ieee80211_is_pspoll(__le16 fc)
{
@@ -708,6 +730,7 @@ static inline bool ieee80211_is_pspoll(__le16 fc)
/**
* ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an RTS frame
*/
static inline bool ieee80211_is_rts(__le16 fc)
{
@@ -718,6 +741,7 @@ static inline bool ieee80211_is_rts(__le16 fc)
/**
* ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CTS frame
*/
static inline bool ieee80211_is_cts(__le16 fc)
{
@@ -728,6 +752,7 @@ static inline bool ieee80211_is_cts(__le16 fc)
/**
* ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ACK frame
*/
static inline bool ieee80211_is_ack(__le16 fc)
{
@@ -738,6 +763,7 @@ static inline bool ieee80211_is_ack(__le16 fc)
/**
* ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end frame
*/
static inline bool ieee80211_is_cfend(__le16 fc)
{
@@ -748,6 +774,7 @@ static inline bool ieee80211_is_cfend(__le16 fc)
/**
* ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end-ack frame
*/
static inline bool ieee80211_is_cfendack(__le16 fc)
{
@@ -758,6 +785,7 @@ static inline bool ieee80211_is_cfendack(__le16 fc)
/**
* ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc frame
*/
static inline bool ieee80211_is_nullfunc(__le16 fc)
{
@@ -768,6 +796,7 @@ static inline bool ieee80211_is_nullfunc(__le16 fc)
/**
* ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS nullfunc frame
*/
static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
{
@@ -778,6 +807,7 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
/**
* ieee80211_is_trigger - check if frame is trigger frame
* @fc: frame control field in little-endian byteorder
+ * Return: whether or not the frame is a trigger frame
*/
static inline bool ieee80211_is_trigger(__le16 fc)
{
@@ -788,6 +818,7 @@ static inline bool ieee80211_is_trigger(__le16 fc)
/**
* ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc or QoS nullfunc frame
*/
static inline bool ieee80211_is_any_nullfunc(__le16 fc)
{
@@ -797,6 +828,8 @@ static inline bool ieee80211_is_any_nullfunc(__le16 fc)
/**
* ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
* @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ * Return: whether or not the frame is the first fragment (also true if
+ * it's not fragmented at all)
*/
static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
{
@@ -806,6 +839,7 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
/**
* ieee80211_is_frag - check if a frame is a fragment
* @hdr: 802.11 header of the frame
+ * Return: whether or not the frame is a fragment
*/
static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
{
@@ -1101,7 +1135,7 @@ enum ieee80211_vht_opmode_bits {
};
/**
- * enum ieee80211_s1g_chanwidth
+ * enum ieee80211_s1g_chanwidth - S1G channel widths
* These are defined in IEEE802.11-2016ah Table 10-20
* as BSS Channel Width
*
@@ -2359,6 +2393,8 @@ struct ieee80211_eht_operation_info {
* @max_vht_nss: current maximum NSS as advertised by the STA in
* operating mode notification, can be 0 in which case the
* capability data will be used to derive this (from MCS support)
+ * Return: The maximum NSS that can be used for the given bandwidth/MCS
+ * combination
*
* Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
* vary for a given BW/MCS. This function parses the data.
@@ -2370,44 +2406,6 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
int mcs, bool ext_nss_bw_capable,
unsigned int max_vht_nss);
-/**
- * enum ieee80211_ap_reg_power - regulatory power for a Access Point
- *
- * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
- * @IEEE80211_REG_LPI_AP: Indoor Access Point
- * @IEEE80211_REG_SP_AP: Standard power Access Point
- * @IEEE80211_REG_VLP_AP: Very low power Access Point
- * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
- * @IEEE80211_REG_AP_POWER_MAX: maximum value
- */
-enum ieee80211_ap_reg_power {
- IEEE80211_REG_UNSET_AP,
- IEEE80211_REG_LPI_AP,
- IEEE80211_REG_SP_AP,
- IEEE80211_REG_VLP_AP,
- IEEE80211_REG_AP_POWER_AFTER_LAST,
- IEEE80211_REG_AP_POWER_MAX =
- IEEE80211_REG_AP_POWER_AFTER_LAST - 1,
-};
-
-/**
- * enum ieee80211_client_reg_power - regulatory power for a client
- *
- * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode
- * @IEEE80211_REG_DEFAULT_CLIENT: Default Client
- * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client
- * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal
- * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value
- */
-enum ieee80211_client_reg_power {
- IEEE80211_REG_UNSET_CLIENT,
- IEEE80211_REG_DEFAULT_CLIENT,
- IEEE80211_REG_SUBORDINATE_CLIENT,
- IEEE80211_REG_CLIENT_POWER_AFTER_LAST,
- IEEE80211_REG_CLIENT_POWER_MAX =
- IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1,
-};
-
/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
@@ -2789,22 +2787,6 @@ struct ieee80211_he_6ghz_oper {
u8 minrate;
} __packed;
-/*
- * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021",
- * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation
- * subfield encoding", and two category for each type in "Table E-12-Regulatory
- * Info subfield encoding in the United States".
- * So it it totally max 8 Transmit Power Envelope element.
- */
-#define IEEE80211_TPE_MAX_IE_COUNT 8
-/*
- * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield"
- * of "IEEE Std 802.11ax™‐2021", the max power level is 8.
- */
-#define IEEE80211_MAX_NUM_PWR_LEVEL 8
-
-#define IEEE80211_TPE_MAX_POWER_COUNT 8
-
/* transmit power interpretation type of transmit power envelope element */
enum ieee80211_tx_power_intrpt_type {
IEEE80211_TPE_LOCAL_EIRP,
@@ -2813,24 +2795,107 @@ enum ieee80211_tx_power_intrpt_type {
IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
};
+/* category type of transmit power envelope element */
+enum ieee80211_tx_power_category_6ghz {
+ IEEE80211_TPE_CAT_6GHZ_DEFAULT = 0,
+ IEEE80211_TPE_CAT_6GHZ_SUBORDINATE = 1,
+};
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP / IEEE80211_TPE_REG_CLIENT_EIRP,
+ * setting to 63.5 dBm means no constraint.
+ */
+#define IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT 127
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP_PSD / IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+ * setting to 127 indicates no PSD limit for the 20 MHz channel.
+ */
+#define IEEE80211_TPE_PSD_NO_LIMIT 127
+
/**
* struct ieee80211_tx_pwr_env - Transmit Power Envelope
- * @tx_power_info: Transmit Power Information field
- * @tx_power: Maximum Transmit Power field
+ * @info: Transmit Power Information field
+ * @variable: Maximum Transmit Power field
*
* This structure represents the payload of the "Transmit Power
* Envelope element" as described in IEEE Std 802.11ax-2021 section
* 9.4.2.161
*/
struct ieee80211_tx_pwr_env {
- u8 tx_power_info;
- s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT];
+ u8 info;
+ u8 variable[];
} __packed;
#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+#define IEEE80211_TX_PWR_ENV_EXT_COUNT 0xF
+
+static inline bool ieee80211_valid_tpe_element(const u8 *data, u8 len)
+{
+ const struct ieee80211_tx_pwr_env *env = (const void *)data;
+ u8 count, interpret, category;
+ u8 needed = sizeof(*env);
+ u8 N; /* also called N in the spec */
+
+ if (len < needed)
+ return false;
+
+ count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+
+ switch (category) {
+ case IEEE80211_TPE_CAT_6GHZ_DEFAULT:
+ case IEEE80211_TPE_CAT_6GHZ_SUBORDINATE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ if (count > 3)
+ return false;
+
+ /* count == 0 encodes 1 value for 20 MHz, etc. */
+ needed += count + 1;
+
+ if (len < needed)
+ return false;
+
+ /* there can be extension fields not accounted for in 'count' */
+
+ return true;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ if (count > 4)
+ return false;
+
+ N = count ? 1 << (count - 1) : 1;
+ needed += N;
+
+ if (len < needed)
+ return false;
+
+ if (len > needed) {
+ u8 K = u8_get_bits(env->variable[N],
+ IEEE80211_TX_PWR_ENV_EXT_COUNT);
+
+ needed += 1 + K;
+ if (len < needed)
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
* @he_oper_ie: byte data of the He Operations IE, stating from the byte
@@ -4145,7 +4210,7 @@ enum ieee80211_idle_options {
};
/**
- * struct ieee80211_bss_max_idle_period_ie
+ * struct ieee80211_bss_max_idle_period_ie - BSS max idle period element struct
*
* This structure refers to "BSS Max idle period element"
*
@@ -4180,7 +4245,7 @@ enum ieee80211_sa_query_action {
};
/**
- * struct ieee80211_bssid_index
+ * struct ieee80211_bssid_index - multiple BSSID index element structure
*
* This structure refers to "Multiple BSSID-index element"
*
@@ -4195,7 +4260,8 @@ struct ieee80211_bssid_index {
};
/**
- * struct ieee80211_multiple_bssid_configuration
+ * struct ieee80211_multiple_bssid_configuration - multiple BSSID configuration
+ * element structure
*
* This structure refers to "Multiple BSSID Configuration element"
*
@@ -4326,6 +4392,7 @@ struct ieee80211_he_6ghz_capa {
/**
* ieee80211_get_qos_ctl - get pointer to qos control bytes
* @hdr: the frame
+ * Return: a pointer to the QoS control field in the frame header
*
* The qos ctrl bytes come after the frame_control, duration, seq_num
* and 3 or 4 addresses of length ETH_ALEN. Checks frame_control to choose
@@ -4348,6 +4415,7 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_tid - get qos TID
* @hdr: the frame
+ * Return: the TID from the QoS control field
*/
static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
{
@@ -4359,6 +4427,7 @@ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_SA - get pointer to SA
* @hdr: the frame
+ * Return: a pointer to the source address (SA)
*
* Given an 802.11 frame, this function returns the offset
* to the source address (SA). It does not verify that the
@@ -4378,6 +4447,7 @@ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_DA - get pointer to DA
* @hdr: the frame
+ * Return: a pointer to the destination address (DA)
*
* Given an 802.11 frame, this function returns the offset
* to the destination address (DA). It does not verify that
@@ -4396,6 +4466,7 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
* @skb: the skb to check, starting with the 802.11 header
+ * Return: whether or not the MMPDU is bufferable
*/
static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
{
@@ -4434,6 +4505,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
/**
* _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
{
@@ -4470,6 +4542,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
{
@@ -4482,6 +4555,7 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
* ieee80211_is_public_action - check if frame is a public action frame
* @hdr: the frame
* @len: length of the frame
+ * Return: whether or not the frame is a public action frame
*/
static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
size_t len)
@@ -4527,8 +4601,9 @@ ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @hdr: the frame
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
{
@@ -4544,8 +4619,9 @@ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
{
@@ -4557,6 +4633,7 @@ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
/**
* ieee80211_tu_to_usec - convert time units (TU) to microseconds
* @tu: the TUs
+ * Return: the time value converted to microseconds
*/
static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
{
@@ -4568,6 +4645,7 @@ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
* @tim: the TIM IE
* @tim_len: length of the TIM IE
* @aid: the AID to look for
+ * Return: whether or not traffic is indicated in the TIM for the given AID
*/
static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
u8 tim_len, u16 aid)
@@ -4594,8 +4672,10 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
}
/**
- * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
+ * ieee80211_get_tdls_action - get TDLS action code
* @skb: the skb containing the frame, length will not be checked
+ * Return: the TDLS action code, or -1 if it's not an encapsulated TDLS action
+ * frame
*
* This function assumes the frame is a data frame, and that the network header
* is in the correct place.
@@ -4635,6 +4715,7 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
/**
* ieee80211_action_contains_tpc - checks if the frame contains TPC element
* @skb: the skb containing the frame, length will be checked
+ * Return: %true if the frame contains a TPC element, %false otherwise
*
* This function checks if it's either TPC report action frame or Link
* Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5
@@ -4679,6 +4760,11 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+/**
+ * ieee80211_is_timing_measurement - check if frame is timing measurement response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid timing measurement response
+ */
static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
@@ -4698,6 +4784,11 @@ static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
return false;
}
+/**
+ * ieee80211_is_ftm - check if frame is FTM response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid FTM response action frame
+ */
static inline bool ieee80211_is_ftm(struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
@@ -4752,6 +4843,7 @@ struct element {
* @element: element pointer after for_each_element() or friends
* @data: same data pointer as passed to for_each_element() or friends
* @datalen: same data length as passed to for_each_element() or friends
+ * Return: %true if all elements were iterated, %false otherwise; see notes
*
* This function returns %true if all the data was parsed or considered
* while walking the elements. Only use this if your for_each_element()
@@ -4955,6 +5047,7 @@ struct ieee80211_mle_tdls_common_info {
* ieee80211_mle_common_size - check multi-link element common size
* @data: multi-link element, must already be checked for size using
* ieee80211_mle_size_ok()
+ * Return: the size of the multi-link element's "common" subfield
*/
static inline u8 ieee80211_mle_common_size(const u8 *data)
{
@@ -4987,11 +5080,10 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
/**
* ieee80211_mle_get_link_id - returns the link ID
* @data: the basic multi link element
+ * Return: the link ID, or -1 if not present
*
* The element is assumed to be of the correct type (BASIC) and big enough,
* this must be checked using ieee80211_mle_type_ok().
- *
- * If the BSS link ID can't be found, -1 will be returned
*/
static inline int ieee80211_mle_get_link_id(const u8 *data)
{
@@ -5011,12 +5103,10 @@ static inline int ieee80211_mle_get_link_id(const u8 *data)
/**
* ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
* @data: pointer to the basic multi link element
+ * Return: the BSS Parameter Change Count field value, or -1 if not present
*
* The element is assumed to be of the correct type (BASIC) and big enough,
* this must be checked using ieee80211_mle_type_ok().
- *
- * If the BSS parameter change count value can't be found (the presence bit
- * for it is clear), -1 will be returned.
*/
static inline int
ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
@@ -5039,13 +5129,13 @@ ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
/**
* ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
- * @data: pointer to the multi link EHT IE
+ * @data: pointer to the multi-link element
+ * Return: the medium synchronization delay field value from the multi-link
+ * element, or the default value (%IEEE80211_MED_SYNC_DELAY_DEFAULT)
+ * if not present
*
* The element is assumed to be of the correct type (BASIC) and big enough,
* this must be checked using ieee80211_mle_type_ok().
- *
- * If the medium synchronization is not present, then the default value is
- * returned.
*/
static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
{
@@ -5069,12 +5159,12 @@ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
/**
* ieee80211_mle_get_eml_cap - returns the EML capability
- * @data: pointer to the multi link EHT IE
+ * @data: pointer to the multi-link element
+ * Return: the EML capability field value from the multi-link element,
+ * or 0 if not present
*
* The element is assumed to be of the correct type (BASIC) and big enough,
* this must be checked using ieee80211_mle_type_ok().
- *
- * If the EML capability is not present, 0 will be returned.
*/
static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
{
@@ -5100,13 +5190,12 @@ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
/**
* ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations.
- * @data: pointer to the multi link EHT IE
+ * @data: pointer to the multi-link element
+ * Return: the MLD capabilities and operations field value from the multi-link
+ * element, or 0 if not present
*
* The element is assumed to be of the correct type (BASIC) and big enough,
* this must be checked using ieee80211_mle_type_ok().
- *
- * If the MLD capabilities and operations field is not present, 0 will be
- * returned.
*/
static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
{
@@ -5137,12 +5226,11 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
/**
* ieee80211_mle_get_mld_id - returns the MLD ID
- * @data: pointer to the multi link element
+ * @data: pointer to the multi-link element
+ * Return: The MLD ID in the given multi-link element, or 0 if not present
*
* The element is assumed to be of the correct type (BASIC) and big enough,
* this must be checked using ieee80211_mle_type_ok().
- *
- * If the MLD ID is not present, 0 will be returned.
*/
static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
{
@@ -5177,6 +5265,7 @@ static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
* ieee80211_mle_size_ok - validate multi-link element size
* @data: pointer to the element data
* @len: length of the containing element
+ * Return: whether or not the multi-link element size is OK
*/
static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
{
@@ -5246,6 +5335,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
* @data: pointer to the element data
* @type: expected type of the element
* @len: length of the containing element
+ * Return: whether or not the multi-link element type matches and size is OK
*/
static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
{
@@ -5289,6 +5379,7 @@ struct ieee80211_mle_per_sta_profile {
* profile size
* @data: pointer to the sub element data
* @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
*/
static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
size_t len)
@@ -5373,6 +5464,7 @@ ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta
* element sta profile size.
* @data: pointer to the sub element data
* @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
*/
static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
size_t len)
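
The Return: annotations above only make sense on an element that has already been validated. A minimal caller sketch of the intended check-then-read pattern (the function name and error codes here are illustrative, not part of the patch):

static int example_get_mle_link_id(const u8 *mle, size_t mle_len)
{
	int link_id;

	/* Validate type and size before using any of the accessors above. */
	if (!ieee80211_mle_type_ok(mle, IEEE80211_ML_CONTROL_TYPE_BASIC, mle_len))
		return -EINVAL;

	link_id = ieee80211_mle_get_link_id(mle);
	if (link_id < 0)
		return -ENOENT;		/* link ID field not present */

	return link_id;
}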
diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
index 8ff8eabb4a98..fa788817acfc 100644
--- a/include/linux/intel_tcc.h
+++ b/include/linux/intel_tcc.h
@@ -14,5 +14,6 @@ int intel_tcc_get_tjmax(int cpu);
int intel_tcc_get_offset(int cpu);
int intel_tcc_set_offset(int cpu, int offset);
int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
+u32 intel_tcc_get_offset_mask(void);
#endif /* __INTEL_TCC_H__ */
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index 1e880cb0f454..ff480b47ae64 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -21,6 +21,7 @@ enum intel_tpmi_id {
TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_ID_PLR = 0xc, /* Performance Limit Reasons */
TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
};
@@ -53,4 +54,5 @@ struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int
int tpmi_get_resource_count(struct auxiliary_device *auxdev);
int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
bool *write_blocked);
+struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev);
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 7a6b190c7da7..3bb6198d1523 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -50,7 +50,7 @@ struct io_wq_work_list {
struct io_wq_work {
struct io_wq_work_node list;
- unsigned flags;
+ atomic_t flags;
/* place it here instead of io_kiocb as it fills padding and saves 4B */
int cancel_seq;
};
@@ -207,18 +207,9 @@ struct io_submit_state {
bool need_plug;
bool cq_flush;
unsigned short submit_nr;
- unsigned int cqes_count;
struct blk_plug plug;
};
-struct io_ev_fd {
- struct eventfd_ctx *cq_ev_fd;
- unsigned int eventfd_async: 1;
- struct rcu_head rcu;
- atomic_t refs;
- atomic_t ops;
-};
-
struct io_alloc_cache {
void **entries;
unsigned int nr_cached;
@@ -373,7 +364,6 @@ struct io_ring_ctx {
struct io_restriction restrictions;
	/* slow path rsrc auxiliary data, used by update/register */
- struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;
@@ -406,6 +396,9 @@ struct io_ring_ctx {
struct callback_head poll_wq_task_work;
struct list_head defer_list;
+ struct io_alloc_cache msg_cache;
+ spinlock_t msg_lock;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
struct list_head napi_list; /* track busy poll napi_id */
spinlock_t napi_lock; /* napi_list lock */
@@ -648,7 +641,7 @@ struct io_kiocb {
struct io_rsrc_node *rsrc_node;
atomic_t refs;
- atomic_t poll_refs;
+ bool cancel_seq_set;
struct io_task_work io_task_work;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
struct hlist_node hash_node;
@@ -657,6 +650,7 @@ struct io_kiocb {
/* opcode allocated if it needs to store data for async defer */
void *async_data;
/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ atomic_t poll_refs;
struct io_kiocb *link;
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7bc8dff7cf6d..17b3f36ad843 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1533,7 +1533,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
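
With the stub now returning ERR_PTR(-ENODEV) instead of NULL, callers can use a single IS_ERR() check whether or not SVA support is compiled in. A minimal caller sketch (hypothetical driver function, assuming a valid struct device):

static int example_bind_sva(struct device *dev)
{
	struct iommu_sva *handle;

	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);	/* -ENODEV when SVA is unavailable */

	/* ... issue DMA against the process address space ... */

	iommu_sva_unbind_device(handle);
	return 0;
}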
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index ab831e5ae748..89b4d8ff274b 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -16,11 +16,28 @@
* requested like normal irqs and enqueued from process context.
*/
+struct irq_sim_ops {
+ int (*irq_sim_irq_requested)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
+ void (*irq_sim_irq_released)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
+};
+
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
unsigned int num_irqs);
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
struct fwnode_handle *fwnode,
unsigned int num_irqs);
+struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
+struct irq_domain *
+devm_irq_domain_create_sim_full(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
void irq_domain_remove_sim(struct irq_domain *domain);
#endif /* _LINUX_IRQ_SIM_H */
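
irq_domain_create_sim_full() and its devm variant let a simulated domain observe when one of its lines is requested or released. A minimal sketch of wiring the new ops (the callback names, line count and "priv" pointer are assumptions for illustration):

static int example_sim_requested(struct irq_domain *domain,
				 irq_hw_number_t hwirq, void *data)
{
	/* e.g. start generating events for this simulated line */
	return 0;
}

static void example_sim_released(struct irq_domain *domain,
				 irq_hw_number_t hwirq, void *data)
{
	/* stop generating events for this simulated line */
}

static const struct irq_sim_ops example_sim_ops = {
	.irq_sim_irq_requested	= example_sim_requested,
	.irq_sim_irq_released	= example_sim_released,
};

/* in probe(), with a valid fwnode and driver-private "priv": */
domain = devm_irq_domain_create_sim_full(dev, fwnode, 16,
					 &example_sim_ops, priv);
if (IS_ERR(domain))
	return PTR_ERR(domain);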
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index 1177f3a1aed5..fc0246cc05ac 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -10,10 +10,6 @@
#include <linux/irqchip/arm-vgic-info.h>
#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
struct irq_domain;
struct fwnode_handle;
diff --git a/include/linux/irqchip/arm-gic-v3-prio.h b/include/linux/irqchip/arm-gic-v3-prio.h
new file mode 100644
index 000000000000..44157c9abb78
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3-prio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+
+/*
+ * GIC priorities from the view of the PMR/RPR.
+ *
+ * These values are chosen to be valid in either the absolute priority space or
+ * the NS view of the priority space. The value programmed into the distributor
+ * and ITS will be chosen at boot time such that these values appear in the
+ * PMR/RPR.
+ *
+ * GICV3_PRIO_UNMASKED is the PMR view of the priority to use to permit both
+ * IRQs and pseudo-NMIs.
+ *
+ * GICV3_PRIO_IRQ is the PMR view of the priority of regular interrupts. This
+ * can be written to the PMR to mask regular IRQs.
+ *
+ * GICV3_PRIO_NMI is the PMR view of the priority of pseudo-NMIs. This can be
+ * written to the PMR to mask pseudo-NMIs.
+ *
+ * On arm64 some code sections either automatically switch back to PSR.I or
+ * explicitly require that priority masking not be used. If bit GICV3_PRIO_PSR_I_SET
+ * is included in the priority mask, it indicates that PSR.I should be set and
+ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+#define GICV3_PRIO_UNMASKED 0xe0
+#define GICV3_PRIO_IRQ 0xc0
+#define GICV3_PRIO_NMI 0x80
+
+#define GICV3_PRIO_PSR_I_SET (1 << 4)
+
+#ifndef __ASSEMBLER__
+
+#define __gicv3_prio_to_ns(p) (0xff & ((p) << 1))
+#define __gicv3_ns_to_prio(ns) (0x80 | ((ns) >> 1))
+
+#define __gicv3_prio_valid_ns(p) \
+ (__gicv3_ns_to_prio(__gicv3_prio_to_ns(p)) == (p))
+
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_NMI));
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_IRQ));
+
+static_assert(GICV3_PRIO_NMI < GICV3_PRIO_IRQ);
+static_assert(GICV3_PRIO_IRQ < GICV3_PRIO_UNMASKED);
+
+static_assert(GICV3_PRIO_IRQ < (GICV3_PRIO_IRQ | GICV3_PRIO_PSR_I_SET));
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H */
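
For reference, the round-trip property checked by the static_asserts above works out as follows; these extra assertions are illustrative only, spelling out the values the macros produce:

static_assert(__gicv3_prio_to_ns(GICV3_PRIO_NMI) == 0x00);
static_assert(__gicv3_ns_to_prio(0x00) == GICV3_PRIO_NMI);
static_assert(__gicv3_prio_to_ns(GICV3_PRIO_IRQ) == 0x80);
static_assert(__gicv3_ns_to_prio(0x80) == GICV3_PRIO_IRQ);

That is, the non-secure view shifts the priority left by one and drops the top bit, so only values with bit 7 set survive the round trip; GICV3_PRIO_UNMASKED (0xe0) maps to an NS view of 0xc0 the same way.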
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 728691365464..70c0948f978e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -638,7 +638,7 @@ struct fwnode_handle;
int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
- struct irq_domain *domain);
+ struct irq_domain *domain, u8 irq_prio);
int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
static inline bool gic_enable_sre(void)
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index b851ba415e03..75a2fb8b16c3 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -21,6 +21,8 @@ enum kcov_mode {
KCOV_MODE_TRACE_PC = 2,
/* Collecting comparison operands mode. */
KCOV_MODE_TRACE_CMP = 3,
+ /* The process owns a KCOV remote reference. */
+ KCOV_MODE_REMOTE = 4,
};
#define KCOV_IN_CTXSW (1 << 30)
@@ -55,21 +57,47 @@ static inline void kcov_remote_start_usb(u64 id)
/*
* The softirq flavor of kcov_remote_*() functions is introduced as a temporary
- * work around for kcov's lack of nested remote coverage sections support in
- * task context. Adding support for nested sections is tracked in:
- * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ * workaround for KCOV's lack of nested remote coverage sections support.
+ *
+ * Adding support is tracked in https://bugzilla.kernel.org/show_bug.cgi?id=210337.
+ *
+ * kcov_remote_start_usb_softirq():
+ *
+ * 1. Only collects coverage when called in the softirq context. This allows
+ * avoiding nested remote coverage collection sections in the task context.
+ * For example, USB/IP calls usb_hcd_giveback_urb() in the task context
+ * within an existing remote coverage collection section. Thus, KCOV should
+ * not attempt to start collecting coverage within the coverage collection
+ * section in __usb_hcd_giveback_urb() in this case.
+ *
+ * 2. Disables interrupts for the duration of the coverage collection section.
+ * This allows avoiding nested remote coverage collection sections in the
+ * softirq context (a softirq might occur during the execution of a work in
+ * the BH workqueue, which runs with in_serving_softirq() > 0).
+ * For example, usb_giveback_urb_bh() runs in the BH workqueue with
+ * interrupts enabled, so __usb_hcd_giveback_urb() might be interrupted in
+ * the middle of its remote coverage collection section, and the interrupt
+ * handler might invoke __usb_hcd_giveback_urb() again.
*/
-static inline void kcov_remote_start_usb_softirq(u64 id)
+static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
{
- if (in_serving_softirq())
+ unsigned long flags = 0;
+
+ if (in_serving_softirq()) {
+ local_irq_save(flags);
kcov_remote_start_usb(id);
+ }
+
+ return flags;
}
-static inline void kcov_remote_stop_softirq(void)
+static inline void kcov_remote_stop_softirq(unsigned long flags)
{
- if (in_serving_softirq())
+ if (in_serving_softirq()) {
kcov_remote_stop();
+ local_irq_restore(flags);
+ }
}
#ifdef CONFIG_64BIT
@@ -103,8 +131,11 @@ static inline u64 kcov_common_handle(void)
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
-static inline void kcov_remote_start_usb_softirq(u64 id) {}
-static inline void kcov_remote_stop_softirq(void) {}
+static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
+{
+ return 0;
+}
+static inline void kcov_remote_stop_softirq(unsigned long flags) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
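
With this change the softirq helpers carry the saved interrupt state through the caller. A minimal sketch of the intended pairing, loosely modeled on the __usb_hcd_giveback_urb() case described above (the function name is hypothetical):

static void example_giveback(struct urb *urb)
{
	unsigned long flags;

	/* Starts coverage and disables IRQs only in softirq context. */
	flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);

	/* ... complete the URB and run its completion callback ... */

	kcov_remote_stop_softirq(flags);
}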
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 52c63a9c5a9c..11690dacd986 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
*/
#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+ atomic_long_inc(&ksm_zero_pages);
+ atomic_long_inc(&mm->ksm_zero_pages);
+}
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
if (is_ksm_zero_pte(pte)) {
- ksm_zero_pages--;
- mm->ksm_zero_pages--;
+ atomic_long_dec(&ksm_zero_pages);
+ atomic_long_dec(&mm->ksm_zero_pages);
}
}
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->ksm_zero_pages);
+}
+
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6300313c46b7..6885603f211b 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -107,6 +107,8 @@ struct led_classdev {
#define LED_BRIGHT_HW_CHANGED BIT(21)
#define LED_RETAIN_AT_SHUTDOWN BIT(22)
#define LED_INIT_DEFAULT_TRIGGER BIT(23)
+#define LED_REJECT_NAME_CONFLICT BIT(24)
+#define LED_MULTI_COLOR BIT(25)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -374,6 +376,25 @@ void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
+ * led_mc_set_brightness - set mc LED color intensity values and brightness
+ * @led_cdev: the LED to set
+ * @intensity_value: array of per color intensity values to set
+ * @num_colors: number of entries in the intensity_value array
+ * @brightness: the brightness to set the LED to
+ *
+ * Set a multi-color LED's per color intensity values and brightness.
+ * If necessary, this cancels the software blink timer. This function is
+ * guaranteed not to sleep.
+ *
+ * Calling this function on a non-multi-color led_classdev or with the wrong
+ * num_colors value is an error. In this case an error will be logged once
+ * and the call will do nothing.
+ */
+void led_mc_set_brightness(struct led_classdev *led_cdev,
+ unsigned int *intensity_value, unsigned int num_colors,
+ unsigned int brightness);
+
+/**
* led_update_brightness - update LED brightness
* @led_cdev: the LED to query
*
@@ -428,6 +449,16 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
char *led_classdev_name);
/**
+ * led_get_color_name - get string representation of color ID
+ * @color_id: The LED_COLOR_ID_* constant
+ *
+ * Get the string name of a LED_COLOR_ID_* constant.
+ *
+ * Returns: A string constant or NULL on an invalid ID.
+ */
+const char *led_get_color_name(u8 color_id);
+
+/**
* led_sysfs_is_disabled - check if LED sysfs interface is disabled
* @led_cdev: the LED to query
*
@@ -490,6 +521,9 @@ void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
void led_trigger_unregister_simple(struct led_trigger *trigger);
void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
+void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness);
void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on,
unsigned long delay_off);
void led_trigger_blink_oneshot(struct led_trigger *trigger,
@@ -532,6 +566,9 @@ static inline void led_trigger_register_simple(const char *name,
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
+static inline void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness) {}
static inline void led_trigger_blink(struct led_trigger *trigger,
unsigned long delay_on,
unsigned long delay_off) {}
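
A minimal sketch of the new multi-color entry point, assuming an RGB LED whose classdev was registered with three subleds in R/G/B order (the helper name and color values are illustrative):

static void example_set_orange(struct led_classdev *led_cdev)
{
	/* One intensity per registered sub-LED, in registration order. */
	unsigned int intensity[3] = { 255, 128, 0 };

	led_mc_set_brightness(led_cdev, intensity, ARRAY_SIZE(intensity),
			      LED_HALF);
}

Per the kernel-doc above, calling it with the wrong num_colors or on a classdev that is not multi-color is logged once and otherwise does nothing.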
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 13fb41d25da6..17394098bee9 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -814,7 +814,6 @@ struct ata_port {
/* Flags that change dynamically, protected by ap->lock */
unsigned int pflags; /* ATA_PFLAG_xxx */
unsigned int print_id; /* user visible unique port ID */
- unsigned int local_port_no; /* host local port num */
unsigned int port_no; /* 0 based port no. inside the host */
#ifdef CONFIG_ATA_SFF
@@ -1069,7 +1068,7 @@ extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
-extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
+extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern void ata_host_get(struct ata_host *host);
@@ -1082,7 +1081,6 @@ extern int ata_host_activate(struct ata_host *host, int irq,
const struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
-extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
void __user *arg);
#ifdef CONFIG_COMPAT
@@ -1246,11 +1244,11 @@ extern int sata_link_debounce(struct ata_link *link,
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
- struct ata_port_info *, struct Scsi_Host *);
extern void ata_port_probe(struct ata_port *ap);
-extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
-extern void ata_sas_tport_delete(struct ata_port *ap);
+extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern void ata_port_free(struct ata_port *ap);
+extern int ata_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_tport_delete(struct ata_port *ap);
int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index e55010fa7329..091dc0b6bdfb 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -51,4 +51,25 @@
#define local_unlock_irqrestore(lock, flags) \
__local_unlock_irqrestore(lock, flags)
+DEFINE_GUARD(local_lock, local_lock_t __percpu*,
+ local_lock(_T),
+ local_unlock(_T))
+DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
+ local_lock_irq(_T),
+ local_unlock_irq(_T))
+DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
+ local_lock_irqsave(_T->lock, _T->flags),
+ local_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+#define local_lock_nested_bh(_lock) \
+ __local_lock_nested_bh(_lock)
+
+#define local_unlock_nested_bh(_lock) \
+ __local_unlock_nested_bh(_lock)
+
+DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
+ local_lock_nested_bh(_T),
+ local_unlock_nested_bh(_T))
+
#endif
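
The nested-BH variants annotate per-CPU data that is only touched from softirq (BH) context; the internal helpers below assert exactly that. A minimal sketch, using a hypothetical per-CPU structure:

struct example_bh_stats {
	local_lock_t	bh_lock;
	u64		packets;
};

static DEFINE_PER_CPU(struct example_bh_stats, example_bh_stats) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Must run in softirq (BH) context, e.g. from a NAPI poll handler. */
static void example_count_packet(void)
{
	local_lock_nested_bh(&example_bh_stats.bh_lock);
	this_cpu_inc(example_bh_stats.packets);
	local_unlock_nested_bh(&example_bh_stats.bh_lock);
}

The scoped form, guard(local_lock_nested_bh)(&example_bh_stats.bh_lock), relies on the DEFINE_GUARD() added above.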
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 975e33b793a7..8dd71fbbb6d2 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -62,6 +62,17 @@ do { \
local_lock_debug_init(lock); \
} while (0)
+#define __spinlock_nested_bh_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_NORMAL); \
+ local_lock_debug_init(lock); \
+} while (0)
+
#define __local_lock(lock) \
do { \
preempt_disable(); \
@@ -98,6 +109,15 @@ do { \
local_irq_restore(flags); \
} while (0)
+#define __local_lock_nested_bh(lock) \
+ do { \
+ lockdep_assert_in_softirq(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_unlock_nested_bh(lock) \
+ local_lock_release(this_cpu_ptr(lock))
+
#else /* !CONFIG_PREEMPT_RT */
/*
@@ -138,4 +158,15 @@ typedef spinlock_t local_lock_t;
#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+#define __local_lock_nested_bh(lock) \
+do { \
+ lockdep_assert_in_softirq_func(); \
+ spin_lock(this_cpu_ptr(lock)); \
+} while (0)
+
+#define __local_unlock_nested_bh(lock) \
+do { \
+ spin_unlock(this_cpu_ptr((lock))); \
+} while (0)
+
#endif /* CONFIG_PREEMPT_RT */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5e51b0de4c4b..3f5a551579cc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -297,9 +297,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
.wait_type_inner = _wait_type, \
.lock_type = LD_LOCK_WAIT_OVERRIDE, }
-#define lock_map_assert_held(l) \
- lockdep_assert(lock_is_held(l) != LOCK_STATE_NOT_HELD)
-
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_init_task(struct task_struct *task)
@@ -391,8 +388,6 @@ extern int lockdep_is_held(const void *);
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
struct lockdep_map __maybe_unused _name = {}
-#define lock_map_assert_held(l) do { (void)(l); } while (0)
-
#endif /* !LOCKDEP */
#ifdef CONFIG_PROVE_LOCKING
@@ -605,6 +600,8 @@ do { \
(!in_softirq() || in_irq() || in_nmi())); \
} while (0)
+extern void lockdep_assert_in_softirq_func(void);
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
@@ -618,6 +615,7 @@ do { \
# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
+# define lockdep_assert_in_softirq_func() do { } while (0)
#endif
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index f804b76cde44..855db460e08b 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -144,6 +144,7 @@ LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry,
LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap,
struct dentry *dentry, int ia_valid)
LSM_HOOK(int, 0, inode_getattr, const struct path *path)
+LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name)
LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap,
struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
@@ -413,7 +414,7 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
#ifdef CONFIG_AUDIT
LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
diff --git a/include/linux/math64.h b/include/linux/math64.h
index d34def7f9a8c..6aaccc1626ab 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -298,6 +298,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
/**
+ * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
+#define DIV_U64_ROUND_UP(ll, d) \
+ ({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })
+
+/**
* DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -342,4 +355,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
div_s64((__x - (__d / 2)), __d); \
} \
)
+
+/**
+ * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
+ * @x: the value to round up
+ * @y: 32bit multiple to round up to
+ *
+ * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup and
+ * the faster round_up() for powers of 2.
+ *
+ * Return: rounded up value.
+ */
+static inline u64 roundup_u64(u64 x, u32 y)
+{
+ return DIV_U64_ROUND_UP(x, y) * y;
+}
#endif /* _LINUX_MATH64_H */
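
A small usage sketch of the new helper; because the divisor is 32-bit it maps onto div_u64() rather than a full 64-by-64 division (the helper name and the 4 KiB alignment are only an example):

/* Round a 64-bit byte count up to a 4 KiB boundary. */
static u64 example_align_4k(u64 len)
{
	return roundup_u64(len, SZ_4K);	/* roundup_u64(10000, 4096) == 12288 */
}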
diff --git a/include/linux/mfd/88pm886.h b/include/linux/mfd/88pm886.h
new file mode 100644
index 000000000000..133aa302e492
--- /dev/null
+++ b/include/linux/mfd/88pm886.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __MFD_88PM886_H
+#define __MFD_88PM886_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define PM886_A1_CHIP_ID 0xa1
+
+#define PM886_IRQ_ONKEY 0
+
+#define PM886_PAGE_OFFSET_REGULATORS 1
+
+#define PM886_REG_ID 0x00
+
+#define PM886_REG_STATUS1 0x01
+#define PM886_ONKEY_STS1 BIT(0)
+
+#define PM886_REG_INT_STATUS1 0x05
+
+#define PM886_REG_INT_ENA_1 0x0a
+#define PM886_INT_ENA1_ONKEY BIT(0)
+
+#define PM886_REG_MISC_CONFIG1 0x14
+#define PM886_SW_PDOWN BIT(5)
+
+#define PM886_REG_MISC_CONFIG2 0x15
+#define PM886_INT_INV BIT(0)
+#define PM886_INT_CLEAR BIT(1)
+#define PM886_INT_RC 0x00
+#define PM886_INT_WC BIT(1)
+#define PM886_INT_MASK_MODE BIT(2)
+
+#define PM886_REG_RTC_SPARE6 0xef
+
+#define PM886_REG_BUCK_EN 0x08
+#define PM886_REG_LDO_EN1 0x09
+#define PM886_REG_LDO_EN2 0x0a
+#define PM886_REG_LDO1_VOUT 0x20
+#define PM886_REG_LDO2_VOUT 0x26
+#define PM886_REG_LDO3_VOUT 0x2c
+#define PM886_REG_LDO4_VOUT 0x32
+#define PM886_REG_LDO5_VOUT 0x38
+#define PM886_REG_LDO6_VOUT 0x3e
+#define PM886_REG_LDO7_VOUT 0x44
+#define PM886_REG_LDO8_VOUT 0x4a
+#define PM886_REG_LDO9_VOUT 0x50
+#define PM886_REG_LDO10_VOUT 0x56
+#define PM886_REG_LDO11_VOUT 0x5c
+#define PM886_REG_LDO12_VOUT 0x62
+#define PM886_REG_LDO13_VOUT 0x68
+#define PM886_REG_LDO14_VOUT 0x6e
+#define PM886_REG_LDO15_VOUT 0x74
+#define PM886_REG_LDO16_VOUT 0x7a
+#define PM886_REG_BUCK1_VOUT 0xa5
+#define PM886_REG_BUCK2_VOUT 0xb3
+#define PM886_REG_BUCK3_VOUT 0xc1
+#define PM886_REG_BUCK4_VOUT 0xcf
+#define PM886_REG_BUCK5_VOUT 0xdd
+
+#define PM886_LDO_VSEL_MASK 0x0f
+#define PM886_BUCK_VSEL_MASK 0x7f
+
+struct pm886_chip {
+ struct i2c_client *client;
+ unsigned int chip_id;
+ struct regmap *regmap;
+};
+#endif /* __MFD_88PM886_H */
diff --git a/include/linux/mfd/cs40l50.h b/include/linux/mfd/cs40l50.h
new file mode 100644
index 000000000000..e5dc49860944
--- /dev/null
+++ b/include/linux/mfd/cs40l50.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#ifndef __MFD_CS40L50_H__
+#define __MFD_CS40L50_H__
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* Power Supply Configuration */
+#define CS40L50_BLOCK_ENABLES2 0x201C
+#define CS40L50_ERR_RLS 0x2034
+#define CS40L50_BST_LPMODE_SEL 0x3810
+#define CS40L50_DCM_LOW_POWER 0x1
+#define CS40L50_OVERTEMP_WARN 0x4000010
+
+/* Interrupts */
+#define CS40L50_IRQ1_INT_1 0xE010
+#define CS40L50_IRQ1_BASE CS40L50_IRQ1_INT_1
+#define CS40L50_IRQ1_INT_2 0xE014
+#define CS40L50_IRQ1_INT_8 0xE02C
+#define CS40L50_IRQ1_INT_9 0xE030
+#define CS40L50_IRQ1_INT_10 0xE034
+#define CS40L50_IRQ1_INT_18 0xE054
+#define CS40L50_IRQ1_MASK_1 0xE090
+#define CS40L50_IRQ1_MASK_2 0xE094
+#define CS40L50_IRQ1_MASK_20 0xE0DC
+#define CS40L50_IRQ1_INT_1_OFFSET (CS40L50_IRQ1_INT_1 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_2_OFFSET (CS40L50_IRQ1_INT_2 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_8_OFFSET (CS40L50_IRQ1_INT_8 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_9_OFFSET (CS40L50_IRQ1_INT_9 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_10_OFFSET (CS40L50_IRQ1_INT_10 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_18_OFFSET (CS40L50_IRQ1_INT_18 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ_MASK_2_OVERRIDE 0xFFDF7FFF
+#define CS40L50_IRQ_MASK_20_OVERRIDE 0x15C01000
+#define CS40L50_AMP_SHORT_MASK BIT(31)
+#define CS40L50_DSP_QUEUE_MASK BIT(21)
+#define CS40L50_TEMP_ERR_MASK BIT(31)
+#define CS40L50_BST_UVP_MASK BIT(6)
+#define CS40L50_BST_SHORT_MASK BIT(7)
+#define CS40L50_BST_ILIMIT_MASK BIT(18)
+#define CS40L50_UVLO_VDDBATT_MASK BIT(16)
+#define CS40L50_GLOBAL_ERROR_MASK BIT(15)
+
+enum cs40l50_irq_list {
+ CS40L50_DSP_QUEUE_IRQ,
+ CS40L50_GLOBAL_ERROR_IRQ,
+ CS40L50_UVLO_VDDBATT_IRQ,
+ CS40L50_BST_ILIMIT_IRQ,
+ CS40L50_BST_SHORT_IRQ,
+ CS40L50_BST_UVP_IRQ,
+ CS40L50_TEMP_ERR_IRQ,
+ CS40L50_AMP_SHORT_IRQ,
+};
+
+/* DSP */
+#define CS40L50_XMEM_PACKED_0 0x2000000
+#define CS40L50_XMEM_UNPACKED24_0 0x2800000
+#define CS40L50_SYS_INFO_ID 0x25E0000
+#define CS40L50_DSP_QUEUE_WT 0x28042C8
+#define CS40L50_DSP_QUEUE_RD 0x28042CC
+#define CS40L50_NUM_WAVES 0x2805C18
+#define CS40L50_CORE_BASE 0x2B80000
+#define CS40L50_YMEM_PACKED_0 0x2C00000
+#define CS40L50_YMEM_UNPACKED24_0 0x3400000
+#define CS40L50_PMEM_0 0x3800000
+#define CS40L50_DSP_POLL_US 1000
+#define CS40L50_DSP_TIMEOUT_COUNT 100
+#define CS40L50_RESET_PULSE_US 2200
+#define CS40L50_CP_READY_US 3100
+#define CS40L50_AUTOSUSPEND_MS 2000
+#define CS40L50_PM_ALGO 0x9F206
+#define CS40L50_GLOBAL_ERR_RLS_SET BIT(11)
+#define CS40L50_GLOBAL_ERR_RLS_CLEAR 0
+
+enum cs40l50_wseqs {
+ CS40L50_PWR_ON,
+ CS40L50_STANDBY,
+ CS40L50_ACTIVE,
+ CS40L50_NUM_WSEQS,
+};
+
+/* DSP Queue */
+#define CS40L50_DSP_QUEUE_BASE 0x11004
+#define CS40L50_DSP_QUEUE_END 0x1101C
+#define CS40L50_DSP_QUEUE 0x11020
+#define CS40L50_PREVENT_HIBER 0x2000003
+#define CS40L50_ALLOW_HIBER 0x2000004
+#define CS40L50_SHUTDOWN 0x2000005
+#define CS40L50_SYSTEM_RESET 0x2000007
+#define CS40L50_START_I2S 0x3000002
+#define CS40L50_OWT_PUSH 0x3000008
+#define CS40L50_STOP_PLAYBACK 0x5000000
+#define CS40L50_OWT_DELETE 0xD000000
+
+/* Firmware files */
+#define CS40L50_FW "cs40l50.wmfw"
+#define CS40L50_WT "cs40l50.bin"
+
+/* Device */
+#define CS40L50_DEVID 0x0
+#define CS40L50_REVID 0x4
+#define CS40L50_DEVID_A 0x40A50
+#define CS40L50_REVID_B0 0xB0
+
+struct cs40l50 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct cs_dsp dsp;
+ struct gpio_desc *reset_gpio;
+ struct regmap_irq_chip_data *irq_data;
+ const struct firmware *fw;
+ const struct firmware *bin;
+ struct cs_dsp_wseq wseqs[CS40L50_NUM_WSEQS];
+ int irq;
+ u32 devid;
+ u32 revid;
+};
+
+int cs40l50_dsp_write(struct device *dev, struct regmap *regmap, u32 val);
+int cs40l50_probe(struct cs40l50 *cs40l50);
+int cs40l50_remove(struct cs40l50 *cs40l50);
+
+extern const struct regmap_config cs40l50_regmap;
+extern const struct dev_pm_ops cs40l50_pm_ops;
+
+#endif /* __MFD_CS40L50_H__ */
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
index 0c706085c205..53a222605526 100644
--- a/include/linux/mfd/idt8a340_reg.h
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -61,7 +61,7 @@
#define HW_Q8_CTRL_SPARE (0xa7d4)
#define HW_Q11_CTRL_SPARE (0xa7ec)
-/**
+/*
* Select FOD5 as sync_trigger for Q8 divider.
* Transition from logic zero to one
* sets trigger to sync Q8 divider.
@@ -70,7 +70,7 @@
*/
#define Q9_TO_Q8_SYNC_TRIG BIT(1)
-/**
+/*
* Enable FOD5 as driver for clock and sync for Q8 divider.
* Enable fanout buffer for FOD5.
*
@@ -78,7 +78,7 @@
*/
#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-/**
+/*
* Select FOD6 as sync_trigger for Q11 divider.
* Transition from logic zero to one
* sets trigger to sync Q11 divider.
@@ -87,7 +87,7 @@
*/
#define Q10_TO_Q11_SYNC_TRIG BIT(1)
-/**
+/*
* Enable FOD6 as driver for clock and sync for Q11 divider.
* Enable fanout buffer for FOD6.
*
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
index 77092f6363ad..69059a7a2ce5 100644
--- a/include/linux/mfd/lm3533.h
+++ b/include/linux/mfd/lm3533.h
@@ -16,6 +16,7 @@
DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
struct device;
+struct gpio_desc;
struct regmap;
struct lm3533 {
@@ -23,7 +24,7 @@ struct lm3533 {
struct regmap *regmap;
- int gpio_hwen;
+ struct gpio_desc *hwen;
int irq;
unsigned have_als:1;
@@ -69,8 +70,6 @@ enum lm3533_boost_ovp {
};
struct lm3533_platform_data {
- int gpio_hwen;
-
enum lm3533_boost_ovp boost_ovp;
enum lm3533_boost_freq boost_freq;
diff --git a/include/linux/mfd/rohm-bd96801.h b/include/linux/mfd/rohm-bd96801.h
new file mode 100644
index 000000000000..e2d9e10b6364
--- /dev/null
+++ b/include/linux/mfd/rohm-bd96801.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2024 ROHM Semiconductors */
+
+#ifndef __MFD_BD96801_H__
+#define __MFD_BD96801_H__
+
+#define BD96801_REG_SSCG_CTRL 0x09
+#define BD96801_REG_SHD_INTB 0x20
+#define BD96801_LDO5_VOL_LVL_REG 0x2c
+#define BD96801_LDO6_VOL_LVL_REG 0x2d
+#define BD96801_LDO7_VOL_LVL_REG 0x2e
+#define BD96801_REG_BUCK_OVP 0x30
+#define BD96801_REG_BUCK_OVD 0x35
+#define BD96801_REG_LDO_OVP 0x31
+#define BD96801_REG_LDO_OVD 0x36
+#define BD96801_REG_BOOT_OVERTIME 0x3a
+#define BD96801_REG_WD_TMO 0x40
+#define BD96801_REG_WD_CONF 0x41
+#define BD96801_REG_WD_FEED 0x42
+#define BD96801_REG_WD_FAILCOUNT 0x43
+#define BD96801_REG_WD_ASK 0x46
+#define BD96801_REG_WD_STATUS 0x4a
+#define BD96801_REG_PMIC_STATE 0x4f
+#define BD96801_REG_EXT_STATE 0x50
+
+#define BD96801_STATE_STBY 0x09
+
+#define BD96801_LOCK_REG 0x04
+#define BD96801_UNLOCK 0x9d
+#define BD96801_LOCK 0x00
+
+/* IRQ register area */
+#define BD96801_REG_INT_MAIN 0x51
+
+/*
+ * The BD96801 has two physical IRQ lines, INTB and ERRB.
+ *
+ * The 'main status register' is located at 0x51.
+ * The ERRB status registers are located at 0x52 ... 0x5B
+ * INTB status registers are at range 0x5c ... 0x63
+ */
+#define BD96801_REG_INT_SYS_ERRB1 0x52
+#define BD96801_REG_INT_SYS_INTB 0x5c
+#define BD96801_REG_INT_LDO7_INTB 0x63
+
+/* MASK registers */
+#define BD96801_REG_MASK_SYS_INTB 0x73
+#define BD96801_REG_MASK_SYS_ERRB 0x69
+
+#define BD96801_MAX_REGISTER 0x7a
+
+#define BD96801_OTP_ERR_MASK BIT(0)
+#define BD96801_DBIST_ERR_MASK BIT(1)
+#define BD96801_EEP_ERR_MASK BIT(2)
+#define BD96801_ABIST_ERR_MASK BIT(3)
+#define BD96801_PRSTB_ERR_MASK BIT(4)
+#define BD96801_DRMOS1_ERR_MASK BIT(5)
+#define BD96801_DRMOS2_ERR_MASK BIT(6)
+#define BD96801_SLAVE_ERR_MASK BIT(7)
+#define BD96801_VREF_ERR_MASK BIT(0)
+#define BD96801_TSD_ERR_MASK BIT(1)
+#define BD96801_UVLO_ERR_MASK BIT(2)
+#define BD96801_OVLO_ERR_MASK BIT(3)
+#define BD96801_OSC_ERR_MASK BIT(4)
+#define BD96801_PON_ERR_MASK BIT(5)
+#define BD96801_POFF_ERR_MASK BIT(6)
+#define BD96801_CMD_SHDN_ERR_MASK BIT(7)
+#define BD96801_INT_PRSTB_WDT_ERR_MASK BIT(0)
+#define BD96801_INT_CHIP_IF_ERR_MASK BIT(3)
+#define BD96801_INT_SHDN_ERR_MASK BIT(7)
+#define BD96801_OUT_PVIN_ERR_MASK BIT(0)
+#define BD96801_OUT_OVP_ERR_MASK BIT(1)
+#define BD96801_OUT_UVP_ERR_MASK BIT(2)
+#define BD96801_OUT_SHDN_ERR_MASK BIT(7)
+
+/* ERRB IRQs */
+enum {
+ /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */
+ BD96801_OTP_ERR_STAT,
+ BD96801_DBIST_ERR_STAT,
+ BD96801_EEP_ERR_STAT,
+ BD96801_ABIST_ERR_STAT,
+ BD96801_PRSTB_ERR_STAT,
+ BD96801_DRMOS1_ERR_STAT,
+ BD96801_DRMOS2_ERR_STAT,
+ BD96801_SLAVE_ERR_STAT,
+ BD96801_VREF_ERR_STAT,
+ BD96801_TSD_ERR_STAT,
+ BD96801_UVLO_ERR_STAT,
+ BD96801_OVLO_ERR_STAT,
+ BD96801_OSC_ERR_STAT,
+ BD96801_PON_ERR_STAT,
+ BD96801_POFF_ERR_STAT,
+ BD96801_CMD_SHDN_ERR_STAT,
+ BD96801_INT_PRSTB_WDT_ERR,
+ BD96801_INT_CHIP_IF_ERR,
+ BD96801_INT_SHDN_ERR_STAT,
+
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ BD96801_BUCK1_PVIN_ERR_STAT,
+ BD96801_BUCK1_OVP_ERR_STAT,
+ BD96801_BUCK1_UVP_ERR_STAT,
+ BD96801_BUCK1_SHDN_ERR_STAT,
+
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ BD96801_BUCK2_PVIN_ERR_STAT,
+ BD96801_BUCK2_OVP_ERR_STAT,
+ BD96801_BUCK2_UVP_ERR_STAT,
+ BD96801_BUCK2_SHDN_ERR_STAT,
+
+ /* Reg 0x57 BUCK3 ERR IRQs */
+ BD96801_BUCK3_PVIN_ERR_STAT,
+ BD96801_BUCK3_OVP_ERR_STAT,
+ BD96801_BUCK3_UVP_ERR_STAT,
+ BD96801_BUCK3_SHDN_ERR_STAT,
+
+ /* Reg 0x58 BUCK4 ERR IRQs */
+ BD96801_BUCK4_PVIN_ERR_STAT,
+ BD96801_BUCK4_OVP_ERR_STAT,
+ BD96801_BUCK4_UVP_ERR_STAT,
+ BD96801_BUCK4_SHDN_ERR_STAT,
+
+ /* Reg 0x59 LDO5 ERR IRQs */
+ BD96801_LDO5_PVIN_ERR_STAT,
+ BD96801_LDO5_OVP_ERR_STAT,
+ BD96801_LDO5_UVP_ERR_STAT,
+ BD96801_LDO5_SHDN_ERR_STAT,
+
+ /* Reg 0x5a LDO6 ERR IRQs */
+ BD96801_LDO6_PVIN_ERR_STAT,
+ BD96801_LDO6_OVP_ERR_STAT,
+ BD96801_LDO6_UVP_ERR_STAT,
+ BD96801_LDO6_SHDN_ERR_STAT,
+
+ /* Reg 0x5b LDO7 ERR IRQs */
+ BD96801_LDO7_PVIN_ERR_STAT,
+ BD96801_LDO7_OVP_ERR_STAT,
+ BD96801_LDO7_UVP_ERR_STAT,
+ BD96801_LDO7_SHDN_ERR_STAT,
+};
+
+/* INTB IRQs */
+enum {
+ /* Reg 0x5c (System INTB) */
+ BD96801_TW_STAT,
+ BD96801_WDT_ERR_STAT,
+ BD96801_I2C_ERR_STAT,
+ BD96801_CHIP_IF_ERR_STAT,
+
+ /* Reg 0x5d (BUCK1 INTB) */
+ BD96801_BUCK1_OCPH_STAT,
+ BD96801_BUCK1_OCPL_STAT,
+ BD96801_BUCK1_OCPN_STAT,
+ BD96801_BUCK1_OVD_STAT,
+ BD96801_BUCK1_UVD_STAT,
+ BD96801_BUCK1_TW_CH_STAT,
+
+ /* Reg 0x5e (BUCK2 INTB) */
+ BD96801_BUCK2_OCPH_STAT,
+ BD96801_BUCK2_OCPL_STAT,
+ BD96801_BUCK2_OCPN_STAT,
+ BD96801_BUCK2_OVD_STAT,
+ BD96801_BUCK2_UVD_STAT,
+ BD96801_BUCK2_TW_CH_STAT,
+
+ /* Reg 0x5f (BUCK3 INTB)*/
+ BD96801_BUCK3_OCPH_STAT,
+ BD96801_BUCK3_OCPL_STAT,
+ BD96801_BUCK3_OCPN_STAT,
+ BD96801_BUCK3_OVD_STAT,
+ BD96801_BUCK3_UVD_STAT,
+ BD96801_BUCK3_TW_CH_STAT,
+
+ /* Reg 0x60 (BUCK4 INTB)*/
+ BD96801_BUCK4_OCPH_STAT,
+ BD96801_BUCK4_OCPL_STAT,
+ BD96801_BUCK4_OCPN_STAT,
+ BD96801_BUCK4_OVD_STAT,
+ BD96801_BUCK4_UVD_STAT,
+ BD96801_BUCK4_TW_CH_STAT,
+
+ /* Reg 0x61 (LDO5 INTB) */
+ BD96801_LDO5_OCPH_STAT, /* bit [0] */
+ BD96801_LDO5_OVD_STAT, /* bit [3] */
+ BD96801_LDO5_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x62 (LDO6 INTB) */
+ BD96801_LDO6_OCPH_STAT, /* bit [0] */
+ BD96801_LDO6_OVD_STAT, /* bit [3] */
+ BD96801_LDO6_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x63 (LDO7 INTB) */
+ BD96801_LDO7_OCPH_STAT, /* bit [0] */
+ BD96801_LDO7_OVD_STAT, /* bit [3] */
+ BD96801_LDO7_UVD_STAT, /* bit [4] */
+};
+
+/* IRQ MASKs */
+#define BD96801_TW_STAT_MASK BIT(0)
+#define BD96801_WDT_ERR_STAT_MASK BIT(1)
+#define BD96801_I2C_ERR_STAT_MASK BIT(2)
+#define BD96801_CHIP_IF_ERR_STAT_MASK BIT(3)
+
+#define BD96801_BUCK_OCPH_STAT_MASK BIT(0)
+#define BD96801_BUCK_OCPL_STAT_MASK BIT(1)
+#define BD96801_BUCK_OCPN_STAT_MASK BIT(2)
+#define BD96801_BUCK_OVD_STAT_MASK BIT(3)
+#define BD96801_BUCK_UVD_STAT_MASK BIT(4)
+#define BD96801_BUCK_TW_CH_STAT_MASK BIT(5)
+
+#define BD96801_LDO_OCPH_STAT_MASK BIT(0)
+#define BD96801_LDO_OVD_STAT_MASK BIT(3)
+#define BD96801_LDO_UVD_STAT_MASK BIT(4)
+
+#endif
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 4eeb22876bad..e7d4e6afe388 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -16,6 +16,7 @@ enum rohm_chip_type {
ROHM_CHIP_TYPE_BD71828,
ROHM_CHIP_TYPE_BD71837,
ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_BD96801,
ROHM_CHIP_TYPE_AMOUNT
};
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 9eb17481b07f..f09ba598c97a 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -12,97 +12,106 @@
#include <linux/dma-mapping.h>
#include <linux/regmap.h>
-#define TIM_CR1 0x00 /* Control Register 1 */
-#define TIM_CR2 0x04 /* Control Register 2 */
-#define TIM_SMCR 0x08 /* Slave mode control reg */
-#define TIM_DIER 0x0C /* DMA/interrupt register */
-#define TIM_SR 0x10 /* Status register */
-#define TIM_EGR 0x14 /* Event Generation Reg */
-#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
-#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
-#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
-#define TIM_CNT 0x24 /* Counter */
-#define TIM_PSC 0x28 /* Prescaler */
-#define TIM_ARR 0x2c /* Auto-Reload Register */
-#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
-#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */
-#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
-#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
-#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
-#define TIM_DCR 0x48 /* DMA control register */
-#define TIM_DMAR 0x4C /* DMA register for transfer */
-#define TIM_TISEL 0x68 /* Input Selection */
+#define TIM_CR1 0x00 /* Control Register 1 */
+#define TIM_CR2 0x04 /* Control Register 2 */
+#define TIM_SMCR 0x08 /* Slave mode control reg */
+#define TIM_DIER 0x0C /* DMA/interrupt register */
+#define TIM_SR 0x10 /* Status register */
+#define TIM_EGR 0x14 /* Event Generation Reg */
+#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
+#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
+#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
+#define TIM_PSC 0x28 /* Prescaler */
+#define TIM_ARR 0x2c /* Auto-Reload Register */
+#define TIM_CCRx(x) (0x34 + 4 * ((x) - 1)) /* Capt/Comp Register x (x ∈ {1, .. 4}) */
+#define TIM_CCR1 TIM_CCRx(1) /* Capt/Comp Register 1 */
+#define TIM_CCR2 TIM_CCRx(2) /* Capt/Comp Register 2 */
+#define TIM_CCR3 TIM_CCRx(3) /* Capt/Comp Register 3 */
+#define TIM_CCR4 TIM_CCRx(4) /* Capt/Comp Register 4 */
+#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+#define TIM_DCR 0x48 /* DMA control register */
+#define TIM_DMAR 0x4C /* DMA register for transfer */
+#define TIM_TISEL 0x68 /* Input Selection */
-#define TIM_CR1_CEN BIT(0) /* Counter Enable */
-#define TIM_CR1_DIR BIT(4) /* Counter Direction */
-#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
-#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
-#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
-#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
-#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
-#define TIM_DIER_UIE BIT(0) /* Update interrupt */
-#define TIM_DIER_CC1IE BIT(1) /* CC1 Interrupt Enable */
-#define TIM_DIER_CC2IE BIT(2) /* CC2 Interrupt Enable */
-#define TIM_DIER_CC3IE BIT(3) /* CC3 Interrupt Enable */
-#define TIM_DIER_CC4IE BIT(4) /* CC4 Interrupt Enable */
-#define TIM_DIER_CC_IE(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt enable */
-#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
-#define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */
-#define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */
-#define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */
-#define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */
-#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
-#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
-#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
-#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */
-#define TIM_EGR_UG BIT(0) /* Update Generation */
-#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
-#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
-#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
-#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
-#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
-#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
-#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
-#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
-#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
-#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
-#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */
-#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */
-#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */
-#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */
-#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
-#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
-#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
-#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
-#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
-#define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */
-#define TIM_CCER_CC2NP BIT(7) /* Capt/Comp 2N Polarity */
-#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
-#define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */
-#define TIM_CCER_CC3NP BIT(11) /* Capt/Comp 3N Polarity */
-#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
-#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */
-#define TIM_CCER_CC4NP BIT(15) /* Capt/Comp 4N Polarity */
-#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
-#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
-#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
-#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
-#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
-#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
-#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
-#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
+#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
+#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
+#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
+#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
+#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
+#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
+#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_DIER_CCxIE(x) BIT(1 + ((x) - 1)) /* CCx Interrupt Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1IE TIM_DIER_CCxIE(1) /* CC1 Interrupt Enable */
+#define TIM_DIER_CC2IE TIM_DIER_CCxIE(2) /* CC2 Interrupt Enable */
+#define TIM_DIER_CC3IE TIM_DIER_CCxIE(3) /* CC3 Interrupt Enable */
+#define TIM_DIER_CC4IE TIM_DIER_CCxIE(4) /* CC4 Interrupt Enable */
+#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
+#define TIM_DIER_CCxDE(x) BIT(9 + ((x) - 1)) /* CCx DMA request Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1DE TIM_DIER_CCxDE(1) /* CC1 DMA request Enable */
+#define TIM_DIER_CC2DE TIM_DIER_CCxDE(2) /* CC2 DMA request Enable */
+#define TIM_DIER_CC3DE TIM_DIER_CCxDE(3) /* CC3 DMA request Enable */
+#define TIM_DIER_CC4DE TIM_DIER_CCxDE(4) /* CC4 DMA request Enable */
+#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
+#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
+#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
+#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */
+#define TIM_EGR_UG BIT(0) /* Update Generation */
+#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
+#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
+#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
+#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
+#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
+#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
+#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
+#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */
+#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */
+#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */
+#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */
+#define TIM_CCER_CCxE(x) BIT(0 + 4 * ((x) - 1)) /* Capt/Comp x out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxP(x) BIT(1 + 4 * ((x) - 1)) /* Capt/Comp x Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNE(x) BIT(2 + 4 * ((x) - 1)) /* Capt/Comp xN out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNP(x) BIT(3 + 4 * ((x) - 1)) /* Capt/Comp xN Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CC1E TIM_CCER_CCxE(1) /* Capt/Comp 1 out Ena */
+#define TIM_CCER_CC1P TIM_CCER_CCxP(1) /* Capt/Comp 1 Polarity */
+#define TIM_CCER_CC1NE TIM_CCER_CCxNE(1) /* Capt/Comp 1N out Ena */
+#define TIM_CCER_CC1NP TIM_CCER_CCxNP(1) /* Capt/Comp 1N Polarity */
+#define TIM_CCER_CC2E TIM_CCER_CCxE(2) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC2P TIM_CCER_CCxP(2) /* Capt/Comp 2 Polarity */
+#define TIM_CCER_CC2NE TIM_CCER_CCxNE(2) /* Capt/Comp 2N out Ena */
+#define TIM_CCER_CC2NP TIM_CCER_CCxNP(2) /* Capt/Comp 2N Polarity */
+#define TIM_CCER_CC3E TIM_CCER_CCxE(3) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC3P TIM_CCER_CCxP(3) /* Capt/Comp 3 Polarity */
+#define TIM_CCER_CC3NE TIM_CCER_CCxNE(3) /* Capt/Comp 3N out Ena */
+#define TIM_CCER_CC3NP TIM_CCER_CCxNP(3) /* Capt/Comp 3N Polarity */
+#define TIM_CCER_CC4E TIM_CCER_CCxE(4) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CC4P TIM_CCER_CCxP(4) /* Capt/Comp 4 Polarity */
+#define TIM_CCER_CC4NE TIM_CCER_CCxNE(4) /* Capt/Comp 4N out Ena */
+#define TIM_CCER_CC4NP TIM_CCER_CCxNP(4) /* Capt/Comp 4N Polarity */
+#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
+#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
+#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
+#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
+#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
+#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
+#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
+#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
-#define MAX_TIM_PSC 0xFFFF
-#define MAX_TIM_ICPSC 0x3
-#define TIM_CR2_MMS_SHIFT 4
-#define TIM_CR2_MMS2_SHIFT 20
+#define MAX_TIM_PSC 0xFFFF
+#define MAX_TIM_ICPSC 0x3
+#define TIM_CR2_MMS_SHIFT 4
+#define TIM_CR2_MMS2_SHIFT 20
#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
-#define TIM_SMCR_TS_SHIFT 4
-#define TIM_BDTR_BKF_MASK 0xF
-#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+#define TIM_SMCR_TS_SHIFT 4
+#define TIM_BDTR_BKF_MASK 0xF
+#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
enum stm32_timers_dmas {
STM32_TIMERS_DMA_CH1,
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index c315903f6dab..aad9c6b50463 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -28,6 +28,8 @@ struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
unsigned int *out_args);
struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
const char *property);
+int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap);
#else
static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
@@ -67,6 +69,12 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
return NULL;
}
+static inline int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
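
of_syscon_register_regmap() lets a platform driver hand an already-created regmap to the syscon framework so other drivers can find it through the usual phandle lookups; the stub returns -EOPNOTSUPP when syscon support is not built in. A rough probe-time sketch (the mapped base and example_regmap_cfg are assumptions):

static int example_register_syscon(struct device *dev, void __iomem *base)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_mmio(dev, base, &example_regmap_cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return of_syscon_register_regmap(dev->of_node, regmap);
}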
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
deleted file mode 100644
index eace8ea6cda0..000000000000
--- a/include/linux/mfd/tmio.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MFD_TMIO_H
-#define MFD_TMIO_H
-
-#include <linux/device.h>
-#include <linux/fb.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/card.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#define tmio_ioread8(addr) readb(addr)
-#define tmio_ioread16(addr) readw(addr)
-#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
-#define tmio_ioread32(addr) \
- (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16))
-
-#define tmio_iowrite8(val, addr) writeb((val), (addr))
-#define tmio_iowrite16(val, addr) writew((val), (addr))
-#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
-#define tmio_iowrite32(val, addr) \
- do { \
- writew((val), (addr)); \
- writew((val) >> 16, (addr) + 2); \
- } while (0)
-
-#define sd_config_write8(base, shift, reg, val) \
- tmio_iowrite8((val), (base) + ((reg) << (shift)))
-#define sd_config_write16(base, shift, reg, val) \
- tmio_iowrite16((val), (base) + ((reg) << (shift)))
-#define sd_config_write32(base, shift, reg, val) \
- do { \
- tmio_iowrite16((val), (base) + ((reg) << (shift))); \
- tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
- } while (0)
-
-/* tmio MMC platform flags */
-/*
- * Some controllers can support a 2-byte block size when the bus width
- * is configured in 4-bit mode.
- */
-#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
-/*
- * Some controllers can support SDIO IRQ signalling.
- */
-#define TMIO_MMC_SDIO_IRQ BIT(2)
-
-/* Some features are only available or tested on R-Car Gen2 or later */
-#define TMIO_MMC_MIN_RCAR2 BIT(3)
-
-/*
- * Some controllers require waiting for the SD bus to become
- * idle before writing to some registers.
- */
-#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-
-/*
- * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
- * we don't have documentation for old variants, so we enable only known good
- * variants with this flag. Can be removed once all variants are known good.
- */
-#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
-
-/*
- * Some controllers have CMD12 automatically
- * issue/non-issue register
- */
-#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
-
-/* Controller has some SDIO status bits which must be 1 */
-#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
-
-/*
- * Some controllers have a 32-bit wide data port register
- */
-#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
-
-/*
- * Some controllers allows to set SDx actual clock
- */
-#define TMIO_MMC_CLK_ACTUAL BIT(10)
-
-/* Some controllers have a CBSY bit */
-#define TMIO_MMC_HAVE_CBSY BIT(11)
-
-struct dma_chan;
-
-/*
- * data for the MMC controller
- */
-struct tmio_mmc_data {
- void *chan_priv_tx;
- void *chan_priv_rx;
- unsigned int hclk;
- unsigned long capabilities;
- unsigned long capabilities2;
- unsigned long flags;
- u32 ocr_mask; /* available voltages */
- dma_addr_t dma_rx_offset;
- unsigned int max_blk_count;
- unsigned short max_segs;
- void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
-};
-
-/*
- * data for the NAND controller
- */
-struct tmio_nand_data {
- struct nand_bbt_descr *badblock_pattern;
- struct mtd_partition *partition;
- unsigned int num_partitions;
- const char *const *part_parsers;
-};
-
-#define FBIO_TMIO_ACC_WRITE 0x7C639300
-#define FBIO_TMIO_ACC_SYNC 0x7C639301
-
-struct tmio_fb_data {
- int (*lcd_set_power)(struct platform_device *fb_dev,
- bool on);
- int (*lcd_mode)(struct platform_device *fb_dev,
- const struct fb_videomode *mode);
- int num_modes;
- struct fb_videomode *modes;
-
- /* in mm: size of screen */
- int height;
- int width;
-};
-
-#endif
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 860ec0a16c96..e5373c302722 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -314,6 +314,5 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-void tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
index 26b04f73f214..995db62570f9 100644
--- a/include/linux/mii_timestamper.h
+++ b/include/linux/mii_timestamper.h
@@ -59,7 +59,7 @@ struct mii_timestamper {
struct phy_device *phydev);
int (*ts_info)(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *ts_info);
+ struct kernel_ethtool_ts_info *ts_info);
struct device *device;
};
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index e799b1f8d05b..49eef10c8e59 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -9,15 +9,16 @@
#define _MISC_CGROUP_H_
/**
- * Types of misc cgroup entries supported by the host.
+ * enum misc_res_type - Types of misc cgroup entries supported by the host.
*/
enum misc_res_type {
#ifdef CONFIG_KVM_AMD_SEV
- /* AMD SEV ASIDs resource */
+ /** @MISC_CG_RES_SEV: AMD SEV ASIDs resource */
MISC_CG_RES_SEV,
- /* AMD SEV-ES ASIDs resource */
+ /** @MISC_CG_RES_SEV_ES: AMD SEV-ES ASIDs resource */
MISC_CG_RES_SEV_ES,
#endif
+ /** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
MISC_CG_RES_TYPES
};
@@ -30,13 +31,16 @@ struct misc_cg;
/**
* struct misc_res: Per cgroup per misc type resource
* @max: Maximum limit on the resource.
+ * @watermark: Historical maximum usage of the resource.
* @usage: Current usage of the resource.
* @events: Number of times, the resource limit exceeded.
*/
struct misc_res {
u64 max;
+ atomic64_t watermark;
atomic64_t usage;
atomic64_t events;
+ atomic64_t events_local;
};
/**
@@ -50,6 +54,8 @@ struct misc_cg {
/* misc.events */
struct cgroup_file events_file;
+ /* misc.events.local */
+ struct cgroup_file events_local_file;
struct misc_res res[MISC_CG_RES_TYPES];
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index d7bb31d9a446..da09bfaa7b81 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -294,6 +294,7 @@ enum {
#define MLX5_UMR_FLEX_ALIGNMENT 0x40
#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
+#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm))
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 779cfdf2e9d6..0d31f77396fc 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -766,6 +766,12 @@ struct mlx5_hca_cap {
u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
};
+enum mlx5_wc_state {
+ MLX5_WC_STATE_UNINITIALIZED,
+ MLX5_WC_STATE_UNSUPPORTED,
+ MLX5_WC_STATE_SUPPORTED,
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -824,6 +830,9 @@ struct mlx5_core_dev {
#endif
u64 num_ipsec_offloads;
struct mlx5_sd *sd;
+ enum mlx5_wc_state wc_state;
+ /* sync write combining state */
+ struct mutex wc_state_lock;
};
struct mlx5_db {
@@ -1375,4 +1384,6 @@ static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
enum {
MLX5_OCTWORD = 16,
};
+
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f468763478ae..c176953edcf8 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1093,7 +1093,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 tunnel_stateless_ip_over_ip_tx[0x1];
u8 reserved_at_2e[0x2];
u8 max_vxlan_udp_ports[0x8];
- u8 reserved_at_38[0x6];
+ u8 swp_csum_l4_partial[0x1];
+ u8 reserved_at_39[0x5];
u8 max_geneve_opt_len[0x1];
u8 tunnel_stateless_geneve_rx[0x1];
@@ -1526,8 +1527,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ts_cqe_to_dest_cqn[0x1];
u8 reserved_at_b3[0x6];
u8 go_back_n[0x1];
- u8 shampo[0x1];
- u8 reserved_at_bb[0x5];
+ u8 reserved_at_ba[0x6];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
@@ -1744,7 +1744,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0x10];
+ u8 reserved_at_2a0[0xb];
+ u8 shampo[0x1];
+ u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
u8 max_flow_counter_31_16[0x10];
@@ -1992,7 +1994,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 migration_tracking_state[0x1];
u8 reserved_at_ca[0x6];
u8 migration_in_chunks[0x1];
- u8 reserved_at_d1[0xf];
+ u8 reserved_at_d1[0x1];
+ u8 sf_eq_usage[0x1];
+ u8 reserved_at_d3[0xd];
u8 cross_vhca_object_to_object_supported[0x20];
@@ -2017,7 +2021,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_250[0x10];
u8 reserved_at_260[0x120];
- u8 reserved_at_380[0x10];
+ u8 reserved_at_380[0xb];
+ u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
u8 reserved_at_3a0[0x10];
@@ -2029,7 +2034,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 pcc_ifa2[0x1];
u8 reserved_at_3f1[0xf];
- u8 reserved_at_400[0x400];
+ u8 reserved_at_400[0x1];
+ u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1];
+ u8 reserved_at_402[0x1e];
+
+ u8 reserved_at_420[0x20];
+
+ u8 reserved_at_440[0x8];
+ u8 max_num_eqs_24b[0x18];
+ u8 reserved_at_460[0x3a0];
};
enum mlx5_ifc_flow_destination_type {
@@ -3908,7 +3921,7 @@ enum {
};
enum {
- ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0,
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
@@ -5629,7 +5642,11 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0xa0];
+ u8 reserved_at_320[0x60];
+
+ u8 req_rnr_retries_exceeded[0x20];
+
+ u8 reserved_at_3a0[0x20];
u8 resp_local_length_error[0x20];
@@ -10308,9 +10325,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mfrl[0x1];
u8 regs_39_to_32[0x8];
- u8 regs_31_to_10[0x16];
+ u8 regs_31_to_11[0x15];
u8 mtmp[0x1];
- u8 regs_8_to_0[0x9];
+ u8 regs_9_to_0[0xa];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d4..eb7c96d24ac0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -406,6 +406,11 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ALLOW_ANY_UNCACHED VM_NONE
#endif
+#ifdef CONFIG_64BIT
+/* VM is sealed, in vm_flags */
+#define VM_SEALED _BITUL(63)
+#endif
+
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
@@ -3776,14 +3781,7 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
- &init_on_free);
-}
-
-DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free);
-static inline bool want_init_mlocked_on_free(void)
-{
- return static_branch_maybe(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON,
- &init_mlocked_on_free);
+ &init_on_free);
}
extern bool _debug_pagealloc_enabled_early;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 24323c7d0bd4..af3a0256fa93 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -985,7 +985,7 @@ struct mm_struct {
* Represent how many empty pages are merged with kernel zero
* pages when enabling KSM use_zero_pages.
*/
- unsigned long ksm_zero_pages;
+ atomic_long_t ksm_zero_pages;
#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8f9c9590a42c..1dc6248feb83 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -654,13 +654,12 @@ enum zone_watermarks {
};
/*
- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
- * for THP which will usually be GFP_MOVABLE. Even if it is another type,
- * it should not contribute to serious fragmentation causing THP allocation
- * failures.
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
+ * are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list
+ * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define NR_PCP_THP 1
+#define NR_PCP_THP 2
#else
#define NR_PCP_THP 0
#endif
@@ -1980,8 +1979,9 @@ static inline int subsection_map_index(unsigned long pfn)
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
+ struct mem_section_usage *usage = READ_ONCE(ms->usage);
- return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+ return usage ? test_bit(idx, usage->subsection_map) : 0;
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
diff --git a/include/linux/module.h b/include/linux/module.h
index ffa1c603163c..4213d8993cd8 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -509,7 +509,9 @@ struct module {
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
unsigned int btf_data_size;
+ unsigned int btf_base_data_size;
void *btf_data;
+ void *btf_base_data;
#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
@@ -931,11 +933,11 @@ int module_kallsyms_on_each_symbol(const char *modname,
* least KSYM_NAME_LEN long: a pointer to namebuf is returned if
* found, otherwise NULL.
*/
-const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname, const unsigned char **modbuildid,
- char *namebuf);
+int module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, const unsigned char **modbuildid,
+ char *namebuf);
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr,
unsigned long *size,
@@ -964,14 +966,14 @@ static inline int module_kallsyms_on_each_symbol(const char *modname,
}
/* For kallsyms to ask for address resolution. NULL means not found. */
-static inline const char *module_address_lookup(unsigned long addr,
+static inline int module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname,
const unsigned char **modbuildid,
char *namebuf)
{
- return NULL;
+ return 0;
}
static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 967aa9ea9f96..8ec8fed3bce8 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -50,13 +50,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
extern int path_pts(struct path *path);
-extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-
-static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
- struct path *path)
-{
- return user_path_at_empty(dfd, name, flags, path, NULL);
-}
+extern int user_path_at(int, const char __user *, unsigned, struct path *);
struct dentry *lookup_one_qstr_excl(const struct qstr *name,
struct dentry *base,
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
index eb01c37e71e0..662074b08c94 100644
--- a/include/linux/net_tstamp.h
+++ b/include/linux/net_tstamp.h
@@ -5,7 +5,16 @@
#include <uapi/linux/net_tstamp.h>
+#define SOF_TIMESTAMPING_SOFTWARE_MASK (SOF_TIMESTAMPING_RX_SOFTWARE | \
+ SOF_TIMESTAMPING_TX_SOFTWARE | \
+ SOF_TIMESTAMPING_SOFTWARE)
+
+#define SOF_TIMESTAMPING_HARDWARE_MASK (SOF_TIMESTAMPING_RX_HARDWARE | \
+ SOF_TIMESTAMPING_TX_HARDWARE | \
+ SOF_TIMESTAMPING_RAW_HARDWARE)
+
enum hwtstamp_source {
+ HWTSTAMP_SOURCE_UNSPEC,
HWTSTAMP_SOURCE_NETDEV,
HWTSTAMP_SOURCE_PHYLIB,
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d20c6c99eb88..607009150b5f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -43,6 +43,7 @@
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
+#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
@@ -79,6 +80,7 @@ struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
+struct ethtool_netdev_state;
typedef u32 xdp_features_t;
@@ -1817,7 +1819,8 @@ enum netdev_reg_state {
* @priv_flags: Like 'flags' but invisible to userspace,
* see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
- * @padded: How much padding added by alloc_netdev()
+ * @priv_len: Size of the ->priv flexible array
+ * @priv: Flexible array containing private data
* @operstate: RFC2863 operstate
* @link_mode: Mapping policy to operstate
* @if_port: Selectable AUI, TP, ...
@@ -1985,8 +1988,6 @@ enum netdev_reg_state {
* switch driver and used to set the phys state of the
* switch port.
*
- * @wol_enabled: Wake-on-LAN is enabled
- *
* @threaded: napi threaded mode is enabled
*
* @net_notifier_list: List of per-net netdev notifier block
@@ -1998,6 +1999,7 @@ enum netdev_reg_state {
* @udp_tunnel_nic_info: static structure describing the UDP tunnel
* offload capabilities of the device
* @udp_tunnel_nic: UDP tunnel offload state
+ * @ethtool: ethtool related state
* @xdp_state: stores info on attached XDP BPF programs
*
* @nested_level: Used as a parameter of spin_lock_nested() of
@@ -2198,10 +2200,10 @@ struct net_device {
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
- unsigned short padded;
+ int irq;
+ u32 priv_len;
spinlock_t addr_list_lock;
- int irq;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
@@ -2372,7 +2374,6 @@ struct net_device {
struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
bool threaded;
- unsigned wol_enabled:1;
struct list_head net_notifier_list;
@@ -2383,6 +2384,8 @@ struct net_device {
const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
struct udp_tunnel_nic *udp_tunnel_nic;
+ struct ethtool_netdev_state *ethtool;
+
/* protected by rtnl_lock */
struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
@@ -2401,7 +2404,13 @@ struct net_device {
/** @page_pools: page pools created for this netdevice */
struct hlist_head page_pools;
#endif
-};
+
+ /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
+ struct dim_irq_moder *irq_moder;
+
+ u8 priv[] ____cacheline_aligned
+ __counted_by(priv_len);
+} ____cacheline_aligned;
#define to_net_dev(d) container_of(d, struct net_device, dev)
/*
@@ -2591,7 +2600,7 @@ void dev_net_set(struct net_device *dev, struct net *net)
*/
static inline void *netdev_priv(const struct net_device *dev)
{
- return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
+ return (void *)dev->priv;
}
/* Set the sysfs physical device reference for the network logical device
@@ -2731,12 +2740,12 @@ struct pcpu_sw_netstats {
} __aligned(4 * sizeof(u64));
struct pcpu_dstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_drops;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_drops;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_drops;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_drops;
struct u64_stats_sync syncp;
} __aligned(8 * sizeof(u64));
@@ -3021,7 +3030,8 @@ int call_netdevice_notifiers_info(unsigned long val,
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
#define for_each_netdev_dump(net, d, ifindex) \
- xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))
+ for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \
+ ULONG_MAX, XA_PRESENT)); ifindex++)
static inline struct net_device *next_net_device(struct net_device *dev)
{
@@ -3121,7 +3131,6 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
-void netdev_freemem(struct net_device *dev);
void init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
@@ -3200,6 +3209,7 @@ static inline bool dev_has_header(const struct net_device *dev)
struct softnet_data {
struct list_head poll_list;
struct sk_buff_head process_queue;
+ local_lock_t process_queue_bh_lock;
/* stats */
unsigned int processed;
@@ -3222,13 +3232,7 @@ struct softnet_data {
struct sk_buff_head xfrm_backlog;
#endif
/* written and read only by owning cpu: */
- struct {
- u16 recursion;
- u8 more;
-#ifdef CONFIG_NET_EGRESS
- u8 skip_txqueue;
-#endif
- } xmit;
+ struct netdev_xmit xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3256,10 +3260,18 @@ struct softnet_data {
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}
+#else
+static inline int dev_recursion_level(void)
+{
+ return current->net_xmit.recursion;
+}
+
+#endif
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
@@ -3903,9 +3915,6 @@ int generic_hwtstamp_get_lower(struct net_device *dev,
int generic_hwtstamp_set_lower(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_cfg,
struct netlink_ext_ack *extack);
-int dev_set_hwtstamp_phylib(struct net_device *dev,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
@@ -4874,18 +4883,35 @@ static inline ktime_t netdev_get_tstamp(struct net_device *dev,
return hwtstamps->hwtstamp;
}
-static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
- struct sk_buff *skb, struct net_device *dev,
- bool more)
+#ifndef CONFIG_PREEMPT_RT
+static inline void netdev_xmit_set_more(bool more)
{
__this_cpu_write(softnet_data.xmit.more, more);
- return ops->ndo_start_xmit(skb, dev);
}
static inline bool netdev_xmit_more(void)
{
return __this_cpu_read(softnet_data.xmit.more);
}
+#else
+static inline void netdev_xmit_set_more(bool more)
+{
+ current->net_xmit.more = more;
+}
+
+static inline bool netdev_xmit_more(void)
+{
+ return current->net_xmit.more;
+}
+#endif
+
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+ struct sk_buff *skb, struct net_device *dev,
+ bool more)
+{
+ netdev_xmit_set_more(more);
+ return ops->ndo_start_xmit(skb, dev);
+}
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
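
With the rework above, netdev_priv() simply returns the ->priv flexible array that alloc_netdev() sizes via ->priv_len, instead of doing pointer arithmetic past a padded struct. A minimal sketch of the unchanged driver-side usage (foo_priv and foo_create are hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* hypothetical per-device private state */
struct foo_priv {
        int irq;
        void __iomem *regs;
};

static int foo_create(void)
{
        struct net_device *dev;
        struct foo_priv *priv;

        /* alloc_etherdev() reserves sizeof(struct foo_priv) in dev->priv */
        dev = alloc_etherdev(sizeof(struct foo_priv));
        if (!dev)
                return -ENOMEM;

        /* netdev_priv() now just returns dev->priv */
        priv = netdev_priv(dev);
        priv->irq = -1;

        free_netdev(dev);
        return 0;
}
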
diff --git a/include/linux/netdevice_xmit.h b/include/linux/netdevice_xmit.h
new file mode 100644
index 000000000000..38325e070296
--- /dev/null
+++ b/include/linux/netdevice_xmit.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_NETDEVICE_XMIT_H
+#define _LINUX_NETDEVICE_XMIT_H
+
+struct netdev_xmit {
+ u16 recursion;
+ u8 more;
+#ifdef CONFIG_NET_EGRESS
+ u8 skip_txqueue;
+#endif
+};
+
+#endif
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index d2d291a9cdad..5d0288938cc2 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -68,6 +68,7 @@ struct netfs_inode {
loff_t remote_i_size; /* Size of the remote file */
loff_t zero_point; /* Size after which we assume there's no data
* on the server */
+ atomic_t io_count; /* Number of outstanding reqs */
unsigned long flags;
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
@@ -474,6 +475,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
ctx->remote_i_size = i_size_read(&ctx->inode);
ctx->zero_point = LLONG_MAX;
ctx->flags = 0;
+ atomic_set(&ctx->io_count, 0);
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
@@ -517,4 +519,20 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
#endif
}
+/**
+ * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
+ * @inode: The netfs inode to wait on
+ *
+ * Wait for outstanding I/O requests of any type to complete. This is intended
+ * to be called from inode eviction routines. This makes sure that any
+ * resources held by those requests are cleaned up before we let the inode get
+ * cleaned up.
+ */
+static inline void netfs_wait_for_outstanding_io(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
+}
+
#endif /* _LINUX_NETFS_H */
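
As the kernel-doc above notes, netfs_wait_for_outstanding_io() is meant for inode eviction paths. A minimal sketch of a netfs-backed filesystem using it (myfs_evict_inode is hypothetical; the other calls are the usual eviction helpers):

#include <linux/fs.h>
#include <linux/netfs.h>

static void myfs_evict_inode(struct inode *inode)
{
        /* let in-flight read/write requests release their resources first */
        netfs_wait_for_outstanding_io(inode);

        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
}
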
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 5df7340d4dab..b332c2048c75 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -47,7 +47,6 @@ struct netlink_kernel_cfg {
unsigned int groups;
unsigned int flags;
void (*input)(struct sk_buff *skb);
- struct mutex *cb_mutex;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
void (*release) (struct sock *sk, unsigned long *groups);
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 5601d14e2886..dab6a1734a22 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -42,6 +42,17 @@ struct nsproxy {
};
extern struct nsproxy init_nsproxy;
+#define to_ns_common(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: &(__ns->ns), \
+ struct ipc_namespace *: &(__ns->ns), \
+ struct net *: &(__ns->ns), \
+ struct pid_namespace *: &(__ns->ns), \
+ struct mnt_namespace *: &(__ns->ns), \
+ struct time_namespace *: &(__ns->ns), \
+ struct user_namespace *: &(__ns->ns), \
+ struct uts_namespace *: &(__ns->ns))
+
/*
* A structure to encompass all bits needed to install
* a partial or complete new set of namespaces.
@@ -112,4 +123,6 @@ static inline void get_nsproxy(struct nsproxy *ns)
refcount_inc(&ns->count);
}
+DEFINE_FREE(put_nsproxy, struct nsproxy *, if (_T) put_nsproxy(_T))
+
#endif
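
A short sketch of what the two additions enable: to_ns_common() selects the right &...->ns member at compile time via _Generic, and the put_nsproxy free class pairs with the __free() cleanup attribute. The helper below is hypothetical:

#include <linux/cleanup.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static void example_print_pidns(struct task_struct *tsk)
{
        /* reference dropped automatically via DEFINE_FREE(put_nsproxy, ...) */
        struct nsproxy *nsp __free(put_nsproxy) = NULL;
        struct ns_common *ns;

        task_lock(tsk);
        nsp = tsk->nsproxy;
        if (nsp)
                get_nsproxy(nsp);
        task_unlock(tsk);

        if (!nsp)
                return;

        /* _Generic picks &...->ns for a struct pid_namespace * argument */
        ns = to_ns_common(nsp->pid_ns_for_children);
        pr_info("pid ns inum: %u\n", ns->inum);
}
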
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 1d43371fafd2..eb19503604fe 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -15,6 +15,11 @@
#define NUMA_NO_NODE (-1)
#define NUMA_NO_MEMBLK (-1)
+static inline bool numa_valid_node(int nid)
+{
+ return nid >= 0 && nid < MAX_NUMNODES;
+}
+
/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
#define __initdata_or_meminfo
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 4109f1bd6128..89ea1ebd975a 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -920,6 +920,9 @@ struct nvmet_fc_target_port {
* further references to hosthandle.
* Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host().
*
+ * @host_traddr: called by the transport to retrieve the node name and
+ * port name of the host port address.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -975,6 +978,7 @@ struct nvmet_fc_target_template {
void (*ls_abort)(struct nvmet_fc_target_port *targetport,
void *hosthandle, struct nvmefc_ls_req *lsreq);
void (*host_release)(void *hosthandle);
+ int (*host_traddr)(void *hosthandle, u64 *wwnn, u64 *wwpn);
u32 max_hw_queues;
u16 max_sgl_segments;
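
The new ->host_traddr() entry point described above lets the transport ask the LLDD for the WWNN/WWPN of the host port behind an opaque hosthandle. A sketch of an implementation, with a made-up LLDD structure:

#include <linux/nvme-fc-driver.h>

/* hypothetical LLDD state kept behind the opaque hosthandle */
struct example_fc_host {
        u64 wwnn;
        u64 wwpn;
};

static int example_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
        struct example_fc_host *host = hosthandle;

        *wwnn = host->wwnn;
        *wwpn = host->wwpn;
        return 0;
}
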
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 425573202295..c12a329dd463 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -25,6 +25,9 @@
#define NVME_NSID_ALL 0xffffffff
+/* Special NSSR value, 'NVMe' */
+#define NVME_SUBSYS_RESET 0x4E564D65
+
enum nvme_subsys_type {
/* Referral to another discovery type target subsystem */
NVME_NQN_DISC = 1,
@@ -85,10 +88,11 @@ enum {
enum {
NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
+ NVMF_RDMA_QPTYPE_INVALID = 0xff,
};
-/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
- * RDMA_QPTYPE field
+/* RDMA Provider Type codes for Discovery Log Page entry TSAS
+ * RDMA_PRTYPE field
*/
enum {
NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
@@ -110,6 +114,7 @@ enum {
NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_INVALID = 0xff,
};
#define NVME_AQ_DEPTH 32
@@ -1846,6 +1851,7 @@ enum {
/*
* Generic Command Status:
*/
+ NVME_SCT_GENERIC = 0x0,
NVME_SC_SUCCESS = 0x0,
NVME_SC_INVALID_OPCODE = 0x1,
NVME_SC_INVALID_FIELD = 0x2,
@@ -1893,6 +1899,7 @@ enum {
/*
* Command Specific Status:
*/
+ NVME_SCT_COMMAND_SPECIFIC = 0x100,
NVME_SC_CQ_INVALID = 0x100,
NVME_SC_QID_INVALID = 0x101,
NVME_SC_QUEUE_SIZE = 0x102,
@@ -1966,6 +1973,7 @@ enum {
/*
* Media and Data Integrity Errors:
*/
+ NVME_SCT_MEDIA_ERROR = 0x200,
NVME_SC_WRITE_FAULT = 0x280,
NVME_SC_READ_ERROR = 0x281,
NVME_SC_GUARD_CHECK = 0x282,
@@ -1978,6 +1986,7 @@ enum {
/*
* Path-related Errors:
*/
+ NVME_SCT_PATH = 0x300,
NVME_SC_INTERNAL_PATH_ERROR = 0x300,
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
@@ -1986,11 +1995,17 @@ enum {
NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_HOST_ABORTED_CMD = 0x371,
- NVME_SC_CRD = 0x1800,
- NVME_SC_MORE = 0x2000,
- NVME_SC_DNR = 0x4000,
+ NVME_SC_MASK = 0x00ff, /* Status Code */
+ NVME_SCT_MASK = 0x0700, /* Status Code Type */
+ NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,
+
+ NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */
+ NVME_STATUS_MORE = 0x2000,
+ NVME_STATUS_DNR = 0x4000, /* Do Not Retry */
};
+#define NVME_SCT(status) ((status) >> 8 & 7)
+
struct nvme_completion {
/*
* Used by Admin and Fabrics commands to return data:
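
With the new masks, a completion status word splits into a Status Code Type and a Status Code, plus the CRD/MORE/DNR bits. A small sketch of decoding it (the helper name is made up):

#include <linux/nvme.h>
#include <linux/printk.h>

static void example_decode_status(u16 status)
{
        u8 sct = NVME_SCT(status);              /* (status >> 8) & 7 */
        u8 sc = status & NVME_SC_MASK;          /* low 8 bits */

        pr_info("SCT %#x SC %#x%s%s\n", sct, sc,
                (status & NVME_STATUS_DNR) ? " [DNR]" : "",
                (status & NVME_STATUS_MORE) ? " [MORE]" : "");

        /* full-code comparisons keep both fields */
        if ((status & NVME_SCT_SC_MASK) == NVME_SC_GUARD_CHECK)
                pr_info("media error: guard check\n");
}
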
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
index 78021777df46..6df5b887dc54 100644
--- a/include/linux/objagg.h
+++ b/include/linux/objagg.h
@@ -8,7 +8,6 @@ struct objagg_ops {
size_t obj_size;
bool (*delta_check)(void *priv, const void *parent_obj,
const void *obj);
- int (*hints_obj_cmp)(const void *obj1, const void *obj2);
void * (*delta_create)(void *priv, void *parent_obj, void *obj);
void (*delta_destroy)(void *priv, void *delta_priv);
void * (*root_create)(void *priv, void *obj, unsigned int root_id);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 104078afe0b1..b9e914e1face 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -944,15 +944,18 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
* mistaken for a page type value.
*/
-#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of _mapcount */
-#define PAGE_MAPCOUNT_RESERVE -128
-#define PG_buddy 0x00000080
-#define PG_offline 0x00000100
-#define PG_table 0x00000200
-#define PG_guard 0x00000400
-#define PG_hugetlb 0x00000800
-#define PG_slab 0x00001000
+enum pagetype {
+ PG_buddy = 0x00000080,
+ PG_offline = 0x00000100,
+ PG_table = 0x00000200,
+ PG_guard = 0x00000400,
+ PG_hugetlb = 0x00000800,
+ PG_slab = 0x00001000,
+
+ PAGE_TYPE_BASE = 0xf0000000,
+ /* Reserve 0x0000007f to catch underflows of _mapcount */
+ PAGE_MAPCOUNT_RESERVE = -128,
+};
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 1acf5bac7f50..8c236c651d1d 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -230,7 +230,13 @@ static inline int folio_ref_dec_return(struct folio *folio)
static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
- bool ret = atomic_add_unless(&page->_refcount, nr, u);
+ bool ret = false;
+
+ rcu_read_lock();
+ /* avoid writing to the vmemmap area being remapped */
+ if (!page_is_fake_head(page) && page_ref_count(page) != u)
+ ret = atomic_add_unless(&page->_refcount, nr, u);
+ rcu_read_unlock();
if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
@@ -258,54 +264,9 @@ static inline bool folio_try_get(struct folio *folio)
return folio_ref_add_unless(folio, 1, 0);
}
-static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
-{
-#ifdef CONFIG_TINY_RCU
- /*
- * The caller guarantees the folio will not be freed from interrupt
- * context, so (on !SMP) we only need preemption to be disabled
- * and TINY_RCU does that for us.
- */
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
- folio_ref_add(folio, count);
-#else
- if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
- /* Either the folio has been freed, or will be freed. */
- return false;
- }
-#endif
- return true;
-}
-
-/**
- * folio_try_get_rcu - Attempt to increase the refcount on a folio.
- * @folio: The folio.
- *
- * This is a version of folio_try_get() optimised for non-SMP kernels.
- * If you are still holding the rcu_read_lock() after looking up the
- * page and know that the page cannot have its refcount decreased to
- * zero in interrupt context, you can use this instead of folio_try_get().
- *
- * Example users include get_user_pages_fast() (as pages are not unmapped
- * from interrupt context) and the page cache lookups (as pages are not
- * truncated from interrupt context). We also know that pages are not
- * frozen in interrupt context for the purposes of splitting or migration.
- *
- * You can also use this function if you're holding a lock that prevents
- * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_lock or page table lock for page tables. In this case,
- * it will always succeed, and you could have used a plain folio_get(),
- * but it's sometimes more convenient to have a common function called
- * from both locked and RCU-protected contexts.
- *
- * Return: True if the reference count was successfully incremented.
- */
-static inline bool folio_try_get_rcu(struct folio *folio)
+static inline bool folio_ref_try_add(struct folio *folio, int count)
{
- return folio_ref_try_add_rcu(folio, 1);
+ return folio_ref_add_unless(folio, count, 0);
}
static inline int page_ref_freeze(struct page *page, int count)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 3d69589c00a4..a0a026d2d244 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -346,6 +346,26 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define PREFERRED_MAX_PAGECACHE_ORDER 8
+#endif
+
+/*
+ * xas_split_alloc() does not support arbitrary orders. This implies no
+ * 512MB THP on ARM64 with 64KB base page size.
+ */
+#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
+#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
+
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
* @mapping: The file.
@@ -368,10 +388,22 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
*/
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
+ /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
+ VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
+ "Anonymous mapping always supports large folio");
+
return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
+/* Return the maximum folio size for this pagecache mapping, in bytes. */
+static inline size_t mapping_max_folio_size(struct address_space *mapping)
+{
+ if (mapping_large_folio_support(mapping))
+ return PAGE_SIZE << MAX_PAGECACHE_ORDER;
+ return PAGE_SIZE;
+}
+
static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -530,19 +562,6 @@ static inline void *detach_page_private(struct page *page)
return folio_detach_private(page_folio(page));
}
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER 8
-#endif
-
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
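
To make the limits above concrete with 4 KiB pages: with THP enabled, PREFERRED_MAX_PAGECACHE_ORDER is HPAGE_PMD_ORDER (9 on x86-64) and MAX_XAS_ORDER is 11 for the usual XA_CHUNK_SHIFT of 6, so MAX_PAGECACHE_ORDER is 9 and mapping_max_folio_size() returns 2 MiB for a mapping with large-folio support; without THP the preferred order of 8 caps it at 1 MiB. A hedged sketch of a caller sizing work against it (the helper is hypothetical):

#include <linux/minmax.h>
#include <linux/pagemap.h>

/* hypothetical: clamp a write chunk to what the mapping can take per folio */
static size_t example_write_chunk(struct address_space *mapping, size_t want)
{
        return min(want, mapping_max_folio_size(mapping));
}
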
diff --git a/include/linux/path.h b/include/linux/path.h
index 475225a03d0d..ca073e70decd 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -24,4 +24,13 @@ static inline void path_put_init(struct path *path)
*path = (struct path) { };
}
+/*
+ * Cleanup macro for use with __free(path_put). Avoids dereference and
+ * copying @path unlike DEFINE_FREE(). path_put() will handle the empty
+ * path correctly just ensure @path is initialized:
+ *
+ * struct path path __free(path_put) = {};
+ */
+#define __free_path_put path_put
+
#endif /* _LINUX_PATH_H */
diff --git a/include/linux/pci-pwrctl.h b/include/linux/pci-pwrctl.h
new file mode 100644
index 000000000000..45e9cfe740e4
--- /dev/null
+++ b/include/linux/pci-pwrctl.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __PCI_PWRCTL_H__
+#define __PCI_PWRCTL_H__
+
+#include <linux/notifier.h>
+
+struct device;
+struct device_link;
+
+/*
+ * This is a simple framework for solving the issue of PCI devices that require
+ * certain resources (regulators, GPIOs, clocks) to be enabled before the
+ * device can actually be detected on the PCI bus.
+ *
+ * The idea is to reuse the platform bus to populate OF nodes describing the
+ * PCI device and its resources, let these platform devices probe and enable
+ * relevant resources and then trigger a rescan of the PCI bus allowing for the
+ * same device (with a second associated struct device) to be registered with
+ * the PCI subsystem.
+ *
+ * To preserve a correct hierarchy for PCI power management and device reset,
+ * we create a device link between the power control platform device (parent)
+ * and the supplied PCI device (child).
+ */
+
+/**
+ * struct pci_pwrctl - PCI device power control context.
+ * @dev: Address of the power controlling device.
+ *
+ * An object of this type must be allocated by the PCI power control device and
+ * passed to the pwrctl subsystem to trigger a bus rescan and setup a device
+ * link with the device once it's up.
+ */
+struct pci_pwrctl {
+ struct device *dev;
+
+ /* Private: don't use. */
+ struct notifier_block nb;
+ struct device_link *link;
+};
+
+int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl);
+void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl);
+int devm_pci_pwrctl_device_set_ready(struct device *dev,
+ struct pci_pwrctl *pwrctl);
+
+#endif /* __PCI_PWRCTL_H__ */
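
A minimal sketch of the usage pattern the header comment describes: a platform driver bound to the DT node of the PCI device enables its supplies and hands a pci_pwrctl context to the framework, which rescans the bus and creates the device link. The driver name and the single "vdd" supply are illustrative only:

#include <linux/pci-pwrctl.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

struct foo_pwrctl {
        struct pci_pwrctl ctx;
};

static int foo_pwrctl_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct foo_pwrctl *priv;
        int ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* power up the PCI device before it can appear on the bus */
        ret = devm_regulator_get_enable(dev, "vdd");
        if (ret)
                return ret;

        priv->ctx.dev = dev;

        /* triggers the PCI bus rescan and sets up the device link */
        return devm_pci_pwrctl_device_set_ready(dev, &priv->ctx);
}
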
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fb004fd4e889..cafc5ab1cbcb 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -413,8 +413,6 @@ struct pci_dev {
struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
bool match_driver; /* Skip attaching driver */
- struct lock_class_key cfg_access_key;
- struct lockdep_map cfg_access_lock;
unsigned int transparent:1; /* Subtractive decode bridge */
unsigned int io_window:1; /* Bridge has I/O window */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 942a587bb97e..76a8f2d6bd64 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2126,6 +2126,8 @@
#define PCI_VENDOR_ID_CHELSIO 0x1425
+#define PCI_VENDOR_ID_EDIMAX 0x1432
+
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_SAMSUNG 0x144d
@@ -2599,6 +2601,8 @@
#define PCI_VENDOR_ID_HYGON 0x1d94
+#define PCI_VENDOR_ID_META 0x1d9b
+
#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
#define PCI_VENDOR_ID_HXT 0x1dbf
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index da3a6c30f6d2..b4a4eb6c8866 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -7,11 +7,12 @@
#ifndef __LINUX_PCS_XPCS_H
#define __LINUX_PCS_XPCS_H
+#include <linux/clk.h>
+#include <linux/fwnode.h>
+#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/phylink.h>
-
-#define NXP_SJA1105_XPCS_ID 0x00000010
-#define NXP_SJA1110_XPCS_ID 0x00000020
+#include <linux/types.h>
/* AN mode */
#define DW_AN_C73 1
@@ -20,20 +21,46 @@
#define DW_AN_C37_1000BASEX 4
#define DW_10GBASER 5
-/* device vendor OUI */
-#define DW_OUI_WX 0x0018fc80
+struct dw_xpcs_desc;
+
+enum dw_xpcs_pcs_id {
+ DW_XPCS_ID_NATIVE = 0,
+ NXP_SJA1105_XPCS_ID = 0x00000010,
+ NXP_SJA1110_XPCS_ID = 0x00000020,
+ DW_XPCS_ID = 0x7996ced0,
+ DW_XPCS_ID_MASK = 0xffffffff,
+};
-/* dev_flag */
-#define DW_DEV_TXGBE BIT(0)
+enum dw_xpcs_pma_id {
+ DW_XPCS_PMA_ID_NATIVE = 0,
+ DW_XPCS_PMA_GEN1_3G_ID,
+ DW_XPCS_PMA_GEN2_3G_ID,
+ DW_XPCS_PMA_GEN2_6G_ID,
+ DW_XPCS_PMA_GEN4_3G_ID,
+ DW_XPCS_PMA_GEN4_6G_ID,
+ DW_XPCS_PMA_GEN5_10G_ID,
+ DW_XPCS_PMA_GEN5_12G_ID,
+ WX_TXGBE_XPCS_PMA_10G_ID = 0x0018fc80,
+};
-struct xpcs_id;
+struct dw_xpcs_info {
+ u32 pcs;
+ u32 pma;
+};
+
+enum dw_xpcs_clock {
+ DW_XPCS_CORE_CLK,
+ DW_XPCS_PAD_CLK,
+ DW_XPCS_NUM_CLKS,
+};
struct dw_xpcs {
+ struct dw_xpcs_info info;
+ const struct dw_xpcs_desc *desc;
struct mdio_device *mdiodev;
- const struct xpcs_id *id;
+ struct clk_bulk_data clks[DW_XPCS_NUM_CLKS];
struct phylink_pcs pcs;
phy_interface_t interface;
- int dev_flag;
};
int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
@@ -46,6 +73,8 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
int enable);
struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
phy_interface_t interface);
+struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode,
+ phy_interface_t interface);
void xpcs_destroy(struct dw_xpcs *xpcs);
#endif /* __LINUX_PCS_XPCS_H */
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 46377e134d67..7867db04ec98 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -309,4 +309,6 @@
} \
} while (0)
+#include <asm/arm_pmuv3.h>
+
#endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a5304ae8c654..65ece0d5b4b6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -781,11 +781,12 @@ struct perf_event {
unsigned int pending_wakeup;
unsigned int pending_kill;
unsigned int pending_disable;
- unsigned int pending_sigtrap;
unsigned long pending_addr; /* SIGTRAP */
struct irq_work pending_irq;
+ struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
+ struct rcuwait pending_work_wait;
atomic_t event_limit;
@@ -962,7 +963,7 @@ struct perf_event_context {
struct rcu_head rcu_head;
/*
- * Sum (event->pending_sigtrap + event->pending_work)
+ * Sum (event->pending_work + event->pending_work)
*
* The SIGTRAP is targeted at ctx->task, as such it won't do changing
* that until the signal is delivered.
@@ -970,12 +971,6 @@ struct perf_event_context {
local_t nr_pending;
};
-/*
- * Number of contexts where an event can trigger:
- * task, softirq, hardirq, nmi.
- */
-#define PERF_NR_CONTEXTS 4
-
struct perf_cpu_pmu_context {
struct perf_event_pmu_context epc;
struct perf_event_pmu_context *task_epc;
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 86ba5d33e43b..9cacadbd61f8 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -37,6 +37,9 @@ static inline union codetag_ref *get_page_tag_ref(struct page *page)
static inline void put_page_tag_ref(union codetag_ref *ref)
{
+ if (WARN_ON(!ref))
+ return;
+
page_ext_put(page_ext_from_codetag_ref(ref));
}
@@ -102,9 +105,11 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
union codetag_ref *ref = get_page_tag_ref(page);
alloc_tag_sub_check(ref);
- if (ref && ref->ct)
- tag = ct_to_alloc_tag(ref->ct);
- put_page_tag_ref(ref);
+ if (ref) {
+ if (ref->ct)
+ tag = ct_to_alloc_tag(ref->ct);
+ put_page_tag_ref(ref);
+ }
}
return tag;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e6e83304558e..04ae5c811cfb 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -128,6 +128,7 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
* @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
* @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
* @PHY_INTERFACE_MODE_MAX: Book keeping
*
* Describes the interface between the MAC and PHY.
@@ -168,6 +169,7 @@ typedef enum {
PHY_INTERFACE_MODE_10GKR,
PHY_INTERFACE_MODE_QUSGMII,
PHY_INTERFACE_MODE_1000BASEKX,
+ PHY_INTERFACE_MODE_10G_QXGMII,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -289,6 +291,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "100base-x";
case PHY_INTERFACE_MODE_QUSGMII:
return "qusgmii";
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return "10g-qxgmii";
default:
return "unknown";
}
@@ -612,6 +616,8 @@ struct macsec_ops;
* handling shall be postponed until PHY has resumed
* @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
* requiring a rerun of the interrupt handler after resume
+ * @default_timestamp: Flag indicating whether we are using the phy
+ * timestamp as the default one
* @interface: enum phy_interface_t value
* @possible_interfaces: bitmap if interface modes that the attached PHY
* will switch between depending on media speed.
@@ -677,6 +683,8 @@ struct phy_device {
unsigned irq_suspended:1;
unsigned irq_rerun:1;
+ unsigned default_timestamp:1;
+
int rate_matching;
enum phy_state state;
@@ -1122,7 +1130,7 @@ struct phy_driver {
u8 index, enum led_brightness value);
/**
- * @led_blink_set: Set a PHY LED brightness. Index indicates
+ * @led_blink_set: Set a PHY LED blinking. Index indicates
* which of the PHYs led should be configured to blink. Delays
* are in milliseconds and if both are zero then a sensible
* default should be chosen. The call should adjust the
@@ -1610,7 +1618,7 @@ static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
}
static inline int phy_ts_info(struct phy_device *phydev,
- struct ethtool_ts_info *tsinfo)
+ struct kernel_ethtool_ts_info *tsinfo)
{
return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo);
}
@@ -1622,6 +1630,21 @@ static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb,
}
/**
+ * phy_is_default_hwtstamp - Is the PHY hwtstamp the default timestamp
+ * @phydev: Pointer to phy_device
+ *
+ * This is used to get the default timestamping device, taking into account
+ * the new API choice, which selects the MAC timestamping by default unless
+ * the phydev has the default_timestamp flag enabled.
+ *
+ * Return: True if phy is the default hw timestamp, false otherwise.
+ */
+static inline bool phy_is_default_hwtstamp(struct phy_device *phydev)
+{
+ return phy_has_hwtstamp(phydev) && phydev->default_timestamp;
+}
+
+/**
* phy_is_internal - Convenience function for testing if a PHY is internal
* @phydev: the phy_device struct
*/
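
A sketch of how a MAC driver's timestamping setup might consult the new helper; the function is hypothetical and the real dispatch lives in the networking core:

#include <linux/netdevice.h>
#include <linux/phy.h>

/* hypothetical: decide who should handle hardware timestamping requests */
static bool example_use_phy_timestamping(struct net_device *dev)
{
        struct phy_device *phydev = dev->phydev;

        /* defer to the PHY only when it has hwtstamp support and is
         * flagged as the default timestamping provider
         */
        return phydev && phy_is_default_hwtstamp(phydev);
}
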
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 5ea6b2ad2396..2381e07429a2 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -141,7 +141,8 @@ enum phylink_op_type {
* @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY.
* The PHY driver should start the clock signal as soon as
* possible and avoid stopping it during suspend events.
- * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
+ * @default_an_inband: if true, defaults to MLO_AN_INBAND rather than
+ * MLO_AN_PHY. A fixed-link specification will override.
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
* @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
@@ -154,7 +155,7 @@ struct phylink_config {
bool poll_fixed_state;
bool mac_managed_pm;
bool mac_requires_rxc;
- bool ovr_an_inband;
+ bool default_an_inband;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
@@ -653,6 +654,7 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface)
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return 1600000;
case PHY_INTERFACE_MODE_1000BASEX:
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index ecc47d5fe239..e574b790be6f 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -3463,6 +3463,34 @@ union __ec_align_offset1 ec_response_get_next_data_v1 {
};
BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16);
+union __ec_align_offset1 ec_response_get_next_data_v3 {
+ uint8_t key_matrix[18];
+
+ /* Unaligned */
+ uint32_t host_event;
+ uint64_t host_event64;
+
+ struct __ec_todo_unpacked {
+ /* For aligning the fifo_info */
+ uint8_t reserved[3];
+ struct ec_response_motion_sense_fifo_info info;
+ } sensor_fifo;
+
+ uint32_t buttons;
+
+ uint32_t switches;
+
+ uint32_t fp_events;
+
+ uint32_t sysrq;
+
+ /* CEC events from enum mkbp_cec_event */
+ uint32_t cec_events;
+
+ uint8_t cec_message[16];
+};
+BUILD_ASSERT(sizeof(union ec_response_get_next_data_v3) == 18);
+
struct ec_response_get_next_event {
uint8_t event_type;
/* Followed by event data if any */
@@ -3475,6 +3503,12 @@ struct ec_response_get_next_event_v1 {
union ec_response_get_next_data_v1 data;
} __ec_align1;
+struct ec_response_get_next_event_v3 {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data_v3 data;
+} __ec_align1;
+
/* Bit indices for buttons and switches.*/
/* Buttons */
#define EC_MKBP_POWER_BUTTON 0
@@ -3809,16 +3843,61 @@ struct ec_params_i2c_write {
* discharge the battery.
*/
#define EC_CMD_CHARGE_CONTROL 0x0096
-#define EC_VER_CHARGE_CONTROL 1
+#define EC_VER_CHARGE_CONTROL 3
enum ec_charge_control_mode {
CHARGE_CONTROL_NORMAL = 0,
CHARGE_CONTROL_IDLE,
CHARGE_CONTROL_DISCHARGE,
+ /* Add no more entry below. */
+ CHARGE_CONTROL_COUNT,
+};
+
+#define EC_CHARGE_MODE_TEXT \
+ { \
+ [CHARGE_CONTROL_NORMAL] = "NORMAL", \
+ [CHARGE_CONTROL_IDLE] = "IDLE", \
+ [CHARGE_CONTROL_DISCHARGE] = "DISCHARGE", \
+ }
+
+enum ec_charge_control_cmd {
+ EC_CHARGE_CONTROL_CMD_SET = 0,
+ EC_CHARGE_CONTROL_CMD_GET,
+};
+
+enum ec_charge_control_flag {
+ EC_CHARGE_CONTROL_FLAG_NO_IDLE = BIT(0),
};
struct ec_params_charge_control {
- uint32_t mode; /* enum charge_control_mode */
+ uint32_t mode; /* enum charge_control_mode */
+
+ /* Below are the fields added in V2. */
+ uint8_t cmd; /* enum ec_charge_control_cmd. */
+ uint8_t flags; /* enum ec_charge_control_flag (v3+) */
+ /*
+ * Lower and upper thresholds for battery sustainer. This struct isn't
+ * named to avoid tainting foreign projects' name spaces.
+ *
+ * If charge mode is explicitly set (e.g. DISCHARGE), battery sustainer
+ * will be disabled. To disable battery sustainer, set mode=NORMAL,
+ * lower=-1, upper=-1.
+ */
+ struct {
+ int8_t lower; /* Display SoC in percentage. */
+ int8_t upper; /* Display SoC in percentage. */
+ } sustain_soc;
+} __ec_align4;
+
+/* Added in v2 */
+struct ec_response_charge_control {
+ uint32_t mode; /* enum charge_control_mode */
+ struct { /* Battery sustainer thresholds */
+ int8_t lower;
+ int8_t upper;
+ } sustain_soc;
+ uint8_t flags; /* enum ec_charge_control_flag (v3+) */
+ uint8_t reserved;
} __ec_align4;
/*****************************************************************************/
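
A sketch of the v2+ request described above, sent with the cros_ec_cmd() helper declared in cros_ec_proto.h; the wrapper itself is hypothetical. Per the comment, mode=NORMAL with lower=upper=-1 disables the battery sustainer:

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

static int example_disable_sustainer(struct cros_ec_device *ec_dev)
{
        struct ec_params_charge_control params = {
                .cmd = EC_CHARGE_CONTROL_CMD_SET,
                .mode = CHARGE_CONTROL_NORMAL,
                .sustain_soc = {
                        .lower = -1,
                        .upper = -1,
                },
        };

        /* version 2 carries cmd and sustain_soc; v3 adds the flags byte */
        return cros_ec_cmd(ec_dev, 2, EC_CMD_CHARGE_CONTROL,
                           &params, sizeof(params), NULL, 0);
}
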
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 8865e350c12a..b34ed0cc1f8d 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -185,7 +185,7 @@ struct cros_ec_device {
bool host_sleep_v1;
struct blocking_notifier_head event_notifier;
- struct ec_response_get_next_event_v1 event_data;
+ struct ec_response_get_next_event_v3 event_data;
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
@@ -261,6 +261,10 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata,
size_t outsize, void *indata, size_t insize);
+int cros_ec_cmd_readmem(struct cros_ec_device *ec_dev, u8 offset, u8 size, void *dest);
+
+int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd);
+
/**
* cros_ec_get_time_ns() - Return time in ns.
*
diff --git a/include/linux/platform_data/lenovo-yoga-c630.h b/include/linux/platform_data/lenovo-yoga-c630.h
new file mode 100644
index 000000000000..5d1f9fb33cfc
--- /dev/null
+++ b/include/linux/platform_data/lenovo-yoga-c630.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024, Linaro Ltd
+ * Authors:
+ * Bjorn Andersson
+ * Dmitry Baryshkov
+ */
+
+#ifndef _LENOVO_YOGA_C630_DATA_H
+#define _LENOVO_YOGA_C630_DATA_H
+
+struct yoga_c630_ec;
+struct notifier_block;
+
+#define YOGA_C630_MOD_NAME "lenovo_yoga_c630"
+
+#define YOGA_C630_DEV_UCSI "ucsi"
+#define YOGA_C630_DEV_PSY "psy"
+
+int yoga_c630_ec_read8(struct yoga_c630_ec *ec, u8 addr);
+int yoga_c630_ec_read16(struct yoga_c630_ec *ec, u8 addr);
+
+int yoga_c630_ec_register_notify(struct yoga_c630_ec *ec, struct notifier_block *nb);
+void yoga_c630_ec_unregister_notify(struct yoga_c630_ec *ec, struct notifier_block *nb);
+
+#define YOGA_C630_UCSI_WRITE_SIZE 8
+#define YOGA_C630_UCSI_CCI_SIZE 4
+#define YOGA_C630_UCSI_DATA_SIZE 16
+#define YOGA_C630_UCSI_READ_SIZE (YOGA_C630_UCSI_CCI_SIZE + YOGA_C630_UCSI_DATA_SIZE)
+
+u16 yoga_c630_ec_ucsi_get_version(struct yoga_c630_ec *ec);
+int yoga_c630_ec_ucsi_write(struct yoga_c630_ec *ec,
+ const u8 req[YOGA_C630_UCSI_WRITE_SIZE]);
+int yoga_c630_ec_ucsi_read(struct yoga_c630_ec *ec,
+ u8 resp[YOGA_C630_UCSI_READ_SIZE]);
+
+#define LENOVO_EC_EVENT_USB 0x20
+#define LENOVO_EC_EVENT_UCSI 0x21
+#define LENOVO_EC_EVENT_HPD 0x22
+#define LENOVO_EC_EVENT_BAT_STATUS 0x24
+#define LENOVO_EC_EVENT_BAT_INFO 0x25
+#define LENOVO_EC_EVENT_BAT_ADPT_STATUS 0x37
+
+#endif
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 7e44e84e7150..652f323b5ecc 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -7,6 +7,7 @@
struct device;
struct mmc_host;
+struct property_entry;
struct pxamci_platform_data {
unsigned int ocr_mask; /* available voltages */
@@ -18,7 +19,8 @@ struct pxamci_platform_data {
bool gpio_card_ro_invert; /* gpio ro is inverted */
};
-extern void pxa_set_mci_info(struct pxamci_platform_data *info);
+extern void pxa_set_mci_info(const struct pxamci_platform_data *info,
+ const struct property_entry *props);
extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info);
extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info);
diff --git a/include/linux/platform_data/tmio.h b/include/linux/platform_data/tmio.h
new file mode 100644
index 000000000000..b060124ba1ae
--- /dev/null
+++ b/include/linux/platform_data/tmio.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MFD_TMIO_H
+#define MFD_TMIO_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/* TMIO MMC platform flags */
+
+/*
+ * Some controllers can support a 2-byte block size when the bus width is
+ * configured in 4-bit mode.
+ */
+#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
+
+/* Some controllers can support SDIO IRQ signalling */
+#define TMIO_MMC_SDIO_IRQ BIT(2)
+
+/* Some features are only available or tested on R-Car Gen2 or later */
+#define TMIO_MMC_MIN_RCAR2 BIT(3)
+
+/*
+ * Some controllers require waiting for the SD bus to become idle before
+ * writing to some registers.
+ */
+#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
+
+/*
+ * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
+ * we don't have documentation for old variants, so we enable only known good
+ * variants with this flag. Can be removed once all variants are known good.
+ */
+#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
+
+/* Some controllers have CMD12 automatically issue/non-issue register */
+#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
+
+/* Controller has some SDIO status bits which must be 1 */
+#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
+
+/* Some controllers have a 32-bit wide data port register */
+#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
+
+/* Some controllers allow setting the SDx actual clock */
+#define TMIO_MMC_CLK_ACTUAL BIT(10)
+
+/* Some controllers have a CBSY bit */
+#define TMIO_MMC_HAVE_CBSY BIT(11)
+
+struct tmio_mmc_data {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
+ unsigned int hclk;
+ unsigned long capabilities;
+ unsigned long capabilities2;
+ unsigned long flags;
+ u32 ocr_mask; /* available voltages */
+ dma_addr_t dma_rx_offset;
+ unsigned int max_blk_count;
+ unsigned short max_segs;
+};
+#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 3eb5cd6773ad..0aeeae1c1943 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -51,6 +51,10 @@
#define ASUS_WMI_DEVID_LED6 0x00020016
#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017
+/* Disable Camera LED */
+#define ASUS_WMI_DEVID_CAMERA_LED_NEG 0x00060078 /* 0 = on (unused) */
+#define ASUS_WMI_DEVID_CAMERA_LED 0x00060079 /* 1 = on */
+
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h
index a5705189e2ac..f981907a5cb0 100644
--- a/include/linux/platform_data/x86/soc.h
+++ b/include/linux/platform_data/x86/soc.h
@@ -20,7 +20,7 @@
static inline bool soc_intel_is_##soc(void) \
{ \
static const struct x86_cpu_id soc##_cpu_ids[] = { \
- X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \
+ X86_MATCH_VFM(type, NULL), \
{} \
}; \
const struct x86_cpu_id *id; \
@@ -31,11 +31,11 @@ static inline bool soc_intel_is_##soc(void) \
return false; \
}
-SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
-SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
-SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
-SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
-SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+SOC_INTEL_IS_CPU(byt, INTEL_ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, INTEL_ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, INTEL_ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, INTEL_ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, INTEL_KABYLAKE_L);
#undef SOC_INTEL_IS_CPU
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index f24546a3d3db..015751b64746 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -175,6 +175,10 @@ struct generic_pm_domain {
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
+ int (*set_hwmode_dev)(struct generic_pm_domain *domain,
+ struct device *dev, bool enable);
+ bool (*get_hwmode_dev)(struct generic_pm_domain *domain,
+ struct device *dev);
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
@@ -237,6 +241,7 @@ struct generic_pm_domain_data {
unsigned int performance_state;
unsigned int default_pstate;
unsigned int rpm_pstate;
+ bool hw_mode;
void *data;
};
@@ -267,6 +272,8 @@ int dev_pm_genpd_remove_notifier(struct device *dev);
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
void dev_pm_genpd_synced_poweroff(struct device *dev);
+int dev_pm_genpd_set_hwmode(struct device *dev, bool enable);
+bool dev_pm_genpd_get_hwmode(struct device *dev);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -340,6 +347,16 @@ static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
{ }
+static inline int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool dev_pm_genpd_get_hwmode(struct device *dev)
+{
+ return false;
+}
+
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index dd7c8441af42..6424692c30b7 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -474,6 +474,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+bool dev_pm_opp_of_has_required_opp(struct device *dev);
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
@@ -552,6 +553,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np,
return -EOPNOTSUPP;
}
+static inline bool dev_pm_opp_of_has_required_opp(struct device *dev)
+{
+ return false;
+}
+
static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
{
return -EOPNOTSUPP;
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 82561242cda4..7f2ff95d2deb 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -435,8 +435,6 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern const struct bus_type pnp_bus_type;
-
#if defined(CONFIG_PNP)
/* device management */
@@ -469,7 +467,7 @@ int compare_pnp_id(struct pnp_id *pos, const char *id);
int pnp_register_driver(struct pnp_driver *drv);
void pnp_unregister_driver(struct pnp_driver *drv);
-#define dev_is_pnp(d) ((d)->bus == &pnp_bus_type)
+bool dev_is_pnp(const struct device *dev);
#else
@@ -502,7 +500,7 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E
static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
-#define dev_is_pnp(d) false
+static inline bool dev_is_pnp(const struct device *dev) { return false; }
#endif /* CONFIG_PNP */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8e5705a56b85..c852cc882501 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -319,6 +319,8 @@ struct power_supply {
char *online_trig_name;
struct led_trigger *charging_blink_full_solid_trig;
char *charging_blink_full_solid_trig_name;
+ struct led_trigger *charging_orange_full_green_trig;
+ char *charging_orange_full_green_trig_name;
#endif
};
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7233e9cf1bab..ce76f1a45722 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -481,4 +481,45 @@ DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+ return preempt_model_full() || preempt_model_rt();
+}
+
#endif /* __LINUX_PREEMPT_H */
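
A hedged usage sketch (illustrative only): because these helpers are functions under PREEMPT_DYNAMIC, callers query the effective preemption model at run time instead of testing CONFIG_PREEMPTION at build time:

#include <linux/preempt.h>
#include <linux/printk.h>

static void example_report_preempt_model(void)
{
	/* Honours boot-time overrides such as preempt=none|voluntary|full. */
	if (preempt_model_preemptible())
		pr_info("non-cooperative preemption (full or RT)\n");
	else if (preempt_model_voluntary())
		pr_info("voluntary preemption points only\n");
	else
		pr_info("no forced kernel preemption\n");
}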
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 40afab23881a..65c5184470f1 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -60,9 +60,6 @@ static inline const char *printk_skip_headers(const char *buffer)
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
-int add_preferred_console_match(const char *match, const char *name,
- const short idx);
-
extern int console_printk[];
#define console_loglevel (console_printk[0])
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index 6d07c95dabb9..591a53e082e6 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -9,6 +9,9 @@
#include <linux/list.h>
#include <uapi/linux/ethtool.h>
+/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */
+#define MAX_PI_CURRENT 1920000
+
struct phy_device;
struct pse_controller_dev;
@@ -36,12 +39,29 @@ struct pse_control_config {
* functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
* @c33_pw_status: power detection status of the PSE.
* IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus:
+ * @c33_pw_class: detected class of a powered PD
+ * IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification
+ * @c33_actual_pw: power currently delivered by the PSE in mW
+ * IEEE 802.3-2022 30.9.1.1.23 aPSEActualPower
+ * @c33_ext_state_info: extended state information of the PSE
+ * @c33_avail_pw_limit: available power limit of the PSE in mW
+ * IEEE 802.3-2022 145.2.5.4 pse_avail_pwr
+ * @c33_pw_limit_ranges: supported power limit configuration range. The driver
+ * is in charge of the memory allocation.
+ * @c33_pw_limit_nb_ranges: number of supported power limit configuration
+ * ranges
*/
struct pse_control_status {
enum ethtool_podl_pse_admin_state podl_admin_state;
enum ethtool_podl_pse_pw_d_status podl_pw_status;
enum ethtool_c33_pse_admin_state c33_admin_state;
enum ethtool_c33_pse_pw_d_status c33_pw_status;
+ u32 c33_pw_class;
+ u32 c33_actual_pw;
+ struct ethtool_c33_pse_ext_state_info c33_ext_state_info;
+ u32 c33_avail_pw_limit;
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+ u32 c33_pw_limit_nb_ranges;
};
/**
@@ -53,6 +73,14 @@ struct pse_control_status {
* May also return negative errno.
* @pi_enable: Configure the PSE PI as enabled.
* @pi_disable: Configure the PSE PI as disabled.
+ * @pi_get_voltage: Return voltage similarly to get_voltage regulator
+ * callback.
+ * @pi_get_current_limit: Get the configured current limit similarly to
+ * get_current_limit regulator callback.
+ * @pi_set_current_limit: Configure the current limit similarly to
+ * set_current_limit regulator callback.
+ * Should not return an error when the requested limit
+ * is MAX_PI_CURRENT.
*/
struct pse_controller_ops {
int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
@@ -62,6 +90,11 @@ struct pse_controller_ops {
int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id);
int (*pi_enable)(struct pse_controller_dev *pcdev, int id);
int (*pi_disable)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_current_limit)(struct pse_controller_dev *pcdev,
+ int id);
+ int (*pi_set_current_limit)(struct pse_controller_dev *pcdev,
+ int id, int max_uA);
};
struct module;
@@ -148,6 +181,11 @@ int pse_ethtool_get_status(struct pse_control *psec,
int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config);
+int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit);
+int pse_ethtool_get_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack);
bool pse_has_podl(struct pse_control *psec);
bool pse_has_c33(struct pse_control *psec);
@@ -167,14 +205,27 @@ static inline int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pse_ethtool_get_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
}
static inline bool pse_has_podl(struct pse_control *psec)
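
A provider-side sketch of the new current-limit callbacks, with all driver names and the 8-PI bound purely hypothetical; per the documentation above, setting MAX_PI_CURRENT must not fail:

#include <linux/container_of.h>
#include <linux/pse-pd/pse.h>

struct foo_pse_priv {
	struct pse_controller_dev pcdev;
	int limit_ua[8];			/* assumes id < 8, illustration only */
};

static int foo_pi_set_current_limit(struct pse_controller_dev *pcdev,
				    int id, int max_uA)
{
	struct foo_pse_priv *priv = container_of(pcdev, struct foo_pse_priv, pcdev);

	if (max_uA > MAX_PI_CURRENT)
		max_uA = MAX_PI_CURRENT;	/* clamp rather than error out */

	priv->limit_ua[id] = max_uA;
	/* ... program the hardware limit register here ... */
	return 0;
}

static int foo_pi_get_current_limit(struct pse_controller_dev *pcdev, int id)
{
	struct foo_pse_priv *priv = container_of(pcdev, struct foo_pse_priv, pcdev);

	return priv->limit_ua[id];		/* reported in uA */
}

static const struct pse_controller_ops foo_pse_ops = {
	.pi_set_current_limit = foo_pi_set_current_limit,
	.pi_get_current_limit = foo_pi_get_current_limit,
};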
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 60b92c2c75ef..f8c2dc12dbd3 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -4,9 +4,12 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+MODULE_IMPORT_NS(PWM);
+
struct pwm_chip;
/**
@@ -249,9 +252,7 @@ struct pwm_capture {
* @free: optional hook for freeing a PWM
* @capture: capture and report PWM signal
* @apply: atomically apply a new PWM config
- * @get_state: get the current PWM state. This function is only
- * called once per PWM device when the PWM chip is
- * registered.
+ * @get_state: get the current PWM state.
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
@@ -407,10 +408,6 @@ void pwmchip_remove(struct pwm_chip *chip);
int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner);
#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)
-struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label);
-
struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
@@ -505,14 +502,6 @@ static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
return -EINVAL;
}
-static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label)
-{
- might_sleep();
- return ERR_PTR(-ENODEV);
-}
-
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
@@ -574,13 +563,6 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
pwm_apply_might_sleep(pwm, &state);
}
-/* only for backwards-compatibility, new code should not use this */
-static inline int pwm_apply_state(struct pwm_device *pwm,
- const struct pwm_state *state)
-{
- return pwm_apply_might_sleep(pwm, state);
-}
-
struct pwm_lookup {
struct list_head list;
const char *provider;
diff --git a/include/linux/pwrseq/consumer.h b/include/linux/pwrseq/consumer.h
new file mode 100644
index 000000000000..7d583b4f266e
--- /dev/null
+++ b/include/linux/pwrseq/consumer.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __POWER_SEQUENCING_CONSUMER_H__
+#define __POWER_SEQUENCING_CONSUMER_H__
+
+#include <linux/err.h>
+
+struct device;
+struct pwrseq_desc;
+
+#if IS_ENABLED(CONFIG_POWER_SEQUENCING)
+
+struct pwrseq_desc * __must_check
+pwrseq_get(struct device *dev, const char *target);
+void pwrseq_put(struct pwrseq_desc *desc);
+
+struct pwrseq_desc * __must_check
+devm_pwrseq_get(struct device *dev, const char *target);
+
+int pwrseq_power_on(struct pwrseq_desc *desc);
+int pwrseq_power_off(struct pwrseq_desc *desc);
+
+#else /* CONFIG_POWER_SEQUENCING */
+
+static inline struct pwrseq_desc * __must_check
+pwrseq_get(struct device *dev, const char *target)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void pwrseq_put(struct pwrseq_desc *desc)
+{
+}
+
+static inline struct pwrseq_desc * __must_check
+devm_pwrseq_get(struct device *dev, const char *target)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int pwrseq_power_on(struct pwrseq_desc *desc)
+{
+ return -ENOSYS;
+}
+
+static inline int pwrseq_power_off(struct pwrseq_desc *desc)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_POWER_SEQUENCING */
+
+#endif /* __POWER_SEQUENCING_CONSUMER_H__ */
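
A minimal consumer sketch of the new power sequencing API; the "wlan" target string and the probe function are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwrseq/consumer.h>

static int example_probe_with_pwrseq(struct device *dev)
{
	struct pwrseq_desc *pwrseq;
	int ret;

	pwrseq = devm_pwrseq_get(dev, "wlan");
	if (IS_ERR(pwrseq))
		return PTR_ERR(pwrseq);

	ret = pwrseq_power_on(pwrseq);
	if (ret)
		return ret;

	/* ... device is powered; call pwrseq_power_off() on teardown ... */
	return 0;
}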
diff --git a/include/linux/pwrseq/provider.h b/include/linux/pwrseq/provider.h
new file mode 100644
index 000000000000..cbc3607cbfcf
--- /dev/null
+++ b/include/linux/pwrseq/provider.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __POWER_SEQUENCING_PROVIDER_H__
+#define __POWER_SEQUENCING_PROVIDER_H__
+
+struct device;
+struct module;
+struct pwrseq_device;
+
+typedef int (*pwrseq_power_state_func)(struct pwrseq_device *);
+typedef int (*pwrseq_match_func)(struct pwrseq_device *, struct device *);
+
+/**
+ * struct pwrseq_unit_data - Configuration of a single power sequencing
+ * unit.
+ * @name: Name of the unit.
+ * @deps: Units that must be enabled before this one and disabled after it
+ * in the order they come in this array. Must be NULL-terminated.
+ * @enable: Callback running the part of the power-on sequence provided by
+ * this unit.
+ * @disable: Callback running the part of the power-off sequence provided
+ * by this unit.
+ */
+struct pwrseq_unit_data {
+ const char *name;
+ const struct pwrseq_unit_data **deps;
+ pwrseq_power_state_func enable;
+ pwrseq_power_state_func disable;
+};
+
+/**
+ * struct pwrseq_target_data - Configuration of a power sequencing target.
+ * @name: Name of the target.
+ * @unit: Final unit that this target must reach in order to be considered
+ * enabled.
+ * @post_enable: Callback run after the target unit has been enabled, *after*
+ * the state lock has been released. It's useful for implementing
+ * boot-up delays without blocking other users from powering up
+ * using the same power sequencer.
+ */
+struct pwrseq_target_data {
+ const char *name;
+ const struct pwrseq_unit_data *unit;
+ pwrseq_power_state_func post_enable;
+};
+
+/**
+ * struct pwrseq_config - Configuration used for registering a new provider.
+ * @parent: Parent device for the sequencer. Must be set.
+ * @owner: Module providing this device.
+ * @drvdata: Private driver data.
+ * @match: Provider callback used to match the consumer device to the sequencer.
+ * @targets: Array of targets for this power sequencer. Must be NULL-terminated.
+ */
+struct pwrseq_config {
+ struct device *parent;
+ struct module *owner;
+ void *drvdata;
+ pwrseq_match_func match;
+ const struct pwrseq_target_data **targets;
+};
+
+struct pwrseq_device *
+pwrseq_device_register(const struct pwrseq_config *config);
+void pwrseq_device_unregister(struct pwrseq_device *pwrseq);
+struct pwrseq_device *
+devm_pwrseq_device_register(struct device *dev,
+ const struct pwrseq_config *config);
+
+void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq);
+
+#endif /* __POWER_SEQUENCING_PROVIDER_H__ */
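
And the matching provider-side sketch, again with hypothetical names: one unit feeding one target, registered through devm_pwrseq_device_register():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pwrseq/provider.h>

static int foo_enable_supplies(struct pwrseq_device *pwrseq)
{
	/* ... enable regulators/clocks via pwrseq_device_get_drvdata(pwrseq) ... */
	return 0;
}

static int foo_disable_supplies(struct pwrseq_device *pwrseq)
{
	return 0;
}

static const struct pwrseq_unit_data foo_supplies_unit = {
	.name = "supplies",
	.enable = foo_enable_supplies,
	.disable = foo_disable_supplies,
};

static const struct pwrseq_target_data foo_wlan_target = {
	.name = "wlan",				/* matched by the consumer above */
	.unit = &foo_supplies_unit,
};

static const struct pwrseq_target_data *foo_targets[] = { &foo_wlan_target, NULL };

static int foo_match(struct pwrseq_device *pwrseq, struct device *dev)
{
	return 1;	/* provider-specific policy; 1 = this sequencer serves @dev */
}

static int foo_probe(struct device *dev)
{
	const struct pwrseq_config config = {
		.parent = dev,
		.owner = THIS_MODULE,
		.match = foo_match,
		.targets = foo_targets,
	};

	return PTR_ERR_OR_ZERO(devm_pwrseq_device_register(dev, &config));
}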
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 6d92b68efbf6..1d982dbdd0d0 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -32,13 +32,19 @@ DECLARE_PER_CPU(u32, kstack_offset);
#endif
/*
- * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
- * "VLA" from being unbounded (see above). 10 bits leaves enough room for
- * per-arch offset masks to reduce entropy (by removing higher bits, since
- * high entropy may overly constrain usable stack space), and for
- * compiler/arch-specific stack alignment to remove the lower bits.
+ * Use, at most, 6 bits of entropy (on 64-bit; 8 on 32-bit). This cap is
+ * to keep the "VLA" from being unbounded (see above). Additionally clear
+ * the bottom 4 bits (on 64-bit systems, 2 for 32-bit), since stack
+ * alignment will always be at least word size. This makes the compiler
+ * code gen better when it is applying the actual per-arch alignment to
+ * the final offset. The resulting randomness is reasonable without overly
+ * constraining usable stack space.
*/
-#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF)
+#ifdef CONFIG_64BIT
+#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111110000)
+#else
+#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111111100)
+#endif
/**
* add_random_kstack_offset - Increase stack utilization by previously
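
Spelling out the arithmetic (assumption: CONFIG_64BIT; not part of the patch): the 64-bit mask is 0x3F0, so the resulting offset is a multiple of 16 in [0, 1008], i.e. 2^6 = 64 distinct values:

#include <linux/bug.h>
#include <linux/randomize_kstack.h>

static void example_show_kstack_mask(void)
{
	u32 off = KSTACK_OFFSET_MAX(0xdeadbeef);	/* 0xdeadbeef & 0x3F0 = 0x3E0 */

	/* Always 16-byte aligned and bounded, so the "VLA" stays small. */
	WARN_ON((off & 0xf) || off > 1008);
}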
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 659d13a7ddaa..ba95c06675e1 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -80,36 +80,35 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
- * | allowing nocb_timer to be armed. |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads. |
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * --------------------------------------- ----------------------------------|
- * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
- * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
- * | | | |
- * | | | |
- * | CB kthread woke up and | | GP kthread woke up and |
- * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
- * | Processes callbacks concurrently | | |
- * | with rcu_core(), holding | | |
- * | nocb_lock. | | |
- * --------------------------------------- -----------------------------------
- * | |
- * -----------------------------------
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
+ * | + unparked CB kthread |
+ * | |
+ * | CB kthread got unparked and processes callbacks concurrently with |
+ * | rcu_core(), holding nocb_lock. |
+ * ---------------------------------------------------------------------------
+ * |
+ * v
+ * ---------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
+ * | |
+ * | GP kthread woke up and acknowledged nocb_lock. |
+ * ---------------------------------------- -----------------------------------
* |
* v
* |--------------------------------------------------------------------------|
- * | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP | |
- * | SEGCBLIST_KTHREAD_CB |
+ * | + unparked CB kthread |
* | |
* | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
* | handling callbacks. Enable bypass queueing. |
@@ -125,8 +124,8 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
* | ignores callbacks. Bypass enqueue is enabled. |
@@ -137,11 +136,11 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
- * | handles callbacks concurrently. Bypass enqueue is enabled. |
+ * | handles callbacks concurrently. Bypass enqueue is disabled. |
* | Invoke RCU core so we make sure not to preempt it in the middle with |
* | leaving some urgent work unattended within a jiffy. |
* ----------------------------------------------------------------------------
@@ -150,42 +149,31 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
- * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
- * | bypass enqueue. |
+ * | holding nocb_lock. Wake up GP kthread if necessary. |
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * ---------------------------------------------------------------------------|
- * | | |
- * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
- * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
- * | | |
- * | GP kthread woke up and | CB kthread woke up and |
- * | acknowledged the fact that | acknowledged the fact that |
- * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
- * | | The CB kthread goes to sleep |
- * | The callbacks from the target CPU | until it ever gets re-offloaded. |
- * | will be ignored from the GP kthread | |
- * | loop. | |
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | + unparked CB kthread |
+ * | |
+ * | GP kthread woke up and acknowledged the fact that SEGCBLIST_OFFLOADED |
+ * | got cleared. The callbacks from the target CPU will be ignored from the|
+ * | GP kthread loop. |
* ----------------------------------------------------------------------------
- * | |
- * -----------------------------------
* |
* v
* ----------------------------------------------------------------------------
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
+ * | + parked CB kthread |
* | |
- * | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
- * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
+ * | CB kthread is parked. Callbacks processed by rcu_core() from softirqs or |
+ * | local rcuc kthread, while holding nocb_lock. |
* ----------------------------------------------------------------------------
* |
* v
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index dfd2399f2cde..be450a3477be 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -209,7 +209,6 @@ void synchronize_rcu_tasks_rude(void);
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
-void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
@@ -218,7 +217,6 @@ void exit_tasks_rcu_finish(void);
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
-static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
@@ -421,11 +419,71 @@ static inline void rcu_preempt_sleep_check(void) { }
"Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
+// See RCU_LOCKDEP_WARN() for an explanation of the double call to
+// debug_lockdep_rcu_enabled().
+static inline bool lockdep_assert_rcu_helper(bool c)
+{
+ return debug_lockdep_rcu_enabled() &&
+ (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
+ debug_lockdep_rcu_enabled();
+}
+
+/**
+ * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
+ */
+#define lockdep_assert_in_rcu_read_lock() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect.
+ * Note that local_bh_disable() and friends do not suffice here, instead an
+ * actual rcu_read_lock_bh() is required.
+ */
+#define lockdep_assert_in_rcu_read_lock_bh() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock_sched()
+ * in effect. Note that preempt_disable() and friends do not suffice here,
+ * instead an actual rcu_read_lock_sched() is required.
+ */
+#define lockdep_assert_in_rcu_read_lock_sched() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
+ *
+ * Splats if lockdep is enabled and there is no RCU reader of any
+ * type in effect. Note that regions of code protected by things like
+ * preempt_disable(), local_bh_disable(), and local_irq_disable() all qualify
+ * as RCU readers.
+ *
+ * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY
+ * kernels that are not also built with PREEMPT_COUNT. But if you have
+ * lockdep enabled, you might as well also enable PREEMPT_COUNT.
+ */
+#define lockdep_assert_in_rcu_reader() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
+ !lock_is_held(&rcu_bh_lock_map) && \
+ !lock_is_held(&rcu_sched_lock_map) && \
+ preemptible()))
+
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
+#define lockdep_assert_in_rcu_reader() do { } while (0)
+
#endif /* #else #ifdef CONFIG_PROVE_RCU */
/*
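
A hedged sketch of the new assertions in use (example structures are hypothetical); they document and, under CONFIG_PROVE_RCU, verify the caller's RCU context:

#include <linux/rcupdate.h>

struct example_node { int val; };

/* Caller must hold rcu_read_lock(); the assertion makes that explicit. */
static int example_read_val(struct example_node __rcu **slot)
{
	struct example_node *p;

	lockdep_assert_in_rcu_read_lock();
	p = rcu_dereference(*slot);
	return p ? p->val : -1;
}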
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a6bc2980a98b..122e38161acb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1237,6 +1237,8 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
+int regmap_multi_reg_read(struct regmap *map, unsigned int *reg, void *val,
+ size_t val_count);
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
@@ -1607,7 +1609,7 @@ struct regmap_irq_chip {
unsigned int main_status;
unsigned int num_main_status_bits;
- struct regmap_irq_sub_irq_map *sub_reg_offsets;
+ const struct regmap_irq_sub_irq_map *sub_reg_offsets;
int num_main_regs;
unsigned int status_base;
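
A short, hypothetical example of the newly declared regmap_multi_reg_read(), which reads a caller-supplied list of (possibly non-contiguous) registers in one call; the register addresses below are made up:

#include <linux/regmap.h>

static int example_read_ids(struct regmap *map, unsigned int *chip_id,
			    unsigned int *rev_id)
{
	unsigned int regs[] = { 0x00, 0x4c };	/* not adjacent */
	unsigned int vals[2];
	int ret;

	ret = regmap_multi_reg_read(map, regs, vals, ARRAY_SIZE(regs));
	if (ret)
		return ret;

	*chip_id = vals[0];
	*rev_id = vals[1];
	return 0;
}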
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 59d0b9a79e6e..d986ec13092e 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -128,11 +128,11 @@ struct regulator;
*
* @supply: The name of the supply. Initialised by the user before
* using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply. This will be managed
+ * by the bulk API.
* @init_load_uA: After getting the regulator, regulator_set_load() will be
* called with this load. Initialised by the user before
* using the bulk regulator APIs.
- * @consumer: The regulator consumer for the supply. This will be managed
- * by the bulk API.
*
* The regulator APIs provide a series of regulator_bulk_() API calls as
* a convenience to consumers which require multiple supplies. This
@@ -140,8 +140,8 @@ struct regulator;
*/
struct regulator_bulk_data {
const char *supply;
- int init_load_uA;
struct regulator *consumer;
+ int init_load_uA;
/* private: Internal use */
int ret;
@@ -250,6 +250,7 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
unsigned *vsel_mask);
int regulator_list_hardware_vsel(struct regulator *regulator,
unsigned selector);
+int regulator_hardware_enable(struct regulator *regulator, bool enable);
/* regulator notifier block */
int regulator_register_notifier(struct regulator *regulator,
@@ -571,6 +572,12 @@ static inline int regulator_list_hardware_vsel(struct regulator *regulator,
return -EOPNOTSUPP;
}
+static inline int regulator_hardware_enable(struct regulator *regulator,
+ bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int regulator_register_notifier(struct regulator *regulator,
struct notifier_block *nb)
{
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index a365f67131ec..b0875b99e811 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -2,6 +2,7 @@
#ifndef _RESCTRL_H
#define _RESCTRL_H
+#include <linux/cacheinfo.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pid.h>
@@ -58,11 +59,45 @@ struct resctrl_staged_config {
bool have_new_ctrl;
};
+enum resctrl_domain_type {
+ RESCTRL_CTRL_DOMAIN,
+ RESCTRL_MON_DOMAIN,
+};
+
/**
- * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * struct rdt_domain_hdr - common header for different domain types
* @list: all instances of this resource
* @id: unique id for this instance
+ * @type: type of this instance
* @cpu_mask: which CPUs share this resource
+ */
+struct rdt_domain_hdr {
+ struct list_head list;
+ int id;
+ enum resctrl_domain_type type;
+ struct cpumask cpu_mask;
+};
+
+/**
+ * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
+ * @hdr: common header for different domain types
+ * @plr: pseudo-locked region (if any) associated with domain
+ * @staged_config: parsed configuration to be applied
+ * @mbps_val: When mba_sc is enabled, this holds the array of user
+ * specified control values for mba_sc in MBps, indexed
+ * by closid
+ */
+struct rdt_ctrl_domain {
+ struct rdt_domain_hdr hdr;
+ struct pseudo_lock_region *plr;
+ struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
+ u32 *mbps_val;
+};
+
+/**
+ * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
+ * @hdr: common header for different domain types
+ * @ci: cache info for this domain
* @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
* @mbm_total: saved state for MBM total bandwidth
* @mbm_local: saved state for MBM local bandwidth
@@ -70,16 +105,10 @@ struct resctrl_staged_config {
* @cqm_limbo: worker to periodically read CQM h/w counters
* @mbm_work_cpu: worker CPU for MBM h/w counters
* @cqm_work_cpu: worker CPU for CQM h/w counters
- * @plr: pseudo-locked region (if any) associated with domain
- * @staged_config: parsed configuration to be applied
- * @mbps_val: When mba_sc is enabled, this holds the array of user
- * specified control values for mba_sc in MBps, indexed
- * by closid
*/
-struct rdt_domain {
- struct list_head list;
- int id;
- struct cpumask cpu_mask;
+struct rdt_mon_domain {
+ struct rdt_domain_hdr hdr;
+ struct cacheinfo *ci;
unsigned long *rmid_busy_llc;
struct mbm_state *mbm_total;
struct mbm_state *mbm_local;
@@ -87,9 +116,6 @@ struct rdt_domain {
struct delayed_work cqm_limbo;
int mbm_work_cpu;
int cqm_work_cpu;
- struct pseudo_lock_region *plr;
- struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
- u32 *mbps_val;
};
/**
@@ -150,16 +176,24 @@ struct resctrl_membw {
struct rdt_parse_data;
struct resctrl_schema;
+enum resctrl_scope {
+ RESCTRL_L2_CACHE = 2,
+ RESCTRL_L3_CACHE = 3,
+ RESCTRL_L3_NODE,
+};
+
/**
* struct rdt_resource - attributes of a resctrl resource
* @rid: The index of the resource
* @alloc_capable: Is allocation available on this machine
* @mon_capable: Is monitor feature available on this machine
* @num_rmid: Number of RMIDs available
- * @cache_level: Which cache level defines scope of this resource
+ * @ctrl_scope: Scope of this resource for control functions
+ * @mon_scope: Scope of this resource for monitor functions
* @cache: Cache allocation related data
* @membw: If the component has bandwidth controls, their properties.
- * @domains: RCU list of all domains for this resource
+ * @ctrl_domains: RCU list of all control domains for this resource
+ * @mon_domains: RCU list of all monitor domains for this resource
* @name: Name to use in "schemata" file.
* @data_width: Character width of data when displaying
* @default_ctrl: Specifies default cache cbm or memory B/W percent.
@@ -174,17 +208,19 @@ struct rdt_resource {
bool alloc_capable;
bool mon_capable;
int num_rmid;
- int cache_level;
+ enum resctrl_scope ctrl_scope;
+ enum resctrl_scope mon_scope;
struct resctrl_cache cache;
struct resctrl_membw membw;
- struct list_head domains;
+ struct list_head ctrl_domains;
+ struct list_head mon_domains;
char *name;
int data_width;
u32 default_ctrl;
const char *format_str;
int (*parse_ctrlval)(struct rdt_parse_data *data,
struct resctrl_schema *s,
- struct rdt_domain *d);
+ struct rdt_ctrl_domain *d);
struct list_head evt_list;
unsigned long fflags;
bool cdp_capable;
@@ -218,13 +254,15 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
* Update the ctrl_val and apply this config right now.
* Must be called on one of the domain's CPUs.
*/
-int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type t, u32 cfg_val);
-u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type);
-int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
-void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
+int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
+void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);
@@ -253,7 +291,7 @@ void resctrl_offline_cpu(unsigned int cpu);
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 closid, u32 rmid, enum resctrl_event_id eventid,
u64 *val, void *arch_mon_ctx);
@@ -286,7 +324,7 @@ static inline void resctrl_arch_rmid_read_context_check(void)
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 closid, u32 rmid,
enum resctrl_event_id eventid);
@@ -299,7 +337,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);
extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 61591ac6eab6..33dd8d9d2b85 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -36,6 +36,7 @@
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
+#include <linux/netdevice_xmit.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
@@ -53,6 +54,7 @@ struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
+struct bpf_net_context;
struct capture_control;
struct cfs_rq;
struct fs_struct;
@@ -734,6 +736,12 @@ enum perf_event_task_context {
perf_nr_task_contexts,
};
+/*
+ * Number of contexts where an event can trigger:
+ * task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS 4
+
struct wake_q_node {
struct wake_q_node *next;
};
@@ -975,7 +983,9 @@ struct task_struct {
/* delay due to memory thrashing */
unsigned in_thrashing:1;
#endif
-
+#ifdef CONFIG_PREEMPT_RT
+ struct netdev_xmit net_xmit;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
@@ -1256,6 +1266,7 @@ struct task_struct {
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
+ u8 perf_recursion[PERF_NR_CONTEXTS];
struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
@@ -1506,6 +1517,8 @@ struct task_struct {
/* Used for BPF run context */
struct bpf_run_ctx *bpf_ctx;
#endif
+ /* Used by BPF for per-TASK xdp storage */
+ struct bpf_net_context *bpf_net_context;
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
unsigned long lowest_stack;
@@ -2064,47 +2077,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
-#ifdef CONFIG_PREEMPT_DYNAMIC
-
-extern bool preempt_model_none(void);
-extern bool preempt_model_voluntary(void);
-extern bool preempt_model_full(void);
-
-#else
-
-static inline bool preempt_model_none(void)
-{
- return IS_ENABLED(CONFIG_PREEMPT_NONE);
-}
-static inline bool preempt_model_voluntary(void)
-{
- return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
-}
-static inline bool preempt_model_full(void)
-{
- return IS_ENABLED(CONFIG_PREEMPT);
-}
-
-#endif
-
-static inline bool preempt_model_rt(void)
-{
- return IS_ENABLED(CONFIG_PREEMPT_RT);
-}
-
-/*
- * Does the preemption model allow non-cooperative preemption?
- *
- * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
- * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
- * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
- * PREEMPT_NONE model.
- */
-static inline bool preempt_model_preemptible(void)
-{
- return preempt_model_full() || preempt_model_rt();
-}
-
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -2192,13 +2164,13 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
#ifdef CONFIG_MEM_ALLOC_PROFILING
-static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
+static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
swap(current->alloc_tag, tag);
return tag;
}
-static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
+static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
diff --git a/include/linux/security.h b/include/linux/security.h
index 21cf70346b33..de3af33e6ff5 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2048,7 +2048,8 @@ static inline void security_key_post_create_or_update(struct key *keyring,
#ifdef CONFIG_AUDIT
#ifdef CONFIG_SECURITY
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp);
int security_audit_rule_known(struct audit_krule *krule);
int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
void security_audit_rule_free(void *lsmrule);
@@ -2056,7 +2057,7 @@ void security_audit_rule_free(void *lsmrule);
#else
static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return 0;
}
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 8cb65f50e830..aea25eef9a1a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -811,8 +811,7 @@ enum UART_TX_FLAGS {
if (pending < WAKEUP_CHARS) { \
uart_write_wakeup(__port); \
\
- if (!((flags) & UART_TX_NOSTOP) && pending == 0 && \
- __port->ops->tx_empty(__port)) \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
__port->ops->stop_tx(__port); \
} \
\
@@ -852,6 +851,24 @@ enum UART_TX_FLAGS {
})
/**
+ * uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting and flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @count: a limit of characters to send
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ * @tx_done: function to call after the loop is done
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+})
+
+/**
* uart_port_tx -- transmit helper for uart_port
* @port: uart port
* @ch: variable to store a character to be written to the HW
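
An illustrative transmit handler for a made-up UART using the new helper; UART_TX_NOSTOP keeps ->stop_tx() from being invoked when the circular buffer drains:

#include <linux/serial_core.h>

#define FOO_FIFO_SIZE	16	/* hypothetical FIFO depth */

static bool foo_tx_ready(struct uart_port *port) { return true; }
static void foo_put_char(struct uart_port *port, u8 ch) { /* write THR */ }
static void foo_tx_done(struct uart_port *port) { /* kick transmitter */ }

static void foo_transmit_chars(struct uart_port *port)
{
	u8 ch;

	uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP, FOO_FIFO_SIZE,
				   foo_tx_ready(port),
				   foo_put_char(port, ch),
				   foo_tx_done(port));
}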
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index a45da7eef9a2..b14be59550e3 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -284,6 +284,12 @@ enum {
SFF8024_ID_QSFP_8438 = 0x0c,
SFF8024_ID_QSFP_8436_8636 = 0x0d,
SFF8024_ID_QSFP28_8636 = 0x11,
+ SFF8024_ID_QSFP_DD = 0x18,
+ SFF8024_ID_OSFP = 0x19,
+ SFF8024_ID_DSFP = 0x1B,
+ SFF8024_ID_QSFP_PLUS_CMIS = 0x1E,
+ SFF8024_ID_SFP_DD_CMIS = 0x1F,
+ SFF8024_ID_SFP_PLUS_CMIS = 0x20,
SFF8024_ENCODING_UNSPEC = 0x00,
SFF8024_ENCODING_8B10B = 0x01,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1c2902eaebd3..9c29bdd5596d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -706,6 +706,13 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
+enum skb_tstamp_type {
+ SKB_CLOCK_REALTIME,
+ SKB_CLOCK_MONOTONIC,
+ SKB_CLOCK_TAI,
+ __SKB_CLOCK_MAX = SKB_CLOCK_TAI,
+};
+
/**
* DOC: Basic sk_buff geometry
*
@@ -823,10 +830,8 @@ typedef unsigned char *sk_buff_data_t;
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
- * @mono_delivery_time: When set, skb->tstamp has the
- * delivery_time in mono clock base (i.e. EDT). Otherwise, the
- * skb->tstamp has the (rcv) timestamp at ingress and
- * delivery_time at egress.
+ * @tstamp_type: clock base of skb->tstamp (see enum skb_tstamp_type);
+ * monotonic/TAI types mean skb->tstamp holds a delivery time (EDT),
+ * realtime covers the (rcv) timestamp at ingress and delivery time at egress.
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
* @alloc_cpu: CPU which did the skb allocation.
@@ -954,7 +959,7 @@ struct sk_buff {
/* private: */
__u8 __mono_tc_offset[0];
/* public: */
- __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */
+ __u8 tstamp_type:2; /* See skb_tstamp_type */
#ifdef CONFIG_NET_XGRESS
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
__u8 tc_skip_classify:1;
@@ -1084,15 +1089,16 @@ struct sk_buff {
#endif
#define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
-/* if you move tc_at_ingress or mono_delivery_time
+/* if you move tc_at_ingress or tstamp_type
* around, you also must adapt these constants.
*/
#ifdef __BIG_ENDIAN_BITFIELD
-#define SKB_MONO_DELIVERY_TIME_MASK (1 << 7)
-#define TC_AT_INGRESS_MASK (1 << 6)
+#define SKB_TSTAMP_TYPE_MASK (3 << 6)
+#define SKB_TSTAMP_TYPE_RSHIFT (6)
+#define TC_AT_INGRESS_MASK (1 << 5)
#else
-#define SKB_MONO_DELIVERY_TIME_MASK (1 << 0)
-#define TC_AT_INGRESS_MASK (1 << 1)
+#define SKB_TSTAMP_TYPE_MASK (3)
+#define TC_AT_INGRESS_MASK (1 << 2)
#endif
#define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset)
@@ -1245,8 +1251,14 @@ static inline bool skb_data_unref(const struct sk_buff *skb,
return true;
}
-void __fix_address
-kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason reason);
+
+static inline void
+kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+{
+ sk_skb_reason_drop(NULL, skb, reason);
+}
/**
* kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
@@ -1492,8 +1504,14 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
__skb_set_hash(skb, hash, true, is_l4);
}
-void __skb_get_hash(struct sk_buff *skb);
-u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
+u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb);
+
+static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
+{
+ return __skb_get_hash_symmetric_net(NULL, skb);
+}
+
+void __skb_get_hash_net(const struct net *net, struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
const struct flow_keys_basic *keys, int hlen);
@@ -1572,10 +1590,18 @@ void skb_flow_dissect_hash(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
+static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb)
+{
+ if (!skb->l4_hash && !skb->sw_hash)
+ __skb_get_hash_net(net, skb);
+
+ return skb->hash;
+}
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
- __skb_get_hash(skb);
+ __skb_get_hash_net(NULL, skb);
return skb->hash;
}
@@ -1677,6 +1703,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, struct iov_iter *from,
size_t length);
+int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+
static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
struct msghdr *msg, int len)
{
@@ -4179,7 +4208,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
- skb->mono_delivery_time = 0;
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
}
static inline ktime_t net_timedelta(ktime_t t)
@@ -4188,10 +4217,36 @@ static inline ktime_t net_timedelta(ktime_t t)
}
static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
- bool mono)
+ u8 tstamp_type)
{
skb->tstamp = kt;
- skb->mono_delivery_time = kt && mono;
+
+ if (kt)
+ skb->tstamp_type = tstamp_type;
+ else
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
+}
+
+static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
+ ktime_t kt, clockid_t clockid)
+{
+ u8 tstamp_type = SKB_CLOCK_REALTIME;
+
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ break;
+ case CLOCK_MONOTONIC:
+ tstamp_type = SKB_CLOCK_MONOTONIC;
+ break;
+ case CLOCK_TAI:
+ tstamp_type = SKB_CLOCK_TAI;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ kt = 0;
+ }
+
+ skb_set_delivery_time(skb, kt, tstamp_type);
}
DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
@@ -4201,8 +4256,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
*/
static inline void skb_clear_delivery_time(struct sk_buff *skb)
{
- if (skb->mono_delivery_time) {
- skb->mono_delivery_time = 0;
+ if (skb->tstamp_type) {
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (static_branch_unlikely(&netstamp_needed_key))
skb->tstamp = ktime_get_real();
else
@@ -4212,7 +4267,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb)
static inline void skb_clear_tstamp(struct sk_buff *skb)
{
- if (skb->mono_delivery_time)
+ if (skb->tstamp_type)
return;
skb->tstamp = 0;
@@ -4220,7 +4275,7 @@ static inline void skb_clear_tstamp(struct sk_buff *skb)
static inline ktime_t skb_tstamp(const struct sk_buff *skb)
{
- if (skb->mono_delivery_time)
+ if (skb->tstamp_type)
return 0;
return skb->tstamp;
@@ -4228,7 +4283,7 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb)
static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
{
- if (!skb->mono_delivery_time && skb->tstamp)
+ if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
return skb->tstamp;
if (static_branch_unlikely(&netstamp_needed_key) || cond)
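
A hedged sketch of how the clock-typed helpers replace the old boolean mono flag when stamping a delivery time (function names hypothetical):

#include <linux/skbuff.h>

/* Stamp an earliest-departure time whose base is the monotonic clock. */
static void example_stamp_edt(struct sk_buff *skb, ktime_t edt)
{
	skb_set_delivery_time(skb, edt, SKB_CLOCK_MONOTONIC);
}

/* Or derive the tstamp_type from a clockid (e.g. one taken from SO_TXTIME). */
static void example_stamp_from_clockid(struct sk_buff *skb, ktime_t t,
				       clockid_t clkid)
{
	skb_set_delivery_type_by_clockid(skb, t, clkid);
}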
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 11f0a4063403..16c241a23472 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -32,13 +32,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
-bool napi_pp_put_page(struct page *page);
+bool napi_pp_put_page(netmem_ref netmem);
static inline void
skb_page_unref(struct page *page, bool recycle)
{
#ifdef CONFIG_PAGE_POOL
- if (recycle && napi_pp_put_page(page))
+ if (recycle && napi_pp_put_page(page_to_netmem(page)))
return;
#endif
put_page(page);
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index d4a8e34505e6..5bee6f7fc400 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -25,6 +25,31 @@
struct cmdq_pkt;
+enum cmdq_logic_op {
+ CMDQ_LOGIC_ASSIGN = 0,
+ CMDQ_LOGIC_ADD = 1,
+ CMDQ_LOGIC_SUBTRACT = 2,
+ CMDQ_LOGIC_MULTIPLY = 3,
+ CMDQ_LOGIC_XOR = 8,
+ CMDQ_LOGIC_NOT = 9,
+ CMDQ_LOGIC_OR = 10,
+ CMDQ_LOGIC_AND = 11,
+ CMDQ_LOGIC_LEFT_SHIFT = 12,
+ CMDQ_LOGIC_RIGHT_SHIFT = 13,
+ CMDQ_LOGIC_MAX,
+};
+
+struct cmdq_operand {
+ /* register type */
+ bool reg;
+ union {
+ /* index */
+ u16 idx;
+ /* value */
+ u16 value;
+ };
+};
+
struct cmdq_client_reg {
u8 subsys;
u16 offset;
@@ -273,6 +298,23 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
/**
+ * cmdq_pkt_logic_command() - Append a logic command to the CMDQ packet, asking GCE
+ * to execute an instruction that stores the result of the logic operation
+ * on the left and right operands into result_reg_idx.
+ * @pkt: the CMDQ packet
+ * @result_reg_idx: SPR index that stores the operation result of left_operand and right_operand
+ * @left_operand: left operand
+ * @s_op: the logic operator enum
+ * @right_operand: right operand
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
+ struct cmdq_operand *left_operand,
+ enum cmdq_logic_op s_op,
+ struct cmdq_operand *right_operand);
+
+/**
* cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE
* to execute an instruction that set a constant value into
* internal register and use as value, mask or address in
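
A brief, hypothetical use of cmdq_pkt_logic_command(): add an immediate value to an SPR and store the result back into the same register (the SPR index is illustrative):

#include <linux/soc/mediatek/mtk-cmdq.h>

#define EXAMPLE_SPR_IDX		0	/* hypothetical SPR index */

static int example_add_offset(struct cmdq_pkt *pkt, u16 offset)
{
	struct cmdq_operand lhs = { .reg = true,  .idx = EXAMPLE_SPR_IDX };
	struct cmdq_operand rhs = { .reg = false, .value = offset };

	/* SPR[0] = SPR[0] + offset */
	return cmdq_pkt_logic_command(pkt, EXAMPLE_SPR_IDX, &lhs,
				      CMDQ_LOGIC_ADD, &rhs);
}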
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 1a886666bbb6..9e9f528b1370 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -115,7 +115,8 @@ struct llcc_edac_reg_offset {
/**
* struct llcc_drv_data - Data associated with the llcc driver
* @regmaps: regmaps associated with the llcc device
- * @bcast_regmap: regmap associated with llcc broadcast offset
+ * @bcast_regmap: regmap associated with llcc broadcast OR offset
+ * @bcast_and_regmap: regmap associated with llcc broadcast AND offset
* @cfg: pointer to the data structure for slice configuration
* @edac_reg_offset: Offset of the LLCC EDAC registers
* @lock: mutex associated with each slice
@@ -129,6 +130,7 @@ struct llcc_edac_reg_offset {
struct llcc_drv_data {
struct regmap **regmaps;
struct regmap *bcast_regmap;
+ struct regmap *bcast_and_regmap;
const struct llcc_slice_config *cfg;
const struct llcc_edac_reg_offset *edac_reg_offset;
struct mutex lock;
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index a36a3b9d4929..0943bf419e11 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -13,5 +13,6 @@ int qcom_smem_get_free_space(unsigned host);
phys_addr_t qcom_smem_virt_to_phys(void *p);
int qcom_smem_get_soc_id(u32 *id);
+int qcom_smem_get_feature_code(u32 *code);
#endif
diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h
index e78777bb0f4a..608950443eee 100644
--- a/include/linux/soc/qcom/socinfo.h
+++ b/include/linux/soc/qcom/socinfo.h
@@ -3,6 +3,8 @@
#ifndef __QCOM_SOCINFO_H__
#define __QCOM_SOCINFO_H__
+#include <linux/types.h>
+
/*
* SMEM item id, used to acquire handles to respective
* SMEM region.
@@ -12,6 +14,14 @@
#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
+/*
+ * SoC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.
+ */
+#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
+#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
+
/* Socinfo SMEM item structure */
struct socinfo {
__le32 fmt;
@@ -74,4 +84,28 @@ struct socinfo {
__le32 boot_core;
};
+/* Feature codes */
+enum qcom_socinfo_feature_code {
+ /* External feature codes */
+ SOCINFO_FC_UNKNOWN = 0x0,
+ SOCINFO_FC_AA,
+ SOCINFO_FC_AB,
+ SOCINFO_FC_AC,
+ SOCINFO_FC_AD,
+ SOCINFO_FC_AE,
+ SOCINFO_FC_AF,
+ SOCINFO_FC_AG,
+ SOCINFO_FC_AH,
+};
+
+/* Internal feature codes */
+/* Valid values: 0 <= n <= 0xf */
+#define SOCINFO_FC_Yn(n) (0xf1 + (n))
+#define SOCINFO_FC_INT_MAX SOCINFO_FC_Yn(0xf)
+
+/* Product codes */
+#define SOCINFO_PC_UNKNOWN 0
+#define SOCINFO_PCn(n) ((n) + 1)
+#define SOCINFO_PC_RESERVE (BIT(31) - 1)
+
#endif
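
A tiny illustration (values arbitrary) of the version macros added above:

#include <linux/printk.h>
#include <linux/soc/qcom/socinfo.h>

static void example_print_soc_version(u32 ver)
{
	/* SOCINFO_VERSION(2, 1) would pack major=2, minor=1 into one u32. */
	pr_info("SoC v%u.%u\n", SOCINFO_MAJOR(ver), SOCINFO_MINOR(ver));
}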
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index aa840ed043e1..f411c176536d 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -657,4 +657,8 @@
#define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268)
#define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8)
+/* For Tensor GS101 */
+#define GS101_SYSIP_DAT0 (0x810)
+#define GS101_SYSTEM_CONFIGURATION (0x3A00)
+
#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 89d16b90370b..df9cdb8bbfb8 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -76,7 +76,7 @@ struct msghdr {
__kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
struct ubuf_info *msg_ubuf;
- int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb,
+ int (*sg_from_iter)(struct sk_buff *skb,
struct iov_iter *from, size_t length);
};
@@ -442,11 +442,14 @@ extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
extern int __sys_socket(int family, int type, int protocol);
extern struct file *__sys_socket_file(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
+extern int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address,
+ int addrlen);
extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
int addrlen, int file_flags);
extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
int addrlen);
extern int __sys_listen(int fd, int backlog);
+extern int __sys_listen_socket(struct socket *sock, int backlog);
extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len);
extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e8e1e798924f..d7a16e0adf44 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -447,7 +447,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @cur_msg_need_completion: Flag used internally to opportunistically skip
* the @cur_msg_completion. This flag is used to signal the context that
* is running spi_finalize_current_message() that it needs to complete()
- * @cur_msg_mapped: message has been mapped for DMA
* @fallback: fallback to PIO if DMA transfer return failure with
* SPI_TRANS_FAIL_NO_START.
* @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
@@ -533,6 +532,9 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @queue_empty: signal green light for opportunistically skipping the queue
* for spi_sync transfers.
* @must_async: disable all fast paths in the core
+ * @defer_optimize_message: set to true if controller cannot pre-optimize messages
+ * and needs to defer the optimization step until the message is actually
+ * being transferred
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -708,7 +710,6 @@ struct spi_controller {
bool running;
bool rt;
bool auto_runtime_pm;
- bool cur_msg_mapped;
bool fallback;
bool last_cs_mode_high;
s8 last_cs[SPI_CS_CNT_MAX];
@@ -776,6 +777,7 @@ struct spi_controller {
/* Flag for enabling opportunistic skipping of the queue in spi_sync */
bool queue_empty;
bool must_async;
+ bool defer_optimize_message;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -981,6 +983,8 @@ struct spi_res {
* transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA
+ * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
* @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
@@ -1077,20 +1081,24 @@ struct spi_transfer {
#define SPI_TRANS_FAIL_IO BIT(1)
u16 error;
- dma_addr_t tx_dma;
- dma_addr_t rx_dma;
+ bool tx_sg_mapped;
+ bool rx_sg_mapped;
+
struct sg_table tx_sg;
struct sg_table rx_sg;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
unsigned dummy_data:1;
unsigned cs_off:1;
unsigned cs_change:1;
- unsigned tx_nbits:3;
- unsigned rx_nbits:3;
+ unsigned tx_nbits:4;
+ unsigned rx_nbits:4;
unsigned timestamped:1;
#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
+#define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */
u8 bits_per_word;
struct spi_delay delay;
struct spi_delay cs_change_delay;
@@ -1268,6 +1276,8 @@ static inline void spi_message_free(struct spi_message *m)
extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg);
extern void spi_unoptimize_message(struct spi_message *msg);
+extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg);
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
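
For context, a minimal illustrative sketch of how a client driver could use the new devm variant declared above; the driver structure, buffer and probe helper are hypothetical, and only spi_message_init_with_transfers()/devm_spi_optimize_message() from the SPI core are assumed.

#include <linux/spi/spi.h>

/* Hypothetical driver data: one pre-cooked message reused on every read. */
struct mydrv {
	struct spi_message msg;
	struct spi_transfer xfer;
	u8 buf[4];
};

static int mydrv_init_messages(struct device *dev, struct spi_device *spi,
			       struct mydrv *priv)
{
	priv->xfer.rx_buf = priv->buf;
	priv->xfer.len = sizeof(priv->buf);

	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);

	/*
	 * Pre-optimize once at probe time; the devm action undoes it
	 * (spi_unoptimize_message()) automatically on driver unbind.
	 */
	return devm_spi_optimize_message(dev, spi, &priv->msg);
}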
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b930eca2ef7b..d4cb83195f7a 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -4,6 +4,8 @@
#include <linux/workqueue.h>
+typedef u32 (*spi_bb_txrx_word_fn)(struct spi_device *, unsigned int, u32, u8, unsigned int);
+
struct spi_bitbang {
struct mutex lock;
u8 busy;
@@ -28,9 +30,8 @@ struct spi_bitbang {
int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
/* txrx_word[SPI_MODE_*]() just looks like a shift register */
- u32 (*txrx_word[4])(struct spi_device *spi,
- unsigned nsecs,
- u32 word, u8 bits, unsigned flags);
+ spi_bb_txrx_word_fn txrx_word[SPI_MODE_X_MASK + 1];
+
int (*set_line_direction)(struct spi_device *spi, bool output);
};
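
A hedged sketch of what the new table shape looks like from a bitbang driver; the per-mode shift functions below are placeholders, only the spi_bb_txrx_word_fn typedef and the SPI_MODE_X_MASK + 1 array size come from this header.

#include <linux/spi/spi_bitbang.h>

static u32 mydrv_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
			    u32 word, u8 bits, unsigned int flags)
{
	/* clock the word out/in with CPOL=0, CPHA=0 (placeholder) */
	return word;
}

static u32 mydrv_txrx_mode3(struct spi_device *spi, unsigned int nsecs,
			    u32 word, u8 bits, unsigned int flags)
{
	/* clock the word out/in with CPOL=1, CPHA=1 (placeholder) */
	return word;
}

static void mydrv_setup_bitbang(struct spi_bitbang *bb)
{
	/* indices are SPI_MODE_0..SPI_MODE_3, i.e. (mode & SPI_MODE_X_MASK) */
	bb->txrx_word[SPI_MODE_0] = mydrv_txrx_mode0;
	bb->txrx_word[SPI_MODE_3] = mydrv_txrx_mode3;
}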
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3fcd20de6ca8..63dd8cf3c3c2 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -462,11 +462,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
-#ifdef CONFIG_PREEMPTION
+ if (!preempt_model_preemptible())
+ return 0;
+
return spin_is_contended(lock);
-#else
- return 0;
-#endif
}
/*
@@ -479,11 +478,10 @@ static inline int spin_needbreak(spinlock_t *lock)
*/
static inline int rwlock_needbreak(rwlock_t *lock)
{
-#ifdef CONFIG_PREEMPTION
+ if (!preempt_model_preemptible())
+ return 0;
+
return rwlock_is_contended(lock);
-#else
- return 0;
-#endif
}
/*
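
The rewrite makes the break hint follow the runtime preemption model instead of a compile-time #ifdef; callers are unchanged. A minimal sketch of the usual lock-break pattern (the lock and the list walk are hypothetical):

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>

static void mydrv_scan(spinlock_t *lock, struct list_head *head)
{
	struct list_head *pos;

	spin_lock(lock);
	list_for_each(pos, head) {
		/* ... per-entry work ... */

		/* drop the lock if someone is spinning on it or we should yield */
		if (spin_needbreak(lock) || need_resched()) {
			spin_unlock(lock);
			cond_resched();
			spin_lock(lock);
			/* a real walk would revalidate/restart here */
		}
	}
	spin_unlock(lock);
}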
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 236610e4a8fa..6f6cb5fc1242 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -57,10 +57,45 @@ void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
+
+#define SRCU_GET_STATE_COMPLETED 0x1
+
+/**
+ * get_completed_synchronize_srcu - Return a pre-completed polled state cookie
+ *
+ * Returns a value that poll_state_synchronize_srcu() will always treat
+ * as a cookie whose grace period has already completed.
+ */
+static inline unsigned long get_completed_synchronize_srcu(void)
+{
+ return SRCU_GET_STATE_COMPLETED;
+}
+
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed SRCU grace periods.
+#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_srcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
+ * get_completed_synchronize_srcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
+
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
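
A minimal sketch of how the new helpers compose with the existing polled grace-period API; the structure and field names are hypothetical, only the four SRCU functions are taken from this header.

#include <linux/srcu.h>

struct my_obj {
	unsigned long srcu_cookie;	/* hypothetical per-object cookie */
	/* ... payload ... */
};

static void my_obj_retire(struct srcu_struct *ssp, struct my_obj *obj)
{
	/* record the grace period we must wait for before freeing */
	obj->srcu_cookie = start_poll_synchronize_srcu(ssp);
}

static bool my_obj_can_free(struct srcu_struct *ssp, struct my_obj *obj)
{
	/* objects initialized with a pre-completed cookie are always freeable */
	if (same_state_synchronize_srcu(obj->srcu_cookie,
					get_completed_synchronize_srcu()))
		return true;

	return poll_state_synchronize_srcu(ssp, obj->srcu_cookie);
}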
diff --git a/include/linux/stat.h b/include/linux/stat.h
index bf92441dbad2..3d900c86981c 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -54,6 +54,9 @@ struct kstat {
u32 dio_offset_align;
u64 change_cookie;
u64 subvol;
+ u32 atomic_write_unit_min;
+ u32 atomic_write_unit_max;
+ u32 atomic_write_segments_max;
};
/* These definitions are internal to the kernel for now. Mainly used by nfsd. */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index f92c195c76ed..84e13bd5df28 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -13,7 +13,7 @@
#define __STMMAC_PLATFORM_DATA
#include <linux/platform_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
@@ -82,8 +82,8 @@ struct stmmac_priv;
struct stmmac_mdio_bus_data {
unsigned int phy_mask;
- unsigned int has_xpcs;
- unsigned int xpcs_an_inband;
+ unsigned int pcs_mask;
+ unsigned int default_an_inband;
int *irqs;
int probed_phy_irq;
bool needs_reset;
@@ -271,6 +271,8 @@ struct plat_stmmacenet_data {
void (*dump_debug_regs)(void *priv);
int (*pcs_init)(struct stmmac_priv *priv);
void (*pcs_exit)(struct stmmac_priv *priv);
+ struct phylink_pcs *(*select_pcs)(struct stmmac_priv *priv,
+ phy_interface_t interface);
void *bsp_priv;
struct clk *stmmac_clk;
struct clk *pclk;
diff --git a/include/linux/string.h b/include/linux/string.h
index 60168aa2af07..9edace076ddb 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -289,7 +289,7 @@ extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_si
extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
__realloc_size(2, 3);
/* lib/argv_split.c */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 23617da0e565..a7d0406b9ef5 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -85,6 +85,7 @@ struct svc_serv {
char * sv_name; /* service name */
unsigned int sv_nrpools; /* number of thread pools */
+ bool sv_is_pooled; /* is this a pooled service? */
struct svc_pool * sv_pools; /* array of thread pools */
int (*sv_threadfn)(void *data);
@@ -398,6 +399,8 @@ struct svc_procedure {
/*
* Function prototypes.
*/
+int sunrpc_set_pool_mode(const char *val);
+int sunrpc_get_pool_mode(char *val, size_t size);
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index bd450023b9a4..e685e93ba354 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -354,7 +354,8 @@ static inline swp_entry_t page_swap_entry(struct page *page)
}
/* linux/mm/workingset.c */
-bool workingset_test_recent(void *shadow, bool file, bool *workingset);
+bool workingset_test_recent(void *shadow, bool file, bool *workingset,
+ bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 9104952d323d..fff820c3e93e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -322,13 +322,13 @@ asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
long nr,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
- const struct __aio_sigset *sig);
+ const struct __aio_sigset __user *sig);
asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
struct old_timespec32 __user *timeout,
- const struct __aio_sigset *sig);
+ const struct __aio_sigset __user *sig);
asmlinkage long sys_io_uring_setup(u32 entries,
struct io_uring_params __user *p);
asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
@@ -418,7 +418,7 @@ asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
u64 __user *mnt_ids, size_t nr_mnt_ids,
unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+asmlinkage long sys_ftruncate(unsigned int fd, off_t length);
#if BITS_PER_LONG == 32
asmlinkage long sys_truncate64(const char __user *path, loff_t length);
asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
@@ -441,7 +441,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
umode_t mode);
asmlinkage long sys_openat2(int dfd, const char __user *filename,
- struct open_how *how, size_t size);
+ struct open_how __user *how, size_t size);
asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
unsigned int flags);
@@ -555,7 +555,7 @@ asmlinkage long sys_get_robust_list(int pid,
asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
-asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
+asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters,
unsigned int nr_futexes, unsigned int flags,
struct __kernel_timespec __user *timeout, clockid_t clockid);
@@ -859,9 +859,15 @@ asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
const struct rlimit64 __user *new_rlim,
struct rlimit64 __user *old_rlim);
asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
+#if defined(CONFIG_ARCH_SPLIT_ARG64)
+asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ unsigned int mask_1, unsigned int mask_2,
+ int dfd, const char __user * pathname);
+#else
asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
u64 mask, int fd,
const char __user *pathname);
+#endif
asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
struct file_handle __user *handle,
int __user *mnt_id, int flag);
@@ -907,7 +913,7 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
-asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+asmlinkage long sys_bpf(int cmd, union bpf_attr __user *attr, unsigned int size);
asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
@@ -960,11 +966,11 @@ asmlinkage long sys_cachestat(unsigned int fd,
struct cachestat_range __user *cstat_range,
struct cachestat __user *cstat, unsigned int flags);
asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
-asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
- u32 *size, u32 flags);
-asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx __user *ctx,
u32 size, u32 flags);
-asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 __user *ids, u32 __user *size, u32 flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 09db2f2e6488..54fbec062772 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -237,7 +237,7 @@ extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
void do_sysctl_args(void);
bool sysctl_is_alias(char *param);
-int do_proc_douintvec(struct ctl_table *table, int write,
+int do_proc_douintvec(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(unsigned long *lvalp,
unsigned int *valp,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index a7d725fbf739..c4e64dc11206 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -750,6 +750,15 @@ static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
{
return 0;
}
+
+static inline ssize_t sysfs_bin_attr_simple_read(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return 0;
+}
#endif /* CONFIG_SYSFS */
static inline int __must_check sysfs_create_file(struct kobject *kobj,
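
The !CONFIG_SYSFS stub above pairs with the real helper, which copies out of @attr->private/@attr->size. A hedged sketch of wiring it up for a read-only binary blob; the blob and attribute name are hypothetical.

#include <linux/sysfs.h>

static const u8 my_blob[] = { 0xde, 0xad, 0xbe, 0xef };	/* hypothetical data */

static struct bin_attribute bin_attr_my_blob = {
	.attr		= { .name = "my_blob", .mode = 0444 },
	.size		= sizeof(my_blob),
	.private	= (void *)my_blob,
	.read		= sysfs_bin_attr_simple_read,
};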
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 248f4ac95642..2c59fe3efcd4 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -41,18 +41,12 @@ static inline u32 t10_pi_ref_tag(struct request *rq)
{
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- if (rq->q->integrity.interval_exp)
- shift = rq->q->integrity.interval_exp;
-#endif
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ rq->q->limits.integrity.interval_exp)
+ shift = rq->q->limits.integrity.interval_exp;
return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
}
-extern const struct blk_integrity_profile t10_pi_type1_crc;
-extern const struct blk_integrity_profile t10_pi_type1_ip;
-extern const struct blk_integrity_profile t10_pi_type3_crc;
-extern const struct blk_integrity_profile t10_pi_type3_ip;
-
struct crc64_pi_tuple {
__be64 guard_tag;
__be16 app_tag;
@@ -72,14 +66,10 @@ static inline u64 ext_pi_ref_tag(struct request *rq)
{
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- if (rq->q->integrity.interval_exp)
- shift = rq->q->integrity.interval_exp;
-#endif
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ rq->q->limits.integrity.interval_exp)
+ shift = rq->q->limits.integrity.interval_exp;
return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
}
-extern const struct blk_integrity_profile ext_pi_type1_crc64;
-extern const struct blk_integrity_profile ext_pi_type3_crc64;
-
#endif
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 795ef5a68429..cf5e7e891a77 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -18,6 +18,7 @@ enum task_work_notify_mode {
TWA_RESUME,
TWA_SIGNAL,
TWA_SIGNAL_NO_IPI,
+ TWA_NMI_CURRENT,
};
static inline bool task_work_pending(struct task_struct *task)
@@ -30,7 +31,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork,
struct callback_head *task_work_cancel_match(struct task_struct *task,
bool (*match)(struct callback_head *, void *data), void *data);
-struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
+struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
+bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
void task_work_run(void);
static inline void exit_task_work(struct task_struct *task)
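
task_work_cancel() now takes the callback_head that was queued, while the old by-function lookup lives on as task_work_cancel_func(). A minimal sketch of the add/cancel pairing; the request structure and callback are hypothetical.

#include <linux/task_work.h>
#include <linux/sched.h>

struct my_req {
	struct callback_head work;	/* hypothetical request embedding the work */
};

static void my_req_complete(struct callback_head *cb)
{
	struct my_req *req = container_of(cb, struct my_req, work);

	/* ... finish the request in task context ... */
	(void)req;
}

static int my_req_queue(struct task_struct *task, struct my_req *req)
{
	init_task_work(&req->work, my_req_complete);
	return task_work_add(task, &req->work, TWA_SIGNAL);
}

static void my_req_abort(struct task_struct *task, struct my_req *req)
{
	/* returns true if the work was still pending and is now dequeued */
	if (task_work_cancel(task, &req->work))
		my_req_complete(&req->work);
}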
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index f1155c0439c4..25fbf960b474 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -79,6 +79,9 @@ struct thermal_trip {
#define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \
THERMAL_TRIP_FLAG_RW_HYST)
+#define THERMAL_TRIP_PRIV_TO_INT(_val_) (uintptr_t)(_val_)
+#define THERMAL_INT_TO_TRIP_PRIV(_val_) (void *)(uintptr_t)(_val_)
+
struct thermal_zone_device;
struct thermal_zone_device_ops {
@@ -90,7 +93,8 @@ struct thermal_zone_device_ops {
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
- int (*set_trip_temp) (struct thermal_zone_device *, int, int);
+ int (*set_trip_temp) (struct thermal_zone_device *,
+ const struct thermal_trip *, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
int (*get_trend) (struct thermal_zone_device *,
@@ -198,8 +202,6 @@ static inline void devm_thermal_of_zone_unregister(struct device *dev,
}
#endif
-int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip);
int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
int for_each_thermal_trip(struct thermal_zone_device *tz,
@@ -221,7 +223,8 @@ struct thermal_zone_device *thermal_zone_device_register_with_trips(
int num_trips, void *devdata,
const struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay);
+ unsigned int passive_delay,
+ unsigned int polling_delay);
struct thermal_zone_device *thermal_tripless_zone_device_register(
const char *type,
@@ -261,7 +264,7 @@ thermal_of_cooling_device_register(struct device_node *np, const char *, void *,
struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops);
void thermal_cooling_device_update(struct thermal_cooling_device *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
@@ -269,6 +272,9 @@ struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int thermal_zone_get_slope(struct thermal_zone_device *tz);
int thermal_zone_get_offset(struct thermal_zone_device *tz);
+bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev);
int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
@@ -305,7 +311,7 @@ thermal_of_cooling_device_register(struct device_node *np,
static inline struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return ERR_PTR(-ENODEV);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 4924a33700b7..72744638c5b0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -139,7 +139,6 @@ extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
-extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 0ea7823b7f31..fc12a9ba2c88 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -310,12 +310,18 @@ struct system_device_crosststamp {
* timekeeping code to verify comparability of two cycle values.
* The default ID, CSID_GENERIC, does not identify a specific
* clocksource.
+ * @use_nsecs: @cycles is in nanoseconds.
*/
struct system_counterval_t {
u64 cycles;
enum clocksource_ids cs_id;
+ bool use_nsecs;
};
+extern bool ktime_real_to_base_clock(ktime_t treal,
+ enum clocksource_ids base_id, u64 *cycles);
+extern bool timekeeping_clocksource_has_base(enum clocksource_ids id);
+
/*
* Get cross timestamp between system clock and device clock
*/
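
A hedged sketch of how a driver might use the two new helpers to translate a CLOCK_REALTIME timestamp into cycles of a base clocksource. CSID_X86_ART is used purely as an example ID (assumed to exist for the platform in question); the surrounding driver context is hypothetical.

#include <linux/timekeeping.h>
#include <linux/clocksource_ids.h>
#include <linux/errno.h>

/* Assumption: the platform exposes an ART-like base clock as CSID_X86_ART. */
static int my_real_to_art(ktime_t treal, u64 *art_cycles)
{
	if (!timekeeping_clocksource_has_base(CSID_X86_ART))
		return -EOPNOTSUPP;

	if (!ktime_real_to_base_clock(treal, CSID_X86_ART, art_cycles))
		return -ERANGE;

	return 0;
}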
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index c17e4efbb2e5..e93ee8d936a9 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -394,21 +394,6 @@ enum tpm2_object_attributes {
TPM2_OA_SIGN = BIT(18),
};
-/*
- * definitions for the canonical template. These are mandated
- * by the TCG key template documents
- */
-
-#define AES_KEY_BYTES AES_KEYSIZE_128
-#define AES_KEY_BITS (AES_KEY_BYTES*8)
-#define TPM2_OA_TMPL (TPM2_OA_NO_DA | \
- TPM2_OA_FIXED_TPM | \
- TPM2_OA_FIXED_PARENT | \
- TPM2_OA_SENSITIVE_DATA_ORIGIN | \
- TPM2_OA_USER_WITH_AUTH | \
- TPM2_OA_DECRYPT | \
- TPM2_OA_RESTRICTED)
-
enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
TPM2_SA_AUDIT_EXCLUSIVE = BIT(1),
@@ -437,8 +422,6 @@ u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset);
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset);
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset);
-u8 *tpm_buf_parameters(struct tpm_buf *buf);
-
/*
* Check if TPM device is in the firmware upgrade mode.
*/
@@ -507,9 +490,16 @@ static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
{
}
#endif
+
+static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip)
+{
#ifdef CONFIG_TCG_TPM2_HMAC
+ return chip->auth;
+#else
+ return NULL;
+#endif
+}
-int tpm2_start_auth_session(struct tpm_chip *chip);
void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
u32 handle, u8 *name);
void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
@@ -521,9 +511,27 @@ static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
u8 *passphrase,
int passphraselen)
{
- tpm_buf_append_hmac_session(chip, buf, attributes, passphrase,
- passphraselen);
+ struct tpm_header *head;
+ int offset;
+
+ if (tpm2_chip_auth(chip)) {
+ tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, passphraselen);
+ } else {
+ offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ head = (struct tpm_header *)buf->data;
+
+ /*
+ * If the only sessions are optional, the command tag must change to
+ * TPM2_ST_NO_SESSIONS.
+ */
+ if (tpm_buf_length(buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ }
}
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+int tpm2_start_auth_session(struct tpm_chip *chip);
void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
int rc);
@@ -538,56 +546,6 @@ static inline int tpm2_start_auth_session(struct tpm_chip *chip)
static inline void tpm2_end_auth_session(struct tpm_chip *chip)
{
}
-static inline void tpm_buf_append_name(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u32 handle, u8 *name)
-{
- tpm_buf_append_u32(buf, handle);
- /* count the number of handles in the upper bits of flags */
- buf->handles++;
-}
-static inline void tpm_buf_append_hmac_session(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u8 attributes, u8 *passphrase,
- int passphraselen)
-{
- /* offset tells us where the sessions area begins */
- int offset = buf->handles * 4 + TPM_HEADER_SIZE;
- u32 len = 9 + passphraselen;
-
- if (tpm_buf_length(buf) != offset) {
- /* not the first session so update the existing length */
- len += get_unaligned_be32(&buf->data[offset]);
- put_unaligned_be32(len, &buf->data[offset]);
- } else {
- tpm_buf_append_u32(buf, len);
- }
- /* auth handle */
- tpm_buf_append_u32(buf, TPM2_RS_PW);
- /* nonce */
- tpm_buf_append_u16(buf, 0);
- /* attributes */
- tpm_buf_append_u8(buf, 0);
- /* passphrase */
- tpm_buf_append_u16(buf, passphraselen);
- tpm_buf_append(buf, passphrase, passphraselen);
-}
-static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u8 attributes,
- u8 *passphrase,
- int passphraselen)
-{
- int offset = buf->handles * 4 + TPM_HEADER_SIZE;
- struct tpm_header *head = (struct tpm_header *) buf->data;
-
- /*
- * if the only sessions are optional, the command tag
- * must change to TPM2_ST_NO_SESSIONS
- */
- if (tpm_buf_length(buf) == offset)
- head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
-}
static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip,
struct tpm_buf *buf)
{
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
index de8324a2223c..11b0c525be30 100644
--- a/include/linux/tsm.h
+++ b/include/linux/tsm.h
@@ -4,6 +4,7 @@
#include <linux/sizes.h>
#include <linux/types.h>
+#include <linux/uuid.h>
#define TSM_INBLOB_MAX 64
#define TSM_OUTBLOB_MAX SZ_32K
@@ -19,11 +20,17 @@
* @privlevel: optional privilege level to associate with @outblob
* @inblob_len: sizeof @inblob
* @inblob: arbitrary input data
+ * @service_provider: optional name of where to obtain the tsm report blob
+ * @service_guid: optional service-provider service guid to attest
+ * @service_manifest_version: optional service-provider service manifest version requested
*/
struct tsm_desc {
unsigned int privlevel;
size_t inblob_len;
u8 inblob[TSM_INBLOB_MAX];
+ char *service_provider;
+ guid_t service_guid;
+ unsigned int service_manifest_version;
};
/**
@@ -33,6 +40,8 @@ struct tsm_desc {
* @outblob: generated evidence to provider to the attestation agent
* @auxblob_len: sizeof(@auxblob)
* @auxblob: (optional) auxiliary data to the report (e.g. certificate data)
+ * @manifestblob_len: sizeof(@manifestblob)
+ * @manifestblob: (optional) manifest data associated with the report
*/
struct tsm_report {
struct tsm_desc desc;
@@ -40,6 +49,42 @@ struct tsm_report {
u8 *outblob;
size_t auxblob_len;
u8 *auxblob;
+ size_t manifestblob_len;
+ u8 *manifestblob;
+};
+
+/**
+ * enum tsm_attr_index - index used to reference report attributes
+ * @TSM_REPORT_GENERATION: index of the report generation number attribute
+ * @TSM_REPORT_PROVIDER: index of the provider name attribute
+ * @TSM_REPORT_PRIVLEVEL: index of the desired privilege level attribute
+ * @TSM_REPORT_PRIVLEVEL_FLOOR: index of the minimum allowed privilege level attribute

+ * @TSM_REPORT_SERVICE_PROVIDER: index of the service provider identifier attribute
+ * @TSM_REPORT_SERVICE_GUID: index of the service GUID attribute
+ * @TSM_REPORT_SERVICE_MANIFEST_VER: index of the service manifest version attribute
+ */
+enum tsm_attr_index {
+ TSM_REPORT_GENERATION,
+ TSM_REPORT_PROVIDER,
+ TSM_REPORT_PRIVLEVEL,
+ TSM_REPORT_PRIVLEVEL_FLOOR,
+ TSM_REPORT_SERVICE_PROVIDER,
+ TSM_REPORT_SERVICE_GUID,
+ TSM_REPORT_SERVICE_MANIFEST_VER,
+};
+
+/**
+ * enum tsm_bin_attr_index - index used to reference binary report attributes
+ * @TSM_REPORT_INBLOB: index of the binary report input attribute
+ * @TSM_REPORT_OUTBLOB: index of the binary report output attribute
+ * @TSM_REPORT_AUXBLOB: index of the binary auxiliary data attribute
+ * @TSM_REPORT_MANIFESTBLOB: index of the binary manifest data attribute
+ */
+enum tsm_bin_attr_index {
+ TSM_REPORT_INBLOB,
+ TSM_REPORT_OUTBLOB,
+ TSM_REPORT_AUXBLOB,
+ TSM_REPORT_MANIFESTBLOB,
};
/**
@@ -48,22 +93,20 @@ struct tsm_report {
* @privlevel_floor: convey base privlevel for nested scenarios
* @report_new: Populate @report with the report blob and auxblob
* (optional), return 0 on successful population, or -errno otherwise
+ * @report_attr_visible: show or hide a report attribute entry
+ * @report_bin_attr_visible: show or hide a report binary attribute entry
*
* Implementation specific ops, only one is expected to be registered at
* a time i.e. only one of "sev-guest", "tdx-guest", etc.
*/
struct tsm_ops {
const char *name;
- const unsigned int privlevel_floor;
+ unsigned int privlevel_floor;
int (*report_new)(struct tsm_report *report, void *data);
+ bool (*report_attr_visible)(int n);
+ bool (*report_bin_attr_visible)(int n);
};
-extern const struct config_item_type tsm_report_default_type;
-
-/* publish @privlevel, @privlevel_floor, and @auxblob attributes */
-extern const struct config_item_type tsm_report_extra_type;
-
-int tsm_register(const struct tsm_ops *ops, void *priv,
- const struct config_item_type *type);
+int tsm_register(const struct tsm_ops *ops, void *priv);
int tsm_unregister(const struct tsm_ops *ops);
#endif /* __TSM_H */
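
With the configfs item types gone, a provider registers just its ops and uses the new visibility callbacks to hide attributes it does not implement. A hedged sketch follows; the provider name, report handler and visibility policy are hypothetical, only the tsm_ops fields and tsm_register() signature come from this header.

#include <linux/tsm.h>
#include <linux/errno.h>

static int my_report_new(struct tsm_report *report, void *data)
{
	/* fill report->outblob / report->outblob_len here */
	return -ENXIO;	/* placeholder */
}

static bool my_report_attr_visible(int n)
{
	/* expose only the generic attributes, no service_* extensions */
	return n <= TSM_REPORT_PRIVLEVEL_FLOOR;
}

static bool my_report_bin_attr_visible(int n)
{
	return n != TSM_REPORT_MANIFESTBLOB;	/* no manifest support */
}

static const struct tsm_ops my_tsm_ops = {
	.name			 = "example-guest",
	.report_new		 = my_report_new,
	.report_attr_visible	 = my_report_attr_visible,
	.report_bin_attr_visible = my_report_bin_attr_visible,
};

static int my_tsm_init(void)
{
	return tsm_register(&my_tsm_ops, NULL);
}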
diff --git a/include/linux/turris-omnia-mcu-interface.h b/include/linux/turris-omnia-mcu-interface.h
new file mode 100644
index 000000000000..2da8cbeb158a
--- /dev/null
+++ b/include/linux/turris-omnia-mcu-interface.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CZ.NIC's Turris Omnia MCU I2C interface commands definitions
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#ifndef __TURRIS_OMNIA_MCU_INTERFACE_H
+#define __TURRIS_OMNIA_MCU_INTERFACE_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+enum omnia_commands_e {
+ OMNIA_CMD_GET_STATUS_WORD = 0x01, /* slave sends status word back */
+ OMNIA_CMD_GENERAL_CONTROL = 0x02,
+ OMNIA_CMD_LED_MODE = 0x03, /* default/user */
+ OMNIA_CMD_LED_STATE = 0x04, /* LED on/off */
+ OMNIA_CMD_LED_COLOR = 0x05, /* LED number + RED + GREEN + BLUE */
+ OMNIA_CMD_USER_VOLTAGE = 0x06,
+ OMNIA_CMD_SET_BRIGHTNESS = 0x07,
+ OMNIA_CMD_GET_BRIGHTNESS = 0x08,
+ OMNIA_CMD_GET_RESET = 0x09,
+ OMNIA_CMD_GET_FW_VERSION_APP = 0x0A, /* 20B git hash number */
+ OMNIA_CMD_SET_WATCHDOG_STATE = 0x0B, /* 0 - disable
+ * 1 - enable / ping
+ * after boot, the watchdog is
+ * started with a 2-minute timeout
+ */
+
+ /* OMNIA_CMD_WATCHDOG_STATUS = 0x0C, not implemented anymore */
+
+ OMNIA_CMD_GET_WATCHDOG_STATE = 0x0D,
+ OMNIA_CMD_GET_FW_VERSION_BOOT = 0x0E, /* 20B Git hash number */
+ OMNIA_CMD_GET_FW_CHECKSUM = 0x0F, /* 4B length, 4B checksum */
+
+ /* available if FEATURES_SUPPORTED bit set in status word */
+ OMNIA_CMD_GET_FEATURES = 0x10,
+
+ /* available if EXT_CMD bit set in features */
+ OMNIA_CMD_GET_EXT_STATUS_DWORD = 0x11,
+ OMNIA_CMD_EXT_CONTROL = 0x12,
+ OMNIA_CMD_GET_EXT_CONTROL_STATUS = 0x13,
+
+ /* available if NEW_INT_API bit set in features */
+ OMNIA_CMD_GET_INT_AND_CLEAR = 0x14,
+ OMNIA_CMD_GET_INT_MASK = 0x15,
+ OMNIA_CMD_SET_INT_MASK = 0x16,
+
+ /* available if FLASHING bit set in features */
+ OMNIA_CMD_FLASH = 0x19,
+
+ /* available if WDT_PING bit set in features */
+ OMNIA_CMD_SET_WDT_TIMEOUT = 0x20,
+ OMNIA_CMD_GET_WDT_TIMELEFT = 0x21,
+
+ /* available if POWEROFF_WAKEUP bit set in features */
+ OMNIA_CMD_SET_WAKEUP = 0x22,
+ OMNIA_CMD_GET_UPTIME_AND_WAKEUP = 0x23,
+ OMNIA_CMD_POWER_OFF = 0x24,
+
+ /* available if USB_OVC_PROT_SETTING bit set in features */
+ OMNIA_CMD_SET_USB_OVC_PROT = 0x25,
+ OMNIA_CMD_GET_USB_OVC_PROT = 0x26,
+
+ /* available if TRNG bit set in features */
+ OMNIA_CMD_TRNG_COLLECT_ENTROPY = 0x28,
+
+ /* available if CRYPTO bit set in features */
+ OMNIA_CMD_CRYPTO_GET_PUBLIC_KEY = 0x29,
+ OMNIA_CMD_CRYPTO_SIGN_MESSAGE = 0x2A,
+ OMNIA_CMD_CRYPTO_COLLECT_SIGNATURE = 0x2B,
+
+ /* available if BOARD_INFO bit set in features */
+ OMNIA_CMD_BOARD_INFO_GET = 0x2C,
+ OMNIA_CMD_BOARD_INFO_BURN = 0x2D,
+
+ /* available only at address 0x2b (LED-controller) */
+ /* available only if LED_GAMMA_CORRECTION bit set in features */
+ OMNIA_CMD_SET_GAMMA_CORRECTION = 0x30,
+ OMNIA_CMD_GET_GAMMA_CORRECTION = 0x31,
+
+ /* available only at address 0x2b (LED-controller) */
+ /* available only if PER_LED_CORRECTION bit set in features */
+ /* available only if FROM_BIT_16_INVALID bit NOT set in features */
+ OMNIA_CMD_SET_LED_CORRECTIONS = 0x32,
+ OMNIA_CMD_GET_LED_CORRECTIONS = 0x33,
+};
+
+enum omnia_flashing_commands_e {
+ OMNIA_FLASH_CMD_UNLOCK = 0x01,
+ OMNIA_FLASH_CMD_SIZE_AND_CSUM = 0x02,
+ OMNIA_FLASH_CMD_PROGRAM = 0x03,
+ OMNIA_FLASH_CMD_RESET = 0x04,
+};
+
+enum omnia_sts_word_e {
+ OMNIA_STS_MCU_TYPE_MASK = GENMASK(1, 0),
+ OMNIA_STS_MCU_TYPE_STM32 = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 0),
+ OMNIA_STS_MCU_TYPE_GD32 = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 1),
+ OMNIA_STS_MCU_TYPE_MKL = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 2),
+ OMNIA_STS_FEATURES_SUPPORTED = BIT(2),
+ OMNIA_STS_USER_REGULATOR_NOT_SUPPORTED = BIT(3),
+ OMNIA_STS_CARD_DET = BIT(4),
+ OMNIA_STS_MSATA_IND = BIT(5),
+ OMNIA_STS_USB30_OVC = BIT(6),
+ OMNIA_STS_USB31_OVC = BIT(7),
+ OMNIA_STS_USB30_PWRON = BIT(8),
+ OMNIA_STS_USB31_PWRON = BIT(9),
+ OMNIA_STS_ENABLE_4V5 = BIT(10),
+ OMNIA_STS_BUTTON_MODE = BIT(11),
+ OMNIA_STS_BUTTON_PRESSED = BIT(12),
+ OMNIA_STS_BUTTON_COUNTER_MASK = GENMASK(15, 13),
+};
+
+enum omnia_ctl_byte_e {
+ OMNIA_CTL_LIGHT_RST = BIT(0),
+ OMNIA_CTL_HARD_RST = BIT(1),
+ /* BIT(2) is currently reserved */
+ OMNIA_CTL_USB30_PWRON = BIT(3),
+ OMNIA_CTL_USB31_PWRON = BIT(4),
+ OMNIA_CTL_ENABLE_4V5 = BIT(5),
+ OMNIA_CTL_BUTTON_MODE = BIT(6),
+ OMNIA_CTL_BOOTLOADER = BIT(7),
+};
+
+enum omnia_features_e {
+ OMNIA_FEAT_PERIPH_MCU = BIT(0),
+ OMNIA_FEAT_EXT_CMDS = BIT(1),
+ OMNIA_FEAT_WDT_PING = BIT(2),
+ OMNIA_FEAT_LED_STATE_EXT_MASK = GENMASK(4, 3),
+ OMNIA_FEAT_LED_STATE_EXT = FIELD_PREP_CONST(OMNIA_FEAT_LED_STATE_EXT_MASK, 1),
+ OMNIA_FEAT_LED_STATE_EXT_V32 = FIELD_PREP_CONST(OMNIA_FEAT_LED_STATE_EXT_MASK, 2),
+ OMNIA_FEAT_LED_GAMMA_CORRECTION = BIT(5),
+ OMNIA_FEAT_NEW_INT_API = BIT(6),
+ OMNIA_FEAT_BOOTLOADER = BIT(7),
+ OMNIA_FEAT_FLASHING = BIT(8),
+ OMNIA_FEAT_NEW_MESSAGE_API = BIT(9),
+ OMNIA_FEAT_BRIGHTNESS_INT = BIT(10),
+ OMNIA_FEAT_POWEROFF_WAKEUP = BIT(11),
+ OMNIA_FEAT_CAN_OLD_MESSAGE_API = BIT(12),
+ OMNIA_FEAT_TRNG = BIT(13),
+ OMNIA_FEAT_CRYPTO = BIT(14),
+ OMNIA_FEAT_BOARD_INFO = BIT(15),
+
+ /*
+ * Originally the features command replied with only 16 bits. If more were
+ * read, either the I2C transaction failed or 0xff bytes were sent.
+ * Therefore to consider bits 16 - 31 valid, one bit (20) was reserved
+ * to be zero.
+ */
+
+ /* Bits 16 - 19 correspond to bits 0 - 3 of status word */
+ OMNIA_FEAT_MCU_TYPE_MASK = GENMASK(17, 16),
+ OMNIA_FEAT_MCU_TYPE_STM32 = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 0),
+ OMNIA_FEAT_MCU_TYPE_GD32 = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 1),
+ OMNIA_FEAT_MCU_TYPE_MKL = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 2),
+ OMNIA_FEAT_FEATURES_SUPPORTED = BIT(18),
+ OMNIA_FEAT_USER_REGULATOR_NOT_SUPPORTED = BIT(19),
+
+ /* must not be set */
+ OMNIA_FEAT_FROM_BIT_16_INVALID = BIT(20),
+
+ OMNIA_FEAT_PER_LED_CORRECTION = BIT(21),
+ OMNIA_FEAT_USB_OVC_PROT_SETTING = BIT(22),
+};
+
+enum omnia_ext_sts_dword_e {
+ OMNIA_EXT_STS_SFP_nDET = BIT(0),
+ OMNIA_EXT_STS_LED_STATES_MASK = GENMASK(31, 12),
+ OMNIA_EXT_STS_WLAN0_MSATA_LED = BIT(12),
+ OMNIA_EXT_STS_WLAN1_LED = BIT(13),
+ OMNIA_EXT_STS_WLAN2_LED = BIT(14),
+ OMNIA_EXT_STS_WPAN0_LED = BIT(15),
+ OMNIA_EXT_STS_WPAN1_LED = BIT(16),
+ OMNIA_EXT_STS_WPAN2_LED = BIT(17),
+ OMNIA_EXT_STS_WAN_LED0 = BIT(18),
+ OMNIA_EXT_STS_WAN_LED1 = BIT(19),
+ OMNIA_EXT_STS_LAN0_LED0 = BIT(20),
+ OMNIA_EXT_STS_LAN0_LED1 = BIT(21),
+ OMNIA_EXT_STS_LAN1_LED0 = BIT(22),
+ OMNIA_EXT_STS_LAN1_LED1 = BIT(23),
+ OMNIA_EXT_STS_LAN2_LED0 = BIT(24),
+ OMNIA_EXT_STS_LAN2_LED1 = BIT(25),
+ OMNIA_EXT_STS_LAN3_LED0 = BIT(26),
+ OMNIA_EXT_STS_LAN3_LED1 = BIT(27),
+ OMNIA_EXT_STS_LAN4_LED0 = BIT(28),
+ OMNIA_EXT_STS_LAN4_LED1 = BIT(29),
+ OMNIA_EXT_STS_LAN5_LED0 = BIT(30),
+ OMNIA_EXT_STS_LAN5_LED1 = BIT(31),
+};
+
+enum omnia_ext_ctl_e {
+ OMNIA_EXT_CTL_nRES_MMC = BIT(0),
+ OMNIA_EXT_CTL_nRES_LAN = BIT(1),
+ OMNIA_EXT_CTL_nRES_PHY = BIT(2),
+ OMNIA_EXT_CTL_nPERST0 = BIT(3),
+ OMNIA_EXT_CTL_nPERST1 = BIT(4),
+ OMNIA_EXT_CTL_nPERST2 = BIT(5),
+ OMNIA_EXT_CTL_PHY_SFP = BIT(6),
+ OMNIA_EXT_CTL_PHY_SFP_AUTO = BIT(7),
+ OMNIA_EXT_CTL_nVHV_CTRL = BIT(8),
+};
+
+enum omnia_int_e {
+ OMNIA_INT_CARD_DET = BIT(0),
+ OMNIA_INT_MSATA_IND = BIT(1),
+ OMNIA_INT_USB30_OVC = BIT(2),
+ OMNIA_INT_USB31_OVC = BIT(3),
+ OMNIA_INT_BUTTON_PRESSED = BIT(4),
+ OMNIA_INT_SFP_nDET = BIT(5),
+ OMNIA_INT_BRIGHTNESS_CHANGED = BIT(6),
+ OMNIA_INT_TRNG = BIT(7),
+ OMNIA_INT_MESSAGE_SIGNED = BIT(8),
+
+ OMNIA_INT_LED_STATES_MASK = GENMASK(31, 12),
+ OMNIA_INT_WLAN0_MSATA_LED = BIT(12),
+ OMNIA_INT_WLAN1_LED = BIT(13),
+ OMNIA_INT_WLAN2_LED = BIT(14),
+ OMNIA_INT_WPAN0_LED = BIT(15),
+ OMNIA_INT_WPAN1_LED = BIT(16),
+ OMNIA_INT_WPAN2_LED = BIT(17),
+ OMNIA_INT_WAN_LED0 = BIT(18),
+ OMNIA_INT_WAN_LED1 = BIT(19),
+ OMNIA_INT_LAN0_LED0 = BIT(20),
+ OMNIA_INT_LAN0_LED1 = BIT(21),
+ OMNIA_INT_LAN1_LED0 = BIT(22),
+ OMNIA_INT_LAN1_LED1 = BIT(23),
+ OMNIA_INT_LAN2_LED0 = BIT(24),
+ OMNIA_INT_LAN2_LED1 = BIT(25),
+ OMNIA_INT_LAN3_LED0 = BIT(26),
+ OMNIA_INT_LAN3_LED1 = BIT(27),
+ OMNIA_INT_LAN4_LED0 = BIT(28),
+ OMNIA_INT_LAN4_LED1 = BIT(29),
+ OMNIA_INT_LAN5_LED0 = BIT(30),
+ OMNIA_INT_LAN5_LED1 = BIT(31),
+};
+
+enum omnia_cmd_poweroff_e {
+ OMNIA_CMD_POWER_OFF_POWERON_BUTTON = BIT(0),
+ OMNIA_CMD_POWER_OFF_MAGIC = 0xdead,
+};
+
+enum omnia_cmd_usb_ovc_prot_e {
+ OMNIA_CMD_xET_USB_OVC_PROT_PORT_MASK = GENMASK(3, 0),
+ OMNIA_CMD_xET_USB_OVC_PROT_ENABLE = BIT(4),
+};
+
+#endif /* __TURRIS_OMNIA_MCU_INTERFACE_H */
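
Following the comment about bits 16 - 31 above, a consumer is expected to discard the upper half of the features word whenever bit 20 reads back set. A minimal sketch (the helpers are hypothetical; only the OMNIA_FEAT_* constants come from this header):

#include <linux/turris-omnia-mcu-interface.h>

static u32 omnia_sanitize_features(u32 features)
{
	/*
	 * If the reserved bit is set, the MCU only answered with 16 valid
	 * bits (or the read failed with 0xff padding): keep the low half.
	 */
	if (features & OMNIA_FEAT_FROM_BIT_16_INVALID)
		features &= GENMASK(15, 0);

	return features;
}

static bool omnia_has_per_led_correction(u32 features)
{
	features = omnia_sanitize_features(features);

	return features & OMNIA_FEAT_PER_LED_CORRECTION;
}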
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 8b1a29820409..000a6cab2d31 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -64,6 +64,7 @@ struct vfio_device {
struct completion comp;
struct iommufd_access *iommufd_access;
void (*put_kvm)(struct kvm *kvm);
+ struct inode *inode;
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
u8 iommufd_attached:1;
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index a2c8b8bba711..f87067438ed4 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -93,8 +93,6 @@ struct vfio_pci_core_device {
struct list_head sriov_pfs_item;
struct vfio_pci_core_device *sriov_pf_core_dev;
struct notifier_block nb;
- struct mutex vma_lock;
- struct list_head vma_list;
struct rw_semaphore memory_lock;
};
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 63cca3b58d6d..3275470b5531 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -16,12 +16,16 @@
* struct wmi_device - WMI device structure
* @dev: Device associated with this WMI device
* @setable: True for devices implementing the Set Control Method
+ * @driver_override: Driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
*
* This represents WMI devices discovered by the WMI driver core.
*/
struct wmi_device {
struct device dev;
bool setable;
+ const char *driver_override;
};
/**
diff --git a/include/linux/wordpart.h b/include/linux/wordpart.h
index 4ca1ba66d2f0..5a7b97bb7c95 100644
--- a/include/linux/wordpart.h
+++ b/include/linux/wordpart.h
@@ -39,6 +39,14 @@
*/
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+/**
+ * REPEAT_BYTE_U32 - repeat the value @x multiple times as a u32 value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
+#define REPEAT_BYTE_U32(x) lower_32_bits(REPEAT_BYTE(x))
+
/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
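
A small worked example of the new macro next to the existing 64-bit one; the values below just illustrate the expansion.

#include <linux/wordpart.h>

/*
 * REPEAT_BYTE(0x55)     == 0x5555555555555555UL on 64-bit,
 * REPEAT_BYTE_U32(0x55) == 0x55555555 on any word size.
 * As the NOTE says, a value > 0xff is not rejected and simply smears
 * its extra bits across the word.
 */
static inline u32 fill_u32_with(u8 b)
{
	return REPEAT_BYTE_U32(b);
}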
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fb3993894536..d9968bfc8eac 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -95,7 +95,7 @@ enum wq_misc_consts {
WORK_BUSY_RUNNING = 1 << 1,
/* maximum string length for set_worker_desc() */
- WORKER_DESC_LEN = 24,
+ WORKER_DESC_LEN = 32,
};
/* Convenience constants - of type 'unsigned long', not 'enum'! */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b6eedf7650da..63129c79b8cb 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -96,20 +96,6 @@ struct unix_sock {
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
-enum unix_socket_lock_class {
- U_LOCK_NORMAL,
- U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
- U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
- U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
- * candidates to close a small race window.
- */
-};
-
-static inline void unix_state_lock_nested(struct sock *sk,
- enum unix_socket_lock_class subclass)
-{
- spin_lock_nested(&unix_sk(sk)->lock, subclass);
-}
#define peer_wait peer_wq.wait
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index b3228bd6cd6b..5d655e109b2c 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -441,6 +441,10 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb);
+
#define HCI_REQ_START BIT(0)
#define HCI_REQ_SKB BIT(1)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index fe932ca3bc8c..e372a88e8c3f 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -324,6 +324,17 @@ enum {
* claim to support it.
*/
HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
+
+ /*
+ * When this quirk is set, the reserved bits of Primary/Secondary_PHY
+ * inside the LE Extended Advertising Report events are discarded.
+ * This is required for some Apple/Broadcom controllers which
+ * abuse these reserved bits for unrelated flags.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
};
/* HCI device flags */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9231396fe96f..31020891fc68 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -91,8 +91,6 @@ struct discovery_state {
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
- unsigned long scan_start;
- unsigned long scan_duration;
unsigned long name_resolve_timeout;
};
@@ -478,7 +476,6 @@ struct hci_dev {
unsigned int iso_pkts;
unsigned long acl_last_tx;
- unsigned long sco_last_tx;
unsigned long le_last_tx;
__u8 le_tx_def_phys;
@@ -530,7 +527,6 @@ struct hci_dev {
struct discovery_state discovery;
- int discovery_old_state;
bool discovery_paused;
int advertising_old_state;
bool advertising_paused;
@@ -649,6 +645,7 @@ struct hci_dev {
int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
struct bt_codec *codec, __u8 *vnd_len,
__u8 **vnd_data);
+ u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -890,8 +887,6 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
hdev->discovery.uuid_count = 0;
kfree(hdev->discovery.uuids);
hdev->discovery.uuids = NULL;
- hdev->discovery.scan_start = 0;
- hdev->discovery.scan_duration = 0;
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -2113,18 +2108,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
{
u16 max_latency;
- if (min > max || min < 6 || max > 3200)
+ if (min > max) {
+ BT_WARN("min %d > max %d", min, max);
return -EINVAL;
+ }
+
+ if (min < 6) {
+ BT_WARN("min %d < 6", min);
+ return -EINVAL;
+ }
+
+ if (max > 3200) {
+ BT_WARN("max %d > 3200", max);
+ return -EINVAL;
+ }
+
+ if (to_multiplier < 10) {
+ BT_WARN("to_multiplier %d < 10", to_multiplier);
+ return -EINVAL;
+ }
- if (to_multiplier < 10 || to_multiplier > 3200)
+ if (to_multiplier > 3200) {
+ BT_WARN("to_multiplier %d > 3200", to_multiplier);
return -EINVAL;
+ }
- if (max >= to_multiplier * 8)
+ if (max >= to_multiplier * 8) {
+ BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
return -EINVAL;
+ }
max_latency = (to_multiplier * 4 / max) - 1;
- if (latency > 499 || latency > max_latency)
+ if (latency > 499) {
+ BT_WARN("latency %d > 499", latency);
return -EINVAL;
+ }
+
+ if (latency > max_latency) {
+ BT_WARN("latency %d > max_latency %d", latency, max_latency);
+ return -EINVAL;
+ }
return 0;
}
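
For reference when reading the new warnings, a worked example of the latency bound with illustrative numbers:

/*
 * Example: min = 24, max = 40, latency = 0, to_multiplier = 42.
 *
 *   max_latency = (to_multiplier * 4 / max) - 1
 *               = (42 * 4 / 40) - 1
 *               = (168 / 40) - 1      (integer division -> 4)
 *               = 3
 *
 * latency (0) is <= 499 and <= max_latency (3), and max (40) is less
 * than to_multiplier * 8 (336), so the parameters are accepted and
 * hci_check_conn_params() returns 0.
 */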
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 9949870f7d78..13e8cd4414a1 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -144,7 +144,7 @@ struct hci_dev_req {
struct hci_dev_list_req {
__u16 dev_num;
- struct hci_dev_req dev_req[]; /* hci_dev_req structures */
+ struct hci_dev_req dev_req[] __counted_by(dev_num);
};
struct hci_conn_list_req {
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 6a9d063e9f47..75e052909b5f 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -8,6 +8,23 @@
#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle))
#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr))
+#define HCI_REQ_DONE 0
+#define HCI_REQ_PEND 1
+#define HCI_REQ_CANCELED 2
+
+#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
+#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
+
+struct hci_request {
+ struct hci_dev *hdev;
+ struct sk_buff_head cmd_q;
+
+ /* If something goes wrong when building the HCI request, the error
+ * value is stored in this field.
+ */
+ int err;
+};
+
typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
int err);
@@ -20,6 +37,10 @@ struct hci_cmd_sync_work_entry {
};
struct adv_info;
+
+struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, struct sock *sk);
+
/* Function with sync suffix shall not be called with hdev->lock held as they
* wait the command to complete and in the meantime an event could be received
* which could attempt to acquire hdev->lock causing a deadlock.
@@ -38,6 +59,8 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
struct sock *sk);
+int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);
@@ -129,6 +152,8 @@ int hci_update_discoverable(struct hci_dev *hdev);
int hci_update_connectable_sync(struct hci_dev *hdev);
+int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp);
+
int hci_start_discovery_sync(struct hci_dev *hdev);
int hci_stop_discovery_sync(struct hci_dev *hdev);
@@ -136,6 +161,7 @@ int hci_suspend_sync(struct hci_dev *hdev);
int hci_resume_sync(struct hci_dev *hdev);
struct hci_conn;
+struct hci_conn_params;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
@@ -154,3 +180,5 @@ int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ struct hci_conn_params *params);
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 99d26879b02a..c05882476900 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -355,7 +355,7 @@ struct rfcomm_dev_info {
struct rfcomm_dev_list_req {
u16 dev_num;
- struct rfcomm_dev_info dev_info[];
+ struct rfcomm_dev_info dev_info[] __counted_by(dev_num);
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 51f7bb42a936..0f45d875905f 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -11,9 +11,7 @@
struct cflayer;
struct cfpkt;
-struct cfpktq;
struct caif_payload_info;
-struct caif_packet_funcs;
#define CAIF_LAYER_NAME_SZ 16
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cbf1664dc569..192d72c8b465 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -125,33 +125,36 @@ struct wiphy;
* @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
* mode even in the presence of other (regulatory) restrictions,
* even if it is otherwise disabled.
+ * @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation
+ * with very low power (VLP), even if otherwise set to NO_IR.
*/
enum ieee80211_channel_flags {
- IEEE80211_CHAN_DISABLED = 1<<0,
- IEEE80211_CHAN_NO_IR = 1<<1,
- IEEE80211_CHAN_PSD = 1<<2,
- IEEE80211_CHAN_RADAR = 1<<3,
- IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
- IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
- IEEE80211_CHAN_NO_OFDM = 1<<6,
- IEEE80211_CHAN_NO_80MHZ = 1<<7,
- IEEE80211_CHAN_NO_160MHZ = 1<<8,
- IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
- IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
- IEEE80211_CHAN_NO_20MHZ = 1<<11,
- IEEE80211_CHAN_NO_10MHZ = 1<<12,
- IEEE80211_CHAN_NO_HE = 1<<13,
- IEEE80211_CHAN_1MHZ = 1<<14,
- IEEE80211_CHAN_2MHZ = 1<<15,
- IEEE80211_CHAN_4MHZ = 1<<16,
- IEEE80211_CHAN_8MHZ = 1<<17,
- IEEE80211_CHAN_16MHZ = 1<<18,
- IEEE80211_CHAN_NO_320MHZ = 1<<19,
- IEEE80211_CHAN_NO_EHT = 1<<20,
- IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
- IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22,
- IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23,
- IEEE80211_CHAN_CAN_MONITOR = 1<<24,
+ IEEE80211_CHAN_DISABLED = BIT(0),
+ IEEE80211_CHAN_NO_IR = BIT(1),
+ IEEE80211_CHAN_PSD = BIT(2),
+ IEEE80211_CHAN_RADAR = BIT(3),
+ IEEE80211_CHAN_NO_HT40PLUS = BIT(4),
+ IEEE80211_CHAN_NO_HT40MINUS = BIT(5),
+ IEEE80211_CHAN_NO_OFDM = BIT(6),
+ IEEE80211_CHAN_NO_80MHZ = BIT(7),
+ IEEE80211_CHAN_NO_160MHZ = BIT(8),
+ IEEE80211_CHAN_INDOOR_ONLY = BIT(9),
+ IEEE80211_CHAN_IR_CONCURRENT = BIT(10),
+ IEEE80211_CHAN_NO_20MHZ = BIT(11),
+ IEEE80211_CHAN_NO_10MHZ = BIT(12),
+ IEEE80211_CHAN_NO_HE = BIT(13),
+ IEEE80211_CHAN_1MHZ = BIT(14),
+ IEEE80211_CHAN_2MHZ = BIT(15),
+ IEEE80211_CHAN_4MHZ = BIT(16),
+ IEEE80211_CHAN_8MHZ = BIT(17),
+ IEEE80211_CHAN_16MHZ = BIT(18),
+ IEEE80211_CHAN_NO_320MHZ = BIT(19),
+ IEEE80211_CHAN_NO_EHT = BIT(20),
+ IEEE80211_CHAN_DFS_CONCURRENT = BIT(21),
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = BIT(22),
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23),
+ IEEE80211_CHAN_CAN_MONITOR = BIT(24),
+ IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25),
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -229,13 +232,13 @@ struct ieee80211_channel {
* @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
*/
enum ieee80211_rate_flags {
- IEEE80211_RATE_SHORT_PREAMBLE = 1<<0,
- IEEE80211_RATE_MANDATORY_A = 1<<1,
- IEEE80211_RATE_MANDATORY_B = 1<<2,
- IEEE80211_RATE_MANDATORY_G = 1<<3,
- IEEE80211_RATE_ERP_G = 1<<4,
- IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5,
- IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6,
+ IEEE80211_RATE_SHORT_PREAMBLE = BIT(0),
+ IEEE80211_RATE_MANDATORY_A = BIT(1),
+ IEEE80211_RATE_MANDATORY_B = BIT(2),
+ IEEE80211_RATE_MANDATORY_G = BIT(3),
+ IEEE80211_RATE_ERP_G = BIT(4),
+ IEEE80211_RATE_SUPPORTS_5MHZ = BIT(5),
+ IEEE80211_RATE_SUPPORTS_10MHZ = BIT(6),
};
/**
@@ -1595,6 +1598,7 @@ struct cfg80211_color_change_settings {
*
* Used to pass interface combination parameters
*
+ * @radio_idx: wiphy radio index or -1 for global
* @num_different_channels: the number of different channels we want
* to use for verification
* @radar_detect: a bitmap where each bit corresponds to a channel
@@ -1608,6 +1612,7 @@ struct cfg80211_color_change_settings {
* the verification
*/
struct iface_combination_params {
+ int radio_idx;
int num_different_channels;
u8 radar_detect;
int iftype_num[NUM_NL80211_IFTYPES];
@@ -1957,9 +1962,9 @@ struct rate_info {
* @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled
*/
enum bss_param_flags {
- BSS_PARAM_FLAGS_CTS_PROT = 1<<0,
- BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1,
- BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2,
+ BSS_PARAM_FLAGS_CTS_PROT = BIT(0),
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE = BIT(1),
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME = BIT(2),
};
/**
@@ -2200,7 +2205,7 @@ struct cfg80211_sar_sub_specs {
struct cfg80211_sar_specs {
enum nl80211_sar_type type;
u32 num_sub_specs;
- struct cfg80211_sar_sub_specs sub_specs[];
+ struct cfg80211_sar_sub_specs sub_specs[] __counted_by(num_sub_specs);
};
@@ -2266,13 +2271,13 @@ static inline int cfg80211_get_station(struct net_device *dev,
* @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
*/
enum monitor_flags {
- MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID,
- MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL,
- MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL,
- MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL,
- MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS,
- MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES,
- MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE,
+ MONITOR_FLAG_CHANGED = BIT(__NL80211_MNTR_FLAG_INVALID),
+ MONITOR_FLAG_FCSFAIL = BIT(NL80211_MNTR_FLAG_FCSFAIL),
+ MONITOR_FLAG_PLCPFAIL = BIT(NL80211_MNTR_FLAG_PLCPFAIL),
+ MONITOR_FLAG_CONTROL = BIT(NL80211_MNTR_FLAG_CONTROL),
+ MONITOR_FLAG_OTHER_BSS = BIT(NL80211_MNTR_FLAG_OTHER_BSS),
+ MONITOR_FLAG_COOK_FRAMES = BIT(NL80211_MNTR_FLAG_COOK_FRAMES),
+ MONITOR_FLAG_ACTIVE = BIT(NL80211_MNTR_FLAG_ACTIVE),
};
/**
@@ -2838,7 +2843,7 @@ struct cfg80211_sched_scan_request {
struct list_head list;
/* keep last */
- struct ieee80211_channel *channels[];
+ struct ieee80211_channel *channels[] __counted_by(n_channels);
};
/**
@@ -3399,15 +3404,15 @@ enum cfg80211_connect_params_changed {
* @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum
*/
enum wiphy_params_flags {
- WIPHY_PARAM_RETRY_SHORT = 1 << 0,
- WIPHY_PARAM_RETRY_LONG = 1 << 1,
- WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
- WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
- WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
- WIPHY_PARAM_DYN_ACK = 1 << 5,
- WIPHY_PARAM_TXQ_LIMIT = 1 << 6,
- WIPHY_PARAM_TXQ_MEMORY_LIMIT = 1 << 7,
- WIPHY_PARAM_TXQ_QUANTUM = 1 << 8,
+ WIPHY_PARAM_RETRY_SHORT = BIT(0),
+ WIPHY_PARAM_RETRY_LONG = BIT(1),
+ WIPHY_PARAM_FRAG_THRESHOLD = BIT(2),
+ WIPHY_PARAM_RTS_THRESHOLD = BIT(3),
+ WIPHY_PARAM_COVERAGE_CLASS = BIT(4),
+ WIPHY_PARAM_DYN_ACK = BIT(5),
+ WIPHY_PARAM_TXQ_LIMIT = BIT(6),
+ WIPHY_PARAM_TXQ_MEMORY_LIMIT = BIT(7),
+ WIPHY_PARAM_TXQ_QUANTUM = BIT(8),
};
#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
@@ -3566,8 +3571,8 @@ struct cfg80211_coalesce_rules {
* @n_rules: number of rules
*/
struct cfg80211_coalesce {
- struct cfg80211_coalesce_rules *rules;
int n_rules;
+ struct cfg80211_coalesce_rules rules[] __counted_by(n_rules);
};
/**
@@ -3582,7 +3587,7 @@ struct cfg80211_coalesce {
struct cfg80211_wowlan_nd_match {
struct cfg80211_ssid ssid;
int n_channels;
- u32 channels[];
+ u32 channels[] __counted_by(n_channels);
};
/**
@@ -3596,7 +3601,7 @@ struct cfg80211_wowlan_nd_match {
*/
struct cfg80211_wowlan_nd_info {
int n_matches;
- struct cfg80211_wowlan_nd_match *matches[];
+ struct cfg80211_wowlan_nd_match *matches[] __counted_by(n_matches);
};
/**
@@ -4577,6 +4582,8 @@ struct mgmt_frame_regs {
*
* @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
* @set_ttlm: set the TID to link mapping.
+ * @get_radio_mask: get bitmask of radios in use.
+ * (invoked with the wiphy mutex held)
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4938,6 +4945,7 @@ struct cfg80211_ops {
struct cfg80211_set_hw_timestamp *hwts);
int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ttlm_params *params);
+ u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev);
};
/*
@@ -5043,7 +5051,9 @@ struct ieee80211_iface_limit {
* struct ieee80211_iface_combination - possible interface combination
*
* With this structure the driver can describe which interface
- * combinations it supports concurrently.
+ * combinations it supports concurrently. When set in a struct wiphy_radio,
+ * the combinations refer to combinations of interfaces currently active on
+ * that radio.
*
* Examples:
*
@@ -5403,6 +5413,38 @@ struct wiphy_iftype_akm_suites {
int n_akm_suites;
};
+/**
+ * struct wiphy_radio_freq_range - wiphy frequency range
+ * @start_freq: start range edge frequency (kHz)
+ * @end_freq: end range edge frequency (kHz)
+ */
+struct wiphy_radio_freq_range {
+ u32 start_freq;
+ u32 end_freq;
+};
+
+
+/**
+ * struct wiphy_radio - physical radio of a wiphy
+ * This structure describes a physical radio belonging to a wiphy.
+ * It is used to describe concurrent-channel capabilities. Only one channel
+ * can be active on the radio described by struct wiphy_radio.
+ *
+ * @freq_range: frequency range that the radio can operate on.
+ * @n_freq_range: number of elements in @freq_range
+ *
+ * @iface_combinations: Valid interface combinations array, should not
+ * list single interface types.
+ * @n_iface_combinations: number of entries in @iface_combinations array.
+ */
+struct wiphy_radio {
+ const struct wiphy_radio_freq_range *freq_range;
+ int n_freq_range;
+
+ const struct ieee80211_iface_combination *iface_combinations;
+ int n_iface_combinations;
+};
+
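As an illustration, a single-radio 2.4 GHz description could be declared as below and later assigned to the @radio/@n_radio wiphy members added further down in this patch; drv_radio_if_comb stands in for a driver-provided array of struct ieee80211_iface_combination:

static const struct wiphy_radio_freq_range drv_radio_2ghz[] = {
	{ .start_freq = 2400000, .end_freq = 2500000 },	/* kHz */
};

static const struct wiphy_radio drv_radios[] = {
	{
		.freq_range = drv_radio_2ghz,
		.n_freq_range = ARRAY_SIZE(drv_radio_2ghz),
		.iface_combinations = drv_radio_if_comb,
		.n_iface_combinations = ARRAY_SIZE(drv_radio_if_comb),
	},
};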
#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
/**
@@ -5621,6 +5663,9 @@ struct wiphy_iftype_akm_suites {
* A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver
* supports enabling HW timestamping for all peers (i.e. no need to
* specify a mac address).
+ *
+ * @radio: radios belonging to this wiphy
+ * @n_radio: number of radios
*/
struct wiphy {
struct mutex mtx;
@@ -5771,6 +5816,9 @@ struct wiphy {
u16 hw_timestamp_max_peers;
+ int n_radio;
+ const struct wiphy_radio *radio;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -6082,6 +6130,21 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork);
/**
+ * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+ *
+ * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
+ */
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+};
+
+/**
* struct wireless_dev - wireless device state
*
* For netdevs, this structure must be allocated by the driver
@@ -6446,6 +6509,17 @@ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
}
/**
+ * cfg80211_radio_chandef_valid - Check if the radio supports the chandef
+ *
+ * @radio: wiphy radio
+ * @chandef: chandef for current channel
+ *
+ * Return: whether or not the given chandef is valid for the given radio
+ */
+bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
+ const struct cfg80211_chan_def *chandef);
+
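A hypothetical helper that picks the first radio able to serve a requested chandef could use it as sketched here (error handling left to the caller):

	int i;

	for (i = 0; i < wiphy->n_radio; i++) {
		if (cfg80211_radio_chandef_valid(&wiphy->radio[i], chandef))
			return i;
	}
	return -ENOENT;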
+/**
* ieee80211_get_response_rate - get basic rate for a given rate
*
* @sband: the band to look for rates in
@@ -8786,6 +8860,34 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
}
/**
+ * struct cfg80211_beaconing_check_config - beacon check configuration
+ * @iftype: the interface type to check for
+ * @relax: allow IR-relaxation conditions to apply (e.g. another
+ * interface connected already on the same channel)
+ * NOTE: If this is set, wiphy mutex must be held.
+ * @reg_power: &enum ieee80211_ap_reg_power value indicating the
+ * advertised/used 6 GHz regulatory power setting
+ */
+struct cfg80211_beaconing_check_config {
+ enum nl80211_iftype iftype;
+ enum ieee80211_ap_reg_power reg_power;
+ bool relax;
+};
+
+/**
+ * cfg80211_reg_check_beaconing - check if beaconing is allowed
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @cfg: additional parameters for the checking
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.)
+ */
+bool cfg80211_reg_check_beaconing(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ struct cfg80211_beaconing_check_config *cfg);
+
+/**
* cfg80211_reg_can_beacon - check if beaconing is allowed
* @wiphy: the wiphy
* @chandef: the channel definition
@@ -8794,9 +8896,17 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
* Return: %true if there is no secondary channel or the secondary channel(s)
* can be used for beaconing (i.e. is not a radar channel etc.)
*/
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype);
+static inline bool
+cfg80211_reg_can_beacon(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ struct cfg80211_beaconing_check_config config = {
+ .iftype = iftype,
+ };
+
+ return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
+}
/**
* cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
@@ -8811,9 +8921,18 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
*
* Context: Requires the wiphy mutex to be held.
*/
-bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype);
+static inline bool
+cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ struct cfg80211_beaconing_check_config config = {
+ .iftype = iftype,
+ .relax = true,
+ };
+
+ return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
+}
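A caller that also needs to convey the 6 GHz power mode can fill the config directly; the sketch below assumes an AP start path where the wiphy mutex is already held (required when .relax is set):

	struct cfg80211_beaconing_check_config cfg = {
		.iftype = NL80211_IFTYPE_AP,
		.reg_power = IEEE80211_REG_LPI_AP,
		.relax = true,
	};

	if (!cfg80211_reg_check_beaconing(wiphy, chandef, &cfg))
		return -EINVAL;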
/**
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 35eb0f884386..db5eff6cb60f 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -352,7 +352,7 @@ struct devlink_dpipe_table {
bool resource_valid;
u64 resource_id;
u64 resource_units;
- struct devlink_dpipe_table_ops *table_ops;
+ const struct devlink_dpipe_table_ops *table_ops;
struct rcu_head rcu;
};
@@ -1751,7 +1751,7 @@ void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
int devl_dpipe_table_register(struct devlink *devlink,
const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
+ const struct devlink_dpipe_table_ops *table_ops,
void *priv, bool counter_control_extern);
void devl_dpipe_table_unregister(struct devlink *devlink,
const char *table_name);
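With the constification, drivers can keep the ops in read-only memory; a registration sketch with illustrative names ("drv_host_table", drv_dpipe_table_ops, priv):

static const struct devlink_dpipe_table_ops drv_dpipe_table_ops = {
	/* driver dump/size callbacks go here */
};

	err = devl_dpipe_table_register(devlink, "drv_host_table",
					&drv_dpipe_table_ops, priv, false);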
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b60e7e410aba..b06f97ae3da1 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -53,6 +53,7 @@ struct tc_action;
#define DSA_TAG_PROTO_RTL8_4T_VALUE 25
#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
#define DSA_TAG_PROTO_LAN937X_VALUE 27
+#define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE 28
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -83,6 +84,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
+ DSA_TAG_PROTO_VSC73XX_8021Q = DSA_TAG_PROTO_VSC73XX_8021Q_VALUE,
};
struct dsa_switch;
@@ -882,15 +884,9 @@ struct dsa_switch_ops {
struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
int port,
phy_interface_t iface);
- int (*phylink_mac_prepare)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
void (*phylink_mac_config)(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state);
- int (*phylink_mac_finish)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -940,7 +936,7 @@ struct dsa_switch_ops {
* ethtool timestamp info
*/
int (*get_ts_info)(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
/*
* ethtool MAC merge layer
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 6d1c8541183d..3a9001a042a5 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -24,7 +24,7 @@ struct dst_ops {
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev);
- struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*negative_advice)(struct sock *sk, struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 9ab376d1a677..ced79dc8e856 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -7,6 +7,7 @@
#include <linux/siphash.h>
#include <linux/string.h>
#include <uapi/linux/if_ether.h>
+#include <uapi/linux/pkt_cls.h>
struct bpf_prog;
struct net;
@@ -16,7 +17,8 @@ struct sk_buff;
* struct flow_dissector_key_control:
* @thoff: Transport header offset
* @addr_type: Type of key. One of FLOW_DISSECTOR_KEY_*
- * @flags: Key flags. Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAGENCAPSULATION)
+ * @flags: Key flags.
+ * Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAG|ENCAPSULATION|F_*)
*/
struct flow_dissector_key_control {
u16 thoff;
@@ -24,9 +26,20 @@ struct flow_dissector_key_control {
u32 flags;
};
-#define FLOW_DIS_IS_FRAGMENT BIT(0)
-#define FLOW_DIS_FIRST_FRAG BIT(1)
-#define FLOW_DIS_ENCAPSULATION BIT(2)
+/* The control flags are kept in sync with TCA_FLOWER_KEY_FLAGS_*, as those
+ * flags are exposed to userspace in some error paths, i.e. unsupported flags.
+ */
+enum flow_dissector_ctrl_flags {
+ FLOW_DIS_IS_FRAGMENT = TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT,
+ FLOW_DIS_FIRST_FRAG = TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
+ FLOW_DIS_F_TUNNEL_CSUM = TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+ FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+ FLOW_DIS_F_TUNNEL_OAM = TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM,
+ FLOW_DIS_F_TUNNEL_CRIT_OPT = TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+
+ /* These flags are internal to the kernel */
+ FLOW_DIS_ENCAPSULATION = (TCA_FLOWER_KEY_FLAGS_MAX << 1),
+};
enum flow_dissect_ret {
FLOW_DISSECT_RET_OUT_GOOD,
@@ -433,6 +446,8 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
}
u32 flow_hash_from_keys(struct flow_keys *keys);
+u32 flow_hash_from_keys_seed(struct flow_keys *keys,
+ const siphash_key_t *keyval);
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
struct flow_dissector_key_icmp *key_icmp,
const void *data, int thoff, int hlen);
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index ec9f80509f60..292cd8f4b762 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -472,6 +472,28 @@ static inline bool flow_rule_is_supp_control_flags(const u32 supp_flags,
}
/**
+ * flow_rule_is_supp_enc_control_flags() - check for supported control flags
+ * @supp_enc_flags: encapsulation control flags supported by driver
+ * @enc_ctrl_flags: encapsulation control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if only supported control flags are set, false otherwise.
+ */
+static inline bool flow_rule_is_supp_enc_control_flags(const u32 supp_enc_flags,
+ const u32 enc_ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ if (likely((enc_ctrl_flags & ~supp_enc_flags) == 0))
+ return true;
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported match on enc_control.flags %#x",
+ enc_ctrl_flags);
+
+ return false;
+}
+
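A flower offload path might use the helper as sketched below, declaring which tunnel flags it can match on; the supported set chosen here is purely an example:

	struct flow_match_enc_control match;

	flow_rule_match_enc_control(rule, &match);
	if (!flow_rule_is_supp_enc_control_flags(FLOW_DIS_F_TUNNEL_CSUM |
						 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT,
						 match.mask->flags, extack))
		return -EOPNOTSUPP;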
+/**
* flow_rule_has_control_flags() - check for presence of any control flags
* @ctrl_flags: control flags present in rule
* @extack: The netlink extended ACK for reporting errors.
@@ -485,6 +507,19 @@ static inline bool flow_rule_has_control_flags(const u32 ctrl_flags,
}
/**
+ * flow_rule_has_enc_control_flags() - check for presence of any control flags
+ * @enc_ctrl_flags: encapsulation control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if control flags are set, false otherwise.
+ */
+static inline bool flow_rule_has_enc_control_flags(const u32 enc_ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ return !flow_rule_is_supp_enc_control_flags(0, enc_ctrl_flags, extack);
+}
+
+/**
* flow_rule_match_has_control_flags() - match and check for any control flags
* @rule: The flow_rule under evaluation.
* @extack: The netlink extended ACK for reporting errors.
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 925bac726a92..91762faecc13 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -582,6 +582,7 @@ enum ieee80211_radiotap_eht_usig_tb {
/**
* ieee80211_get_radiotap_len - get radiotap header length
* @data: pointer to the header
+ * Return: the radiotap header length
*/
static inline u16 ieee80211_get_radiotap_len(const char *data)
{
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7d6b1254c92d..c0deaafebfdc 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -263,7 +263,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
struct sock *child);
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
struct request_sock *req,
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 153960663ce4..5af6eb14c5db 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -76,7 +76,7 @@ struct frag_v6_compare_key {
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
- * @mono_delivery_time: stamp has a mono delivery time (EDT)
+ * @tstamp_type: clock type of @stamp (see &enum skb_tstamp_type)
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @fqdir: pointer to struct fqdir
@@ -97,7 +97,7 @@ struct inet_frag_queue {
ktime_t stamp;
int len;
int meat;
- u8 mono_delivery_time;
+ u8 tstamp_type;
__u8 flags;
u16 max_size;
struct fqdir *fqdir;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 2a536eea9424..f88b68269012 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -93,17 +93,14 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
const int state);
-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
+ struct sock *sk,
+ struct inet_hashinfo *hashinfo,
+ int timeo);
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
bool rearm);
-static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
-{
- __inet_twsk_schedule(tw, timeo, false);
-}
-
static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
{
__inet_twsk_schedule(tw, timeo, true);
diff --git a/include/net/ip.h b/include/net/ip.h
index 6d735e00d3f3..c5606cadb1a5 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -506,8 +506,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}
-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
- int fc_mx_len,
+struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len,
struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index a18ed24fed94..6dbdf60b342f 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -127,18 +127,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
const struct in6_addr *daddr,
- unsigned int prefs,
+ unsigned int prefs, int l3mdev_index,
struct in6_addr *saddr)
{
+ struct net_device *l3mdev;
+ struct net_device *dev;
+ bool same_vrf;
int err = 0;
- if (f6i && f6i->fib6_prefsrc.plen) {
+ rcu_read_lock();
+
+ l3mdev = dev_get_by_index_rcu(net, l3mdev_index);
+ if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev)
+ dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+ same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev;
+ if (f6i && f6i->fib6_prefsrc.plen && same_vrf)
*saddr = f6i->fib6_prefsrc.addr;
- } else {
- struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+ else
+ err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr);
- err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
- }
+ rcu_read_unlock();
return err;
}
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9b2f69ba5e49..6e7984bfb986 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -520,7 +520,35 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys);
+
+static void
+fib_multipath_hash_construct_key(siphash_key_t *key, u32 mp_seed)
+{
+ u64 mp_seed_64 = mp_seed;
+
+ key->key[0] = (mp_seed_64 << 32) | mp_seed_64;
+ key->key[1] = key->key[0];
+}
+
+static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ struct flow_keys *keys)
+{
+ siphash_aligned_key_t hash_key;
+ u32 mp_seed;
+
+ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
+ fib_multipath_hash_construct_key(&hash_key, mp_seed);
+
+ return flow_hash_from_keys_seed(keys, &hash_key);
+}
+#else
+static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ struct flow_keys *keys)
+{
+ return flow_hash_from_keys(keys);
+}
#endif
+
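An illustrative caller, hashing dissected flow keys with the per-netns multipath seed (skb and net come from the surrounding context):

	struct flow_keys keys;
	u32 hash;

	memset(&keys, 0, sizeof(keys));
	skb_flow_dissect_flow_keys(skb, &keys, 0);
	hash = fib_multipath_hash_from_keys(net, &keys);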
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
struct netlink_ext_ack *extack);
void fib_select_multipath(struct fib_result *res, int hash);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9a6a08ec7713..1db2417b8ff5 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -461,9 +461,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
/* Variant of pskb_inet_may_pull().
*/
-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
+ bool inner_proto_inherit)
{
- int nhlen = 0, maclen = ETH_HLEN;
+ int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
__be16 type = skb->protocol;
/* Essentially this is skb_protocol(skb, true)
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 485c39a89866..11cefd50704d 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -9,6 +9,7 @@
#include <net/flow.h>
#include <net/neighbour.h>
#include <net/sock.h>
+#include <net/ipv6.h>
/* structs from net/ip6_fib.h */
struct fib6_info;
@@ -72,6 +73,8 @@ struct ipv6_stub {
int (*output)(struct net *, struct sock *, struct sk_buff *));
struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
struct net_device *dev);
+ int (*ip6_xmit)(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
diff --git a/include/net/libeth/cache.h b/include/net/libeth/cache.h
new file mode 100644
index 000000000000..bdb0c043ce61
--- /dev/null
+++ b/include/net/libeth/cache.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_CACHE_H
+#define __LIBETH_CACHE_H
+
+#include <linux/cache.h>
+
+/**
+ * libeth_cacheline_group_assert - make sure cacheline group size is expected
+ * @type: type of the structure containing the group
+ * @grp: group name inside the struct
+ * @sz: expected group size
+ */
+#if defined(CONFIG_64BIT) && SMP_CACHE_BYTES == 64
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) == \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) == (sz))
+#else /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) <= \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) <= (sz))
+#endif /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+
+#define __libeth_cls1(sz1) SMP_CACHE_ALIGN(sz1)
+#define __libeth_cls2(sz1, sz2) (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2))
+#define __libeth_cls3(sz1, sz2, sz3) \
+ (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2) + SMP_CACHE_ALIGN(sz3))
+#define __libeth_cls(...) \
+ CONCATENATE(__libeth_cls, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+/**
+ * libeth_cacheline_struct_assert - make sure CL-based struct size is expected
+ * @type: type of the struct
+ * @...: from 1 to 3 CL group sizes (read-mostly, read-write, cold)
+ *
+ * When a struct contains several CL groups, it's difficult to predict its size
+ * on different architectures. The macro instead takes sizes of all of the
+ * groups the structure contains and generates the final struct size.
+ */
+#define libeth_cacheline_struct_assert(type, ...) \
+ __libeth_cacheline_struct_assert(type, __libeth_cls(__VA_ARGS__)); \
+ static_assert(__alignof(type) >= SMP_CACHE_BYTES)
+
+/**
+ * libeth_cacheline_set_assert - make sure CL-based struct layout is expected
+ * @type: type of the struct
+ * @ro: expected size of the read-mostly group
+ * @rw: expected size of the read-write group
+ * @c: expected size of the cold group
+ *
+ * Check that each group size is expected and then do final struct size check.
+ */
+#define libeth_cacheline_set_assert(type, ro, rw, c) \
+ libeth_cacheline_group_assert(type, read_mostly, ro); \
+ libeth_cacheline_group_assert(type, read_write, rw); \
+ libeth_cacheline_group_assert(type, cold, c); \
+ libeth_cacheline_struct_assert(type, ro, rw, c)
+
+#endif /* __LIBETH_CACHE_H */
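A usage sketch for a driver-side queue structure follows; the layout and the 12/8/4 group sizes are purely illustrative and assume a 64-bit build (on other configurations the asserts degrade to upper-bound checks):

struct drv_rx_queue {
	__cacheline_group_begin_aligned(read_mostly);
	void *desc_ring;
	u32 ring_len;
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;
	u32 next_to_clean;
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	u32 rx_buf_len;
	__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct drv_rx_queue, 12, 8, 4);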
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
index f29ea3e34c6c..43574bd6612f 100644
--- a/include/net/libeth/rx.h
+++ b/include/net/libeth/rx.h
@@ -17,6 +17,8 @@
#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
+/* Maximum supported L2-L4 header length */
+#define LIBETH_MAX_HEAD roundup_pow_of_two(max(MAX_HEADER, 256))
/* Always use order-0 pages */
#define LIBETH_RX_PAGE_ORDER 0
@@ -44,12 +46,26 @@ struct libeth_fqe {
} __aligned_largest;
/**
+ * enum libeth_fqe_type - enum representing types of Rx buffers
+ * @LIBETH_FQE_MTU: buffer size is determined by MTU
+ * @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames
+ * @LIBETH_FQE_HDR: buffer size is ```LIBETH_MAX_HEAD```-sized, for headers
+ */
+enum libeth_fqe_type {
+ LIBETH_FQE_MTU = 0U,
+ LIBETH_FQE_SHORT,
+ LIBETH_FQE_HDR,
+};
+
+/**
* struct libeth_fq - structure representing a buffer (fill) queue
* @fp: hotpath part of the structure
* @pp: &page_pool for buffer management
* @fqes: array of Rx buffers
* @truesize: size to allocate per buffer, w/overhead
* @count: number of descriptors/buffers the queue has
+ * @type: type of the buffers this queue has
+ * @hsplit: flag whether header split is enabled
* @buf_len: HW-writeable length per each buffer
* @nid: ID of the closest NUMA node with memory
*/
@@ -63,6 +79,9 @@ struct libeth_fq {
);
/* Cold fields */
+ enum libeth_fqe_type type:2;
+ bool hsplit:1;
+
u32 buf_len;
int nid;
};
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
index 53823d61d8b6..a4bea0f33188 100644
--- a/include/net/llc_c_st.h
+++ b/include/net/llc_c_st.h
@@ -44,8 +44,8 @@ struct llc_conn_state_trans {
};
struct llc_conn_state {
- u8 current_state;
- struct llc_conn_state_trans **transitions;
+ u8 current_state;
+ const struct llc_conn_state_trans **transitions;
};
extern struct llc_conn_state llc_conn_state_table[];
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
index ed5b2fa40d32..fca49d483d20 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -29,8 +29,8 @@ struct llc_sap_state_trans {
};
struct llc_sap_state {
- u8 curr_state;
- struct llc_sap_state_trans **transitions;
+ u8 curr_state;
+ const struct llc_sap_state_trans **transitions;
};
/* only access to SAP state table */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cafc664ee531..0a04eaf5343c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -250,6 +250,7 @@ struct ieee80211_chan_req {
* @min_def: the minimum channel definition currently required.
* @ap: the channel definition the AP actually is operating as,
* for use with (wider bandwidth) OFDMA
+ * @radio_idx: index of the wiphy radio used for this channel
* @rx_chains_static: The number of RX chains that must always be
* active on the channel to receive MIMO transmissions
* @rx_chains_dynamic: The number of RX chains that must be enabled
@@ -264,6 +265,7 @@ struct ieee80211_chanctx_conf {
struct cfg80211_chan_def min_def;
struct cfg80211_chan_def ap;
+ int radio_idx;
u8 rx_chains_static, rx_chains_dynamic;
bool radar_enabled;
@@ -362,6 +364,7 @@ struct ieee80211_vif_chanctx_switch {
* status changed.
* @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
* @BSS_CHANGED_MLD_TTLM: negotiated TID to link mapping was changed
+ * @BSS_CHANGED_TPE: transmit power envelope changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -395,9 +398,10 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_OBSS_PD = 1<<28,
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
- BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = BIT_ULL(31),
BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
+ BSS_CHANGED_TPE = BIT_ULL(35),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -550,6 +554,39 @@ struct ieee80211_fils_discovery {
u32 max_interval;
};
+#define IEEE80211_TPE_EIRP_ENTRIES_320MHZ 5
+struct ieee80211_parsed_tpe_eirp {
+ bool valid;
+ s8 power[IEEE80211_TPE_EIRP_ENTRIES_320MHZ];
+ u8 count;
+};
+
+#define IEEE80211_TPE_PSD_ENTRIES_320MHZ 16
+struct ieee80211_parsed_tpe_psd {
+ bool valid;
+ s8 power[IEEE80211_TPE_PSD_ENTRIES_320MHZ];
+ u8 count, n;
+};
+
+/**
+ * struct ieee80211_parsed_tpe - parsed transmit power envelope information
+ * @max_local: maximum local EIRP, one value for 20, 40, 80, 160, 320 MHz each
+ * (indexed by TX power category)
+ * @max_reg_client: maximum regulatory client EIRP, one value for 20, 40, 80,
+ * 160, 320 MHz each
+ * (indexed by TX power category)
+ * @psd_local: maximum local power spectral density, one value for each 20 MHz
+ * subchannel per bss_conf's chanreq.oper
+ * (indexed by TX power category)
+ * @psd_reg_client: maximum regulatory power spectral density, one value for
+ * each 20 MHz subchannel per bss_conf's chanreq.oper
+ * (indexed by TX power category)
+ */
+struct ieee80211_parsed_tpe {
+ struct ieee80211_parsed_tpe_eirp max_local[2], max_reg_client[2];
+ struct ieee80211_parsed_tpe_psd psd_local[2], psd_reg_client[2];
+};
+
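For illustration, a driver applying the parsed local EIRP limits might do something like the following; picking index 0 of max_local[] (the default power category) is an assumption of this sketch:

	const struct ieee80211_parsed_tpe_eirp *eirp = &bss_conf->tpe.max_local[0];
	int i;

	if (eirp->valid)
		for (i = 0; i < eirp->count; i++)
			max_eirp_dbm = min_t(int, max_eirp_dbm, eirp->power[i]);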
/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
@@ -662,8 +699,7 @@ struct ieee80211_fils_discovery {
* @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
* to driver when rate control is offloaded to firmware.
* @power_type: power type of BSS for 6 GHz
- * @tx_pwr_env: transmit power envelope array of BSS.
- * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @tpe: transmit power envelope information
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
* @csa_active: marks whether a channel switch is going on.
@@ -701,6 +737,9 @@ struct ieee80211_fils_discovery {
* beamformee
* @eht_mu_beamformer: in AP-mode, does this BSS enable operation as an EHT MU
* beamformer
+ * @eht_80mhz_full_bw_ul_mumimo: in AP-mode, does this BSS support the
+ * reception of an EHT TB PPDU on an RU that spans the entire PPDU
+ * bandwidth
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
@@ -766,8 +805,9 @@ struct ieee80211_bss_conf {
u32 unsol_bcast_probe_resp_interval;
struct cfg80211_bitrate_mask beacon_tx_rate;
enum ieee80211_ap_reg_power power_type;
- struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
- u8 tx_pwr_env_num;
+
+ struct ieee80211_parsed_tpe tpe;
+
u8 pwr_reduction;
bool eht_support;
@@ -793,6 +833,7 @@ struct ieee80211_bss_conf {
bool eht_su_beamformer;
bool eht_su_beamformee;
bool eht_mu_beamformer;
+ bool eht_80mhz_full_bw_ul_mumimo;
};
/**
@@ -2728,14 +2769,6 @@ struct ieee80211_txq {
* @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
* TDLS links.
*
- * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the
- * mgd_prepare_tx() callback to be called before transmission of a
- * deauthentication frame in case the association was completed but no
- * beacon was heard. This is required in multi-channel scenarios, where the
- * virtual interface might not be given air time for the transmission of
- * the frame, as it is not synced with the AP/P2P GO yet, and thus the
- * deauthentication frame might not be transmitted.
- *
* @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
* support QoS NDP for AP probing - that's most likely a driver bug.
*
@@ -2835,7 +2868,6 @@ enum ieee80211_hw_flags {
IEEE80211_HW_REPORTS_LOW_ACK,
IEEE80211_HW_SUPPORTS_TX_FRAG,
IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
- IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
IEEE80211_HW_BUFF_MMPDU_TXQ,
IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
@@ -3748,13 +3780,15 @@ enum ieee80211_reconfig_type {
* @success: whether the frame exchange was successful, only
* used with the mgd_complete_tx() method, and then only
* valid for auth and (re)assoc.
+ * @was_assoc: set if this call is due to deauth/disassoc
+ * while just having been associated
* @link_id: the link id on which the frame will be TX'ed.
* Only used with the mgd_prepare_tx() method.
*/
struct ieee80211_prep_tx_info {
u16 duration;
u16 subtype;
- u8 success:1;
+ u8 success:1, was_assoc:1;
int link_id;
};
@@ -4203,12 +4237,9 @@ struct ieee80211_prep_tx_info {
* yet it need not necessarily be given airtime, in particular since any
* transmission to a P2P GO needs to be synchronized against the GO's
* powersave state. mac80211 will call this function before transmitting a
- * management frame prior to having successfully associated to allow the
- * driver to give it channel time for the transmission, to get a response
- * and to be able to synchronize with the GO.
- * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211
- * would also call this function before transmitting a deauthentication
- * frame in case that no beacon was heard from the AP/P2P GO.
+ * management frame prior to transmitting that frame to allow the driver
+ * to give it channel time for the transmission, to get a response and be
+ * able to synchronize with the GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
* Additional information is passed in the &struct ieee80211_prep_tx_info
@@ -4405,7 +4436,7 @@ struct ieee80211_ops {
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
- void (*stop)(struct ieee80211_hw *hw);
+ void (*stop)(struct ieee80211_hw *hw, bool suspend);
#ifdef CONFIG_PM
int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
int (*resume)(struct ieee80211_hw *hw);
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 27684135bb4d..83963d9e804d 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -224,7 +224,15 @@ struct gdma_dev {
struct auxiliary_device *adev;
};
-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+/* MANA_PAGE_SIZE is the DMA unit */
+#define MANA_PAGE_SHIFT 12
+#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
+#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
+#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
+#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
+
+/* Required by HW */
+#define MANA_MIN_QSIZE MANA_PAGE_SIZE
#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
@@ -543,11 +551,13 @@ enum {
*/
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
+#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
- GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
+ GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT)
#define GDMA_DRV_CAP_FLAGS2 0
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 561f6719fb4e..e39b8676fe54 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -30,8 +30,8 @@ enum TRI_STATE {
};
/* Number of entries for hardware indirection table must be in power of 2 */
-#define MANA_INDIRECT_TABLE_SIZE 64
-#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
+#define MANA_INDIRECT_TABLE_MAX_SIZE 512
+#define MANA_INDIRECT_TABLE_DEF_SIZE 64
/* The Toeplitz hash key's length in bytes: should be multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
@@ -42,7 +42,8 @@ enum TRI_STATE {
#define MAX_SEND_BUFFERS_PER_QUEUE 256
-#define EQ_SIZE (8 * PAGE_SIZE)
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
+
#define LOG2_EQ_THROTTLE 3
#define MAX_PORTS_IN_MANA_DEV 256
@@ -410,10 +411,11 @@ struct mana_port_context {
struct mana_tx_qp *tx_qp;
/* Indirection Table for RX & TX. The values are queue indexes */
- u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+ u32 *indir_table;
+ u32 indir_table_sz;
/* Indirection table containing RxObject Handles */
- mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+ mana_handle_t *rxobj_table;
/* Hash key used by the NIC */
u8 hashkey[MANA_HASH_KEY_SIZE];
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index a8a7e48dfa6c..5ca019d294ca 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -62,6 +62,8 @@ struct netdev_queue_stats_tx {
* statistics will not generally add up to the total number of events for
* the device. The @get_base_stats callback allows filling in the delta
* between events for currently live queues and overall device history.
+ * @get_base_stats can also be used to report any miscellaneous packets
+ * transferred outside of the main set of queues used by the networking stack.
* When the statistics for the entire device are queried, first @get_base_stats
* is issued to collect the delta, and then a series of per-queue callbacks.
* Only statistics which are set in @get_base_stats will be reported
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 9abb7ee40d72..b63d53bb9dd6 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -305,11 +305,26 @@ struct flow_ports {
__be16 source, dest;
};
+struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev);
+int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+
unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
+#if (IS_BUILTIN(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+extern int nf_flow_register_bpf(void);
+#else
+static inline int nf_flow_register_bpf(void)
+{
+ return 0;
+}
+#endif
+
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2796153b03da..1bfdd16890fa 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -619,6 +619,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline enum nft_data_types nft_set_datatype(const struct nft_set *set)
+{
+ return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
static inline bool nft_set_gc_is_pending(const struct nft_set *s)
{
return refcount_read(&s->refs) != 1;
@@ -1171,7 +1176,7 @@ static inline bool nft_chain_is_bound(struct nft_chain *chain)
int nft_chain_add(struct nft_table *table, struct nft_chain *chain);
void nft_chain_del(struct nft_chain *chain);
-void nf_tables_chain_destroy(struct nft_ctx *ctx);
+void nf_tables_chain_destroy(struct nft_chain *chain);
struct nft_stats {
u64 bytes;
@@ -1608,41 +1613,67 @@ static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
}
/**
- * struct nft_trans - nf_tables object update in transaction
+ * struct nft_trans - nf_tables object update in transaction
*
- * @list: used internally
- * @binding_list: list of objects with possible bindings
- * @msg_type: message type
- * @put_net: ctx->net needs to be put
- * @ctx: transaction context
- * @data: internal information related to the transaction
+ * @list: used internally
+ * @net: struct net
+ * @table: struct nft_table the object resides in
+ * @msg_type: message type
+ * @seq: netlink sequence number
+ * @flags: modifiers to new request
+ * @report: notify via unicast netlink message
+ * @put_net: net needs to be put
+ *
+ * This is the information common to all objects in the transaction,
+ * this must always be the first member of derived sub-types.
*/
struct nft_trans {
struct list_head list;
- struct list_head binding_list;
+ struct net *net;
+ struct nft_table *table;
int msg_type;
- bool put_net;
- struct nft_ctx ctx;
- char data[];
+ u32 seq;
+ u16 flags;
+ u8 report:1;
+ u8 put_net:1;
+};
+
+/**
+ * struct nft_trans_binding - nf_tables object with binding support in transaction
+ * @nft_trans: base structure, MUST be first member
+ * @binding_list: list of objects with possible bindings
+ *
+ * This is the base type used by objects that can be bound to a chain.
+ */
+struct nft_trans_binding {
+ struct nft_trans nft_trans;
+ struct list_head binding_list;
};
struct nft_trans_rule {
+ struct nft_trans nft_trans;
struct nft_rule *rule;
+ struct nft_chain *chain;
struct nft_flow_rule *flow;
u32 rule_id;
bool bound;
};
-#define nft_trans_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->rule)
-#define nft_trans_flow_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->flow)
-#define nft_trans_rule_id(trans) \
- (((struct nft_trans_rule *)trans->data)->rule_id)
-#define nft_trans_rule_bound(trans) \
- (((struct nft_trans_rule *)trans->data)->bound)
+#define nft_trans_container_rule(trans) \
+ container_of(trans, struct nft_trans_rule, nft_trans)
+#define nft_trans_rule(trans) \
+ nft_trans_container_rule(trans)->rule
+#define nft_trans_flow_rule(trans) \
+ nft_trans_container_rule(trans)->flow
+#define nft_trans_rule_id(trans) \
+ nft_trans_container_rule(trans)->rule_id
+#define nft_trans_rule_bound(trans) \
+ nft_trans_container_rule(trans)->bound
+#define nft_trans_rule_chain(trans) \
+ nft_trans_container_rule(trans)->chain
struct nft_trans_set {
+ struct nft_trans_binding nft_trans_binding;
struct nft_set *set;
u32 set_id;
u32 gc_int;
@@ -1652,100 +1683,117 @@ struct nft_trans_set {
u32 size;
};
-#define nft_trans_set(trans) \
- (((struct nft_trans_set *)trans->data)->set)
-#define nft_trans_set_id(trans) \
- (((struct nft_trans_set *)trans->data)->set_id)
-#define nft_trans_set_bound(trans) \
- (((struct nft_trans_set *)trans->data)->bound)
-#define nft_trans_set_update(trans) \
- (((struct nft_trans_set *)trans->data)->update)
-#define nft_trans_set_timeout(trans) \
- (((struct nft_trans_set *)trans->data)->timeout)
-#define nft_trans_set_gc_int(trans) \
- (((struct nft_trans_set *)trans->data)->gc_int)
-#define nft_trans_set_size(trans) \
- (((struct nft_trans_set *)trans->data)->size)
+#define nft_trans_container_set(t) \
+ container_of(t, struct nft_trans_set, nft_trans_binding.nft_trans)
+#define nft_trans_set(trans) \
+ nft_trans_container_set(trans)->set
+#define nft_trans_set_id(trans) \
+ nft_trans_container_set(trans)->set_id
+#define nft_trans_set_bound(trans) \
+ nft_trans_container_set(trans)->bound
+#define nft_trans_set_update(trans) \
+ nft_trans_container_set(trans)->update
+#define nft_trans_set_timeout(trans) \
+ nft_trans_container_set(trans)->timeout
+#define nft_trans_set_gc_int(trans) \
+ nft_trans_container_set(trans)->gc_int
+#define nft_trans_set_size(trans) \
+ nft_trans_container_set(trans)->size
struct nft_trans_chain {
+ struct nft_trans_binding nft_trans_binding;
struct nft_chain *chain;
- bool update;
char *name;
struct nft_stats __percpu *stats;
u8 policy;
+ bool update;
bool bound;
u32 chain_id;
struct nft_base_chain *basechain;
struct list_head hook_list;
};
-#define nft_trans_chain(trans) \
- (((struct nft_trans_chain *)trans->data)->chain)
-#define nft_trans_chain_update(trans) \
- (((struct nft_trans_chain *)trans->data)->update)
-#define nft_trans_chain_name(trans) \
- (((struct nft_trans_chain *)trans->data)->name)
-#define nft_trans_chain_stats(trans) \
- (((struct nft_trans_chain *)trans->data)->stats)
-#define nft_trans_chain_policy(trans) \
- (((struct nft_trans_chain *)trans->data)->policy)
-#define nft_trans_chain_bound(trans) \
- (((struct nft_trans_chain *)trans->data)->bound)
-#define nft_trans_chain_id(trans) \
- (((struct nft_trans_chain *)trans->data)->chain_id)
-#define nft_trans_basechain(trans) \
- (((struct nft_trans_chain *)trans->data)->basechain)
-#define nft_trans_chain_hooks(trans) \
- (((struct nft_trans_chain *)trans->data)->hook_list)
+#define nft_trans_container_chain(t) \
+ container_of(t, struct nft_trans_chain, nft_trans_binding.nft_trans)
+#define nft_trans_chain(trans) \
+ nft_trans_container_chain(trans)->chain
+#define nft_trans_chain_update(trans) \
+ nft_trans_container_chain(trans)->update
+#define nft_trans_chain_name(trans) \
+ nft_trans_container_chain(trans)->name
+#define nft_trans_chain_stats(trans) \
+ nft_trans_container_chain(trans)->stats
+#define nft_trans_chain_policy(trans) \
+ nft_trans_container_chain(trans)->policy
+#define nft_trans_chain_bound(trans) \
+ nft_trans_container_chain(trans)->bound
+#define nft_trans_chain_id(trans) \
+ nft_trans_container_chain(trans)->chain_id
+#define nft_trans_basechain(trans) \
+ nft_trans_container_chain(trans)->basechain
+#define nft_trans_chain_hooks(trans) \
+ nft_trans_container_chain(trans)->hook_list
struct nft_trans_table {
+ struct nft_trans nft_trans;
bool update;
};
-#define nft_trans_table_update(trans) \
- (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_container_table(trans) \
+ container_of(trans, struct nft_trans_table, nft_trans)
+#define nft_trans_table_update(trans) \
+ nft_trans_container_table(trans)->update
struct nft_trans_elem {
+ struct nft_trans nft_trans;
struct nft_set *set;
struct nft_elem_priv *elem_priv;
bool bound;
};
-#define nft_trans_elem_set(trans) \
- (((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem_priv(trans) \
- (((struct nft_trans_elem *)trans->data)->elem_priv)
-#define nft_trans_elem_set_bound(trans) \
- (((struct nft_trans_elem *)trans->data)->bound)
+#define nft_trans_container_elem(t) \
+ container_of(t, struct nft_trans_elem, nft_trans)
+#define nft_trans_elem_set(trans) \
+ nft_trans_container_elem(trans)->set
+#define nft_trans_elem_priv(trans) \
+ nft_trans_container_elem(trans)->elem_priv
+#define nft_trans_elem_set_bound(trans) \
+ nft_trans_container_elem(trans)->bound
struct nft_trans_obj {
+ struct nft_trans nft_trans;
struct nft_object *obj;
struct nft_object *newobj;
bool update;
};
-#define nft_trans_obj(trans) \
- (((struct nft_trans_obj *)trans->data)->obj)
-#define nft_trans_obj_newobj(trans) \
- (((struct nft_trans_obj *)trans->data)->newobj)
-#define nft_trans_obj_update(trans) \
- (((struct nft_trans_obj *)trans->data)->update)
+#define nft_trans_container_obj(t) \
+ container_of(t, struct nft_trans_obj, nft_trans)
+#define nft_trans_obj(trans) \
+ nft_trans_container_obj(trans)->obj
+#define nft_trans_obj_newobj(trans) \
+ nft_trans_container_obj(trans)->newobj
+#define nft_trans_obj_update(trans) \
+ nft_trans_container_obj(trans)->update
struct nft_trans_flowtable {
+ struct nft_trans nft_trans;
struct nft_flowtable *flowtable;
- bool update;
struct list_head hook_list;
u32 flags;
+ bool update;
};
-#define nft_trans_flowtable(trans) \
- (((struct nft_trans_flowtable *)trans->data)->flowtable)
-#define nft_trans_flowtable_update(trans) \
- (((struct nft_trans_flowtable *)trans->data)->update)
-#define nft_trans_flowtable_hooks(trans) \
- (((struct nft_trans_flowtable *)trans->data)->hook_list)
-#define nft_trans_flowtable_flags(trans) \
- (((struct nft_trans_flowtable *)trans->data)->flags)
+#define nft_trans_container_flowtable(t) \
+ container_of(t, struct nft_trans_flowtable, nft_trans)
+#define nft_trans_flowtable(trans) \
+ nft_trans_container_flowtable(trans)->flowtable
+#define nft_trans_flowtable_update(trans) \
+ nft_trans_container_flowtable(trans)->update
+#define nft_trans_flowtable_hooks(trans) \
+ nft_trans_container_flowtable(trans)->hook_list
+#define nft_trans_flowtable_flags(trans) \
+ nft_trans_container_flowtable(trans)->flags
#define NFT_TRANS_GC_BATCHCOUNT 256
@@ -1759,6 +1807,33 @@ struct nft_trans_gc {
struct rcu_head rcu;
};
+static inline void nft_ctx_update(struct nft_ctx *ctx,
+ const struct nft_trans *trans)
+{
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWRULE:
+ case NFT_MSG_DELRULE:
+ case NFT_MSG_DESTROYRULE:
+ ctx->chain = nft_trans_rule_chain(trans);
+ break;
+ case NFT_MSG_NEWCHAIN:
+ case NFT_MSG_DELCHAIN:
+ case NFT_MSG_DESTROYCHAIN:
+ ctx->chain = nft_trans_chain(trans);
+ break;
+ default:
+ ctx->chain = NULL;
+ break;
+ }
+
+ ctx->net = trans->net;
+ ctx->table = trans->table;
+ ctx->family = trans->table->family;
+ ctx->report = trans->report;
+ ctx->flags = trans->flags;
+ ctx->seq = trans->seq;
+}
+
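An illustrative walk over the per-netns commit list, rebuilding the context for each queued transaction (nft_net is assumed to point at the nftables per-netns state):

	struct nft_ctx ctx = {};
	struct nft_trans *trans;

	list_for_each_entry(trans, &nft_net->commit_list, list) {
		nft_ctx_update(&ctx, trans);
		/* ctx.table, ctx.chain, ctx.seq now describe this transaction */
	}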
struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
unsigned int gc_seq, gfp_t gfp);
void nft_trans_gc_destroy(struct nft_trans_gc *trans);
diff --git a/include/net/netmem.h b/include/net/netmem.h
index d8b810245c1d..46cc9b89ac79 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -38,4 +38,19 @@ static inline netmem_ref page_to_netmem(struct page *page)
return (__force netmem_ref)page;
}
+static inline int netmem_ref_count(netmem_ref netmem)
+{
+ return page_ref_count(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_to_pfn(netmem_ref netmem)
+{
+ return page_to_pfn(netmem_to_page(netmem));
+}
+
+static inline netmem_ref netmem_compound_head(netmem_ref netmem)
+{
+ return page_to_netmem(compound_head(netmem_to_page(netmem)));
+}
+
#endif /* _NET_NETMEM_H */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index c356c458b340..5fcd61ada622 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,13 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+struct sysctl_fib_multipath_hash_seed {
+ u32 user_seed;
+ u32 mp_seed;
+};
+#endif
+
struct netns_ipv4 {
/* Cacheline organization can be found documented in
* Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst.
@@ -170,6 +177,7 @@ struct netns_ipv4 {
u8 sysctl_tcp_sack;
u8 sysctl_tcp_window_scaling;
u8 sysctl_tcp_timestamps;
+ int sysctl_tcp_rto_min_us;
u8 sysctl_tcp_recovery;
u8 sysctl_tcp_thin_linear_timeouts;
u8 sysctl_tcp_slow_start_after_idle;
@@ -245,6 +253,7 @@ struct netns_ipv4 {
#endif
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ struct sysctl_fib_multipath_hash_seed sysctl_fib_multipath_hash_seed;
u32 sysctl_fib_multipath_hash_fields;
u8 sysctl_fib_multipath_use_neigh;
u8 sysctl_fib_multipath_hash_policy;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 02bbdc577f8e..a6a0bf4a247e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -15,6 +15,9 @@ struct netns_nf {
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
+#ifdef CONFIG_LWTUNNEL
+ struct ctl_table_header *nf_lwtnl_dir_header;
+#endif
#endif
struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 423b52eca908..d489d9250bff 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -83,6 +83,7 @@ struct netns_xfrm {
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
+ struct delayed_work nat_keepalive_work;
};
#endif
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 873631c79ab1..2b43a893c619 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -55,6 +55,8 @@
#include <linux/dma-mapping.h>
#include <net/page_pool/types.h>
+#include <net/net_debug.h>
+#include <net/netmem.h>
#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
@@ -212,6 +214,11 @@ page_pool_get_dma_dir(const struct page_pool *pool)
return pool->p.dma_dir;
}
+static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
+{
+ atomic_long_set(&netmem_to_page(netmem)->pp_ref_count, nr);
+}
+
/**
* page_pool_fragment_page() - split a fresh page into fragments
* @page: page to split
@@ -232,11 +239,12 @@ page_pool_get_dma_dir(const struct page_pool *pool)
*/
static inline void page_pool_fragment_page(struct page *page, long nr)
{
- atomic_long_set(&page->pp_ref_count, nr);
+ page_pool_fragment_netmem(page_to_netmem(page), nr);
}
-static inline long page_pool_unref_page(struct page *page, long nr)
+static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
+ struct page *page = netmem_to_page(netmem);
long ret;
/* If nr == pp_ref_count then we have cleared all remaining
@@ -279,15 +287,41 @@ static inline long page_pool_unref_page(struct page *page, long nr)
return ret;
}
+static inline long page_pool_unref_page(struct page *page, long nr)
+{
+ return page_pool_unref_netmem(page_to_netmem(page), nr);
+}
+
+static inline void page_pool_ref_netmem(netmem_ref netmem)
+{
+ atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count);
+}
+
static inline void page_pool_ref_page(struct page *page)
{
- atomic_long_inc(&page->pp_ref_count);
+ page_pool_ref_netmem(page_to_netmem(page));
}
-static inline bool page_pool_is_last_ref(struct page *page)
+static inline bool page_pool_is_last_ref(netmem_ref netmem)
{
/* If page_pool_unref_page() returns 0, we were the last user */
- return page_pool_unref_page(page, 1) == 0;
+ return page_pool_unref_netmem(netmem, 1) == 0;
+}
+
+static inline void page_pool_put_netmem(struct page_pool *pool,
+ netmem_ref netmem,
+ unsigned int dma_sync_size,
+ bool allow_direct)
+{
+ /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+ * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+ */
+#ifdef CONFIG_PAGE_POOL
+ if (!page_pool_is_last_ref(netmem))
+ return;
+
+ page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
+#endif
}
/**
@@ -308,15 +342,15 @@ static inline void page_pool_put_page(struct page_pool *pool,
unsigned int dma_sync_size,
bool allow_direct)
{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
-#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_ref(page))
- return;
+ page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
+ allow_direct);
+}
- page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
-#endif
+static inline void page_pool_put_full_netmem(struct page_pool *pool,
+ netmem_ref netmem,
+ bool allow_direct)
+{
+ page_pool_put_netmem(pool, netmem, -1, allow_direct);
}
/**
@@ -331,7 +365,7 @@ static inline void page_pool_put_page(struct page_pool *pool,
static inline void page_pool_put_full_page(struct page_pool *pool,
struct page *page, bool allow_direct)
{
- page_pool_put_page(pool, page, -1, allow_direct);
+ page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
}
/**
@@ -365,6 +399,18 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}
+static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
+{
+ struct page *page = netmem_to_page(netmem);
+
+ dma_addr_t ret = page->dma_addr;
+
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+ ret <<= PAGE_SHIFT;
+
+ return ret;
+}
+
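A minimal Rx buffer cycle using the netmem variants, assuming pool was created with PP_FLAG_DMA_MAP (page_pool_alloc_netmem() is declared in types.h further down):

	netmem_ref netmem;
	dma_addr_t dma;

	netmem = page_pool_alloc_netmem(pool, GFP_ATOMIC);
	if (!netmem)
		return -ENOMEM;

	dma = page_pool_get_dma_addr_netmem(netmem);
	/* ... post dma to the device; once the buffer is consumed: */
	page_pool_put_full_netmem(pool, netmem, true);	/* direct recycling: NAPI context only */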
/**
* page_pool_get_dma_addr() - Retrieve the stored DMA address.
* @page: page allocated from a page pool
@@ -374,16 +420,14 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
*/
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
- dma_addr_t ret = page->dma_addr;
-
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
- ret <<= PAGE_SHIFT;
-
- return ret;
+ return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
}
-static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+static inline bool page_pool_set_dma_addr_netmem(netmem_ref netmem,
+ dma_addr_t addr)
{
+ struct page *page = netmem_to_page(netmem);
+
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
page->dma_addr = addr >> PAGE_SHIFT;
@@ -419,6 +463,11 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
page_pool_get_dma_dir(pool));
}
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
+}
+
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index b088d131aeb0..50569fed7868 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -6,6 +6,7 @@
#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
+#include <net/netmem.h>
#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
* map/unmap
@@ -40,21 +41,22 @@
#define PP_ALLOC_CACHE_REFILL 64
struct pp_alloc_cache {
u32 count;
- struct page *cache[PP_ALLOC_CACHE_SIZE];
+ netmem_ref cache[PP_ALLOC_CACHE_SIZE];
};
/**
* struct page_pool_params - page pool parameters
+ * @fast: params accessed frequently on hotpath
* @order: 2^order pages on allocation
* @pool_size: size of the ptr_ring
* @nid: NUMA node id to allocate from pages from
* @dev: device, for DMA pre-mapping purposes
- * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @napi: NAPI which is the sole consumer of pages, otherwise NULL
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
- * @netdev: corresponding &net_device for Netlink introspection
+ * @slow: params with slowpath access only (initialization and Netlink)
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
*/
struct page_pool_params {
@@ -72,7 +74,7 @@ struct page_pool_params {
struct net_device *netdev;
unsigned int flags;
/* private: used by test code only */
- void (*init_callback)(struct page *page, void *arg);
+ void (*init_callback)(netmem_ref netmem, void *arg);
void *init_arg;
);
};
@@ -127,6 +129,16 @@ struct page_pool_stats {
};
#endif
+/* The whole frag API block must stay within one cacheline. On 32-bit systems,
+ * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``.
+ * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``.
+ * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that
+ * one for simplicity.
+ * Having it aligned to a cacheline boundary may be excessive and doesn't bring
+ * any good.
+ */
+#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))
+
struct page_pool {
struct page_pool_params_fast p;
@@ -140,19 +152,11 @@ struct page_pool {
bool system:1; /* This is a global percpu pool */
#endif
- /* The following block must stay within one cacheline. On 32-bit
- * systems, sizeof(long) == sizeof(int), so that the block size is
- * ``3 * sizeof(long)``. On 64-bit systems, the actual size is
- * ``2 * sizeof(long) + sizeof(int)``. The closest pow-2 to both of
- * them is ``4 * sizeof(long)``, so just use that one for simplicity.
- * Having it aligned to a cacheline boundary may be excessive and
- * doesn't bring any good.
- */
- __cacheline_group_begin(frag) __aligned(4 * sizeof(long));
+ __cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
long frag_users;
- struct page *frag_page;
+ netmem_ref frag_page;
unsigned int frag_offset;
- __cacheline_group_end(frag);
+ __cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
struct delayed_work release_dw;
void (*disconnect)(void *pool);
@@ -219,8 +223,12 @@ struct page_pool {
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);
+netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
+ unsigned int *offset, unsigned int size,
+ gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
int cpuid);
@@ -228,6 +236,7 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
struct xdp_mem_info;
#ifdef CONFIG_PAGE_POOL
+void page_pool_disable_direct_recycling(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
const struct xdp_mem_info *mem);
@@ -250,6 +259,9 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
}
#endif
+void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
+ unsigned int dma_sync_size,
+ bool allow_direct);
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size,
bool allow_direct);
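
A minimal sketch of how a driver could sit on top of the converted frag API above, assuming a pool already created with page_pool_create(); the my_* helpers are illustrative, only page_pool_alloc_frag_netmem() and page_pool_put_unrefed_netmem() come from this header:

#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Fill one rx slot with a netmem fragment of @len bytes. */
static int my_rx_refill_one(struct page_pool *pool, netmem_ref *slot,
			    unsigned int *offset, unsigned int len)
{
	*slot = page_pool_alloc_frag_netmem(pool, offset, len, GFP_ATOMIC);
	return *slot ? 0 : -ENOMEM;
}

/* Drop a buffer the device never wrote to: no DMA sync, not from NAPI. */
static void my_rx_drop_one(struct page_pool *pool, netmem_ref netmem)
{
	page_pool_put_unrefed_netmem(pool, netmem, 0, false);
}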
diff --git a/include/net/psample.h b/include/net/psample.h
index 0509d2d6be67..5071b5fc2b59 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -24,7 +24,10 @@ struct psample_metadata {
u8 out_tc_valid:1,
out_tc_occ_valid:1,
latency_valid:1,
- unused:5;
+ rate_as_probability:1,
+ unused:4;
+ const u8 *user_cookie;
+ u32 user_cookie_len;
};
struct psample_group *psample_group_get(struct net *net, u32 group_num);
@@ -35,13 +38,15 @@ struct sk_buff;
#if IS_ENABLED(CONFIG_PSAMPLE)
-void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
- u32 sample_rate, const struct psample_metadata *md);
+void psample_sample_packet(struct psample_group *group,
+ const struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md);
#else
static inline void psample_sample_packet(struct psample_group *group,
- struct sk_buff *skb, u32 sample_rate,
+ const struct sk_buff *skb,
+ u32 sample_rate,
const struct psample_metadata *md)
{
}
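
The psample changes above make the skb const, add a per-sample user cookie, and let callers flag the rate as a probability (rate_as_probability) rather than a 1-in-N ratio. A minimal sketch of a caller attaching a cookie, assuming the group reference is already held; my_report_sample() and the literal rate are illustrative:

#include <net/psample.h>

static void my_report_sample(struct psample_group *group,
			     const struct sk_buff *skb,
			     const u8 *cookie, u32 cookie_len)
{
	struct psample_metadata md = {};

	md.user_cookie = cookie;
	md.user_cookie_len = cookie_len;

	psample_sample_packet(group, skb, 100, &md);	/* 1-in-100 sampling */
}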
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebf9e028d1ef..a103f4c8cf75 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -71,8 +71,6 @@ enum environment_cap {
* CRDA and can be used by other regulatory requests. When a
* the last request is not yet processed we must yield until it
* is processed before processing any new requests.
- * @country_ie_checksum: checksum of the last processed and accepted
- * country IE
* @country_ie_env: lets us know if the AP is telling us we are outdoor,
* indoor, or if it doesn't matter
* @list: used to insert into the reg_requests_list linked list
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index d88c0dfc2d46..b07b1cd14e9f 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -128,39 +128,6 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb,
return sk;
}
-static inline struct request_sock *
-reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
- bool attach_listener)
-{
- struct request_sock *req;
-
- req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
- if (!req)
- return NULL;
- req->rsk_listener = NULL;
- if (attach_listener) {
- if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
- kmem_cache_free(ops->slab, req);
- return NULL;
- }
- req->rsk_listener = sk_listener;
- }
- req->rsk_ops = ops;
- req_to_sk(req)->sk_prot = sk_listener->sk_prot;
- sk_node_init(&req_to_sk(req)->sk_node);
- sk_tx_queue_clear(req_to_sk(req));
- req->saved_syn = NULL;
- req->syncookie = 0;
- req->timeout = 0;
- req->num_timeout = 0;
- req->num_retrans = 0;
- req->sk = NULL;
- refcount_set(&req->rsk_refcnt, 0);
-
- return req;
-}
-#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
-
static inline void __reqsk_free(struct request_sock *req)
{
req->rsk_ops->destructor(req);
@@ -172,14 +139,14 @@ static inline void __reqsk_free(struct request_sock *req)
static inline void reqsk_free(struct request_sock *req)
{
- WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
+ DEBUG_NET_WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
__reqsk_free(req);
}
static inline void reqsk_put(struct request_sock *req)
{
if (refcount_dec_and_test(&req->rsk_refcnt))
- reqsk_free(req);
+ __reqsk_free(req);
}
/*
@@ -285,4 +252,16 @@ static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
return atomic_read(&queue->young);
}
+/* RFC 7323 2.3 Using the Window Scale Option
+ * The window field (SEG.WND) of every outgoing segment, with the
+ * exception of <SYN> segments, MUST be right-shifted by
+ * Rcv.Wind.Shift bits.
+ *
+ * This means the SEG.WND carried in SYNACK can not exceed 65535.
+ * We use this property to harden TCP stack while in NEW_SYN_RECV state.
+ */
+static inline u32 tcp_synack_window(const struct request_sock *req)
+{
+ return min(req->rsk_rcv_wnd, 65535U);
+}
#endif /* _REQUEST_SOCK_H */
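
The helper above is a hardening clamp: a <SYN,ACK> never carries a scaled window, so whatever the request socket computed must fit the unscaled 16-bit field. A one-line illustration of where the clamp applies when filling the header; my_fill_synack_window() is illustrative, the real call sites live in the SYNACK build path:

static inline void my_fill_synack_window(struct tcphdr *th,
					 const struct request_sock *req)
{
	th->window = htons(tcp_synack_window(req));
}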
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3bfb80bad173..b45d57b5968a 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,6 +13,7 @@ enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
+ RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */
};
enum rtnl_kinds {
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 572d73fdcd5e..8034bf5febbe 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -35,10 +35,10 @@ struct sctp_sched_ops {
struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
/* Called only if the chunk fit the packet */
void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
- /* Sched all chunks already enqueued */
- void (*sched_all)(struct sctp_stream *steam);
- /* Unched all chunks already enqueued */
- void (*unsched_all)(struct sctp_stream *steam);
+ /* Schedule all chunks already enqueued */
+ void (*sched_all)(struct sctp_stream *stream);
+ /* Unschedule all chunks already enqueued */
+ void (*unsched_all)(struct sctp_stream *stream);
};
int sctp_sched_set_sched(struct sctp_association *asoc,
diff --git a/include/net/seg6.h b/include/net/seg6.h
index af668f17b398..82b3fbbcbb93 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -52,10 +52,17 @@ static inline struct seg6_pernet_data *seg6_pernet(struct net *net)
extern int seg6_init(void);
extern void seg6_exit(void);
+#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
extern int seg6_iptunnel_init(void);
extern void seg6_iptunnel_exit(void);
extern int seg6_local_init(void);
extern void seg6_local_exit(void);
+#else
+static inline int seg6_iptunnel_init(void) { return 0; }
+static inline void seg6_iptunnel_exit(void) {}
+static inline int seg6_local_init(void) { return 0; }
+static inline void seg6_local_exit(void) {}
+#endif
extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 2b5d2ee5613e..24f733b3e3fe 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -49,9 +49,16 @@ extern int seg6_hmac_info_del(struct net *net, u32 key);
extern int seg6_push_hmac(struct net *net, struct in6_addr *saddr,
struct ipv6_sr_hdr *srh);
extern bool seg6_hmac_validate_skb(struct sk_buff *skb);
+#ifdef CONFIG_IPV6_SEG6_HMAC
extern int seg6_hmac_init(void);
extern void seg6_hmac_exit(void);
extern int seg6_hmac_net_init(struct net *net);
extern void seg6_hmac_net_exit(struct net *net);
+#else
+static inline int seg6_hmac_init(void) { return 0; }
+static inline void seg6_hmac_exit(void) {}
+static inline int seg6_hmac_net_init(struct net *net) { return 0; }
+static inline void seg6_hmac_net_exit(struct net *net) {}
+#endif
#endif
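
Together with the matching stubs added to net/seg6.h above, these !CONFIG_IPV6_SEG6_HMAC variants let the generic seg6 setup code call every sub-initialiser unconditionally and unwind on failure. A sketch of that caller pattern (the function name and error labels are illustrative; only the seg6_* entry points come from these headers):

#include <linux/init.h>
#include <net/seg6.h>
#include <net/seg6_hmac.h>

static int __init my_seg6_like_init(void)
{
	int err;

	err = seg6_hmac_init();
	if (err)
		return err;

	err = seg6_iptunnel_init();
	if (err)
		goto out_hmac;

	err = seg6_local_init();
	if (err)
		goto out_iptunnel;

	return 0;

out_iptunnel:
	seg6_iptunnel_exit();
out_hmac:
	seg6_hmac_exit();
	return err;
}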
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
index 3fab9dec2ec4..888c1ce6f527 100644
--- a/include/net/seg6_local.h
+++ b/include/net/seg6_local.h
@@ -19,6 +19,7 @@ extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
struct seg6_bpf_srh_state {
+ local_lock_t bh_lock;
struct ipv6_sr_hdr *srh;
u16 hdrlen;
bool valid;
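
The new bh_lock member lets the per-CPU SRH scratch state be protected by a scoped local lock in BH context instead of relying on softirq serialisation. A sketch of how the per-CPU instance would be declared and touched, assuming the local_lock_nested_bh() helpers from the same series; the variable and function names are illustrative, the real user lives in the BPF seg6 helpers:

#include <linux/local_lock.h>
#include <linux/percpu-defs.h>
#include <net/seg6_local.h>

static DEFINE_PER_CPU(struct seg6_bpf_srh_state, my_srh_states) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void my_invalidate_srh_state(void)
{
	struct seg6_bpf_srh_state *state;

	local_lock_nested_bh(&my_srh_states.bh_lock);
	state = this_cpu_ptr(&my_srh_states);
	state->valid = false;
	local_unlock_nested_bh(&my_srh_states.bh_lock);
}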
diff --git a/include/net/sock.h b/include/net/sock.h
index 5f4d0629348f..cce23ac4d514 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -544,6 +544,11 @@ struct sock {
netns_tracker ns_tracker;
};
+struct sock_bh_locked {
+ struct sock *sock;
+ local_lock_t bh_lock;
+};
+
enum sk_pacing {
SK_PACING_NONE = 0,
SK_PACING_NEEDED = 1,
@@ -2063,17 +2068,10 @@ sk_dst_get(const struct sock *sk)
static inline void __dst_negative_advice(struct sock *sk)
{
- struct dst_entry *ndst, *dst = __sk_dst_get(sk);
-
- if (dst && dst->ops->negative_advice) {
- ndst = dst->ops->negative_advice(dst);
+ struct dst_entry *dst = __sk_dst_get(sk);
- if (ndst != dst) {
- rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_tx_queue_clear(sk);
- WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- }
- }
+ if (dst && dst->ops->negative_advice)
+ dst->ops->negative_advice(sk, dst);
}
static inline void dst_negative_advice(struct sock *sk)
@@ -2102,7 +2100,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
sk_tx_queue_clear(sk);
WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst)));
dst_release(old_dst);
}
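
With the change above, dst_ops::negative_advice no longer returns a replacement route; the handler receives the socket and is expected to clear or swap sk->sk_dst_cache itself. A minimal sketch of what such a handler now looks like (the name is illustrative; the in-tree IPv4/IPv6 handlers reset the cached route via sk_dst_reset()):

static void my_negative_advice(struct sock *sk, struct dst_entry *dst)
{
	/* Drop the cached route; the next transmit re-resolves it. */
	sk_dst_reset(sk);
}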
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 060e95b331a2..2aac11e7e1cc 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -677,6 +677,7 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
+void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
@@ -1065,11 +1066,19 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
+ /* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
return likely(tcp_skb_can_collapse_to(to) &&
mptcp_skb_can_collapse(to, from) &&
skb_pure_zcopy_same(to, from));
}
+static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return likely(mptcp_skb_can_collapse(to, from) &&
+ !skb_cmp_decrypted(to, from));
+}
+
/* Events passed to congestion control interface */
enum tcp_ca_event {
CA_EVENT_TX_START, /* first transmit when no packets in flight */
@@ -1215,7 +1224,7 @@ extern struct tcp_congestion_ops tcp_reno;
struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
@@ -1854,12 +1863,6 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}
-enum skb_drop_reason
-tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int l3index, const __u8 *hash_location);
-
-
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
@@ -1876,13 +1879,6 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
return NULL;
}
-static inline enum skb_drop_reason
-tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int l3index, const __u8 *hash_location)
-{
- return SKB_NOT_DROPPED_YET;
-}
#define tcp_twsk_md5_key(twsk) NULL
#endif
@@ -2094,6 +2090,14 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
tcp_wmem_free_skb(sk, skb);
}
+static inline void tcp_write_collapse_fence(struct sock *sk)
+{
+ struct sk_buff *skb = tcp_write_queue_tail(sk);
+
+ if (skb)
+ TCP_SKB_CB(skb)->eor = 1;
+}
+
static inline void tcp_push_pending_frames(struct sock *sk)
{
if (tcp_send_head(sk)) {
@@ -2369,21 +2373,15 @@ static inline void tcp_get_current_key(const struct sock *sk,
static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
-#ifdef CONFIG_TCP_MD5SIG
- if (static_branch_unlikely(&tcp_md5_needed.key) &&
- key->type == TCP_KEY_MD5)
- return true;
-#endif
+ if (static_branch_tcp_md5())
+ return key->type == TCP_KEY_MD5;
return false;
}
static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
-#ifdef CONFIG_TCP_AO
- if (static_branch_unlikely(&tcp_ao_needed.key) &&
- key->type == TCP_KEY_AO)
- return true;
-#endif
+ if (static_branch_tcp_ao())
+ return key->type == TCP_KEY_AO;
return false;
}
@@ -2795,66 +2793,9 @@ static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
return false;
}
-/* Called with rcu_read_lock() */
-static inline enum skb_drop_reason
-tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
- const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int dif, int sdif)
-{
- const struct tcphdr *th = tcp_hdr(skb);
- const struct tcp_ao_hdr *aoh;
- const __u8 *md5_location;
- int l3index;
-
- /* Invalid option or two times meet any of auth options */
- if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
- tcp_hash_fail("TCP segment has incorrect auth options set",
- family, skb, "");
- return SKB_DROP_REASON_TCP_AUTH_HDR;
- }
-
- if (req) {
- if (tcp_rsk_used_ao(req) != !!aoh) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
- tcp_hash_fail("TCP connection can't start/end using TCP-AO",
- family, skb, "%s",
- !aoh ? "missing AO" : "AO signed");
- return SKB_DROP_REASON_TCP_AOFAILURE;
- }
- }
-
- /* sdif set, means packet ingressed via a device
- * in an L3 domain and dif is set to the l3mdev
- */
- l3index = sdif ? dif : 0;
-
- /* Fast path: unsigned segments */
- if (likely(!md5_location && !aoh)) {
- /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid
- * for the remote peer. On TCP-AO established connection
- * the last key is impossible to remove, so there's
- * always at least one current_key.
- */
- if (tcp_ao_required(sk, saddr, family, l3index, true)) {
- tcp_hash_fail("AO hash is required, but not found",
- family, skb, "L3 index %d", l3index);
- return SKB_DROP_REASON_TCP_AONOTFOUND;
- }
- if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- tcp_hash_fail("MD5 Hash not found",
- family, skb, "L3 index %d", l3index);
- return SKB_DROP_REASON_TCP_MD5NOTFOUND;
- }
- return SKB_NOT_DROPPED_YET;
- }
-
- if (aoh)
- return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
-
- return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
- l3index, md5_location);
-}
+enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
+ const struct request_sock *req, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif);
#endif /* _TCP_H */
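
tcp_write_collapse_fence() marks the current write-queue tail with EOR so that payload queued afterwards can never be collapsed into it, which is why tcp_skb_can_collapse() does not need an skb_cmp_decrypted() check on the tx side. A sketch of a ULP placing such a fence around a boundary it must preserve; the function name is illustrative:

static void my_mark_record_boundary(struct sock *sk)
{
	/* Everything queued so far ends here; later data starts a new skb. */
	tcp_write_collapse_fence(sk);
}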
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 471e177362b4..1d46460d0fef 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -19,6 +19,11 @@ struct tcp_ao_hdr {
u8 rnext_keyid;
};
+static inline u8 tcp_ao_hdr_maclen(const struct tcp_ao_hdr *aoh)
+{
+ return aoh->length - sizeof(struct tcp_ao_hdr);
+}
+
struct tcp_ao_counters {
atomic64_t pkt_good;
atomic64_t pkt_bad;
@@ -86,7 +91,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
struct tcp_ao_info {
/* List of tcp_ao_key's */
struct hlist_head head;
- /* current_key and rnext_key aren't maintained on listen sockets.
+ /* current_key and rnext_key are maintained on sockets
+ * in TCP_AO_ESTABLISHED states.
* Their purpose is to cache keys on established connections,
* saving needless lookups. Never dereference any of them from
* listen sockets.
@@ -143,43 +149,6 @@ extern struct static_key_false_deferred tcp_ao_needed;
#define static_branch_tcp_ao() false
#endif
-static inline bool tcp_hash_should_produce_warnings(void)
-{
- return static_branch_tcp_md5() || static_branch_tcp_ao();
-}
-
-#define tcp_hash_fail(msg, family, skb, fmt, ...) \
-do { \
- const struct tcphdr *th = tcp_hdr(skb); \
- char hdr_flags[6]; \
- char *f = hdr_flags; \
- \
- if (!tcp_hash_should_produce_warnings()) \
- break; \
- if (th->fin) \
- *f++ = 'F'; \
- if (th->syn) \
- *f++ = 'S'; \
- if (th->rst) \
- *f++ = 'R'; \
- if (th->psh) \
- *f++ = 'P'; \
- if (th->ack) \
- *f++ = '.'; \
- *f = 0; \
- if ((family) == AF_INET) { \
- net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
- msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
- &ip_hdr(skb)->daddr, ntohs(th->dest), \
- hdr_flags, ##__VA_ARGS__); \
- } else { \
- net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \
- msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
- &ipv6_hdr(skb)->daddr, ntohs(th->dest), \
- hdr_flags, ##__VA_ARGS__); \
- } \
-} while (0)
-
#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
struct tcp4_ao_context {
@@ -201,9 +170,9 @@ struct tcp6_ao_context {
};
struct tcp_sigpool;
+/* Established states are fast-path and there always is current_key/rnext_key */
#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
- TCPF_CLOSE | TCPF_CLOSE_WAIT | \
- TCPF_LAST_ACK | TCPF_CLOSING)
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_ao_key *key, struct tcphdr *th,
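
A worked example for the new tcp_ao_hdr_maclen() helper above, assuming the usual four-byte struct tcp_ao_hdr layout (kind, length, keyid, rnext_keyid):

/* aoh->length == 16  ->  tcp_ao_hdr_maclen(aoh) == 16 - 4 == 12 bytes,
 * which matches the 96-bit truncated MACs (HMAC-SHA-1-96, AES-128-CMAC-96)
 * commonly used with TCP-AO. */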
diff --git a/include/net/tcx.h b/include/net/tcx.h
index 72a3e75e539f..5ce0ce9e0c02 100644
--- a/include/net/tcx.h
+++ b/include/net/tcx.h
@@ -13,7 +13,7 @@ struct mini_Qdisc;
struct tcx_entry {
struct mini_Qdisc __rcu *miniq;
struct bpf_mprog_bundle bundle;
- bool miniq_active;
+ u32 miniq_active;
struct rcu_head rcu;
};
@@ -125,11 +125,16 @@ static inline void tcx_skeys_dec(bool ingress)
tcx_dec();
}
-static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
- const bool active)
+static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry)
{
ASSERT_RTNL();
- tcx_entry(entry)->miniq_active = active;
+ tcx_entry(entry)->miniq_active++;
+}
+
+static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry)
+{
+ ASSERT_RTNL();
+ tcx_entry(entry)->miniq_active--;
}
static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
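
miniq_active is now a reference count rather than a flag, so multiple miniq users can attach to the same entry and each take and drop its own reference under RTNL. A minimal sketch of the paired usage; the caller names are illustrative:

static void my_miniq_attach(struct bpf_mprog_entry *entry)
{
	tcx_miniq_inc(entry);		/* runs under RTNL */
}

static void my_miniq_detach(struct bpf_mprog_entry *entry)
{
	tcx_miniq_dec(entry);		/* must pair with the inc above */
}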
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 3d54de168a6d..bfe625b55d55 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -121,7 +121,7 @@ struct xsk_tx_metadata_ops {
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
-void __xsk_map_flush(void);
+void __xsk_map_flush(struct list_head *flush_list);
/**
* xsk_tx_metadata_to_compl - Save enough relevant metadata information
@@ -206,7 +206,7 @@ static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
return -EOPNOTSUPP;
}
-static inline void __xsk_map_flush(void)
+static inline void __xsk_map_flush(struct list_head *flush_list)
{
}
@@ -228,14 +228,4 @@ static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
}
#endif /* CONFIG_XDP_SOCKETS */
-
-#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
-bool xsk_map_check_flush(void);
-#else
-static inline bool xsk_map_check_flush(void)
-{
- return false;
-}
-#endif
-
#endif /* _LINUX_XDP_SOCK_H */
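
__xsk_map_flush() no longer drains an implicit per-CPU list; the caller passes in the flush list on which it accumulated XDP_REDIRECT targets, and the CONFIG_DEBUG_NET-only xsk_map_check_flush() helper goes away with it. A minimal, hedged sketch of the calling shape (where the list comes from in-tree is out of scope here; the helper name is illustrative):

#include <linux/list.h>
#include <net/xdp_sock.h>

static void my_finish_xsk_redirects(struct list_head *flush_list)
{
	if (!list_empty(flush_list))
		__xsk_map_flush(flush_list);
}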
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 77ebf5bcf0b9..54cef89f6c1e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -178,7 +178,10 @@ struct xfrm_state {
struct hlist_node gclist;
struct hlist_node bydst;
};
- struct hlist_node bysrc;
+ union {
+ struct hlist_node dev_gclist;
+ struct hlist_node bysrc;
+ };
struct hlist_node byspi;
struct hlist_node byseq;
@@ -229,6 +232,10 @@ struct xfrm_state {
struct xfrm_encap_tmpl *encap;
struct sock __rcu *encap_sk;
+ /* NAT keepalive */
+ u32 nat_keepalive_interval; /* seconds */
+ time64_t nat_keepalive_expiration;
+
/* Data for care-of address */
xfrm_address_t *coaddr;
@@ -1588,7 +1595,7 @@ void xfrm_state_update_stats(struct net *net);
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
- struct net_device *dev = xdo->dev;
+ struct net_device *dev = READ_ONCE(xdo->dev);
if (dev && dev->xfrmdev_ops &&
dev->xfrmdev_ops->xdo_dev_state_update_stats)
@@ -1946,13 +1953,16 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
struct xfrm_user_offload *xuo, u8 dir,
struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+void xfrm_dev_state_delete(struct xfrm_state *x);
+void xfrm_dev_state_free(struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = READ_ONCE(xso->dev);
- if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
- xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+ if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+ dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
}
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
@@ -1973,28 +1983,6 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
return false;
}
-static inline void xfrm_dev_state_delete(struct xfrm_state *x)
-{
- struct xfrm_dev_offload *xso = &x->xso;
-
- if (xso->dev)
- xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
-}
-
-static inline void xfrm_dev_state_free(struct xfrm_state *x)
-{
- struct xfrm_dev_offload *xso = &x->xso;
- struct net_device *dev = xso->dev;
-
- if (dev && dev->xfrmdev_ops) {
- if (dev->xfrmdev_ops->xdo_dev_state_free)
- dev->xfrmdev_ops->xdo_dev_state_free(x);
- xso->dev = NULL;
- xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
- netdev_put(dev, &xso->dev_tracker);
- }
-}
-
static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
struct xfrm_dev_offload *xdo = &x->xdo;
@@ -2203,4 +2191,10 @@ static inline int register_xfrm_state_bpf(void)
}
#endif
+int xfrm_nat_keepalive_init(unsigned short family);
+void xfrm_nat_keepalive_fini(unsigned short family);
+int xfrm_nat_keepalive_net_init(struct net *net);
+int xfrm_nat_keepalive_net_fini(struct net *net);
+void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
+
#endif /* _NET_XFRM_H */
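
The block above adds per-state NAT keepalive bookkeeping (an interval in seconds plus the next expiration time) and a small init/fini API, including per-netns hooks. A sketch of how the pernet wiring could look; the ops structure and function names are illustrative, only the xfrm_nat_keepalive_* calls come from this header:

#include <net/net_namespace.h>
#include <net/xfrm.h>

static int __net_init my_xfrm_keepalive_net_init(struct net *net)
{
	return xfrm_nat_keepalive_net_init(net);
}

static void __net_exit my_xfrm_keepalive_net_exit(struct net *net)
{
	xfrm_nat_keepalive_net_fini(net);
}

static struct pernet_operations my_xfrm_keepalive_net_ops = {
	.init = my_xfrm_keepalive_net_init,
	.exit = my_xfrm_keepalive_net_exit,
};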
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 6b548dc2c496..1d79a3b536ce 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -69,8 +69,10 @@
#define BLIST_RETRY_ITF ((__force blist_flags_t)(1ULL << 32))
/* Always retry ABORTED_COMMAND with ASC 0xc1 */
#define BLIST_RETRY_ASC_C1 ((__force blist_flags_t)(1ULL << 33))
+/* Do not query the IO Advice Hints Grouping mode page */
+#define BLIST_SKIP_IO_HINTS ((__force blist_flags_t)(1ULL << 34))
-#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+#define __BLIST_LAST_USED BLIST_SKIP_IO_HINTS
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
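
BLIST_SKIP_IO_HINTS lets a devinfo quirk entry suppress the MODE SENSE query for the IO Advice Hints Grouping page on devices that misbehave when asked. A minimal sketch of the consumer-side check; the helper name is illustrative, the in-tree user is the sd driver:

#include <scsi/scsi_devinfo.h>

static bool my_should_query_io_hints(blist_flags_t bflags)
{
	return !(bflags & BLIST_SKIP_IO_HINTS);
}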
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 843106e1109f..70e1262b2e20 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -120,6 +120,7 @@
#define WRITE_SAME_16 0x93
#define ZBC_OUT 0x94
#define ZBC_IN 0x95
+#define WRITE_ATOMIC_16 0x9c
#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
#define SERVICE_ACTION_IN_16 0x9e
#define SERVICE_ACTION_OUT_16 0x9f
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 0e75b9277c8c..e3b6ce3cbf88 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
void sas_disable_tlr(struct scsi_device *);
void sas_enable_tlr(struct scsi_device *);
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
+
extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
void sas_rphy_free(struct sas_rphy *);
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 1e1b40f4e664..6a37b29f4b4c 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -1016,7 +1016,7 @@ void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_phy_stats *phy_stats);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
struct netlink_ext_ack *extack);
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c11aaf8079fb..f6baa9a01868 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -36,6 +36,7 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
struct dma_chan *chan);
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);
int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
dma_filter_fn filter_fn, void *filter_data);
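
snd_dmaengine_pcm_sync_stop() gives PCM drivers built on the dmaengine helpers a ready-made .sync_stop callback. A sketch of wiring it into an ops table; everything except the new helper is elided and the table name is illustrative:

#include <sound/dmaengine_pcm.h>
#include <sound/pcm.h>

static const struct snd_pcm_ops my_pcm_ops = {
	.sync_stop = snd_dmaengine_pcm_sync_stop,
	/* .open/.close/.hw_params/.trigger/.pointer omitted */
};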
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 61c6054618c8..3edd7a7346da 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -124,7 +124,7 @@ struct snd_pcm_ops {
#define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */
#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
-#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */
+#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */
#define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 0e128ad51460..1527d5d45e01 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -9,9 +9,17 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
+#include <uapi/linux/ioprio.h>
#define RWBS_LEN 8
+#define IOPRIO_CLASS_STRINGS \
+ { IOPRIO_CLASS_NONE, "none" }, \
+ { IOPRIO_CLASS_RT, "rt" }, \
+ { IOPRIO_CLASS_BE, "be" }, \
+ { IOPRIO_CLASS_IDLE, "idle" }, \
+ { IOPRIO_CLASS_INVALID, "invalid"}
+
#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,
@@ -82,6 +90,7 @@ TRACE_EVENT(block_rq_requeue,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -90,16 +99,20 @@ TRACE_EVENT(block_rq_requeue,
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
+ __entry->ioprio = rq->ioprio;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
- TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, 0)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), 0)
);
DECLARE_EVENT_CLASS(block_rq_completion,
@@ -113,6 +126,7 @@ DECLARE_EVENT_CLASS(block_rq_completion,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int , error )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -122,16 +136,20 @@ DECLARE_EVENT_CLASS(block_rq_completion,
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
__entry->error = blk_status_to_errno(error);
+ __entry->ioprio = rq->ioprio;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
- TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->error)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error)
);
/**
@@ -180,6 +198,7 @@ DECLARE_EVENT_CLASS(block_rq,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
__dynamic_array( char, cmd, 1 )
@@ -190,17 +209,21 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
+ __entry->ioprio = rq->ioprio;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
- TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
+ TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __entry->bytes, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm)
);
/**
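
The ioprio that the block events now record is split three ways by the updated format strings: IOPRIO_PRIO_CLASS() takes the bits at and above IOPRIO_CLASS_SHIFT (printed symbolically via IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT() the middle bits, and IOPRIO_PRIO_LEVEL() the low three bits. A worked example, assuming the usual uapi encoding with the class shifted by 13:

/* IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4) == (2 << 13) | 4 == 16388, which
 * the new TP_printk() formats above render as "be,0,4": class "be",
 * hint 0, level 4. */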
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index fadf406b5260..246c0fbd582e 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -291,9 +291,6 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( u64, ino )
__field( u64, start )
__field( u64, len )
- __field( u64, orig_start )
- __field( u64, block_start )
- __field( u64, block_len )
__field( u32, flags )
__field( int, refs )
),
@@ -303,23 +300,15 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->ino = btrfs_ino(inode);
__entry->start = map->start;
__entry->len = map->len;
- __entry->orig_start = map->orig_start;
- __entry->block_start = map->block_start;
- __entry->block_len = map->block_len;
__entry->flags = map->flags;
__entry->refs = refcount_read(&map->refs);
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
- "orig_start=%llu block_start=%llu(%s) "
- "block_len=%llu flags=%s refs=%u",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu flags=%s refs=%u",
show_root_type(__entry->root_objectid),
__entry->ino,
__entry->start,
__entry->len,
- __entry->orig_start,
- show_map_type(__entry->block_start),
- __entry->block_len,
show_map_flags(__entry->flags),
__entry->refs)
);
@@ -2556,9 +2545,10 @@ TRACE_EVENT(btrfs_extent_map_shrinker_count,
TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
- TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr),
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr,
+ u64 last_root_id, u64 last_ino),
- TP_ARGS(fs_info, nr_to_scan, nr),
+ TP_ARGS(fs_info, nr_to_scan, nr, last_root_id, last_ino),
TP_STRUCT__entry_btrfs(
__field( long, nr_to_scan )
@@ -2570,8 +2560,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
TP_fast_assign_btrfs(fs_info,
__entry->nr_to_scan = nr_to_scan;
__entry->nr = nr;
- __entry->last_root_id = fs_info->extent_map_shrinker_last_root;
- __entry->last_ino = fs_info->extent_map_shrinker_last_ino;
+ __entry->last_root_id = last_root_id;
+ __entry->last_ino = last_ino;
),
TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
@@ -2581,9 +2571,10 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
- TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr),
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr,
+ u64 last_root_id, u64 last_ino),
- TP_ARGS(fs_info, nr_dropped, nr),
+ TP_ARGS(fs_info, nr_dropped, nr, last_root_id, last_ino),
TP_STRUCT__entry_btrfs(
__field( long, nr_dropped )
@@ -2595,8 +2586,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
TP_fast_assign_btrfs(fs_info,
__entry->nr_dropped = nr_dropped;
__entry->nr = nr;
- __entry->last_root_id = fs_info->extent_map_shrinker_last_root;
- __entry->last_ino = fs_info->extent_map_shrinker_last_ino;
+ __entry->last_root_id = last_root_id;
+ __entry->last_ino = last_ino;
),
TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
@@ -2615,7 +2606,6 @@ TRACE_EVENT(btrfs_extent_map_shrinker_remove_em,
__field( u64, root_id )
__field( u64, start )
__field( u64, len )
- __field( u64, block_start )
__field( u32, flags )
),
@@ -2624,15 +2614,12 @@ TRACE_EVENT(btrfs_extent_map_shrinker_remove_em,
__entry->root_id = inode->root->root_key.objectid;
__entry->start = em->start;
__entry->len = em->len;
- __entry->block_start = em->block_start;
__entry->flags = em->flags;
),
- TP_printk_btrfs(
-"ino=%llu root=%llu(%s) start=%llu len=%llu block_start=%llu(%s) flags=%s",
+ TP_printk_btrfs("ino=%llu root=%llu(%s) start=%llu len=%llu flags=%s",
__entry->ino, show_root_type(__entry->root_id),
__entry->start, __entry->len,
- show_map_type(__entry->block_start),
show_map_flags(__entry->flags))
);
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index cf4b98b9a9ed..7d931db02b93 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_see_withdrawal,
cachefiles_obj_get_ondemand_fd,
cachefiles_obj_put_ondemand_fd,
+ cachefiles_obj_get_read_req,
+ cachefiles_obj_put_read_req,
};
enum fscache_why_object_killed {
@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
- E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+ EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \
+ EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \
+ EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \
+ EM(cachefiles_obj_get_read_req, "GET read_req") \
+ E_(cachefiles_obj_put_read_req, "PUT read_req")
#define cachefiles_coherency_traces \
EM(cachefiles_coherency_check_aux, "BAD aux ") \
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index b9bbfd855f2a..57df3843e650 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -143,7 +143,8 @@ TRACE_EVENT(erofs_readpages,
__entry->raw)
);
-DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
+TRACE_EVENT(erofs_map_blocks_enter,
+
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags),
@@ -171,21 +172,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
__entry->flags ? show_map_flags(__entry->flags) : "NULL")
);
-DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags),
-
- TP_ARGS(inode, map, flags)
-);
-
-DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags),
-
- TP_ARGS(inode, map, flags)
-);
+TRACE_EVENT(erofs_map_blocks_exit,
-DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags, int ret),
@@ -223,20 +211,6 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
show_mflags(__entry->mflags), __entry->ret)
);
-DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
TRACE_EVENT(erofs_destroy_inode,
TP_PROTO(struct inode *inode),
diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h
index d695a560673f..5ccc0d91b220 100644
--- a/include/trace/events/firewire.h
+++ b/include/trace/events/firewire.h
@@ -36,10 +36,11 @@
#define QUADLET_SIZE 4
DECLARE_EVENT_CLASS(async_outbound_initiate_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__array(u32, header, ASYNC_HEADER_QUADLET_COUNT)
@@ -47,6 +48,7 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT);
@@ -54,8 +56,9 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
),
// This format is for the request subaction.
TP_printk(
- "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
ASYNC_HEADER_GET_DESTINATION(__entry->header),
@@ -71,10 +74,11 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem.
DECLARE_EVENT_CLASS(async_outbound_complete_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__field(u8, status)
@@ -82,14 +86,16 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
__entry->status = status;
__entry->timestamp = timestamp;
),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -99,10 +105,11 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template,
// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem.
DECLARE_EVENT_CLASS(async_inbound_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__field(u8, status)
@@ -112,6 +119,7 @@ DECLARE_EVENT_CLASS(async_inbound_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
__entry->status = status;
@@ -121,8 +129,9 @@ DECLARE_EVENT_CLASS(async_inbound_template,
),
// This format is for the response subaction.
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -139,26 +148,27 @@ DECLARE_EVENT_CLASS(async_inbound_template,
);
DEFINE_EVENT(async_outbound_initiate_template, async_request_outbound_initiate,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count)
);
DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
);
DEFINE_EVENT(async_inbound_template, async_response_inbound,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count)
);
DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -175,11 +185,12 @@ DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound,
);
DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_initiate,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
ASYNC_HEADER_GET_DESTINATION(__entry->header),
@@ -194,8 +205,8 @@ DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_ini
);
DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
);
#undef ASYNC_HEADER_GET_DESTINATION
@@ -206,23 +217,26 @@ DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
#undef ASYNC_HEADER_GET_RCODE
TRACE_EVENT(async_phy_outbound_initiate,
- TP_PROTO(u64 packet, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
- TP_ARGS(packet, generation, first_quadlet, second_quadlet),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, first_quadlet, second_quadlet),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u32, first_quadlet)
__field(u32, second_quadlet)
),
TP_fast_assign(
__entry->packet = packet;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->first_quadlet = first_quadlet;
__entry->second_quadlet = second_quadlet
),
TP_printk(
- "packet=0x%llx generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
+ "packet=0x%llx card_index=%u generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->first_quadlet,
__entry->second_quadlet
@@ -230,23 +244,26 @@ TRACE_EVENT(async_phy_outbound_initiate,
);
TRACE_EVENT(async_phy_outbound_complete,
- TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp),
- TP_ARGS(packet, generation, status, timestamp),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp),
+ TP_ARGS(packet, card_index, generation, status, timestamp),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, status)
__field(u16, timestamp)
),
TP_fast_assign(
__entry->packet = packet;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->status = status;
__entry->timestamp = timestamp;
),
TP_printk(
- "packet=0x%llx generation=%u status=%u timestamp=0x%04x",
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->status,
__entry->timestamp
@@ -254,10 +271,11 @@ TRACE_EVENT(async_phy_outbound_complete,
);
TRACE_EVENT(async_phy_inbound,
- TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
- TP_ARGS(packet, generation, status, timestamp, first_quadlet, second_quadlet),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, status, timestamp, first_quadlet, second_quadlet),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, status)
__field(u16, timestamp)
@@ -273,8 +291,9 @@ TRACE_EVENT(async_phy_inbound,
__entry->second_quadlet = second_quadlet
),
TP_printk(
- "packet=0x%llx generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->status,
__entry->timestamp,
@@ -284,55 +303,61 @@ TRACE_EVENT(async_phy_inbound,
);
DECLARE_EVENT_CLASS(bus_reset_arrange_template,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset),
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset),
TP_STRUCT__entry(
+ __field(u8, card_index)
__field(u8, generation)
__field(bool, short_reset)
),
TP_fast_assign(
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->short_reset = short_reset;
),
TP_printk(
- "generation=%u short_reset=%s",
+ "card_index=%u generation=%u short_reset=%s",
+ __entry->card_index,
__entry->generation,
__entry->short_reset ? "true" : "false"
)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_initiate,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_schedule,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_postpone,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
TRACE_EVENT(bus_reset_handle,
- TP_PROTO(unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count),
- TP_ARGS(generation, node_id, bm_abdicate, self_ids, self_id_count),
+ TP_PROTO(unsigned int card_index, unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count),
+ TP_ARGS(card_index, generation, node_id, bm_abdicate, self_ids, self_id_count),
TP_STRUCT__entry(
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, node_id)
__field(bool, bm_abdicate)
__dynamic_array(u32, self_ids, self_id_count)
),
TP_fast_assign(
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->node_id = node_id;
__entry->bm_abdicate = bm_abdicate;
memcpy(__get_dynamic_array(self_ids), self_ids, __get_dynamic_array_len(self_ids));
),
TP_printk(
- "generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s",
+ "card_index=%u generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s",
+ __entry->card_index,
__entry->generation,
__entry->node_id,
__entry->bm_abdicate ? "true" : "false",
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index a6190aa1b406..f1a73aa83fbb 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -35,12 +35,14 @@ enum fscache_volume_trace {
fscache_volume_get_cookie,
fscache_volume_get_create_work,
fscache_volume_get_hash_collision,
+ fscache_volume_get_withdraw,
fscache_volume_free,
fscache_volume_new_acquire,
fscache_volume_put_cookie,
fscache_volume_put_create_work,
fscache_volume_put_hash_collision,
fscache_volume_put_relinquish,
+ fscache_volume_put_withdraw,
fscache_volume_see_create_work,
fscache_volume_see_hash_wake,
fscache_volume_wait_create_work,
@@ -120,12 +122,14 @@ enum fscache_access_trace {
EM(fscache_volume_get_cookie, "GET cook ") \
EM(fscache_volume_get_create_work, "GET creat") \
EM(fscache_volume_get_hash_collision, "GET hcoll") \
+ EM(fscache_volume_get_withdraw, "GET withd") \
EM(fscache_volume_free, "FREE ") \
EM(fscache_volume_new_acquire, "NEW acq ") \
EM(fscache_volume_put_cookie, "PUT cook ") \
EM(fscache_volume_put_create_work, "PUT creat") \
EM(fscache_volume_put_hash_collision, "PUT hcoll") \
EM(fscache_volume_put_relinquish, "PUT relnq") \
+ EM(fscache_volume_put_withdraw, "PUT withd") \
EM(fscache_volume_see_create_work, "SEE creat") \
EM(fscache_volume_see_hash_wake, "SEE hwake") \
E_(fscache_volume_wait_create_work, "WAIT crea")
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index 6834356b2d2a..543e54e432a1 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -42,51 +42,53 @@ TRACE_EVENT(page_pool_release,
TRACE_EVENT(page_pool_state_release,
TP_PROTO(const struct page_pool *pool,
- const struct page *page, u32 release),
+ netmem_ref netmem, u32 release),
- TP_ARGS(pool, page, release),
+ TP_ARGS(pool, netmem, release),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
- __field(const struct page *, page)
+ __field(unsigned long, netmem)
__field(u32, release)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
- __entry->page = page;
+ __entry->netmem = (__force unsigned long)netmem;
__entry->release = release;
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = netmem_to_pfn(netmem);
),
- TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
- __entry->pool, __entry->page, __entry->pfn, __entry->release)
+ TP_printk("page_pool=%p netmem=%p pfn=0x%lx release=%u",
+ __entry->pool, (void *)__entry->netmem,
+ __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
TP_PROTO(const struct page_pool *pool,
- const struct page *page, u32 hold),
+ netmem_ref netmem, u32 hold),
- TP_ARGS(pool, page, hold),
+ TP_ARGS(pool, netmem, hold),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
- __field(const struct page *, page)
+ __field(unsigned long, netmem)
__field(u32, hold)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
- __entry->page = page;
+ __entry->netmem = (__force unsigned long)netmem;
__entry->hold = hold;
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = netmem_to_pfn(netmem);
),
- TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
- __entry->pool, __entry->page, __entry->pfn, __entry->hold)
+ TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u",
+ __entry->pool, (void *)__entry->netmem,
+ __entry->pfn, __entry->hold)
);
TRACE_EVENT(page_pool_update_nid,
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index f1b5e816e7e5..ff33f41a9db7 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -81,7 +81,7 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q)->name )
+ __string( dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)" )
__string( kind, q->ops->id )
__field( u32, parent )
__field( u32, handle )
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index 8e2d9b1b0e77..05f1945ed204 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -102,6 +102,7 @@
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
scsi_opcode_name(ATA_16), \
+ scsi_opcode_name(WRITE_ATOMIC_16), \
scsi_opcode_name(ATA_12))
#define scsi_hostbyte_name(result) { result, #result }
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 07e0715628ec..b877133cd93a 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -24,13 +24,14 @@ DEFINE_DROP_REASON(FN, FN)
TRACE_EVENT(kfree_skb,
TP_PROTO(struct sk_buff *skb, void *location,
- enum skb_drop_reason reason),
+ enum skb_drop_reason reason, struct sock *rx_sk),
- TP_ARGS(skb, location, reason),
+ TP_ARGS(skb, location, reason, rx_sk),
TP_STRUCT__entry(
__field(void *, skbaddr)
__field(void *, location)
+ __field(void *, rx_sk)
__field(unsigned short, protocol)
__field(enum skb_drop_reason, reason)
),
@@ -38,12 +39,14 @@ TRACE_EVENT(kfree_skb,
TP_fast_assign(
__entry->skbaddr = skb;
__entry->location = location;
+ __entry->rx_sk = rx_sk;
__entry->protocol = ntohs(skb->protocol);
__entry->reason = reason;
),
- TP_printk("skbaddr=%p protocol=%u location=%pS reason: %s",
- __entry->skbaddr, __entry->protocol, __entry->location,
+ TP_printk("skbaddr=%p rx_sk=%p protocol=%u location=%pS reason: %s",
+ __entry->skbaddr, __entry->rx_sk, __entry->protocol,
+ __entry->location,
__print_symbolic(__entry->reason,
DEFINE_DROP_REASON(FN, FNe)))
);
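
The extra rx_sk argument means every consumer of the kfree_skb tracepoint now sees the receive-side socket (which may be NULL) in addition to the drop reason. A sketch of an in-kernel probe with the updated signature; the probe name is illustrative and registration uses the usual DECLARE_TRACE-generated register_trace_kfree_skb() helper:

#include <linux/skbuff.h>
#include <net/sock.h>
#include <trace/events/skb.h>

static void my_kfree_skb_probe(void *ignore, struct sk_buff *skb,
			       void *location, enum skb_drop_reason reason,
			       struct sock *rx_sk)
{
	if (rx_sk)
		pr_debug("drop on sk %p, reason %d\n", rx_sk, reason);
}

/* in module init:
 *	register_trace_kfree_skb(my_kfree_skb_probe, NULL);
 */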
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 49b5ee091cf6..1c8bd8e186b8 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -411,6 +411,323 @@ TRACE_EVENT(tcp_cong_state_set,
__entry->cong_state)
);
+DECLARE_EVENT_CLASS(tcp_hash_event,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+
+ TP_ARGS(sk, skb),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(int, l3index)
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(bool, fin)
+ __field(bool, syn)
+ __field(bool, rst)
+ __field(bool, psh)
+ __field(bool, ack)
+ ),
+
+ TP_fast_assign(
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
+ __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0;
+
+ /* For filtering use */
+ __entry->sport = ntohs(th->source);
+ __entry->dport = ntohs(th->dest);
+ __entry->family = sk->sk_family;
+
+ __entry->fin = th->fin;
+ __entry->syn = th->syn;
+ __entry->rst = th->rst;
+ __entry->psh = th->psh;
+ __entry->ack = th->ack;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c]",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->l3index,
+ __entry->fin ? 'F' : ' ',
+ __entry->syn ? 'S' : ' ',
+ __entry->rst ? 'R' : ' ',
+ __entry->psh ? 'P' : ' ',
+ __entry->ack ? '.' : ' ')
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_bad_header,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_required,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_unexpected,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_mismatch,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_ao_required,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+
+ TP_ARGS(sk, skb, keyid, rnext, maclen),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(int, l3index)
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(bool, fin)
+ __field(bool, syn)
+ __field(bool, rst)
+ __field(bool, psh)
+ __field(bool, ack)
+
+ __field(__u8, keyid)
+ __field(__u8, rnext)
+ __field(__u8, maclen)
+ ),
+
+ TP_fast_assign(
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
+ __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0;
+
+ /* For filtering use */
+ __entry->sport = ntohs(th->source);
+ __entry->dport = ntohs(th->dest);
+ __entry->family = sk->sk_family;
+
+ __entry->fin = th->fin;
+ __entry->syn = th->syn;
+ __entry->rst = th->rst;
+ __entry->psh = th->psh;
+ __entry->ack = th->ack;
+
+ __entry->keyid = keyid;
+ __entry->rnext = rnext;
+ __entry->maclen = maclen;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c] keyid=%u rnext=%u maclen=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->l3index,
+ __entry->fin ? 'F' : ' ',
+ __entry->syn ? 'S' : ' ',
+ __entry->rst ? 'R' : ' ',
+ __entry->psh ? 'P' : ' ',
+ __entry->ack ? '.' : ' ',
+ __entry->keyid, __entry->rnext, __entry->maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_handshake_failure,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_wrong_maclen,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_mismatch,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_key_not_found,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_rnext_request,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event_sk,
+
+ TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext),
+
+ TP_ARGS(sk, keyid, rnext),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(__u8, keyid)
+ __field(__u8, rnext)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ __entry->keyid = keyid;
+ __entry->rnext = rnext;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc keyid=%u rnext=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->keyid, __entry->rnext)
+);
+
+DEFINE_EVENT(tcp_ao_event_sk, tcp_ao_synack_no_key,
+ TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext),
+ TP_ARGS(sk, keyid, rnext)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event_sne,
+
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+
+ TP_ARGS(sk, new_sne),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(__u32, new_sne)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ __entry->new_sne = new_sne;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc sne=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->new_sne)
+);
+
+DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_snd_sne_update,
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+ TP_ARGS(sk, new_sne)
+);
+
+DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_rcv_sne_update,
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+ TP_ARGS(sk, new_sne)
+);
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
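The new event classes above follow the standard TRACE_EVENT machinery: each DEFINE_EVENT generates a trace_<name>() inline that call sites invoke with the class prototype. A minimal, illustrative sketch of how kernel code would emit one of the new hash events (the real call sites live in the TCP input path, not shown here):

    #include <trace/events/tcp.h>

    /* Somewhere in the TCP receive path, on an MD5 digest mismatch. */
    static void report_md5_mismatch(const struct sock *sk,
                                    const struct sk_buff *skb)
    {
            /* Compiles down to a static branch; near-zero cost while the
             * tracepoint is disabled. */
            trace_tcp_hash_md5_mismatch(sk, skb);
    }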
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index d983c48a3b6a..5bf6148cac2b 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
@@ -776,12 +776,8 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
-
-#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
-#endif
-
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index aaed8e12ad0b..926b1deb1116 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -802,6 +802,9 @@ struct drm_panthor_queue_submit {
* Must be 64-bit/8-byte aligned (the size of a CS instruction)
*
* Can be zero if stream_addr is zero too.
+ *
+ * When the stream size is zero, the queue submit serves as a
+ * synchronization point.
*/
__u32 stream_size;
@@ -822,6 +825,8 @@ struct drm_panthor_queue_submit {
* ensure the GPU doesn't get garbage when reading the indirect command
* stream buffers. If you want the cache flush to happen
* unconditionally, pass a zero here.
+ *
+ * Ignored when stream_size is zero.
*/
__u32 latest_flush;
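As the new comment notes, a submit with stream_size == 0 acts purely as a synchronization point, and latest_flush is then ignored. A hedged userspace sketch; field names other than stream_size, stream_addr and latest_flush are assumed from the surrounding UAPI and may differ:

    #include <string.h>
    #include <drm/panthor_drm.h>

    /* Build a sync-only submit: no command stream attached. */
    static struct drm_panthor_queue_submit sync_only_submit(__u32 queue_index)
    {
            struct drm_panthor_queue_submit qsubmit;

            memset(&qsubmit, 0, sizeof(qsubmit));
            qsubmit.queue_index = queue_index;  /* assumed field name */
            qsubmit.stream_addr = 0;
            qsubmit.stream_size = 0;            /* zero size => pure sync point */
            qsubmit.latest_flush = 0;           /* ignored when stream_size == 0 */
            /* Sync operations would be attached via the syncs array (not shown). */
            return qsubmit;
    }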
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 1446c3bae515..d425b83181df 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -776,7 +776,13 @@ struct drm_xe_gem_create {
#define DRM_XE_GEM_CPU_CACHING_WC 2
/**
* @cpu_caching: The CPU caching mode to select for this object. If
- * mmaping the object the mode selected here will also be used.
+ * mmapping the object the mode selected here will also be used. The
+ * exception is when mapping system memory (including data evicted
+ * to system) on discrete GPUs. The caching mode selected will
+ * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency
+ * between GPU and CPU is guaranteed. The caching mode of
+ * existing CPU-mappings will be updated transparently to
+ * user-space clients.
*/
__u16 cpu_caching;
/** @pad: MBZ */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 90706a47f6ff..35bcf52dbc65 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1425,6 +1425,8 @@ enum {
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
/* If set, XDP frames will be transmitted after processing */
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
+/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
+#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2)
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
@@ -6207,12 +6209,17 @@ union { \
__u64 :64; \
} __attribute__((aligned(8)))
+/* The enum used in skb->tstamp_type. It specifies the clock type
+ * of the time stored in skb->tstamp.
+ */
enum {
- BPF_SKB_TSTAMP_UNSPEC,
- BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
- /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
- * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
- * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
+ BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */
+ BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */
+ BPF_SKB_CLOCK_REALTIME = 0,
+ BPF_SKB_CLOCK_MONOTONIC = 1,
+ BPF_SKB_CLOCK_TAI = 2,
+ /* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
+ * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
*/
};
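The renamed constants keep the old numeric values (UNSPEC == REALTIME == 0, DELIVERY_MONO == MONOTONIC == 1) and add TAI. A hedged tc-BPF sketch using the new names with the existing bpf_skb_set_tstamp() helper, which is assumed here to accept the new clock constants; this example is not part of the patch itself:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Illustrative tc egress program: stamp the skb with a TAI delivery time. */
    SEC("tc")
    int set_tai_tstamp(struct __sk_buff *skb)
    {
            __u64 now = bpf_ktime_get_tai_ns();

            bpf_skb_set_tstamp(skb, now, BPF_SKB_CLOCK_TAI);
            return 0;  /* TC_ACT_OK */
    }

    char LICENSE[] SEC("license") = "GPL";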
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index d24e8e121507..fc29d273845d 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -747,21 +747,9 @@ struct btrfs_raid_stride {
__le64 physical;
} __attribute__ ((__packed__));
-/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
-#define BTRFS_STRIPE_RAID0 1
-#define BTRFS_STRIPE_RAID1 2
-#define BTRFS_STRIPE_DUP 3
-#define BTRFS_STRIPE_RAID10 4
-#define BTRFS_STRIPE_RAID5 5
-#define BTRFS_STRIPE_RAID6 6
-#define BTRFS_STRIPE_RAID1C3 7
-#define BTRFS_STRIPE_RAID1C4 8
-
struct btrfs_stripe_extent {
- __u8 encoding;
- __u8 reserved[7];
/* An array of raid strides this stripe is composed of. */
- struct btrfs_raid_stride strides[];
+ __DECLARE_FLEX_ARRAY(struct btrfs_raid_stride, strides);
} __attribute__ ((__packed__));
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
@@ -777,6 +765,14 @@ struct btrfs_stripe_extent {
#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
#define BTRFS_SUPER_FLAG_CHANGING_FSID_V2 (1ULL << 36)
+/*
+ * These are temporary flags used by btrfs-progs to do offline conversion.
+ * They are rejected by the kernel, but are kept here to avoid conflicts.
+ */
+#define BTRFS_SUPER_FLAG_CHANGING_BG_TREE (1ULL << 38)
+#define BTRFS_SUPER_FLAG_CHANGING_DATA_CSUM (1ULL << 39)
+#define BTRFS_SUPER_FLAG_CHANGING_META_CSUM (1ULL << 40)
/*
* items in the extent btree are used to record the objectid of the
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
index 6cde62371b6f..bd990917f7c4 100644
--- a/include/uapi/linux/can/isotp.h
+++ b/include/uapi/linux/can/isotp.h
@@ -2,7 +2,7 @@
/*
* linux/can/isotp.h
*
- * Definitions for isotp CAN sockets (ISO 15765-2:2016)
+ * Definitions for ISO 15765-2 CAN transport protocol sockets
*
* Copyright (c) 2020 Volkswagen Group Electronic Research
* All rights reserved.
diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
index f2afb7cc4926..18e3745b86cd 100644
--- a/include/uapi/linux/cn_proc.h
+++ b/include/uapi/linux/cn_proc.h
@@ -69,8 +69,7 @@ struct proc_input {
static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
{
- ev_type &= PROC_EVENT_ALL;
- return ev_type;
+ return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
}
/*
diff --git a/include/uapi/linux/dlm.h b/include/uapi/linux/dlm.h
index e7e905fb0bb2..4eaf835780b0 100644
--- a/include/uapi/linux/dlm.h
+++ b/include/uapi/linux/dlm.h
@@ -71,6 +71,8 @@ struct dlm_lksb {
/* DLM_LSFL_TIMEWARN is deprecated and reserved. DO NOT USE! */
#define DLM_LSFL_TIMEWARN 0x00000002
#define DLM_LSFL_NEWEXCL 0x00000008
+/* currently reserved due to in-kernel use */
+#define __DLM_LSFL_RESERVED0 0x00000010
#endif /* _UAPI__DLM_DOT_H__ */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 8733a3117902..4a0a6e703483 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -753,6 +753,197 @@ enum ethtool_module_power_mode {
};
/**
+ * enum ethtool_c33_pse_ext_state - groups of PSE extended states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION: Group of error_condition states
+ * @ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID: Group of mr_mps_valid states
+ * @ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE: Group of mr_pse_enable states
+ * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED: Group of option_detect_ted
+ * states
+ * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM: Group of option_vport_lim states
+ * @ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED: Group of ovld_detected states
+ * @ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE: Group of pd_dll_power_type
+ * states
+ * @ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE: Group of power_not_available
+ * states
+ * @ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED: Group of short_detected states
+ */
+enum ethtool_c33_pse_ext_state {
+ ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1,
+ ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID,
+ ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE,
+ ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED,
+ ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED,
+ ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE,
+ ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_mr_mps_valid - mr_mps_valid states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD: Underload
+ * state
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN: Port is not
+ * connected
+ *
+ * The PSE monitors either the DC or AC Maintain Power Signature
+ * (MPS, see 33.2.9.1). This variable indicates the presence or absence of
+ * a valid MPS.
+ */
+enum ethtool_c33_pse_ext_substate_mr_mps_valid {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_error_condition - error_condition states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT: Non-existing
+ * port number
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT: Undefined port
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT: Internal
+ * hardware fault
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON:
+ * Communication error after force on
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS: Unknown
+ * port status
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF: Host
+ * crash turn off
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN:
+ * Host crash force shutdown
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE: Configuration
+ * change
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP: Over
+ * temperature detected
+ *
+ * error_condition is a variable indicating the status of
+ * implementation-specific fault conditions or optionally other system faults
+ * that prevent the PSE from meeting the specifications in Table 33–11 and that
+ * require the PSE not to source power. These error conditions are different
+ * from those monitored by the state diagrams in Figure 33–10.
+ */
+enum ethtool_c33_pse_ext_substate_error_condition {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_mr_pse_enable - mr_pse_enable states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE: Disable
+ * pin active
+ *
+ * mr_pse_enable is a control variable that selects PSE operation and test
+ * functions.
+ */
+enum ethtool_c33_pse_ext_substate_mr_pse_enable {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_option_detect_ted - option_detect_ted
+ * states functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS: Detection
+ * in process
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR:
+ * Connection check error
+ *
+ * option_detect_ted is a variable indicating if detection can be performed
+ * by the PSE during the ted_timer interval.
+ */
+enum ethtool_c33_pse_ext_substate_option_detect_ted {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_option_vport_lim - option_vport_lim states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE: Main supply
+ * voltage is high
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE: Main supply
+ * voltage is low
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION: Voltage
+ * injection into the port
+ *
+ * option_vport_lim is an optional variable that indicates whether VPSE is out
+ * of the operating range during normal operating state.
+ */
+enum ethtool_c33_pse_ext_substate_option_vport_lim {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_ovld_detected - ovld_detected states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD: Overload state
+ *
+ * ovld_detected is a variable indicating if the PSE output current has been
+ * in an overload condition (see 33.2.7.6) for at least TCUT of a one-second
+ * sliding time window.
+ */
+enum ethtool_c33_pse_ext_substate_ovld_detected {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_power_not_available - power_not_available
+ * states functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED: Power
+ * budget exceeded for the controller
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET:
+ * Configured port power limit exceeded controller power budget
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT:
+ * Power request from PD exceeds port limit
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT: Power
+ * denied due to Hardware power limit
+ *
+ * power_not_available is a variable that is asserted in an
+ * implementation-dependent manner when the PSE is no longer capable of
+ * sourcing sufficient power to support the attached PD. Sufficient power
+ * is defined by classification; see 33.2.6.
+ */
+enum ethtool_c33_pse_ext_substate_power_not_available {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT,
+};
+
+/**
+ * enum ethtool_c33_pse_ext_substate_short_detected - short_detected states
+ * functions. IEEE 802.3-2022 33.2.4.4 Variables
+ *
+ * @ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION: Short
+ * condition was detected
+ *
+ * short_detected is a variable indicating if the PSE output current has been
+ * in a short circuit condition for TLIM within a sliding window (see 33.2.7.7).
+ */
+enum ethtool_c33_pse_ext_substate_short_detected {
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1,
+};
+
+/**
* enum ethtool_pse_types - Types of PSE controller.
* @ETHTOOL_PSE_UNKNOWN: Type of PSE controller is unknown
* @ETHTOOL_PSE_PODL: PSE controller which support PoDL
@@ -878,6 +1069,24 @@ enum ethtool_mm_verify_status {
};
/**
+ * enum ethtool_module_fw_flash_status - plug-in module firmware flashing status
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED: The firmware flashing process has
+ * started.
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS: The firmware flashing process
+ * is in progress.
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED: The firmware flashing process was
+ * completed successfully.
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR: The firmware flashing process was
+ * stopped due to an error.
+ */
+enum ethtool_module_fw_flash_status {
+ ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -1845,6 +2054,7 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99,
ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100,
ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101,
+ ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index b49b804b9495..6d5bdcc67631 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -57,6 +57,7 @@ enum {
ETHTOOL_MSG_PLCA_GET_STATUS,
ETHTOOL_MSG_MM_GET,
ETHTOOL_MSG_MM_SET,
+ ETHTOOL_MSG_MODULE_FW_FLASH_ACT,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -109,6 +110,7 @@ enum {
ETHTOOL_MSG_PLCA_NTF,
ETHTOOL_MSG_MM_GET_REPLY,
ETHTOOL_MSG_MM_NTF,
+ ETHTOOL_MSG_MODULE_FW_FLASH_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -415,12 +417,34 @@ enum {
ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, /* u32 */
ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, /* u32 */
ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, /* u32 */
+ /* nest - _A_PROFILE_IRQ_MODERATION */
+ ETHTOOL_A_COALESCE_RX_PROFILE,
+ /* nest - _A_PROFILE_IRQ_MODERATION */
+ ETHTOOL_A_COALESCE_TX_PROFILE,
/* add new constants above here */
__ETHTOOL_A_COALESCE_CNT,
ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1)
};
+enum {
+ ETHTOOL_A_PROFILE_UNSPEC,
+ /* nest, _A_IRQ_MODERATION_* */
+ ETHTOOL_A_PROFILE_IRQ_MODERATION,
+ __ETHTOOL_A_PROFILE_CNT,
+ ETHTOOL_A_PROFILE_MAX = (__ETHTOOL_A_PROFILE_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_IRQ_MODERATION_UNSPEC,
+ ETHTOOL_A_IRQ_MODERATION_USEC, /* u32 */
+ ETHTOOL_A_IRQ_MODERATION_PKTS, /* u32 */
+ ETHTOOL_A_IRQ_MODERATION_COMPS, /* u32 */
+
+ __ETHTOOL_A_IRQ_MODERATION_CNT,
+ ETHTOOL_A_IRQ_MODERATION_MAX = (__ETHTOOL_A_IRQ_MODERATION_CNT - 1)
+};
+
/* PAUSE */
enum {
@@ -907,6 +931,12 @@ enum {
/* Power Sourcing Equipment */
enum {
+ ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC,
+ ETHTOOL_A_C33_PSE_PW_LIMIT_MIN, /* u32 */
+ ETHTOOL_A_C33_PSE_PW_LIMIT_MAX, /* u32 */
+};
+
+enum {
ETHTOOL_A_PSE_UNSPEC,
ETHTOOL_A_PSE_HEADER, /* nest - _A_HEADER_* */
ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */
@@ -915,6 +945,12 @@ enum {
ETHTOOL_A_C33_PSE_ADMIN_STATE, /* u32 */
ETHTOOL_A_C33_PSE_ADMIN_CONTROL, /* u32 */
ETHTOOL_A_C33_PSE_PW_D_STATUS, /* u32 */
+ ETHTOOL_A_C33_PSE_PW_CLASS, /* u32 */
+ ETHTOOL_A_C33_PSE_ACTUAL_PW, /* u32 */
+ ETHTOOL_A_C33_PSE_EXT_STATE, /* u32 */
+ ETHTOOL_A_C33_PSE_EXT_SUBSTATE, /* u32 */
+ ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT, /* u32 */
+ ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES, /* nest - _C33_PSE_PW_LIMIT_* */
/* add new constants above here */
__ETHTOOL_A_PSE_CNT,
@@ -996,6 +1032,23 @@ enum {
ETHTOOL_A_MM_MAX = (__ETHTOOL_A_MM_CNT - 1)
};
+/* MODULE_FW_FLASH */
+
+enum {
+ ETHTOOL_A_MODULE_FW_FLASH_UNSPEC,
+ ETHTOOL_A_MODULE_FW_FLASH_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME, /* string */
+ ETHTOOL_A_MODULE_FW_FLASH_PASSWORD, /* u32 */
+ ETHTOOL_A_MODULE_FW_FLASH_STATUS, /* u32 */
+ ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG, /* string */
+ ETHTOOL_A_MODULE_FW_FLASH_DONE, /* uint */
+ ETHTOOL_A_MODULE_FW_FLASH_TOTAL, /* uint */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MODULE_FW_FLASH_CNT,
+ ETHTOOL_A_MODULE_FW_FLASH_MAX = (__ETHTOOL_A_MODULE_FW_FLASH_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 45e4e64fd664..191a7e88a8ab 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -329,9 +329,12 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO negation of O_APPEND */
#define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020)
+/* Atomic Write */
+#define RWF_ATOMIC ((__force __kernel_rwf_t)0x00000040)
+
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
- RWF_APPEND | RWF_NOAPPEND)
+ RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC)
/* Pagemap ioctl */
#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg)
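RWF_ATOMIC is a per-IO flag, so the natural consumer is pwritev2(2). A hedged sketch of issuing a single untorn write; the caller is assumed to have already validated the length against the statx atomic-write limits (see the statx changes further down), and error handling is minimal:

    #define _GNU_SOURCE
    #include <sys/uio.h>
    #include <fcntl.h>
    #include <unistd.h>

    #ifndef RWF_ATOMIC
    #define RWF_ATOMIC 0x00000040   /* value added by this patch */
    #endif

    /* Write one block as a single untorn (atomic) write at @off. */
    static ssize_t atomic_write_block(int fd, const void *buf, size_t len,
                                      off_t off)
    {
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

            return pwritev2(fd, &iov, 1, off, RWF_ATOMIC);
    }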
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index e682ab628dfa..d358add1611c 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -81,6 +81,8 @@ enum {
#define IPPROTO_ETHERNET IPPROTO_ETHERNET
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
+ IPPROTO_SMC = 256, /* Shared Memory Communications */
+#define IPPROTO_SMC IPPROTO_SMC
IPPROTO_MPTCP = 262, /* Multipath TCP connection */
#define IPPROTO_MPTCP IPPROTO_MPTCP
IPPROTO_MAX
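The new IPPROTO_SMC value is meant to let applications request SMC through the ordinary AF_INET/AF_INET6 socket API instead of AF_SMC. A hedged sketch (assumes a kernel built with SMC support; fallback handling is not shown):

    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPPROTO_SMC
    #define IPPROTO_SMC 256   /* value added by this patch */
    #endif

    /* Open a stream socket that uses SMC when the kernel supports it. */
    static int open_smc_socket(void)
    {
            return socket(AF_INET, SOCK_STREAM, IPPROTO_SMC);
    }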
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 03edf2ccdf6c..a4206723f503 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -618,6 +618,8 @@
#define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
+#define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */
+#define KEY_DO_NOT_DISTURB 0x24f /* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 994bf7af0efe..2aaf7ee256ac 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -257,6 +257,8 @@ enum io_uring_op {
IORING_OP_FUTEX_WAITV,
IORING_OP_FIXED_FD_INSTALL,
IORING_OP_FTRUNCATE,
+ IORING_OP_BIND,
+ IORING_OP_LISTEN,
/* this goes last, obviously */
IORING_OP_LAST,
diff --git a/include/uapi/linux/kd.h b/include/uapi/linux/kd.h
index 8ddb2219a84b..6b384065c013 100644
--- a/include/uapi/linux/kd.h
+++ b/include/uapi/linux/kd.h
@@ -5,61 +5,60 @@
#include <linux/compiler.h>
/* 0x4B is 'K', to avoid collision with termios and vt */
-#define KD_IOCTL_BASE 'K'
-#define GIO_FONT _IO(KD_IOCTL_BASE, 0x60) /* gets font in expanded form */
-#define PIO_FONT _IO(KD_IOCTL_BASE, 0x61) /* use font in expanded form */
+#define GIO_FONT 0x4B60 /* gets font in expanded form */
+#define PIO_FONT 0x4B61 /* use font in expanded form */
-#define GIO_FONTX _IO(KD_IOCTL_BASE, 0x6B) /* get font using struct consolefontdesc */
-#define PIO_FONTX _IO(KD_IOCTL_BASE, 0x6C) /* set font using struct consolefontdesc */
+#define GIO_FONTX 0x4B6B /* get font using struct consolefontdesc */
+#define PIO_FONTX 0x4B6C /* set font using struct consolefontdesc */
struct consolefontdesc {
unsigned short charcount; /* characters in font (256 or 512) */
unsigned short charheight; /* scan lines per character (1-32) */
char __user *chardata; /* font data in expanded form */
};
-#define PIO_FONTRESET _IO(KD_IOCTL_BASE, 0x6D) /* reset to default font */
+#define PIO_FONTRESET 0x4B6D /* reset to default font */
-#define GIO_CMAP _IO(KD_IOCTL_BASE, 0x70) /* gets colour palette on VGA+ */
-#define PIO_CMAP _IO(KD_IOCTL_BASE, 0x71) /* sets colour palette on VGA+ */
+#define GIO_CMAP 0x4B70 /* gets colour palette on VGA+ */
+#define PIO_CMAP 0x4B71 /* sets colour palette on VGA+ */
-#define KIOCSOUND _IO(KD_IOCTL_BASE, 0x2F) /* start sound generation (0 for off) */
-#define KDMKTONE _IO(KD_IOCTL_BASE, 0x30) /* generate tone */
+#define KIOCSOUND 0x4B2F /* start sound generation (0 for off) */
+#define KDMKTONE 0x4B30 /* generate tone */
-#define KDGETLED _IO(KD_IOCTL_BASE, 0x31) /* return current led state */
-#define KDSETLED _IO(KD_IOCTL_BASE, 0x32) /* set led state [lights, not flags] */
+#define KDGETLED 0x4B31 /* return current led state */
+#define KDSETLED 0x4B32 /* set led state [lights, not flags] */
#define LED_SCR 0x01 /* scroll lock led */
#define LED_NUM 0x02 /* num lock led */
#define LED_CAP 0x04 /* caps lock led */
-#define KDGKBTYPE _IO(KD_IOCTL_BASE, 0x33) /* get keyboard type */
+#define KDGKBTYPE 0x4B33 /* get keyboard type */
#define KB_84 0x01
#define KB_101 0x02 /* this is what we always answer */
#define KB_OTHER 0x03
-#define KDADDIO _IO(KD_IOCTL_BASE, 0x34) /* add i/o port as valid */
-#define KDDELIO _IO(KD_IOCTL_BASE, 0x35) /* del i/o port as valid */
-#define KDENABIO _IO(KD_IOCTL_BASE, 0x36) /* enable i/o to video board */
-#define KDDISABIO _IO(KD_IOCTL_BASE, 0x37) /* disable i/o to video board */
+#define KDADDIO 0x4B34 /* add i/o port as valid */
+#define KDDELIO 0x4B35 /* del i/o port as valid */
+#define KDENABIO 0x4B36 /* enable i/o to video board */
+#define KDDISABIO 0x4B37 /* disable i/o to video board */
-#define KDSETMODE _IO(KD_IOCTL_BASE, 0x3A) /* set text/graphics mode */
+#define KDSETMODE 0x4B3A /* set text/graphics mode */
#define KD_TEXT 0x00
#define KD_GRAPHICS 0x01
#define KD_TEXT0 0x02 /* obsolete */
#define KD_TEXT1 0x03 /* obsolete */
-#define KDGETMODE _IO(KD_IOCTL_BASE, 0x3B) /* get current mode */
+#define KDGETMODE 0x4B3B /* get current mode */
-#define KDMAPDISP _IO(KD_IOCTL_BASE, 0x3C) /* map display into address space */
-#define KDUNMAPDISP _IO(KD_IOCTL_BASE, 0x3D) /* unmap display from address space */
+#define KDMAPDISP 0x4B3C /* map display into address space */
+#define KDUNMAPDISP 0x4B3D /* unmap display from address space */
typedef char scrnmap_t;
#define E_TABSZ 256
-#define GIO_SCRNMAP _IO(KD_IOCTL_BASE, 0x40) /* get screen mapping from kernel */
-#define PIO_SCRNMAP _IO(KD_IOCTL_BASE, 0x41) /* put screen mapping table in kernel */
-#define GIO_UNISCRNMAP _IO(KD_IOCTL_BASE, 0x69) /* get full Unicode screen mapping */
-#define PIO_UNISCRNMAP _IO(KD_IOCTL_BASE, 0x6A) /* set full Unicode screen mapping */
+#define GIO_SCRNMAP 0x4B40 /* get screen mapping from kernel */
+#define PIO_SCRNMAP 0x4B41 /* put screen mapping table in kernel */
+#define GIO_UNISCRNMAP 0x4B69 /* get full Unicode screen mapping */
+#define PIO_UNISCRNMAP 0x4B6A /* set full Unicode screen mapping */
-#define GIO_UNIMAP _IO(KD_IOCTL_BASE, 0x66) /* get unicode-to-font mapping from kernel */
+#define GIO_UNIMAP 0x4B66 /* get unicode-to-font mapping from kernel */
struct unipair {
unsigned short unicode;
unsigned short fontpos;
@@ -68,8 +67,8 @@ struct unimapdesc {
unsigned short entry_ct;
struct unipair __user *entries;
};
-#define PIO_UNIMAP _IO(KD_IOCTL_BASE, 0x67) /* put unicode-to-font mapping in kernel */
-#define PIO_UNIMAPCLR _IO(KD_IOCTL_BASE, 0x68) /* clear table, possibly advise hash algorithm */
+#define PIO_UNIMAP 0x4B67 /* put unicode-to-font mapping in kernel */
+#define PIO_UNIMAPCLR 0x4B68 /* clear table, possibly advise hash algorithm */
struct unimapinit {
unsigned short advised_hashsize; /* 0 if no opinion */
unsigned short advised_hashstep; /* 0 if no opinion */
@@ -84,19 +83,19 @@ struct unimapinit {
#define K_MEDIUMRAW 0x02
#define K_UNICODE 0x03
#define K_OFF 0x04
-#define KDGKBMODE _IO(KD_IOCTL_BASE, 0x44) /* gets current keyboard mode */
-#define KDSKBMODE _IO(KD_IOCTL_BASE, 0x45) /* sets current keyboard mode */
+#define KDGKBMODE 0x4B44 /* gets current keyboard mode */
+#define KDSKBMODE 0x4B45 /* sets current keyboard mode */
#define K_METABIT 0x03
#define K_ESCPREFIX 0x04
-#define KDGKBMETA _IO(KD_IOCTL_BASE, 0x62) /* gets meta key handling mode */
-#define KDSKBMETA _IO(KD_IOCTL_BASE, 0x63) /* sets meta key handling mode */
+#define KDGKBMETA 0x4B62 /* gets meta key handling mode */
+#define KDSKBMETA 0x4B63 /* sets meta key handling mode */
#define K_SCROLLLOCK 0x01
#define K_NUMLOCK 0x02
#define K_CAPSLOCK 0x04
-#define KDGKBLED _IO(KD_IOCTL_BASE, 0x64) /* get led flags (not lights) */
-#define KDSKBLED _IO(KD_IOCTL_BASE, 0x65) /* set led flags (not lights) */
+#define KDGKBLED 0x4B64 /* get led flags (not lights) */
+#define KDSKBLED 0x4B65 /* set led flags (not lights) */
struct kbentry {
unsigned char kb_table;
@@ -108,15 +107,15 @@ struct kbentry {
#define K_ALTTAB 0x02
#define K_ALTSHIFTTAB 0x03
-#define KDGKBENT _IO(KD_IOCTL_BASE, 0x46) /* gets one entry in translation table */
-#define KDSKBENT _IO(KD_IOCTL_BASE, 0x47) /* sets one entry in translation table */
+#define KDGKBENT 0x4B46 /* gets one entry in translation table */
+#define KDSKBENT 0x4B47 /* sets one entry in translation table */
struct kbsentry {
unsigned char kb_func;
unsigned char kb_string[512];
};
-#define KDGKBSENT _IO(KD_IOCTL_BASE, 0x48) /* gets one function key string entry */
-#define KDSKBSENT _IO(KD_IOCTL_BASE, 0x49) /* sets one function key string entry */
+#define KDGKBSENT 0x4B48 /* gets one function key string entry */
+#define KDSKBSENT 0x4B49 /* sets one function key string entry */
struct kbdiacr {
unsigned char diacr, base, result;
@@ -125,8 +124,8 @@ struct kbdiacrs {
unsigned int kb_cnt; /* number of entries in following array */
struct kbdiacr kbdiacr[256]; /* MAX_DIACR from keyboard.h */
};
-#define KDGKBDIACR _IO(KD_IOCTL_BASE, 0x4A) /* read kernel accent table */
-#define KDSKBDIACR _IO(KD_IOCTL_BASE, 0x4B) /* write kernel accent table */
+#define KDGKBDIACR 0x4B4A /* read kernel accent table */
+#define KDSKBDIACR 0x4B4B /* write kernel accent table */
struct kbdiacruc {
unsigned int diacr, base, result;
@@ -135,16 +134,16 @@ struct kbdiacrsuc {
unsigned int kb_cnt; /* number of entries in following array */
struct kbdiacruc kbdiacruc[256]; /* MAX_DIACR from keyboard.h */
};
-#define KDGKBDIACRUC _IO(KD_IOCTL_BASE, 0xFA) /* read kernel accent table - UCS */
-#define KDSKBDIACRUC _IO(KD_IOCTL_BASE, 0xFB) /* write kernel accent table - UCS */
+#define KDGKBDIACRUC 0x4BFA /* read kernel accent table - UCS */
+#define KDSKBDIACRUC 0x4BFB /* write kernel accent table - UCS */
struct kbkeycode {
unsigned int scancode, keycode;
};
-#define KDGETKEYCODE _IO(KD_IOCTL_BASE, 0x4C) /* read kernel keycode table entry */
-#define KDSETKEYCODE _IO(KD_IOCTL_BASE, 0x4D) /* write kernel keycode table entry */
+#define KDGETKEYCODE 0x4B4C /* read kernel keycode table entry */
+#define KDSETKEYCODE 0x4B4D /* write kernel keycode table entry */
-#define KDSIGACCEPT _IO(KD_IOCTL_BASE, 0x4E) /* accept kbd generated signals */
+#define KDSIGACCEPT 0x4B4E /* accept kbd generated signals */
struct kbd_repeat {
int delay; /* in msec; <= 0: don't change */
@@ -152,11 +151,10 @@ struct kbd_repeat {
/* earlier this field was misnamed "rate" */
};
-#define KDKBDREP _IO(KD_IOCTL_BASE, 0x52) /* set keyboard delay/repeat rate;
- * actually used values are returned
- */
+#define KDKBDREP 0x4B52 /* set keyboard delay/repeat rate;
+ * actually used values are returned */
-#define KDFONTOP _IO(KD_IOCTL_BASE, 0x72) /* font operations */
+#define KDFONTOP 0x4B72 /* font operations */
struct console_font_op {
unsigned int op; /* operation code KD_FONT_OP_* */
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index ad5478dbad00..225bc366ffcb 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -154,7 +154,7 @@ struct mount_attr {
*/
struct statmount {
__u32 size; /* Total size, including strings */
- __u32 __spare1;
+ __u32 mnt_opts; /* [str] Mount options of the mount */
__u64 mask; /* What results were written */
__u32 sb_dev_major; /* Device ID */
__u32 sb_dev_minor;
@@ -172,7 +172,8 @@ struct statmount {
__u64 propagate_from; /* Propagation from in current namespace */
__u32 mnt_root; /* [str] Root of mount relative to root of fs */
__u32 mnt_point; /* [str] Mountpoint relative to current root */
- __u64 __spare2[50];
+ __u64 mnt_ns_id; /* ID of the mount namespace */
+ __u64 __spare2[49];
char str[]; /* Variable size part containing strings */
};
@@ -188,10 +189,12 @@ struct mnt_id_req {
__u32 spare;
__u64 mnt_id;
__u64 param;
+ __u64 mnt_ns_id;
};
/* List of all mnt_id_req versions. */
#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+#define MNT_ID_REQ_SIZE_VER1 32 /* sizeof second published struct */
/*
* @mask bits for statmount(2)
@@ -202,10 +205,13 @@ struct mnt_id_req {
#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+#define STATMOUNT_MNT_NS_ID 0x00000040U /* Want/got mnt_ns_id */
+#define STATMOUNT_MNT_OPTS 0x00000080U /* Want/got mnt_opts */
/*
* Special @mnt_id values that can be passed to listmount
*/
#define LSMT_ROOT 0xffffffffffffffff /* root mount */
+#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
#endif /* _UAPI_LINUX_MOUNT_H */
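A hedged sketch of driving the extended statmount(2) interface: the request now carries an optional mnt_ns_id, and STATMOUNT_MNT_OPTS asks for the options string. There is no libc wrapper yet, so the raw syscall number is used; the number and the meaning of mnt_ns_id == 0 are assumptions, not spelled out in this header:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/mount.h>

    #ifndef __NR_statmount
    #define __NR_statmount 457   /* assumed: generic syscall table */
    #endif

    /* Query the mount options of @mnt_id. @buf receives struct statmount
     * followed by its string table. Returns 0 on success, -1 on error. */
    static int get_mnt_opts(uint64_t mnt_id, uint64_t mnt_ns_id,
                            struct statmount *buf, size_t bufsize)
    {
            struct mnt_id_req req;

            memset(&req, 0, sizeof(req));
            req.size = MNT_ID_REQ_SIZE_VER1;   /* 32: includes mnt_ns_id */
            req.mnt_id = mnt_id;
            req.param = STATMOUNT_MNT_OPTS;    /* what we want back */
            req.mnt_ns_id = mnt_ns_id;         /* assumed: 0 = caller's namespace */

            return syscall(__NR_statmount, &req, buf, bufsize, 0);
    }

    /* The options string, if returned, starts at buf->str + buf->mnt_opts. */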
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index a8188202413e..43742ac5b00d 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -148,6 +148,7 @@ enum {
NETDEV_A_QSTATS_RX_ALLOC_FAIL,
NETDEV_A_QSTATS_RX_HW_DROPS,
NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+ NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
NETDEV_A_QSTATS_RX_CSUM_NONE,
NETDEV_A_QSTATS_RX_CSUM_BAD,
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index aa4094ca2444..639894ed1b97 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1376,7 +1376,7 @@ enum nft_secmark_attributes {
#define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1)
/* Max security context length */
-#define NFT_SECMARK_CTX_MAXLEN 256
+#define NFT_SECMARK_CTX_MAXLEN 4096
/**
* enum nft_reject_types - nf_tables reject expression reject types
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
index 24c86dbc7ed5..887cbd12b695 100644
--- a/include/uapi/linux/nfsd_netlink.h
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -71,6 +71,14 @@ enum {
};
enum {
+ NFSD_A_POOL_MODE_MODE = 1,
+ NFSD_A_POOL_MODE_NPOOLS,
+
+ __NFSD_A_POOL_MODE_MAX,
+ NFSD_A_POOL_MODE_MAX = (__NFSD_A_POOL_MODE_MAX - 1)
+};
+
+enum {
NFSD_CMD_RPC_STATUS_GET = 1,
NFSD_CMD_THREADS_SET,
NFSD_CMD_THREADS_GET,
@@ -78,6 +86,8 @@ enum {
NFSD_CMD_VERSION_GET,
NFSD_CMD_LISTENER_SET,
NFSD_CMD_LISTENER_GET,
+ NFSD_CMD_POOL_MODE_SET,
+ NFSD_CMD_POOL_MODE_GET,
__NFSD_CMD_MAX,
NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f917bc6c9b6f..f97f5adc8d51 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2052,6 +2052,10 @@ enum nl80211_commands {
* @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
* interface combinations. In each nested item, it contains attributes
* defined in &enum nl80211_if_combination_attrs.
+ * If the wiphy uses multiple radios (@NL80211_ATTR_WIPHY_RADIOS is set),
+ * this attribute contains the interface combinations of the first radio.
+ * See @NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS for the global wiphy
+ * combinations for the sum of all radios.
* @NL80211_ATTR_SOFTWARE_IFTYPES: Nested attribute (just like
* %NL80211_ATTR_SUPPORTED_IFTYPES) containing the interface types that
* are managed in software: interfaces of these types aren't subject to
@@ -2856,6 +2860,14 @@ enum nl80211_commands {
* %NL80211_CMD_ASSOCIATE indicating the SPP A-MSDUs
* are used on this connection
*
+ * @NL80211_ATTR_WIPHY_RADIOS: Nested attribute describing physical radios
+ * belonging to this wiphy. See &enum nl80211_wiphy_radio_attrs.
+ *
+ * @NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS: Nested attribute listing the
+ * supported interface combinations for all radios combined. In each
+ * nested item, it contains attributes defined in
+ * &enum nl80211_if_combination_attrs.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3401,6 +3413,9 @@ enum nl80211_attrs {
NL80211_ATTR_ASSOC_SPP_AMSDU,
+ NL80211_ATTR_WIPHY_RADIOS,
+ NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4277,6 +4292,8 @@ enum nl80211_wmm_rule {
* @NL80211_FREQUENCY_ATTR_CAN_MONITOR: This channel can be used in monitor
* mode despite other (regulatory) restrictions, even if the channel is
* otherwise completely disabled.
+ * @NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP: This channel can be used for a
+ * very low power (VLP) AP, despite being NO_IR.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4320,6 +4337,7 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT,
NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT,
NL80211_FREQUENCY_ATTR_CAN_MONITOR,
+ NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4529,6 +4547,8 @@ enum nl80211_sched_scan_match_attr {
* Should be used together with %NL80211_RRF_DFS only.
* @NL80211_RRF_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP not allowed
* @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed
+ * @NL80211_RRF_ALLOW_6GHZ_VLP_AP: Very low power (VLP) AP can be permitted
+ * despite NO_IR configuration.
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -4553,6 +4573,7 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_DFS_CONCURRENT = 1<<21,
NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22,
NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23,
+ NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1<<24,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -7999,4 +8020,54 @@ enum nl80211_ap_settings_flags {
NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 1 << 1,
};
+/**
+ * enum nl80211_wiphy_radio_attrs - wiphy radio attributes
+ *
+ * @__NL80211_WIPHY_RADIO_ATTR_INVALID: Invalid
+ *
+ * @NL80211_WIPHY_RADIO_ATTR_INDEX: Index of this radio (u32)
+ * @NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE: Frequency range supported by this
+ * radio. Attribute may be present multiple times.
+ * @NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION: Supported interface
+ * combination for this radio. Attribute may be present multiple times
+ * and contains attributes defined in &enum nl80211_if_combination_attrs.
+ *
+ * @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal
+ * @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute
+ */
+enum nl80211_wiphy_radio_attrs {
+ __NL80211_WIPHY_RADIO_ATTR_INVALID,
+
+ NL80211_WIPHY_RADIO_ATTR_INDEX,
+ NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE,
+ NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION,
+
+ /* keep last */
+ __NL80211_WIPHY_RADIO_ATTR_LAST,
+ NL80211_WIPHY_RADIO_ATTR_MAX = __NL80211_WIPHY_RADIO_ATTR_LAST - 1,
+};
+
+/**
+ * enum nl80211_wiphy_radio_freq_range - wiphy radio frequency range
+ *
+ * @__NL80211_WIPHY_RADIO_FREQ_ATTR_INVALID: Invalid
+ *
+ * @NL80211_WIPHY_RADIO_FREQ_ATTR_START: Frequency range start (u32).
+ * The unit is kHz.
+ * @NL80211_WIPHY_RADIO_FREQ_ATTR_END: Frequency range end (u32).
+ * The unit is kHz.
+ *
+ * @__NL80211_WIPHY_RADIO_FREQ_ATTR_LAST: Internal
+ * @NL80211_WIPHY_RADIO_FREQ_ATTR_MAX: Highest attribute
+ */
+enum nl80211_wiphy_radio_freq_range {
+ __NL80211_WIPHY_RADIO_FREQ_ATTR_INVALID,
+
+ NL80211_WIPHY_RADIO_FREQ_ATTR_START,
+ NL80211_WIPHY_RADIO_FREQ_ATTR_END,
+
+ __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST,
+ NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST - 1,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/nsfs.h b/include/uapi/linux/nsfs.h
index a0c8552b64ee..b133211331f6 100644
--- a/include/uapi/linux/nsfs.h
+++ b/include/uapi/linux/nsfs.h
@@ -15,5 +15,15 @@
#define NS_GET_NSTYPE _IO(NSIO, 0x3)
/* Get owner UID (in the caller's user namespace) for a user namespace */
#define NS_GET_OWNER_UID _IO(NSIO, 0x4)
+/* Get the id for a mount namespace */
+#define NS_GET_MNTNS_ID _IO(NSIO, 0x5)
+/* Translate pid from target pid namespace into the caller's pid namespace. */
+#define NS_GET_PID_FROM_PIDNS _IOR(NSIO, 0x6, int)
+/* Return thread-group leader id of pid in the caller's pid namespace. */
+#define NS_GET_TGID_FROM_PIDNS _IOR(NSIO, 0x7, int)
+/* Translate pid from caller's pid namespace into a target pid namespace. */
+#define NS_GET_PID_IN_PIDNS _IOR(NSIO, 0x8, int)
+/* Return thread-group leader id of pid in the target pid namespace. */
+#define NS_GET_TGID_IN_PIDNS _IOR(NSIO, 0x9, int)
#endif /* __LINUX_NSFS_H */
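A hedged sketch of the new pid-translation ioctls: they operate on a pid-namespace file descriptor and, going by the _IOR(..., int) encoding and the patch series, take the pid to translate as the ioctl argument and return the translated pid (or a negative error). That calling convention is an assumption, not stated in this header:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/nsfs.h>

    /* Translate @pid, valid inside the pid namespace referred to by @ns_path
     * (e.g. "/proc/42/ns/pid"), into the caller's pid namespace. */
    static int pid_from_pidns(const char *ns_path, int pid)
    {
            int nsfd, ret;

            nsfd = open(ns_path, O_RDONLY | O_CLOEXEC);
            if (nsfd < 0)
                    return -1;

            ret = ioctl(nsfd, NS_GET_PID_FROM_PIDNS, pid);
            close(nsfd);
            return ret;
    }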
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index efc82c318fa2..3a701bd1f31b 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -649,7 +649,8 @@ enum ovs_flow_attr {
* Actions are passed as nested attributes.
*
* Executes the specified actions with the given probability on a per-packet
- * basis.
+ * basis. Nested actions will be able to access the probability value of the
+ * parent @OVS_ACTION_ATTR_SAMPLE.
*/
enum ovs_sample_attr {
OVS_SAMPLE_ATTR_UNSPEC,
@@ -914,6 +915,31 @@ struct check_pkt_len_arg {
};
#endif
+#define OVS_PSAMPLE_COOKIE_MAX_SIZE 16
+/**
+ * enum ovs_psample_attr - Attributes for %OVS_ACTION_ATTR_PSAMPLE
+ * action.
+ *
+ * @OVS_PSAMPLE_ATTR_GROUP: 32-bit number to identify the source of the
+ * sample.
+ * @OVS_PSAMPLE_ATTR_COOKIE: An optional variable-length binary cookie that
+ * contains user-defined metadata. The maximum length is
+ * OVS_PSAMPLE_COOKIE_MAX_SIZE bytes.
+ *
+ * Sends the packet to the psample multicast group with the specified group and
+ * cookie. It is possible to combine this action with the
+ * %OVS_ACTION_ATTR_TRUNC action to limit the size of the sample.
+ */
+enum ovs_psample_attr {
+ OVS_PSAMPLE_ATTR_GROUP = 1, /* u32 number. */
+ OVS_PSAMPLE_ATTR_COOKIE, /* Optional, user specified cookie. */
+
+ /* private: */
+ __OVS_PSAMPLE_ATTR_MAX
+};
+
+#define OVS_PSAMPLE_ATTR_MAX (__OVS_PSAMPLE_ATTR_MAX - 1)
+
/**
* enum ovs_action_attr - Action types.
*
@@ -966,6 +992,8 @@ struct check_pkt_len_arg {
* of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS
* argument.
* @OVS_ACTION_ATTR_DROP: Explicit drop action.
+ * @OVS_ACTION_ATTR_PSAMPLE: Send a sample of the packet to external observers
+ * via psample.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -1004,6 +1032,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */
OVS_ACTION_ATTR_DROP, /* u32 error code. */
+ OVS_ACTION_ATTR_PSAMPLE, /* Nested OVS_PSAMPLE_ATTR_*. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 3a64499b0f5d..4842c36fdf80 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1349,12 +1349,14 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-/* 5-0x7 available */
+#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */
+#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */
+/* 0x7 available */
#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
-#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
+#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h
index 72ec000a97cd..565fc0629fff 100644
--- a/include/uapi/linux/pidfd.h
+++ b/include/uapi/linux/pidfd.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/ioctl.h>
/* Flags for pidfd_open(). */
#define PIDFD_NONBLOCK O_NONBLOCK
@@ -15,4 +16,17 @@
#define PIDFD_SIGNAL_THREAD_GROUP (1UL << 1)
#define PIDFD_SIGNAL_PROCESS_GROUP (1UL << 2)
+#define PIDFS_IOCTL_MAGIC 0xFF
+
+#define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1)
+#define PIDFD_GET_IPC_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 2)
+#define PIDFD_GET_MNT_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 3)
+#define PIDFD_GET_NET_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 4)
+#define PIDFD_GET_PID_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 5)
+#define PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 6)
+#define PIDFD_GET_TIME_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 7)
+#define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8)
+#define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9)
+#define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10)
+
#endif /* _UAPI_LINUX_PIDFD_H */
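These ioctls act on a pidfd and are expected to return a new file descriptor for the requested namespace of the target process; that calling convention is assumed from the pidfs series, not stated in the header. A minimal sketch that enters the target's network namespace:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/pidfd.h>

    /* Join the network namespace of the process referred to by @pidfd.
     * Returns 0 on success, -1 on error. */
    static int join_target_netns(int pidfd)
    {
            int nsfd, ret;

            nsfd = ioctl(pidfd, PIDFD_GET_NET_NAMESPACE, 0);
            if (nsfd < 0)
                    return -1;

            ret = setns(nsfd, CLONE_NEWNET);
            close(nsfd);
            return ret;
    }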
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 229fc925ec3a..d36d9cdf0c00 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -554,6 +554,9 @@ enum {
TCA_FLOWER_KEY_SPI, /* be32 */
TCA_FLOWER_KEY_SPI_MASK, /* be32 */
+ TCA_FLOWER_KEY_ENC_FLAGS, /* be32 */
+ TCA_FLOWER_KEY_ENC_FLAGS_MASK, /* be32 */
+
__TCA_FLOWER_MAX,
};
@@ -674,8 +677,15 @@ enum {
enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = (1 << 2),
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = (1 << 3),
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = (1 << 4),
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = (1 << 5),
+ __TCA_FLOWER_KEY_FLAGS_MAX,
};
+#define TCA_FLOWER_KEY_FLAGS_MAX (__TCA_FLOWER_KEY_FLAGS_MAX - 1)
+
enum {
TCA_FLOWER_KEY_CFM_OPT_UNSPEC,
TCA_FLOWER_KEY_CFM_MD_LEVEL,
diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
index e585db5bf2d2..b765f0e81f20 100644
--- a/include/uapi/linux/psample.h
+++ b/include/uapi/linux/psample.h
@@ -8,7 +8,11 @@ enum {
PSAMPLE_ATTR_ORIGSIZE,
PSAMPLE_ATTR_SAMPLE_GROUP,
PSAMPLE_ATTR_GROUP_SEQ,
- PSAMPLE_ATTR_SAMPLE_RATE,
+ PSAMPLE_ATTR_SAMPLE_RATE, /* u32, ratio between observed and
+ * sampled packets or scaled probability
+ * if PSAMPLE_ATTR_SAMPLE_PROBABILITY
+ * is set.
+ */
PSAMPLE_ATTR_DATA,
PSAMPLE_ATTR_GROUP_REFCOUNT,
PSAMPLE_ATTR_TUNNEL,
@@ -19,6 +23,11 @@ enum {
PSAMPLE_ATTR_LATENCY, /* u64, nanoseconds */
PSAMPLE_ATTR_TIMESTAMP, /* u64, nanoseconds */
PSAMPLE_ATTR_PROTO, /* u16 */
+ PSAMPLE_ATTR_USER_COOKIE, /* binary, user provided data */
+ PSAMPLE_ATTR_SAMPLE_PROBABILITY,/* no argument, interpret rate in
+ * PSAMPLE_ATTR_SAMPLE_RATE as a
+ * probability scaled 0 - U32_MAX.
+ */
__PSAMPLE_ATTR_MAX
};
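A short note on interpreting PSAMPLE_ATTR_SAMPLE_RATE under the new flag: without PSAMPLE_ATTR_SAMPLE_PROBABILITY the value is a 1-in-N sampling rate; with it, the value is a probability scaled over the full u32 range. A hedged helper that normalizes both cases to a probability:

    #include <stdbool.h>
    #include <stdint.h>

    /* Effective sampling probability for a psample notification.
     * @rate:           value of PSAMPLE_ATTR_SAMPLE_RATE
     * @is_probability: true if PSAMPLE_ATTR_SAMPLE_PROBABILITY was present */
    static double psample_probability(uint32_t rate, bool is_probability)
    {
            if (is_probability)
                    return (double)rate / (double)UINT32_MAX; /* scaled 0..U32_MAX */
            return rate ? 1.0 / (double)rate : 0.0;           /* 1-in-N sampling */
    }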
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 67626d535316..887a25286441 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -126,9 +126,15 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
- __u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
- __u64 __spare3[11]; /* Spare space for future expansion */
+ __u64 stx_subvol; /* Subvolume identifier */
+ __u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */
+ /* 0xb0 */
+ __u32 stx_atomic_write_segments_max; /* Max atomic write segment count */
+ __u32 __spare1[1];
+ /* 0xb8 */
+ __u64 __spare3[9]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -157,6 +163,7 @@ struct statx {
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
+#define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
@@ -192,6 +199,7 @@ struct statx {
#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */
#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
#define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */
+#define STATX_ATTR_WRITE_ATOMIC 0x00400000 /* File supports atomic write operations */
#endif /* _UAPI_LINUX_STAT_H */
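Userspace can request the new atomic-write geometry via statx(2) by putting STATX_WRITE_ATOMIC in the mask and then checking both the returned stx_mask bit and the STATX_ATTR_WRITE_ATOMIC attribute. A minimal sketch, assuming glibc's statx() wrapper and headers new enough to define these constants:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_WRITE_ATOMIC, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if ((stx.stx_mask & STATX_WRITE_ATOMIC) &&
	    (stx.stx_attributes & STATX_ATTR_WRITE_ATOMIC)) {
		printf("atomic write units: min=%u max=%u, max segments=%u\n",
		       stx.stx_atomic_write_unit_min,
		       stx.stx_atomic_write_unit_max,
		       stx.stx_atomic_write_segments_max);
	} else {
		printf("atomic writes not supported on this file\n");
	}
	return 0;
}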
diff --git a/include/uapi/linux/tcp_metrics.h b/include/uapi/linux/tcp_metrics.h
index 7cb4a172feed..927c735a5b0e 100644
--- a/include/uapi/linux/tcp_metrics.h
+++ b/include/uapi/linux/tcp_metrics.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* tcp_metrics.h - TCP Metrics Interface */
-#ifndef _LINUX_TCP_METRICS_H
-#define _LINUX_TCP_METRICS_H
+#ifndef _UAPI_LINUX_TCP_METRICS_H
+#define _UAPI_LINUX_TCP_METRICS_H
#include <linux/types.h>
@@ -27,6 +27,22 @@ enum tcp_metric_index {
#define TCP_METRIC_MAX (__TCP_METRIC_MAX - 1)
+/* Re-define enum tcp_metric_index, again, using the values carried
+ * as netlink attribute types.
+ */
+enum {
+ TCP_METRICS_A_METRICS_RTT = 1,
+ TCP_METRICS_A_METRICS_RTTVAR,
+ TCP_METRICS_A_METRICS_SSTHRESH,
+ TCP_METRICS_A_METRICS_CWND,
+ TCP_METRICS_A_METRICS_REODERING,
+ TCP_METRICS_A_METRICS_RTT_US,
+ TCP_METRICS_A_METRICS_RTTVAR_US,
+
+ __TCP_METRICS_A_METRICS_MAX
+};
+#define TCP_METRICS_A_METRICS_MAX (__TCP_METRICS_A_METRICS_MAX - 1)
+
enum {
TCP_METRICS_ATTR_UNSPEC,
TCP_METRICS_ATTR_ADDR_IPV4, /* u32 */
@@ -58,4 +74,4 @@ enum {
#define TCP_METRICS_CMD_MAX (__TCP_METRICS_CMD_MAX - 1)
-#endif /* _LINUX_TCP_METRICS_H */
+#endif /* _UAPI_LINUX_TCP_METRICS_H */
diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h
index bd1066754220..c102ef35d11e 100644
--- a/include/uapi/linux/trace_mmap.h
+++ b/include/uapi/linux/trace_mmap.h
@@ -43,6 +43,6 @@ struct trace_buffer_meta {
__u64 Reserved2;
};
-#define TRACE_MMAP_IOCTL_GET_READER _IO('T', 0x1)
+#define TRACE_MMAP_IOCTL_GET_READER _IO('R', 0x20)
#endif /* _TRACE_MMAP_H_ */
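Only the command value changes here; callers built against the updated header keep issuing the same request. A hedged sketch of how a consumer asks for a new reader page, assuming fd is a per-CPU trace_pipe_raw file that has already been mmap()ed as described in the tracing ring-buffer mapping documentation; trace_get_reader is a hypothetical helper name:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/trace_mmap.h>

/* Request that the kernel swap in a fresh reader page for a
 * memory-mapped per-CPU trace buffer. */
static int trace_get_reader(int fd)
{
	if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER) < 0) {
		perror("TRACE_MMAP_IOCTL_GET_READER");
		return -1;
	}
	return 0;
}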
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index d950d02ab791..f28701500714 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -321,6 +321,7 @@ enum xfrm_attr_type_t {
XFRMA_IF_ID, /* __u32 */
XFRMA_MTIMER_THRESH, /* __u32 in seconds for input SA */
XFRMA_SA_DIR, /* __u8 */
+ XFRMA_NAT_KEEPALIVE_INTERVAL, /* __u32 in seconds for NAT keepalive */
__XFRMA_MAX
#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
diff --git a/include/uapi/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h
index 6e574d7b7d79..393f2ee9c042 100644
--- a/include/uapi/linux/zorro_ids.h
+++ b/include/uapi/linux/zorro_ids.h
@@ -449,6 +449,9 @@
#define ZORRO_PROD_VMC_ISDN_BLASTER_Z2 ZORRO_ID(VMC, 0x01, 0)
#define ZORRO_PROD_VMC_HYPERCOM_4 ZORRO_ID(VMC, 0x02, 0)
+#define ZORRO_MANUF_CSLAB 0x1400
+#define ZORRO_PROD_CSLAB_WARP_1260 ZORRO_ID(CSLAB, 0x65, 0)
+
#define ZORRO_MANUF_INFORMATION 0x157C
#define ZORRO_PROD_INFORMATION_ISDN_ENGINE_I ZORRO_ID(INFORMATION, 0x64, 0)
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index f33d914d8f46..91583690bddc 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -8,11 +8,14 @@
#define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
#define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32)
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
+/* This ioctl is only supported with secure device nodes */
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
+/* This ioctl is only supported with secure device nodes */
#define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8)
+/* This ioctl is only supported with secure device nodes */
#define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static)
#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map)
#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap)
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index d04d394db064..7647e0946f50 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -77,6 +77,10 @@ struct vdso_timestamp {
* vdso_data will be accessed by 64 bit and compat code at the same time
* so we should be careful before modifying this structure.
*
+ * The ordering of the struct members is optimized to have fast access to the
+ * often required struct members which are related to CLOCK_REALTIME and
+ * CLOCK_MONOTONIC. This information is stored in the first cache lines.
+ *
* @basetime is used to store the base time for the system wide time getter
* VVAR page.
*
diff --git a/include/xen/events.h b/include/xen/events.h
index 3b07409f8032..de5da58a0205 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -144,4 +144,6 @@ static inline void xen_evtchn_close(evtchn_port_t port)
BUG();
}
+extern bool xen_fifo_events;
+
#endif /* _XEN_EVENTS_H */
diff --git a/init/Kconfig b/init/Kconfig
index 72404c1f2157..febdea2afc3b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -883,7 +883,7 @@ config GCC10_NO_ARRAY_BOUNDS
config CC_NO_ARRAY_BOUNDS
bool
- default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
+ default y if CC_IS_GCC && GCC_VERSION >= 90000 && GCC10_NO_ARRAY_BOUNDS
# Currently, disable -Wstringop-overflow for GCC globally.
config GCC_NO_STRINGOP_OVERFLOW
diff --git a/io_uring/Makefile b/io_uring/Makefile
index fc1b23c524e8..61923e11c767 100644
--- a/io_uring/Makefile
+++ b/io_uring/Makefile
@@ -4,9 +4,9 @@
obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
tctx.o filetable.o rw.o net.o poll.o \
- uring_cmd.o openclose.o sqpoll.o \
- xattr.o nop.o fs.o splice.o sync.o \
- msg_ring.o advise.o openclose.o \
+ eventfd.o uring_cmd.o openclose.o \
+ sqpoll.o xattr.o nop.o fs.o splice.o \
+ sync.o msg_ring.o advise.o openclose.o \
epoll.o statx.o timeout.o fdinfo.o \
cancel.o waitid.o register.o \
truncate.o memmap.o
diff --git a/io_uring/advise.c b/io_uring/advise.c
index 7085804c513c..cb7b881665e5 100644
--- a/io_uring/advise.c
+++ b/io_uring/advise.c
@@ -17,14 +17,14 @@
struct io_fadvise {
struct file *file;
u64 offset;
- u32 len;
+ u64 len;
u32 advice;
};
struct io_madvise {
struct file *file;
u64 addr;
- u32 len;
+ u64 len;
u32 advice;
};
@@ -33,11 +33,13 @@ int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
- if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
+ if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
ma->addr = READ_ONCE(sqe->addr);
- ma->len = READ_ONCE(sqe->len);
+ ma->len = READ_ONCE(sqe->off);
+ if (!ma->len)
+ ma->len = READ_ONCE(sqe->len);
ma->advice = READ_ONCE(sqe->fadvise_advice);
req->flags |= REQ_F_FORCE_ASYNC;
return 0;
@@ -78,11 +80,13 @@ int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
- if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
+ if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
fa->offset = READ_ONCE(sqe->off);
- fa->len = READ_ONCE(sqe->len);
+ fa->len = READ_ONCE(sqe->addr);
+ if (!fa->len)
+ fa->len = READ_ONCE(sqe->len);
fa->advice = READ_ONCE(sqe->fadvise_advice);
if (io_fadvise_force_async(fa))
req->flags |= REQ_F_FORCE_ASYNC;
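With this change the madvise/fadvise length is read from sqe->off (falling back to sqe->len for old binaries), so ranges larger than 4GB can be expressed. A hedged sketch of filling the SQE by hand for IORING_OP_MADVISE, assuming a ring set up elsewhere (for example via liburing's io_uring_get_sqe()); prep_madvise64 is a hypothetical helper name:

#include <string.h>
#include <linux/io_uring.h>

/* Prepare a 64-bit-length madvise request the way io_madvise_prep()
 * now expects it: address in sqe->addr, length in sqe->off (sqe->len
 * remains a 32-bit fallback), advice in sqe->fadvise_advice. */
static void prep_madvise64(struct io_uring_sqe *sqe, void *addr,
			   __u64 len, int advice)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_MADVISE;
	sqe->fd = -1;			/* madvise does not operate on a file */
	sqe->addr = (__u64)(unsigned long)addr;
	sqe->off = len;			/* 64-bit length */
	sqe->fadvise_advice = advice;	/* e.g. MADV_DONTNEED */
}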
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index 76b32e65c03c..b33995e00ba9 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -27,10 +27,10 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
{
- if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+ if (req->cancel_seq_set && sequence == req->work.cancel_seq)
return true;
- req->flags |= REQ_F_CANCEL_SEQ;
+ req->cancel_seq_set = true;
req->work.cancel_seq = sequence;
return false;
}
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
new file mode 100644
index 000000000000..b9384503a2b7
--- /dev/null
+++ b/io_uring/eventfd.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/eventfd.h>
+#include <linux/eventpoll.h>
+#include <linux/io_uring.h>
+#include <linux/io_uring_types.h>
+
+#include "io-wq.h"
+#include "eventfd.h"
+
+struct io_ev_fd {
+ struct eventfd_ctx *cq_ev_fd;
+ unsigned int eventfd_async: 1;
+ struct rcu_head rcu;
+ atomic_t refs;
+ atomic_t ops;
+};
+
+enum {
+ IO_EVENTFD_OP_SIGNAL_BIT,
+};
+
+static void io_eventfd_free(struct rcu_head *rcu)
+{
+ struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+
+ eventfd_ctx_put(ev_fd->cq_ev_fd);
+ kfree(ev_fd);
+}
+
+static void io_eventfd_do_signal(struct rcu_head *rcu)
+{
+ struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+
+ eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
+
+ if (atomic_dec_and_test(&ev_fd->refs))
+ io_eventfd_free(rcu);
+}
+
+void io_eventfd_signal(struct io_ring_ctx *ctx)
+{
+ struct io_ev_fd *ev_fd = NULL;
+
+ if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
+ return;
+
+ guard(rcu)();
+
+ /*
+ * rcu_dereference ctx->io_ev_fd once and use it both for the check
+ * and for eventfd_signal
+ */
+ ev_fd = rcu_dereference(ctx->io_ev_fd);
+
+ /*
+ * Check again if ev_fd exists in case an io_eventfd_unregister call
+ * completed between the NULL check of ctx->io_ev_fd at the start of
+ * the function and rcu_read_lock.
+ */
+ if (unlikely(!ev_fd))
+ return;
+ if (!atomic_inc_not_zero(&ev_fd->refs))
+ return;
+ if (ev_fd->eventfd_async && !io_wq_current_is_worker())
+ goto out;
+
+ if (likely(eventfd_signal_allowed())) {
+ eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
+ } else {
+ if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
+ call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
+ return;
+ }
+ }
+out:
+ if (atomic_dec_and_test(&ev_fd->refs))
+ call_rcu(&ev_fd->rcu, io_eventfd_free);
+}
+
+void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
+{
+ bool skip;
+
+ spin_lock(&ctx->completion_lock);
+
+ /*
+ * Eventfd should only get triggered when at least one event has been
+ * posted. Some applications rely on the eventfd notification count
+ * only changing IFF a new CQE has been added to the CQ ring. There's
+ * no dependency on a 1:1 relationship between how many times this
+ * function is called (and hence the eventfd count) and number of CQEs
+ * posted to the CQ ring.
+ */
+ skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
+ ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+ if (skip)
+ return;
+
+ io_eventfd_signal(ctx);
+}
+
+int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned int eventfd_async)
+{
+ struct io_ev_fd *ev_fd;
+ __s32 __user *fds = arg;
+ int fd;
+
+ ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
+ lockdep_is_held(&ctx->uring_lock));
+ if (ev_fd)
+ return -EBUSY;
+
+ if (copy_from_user(&fd, fds, sizeof(*fds)))
+ return -EFAULT;
+
+ ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
+ if (!ev_fd)
+ return -ENOMEM;
+
+ ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
+ if (IS_ERR(ev_fd->cq_ev_fd)) {
+ int ret = PTR_ERR(ev_fd->cq_ev_fd);
+ kfree(ev_fd);
+ return ret;
+ }
+
+ spin_lock(&ctx->completion_lock);
+ ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+
+ ev_fd->eventfd_async = eventfd_async;
+ ctx->has_evfd = true;
+ atomic_set(&ev_fd->refs, 1);
+ atomic_set(&ev_fd->ops, 0);
+ rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
+ return 0;
+}
+
+int io_eventfd_unregister(struct io_ring_ctx *ctx)
+{
+ struct io_ev_fd *ev_fd;
+
+ ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
+ lockdep_is_held(&ctx->uring_lock));
+ if (ev_fd) {
+ ctx->has_evfd = false;
+ rcu_assign_pointer(ctx->io_ev_fd, NULL);
+ if (atomic_dec_and_test(&ev_fd->refs))
+ call_rcu(&ev_fd->rcu, io_eventfd_free);
+ return 0;
+ }
+
+ return -ENXIO;
+}
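The registration side of this new file is driven from userspace through IORING_REGISTER_EVENTFD (or IORING_REGISTER_EVENTFD_ASYNC). A minimal liburing-based sketch, assuming liburing is available; note the eventfd counter only signals that completions arrived, not how many CQEs were posted:

#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	int efd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return 1;

	/* Tie CQE arrival to the eventfd; an epoll/poll loop can wait on
	 * efd instead of blocking in io_uring_enter(). */
	ret = io_uring_register_eventfd(&ring, efd);
	if (ret < 0) {
		fprintf(stderr, "register_eventfd: %d\n", ret);
		return 1;
	}

	/* ... submit requests; read(efd, &u64, 8) when woken ... */

	io_uring_unregister_eventfd(&ring);
	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}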
diff --git a/io_uring/eventfd.h b/io_uring/eventfd.h
new file mode 100644
index 000000000000..d394f49c6321
--- /dev/null
+++ b/io_uring/eventfd.h
@@ -0,0 +1,8 @@
+
+struct io_ring_ctx;
+int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned int eventfd_async);
+int io_eventfd_unregister(struct io_ring_ctx *ctx);
+
+void io_eventfd_flush_signal(struct io_ring_ctx *ctx);
+void io_eventfd_signal(struct io_ring_ctx *ctx);
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index d1c47a9d9215..f1e7c670add8 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -23,6 +23,7 @@
#include "io_uring.h"
#define WORKER_IDLE_TIMEOUT (5 * HZ)
+#define WORKER_INIT_LIMIT 3
enum {
IO_WORKER_F_UP = 0, /* up and active */
@@ -58,6 +59,7 @@ struct io_worker {
unsigned long create_state;
struct callback_head create_work;
+ int init_retries;
union {
struct rcu_head rcu;
@@ -159,7 +161,7 @@ static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
struct io_wq_work *work)
{
- return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
+ return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND));
}
static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
@@ -451,7 +453,7 @@ static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
- return work->flags >> IO_WQ_HASH_SHIFT;
+ return atomic_read(&work->flags) >> IO_WQ_HASH_SHIFT;
}
static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
@@ -592,8 +594,9 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
next_hashed = wq_next_work(work);
- if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
- work->flags |= IO_WQ_WORK_CANCEL;
+ if (do_kill &&
+ (atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND))
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
wq->do_work(work);
io_assign_current_work(worker, NULL);
@@ -744,7 +747,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
return true;
}
-static inline bool io_should_retry_thread(long err)
+static inline bool io_should_retry_thread(struct io_worker *worker, long err)
{
/*
* Prevent perpetual task_work retry, if the task (or its group) is
@@ -752,6 +755,8 @@ static inline bool io_should_retry_thread(long err)
*/
if (fatal_signal_pending(current))
return false;
+ if (worker->init_retries++ >= WORKER_INIT_LIMIT)
+ return false;
switch (err) {
case -EAGAIN:
@@ -778,7 +783,7 @@ static void create_worker_cont(struct callback_head *cb)
io_init_new_worker(wq, worker, tsk);
io_worker_release(worker);
return;
- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+ } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
struct io_wq_acct *acct = io_wq_get_acct(worker);
atomic_dec(&acct->nr_running);
@@ -845,7 +850,7 @@ fail:
tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
if (!IS_ERR(tsk)) {
io_init_new_worker(wq, worker, tsk);
- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+ } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
kfree(worker);
goto fail;
} else {
@@ -891,7 +896,7 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data)
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
do {
- work->flags |= IO_WQ_WORK_CANCEL;
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
wq->do_work(work);
work = wq->free_work(work);
} while (work);
@@ -926,8 +931,12 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
struct io_wq_acct *acct = io_work_get_acct(wq, work);
- unsigned long work_flags = work->flags;
- struct io_cb_cancel_data match;
+ unsigned int work_flags = atomic_read(&work->flags);
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_item,
+ .data = work,
+ .cancel_all = false,
+ };
bool do_create;
/*
@@ -935,7 +944,7 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
* been marked as one that should not get executed, cancel it here.
*/
if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
- (work->flags & IO_WQ_WORK_CANCEL)) {
+ (work_flags & IO_WQ_WORK_CANCEL)) {
io_run_cancel(work, wq);
return;
}
@@ -965,10 +974,6 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
raw_spin_unlock(&wq->lock);
/* fatal condition, failed to create the first worker */
- match.fn = io_wq_work_match_item,
- match.data = work,
- match.cancel_all = false,
-
io_acct_cancel_pending_work(wq, acct, &match);
}
}
@@ -982,7 +987,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
unsigned int bit;
bit = hash_ptr(val, IO_WQ_HASH_ORDER);
- work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+ atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags);
}
static bool __io_wq_worker_cancel(struct io_worker *worker,
@@ -990,7 +995,7 @@ static bool __io_wq_worker_cancel(struct io_worker *worker,
struct io_wq_work *work)
{
if (work && match->fn(work, match->data)) {
- work->flags |= IO_WQ_WORK_CANCEL;
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
__set_notify_signal(worker->task);
return true;
}
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 2b2a6406dd8e..b3b004a7b625 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -56,7 +56,7 @@ bool io_wq_worker_stopped(void);
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
- return work->flags & IO_WQ_WORK_HASHED;
+ return atomic_read(&work->flags) & IO_WQ_WORK_HASHED;
}
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 816e93e7f949..8e6faa942a6f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -95,12 +95,14 @@
#include "futex.h"
#include "napi.h"
#include "uring_cmd.h"
+#include "msg_ring.h"
#include "memmap.h"
#include "timeout.h"
#include "poll.h"
#include "rw.h"
#include "alloc_cache.h"
+#include "eventfd.h"
#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
@@ -314,6 +316,9 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
sizeof(struct io_async_rw));
ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct uring_cache));
+ spin_lock_init(&ctx->msg_lock);
+ ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
+ sizeof(struct io_kiocb));
ret |= io_futex_cache_init(ctx);
if (ret)
goto err;
@@ -350,6 +355,7 @@ err:
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
io_alloc_cache_free(&ctx->uring_cache, kfree);
+ io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
io_futex_cache_free(ctx);
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
@@ -461,9 +467,9 @@ static void io_prep_async_work(struct io_kiocb *req)
}
req->work.list.next = NULL;
- req->work.flags = 0;
+ atomic_set(&req->work.flags, 0);
if (req->flags & REQ_F_FORCE_ASYNC)
- req->work.flags |= IO_WQ_WORK_CONCURRENT;
+ atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);
if (req->file && !(req->flags & REQ_F_FIXED_FILE))
req->flags |= io_file_get_flags(req->file);
@@ -479,7 +485,7 @@ static void io_prep_async_work(struct io_kiocb *req)
io_wq_hash_work(&req->work, file_inode(req->file));
} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
if (def->unbound_nonreg_file)
- req->work.flags |= IO_WQ_WORK_UNBOUND;
+ atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
}
}
@@ -519,7 +525,7 @@ static void io_queue_iowq(struct io_kiocb *req)
* worker for it).
*/
if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
- req->work.flags |= IO_WQ_WORK_CANCEL;
+ atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
io_wq_enqueue(tctx->io_wq, &req->work);
@@ -541,84 +547,6 @@ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
}
}
-void io_eventfd_ops(struct rcu_head *rcu)
-{
- struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
- int ops = atomic_xchg(&ev_fd->ops, 0);
-
- if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
- eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
-
- /* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
- * ordering in a race but if references are 0 we know we have to free
- * it regardless.
- */
- if (atomic_dec_and_test(&ev_fd->refs)) {
- eventfd_ctx_put(ev_fd->cq_ev_fd);
- kfree(ev_fd);
- }
-}
-
-static void io_eventfd_signal(struct io_ring_ctx *ctx)
-{
- struct io_ev_fd *ev_fd = NULL;
-
- rcu_read_lock();
- /*
- * rcu_dereference ctx->io_ev_fd once and use it for both for checking
- * and eventfd_signal
- */
- ev_fd = rcu_dereference(ctx->io_ev_fd);
-
- /*
- * Check again if ev_fd exists incase an io_eventfd_unregister call
- * completed between the NULL check of ctx->io_ev_fd at the start of
- * the function and rcu_read_lock.
- */
- if (unlikely(!ev_fd))
- goto out;
- if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
- goto out;
- if (ev_fd->eventfd_async && !io_wq_current_is_worker())
- goto out;
-
- if (likely(eventfd_signal_allowed())) {
- eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
- } else {
- atomic_inc(&ev_fd->refs);
- if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
- call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
- else
- atomic_dec(&ev_fd->refs);
- }
-
-out:
- rcu_read_unlock();
-}
-
-static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
-{
- bool skip;
-
- spin_lock(&ctx->completion_lock);
-
- /*
- * Eventfd should only get triggered when at least one event has been
- * posted. Some applications rely on the eventfd notification count
- * only changing IFF a new CQE has been added to the CQ ring. There's
- * no depedency on 1:1 relationship between how many times this
- * function is called (and hence the eventfd count) and number of CQEs
- * posted to the CQ ring.
- */
- skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
- ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
- spin_unlock(&ctx->completion_lock);
- if (skip)
- return;
-
- io_eventfd_signal(ctx);
-}
-
void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
if (ctx->poll_activated)
@@ -878,20 +806,43 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
return false;
}
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
+ u32 cflags)
{
bool filled;
- io_cq_lock(ctx);
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
if (!filled)
filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
+ return filled;
+}
+
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+{
+ bool filled;
+
+ io_cq_lock(ctx);
+ filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
io_cq_unlock_post(ctx);
return filled;
}
/*
+ * Must be called from inline task_work so we know a flush will happen later,
+ * and obviously with ctx->uring_lock held (tw always has that).
+ */
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+{
+ if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
+ spin_lock(&ctx->completion_lock);
+ io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
+ spin_unlock(&ctx->completion_lock);
+ }
+ ctx->submit_state.cq_flush = true;
+}
+
+/*
* A helper for multishot requests posting additional CQEs.
* Should only be used from a task_work including IO_URING_F_MULTISHOT.
*/
@@ -1175,9 +1126,10 @@ void tctx_task_work(struct callback_head *cb)
WARN_ON_ONCE(ret);
}
-static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+static inline void io_req_local_work_add(struct io_kiocb *req,
+ struct io_ring_ctx *ctx,
+ unsigned flags)
{
- struct io_ring_ctx *ctx = req->ctx;
unsigned nr_wait, nr_tw, nr_tw_prev;
struct llist_node *head;
@@ -1191,6 +1143,8 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
flags &= ~IOU_F_TWQ_LAZY_WAKE;
+ guard(rcu)();
+
head = READ_ONCE(ctx->work_llist.first);
do {
nr_tw_prev = 0;
@@ -1259,8 +1213,8 @@ static void io_req_normal_work_add(struct io_kiocb *req)
if (ctx->flags & IORING_SETUP_SQPOLL) {
struct io_sq_data *sqd = ctx->sq_data;
- if (wq_has_sleeper(&sqd->wait))
- wake_up(&sqd->wait);
+ if (sqd->thread)
+ __set_notify_signal(sqd->thread);
return;
}
@@ -1272,13 +1226,18 @@ static void io_req_normal_work_add(struct io_kiocb *req)
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
- if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
- rcu_read_lock();
- io_req_local_work_add(req, flags);
- rcu_read_unlock();
- } else {
+ if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ io_req_local_work_add(req, req->ctx, flags);
+ else
io_req_normal_work_add(req);
- }
+}
+
+void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
+ unsigned flags)
+{
+ if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
+ return;
+ io_req_local_work_add(req, ctx, flags);
}
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
@@ -1467,7 +1426,7 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
}
__io_cq_unlock_post(ctx);
- if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
+ if (!wq_list_empty(&state->compl_reqs)) {
io_free_batch_list(ctx, state->compl_reqs.first);
INIT_WQ_LIST(&state->compl_reqs);
}
@@ -1813,14 +1772,14 @@ void io_wq_submit_work(struct io_wq_work *work)
io_arm_ltimeout(req);
/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
- if (work->flags & IO_WQ_WORK_CANCEL) {
+ if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
fail:
io_req_task_queue_fail(req, err);
return;
}
if (!io_assign_file(req, def, issue_flags)) {
err = -EBADF;
- work->flags |= IO_WQ_WORK_CANCEL;
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
goto fail;
}
@@ -2058,6 +2017,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->file = NULL;
req->rsrc_node = NULL;
req->task = current;
+ req->cancel_seq_set = false;
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;
@@ -2648,6 +2608,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
io_alloc_cache_free(&ctx->uring_cache, kfree);
+ io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
io_futex_cache_free(ctx);
io_destroy_buffers(ctx);
mutex_unlock(&ctx->uring_lock);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 624ca9076a50..e1ce908f0679 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -65,6 +65,7 @@ bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
@@ -73,6 +74,8 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned issue_flags);
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
+void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
+ unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
@@ -104,12 +107,6 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool cancel_all);
-enum {
- IO_EVENTFD_OP_SIGNAL_BIT,
- IO_EVENTFD_OP_FREE_BIT,
-};
-
-void io_eventfd_ops(struct rcu_head *rcu);
void io_activate_pollwq(struct io_ring_ctx *ctx);
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
@@ -433,7 +430,7 @@ static inline bool io_file_can_poll(struct io_kiocb *req)
{
if (req->flags & REQ_F_CAN_POLL)
return true;
- if (file_can_poll(req->file)) {
+ if (req->file && file_can_poll(req->file)) {
req->flags |= REQ_F_CAN_POLL;
return true;
}
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 4785d6af5fee..a0f32a255fd1 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -244,6 +244,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
struct io_ring_ctx *ctx = file->private_data;
size_t sz = vma->vm_end - vma->vm_start;
long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned int npages;
void *ptr;
ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
@@ -253,8 +254,8 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
switch (offset & IORING_OFF_MMAP_MASK) {
case IORING_OFF_SQ_RING:
case IORING_OFF_CQ_RING:
- return io_uring_mmap_pages(ctx, vma, ctx->ring_pages,
- ctx->n_ring_pages);
+ npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
case IORING_OFF_SQES:
return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
ctx->n_sqe_pages);
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 81c4a9d43729..29fa9285a33d 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -11,9 +11,9 @@
#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
+#include "alloc_cache.h"
#include "msg_ring.h"
-
/* All valid masks for MSG_RING */
#define IORING_MSG_RING_MASK (IORING_MSG_RING_CQE_SKIP | \
IORING_MSG_RING_FLAGS_PASS)
@@ -68,59 +68,70 @@ void io_msg_ring_cleanup(struct io_kiocb *req)
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
- if (!target_ctx->task_complete)
- return false;
- return current != target_ctx->submitter_task;
+ return target_ctx->task_complete;
}
-static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
- struct io_ring_ctx *ctx = req->file->private_data;
- struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
- struct task_struct *task = READ_ONCE(ctx->submitter_task);
+ struct io_ring_ctx *ctx = req->ctx;
- if (unlikely(!task))
- return -EOWNERDEAD;
+ io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
+ if (spin_trylock(&ctx->msg_lock)) {
+ if (io_alloc_cache_put(&ctx->msg_cache, req))
+ req = NULL;
+ spin_unlock(&ctx->msg_lock);
+ }
+ if (req)
+ kmem_cache_free(req_cachep, req);
+ percpu_ref_put(&ctx->refs);
+}
- init_task_work(&msg->tw, func);
- if (task_work_add(task, &msg->tw, TWA_SIGNAL))
+static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ int res, u32 cflags, u64 user_data)
+{
+ req->task = READ_ONCE(ctx->submitter_task);
+ if (!req->task) {
+ kmem_cache_free(req_cachep, req);
return -EOWNERDEAD;
+ }
+ req->cqe.user_data = user_data;
+ io_req_set_res(req, res, cflags);
+ percpu_ref_get(&ctx->refs);
+ req->ctx = ctx;
+ req->io_task_work.func = io_msg_tw_complete;
+ io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
+ return 0;
+}
- return IOU_ISSUE_SKIP_COMPLETE;
+static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
+{
+ struct io_kiocb *req = NULL;
+
+ if (spin_trylock(&ctx->msg_lock)) {
+ req = io_alloc_cache_get(&ctx->msg_cache);
+ spin_unlock(&ctx->msg_lock);
+ }
+ if (req)
+ return req;
+ return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN);
}
-static void io_msg_tw_complete(struct callback_head *head)
+static int io_msg_data_remote(struct io_kiocb *req)
{
- struct io_msg *msg = container_of(head, struct io_msg, tw);
- struct io_kiocb *req = cmd_to_io_kiocb(msg);
struct io_ring_ctx *target_ctx = req->file->private_data;
- int ret = 0;
-
- if (current->flags & PF_EXITING) {
- ret = -EOWNERDEAD;
- } else {
- u32 flags = 0;
-
- if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
- flags = msg->cqe_flags;
-
- /*
- * If the target ring is using IOPOLL mode, then we need to be
- * holding the uring_lock for posting completions. Other ring
- * types rely on the regular completion locking, which is
- * handled while posting.
- */
- if (target_ctx->flags & IORING_SETUP_IOPOLL)
- mutex_lock(&target_ctx->uring_lock);
- if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
- ret = -EOVERFLOW;
- if (target_ctx->flags & IORING_SETUP_IOPOLL)
- mutex_unlock(&target_ctx->uring_lock);
- }
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ struct io_kiocb *target;
+ u32 flags = 0;
- if (ret < 0)
- req_set_fail(req);
- io_req_queue_tw_complete(req, ret);
+ target = io_msg_get_kiocb(req->ctx);
+ if (unlikely(!target))
+ return -ENOMEM;
+
+ if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
+ flags = msg->cqe_flags;
+
+ return io_msg_remote_post(target_ctx, target, msg->len, flags,
+ msg->user_data);
}
static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
@@ -138,7 +149,7 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
return -EBADFD;
if (io_msg_need_remote(target_ctx))
- return io_msg_exec_remote(req, io_msg_tw_complete);
+ return io_msg_data_remote(req);
if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
flags = msg->cqe_flags;
@@ -218,6 +229,22 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
io_req_queue_tw_complete(req, ret);
}
+static int io_msg_fd_remote(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->file->private_data;
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+ if (unlikely(!task))
+ return -EOWNERDEAD;
+
+ init_task_work(&msg->tw, io_msg_tw_fd_complete);
+ if (task_work_add(task, &msg->tw, TWA_SIGNAL))
+ return -EOWNERDEAD;
+
+ return IOU_ISSUE_SKIP_COMPLETE;
+}
+
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *target_ctx = req->file->private_data;
@@ -240,7 +267,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
}
if (io_msg_need_remote(target_ctx))
- return io_msg_exec_remote(req, io_msg_tw_fd_complete);
+ return io_msg_fd_remote(req);
return io_msg_install_complete(req, issue_flags);
}
@@ -294,3 +321,10 @@ done:
io_req_set_res(req, ret, 0);
return IOU_OK;
}
+
+void io_msg_cache_free(const void *entry)
+{
+ struct io_kiocb *req = (struct io_kiocb *) entry;
+
+ kmem_cache_free(req_cachep, req);
+}
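From userspace nothing changes: IORING_OP_MSG_RING still posts a CQE into another ring; only the kernel-internal delivery now goes through a cached io_kiocb and remote task_work. A hedged liburing sketch of sending a wakeup message from one ring to another; msg_ring_send is a hypothetical helper name and assumes the source ring has free SQ space:

#include <errno.h>
#include <liburing.h>

/* Post a CQE with (res=len, user_data=data) into the ring owned by
 * target_ring_fd. Returns 0 on success, negative errno otherwise. */
static int msg_ring_send(struct io_uring *src, int target_ring_fd,
			 unsigned int len, __u64 data)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(src);
	if (!sqe)
		return -EBUSY;
	io_uring_prep_msg_ring(sqe, target_ring_fd, len, data, 0);

	ret = io_uring_submit(src);
	if (ret < 0)
		return ret;
	ret = io_uring_wait_cqe(src, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(src, cqe);
	return ret < 0 ? ret : 0;
}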
diff --git a/io_uring/msg_ring.h b/io_uring/msg_ring.h
index 3987ee6c0e5f..3030f3942f0f 100644
--- a/io_uring/msg_ring.h
+++ b/io_uring/msg_ring.h
@@ -3,3 +3,4 @@
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags);
void io_msg_ring_cleanup(struct io_kiocb *req);
+void io_msg_cache_free(const void *entry);
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 883a1a665907..762254a7ff3f 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -261,12 +261,14 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
}
/*
- * __io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * __io_napi_adjust_timeout() - adjust busy loop timeout
* @ctx: pointer to io-uring context structure
* @iowq: pointer to io wait queue
* @ts: pointer to timespec or NULL
*
* Adjust the busy loop timeout according to timespec and busy poll timeout.
+ * If the specified NAPI timeout is bigger than the wait timeout, then adjust
+ * the NAPI timeout accordingly.
*/
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
struct timespec64 *ts)
@@ -274,16 +276,16 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow
unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
if (ts) {
- struct timespec64 poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
-
- if (timespec64_compare(ts, &poll_to_ts) > 0) {
- *ts = timespec64_sub(*ts, poll_to_ts);
- } else {
- u64 to = timespec64_to_ns(ts);
-
- do_div(to, 1000);
- ts->tv_sec = 0;
- ts->tv_nsec = 0;
+ struct timespec64 poll_to_ts;
+
+ poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
+ if (timespec64_compare(ts, &poll_to_ts) < 0) {
+ s64 poll_to_ns = timespec64_to_ns(ts);
+ if (poll_to_ns > 0) {
+ u64 val = poll_to_ns + 999;
+ do_div(val, 1000);
+ poll_to = val;
+ }
}
}
diff --git a/io_uring/net.c b/io_uring/net.c
index 0a48596429d9..594490a1389b 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -51,6 +51,16 @@ struct io_connect {
bool seen_econnaborted;
};
+struct io_bind {
+ struct file *file;
+ int addr_len;
+};
+
+struct io_listen {
+ struct file *file;
+ int backlog;
+};
+
struct io_sr_msg {
struct file *file;
union {
@@ -817,20 +827,20 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
bool mshot_finished, unsigned issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- unsigned int cflags;
-
- if (sr->flags & IORING_RECVSEND_BUNDLE)
- cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
- issue_flags);
- else
- cflags = io_put_kbuf(req, issue_flags);
+ unsigned int cflags = 0;
if (kmsg->msg.msg_inq > 0)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- /* bundle with no more immediate buffers, we're done */
- if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY)
- goto finish;
+ if (sr->flags & IORING_RECVSEND_BUNDLE) {
+ cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+ issue_flags);
+ /* bundle with no more immediate buffers, we're done */
+ if (req->flags & REQ_F_BL_EMPTY)
+ goto finish;
+ } else {
+ cflags |= io_put_kbuf(req, issue_flags);
+ }
/*
* Fill CQE for this receive and see if we should keep trying to
@@ -1129,13 +1139,15 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
retry_multishot:
if (io_do_buffer_select(req)) {
ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ kmsg->msg.msg_inq = -1;
goto out_free;
+ }
sr->buf = NULL;
}
- kmsg->msg.msg_inq = -1;
kmsg->msg.msg_flags = 0;
+ kmsg->msg.msg_inq = -1;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
@@ -1265,14 +1277,14 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
}
-static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
skb_zcopy_downgrade_managed(skb);
- return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+ return zerocopy_fill_skb_from_iter(skb, from, length);
}
-static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+static int io_sg_from_iter(struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -1285,7 +1297,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
if (!frag)
shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
else if (unlikely(!skb_zcopy_managed(skb)))
- return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+ return zerocopy_fill_skb_from_iter(skb, from, length);
bi.bi_size = min(from->count, length);
bi.bi_bvec_done = from->iov_offset;
@@ -1312,14 +1324,6 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
skb->data_len += copied;
skb->len += copied;
skb->truesize += truesize;
-
- if (sk && sk->sk_type == SOCK_STREAM) {
- sk_wmem_queued_add(sk, truesize);
- if (!skb_zcopy_pure(skb))
- sk_mem_charge(sk, truesize);
- } else {
- refcount_add(truesize, &skb->sk->sk_wmem_alloc);
- }
return ret;
}
@@ -1715,6 +1719,70 @@ out:
return IOU_OK;
}
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+ struct sockaddr __user *uaddr;
+ struct io_async_msghdr *io;
+
+ if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
+ return -EINVAL;
+
+ uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ bind->addr_len = READ_ONCE(sqe->addr2);
+
+ io = io_msg_alloc_async(req);
+ if (unlikely(!io))
+ return -ENOMEM;
+ return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
+}
+
+int io_bind(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+ struct io_async_msghdr *io = req->async_data;
+ struct socket *sock;
+ int ret;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return 0;
+}
+
+int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+
+ if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
+ return -EINVAL;
+
+ listen->backlog = READ_ONCE(sqe->len);
+ return 0;
+}
+
+int io_listen(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+ struct socket *sock;
+ int ret;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ ret = __sys_listen_socket(sock, listen->backlog);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return 0;
+}
+
void io_netmsg_cache_free(const void *entry)
{
struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
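IORING_OP_BIND and IORING_OP_LISTEN mirror bind(2) and listen(2): as the prep code above shows, the sockaddr pointer travels in sqe->addr, its length in sqe->addr2, and the listen backlog in sqe->len. A hedged raw-SQE sketch (newer liburing also ships io_uring_prep_bind()/io_uring_prep_listen() helpers); prep_bind and prep_listen are hypothetical names and assume headers that define the new opcodes:

#include <string.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

/* Queue a bind(2) on a socket fd: address pointer in sqe->addr,
 * address length in sqe->addr2, as io_bind_prep() expects. */
static void prep_bind(struct io_uring_sqe *sqe, int sockfd,
		      const struct sockaddr *addr, socklen_t addrlen)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_BIND;
	sqe->fd = sockfd;
	sqe->addr = (__u64)(unsigned long)addr;
	sqe->addr2 = addrlen;
}

/* Queue a listen(2): the backlog travels in sqe->len. */
static void prep_listen(struct io_uring_sqe *sqe, int sockfd, int backlog)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_LISTEN;
	sqe->fd = sockfd;
	sqe->len = backlog;
}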
diff --git a/io_uring/net.h b/io_uring/net.h
index 0eb1c1920fc9..52bfee05f06a 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -49,6 +49,12 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_bind(struct io_kiocb *req, unsigned int issue_flags);
+
+int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_listen(struct io_kiocb *req, unsigned int issue_flags);
+
void io_netmsg_cache_free(const void *entry);
#else
static inline void io_netmsg_cache_free(const void *entry)
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 2de5cca9504e..a2be3bbca5ff 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -495,6 +495,26 @@ const struct io_issue_def io_issue_defs[] = {
.prep = io_ftruncate_prep,
.issue = io_ftruncate,
},
+ [IORING_OP_BIND] = {
+#if defined(CONFIG_NET)
+ .needs_file = 1,
+ .prep = io_bind_prep,
+ .issue = io_bind,
+ .async_size = sizeof(struct io_async_msghdr),
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_LISTEN] = {
+#if defined(CONFIG_NET)
+ .needs_file = 1,
+ .prep = io_listen_prep,
+ .issue = io_listen,
+ .async_size = sizeof(struct io_async_msghdr),
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
};
const struct io_cold_def io_cold_defs[] = {
@@ -516,10 +536,12 @@ const struct io_cold_def io_cold_defs[] = {
},
[IORING_OP_READ_FIXED] = {
.name = "READ_FIXED",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_WRITE_FIXED] = {
.name = "WRITE_FIXED",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_POLL_ADD] = {
@@ -582,10 +604,12 @@ const struct io_cold_def io_cold_defs[] = {
},
[IORING_OP_READ] = {
.name = "READ",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_WRITE] = {
.name = "WRITE",
+ .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_FADVISE] = {
@@ -692,6 +716,7 @@ const struct io_cold_def io_cold_defs[] = {
},
[IORING_OP_READ_MULTISHOT] = {
.name = "READ_MULTISHOT",
+ .cleanup = io_readv_writev_cleanup,
},
[IORING_OP_WAITID] = {
.name = "WAITID",
@@ -711,6 +736,12 @@ const struct io_cold_def io_cold_defs[] = {
[IORING_OP_FTRUNCATE] = {
.name = "FTRUNCATE",
},
+ [IORING_OP_BIND] = {
+ .name = "BIND",
+ },
+ [IORING_OP_LISTEN] = {
+ .name = "LISTEN",
+ },
};
const char *io_uring_get_opcode(u8 opcode)
@@ -720,6 +751,14 @@ const char *io_uring_get_opcode(u8 opcode)
return "INVALID";
}
+bool io_uring_op_supported(u8 opcode)
+{
+ if (opcode < IORING_OP_LAST &&
+ io_issue_defs[opcode].prep != io_eopnotsupp_prep)
+ return true;
+ return false;
+}
+
void __init io_uring_optable_init(void)
{
int i;
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
index 7ee6f5aa90aa..14456436ff74 100644
--- a/io_uring/opdef.h
+++ b/io_uring/opdef.h
@@ -17,8 +17,6 @@ struct io_issue_def {
unsigned poll_exclusive : 1;
/* op supports buffer selection */
unsigned buffer_select : 1;
- /* opcode is not supported by this kernel */
- unsigned not_supported : 1;
/* skip auditing */
unsigned audit_skip : 1;
/* supports ioprio */
@@ -47,5 +45,7 @@ struct io_cold_def {
extern const struct io_issue_def io_issue_defs[];
extern const struct io_cold_def io_cold_defs[];
+bool io_uring_op_supported(u8 opcode);
+
void io_uring_optable_init(void);
#endif
diff --git a/io_uring/register.c b/io_uring/register.c
index ef8c908346a4..e3c20be5a198 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -27,65 +27,11 @@
#include "cancel.h"
#include "kbuf.h"
#include "napi.h"
+#include "eventfd.h"
#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
IORING_REGISTER_LAST + IORING_OP_LAST)
-static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
- unsigned int eventfd_async)
-{
- struct io_ev_fd *ev_fd;
- __s32 __user *fds = arg;
- int fd;
-
- ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
- lockdep_is_held(&ctx->uring_lock));
- if (ev_fd)
- return -EBUSY;
-
- if (copy_from_user(&fd, fds, sizeof(*fds)))
- return -EFAULT;
-
- ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
- if (!ev_fd)
- return -ENOMEM;
-
- ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
- if (IS_ERR(ev_fd->cq_ev_fd)) {
- int ret = PTR_ERR(ev_fd->cq_ev_fd);
- kfree(ev_fd);
- return ret;
- }
-
- spin_lock(&ctx->completion_lock);
- ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
- spin_unlock(&ctx->completion_lock);
-
- ev_fd->eventfd_async = eventfd_async;
- ctx->has_evfd = true;
- rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
- atomic_set(&ev_fd->refs, 1);
- atomic_set(&ev_fd->ops, 0);
- return 0;
-}
-
-int io_eventfd_unregister(struct io_ring_ctx *ctx)
-{
- struct io_ev_fd *ev_fd;
-
- ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
- lockdep_is_held(&ctx->uring_lock));
- if (ev_fd) {
- ctx->has_evfd = false;
- rcu_assign_pointer(ctx->io_ev_fd, NULL);
- if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
- call_rcu(&ev_fd->rcu, io_eventfd_ops);
- return 0;
- }
-
- return -ENXIO;
-}
-
static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
@@ -93,9 +39,10 @@ static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
size_t size;
int i, ret;
+ if (nr_args > IORING_OP_LAST)
+ nr_args = IORING_OP_LAST;
+
size = struct_size(p, ops, nr_args);
- if (size == SIZE_MAX)
- return -EOVERFLOW;
p = kzalloc(size, GFP_KERNEL);
if (!p)
return -ENOMEM;
@@ -108,12 +55,10 @@ static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
goto out;
p->last_op = IORING_OP_LAST - 1;
- if (nr_args > IORING_OP_LAST)
- nr_args = IORING_OP_LAST;
for (i = 0; i < nr_args; i++) {
p->ops[i].op = i;
- if (!io_issue_defs[i].not_supported)
+ if (io_uring_op_supported(i))
p->ops[i].flags = IO_URING_OP_SUPPORTED;
}
p->ops_len = i;
@@ -355,8 +300,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
}
if (sqd) {
+ mutex_unlock(&ctx->uring_lock);
mutex_unlock(&sqd->lock);
io_put_sq_data(sqd);
+ mutex_lock(&ctx->uring_lock);
}
if (copy_to_user(arg, new_count, sizeof(new_count)))
@@ -380,8 +327,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
return 0;
err:
if (sqd) {
+ mutex_unlock(&ctx->uring_lock);
mutex_unlock(&sqd->lock);
io_put_sq_data(sqd);
+ mutex_lock(&ctx->uring_lock);
}
return ret;
}
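Whether a given opcode (including the new BIND/LISTEN ones) is available can be queried through IORING_REGISTER_PROBE, which this rework now answers via io_uring_op_supported(). A short liburing sketch; the IORING_OP_BIND constant assumes headers that define it:

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring_probe *probe;

	/* Allocates and fills a probe covering all opcodes the kernel knows. */
	probe = io_uring_get_probe();
	if (!probe) {
		fprintf(stderr, "probe failed (kernel too old?)\n");
		return 1;
	}
	printf("IORING_OP_BIND supported: %s\n",
	       io_uring_opcode_supported(probe, IORING_OP_BIND) ? "yes" : "no");
	io_uring_free_probe(probe);
	return 0;
}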
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 65417c9553b1..a860516bf448 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -85,31 +85,6 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
return 0;
}
-static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
- void __user *arg, unsigned index)
-{
- struct iovec __user *src;
-
-#ifdef CONFIG_COMPAT
- if (ctx->compat) {
- struct compat_iovec __user *ciovs;
- struct compat_iovec ciov;
-
- ciovs = (struct compat_iovec __user *) arg;
- if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
- return -EFAULT;
-
- dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
- dst->iov_len = ciov.iov_len;
- return 0;
- }
-#endif
- src = (struct iovec __user *) arg;
- if (copy_from_user(dst, &src[index], sizeof(*dst)))
- return -EFAULT;
- return 0;
-}
-
static int io_buffer_validate(struct iovec *iov)
{
unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
@@ -249,6 +224,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
ret = io_run_task_work_sig(ctx);
if (ret < 0) {
+ finish_wait(&ctx->rsrc_quiesce_wq, &we);
mutex_lock(&ctx->uring_lock);
if (list_empty(&ctx->rsrc_ref_list))
ret = 0;
@@ -256,7 +232,6 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
}
schedule();
- __set_current_state(TASK_RUNNING);
mutex_lock(&ctx->uring_lock);
ret = 0;
} while (!list_empty(&ctx->rsrc_ref_list));
@@ -419,8 +394,9 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update2 *up,
unsigned int nr_args)
{
+ struct iovec __user *uvec = u64_to_user_ptr(up->data);
u64 __user *tags = u64_to_user_ptr(up->tags);
- struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
+ struct iovec fast_iov, *iov;
struct page *last_hpage = NULL;
__u32 done;
int i, err;
@@ -434,21 +410,23 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
struct io_mapped_ubuf *imu;
u64 tag = 0;
- err = io_copy_iov(ctx, &iov, iovs, done);
- if (err)
+ iov = iovec_from_user(&uvec[done], 1, 1, &fast_iov, ctx->compat);
+ if (IS_ERR(iov)) {
+ err = PTR_ERR(iov);
break;
+ }
if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
err = -EFAULT;
break;
}
- err = io_buffer_validate(&iov);
+ err = io_buffer_validate(iov);
if (err)
break;
- if (!iov.iov_base && tag) {
+ if (!iov->iov_base && tag) {
err = -EINVAL;
break;
}
- err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
+ err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
if (err)
break;
@@ -970,8 +948,9 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
{
struct page *last_hpage = NULL;
struct io_rsrc_data *data;
+ struct iovec fast_iov, *iov = &fast_iov;
+ const struct iovec __user *uvec = (struct iovec * __user) arg;
int i, ret;
- struct iovec iov;
BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
@@ -988,24 +967,27 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
+ if (!arg)
+ memset(iov, 0, sizeof(*iov));
+
for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
if (arg) {
- ret = io_copy_iov(ctx, &iov, arg, i);
- if (ret)
+ iov = iovec_from_user(&uvec[i], 1, 1, &fast_iov, ctx->compat);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
break;
- ret = io_buffer_validate(&iov);
+ }
+ ret = io_buffer_validate(iov);
if (ret)
break;
- } else {
- memset(&iov, 0, sizeof(iov));
}
- if (!iov.iov_base && *io_get_tag_slot(data, i)) {
+ if (!iov->iov_base && *io_get_tag_slot(data, i)) {
ret = -EINVAL;
break;
}
- ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
+ ret = io_sqe_buffer_register(ctx, iov, &ctx->user_bufs[i],
&last_hpage);
if (ret)
break;
@@ -1067,7 +1049,6 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
* branch doesn't expect non PAGE_SIZE'd chunks.
*/
iter->bvec = bvec;
- iter->nr_segs = bvec->bv_len;
iter->count -= offset;
iter->iov_offset = offset;
} else {
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1a2128459cb4..c004d21e2f12 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -772,7 +772,7 @@ static bool need_complete_io(struct io_kiocb *req)
S_ISBLK(file_inode(req->file)->i_mode);
}
-static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
+static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct kiocb *kiocb = &rw->kiocb;
@@ -787,7 +787,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
req->flags |= io_file_get_flags(file);
kiocb->ki_flags = file->f_iocb_flags;
- ret = kiocb_set_rw_flags(kiocb, rw->flags);
+ ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
if (unlikely(ret))
return ret;
kiocb->ki_flags |= IOCB_ALLOC_CACHE;
@@ -832,8 +832,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(ret < 0))
return ret;
}
-
- ret = io_rw_init_file(req, FMODE_READ);
+ ret = io_rw_init_file(req, FMODE_READ, READ);
if (unlikely(ret))
return ret;
req->cqe.res = iov_iter_count(&io->iter);
@@ -1013,7 +1012,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
ssize_t ret, ret2;
loff_t *ppos;
- ret = io_rw_init_file(req, FMODE_WRITE);
+ ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
if (unlikely(ret))
return ret;
req->cqe.res = iov_iter_count(&io->iter);
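Passing the read/write direction into kiocb_set_rw_flags() is what lets per-call flags such as RWF_ATOMIC (the userspace side of the statx atomic-write fields earlier in this patch) be validated for writes. A hedged userspace counterpart using pwritev2(2); atomic_pwrite is a hypothetical helper name and assumes a file system and device that advertise STATX_ATTR_WRITE_ATOMIC plus headers that define RWF_ATOMIC:

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/* Issue one power-fail-atomic write of len bytes at offset. len should
 * lie within the statx-reported atomic write unit min/max and be
 * suitably aligned, otherwise the kernel rejects the request. */
static ssize_t atomic_pwrite(int fd, const void *buf, size_t len, off_t offset)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

	return pwritev2(fd, &iov, 1, offset, RWF_ATOMIC);
}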
diff --git a/io_uring/statx.c b/io_uring/statx.c
index abb874209caa..f7f9b202eec0 100644
--- a/io_uring/statx.c
+++ b/io_uring/statx.c
@@ -37,8 +37,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sx->flags = READ_ONCE(sqe->statx_flags);
sx->filename = getname_flags(path,
- getname_statx_lookup_flags(sx->flags),
- NULL);
+ getname_statx_lookup_flags(sx->flags));
if (IS_ERR(sx->filename)) {
int ret = PTR_ERR(sx->filename);
diff --git a/io_uring/xattr.c b/io_uring/xattr.c
index 44905b82eea8..6cf41c3bc369 100644
--- a/io_uring/xattr.c
+++ b/io_uring/xattr.c
@@ -96,7 +96,7 @@ int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
- ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
+ ix->filename = getname_flags(path, LOOKUP_FOLLOW);
if (IS_ERR(ix->filename)) {
ret = PTR_ERR(ix->filename);
ix->filename = NULL;
@@ -189,7 +189,7 @@ int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
- ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
+ ix->filename = getname_flags(path, LOOKUP_FOLLOW);
if (IS_ERR(ix->filename)) {
ret = PTR_ERR(ix->filename);
ix->filename = NULL;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 5eea4dc0509e..a7cbd69efbef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -903,7 +903,8 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
audit_mq_open(oflag, mode, attr);
- if (IS_ERR(name = getname(u_name)))
+ name = getname(u_name);
+ if (IS_ERR(name))
return PTR_ERR(name);
fd = get_unused_fd_flags(O_CLOEXEC);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index be8c680121e4..d6ef4f4f9cba 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -529,7 +529,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
entry->rule.buflen += f_val;
f->lsm_str = str;
err = security_audit_rule_init(f->type, f->op, str,
- (void **)&f->lsm_rule);
+ (void **)&f->lsm_rule,
+ GFP_KERNEL);
/* Keep currently invalid fields around in case they
* become valid after a policy reload. */
if (err == -EINVAL) {
@@ -799,7 +800,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
/* our own (refreshed) copy of lsm_rule */
ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
- (void **)&df->lsm_rule);
+ (void **)&df->lsm_rule, GFP_KERNEL);
/* Keep currently invalid fields around in case they
* become valid after a policy reload. */
if (ret == -EINVAL) {
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 7eb9ad3a3ae6..0291eef9ce92 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -50,5 +50,11 @@ endif
obj-$(CONFIG_BPF_PRELOAD) += preload/
obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
-$(obj)/relo_core.o: $(srctree)/tools/lib/bpf/relo_core.c FORCE
+obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
+obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
+
+# Some source files are common to libbpf.
+vpath %.c $(srctree)/kernel/bpf:$(srctree)/tools/lib/bpf
+
+$(obj)/%.o: %.c FORCE
$(call if_changed_rule,cc_o_c)
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 583ee4fe48ef..e52b3ad231b9 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -212,6 +212,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
struct vma_list {
struct vm_area_struct *vma;
struct list_head head;
+ atomic_t mmap_count;
};
static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@@ -221,20 +222,30 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
vml = kmalloc(sizeof(*vml), GFP_KERNEL);
if (!vml)
return -ENOMEM;
+ atomic_set(&vml->mmap_count, 1);
vma->vm_private_data = vml;
vml->vma = vma;
list_add(&vml->head, &arena->vma_list);
return 0;
}
+static void arena_vm_open(struct vm_area_struct *vma)
+{
+ struct vma_list *vml = vma->vm_private_data;
+
+ atomic_inc(&vml->mmap_count);
+}
+
static void arena_vm_close(struct vm_area_struct *vma)
{
struct bpf_map *map = vma->vm_file->private_data;
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
- struct vma_list *vml;
+ struct vma_list *vml = vma->vm_private_data;
+ if (!atomic_dec_and_test(&vml->mmap_count))
+ return;
guard(mutex)(&arena->lock);
- vml = vma->vm_private_data;
+ /* update linked list under lock */
list_del(&vml->head);
vma->vm_private_data = NULL;
kfree(vml);
@@ -287,6 +298,7 @@ out:
}
static const struct vm_operations_struct arena_vm_ops = {
+ .open = arena_vm_open,
.close = arena_vm_close,
.fault = arena_vm_fault,
};
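A minimal, self-contained sketch of the refcount pattern the arena change above adopts (the demo_* names are invented for illustration; only the atomic open/close protocol mirrors the hunk): copies of a VMA created by fork() or split share vm_private_data, so every ->open() takes a reference and only the final ->close() frees the shared state, instead of the first close freeing data other copies still point at.

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/atomic.h>

	struct demo_vma_priv {
		atomic_t mmap_count;	/* one reference per VMA that points at us */
	};

	static void demo_vm_open(struct vm_area_struct *vma)
	{
		struct demo_vma_priv *p = vma->vm_private_data;

		/* called when the VMA is duplicated (fork) or split */
		atomic_inc(&p->mmap_count);
	}

	static void demo_vm_close(struct vm_area_struct *vma)
	{
		struct demo_vma_priv *p = vma->vm_private_data;

		/* only the last unmap tears the shared state down */
		if (!atomic_dec_and_test(&p->mmap_count))
			return;
		kfree(p);
	}

	static const struct vm_operations_struct demo_vm_ops = {
		.open  = demo_vm_open,
		.close = demo_vm_close,
	};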
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 976cb258a0ed..c938dea5ddbf 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -782,8 +782,8 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
nbuckets = max_t(u32, 2, nbuckets);
smap->bucket_log = ilog2(nbuckets);
- smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
- nbuckets, GFP_USER | __GFP_NOWARN);
+ smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
+ sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
if (!smap->buckets) {
err = -ENOMEM;
goto free_smap;
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 68240c3c6e7d..08a338e1f231 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -280,6 +280,7 @@ BTF_ID(func, bpf_lsm_cred_prepare)
BTF_ID(func, bpf_lsm_file_ioctl)
BTF_ID(func, bpf_lsm_file_lock)
BTF_ID(func, bpf_lsm_file_open)
+BTF_ID(func, bpf_lsm_file_post_open)
BTF_ID(func, bpf_lsm_file_receive)
BTF_ID(func, bpf_lsm_inode_create)
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 86c7884abaf8..0d515ec57aa5 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -12,6 +12,7 @@
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
+#include <linux/poll.h>
struct bpf_struct_ops_value {
struct bpf_struct_ops_common_value common;
@@ -56,6 +57,7 @@ struct bpf_struct_ops_map {
struct bpf_struct_ops_link {
struct bpf_link link;
struct bpf_map __rcu *map;
+ wait_queue_head_t wait_hup;
};
static DEFINE_MUTEX(update_mutex);
@@ -571,7 +573,7 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
}
size = arch_prepare_bpf_trampoline(NULL, image + image_off,
- image + PAGE_SIZE,
+ image + image_off + size,
model, flags, tlinks, stub_func);
if (size <= 0) {
if (image != *_image)
@@ -757,7 +759,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
goto unlock;
}
- err = st_ops->reg(kdata);
+ err = st_ops->reg(kdata, NULL);
if (likely(!err)) {
/* This refcnt increment on the map here after
* 'st_ops->reg()' is secure since the state of the
@@ -805,7 +807,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
BPF_STRUCT_OPS_STATE_TOBEFREE);
switch (prev_state) {
case BPF_STRUCT_OPS_STATE_INUSE:
- st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
bpf_map_put(map);
return 0;
case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -1057,10 +1059,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
st_map = (struct bpf_struct_ops_map *)
rcu_dereference_protected(st_link->map, true);
if (st_map) {
- /* st_link->map can be NULL if
- * bpf_struct_ops_link_create() fails to register.
- */
- st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
bpf_map_put(&st_map->map);
}
kfree(st_link);
@@ -1075,7 +1074,8 @@ static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
st_link = container_of(link, struct bpf_struct_ops_link, link);
rcu_read_lock();
map = rcu_dereference(st_link->map);
- seq_printf(seq, "map_id:\t%d\n", map->id);
+ if (map)
+ seq_printf(seq, "map_id:\t%d\n", map->id);
rcu_read_unlock();
}
@@ -1088,7 +1088,8 @@ static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
st_link = container_of(link, struct bpf_struct_ops_link, link);
rcu_read_lock();
map = rcu_dereference(st_link->map);
- info->struct_ops.map_id = map->id;
+ if (map)
+ info->struct_ops.map_id = map->id;
rcu_read_unlock();
return 0;
}
@@ -1113,6 +1114,10 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
mutex_lock(&update_mutex);
old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
+ if (!old_map) {
+ err = -ENOLINK;
+ goto err_out;
+ }
if (expected_old_map && old_map != expected_old_map) {
err = -EPERM;
goto err_out;
@@ -1125,7 +1130,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
goto err_out;
}
- err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
+ err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
if (err)
goto err_out;
@@ -1139,11 +1144,53 @@ err_out:
return err;
}
+static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
+{
+ struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
+ struct bpf_struct_ops_map *st_map;
+ struct bpf_map *map;
+
+ mutex_lock(&update_mutex);
+
+ map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
+ if (!map) {
+ mutex_unlock(&update_mutex);
+ return 0;
+ }
+ st_map = container_of(map, struct bpf_struct_ops_map, map);
+
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
+
+ RCU_INIT_POINTER(st_link->map, NULL);
+ /* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
+ * bpf_map_inc() in bpf_struct_ops_map_link_update().
+ */
+ bpf_map_put(&st_map->map);
+
+ mutex_unlock(&update_mutex);
+
+ wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);
+
+ return 0;
+}
+
+static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
+ struct poll_table_struct *pts)
+{
+ struct bpf_struct_ops_link *st_link = file->private_data;
+
+ poll_wait(file, &st_link->wait_hup, pts);
+
+ return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
+}
+
static const struct bpf_link_ops bpf_struct_ops_map_lops = {
.dealloc = bpf_struct_ops_map_link_dealloc,
+ .detach = bpf_struct_ops_map_link_detach,
.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
.update_map = bpf_struct_ops_map_link_update,
+ .poll = bpf_struct_ops_map_link_poll,
};
int bpf_struct_ops_link_create(union bpf_attr *attr)
@@ -1176,13 +1223,21 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
if (err)
goto err_out;
- err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
+ init_waitqueue_head(&link->wait_hup);
+
+ /* Hold the update_mutex such that the subsystem cannot
+ * do link->ops->detach() before the link is fully initialized.
+ */
+ mutex_lock(&update_mutex);
+ err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
if (err) {
+ mutex_unlock(&update_mutex);
bpf_link_cleanup(&link_primer);
link = NULL;
goto err_out;
}
RCU_INIT_POINTER(link->map, map);
+ mutex_unlock(&update_mutex);
return bpf_link_settle(&link_primer);
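A hedged user-space sketch of what the new .detach/.poll pair enables, assuming the generic bpf link fd dispatches poll() to this new hook (that wiring is not shown in this hunk): a daemon holding a struct_ops link fd can sleep in poll() and is woken with (E)POLLHUP once the subsystem detaches the link, rather than re-reading fdinfo to notice the map going away.

	#include <poll.h>
	#include <stdio.h>

	/* link_fd is a bpf_link fd returned by BPF_LINK_CREATE for a struct_ops map */
	static int wait_for_struct_ops_detach(int link_fd)
	{
		struct pollfd pfd = { .fd = link_fd, .events = POLLHUP };

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		if (pfd.revents & POLLHUP)
			printf("struct_ops link was detached by the kernel\n");
		return 0;
	}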
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 821063660d9f..520f49f422fe 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -274,6 +274,7 @@ struct btf {
u32 start_str_off; /* first string offset (0 for base BTF) */
char name[MODULE_NAME_LEN];
bool kernel_btf;
+ __u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
};
enum verifier_phase {
@@ -414,7 +415,7 @@ const char *btf_type_str(const struct btf_type *t)
struct btf_show {
u64 flags;
void *target; /* target of show operation (seq file, buffer) */
- void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
+ __printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
const struct btf *btf;
/* below are used during iteration */
struct {
@@ -530,6 +531,11 @@ static bool btf_type_is_decl_tag_target(const struct btf_type *t)
btf_type_is_var(t) || btf_type_is_typedef(t);
}
+bool btf_is_vmlinux(const struct btf *btf)
+{
+ return btf->kernel_btf && !btf->base_btf;
+}
+
u32 btf_nr_types(const struct btf *btf)
{
u32 total = 0;
@@ -772,7 +778,7 @@ static bool __btf_name_char_ok(char c, bool first)
return true;
}
-static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
+const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{
while (offset < btf->start_str_off)
btf = btf->base_btf;
@@ -1670,14 +1676,8 @@ static void btf_free_kfunc_set_tab(struct btf *btf)
if (!tab)
return;
- /* For module BTF, we directly assign the sets being registered, so
- * there is nothing to free except kfunc_set_tab.
- */
- if (btf_is_module(btf))
- goto free_tab;
for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
kfree(tab->sets[hook]);
-free_tab:
kfree(tab);
btf->kfunc_set_tab = NULL;
}
@@ -1735,7 +1735,12 @@ static void btf_free(struct btf *btf)
kvfree(btf->types);
kvfree(btf->resolved_sizes);
kvfree(btf->resolved_ids);
- kvfree(btf->data);
+ /* vmlinux does not allocate btf->data, it simply points it at
+ * __start_BTF.
+ */
+ if (!btf_is_vmlinux(btf))
+ kvfree(btf->data);
+ kvfree(btf->base_id_map);
kfree(btf);
}
@@ -1764,6 +1769,23 @@ void btf_put(struct btf *btf)
}
}
+struct btf *btf_base_btf(const struct btf *btf)
+{
+ return btf->base_btf;
+}
+
+const struct btf_header *btf_header(const struct btf *btf)
+{
+ return &btf->hdr;
+}
+
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
+{
+ btf->base_btf = (struct btf *)base_btf;
+ btf->start_id = btf_nr_types(base_btf);
+ btf->start_str_off = base_btf->hdr.str_len;
+}
+
static int env_resolve_init(struct btf_verifier_env *env)
{
struct btf *btf = env->btf;
@@ -3442,10 +3464,12 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
goto end; \
}
-static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
+static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
+ u32 field_mask, u32 *seen_mask,
int *align, int *sz)
{
int type = 0;
+ const char *name = __btf_name_by_offset(btf, var_type->name_off);
if (field_mask & BPF_SPIN_LOCK) {
if (!strcmp(name, "bpf_spin_lock")) {
@@ -3481,7 +3505,7 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
field_mask_test_name(BPF_REFCOUNT, "bpf_refcount");
/* Only return BPF_KPTR when all other types with matchable names fail */
- if (field_mask & BPF_KPTR) {
+ if (field_mask & BPF_KPTR && !__btf_type_is_struct(var_type)) {
type = BPF_KPTR_REF;
goto end;
}
@@ -3494,140 +3518,232 @@ end:
#undef field_mask_test_name
+/* Repeat a number of fields a specified number of times.
+ *
+ * Copy the fields starting from the first field and repeat them
+ * repeat_cnt times. The fields are repeated by adding
+ * (i + 1) * elem_size
+ * to the offset of each field, where i is the repeat index and
+ * elem_size is the size of an element.
+ */
+static int btf_repeat_fields(struct btf_field_info *info,
+ u32 field_cnt, u32 repeat_cnt, u32 elem_size)
+{
+ u32 i, j;
+ u32 cur;
+
+ /* Ensure not repeating fields that should not be repeated. */
+ for (i = 0; i < field_cnt; i++) {
+ switch (info[i].type) {
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ case BPF_LIST_HEAD:
+ case BPF_RB_ROOT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ cur = field_cnt;
+ for (i = 0; i < repeat_cnt; i++) {
+ memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
+ for (j = 0; j < field_cnt; j++)
+ info[cur++].off += (i + 1) * elem_size;
+ }
+
+ return 0;
+}
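To make the offset arithmetic above concrete, here is an illustrative map-value layout in BPF program C (not taken from the patch; the __kptr annotation is the usual bpf_helpers.h type tag, and only kptr/list_head/rb_root fields are repeatable per the check above):

	struct elem {
		struct task_struct __kptr *task;	/* special field at offset 0 */
		__u64 payload;
	};

	struct map_value {
		struct elem arr[3];
	};

	/* btf_find_field_one() flattens arr down to struct elem and finds one
	 * kptr at offset 0; btf_repeat_fields() then copies that field info
	 * twice, at 0 + 1 * sizeof(struct elem) and 0 + 2 * sizeof(struct elem),
	 * so each of the three array elements gets its own kptr record.
	 */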
+
static int btf_find_struct_field(const struct btf *btf,
const struct btf_type *t, u32 field_mask,
- struct btf_field_info *info, int info_cnt)
+ struct btf_field_info *info, int info_cnt,
+ u32 level);
+
+/* Find special fields in the struct type of a field.
+ *
+ * This function is used to find fields of special types that are not
+ * global variables or direct fields of a struct type. It also handles
+ * repetition when the struct is the element type of an array.
+ */
+static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
+ u32 off, u32 nelems,
+ u32 field_mask, struct btf_field_info *info,
+ int info_cnt, u32 level)
{
- int ret, idx = 0, align, sz, field_type;
- const struct btf_member *member;
+ int ret, err, i;
+
+ level++;
+ if (level >= MAX_RESOLVE_DEPTH)
+ return -E2BIG;
+
+ ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level);
+
+ if (ret <= 0)
+ return ret;
+
+ /* Shift the offsets of the nested struct fields so they are
+ * relative to the container.
+ */
+ for (i = 0; i < ret; i++)
+ info[i].off += off;
+
+ if (nelems > 1) {
+ err = btf_repeat_fields(info, ret, nelems - 1, t->size);
+ if (err == 0)
+ ret *= nelems;
+ else
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int btf_find_field_one(const struct btf *btf,
+ const struct btf_type *var,
+ const struct btf_type *var_type,
+ int var_idx,
+ u32 off, u32 expected_size,
+ u32 field_mask, u32 *seen_mask,
+ struct btf_field_info *info, int info_cnt,
+ u32 level)
+{
+ int ret, align, sz, field_type;
struct btf_field_info tmp;
+ const struct btf_array *array;
+ u32 i, nelems = 1;
+
+ /* Walk into array types to find the element type and the number of
+ * elements in the (flattened) array.
+ */
+ for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
+ array = btf_array(var_type);
+ nelems *= array->nelems;
+ var_type = btf_type_by_id(btf, array->type);
+ }
+ if (i == MAX_RESOLVE_DEPTH)
+ return -E2BIG;
+ if (nelems == 0)
+ return 0;
+
+ field_type = btf_get_field_type(btf, var_type,
+ field_mask, seen_mask, &align, &sz);
+ /* Look into variables of struct types */
+ if (!field_type && __btf_type_is_struct(var_type)) {
+ sz = var_type->size;
+ if (expected_size && expected_size != sz * nelems)
+ return 0;
+ ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
+ &info[0], info_cnt, level);
+ return ret;
+ }
+
+ if (field_type == 0)
+ return 0;
+ if (field_type < 0)
+ return field_type;
+
+ if (expected_size && expected_size != sz * nelems)
+ return 0;
+ if (off % align)
+ return 0;
+
+ switch (field_type) {
+ case BPF_SPIN_LOCK:
+ case BPF_TIMER:
+ case BPF_WORKQUEUE:
+ case BPF_LIST_NODE:
+ case BPF_RB_NODE:
+ case BPF_REFCOUNT:
+ ret = btf_find_struct(btf, var_type, off, sz, field_type,
+ info_cnt ? &info[0] : &tmp);
+ if (ret < 0)
+ return ret;
+ break;
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ ret = btf_find_kptr(btf, var_type, off, sz,
+ info_cnt ? &info[0] : &tmp);
+ if (ret < 0)
+ return ret;
+ break;
+ case BPF_LIST_HEAD:
+ case BPF_RB_ROOT:
+ ret = btf_find_graph_root(btf, var, var_type,
+ var_idx, off, sz,
+ info_cnt ? &info[0] : &tmp,
+ field_type);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ if (ret == BTF_FIELD_IGNORE)
+ return 0;
+ if (nelems > info_cnt)
+ return -E2BIG;
+ if (nelems > 1) {
+ ret = btf_repeat_fields(info, 1, nelems - 1, sz);
+ if (ret < 0)
+ return ret;
+ }
+ return nelems;
+}
+
+static int btf_find_struct_field(const struct btf *btf,
+ const struct btf_type *t, u32 field_mask,
+ struct btf_field_info *info, int info_cnt,
+ u32 level)
+{
+ int ret, idx = 0;
+ const struct btf_member *member;
u32 i, off, seen_mask = 0;
for_each_member(i, t, member) {
const struct btf_type *member_type = btf_type_by_id(btf,
member->type);
- field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off),
- field_mask, &seen_mask, &align, &sz);
- if (field_type == 0)
- continue;
- if (field_type < 0)
- return field_type;
-
off = __btf_member_bit_offset(t, member);
if (off % 8)
/* valid C code cannot generate such BTF */
return -EINVAL;
off /= 8;
- if (off % align)
- continue;
- switch (field_type) {
- case BPF_SPIN_LOCK:
- case BPF_TIMER:
- case BPF_WORKQUEUE:
- case BPF_LIST_NODE:
- case BPF_RB_NODE:
- case BPF_REFCOUNT:
- ret = btf_find_struct(btf, member_type, off, sz, field_type,
- idx < info_cnt ? &info[idx] : &tmp);
- if (ret < 0)
- return ret;
- break;
- case BPF_KPTR_UNREF:
- case BPF_KPTR_REF:
- case BPF_KPTR_PERCPU:
- ret = btf_find_kptr(btf, member_type, off, sz,
- idx < info_cnt ? &info[idx] : &tmp);
- if (ret < 0)
- return ret;
- break;
- case BPF_LIST_HEAD:
- case BPF_RB_ROOT:
- ret = btf_find_graph_root(btf, t, member_type,
- i, off, sz,
- idx < info_cnt ? &info[idx] : &tmp,
- field_type);
- if (ret < 0)
- return ret;
- break;
- default:
- return -EFAULT;
- }
-
- if (ret == BTF_FIELD_IGNORE)
- continue;
- if (idx >= info_cnt)
- return -E2BIG;
- ++idx;
+ ret = btf_find_field_one(btf, t, member_type, i,
+ off, 0,
+ field_mask, &seen_mask,
+ &info[idx], info_cnt - idx, level);
+ if (ret < 0)
+ return ret;
+ idx += ret;
}
return idx;
}
static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
u32 field_mask, struct btf_field_info *info,
- int info_cnt)
+ int info_cnt, u32 level)
{
- int ret, idx = 0, align, sz, field_type;
+ int ret, idx = 0;
const struct btf_var_secinfo *vsi;
- struct btf_field_info tmp;
u32 i, off, seen_mask = 0;
for_each_vsi(i, t, vsi) {
const struct btf_type *var = btf_type_by_id(btf, vsi->type);
const struct btf_type *var_type = btf_type_by_id(btf, var->type);
- field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off),
- field_mask, &seen_mask, &align, &sz);
- if (field_type == 0)
- continue;
- if (field_type < 0)
- return field_type;
-
off = vsi->offset;
- if (vsi->size != sz)
- continue;
- if (off % align)
- continue;
-
- switch (field_type) {
- case BPF_SPIN_LOCK:
- case BPF_TIMER:
- case BPF_WORKQUEUE:
- case BPF_LIST_NODE:
- case BPF_RB_NODE:
- case BPF_REFCOUNT:
- ret = btf_find_struct(btf, var_type, off, sz, field_type,
- idx < info_cnt ? &info[idx] : &tmp);
- if (ret < 0)
- return ret;
- break;
- case BPF_KPTR_UNREF:
- case BPF_KPTR_REF:
- case BPF_KPTR_PERCPU:
- ret = btf_find_kptr(btf, var_type, off, sz,
- idx < info_cnt ? &info[idx] : &tmp);
- if (ret < 0)
- return ret;
- break;
- case BPF_LIST_HEAD:
- case BPF_RB_ROOT:
- ret = btf_find_graph_root(btf, var, var_type,
- -1, off, sz,
- idx < info_cnt ? &info[idx] : &tmp,
- field_type);
- if (ret < 0)
- return ret;
- break;
- default:
- return -EFAULT;
- }
-
- if (ret == BTF_FIELD_IGNORE)
- continue;
- if (idx >= info_cnt)
- return -E2BIG;
- ++idx;
+ ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
+ field_mask, &seen_mask,
+ &info[idx], info_cnt - idx,
+ level);
+ if (ret < 0)
+ return ret;
+ idx += ret;
}
return idx;
}
@@ -3637,9 +3753,9 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t,
int info_cnt)
{
if (__btf_type_is_struct(t))
- return btf_find_struct_field(btf, t, field_mask, info, info_cnt);
+ return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
else if (btf_type_is_datasec(t))
- return btf_find_datasec_var(btf, t, field_mask, info, info_cnt);
+ return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
return -EINVAL;
}
@@ -5726,6 +5842,15 @@ static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
return ctx_type->type;
}
+bool btf_is_projection_of(const char *pname, const char *tname)
+{
+ if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
+ return true;
+ if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
+ return true;
+ return false;
+}
+
bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, enum bpf_prog_type prog_type,
int arg)
@@ -5788,9 +5913,7 @@ again:
* int socket_filter_bpf_prog(struct __sk_buff *skb)
* { // no fields of skb are ever used }
*/
- if (strcmp(ctx_tname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
- return true;
- if (strcmp(ctx_tname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
+ if (btf_is_projection_of(ctx_tname, tname))
return true;
if (strcmp(ctx_tname, tname)) {
/* bpf_user_pt_regs_t is a typedef, so resolve it to
@@ -5982,23 +6105,15 @@ int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty
BTF_ID_LIST(bpf_ctx_convert_btf_id)
BTF_ID(struct, bpf_ctx_convert)
-struct btf *btf_parse_vmlinux(void)
+static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
+ void *data, unsigned int data_size)
{
- struct btf_verifier_env *env = NULL;
- struct bpf_verifier_log *log;
struct btf *btf = NULL;
int err;
if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF))
return ERR_PTR(-ENOENT);
- env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
- if (!env)
- return ERR_PTR(-ENOMEM);
-
- log = &env->log;
- log->level = BPF_LOG_KERNEL;
-
btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
if (!btf) {
err = -ENOMEM;
@@ -6006,10 +6121,10 @@ struct btf *btf_parse_vmlinux(void)
}
env->btf = btf;
- btf->data = __start_BTF;
- btf->data_size = __stop_BTF - __start_BTF;
+ btf->data = data;
+ btf->data_size = data_size;
btf->kernel_btf = true;
- snprintf(btf->name, sizeof(btf->name), "vmlinux");
+ snprintf(btf->name, sizeof(btf->name), "%s", name);
err = btf_parse_hdr(env);
if (err)
@@ -6029,20 +6144,11 @@ struct btf *btf_parse_vmlinux(void)
if (err)
goto errout;
- /* btf_parse_vmlinux() runs under bpf_verifier_lock */
- bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
-
refcount_set(&btf->refcnt, 1);
- err = btf_alloc_id(btf);
- if (err)
- goto errout;
-
- btf_verifier_env_free(env);
return btf;
errout:
- btf_verifier_env_free(env);
if (btf) {
kvfree(btf->types);
kfree(btf);
@@ -6050,19 +6156,61 @@ errout:
return ERR_PTR(err);
}
+struct btf *btf_parse_vmlinux(void)
+{
+ struct btf_verifier_env *env = NULL;
+ struct bpf_verifier_log *log;
+ struct btf *btf;
+ int err;
+
+ env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
+ if (!env)
+ return ERR_PTR(-ENOMEM);
+
+ log = &env->log;
+ log->level = BPF_LOG_KERNEL;
+ btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
+ if (IS_ERR(btf))
+ goto err_out;
+
+ /* btf_parse_vmlinux() runs under bpf_verifier_lock */
+ bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
+ err = btf_alloc_id(btf);
+ if (err) {
+ btf_free(btf);
+ btf = ERR_PTR(err);
+ }
+err_out:
+ btf_verifier_env_free(env);
+ return btf;
+}
+
+/* If .BTF_ids section was created with distilled base BTF, both base and
+ * split BTF ids will need to be mapped to actual base/split ids for
+ * BTF now that it has been relocated.
+ */
+static __u32 btf_relocate_id(const struct btf *btf, __u32 id)
+{
+ if (!btf->base_btf || !btf->base_id_map)
+ return id;
+ return btf->base_id_map[id];
+}
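A concrete illustration of the remapping just added (the ids are invented): if a module was built against a distilled .BTF.base in which struct sk_buff happens to have id 42, then the ids baked into that module's kfunc id sets and dtor tables also say 42; once btf_relocate() has matched the distilled base against the running vmlinux and filled base_id_map, btf_relocate_id(btf, 42) returns whatever id struct sk_buff actually has in vmlinux BTF, and the hunks below apply exactly this translation when the sets are registered.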
+
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
-static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size)
+static struct btf *btf_parse_module(const char *module_name, const void *data,
+ unsigned int data_size, void *base_data,
+ unsigned int base_data_size)
{
+ struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL;
struct btf_verifier_env *env = NULL;
struct bpf_verifier_log *log;
- struct btf *btf = NULL, *base_btf;
- int err;
+ int err = 0;
- base_btf = bpf_get_btf_vmlinux();
- if (IS_ERR(base_btf))
- return base_btf;
- if (!base_btf)
+ vmlinux_btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(vmlinux_btf))
+ return vmlinux_btf;
+ if (!vmlinux_btf)
return ERR_PTR(-EINVAL);
env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
@@ -6072,6 +6220,16 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u
log = &env->log;
log->level = BPF_LOG_KERNEL;
+ if (base_data) {
+ base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size);
+ if (IS_ERR(base_btf)) {
+ err = PTR_ERR(base_btf);
+ goto errout;
+ }
+ } else {
+ base_btf = vmlinux_btf;
+ }
+
btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
if (!btf) {
err = -ENOMEM;
@@ -6111,12 +6269,22 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u
if (err)
goto errout;
+ if (base_btf != vmlinux_btf) {
+ err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
+ if (err)
+ goto errout;
+ btf_free(base_btf);
+ base_btf = vmlinux_btf;
+ }
+
btf_verifier_env_free(env);
refcount_set(&btf->refcnt, 1);
return btf;
errout:
btf_verifier_env_free(env);
+ if (base_btf != vmlinux_btf)
+ btf_free(base_btf);
if (btf) {
kvfree(btf->data);
kvfree(btf->types);
@@ -6693,7 +6861,7 @@ int btf_struct_access(struct bpf_verifier_log *log,
for (i = 0; i < rec->cnt; i++) {
struct btf_field *field = &rec->fields[i];
u32 offset = field->offset;
- if (off < offset + btf_field_type_size(field->type) && offset < off + size) {
+ if (off < offset + field->size && offset < off + size) {
bpf_log(log,
"direct access to %s is disallowed\n",
btf_field_type_name(field->type));
@@ -7370,8 +7538,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
}
-static void btf_seq_show(struct btf_show *show, const char *fmt,
- va_list args)
+__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
+ va_list args)
{
seq_vprintf((struct seq_file *)show->target, fmt, args);
}
@@ -7404,8 +7572,8 @@ struct btf_show_snprintf {
int len; /* length we would have written */
};
-static void btf_snprintf_show(struct btf_show *show, const char *fmt,
- va_list args)
+__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
+ va_list args)
{
struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
int len;
@@ -7669,7 +7837,8 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op,
err = -ENOMEM;
goto out;
}
- btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
+ btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
+ mod->btf_base_data, mod->btf_base_data_size);
if (IS_ERR(btf)) {
kfree(btf_mod);
if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
@@ -7993,7 +8162,7 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
bool add_filter = !!kset->filter;
struct btf_kfunc_set_tab *tab;
struct btf_id_set8 *set;
- u32 set_cnt;
+ u32 set_cnt, i;
int ret;
if (hook >= BTF_KFUNC_HOOK_MAX) {
@@ -8039,21 +8208,15 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
goto end;
}
- /* We don't need to allocate, concatenate, and sort module sets, because
- * only one is allowed per hook. Hence, we can directly assign the
- * pointer and return.
- */
- if (!vmlinux_set) {
- tab->sets[hook] = add_set;
- goto do_add_filter;
- }
-
/* In case of vmlinux sets, there may be more than one set being
* registered per hook. To create a unified set, we allocate a new set
* and concatenate all individual sets being registered. While each set
* is individually sorted, they may become unsorted when concatenated,
* hence re-sorting the final set again is required to make binary
* searching the set using btf_id_set8_contains function work.
+ *
+ * For module sets, we need to allocate as we may need to relocate
+ * BTF ids.
*/
set_cnt = set ? set->cnt : 0;
@@ -8083,11 +8246,14 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
/* Concatenate the two sets */
memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
+ /* Now that the set is copied, update with relocated BTF ids */
+ for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
+ set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
+
set->cnt += add_set->cnt;
sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
-do_add_filter:
if (add_filter) {
hook_filter = &tab->hook_filters[hook];
hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
@@ -8207,7 +8373,7 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
return PTR_ERR(btf);
for (i = 0; i < kset->set->cnt; i++) {
- ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id,
+ ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
kset->set->pairs[i].flags);
if (ret)
goto err_out;
@@ -8271,7 +8437,7 @@ static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc
u32 nr_args, i;
for (i = 0; i < cnt; i++) {
- dtor_btf_id = dtors[i].kfunc_btf_id;
+ dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id);
dtor_func = btf_type_by_id(btf, dtor_btf_id);
if (!dtor_func || !btf_type_is_func(dtor_func))
@@ -8306,7 +8472,7 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
{
struct btf_id_dtor_kfunc_tab *tab;
struct btf *btf;
- u32 tab_cnt;
+ u32 tab_cnt, i;
int ret;
btf = btf_get_module_btf(owner);
@@ -8357,6 +8523,13 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
btf->dtor_kfunc_tab = tab;
memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
+
+ /* remap BTF ids based on BTF relocation (if any) */
+ for (i = tab_cnt; i < tab_cnt + add_cnt; i++) {
+ tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
+ tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
+ }
+
tab->cnt += add_cnt;
sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1a6c3faa6e4a..7ee62e38faf0 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -736,11 +736,11 @@ static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}
-const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
struct bpf_ksym *ksym;
- char *ret = NULL;
+ int ret = 0;
rcu_read_lock();
ksym = bpf_ksym_find(addr);
@@ -748,9 +748,8 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long symbol_start = ksym->start;
unsigned long symbol_end = ksym->end;
- strscpy(sym, ksym->name, KSYM_NAME_LEN);
+ ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);
- ret = sym;
if (size)
*size = symbol_end - symbol_start;
if (off)
@@ -1174,8 +1173,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
}
/* Copy JITed text from rw_header to its final location, the ro_header. */
-int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
- struct bpf_binary_header *ro_header,
+int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header)
{
void *ptr;
@@ -2743,8 +2741,7 @@ static void bpf_free_used_maps(struct bpf_prog_aux *aux)
kfree(aux->used_maps);
}
-void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
- struct btf_mod_pair *used_btfs, u32 len)
+void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
struct btf_mod_pair *btf_mod;
@@ -2761,7 +2758,7 @@ void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{
- __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
+ __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
kfree(aux->used_btfs);
}
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index a8e34416e960..fbdf5a1aabfe 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -79,8 +79,6 @@ struct bpf_cpu_map {
struct bpf_cpu_map_entry __rcu **cpu_map;
};
-static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
-
static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
u32 value_size = attr->value_size;
@@ -240,12 +238,14 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
int xdp_n, struct xdp_cpumap_stats *stats,
struct list_head *list)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int nframes;
if (!rcpu->prog)
return xdp_n;
rcu_read_lock_bh();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);
@@ -255,6 +255,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
if (unlikely(!list_empty(list)))
cpu_map_bpf_prog_run_skb(rcpu, list, stats);
+ bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
return nframes;
@@ -706,7 +707,6 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
*/
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
- struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
@@ -723,8 +723,11 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
*/
bq->q[bq->count++] = xdpf;
- if (!bq->flush_node.prev)
+ if (!bq->flush_node.prev) {
+ struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+
list_add(&bq->flush_node, flush_list);
+ }
}
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
@@ -756,9 +759,8 @@ trace:
return ret;
}
-void __cpu_map_flush(void)
+void __cpu_map_flush(struct list_head *flush_list)
{
- struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
struct xdp_bulk_queue *bq, *tmp;
list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
@@ -768,24 +770,3 @@ void __cpu_map_flush(void)
wake_up_process(bq->obj->kthread);
}
}
-
-#ifdef CONFIG_DEBUG_NET
-bool cpu_map_check_flush(void)
-{
- if (list_empty(this_cpu_ptr(&cpu_map_flush_list)))
- return false;
- __cpu_map_flush();
- return true;
-}
-#endif
-
-static int __init cpu_map_init(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
- return 0;
-}
-
-subsys_initcall(cpu_map_init);
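A minimal sketch of the pattern the cpumap (and devmap, below) conversion relies on, based only on the calls visible in these hunks; the surrounding function and its declarations live in headers not shown here: the caller of the XDP/redirect path installs an on-stack bpf_net_context, bq_enqueue() pulls its flush list out of that context instead of static per-CPU storage, and the context is cleared before the section is left.

	static int demo_run_xdp_section(void)
	{
		struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;

		/* install the on-stack context for this BH/NAPI section */
		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

		/* ... run XDP programs; cpu_map/dev_map bq_enqueue() now fetch
		 * bpf_net_ctx_get_cpu_map_flush_list() /
		 * bpf_net_ctx_get_dev_flush_list() from this context ...
		 */

		bpf_net_ctx_clear(bpf_net_ctx);	/* tear down before returning */
		return 0;
	}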
diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
index 2bee4af91e38..94854cd9c4cc 100644
--- a/kernel/bpf/crypto.c
+++ b/kernel/bpf/crypto.c
@@ -275,7 +275,7 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
if (__bpf_dynptr_is_rdonly(dst))
return -EINVAL;
- siv_len = __bpf_dynptr_size(siv);
+ siv_len = siv ? __bpf_dynptr_size(siv) : 0;
src_len = __bpf_dynptr_size(src);
dst_len = __bpf_dynptr_size(dst);
if (!src_len || !dst_len)
@@ -303,36 +303,44 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
/**
* bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided.
- * @ctx: The crypto context being used. The ctx must be a trusted pointer.
- * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
- * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
- * @siv: bpf_dynptr to IV data and state data to be used by decryptor.
+ * @ctx: The crypto context being used. The ctx must be a trusted pointer.
+ * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
+ * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
+ * @siv__nullable: bpf_dynptr to IV data and state data to be used by decryptor. May be NULL.
*
* Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
*/
__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
- const struct bpf_dynptr_kern *src,
- const struct bpf_dynptr_kern *dst,
- const struct bpf_dynptr_kern *siv)
+ const struct bpf_dynptr *src,
+ const struct bpf_dynptr *dst,
+ const struct bpf_dynptr *siv__nullable)
{
- return bpf_crypto_crypt(ctx, src, dst, siv, true);
+ const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src;
+ const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst;
+ const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable;
+
+ return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, true);
}
/**
* bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided.
- * @ctx: The crypto context being used. The ctx must be a trusted pointer.
- * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
- * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
- * @siv: bpf_dynptr to IV data and state data to be used by decryptor.
+ * @ctx: The crypto context being used. The ctx must be a trusted pointer.
+ * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
+ * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
+ * @siv__nullable: bpf_dynptr to IV data and state data to be used by decryptor. May be NULL.
*
* Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
*/
__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
- const struct bpf_dynptr_kern *src,
- const struct bpf_dynptr_kern *dst,
- const struct bpf_dynptr_kern *siv)
+ const struct bpf_dynptr *src,
+ const struct bpf_dynptr *dst,
+ const struct bpf_dynptr *siv__nullable)
{
- return bpf_crypto_crypt(ctx, src, dst, siv, false);
+ const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src;
+ const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst;
+ const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable;
+
+ return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, false);
}
__bpf_kfunc_end_defs();
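BPF-side illustration of the relaxed siv argument (the extern declaration is an assumption about how a program would import the kfunc; only the NULL-for-siv behaviour comes from this hunk): a program using a mode that needs no IV or state, e.g. an ECB cipher, can now pass NULL instead of constructing a zero-sized dynptr.

	/* BPF program C, illustrative */
	extern int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
				      const struct bpf_dynptr *src,
				      const struct bpf_dynptr *dst,
				      const struct bpf_dynptr *siv__nullable) __ksym;

	static __always_inline int encrypt_no_iv(struct bpf_crypto_ctx *ctx,
						 const struct bpf_dynptr *src,
						 const struct bpf_dynptr *dst)
	{
		return bpf_crypto_encrypt(ctx, src, dst, /* siv */ NULL);
	}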
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 4e2cdbb5629f..9e0e3b0a18e4 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -83,7 +83,6 @@ struct bpf_dtab {
u32 n_buckets;
};
-static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
@@ -107,7 +106,7 @@ static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
-static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
+static int dev_map_alloc_check(union bpf_attr *attr)
{
u32 valsize = attr->value_size;
@@ -121,23 +120,28 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
attr->map_flags & ~DEV_CREATE_FLAG_MASK)
return -EINVAL;
+ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ /* Hash table size must be a power of 2; roundup_pow_of_two()
+ * can overflow into UB on 32-bit arches
+ */
+ if (attr->max_entries > 1UL << 31)
+ return -EINVAL;
+ }
+
+ return 0;
+}
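Worked arithmetic behind the check (the behaviour of roundup_pow_of_two() is general knowledge, not shown in the hunk): for max_entries = 2^31 + 1 on a 32-bit arch, the next power of two is 2^32, so roundup_pow_of_two() would have to compute 1UL << 32, a shift by the full width of a 32-bit unsigned long and therefore undefined behaviour; rejecting max_entries > 1UL << 31 in map_alloc_check() keeps the later roundup_pow_of_two() in dev_map_init_map() in range.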
+
+static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
+{
/* Lookup returns a pointer straight to dev->ifindex, so make sure the
* verifier prevents writes from the BPF side
*/
attr->map_flags |= BPF_F_RDONLY_PROG;
-
-
bpf_map_init_from_attr(&dtab->map, attr);
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
- /* hash table size must be power of 2; roundup_pow_of_two() can
- * overflow into UB on 32-bit arches, so check that first
- */
- if (dtab->map.max_entries > 1UL << 31)
- return -EINVAL;
-
+ /* Hash table size must be a power of 2 */
dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
-
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
dtab->map.numa_node);
if (!dtab->dev_index_head)
@@ -196,7 +200,14 @@ static void dev_map_free(struct bpf_map *map)
list_del_rcu(&dtab->list);
spin_unlock(&dev_map_lock);
- bpf_clear_redirect_map(map);
+ /* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
+ * during NAPI callback and cleared after the XDP redirect. There is no
+ * explicit RCU read section which protects bpf_redirect_info->map but
+ * local_bh_disable() also marks the beginning of an RCU section. This
+ * makes the complete softirq callback RCU protected. Thus, after the
+ * following synchronize_rcu(), there is no remaining
+ * bpf_redirect_info->map == map assignment.
+ */
synchronize_rcu();
/* Make sure prior __dev_map_entry_free() have completed. */
@@ -406,9 +417,8 @@ out:
* driver before returning from its napi->poll() routine. See the comment above
* xdp_do_flush() in filter.c.
*/
-void __dev_flush(void)
+void __dev_flush(struct list_head *flush_list)
{
- struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
struct xdp_dev_bulk_queue *bq, *tmp;
list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
@@ -419,16 +429,6 @@ void __dev_flush(void)
}
}
-#ifdef CONFIG_DEBUG_NET
-bool dev_check_flush(void)
-{
- if (list_empty(this_cpu_ptr(&dev_flush_list)))
- return false;
- __dev_flush();
- return true;
-}
-#endif
-
/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
* by local_bh_disable() (from XDP calls inside NAPI). The
* rcu_read_lock_bh_held() below makes lockdep accept both.
@@ -453,7 +453,6 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
- struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
@@ -467,6 +466,8 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
* are only ever modified together.
*/
if (!bq->dev_rx) {
+ struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+
bq->dev_rx = dev_rx;
bq->xdp_prog = xdp_prog;
list_add(&bq->flush_node, flush_list);
@@ -760,9 +761,6 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
for (i = 0; i < dtab->n_buckets; i++) {
head = dev_map_index_hash(dtab, i);
hlist_for_each_entry_safe(dst, next, head, index_hlist) {
- if (!dst)
- continue;
-
if (is_ifindex_excluded(excluded_devices, num_excluded,
dst->dev->ifindex))
continue;
@@ -1043,6 +1041,7 @@ static u64 dev_map_mem_usage(const struct bpf_map *map)
BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
+ .map_alloc_check = dev_map_alloc_check,
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
.map_get_next_key = dev_map_get_next_key,
@@ -1057,6 +1056,7 @@ const struct bpf_map_ops dev_map_ops = {
const struct bpf_map_ops dev_map_hash_ops = {
.map_meta_equal = bpf_map_meta_equal,
+ .map_alloc_check = dev_map_alloc_check,
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
.map_get_next_key = dev_map_hash_get_next_key,
@@ -1156,15 +1156,11 @@ static struct notifier_block dev_map_notifier = {
static int __init dev_map_init(void)
{
- int cpu;
-
/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
offsetof(struct _bpf_dtab_netdev, dev));
register_netdevice_notifier(&dev_map_notifier);
- for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
return 0;
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 2a69a9a36c0f..b5f0adae8293 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1084,7 +1084,10 @@ struct bpf_async_cb {
struct bpf_prog *prog;
void __rcu *callback_fn;
void *value;
- struct rcu_head rcu;
+ union {
+ struct rcu_head rcu;
+ struct work_struct delete_work;
+ };
u64 flags;
};
@@ -1107,6 +1110,7 @@ struct bpf_async_cb {
struct bpf_hrtimer {
struct bpf_async_cb cb;
struct hrtimer timer;
+ atomic_t cancelling;
};
struct bpf_work {
@@ -1219,6 +1223,21 @@ static void bpf_wq_delete_work(struct work_struct *work)
kfree_rcu(w, cb.rcu);
}
+static void bpf_timer_delete_work(struct work_struct *work)
+{
+ struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
+
+ /* Cancel the timer and wait for callback to complete if it was running.
+ * If hrtimer_cancel() can be safely called it's safe to call
+ * kfree_rcu(t) right after for both preallocated and non-preallocated
+ * maps. The async->cb = NULL was already done and no code path can see
+ * address 't' anymore. If the timer was armed for an existing bpf_hrtimer
+ * before bpf_timer_cancel_and_free(), it will have been cancelled by now.
+ */
+ hrtimer_cancel(&t->timer);
+ kfree_rcu(t, cb.rcu);
+}
+
static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
enum bpf_async_type type)
{
@@ -1262,6 +1281,8 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
clockid = flags & (MAX_CLOCKS - 1);
t = (struct bpf_hrtimer *)cb;
+ atomic_set(&t->cancelling, 0);
+ INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
t->timer.function = bpf_timer_cb;
cb->value = (void *)async - map->record->timer_off;
@@ -1440,7 +1461,8 @@ static void drop_prog_refcnt(struct bpf_async_cb *async)
BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{
- struct bpf_hrtimer *t;
+ struct bpf_hrtimer *t, *cur_t;
+ bool inc = false;
int ret = 0;
if (in_nmi())
@@ -1452,14 +1474,41 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
ret = -EINVAL;
goto out;
}
- if (this_cpu_read(hrtimer_running) == t) {
+
+ cur_t = this_cpu_read(hrtimer_running);
+ if (cur_t == t) {
/* If bpf callback_fn is trying to bpf_timer_cancel()
* its own timer the hrtimer_cancel() will deadlock
- * since it waits for callback_fn to finish
+ * since it waits for callback_fn to finish.
+ */
+ ret = -EDEADLK;
+ goto out;
+ }
+
+ /* Only account in-flight cancellations when invoked from a timer
+ * callback, since we only want to avoid waiting if other _callbacks_
+ * are waiting on us, as that could introduce lockups. Non-callback paths
+ * are ok, since nobody would synchronously wait for their completion.
+ */
+ if (!cur_t)
+ goto drop;
+ atomic_inc(&t->cancelling);
+ /* Need full barrier after relaxed atomic_inc */
+ smp_mb__after_atomic();
+ inc = true;
+ if (atomic_read(&cur_t->cancelling)) {
+ /* We're cancelling timer t, while some other timer callback is
+ * attempting to cancel us. In such a case, it might be possible
+ * that timer t belongs to the other callback, or some other
+ * callback waiting upon it (creating transitive dependencies
+ * upon us), and we will enter a deadlock if we continue
+ * cancelling and waiting for it synchronously, since it might
+ * do the same. Bail!
*/
ret = -EDEADLK;
goto out;
}
+drop:
drop_prog_refcnt(&t->cb);
out:
__bpf_spin_unlock_irqrestore(&timer->lock);
@@ -1467,6 +1516,8 @@ out:
* if it was running.
*/
ret = ret ?: hrtimer_cancel(&t->timer);
+ if (inc)
+ atomic_dec(&t->cancelling);
rcu_read_unlock();
return ret;
}
@@ -1512,25 +1563,39 @@ void bpf_timer_cancel_and_free(void *val)
if (!t)
return;
- /* Cancel the timer and wait for callback to complete if it was running.
- * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
- * right after for both preallocated and non-preallocated maps.
- * The async->cb = NULL was already done and no code path can
- * see address 't' anymore.
- *
- * Check that bpf_map_delete/update_elem() wasn't called from timer
- * callback_fn. In such case don't call hrtimer_cancel() (since it will
- * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
- * return -1). Though callback_fn is still running on this cpu it's
+ /* We check that bpf_map_delete/update_elem() was called from timer
+ * callback_fn. In such a case we don't call hrtimer_cancel() (since it
+ * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
+ * just return -1). Though callback_fn is still running on this cpu it's
* safe to do kfree(t) because bpf_timer_cb() read everything it needed
* from 't'. The bpf subprog callback_fn won't be able to access 't',
* since async->cb = NULL was already done. The timer will be
* effectively cancelled because bpf_timer_cb() will return
* HRTIMER_NORESTART.
+ *
+ * However, it is possible the timer callback_fn calling us armed the
+ * timer _before_ calling us, such that failing to cancel it here will
+ * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
+ * Therefore, we _need_ to cancel any outstanding timers before we do
+ * kfree_rcu, even though no more timers can be armed.
+ *
+ * Moreover, we need to schedule work even if timer does not belong to
+ * the calling callback_fn, as on two different CPUs, we can end up in a
+ * situation where both sides run in parallel, try to cancel one
+ * another, and we end up waiting on both sides in hrtimer_cancel
+ * without making forward progress, since timer1 depends on timer2's
+ * callback to finish, and vice versa.
+ *
+ * CPU 1 (timer1_cb) CPU 2 (timer2_cb)
+ * bpf_timer_cancel_and_free(timer2) bpf_timer_cancel_and_free(timer1)
+ *
+ * To avoid these issues, punt to workqueue context when we are in a
+ * timer callback.
*/
- if (this_cpu_read(hrtimer_running) != t)
- hrtimer_cancel(&t->timer);
- kfree_rcu(t, cb.rcu);
+ if (this_cpu_read(hrtimer_running))
+ queue_work(system_unbound_wq, &t->cb.delete_work);
+ else
+ bpf_timer_delete_work(&t->cb.delete_work);
}
/* This function is called by map_delete/update_elem for individual element and
@@ -2433,7 +2498,7 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
/**
* bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
- * @ptr: The dynptr whose data slice to retrieve
+ * @p: The dynptr whose data slice to retrieve
* @offset: Offset into the dynptr
* @buffer__opt: User-provided buffer to copy contents into. May be NULL
* @buffer__szk: Size (in bytes) of the buffer if present. This is the
@@ -2459,9 +2524,10 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
* provided buffer, with its contents containing the data, if unable to obtain
* direct pointer)
*/
-__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
+__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
void *buffer__opt, u32 buffer__szk)
{
+ const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
enum bpf_dynptr_type type;
u32 len = buffer__szk;
int err;
@@ -2503,7 +2569,7 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
/**
* bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
- * @ptr: The dynptr whose data slice to retrieve
+ * @p: The dynptr whose data slice to retrieve
* @offset: Offset into the dynptr
* @buffer__opt: User-provided buffer to copy contents into. May be NULL
* @buffer__szk: Size (in bytes) of the buffer if present. This is the
@@ -2543,9 +2609,11 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
* provided buffer, with its contents containing the data, if unable to obtain
* direct pointer)
*/
-__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
+__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
void *buffer__opt, u32 buffer__szk)
{
+ const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
return NULL;
@@ -2571,11 +2639,12 @@ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 o
* will be copied out into the buffer and the user will need to call
* bpf_dynptr_write() to commit changes.
*/
- return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk);
+ return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
}
-__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
+__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
u32 size;
if (!ptr->data || start > end)
@@ -2592,36 +2661,45 @@ __bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 en
return 0;
}
-__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr)
+__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
return !ptr->data;
}
-__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
+__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
if (!ptr->data)
return false;
return __bpf_dynptr_is_rdonly(ptr);
}
-__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
+__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
if (!ptr->data)
return -EINVAL;
return __bpf_dynptr_size(ptr);
}
-__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr,
- struct bpf_dynptr_kern *clone__uninit)
+__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
+ struct bpf_dynptr *clone__uninit)
{
+ struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
if (!ptr->data) {
- bpf_dynptr_set_null(clone__uninit);
+ bpf_dynptr_set_null(clone);
return -EINVAL;
}
- *clone__uninit = *ptr;
+ *clone = *ptr;
return 0;
}
@@ -2721,7 +2799,7 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
}
__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+ int (callback_fn)(void *map, int *key, void *value),
unsigned int flags,
void *aux__ign)
{
@@ -2744,6 +2822,122 @@ __bpf_kfunc void bpf_preempt_enable(void)
preempt_enable();
}
+struct bpf_iter_bits {
+ __u64 __opaque[2];
+} __aligned(8);
+
+struct bpf_iter_bits_kern {
+ union {
+ unsigned long *bits;
+ unsigned long bits_copy;
+ };
+ u32 nr_bits;
+ int bit;
+} __aligned(8);
+
+/**
+ * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
+ * @it: The new bpf_iter_bits to be created
+ * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
+ * @nr_words: The size of the specified memory area, measured in 8-byte units.
+ * Due to the limitation of memalloc, it can't be greater than 512.
+ *
+ * This function initializes a new bpf_iter_bits structure for iterating over
+ * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
+ * copies the data of the memory area to the newly created bpf_iter_bits @it for
+ * subsequent iteration operations.
+ *
+ * On success, 0 is returned. On failure, a negative error code is returned.
+ */
+__bpf_kfunc int
+bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
+{
+ struct bpf_iter_bits_kern *kit = (void *)it;
+ u32 nr_bytes = nr_words * sizeof(u64);
+ u32 nr_bits = BYTES_TO_BITS(nr_bytes);
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
+ BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
+ __alignof__(struct bpf_iter_bits));
+
+ kit->nr_bits = 0;
+ kit->bits_copy = 0;
+ kit->bit = -1;
+
+ if (!unsafe_ptr__ign || !nr_words)
+ return -EINVAL;
+
+ /* Optimization for u64 mask */
+ if (nr_bits == 64) {
+ err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
+ if (err)
+ return -EFAULT;
+
+ kit->nr_bits = nr_bits;
+ return 0;
+ }
+
+ /* Fallback to memalloc */
+ kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
+ if (!kit->bits)
+ return -ENOMEM;
+
+ err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
+ if (err) {
+ bpf_mem_free(&bpf_global_ma, kit->bits);
+ return err;
+ }
+
+ kit->nr_bits = nr_bits;
+ return 0;
+}
+
+/**
+ * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
+ * @it: The bpf_iter_bits to be checked
+ *
+ * This function returns a pointer to a number representing the index of
+ * the next set bit in the bit mask.
+ *
+ * If there are no further bits available, it returns NULL.
+ */
+__bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
+{
+ struct bpf_iter_bits_kern *kit = (void *)it;
+ u32 nr_bits = kit->nr_bits;
+ const unsigned long *bits;
+ int bit;
+
+ if (nr_bits == 0)
+ return NULL;
+
+ bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
+ bit = find_next_bit(bits, nr_bits, kit->bit + 1);
+ if (bit >= nr_bits) {
+ kit->nr_bits = 0;
+ return NULL;
+ }
+
+ kit->bit = bit;
+ return &kit->bit;
+}
+
+/**
+ * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
+ * @it: The bpf_iter_bits to be destroyed
+ *
+ * Destroy the resource associated with the bpf_iter_bits.
+ */
+__bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
+{
+ struct bpf_iter_bits_kern *kit = (void *)it;
+
+ if (kit->nr_bits <= 64)
+ return;
+ bpf_mem_free(&bpf_global_ma, kit->bits);
+}
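A hedged BPF-side sketch of how a program would consume the new iterator; the opaque struct and the extern kfunc declarations mirror the kernel definitions above but would normally come from vmlinux.h and bpf_helpers.h. Open-coded iterators always follow the new/next/destroy protocol, and the pointer returned by _next() is the index of the next set bit.

	/* BPF program C, illustrative */
	struct bpf_iter_bits { __u64 __opaque[2]; } __attribute__((aligned(8)));

	extern int bpf_iter_bits_new(struct bpf_iter_bits *it,
				     const __u64 *unsafe_ptr__ign, __u32 nr_words) __ksym;
	extern int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
	extern void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

	/* count the set bits in an nr_words * 64 bit mask living in kernel memory */
	static __always_inline int count_bits(const __u64 *mask, __u32 nr_words)
	{
		struct bpf_iter_bits it;
		int *bit, cnt = 0;

		if (bpf_iter_bits_new(&it, mask, nr_words))
			return 0;
		while ((bit = bpf_iter_bits_next(&it)))
			cnt++;
		bpf_iter_bits_destroy(&it);
		return cnt;
	}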
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
@@ -2826,6 +3020,9 @@ BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
BTF_ID_FLAGS(func, bpf_wq_start)
BTF_ID_FLAGS(func, bpf_preempt_disable)
BTF_ID_FLAGS(func, bpf_preempt_enable)
+BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
+BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
@@ -2867,7 +3064,9 @@ late_initcall(kfunc_init);
*/
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
{
- return bpf_dynptr_slice(ptr, 0, NULL, len);
+ const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
+
+ return bpf_dynptr_slice(p, 0, NULL, len);
}
/* Get a pointer to dynptr data up to len bytes for read write access. If
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 4bd8f17a9f24..5aebfc3051e3 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -91,7 +91,7 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
goto fail;
} else {
u64 new_end, new_start;
- u32 buf_start, buf_end, new_n;
+ u32 buf_start, buf_end;
new_end = log->end_pos + n;
if (new_end - log->start_pos >= log->len_total)
@@ -708,7 +708,9 @@ static void print_reg_state(struct bpf_verifier_env *env,
verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
verbose(env, "(");
if (reg->id)
- verbose_a("id=%d", reg->id);
+ verbose_a("id=%d", reg->id & ~BPF_ADD_CONST);
+ if (reg->id & BPF_ADD_CONST)
+ verbose(env, "%+d", reg->off);
if (reg->ref_obj_id)
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
if (type_is_non_owning_ref(reg->type))
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 0ee653a936ea..e20b90c36131 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -51,7 +51,8 @@ struct bpf_ringbuf {
* This prevents a user-space application from modifying the
* position and ruining in-kernel tracking. The permissions of the
* pages depend on who is producing samples: user-space or the
- * kernel.
+ * kernel. Note that the pending counter is placed in the producer
+ * page, right after the producer position, so the two share a cache line.
*
* Kernel-producer
* ---------------
@@ -70,6 +71,7 @@ struct bpf_ringbuf {
*/
unsigned long consumer_pos __aligned(PAGE_SIZE);
unsigned long producer_pos __aligned(PAGE_SIZE);
+ unsigned long pending_pos;
char data[] __aligned(PAGE_SIZE);
};
@@ -179,6 +181,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
rb->mask = data_sz - 1;
rb->consumer_pos = 0;
rb->producer_pos = 0;
+ rb->pending_pos = 0;
return rb;
}
@@ -404,9 +407,9 @@ bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
- unsigned long cons_pos, prod_pos, new_prod_pos, flags;
- u32 len, pg_off;
+ unsigned long cons_pos, prod_pos, new_prod_pos, pend_pos, flags;
struct bpf_ringbuf_hdr *hdr;
+ u32 len, pg_off, tmp_size, hdr_len;
if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
return NULL;
@@ -424,13 +427,29 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
spin_lock_irqsave(&rb->spinlock, flags);
}
+ pend_pos = rb->pending_pos;
prod_pos = rb->producer_pos;
new_prod_pos = prod_pos + len;
- /* check for out of ringbuf space by ensuring producer position
- * doesn't advance more than (ringbuf_size - 1) ahead
+ while (pend_pos < prod_pos) {
+ hdr = (void *)rb->data + (pend_pos & rb->mask);
+ hdr_len = READ_ONCE(hdr->len);
+ if (hdr_len & BPF_RINGBUF_BUSY_BIT)
+ break;
+ tmp_size = hdr_len & ~BPF_RINGBUF_DISCARD_BIT;
+ tmp_size = round_up(tmp_size + BPF_RINGBUF_HDR_SZ, 8);
+ pend_pos += tmp_size;
+ }
+ rb->pending_pos = pend_pos;
+
+ /* check for out of ringbuf space:
+ * - by ensuring producer position doesn't advance more than
+ * (ringbuf_size - 1) ahead of the consumer position
+ * - by ensuring that the span from the oldest not-yet-committed
+ * record to the newest record doesn't exceed (ringbuf_size - 1)
+ */
- if (new_prod_pos - cons_pos > rb->mask) {
+ if (new_prod_pos - cons_pos > rb->mask ||
+ new_prod_pos - pend_pos > rb->mask) {
spin_unlock_irqrestore(&rb->spinlock, flags);
return NULL;
}
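For intuition, a worked example of why the second bound is needed (numbers illustrative, assuming an 8 KiB data area, i.e. rb->mask == 8191; consumer_pos lives in a user-writable page, so it cannot be trusted to stop at uncommitted records):

	/*
	 * pend_pos = 0     record at offset 0 reserved (BUSY) but not committed,
	 *                  e.g. its producer was interrupted by an NMI
	 * prod_pos = 8000  other producers kept reserving/committing records
	 * cons_pos = 8000  a misbehaving consumer bumped consumer_pos past the
	 *                  busy record
	 *
	 * A new reservation of len = 256 gives new_prod_pos = 8256:
	 *   new_prod_pos - cons_pos = 256  <= mask  (old check alone would pass)
	 *   new_prod_pos - pend_pos = 8256 >  mask  -> reservation rejected,
	 * so the still-busy record at offset 0 cannot be overwritten.
	 */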
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2222c3ff88e7..869265852d51 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2998,6 +2998,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog)
{
+ WARN_ON(ops->dealloc && ops->dealloc_deferred);
atomic64_set(&link->refcnt, 1);
link->type = type;
link->id = 0;
@@ -3056,16 +3057,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
+ const struct bpf_link_ops *ops = link->ops;
bool sleepable = false;
bpf_link_free_id(link->id);
if (link->prog) {
sleepable = link->prog->sleepable;
/* detach BPF program, clean up used resources */
- link->ops->release(link);
+ ops->release(link);
bpf_prog_put(link->prog);
}
- if (link->ops->dealloc_deferred) {
+ if (ops->dealloc_deferred) {
/* schedule BPF link deallocation; if underlying BPF program
* is sleepable, we need to first wait for RCU tasks trace
* sync, then go through "classic" RCU grace period
@@ -3074,9 +3076,8 @@ static void bpf_link_free(struct bpf_link *link)
call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
else
call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
- }
- if (link->ops->dealloc)
- link->ops->dealloc(link);
+ } else if (ops->dealloc)
+ ops->dealloc(link);
}
static void bpf_link_put_deferred(struct work_struct *work)
@@ -3150,6 +3151,13 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
}
#endif
+static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
+{
+ struct bpf_link *link = file->private_data;
+
+ return link->ops->poll(file, pts);
+}
+
static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = bpf_link_show_fdinfo,
@@ -3159,6 +3167,16 @@ static const struct file_operations bpf_link_fops = {
.write = bpf_dummy_write,
};
+static const struct file_operations bpf_link_fops_poll = {
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = bpf_link_show_fdinfo,
+#endif
+ .release = bpf_link_release,
+ .read = bpf_dummy_read,
+ .write = bpf_dummy_write,
+ .poll = bpf_link_poll,
+};
+
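A minimal sketch (not part of this patch) of how a link type might opt into the new poll support; my_link, its wait queue, its ready flag and the release/dealloc callbacks are hypothetical, and <linux/poll.h> is assumed to be included.

	static __poll_t my_link_poll(struct file *file, struct poll_table_struct *pts)
	{
		struct bpf_link *link = file->private_data;
		struct my_link *ml = container_of(link, struct my_link, link);	/* hypothetical type */

		poll_wait(file, &ml->wq, pts);
		return READ_ONCE(ml->has_data) ? EPOLLIN | EPOLLRDNORM : 0;
	}

	static const struct bpf_link_ops my_link_ops = {
		.release = my_link_release,	/* hypothetical */
		.dealloc = my_link_dealloc,	/* hypothetical */
		.poll	 = my_link_poll,	/* makes bpf_link_prime() pick bpf_link_fops_poll */
	};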
static int bpf_link_alloc_id(struct bpf_link *link)
{
int id;
@@ -3201,7 +3219,9 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
return id;
}
- file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
+ file = anon_inode_getfile("bpf_link",
+ link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
+ link, O_CLOEXEC);
if (IS_ERR(file)) {
bpf_link_free_id(id);
put_unused_fd(fd);
@@ -3229,7 +3249,9 @@ int bpf_link_settle(struct bpf_link_primer *primer)
int bpf_link_new_fd(struct bpf_link *link)
{
- return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
+ return anon_inode_getfd("bpf-link",
+ link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
+ link, O_CLOEXEC);
}
struct bpf_link *bpf_link_get_from_fd(u32 ufd)
@@ -3239,7 +3261,7 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)
if (!f.file)
return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_link_fops) {
+ if (f.file->f_op != &bpf_link_fops && f.file->f_op != &bpf_link_fops_poll) {
fdput(f);
return ERR_PTR(-EINVAL);
}
@@ -4971,7 +4993,7 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
uattr);
else if (f.file->f_op == &btf_fops)
err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
- else if (f.file->f_op == &bpf_link_fops)
+ else if (f.file->f_op == &bpf_link_fops || f.file->f_op == &bpf_link_fops_poll)
err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
attr, uattr);
else
@@ -5106,7 +5128,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
if (!file)
return -EBADF;
- if (file->f_op == &bpf_link_fops) {
+ if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
struct bpf_link *link = file->private_data;
if (link->ops == &bpf_raw_tp_link_lops) {
@@ -5416,10 +5438,11 @@ static int link_detach(union bpf_attr *attr)
return ret;
}
-static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}
+EXPORT_SYMBOL(bpf_link_inc_not_zero);
struct bpf_link *bpf_link_by_id(u32 id)
{
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index ec4e97c61eef..02aa9db8d796 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -261,6 +261,7 @@ task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
u32 saved_tid = info->tid;
struct task_struct *curr_task;
unsigned int curr_fd = info->fd;
+ struct file *f;
/* If this function returns a non-NULL file object,
* it held a reference to the task/file.
@@ -286,12 +287,8 @@ again:
}
rcu_read_lock();
- for (;; curr_fd++) {
- struct file *f;
- f = task_lookup_next_fdget_rcu(curr_task, &curr_fd);
- if (!f)
- break;
-
+ f = task_lookup_next_fdget_rcu(curr_task, &curr_fd);
+ if (f) {
/* set info->fd */
info->fd = curr_fd;
rcu_read_unlock();
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 77da1f438bec..8da132a1ef28 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2982,8 +2982,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
if (code == (BPF_JMP | BPF_CALL) &&
insn[i].src_reg == 0 &&
- insn[i].imm == BPF_FUNC_tail_call)
+ insn[i].imm == BPF_FUNC_tail_call) {
subprog[cur_subprog].has_tail_call = true;
+ subprog[cur_subprog].tail_call_reachable = true;
+ }
if (BPF_CLASS(code) == BPF_LD &&
(BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
subprog[cur_subprog].has_ld_abs = true;
@@ -3215,7 +3217,8 @@ static int insn_def_regno(const struct bpf_insn *insn)
case BPF_ST:
return -1;
case BPF_STX:
- if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+ if ((BPF_MODE(insn->code) == BPF_ATOMIC ||
+ BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) &&
(insn->imm & BPF_FETCH)) {
if (insn->imm == BPF_CMPXCHG)
return BPF_REG_0;
@@ -3991,7 +3994,7 @@ static bool idset_contains(struct bpf_idset *s, u32 id)
u32 i;
for (i = 0; i < s->count; ++i)
- if (s->ids[i] == id)
+ if (s->ids[i] == (id & ~BPF_ADD_CONST))
return true;
return false;
@@ -4001,7 +4004,7 @@ static int idset_push(struct bpf_idset *s, u32 id)
{
if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids)))
return -EFAULT;
- s->ids[s->count++] = id;
+ s->ids[s->count++] = id & ~BPF_ADD_CONST;
return 0;
}
@@ -4438,8 +4441,20 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
struct bpf_reg_state *src_reg)
{
- if (src_reg->type == SCALAR_VALUE && !src_reg->id &&
- !tnum_is_const(src_reg->var_off))
+ if (src_reg->type != SCALAR_VALUE)
+ return;
+
+ if (src_reg->id & BPF_ADD_CONST) {
+ /*
+ * The verifier is processing an rX = rY insn and rY->id already
+ * carries the linked-register marker (BPF_ADD_CONST).
+ * Clear it, since multiple rX += const are not supported.
+ */
+ src_reg->id = 0;
+ src_reg->off = 0;
+ }
+
+ if (!src_reg->id && !tnum_is_const(src_reg->var_off))
/* Ensure that src_reg has a valid ID that will be copied to
* dst_reg and then will be used by find_equal_scalars() to
* propagate min/max range.
@@ -4549,11 +4564,12 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
state->stack[spi].spilled_ptr.id = 0;
} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
env->bpf_capable) {
- struct bpf_reg_state fake_reg = {};
+ struct bpf_reg_state *tmp_reg = &env->fake_reg[0];
- __mark_reg_known(&fake_reg, insn->imm);
- fake_reg.type = SCALAR_VALUE;
- save_register_state(env, state, spi, &fake_reg, size);
+ memset(tmp_reg, 0, sizeof(*tmp_reg));
+ __mark_reg_known(tmp_reg, insn->imm);
+ tmp_reg->type = SCALAR_VALUE;
+ save_register_state(env, state, spi, tmp_reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -5448,7 +5464,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
* this program. To check that [x1, x2) overlaps with [y1, y2),
* it is sufficient to check x1 < y2 && y1 < x2.
*/
- if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
+ if (reg->smin_value + off < p + field->size &&
p < reg->umax_value + off + size) {
switch (field->type) {
case BPF_KPTR_UNREF:
@@ -6235,6 +6251,7 @@ static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
}
reg->u32_min_value = 0;
reg->u32_max_value = U32_MAX;
+ reg->var_off = tnum_subreg(tnum_unknown);
}
static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
@@ -6279,6 +6296,7 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
reg->s32_max_value = s32_max;
reg->u32_min_value = (u32)s32_min;
reg->u32_max_value = (u32)s32_max;
+ reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max));
return;
}
@@ -7712,6 +7730,13 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
int err;
+ if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) {
+ verbose(env,
+ "arg#%d expected pointer to stack or const struct bpf_dynptr\n",
+ regno);
+ return -EINVAL;
+ }
+
/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
* ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
*/
@@ -8882,7 +8907,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
enum bpf_attach_type eatype = env->prog->expected_attach_type;
enum bpf_prog_type type = resolve_prog_type(env->prog);
- if (func_id != BPF_FUNC_map_update_elem)
+ if (func_id != BPF_FUNC_map_update_elem &&
+ func_id != BPF_FUNC_map_delete_elem)
return false;
/* It's not possible to get access to a locked struct sock in these
@@ -8893,6 +8919,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
if (eatype == BPF_TRACE_ITER)
return true;
break;
+ case BPF_PROG_TYPE_SOCK_OPS:
+ /* map_update allowed only via dedicated helpers with event type checks */
+ if (func_id == BPF_FUNC_map_delete_elem)
+ return true;
+ break;
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
@@ -8988,7 +9019,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
@@ -8998,7 +9028,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
@@ -9457,6 +9486,10 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
return -EINVAL;
}
} else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
+ ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_DYNPTR);
+ if (ret)
+ return ret;
+
ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0);
if (ret)
return ret;
@@ -10910,7 +10943,7 @@ enum {
};
BTF_ID_LIST(kf_arg_btf_ids)
-BTF_ID(struct, bpf_dynptr_kern)
+BTF_ID(struct, bpf_dynptr)
BTF_ID(struct, bpf_list_head)
BTF_ID(struct, bpf_list_node)
BTF_ID(struct, bpf_rb_root)
@@ -11124,7 +11157,11 @@ BTF_ID(func, bpf_iter_css_task_new)
#else
BTF_ID_UNUSED
#endif
+#ifdef CONFIG_BPF_EVENTS
BTF_ID(func, bpf_session_cookie)
+#else
+BTF_ID_UNUSED
+#endif
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -11179,6 +11216,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
return KF_ARG_PTR_TO_CTX;
+ if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg))
+ return KF_ARG_PTR_TO_NULL;
+
if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
return KF_ARG_PTR_TO_ALLOC_BTF_ID;
@@ -11224,9 +11264,6 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
return KF_ARG_PTR_TO_CALLBACK;
- if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg))
- return KF_ARG_PTR_TO_NULL;
-
if (argno + 1 < nargs &&
(is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
@@ -11257,6 +11294,8 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
bool strict_type_match = false;
const struct btf *reg_btf;
const char *reg_ref_tname;
+ bool taking_projection;
+ bool struct_same;
u32 reg_ref_id;
if (base_type(reg->type) == PTR_TO_BTF_ID) {
@@ -11296,11 +11335,19 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
strict_type_match = true;
- WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
+ WARN_ON_ONCE(is_kfunc_release(meta) &&
+ (reg->off || !tnum_is_const(reg->var_off) ||
+ reg->var_off.value));
reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
- if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
+ struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match);
+ /* If the kfunc accepts a projection type (i.e. __sk_buff), it cannot
+ * actually use it -- it must cast to the underlying type. So we allow
+ * the caller to pass in the underlying type.
+ */
+ taking_projection = btf_is_projection_of(ref_tname, reg_ref_tname);
+ if (!taking_projection && !struct_same) {
verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
btf_type_str(reg_ref_t), reg_ref_tname);
@@ -11640,7 +11687,7 @@ __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
node_off = reg->off + reg->var_off.value;
field = reg_find_field_offset(reg, node_off, node_field_type);
- if (!field || field->offset != node_off) {
+ if (!field) {
verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
return -EINVAL;
}
@@ -11872,12 +11919,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EINVAL;
}
}
-
fallthrough;
case KF_ARG_PTR_TO_CTX:
- /* Trusted arguments have the same offset checks as release arguments */
- arg_type |= OBJ_RELEASE;
- break;
case KF_ARG_PTR_TO_DYNPTR:
case KF_ARG_PTR_TO_ITER:
case KF_ARG_PTR_TO_LIST_HEAD:
@@ -11890,7 +11933,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
case KF_ARG_PTR_TO_CONST_STR:
case KF_ARG_PTR_TO_WORKQUEUE:
- /* Trusted by default */
break;
default:
WARN_ON_ONCE(1);
@@ -11946,12 +11988,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;
int clone_ref_obj_id = 0;
- if (reg->type != PTR_TO_STACK &&
- reg->type != CONST_PTR_TO_DYNPTR) {
- verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
- return -EINVAL;
- }
-
if (reg->type == CONST_PTR_TO_DYNPTR)
dynptr_arg_type |= MEM_RDONLY;
@@ -12690,46 +12726,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return 0;
}
-static bool signed_add_overflows(s64 a, s64 b)
-{
- /* Do the add in u64, where overflow is well-defined */
- s64 res = (s64)((u64)a + (u64)b);
-
- if (b < 0)
- return res > a;
- return res < a;
-}
-
-static bool signed_add32_overflows(s32 a, s32 b)
-{
- /* Do the add in u32, where overflow is well-defined */
- s32 res = (s32)((u32)a + (u32)b);
-
- if (b < 0)
- return res > a;
- return res < a;
-}
-
-static bool signed_sub_overflows(s64 a, s64 b)
-{
- /* Do the sub in u64, where overflow is well-defined */
- s64 res = (s64)((u64)a - (u64)b);
-
- if (b < 0)
- return res < a;
- return res > a;
-}
-
-static bool signed_sub32_overflows(s32 a, s32 b)
-{
- /* Do the sub in u32, where overflow is well-defined */
- s32 res = (s32)((u32)a - (u32)b);
-
- if (b < 0)
- return res < a;
- return res > a;
-}
-
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
enum bpf_reg_type type)
@@ -13211,21 +13207,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
* added into the variable offset, and we copy the fixed offset
* from ptr_reg.
*/
- if (signed_add_overflows(smin_ptr, smin_val) ||
- signed_add_overflows(smax_ptr, smax_val)) {
+ if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
+ check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
- } else {
- dst_reg->smin_value = smin_ptr + smin_val;
- dst_reg->smax_value = smax_ptr + smax_val;
}
- if (umin_ptr + umin_val < umin_ptr ||
- umax_ptr + umax_val < umax_ptr) {
+ if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
+ check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
- } else {
- dst_reg->umin_value = umin_ptr + umin_val;
- dst_reg->umax_value = umax_ptr + umax_val;
}
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
@@ -13268,14 +13258,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
/* A new variable offset is created. If the subtrahend is known
* nonnegative, then any reg->range we had before is still good.
*/
- if (signed_sub_overflows(smin_ptr, smax_val) ||
- signed_sub_overflows(smax_ptr, smin_val)) {
+ if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) ||
+ check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
- } else {
- dst_reg->smin_value = smin_ptr - smax_val;
- dst_reg->smax_value = smax_ptr - smin_val;
}
if (umin_ptr < umax_val) {
/* Overflow possible, we know nothing */
@@ -13328,71 +13315,56 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 smin_val = src_reg->s32_min_value;
- s32 smax_val = src_reg->s32_max_value;
- u32 umin_val = src_reg->u32_min_value;
- u32 umax_val = src_reg->u32_max_value;
+ s32 *dst_smin = &dst_reg->s32_min_value;
+ s32 *dst_smax = &dst_reg->s32_max_value;
+ u32 *dst_umin = &dst_reg->u32_min_value;
+ u32 *dst_umax = &dst_reg->u32_max_value;
- if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
- signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
- } else {
- dst_reg->s32_min_value += smin_val;
- dst_reg->s32_max_value += smax_val;
+ if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
+ check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
+ *dst_smin = S32_MIN;
+ *dst_smax = S32_MAX;
}
- if (dst_reg->u32_min_value + umin_val < umin_val ||
- dst_reg->u32_max_value + umax_val < umax_val) {
- dst_reg->u32_min_value = 0;
- dst_reg->u32_max_value = U32_MAX;
- } else {
- dst_reg->u32_min_value += umin_val;
- dst_reg->u32_max_value += umax_val;
+ if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
+ check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {
+ *dst_umin = 0;
+ *dst_umax = U32_MAX;
}
}
static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 smin_val = src_reg->smin_value;
- s64 smax_val = src_reg->smax_value;
- u64 umin_val = src_reg->umin_value;
- u64 umax_val = src_reg->umax_value;
+ s64 *dst_smin = &dst_reg->smin_value;
+ s64 *dst_smax = &dst_reg->smax_value;
+ u64 *dst_umin = &dst_reg->umin_value;
+ u64 *dst_umax = &dst_reg->umax_value;
- if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
- signed_add_overflows(dst_reg->smax_value, smax_val)) {
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- } else {
- dst_reg->smin_value += smin_val;
- dst_reg->smax_value += smax_val;
+ if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
+ check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
+ *dst_smin = S64_MIN;
+ *dst_smax = S64_MAX;
}
- if (dst_reg->umin_value + umin_val < umin_val ||
- dst_reg->umax_value + umax_val < umax_val) {
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
- } else {
- dst_reg->umin_value += umin_val;
- dst_reg->umax_value += umax_val;
+ if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
+ check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {
+ *dst_umin = 0;
+ *dst_umax = U64_MAX;
}
}
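For reference, the <linux/overflow.h> helper used throughout this rework evaluates the operation, stores the possibly-wrapped result and reports whether it overflowed; a small sketch of its behaviour:

	/*
	 *	s64 d;
	 *
	 *	check_add_overflow(S64_MAX, (s64)1, &d);  // returns true,  d == S64_MIN
	 *	check_add_overflow((s64)40, (s64)2, &d);  // returns false, d == 42
	 *
	 * On overflow the callers above simply clamp the destination to the
	 * full range (S64_MIN/S64_MAX etc.), discarding the wrapped value.
	 */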
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 smin_val = src_reg->s32_min_value;
- s32 smax_val = src_reg->s32_max_value;
+ s32 *dst_smin = &dst_reg->s32_min_value;
+ s32 *dst_smax = &dst_reg->s32_max_value;
u32 umin_val = src_reg->u32_min_value;
u32 umax_val = src_reg->u32_max_value;
- if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
- signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
+ if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
+ check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
/* Overflow possible, we know nothing */
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
- } else {
- dst_reg->s32_min_value -= smax_val;
- dst_reg->s32_max_value -= smin_val;
+ *dst_smin = S32_MIN;
+ *dst_smax = S32_MAX;
}
if (dst_reg->u32_min_value < umax_val) {
/* Overflow possible, we know nothing */
@@ -13408,19 +13380,16 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 smin_val = src_reg->smin_value;
- s64 smax_val = src_reg->smax_value;
+ s64 *dst_smin = &dst_reg->smin_value;
+ s64 *dst_smax = &dst_reg->smax_value;
u64 umin_val = src_reg->umin_value;
u64 umax_val = src_reg->umax_value;
- if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
- signed_sub_overflows(dst_reg->smax_value, smin_val)) {
+ if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
+ check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
/* Overflow possible, we know nothing */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- } else {
- dst_reg->smin_value -= smax_val;
- dst_reg->smax_value -= smin_val;
+ *dst_smin = S64_MIN;
+ *dst_smax = S64_MAX;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
@@ -14026,6 +13995,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
+ bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
u8 opcode = BPF_OP(insn->code);
int err;
@@ -14048,11 +14018,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
if (dst_reg->type != SCALAR_VALUE)
ptr_reg = dst_reg;
- else
- /* Make sure ID is cleared otherwise dst_reg min/max could be
- * incorrectly propagated into other registers by find_equal_scalars()
- */
- dst_reg->id = 0;
+
if (BPF_SRC(insn->code) == BPF_X) {
src_reg = &regs[insn->src_reg];
if (src_reg->type != SCALAR_VALUE) {
@@ -14116,7 +14082,43 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
- return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
+ err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
+ if (err)
+ return err;
+ /*
+ * Compilers can generate the code
+ * r1 = r2
+ * r1 += 0x1
+ * if r2 < 1000 goto ...
+ * use r1 in memory access
+ * So remember the constant delta between r2 and r1 and update r1 after
+ * the 'if' condition.
+ */
+ if (env->bpf_capable && BPF_OP(insn->code) == BPF_ADD &&
+ dst_reg->id && is_reg_const(src_reg, alu32)) {
+ u64 val = reg_const_value(src_reg, alu32);
+
+ if ((dst_reg->id & BPF_ADD_CONST) ||
+ /* prevent overflow in find_equal_scalars() later */
+ val > (u32)S32_MAX) {
+ /*
+ * If the register already went through rX += val,
+ * we cannot accumulate another val into rX->off.
+ */
+ dst_reg->off = 0;
+ dst_reg->id = 0;
+ } else {
+ dst_reg->id |= BPF_ADD_CONST;
+ dst_reg->off = val;
+ }
+ } else {
+ /*
+ * Make sure ID is cleared otherwise dst_reg min/max could be
+ * incorrectly propagated into other registers by find_equal_scalars()
+ */
+ dst_reg->id = 0;
+ }
+ return 0;
}
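A hand-worked illustration of the tracking introduced here (register names and bounds are illustrative):

	/*
	 *   r1 = r2          ; r1 inherits r2's id
	 *   r1 += 16         ; r1->id |= BPF_ADD_CONST, r1->off = 16
	 *   if r2 < 1000 ... ; r2 is now known to be in [0, 999]
	 *   ...              ; find_equal_scalars() sees the shared id and the
	 *                    ; recorded delta, so r1 is refined to [16, 1015]
	 */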
/* check validity of 32-bit and 64-bit arithmetic operations */
@@ -15088,12 +15090,36 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
static void find_equal_scalars(struct bpf_verifier_state *vstate,
struct bpf_reg_state *known_reg)
{
+ struct bpf_reg_state fake_reg;
struct bpf_func_state *state;
struct bpf_reg_state *reg;
bpf_for_each_reg_in_vstate(vstate, state, reg, ({
- if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
+ if (reg->type != SCALAR_VALUE || reg == known_reg)
+ continue;
+ if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
+ continue;
+ if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) ||
+ reg->off == known_reg->off) {
copy_register_state(reg, known_reg);
+ } else {
+ s32 saved_off = reg->off;
+
+ fake_reg.type = SCALAR_VALUE;
+ __mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off);
+
+ /* reg = known_reg; reg += delta */
+ copy_register_state(reg, known_reg);
+ /*
+ * Must preserve off, id and add_const flag,
+ * otherwise another find_equal_scalars() will be incorrect.
+ */
+ reg->off = saved_off;
+
+ scalar32_min_max_add(reg, &fake_reg);
+ scalar_min_max_add(reg, &fake_reg);
+ reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
+ }
}));
}
@@ -15105,7 +15131,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
struct bpf_reg_state *eq_branch_regs;
- struct bpf_reg_state fake_reg = {};
u8 opcode = BPF_OP(insn->code);
bool is_jmp32;
int pred = -1;
@@ -15171,7 +15196,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
- src_reg = &fake_reg;
+ src_reg = &env->fake_reg[0];
+ memset(src_reg, 0, sizeof(*src_reg));
src_reg->type = SCALAR_VALUE;
__mark_reg_known(src_reg, insn->imm);
}
@@ -15231,10 +15257,16 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
&other_branch_regs[insn->src_reg],
dst_reg, src_reg, opcode, is_jmp32);
} else /* BPF_SRC(insn->code) == BPF_K */ {
+ /* reg_set_min_max() can mangle the fake_reg. Make a copy
+ * so that these are two different memory locations. The
+ * src_reg is not used beyond this point in the BPF_K case.
+ */
+ memcpy(&env->fake_reg[1], &env->fake_reg[0],
+ sizeof(env->fake_reg[0]));
err = reg_set_min_max(env,
&other_branch_regs[insn->dst_reg],
- src_reg /* fake one */,
- dst_reg, src_reg /* same fake one */,
+ &env->fake_reg[0],
+ dst_reg, &env->fake_reg[1],
opcode, is_jmp32);
}
if (err)
@@ -16722,6 +16754,10 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
}
if (!rold->precise && exact == NOT_EXACT)
return true;
+ if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST))
+ return false;
+ if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off))
+ return false;
/* Why check_ids() for scalar registers?
*
* Consider the following BPF code:
@@ -17433,11 +17469,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
goto skip_inf_loop_check;
}
if (is_may_goto_insn_at(env, insn_idx)) {
- if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
+ if (sl->state.may_goto_depth != cur->may_goto_depth &&
+ states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
update_loop_entry(cur, &sl->state);
goto hit;
}
- goto skip_inf_loop_check;
}
if (calls_callback(env, insn_idx)) {
if (states_equal(env, &sl->state, cur, RANGE_WITHIN))
@@ -18603,8 +18639,7 @@ static void release_maps(struct bpf_verifier_env *env)
/* drop refcnt of maps used by the rejected program */
static void release_btfs(struct bpf_verifier_env *env)
{
- __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
- env->used_btf_cnt);
+ __bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
@@ -18715,6 +18750,41 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return new_prog;
}
+/*
+ * For all jmp insns in a given 'prog' that point to the 'tgt_idx' insn,
+ * adjust the jump offset by 'delta'.
+ */
+static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
+{
+ struct bpf_insn *insn = prog->insnsi;
+ u32 insn_cnt = prog->len, i;
+ s32 imm;
+ s16 off;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ u8 code = insn->code;
+
+ if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
+ BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)
+ continue;
+
+ if (insn->code == (BPF_JMP32 | BPF_JA)) {
+ if (i + 1 + insn->imm != tgt_idx)
+ continue;
+ if (check_add_overflow(insn->imm, delta, &imm))
+ return -ERANGE;
+ insn->imm = imm;
+ } else {
+ if (i + 1 + insn->off != tgt_idx)
+ continue;
+ if (check_add_overflow(insn->off, delta, &off))
+ return -ERANGE;
+ insn->off = off;
+ }
+ }
+ return 0;
+}
+
static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
u32 off, u32 cnt)
{
@@ -19989,7 +20059,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
stack_depth_extra = 8;
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off);
- insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
+ if (insn->off >= 0)
+ insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
+ else
+ insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1);
insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1);
insn_buf[3] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off);
cnt = 4;
@@ -20305,7 +20378,7 @@ patch_map_ops_generic:
goto next_insn;
}
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
/* Implement bpf_get_smp_processor_id() inline. */
if (insn->imm == BPF_FUNC_get_smp_processor_id &&
prog->jit_requested && bpf_jit_supports_percpu_insn()) {
@@ -20531,6 +20604,13 @@ next_insn:
if (!new_prog)
return -ENOMEM;
env->prog = prog = new_prog;
+ /*
+ * If may_goto is the first insn of a prog there could be a jmp
+ * insn that points to it, hence adjust all such jmps to point
+ * to the insn after the BPF_ST that initializes the may_goto count.
+ * The adjustment will succeed because bpf_patch_insn_data() didn't fail.
+ */
+ WARN_ON(adjust_jmp_off(env->prog, subprog_start, 1));
}
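A hand-written illustration of the case handled above (instruction layout illustrative):

	/*
	 *   loop:  may_goto out      ; also the first insn of the subprog
	 *          ...
	 *          goto loop
	 *   out:   exit
	 *
	 * Lowering may_goto prepends a one-time BPF_ST at subprog_start that
	 * initializes the loop counter on the stack. The backward "goto loop"
	 * must not re-run that initialization, so adjust_jmp_off(prog,
	 * subprog_start, 1) bumps every jump targeting the old first insn to
	 * land one insn later, just past the inserted store.
	 */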
/* Since poke tab is now finalized, publish aux to tracker. */
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e32b6972c478..c8e4b62b436a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1744,8 +1744,11 @@ static int css_populate_dir(struct cgroup_subsys_state *css)
if (cgroup_psi_enabled()) {
ret = cgroup_addrm_files(css, cgrp,
cgroup_psi_files, true);
- if (ret < 0)
+ if (ret < 0) {
+ cgroup_addrm_files(css, cgrp,
+ cgroup_base_files, false);
return ret;
+ }
}
} else {
ret = cgroup_addrm_files(css, cgrp,
@@ -1839,9 +1842,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
rcu_assign_pointer(dcgrp->subsys[ssid], css);
ss->root = dst_root;
- css->cgroup = dcgrp;
spin_lock_irq(&css_set_lock);
+ css->cgroup = dcgrp;
WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
e_cset_node[ss->id]) {
@@ -1922,6 +1925,7 @@ enum cgroup2_param {
Opt_memory_localevents,
Opt_memory_recursiveprot,
Opt_memory_hugetlb_accounting,
+ Opt_pids_localevents,
nr__cgroup2_params
};
@@ -1931,6 +1935,7 @@ static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
fsparam_flag("memory_localevents", Opt_memory_localevents),
fsparam_flag("memory_recursiveprot", Opt_memory_recursiveprot),
fsparam_flag("memory_hugetlb_accounting", Opt_memory_hugetlb_accounting),
+ fsparam_flag("pids_localevents", Opt_pids_localevents),
{}
};
@@ -1960,6 +1965,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
case Opt_memory_hugetlb_accounting:
ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
return 0;
+ case Opt_pids_localevents:
+ ctx->flags |= CGRP_ROOT_PIDS_LOCAL_EVENTS;
+ return 0;
}
return -EINVAL;
}
@@ -1989,6 +1997,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
+
+ if (root_flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
+ cgrp_dfl_root.flags |= CGRP_ROOT_PIDS_LOCAL_EVENTS;
+ else
+ cgrp_dfl_root.flags &= ~CGRP_ROOT_PIDS_LOCAL_EVENTS;
}
}
@@ -2004,6 +2017,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
seq_puts(seq, ",memory_recursiveprot");
if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
seq_puts(seq, ",memory_hugetlb_accounting");
+ if (cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
+ seq_puts(seq, ",pids_localevents");
return 0;
}
@@ -6686,8 +6701,10 @@ void cgroup_exit(struct task_struct *tsk)
WARN_ON_ONCE(list_empty(&tsk->cg_list));
cset = task_css_set(tsk);
css_set_move_task(tsk, cset, NULL, false);
- list_add_tail(&tsk->cg_list, &cset->dying_tasks);
cset->nr_tasks--;
+ /* matches the signal->live check in css_task_iter_advance() */
+ if (thread_group_leader(tsk) && atomic_read(&tsk->signal->live))
+ list_add_tail(&tsk->cg_list, &cset->dying_tasks);
if (dl_task(tsk))
dec_dl_tasks_cs(tsk);
@@ -6714,10 +6731,12 @@ void cgroup_release(struct task_struct *task)
ss->release(task);
} while_each_subsys_mask();
- spin_lock_irq(&css_set_lock);
- css_set_skip_task_iters(task_css_set(task), task);
- list_del_init(&task->cg_list);
- spin_unlock_irq(&css_set_lock);
+ if (!list_empty(&task->cg_list)) {
+ spin_lock_irq(&css_set_lock);
+ css_set_skip_task_iters(task_css_set(task), task);
+ list_del_init(&task->cg_list);
+ spin_unlock_irq(&css_set_lock);
+ }
}
void cgroup_free(struct task_struct *task)
@@ -7062,7 +7081,8 @@ static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
"favordynmods\n"
"memory_localevents\n"
"memory_recursiveprot\n"
- "memory_hugetlb_accounting\n");
+ "memory_hugetlb_accounting\n"
+ "pids_localevents\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index c12b9fdb22a4..40ec4abaf440 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -21,6 +21,7 @@
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
+#include "cgroup-internal.h"
#include <linux/cpu.h>
#include <linux/cpumask.h>
@@ -87,7 +88,7 @@ static const char * const perr_strings[] = {
[PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
[PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
[PERR_HOTPLUG] = "No cpu available due to hotplug",
- [PERR_CPUSEMPTY] = "cpuset.cpus is empty",
+ [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
[PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
};
@@ -127,19 +128,28 @@ struct cpuset {
/*
* Exclusive CPUs dedicated to current cgroup (default hierarchy only)
*
- * This exclusive CPUs must be a subset of cpus_allowed. A parent
- * cgroup can only grant exclusive CPUs to one of its children.
+ * The effective_cpus of a valid partition root comes solely from its
+ * effective_xcpus, and some of the effective_xcpus may be distributed
+ * to sub-partitions below and hence excluded from its effective_cpus.
+ * For a valid partition root, its effective_cpus bears no relationship
+ * to cpus_allowed unless its exclusive_cpus is unset.
*
- * When the cgroup becomes a valid partition root, effective_xcpus
- * defaults to cpus_allowed if not set. The effective_cpus of a valid
- * partition root comes solely from its effective_xcpus and some of the
- * effective_xcpus may be distributed to sub-partitions below & hence
- * excluded from its effective_cpus.
+ * This value is only set if exclusive_cpus is set or when this cpuset
+ * becomes a local partition root.
*/
cpumask_var_t effective_xcpus;
/*
* Exclusive CPUs as requested by the user (default hierarchy only)
+ *
+ * Its value is independent of cpus_allowed and designates the set of
+ * CPUs that can be granted to the current cpuset or its children when
+ * it becomes a valid partition root. The effective set of exclusive
+ * CPUs granted (effective_xcpus) depends on whether those exclusive
+ * CPUs are passed down by its ancestors and not yet taken up by
+ * another sibling partition root along the way.
+ *
+ * If its value isn't set, it defaults to cpus_allowed.
*/
cpumask_var_t exclusive_cpus;
@@ -169,7 +179,7 @@ struct cpuset {
/* for custom sched domain */
int relax_domain_level;
- /* number of valid sub-partitions */
+ /* number of valid local child partitions */
int nr_subparts;
/* partition root state */
@@ -230,6 +240,17 @@ static struct list_head remote_children;
* 2 - partition root without load balancing (isolated)
* -1 - invalid partition root
* -2 - invalid isolated partition root
+ *
+ * There are two types of partitions - local and remote. Local partitions
+ * are those whose parents are partition roots themselves. Setting
+ * cpuset.cpus.exclusive is optional when setting up a local partition.
+ * Remote partitions are those whose parents are not partition roots.
+ * Passing down exclusive CPUs by setting cpuset.cpus.exclusive along the
+ * ancestor nodes is mandatory when creating a remote partition.
+ *
+ * For simplicity, a local partition can be created under a local or remote
+ * partition but a remote partition cannot have any partition root in its
+ * ancestor chain except the cgroup root.
*/
#define PRS_MEMBER 0
#define PRS_ROOT 1
@@ -434,7 +455,7 @@ static struct cpuset top_cpuset = {
* by other task, we use alloc_lock in the task_struct fields to protect
* them.
*
- * The cpuset_common_file_read() handlers only hold callback_lock across
+ * The cpuset_common_seq_show() handlers only hold callback_lock across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*
@@ -709,6 +730,19 @@ static inline void free_cpuset(struct cpuset *cs)
kfree(cs);
}
+/* Return user specified exclusive CPUs */
+static inline struct cpumask *user_xcpus(struct cpuset *cs)
+{
+ return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
+ : cs->exclusive_cpus;
+}
+
+static inline bool xcpus_empty(struct cpuset *cs)
+{
+ return cpumask_empty(cs->cpus_allowed) &&
+ cpumask_empty(cs->exclusive_cpus);
+}
+
static inline struct cpumask *fetch_xcpus(struct cpuset *cs)
{
return !cpumask_empty(cs->exclusive_cpus) ? cs->exclusive_cpus :
@@ -825,17 +859,41 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
/*
* If either I or some sibling (!= me) is exclusive, we can't
- * overlap
+ * overlap. The exclusive_cpus masks, if set, cannot overlap with each other either.
*/
ret = -EINVAL;
cpuset_for_each_child(c, css, par) {
- if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
- c != cur) {
+ bool txset, cxset; /* Are exclusive_cpus set? */
+
+ if (c == cur)
+ continue;
+
+ txset = !cpumask_empty(trial->exclusive_cpus);
+ cxset = !cpumask_empty(c->exclusive_cpus);
+ if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) ||
+ (txset && cxset)) {
if (!cpusets_are_exclusive(trial, c))
goto out;
+ } else if (txset || cxset) {
+ struct cpumask *xcpus, *acpus;
+
+ /*
+ * When only one of the two exclusive_cpus masks is set, the
+ * other cpuset's cpus_allowed, if set, cannot be a subset of
+ * it; otherwise none of those CPUs will be available when the
+ * exclusive CPUs are activated.
+ */
+ if (txset) {
+ xcpus = trial->exclusive_cpus;
+ acpus = c->cpus_allowed;
+ } else {
+ xcpus = c->exclusive_cpus;
+ acpus = trial->cpus_allowed;
+ }
+ if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus))
+ goto out;
}
if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
- c != cur &&
nodes_intersects(trial->mems_allowed, c->mems_allowed))
goto out;
}
@@ -957,13 +1015,15 @@ static int generate_sched_domains(cpumask_var_t **domains,
int nslot; /* next empty doms[] struct cpumask slot */
struct cgroup_subsys_state *pos_css;
bool root_load_balance = is_sched_load_balance(&top_cpuset);
+ bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
doms = NULL;
dattr = NULL;
csa = NULL;
/* Special case for the 99% of systems with one, full, sched domain */
- if (root_load_balance && !top_cpuset.nr_subparts) {
+ if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
+single_root_domain:
ndoms = 1;
doms = alloc_sched_domains(ndoms);
if (!doms)
@@ -991,16 +1051,18 @@ static int generate_sched_domains(cpumask_var_t **domains,
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
if (cp == &top_cpuset)
continue;
+
+ if (cgrpv2)
+ goto v2;
+
/*
+ * v1:
* Continue traversing beyond @cp iff @cp has some CPUs and
* isn't load balancing. The former is obvious. The
* latter: All child cpusets contain a subset of the
* parent's cpus, so just skip them, and then we call
* update_domain_attr_tree() to calc relax_domain_level of
* the corresponding sched domain.
- *
- * If root is load-balancing, we can skip @cp if it
- * is a subset of the root's effective_cpus.
*/
if (!cpumask_empty(cp->cpus_allowed) &&
!(is_sched_load_balance(cp) &&
@@ -1008,20 +1070,39 @@ static int generate_sched_domains(cpumask_var_t **domains,
housekeeping_cpumask(HK_TYPE_DOMAIN))))
continue;
- if (root_load_balance &&
- cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
- continue;
-
if (is_sched_load_balance(cp) &&
!cpumask_empty(cp->effective_cpus))
csa[csn++] = cp;
- /* skip @cp's subtree if not a partition root */
- if (!is_partition_valid(cp))
+ /* skip @cp's subtree */
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+
+v2:
+ /*
+ * Only valid partition roots that are not isolated and have a
+ * non-empty effective_cpus will be saved into csa[].
+ */
+ if ((cp->partition_root_state == PRS_ROOT) &&
+ !cpumask_empty(cp->effective_cpus))
+ csa[csn++] = cp;
+
+ /*
+ * Skip @cp's subtree if it is not a partition root and has no
+ * exclusive CPUs to be granted to child cpusets.
+ */
+ if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
pos_css = css_rightmost_descendant(pos_css);
}
rcu_read_unlock();
+ /*
+ * If there are only isolated partitions underneath the cgroup root,
+ * we can optimize out the unneeded sched domain scanning.
+ */
+ if (root_load_balance && (csn == 1))
+ goto single_root_domain;
+
for (i = 0; i < csn; i++)
csa[i]->pn = i;
ndoms = csn;
@@ -1064,6 +1145,20 @@ restart:
dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
GFP_KERNEL);
+ /*
+ * Cgroup v2 doesn't support domain attributes, just set all of them
+ * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
+ * subset of HK_TYPE_DOMAIN housekeeping CPUs.
+ */
+ if (cgrpv2) {
+ for (i = 0; i < ndoms; i++) {
+ cpumask_copy(doms[i], csa[i]->effective_cpus);
+ if (dattr)
+ dattr[i] = SD_ATTR_INIT;
+ }
+ goto done;
+ }
+
for (nslot = 0, i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
struct cpumask *dp;
@@ -1223,7 +1318,7 @@ static void rebuild_sched_domains_locked(void)
* root should be only a subset of the active CPUs. Since a CPU in any
* partition root could be offlined, all must be checked.
*/
- if (top_cpuset.nr_subparts) {
+ if (!cpumask_empty(subpartitions_cpus)) {
rcu_read_lock();
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
if (!is_partition_valid(cs)) {
@@ -1338,7 +1433,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
*/
static int update_partition_exclusive(struct cpuset *cs, int new_prs)
{
- bool exclusive = (new_prs > 0);
+ bool exclusive = (new_prs > PRS_MEMBER);
if (exclusive && !is_cpu_exclusive(cs)) {
if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
@@ -1532,7 +1627,7 @@ EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
* Return: true if xcpus is not empty, false otherwise.
*
* Starting with exclusive_cpus (cpus_allowed if exclusive_cpus is not set),
- * it must be a subset of cpus_allowed and parent's effective_xcpus.
+ * it must be a subset of parent's effective_xcpus.
*/
static bool compute_effective_exclusive_cpumask(struct cpuset *cs,
struct cpumask *xcpus)
@@ -1542,12 +1637,7 @@ static bool compute_effective_exclusive_cpumask(struct cpuset *cs,
if (!xcpus)
xcpus = cs->effective_xcpus;
- if (!cpumask_empty(cs->exclusive_cpus))
- cpumask_and(xcpus, cs->exclusive_cpus, cs->cpus_allowed);
- else
- cpumask_copy(xcpus, cs->cpus_allowed);
-
- return cpumask_and(xcpus, xcpus, parent->effective_xcpus);
+ return cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus);
}
static inline bool is_remote_partition(struct cpuset *cs)
@@ -1826,8 +1916,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
*/
adding = deleting = false;
old_prs = new_prs = cs->partition_root_state;
- xcpus = !cpumask_empty(cs->exclusive_cpus)
- ? cs->effective_xcpus : cs->cpus_allowed;
+ xcpus = user_xcpus(cs);
if (cmd == partcmd_invalidate) {
if (is_prs_invalid(old_prs))
@@ -1855,7 +1944,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
return is_partition_invalid(parent)
? PERR_INVPARENT : PERR_NOTPART;
}
- if (!newmask && cpumask_empty(cs->cpus_allowed))
+ if (!newmask && xcpus_empty(cs))
return PERR_CPUSEMPTY;
nocpu = tasks_nocpu_error(parent, cs, xcpus);
@@ -2583,8 +2672,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
retval = cpulist_parse(buf, trialcs->exclusive_cpus);
if (retval < 0)
return retval;
- if (!is_cpu_exclusive(cs))
- set_bit(CS_CPU_EXCLUSIVE, &trialcs->flags);
}
/* Nothing to do if the CPUs didn't change */
@@ -3071,9 +3158,9 @@ static int update_prstate(struct cpuset *cs, int new_prs)
? partcmd_enable : partcmd_enablei;
/*
- * cpus_allowed cannot be empty.
+ * cpus_allowed and exclusive_cpus cannot be both empty.
*/
- if (cpumask_empty(cs->cpus_allowed)) {
+ if (xcpus_empty(cs)) {
err = PERR_CPUSEMPTY;
goto out;
}
@@ -4009,8 +4096,6 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
}
__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
- nodes_clear(cs->mems_allowed);
- nodes_clear(cs->effective_mems);
fmeter_init(&cs->fmeter);
cs->relax_domain_level = -1;
INIT_LIST_HEAD(&cs->remote_sibling);
@@ -4040,6 +4125,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
set_bit(CS_SPREAD_PAGE, &cs->flags);
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
+ /*
+ * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
+ */
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ !is_sched_load_balance(parent))
+ clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cpuset_inc();
@@ -4050,14 +4141,6 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cs->use_parent_ecpus = true;
parent->child_ecpus_count++;
}
-
- /*
- * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
- */
- if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
- !is_sched_load_balance(parent))
- clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
-
spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
@@ -4571,7 +4654,7 @@ static void cpuset_handle_hotplug(void)
* In the rare case that hotplug removes all the cpus in
* subpartitions_cpus, we assumed that cpus are updated.
*/
- if (!cpus_updated && top_cpuset.nr_subparts)
+ if (!cpus_updated && !cpumask_empty(subpartitions_cpus))
cpus_updated = true;
/* For v1, synchronize cpus_allowed to cpu_active_mask */
@@ -5051,10 +5134,14 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
if (!buf)
goto out;
- css = task_get_css(tsk, cpuset_cgrp_id);
- retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
- current->nsproxy->cgroup_ns);
- css_put(css);
+ rcu_read_lock();
+ spin_lock_irq(&css_set_lock);
+ css = task_css(tsk, cpuset_cgrp_id);
+ retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
+ current->nsproxy->cgroup_ns);
+ spin_unlock_irq(&css_set_lock);
+ rcu_read_unlock();
+
if (retval == -E2BIG)
retval = -ENAMETOOLONG;
if (retval < 0)
diff --git a/kernel/cgroup/misc.c b/kernel/cgroup/misc.c
index 79a3717a5803..0e26068995a6 100644
--- a/kernel/cgroup/misc.c
+++ b/kernel/cgroup/misc.c
@@ -121,6 +121,30 @@ static void misc_cg_cancel_charge(enum misc_res_type type, struct misc_cg *cg,
misc_res_name[type]);
}
+static void misc_cg_update_watermark(struct misc_res *res, u64 new_usage)
+{
+ u64 old;
+
+ while (true) {
+ old = atomic64_read(&res->watermark);
+ if (new_usage <= old)
+ break;
+ if (atomic64_cmpxchg(&res->watermark, old, new_usage) == old)
+ break;
+ }
+}
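For intuition, a worked interleaving of the lock-free watermark update (values illustrative):

	/*
	 *   watermark == 10
	 *   A: new_usage = 30, reads 10, cmpxchg(10 -> 30) succeeds -> done
	 *   B: new_usage = 20, reads 10, cmpxchg(10 -> 20) fails (sees 30),
	 *      re-reads 30, 20 <= 30 -> done
	 *
	 * The watermark is therefore monotonically non-decreasing and never
	 * misses a concurrent peak.
	 */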
+
+static void misc_cg_event(enum misc_res_type type, struct misc_cg *cg)
+{
+ atomic64_inc(&cg->res[type].events_local);
+ cgroup_file_notify(&cg->events_local_file);
+
+ for (; parent_misc(cg); cg = parent_misc(cg)) {
+ atomic64_inc(&cg->res[type].events);
+ cgroup_file_notify(&cg->events_file);
+ }
+}
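A worked example of how the two counters diverge (hierarchy and resource are illustrative):

	/*
	 *   root
	 *   └── A (misc.max for "sev" reached here)
	 *       └── B                 <- an SEV ASID charge originates in B
	 *
	 * The charge walks up from B and fails at A, so misc_cg_event() is
	 * called on A:
	 *   A: misc.events.local "sev.max" +1  and  misc.events "sev.max" +1
	 *   B: neither file changes (the failure is not local to B)
	 * The upward walk stops before the root cgroup, whose counter is
	 * left untouched.
	 */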
+
/**
* misc_cg_try_charge() - Try charging the misc cgroup.
* @type: Misc res type to charge.
@@ -159,14 +183,12 @@ int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount)
ret = -EBUSY;
goto err_charge;
}
+ misc_cg_update_watermark(res, new_usage);
}
return 0;
err_charge:
- for (j = i; j; j = parent_misc(j)) {
- atomic64_inc(&j->res[type].events);
- cgroup_file_notify(&j->events_file);
- }
+ misc_cg_event(type, i);
for (j = cg; j != i; j = parent_misc(j))
misc_cg_cancel_charge(type, j, amount);
@@ -308,6 +330,29 @@ static int misc_cg_current_show(struct seq_file *sf, void *v)
}
/**
+ * misc_cg_peak_show() - Show the peak usage of the misc cgroup.
+ * @sf: Interface file
+ * @v: Arguments passed
+ *
+ * Context: Any context.
+ * Return: 0 to denote successful print.
+ */
+static int misc_cg_peak_show(struct seq_file *sf, void *v)
+{
+ int i;
+ u64 watermark;
+ struct misc_cg *cg = css_misc(seq_css(sf));
+
+ for (i = 0; i < MISC_CG_RES_TYPES; i++) {
+ watermark = atomic64_read(&cg->res[i].watermark);
+ if (READ_ONCE(misc_res_capacity[i]) || watermark)
+ seq_printf(sf, "%s %llu\n", misc_res_name[i], watermark);
+ }
+
+ return 0;
+}
+
+/**
* misc_cg_capacity_show() - Show the total capacity of misc res on the host.
* @sf: Interface file
* @v: Arguments passed
@@ -331,20 +376,33 @@ static int misc_cg_capacity_show(struct seq_file *sf, void *v)
return 0;
}
-static int misc_events_show(struct seq_file *sf, void *v)
+static int __misc_events_show(struct seq_file *sf, bool local)
{
struct misc_cg *cg = css_misc(seq_css(sf));
u64 events;
int i;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
- events = atomic64_read(&cg->res[i].events);
+ if (local)
+ events = atomic64_read(&cg->res[i].events_local);
+ else
+ events = atomic64_read(&cg->res[i].events);
if (READ_ONCE(misc_res_capacity[i]) || events)
seq_printf(sf, "%s.max %llu\n", misc_res_name[i], events);
}
return 0;
}
+static int misc_events_show(struct seq_file *sf, void *v)
+{
+ return __misc_events_show(sf, false);
+}
+
+static int misc_events_local_show(struct seq_file *sf, void *v)
+{
+ return __misc_events_show(sf, true);
+}
+
/* Misc cgroup interface files */
static struct cftype misc_cg_files[] = {
{
@@ -358,6 +416,10 @@ static struct cftype misc_cg_files[] = {
.seq_show = misc_cg_current_show,
},
{
+ .name = "peak",
+ .seq_show = misc_cg_peak_show,
+ },
+ {
.name = "capacity",
.seq_show = misc_cg_capacity_show,
.flags = CFTYPE_ONLY_ON_ROOT,
@@ -368,6 +430,12 @@ static struct cftype misc_cg_files[] = {
.file_offset = offsetof(struct misc_cg, events_file),
.seq_show = misc_events_show,
},
+ {
+ .name = "events.local",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .file_offset = offsetof(struct misc_cg, events_local_file),
+ .seq_show = misc_events_local_show,
+ },
{}
};
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index 0e5ec7d59b4d..f5cb0ec45b9d 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -38,6 +38,14 @@
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"
+enum pidcg_event {
+ /* Fork failed in subtree because this pids_cgroup limit was hit. */
+ PIDCG_MAX,
+ /* Fork failed in this pids_cgroup because ancestor limit was hit. */
+ PIDCG_FORKFAIL,
+ NR_PIDCG_EVENTS,
+};
+
struct pids_cgroup {
struct cgroup_subsys_state css;
@@ -49,11 +57,12 @@ struct pids_cgroup {
atomic64_t limit;
int64_t watermark;
- /* Handle for "pids.events" */
+ /* Handles for pids.events[.local] */
struct cgroup_file events_file;
+ struct cgroup_file events_local_file;
- /* Number of times fork failed because limit was hit. */
- atomic64_t events_limit;
+ atomic64_t events[NR_PIDCG_EVENTS];
+ atomic64_t events_local[NR_PIDCG_EVENTS];
};
static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
@@ -148,12 +157,13 @@ static void pids_charge(struct pids_cgroup *pids, int num)
* pids_try_charge - hierarchically try to charge the pid count
* @pids: the pid cgroup state
* @num: the number of pids to charge
+ * @fail: storage for the pids_cgroup that caused the failure
*
* This function follows the set limit. It will fail if the charge would cause
* the new value to exceed the hierarchical limit. Returns 0 if the charge
* succeeded, otherwise -EAGAIN.
*/
-static int pids_try_charge(struct pids_cgroup *pids, int num)
+static int pids_try_charge(struct pids_cgroup *pids, int num, struct pids_cgroup **fail)
{
struct pids_cgroup *p, *q;
@@ -166,9 +176,10 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
* p->limit is %PIDS_MAX then we know that this test will never
* fail.
*/
- if (new > limit)
+ if (new > limit) {
+ *fail = p;
goto revert;
-
+ }
/*
* Not technically accurate if we go over limit somewhere up
* the hierarchy, but that's tolerable for the watermark.
@@ -229,6 +240,36 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
}
}
+static void pids_event(struct pids_cgroup *pids_forking,
+ struct pids_cgroup *pids_over_limit)
+{
+ struct pids_cgroup *p = pids_forking;
+ bool limit = false;
+
+ /* Only log the first time the limit is hit. */
+ if (atomic64_inc_return(&p->events_local[PIDCG_FORKFAIL]) == 1) {
+ pr_info("cgroup: fork rejected by pids controller in ");
+ pr_cont_cgroup_path(p->css.cgroup);
+ pr_cont("\n");
+ }
+ cgroup_file_notify(&p->events_local_file);
+ if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) ||
+ cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
+ return;
+
+ for (; parent_pids(p); p = parent_pids(p)) {
+ if (p == pids_over_limit) {
+ limit = true;
+ atomic64_inc(&p->events_local[PIDCG_MAX]);
+ cgroup_file_notify(&p->events_local_file);
+ }
+ if (limit)
+ atomic64_inc(&p->events[PIDCG_MAX]);
+
+ cgroup_file_notify(&p->events_file);
+ }
+}
+
/*
* task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
* on cgroup_threadgroup_change_begin() held by the copy_process().
@@ -236,7 +277,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
static int pids_can_fork(struct task_struct *task, struct css_set *cset)
{
struct cgroup_subsys_state *css;
- struct pids_cgroup *pids;
+ struct pids_cgroup *pids, *pids_over_limit;
int err;
if (cset)
@@ -244,16 +285,10 @@ static int pids_can_fork(struct task_struct *task, struct css_set *cset)
else
css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);
- err = pids_try_charge(pids, 1);
- if (err) {
- /* Only log the first time events_limit is incremented. */
- if (atomic64_inc_return(&pids->events_limit) == 1) {
- pr_info("cgroup: fork rejected by pids controller in ");
- pr_cont_cgroup_path(css->cgroup);
- pr_cont("\n");
- }
- cgroup_file_notify(&pids->events_file);
- }
+ err = pids_try_charge(pids, 1, &pids_over_limit);
+ if (err)
+ pids_event(pids, pids_over_limit);
+
return err;
}
@@ -337,11 +372,32 @@ static s64 pids_peak_read(struct cgroup_subsys_state *css,
return READ_ONCE(pids->watermark);
}
-static int pids_events_show(struct seq_file *sf, void *v)
+static int __pids_events_show(struct seq_file *sf, bool local)
{
struct pids_cgroup *pids = css_pids(seq_css(sf));
+ enum pidcg_event pe = PIDCG_MAX;
+ atomic64_t *events;
+
+ if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) ||
+ cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) {
+ pe = PIDCG_FORKFAIL;
+ local = true;
+ }
+ events = local ? pids->events_local : pids->events;
+
+ seq_printf(sf, "max %lld\n", (s64)atomic64_read(&events[pe]));
+ return 0;
+}
- seq_printf(sf, "max %lld\n", (s64)atomic64_read(&pids->events_limit));
+static int pids_events_show(struct seq_file *sf, void *v)
+{
+ __pids_events_show(sf, false);
+ return 0;
+}
+
+static int pids_events_local_show(struct seq_file *sf, void *v)
+{
+ __pids_events_show(sf, true);
return 0;
}
@@ -368,9 +424,42 @@ static struct cftype pids_files[] = {
.file_offset = offsetof(struct pids_cgroup, events_file),
.flags = CFTYPE_NOT_ON_ROOT,
},
+ {
+ .name = "events.local",
+ .seq_show = pids_events_local_show,
+ .file_offset = offsetof(struct pids_cgroup, events_local_file),
+ .flags = CFTYPE_NOT_ON_ROOT,
+ },
+ { } /* terminate */
+};
+
+static struct cftype pids_files_legacy[] = {
+ {
+ .name = "max",
+ .write = pids_max_write,
+ .seq_show = pids_max_show,
+ .flags = CFTYPE_NOT_ON_ROOT,
+ },
+ {
+ .name = "current",
+ .read_s64 = pids_current_read,
+ .flags = CFTYPE_NOT_ON_ROOT,
+ },
+ {
+ .name = "peak",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .read_s64 = pids_peak_read,
+ },
+ {
+ .name = "events",
+ .seq_show = pids_events_show,
+ .file_offset = offsetof(struct pids_cgroup, events_file),
+ .flags = CFTYPE_NOT_ON_ROOT,
+ },
{ } /* terminate */
};
+
struct cgroup_subsys pids_cgrp_subsys = {
.css_alloc = pids_css_alloc,
.css_free = pids_css_free,
@@ -379,7 +468,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
.can_fork = pids_can_fork,
.cancel_fork = pids_cancel_fork,
.release = pids_release,
- .legacy_cftypes = pids_files,
+ .legacy_cftypes = pids_files_legacy,
.dfl_cftypes = pids_files,
.threaded = true,
};
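Hedged illustration (not from the patch) of the split the pids_event() logic above introduces on cgroup v2: pids.events now counts limit hits hierarchically, while the new pids.events.local keeps the per-cgroup count. The helper below simply prints both files; the /sys/fs/cgroup path is an assumption:

#include <stdio.h>

static void cat_pids_file(const char *cg, const char *name)
{
	char path[256], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/cgroup/%s/%s", cg, name);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		printf("%s: %s", name, line);	/* each line is "max <count>" */
	fclose(f);
}

static void show_pids_events(const char *cg)
{
	cat_pids_file(cg, "pids.events");
	cat_pids_file(cg, "pids.events.local");
}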
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index fb8b49437573..a06b45272411 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -594,49 +594,46 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
}
}
+
+static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat *bstat)
+{
+#ifdef CONFIG_SCHED_CORE
+ u64 forceidle_time = bstat->forceidle_sum;
+
+ do_div(forceidle_time, NSEC_PER_USEC);
+ seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
+#endif
+}
+
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
u64 usage, utime, stime;
- struct cgroup_base_stat bstat;
-#ifdef CONFIG_SCHED_CORE
- u64 forceidle_time;
-#endif
if (cgroup_parent(cgrp)) {
cgroup_rstat_flush_hold(cgrp);
usage = cgrp->bstat.cputime.sum_exec_runtime;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
&utime, &stime);
-#ifdef CONFIG_SCHED_CORE
- forceidle_time = cgrp->bstat.forceidle_sum;
-#endif
cgroup_rstat_flush_release(cgrp);
} else {
- root_cgroup_cputime(&bstat);
- usage = bstat.cputime.sum_exec_runtime;
- utime = bstat.cputime.utime;
- stime = bstat.cputime.stime;
-#ifdef CONFIG_SCHED_CORE
- forceidle_time = bstat.forceidle_sum;
-#endif
+ /* cgrp->bstat of root is not actually used, reuse it */
+ root_cgroup_cputime(&cgrp->bstat);
+ usage = cgrp->bstat.cputime.sum_exec_runtime;
+ utime = cgrp->bstat.cputime.utime;
+ stime = cgrp->bstat.cputime.stime;
}
do_div(usage, NSEC_PER_USEC);
do_div(utime, NSEC_PER_USEC);
do_div(stime, NSEC_PER_USEC);
-#ifdef CONFIG_SCHED_CORE
- do_div(forceidle_time, NSEC_PER_USEC);
-#endif
seq_printf(seq, "usage_usec %llu\n"
"user_usec %llu\n"
"system_usec %llu\n",
usage, utime, stime);
-#ifdef CONFIG_SCHED_CORE
- seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
-#endif
+ cgroup_force_idle_show(seq, &cgrp->bstat);
}
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 563877d6c28b..1209ddaec026 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -483,6 +483,8 @@ static int cpu_hotplug_disabled;
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
+static bool cpu_hotplug_offline_disabled __ro_after_init;
+
void cpus_read_lock(void)
{
percpu_down_read(&cpu_hotplug_lock);
@@ -542,6 +544,14 @@ static void lockdep_release_cpus_lock(void)
rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}
+/* Declare CPU offlining not supported */
+void cpu_hotplug_disable_offlining(void)
+{
+ cpu_maps_update_begin();
+ cpu_hotplug_offline_disabled = true;
+ cpu_maps_update_done();
+}
+
/*
* Wait for currently running CPU hotplug operations to complete (if any) and
* disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -1471,7 +1481,7 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
* If the platform does not support hotplug, report it explicitly to
* differentiate it from a transient offlining failure.
*/
- if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
+ if (cpu_hotplug_offline_disabled)
return -EOPNOTSUPP;
if (cpu_hotplug_disabled)
return -EBUSY;
@@ -1859,6 +1869,9 @@ static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return fals
void __init bringup_nonboot_cpus(unsigned int max_cpus)
{
+ if (!max_cpus)
+ return;
+
/* Try parallel bringup optimization if enabled */
if (cpuhp_bringup_cpus_parallel(max_cpus))
return;
@@ -1891,8 +1904,8 @@ int freeze_secondary_cpus(int primary)
cpumask_clear(frozen_cpus);
pr_info("Disabling non-boot CPUs ...\n");
- for_each_online_cpu(cpu) {
- if (cpu == primary)
+ for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
+ if (!cpu_online(cpu) || cpu == primary)
continue;
if (pm_wakeup_pending()) {
@@ -2446,7 +2459,7 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
* The caller needs to hold cpus read locked while calling this function.
* Return:
* On success:
- * Positive state number if @state is CPUHP_AP_ONLINE_DYN;
+ * Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
* 0 for all other states
* On failure: proper (negative) error code
*/
@@ -2469,7 +2482,7 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
- dynstate = state == CPUHP_AP_ONLINE_DYN;
+ dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
if (ret > 0 && dynstate) {
state = ret;
ret = 0;
@@ -2500,8 +2513,8 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
out:
mutex_unlock(&cpuhp_state_mutex);
/*
- * If the requested state is CPUHP_AP_ONLINE_DYN, return the
- * dynamically allocated state in case of success.
+ * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
+ * return the dynamically allocated state in case of success.
*/
if (!ret && dynstate)
return state;
@@ -3069,6 +3082,9 @@ EXPORT_SYMBOL(__cpu_possible_mask);
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);
+struct cpumask __cpu_enabled_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_enabled_mask);
+
struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);
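The CPUHP_BP_PREPARE_DYN change above means callers now get the dynamically allocated state number back, just as for CPUHP_AP_ONLINE_DYN. A small sketch of how a driver might use that (illustrative only; my_prepare_cpu/my_dead_cpu and the state name are made up):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int my_prepare_cpu(unsigned int cpu)
{
	/* allocate per-CPU resources before the CPU is brought up */
	return 0;
}

static int my_dead_cpu(unsigned int cpu)
{
	/* release them once the CPU is gone */
	return 0;
}

static int my_state;

static int __init my_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "mydrv:prepare",
				my_prepare_cpu, my_dead_cpu);
	if (ret < 0)
		return ret;
	my_state = ret;	/* positive dynamic state, needed for removal */
	return 0;
}

static void my_exit(void)
{
	cpuhp_remove_state(my_state);
}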
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 02205ab53b7e..4950e0b622b1 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
struct task_struct **tsk;
int threads = map->bparam.threads;
int node = map->bparam.node;
- const cpumask_t *cpu_mask = cpumask_of_node(node);
u64 loops;
int ret = 0;
int i;
@@ -118,11 +117,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
if (IS_ERR(tsk[i])) {
pr_err("create dma_map thread failed\n");
ret = PTR_ERR(tsk[i]);
+ while (--i >= 0)
+ kthread_stop(tsk[i]);
goto out;
}
if (node != NUMA_NO_NODE)
- kthread_bind_mask(tsk[i], cpu_mask);
+ kthread_bind_mask(tsk[i], cpumask_of_node(node));
}
/* clear the old value in the previous benchmark */
@@ -139,13 +140,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
msleep_interruptible(map->bparam.seconds * 1000);
- /* wait for the completion of benchmark threads */
+ /* wait for the completion of all started benchmark threads */
for (i = 0; i < threads; i++) {
- ret = kthread_stop(tsk[i]);
- if (ret)
- goto out;
+ int kthread_ret = kthread_stop_put(tsk[i]);
+
+ if (kthread_ret)
+ ret = kthread_ret;
}
+ if (ret)
+ goto out;
+
loops = atomic64_read(&map->loops);
if (likely(loops > 0)) {
u64 map_variance, unmap_variance;
@@ -170,8 +175,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
}
out:
- for (i = 0; i < threads; i++)
- put_task_struct(tsk[i]);
put_device(map->dev);
kfree(tsk);
return ret;
@@ -208,7 +211,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
}
if (map->bparam.node != NUMA_NO_NODE &&
- !node_possible(map->bparam.node)) {
+ (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
+ !node_possible(map->bparam.node))) {
pr_err("invalid numa node\n");
return -EINVAL;
}
@@ -252,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
* dma_mask changed by benchmark
*/
dma_set_mask(map->dev, old_dma_mask);
+
+ if (ret)
+ return ret;
break;
default:
return -EINVAL;
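The map_benchmark fix above stops the kthreads that were already created when a later kthread_create() fails. A generic sketch of that partial-failure cleanup idiom (assumption, not taken from the file):

#include <linux/kthread.h>
#include <linux/err.h>

static int start_workers(struct task_struct **tsk, int n,
			 int (*fn)(void *), void *data)
{
	int i;

	for (i = 0; i < n; i++) {
		tsk[i] = kthread_create(fn, data, "worker/%d", i);
		if (IS_ERR(tsk[i])) {
			int ret = PTR_ERR(tsk[i]);

			/* unwind the threads already created */
			while (--i >= 0)
				kthread_stop(tsk[i]);
			return ret;
		}
	}
	return 0;
}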
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 1273be84392c..ad57944b6c40 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -29,7 +29,7 @@ static inline size_t perf_callchain_entry__sizeof(void)
sysctl_perf_event_max_contexts_per_stack));
}
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static DEFINE_PER_CPU(u8, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f0128c5ff278..ab6c4c942f79 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2283,21 +2283,6 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
state = PERF_EVENT_STATE_OFF;
}
- if (event->pending_sigtrap) {
- bool dec = true;
-
- event->pending_sigtrap = 0;
- if (state != PERF_EVENT_STATE_OFF &&
- !event->pending_work) {
- event->pending_work = 1;
- dec = false;
- WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
- task_work_add(current, &event->pending_task, TWA_RESUME);
- }
- if (dec)
- local_dec(&event->ctx->nr_pending);
- }
-
perf_event_set_state(event, state);
if (!is_software_event(event))
@@ -2466,7 +2451,7 @@ static void __perf_event_disable(struct perf_event *event,
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in perf_event_exit_event().
*
- * When called from perf_pending_irq it's OK because event->ctx
+ * When called from perf_pending_disable it's OK because event->ctx
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
@@ -2506,7 +2491,7 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
void perf_event_disable_inatomic(struct perf_event *event)
{
event->pending_disable = 1;
- irq_work_queue(&event->pending_irq);
+ irq_work_queue(&event->pending_disable_irq);
}
#define MAX_INTERRUPTS (~0ULL)
@@ -5206,9 +5191,35 @@ static bool exclusive_event_installable(struct perf_event *event,
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head);
+static void perf_pending_task_sync(struct perf_event *event)
+{
+ struct callback_head *head = &event->pending_task;
+
+ if (!event->pending_work)
+ return;
+ /*
+ * If the task is queued to the current task's queue, we
+ * obviously can't wait for it to complete. Simply cancel it.
+ */
+ if (task_work_cancel(current, head)) {
+ event->pending_work = 0;
+ local_dec(&event->ctx->nr_pending);
+ return;
+ }
+
+ /*
+ * All accesses related to the event are within the same RCU section in
+ * perf_pending_task(). The RCU grace period before the event is freed
+ * will make sure all those accesses are complete by then.
+ */
+ rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
+}
+
static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending_irq);
+ irq_work_sync(&event->pending_disable_irq);
+ perf_pending_task_sync(event);
unaccount_event(event);
@@ -5384,6 +5395,7 @@ int perf_event_release_kernel(struct perf_event *event)
again:
mutex_lock(&event->child_mutex);
list_for_each_entry(child, &event->child_list, child_list) {
+ void *var = NULL;
/*
* Cannot change, child events are not migrated, see the
@@ -5424,11 +5436,23 @@ again:
* this can't be the last reference.
*/
put_event(event);
+ } else {
+ var = &ctx->refcount;
}
mutex_unlock(&event->child_mutex);
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
+
+ if (var) {
+ /*
+ * If perf_event_free_task() has deleted all events from the
+ * ctx while the child_mutex got released above, make sure to
+ * notify about the preceding put_ctx().
+ */
+ smp_mb(); /* pairs with wait_var_event() */
+ wake_up_var(var);
+ }
goto again;
}
mutex_unlock(&event->child_mutex);
@@ -6496,6 +6520,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
nr_pages = vma_size / PAGE_SIZE;
+ if (nr_pages > INT_MAX)
+ return -ENOMEM;
mutex_lock(&event->mmap_mutex);
ret = -EINVAL;
@@ -6737,7 +6763,7 @@ static void perf_sigtrap(struct perf_event *event)
/*
* Deliver the pending work in-event-context or follow the context.
*/
-static void __perf_pending_irq(struct perf_event *event)
+static void __perf_pending_disable(struct perf_event *event)
{
int cpu = READ_ONCE(event->oncpu);
@@ -6752,11 +6778,6 @@ static void __perf_pending_irq(struct perf_event *event)
* Yay, we hit home and are in the context of the event.
*/
if (cpu == smp_processor_id()) {
- if (event->pending_sigtrap) {
- event->pending_sigtrap = 0;
- perf_sigtrap(event);
- local_dec(&event->ctx->nr_pending);
- }
if (event->pending_disable) {
event->pending_disable = 0;
perf_event_disable_local(event);
@@ -6780,11 +6801,26 @@ static void __perf_pending_irq(struct perf_event *event)
* irq_work_queue(); // FAILS
*
* irq_work_run()
- * perf_pending_irq()
+ * perf_pending_disable()
*
* But the event runs on CPU-B and wants disabling there.
*/
- irq_work_queue_on(&event->pending_irq, cpu);
+ irq_work_queue_on(&event->pending_disable_irq, cpu);
+}
+
+static void perf_pending_disable(struct irq_work *entry)
+{
+ struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq);
+ int rctx;
+
+ /*
+ * If we 'fail' here, that's OK, it means recursion is already disabled
+ * and we won't recurse 'further'.
+ */
+ rctx = perf_swevent_get_recursion_context();
+ __perf_pending_disable(event);
+ if (rctx >= 0)
+ perf_swevent_put_recursion_context(rctx);
}
static void perf_pending_irq(struct irq_work *entry)
@@ -6807,8 +6843,6 @@ static void perf_pending_irq(struct irq_work *entry)
perf_event_wakeup(event);
}
- __perf_pending_irq(event);
-
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
}
@@ -6819,23 +6853,27 @@ static void perf_pending_task(struct callback_head *head)
int rctx;
/*
+ * All accesses to the event must belong to the same implicit RCU read-side
+ * critical section as the ->pending_work reset. See comment in
+ * perf_pending_task_sync().
+ */
+ rcu_read_lock();
+ /*
* If we 'fail' here, that's OK, it means recursion is already disabled
* and we won't recurse 'further'.
*/
- preempt_disable_notrace();
rctx = perf_swevent_get_recursion_context();
if (event->pending_work) {
event->pending_work = 0;
perf_sigtrap(event);
local_dec(&event->ctx->nr_pending);
+ rcuwait_wake_up(&event->pending_work_wait);
}
+ rcu_read_unlock();
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
- preempt_enable_notrace();
-
- put_event(event);
}
#ifdef CONFIG_GUEST_PERF_EVENTS
@@ -9693,16 +9731,26 @@ static int __perf_event_overflow(struct perf_event *event,
*/
bool valid_sample = sample_is_allowed(event, regs);
unsigned int pending_id = 1;
+ enum task_work_notify_mode notify_mode;
if (regs)
pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
- if (!event->pending_sigtrap) {
- event->pending_sigtrap = pending_id;
+
+ notify_mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;
+
+ if (!event->pending_work &&
+ !task_work_add(current, &event->pending_task, notify_mode)) {
+ event->pending_work = pending_id;
local_inc(&event->ctx->nr_pending);
+
+ event->pending_addr = 0;
+ if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+ event->pending_addr = data->addr;
+
} else if (event->attr.exclude_kernel && valid_sample) {
/*
* Should not be able to return to user space without
- * consuming pending_sigtrap; with exceptions:
+ * consuming pending_work; with exceptions:
*
* 1. Where !exclude_kernel, events can overflow again
* in the kernel without returning to user space.
@@ -9712,13 +9760,8 @@ static int __perf_event_overflow(struct perf_event *event,
* To approximate progress (with false negatives),
* check 32-bit hash of the current IP.
*/
- WARN_ON_ONCE(event->pending_sigtrap != pending_id);
+ WARN_ON_ONCE(event->pending_work != pending_id);
}
-
- event->pending_addr = 0;
- if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
- event->pending_addr = data->addr;
- irq_work_queue(&event->pending_irq);
}
READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -9746,11 +9789,7 @@ struct swevent_htable {
struct swevent_hlist *swevent_hlist;
struct mutex hlist_mutex;
int hlist_refcount;
-
- /* Recursion avoidance in each contexts */
- int recursion[PERF_NR_CONTEXTS];
};
-
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
/*
@@ -9948,17 +9987,13 @@ DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
int perf_swevent_get_recursion_context(void)
{
- struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
-
- return get_recursion_context(swhash->recursion);
+ return get_recursion_context(current->perf_recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
void perf_swevent_put_recursion_context(int rctx)
{
- struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
-
- put_recursion_context(swhash->recursion, rctx);
+ put_recursion_context(current->perf_recursion, rctx);
}
void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
@@ -11948,7 +11983,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending_irq, perf_pending_irq);
+ event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
init_task_work(&event->pending_task, perf_pending_task);
+ rcuwait_init(&event->pending_work_wait);
mutex_init(&event->mmap_mutex);
raw_spin_lock_init(&event->addr_filters.lock);
@@ -13624,6 +13661,7 @@ int perf_event_init_task(struct task_struct *child, u64 clone_flags)
{
int ret;
+ memset(child->perf_recursion, 0, sizeof(child->perf_recursion));
child->perf_event_ctxp = NULL;
mutex_init(&child->perf_event_mutex);
INIT_LIST_HEAD(&child->perf_event_list);
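The perf changes above replace the pending_sigtrap irq_work with task_work queued via task_work_add(), using TWA_NMI_CURRENT when the overflow fires in NMI context. A minimal sketch of that pattern in isolation (my_work/my_task_work are hypothetical names, not perf code):

#include <linux/task_work.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/printk.h>

static void my_task_work(struct callback_head *head)
{
	/* runs in the task's own context before it returns to user space */
}

static struct callback_head my_work;

static void my_queue_work(void)
{
	enum task_work_notify_mode mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;

	init_task_work(&my_work, my_task_work);
	if (task_work_add(current, &my_work, mode))
		pr_debug("target task is exiting, work not queued\n");
}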
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 5150d5f84c03..451514442a1b 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -128,7 +128,7 @@ static inline unsigned long perf_data_size(struct perf_buffer *rb)
static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
- return rb->aux_nr_pages << PAGE_SHIFT;
+ return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
}
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...) \
@@ -208,7 +208,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
-static inline int get_recursion_context(int *recursion)
+static inline int get_recursion_context(u8 *recursion)
{
unsigned char rctx = interrupt_context_level();
@@ -221,7 +221,7 @@ static inline int get_recursion_context(int *recursion)
return rctx;
}
-static inline void put_recursion_context(int *recursion, int rctx)
+static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
{
barrier();
recursion[rctx]--;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 4013408ce012..8cadf97bc290 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -682,13 +682,18 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
if (!has_aux(event))
return -EOPNOTSUPP;
+ if (nr_pages <= 0)
+ return -EINVAL;
+
if (!overwrite) {
/*
* Watermark defaults to half the buffer, and so does the
* max_order, to aid PMU drivers in double buffering.
*/
if (!watermark)
- watermark = nr_pages << (PAGE_SHIFT - 1);
+ watermark = min_t(unsigned long,
+ U32_MAX,
+ (unsigned long)nr_pages << (PAGE_SHIFT - 1));
/*
* Use aux_watermark as the basis for chunking to
diff --git a/kernel/exit.c b/kernel/exit.c
index f95a2c1338a8..be81342caf1b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -277,7 +277,6 @@ repeat:
}
write_unlock_irq(&tasklist_lock);
- seccomp_filter_release(p);
proc_flush_pid(thread_pid);
put_pid(thread_pid);
release_thread(p);
@@ -484,6 +483,8 @@ retry:
* Search through everything else, we should not get here often.
*/
for_each_process(g) {
+ if (atomic_read(&mm->mm_users) <= 1)
+ break;
if (g->flags & PF_KTHREAD)
continue;
for_each_thread(g, c) {
@@ -832,6 +833,8 @@ void __noreturn do_exit(long code)
io_uring_files_cancel();
exit_signals(tsk); /* sets PF_EXITING */
+ seccomp_filter_release(tsk);
+
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
diff --git a/kernel/fork.c b/kernel/fork.c
index 99076dbe27d8..942e3d8617bf 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -115,6 +115,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
+#include <kunit/visibility.h>
+
/*
* Minimum number of threads to boot the kernel
*/
@@ -616,12 +618,6 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
exe_file = get_mm_exe_file(oldmm);
RCU_INIT_POINTER(mm->exe_file, exe_file);
- /*
- * We depend on the oldmm having properly denied write access to the
- * exe_file already.
- */
- if (exe_file && deny_write_access(exe_file))
- pr_warn_once("deny_write_access() failed in %s\n", __func__);
}
#ifdef CONFIG_MMU
@@ -1334,6 +1330,7 @@ struct mm_struct *mm_alloc(void)
memset(mm, 0, sizeof(*mm));
return mm_init(mm, current, current_user_ns());
}
+EXPORT_SYMBOL_IF_KUNIT(mm_alloc);
static inline void __mmput(struct mm_struct *mm)
{
@@ -1412,20 +1409,11 @@ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
*/
old_exe_file = rcu_dereference_raw(mm->exe_file);
- if (new_exe_file) {
- /*
- * We expect the caller (i.e., sys_execve) to already denied
- * write access, so this is unlikely to fail.
- */
- if (unlikely(deny_write_access(new_exe_file)))
- return -EACCES;
+ if (new_exe_file)
get_file(new_exe_file);
- }
rcu_assign_pointer(mm->exe_file, new_exe_file);
- if (old_exe_file) {
- allow_write_access(old_exe_file);
+ if (old_exe_file)
fput(old_exe_file);
- }
return 0;
}
@@ -1464,9 +1452,6 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
return ret;
}
- ret = deny_write_access(new_exe_file);
- if (ret)
- return -EACCES;
get_file(new_exe_file);
/* set the new file */
@@ -1475,10 +1460,8 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
rcu_assign_pointer(mm->exe_file, new_exe_file);
mmap_write_unlock(mm);
- if (old_exe_file) {
- allow_write_access(old_exe_file);
+ if (old_exe_file)
fput(old_exe_file);
- }
return 0;
}
@@ -2941,8 +2924,6 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
}
#endif
-#ifdef __ARCH_WANT_SYS_CLONE3
-
noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
struct clone_args __user *uargs,
size_t usize)
@@ -3086,6 +3067,11 @@ SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
struct kernel_clone_args kargs;
pid_t set_tid[MAX_PID_NS_LEVEL];
+#ifdef __ARCH_BROKEN_SYS_CLONE3
+#warning clone3() entry point is missing, please fix
+ return -ENOSYS;
+#endif
+
kargs.set_tid = set_tid;
err = copy_clone_args_from_user(&kargs, uargs, size);
@@ -3097,7 +3083,6 @@ SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
return kernel_clone(&kargs);
}
-#endif
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
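With the hunk above, architectures lacking a clone3() entry point return -ENOSYS instead of failing at build time. A minimal userspace sketch that copes with that (editorial example; assumes uapi headers providing struct clone_args and __NR_clone3):

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <errno.h>

int main(void)
{
	struct clone_args args;
	long pid;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;

	pid = syscall(__NR_clone3, &args, sizeof(args));
	if (pid < 0 && errno == ENOSYS)
		pid = fork();	/* clone3() not wired up on this arch */
	if (pid == 0)
		_exit(0);
	return pid > 0 ? 0 : 1;
}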
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 74a4ef1da9ad..fd75b4a484d7 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
#include <linux/mm.h>
#include "gcov.h"
-#if (__GNUC__ >= 10)
+#if (__GNUC__ >= 14)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ >= 10)
#define GCOV_COUNTERS 8
#elif (__GNUC__ >= 7)
#define GCOV_COUNTERS 9
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 6d443ea22bb7..383fd43ac612 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -14,7 +14,12 @@ include/
arch/$SRCARCH/include/
"
-type cpio > /dev/null
+if ! command -v cpio >/dev/null; then
+ echo >&2 "***"
+ echo >&2 "*** 'cpio' could not be found."
+ echo >&2 "***"
+ exit 1
+fi
# Support incremental builds by skipping archive generation
# if timestamps of files being archived are not changed.
@@ -84,7 +89,7 @@ find $cpio_dir -type f -print0 |
# Create archive and try to normalize metadata for reproducibility.
tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
- --owner=0 --group=0 --sort=name --numeric-owner \
+ --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
-I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
echo $headers_md5 > kernel/kheaders.md5
diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
index 38d6ae651ac7..3d4036db15ac 100644
--- a/kernel/irq/irq_sim.c
+++ b/kernel/irq/irq_sim.c
@@ -17,6 +17,8 @@ struct irq_sim_work_ctx {
unsigned int irq_count;
unsigned long *pending;
struct irq_domain *domain;
+ struct irq_sim_ops ops;
+ void *user_data;
};
struct irq_sim_irq_ctx {
@@ -88,6 +90,31 @@ static int irq_sim_set_irqchip_state(struct irq_data *data,
return 0;
}
+static int irq_sim_request_resources(struct irq_data *data)
+{
+ struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
+ struct irq_sim_work_ctx *work_ctx = irq_ctx->work_ctx;
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ if (work_ctx->ops.irq_sim_irq_requested)
+ return work_ctx->ops.irq_sim_irq_requested(work_ctx->domain,
+ hwirq,
+ work_ctx->user_data);
+
+ return 0;
+}
+
+static void irq_sim_release_resources(struct irq_data *data)
+{
+ struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
+ struct irq_sim_work_ctx *work_ctx = irq_ctx->work_ctx;
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ if (work_ctx->ops.irq_sim_irq_released)
+ work_ctx->ops.irq_sim_irq_released(work_ctx->domain, hwirq,
+ work_ctx->user_data);
+}
+
static struct irq_chip irq_sim_irqchip = {
.name = "irq_sim",
.irq_mask = irq_sim_irqmask,
@@ -95,6 +122,8 @@ static struct irq_chip irq_sim_irqchip = {
.irq_set_type = irq_sim_set_type,
.irq_get_irqchip_state = irq_sim_get_irqchip_state,
.irq_set_irqchip_state = irq_sim_set_irqchip_state,
+ .irq_request_resources = irq_sim_request_resources,
+ .irq_release_resources = irq_sim_release_resources,
};
static void irq_sim_handle_irq(struct irq_work *work)
@@ -164,6 +193,15 @@ static const struct irq_domain_ops irq_sim_domain_ops = {
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
unsigned int num_irqs)
{
+ return irq_domain_create_sim_full(fwnode, num_irqs, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(irq_domain_create_sim);
+
+struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data)
+{
struct irq_sim_work_ctx *work_ctx __free(kfree) =
kmalloc(sizeof(*work_ctx), GFP_KERNEL);
@@ -183,10 +221,14 @@ struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
work_ctx->irq_count = num_irqs;
work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);
work_ctx->pending = no_free_ptr(pending);
+ work_ctx->user_data = data;
+
+ if (ops)
+ memcpy(&work_ctx->ops, ops, sizeof(*ops));
return no_free_ptr(work_ctx)->domain;
}
-EXPORT_SYMBOL_GPL(irq_domain_create_sim);
+EXPORT_SYMBOL_GPL(irq_domain_create_sim_full);
/**
* irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
@@ -228,10 +270,22 @@ struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
struct fwnode_handle *fwnode,
unsigned int num_irqs)
{
+ return devm_irq_domain_create_sim_full(dev, fwnode, num_irqs,
+ NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
+
+struct irq_domain *
+devm_irq_domain_create_sim_full(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data)
+{
struct irq_domain *domain;
int ret;
- domain = irq_domain_create_sim(fwnode, num_irqs);
+ domain = irq_domain_create_sim_full(fwnode, num_irqs, ops, data);
if (IS_ERR(domain))
return domain;
@@ -241,4 +295,4 @@ struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
return domain;
}
-EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
+EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim_full);
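Sketch of a caller of the new *_sim_full() variants, wiring the irq_sim_ops callbacks shown above (illustrative; the my_* names are invented, the ops layout follows the calls in this file):

#include <linux/irq_sim.h>
#include <linux/irqdomain.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_irq_requested(struct irq_domain *domain, irq_hw_number_t hwirq,
			    void *data)
{
	/* veto or account a simulated line being requested */
	return 0;
}

static void my_irq_released(struct irq_domain *domain, irq_hw_number_t hwirq,
			    void *data)
{
}

static const struct irq_sim_ops my_sim_ops = {
	.irq_sim_irq_requested = my_irq_requested,
	.irq_sim_irq_released = my_irq_released,
};

static int my_probe(struct device *dev, struct fwnode_handle *fwnode)
{
	struct irq_domain *domain;

	domain = devm_irq_domain_create_sim_full(dev, fwnode, 8,
						 &my_sim_ops, NULL);
	return PTR_ERR_OR_ZERO(domain);
}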
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 71b0fc2d0aea..dd53298ef1a5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1337,7 +1337,7 @@ static int irq_thread(void *data)
* synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
* oneshot mask bit can be set.
*/
- task_work_cancel(current, irq_thread_dtor);
+ task_work_cancel_func(current, irq_thread_dtor);
return 0;
}
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 3218fa5688b9..4ad5ed8adf96 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -131,13 +131,16 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
STATIC_KEY_CHECK_USE(key);
/*
* Negative key->enabled has a special meaning: it sends
- * static_key_slow_inc() down the slow path, and it is non-zero
- * so it counts as "enabled" in jump_label_update(). Note that
- * atomic_inc_unless_negative() checks >= 0, so roll our own.
+ * static_key_slow_inc/dec() down the slow path, and it is non-zero
+ * so it counts as "enabled" in jump_label_update().
+ *
+ * The INT_MAX overflow condition is either used by the networking
+ * code to reset or detected in the slow path of
+ * static_key_slow_inc_cpuslocked().
*/
v = atomic_read(&key->enabled);
do {
- if (v <= 0 || (v + 1) < 0)
+ if (v <= 0 || v == INT_MAX)
return false;
} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
@@ -150,7 +153,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
lockdep_assert_cpus_held();
/*
- * Careful if we get concurrent static_key_slow_inc() calls;
+ * Careful if we get concurrent static_key_slow_inc/dec() calls;
* later calls must wait for the first one to _finish_ the
* jump_label_update() process. At the same time, however,
* the jump_label_update() call below wants to see
@@ -159,22 +162,24 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
if (static_key_fast_inc_not_disabled(key))
return true;
- jump_label_lock();
- if (atomic_read(&key->enabled) == 0) {
- atomic_set(&key->enabled, -1);
+ guard(mutex)(&jump_label_mutex);
+ /* Try to mark it as 'enabling in progress'. */
+ if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
jump_label_update(key);
/*
- * Ensure that if the above cmpxchg loop observes our positive
- * value, it must also observe all the text changes.
+ * Ensure that when static_key_fast_inc_not_disabled() or
+ * static_key_slow_try_dec() observe the positive value,
+ * they must also observe all the text changes.
*/
atomic_set_release(&key->enabled, 1);
} else {
- if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
- jump_label_unlock();
+ /*
+ * While holding the mutex this should never observe
+ * anything else than a value >= 1 and succeed
+ */
+ if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
return false;
- }
}
- jump_label_unlock();
return true;
}
@@ -247,20 +252,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
static bool static_key_slow_try_dec(struct static_key *key)
{
- int val;
-
- val = atomic_fetch_add_unless(&key->enabled, -1, 1);
- if (val == 1)
- return false;
+ int v;
/*
- * The negative count check is valid even when a negative
- * key->enabled is in use by static_key_slow_inc(); a
- * __static_key_slow_dec() before the first static_key_slow_inc()
- * returns is unbalanced, because all other static_key_slow_inc()
- * instances block while the update is in progress.
+ * Go into the slow path if key::enabled is less than or equal to
+ * one. One is valid to shut down the key, anything less than one
+ * is an imbalance, which is handled at the call site.
+ *
+ * That includes the special case of '-1' which is set in
+ * static_key_slow_inc_cpuslocked(), but that's harmless as it is
+ * fully serialized in the slow path below. By the time this task
+ * acquires the jump label lock the value is back to one and the
+ * retry under the lock must succeed.
*/
- WARN(val < 0, "jump label: negative count!\n");
+ v = atomic_read(&key->enabled);
+ do {
+ /*
+ * Warn about the '-1' case though, since that means a
+ * decrement is concurrent with a first (0->1) increment. IOW
+ * people are trying to disable something that wasn't yet fully
+ * enabled. This suggests an ordering problem on the user side.
+ */
+ WARN_ON_ONCE(v < 0);
+ if (v <= 1)
+ return false;
+ } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
+
return true;
}
@@ -271,10 +288,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
if (static_key_slow_try_dec(key))
return;
- jump_label_lock();
- if (atomic_dec_and_test(&key->enabled))
+ guard(mutex)(&jump_label_mutex);
+ if (atomic_cmpxchg(&key->enabled, 1, 0))
jump_label_update(key);
- jump_label_unlock();
+ else
+ WARN_ON_ONCE(!static_key_slow_try_dec(key));
}
static void __static_key_slow_dec(struct static_key *key)
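For context, a minimal sketch of the static-key API whose slow paths are reworked above, showing the balanced inc/dec the comments refer to (my_key and the helpers are hypothetical):

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(my_key);

void my_fast_path(void)
{
	if (static_branch_unlikely(&my_key))
		pr_info("optional feature enabled\n");
}

void my_feature_enable(void)
{
	static_branch_inc(&my_key);	/* 0->1 patches the branch */
}

void my_feature_disable(void)
{
	static_branch_dec(&my_key);	/* last decrement patches it back */
}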
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 22ea19a36e6e..98b9622d372e 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -388,12 +388,12 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
!!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}
-static const char *kallsyms_lookup_buildid(unsigned long addr,
+static int kallsyms_lookup_buildid(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset, char **modname,
const unsigned char **modbuildid, char *namebuf)
{
- const char *ret;
+ int ret;
namebuf[KSYM_NAME_LEN - 1] = 0;
namebuf[0] = 0;
@@ -410,7 +410,7 @@ static const char *kallsyms_lookup_buildid(unsigned long addr,
if (modbuildid)
*modbuildid = NULL;
- ret = namebuf;
+ ret = strlen(namebuf);
goto found;
}
@@ -442,8 +442,13 @@ const char *kallsyms_lookup(unsigned long addr,
unsigned long *offset,
char **modname, char *namebuf)
{
- return kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
- NULL, namebuf);
+ int ret = kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
+ NULL, namebuf);
+
+ if (!ret)
+ return NULL;
+
+ return namebuf;
}
int lookup_symbol_name(unsigned long addr, char *symname)
@@ -478,19 +483,15 @@ static int __sprint_symbol(char *buffer, unsigned long address,
{
char *modname;
const unsigned char *buildid;
- const char *name;
unsigned long offset, size;
int len;
address += symbol_offset;
- name = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
+ len = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
buffer);
- if (!name)
+ if (!len)
return sprintf(buffer, "0x%lx", address - symbol_offset);
- if (name != buffer)
- strcpy(buffer, name);
- len = strlen(buffer);
offset -= symbol_offset;
if (add_offset)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index c3124f6d5536..f0a69d402066 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -632,6 +632,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
return -EINVAL;
kcov->mode = mode;
t->kcov = kcov;
+ t->kcov_mode = KCOV_MODE_REMOTE;
kcov->t = t;
kcov->remote = true;
kcov->remote_size = remote_arg->area_size;
diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
index 0c17b4c83e1c..117d9d4d3c3b 100644
--- a/kernel/kcsan/kcsan_test.c
+++ b/kernel/kcsan/kcsan_test.c
@@ -1620,5 +1620,6 @@ static struct kunit_suite kcsan_test_suite = {
kunit_test_suites(&kcsan_test_suite);
+MODULE_DESCRIPTION("KCSAN test suite");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <elver@google.com>");
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 151bd3de5936..726b22ce7d0b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -97,7 +97,6 @@ static struct ctl_table kern_lockdep_table[] = {
.proc_handler = proc_dointvec,
},
#endif /* CONFIG_LOCK_STAT */
- { }
};
static __init int kernel_lockdep_sysctls_init(void)
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 415d81e6ce70..de95ec07e477 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -30,6 +30,7 @@
#include <linux/torture.h>
#include <linux/reboot.h>
+MODULE_DESCRIPTION("torture test facility for locking");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 1df5fef8a656..7d96bed718e4 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -583,7 +583,7 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
#include "qspinlock_paravirt.h"
#include "qspinlock.c"
-bool nopvspin __initdata;
+bool nopvspin;
static __init int parse_nopvspin(char *arg)
{
nopvspin = true;
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c6d17aee4209..33cac79e3994 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1297,7 +1297,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline int __down_write_common(struct rw_semaphore *sem, int state)
+static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
{
int ret = 0;
@@ -1310,12 +1310,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
return ret;
}
-static inline void __down_write(struct rw_semaphore *sem)
+static __always_inline void __down_write(struct rw_semaphore *sem)
{
__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static __always_inline int __down_write_killable(struct rw_semaphore *sem)
{
return __down_write_common(sem, TASK_KILLABLE);
}
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 8475a0794f8c..438c6086d540 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -413,3 +413,11 @@ notrace int in_lock_functions(unsigned long addr)
&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
+
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_PREEMPT_RT)
+void notrace lockdep_assert_in_softirq_func(void)
+{
+ lockdep_assert_in_softirq();
+}
+EXPORT_SYMBOL(lockdep_assert_in_softirq_func);
+#endif
diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
index 62fb57bb9f16..bf65e0c3c86f 100644
--- a/kernel/module/kallsyms.c
+++ b/kernel/module/kallsyms.c
@@ -321,14 +321,15 @@ void * __weak dereference_module_function_descriptor(struct module *mod,
* For kallsyms to ask for address resolution. NULL means not found. Careful
* not to lock to avoid deadlock on oopses, simply disable preemption.
*/
-const char *module_address_lookup(unsigned long addr,
- unsigned long *size,
- unsigned long *offset,
- char **modname,
- const unsigned char **modbuildid,
- char *namebuf)
+int module_address_lookup(unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset,
+ char **modname,
+ const unsigned char **modbuildid,
+ char *namebuf)
{
- const char *ret = NULL;
+ const char *sym;
+ int ret = 0;
struct module *mod;
preempt_disable();
@@ -344,12 +345,10 @@ const char *module_address_lookup(unsigned long addr,
#endif
}
- ret = find_kallsyms_symbol(mod, addr, size, offset);
- }
- /* Make a copy in here where it's safe */
- if (ret) {
- strscpy(namebuf, ret, KSYM_NAME_LEN);
- ret = namebuf;
+ sym = find_kallsyms_symbol(mod, addr, size, offset);
+
+ if (sym)
+ ret = strscpy(namebuf, sym, KSYM_NAME_LEN);
}
preempt_enable();
diff --git a/kernel/module/main.c b/kernel/module/main.c
index d18a94b973e1..d9592195c5bb 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -2166,6 +2166,8 @@ static int find_module_sections(struct module *mod, struct load_info *info)
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
+ mod->btf_base_data = any_section_objs(info, ".BTF.base", 1,
+ &mod->btf_base_data_size);
#endif
#ifdef CONFIG_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
@@ -2590,8 +2592,9 @@ static noinline int do_init_module(struct module *mod)
}
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
- /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */
+ /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */
mod->btf_data = NULL;
+ mod->btf_base_data = NULL;
#endif
/*
* We want to free module_init, but be aware that kallsyms may be
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index dc48fecfa1dc..bdf0087d6442 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -218,6 +218,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
*/
do {
clear_thread_flag(TIF_SIGPENDING);
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
rc = kernel_wait4(-1, NULL, __WALL, NULL);
} while (rc != -ECHILD);
@@ -248,24 +249,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
set_current_state(TASK_INTERRUPTIBLE);
if (pid_ns->pid_allocated == init_pids)
break;
- /*
- * Release tasks_rcu_exit_srcu to avoid following deadlock:
- *
- * 1) TASK A unshare(CLONE_NEWPID)
- * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
- * and TASK C
- * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
- * 4) TASK A calls synchronize_rcu_tasks()
- * -> synchronize_srcu(tasks_rcu_exit_srcu)
- * 5) *DEADLOCK*
- *
- * It is considered safe to release tasks_rcu_exit_srcu here
- * because we assume the current task can not be concurrently
- * reaped at this point.
- */
- exit_tasks_rcu_stop();
schedule();
- exit_tasks_rcu_start();
}
__set_current_state(TASK_RUNNING);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index d9abb7ab031d..753b8dd42a59 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -1595,7 +1595,7 @@ int swsusp_check(bool exclusive)
put:
if (error)
- fput(hib_resume_bdev_file);
+ bdev_fput(hib_resume_bdev_file);
else
pr_debug("Image signature found, resuming\n");
} else {
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index 040fe7d1eda2..39a2b61c7232 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y = printk.o conopt.o
+obj-y = printk.o
obj-$(CONFIG_PRINTK) += printk_safe.o nbcon.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
obj-$(CONFIG_PRINTK_INDEX) += index.o
diff --git a/kernel/printk/conopt.c b/kernel/printk/conopt.c
deleted file mode 100644
index 9d507bac3657..000000000000
--- a/kernel/printk/conopt.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Kernel command line console options for hardware based addressing
- *
- * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tony Lindgren <tony@atomide.com>
- */
-
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/errno.h>
-
-#include "console_cmdline.h"
-
-/*
- * Allow longer DEVNAME:0.0 style console naming such as abcd0000.serial:0.0
- * in addition to the legacy ttyS0 style naming.
- */
-#define CONSOLE_NAME_MAX 32
-
-#define CONSOLE_OPT_MAX 16
-#define CONSOLE_BRL_OPT_MAX 16
-
-struct console_option {
- char name[CONSOLE_NAME_MAX];
- char opt[CONSOLE_OPT_MAX];
- char brl_opt[CONSOLE_BRL_OPT_MAX];
- u8 has_brl_opt:1;
-};
-
-/* Updated only at console_setup() time, no locking needed */
-static struct console_option conopt[MAX_CMDLINECONSOLES];
-
-/**
- * console_opt_save - Saves kernel command line console option for driver use
- * @str: Kernel command line console name and option
- * @brl_opt: Braille console options
- *
- * Saves a kernel command line console option for driver subsystems to use for
- * adding a preferred console during init. Called from console_setup() only.
- *
- * Return: 0 on success, negative error code on failure.
- */
-int __init console_opt_save(const char *str, const char *brl_opt)
-{
- struct console_option *con;
- size_t namelen, optlen;
- const char *opt;
- int i;
-
- namelen = strcspn(str, ",");
- if (namelen == 0 || namelen >= CONSOLE_NAME_MAX)
- return -EINVAL;
-
- opt = str + namelen;
- if (*opt == ',')
- opt++;
-
- optlen = strlen(opt);
- if (optlen >= CONSOLE_OPT_MAX)
- return -EINVAL;
-
- for (i = 0; i < MAX_CMDLINECONSOLES; i++) {
- con = &conopt[i];
-
- if (con->name[0]) {
- if (!strncmp(str, con->name, namelen))
- return 0;
- continue;
- }
-
- /*
- * The name isn't terminated, only opt is. Empty opt is fine,
- * but brl_opt can be either empty or NULL. For more info, see
- * _braille_console_setup().
- */
- strscpy(con->name, str, namelen + 1);
- strscpy(con->opt, opt, CONSOLE_OPT_MAX);
- if (brl_opt) {
- strscpy(con->brl_opt, brl_opt, CONSOLE_BRL_OPT_MAX);
- con->has_brl_opt = 1;
- }
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-static struct console_option *console_opt_find(const char *name)
-{
- struct console_option *con;
- int i;
-
- for (i = 0; i < MAX_CMDLINECONSOLES; i++) {
- con = &conopt[i];
- if (!strcmp(name, con->name))
- return con;
- }
-
- return NULL;
-}
-
-/**
- * add_preferred_console_match - Adds a preferred console if a match is found
- * @match: Expected console on kernel command line, such as console=DEVNAME:0.0
- * @name: Name of the console character device to add such as ttyS
- * @idx: Index for the console
- *
- * Allows driver subsystems to add a console after translating the command
- * line name to the character device name used for the console. Options are
- * added automatically based on the kernel command line. Duplicate preferred
- * consoles are ignored by __add_preferred_console().
- *
- * Return: 0 on success, negative error code on failure.
- */
-int add_preferred_console_match(const char *match, const char *name,
- const short idx)
-{
- struct console_option *con;
- char *brl_opt = NULL;
-
- if (!match || !strlen(match) || !name || !strlen(name) ||
- idx < 0)
- return -EINVAL;
-
- con = console_opt_find(match);
- if (!con)
- return -ENOENT;
-
- /*
- * See __add_preferred_console(). It checks for NULL brl_options to set
- * the preferred_console flag. Empty brl_opt instead of NULL leads into
- * the preferred_console flag not set, and CON_CONSDEV not being set,
- * and the boot console won't get disabled at the end of console_setup().
- */
- if (con->has_brl_opt)
- brl_opt = con->brl_opt;
-
- console_opt_add_preferred_console(name, idx, con->opt, brl_opt);
-
- return 0;
-}
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
index a125e0235589..3ca74ad391d6 100644
--- a/kernel/printk/console_cmdline.h
+++ b/kernel/printk/console_cmdline.h
@@ -2,12 +2,6 @@
#ifndef _CONSOLE_CMDLINE_H
#define _CONSOLE_CMDLINE_H
-#define MAX_CMDLINECONSOLES 8
-
-int console_opt_save(const char *str, const char *brl_opt);
-int console_opt_add_preferred_console(const char *name, const short idx,
- char *options, char *brl_options);
-
struct console_cmdline
{
char name[16]; /* Name of the driver */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 420fd310129d..dddb15f48d59 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -383,6 +383,9 @@ static int console_locked;
/*
* Array of consoles built from command line options (console=)
*/
+
+#define MAX_CMDLINECONSOLES 8
+
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
static int preferred_console = -1;
@@ -2500,17 +2503,6 @@ static int __init console_setup(char *str)
if (_braille_console_setup(&str, &brl_options))
return 1;
- /* Save the console for driver subsystem use */
- if (console_opt_save(str, brl_options))
- return 1;
-
- /* Flag register_console() to not call try_enable_default_console() */
- console_set_on_cmdline = 1;
-
- /* Don't attempt to parse a DEVNAME:0.0 style console */
- if (strchr(str, ':'))
- return 1;
-
/*
* Decode str into name, index, options.
*/
@@ -2541,13 +2533,6 @@ static int __init console_setup(char *str)
}
__setup("console=", console_setup);
-/* Only called from add_preferred_console_match() */
-int console_opt_add_preferred_console(const char *name, const short idx,
- char *options, char *brl_options)
-{
- return __add_preferred_console(name, idx, options, brl_options, true);
-}
-
/**
* add_preferred_console - add a device to the list of preferred consoles.
* @name: device name
@@ -3522,7 +3507,7 @@ void register_console(struct console *newcon)
* Note that a console with tty binding will have CON_CONSDEV
* flag set and will be first in the list.
*/
- if (preferred_console < 0 && !console_set_on_cmdline) {
+ if (preferred_console < 0) {
if (hlist_empty(&console_list) || !console_first()->device ||
console_first()->flags & CON_BOOT) {
try_enable_default_console(newcon);
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index 8db4fedaaa1e..b53a9e8f5904 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -42,6 +42,7 @@
#include "rcu.h"
+MODULE_DESCRIPTION("Read-Copy Update module-based scalability-test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 807fbf6123a7..08bf7c669dd3 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -51,6 +51,7 @@
#include "rcu.h"
+MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
@@ -390,6 +391,7 @@ struct rcu_torture_ops {
int extendables;
int slow_gps;
int no_pi_lock;
+ int debug_objects;
const char *name;
};
@@ -577,6 +579,7 @@ static struct rcu_torture_ops rcu_ops = {
.irq_capable = 1,
.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
.extendables = RCUTORTURE_MAX_EXTEND,
+ .debug_objects = 1,
.name = "rcu"
};
@@ -747,6 +750,7 @@ static struct rcu_torture_ops srcu_ops = {
.cbflood_max = 50000,
.irq_capable = 1,
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
+ .debug_objects = 1,
.name = "srcu"
};
@@ -786,6 +790,7 @@ static struct rcu_torture_ops srcud_ops = {
.cbflood_max = 50000,
.irq_capable = 1,
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
+ .debug_objects = 1,
.name = "srcud"
};
@@ -2626,7 +2631,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
rfcpp = rfp->rcu_fwd_cb_tail;
rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
- WRITE_ONCE(*rfcpp, rfcp);
+ smp_store_release(rfcpp, rfcp);
WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
if (i >= ARRAY_SIZE(rfp->n_launders_hist))
@@ -3455,7 +3460,6 @@ rcu_torture_cleanup(void)
cur_ops->gp_slow_unregister(NULL);
}
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}
@@ -3473,7 +3477,6 @@ static void rcu_torture_err_cb(struct rcu_head *rhp)
*/
pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
-#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/*
* Verify that double-free causes debug-objects to complain, but only
@@ -3482,39 +3485,43 @@ static void rcu_torture_err_cb(struct rcu_head *rhp)
*/
static void rcu_test_debug_objects(void)
{
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
struct rcu_head rh1;
struct rcu_head rh2;
+ int idx;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) {
+ pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n",
+ KBUILD_MODNAME, cur_ops->name);
+ return;
+ }
+
+ if (WARN_ON_ONCE(cur_ops->debug_objects &&
+ (!cur_ops->call || !cur_ops->cb_barrier)))
+ return;
+
struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
init_rcu_head_on_stack(&rh1);
init_rcu_head_on_stack(&rh2);
- pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
+ pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);
/* Try to queue the rh2 pair of callbacks for the same grace period. */
- preempt_disable(); /* Prevent preemption from interrupting test. */
- rcu_read_lock(); /* Make it impossible to finish a grace period. */
- call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
- local_irq_disable(); /* Make it harder to start a new grace period. */
- call_rcu_hurry(&rh2, rcu_torture_leak_cb);
- call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
+ idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */
+ cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */
+ cur_ops->call(&rh2, rcu_torture_leak_cb);
+ cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
if (rhp) {
- call_rcu_hurry(rhp, rcu_torture_leak_cb);
- call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
+ cur_ops->call(rhp, rcu_torture_leak_cb);
+ cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
}
- local_irq_enable();
- rcu_read_unlock();
- preempt_enable();
+ cur_ops->readunlock(idx);
/* Wait for them all to get done so we can safely return. */
- rcu_barrier();
- pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
+ cur_ops->cb_barrier();
+ pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name);
destroy_rcu_head_on_stack(&rh1);
destroy_rcu_head_on_stack(&rh2);
kfree(rhp);
-#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
- pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
-#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
static void rcutorture_sync(void)
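
A quick illustration of the pattern used in the rcutorture hunks above: the #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD guards become an IS_ENABLED() runtime check, and the duplicate-callback test goes through the flavor's ->call()/->cb_barrier() hooks gated by ->debug_objects. The sketch below is illustrative only; the demo_* names and the simplified ops structure are not part of the patch.

struct demo_torture_ops {
	int debug_objects;			/* Flavor opts in to the duplicate-CB test. */
	void (*call)(struct rcu_head *rhp, rcu_callback_t func);
	void (*cb_barrier)(void);
	const char *name;
};

static void demo_test_debug_objects(struct demo_torture_ops *ops)
{
	/* IS_ENABLED() keeps both branches compiled and type-checked. */
	if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) {
		pr_alert("debug-objects disabled, not testing duplicate call_%s()\n", ops->name);
		return;
	}
	/* A flavor that opts in must also provide both hooks. */
	if (WARN_ON_ONCE(ops->debug_objects && (!ops->call || !ops->cb_barrier)))
		return;
	/* ... queue duplicate callbacks via ops->call(), then wait via ops->cb_barrier() ... */
}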
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 2c2648a3ad30..f4ea5b1ec068 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -63,6 +63,7 @@ do { \
#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)
+MODULE_DESCRIPTION("Scalability test for object reference mechanisms");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 5afd5cf494db..549c03336ee9 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -277,7 +277,8 @@ bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
unsigned long cur_s = READ_ONCE(ssp->srcu_idx);
barrier();
- return ULONG_CMP_GE(cur_s, cookie) || ULONG_CMP_LT(cur_s, cookie - 3);
+ return cookie == SRCU_GET_STATE_COMPLETED ||
+ ULONG_CMP_GE(cur_s, cookie) || ULONG_CMP_LT(cur_s, cookie - 3);
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index bc4b58b0204e..b24db425f16d 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -667,7 +667,10 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
__func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed);
- return; /* Caller forgot to stop doing call_srcu()? */
+ return; // Caller forgot to stop doing call_srcu()?
+ // Or caller invoked start_poll_synchronize_srcu()
+ // and then cleanup_srcu_struct() before that grace
+ // period ended?
}
kfree(sup->node);
sup->node = NULL;
@@ -845,7 +848,6 @@ static void srcu_gp_end(struct srcu_struct *ssp)
bool cbs;
bool last_lvl;
int cpu;
- unsigned long flags;
unsigned long gpseq;
int idx;
unsigned long mask;
@@ -907,12 +909,12 @@ static void srcu_gp_end(struct srcu_struct *ssp)
if (!(gpseq & counter_wrap_check))
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(ssp->sda, cpu);
- spin_lock_irqsave_rcu_node(sdp, flags);
+ spin_lock_irq_rcu_node(sdp);
if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
sdp->srcu_gp_seq_needed = gpseq;
if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
sdp->srcu_gp_seq_needed_exp = gpseq;
- spin_unlock_irqrestore_rcu_node(sdp, flags);
+ spin_unlock_irq_rcu_node(sdp);
}
/* Callback initiation done, allow grace periods after next. */
@@ -1540,7 +1542,8 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
*/
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
- if (!rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
+ if (cookie != SRCU_GET_STATE_COMPLETED &&
+ !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
return false;
// Ensure that the end of the SRCU grace period happens before
// any subsequent code that the caller might execute.
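
Both poll_state_synchronize_srcu() hunks above (Tiny and Tree SRCU) make a cookie equal to SRCU_GET_STATE_COMPLETED report the grace period as already over. A hedged usage sketch of the polled API follows; demo_srcu and demo_poll() are illustrative names, not part of the patch.

DEFINE_SRCU(demo_srcu);

static void demo_poll(void)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_srcu(&demo_srcu);	/* Starts a GP if one is needed. */
	/* ... do unrelated work ... */
	if (poll_state_synchronize_srcu(&demo_srcu, cookie))
		pr_info("SRCU grace period has elapsed\n");

	/* With this change, a pre-completed cookie always reports "done": */
	WARN_ON_ONCE(!poll_state_synchronize_srcu(&demo_srcu, SRCU_GET_STATE_COMPLETED));
}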
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 6c2bd9001adc..da60a9947c00 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -122,7 +122,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* we are called at early boot time but this shouldn't happen.
*/
}
- WRITE_ONCE(rsp->gp_count, rsp->gp_count + 1);
+ rsp->gp_count++;
spin_unlock_irq(&rsp->rss_lock);
if (gp_state == GP_IDLE) {
@@ -151,15 +151,11 @@ void rcu_sync_enter(struct rcu_sync *rsp)
*/
void rcu_sync_exit(struct rcu_sync *rsp)
{
- int gpc;
-
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
spin_lock_irq(&rsp->rss_lock);
- gpc = rsp->gp_count - 1;
- WRITE_ONCE(rsp->gp_count, gpc);
- if (!gpc) {
+ WARN_ON_ONCE(rsp->gp_count == 0);
+ if (!--rsp->gp_count) {
if (rsp->gp_state == GP_PASSED) {
WRITE_ONCE(rsp->gp_state, GP_EXIT);
rcu_sync_call(rsp);
@@ -178,10 +174,10 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int gp_state;
- WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
spin_lock_irq(&rsp->rss_lock);
+ WARN_ON_ONCE(rsp->gp_count);
if (rsp->gp_state == GP_REPLAY)
WRITE_ONCE(rsp->gp_state, GP_EXIT);
gp_state = rsp->gp_state;
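
The rcu_sync hunks above drop the READ_ONCE()/WRITE_ONCE() wrappers on ->gp_count because every access, including the zero check, now happens under ->rss_lock. A minimal sketch of that locking rule, using an illustrative counter and lock (the demo_* names are not from the patch):

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;			/* Only ever accessed under demo_lock. */

static void demo_enter(void)
{
	spin_lock_irq(&demo_lock);
	demo_count++;			/* Plain access is fine: the lock serializes all users. */
	spin_unlock_irq(&demo_lock);
}

static void demo_exit(void)
{
	spin_lock_irq(&demo_lock);
	WARN_ON_ONCE(demo_count == 0);	/* Checked under the lock, as rcu_sync_exit() now does. */
	if (!--demo_count)
		;	/* ... last exiter would kick off cleanup, cf. rcu_sync_exit() ... */
	spin_unlock_irq(&demo_lock);
}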
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index e1bf33018e6d..ba3440a45b6d 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -858,7 +858,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
// not know to synchronize with this RCU Tasks grace period) have
// completed exiting. The synchronize_rcu() in rcu_tasks_postgp()
// will take care of any tasks stuck in the non-preemptible region
-// of do_exit() following its call to exit_tasks_rcu_stop().
+// of do_exit() following its call to exit_tasks_rcu_finish().
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
// Scans the holdout list, attempting to identify a quiescent state
// for each task on the list. If there is a quiescent state, the
@@ -1220,7 +1220,7 @@ void exit_tasks_rcu_start(void)
* Remove the task from the "yet another list" because do_exit() is now
* non-preemptible, allowing synchronize_rcu() to wait beyond this point.
*/
-void exit_tasks_rcu_stop(void)
+void exit_tasks_rcu_finish(void)
{
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;
@@ -1231,22 +1231,12 @@ void exit_tasks_rcu_stop(void)
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
list_del_init(&t->rcu_tasks_exit_list);
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
-}
-/*
- * Contribute to protect against tasklist scan blind spot while the
- * task is exiting and may be removed from the tasklist. See
- * corresponding synchronize_srcu() for further details.
- */
-void exit_tasks_rcu_finish(void)
-{
- exit_tasks_rcu_stop();
- exit_tasks_rcu_finish_trace(current);
+ exit_tasks_rcu_finish_trace(t);
}
#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
-void exit_tasks_rcu_stop(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -1757,6 +1747,16 @@ static void rcu_tasks_trace_pregp_step(struct list_head *hop)
// allow safe access to the hop list.
for_each_online_cpu(cpu) {
rcu_read_lock();
+ // Note that cpu_curr_snapshot() picks up the target
+ // CPU's current task while its runqueue is locked with
+ // an smp_mb__after_spinlock(). This ensures that either
+ // the grace-period kthread will see that task's read-side
+ // critical section or the task will see the updater's pre-GP
+ // accesses. The trailing smp_mb() in cpu_curr_snapshot()
+ // does not currently play a role other than to simplify
+ // that function's ordering semantics. If these simplified
+ // ordering semantics continue to be redundant, that smp_mb()
+ // might be removed.
t = cpu_curr_snapshot(cpu);
if (rcu_tasks_trace_pertask_prep(t, true))
trc_add_holdout(t, hop);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 28c7031711a3..e641cc681901 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
rcu_sr_normal_gp_cleanup_work),
+ .srs_cleanups_pending = ATOMIC_INIT(0),
};
/* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -175,6 +176,9 @@ static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
+static int nohz_full_patience_delay;
+module_param(nohz_full_patience_delay, int, 0444);
+static int nohz_full_patience_delay_jiffies;
// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
@@ -296,16 +300,6 @@ static void rcu_dynticks_eqs_online(void)
}
/*
- * Snapshot the ->dynticks counter with full ordering so as to allow
- * stable comparison of this counter with past and future snapshots.
- */
-static int rcu_dynticks_snap(int cpu)
-{
- smp_mb(); // Fundamental RCU ordering guarantee.
- return ct_dynticks_cpu_acquire(cpu);
-}
-
-/*
* Return true if the snapshot returned from rcu_dynticks_snap()
* indicates that RCU is in an extended quiescent state.
*/
@@ -321,7 +315,15 @@ static bool rcu_dynticks_in_eqs(int snap)
*/
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
- return snap != rcu_dynticks_snap(rdp->cpu);
+ /*
+ * The first failing snapshot is already ordered against the accesses
+ * performed by the remote CPU after it exits idle.
+ *
+ * The second snapshot therefore only needs to order against accesses
+ * performed by the remote CPU prior to entering idle and can therefore
+ * rely solely on acquire semantics.
+ */
+ return snap != ct_dynticks_cpu_acquire(rdp->cpu);
}
/*
@@ -769,7 +771,18 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
- rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
+ /*
+ * Full ordering between remote CPU's post idle accesses and updater's
+ * accesses prior to current GP (and also the started GP sequence number)
+ * is enforced by rcu_seq_start()'s implicit barrier and further by
+ * smp_mb__after_unlock_lock() barriers chained all the way throughout the
+ * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
+ * locking.
+ *
+ * Ordering between remote CPU's pre idle accesses and post grace period
+ * updater's accesses is enforced by the acquire semantics below.
+ */
+ rdp->dynticks_snap = ct_dynticks_cpu_acquire(rdp->cpu);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
@@ -1660,6 +1673,9 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
rcu_sr_put_wait_head(rcu);
}
+
+ /* Order list manipulations with atomic access. */
+ atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
}
/*
@@ -1667,7 +1683,7 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
*/
static void rcu_sr_normal_gp_cleanup(void)
{
- struct llist_node *wait_tail, *next, *rcu;
+ struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
int done = 0;
wait_tail = rcu_state.srs_wait_tail;
@@ -1693,16 +1709,34 @@ static void rcu_sr_normal_gp_cleanup(void)
break;
}
- // concurrent sr_normal_gp_cleanup work might observe this update.
- smp_store_release(&rcu_state.srs_done_tail, wait_tail);
+ /*
+ * Fast path: no more users to process except putting the second-to-last
+ * wait head if there are no in-flight workers. If there are in-flight
+ * workers, they will remove the last wait head.
+ *
+ * Note that the ACQUIRE orders atomic access with list manipulation.
+ */
+ if (wait_tail->next && wait_tail->next->next == NULL &&
+ rcu_sr_is_wait_head(wait_tail->next) &&
+ !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
+ rcu_sr_put_wait_head(wait_tail->next);
+ wait_tail->next = NULL;
+ }
+
+ /* Concurrent sr_normal_gp_cleanup work might observe this update. */
ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
+ smp_store_release(&rcu_state.srs_done_tail, wait_tail);
/*
* We schedule a work in order to perform a final processing
* of outstanding users (if still left) and releasing wait-heads
* added by rcu_sr_normal_gp_init() call.
*/
- queue_work(sync_wq, &rcu_state.srs_cleanup_work);
+ if (wait_tail->next) {
+ atomic_inc(&rcu_state.srs_cleanups_pending);
+ if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
+ atomic_dec(&rcu_state.srs_cleanups_pending);
+ }
}
/*
@@ -1810,7 +1844,7 @@ static noinline_for_stack bool rcu_gp_init(void)
WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
/* Exclude CPU hotplug operations. */
rcu_for_each_leaf_node(rnp) {
- local_irq_save(flags);
+ local_irq_disable();
arch_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_rcu_node(rnp);
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
@@ -1818,7 +1852,7 @@ static noinline_for_stack bool rcu_gp_init(void)
/* Nothing to do on this leaf rcu_node structure. */
raw_spin_unlock_rcu_node(rnp);
arch_spin_unlock(&rcu_state.ofl_lock);
- local_irq_restore(flags);
+ local_irq_enable();
continue;
}
@@ -1855,7 +1889,7 @@ static noinline_for_stack bool rcu_gp_init(void)
raw_spin_unlock_rcu_node(rnp);
arch_spin_unlock(&rcu_state.ofl_lock);
- local_irq_restore(flags);
+ local_irq_enable();
}
rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
@@ -4313,11 +4347,15 @@ static int rcu_pending(int user)
return 1;
/* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
- if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
+ gp_in_progress = rcu_gp_in_progress();
+ if ((user || rcu_is_cpu_rrupt_from_idle() ||
+ (gp_in_progress &&
+ time_before(jiffies, READ_ONCE(rcu_state.gp_start) +
+ nohz_full_patience_delay_jiffies))) &&
+ rcu_nohz_full_cpu())
return 0;
/* Is the RCU core waiting for a quiescent state from this CPU? */
- gp_in_progress = rcu_gp_in_progress();
if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
return 1;
@@ -4767,7 +4805,7 @@ rcu_boot_init_percpu_data(int cpu)
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
INIT_WORK(&rdp->strict_work, strict_work_handler);
WARN_ON_ONCE(ct->dynticks_nesting != 1);
- WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
+ WARN_ON_ONCE(rcu_dynticks_in_eqs(ct_dynticks_cpu(cpu)));
rdp->barrier_seq_snap = rcu_state.barrier_sequence;
rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
@@ -5110,11 +5148,15 @@ void rcutree_migrate_callbacks(int cpu)
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
bool needwake;
- if (rcu_rdp_is_offloaded(rdp) ||
- rcu_segcblist_empty(&rdp->cblist))
- return; /* No callbacks to migrate. */
+ if (rcu_rdp_is_offloaded(rdp))
+ return;
raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
+ if (rcu_segcblist_empty(&rdp->cblist)) {
+ raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
+ return; /* No callbacks to migrate. */
+ }
+
WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
rcu_barrier_entrain(rdp);
my_rdp = this_cpu_ptr(&rcu_data);
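
The srs_cleanups_pending hunks above pair atomic_inc() before queue_work() (with a compensating atomic_dec() when the work was already queued) against atomic_dec_return_release() in the worker and atomic_read_acquire() on the fast path. A stripped-down sketch of that accounting, with illustrative demo_* names and the workqueue/work items assumed to be set up elsewhere:

static atomic_t demo_pending = ATOMIC_INIT(0);
static struct workqueue_struct *demo_wq;	/* Assumed allocated elsewhere. */
static struct work_struct demo_work;		/* Assumed initialized elsewhere. */

static void demo_kick_cleanup(void)
{
	atomic_inc(&demo_pending);
	if (!queue_work(demo_wq, &demo_work))
		atomic_dec(&demo_pending);	/* Work was already queued: undo the count. */
}

static void demo_cleanup_fn(struct work_struct *work)
{
	/* ... process the deferred list ... */
	atomic_dec_return_release(&demo_pending);	/* RELEASE publishes the list manipulations. */
}

static bool demo_fastpath_allowed(void)
{
	/* ACQUIRE pairs with the RELEASE above before the caller touches the list. */
	return !atomic_read_acquire(&demo_pending);
}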
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bae7925c497f..fcf2b4aa3441 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -223,7 +223,6 @@ struct rcu_data {
struct swait_queue_head nocb_state_wq; /* For offloading state changes */
struct task_struct *nocb_gp_kthread;
raw_spinlock_t nocb_lock; /* Guard following pair of fields. */
- atomic_t nocb_lock_contended; /* Contention experienced. */
int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
struct timer_list nocb_timer; /* Enforce finite deferral. */
unsigned long nocb_gp_adv_time; /* Last call_rcu() CB adv (jiffies). */
@@ -420,6 +419,7 @@ struct rcu_state {
struct llist_node *srs_done_tail; /* ready for GP users. */
struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
struct work_struct srs_cleanup_work;
+ atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
};
/* Values for rcu_state structure's gp_flags field. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 8a1d9c8bd9f7..4acd29d16fdb 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -265,7 +265,12 @@ static bool sync_exp_work_done(unsigned long s)
{
if (rcu_exp_gp_seq_done(s)) {
trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
- smp_mb(); /* Ensure test happens before caller kfree(). */
+ /*
+ * Order GP completion with preceding accesses. Also order GP
+ * completion with post-GP update-side accesses. Pairs with
+ * rcu_seq_end().
+ */
+ smp_mb();
return true;
}
return false;
@@ -357,7 +362,21 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
!(rnp->qsmaskinitnext & mask)) {
mask_ofl_test |= mask;
} else {
- snap = rcu_dynticks_snap(cpu);
+ /*
+ * Full ordering between remote CPU's post idle accesses
+ * and updater's accesses prior to current GP (and also
+ * the started GP sequence number) is enforced by
+ * rcu_seq_start()'s implicit barrier, relayed by kworkers
+ * locking and even further by smp_mb__after_unlock_lock()
+ * barriers chained all the way throughout the rnp locking
+ * tree since sync_exp_reset_tree() and up to the current
+ * leaf rnp locking.
+ *
+ * Ordering between remote CPU's pre idle accesses and
+ * post grace period updater's accesses is enforced by the
+ * acquire semantics below.
+ */
+ snap = ct_dynticks_cpu_acquire(cpu);
if (rcu_dynticks_in_eqs(snap))
mask_ofl_test |= mask;
else
@@ -953,7 +972,6 @@ void synchronize_rcu_expedited(void)
rnp = rcu_get_root();
wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
sync_exp_work_done(s));
- smp_mb(); /* Work actions happen before return. */
/* Let the next expedited grace period start. */
mutex_unlock(&rcu_state.exp_mutex);
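
Several hunks above (dyntick_save_progress_counter(), rcu_dynticks_in_eqs_since() and the expedited path) replace the full-barrier rcu_dynticks_snap() with ct_dynticks_cpu_acquire(): an acquire load of the remote counter is enough to order against that CPU's accesses preceding its counter update. A generic sketch of that acquire/release pairing with illustrative demo_* variables (not the real context-tracking code):

static int demo_state;			/* Stand-in for a per-CPU dynticks-style counter. */
static int demo_data;

/* Remote CPU: perform its pre-idle access, then advance the counter with RELEASE. */
static void demo_remote_enter_idle(void)
{
	demo_data = 42;
	smp_store_release(&demo_state, demo_state + 1);
}

/* Grace-period side: an ACQUIRE snapshot orders later reads against everything
 * the remote CPU did before bumping the counter. */
static bool demo_gp_sees_pre_idle_accesses(int old_snap)
{
	int snap = smp_load_acquire(&demo_state);

	if (snap == old_snap)
		return false;		/* No transition observed yet. */
	return demo_data == 42;		/* Guaranteed once the new counter value is seen. */
}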
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 3f85577bddd4..3ce30841119a 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -91,8 +91,7 @@ module_param(nocb_nobypass_lim_per_jiffy, int, 0);
/*
* Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
- * lock isn't immediately available, increment ->nocb_lock_contended to
- * flag the contention.
+ * lock isn't immediately available, perform minimal sanity check.
*/
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
__acquires(&rdp->nocb_bypass_lock)
@@ -100,29 +99,12 @@ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
lockdep_assert_irqs_disabled();
if (raw_spin_trylock(&rdp->nocb_bypass_lock))
return;
- atomic_inc(&rdp->nocb_lock_contended);
+ /*
+ * Contention is expected only when a local enqueue collides with a
+ * remote flush from a kthread.
+ */
WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
- smp_mb__after_atomic(); /* atomic_inc() before lock. */
raw_spin_lock(&rdp->nocb_bypass_lock);
- smp_mb__before_atomic(); /* atomic_dec() after lock. */
- atomic_dec(&rdp->nocb_lock_contended);
-}
-
-/*
- * Spinwait until the specified rcu_data structure's ->nocb_lock is
- * not contended. Please note that this is extremely special-purpose,
- * relying on the fact that at most two kthreads and one CPU contend for
- * this lock, and also that the two kthreads are guaranteed to have frequent
- * grace-period-duration time intervals between successive acquisitions
- * of the lock. This allows us to use an extremely simple throttling
- * mechanism, and further to apply it only to the CPU doing floods of
- * call_rcu() invocations. Don't try this at home!
- */
-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
-{
- WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
- while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
- cpu_relax();
}
/*
@@ -510,7 +492,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
}
// We need to use the bypass.
- rcu_nocb_wait_contended(rdp);
rcu_nocb_bypass_lock(rdp);
ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
@@ -635,8 +616,7 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
}
}
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
- bool *wake_state)
+static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
{
struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long flags;
@@ -650,8 +630,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
* We will handle this rdp until it ever gets de-offloaded.
*/
rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
- if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
- *wake_state = true;
ret = 1;
} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
@@ -660,8 +638,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
* We will ignore this rdp until it ever gets re-offloaded.
*/
rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
- if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
- *wake_state = true;
ret = 0;
} else {
WARN_ON_ONCE(1);
@@ -877,16 +853,15 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
}
if (rdp_toggling) {
- bool wake_state = false;
int ret;
- ret = nocb_gp_toggle_rdp(rdp_toggling, &wake_state);
+ ret = nocb_gp_toggle_rdp(rdp_toggling);
if (ret == 1)
list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
else if (ret == 0)
list_del(&rdp_toggling->nocb_entry_rdp);
- if (wake_state)
- swake_up_one(&rdp_toggling->nocb_state_wq);
+
+ swake_up_one(&rdp_toggling->nocb_state_wq);
}
my_rdp->nocb_gp_seq = -1;
@@ -913,16 +888,9 @@ static int rcu_nocb_gp_kthread(void *arg)
return 0;
}
-static inline bool nocb_cb_can_run(struct rcu_data *rdp)
-{
- u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
-
- return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
- return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+ return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park();
}
/*
@@ -934,21 +902,19 @@ static void nocb_cb_wait(struct rcu_data *rdp)
struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long cur_gp_seq;
unsigned long flags;
- bool needwake_state = false;
bool needwake_gp = false;
- bool can_sleep = true;
struct rcu_node *rnp = rdp->mynode;
- do {
- swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
- nocb_cb_wait_cond(rdp));
-
- if (READ_ONCE(rdp->nocb_cb_sleep)) {
- WARN_ON(signal_pending(current));
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
- }
- } while (!nocb_cb_can_run(rdp));
+ swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+ nocb_cb_wait_cond(rdp));
+ if (kthread_should_park()) {
+ kthread_parkme();
+ } else if (READ_ONCE(rdp->nocb_cb_sleep)) {
+ WARN_ON(signal_pending(current));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+ }
+ WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
local_irq_save(flags);
rcu_momentary_dyntick_idle();
@@ -971,37 +937,16 @@ static void nocb_cb_wait(struct rcu_data *rdp)
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
}
- if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
- if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
- rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
- if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
- needwake_state = true;
- }
- if (rcu_segcblist_ready_cbs(cblist))
- can_sleep = false;
+ if (!rcu_segcblist_ready_cbs(cblist)) {
+ WRITE_ONCE(rdp->nocb_cb_sleep, true);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
} else {
- /*
- * De-offloading. Clear our flag and notify the de-offload worker.
- * We won't touch the callbacks and keep sleeping until we ever
- * get re-offloaded.
- */
- WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
- rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
- if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
- needwake_state = true;
+ WRITE_ONCE(rdp->nocb_cb_sleep, false);
}
- WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
-
- if (rdp->nocb_cb_sleep)
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
-
rcu_nocb_unlock_irqrestore(rdp, flags);
if (needwake_gp)
rcu_gp_kthread_wake();
-
- if (needwake_state)
- swake_up_one(&rdp->nocb_state_wq);
}
/*
@@ -1094,17 +1039,8 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
bool wake_gp = false;
rcu_segcblist_offload(cblist, offload);
-
- if (rdp->nocb_cb_sleep)
- rdp->nocb_cb_sleep = false;
rcu_nocb_unlock_irqrestore(rdp, flags);
- /*
- * Ignore former value of nocb_cb_sleep and force wake up as it could
- * have been spuriously set to false already.
- */
- swake_up_one(&rdp->nocb_cb_wq);
-
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
// Queue this rdp for add/del to/from the list to iterate on rcuog
WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
@@ -1161,19 +1097,11 @@ static long rcu_nocb_rdp_deoffload(void *arg)
if (wake_gp)
wake_up_process(rdp_gp->nocb_gp_kthread);
- /*
- * If rcuo[p] kthread spawn failed, directly remove SEGCBLIST_KTHREAD_CB.
- * Just wait SEGCBLIST_KTHREAD_GP to be cleared by rcuog.
- */
- if (!rdp->nocb_cb_kthread) {
- rcu_nocb_lock_irqsave(rdp, flags);
- rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
- rcu_nocb_unlock_irqrestore(rdp, flags);
- }
-
swait_event_exclusive(rdp->nocb_state_wq,
- !rcu_segcblist_test_flags(cblist,
- SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
+ !rcu_segcblist_test_flags(cblist,
+ SEGCBLIST_KTHREAD_GP));
+ if (rdp->nocb_cb_kthread)
+ kthread_park(rdp->nocb_cb_kthread);
} else {
/*
* No kthread to clear the flags for us or remove the rdp from the nocb list
@@ -1181,8 +1109,7 @@ static long rcu_nocb_rdp_deoffload(void *arg)
* but we stick to paranoia in this rare path.
*/
rcu_nocb_lock_irqsave(rdp, flags);
- rcu_segcblist_clear_flags(&rdp->cblist,
- SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+ rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
rcu_nocb_unlock_irqrestore(rdp, flags);
list_del(&rdp->nocb_entry_rdp);
@@ -1282,8 +1209,10 @@ static long rcu_nocb_rdp_offload(void *arg)
wake_gp = rdp_offload_toggle(rdp, true, flags);
if (wake_gp)
wake_up_process(rdp_gp->nocb_gp_kthread);
+
+ kthread_unpark(rdp->nocb_cb_kthread);
+
swait_event_exclusive(rdp->nocb_state_wq,
- rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
/*
@@ -1468,7 +1397,7 @@ void __init rcu_init_nohz(void)
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
rcu_segcblist_offload(&rdp->cblist, true);
- rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+ rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
}
rcu_organize_nocb_kthreads();
@@ -1526,11 +1455,16 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
/* Spawn the kthread for this CPU. */
- t = kthread_run(rcu_nocb_cb_kthread, rdp,
- "rcuo%c/%d", rcu_state.abbr, cpu);
+ t = kthread_create(rcu_nocb_cb_kthread, rdp,
+ "rcuo%c/%d", rcu_state.abbr, cpu);
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
goto end;
+ if (rcu_rdp_is_offloaded(rdp))
+ wake_up_process(t);
+ else
+ kthread_park(t);
+
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
@@ -1678,12 +1612,11 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
- pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+ pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
rdp->cpu, rdp->nocb_gp_rdp->cpu,
nocb_next_rdp ? nocb_next_rdp->cpu : -1,
"kK"[!!rdp->nocb_cb_kthread],
"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
- "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
"sS"[!!rdp->nocb_cb_sleep],
".W"[swait_active(&rdp->nocb_cb_wq)],
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 340bbefe5f65..c569da65b421 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -28,8 +28,8 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
!(lockdep_is_held(&rcu_state.barrier_mutex) ||
(IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
rcu_lockdep_is_held_nocb(rdp) ||
- (rdp == this_cpu_ptr(&rcu_data) &&
- !(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible())) ||
+ (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
+ rdp == this_cpu_ptr(&rcu_data)) ||
rcu_current_is_nocb_kthread(rdp)),
"Unsafe read of RCU_NOCB offloaded state"
);
@@ -93,6 +93,16 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
if (gp_cleanup_delay)
pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
+ if (nohz_full_patience_delay < 0) {
+ pr_info("\tRCU NOCB CPU patience negative (%d), resetting to zero.\n", nohz_full_patience_delay);
+ nohz_full_patience_delay = 0;
+ } else if (nohz_full_patience_delay > 5 * MSEC_PER_SEC) {
+ pr_info("\tRCU NOCB CPU patience too large (%d), resetting to %ld.\n", nohz_full_patience_delay, 5 * MSEC_PER_SEC);
+ nohz_full_patience_delay = 5 * MSEC_PER_SEC;
+ } else if (nohz_full_patience_delay) {
+ pr_info("\tRCU NOCB CPU patience set to %d milliseconds.\n", nohz_full_patience_delay);
+ }
+ nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
if (!use_softirq)
pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
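
rcu_bootup_announce_oddness() above clamps the new nohz_full_patience_delay parameter into [0, 5000] ms and converts it to jiffies once at boot rather than in the fast path. A small sketch of that validate-once idiom with an illustrative demo_* parameter:

static int demo_delay_ms;		/* module_param(demo_delay_ms, int, 0444); */
static int demo_delay_jiffies;

static void __init demo_clamp_param(void)
{
	if (demo_delay_ms < 0)
		demo_delay_ms = 0;			/* Negative delays make no sense. */
	else if (demo_delay_ms > 5 * MSEC_PER_SEC)
		demo_delay_ms = 5 * MSEC_PER_SEC;	/* Cap runaway values. */
	demo_delay_jiffies = msecs_to_jiffies(demo_delay_ms);	/* Convert once, use in rcu_pending(). */
}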
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 460efecd077b..4b0e9d7c4c68 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -501,7 +501,7 @@ static void print_cpu_stall_info(int cpu)
}
delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
falsepositive = rcu_is_gp_kthread_starving(NULL) &&
- rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
+ rcu_dynticks_in_eqs(ct_dynticks_cpu(cpu));
rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
if (rcuc_starved)
// Print signed value, as negative values indicate a probable bug.
@@ -515,7 +515,7 @@ static void print_cpu_stall_info(int cpu)
rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
"!."[!delta],
ticks_value, ticks_title,
- rcu_dynticks_snap(cpu) & 0xffff,
+ ct_dynticks_cpu(cpu) & 0xffff,
ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 59032aaccd18..44e83a646264 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -43,6 +43,7 @@
#define SCFTORTOUT_ERRSTRING(s, x...) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x)
+MODULE_DESCRIPTION("Torture tests on the smp_call_function() family of primitives");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");
@@ -67,7 +68,7 @@ torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU operation
torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU operations.");
-char *torture_type = "";
+static char *torture_type = "";
#ifdef MODULE
# define SCFTORT_SHUTDOWN 0
diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
index d9dc9ab3773f..39c315182b35 100644
--- a/kernel/sched/build_policy.c
+++ b/kernel/sched/build_policy.c
@@ -52,3 +52,4 @@
#include "cputime.c"
#include "deadline.c"
+#include "syscalls.c"
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 3c6193de9cde..a09655b48140 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -340,7 +340,7 @@ again:
this_clock = sched_clock_local(my_scd);
/*
* We must enforce atomic readout on 32-bit, otherwise the
- * update on the remote CPU can hit inbetween the readout of
+ * update on the remote CPU can hit in between the readout of
* the low 32-bit and the high 32-bit portion.
*/
remote_clock = cmpxchg64(&scd->clock, 0, 0);
@@ -444,7 +444,7 @@ notrace void sched_clock_tick_stable(void)
}
/*
- * We are going deep-idle (irqs are disabled):
+ * We are going deep-idle (IRQs are disabled):
*/
notrace void sched_clock_idle_sleep_event(void)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bcf2c4cc0522..ae5ef3013a55 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2,9 +2,10 @@
/*
* kernel/sched/core.c
*
- * Core kernel scheduler code and related syscalls
+ * Core kernel CPU scheduler code
*
* Copyright (C) 1991-2002 Linus Torvalds
+ * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
*/
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
@@ -706,14 +707,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
/*
* Since irq_time is only updated on {soft,}irq_exit, we might run into
* this case when a previous update_rq_clock() happened inside a
- * {soft,}irq region.
+ * {soft,}IRQ region.
*
* When this happens, we stop ->clock_task and only update the
* prev_irq_time stamp to account for the part that fit, so that a next
* update will consume the rest. This ensures ->clock_task is
* monotonic.
*
- * It does however cause some slight miss-attribution of {soft,}irq
+ * It does, however, cause some slight misattribution of {soft,}IRQ
* time, a more accurate solution would be to update the irq_time using
* the current rq->clock timestamp, except that would require using
* atomic ops.
@@ -723,7 +724,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
rq->prev_irq_time += irq_delta;
delta -= irq_delta;
- psi_account_irqtime(rq->curr, irq_delta);
delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
@@ -826,7 +826,7 @@ static void __hrtick_start(void *arg)
/*
* Called to set the hrtick timer state.
*
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
*/
void hrtick_start(struct rq *rq, u64 delay)
{
@@ -850,7 +850,7 @@ void hrtick_start(struct rq *rq, u64 delay)
/*
* Called to set the hrtick timer state.
*
- * called with rq->lock held and irqs disabled
+ * called with rq->lock held and IRQs disabled
*/
void hrtick_start(struct rq *rq, u64 delay)
{
@@ -884,7 +884,7 @@ static inline void hrtick_rq_init(struct rq *rq)
#endif /* CONFIG_SCHED_HRTICK */
/*
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * try_cmpxchg based fetch_or() macro so it works for different integer types:
*/
#define fetch_or(ptr, mask) \
({ \
@@ -1081,7 +1081,7 @@ void resched_cpu(int cpu)
*
* We don't do similar optimization for completely idle system, as
* selecting an idle CPU will add more delays to the timers than intended
- * (as that CPU's timer base may not be uptodate wrt jiffies etc).
+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
*/
int get_nohz_timer_target(void)
{
@@ -1141,7 +1141,7 @@ static void wake_up_idle_cpu(int cpu)
* nohz functions that would need to follow TIF_NR_POLLING
* clearing:
*
- * - On most archs, a simple fetch_or on ti::flags with a
+ * - On most architectures, a simple fetch_or on ti::flags with a
* "0" value would be enough to know if an IPI needs to be sent.
*
* - x86 needs to perform a last need_resched() check between
@@ -1324,30 +1324,27 @@ int tg_nop(struct task_group *tg, void *data)
}
#endif
-static void set_load_weight(struct task_struct *p, bool update_load)
+void set_load_weight(struct task_struct *p, bool update_load)
{
int prio = p->static_prio - MAX_RT_PRIO;
- struct load_weight *load = &p->se.load;
+ struct load_weight lw;
- /*
- * SCHED_IDLE tasks get minimal weight:
- */
if (task_has_idle_policy(p)) {
- load->weight = scale_load(WEIGHT_IDLEPRIO);
- load->inv_weight = WMULT_IDLEPRIO;
- return;
+ lw.weight = scale_load(WEIGHT_IDLEPRIO);
+ lw.inv_weight = WMULT_IDLEPRIO;
+ } else {
+ lw.weight = scale_load(sched_prio_to_weight[prio]);
+ lw.inv_weight = sched_prio_to_wmult[prio];
}
/*
* SCHED_OTHER tasks have to update their load when changing their
* weight
*/
- if (update_load && p->sched_class == &fair_sched_class) {
- reweight_task(p, prio);
- } else {
- load->weight = scale_load(sched_prio_to_weight[prio]);
- load->inv_weight = sched_prio_to_wmult[prio];
- }
+ if (update_load && p->sched_class == &fair_sched_class)
+ reweight_task(p, &lw);
+ else
+ p->se.load = lw;
}
#ifdef CONFIG_UCLAMP_TASK
@@ -1384,7 +1381,7 @@ static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY
* This knob will not override the system default sched_util_clamp_min defined
* above.
*/
-static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
+unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];
@@ -1409,32 +1406,6 @@ static struct uclamp_se uclamp_default[UCLAMP_CNT];
*/
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
-/* Integer rounded range for each bucket */
-#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
-
-#define for_each_clamp_id(clamp_id) \
- for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
-
-static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
-{
- return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
-}
-
-static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
-{
- if (clamp_id == UCLAMP_MIN)
- return 0;
- return SCHED_CAPACITY_SCALE;
-}
-
-static inline void uclamp_se_set(struct uclamp_se *uc_se,
- unsigned int value, bool user_defined)
-{
- uc_se->value = value;
- uc_se->bucket_id = uclamp_bucket_id(value);
- uc_se->user_defined = user_defined;
-}
-
static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
unsigned int clamp_value)
@@ -1676,7 +1647,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
rq_clamp = uclamp_rq_get(rq, clamp_id);
/*
* Defensive programming: this should never happen. If it happens,
- * e.g. due to future modification, warn and fixup the expected value.
+ * e.g. due to future modification, warn and fix up the expected value.
*/
SCHED_WARN_ON(bucket->value > rq_clamp);
if (bucket->value >= rq_clamp) {
@@ -1898,107 +1869,6 @@ undo:
}
#endif
-static int uclamp_validate(struct task_struct *p,
- const struct sched_attr *attr)
-{
- int util_min = p->uclamp_req[UCLAMP_MIN].value;
- int util_max = p->uclamp_req[UCLAMP_MAX].value;
-
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
- util_min = attr->sched_util_min;
-
- if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
- return -EINVAL;
- }
-
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
- util_max = attr->sched_util_max;
-
- if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
- return -EINVAL;
- }
-
- if (util_min != -1 && util_max != -1 && util_min > util_max)
- return -EINVAL;
-
- /*
- * We have valid uclamp attributes; make sure uclamp is enabled.
- *
- * We need to do that here, because enabling static branches is a
- * blocking operation which obviously cannot be done while holding
- * scheduler locks.
- */
- static_branch_enable(&sched_uclamp_used);
-
- return 0;
-}
-
-static bool uclamp_reset(const struct sched_attr *attr,
- enum uclamp_id clamp_id,
- struct uclamp_se *uc_se)
-{
- /* Reset on sched class change for a non user-defined clamp value. */
- if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
- !uc_se->user_defined)
- return true;
-
- /* Reset on sched_util_{min,max} == -1. */
- if (clamp_id == UCLAMP_MIN &&
- attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
- attr->sched_util_min == -1) {
- return true;
- }
-
- if (clamp_id == UCLAMP_MAX &&
- attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
- attr->sched_util_max == -1) {
- return true;
- }
-
- return false;
-}
-
-static void __setscheduler_uclamp(struct task_struct *p,
- const struct sched_attr *attr)
-{
- enum uclamp_id clamp_id;
-
- for_each_clamp_id(clamp_id) {
- struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
- unsigned int value;
-
- if (!uclamp_reset(attr, clamp_id, uc_se))
- continue;
-
- /*
- * RT by default have a 100% boost value that could be modified
- * at runtime.
- */
- if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
- value = sysctl_sched_uclamp_util_min_rt_default;
- else
- value = uclamp_none(clamp_id);
-
- uclamp_se_set(uc_se, value, false);
-
- }
-
- if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
- return;
-
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
- attr->sched_util_min != -1) {
- uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
- attr->sched_util_min, true);
- }
-
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
- attr->sched_util_max != -1) {
- uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
- attr->sched_util_max, true);
- }
-}
-
static void uclamp_fork(struct task_struct *p)
{
enum uclamp_id clamp_id;
@@ -2066,13 +1936,6 @@ static void __init init_uclamp(void)
#else /* !CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
-static inline int uclamp_validate(struct task_struct *p,
- const struct sched_attr *attr)
-{
- return -EOPNOTSUPP;
-}
-static void __setscheduler_uclamp(struct task_struct *p,
- const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
@@ -2102,7 +1965,7 @@ unsigned long get_wchan(struct task_struct *p)
return ip;
}
-static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
+void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (!(flags & ENQUEUE_NOCLOCK))
update_rq_clock(rq);
@@ -2119,7 +1982,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
sched_core_enqueue(rq, p);
}
-static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (sched_core_enabled(rq))
sched_core_dequeue(rq, p, flags);
@@ -2157,52 +2020,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
dequeue_task(rq, p, flags);
}
-static inline int __normal_prio(int policy, int rt_prio, int nice)
-{
- int prio;
-
- if (dl_policy(policy))
- prio = MAX_DL_PRIO - 1;
- else if (rt_policy(policy))
- prio = MAX_RT_PRIO - 1 - rt_prio;
- else
- prio = NICE_TO_PRIO(nice);
-
- return prio;
-}
-
-/*
- * Calculate the expected normal priority: i.e. priority
- * without taking RT-inheritance into account. Might be
- * boosted by interactivity modifiers. Changes upon fork,
- * setprio syscalls, and whenever the interactivity
- * estimator recalculates.
- */
-static inline int normal_prio(struct task_struct *p)
-{
- return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
-}
-
-/*
- * Calculate the current priority, i.e. the priority
- * taken into account by the scheduler. This value might
- * be boosted by RT tasks, or might be boosted by
- * interactivity modifiers. Will be RT if the task got
- * RT-boosted. If not then it returns p->normal_prio.
- */
-static int effective_prio(struct task_struct *p)
-{
- p->normal_prio = normal_prio(p);
- /*
- * If we are RT tasks or we were boosted to RT priority,
- * keep the priority unchanged. Otherwise, update priority
- * to the normal priority:
- */
- if (!rt_prio(p->prio))
- return p->normal_prio;
- return p->prio;
-}
-
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
@@ -2221,9 +2038,9 @@ inline int task_curr(const struct task_struct *p)
* this means any call to check_class_changed() must be followed by a call to
* balance_callback().
*/
-static inline void check_class_changed(struct rq *rq, struct task_struct *p,
- const struct sched_class *prev_class,
- int oldprio)
+void check_class_changed(struct rq *rq, struct task_struct *p,
+ const struct sched_class *prev_class,
+ int oldprio)
{
if (prev_class != p->sched_class) {
if (prev_class->switched_from)
@@ -2392,9 +2209,6 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
static void
__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
-static int __set_cpus_allowed_ptr(struct task_struct *p,
- struct affinity_context *ctx);
-
static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
struct affinity_context ac = {
@@ -2409,7 +2223,7 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
return;
/*
- * Violates locking rules! see comment in __do_set_cpus_allowed().
+ * Violates locking rules! See comment in __do_set_cpus_allowed().
*/
__do_set_cpus_allowed(p, &ac);
}
@@ -2576,7 +2390,7 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
}
/*
- * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * migration_cpu_stop - this will be executed by a high-prio stopper thread
* and performs thread migration by bumping thread off CPU then
* 'pushing' onto another runqueue.
*/
@@ -2821,16 +2635,6 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
}
-static cpumask_t *alloc_user_cpus_ptr(int node)
-{
- /*
- * See do_set_cpus_allowed() above for the rcu_head usage.
- */
- int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
-
- return kmalloc_node(size, GFP_KERNEL, node);
-}
-
int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
int node)
{
@@ -3199,8 +3003,7 @@ out:
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
-static int __set_cpus_allowed_ptr(struct task_struct *p,
- struct affinity_context *ctx)
+int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
{
struct rq_flags rf;
struct rq *rq;
@@ -3319,9 +3122,6 @@ out_free_mask:
free_cpumask_var(new_mask);
}
-static int
-__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
-
/*
* Restore the affinity of a task @p which was previously restricted by a
* call to force_compatible_cpus_allowed_ptr().
@@ -3701,12 +3501,6 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
#else /* CONFIG_SMP */
-static inline int __set_cpus_allowed_ptr(struct task_struct *p,
- struct affinity_context *ctx)
-{
- return set_cpus_allowed_ptr(p, ctx->new_mask);
-}
-
static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
static inline bool rq_has_pinned_tasks(struct rq *rq)
@@ -3714,11 +3508,6 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
return false;
}
-static inline cpumask_t *alloc_user_cpus_ptr(int node)
-{
- return NULL;
-}
-
#endif /* !CONFIG_SMP */
static void
@@ -3901,8 +3690,8 @@ void sched_ttwu_pending(void *arg)
* it is possible for select_idle_siblings() to stack a number
* of tasks on this CPU during that window.
*
- * It is ok to clear ttwu_pending when another task pending.
- * We will receive IPI after local irq enabled and then enqueue it.
+ * It is OK to clear ttwu_pending when another task is pending.
+ * We will receive an IPI after local IRQs are enabled and then enqueue it.
* Since now nr_running > 0, idle_cpu() will always get correct result.
*/
WRITE_ONCE(rq->ttwu_pending, 0);
@@ -4467,12 +4256,7 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg)
* @cpu: The CPU on which to snapshot the task.
*
* Returns the task_struct pointer of the task "currently" running on
- * the specified CPU. If the same task is running on that CPU throughout,
- * the return value will be a pointer to that task's task_struct structure.
- * If the CPU did any context switches even vaguely concurrently with the
- * execution of this function, the return value will be a pointer to the
- * task_struct structure of a randomly chosen task that was running on
- * that CPU somewhere around the time that this function was executing.
+ * the specified CPU.
*
* If the specified CPU was offline, the return value is whatever it
* is, perhaps a pointer to the task_struct structure of that CPU's idle
@@ -4486,11 +4270,16 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg)
*/
struct task_struct *cpu_curr_snapshot(int cpu)
{
+ struct rq *rq = cpu_rq(cpu);
struct task_struct *t;
+ struct rq_flags rf;
- smp_mb(); /* Pairing determined by caller's synchronization design. */
+ rq_lock_irqsave(rq, &rf);
+ smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
t = rcu_dereference(cpu_curr(cpu));
+ rq_unlock_irqrestore(rq, &rf);
smp_mb(); /* Pairing determined by caller's synchronization design. */
+
return t;
}
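
The cpu_curr_snapshot() rewrite above trades the leading smp_mb() for the target CPU's runqueue lock plus smp_mb__after_spinlock(), so the task is sampled while no context switch can be in flight. A rough caller-side sketch of how RCU Tasks Trace relies on the resulting either/or guarantee; demo_scan_cpu() is an illustrative name:

static void demo_scan_cpu(int cpu)
{
	struct task_struct *t;

	rcu_read_lock();			/* RCU keeps the returned task_struct from being freed. */
	t = cpu_curr_snapshot(cpu);		/* Sampled under the rq lock with a full barrier. */
	/*
	 * Either t's read-side critical section is visible to this scan,
	 * or t will see the updater's pre-grace-period accesses.
	 */
	pr_debug("CPU %d is running pid %d\n", cpu, t->pid);
	rcu_read_unlock();
}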
@@ -5095,7 +4884,7 @@ __splice_balance_callbacks(struct rq *rq, bool split)
return head;
}
-static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
+struct balance_callback *splice_balance_callbacks(struct rq *rq)
{
return __splice_balance_callbacks(rq, true);
}
@@ -5105,7 +4894,7 @@ static void __balance_callbacks(struct rq *rq)
do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
}
-static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
+void balance_callbacks(struct rq *rq, struct balance_callback *head)
{
unsigned long flags;
@@ -5122,15 +4911,6 @@ static inline void __balance_callbacks(struct rq *rq)
{
}
-static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-{
- return NULL;
-}
-
-static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-{
-}
-
#endif
static inline void
@@ -5233,7 +5013,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
*
* The context switch have flipped the stack from under us and restored the
* local variables which were saved when this task called schedule() in the
- * past. prev == current is still correct but we need to recalculate this_rq
+ * past. 'prev == current' is still correct but we need to recalculate this_rq
* because prev may have moved to another CPU.
*/
static struct rq *finish_task_switch(struct task_struct *prev)
@@ -5556,9 +5336,9 @@ EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
static inline void prefetch_curr_exec_start(struct task_struct *p)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
- struct sched_entity *curr = (&p->se)->cfs_rq->curr;
+ struct sched_entity *curr = p->se.cfs_rq->curr;
#else
- struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
+ struct sched_entity *curr = task_rq(p)->cfs.curr;
#endif
prefetch(curr);
prefetch(&curr->exec_start);
@@ -5579,7 +5359,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
/*
* 64-bit doesn't need locks to atomically read a 64-bit value.
* So we have an optimization chance when the task's delta_exec is 0.
- * Reading ->on_cpu is racy, but this is ok.
+ * Reading ->on_cpu is racy, but this is OK.
*
* If we race with it leaving CPU, we'll take a lock. So we're correct.
* If we race with it entering CPU, unaccounted time is 0. This is
@@ -5665,7 +5445,7 @@ void sched_tick(void)
{
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
- struct task_struct *curr = rq->curr;
+ struct task_struct *curr;
struct rq_flags rf;
unsigned long hw_pressure;
u64 resched_latency;
@@ -5677,6 +5457,9 @@ void sched_tick(void)
rq_lock(rq, &rf);
+ curr = rq->curr;
+ psi_account_irqtime(rq, curr, NULL);
+
update_rq_clock(rq);
hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
@@ -6737,6 +6520,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
++*switch_count;
migrate_disable_switch(rq, prev);
+ psi_account_irqtime(rq, prev, next);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
@@ -6853,7 +6637,7 @@ void __sched schedule_idle(void)
{
/*
* As this skips calling sched_submit_work(), which the idle task does
- * regardless because that function is a nop when the task is in a
+ * regardless because that function is a NOP when the task is in a
* TASK_RUNNING state, make sure this isn't used someplace that the
* current task can be in any other state. Note, idle is always in the
* TASK_RUNNING state.
@@ -7048,9 +6832,9 @@ EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
/*
* This is the entry point to schedule() from kernel preemption
- * off of irq context.
- * Note, that this is called and return with irqs disabled. This will
- * protect us against recursive calling from irq.
+ * off of IRQ context.
+ * Note, that this is called and return with IRQs disabled. This will
+ * protect us against recursive calling from IRQ contexts.
*/
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
@@ -7080,7 +6864,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
}
EXPORT_SYMBOL(default_wake_function);
-static void __setscheduler_prio(struct task_struct *p, int prio)
+void __setscheduler_prio(struct task_struct *p, int prio)
{
if (dl_prio(prio))
p->sched_class = &dl_sched_class;
@@ -7120,21 +6904,6 @@ void rt_mutex_post_schedule(void)
lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
}
-static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
-{
- if (pi_task)
- prio = min(prio, pi_task->prio);
-
- return prio;
-}
-
-static inline int rt_effective_prio(struct task_struct *p, int prio)
-{
- struct task_struct *pi_task = rt_mutex_get_top_task(p);
-
- return __rt_effective_prio(pi_task, prio);
-}
-
/*
* rt_mutex_setprio - set the current priority of a task
* @p: task to boost
@@ -7184,7 +6953,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
goto out_unlock;
/*
- * Idle task boosting is a nono in general. There is one
+ * Idle task boosting is a no-no in general. There is one
* exception, when PREEMPT_RT and NOHZ is active:
*
* The idle task calls get_next_timer_interrupt() and holds
@@ -7263,1325 +7032,8 @@ out_unlock:
preempt_enable();
}
-#else
-static inline int rt_effective_prio(struct task_struct *p, int prio)
-{
- return prio;
-}
#endif
-void set_user_nice(struct task_struct *p, long nice)
-{
- bool queued, running;
- struct rq *rq;
- int old_prio;
-
- if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
- return;
- /*
- * We have to be careful, if called from sys_setpriority(),
- * the task might be in the middle of scheduling on another CPU.
- */
- CLASS(task_rq_lock, rq_guard)(p);
- rq = rq_guard.rq;
-
- update_rq_clock(rq);
-
- /*
- * The RT priorities are set via sched_setscheduler(), but we still
- * allow the 'normal' nice value to be set - but as expected
- * it won't have any effect on scheduling until the task is
- * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
- */
- if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
- p->static_prio = NICE_TO_PRIO(nice);
- return;
- }
-
- queued = task_on_rq_queued(p);
- running = task_current(rq, p);
- if (queued)
- dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
- if (running)
- put_prev_task(rq, p);
-
- p->static_prio = NICE_TO_PRIO(nice);
- set_load_weight(p, true);
- old_prio = p->prio;
- p->prio = effective_prio(p);
-
- if (queued)
- enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
- if (running)
- set_next_task(rq, p);
-
- /*
- * If the task increased its priority or is running and
- * lowered its priority, then reschedule its CPU:
- */
- p->sched_class->prio_changed(rq, p, old_prio);
-}
-EXPORT_SYMBOL(set_user_nice);
-
-/*
- * is_nice_reduction - check if nice value is an actual reduction
- *
- * Similar to can_nice() but does not perform a capability check.
- *
- * @p: task
- * @nice: nice value
- */
-static bool is_nice_reduction(const struct task_struct *p, const int nice)
-{
- /* Convert nice value [19,-20] to rlimit style value [1,40]: */
- int nice_rlim = nice_to_rlimit(nice);
-
- return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
-}
-
-/*
- * can_nice - check if a task can reduce its nice value
- * @p: task
- * @nice: nice value
- */
-int can_nice(const struct task_struct *p, const int nice)
-{
- return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
-}
-
-#ifdef __ARCH_WANT_SYS_NICE
-
-/*
- * sys_nice - change the priority of the current process.
- * @increment: priority increment
- *
- * sys_setpriority is a more generic, but much slower function that
- * does similar things.
- */
-SYSCALL_DEFINE1(nice, int, increment)
-{
- long nice, retval;
-
- /*
- * Setpriority might change our priority at the same moment.
- * We don't have to worry. Conceptually one call occurs first
- * and we have a single winner.
- */
- increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
- nice = task_nice(current) + increment;
-
- nice = clamp_val(nice, MIN_NICE, MAX_NICE);
- if (increment < 0 && !can_nice(current, nice))
- return -EPERM;
-
- retval = security_task_setnice(current, nice);
- if (retval)
- return retval;
-
- set_user_nice(current, nice);
- return 0;
-}
-
-#endif
-
-/**
- * task_prio - return the priority value of a given task.
- * @p: the task in question.
- *
- * Return: The priority value as seen by users in /proc.
- *
- * sched policy return value kernel prio user prio/nice
- *
- * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
- * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
- * deadline -101 -1 0
- */
-int task_prio(const struct task_struct *p)
-{
- return p->prio - MAX_RT_PRIO;
-}
-
-/**
- * idle_cpu - is a given CPU idle currently?
- * @cpu: the processor in question.
- *
- * Return: 1 if the CPU is currently idle. 0 otherwise.
- */
-int idle_cpu(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- if (rq->curr != rq->idle)
- return 0;
-
- if (rq->nr_running)
- return 0;
-
-#ifdef CONFIG_SMP
- if (rq->ttwu_pending)
- return 0;
-#endif
-
- return 1;
-}
-
-/**
- * available_idle_cpu - is a given CPU idle for enqueuing work.
- * @cpu: the CPU in question.
- *
- * Return: 1 if the CPU is currently idle. 0 otherwise.
- */
-int available_idle_cpu(int cpu)
-{
- if (!idle_cpu(cpu))
- return 0;
-
- if (vcpu_is_preempted(cpu))
- return 0;
-
- return 1;
-}
-
-/**
- * idle_task - return the idle task for a given CPU.
- * @cpu: the processor in question.
- *
- * Return: The idle task for the CPU @cpu.
- */
-struct task_struct *idle_task(int cpu)
-{
- return cpu_rq(cpu)->idle;
-}
-
-#ifdef CONFIG_SCHED_CORE
-int sched_core_idle_cpu(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- if (sched_core_enabled(rq) && rq->curr == rq->idle)
- return 1;
-
- return idle_cpu(cpu);
-}
-
-#endif
-
-#ifdef CONFIG_SMP
-/*
- * This function computes an effective utilization for the given CPU, to be
- * used for frequency selection given the linear relation: f = u * f_max.
- *
- * The scheduler tracks the following metrics:
- *
- * cpu_util_{cfs,rt,dl,irq}()
- * cpu_bw_dl()
- *
- * Where the cfs,rt and dl util numbers are tracked with the same metric and
- * synchronized windows and are thus directly comparable.
- *
- * The cfs,rt,dl utilization are the running times measured with rq->clock_task
- * which excludes things like IRQ and steal-time. These latter are then accrued
- * in the irq utilization.
- *
- * The DL bandwidth number otoh is not a measured metric but a value computed
- * based on the task model parameters and gives the minimal utilization
- * required to meet deadlines.
- */
-unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
- unsigned long *min,
- unsigned long *max)
-{
- unsigned long util, irq, scale;
- struct rq *rq = cpu_rq(cpu);
-
- scale = arch_scale_cpu_capacity(cpu);
-
- /*
- * Early check to see if IRQ/steal time saturates the CPU, can be
- * because of inaccuracies in how we track these -- see
- * update_irq_load_avg().
- */
- irq = cpu_util_irq(rq);
- if (unlikely(irq >= scale)) {
- if (min)
- *min = scale;
- if (max)
- *max = scale;
- return scale;
- }
-
- if (min) {
- /*
- * The minimum utilization returns the highest level between:
- * - the computed DL bandwidth needed with the IRQ pressure which
- * steals time from the deadline task.
- * - The minimum performance requirement for CFS and/or RT.
- */
- *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
-
- /*
- * When an RT task is runnable and uclamp is not used, we must
- * ensure that the task will run at maximum compute capacity.
- */
- if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
- *min = max(*min, scale);
- }
-
- /*
- * Because the time spent on RT/DL tasks is visible as 'lost' time to
- * CFS tasks and we use the same metric to track the effective
- * utilization (PELT windows are synchronized) we can directly add them
- * to obtain the CPU's actual utilization.
- */
- util = util_cfs + cpu_util_rt(rq);
- util += cpu_util_dl(rq);
-
- /*
- * The maximum hint is a soft bandwidth requirement, which can be lower
- * than the actual utilization because of uclamp_max requirements.
- */
- if (max)
- *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
-
- if (util >= scale)
- return scale;
-
- /*
- * There is still idle time; further improve the number by using the
- * irq metric. Because IRQ/steal time is hidden from the task clock we
- * need to scale the task numbers:
- *
- *              max - irq
- *   U' = irq + --------- * U
- *                 max
- */
- util = scale_irq_capacity(util, irq, scale);
- util += irq;
-
- return min(scale, util);
-}
-
-unsigned long sched_cpu_util(int cpu)
-{
- return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
-}
-#endif /* CONFIG_SMP */
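
The final IRQ-scaling step is the only non-obvious arithmetic in effective_cpu_util(); a standalone sketch with illustrative numbers, assuming scale_irq_capacity(util, irq, max) computes util * (max - irq) / max as in the scheduler headers:

/* Standalone sketch of the IRQ scaling step above:
 *   U' = irq + (max - irq) / max * U
 */
#include <stdio.h>

static unsigned long scale_irq_capacity(unsigned long util,
					unsigned long irq,
					unsigned long max)
{
	return util * (max - irq) / max;
}

int main(void)
{
	unsigned long scale = 1024;	/* arch_scale_cpu_capacity()  */
	unsigned long irq   = 128;	/* cpu_util_irq()             */
	unsigned long util  = 512;	/* cfs + rt + dl utilization  */

	unsigned long u = scale_irq_capacity(util, irq, scale) + irq;

	printf("effective util = %lu of %lu\n", u < scale ? u : scale, scale);
	return 0;
}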
-
-/**
- * find_process_by_pid - find a process with a matching PID value.
- * @pid: the pid in question.
- *
- * The task of @pid, if found. %NULL otherwise.
- */
-static struct task_struct *find_process_by_pid(pid_t pid)
-{
- return pid ? find_task_by_vpid(pid) : current;
-}
-
-static struct task_struct *find_get_task(pid_t pid)
-{
- struct task_struct *p;
- guard(rcu)();
-
- p = find_process_by_pid(pid);
- if (likely(p))
- get_task_struct(p);
-
- return p;
-}
-
-DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
- find_get_task(pid), pid_t pid)
-
-/*
- * sched_setparam() passes in -1 for its policy, to let the functions
- * it calls know not to change it.
- */
-#define SETPARAM_POLICY -1
-
-static void __setscheduler_params(struct task_struct *p,
- const struct sched_attr *attr)
-{
- int policy = attr->sched_policy;
-
- if (policy == SETPARAM_POLICY)
- policy = p->policy;
-
- p->policy = policy;
-
- if (dl_policy(policy))
- __setparam_dl(p, attr);
- else if (fair_policy(policy))
- p->static_prio = NICE_TO_PRIO(attr->sched_nice);
-
- /*
- * __sched_setscheduler() ensures attr->sched_priority == 0 when
- * !rt_policy. Always setting this ensures that things like
- * getparam()/getattr() don't report silly values for !rt tasks.
- */
- p->rt_priority = attr->sched_priority;
- p->normal_prio = normal_prio(p);
- set_load_weight(p, true);
-}
-
-/*
- * Check the target process has a UID that matches the current process's:
- */
-static bool check_same_owner(struct task_struct *p)
-{
- const struct cred *cred = current_cred(), *pcred;
- guard(rcu)();
-
- pcred = __task_cred(p);
- return (uid_eq(cred->euid, pcred->euid) ||
- uid_eq(cred->euid, pcred->uid));
-}
-
-/*
- * Allow unprivileged RT tasks to decrease priority.
- * Only issue a capable test if needed and only once to avoid an audit
- * event on permitted non-privileged operations:
- */
-static int user_check_sched_setscheduler(struct task_struct *p,
- const struct sched_attr *attr,
- int policy, int reset_on_fork)
-{
- if (fair_policy(policy)) {
- if (attr->sched_nice < task_nice(p) &&
- !is_nice_reduction(p, attr->sched_nice))
- goto req_priv;
- }
-
- if (rt_policy(policy)) {
- unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-
- /* Can't set/change the rt policy: */
- if (policy != p->policy && !rlim_rtprio)
- goto req_priv;
-
- /* Can't increase priority: */
- if (attr->sched_priority > p->rt_priority &&
- attr->sched_priority > rlim_rtprio)
- goto req_priv;
- }
-
- /*
- * Can't set/change SCHED_DEADLINE policy at all for now
- * (safest behavior); in the future we would like to allow
- * unprivileged DL tasks to increase their relative deadline
- * or reduce their runtime (both ways reducing utilization)
- */
- if (dl_policy(policy))
- goto req_priv;
-
- /*
- * Treat SCHED_IDLE as nice 20. Only allow a switch to
- * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
- */
- if (task_has_idle_policy(p) && !idle_policy(policy)) {
- if (!is_nice_reduction(p, task_nice(p)))
- goto req_priv;
- }
-
- /* Can't change other user's priorities: */
- if (!check_same_owner(p))
- goto req_priv;
-
- /* Normal users shall not reset the sched_reset_on_fork flag: */
- if (p->sched_reset_on_fork && !reset_on_fork)
- goto req_priv;
-
- return 0;
-
-req_priv:
- if (!capable(CAP_SYS_NICE))
- return -EPERM;
-
- return 0;
-}
-
-static int __sched_setscheduler(struct task_struct *p,
- const struct sched_attr *attr,
- bool user, bool pi)
-{
- int oldpolicy = -1, policy = attr->sched_policy;
- int retval, oldprio, newprio, queued, running;
- const struct sched_class *prev_class;
- struct balance_callback *head;
- struct rq_flags rf;
- int reset_on_fork;
- int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
- struct rq *rq;
- bool cpuset_locked = false;
-
- /* The pi code expects interrupts enabled */
- BUG_ON(pi && in_interrupt());
-recheck:
- /* Double check policy once rq lock held: */
- if (policy < 0) {
- reset_on_fork = p->sched_reset_on_fork;
- policy = oldpolicy = p->policy;
- } else {
- reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
-
- if (!valid_policy(policy))
- return -EINVAL;
- }
-
- if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
- return -EINVAL;
-
- /*
- * Valid priorities for SCHED_FIFO and SCHED_RR are
- * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
- * SCHED_BATCH and SCHED_IDLE is 0.
- */
- if (attr->sched_priority > MAX_RT_PRIO-1)
- return -EINVAL;
- if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
- (rt_policy(policy) != (attr->sched_priority != 0)))
- return -EINVAL;
-
- if (user) {
- retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
- if (retval)
- return retval;
-
- if (attr->sched_flags & SCHED_FLAG_SUGOV)
- return -EINVAL;
-
- retval = security_task_setscheduler(p);
- if (retval)
- return retval;
- }
-
- /* Update task specific "requested" clamps */
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
- retval = uclamp_validate(p, attr);
- if (retval)
- return retval;
- }
-
- /*
- * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
- * information.
- */
- if (dl_policy(policy) || dl_policy(p->policy)) {
- cpuset_locked = true;
- cpuset_lock();
- }
-
- /*
- * Make sure no PI-waiters arrive (or leave) while we are
- * changing the priority of the task:
- *
- * To be able to change p->policy safely, the appropriate
- * runqueue lock must be held.
- */
- rq = task_rq_lock(p, &rf);
- update_rq_clock(rq);
-
- /*
- * Changing the policy of the stop threads is a very bad idea:
- */
- if (p == rq->stop) {
- retval = -EINVAL;
- goto unlock;
- }
-
- /*
- * If not changing anything there's no need to proceed further,
- * but store a possible modification of reset_on_fork.
- */
- if (unlikely(policy == p->policy)) {
- if (fair_policy(policy) && attr->sched_nice != task_nice(p))
- goto change;
- if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
- goto change;
- if (dl_policy(policy) && dl_param_changed(p, attr))
- goto change;
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
- goto change;
-
- p->sched_reset_on_fork = reset_on_fork;
- retval = 0;
- goto unlock;
- }
-change:
-
- if (user) {
-#ifdef CONFIG_RT_GROUP_SCHED
- /*
- * Do not allow realtime tasks into groups that have no runtime
- * assigned.
- */
- if (rt_bandwidth_enabled() && rt_policy(policy) &&
- task_group(p)->rt_bandwidth.rt_runtime == 0 &&
- !task_group_is_autogroup(task_group(p))) {
- retval = -EPERM;
- goto unlock;
- }
-#endif
-#ifdef CONFIG_SMP
- if (dl_bandwidth_enabled() && dl_policy(policy) &&
- !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
- cpumask_t *span = rq->rd->span;
-
- /*
- * Don't allow tasks with an affinity mask smaller than
- * the entire root_domain to become SCHED_DEADLINE. We
- * will also fail if there's no bandwidth available.
- */
- if (!cpumask_subset(span, p->cpus_ptr) ||
- rq->rd->dl_bw.bw == 0) {
- retval = -EPERM;
- goto unlock;
- }
- }
-#endif
- }
-
- /* Re-check policy now with rq lock held: */
- if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
- policy = oldpolicy = -1;
- task_rq_unlock(rq, p, &rf);
- if (cpuset_locked)
- cpuset_unlock();
- goto recheck;
- }
-
- /*
- * If setscheduling to SCHED_DEADLINE (or changing the parameters
- * of a SCHED_DEADLINE task) we need to check if enough bandwidth
- * is available.
- */
- if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
- retval = -EBUSY;
- goto unlock;
- }
-
- p->sched_reset_on_fork = reset_on_fork;
- oldprio = p->prio;
-
- newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
- if (pi) {
- /*
- * Take priority boosted tasks into account. If the new
- * effective priority is unchanged, we just store the new
- * normal parameters and do not touch the scheduler class and
- * the runqueue. This will be done when the task deboosts
- * itself.
- */
- newprio = rt_effective_prio(p, newprio);
- if (newprio == oldprio)
- queue_flags &= ~DEQUEUE_MOVE;
- }
-
- queued = task_on_rq_queued(p);
- running = task_current(rq, p);
- if (queued)
- dequeue_task(rq, p, queue_flags);
- if (running)
- put_prev_task(rq, p);
-
- prev_class = p->sched_class;
-
- if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
- __setscheduler_params(p, attr);
- __setscheduler_prio(p, newprio);
- }
- __setscheduler_uclamp(p, attr);
-
- if (queued) {
- /*
- * We enqueue to tail when the priority of a task is
- * increased (user space view).
- */
- if (oldprio < p->prio)
- queue_flags |= ENQUEUE_HEAD;
-
- enqueue_task(rq, p, queue_flags);
- }
- if (running)
- set_next_task(rq, p);
-
- check_class_changed(rq, p, prev_class, oldprio);
-
- /* Avoid rq from going away on us: */
- preempt_disable();
- head = splice_balance_callbacks(rq);
- task_rq_unlock(rq, p, &rf);
-
- if (pi) {
- if (cpuset_locked)
- cpuset_unlock();
- rt_mutex_adjust_pi(p);
- }
-
- /* Run balance callbacks after we've adjusted the PI chain: */
- balance_callbacks(rq, head);
- preempt_enable();
-
- return 0;
-
-unlock:
- task_rq_unlock(rq, p, &rf);
- if (cpuset_locked)
- cpuset_unlock();
- return retval;
-}
-
-static int _sched_setscheduler(struct task_struct *p, int policy,
- const struct sched_param *param, bool check)
-{
- struct sched_attr attr = {
- .sched_policy = policy,
- .sched_priority = param->sched_priority,
- .sched_nice = PRIO_TO_NICE(p->static_prio),
- };
-
- /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
- if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
- attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
- policy &= ~SCHED_RESET_ON_FORK;
- attr.sched_policy = policy;
- }
-
- return __sched_setscheduler(p, &attr, check, true);
-}
-/**
- * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * Use sched_set_fifo(), read its comment.
- *
- * Return: 0 on success. An error code otherwise.
- *
- * NOTE that the task may be already dead.
- */
-int sched_setscheduler(struct task_struct *p, int policy,
- const struct sched_param *param)
-{
- return _sched_setscheduler(p, policy, param, true);
-}
-
-int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
-{
- return __sched_setscheduler(p, attr, true, true);
-}
-
-int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
-{
- return __sched_setscheduler(p, attr, false, true);
-}
-EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
-
-/**
- * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * Just like sched_setscheduler, only don't bother checking if the
- * current context has permission. For example, this is needed in
- * stop_machine(): we create temporary high priority worker threads,
- * but our caller might not have that capability.
- *
- * Return: 0 on success. An error code otherwise.
- */
-int sched_setscheduler_nocheck(struct task_struct *p, int policy,
- const struct sched_param *param)
-{
- return _sched_setscheduler(p, policy, param, false);
-}
-
-/*
- * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
- * incapable of resource management, which is the one thing an OS really should
- * be doing.
- *
- * This is of course the reason it is limited to privileged users only.
- *
- * Worse still; it is fundamentally impossible to compose static priority
- * workloads. You cannot take two correctly working static prio workloads
- * and smash them together and still expect them to work.
- *
- * For this reason 'all' FIFO tasks the kernel creates are basically at:
- *
- * MAX_RT_PRIO / 2
- *
- * The administrator _MUST_ configure the system, the kernel simply doesn't
- * know enough information to make a sensible choice.
- */
-void sched_set_fifo(struct task_struct *p)
-{
- struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
- WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-}
-EXPORT_SYMBOL_GPL(sched_set_fifo);
-
-/*
- * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
- */
-void sched_set_fifo_low(struct task_struct *p)
-{
- struct sched_param sp = { .sched_priority = 1 };
- WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-}
-EXPORT_SYMBOL_GPL(sched_set_fifo_low);
-
-void sched_set_normal(struct task_struct *p, int nice)
-{
- struct sched_attr attr = {
- .sched_policy = SCHED_NORMAL,
- .sched_nice = nice,
- };
- WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
-}
-EXPORT_SYMBOL_GPL(sched_set_normal);
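
A minimal module-context sketch of the intended use of these helpers (the thread name and timings are illustrative, not taken from the patch):

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_thread;

static int demo_thread_fn(void *data)
{
	sched_set_fifo(current);	/* fixed priority MAX_RT_PRIO / 2 */

	while (!kthread_should_stop())
		msleep(100);

	sched_set_normal(current, 0);	/* back to SCHED_NORMAL, nice 0 */
	return 0;
}

static int __init demo_init(void)
{
	demo_thread = kthread_run(demo_thread_fn, NULL, "fifo_demo");
	return PTR_ERR_OR_ZERO(demo_thread);
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");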
-
-static int
-do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
-{
- struct sched_param lparam;
-
- if (!param || pid < 0)
- return -EINVAL;
- if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
- return -EFAULT;
-
- CLASS(find_get_task, p)(pid);
- if (!p)
- return -ESRCH;
-
- return sched_setscheduler(p, policy, &lparam);
-}
-
-/*
- * Mimics kernel/events/core.c perf_copy_attr().
- */
-static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
-{
- u32 size;
- int ret;
-
- /* Zero the full structure, so that a short copy will be nice: */
- memset(attr, 0, sizeof(*attr));
-
- ret = get_user(size, &uattr->size);
- if (ret)
- return ret;
-
- /* ABI compatibility quirk: */
- if (!size)
- size = SCHED_ATTR_SIZE_VER0;
- if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
- goto err_size;
-
- ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
- if (ret) {
- if (ret == -E2BIG)
- goto err_size;
- return ret;
- }
-
- if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
- size < SCHED_ATTR_SIZE_VER1)
- return -EINVAL;
-
- /*
- * XXX: Do we want to be lenient like existing syscalls; or do we want
- * to be strict and return an error on out-of-bounds values?
- */
- attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
-
- return 0;
-
-err_size:
- put_user(sizeof(*attr), &uattr->size);
- return -E2BIG;
-}
-
-static void get_params(struct task_struct *p, struct sched_attr *attr)
-{
- if (task_has_dl_policy(p))
- __getparam_dl(p, attr);
- else if (task_has_rt_policy(p))
- attr->sched_priority = p->rt_priority;
- else
- attr->sched_nice = task_nice(p);
-}
-
-/**
- * sys_sched_setscheduler - set/change the scheduler policy and RT priority
- * @pid: the pid in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * Return: 0 on success. An error code otherwise.
- */
-SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
-{
- if (policy < 0)
- return -EINVAL;
-
- return do_sched_setscheduler(pid, policy, param);
-}
-
-/**
- * sys_sched_setparam - set/change the RT priority of a thread
- * @pid: the pid in question.
- * @param: structure containing the new RT priority.
- *
- * Return: 0 on success. An error code otherwise.
- */
-SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-{
- return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
-}
-
-/**
- * sys_sched_setattr - same as above, but with extended sched_attr
- * @pid: the pid in question.
- * @uattr: structure containing the extended parameters.
- * @flags: for future extension.
- */
-SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
- unsigned int, flags)
-{
- struct sched_attr attr;
- int retval;
-
- if (!uattr || pid < 0 || flags)
- return -EINVAL;
-
- retval = sched_copy_attr(uattr, &attr);
- if (retval)
- return retval;
-
- if ((int)attr.sched_policy < 0)
- return -EINVAL;
- if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
- attr.sched_policy = SETPARAM_POLICY;
-
- CLASS(find_get_task, p)(pid);
- if (!p)
- return -ESRCH;
-
- if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
- get_params(p, &attr);
-
- return sched_setattr(p, &attr);
-}
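
From user space, sys_sched_setattr() is usually reached through the raw syscall, since glibc has not traditionally wrapped it; a hedged sketch that requests SCHED_DEADLINE with a 10 ms runtime every 100 ms (the numbers are illustrative, and struct sched_attr is assumed to come from the UAPI header <linux/sched/types.h>; on older toolchains it may need to be declared locally):

/* Userspace sketch: switch the calling thread to SCHED_DEADLINE. */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/sched.h>		/* SCHED_DEADLINE    */
#include <linux/sched/types.h>		/* struct sched_attr */

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	    = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;		/* 10 ms  */
	attr.sched_deadline = 100 * 1000 * 1000;	/* 100 ms */
	attr.sched_period   = 100 * 1000 * 1000;	/* 100 ms */

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");	/* EPERM without privilege */
		return 1;
	}
	puts("now running as SCHED_DEADLINE");
	return 0;
}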
-
-/**
- * sys_sched_getscheduler - get the policy (scheduling class) of a thread
- * @pid: the pid in question.
- *
- * Return: On success, the policy of the thread. Otherwise, a negative error
- * code.
- */
-SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-{
- struct task_struct *p;
- int retval;
-
- if (pid < 0)
- return -EINVAL;
-
- guard(rcu)();
- p = find_process_by_pid(pid);
- if (!p)
- return -ESRCH;
-
- retval = security_task_getscheduler(p);
- if (!retval) {
- retval = p->policy;
- if (p->sched_reset_on_fork)
- retval |= SCHED_RESET_ON_FORK;
- }
- return retval;
-}
-
-/**
- * sys_sched_getparam - get the RT priority of a thread
- * @pid: the pid in question.
- * @param: structure containing the RT priority.
- *
- * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
- * code.
- */
-SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-{
- struct sched_param lp = { .sched_priority = 0 };
- struct task_struct *p;
- int retval;
-
- if (!param || pid < 0)
- return -EINVAL;
-
- scoped_guard (rcu) {
- p = find_process_by_pid(pid);
- if (!p)
- return -ESRCH;
-
- retval = security_task_getscheduler(p);
- if (retval)
- return retval;
-
- if (task_has_rt_policy(p))
- lp.sched_priority = p->rt_priority;
- }
-
- /*
- * This one might sleep, we cannot do it with a spinlock held ...
- */
- return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-}
-
-/*
- * Copy the kernel size attribute structure (which might be larger
- * than what user-space knows about) to user-space.
- *
- * Note that all cases are valid: user-space buffer can be larger or
- * smaller than the kernel-space buffer. The usual case is that both
- * have the same size.
- */
-static int
-sched_attr_copy_to_user(struct sched_attr __user *uattr,
- struct sched_attr *kattr,
- unsigned int usize)
-{
- unsigned int ksize = sizeof(*kattr);
-
- if (!access_ok(uattr, usize))
- return -EFAULT;
-
- /*
- * sched_getattr() ABI forwards and backwards compatibility:
- *
- * If usize == ksize then we just copy everything to user-space and all is good.
- *
- * If usize < ksize then we only copy as much as user-space has space for,
- * this keeps ABI compatibility as well. We skip the rest.
- *
- * If usize > ksize then user-space is using a newer version of the ABI,
- * which part the kernel doesn't know about. Just ignore it - tooling can
- * detect the kernel's knowledge of attributes from the attr->size value
- * which is set to ksize in this case.
- */
- kattr->size = min(usize, ksize);
-
- if (copy_to_user(uattr, kattr, kattr->size))
- return -EFAULT;
-
- return 0;
-}
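
The size handshake described above is what lets old binaries keep working against newer kernels and vice versa; a user-space sketch that simply passes its own sizeof(struct sched_attr) and lets the kernel truncate or zero-fill as needed (same struct sched_attr header caveat as in the earlier sketch):

/* Userspace sketch of the size-based ABI handshake, via the raw syscall. */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/sched/types.h>		/* struct sched_attr */

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == -1) {
		perror("sched_getattr");
		return 1;
	}

	printf("policy=%u nice=%d rt_priority=%u (kernel wrote size=%u)\n",
	       attr.sched_policy, attr.sched_nice, attr.sched_priority,
	       attr.size);
	return 0;
}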
-
-/**
- * sys_sched_getattr - similar to sched_getparam, but with sched_attr
- * @pid: the pid in question.
- * @uattr: structure containing the extended parameters.
- * @usize: sizeof(attr) for fwd/bwd comp.
- * @flags: for future extension.
- */
-SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
- unsigned int, usize, unsigned int, flags)
-{
- struct sched_attr kattr = { };
- struct task_struct *p;
- int retval;
-
- if (!uattr || pid < 0 || usize > PAGE_SIZE ||
- usize < SCHED_ATTR_SIZE_VER0 || flags)
- return -EINVAL;
-
- scoped_guard (rcu) {
- p = find_process_by_pid(pid);
- if (!p)
- return -ESRCH;
-
- retval = security_task_getscheduler(p);
- if (retval)
- return retval;
-
- kattr.sched_policy = p->policy;
- if (p->sched_reset_on_fork)
- kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
- get_params(p, &kattr);
- kattr.sched_flags &= SCHED_FLAG_ALL;
-
-#ifdef CONFIG_UCLAMP_TASK
- /*
- * This could race with another potential updater, but this is fine
- * because it'll correctly read the old or the new value. We don't need
- * to guarantee who wins the race as long as it doesn't return garbage.
- */
- kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
- kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
-#endif
- }
-
- return sched_attr_copy_to_user(uattr, &kattr, usize);
-}
-
-#ifdef CONFIG_SMP
-int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
-{
- /*
- * If the task isn't a deadline task or admission control is
- * disabled then we don't care about affinity changes.
- */
- if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
- return 0;
-
- /*
- * Since bandwidth control happens on root_domain basis,
- * if admission test is enabled, we only admit -deadline
- * tasks allowed to run on all the CPUs in the task's
- * root_domain.
- */
- guard(rcu)();
- if (!cpumask_subset(task_rq(p)->rd->span, mask))
- return -EBUSY;
-
- return 0;
-}
-#endif
-
-static int
-__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
-{
- int retval;
- cpumask_var_t cpus_allowed, new_mask;
-
- if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
- return -ENOMEM;
-
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
- retval = -ENOMEM;
- goto out_free_cpus_allowed;
- }
-
- cpuset_cpus_allowed(p, cpus_allowed);
- cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
-
- ctx->new_mask = new_mask;
- ctx->flags |= SCA_CHECK;
-
- retval = dl_task_check_affinity(p, new_mask);
- if (retval)
- goto out_free_new_mask;
-
- retval = __set_cpus_allowed_ptr(p, ctx);
- if (retval)
- goto out_free_new_mask;
-
- cpuset_cpus_allowed(p, cpus_allowed);
- if (!cpumask_subset(new_mask, cpus_allowed)) {
- /*
- * We must have raced with a concurrent cpuset update.
- * Just reset the cpumask to the cpuset's cpus_allowed.
- */
- cpumask_copy(new_mask, cpus_allowed);
-
- /*
- * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
- * will restore the previous user_cpus_ptr value.
- *
- * In the unlikely event a previous user_cpus_ptr exists,
- * we need to further restrict the mask to what is allowed
- * by that old user_cpus_ptr.
- */
- if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
- bool empty = !cpumask_and(new_mask, new_mask,
- ctx->user_mask);
-
- if (WARN_ON_ONCE(empty))
- cpumask_copy(new_mask, cpus_allowed);
- }
- __set_cpus_allowed_ptr(p, ctx);
- retval = -EINVAL;
- }
-
-out_free_new_mask:
- free_cpumask_var(new_mask);
-out_free_cpus_allowed:
- free_cpumask_var(cpus_allowed);
- return retval;
-}
-
-long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
-{
- struct affinity_context ac;
- struct cpumask *user_mask;
- int retval;
-
- CLASS(find_get_task, p)(pid);
- if (!p)
- return -ESRCH;
-
- if (p->flags & PF_NO_SETAFFINITY)
- return -EINVAL;
-
- if (!check_same_owner(p)) {
- guard(rcu)();
- if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
- return -EPERM;
- }
-
- retval = security_task_setscheduler(p);
- if (retval)
- return retval;
-
- /*
- * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
- * alloc_user_cpus_ptr() returns NULL.
- */
- user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
- if (user_mask) {
- cpumask_copy(user_mask, in_mask);
- } else if (IS_ENABLED(CONFIG_SMP)) {
- return -ENOMEM;
- }
-
- ac = (struct affinity_context){
- .new_mask = in_mask,
- .user_mask = user_mask,
- .flags = SCA_USER,
- };
-
- retval = __sched_setaffinity(p, &ac);
- kfree(ac.user_mask);
-
- return retval;
-}
-
-static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
- struct cpumask *new_mask)
-{
- if (len < cpumask_size())
- cpumask_clear(new_mask);
- else if (len > cpumask_size())
- len = cpumask_size();
-
- return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
-}
-
-/**
- * sys_sched_setaffinity - set the CPU affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to the new CPU mask
- *
- * Return: 0 on success. An error code otherwise.
- */
-SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
- unsigned long __user *, user_mask_ptr)
-{
- cpumask_var_t new_mask;
- int retval;
-
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
- return -ENOMEM;
-
- retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
- if (retval == 0)
- retval = sched_setaffinity(pid, new_mask);
- free_cpumask_var(new_mask);
- return retval;
-}
-
-long sched_getaffinity(pid_t pid, struct cpumask *mask)
-{
- struct task_struct *p;
- int retval;
-
- guard(rcu)();
- p = find_process_by_pid(pid);
- if (!p)
- return -ESRCH;
-
- retval = security_task_getscheduler(p);
- if (retval)
- return retval;
-
- guard(raw_spinlock_irqsave)(&p->pi_lock);
- cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
-
- return 0;
-}
-
-/**
- * sys_sched_getaffinity - get the CPU affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to hold the current CPU mask
- *
- * Return: size of CPU mask copied to user_mask_ptr on success. An
- * error code otherwise.
- */
-SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
- unsigned long __user *, user_mask_ptr)
-{
- int ret;
- cpumask_var_t mask;
-
- if ((len * BITS_PER_BYTE) < nr_cpu_ids)
- return -EINVAL;
- if (len & (sizeof(unsigned long)-1))
- return -EINVAL;
-
- if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
-
- ret = sched_getaffinity(pid, mask);
- if (ret == 0) {
- unsigned int retlen = min(len, cpumask_size());
-
- if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
- ret = -EFAULT;
- else
- ret = retlen;
- }
- free_cpumask_var(mask);
-
- return ret;
-}
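
User space normally reaches these two syscalls through the glibc wrappers, which hide the "returns the copied mask size" detail noted above; a short sketch pinning the calling thread to CPU 0 and reading the mask back:

/* Userspace sketch using the glibc wrappers, not the raw syscalls. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set)) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("running on %d allowed CPU(s)\n", CPU_COUNT(&set));
	return 0;
}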
-
-static void do_sched_yield(void)
-{
- struct rq_flags rf;
- struct rq *rq;
-
- rq = this_rq_lock_irq(&rf);
-
- schedstat_inc(rq->yld_count);
- current->sched_class->yield_task(rq);
-
- preempt_disable();
- rq_unlock_irq(rq, &rf);
- sched_preempt_enable_no_resched();
-
- schedule();
-}
-
-/**
- * sys_sched_yield - yield the current processor to other threads.
- *
- * This function yields the current CPU to other tasks. If there are no
- * other threads running on this CPU then this function will return.
- *
- * Return: 0.
- */
-SYSCALL_DEFINE0(sched_yield)
-{
- do_sched_yield();
- return 0;
-}
-
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
@@ -8904,105 +7356,11 @@ PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);
-#else /* !CONFIG_PREEMPT_DYNAMIC */
+#else /* !CONFIG_PREEMPT_DYNAMIC: */
static inline void preempt_dynamic_init(void) { }
-#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
-
-/**
- * yield - yield the current processor to other threads.
- *
- * Do not ever use this function, there's a 99% chance you're doing it wrong.
- *
- * The scheduler is at all times free to pick the calling task as the most
- * eligible task to run, if removing the yield() call from your code breaks
- * it, it's already broken.
- *
- * Typical broken usage is:
- *
- * while (!event)
- * yield();
- *
- * where one assumes that yield() will let 'the other' process run that will
- * make event true. If the current task is a SCHED_FIFO task that will never
- * happen. Never use yield() as a progress guarantee!!
- *
- * If you want to use yield() to wait for something, use wait_event().
- * If you want to use yield() to be 'nice' for others, use cond_resched().
- * If you still want to use yield(), do not!
- */
-void __sched yield(void)
-{
- set_current_state(TASK_RUNNING);
- do_sched_yield();
-}
-EXPORT_SYMBOL(yield);
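
The recommended replacement for the broken pattern in the comment is an ordinary wait queue; a hedged in-kernel sketch (names are illustrative):

/* Sketch of the wait_event() alternative recommended above. */
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_event;

/* Waiter: sleeps instead of spinning on yield(). */
static void demo_wait_for_event(void)
{
	wait_event(demo_wq, demo_event);
}

/* Producer: sets the condition and wakes the waiter. */
static void demo_signal_event(void)
{
	demo_event = true;
	wake_up(&demo_wq);
}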
-
-/**
- * yield_to - yield the current processor to another thread in
- * your thread group, or accelerate that thread toward the
- * processor it's on.
- * @p: target task
- * @preempt: whether task preemption is allowed or not
- *
- * It's the caller's job to ensure that the target task struct
- * can't go away on us before we can do any checks.
- *
- * Return:
- * true (>0) if we indeed boosted the target task.
- * false (0) if we failed to boost the target.
- * -ESRCH if there's no task to yield to.
- */
-int __sched yield_to(struct task_struct *p, bool preempt)
-{
- struct task_struct *curr = current;
- struct rq *rq, *p_rq;
- int yielded = 0;
-
- scoped_guard (irqsave) {
- rq = this_rq();
-
-again:
- p_rq = task_rq(p);
- /*
- * If we're the only runnable task on the rq and target rq also
- * has only one task, there's absolutely no point in yielding.
- */
- if (rq->nr_running == 1 && p_rq->nr_running == 1)
- return -ESRCH;
-
- guard(double_rq_lock)(rq, p_rq);
- if (task_rq(p) != p_rq)
- goto again;
-
- if (!curr->sched_class->yield_to_task)
- return 0;
-
- if (curr->sched_class != p->sched_class)
- return 0;
-
- if (task_on_cpu(p_rq, p) || !task_is_running(p))
- return 0;
-
- yielded = curr->sched_class->yield_to_task(rq, p);
- if (yielded) {
- schedstat_inc(rq->yld_count);
- /*
- * Make p's CPU reschedule; pick_next_entity
- * takes care of fairness.
- */
- if (preempt && rq != p_rq)
- resched_curr(p_rq);
- }
- }
-
- if (yielded)
- schedule();
-
- return yielded;
-}
-EXPORT_SYMBOL_GPL(yield_to);
+#endif /* CONFIG_PREEMPT_DYNAMIC */
int io_schedule_prepare(void)
{
@@ -9045,123 +7403,6 @@ void __sched io_schedule(void)
}
EXPORT_SYMBOL(io_schedule);
-/**
- * sys_sched_get_priority_max - return maximum RT priority.
- * @policy: scheduling class.
- *
- * Return: On success, this syscall returns the maximum
- * rt_priority that can be used by a given scheduling class.
- * On failure, a negative error code is returned.
- */
-SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
-{
- int ret = -EINVAL;
-
- switch (policy) {
- case SCHED_FIFO:
- case SCHED_RR:
- ret = MAX_RT_PRIO-1;
- break;
- case SCHED_DEADLINE:
- case SCHED_NORMAL:
- case SCHED_BATCH:
- case SCHED_IDLE:
- ret = 0;
- break;
- }
- return ret;
-}
-
-/**
- * sys_sched_get_priority_min - return minimum RT priority.
- * @policy: scheduling class.
- *
- * Return: On success, this syscall returns the minimum
- * rt_priority that can be used by a given scheduling class.
- * On failure, a negative error code is returned.
- */
-SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
-{
- int ret = -EINVAL;
-
- switch (policy) {
- case SCHED_FIFO:
- case SCHED_RR:
- ret = 1;
- break;
- case SCHED_DEADLINE:
- case SCHED_NORMAL:
- case SCHED_BATCH:
- case SCHED_IDLE:
- ret = 0;
- }
- return ret;
-}
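
Portable user-space code is expected to query this range rather than hard-code 1..99; a short sketch that does so and then tries to apply the minimum FIFO priority:

/* Userspace sketch: query the valid RT priority range before using it. */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int min = sched_get_priority_min(SCHED_FIFO);
	int max = sched_get_priority_max(SCHED_FIFO);

	if (min == -1 || max == -1) {
		perror("sched_get_priority_{min,max}");
		return 1;
	}
	printf("SCHED_FIFO priorities: %d..%d\n", min, max);

	struct sched_param sp = { .sched_priority = min };
	if (sched_setscheduler(0, SCHED_FIFO, &sp))
		perror("sched_setscheduler");	/* likely EPERM unprivileged */
	return 0;
}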
-
-static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
-{
- unsigned int time_slice = 0;
- int retval;
-
- if (pid < 0)
- return -EINVAL;
-
- scoped_guard (rcu) {
- struct task_struct *p = find_process_by_pid(pid);
- if (!p)
- return -ESRCH;
-
- retval = security_task_getscheduler(p);
- if (retval)
- return retval;
-
- scoped_guard (task_rq_lock, p) {
- struct rq *rq = scope.rq;
- if (p->sched_class->get_rr_interval)
- time_slice = p->sched_class->get_rr_interval(rq, p);
- }
- }
-
- jiffies_to_timespec64(time_slice, t);
- return 0;
-}
-
-/**
- * sys_sched_rr_get_interval - return the default timeslice of a process.
- * @pid: pid of the process.
- * @interval: userspace pointer to the timeslice value.
- *
- * this syscall writes the default timeslice value of a given process
- * into the user-space timespec buffer. A value of '0' means infinity.
- *
- * Return: On success, 0 and the timeslice is in @interval. Otherwise,
- * an error code.
- */
-SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
- struct __kernel_timespec __user *, interval)
-{
- struct timespec64 t;
- int retval = sched_rr_get_interval(pid, &t);
-
- if (retval == 0)
- retval = put_timespec64(&t, interval);
-
- return retval;
-}
-
-#ifdef CONFIG_COMPAT_32BIT_TIME
-SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
- struct old_timespec32 __user *, interval)
-{
- struct timespec64 t;
- int retval = sched_rr_get_interval(pid, &t);
-
- if (retval == 0)
- retval = put_old_timespec32(&t, interval);
- return retval;
-}
-#endif
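
The matching user-space call goes through the POSIX sched_rr_get_interval() wrapper; a short sketch reading the timeslice of the calling task:

/* Userspace sketch: read the RR timeslice of the current task. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts)) {
		perror("sched_rr_get_interval");
		return 1;
	}
	/* A value of 0 means "infinity", as documented above. */
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}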
-
void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
@@ -9729,7 +7970,7 @@ int sched_cpu_deactivate(unsigned int cpu)
* Specifically, we rely on ttwu to no longer target this CPU, see
* ttwu_queue_cond() and is_cpu_allowed().
*
- * Do sync before park smpboot threads to take care the rcu boost case.
+ * Do sync before park smpboot threads to take care the RCU boost case.
*/
synchronize_rcu();
@@ -9804,7 +8045,7 @@ int sched_cpu_wait_empty(unsigned int cpu)
* Since this CPU is going 'away' for a while, fold any nr_active delta we
* might have. Called from the CPU stopper task after ensuring that the
* stopper is the last running task on the CPU, so nr_active count is
- * stable. We need to take the teardown thread which is calling this into
+ * stable. We need to take the tear-down thread which is calling this into
* account, so we hand in adjust = 1 to the load calculation.
*
* Also see the comment "Global load-average calculations".
@@ -9998,7 +8239,7 @@ void __init sched_init(void)
/*
* How much CPU bandwidth does root_task_group get?
*
- * In case of task-groups formed thr' the cgroup filesystem, it
+ * In case of task-groups formed through the cgroup filesystem, it
* gets 100% of the CPU resources in the system. This overall
* system CPU resource is divided among the tasks of
* root_task_group and its child task-groups in a fair manner,
@@ -10300,7 +8541,7 @@ void normalize_rt_tasks(void)
#if defined(CONFIG_KGDB_KDB)
/*
- * These functions are only useful for kdb.
+ * These functions are only useful for KDB.
*
* They can only be called when the whole system has been
* stopped - every CPU needs to be quiescent, and no scheduling
@@ -10408,7 +8649,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
online_fair_sched_group(tg);
}
-/* rcu callback to free various structures associated with a task group */
+/* RCU callback to free various structures associated with a task group */
static void sched_unregister_group_rcu(struct rcu_head *rhp)
{
/* Now it should be safe to free those cfs_rqs: */
@@ -11526,10 +9767,10 @@ const int sched_prio_to_weight[40] = {
};
/*
- * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
+ * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
*
* In cases where the weight does not change often, we can use the
- * precalculated inverse to speed up arithmetics by turning divisions
+ * pre-calculated inverse to speed up arithmetics by turning divisions
* into multiplications:
*/
const u32 sched_prio_to_wmult[40] = {
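
The point of the pre-calculated inverse is that a division by the weight becomes a multiply-and-shift; a standalone sketch with the nice-0 values (weight 1024, inverse 2^32/1024 == 4194304), which are assumptions copied from the weight tables rather than part of this hunk:

/* Standalone sketch of the inverse-weight trick:
 * delta / weight is computed as (delta * inv_weight) >> 32.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta	    = 6000000;		/* ns            */
	uint32_t weight     = 1024;		/* nice 0 weight */
	uint32_t inv_weight = 4194304;		/* 2^32 / 1024   */

	uint64_t by_division = delta / weight;
	uint64_t by_mult     = (delta * (uint64_t)inv_weight) >> 32;

	printf("division: %llu, multiplication: %llu\n",
	       (unsigned long long)by_division,
	       (unsigned long long)by_mult);
	return 0;
}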
@@ -11785,16 +10026,16 @@ void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
/*
* Move the src cid if the dst cid is unset. This keeps id
* allocation closest to 0 in cases where few threads migrate around
- * many cpus.
+ * many CPUs.
*
* If destination cid is already set, we may have to just clear
* the src cid to ensure compactness in frequent migrations
* scenarios.
*
* It is not useful to clear the src cid when the number of threads is
- * greater or equal to the number of allowed cpus, because user-space
+ * greater or equal to the number of allowed CPUs, because user-space
* can expect that the number of allowed cids can reach the number of
- * allowed cpus.
+ * allowed CPUs.
*/
dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
dst_cid = READ_ONCE(dst_pcpu_cid->cid);
diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index a57fd8f27498..1ef98a93eb1d 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -279,7 +279,7 @@ void __sched_core_account_forceidle(struct rq *rq)
continue;
/*
- * Note: this will account forceidle to the current cpu, even
+ * Note: this will account forceidle to the current CPU, even
* if it comes from our SMT sibling.
*/
__account_forceidle_time(p, delta);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index aa48b2ec879d..a5e00293ae43 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -14,11 +14,11 @@
* They are only modified in vtime_account, on corresponding CPU
* with interrupts disabled. So, writes are safe.
* They are read and saved off onto struct rq in update_rq_clock().
- * This may result in other CPU reading this CPU's irq time and can
+ * This may result in other CPU reading this CPU's IRQ time and can
* race with irq/vtime_account on this CPU. We would either get old
- * or new value with a side effect of accounting a slice of irq time to wrong
- * task when irq is in progress while we read rq->clock. That is a worthy
- * compromise in place of having locks on each irq in account_system_time.
+ * or new value with a side effect of accounting a slice of IRQ time to wrong
+ * task when IRQ is in progress while we read rq->clock. That is a worthy
+ * compromise in place of having locks on each IRQ in account_system_time.
*/
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
@@ -269,7 +269,7 @@ static __always_inline u64 steal_account_process_time(u64 maxtime)
}
/*
- * Account how much elapsed time was spent in steal, irq, or softirq time.
+ * Account how much elapsed time was spent in steal, IRQ, or softirq time.
*/
static inline u64 account_other_time(u64 max)
{
@@ -370,7 +370,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
* Check for hardirq is done both for system and user time as there is
* no timer going off while we are on hardirq and hence we may never get an
* opportunity to update it solely in system time.
- * p->stime and friends are only updated on system time and not on irq
+ * p->stime and friends are only updated on system time and not on IRQ
* softirq as those do not count in task exec_runtime any more.
*/
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
@@ -380,7 +380,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
/*
* When returning from idle, many ticks can get accounted at
- * once, including some ticks of steal, irq, and softirq time.
+ * once, including some ticks of steal, IRQ, and softirq time.
* Subtract those ticks from the amount of time accounted to
* idle, or potentially user or system time. Due to rounding,
* other time can exceed ticks occasionally.
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index c75d1307d86d..f59e5c19d944 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -708,7 +708,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
}
/*
- * And we finally need to fixup root_domain(s) bandwidth accounting,
+ * And we finally need to fix up root_domain(s) bandwidth accounting,
* since p is still hanging out in the old (now moved to default) root
* domain.
*/
@@ -992,7 +992,7 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
* is detected, the runtime and deadline need to be updated.
*
* If the task has an implicit deadline, i.e., deadline == period, the Original
- * CBS is applied. the runtime is replenished and a new absolute deadline is
+ * CBS is applied. The runtime is replenished and a new absolute deadline is
* set, as in the previous cases.
*
* However, the Original CBS does not work properly for tasks with
@@ -1294,7 +1294,7 @@ int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
* Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
* by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
* Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
- * is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
+ * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
* Since delta is a 64 bit variable, to have an overflow its value should be
* larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
* not an issue here.
@@ -1804,8 +1804,13 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
* The replenish timer needs to be canceled. No
* problem if it fires concurrently: boosted threads
* are ignored in dl_task_timer().
+ *
+ * If the timer callback was running (hrtimer_try_to_cancel == -1),
+ * it will eventually call put_task_struct().
*/
- hrtimer_try_to_cancel(&p->dl.dl_timer);
+ if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
+ !dl_server(&p->dl))
+ put_task_struct(p);
p->dl.dl_throttled = 0;
}
} else if (!dl_prio(p->normal_prio)) {
@@ -2488,7 +2493,7 @@ static void pull_dl_task(struct rq *this_rq)
src_rq = cpu_rq(cpu);
/*
- * It looks racy, abd it is! However, as in sched_rt.c,
+ * It looks racy, and it is! However, as in sched_rt.c,
* we are fine with this.
*/
if (this_rq->dl.dl_nr_running &&
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8a5b1ae0aa55..9057584ec06d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -61,7 +61,7 @@
* Options are:
*
* SCHED_TUNABLESCALING_NONE - unscaled, always *1
- * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
* (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
@@ -3835,15 +3835,14 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
}
}
-void reweight_task(struct task_struct *p, int prio)
+void reweight_task(struct task_struct *p, const struct load_weight *lw)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct load_weight *load = &se->load;
- unsigned long weight = scale_load(sched_prio_to_weight[prio]);
- reweight_entity(cfs_rq, se, weight);
- load->inv_weight = sched_prio_to_wmult[prio];
+ reweight_entity(cfs_rq, se, lw->weight);
+ load->inv_weight = lw->inv_weight;
}
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
@@ -8719,7 +8718,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
* topology where each level pairs two lower groups (or better). This results
* in O(log n) layers. Furthermore we reduce the number of CPUs going up the
* tree to only the first of the previous level and we decrease the frequency
- * of load-balance at each level inv. proportional to the number of CPUs in
+ * of load-balance at each level inversely proportional to the number of CPUs in
* the groups.
*
* This yields:
@@ -9149,12 +9148,8 @@ static int detach_tasks(struct lb_env *env)
break;
env->loop++;
- /*
- * We've more or less seen every task there is, call it quits
- * unless we haven't found any movable task yet.
- */
- if (env->loop > env->loop_max &&
- !(env->flags & LBF_ALL_PINNED))
+ /* We've more or less seen every task there is, call it quits */
+ if (env->loop > env->loop_max)
break;
/* take a breather every nr_migrate tasks */
@@ -11393,9 +11388,7 @@ more_balance:
if (env.flags & LBF_NEED_BREAK) {
env.flags &= ~LBF_NEED_BREAK;
- /* Stop if we tried all running tasks */
- if (env.loop < busiest->nr_running)
- goto more_balance;
+ goto more_balance;
}
/*
@@ -11892,6 +11885,13 @@ static void kick_ilb(unsigned int flags)
return;
/*
+ * Don't bother if no new NOHZ balance work items for ilb_cpu,
+ * i.e. all bits in flags are already set in ilb_cpu.
+ */
+ if ((atomic_read(nohz_flags(ilb_cpu)) & flags) == flags)
+ return;
+
+ /*
* Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
* the first flag owns it; cleared by nohz_csd_func().
*/
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6135fbe83d68..6e78d071beb5 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -172,19 +172,13 @@ static void cpuidle_idle_call(void)
/*
* Check if the idle task must be rescheduled. If it is the
- * case, exit the function after re-enabling the local irq.
+ * case, exit the function after re-enabling the local IRQ.
*/
if (need_resched()) {
local_irq_enable();
return;
}
- /*
- * The RCU framework needs to be told that we are entering an idle
- * section, so no more rcu read side critical sections and one more
- * step to the grace period
- */
-
if (cpuidle_not_available(drv, dev)) {
tick_nohz_idle_stop_tick();
@@ -244,7 +238,7 @@ exit_idle:
__current_set_polling();
/*
- * It is up to the idle functions to reenable local interrupts
+ * It is up to the idle functions to re-enable local interrupts
*/
if (WARN_ON_ONCE(irqs_disabled()))
local_irq_enable();
@@ -320,7 +314,7 @@ static void do_idle(void)
rcu_nocb_flush_deferred_wakeup();
/*
- * In poll mode we reenable interrupts and spin. Also if we
+ * In poll mode we re-enable interrupts and spin. Also if we
* detected in the wakeup from idle path that the tick
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index ca9da66cc894..c48900b856a2 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -45,7 +45,7 @@
* again, being late doesn't lose the delta, just wrecks the sample.
*
* - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because
- * this would add another cross-CPU cacheline miss and atomic operation
+ * this would add another cross-CPU cache-line miss and atomic operation
* to the wakeup path. Instead we increment on whatever CPU the task ran
* when it went into uninterruptible state and decrement on whatever CPU
* did the wakeup. This means that only the sum of nr_uninterruptible over
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(avenrun); /* should be removed */
/**
* get_avenrun - get the load average array
- * @loads: pointer to dest load array
+ * @loads: pointer to destination load array
* @offset: offset to add
* @shift: shift count to shift the result left
*
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index ef00382de595..fa52906a4478 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -417,7 +417,7 @@ int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
- * irq:
+ * IRQ:
*
* util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
* util_sum = cpu_scale * load_sum
@@ -432,7 +432,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
int ret = 0;
/*
- * We can't use clock_pelt because irq time is not accounted in
+ * We can't use clock_pelt because IRQ time is not accounted in
* clock_task. Instead we directly scale the running time to
* reflect the real amount of computation
*/
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7b4aa5809c0f..020d58967d4e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -41,7 +41,7 @@
* What it means for a task to be productive is defined differently
* for each resource. For IO, productive means a running task. For
* memory, productive means a running task that isn't a reclaimer. For
- * CPU, productive means an oncpu task.
+ * CPU, productive means an on-CPU task.
*
* Naturally, the FULL state doesn't exist for the CPU resource at the
* system level, but exists at the cgroup level. At the cgroup level,
@@ -49,7 +49,7 @@
* resource which is being used by others outside of the cgroup or
* throttled by the cgroup cpu.max configuration.
*
- * The percentage of wallclock time spent in those compound stall
+ * The percentage of wall clock time spent in those compound stall
* states gives pressure numbers between 0 and 100 for each resource,
* where the SOME percentage indicates workload slowdowns and the FULL
* percentage indicates reduced CPU utilization:
@@ -218,28 +218,32 @@ void __init psi_init(void)
group_init(&psi_system);
}
-static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
+static u32 test_states(unsigned int *tasks, u32 state_mask)
{
- switch (state) {
- case PSI_IO_SOME:
- return unlikely(tasks[NR_IOWAIT]);
- case PSI_IO_FULL:
- return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
- case PSI_MEM_SOME:
- return unlikely(tasks[NR_MEMSTALL]);
- case PSI_MEM_FULL:
- return unlikely(tasks[NR_MEMSTALL] &&
- tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
- case PSI_CPU_SOME:
- return unlikely(tasks[NR_RUNNING] > oncpu);
- case PSI_CPU_FULL:
- return unlikely(tasks[NR_RUNNING] && !oncpu);
- case PSI_NONIDLE:
- return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
- tasks[NR_RUNNING];
- default:
- return false;
+ const bool oncpu = state_mask & PSI_ONCPU;
+
+ if (tasks[NR_IOWAIT]) {
+ state_mask |= BIT(PSI_IO_SOME);
+ if (!tasks[NR_RUNNING])
+ state_mask |= BIT(PSI_IO_FULL);
+ }
+
+ if (tasks[NR_MEMSTALL]) {
+ state_mask |= BIT(PSI_MEM_SOME);
+ if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
+ state_mask |= BIT(PSI_MEM_FULL);
}
+
+ if (tasks[NR_RUNNING] > oncpu)
+ state_mask |= BIT(PSI_CPU_SOME);
+
+ if (tasks[NR_RUNNING] && !oncpu)
+ state_mask |= BIT(PSI_CPU_FULL);
+
+ if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
+ state_mask |= BIT(PSI_NONIDLE);
+
+ return state_mask;
}
static void get_recent_times(struct psi_group *group, int cpu,
@@ -345,7 +349,7 @@ static void collect_percpu_times(struct psi_group *group,
/*
* Collect the per-cpu time buckets and average them into a
- * single time sample that is normalized to wallclock time.
+ * single time sample that is normalized to wall clock time.
*
* For averaging, each CPU is weighted by its non-idle time in
* the sampling period. This eliminates artifacts from uneven
@@ -770,9 +774,9 @@ static void psi_group_change(struct psi_group *group, int cpu,
{
struct psi_group_cpu *groupc;
unsigned int t, m;
- enum psi_states s;
u32 state_mask;
+ lockdep_assert_rq_held(cpu_rq(cpu));
groupc = per_cpu_ptr(group->pcpu, cpu);
/*
@@ -841,10 +845,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
return;
}
- for (s = 0; s < NR_PSI_STATES; s++) {
- if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
- state_mask |= (1 << s);
- }
+ state_mask = test_states(groupc->tasks, state_mask);
/*
* Since we care about lost potential, a memstall is FULL
@@ -991,22 +992,32 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-void psi_account_irqtime(struct task_struct *task, u32 delta)
+void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
{
- int cpu = task_cpu(task);
+ int cpu = task_cpu(curr);
struct psi_group *group;
struct psi_group_cpu *groupc;
- u64 now;
+ u64 now, irq;
+ s64 delta;
if (static_branch_likely(&psi_disabled))
return;
- if (!task->pid)
+ if (!curr->pid)
+ return;
+
+ lockdep_assert_rq_held(rq);
+ group = task_psi_group(curr);
+ if (prev && task_psi_group(prev) == group)
return;
now = cpu_clock(cpu);
+ irq = irq_time_read(cpu);
+ delta = (s64)(irq - rq->psi_irq_time);
+ if (delta < 0)
+ return;
+ rq->psi_irq_time = irq;
- group = task_psi_group(task);
do {
if (!group->enabled)
continue;
@@ -1194,7 +1205,7 @@ void psi_cgroup_restart(struct psi_group *group)
/*
* After we disable psi_group->enabled, we don't actually
* stop percpu tasks accounting in each psi_group_cpu,
- * instead only stop test_state() loop, record_times()
+ * instead only stop test_states() loop, record_times()
* and averaging worker, see psi_group_change() for details.
*
* When disable cgroup PSI, this function has nothing to sync
@@ -1202,7 +1213,7 @@ void psi_cgroup_restart(struct psi_group *group)
* would see !psi_group->enabled and only do task accounting.
*
* When re-enable cgroup PSI, this function use psi_group_change()
- * to get correct state mask from test_state() loop on tasks[],
+ * to get correct state mask from test_states() loop on tasks[],
* and restart groupc->state_start from now, use .clear = .set = 0
* here since no task status really changed.
*/
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index aa4c1c874fa4..63e49c8ffc4d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -140,7 +140,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
- /* delimiter for bitsearch: */
+ /* delimiter for bit-search: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP
@@ -1135,7 +1135,7 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio)
/*
* This may have been our highest task, and therefore
- * we may have some recomputation to do
+ * we may have some re-computation to do
*/
if (prio == prev_prio) {
struct rt_prio_array *array = &rt_rq->active;
@@ -1571,7 +1571,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
*
* For equal prio tasks, we just let the scheduler sort it out.
*
- * Otherwise, just let it ride on the affined RQ and the
+ * Otherwise, just let it ride on the affine RQ and the
* post-schedule router will push the preempted task away
*
* This test is optimistic, if we get it wrong the load-balancer
@@ -2147,14 +2147,14 @@ static void push_rt_tasks(struct rq *rq)
* if its the only CPU with multiple RT tasks queued, and a large number
* of CPUs scheduling a lower priority task at the same time.
*
- * Each root domain has its own irq work function that can iterate over
+ * Each root domain has its own IRQ work function that can iterate over
* all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
* task must be checked if there's one or many CPUs that are lowering
- * their priority, there's a single irq work iterator that will try to
+ * their priority, there's a single IRQ work iterator that will try to
* push off RT tasks that are waiting to run.
*
* When a CPU schedules a lower priority task, it will kick off the
- * irq work iterator that will jump to each CPU with overloaded RT tasks.
+ * IRQ work iterator that will jump to each CPU with overloaded RT tasks.
* As it only takes the first CPU that schedules a lower priority task
* to start the process, the rto_start variable is incremented and if
* the atomic result is one, then that CPU will try to take the rto_lock.
@@ -2162,7 +2162,7 @@ static void push_rt_tasks(struct rq *rq)
* CPUs scheduling lower priority tasks.
*
* All CPUs that are scheduling a lower priority task will increment the
- * rt_loop_next variable. This will make sure that the irq work iterator
+ * rt_loop_next variable. This will make sure that the IRQ work iterator
* checks all RT overloaded CPUs whenever a CPU schedules a new lower
* priority task, even if the iterator is in the middle of a scan. Incrementing
* the rt_loop_next will cause the iterator to perform another scan.
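
The comment above describes electing exactly one CPU to kick the IRQ-work iterator (rto_start) while every other CPU merely bumps rt_loop_next to force a re-scan. The following standalone sketch illustrates that "first incrementer starts the work" pattern with C11 atomics; the names and the printf are illustrative only, and unlike the kernel this toy never resets the start counter.

/* Illustrative model of the single-starter election described above. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int start_count;	/* loosely models rto_start (never reset here) */
static atomic_int loop_next;	/* loosely models rt_loop_next */

/* Called by every CPU that starts running a lower-priority task. */
static void tell_cpu_to_push_model(int cpu)
{
	atomic_fetch_add(&loop_next, 1);		/* request (another) scan */

	if (atomic_fetch_add(&start_count, 1) != 0)	/* not the first caller */
		return;

	printf("CPU%d kicks the push iterator\n", cpu);	/* first caller starts it */
}

/* The iterator re-checks loop_next to decide whether to scan again. */
static int iterator_should_rescan(int seen)
{
	return atomic_load(&loop_next) != seen;
}

int main(void)
{
	tell_cpu_to_push_model(0);	/* starts the iterator */
	tell_cpu_to_push_model(3);	/* only bumps the counters */
	printf("rescan needed: %d\n", iterator_should_rescan(1));
	return 0;
}
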
@@ -2242,7 +2242,7 @@ static void tell_cpu_to_push(struct rq *rq)
* The rto_cpu is updated under the lock, if it has a valid CPU
* then the IPI is still running and will continue due to the
* update to loop_next, and nothing needs to be done here.
- * Otherwise it is finishing up and an ipi needs to be sent.
+ * Otherwise it is finishing up and an IPI needs to be sent.
*/
if (rq->rd->rto_cpu < 0)
cpu = rto_next_cpu(rq->rd);
@@ -2594,7 +2594,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
watchdog(rq, p);
/*
- * RR tasks need a special form of timeslice management.
+ * RR tasks need a special form of time-slice management.
* FIFO tasks have no timeslices.
*/
if (p->policy != SCHED_RR)
@@ -2900,7 +2900,7 @@ static int sched_rt_global_constraints(void)
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
- /* Don't accept realtime tasks when there is no way for them to run */
+ /* Don't accept real-time tasks when there is no way for them to run */
if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
return 0;
@@ -3001,7 +3001,7 @@ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
ret = proc_dointvec(table, write, buffer, lenp, ppos);
/*
* Make sure that internally we keep jiffies.
- * Also, writing zero resets the timeslice to default:
+ * Also, writing zero resets the time-slice to default:
*/
if (!ret && write) {
sched_rr_timeslice =
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a831af102070..4c36cc680361 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -74,6 +74,12 @@
#include "../workqueue_internal.h"
+struct rq;
+struct cfs_rq;
+struct rt_rq;
+struct sched_group;
+struct cpuidle_state;
+
#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
# include <asm/paravirt_api_clock.h>
@@ -90,9 +96,6 @@
# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
#endif
-struct rq;
-struct cpuidle_state;
-
/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED 1
#define TASK_ON_RQ_MIGRATING 2
@@ -128,12 +131,12 @@ extern struct list_head asym_cap_list;
/*
* Helpers for converting nanosecond timing to jiffy resolution
*/
-#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
+#define NS_TO_JIFFIES(time) ((unsigned long)(time) / (NSEC_PER_SEC/HZ))
/*
* Increase resolution of nice-level calculations for 64-bit architectures.
* The extra resolution improves shares distribution and load balancing of
- * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
+ * low-weight task groups (eg. nice +19 on an autogroup), deeper task-group
* hierarchies, especially on larger systems. This is not a user-visible change
* and does not change the user-interface for setting shares/weights.
*
@@ -147,12 +150,13 @@ extern struct list_head asym_cap_list;
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
-# define scale_load_down(w) \
-({ \
- unsigned long __w = (w); \
- if (__w) \
- __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
- __w; \
+# define scale_load_down(w) \
+({ \
+ unsigned long __w = (w); \
+ \
+ if (__w) \
+ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
+ __w; \
})
#else
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
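
The reshuffled scale_load_down() above divides a weight by 2^SCHED_FIXEDPOINT_SHIFT while never letting a non-zero weight collapse below 2. A small standalone sketch of that round trip, assuming the kernel's SCHED_FIXEDPOINT_SHIFT of 10 (the helper names here are local to the example):

/* Sketch of the scale_load()/scale_load_down() pair, shift assumed = 10. */
#include <stdio.h>

#define FIXEDPOINT_SHIFT 10UL

static unsigned long scale_load_up(unsigned long w)
{
	return w << FIXEDPOINT_SHIFT;
}

static unsigned long scale_load_down_model(unsigned long w)
{
	if (w)	/* max(2, w >> shift): keep tiny weights from truncating to 0 */
		w = (w >> FIXEDPOINT_SHIFT) > 2UL ? (w >> FIXEDPOINT_SHIFT) : 2UL;
	return w;
}

int main(void)
{
	/* nice-0 weight 1024 gains 10 extra bits of resolution when scaled up */
	unsigned long hi = scale_load_up(1024);

	printf("up: %lu, back down: %lu\n", hi, scale_load_down_model(hi));
	/* a group weight of 1 would truncate to 0; the clamp keeps it at 2 */
	printf("down(1) = %lu\n", scale_load_down_model(1));
	return 0;
}
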
@@ -187,6 +191,7 @@ static inline int idle_policy(int policy)
{
return policy == SCHED_IDLE;
}
+
static inline int fair_policy(int policy)
{
return policy == SCHED_NORMAL || policy == SCHED_BATCH;
@@ -201,6 +206,7 @@ static inline int dl_policy(int policy)
{
return policy == SCHED_DEADLINE;
}
+
static inline bool valid_policy(int policy)
{
return idle_policy(policy) || fair_policy(policy) ||
@@ -222,11 +228,12 @@ static inline int task_has_dl_policy(struct task_struct *p)
return dl_policy(p->policy);
}
-#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
static inline void update_avg(u64 *avg, u64 sample)
{
s64 diff = sample - *avg;
+
*avg += diff / 8;
}
@@ -251,7 +258,7 @@ static inline void update_avg(u64 *avg, u64 sample)
*/
#define SCHED_FLAG_SUGOV 0x10000000
-#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
+#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
@@ -358,9 +365,6 @@ extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
#ifdef CONFIG_CGROUP_SCHED
-struct cfs_rq;
-struct rt_rq;
-
extern struct list_head task_groups;
struct cfs_bandwidth {
@@ -406,7 +410,7 @@ struct task_group {
#ifdef CONFIG_SMP
/*
* load_avg can be heavily contended at clock tick time, so put
- * it in its own cacheline separated from the fields above which
+ * it in its own cache-line separated from the fields above which
* will also be accessed at each tick.
*/
atomic_long_t load_avg ____cacheline_aligned;
@@ -536,6 +540,7 @@ static inline void set_task_rq_fair(struct sched_entity *se,
#else /* CONFIG_CGROUP_SCHED */
struct cfs_bandwidth { };
+
static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; }
#endif /* CONFIG_CGROUP_SCHED */
@@ -551,8 +556,8 @@ extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent
* applicable for 32-bits architectures.
*/
#ifdef CONFIG_64BIT
-# define u64_u32_load_copy(var, copy) var
-# define u64_u32_store_copy(var, copy, val) (var = val)
+# define u64_u32_load_copy(var, copy) var
+# define u64_u32_store_copy(var, copy, val) (var = val)
#else
# define u64_u32_load_copy(var, copy) \
({ \
@@ -580,8 +585,8 @@ do { \
copy = __val; \
} while (0)
#endif
-# define u64_u32_load(var) u64_u32_load_copy(var, var##_copy)
-# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
+# define u64_u32_load(var) u64_u32_load_copy(var, var##_copy)
+# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
/* CFS-related fields in a runqueue */
struct cfs_rq {
@@ -803,6 +808,7 @@ struct dl_rq {
};
#ifdef CONFIG_FAIR_GROUP_SCHED
+
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
@@ -820,16 +826,18 @@ static inline long se_runnable(struct sched_entity *se)
return se->runnable_weight;
}
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
+
#define entity_is_task(se) 1
-static inline void se_update_runnable(struct sched_entity *se) {}
+static inline void se_update_runnable(struct sched_entity *se) { }
static inline long se_runnable(struct sched_entity *se)
{
return !!se->on_rq;
}
-#endif
+
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_SMP
/*
@@ -874,7 +882,7 @@ struct root_domain {
*/
bool overloaded;
- /* Indicate one or more cpus over-utilized (tipping point) */
+ /* Indicate one or more CPUs over-utilized (tipping point) */
bool overutilized;
/*
@@ -988,7 +996,6 @@ struct uclamp_rq {
DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */
-struct rq;
struct balance_callback {
struct balance_callback *next;
void (*func)(struct rq *rq);
@@ -1126,6 +1133,7 @@ struct rq {
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
+ u64 psi_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
u64 prev_steal_time;
@@ -1143,7 +1151,7 @@ struct rq {
call_single_data_t hrtick_csd;
#endif
struct hrtimer hrtick_timer;
- ktime_t hrtick_time;
+ ktime_t hrtick_time;
#endif
#ifdef CONFIG_SCHEDSTATS
@@ -1165,7 +1173,7 @@ struct rq {
#endif
#ifdef CONFIG_CPU_IDLE
- /* Must be inspected within a rcu lock section */
+ /* Must be inspected within an RCU lock section */
struct cpuidle_state *idle_state;
#endif
@@ -1227,7 +1235,7 @@ static inline int cpu_of(struct rq *rq)
#endif
}
-#define MDF_PUSH 0x01
+#define MDF_PUSH 0x01
static inline bool is_migration_disabled(struct task_struct *p)
{
@@ -1246,7 +1254,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)
-struct sched_group;
#ifdef CONFIG_SCHED_CORE
static inline struct cpumask *sched_group_span(struct sched_group *sg);
@@ -1282,9 +1289,10 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock;
}
-bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
- bool fi);
-void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
+extern bool
+cfs_prio_less(const struct task_struct *a, const struct task_struct *b, bool fi);
+
+extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
/*
* Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -1352,7 +1360,7 @@ extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
extern void sched_core_get(void);
extern void sched_core_put(void);
-#else /* !CONFIG_SCHED_CORE */
+#else /* !CONFIG_SCHED_CORE: */
static inline bool sched_core_enabled(struct rq *rq)
{
@@ -1390,7 +1398,8 @@ static inline bool sched_group_cookie_match(struct rq *rq,
{
return true;
}
-#endif /* CONFIG_SCHED_CORE */
+
+#endif /* !CONFIG_SCHED_CORE */
static inline void lockdep_assert_rq_held(struct rq *rq)
{
@@ -1421,8 +1430,10 @@ static inline void raw_spin_rq_unlock_irq(struct rq *rq)
static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
{
unsigned long flags;
+
local_irq_save(flags);
raw_spin_rq_lock(rq);
+
return flags;
}
@@ -1451,6 +1462,7 @@ static inline void update_idle_core(struct rq *rq) { }
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
+
static inline struct task_struct *task_of(struct sched_entity *se)
{
SCHED_WARN_ON(!entity_is_task(se));
@@ -1474,9 +1486,9 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
return grp->my_q;
}
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
-#define task_of(_se) container_of(_se, struct task_struct, se)
+#define task_of(_se) container_of(_se, struct task_struct, se)
static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
@@ -1496,7 +1508,8 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return NULL;
}
-#endif
+
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
extern void update_rq_clock(struct rq *rq);
@@ -1622,9 +1635,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
#ifdef CONFIG_SCHED_DEBUG
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
rf->clock_update_flags = 0;
-#ifdef CONFIG_SMP
+# ifdef CONFIG_SMP
SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
-#endif
+# endif
#endif
}
@@ -1650,9 +1663,11 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
#endif
}
+extern
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
__acquires(rq->lock);
+extern
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
__acquires(p->pi_lock)
__acquires(rq->lock);
@@ -1679,48 +1694,42 @@ DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
task_rq_unlock(_T->rq, _T->lock, &_T->rf),
struct rq *rq; struct rq_flags rf)
-static inline void
-rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
raw_spin_rq_lock_irqsave(rq, rf->flags);
rq_pin_lock(rq, rf);
}
-static inline void
-rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
raw_spin_rq_lock_irq(rq);
rq_pin_lock(rq, rf);
}
-static inline void
-rq_lock(struct rq *rq, struct rq_flags *rf)
+static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
raw_spin_rq_lock(rq);
rq_pin_lock(rq, rf);
}
-static inline void
-rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}
-static inline void
-rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
raw_spin_rq_unlock_irq(rq);
}
-static inline void
-rq_unlock(struct rq *rq, struct rq_flags *rf)
+static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
@@ -1742,8 +1751,7 @@ DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
rq_unlock_irqrestore(_T->lock, &_T->rf),
struct rq_flags rf)
-static inline struct rq *
-this_rq_lock_irq(struct rq_flags *rf)
+static inline struct rq *this_rq_lock_irq(struct rq_flags *rf)
__acquires(rq->lock)
{
struct rq *rq;
@@ -1751,15 +1759,18 @@ this_rq_lock_irq(struct rq_flags *rf)
local_irq_disable();
rq = this_rq();
rq_lock(rq, rf);
+
return rq;
}
#ifdef CONFIG_NUMA
+
enum numa_topology_type {
NUMA_DIRECT,
NUMA_GLUELESS_MESH,
NUMA_BACKPLANE,
};
+
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
@@ -1768,18 +1779,23 @@ extern void sched_update_numa(int cpu, bool online);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
-#else
+
+#else /* !CONFIG_NUMA: */
+
static inline void sched_init_numa(int offline_node) { }
static inline void sched_update_numa(int cpu, bool online) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+
static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
return nr_cpu_ids;
}
-#endif
+
+#endif /* !CONFIG_NUMA */
#ifdef CONFIG_NUMA_BALANCING
+
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
NUMA_MEM = 0,
@@ -1787,17 +1803,21 @@ enum numa_faults_stats {
NUMA_MEMBUF,
NUMA_CPUBUF
};
+
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *p, struct task_struct *t,
int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
-#else
+
+#else /* !CONFIG_NUMA_BALANCING: */
+
static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
-#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* !CONFIG_NUMA_BALANCING */
#ifdef CONFIG_SMP
@@ -1822,8 +1842,7 @@ queue_balance_callback(struct rq *rq,
}
#define rcu_dereference_check_sched_domain(p) \
- rcu_dereference_check((p), \
- lockdep_is_held(&sched_domains_mutex))
+ rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex))
/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
@@ -1894,6 +1913,7 @@ DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
+
extern struct static_key_false sched_asym_cpucapacity;
extern struct static_key_false sched_cluster_active;
@@ -1957,15 +1977,11 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg)
extern int group_balance_cpu(struct sched_group *sg);
#ifdef CONFIG_SCHED_DEBUG
-void update_sched_domain_debugfs(void);
-void dirty_sched_domain_sysctl(int cpu);
+extern void update_sched_domain_debugfs(void);
+extern void dirty_sched_domain_sysctl(int cpu);
#else
-static inline void update_sched_domain_debugfs(void)
-{
-}
-static inline void dirty_sched_domain_sysctl(int cpu)
-{
-}
+static inline void update_sched_domain_debugfs(void) { }
+static inline void dirty_sched_domain_sysctl(int cpu) { }
#endif
extern int sched_update_scaling(void);
@@ -1976,6 +1992,7 @@ static inline const struct cpumask *task_user_cpus(struct task_struct *p)
return cpu_possible_mask; /* &init_task.cpus_mask */
return p->user_cpus_ptr;
}
+
#endif /* CONFIG_SMP */
#include "stats.h"
@@ -1998,13 +2015,13 @@ static inline void sched_core_tick(struct rq *rq)
__sched_core_tick(rq);
}
-#else
+#else /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS): */
-static inline void sched_core_account_forceidle(struct rq *rq) {}
+static inline void sched_core_account_forceidle(struct rq *rq) { }
-static inline void sched_core_tick(struct rq *rq) {}
+static inline void sched_core_tick(struct rq *rq) { }
-#endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */
+#endif /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS) */
#ifdef CONFIG_CGROUP_SCHED
@@ -2046,15 +2063,16 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#endif
}
-#else /* CONFIG_CGROUP_SCHED */
+#else /* !CONFIG_CGROUP_SCHED: */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+
static inline struct task_group *task_group(struct task_struct *p)
{
return NULL;
}
-#endif /* CONFIG_CGROUP_SCHED */
+#endif /* !CONFIG_CGROUP_SCHED */
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
@@ -2099,6 +2117,7 @@ enum {
extern const_debug unsigned int sysctl_sched_features;
#ifdef CONFIG_JUMP_LABEL
+
#define SCHED_FEAT(name, enabled) \
static __always_inline bool static_branch_##name(struct static_key *key) \
{ \
@@ -2111,13 +2130,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
-#else /* !CONFIG_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL: */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
-#endif /* CONFIG_JUMP_LABEL */
+#endif /* !CONFIG_JUMP_LABEL */
-#else /* !SCHED_DEBUG */
+#else /* !SCHED_DEBUG: */
/*
* Each translation unit has its own copy of sysctl_sched_features to allow
@@ -2133,7 +2152,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
-#endif /* SCHED_DEBUG */
+#endif /* !SCHED_DEBUG */
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;
@@ -2176,13 +2195,13 @@ static inline int task_on_rq_migrating(struct task_struct *p)
}
/* Wake flags. The first three directly map to some SD flag value */
-#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
-#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
-#define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */
+#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
+#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
+#define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */
-#define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
-#define WF_MIGRATED 0x20 /* Internal use, task got migrated */
-#define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */
+#define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
+#define WF_MIGRATED 0x20 /* Internal use, task got migrated */
+#define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */
#ifdef CONFIG_SMP
static_assert(WF_EXEC == SD_BALANCE_EXEC);
@@ -2252,9 +2271,9 @@ extern const u32 sched_prio_to_wmult[40];
#define RETRY_TASK ((void *)-1UL)
struct affinity_context {
- const struct cpumask *new_mask;
- struct cpumask *user_mask;
- unsigned int flags;
+ const struct cpumask *new_mask;
+ struct cpumask *user_mask;
+ unsigned int flags;
};
extern s64 update_curr_common(struct rq *rq);
@@ -2402,8 +2421,19 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void sched_balance_trigger(struct rq *rq);
+extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx);
extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
+static inline cpumask_t *alloc_user_cpus_ptr(int node)
+{
+ /*
+ * See do_set_cpus_allowed() above for the rcu_head usage.
+ */
+ int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
+
+ return kmalloc_node(size, GFP_KERNEL, node);
+}
+
static inline struct task_struct *get_push_task(struct rq *rq)
{
struct task_struct *p = rq->curr;
@@ -2425,9 +2455,23 @@ static inline struct task_struct *get_push_task(struct rq *rq)
extern int push_cpu_stop(void *arg);
-#endif
+#else /* !CONFIG_SMP: */
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+ struct affinity_context *ctx)
+{
+ return set_cpus_allowed_ptr(p, ctx->new_mask);
+}
+
+static inline cpumask_t *alloc_user_cpus_ptr(int node)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_SMP */
#ifdef CONFIG_CPU_IDLE
+
static inline void idle_set_state(struct rq *rq,
struct cpuidle_state *idle_state)
{
@@ -2440,7 +2484,9 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
return rq->idle_state;
}
-#else
+
+#else /* !CONFIG_CPU_IDLE: */
+
static inline void idle_set_state(struct rq *rq,
struct cpuidle_state *idle_state)
{
@@ -2450,7 +2496,8 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
return NULL;
}
-#endif
+
+#endif /* !CONFIG_CPU_IDLE */
extern void schedule_idle(void);
asmlinkage void schedule_user(void);
@@ -2463,7 +2510,7 @@ extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
-extern void reweight_task(struct task_struct *p, int prio);
+extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -2479,7 +2526,8 @@ extern void init_dl_entity(struct sched_dl_entity *dl_se);
#define RATIO_SHIFT 8
#define MAX_BW_BITS (64 - BW_SHIFT)
#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
-unsigned long to_ratio(u64 period, u64 runtime);
+
+extern unsigned long to_ratio(u64 period, u64 runtime);
extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);
@@ -2505,10 +2553,10 @@ static inline void sched_update_tick_dependency(struct rq *rq)
else
tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
-#else
+#else /* !CONFIG_NO_HZ_FULL: */
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
-#endif
+#endif /* !CONFIG_NO_HZ_FULL */
static inline void add_nr_running(struct rq *rq, unsigned count)
{
@@ -2544,9 +2592,9 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
#ifdef CONFIG_PREEMPT_RT
-#define SCHED_NR_MIGRATE_BREAK 8
+# define SCHED_NR_MIGRATE_BREAK 8
#else
-#define SCHED_NR_MIGRATE_BREAK 32
+# define SCHED_NR_MIGRATE_BREAK 32
#endif
extern const_debug unsigned int sysctl_sched_nr_migrate;
@@ -2595,9 +2643,9 @@ static inline int hrtick_enabled_dl(struct rq *rq)
return hrtick_enabled(rq);
}
-void hrtick_start(struct rq *rq, u64 delay);
+extern void hrtick_start(struct rq *rq, u64 delay);
-#else
+#else /* !CONFIG_SCHED_HRTICK: */
static inline int hrtick_enabled_fair(struct rq *rq)
{
@@ -2614,13 +2662,10 @@ static inline int hrtick_enabled(struct rq *rq)
return 0;
}
-#endif /* CONFIG_SCHED_HRTICK */
+#endif /* !CONFIG_SCHED_HRTICK */
#ifndef arch_scale_freq_tick
-static __always_inline
-void arch_scale_freq_tick(void)
-{
-}
+static __always_inline void arch_scale_freq_tick(void) { }
#endif
#ifndef arch_scale_freq_capacity
@@ -2657,13 +2702,13 @@ static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
#endif
}
#else
-static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {}
+static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) { }
#endif
-#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \
-__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \
-static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
-{ class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \
+#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \
+static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
+{ class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \
_lock; return _t; }
#ifdef CONFIG_SMP
@@ -2717,7 +2762,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
return 1;
}
-#else
+#else /* !CONFIG_PREEMPTION: */
/*
* Unfair double_lock_balance: Optimizes throughput at the expense of
* latency by eliminating extra atomic operations when the locks are
@@ -2748,7 +2793,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
return 1;
}
-#endif /* CONFIG_PREEMPTION */
+#endif /* !CONFIG_PREEMPTION */
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
@@ -2824,9 +2869,10 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
+
extern bool sched_smp_initialized;
-#else /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
/*
* double_rq_lock - safely lock two runqueues
@@ -2860,7 +2906,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__release(rq2->lock);
}
-#endif
+#endif /* !CONFIG_SMP */
DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
double_rq_lock(_T->lock, _T->lock2),
@@ -2881,16 +2927,15 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
extern void resched_latency_warn(int cpu, u64 latency);
-#ifdef CONFIG_NUMA_BALANCING
-extern void
-show_numa_stats(struct task_struct *p, struct seq_file *m);
+# ifdef CONFIG_NUMA_BALANCING
+extern void show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
- unsigned long tpf, unsigned long gsf, unsigned long gpf);
-#endif /* CONFIG_NUMA_BALANCING */
-#else
-static inline void resched_latency_warn(int cpu, u64 latency) {}
-#endif /* CONFIG_SCHED_DEBUG */
+ unsigned long tpf, unsigned long gsf, unsigned long gpf);
+# endif /* CONFIG_NUMA_BALANCING */
+#else /* !CONFIG_SCHED_DEBUG: */
+static inline void resched_latency_warn(int cpu, u64 latency) { }
+#endif /* !CONFIG_SCHED_DEBUG */
extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
@@ -2900,6 +2945,7 @@ extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);
#ifdef CONFIG_NO_HZ_COMMON
+
#define NOHZ_BALANCE_KICK_BIT 0
#define NOHZ_STATS_KICK_BIT 1
#define NOHZ_NEWILB_KICK_BIT 2
@@ -2914,14 +2960,14 @@ extern void cfs_bandwidth_usage_dec(void);
/* Update nohz.next_balance */
#define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT)
-#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)
+#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)
-#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
+#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
extern void nohz_balance_exit_idle(struct rq *rq);
-#else
+#else /* !CONFIG_NO_HZ_COMMON: */
static inline void nohz_balance_exit_idle(struct rq *rq) { }
-#endif
+#endif /* !CONFIG_NO_HZ_COMMON */
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_run_idle_balance(int cpu);
@@ -2930,6 +2976,7 @@ static inline void nohz_run_idle_balance(int cpu) { }
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
struct irqtime {
u64 total;
u64 tick_delta;
@@ -2957,9 +3004,11 @@ static inline u64 irq_time_read(int cpu)
return total;
}
+
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
#ifdef CONFIG_CPU_FREQ
+
DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
/**
@@ -2993,9 +3042,9 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
if (data)
data->func(data, rq_clock(rq), flags);
}
-#else
-static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
-#endif /* CONFIG_CPU_FREQ */
+#else /* !CONFIG_CPU_FREQ: */
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { }
+#endif /* !CONFIG_CPU_FREQ */
#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
@@ -3006,6 +3055,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif
#ifdef CONFIG_SMP
+
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
unsigned long *min,
unsigned long *max);
@@ -3048,9 +3098,11 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
{
return READ_ONCE(rq->avg_rt.util_avg);
}
-#endif
+
+#endif /* CONFIG_SMP */
#ifdef CONFIG_UCLAMP_TASK
+
unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
static inline unsigned long uclamp_rq_get(struct rq *rq,
@@ -3097,9 +3149,40 @@ static inline bool uclamp_is_used(void)
{
return static_branch_likely(&sched_uclamp_used);
}
-#else /* CONFIG_UCLAMP_TASK */
-static inline unsigned long uclamp_eff_value(struct task_struct *p,
- enum uclamp_id clamp_id)
+
+#define for_each_clamp_id(clamp_id) \
+ for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
+
+extern unsigned int sysctl_sched_uclamp_util_min_rt_default;
+
+
+static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
+{
+ if (clamp_id == UCLAMP_MIN)
+ return 0;
+ return SCHED_CAPACITY_SCALE;
+}
+
+/* Integer rounded range for each bucket */
+#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
+
+static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
+{
+ return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
+}
+
+static inline void
+uclamp_se_set(struct uclamp_se *uc_se, unsigned int value, bool user_defined)
+{
+ uc_se->value = value;
+ uc_se->bucket_id = uclamp_bucket_id(value);
+ uc_se->user_defined = user_defined;
+}
+
+#else /* !CONFIG_UCLAMP_TASK: */
+
+static inline unsigned long
+uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
if (clamp_id == UCLAMP_MIN)
return 0;
@@ -3114,8 +3197,8 @@ static inline bool uclamp_is_used(void)
return false;
}
-static inline unsigned long uclamp_rq_get(struct rq *rq,
- enum uclamp_id clamp_id)
+static inline unsigned long
+uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
{
if (clamp_id == UCLAMP_MIN)
return 0;
@@ -3123,8 +3206,8 @@ static inline unsigned long uclamp_rq_get(struct rq *rq,
return SCHED_CAPACITY_SCALE;
}
-static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
- unsigned int value)
+static inline void
+uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value)
{
}
@@ -3132,9 +3215,11 @@ static inline bool uclamp_rq_is_idle(struct rq *rq)
{
return false;
}
-#endif /* CONFIG_UCLAMP_TASK */
+
+#endif /* !CONFIG_UCLAMP_TASK */
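
The UCLAMP_BUCKET_DELTA and uclamp_bucket_id() helpers moved above split the clamp range [0, SCHED_CAPACITY_SCALE] into UCLAMP_BUCKETS equal buckets. A worked example of the arithmetic, assuming the common defaults SCHED_CAPACITY_SCALE = 1024 and CONFIG_UCLAMP_BUCKETS_COUNT = 5; constants and names below are local to the sketch.

/* Worked example of the bucket arithmetic above (defaults assumed). */
#include <stdio.h>

#define CAPACITY_SCALE	1024U
#define BUCKETS		5U
/* DIV_ROUND_CLOSEST(1024, 5) == 205 */
#define BUCKET_DELTA	((CAPACITY_SCALE + BUCKETS / 2) / BUCKETS)

static unsigned int bucket_id(unsigned int clamp_value)
{
	unsigned int id = clamp_value / BUCKET_DELTA;

	/* defensive clamp to the last bucket, as min_t() does above */
	return id < BUCKETS - 1 ? id : BUCKETS - 1;
}

int main(void)
{
	printf("delta=%u\n", BUCKET_DELTA);		/* 205 */
	printf("bucket(0)=%u\n", bucket_id(0));		/* 0 */
	printf("bucket(204)=%u\n", bucket_id(204));	/* 0 */
	printf("bucket(205)=%u\n", bucket_id(205));	/* 1 */
	printf("bucket(1024)=%u\n", bucket_id(1024));	/* 4, the top bucket */
	return 0;
}
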
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+
static inline unsigned long cpu_util_irq(struct rq *rq)
{
return READ_ONCE(rq->avg_irq.util_avg);
@@ -3149,7 +3234,9 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
return util;
}
-#else
+
+#else /* !CONFIG_HAVE_SCHED_AVG_IRQ: */
+
static inline unsigned long cpu_util_irq(struct rq *rq)
{
return 0;
@@ -3160,7 +3247,8 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
{
return util;
}
-#endif
+
+#endif /* !CONFIG_HAVE_SCHED_AVG_IRQ */
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
@@ -3178,11 +3266,13 @@ extern struct cpufreq_governor schedutil_gov;
#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
#define perf_domain_span(pd) NULL
+
static inline bool sched_energy_enabled(void) { return false; }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
#ifdef CONFIG_MEMBARRIER
+
/*
* The scheduler provides memory barriers required by membarrier between:
* - prior user-space memory accesses and store to rq->membarrier_state,
@@ -3204,13 +3294,16 @@ static inline void membarrier_switch_mm(struct rq *rq,
WRITE_ONCE(rq->membarrier_state, membarrier_state);
}
-#else
+
+#else /* !CONFIG_MEMBARRIER: */
+
static inline void membarrier_switch_mm(struct rq *rq,
struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
}
-#endif
+
+#endif /* !CONFIG_MEMBARRIER */
#ifdef CONFIG_SMP
static inline bool is_per_cpu_kthread(struct task_struct *p)
@@ -3262,7 +3355,7 @@ static inline void __mm_cid_put(struct mm_struct *mm, int cid)
* be held to transition to other states.
*
* State transitions synchronized with cmpxchg or try_cmpxchg need to be
- * consistent across cpus, which prevents use of this_cpu_cmpxchg.
+ * consistent across CPUs, which prevents use of this_cpu_cmpxchg.
*/
static inline void mm_cid_put_lazy(struct task_struct *t)
{
@@ -3329,6 +3422,7 @@ static inline int __mm_cid_try_get(struct mm_struct *mm)
}
if (cpumask_test_and_set_cpu(cid, cpumask))
return -1;
+
return cid;
}
@@ -3393,6 +3487,7 @@ unlock:
raw_spin_unlock(&cid_lock);
end:
mm_cid_snapshot_time(rq, mm);
+
return cid;
}
@@ -3415,6 +3510,7 @@ static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
}
cid = __mm_cid_get(rq, mm);
__this_cpu_write(pcpu_cid->cid, cid);
+
return cid;
}
@@ -3469,15 +3565,68 @@ static inline void switch_mm_cid(struct rq *rq,
next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);
}
-#else
+#else /* !CONFIG_SCHED_MM_CID: */
static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
static inline void init_sched_mm_cid(struct task_struct *t) { }
-#endif
+#endif /* !CONFIG_SCHED_MM_CID */
extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
+#ifdef CONFIG_RT_MUTEXES
+
+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
+{
+ if (pi_task)
+ prio = min(prio, pi_task->prio);
+
+ return prio;
+}
+
+static inline int rt_effective_prio(struct task_struct *p, int prio)
+{
+ struct task_struct *pi_task = rt_mutex_get_top_task(p);
+
+ return __rt_effective_prio(pi_task, prio);
+}
+
+#else /* !CONFIG_RT_MUTEXES: */
+
+static inline int rt_effective_prio(struct task_struct *p, int prio)
+{
+ return prio;
+}
+
+#endif /* !CONFIG_RT_MUTEXES */
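
rt_effective_prio() above boosts a task to the priority of its top PI waiter by taking a min(), since kernel prio values are lower-is-higher. A tiny standalone illustration (the helper below is a model, not the kernel function; the prio numbers follow the __normal_prio() mapping):

/* Illustration of the priority-inheritance clamp: boosting is a min(). */
#include <stdio.h>

static int effective_prio_model(int own_prio, int top_waiter_prio, int has_waiter)
{
	if (has_waiter && top_waiter_prio < own_prio)
		return top_waiter_prio;	/* inherit the more urgent priority */
	return own_prio;
}

int main(void)
{
	/* nice-0 CFS task (prio 120) blocked on by an RT-90 waiter (prio 9) */
	printf("boosted prio: %d\n", effective_prio_model(120, 9, 1));
	/* no waiter: the task keeps its own priority */
	printf("unboosted prio: %d\n", effective_prio_model(120, -1, 0));
	return 0;
}
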
+
+extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);
+extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
+extern void __setscheduler_prio(struct task_struct *p, int prio);
+extern void set_load_weight(struct task_struct *p, bool update_load);
+extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
+extern void dequeue_task(struct rq *rq, struct task_struct *p, int flags);
+
+extern void check_class_changed(struct rq *rq, struct task_struct *p,
+ const struct sched_class *prev_class,
+ int oldprio);
+
+#ifdef CONFIG_SMP
+extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
+extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
+#else
+
+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
+{
+ return NULL;
+}
+
+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
+{
+}
+
+#endif
+
#endif /* _KERNEL_SCHED_SCHED_H */
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 38f3698f5e5b..237780aa3c53 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -110,8 +110,12 @@ __schedstats_from_se(struct sched_entity *se)
void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
bool sleep);
-void psi_account_irqtime(struct task_struct *task, u32 delta);
-
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
+#else
+static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
+ struct task_struct *prev) {}
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
* PSI tracks state that persists across sleeps, such as iowaits and
* memory stalls. As a result, it has to distinguish between sleeps,
@@ -192,7 +196,8 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
struct task_struct *next,
bool sleep) {}
-static inline void psi_account_irqtime(struct task_struct *task, u32 delta) {}
+static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
+ struct task_struct *prev) {}
#endif /* CONFIG_PSI */
#ifdef CONFIG_SCHED_INFO
@@ -219,7 +224,7 @@ static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
/*
* Called when a task finally hits the CPU. We can now calculate how
* long it was waiting to run. We also note when it began so that we
- * can keep stats on how long its timeslice is.
+ * can keep stats on how long its time-slice is.
*/
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
new file mode 100644
index 000000000000..ae1b42775ef9
--- /dev/null
+++ b/kernel/sched/syscalls.c
@@ -0,0 +1,1699 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kernel/sched/syscalls.c
+ *
+ * Core kernel scheduler syscalls related code
+ *
+ * Copyright (C) 1991-2002 Linus Torvalds
+ * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
+ */
+#include <linux/sched.h>
+#include <linux/cpuset.h>
+#include <linux/sched/debug.h>
+
+#include <uapi/linux/sched/types.h>
+
+#include "sched.h"
+#include "autogroup.h"
+
+static inline int __normal_prio(int policy, int rt_prio, int nice)
+{
+ int prio;
+
+ if (dl_policy(policy))
+ prio = MAX_DL_PRIO - 1;
+ else if (rt_policy(policy))
+ prio = MAX_RT_PRIO - 1 - rt_prio;
+ else
+ prio = NICE_TO_PRIO(nice);
+
+ return prio;
+}
+
+/*
+ * Calculate the expected normal priority: i.e. priority
+ * without taking RT-inheritance into account. Might be
+ * boosted by interactivity modifiers. Changes upon fork,
+ * setprio syscalls, and whenever the interactivity
+ * estimator recalculates.
+ */
+static inline int normal_prio(struct task_struct *p)
+{
+ return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks, or might be boosted by
+ * interactivity modifiers. Will be RT if the task got
+ * RT-boosted. If not then it returns p->normal_prio.
+ */
+static int effective_prio(struct task_struct *p)
+{
+ p->normal_prio = normal_prio(p);
+ /*
+ * If we are RT tasks or we were boosted to RT priority,
+ * keep the priority unchanged. Otherwise, update priority
+ * to the normal priority:
+ */
+ if (!rt_prio(p->prio))
+ return p->normal_prio;
+ return p->prio;
+}
+
+void set_user_nice(struct task_struct *p, long nice)
+{
+ bool queued, running;
+ struct rq *rq;
+ int old_prio;
+
+ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
+ return;
+ /*
+ * We have to be careful, if called from sys_setpriority(),
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ CLASS(task_rq_lock, rq_guard)(p);
+ rq = rq_guard.rq;
+
+ update_rq_clock(rq);
+
+ /*
+ * The RT priorities are set via sched_setscheduler(), but we still
+ * allow the 'normal' nice value to be set - but as expected
+ * it won't have any effect on scheduling until the task is
+ * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
+ */
+ if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
+ p->static_prio = NICE_TO_PRIO(nice);
+ return;
+ }
+
+ queued = task_on_rq_queued(p);
+ running = task_current(rq, p);
+ if (queued)
+ dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
+ if (running)
+ put_prev_task(rq, p);
+
+ p->static_prio = NICE_TO_PRIO(nice);
+ set_load_weight(p, true);
+ old_prio = p->prio;
+ p->prio = effective_prio(p);
+
+ if (queued)
+ enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
+ if (running)
+ set_next_task(rq, p);
+
+ /*
+ * If the task increased its priority or is running and
+ * lowered its priority, then reschedule its CPU:
+ */
+ p->sched_class->prio_changed(rq, p, old_prio);
+}
+EXPORT_SYMBOL(set_user_nice);
+
+/*
+ * is_nice_reduction - check if nice value is an actual reduction
+ *
+ * Similar to can_nice() but does not perform a capability check.
+ *
+ * @p: task
+ * @nice: nice value
+ */
+static bool is_nice_reduction(const struct task_struct *p, const int nice)
+{
+ /* Convert nice value [19,-20] to rlimit style value [1,40]: */
+ int nice_rlim = nice_to_rlimit(nice);
+
+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
+}
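
is_nice_reduction() above compares against RLIMIT_NICE using the rlimit-style encoding in which nice 19 maps to 1 and nice -20 maps to 40, i.e. 20 - nice (cf. nice_to_rlimit()). A small worked example; the *_model names are local to the sketch.

/* Worked example of the [19, -20] -> [1, 40] encoding used above. */
#include <stdio.h>

static long nice_to_rlimit_model(long nice)
{
	return 20 - nice;
}

/* A nice value is permitted if its rlimit encoding fits under the limit. */
static int is_nice_reduction_model(long rlim_nice, long nice)
{
	return nice_to_rlimit_model(nice) <= rlim_nice;
}

int main(void)
{
	printf("nice  19 -> %ld\n", nice_to_rlimit_model(19));	/* 1  */
	printf("nice -20 -> %ld\n", nice_to_rlimit_model(-20));	/* 40 */

	/* an RLIMIT_NICE of 30 permits nice values down to -10, but not -11 */
	printf("-10 ok: %d\n", is_nice_reduction_model(30, -10));
	printf("-11 ok: %d\n", is_nice_reduction_model(30, -11));
	return 0;
}
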
+
+/*
+ * can_nice - check if a task can reduce its nice value
+ * @p: task
+ * @nice: nice value
+ */
+int can_nice(const struct task_struct *p, const int nice)
+{
+ return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
+}
+
+#ifdef __ARCH_WANT_SYS_NICE
+
+/*
+ * sys_nice - change the priority of the current process.
+ * @increment: priority increment
+ *
+ * sys_setpriority is a more generic, but much slower function that
+ * does similar things.
+ */
+SYSCALL_DEFINE1(nice, int, increment)
+{
+ long nice, retval;
+
+ /*
+ * Setpriority might change our priority at the same moment.
+ * We don't have to worry. Conceptually one call occurs first
+ * and we have a single winner.
+ */
+ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
+ nice = task_nice(current) + increment;
+
+ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
+ if (increment < 0 && !can_nice(current, nice))
+ return -EPERM;
+
+ retval = security_task_setnice(current, nice);
+ if (retval)
+ return retval;
+
+ set_user_nice(current, nice);
+ return 0;
+}
+
+#endif
+
+/**
+ * task_prio - return the priority value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The priority value as seen by users in /proc.
+ *
+ * sched policy          return value   kernel prio    user prio/nice
+ *
+ * normal, batch, idle     [0 ... 39]   [100 ... 139]  0/[-20 ... 19]
+ * fifo, rr              [-2 ... -100]  [98 ... 0]     [1 ... 99]
+ * deadline                       -101  -1             0
+ */
+int task_prio(const struct task_struct *p)
+{
+ return p->prio - MAX_RT_PRIO;
+}
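
The table in the task_prio() comment maps kernel prio to the user-visible value by subtracting MAX_RT_PRIO. The sketch below checks a few rows, assuming the mainline values MAX_RT_PRIO = 100 and the nice range [-20, 19]; the macros here are local re-statements, not the kernel headers.

/* Check a few rows of the task_prio() table above. */
#include <stdio.h>

#define MAX_RT_PRIO		100
#define NICE_TO_PRIO(nice)	((nice) + 120)	/* MAX_RT_PRIO + 20 + nice */

static int task_prio_model(int kernel_prio)
{
	return kernel_prio - MAX_RT_PRIO;	/* what /proc reports */
}

int main(void)
{
	printf("nice 0   -> %d\n", task_prio_model(NICE_TO_PRIO(0)));		/* 20   */
	printf("nice -20 -> %d\n", task_prio_model(NICE_TO_PRIO(-20)));		/* 0    */
	printf("RT 1     -> %d\n", task_prio_model(MAX_RT_PRIO - 1 - 1));	/* -2   */
	printf("RT 99    -> %d\n", task_prio_model(MAX_RT_PRIO - 1 - 99));	/* -100 */
	printf("deadline -> %d\n", task_prio_model(-1));			/* -101 */
	return 0;
}
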
+
+/**
+ * idle_cpu - is a given CPU idle currently?
+ * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int idle_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (rq->curr != rq->idle)
+ return 0;
+
+ if (rq->nr_running)
+ return 0;
+
+#ifdef CONFIG_SMP
+ if (rq->ttwu_pending)
+ return 0;
+#endif
+
+ return 1;
+}
+
+/**
+ * available_idle_cpu - is a given CPU idle for enqueuing work.
+ * @cpu: the CPU in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int available_idle_cpu(int cpu)
+{
+ if (!idle_cpu(cpu))
+ return 0;
+
+ if (vcpu_is_preempted(cpu))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * idle_task - return the idle task for a given CPU.
+ * @cpu: the processor in question.
+ *
+ * Return: The idle task for the CPU @cpu.
+ */
+struct task_struct *idle_task(int cpu)
+{
+ return cpu_rq(cpu)->idle;
+}
+
+#ifdef CONFIG_SCHED_CORE
+int sched_core_idle_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (sched_core_enabled(rq) && rq->curr == rq->idle)
+ return 1;
+
+ return idle_cpu(cpu);
+}
+
+#endif
+
+#ifdef CONFIG_SMP
+/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ * cpu_util_{cfs,rt,dl,irq}()
+ * cpu_bw_dl()
+ *
+ * Where the cfs,rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs,rt,dl utilization are the running times measured with rq->clock_task
+ * which excludes things like IRQ and steal-time. These latter are then accrued
+ * in the IRQ utilization.
+ *
+ * The DL bandwidth number OTOH is not a measured metric but a value computed
+ * based on the task model parameters and gives the minimal utilization
+ * required to meet deadlines.
+ */
+unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
+ unsigned long *min,
+ unsigned long *max)
+{
+ unsigned long util, irq, scale;
+ struct rq *rq = cpu_rq(cpu);
+
+ scale = arch_scale_cpu_capacity(cpu);
+
+ /*
+ * Early check to see if IRQ/steal time saturates the CPU, can be
+ * because of inaccuracies in how we track these -- see
+ * update_irq_load_avg().
+ */
+ irq = cpu_util_irq(rq);
+ if (unlikely(irq >= scale)) {
+ if (min)
+ *min = scale;
+ if (max)
+ *max = scale;
+ return scale;
+ }
+
+ if (min) {
+ /*
+ * The minimum utilization returns the highest level between:
+ * - the computed DL bandwidth needed with the IRQ pressure which
+ * steals time from the deadline task.
+ * - The minimum performance requirement for CFS and/or RT.
+ */
+ *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
+
+ /*
+ * When an RT task is runnable and uclamp is not used, we must
+ * ensure that the task will run at maximum compute capacity.
+ */
+ if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
+ *min = max(*min, scale);
+ }
+
+ /*
+ * Because the time spent on RT/DL tasks is visible as 'lost' time to
+ * CFS tasks and we use the same metric to track the effective
+ * utilization (PELT windows are synchronized) we can directly add them
+ * to obtain the CPU's actual utilization.
+ */
+ util = util_cfs + cpu_util_rt(rq);
+ util += cpu_util_dl(rq);
+
+ /*
+ * The maximum hint is a soft bandwidth requirement, which can be lower
+ * than the actual utilization because of uclamp_max requirements.
+ */
+ if (max)
+ *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
+
+ if (util >= scale)
+ return scale;
+
+ /*
+ * There is still idle time; further improve the number by using the
+ * IRQ metric. Because IRQ/steal time is hidden from the task clock we
+ * need to scale the task numbers:
+ *
+ *              max - irq
+ *   U' = irq + --------- * U
+ *                 max
+ */
+ util = scale_irq_capacity(util, irq, scale);
+ util += irq;
+
+ return min(scale, util);
+}
+
+unsigned long sched_cpu_util(int cpu)
+{
+ return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
+}
+#endif /* CONFIG_SMP */
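
The IRQ-scaling step at the end of effective_cpu_util() applies U' = irq + U * (max - irq) / max before clamping to the CPU's capacity. A numeric walk with made-up PELT numbers (the values are purely illustrative):

/* Numeric walk through the IRQ-scaling step above. */
#include <stdio.h>

int main(void)
{
	unsigned long scale = 1024, util = 600, irq = 256;

	/* U' = irq + U * (max - irq) / max */
	unsigned long scaled = util * (scale - irq) / scale;	/* 600 * 768/1024 = 450 */
	unsigned long total  = irq + scaled;			/* 256 + 450 = 706 */

	if (total > scale)
		total = scale;					/* clamp, as the code does */

	printf("effective util = %lu of %lu\n", total, scale);
	return 0;
}
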
+
+/**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
+ */
+static struct task_struct *find_process_by_pid(pid_t pid)
+{
+ return pid ? find_task_by_vpid(pid) : current;
+}
+
+static struct task_struct *find_get_task(pid_t pid)
+{
+ struct task_struct *p;
+ guard(rcu)();
+
+ p = find_process_by_pid(pid);
+ if (likely(p))
+ get_task_struct(p);
+
+ return p;
+}
+
+DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
+ find_get_task(pid), pid_t pid)
+
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY -1
+
+static void __setscheduler_params(struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ int policy = attr->sched_policy;
+
+ if (policy == SETPARAM_POLICY)
+ policy = p->policy;
+
+ p->policy = policy;
+
+ if (dl_policy(policy))
+ __setparam_dl(p, attr);
+ else if (fair_policy(policy))
+ p->static_prio = NICE_TO_PRIO(attr->sched_nice);
+
+ /*
+ * __sched_setscheduler() ensures attr->sched_priority == 0 when
+ * !rt_policy. Always setting this ensures that things like
+ * getparam()/getattr() don't report silly values for !rt tasks.
+ */
+ p->rt_priority = attr->sched_priority;
+ p->normal_prio = normal_prio(p);
+ set_load_weight(p, true);
+}
+
+/*
+ * Check the target process has a UID that matches the current process's:
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ guard(rcu)();
+
+ pcred = __task_cred(p);
+ return (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
+}
+
+#ifdef CONFIG_UCLAMP_TASK
+
+static int uclamp_validate(struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ int util_min = p->uclamp_req[UCLAMP_MIN].value;
+ int util_max = p->uclamp_req[UCLAMP_MAX].value;
+
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
+ util_min = attr->sched_util_min;
+
+ if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
+ return -EINVAL;
+ }
+
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
+ util_max = attr->sched_util_max;
+
+ if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
+ return -EINVAL;
+ }
+
+ if (util_min != -1 && util_max != -1 && util_min > util_max)
+ return -EINVAL;
+
+ /*
+ * We have valid uclamp attributes; make sure uclamp is enabled.
+ *
+ * We need to do that here, because enabling static branches is a
+ * blocking operation which obviously cannot be done while holding
+ * scheduler locks.
+ */
+ static_branch_enable(&sched_uclamp_used);
+
+ return 0;
+}
+
+static bool uclamp_reset(const struct sched_attr *attr,
+ enum uclamp_id clamp_id,
+ struct uclamp_se *uc_se)
+{
+ /* Reset on sched class change for a non user-defined clamp value. */
+ if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
+ !uc_se->user_defined)
+ return true;
+
+ /* Reset on sched_util_{min,max} == -1. */
+ if (clamp_id == UCLAMP_MIN &&
+ attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
+ attr->sched_util_min == -1) {
+ return true;
+ }
+
+ if (clamp_id == UCLAMP_MAX &&
+ attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
+ attr->sched_util_max == -1) {
+ return true;
+ }
+
+ return false;
+}
+
+static void __setscheduler_uclamp(struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ enum uclamp_id clamp_id;
+
+ for_each_clamp_id(clamp_id) {
+ struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
+ unsigned int value;
+
+ if (!uclamp_reset(attr, clamp_id, uc_se))
+ continue;
+
+ /*
+ * RT tasks by default have a 100% boost value that can be modified
+ * at runtime.
+ */
+ if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
+ value = sysctl_sched_uclamp_util_min_rt_default;
+ else
+ value = uclamp_none(clamp_id);
+
+ uclamp_se_set(uc_se, value, false);
+
+ }
+
+ if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
+ return;
+
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
+ attr->sched_util_min != -1) {
+ uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
+ attr->sched_util_min, true);
+ }
+
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
+ attr->sched_util_max != -1) {
+ uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
+ attr->sched_util_max, true);
+ }
+}
+
+#else /* !CONFIG_UCLAMP_TASK: */
+
+static inline int uclamp_validate(struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+static void __setscheduler_uclamp(struct task_struct *p,
+ const struct sched_attr *attr) { }
+#endif
+
+/*
+ * Allow unprivileged RT tasks to decrease priority.
+ * Only issue a capable test if needed and only once to avoid an audit
+ * event on permitted non-privileged operations:
+ */
+static int user_check_sched_setscheduler(struct task_struct *p,
+ const struct sched_attr *attr,
+ int policy, int reset_on_fork)
+{
+ if (fair_policy(policy)) {
+ if (attr->sched_nice < task_nice(p) &&
+ !is_nice_reduction(p, attr->sched_nice))
+ goto req_priv;
+ }
+
+ if (rt_policy(policy)) {
+ unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
+
+ /* Can't set/change the rt policy: */
+ if (policy != p->policy && !rlim_rtprio)
+ goto req_priv;
+
+ /* Can't increase priority: */
+ if (attr->sched_priority > p->rt_priority &&
+ attr->sched_priority > rlim_rtprio)
+ goto req_priv;
+ }
+
+ /*
+ * Can't set/change SCHED_DEADLINE policy at all for now
+ * (safest behavior); in the future we would like to allow
+ * unprivileged DL tasks to increase their relative deadline
+ * or reduce their runtime (both ways reducing utilization)
+ */
+ if (dl_policy(policy))
+ goto req_priv;
+
+ /*
+ * Treat SCHED_IDLE as nice 20. Only allow a switch to
+ * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
+ */
+ if (task_has_idle_policy(p) && !idle_policy(policy)) {
+ if (!is_nice_reduction(p, task_nice(p)))
+ goto req_priv;
+ }
+
+ /* Can't change other user's priorities: */
+ if (!check_same_owner(p))
+ goto req_priv;
+
+ /* Normal users shall not reset the sched_reset_on_fork flag: */
+ if (p->sched_reset_on_fork && !reset_on_fork)
+ goto req_priv;
+
+ return 0;
+
+req_priv:
+ if (!capable(CAP_SYS_NICE))
+ return -EPERM;
+
+ return 0;
+}
+
+int __sched_setscheduler(struct task_struct *p,
+ const struct sched_attr *attr,
+ bool user, bool pi)
+{
+ int oldpolicy = -1, policy = attr->sched_policy;
+ int retval, oldprio, newprio, queued, running;
+ const struct sched_class *prev_class;
+ struct balance_callback *head;
+ struct rq_flags rf;
+ int reset_on_fork;
+ int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+ struct rq *rq;
+ bool cpuset_locked = false;
+
+ /* The pi code expects interrupts enabled */
+ BUG_ON(pi && in_interrupt());
+recheck:
+ /* Double check policy once rq lock held: */
+ if (policy < 0) {
+ reset_on_fork = p->sched_reset_on_fork;
+ policy = oldpolicy = p->policy;
+ } else {
+ reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
+
+ if (!valid_policy(policy))
+ return -EINVAL;
+ }
+
+ if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
+ return -EINVAL;
+
+ /*
+ * Valid priorities for SCHED_FIFO and SCHED_RR are
+ * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
+ * SCHED_BATCH and SCHED_IDLE is 0.
+ */
+ if (attr->sched_priority > MAX_RT_PRIO-1)
+ return -EINVAL;
+ if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
+ (rt_policy(policy) != (attr->sched_priority != 0)))
+ return -EINVAL;
+
+ if (user) {
+ retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
+ if (retval)
+ return retval;
+
+ if (attr->sched_flags & SCHED_FLAG_SUGOV)
+ return -EINVAL;
+
+ retval = security_task_setscheduler(p);
+ if (retval)
+ return retval;
+ }
+
+ /* Update task specific "requested" clamps */
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
+ retval = uclamp_validate(p, attr);
+ if (retval)
+ return retval;
+ }
+
+ /*
+ * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
+ * information.
+ */
+ if (dl_policy(policy) || dl_policy(p->policy)) {
+ cpuset_locked = true;
+ cpuset_lock();
+ }
+
+ /*
+ * Make sure no PI-waiters arrive (or leave) while we are
+ * changing the priority of the task:
+ *
+ * To be able to change p->policy safely, the appropriate
+ * runqueue lock must be held.
+ */
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+
+ /*
+ * Changing the policy of the stop thread is a very bad idea:
+ */
+ if (p == rq->stop) {
+ retval = -EINVAL;
+ goto unlock;
+ }
+
+ /*
+ * If not changing anything there's no need to proceed further,
+ * but store a possible modification of reset_on_fork.
+ */
+ if (unlikely(policy == p->policy)) {
+ if (fair_policy(policy) && attr->sched_nice != task_nice(p))
+ goto change;
+ if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
+ goto change;
+ if (dl_policy(policy) && dl_param_changed(p, attr))
+ goto change;
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
+ goto change;
+
+ p->sched_reset_on_fork = reset_on_fork;
+ retval = 0;
+ goto unlock;
+ }
+change:
+
+ if (user) {
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * Do not allow real-time tasks into groups that have no runtime
+ * assigned.
+ */
+ if (rt_bandwidth_enabled() && rt_policy(policy) &&
+ task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+ !task_group_is_autogroup(task_group(p))) {
+ retval = -EPERM;
+ goto unlock;
+ }
+#endif
+#ifdef CONFIG_SMP
+ if (dl_bandwidth_enabled() && dl_policy(policy) &&
+ !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
+ cpumask_t *span = rq->rd->span;
+
+ /*
+ * Don't allow tasks with an affinity mask smaller than
+ * the entire root_domain to become SCHED_DEADLINE. We
+ * will also fail if there's no bandwidth available.
+ */
+ if (!cpumask_subset(span, p->cpus_ptr) ||
+ rq->rd->dl_bw.bw == 0) {
+ retval = -EPERM;
+ goto unlock;
+ }
+ }
+#endif
+ }
+
+ /* Re-check policy now with rq lock held: */
+ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
+ policy = oldpolicy = -1;
+ task_rq_unlock(rq, p, &rf);
+ if (cpuset_locked)
+ cpuset_unlock();
+ goto recheck;
+ }
+
+ /*
+ * If setscheduling to SCHED_DEADLINE (or changing the parameters
+ * of a SCHED_DEADLINE task) we need to check if enough bandwidth
+ * is available.
+ */
+ if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
+ retval = -EBUSY;
+ goto unlock;
+ }
+
+ p->sched_reset_on_fork = reset_on_fork;
+ oldprio = p->prio;
+
+ newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
+ if (pi) {
+ /*
+ * Take priority boosted tasks into account. If the new
+ * effective priority is unchanged, we just store the new
+ * normal parameters and do not touch the scheduler class and
+ * the runqueue. This will be done when the task deboosts
+ * itself.
+ */
+ newprio = rt_effective_prio(p, newprio);
+ if (newprio == oldprio)
+ queue_flags &= ~DEQUEUE_MOVE;
+ }
+
+ queued = task_on_rq_queued(p);
+ running = task_current(rq, p);
+ if (queued)
+ dequeue_task(rq, p, queue_flags);
+ if (running)
+ put_prev_task(rq, p);
+
+ prev_class = p->sched_class;
+
+ if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+ __setscheduler_params(p, attr);
+ __setscheduler_prio(p, newprio);
+ }
+ __setscheduler_uclamp(p, attr);
+
+ if (queued) {
+ /*
+ * We enqueue to tail when the priority of a task is
+ * increased (user space view).
+ */
+ if (oldprio < p->prio)
+ queue_flags |= ENQUEUE_HEAD;
+
+ enqueue_task(rq, p, queue_flags);
+ }
+ if (running)
+ set_next_task(rq, p);
+
+ check_class_changed(rq, p, prev_class, oldprio);
+
+ /* Prevent rq from going away on us: */
+ preempt_disable();
+ head = splice_balance_callbacks(rq);
+ task_rq_unlock(rq, p, &rf);
+
+ if (pi) {
+ if (cpuset_locked)
+ cpuset_unlock();
+ rt_mutex_adjust_pi(p);
+ }
+
+ /* Run balance callbacks after we've adjusted the PI chain: */
+ balance_callbacks(rq, head);
+ preempt_enable();
+
+ return 0;
+
+unlock:
+ task_rq_unlock(rq, p, &rf);
+ if (cpuset_locked)
+ cpuset_unlock();
+ return retval;
+}
+
+static int _sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param, bool check)
+{
+ struct sched_attr attr = {
+ .sched_policy = policy,
+ .sched_priority = param->sched_priority,
+ .sched_nice = PRIO_TO_NICE(p->static_prio),
+ };
+
+ /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
+ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
+ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ policy &= ~SCHED_RESET_ON_FORK;
+ attr.sched_policy = policy;
+ }
+
+ return __sched_setscheduler(p, &attr, check, true);
+}
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Use sched_set_fifo(), read its comment.
+ *
+ * Return: 0 on success. An error code otherwise.
+ *
+ * NOTE that the task may be already dead.
+ */
+int sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return _sched_setscheduler(p, policy, param, true);
+}
+
+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
+{
+ return __sched_setscheduler(p, attr, true, true);
+}
+
+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
+{
+ return __sched_setscheduler(p, attr, false, true);
+}
+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
+
+/**
+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Just like sched_setscheduler, only don't bother checking if the
+ * current context has permission. For example, this is needed in
+ * stop_machine(): we create temporary high priority worker threads,
+ * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return _sched_setscheduler(p, policy, param, false);
+}
+
+/*
+ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
+ * incapable of resource management, which is the one thing an OS really should
+ * be doing.
+ *
+ * This is of course the reason it is limited to privileged users only.
+ *
+ * Worse still, it is fundamentally impossible to compose static priority
+ * workloads. You cannot take two correctly working static prio workloads
+ * and smash them together and still expect them to work.
+ *
+ * For this reason 'all' FIFO tasks the kernel creates are basically at:
+ *
+ * MAX_RT_PRIO / 2
+ *
+ * The administrator _MUST_ configure the system; the kernel simply doesn't
+ * have enough information to make a sensible choice.
+ */
+void sched_set_fifo(struct task_struct *p)
+{
+ struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
+ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_fifo);
+
+/*
+ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
+ */
+void sched_set_fifo_low(struct task_struct *p)
+{
+ struct sched_param sp = { .sched_priority = 1 };
+ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_fifo_low);
+
+void sched_set_normal(struct task_struct *p, int nice)
+{
+ struct sched_attr attr = {
+ .sched_policy = SCHED_NORMAL,
+ .sched_nice = nice,
+ };
+ WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_normal);
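A minimal sketch of how a driver is expected to use these helpers, assuming a hypothetical kthread worker (my_worker_fn and the thread name are made up): the driver asks for "a FIFO task" and leaves the exact priority to sched_set_fifo() and, ultimately, to the administrator.

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_worker_fn(void *data)
{
	while (!kthread_should_stop())
		msleep_interruptible(1000);	/* placeholder work loop */
	return 0;
}

static struct task_struct *start_rt_worker(void)
{
	struct task_struct *t = kthread_run(my_worker_fn, NULL, "my-rt-worker");

	if (!IS_ERR(t))
		sched_set_fifo(t);	/* MAX_RT_PRIO / 2, admin can re-tune */
	return t;
}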
+
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+{
+ struct sched_param lparam;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
+ return -EFAULT;
+
+ CLASS(find_get_task, p)(pid);
+ if (!p)
+ return -ESRCH;
+
+ return sched_setscheduler(p, policy, &lparam);
+}
+
+/*
+ * Mimics kernel/events/core.c perf_copy_attr().
+ */
+static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
+{
+ u32 size;
+ int ret;
+
+ /* Zero the full structure, so that a short copy will be nice: */
+ memset(attr, 0, sizeof(*attr));
+
+ ret = get_user(size, &uattr->size);
+ if (ret)
+ return ret;
+
+ /* ABI compatibility quirk: */
+ if (!size)
+ size = SCHED_ATTR_SIZE_VER0;
+ if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
+ goto err_size;
+
+ ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
+ if (ret) {
+ if (ret == -E2BIG)
+ goto err_size;
+ return ret;
+ }
+
+ if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
+ size < SCHED_ATTR_SIZE_VER1)
+ return -EINVAL;
+
+ /*
+ * XXX: Do we want to be lenient like existing syscalls; or do we want
+ * to be strict and return an error on out-of-bounds values?
+ */
+ attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
+
+ return 0;
+
+err_size:
+ put_user(sizeof(*attr), &uattr->size);
+ return -E2BIG;
+}
+
+static void get_params(struct task_struct *p, struct sched_attr *attr)
+{
+ if (task_has_dl_policy(p))
+ __getparam_dl(p, attr);
+ else if (task_has_rt_policy(p))
+ attr->sched_priority = p->rt_priority;
+ else
+ attr->sched_nice = task_nice(p);
+}
+
+/**
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
+{
+ if (policy < 0)
+ return -EINVAL;
+
+ return do_sched_setscheduler(pid, policy, param);
+}
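For reference, a user-space sketch of the same operation through the glibc wrapper; raising a task to SCHED_FIFO this way needs CAP_SYS_NICE or a suitable RLIMIT_RTPRIO, and the priority value 10 is arbitrary.

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	return 0;
}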
+
+/**
+ * sys_sched_setparam - set/change the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
+{
+ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
+}
+
+/**
+ * sys_sched_setattr - same as above, but with extended sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, flags)
+{
+ struct sched_attr attr;
+ int retval;
+
+ if (!uattr || pid < 0 || flags)
+ return -EINVAL;
+
+ retval = sched_copy_attr(uattr, &attr);
+ if (retval)
+ return retval;
+
+ if ((int)attr.sched_policy < 0)
+ return -EINVAL;
+ if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
+ attr.sched_policy = SETPARAM_POLICY;
+
+ CLASS(find_get_task, p)(pid);
+ if (!p)
+ return -ESRCH;
+
+ if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
+ get_params(p, &attr);
+
+ return sched_setattr(p, &attr);
+}
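glibc has no sched_setattr() wrapper, so user space goes through syscall(); a sketch of setting up a SCHED_DEADLINE task follows. The 10/30/100 ms parameters are arbitrary, and older toolchains may need struct sched_attr defined by hand instead of taking it from the UAPI header.

#include <linux/sched.h>		/* SCHED_DEADLINE */
#include <linux/sched/types.h>	/* struct sched_attr (UAPI) */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	=  10 * 1000 * 1000,	/* 10 ms  */
		.sched_deadline	=  30 * 1000 * 1000,	/* 30 ms  */
		.sched_period	= 100 * 1000 * 1000,	/* 100 ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	/* ... periodic work, yielding at the end of each activation ... */
	return 0;
}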
+
+/**
+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
+ * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
+ */
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
+{
+ struct task_struct *p;
+ int retval;
+
+ if (pid < 0)
+ return -EINVAL;
+
+ guard(rcu)();
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ retval = security_task_getscheduler(p);
+ if (!retval) {
+ retval = p->policy;
+ if (p->sched_reset_on_fork)
+ retval |= SCHED_RESET_ON_FORK;
+ }
+ return retval;
+}
+
+/**
+ * sys_sched_getparam - get the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
+ */
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
+{
+ struct sched_param lp = { .sched_priority = 0 };
+ struct task_struct *p;
+ int retval;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+
+ scoped_guard (rcu) {
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ return retval;
+
+ if (task_has_rt_policy(p))
+ lp.sched_priority = p->rt_priority;
+ }
+
+ /*
+ * This one might sleep; we cannot do it with a spinlock held ...
+ */
+ return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+}
+
+/*
+ * Copy the kernel-side attribute structure (which might be larger
+ * than what user-space knows about) to user-space.
+ *
+ * Note that all cases are valid: user-space buffer can be larger or
+ * smaller than the kernel-space buffer. The usual case is that both
+ * have the same size.
+ */
+static int
+sched_attr_copy_to_user(struct sched_attr __user *uattr,
+ struct sched_attr *kattr,
+ unsigned int usize)
+{
+ unsigned int ksize = sizeof(*kattr);
+
+ if (!access_ok(uattr, usize))
+ return -EFAULT;
+
+ /*
+ * sched_getattr() ABI forwards and backwards compatibility:
+ *
+ * If usize == ksize then we just copy everything to user-space and all is good.
+ *
+ * If usize < ksize then we only copy as much as user-space has space for;
+ * this keeps ABI compatibility as well. We skip the rest.
+ *
+ * If usize > ksize then user-space is using a newer version of the ABI,
+ * parts of which the kernel doesn't know about. Just ignore it - tooling can
+ * detect the kernel's knowledge of attributes from the attr->size value
+ * which is set to ksize in this case.
+ */
+ kattr->size = min(usize, ksize);
+
+ if (copy_to_user(uattr, kattr, kattr->size))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @usize: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, usize, unsigned int, flags)
+{
+ struct sched_attr kattr = { };
+ struct task_struct *p;
+ int retval;
+
+ if (!uattr || pid < 0 || usize > PAGE_SIZE ||
+ usize < SCHED_ATTR_SIZE_VER0 || flags)
+ return -EINVAL;
+
+ scoped_guard (rcu) {
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ return retval;
+
+ kattr.sched_policy = p->policy;
+ if (p->sched_reset_on_fork)
+ kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ get_params(p, &kattr);
+ kattr.sched_flags &= SCHED_FLAG_ALL;
+
+#ifdef CONFIG_UCLAMP_TASK
+ /*
+ * This could race with another potential updater, but this is fine
+ * because it'll correctly read the old or the new value. We don't need
+ * to guarantee who wins the race as long as it doesn't return garbage.
+ */
+ kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
+ kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
+#endif
+ }
+
+ return sched_attr_copy_to_user(uattr, &kattr, usize);
+}
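The usize handshake above is visible from user space by reading back attr.size: the kernel stores min(usize, ksize), so tooling can tell how much of the structure the running kernel actually knows about. A short sketch, again via raw syscall and the same hypothetical headers as the sched_setattr example:

#include <linux/sched/types.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct sched_attr attr;

	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0)) {
		perror("sched_getattr");
		return 1;
	}
	printf("policy %u, nice %d, kernel filled %u of %zu bytes\n",
	       attr.sched_policy, attr.sched_nice, attr.size, sizeof(attr));
	return 0;
}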
+
+#ifdef CONFIG_SMP
+int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+ /*
+ * If the task isn't a deadline task or admission control is
+ * disabled then we don't care about affinity changes.
+ */
+ if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
+ return 0;
+
+ /*
+ * Since bandwidth control happens on root_domain basis,
+ * if admission test is enabled, we only admit -deadline
+ * tasks allowed to run on all the CPUs in the task's
+ * root_domain.
+ */
+ guard(rcu)();
+ if (!cpumask_subset(task_rq(p)->rd->span, mask))
+ return -EBUSY;
+
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
+{
+ int retval;
+ cpumask_var_t cpus_allowed, new_mask;
+
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+
+ cpuset_cpus_allowed(p, cpus_allowed);
+ cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
+
+ ctx->new_mask = new_mask;
+ ctx->flags |= SCA_CHECK;
+
+ retval = dl_task_check_affinity(p, new_mask);
+ if (retval)
+ goto out_free_new_mask;
+
+ retval = __set_cpus_allowed_ptr(p, ctx);
+ if (retval)
+ goto out_free_new_mask;
+
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(new_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset update.
+ * Just reset the cpumask to the cpuset's cpus_allowed.
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+
+ /*
+ * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
+ * will restore the previous user_cpus_ptr value.
+ *
+ * In the unlikely event a previous user_cpus_ptr exists,
+ * we need to further restrict the mask to what is allowed
+ * by that old user_cpus_ptr.
+ */
+ if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
+ bool empty = !cpumask_and(new_mask, new_mask,
+ ctx->user_mask);
+
+ if (WARN_ON_ONCE(empty))
+ cpumask_copy(new_mask, cpus_allowed);
+ }
+ __set_cpus_allowed_ptr(p, ctx);
+ retval = -EINVAL;
+ }
+
+out_free_new_mask:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+ return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+ struct affinity_context ac;
+ struct cpumask *user_mask;
+ int retval;
+
+ CLASS(find_get_task, p)(pid);
+ if (!p)
+ return -ESRCH;
+
+ if (p->flags & PF_NO_SETAFFINITY)
+ return -EINVAL;
+
+ if (!check_same_owner(p)) {
+ guard(rcu)();
+ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
+ return -EPERM;
+ }
+
+ retval = security_task_setscheduler(p);
+ if (retval)
+ return retval;
+
+ /*
+ * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+ * alloc_user_cpus_ptr() returns NULL.
+ */
+ user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
+ if (user_mask) {
+ cpumask_copy(user_mask, in_mask);
+ } else if (IS_ENABLED(CONFIG_SMP)) {
+ return -ENOMEM;
+ }
+
+ ac = (struct affinity_context){
+ .new_mask = in_mask,
+ .user_mask = user_mask,
+ .flags = SCA_USER,
+ };
+
+ retval = __sched_setaffinity(p, &ac);
+ kfree(ac.user_mask);
+
+ return retval;
+}
+
+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
+ struct cpumask *new_mask)
+{
+ if (len < cpumask_size())
+ cpumask_clear(new_mask);
+ else if (len > cpumask_size())
+ len = cpumask_size();
+
+ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
+}
+
+/**
+ * sys_sched_setaffinity - set the CPU affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new CPU mask
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
+{
+ cpumask_var_t new_mask;
+ int retval;
+
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+ if (retval == 0)
+ retval = sched_setaffinity(pid, new_mask);
+ free_cpumask_var(new_mask);
+ return retval;
+}
+
+long sched_getaffinity(pid_t pid, struct cpumask *mask)
+{
+ struct task_struct *p;
+ int retval;
+
+ guard(rcu)();
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ return retval;
+
+ guard(raw_spinlock_irqsave)(&p->pi_lock);
+ cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
+
+ return 0;
+}
+
+/**
+ * sys_sched_getaffinity - get the CPU affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
+ *
+ * Return: size of CPU mask copied to user_mask_ptr on success. An
+ * error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
+{
+ int ret;
+ cpumask_var_t mask;
+
+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+ return -EINVAL;
+ if (len & (sizeof(unsigned long)-1))
+ return -EINVAL;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ ret = sched_getaffinity(pid, mask);
+ if (ret == 0) {
+ unsigned int retlen = min(len, cpumask_size());
+
+ if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
+ ret = -EFAULT;
+ else
+ ret = retlen;
+ }
+ free_cpumask_var(mask);
+
+ return ret;
+}
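From user space the glibc wrappers hide the "returns the copied mask size" detail and simply return 0 on success; a minimal sketch pinning the calling thread to CPUs 0 and 1 and reading the mask back:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	CPU_SET(1, &set);

	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set) == 0)
		printf("now allowed on %d CPU(s)\n", CPU_COUNT(&set));
	return 0;
}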
+
+static void do_sched_yield(void)
+{
+ struct rq_flags rf;
+ struct rq *rq;
+
+ rq = this_rq_lock_irq(&rf);
+
+ schedstat_inc(rq->yld_count);
+ current->sched_class->yield_task(rq);
+
+ preempt_disable();
+ rq_unlock_irq(rq, &rf);
+ sched_preempt_enable_no_resched();
+
+ schedule();
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU to other tasks. If there are no
+ * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
+ */
+SYSCALL_DEFINE0(sched_yield)
+{
+ do_sched_yield();
+ return 0;
+}
+
+/**
+ * yield - yield the current processor to other threads.
+ *
+ * Do not ever use this function; there's a 99% chance you're doing it wrong.
+ *
+ * The scheduler is at all times free to pick the calling task as the most
+ * eligible task to run; if removing the yield() call from your code breaks
+ * it, it's already broken.
+ *
+ * Typical broken usage is:
+ *
+ * while (!event)
+ * yield();
+ *
+ * where one assumes that yield() will let 'the other' process run that will
+ * make event true. If the current task is a SCHED_FIFO task that will never
+ * happen. Never use yield() as a progress guarantee!!
+ *
+ * If you want to use yield() to wait for something, use wait_event().
+ * If you want to use yield() to be 'nice' for others, use cond_resched().
+ * If you still want to use yield(), do not!
+ */
+void __sched yield(void)
+{
+ set_current_state(TASK_RUNNING);
+ do_sched_yield();
+}
+EXPORT_SYMBOL(yield);
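A sketch of the wait_event() pattern the comment above recommends in place of a yield() polling loop; the flag and wait-queue names are made up, and setting the flag before wake_up() follows the usual wait_event() pattern.

#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static bool my_event;

/* waiter: sleeps until the condition is true, no yield() polling */
static void wait_for_my_event(void)
{
	wait_event(my_waitq, my_event);
}

/* producer: make the condition true, then wake the waiters */
static void fire_my_event(void)
{
	my_event = true;
	wake_up(&my_waitq);
}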
+
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Return:
+ * true (>0) if we indeed boosted the target task.
+ * false (0) if we failed to boost the target.
+ * -ESRCH if there's no task to yield to.
+ */
+int __sched yield_to(struct task_struct *p, bool preempt)
+{
+ struct task_struct *curr = current;
+ struct rq *rq, *p_rq;
+ int yielded = 0;
+
+ scoped_guard (irqsave) {
+ rq = this_rq();
+
+again:
+ p_rq = task_rq(p);
+ /*
+ * If we're the only runnable task on the rq and target rq also
+ * has only one task, there's absolutely no point in yielding.
+ */
+ if (rq->nr_running == 1 && p_rq->nr_running == 1)
+ return -ESRCH;
+
+ guard(double_rq_lock)(rq, p_rq);
+ if (task_rq(p) != p_rq)
+ goto again;
+
+ if (!curr->sched_class->yield_to_task)
+ return 0;
+
+ if (curr->sched_class != p->sched_class)
+ return 0;
+
+ if (task_on_cpu(p_rq, p) || !task_is_running(p))
+ return 0;
+
+ yielded = curr->sched_class->yield_to_task(rq, p);
+ if (yielded) {
+ schedstat_inc(rq->yld_count);
+ /*
+ * Make p's CPU reschedule; pick_next_entity
+ * takes care of fairness.
+ */
+ if (preempt && rq != p_rq)
+ resched_curr(p_rq);
+ }
+ }
+
+ if (yielded)
+ schedule();
+
+ return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
+/**
+ * sys_sched_get_priority_max - return maximum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
+{
+ int ret = -EINVAL;
+
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ ret = MAX_RT_PRIO-1;
+ break;
+ case SCHED_DEADLINE:
+ case SCHED_NORMAL:
+ case SCHED_BATCH:
+ case SCHED_IDLE:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * sys_sched_get_priority_min - return minimum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+{
+ int ret = -EINVAL;
+
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ ret = 1;
+ break;
+ case SCHED_DEADLINE:
+ case SCHED_NORMAL:
+ case SCHED_BATCH:
+ case SCHED_IDLE:
+ ret = 0;
+ }
+ return ret;
+}
+
+static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
+{
+ unsigned int time_slice = 0;
+ int retval;
+
+ if (pid < 0)
+ return -EINVAL;
+
+ scoped_guard (rcu) {
+ struct task_struct *p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ return retval;
+
+ scoped_guard (task_rq_lock, p) {
+ struct rq *rq = scope.rq;
+ if (p->sched_class->get_rr_interval)
+ time_slice = p->sched_class->get_rr_interval(rq, p);
+ }
+ }
+
+ jiffies_to_timespec64(time_slice, t);
+ return 0;
+}
+
+/**
+ * sys_sched_rr_get_interval - return the default time-slice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the time-slice value.
+ *
+ * This syscall writes the default time-slice value of a given process
+ * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the time-slice is in @interval. Otherwise,
+ * an error code.
+ */
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+ struct __kernel_timespec __user *, interval)
+{
+ struct timespec64 t;
+ int retval = sched_rr_get_interval(pid, &t);
+
+ if (retval == 0)
+ retval = put_timespec64(&t, interval);
+
+ return retval;
+}
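A user-space sketch querying the static priority range and the round-robin timeslice through the glibc wrappers; run unprivileged, it simply reports the values for the calling thread.

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	printf("SCHED_RR priority range: %d..%d\n",
	       sched_get_priority_min(SCHED_RR),
	       sched_get_priority_max(SCHED_RR));

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("timeslice: %ld.%09ld s (0 means infinite)\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}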
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
+ struct old_timespec32 __user *, interval)
+{
+ struct timespec64 t;
+ int retval = sched_rr_get_interval(pid, &t);
+
+ if (retval == 0)
+ retval = put_old_timespec32(&t, interval);
+ return retval;
+}
+#endif
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index a6994a1fcc90..784a0be81e84 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -501,7 +501,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
cpumask_clear_cpu(rq->cpu, old_rd->span);
/*
- * If we dont want to free the old_rd yet then
+ * If we don't want to free the old_rd yet then
* set old_rd to NULL to skip the freeing later
* in this function:
*/
@@ -1176,7 +1176,7 @@ fail:
* uniquely identify each group (for a given domain):
*
* - The first is the balance_cpu (see should_we_balance() and the
- * load-balance blub in fair.c); for each group we only want 1 CPU to
+ * load-balance blurb in fair.c); for each group we only want 1 CPU to
* continue balancing at a higher domain.
*
* - The second is the sched_group_capacity; we want all identical groups
@@ -1388,7 +1388,7 @@ static inline void asym_cpu_capacity_update_data(int cpu)
/*
* Search if capacity already exits. If not, track which the entry
- * where we should insert to keep the list ordered descendingly.
+ * where we should insert to keep the list sorted in descending order.
*/
list_for_each_entry(entry, &asym_cap_list, link) {
if (capacity == entry->capacity)
@@ -1853,7 +1853,7 @@ void sched_init_numa(int offline_node)
struct cpumask ***masks;
/*
- * O(nr_nodes^2) deduplicating selection sort -- in order to find the
+ * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
* unique distances in the node_distance() table.
*/
distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
@@ -2750,7 +2750,7 @@ match2:
}
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
- /* Build perf. domains: */
+ /* Build perf domains: */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < n && !sched_energy_update; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j]) &&
@@ -2759,7 +2759,7 @@ match2:
goto match3;
}
}
- /* No match - add perf. domains for a new rd */
+ /* No match - add perf domains for a new rd */
has_eas |= build_perf_domains(doms_new[i]);
match3:
;
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
index 0b1cd985dc27..134d7112ef71 100644
--- a/kernel/sched/wait_bit.c
+++ b/kernel/sched/wait_bit.c
@@ -33,7 +33,7 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
EXPORT_SYMBOL(wake_bit_function);
/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking)
+ * To allow interruptible waiting and asynchronous (i.e. non-blocking)
* waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
* permitted return codes. Nonzero return codes halt waiting and return.
*/
@@ -133,7 +133,7 @@ EXPORT_SYMBOL(__wake_up_bit);
* @bit: the bit of the word being waited on
*
* There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that wakes up waiters
+ * is the part of the hash-table's accessor API that wakes up waiters
* on a bit. For instance, if one were to have waiters on a bitflag,
* one would call wake_up_bit() after clearing the bit.
*
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index e30b60b57614..dc51e521bc1d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -502,6 +502,9 @@ static inline pid_t seccomp_can_sync_threads(void)
/* Skip current, since it is initiating the sync. */
if (thread == caller)
continue;
+ /* Skip exited threads. */
+ if (thread->flags & PF_EXITING)
+ continue;
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
(thread->seccomp.mode == SECCOMP_MODE_FILTER &&
@@ -563,18 +566,21 @@ static void __seccomp_filter_release(struct seccomp_filter *orig)
* @tsk: task the filter should be released from.
*
* This function should only be called when the task is exiting as
- * it detaches it from its filter tree. As such, READ_ONCE() and
- * barriers are not needed here, as would normally be needed.
+ * it detaches it from its filter tree. PF_EXITING has to be set
+ * for the task.
*/
void seccomp_filter_release(struct task_struct *tsk)
{
- struct seccomp_filter *orig = tsk->seccomp.filter;
+ struct seccomp_filter *orig;
- /* We are effectively holding the siglock by not having any sighand. */
- WARN_ON(tsk->sighand != NULL);
+ if (WARN_ON((tsk->flags & PF_EXITING) == 0))
+ return;
+ spin_lock_irq(&tsk->sighand->siglock);
+ orig = tsk->seccomp.filter;
/* Detach task from its filter tree. */
tsk->seccomp.filter = NULL;
+ spin_unlock_irq(&tsk->sighand->siglock);
__seccomp_filter_release(orig);
}
@@ -602,6 +608,13 @@ static inline void seccomp_sync_threads(unsigned long flags)
if (thread == caller)
continue;
+ /*
+ * Skip exited threads. seccomp_filter_release() could have
+ * already been called for this task.
+ */
+ if (thread->flags & PF_EXITING)
+ continue;
+
/* Get a task reference for the new leaf node. */
get_seccomp_filter(caller);
@@ -1466,7 +1479,7 @@ static int recv_wake_function(wait_queue_entry_t *wait, unsigned int mode, int s
void *key)
{
/* Avoid a wakeup if event not interesting for us. */
- if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
+ if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR | EPOLLHUP)))
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
@@ -1476,6 +1489,9 @@ static int recv_wait_event(struct seccomp_filter *filter)
DEFINE_WAIT_FUNC(wait, recv_wake_function);
int ret;
+ if (refcount_read(&filter->users) == 0)
+ return 0;
+
if (atomic_dec_if_positive(&filter->notif->requests) >= 0)
return 0;
@@ -1484,6 +1500,8 @@ static int recv_wait_event(struct seccomp_filter *filter)
if (atomic_dec_if_positive(&filter->notif->requests) >= 0)
break;
+ if (refcount_read(&filter->users) == 0)
+ break;
if (ret)
return ret;
diff --git a/kernel/signal.c b/kernel/signal.c
index 1f9dd41c04be..60c737e423a1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2600,6 +2600,14 @@ static void do_freezer_trap(void)
spin_unlock_irq(&current->sighand->siglock);
cgroup_enter_frozen();
schedule();
+
+ /*
+ * We could've been woken by task_work, run it to clear
+ * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
+ */
+ clear_notify_signal();
+ if (unlikely(task_work_pending(current)))
+ task_work_run();
}
static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
diff --git a/kernel/smp.c b/kernel/smp.c
index f085ebcdf9e7..aaffecdad319 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -25,6 +25,7 @@
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
+#include <linux/string_choices.h>
#include <trace/events/ipi.h>
#define CREATE_TRACE_POINTS
@@ -982,8 +983,7 @@ void __init smp_init(void)
num_nodes = num_online_nodes();
num_cpus = num_online_cpus();
pr_info("Brought up %d node%s, %d CPU%s\n",
- num_nodes, (num_nodes > 1 ? "s" : ""),
- num_cpus, (num_cpus > 1 ? "s" : ""));
+ num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus));
/* Any cleanup work */
smp_cpus_done(setup_max_cpus);
@@ -1119,6 +1119,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
queue_work_on(cpu, system_wq, &sscs.work);
wait_for_completion(&sscs.done);
+ destroy_work_on_stack(&sscs.work);
return sscs.ret;
}
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d7eee421d4bc..2ef820a2d067 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -46,8 +46,8 @@ COND_SYSCALL(io_getevents_time32);
COND_SYSCALL(io_getevents);
COND_SYSCALL(io_pgetevents_time32);
COND_SYSCALL(io_pgetevents);
-COND_SYSCALL_COMPAT(io_pgetevents_time32);
COND_SYSCALL_COMPAT(io_pgetevents);
+COND_SYSCALL_COMPAT(io_pgetevents_time64);
COND_SYSCALL(io_uring_setup);
COND_SYSCALL(io_uring_enter);
COND_SYSCALL(io_uring_register);
@@ -76,8 +76,6 @@ COND_SYSCALL(timerfd_gettime32);
COND_SYSCALL(acct);
COND_SYSCALL(capget);
COND_SYSCALL(capset);
-/* __ARCH_WANT_SYS_CLONE3 */
-COND_SYSCALL(clone3);
COND_SYSCALL(futex);
COND_SYSCALL(futex_time32);
COND_SYSCALL(set_robust_list);
diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c
index 6ef887c19c48..3ac98bb7fb82 100644
--- a/kernel/sysctl-test.c
+++ b/kernel/sysctl-test.c
@@ -367,6 +367,54 @@ static void sysctl_test_api_dointvec_write_single_greater_int_max(
KUNIT_EXPECT_EQ(test, 0, *((int *)table.data));
}
+/*
+ * Test that registering an invalid extra value is not allowed.
+ */
+static void sysctl_test_register_sysctl_sz_invalid_extra_value(
+ struct kunit *test)
+{
+ unsigned char data = 0;
+ struct ctl_table table_foo[] = {
+ {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_FOUR,
+ .extra2 = SYSCTL_ONE_THOUSAND,
+ },
+ };
+
+ struct ctl_table table_bar[] = {
+ {
+ .procname = "bar",
+ .data = &data,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_NEG_ONE,
+ .extra2 = SYSCTL_ONE_HUNDRED,
+ },
+ };
+
+ struct ctl_table table_qux[] = {
+ {
+ .procname = "qux",
+ .data = &data,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO_HUNDRED,
+ },
+ };
+
+ KUNIT_EXPECT_NULL(test, register_sysctl("foo", table_foo));
+ KUNIT_EXPECT_NULL(test, register_sysctl("foo", table_bar));
+ KUNIT_EXPECT_NOT_NULL(test, register_sysctl("foo", table_qux));
+}
+
static struct kunit_case sysctl_test_cases[] = {
KUNIT_CASE(sysctl_test_api_dointvec_null_tbl_data),
KUNIT_CASE(sysctl_test_api_dointvec_table_maxlen_unset),
@@ -378,6 +426,7 @@ static struct kunit_case sysctl_test_cases[] = {
KUNIT_CASE(sysctl_test_dointvec_write_happy_single_negative),
KUNIT_CASE(sysctl_test_api_dointvec_write_single_less_int_min),
KUNIT_CASE(sysctl_test_api_dointvec_write_single_greater_int_max),
+ KUNIT_CASE(sysctl_test_register_sysctl_sz_invalid_extra_value),
{}
};
@@ -388,4 +437,5 @@ static struct kunit_suite sysctl_test_suite = {
kunit_test_suites(&sysctl_test_suite);
+MODULE_DESCRIPTION("KUnit test of proc sysctl");
MODULE_LICENSE("GPL v2");
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e0b917328cf9..e4421594fc25 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -205,7 +205,7 @@ static int _proc_do_string(char *data, int maxlen, int write,
return 0;
}
-static void warn_sysctl_write(struct ctl_table *table)
+static void warn_sysctl_write(const struct ctl_table *table)
{
pr_warn_once("%s wrote to %s when file position was not 0!\n"
"This will not be supported in the future. To silence this\n"
@@ -223,7 +223,7 @@ static void warn_sysctl_write(struct ctl_table *table)
* handlers can ignore the return value.
*/
static bool proc_first_pos_non_zero_ignore(loff_t *ppos,
- struct ctl_table *table)
+ const struct ctl_table *table)
{
if (!*ppos)
return false;
@@ -468,7 +468,7 @@ static int do_proc_douintvec_conv(unsigned long *lvalp,
static const char proc_wspace_sep[] = { ' ', '\t', '\n' };
-static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
+static int __do_proc_dointvec(void *tbl_data, const struct ctl_table *table,
int write, void *buffer,
size_t *lenp, loff_t *ppos,
int (*conv)(bool *negp, unsigned long *lvalp, int *valp,
@@ -541,7 +541,7 @@ out:
return err;
}
-static int do_proc_dointvec(struct ctl_table *table, int write,
+static int do_proc_dointvec(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(bool *negp, unsigned long *lvalp, int *valp,
int write, void *data),
@@ -552,7 +552,7 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
}
static int do_proc_douintvec_w(unsigned int *tbl_data,
- struct ctl_table *table,
+ const struct ctl_table *table,
void *buffer,
size_t *lenp, loff_t *ppos,
int (*conv)(unsigned long *lvalp,
@@ -639,7 +639,7 @@ out:
return err;
}
-static int __do_proc_douintvec(void *tbl_data, struct ctl_table *table,
+static int __do_proc_douintvec(void *tbl_data, const struct ctl_table *table,
int write, void *buffer,
size_t *lenp, loff_t *ppos,
int (*conv)(unsigned long *lvalp,
@@ -675,7 +675,7 @@ static int __do_proc_douintvec(void *tbl_data, struct ctl_table *table,
return do_proc_douintvec_r(i, buffer, lenp, ppos, conv, data);
}
-int do_proc_douintvec(struct ctl_table *table, int write,
+int do_proc_douintvec(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(unsigned long *lvalp,
unsigned int *valp,
@@ -977,16 +977,10 @@ int proc_dou8vec_minmax(struct ctl_table *table, int write,
if (table->maxlen != sizeof(u8))
return -EINVAL;
- if (table->extra1) {
+ if (table->extra1)
min = *(unsigned int *) table->extra1;
- if (min > 255U)
- return -EINVAL;
- }
- if (table->extra2) {
+ if (table->extra2)
max = *(unsigned int *) table->extra2;
- if (max > 255U)
- return -EINVAL;
- }
tmp = *table;
@@ -1023,8 +1017,9 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
}
#endif
-static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
- int write, void *buffer, size_t *lenp, loff_t *ppos,
+static int __do_proc_doulongvec_minmax(void *data,
+ const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
unsigned long convmul, unsigned long convdiv)
{
unsigned long *i, *min, *max;
@@ -1096,7 +1091,7 @@ out:
return err;
}
-static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
+static int do_proc_doulongvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul,
unsigned long convdiv)
{
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 95a7e1b7f1da..5c2daa7ad3f9 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -1,10 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/irq_work.h>
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>
static struct callback_head work_exited; /* all we need is ->next == NULL */
+static void task_work_set_notify_irq(struct irq_work *entry)
+{
+ test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+}
+static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
+ IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
+
/**
* task_work_add - ask the @task to execute @work->func()
* @task: the task which should run the callback
@@ -12,7 +20,7 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
* @notify: how to notify the targeted task
*
* Queue @work for task_work_run() below and notify the @task if @notify
- * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
+ * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
*
* @TWA_SIGNAL works like signals, in that the it will interrupt the targeted
* task and run the task_work, regardless of whether the task is currently
@@ -24,6 +32,8 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
* kernel anyway.
* @TWA_RESUME work is run only when the task exits the kernel and returns to
* user mode, or before entering guest mode.
+ * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
+ * current @task, and only from NMI context.
*
* Fails if the @task is exiting/exited and thus it can't process this @work.
* Otherwise @work->func() will be called when the @task goes through one of
@@ -44,8 +54,13 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
{
struct callback_head *head;
- /* record the work call stack in order to print it in KASAN reports */
- kasan_record_aux_stack(work);
+ if (notify == TWA_NMI_CURRENT) {
+ if (WARN_ON_ONCE(task != current))
+ return -EINVAL;
+ } else {
+ /* record the work call stack in order to print it in KASAN reports */
+ kasan_record_aux_stack(work);
+ }
head = READ_ONCE(task->task_works);
do {
@@ -66,6 +81,9 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
case TWA_SIGNAL_NO_IPI:
__set_notify_signal(task);
break;
+ case TWA_NMI_CURRENT:
+ irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
+ break;
default:
WARN_ON_ONCE(1);
break;
@@ -120,9 +138,9 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
}
/**
- * task_work_cancel - cancel a pending work added by task_work_add()
- * @task: the task which should execute the work
- * @func: identifies the work to remove
+ * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
+ * @task: the task which should execute the func's work
+ * @func: identifies the func to match with a work to remove
*
* Find the last queued pending work with ->func == @func and remove
* it from queue.
@@ -131,11 +149,35 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
* The found work or NULL if not found.
*/
struct callback_head *
-task_work_cancel(struct task_struct *task, task_work_func_t func)
+task_work_cancel_func(struct task_struct *task, task_work_func_t func)
{
return task_work_cancel_match(task, task_work_func_match, func);
}
+static bool task_work_match(struct callback_head *cb, void *data)
+{
+ return cb == data;
+}
+
+/**
+ * task_work_cancel - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @cb: the callback to remove if queued
+ *
+ * Remove a callback from a task's queue if queued.
+ *
+ * RETURNS:
+ * True if the callback was queued and got cancelled, false otherwise.
+ */
+bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
+{
+ struct callback_head *ret;
+
+ ret = task_work_cancel_match(task, task_work_match, cb);
+
+ return ret == cb;
+}
+
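A sketch of the resulting pairing: callers queue a callback_head with task_work_add() and, with the reworked interface above, cancel it by pointer rather than by function; the callback and its bookkeeping here are purely illustrative.

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/task_work.h>

static void my_task_work_fn(struct callback_head *cb)
{
	/* runs in the target task's context, e.g. on return to user mode */
}

static struct callback_head my_twork;

static void queue_then_cancel(struct task_struct *task)
{
	init_task_work(&my_twork, my_task_work_fn);

	if (task_work_add(task, &my_twork, TWA_RESUME))
		return;			/* task is exiting, nothing queued */

	/* changed our mind: cancel this specific callback_head */
	if (task_work_cancel(task, &my_twork))
		pr_debug("work cancelled before it ran\n");
}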
/**
* task_work_run - execute the works added by task_work_add()
*
@@ -168,7 +210,7 @@ void task_work_run(void)
if (!work)
break;
/*
- * Synchronize with task_work_cancel(). It can not remove
+ * Synchronize with task_work_cancel_match(). It can not remove
* the first entry == work, cmpxchg(task_works) must fail.
* But it can remove another entry from the ->next list.
*/
diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c
index d06185e054ea..62e73444ffe4 100644
--- a/kernel/time/clocksource-wdtest.c
+++ b/kernel/time/clocksource-wdtest.c
@@ -22,6 +22,7 @@
#include "tick-internal.h"
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Clocksource watchdog unit test");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");
static int holdoff = IS_BUILTIN(CONFIG_TEST_CLOCKSOURCE_WATCHDOG) ? 10 : 0;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 492c14aac642..b8ee320208d4 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1285,6 +1285,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
struct hrtimer_clock_base *base;
unsigned long flags;
+ if (WARN_ON_ONCE(!timer->function))
+ return;
/*
* Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
* match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
index 20d5df631570..783f2297111b 100644
--- a/kernel/time/test_udelay.c
+++ b/kernel/time/test_udelay.c
@@ -155,5 +155,6 @@ static void __exit udelay_test_exit(void)
module_exit(udelay_test_exit);
+MODULE_DESCRIPTION("udelay test module");
MODULE_AUTHOR("David Riley <davidriley@chromium.org>");
MODULE_LICENSE("GPL");
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 771d1e040303..b4843099a8da 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -1141,6 +1141,7 @@ void tick_broadcast_switch_to_oneshot(void)
#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
struct clock_event_device *bc;
unsigned long flags;
@@ -1148,6 +1149,28 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
bc = tick_broadcast_device.evtdev;
if (bc && broadcast_needs_cpu(bc, deadcpu)) {
+ /*
+ * If the broadcast force bit of the current CPU is set,
+ * then the current CPU has not yet reprogrammed the local
+ * timer device to avoid a ping-pong race. See
+ * ___tick_broadcast_oneshot_control().
+ *
+ * If the broadcast device is hrtimer based then
+ * programming the broadcast event below does not have any
+ * effect because the local clockevent device is not
+ * running and not programmed because the broadcast event
+ * is not earlier than the pending event of the local clock
+ * event device. As a consequence all CPUs waiting for a
+ * broadcast event are stuck forever.
+ *
+ * Detect this condition and reprogram the cpu local timer
+ * device to avoid the starvation.
+ */
+ if (tick_check_broadcast_expired()) {
+ cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask);
+ tick_program_event(td->evtdev->next_event, 1);
+ }
+
/* This moves the broadcast assignment to this CPU: */
clockevents_program_event(bc, bc->next_event, 1);
}
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d88b13076b79..a47bcf71defc 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -178,26 +178,6 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
}
}
-#ifdef CONFIG_NO_HZ_FULL
-static void giveup_do_timer(void *info)
-{
- int cpu = *(unsigned int *)info;
-
- WARN_ON(tick_do_timer_cpu != smp_processor_id());
-
- tick_do_timer_cpu = cpu;
-}
-
-static void tick_take_do_timer_from_boot(void)
-{
- int cpu = smp_processor_id();
- int from = tick_do_timer_boot_cpu;
-
- if (from >= 0 && from != cpu)
- smp_call_function_single(from, giveup_do_timer, &cpu, 1);
-}
-#endif
-
/*
* Setup the tick device
*/
@@ -221,19 +201,25 @@ static void tick_setup_device(struct tick_device *td,
tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
/*
- * The boot CPU may be nohz_full, in which case set
- * tick_do_timer_boot_cpu so the first housekeeping
- * secondary that comes up will take do_timer from
- * us.
+ * The boot CPU may be nohz_full, in which case the
+ * first housekeeping secondary will take do_timer()
+ * from it.
*/
if (tick_nohz_full_cpu(cpu))
tick_do_timer_boot_cpu = cpu;
- } else if (tick_do_timer_boot_cpu != -1 &&
- !tick_nohz_full_cpu(cpu)) {
- tick_take_do_timer_from_boot();
+ } else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
tick_do_timer_boot_cpu = -1;
- WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu);
+ /*
+ * The boot CPU will stay in periodic (NOHZ disabled)
+ * mode until clocksource_done_booting(), called after
+ * smp_init(), selects a high resolution clocksource and
+ * timekeeping_notify() kicks the NOHZ stuff alive.
+ *
+ * So this WRITE_ONCE can only race with the READ_ONCE
+ * check in tick_periodic() but this race is harmless.
+ */
+ WRITE_ONCE(tick_do_timer_cpu, cpu);
#endif
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 71a792cd8936..753a184c7090 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1026,10 +1026,10 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
return;
- WARN_ON_ONCE(1);
- printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
- basemono, ts->next_tick, dev->next_event,
- hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
+ WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu "
+ "timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick,
+ dev->next_event, hrtimer_active(&ts->sched_timer),
+ hrtimer_get_expires(&ts->sched_timer));
}
/*
@@ -1385,20 +1385,6 @@ unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
return ts->idle_calls;
}
-/**
- * tick_nohz_get_idle_calls - return the current idle calls counter value
- *
- * Called from the schedutil frequency scaling governor in scheduler context.
- *
- * Return: the current idle calls counter value for the current CPU
- */
-unsigned long tick_nohz_get_idle_calls(void)
-{
- struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
-
- return ts->idle_calls;
-}
-
static void tick_nohz_account_idle_time(struct tick_sched *ts,
ktime_t now)
{
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
index 3e5d422dd15c..2889763165e5 100644
--- a/kernel/time/time_test.c
+++ b/kernel/time/time_test.c
@@ -96,4 +96,5 @@ static struct kunit_suite time_test_suite = {
};
kunit_test_suite(time_test_suite);
+MODULE_DESCRIPTION("time unit test suite");
MODULE_LICENSE("GPL");
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4e18db1819f8..2fa87dcfeda9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1195,6 +1195,108 @@ static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
return false;
}
+static bool convert_clock(u64 *val, u32 numerator, u32 denominator)
+{
+ u64 rem, res;
+
+ if (!numerator || !denominator)
+ return false;
+
+ res = div64_u64_rem(*val, denominator, &rem) * numerator;
+ *val = res + div_u64(rem * numerator, denominator);
+ return true;
+}
+
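The split into quotient and remainder is what keeps the scaling overflow-safe: the intermediate product is at most (denominator - 1) * numerator rather than *val * numerator, which can exceed 64 bits long before the final result does. A user-space model of the same arithmetic, assuming plain C division in place of div64_u64_rem()/div_u64():

#include <stdbool.h>
#include <stdint.h>

/* scale *val by num/den without forming val * num directly */
static bool scale_u64(uint64_t *val, uint32_t num, uint32_t den)
{
	uint64_t q, r;

	if (!num || !den)
		return false;

	q = *val / den;
	r = *val % den;
	*val = q * num + (r * num) / den;	/* r < den, so r * num stays small */
	return true;
}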
+static bool convert_base_to_cs(struct system_counterval_t *scv)
+{
+ struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
+ struct clocksource_base *base;
+ u32 num, den;
+
+ /* The timestamp was taken from the time keeper clock source */
+ if (cs->id == scv->cs_id)
+ return true;
+
+ /*
+ * Check whether cs_id matches the base clock. Prevent the compiler from
+ * re-evaluating @base as the clocksource might change concurrently.
+ */
+ base = READ_ONCE(cs->base);
+ if (!base || base->id != scv->cs_id)
+ return false;
+
+ num = scv->use_nsecs ? cs->freq_khz : base->numerator;
+ den = scv->use_nsecs ? USEC_PER_SEC : base->denominator;
+
+ if (!convert_clock(&scv->cycles, num, den))
+ return false;
+
+ scv->cycles += base->offset;
+ return true;
+}
+
+static bool convert_cs_to_base(u64 *cycles, enum clocksource_ids base_id)
+{
+ struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
+ struct clocksource_base *base;
+
+ /*
+ * Check whether base_id matches the base clock. Prevent the compiler from
+ * re-evaluating @base as the clocksource might change concurrently.
+ */
+ base = READ_ONCE(cs->base);
+ if (!base || base->id != base_id)
+ return false;
+
+ *cycles -= base->offset;
+ if (!convert_clock(cycles, base->denominator, base->numerator))
+ return false;
+ return true;
+}
+
+static bool convert_ns_to_cs(u64 *delta)
+{
+ struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
+
+ if (BITS_TO_BYTES(fls64(*delta) + tkr->shift) >= sizeof(*delta))
+ return false;
+
+ *delta = div_u64((*delta << tkr->shift) - tkr->xtime_nsec, tkr->mult);
+ return true;
+}
+
+/**
+ * ktime_real_to_base_clock() - Convert CLOCK_REALTIME timestamp to a base clock timestamp
+ * @treal: CLOCK_REALTIME timestamp to convert
+ * @base_id: base clocksource id
+ * @cycles: pointer to store the converted base clock timestamp
+ *
+ * Converts a supplied, future realtime clock value to the corresponding base clock value.
+ *
+ * Return: true if the conversion is successful, false otherwise.
+ */
+bool ktime_real_to_base_clock(ktime_t treal, enum clocksource_ids base_id, u64 *cycles)
+{
+ struct timekeeper *tk = &tk_core.timekeeper;
+ unsigned int seq;
+ u64 delta;
+
+ do {
+ seq = read_seqcount_begin(&tk_core.seq);
+ if ((u64)treal < tk->tkr_mono.base_real)
+ return false;
+ delta = (u64)treal - tk->tkr_mono.base_real;
+ if (!convert_ns_to_cs(&delta))
+ return false;
+ *cycles = tk->tkr_mono.cycle_last + delta;
+ if (!convert_cs_to_base(cycles, base_id))
+ return false;
+ } while (read_seqcount_retry(&tk_core.seq, seq));
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(ktime_real_to_base_clock);
+
/**
* get_device_system_crosststamp - Synchronously capture system/device timestamp
* @get_time_fn: Callback to get simultaneous device time and
@@ -1241,7 +1343,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
* installed timekeeper clocksource
*/
if (system_counterval.cs_id == CSID_GENERIC ||
- tk->tkr_mono.clock->id != system_counterval.cs_id)
+ !convert_base_to_cs(&system_counterval))
return -ENODEV;
cycles = system_counterval.cycles;
@@ -1307,6 +1409,30 @@ int get_device_system_crosststamp(int (*get_time_fn)
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
/**
+ * timekeeping_clocksource_has_base - Check whether the current clocksource
+ * is based on a given base clock
+ * @id: base clocksource ID
+ *
+ * Note: The return value is a snapshot which can become invalid right
+ * after the function returns.
+ *
+ * Return: true if the timekeeper clocksource has a base clock with @id,
+ * false otherwise
+ */
+bool timekeeping_clocksource_has_base(enum clocksource_ids id)
+{
+ /*
+ * This is a snapshot, so no point in using the sequence
+ * count. Just prevent the compiler from re-evaluating @base as the
+ * clocksource might change concurrently.
+ */
+ struct clocksource_base *base = READ_ONCE(tk_core.timekeeper.tkr_mono.clock->base);
+
+ return base ? base->id == id : false;
+}
+EXPORT_SYMBOL_GPL(timekeeping_clocksource_has_base);
+
+/**
* do_settimeofday64 - Sets the time of day.
* @ts: pointer to the timespec64 variable containing the new time
*
@@ -2421,6 +2547,7 @@ EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
/**
* do_adjtimex() - Accessor function to NTP __do_adjtimex function
+ * @txc: Pointer to kernel_timex structure containing NTP parameters
*/
int do_adjtimex(struct __kernel_timex *txc)
{
@@ -2489,6 +2616,8 @@ int do_adjtimex(struct __kernel_timex *txc)
#ifdef CONFIG_NTP_PPS
/**
* hardpps() - Accessor function to NTP __hardpps function
+ * @phase_ts: Pointer to timespec64 structure representing phase timestamp
+ * @raw_ts: Pointer to timespec64 structure representing raw timestamp
*/
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
diff --git a/kernel/torture.c b/kernel/torture.c
index c72ab2d251f4..dede150aef01 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -40,6 +40,7 @@
#include <linux/sched/rt.h>
#include "rcu/rcu.h"
+MODULE_DESCRIPTION("Common functions for in-kernel torture tests");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 166ad5444eea..721c3b221048 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1136,7 +1136,7 @@ config PREEMPTIRQ_DELAY_TEST
config SYNTH_EVENT_GEN_TEST
tristate "Test module for in-kernel synthetic event generation"
- depends on SYNTH_EVENTS
+ depends on SYNTH_EVENTS && m
help
This option creates a test module to check the base
functionality of in-kernel synthetic event definition and
@@ -1149,7 +1149,7 @@ config SYNTH_EVENT_GEN_TEST
config KPROBE_EVENT_GEN_TEST
tristate "Test module for in-kernel kprobe event generation"
- depends on KPROBE_EVENTS
+ depends on KPROBE_EVENTS && m
help
This option creates a test module to check the base
functionality of in-kernel kprobe event definition.
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f5154c051d2c..cd098846e251 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1369,8 +1369,8 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
* bpf_verify_pkcs7_signature - verify a PKCS#7 signature
- * @data_ptr: data to verify
- * @sig_ptr: signature of the data
+ * @data_p: data to verify
+ * @sig_p: signature of the data
* @trusted_keyring: keyring with keys trusted for signature verification
*
* Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
@@ -1378,10 +1378,12 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
*
* Return: 0 on success, a negative value on error.
*/
-__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
- struct bpf_dynptr_kern *sig_ptr,
+__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
struct bpf_key *trusted_keyring)
{
+ struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
+ struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
const void *data, *sig;
u32 data_len, sig_len;
int ret;
@@ -1444,7 +1446,7 @@ __bpf_kfunc_start_defs();
* bpf_get_file_xattr - get xattr of a file
* @file: file to get xattr from
* @name__str: name of the xattr
- * @value_ptr: output buffer of the xattr value
+ * @value_p: output buffer of the xattr value
*
* Get xattr *name__str* of *file* and store the output in *value_ptr*.
*
@@ -1453,8 +1455,9 @@ __bpf_kfunc_start_defs();
* Return: 0 on success, a negative value on error.
*/
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
- struct bpf_dynptr_kern *value_ptr)
+ struct bpf_dynptr *value_p)
{
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
struct dentry *dentry;
u32 value_len;
void *value;
@@ -3295,7 +3298,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
struct bpf_run_ctx *old_run_ctx;
int err = 0;
- if (link->task && current != link->task)
+ if (link->task && current->mm != link->task->mm)
return 0;
if (sleepable)
@@ -3396,8 +3399,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
cnt = attr->link_create.uprobe_multi.cnt;
+ pid = attr->link_create.uprobe_multi.pid;
- if (!upath || !uoffsets || !cnt)
+ if (!upath || !uoffsets || !cnt || pid < 0)
return -EINVAL;
if (cnt > MAX_UPROBE_MULTI_CNT)
return -E2BIG;
@@ -3421,11 +3425,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
goto error_path_put;
}
- pid = attr->link_create.uprobe_multi.pid;
if (pid) {
- rcu_read_lock();
- task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
- rcu_read_unlock();
+ task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
if (!task) {
err = -ESRCH;
goto error_path_put;
@@ -3519,7 +3520,6 @@ static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
}
#endif /* CONFIG_UPROBES */
-#ifdef CONFIG_FPROBE
__bpf_kfunc_start_defs();
__bpf_kfunc bool bpf_session_is_return(void)
@@ -3568,4 +3568,3 @@ static int __init bpf_kprobe_multi_kfuncs_init(void)
}
late_initcall(bpf_kprobe_multi_kfuncs_init);
-#endif
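
The dynptr hunks above switch the kfunc prototypes to the opaque struct bpf_dynptr and cast to the kernel-internal struct bpf_dynptr_kern at the top of each function. The following stand-alone userspace sketch (hypothetical names, not part of the patch) shows the same opaque-handle pattern in plain C.

/*
 * Sketch only: callers see an opaque struct of the right size, the
 * implementation casts to the private layout at the API boundary.
 */
#include <stdio.h>

struct pub_dynptr { char _opaque[16]; };	/* what callers are given */

struct priv_dynptr_kern {			/* what the core works with */
	const char *data;
	unsigned int size;
};

static int verify_blob(struct pub_dynptr *data_p)
{
	struct priv_dynptr_kern *dp = (struct priv_dynptr_kern *)data_p;

	return dp->size ? 0 : -1;		/* stand-in for real verification */
}

int main(void)
{
	struct priv_dynptr_kern d = { .data = "abc", .size = 3 };

	printf("verify: %d\n", verify_blob((struct pub_dynptr *)&d));
	return 0;
}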
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 65208d3b5ed9..eacab4020508 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6969,7 +6969,7 @@ allocate_ftrace_mod_map(struct module *mod,
return mod_map;
}
-static const char *
+static int
ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
@@ -6990,21 +6990,18 @@ ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
*size = found_func->size;
if (off)
*off = addr - found_func->ip;
- if (sym)
- strscpy(sym, found_func->name, KSYM_NAME_LEN);
-
- return found_func->name;
+ return strscpy(sym, found_func->name, KSYM_NAME_LEN);
}
- return NULL;
+ return 0;
}
-const char *
+int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
struct ftrace_mod_map *mod_map;
- const char *ret = NULL;
+ int ret = 0;
/* mod_map is freed via call_rcu() */
preempt_disable();
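
The ftrace_mod_address_lookup() hunk above changes the return contract from "name pointer or NULL" to "copied length, 0, or negative error", matching strscpy(). A stand-alone userspace sketch of that convention (a simplified re-implementation, not kernel code):

/*
 * copy_name() mimics strscpy(): returns the copied length (NUL excluded)
 * or -E2BIG when the destination was too small; address_lookup() returns
 * 0 when no symbol matched.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static long copy_name(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (len >= size) {			/* would not fit: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;			/* bytes copied, NUL excluded */
}

static long address_lookup(unsigned long addr, char *sym, size_t size)
{
	if (addr != 0x1000)
		return 0;			/* no match */
	return copy_name(sym, "my_module_func", size);
}

int main(void)
{
	char sym[32];
	long ret = address_lookup(0x1000, sym, sizeof(sym));

	if (ret > 0)
		printf("%s (%ld bytes)\n", sym, ret);
	return 0;
}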
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 5e263c141574..39877c80d6cb 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -554,6 +554,10 @@ static int parse_btf_field(char *fieldname, const struct btf_type *type,
anon_offs = 0;
field = btf_find_struct_member(ctx->btf, type, fieldname,
&anon_offs);
+ if (IS_ERR(field)) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return PTR_ERR(field);
+ }
if (!field) {
trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
return -ENOENT;
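
The new IS_ERR() check above must run before the NULL check because btf_find_struct_member() can now return three different things: a valid pointer, NULL for "not found", or an encoded errno. A stand-alone userspace sketch of that three-way convention, with simplified re-implementations of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers:

/*
 * Sketch only: errors are encoded in the top page of the address space,
 * so the IS_ERR() test has to come first or real errors would be
 * reported as "not found".
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static const char *find_member(const char *name)
{
	if (!name)
		return ERR_PTR(-EINVAL);	/* malformed request */
	if (name[0] == 'x')
		return NULL;			/* well-formed, but not found */
	return name;				/* found */
}

int main(void)
{
	const char *field = find_member(NULL);

	if (IS_ERR(field))
		printf("lookup error: %ld\n", PTR_ERR(field));
	else if (!field)
		printf("no such field\n");
	else
		printf("found %s\n", field);
	return 0;
}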
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 8541fa1494ae..c98e3b3386ba 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -970,19 +970,17 @@ static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
static void __uprobe_trace_func(struct trace_uprobe *tu,
unsigned long func, struct pt_regs *regs,
- struct uprobe_cpu_buffer **ucbp,
+ struct uprobe_cpu_buffer *ucb,
struct trace_event_file *trace_file)
{
struct uprobe_trace_entry_head *entry;
struct trace_event_buffer fbuffer;
- struct uprobe_cpu_buffer *ucb;
void *data;
int size, esize;
struct trace_event_call *call = trace_probe_event_call(&tu->tp);
WARN_ON(call != trace_file->event_call);
- ucb = prepare_uprobe_buffer(tu, regs, ucbp);
if (WARN_ON_ONCE(ucb->dsize > PAGE_SIZE))
return;
@@ -1014,13 +1012,16 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
struct uprobe_cpu_buffer **ucbp)
{
struct event_file_link *link;
+ struct uprobe_cpu_buffer *ucb;
if (is_ret_probe(tu))
return 0;
+ ucb = prepare_uprobe_buffer(tu, regs, ucbp);
+
rcu_read_lock();
trace_probe_for_each_link_rcu(link, &tu->tp)
- __uprobe_trace_func(tu, 0, regs, ucbp, link->file);
+ __uprobe_trace_func(tu, 0, regs, ucb, link->file);
rcu_read_unlock();
return 0;
@@ -1031,10 +1032,13 @@ static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
struct uprobe_cpu_buffer **ucbp)
{
struct event_file_link *link;
+ struct uprobe_cpu_buffer *ucb;
+
+ ucb = prepare_uprobe_buffer(tu, regs, ucbp);
rcu_read_lock();
trace_probe_for_each_link_rcu(link, &tu->tp)
- __uprobe_trace_func(tu, func, regs, ucbp, link->file);
+ __uprobe_trace_func(tu, func, regs, ucb, link->file);
rcu_read_unlock();
}
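
The uprobe/uretprobe hunks above hoist prepare_uprobe_buffer() out of __uprobe_trace_func() and into the two callers, so the buffer is resolved once per hit rather than once per attached file. A stand-alone sketch of that shape of refactor (hypothetical names, not the tracer code):

/*
 * Sketch only: the caller prepares the per-CPU buffer once and hands the
 * resolved pointer to the loop body instead of re-deriving it per link.
 */
#include <stdio.h>

struct cpu_buffer { int dsize; };

static struct cpu_buffer *prepare_buffer(struct cpu_buffer **cached)
{
	static struct cpu_buffer buf = { .dsize = 64 };

	if (!*cached)			/* fill the cache on first use */
		*cached = &buf;
	return *cached;
}

static void emit_event(int link_id, struct cpu_buffer *ucb)
{
	printf("link %d: %d bytes\n", link_id, ucb->dsize);
}

int main(void)
{
	struct cpu_buffer *cached = NULL;
	struct cpu_buffer *ucb = prepare_buffer(&cached);	/* once */

	for (int link = 0; link < 3; link++)
		emit_event(link, ucb);				/* reuse */
	return 0;
}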
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 76a772072557..04e4513f2985 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -15,7 +15,7 @@
#ifdef CONFIG_PROC_SYSCTL
-static void *get_uts(struct ctl_table *table)
+static void *get_uts(const struct ctl_table *table)
{
char *which = table->data;
struct uts_namespace *uts_ns;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 003474c9a77d..1745ca788ede 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,6 +125,7 @@ enum wq_internal_consts {
HIGHPRI_NICE_LEVEL = MIN_NICE,
WQ_NAME_LEN = 32,
+ WORKER_ID_LEN = 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
};
/*
@@ -215,8 +216,6 @@ struct worker_pool {
struct worker *manager; /* L: purely informational */
struct list_head workers; /* A: attached workers */
- struct list_head dying_workers; /* A: workers about to die */
- struct completion *detach_completion; /* all workers detached */
struct ida worker_ida; /* worker IDs for task name */
@@ -435,7 +434,7 @@ static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
-static struct workqueue_attrs *wq_update_pod_attrs_buf;
+static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
@@ -446,6 +445,9 @@ static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
+/* PL: mirror the cpu_online_mask excluding the CPU in the midst of hotplugging */
+static cpumask_var_t wq_online_cpumask;
+
/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;
@@ -1683,33 +1685,6 @@ static void __pwq_activate_work(struct pool_workqueue *pwq,
__clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
}
-/**
- * pwq_activate_work - Activate a work item if inactive
- * @pwq: pool_workqueue @work belongs to
- * @work: work item to activate
- *
- * Returns %true if activated. %false if already active.
- */
-static bool pwq_activate_work(struct pool_workqueue *pwq,
- struct work_struct *work)
-{
- struct worker_pool *pool = pwq->pool;
- struct wq_node_nr_active *nna;
-
- lockdep_assert_held(&pool->lock);
-
- if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE))
- return false;
-
- nna = wq_node_nr_active(pwq->wq, pool->node);
- if (nna)
- atomic_inc(&nna->nr);
-
- pwq->nr_active++;
- __pwq_activate_work(pwq, work);
- return true;
-}
-
static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
{
int max = READ_ONCE(nna->max);
@@ -2116,7 +2091,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
*/
pwq = get_work_pwq(work);
if (pwq && pwq->pool == pool) {
- unsigned long work_data;
+ unsigned long work_data = *work_data_bits(work);
debug_work_deactivate(work);
@@ -2125,13 +2100,17 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
* pwq->inactive_works since a queued barrier can't be
* canceled (see the comments in insert_wq_barrier()).
*
- * An inactive work item cannot be grabbed directly because
+ * An inactive work item cannot be deleted directly because
* it might have linked barrier work items which, if left
* on the inactive_works list, will confuse pwq->nr_active
- * management later on and cause stall. Make sure the work
- * item is activated before grabbing.
+ * management later on and cause stall. Move the linked
+ * barrier work items to the worklist when deleting the grabbed
+ * item. Also keep WORK_STRUCT_INACTIVE in work_data, so that
+ * it doesn't participate in nr_active management in later
+ * pwq_dec_nr_in_flight().
*/
- pwq_activate_work(pwq, work);
+ if (work_data & WORK_STRUCT_INACTIVE)
+ move_linked_works(work, &pwq->pool->worklist, NULL);
list_del_init(&work->entry);
@@ -2139,7 +2118,6 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
* work->data points to pwq iff queued. Let's point to pool. As
* this destroys work->data needed by the next step, stash it.
*/
- work_data = *work_data_bits(work);
set_work_pool_and_keep_pending(work, pool->id,
pool_offq_flags(pool));
@@ -2297,9 +2275,13 @@ retry:
* If @work was previously on a different pool, it might still be
* running there, in which case the work needs to be queued on that
* pool to guarantee non-reentrancy.
+ *
+	 * For an ordered workqueue, work items must be queued on the newest pwq
+ * for accurate order management. Guaranteed order also guarantees
+ * non-reentrancy. See the comments above unplug_oldest_pwq().
*/
last_pool = get_work_pool(work);
- if (last_pool && last_pool != pool) {
+ if (last_pool && last_pool != pool && !(wq->flags & __WQ_ORDERED)) {
struct worker *worker;
raw_spin_lock(&last_pool->lock);
@@ -2709,6 +2691,27 @@ static void worker_attach_to_pool(struct worker *worker,
mutex_unlock(&wq_pool_attach_mutex);
}
+static void unbind_worker(struct worker *worker)
+{
+ lockdep_assert_held(&wq_pool_attach_mutex);
+
+ kthread_set_per_cpu(worker->task, -1);
+ if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
+ else
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+}
+
+static void detach_worker(struct worker *worker)
+{
+ lockdep_assert_held(&wq_pool_attach_mutex);
+
+ unbind_worker(worker);
+ list_del(&worker->node);
+ worker->pool = NULL;
+}
+
/**
* worker_detach_from_pool() - detach a worker from its pool
* @worker: worker which is attached to its pool
@@ -2720,26 +2723,36 @@ static void worker_attach_to_pool(struct worker *worker,
static void worker_detach_from_pool(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- struct completion *detach_completion = NULL;
/* there is one permanent BH worker per CPU which should never detach */
WARN_ON_ONCE(pool->flags & POOL_BH);
mutex_lock(&wq_pool_attach_mutex);
-
- kthread_set_per_cpu(worker->task, -1);
- list_del(&worker->node);
- worker->pool = NULL;
-
- if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
- detach_completion = pool->detach_completion;
+ detach_worker(worker);
mutex_unlock(&wq_pool_attach_mutex);
/* clear leftover flags without pool->lock after it is detached */
worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
+}
- if (detach_completion)
- complete(detach_completion);
+static int format_worker_id(char *buf, size_t size, struct worker *worker,
+ struct worker_pool *pool)
+{
+ if (worker->rescue_wq)
+ return scnprintf(buf, size, "kworker/R-%s",
+ worker->rescue_wq->name);
+
+ if (pool) {
+ if (pool->cpu >= 0)
+ return scnprintf(buf, size, "kworker/%d:%d%s",
+ pool->cpu, worker->id,
+ pool->attrs->nice < 0 ? "H" : "");
+ else
+ return scnprintf(buf, size, "kworker/u%d:%d",
+ pool->id, worker->id);
+ } else {
+ return scnprintf(buf, size, "kworker/dying");
+ }
}
/**
@@ -2758,7 +2771,6 @@ static struct worker *create_worker(struct worker_pool *pool)
{
struct worker *worker;
int id;
- char id_buf[23];
/* ID is needed to determine kthread name */
id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
@@ -2777,17 +2789,14 @@ static struct worker *create_worker(struct worker_pool *pool)
worker->id = id;
if (!(pool->flags & POOL_BH)) {
- if (pool->cpu >= 0)
- snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
- pool->attrs->nice < 0 ? "H" : "");
- else
- snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
+ char id_buf[WORKER_ID_LEN];
+ format_worker_id(id_buf, sizeof(id_buf), worker, pool);
worker->task = kthread_create_on_node(worker_thread, worker,
- pool->node, "kworker/%s", id_buf);
+ pool->node, "%s", id_buf);
if (IS_ERR(worker->task)) {
if (PTR_ERR(worker->task) == -EINTR) {
- pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
+ pr_err("workqueue: Interrupted when creating a worker thread \"%s\"\n",
id_buf);
} else {
pr_err_once("workqueue: Failed to create a worker thread: %pe",
@@ -2827,35 +2836,22 @@ fail:
return NULL;
}
-static void unbind_worker(struct worker *worker)
+static void detach_dying_workers(struct list_head *cull_list)
{
- lockdep_assert_held(&wq_pool_attach_mutex);
+ struct worker *worker;
- kthread_set_per_cpu(worker->task, -1);
- if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
- else
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+ list_for_each_entry(worker, cull_list, entry)
+ detach_worker(worker);
}
-static void wake_dying_workers(struct list_head *cull_list)
+static void reap_dying_workers(struct list_head *cull_list)
{
struct worker *worker, *tmp;
list_for_each_entry_safe(worker, tmp, cull_list, entry) {
list_del_init(&worker->entry);
- unbind_worker(worker);
- /*
- * If the worker was somehow already running, then it had to be
- * in pool->idle_list when set_worker_dying() happened or we
- * wouldn't have gotten here.
- *
- * Thus, the worker must either have observed the WORKER_DIE
- * flag, or have set its state to TASK_IDLE. Either way, the
- * below will be observed by the worker and is safe to do
- * outside of pool->lock.
- */
- wake_up_process(worker->task);
+ kthread_stop_put(worker->task);
+ kfree(worker);
}
}
@@ -2889,7 +2885,9 @@ static void set_worker_dying(struct worker *worker, struct list_head *list)
worker->flags |= WORKER_DIE;
list_move(&worker->entry, list);
- list_move(&worker->node, &pool->dying_workers);
+
+ /* get an extra task struct reference for later kthread_stop_put() */
+ get_task_struct(worker->task);
}
/**
@@ -2948,9 +2946,9 @@ static void idle_cull_fn(struct work_struct *work)
/*
* Grabbing wq_pool_attach_mutex here ensures an already-running worker
- * cannot proceed beyong worker_detach_from_pool() in its self-destruct
- * path. This is required as a previously-preempted worker could run after
- * set_worker_dying() has happened but before wake_dying_workers() did.
+	 * cannot proceed beyond set_pf_worker() in its self-destruct path.
+ * This is required as a previously-preempted worker could run after
+ * set_worker_dying() has happened but before detach_dying_workers() did.
*/
mutex_lock(&wq_pool_attach_mutex);
raw_spin_lock_irq(&pool->lock);
@@ -2971,8 +2969,10 @@ static void idle_cull_fn(struct work_struct *work)
}
raw_spin_unlock_irq(&pool->lock);
- wake_dying_workers(&cull_list);
+ detach_dying_workers(&cull_list);
mutex_unlock(&wq_pool_attach_mutex);
+
+ reap_dying_workers(&cull_list);
}
static void send_mayday(struct work_struct *work)
@@ -3350,11 +3350,8 @@ woke_up:
raw_spin_unlock_irq(&pool->lock);
set_pf_worker(false);
- set_task_comm(worker->task, "kworker/dying");
ida_free(&pool->worker_ida, worker->id);
- worker_detach_from_pool(worker);
WARN_ON_ONCE(!list_empty(&worker->entry));
- kfree(worker);
return 0;
}
@@ -4745,7 +4742,6 @@ static int init_worker_pool(struct worker_pool *pool)
timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
INIT_LIST_HEAD(&pool->workers);
- INIT_LIST_HEAD(&pool->dying_workers);
ida_init(&pool->worker_ida);
INIT_HLIST_NODE(&pool->hash_node);
@@ -4887,7 +4883,6 @@ static void rcu_free_pool(struct rcu_head *rcu)
*/
static void put_unbound_pool(struct worker_pool *pool)
{
- DECLARE_COMPLETION_ONSTACK(detach_completion);
struct worker *worker;
LIST_HEAD(cull_list);
@@ -4939,14 +4934,11 @@ static void put_unbound_pool(struct worker_pool *pool)
WARN_ON(pool->nr_workers || pool->nr_idle);
raw_spin_unlock_irq(&pool->lock);
- wake_dying_workers(&cull_list);
+ detach_dying_workers(&cull_list);
- if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
- pool->detach_completion = &detach_completion;
mutex_unlock(&wq_pool_attach_mutex);
- if (pool->detach_completion)
- wait_for_completion(pool->detach_completion);
+ reap_dying_workers(&cull_list);
/* shut down the timers */
del_timer_sync(&pool->idle_timer);
@@ -5022,12 +5014,6 @@ fail:
return NULL;
}
-static void rcu_free_pwq(struct rcu_head *rcu)
-{
- kmem_cache_free(pwq_cache,
- container_of(rcu, struct pool_workqueue, rcu));
-}
-
/*
* Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
* refcnt and needs to be destroyed.
@@ -5073,7 +5059,7 @@ static void pwq_release_workfn(struct kthread_work *work)
raw_spin_unlock_irq(&nna->lock);
}
- call_rcu(&pwq->rcu, rcu_free_pwq);
+ kfree_rcu(pwq, rcu);
/*
* If we're the last pwq going away, @wq is already dead and no one
@@ -5145,14 +5131,22 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
return pwq;
}
+static void apply_wqattrs_lock(void)
+{
+ mutex_lock(&wq_pool_mutex);
+}
+
+static void apply_wqattrs_unlock(void)
+{
+ mutex_unlock(&wq_pool_mutex);
+}
+
/**
* wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
* @attrs: the wq_attrs of the default pwq of the target workqueue
* @cpu: the target CPU
- * @cpu_going_down: if >= 0, the CPU to consider as offline
*
- * Calculate the cpumask a workqueue with @attrs should use on @pod. If
- * @cpu_going_down is >= 0, that cpu is considered offline during calculation.
+ * Calculate the cpumask a workqueue with @attrs should use on @pod.
* The result is stored in @attrs->__pod_cpumask.
*
* If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
@@ -5161,29 +5155,18 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
*
* The caller is responsible for ensuring that the cpumask of @pod stays stable.
*/
-static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
- int cpu_going_down)
+static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu)
{
const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
int pod = pt->cpu_pod[cpu];
- /* does @pod have any online CPUs @attrs wants? */
+ /* calculate possible CPUs in @pod that @attrs wants */
cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
- cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
- if (cpu_going_down >= 0)
- cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
-
- if (cpumask_empty(attrs->__pod_cpumask)) {
+ /* does @pod have any online CPUs @attrs wants? */
+ if (!cpumask_intersects(attrs->__pod_cpumask, wq_online_cpumask)) {
cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
return;
}
-
- /* yeap, return possible CPUs in @pod that @attrs wants */
- cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
-
- if (cpumask_empty(attrs->__pod_cpumask))
- pr_warn_once("WARNING: workqueue cpumask: online intersect > "
- "possible intersect\n");
}
/* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */
@@ -5268,7 +5251,7 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
ctx->dfl_pwq->refcnt++;
ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
} else {
- wq_calc_pod_cpumask(new_attrs, cpu, -1);
+ wq_calc_pod_cpumask(new_attrs, cpu);
ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
if (!ctx->pwq_tbl[cpu])
goto out_free;
@@ -5359,8 +5342,6 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
*
* Performs GFP_KERNEL allocations.
*
- * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
- *
* Return: 0 on success and -errno on failure.
*/
int apply_workqueue_attrs(struct workqueue_struct *wq,
@@ -5368,8 +5349,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
{
int ret;
- lockdep_assert_cpus_held();
-
mutex_lock(&wq_pool_mutex);
ret = apply_workqueue_attrs_locked(wq, attrs);
mutex_unlock(&wq_pool_mutex);
@@ -5378,15 +5357,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
}
/**
- * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
+ * unbound_wq_update_pwq - update a pwq slot for CPU hot[un]plug
* @wq: the target workqueue
- * @cpu: the CPU to update pool association for
- * @hotplug_cpu: the CPU coming up or going down
- * @online: whether @cpu is coming up or going down
+ * @cpu: the CPU to update the pwq slot for
*
* This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
- * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
- * @wq accordingly.
+ * %CPU_DOWN_FAILED. @cpu is in the same pod as the CPU being hot[un]plugged.
*
*
* If pod affinity can't be adjusted due to memory allocation failure, it falls
@@ -5399,10 +5375,8 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
* CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
* responsibility to flush the work item from CPU_DOWN_PREPARE.
*/
-static void wq_update_pod(struct workqueue_struct *wq, int cpu,
- int hotplug_cpu, bool online)
+static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
{
- int off_cpu = online ? -1 : hotplug_cpu;
struct pool_workqueue *old_pwq = NULL, *pwq;
struct workqueue_attrs *target_attrs;
@@ -5416,13 +5390,13 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu,
* Let's use a preallocated one. The following buf is protected by
* CPU hotplug exclusion.
*/
- target_attrs = wq_update_pod_attrs_buf;
+ target_attrs = unbound_wq_update_pwq_attrs_buf;
copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
/* nothing to do if the target cpumask matches the current pwq */
- wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
+ wq_calc_pod_cpumask(target_attrs, cpu);
if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
return;
@@ -5456,21 +5430,24 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
bool highpri = wq->flags & WQ_HIGHPRI;
int cpu, ret;
+ lockdep_assert_held(&wq_pool_mutex);
+
wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
if (!wq->cpu_pwq)
goto enomem;
if (!(wq->flags & WQ_UNBOUND)) {
+ struct worker_pool __percpu *pools;
+
+ if (wq->flags & WQ_BH)
+ pools = bh_worker_pools;
+ else
+ pools = cpu_worker_pools;
+
for_each_possible_cpu(cpu) {
struct pool_workqueue **pwq_p;
- struct worker_pool __percpu *pools;
struct worker_pool *pool;
- if (wq->flags & WQ_BH)
- pools = bh_worker_pools;
- else
- pools = cpu_worker_pools;
-
pool = &(per_cpu_ptr(pools, cpu)[highpri]);
pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu);
@@ -5488,26 +5465,18 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
return 0;
}
- cpus_read_lock();
if (wq->flags & __WQ_ORDERED) {
struct pool_workqueue *dfl_pwq;
- ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+ ret = apply_workqueue_attrs_locked(wq, ordered_wq_attrs[highpri]);
/* there should only be single pwq for ordering guarantee */
dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
wq->pwqs.prev != &dfl_pwq->pwqs_node),
"ordering guarantee broken for workqueue %s\n", wq->name);
} else {
- ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+ ret = apply_workqueue_attrs_locked(wq, unbound_std_wq_attrs[highpri]);
}
- cpus_read_unlock();
-
- /* for unbound pwq, flush the pwq_release_worker ensures that the
- * pwq_release_workfn() completes before calling kfree(wq).
- */
- if (ret)
- kthread_flush_worker(pwq_release_worker);
return ret;
@@ -5542,8 +5511,11 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
static int init_rescuer(struct workqueue_struct *wq)
{
struct worker *rescuer;
+ char id_buf[WORKER_ID_LEN];
int ret;
+ lockdep_assert_held(&wq_pool_mutex);
+
if (!(wq->flags & WQ_MEM_RECLAIM))
return 0;
@@ -5555,7 +5527,9 @@ static int init_rescuer(struct workqueue_struct *wq)
}
rescuer->rescue_wq = wq;
- rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
+ format_worker_id(id_buf, sizeof(id_buf), rescuer, NULL);
+
+ rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", id_buf);
if (IS_ERR(rescuer->task)) {
ret = PTR_ERR(rescuer->task);
pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
@@ -5566,7 +5540,7 @@ static int init_rescuer(struct workqueue_struct *wq)
wq->rescuer = rescuer;
if (wq->flags & WQ_UNBOUND)
- kthread_bind_mask(rescuer->task, wq_unbound_cpumask);
+ kthread_bind_mask(rescuer->task, unbound_effective_cpumask(wq));
else
kthread_bind_mask(rescuer->task, cpu_possible_mask);
wake_up_process(rescuer->task);
@@ -5714,21 +5688,14 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
goto err_unreg_lockdep;
}
- if (alloc_and_link_pwqs(wq) < 0)
- goto err_free_node_nr_active;
-
- if (wq_online && init_rescuer(wq) < 0)
- goto err_destroy;
-
- if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
- goto err_destroy;
-
/*
- * wq_pool_mutex protects global freeze state and workqueues list.
- * Grab it, adjust max_active and add the new @wq to workqueues
- * list.
+ * wq_pool_mutex protects the workqueues list, allocations of PWQs,
+ * and the global freeze state.
*/
- mutex_lock(&wq_pool_mutex);
+ apply_wqattrs_lock();
+
+ if (alloc_and_link_pwqs(wq) < 0)
+ goto err_unlock_free_node_nr_active;
mutex_lock(&wq->mutex);
wq_adjust_max_active(wq);
@@ -5736,13 +5703,27 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
list_add_tail_rcu(&wq->list, &workqueues);
- mutex_unlock(&wq_pool_mutex);
+ if (wq_online && init_rescuer(wq) < 0)
+ goto err_unlock_destroy;
+
+ apply_wqattrs_unlock();
+
+ if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
+ goto err_destroy;
return wq;
-err_free_node_nr_active:
- if (wq->flags & WQ_UNBOUND)
+err_unlock_free_node_nr_active:
+ apply_wqattrs_unlock();
+ /*
+	 * Failed alloc_and_link_pwqs() may leave pending pwq->release_work;
+ * flushing the pwq_release_worker ensures that the pwq_release_workfn()
+ * completes before calling kfree(wq).
+ */
+ if (wq->flags & WQ_UNBOUND) {
+ kthread_flush_worker(pwq_release_worker);
free_node_nr_active(wq->node_nr_active);
+ }
err_unreg_lockdep:
wq_unregister_lockdep(wq);
wq_free_lockdep(wq);
@@ -5750,6 +5731,8 @@ err_free_wq:
free_workqueue_attrs(wq->unbound_attrs);
kfree(wq);
return NULL;
+err_unlock_destroy:
+ apply_wqattrs_unlock();
err_destroy:
destroy_workqueue(wq);
return NULL;
@@ -6384,19 +6367,15 @@ void show_freezable_workqueues(void)
/* used to show worker information through /proc/PID/{comm,stat,status} */
void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
{
- int off;
-
- /* always show the actual comm */
- off = strscpy(buf, task->comm, size);
- if (off < 0)
- return;
-
/* stabilize PF_WQ_WORKER and worker pool association */
mutex_lock(&wq_pool_attach_mutex);
if (task->flags & PF_WQ_WORKER) {
struct worker *worker = kthread_data(task);
struct worker_pool *pool = worker->pool;
+ int off;
+
+ off = format_worker_id(buf, size, worker, pool);
if (pool) {
raw_spin_lock_irq(&pool->lock);
@@ -6415,6 +6394,8 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
}
raw_spin_unlock_irq(&pool->lock);
}
+ } else {
+ strscpy(buf, task->comm, size);
}
mutex_unlock(&wq_pool_attach_mutex);
@@ -6590,6 +6571,8 @@ int workqueue_online_cpu(unsigned int cpu)
mutex_lock(&wq_pool_mutex);
+ cpumask_set_cpu(cpu, wq_online_cpumask);
+
for_each_pool(pool, pi) {
/* BH pools aren't affected by hotplug */
if (pool->flags & POOL_BH)
@@ -6612,7 +6595,7 @@ int workqueue_online_cpu(unsigned int cpu)
int tcpu;
for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
- wq_update_pod(wq, tcpu, cpu, true);
+ unbound_wq_update_pwq(wq, tcpu);
mutex_lock(&wq->mutex);
wq_update_node_max_active(wq, -1);
@@ -6636,6 +6619,9 @@ int workqueue_offline_cpu(unsigned int cpu)
/* update pod affinity of unbound workqueues */
mutex_lock(&wq_pool_mutex);
+
+ cpumask_clear_cpu(cpu, wq_online_cpumask);
+
list_for_each_entry(wq, &workqueues, list) {
struct workqueue_attrs *attrs = wq->unbound_attrs;
@@ -6644,7 +6630,7 @@ int workqueue_offline_cpu(unsigned int cpu)
int tcpu;
for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
- wq_update_pod(wq, tcpu, cpu, false);
+ unbound_wq_update_pwq(wq, tcpu);
mutex_lock(&wq->mutex);
wq_update_node_max_active(wq, cpu);
@@ -6870,8 +6856,7 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
* @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask
*
* This function can be called from cpuset code to provide a set of isolated
- * CPUs that should be excluded from wq_unbound_cpumask. The caller must hold
- * either cpus_read_lock or cpus_write_lock.
+ * CPUs that should be excluded from wq_unbound_cpumask.
*/
int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
{
@@ -6881,12 +6866,8 @@ int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
- lockdep_assert_cpus_held();
mutex_lock(&wq_pool_mutex);
- /* Save the current isolated cpumask & export it via sysfs */
- cpumask_copy(wq_isolated_cpumask, exclude_cpumask);
-
/*
* If the operation fails, it will fall back to
* wq_requested_unbound_cpumask which is initially set to
@@ -6898,6 +6879,10 @@ int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
if (!cpumask_equal(cpumask, wq_unbound_cpumask))
ret = workqueue_apply_unbound_cpumask(cpumask);
+ /* Save the current isolated cpumask & export it via sysfs */
+ if (!ret)
+ cpumask_copy(wq_isolated_cpumask, exclude_cpumask);
+
mutex_unlock(&wq_pool_mutex);
free_cpumask_var(cpumask);
return ret;
@@ -6931,9 +6916,8 @@ static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
wq_affn_dfl = affn;
list_for_each_entry(wq, &workqueues, list) {
- for_each_online_cpu(cpu) {
- wq_update_pod(wq, cpu, cpu, true);
- }
+ for_each_online_cpu(cpu)
+ unbound_wq_update_pwq(wq, cpu);
}
mutex_unlock(&wq_pool_mutex);
@@ -7021,19 +7005,6 @@ static struct attribute *wq_sysfs_attrs[] = {
};
ATTRIBUTE_GROUPS(wq_sysfs);
-static void apply_wqattrs_lock(void)
-{
- /* CPUs should stay stable across pwq creations and installations */
- cpus_read_lock();
- mutex_lock(&wq_pool_mutex);
-}
-
-static void apply_wqattrs_unlock(void)
-{
- mutex_unlock(&wq_pool_mutex);
- cpus_read_unlock();
-}
-
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -7232,16 +7203,12 @@ static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
*/
cpumask_and(cpumask, cpumask, cpu_possible_mask);
if (!cpumask_empty(cpumask)) {
+ ret = 0;
apply_wqattrs_lock();
- cpumask_copy(wq_requested_unbound_cpumask, cpumask);
- if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
- ret = 0;
- goto out_unlock;
- }
-
- ret = workqueue_apply_unbound_cpumask(cpumask);
-
-out_unlock:
+ if (!cpumask_equal(cpumask, wq_unbound_cpumask))
+ ret = workqueue_apply_unbound_cpumask(cpumask);
+ if (!ret)
+ cpumask_copy(wq_requested_unbound_cpumask, cpumask);
apply_wqattrs_unlock();
}
@@ -7560,10 +7527,18 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
notrace void wq_watchdog_touch(int cpu)
{
+ unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
+ unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
+ unsigned long now = jiffies;
+
if (cpu >= 0)
- per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
+ per_cpu(wq_watchdog_touched_cpu, cpu) = now;
+ else
+ WARN_ONCE(1, "%s should be called with valid CPU", __func__);
- wq_watchdog_touched = jiffies;
+ /* Don't unnecessarily store to global cacheline */
+ if (time_after(now, touch_ts + thresh / 4))
+ WRITE_ONCE(wq_watchdog_touched, jiffies);
}
static void wq_watchdog_set_thresh(unsigned long thresh)
@@ -7673,10 +7648,12 @@ void __init workqueue_init_early(void)
BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+ BUG_ON(!alloc_cpumask_var(&wq_online_cpumask, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
+ cpumask_copy(wq_online_cpumask, cpu_online_mask);
cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
@@ -7687,8 +7664,8 @@ void __init workqueue_init_early(void)
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
- wq_update_pod_attrs_buf = alloc_workqueue_attrs();
- BUG_ON(!wq_update_pod_attrs_buf);
+ unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
+ BUG_ON(!unbound_wq_update_pwq_attrs_buf);
/*
* If nohz_full is enabled, set power efficient workqueue as unbound.
@@ -7953,12 +7930,12 @@ void __init workqueue_init_topology(void)
/*
* Workqueues allocated earlier would have all CPUs sharing the default
- * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
- * combinations to apply per-pod sharing.
+ * worker pool. Explicitly call unbound_wq_update_pwq() on all workqueue
+ * and CPU combinations to apply per-pod sharing.
*/
list_for_each_entry(wq, &workqueues, list) {
for_each_online_cpu(cpu)
- wq_update_pod(wq, cpu, cpu, true);
+ unbound_wq_update_pwq(wq, cpu);
if (wq->flags & WQ_UNBOUND) {
mutex_lock(&wq->mutex);
wq_update_node_max_active(wq, -1);
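
Since the workqueue diff above centralizes all kworker naming in format_worker_id(), the following stand-alone sketch of the resulting name formats may help (userspace, hypothetical values; it mirrors the scnprintf() calls in the hunk):

/*
 * Sketch only: rescuers become "kworker/R-<wq>", per-CPU workers
 * "kworker/<cpu>:<id>[H]", unbound workers "kworker/u<pool>:<id>", and a
 * detached worker reports itself as "kworker/dying".
 */
#include <stdio.h>

#define WQ_NAME_LEN	32
#define WORKER_ID_LEN	(10 + WQ_NAME_LEN)	/* "kworker/R-" + name */

static int format_id(char *buf, size_t size, const char *rescue_wq,
		     int cpu, int pool_id, int worker_id, int nice)
{
	if (rescue_wq)
		return snprintf(buf, size, "kworker/R-%s", rescue_wq);
	if (cpu >= 0)
		return snprintf(buf, size, "kworker/%d:%d%s", cpu, worker_id,
				nice < 0 ? "H" : "");
	if (pool_id >= 0)
		return snprintf(buf, size, "kworker/u%d:%d", pool_id, worker_id);
	return snprintf(buf, size, "kworker/dying");
}

int main(void)
{
	char buf[WORKER_ID_LEN];

	format_id(buf, sizeof(buf), "nvme-wq", -1, -1, 0, 0);
	puts(buf);					/* kworker/R-nvme-wq */
	format_id(buf, sizeof(buf), NULL, 3, -1, 2, -20);
	puts(buf);					/* kworker/3:2H */
	format_id(buf, sizeof(buf), NULL, -1, 7, 1, 0);
	puts(buf);					/* kworker/u7:1 */
	return 0;
}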
diff --git a/lib/Kconfig b/lib/Kconfig
index d33a268bc256..b38849af6f13 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -539,13 +539,7 @@ config CPUMASK_OFFSTACK
stack overflow.
config FORCE_NR_CPUS
- bool "Set number of CPUs at compile time"
- depends on SMP && EXPERT && !COMPILE_TEST
- help
- Say Yes if you have NR_CPUS set to an actual number of possible
- CPUs in your system, not to a default value. This forces the core
- code to rely on compile-time value and optimize kernel routines
- better.
+ def_bool !SMP
config CPU_RMAP
bool
@@ -629,6 +623,7 @@ config SIGNATURE
config DIMLIB
tristate
+ depends on NET
help
Dynamic Interrupt Moderation library.
Implements an algorithm for dynamically changing CQ moderation values
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 59b6765d86b8..561e346f5cb0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2505,18 +2505,6 @@ config TEST_VMALLOC
If unsure, say N.
-config TEST_USER_COPY
- tristate "Test user/kernel boundary protections"
- depends on m
- help
- This builds the "test_user_copy" module that runs sanity checks
- on the copy_to/from_user infrastructure, making sure basic
- user/kernel boundary testing is working. If it fails to load,
- a regression has been detected in the user/kernel memory boundary
- protections.
-
- If unsure, say N.
-
config TEST_BPF
tristate "Test BPF filter functionality"
depends on m && NET
@@ -2814,6 +2802,15 @@ config SIPHASH_KUNIT_TEST
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+config USERCOPY_KUNIT_TEST
+ tristate "KUnit Test for user/kernel boundary protections"
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the "usercopy_kunit" module that runs sanity checks
+ on the copy_to/from_user infrastructure, making sure basic
+ user/kernel boundary testing is working.
+
config TEST_UDELAY
tristate "udelay test driver"
help
diff --git a/lib/Makefile b/lib/Makefile
index 3b1769045651..322bb127b4dc 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -78,7 +78,6 @@ obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
-obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
@@ -388,6 +387,7 @@ CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
@@ -426,3 +426,7 @@ $(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
ifeq ($(CONFIG_FORTIFY_SOURCE),y)
$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
endif
+
+# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
+# Pass CFLAGS_KASAN to avoid warnings.
+$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y))
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 11ed973ac359..81e5f9a70f22 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -227,6 +227,7 @@ struct page_ext_operations page_alloc_tagging_ops = {
};
EXPORT_SYMBOL(page_alloc_tagging_ops);
+#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
{
.procname = "mem_profiling",
@@ -238,9 +239,19 @@ static struct ctl_table memory_allocation_profiling_sysctls[] = {
#endif
.proc_handler = proc_do_static_key,
},
- { }
};
+static void __init sysctl_init(void)
+{
+ if (!mem_profiling_support)
+ memory_allocation_profiling_sysctls[0].mode = 0444;
+
+ register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+}
+#else /* CONFIG_SYSCTL */
+static inline void sysctl_init(void) {}
+#endif /* CONFIG_SYSCTL */
+
static int __init alloc_tag_init(void)
{
const struct codetag_type_desc desc = {
@@ -253,9 +264,7 @@ static int __init alloc_tag_init(void)
if (IS_ERR(alloc_tag_cttype))
return PTR_ERR(alloc_tag_cttype);
- if (!mem_profiling_support)
- memory_allocation_profiling_sysctls[0].mode = 0444;
- register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+ sysctl_init();
procfs_init();
return 0;
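
The alloc_tag change above wraps the sysctl registration in CONFIG_SYSCTL and supplies an empty inline stub otherwise, so alloc_tag_init() can call sysctl_init() unconditionally. A stand-alone sketch of that stub pattern (hypothetical config switch):

/*
 * Sketch only: the optional feature gets a real initializer when its
 * config symbol is set and an empty inline stub when it is not, so the
 * caller never needs an #ifdef of its own.
 */
#include <stdio.h>

/* #define CONFIG_SYSCTL 1 */		/* flip to enable the real version */

#ifdef CONFIG_SYSCTL
static void sysctl_init(void)
{
	printf("registering sysctls\n");
}
#else
static inline void sysctl_init(void) {}
#endif

int main(void)
{
	sysctl_init();			/* always safe to call */
	printf("init done\n");
	return 0;
}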
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
index 56d8bafeb848..8267e8d71338 100755
--- a/lib/build_OID_registry
+++ b/lib/build_OID_registry
@@ -38,7 +38,9 @@ close IN_FILE || die;
#
open C_FILE, ">$ARGV[1]" or die;
print C_FILE "/*\n";
-print C_FILE " * Automatically generated by ", $0 =~ s#^\Q$abs_srctree/\E##r, ". Do not edit\n";
+my $scriptname = $0;
+$scriptname =~ s#^\Q$abs_srctree/\E##;
+print C_FILE " * Automatically generated by ", $scriptname, ". Do not edit\n";
print C_FILE " */\n";
#
diff --git a/lib/closure.c b/lib/closure.c
index 07409e9e35a5..116afae2eed9 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,14 +13,25 @@
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
- BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
+ if (WARN(flags & CLOSURE_GUARD_MASK,
+ "closure has guard bits set: %x (%u)",
+ flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
+ r &= ~CLOSURE_GUARD_MASK;
+
+ WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+ "closure ref hit 0 with incorrect flags set: %x (%u)",
+ flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ closure_put_after_sub_checks(flags);
- if (!r) {
+ if (!(flags & CLOSURE_REMAINING_MASK)) {
smp_acquire__after_ctrl_dep();
cl->closure_get_happened = false;
@@ -139,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(__closure_sync);
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ set_closure_fn(cl, closure_sync_fn, NULL);
+
+ unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+ &cl->remaining);
+
+ closure_put_after_sub_checks(flags);
+
+ if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ }
+
+ if (cl->parent)
+ closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
struct closure_syncer s = { .task = current };
@@ -198,6 +244,9 @@ void closure_debug_destroy(struct closure *cl)
{
unsigned long flags;
+ if (cl->magic == CLOSURE_MAGIC_STACK)
+ return;
+
BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
cl->magic = CLOSURE_MAGIC_DEAD;
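
The closure_put_after_sub_checks() hunk above turns two BUG_ON()s into warnings that mask off the bad bits and carry on. A stand-alone sketch of that recover-and-warn style of refcount/flag validation (the bit layout here is invented for illustration):

/*
 * Sketch only: guard-bit misuse is reported and masked off instead of
 * aborting, so the remaining-count logic can still run.
 */
#include <stdio.h>

#define REMAINING_MASK	0x0000ffffu
#define DESTRUCTOR	0x00010000u
#define GUARD_MASK	0xfffe0000u

static unsigned put_after_sub_checks(unsigned flags)
{
	if (flags & GUARD_MASK) {
		fprintf(stderr, "warning: guard bits set: %x\n",
			flags & GUARD_MASK);
		flags &= ~GUARD_MASK;		/* recover instead of abort */
	}
	if (!(flags & REMAINING_MASK) && (flags & ~DESTRUCTOR))
		fprintf(stderr, "warning: ref hit 0 with flags %x\n",
			flags & ~DESTRUCTOR);
	return flags;
}

int main(void)
{
	unsigned flags = put_after_sub_checks(0x00020003u);

	if (!(flags & REMAINING_MASK))
		printf("last reference dropped\n");
	else
		printf("%u references remain\n", flags & REMAINING_MASK);
	return 0;
}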
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fb12a9bacd2f..7cea91e193a8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -78,16 +78,17 @@ static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
-static int debug_objects_maxchain __read_mostly;
-static int __maybe_unused debug_objects_maxchecked __read_mostly;
-static int debug_objects_fixups __read_mostly;
-static int debug_objects_warnings __read_mostly;
-static int debug_objects_enabled __read_mostly
- = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int debug_objects_pool_size __read_mostly
- = ODEBUG_POOL_SIZE;
-static int debug_objects_pool_min_level __read_mostly
- = ODEBUG_POOL_MIN_LEVEL;
+static int __data_racy debug_objects_maxchain __read_mostly;
+static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
+static int __data_racy debug_objects_fixups __read_mostly;
+static int __data_racy debug_objects_warnings __read_mostly;
+static int __data_racy debug_objects_enabled __read_mostly
+ = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+static int __data_racy debug_objects_pool_size __read_mostly
+ = ODEBUG_POOL_SIZE;
+static int __data_racy debug_objects_pool_min_level __read_mostly
+ = ODEBUG_POOL_MIN_LEVEL;
+
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache *obj_cache __ro_after_init;
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 4e32f7aaac86..d7e7028e9b19 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -4,6 +4,7 @@
*/
#include <linux/dim.h>
+#include <linux/rtnetlink.h>
/*
* Net DIM profiles:
@@ -11,12 +12,6 @@
* There are different set of profiles for RX/TX CQs.
* Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
*/
-#define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
-#define NET_DIM_DEF_PROFILE_CQE 1
-#define NET_DIM_DEF_PROFILE_EQE 1
-
#define NET_DIM_RX_EQE_PROFILES { \
{.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
{.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
@@ -101,6 +96,143 @@ net_dim_get_def_tx_moderation(u8 cq_period_mode)
}
EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
+int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
+ u8 coal_flags, u8 rx_mode, u8 tx_mode,
+ void (*rx_dim_work)(struct work_struct *work),
+ void (*tx_dim_work)(struct work_struct *work))
+{
+ struct dim_cq_moder *rxp = NULL, *txp;
+ struct dim_irq_moder *moder;
+ int len;
+
+ dev->irq_moder = kzalloc(sizeof(*dev->irq_moder), GFP_KERNEL);
+ if (!dev->irq_moder)
+ return -ENOMEM;
+
+ moder = dev->irq_moder;
+ len = NET_DIM_PARAMS_NUM_PROFILES * sizeof(*moder->rx_profile);
+
+ moder->coal_flags = coal_flags;
+ moder->profile_flags = profile_flags;
+
+ if (profile_flags & DIM_PROFILE_RX) {
+ moder->rx_dim_work = rx_dim_work;
+ moder->dim_rx_mode = rx_mode;
+ rxp = kmemdup(rx_profile[rx_mode], len, GFP_KERNEL);
+ if (!rxp)
+ goto free_moder;
+
+ rcu_assign_pointer(moder->rx_profile, rxp);
+ }
+
+ if (profile_flags & DIM_PROFILE_TX) {
+ moder->tx_dim_work = tx_dim_work;
+ moder->dim_tx_mode = tx_mode;
+ txp = kmemdup(tx_profile[tx_mode], len, GFP_KERNEL);
+ if (!txp)
+ goto free_rxp;
+
+ rcu_assign_pointer(moder->tx_profile, txp);
+ }
+
+ return 0;
+
+free_rxp:
+ kfree(rxp);
+free_moder:
+ kfree(moder);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(net_dim_init_irq_moder);
+
+/* RTNL lock is held. */
+void net_dim_free_irq_moder(struct net_device *dev)
+{
+ struct dim_cq_moder *rxp, *txp;
+
+ if (!dev->irq_moder)
+ return;
+
+ rxp = rtnl_dereference(dev->irq_moder->rx_profile);
+ txp = rtnl_dereference(dev->irq_moder->tx_profile);
+
+ rcu_assign_pointer(dev->irq_moder->rx_profile, NULL);
+ rcu_assign_pointer(dev->irq_moder->tx_profile, NULL);
+
+ kfree_rcu(rxp, rcu);
+ kfree_rcu(txp, rcu);
+ kfree(dev->irq_moder);
+}
+EXPORT_SYMBOL(net_dim_free_irq_moder);
+
+void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx)
+{
+ struct dim_irq_moder *irq_moder = dev->irq_moder;
+
+ if (!irq_moder)
+ return;
+
+ if (is_tx) {
+ INIT_WORK(&dim->work, irq_moder->tx_dim_work);
+ dim->mode = READ_ONCE(irq_moder->dim_tx_mode);
+ return;
+ }
+
+ INIT_WORK(&dim->work, irq_moder->rx_dim_work);
+ dim->mode = READ_ONCE(irq_moder->dim_rx_mode);
+}
+EXPORT_SYMBOL(net_dim_setting);
+
+void net_dim_work_cancel(struct dim *dim)
+{
+ cancel_work_sync(&dim->work);
+}
+EXPORT_SYMBOL(net_dim_work_cancel);
+
+struct dim_cq_moder net_dim_get_rx_irq_moder(struct net_device *dev,
+ struct dim *dim)
+{
+ struct dim_cq_moder res, *profile;
+
+ rcu_read_lock();
+ profile = rcu_dereference(dev->irq_moder->rx_profile);
+ res = profile[dim->profile_ix];
+ rcu_read_unlock();
+
+ res.cq_period_mode = dim->mode;
+
+ return res;
+}
+EXPORT_SYMBOL(net_dim_get_rx_irq_moder);
+
+struct dim_cq_moder net_dim_get_tx_irq_moder(struct net_device *dev,
+ struct dim *dim)
+{
+ struct dim_cq_moder res, *profile;
+
+ rcu_read_lock();
+ profile = rcu_dereference(dev->irq_moder->tx_profile);
+ res = profile[dim->profile_ix];
+ rcu_read_unlock();
+
+ res.cq_period_mode = dim->mode;
+
+ return res;
+}
+EXPORT_SYMBOL(net_dim_get_tx_irq_moder);
+
+void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode)
+{
+ WRITE_ONCE(dev->irq_moder->dim_rx_mode, rx_mode);
+}
+EXPORT_SYMBOL(net_dim_set_rx_mode);
+
+void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode)
+{
+ WRITE_ONCE(dev->irq_moder->dim_tx_mode, tx_mode);
+}
+EXPORT_SYMBOL(net_dim_set_tx_mode);
+
static int net_dim_step(struct dim *dim)
{
if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
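
For orientation, here is a rough driver-side sketch of how the helpers added above might be wired up. The mydrv_* names are hypothetical and this is illustrative kernel-style code rather than a buildable module; it only uses symbols that appear in the hunk plus the existing struct dim / work_struct fields.

/* Hypothetical driver glue, sketch only. */
static void mydrv_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);

	/* fetch and apply the new RX moderation for this queue here ... */
	dim->state = DIM_START_MEASURE;
}

static int mydrv_init_dim(struct net_device *dev)
{
	/* RX-only DIM, EQE-based period mode, no TX handler */
	return net_dim_init_irq_moder(dev, DIM_PROFILE_RX, 0,
				      DIM_CQ_PERIOD_MODE_START_FROM_EQE, 0,
				      mydrv_rx_dim_work, NULL);
}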
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 39da5b3bc649..27ea8bf0252c 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -236,9 +236,6 @@ static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
kfree(p)); \
checker(expected_size, __kmalloc(alloc_size, gfp), \
kfree(p)); \
- checker(expected_size, \
- __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
- kfree(p)); \
\
orig = kmalloc(alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
@@ -377,7 +374,7 @@ static const char * const test_strs[] = {
for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
len = strlen(test_strs[i]); \
KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
- checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \
+ checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \
kfree(p)); \
checker(len, kmemdup(test_strs[i], len, gfp), \
kfree(p)); \
@@ -913,10 +910,9 @@ static void fortify_test_##memfunc(struct kunit *test) \
memfunc(zero.buf, srcB, 0 + unconst); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- /* We currently explicitly ignore zero-sized dests. */ \
memfunc(zero.buf, srcB, 1 + unconst); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
- KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
}
__fortify_test(memcpy)
__fortify_test(memmove)
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 309659a32a78..30f6bbf04a4a 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_KUNIT) += kunit.o
kunit-objs += test.o \
resource.o \
+ user_alloc.o \
static_stub.o \
string-stream.o \
assert.o \
@@ -22,6 +23,7 @@ obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
# string-stream-test compiles built-in only.
ifeq ($(CONFIG_KUNIT_TEST),y)
obj-$(CONFIG_KUNIT_TEST) += string-stream-test.o
+obj-$(CONFIG_KUNIT_TEST) += assert_test.o
endif
obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += kunit-example-test.o
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index dd1d633d0fe2..867aa5c4bccf 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -7,6 +7,7 @@
*/
#include <kunit/assert.h>
#include <kunit/test.h>
+#include <kunit/visibility.h>
#include "string-stream.h"
@@ -30,8 +31,9 @@ void kunit_assert_prologue(const struct kunit_loc *loc,
}
EXPORT_SYMBOL_GPL(kunit_assert_prologue);
-static void kunit_assert_print_msg(const struct va_format *message,
- struct string_stream *stream)
+VISIBLE_IF_KUNIT
+void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream)
{
if (message->fmt)
string_stream_add(stream, "\n%pV", message);
@@ -89,7 +91,7 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
-static bool is_literal(const char *text, long long value)
+VISIBLE_IF_KUNIT bool is_literal(const char *text, long long value)
{
char *buffer;
int len;
@@ -166,7 +168,7 @@ EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
/* Checks if KUNIT_EXPECT_STREQ() args were string literals.
* Note: `text` will have ""s where as `value` will not.
*/
-static bool is_str_literal(const char *text, const char *value)
+VISIBLE_IF_KUNIT bool is_str_literal(const char *text, const char *value)
{
int len;
@@ -208,10 +210,11 @@ EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
/* Adds a hexdump of a buffer to a string_stream comparing it with
* a second buffer. The different bytes are marked with <>.
*/
-static void kunit_assert_hexdump(struct string_stream *stream,
- const void *buf,
- const void *compared_buf,
- const size_t len)
+VISIBLE_IF_KUNIT
+void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len)
{
size_t i;
const u8 *buf1 = buf;
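
The VISIBLE_IF_KUNIT annotations above drop the static qualifier only when the KUnit tests are built, which is what lets the new assert_test.c below call these helpers directly. A stand-alone sketch of that visibility trick (the macro and config names here are hypothetical, not the kernel's exact definition):

/*
 * Sketch only: the helper is private in normal builds and becomes
 * externally visible when the test build defines the switch.
 */
#include <stdio.h>

/* #define CONFIG_UNIT_TESTS 1 */	/* set by the test build */

#ifdef CONFIG_UNIT_TESTS
#define VISIBLE_IF_TEST			/* visible to the test code */
#else
#define VISIBLE_IF_TEST static		/* private otherwise */
#endif

VISIBLE_IF_TEST int is_decimal_literal(const char *text)
{
	if (!*text)
		return 0;
	for (; *text; text++)
		if (*text < '0' || *text > '9')
			return 0;
	return 1;
}

int main(void)
{
	printf("%d %d\n", is_decimal_literal("1234"), is_decimal_literal("12#4"));
	return 0;
}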
diff --git a/lib/kunit/assert_test.c b/lib/kunit/assert_test.c
new file mode 100644
index 000000000000..4a5967712186
--- /dev/null
+++ b/lib/kunit/assert_test.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * KUnit test for the assertion formatting functions.
+ * Author: Ivan Orlov <ivan.orlov0322@gmail.com>
+ */
+#include <kunit/test.h>
+#include "string-stream.h"
+
+#define TEST_PTR_EXPECTED_BUF_SIZE 32
+#define HEXDUMP_TEST_BUF_LEN 5
+#define ASSERT_TEST_EXPECT_CONTAIN(test, str, substr) KUNIT_EXPECT_TRUE(test, strstr(str, substr))
+#define ASSERT_TEST_EXPECT_NCONTAIN(test, str, substr) KUNIT_EXPECT_FALSE(test, strstr(str, substr))
+
+static void kunit_test_is_literal(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE(test, is_literal("5", 5));
+ KUNIT_EXPECT_TRUE(test, is_literal("0", 0));
+ KUNIT_EXPECT_TRUE(test, is_literal("1234567890", 1234567890));
+ KUNIT_EXPECT_TRUE(test, is_literal("-1234567890", -1234567890));
+ KUNIT_EXPECT_FALSE(test, is_literal("05", 5));
+ KUNIT_EXPECT_FALSE(test, is_literal("", 0));
+ KUNIT_EXPECT_FALSE(test, is_literal("-0", 0));
+ KUNIT_EXPECT_FALSE(test, is_literal("12#45", 1245));
+}
+
+static void kunit_test_is_str_literal(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE(test, is_str_literal("\"Hello, World!\"", "Hello, World!"));
+ KUNIT_EXPECT_TRUE(test, is_str_literal("\"\"", ""));
+ KUNIT_EXPECT_TRUE(test, is_str_literal("\"\"\"", "\""));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("", ""));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("\"", "\""));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("\"Abacaba", "Abacaba"));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("Abacaba\"", "Abacaba"));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("\"Abacaba\"", "\"Abacaba\""));
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
+
+/* this function is used to get a "char *" string from the string stream and defer its cleanup */
+static char *get_str_from_stream(struct kunit *test, struct string_stream *stream)
+{
+ char *str = string_stream_get_string(stream);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ kunit_add_action(test, kfree_wrapper, (void *)str);
+
+ return str;
+}
+
+static void kunit_test_assert_prologue(struct kunit *test)
+{
+ struct string_stream *stream;
+ char *str;
+ const struct kunit_loc location = {
+ .file = "testfile.c",
+ .line = 1337,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* Test an expectation fail prologue */
+ kunit_assert_prologue(&location, KUNIT_EXPECTATION, stream);
+ str = get_str_from_stream(test, stream);
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "EXPECTATION");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "testfile.c");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "1337");
+
+ /* Test an assertion fail prologue */
+ string_stream_clear(stream);
+ kunit_assert_prologue(&location, KUNIT_ASSERTION, stream);
+ str = get_str_from_stream(test, stream);
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "ASSERTION");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "testfile.c");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "1337");
+}
+
+/*
+ * This function accepts an arbitrary count of parameters and generates a va_format struct,
+ * which can be used to validate kunit_assert_print_msg function
+ */
+static void verify_assert_print_msg(struct kunit *test,
+ struct string_stream *stream,
+ char *expected, const char *format, ...)
+{
+ va_list list;
+ const struct va_format vformat = {
+ .fmt = format,
+ .va = &list,
+ };
+
+ va_start(list, format);
+ string_stream_clear(stream);
+ kunit_assert_print_msg(&vformat, stream);
+ KUNIT_EXPECT_STREQ(test, get_str_from_stream(test, stream), expected);
+}
+
+static void kunit_test_assert_print_msg(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ verify_assert_print_msg(test, stream, "\nTest", "Test");
+ verify_assert_print_msg(test, stream, "\nAbacaba -123 234", "%s %d %u",
+ "Abacaba", -123, 234U);
+ verify_assert_print_msg(test, stream, "", NULL);
+}
+
+/*
+ * Further code contains the tests for different assert format functions.
+ * This helper function accepts the assert format function, executes it and
+ * validates the result string from the stream by checking that all of the
+ * substrings exist in the output.
+ */
+static void validate_assert(assert_format_t format_func, struct kunit *test,
+ const struct kunit_assert *assert,
+ struct string_stream *stream, int num_checks, ...)
+{
+ size_t i;
+ va_list checks;
+ char *cur_substr_exp;
+ struct va_format message = { NULL, NULL };
+
+ va_start(checks, num_checks);
+ string_stream_clear(stream);
+ format_func(assert, &message, stream);
+
+ for (i = 0; i < num_checks; i++) {
+ cur_substr_exp = va_arg(checks, char *);
+ ASSERT_TEST_EXPECT_CONTAIN(test, get_str_from_stream(test, stream), cur_substr_exp);
+ }
+}
+
+static void kunit_test_unary_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ struct kunit_unary_assert un_assert = {
+ .assert = assert,
+ .condition = "expr",
+ .expected_true = true,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ validate_assert(kunit_unary_assert_format, test, &un_assert.assert,
+ stream, 2, "true", "is false");
+
+ un_assert.expected_true = false;
+ validate_assert(kunit_unary_assert_format, test, &un_assert.assert,
+ stream, 2, "false", "is true");
+}
+
+static void kunit_test_ptr_not_err_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ struct kunit_ptr_not_err_assert not_err_assert = {
+ .assert = assert,
+ .text = "expr",
+ .value = NULL,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* Value is NULL. The corresponding message should be printed out */
+ validate_assert(kunit_ptr_not_err_assert_format, test,
+ &not_err_assert.assert,
+ stream, 1, "null");
+
+ /* Value is not NULL, but looks like an error pointer. Error should be printed out */
+ not_err_assert.value = (void *)-12;
+ validate_assert(kunit_ptr_not_err_assert_format, test,
+ &not_err_assert.assert, stream, 2,
+ "error", "-12");
+}
+
+static void kunit_test_binary_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ struct kunit_binary_assert_text text = {
+ .left_text = "1 + 2",
+ .operation = "==",
+ .right_text = "2",
+ };
+ const struct kunit_binary_assert binary_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = 3,
+ .right_value = 2,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /*
+ * Printed values should depend on the input we provide: the left text,
+ * right text, left value, and right value.
+ */
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 4, "1 + 2", "2", "3", "==");
+
+ text.right_text = "4 - 2";
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 3, "==", "1 + 2", "4 - 2");
+
+ text.left_text = "3";
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 4, "3", "4 - 2", "2", "==");
+
+ text.right_text = "2";
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 3, "3", "2", "==");
+}
+
+static void kunit_test_binary_ptr_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ char *addr_var_a, *addr_var_b;
+ static const void *var_a = (void *)0xDEADBEEF;
+ static const void *var_b = (void *)0xBADDCAFE;
+ struct kunit_binary_assert_text text = {
+ .left_text = "var_a",
+ .operation = "==",
+ .right_text = "var_b",
+ };
+ struct kunit_binary_ptr_assert binary_ptr_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = var_a,
+ .right_value = var_b,
+ };
+
+ addr_var_a = kunit_kzalloc(test, TEST_PTR_EXPECTED_BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, addr_var_a);
+ addr_var_b = kunit_kzalloc(test, TEST_PTR_EXPECTED_BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, addr_var_b);
+ /*
+ * Print the addresses to the buffers first.
+ * This is necessary because the number of leading zeros in a pointer
+ * may differ between architectures.
+ */
+ snprintf(addr_var_a, TEST_PTR_EXPECTED_BUF_SIZE, "%px", var_a);
+ snprintf(addr_var_b, TEST_PTR_EXPECTED_BUF_SIZE, "%px", var_b);
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+ validate_assert(kunit_binary_ptr_assert_format, test, &binary_ptr_assert.assert,
+ stream, 3, addr_var_a, addr_var_b, "==");
+}
+
+static void kunit_test_binary_str_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ static const char *var_a = "abacaba";
+ static const char *var_b = "kernel";
+ struct kunit_binary_assert_text text = {
+ .left_text = "var_a",
+ .operation = "==",
+ .right_text = "var_b",
+ };
+ struct kunit_binary_str_assert binary_str_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = var_a,
+ .right_value = var_b,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ validate_assert(kunit_binary_str_assert_format, test,
+ &binary_str_assert.assert,
+ stream, 5, "var_a", "var_b", "\"abacaba\"",
+ "\"kernel\"", "==");
+
+ text.left_text = "\"abacaba\"";
+ validate_assert(kunit_binary_str_assert_format, test, &binary_str_assert.assert,
+ stream, 4, "\"abacaba\"", "var_b", "\"kernel\"", "==");
+
+ text.right_text = "\"kernel\"";
+ validate_assert(kunit_binary_str_assert_format, test, &binary_str_assert.assert,
+ stream, 3, "\"abacaba\"", "\"kernel\"", "==");
+}
+
+static const u8 hex_testbuf1[] = { 0x26, 0x74, 0x6b, 0x9c, 0x55,
+ 0x45, 0x9d, 0x47, 0xd6, 0x47,
+ 0x2, 0x89, 0x8c, 0x81, 0x94,
+ 0x12, 0xfe, 0x01 };
+static const u8 hex_testbuf2[] = { 0x26, 0x74, 0x6b, 0x9c, 0x55,
+ 0x45, 0x9d, 0x47, 0x21, 0x47,
+ 0xcd, 0x89, 0x24, 0x50, 0x94,
+ 0x12, 0xba, 0x01 };
+static void kunit_test_assert_hexdump(struct kunit *test)
+{
+ struct string_stream *stream;
+ char *str;
+ size_t i;
+ char buf[HEXDUMP_TEST_BUF_LEN];
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+ /* Check that we are getting output like <xx> for non-matching bytes. */
+ kunit_assert_hexdump(stream, hex_testbuf1, hex_testbuf2, sizeof(hex_testbuf1));
+ str = get_str_from_stream(test, stream);
+ for (i = 0; i < sizeof(hex_testbuf1); i++) {
+ snprintf(buf, HEXDUMP_TEST_BUF_LEN, "<%02x>", hex_testbuf1[i]);
+ if (hex_testbuf1[i] != hex_testbuf2[i])
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, buf);
+ }
+ /* We shouldn't get any <xx> markers when comparing the buffer with itself. */
+ string_stream_clear(stream);
+ kunit_assert_hexdump(stream, hex_testbuf1, hex_testbuf1, sizeof(hex_testbuf1));
+ str = get_str_from_stream(test, stream);
+ ASSERT_TEST_EXPECT_NCONTAIN(test, str, "<");
+ ASSERT_TEST_EXPECT_NCONTAIN(test, str, ">");
+}
+
+static void kunit_test_mem_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct string_stream *expected_stream;
+ struct kunit_assert assert = {};
+ static const struct kunit_binary_assert_text text = {
+ .left_text = "hex_testbuf1",
+ .operation = "==",
+ .right_text = "hex_testbuf2",
+ };
+ struct kunit_mem_assert mem_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = NULL,
+ .right_value = hex_testbuf2,
+ .size = sizeof(hex_testbuf1),
+ };
+
+ expected_stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_stream);
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* The left value is NULL */
+ validate_assert(kunit_mem_assert_format, test, &mem_assert.assert,
+ stream, 2, "hex_testbuf1", "is not null");
+
+ /* The right value is NULL, the left value is not NULL */
+ mem_assert.left_value = hex_testbuf1;
+ mem_assert.right_value = NULL;
+ validate_assert(kunit_mem_assert_format, test, &mem_assert.assert,
+ stream, 2, "hex_testbuf2", "is not null");
+
+ /* Neither argument is NULL */
+ mem_assert.left_value = hex_testbuf1;
+ mem_assert.right_value = hex_testbuf2;
+
+ validate_assert(kunit_mem_assert_format, test, &mem_assert.assert,
+ stream, 3, "hex_testbuf1", "hex_testbuf2", "==");
+}
+
+static struct kunit_case assert_test_cases[] = {
+ KUNIT_CASE(kunit_test_is_literal),
+ KUNIT_CASE(kunit_test_is_str_literal),
+ KUNIT_CASE(kunit_test_assert_prologue),
+ KUNIT_CASE(kunit_test_assert_print_msg),
+ KUNIT_CASE(kunit_test_unary_assert_format),
+ KUNIT_CASE(kunit_test_ptr_not_err_assert_format),
+ KUNIT_CASE(kunit_test_binary_assert_format),
+ KUNIT_CASE(kunit_test_binary_ptr_assert_format),
+ KUNIT_CASE(kunit_test_binary_str_assert_format),
+ KUNIT_CASE(kunit_test_assert_hexdump),
+ KUNIT_CASE(kunit_test_mem_assert_format),
+ {}
+};
+
+static struct kunit_suite assert_test_suite = {
+ .name = "kunit-assert",
+ .test_cases = assert_test_cases,
+};
+
+kunit_test_suites(&assert_test_suite);
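For reference, a suite registered this way is normally run through the KUnit wrapper, e.g. ./tools/testing/kunit/kunit.py run "kunit-assert" from the top of the kernel tree; the exact invocation depends on the local .kunitconfig, and the filter string is simply the .name field set above.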
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 70b9a43cd257..34b7b6833df3 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -70,32 +70,26 @@ struct kunit_glob_filter {
static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
- const int len = strlen(filter_glob);
const char *period = strchr(filter_glob, '.');
if (!period) {
- parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+ parsed->suite_glob = kstrdup(filter_glob, GFP_KERNEL);
if (!parsed->suite_glob)
return -ENOMEM;
-
parsed->test_glob = NULL;
- strcpy(parsed->suite_glob, filter_glob);
return 0;
}
- parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+ parsed->suite_glob = kstrndup(filter_glob, period - filter_glob, GFP_KERNEL);
if (!parsed->suite_glob)
return -ENOMEM;
- parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+ parsed->test_glob = kstrdup(period + 1, GFP_KERNEL);
if (!parsed->test_glob) {
kfree(parsed->suite_glob);
return -ENOMEM;
}
- strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
- strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
-
return 0;
}
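The simplification above relies on kstrndup() bounding its copy and always NUL-terminating, which is what the removed kzalloc() + strncpy() pairs did by hand. A minimal sketch of the resulting split, using a hypothetical filter string and omitting the error handling shown above:

        /* Split a "suite.test" style filter into its two globs. */
        const char *filter_glob = "list*.test_*";        /* hypothetical input */
        const char *period = strchr(filter_glob, '.');
        char *suite_glob = kstrndup(filter_glob, period - filter_glob, GFP_KERNEL); /* "list*"  */
        char *test_glob = kstrdup(period + 1, GFP_KERNEL);                          /* "test_*" */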
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index 3f7f967e3688..f0090c2729cd 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -286,7 +286,7 @@ static struct kunit_suite *alloc_fake_suite(struct kunit *test,
/* We normally never expect to allocate suites, hence the non-const cast. */
suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
- strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+ strscpy((char *)suite->name, suite_name, sizeof(suite->name));
suite->test_cases = test_cases;
return suite;
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 798924f7cc86..3056d6bc705d 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -374,4 +374,5 @@ static struct kunit_suite example_init_test_suite = {
*/
kunit_test_init_section_suites(&example_init_test_suite);
+MODULE_DESCRIPTION("Example KUnit test suite");
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index e3412e0ca399..37e02be1e710 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -871,4 +871,5 @@ kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_current_test_suite, &kunit_device_test_suite,
&kunit_fault_test_suite);
+MODULE_DESCRIPTION("KUnit test for core test infrastructure");
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index b8514dbb337c..e8b1b52a19ab 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -938,4 +938,5 @@ static void __exit kunit_exit(void)
}
module_exit(kunit_exit);
+MODULE_DESCRIPTION("Base unit test (KUnit) API");
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/user_alloc.c b/lib/kunit/user_alloc.c
new file mode 100644
index 000000000000..ae935df09a5e
--- /dev/null
+++ b/lib/kunit/user_alloc.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit userspace memory allocation resource management.
+ */
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+
+struct kunit_vm_mmap_resource {
+ unsigned long addr;
+ size_t size;
+};
+
+/* vm_mmap() arguments */
+struct kunit_vm_mmap_params {
+ struct file *file;
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flag;
+ unsigned long offset;
+};
+
+/* Create and attach a new mm if it doesn't already exist. */
+static int kunit_attach_mm(void)
+{
+ struct mm_struct *mm;
+
+ if (current->mm)
+ return 0;
+
+ /* arch_pick_mmap_layout() is only sane with MMU systems. */
+ if (!IS_ENABLED(CONFIG_MMU))
+ return -EINVAL;
+
+ mm = mm_alloc();
+ if (!mm)
+ return -ENOMEM;
+
+ /* Define the task size. */
+ mm->task_size = TASK_SIZE;
+
+ /* Make sure we can allocate new VMAs. */
+ arch_pick_mmap_layout(mm, &current->signal->rlim[RLIMIT_STACK]);
+
+ /* Attach the mm. It will be cleaned up when the process dies. */
+ kthread_use_mm(mm);
+
+ return 0;
+}
+
+static int kunit_vm_mmap_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_vm_mmap_params *p = context;
+ struct kunit_vm_mmap_resource vres;
+ int ret;
+
+ ret = kunit_attach_mm();
+ if (ret)
+ return ret;
+
+ vres.size = p->len;
+ vres.addr = vm_mmap(p->file, p->addr, p->len, p->prot, p->flag, p->offset);
+ if (!vres.addr)
+ return -ENOMEM;
+ res->data = kmemdup(&vres, sizeof(vres), GFP_KERNEL);
+ if (!res->data) {
+ vm_munmap(vres.addr, vres.size);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void kunit_vm_mmap_free(struct kunit_resource *res)
+{
+ struct kunit_vm_mmap_resource *vres = res->data;
+
+ /*
+ * Since this is executed from the test monitoring process,
+ * the test's mm has already been torn down. We don't need
+ * to run vm_munmap(vres->addr, vres->size), only clean up
+ * the vres.
+ */
+
+ kfree(vres);
+ res->data = NULL;
+}
+
+unsigned long kunit_vm_mmap(struct kunit *test, struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flag,
+ unsigned long offset)
+{
+ struct kunit_vm_mmap_params params = {
+ .file = file,
+ .addr = addr,
+ .len = len,
+ .prot = prot,
+ .flag = flag,
+ .offset = offset,
+ };
+ struct kunit_vm_mmap_resource *vres;
+
+ vres = kunit_alloc_resource(test,
+ kunit_vm_mmap_init,
+ kunit_vm_mmap_free,
+ GFP_KERNEL,
+ &params);
+ if (vres)
+ return vres->addr;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_vm_mmap);
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
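A test would typically call kunit_vm_mmap() from its init hook or a test case and rely on the resource teardown above for cleanup. A minimal sketch under those assumptions (the function name and mapping size are illustrative; the real consumer is usercopy_kunit.c further down):

        static int example_test_init(struct kunit *test)
        {
                unsigned long uaddr;

                /* Map two anonymous, writable pages backed by a KUnit-managed mm. */
                uaddr = kunit_vm_mmap(test, NULL, 0, 2 * PAGE_SIZE,
                                      PROT_READ | PROT_WRITE,
                                      MAP_ANONYMOUS | MAP_PRIVATE, 0);
                KUNIT_ASSERT_NE(test, uaddr, 0);

                /* Stash uaddr (e.g. in test->priv) for the test cases to use. */
                return 0;
        }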
diff --git a/lib/list-test.c b/lib/list-test.c
index 0cc27de9cec8..37cbc33e9fdb 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -1201,12 +1201,6 @@ static struct kunit_suite hlist_test_module = {
};
-struct klist_test_struct {
- int data;
- struct klist klist;
- struct klist_node klist_node;
-};
-
static int node_count;
static struct klist_node *last_node;
@@ -1499,4 +1493,5 @@ static struct kunit_suite klist_test_module = {
kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module);
+MODULE_DESCRIPTION("KUnit test for the Kernel Linked-list structures");
MODULE_LICENSE("GPL v2");
diff --git a/lib/objagg.c b/lib/objagg.c
index 1e248629ed64..363e43e849ac 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg,
{
void *delta_priv;
+ if (WARN_ON(!objagg_obj_is_root(parent)))
+ return -EINVAL;
+
delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
objagg_obj->obj);
if (IS_ERR(delta_priv))
@@ -421,7 +424,7 @@ static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
*
* There are 3 main options this function wraps:
* 1) The object according to "obj" already exist. In that case
- * the reference counter is incrementes and the object is returned.
+ * the reference counter is incremented and the object is returned.
* 2) The object does not exist, but it can be aggregated within
* another object. In that case, user ops->delta_create() is called
* to obtain delta data and a new object is created with returned
@@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = {
[OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
};
-static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- struct rhashtable *ht = arg->ht;
- struct objagg_hints *objagg_hints =
- container_of(ht, struct objagg_hints, node_ht);
- const struct objagg_ops *ops = objagg_hints->ops;
- const char *ptr = obj;
-
- ptr += ht->p.key_offset;
- return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
- memcmp(ptr, arg->key, ht->p.key_len);
-}
-
/**
* objagg_hints_get - obtains hints instance
* @objagg: objagg instance
@@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
offsetof(struct objagg_hints_node, obj);
objagg_hints->ht_params.head_offset =
offsetof(struct objagg_hints_node, ht_node);
- objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
if (err)
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 4ef31b0bb74d..d305b0c054bb 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -1178,14 +1178,28 @@ struct foo {
s16 array[] __counted_by(counter);
};
+struct bar {
+ int a;
+ u32 counter;
+ s16 array[];
+};
+
static void DEFINE_FLEX_test(struct kunit *test)
{
- DEFINE_RAW_FLEX(struct foo, two, array, 2);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+#if __has_attribute(__counted_by__)
+ int expected_raw_size = sizeof(struct foo);
+#else
+ int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
+#endif
+ /* Without the annotation, it will always be the full on-stack size. */
+ DEFINE_RAW_FLEX(struct bar, two, array, 2);
DEFINE_FLEX(struct foo, eight, array, counter, 8);
DEFINE_FLEX(struct foo, empty, array, counter, 0);
- KUNIT_EXPECT_EQ(test, __struct_size(two),
- sizeof(struct foo) + sizeof(s16) + sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size);
+ KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
}
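For context on the expectations above: DEFINE_FLEX() declares an on-stack object sized for a compile-time-constant number of trailing elements and initializes the __counted_by counter to that bound, so __struct_size() resolves to the header plus the declared elements. A short illustrative sketch reusing struct foo from this test:

        DEFINE_FLEX(struct foo, pkt, array, counter, 4);        /* pkt->counter == 4 */

        pkt->array[0] = 12;     /* in bounds: index < pkt->counter */
        /* __struct_size(pkt) is sizeof(struct foo) + 4 * sizeof(s16) here. */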
diff --git a/lib/string_helpers_kunit.c b/lib/string_helpers_kunit.c
index f88e39fd68d6..c853046183d2 100644
--- a/lib/string_helpers_kunit.c
+++ b/lib/string_helpers_kunit.c
@@ -625,4 +625,5 @@ static struct kunit_suite string_helpers_test_suite = {
kunit_test_suites(&string_helpers_test_suite);
+MODULE_DESCRIPTION("Test cases for string helpers module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
index 2a812decf14b..c919e3293da6 100644
--- a/lib/string_kunit.c
+++ b/lib/string_kunit.c
@@ -633,4 +633,5 @@ static struct kunit_suite string_test_suite = {
kunit_test_suites(&string_test_suite);
+MODULE_DESCRIPTION("Test cases for string functions");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 207ff87194db..b7acc29bcc3b 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -15198,6 +15198,7 @@ struct tail_call_test {
int flags;
int result;
int stack_depth;
+ bool has_tail_call;
};
/* Flags that can be passed to tail call test cases */
@@ -15273,6 +15274,7 @@ static struct tail_call_test tail_call_tests[] = {
BPF_EXIT_INSN(),
},
.result = 3,
+ .has_tail_call = true,
},
{
"Tail call 3",
@@ -15283,6 +15285,7 @@ static struct tail_call_test tail_call_tests[] = {
BPF_EXIT_INSN(),
},
.result = 6,
+ .has_tail_call = true,
},
{
"Tail call 4",
@@ -15293,6 +15296,7 @@ static struct tail_call_test tail_call_tests[] = {
BPF_EXIT_INSN(),
},
.result = 10,
+ .has_tail_call = true,
},
{
"Tail call load/store leaf",
@@ -15323,6 +15327,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.result = 0,
.stack_depth = 16,
+ .has_tail_call = true,
},
{
"Tail call error path, max count reached",
@@ -15335,6 +15340,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ .has_tail_call = true,
},
{
"Tail call count preserved across function calls",
@@ -15357,6 +15363,7 @@ static struct tail_call_test tail_call_tests[] = {
.stack_depth = 8,
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ .has_tail_call = true,
},
{
"Tail call error path, NULL target",
@@ -15369,6 +15376,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = MAX_TESTRUNS,
+ .has_tail_call = true,
},
{
"Tail call error path, index out of range",
@@ -15381,6 +15389,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = MAX_TESTRUNS,
+ .has_tail_call = true,
},
};
@@ -15430,6 +15439,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
fp->len = len;
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
fp->aux->stack_depth = test->stack_depth;
+ fp->aux->tail_call_reachable = test->has_tail_call;
memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
/* Relocate runtime tail call offsets and addresses */
@@ -15706,4 +15716,5 @@ static void __exit test_bpf_exit(void)
module_init(test_bpf_init);
module_exit(test_bpf_exit);
+MODULE_DESCRIPTION("Testsuite for BPF interpreter and BPF JIT compiler");
MODULE_LICENSE("GPL");
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index c0c957c50635..d34df4306b87 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -60,7 +60,7 @@ static struct objagg_obj *world_obj_get(struct world *world,
if (!world->key_refs[key_id_index(key_id)]) {
world->objagg_objs[key_id_index(key_id)] = objagg_obj;
} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
- pr_err("Key %u: God another object for the same key.\n",
+ pr_err("Key %u: Got another object for the same key.\n",
key_id);
err = -EINVAL;
goto err_key_id_check;
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 42b585208249..c63db03ebb9d 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -811,4 +811,5 @@ static void __exit test_rht_exit(void)
module_init(test_rht_init);
module_exit(test_rht_exit);
+MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
deleted file mode 100644
index 5ff04d8fe971..000000000000
--- a/lib/test_user_copy.c
+++ /dev/null
@@ -1,331 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Kernel module for testing copy_to/from_user infrastructure.
- *
- * Copyright 2013 Google Inc. All Rights Reserved
- *
- * Authors:
- * Kees Cook <keescook@chromium.org>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-/*
- * Several 32-bit architectures support 64-bit {get,put}_user() calls.
- * As there doesn't appear to be anything that can safely determine
- * their capability at compile-time, we just have to opt-out certain archs.
- */
-#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
- !defined(CONFIG_M68K) && \
- !defined(CONFIG_MICROBLAZE) && \
- !defined(CONFIG_NIOS2) && \
- !defined(CONFIG_PPC32) && \
- !defined(CONFIG_SUPERH))
-# define TEST_U64
-#endif
-
-#define test(condition, msg, ...) \
-({ \
- int cond = (condition); \
- if (cond) \
- pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__); \
- cond; \
-})
-
-static bool is_zeroed(void *from, size_t size)
-{
- return memchr_inv(from, 0x0, size) == NULL;
-}
-
-static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
-{
- int ret = 0;
- size_t start, end, i, zero_start, zero_end;
-
- if (test(size < 2 * PAGE_SIZE, "buffer too small"))
- return -EINVAL;
-
- /*
- * We want to cross a page boundary to exercise the code more
- * effectively. We also don't want to make the size we scan too large,
- * otherwise the test can take a long time and cause soft lockups. So
- * scan a 1024 byte region across the page boundary.
- */
- size = 1024;
- start = PAGE_SIZE - (size / 2);
-
- kmem += start;
- umem += start;
-
- zero_start = size / 4;
- zero_end = size - zero_start;
-
- /*
- * We conduct a series of check_nonzero_user() tests on a block of
- * memory with the following byte-pattern (trying every possible
- * [start,end] pair):
- *
- * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
- *
- * And we verify that check_nonzero_user() acts identically to
- * memchr_inv().
- */
-
- memset(kmem, 0x0, size);
- for (i = 1; i < zero_start; i += 2)
- kmem[i] = 0xff;
- for (i = zero_end; i < size; i += 2)
- kmem[i] = 0xff;
-
- ret |= test(copy_to_user(umem, kmem, size),
- "legitimate copy_to_user failed");
-
- for (start = 0; start <= size; start++) {
- for (end = start; end <= size; end++) {
- size_t len = end - start;
- int retval = check_zeroed_user(umem + start, len);
- int expected = is_zeroed(kmem + start, len);
-
- ret |= test(retval != expected,
- "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
- retval, expected, start, end);
- }
- }
-
- return ret;
-}
-
-static int test_copy_struct_from_user(char *kmem, char __user *umem,
- size_t size)
-{
- int ret = 0;
- char *umem_src = NULL, *expected = NULL;
- size_t ksize, usize;
-
- umem_src = kmalloc(size, GFP_KERNEL);
- ret = test(umem_src == NULL, "kmalloc failed");
- if (ret)
- goto out_free;
-
- expected = kmalloc(size, GFP_KERNEL);
- ret = test(expected == NULL, "kmalloc failed");
- if (ret)
- goto out_free;
-
- /* Fill umem with a fixed byte pattern. */
- memset(umem_src, 0x3e, size);
- ret |= test(copy_to_user(umem, umem_src, size),
- "legitimate copy_to_user failed");
-
- /* Check basic case -- (usize == ksize). */
- ksize = size;
- usize = size;
-
- memcpy(expected, umem_src, ksize);
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
- "copy_struct_from_user(usize == ksize) failed");
- ret |= test(memcmp(kmem, expected, ksize),
- "copy_struct_from_user(usize == ksize) gives unexpected copy");
-
- /* Old userspace case -- (usize < ksize). */
- ksize = size;
- usize = size / 2;
-
- memcpy(expected, umem_src, usize);
- memset(expected + usize, 0x0, ksize - usize);
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
- "copy_struct_from_user(usize < ksize) failed");
- ret |= test(memcmp(kmem, expected, ksize),
- "copy_struct_from_user(usize < ksize) gives unexpected copy");
-
- /* New userspace (-E2BIG) case -- (usize > ksize). */
- ksize = size / 2;
- usize = size;
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
- "copy_struct_from_user(usize > ksize) didn't give E2BIG");
-
- /* New userspace (success) case -- (usize > ksize). */
- ksize = size / 2;
- usize = size;
-
- memcpy(expected, umem_src, ksize);
- ret |= test(clear_user(umem + ksize, usize - ksize),
- "legitimate clear_user failed");
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
- "copy_struct_from_user(usize > ksize) failed");
- ret |= test(memcmp(kmem, expected, ksize),
- "copy_struct_from_user(usize > ksize) gives unexpected copy");
-
-out_free:
- kfree(expected);
- kfree(umem_src);
- return ret;
-}
-
-static int __init test_user_copy_init(void)
-{
- int ret = 0;
- char *kmem;
- char __user *usermem;
- char *bad_usermem;
- unsigned long user_addr;
- u8 val_u8;
- u16 val_u16;
- u32 val_u32;
-#ifdef TEST_U64
- u64 val_u64;
-#endif
-
- kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
- if (!kmem)
- return -ENOMEM;
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= (unsigned long)(TASK_SIZE)) {
- pr_warn("Failed to allocate user memory\n");
- kfree(kmem);
- return -ENOMEM;
- }
-
- usermem = (char __user *)user_addr;
- bad_usermem = (char *)user_addr;
-
- /*
- * Legitimate usage: none of these copies should fail.
- */
- memset(kmem, 0x3a, PAGE_SIZE * 2);
- ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
- "legitimate copy_to_user failed");
- memset(kmem, 0x0, PAGE_SIZE);
- ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
- "legitimate copy_from_user failed");
- ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
- "legitimate usercopy failed to copy data");
-
-#define test_legit(size, check) \
- do { \
- val_##size = check; \
- ret |= test(put_user(val_##size, (size __user *)usermem), \
- "legitimate put_user (" #size ") failed"); \
- val_##size = 0; \
- ret |= test(get_user(val_##size, (size __user *)usermem), \
- "legitimate get_user (" #size ") failed"); \
- ret |= test(val_##size != check, \
- "legitimate get_user (" #size ") failed to do copy"); \
- if (val_##size != check) { \
- pr_info("0x%llx != 0x%llx\n", \
- (unsigned long long)val_##size, \
- (unsigned long long)check); \
- } \
- } while (0)
-
- test_legit(u8, 0x5a);
- test_legit(u16, 0x5a5b);
- test_legit(u32, 0x5a5b5c5d);
-#ifdef TEST_U64
- test_legit(u64, 0x5a5b5c5d6a6b6c6d);
-#endif
-#undef test_legit
-
- /* Test usage of check_nonzero_user(). */
- ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
- /* Test usage of copy_struct_from_user(). */
- ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
-
- /*
- * Invalid usage: none of these copies should succeed.
- */
-
- /* Prepare kernel memory with check values. */
- memset(kmem, 0x5a, PAGE_SIZE);
- memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
-
- /* Reject kernel-to-kernel copies through copy_from_user(). */
- ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
- PAGE_SIZE),
- "illegal all-kernel copy_from_user passed");
-
- /* Destination half of buffer should have been zeroed. */
- ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
- "zeroing failure for illegal all-kernel copy_from_user");
-
-#if 0
- /*
- * When running with SMAP/PAN/etc, this will Oops the kernel
- * due to the zeroing of userspace memory on failure. This needs
- * to be tested in LKDTM instead, since this test module does not
- * expect to explode.
- */
- ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
- PAGE_SIZE),
- "illegal reversed copy_from_user passed");
-#endif
- ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
- PAGE_SIZE),
- "illegal all-kernel copy_to_user passed");
- ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
- PAGE_SIZE),
- "illegal reversed copy_to_user passed");
-
-#define test_illegal(size, check) \
- do { \
- val_##size = (check); \
- ret |= test(!get_user(val_##size, (size __user *)kmem), \
- "illegal get_user (" #size ") passed"); \
- ret |= test(val_##size != (size)0, \
- "zeroing failure for illegal get_user (" #size ")"); \
- if (val_##size != (size)0) { \
- pr_info("0x%llx != 0\n", \
- (unsigned long long)val_##size); \
- } \
- ret |= test(!put_user(val_##size, (size __user *)kmem), \
- "illegal put_user (" #size ") passed"); \
- } while (0)
-
- test_illegal(u8, 0x5a);
- test_illegal(u16, 0x5a5b);
- test_illegal(u32, 0x5a5b5c5d);
-#ifdef TEST_U64
- test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
-#endif
-#undef test_illegal
-
- vm_munmap(user_addr, PAGE_SIZE * 2);
- kfree(kmem);
-
- if (ret == 0) {
- pr_info("tests passed.\n");
- return 0;
- }
-
- return -EINVAL;
-}
-
-module_init(test_user_copy_init);
-
-static void __exit test_user_copy_exit(void)
-{
- pr_info("unloaded.\n");
-}
-
-module_exit(test_user_copy_exit);
-
-MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
-MODULE_LICENSE("GPL");
diff --git a/lib/usercopy_kunit.c b/lib/usercopy_kunit.c
new file mode 100644
index 000000000000..77fa00a13df7
--- /dev/null
+++ b/lib/usercopy_kunit.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing copy_to/from_user infrastructure.
+ *
+ * Copyright 2013 Google Inc. All Rights Reserved
+ *
+ * Authors:
+ * Kees Cook <keescook@chromium.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <kunit/test.h>
+
+/*
+ * Several 32-bit architectures support 64-bit {get,put}_user() calls.
+ * As there doesn't appear to be anything that can safely determine
+ * their capability at compile time, we just have to opt out certain archs.
+ */
+#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
+ !defined(CONFIG_M68K) && \
+ !defined(CONFIG_MICROBLAZE) && \
+ !defined(CONFIG_NIOS2) && \
+ !defined(CONFIG_PPC32) && \
+ !defined(CONFIG_SUPERH))
+# define TEST_U64
+#endif
+
+struct usercopy_test_priv {
+ char *kmem;
+ char __user *umem;
+ size_t size;
+};
+
+static bool is_zeroed(void *from, size_t size)
+{
+ return memchr_inv(from, 0x0, size) == NULL;
+}
+
+/* Test usage of check_nonzero_user(). */
+static void usercopy_test_check_nonzero_user(struct kunit *test)
+{
+ size_t start, end, i, zero_start, zero_end;
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *umem = priv->umem;
+ char *kmem = priv->kmem;
+ size_t size = priv->size;
+
+ KUNIT_ASSERT_GE_MSG(test, size, 2 * PAGE_SIZE, "buffer too small");
+
+ /*
+ * We want to cross a page boundary to exercise the code more
+ * effectively. We also don't want to make the size we scan too large,
+ * otherwise the test can take a long time and cause soft lockups. So
+ * scan a 1024 byte region across the page boundary.
+ */
+ size = 1024;
+ start = PAGE_SIZE - (size / 2);
+
+ kmem += start;
+ umem += start;
+
+ zero_start = size / 4;
+ zero_end = size - zero_start;
+
+ /*
+ * We conduct a series of check_nonzero_user() tests on a block of
+ * memory with the following byte-pattern (trying every possible
+ * [start,end] pair):
+ *
+ * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+ *
+ * And we verify that check_nonzero_user() acts identically to
+ * memchr_inv().
+ */
+
+ memset(kmem, 0x0, size);
+ for (i = 1; i < zero_start; i += 2)
+ kmem[i] = 0xff;
+ for (i = zero_end; i < size; i += 2)
+ kmem[i] = 0xff;
+
+ KUNIT_EXPECT_EQ_MSG(test, copy_to_user(umem, kmem, size), 0,
+ "legitimate copy_to_user failed");
+
+ for (start = 0; start <= size; start++) {
+ for (end = start; end <= size; end++) {
+ size_t len = end - start;
+ int retval = check_zeroed_user(umem + start, len);
+ int expected = is_zeroed(kmem + start, len);
+
+ KUNIT_ASSERT_EQ_MSG(test, retval, expected,
+ "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+ retval, expected, start, end);
+ }
+ }
+}
+
+/* Test usage of copy_struct_from_user(). */
+static void usercopy_test_copy_struct_from_user(struct kunit *test)
+{
+ char *umem_src = NULL, *expected = NULL;
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *umem = priv->umem;
+ char *kmem = priv->kmem;
+ size_t size = priv->size;
+ size_t ksize, usize;
+
+ umem_src = kunit_kmalloc(test, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, umem_src);
+
+ expected = kunit_kmalloc(test, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected);
+
+ /* Fill umem with a fixed byte pattern. */
+ memset(umem_src, 0x3e, size);
+ KUNIT_ASSERT_EQ_MSG(test, copy_to_user(umem, umem_src, size), 0,
+ "legitimate copy_to_user failed");
+
+ /* Check basic case -- (usize == ksize). */
+ ksize = size;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize == ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+ /* Old userspace case -- (usize < ksize). */
+ ksize = size;
+ usize = size / 2;
+
+ memcpy(expected, umem_src, usize);
+ memset(expected + usize, 0x0, ksize - usize);
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize < ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+ /* New userspace (-E2BIG) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), -E2BIG,
+ "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+ /* New userspace (success) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+ KUNIT_EXPECT_EQ_MSG(test, clear_user(umem + ksize, usize - ksize), 0,
+ "legitimate clear_user failed");
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize > ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize > ksize) gives unexpected copy");
+}
+
+/*
+ * Legitimate usage: none of these copies should fail.
+ */
+static void usercopy_test_valid(struct kunit *test)
+{
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *usermem = priv->umem;
+ char *kmem = priv->kmem;
+
+ memset(kmem, 0x3a, PAGE_SIZE * 2);
+ KUNIT_EXPECT_EQ_MSG(test, 0, copy_to_user(usermem, kmem, PAGE_SIZE),
+ "legitimate copy_to_user failed");
+ memset(kmem, 0x0, PAGE_SIZE);
+ KUNIT_EXPECT_EQ_MSG(test, 0, copy_from_user(kmem, usermem, PAGE_SIZE),
+ "legitimate copy_from_user failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, kmem + PAGE_SIZE, PAGE_SIZE,
+ "legitimate usercopy failed to copy data");
+
+#define test_legit(size, check) \
+ do { \
+ size val_##size = (check); \
+ KUNIT_EXPECT_EQ_MSG(test, 0, \
+ put_user(val_##size, (size __user *)usermem), \
+ "legitimate put_user (" #size ") failed"); \
+ val_##size = 0; \
+ KUNIT_EXPECT_EQ_MSG(test, 0, \
+ get_user(val_##size, (size __user *)usermem), \
+ "legitimate get_user (" #size ") failed"); \
+ KUNIT_EXPECT_EQ_MSG(test, val_##size, check, \
+ "legitimate get_user (" #size ") failed to do copy"); \
+ } while (0)
+
+ test_legit(u8, 0x5a);
+ test_legit(u16, 0x5a5b);
+ test_legit(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_legit(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_legit
+}
+
+/*
+ * Invalid usage: none of these copies should succeed.
+ */
+static void usercopy_test_invalid(struct kunit *test)
+{
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *usermem = priv->umem;
+ char *bad_usermem = (char *)usermem;
+ char *kmem = priv->kmem;
+ u64 *kmem_u64 = (u64 *)kmem;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU)) {
+ kunit_skip(test, "Testing for kernel/userspace address confusion is only sensible on architectures with a shared address space");
+ return;
+ }
+
+ /* Prepare kernel memory with check values. */
+ memset(kmem, 0x5a, PAGE_SIZE);
+ memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
+
+ /* Reject kernel-to-kernel copies through copy_from_user(). */
+ KUNIT_EXPECT_NE_MSG(test, copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+ PAGE_SIZE), 0,
+ "illegal all-kernel copy_from_user passed");
+
+ /* Destination half of buffer should have been zeroed. */
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem + PAGE_SIZE, kmem, PAGE_SIZE,
+ "zeroing failure for illegal all-kernel copy_from_user");
+
+#if 0
+ /*
+ * When running with SMAP/PAN/etc, this will Oops the kernel
+ * due to the zeroing of userspace memory on failure. This needs
+ * to be tested in LKDTM instead, since this test module does not
+ * expect to explode.
+ */
+ KUNIT_EXPECT_NE_MSG(test, copy_from_user(bad_usermem, (char __user *)kmem,
+ PAGE_SIZE), 0,
+ "illegal reversed copy_from_user passed");
+#endif
+ KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+ PAGE_SIZE), 0,
+ "illegal all-kernel copy_to_user passed");
+
+ KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, bad_usermem,
+ PAGE_SIZE), 0,
+ "illegal reversed copy_to_user passed");
+
+#define test_illegal(size, check) \
+ do { \
+ size val_##size = (check); \
+ /* get_user() */ \
+ KUNIT_EXPECT_NE_MSG(test, get_user(val_##size, (size __user *)kmem), 0, \
+ "illegal get_user (" #size ") passed"); \
+ KUNIT_EXPECT_EQ_MSG(test, val_##size, 0, \
+ "zeroing failure for illegal get_user (" #size ")"); \
+ /* put_user() */ \
+ *kmem_u64 = 0xF09FA4AFF09FA4AF; \
+ KUNIT_EXPECT_NE_MSG(test, put_user(val_##size, (size __user *)kmem), 0, \
+ "illegal put_user (" #size ") passed"); \
+ KUNIT_EXPECT_EQ_MSG(test, *kmem_u64, 0xF09FA4AFF09FA4AF, \
+ "illegal put_user (" #size ") wrote to kernel memory!"); \
+ } while (0)
+
+ test_illegal(u8, 0x5a);
+ test_illegal(u16, 0x5a5b);
+ test_illegal(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_illegal
+}
+
+static int usercopy_test_init(struct kunit *test)
+{
+ struct usercopy_test_priv *priv;
+ unsigned long user_addr;
+
+ if (!IS_ENABLED(CONFIG_MMU)) {
+ kunit_skip(test, "Userspace allocation testing not available on non-MMU systems");
+ return 0;
+ }
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ test->priv = priv;
+ priv->size = PAGE_SIZE * 2;
+
+ priv->kmem = kunit_kmalloc(test, priv->size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->kmem);
+
+ user_addr = kunit_vm_mmap(test, NULL, 0, priv->size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ KUNIT_ASSERT_NE_MSG(test, user_addr, 0,
+ "Could not create userspace mm");
+ KUNIT_ASSERT_LT_MSG(test, user_addr, (unsigned long)TASK_SIZE,
+ "Failed to allocate user memory");
+ priv->umem = (char __user *)user_addr;
+
+ return 0;
+}
+
+static struct kunit_case usercopy_test_cases[] = {
+ KUNIT_CASE(usercopy_test_valid),
+ KUNIT_CASE(usercopy_test_invalid),
+ KUNIT_CASE(usercopy_test_check_nonzero_user),
+ KUNIT_CASE(usercopy_test_copy_struct_from_user),
+ {}
+};
+
+static struct kunit_suite usercopy_test_suite = {
+ .name = "usercopy",
+ .init = usercopy_test_init,
+ .test_cases = usercopy_test_cases,
+};
+
+kunit_test_suites(&usercopy_test_suite);
+MODULE_AUTHOR("Kees Cook <kees@kernel.org>");
+MODULE_DESCRIPTION("Kernel module for testing copy_to/from_user infrastructure");
+MODULE_LICENSE("GPL");
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 899850bd6f0b..c01eaafd8041 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -140,14 +140,14 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
- * enabled tasks have a special VVAR page installed which
- * has vd->seq set to 1 and vd->clock_mode set to
- * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
- * this does not affect performance because if vd->seq is
- * odd, i.e. a concurrent update is in progress the extra
- * check for vd->clock_mode is just a few extra
- * instructions while spin waiting for vd->seq to become
+ * Open coded function vdso_read_begin() to handle
+ * VDSO_CLOCKMODE_TIMENS. Time namespace enabled tasks have a
+ * special VVAR page installed which has vd->seq set to 1 and
+ * vd->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time
+ * namespace affected tasks this does not affect performance
+ * because if vd->seq is odd, i.e. a concurrent update is in
+ * progress the extra check for vd->clock_mode is just a few
+ * extra instructions while spin waiting for vd->seq to become
* even again.
*/
while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
@@ -223,8 +223,8 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VDSO_CLOCK_TIMENS. See comment in
- * do_hres().
+ * Open coded function vdso_read_begin() to handle
+ * VDSO_CLOCK_TIMENS. See comment in do_hres().
*/
while ((seq = READ_ONCE(vd->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
diff --git a/mm/compaction.c b/mm/compaction.c
index e731d45befc7..739b1bf3d637 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -79,6 +79,13 @@ static inline bool is_via_compact_memory(int order) { return false; }
#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
#endif
+static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
+{
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ return page;
+}
+#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
+
static void split_map_pages(struct list_head *freepages)
{
unsigned int i, order;
@@ -93,7 +100,7 @@ static void split_map_pages(struct list_head *freepages)
nr_pages = 1 << order;
- post_alloc_hook(page, order, __GFP_MOVABLE);
+ mark_allocated(page, order, __GFP_MOVABLE);
if (order)
split_page(page, order);
@@ -122,7 +129,7 @@ static unsigned long release_free_list(struct list_head *freepages)
* Convert free pages into post allocation pages, so
* that we can free them via __free_page.
*/
- post_alloc_hook(page, order, __GFP_MOVABLE);
+ mark_allocated(page, order, __GFP_MOVABLE);
__free_pages(page, order);
if (pfn > high_pfn)
high_pfn = pfn;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 6392f1cc97a3..e66823d6b10b 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1358,14 +1358,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
* access frequencies are similar. This is for minimizing the monitoring
* overhead under the dynamically changeable access pattern. If a merge was
* unnecessarily made, later 'kdamond_split_regions()' will revert it.
+ *
+ * In some cases the total number of regions can exceed the user-defined
+ * limit, max_nr_regions. For example, the user can update max_nr_regions
+ * to a number lower than the current number of regions while DAMON is
+ * running. In that case, repeat merging until the limit is met while
+ * increasing @threshold up to its possible maximum level.
*/
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
unsigned long sz_limit)
{
struct damon_target *t;
-
- damon_for_each_target(t, c)
- damon_merge_regions_of(t, threshold, sz_limit);
+ unsigned int nr_regions;
+ unsigned int max_thres;
+
+ max_thres = c->attrs.aggr_interval /
+ (c->attrs.sample_interval ? c->attrs.sample_interval : 1);
+ do {
+ nr_regions = 0;
+ damon_for_each_target(t, c) {
+ damon_merge_regions_of(t, threshold, sz_limit);
+ nr_regions += damon_nr_regions(t);
+ }
+ threshold = max(1, threshold * 2);
+ } while (nr_regions > c->attrs.max_nr_regions &&
+ threshold / 2 < max_thres);
}
/*
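A worked example with hypothetical attribute values: with sample_interval = 5 ms and aggr_interval = 100 ms, max_thres is 20, so a merge pass entering the loop with threshold 1 may retry at 2, 4, 8, 16 and 32 before the threshold / 2 < max_thres guard stops further doubling, even if the number of regions is still above max_nr_regions.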
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index b104a353b532..e4969fb54da3 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -40,22 +40,7 @@
* Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics
* expectations that are being validated here. All future changes in here
* or the documentation need to be in sync.
- *
- * On s390 platform, the lower 4 bits are used to identify given page table
- * entry type. But these bits might affect the ability to clear entries with
- * pxx_clear() because of how dynamic page table folding works on s390. So
- * while loading up the entries do not change the lower 4 bits. It does not
- * have affect any other platform. Also avoid the 62nd bit on ppc64 that is
- * used to mark a pte entry.
*/
-#define S390_SKIP_MASK GENMASK(3, 0)
-#if __BITS_PER_LONG == 64
-#define PPC64_SKIP_MASK GENMASK(62, 62)
-#else
-#define PPC64_SKIP_MASK 0x0
-#endif
-#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
-#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE GENMASK(7, 0)
struct pgtable_debug_args {
@@ -511,8 +496,7 @@ static void __init pud_clear_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating PUD clear\n");
- pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->pudp, pud);
+ WARN_ON(pud_none(pud));
pud_clear(args->pudp);
pud = READ_ONCE(*args->pudp);
WARN_ON(!pud_none(pud));
@@ -548,8 +532,7 @@ static void __init p4d_clear_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating P4D clear\n");
- p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->p4dp, p4d);
+ WARN_ON(p4d_none(p4d));
p4d_clear(args->p4dp);
p4d = READ_ONCE(*args->p4dp);
WARN_ON(!p4d_none(p4d));
@@ -582,8 +565,7 @@ static void __init pgd_clear_tests(struct pgtable_debug_args *args)
return;
pr_debug("Validating PGD clear\n");
- pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->pgdp, pgd);
+ WARN_ON(pgd_none(pgd));
pgd_clear(args->pgdp);
pgd = READ_ONCE(*args->pgdp);
WARN_ON(!pgd_none(pgd));
@@ -634,10 +616,8 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args)
if (WARN_ON(!args->ptep))
return;
-#ifndef CONFIG_RISCV
- pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
-#endif
set_pte_at(args->mm, args->vaddr, args->ptep, pte);
+ WARN_ON(pte_none(pte));
flush_dcache_page(page);
barrier();
ptep_clear(args->mm, args->vaddr, args->ptep);
@@ -650,8 +630,7 @@ static void __init pmd_clear_tests(struct pgtable_debug_args *args)
pmd_t pmd = READ_ONCE(*args->pmdp);
pr_debug("Validating PMD clear\n");
- pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
- WRITE_ONCE(*args->pmdp, pmd);
+ WARN_ON(pmd_none(pmd));
pmd_clear(args->pmdp);
pmd = READ_ONCE(*args->pmdp);
WARN_ON(!pmd_none(pmd));
diff --git a/mm/filemap.c b/mm/filemap.c
index 382c3d06bfb1..657bcd887fdb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1000,7 +1000,7 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
do {
cpuset_mems_cookie = read_mems_allowed_begin();
n = cpuset_mem_spread_node();
- folio = __folio_alloc_node(gfp, order, n);
+ folio = __folio_alloc_node_noprof(gfp, order, n);
} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
return folio;
@@ -1847,7 +1847,7 @@ repeat:
if (!folio || xa_is_value(folio))
goto out;
- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto repeat;
if (unlikely(folio != xas_reload(&xas))) {
@@ -2001,7 +2001,7 @@ retry:
if (!folio || xa_is_value(folio))
return folio;
- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto reset;
if (unlikely(folio != xas_reload(xas))) {
@@ -2181,7 +2181,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
if (xa_is_value(folio))
goto update_start;
- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto retry;
if (unlikely(folio != xas_reload(&xas)))
@@ -2313,7 +2313,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
break;
if (xa_is_sibling(folio))
break;
- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto retry;
if (unlikely(folio != xas_reload(&xas)))
@@ -3124,7 +3124,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Use the readahead code, even if readahead is disabled */
- if (vm_flags & VM_HUGEPAGE) {
+ if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
fpin = maybe_unlock_mmap_for_io(vmf, fpin);
ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
ra->size = HPAGE_PMD_NR;
@@ -3231,7 +3231,8 @@ static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
return 0;
- ptep = pte_offset_map(vmf->pmd, vmf->address);
+ ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address,
+ &vmf->ptl);
if (unlikely(!ptep))
return VM_FAULT_NOPAGE;
@@ -3472,7 +3473,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
continue;
if (folio_test_locked(folio))
continue;
- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
continue;
/* Has the page moved or been split? */
if (unlikely(folio != xas_reload(xas)))
@@ -4248,6 +4249,9 @@ static void filemap_cachestat(struct address_space *mapping,
XA_STATE(xas, &mapping->i_pages, first_index);
struct folio *folio;
+ /* Flush stats (and potentially sleep) outside the RCU read section. */
+ mem_cgroup_flush_stats_ratelimited(NULL);
+
rcu_read_lock();
xas_for_each(&xas, folio, last_index) {
int order;
@@ -4311,7 +4315,7 @@ static void filemap_cachestat(struct address_space *mapping,
goto resched;
}
#endif
- if (workingset_test_recent(shadow, true, &workingset))
+ if (workingset_test_recent(shadow, true, &workingset, false))
cs->nr_recently_evicted += nr_pages;
goto resched;
diff --git a/mm/gup.c b/mm/gup.c
index ca0f5cedce9b..f1d6bc06eb52 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -76,7 +76,7 @@ retry:
folio = page_folio(page);
if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
return NULL;
- if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
+ if (unlikely(!folio_ref_try_add(folio, refs)))
return NULL;
/*
@@ -97,95 +97,6 @@ retry:
return folio;
}
-/**
- * try_grab_folio() - Attempt to get or pin a folio.
- * @page: pointer to page to be grabbed
- * @refs: the value to (effectively) add to the folio's refcount
- * @flags: gup flags: these are the FOLL_* flag values.
- *
- * "grab" names in this file mean, "look at flags to decide whether to use
- * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
- *
- * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
- * same time. (That's true throughout the get_user_pages*() and
- * pin_user_pages*() APIs.) Cases:
- *
- * FOLL_GET: folio's refcount will be incremented by @refs.
- *
- * FOLL_PIN on large folios: folio's refcount will be incremented by
- * @refs, and its pincount will be incremented by @refs.
- *
- * FOLL_PIN on single-page folios: folio's refcount will be incremented by
- * @refs * GUP_PIN_COUNTING_BIAS.
- *
- * Return: The folio containing @page (with refcount appropriately
- * incremented) for success, or NULL upon failure. If neither FOLL_GET
- * nor FOLL_PIN was set, that's considered failure, and furthermore,
- * a likely bug in the caller, so a warning is also emitted.
- */
-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
-{
- struct folio *folio;
-
- if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
- return NULL;
-
- if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
- return NULL;
-
- if (flags & FOLL_GET)
- return try_get_folio(page, refs);
-
- /* FOLL_PIN is set */
-
- /*
- * Don't take a pin on the zero page - it's not going anywhere
- * and it is used in a *lot* of places.
- */
- if (is_zero_page(page))
- return page_folio(page);
-
- folio = try_get_folio(page, refs);
- if (!folio)
- return NULL;
-
- /*
- * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
- * right zone, so fail and let the caller fall back to the slow
- * path.
- */
- if (unlikely((flags & FOLL_LONGTERM) &&
- !folio_is_longterm_pinnable(folio))) {
- if (!put_devmap_managed_folio_refs(folio, refs))
- folio_put_refs(folio, refs);
- return NULL;
- }
-
- /*
- * When pinning a large folio, use an exact count to track it.
- *
- * However, be sure to *also* increment the normal folio
- * refcount field at least once, so that the folio really
- * is pinned. That's why the refcount from the earlier
- * try_get_folio() is left intact.
- */
- if (folio_test_large(folio))
- atomic_add(refs, &folio->_pincount);
- else
- folio_ref_add(folio,
- refs * (GUP_PIN_COUNTING_BIAS - 1));
- /*
- * Adjust the pincount before re-checking the PTE for changes.
- * This is essentially a smp_mb() and is paired with a memory
- * barrier in folio_try_share_anon_rmap_*().
- */
- smp_mb__after_atomic();
-
- node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
-
- return folio;
-}
-
static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
if (flags & FOLL_PIN) {
@@ -203,58 +114,59 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
}
/**
- * try_grab_page() - elevate a page's refcount by a flag-dependent amount
- * @page: pointer to page to be grabbed
- * @flags: gup flags: these are the FOLL_* flag values.
+ * try_grab_folio() - add a folio's refcount by a flag-dependent amount
+ * @folio: pointer to folio to be grabbed
+ * @refs: the value to (effectively) add to the folio's refcount
+ * @flags: gup flags: these are the FOLL_* flag values
*
* This might not do anything at all, depending on the flags argument.
*
* "grab" names in this file mean, "look at flags to decide whether to use
- * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
+ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
*
* Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
- * time. Cases: please see the try_grab_folio() documentation, with
- * "refs=1".
+ * time.
*
* Return: 0 for success, or if no action was required (if neither FOLL_PIN
* nor FOLL_GET was set, nothing is done). A negative error code for failure:
*
- * -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not
+ * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not
* be grabbed.
+ *
+ * It is called when we have a stable reference for the folio, typically in
+ * the GUP slow path.
*/
-int __must_check try_grab_page(struct page *page, unsigned int flags)
+int __must_check try_grab_folio(struct folio *folio, int refs,
+ unsigned int flags)
{
- struct folio *folio = page_folio(page);
-
if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
return -ENOMEM;
- if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
+ if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
return -EREMOTEIO;
if (flags & FOLL_GET)
- folio_ref_inc(folio);
+ folio_ref_add(folio, refs);
else if (flags & FOLL_PIN) {
/*
* Don't take a pin on the zero page - it's not going anywhere
* and it is used in a *lot* of places.
*/
- if (is_zero_page(page))
+ if (is_zero_folio(folio))
return 0;
/*
- * Similar to try_grab_folio(): be sure to *also*
- * increment the normal page refcount field at least once,
+ * Increment the normal page refcount field at least once,
* so that the page really is pinned.
*/
if (folio_test_large(folio)) {
- folio_ref_add(folio, 1);
- atomic_add(1, &folio->_pincount);
+ folio_ref_add(folio, refs);
+ atomic_add(refs, &folio->_pincount);
} else {
- folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
+ folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
}
- node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
+ node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
}
return 0;
@@ -515,6 +427,102 @@ static int record_subpages(struct page *page, unsigned long sz,
return nr;
}
+
+/**
+ * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
+ * @page: pointer to page to be grabbed
+ * @refs: the value to (effectively) add to the folio's refcount
+ * @flags: gup flags: these are the FOLL_* flag values.
+ *
+ * "grab" names in this file mean, "look at flags to decide whether to use
+ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
+ *
+ * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
+ * same time. (That's true throughout the get_user_pages*() and
+ * pin_user_pages*() APIs.) Cases:
+ *
+ * FOLL_GET: folio's refcount will be incremented by @refs.
+ *
+ * FOLL_PIN on large folios: folio's refcount will be incremented by
+ * @refs, and its pincount will be incremented by @refs.
+ *
+ * FOLL_PIN on single-page folios: folio's refcount will be incremented by
+ * @refs * GUP_PIN_COUNTING_BIAS.
+ *
+ * Return: The folio containing @page (with refcount appropriately
+ * incremented) for success, or NULL upon failure. If neither FOLL_GET
+ * nor FOLL_PIN was set, that's considered failure, and furthermore,
+ * a likely bug in the caller, so a warning is also emitted.
+ *
+ * It elevates the folio refcount with an add-ref-unless-zero operation and
+ * must only be called from the GUP fast path.
+ */
+static struct folio *try_grab_folio_fast(struct page *page, int refs,
+ unsigned int flags)
+{
+ struct folio *folio;
+
+ /* Warn if this is not being called from the GUP fast path */
+ VM_WARN_ON_ONCE(!irqs_disabled());
+
+ if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
+ return NULL;
+
+ if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
+ return NULL;
+
+ if (flags & FOLL_GET)
+ return try_get_folio(page, refs);
+
+ /* FOLL_PIN is set */
+
+ /*
+ * Don't take a pin on the zero page - it's not going anywhere
+ * and it is used in a *lot* of places.
+ */
+ if (is_zero_page(page))
+ return page_folio(page);
+
+ folio = try_get_folio(page, refs);
+ if (!folio)
+ return NULL;
+
+ /*
+ * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
+ * right zone, so fail and let the caller fall back to the slow
+ * path.
+ */
+ if (unlikely((flags & FOLL_LONGTERM) &&
+ !folio_is_longterm_pinnable(folio))) {
+ if (!put_devmap_managed_folio_refs(folio, refs))
+ folio_put_refs(folio, refs);
+ return NULL;
+ }
+
+ /*
+ * When pinning a large folio, use an exact count to track it.
+ *
+ * However, be sure to *also* increment the normal folio
+ * refcount field at least once, so that the folio really
+ * is pinned. That's why the refcount from the earlier
+ * try_get_folio() is left intact.
+ */
+ if (folio_test_large(folio))
+ atomic_add(refs, &folio->_pincount);
+ else
+ folio_ref_add(folio,
+ refs * (GUP_PIN_COUNTING_BIAS - 1));
+ /*
+ * Adjust the pincount before re-checking the PTE for changes.
+ * This is essentially a smp_mb() and is paired with a memory
+ * barrier in folio_try_share_anon_rmap_*().
+ */
+ smp_mb__after_atomic();
+
+ node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
+
+ return folio;
+}
#endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_GUP_FAST */
#ifdef CONFIG_ARCH_HAS_HUGEPD
@@ -535,7 +543,7 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
*/
static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz,
unsigned long addr, unsigned long end, unsigned int flags,
- struct page **pages, int *nr)
+ struct page **pages, int *nr, bool fast)
{
unsigned long pte_end;
struct page *page;
@@ -558,9 +566,15 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
page = pte_page(pte);
refs = record_subpages(page, sz, addr, end, pages + *nr);
- folio = try_grab_folio(page, refs, flags);
- if (!folio)
- return 0;
+ if (fast) {
+ folio = try_grab_folio_fast(page, refs, flags);
+ if (!folio)
+ return 0;
+ } else {
+ folio = page_folio(page);
+ if (try_grab_folio(folio, refs, flags))
+ return 0;
+ }
if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
gup_put_folio(folio, refs, flags);
@@ -588,7 +602,7 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
unsigned long addr, unsigned int pdshift,
unsigned long end, unsigned int flags,
- struct page **pages, int *nr)
+ struct page **pages, int *nr, bool fast)
{
pte_t *ptep;
unsigned long sz = 1UL << hugepd_shift(hugepd);
@@ -598,7 +612,8 @@ static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
ptep = hugepte_offset(hugepd, addr, pdshift);
do {
next = hugepte_addr_end(addr, end, sz);
- ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr);
+ ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr,
+ fast);
if (ret != 1)
return ret;
} while (ptep++, addr = next, addr != end);
@@ -625,7 +640,7 @@ static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
ptep = hugepte_offset(hugepd, addr, pdshift);
ptl = huge_pte_lock(h, vma->vm_mm, ptep);
ret = gup_hugepd(vma, hugepd, addr, pdshift, addr + PAGE_SIZE,
- flags, &page, &nr);
+ flags, &page, &nr, false);
spin_unlock(ptl);
if (ret == 1) {
@@ -642,7 +657,7 @@ static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
static inline int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
unsigned long addr, unsigned int pdshift,
unsigned long end, unsigned int flags,
- struct page **pages, int *nr)
+ struct page **pages, int *nr, bool fast)
{
return 0;
}
@@ -729,7 +744,7 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
gup_must_unshare(vma, flags, page))
return ERR_PTR(-EMLINK);
- ret = try_grab_page(page, flags);
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (ret)
page = ERR_PTR(ret);
else
@@ -806,7 +821,7 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
!PageAnonExclusive(page), page);
- ret = try_grab_page(page, flags);
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (ret)
return ERR_PTR(ret);
@@ -968,8 +983,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
!PageAnonExclusive(page), page);
- /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
- ret = try_grab_page(page, flags);
+ /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (unlikely(ret)) {
page = ERR_PTR(ret);
goto out;
@@ -1233,7 +1248,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
goto unmap;
*page = pte_page(entry);
}
- ret = try_grab_page(*page, gup_flags);
+ ret = try_grab_folio(page_folio(*page), 1, gup_flags);
if (unlikely(ret))
goto unmap;
out:
@@ -1636,20 +1651,19 @@ next_page:
* pages.
*/
if (page_increm > 1) {
- struct folio *folio;
+ struct folio *folio = page_folio(page);
/*
* Since we already hold refcount on the
* large folio, this should never fail.
*/
- folio = try_grab_folio(page, page_increm - 1,
- foll_flags);
- if (WARN_ON_ONCE(!folio)) {
+ if (try_grab_folio(folio, page_increm - 1,
+ foll_flags)) {
/*
* Release the 1st page ref if the
* folio is problematic, fail hard.
*/
- gup_put_folio(page_folio(page), 1,
+ gup_put_folio(folio, 1,
foll_flags);
ret = -EFAULT;
goto out;
@@ -2797,7 +2811,6 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* This code is based heavily on the PowerPC implementation by Nick Piggin.
*/
#ifdef CONFIG_HAVE_GUP_FAST
-
/*
* Used in the GUP-fast path to determine whether GUP is permitted to work on
* a specific folio.
@@ -2962,7 +2975,7 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- folio = try_grab_folio(page, 1, flags);
+ folio = try_grab_folio_fast(page, 1, flags);
if (!folio)
goto pte_unmap;
@@ -3049,7 +3062,7 @@ static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
break;
}
- folio = try_grab_folio(page, 1, flags);
+ folio = try_grab_folio_fast(page, 1, flags);
if (!folio) {
gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
break;
@@ -3138,7 +3151,7 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
page = pmd_page(orig);
refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);
- folio = try_grab_folio(page, refs, flags);
+ folio = try_grab_folio_fast(page, refs, flags);
if (!folio)
return 0;
@@ -3182,7 +3195,7 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
page = pud_page(orig);
refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);
- folio = try_grab_folio(page, refs, flags);
+ folio = try_grab_folio_fast(page, refs, flags);
if (!folio)
return 0;
@@ -3222,7 +3235,7 @@ static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
page = pgd_page(orig);
refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
- folio = try_grab_folio(page, refs, flags);
+ folio = try_grab_folio_fast(page, refs, flags);
if (!folio)
return 0;
@@ -3276,7 +3289,8 @@ static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
* pmd format and THP pmd format
*/
if (gup_hugepd(NULL, __hugepd(pmd_val(pmd)), addr,
- PMD_SHIFT, next, flags, pages, nr) != 1)
+ PMD_SHIFT, next, flags, pages, nr,
+ true) != 1)
return 0;
} else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
pages, nr))
@@ -3306,7 +3320,8 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
return 0;
} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
if (gup_hugepd(NULL, __hugepd(pud_val(pud)), addr,
- PUD_SHIFT, next, flags, pages, nr) != 1)
+ PUD_SHIFT, next, flags, pages, nr,
+ true) != 1)
return 0;
} else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
pages, nr))
@@ -3333,7 +3348,8 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
BUILD_BUG_ON(p4d_leaf(p4d));
if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
if (gup_hugepd(NULL, __hugepd(p4d_val(p4d)), addr,
- P4D_SHIFT, next, flags, pages, nr) != 1)
+ P4D_SHIFT, next, flags, pages, nr,
+ true) != 1)
return 0;
} else if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
pages, nr))
@@ -3362,7 +3378,8 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
return;
} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
if (gup_hugepd(NULL, __hugepd(pgd_val(pgd)), addr,
- PGDIR_SHIFT, next, flags, pages, nr) != 1)
+ PGDIR_SHIFT, next, flags, pages, nr,
+ true) != 1)
return;
} else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
pages, nr))
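
The two kerneldoc comments above (try_grab_folio() and try_grab_folio_fast()) spell out the FOLL_GET/FOLL_PIN accounting rules. As a quick illustration, here is a minimal stand-alone user-space sketch of that bookkeeping; struct toy_folio and its helpers are invented for the example, only GUP_PIN_COUNTING_BIAS (1024) matches the kernel's value, and toy_maybe_pinned() is merely modelled on folio_maybe_dma_pinned().

#include <stdbool.h>
#include <stdio.h>

#define GUP_PIN_COUNTING_BIAS 1024	/* same bias value the kernel uses */

/* Toy model of the fields try_grab_folio*() manipulate. */
struct toy_folio {
	long refcount;
	long pincount;	/* only meaningful for large (multi-page) folios */
	bool large;
};

/* Mirror of the FOLL_PIN accounting described in the comments above. */
static void toy_pin(struct toy_folio *f, int refs)
{
	if (f->large) {
		f->refcount += refs;
		f->pincount += refs;
	} else {
		f->refcount += refs * GUP_PIN_COUNTING_BIAS;
	}
}

/* A "maybe pinned" check, loosely modelled on folio_maybe_dma_pinned(). */
static bool toy_maybe_pinned(const struct toy_folio *f)
{
	return f->large ? f->pincount > 0
			: f->refcount >= GUP_PIN_COUNTING_BIAS;
}

int main(void)
{
	struct toy_folio small = { .refcount = 1, .large = false };
	struct toy_folio big   = { .refcount = 1, .large = true };

	toy_pin(&small, 1);	/* refcount: 1 + 1024 */
	toy_pin(&big, 3);	/* refcount: 1 + 3, pincount: 3 */

	printf("small: ref=%ld pinned=%d\n", small.refcount, toy_maybe_pinned(&small));
	printf("big:   ref=%ld pin=%ld pinned=%d\n", big.refcount, big.pincount,
	       toy_maybe_pinned(&big));
	return 0;
}
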
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 317de2afd371..2120f7478e55 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -558,15 +558,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
-DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
-DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
static struct attribute *stats_attrs[] = {
&anon_fault_alloc_attr.attr,
&anon_fault_fallback_attr.attr,
&anon_fault_fallback_charge_attr.attr,
- &anon_swpout_attr.attr,
- &anon_swpout_fallback_attr.attr,
+ &swpout_attr.attr,
+ &swpout_fallback_attr.attr,
NULL,
};
@@ -1331,7 +1331,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
if (!*pgmap)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
- ret = try_grab_page(page, flags);
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (ret)
page = ERR_PTR(ret);
@@ -3009,30 +3009,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (new_order >= folio_order(folio))
return -EINVAL;
- /* Cannot split anonymous THP to order-1 */
- if (new_order == 1 && folio_test_anon(folio)) {
- VM_WARN_ONCE(1, "Cannot split to order-1 folio");
- return -EINVAL;
- }
-
- if (new_order) {
- /* Only swapping a whole PMD-mapped folio is supported */
- if (folio_test_swapcache(folio))
+ if (folio_test_anon(folio)) {
+ /* order-1 is not supported for anonymous THP. */
+ if (new_order == 1) {
+ VM_WARN_ONCE(1, "Cannot split to order-1 folio");
return -EINVAL;
+ }
+ } else if (new_order) {
/* Split shmem folio to non-zero order not supported */
if (shmem_mapping(folio->mapping)) {
VM_WARN_ONCE(1,
"Cannot split shmem folio to non-0 order");
return -EINVAL;
}
- /* No split if the file system does not support large folio */
- if (!mapping_large_folio_support(folio->mapping)) {
+ /*
+ * No split if the file system does not support large folio.
+ * Note that we might still have THPs in such mappings due to
+ * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
+ * does not actually support large folios properly.
+ */
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ !mapping_large_folio_support(folio->mapping)) {
VM_WARN_ONCE(1,
"Cannot split file folio to non-0 order");
return -EINVAL;
}
}
+ /* Only swapping a whole PMD-mapped folio is supported */
+ if (folio_test_swapcache(folio) && new_order)
+ return -EINVAL;
is_hzp = is_huge_zero_folio(folio);
if (is_hzp) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6be78e7d4f6e..43e1af868cfd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1625,13 +1625,10 @@ static inline void destroy_compound_gigantic_folio(struct folio *folio,
* folio appears as just a compound page. Otherwise, wait until after
* allocating vmemmap to clear the flag.
*
- * A reference is held on the folio, except in the case of demote.
- *
* Must be called with hugetlb lock held.
*/
-static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
- bool adjust_surplus,
- bool demote)
+static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
+ bool adjust_surplus)
{
int nid = folio_nid(folio);
@@ -1645,6 +1642,7 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
list_del(&folio->lru);
if (folio_test_hugetlb_freed(folio)) {
+ folio_clear_hugetlb_freed(folio);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
}
@@ -1661,33 +1659,13 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
if (!folio_test_hugetlb_vmemmap_optimized(folio))
__folio_clear_hugetlb(folio);
- /*
- * In the case of demote we do not ref count the page as it will soon
- * be turned into a page of smaller size.
- */
- if (!demote)
- folio_ref_unfreeze(folio, 1);
-
h->nr_huge_pages--;
h->nr_huge_pages_node[nid]--;
}
-static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
- bool adjust_surplus)
-{
- __remove_hugetlb_folio(h, folio, adjust_surplus, false);
-}
-
-static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
- bool adjust_surplus)
-{
- __remove_hugetlb_folio(h, folio, adjust_surplus, true);
-}
-
static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
bool adjust_surplus)
{
- int zeroed;
int nid = folio_nid(folio);
VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
@@ -1711,21 +1689,6 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
*/
folio_set_hugetlb_vmemmap_optimized(folio);
- /*
- * This folio is about to be managed by the hugetlb allocator and
- * should have no users. Drop our reference, and check for others
- * just in case.
- */
- zeroed = folio_put_testzero(folio);
- if (unlikely(!zeroed))
- /*
- * It is VERY unlikely soneone else has taken a ref
- * on the folio. In this case, we simply return as
- * free_huge_folio() will be called when this other ref
- * is dropped.
- */
- return;
-
arch_clear_hugetlb_flags(folio);
enqueue_hugetlb_folio(h, folio);
}
@@ -1763,13 +1726,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
}
/*
- * Move PageHWPoison flag from head page to the raw error pages,
- * which makes any healthy subpages reusable.
- */
- if (unlikely(folio_test_hwpoison(folio)))
- folio_clear_hugetlb_hwpoison(folio);
-
- /*
* If vmemmap pages were allocated above, then we need to clear the
* hugetlb flag under the hugetlb lock.
*/
@@ -1780,6 +1736,15 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
}
/*
+ * Move PageHWPoison flag from head page to the raw error pages,
+ * which makes any healthy subpages reusable.
+ */
+ if (unlikely(folio_test_hwpoison(folio)))
+ folio_clear_hugetlb_hwpoison(folio);
+
+ folio_ref_unfreeze(folio, 1);
+
+ /*
* Non-gigantic pages demoted from CMA allocated gigantic pages
* need to be given back to CMA in free_gigantic_folio.
*/
@@ -2197,6 +2162,9 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
nid = numa_mem_id();
retry:
folio = __folio_alloc(gfp_mask, order, nid, nmask);
+ /* Ensure hugetlb folio won't have large_rmappable flag set. */
+ if (folio)
+ folio_clear_large_rmappable(folio);
if (folio && !folio_ref_freeze(folio, 1)) {
folio_put(folio);
@@ -3079,11 +3047,8 @@ retry:
free_new:
spin_unlock_irq(&hugetlb_lock);
- if (new_folio) {
- /* Folio has a zero ref count, but needs a ref to be freed */
- folio_ref_unfreeze(new_folio, 1);
+ if (new_folio)
update_and_free_hugetlb_folio(h, new_folio, false);
- }
return ret;
}
@@ -3938,7 +3903,7 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
- remove_hugetlb_folio_for_demote(h, folio, false);
+ remove_hugetlb_folio(h, folio, false);
spin_unlock_irq(&hugetlb_lock);
/*
@@ -3952,7 +3917,6 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
if (rc) {
/* Allocation of vmemmmap failed, we can not demote folio */
spin_lock_irq(&hugetlb_lock);
- folio_ref_unfreeze(folio, 1);
add_hugetlb_folio(h, folio, false);
return rc;
}
@@ -5768,8 +5732,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* do_exit() will not see it, and will keep the reservation
* forever.
*/
- if (adjust_reservation && vma_needs_reservation(h, vma, address))
- vma_add_reservation(h, vma, address);
+ if (adjust_reservation) {
+ int rc = vma_needs_reservation(h, vma, address);
+
+ if (rc < 0)
+ /* Presumably allocate_file_region_entries failed
+ * to allocate a file_region struct. Clear
+ * hugetlb_restore_reserve so that global reserve
+ * count will not be incremented by free_huge_folio.
+ * Act as if we consumed the reservation.
+ */
+ folio_clear_hugetlb_restore_reserve(page_folio(page));
+ else if (rc)
+ vma_add_reservation(h, vma, address);
+ }
tlb_remove_page_size(tlb, page, huge_page_size(h));
/*
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index b9a55322e52c..8193906515c6 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -446,6 +446,8 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
unsigned long vmemmap_reuse;
VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio);
+
if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;
@@ -481,6 +483,9 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
*/
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
+ /* avoid writes from page_ref_add_unless() while unfolding vmemmap */
+ synchronize_rcu();
+
return __hugetlb_vmemmap_restore_folio(h, folio, 0);
}
@@ -505,6 +510,9 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
long restored = 0;
long ret = 0;
+ /* avoid writes from page_ref_add_unless() while unfolding vmemmap */
+ synchronize_rcu();
+
list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
ret = __hugetlb_vmemmap_restore_folio(h, folio,
@@ -550,6 +558,8 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
unsigned long vmemmap_reuse;
VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio);
+
if (!vmemmap_should_optimize_folio(h, folio))
return ret;
@@ -601,6 +611,9 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
LIST_HEAD(vmemmap_pages);
+ /* avoid writes from page_ref_add_unless() while folding vmemmap */
+ synchronize_rcu();
+
__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 0);
free_vmemmap_page_list(&vmemmap_pages);
}
@@ -644,6 +657,9 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
+ /* avoid writes from page_ref_add_unless() while folding vmemmap */
+ synchronize_rcu();
+
list_for_each_entry(folio, folio_list, lru) {
int ret;
diff --git a/mm/internal.h b/mm/internal.h
index b2c75b12014e..cc2c5e07fad3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -588,7 +588,6 @@ extern void __putback_isolated_page(struct page *page, unsigned int order,
extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
-extern void kernel_init_pages(struct page *page, int numpages);
/*
* This will have no effect, other than possibly generating a warning, if the
@@ -1183,8 +1182,8 @@ int migrate_device_coherent_page(struct page *page);
/*
* mm/gup.c
*/
-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
-int __must_check try_grab_page(struct page *page, unsigned int flags);
+int __must_check try_grab_folio(struct folio *folio, int refs,
+ unsigned int flags);
/*
* mm/huge_memory.c
@@ -1436,11 +1435,6 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
int priority);
#ifdef CONFIG_64BIT
-/* VM is sealed, in vm_flags */
-#define VM_SEALED _BITUL(63)
-#endif
-
-#ifdef CONFIG_64BIT
static inline int can_do_mseal(unsigned long flags)
{
if (flags)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index e7c9a4dc89f8..85e7c6b4575c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -532,7 +532,7 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
return;
/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
- unpoison_slab_object(slab->slab_cache, ptr, size, flags);
+ unpoison_slab_object(slab->slab_cache, ptr, flags, false);
/* Poison the redzone and save alloc info for kmalloc() allocations. */
if (is_kmalloc_cache(slab->slab_cache))
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 774a97e6e2da..aab471791bd9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2000,9 +2000,9 @@ out_unlock:
if (!is_shmem) {
filemap_nr_thps_inc(mapping);
/*
- * Paired with smp_mb() in do_dentry_open() to ensure
- * i_writecount is up to date and the update to nr_thps is
- * visible. Ensures the page cache will be truncated if the
+ * Paired with the fence in do_dentry_open() -> get_write_access()
+ * to ensure i_writecount is up to date and the update to nr_thps
+ * is visible. Ensures the page cache will be truncated if the
* file is opened writable.
*/
smp_mb();
@@ -2190,8 +2190,8 @@ rollback:
if (!is_shmem && result == SCAN_COPY_MC) {
filemap_nr_thps_dec(mapping);
/*
- * Paired with smp_mb() in do_dentry_open() to
- * ensure the update to nr_thps is visible.
+ * Paired with the fence in do_dentry_open() -> get_write_access()
+ * to ensure the update to nr_thps is visible.
*/
smp_mb();
}
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index cf2d70e9c9a5..95f859e38c53 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -196,8 +196,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
u32 origin, bool checked)
{
u64 address = (u64)addr;
- void *shadow_start;
- u32 *origin_start;
+ u32 *shadow_start, *origin_start;
size_t pad = 0;
KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -225,8 +224,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
origin_start =
(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
- for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
- origin_start[i] = origin;
+ /*
+ * If the new origin is non-zero, assume that the shadow byte is also non-zero,
+ * and unconditionally overwrite the old origin slot.
+ * If the new origin is zero, overwrite the old origin slot iff the
+ * corresponding shadow slot is zero.
+ */
+ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+ if (origin || !shadow_start[i])
+ origin_start[i] = origin;
+ }
}
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
diff --git a/mm/ksm.c b/mm/ksm.c
index 452ac8346e6e..34c4820e0d3d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
static bool ksm_smart_scan = true;
/* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;
@@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
* the dirty bit in zero page's PTE is set.
*/
newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
- ksm_zero_pages++;
- mm->ksm_zero_pages++;
+ ksm_map_zero_page(mm);
/*
* We're replacing an anonymous page with a zero page, which is
* not anonymous. We need to do proper accounting otherwise we
@@ -2754,18 +2753,16 @@ static void ksm_do_scan(unsigned int scan_npages)
{
struct ksm_rmap_item *rmap_item;
struct page *page;
- unsigned int npages = scan_npages;
- while (npages-- && likely(!freezing(current))) {
+ while (scan_npages-- && likely(!freezing(current))) {
cond_resched();
rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item)
return;
cmp_and_merge_page(page, rmap_item);
put_page(page);
+ ksm_pages_scanned++;
}
-
- ksm_pages_scanned += scan_npages - npages;
}
static int ksmd_should_run(void)
@@ -3376,7 +3373,7 @@ static void wait_while_offlining(void)
#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *mm)
{
- return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
+ return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
}
#endif /* CONFIG_PROC_FS */
@@ -3665,7 +3662,7 @@ KSM_ATTR_RO(pages_skipped);
static ssize_t ksm_zero_pages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
}
KSM_ATTR_RO(ksm_zero_pages);
@@ -3674,7 +3671,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
{
long general_profit;
- general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
+ general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
ksm_rmap_items * sizeof(struct ksm_rmap_item);
return sysfs_emit(buf, "%ld\n", general_profit);
diff --git a/mm/memblock.c b/mm/memblock.c
index d09136e040d3..e81fb68f7f88 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -754,7 +754,7 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
/* calculate lost pages */
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
- if (nid == NUMA_NO_NODE)
+ if (!numa_valid_node(nid))
nr_pages += end_pfn - start_pfn;
}
@@ -1061,7 +1061,7 @@ static bool should_skip_region(struct memblock_type *type,
return false;
/* only memory regions are associated with nodes, check it */
- if (nid != NUMA_NO_NODE && nid != m_nid)
+ if (numa_valid_node(nid) && nid != m_nid)
return true;
/* skip hotpluggable memory regions if needed */
@@ -1118,10 +1118,6 @@ void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
int idx_a = *idx & 0xffffffff;
int idx_b = *idx >> 32;
- if (WARN_ONCE(nid == MAX_NUMNODES,
- "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
- nid = NUMA_NO_NODE;
-
for (; idx_a < type_a->cnt; idx_a++) {
struct memblock_region *m = &type_a->regions[idx_a];
@@ -1215,9 +1211,6 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
int idx_a = *idx & 0xffffffff;
int idx_b = *idx >> 32;
- if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
- nid = NUMA_NO_NODE;
-
if (*idx == (u64)ULLONG_MAX) {
idx_a = type_a->cnt - 1;
if (type_b != NULL)
@@ -1303,7 +1296,7 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
continue;
- if (nid == MAX_NUMNODES || nid == r_nid)
+ if (!numa_valid_node(nid) || nid == r_nid)
break;
}
if (*idx >= type->cnt) {
@@ -1448,9 +1441,6 @@ phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
enum memblock_flags flags = choose_memblock_flags();
phys_addr_t found;
- if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
- nid = NUMA_NO_NODE;
-
if (!align) {
/* Can't use WARNs this early in boot on powerpc */
dump_stack();
@@ -1463,7 +1453,7 @@ again:
if (found && !memblock_reserve(found, size))
goto done;
- if (nid != NUMA_NO_NODE && !exact_nid) {
+ if (numa_valid_node(nid) && !exact_nid) {
found = memblock_find_in_range_node(size, align, start,
end, NUMA_NO_NODE,
flags);
@@ -1983,7 +1973,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
end = base + size - 1;
flags = rgn->flags;
#ifdef CONFIG_NUMA
- if (memblock_get_region_node(rgn) != MAX_NUMNODES)
+ if (numa_valid_node(memblock_get_region_node(rgn)))
snprintf(nid_buf, sizeof(nid_buf), " on node %d",
memblock_get_region_node(rgn));
#endif
@@ -2177,7 +2167,7 @@ static void __init memmap_init_reserved_pages(void)
start = region->base;
end = start + region->size;
- if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
+ if (!numa_valid_node(nid))
nid = early_pfn_to_nid(PFN_DOWN(start));
reserve_bootmem_region(start, end, nid);
@@ -2268,7 +2258,7 @@ static int memblock_debug_show(struct seq_file *m, void *private)
seq_printf(m, "%4d: ", i);
seq_printf(m, "%pa..%pa ", &reg->base, &end);
- if (nid != MAX_NUMNODES)
+ if (numa_valid_node(nid))
seq_printf(m, "%4d ", nid);
else
seq_printf(m, "%4c ", 'x');
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7fad15b2290c..8f2f1bb18c9c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3147,8 +3147,6 @@ static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
struct mem_cgroup *memcg;
struct lruvec *lruvec;
- lockdep_assert_irqs_disabled();
-
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
lruvec = mem_cgroup_lruvec(memcg, pgdat);
@@ -7747,8 +7745,7 @@ void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
* @new: Replacement folio.
*
* Charge @new as a replacement folio for @old. @old will
- * be uncharged upon free. This is only used by the page cache
- * (in replace_page_cache_folio()).
+ * be uncharged upon free.
*
* Both folios must be locked, @new->mapping must be set up.
*/
@@ -7826,17 +7823,6 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
/* Transfer the charge and the css ref */
commit_charge(new, memcg);
- /*
- * If the old folio is a large folio and is in the split queue, it needs
- * to be removed from the split queue now, in case getting an incorrect
- * split queue in destroy_large_folio() after the memcg of the old folio
- * is cleared.
- *
- * In addition, the old folio is about to be freed after migration, so
- * removing from the split queue a bit earlier seems reasonable.
- */
- if (folio_test_large(old) && folio_test_large_rmappable(old))
- folio_undo_large_rmappable(old);
old->memcg_data = 0;
}
diff --git a/mm/memory.c b/mm/memory.c
index 0f47a533014e..d10e616d7389 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1507,12 +1507,6 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
if (unlikely(folio_mapcount(folio) < 0))
print_bad_pte(vma, addr, ptent, page);
}
-
- if (want_init_mlocked_on_free() && folio_test_mlocked(folio) &&
- !delay_rmap && folio_test_anon(folio)) {
- kernel_init_pages(page, folio_nr_pages(folio));
- }
-
if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
*force_flush = true;
*force_break = true;
@@ -4614,8 +4608,9 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return ret;
- if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
+ if (folio_order(folio) != HPAGE_PMD_ORDER)
return ret;
+ page = &folio->page;
/*
* Just backoff if any subpage of a THP is corrupted otherwise
@@ -5106,10 +5101,16 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru
bool ignore_writable, bool pte_write_upgrade)
{
int nr = pte_pfn(fault_pte) - folio_pfn(folio);
- unsigned long start = max(vmf->address - nr * PAGE_SIZE, vma->vm_start);
- unsigned long end = min(vmf->address + (folio_nr_pages(folio) - nr) * PAGE_SIZE, vma->vm_end);
- pte_t *start_ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE;
- unsigned long addr;
+ unsigned long start, end, addr = vmf->address;
+ unsigned long addr_start = addr - (nr << PAGE_SHIFT);
+ unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
+ pte_t *start_ptep;
+
+ /* Stay within the VMA and within the page table. */
+ start = max3(addr_start, pt_start, vma->vm_start);
+ end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
+ vma->vm_end);
+ start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
/* Restore all PTEs' mapping of the large folio */
for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
diff --git a/mm/mempool.c b/mm/mempool.c
index 6ece63a00acf..3223337135d0 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -273,7 +273,7 @@ mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
{
mempool_t *pool;
- pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
+ pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
if (!pool)
return NULL;
diff --git a/mm/migrate.c b/mm/migrate.c
index dd04f578c19c..a8c6f466e33a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -415,6 +415,15 @@ int folio_migrate_mapping(struct address_space *mapping,
if (folio_ref_count(folio) != expected_count)
return -EAGAIN;
+ /* Take off deferred split queue while frozen and memcg set */
+ if (folio_test_large(folio) &&
+ folio_test_large_rmappable(folio)) {
+ if (!folio_ref_freeze(folio, expected_count))
+ return -EAGAIN;
+ folio_undo_large_rmappable(folio);
+ folio_ref_unfreeze(folio, expected_count);
+ }
+
/* No turning back from here */
newfolio->index = folio->index;
newfolio->mapping = folio->mapping;
@@ -433,6 +442,10 @@ int folio_migrate_mapping(struct address_space *mapping,
return -EAGAIN;
}
+ /* Take off deferred split queue while frozen and memcg set */
+ if (folio_test_large(folio) && folio_test_large_rmappable(folio))
+ folio_undo_large_rmappable(folio);
+
/*
* Now we know that no one else is looking at the folio:
* no turning back from here.
@@ -1654,7 +1667,16 @@ static int migrate_pages_batch(struct list_head *from,
/*
* The rare folio on the deferred split list should
- * be split now. It should not count as a failure.
+ * be split now. It should not count as a failure:
+ * but increment nr_failed because, without doing so,
+ * migrate_pages() may report success with (split but
+ * unmigrated) pages still on its fromlist; whereas it
+ * always reports success when its fromlist is empty.
+ * stats->nr_thp_failed should be increased too,
+ * otherwise the stats become inconsistent when
+ * migrate_pages_batch() is called via migrate_pages()
+ * with MIGRATE_SYNC and MIGRATE_ASYNC.
+ *
* Only check it without removing it from the list.
* Since the folio can be on deferred_split_scan()
* local list and removing it can cause the local list
@@ -1669,6 +1691,8 @@ static int migrate_pages_batch(struct list_head *from,
if (nr_pages > 2 &&
!list_empty(&folio->_deferred_list)) {
if (try_split_folio(folio, split_folios) == 0) {
+ nr_failed++;
+ stats->nr_thp_failed += is_thp;
stats->nr_thp_split += is_thp;
stats->nr_split++;
continue;
diff --git a/mm/mm_init.c b/mm/mm_init.c
index f72b852bd5b8..3ec04933f7fd 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2523,9 +2523,6 @@ EXPORT_SYMBOL(init_on_alloc);
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);
-DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free);
-EXPORT_SYMBOL(init_mlocked_on_free);
-
static bool _init_on_alloc_enabled_early __read_mostly
= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
@@ -2543,14 +2540,6 @@ static int __init early_init_on_free(char *buf)
}
early_param("init_on_free", early_init_on_free);
-static bool _init_mlocked_on_free_enabled_early __read_mostly
- = IS_ENABLED(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON);
-static int __init early_init_mlocked_on_free(char *buf)
-{
- return kstrtobool(buf, &_init_mlocked_on_free_enabled_early);
-}
-early_param("init_mlocked_on_free", early_init_mlocked_on_free);
-
DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
/*
@@ -2578,21 +2567,12 @@ static void __init mem_debugging_and_hardening_init(void)
}
#endif
- if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early ||
- _init_mlocked_on_free_enabled_early) &&
+ if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
page_poisoning_requested) {
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_alloc, init_on_free "
- "and init_mlocked_on_free\n");
+ "will take precedence over init_on_alloc and init_on_free\n");
_init_on_alloc_enabled_early = false;
_init_on_free_enabled_early = false;
- _init_mlocked_on_free_enabled_early = false;
- }
-
- if (_init_mlocked_on_free_enabled_early && _init_on_free_enabled_early) {
- pr_info("mem auto-init: init_on_free is on, "
- "will take precedence over init_mlocked_on_free\n");
- _init_mlocked_on_free_enabled_early = false;
}
if (_init_on_alloc_enabled_early) {
@@ -2609,17 +2589,9 @@ static void __init mem_debugging_and_hardening_init(void)
static_branch_disable(&init_on_free);
}
- if (_init_mlocked_on_free_enabled_early) {
- want_check_pages = true;
- static_branch_enable(&init_mlocked_on_free);
- } else {
- static_branch_disable(&init_mlocked_on_free);
- }
-
- if (IS_ENABLED(CONFIG_KMSAN) && (_init_on_alloc_enabled_early ||
- _init_on_free_enabled_early || _init_mlocked_on_free_enabled_early))
- pr_info("mem auto-init: please make sure init_on_alloc, init_on_free and "
- "init_mlocked_on_free are disabled when running KMSAN\n");
+ if (IS_ENABLED(CONFIG_KMSAN) &&
+ (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
+ pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled()) {
@@ -2658,10 +2630,9 @@ static void __init report_meminit(void)
else
stack = "off";
- pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s, mlocked free:%s\n",
+ pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
- want_init_on_free() ? "on" : "off",
- want_init_mlocked_on_free() ? "on" : "off");
+ want_init_on_free() ? "on" : "off");
if (want_init_on_free())
pr_info("mem auto-init: clearing system memory may take some time...\n");
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 12c9297ed4a7..8a1c92090129 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -415,13 +415,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
else
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
- if (bg_thresh >= thresh)
- bg_thresh = thresh / 2;
tsk = current;
if (rt_task(tsk)) {
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
}
+ /*
+ * Dirty throttling logic assumes the limits in page units fit into
+ * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+ */
+ if (thresh > UINT_MAX)
+ thresh = UINT_MAX;
+ /* This makes sure bg_thresh is within 32-bits as well */
+ if (bg_thresh >= thresh)
+ bg_thresh = thresh / 2;
dtc->thresh = thresh;
dtc->bg_thresh = bg_thresh;
@@ -471,7 +478,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
if (rt_task(tsk))
dirty += dirty / 4;
- return dirty;
+ /*
+ * Dirty throttling logic assumes the limits in page units fit into
+ * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+ */
+ return min_t(unsigned long, dirty, UINT_MAX);
}
/**
@@ -508,10 +519,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
+ unsigned long old_bytes = dirty_background_bytes;
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
- if (ret == 0 && write)
+ if (ret == 0 && write) {
+ if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
+ UINT_MAX) {
+ dirty_background_bytes = old_bytes;
+ return -ERANGE;
+ }
dirty_background_ratio = 0;
+ }
return ret;
}
@@ -537,6 +555,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
+ if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
+ vm_dirty_bytes = old_bytes;
+ return -ERANGE;
+ }
writeback_set_ratelimit();
vm_dirty_ratio = 0;
}
@@ -1660,7 +1682,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
*/
dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
dtc->wb_bg_thresh = dtc->thresh ?
- div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
/*
* In order to avoid the stacked BDI deadlock we need
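
The new clamps above keep page-based dirty limits within 32 bits; with an assumed 4 KiB page size, UINT_MAX pages is the roughly 16 TB ceiling mentioned in the comments, and the byte-based sysctls now reject values that would need more pages than that. A small stand-alone sketch of both checks (the constants and helper names are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL	/* assumed page size */

/* Clamp a limit expressed in pages to 32 bits, as the domain limits now do. */
static unsigned long long clamp_dirty_pages(unsigned long long pages)
{
	return pages > UINT32_MAX ? UINT32_MAX : pages;
}

/* Mimic the -ERANGE check: a byte value is rejected if it needs > UINT_MAX pages. */
static int bytes_in_range(unsigned long long bytes)
{
	unsigned long long pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;

	return pages > UINT32_MAX ? -1 : 0;
}

int main(void)
{
	unsigned long long sixteen_tb = (unsigned long long)UINT32_MAX * PAGE_SIZE;

	printf("max representable dirty limit: %llu bytes (~16 TB)\n", sixteen_tb);
	printf("20 TB accepted? %s\n",
	       bytes_in_range(20ULL << 40) ? "no (-ERANGE)" : "yes");
	printf("clamped pages for 20 TB: %llu\n",
	       clamp_dirty_pages((20ULL << 40) / PAGE_SIZE));
	return 0;
}
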
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e22ce5675ca..9ecf99190ea2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -504,10 +504,15 @@ out:
static inline unsigned int order_to_pindex(int migratetype, int order)
{
+ bool __maybe_unused movable;
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (order > PAGE_ALLOC_COSTLY_ORDER) {
VM_BUG_ON(order != HPAGE_PMD_ORDER);
- return NR_LOWORDER_PCP_LISTS;
+
+ movable = migratetype == MIGRATE_MOVABLE;
+
+ return NR_LOWORDER_PCP_LISTS + movable;
}
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -521,7 +526,7 @@ static inline int pindex_to_order(unsigned int pindex)
int order = pindex / MIGRATE_PCPTYPES;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pindex == NR_LOWORDER_PCP_LISTS)
+ if (pindex >= NR_LOWORDER_PCP_LISTS)
order = HPAGE_PMD_ORDER;
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -1016,7 +1021,7 @@ static inline bool should_skip_kasan_poison(struct page *page)
return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
-void kernel_init_pages(struct page *page, int numpages)
+static void kernel_init_pages(struct page *page, int numpages)
{
int i;
@@ -1955,10 +1960,12 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
}
/*
- * Reserve a pageblock for exclusive use of high-order atomic allocations if
- * there are no empty page blocks that contain a page with a suitable order
+ * Reserve the pageblock(s) surrounding an allocation request for
+ * exclusive use of high-order atomic allocations if there are no
+ * empty page blocks that contain a page with a suitable order
*/
-static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
+static void reserve_highatomic_pageblock(struct page *page, int order,
+ struct zone *zone)
{
int mt;
unsigned long max_managed, flags;
@@ -1984,10 +1991,17 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
/* Yoink! */
mt = get_pageblock_migratetype(page);
/* Only reserve normal pageblocks (i.e., they can merge with others) */
- if (migratetype_is_mergeable(mt))
- if (move_freepages_block(zone, page, mt,
- MIGRATE_HIGHATOMIC) != -1)
- zone->nr_reserved_highatomic += pageblock_nr_pages;
+ if (!migratetype_is_mergeable(mt))
+ goto out_unlock;
+
+ if (order < pageblock_order) {
+ if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
+ goto out_unlock;
+ zone->nr_reserved_highatomic += pageblock_nr_pages;
+ } else {
+ change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
+ zone->nr_reserved_highatomic += 1 << order;
+ }
out_unlock:
spin_unlock_irqrestore(&zone->lock, flags);
@@ -1999,7 +2013,7 @@ out_unlock:
* intense memory pressure but failed atomic allocations should be easier
* to recover from than an OOM.
*
- * If @force is true, try to unreserve a pageblock even though highatomic
+ * If @force is true, try to unreserve pageblocks even though highatomic
* pageblock is exhausted.
*/
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
@@ -2041,6 +2055,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* adjust the count once.
*/
if (is_migrate_highatomic(mt)) {
+ unsigned long size;
/*
* It should never happen but changes to
* locking could inadvertently allow a per-cpu
@@ -2048,9 +2063,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* while unreserving so be safe and watch for
* underflows.
*/
- zone->nr_reserved_highatomic -= min(
- pageblock_nr_pages,
- zone->nr_reserved_highatomic);
+ size = max(pageblock_nr_pages, 1UL << order);
+ size = min(size, zone->nr_reserved_highatomic);
+ zone->nr_reserved_highatomic -= size;
}
/*
@@ -2062,11 +2077,19 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* of pageblocks that cannot be completely freed
* may increase.
*/
- ret = move_freepages_block(zone, page, mt,
- ac->migratetype);
+ if (order < pageblock_order)
+ ret = move_freepages_block(zone, page, mt,
+ ac->migratetype);
+ else {
+ move_to_free_list(page, zone, order, mt,
+ ac->migratetype);
+ change_pageblock_range(page, order,
+ ac->migratetype);
+ ret = 1;
+ }
/*
- * Reserving this block already succeeded, so this should
- * not fail on zone boundaries.
+ * Reserving the block(s) already succeeded,
+ * so this should not fail on zone boundaries.
*/
WARN_ON_ONCE(ret == -1);
if (ret > 0) {
@@ -3406,7 +3429,7 @@ try_this_zone:
* if the pageblock should be reserved for the future
*/
if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
- reserve_highatomic_pageblock(page, zone);
+ reserve_highatomic_pageblock(page, order, zone);
return page;
} else {
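
The reworked highatomic reservation above sizes the reserve differently depending on the allocation order: requests below pageblock_order still reserve a whole pageblock, while larger requests reserve exactly 1 << order pages, and unreserving releases min(max(pageblock_nr_pages, 1 << order), nr_reserved). A stand-alone sketch of that sizing logic, with pageblock_order assumed to be 9:

#include <stdio.h>

#define PAGEBLOCK_ORDER		9	/* assumed: 2 MiB pageblocks with 4 KiB pages */
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

/* How many pages the highatomic reserve grows by for an allocation of @order. */
static unsigned long reserve_size(unsigned int order)
{
	return order < PAGEBLOCK_ORDER ? PAGEBLOCK_NR_PAGES : 1UL << order;
}

/* How many pages are released when such a reservation is unwound. */
static unsigned long unreserve_size(unsigned int order, unsigned long reserved)
{
	unsigned long size = PAGEBLOCK_NR_PAGES > (1UL << order) ?
			     PAGEBLOCK_NR_PAGES : 1UL << order;

	return size < reserved ? size : reserved;
}

int main(void)
{
	printf("order-3 request reserves %lu pages\n", reserve_size(3));
	printf("order-10 request reserves %lu pages\n", reserve_size(10));
	printf("unreserve order-10 with 512 reserved: %lu pages\n",
	       unreserve_size(10, 512));
	return 0;
}
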
diff --git a/mm/page_io.c b/mm/page_io.c
index 46c603dddf04..0a150c240bf4 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -217,7 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
count_memcg_folio_events(folio, THP_SWPOUT, 1);
count_vm_event(THP_SWPOUT);
}
- count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
+ count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
#endif
count_vm_events(PSWPOUT, folio_nr_pages(folio));
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 4169576bed72..509c6ef8de40 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -73,6 +73,9 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
+ if (!page_ext)
+ return;
+
BUG_ON(PageSlab(page));
anon = PageAnon(page);
@@ -110,6 +113,9 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
+ if (!page_ext)
+ return;
+
BUG_ON(PageSlab(page));
anon = PageAnon(page);
@@ -140,7 +146,10 @@ void __page_table_check_zero(struct page *page, unsigned int order)
BUG_ON(PageSlab(page));
page_ext = page_ext_get(page);
- BUG_ON(!page_ext);
+
+ if (!page_ext)
+ return;
+
for (i = 0; i < (1ul << order); i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
diff --git a/mm/readahead.c b/mm/readahead.c
index c1b23989d9ca..817b2a352d78 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -503,11 +503,11 @@ void page_cache_ra_order(struct readahead_control *ractl,
limit = min(limit, index + ra->size - 1);
- if (new_order < MAX_PAGECACHE_ORDER) {
+ if (new_order < MAX_PAGECACHE_ORDER)
new_order += 2;
- new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
- new_order = min_t(unsigned int, new_order, ilog2(ra->size));
- }
+
+ new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
+ new_order = min_t(unsigned int, new_order, ilog2(ra->size));
/* See comment in page_cache_ra_unbounded() */
nofs = memalloc_nofs_save();
diff --git a/mm/shmem.c b/mm/shmem.c
index f5d60436b604..831b52dfd56e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -541,8 +541,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags)
+static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
+ bool shmem_huge_force, struct mm_struct *mm,
+ unsigned long vm_flags)
{
loff_t i_size;
@@ -573,6 +574,16 @@ bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
}
}
+bool shmem_is_huge(struct inode *inode, pgoff_t index,
+ bool shmem_huge_force, struct mm_struct *mm,
+ unsigned long vm_flags)
+{
+ if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
+ return false;
+
+ return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
+}
+
#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
@@ -1786,7 +1797,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
xa_lock_irq(&swap_mapping->i_pages);
error = shmem_replace_entry(swap_mapping, swap_index, old, new);
if (!error) {
- mem_cgroup_migrate(old, new);
+ mem_cgroup_replace_folio(old, new);
__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
@@ -3166,10 +3177,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
struct folio *folio;
/*
- * Good, the fallocate(2) manpage permits EINTR: we may have
- * been interrupted because we are using up too much memory.
+ * Check for fatal signal so that we abort early in OOM
+ * situations. We don't want to abort in case of non-fatal
+ * signals as large fallocate can take noticeable time and
+ * e.g. periodic timers may result in fallocate constantly
+ * restarting.
*/
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
error = -EINTR;
else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
error = -ENOMEM;
@@ -3903,14 +3917,14 @@ static const struct constant_table shmem_param_enums_huge[] = {
};
const struct fs_parameter_spec shmem_fs_parameters[] = {
- fsparam_u32 ("gid", Opt_gid),
+ fsparam_gid ("gid", Opt_gid),
fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
fsparam_u32oct("mode", Opt_mode),
fsparam_string("mpol", Opt_mpol),
fsparam_string("nr_blocks", Opt_nr_blocks),
fsparam_string("nr_inodes", Opt_nr_inodes),
fsparam_string("size", Opt_size),
- fsparam_u32 ("uid", Opt_uid),
+ fsparam_uid ("uid", Opt_uid),
fsparam_flag ("inode32", Opt_inode32),
fsparam_flag ("inode64", Opt_inode64),
fsparam_flag ("noswap", Opt_noswap),
@@ -3970,9 +3984,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
ctx->mode = result.uint_32 & 07777;
break;
case Opt_uid:
- kuid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(kuid))
- goto bad_value;
+ kuid = result.uid;
/*
* The requested uid must be representable in the
@@ -3984,9 +3996,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
ctx->uid = kuid;
break;
case Opt_gid:
- kgid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(kgid))
- goto bad_value;
+ kgid = result.gid;
/*
* The requested gid must be representable in the
diff --git a/mm/slub.c b/mm/slub.c
index 0809760cf789..4927edec6a8c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1952,7 +1952,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
#ifdef CONFIG_MEMCG
new_exts |= MEMCG_DATA_OBJEXTS;
#endif
- old_exts = slab->obj_exts;
+ old_exts = READ_ONCE(slab->obj_exts);
handle_failed_objexts_alloc(old_exts, vec, objects);
if (new_slab) {
/*
@@ -1961,7 +1961,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
* be simply assigned.
*/
slab->obj_exts = new_exts;
- } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+ } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
+ cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
/*
* If the slab is already in use, somebody can allocate and
* assign slabobj_exts in parallel. In this case the existing
@@ -3901,7 +3902,6 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
unsigned int orig_size)
{
unsigned int zero_size = s->object_size;
- struct slabobj_ext *obj_exts;
bool kasan_init = init;
size_t i;
gfp_t init_flags = flags & gfp_allowed_mask;
@@ -3944,9 +3944,11 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, init_flags);
kmsan_slab_alloc(s, p[i], init_flags);
+#ifdef CONFIG_MEM_ALLOC_PROFILING
if (need_slab_obj_ext()) {
+ struct slabobj_ext *obj_exts;
+
obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
-#ifdef CONFIG_MEM_ALLOC_PROFILING
/*
* Currently obj_exts is used only for allocation profiling.
* If other users appear then mem_alloc_profiling_enabled()
@@ -3954,8 +3956,8 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
*/
if (likely(obj_exts))
alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
-#endif
}
+#endif
}
return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
diff --git a/mm/util.c b/mm/util.c
index c9e519e6811f..983baf2bd675 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -26,6 +26,8 @@
#include <linux/uaccess.h>
+#include <kunit/visibility.h>
+
#include "internal.h"
#include "swap.h"
@@ -139,14 +141,14 @@ EXPORT_SYMBOL(kmemdup_noprof);
* kmemdup_array - duplicate a given array.
*
* @src: array to duplicate.
- * @element_size: size of each element of array.
* @count: number of elements to duplicate from array.
+ * @element_size: size of each element of array.
* @gfp: GFP mask to use.
*
* Return: duplicated array of @src or %NULL in case of error,
* result is physically contiguous. Use kfree() to free.
*/
-void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
return kmemdup(src, size_mul(element_size, count), gfp);
}
@@ -482,6 +484,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
+#endif
/**
* __account_locked_vm - account locked pages to an mm's locked_vm
@@ -705,7 +710,7 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag
if (oldsize >= newsize)
return (void *)p;
- newp = kvmalloc(newsize, flags);
+ newp = kvmalloc_noprof(newsize, flags);
if (!newp)
return NULL;
memcpy(newp, p, oldsize);
@@ -726,7 +731,7 @@ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return __vmalloc(bytes, flags);
+ return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);
@@ -737,7 +742,7 @@ EXPORT_SYMBOL(__vmalloc_array_noprof);
*/
void *vmalloc_array_noprof(size_t n, size_t size)
{
- return __vmalloc_array(n, size, GFP_KERNEL);
+ return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);
@@ -749,7 +754,7 @@ EXPORT_SYMBOL(vmalloc_array_noprof);
*/
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
- return __vmalloc_array(n, size, flags | __GFP_ZERO);
+ return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);
@@ -760,7 +765,7 @@ EXPORT_SYMBOL(__vcalloc_noprof);
*/
void *vcalloc_noprof(size_t n, size_t size)
{
- return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
+ return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5d3aa2dc88a8..e34ea860153f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -722,7 +722,7 @@ int is_vmalloc_or_module_addr(const void *x)
* and fall back on vmalloc() if that fails. Others
* just put it in the vmalloc space.
*/
-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
unsigned long addr = (unsigned long)kasan_reset_tag(x);
if (addr >= MODULES_VADDR && addr < MODULES_END)
return 1;
@@ -2498,6 +2498,7 @@ struct vmap_block {
struct list_head free_list;
struct rcu_head rcu_head;
struct list_head purge;
+ unsigned int cpu;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
@@ -2542,7 +2543,15 @@ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
- int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
+ int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
+
+ /*
+ * Please note, nr_cpu_ids - 1 is the highest set bit in
+ * cpu_possible_mask, i.e. we never invoke cpumask_next()
+ * when the index already equals nr_cpu_ids - 1.
+ */
+ if (!cpu_possible(index))
+ index = cpumask_next(index, cpu_possible_mask);
return &per_cpu(vmap_block_queue, index).vmap_blocks;
}
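
A purely illustrative walk-through of the fallback, assuming a sparse topology:

	/* Hypothetical example: cpu_possible_mask = {0, 2, 3}, nr_cpu_ids = 4.
	 * An address hashing to index 1 hits a hole: cpu_possible(1) is false,
	 * so cpumask_next(1, cpu_possible_mask) returns 2 and CPU 2's xarray
	 * is used. Index 3 is the highest possible CPU, so cpumask_next() is
	 * never needed for it.
	 */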
@@ -2625,8 +2634,15 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
free_vmap_area(va);
return ERR_PTR(err);
}
-
- vbq = raw_cpu_ptr(&vmap_block_queue);
+ /*
+ * list_add_tail_rcu() may run on a different CPU than vb->cpu
+ * because of task migration. This is safe, since
+ * list_add_tail_rcu() preserves the list's integrity together
+ * with list_for_each_rcu on the read side.
+ */
+ vb->cpu = raw_smp_processor_id();
+ vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
@@ -2654,9 +2670,10 @@ static void free_vmap_block(struct vmap_block *vb)
}
static bool purge_fragmented_block(struct vmap_block *vb,
- struct vmap_block_queue *vbq, struct list_head *purge_list,
- bool force_purge)
+ struct list_head *purge_list, bool force_purge)
{
+ struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
+
if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
vb->dirty == VMAP_BBMAP_BITS)
return false;
@@ -2704,7 +2721,7 @@ static void purge_fragmented_blocks(int cpu)
continue;
spin_lock(&vb->lock);
- purge_fragmented_block(vb, vbq, &purge, true);
+ purge_fragmented_block(vb, &purge, true);
spin_unlock(&vb->lock);
}
rcu_read_unlock();
@@ -2841,7 +2858,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
* not purgeable, check whether there is dirty
* space to be flushed.
*/
- if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
+ if (!purge_fragmented_block(vb, &purge_list, false) &&
vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
unsigned long va_start = vb->va->va_start;
unsigned long s, e;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d55e8d07ffc4..2e34de9cd0d4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1227,7 +1227,7 @@ retry:
THP_SWPOUT_FALLBACK, 1);
count_vm_event(THP_SWPOUT_FALLBACK);
}
- count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+ count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
#endif
if (!add_to_swap(folio))
goto activate_locked_split;
diff --git a/mm/workingset.c b/mm/workingset.c
index c22adb93622a..a2b28e356e68 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -412,10 +412,12 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
* @file: whether the corresponding folio is from the file lru.
* @workingset: where the workingset value unpacked from shadow should
* be stored.
+ * @flush: whether to flush cgroup rstat.
*
* Return: true if the shadow is for a recently evicted folio; false otherwise.
*/
-bool workingset_test_recent(void *shadow, bool file, bool *workingset)
+bool workingset_test_recent(void *shadow, bool file, bool *workingset,
+ bool flush)
{
struct mem_cgroup *eviction_memcg;
struct lruvec *eviction_lruvec;
@@ -467,10 +469,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
/*
* Flush stats (and potentially sleep) outside the RCU read section.
+ *
+ * Note that workingset_test_recent() itself might be called from an
+ * RCU read-side section (e.g. from cachestat), in which case the
+ * caller must skip flushing stats (via the flush argument).
+ *
* XXX: With per-memcg flushing and thresholding, is ratelimiting
* still needed here?
*/
- mem_cgroup_flush_stats_ratelimited(eviction_memcg);
+ if (flush)
+ mem_cgroup_flush_stats_ratelimited(eviction_memcg);
eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
refault = atomic_long_read(&eviction_lruvec->nonresident_age);
@@ -558,7 +566,7 @@ void workingset_refault(struct folio *folio, void *shadow)
mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
- if (!workingset_test_recent(shadow, file, &workingset))
+ if (!workingset_test_recent(shadow, file, &workingset, true))
return;
folio_set_active(folio);
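
A minimal caller sketch, assuming code that runs under rcu_read_lock() (as cachestat does) and therefore must not flush:

	bool workingset, recent;

	rcu_read_lock();
	/* flush == false: never sleep inside the RCU read-side section */
	recent = workingset_test_recent(shadow, file, &workingset, false);
	rcu_read_unlock();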
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3efba4f857ac..217be32426b5 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -677,7 +677,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
}
static int vlan_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return ethtool_get_ts_info_by_layer(vlan->real_dev, info);
diff --git a/net/9p/client.c b/net/9p/client.c
index 00774656eeac..5cd94721d974 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -236,6 +236,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
if (!fc->sdata)
return -ENOMEM;
fc->capacity = alloc_msize;
+ fc->id = 0;
+ fc->tag = P9_NOTAG;
return 0;
}
diff --git a/net/Kconfig b/net/Kconfig
index f0a8692496ff..d27d0deac0bf 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -290,15 +290,21 @@ config MAX_SKB_FRAGS
If unsure, say 17.
config RPS
- bool
+ bool "Receive packet steering"
depends on SMP && SYSFS
default y
+ help
+ Software receive side packet steering (RPS) distributes the
+ load of received packet processing across multiple CPUs.
config RFS_ACCEL
- bool
+ bool "Hardware acceleration of RFS"
depends on RPS
select CPU_RMAP
default y
+ help
+ Allow drivers for multiqueue hardware with flow filter tables to
+ accelerate RFS.
config SOCK_RX_QUEUE_MAPPING
bool
@@ -351,7 +357,7 @@ config BPF_STREAM_PARSER
BPF_MAP_TYPE_SOCKMAP.
config NET_FLOW_LIMIT
- bool
+ bool "Net flow limit"
depends on RPS
default y
help
@@ -502,6 +508,7 @@ config FAILOVER
config ETHTOOL_NETLINK
bool "Netlink interface for ethtool"
+ select DIMLIB
default y
help
An alternative userspace interface for ethtool based on generic
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index f81f8d56f5c0..0f7a39aeccc8 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -68,7 +68,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
goto done;
}
error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
- (int __user *)argp) ? -EFAULT : 0;
+ (int __user *)argp);
goto done;
case SIOCINQ:
{
@@ -83,7 +83,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
spin_unlock_irq(&sk->sk_receive_queue.lock);
- error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
+ error = put_user(amount, (int __user *)argp);
goto done;
}
case ATM_SETSC:
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 8077cf2ee448..d6f9fae06a9d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
{
struct sk_buff *skb;
struct sock *newsk;
+ ax25_dev *ax25_dev;
DEFINE_WAIT(wait);
struct sock *sk;
+ ax25_cb *ax25;
int err = 0;
if (sock->state != SS_UNCONNECTED)
@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
kfree_skb(skb);
sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
+ ax25 = sk_to_ax25(newsk);
+ ax25_dev = ax25->ax25_dev;
+ netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
+ ax25_dev_hold(ax25_dev);
out:
release_sock(sk);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 742d7c68e7e7..9efd6690b344 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -196,7 +196,7 @@ void __exit ax25_dev_free(void)
list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
netdev_put(s->dev, &s->dev_tracker);
list_del(&s->list);
- kfree(s);
+ ax25_dev_put(s);
}
spin_unlock_bh(&ax25_dev_lock);
}
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ac74f6ead62d..8f6dd2c6ee41 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -132,6 +133,29 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
}
/**
+ * batadv_vlan_id_valid() - check if vlan id is in valid batman-adv encoding
+ * @vid: the VLAN identifier
+ *
+ * Return: true when no VLAN is set or when the VLAN ID is in the valid
+ * range, false otherwise
+ */
+static bool batadv_vlan_id_valid(unsigned short vid)
+{
+ unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK);
+
+ if (vid == 0)
+ return true;
+
+ if (!(vid & BATADV_VLAN_HAS_TAG))
+ return false;
+
+ if (non_vlan)
+ return false;
+
+ return true;
+}
+
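+
A few illustrative values, assuming the usual definitions BATADV_VLAN_HAS_TAG == BIT(15) and VLAN_VID_MASK == 0x0fff:

	/* vid 0x0000 -> valid   (untagged)
	 * vid 0x8005 -> valid   (tag bit set, VLAN ID 5)
	 * vid 0x0005 -> invalid (VLAN ID without BATADV_VLAN_HAS_TAG)
	 * vid 0x9005 -> invalid (bit set outside VLAN_VID_MASK)
	 */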
+/**
* batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
* object
* @orig_node: the originator serving the VLAN
@@ -149,6 +173,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
{
struct batadv_orig_node_vlan *vlan;
+ if (!batadv_vlan_id_valid(vid))
+ return NULL;
+
spin_lock_bh(&orig_node->vlan_list_lock);
/* first look if an object for this vid already exists */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b21ff3c36b07..2243cec18ecc 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -209,6 +209,20 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
}
/**
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
+ * @rcu: rcu pointer of the tt_local_entry
+ */
+static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+
+ tt_local_entry = container_of(rcu, struct batadv_tt_local_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tl_cache, tt_local_entry);
+}
+
+/**
* batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
* for free after rcu grace period
* @ref: kref pointer of the tt_local_entry
@@ -222,7 +236,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
batadv_softif_vlan_put(tt_local_entry->vlan);
- kfree_rcu(tt_local_entry, common.rcu);
+ call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu);
}
/**
@@ -241,6 +255,20 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
}
/**
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
+ * @rcu: rcu pointer of the tt_global_entry
+ */
+static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+
+ tt_global_entry = container_of(rcu, struct batadv_tt_global_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tg_cache, tt_global_entry);
+}
+
+/**
* batadv_tt_global_entry_release() - release tt_global_entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the tt_global_entry
@@ -254,7 +282,7 @@ void batadv_tt_global_entry_release(struct kref *ref)
batadv_tt_global_del_orig_list(tt_global_entry);
- kfree_rcu(tt_global_entry, common.rcu);
+ call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu);
}
/**
@@ -380,6 +408,19 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
}
/**
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
+ * @rcu: rcu pointer of the orig_entry
+ */
+static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+
+ kmem_cache_free(batadv_tt_orig_cache, orig_entry);
+}
+
+/**
* batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the tt orig entry
@@ -392,7 +433,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
refcount);
batadv_orig_node_put(orig_entry->orig_node);
- kfree_rcu(orig_entry, rcu);
+ call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
/**
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 628d448d78be..5a3835b7dfcd 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -14,8 +14,7 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
- ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \
- eir.o hci_sync.o
+ ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o
bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 0c76dcde5361..8e48ccd2af30 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -34,7 +34,6 @@
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "smp.h"
#include "eir.h"
@@ -899,8 +898,8 @@ static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
U16_MAX, GFP_ATOMIC);
}
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role, u16 handle)
+static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role, u16 handle)
{
struct hci_conn *conn;
@@ -1041,7 +1040,16 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
if (unlikely(handle < 0))
return ERR_PTR(-ECONNREFUSED);
- return hci_conn_add(hdev, type, dst, role, handle);
+ return __hci_conn_add(hdev, type, dst, role, handle);
+}
+
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role, u16 handle)
+{
+ if (handle > HCI_CONN_HANDLE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ return __hci_conn_add(hdev, type, dst, role, handle);
}
static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index dd3b0f501018..8a4ebd93adfc 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -40,7 +40,6 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
@@ -63,50 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
-static int hci_scan_req(struct hci_request *req, unsigned long opt)
-{
- __u8 scan = opt;
-
- BT_DBG("%s %x", req->hdev->name, scan);
-
- /* Inquiry and Page scans */
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- return 0;
-}
-
-static int hci_auth_req(struct hci_request *req, unsigned long opt)
-{
- __u8 auth = opt;
-
- BT_DBG("%s %x", req->hdev->name, auth);
-
- /* Authentication */
- hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
- return 0;
-}
-
-static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
-{
- __u8 encrypt = opt;
-
- BT_DBG("%s %x", req->hdev->name, encrypt);
-
- /* Encryption */
- hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
- return 0;
-}
-
-static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
-{
- __le16 policy = cpu_to_le16(opt);
-
- BT_DBG("%s %x", req->hdev->name, policy);
-
- /* Default link policy */
- hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
- return 0;
-}
-
/* Get HCI device by index.
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
@@ -356,33 +311,12 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
return copied;
}
-static int hci_inq_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_inquiry cp;
-
- BT_DBG("%s", hdev->name);
-
- if (test_bit(HCI_INQUIRY, &hdev->flags))
- return 0;
-
- /* Start Inquiry */
- memcpy(&cp.lap, &ir->lap, 3);
- cp.length = ir->length;
- cp.num_rsp = ir->num_rsp;
- hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
-
- return 0;
-}
-
int hci_inquiry(void __user *arg)
{
__u8 __user *ptr = arg;
struct hci_inquiry_req ir;
struct hci_dev *hdev;
int err = 0, do_inquiry = 0, max_rsp;
- long timeo;
__u8 *buf;
if (copy_from_user(&ir, ptr, sizeof(ir)))
@@ -421,11 +355,11 @@ int hci_inquiry(void __user *arg)
}
hci_dev_unlock(hdev);
- timeo = ir.length * msecs_to_jiffies(2000);
-
if (do_inquiry) {
- err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
- timeo, NULL);
+ hci_req_sync_lock(hdev);
+ err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
+ hci_req_sync_unlock(hdev);
+
if (err < 0)
goto done;
@@ -735,6 +669,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
struct hci_dev *hdev;
struct hci_dev_req dr;
+ __le16 policy;
int err = 0;
if (copy_from_user(&dr, arg, sizeof(dr)))
@@ -761,8 +696,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
switch (cmd) {
case HCISETAUTH:
- err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
+ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
break;
case HCISETENCRYPT:
@@ -773,19 +708,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
- err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev,
+ HCI_OP_WRITE_AUTH_ENABLE,
+ 1, &dr.dev_opt,
+ HCI_CMD_TIMEOUT);
if (err)
break;
}
- err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
+ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
break;
case HCISETSCAN:
- err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
+ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
/* Ensure that the connectable and discoverable states
* get correctly modified as this was a non-mgmt change.
@@ -795,8 +732,10 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
break;
case HCISETLINKPOL:
- err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
- HCI_INIT_TIMEOUT, NULL);
+ policy = cpu_to_le16(dr.dev_opt);
+
+ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+ 2, &policy, HCI_CMD_TIMEOUT);
break;
case HCISETLINKMODE:
@@ -837,7 +776,7 @@ int hci_get_dev_list(void __user *arg)
struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
- int n = 0, size, err;
+ int n = 0, err;
__u16 dev_num;
if (get_user(dev_num, (__u16 __user *) arg))
@@ -846,12 +785,11 @@ int hci_get_dev_list(void __user *arg)
if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
return -EINVAL;
- size = sizeof(*dl) + dev_num * sizeof(*dr);
-
- dl = kzalloc(size, GFP_KERNEL);
+ dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
if (!dl)
return -ENOMEM;
+ dl->dev_num = dev_num;
dr = dl->dev_req;
read_lock(&hci_dev_list_lock);
@@ -865,8 +803,8 @@ int hci_get_dev_list(void __user *arg)
if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags &= ~BIT(HCI_UP);
- (dr + n)->dev_id = hdev->id;
- (dr + n)->dev_opt = flags;
+ dr[n].dev_id = hdev->id;
+ dr[n].dev_opt = flags;
if (++n >= dev_num)
break;
@@ -874,9 +812,7 @@ int hci_get_dev_list(void __user *arg)
read_unlock(&hci_dev_list_lock);
dl->dev_num = n;
- size = sizeof(*dl) + n * sizeof(*dr);
-
- err = copy_to_user(arg, dl, size);
+ err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
kfree(dl);
return err ? -EFAULT : 0;
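
For reference, a sketch of the struct_size() idiom used above, assuming a structure that ends in a flexible array member dev_req[]:

	/* Equivalent to sizeof(*dl) + dev_num * sizeof(dl->dev_req[0]),
	 * but saturates to SIZE_MAX instead of overflowing. */
	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);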
@@ -2615,7 +2551,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
hci_devcd_setup(hdev);
- hci_request_setup(hdev);
hci_init_sysfs(hdev);
discovery_init(hdev);
@@ -2751,7 +2686,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
+ cancel_work_sync(&hdev->rx_work);
+ cancel_work_sync(&hdev->cmd_work);
+ cancel_work_sync(&hdev->tx_work);
cancel_work_sync(&hdev->power_on);
+ cancel_work_sync(&hdev->error_reset);
hci_cmd_sync_clear(hdev);
@@ -2944,15 +2883,31 @@ int hci_reset_dev(struct hci_dev *hdev)
}
EXPORT_SYMBOL(hci_reset_dev);
+static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ if (hdev->classify_pkt_type)
+ return hdev->classify_pkt_type(hdev, skb);
+
+ return hci_skb_pkt_type(skb);
+}
+
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
+ u8 dev_pkt_type;
+
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
+	/* Check if the driver agrees with the packet type classification */
+ dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
+	if (hci_skb_pkt_type(skb) != dev_pkt_type)
+		hci_skb_pkt_type(skb) = dev_pkt_type;
+
switch (hci_skb_pkt_type(skb)) {
case HCI_EVENT_PKT:
break;
@@ -3097,7 +3052,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
if (!skb) {
bt_dev_err(hdev, "no memory for command");
return -ENOMEM;
@@ -3132,7 +3087,7 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
return -EINVAL;
}
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
if (!skb) {
bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
opcode);
@@ -4117,7 +4072,7 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (hci_req_status_pend(hdev) &&
+ if (hdev->req_status == HCI_REQ_PEND &&
!hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index ce3ff2fa72e5..f625074d1f00 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -28,7 +28,6 @@
#include <net/bluetooth/hci_core.h>
#include "smp.h"
-#include "hci_request.h"
#include "hci_debugfs.h"
#define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a487f9df8145..dce8035ca799 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -33,7 +33,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
@@ -6311,6 +6310,13 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
+
+ if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
+ &hdev->quirks)) {
+ info->primary_phy &= 0x1f;
+ info->secondary_phy &= 0x1f;
+ }
+
if (legacy_evt_type != LE_ADV_INVALID) {
process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
info->bdaddr_type, NULL, 0,
@@ -6660,6 +6666,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
struct bt_iso_qos *qos;
bool pending = false;
u16 handle = __le16_to_cpu(ev->handle);
+ u32 c_sdu_interval, p_sdu_interval;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -6684,12 +6691,25 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
- /* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
- qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
- qos->ucast.out.interval = qos->ucast.in.interval;
+ /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
+ * page 3075:
+ * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
+ * ISO_Interval + SDU_Interval_C_To_P
+ * ...
+ * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
+ * Transport_Latency
+ */
+ c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
+ (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
+ get_unaligned_le24(ev->c_latency);
+ p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
+ (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
+ get_unaligned_le24(ev->p_latency);
switch (conn->role) {
case HCI_ROLE_SLAVE:
+ qos->ucast.in.interval = c_sdu_interval;
+ qos->ucast.out.interval = p_sdu_interval;
/* Convert Transport Latency (us) to Latency (msec) */
qos->ucast.in.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
@@ -6703,6 +6723,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
qos->ucast.out.phy = ev->p_phy;
break;
case HCI_ROLE_MASTER:
+ qos->ucast.in.interval = p_sdu_interval;
+ qos->ucast.out.interval = c_sdu_interval;
/* Convert Transport Latency (us) to Latency (msec) */
qos->ucast.out.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
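
A purely illustrative calculation with made-up numbers, to show the unit handling (the interval field counts 1.25 ms slots, delays and latencies are in microseconds):

	/* CIG_Sync_Delay = 5000 us, FT = 2, interval = 8 (i.e. 10 ms),
	 * Transport_Latency = 15000 us:
	 * SDU_Interval = 5000 + 2 * 8 * 1250 - 15000 = 10000 us
	 */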
@@ -6893,6 +6915,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bis = hci_conn_hash_lookup_handle(hdev, handle);
if (!bis) {
+ if (handle > HCI_CONN_HANDLE_MAX) {
+ bt_dev_dbg(hdev, "ignore too large handle %u", handle);
+ continue;
+ }
bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
HCI_ROLE_SLAVE, handle);
if (IS_ERR(bis))
@@ -6961,6 +6987,8 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
if (!pa_sync)
goto unlock;
+ pa_sync->iso_qos.bcast.encryption = ev->encryption;
+
/* Notify iso layer */
hci_connect_cfm(pa_sync, 0);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
deleted file mode 100644
index efea25eb56ce..000000000000
--- a/net/bluetooth/hci_request.c
+++ /dev/null
@@ -1,903 +0,0 @@
-/*
- BlueZ - Bluetooth protocol stack for Linux
-
- Copyright (C) 2014 Intel Corporation
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 2 as
- published by the Free Software Foundation;
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
- IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
- SOFTWARE IS DISCLAIMED.
-*/
-
-#include <linux/sched/signal.h>
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/mgmt.h>
-
-#include "smp.h"
-#include "hci_request.h"
-#include "msft.h"
-#include "eir.h"
-
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
-{
- skb_queue_head_init(&req->cmd_q);
- req->hdev = hdev;
- req->err = 0;
-}
-
-void hci_req_purge(struct hci_request *req)
-{
- skb_queue_purge(&req->cmd_q);
-}
-
-bool hci_req_status_pend(struct hci_dev *hdev)
-{
- return hdev->req_status == HCI_REQ_PEND;
-}
-
-static int req_run(struct hci_request *req, hci_req_complete_t complete,
- hci_req_complete_skb_t complete_skb)
-{
- struct hci_dev *hdev = req->hdev;
- struct sk_buff *skb;
- unsigned long flags;
-
- bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
-
- /* If an error occurred during request building, remove all HCI
- * commands queued on the HCI request queue.
- */
- if (req->err) {
- skb_queue_purge(&req->cmd_q);
- return req->err;
- }
-
- /* Do not allow empty requests */
- if (skb_queue_empty(&req->cmd_q))
- return -ENODATA;
-
- skb = skb_peek_tail(&req->cmd_q);
- if (complete) {
- bt_cb(skb)->hci.req_complete = complete;
- } else if (complete_skb) {
- bt_cb(skb)->hci.req_complete_skb = complete_skb;
- bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
- }
-
- spin_lock_irqsave(&hdev->cmd_q.lock, flags);
- skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
- spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
- queue_work(hdev->workqueue, &hdev->cmd_work);
-
- return 0;
-}
-
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
-{
- return req_run(req, complete, NULL);
-}
-
-int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
-{
- return req_run(req, NULL, complete);
-}
-
-void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
- struct sk_buff *skb)
-{
- bt_dev_dbg(hdev, "result 0x%2.2x", result);
-
- if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = result;
- hdev->req_status = HCI_REQ_DONE;
- if (skb) {
- kfree_skb(hdev->req_skb);
- hdev->req_skb = skb_get(skb);
- }
- wake_up_interruptible(&hdev->req_wait_q);
- }
-}
-
-/* Execute request and wait for completion. */
-int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status)
-{
- struct hci_request req;
- int err = 0;
-
- bt_dev_dbg(hdev, "start");
-
- hci_req_init(&req, hdev);
-
- hdev->req_status = HCI_REQ_PEND;
-
- err = func(&req, opt);
- if (err) {
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
- return err;
- }
-
- err = hci_req_run_skb(&req, hci_req_sync_complete);
- if (err < 0) {
- hdev->req_status = 0;
-
- /* ENODATA means the HCI request command queue is empty.
- * This can happen when a request with conditionals doesn't
- * trigger any commands to be sent. This is normal behavior
- * and should not trigger an error return.
- */
- if (err == -ENODATA) {
- if (hci_status)
- *hci_status = 0;
- return 0;
- }
-
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
-
- return err;
- }
-
- err = wait_event_interruptible_timeout(hdev->req_wait_q,
- hdev->req_status != HCI_REQ_PEND, timeout);
-
- if (err == -ERESTARTSYS)
- return -EINTR;
-
- switch (hdev->req_status) {
- case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
- if (hci_status)
- *hci_status = hdev->req_result;
- break;
-
- case HCI_REQ_CANCELED:
- err = -hdev->req_result;
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
- break;
-
- default:
- err = -ETIMEDOUT;
- if (hci_status)
- *hci_status = HCI_ERROR_UNSPECIFIED;
- break;
- }
-
- kfree_skb(hdev->req_skb);
- hdev->req_skb = NULL;
- hdev->req_status = hdev->req_result = 0;
-
- bt_dev_dbg(hdev, "end: err %d", err);
-
- return err;
-}
-
-int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status)
-{
- int ret;
-
- /* Serialize all requests */
- hci_req_sync_lock(hdev);
- /* check the state after obtaing the lock to protect the HCI_UP
- * against any races from hci_dev_do_close when the controller
- * gets removed.
- */
- if (test_bit(HCI_UP, &hdev->flags))
- ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
- else
- ret = -ENETDOWN;
- hci_req_sync_unlock(hdev);
-
- return ret;
-}
-
-struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param)
-{
- int len = HCI_COMMAND_HDR_SIZE + plen;
- struct hci_command_hdr *hdr;
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb)
- return NULL;
-
- hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
- hdr->opcode = cpu_to_le16(opcode);
- hdr->plen = plen;
-
- if (plen)
- skb_put_data(skb, param, plen);
-
- bt_dev_dbg(hdev, "skb len %d", skb->len);
-
- hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
- hci_skb_opcode(skb) = opcode;
-
- return skb;
-}
-
-/* Queue a command to an asynchronous HCI request */
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
- const void *param, u8 event)
-{
- struct hci_dev *hdev = req->hdev;
- struct sk_buff *skb;
-
- bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
-
- /* If an error occurred during request building, there is no point in
- * queueing the HCI command. We can simply return.
- */
- if (req->err)
- return;
-
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
- if (!skb) {
- bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
- opcode);
- req->err = -ENOMEM;
- return;
- }
-
- if (skb_queue_empty(&req->cmd_q))
- bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
-
- hci_skb_event(skb) = event;
-
- skb_queue_tail(&req->cmd_q, skb);
-}
-
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
- const void *param)
-{
- bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
- hci_req_add_ev(req, opcode, plen, param, 0);
-}
-
-static void start_interleave_scan(struct hci_dev *hdev)
-{
- hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
- queue_delayed_work(hdev->req_workqueue,
- &hdev->interleave_scan, 0);
-}
-
-static bool is_interleave_scanning(struct hci_dev *hdev)
-{
- return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
-}
-
-static void cancel_interleave_scan(struct hci_dev *hdev)
-{
- bt_dev_dbg(hdev, "cancelling interleave scan");
-
- cancel_delayed_work_sync(&hdev->interleave_scan);
-
- hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
-}
-
-/* Return true if interleave_scan wasn't started until exiting this function,
- * otherwise, return false
- */
-static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
-{
- /* Do interleaved scan only if all of the following are true:
- * - There is at least one ADV monitor
- * - At least one pending LE connection or one device to be scanned for
- * - Monitor offloading is not supported
- * If so, we should alternate between allowlist scan and one without
- * any filters to save power.
- */
- bool use_interleaving = hci_is_adv_monitoring(hdev) &&
- !(list_empty(&hdev->pend_le_conns) &&
- list_empty(&hdev->pend_le_reports)) &&
- hci_get_adv_monitor_offload_ext(hdev) ==
- HCI_ADV_MONITOR_EXT_NONE;
- bool is_interleaving = is_interleave_scanning(hdev);
-
- if (use_interleaving && !is_interleaving) {
- start_interleave_scan(hdev);
- bt_dev_dbg(hdev, "starting interleave scan");
- return true;
- }
-
- if (!use_interleaving && is_interleaving)
- cancel_interleave_scan(hdev);
-
- return false;
-}
-
-void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (hdev->scanning_paused) {
- bt_dev_dbg(hdev, "Scanning is paused for suspend");
- return;
- }
-
- if (use_ext_scan(hdev)) {
- struct hci_cp_le_set_ext_scan_enable cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_DISABLE;
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
- &cp);
- } else {
- struct hci_cp_le_set_scan_enable cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_DISABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
- }
-
- /* Disable address resolution */
- if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
- __u8 enable = 0x00;
-
- hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
- }
-}
-
-static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
- u8 bdaddr_type)
-{
- struct hci_cp_le_del_from_accept_list cp;
-
- cp.bdaddr_type = bdaddr_type;
- bacpy(&cp.bdaddr, bdaddr);
-
- bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
- cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
-
- if (use_ll_privacy(req->hdev)) {
- struct smp_irk *irk;
-
- irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
- if (irk) {
- struct hci_cp_le_del_from_resolv_list cp;
-
- cp.bdaddr_type = bdaddr_type;
- bacpy(&cp.bdaddr, bdaddr);
-
- hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
- sizeof(cp), &cp);
- }
- }
-}
-
-/* Adds connection to accept list if needed. On error, returns -1. */
-static int add_to_accept_list(struct hci_request *req,
- struct hci_conn_params *params, u8 *num_entries,
- bool allow_rpa)
-{
- struct hci_cp_le_add_to_accept_list cp;
- struct hci_dev *hdev = req->hdev;
-
- /* Already in accept list */
- if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
- params->addr_type))
- return 0;
-
- /* Select filter policy to accept all advertising */
- if (*num_entries >= hdev->le_accept_list_size)
- return -1;
-
- /* Accept list can not be used with RPAs */
- if (!allow_rpa &&
- !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
- return -1;
- }
-
- /* During suspend, only wakeable devices can be in accept list */
- if (hdev->suspended &&
- !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
- return 0;
-
- *num_entries += 1;
- cp.bdaddr_type = params->addr_type;
- bacpy(&cp.bdaddr, &params->addr);
-
- bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
- cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
-
- if (use_ll_privacy(hdev)) {
- struct smp_irk *irk;
-
- irk = hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type);
- if (irk) {
- struct hci_cp_le_add_to_resolv_list cp;
-
- cp.bdaddr_type = params->addr_type;
- bacpy(&cp.bdaddr, &params->addr);
- memcpy(cp.peer_irk, irk->val, 16);
-
- if (hci_dev_test_flag(hdev, HCI_PRIVACY))
- memcpy(cp.local_irk, hdev->irk, 16);
- else
- memset(cp.local_irk, 0, 16);
-
- hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
- sizeof(cp), &cp);
- }
- }
-
- return 0;
-}
-
-static u8 update_accept_list(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_conn_params *params;
- struct bdaddr_list *b;
- u8 num_entries = 0;
- bool pend_conn, pend_report;
- /* We allow usage of accept list even with RPAs in suspend. In the worst
- * case, we won't be able to wake from devices that use the privacy1.2
- * features. Additionally, once we support privacy1.2 and IRK
- * offloading, we can update this to also check for those conditions.
- */
- bool allow_rpa = hdev->suspended;
-
- if (use_ll_privacy(hdev))
- allow_rpa = true;
-
- /* Go through the current accept list programmed into the
- * controller one by one and check if that address is still
- * in the list of pending connections or list of devices to
- * report. If not present in either list, then queue the
- * command to remove it from the controller.
- */
- list_for_each_entry(b, &hdev->le_accept_list, list) {
- pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
- &b->bdaddr,
- b->bdaddr_type);
- pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
- &b->bdaddr,
- b->bdaddr_type);
-
- /* If the device is not likely to connect or report,
- * remove it from the accept list.
- */
- if (!pend_conn && !pend_report) {
- del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
- continue;
- }
-
- /* Accept list can not be used with RPAs */
- if (!allow_rpa &&
- !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
- return 0x00;
- }
-
- num_entries++;
- }
-
- /* Since all no longer valid accept list entries have been
- * removed, walk through the list of pending connections
- * and ensure that any new device gets programmed into
- * the controller.
- *
- * If the list of the devices is larger than the list of
- * available accept list entries in the controller, then
- * just abort and return filer policy value to not use the
- * accept list.
- */
- list_for_each_entry(params, &hdev->pend_le_conns, action) {
- if (add_to_accept_list(req, params, &num_entries, allow_rpa))
- return 0x00;
- }
-
- /* After adding all new pending connections, walk through
- * the list of pending reports and also add these to the
- * accept list if there is still space. Abort if space runs out.
- */
- list_for_each_entry(params, &hdev->pend_le_reports, action) {
- if (add_to_accept_list(req, params, &num_entries, allow_rpa))
- return 0x00;
- }
-
- /* Use the allowlist unless the following conditions are all true:
- * - We are not currently suspending
- * - There are 1 or more ADV monitors registered and it's not offloaded
- * - Interleaved scanning is not currently using the allowlist
- */
- if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
- hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
- hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
- return 0x00;
-
- /* Select filter policy to use accept list */
- return 0x01;
-}
-
-static bool scan_use_rpa(struct hci_dev *hdev)
-{
- return hci_dev_test_flag(hdev, HCI_PRIVACY);
-}
-
-static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
- u16 window, u8 own_addr_type, u8 filter_policy,
- bool filter_dup, bool addr_resolv)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (hdev->scanning_paused) {
- bt_dev_dbg(hdev, "Scanning is paused for suspend");
- return;
- }
-
- if (use_ll_privacy(hdev) && addr_resolv) {
- u8 enable = 0x01;
-
- hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
- }
-
- /* Use ext scanning if set ext scan param and ext scan enable is
- * supported
- */
- if (use_ext_scan(hdev)) {
- struct hci_cp_le_set_ext_scan_params *ext_param_cp;
- struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
- struct hci_cp_le_scan_phy_params *phy_params;
- u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
- u32 plen;
-
- ext_param_cp = (void *)data;
- phy_params = (void *)ext_param_cp->data;
-
- memset(ext_param_cp, 0, sizeof(*ext_param_cp));
- ext_param_cp->own_addr_type = own_addr_type;
- ext_param_cp->filter_policy = filter_policy;
-
- plen = sizeof(*ext_param_cp);
-
- if (scan_1m(hdev) || scan_2m(hdev)) {
- ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
-
- memset(phy_params, 0, sizeof(*phy_params));
- phy_params->type = type;
- phy_params->interval = cpu_to_le16(interval);
- phy_params->window = cpu_to_le16(window);
-
- plen += sizeof(*phy_params);
- phy_params++;
- }
-
- if (scan_coded(hdev)) {
- ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
-
- memset(phy_params, 0, sizeof(*phy_params));
- phy_params->type = type;
- phy_params->interval = cpu_to_le16(interval);
- phy_params->window = cpu_to_le16(window);
-
- plen += sizeof(*phy_params);
- phy_params++;
- }
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
- plen, ext_param_cp);
-
- memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
- ext_enable_cp.enable = LE_SCAN_ENABLE;
- ext_enable_cp.filter_dup = filter_dup;
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
- sizeof(ext_enable_cp), &ext_enable_cp);
- } else {
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
-
- memset(&param_cp, 0, sizeof(param_cp));
- param_cp.type = type;
- param_cp.interval = cpu_to_le16(interval);
- param_cp.window = cpu_to_le16(window);
- param_cp.own_address_type = own_addr_type;
- param_cp.filter_policy = filter_policy;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
-
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = filter_dup;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
- }
-}
-
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
-static int hci_update_random_address(struct hci_request *req,
- bool require_privacy, bool use_rpa,
- u8 *own_addr_type)
-{
- struct hci_dev *hdev = req->hdev;
- int err;
-
- /* If privacy is enabled use a resolvable private address. If
- * current RPA has expired or there is something else than
- * the current RPA in use, then generate a new one.
- */
- if (use_rpa) {
- /* If Controller supports LL Privacy use own address type is
- * 0x03
- */
- if (use_ll_privacy(hdev))
- *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
- else
- *own_addr_type = ADDR_LE_DEV_RANDOM;
-
- if (rpa_valid(hdev))
- return 0;
-
- err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
- if (err < 0) {
- bt_dev_err(hdev, "failed to generate new RPA");
- return err;
- }
-
- set_random_addr(req, &hdev->rpa);
-
- return 0;
- }
-
- /* In case of required privacy without resolvable private address,
- * use an non-resolvable private address. This is useful for active
- * scanning and non-connectable advertising.
- */
- if (require_privacy) {
- bdaddr_t nrpa;
-
- while (true) {
- /* The non-resolvable private address is generated
- * from random six bytes with the two most significant
- * bits cleared.
- */
- get_random_bytes(&nrpa, 6);
- nrpa.b[5] &= 0x3f;
-
- /* The non-resolvable private address shall not be
- * equal to the public address.
- */
- if (bacmp(&hdev->bdaddr, &nrpa))
- break;
- }
-
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- set_random_addr(req, &nrpa);
- return 0;
- }
-
- /* If forcing static address is in use or there is no public
- * address use the static address as random address (but skip
- * the HCI command if the current random address is already the
- * static one.
- *
- * In case BR/EDR has been disabled on a dual-mode controller
- * and a static address has been configured, then use that
- * address instead of the public BR/EDR address.
- */
- if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
- !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
- (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
- bacmp(&hdev->static_addr, BDADDR_ANY))) {
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- if (bacmp(&hdev->static_addr, &hdev->random_addr))
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
- &hdev->static_addr);
- return 0;
- }
-
- /* Neither privacy nor static address is being used so use a
- * public address.
- */
- *own_addr_type = ADDR_LE_DEV_PUBLIC;
-
- return 0;
-}
-
-/* Ensure to call hci_req_add_le_scan_disable() first to disable the
- * controller based address resolution to be able to reconfigure
- * resolving list.
- */
-void hci_req_add_le_passive_scan(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 own_addr_type;
- u8 filter_policy;
- u16 window, interval;
- /* Default is to enable duplicates filter */
- u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- /* Background scanning should run with address resolution */
- bool addr_resolv = true;
-
- if (hdev->scanning_paused) {
- bt_dev_dbg(hdev, "Scanning is paused for suspend");
- return;
- }
-
- /* Set require_privacy to false since no SCAN_REQ are send
- * during passive scanning. Not using an non-resolvable address
- * here is important so that peer devices using direct
- * advertising with our address will be correctly reported
- * by the controller.
- */
- if (hci_update_random_address(req, false, scan_use_rpa(hdev),
- &own_addr_type))
- return;
-
- if (hdev->enable_advmon_interleave_scan &&
- __hci_update_interleaved_scan(hdev))
- return;
-
- bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
- /* Adding or removing entries from the accept list must
- * happen before enabling scanning. The controller does
- * not allow accept list modification while scanning.
- */
- filter_policy = update_accept_list(req);
-
- /* When the controller is using random resolvable addresses and
- * with that having LE privacy enabled, then controllers with
- * Extended Scanner Filter Policies support can now enable support
- * for handling directed advertising.
- *
- * So instead of using filter polices 0x00 (no accept list)
- * and 0x01 (accept list enabled) use the new filter policies
- * 0x02 (no accept list) and 0x03 (accept list enabled).
- */
- if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
- (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
- filter_policy |= 0x02;
-
- if (hdev->suspended) {
- window = hdev->le_scan_window_suspend;
- interval = hdev->le_scan_int_suspend;
- } else if (hci_is_le_conn_scanning(hdev)) {
- window = hdev->le_scan_window_connect;
- interval = hdev->le_scan_int_connect;
- } else if (hci_is_adv_monitoring(hdev)) {
- window = hdev->le_scan_window_adv_monitor;
- interval = hdev->le_scan_int_adv_monitor;
-
- /* Disable duplicates filter when scanning for advertisement
- * monitor for the following reasons.
- *
- * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
- * controllers ignore RSSI_Sampling_Period when the duplicates
- * filter is enabled.
- *
- * For SW pattern filtering, when we're not doing interleaved
- * scanning, it is necessary to disable duplicates filter,
- * otherwise hosts can only receive one advertisement and it's
- * impossible to know if a peer is still in range.
- */
- filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
- } else {
- window = hdev->le_scan_window;
- interval = hdev->le_scan_interval;
- }
-
- bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
- filter_policy);
- hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
- own_addr_type, filter_policy, filter_dup,
- addr_resolv);
-}
-
-static int hci_req_add_le_interleaved_scan(struct hci_request *req,
- unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
- int ret = 0;
-
- hci_dev_lock(hdev);
-
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
- hci_req_add_le_scan_disable(req, false);
- hci_req_add_le_passive_scan(req);
-
- switch (hdev->interleave_scan_state) {
- case INTERLEAVE_SCAN_ALLOWLIST:
- bt_dev_dbg(hdev, "next state: allowlist");
- hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
- break;
- case INTERLEAVE_SCAN_NO_FILTER:
- bt_dev_dbg(hdev, "next state: no filter");
- hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
- break;
- case INTERLEAVE_SCAN_NONE:
- BT_ERR("unexpected error");
- ret = -1;
- }
-
- hci_dev_unlock(hdev);
-
- return ret;
-}
-
-static void interleave_scan_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- interleave_scan.work);
- u8 status;
- unsigned long timeout;
-
- if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
- timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
- } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
- timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
- } else {
- bt_dev_err(hdev, "unexpected error");
- return;
- }
-
- hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
- HCI_CMD_TIMEOUT, &status);
-
- /* Don't continue interleaving if it was canceled */
- if (is_interleave_scanning(hdev))
- queue_delayed_work(hdev->req_workqueue,
- &hdev->interleave_scan, timeout);
-}
-
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* If we're advertising or initiating an LE connection we can't
- * go ahead and change the random address at this time. This is
- * because the eventual initiator address used for the
- * subsequently created connection will be undefined (some
- * controllers use the new address and others the one we had
- * when the operation started).
- *
- * In this kind of scenario skip the update and let the random
- * address be updated at the next cycle.
- */
- if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
- hci_lookup_le_connect(hdev)) {
- bt_dev_dbg(hdev, "Deferring random address update");
- hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
- return;
- }
-
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
-}
-
-void hci_request_setup(struct hci_dev *hdev)
-{
- INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
-}
-
-void hci_request_cancel_all(struct hci_dev *hdev)
-{
- hci_cmd_sync_cancel_sync(hdev, ENODEV);
-
- cancel_interleave_scan(hdev);
-}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
deleted file mode 100644
index c91f2838f542..000000000000
--- a/net/bluetooth/hci_request.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2014 Intel Corporation
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 2 as
- published by the Free Software Foundation;
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
- IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
- SOFTWARE IS DISCLAIMED.
-*/
-
-#include <asm/unaligned.h>
-
-#define HCI_REQ_DONE 0
-#define HCI_REQ_PEND 1
-#define HCI_REQ_CANCELED 2
-
-#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
-#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
-
-struct hci_request {
- struct hci_dev *hdev;
- struct sk_buff_head cmd_q;
-
- /* If something goes wrong when building the HCI request, the error
- * value is stored in this field.
- */
- int err;
-};
-
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
-void hci_req_purge(struct hci_request *req);
-bool hci_req_status_pend(struct hci_dev *hdev);
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
-int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
-void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
- struct sk_buff *skb);
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
- const void *param);
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
- const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
- hci_req_complete_t *req_complete,
- hci_req_complete_skb_t *req_complete_skb);
-
-int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status);
-int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, u32 timeout, u8 *hci_status);
-
-struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param);
-
-void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
-void hci_req_add_le_passive_scan(struct hci_request *req);
-
-void hci_request_setup(struct hci_dev *hdev);
-void hci_request_cancel_all(struct hci_dev *hdev);
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 16daa79b7981..cd2ed16da8a4 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -12,7 +12,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
@@ -49,9 +48,8 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
wake_up_interruptible(&hdev->req_wait_q);
}
-static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
- u32 plen, const void *param,
- struct sock *sk)
+struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, struct sock *sk)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
@@ -147,6 +145,13 @@ static int hci_cmd_sync_run(struct hci_request *req)
return 0;
}
+static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
+{
+ skb_queue_head_init(&req->cmd_q);
+ req->hdev = hdev;
+ req->err = 0;
+}
+
/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
@@ -158,7 +163,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
- hci_req_init(&req, hdev);
+ hci_request_init(&req, hdev);
hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
@@ -280,6 +285,19 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
}
EXPORT_SYMBOL(__hci_cmd_sync_status);
+int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ int err;
+
+ hci_req_sync_lock(hdev);
+ err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
+ hci_req_sync_unlock(hdev);
+
+ return err;
+}
+EXPORT_SYMBOL(hci_cmd_sync_status);
+
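+
A brief usage sketch, mirroring the HCISETAUTH path converted earlier in this series; the wrapper takes hdev->req_lock itself, so the caller must not already hold it:

	__u8 enable = 0x01;

	err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
				  1, &enable, HCI_CMD_TIMEOUT);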
static void hci_cmd_sync_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
@@ -334,10 +352,9 @@ static int scan_disable_sync(struct hci_dev *hdev, void *data)
return hci_scan_disable_sync(hdev);
}
-static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
- return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
+ return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}
static void le_scan_disable(struct work_struct *work)
@@ -358,8 +375,6 @@ static void le_scan_disable(struct work_struct *work)
goto _return;
}
- hdev->discovery.scan_start = 0;
-
/* If we were running LE only scan, change discovery state. If
* we were running both LE and BR/EDR inquiry simultaneously,
* and BR/EDR inquiry is already finished, stop discovery,
@@ -557,6 +572,53 @@ unlock:
hci_dev_unlock(hdev);
}
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+ return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static int hci_passive_scan_sync(struct hci_dev *hdev);
+
+static void interleave_scan_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ interleave_scan.work);
+ unsigned long timeout;
+
+ if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
+ timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
+ } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
+ timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
+ } else {
+ bt_dev_err(hdev, "unexpected error");
+ return;
+ }
+
+ hci_passive_scan_sync(hdev);
+
+ hci_dev_lock(hdev);
+
+ switch (hdev->interleave_scan_state) {
+ case INTERLEAVE_SCAN_ALLOWLIST:
+ bt_dev_dbg(hdev, "next state: allowlist");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ break;
+ case INTERLEAVE_SCAN_NO_FILTER:
+ bt_dev_dbg(hdev, "next state: no filter");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
+ break;
+ case INTERLEAVE_SCAN_NONE:
+ bt_dev_err(hdev, "unexpected error");
+ }
+
+ hci_dev_unlock(hdev);
+
+ /* Don't continue interleaving if it was canceled */
+ if (is_interleave_scanning(hdev))
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, timeout);
+}
+
void hci_cmd_sync_init(struct hci_dev *hdev)
{
INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
@@ -568,6 +630,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
+ INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
@@ -1194,7 +1257,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
- cp.handle = instance;
+ cp.handle = adv ? adv->handle : instance;
if (flags & MGMT_ADV_FLAG_SEC_2M) {
cp.primary_phy = HCI_ADV_PHY_1M;
@@ -2101,11 +2164,6 @@ static void hci_start_interleave_scan(struct hci_dev *hdev)
&hdev->interleave_scan, 0);
}
-static bool is_interleave_scanning(struct hci_dev *hdev)
-{
- return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
-}
-
static void cancel_interleave_scan(struct hci_dev *hdev)
{
bt_dev_dbg(hdev, "cancelling interleave scan");
@@ -5004,7 +5062,9 @@ int hci_dev_close_sync(struct hci_dev *hdev)
cancel_delayed_work(&hdev->ncmd_timer);
cancel_delayed_work(&hdev->le_scan_disable);
- hci_request_cancel_all(hdev);
+ hci_cmd_sync_cancel_sync(hdev, ENODEV);
+
+ cancel_interleave_scan(hdev);
if (hdev->adv_instance_timeout) {
cancel_delayed_work_sync(&hdev->adv_instance_expire);
@@ -5651,7 +5711,7 @@ int hci_update_connectable_sync(struct hci_dev *hdev)
return hci_update_passive_scan_sync(hdev);
}
-static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
{
const u8 giac[3] = { 0x33, 0x8b, 0x9e };
const u8 liac[3] = { 0x00, 0x8b, 0x9e };
@@ -5674,6 +5734,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
memcpy(&cp.lap, giac, sizeof(cp.lap));
cp.length = length;
+ cp.num_rsp = num_rsp;
return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@@ -5760,7 +5821,7 @@ static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
if (err)
return err;
- return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
}
int hci_start_discovery_sync(struct hci_dev *hdev)
@@ -5772,7 +5833,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev)
switch (hdev->discovery.type) {
case DISCOV_TYPE_BREDR:
- return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
case DISCOV_TYPE_INTERLEAVED:
/* When running simultaneous discovery, the LE scanning time
* should occupy the whole discovery time since BR/EDR inquiry
@@ -5842,7 +5903,6 @@ static int hci_pause_discovery_sync(struct hci_dev *hdev)
return err;
hdev->discovery_paused = true;
- hdev->discovery_old_state = old_state;
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
return 0;
@@ -6711,3 +6771,21 @@ int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
return -ENOENT;
}
+
+int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_conn_update cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.conn_interval_min = cpu_to_le16(params->conn_min_interval);
+ cp.conn_interval_max = cpu_to_le16(params->conn_max_interval);
+ cp.conn_latency = cpu_to_le16(params->conn_latency);
+ cp.supervision_timeout = cpu_to_le16(params->supervision_timeout);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index cc055b952ce6..d5e00d0dd1a0 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1356,8 +1356,7 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
switch (sk->sk_state) {
case BT_CONNECT2:
- if (pi->conn->hcon &&
- test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
+ if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
iso_conn_big_sync(sk);
sk->sk_state = BT_LISTEN;
} else {
@@ -1721,11 +1720,6 @@ static void iso_sock_ready(struct sock *sk)
release_sock(sk);
}
-struct iso_list_data {
- struct hci_conn *hcon;
- int count;
-};
-
static bool iso_match_big(struct sock *sk, void *data)
{
struct hci_evt_le_big_sync_estabilished *ev = data;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5b509b767557..c3c26bbb5dda 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4011,8 +4011,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
status = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
} else {
- l2cap_state_change(chan, BT_CONNECT2);
- result = L2CAP_CR_PEND;
+ l2cap_state_change(chan, BT_CONFIG);
+ result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
}
} else {
@@ -4647,13 +4647,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
- if (max > hcon->le_conn_max_interval) {
- BT_DBG("requested connection interval exceeds current bounds.");
- err = -EINVAL;
- } else {
- err = hci_check_conn_params(min, max, latency, to_multiplier);
- }
-
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
@@ -6767,6 +6761,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
BT_DBG("chan %p, len %d", chan, skb->len);
+ l2cap_chan_lock(chan);
+
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;
@@ -6783,6 +6779,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
}
drop:
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
free_skb:
kfree_skb(skb);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 6db60946c627..ba437c6f6ee5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1239,6 +1239,10 @@ static void l2cap_sock_kill(struct sock *sk)
BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+ /* The sock is dead, so set chan->data to NULL to prevent other tasks
+ * from using the invalid sock pointer.
+ */
+ l2cap_pi(sk)->chan->data = NULL;
/* Kill poor orphan */
l2cap_chan_put(l2cap_pi(sk)->chan);
@@ -1481,12 +1485,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
- struct sock *sk = chan->data;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
int err;
- lock_sock(sk);
+ sk = chan->data;
+ if (!sk)
+ return -ENXIO;
+ pi = l2cap_pi(sk);
+ lock_sock(sk);
if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
err = -ENOMEM;
goto done;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 80f220b7e19d..40d4887c7f79 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -33,7 +33,6 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
@@ -42,7 +41,7 @@
#include "aosp.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 22
+#define MGMT_REVISION 23
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -7813,6 +7812,18 @@ unlock:
return err;
}
+static int conn_update_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn_params *params = data;
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
+ if (!conn)
+ return -ECANCELED;
+
+ return hci_le_conn_update_sync(hdev, conn, params);
+}
+
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -7846,12 +7857,14 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
- hci_conn_params_clear_disabled(hdev);
+ if (param_count > 1)
+ hci_conn_params_clear_disabled(hdev);
for (i = 0; i < param_count; i++) {
struct mgmt_conn_param *param = &cp->params[i];
struct hci_conn_params *hci_param;
u16 min, max, latency, timeout;
+ bool update = false;
u8 addr_type;
bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
@@ -7879,6 +7892,19 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
}
+ /* Detect when the load is for an existing parameter, then
+ * attempt to trigger the connection update procedure.
+ */
+ if (!i && param_count == 1) {
+ hci_param = hci_conn_params_lookup(hdev,
+ &param->addr.bdaddr,
+ addr_type);
+ if (hci_param)
+ update = true;
+ else
+ hci_conn_params_clear_disabled(hdev);
+ }
+
hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
addr_type);
if (!hci_param) {
@@ -7890,6 +7916,25 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
hci_param->conn_max_interval = max;
hci_param->conn_latency = latency;
hci_param->supervision_timeout = timeout;
+
+ /* Check if we need to trigger a connection update */
+ if (update) {
+ struct hci_conn *conn;
+
+ /* Look up an existing connection as central and check
+ * if the parameters match; if they don't, trigger
+ * a connection update.
+ */
+ conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
+ addr_type);
+ if (conn && conn->role == HCI_ROLE_MASTER &&
+ (conn->le_conn_min_interval != min ||
+ conn->le_conn_max_interval != max ||
+ conn->le_conn_latency != latency ||
+ conn->le_supv_timeout != timeout))
+ hci_cmd_sync_queue(hdev, conn_update_sync,
+ hci_param, NULL);
+ }
}
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index d039683d3bdd..5a8ccc491b14 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -7,7 +7,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "mgmt_util.h"
#include "msft.h"
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 69c75c041fe1..af80d599c337 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -504,7 +504,7 @@ static int rfcomm_get_dev_list(void __user *arg)
struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
- int n = 0, size, err;
+ int n = 0, err;
u16 dev_num;
BT_DBG("");
@@ -515,12 +515,11 @@ static int rfcomm_get_dev_list(void __user *arg)
if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di))
return -EINVAL;
- size = sizeof(*dl) + dev_num * sizeof(*di);
-
- dl = kzalloc(size, GFP_KERNEL);
+ dl = kzalloc(struct_size(dl, dev_info, dev_num), GFP_KERNEL);
if (!dl)
return -ENOMEM;
+ dl->dev_num = dev_num;
di = dl->dev_info;
mutex_lock(&rfcomm_dev_lock);
@@ -528,12 +527,12 @@ static int rfcomm_get_dev_list(void __user *arg)
list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (!tty_port_get(&dev->port))
continue;
- (di + n)->id = dev->id;
- (di + n)->flags = dev->flags;
- (di + n)->state = dev->dlc->state;
- (di + n)->channel = dev->channel;
- bacpy(&(di + n)->src, &dev->src);
- bacpy(&(di + n)->dst, &dev->dst);
+ di[n].id = dev->id;
+ di[n].flags = dev->flags;
+ di[n].state = dev->dlc->state;
+ di[n].channel = dev->channel;
+ bacpy(&di[n].src, &dev->src);
+ bacpy(&di[n].dst, &dev->dst);
tty_port_put(&dev->port);
if (++n >= dev_num)
break;
@@ -542,9 +541,7 @@ static int rfcomm_get_dev_list(void __user *arg)
mutex_unlock(&rfcomm_dev_lock);
dl->dev_num = n;
- size = sizeof(*dl) + n * sizeof(*di);
-
- err = copy_to_user(arg, dl, size);
+ err = copy_to_user(arg, dl, struct_size(dl, dev_info, n));
kfree(dl);
return err ? -EFAULT : 0;
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 891cdf61c65a..3ea52b05adfb 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -272,12 +272,12 @@ static int bpf_dummy_init_member(const struct btf_type *t,
return -EOPNOTSUPP;
}
-static int bpf_dummy_reg(void *kdata)
+static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
return -EOPNOTSUPP;
}
-static void bpf_dummy_unreg(void *kdata)
+static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index f6aad4ed2ab2..6d7a442ceb89 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -127,9 +127,10 @@ struct xdp_test_data {
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256
-static void xdp_test_run_init_page(struct page *page, void *arg)
+static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
{
- struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
+ struct xdp_page_head *head =
+ phys_to_virt(page_to_phys(netmem_to_page(netmem)));
struct xdp_buff *new_ctx, *orig_ctx;
u32 headroom = XDP_PACKET_HEADROOM;
struct xdp_test_data *xdp = arg;
@@ -283,9 +284,10 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
u32 repeat)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int err = 0, act, ret, i, nframes = 0, batch_sz;
struct xdp_frame **frames = xdp->frames;
+ struct bpf_redirect_info *ri;
struct xdp_page_head *head;
struct xdp_frame *frm;
bool redirect = false;
@@ -295,6 +297,8 @@ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
batch_sz = min_t(u32, repeat, xdp->batch_size);
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ ri = bpf_net_ctx_get_ri();
xdp_set_return_frame_no_direct();
for (i = 0; i < batch_sz; i++) {
@@ -359,6 +363,7 @@ out:
}
xdp_clear_return_frame_no_direct();
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
return err;
}
@@ -394,6 +399,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
u32 *retval, u32 *time, bool xdp)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct bpf_prog_array_item item = {.prog = prog};
struct bpf_run_ctx *old_ctx;
struct bpf_cg_run_ctx run_ctx;
@@ -419,10 +425,14 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
do {
run_ctx.prog_item = &item;
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
if (xdp)
*retval = bpf_prog_run_xdp(prog, ctx);
else
*retval = bpf_prog_run(prog, ctx);
+
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
bpf_reset_run_ctx(old_ctx);
@@ -727,10 +737,16 @@ static void
__bpf_prog_test_run_raw_tp(void *data)
{
struct bpf_raw_tp_test_run_info *info = data;
+ struct bpf_trace_run_ctx run_ctx = {};
+ struct bpf_run_ctx *old_run_ctx;
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
rcu_read_lock();
info->retval = bpf_prog_run(info->prog, info->ctx);
rcu_read_unlock();
+
+ bpf_reset_run_ctx(old_run_ctx);
}
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
@@ -977,7 +993,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
void *data;
int ret;
- if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
+ if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
+ kattr->test.cpu || kattr->test.batch_size)
return -EINVAL;
data = bpf_test_init(kattr, kattr->test.data_size_in,
@@ -1025,6 +1042,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
__skb_put(skb, size);
+
if (ctx && ctx->ifindex > 1) {
dev = dev_get_by_index(net, ctx->ifindex);
if (!dev) {
@@ -1060,9 +1078,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
__skb_push(skb, hh_len);
if (is_direct_pkt_access)
bpf_compute_data_pointers(skb);
+
ret = convert___skb_to_skb(skb, ctx);
if (ret)
goto out;
+
+ if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
+ const int off = skb_network_offset(skb);
+ int len = skb->len - off;
+
+ skb->csum = skb_checksum(skb, off, len, 0);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
if (ret)
goto out;
@@ -1077,6 +1105,20 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
}
memset(__skb_push(skb, hh_len), 0, hh_len);
}
+
+ if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
+ const int off = skb_network_offset(skb);
+ int len = skb->len - off;
+ __wsum csum;
+
+ csum = skb_checksum(skb, off, len, 0);
+
+ if (csum_fold(skb->csum) != csum_fold(csum)) {
+ ret = -EBADMSG;
+ goto out;
+ }
+ }
+
convert_skb_to___skb(skb, ctx);
size = skb->len;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index d97064d460dc..e19b583ff2c6 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -25,8 +25,8 @@ static inline int should_deliver(const struct net_bridge_port *p,
vg = nbp_vlan_group_rcu(p);
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
- p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
- nbp_switchdev_allowed_egress(p, skb) &&
+ (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) &&
+ br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) &&
!br_skb_isolated(p, skb);
}
diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
index 3c66141d34d6..1820f09ff59c 100644
--- a/net/bridge/br_mst.c
+++ b/net/bridge/br_mst.c
@@ -73,11 +73,10 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
}
EXPORT_SYMBOL_GPL(br_mst_get_state);
-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
+static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
+ struct net_bridge_vlan *v,
u8 state)
{
- struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
-
if (br_vlan_get_state(v) == state)
return;
@@ -103,7 +102,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
int err = 0;
rcu_read_lock();
- vg = nbp_vlan_group(p);
+ vg = nbp_vlan_group_rcu(p);
if (!vg)
goto out;
@@ -121,7 +120,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
if (v->brvlan->msti != msti)
continue;
- br_mst_vlan_set_state(p, v, state);
+ br_mst_vlan_set_state(vg, v, state);
}
out:
@@ -140,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
* it.
*/
if (v != pv && v->brvlan->msti == msti) {
- br_mst_vlan_set_state(pv->port, pv, v->state);
+ br_mst_vlan_set_state(vg, pv, v->state);
return;
}
}
/* Otherwise, start out in a new MSTI with all ports disabled. */
- return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
+ return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
}
int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index bf30c50b5689..3c9f6538990e 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -137,6 +137,7 @@ static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
struct brnf_frag_data {
+ local_lock_t bh_lock;
char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
u8 encap_size;
u8 size;
@@ -144,7 +145,9 @@ struct brnf_frag_data {
__be16 vlan_proto;
};
-static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
+static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
static void nf_bridge_info_free(struct sk_buff *skb)
{
@@ -850,6 +853,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
unsigned int mtu, mtu_reserved;
+ int ret;
mtu_reserved = nf_bridge_mtu_reduction(skb);
mtu = skb->dev->mtu;
@@ -882,6 +886,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
+ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
data = this_cpu_ptr(&brnf_frag_data_storage);
if (skb_vlan_tag_present(skb)) {
@@ -897,7 +902,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
- return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
+ ret = br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
+ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ return ret;
}
if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6)) {
@@ -909,6 +916,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
+ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
data = this_cpu_ptr(&brnf_frag_data_storage);
data->encap_size = nf_bridge_encap_header_len(skb);
data->size = ETH_HLEN + data->encap_size;
@@ -916,8 +924,12 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
- if (v6ops)
- return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
+ if (v6ops) {
+ ret = v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
+ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ return ret;
+ }
+ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
kfree_skb(skb);
return -EMSGSIZE;
diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c
index 17abf092f7ca..71a12da30004 100644
--- a/net/bridge/br_netlink_tunnel.c
+++ b/net/bridge/br_netlink_tunnel.c
@@ -315,8 +315,8 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
if (curr_change)
*changed = curr_change;
- __vlan_tunnel_handle_range(p, &v_start, &v_end, v,
- curr_change);
+ __vlan_tunnel_handle_range(p, &v_start, &v_end, v,
+ curr_change);
}
if (v_start && v_end)
br_vlan_notify(br, p, v_start->vid, v_end->vid,
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index c3c51b9a6826..816bb0fde718 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
unsigned int hlen, ll_rs, mtu;
ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
@@ -82,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
if (iter.frag)
ip_fraglist_prepare(skb, &iter);
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
@@ -113,7 +113,7 @@ slow_path:
goto blackhole;
}
- skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb2, tstamp, tstamp_type);
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 7796414d47e5..2ae8cfa3df88 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -21,13 +21,6 @@ do { \
pr_warn(errmsg); \
} while (0)
-struct cfpktq {
- struct sk_buff_head head;
- atomic_t count;
- /* Lock protects count updates */
- spinlock_t lock;
-};
-
/*
* net/caif/ is generic and does not
* understand SKB, so we do this typecast
diff --git a/net/can/Kconfig b/net/can/Kconfig
index cb56be8e3862..af64a6f76458 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -56,18 +56,17 @@ config CAN_GW
source "net/can/j1939/Kconfig"
config CAN_ISOTP
- tristate "ISO 15765-2:2016 CAN transport protocol"
+ tristate "ISO 15765-2 CAN transport protocol"
help
CAN Transport Protocols offer support for segmented Point-to-Point
communication between CAN nodes via two defined CAN Identifiers.
+ This protocol driver implements segmented data transfers for CAN CC
+ (aka Classical CAN, CAN 2.0B) and CAN FD frame types which were
+ introduced with ISO 15765-2:2016.
As CAN frames can only transport a small amount of data bytes
- (max. 8 bytes for 'classic' CAN and max. 64 bytes for CAN FD) this
+ (max. 8 bytes for CAN CC and max. 64 bytes for CAN FD) this
segmentation is needed to transport longer Protocol Data Units (PDU)
as needed e.g. for vehicle diagnosis (UDS, ISO 14229) or IP-over-CAN
traffic.
- This protocol driver implements data transfers according to
- ISO 15765-2:2016 for 'classic' CAN and CAN FD frame types.
- If you want to perform automotive vehicle diagnostic services (UDS),
- say 'y'.
endif
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 25bac0fafc83..16046931542a 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -72,7 +72,7 @@
#include <net/sock.h>
#include <net/net_namespace.h>
-MODULE_DESCRIPTION("PF_CAN isotp 15765-2:2016 protocol");
+MODULE_DESCRIPTION("PF_CAN ISO 15765-2 transport protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS("can-proto-6");
@@ -83,10 +83,11 @@ MODULE_ALIAS("can-proto-6");
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-/* ISO 15765-2:2016 supports more than 4095 byte per ISO PDU as the FF_DL can
- * take full 32 bit values (4 Gbyte). We would need some good concept to handle
- * this between user space and kernel space. For now set the static buffer to
- * something about 8 kbyte to be able to test this new functionality.
+/* Since ISO 15765-2:2016 the CAN isotp protocol supports more than 4095
+ * bytes per ISO PDU as the FF_DL can take full 32 bit values (4 Gbyte).
+ * We would need some good concept to handle this between user space and
+ * kernel space. For now set the static buffer to something about 8 kbyte
+ * to be able to test this new functionality.
*/
#define DEFAULT_MAX_PDU_SIZE 8300
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index a6fb89fa6278..7e8a20f2fc42 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -30,10 +30,6 @@ MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
/* CAN_HDR: #bytes before can_frame data part */
#define J1939_CAN_HDR (offsetof(struct can_frame, data))
-/* CAN_FTR: #bytes beyond data part */
-#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
- sizeof(((struct can_frame *)0)->data))
-
/* lowest layer */
static void j1939_can_recv(struct sk_buff *iskb, void *data)
{
@@ -342,7 +338,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
memset(cf, 0, J1939_CAN_HDR);
/* make it a full can frame again */
- skb_put(skb, J1939_CAN_FTR + (8 - dlc));
+ skb_put_zero(skb, 8 - dlc);
canid = CAN_EFF_FLAG |
(skcb->priority << 26) |
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fe3df23a2595..4be73de5033c 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1593,8 +1593,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
struct j1939_session *session;
const u8 *dat;
+ int len, ret;
pgn_t pgn;
- int len;
netdev_dbg(priv->ndev, "%s\n", __func__);
@@ -1653,7 +1653,22 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
session->tskey = priv->rx_tskey++;
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
- WARN_ON_ONCE(j1939_session_activate(session));
+ ret = j1939_session_activate(session);
+ if (ret) {
+ /* Entering this scope indicates an issue with the J1939 bus.
+ * Possible scenarios include:
+ * - A time lapse occurred, and a new session was initiated
+ * due to another packet being sent correctly. This could
+ * have been caused by an overly long interrupt, a debugger, or
+ * being scheduled out by another task.
+ * - The bus is receiving numerous erroneous packets, either
+ * from a malfunctioning device or during a test scenario.
+ */
+ netdev_alert(priv->ndev, "%s: 0x%p: concurrent session with same addr (%02x %02x) is already active.\n",
+ __func__, session, skcb.addr.sa, skcb.addr.da);
+ j1939_session_put(session);
+ return NULL;
+ }
return session;
}
@@ -1681,6 +1696,8 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
+ if (session->transmission)
+ j1939_session_deactivate_activate_next(session);
return -EBUSY;
}
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 1daf95e17d67..3a5bd1cd1e99 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -429,7 +429,10 @@ static int is_out(const struct crush_map *map,
/**
* crush_choose_firstn - choose numrep distinct items of given type
* @map: the crush_map
+ * @work: working space initialized by crush_init_workspace()
* @bucket: the bucket we are choosing an item from
+ * @weight: weight vector (for map leaves)
+ * @weight_max: size of weight vector
* @x: crush input value
* @numrep: the number of items to choose
* @type: the type of item to choose
@@ -445,6 +448,7 @@ static int is_out(const struct crush_map *map,
* @vary_r: pass r to recursive calls
* @out2: second output vector for leaf items (if @recurse_to_leaf)
* @parent_r: r value passed from the parent
+ * @choose_args: weights and ids for each known bucket
*/
static int crush_choose_firstn(const struct crush_map *map,
struct crush_work *work,
@@ -636,9 +640,8 @@ reject:
}
-/**
+/*
* crush_choose_indep: alternative breadth-first positionally stable mapping
- *
*/
static void crush_choose_indep(const struct crush_map *map,
struct crush_work *work,
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index f263f7e91a21..ab66b599ac47 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_struct *work)
struct ceph_mon_client *monc =
container_of(work, struct ceph_mon_client, delayed_work.work);
- dout("monc delayed_work\n");
mutex_lock(&monc->mutex);
+ dout("%s mon%d\n", __func__, monc->cur_mon);
+ if (monc->cur_mon < 0) {
+ goto out;
+ }
+
if (monc->hunting) {
dout("%s continuing hunt\n", __func__);
reopen_session(monc);
} else {
int is_auth = ceph_auth_is_authenticated(monc->auth);
+
+ dout("%s is_authed %d\n", __func__, is_auth);
if (ceph_con_keepalive_expired(&monc->con,
CEPH_MONC_PING_TIMEOUT)) {
dout("monc keepalive timeout\n");
@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_struct *work)
}
}
__schedule_delayed(monc);
+
+out:
mutex_unlock(&monc->mutex);
}
@@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init);
void ceph_monc_stop(struct ceph_mon_client *monc)
{
dout("stop\n");
- cancel_delayed_work_sync(&monc->delayed_work);
mutex_lock(&monc->mutex);
__close_session(monc);
+ monc->hunting = false;
monc->cur_mon = -1;
mutex_unlock(&monc->mutex);
+ cancel_delayed_work_sync(&monc->delayed_work);
+
/*
* flush msgr queue before we destroy ourselves to ensure that:
* - any work that references our embedded con is finished.
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e614cfd8e14a..a40f733b37d7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -416,15 +416,23 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
- struct page *page = skb_frag_page(frag);
- u8 *vaddr = kmap(page);
+ u32 p_off, p_len, copied;
+ struct page *p;
+ u8 *vaddr;
if (copy > len)
copy = len;
- n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
- vaddr + skb_frag_off(frag) + offset - start,
- copy, data, to);
- kunmap(page);
+
+ n = 0;
+ skb_frag_foreach_page(frag,
+ skb_frag_off(frag) + offset - start,
+ copy, p, p_off, p_len, copied) {
+ vaddr = kmap_local_page(p);
+ n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ vaddr + p_off, p_len, data, to);
+ kunmap_local(vaddr);
+ }
+
offset += n;
if (n != copy)
goto short_copy;
@@ -610,16 +618,10 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
-int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb, struct iov_iter *from,
- size_t length)
+int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length)
{
- int frag;
-
- if (msg && msg->msg_ubuf && msg->sg_from_iter)
- return msg->sg_from_iter(sk, skb, from, length);
-
- frag = skb_shinfo(skb)->nr_frags;
+ int frag = skb_shinfo(skb)->nr_frags;
while (length && iov_iter_count(from)) {
struct page *head, *last_head = NULL;
@@ -627,7 +629,6 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
int refs, order, n = 0;
size_t start;
ssize_t copied;
- unsigned long truesize;
if (frag == MAX_SKB_FRAGS)
return -EMSGSIZE;
@@ -639,17 +640,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
length -= copied;
- truesize = PAGE_ALIGN(copied + start);
skb->data_len += copied;
skb->len += copied;
- skb->truesize += truesize;
- if (sk && sk->sk_type == SOCK_STREAM) {
- sk_wmem_queued_add(sk, truesize);
- if (!skb_zcopy_pure(skb))
- sk_mem_charge(sk, truesize);
- } else {
- refcount_add(truesize, &skb->sk->sk_wmem_alloc);
- }
+ skb->truesize += PAGE_ALIGN(copied + start);
head = compound_head(pages[n]);
order = compound_order(head);
@@ -692,6 +685,30 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
}
return 0;
}
+
+int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, struct iov_iter *from,
+ size_t length)
+{
+ unsigned long orig_size = skb->truesize;
+ unsigned long truesize;
+ int ret;
+
+ if (msg && msg->msg_ubuf && msg->sg_from_iter)
+ ret = msg->sg_from_iter(skb, from, length);
+ else
+ ret = zerocopy_fill_skb_from_iter(skb, from, length);
+
+ truesize = skb->truesize - orig_size;
+ if (sk && sk->sk_type == SOCK_STREAM) {
+ sk_wmem_queued_add(sk, truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_charge(sk, truesize);
+ } else {
+ refcount_add(truesize, &skb->sk->sk_wmem_alloc);
+ }
+ return ret;
+}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);
/**
diff --git a/net/core/dev.c b/net/core/dev.c
index e1bb6d7856d9..6ea1d20676fb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -229,7 +229,7 @@ static inline void backlog_lock_irq_save(struct softnet_data *sd,
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_save(*flags);
}
@@ -237,7 +237,7 @@ static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_disable();
}
@@ -246,7 +246,7 @@ static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_restore(*flags);
}
@@ -254,7 +254,7 @@ static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ else
local_irq_enable();
}
@@ -449,7 +449,9 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
* queue in the local softnet handler.
*/
-DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
+ .process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
+};
EXPORT_PER_CPU_SYMBOL(softnet_data);
/* Page_pool has a lockless array/stack to alloc/recycle pages.
@@ -1226,9 +1228,9 @@ int dev_change_name(struct net_device *dev, const char *newname)
memcpy(oldname, dev->name, IFNAMSIZ);
- write_seqlock(&netdev_rename_lock);
+ write_seqlock_bh(&netdev_rename_lock);
err = dev_get_valid_name(net, dev, newname);
- write_sequnlock(&netdev_rename_lock);
+ write_sequnlock_bh(&netdev_rename_lock);
if (err < 0) {
up_write(&devnet_rename_sem);
@@ -1269,9 +1271,9 @@ rollback:
if (err >= 0) {
err = ret;
down_write(&devnet_rename_sem);
- write_seqlock(&netdev_rename_lock);
+ write_seqlock_bh(&netdev_rename_lock);
memcpy(dev->name, oldname, IFNAMSIZ);
- write_sequnlock(&netdev_rename_lock);
+ write_sequnlock_bh(&netdev_rename_lock);
memcpy(oldname, newname, IFNAMSIZ);
WRITE_ONCE(dev->name_assign_type, old_assign_type);
old_assign_type = NET_NAME_RENAMED;
@@ -2160,7 +2162,7 @@ EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
skb->tstamp = 0;
- skb->mono_delivery_time = 0;
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (static_branch_unlikely(&netstamp_needed_key))
skb->tstamp = ktime_get_real();
}
@@ -3940,6 +3942,7 @@ netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
}
+#ifndef CONFIG_PREEMPT_RT
static bool netdev_xmit_txqueue_skipped(void)
{
return __this_cpu_read(softnet_data.xmit.skip_txqueue);
@@ -3950,6 +3953,19 @@ void netdev_xmit_skip_txqueue(bool skip)
__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
+
+#else
+static bool netdev_xmit_txqueue_skipped(void)
+{
+ return current->net_xmit.skip_txqueue;
+}
+
+void netdev_xmit_skip_txqueue(bool skip)
+{
+ current->net_xmit.skip_txqueue = skip;
+}
+EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
+#endif
#endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_NET_XGRESS
@@ -4029,10 +4045,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
{
struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int sch_ret;
if (!entry)
return skb;
+
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
@@ -4061,10 +4080,12 @@ ingress_verdict:
break;
}
*ret = NET_RX_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
case TC_ACT_SHOT:
kfree_skb_reason(skb, drop_reason);
*ret = NET_RX_DROP;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
/* used by tc_run */
case TC_ACT_STOLEN:
@@ -4074,8 +4095,10 @@ ingress_verdict:
fallthrough;
case TC_ACT_CONSUMED:
*ret = NET_RX_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
return skb;
}
@@ -4085,11 +4108,14 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int sch_ret;
if (!entry)
return skb;
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
/* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
* already set by the caller.
*/
@@ -4105,10 +4131,12 @@ egress_verdict:
/* No need to push/pop skb's mac_header here on egress! */
skb_do_redirect(skb);
*ret = NET_XMIT_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
case TC_ACT_SHOT:
kfree_skb_reason(skb, drop_reason);
*ret = NET_XMIT_DROP;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
/* used by tc_run */
case TC_ACT_STOLEN:
@@ -4118,8 +4146,10 @@ egress_verdict:
fallthrough;
case TC_ACT_CONSUMED:
*ret = NET_XMIT_SUCCESS;
+ bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
return skb;
}
@@ -4516,12 +4546,13 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
if (next_cpu < nr_cpu_ids) {
+ u32 head;
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *old_rflow;
- u32 flow_id, head;
u16 rxq_index;
+ u32 flow_id;
int rc;
/* Should we steer this flow to a different hardware queue? */
@@ -5095,11 +5126,14 @@ static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
@@ -5113,11 +5147,13 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
generic_xdp_tx(*pskb, xdp_prog);
break;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
return XDP_DROP;
}
}
return XDP_PASS;
out_redir:
+ bpf_net_ctx_clear(bpf_net_ctx);
kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
@@ -5233,7 +5269,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
trace_consume_skb(skb, net_tx_action);
else
trace_kfree_skb(skb, net_tx_action,
- get_kfree_skb_cb(skb)->reason);
+ get_kfree_skb_cb(skb)->reason, NULL);
if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
__kfree_skb(skb);
@@ -5934,6 +5970,7 @@ static void flush_backlog(struct work_struct *work)
}
backlog_unlock_irq_enable(sd);
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -5941,6 +5978,7 @@ static void flush_backlog(struct work_struct *work)
rps_input_queue_head_incr(sd);
}
}
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
local_bh_enable();
}
@@ -6062,7 +6100,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
while (again) {
struct sk_buff *skb;
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
while ((skb = __skb_dequeue(&sd->process_queue))) {
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
@@ -6071,7 +6111,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
return work;
}
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
}
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
backlog_lock_irq_disable(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
@@ -6086,8 +6128,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
napi->state &= NAPIF_STATE_THREADED;
again = false;
} else {
+ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
+ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
}
backlog_unlock_irq_enable(sd);
}
@@ -6300,6 +6344,7 @@ enum {
static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
unsigned flags, u16 budget)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
bool skip_schedule = false;
unsigned long timeout;
int rc;
@@ -6317,6 +6362,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (flags & NAPI_F_PREFER_BUSY_POLL) {
napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
@@ -6339,6 +6385,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
netpoll_poll_unlock(have_poll_lock);
if (rc == budget)
__busy_poll_stop(napi, skip_schedule);
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
}
@@ -6348,6 +6395,7 @@ static void __napi_busy_loop(unsigned int napi_id,
{
unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
int (*napi_poll)(struct napi_struct *napi, int budget);
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
void *have_poll_lock = NULL;
struct napi_struct *napi;
@@ -6366,6 +6414,7 @@ restart:
int work = 0;
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (!napi_poll) {
unsigned long val = READ_ONCE(napi->state);
@@ -6396,6 +6445,7 @@ count:
__NET_ADD_STATS(dev_net(napi->dev),
LINUX_MIB_BUSYPOLLRXPACKETS, work);
skb_defer_free_flush(this_cpu_ptr(&softnet_data));
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
if (!loop_end || loop_end(loop_end_arg, start_time))
@@ -6823,6 +6873,7 @@ static int napi_thread_wait(struct napi_struct *napi)
static void napi_threaded_poll_loop(struct napi_struct *napi)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct softnet_data *sd;
unsigned long last_qs = jiffies;
@@ -6831,6 +6882,8 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
void *have;
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
sd = this_cpu_ptr(&softnet_data);
sd->in_napi_threaded_poll = true;
@@ -6846,6 +6899,7 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
net_rps_action_and_irq_enable(sd);
}
skb_defer_free_flush(sd);
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
if (!repoll)
@@ -6871,10 +6925,12 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int budget = READ_ONCE(net_hotdata.netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
start:
sd->in_net_rx_action = true;
local_irq_disable();
@@ -6927,7 +6983,8 @@ start:
sd->in_net_rx_action = false;
net_rps_action_and_irq_enable(sd);
-end:;
+end:
+ bpf_net_ctx_clear(bpf_net_ctx);
}
struct netdev_adjacent {
@@ -10284,6 +10341,10 @@ int register_netdevice(struct net_device *dev)
if (ret)
return ret;
+ /* rss ctx ID 0 is reserved for the default context, start from 1 */
+ xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
+ mutex_init(&dev->ethtool->rss_lock);
+
spin_lock_init(&dev->addr_list_lock);
netdev_set_addr_lockdep_class(dev);
@@ -10702,6 +10763,54 @@ void netdev_run_todo(void)
wake_up(&netdev_unregistering_wq);
}
+/* Collate per-cpu network dstats statistics
+ *
+ * Read per-cpu network statistics from dev->dstats and populate the related
+ * fields in @s.
+ */
+static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
+ const struct pcpu_dstats __percpu *dstats)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ u64 rx_packets, rx_bytes, rx_drops;
+ u64 tx_packets, tx_bytes, tx_drops;
+ const struct pcpu_dstats *stats;
+ unsigned int start;
+
+ stats = per_cpu_ptr(dstats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rx_packets = u64_stats_read(&stats->rx_packets);
+ rx_bytes = u64_stats_read(&stats->rx_bytes);
+ rx_drops = u64_stats_read(&stats->rx_drops);
+ tx_packets = u64_stats_read(&stats->tx_packets);
+ tx_bytes = u64_stats_read(&stats->tx_bytes);
+ tx_drops = u64_stats_read(&stats->tx_drops);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ s->rx_packets += rx_packets;
+ s->rx_bytes += rx_bytes;
+ s->rx_dropped += rx_drops;
+ s->tx_packets += tx_packets;
+ s->tx_bytes += tx_bytes;
+ s->tx_dropped += tx_drops;
+ }
+}
+
+/* ndo_get_stats64 implementation for dstats-based accounting.
+ *
+ * Populate @s from dev->stats and dev->dstats. This is used internally by the
+ * core for NETDEV_PCPU_STAT_DSTATS-type stats collection.
+ */
+static void dev_get_dstats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ netdev_stats_to_stats64(s, &dev->stats);
+ dev_fetch_dstats(s, dev->dstats);
+}
+
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
* all the same fields in the same order as net_device_stats, with only
* the type differing, but rtnl_link_stats64 may have additional fields
@@ -10778,6 +10887,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
dev_get_tstats64(dev, storage);
+ } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) {
+ dev_get_dstats64(dev, storage);
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -10895,13 +11006,6 @@ void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
-void netdev_freemem(struct net_device *dev)
-{
- char *addr = (char *)dev - dev->padded;
-
- kvfree(addr);
-}
-
/**
* alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
@@ -10921,8 +11025,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
- unsigned int alloc_size;
- struct net_device *p;
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -10936,21 +11038,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
return NULL;
}
- alloc_size = sizeof(struct net_device);
- if (sizeof_priv) {
- /* ensure 32-byte alignment of private area */
- alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
- alloc_size += sizeof_priv;
- }
- /* ensure 32-byte alignment of whole construct */
- alloc_size += NETDEV_ALIGN - 1;
-
- p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
- if (!p)
+ dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ if (!dev)
return NULL;
- dev = PTR_ALIGN(p, NETDEV_ALIGN);
- dev->padded = (char *)dev - (char *)p;
+ dev->priv_len = sizeof_priv;
ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
#ifdef CONFIG_PCPU_DEV_REFCNT
@@ -11014,6 +11107,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_all;
+ dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
+ if (!dev->ethtool)
+ goto free_all;
strcpy(dev->name, name);
dev->name_assign_type = name_assign_type;
@@ -11034,7 +11130,7 @@ free_pcpu:
free_percpu(dev->pcpu_refcnt);
free_dev:
#endif
- netdev_freemem(dev);
+ kvfree(dev);
return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
@@ -11064,6 +11160,7 @@ void free_netdev(struct net_device *dev)
return;
}
+ kfree(dev->ethtool);
netif_free_tx_queues(dev);
netif_free_rx_queues(dev);
@@ -11088,7 +11185,7 @@ void free_netdev(struct net_device *dev)
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED ||
dev->reg_state == NETREG_DUMMY) {
- netdev_freemem(dev);
+ kvfree(dev);
return;
}
@@ -11129,6 +11226,34 @@ void synchronize_net(void)
}
EXPORT_SYMBOL(synchronize_net);
+static void netdev_rss_contexts_free(struct net_device *dev)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
+ struct ethtool_rxfh_param rxfh;
+
+ rxfh.indir = ethtool_rxfh_context_indir(ctx);
+ rxfh.key = ethtool_rxfh_context_key(ctx);
+ rxfh.hfunc = ctx->hfunc;
+ rxfh.input_xfrm = ctx->input_xfrm;
+ rxfh.rss_context = context;
+ rxfh.rss_delete = true;
+
+ xa_erase(&dev->ethtool->rss_ctx, context);
+ if (dev->ethtool_ops->create_rxfh_context)
+ dev->ethtool_ops->remove_rxfh_context(dev, ctx,
+ context, NULL);
+ else
+ dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
+ kfree(ctx);
+ }
+ xa_destroy(&dev->ethtool->rss_ctx);
+ mutex_unlock(&dev->ethtool->rss_lock);
+}
+
/**
* unregister_netdevice_queue - remove device from the kernel
* @dev: device
@@ -11232,11 +11357,15 @@ void unregister_netdevice_many_notify(struct list_head *head,
netdev_name_node_alt_flush(dev);
netdev_name_node_free(dev->name_node);
+ netdev_rss_contexts_free(dev);
+
call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
+ mutex_destroy(&dev->ethtool->rss_lock);
+
if (skb)
rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
@@ -11418,9 +11547,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
if (new_name[0]) {
/* Rename the netdev to prepared name */
- write_seqlock(&netdev_rename_lock);
+ write_seqlock_bh(&netdev_rename_lock);
strscpy(dev->name, new_name, IFNAMSIZ);
- write_sequnlock(&netdev_rename_lock);
+ write_sequnlock_bh(&netdev_rename_lock);
}
/* Fixup kobjects */
diff --git a/net/core/dev.h b/net/core/dev.h
index b7b518bc2be5..5654325c5b71 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -150,6 +150,8 @@ struct napi_struct *napi_by_id(unsigned int napi_id);
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8
+
+#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -165,5 +167,25 @@ static inline void dev_xmit_recursion_dec(void)
{
__this_cpu_dec(softnet_data.xmit.recursion);
}
+#else
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ current->net_xmit.recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ current->net_xmit.recursion--;
+}
+#endif
+
+int dev_set_hwtstamp_phylib(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 9a66cf5015f2..8592c052c0f4 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -259,9 +259,7 @@ static int dev_eth_ioctl(struct net_device *dev,
* @dev: Network device
* @cfg: Timestamping configuration structure
*
- * Helper for enforcing a common policy that phylib timestamping, if available,
- * should take precedence in front of hardware timestamping provided by the
- * netdev.
+ * Helper for calling the default hardware timestamping provider.
*
* Note: phy_mii_ioctl() only handles SIOCSHWTSTAMP (not SIOCGHWTSTAMP), and
* there only exists a phydev->mii_ts->hwtstamp() method. So this will return
@@ -271,7 +269,7 @@ static int dev_eth_ioctl(struct net_device *dev,
static int dev_get_hwtstamp_phylib(struct net_device *dev,
struct kernel_hwtstamp_config *cfg)
{
- if (phy_has_hwtstamp(dev->phydev))
+ if (phy_is_default_hwtstamp(dev->phydev))
return phy_hwtstamp_get(dev->phydev, cfg);
return dev->netdev_ops->ndo_hwtstamp_get(dev, cfg);
@@ -327,7 +325,7 @@ int dev_set_hwtstamp_phylib(struct net_device *dev,
struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
- bool phy_ts = phy_has_hwtstamp(dev->phydev);
+ bool phy_ts = phy_is_default_hwtstamp(dev->phydev);
struct kernel_hwtstamp_config old_cfg = {};
bool changed = false;
int err;
@@ -363,7 +361,6 @@ int dev_set_hwtstamp_phylib(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(dev_set_hwtstamp_phylib);
static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr)
{
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 430ed18f8584..2e0ae3328232 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -109,7 +109,8 @@ static u32 net_dm_queue_len = 1000;
struct net_dm_alert_ops {
void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
void *location,
- enum skb_drop_reason reason);
+ enum skb_drop_reason reason,
+ struct sock *rx_sk);
void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
int work, int budget);
void (*work_item_func)(struct work_struct *work);
@@ -264,7 +265,8 @@ out:
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
void *location,
- enum skb_drop_reason reason)
+ enum skb_drop_reason reason,
+ struct sock *rx_sk)
{
trace_drop_common(skb, location);
}
@@ -491,7 +493,8 @@ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
struct sk_buff *skb,
void *location,
- enum skb_drop_reason reason)
+ enum skb_drop_reason reason,
+ struct sock *rx_sk)
{
ktime_t tstamp = ktime_get_real();
struct per_cpu_dm_data *data;
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index 6a0482e676d3..70c634b9e7b0 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -27,6 +27,7 @@ struct dst_cache_pcpu {
static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
struct dst_entry *dst, u32 cookie)
{
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
dst_release(dst_cache->dst);
if (dst)
dst_hold(dst);
@@ -40,6 +41,7 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
{
struct dst_entry *dst;
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
dst = idst->dst;
if (!dst)
goto fail;
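
The DEBUG_NET_WARN_ON_ONCE(!in_softirq()) checks above make the expectation explicit: the dst_cache per-CPU slots must only be touched with BHs disabled. A process-context caller would therefore wrap the access, roughly as below (sketch_get_cached_dst() is illustrative only):

/* Sketch: satisfy the in_softirq() expectation from process context. */
static struct dst_entry *sketch_get_cached_dst(struct dst_cache *cache)
{
	struct dst_entry *dst;

	local_bh_disable();
	dst = dst_cache_get(cache);
	local_bh_enable();

	return dst;
}
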
diff --git a/net/core/filter.c b/net/core/filter.c
index 2510464692af..4cf1d34f7617 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1658,13 +1658,21 @@ struct bpf_scratchpad {
__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
u8 buff[MAX_BPF_STACK];
};
+ local_lock_t bh_lock;
};
-static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
+static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
static inline int __bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
+#ifdef CONFIG_DEBUG_NET
+ /* Avoid a splat in pskb_may_pull_reason() */
+ if (write_len > INT_MAX)
+ return -EINVAL;
+#endif
return skb_ensure_writable(skb, write_len);
}
@@ -2016,6 +2024,7 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
u32 diff_size = from_size + to_size;
int i, j = 0;
+ __wsum ret;
/* This is quite flexible, some examples:
*
@@ -2029,12 +2038,15 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
diff_size > sizeof(sp->diff)))
return -EINVAL;
+ local_lock_nested_bh(&bpf_sp.bh_lock);
for (i = 0; i < from_size / sizeof(__be32); i++, j++)
sp->diff[j] = ~from[i];
for (i = 0; i < to_size / sizeof(__be32); i++, j++)
sp->diff[j] = to[i];
- return csum_partial(sp->diff, diff_size, seed);
+ ret = csum_partial(sp->diff, diff_size, seed);
+ local_unlock_nested_bh(&bpf_sp.bh_lock);
+ return ret;
}
static const struct bpf_func_proto bpf_csum_diff_proto = {
@@ -2274,12 +2286,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
err = bpf_out_neigh_v6(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
out_xmit:
return ret;
@@ -2380,12 +2392,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
err = bpf_out_neigh_v4(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
out_xmit:
return ret;
@@ -2471,9 +2483,6 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
.arg3_type = ARG_ANYTHING,
};
-DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
-EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
-
static struct net_device *skb_get_peer_dev(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -2486,7 +2495,7 @@ static struct net_device *skb_get_peer_dev(struct net_device *dev)
int skb_do_redirect(struct sk_buff *skb)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
struct net *net = dev_net(skb->dev);
struct net_device *dev;
u32 flags = ri->flags;
@@ -2519,7 +2528,7 @@ out_drop:
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return TC_ACT_SHOT;
@@ -2540,7 +2549,7 @@ static const struct bpf_func_proto bpf_redirect_proto = {
BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely(flags))
return TC_ACT_SHOT;
@@ -2562,7 +2571,7 @@ static const struct bpf_func_proto bpf_redirect_peer_proto = {
BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
int, plen, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely((plen && plen < sizeof(*params)) || flags))
return TC_ACT_SHOT;
@@ -4268,50 +4277,50 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
*/
void xdp_do_flush(void)
{
- __dev_flush();
- __cpu_map_flush();
- __xsk_map_flush();
+ struct list_head *lh_map, *lh_dev, *lh_xsk;
+
+ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
+ if (lh_dev)
+ __dev_flush(lh_dev);
+ if (lh_map)
+ __cpu_map_flush(lh_map);
+ if (lh_xsk)
+ __xsk_map_flush(lh_xsk);
}
EXPORT_SYMBOL_GPL(xdp_do_flush);
#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi)
{
- bool ret;
+ struct list_head *lh_map, *lh_dev, *lh_xsk;
+ bool missed = false;
- ret = dev_check_flush();
- ret |= cpu_map_check_flush();
- ret |= xsk_map_check_flush();
+ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
+ if (lh_dev) {
+ __dev_flush(lh_dev);
+ missed = true;
+ }
+ if (lh_map) {
+ __cpu_map_flush(lh_map);
+ missed = true;
+ }
+ if (lh_xsk) {
+ __xsk_map_flush(lh_xsk);
+ missed = true;
+ }
- WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
+ WARN_ONCE(missed, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
napi->poll);
}
#endif
-void bpf_clear_redirect_map(struct bpf_map *map)
-{
- struct bpf_redirect_info *ri;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- ri = per_cpu_ptr(&bpf_redirect_info, cpu);
- /* Avoid polluting remote cacheline due to writes if
- * not needed. Once we pass this test, we need the
- * cmpxchg() to make sure it hasn't been changed in
- * the meantime by remote CPU.
- */
- if (unlikely(READ_ONCE(ri->map) == map))
- cmpxchg(&ri->map, map, NULL);
- }
-}
-
DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
u32 xdp_master_redirect(struct xdp_buff *xdp)
{
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
struct net_device *master, *slave;
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
@@ -4383,7 +4392,7 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
map = READ_ONCE(ri->map);
/* The map pointer is cleared when the map is being torn
- * down by bpf_clear_redirect_map()
+ * down by dev_map_free()
*/
if (unlikely(!map)) {
err = -ENOENT;
@@ -4428,7 +4437,7 @@ err:
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
if (map_type == BPF_MAP_TYPE_XSKMAP)
@@ -4442,7 +4451,7 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect);
int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
if (map_type == BPF_MAP_TYPE_XSKMAP)
@@ -4459,7 +4468,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
enum bpf_map_type map_type, u32 map_id,
u32 flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
struct bpf_map *map;
int err;
@@ -4471,7 +4480,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
map = READ_ONCE(ri->map);
/* The map pointer is cleared when the map is being torn
- * down by bpf_clear_redirect_map()
+ * down by dev_map_free()
*/
if (unlikely(!map)) {
err = -ENOENT;
@@ -4513,7 +4522,7 @@ err:
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
@@ -4549,7 +4558,7 @@ err:
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
if (unlikely(flags))
return XDP_ABORTED;
@@ -6450,6 +6459,7 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
void *srh_tlvs, *srh_end, *ptr;
int srhoff = 0;
+ lockdep_assert_held(&srh_state->bh_lock);
if (srh == NULL)
return -EINVAL;
@@ -6506,6 +6516,7 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
int hdroff = 0;
int err;
+ lockdep_assert_held(&srh_state->bh_lock);
switch (action) {
case SEG6_LOCAL_ACTION_END_X:
if (!seg6_bpf_has_valid_srh(skb))
@@ -6582,6 +6593,7 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
int srhoff = 0;
int ret;
+ lockdep_assert_held(&srh_state->bh_lock);
if (unlikely(srh == NULL))
return -EINVAL;
@@ -6815,7 +6827,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6834,7 +6846,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6853,7 +6865,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6877,7 +6889,7 @@ static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6901,7 +6913,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6925,7 +6937,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6963,7 +6975,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6987,7 +6999,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7011,7 +7023,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7031,7 +7043,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7050,7 +7062,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7069,7 +7081,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -7726,17 +7738,21 @@ BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb,
return -EOPNOTSUPP;
switch (tstamp_type) {
- case BPF_SKB_TSTAMP_DELIVERY_MONO:
+ case BPF_SKB_CLOCK_REALTIME:
+ skb->tstamp = tstamp;
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
+ break;
+ case BPF_SKB_CLOCK_MONOTONIC:
if (!tstamp)
return -EINVAL;
skb->tstamp = tstamp;
- skb->mono_delivery_time = 1;
+ skb->tstamp_type = SKB_CLOCK_MONOTONIC;
break;
- case BPF_SKB_TSTAMP_UNSPEC:
- if (tstamp)
+ case BPF_SKB_CLOCK_TAI:
+ if (!tstamp)
return -EINVAL;
- skb->tstamp = 0;
- skb->mono_delivery_time = 0;
+ skb->tstamp = tstamp;
+ skb->tstamp_type = SKB_CLOCK_TAI;
break;
default:
return -EINVAL;
@@ -9387,16 +9403,17 @@ static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si,
{
__u8 value_reg = si->dst_reg;
__u8 skb_reg = si->src_reg;
- /* AX is needed because src_reg and dst_reg could be the same */
- __u8 tmp_reg = BPF_REG_AX;
-
- *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg,
- SKB_BF_MONO_TC_OFFSET);
- *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg,
- SKB_MONO_DELIVERY_TIME_MASK, 2);
- *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC);
- *insn++ = BPF_JMP_A(1);
- *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO);
+ BUILD_BUG_ON(__SKB_CLOCK_MAX != (int)BPF_SKB_CLOCK_TAI);
+ BUILD_BUG_ON(SKB_CLOCK_REALTIME != (int)BPF_SKB_CLOCK_REALTIME);
+ BUILD_BUG_ON(SKB_CLOCK_MONOTONIC != (int)BPF_SKB_CLOCK_MONOTONIC);
+ BUILD_BUG_ON(SKB_CLOCK_TAI != (int)BPF_SKB_CLOCK_TAI);
+ *insn++ = BPF_LDX_MEM(BPF_B, value_reg, skb_reg, SKB_BF_MONO_TC_OFFSET);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, value_reg, SKB_TSTAMP_TYPE_MASK);
+#ifdef __BIG_ENDIAN_BITFIELD
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, value_reg, SKB_TSTAMP_TYPE_RSHIFT);
+#else
+ BUILD_BUG_ON(!(SKB_TSTAMP_TYPE_MASK & 0x1));
+#endif
return insn;
}
@@ -9439,11 +9456,12 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog,
__u8 tmp_reg = BPF_REG_AX;
*insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, SKB_BF_MONO_TC_OFFSET);
- *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg,
- TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK);
- *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg,
- TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2);
- /* skb->tc_at_ingress && skb->mono_delivery_time,
+ /* check if the ingress mask bit is set */
+ *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
+ *insn++ = BPF_JMP_A(4);
+ *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, SKB_TSTAMP_TYPE_MASK, 1);
+ *insn++ = BPF_JMP_A(2);
+ /* skb->tc_at_ingress && skb->tstamp_type,
* read 0 as the (rcv) timestamp.
*/
*insn++ = BPF_MOV64_IMM(value_reg, 0);
@@ -9468,7 +9486,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
* the bpf prog is aware the tstamp could have delivery time.
* Thus, write skb->tstamp as is if tstamp_type_access is true.
* Otherwise, writing at ingress will have to clear the
- * mono_delivery_time bit also.
+ * skb->tstamp_type bit also.
*/
if (!prog->tstamp_type_access) {
__u8 tmp_reg = BPF_REG_AX;
@@ -9478,8 +9496,8 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
*insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
/* goto <store> */
*insn++ = BPF_JMP_A(2);
- /* <clear>: mono_delivery_time */
- *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK);
+ /* <clear>: skb->tstamp_type */
+ *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_TSTAMP_TYPE_MASK);
*insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET);
}
#endif
@@ -11035,7 +11053,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
};
const struct bpf_prog_ops lwt_seg6local_prog_ops = {
- .test_run = bpf_prog_test_run_skb,
};
const struct bpf_verifier_ops cg_sock_verifier_ops = {
@@ -11853,28 +11870,34 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
__bpf_kfunc_start_defs();
-__bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr__uninit)
+__bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
+ struct bpf_dynptr *ptr__uninit)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
+ struct sk_buff *skb = (struct sk_buff *)s;
+
if (flags) {
- bpf_dynptr_set_null(ptr__uninit);
+ bpf_dynptr_set_null(ptr);
return -EINVAL;
}
- bpf_dynptr_init(ptr__uninit, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len);
+ bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len);
return 0;
}
-__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags,
- struct bpf_dynptr_kern *ptr__uninit)
+__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags,
+ struct bpf_dynptr *ptr__uninit)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
+ struct xdp_buff *xdp = (struct xdp_buff *)x;
+
if (flags) {
- bpf_dynptr_set_null(ptr__uninit);
+ bpf_dynptr_set_null(ptr);
return -EINVAL;
}
- bpf_dynptr_init(ptr__uninit, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp));
+ bpf_dynptr_init(ptr, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp));
return 0;
}
@@ -11900,10 +11923,11 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
return 0;
}
-__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk,
+__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk,
struct bpf_tcp_req_attrs *attrs, int attrs__sz)
{
#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ struct sk_buff *skb = (struct sk_buff *)s;
const struct request_sock_ops *ops;
struct inet_request_sock *ireq;
struct tcp_request_sock *treq;
@@ -11998,16 +12022,17 @@ __bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk,
__bpf_kfunc_end_defs();
-int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr__uninit)
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr__uninit)
{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
int err;
err = bpf_dynptr_from_skb(skb, flags, ptr__uninit);
if (err)
return err;
- bpf_dynptr_set_rdonly(ptr__uninit);
+ bpf_dynptr_set_rdonly(ptr);
return 0;
}
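
The dynptr kfuncs above now take the UAPI-visible struct __sk_buff and struct bpf_dynptr types and cast internally; from the BPF program side the calling convention is unchanged. A minimal sketch, assuming a libbpf toolchain with the kfunc declared via __ksym (program name and includes are the usual selftest-style boilerplate, not part of this patch):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;

SEC("tc")
int sketch_peek_skb(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u8 buf[4] = {};

	if (bpf_dynptr_from_skb(skb, 0, &ptr))
		return TC_ACT_OK;

	/* Read the first four packet bytes through the dynptr. */
	bpf_dynptr_read(buf, sizeof(buf), &ptr, 0, 0);

	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";
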
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index f82e9a7d3b37..ada1e39b557e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -299,9 +299,10 @@ void skb_flow_dissect_meta(const struct sk_buff *skb,
EXPORT_SYMBOL(skb_flow_dissect_meta);
static void
-skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
- struct flow_dissector *flow_dissector,
- void *target_container)
+skb_flow_dissect_set_enc_control(enum flow_dissector_key_id type,
+ u32 ctrl_flags,
+ struct flow_dissector *flow_dissector,
+ void *target_container)
{
struct flow_dissector_key_control *ctrl;
@@ -312,6 +313,7 @@ skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
target_container);
ctrl->addr_type = type;
+ ctrl->flags = ctrl_flags;
}
void
@@ -367,6 +369,7 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
{
struct ip_tunnel_info *info;
struct ip_tunnel_key *key;
+ u32 ctrl_flags = 0;
/* A quick check to see if there might be something to do. */
if (!dissector_uses_key(flow_dissector,
@@ -391,11 +394,20 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
key = &info->key;
+ if (test_bit(IP_TUNNEL_CSUM_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_CSUM;
+ if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_DONT_FRAGMENT;
+ if (test_bit(IP_TUNNEL_OAM_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_OAM;
+ if (test_bit(IP_TUNNEL_CRIT_OPT_BIT, key->tun_flags))
+ ctrl_flags |= FLOW_DIS_F_TUNNEL_CRIT_OPT;
+
switch (ip_tunnel_info_af(info)) {
case AF_INET:
- skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- flow_dissector,
- target_container);
+ skb_flow_dissect_set_enc_control(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ ctrl_flags, flow_dissector,
+ target_container);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_dissector_key_ipv4_addrs *ipv4;
@@ -408,9 +420,9 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
}
break;
case AF_INET6:
- skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- flow_dissector,
- target_container);
+ skb_flow_dissect_set_enc_control(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ ctrl_flags, flow_dissector,
+ target_container);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_dissector_key_ipv6_addrs *ipv6;
@@ -422,6 +434,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
ipv6->dst = key->u.ipv6.dst;
}
break;
+ default:
+ skb_flow_dissect_set_enc_control(0, ctrl_flags, flow_dissector,
+ target_container);
+ break;
}
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -1792,6 +1808,13 @@ u32 flow_hash_from_keys(struct flow_keys *keys)
}
EXPORT_SYMBOL(flow_hash_from_keys);
+u32 flow_hash_from_keys_seed(struct flow_keys *keys,
+ const siphash_key_t *keyval)
+{
+ return __flow_hash_from_keys(keys, keyval);
+}
+EXPORT_SYMBOL(flow_hash_from_keys_seed);
+
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
struct flow_keys *keys,
const siphash_key_t *keyval)
@@ -1831,22 +1854,23 @@ EXPORT_SYMBOL(make_flow_keys_digest);
static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
-u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
+u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb)
{
struct flow_keys keys;
__flow_hash_secret_init();
memset(&keys, 0, sizeof(keys));
- __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
+ __skb_flow_dissect(net, skb, &flow_keys_dissector_symmetric,
&keys, NULL, 0, 0, 0, 0);
return __flow_hash_from_keys(&keys, &hashrnd);
}
-EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric_net);
/**
- * __skb_get_hash: calculate a flow hash
+ * __skb_get_hash_net: calculate a flow hash
+ * @net: associated network namespace, derived from @skb if NULL
* @skb: sk_buff to calculate flow hash from
*
* This function calculates a flow hash based on src/dst addresses
@@ -1854,18 +1878,24 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
* on success, zero indicates no valid hash. Also, sets l4_hash in skb
* if hash is a canonical 4-tuple hash over transport ports.
*/
-void __skb_get_hash(struct sk_buff *skb)
+void __skb_get_hash_net(const struct net *net, struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
+ memset(&keys, 0, sizeof(keys));
+
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ &keys, NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
__flow_hash_secret_init();
- hash = ___skb_get_hash(skb, &keys, &hashrnd);
+ hash = __flow_hash_from_keys(&keys, &hashrnd);
__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
-EXPORT_SYMBOL(__skb_get_hash);
+EXPORT_SYMBOL(__skb_get_hash_net);
__u32 skb_get_hash_perturb(const struct sk_buff *skb,
const siphash_key_t *perturb)
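
flow_hash_from_keys_seed() is exported above so that callers can hash already-dissected keys with their own siphash key instead of the global hashrnd. A hedged sketch of such a caller (sketch_hash_skb() and its seed handling are invented):

/* Sketch: dissect once, then hash with a caller-provided seed. */
static u32 sketch_hash_skb(const struct sk_buff *skb,
			   const siphash_key_t *seed)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));
	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	return flow_hash_from_keys_seed(&keys, seed);
}
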
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index fae9c4694186..412816076b8b 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -206,7 +206,7 @@ void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
{
struct net_rate_estimator *est;
- est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
+ est = unrcu_pointer(xchg(rate_est, NULL));
if (est) {
timer_shutdown_sync(&est->timer);
kfree_rcu(est, rcu);
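
The gen_estimator change above swaps an open-coded __force cast for unrcu_pointer() around xchg(), which strips the __rcu address space from the returned value without hiding the conversion from sparse. The same pattern in isolation, with hypothetical types (sketch_obj/sketch_head are invented):

struct sketch_obj {
	struct rcu_head rcu;
};

static struct sketch_obj __rcu *sketch_head;

/* Atomically detach the object and free it after a grace period. */
static void sketch_detach_and_free(void)
{
	struct sketch_obj *obj;

	obj = unrcu_pointer(xchg(&sketch_head, NULL));
	if (obj)
		kfree_rcu(obj, rcu);
}
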
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 4a0797f0a154..afb05f58b64c 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -38,13 +38,14 @@ static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
struct dst_entry *dst, bool can_redirect)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int ret;
- /* Migration disable and BH disable are needed to protect per-cpu
- * redirect_info between BPF prog and skb_do_redirect().
+ /* Disabling BH is needed to protect per-CPU bpf_redirect_info between
+ * BPF prog and skb_do_redirect().
*/
- migrate_disable();
local_bh_disable();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
bpf_compute_data_pointers(skb);
ret = bpf_prog_run_save_cb(lwt->prog, skb);
@@ -77,8 +78,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
break;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
- migrate_enable();
return ret;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 45fd88405b6b..277751375b0a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3578,7 +3578,7 @@ static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
rcu_read_unlock();
}
-static void neigh_proc_update(struct ctl_table *ctl, int write)
+static void neigh_proc_update(const struct ctl_table *ctl, int write)
{
struct net_device *dev = ctl->extra1;
struct neigh_parms *p = ctl->extra2;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4c27a360c294..0e2084ce7b75 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -2028,7 +2028,7 @@ static void netdev_release(struct device *d)
* device is dead and about to be freed.
*/
kfree(rcu_access_pointer(dev->ifalias));
- netdev_freemem(dev);
+ kvfree(dev);
}
static const void *net_namespace(const struct device *d)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 4f7a61688d18..6a823ba906c6 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -693,11 +693,16 @@ EXPORT_SYMBOL_GPL(__put_net);
* get_net_ns - increment the refcount of the network namespace
* @ns: common namespace (net)
*
- * Returns the net's common namespace.
+ * Returns the net's common namespace or ERR_PTR() if ref is zero.
*/
struct ns_common *get_net_ns(struct ns_common *ns)
{
- return &get_net(container_of(ns, struct net, ns))->ns;
+ struct net *net;
+
+ net = maybe_get_net(container_of(ns, struct net, ns));
+ if (net)
+ return &net->ns;
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);
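
get_net_ns() above now refuses to resurrect a namespace whose refcount has already hit zero and returns ERR_PTR(-EINVAL) instead, so callers must check the result with IS_ERR() rather than assuming success; for example (sketch_grab_net_ns() is illustrative only):

/* Sketch: a caller tolerating the new failure mode. */
static struct ns_common *sketch_grab_net_ns(struct ns_common *ns)
{
	struct ns_common *ret;

	ret = get_net_ns(ns);
	if (IS_ERR(ret))
		return NULL;	/* namespace is already going away */

	return ret;
}
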
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 1f6ae6379e0f..05f9515d2c05 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -59,22 +59,22 @@ XDP_METADATA_KFUNC_xxx
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
xdp_rx_meta, NETDEV_A_DEV_PAD) ||
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
- xsk_features, NETDEV_A_DEV_PAD)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ xsk_features, NETDEV_A_DEV_PAD))
+ goto err_cancel_msg;
if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
- netdev->xdp_zc_max_segs)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ netdev->xdp_zc_max_segs))
+ goto err_cancel_msg;
}
genlmsg_end(rsp, hdr);
return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
}
static void
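
The netdev-genl rework above funnels every nla_put() failure through a single err_cancel_msg label and returns -EMSGSIZE, the conventional way to tell the netlink core that the message did not fit. The same shape in a stand-alone sketch (SKETCH_ATTR_VALUE and sketch_fill_one() are hypothetical):

static int sketch_fill_one(struct sk_buff *rsp, u32 value,
			   const struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, SKETCH_ATTR_VALUE, value))
		goto err_cancel_msg;

	genlmsg_end(rsp, hdr);
	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}
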
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f4444b4e39e6..2abe6e919224 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -178,7 +178,8 @@ static void page_pool_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
- CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag, 4 * sizeof(long));
+ CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
+ PAGE_POOL_FRAG_GROUP_ALIGN);
}
static int page_pool_init(struct page_pool *pool,
@@ -327,19 +328,18 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
}
EXPORT_SYMBOL(page_pool_create);
-static void page_pool_return_page(struct page_pool *pool, struct page *page);
+static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
-noinline
-static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
+static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
struct ptr_ring *r = &pool->ring;
- struct page *page;
+ netmem_ref netmem;
int pref_nid; /* preferred NUMA node */
/* Quicker fallback, avoid locks when ring is empty */
if (__ptr_ring_empty(r)) {
alloc_stat_inc(pool, empty);
- return NULL;
+ return 0;
}
/* Softirq guarantee CPU and thus NUMA node is stable. This,
@@ -354,57 +354,57 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
/* Refill alloc array, but only if NUMA match */
do {
- page = __ptr_ring_consume(r);
- if (unlikely(!page))
+ netmem = (__force netmem_ref)__ptr_ring_consume(r);
+ if (unlikely(!netmem))
break;
- if (likely(page_to_nid(page) == pref_nid)) {
- pool->alloc.cache[pool->alloc.count++] = page;
+ if (likely(page_to_nid(netmem_to_page(netmem)) == pref_nid)) {
+ pool->alloc.cache[pool->alloc.count++] = netmem;
} else {
/* NUMA mismatch;
* (1) release 1 page to page-allocator and
* (2) break out to fallthrough to alloc_pages_node.
* This limits stress on the page buddy allocator.
*/
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
alloc_stat_inc(pool, waive);
- page = NULL;
+ netmem = 0;
break;
}
} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
/* Return last page */
if (likely(pool->alloc.count > 0)) {
- page = pool->alloc.cache[--pool->alloc.count];
+ netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, refill);
}
- return page;
+ return netmem;
}
/* fast path */
-static struct page *__page_pool_get_cached(struct page_pool *pool)
+static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{
- struct page *page;
+ netmem_ref netmem;
/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
if (likely(pool->alloc.count)) {
/* Fast-path */
- page = pool->alloc.cache[--pool->alloc.count];
+ netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, fast);
} else {
- page = page_pool_refill_alloc_cache(pool);
+ netmem = page_pool_refill_alloc_cache(pool);
}
- return page;
+ return netmem;
}
static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
- const struct page *page,
+ netmem_ref netmem,
u32 dma_sync_size)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
- dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+ dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);
dma_sync_size = min(dma_sync_size, pool->p.max_len);
__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
@@ -414,14 +414,14 @@ static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
- const struct page *page,
+ netmem_ref netmem,
u32 dma_sync_size)
{
if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
- __page_pool_dma_sync_for_device(pool, page, dma_sync_size);
+ __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
}
-static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
+static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
{
dma_addr_t dma;
@@ -430,31 +430,32 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
* into page private data (i.e 32bit cpu with 64bit DMA caps)
* This mapping is kept for lifetime of page, until leaving pool.
*/
- dma = dma_map_page_attrs(pool->p.dev, page, 0,
- (PAGE_SIZE << pool->p.order),
- pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_WEAK_ORDERING);
+ dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
+ (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(pool->p.dev, dma))
return false;
- if (page_pool_set_dma_addr(page, dma))
+ if (page_pool_set_dma_addr_netmem(netmem, dma))
goto unmap_failed;
- page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+ page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
return true;
unmap_failed:
- WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
+ WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
dma_unmap_page_attrs(pool->p.dev, dma,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return false;
}
-static void page_pool_set_pp_info(struct page_pool *pool,
- struct page *page)
+static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{
+ struct page *page = netmem_to_page(netmem);
+
page->pp = pool;
page->pp_magic |= PP_SIGNATURE;
@@ -464,13 +465,15 @@ static void page_pool_set_pp_info(struct page_pool *pool,
* is dirtying the same cache line as the page->pp_magic above, so
* the overhead is negligible.
*/
- page_pool_fragment_page(page, 1);
+ page_pool_fragment_netmem(netmem, 1);
if (pool->has_init_callback)
- pool->slow.init_callback(page, pool->slow.init_arg);
+ pool->slow.init_callback(netmem, pool->slow.init_arg);
}
-static void page_pool_clear_pp_info(struct page *page)
+static void page_pool_clear_pp_info(netmem_ref netmem)
{
+ struct page *page = netmem_to_page(netmem);
+
page->pp_magic = 0;
page->pp = NULL;
}
@@ -485,34 +488,34 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
if (unlikely(!page))
return NULL;
- if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page))) {
+ if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page)))) {
put_page(page);
return NULL;
}
alloc_stat_inc(pool, slow_high_order);
- page_pool_set_pp_info(pool, page);
+ page_pool_set_pp_info(pool, page_to_netmem(page));
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
- trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
+ trace_page_pool_state_hold(pool, page_to_netmem(page),
+ pool->pages_state_hold_cnt);
return page;
}
/* slow path */
-noinline
-static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
- gfp_t gfp)
+static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
+ gfp_t gfp)
{
const int bulk = PP_ALLOC_CACHE_REFILL;
unsigned int pp_order = pool->p.order;
bool dma_map = pool->dma_map;
- struct page *page;
+ netmem_ref netmem;
int i, nr_pages;
/* Don't support bulk alloc for high-order pages */
if (unlikely(pp_order))
- return __page_pool_alloc_page_order(pool, gfp);
+ return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));
/* Unnecessary as alloc cache is empty, but guarantees zero count */
if (unlikely(pool->alloc.count > 0))
@@ -521,56 +524,63 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
- nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
- pool->alloc.cache);
+ nr_pages = alloc_pages_bulk_array_node(gfp,
+ pool->p.nid, bulk,
+ (struct page **)pool->alloc.cache);
if (unlikely(!nr_pages))
- return NULL;
+ return 0;
/* Pages have been filled into alloc.cache array, but count is zero and
* page elements have not been (possibly) DMA mapped.
*/
for (i = 0; i < nr_pages; i++) {
- page = pool->alloc.cache[i];
- if (dma_map && unlikely(!page_pool_dma_map(pool, page))) {
- put_page(page);
+ netmem = pool->alloc.cache[i];
+ if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) {
+ put_page(netmem_to_page(netmem));
continue;
}
- page_pool_set_pp_info(pool, page);
- pool->alloc.cache[pool->alloc.count++] = page;
+ page_pool_set_pp_info(pool, netmem);
+ pool->alloc.cache[pool->alloc.count++] = netmem;
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
- trace_page_pool_state_hold(pool, page,
+ trace_page_pool_state_hold(pool, netmem,
pool->pages_state_hold_cnt);
}
/* Return last page */
if (likely(pool->alloc.count > 0)) {
- page = pool->alloc.cache[--pool->alloc.count];
+ netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, slow);
} else {
- page = NULL;
+ netmem = 0;
}
/* When the page is just alloc'ed, it should/must have refcnt 1. */
- return page;
+ return netmem;
}
/* For using page_pool replace: alloc_pages() API calls, but provide
* synchronization guarantee for allocation side.
*/
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
{
- struct page *page;
+ netmem_ref netmem;
/* Fast-path: Get a page from cache */
- page = __page_pool_get_cached(pool);
- if (page)
- return page;
+ netmem = __page_pool_get_cached(pool);
+ if (netmem)
+ return netmem;
/* Slow-path: cache empty, do real allocation */
- page = __page_pool_alloc_pages_slow(pool, gfp);
- return page;
+ netmem = __page_pool_alloc_pages_slow(pool, gfp);
+ return netmem;
+}
+EXPORT_SYMBOL(page_pool_alloc_netmem);
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+{
+ return netmem_to_page(page_pool_alloc_netmem(pool, gfp));
}
EXPORT_SYMBOL(page_pool_alloc_pages);
ALLOW_ERROR_INJECTION(page_pool_alloc_pages, NULL);
@@ -599,8 +609,8 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict)
return inflight;
}
-static __always_inline
-void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
+static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ netmem_ref netmem)
{
dma_addr_t dma;
@@ -610,13 +620,13 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
*/
return;
- dma = page_pool_get_dma_addr(page);
+ dma = page_pool_get_dma_addr_netmem(netmem);
/* When page is unmapped, it cannot be returned to our pool */
dma_unmap_page_attrs(pool->p.dev, dma,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
- page_pool_set_dma_addr(page, 0);
+ page_pool_set_dma_addr_netmem(netmem, 0);
}
/* Disconnects a page (from a page_pool). API users can have a need
@@ -624,35 +634,34 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
* a regular page (that will eventually be returned to the normal
* page-allocator via put_page).
*/
-void page_pool_return_page(struct page_pool *pool, struct page *page)
+void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
{
int count;
- __page_pool_release_page_dma(pool, page);
-
- page_pool_clear_pp_info(page);
+ __page_pool_release_page_dma(pool, netmem);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
*/
count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
- trace_page_pool_state_release(pool, page, count);
+ trace_page_pool_state_release(pool, netmem, count);
- put_page(page);
+ page_pool_clear_pp_info(netmem);
+ put_page(netmem_to_page(netmem));
/* An optimization would be to call __free_pages(page, pool->p.order)
* knowing page is not part of page-cache (thus avoiding a
* __page_cache_release() call).
*/
}
-static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
+static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{
int ret;
/* BH protection not needed if current is softirq */
if (in_softirq())
- ret = ptr_ring_produce(&pool->ring, page);
+ ret = ptr_ring_produce(&pool->ring, (__force void *)netmem);
else
- ret = ptr_ring_produce_bh(&pool->ring, page);
+ ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem);
if (!ret) {
recycle_stat_inc(pool, ring);
@@ -667,7 +676,7 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
*
* Caller must provide appropriate safe context.
*/
-static bool page_pool_recycle_in_cache(struct page *page,
+static bool page_pool_recycle_in_cache(netmem_ref netmem,
struct page_pool *pool)
{
if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
@@ -676,14 +685,15 @@ static bool page_pool_recycle_in_cache(struct page *page,
}
/* Caller MUST have verified/know (page_ref_count(page) == 1) */
- pool->alloc.cache[pool->alloc.count++] = page;
+ pool->alloc.cache[pool->alloc.count++] = netmem;
recycle_stat_inc(pool, cached);
return true;
}
-static bool __page_pool_page_can_be_recycled(const struct page *page)
+static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{
- return page_ref_count(page) == 1 && !page_is_pfmemalloc(page);
+ return page_ref_count(netmem_to_page(netmem)) == 1 &&
+ !page_is_pfmemalloc(netmem_to_page(netmem));
}
/* If the page refcnt == 1, this will try to recycle the page.
@@ -692,8 +702,8 @@ static bool __page_pool_page_can_be_recycled(const struct page *page)
* If the page refcnt != 1, then the page will be returned to memory
* subsystem.
*/
-static __always_inline struct page *
-__page_pool_put_page(struct page_pool *pool, struct page *page,
+static __always_inline netmem_ref
+__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
unsigned int dma_sync_size, bool allow_direct)
{
lockdep_assert_no_hardirq();
@@ -707,16 +717,16 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
* page is NOT reusable when allocated when system is under
* some pressure. (page_is_pfmemalloc)
*/
- if (likely(__page_pool_page_can_be_recycled(page))) {
+ if (likely(__page_pool_page_can_be_recycled(netmem))) {
/* Read barrier done in page_ref_count / READ_ONCE */
- page_pool_dma_sync_for_device(pool, page, dma_sync_size);
+ page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
- if (allow_direct && page_pool_recycle_in_cache(page, pool))
- return NULL;
+ if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
+ return 0;
/* Page found as candidate for recycling */
- return page;
+ return netmem;
}
/* Fallback/non-XDP mode: API users have an elevated refcnt.
*
@@ -732,9 +742,9 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
* will be invoking put_page.
*/
recycle_stat_inc(pool, released_refcnt);
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
- return NULL;
+ return 0;
}
static bool page_pool_napi_local(const struct page_pool *pool)
@@ -760,19 +770,28 @@ static bool page_pool_napi_local(const struct page_pool *pool)
return napi && READ_ONCE(napi->list_owner) == cpuid;
}
-void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct)
+void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
+ unsigned int dma_sync_size, bool allow_direct)
{
if (!allow_direct)
allow_direct = page_pool_napi_local(pool);
- page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
- if (page && !page_pool_recycle_in_ring(pool, page)) {
+ netmem =
+ __page_pool_put_page(pool, netmem, dma_sync_size, allow_direct);
+ if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
/* Cache full, fallback to free pages */
recycle_stat_inc(pool, ring_full);
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
}
}
+EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
+
+void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
+{
+ page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
+ allow_direct);
+}
EXPORT_SYMBOL(page_pool_put_unrefed_page);
/**
@@ -800,16 +819,16 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
allow_direct = page_pool_napi_local(pool);
for (i = 0; i < count; i++) {
- struct page *page = virt_to_head_page(data[i]);
+ netmem_ref netmem = page_to_netmem(virt_to_head_page(data[i]));
/* It is not the last user for the page frag case */
- if (!page_pool_is_last_ref(page))
+ if (!page_pool_is_last_ref(netmem))
continue;
- page = __page_pool_put_page(pool, page, -1, allow_direct);
+ netmem = __page_pool_put_page(pool, netmem, -1, allow_direct);
/* Approved for bulk recycling in ptr_ring cache */
- if (page)
- data[bulk_len++] = page;
+ if (netmem)
+ data[bulk_len++] = (__force void *)netmem;
}
if (!bulk_len)
@@ -835,98 +854,106 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
* since put_page() with refcnt == 1 can be an expensive operation
*/
for (; i < bulk_len; i++)
- page_pool_return_page(pool, data[i]);
+ page_pool_return_page(pool, (__force netmem_ref)data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
-static struct page *page_pool_drain_frag(struct page_pool *pool,
- struct page *page)
+static netmem_ref page_pool_drain_frag(struct page_pool *pool,
+ netmem_ref netmem)
{
long drain_count = BIAS_MAX - pool->frag_users;
/* Some user is still using the page frag */
- if (likely(page_pool_unref_page(page, drain_count)))
- return NULL;
+ if (likely(page_pool_unref_netmem(netmem, drain_count)))
+ return 0;
- if (__page_pool_page_can_be_recycled(page)) {
- page_pool_dma_sync_for_device(pool, page, -1);
- return page;
+ if (__page_pool_page_can_be_recycled(netmem)) {
+ page_pool_dma_sync_for_device(pool, netmem, -1);
+ return netmem;
}
- page_pool_return_page(pool, page);
- return NULL;
+ page_pool_return_page(pool, netmem);
+ return 0;
}
static void page_pool_free_frag(struct page_pool *pool)
{
long drain_count = BIAS_MAX - pool->frag_users;
- struct page *page = pool->frag_page;
+ netmem_ref netmem = pool->frag_page;
- pool->frag_page = NULL;
+ pool->frag_page = 0;
- if (!page || page_pool_unref_page(page, drain_count))
+ if (!netmem || page_pool_unref_netmem(netmem, drain_count))
return;
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
}
-struct page *page_pool_alloc_frag(struct page_pool *pool,
- unsigned int *offset,
- unsigned int size, gfp_t gfp)
+netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
+ unsigned int *offset, unsigned int size,
+ gfp_t gfp)
{
unsigned int max_size = PAGE_SIZE << pool->p.order;
- struct page *page = pool->frag_page;
+ netmem_ref netmem = pool->frag_page;
if (WARN_ON(size > max_size))
- return NULL;
+ return 0;
size = ALIGN(size, dma_get_cache_alignment());
*offset = pool->frag_offset;
- if (page && *offset + size > max_size) {
- page = page_pool_drain_frag(pool, page);
- if (page) {
+ if (netmem && *offset + size > max_size) {
+ netmem = page_pool_drain_frag(pool, netmem);
+ if (netmem) {
alloc_stat_inc(pool, fast);
goto frag_reset;
}
}
- if (!page) {
- page = page_pool_alloc_pages(pool, gfp);
- if (unlikely(!page)) {
- pool->frag_page = NULL;
- return NULL;
+ if (!netmem) {
+ netmem = page_pool_alloc_netmem(pool, gfp);
+ if (unlikely(!netmem)) {
+ pool->frag_page = 0;
+ return 0;
}
- pool->frag_page = page;
+ pool->frag_page = netmem;
frag_reset:
pool->frag_users = 1;
*offset = 0;
pool->frag_offset = size;
- page_pool_fragment_page(page, BIAS_MAX);
- return page;
+ page_pool_fragment_netmem(netmem, BIAS_MAX);
+ return netmem;
}
pool->frag_users++;
pool->frag_offset = *offset + size;
alloc_stat_inc(pool, fast);
- return page;
+ return netmem;
+}
+EXPORT_SYMBOL(page_pool_alloc_frag_netmem);
+
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+ unsigned int size, gfp_t gfp)
+{
+ return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
+ gfp));
}
EXPORT_SYMBOL(page_pool_alloc_frag);
static void page_pool_empty_ring(struct page_pool *pool)
{
- struct page *page;
+ netmem_ref netmem;
/* Empty recycle ring */
- while ((page = ptr_ring_consume_bh(&pool->ring))) {
+ while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
/* Verify the refcnt invariant of cached pages */
- if (!(page_ref_count(page) == 1))
+ if (!(page_ref_count(netmem_to_page(netmem)) == 1))
pr_crit("%s() page_pool refcnt %d violation\n",
- __func__, page_ref_count(page));
+ __func__, netmem_ref_count(netmem));
- page_pool_return_page(pool, page);
+ page_pool_return_page(pool, netmem);
}
}
@@ -942,7 +969,7 @@ static void __page_pool_destroy(struct page_pool *pool)
static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
- struct page *page;
+ netmem_ref netmem;
if (pool->destroy_cnt)
return;
@@ -952,8 +979,8 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
* call concurrently.
*/
while (pool->alloc.count) {
- page = pool->alloc.cache[--pool->alloc.count];
- page_pool_return_page(pool, page);
+ netmem = pool->alloc.cache[--pool->alloc.count];
+ page_pool_return_page(pool, netmem);
}
}
@@ -1014,7 +1041,7 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
-static void page_pool_disable_direct_recycling(struct page_pool *pool)
+void page_pool_disable_direct_recycling(struct page_pool *pool)
{
/* Disable direct recycling based on pool->cpuid.
* Paired with READ_ONCE() in page_pool_napi_local().
@@ -1027,11 +1054,12 @@ static void page_pool_disable_direct_recycling(struct page_pool *pool)
/* To avoid races with recycling and additional barriers make sure
* pool and NAPI are unlinked when NAPI is disabled.
*/
- WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
- READ_ONCE(pool->p.napi->list_owner) != -1);
+ WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
+ WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
WRITE_ONCE(pool->p.napi, NULL);
}
+EXPORT_SYMBOL(page_pool_disable_direct_recycling);
void page_pool_destroy(struct page_pool *pool)
{
@@ -1059,15 +1087,15 @@ EXPORT_SYMBOL(page_pool_destroy);
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
- struct page *page;
+ netmem_ref netmem;
trace_page_pool_update_nid(pool, new_nid);
pool->p.nid = new_nid;
/* Flush pool alloc cache, as refill will check NUMA node */
while (pool->alloc.count) {
- page = pool->alloc.cache[--pool->alloc.count];
- page_pool_return_page(pool, page);
+ netmem = pool->alloc.cache[--pool->alloc.count];
+ page_pool_return_page(pool, netmem);
}
}
EXPORT_SYMBOL(page_pool_update_nid);
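
page_pool now exposes netmem-based entry points alongside the page-based wrappers above; a driver that treats the handle as opaque could allocate and recycle buffers roughly as below. The sketch_* names are invented; page_pool_put_full_netmem() is the full-sync put variant used in the skbuff hunk further down.

static netmem_ref sketch_rx_refill(struct page_pool *pool)
{
	/* 0 is the "no netmem" value, mirroring a NULL page. */
	return page_pool_alloc_netmem(pool, GFP_ATOMIC | __GFP_NOWARN);
}

static void sketch_rx_recycle(struct page_pool *pool, netmem_ref netmem)
{
	page_pool_put_full_netmem(pool, netmem, false);
}
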
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b86b0a87367d..87e67194f240 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3969,22 +3969,28 @@ static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
}
-static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb,
+ struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
size_t min_ifinfo_dump_size = 0;
- struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
struct net_device *dev;
- int hdrlen;
+ struct nlattr *nla;
+ int hdrlen, rem;
/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
- if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
- if (tb[IFLA_EXT_MASK])
- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ return NLMSG_GOODSIZE;
+
+ nla_for_each_attr_type(nla, IFLA_EXT_MASK,
+ nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), rem) {
+ if (nla_len(nla) == sizeof(u32))
+ ext_filter_mask = nla_get_u32(nla);
}
if (!ext_filter_mask)
@@ -6484,6 +6490,52 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Process one rtnetlink message. */
+static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
+ rtnl_dumpit_func dumpit = cb->data;
+ int err;
+
+ /* Previous iteration has already finished; avoid calling ->dumpit()
+ * again, it may not expect to be called after it reached the end.
+ */
+ if (!dumpit)
+ return 0;
+
+ if (needs_lock)
+ rtnl_lock();
+ err = dumpit(skb, cb);
+ if (needs_lock)
+ rtnl_unlock();
+
+ /* Old dump handlers used to send NLM_DONE as in a separate recvmsg().
+ * Some applications which parse netlink manually depend on this.
+ */
+ if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+ if (err < 0 && err != -EMSGSIZE)
+ return err;
+ if (!err)
+ cb->data = NULL;
+
+ return skb->len;
+ }
+ return err;
+}
+
+static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control)
+{
+ if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
+ !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
+ WARN_ON(control->data);
+ control->data = control->dump;
+ control->dump = rtnl_dumpit;
+ }
+
+ return netlink_dump_start(ssk, skb, nlh, control);
+}
+
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -6548,7 +6600,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.module = owner,
.flags = flags,
};
- err = netlink_dump_start(rtnl, skb, nlh, &c);
+ err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
/* netlink_dump_start() will keep a reference on
* module if dump is still in progress.
*/
@@ -6663,7 +6715,6 @@ static int __net_init rtnetlink_net_init(struct net *net)
struct netlink_kernel_cfg cfg = {
.groups = RTNLGRP_MAX,
.input = rtnetlink_rcv,
- .cb_mutex = &rtnl_mutex,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = rtnetlink_bind,
};
@@ -6694,7 +6745,7 @@ void __init rtnetlink_init(void)
register_netdevice_notifier(&rtnetlink_dev_notifier);
rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
- rtnl_dump_ifinfo, 0);
+ rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 466999a7515e..83f8cd8aa2d1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -277,6 +277,7 @@ static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
#endif
struct napi_alloc_cache {
+ local_lock_t bh_lock;
struct page_frag_cache page;
struct page_frag_1k page_small;
unsigned int skb_count;
@@ -284,7 +285,9 @@ struct napi_alloc_cache {
};
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
/* Double check that napi_get_frags() allocates skbs with
* skb->head being backed by slab, not a page fragment.
@@ -306,11 +309,16 @@ void napi_get_frags_check(struct napi_struct *napi)
void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
- return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
align_mask);
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ return data;
+
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
@@ -318,19 +326,15 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
void *data;
- fragsz = SKB_DATA_ALIGN(fragsz);
if (in_hardirq() || irqs_disabled()) {
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+ fragsz = SKB_DATA_ALIGN(fragsz);
data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
align_mask);
} else {
- struct napi_alloc_cache *nc;
-
local_bh_disable();
- nc = this_cpu_ptr(&napi_alloc_cache);
- data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
- align_mask);
+ data = __napi_alloc_frag_align(fragsz, align_mask);
local_bh_enable();
}
return data;
@@ -342,16 +346,20 @@ static struct sk_buff *napi_skb_cache_get(void)
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!nc->skb_count)) {
nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
- if (unlikely(!nc->skb_count))
+ if (unlikely(!nc->skb_count)) {
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
return NULL;
+ }
}
skb = nc->skb_cache[--nc->skb_count];
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
return skb;
@@ -744,9 +752,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
pfmemalloc = nc->pfmemalloc;
} else {
local_bh_disable();
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+
nc = this_cpu_ptr(&napi_alloc_cache.page);
data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;
+
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
local_bh_enable();
}
@@ -810,11 +822,11 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
goto skb_success;
}
- nc = this_cpu_ptr(&napi_alloc_cache);
-
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ nc = this_cpu_ptr(&napi_alloc_cache);
if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
/* we are artificially inflating the allocation size, but
* that is not as bad as it may look like, as:
@@ -836,6 +848,7 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
data = page_frag_alloc(&nc->page, len, gfp_mask);
pfmemalloc = nc->page.pfmemalloc;
}
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!data))
return NULL;
@@ -1002,8 +1015,10 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
EXPORT_SYMBOL(skb_cow_data_for_xdp);
#if IS_ENABLED(CONFIG_PAGE_POOL)
-bool napi_pp_put_page(struct page *page)
+bool napi_pp_put_page(netmem_ref netmem)
{
+ struct page *page = netmem_to_page(netmem);
+
page = compound_head(page);
/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
@@ -1016,7 +1031,7 @@ bool napi_pp_put_page(struct page *page)
if (unlikely(!is_pp_page(page)))
return false;
- page_pool_put_full_page(page->pp, page, false);
+ page_pool_put_full_netmem(page->pp, page_to_netmem(page), false);
return true;
}
@@ -1027,7 +1042,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
return false;
- return napi_pp_put_page(virt_to_page(data));
+ return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}
/**
@@ -1190,7 +1205,8 @@ void __kfree_skb(struct sk_buff *skb)
EXPORT_SYMBOL(__kfree_skb);
static __always_inline
-bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason reason)
{
if (unlikely(!skb_unref(skb)))
return false;
@@ -1203,26 +1219,27 @@ bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
if (reason == SKB_CONSUMED)
trace_consume_skb(skb, __builtin_return_address(0));
else
- trace_kfree_skb(skb, __builtin_return_address(0), reason);
+ trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
return true;
}
/**
- * kfree_skb_reason - free an sk_buff with special reason
+ * sk_skb_reason_drop - free an sk_buff with special reason
+ * @sk: the socket to receive @skb, or NULL if not applicable
* @skb: buffer to free
* @reason: reason why this skb is dropped
*
- * Drop a reference to the buffer and free it if the usage count has
- * hit zero. Meanwhile, pass the drop reason to 'kfree_skb'
- * tracepoint.
+ * Drop a reference to the buffer and free it if the usage count has hit
+ * zero. Meanwhile, pass the receiving socket and drop reason to
+ * 'kfree_skb' tracepoint.
*/
void __fix_address
-kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
- if (__kfree_skb_reason(skb, reason))
+ if (__sk_skb_reason_drop(sk, skb, reason))
__kfree_skb(skb);
}
-EXPORT_SYMBOL(kfree_skb_reason);
+EXPORT_SYMBOL(sk_skb_reason_drop);
#define KFREE_SKB_BULK_SIZE 16
@@ -1261,7 +1278,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
while (segs) {
struct sk_buff *next = segs->next;
- if (__kfree_skb_reason(segs, reason)) {
+ if (__sk_skb_reason_drop(NULL, segs, reason)) {
skb_poison_list(segs);
kfree_skb_add_bulk(segs, &sa, reason);
}
@@ -1433,6 +1450,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
if (!kasan_mempool_poison_object(skb))
return;
+ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
nc->skb_cache[nc->skb_count++] = skb;
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
@@ -1444,6 +1462,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
+ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
}
void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
@@ -1854,7 +1873,6 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg)
{
- struct ubuf_info *orig_uarg = skb_zcopy(skb);
int err, orig_len = skb->len;
if (uarg->ops->link_skb) {
@@ -1862,6 +1880,8 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
if (err)
return err;
} else {
+ struct ubuf_info *orig_uarg = skb_zcopy(skb);
+
/* An skb can only point to one uarg. This edge case happens
* when TCP appends to an skb, but zerocopy_realloc triggered
* a new alloc.
@@ -1882,8 +1902,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
return err;
}
- if (!uarg->ops->link_skb)
- skb_zcopy_set(skb, uarg, NULL);
+ skb_zcopy_set(skb, uarg, NULL);
return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
@@ -4139,6 +4158,9 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
if (skb_zcopy(tgt) || skb_zcopy(skb))
return 0;
+ DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle);
+ DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb));
+
todo = shiftlen;
from = 0;
to = skb_shinfo(tgt)->nr_frags;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index fd20aae30be2..bbf40b999713 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -434,7 +434,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
page = sg_page(sge);
if (copied + copy > len)
copy = len - copied;
- copy = copy_page_to_iter(page, sge->offset, copy, iter);
+ if (copy)
+ copy = copy_page_to_iter(page, sge->offset, copy, iter);
if (!copy) {
copied = copied ? copied : -EFAULT;
goto out;
diff --git a/net/core/sock.c b/net/core/sock.c
index 8629f9aecf91..9abc4fe25953 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1083,6 +1083,17 @@ bool sockopt_capable(int cap)
}
EXPORT_SYMBOL(sockopt_capable);
+static int sockopt_validate_clockid(__kernel_clockid_t value)
+{
+ switch (value) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_TAI:
+ return 0;
+ }
+ return -EINVAL;
+}
+
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.
@@ -1497,6 +1508,11 @@ set_sndbuf:
ret = -EPERM;
break;
}
+
+ ret = sockopt_validate_clockid(sk_txtime.clockid);
+ if (ret)
+ break;
+
sock_valbool_flag(sk, SOCK_TXTIME, true);
sk->sk_clockid = sk_txtime.clockid;
sk->sk_txtime_deadline_mode =
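With the sockopt_validate_clockid() check above, SO_TXTIME now rejects clock IDs other than CLOCK_REALTIME, CLOCK_MONOTONIC and CLOCK_TAI with -EINVAL instead of accepting them silently. A hedged userspace sketch, not part of the patch, showing the option being set; the SO_TXTIME fallback value assumes the asm-generic socket numbering:

#include <linux/net_tstamp.h>	/* struct sock_txtime, SOF_TXTIME_* */
#include <sys/socket.h>
#include <stdio.h>
#include <time.h>

#ifndef SO_TXTIME
#define SO_TXTIME 61		/* fallback for older libc headers */
#endif

int main(void)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,			/* accepted by the new check */
		.flags	 = SOF_TXTIME_REPORT_ERRORS,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* Passing e.g. CLOCK_MONOTONIC_RAW here now fails with EINVAL. */
	if (setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg)))
		perror("setsockopt(SO_TXTIME)");

	return 0;
}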
@@ -2262,7 +2278,12 @@ static void sk_init_common(struct sock *sk)
lockdep_set_class_and_name(&sk->sk_error_queue.lock,
af_elock_keys + sk->sk_family,
af_family_elock_key_strings[sk->sk_family]);
- lockdep_set_class_and_name(&sk->sk_callback_lock,
+ if (sk->sk_kern_sock)
+ lockdep_set_class_and_name(&sk->sk_callback_lock,
+ af_kern_callback_keys + sk->sk_family,
+ af_family_kern_clock_key_strings[sk->sk_family]);
+ else
+ lockdep_set_class_and_name(&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
af_family_clock_key_strings[sk->sk_family]);
}
@@ -3460,18 +3481,6 @@ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
}
sk->sk_uid = uid;
- rwlock_init(&sk->sk_callback_lock);
- if (sk->sk_kern_sock)
- lockdep_set_class_and_name(
- &sk->sk_callback_lock,
- af_kern_callback_keys + sk->sk_family,
- af_family_kern_clock_key_strings[sk->sk_family]);
- else
- lockdep_set_class_and_name(
- &sk->sk_callback_lock,
- af_callback_keys + sk->sk_family,
- af_family_clock_key_strings[sk->sk_family]);
-
sk->sk_state_change = sock_def_wakeup;
sk->sk_data_ready = sock_def_readable;
sk->sk_write_space = sock_def_write_space;
@@ -3742,6 +3751,9 @@ void sk_common_release(struct sock *sk)
sk->sk_prot->unhash(sk);
+ if (sk->sk_socket)
+ sk->sk_socket->sk = NULL;
+
/*
* In this point socket cannot receive new packets, but it is possible
* that some packets are in flight because some CPU runs receiver and
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 654122838025..a08eed9b9142 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -18,7 +18,7 @@
static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX];
-static struct sock_diag_inet_compat __rcu *inet_rcv_compat;
+static const struct sock_diag_inet_compat __rcu *inet_rcv_compat;
static struct workqueue_struct *broadcast_wq;
@@ -187,8 +187,7 @@ void sock_diag_broadcast_destroy(struct sock *sk)
void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr)
{
- xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
- ptr);
+ xchg(&inet_rcv_compat, RCU_INITIALIZER(ptr));
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
@@ -196,8 +195,7 @@ void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr)
{
const struct sock_diag_inet_compat *old;
- old = xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
- NULL);
+ old = unrcu_pointer(xchg(&inet_rcv_compat, NULL));
WARN_ON_ONCE(old != ptr);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
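The sock_diag hunks above replace the open-coded __force casts around xchg() with RCU_INITIALIZER() and unrcu_pointer(), which keeps sparse happy without changing behaviour. A small sketch of the same idiom, not part of the patch; demo_ops and its struct are placeholder names:

#include <linux/rcupdate.h>
#include <linux/bug.h>

struct demo_ops {
	void (*handler)(void);
};

static const struct demo_ops __rcu *demo_ops;

static void demo_register(const struct demo_ops *ptr)
{
	/* RCU_INITIALIZER() adds the __rcu address space for sparse. */
	xchg(&demo_ops, RCU_INITIALIZER(ptr));
}

static void demo_unregister(const struct demo_ops *ptr)
{
	/* unrcu_pointer() strips __rcu from the value xchg() returns. */
	WARN_ON_ONCE(unrcu_pointer(xchg(&demo_ops, NULL)) != ptr);
}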
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 9402889840bf..d3dbb92153f2 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -423,9 +423,6 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock *sk;
int err = 0;
- if (irqs_disabled())
- return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
spin_lock_bh(&stab->lock);
sk = *psk;
if (!sk_test || sk_test == sk)
@@ -948,9 +945,6 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
struct bpf_shtab_elem *elem;
int ret = -ENOENT;
- if (irqs_disabled())
- return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
@@ -1680,19 +1674,23 @@ void sock_map_close(struct sock *sk, long timeout)
lock_sock(sk);
rcu_read_lock();
- psock = sk_psock_get(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- saved_close = READ_ONCE(sk->sk_prot)->close;
- } else {
+ psock = sk_psock(sk);
+ if (likely(psock)) {
saved_close = psock->saved_close;
sock_map_remove_links(sk, psock);
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ goto no_psock;
rcu_read_unlock();
sk_psock_stop(psock);
release_sock(sk);
cancel_delayed_work_sync(&psock->work);
sk_psock_put(sk, psock);
+ } else {
+ saved_close = READ_ONCE(sk->sk_prot)->close;
+no_psock:
+ rcu_read_unlock();
+ release_sock(sk);
}
/* Make sure we do not recurse. This is a bug.
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index c9fb9ad87485..2079000691e2 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -383,38 +383,6 @@ proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
static struct ctl_table net_core_table[] = {
{
- .procname = "wmem_max",
- .data = &sysctl_wmem_max,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
- },
- {
- .procname = "rmem_max",
- .data = &sysctl_rmem_max,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
- },
- {
- .procname = "wmem_default",
- .data = &sysctl_wmem_default,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
- },
- {
- .procname = "rmem_default",
- .data = &sysctl_rmem_default,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
- },
- {
.procname = "mem_pcpu_rsv",
.data = &net_hotdata.sysctl_mem_pcpu_rsv,
.maxlen = sizeof(int),
@@ -697,6 +665,41 @@ static struct ctl_table netns_core_table[] = {
.extra2 = SYSCTL_ONE,
.proc_handler = proc_dou8vec_minmax,
},
+ /* sysctl_core_net_init() will set the entries after this point
+ * to read-only in network namespaces
+ */
+ {
+ .procname = "wmem_max",
+ .data = &sysctl_wmem_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_sndbuf,
+ },
+ {
+ .procname = "rmem_max",
+ .data = &sysctl_rmem_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_rcvbuf,
+ },
+ {
+ .procname = "wmem_default",
+ .data = &sysctl_wmem_default,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_sndbuf,
+ },
+ {
+ .procname = "rmem_default",
+ .data = &sysctl_rmem_default,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_rcvbuf,
+ },
};
static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str)
@@ -724,8 +727,14 @@ static __net_init int sysctl_core_net_init(struct net *net)
if (tbl == NULL)
goto err_dup;
- for (i = 0; i < table_size; ++i)
+ for (i = 0; i < table_size; ++i) {
+ if (tbl[i].data == &sysctl_wmem_max)
+ break;
+
tbl[i].data += (char *)net - (char *)&init_net;
+ }
+ for (; i < table_size; ++i)
+ tbl[i].mode &= ~0222;
}
net->core.sysctl_hdr = register_net_sysctl_sz(net, "net/core", tbl, table_size);
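For the four wmem/rmem entries moved above, sysctl_core_net_init() stops rewriting the data pointers once it reaches sysctl_wmem_max and instead clears the write bits on the remaining entries. A one-line worked example of what the mode mask does, not part of the patch:

#include <linux/types.h>

/* 0644 & ~0222 == 0444: the entry stays readable inside a child network
 * namespace but can only be written from the initial namespace.
 */
static umode_t demo_make_readonly(umode_t mode)
{
	return mode & ~0222;
}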
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 04840697fe79..3717fb152ecc 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -25,7 +25,8 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
struct sk_buff *clone;
unsigned int type;
- if (!skb->sk)
+ if (!skb->sk || !skb->dev ||
+ !phy_is_default_hwtstamp(skb->dev->phydev))
return;
type = classify(skb);
@@ -47,7 +48,7 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
struct mii_timestamper *mii_ts;
unsigned int type;
- if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->mii_ts)
+ if (!skb->dev || !phy_is_default_hwtstamp(skb->dev->phydev))
return false;
if (skb_headroom(skb) < ETH_HLEN)
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 41693154e426..bcc5551c6424 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -127,10 +127,8 @@ void xdp_unreg_mem_model(struct xdp_mem_info *mem)
return;
if (type == MEM_TYPE_PAGE_POOL) {
- rcu_read_lock();
- xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
+ xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
page_pool_destroy(xa->page_pool);
- rcu_read_unlock();
}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
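The xdp_unreg_mem_model() hunk drops the explicit rcu_read_lock() pair by switching to rhashtable_lookup_fast(), which takes the RCU read lock internally. A minimal sketch of the difference, not part of the patch; the names are placeholders:

#include <linux/rhashtable.h>

/* Equivalent to rcu_read_lock(); rhashtable_lookup(); rcu_read_unlock();
 * the returned object is only stable for as long as the caller holds its
 * own RCU read-side section or another reference on it.
 */
static void *demo_lookup(struct rhashtable *ht, const u32 *key,
			 const struct rhashtable_params params)
{
	return rhashtable_lookup_fast(ht, key, params);
}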
@@ -295,10 +293,8 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
mutex_lock(&mem_id_lock);
ret = __mem_id_init_hash_table();
mutex_unlock(&mem_id_lock);
- if (ret < 0) {
- WARN_ON(1);
+ if (ret < 0)
return ERR_PTR(ret);
- }
}
xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ff41bd6f99c3..5926159a6f20 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -657,8 +657,11 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_v4_send_response(sk, req))
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
- reqsk_put(req);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+ reqsk_free(req);
+ else
+ reqsk_put(req);
+
return 0;
drop_and_free:
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 85f4b8fdbe5e..da5dba120bc9 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -400,8 +400,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_v6_send_response(sk, req))
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
- reqsk_put(req);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+ reqsk_free(req);
+ else
+ reqsk_put(req);
+
return 0;
drop_and_free:
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 251a57cf5822..fecc8190064f 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -54,17 +54,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
if (state == DCCP_TIME_WAIT)
timeo = DCCP_TIMEWAIT_LEN;
- /* tw_timer is pinned, so we need to make sure BH are disabled
- * in following section, otherwise timer handler could run before
- * we complete the initialization.
- */
- local_bh_disable();
- inet_twsk_schedule(tw, timeo);
/* Linkage updates.
* Note that access to tw after this point is illegal.
*/
- inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
- local_bh_enable();
+ inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
diff --git a/net/devlink/dpipe.c b/net/devlink/dpipe.c
index a72a9292efc5..55009b377447 100644
--- a/net/devlink/dpipe.c
+++ b/net/devlink/dpipe.c
@@ -839,7 +839,7 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
*/
int devl_dpipe_table_register(struct devlink *devlink,
const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
+ const struct devlink_dpipe_table_ops *table_ops,
void *priv, bool counter_control_extern)
{
struct devlink_dpipe_table *table;
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 8e698bea99a3..2dfe9063613f 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -129,7 +129,7 @@ config NET_DSA_TAG_RTL4_A
tristate "Tag driver for Realtek 4 byte protocol A tags"
help
Say Y or M if you want to enable support for tagging frames for the
- Realtek switches with 4 byte protocol A tags, sich as found in
+ Realtek switches with 4 byte protocol A tags, such as found in
the Realtek RTL8366RB.
config NET_DSA_TAG_RTL8_4
@@ -166,6 +166,12 @@ config NET_DSA_TAG_TRAILER
Say Y or M if you want to enable support for tagging frames
with a trailer, e.g. Marvell 88E6060.
+config NET_DSA_TAG_VSC73XX_8021Q
+ tristate "Tag driver for Microchip/Vitesse VSC73xx family of switches, using VLAN"
+ help
+ Say Y or M if you want to enable support for tagging frames with a
+ custom VLAN-based header.
+
config NET_DSA_TAG_XRS700X
tristate "Tag driver for XRS700x switches"
help
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 8a1894a42552..555c07cfeb71 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
obj-$(CONFIG_NET_DSA_TAG_RZN1_A5PSW) += tag_rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
+obj-$(CONFIG_NET_DSA_TAG_VSC73XX_8021Q) += tag_vsc73xx_8021q.o
obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o
# for tracing framework to find trace.h
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 12521a7d4048..668c729946ea 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -1507,9 +1507,7 @@ static int dsa_switch_probe(struct dsa_switch *ds)
if (ds->phylink_mac_ops) {
if (ds->ops->phylink_mac_select_pcs ||
- ds->ops->phylink_mac_prepare ||
ds->ops->phylink_mac_config ||
- ds->ops->phylink_mac_finish ||
ds->ops->phylink_mac_link_down ||
ds->ops->phylink_mac_link_up)
return -EINVAL;
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 9a249d4ac3a5..25258b33e59e 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -1467,10 +1467,34 @@ int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit,
*/
dsa_user_unsync_ha(dev);
+ /* If live-changing, we also need to uninstall the user device address
+ * from the port FDB and the conduit interface.
+ */
+ if (dev->flags & IFF_UP)
+ dsa_user_host_uc_uninstall(dev);
+
err = dsa_port_assign_conduit(dp, conduit, extack, true);
if (err)
goto rewind_old_addrs;
+ /* If the port doesn't have its own MAC address and relies on the DSA
+ * conduit's one, inherit it again from the new DSA conduit.
+ */
+ if (is_zero_ether_addr(dp->mac))
+ eth_hw_addr_inherit(dev, conduit);
+
+ /* If live-changing, we need to install the user device address to the
+ * port FDB and the conduit interface.
+ */
+ if (dev->flags & IFF_UP) {
+ err = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to install host UC address");
+ goto rewind_addr_inherit;
+ }
+ }
+
dsa_user_sync_ha(dev);
if (vlan_filtering) {
@@ -1500,10 +1524,26 @@ rewind_new_vlan:
rewind_new_addrs:
dsa_user_unsync_ha(dev);
+ if (dev->flags & IFF_UP)
+ dsa_user_host_uc_uninstall(dev);
+
+rewind_addr_inherit:
+ if (is_zero_ether_addr(dp->mac))
+ eth_hw_addr_inherit(dev, old_conduit);
+
dsa_port_assign_conduit(dp, old_conduit, NULL, false);
/* Restore the objects on the old CPU port */
rewind_old_addrs:
+ if (dev->flags & IFF_UP) {
+ tmp = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (tmp) {
+ dev_err(ds->dev,
+ "port %d failed to restore host UC address: %pe\n",
+ dp->index, ERR_PTR(tmp));
+ }
+ }
+
dsa_user_sync_ha(dev);
if (vlan_filtering) {
@@ -1549,21 +1589,6 @@ dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
return pcs;
}
-static int dsa_port_phylink_mac_prepare(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct dsa_switch *ds = dp->ds;
- int err = 0;
-
- if (ds->ops->phylink_mac_prepare)
- err = ds->ops->phylink_mac_prepare(ds, dp->index, mode,
- interface);
-
- return err;
-}
-
static void dsa_port_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -1577,21 +1602,6 @@ static void dsa_port_phylink_mac_config(struct phylink_config *config,
ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}
-static int dsa_port_phylink_mac_finish(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct dsa_switch *ds = dp->ds;
- int err = 0;
-
- if (ds->ops->phylink_mac_finish)
- err = ds->ops->phylink_mac_finish(ds, dp->index, mode,
- interface);
-
- return err;
-}
-
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
@@ -1624,9 +1634,7 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
.mac_select_pcs = dsa_port_phylink_mac_select_pcs,
- .mac_prepare = dsa_port_phylink_mac_prepare,
.mac_config = dsa_port_phylink_mac_config,
- .mac_finish = dsa_port_phylink_mac_finish,
.mac_link_down = dsa_port_phylink_mac_link_down,
.mac_link_up = dsa_port_phylink_mac_link_up,
};
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 71b26ae6db39..3ee53e28ec2e 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -286,7 +286,8 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
* be used for VLAN-unaware bridging.
*/
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port);
u16 standalone_vid, bridge_vid;
@@ -304,6 +305,8 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false);
+ *tx_fwd_offload = true;
+
return 0;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join);
@@ -468,8 +471,8 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
- int vbid)
+static struct net_device *
+dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, int vbid)
{
struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
@@ -495,30 +498,91 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
return NULL;
}
-EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid)
+{
+ /* Always prefer precise source port information, if available */
+ if (source_port != -1 && switch_id != -1)
+ return dsa_conduit_find_user(conduit, switch_id, source_port);
+ else if (vbid >= 1)
+ return dsa_tag_8021q_find_port_by_vbid(conduit, vbid);
+
+ return dsa_find_designated_bridge_port_by_vid(conduit, vid);
+}
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_user);
+
+/**
+ * dsa_8021q_rcv - Decode source information from tag_8021q header
+ * @skb: RX socket buffer
+ * @source_port: pointer to storage for precise source port information.
+ * If this is known already from outside tag_8021q, the pre-initialized
+ * value is preserved. If not known, pass -1.
+ * @switch_id: similar to source_port.
+ * @vbid: pointer to storage for imprecise bridge ID. Must be pre-initialized
+ * with -1. If a positive value is returned, the source_port and switch_id
+ * are invalid.
+ * @vid: pointer to storage for original VID, in case tag_8021q decoding failed.
+ *
+ * If the packet has a tag_8021q header, decode it and set @source_port,
+ * @switch_id and @vbid, and strip the header. Otherwise set @vid and keep the
+ * header in the hwaccel area of the packet.
+ */
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid)
+ int *vbid, int *vid)
{
- u16 vid, tci;
+ int tmp_source_port, tmp_switch_id, tmp_vbid;
+ __be16 vlan_proto;
+ u16 tmp_vid, tci;
if (skb_vlan_tag_present(skb)) {
+ vlan_proto = skb->vlan_proto;
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
+ struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+
+ vlan_proto = hdr->h_vlan_proto;
skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &tci);
skb_pull_rcsum(skb, ETH_HLEN);
}
- vid = tci & VLAN_VID_MASK;
+ tmp_vid = tci & VLAN_VID_MASK;
+ if (!vid_is_dsa_8021q(tmp_vid)) {
+ /* Not a tag_8021q frame, so return the VID to the
+ * caller for further processing, and put the tag back
+ */
+ if (vid)
+ *vid = tmp_vid;
+
+ __vlan_hwaccel_put_tag(skb, vlan_proto, tci);
+
+ return;
+ }
- *source_port = dsa_8021q_rx_source_port(vid);
- *switch_id = dsa_8021q_rx_switch_id(vid);
+ tmp_source_port = dsa_8021q_rx_source_port(tmp_vid);
+ tmp_switch_id = dsa_8021q_rx_switch_id(tmp_vid);
+ tmp_vbid = dsa_tag_8021q_rx_vbid(tmp_vid);
+
+ /* Precise source port information is unknown when receiving from a
+ * VLAN-unaware bridging domain, and tmp_source_port and tmp_switch_id
+ * are zeroes in this case.
+ *
+ * Preserve the source information from hardware-specific mechanisms,
+ * if available. This allows us to not overwrite a valid source port
+ * and switch ID with less precise values.
+ */
+ if (tmp_vbid == 0 && *source_port == -1)
+ *source_port = tmp_source_port;
+ if (tmp_vbid == 0 && *switch_id == -1)
+ *switch_id = tmp_switch_id;
if (vbid)
- *vbid = dsa_tag_8021q_rx_vbid(vid);
+ *vbid = tmp_vbid;
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ return;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
diff --git a/net/dsa/tag_8021q.h b/net/dsa/tag_8021q.h
index 41f7167ac520..27b8906f99ec 100644
--- a/net/dsa/tag_8021q.h
+++ b/net/dsa/tag_8021q.h
@@ -14,10 +14,11 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid);
+ int *vbid, int *vid);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
- int vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info);
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index b059381310fe..8e8b1bef6af6 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -81,7 +81,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
{
int src_port, switch_id;
- dsa_8021q_rcv(skb, &src_port, &switch_id, NULL);
+ dsa_8021q_rcv(skb, &src_port, &switch_id, NULL, NULL);
skb->dev = dsa_conduit_find_user(netdev, switch_id, src_port);
if (!skb->dev)
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 1aba1d05c27a..3e902af7eea6 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -472,37 +472,14 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}
-/* If the VLAN in the packet is a tag_8021q one, set @source_port and
- * @switch_id and strip the header. Otherwise set @vid and keep it in the
- * packet.
- */
-static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
- int *switch_id, int *vbid, u16 *vid)
-{
- struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
- u16 vlan_tci;
-
- if (skb_vlan_tag_present(skb))
- vlan_tci = skb_vlan_tag_get(skb);
- else
- vlan_tci = ntohs(hdr->h_vlan_TCI);
-
- if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
- return dsa_8021q_rcv(skb, source_port, switch_id, vbid);
-
- /* Try our best with imprecise RX */
- *vid = vlan_tci & VLAN_VID_MASK;
-}
-
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
struct sja1105_meta meta = {0};
struct ethhdr *hdr;
bool is_link_local;
bool is_meta;
- u16 vid;
hdr = eth_hdr(skb);
is_link_local = sja1105_is_link_local(skb);
@@ -524,37 +501,16 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
/* Normal data plane traffic and link-local frames are tagged with
* a tag_8021q VLAN which we have to strip
*/
- if (sja1105_skb_has_tag_8021q(skb)) {
- int tmp_source_port = -1, tmp_switch_id = -1;
-
- sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid,
- &vid);
- /* Preserve the source information from the INCL_SRCPT option,
- * if available. This allows us to not overwrite a valid source
- * port and switch ID with zeroes when receiving link-local
- * frames from a VLAN-unaware bridged port (non-zero vbid) or a
- * VLAN-aware bridged port (non-zero vid). Furthermore, the
- * tag_8021q source port information is only of trust when the
- * vbid is 0 (precise port). Otherwise, tmp_source_port and
- * tmp_switch_id will be zeroes.
- */
- if (vbid == 0 && source_port == -1)
- source_port = tmp_source_port;
- if (vbid == 0 && switch_id == -1)
- switch_id = tmp_switch_id;
- } else if (source_port == -1 && switch_id == -1) {
+ if (sja1105_skb_has_tag_8021q(skb))
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+ else if (source_port == -1 && switch_id == -1)
/* Packets with no source information have no chance of
* getting accepted, drop them straight away.
*/
return NULL;
- }
- if (source_port != -1 && switch_id != -1)
- skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
- else if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -677,9 +633,8 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
bool host_only = false;
- u16 vid = 0;
if (sja1110_skb_has_inband_control_extension(skb)) {
skb = sja1110_rcv_inband_control_extension(skb, &source_port,
@@ -691,14 +646,11 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
-
- if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else if (source_port == -1 || switch_id == -1)
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
- else
- skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
+
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
diff --git a/net/dsa/tag_vsc73xx_8021q.c b/net/dsa/tag_vsc73xx_8021q.c
new file mode 100644
index 000000000000..af121a9aff7f
--- /dev/null
+++ b/net/dsa/tag_vsc73xx_8021q.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (C) 2024 Pawel Dembicki <paweldembicki@gmail.com>
+ */
+#include <linux/dsa/8021q.h>
+
+#include "tag.h"
+#include "tag_8021q.h"
+
+#define VSC73XX_8021Q_NAME "vsc73xx-8021q"
+
+static struct sk_buff *
+vsc73xx_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_user_to_port(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
+ u8 pcp;
+
+ if (skb->offload_fwd_mark) {
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br_vlan_enabled(br))
+ return skb;
+
+ tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);
+ }
+
+ pcp = netdev_txq_to_tc(netdev, queue_mapping);
+
+ return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
+ ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
+}
+
+static struct sk_buff *
+vsc73xx_rcv(struct sk_buff *skb, struct net_device *netdev)
+{
+ int src_port = -1, switch_id = -1, vbid = -1, vid = -1;
+
+ dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, src_port, switch_id,
+ vid, vbid);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Couldn't decode source port\n");
+ return NULL;
+ }
+
+ dsa_default_offload_fwd_mark(skb);
+
+ return skb;
+}
+
+static const struct dsa_device_ops vsc73xx_8021q_netdev_ops = {
+ .name = VSC73XX_8021Q_NAME,
+ .proto = DSA_TAG_PROTO_VSC73XX_8021Q,
+ .xmit = vsc73xx_xmit,
+ .rcv = vsc73xx_rcv,
+ .needed_headroom = VLAN_HLEN,
+ .promisc_on_conduit = true,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DSA tag driver for VSC73XX family of switches, using VLAN");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_VSC73XX_8021Q, VSC73XX_8021Q_NAME);
+
+module_dsa_tag_driver(vsc73xx_8021q_netdev_ops);
diff --git a/net/dsa/user.c b/net/dsa/user.c
index 867c5fe9a4da..f5adfa1d978a 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -355,60 +355,82 @@ static int dsa_user_get_iflink(const struct net_device *dev)
return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}
-static int dsa_user_open(struct net_device *dev)
+int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr)
{
struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err;
- err = dev_open(conduit, NULL);
- if (err < 0) {
- netdev_err(dev, "failed to open conduit %s\n", conduit->name);
- goto out;
- }
-
if (dsa_switch_supports_uc_filtering(ds)) {
- err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
+ err = dsa_port_standalone_host_fdb_add(dp, addr, 0);
if (err)
goto out;
}
- if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) {
- err = dev_uc_add(conduit, dev->dev_addr);
+ if (!ether_addr_equal(addr, conduit->dev_addr)) {
+ err = dev_uc_add(conduit, addr);
if (err < 0)
goto del_host_addr;
}
- err = dsa_port_enable_rt(dp, dev->phydev);
- if (err)
- goto del_unicast;
-
return 0;
-del_unicast:
- if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
- dev_uc_del(conduit, dev->dev_addr);
del_host_addr:
if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+ dsa_port_standalone_host_fdb_del(dp, addr, 0);
out:
return err;
}
-static int dsa_user_close(struct net_device *dev)
+void dsa_user_host_uc_uninstall(struct net_device *dev)
{
struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
- dsa_port_disable_rt(dp);
-
if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
dev_uc_del(conduit, dev->dev_addr);
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+}
+
+static int dsa_user_open(struct net_device *dev)
+{
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ int err;
+
+ err = dev_open(conduit, NULL);
+ if (err < 0) {
+ netdev_err(dev, "failed to open conduit %s\n", conduit->name);
+ goto out;
+ }
+
+ err = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (err)
+ goto out;
+
+ err = dsa_port_enable_rt(dp, dev->phydev);
+ if (err)
+ goto out_del_host_uc;
+
+ return 0;
+
+out_del_host_uc:
+ dsa_user_host_uc_uninstall(dev);
+out:
+ return err;
+}
+
+static int dsa_user_close(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+
+ dsa_port_disable_rt(dp);
+
+ dsa_user_host_uc_uninstall(dev);
return 0;
}
@@ -448,7 +470,6 @@ static void dsa_user_set_rx_mode(struct net_device *dev)
static int dsa_user_set_mac_address(struct net_device *dev, void *a)
{
- struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct sockaddr *addr = a;
@@ -470,34 +491,16 @@ static int dsa_user_set_mac_address(struct net_device *dev, void *a)
if (!(dev->flags & IFF_UP))
goto out_change_dev_addr;
- if (dsa_switch_supports_uc_filtering(ds)) {
- err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
- if (err)
- return err;
- }
-
- if (!ether_addr_equal(addr->sa_data, conduit->dev_addr)) {
- err = dev_uc_add(conduit, addr->sa_data);
- if (err < 0)
- goto del_unicast;
- }
+ err = dsa_user_host_uc_install(dev, addr->sa_data);
+ if (err)
+ return err;
- if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
- dev_uc_del(conduit, dev->dev_addr);
-
- if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+ dsa_user_host_uc_uninstall(dev);
out_change_dev_addr:
eth_hw_addr_set(dev, addr->sa_data);
return 0;
-
-del_unicast:
- if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);
-
- return err;
}
struct dsa_user_dump_ctx {
@@ -1726,7 +1729,7 @@ static int dsa_user_set_rxnfc(struct net_device *dev,
}
static int dsa_user_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *ts)
+ struct kernel_ethtool_ts_info *ts)
{
struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
@@ -2879,12 +2882,6 @@ int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
ERR_PTR(err));
}
- /* If the port doesn't have its own MAC address and relies on the DSA
- * conduit's one, inherit it again from the new DSA conduit.
- */
- if (is_zero_ether_addr(dp->mac))
- eth_hw_addr_inherit(dev, conduit);
-
return 0;
out_revert_conduit_link:
diff --git a/net/dsa/user.h b/net/dsa/user.h
index 996069130bea..016884bead3c 100644
--- a/net/dsa/user.h
+++ b/net/dsa/user.h
@@ -42,6 +42,8 @@ int dsa_user_suspend(struct net_device *user_dev);
int dsa_user_resume(struct net_device *user_dev);
int dsa_user_register_notifier(void);
void dsa_user_unregister_notifier(void);
+int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr);
+void dsa_user_host_uc_uninstall(struct net_device *dev);
void dsa_user_sync_ha(struct net_device *dev);
void dsa_user_unsync_ha(struct net_device *dev);
void dsa_user_setup_tagger(struct net_device *user);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 049c3adeb850..4e3651101b86 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -161,9 +161,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- eth = (struct ethhdr *)skb->data;
- skb_pull_inline(skb, ETH_HLEN);
-
+ eth = eth_skb_pull_mac(skb);
eth_skb_pkt_type(skb, dev);
/*
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 504f954a1b28..9a190635fe95 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -8,4 +8,4 @@ ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \
- module.o pse-pd.o plca.o mm.o
+ module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o mm.o
diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
index 06a151165c31..f6f136ec7ddf 100644
--- a/net/ethtool/cabletest.c
+++ b/net/ethtool/cabletest.c
@@ -207,10 +207,6 @@ err:
}
EXPORT_SYMBOL_GPL(ethnl_cable_test_fault_length);
-struct cable_test_tdr_req_info {
- struct ethnl_req_info base;
-};
-
static const struct nla_policy cable_test_tdr_act_cfg_policy[] = {
[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .type = NLA_U32 },
[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST] = { .type = NLA_U32 },
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 7b4bbd674bae..cee188da54f8 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -171,11 +171,9 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
*/
if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
max_rxnfc_in_use = 0;
- if (!netif_is_rxfh_configured(dev) ||
- ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
- max_rxfh_in_use = 0;
+ max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
- GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
+ GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use);
return -EINVAL;
}
if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
diff --git a/net/ethtool/cmis.h b/net/ethtool/cmis.h
new file mode 100644
index 000000000000..e71cc3e1b7eb
--- /dev/null
+++ b/net/ethtool/cmis.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH 120
+#define ETHTOOL_CMIS_CDB_CMD_PAGE 0x9F
+#define ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR 0x50
+
+/**
+ * struct ethtool_cmis_cdb - CDB commands parameters
+ * @cmis_rev: CMIS revision major.
+ * @read_write_len_ext: Allowable additional number of byte octets to the LPL
+ * in a READ or a WRITE CDB command.
+ * @max_completion_time: Maximum CDB command completion time in msec.
+ */
+struct ethtool_cmis_cdb {
+ u8 cmis_rev;
+ u8 read_write_len_ext;
+ u16 max_completion_time;
+};
+
+enum ethtool_cmis_cdb_cmd_id {
+ ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0x0000,
+ ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 0x0040,
+ ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 0x0041,
+ ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 0x0101,
+ ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 0x0103,
+ ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 0x0107,
+ ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 0x0109,
+ ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 0x010A,
+};
+
+/**
+ * struct ethtool_cmis_cdb_request - CDB commands request fields as described in
+ * the CMIS standard
+ * @id: Command ID.
+ * @epl_len: EPL memory length.
+ * @lpl_len: LPL memory length.
+ * @chk_code: Check code for the previous field and the payload.
+ * @resv1: Added to match the CMIS standard request continuity.
+ * @resv2: Added to match the CMIS standard request continuity.
+ * @payload: Payload for the CDB commands.
+ */
+struct ethtool_cmis_cdb_request {
+ __be16 id;
+ struct_group(body,
+ __be16 epl_len;
+ u8 lpl_len;
+ u8 chk_code;
+ u8 resv1;
+ u8 resv2;
+ u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH];
+ );
+};
+
+#define CDB_F_COMPLETION_VALID BIT(0)
+#define CDB_F_STATUS_VALID BIT(1)
+#define CDB_F_MODULE_STATE_VALID BIT(2)
+
+/**
+ * struct ethtool_cmis_cdb_cmd_args - CDB commands execution arguments
+ * @req: CDB command fields as described in the CMIS standard.
+ * @max_duration: Maximum duration time for command completion in msec.
+ * @read_write_len_ext: Allowable additional number of byte octets to the LPL
+ * in a READ or a WRITE command.
+ * @msleep_pre_rpl: Waiting time before checking reply in msec.
+ * @rpl_exp_len: Expected reply length in bytes.
+ * @flags: Validation flags for CDB commands.
+ * @err_msg: Error message to be sent to user space.
+ */
+struct ethtool_cmis_cdb_cmd_args {
+ struct ethtool_cmis_cdb_request req;
+ u16 max_duration;
+ u8 read_write_len_ext;
+ u8 msleep_pre_rpl;
+ u8 rpl_exp_len;
+ u8 flags;
+ char *err_msg;
+};
+
+/**
+ * struct ethtool_cmis_cdb_rpl_hdr - CDB commands reply header arguments
+ * @rpl_len: Reply length.
+ * @rpl_chk_code: Reply check code.
+ */
+struct ethtool_cmis_cdb_rpl_hdr {
+ u8 rpl_len;
+ u8 rpl_chk_code;
+};
+
+/**
+ * struct ethtool_cmis_cdb_rpl - CDB commands reply arguments
+ * @hdr: CDB commands reply header arguments.
+ * @payload: Payload for the CDB commands reply.
+ */
+struct ethtool_cmis_cdb_rpl {
+ struct ethtool_cmis_cdb_rpl_hdr hdr;
+ u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH];
+};
+
+u32 ethtool_cmis_get_max_payload_size(u8 num_of_byte_octs);
+
+void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
+ enum ethtool_cmis_cdb_cmd_id cmd, u8 *pl,
+ u8 lpl_len, u16 max_duration,
+ u8 read_write_len_ext, u16 msleep_pre_rpl,
+ u8 rpl_exp_len, u8 flags);
+
+void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags);
+
+void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data,
+ u8 page, u32 offset, u32 length);
+void ethtool_cmis_page_fini(struct ethtool_module_eeprom *page_data);
+
+struct ethtool_cmis_cdb *
+ethtool_cmis_cdb_init(struct net_device *dev,
+ const struct ethtool_module_fw_flash_params *params,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params);
+void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb);
+
+int ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag,
+ u16 max_duration, u32 offset,
+ bool (*cond_success)(u8), bool (*cond_fail)(u8), u8 *state);
+
+int ethtool_cmis_cdb_execute_cmd(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args);
diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c
new file mode 100644
index 000000000000..1bb08783b60d
--- /dev/null
+++ b/net/ethtool/cmis_cdb.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/ethtool.h>
+#include <linux/jiffies.h>
+
+#include "common.h"
+#include "module_fw.h"
+#include "cmis.h"
+
+/* For accessing the LPL field on page 9Fh, the allowable length extension is
+ * min(i, 15) byte octets where i specifies the allowable additional number of
+ * byte octets in a READ or a WRITE.
+ */
+u32 ethtool_cmis_get_max_payload_size(u8 num_of_byte_octs)
+{
+ return 8 * (1 + min_t(u8, num_of_byte_octs, 15));
+}
+
+void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args,
+ enum ethtool_cmis_cdb_cmd_id cmd, u8 *pl,
+ u8 lpl_len, u16 max_duration,
+ u8 read_write_len_ext, u16 msleep_pre_rpl,
+ u8 rpl_exp_len, u8 flags)
+{
+ args->req.id = cpu_to_be16(cmd);
+ args->req.lpl_len = lpl_len;
+ if (pl)
+ memcpy(args->req.payload, pl, args->req.lpl_len);
+
+ args->max_duration = max_duration;
+ args->read_write_len_ext =
+ ethtool_cmis_get_max_payload_size(read_write_len_ext);
+ args->msleep_pre_rpl = msleep_pre_rpl;
+ args->rpl_exp_len = rpl_exp_len;
+ args->flags = flags;
+ args->err_msg = NULL;
+}
+
+void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data,
+ u8 page, u32 offset, u32 length)
+{
+ page_data->page = page;
+ page_data->offset = offset;
+ page_data->length = length;
+ page_data->i2c_address = ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR;
+}
+
+#define CMIS_REVISION_PAGE 0x00
+#define CMIS_REVISION_OFFSET 0x01
+
+struct cmis_rev_rpl {
+ u8 rev;
+};
+
+static u8 cmis_rev_rpl_major(struct cmis_rev_rpl *rpl)
+{
+ return rpl->rev >> 4;
+}
+
+static int cmis_rev_major_get(struct net_device *dev, u8 *rev_major)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {0};
+ struct netlink_ext_ack extack = {};
+ struct cmis_rev_rpl rpl = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, CMIS_REVISION_PAGE,
+ CMIS_REVISION_OFFSET, sizeof(rpl));
+ page_data.data = (u8 *)&rpl;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ return err;
+ }
+
+ *rev_major = cmis_rev_rpl_major(&rpl);
+
+ return 0;
+}
+
+#define CMIS_CDB_ADVERTISEMENT_PAGE 0x01
+#define CMIS_CDB_ADVERTISEMENT_OFFSET 0xA3
+
+/* Based on section 8.4.11 "CDB Messaging Support Advertisement" in CMIS
+ * standard revision 5.2.
+ */
+struct cmis_cdb_advert_rpl {
+ u8 inst_supported;
+ u8 read_write_len_ext;
+ u8 resv1;
+ u8 resv2;
+};
+
+static u8 cmis_cdb_advert_rpl_inst_supported(struct cmis_cdb_advert_rpl *rpl)
+{
+ return rpl->inst_supported >> 6;
+}
+
+static int cmis_cdb_advertisement_get(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {};
+ struct cmis_cdb_advert_rpl rpl = {};
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, CMIS_CDB_ADVERTISEMENT_PAGE,
+ CMIS_CDB_ADVERTISEMENT_OFFSET, sizeof(rpl));
+ page_data.data = (u8 *)&rpl;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ return err;
+ }
+
+ if (!cmis_cdb_advert_rpl_inst_supported(&rpl))
+ return -EOPNOTSUPP;
+
+ cdb->read_write_len_ext = rpl.read_write_len_ext;
+
+ return 0;
+}
+
+#define CMIS_PASSWORD_ENTRY_PAGE 0x00
+#define CMIS_PASSWORD_ENTRY_OFFSET 0x7A
+
+struct cmis_password_entry_pl {
+ __be32 password;
+};
+
+/* See section 9.3.1 "CMD 0000h: Query Status" in CMIS standard revision 5.2.
+ * struct cmis_cdb_query_status_pl and struct cmis_cdb_query_status_rpl are
+ * structured layouts of the flat arrays,
+ * struct ethtool_cmis_cdb_request::payload and
+ * struct ethtool_cmis_cdb_rpl::payload respectively.
+ */
+struct cmis_cdb_query_status_pl {
+ u16 response_delay;
+};
+
+struct cmis_cdb_query_status_rpl {
+ u8 length;
+ u8 status;
+};
+
+static int
+cmis_cdb_validate_password(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ const struct ethtool_module_fw_flash_params *params,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct cmis_cdb_query_status_pl qs_pl = {0};
+ struct ethtool_module_eeprom page_data = {};
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_password_entry_pl pe_pl = {};
+ struct cmis_cdb_query_status_rpl *rpl;
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, CMIS_PASSWORD_ENTRY_PAGE,
+ CMIS_PASSWORD_ENTRY_OFFSET, sizeof(pe_pl));
+ page_data.data = (u8 *)&pe_pl;
+
+ pe_pl = *((struct cmis_password_entry_pl *)page_data.data);
+ pe_pl.password = params->password;
+ err = ops->set_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ return err;
+ }
+
+ ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS,
+ (u8 *)&qs_pl, sizeof(qs_pl), 0,
+ cdb->read_write_len_ext, 1000,
+ sizeof(*rpl),
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Query Status command failed",
+ args.err_msg);
+ return err;
+ }
+
+ rpl = (struct cmis_cdb_query_status_rpl *)args.req.payload;
+ if (!rpl->length || !rpl->status) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Password was not accepted",
+ NULL);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Some CDB commands assert the CDB completion flag only from CMIS
+ * revision 5. Therefore, check the relevant validity flag only when
+ * the revision supports it.
+ */
+void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags)
+{
+ *flags |= cmis_rev >= 5 ? CDB_F_COMPLETION_VALID : 0;
+}
+
+#define CMIS_CDB_MODULE_FEATURES_RESV_DATA 34
+
+/* See section 9.4.1 "CMD 0040h: Module Features" in CMIS standard revision 5.2.
+ * struct cmis_cdb_module_features_rpl is structured layout of the flat
+ * array, ethtool_cmis_cdb_rpl::payload.
+ */
+struct cmis_cdb_module_features_rpl {
+ u8 resv1[CMIS_CDB_MODULE_FEATURES_RESV_DATA];
+ __be16 max_completion_time;
+};
+
+static u16
+cmis_cdb_module_features_completion_time(struct cmis_cdb_module_features_rpl *rpl)
+{
+ return be16_to_cpu(rpl->max_completion_time);
+}
+
+static int cmis_cdb_module_features_get(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_cdb_module_features_rpl *rpl;
+ u8 flags = CDB_F_STATUS_VALID;
+ int err;
+
+ ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags);
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES,
+ NULL, 0, 0, cdb->read_write_len_ext,
+ 1000, sizeof(*rpl), flags);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Module Features command failed",
+ args.err_msg);
+ return err;
+ }
+
+ rpl = (struct cmis_cdb_module_features_rpl *)args.req.payload;
+ cdb->max_completion_time =
+ cmis_cdb_module_features_completion_time(rpl);
+
+ return 0;
+}
+
+struct ethtool_cmis_cdb *
+ethtool_cmis_cdb_init(struct net_device *dev,
+ const struct ethtool_module_fw_flash_params *params,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb *cdb;
+ int err;
+
+ cdb = kzalloc(sizeof(*cdb), GFP_KERNEL);
+ if (!cdb)
+ return ERR_PTR(-ENOMEM);
+
+ err = cmis_rev_major_get(dev, &cdb->cmis_rev);
+ if (err < 0)
+ goto err;
+
+ if (cdb->cmis_rev < 4) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "CMIS revision doesn't support module firmware flashing",
+ NULL);
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ err = cmis_cdb_advertisement_get(cdb, dev);
+ if (err < 0)
+ goto err;
+
+ if (params->password_valid) {
+ err = cmis_cdb_validate_password(cdb, dev, params, ntf_params);
+ if (err < 0)
+ goto err;
+ }
+
+ err = cmis_cdb_module_features_get(cdb, dev, ntf_params);
+ if (err < 0)
+ goto err;
+
+ return cdb;
+
+err:
+ ethtool_cmis_cdb_fini(cdb);
+ return ERR_PTR(err);
+}
+
+void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb)
+{
+ kfree(cdb);
+}
+
+static bool is_completed(u8 data)
+{
+ return !!(data & 0x40);
+}
+
+#define CMIS_CDB_STATUS_SUCCESS 0x01
+
+static bool status_success(u8 data)
+{
+ return data == CMIS_CDB_STATUS_SUCCESS;
+}
+
+#define CMIS_CDB_STATUS_FAIL 0x40
+
+static bool status_fail(u8 data)
+{
+ return data & CMIS_CDB_STATUS_FAIL;
+}
+
+struct cmis_wait_for_cond_rpl {
+ u8 state;
+};
+
+static int
+ethtool_cmis_module_poll(struct net_device *dev,
+ struct cmis_wait_for_cond_rpl *rpl, u32 offset,
+ bool (*cond_success)(u8), bool (*cond_fail)(u8))
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {0};
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(&page_data, 0, offset, sizeof(rpl));
+ page_data.data = (u8 *)rpl;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err_once(dev, "%s\n", extack._msg);
+ return -EBUSY;
+ }
+
+ if ((*cond_success)(rpl->state))
+ return 0;
+
+ if (*cond_fail && (*cond_fail)(rpl->state))
+ return -EIO;
+
+ return -EBUSY;
+}
+
+int ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag,
+ u16 max_duration, u32 offset,
+ bool (*cond_success)(u8), bool (*cond_fail)(u8),
+ u8 *state)
+{
+ struct cmis_wait_for_cond_rpl rpl = {};
+ unsigned long end;
+ int err;
+
+ if (!(flags & flag))
+ return 0;
+
+ if (max_duration == 0)
+ max_duration = U16_MAX;
+
+ end = jiffies + msecs_to_jiffies(max_duration);
+ do {
+ err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success,
+ cond_fail);
+ if (err != -EBUSY)
+ goto out;
+
+ msleep(20);
+ } while (time_before(jiffies, end));
+
+ err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success,
+ cond_fail);
+ if (err == -EBUSY)
+ err = -ETIMEDOUT;
+
+out:
+ *state = rpl.state;
+ return err;
+}
+
+#define CMIS_CDB_COMPLETION_FLAG_OFFSET 0x08
+
+static int cmis_cdb_wait_for_completion(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ u8 flag;
+ int err;
+
+ /* Some vendors demand waiting time before checking completion flag
+ * in some CDB commands.
+ */
+ msleep(args->msleep_pre_rpl);
+
+ err = ethtool_cmis_wait_for_cond(dev, args->flags,
+ CDB_F_COMPLETION_VALID,
+ args->max_duration,
+ CMIS_CDB_COMPLETION_FLAG_OFFSET,
+ is_completed, NULL, &flag);
+ if (err < 0)
+ args->err_msg = "Completion Flag did not set on time";
+
+ return err;
+}
+
+#define CMIS_CDB_STATUS_OFFSET 0x25
+
+static void cmis_cdb_status_fail_msg_get(u8 status, char **err_msg)
+{
+ switch (status) {
+ case 0b10000001:
+ *err_msg = "CDB Status is in progress: Busy capturing command";
+ break;
+ case 0b10000010:
+ *err_msg =
+ "CDB Status is in progress: Busy checking/validating command";
+ break;
+ case 0b10000011:
+ *err_msg = "CDB Status is in progress: Busy executing";
+ break;
+ case 0b01000000:
+ *err_msg = "CDB status failed: no specific failure";
+ break;
+ case 0b01000010:
+ *err_msg =
+ "CDB status failed: Parameter range error or parameter not supported";
+ break;
+ case 0b01000101:
+ *err_msg = "CDB status failed: CdbChkCode error";
+ break;
+ default:
+ *err_msg = "Unknown failure reason";
+ }
+}
+
+static int cmis_cdb_wait_for_status(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ u8 status;
+ int err;
+
+ /* Some vendors require a delay before the status is checked for
+ * certain CDB commands.
+ */
+ msleep(args->msleep_pre_rpl);
+
+ err = ethtool_cmis_wait_for_cond(dev, args->flags, CDB_F_STATUS_VALID,
+ args->max_duration,
+ CMIS_CDB_STATUS_OFFSET,
+ status_success, status_fail, &status);
+ if (err < 0 && !args->err_msg)
+ cmis_cdb_status_fail_msg_get(status, &args->err_msg);
+
+ return err;
+}
+
+#define CMIS_CDB_REPLY_OFFSET 0x86
+
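+/* Read the CDB reply (header followed by the LPL payload) from the CDB
+ * command page and copy the payload back into args->req for the caller to
+ * parse.
+ */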
+static int cmis_cdb_process_reply(struct net_device *dev,
+ struct ethtool_module_eeprom *page_data,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ u8 rpl_hdr_len = sizeof(struct ethtool_cmis_cdb_rpl_hdr);
+ u8 rpl_exp_len = args->rpl_exp_len + rpl_hdr_len;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct netlink_ext_ack extack = {};
+ struct ethtool_cmis_cdb_rpl *rpl;
+ int err;
+
+ if (!args->rpl_exp_len)
+ return 0;
+
+ ethtool_cmis_page_init(page_data, ETHTOOL_CMIS_CDB_CMD_PAGE,
+ CMIS_CDB_REPLY_OFFSET, rpl_exp_len);
+ page_data->data = kmalloc(page_data->length, GFP_KERNEL);
+ if (!page_data->data)
+ return -ENOMEM;
+
+ err = ops->get_module_eeprom_by_page(dev, page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ goto out;
+ }
+
+ rpl = (struct ethtool_cmis_cdb_rpl *)page_data->data;
+ if ((args->rpl_exp_len > rpl->hdr.rpl_len + rpl_hdr_len) ||
+ !rpl->hdr.rpl_chk_code) {
+ err = -EIO;
+ goto out;
+ }
+
+ args->req.lpl_len = rpl->hdr.rpl_len;
+ memcpy(args->req.payload, rpl->payload, args->req.lpl_len);
+
+out:
+ kfree(page_data->data);
+ return err;
+}
+
+static int
+__ethtool_cmis_cdb_execute_cmd(struct net_device *dev,
+ struct ethtool_module_eeprom *page_data,
+ u8 page, u32 offset, u32 length, void *data)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ ethtool_cmis_page_init(page_data, page, offset, length);
+ page_data->data = kmemdup(data, page_data->length, GFP_KERNEL);
+ if (!page_data->data)
+ return -ENOMEM;
+
+ err = ops->set_module_eeprom_by_page(dev, page_data, &extack);
+ if (err < 0) {
+ if (extack._msg)
+ netdev_err(dev, "%s\n", extack._msg);
+ }
+
+ kfree(page_data->data);
+ return err;
+}
+
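+/* The CdbChkCode is the ones' complement of the byte-wise sum of the CDB
+ * request.
+ */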
+static u8 cmis_cdb_calc_checksum(const void *data, size_t size)
+{
+ const u8 *bytes = (const u8 *)data;
+ u8 checksum = 0;
+
+ for (size_t i = 0; i < size; i++)
+ checksum += bytes[i];
+
+ return ~checksum;
+}
+
+#define CMIS_CDB_CMD_ID_OFFSET 0x80
+
+int ethtool_cmis_cdb_execute_cmd(struct net_device *dev,
+ struct ethtool_cmis_cdb_cmd_args *args)
+{
+ struct ethtool_module_eeprom page_data = {};
+ u32 offset;
+ int err;
+
+ args->req.chk_code =
+ cmis_cdb_calc_checksum(&args->req, sizeof(args->req));
+
+ if (args->req.lpl_len > args->read_write_len_ext) {
+ args->err_msg = "LPL length is longer than CDB read write length extension allows";
+ return -EINVAL;
+ }
+
+ /* According to the CMIS standard, there are two options to trigger a
+ * CDB command. The default option is to trigger the command by writing
+ * the CMDID bytes. Therefore, the command is split into two calls:
+ * first everything except the CMDID field, and then the CMDID field
+ * itself.
+ */
+ offset = CMIS_CDB_CMD_ID_OFFSET +
+ offsetof(struct ethtool_cmis_cdb_request, body);
+ err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data,
+ ETHTOOL_CMIS_CDB_CMD_PAGE, offset,
+ sizeof(args->req.body),
+ &args->req.body);
+ if (err < 0)
+ return err;
+
+ offset = CMIS_CDB_CMD_ID_OFFSET +
+ offsetof(struct ethtool_cmis_cdb_request, id);
+ err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data,
+ ETHTOOL_CMIS_CDB_CMD_PAGE, offset,
+ sizeof(args->req.id),
+ &args->req.id);
+ if (err < 0)
+ return err;
+
+ err = cmis_cdb_wait_for_completion(dev, args);
+ if (err < 0)
+ return err;
+
+ err = cmis_cdb_wait_for_status(dev, args);
+ if (err < 0)
+ return err;
+
+ return cmis_cdb_process_reply(dev, &page_data, args);
+}
diff --git a/net/ethtool/cmis_fw_update.c b/net/ethtool/cmis_fw_update.c
new file mode 100644
index 000000000000..ae4b4b28a601
--- /dev/null
+++ b/net/ethtool/cmis_fw_update.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+
+#include "common.h"
+#include "module_fw.h"
+#include "cmis.h"
+
+struct cmis_fw_update_fw_mng_features {
+ u8 start_cmd_payload_size;
+ u16 max_duration_start;
+ u16 max_duration_write;
+ u16 max_duration_complete;
+};
+
+/* See section 9.4.2 "CMD 0041h: Firmware Management Features" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_fw_mng_features_rpl is a structured layout of the flat
+ * array, ethtool_cmis_cdb_rpl::payload.
+ */
+struct cmis_cdb_fw_mng_features_rpl {
+ u8 resv1;
+ u8 resv2;
+ u8 start_cmd_payload_size;
+ u8 resv3;
+ u8 read_write_len_ext;
+ u8 write_mechanism;
+ u8 resv4;
+ u8 resv5;
+ __be16 max_duration_start;
+ __be16 resv6;
+ __be16 max_duration_write;
+ __be16 max_duration_complete;
+ __be16 resv7;
+};
+
+#define CMIS_CDB_FW_WRITE_MECHANISM_LPL 0x01
+
+static int
+cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct cmis_fw_update_fw_mng_features *fw_mng,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_cdb_fw_mng_features_rpl *rpl;
+ u8 flags = CDB_F_STATUS_VALID;
+ int err;
+
+ ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags);
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES,
+ NULL, 0, cdb->max_completion_time,
+ cdb->read_write_len_ext, 1000,
+ sizeof(*rpl), flags);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "FW Management Features command failed",
+ args.err_msg);
+ return err;
+ }
+
+ rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload;
+ if (rpl->write_mechanism != CMIS_CDB_FW_WRITE_MECHANISM_LPL) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Write LPL is not supported",
+ NULL);
+ return -EOPNOTSUPP;
+ }
+
+ /* Above, we used the read_write_len_ext value obtained from the CDB
+ * advertisement. Update it with the value returned by the module
+ * features query, which is specific to the Firmware Management
+ * Commands (IDs 0100h-01FFh).
+ */
+ cdb->read_write_len_ext = rpl->read_write_len_ext;
+ fw_mng->start_cmd_payload_size = rpl->start_cmd_payload_size;
+ fw_mng->max_duration_start = be16_to_cpu(rpl->max_duration_start);
+ fw_mng->max_duration_write = be16_to_cpu(rpl->max_duration_write);
+ fw_mng->max_duration_complete = be16_to_cpu(rpl->max_duration_complete);
+
+ return 0;
+}
+
+/* See section 9.7.2 "CMD 0101h: Start Firmware Download" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_start_fw_download_pl is a structured layout of the
+ * flat array, ethtool_cmis_cdb_request::payload.
+ */
+struct cmis_cdb_start_fw_download_pl {
+ __struct_group(cmis_cdb_start_fw_download_pl_h, head, /* no attrs */,
+ __be32 image_size;
+ __be32 resv1;
+ );
+ u8 vendor_data[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH -
+ sizeof(struct cmis_cdb_start_fw_download_pl_h)];
+};
+
+static int
+cmis_fw_update_start_download(struct ethtool_cmis_cdb *cdb,
+ struct ethtool_cmis_fw_update_params *fw_update,
+ struct cmis_fw_update_fw_mng_features *fw_mng)
+{
+ u8 vendor_data_size = fw_mng->start_cmd_payload_size;
+ struct cmis_cdb_start_fw_download_pl pl = {};
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ u8 lpl_len;
+ int err;
+
+ pl.image_size = cpu_to_be32(fw_update->fw->size);
+ memcpy(pl.vendor_data, fw_update->fw->data, vendor_data_size);
+
+ lpl_len = offsetof(struct cmis_cdb_start_fw_download_pl,
+ vendor_data[vendor_data_size]);
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD,
+ (u8 *)&pl, lpl_len,
+ fw_mng->max_duration_start,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(fw_update->dev,
+ &fw_update->ntf_params,
+ "Start FW download command failed",
+ args.err_msg);
+
+ return err;
+}
+
+/* See section 9.7.4 "CMD 0103h: Write Firmware Block LPL" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_write_fw_block_lpl_pl is a structured layout of the
+ * flat array, ethtool_cmis_cdb_request::payload.
+ */
+struct cmis_cdb_write_fw_block_lpl_pl {
+ __be32 block_address;
+ u8 fw_block[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH - sizeof(__be32)];
+};
+
+static int
+cmis_fw_update_write_image(struct ethtool_cmis_cdb *cdb,
+ struct ethtool_cmis_fw_update_params *fw_update,
+ struct cmis_fw_update_fw_mng_features *fw_mng)
+{
+ u8 start = fw_mng->start_cmd_payload_size;
+ u32 offset, max_block_size, max_lpl_len;
+ u32 image_size = fw_update->fw->size;
+ int err;
+
+ max_lpl_len = min_t(u32,
+ ethtool_cmis_get_max_payload_size(cdb->read_write_len_ext),
+ ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH);
+ max_block_size =
+ max_lpl_len - sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl,
+ block_address);
+
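+ /* The first start_cmd_payload_size bytes of the image were already
+ * passed as vendor data in the Start Firmware Download command, so
+ * block addresses are relative to that point.
+ */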
+ for (offset = start; offset < image_size; offset += max_block_size) {
+ struct cmis_cdb_write_fw_block_lpl_pl pl = {
+ .block_address = cpu_to_be32(offset - start),
+ };
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ u32 block_size, lpl_len;
+
+ ethnl_module_fw_flash_ntf_in_progress(fw_update->dev,
+ &fw_update->ntf_params,
+ offset - start,
+ image_size);
+ block_size = min_t(u32, max_block_size, image_size - offset);
+ memcpy(pl.fw_block, &fw_update->fw->data[offset], block_size);
+ lpl_len = block_size +
+ sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl,
+ block_address);
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL,
+ (u8 *)&pl, lpl_len,
+ fw_mng->max_duration_write,
+ cdb->read_write_len_ext, 1, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(fw_update->dev,
+ &fw_update->ntf_params,
+ "Write FW block LPL command failed",
+ args.err_msg);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+cmis_fw_update_complete_download(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct cmis_fw_update_fw_mng_features *fw_mng,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ int err;
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD,
+ NULL, 0, fw_mng->max_duration_complete,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Complete FW download command failed",
+ args.err_msg);
+
+ return err;
+}
+
+static int
+cmis_fw_update_download_image(struct ethtool_cmis_cdb *cdb,
+ struct ethtool_cmis_fw_update_params *fw_update,
+ struct cmis_fw_update_fw_mng_features *fw_mng)
+{
+ int err;
+
+ err = cmis_fw_update_start_download(cdb, fw_update, fw_mng);
+ if (err < 0)
+ return err;
+
+ err = cmis_fw_update_write_image(cdb, fw_update, fw_mng);
+ if (err < 0)
+ return err;
+
+ err = cmis_fw_update_complete_download(cdb, fw_update->dev, fw_mng,
+ &fw_update->ntf_params);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+enum {
+ CMIS_MODULE_LOW_PWR = 1,
+ CMIS_MODULE_READY = 3,
+};
+
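+/* The module state is encoded in bits 1-3 of the state byte. The module is
+ * considered ready once it reaches ModuleLowPwr or ModuleReady.
+ */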
+static bool module_is_ready(u8 data)
+{
+ u8 state = (data >> 1) & 7;
+
+ return state == CMIS_MODULE_READY || state == CMIS_MODULE_LOW_PWR;
+}
+
+#define CMIS_MODULE_READY_MAX_DURATION_MSEC 1000
+#define CMIS_MODULE_STATE_OFFSET 3
+
+static int
+cmis_fw_update_wait_for_module_state(struct net_device *dev, u8 flags)
+{
+ u8 state;
+
+ return ethtool_cmis_wait_for_cond(dev, flags, CDB_F_MODULE_STATE_VALID,
+ CMIS_MODULE_READY_MAX_DURATION_MSEC,
+ CMIS_MODULE_STATE_OFFSET,
+ module_is_ready, NULL, &state);
+}
+
+/* See section 9.7.10 "CMD 0109h: Run Firmware Image" in CMIS standard
+ * revision 5.2.
+ * struct cmis_cdb_run_fw_image_pl is a structured layout of the flat
+ * array, ethtool_cmis_cdb_request::payload.
+ */
+struct cmis_cdb_run_fw_image_pl {
+ u8 resv1;
+ u8 image_to_run;
+ u16 delay_to_reset;
+};
+
+static int
+cmis_fw_update_run_image(struct ethtool_cmis_cdb *cdb, struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ struct cmis_cdb_run_fw_image_pl pl = {0};
+ int err;
+
+ ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE,
+ (u8 *)&pl, sizeof(pl),
+ cdb->max_completion_time,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_MODULE_STATE_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Run image command failed",
+ args.err_msg);
+ return err;
+ }
+
+ err = cmis_fw_update_wait_for_module_state(dev, args.flags);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Module is not ready on time after reset",
+ NULL);
+
+ return err;
+}
+
+static int
+cmis_fw_update_commit_image(struct ethtool_cmis_cdb *cdb,
+ struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
+{
+ struct ethtool_cmis_cdb_cmd_args args = {};
+ int err;
+
+ ethtool_cmis_cdb_compose_args(&args,
+ ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE,
+ NULL, 0, cdb->max_completion_time,
+ cdb->read_write_len_ext, 1000, 0,
+ CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID);
+
+ err = ethtool_cmis_cdb_execute_cmd(dev, &args);
+ if (err < 0)
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "Commit image command failed",
+ args.err_msg);
+
+ return err;
+}
+
+static int cmis_fw_update_reset(struct net_device *dev)
+{
+ __u32 reset_data = ETH_RESET_PHY;
+
+ return dev->ethtool_ops->reset(dev, &reset_data);
+}
+
+void
+ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *fw_update)
+{
+ struct ethnl_module_fw_flash_ntf_params *ntf_params =
+ &fw_update->ntf_params;
+ struct cmis_fw_update_fw_mng_features fw_mng = {0};
+ struct net_device *dev = fw_update->dev;
+ struct ethtool_cmis_cdb *cdb;
+ int err;
+
+ cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params);
+ if (IS_ERR(cdb))
+ goto err_send_ntf;
+
+ ethnl_module_fw_flash_ntf_start(dev, ntf_params);
+
+ err = cmis_fw_update_fw_mng_features_get(cdb, dev, &fw_mng, ntf_params);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ err = cmis_fw_update_download_image(cdb, fw_update, &fw_mng);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ err = cmis_fw_update_run_image(cdb, dev, ntf_params);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ /* The CDB command "Run Firmware Image" resets the firmware, so the new
+ * one might have different settings.
+ * Free the old CDB instance, and init a new one.
+ */
+ ethtool_cmis_cdb_fini(cdb);
+
+ cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params);
+ if (IS_ERR(cdb))
+ goto err_send_ntf;
+
+ err = cmis_fw_update_commit_image(cdb, dev, ntf_params);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ err = cmis_fw_update_reset(dev);
+ if (err < 0)
+ goto err_cdb_fini;
+
+ ethnl_module_fw_flash_ntf_complete(dev, ntf_params);
+ ethtool_cmis_cdb_fini(cdb);
+ return;
+
+err_cdb_fini:
+ ethtool_cmis_cdb_fini(cdb);
+err_send_ntf:
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params, NULL, NULL);
+}
diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c
index 83112c1a71ae..3e18ca1ccc5e 100644
--- a/net/ethtool/coalesce.c
+++ b/net/ethtool/coalesce.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/dim.h>
#include "netlink.h"
#include "common.h"
@@ -82,6 +83,14 @@ static int coalesce_prepare_data(const struct ethnl_req_info *req_base,
static int coalesce_reply_size(const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
+ int modersz = nla_total_size(0) + /* _PROFILE_IRQ_MODERATION, nest */
+ nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_USEC */
+ nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_PKTS */
+ nla_total_size(sizeof(u32)); /* _IRQ_MODERATION_COMPS */
+
+ int total_modersz = nla_total_size(0) + /* _{R,T}X_PROFILE, nest */
+ modersz * NET_DIM_PARAMS_NUM_PROFILES;
+
return nla_total_size(sizeof(u32)) + /* _RX_USECS */
nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES */
nla_total_size(sizeof(u32)) + /* _RX_USECS_IRQ */
@@ -108,7 +117,8 @@ static int coalesce_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u8)) + /* _USE_CQE_MODE_RX */
nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_BYTES */
nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_FRAMES */
- nla_total_size(sizeof(u32)); /* _TX_AGGR_TIME_USECS */
+ nla_total_size(sizeof(u32)) + /* _TX_AGGR_TIME_USECS */
+ total_modersz * 2; /* _{R,T}X_PROFILE */
}
static bool coalesce_put_u32(struct sk_buff *skb, u16 attr_type, u32 val,
@@ -127,6 +137,74 @@ static bool coalesce_put_bool(struct sk_buff *skb, u16 attr_type, u32 val,
return nla_put_u8(skb, attr_type, !!val);
}
+/**
+ * coalesce_put_profile - fill reply with a nest of per-profile IRQ moderation nests.
+ * @skb: socket buffer the message is stored in
+ * @attr_type: nest attr type ETHTOOL_A_COALESCE_*X_PROFILE
+ * @profile: data passed to userspace
+ * @coal_flags: modifiable parameters supported by the driver
+ *
+ * Put a dim profile nest attribute. Refer to ETHTOOL_A_PROFILE_IRQ_MODERATION.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int coalesce_put_profile(struct sk_buff *skb, u16 attr_type,
+ const struct dim_cq_moder *profile,
+ u8 coal_flags)
+{
+ struct nlattr *profile_attr, *moder_attr;
+ int i, ret;
+
+ if (!profile || !coal_flags)
+ return 0;
+
+ profile_attr = nla_nest_start(skb, attr_type);
+ if (!profile_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i < NET_DIM_PARAMS_NUM_PROFILES; i++) {
+ moder_attr = nla_nest_start(skb,
+ ETHTOOL_A_PROFILE_IRQ_MODERATION);
+ if (!moder_attr) {
+ ret = -EMSGSIZE;
+ goto cancel_profile;
+ }
+
+ if (coal_flags & DIM_COALESCE_USEC) {
+ ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_USEC,
+ profile[i].usec);
+ if (ret)
+ goto cancel_moder;
+ }
+
+ if (coal_flags & DIM_COALESCE_PKTS) {
+ ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_PKTS,
+ profile[i].pkts);
+ if (ret)
+ goto cancel_moder;
+ }
+
+ if (coal_flags & DIM_COALESCE_COMPS) {
+ ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_COMPS,
+ profile[i].comps);
+ if (ret)
+ goto cancel_moder;
+ }
+
+ nla_nest_end(skb, moder_attr);
+ }
+
+ nla_nest_end(skb, profile_attr);
+
+ return 0;
+
+cancel_moder:
+ nla_nest_cancel(skb, moder_attr);
+cancel_profile:
+ nla_nest_cancel(skb, profile_attr);
+ return ret;
+}
+
static int coalesce_fill_reply(struct sk_buff *skb,
const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
@@ -135,6 +213,8 @@ static int coalesce_fill_reply(struct sk_buff *skb,
const struct kernel_ethtool_coalesce *kcoal = &data->kernel_coalesce;
const struct ethtool_coalesce *coal = &data->coalesce;
u32 supported = data->supported_params;
+ struct dim_irq_moder *moder;
+ int ret = 0;
if (coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS,
coal->rx_coalesce_usecs, supported) ||
@@ -192,11 +272,42 @@ static int coalesce_fill_reply(struct sk_buff *skb,
kcoal->tx_aggr_time_usecs, supported))
return -EMSGSIZE;
- return 0;
+ if (!req_base->dev || !req_base->dev->irq_moder)
+ return 0;
+
+ moder = req_base->dev->irq_moder;
+ rcu_read_lock();
+ if (moder->profile_flags & DIM_PROFILE_RX) {
+ ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_RX_PROFILE,
+ rcu_dereference(moder->rx_profile),
+ moder->coal_flags);
+ if (ret)
+ goto out;
+ }
+
+ if (moder->profile_flags & DIM_PROFILE_TX)
+ ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_TX_PROFILE,
+ rcu_dereference(moder->tx_profile),
+ moder->coal_flags);
+
+out:
+ rcu_read_unlock();
+ return ret;
}
/* COALESCE_SET */
+static const struct nla_policy coalesce_irq_moderation_policy[] = {
+ [ETHTOOL_A_IRQ_MODERATION_USEC] = { .type = NLA_U32 },
+ [ETHTOOL_A_IRQ_MODERATION_PKTS] = { .type = NLA_U32 },
+ [ETHTOOL_A_IRQ_MODERATION_COMPS] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy coalesce_profile_policy[] = {
+ [ETHTOOL_A_PROFILE_IRQ_MODERATION] =
+ NLA_POLICY_NESTED(coalesce_irq_moderation_policy),
+};
+
const struct nla_policy ethnl_coalesce_set_policy[] = {
[ETHTOOL_A_COALESCE_HEADER] =
NLA_POLICY_NESTED(ethnl_header_policy),
@@ -227,6 +338,10 @@ const struct nla_policy ethnl_coalesce_set_policy[] = {
[ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES] = { .type = NLA_U32 },
[ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES] = { .type = NLA_U32 },
[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_PROFILE] =
+ NLA_POLICY_NESTED(coalesce_profile_policy),
+ [ETHTOOL_A_COALESCE_TX_PROFILE] =
+ NLA_POLICY_NESTED(coalesce_profile_policy),
};
static int
@@ -234,6 +349,7 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info,
struct genl_info *info)
{
const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+ struct dim_irq_moder *irq_moder = req_info->dev->irq_moder;
struct nlattr **tb = info->attrs;
u32 supported_params;
u16 a;
@@ -243,6 +359,12 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info,
/* make sure that only supported parameters are present */
supported_params = ops->supported_coalesce_params;
+ if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_RX)
+ supported_params |= ETHTOOL_COALESCE_RX_PROFILE;
+
+ if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_TX)
+ supported_params |= ETHTOOL_COALESCE_TX_PROFILE;
+
for (a = ETHTOOL_A_COALESCE_RX_USECS; a < __ETHTOOL_A_COALESCE_CNT; a++)
if (tb[a] && !(supported_params & attr_to_mask(a))) {
NL_SET_ERR_MSG_ATTR(info->extack, tb[a],
@@ -253,6 +375,138 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info,
return 1;
}
+/**
+ * ethnl_update_irq_moder - update a specific field in the given profile
+ * @irq_moder: place that collects dim related information
+ * @irq_field: field in profile to modify
+ * @attr_type: attr type ETHTOOL_A_IRQ_MODERATION_*
+ * @tb: netlink attribute with new values or null
+ * @coal_bit: DIM_COALESCE_* bit from coal_flags
+ * @mod: pointer to bool for modification tracking
+ * @extack: netlink extended ack
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int ethnl_update_irq_moder(struct dim_irq_moder *irq_moder,
+ u16 *irq_field, u16 attr_type,
+ struct nlattr **tb,
+ u8 coal_bit, bool *mod,
+ struct netlink_ext_ack *extack)
+{
+ int ret = 0;
+ u32 val;
+
+ if (!tb[attr_type])
+ return 0;
+
+ if (irq_moder->coal_flags & coal_bit) {
+ val = nla_get_u32(tb[attr_type]);
+ if (*irq_field == val)
+ return 0;
+
+ *irq_field = val;
+ *mod = true;
+ } else {
+ NL_SET_BAD_ATTR(extack, tb[attr_type]);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/**
+ * ethnl_update_profile - parse a profile nest with child nests from userspace.
+ * @dev: netdevice to update the profile
+ * @dst: profile obtained from the driver and modified by ethnl_update_profile.
+ * @nests: nest attr ETHTOOL_A_COALESCE_*X_PROFILE carrying the new profile.
+ * @mod: pointer to bool for modification tracking
+ * @extack: Netlink extended ack
+ *
+ * Layout of nests:
+ * Nested ETHTOOL_A_COALESCE_*X_PROFILE attr
+ * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr
+ * ETHTOOL_A_IRQ_MODERATION_USEC attr
+ * ETHTOOL_A_IRQ_MODERATION_PKTS attr
+ * ETHTOOL_A_IRQ_MODERATION_COMPS attr
+ * ...
+ * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr
+ * ETHTOOL_A_IRQ_MODERATION_USEC attr
+ * ETHTOOL_A_IRQ_MODERATION_PKTS attr
+ * ETHTOOL_A_IRQ_MODERATION_COMPS attr
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int ethnl_update_profile(struct net_device *dev,
+ struct dim_cq_moder __rcu **dst,
+ const struct nlattr *nests,
+ bool *mod,
+ struct netlink_ext_ack *extack)
+{
+ int len_irq_moder = ARRAY_SIZE(coalesce_irq_moderation_policy);
+ struct nlattr *tb[ARRAY_SIZE(coalesce_irq_moderation_policy)];
+ struct dim_irq_moder *irq_moder = dev->irq_moder;
+ struct dim_cq_moder *new_profile, *old_profile;
+ int ret, rem, i = 0, len;
+ struct nlattr *nest;
+
+ if (!nests)
+ return 0;
+
+ if (!*dst)
+ return -EOPNOTSUPP;
+
+ old_profile = rtnl_dereference(*dst);
+ len = NET_DIM_PARAMS_NUM_PROFILES * sizeof(*old_profile);
+ new_profile = kmemdup(old_profile, len, GFP_KERNEL);
+ if (!new_profile)
+ return -ENOMEM;
+
+ nla_for_each_nested_type(nest, ETHTOOL_A_PROFILE_IRQ_MODERATION,
+ nests, rem) {
+ ret = nla_parse_nested(tb, len_irq_moder - 1, nest,
+ coalesce_irq_moderation_policy,
+ extack);
+ if (ret)
+ goto err_out;
+
+ ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].usec,
+ ETHTOOL_A_IRQ_MODERATION_USEC,
+ tb, DIM_COALESCE_USEC,
+ mod, extack);
+ if (ret)
+ goto err_out;
+
+ ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].pkts,
+ ETHTOOL_A_IRQ_MODERATION_PKTS,
+ tb, DIM_COALESCE_PKTS,
+ mod, extack);
+ if (ret)
+ goto err_out;
+
+ ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].comps,
+ ETHTOOL_A_IRQ_MODERATION_COMPS,
+ tb, DIM_COALESCE_COMPS,
+ mod, extack);
+ if (ret)
+ goto err_out;
+
+ i++;
+ }
+
+ /* Once the profile is updated, DIM, being a dynamic mechanism, will
+ * quickly converge on the appropriate coalescing parameters for the
+ * new profile.
+ */
+ rcu_assign_pointer(*dst, new_profile);
+ kfree_rcu(old_profile, rcu);
+
+ return 0;
+
+err_out:
+ kfree(new_profile);
+ return ret;
+}
+
static int
__ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info,
bool *dual_change)
@@ -317,6 +571,22 @@ __ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info,
ethnl_update_u32(&kernel_coalesce.tx_aggr_time_usecs,
tb[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS], &mod);
+ if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_RX) {
+ ret = ethnl_update_profile(dev, &dev->irq_moder->rx_profile,
+ tb[ETHTOOL_A_COALESCE_RX_PROFILE],
+ &mod, info->extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_TX) {
+ ret = ethnl_update_profile(dev, &dev->irq_moder->tx_profile,
+ tb[ETHTOOL_A_COALESCE_TX_PROFILE],
+ &mod, info->extack);
+ if (ret < 0)
+ return ret;
+ }
+
/* Update operation modes */
ethnl_update_bool32(&coalesce.use_adaptive_rx_coalesce,
tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX], &mod_mode);
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 6b2a360dcdf0..07032babd1b6 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -211,6 +211,7 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(10, T1S, Full),
__DEFINE_LINK_MODE_NAME(10, T1S, Half),
__DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half),
+ __DEFINE_LINK_MODE_NAME(10, T1BRR, Full),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -251,6 +252,7 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
#define __LINK_MODE_LANES_T1S_P2MP 1
#define __LINK_MODE_LANES_VR8 8
#define __LINK_MODE_LANES_DR8_2 8
+#define __LINK_MODE_LANES_T1BRR 1
#define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \
[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \
@@ -374,6 +376,7 @@ const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(10, T1S, Full),
__DEFINE_LINK_MODE_PARAMS(10, T1S, Half),
__DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half),
+ __DEFINE_LINK_MODE_PARAMS(10, T1BRR, Full),
};
static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -587,35 +590,64 @@ err_free_info:
return err;
}
-int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
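+/* Return the highest RX ring index referenced by the indirection table of any
+ * additional RSS context.
+ */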
+static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ u32 max_ring = 0;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
+ u32 i, *tbl;
+
+ tbl = ethtool_rxfh_context_indir(ctx);
+ for (i = 0; i < ctx->indir_size; i++)
+ max_ring = max(max_ring, tbl[i]);
+ }
+ mutex_unlock(&dev->ethtool->rss_lock);
+
+ return max_ring;
+}
+
+u32 ethtool_get_max_rxfh_channel(struct net_device *dev)
{
struct ethtool_rxfh_param rxfh = {};
- u32 dev_size, current_max = 0;
+ u32 dev_size, current_max;
int ret;
+ /* While we do track whether an RSS context has an indirection
+ * table explicitly set by the user, no driver looks at that bit.
+ * Assume drivers won't auto-regenerate the additional tables,
+ * to be safe.
+ */
+ current_max = ethtool_get_max_rss_ctx_channel(dev);
+
+ if (!netif_is_rxfh_configured(dev))
+ return current_max;
+
if (!dev->ethtool_ops->get_rxfh_indir_size ||
!dev->ethtool_ops->get_rxfh)
- return -EOPNOTSUPP;
+ return current_max;
dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
if (dev_size == 0)
- return -EOPNOTSUPP;
+ return current_max;
rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER);
if (!rxfh.indir)
- return -ENOMEM;
+ return U32_MAX;
ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
- if (ret)
- goto out;
+ if (ret) {
+ current_max = U32_MAX;
+ goto out_free;
+ }
while (dev_size--)
current_max = max(current_max, rxfh.indir[dev_size]);
- *max = current_max;
-
-out:
+out_free:
kfree(rxfh.indir);
- return ret;
+ return current_max;
}
int ethtool_check_ops(const struct ethtool_ops *ops)
@@ -629,7 +661,7 @@ int ethtool_check_ops(const struct ethtool_ops *ops)
return 0;
}
-int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
struct phy_device *phydev = dev->phydev;
@@ -637,7 +669,7 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
memset(info, 0, sizeof(*info));
info->cmd = ETHTOOL_GET_TS_INFO;
- if (phy_has_tsinfo(phydev))
+ if (phy_is_default_hwtstamp(phydev) && phy_has_tsinfo(phydev))
return phy_ts_info(phydev, info);
if (ops->get_ts_info)
return ops->get_ts_info(dev, info);
@@ -651,7 +683,7 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
{
- struct ethtool_ts_info info = { };
+ struct kernel_ethtool_ts_info info = { };
int num = 0;
if (!__ethtool_get_ts_info(dev, &info))
@@ -661,7 +693,7 @@ int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
}
EXPORT_SYMBOL(ethtool_get_phc_vclocks);
-int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info)
+int ethtool_get_ts_info_by_layer(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
return __ethtool_get_ts_info(dev, info);
}
@@ -712,3 +744,17 @@ ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size)
}
}
EXPORT_SYMBOL_GPL(ethtool_forced_speed_maps_init);
+
+void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id)
+{
+ struct ethtool_rxfh_context *ctx;
+
+ WARN_ONCE(!rtnl_is_locked() &&
+ !lockdep_is_held_type(&dev->ethtool->rss_lock, -1),
+ "RSS context lock assertion failed\n");
+
+ netdev_err(dev, "device error, RSS context %d lost\n", context_id);
+ ctx = xa_erase(&dev->ethtool->rss_ctx, context_id);
+ kfree(ctx);
+}
+EXPORT_SYMBOL(ethtool_rxfh_context_lost);
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 28b8aaaf9bcb..863806fcf01a 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -42,9 +42,9 @@ int __ethtool_get_link(struct net_device *dev);
bool convert_legacy_settings_to_link_ksettings(
struct ethtool_link_ksettings *link_ksettings,
const struct ethtool_cmd *legacy_settings);
-int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
+u32 ethtool_get_max_rxfh_channel(struct net_device *dev);
int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max);
-int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
extern const struct ethtool_phy_ops *ethtool_phy_ops;
extern const struct ethtool_pse_ops *ethtool_pse_ops;
diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
index 6209c3a9c8f7..3b8209e930fd 100644
--- a/net/ethtool/eeprom.c
+++ b/net/ethtool/eeprom.c
@@ -91,6 +91,12 @@ static int get_module_eeprom_by_page(struct net_device *dev,
{
const struct ethtool_ops *ops = dev->ethtool_ops;
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack,
+ "Module firmware flashing is in progress");
+ return -EBUSY;
+ }
+
if (dev->sfp_bus)
return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 5a55270aa86e..983fee76f5cf 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -65,7 +65,8 @@ u32 ethtool_op_get_link(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_link);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+int ethtool_op_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -658,6 +659,9 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
memset(&link_ksettings, 0, sizeof(link_ksettings));
err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
if (err < 0)
@@ -1199,6 +1203,7 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxfh_param rxfh_dev = {};
u32 user_indir_size, user_key_size;
+ struct ethtool_rxfh_context *ctx;
struct ethtool_rxfh rxfh;
u32 indir_bytes;
u8 *rss_config;
@@ -1246,11 +1251,26 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (user_key_size)
rxfh_dev.key = rss_config + indir_bytes;
- rxfh_dev.rss_context = rxfh.rss_context;
-
- ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev);
- if (ret)
- goto out;
+ if (rxfh.rss_context) {
+ ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ if (!ctx) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (rxfh_dev.indir)
+ memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx),
+ indir_bytes);
+ if (rxfh_dev.key)
+ memcpy(rxfh_dev.key, ethtool_rxfh_context_key(ctx),
+ user_key_size);
+ rxfh_dev.hfunc = ctx->hfunc;
+ rxfh_dev.input_xfrm = ctx->input_xfrm;
+ ret = 0;
+ } else {
+ ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev);
+ if (ret)
+ goto out;
+ }
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc),
&rxfh_dev.hfunc, sizeof(rxfh.hfunc))) {
@@ -1271,6 +1291,40 @@ out:
return ret;
}
+static struct ethtool_rxfh_context *
+ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
+ u32 indir_size, u32 key_size)
+{
+ size_t indir_bytes, flex_len, key_off, size;
+ struct ethtool_rxfh_context *ctx;
+ u32 priv_bytes, indir_max;
+ u16 key_max;
+
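+ /* The context's flexible data[] area holds the driver's private data
+ * (u32-aligned), followed by the indirection table and then the hash
+ * key.
+ */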
+ key_max = max(key_size, ops->rxfh_key_space);
+ indir_max = max(indir_size, ops->rxfh_indir_space);
+
+ priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32));
+ indir_bytes = array_size(indir_max, sizeof(u32));
+
+ key_off = size_add(priv_bytes, indir_bytes);
+ flex_len = size_add(key_off, key_max);
+ size = struct_size_t(struct ethtool_rxfh_context, data, flex_len);
+
+ ctx = kzalloc(size, GFP_KERNEL_ACCOUNT);
+ if (!ctx)
+ return NULL;
+
+ ctx->indir_size = indir_size;
+ ctx->key_size = key_size;
+ ctx->key_off = key_off;
+ ctx->priv_size = ops->rxfh_priv_size;
+
+ ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
+ ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
+
+ return ctx;
+}
+
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
void __user *useraddr)
{
@@ -1278,10 +1332,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
u32 dev_indir_size = 0, dev_key_size = 0, i;
struct ethtool_rxfh_param rxfh_dev = {};
+ struct ethtool_rxfh_context *ctx = NULL;
struct netlink_ext_ack *extack = NULL;
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
+ bool locked = false; /* dev->ethtool->rss_lock taken */
u32 indir_bytes = 0;
+ bool create = false;
u8 *rss_config;
int ret;
@@ -1306,9 +1363,11 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR &&
rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)
return -EINVAL;
- if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
+ if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE &&
+ (rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
!ops->cap_rss_sym_xor_supported)
return -EOPNOTSUPP;
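+ /* A request with context ID ETH_RXFH_CONTEXT_ALLOC asks for a new
+ * additional RSS context to be allocated.
+ */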
+ create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC;
/* If either indir, hash key or function is valid, proceed further.
* Must request at least one change: indir size, hash key, function
@@ -1374,13 +1433,69 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
}
}
+ if (rxfh.rss_context) {
+ mutex_lock(&dev->ethtool->rss_lock);
+ locked = true;
+ }
+ if (create) {
+ if (rxfh_dev.rss_delete) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (ops->create_rxfh_context) {
+ u32 limit = ops->rxfh_max_context_id ?: U32_MAX;
+ u32 ctx_id;
+
+ /* driver uses new API, core allocates ID */
+ ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx,
+ XA_LIMIT(1, limit), GFP_KERNEL_ACCOUNT);
+ if (ret < 0) {
+ kfree(ctx);
+ goto out;
+ }
+ WARN_ON(!ctx_id); /* can't happen */
+ rxfh.rss_context = ctx_id;
+ }
+ } else if (rxfh.rss_context) {
+ ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ if (!ctx) {
+ ret = -ENOENT;
+ goto out;
+ }
+ }
rxfh_dev.hfunc = rxfh.hfunc;
rxfh_dev.rss_context = rxfh.rss_context;
rxfh_dev.input_xfrm = rxfh.input_xfrm;
- ret = ops->set_rxfh(dev, &rxfh_dev, extack);
- if (ret)
+ if (rxfh.rss_context && ops->create_rxfh_context) {
+ if (create)
+ ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev,
+ extack);
+ else if (rxfh_dev.rss_delete)
+ ret = ops->remove_rxfh_context(dev, ctx,
+ rxfh.rss_context,
+ extack);
+ else
+ ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev,
+ extack);
+ } else {
+ ret = ops->set_rxfh(dev, &rxfh_dev, extack);
+ }
+ if (ret) {
+ if (create) {
+ /* failed to create, free our new tracking entry */
+ if (ops->create_rxfh_context)
+ xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ kfree(ctx);
+ }
goto out;
+ }
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
&rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context)))
@@ -1393,8 +1508,44 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
dev->priv_flags |= IFF_RXFH_CONFIGURED;
}
+ /* Update rss_ctx tracking */
+ if (create && !ops->create_rxfh_context) {
+ /* driver uses old API, it chose context ID */
+ if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh_dev.rss_context))) {
+ /* context ID reused, our tracking is screwed */
+ kfree(ctx);
+ goto out;
+ }
+ /* Allocate the exact ID the driver gave us */
+ if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh_dev.rss_context,
+ ctx, GFP_KERNEL))) {
+ kfree(ctx);
+ goto out;
+ }
+ }
+ if (rxfh_dev.rss_delete) {
+ WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx);
+ kfree(ctx);
+ } else if (ctx) {
+ if (rxfh_dev.indir) {
+ for (i = 0; i < dev_indir_size; i++)
+ ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i];
+ ctx->indir_configured = 1;
+ }
+ if (rxfh_dev.key) {
+ memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key,
+ dev_key_size);
+ ctx->key_configured = 1;
+ }
+ if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE)
+ ctx->hfunc = rxfh_dev.hfunc;
+ if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE)
+ ctx->input_xfrm = rxfh_dev.input_xfrm;
+ }
out:
+ if (locked)
+ mutex_unlock(&dev->ethtool->rss_lock);
kfree(rss_config);
return ret;
}
@@ -1449,6 +1600,9 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
if (!dev->ethtool_ops->reset)
return -EOPNOTSUPP;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
if (copy_from_user(&reset, useraddr, sizeof(reset)))
return -EFAULT;
@@ -1503,7 +1657,7 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
if (ret)
return ret;
- dev->wol_enabled = !!wol.wolopts;
+ dev->ethtool->wol_enabled = !!wol.wolopts;
ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
return 0;
@@ -1923,9 +2077,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
* indirection table/rxnfc settings */
if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
max_rxnfc_in_use = 0;
- if (!netif_is_rxfh_configured(dev) ||
- ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
- max_rxfh_in_use = 0;
+ max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
if (channels.combined_count + channels.rx_count <=
max_t(u64, max_rxnfc_in_use, max_rxfh_in_use))
return -EINVAL;
@@ -2220,7 +2372,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
int n_stats, ret;
- if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
+ if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
return -EOPNOTSUPP;
n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
@@ -2443,13 +2595,20 @@ out:
static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ts_info info;
+ struct kernel_ethtool_ts_info kernel_info;
+ struct ethtool_ts_info info = {};
int err;
- err = __ethtool_get_ts_info(dev, &info);
+ err = __ethtool_get_ts_info(dev, &kernel_info);
if (err)
return err;
+ info.cmd = kernel_info.cmd;
+ info.so_timestamping = kernel_info.so_timestamping;
+ info.phc_index = kernel_info.phc_index;
+ info.tx_types = kernel_info.tx_types;
+ info.rx_filters = kernel_info.rx_filters;
+
if (copy_to_user(useraddr, &info, sizeof(info)))
return -EFAULT;
@@ -2462,6 +2621,9 @@ int ethtool_get_module_info_call(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
struct phy_device *phydev = dev->phydev;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
if (dev->sfp_bus)
return sfp_get_module_info(dev->sfp_bus, modinfo);
@@ -2499,6 +2661,9 @@ int ethtool_get_module_eeprom_call(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
struct phy_device *phydev = dev->phydev;
+ if (dev->ethtool->module_fw_flash_in_progress)
+ return -EBUSY;
+
if (dev->sfp_bus)
return sfp_get_module_eeprom(dev->sfp_bus, ee, data);
diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
index b2de2108b356..34d76e87847d 100644
--- a/net/ethtool/linkstate.c
+++ b/net/ethtool/linkstate.c
@@ -37,6 +37,8 @@ static int linkstate_get_sqi(struct net_device *dev)
mutex_lock(&phydev->lock);
if (!phydev->drv || !phydev->drv->get_sqi)
ret = -EOPNOTSUPP;
+ else if (!phydev->link)
+ ret = -ENETDOWN;
else
ret = phydev->drv->get_sqi(phydev);
mutex_unlock(&phydev->lock);
@@ -55,6 +57,8 @@ static int linkstate_get_sqi_max(struct net_device *dev)
mutex_lock(&phydev->lock);
if (!phydev->drv || !phydev->drv->get_sqi_max)
ret = -EOPNOTSUPP;
+ else if (!phydev->link)
+ ret = -ENETDOWN;
else
ret = phydev->drv->get_sqi_max(phydev);
mutex_unlock(&phydev->lock);
@@ -62,6 +66,17 @@ static int linkstate_get_sqi_max(struct net_device *dev)
return ret;
};
+static bool linkstate_sqi_critical_error(int sqi)
+{
+ return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
+}
+
+static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
+{
+ return data->sqi >= 0 && data->sqi_max >= 0 &&
+ data->sqi <= data->sqi_max;
+}
+
static int linkstate_get_link_ext_state(struct net_device *dev,
struct linkstate_reply_data *data)
{
@@ -93,12 +108,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
data->link = __ethtool_get_link(dev);
ret = linkstate_get_sqi(dev);
- if (ret < 0 && ret != -EOPNOTSUPP)
+ if (linkstate_sqi_critical_error(ret))
goto out;
data->sqi = ret;
ret = linkstate_get_sqi_max(dev);
- if (ret < 0 && ret != -EOPNOTSUPP)
+ if (linkstate_sqi_critical_error(ret))
goto out;
data->sqi_max = ret;
@@ -136,11 +151,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */
+ 0;
- if (data->sqi != -EOPNOTSUPP)
- len += nla_total_size(sizeof(u32));
-
- if (data->sqi_max != -EOPNOTSUPP)
- len += nla_total_size(sizeof(u32));
+ if (linkstate_sqi_valid(data)) {
+ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */
+ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */
+ }
if (data->link_ext_state_provided)
len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
@@ -164,13 +178,14 @@ static int linkstate_fill_reply(struct sk_buff *skb,
nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link))
return -EMSGSIZE;
- if (data->sqi != -EOPNOTSUPP &&
- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
- return -EMSGSIZE;
+ if (linkstate_sqi_valid(data)) {
+ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
+ return -EMSGSIZE;
- if (data->sqi_max != -EOPNOTSUPP &&
- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
- return -EMSGSIZE;
+ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX,
+ data->sqi_max))
+ return -EMSGSIZE;
+ }
if (data->link_ext_state_provided) {
if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,
diff --git a/net/ethtool/module.c b/net/ethtool/module.c
index ceb575efc290..6988e07bdcd6 100644
--- a/net/ethtool/module.c
+++ b/net/ethtool/module.c
@@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/sfp.h>
+#include <net/devlink.h>
#include "netlink.h"
#include "common.h"
#include "bitset.h"
+#include "module_fw.h"
struct module_req_info {
struct ethnl_req_info base;
@@ -33,6 +37,12 @@ static int module_get_power_mode(struct net_device *dev,
if (!ops->get_module_power_mode)
return 0;
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack,
+ "Module firmware flashing is in progress");
+ return -EBUSY;
+ }
+
return ops->get_module_power_mode(dev, &data->power, extack);
}
@@ -109,6 +119,12 @@ ethnl_set_module_validate(struct ethnl_req_info *req_info,
if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY])
return 0;
+ if (req_info->dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(info->extack,
+ "Module firmware flashing is in progress");
+ return -EBUSY;
+ }
+
if (!ops->get_module_power_mode || !ops->set_module_power_mode) {
NL_SET_ERR_MSG_ATTR(info->extack,
tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY],
@@ -158,3 +174,381 @@ const struct ethnl_request_ops ethnl_module_request_ops = {
.set = ethnl_set_module,
.set_ntf_cmd = ETHTOOL_MSG_MODULE_NTF,
};
+
+/* MODULE_FW_FLASH_ACT */
+
+const struct nla_policy
+ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1] = {
+ [ETHTOOL_A_MODULE_FW_FLASH_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME] = { .type = NLA_NUL_STRING },
+ [ETHTOOL_A_MODULE_FW_FLASH_PASSWORD] = { .type = NLA_U32 },
+};
+
+static LIST_HEAD(module_fw_flash_work_list);
+static DEFINE_SPINLOCK(module_fw_flash_work_list_lock);
+
+static int
+module_flash_fw_work_list_add(struct ethtool_module_fw_flash *module_fw,
+ struct genl_info *info)
+{
+ struct ethtool_module_fw_flash *work;
+
+ /* First, check if already registered. */
+ spin_lock(&module_fw_flash_work_list_lock);
+ list_for_each_entry(work, &module_fw_flash_work_list, list) {
+ if (work->fw_update.ntf_params.portid == info->snd_portid &&
+ work->fw_update.dev == module_fw->fw_update.dev) {
+ spin_unlock(&module_fw_flash_work_list_lock);
+ return -EALREADY;
+ }
+ }
+
+ list_add_tail(&module_fw->list, &module_fw_flash_work_list);
+ spin_unlock(&module_fw_flash_work_list_lock);
+
+ return 0;
+}
+
+static void module_flash_fw_work_list_del(struct list_head *list)
+{
+ spin_lock(&module_fw_flash_work_list_lock);
+ list_del(list);
+ spin_unlock(&module_fw_flash_work_list_lock);
+}
+
+static void module_flash_fw_work(struct work_struct *work)
+{
+ struct ethtool_module_fw_flash *module_fw;
+
+ module_fw = container_of(work, struct ethtool_module_fw_flash, work);
+
+ ethtool_cmis_fw_update(&module_fw->fw_update);
+
+ module_flash_fw_work_list_del(&module_fw->list);
+ module_fw->fw_update.dev->ethtool->module_fw_flash_in_progress = false;
+ netdev_put(module_fw->fw_update.dev, &module_fw->dev_tracker);
+ release_firmware(module_fw->fw_update.fw);
+ kfree(module_fw);
+}
+
+#define MODULE_EEPROM_PHYS_ID_PAGE 0
+#define MODULE_EEPROM_PHYS_ID_I2C_ADDR 0x50
+
+static int module_flash_fw_work_init(struct ethtool_module_fw_flash *module_fw,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_module_eeprom page_data = {};
+ u8 phys_id;
+ int err;
+
+ /* Fetch the SFF-8024 Identifier Value. For all supported standards, it
+ * is located at I2C address 0x50, byte 0. See section 4.1 in SFF-8024,
+ * revision 4.9.
+ */
+ page_data.page = MODULE_EEPROM_PHYS_ID_PAGE;
+ page_data.offset = SFP_PHYS_ID;
+ page_data.length = sizeof(phys_id);
+ page_data.i2c_address = MODULE_EEPROM_PHYS_ID_I2C_ADDR;
+ page_data.data = &phys_id;
+
+ err = ops->get_module_eeprom_by_page(dev, &page_data, extack);
+ if (err < 0)
+ return err;
+
+ switch (phys_id) {
+ case SFF8024_ID_QSFP_DD:
+ case SFF8024_ID_OSFP:
+ case SFF8024_ID_DSFP:
+ case SFF8024_ID_QSFP_PLUS_CMIS:
+ case SFF8024_ID_SFP_DD_CMIS:
+ case SFF8024_ID_SFP_PLUS_CMIS:
+ INIT_WORK(&module_fw->work, module_flash_fw_work);
+ break;
+ default:
+ NL_SET_ERR_MSG(extack,
+ "Module type does not support firmware flashing");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
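+/* Called when the requesting socket is released. Mark the matching work item
+ * so that no further notifications are sent to it.
+ */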
+void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv)
+{
+ struct ethtool_module_fw_flash *work;
+
+ spin_lock(&module_fw_flash_work_list_lock);
+ list_for_each_entry(work, &module_fw_flash_work_list, list) {
+ if (work->fw_update.dev == sk_priv->dev &&
+ work->fw_update.ntf_params.portid == sk_priv->portid) {
+ work->fw_update.ntf_params.closed_sock = true;
+ break;
+ }
+ }
+ spin_unlock(&module_fw_flash_work_list_lock);
+}
+
+static int
+module_flash_fw_schedule(struct net_device *dev, const char *file_name,
+ struct ethtool_module_fw_flash_params *params,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethtool_cmis_fw_update_params *fw_update;
+ struct ethtool_module_fw_flash *module_fw;
+ int err;
+
+ module_fw = kzalloc(sizeof(*module_fw), GFP_KERNEL);
+ if (!module_fw)
+ return -ENOMEM;
+
+ fw_update = &module_fw->fw_update;
+ fw_update->params = *params;
+ err = request_firmware_direct(&fw_update->fw,
+ file_name, &dev->dev);
+ if (err) {
+ NL_SET_ERR_MSG(info->extack,
+ "Failed to request module firmware image");
+ goto err_free;
+ }
+
+ err = module_flash_fw_work_init(module_fw, dev, info->extack);
+ if (err < 0)
+ goto err_release_firmware;
+
+ dev->ethtool->module_fw_flash_in_progress = true;
+ netdev_hold(dev, &module_fw->dev_tracker, GFP_KERNEL);
+ fw_update->dev = dev;
+ fw_update->ntf_params.portid = info->snd_portid;
+ fw_update->ntf_params.seq = info->snd_seq;
+ fw_update->ntf_params.closed_sock = false;
+
+ err = ethnl_sock_priv_set(skb, dev, fw_update->ntf_params.portid,
+ ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH);
+ if (err < 0)
+ goto err_release_firmware;
+
+ err = module_flash_fw_work_list_add(module_fw, info);
+ if (err < 0)
+ goto err_release_firmware;
+
+ schedule_work(&module_fw->work);
+
+ return 0;
+
+err_release_firmware:
+ release_firmware(fw_update->fw);
+err_free:
+ kfree(module_fw);
+ return err;
+}
+
+static int module_flash_fw(struct net_device *dev, struct nlattr **tb,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethtool_module_fw_flash_params params = {};
+ const char *file_name;
+ struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME))
+ return -EINVAL;
+
+ file_name = nla_data(tb[ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME]);
+
+ attr = tb[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD];
+ if (attr) {
+ params.password = cpu_to_be32(nla_get_u32(attr));
+ params.password_valid = true;
+ }
+
+ return module_flash_fw_schedule(dev, file_name, &params, skb, info);
+}
+
+static int ethnl_module_fw_flash_validate(struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_port *devlink_port = dev->devlink_port;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->set_module_eeprom_by_page ||
+ !ops->get_module_eeprom_by_page) {
+ NL_SET_ERR_MSG(extack,
+ "Flashing module firmware is not supported by this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ops->reset) {
+ NL_SET_ERR_MSG(extack,
+ "Reset module is not supported by this device, so flashing is not permitted");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack, "Module firmware flashing already in progress");
+ return -EBUSY;
+ }
+
+ if (dev->flags & IFF_UP) {
+ NL_SET_ERR_MSG(extack, "Netdevice is up, so flashing is not permitted");
+ return -EBUSY;
+ }
+
+ if (devlink_port && devlink_port->attrs.split) {
+ NL_SET_ERR_MSG(extack, "Can't perform firmware flashing on a split port");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ struct net_device *dev;
+ int ret;
+
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_MODULE_FW_FLASH_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+
+ ret = ethnl_module_fw_flash_validate(dev, info->extack);
+ if (ret < 0)
+ goto out_rtnl;
+
+ ret = module_flash_fw(dev, tb, skb, info);
+
+ ethnl_ops_complete(dev);
+
+out_rtnl:
+ rtnl_unlock();
+ ethnl_parse_header_dev_put(&req_info);
+ return ret;
+}
+
+/* MODULE_FW_FLASH_NTF */
+
+static int
+ethnl_module_fw_flash_ntf_put_err(struct sk_buff *skb, char *err_msg,
+ char *sub_err_msg)
+{
+ int err_msg_len, sub_err_msg_len, total_len;
+ struct nlattr *attr;
+
+ if (!err_msg)
+ return 0;
+
+ err_msg_len = strlen(err_msg);
+ total_len = err_msg_len + 2; /* For period and NUL. */
+
+ if (sub_err_msg) {
+ sub_err_msg_len = strlen(sub_err_msg);
+ total_len += sub_err_msg_len + 2; /* For ", ". */
+ }
+
+ attr = nla_reserve(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG,
+ total_len);
+ if (!attr)
+ return -ENOMEM;
+
+ if (sub_err_msg)
+ sprintf(nla_data(attr), "%s, %s.", err_msg, sub_err_msg);
+ else
+ sprintf(nla_data(attr), "%s.", err_msg);
+
+ return 0;
+}
+
+static void
+ethnl_module_fw_flash_ntf(struct net_device *dev,
+ enum ethtool_module_fw_flash_status status,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params,
+ char *err_msg, char *sub_err_msg,
+ u64 done, u64 total)
+{
+ struct sk_buff *skb;
+ void *hdr;
+ int ret;
+
+ if (ntf_params->closed_sock)
+ return;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = ethnl_unicast_put(skb, ntf_params->portid, ++ntf_params->seq,
+ ETHTOOL_MSG_MODULE_FW_FLASH_NTF);
+ if (!hdr)
+ goto err_skb;
+
+ ret = ethnl_fill_reply_header(skb, dev,
+ ETHTOOL_A_MODULE_FW_FLASH_HEADER);
+ if (ret < 0)
+ goto err_skb;
+
+ if (nla_put_u32(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS, status))
+ goto err_skb;
+
+ ret = ethnl_module_fw_flash_ntf_put_err(skb, err_msg, sub_err_msg);
+ if (ret < 0)
+ goto err_skb;
+
+ if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_DONE, done))
+ goto err_skb;
+
+ if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_TOTAL, total))
+ goto err_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_unicast(dev_net(dev), skb, ntf_params->portid);
+ return;
+
+err_skb:
+ nlmsg_free(skb);
+}
+
+void ethnl_module_fw_flash_ntf_err(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ char *err_msg, char *sub_err_msg)
+{
+ ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR,
+ params, err_msg, sub_err_msg, 0, 0);
+}
+
+void
+ethnl_module_fw_flash_ntf_start(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params)
+{
+ ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED,
+ params, NULL, NULL, 0, 0);
+}
+
+void
+ethnl_module_fw_flash_ntf_complete(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params)
+{
+ ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED,
+ params, NULL, NULL, 0, 0);
+}
+
+void
+ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ u64 done, u64 total)
+{
+ ethnl_module_fw_flash_ntf(dev,
+ ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS,
+ params, NULL, NULL, done, total);
+}
diff --git a/net/ethtool/module_fw.h b/net/ethtool/module_fw.h
new file mode 100644
index 000000000000..634543a12d0c
--- /dev/null
+++ b/net/ethtool/module_fw.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <uapi/linux/ethtool.h>
+#include "netlink.h"
+
+/**
+ * struct ethnl_module_fw_flash_ntf_params - module firmware flashing
+ * notification parameters
+ * @portid: Netlink portid of sender.
+ * @seq: Sequence number of sender.
+ * @closed_sock: Indicates whether the socket was closed from user space.
+ */
+struct ethnl_module_fw_flash_ntf_params {
+ u32 portid;
+ u32 seq;
+ bool closed_sock;
+};
+
+/**
+ * struct ethtool_module_fw_flash_params - module firmware flashing parameters
+ * @password: Module password. Only valid when @password_valid is set.
+ * @password_valid: Whether the module password is valid or not.
+ */
+struct ethtool_module_fw_flash_params {
+ __be32 password;
+ u8 password_valid:1;
+};
+
+/**
+ * struct ethtool_cmis_fw_update_params - CMIS firmware update specific
+ * parameters
+ * @dev: Pointer to the net_device to be flashed.
+ * @params: Module firmware flashing parameters.
+ * @ntf_params: Module firmware flashing notification parameters.
+ * @fw: Firmware to flash.
+ */
+struct ethtool_cmis_fw_update_params {
+ struct net_device *dev;
+ struct ethtool_module_fw_flash_params params;
+ struct ethnl_module_fw_flash_ntf_params ntf_params;
+ const struct firmware *fw;
+};
+
+/**
+ * struct ethtool_module_fw_flash - module firmware flashing
+ * @list: List node for &module_fw_flash_work_list.
+ * @dev_tracker: Refcount tracker for @dev.
+ * @work: The firmware flashing work.
+ * @fw_update: CMIS firmware update specific parameters.
+ */
+struct ethtool_module_fw_flash {
+ struct list_head list;
+ netdevice_tracker dev_tracker;
+ struct work_struct work;
+ struct ethtool_cmis_fw_update_params fw_update;
+};
+
+void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv);
+
+void
+ethnl_module_fw_flash_ntf_err(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ char *err_msg, char *sub_err_msg);
+void
+ethnl_module_fw_flash_ntf_start(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params);
+void
+ethnl_module_fw_flash_ntf_complete(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params);
+void
+ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *params,
+ u64 done, u64 total);
+
+void ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *params);
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index bd04f28d5cf4..cb1eea00e349 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -4,6 +4,7 @@
#include <linux/ethtool_netlink.h>
#include <linux/pm_runtime.h>
#include "netlink.h"
+#include "module_fw.h"
static struct genl_family ethtool_genl_family;
@@ -30,6 +31,35 @@ const struct nla_policy ethnl_header_policy_stats[] = {
ETHTOOL_FLAGS_STATS),
};
+int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
+ enum ethnl_sock_type type)
+{
+ struct ethnl_sock_priv *sk_priv;
+
+ sk_priv = genl_sk_priv_get(&ethtool_genl_family, NETLINK_CB(skb).sk);
+ if (IS_ERR(sk_priv))
+ return PTR_ERR(sk_priv);
+
+ sk_priv->dev = dev;
+ sk_priv->portid = portid;
+ sk_priv->type = type;
+
+ return 0;
+}
+
+static void ethnl_sock_priv_destroy(void *priv)
+{
+ struct ethnl_sock_priv *sk_priv = priv;
+
+ switch (sk_priv->type) {
+ case ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH:
+ ethnl_module_fw_flash_sock_destroy(sk_priv);
+ break;
+ default:
+ break;
+ }
+}
+
int ethnl_ops_begin(struct net_device *dev)
{
int ret;
@@ -239,6 +269,11 @@ void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd)
cmd);
}
+void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd)
+{
+ return genlmsg_put(skb, portid, seq, &ethtool_genl_family, 0, cmd);
+}
+
int ethnl_multicast(struct sk_buff *skb, struct net_device *dev)
{
return genlmsg_multicast_netns(&ethtool_genl_family, dev_net(dev), skb,
@@ -760,10 +795,22 @@ static void ethnl_notify_features(struct netdev_notifier_info *info)
static int ethnl_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
+ struct netdev_notifier_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *dev;
+
+ dev = netdev_notifier_info_to_dev(info);
+ extack = netdev_notifier_info_to_extack(info);
+
switch (event) {
case NETDEV_FEAT_CHANGE:
ethnl_notify_features(ptr);
break;
+ case NETDEV_PRE_UP:
+ if (dev->ethtool->module_fw_flash_in_progress) {
+ NL_SET_ERR_MSG(extack, "Can't set port up while flashing module firmware");
+ return NOTIFY_BAD;
+ }
}
return NOTIFY_DONE;
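For context, the NETDEV_PRE_UP handling above vetoes bringing the device up by returning NOTIFY_BAD while a module firmware flash is in progress. A rough userspace sketch of the same veto pattern, with made-up names (fake_dev, pre_up_notifier, dev_open) standing in for the notifier chain:

/* Illustration only, not the kernel notifier API. */
#include <stdbool.h>
#include <stdio.h>

enum { NOTIFY_DONE, NOTIFY_BAD };

struct fake_dev {
	bool flash_in_progress;
};

static int pre_up_notifier(struct fake_dev *dev)
{
	if (dev->flash_in_progress) {
		fprintf(stderr, "Can't set port up while flashing module firmware\n");
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static int dev_open(struct fake_dev *dev)
{
	if (pre_up_notifier(dev) == NOTIFY_BAD)
		return -1;	/* vetoed: the device stays down */
	printf("device is up\n");
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .flash_in_progress = true };

	dev_open(&dev);			/* vetoed */
	dev.flash_in_progress = false;
	dev_open(&dev);			/* succeeds */
	return 0;
}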
@@ -1125,6 +1172,13 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_mm_set_policy,
.maxattr = ARRAY_SIZE(ethnl_mm_set_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_MODULE_FW_FLASH_ACT,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_act_module_fw_flash,
+ .policy = ethnl_module_fw_flash_act_policy,
+ .maxattr = ARRAY_SIZE(ethnl_module_fw_flash_act_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
@@ -1141,6 +1195,8 @@ static struct genl_family ethtool_genl_family __ro_after_init = {
.resv_start_op = ETHTOOL_MSG_MODULE_GET + 1,
.mcgrps = ethtool_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(ethtool_nl_mcgrps),
+ .sock_priv_size = sizeof(struct ethnl_sock_priv),
+ .sock_priv_destroy = ethnl_sock_priv_destroy,
};
/* module setup */
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 9a333a8d04c1..46ec273a87c5 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -21,6 +21,7 @@ struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd,
void **ehdrp);
void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd);
void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd);
+void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd);
int ethnl_multicast(struct sk_buff *skb, struct net_device *dev);
/**
@@ -283,6 +284,19 @@ struct ethnl_reply_data {
int ethnl_ops_begin(struct net_device *dev);
void ethnl_ops_complete(struct net_device *dev);
+enum ethnl_sock_type {
+ ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH,
+};
+
+struct ethnl_sock_priv {
+ struct net_device *dev;
+ u32 portid;
+ enum ethnl_sock_type type;
+};
+
+int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
+ enum ethnl_sock_type type);
+
/**
* struct ethnl_request_ops - unified handling of GET and SET requests
* @request_cmd: command id for request (GET)
@@ -441,6 +455,7 @@ extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1]
extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1];
extern const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1];
extern const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1];
+extern const struct nla_policy ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1];
int ethnl_set_features(struct sk_buff *skb, struct genl_info *info);
int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info);
@@ -448,6 +463,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_start(struct netlink_callback *cb);
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index 2c981d443f27..ff81aa749784 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -86,10 +86,56 @@ static int pse_reply_size(const struct ethnl_req_info *req_base,
len += nla_total_size(sizeof(u32)); /* _C33_PSE_ADMIN_STATE */
if (st->c33_pw_status > 0)
len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_D_STATUS */
+ if (st->c33_pw_class > 0)
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_CLASS */
+ if (st->c33_actual_pw > 0)
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_ACTUAL_PW */
+ if (st->c33_ext_state_info.c33_pse_ext_state > 0) {
+ len += nla_total_size(sizeof(u32)); /* _C33_PSE_EXT_STATE */
+ if (st->c33_ext_state_info.__c33_pse_ext_substate > 0)
+ /* _C33_PSE_EXT_SUBSTATE */
+ len += nla_total_size(sizeof(u32));
+ }
+ if (st->c33_avail_pw_limit > 0)
+ /* _C33_AVAIL_PSE_PW_LIMIT */
+ len += nla_total_size(sizeof(u32));
+ if (st->c33_pw_limit_nb_ranges > 0)
+ /* _C33_PSE_PW_LIMIT_RANGES */
+ len += st->c33_pw_limit_nb_ranges *
+ (nla_total_size(0) +
+ nla_total_size(sizeof(u32)) * 2);
return len;
}
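A quick check of the per-range size reservation above: in the kernel, nla_total_size(len) is NLA_ALIGN(NLA_HDRLEN + len), so each power-limit range nest accounts for one empty nest header plus two u32 attributes. A standalone sketch reproducing that arithmetic (the constants mirror the kernel's values, but this is illustration only):

#include <stdint.h>
#include <stdio.h>

#define NLA_HDRLEN	4
#define NLA_ALIGN(len)	(((len) + 3) & ~3)

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int nb_ranges = 3;
	int per_range = nla_total_size(0) +			/* nest header */
			nla_total_size(sizeof(uint32_t)) * 2;	/* min + max */

	printf("per range: %d bytes, %d ranges: %d bytes\n",
	       per_range, nb_ranges, per_range * nb_ranges);
	return 0;
}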
+static int pse_put_pw_limit_ranges(struct sk_buff *skb,
+ const struct pse_control_status *st)
+{
+ const struct ethtool_c33_pse_pw_limit_range *pw_limit_ranges;
+ int i;
+
+ pw_limit_ranges = st->c33_pw_limit_ranges;
+ for (i = 0; i < st->c33_pw_limit_nb_ranges; i++) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MIN,
+ pw_limit_ranges->min) ||
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MAX,
+ pw_limit_ranges->max)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+ nla_nest_end(skb, nest);
+ pw_limit_ranges++;
+ }
+
+ return 0;
+}
+
static int pse_fill_reply(struct sk_buff *skb,
const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
@@ -117,9 +163,46 @@ static int pse_fill_reply(struct sk_buff *skb,
st->c33_pw_status))
return -EMSGSIZE;
+ if (st->c33_pw_class > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_CLASS,
+ st->c33_pw_class))
+ return -EMSGSIZE;
+
+ if (st->c33_actual_pw > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_ACTUAL_PW,
+ st->c33_actual_pw))
+ return -EMSGSIZE;
+
+ if (st->c33_ext_state_info.c33_pse_ext_state > 0) {
+ if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_STATE,
+ st->c33_ext_state_info.c33_pse_ext_state))
+ return -EMSGSIZE;
+
+ if (st->c33_ext_state_info.__c33_pse_ext_substate > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_SUBSTATE,
+ st->c33_ext_state_info.__c33_pse_ext_substate))
+ return -EMSGSIZE;
+ }
+
+ if (st->c33_avail_pw_limit > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT,
+ st->c33_avail_pw_limit))
+ return -EMSGSIZE;
+
+ if (st->c33_pw_limit_nb_ranges > 0 &&
+ pse_put_pw_limit_ranges(skb, st))
+ return -EMSGSIZE;
+
return 0;
}
+static void pse_cleanup_data(struct ethnl_reply_data *reply_base)
+{
+ const struct pse_reply_data *data = PSE_REPDATA(reply_base);
+
+ kfree(data->status.c33_pw_limit_ranges);
+}
+
/* PSE_SET */
const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
@@ -130,6 +213,7 @@ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
[ETHTOOL_A_C33_PSE_ADMIN_CONTROL] =
NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED,
ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED),
+ [ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 },
};
static int
@@ -172,19 +256,43 @@ static int
ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
{
struct net_device *dev = req_info->dev;
- struct pse_control_config config = {};
struct nlattr **tb = info->attrs;
struct phy_device *phydev;
+ int ret = 0;
phydev = dev->phydev;
+
+ if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) {
+ unsigned int pw_limit;
+
+ pw_limit = nla_get_u32(tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]);
+ ret = pse_ethtool_set_pw_limit(phydev->psec, info->extack,
+ pw_limit);
+ if (ret)
+ return ret;
+ }
+
/* These values are already validated by the ethnl_pse_set_policy */
- if (pse_has_podl(phydev->psec))
- config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]);
- if (pse_has_c33(phydev->psec))
- config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]);
+ if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] ||
+ tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) {
+ struct pse_control_config config = {};
+
+ if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL])
+ config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]);
+ if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL])
+ config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]);
+
+ /* pse_ethtool_set_config() will do nothing if the config
+ * is zero
+ */
+ ret = pse_ethtool_set_config(phydev->psec, info->extack,
+ &config);
+ if (ret)
+ return ret;
+ }
- /* Return errno directly - PSE has no notification */
- return pse_ethtool_set_config(phydev->psec, info->extack, &config);
+ /* Return errno or zero - PSE has no notification */
+ return ret;
}
const struct ethnl_request_ops ethnl_pse_request_ops = {
@@ -197,6 +305,7 @@ const struct ethnl_request_ops ethnl_pse_request_ops = {
.prepare_data = pse_prepare_data,
.reply_size = pse_reply_size,
.fill_reply = pse_fill_reply,
+ .cleanup_data = pse_cleanup_data,
.set_validate = ethnl_set_pse_validate,
.set = ethnl_set_pse,
diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c
index be2755c8d8fd..03d12d6f79ca 100644
--- a/net/ethtool/tsinfo.c
+++ b/net/ethtool/tsinfo.c
@@ -12,7 +12,7 @@ struct tsinfo_req_info {
struct tsinfo_reply_data {
struct ethnl_reply_data base;
- struct ethtool_ts_info ts_info;
+ struct kernel_ethtool_ts_info ts_info;
struct ethtool_ts_stats stats;
};
@@ -38,11 +38,11 @@ static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- if (req_base->flags & ETHTOOL_FLAG_STATS &&
- dev->ethtool_ops->get_ts_stats) {
+ if (req_base->flags & ETHTOOL_FLAG_STATS) {
ethtool_stats_init((u64 *)&data->stats,
sizeof(data->stats) / sizeof(u64));
- dev->ethtool_ops->get_ts_stats(dev, &data->stats);
+ if (dev->ethtool_ops->get_ts_stats)
+ dev->ethtool_ops->get_ts_stats(dev, &data->stats);
}
ret = __ethtool_get_ts_info(dev, &data->ts_info);
ethnl_ops_complete(dev);
@@ -55,7 +55,7 @@ static int tsinfo_reply_size(const struct ethnl_req_info *req_base,
{
const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
- const struct ethtool_ts_info *ts_info = &data->ts_info;
+ const struct kernel_ethtool_ts_info *ts_info = &data->ts_info;
int len = 0;
int ret;
@@ -136,7 +136,7 @@ static int tsinfo_fill_reply(struct sk_buff *skb,
{
const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
- const struct ethtool_ts_info *ts_info = &data->ts_info;
+ const struct kernel_ethtool_ts_info *ts_info = &data->ts_info;
int ret;
if (ts_info->so_timestamping) {
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index 0ed56c9ac1bc..a39d8000d808 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -137,7 +137,7 @@ ethnl_set_wol(struct ethnl_req_info *req_info, struct genl_info *info)
ret = dev->ethtool_ops->set_wol(dev, &wol);
if (ret)
return ret;
- dev->wol_enabled = !!wol.wolopts;
+ dev->ethtool->wol_enabled = !!wol.wolopts;
return 1;
}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index e6904288d40d..e4cc6b78dcfc 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -73,9 +73,15 @@ static void hsr_check_announce(struct net_device *hsr_dev)
mod_timer(&hsr->announce_timer, jiffies +
msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
+
+ if (hsr->redbox && !timer_pending(&hsr->announce_proxy_timer))
+ mod_timer(&hsr->announce_proxy_timer, jiffies +
+ msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL) / 2);
} else {
/* Deactivate the announce timer */
timer_delete(&hsr->announce_timer);
+ if (hsr->redbox)
+ timer_delete(&hsr->announce_proxy_timer);
}
}
@@ -279,10 +285,11 @@ out:
return NULL;
}
-static void send_hsr_supervision_frame(struct hsr_port *master,
- unsigned long *interval)
+static void send_hsr_supervision_frame(struct hsr_port *port,
+ unsigned long *interval,
+ const unsigned char *addr)
{
- struct hsr_priv *hsr = master->hsr;
+ struct hsr_priv *hsr = port->hsr;
__u8 type = HSR_TLV_LIFE_CHECK;
struct hsr_sup_payload *hsr_sp;
struct hsr_sup_tlv *hsr_stlv;
@@ -296,9 +303,9 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr->announce_count++;
}
- skb = hsr_init_skb(master);
+ skb = hsr_init_skb(port);
if (!skb) {
- netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n");
+ netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n");
return;
}
@@ -321,11 +328,12 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ?
sizeof(struct hsr_sup_payload) : 12;
- /* Payload: MacAddressA */
+ /* Payload: MacAddressA / SAN MAC from ProxyNodeTable */
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
- ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+ ether_addr_copy(hsr_sp->macaddress_A, addr);
- if (hsr->redbox) {
+ if (hsr->redbox &&
+ hsr_is_node_in_db(&hsr->proxy_node_db, addr)) {
hsr_stlv = skb_put(skb, sizeof(struct hsr_sup_tlv));
hsr_stlv->HSR_TLV_type = PRP_TLV_REDBOX_MAC;
hsr_stlv->HSR_TLV_length = sizeof(struct hsr_sup_payload);
@@ -340,13 +348,14 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
return;
}
- hsr_forward_skb(skb, master);
+ hsr_forward_skb(skb, port);
spin_unlock_bh(&hsr->seqnr_lock);
return;
}
static void send_prp_supervision_frame(struct hsr_port *master,
- unsigned long *interval)
+ unsigned long *interval,
+ const unsigned char *addr)
{
struct hsr_priv *hsr = master->hsr;
struct hsr_sup_payload *hsr_sp;
@@ -396,7 +405,7 @@ static void hsr_announce(struct timer_list *t)
rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
- hsr->proto_ops->send_sv_frame(master, &interval);
+ hsr->proto_ops->send_sv_frame(master, &interval, master->dev->dev_addr);
if (is_admin_up(master->dev))
mod_timer(&hsr->announce_timer, jiffies + interval);
@@ -404,6 +413,37 @@ static void hsr_announce(struct timer_list *t)
rcu_read_unlock();
}
+/* Announce (supervision frame) timer function for RedBox
+ */
+static void hsr_proxy_announce(struct timer_list *t)
+{
+ struct hsr_priv *hsr = from_timer(hsr, t, announce_proxy_timer);
+ struct hsr_port *interlink;
+ unsigned long interval = 0;
+ struct hsr_node *node;
+
+ rcu_read_lock();
+ /* The RedBox sends supervision frames to the HSR network carrying the
+ * MAC addresses of the SAN nodes stored in the ProxyNodeTable.
+ */
+ interlink = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
+ list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
+ if (hsr_addr_is_redbox(hsr, node->macaddress_A))
+ continue;
+ hsr->proto_ops->send_sv_frame(interlink, &interval,
+ node->macaddress_A);
+ }
+
+ if (is_admin_up(interlink->dev)) {
+ if (!interval)
+ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+
+ mod_timer(&hsr->announce_proxy_timer, jiffies + interval);
+ }
+
+ rcu_read_unlock();
+}
+
void hsr_del_ports(struct hsr_priv *hsr)
{
struct hsr_port *port;
@@ -590,6 +630,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
timer_setup(&hsr->announce_timer, hsr_announce, 0);
timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
timer_setup(&hsr->prune_proxy_timer, hsr_prune_proxy_nodes, 0);
+ timer_setup(&hsr->announce_proxy_timer, hsr_proxy_announce, 0);
ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 05a61b8286ec..b38060246e62 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -117,6 +117,35 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
return true;
}
+static bool is_proxy_supervision_frame(struct hsr_priv *hsr,
+ struct sk_buff *skb)
+{
+ struct hsr_sup_payload *payload;
+ struct ethhdr *eth_hdr;
+ u16 total_length = 0;
+
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* Get the HSR protocol revision. */
+ if (eth_hdr->h_proto == htons(ETH_P_HSR))
+ total_length = sizeof(struct hsrv1_ethhdr_sp);
+ else
+ total_length = sizeof(struct hsrv0_ethhdr_sp);
+
+ if (!pskb_may_pull(skb, total_length + sizeof(struct hsr_sup_payload)))
+ return false;
+
+ skb_pull(skb, total_length);
+ payload = (struct hsr_sup_payload *)skb->data;
+ skb_push(skb, total_length);
+
+ /* For RedBox (HSR-SAN), check whether we have received a supervision
+ * frame carrying a MAC address from our own ProxyNodeTable.
+ */
+ return hsr_is_node_in_db(&hsr->proxy_node_db,
+ payload->macaddress_A);
+}
+
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
struct hsr_frame_info *frame)
{
@@ -392,9 +421,9 @@ static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
- port->type == HSR_PT_SLAVE_B) ||
+ port->type == HSR_PT_SLAVE_B) ||
(frame->port_rcv->type == HSR_PT_SLAVE_B &&
- port->type == HSR_PT_SLAVE_A));
+ port->type == HSR_PT_SLAVE_A));
}
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
@@ -499,7 +528,8 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
frame->sequence_nr))
continue;
- if (frame->is_supervision && port->type == HSR_PT_MASTER) {
+ if (frame->is_supervision && port->type == HSR_PT_MASTER &&
+ !frame->is_proxy_supervision) {
hsr_handle_sup_frame(frame);
continue;
}
@@ -637,6 +667,9 @@ static int fill_frame_info(struct hsr_frame_info *frame,
memset(frame, 0, sizeof(*frame));
frame->is_supervision = is_supervision_frame(port->hsr, skb);
+ if (frame->is_supervision && hsr->redbox)
+ frame->is_proxy_supervision =
+ is_proxy_supervision_frame(port->hsr, skb);
n_db = &hsr->node_db;
if (port->type == HSR_PT_INTERLINK)
@@ -688,7 +721,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
/* Gets called for ingress frames as well as egress from master port.
* So check and increment stats for master port only here.
*/
- if (port->type == HSR_PT_MASTER) {
+ if (port->type == HSR_PT_MASTER || port->type == HSR_PT_INTERLINK) {
port->dev->stats.tx_packets++;
port->dev->stats.tx_bytes += skb->len;
}
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 614df9649794..73bc6f659812 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -36,6 +36,14 @@ static bool seq_nr_after(u16 a, u16 b)
#define seq_nr_before(a, b) seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b)))
+bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr)
+{
+ if (!hsr->redbox || !is_valid_ether_addr(hsr->macaddress_redbox))
+ return false;
+
+ return ether_addr_equal(addr, hsr->macaddress_redbox);
+}
+
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
struct hsr_self_node *sn;
@@ -591,6 +599,10 @@ void hsr_prune_proxy_nodes(struct timer_list *t)
spin_lock_bh(&hsr->list_lock);
list_for_each_entry_safe(node, tmp, &hsr->proxy_node_db, mac_list) {
+ /* Don't prune RedBox node. */
+ if (hsr_addr_is_redbox(hsr, node->macaddress_A))
+ continue;
+
timestamp = node->time_in[HSR_PT_INTERLINK];
/* Prune old entries */
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 7619e31c1d2d..993fa950d814 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -22,6 +22,7 @@ struct hsr_frame_info {
struct hsr_node *node_src;
u16 sequence_nr;
bool is_supervision;
+ bool is_proxy_supervision;
bool is_vlan;
bool is_local_dest;
bool is_local_exclusive;
@@ -35,6 +36,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
enum hsr_port_type rx_port);
void hsr_handle_sup_frame(struct hsr_frame_info *frame);
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr);
+bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr);
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 23850b16d1ea..ab1f8d35d9dc 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -170,7 +170,8 @@ struct hsr_node;
struct hsr_proto_ops {
/* format and send supervision frame */
- void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval);
+ void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval,
+ const unsigned char addr[ETH_ALEN]);
void (*handle_san_frame)(bool san, enum hsr_port_type port,
struct hsr_node *node);
bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port);
@@ -197,6 +198,7 @@ struct hsr_priv {
struct list_head proxy_node_db; /* RedBox HSR proxy nodes */
struct hsr_self_node __rcu *self_node; /* MACs of slaves */
struct timer_list announce_timer; /* Supervision frame dispatch */
+ struct timer_list announce_proxy_timer;
struct timer_list prune_timer;
struct timer_list prune_proxy_timer;
int announce_count;
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 898f18c6da53..f6ff0b61e08a 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -131,6 +131,7 @@ static void hsr_dellink(struct net_device *dev, struct list_head *head)
del_timer_sync(&hsr->prune_timer);
del_timer_sync(&hsr->prune_proxy_timer);
del_timer_sync(&hsr->announce_timer);
+ timer_delete_sync(&hsr->announce_proxy_timer);
hsr_debugfs_term(hsr);
hsr_del_ports(hsr);
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 56ef873828f4..867d637d86f0 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -130,7 +130,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
goto err;
fq->q.stamp = skb->tstamp;
- fq->q.mono_delivery_time = skb->mono_delivery_time;
+ fq->q.tstamp_type = skb->tstamp_type;
if (frag_type == LOWPAN_DISPATCH_FRAG1)
fq->q.flags |= INET_FRAG_FIRST_IN;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e03ba4a21c39..b24d74616637 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1532,7 +1532,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
}
NAPI_GRO_CB(skb)->flush |= flush;
- NAPI_GRO_CB(skb)->inner_network_offset = off;
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
/* Note : No need to call skb_gro_postpull_rcsum() here,
* as we already checked checksum over ipv4 header was 0
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 18227757ec0c..3f88d0961e5b 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -260,17 +260,17 @@ static int bpf_tcp_ca_check_member(const struct btf_type *t,
return 0;
}
-static int bpf_tcp_ca_reg(void *kdata)
+static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
{
return tcp_register_congestion_control(kdata);
}
-static void bpf_tcp_ca_unreg(void *kdata)
+static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link)
{
tcp_unregister_congestion_control(kdata);
}
-static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
+static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link)
{
return tcp_update_congestion_control(kdata, old_kdata);
}
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index dd6d46015058..8cc0e2f4159d 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1810,6 +1810,29 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
return CIPSO_V4_HDR_LEN + ret_val;
}
+static int cipso_v4_get_actual_opt_len(const unsigned char *data, int len)
+{
+ int iter = 0, optlen = 0;
+
+ /* Determining the new total option length is tricky because of the
+ * required padding; the only practical approach is to walk the options
+ * one by one, skipping the padding at the end, to find the actual
+ * option size, from which the new total option length follows.
+ */
+ while (iter < len) {
+ if (data[iter] == IPOPT_END) {
+ break;
+ } else if (data[iter] == IPOPT_NOP) {
+ iter++;
+ } else {
+ iter += data[iter + 1];
+ optlen = iter;
+ }
+ }
+ return optlen;
+}
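To make the walk above concrete, here is a standalone sketch (not kernel code) that applies the same loop to a small sample options buffer — a NOP, one 7-byte option, then end-of-list padding — and shows the 4-byte rounding the caller later applies to the header length:

#include <stdio.h>

#define IPOPT_END	0
#define IPOPT_NOP	1

static int actual_opt_len(const unsigned char *data, int len)
{
	int iter = 0, optlen = 0;

	while (iter < len) {
		if (data[iter] == IPOPT_END)
			break;
		if (data[iter] == IPOPT_NOP) {
			iter++;
		} else {
			iter += data[iter + 1];	/* option length byte */
			optlen = iter;
		}
	}
	return optlen;
}

int main(void)
{
	/* NOP + one 7-byte option + end-of-list padding to 12 bytes */
	const unsigned char opts[12] = {
		IPOPT_NOP, 0x83, 7, 0, 0, 0, 0, 0, IPOPT_END, 0, 0, 0,
	};
	int actual = actual_opt_len(opts, sizeof(opts));

	printf("actual option length: %d, padded: %d\n",
	       actual, (actual + 3) & ~3);
	return 0;
}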
+
/**
* cipso_v4_sock_setattr - Add a CIPSO option to a socket
* @sk: the socket
@@ -1953,7 +1976,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
buf = NULL;
req_inet = inet_rsk(req);
- opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
+ opt = unrcu_pointer(xchg(&req_inet->ireq_opt, RCU_INITIALIZER(opt)));
if (opt)
kfree_rcu(opt, rcu);
@@ -1986,7 +2009,6 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
u8 cipso_len;
u8 cipso_off;
unsigned char *cipso_ptr;
- int iter;
int optlen_new;
cipso_off = opt->opt.cipso - sizeof(struct iphdr);
@@ -2006,19 +2028,8 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
memmove(cipso_ptr, cipso_ptr + cipso_len,
opt->opt.optlen - cipso_off - cipso_len);
- /* determining the new total option length is tricky because of
- * the padding necessary, the only thing i can think to do at
- * this point is walk the options one-by-one, skipping the
- * padding at the end to determine the actual option size and
- * from there we can determine the new total option length */
- iter = 0;
- optlen_new = 0;
- while (iter < opt->opt.optlen)
- if (opt->opt.__data[iter] != IPOPT_NOP) {
- iter += opt->opt.__data[iter + 1];
- optlen_new = iter;
- } else
- iter++;
+ optlen_new = cipso_v4_get_actual_opt_len(opt->opt.__data,
+ opt->opt.optlen);
hdr_delta = opt->opt.optlen;
opt->opt.optlen = (optlen_new + 3) & ~3;
hdr_delta -= opt->opt.optlen;
@@ -2238,7 +2249,8 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
*/
int cipso_v4_skbuff_delattr(struct sk_buff *skb)
{
- int ret_val;
+ int ret_val, cipso_len, hdr_len_actual, new_hdr_len_actual, new_hdr_len,
+ hdr_len_delta;
struct iphdr *iph;
struct ip_options *opt = &IPCB(skb)->opt;
unsigned char *cipso_ptr;
@@ -2251,16 +2263,37 @@ int cipso_v4_skbuff_delattr(struct sk_buff *skb)
if (ret_val < 0)
return ret_val;
- /* the easiest thing to do is just replace the cipso option with noop
- * options since we don't change the size of the packet, although we
- * still need to recalculate the checksum */
-
iph = ip_hdr(skb);
cipso_ptr = (unsigned char *)iph + opt->cipso;
- memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
+ cipso_len = cipso_ptr[1];
+
+ hdr_len_actual = sizeof(struct iphdr) +
+ cipso_v4_get_actual_opt_len((unsigned char *)(iph + 1),
+ opt->optlen);
+ new_hdr_len_actual = hdr_len_actual - cipso_len;
+ new_hdr_len = (new_hdr_len_actual + 3) & ~3;
+ hdr_len_delta = (iph->ihl << 2) - new_hdr_len;
+
+ /* 1. shift any options after CIPSO to the left */
+ memmove(cipso_ptr, cipso_ptr + cipso_len,
+ new_hdr_len_actual - opt->cipso);
+ /* 2. move the whole IP header to its new place */
+ memmove((unsigned char *)iph + hdr_len_delta, iph, new_hdr_len_actual);
+ /* 3. adjust the skb layout */
+ skb_pull(skb, hdr_len_delta);
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ /* 4. re-fill new padding with IPOPT_END (may now be longer) */
+ memset((unsigned char *)iph + new_hdr_len_actual, IPOPT_END,
+ new_hdr_len - new_hdr_len_actual);
+
+ opt->optlen -= hdr_len_delta;
opt->cipso = 0;
opt->is_changed = 1;
-
+ if (hdr_len_delta != 0) {
+ iph->ihl = new_hdr_len >> 2;
+ iph_set_totlen(iph, skb->len);
+ }
ip_send_check(iph);
return 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 96accde527da..d09f557eaa77 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1887,10 +1887,11 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
goto done;
if (fillargs.ifindex) {
- err = -ENODEV;
dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
- if (!dev)
+ if (!dev) {
+ err = -ENODEV;
goto done;
+ }
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto done;
@@ -1902,7 +1903,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = inet_base_seq(tgt_net);
- for_each_netdev_dump(net, dev, ctx->ifindex) {
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
continue;
@@ -2804,7 +2805,7 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
- RTNL_FLAG_DUMP_UNLOCKED);
+ RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
inet_netconf_dump_devconf,
RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 3968d3f98e08..47378ca41904 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -239,8 +239,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
- kfree_skb(skb);
-
+ WARN_ON(1);
return -EOPNOTSUPP;
}
#endif
@@ -349,6 +348,7 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
{
struct udphdr *uh;
unsigned int len;
+ struct xfrm_offload *xo = xfrm_offload(skb);
len = skb->len + esp->tailen - skb_transport_offset(skb);
if (len + sizeof(struct iphdr) > IP_MAX_MTU)
@@ -360,7 +360,12 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
uh->len = htons(len);
uh->check = 0;
- *skb_mac_header(skb) = IPPROTO_UDP;
+ /* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload
+ * data path, which means that esp_output_udp_encap is called outside of the XFRM stack.
+ * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it.
+ */
+ if (!xo || encap_type != UDP_ENCAP_ESPINUDP)
+ *skb_mac_header(skb) = IPPROTO_UDP;
return (struct ip_esp_hdr *)(uh + 1);
}
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index b3271957ad9a..80c4ea0e12f4 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -56,6 +56,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
(xfrm_address_t *)&ip_hdr(skb)->daddr,
spi, IPPROTO_ESP, AF_INET);
+
+ if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ /* non-offload path will record the error and audit log */
+ xfrm_state_put(x);
+ x = NULL;
+ }
+
if (!x)
goto out_reset;
@@ -264,6 +271,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
struct esp_info esp;
bool hw_offload = true;
__u32 seq;
+ int encap_type = 0;
esp.inplace = true;
@@ -296,8 +304,10 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
esp.esph = ip_esp_hdr(skb);
+ if (x->encap)
+ encap_type = x->encap->encap_type;
- if (!hw_offload || !skb_is_gso(skb)) {
+ if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
esp.nfrags = esp_output_head(x, skb, &esp);
if (esp.nfrags < 0)
return esp.nfrags;
@@ -324,6 +334,18 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+ if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
+ /* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
+ * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
+ * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
+ * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
+ * Therefore, the protocol field needs to be corrected.
+ */
+ ip_hdr(skb)->protocol = IPPROTO_UDP;
+
+ esph->seq_no = htonl(seq);
+ }
+
ip_hdr(skb)->tot_len = htons(skb->len);
ip_send_check(ip_hdr(skb));
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c484b1c0fc00..7ad2cafb9276 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1050,11 +1050,6 @@ next:
e++;
}
}
-
- /* Don't let NLM_DONE coalesce into a message, even if it could.
- * Some user space expects NLM_DONE in a separate recv().
- */
- err = skb->len;
out:
cb->args[1] = e;
@@ -1665,5 +1660,5 @@ void __init ip_fib_init(void)
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
- RTNL_FLAG_DUMP_UNLOCKED);
+ RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f669da98d11d..2b57cd2b96e2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1030,7 +1030,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
bool ecn_ca = false;
nla_strscpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
+ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
} else {
if (nla_len(nla) != sizeof(u32))
return false;
@@ -1459,8 +1459,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
if (!fi)
goto failure;
- fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
- cfg->fc_mx_len, extack);
+ fi->fib_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack);
if (IS_ERR(fi->fib_metrics)) {
err = PTR_ERR(fi->fib_metrics);
kfree(fi);
@@ -2270,6 +2269,15 @@ void fib_select_path(struct net *net, struct fib_result *res,
fib_select_default(fl4, res);
check_saddr:
- if (!fl4->saddr)
- fl4->saddr = fib_result_prefsrc(net, res);
+ if (!fl4->saddr) {
+ struct net_device *l3mdev;
+
+ l3mdev = dev_get_by_index_rcu(net, fl4->flowi4_l3mdev);
+
+ if (!l3mdev ||
+ l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) == l3mdev)
+ fl4->saddr = fib_result_prefsrc(net, res);
+ else
+ fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK);
+ }
}
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index a8494f796dca..0abbc413e0fe 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -433,7 +433,7 @@ next_proto:
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
- if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
+ if (!ops || !ops->callbacks.gro_receive)
goto out;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d81f74ce0f02..64d07b842e73 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -911,6 +911,64 @@ int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
+static struct request_sock *
+reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
+ bool attach_listener)
+{
+ struct request_sock *req;
+
+ req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+ if (!req)
+ return NULL;
+ req->rsk_listener = NULL;
+ if (attach_listener) {
+ if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
+ kmem_cache_free(ops->slab, req);
+ return NULL;
+ }
+ req->rsk_listener = sk_listener;
+ }
+ req->rsk_ops = ops;
+ req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+ sk_node_init(&req_to_sk(req)->sk_node);
+ sk_tx_queue_clear(req_to_sk(req));
+ req->saved_syn = NULL;
+ req->syncookie = 0;
+ req->timeout = 0;
+ req->num_timeout = 0;
+ req->num_retrans = 0;
+ req->sk = NULL;
+ refcount_set(&req->rsk_refcnt, 0);
+
+ return req;
+}
+#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
+
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener,
+ bool attach_listener)
+{
+ struct request_sock *req = reqsk_alloc(ops, sk_listener,
+ attach_listener);
+
+ if (req) {
+ struct inet_request_sock *ireq = inet_rsk(req);
+
+ ireq->ireq_opt = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ ireq->pktopts = NULL;
+#endif
+ atomic64_set(&ireq->ir_cookie, 0);
+ ireq->ireq_state = TCP_NEW_SYN_RECV;
+ write_pnet(&ireq->ireq_net, sock_net(sk_listener));
+ ireq->ireq_family = sk_listener->sk_family;
+ req->timeout = TCP_TIMEOUT_INIT;
+ }
+
+ return req;
+}
+EXPORT_SYMBOL(inet_reqsk_alloc);
+
static struct request_sock *inet_reqsk_clone(struct request_sock *req,
struct sock *sk)
{
@@ -1122,25 +1180,34 @@ drop:
inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}
-static void reqsk_queue_hash_req(struct request_sock *req,
+static bool reqsk_queue_hash_req(struct request_sock *req,
unsigned long timeout)
{
+ bool found_dup_sk = false;
+
+ if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
+ return false;
+
+ /* The timer needs to be set up after a successful insertion. */
timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
mod_timer(&req->rsk_timer, jiffies + timeout);
- inet_ehash_insert(req_to_sk(req), NULL, NULL);
/* before letting lookups find us, make sure all req fields
* are committed to memory and refcnt initialized.
*/
smp_wmb();
refcount_set(&req->rsk_refcnt, 2 + 1);
+ return true;
}
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout)
{
- reqsk_queue_hash_req(req, timeout);
+ if (!reqsk_queue_hash_req(req, timeout))
+ return false;
+
inet_csk_reqsk_queue_added(sk);
+ return true;
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 7adace541fe2..9712cdb8087c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -1383,6 +1383,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
req.sdiag_family = AF_UNSPEC; /* compatibility */
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
+ req.pad = 0;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
@@ -1398,6 +1399,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
req.sdiag_family = rc->idiag_family;
req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
+ req.pad = 0;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index faaec92a46ac..d179a2c84222 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -619,7 +619,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
skb_mark_not_on_list(head);
head->prev = NULL;
head->tstamp = q->stamp;
- head->mono_delivery_time = q->mono_delivery_time;
+ head->tstamp_type = q->tstamp_type;
if (sk)
refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index e28075f0006e..337390ba85b4 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -92,13 +92,22 @@ static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
hlist_nulls_add_head_rcu(&tw->tw_node, list);
}
+static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+{
+ __inet_twsk_schedule(tw, timeo, false);
+}
+
/*
- * Enter the time wait state. This is called with locally disabled BH.
+ * Enter the time wait state.
* Essentially we whip up a timewait bucket, copy the relevant info into it
* from the SK, and mess with hash chains and list linkage.
+ *
+ * The caller must not access @tw anymore after this function returns.
*/
-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo)
+void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
+ struct sock *sk,
+ struct inet_hashinfo *hashinfo,
+ int timeo)
{
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -114,6 +123,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
hashinfo->bhash_size)];
bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);
+ local_bh_disable();
spin_lock(&bhead->lock);
spin_lock(&bhead2->lock);
@@ -129,26 +139,34 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_lock(lock);
+ /* Step 2: Hash TW into tcp ehash chain */
inet_twsk_add_node_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from hash chain */
if (__sk_nulls_del_node_init_rcu(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- spin_unlock(lock);
+ /* Ensure above writes are committed into memory before updating the
+ * refcount.
+ * Provides ordering vs later refcount_inc().
+ */
+ smp_wmb();
/* tw_refcnt is set to 3 because we have :
* - one reference for bhash chain.
* - one reference for ehash chain.
* - one reference for timer.
- * We can use atomic_set() because prior spin_lock()/spin_unlock()
- * committed into memory all tw fields.
* Also note that after this point, we lost our implicit reference
* so we are not allowed to use tw anymore.
*/
refcount_set(&tw->tw_refcnt, 3);
+
+ inet_twsk_schedule(tw, timeo);
+
+ spin_unlock(lock);
+ local_bh_enable();
}
-EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
+EXPORT_SYMBOL_GPL(inet_twsk_hashdance_schedule);
static void tw_timer_handler(struct timer_list *t)
{
@@ -192,7 +210,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_prot = sk->sk_prot_creator;
atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
twsk_net_set(tw, sock_net(sk));
- timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
+ timer_setup(&tw->tw_timer, tw_timer_handler, 0);
/*
* Because we use RCU lookups, we should not set tw_refcnt
* to a non null value before everything is setup for this
@@ -217,7 +235,34 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
*/
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
- if (del_timer_sync(&tw->tw_timer))
+ struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+
+ /* inet_twsk_purge() walks over all sockets, including tw ones,
+ * and removes them via inet_twsk_deschedule_put() after a
+ * refcount_inc_not_zero().
+ *
+ * inet_twsk_hashdance_schedule() must (re)init the refcount before
+ * arming the timer, i.e. inet_twsk_purge can obtain a reference to
+ * a twsk that did not yet schedule the timer.
+ *
+ * The ehash lock synchronizes these two:
+ * After acquiring the lock, the timer is always scheduled (else
+ * timer_shutdown returns false), because hashdance_schedule releases
+ * the ehash lock only after completing the timer initialization.
+ *
+ * Without grabbing the ehash lock, we get:
+ * 1) cpu x sets twsk refcount to 3
+ * 2) cpu y bumps refcount to 4
+ * 3) cpu y calls inet_twsk_deschedule_put() and shuts timer down
+ * 4) cpu x tries to start timer, but mod_timer is a noop post-shutdown
+ * -> timer refcount is never decremented.
+ */
+ spin_lock(lock);
+ /* Makes sure hashdance_schedule() has completed */
+ spin_unlock(lock);
+
+ if (timer_shutdown_sync(&tw->tw_timer))
inet_twsk_kill(tw);
inet_twsk_put(tw);
}
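The lock/unlock pair above is the classic "acquire the lock only to wait out a concurrent critical section" pattern. A minimal pthread sketch of the same idea, with hypothetical names (hashdance, hash_lock, timer_armed); build with -pthread:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile int in_hashdance;
static int timer_armed;

static void *hashdance(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&hash_lock);
	in_hashdance = 1;	/* the other side may now observe us */
	usleep(1000);		/* ...hash insertion, timer arming... */
	timer_armed = 1;
	pthread_mutex_unlock(&hash_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hashdance, NULL);
	while (!in_hashdance)	/* analogous to finding the twsk in ehash */
		;

	/* Equivalent of the spin_lock(lock)/spin_unlock(lock) pair above:
	 * the lock cannot be acquired until hashdance() has finished its
	 * critical section, so timer_armed is guaranteed to be set here.
	 */
	pthread_mutex_lock(&hash_lock);
	pthread_mutex_unlock(&hash_lock);

	printf("timer_armed=%d\n", timer_armed);
	pthread_join(t, NULL);
	return 0;
}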
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 08e2c92e25ab..a92664a5ef2e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -355,7 +355,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
qp->iif = dev->ifindex;
qp->q.stamp = skb->tstamp;
- qp->q.mono_delivery_time = skb->mono_delivery_time;
+ qp->q.tstamp_type = skb->tstamp_type;
qp->q.meat += skb->len;
qp->ecn |= ecn;
add_frag_mem_limit(qp->q.fqdir, skb->truesize);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9500031a1f55..b90d0f78ac80 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -764,7 +764,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
{
struct iphdr *iph;
struct sk_buff *skb2;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
struct rtable *rt = skb_rtable(skb);
unsigned int mtu, hlen, ll_rs;
struct ip_fraglist_iter iter;
@@ -856,7 +856,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
}
}
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, skb);
if (!err)
@@ -912,7 +912,7 @@ slow_path:
/*
* Put this fragment into the sending queue.
*/
- skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb2, tstamp, tstamp_type);
err = output(net, sk, skb2);
if (err)
goto fail;
@@ -1457,7 +1457,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
skb->mark = cork->mark;
- skb->tstamp = cork->transmit_time;
+ if (sk_is_tcp(sk))
+ skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC);
+ else
+ skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid);
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
@@ -1649,7 +1652,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
- nskb->mono_delivery_time = !!transmit_time;
+ if (transmit_time)
+ nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
if (txhash)
skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
ip_push_pending_frames(sk, &fl4);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bccef2fcf620..5cffad42fe8c 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -1099,7 +1099,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
gro_cells_destroy(&tunnel->gro_cells);
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1313,20 +1312,15 @@ int ip_tunnel_init(struct net_device *dev)
dev->needs_free_netdev = true;
dev->priv_destructor = ip_tunnel_dev_free;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = gro_cells_init(&tunnel->gro_cells, dev);
if (err) {
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
return err;
}
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 0e3ee1532848..8ddac1f595ed 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -7,7 +7,7 @@
#include <net/net_namespace.h>
#include <net/tcp.h>
-static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+static int ip_metrics_convert(struct nlattr *fc_mx,
int fc_mx_len, u32 *metrics,
struct netlink_ext_ack *extack)
{
@@ -31,7 +31,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
char tmp[TCP_CA_NAME_MAX];
nla_strscpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
+ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
if (val == TCP_CA_UNSPEC) {
NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
return -EINVAL;
@@ -63,7 +63,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
return 0;
}
-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx,
int fc_mx_len,
struct netlink_ext_ack *extack)
{
@@ -77,7 +77,7 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
if (unlikely(!fib_metrics))
return ERR_PTR(-ENOMEM);
- err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics,
+ err = ip_metrics_convert(fc_mx, fc_mx_len, fib_metrics->metrics,
extack);
if (!err) {
refcount_set(&fib_metrics->refcnt, 1);
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index 69e331799604..73e66a088e25 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
laddr = 0;
indev = __in_dev_get_rcu(skb->dev);
+ if (!indev)
+ return daddr;
in_dev_for_each_ifa_rcu(ifa, indev) {
if (ifa->ifa_flags & IFA_F_SECONDARY)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 823306487a82..619ddc087957 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -946,7 +946,7 @@ static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk,
pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
inet_sk(sk), inet_sk(sk)->inet_num, skb);
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
pr_debug("ping_queue_rcv_skb -> failed\n");
return reason;
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4cb43401e0e0..474dfd263c8b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -301,7 +301,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
ipv4_pktinfo_prepare(sk, skb, true);
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return NET_RX_DROP;
}
@@ -312,7 +312,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
nf_reset_ct(skb);
@@ -360,7 +360,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->protocol = htons(ETH_P_IP);
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
- skb->tstamp = sockc->transmit_time;
+ skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid);
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5fd54103174f..54512acbead7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,7 +129,8 @@ struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
@@ -825,22 +826,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
__ip_do_redirect(rt, skb, &fl4, true);
}
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
struct rtable *rt = dst_rtable(dst);
- struct dst_entry *ret = dst;
- if (rt) {
- if (dst->obsolete > 0) {
- ip_rt_put(rt);
- ret = NULL;
- } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
- rt->dst.expires) {
- ip_rt_put(rt);
- ret = NULL;
- }
- }
- return ret;
+ if ((dst->obsolete > 0) ||
+ (rt->rt_flags & RTCF_REDIRECTED) ||
+ rt->dst.expires)
+ sk_dst_reset(sk);
}
/*
@@ -1487,7 +1481,6 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
struct uncached_list {
spinlock_t lock;
struct list_head head;
- struct list_head quarantine;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
@@ -1538,7 +1531,7 @@ void rt_flush_dev(struct net_device *dev)
rt->dst.dev = blackhole_netdev;
netdev_ref_replace(dev, blackhole_netdev,
&rt->dst.dev_tracker, GFP_ATOMIC);
- list_move(&rt->dst.rt_uncached, &ul->quarantine);
+ list_del_init(&rt->dst.rt_uncached);
}
spin_unlock_bh(&ul->lock);
}
@@ -1930,7 +1923,7 @@ static u32 fib_multipath_custom_hash_outer(const struct net *net,
hash_keys.ports.dst = keys.ports.dst;
*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 fib_multipath_custom_hash_inner(const struct net *net,
@@ -1979,7 +1972,7 @@ static u32 fib_multipath_custom_hash_inner(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
hash_keys.ports.dst = keys.ports.dst;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 fib_multipath_custom_hash_skb(const struct net *net,
@@ -2016,7 +2009,7 @@ static u32 fib_multipath_custom_hash_fl4(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
hash_keys.ports.dst = fl4->fl4_dport;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
/* if skb is set it will be used and fl4 can be NULL */
@@ -2037,7 +2030,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 1:
/* skb is currently provided only when forwarding */
@@ -2071,7 +2064,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.ports.dst = fl4->fl4_dport;
hash_keys.basic.ip_proto = fl4->flowi4_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
@@ -2102,7 +2095,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 3:
if (skb)
@@ -3667,7 +3660,6 @@ int __init ip_rt_init(void)
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
- INIT_LIST_HEAD(&ul->quarantine);
spin_lock_init(&ul->lock);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
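The ipv4_negative_advice() hunk above changes the callback contract: instead of returning a (possibly NULL) dst for the caller to store back, the callback now receives the socket and drops the stale cached route itself via sk_dst_reset(). The generic caller in the core is not part of this excerpt, but presumably reduces to something like the sketch below. The flow_hash_from_keys() to fib_multipath_hash_from_keys(net, ...) substitutions feed the per-netns multipath hash seed added by the sysctl_net_ipv4.c hunks later in this diff.

/* Illustrative sketch of the caller side under the new signature;
 * dst_negative_advice() itself is defined outside this excerpt.
 */
static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->ops->negative_advice)
		dst->ops->negative_advice(sk, dst);
}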
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b61d36810fe3..1948d15f1f28 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -496,6 +496,6 @@ out:
out_free:
reqsk_free(req);
out_drop:
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return NULL;
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 162a0a3b6ba5..9140d20eb2d4 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -130,7 +130,8 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
return ret;
}
-static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
+static void inet_get_ping_group_range_table(const struct ctl_table *table,
+ kgid_t *low, kgid_t *high)
{
kgid_t *data = table->data;
struct net *net =
@@ -145,7 +146,8 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
}
/* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
+static void set_ping_group_range(const struct ctl_table *table,
+ kgid_t low, kgid_t high)
{
kgid_t *data = table->data;
struct net *net =
@@ -462,6 +464,61 @@ static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write,
return ret;
}
+
+static u32 proc_fib_multipath_hash_rand_seed __ro_after_init;
+
+static void proc_fib_multipath_hash_init_rand_seed(void)
+{
+ get_random_bytes(&proc_fib_multipath_hash_rand_seed,
+ sizeof(proc_fib_multipath_hash_rand_seed));
+}
+
+static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
+{
+ struct sysctl_fib_multipath_hash_seed new = {
+ .user_seed = user_seed,
+ .mp_seed = (user_seed ? user_seed :
+ proc_fib_multipath_hash_rand_seed),
+ };
+
+ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new);
+}
+
+static int proc_fib_multipath_hash_seed(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct sysctl_fib_multipath_hash_seed *mphs;
+ struct net *net = table->data;
+ struct ctl_table tmp;
+ u32 user_seed;
+ int ret;
+
+ mphs = &net->ipv4.sysctl_fib_multipath_hash_seed;
+ user_seed = mphs->user_seed;
+
+ tmp = *table;
+ tmp.data = &user_seed;
+
+ ret = proc_douintvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+ if (write && ret == 0) {
+ proc_fib_multipath_hash_set_seed(net, user_seed);
+ call_netevent_notifiers(NETEVENT_IPV4_MPATH_HASH_UPDATE, net);
+ }
+
+ return ret;
+}
+#else
+
+static void proc_fib_multipath_hash_init_rand_seed(void)
+{
+}
+
+static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
+{
+}
+
#endif
static struct ctl_table ipv4_table[] = {
@@ -1070,6 +1127,13 @@ static struct ctl_table ipv4_net_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &fib_multipath_hash_fields_all_mask,
},
+ {
+ .procname = "fib_multipath_hash_seed",
+ .data = &init_net,
+ .maxlen = sizeof(u32),
+ .mode = 0644,
+ .proc_handler = proc_fib_multipath_hash_seed,
+ },
#endif
{
.procname = "ip_unprivileged_port_start",
@@ -1501,6 +1565,14 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ONE,
},
+ {
+ .procname = "tcp_rto_min_us",
+ .data = &init_net.ipv4.sysctl_tcp_rto_min_us,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
};
static __net_init int ipv4_sysctl_init_net(struct net *net)
@@ -1540,6 +1612,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
if (!net->ipv4.sysctl_local_reserved_ports)
goto err_ports;
+ proc_fib_multipath_hash_set_seed(net, 0);
+
return 0;
err_ports:
@@ -1574,6 +1648,8 @@ static __init int sysctl_ipv4_init(void)
if (!hdr)
return -ENOMEM;
+ proc_fib_multipath_hash_init_rand_seed();
+
if (register_pernet_subsys(&ipv4_sysctl_ops)) {
unregister_net_sysctl_table(hdr);
return -ENOMEM;
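proc_fib_multipath_hash_seed() above follows a common sysctl idiom: the ctl_table is copied into a temporary whose ->data points at a stack variable, the generic parser runs against that copy, and the value is committed (and a netevent raised) only after a successful write. The new tcp_rto_min_us entry likewise backs the per-netns RTO floor used by tcp_init_sock() further down. A minimal sketch of the same shadow-copy idiom for a hypothetical per-netns u32 (names invented for illustration):

/* Illustrative only; "foo" is a made-up per-netns field. */
static int proc_foo(struct ctl_table *table, int write,
		    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = table->data;
	struct ctl_table tmp = *table;
	u32 val = READ_ONCE(net->ipv4.foo);
	int ret;

	tmp.data = &val;			/* parse into a local copy */
	ret = proc_douintvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret)
		WRITE_ONCE(net->ipv4.foo, val);	/* commit only on success */
	return ret;
}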
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 681b54e1f3a6..e03a342c9162 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,6 +282,7 @@
#include <asm/ioctls.h>
#include <net/busy_poll.h>
#include <net/hotdata.h>
+#include <trace/events/tcp.h>
#include <net/rps.h>
/* Track pending CMSGs. */
@@ -420,6 +421,7 @@ void tcp_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
+ int rto_min_us;
tp->out_of_order_queue = RB_ROOT;
sk->tcp_rtx_queue = RB_ROOT;
@@ -428,7 +430,8 @@ void tcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&tp->tsorted_sent_queue);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
- icsk->icsk_rto_min = TCP_RTO_MIN;
+ rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us);
+ icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us);
icsk->icsk_delack_max = TCP_DELACK_MAX;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
@@ -598,7 +601,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
mask |= EPOLLOUT | EPOLLWRNORM;
}
- /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
smp_rmb();
if (READ_ONCE(sk->sk_err) ||
!skb_queue_empty_lockless(&sk->sk_error_queue))
@@ -1165,6 +1168,9 @@ new_segment:
process_backlog++;
+#ifdef CONFIG_SKB_DECRYPTED
+ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
tcp_skb_entail(sk, skb);
copy = size_goal;
@@ -2646,6 +2652,10 @@ void tcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
break;
+ case TCP_CLOSE_WAIT:
+ if (oldstate == TCP_SYN_RECV)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
@@ -2657,7 +2667,7 @@ void tcp_set_state(struct sock *sk, int state)
inet_put_port(sk);
fallthrough;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
}
@@ -3079,7 +3089,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
- dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
+ dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL)));
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->segs_in = 0;
@@ -4454,7 +4464,7 @@ int tcp_md5_hash_key(struct tcp_sigpool *hp,
EXPORT_SYMBOL(tcp_md5_hash_key);
/* Called with rcu_read_lock() */
-enum skb_drop_reason
+static enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
int family, int l3index, const __u8 *hash_location)
@@ -4474,7 +4484,7 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
if (!key && hash_location) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
- tcp_hash_fail("Unexpected MD5 Hash found", family, skb, "");
+ trace_tcp_hash_md5_unexpected(sk, skb);
return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
}
@@ -4489,29 +4499,90 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
- if (family == AF_INET) {
- tcp_hash_fail("MD5 Hash failed", AF_INET, skb, "%s L3 index %d",
- genhash ? "tcp_v4_calc_md5_hash failed"
- : "", l3index);
- } else {
- if (genhash) {
- tcp_hash_fail("MD5 Hash failed",
- AF_INET6, skb, "L3 index %d",
- l3index);
- } else {
- tcp_hash_fail("MD5 Hash mismatch",
- AF_INET6, skb, "L3 index %d",
- l3index);
- }
- }
+ trace_tcp_hash_md5_mismatch(sk, skb);
return SKB_DROP_REASON_TCP_MD5FAILURE;
}
return SKB_NOT_DROPPED_YET;
}
-EXPORT_SYMBOL(tcp_inbound_md5_hash);
+#else
+static inline enum skb_drop_reason
+tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int l3index, const __u8 *hash_location)
+{
+ return SKB_NOT_DROPPED_YET;
+}
#endif
+/* Called with rcu_read_lock() */
+enum skb_drop_reason
+tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif)
+{
+ const struct tcphdr *th = tcp_hdr(skb);
+ const struct tcp_ao_hdr *aoh;
+ const __u8 *md5_location;
+ int l3index;
+
+ /* Invalid option or two times meet any of auth options */
+ if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
+ trace_tcp_hash_bad_header(sk, skb);
+ return SKB_DROP_REASON_TCP_AUTH_HDR;
+ }
+
+ if (req) {
+ if (tcp_rsk_used_ao(req) != !!aoh) {
+ u8 keyid, rnext, maclen;
+
+ if (aoh) {
+ keyid = aoh->keyid;
+ rnext = aoh->rnext_keyid;
+ maclen = tcp_ao_hdr_maclen(aoh);
+ } else {
+ keyid = rnext = maclen = 0;
+ }
+
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
+ trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen);
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+ }
+ }
+
+ /* sdif set, means packet ingressed via a device
+ * in an L3 domain and dif is set to the l3mdev
+ */
+ l3index = sdif ? dif : 0;
+
+ /* Fast path: unsigned segments */
+ if (likely(!md5_location && !aoh)) {
+ /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid
+ * for the remote peer. On TCP-AO established connection
+ * the last key is impossible to remove, so there's
+ * always at least one current_key.
+ */
+ if (tcp_ao_required(sk, saddr, family, l3index, true)) {
+ trace_tcp_hash_ao_required(sk, skb);
+ return SKB_DROP_REASON_TCP_AONOTFOUND;
+ }
+ if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+ trace_tcp_hash_md5_required(sk, skb);
+ return SKB_DROP_REASON_TCP_MD5NOTFOUND;
+ }
+ return SKB_NOT_DROPPED_YET;
+ }
+
+ if (aoh)
+ return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
+
+ return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
+ l3index, md5_location);
+}
+EXPORT_SYMBOL_GPL(tcp_inbound_hash);
+
void tcp_done(struct sock *sk)
{
struct request_sock *req;
@@ -4576,14 +4647,10 @@ int tcp_abort(struct sock *sk, int err)
bh_lock_sock(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
- WRITE_ONCE(sk->sk_err, err);
- /* This barrier is coupled with smp_rmb() in tcp_poll() */
- smp_wmb();
- sk_error_report(sk);
if (tcp_need_reset(sk->sk_state))
tcp_send_active_reset(sk, GFP_ATOMIC,
SK_RST_REASON_NOT_SPECIFIED);
- tcp_done(sk);
+ tcp_done_with_error(sk, err);
}
bh_unlock_sock(sk);
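tcp_inbound_hash() above folds the MD5 and AO receive-side checks behind a single entry point: parse the options once, reject segments carrying a malformed or duplicated auth option, take the unsigned fast path when no MD5/AO key is configured for the peer, and only then dispatch to the AO or MD5 verifier. The updated call sites are outside this excerpt; a hedged guess at their shape (AF_INET fragment, names as used elsewhere in tcp_ipv4.c):

	/* hypothetical call-site fragment, not taken from this diff */
	drop_reason = tcp_inbound_hash(sk, req, skb,
				       &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
				       AF_INET, dif, sdif);
	if (drop_reason)	/* SKB_NOT_DROPPED_YET is 0 */
		goto discard_and_relse;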
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 781b67a52571..85531437890c 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -16,6 +16,7 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/icmp.h>
+#include <trace/events/tcp.h>
DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_ao_needed, HZ);
@@ -884,17 +885,16 @@ tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb,
const struct tcp_ao_hdr *aoh, struct tcp_ao_key *key,
u8 *traffic_key, u8 *phash, u32 sne, int l3index)
{
- u8 maclen = aoh->length - sizeof(struct tcp_ao_hdr);
const struct tcphdr *th = tcp_hdr(skb);
+ u8 maclen = tcp_ao_hdr_maclen(aoh);
void *hash_buf = NULL;
if (maclen != tcp_ao_maclen(key)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
atomic64_inc(&info->counters.pkt_bad);
atomic64_inc(&key->pkt_bad);
- tcp_hash_fail("AO hash wrong length", family, skb,
- "%u != %d L3index: %d", maclen,
- tcp_ao_maclen(key), l3index);
+ trace_tcp_ao_wrong_maclen(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
@@ -909,8 +909,8 @@ tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb,
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
atomic64_inc(&info->counters.pkt_bad);
atomic64_inc(&key->pkt_bad);
- tcp_hash_fail("AO hash mismatch", family, skb,
- "L3index: %d", l3index);
+ trace_tcp_ao_mismatch(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
kfree(hash_buf);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
@@ -927,19 +927,21 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
int l3index, const struct tcp_ao_hdr *aoh)
{
const struct tcphdr *th = tcp_hdr(skb);
+ u8 maclen = tcp_ao_hdr_maclen(aoh);
u8 *phash = (u8 *)(aoh + 1); /* hash goes just after the header */
struct tcp_ao_info *info;
enum skb_drop_reason ret;
struct tcp_ao_key *key;
__be32 sisn, disn;
u8 *traffic_key;
+ int state;
u32 sne = 0;
info = rcu_dereference(tcp_sk(sk)->ao_info);
if (!info) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND);
- tcp_hash_fail("AO key not found", family, skb,
- "keyid: %u L3index: %d", aoh->keyid, l3index);
+ trace_tcp_ao_key_not_found(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
return SKB_DROP_REASON_TCP_AOUNEXPECTED;
}
@@ -948,8 +950,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
disn = 0;
}
+ state = READ_ONCE(sk->sk_state);
/* Fast-path */
- if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) {
+ if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
enum skb_drop_reason err;
struct tcp_ao_key *current_key;
@@ -979,6 +982,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
current_key = READ_ONCE(info->current_key);
/* Key rotation: the peer asks us to use new key (RNext) */
if (unlikely(aoh->rnext_keyid != current_key->sndid)) {
+ trace_tcp_ao_rnext_request(sk, skb, current_key->sndid,
+ aoh->rnext_keyid,
+ tcp_ao_hdr_maclen(aoh));
/* If the key is not found we do nothing. */
key = tcp_ao_established_key(info, aoh->rnext_keyid, -1);
if (key)
@@ -988,6 +994,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
return SKB_NOT_DROPPED_YET;
}
+ if (unlikely(state == TCP_CLOSE))
+ return SKB_DROP_REASON_TCP_CLOSE;
+
/* Lookup key based on peer address and keyid.
* current_key and rnext_key must not be used on tcp listen
* sockets as otherwise:
@@ -1001,7 +1010,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
if (th->syn && !th->ack)
goto verify_hash;
- if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
+ if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
/* Make the initial syn the likely case here */
if (unlikely(req)) {
sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
@@ -1018,14 +1027,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
/* no way to figure out initial sisn/disn - drop */
return SKB_DROP_REASON_TCP_FLAGS;
}
- } else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ } else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
disn = info->lisn;
if (th->syn || th->rst)
sisn = th->seq;
else
sisn = info->risn;
} else {
- WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state);
+ WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
verify_hash:
@@ -1041,8 +1050,8 @@ verify_hash:
key_not_found:
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND);
atomic64_inc(&info->counters.key_not_found);
- tcp_hash_fail("Requested by the peer AO key id not found",
- family, skb, "L3index: %d", l3index);
+ trace_tcp_ao_key_not_found(sk, skb, aoh->keyid,
+ aoh->rnext_keyid, maclen);
return SKB_DROP_REASON_TCP_AOKEYNOTFOUND;
}
@@ -1963,8 +1972,10 @@ static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
first = true;
}
- if (cmd.ao_required && tcp_ao_required_verify(sk))
- return -EKEYREJECTED;
+ if (cmd.ao_required && tcp_ao_required_verify(sk)) {
+ err = -EKEYREJECTED;
+ goto out;
+ }
/* For sockets in TCP_CLOSED it's possible set keys that aren't
* matching the future peer (address/port/VRF/etc),
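The tcp_ao.c hunks read sk->sk_state once into a local and reuse it, so a lockless receive path cannot observe the state changing between the established fast-path test and the later listen/SYN-SENT switch; they also add an explicit TCP_CLOSE bail-out and replace the open-coded MAC-length computation with tcp_ao_hdr_maclen(). Judging by the line it replaces, the helper is essentially:

/* Sketch; the helper is declared in the TCP-AO headers outside this
 * excerpt.  The MAC is whatever follows the fixed AO header.
 */
static inline u8 tcp_ao_hdr_maclen(const struct tcp_ao_hdr *aoh)
{
	return aoh->length - sizeof(struct tcp_ao_hdr);
}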
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 28ffcfbeef14..0306d257fa64 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -46,8 +46,7 @@ void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
}
/* Must be called with rcu lock held */
-static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
- const char *name)
+static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name)
{
struct tcp_congestion_ops *ca = tcp_ca_find(name);
@@ -178,7 +177,7 @@ int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_cong
return ret;
}
-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
const struct tcp_congestion_ops *ca;
u32 key = TCP_CA_UNSPEC;
@@ -186,7 +185,7 @@ u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
might_sleep();
rcu_read_lock();
- ca = tcp_ca_find_autoload(net, name);
+ ca = tcp_ca_find_autoload(name);
if (ca) {
key = ca->key;
*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
@@ -203,9 +202,10 @@ char *tcp_ca_get_name_by_key(u32 key, char *buffer)
rcu_read_lock();
ca = tcp_ca_find_key(key);
- if (ca)
- ret = strncpy(buffer, ca->name,
- TCP_CA_NAME_MAX);
+ if (ca) {
+ strscpy(buffer, ca->name, TCP_CA_NAME_MAX);
+ ret = buffer;
+ }
rcu_read_unlock();
return ret;
@@ -283,7 +283,7 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
int ret;
rcu_read_lock();
- ca = tcp_ca_find_autoload(net, name);
+ ca = tcp_ca_find_autoload(name);
if (!ca) {
ret = -ENOENT;
} else if (!bpf_try_module_get(ca, ca->owner)) {
@@ -338,7 +338,7 @@ void tcp_get_default_congestion_control(struct net *net, char *name)
rcu_read_lock();
ca = rcu_dereference(net->ipv4.tcp_congestion_control);
- strncpy(name, ca->name, TCP_CA_NAME_MAX);
+ strscpy(name, ca->name, TCP_CA_NAME_MAX);
rcu_read_unlock();
}
@@ -421,7 +421,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
if (!load)
ca = tcp_ca_find(name);
else
- ca = tcp_ca_find_autoload(sock_net(sk), name);
+ ca = tcp_ca_find_autoload(name);
/* No change asking for existing value */
if (ca == icsk->icsk_ca_ops) {
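Besides dropping the now-unused struct net argument from tcp_ca_find_autoload(), the tcp_cong.c hunks swap strncpy() for strscpy() when copying congestion-control names. The difference matters when a name fills the buffer exactly: strncpy() may leave it unterminated, while strscpy() truncates and always writes the trailing NUL. Illustrative sketch:

/* Illustrative only: why the copy helper was swapped. */
static void copy_ca_name(char *dst, const struct tcp_congestion_ops *ca)
{
	/* strncpy(dst, ca->name, TCP_CA_NAME_MAX) can leave dst without a
	 * trailing '\0' when the name is TCP_CA_NAME_MAX bytes long;
	 * strscpy() truncates, always terminates, and returns the copied
	 * length or -E2BIG.
	 */
	strscpy(dst, ca->name, TCP_CA_NAME_MAX);
}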
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 8ed54e7334a9..0f523cbfe329 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -49,7 +49,7 @@ void tcp_fastopen_ctx_destroy(struct net *net)
{
struct tcp_fastopen_context *ctxt;
- ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);
+ ctxt = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, NULL));
if (ctxt)
call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
@@ -80,9 +80,10 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
if (sk) {
q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
- octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
+ octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx)));
} else {
- octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
+ octx = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx,
+ RCU_INITIALIZER(ctx)));
}
if (octx)
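The xchg() conversions here (and the matching ones in tcp.c, udp.c, af_inet6.c and ipv6_sockglue.c elsewhere in this diff) all use the same sparse-clean idiom for swapping an RCU-managed pointer without __force casts: wrap the new value in RCU_INITIALIZER() and strip the annotation from the returned old value with unrcu_pointer(). In isolation (slot name invented):

/* Illustrative pattern only; "slot" stands for any __rcu pointer field. */
static struct tcp_fastopen_context *
ctx_replace(struct tcp_fastopen_context __rcu **slot,
	    struct tcp_fastopen_context *new)
{
	return unrcu_pointer(xchg(slot, RCU_INITIALIZER(new)));
}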
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9c04a9c8be9d..ff9ab3d01ced 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2129,8 +2129,16 @@ void tcp_clear_retrans(struct tcp_sock *tp)
static inline void tcp_init_undo(struct tcp_sock *tp)
{
tp->undo_marker = tp->snd_una;
+
/* Retransmission still in flight may cause DSACKs later. */
- tp->undo_retrans = tp->retrans_out ? : -1;
+ /* First, account for regular retransmits in flight: */
+ tp->undo_retrans = tp->retrans_out;
+ /* Next, account for TLP retransmits in flight: */
+ if (tp->tlp_high_seq && tp->tlp_retrans)
+ tp->undo_retrans++;
+ /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
+ if (!tp->undo_retrans)
+ tp->undo_retrans = -1;
}
static bool tcp_is_rack(const struct sock *sk)
@@ -2209,6 +2217,7 @@ void tcp_enter_loss(struct sock *sk)
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
+ tp->tlp_high_seq = 0;
tcp_ecn_queue_cwr(tp);
/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
@@ -2782,13 +2791,37 @@ static void tcp_mtup_probe_success(struct sock *sk)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
}
+/* Sometimes we deduce that packets have been dropped due to reasons other than
+ * congestion, like path MTU reductions or failed client TFO attempts. In these
+ * cases we call this function to retransmit as many packets as cwnd allows,
+ * without reducing cwnd. Given that retransmits will set retrans_stamp to a
+ * non-zero value (and may do so in a later calling context due to TSQ), we
+ * also enter CA_Loss so that we track when all retransmitted packets are ACKed
+ * and clear retrans_stamp when that happens (to ensure later recurring RTOs
+ * are using the correct retrans_stamp and don't declare ETIMEDOUT
+ * prematurely).
+ */
+static void tcp_non_congestion_loss_retransmit(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (icsk->icsk_ca_state != TCP_CA_Loss) {
+ tp->high_seq = tp->snd_nxt;
+ tp->snd_ssthresh = tcp_current_ssthresh(sk);
+ tp->prior_ssthresh = 0;
+ tp->undo_marker = 0;
+ tcp_set_ca_state(sk, TCP_CA_Loss);
+ }
+ tcp_xmit_retransmit_queue(sk);
+}
+
/* Do a simple retransmit without using the backoff mechanisms in
* tcp_timer. This is used for path mtu discovery.
* The socket is already locked here.
*/
void tcp_simple_retransmit(struct sock *sk)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int mss;
@@ -2828,14 +2861,7 @@ void tcp_simple_retransmit(struct sock *sk)
* in network, but units changed and effective
* cwnd/ssthresh really reduced now.
*/
- if (icsk->icsk_ca_state != TCP_CA_Loss) {
- tp->high_seq = tp->snd_nxt;
- tp->snd_ssthresh = tcp_current_ssthresh(sk);
- tp->prior_ssthresh = 0;
- tp->undo_marker = 0;
- tcp_set_ca_state(sk, TCP_CA_Loss);
- }
- tcp_xmit_retransmit_queue(sk);
+ tcp_non_congestion_loss_retransmit(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);
@@ -3060,7 +3086,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
return;
if (tcp_try_undo_dsack(sk))
- tcp_try_keep_open(sk);
+ tcp_try_to_open(sk, flag);
tcp_identify_packet_loss(sk, ack_flag);
if (icsk->icsk_ca_state != TCP_CA_Recovery) {
@@ -3578,8 +3604,10 @@ static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
- if (ao && ack < tp->snd_una)
+ if (ao && ack < tp->snd_una) {
ao->snd_sne++;
+ trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne);
+ }
#endif
}
@@ -3604,8 +3632,10 @@ static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq)
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
- if (ao && seq < tp->rcv_nxt)
+ if (ao && seq < tp->rcv_nxt) {
ao->rcv_sne++;
+ trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne);
+ }
#endif
}
@@ -4207,6 +4237,13 @@ void tcp_parse_options(const struct net *net,
*/
break;
#endif
+#ifdef CONFIG_TCP_AO
+ case TCPOPT_AO:
+ /* TCP AO has already been checked
+ * (see tcp_inbound_ao_hash()).
+ */
+ break;
+#endif
case TCPOPT_FASTOPEN:
tcp_parse_fastopen_option(
opsize - TCPOLEN_FASTOPEN_BASE,
@@ -4436,9 +4473,26 @@ static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp,
return SKB_NOT_DROPPED_YET;
}
+
+void tcp_done_with_error(struct sock *sk, int err)
+{
+ /* This barrier is coupled with smp_rmb() in tcp_poll() */
+ WRITE_ONCE(sk->sk_err, err);
+ smp_wmb();
+
+ tcp_write_queue_purge(sk);
+ tcp_done(sk);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk_error_report(sk);
+}
+EXPORT_SYMBOL(tcp_done_with_error);
+
/* When we get a reset we do this. */
void tcp_reset(struct sock *sk, struct sk_buff *skb)
{
+ int err;
+
trace_tcp_receive_reset(sk);
/* mptcp can't tell us to ignore reset pkts,
@@ -4450,24 +4504,17 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
/* We want the right error as BSD sees it (and indeed as we do). */
switch (sk->sk_state) {
case TCP_SYN_SENT:
- WRITE_ONCE(sk->sk_err, ECONNREFUSED);
+ err = ECONNREFUSED;
break;
case TCP_CLOSE_WAIT:
- WRITE_ONCE(sk->sk_err, EPIPE);
+ err = EPIPE;
break;
case TCP_CLOSE:
return;
default:
- WRITE_ONCE(sk->sk_err, ECONNRESET);
+ err = ECONNRESET;
}
- /* This barrier is coupled with smp_rmb() in tcp_poll() */
- smp_wmb();
-
- tcp_write_queue_purge(sk);
- tcp_done(sk);
-
- if (!sock_flag(sk, SOCK_DEAD))
- sk_error_report(sk);
+ tcp_done_with_error(sk, err);
}
/*
@@ -4803,10 +4850,7 @@ static bool tcp_try_coalesce(struct sock *sk,
if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
return false;
- if (!mptcp_skb_can_collapse(to, from))
- return false;
-
- if (skb_cmp_decrypted(from, to))
+ if (!tcp_skb_can_collapse_rx(to, from))
return false;
if (!skb_try_coalesce(to, from, fragstolen, &delta))
@@ -4849,7 +4893,7 @@ static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason reason)
{
sk_drops_add(sk, skb);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
}
/* This one checks to see if we can put data from the
@@ -5362,7 +5406,7 @@ restart:
break;
}
- if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
+ if (n && n != tail && tcp_skb_can_collapse_rx(skb, n) &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
end_of_skbs = false;
break;
@@ -5413,11 +5457,9 @@ restart:
skb = tcp_collapse_one(sk, skb, list, root);
if (!skb ||
skb == tail ||
- !mptcp_skb_can_collapse(nskb, skb) ||
+ !tcp_skb_can_collapse_rx(nskb, skb) ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
goto end;
- if (skb_cmp_decrypted(skb, nskb))
- goto end;
}
}
}
@@ -5956,6 +5998,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
* RFC 5961 4.2 : Send a challenge ack
*/
if (th->syn) {
+ if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
+ TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
+ TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
+ TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
+ goto pass;
syn_challenge:
if (syn_inerr)
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
@@ -5965,6 +6012,7 @@ syn_challenge:
goto discard;
}
+pass:
bpf_skops_parse_hdr(sk, skb);
return true;
@@ -6295,7 +6343,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
skb_rbtree_walk_from(data)
tcp_mark_skb_lost(sk, data);
- tcp_xmit_retransmit_queue(sk);
+ tcp_non_congestion_loss_retransmit(sk);
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
return true;
@@ -6771,6 +6819,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fast_path_on(tp);
if (sk->sk_shutdown & SEND_SHUTDOWN)
tcp_shutdown(sk, SEND_SHUTDOWN);
+
+ if (sk->sk_socket)
+ goto consume;
break;
case TCP_FIN_WAIT1: {
@@ -6981,31 +7032,6 @@ static void tcp_openreq_init(struct request_sock *req,
#endif
}
-struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
- struct sock *sk_listener,
- bool attach_listener)
-{
- struct request_sock *req = reqsk_alloc(ops, sk_listener,
- attach_listener);
-
- if (req) {
- struct inet_request_sock *ireq = inet_rsk(req);
-
- ireq->ireq_opt = NULL;
-#if IS_ENABLED(CONFIG_IPV6)
- ireq->pktopts = NULL;
-#endif
- atomic64_set(&ireq->ir_cookie, 0);
- ireq->ireq_state = TCP_NEW_SYN_RECV;
- write_pnet(&ireq->ireq_net, sock_net(sk_listener));
- ireq->ireq_family = sk_listener->sk_family;
- req->timeout = TCP_TIMEOUT_INIT;
- }
-
- return req;
-}
-EXPORT_SYMBOL(inet_reqsk_alloc);
-
/*
* Return true if a syncookie should be sent
*/
@@ -7256,7 +7282,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_rsk(req)->tfo_listener = false;
if (!want_cookie) {
req->timeout = tcp_timeout_init((struct sock *)req);
- inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+ req->timeout))) {
+ reqsk_free(req);
+ return 0;
+ }
+
}
af_ops->send_synack(sk, dst, &fl, req, &foc,
!want_cookie ? TCP_SYNACK_NORMAL :
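Two things stand out in the tcp_input.c section: tcp_done_with_error() becomes the one place that publishes sk_err with the smp_wmb() that tcp_poll()'s smp_rmb() (see the tcp.c hunk earlier) pairs with, and tcp_conn_request() now checks the result of inet_csk_reqsk_queue_hash_add(), freeing the request socket when hashing fails instead of still sending a SYNACK. The latter implies the helper's return type changed from void to bool in a hunk outside this excerpt; presumably along these lines:

/* Assumed prototype after this series (the declaration lives in
 * inet_connection_sock.h, not shown here): returns false when the
 * request could not be inserted, e.g. because an equivalent request
 * is already hashed.
 */
bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout);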
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 30ef0c8f5e92..fd17f25ff288 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -93,7 +93,9 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
-static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
+static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
@@ -114,6 +116,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
const struct inet_timewait_sock *tw = inet_twsk(sktw);
const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
struct tcp_sock *tp = tcp_sk(sk);
+ int ts_recent_stamp;
if (reuse == 2) {
/* Still does not detect *everything* that goes through
@@ -152,10 +155,11 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
If TW bucket has been already destroyed we fall back to VJ's scheme
and use initial timestamp retrieved from peer table.
*/
- if (tcptw->tw_ts_recent_stamp &&
+ ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
+ if (ts_recent_stamp &&
(!twp || (reuse && time_after32(ktime_get_seconds(),
- tcptw->tw_ts_recent_stamp)))) {
- /* inet_twsk_hashdance() sets sk_refcnt after putting twsk
+ ts_recent_stamp)))) {
+ /* inet_twsk_hashdance_schedule() sets sk_refcnt after putting twsk
* and releasing the bucket lock.
*/
if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt)))
@@ -178,8 +182,8 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
if (!seq)
seq = 1;
WRITE_ONCE(tp->write_seq, seq);
- tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
- tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ tp->rx_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent);
+ tp->rx_opt.ts_recent_stamp = ts_recent_stamp;
}
return 1;
@@ -611,15 +615,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
- if (!sock_owned_by_user(sk)) {
- WRITE_ONCE(sk->sk_err, err);
-
- sk_error_report(sk);
-
- tcp_done(sk);
- } else {
+ if (!sock_owned_by_user(sk))
+ tcp_done_with_error(sk, err);
+ else
WRITE_ONCE(sk->sk_err_soft, err);
- }
goto out;
}
@@ -885,7 +884,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
arg.tos = ip_hdr(skb)->tos;
arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
- ctl_sk = this_cpu_read(ipv4_tcp_sk);
+ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
+
sock_net_set(ctl_sk, net);
if (sk) {
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
@@ -910,6 +911,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
sock_net_set(ctl_sk, &init_net);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
local_bh_enable();
#ifdef CONFIG_TCP_MD5SIG
@@ -1005,7 +1007,8 @@ static void tcp_v4_send_ack(const struct sock *sk,
arg.tos = tos;
arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
local_bh_disable();
- ctl_sk = this_cpu_read(ipv4_tcp_sk);
+ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
sock_net_set(ctl_sk, net);
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
@@ -1020,6 +1023,7 @@ static void tcp_v4_send_ack(const struct sock *sk,
sock_net_set(ctl_sk, &init_net);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
local_bh_enable();
}
@@ -1057,19 +1061,17 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
#else
if (0) {
#endif
-#ifdef CONFIG_TCP_MD5SIG
- } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
+ } else if (static_branch_tcp_md5()) {
key.md5_key = tcp_twsk_md5_key(tcptw);
if (key.md5_key)
key.type = TCP_KEY_MD5;
-#endif
}
tcp_v4_send_ack(sk, skb,
tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_tw_tsval(tcptw),
- tcptw->tw_ts_recent,
+ READ_ONCE(tcptw->tw_ts_recent),
tw->tw_bound_dev_if, &key,
tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
tw->tw_tos,
@@ -1131,8 +1133,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
#else
if (0) {
#endif
-#ifdef CONFIG_TCP_MD5SIG
- } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
+ } else if (static_branch_tcp_md5()) {
const union tcp_md5_addr *addr;
int l3index;
@@ -1141,17 +1142,11 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
if (key.md5_key)
key.type = TCP_KEY_MD5;
-#endif
}
- /* RFC 7323 2.3
- * The window field (SEG.WND) of every outgoing segment, with the
- * exception of <SYN> segments, MUST be right-shifted by
- * Rcv.Wind.Shift bits:
- */
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
- req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent),
0, &key,
@@ -1944,7 +1939,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
reset:
tcp_v4_send_reset(rsk, skb, sk_rst_convert_drop_reason(reason));
discard:
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
/* Be careful here. If this function gets more complicated and
* gcc suffers from register pressure on the x86, sk (in %ebx)
* might be destroyed here. This current version compiles correctly,
@@ -2054,8 +2049,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
((TCP_SKB_CB(tail)->tcp_flags ^
TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
- !mptcp_skb_can_collapse(tail, skb) ||
- skb_cmp_decrypted(tail, skb) ||
+ !tcp_skb_can_collapse_rx(tail, skb) ||
thtail->doff != th->doff ||
memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
goto no_coalesce;
@@ -2181,8 +2175,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
int dif = inet_iif(skb);
const struct iphdr *iph;
const struct tcphdr *th;
+ struct sock *sk = NULL;
bool refcounted;
- struct sock *sk;
int ret;
u32 isn;
@@ -2381,7 +2375,7 @@ bad_packet:
discard_it:
SKB_DR_OR(drop_reason, NOT_SPECIFIED);
/* Discard frame. */
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
discard_and_relse:
@@ -3511,6 +3505,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_shrink_window = 0;
net->ipv4.sysctl_tcp_pingpong_thresh = 1;
+ net->ipv4.sysctl_tcp_rto_min_us = jiffies_to_usecs(TCP_RTO_MIN);
return 0;
}
@@ -3625,7 +3620,9 @@ void __init tcp_v4_init(void)
*/
inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
- per_cpu(ipv4_tcp_sk, cpu) = sk;
+ sk->sk_clockid = CLOCK_MONOTONIC;
+
+ per_cpu(ipv4_tcp_sk.sock, cpu) = sk;
}
if (register_pernet_subsys(&tcp_sk_ops))
panic("Failed to create the TCP control socket.\n");
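Wrapping the per-CPU TCP control socket in struct sock_bh_locked and taking its local_lock around each use keeps the one-socket-per-CPU scheme correct on PREEMPT_RT, where a BH-disabled section no longer implies exclusive ownership of the CPU. The shape of the pattern, reduced to a standalone sketch (resource name invented):

/* Illustrative reduction of the per-CPU + local_lock pattern above. */
struct pcpu_res {
	local_lock_t bh_lock;
	struct sock *sock;
};

static DEFINE_PER_CPU(struct pcpu_res, pcpu_res) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void use_pcpu_res(void)
{
	struct sock *sk;

	local_bh_disable();
	local_lock_nested_bh(&pcpu_res.bh_lock);
	sk = this_cpu_read(pcpu_res.sock);
	/* ... transmit via sk; nothing else on this CPU can use it ... */
	local_unlock_nested_bh(&pcpu_res.bh_lock);
	local_bh_enable();
}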
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index e93df98de3f4..b01eb6d94413 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -619,6 +619,7 @@ static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] =
[TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, },
[TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY,
.len = sizeof(struct in6_addr), },
+ [TCP_METRICS_ATTR_SADDR_IPV4] = { .type = NLA_U32, },
/* Following attributes are not received for GET/DEL,
* we keep them for reference
*/
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b93619b2384b..a19a9dbd3409 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -101,16 +101,18 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcp_options_received tmp_opt;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
bool paws_reject = false;
+ int ts_recent_stamp;
tmp_opt.saw_tstamp = 0;
- if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
+ ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
+ if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
if (tmp_opt.rcv_tsecr)
tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
- tmp_opt.ts_recent = tcptw->tw_ts_recent;
- tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ tmp_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent);
+ tmp_opt.ts_recent_stamp = ts_recent_stamp;
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
@@ -152,8 +154,10 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
if (tmp_opt.saw_tstamp) {
- tcptw->tw_ts_recent_stamp = ktime_get_seconds();
- tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
+ WRITE_ONCE(tcptw->tw_ts_recent_stamp,
+ ktime_get_seconds());
+ WRITE_ONCE(tcptw->tw_ts_recent,
+ tmp_opt.rcv_tsval);
}
inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
@@ -197,8 +201,10 @@ kill:
}
if (tmp_opt.saw_tstamp) {
- tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
- tcptw->tw_ts_recent_stamp = ktime_get_seconds();
+ WRITE_ONCE(tcptw->tw_ts_recent,
+ tmp_opt.rcv_tsval);
+ WRITE_ONCE(tcptw->tw_ts_recent_stamp,
+ ktime_get_seconds());
}
inet_twsk_put(tw);
@@ -225,7 +231,7 @@ kill:
if (th->syn && !th->rst && !th->ack && !paws_reject &&
(after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
(tmp_opt.saw_tstamp &&
- (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
+ (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
if (isn == 0)
isn++;
@@ -339,17 +345,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
- /* tw_timer is pinned, so we need to make sure BH are disabled
- * in following section, otherwise timer handler could run before
- * we complete the initialization.
- */
- local_bh_disable();
- inet_twsk_schedule(tw, timeo);
/* Linkage updates.
* Note that access to tw after this point is illegal.
*/
- inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
- local_bh_enable();
+ inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
@@ -515,9 +514,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
const struct tcp_sock *oldtp;
struct tcp_sock *newtp;
u32 seq;
-#ifdef CONFIG_TCP_AO
- struct tcp_ao_key *ao_key;
-#endif
if (!newsk)
return NULL;
@@ -608,10 +604,14 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
#endif
#ifdef CONFIG_TCP_AO
newtp->ao_info = NULL;
- ao_key = treq->af_specific->ao_lookup(sk, req,
- tcp_rsk(req)->ao_keyid, -1);
- if (ao_key)
- newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
+
+ if (tcp_rsk_used_ao(req)) {
+ struct tcp_ao_key *ao_key;
+
+ ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
+ if (ao_key)
+ newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
+ }
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
@@ -783,8 +783,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* RFC793: "first check sequence number". */
- if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
+ if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(skb)->end_seq,
+ tcp_rsk(req)->rcv_nxt,
+ tcp_rsk(req)->rcv_nxt +
+ tcp_synack_window(req))) {
/* Out of window: send ACK and drop. */
if (!(flg & TCP_FLAG_RST) &&
!tcp_oow_rate_limited(sock_net(sk), skb,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 95618d0e78e4..16c48df8df4c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1301,7 +1301,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
tp = tcp_sk(sk);
prior_wstamp = tp->tcp_wstamp_ns;
tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
- skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+ skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
if (clone_it) {
oskb = skb;
@@ -1655,7 +1655,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
skb_split(skb, buff, len);
- skb_set_delivery_time(buff, skb->tstamp, true);
+ skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC);
tcp_fragment_tstamp(skb, buff);
old_factor = tcp_skb_pcount(skb);
@@ -2764,7 +2764,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
tp->tcp_wstamp_ns = tp->tcp_clock_cache;
- skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+ skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
tcp_init_tso_segs(skb, mss_now);
goto repair; /* Skip network transmission */
@@ -3752,11 +3752,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_SYN_COOKIES
if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
- true);
+ SKB_CLOCK_MONOTONIC);
else
#endif
{
- skb_set_delivery_time(skb, now, true);
+ skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
}
@@ -3768,6 +3768,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_TCP_AO
struct tcp_ao_key *ao_key = NULL;
u8 keyid = tcp_rsk(req)->ao_keyid;
+ u8 rnext = tcp_rsk(req)->ao_rcv_next;
ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
keyid, -1);
@@ -3777,6 +3778,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
* ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
*/
if (unlikely(!ao_key)) {
+ trace_tcp_ao_synack_no_key(sk, keyid, rnext);
rcu_read_unlock();
kfree_skb(skb);
net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
@@ -3843,7 +3845,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
synack_type, &opts);
- skb_set_delivery_time(skb, now, true);
+ skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
tcp_add_tx_delay(skb, tp);
return skb;
@@ -4027,7 +4029,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
- skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
+ skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC);
/* Now full SYN+DATA was cloned and sent (or not),
* remove the SYN from the original skb (syn_data)
@@ -4163,16 +4165,9 @@ EXPORT_SYMBOL(tcp_connect);
u32 tcp_delack_max(const struct sock *sk)
{
- const struct dst_entry *dst = __sk_dst_get(sk);
- u32 delack_max = inet_csk(sk)->icsk_delack_max;
-
- if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) {
- u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
- u32 delack_from_rto_min = max_t(int, 1, rto_min - 1);
+ u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1;
- delack_max = min_t(u32, delack_max, delack_from_rto_min);
- }
- return delack_max;
+ return min(inet_csk(sk)->icsk_delack_max, delack_from_rto_min);
}
/* Send out a delayed ack, the caller does the policy checking
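The rewritten tcp_delack_max() no longer consults a per-route RTAX_RTO_MIN metric; it simply caps the delayed-ACK maximum one tick below the socket's minimum RTO, which is now seeded from the tcp_rto_min_us sysctl added earlier in this diff. A worked example with illustrative numbers (HZ = 1000, stock defaults assumed):

/*
 * tcp_rto_min(sk)       = 200 jiffies   (TCP_RTO_MIN)
 * icsk->icsk_delack_max = 200 jiffies   (TCP_DELACK_MAX)
 *
 *   delack_from_rto_min = max(200, 2) - 1 = 199
 *   tcp_delack_max()    = min(200, 199)   = 199 jiffies
 *
 * If an administrator lowers tcp_rto_min_us to 5000, the RTO floor
 * becomes 5 jiffies and the delayed-ACK cap follows it down to 4,
 * preserving the old intent of keeping delayed ACKs inside the RTO
 * floor without the per-route metric lookup.
 */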
diff --git a/net/ipv4/tcp_sigpool.c b/net/ipv4/tcp_sigpool.c
index 8512cb09ebc0..d8a4f192873a 100644
--- a/net/ipv4/tcp_sigpool.c
+++ b/net/ipv4/tcp_sigpool.c
@@ -10,7 +10,14 @@
#include <net/tcp.h>
static size_t __scratch_size;
-static DEFINE_PER_CPU(void __rcu *, sigpool_scratch);
+struct sigpool_scratch {
+ local_lock_t bh_lock;
+ void __rcu *pad;
+};
+
+static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
struct sigpool_entry {
struct crypto_ahash *hash;
@@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_t size)
break;
}
- old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
+ old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
scratch, lockdep_is_held(&cpool_mutex));
if (!cpu_online(cpu) || !old_scratch) {
kfree(old_scratch);
@@ -93,7 +100,7 @@ static void sigpool_scratch_free(void)
int cpu;
for_each_possible_cpu(cpu)
- kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
+ kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
NULL, lockdep_is_held(&cpool_mutex)));
__scratch_size = 0;
}
@@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RC
/* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
* valid (allocated) until tcp_sigpool_end().
*/
- c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch));
+ local_lock_nested_bh(&sigpool_scratch.bh_lock);
+ c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
return 0;
}
EXPORT_SYMBOL_GPL(tcp_sigpool_start);
@@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH)
{
struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);
+ local_unlock_nested_bh(&sigpool_scratch.bh_lock);
rcu_read_unlock_bh();
ahash_request_free(c->req);
crypto_free_ahash(hash);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 83fe7f62f7f1..4d40615dc8fc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -74,11 +74,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
static void tcp_write_err(struct sock *sk)
{
- WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
- sk_error_report(sk);
-
- tcp_write_queue_purge(sk);
- tcp_done(sk);
+ tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
@@ -483,11 +479,26 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
const struct sk_buff *skb,
u32 rtx_delta)
{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
const struct tcp_sock *tp = tcp_sk(sk);
- const int timeout = TCP_RTO_MAX * 2;
- u32 rcv_delta;
+ int timeout = TCP_RTO_MAX * 2;
+ s32 rcv_delta;
- rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
+ if (user_timeout) {
+ /* If user application specified a TCP_USER_TIMEOUT,
+ * it does not want win 0 packets to 'reset the timer'
+ * while retransmits are not making progress.
+ */
+ if (rtx_delta > user_timeout)
+ return true;
+ timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
+ }
+ /* Note: timer interrupt might have been delayed by at least one jiffy,
+ * and tp->rcv_tstamp might very well have been written recently.
+ * rcv_delta can thus be negative.
+ */
+ rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
if (rcv_delta <= timeout)
return false;
@@ -532,8 +543,6 @@ void tcp_retransmit_timer(struct sock *sk)
if (WARN_ON_ONCE(!skb))
return;
- tp->tlp_high_seq = 0;
-
if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
!((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
/* Receiver dastardly shrinks window. Our retransmits
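tcp_rtx_probe0_timed_out() above honours TCP_USER_TIMEOUT during zero-window probing and, independently, makes rcv_delta signed. The latter matters because of the situation spelled out in the new comment; with concrete jiffies values:

/*
 * Illustrative values:
 *
 *   icsk->icsk_timeout = 1000, tp->rcv_tstamp = 1003
 *   (timer interrupt ran a little late, rcv_tstamp was just refreshed)
 *
 *   as u32:  1000 - 1003 = 4294967293  >  2 * TCP_RTO_MAX -> falsely "timed out"
 *   as s32:  1000 - 1003 = -3          <= 2 * TCP_RTO_MAX -> correctly not
 */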
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 189c9113fe9a..49c622e743e8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -326,6 +326,8 @@ found:
goto fail_unlock;
}
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
sk_add_node_rcu(sk, &hslot->head);
hslot->count++;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -342,7 +344,7 @@ found:
hslot2->count++;
spin_unlock(&hslot2->lock);
}
- sock_set_flag(sk, SOCK_RCU_FREE);
+
error = 0;
fail_unlock:
spin_unlock_bh(&hslot->lock);
@@ -938,8 +940,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb))) {
+ if (is_udplite || dst_xfrm(skb_dst(skb))) {
kfree_skb(skb);
return -EIO;
}
@@ -2074,7 +2075,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
trace_udp_fail_queue_rcv_skb(rc, sk, skb);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -2196,7 +2197,7 @@ csum_error:
drop:
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -2230,7 +2231,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old;
if (dst_hold_safe(dst)) {
- old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
+ old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst)));
dst_release(old);
return old != dst;
}
@@ -2383,7 +2384,7 @@ static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
- struct sock *sk;
+ struct sock *sk = NULL;
struct udphdr *uh;
unsigned short ulen;
struct rtable *rt = skb_rtable(skb);
@@ -2460,7 +2461,7 @@ no_sk:
* Hmm. We got an UDP packet to a port to which we
* don't wanna listen. Ignore it.
*/
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
short_packet:
@@ -2485,7 +2486,7 @@ csum_error:
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
}
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 59448a2dbf2c..aa2e0a28ca61 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -357,6 +357,14 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
else
uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
+ /* On the TX path, CHECKSUM_NONE and CHECKSUM_UNNECESSARY have the same
+ * meaning. However, check for bad offloads in the GSO stack expects the
+ * latter, if the checksum was calculated in software. To vouch for the
+ * segment skbs we actually need to set it on the gso_skb.
+ */
+ if (gso_skb->ip_summed == CHECKSUM_NONE)
+ gso_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
/* update refcount for the packet */
if (copy_dtor) {
int delta = sum_truesize - gso_skb->truesize;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5c424a0e7232..55a0fd589fc8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -863,7 +863,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
}
}
-static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
+static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf)
{
struct net *net;
int old;
@@ -931,7 +931,7 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf)
}
}
-static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
+static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf)
{
struct net *net;
int old;
@@ -1873,7 +1873,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
master, &dst,
scores, hiscore_idx);
- if (scores[hiscore_idx].ifa)
+ if (scores[hiscore_idx].ifa &&
+ scores[hiscore_idx].scopedist >= 0)
goto out;
}
@@ -6378,7 +6379,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
}
}
-static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf)
{
struct net *net = (struct net *)table->extra2;
int old;
@@ -6669,7 +6670,7 @@ void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
}
static
-int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
+int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val)
{
struct net *net = (struct net *)ctl->extra2;
struct inet6_dev *idev;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 8041dc181bd4..90d2c7e3f5e9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -509,7 +509,7 @@ void inet6_cleanup_sock(struct sock *sk)
/* Free tx options */
- opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+ opt = unrcu_pointer(xchg(&np->opt, NULL));
if (opt) {
atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
txopt_put(opt);
@@ -1060,6 +1060,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
.nd_tbl = &nd_tbl,
.ipv6_fragment = ip6_fragment,
.ipv6_dev_find = ipv6_dev_find,
+ .ip6_xmit = ip6_xmit,
};
static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = {
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 34a9a5b9ed00..3920e8aa1031 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -256,8 +256,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
- kfree_skb(skb);
-
+ WARN_ON(1);
return -EOPNOTSUPP;
}
#endif
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 527b7caddbc6..919ebfabbe4e 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -83,6 +83,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
(xfrm_address_t *)&ipv6_hdr(skb)->daddr,
spi, IPPROTO_ESP, AF_INET6);
+
+ if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ /* non-offload path will record the error and audit log */
+ xfrm_state_put(x);
+ x = NULL;
+ }
+
if (!x)
goto out_reset;
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 0601bad79822..ff7e734e335b 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
return orig_dst->lwtstate->orig_output(net, sk, skb);
}
+ local_bh_disable();
dst = dst_cache_get(&ilwt->dst_cache);
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct flowi6 fl6;
@@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (ilwt->connected)
+ if (ilwt->connected) {
+ local_bh_disable();
dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
+ local_bh_enable();
+ }
}
skb_dst_set(skb, dst);
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index 7563f8c6aa87..bf7120ecea1e 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -351,9 +351,9 @@ do_encap:
goto drop;
if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&ilwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -373,9 +373,9 @@ do_encap:
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
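The ILA and IOAM6 lwtunnel output paths above both end up bracketing their dst_cache accesses with local_bh_disable()/local_bh_enable() (IOAM6 previously used preempt_disable(), which neither protects the per-CPU cache from softirq context nor is allowed in that form on PREEMPT_RT). Reduced to a sketch of the lookup side; the store via dst_cache_set_ip6() is wrapped the same way:

/* Illustrative helper only; dst_cache_get() is the real API used above. */
static struct dst_entry *lwt_dst_cache_lookup(struct dst_cache *cache)
{
	struct dst_entry *dst;

	local_bh_disable();
	dst = dst_cache_get(cache);
	local_bh_enable();

	return dst;
}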
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 31d77885bcae..eb111d20615c 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -966,6 +966,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
if (!fib6_nh->rt6i_pcpu)
return;
+ rcu_read_lock();
/* release the reference to this fib entry from
* all of its cached pcpu routes
*/
@@ -974,7 +975,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
struct rt6_info *pcpu_rt;
ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
- pcpu_rt = *ppcpu_rt;
+
+ /* Paired with xchg() in rt6_get_pcpu_route() */
+ pcpu_rt = READ_ONCE(*ppcpu_rt);
/* only dropping the 'from' reference if the cached route
* is using 'match'. The cached pcpu_rt->from only changes
@@ -984,10 +987,11 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
if (pcpu_rt && rcu_access_pointer(pcpu_rt->from) == match) {
struct fib6_info *from;
- from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+ from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
fib6_info_release(from);
}
}
+ rcu_read_unlock();
}
struct fib6_nh_pcpu_arg {
@@ -2510,7 +2514,8 @@ int __init fib6_init(void)
goto out_kmem_cache_create;
ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL,
- inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED);
+ inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED |
+ RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
if (ret)
goto out_unregister_subsys;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index bd5aff97d8b1..9822163428b0 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -236,7 +236,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
if (unlikely(!iph))
goto out;
- NAPI_GRO_CB(skb)->inner_network_offset = off;
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
flush += ntohs(iph->payload_len) != skb->len - hlen;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 27d8725445e3..ab504d31f0cd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -859,7 +859,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
struct ip6_frag_state state;
unsigned int mtu, hlen, nexthdr_offset;
ktime_t tstamp = skb->tstamp;
@@ -955,7 +955,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, skb);
if (!err)
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
@@ -1016,7 +1016,7 @@ slow_path:
/*
* Put this fragment into the sending queue.
*/
- skb_set_delivery_time(frag, tstamp, mono_delivery_time);
+ skb_set_delivery_time(frag, tstamp, tstamp_type);
err = output(net, sk, frag);
if (err)
goto fail;
@@ -1124,6 +1124,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
from = rt ? rcu_dereference(rt->from) : NULL;
err = ip6_route_get_saddr(net, from, &fl6->daddr,
sk ? READ_ONCE(inet6_sk(sk)->srcprefs) : 0,
+ fl6->flowi6_l3mdev,
&fl6->saddr);
rcu_read_unlock();
@@ -1924,7 +1925,10 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = cork->base.mark;
- skb->tstamp = cork->base.transmit_time;
+ if (sk_is_tcp(sk))
+ skb_set_delivery_time(skb, cork->base.transmit_time, SKB_CLOCK_MONOTONIC);
+ else
+ skb_set_delivery_type_by_clockid(skb, cork->base.transmit_time, sk->sk_clockid);
ip6_cork_steal_dst(skb, cork);
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
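These ip6_output.c changes belong to the skb->mono_delivery_time to skb->tstamp_type conversion that also appears in the fragmentation and reassembly hunks below: the transmit time is carried together with a clock type rather than a bare "monotonic" flag. A rough sketch of the stamping decision __ip6_make_skb() now makes, packaged as a helper purely for illustration:

static void stamp_cork_skb(struct sock *sk, struct sk_buff *skb, ktime_t txtime)
{
        if (sk_is_tcp(sk))
                /* TCP timestamps are always on the monotonic clock */
                skb_set_delivery_time(skb, txtime, SKB_CLOCK_MONOTONIC);
        else
                /* other sockets follow the clock selected via SO_TXTIME */
                skb_set_delivery_type_by_clockid(skb, txtime, sk->sk_clockid);
}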
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d4c28ec1bc51..cd342d5015c6 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -111,8 +111,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
}
- opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
- opt);
+ opt = unrcu_pointer(xchg(&inet6_sk(sk)->opt, RCU_INITIALIZER(opt)));
sk_dst_reset(sk);
return opt;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d914b23256ce..254b192c5705 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1936,7 +1936,7 @@ static struct notifier_block ndisc_netdev_notifier = {
};
#ifdef CONFIG_SYSCTL
-static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
+static void ndisc_warn_deprecated_sysctl(const struct ctl_table *ctl,
const char *func, const char *dev_name)
{
static char warncomm[TASK_COMM_LEN];
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 53d255838e6a..581ce055bf52 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -36,6 +36,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
.saddr = iph->saddr,
+ .flowlabel = ip6_flowinfo(iph),
};
int err;
@@ -126,7 +127,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
- bool mono_delivery_time = skb->mono_delivery_time;
+ u8 tstamp_type = skb->tstamp_type;
ktime_t tstamp = skb->tstamp;
struct ip6_frag_state state;
u8 *prevhdr, nexthdr = 0;
@@ -192,7 +193,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
- skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb, tstamp, tstamp_type);
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
@@ -225,7 +226,7 @@ slow_path:
goto blackhole;
}
- skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+ skb_set_delivery_time(skb2, tstamp, tstamp_type);
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 5e1b50c6a44d..6f0844c9315d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -263,7 +263,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
- fq->q.mono_delivery_time = skb->mono_delivery_time;
+ fq->q.tstamp_type = skb->tstamp_type;
fq->q.meat += skb->len;
fq->ecn |= ecn;
if (payload_len > fq->q.max_size)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 2eedf255600b..608fa9d05b55 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -362,14 +362,14 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
/* Charge it to the socket. */
skb_dst_drop(skb);
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return NET_RX_DROP;
}
@@ -390,7 +390,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
nf_reset_ct(skb);
@@ -415,7 +415,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (inet_test_bit(HDRINCL, sk)) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
}
@@ -621,7 +621,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->protocol = htons(ETH_P_IPV6);
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
- skb->tstamp = sockc->transmit_time;
+ skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid);
skb_put(skb, length);
skb_reset_network_header(skb);
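The raw.c hunks, like the tcp_ipv6.c and udp.c ones below, switch from kfree_skb_reason() to sk_skb_reason_drop(), which takes the same drop reason but additionally records the receiving socket so drop tracing can attribute the packet; a NULL socket behaves like the old call. Usage sketch (the function name is hypothetical):

static int example_rcv_checksum(struct sock *sk, struct sk_buff *skb)
{
        if (skb_checksum_complete(skb)) {
                atomic_inc(&sk->sk_drops);
                sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
                return NET_RX_DROP;
        }
        return NET_RX_SUCCESS;
}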
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 327caca64257..a48be617a8ab 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -198,7 +198,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
- fq->q.mono_delivery_time = skb->mono_delivery_time;
+ fq->q.tstamp_type = skb->tstamp_type;
fq->q.meat += skb->len;
fq->ecn |= ecn;
add_frag_mem_limit(fq->q.fqdir, skb->truesize);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bbc2a0dd9314..c752e9ed20e6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -87,7 +87,8 @@ struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev);
@@ -130,7 +131,6 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
struct uncached_list {
spinlock_t lock;
struct list_head head;
- struct list_head quarantine;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
@@ -188,8 +188,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
handled = true;
}
if (handled)
- list_move(&rt->dst.rt_uncached,
- &ul->quarantine);
+ list_del_init(&rt->dst.rt_uncached);
}
spin_unlock_bh(&ul->lock);
}
@@ -367,7 +366,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
- from = xchg((__force struct fib6_info **)&rt->from, NULL);
+ from = unrcu_pointer(xchg(&rt->from, NULL));
fib6_info_release(from);
}
@@ -637,6 +636,8 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
rcu_read_lock();
last_probe = READ_ONCE(fib6_nh->last_probe);
idev = __in6_dev_get(dev);
+ if (!idev)
+ goto out;
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
if (READ_ONCE(neigh->nud_state) & NUD_VALID)
@@ -1408,6 +1409,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
struct rt6_info *prev, **p;
p = this_cpu_ptr(res->nh->rt6i_pcpu);
+ /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
prev = xchg(p, NULL);
if (prev) {
dst_dev_put(&prev->dst);
@@ -1436,7 +1438,7 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
if (res->f6i->fib6_destroying) {
struct fib6_info *from;
- from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+ from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
fib6_info_release(from);
}
@@ -1465,7 +1467,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
/* purge completely the exception to allow releasing the held resources:
* some [sk] cache may keep the dst around for unlimited time
*/
- from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
+ from = unrcu_pointer(xchg(&rt6_ex->rt6i->from, NULL));
fib6_info_release(from);
dst_dev_put(&rt6_ex->rt6i->dst);
@@ -2372,7 +2374,7 @@ static u32 rt6_multipath_custom_hash_outer(const struct net *net,
hash_keys.ports.dst = keys.ports.dst;
*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 rt6_multipath_custom_hash_inner(const struct net *net,
@@ -2421,7 +2423,7 @@ static u32 rt6_multipath_custom_hash_inner(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
hash_keys.ports.dst = keys.ports.dst;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
static u32 rt6_multipath_custom_hash_skb(const struct net *net,
@@ -2460,7 +2462,7 @@ static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
hash_keys.ports.dst = fl6->fl6_dport;
- return flow_hash_from_keys(&hash_keys);
+ return fib_multipath_hash_from_keys(net, &hash_keys);
}
/* if skb is set it will be used and fl6 can be NULL */
@@ -2482,7 +2484,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 1:
if (skb) {
@@ -2514,7 +2516,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.ports.dst = fl6->fl6_dport;
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
@@ -2551,7 +2553,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
- mhash = flow_hash_from_keys(&hash_keys);
+ mhash = fib_multipath_hash_from_keys(net, &hash_keys);
break;
case 3:
if (skb)
@@ -2770,24 +2772,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
}
EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
struct rt6_info *rt = dst_rt6_info(dst);
- if (rt) {
- if (rt->rt6i_flags & RTF_CACHE) {
- rcu_read_lock();
- if (rt6_check_expired(rt)) {
- rt6_remove_exception_rt(rt);
- dst = NULL;
- }
- rcu_read_unlock();
- } else {
- dst_release(dst);
- dst = NULL;
+ if (rt->rt6i_flags & RTF_CACHE) {
+ rcu_read_lock();
+ if (rt6_check_expired(rt)) {
+ /* counteract the dst_release() in sk_dst_reset() */
+ dst_hold(dst);
+ sk_dst_reset(sk);
+
+ rt6_remove_exception_rt(rt);
}
+ rcu_read_unlock();
+ return;
}
- return dst;
+ sk_dst_reset(sk);
}
static void ip6_link_failure(struct sk_buff *skb)
@@ -3601,7 +3603,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
if (!dev)
goto out;
- if (idev->cnf.disable_ipv6) {
+ if (!idev || idev->cnf.disable_ipv6) {
NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
err = -EACCES;
goto out;
@@ -3760,7 +3762,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (!rt)
goto out;
- rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
+ rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
extack);
if (IS_ERR(rt->fib6_metrics)) {
err = PTR_ERR(rt->fib6_metrics);
@@ -5685,7 +5687,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
} else if (dest) {
struct in6_addr saddr_buf;
- if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
+ if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
goto nla_put_failure;
}
@@ -6341,12 +6343,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
if (!write)
return -EINVAL;
- net = (struct net *)ctl->extra1;
- delay = net->ipv6.sysctl.flush_delay;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (ret)
return ret;
+ net = (struct net *)ctl->extra1;
+ delay = net->ipv6.sysctl.flush_delay;
fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
@@ -6754,7 +6756,6 @@ int __init ip6_route_init(void)
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
- INIT_LIST_HEAD(&ul->quarantine);
spin_lock_init(&ul->lock);
}
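The route.c rework above follows the changed ->negative_advice() contract: the hook now receives the socket and clears the socket's cached route itself via sk_dst_reset() instead of returning a replacement dst for the caller to install. A minimal, hedged implementation for a protocol that keeps no cached exceptions would reduce to:

static void example_negative_advice(struct sock *sk, struct dst_entry *dst)
{
        /* drop the socket's cached route; the next lookup repopulates it */
        sk_dst_reset(sk);
}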
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index a013b92cbb86..2c83b7586422 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (unlikely(err))
goto drop;
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
@@ -268,23 +268,21 @@ static int rpl_input(struct sk_buff *skb)
return err;
}
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
- preempt_enable();
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
if (!dst->error) {
- preempt_disable();
dst_cache_set_ip6(&rlwt->cache, dst,
&ipv6_hdr(skb)->saddr);
- preempt_enable();
}
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
+ local_bh_enable();
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index a31521e270f7..180da19c148c 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -21,9 +21,7 @@
#include <net/genetlink.h>
#include <linux/seg6.h>
#include <linux/seg6_genl.h>
-#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
-#endif
bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
{
@@ -437,13 +435,11 @@ static int __net_init seg6_net_init(struct net *net)
net->ipv6.seg6_data = sdata;
-#ifdef CONFIG_IPV6_SEG6_HMAC
if (seg6_hmac_net_init(net)) {
kfree(rcu_dereference_raw(sdata->tun_src));
kfree(sdata);
return -ENOMEM;
}
-#endif
return 0;
}
@@ -452,9 +448,7 @@ static void __net_exit seg6_net_exit(struct net *net)
{
struct seg6_pernet_data *sdata = seg6_pernet(net);
-#ifdef CONFIG_IPV6_SEG6_HMAC
seg6_hmac_net_exit(net);
-#endif
kfree(rcu_dereference_raw(sdata->tun_src));
kfree(sdata);
@@ -520,41 +514,28 @@ int __init seg6_init(void)
if (err)
goto out_unregister_pernet;
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
err = seg6_iptunnel_init();
if (err)
goto out_unregister_genl;
err = seg6_local_init();
- if (err) {
- seg6_iptunnel_exit();
- goto out_unregister_genl;
- }
-#endif
+ if (err)
+ goto out_unregister_iptun;
-#ifdef CONFIG_IPV6_SEG6_HMAC
err = seg6_hmac_init();
if (err)
- goto out_unregister_iptun;
-#endif
+ goto out_unregister_seg6;
pr_info("Segment Routing with IPv6\n");
out:
return err;
-#ifdef CONFIG_IPV6_SEG6_HMAC
-out_unregister_iptun:
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+out_unregister_seg6:
seg6_local_exit();
+out_unregister_iptun:
seg6_iptunnel_exit();
-#endif
-#endif
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
out_unregister_genl:
-#endif
-#if IS_ENABLED(CONFIG_IPV6_SEG6_LWTUNNEL) || IS_ENABLED(CONFIG_IPV6_SEG6_HMAC)
genl_unregister_family(&seg6_genl_family);
-#endif
out_unregister_pernet:
unregister_pernet_subsys(&ip6_segments_ops);
goto out;
@@ -562,13 +543,9 @@ out_unregister_pernet:
void seg6_exit(void)
{
-#ifdef CONFIG_IPV6_SEG6_HMAC
seg6_hmac_exit();
-#endif
-#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
seg6_local_exit();
seg6_iptunnel_exit();
-#endif
genl_unregister_family(&seg6_genl_family);
unregister_pernet_subsys(&ip6_segments_ops);
}
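Dropping the CONFIG_IPV6_SEG6_HMAC and CONFIG_IPV6_SEG6_LWTUNNEL #ifdefs from seg6.c only works if the headers declare the functions unconditionally and supply no-op stubs when the options are off, which also lets seg6_init() unwind through a plain linear chain of labels. The usual stub pattern looks roughly like the sketch below; the exact stubs are assumed to live in the seg6 headers and are shown here only for illustration:

#ifdef CONFIG_IPV6_SEG6_HMAC
int seg6_hmac_init(void);
void seg6_hmac_exit(void);
int seg6_hmac_net_init(struct net *net);
void seg6_hmac_net_exit(struct net *net);
#else
static inline int seg6_hmac_init(void) { return 0; }
static inline void seg6_hmac_exit(void) { }
static inline int seg6_hmac_net_init(struct net *net) { return 0; }
static inline void seg6_hmac_net_exit(struct net *net) { }
#endif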
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index a75df2ec8db0..098632adc9b5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -464,23 +464,21 @@ static int seg6_input_core(struct net *net, struct sock *sk,
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
if (!dst->error) {
- preempt_disable();
dst_cache_set_ip6(&slwt->cache, dst,
&ipv6_hdr(skb)->saddr);
- preempt_enable();
}
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
+ local_bh_enable();
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
@@ -536,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -558,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 24e2b4b494cb..c74705ead984 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -941,8 +941,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx6_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx6_finish);
return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
drop:
@@ -991,8 +991,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx4_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx4_finish);
return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
drop:
@@ -1380,7 +1380,9 @@ drop:
return err;
}
-DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
+DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
{
@@ -1388,6 +1390,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
this_cpu_ptr(&seg6_bpf_srh_states);
struct ipv6_sr_hdr *srh = srh_state->srh;
+ lockdep_assert_held(&srh_state->bh_lock);
if (unlikely(srh == NULL))
return false;
@@ -1408,8 +1411,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
static int input_action_end_bpf(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
- struct seg6_bpf_srh_state *srh_state =
- this_cpu_ptr(&seg6_bpf_srh_states);
+ struct seg6_bpf_srh_state *srh_state;
struct ipv6_sr_hdr *srh;
int ret;
@@ -1420,10 +1422,14 @@ static int input_action_end_bpf(struct sk_buff *skb,
}
advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
- /* preempt_disable is needed to protect the per-CPU buffer srh_state,
- * which is also accessed by the bpf_lwt_seg6_* helpers
+ /* The access to the per-CPU buffer srh_state is protected by running
+ * always in softirq context (with disabled BH). On PREEMPT_RT the
+ * required locking is provided by the following local_lock_nested_bh()
+ * statement. It is also accessed by the bpf_lwt_seg6_* helpers via
+ * bpf_prog_run_save_cb().
*/
- preempt_disable();
+ local_lock_nested_bh(&seg6_bpf_srh_states.bh_lock);
+ srh_state = this_cpu_ptr(&seg6_bpf_srh_states);
srh_state->srh = srh;
srh_state->hdrlen = srh->hdrlen << 3;
srh_state->valid = true;
@@ -1446,15 +1452,15 @@ static int input_action_end_bpf(struct sk_buff *skb,
if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
goto drop;
+ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
- preempt_enable();
if (ret != BPF_REDIRECT)
seg6_lookup_nexthop(skb, NULL, 0);
return dst_input(skb);
drop:
- preempt_enable();
+ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
kfree_skb(skb);
return -EINVAL;
}
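The seg6_local.c hunks replace preempt_disable() around the per-CPU seg6_bpf_srh_states with a local_lock_t embedded in the per-CPU struct: on !PREEMPT_RT local_lock_nested_bh() is essentially a lockdep annotation, because BH-disabled context already serialises per-CPU data, while on PREEMPT_RT it provides the real lock. A stripped-down sketch of the same scheme with illustrative names:

struct pcpu_scratch {
        local_lock_t bh_lock;
        int value;
};

static DEFINE_PER_CPU(struct pcpu_scratch, pcpu_scratch) = {
        .bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Must be called from BH (softirq) context, like the SRv6 input path. */
static void pcpu_scratch_set(int v)
{
        struct pcpu_scratch *s;

        local_lock_nested_bh(&pcpu_scratch.bh_lock);
        s = this_cpu_ptr(&pcpu_scratch);
        s->value = v;
        local_unlock_nested_bh(&pcpu_scratch.bh_lock);
}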
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bfad1e89b6a6..9d83eadd308b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -275,6 +275,6 @@ out:
out_free:
reqsk_free(req);
out_drop:
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return NULL;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4c3605485b68..200fea92f12f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -490,14 +490,10 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
- if (!sock_owned_by_user(sk)) {
- WRITE_ONCE(sk->sk_err, err);
- sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
-
- tcp_done(sk);
- } else {
+ if (!sock_owned_by_user(sk))
+ tcp_done_with_error(sk, err);
+ else
WRITE_ONCE(sk->sk_err_soft, err);
- }
goto out;
case TCP_LISTEN:
break;
@@ -975,7 +971,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
mark = inet_twsk(sk)->tw_mark;
else
mark = READ_ONCE(sk->sk_mark);
- skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
+ skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC);
}
if (txhash) {
/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
@@ -1200,9 +1196,9 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_tw_tsval(tcptw),
- tcptw->tw_ts_recent, tw->tw_bound_dev_if, &key,
- tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
- tw->tw_txhash);
+ READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if,
+ &key, tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel),
+ tw->tw_priority, tw->tw_txhash);
#ifdef CONFIG_TCP_AO
out:
@@ -1272,15 +1268,10 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
- /* RFC 7323 2.3
- * The window field (SEG.WND) of every outgoing segment, with the
- * exception of <SYN> segments, MUST be right-shifted by
- * Rcv.Wind.Shift bits:
- */
tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt,
- req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
&key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
@@ -1444,7 +1435,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
- ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
@@ -1455,6 +1445,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ ip6_dst_store(newsk, dst, NULL, NULL);
+
newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
newnp->saddr = ireq->ir_v6_loc_addr;
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
@@ -1682,7 +1674,7 @@ reset:
discard:
if (opt_skb)
__kfree_skb(opt_skb);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return 0;
csum_err:
reason = SKB_DROP_REASON_TCP_CSUM;
@@ -1755,8 +1747,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
int dif = inet6_iif(skb);
const struct tcphdr *th;
const struct ipv6hdr *hdr;
+ struct sock *sk = NULL;
bool refcounted;
- struct sock *sk;
int ret;
u32 isn;
struct net *net = dev_net(skb->dev);
@@ -1948,7 +1940,7 @@ bad_packet:
discard_it:
SKB_DR_OR(drop_reason, NOT_SPECIFIED);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
discard_and_relse:
@@ -2387,8 +2379,14 @@ static struct inet_protosw tcpv6_protosw = {
static int __net_init tcpv6_net_init(struct net *net)
{
- return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
- SOCK_RAW, IPPROTO_TCP, net);
+ int res;
+
+ res = inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
+ SOCK_RAW, IPPROTO_TCP, net);
+ if (!res)
+ net->ipv6.tcp_sk->sk_clockid = CLOCK_MONOTONIC;
+
+ return res;
}
static void __net_exit tcpv6_net_exit(struct net *net)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c81a07ac0463..6602a2e9cdb5 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -46,7 +46,6 @@
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
-#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@@ -673,7 +672,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
trace_udp_fail_queue_rcv_skb(rc, sk, skb);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -776,7 +775,7 @@ csum_error:
drop:
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -940,8 +939,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
const struct in6_addr *saddr, *daddr;
struct net *net = dev_net(skb->dev);
+ struct sock *sk = NULL;
struct udphdr *uh;
- struct sock *sk;
bool refcounted;
u32 ulen = 0;
@@ -1033,7 +1032,7 @@ no_sk:
__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return 0;
short_packet:
@@ -1054,7 +1053,7 @@ csum_error:
__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
- kfree_skb_reason(skb, reason);
+ sk_skb_reason_drop(sk, skb, reason);
return 0;
}
@@ -1257,8 +1256,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb))) {
+ if (is_udplite || dst_xfrm(skb_dst(skb))) {
kfree_skb(skb);
return -EIO;
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index cc885d3aa9e5..b1d81c4270ab 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -56,12 +56,18 @@ static int xfrm6_get_saddr(struct net *net, int oif,
{
struct dst_entry *dst;
struct net_device *dev;
+ struct inet6_dev *idev;
dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
- dev = ip6_dst_idev(dst)->dev;
+ idev = ip6_dst_idev(dst);
+ if (!idev) {
+ dst_release(dst);
+ return -EHOSTUNREACH;
+ }
+ dev = idev->dev;
ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
dst_release(dst);
return 0;
@@ -284,8 +290,14 @@ int __init xfrm6_init(void)
ret = register_pernet_subsys(&xfrm6_net_ops);
if (ret)
goto out_protocol;
+
+ ret = xfrm_nat_keepalive_init(AF_INET6);
+ if (ret)
+ goto out_nat_keepalive;
out:
return ret;
+out_nat_keepalive:
+ unregister_pernet_subsys(&xfrm6_net_ops);
out_protocol:
xfrm6_protocol_fini();
out_state:
@@ -297,6 +309,7 @@ out_policy:
void xfrm6_fini(void)
{
+ xfrm_nat_keepalive_fini(AF_INET6);
unregister_pernet_subsys(&xfrm6_net_ops);
xfrm6_protocol_fini();
xfrm6_policy_fini();
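The xfrm6_init() hunk adds NAT-keepalive setup using the standard goto-unwind idiom: each new initialisation step gets a label that undoes the step registered just before it, so a mid-way failure tears everything down in reverse order. A generic sketch with hypothetical step names:

static int __init example_init(void)
{
        int ret;

        ret = register_step_a();
        if (ret)
                goto out;

        ret = register_step_b();
        if (ret)
                goto out_unregister_a;

        return 0;

out_unregister_a:
        unregister_step_a();
out:
        return ret;
}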
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 88a34db265d8..1c1decce7f06 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -39,7 +39,6 @@
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
-#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
@@ -61,7 +60,6 @@
#include <linux/atomic.h>
#include "l2tp_core.h"
-#include "trace.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -107,11 +105,23 @@ struct l2tp_net {
/* Lock for write access to l2tp_tunnel_idr */
spinlock_t l2tp_tunnel_idr_lock;
struct idr l2tp_tunnel_idr;
- struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
- /* Lock for write access to l2tp_session_hlist */
- spinlock_t l2tp_session_hlist_lock;
+ /* Lock for write access to l2tp_v[23]_session_idr/htable */
+ spinlock_t l2tp_session_idr_lock;
+ struct idr l2tp_v2_session_idr;
+ struct idr l2tp_v3_session_idr;
+ struct hlist_head l2tp_v3_session_htable[16];
};
+static inline u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
+{
+ return ((u32)tunnel_id) << 16 | session_id;
+}
+
+static inline unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
+{
+ return ((unsigned long)sk) + session_id;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static bool l2tp_sk_is_v6(struct sock *sk)
{
@@ -125,29 +135,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net)
return net_generic(net, l2tp_net_id);
}
-/* Session hash global list for L2TPv3.
- * The session_id SHOULD be random according to RFC3931, but several
- * L2TP implementations use incrementing session_ids. So we do a real
- * hash on the session_id, rather than a simple bitmask.
- */
-static inline struct hlist_head *
-l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
-{
- return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
-}
-
-/* Session hash list.
- * The session_id SHOULD be random according to RFC2661, but several
- * L2TP implementations (Cisco and Microsoft) use incrementing
- * session_ids. So we do a real hash on the session_id, rather than a
- * simple bitmask.
- */
-static inline struct hlist_head *
-l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
-{
- return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
-}
-
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
trace_free_tunnel(tunnel);
@@ -240,66 +227,82 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
-struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
- u32 session_id)
+struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
{
- struct hlist_head *session_list;
+ const struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_session *session;
- session_list = l2tp_session_id_hash(tunnel, session_id);
-
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, session_list, hlist)
- if (session->session_id == session_id) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
+ session = idr_find(&pn->l2tp_v3_session_idr, session_id);
+ if (session && !hash_hashed(&session->hlist) &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
- return session;
+ /* If we get here and session is non-NULL, the session_id
+ * collides with one in another tunnel. If sk is non-NULL,
+ * find the session matching sk.
+ */
+ if (session && sk) {
+ unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);
+
+ hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
+ hlist, key) {
+ if (session->tunnel->sock == sk &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
}
+ }
rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
+EXPORT_SYMBOL_GPL(l2tp_v3_session_get);
-struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
+struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
{
- struct hlist_head *session_list;
+ u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
+ const struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_session *session;
- session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
-
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, session_list, global_hlist)
- if (session->session_id == session_id) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
-
- return session;
- }
+ session = idr_find(&pn->l2tp_v2_session_idr, session_key);
+ if (session && refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
rcu_read_unlock_bh();
return NULL;
}
+EXPORT_SYMBOL_GPL(l2tp_v2_session_get);
+
+struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, u32 session_id)
+{
+ if (pver == L2TP_HDR_VER_2)
+ return l2tp_v2_session_get(net, tunnel_id, session_id);
+ else
+ return l2tp_v3_session_get(net, sk, session_id);
+}
EXPORT_SYMBOL_GPL(l2tp_session_get);
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
{
- int hash;
struct l2tp_session *session;
int count = 0;
rcu_read_lock_bh();
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
- if (++count > nth) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
- return session;
- }
+ list_for_each_entry_rcu(session, &tunnel->session_list, list) {
+ if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ rcu_read_unlock_bh();
+ return session;
}
}
-
rcu_read_unlock_bh();
return NULL;
@@ -313,86 +316,188 @@ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname)
{
struct l2tp_net *pn = l2tp_pernet(net);
- int hash;
+ unsigned long tunnel_id, tmp;
struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
rcu_read_lock_bh();
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
- hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
- if (!strcmp(session->ifname, ifname)) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
-
- return session;
+ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+ if (tunnel) {
+ list_for_each_entry_rcu(session, &tunnel->session_list, list) {
+ if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
}
}
}
-
rcu_read_unlock_bh();
return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
+ struct l2tp_session *session)
+{
+ l2tp_session_inc_refcount(session);
+ WARN_ON_ONCE(session->coll_list);
+ session->coll_list = clist;
+ spin_lock(&clist->lock);
+ list_add(&session->clist, &clist->list);
+ spin_unlock(&clist->lock);
+}
+
+static int l2tp_session_collision_add(struct l2tp_net *pn,
+ struct l2tp_session *session1,
+ struct l2tp_session *session2)
+{
+ struct l2tp_session_coll_list *clist;
+
+ lockdep_assert_held(&pn->l2tp_session_idr_lock);
+
+ if (!session2)
+ return -EEXIST;
+
+ /* If existing session is in IP-encap tunnel, refuse new session */
+ if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
+ return -EEXIST;
+
+ clist = session2->coll_list;
+ if (!clist) {
+ /* First collision. Allocate list to manage the collided sessions
+ * and add the existing session to the list.
+ */
+ clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
+ if (!clist)
+ return -ENOMEM;
+
+ spin_lock_init(&clist->lock);
+ INIT_LIST_HEAD(&clist->list);
+ refcount_set(&clist->ref_count, 1);
+ l2tp_session_coll_list_add(clist, session2);
+ }
+
+ /* If existing session isn't already in the session hlist, add it. */
+ if (!hash_hashed(&session2->hlist))
+ hash_add(pn->l2tp_v3_session_htable, &session2->hlist,
+ session2->hlist_key);
+
+ /* Add new session to the hlist and collision list */
+ hash_add(pn->l2tp_v3_session_htable, &session1->hlist,
+ session1->hlist_key);
+ refcount_inc(&clist->ref_count);
+ l2tp_session_coll_list_add(clist, session1);
+
+ return 0;
+}
+
+static void l2tp_session_collision_del(struct l2tp_net *pn,
+ struct l2tp_session *session)
+{
+ struct l2tp_session_coll_list *clist = session->coll_list;
+ unsigned long session_key = session->session_id;
+ struct l2tp_session *session2;
+
+ lockdep_assert_held(&pn->l2tp_session_idr_lock);
+
+ hash_del(&session->hlist);
+
+ if (clist) {
+ /* Remove session from its collision list. If there
+ * are other sessions with the same ID, replace this
+ * session's IDR entry with that session, otherwise
+ * remove the IDR entry. If this is the last session,
+ * the collision list data is freed.
+ */
+ spin_lock(&clist->lock);
+ list_del_init(&session->clist);
+ session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
+ if (session2) {
+ void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);
+
+ WARN_ON_ONCE(IS_ERR_VALUE(old));
+ } else {
+ void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);
+
+ WARN_ON_ONCE(removed != session);
+ }
+ session->coll_list = NULL;
+ spin_unlock(&clist->lock);
+ if (refcount_dec_and_test(&clist->ref_count))
+ kfree(clist);
+ l2tp_session_dec_refcount(session);
+ }
+}
+
int l2tp_session_register(struct l2tp_session *session,
struct l2tp_tunnel *tunnel)
{
- struct l2tp_session *session_walk;
- struct hlist_head *g_head;
- struct hlist_head *head;
- struct l2tp_net *pn;
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+ struct l2tp_session *other_session = NULL;
+ u32 session_key;
int err;
- head = l2tp_session_id_hash(tunnel, session->session_id);
-
- spin_lock_bh(&tunnel->hlist_lock);
+ spin_lock_bh(&tunnel->list_lock);
if (!tunnel->acpt_newsess) {
err = -ENODEV;
goto err_tlock;
}
- hlist_for_each_entry(session_walk, head, hlist)
- if (session_walk->session_id == session->session_id) {
- err = -EEXIST;
- goto err_tlock;
- }
-
if (tunnel->version == L2TP_HDR_VER_3) {
- pn = l2tp_pernet(tunnel->l2tp_net);
- g_head = l2tp_session_id_hash_2(pn, session->session_id);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
-
+ session_key = session->session_id;
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
+ err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
+ &session_key, session_key, GFP_ATOMIC);
/* IP encap expects session IDs to be globally unique, while
- * UDP encap doesn't.
+ * UDP encap doesn't. This isn't per the RFC, which says that
+ * sessions are identified only by the session ID, but is to
+ * support existing userspace which depends on it.
*/
- hlist_for_each_entry(session_walk, g_head, global_hlist)
- if (session_walk->session_id == session->session_id &&
- (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
- tunnel->encap == L2TP_ENCAPTYPE_IP)) {
- err = -EEXIST;
- goto err_tlock_pnlock;
- }
+ if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ other_session = idr_find(&pn->l2tp_v3_session_idr,
+ session_key);
+ err = l2tp_session_collision_add(pn, session,
+ other_session);
+ }
+ spin_unlock_bh(&pn->l2tp_session_idr_lock);
+ } else {
+ session_key = l2tp_v2_session_key(tunnel->tunnel_id,
+ session->session_id);
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
+ err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
+ &session_key, session_key, GFP_ATOMIC);
+ spin_unlock_bh(&pn->l2tp_session_idr_lock);
+ }
- l2tp_tunnel_inc_refcount(tunnel);
- hlist_add_head_rcu(&session->global_hlist, g_head);
+ if (err) {
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ goto err_tlock;
+ }
+
+ l2tp_tunnel_inc_refcount(tunnel);
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ list_add(&session->list, &tunnel->session_list);
+ spin_unlock_bh(&tunnel->list_lock);
+
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ if (!other_session)
+ idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
} else {
- l2tp_tunnel_inc_refcount(tunnel);
+ idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
}
-
- hlist_add_head_rcu(&session->hlist, head);
- spin_unlock_bh(&tunnel->hlist_lock);
+ spin_unlock_bh(&pn->l2tp_session_idr_lock);
trace_register_session(session);
return 0;
-err_tlock_pnlock:
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
err_tlock:
- spin_unlock_bh(&tunnel->hlist_lock);
+ spin_unlock_bh(&tunnel->list_lock);
return err;
}
@@ -785,19 +890,14 @@ static void l2tp_session_queue_purge(struct l2tp_session *session)
}
}
-/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
- * here. The skb is not on a list when we get here.
- * Returns 0 if the packet was a data packet and was successfully passed on.
- * Returns 1 if the packet was not a good data packet and could not be
- * forwarded. All such packets are passed up to userspace to deal with.
- */
-static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+/* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_session *session = NULL;
- struct l2tp_tunnel *orig_tunnel = tunnel;
+ struct l2tp_tunnel *tunnel = NULL;
+ struct net *net = sock_net(sk);
unsigned char *ptr, *optr;
u16 hdrflags;
- u32 tunnel_id, session_id;
u16 version;
int length;
@@ -807,11 +907,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
__skb_pull(skb, sizeof(struct udphdr));
/* Short packet? */
- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
- pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
- tunnel->name, skb->len);
- goto invalid;
- }
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
+ goto pass;
/* Point to L2TP header */
optr = skb->data;
@@ -834,6 +931,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
ptr += 2;
if (version == L2TP_HDR_VER_2) {
+ u16 tunnel_id, session_id;
+
/* If length is present, skip it */
if (hdrflags & L2TP_HDRFLAG_L)
ptr += 2;
@@ -841,49 +940,35 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
/* Extract tunnel and session ID */
tunnel_id = ntohs(*(__be16 *)ptr);
ptr += 2;
-
- if (tunnel_id != tunnel->tunnel_id) {
- /* We are receiving traffic for another tunnel, probably
- * because we have several tunnels between the same
- * IP/port quadruple, look it up.
- */
- struct l2tp_tunnel *alt_tunnel;
-
- alt_tunnel = l2tp_tunnel_get(tunnel->l2tp_net, tunnel_id);
- if (!alt_tunnel)
- goto pass;
- tunnel = alt_tunnel;
- }
-
session_id = ntohs(*(__be16 *)ptr);
ptr += 2;
+
+ session = l2tp_v2_session_get(net, tunnel_id, session_id);
} else {
+ u32 session_id;
+
ptr += 2; /* skip reserved bits */
- tunnel_id = tunnel->tunnel_id;
session_id = ntohl(*(__be32 *)ptr);
ptr += 4;
- }
- /* Check protocol version */
- if (version != tunnel->version) {
- pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
- tunnel->name, version, tunnel->version);
- goto invalid;
+ session = l2tp_v3_session_get(net, sk, session_id);
}
- /* Find the session context */
- session = l2tp_tunnel_get_session(tunnel, session_id);
if (!session || !session->recv_skb) {
if (session)
l2tp_session_dec_refcount(session);
/* Not found? Pass to userspace to deal with */
- pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
goto pass;
}
- if (tunnel->version == L2TP_HDR_VER_3 &&
+ tunnel = session->tunnel;
+
+ /* Check protocol version */
+ if (version != tunnel->version)
+ goto invalid;
+
+ if (version == L2TP_HDR_VER_3 &&
l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
l2tp_session_dec_refcount(session);
goto invalid;
@@ -892,9 +977,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
l2tp_session_dec_refcount(session);
- if (tunnel != orig_tunnel)
- l2tp_tunnel_dec_refcount(tunnel);
-
return 0;
invalid:
@@ -904,51 +986,14 @@ pass:
/* Put UDP header back */
__skb_push(skb, sizeof(struct udphdr));
- if (tunnel != orig_tunnel)
- l2tp_tunnel_dec_refcount(tunnel);
-
- return 1;
-}
-
-/* UDP encapsulation receive and error receive handlers.
- * See net/ipv4/udp.c for details.
- *
- * Note that these functions are called from inside an
- * RCU-protected region, but without the socket being locked.
- *
- * Hence we use rcu_dereference_sk_user_data to access the
- * tunnel data structure rather the usual l2tp_sk_to_tunnel
- * accessor function.
- */
-int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
- struct l2tp_tunnel *tunnel;
-
- tunnel = rcu_dereference_sk_user_data(sk);
- if (!tunnel)
- goto pass_up;
- if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
- goto pass_up;
-
- if (l2tp_udp_recv_core(tunnel, skb))
- goto pass_up;
-
- return 0;
-
-pass_up:
return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
+/* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
- struct l2tp_tunnel *tunnel;
-
- tunnel = rcu_dereference_sk_user_data(sk);
- if (!tunnel || tunnel->fd < 0)
- return;
-
sk->sk_err = err;
sk_error_report(sk);
@@ -1206,26 +1251,36 @@ end:
return;
}
-/* Remove an l2tp session from l2tp_core's hash lists. */
+/* Remove an l2tp session from l2tp_core's lists. */
static void l2tp_session_unhash(struct l2tp_session *session)
{
struct l2tp_tunnel *tunnel = session->tunnel;
- /* Remove the session from core hashes */
if (tunnel) {
- /* Remove from the per-tunnel hash */
- spin_lock_bh(&tunnel->hlist_lock);
- hlist_del_init_rcu(&session->hlist);
- spin_unlock_bh(&tunnel->hlist_lock);
-
- /* For L2TPv3 we have a per-net hash: remove from there, too */
- if (tunnel->version != L2TP_HDR_VER_2) {
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_del_init_rcu(&session->global_hlist);
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+ struct l2tp_session *removed = session;
+
+ /* Remove from the per-tunnel list */
+ spin_lock_bh(&tunnel->list_lock);
+ list_del_init(&session->list);
+ spin_unlock_bh(&tunnel->list_lock);
+
+ /* Remove from per-net IDR */
+ spin_lock_bh(&pn->l2tp_session_idr_lock);
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ if (hash_hashed(&session->hlist))
+ l2tp_session_collision_del(pn, session);
+ else
+ removed = idr_remove(&pn->l2tp_v3_session_idr,
+ session->session_id);
+ } else {
+ u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
+ session->session_id);
+ removed = idr_remove(&pn->l2tp_v2_session_idr,
+ session_key);
}
+ WARN_ON_ONCE(removed && removed != session);
+ spin_unlock_bh(&pn->l2tp_session_idr_lock);
synchronize_rcu();
}
@@ -1236,28 +1291,22 @@ static void l2tp_session_unhash(struct l2tp_session *session)
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
struct l2tp_session *session;
- int hash;
- spin_lock_bh(&tunnel->hlist_lock);
+ spin_lock_bh(&tunnel->list_lock);
tunnel->acpt_newsess = false;
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
-again:
- hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
- hlist_del_init_rcu(&session->hlist);
-
- spin_unlock_bh(&tunnel->hlist_lock);
- l2tp_session_delete(session);
- spin_lock_bh(&tunnel->hlist_lock);
-
- /* Now restart from the beginning of this hash
- * chain. We always remove a session from the
- * list so we are guaranteed to make forward
- * progress.
- */
- goto again;
- }
+ for (;;) {
+ session = list_first_entry_or_null(&tunnel->session_list,
+ struct l2tp_session, list);
+ if (!session)
+ break;
+ l2tp_session_inc_refcount(session);
+ list_del_init(&session->list);
+ spin_unlock_bh(&tunnel->list_lock);
+ l2tp_session_delete(session);
+ spin_lock_bh(&tunnel->list_lock);
+ l2tp_session_dec_refcount(session);
}
- spin_unlock_bh(&tunnel->hlist_lock);
+ spin_unlock_bh(&tunnel->list_lock);
}
/* Tunnel socket destroy hook for UDP encapsulation */
@@ -1451,8 +1500,9 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
- spin_lock_init(&tunnel->hlist_lock);
+ spin_lock_init(&tunnel->list_lock);
tunnel->acpt_newsess = true;
+ INIT_LIST_HEAD(&tunnel->session_list);
tunnel->encap = encap;
@@ -1462,8 +1512,6 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
/* Init delete workqueue struct */
INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
- INIT_LIST_HEAD(&tunnel->list);
-
err = 0;
err:
if (tunnelp)
@@ -1651,8 +1699,10 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
skb_queue_head_init(&session->reorder_q);
+ session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
INIT_HLIST_NODE(&session->hlist);
- INIT_HLIST_NODE(&session->global_hlist);
+ INIT_LIST_HEAD(&session->clist);
+ INIT_LIST_HEAD(&session->list);
if (cfg) {
session->pwtype = cfg->pw_type;
@@ -1685,15 +1735,13 @@ EXPORT_SYMBOL_GPL(l2tp_session_create);
static __net_init int l2tp_init_net(struct net *net)
{
struct l2tp_net *pn = net_generic(net, l2tp_net_id);
- int hash;
idr_init(&pn->l2tp_tunnel_idr);
spin_lock_init(&pn->l2tp_tunnel_idr_lock);
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
- INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
-
- spin_lock_init(&pn->l2tp_session_hlist_lock);
+ idr_init(&pn->l2tp_v2_session_idr);
+ idr_init(&pn->l2tp_v3_session_idr);
+ spin_lock_init(&pn->l2tp_session_idr_lock);
return 0;
}
@@ -1703,7 +1751,6 @@ static __net_exit void l2tp_exit_net(struct net *net)
struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel = NULL;
unsigned long tunnel_id, tmp;
- int hash;
rcu_read_lock_bh();
idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
@@ -1716,8 +1763,8 @@ static __net_exit void l2tp_exit_net(struct net *net)
flush_workqueue(l2tp_wq);
rcu_barrier();
- for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
- WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
+ idr_destroy(&pn->l2tp_v2_session_idr);
+ idr_destroy(&pn->l2tp_v3_session_idr);
idr_destroy(&pn->l2tp_tunnel_idr);
}
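The l2tp_core.c rework above replaces the per-tunnel and per-net session hash tables with IDRs keyed by the session ID (v3) or by tunnel_id << 16 | session_id (v2), with colliding v3 IDs between UDP tunnels parked on a small per-net hlist. Registration follows a reserve-then-publish pattern: the key is first allocated with a NULL pointer under the lock, so concurrent lookups cannot observe a half-initialised session, and the real pointer is installed with idr_replace() only after registration has fully succeeded. A generic sketch of that pattern with illustrative names:

static int publish_object(struct idr *idr, spinlock_t *lock, u32 key, void *obj)
{
        int err;

        spin_lock_bh(lock);
        /* min == max == key: reserve exactly this key, -ENOSPC if taken */
        err = idr_alloc_u32(idr, NULL, &key, key, GFP_ATOMIC);
        spin_unlock_bh(lock);
        if (err)
                return err == -ENOSPC ? -EEXIST : err;

        /* ... finish initialising obj outside the lock ... */

        spin_lock_bh(lock);
        idr_replace(idr, obj, key);
        spin_unlock_bh(lock);
        return 0;
}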
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 91ebf0a3f499..8ac81bc1bc6f 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -19,14 +19,6 @@
#define L2TP_TUNNEL_MAGIC 0x42114DDA
#define L2TP_SESSION_MAGIC 0x0C04EB7D
-/* Per tunnel session hash table size */
-#define L2TP_HASH_BITS 4
-#define L2TP_HASH_SIZE BIT(L2TP_HASH_BITS)
-
-/* System-wide session hash table size */
-#define L2TP_HASH_BITS_2 8
-#define L2TP_HASH_SIZE_2 BIT(L2TP_HASH_BITS_2)
-
struct sk_buff;
struct l2tp_stats {
@@ -61,10 +53,15 @@ struct l2tp_session_cfg {
char *ifname;
};
+struct l2tp_session_coll_list {
+ spinlock_t lock; /* for access to list */
+ struct list_head list;
+ refcount_t ref_count;
+};
+
/* Represents a session (pseudowire) instance.
* Tracks runtime state including cookies, dataplane packet sequencing, and IO statistics.
- * Is linked into a per-tunnel session hashlist; and in the case of an L2TPv3 session into
- * an additional per-net ("global") hashlist.
+ * Is linked into a per-tunnel session list and a per-net ("global") IDR tree.
*/
#define L2TP_SESSION_NAME_MAX 32
struct l2tp_session {
@@ -88,8 +85,12 @@ struct l2tp_session {
u32 nr_oos; /* NR of last OOS packet */
int nr_oos_count; /* for OOS recovery */
int nr_oos_count_max;
- struct hlist_node hlist; /* hash list node */
+ struct list_head list; /* per-tunnel list node */
refcount_t ref_count;
+ struct hlist_node hlist; /* per-net session hlist */
+ unsigned long hlist_key; /* key for session hlist */
+ struct l2tp_session_coll_list *coll_list; /* session collision list */
+ struct list_head clist; /* for coll_list */
char name[L2TP_SESSION_NAME_MAX]; /* for logging */
char ifname[IFNAMSIZ];
@@ -102,7 +103,6 @@ struct l2tp_session {
int reorder_skip; /* set if skip to next nr */
enum l2tp_pwtype pwtype;
struct l2tp_stats stats;
- struct hlist_node global_hlist; /* global hash list node */
/* Session receive handler for data packets.
* Each pseudowire implementation should implement this callback in order to
@@ -114,7 +114,7 @@ struct l2tp_session {
/* Session close handler.
* Each pseudowire implementation may implement this callback in order to carry
* out pseudowire-specific shutdown actions.
- * The callback is called by core after unhashing the session and purging its
+ * The callback is called by core after unlisting the session and purging its
* reorder queue.
*/
void (*session_close)(struct l2tp_session *session);
@@ -150,7 +150,7 @@ struct l2tp_tunnel_cfg {
/* Represents a tunnel instance.
* Tracks runtime state including IO statistics.
* Holds the tunnel socket (either passed from userspace or directly created by the kernel).
- * Maintains a hashlist of sessions belonging to the tunnel instance.
+ * Maintains a list of sessions belonging to the tunnel instance.
* Is linked into a per-net list of tunnels.
*/
#define L2TP_TUNNEL_NAME_MAX 20
@@ -160,12 +160,11 @@ struct l2tp_tunnel {
unsigned long dead;
struct rcu_head rcu;
- spinlock_t hlist_lock; /* write-protection for session_hlist */
+ spinlock_t list_lock; /* write-protection for session_list */
bool acpt_newsess; /* indicates whether this tunnel accepts
- * new sessions. Protected by hlist_lock.
+ * new sessions. Protected by list_lock.
*/
- struct hlist_head session_hlist[L2TP_HASH_SIZE];
- /* hashed list of sessions, hashed by id */
+ struct list_head session_list; /* list of sessions */
u32 tunnel_id;
u32 peer_tunnel_id;
int version; /* 2=>L2TPv2, 3=>L2TPv3 */
@@ -174,7 +173,6 @@ struct l2tp_tunnel {
enum l2tp_encap_type encap;
struct l2tp_stats stats;
- struct list_head list; /* list node on per-namespace list of tunnels */
struct net *l2tp_net; /* the net we belong to */
refcount_t ref_count;
@@ -224,10 +222,11 @@ void l2tp_session_dec_refcount(struct l2tp_session *session);
*/
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
-struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
- u32 session_id);
-struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id);
+struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id);
+struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id);
+struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, u32 session_id);
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 4595b56d175d..8755ae521154 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -123,17 +123,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
struct l2tp_tunnel *tunnel = v;
struct l2tp_session *session;
int session_count = 0;
- int hash;
rcu_read_lock_bh();
- for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
- /* Session ID of zero is a dummy/reserved value used by pppol2tp */
- if (session->session_id == 0)
- continue;
+ list_for_each_entry_rcu(session, &tunnel->session_list, list) {
+ /* Session ID of zero is a dummy/reserved value used by pppol2tp */
+ if (session->session_id == 0)
+ continue;
- session_count++;
- }
+ session_count++;
}
rcu_read_unlock_bh();
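With the per-tunnel hash table gone, the walk above iterates tunnel->session_list under rcu_read_lock_bh(), while writers serialize on the new tunnel->list_lock (see the l2tp_core.h hunk earlier). The core-side add/remove code is outside this excerpt; a generic sketch of the pattern it presumably follows, using the standard RCU list helpers:

	/* Writer side: protected by tunnel->list_lock. */
	spin_lock_bh(&tunnel->list_lock);
	list_add_tail_rcu(&session->list, &tunnel->session_list);
	spin_unlock_bh(&tunnel->list_lock);

	/* Reader side, as in l2tp_dfs_seq_tunnel_show() above. */
	rcu_read_lock_bh();
	list_for_each_entry_rcu(session, &tunnel->session_list, list)
		session_count++;
	rcu_read_unlock_bh();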
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 19c8cc5289d5..e48aa177d74c 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -140,7 +140,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_get(net, session_id);
+ session = l2tp_v3_session_get(net, NULL, session_id);
if (!session)
goto discard;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8780ec64f376..d217ff1f229e 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -150,7 +150,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_get(net, session_id);
+ session = l2tp_v3_session_get(net, NULL, session_id);
if (!session)
goto discard;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index a901fd14fe3b..d105030520f9 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -61,7 +61,8 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info)
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (tunnel) {
- session = l2tp_tunnel_get_session(tunnel, session_id);
+ session = l2tp_session_get(net, tunnel->sock, tunnel->version,
+ tunnel_id, session_id);
l2tp_tunnel_dec_refcount(tunnel);
}
}
@@ -635,7 +636,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
&cfg);
if (ret >= 0) {
- session = l2tp_tunnel_get_session(tunnel, session_id);
+ session = l2tp_session_get(net, tunnel->sock, tunnel->version,
+ tunnel_id, session_id);
if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 6146e4e67bbb..3596290047b2 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -753,7 +753,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (tunnel->peer_tunnel_id == 0)
tunnel->peer_tunnel_id = info.peer_tunnel_id;
- session = l2tp_tunnel_get_session(tunnel, info.session_id);
+ session = l2tp_session_get(sock_net(sk), tunnel->sock, tunnel->version,
+ info.tunnel_id, info.session_id);
if (session) {
drop_refcnt = true;
@@ -1045,7 +1046,8 @@ static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats,
/* If session_id is set, search the corresponding session in the
* context of this tunnel and record the session's statistics.
*/
- session = l2tp_tunnel_get_session(tunnel, stats->session_id);
+ session = l2tp_session_get(tunnel->l2tp_net, tunnel->sock, tunnel->version,
+ tunnel->tunnel_id, stats->session_id);
if (!session)
return -EBADR;
diff --git a/net/llc/llc_c_st.c b/net/llc/llc_c_st.c
index 2467573b5f84..1c267db304df 100644
--- a/net/llc/llc_c_st.c
+++ b/net/llc/llc_c_st.c
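Every hunk in this file applies the same change: the llc_conn_state_trans objects gain const and the per-state arrays now hold pointers to const, so the transition objects become read-only data. A toy, self-contained sketch of that pattern (the trans type, names, and walker below are illustrative stand-ins, not the llc definitions):

struct trans { int ev; int next_state; };	/* toy stand-in */

static const struct trans t1 = { .ev = 1, .next_state = 2 };
static const struct trans t_end;		/* zero-filled end sentinel */

/* Per-state table: an array of pointers to const transitions,
 * terminated by the shared sentinel, as in the hunks below.
 */
static const struct trans *toy_table[] = { &t1, &t_end };

static int toy_next_state(int ev)
{
	const struct trans **t;

	/* Stop at the all-zero sentinel; only reads are possible
	 * through these const pointers.
	 */
	for (t = toy_table; (*t)->ev || (*t)->next_state; t++)
		if ((*t)->ev == ev)
			return (*t)->next_state;
	return -1;	/* no matching transition in this toy table */
}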
@@ -42,7 +42,7 @@ static const llc_conn_action_t llc_common_actions_1[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_1 = {
+static const struct llc_conn_state_trans llc_common_state_trans_1 = {
.ev = llc_conn_ev_disc_req,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = NONE,
@@ -59,7 +59,7 @@ static const llc_conn_action_t llc_common_actions_2[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_2 = {
+static const struct llc_conn_state_trans llc_common_state_trans_2 = {
.ev = llc_conn_ev_rst_req,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -79,7 +79,7 @@ static const llc_conn_action_t llc_common_actions_3[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_3 = {
+static const struct llc_conn_state_trans llc_common_state_trans_3 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -95,7 +95,7 @@ static const llc_conn_action_t llc_common_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_4 = {
+static const struct llc_conn_state_trans llc_common_state_trans_4 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -114,7 +114,7 @@ static const llc_conn_action_t llc_common_actions_5[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_5 = {
+static const struct llc_conn_state_trans llc_common_state_trans_5 = {
.ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -129,7 +129,7 @@ static const llc_conn_action_t llc_common_actions_6[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_6 = {
+static const struct llc_conn_state_trans llc_common_state_trans_6 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -145,7 +145,7 @@ static const llc_conn_action_t llc_common_actions_7a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_7a = {
+static const struct llc_conn_state_trans llc_common_state_trans_7a = {
.ev = llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -161,7 +161,7 @@ static const llc_conn_action_t llc_common_actions_7b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_7b = {
+static const struct llc_conn_state_trans llc_common_state_trans_7b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -177,7 +177,7 @@ static const llc_conn_action_t llc_common_actions_8a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_8a = {
+static const struct llc_conn_state_trans llc_common_state_trans_8a = {
.ev = llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -193,7 +193,7 @@ static const llc_conn_action_t llc_common_actions_8b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_8b = {
+static const struct llc_conn_state_trans llc_common_state_trans_8b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -209,7 +209,7 @@ static const llc_conn_action_t llc_common_actions_8c[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_8c = {
+static const struct llc_conn_state_trans llc_common_state_trans_8c = {
.ev = llc_conn_ev_rx_bad_pdu,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -225,7 +225,7 @@ static const llc_conn_action_t llc_common_actions_9[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_9 = {
+static const struct llc_conn_state_trans llc_common_state_trans_9 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -247,7 +247,7 @@ static const llc_conn_action_t llc_common_actions_10[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_10 = {
+static const struct llc_conn_state_trans llc_common_state_trans_10 = {
.ev = llc_conn_ev_rx_xxx_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = llc_common_ev_qfyrs_10,
@@ -270,7 +270,7 @@ static const llc_conn_action_t llc_common_actions_11a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11a = {
+static const struct llc_conn_state_trans llc_common_state_trans_11a = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11a,
@@ -292,7 +292,7 @@ static const llc_conn_action_t llc_common_actions_11b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11b = {
+static const struct llc_conn_state_trans llc_common_state_trans_11b = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11b,
@@ -314,7 +314,7 @@ static const llc_conn_action_t llc_common_actions_11c[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11c = {
+static const struct llc_conn_state_trans llc_common_state_trans_11c = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11c,
@@ -336,7 +336,7 @@ static const llc_conn_action_t llc_common_actions_11d[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_common_state_trans_11d = {
+static const struct llc_conn_state_trans llc_common_state_trans_11d = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_common_ev_qfyrs_11d,
@@ -347,7 +347,7 @@ static struct llc_conn_state_trans llc_common_state_trans_11d = {
* Common dummy state transition; must be last entry for all state
* transition groups - it'll be on .bss, so will be zeroed.
*/
-static struct llc_conn_state_trans llc_common_state_trans_end;
+static const struct llc_conn_state_trans llc_common_state_trans_end;
/* LLC_CONN_STATE_ADM transitions */
/* State transitions for LLC_CONN_EV_CONN_REQ event */
@@ -359,7 +359,7 @@ static const llc_conn_action_t llc_adm_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_1 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_1 = {
.ev = llc_conn_ev_conn_req,
.next_state = LLC_CONN_STATE_SETUP,
.ev_qualifiers = NONE,
@@ -378,7 +378,7 @@ static const llc_conn_action_t llc_adm_actions_2[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_2 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_2 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -392,7 +392,7 @@ static const llc_conn_action_t llc_adm_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_3 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_3 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -406,7 +406,7 @@ static const llc_conn_action_t llc_adm_actions_4[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_4 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_4 = {
.ev = llc_conn_ev_rx_xxx_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -419,7 +419,7 @@ static const llc_conn_action_t llc_adm_actions_5[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_adm_state_trans_5 = {
+static const struct llc_conn_state_trans llc_adm_state_trans_5 = {
.ev = llc_conn_ev_rx_any_frame,
.next_state = LLC_CONN_OUT_OF_SVC,
.ev_qualifiers = NONE,
@@ -430,7 +430,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_5 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_adm_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_adm_state_transitions[] = {
[0] = &llc_adm_state_trans_1, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* local_busy */
@@ -453,7 +453,7 @@ static const llc_conn_action_t llc_setup_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_1 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_SETUP,
.ev_qualifiers = NONE,
@@ -477,7 +477,7 @@ static const llc_conn_action_t llc_setup_actions_2[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_2 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_2 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_setup_ev_qfyrs_2,
@@ -498,7 +498,7 @@ static const llc_conn_action_t llc_setup_actions_3[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_3 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_3 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_setup_ev_qfyrs_3,
@@ -519,7 +519,7 @@ static const llc_conn_action_t llc_setup_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_4 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_4 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_setup_ev_qfyrs_4,
@@ -539,7 +539,7 @@ static const llc_conn_action_t llc_setup_actions_5[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_5 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_5 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_setup_ev_qfyrs_5,
@@ -560,7 +560,7 @@ static const llc_conn_action_t llc_setup_actions_7[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_7 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_SETUP,
.ev_qualifiers = llc_setup_ev_qfyrs_7,
@@ -581,7 +581,7 @@ static const llc_conn_action_t llc_setup_actions_8[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_setup_state_trans_8 = {
+static const struct llc_conn_state_trans llc_setup_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_setup_ev_qfyrs_8,
@@ -592,7 +592,7 @@ static struct llc_conn_state_trans llc_setup_state_trans_8 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_setup_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_setup_state_transitions[] = {
[0] = &llc_common_state_trans_end, /* Request */
[1] = &llc_common_state_trans_end, /* local busy */
[2] = &llc_common_state_trans_end, /* init_pf_cycle */
@@ -622,7 +622,7 @@ static const llc_conn_action_t llc_normal_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_1 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_1,
@@ -643,7 +643,7 @@ static const llc_conn_action_t llc_normal_actions_2[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_2 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_2 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_2,
@@ -660,7 +660,7 @@ static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_normal_actions_2_1[1];
-static struct llc_conn_state_trans llc_normal_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_2_1,
@@ -680,7 +680,7 @@ static const llc_conn_action_t llc_normal_actions_3[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_3 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_3 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_normal_ev_qfyrs_3,
@@ -700,7 +700,7 @@ static const llc_conn_action_t llc_normal_actions_4[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_4 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_4 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_normal_ev_qfyrs_4,
@@ -723,7 +723,7 @@ static const llc_conn_action_t llc_normal_actions_5a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_5a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_5a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_5a,
@@ -746,7 +746,7 @@ static const llc_conn_action_t llc_normal_actions_5b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_5b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_5b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_5b,
@@ -769,7 +769,7 @@ static const llc_conn_action_t llc_normal_actions_5c[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_5c = {
+static const struct llc_conn_state_trans llc_normal_state_trans_5c = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_5c,
@@ -790,7 +790,7 @@ static const llc_conn_action_t llc_normal_actions_6a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_6a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_6a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_6a,
@@ -811,7 +811,7 @@ static const llc_conn_action_t llc_normal_actions_6b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_6b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_6b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_normal_ev_qfyrs_6b,
@@ -827,7 +827,7 @@ static const llc_conn_action_t llc_normal_actions_7[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_7 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_7 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -850,7 +850,7 @@ static const llc_conn_action_t llc_normal_actions_8[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_8a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_8a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_8a,
@@ -863,7 +863,7 @@ static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_8b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_8b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_8b,
@@ -884,7 +884,7 @@ static const llc_conn_action_t llc_normal_actions_9a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_9a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_9a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_9a,
@@ -905,7 +905,7 @@ static const llc_conn_action_t llc_normal_actions_9b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_9b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_9b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_9b,
@@ -922,7 +922,7 @@ static const llc_conn_action_t llc_normal_actions_10[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_10 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_10 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -937,7 +937,7 @@ static const llc_conn_action_t llc_normal_actions_11a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_11a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_11a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -952,7 +952,7 @@ static const llc_conn_action_t llc_normal_actions_11b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_11b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_11b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -973,7 +973,7 @@ static const llc_conn_action_t llc_normal_actions_11c[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_11c = {
+static const struct llc_conn_state_trans llc_normal_state_trans_11c = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_11c,
@@ -990,7 +990,7 @@ static const llc_conn_action_t llc_normal_actions_12[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_12 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_12 = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1005,7 +1005,7 @@ static const llc_conn_action_t llc_normal_actions_13a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_13a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_13a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1020,7 +1020,7 @@ static const llc_conn_action_t llc_normal_actions_13b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_13b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_13b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1040,7 +1040,7 @@ static const llc_conn_action_t llc_normal_actions_13c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_13c = {
+static const struct llc_conn_state_trans llc_normal_state_trans_13c = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_13c,
@@ -1057,7 +1057,7 @@ static const llc_conn_action_t llc_normal_actions_14[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_14 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_14 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1080,7 +1080,7 @@ static const llc_conn_action_t llc_normal_actions_15a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_15a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_15a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_15a,
@@ -1103,7 +1103,7 @@ static const llc_conn_action_t llc_normal_actions_15b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_15b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_15b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_15b,
@@ -1125,7 +1125,7 @@ static const llc_conn_action_t llc_normal_actions_16a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_16a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_16a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_16a,
@@ -1147,7 +1147,7 @@ static const llc_conn_action_t llc_normal_actions_16b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_16b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_16b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_16b,
@@ -1164,7 +1164,7 @@ static const llc_conn_action_t llc_normal_actions_17[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_17 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_17 = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -1183,7 +1183,7 @@ static const llc_conn_action_t llc_normal_actions_18[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_18 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_18 = {
.ev = llc_conn_ev_init_p_f_cycle,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_18,
@@ -1205,7 +1205,7 @@ static const llc_conn_action_t llc_normal_actions_19[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_19 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_19 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_normal_ev_qfyrs_19,
@@ -1228,7 +1228,7 @@ static const llc_conn_action_t llc_normal_actions_20a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_20a = {
+static const struct llc_conn_state_trans llc_normal_state_trans_20a = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_normal_ev_qfyrs_20a,
@@ -1251,7 +1251,7 @@ static const llc_conn_action_t llc_normal_actions_20b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_20b = {
+static const struct llc_conn_state_trans llc_normal_state_trans_20b = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_normal_ev_qfyrs_20b,
@@ -1270,7 +1270,7 @@ static const llc_conn_action_t llc_normal_actions_21[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_normal_state_trans_21 = {
+static const struct llc_conn_state_trans llc_normal_state_trans_21 = {
.ev = llc_conn_ev_tx_buffer_full,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_normal_ev_qfyrs_21,
@@ -1281,7 +1281,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_21 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_normal_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_normal_state_transitions[] = {
[0] = &llc_normal_state_trans_1, /* Requests */
[1] = &llc_normal_state_trans_2,
[2] = &llc_normal_state_trans_2_1,
@@ -1354,7 +1354,7 @@ static const llc_conn_action_t llc_busy_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_1 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_1,
@@ -1374,7 +1374,7 @@ static const llc_conn_action_t llc_busy_actions_2[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_2 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_2 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_2,
@@ -1391,7 +1391,7 @@ static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_busy_actions_2_1[1];
-static struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_2_1,
@@ -1411,7 +1411,7 @@ static const llc_conn_action_t llc_busy_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_3 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_3 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_3,
@@ -1431,7 +1431,7 @@ static const llc_conn_action_t llc_busy_actions_4[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_4 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_4 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_4,
@@ -1450,7 +1450,7 @@ static const llc_conn_action_t llc_busy_actions_5[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_5 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_5 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_busy_ev_qfyrs_5,
@@ -1469,7 +1469,7 @@ static const llc_conn_action_t llc_busy_actions_6[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_6 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_6 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_busy_ev_qfyrs_6,
@@ -1488,7 +1488,7 @@ static const llc_conn_action_t llc_busy_actions_7[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_7 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_7 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_7,
@@ -1507,7 +1507,7 @@ static const llc_conn_action_t llc_busy_actions_8[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_8 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_8 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_busy_ev_qfyrs_8,
@@ -1529,7 +1529,7 @@ static const llc_conn_action_t llc_busy_actions_9a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_9a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_9a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_9a,
@@ -1551,7 +1551,7 @@ static const llc_conn_action_t llc_busy_actions_9b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_9b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_9b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_9b,
@@ -1571,7 +1571,7 @@ static const llc_conn_action_t llc_busy_actions_10a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_10a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_10a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_10a,
@@ -1591,7 +1591,7 @@ static const llc_conn_action_t llc_busy_actions_10b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_10b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_10b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_10b,
@@ -1606,7 +1606,7 @@ static const llc_conn_action_t llc_busy_actions_11[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_11 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_11 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1624,7 +1624,7 @@ static const llc_conn_action_t llc_busy_actions_12[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_12 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_12 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1649,7 +1649,7 @@ static const llc_conn_action_t llc_busy_actions_13a[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_13a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_13a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_13a,
@@ -1674,7 +1674,7 @@ static const llc_conn_action_t llc_busy_actions_13b[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_13b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_13b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_13b,
@@ -1697,7 +1697,7 @@ static const llc_conn_action_t llc_busy_actions_14a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_14a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_14a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_14a,
@@ -1720,7 +1720,7 @@ static const llc_conn_action_t llc_busy_actions_14b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_14b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_14b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_14b,
@@ -1735,7 +1735,7 @@ static const llc_conn_action_t llc_busy_actions_15a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_15a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_15a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1750,7 +1750,7 @@ static const llc_conn_action_t llc_busy_actions_15b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_15b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_15b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1770,7 +1770,7 @@ static const llc_conn_action_t llc_busy_actions_15c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_15c = {
+static const struct llc_conn_state_trans llc_busy_state_trans_15c = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_15c,
@@ -1785,7 +1785,7 @@ static const llc_conn_action_t llc_busy_actions_16[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_16 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_16 = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1800,7 +1800,7 @@ static const llc_conn_action_t llc_busy_actions_17a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_17a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_17a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1815,7 +1815,7 @@ static const llc_conn_action_t llc_busy_actions_17b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_17b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_17b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1835,7 +1835,7 @@ static const llc_conn_action_t llc_busy_actions_17c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_17c = {
+static const struct llc_conn_state_trans llc_busy_state_trans_17c = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_17c,
@@ -1850,7 +1850,7 @@ static const llc_conn_action_t llc_busy_actions_18[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_18 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_18 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1872,7 +1872,7 @@ static const llc_conn_action_t llc_busy_actions_19a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_19a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_19a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_19a,
@@ -1894,7 +1894,7 @@ static const llc_conn_action_t llc_busy_actions_19b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_19b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_19b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_19b,
@@ -1915,7 +1915,7 @@ static const llc_conn_action_t llc_busy_actions_20a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_20a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_20a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_20a,
@@ -1936,7 +1936,7 @@ static const llc_conn_action_t llc_busy_actions_20b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_20b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_20b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_20b,
@@ -1953,7 +1953,7 @@ static const llc_conn_action_t llc_busy_actions_21[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_21 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_21 = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -1972,7 +1972,7 @@ static const llc_conn_action_t llc_busy_actions_22[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_22 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_22 = {
.ev = llc_conn_ev_init_p_f_cycle,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_22,
@@ -1993,7 +1993,7 @@ static const llc_conn_action_t llc_busy_actions_23[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_23 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_23 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_23,
@@ -2015,7 +2015,7 @@ static const llc_conn_action_t llc_busy_actions_24a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_24a = {
+static const struct llc_conn_state_trans llc_busy_state_trans_24a = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_24a,
@@ -2037,7 +2037,7 @@ static const llc_conn_action_t llc_busy_actions_24b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_24b = {
+static const struct llc_conn_state_trans llc_busy_state_trans_24b = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_24b,
@@ -2060,7 +2060,7 @@ static const llc_conn_action_t llc_busy_actions_25[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_25 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_25 = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_25,
@@ -2079,7 +2079,7 @@ static const llc_conn_action_t llc_busy_actions_26[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_busy_state_trans_26 = {
+static const struct llc_conn_state_trans llc_busy_state_trans_26 = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_busy_ev_qfyrs_26,
@@ -2090,7 +2090,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_26 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_busy_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_busy_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_busy_state_trans_1,
@@ -2166,7 +2166,7 @@ static const llc_conn_action_t llc_reject_actions_1[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_1 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_1,
@@ -2185,7 +2185,7 @@ static const llc_conn_action_t llc_reject_actions_2[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_2 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_2 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_2,
@@ -2202,7 +2202,7 @@ static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_reject_actions_2_1[1];
-static struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_2_1,
@@ -2222,7 +2222,7 @@ static const llc_conn_action_t llc_reject_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_3 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_3 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_reject_ev_qfyrs_3,
@@ -2241,7 +2241,7 @@ static const llc_conn_action_t llc_reject_actions_4[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_4 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_4 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = llc_reject_ev_qfyrs_4,
@@ -2256,7 +2256,7 @@ static const llc_conn_action_t llc_reject_actions_5a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_5a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_5a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2271,7 +2271,7 @@ static const llc_conn_action_t llc_reject_actions_5b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_5b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_5b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2291,7 +2291,7 @@ static const llc_conn_action_t llc_reject_actions_5c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_5c = {
+static const struct llc_conn_state_trans llc_reject_state_trans_5c = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_5c,
@@ -2305,7 +2305,7 @@ static const llc_conn_action_t llc_reject_actions_6[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_6 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_6 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2330,7 +2330,7 @@ static const llc_conn_action_t llc_reject_actions_7a[] = {
};
-static struct llc_conn_state_trans llc_reject_state_trans_7a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_7a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_7a,
@@ -2354,7 +2354,7 @@ static const llc_conn_action_t llc_reject_actions_7b[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_7b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_7b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_7b,
@@ -2376,7 +2376,7 @@ static const llc_conn_action_t llc_reject_actions_8a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_8a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_8a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_8a,
@@ -2398,7 +2398,7 @@ static const llc_conn_action_t llc_reject_actions_8b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_8b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_8b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_reject_ev_qfyrs_8b,
@@ -2415,7 +2415,7 @@ static const llc_conn_action_t llc_reject_actions_9[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_9 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_9 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -2430,7 +2430,7 @@ static const llc_conn_action_t llc_reject_actions_10a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_10a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_10a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2445,7 +2445,7 @@ static const llc_conn_action_t llc_reject_actions_10b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_10b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_10b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2465,7 +2465,7 @@ static const llc_conn_action_t llc_reject_actions_10c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_10c = {
+static const struct llc_conn_state_trans llc_reject_state_trans_10c = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_10c,
@@ -2480,7 +2480,7 @@ static const llc_conn_action_t llc_reject_actions_11[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_11 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_11 = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2495,7 +2495,7 @@ static const llc_conn_action_t llc_reject_actions_12a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_12a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_12a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2510,7 +2510,7 @@ static const llc_conn_action_t llc_reject_actions_12b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_12b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_12b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2530,7 +2530,7 @@ static const llc_conn_action_t llc_reject_actions_12c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_12c = {
+static const struct llc_conn_state_trans llc_reject_state_trans_12c = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_12c,
@@ -2545,7 +2545,7 @@ static const llc_conn_action_t llc_reject_actions_13[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_13 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_13 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2567,7 +2567,7 @@ static const llc_conn_action_t llc_reject_actions_14a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_14a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_14a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_14a,
@@ -2589,7 +2589,7 @@ static const llc_conn_action_t llc_reject_actions_14b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_14b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_14b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_14b,
@@ -2610,7 +2610,7 @@ static const llc_conn_action_t llc_reject_actions_15a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_15a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_15a = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_15a,
@@ -2631,7 +2631,7 @@ static const llc_conn_action_t llc_reject_actions_15b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_15b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_15b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_15b,
@@ -2647,7 +2647,7 @@ static const llc_conn_action_t llc_reject_actions_16[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_16 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_16 = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2666,7 +2666,7 @@ static const llc_conn_action_t llc_reject_actions_17[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_17 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_17 = {
.ev = llc_conn_ev_init_p_f_cycle,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_17,
@@ -2688,7 +2688,7 @@ static const llc_conn_action_t llc_reject_actions_18[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_18 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_18 = {
.ev = llc_conn_ev_rej_tmr_exp,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_18,
@@ -2710,7 +2710,7 @@ static const llc_conn_action_t llc_reject_actions_19[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_19 = {
+static const struct llc_conn_state_trans llc_reject_state_trans_19 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_19,
@@ -2733,7 +2733,7 @@ static const llc_conn_action_t llc_reject_actions_20a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_20a = {
+static const struct llc_conn_state_trans llc_reject_state_trans_20a = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_20a,
@@ -2756,7 +2756,7 @@ static const llc_conn_action_t llc_reject_actions_20b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_reject_state_trans_20b = {
+static const struct llc_conn_state_trans llc_reject_state_trans_20b = {
.ev = llc_conn_ev_busy_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_reject_ev_qfyrs_20b,
@@ -2767,7 +2767,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_20b = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_reject_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_reject_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_common_state_trans_end,
@@ -2834,7 +2834,7 @@ static const llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_await_actions_1_0[1];
-static struct llc_conn_state_trans llc_await_state_trans_1_0 = {
+static const struct llc_conn_state_trans llc_await_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_await_ev_qfyrs_1_0,
@@ -2848,7 +2848,7 @@ static const llc_conn_action_t llc_await_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_1 = {
+static const struct llc_conn_state_trans llc_await_state_trans_1 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -2867,7 +2867,7 @@ static const llc_conn_action_t llc_await_actions_2[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_2 = {
+static const struct llc_conn_state_trans llc_await_state_trans_2 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -2883,7 +2883,7 @@ static const llc_conn_action_t llc_await_actions_3a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_3a = {
+static const struct llc_conn_state_trans llc_await_state_trans_3a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -2899,7 +2899,7 @@ static const llc_conn_action_t llc_await_actions_3b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_3b = {
+static const struct llc_conn_state_trans llc_await_state_trans_3b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -2916,7 +2916,7 @@ static const llc_conn_action_t llc_await_actions_4[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_4 = {
+static const struct llc_conn_state_trans llc_await_state_trans_4 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -2935,7 +2935,7 @@ static const llc_conn_action_t llc_await_actions_5[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_5 = {
+static const struct llc_conn_state_trans llc_await_state_trans_5 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -2952,7 +2952,7 @@ static const llc_conn_action_t llc_await_actions_6a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_6a = {
+static const struct llc_conn_state_trans llc_await_state_trans_6a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -2969,7 +2969,7 @@ static const llc_conn_action_t llc_await_actions_6b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_6b = {
+static const struct llc_conn_state_trans llc_await_state_trans_6b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -2986,7 +2986,7 @@ static const llc_conn_action_t llc_await_actions_7[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_7 = {
+static const struct llc_conn_state_trans llc_await_state_trans_7 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3003,7 +3003,7 @@ static const llc_conn_action_t llc_await_actions_8a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_8a = {
+static const struct llc_conn_state_trans llc_await_state_trans_8a = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3020,7 +3020,7 @@ static const llc_conn_action_t llc_await_actions_8b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_8b = {
+static const struct llc_conn_state_trans llc_await_state_trans_8b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3035,7 +3035,7 @@ static const llc_conn_action_t llc_await_actions_9a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9a = {
+static const struct llc_conn_state_trans llc_await_state_trans_9a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3050,7 +3050,7 @@ static const llc_conn_action_t llc_await_actions_9b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9b = {
+static const struct llc_conn_state_trans llc_await_state_trans_9b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3065,7 +3065,7 @@ static const llc_conn_action_t llc_await_actions_9c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9c = {
+static const struct llc_conn_state_trans llc_await_state_trans_9c = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3080,7 +3080,7 @@ static const llc_conn_action_t llc_await_actions_9d[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_9d = {
+static const struct llc_conn_state_trans llc_await_state_trans_9d = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3096,7 +3096,7 @@ static const llc_conn_action_t llc_await_actions_10a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_10a = {
+static const struct llc_conn_state_trans llc_await_state_trans_10a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3112,7 +3112,7 @@ static const llc_conn_action_t llc_await_actions_10b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_10b = {
+static const struct llc_conn_state_trans llc_await_state_trans_10b = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3128,7 +3128,7 @@ static const llc_conn_action_t llc_await_actions_11[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_11 = {
+static const struct llc_conn_state_trans llc_await_state_trans_11 = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3143,7 +3143,7 @@ static const llc_conn_action_t llc_await_actions_12a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_12a = {
+static const struct llc_conn_state_trans llc_await_state_trans_12a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3158,7 +3158,7 @@ static const llc_conn_action_t llc_await_actions_12b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_12b = {
+static const struct llc_conn_state_trans llc_await_state_trans_12b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3174,7 +3174,7 @@ static const llc_conn_action_t llc_await_actions_13[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_13 = {
+static const struct llc_conn_state_trans llc_await_state_trans_13 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3194,7 +3194,7 @@ static const llc_conn_action_t llc_await_actions_14[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_state_trans_14 = {
+static const struct llc_conn_state_trans llc_await_state_trans_14 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_await_ev_qfyrs_14,
@@ -3205,7 +3205,7 @@ static struct llc_conn_state_trans llc_await_state_trans_14 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_await_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_await_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_await_state_trans_1_0,
@@ -3263,7 +3263,7 @@ static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_await_busy_actions_1_0[1];
-static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_await_busy_ev_qfyrs_1_0,
@@ -3282,7 +3282,7 @@ static const llc_conn_action_t llc_await_busy_actions_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_1 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_1 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_busy_ev_qfyrs_1,
@@ -3300,7 +3300,7 @@ static const llc_conn_action_t llc_await_busy_actions_2[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_2 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_2 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = llc_await_busy_ev_qfyrs_2,
@@ -3318,7 +3318,7 @@ static const llc_conn_action_t llc_await_busy_actions_3[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_3 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_3 = {
.ev = llc_conn_ev_local_busy_cleared,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_busy_ev_qfyrs_3,
@@ -3337,7 +3337,7 @@ static const llc_conn_action_t llc_await_busy_actions_4[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_4 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_4 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3353,7 +3353,7 @@ static const llc_conn_action_t llc_await_busy_actions_5a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_5a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_5a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3369,7 +3369,7 @@ static const llc_conn_action_t llc_await_busy_actions_5b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_5b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_5b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3385,7 +3385,7 @@ static const llc_conn_action_t llc_await_busy_actions_6[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_6 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_6 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3406,7 +3406,7 @@ static const llc_conn_action_t llc_await_busy_actions_7[] = {
[9] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_7 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_7 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3424,7 +3424,7 @@ static const llc_conn_action_t llc_await_busy_actions_8a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_8a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_8a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3442,7 +3442,7 @@ static const llc_conn_action_t llc_await_busy_actions_8b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_8b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_8b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3460,7 +3460,7 @@ static const llc_conn_action_t llc_await_busy_actions_9[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_9 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_9 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3477,7 +3477,7 @@ static const llc_conn_action_t llc_await_busy_actions_10a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_10a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_10a = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3494,7 +3494,7 @@ static const llc_conn_action_t llc_await_busy_actions_10b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_10b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_10b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3509,7 +3509,7 @@ static const llc_conn_action_t llc_await_busy_actions_11a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3524,7 +3524,7 @@ static const llc_conn_action_t llc_await_busy_actions_11b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3539,7 +3539,7 @@ static const llc_conn_action_t llc_await_busy_actions_11c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11c = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11c = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3554,7 +3554,7 @@ static const llc_conn_action_t llc_await_busy_actions_11d[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_11d = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_11d = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3570,7 +3570,7 @@ static const llc_conn_action_t llc_await_busy_actions_12a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_12a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_12a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3586,7 +3586,7 @@ static const llc_conn_action_t llc_await_busy_actions_12b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_12b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_12b = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3602,7 +3602,7 @@ static const llc_conn_action_t llc_await_busy_actions_13[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_13 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_13 = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_BUSY,
.ev_qualifiers = NONE,
@@ -3617,7 +3617,7 @@ static const llc_conn_action_t llc_await_busy_actions_14a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_14a = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_14a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3632,7 +3632,7 @@ static const llc_conn_action_t llc_await_busy_actions_14b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_14b = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_14b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3648,7 +3648,7 @@ static const llc_conn_action_t llc_await_busy_actions_15[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_15 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_15 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3668,7 +3668,7 @@ static const llc_conn_action_t llc_await_busy_actions_16[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_busy_state_trans_16 = {
+static const struct llc_conn_state_trans llc_await_busy_state_trans_16 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = llc_await_busy_ev_qfyrs_16,
@@ -3679,7 +3679,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_16 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_await_busy_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_await_busy_state_transitions[] = {
[0] = &llc_common_state_trans_1, /* Request */
[1] = &llc_common_state_trans_2,
[2] = &llc_await_busy_state_trans_1_0,
@@ -3739,7 +3739,7 @@ static const llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_await_reject_actions_1_0[1];
-static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = {
+static const struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_reject_ev_qfyrs_1_0,
@@ -3753,7 +3753,7 @@ static const llc_conn_action_t llc_await_rejct_actions_1[] = {
[2] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_1 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_1 = {
.ev = llc_conn_ev_local_busy_detected,
.next_state = LLC_CONN_STATE_AWAIT_BUSY,
.ev_qualifiers = NONE,
@@ -3767,7 +3767,7 @@ static const llc_conn_action_t llc_await_rejct_actions_2a[] = {
[2] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_2a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_2a = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3781,7 +3781,7 @@ static const llc_conn_action_t llc_await_rejct_actions_2b[] = {
[2] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_2b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_2b = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3796,7 +3796,7 @@ static const llc_conn_action_t llc_await_rejct_actions_3[] = {
[3] = NULL
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_3 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_3 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3816,7 +3816,7 @@ static const llc_conn_action_t llc_await_rejct_actions_4[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_4 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_4 = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -3834,7 +3834,7 @@ static const llc_conn_action_t llc_await_rejct_actions_5a[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_5a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_5a = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3852,7 +3852,7 @@ static const llc_conn_action_t llc_await_rejct_actions_5b[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_5b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_5b = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3870,7 +3870,7 @@ static const llc_conn_action_t llc_await_rejct_actions_6[] = {
[6] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_6 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_6 = {
.ev = llc_conn_ev_rx_i_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT,
.ev_qualifiers = NONE,
@@ -3887,7 +3887,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7a[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_7a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_7a = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -3904,7 +3904,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7b[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_7b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_7b = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -3921,7 +3921,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7c[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_7c = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_7c = {
.ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -3936,7 +3936,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3951,7 +3951,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8b = {
.ev = llc_conn_ev_rx_rr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3966,7 +3966,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8c[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8c = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8c = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3981,7 +3981,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8d[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_8d = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_8d = {
.ev = llc_conn_ev_rx_rej_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -3997,7 +3997,7 @@ static const llc_conn_action_t llc_await_rejct_actions_9a[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_9a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_9a = {
.ev = llc_conn_ev_rx_rr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4013,7 +4013,7 @@ static const llc_conn_action_t llc_await_rejct_actions_9b[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_9b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_9b = {
.ev = llc_conn_ev_rx_rej_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4029,7 +4029,7 @@ static const llc_conn_action_t llc_await_rejct_actions_10[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_10 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_10 = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
.next_state = LLC_CONN_STATE_REJ,
.ev_qualifiers = NONE,
@@ -4044,7 +4044,7 @@ static const llc_conn_action_t llc_await_rejct_actions_11a[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_11a = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_11a = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4059,7 +4059,7 @@ static const llc_conn_action_t llc_await_rejct_actions_11b[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_11b = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_11b = {
.ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4075,7 +4075,7 @@ static const llc_conn_action_t llc_await_rejct_actions_12[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_12 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_12 = {
.ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = NONE,
@@ -4095,7 +4095,7 @@ static const llc_conn_action_t llc_await_rejct_actions_13[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = {
+static const struct llc_conn_state_trans llc_await_rejct_state_trans_13 = {
.ev = llc_conn_ev_p_tmr_exp,
.next_state = LLC_CONN_STATE_AWAIT_REJ,
.ev_qualifiers = llc_await_rejct_ev_qfyrs_13,
@@ -4106,7 +4106,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = {
[0] = &llc_await_reject_state_trans_1_0,
[1] = &llc_common_state_trans_1, /* requests */
[2] = &llc_common_state_trans_2,
@@ -4171,7 +4171,7 @@ static const llc_conn_action_t llc_d_conn_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_1,
@@ -4194,7 +4194,7 @@ static const llc_conn_action_t llc_d_conn_actions_1_1[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_1_1,
@@ -4218,7 +4218,7 @@ static const llc_conn_action_t llc_d_conn_actions_2[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_2 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_2 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_2,
@@ -4241,7 +4241,7 @@ static const llc_conn_action_t llc_d_conn_actions_2_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_2_1,
@@ -4254,7 +4254,7 @@ static const llc_conn_action_t llc_d_conn_actions_3[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_3 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_3 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = NONE,
@@ -4277,7 +4277,7 @@ static const llc_conn_action_t llc_d_conn_actions_4[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_4 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_4 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_4,
@@ -4299,7 +4299,7 @@ static const llc_conn_action_t llc_d_conn_actions_4_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_4_1,
@@ -4318,7 +4318,7 @@ static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_d_conn_actions_5[1];
-static struct llc_conn_state_trans llc_d_conn_state_trans_5 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_5 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = llc_d_conn_ev_qfyrs_5,
@@ -4338,7 +4338,7 @@ static const llc_conn_action_t llc_d_conn_actions_6[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_6 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_6 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_D_CONN,
.ev_qualifiers = llc_d_conn_ev_qfyrs_6,
@@ -4359,7 +4359,7 @@ static const llc_conn_action_t llc_d_conn_actions_7[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_7 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_7,
@@ -4379,7 +4379,7 @@ static const llc_conn_action_t llc_d_conn_actions_8[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_d_conn_state_trans_8 = {
+static const struct llc_conn_state_trans llc_d_conn_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_d_conn_ev_qfyrs_8,
@@ -4390,7 +4390,7 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_8 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_d_conn_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_d_conn_state_transitions[] = {
[0] = &llc_d_conn_state_trans_5, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* Local busy */
@@ -4419,7 +4419,7 @@ static const llc_conn_action_t llc_rst_actions_1[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -4447,7 +4447,7 @@ static const llc_conn_action_t llc_rst_actions_2[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_2 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_2 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_rst_ev_qfyrs_2,
@@ -4475,7 +4475,7 @@ static const llc_conn_action_t llc_rst_actions_2_1[] = {
[7] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_2_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_2_1 = {
.ev = llc_conn_ev_rx_ua_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_rst_ev_qfyrs_2_1,
@@ -4495,7 +4495,7 @@ static const llc_conn_action_t llc_rst_actions_3[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_3 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_3 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = llc_rst_ev_qfyrs_3,
@@ -4518,7 +4518,7 @@ static const llc_conn_action_t llc_rst_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_4 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_4 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_4,
@@ -4541,7 +4541,7 @@ static const llc_conn_action_t llc_rst_actions_4_1[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_4_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_4_1 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_4_1,
@@ -4564,7 +4564,7 @@ static const llc_conn_action_t llc_rst_actions_5[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_5 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_5 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_5,
@@ -4586,7 +4586,7 @@ static const llc_conn_action_t llc_rst_actions_5_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_5_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_5_1 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_5_1,
@@ -4602,7 +4602,7 @@ static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_rst_actions_6[1];
-static struct llc_conn_state_trans llc_rst_state_trans_6 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_6 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_rst_ev_qfyrs_6,
@@ -4623,7 +4623,7 @@ static const llc_conn_action_t llc_rst_actions_7[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_7 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_rst_ev_qfyrs_7,
@@ -4644,7 +4644,7 @@ static const llc_conn_action_t llc_rst_actions_8[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_8 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_8,
@@ -4665,7 +4665,7 @@ static const llc_conn_action_t llc_rst_actions_8_1[] = {
[2] = NULL,
};
-static struct llc_conn_state_trans llc_rst_state_trans_8_1 = {
+static const struct llc_conn_state_trans llc_rst_state_trans_8_1 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = llc_rst_ev_qfyrs_8_1,
@@ -4676,7 +4676,7 @@ static struct llc_conn_state_trans llc_rst_state_trans_8_1 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_rst_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_rst_state_transitions[] = {
[0] = &llc_rst_state_trans_6, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* Local busy */
@@ -4710,7 +4710,7 @@ static const llc_conn_action_t llc_error_actions_1[] = {
[8] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_1 = {
+static const struct llc_conn_state_trans llc_error_state_trans_1 = {
.ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_NORMAL,
.ev_qualifiers = NONE,
@@ -4726,7 +4726,7 @@ static const llc_conn_action_t llc_error_actions_2[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_2 = {
+static const struct llc_conn_state_trans llc_error_state_trans_2 = {
.ev = llc_conn_ev_rx_disc_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -4741,7 +4741,7 @@ static const llc_conn_action_t llc_error_actions_3[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_3 = {
+static const struct llc_conn_state_trans llc_error_state_trans_3 = {
.ev = llc_conn_ev_rx_dm_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -4757,7 +4757,7 @@ static const llc_conn_action_t llc_error_actions_4[] = {
[4] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_4 = {
+static const struct llc_conn_state_trans llc_error_state_trans_4 = {
.ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = NONE,
@@ -4770,7 +4770,7 @@ static const llc_conn_action_t llc_error_actions_5[] = {
[1] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_5 = {
+static const struct llc_conn_state_trans llc_error_state_trans_5 = {
.ev = llc_conn_ev_rx_xxx_cmd_pbit_set_x,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -4778,7 +4778,7 @@ static struct llc_conn_state_trans llc_error_state_trans_5 = {
};
/* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X event */
-static struct llc_conn_state_trans llc_error_state_trans_6 = {
+static const struct llc_conn_state_trans llc_error_state_trans_6 = {
.ev = llc_conn_ev_rx_xxx_rsp_fbit_set_x,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = NONE,
@@ -4798,7 +4798,7 @@ static const llc_conn_action_t llc_error_actions_7[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_7 = {
+static const struct llc_conn_state_trans llc_error_state_trans_7 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = llc_error_ev_qfyrs_7,
@@ -4820,7 +4820,7 @@ static const llc_conn_action_t llc_error_actions_8[] = {
[5] = NULL,
};
-static struct llc_conn_state_trans llc_error_state_trans_8 = {
+static const struct llc_conn_state_trans llc_error_state_trans_8 = {
.ev = llc_conn_ev_ack_tmr_exp,
.next_state = LLC_CONN_STATE_RESET,
.ev_qualifiers = llc_error_ev_qfyrs_8,
@@ -4836,7 +4836,7 @@ static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = {
/* just one member, NULL, .bss zeroes it */
static const llc_conn_action_t llc_error_actions_9[1];
-static struct llc_conn_state_trans llc_error_state_trans_9 = {
+static const struct llc_conn_state_trans llc_error_state_trans_9 = {
.ev = llc_conn_ev_data_req,
.next_state = LLC_CONN_STATE_ERROR,
.ev_qualifiers = llc_error_ev_qfyrs_9,
@@ -4847,7 +4847,7 @@ static struct llc_conn_state_trans llc_error_state_trans_9 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_error_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_error_state_transitions[] = {
[0] = &llc_error_state_trans_9, /* Request */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* Local busy */
@@ -4873,7 +4873,7 @@ static const llc_conn_action_t llc_temp_actions_1[] = {
[3] = NULL,
};
-static struct llc_conn_state_trans llc_temp_state_trans_1 = {
+static const struct llc_conn_state_trans llc_temp_state_trans_1 = {
.ev = llc_conn_ev_disc_req,
.next_state = LLC_CONN_STATE_ADM,
.ev_qualifiers = NONE,
@@ -4884,7 +4884,7 @@ static struct llc_conn_state_trans llc_temp_state_trans_1 = {
* Array of pointers;
* one to each transition
*/
-static struct llc_conn_state_trans *llc_temp_state_transitions[] = {
+static const struct llc_conn_state_trans *llc_temp_state_transitions[] = {
[0] = &llc_temp_state_trans_1, /* requests */
[1] = &llc_common_state_trans_end,
[2] = &llc_common_state_trans_end, /* local busy */
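/*
 * Illustrative sketch of the pattern the one-line changes above apply
 * throughout net/llc/llc_c_st.c: every state-transition descriptor and every
 * per-state pointer table becomes const, so the state machine can live in
 * read-only memory.  Types and names below are simplified stand-ins, not the
 * kernel's definitions.
 */
struct trans {
	int event;		/* which event this entry matches       */
	int next_state;		/* state entered when the event matches */
};

/* all-zero sentinel terminating each per-state table */
static const struct trans trans_end;

static const struct trans trans_example = {
	.event		= 1,
	.next_state	= 2,
};

/* a state's table: pointers to read-only descriptors, never written at runtime */
static const struct trans *example_state_transitions[] = {
	&trans_example,
	&trans_end,
};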
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 0a3f5e0bec00..afc6974eafda 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -34,10 +34,10 @@ static int llc_find_offset(int state, int ev_type);
static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
- struct llc_conn_state_trans *trans,
+ const struct llc_conn_state_trans *trans,
struct sk_buff *ev);
-static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
- struct sk_buff *skb);
+static const struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
+ struct sk_buff *skb);
/* Offset table on connection states transition diagram */
static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV];
@@ -356,9 +356,9 @@ static void llc_conn_send_pdus(struct sock *sk)
*/
static int llc_conn_service(struct sock *sk, struct sk_buff *skb)
{
- int rc = 1;
+ const struct llc_conn_state_trans *trans;
struct llc_sock *llc = llc_sk(sk);
- struct llc_conn_state_trans *trans;
+ int rc = 1;
if (llc->state > NBR_CONN_STATES)
goto out;
@@ -384,10 +384,10 @@ out:
* This function finds transition that matches with happened event.
* Returns pointer to found transition on success, %NULL otherwise.
*/
-static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
- struct sk_buff *skb)
+static const struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
+ struct sk_buff *skb)
{
- struct llc_conn_state_trans **next_trans;
+ const struct llc_conn_state_trans **next_trans;
const llc_conn_ev_qfyr_t *next_qualifier;
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
struct llc_sock *llc = llc_sk(sk);
@@ -432,7 +432,7 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
* success, 1 to indicate failure of at least one action.
*/
static int llc_exec_conn_trans_actions(struct sock *sk,
- struct llc_conn_state_trans *trans,
+ const struct llc_conn_state_trans *trans,
struct sk_buff *skb)
{
int rc = 0;
@@ -635,8 +635,8 @@ u8 llc_data_accept_state(u8 state)
*/
static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset)
{
+ const struct llc_conn_state_trans **next_trans;
u16 cnt = 0;
- struct llc_conn_state_trans **next_trans;
for (next_trans = state->transitions + offset;
(*next_trans)->ev; next_trans++)
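/*
 * Sketch of how the const-qualified tables are consumed by the lookup paths
 * touched above in llc_conn.c: they only walk a sentinel-terminated array of
 * pointers to read-only descriptors, so no writable pointer is ever needed.
 * Hypothetical helper reusing the stand-in struct trans from the sketch
 * above, not a kernel function.
 */
static int count_transitions(const struct trans **table)
{
	int n = 0;

	while (table[n]->event)		/* the all-zero sentinel stops the walk */
		n++;
	return n;
}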
diff --git a/net/llc/llc_s_st.c b/net/llc/llc_s_st.c
index 308c616883a4..acccc827c562 100644
--- a/net/llc/llc_s_st.c
+++ b/net/llc/llc_s_st.c
@@ -24,7 +24,7 @@
* last entry for this state
* all members are zeros, .bss zeroes it
*/
-static struct llc_sap_state_trans llc_sap_state_trans_end;
+static const struct llc_sap_state_trans llc_sap_state_trans_end;
/* state LLC_SAP_STATE_INACTIVE transition for
* LLC_SAP_EV_ACTIVATION_REQ event
@@ -34,14 +34,14 @@ static const llc_sap_action_t llc_sap_inactive_state_actions_1[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = {
+static const struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = {
.ev = llc_sap_ev_activation_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_inactive_state_actions_1,
};
/* array of pointers; one to each transition */
-static struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = {
+static const struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = {
[0] = &llc_sap_inactive_state_trans_1,
[1] = &llc_sap_state_trans_end,
};
@@ -52,7 +52,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_1[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_1 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_1 = {
.ev = llc_sap_ev_rx_ui,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_1,
@@ -64,7 +64,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_2[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_2 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_2 = {
.ev = llc_sap_ev_unitdata_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_2,
@@ -76,7 +76,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_3[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_3 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_3 = {
.ev = llc_sap_ev_xid_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_3,
@@ -88,7 +88,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_4[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_4 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_4 = {
.ev = llc_sap_ev_rx_xid_c,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_4,
@@ -100,7 +100,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_5[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_5 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_5 = {
.ev = llc_sap_ev_rx_xid_r,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_5,
@@ -112,7 +112,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_6[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_6 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_6 = {
.ev = llc_sap_ev_test_req,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_6,
@@ -124,7 +124,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_7[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_7 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_7 = {
.ev = llc_sap_ev_rx_test_c,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_7
@@ -136,7 +136,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_8[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_8 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_8 = {
.ev = llc_sap_ev_rx_test_r,
.next_state = LLC_SAP_STATE_ACTIVE,
.ev_actions = llc_sap_active_state_actions_8,
@@ -150,14 +150,14 @@ static const llc_sap_action_t llc_sap_active_state_actions_9[] = {
[1] = NULL,
};
-static struct llc_sap_state_trans llc_sap_active_state_trans_9 = {
+static const struct llc_sap_state_trans llc_sap_active_state_trans_9 = {
.ev = llc_sap_ev_deactivation_req,
.next_state = LLC_SAP_STATE_INACTIVE,
.ev_actions = llc_sap_active_state_actions_9
};
/* array of pointers; one to each transition */
-static struct llc_sap_state_trans *llc_sap_active_state_transitions[] = {
+static const struct llc_sap_state_trans *llc_sap_active_state_transitions[] = {
[0] = &llc_sap_active_state_trans_2,
[1] = &llc_sap_active_state_trans_1,
[2] = &llc_sap_active_state_trans_3,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 116c0e479183..6cd03c2ae7d5 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -114,12 +114,12 @@ void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb)
* Returns the pointer to found transition on success or %NULL for
* failure.
*/
-static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap,
- struct sk_buff *skb)
+static const struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap,
+ struct sk_buff *skb)
{
int i = 0;
- struct llc_sap_state_trans *rc = NULL;
- struct llc_sap_state_trans **next_trans;
+ const struct llc_sap_state_trans *rc = NULL;
+ const struct llc_sap_state_trans **next_trans;
struct llc_sap_state *curr_state = &llc_sap_state_table[sap->state - 1];
/*
* Search thru events for this state until list exhausted or until
@@ -143,7 +143,7 @@ static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap,
* Returns 0 for success and 1 for failure of at least one action.
*/
static int llc_exec_sap_trans_actions(struct llc_sap *sap,
- struct llc_sap_state_trans *trans,
+ const struct llc_sap_state_trans *trans,
struct sk_buff *skb)
{
int rc = 0;
@@ -166,8 +166,8 @@ static int llc_exec_sap_trans_actions(struct llc_sap *sap,
*/
static int llc_sap_next_state(struct llc_sap *sap, struct sk_buff *skb)
{
+ const struct llc_sap_state_trans *trans;
int rc = 1;
- struct llc_sap_state_trans *trans;
if (sap->state > LLC_NR_SAP_STATES)
goto out;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 21d55dc539f6..677bbbac9f16 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -616,7 +616,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if (!pubsta->deflink.ht_cap.ht_supported &&
- sta->sdata->vif.bss_conf.chanreq.oper.chan->band != NL80211_BAND_6GHZ)
+ !pubsta->deflink.vht_cap.vht_supported &&
+ !pubsta->deflink.he_cap.has_he &&
+ !pubsta->deflink.eht_cap.has_eht)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
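/*
 * Sketch of the aggregation gate change above in mac80211/agg-tx.c: rather
 * than requiring HT support (with an implicit exception for the 6 GHz band),
 * a block-ack session is now refused only when the peer advertises none of
 * HT, VHT, HE or EHT.  Field names are illustrative stand-ins.
 */
#include <stdbool.h>

struct peer_ht_caps {
	bool ht_supported;
	bool vht_supported;
	bool has_he;
	bool has_eht;
};

static bool peer_supports_aggregation(const struct peer_ht_caps *c)
{
	return c->ht_supported || c->vht_supported || c->has_he || c->has_eht;
}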
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b08e5d7687e3..85cb71de370f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -263,7 +263,7 @@ static int ieee80211_start_p2p_device(struct wiphy *wiphy,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+ ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1);
if (ret < 0)
return ret;
@@ -285,7 +285,7 @@ static int ieee80211_start_nan(struct wiphy *wiphy,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+ ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1);
if (ret < 0)
return ret;
@@ -742,9 +742,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
break;
}
- params.key = key->conf.key;
- params.key_len = key->conf.keylen;
-
callback(cookie, &params);
err = 0;
@@ -1379,6 +1376,11 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ);
+ link_conf->eht_80mhz_full_bw_ul_mumimo =
+ params->eht_cap->fixed.phy_cap_info[7] &
+ (IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ);
} else {
link_conf->eht_su_beamformer = false;
link_conf->eht_su_beamformee = false;
@@ -1610,11 +1612,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
/* abort any running channel switch or color change */
link_conf->csa_active = false;
link_conf->color_change_active = false;
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ ieee80211_vif_unblock_queues_csa(sdata);
ieee80211_free_next_beacon(link);
@@ -1666,7 +1664,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
if (sdata->wdev.cac_started) {
chandef = link_conf->chanreq.oper;
- wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
+ wiphy_delayed_work_cancel(wiphy, &sdata->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
GFP_KERNEL);
@@ -1809,11 +1807,17 @@ static void sta_apply_mesh_params(struct ieee80211_local *local,
#endif
}
+enum sta_link_apply_mode {
+ STA_LINK_MODE_NEW,
+ STA_LINK_MODE_STA_MODIFY,
+ STA_LINK_MODE_LINK_MODIFY,
+};
+
static int sta_link_apply_parameters(struct ieee80211_local *local,
- struct sta_info *sta, bool new_link,
+ struct sta_info *sta,
+ enum sta_link_apply_mode mode,
struct link_station_parameters *params)
{
- int ret = 0;
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata = sta->sdata;
u32 link_id = params->link_id < 0 ? 0 : params->link_id;
@@ -1822,18 +1826,29 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
struct link_sta_info *link_sta =
rcu_dereference_protected(sta->link[link_id],
lockdep_is_held(&local->hw.wiphy->mtx));
-
- /*
- * If there are no changes, then accept a link that exist,
- * unless it's a new link.
- */
- if (params->link_id >= 0 && !new_link &&
- !params->link_mac && !params->txpwr_set &&
- !params->supported_rates_len &&
- !params->ht_capa && !params->vht_capa &&
- !params->he_capa && !params->eht_capa &&
- !params->opmode_notif_used)
- return 0;
+ bool changes = params->link_mac ||
+ params->txpwr_set ||
+ params->supported_rates_len ||
+ params->ht_capa ||
+ params->vht_capa ||
+ params->he_capa ||
+ params->eht_capa ||
+ params->opmode_notif_used;
+
+ switch (mode) {
+ case STA_LINK_MODE_NEW:
+ if (!params->link_mac)
+ return -EINVAL;
+ break;
+ case STA_LINK_MODE_LINK_MODIFY:
+ break;
+ case STA_LINK_MODE_STA_MODIFY:
+ if (params->link_id >= 0)
+ break;
+ if (!changes)
+ return 0;
+ break;
+ }
if (!link || !link_sta)
return -EINVAL;
@@ -1843,18 +1858,18 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
return -EINVAL;
if (params->link_mac) {
- if (new_link) {
+ if (mode == STA_LINK_MODE_NEW) {
memcpy(link_sta->addr, params->link_mac, ETH_ALEN);
memcpy(link_sta->pub->addr, params->link_mac, ETH_ALEN);
} else if (!ether_addr_equal(link_sta->addr,
params->link_mac)) {
return -EINVAL;
}
- } else if (new_link) {
- return -EINVAL;
}
if (params->txpwr_set) {
+ int ret;
+
link_sta->pub->txpwr.type = params->txpwr.type;
if (params->txpwr.type == NL80211_TX_POWER_LIMITED)
link_sta->pub->txpwr.power = params->txpwr.power;
@@ -1907,7 +1922,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
ieee80211_sta_init_nss(link_sta);
- return ret;
+ return 0;
}
static int sta_apply_parameters(struct ieee80211_local *local,
@@ -2023,7 +2038,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
if (params->listen_interval >= 0)
sta->listen_interval = params->listen_interval;
- ret = sta_link_apply_parameters(local, sta, false,
+ ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_STA_MODIFY,
&params->link_sta_params);
if (ret)
return ret;
@@ -2958,8 +2973,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
memcpy(sdata->vif.bss_conf.mcast_rate, rate,
sizeof(int) * NUM_NL80211_BANDS);
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_MCAST_RATE);
+ if (ieee80211_sdata_running(sdata))
+ ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+ BSS_CHANGED_MCAST_RATE);
return 0;
}
@@ -3466,7 +3482,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
if (err)
goto out_unlock;
- wiphy_delayed_work_queue(wiphy, &sdata->deflink.dfs_cac_timer_work,
+ wiphy_delayed_work_queue(wiphy, &sdata->dfs_cac_timer_work,
msecs_to_jiffies(cac_time_ms));
out_unlock:
@@ -3482,12 +3498,8 @@ static void ieee80211_end_cac(struct wiphy *wiphy,
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
- /* it might be waiting for the local->mtx, but then
- * by the time it gets it, sdata->wdev.cac_started
- * will no longer be true
- */
wiphy_delayed_work_cancel(wiphy,
- &sdata->deflink.dfs_cac_timer_work);
+ &sdata->dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
ieee80211_link_release_channel(&sdata->deflink);
@@ -3637,10 +3649,10 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
continue;
wiphy_work_queue(iter->local->hw.wiphy,
- &iter->deflink.csa_finalize_work);
+ &iter->deflink.csa.finalize_work);
}
}
- wiphy_work_queue(local->hw.wiphy, &link_data->csa_finalize_work);
+ wiphy_work_queue(local->hw.wiphy, &link_data->csa.finalize_work);
rcu_read_unlock();
}
@@ -3727,7 +3739,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
}
if (!cfg80211_chandef_identical(&link_conf->chanreq.oper,
- &link_data->csa_chanreq.oper))
+ &link_data->csa.chanreq.oper))
return -EINVAL;
link_conf->csa_active = false;
@@ -3738,17 +3750,13 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
ieee80211_link_info_change_notify(sdata, link_data, changed);
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ ieee80211_vif_unblock_queues_csa(sdata);
err = drv_post_channel_switch(link_data);
if (err)
return err;
- cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chanreq.oper,
+ cfg80211_ch_switch_notify(sdata->dev, &link_data->csa.chanreq.oper,
link_data->link_id);
return 0;
@@ -3769,7 +3777,7 @@ static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ieee80211_link_data *link =
- container_of(work, struct ieee80211_link_data, csa_finalize_work);
+ container_of(work, struct ieee80211_link_data, csa.finalize_work);
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
@@ -4000,7 +4008,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
goto out;
/* if reservation is invalid then this will fail */
- err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0);
+ err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0, -1);
if (err) {
ieee80211_link_unreserve_chanctx(link_data);
goto out;
@@ -4016,23 +4024,19 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
- link_data->csa_chanreq = chanreq;
+ link_data->csa.chanreq = chanreq;
link_conf->csa_active = true;
- if (params->block_tx &&
- !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
- ieee80211_stop_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = true;
- }
+ if (params->block_tx)
+ ieee80211_vif_block_queues_csa(sdata);
cfg80211_ch_switch_started_notify(sdata->dev,
- &link_data->csa_chanreq.oper, 0,
+ &link_data->csa.chanreq.oper, link_id,
params->count, params->block_tx);
if (changed) {
ieee80211_link_info_change_notify(sdata, link_data, changed);
- drv_channel_switch_beacon(sdata, &link_data->csa_chanreq.oper);
+ drv_channel_switch_beacon(sdata, &link_data->csa.chanreq.oper);
} else {
/* if the beacon didn't change, we can finalize immediately */
ieee80211_csa_finalize(link_data);
@@ -4978,13 +4982,17 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy,
ieee80211_vif_set_links(sdata, wdev->valid_links, 0);
}
-static int sta_add_link_station(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct link_station_parameters *params)
+static int
+ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_parameters *params)
{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = wiphy_priv(wiphy);
struct sta_info *sta;
int ret;
+ lockdep_assert_wiphy(local->hw.wiphy);
+
sta = sta_info_get_bss(sdata, params->mld_mac);
if (!sta)
return -ENOENT;
@@ -4999,7 +5007,7 @@ static int sta_add_link_station(struct ieee80211_local *local,
if (ret)
return ret;
- ret = sta_link_apply_parameters(local, sta, true, params);
+ ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_NEW, params);
if (ret) {
ieee80211_sta_free_link(sta, params->link_id);
return ret;
@@ -5010,23 +5018,15 @@ static int sta_add_link_station(struct ieee80211_local *local,
}
static int
-ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev,
+ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev,
struct link_station_parameters *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wiphy_priv(wiphy);
-
- lockdep_assert_wiphy(sdata->local->hw.wiphy);
-
- return sta_add_link_station(local, sdata, params);
-}
-
-static int sta_mod_link_station(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct link_station_parameters *params)
-{
struct sta_info *sta;
+ lockdep_assert_wiphy(local->hw.wiphy);
+
sta = sta_info_get_bss(sdata, params->mld_mac);
if (!sta)
return -ENOENT;
@@ -5034,26 +5034,19 @@ static int sta_mod_link_station(struct ieee80211_local *local,
if (!(sta->sta.valid_links & BIT(params->link_id)))
return -EINVAL;
- return sta_link_apply_parameters(local, sta, false, params);
+ return sta_link_apply_parameters(local, sta, STA_LINK_MODE_LINK_MODIFY,
+ params);
}
static int
-ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev,
- struct link_station_parameters *params)
+ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev,
+ struct link_station_del_parameters *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct sta_info *sta;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- return sta_mod_link_station(local, sdata, params);
-}
-
-static int sta_del_link_station(struct ieee80211_sub_if_data *sdata,
- struct link_station_del_parameters *params)
-{
- struct sta_info *sta;
-
sta = sta_info_get_bss(sdata, params->mld_mac);
if (!sta)
return -ENOENT;
@@ -5070,17 +5063,6 @@ static int sta_del_link_station(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static int
-ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev,
- struct link_station_del_parameters *params)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- lockdep_assert_wiphy(sdata->local->hw.wiphy);
-
- return sta_del_link_station(sdata, params);
-}
-
static int ieee80211_set_hw_timestamp(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_set_hw_timestamp *hwts)
@@ -5221,4 +5203,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.del_link_station = ieee80211_del_link_station,
.set_hw_timestamp = ieee80211_set_hw_timestamp,
.set_ttlm = ieee80211_set_ttlm,
+ .get_radio_mask = ieee80211_get_radio_mask,
};
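/*
 * Sketch of the sta_link_apply_parameters() rework above in cfg.c: the old
 * 'new_link' bool becomes a three-way mode, and the early-return rules differ
 * per mode.  Simplified stand-in parameters, not the kernel structures.
 */
#include <errno.h>
#include <stdbool.h>

enum link_apply_mode { MODE_NEW, MODE_STA_MODIFY, MODE_LINK_MODIFY };

static int gate_link_params(enum link_apply_mode mode, bool has_link_mac,
			    int link_id, bool any_changes)
{
	switch (mode) {
	case MODE_NEW:
		if (!has_link_mac)
			return -EINVAL;	/* a new link must name its MAC   */
		break;
	case MODE_LINK_MODIFY:
		break;			/* always validate the named link */
	case MODE_STA_MODIFY:
		if (link_id >= 0)
			break;
		if (!any_changes)
			return 0;	/* legacy no-op: accept and stop  */
		break;
	}
	return 1;			/* caller keeps applying params   */
}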
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 380695fdc32f..e8567723e94d 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -47,24 +47,29 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local,
ieee80211_chanctx_num_reserved(local, ctx);
}
-static int ieee80211_num_chanctx(struct ieee80211_local *local)
+static int ieee80211_num_chanctx(struct ieee80211_local *local, int radio_idx)
{
struct ieee80211_chanctx *ctx;
int num = 0;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(ctx, &local->chanctx_list, list)
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ if (radio_idx >= 0 && ctx->conf.radio_idx != radio_idx)
+ continue;
num++;
+ }
return num;
}
-static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
+static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local,
+ int radio_idx)
{
lockdep_assert_wiphy(local->hw.wiphy);
- return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
+ return ieee80211_num_chanctx(local, radio_idx) <
+ ieee80211_max_num_channels(local, radio_idx);
}
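/*
 * Sketch of the per-radio filtering introduced above in chan.c: channel
 * contexts are counted only for the requested radio, and radio_idx < 0 keeps
 * the old "count everything" behaviour.  List and field names are
 * illustrative stand-ins.
 */
struct ctx_entry {
	int radio_idx;
	struct ctx_entry *next;
};

static int num_ctx_for_radio(const struct ctx_entry *head, int radio_idx)
{
	int num = 0;
	const struct ctx_entry *c;

	for (c = head; c; c = c->next) {
		if (radio_idx >= 0 && c->radio_idx != radio_idx)
			continue;	/* belongs to a different radio */
		num++;
	}
	return num;
}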
static struct ieee80211_chanctx *
@@ -295,17 +300,24 @@ ieee80211_get_max_required_bw(struct ieee80211_link_data *link)
static enum nl80211_chan_width
ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved)
{
struct ieee80211_sub_if_data *sdata;
struct ieee80211_link_data *link;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
+ if (WARN_ON(check_reserved && rsvd_for))
+ return ctx->conf.def.width;
+
for_each_sdata_link(local, link) {
enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
- if (link != rsvd_for &&
- rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
+ if (check_reserved) {
+ if (link->reserved_chanctx != ctx)
+ continue;
+ } else if (link != rsvd_for &&
+ rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
continue;
switch (link->sdata->vif.type) {
@@ -359,7 +371,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
static u32
_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved)
{
enum nl80211_chan_width max_bw;
struct cfg80211_chan_def min_def;
@@ -379,7 +392,8 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
return 0;
}
- max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for);
+ max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for,
+ check_reserved);
/* downgrade chandef up to max_bw */
min_def = ctx->conf.def;
@@ -396,12 +410,9 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
return IEEE80211_CHANCTX_CHANGE_MIN_WIDTH;
}
-/* calling this function is assuming that station vif is updated to
- * lates changes by calling ieee80211_link_update_chanreq
- */
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- bool narrowed)
+ bool reserved, bool narrowed)
{
struct sta_info *sta;
struct ieee80211_supported_band *sband =
@@ -418,13 +429,17 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
continue;
for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(sdata->vif.link_conf[link_id]);
+ struct ieee80211_link_data *link =
+ rcu_dereference(sdata->link[link_id]);
+ struct ieee80211_bss_conf *link_conf;
+ struct cfg80211_chan_def *new_chandef;
struct link_sta_info *link_sta;
- if (!link_conf)
+ if (!link)
continue;
+ link_conf = link->conf;
+
if (rcu_access_pointer(link_conf->chanctx_conf) != &ctx->conf)
continue;
@@ -432,7 +447,13 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
if (!link_sta)
continue;
- new_sta_bw = ieee80211_sta_cur_vht_bw(link_sta);
+ if (reserved)
+ new_chandef = &link->reserved.oper;
+ else
+ new_chandef = &link_conf->chanreq.oper;
+
+ new_sta_bw = _ieee80211_sta_cur_vht_bw(link_sta,
+ new_chandef);
/* nothing change */
if (new_sta_bw == link_sta->pub->bandwidth)
@@ -458,20 +479,22 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
*/
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved)
{
- u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for,
+ check_reserved);
if (!changed)
return;
/* check is BW narrowed */
- ieee80211_chan_bw_change(local, ctx, true);
+ ieee80211_chan_bw_change(local, ctx, false, true);
drv_change_chanctx(local, ctx, changed);
/* check is BW wider */
- ieee80211_chan_bw_change(local, ctx, false);
+ ieee80211_chan_bw_change(local, ctx, false, false);
}
static void _ieee80211_change_chanctx(struct ieee80211_local *local,
@@ -505,10 +528,10 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local,
* due to maybe not returning from it, e.g in case new context was added
* first time with all parameters up to date.
*/
- ieee80211_chan_bw_change(local, old_ctx, true);
+ ieee80211_chan_bw_change(local, old_ctx, false, true);
if (ieee80211_chanreq_identical(&ctx_req, chanreq)) {
- ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false);
return;
}
@@ -529,14 +552,14 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local,
ctx->conf.ap = chanreq->ap;
/* check if min chanctx also changed */
- changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false);
ieee80211_add_wbrf(local, &ctx->conf.def);
drv_change_chanctx(local, ctx, changed);
/* check if BW is wider */
- ieee80211_chan_bw_change(local, old_ctx, false);
+ ieee80211_chan_bw_change(local, old_ctx, false, false);
}
static void ieee80211_change_chanctx(struct ieee80211_local *local,
@@ -638,7 +661,8 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_alloc_chanctx(struct ieee80211_local *local,
const struct ieee80211_chan_req *chanreq,
- enum ieee80211_chanctx_mode mode)
+ enum ieee80211_chanctx_mode mode,
+ int radio_idx)
{
struct ieee80211_chanctx *ctx;
@@ -656,7 +680,8 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
ctx->conf.rx_chains_dynamic = 1;
ctx->mode = mode;
ctx->conf.radar_enabled = false;
- _ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
+ ctx->conf.radio_idx = radio_idx;
+ _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false);
return ctx;
}
@@ -689,14 +714,15 @@ static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode,
- bool assign_on_failure)
+ bool assign_on_failure,
+ int radio_idx)
{
struct ieee80211_chanctx *ctx;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
- ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
+ ctx = ieee80211_alloc_chanctx(local, chanreq, mode, radio_idx);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -775,13 +801,24 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
/* TDLS peers can sometimes affect the chandef width */
list_for_each_entry(sta, &local->sta_list, list) {
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_chan_req tdls_chanreq = {};
+ int tdls_link_id;
+
if (!sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!sta->tdls_chandef.chan)
continue;
+ tdls_link_id = ieee80211_tdls_sta_link_id(sta);
+ link = sdata_dereference(sdata->link[tdls_link_id], sdata);
+ if (!link)
+ continue;
+
+ if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
+ continue;
+
tdls_chanreq.oper = sta->tdls_chandef;
/* note this always fills and returns &tmp if compat */
@@ -838,7 +875,7 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
if (new_ctx) {
/* recalc considering the link we'll use it for now */
- ieee80211_recalc_chanctx_min_def(local, new_ctx, link);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx, link, false);
ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx);
if (assign_on_failure || !ret) {
@@ -861,12 +898,12 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
ieee80211_recalc_chanctx_chantype(local, curr_ctx);
ieee80211_recalc_smps_chanctx(local, curr_ctx);
ieee80211_recalc_radar_chanctx(local, curr_ctx);
- ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL, false);
}
if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
ieee80211_recalc_txpower(sdata, false);
- ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false);
}
if (conf) {
@@ -1053,6 +1090,107 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
return 0;
}
+static struct ieee80211_chanctx *
+ieee80211_replace_chanctx(struct ieee80211_local *local,
+ const struct ieee80211_chan_req *chanreq,
+ enum ieee80211_chanctx_mode mode,
+ struct ieee80211_chanctx *curr_ctx)
+{
+ struct ieee80211_chanctx *new_ctx, *ctx;
+ struct wiphy *wiphy = local->hw.wiphy;
+ const struct wiphy_radio *radio;
+
+ if (!curr_ctx || (curr_ctx->replace_state ==
+ IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
+ !list_empty(&curr_ctx->reserved_links)) {
+ /*
+ * Another link already requested this context for a
+ * reservation. Find another one hoping all links assigned
+ * to it will also switch soon enough.
+ *
+ * TODO: This needs a little more work as some cases
+ * (more than 2 chanctx capable devices) may fail which could
+ * otherwise succeed provided some channel context juggling was
+ * performed.
+ *
+ * Consider ctx1..3, link1..6, each ctx has 2 links. link1 and
+ * link2 from ctx1 request new different chandefs starting 2
+ * in-place reservations with ctx4 and ctx5 replacing ctx1 and
+ * ctx2 respectively. Next link5 and link6 from ctx3 reserve
+ * ctx4. If link3 and link4 remain on ctx2 as they are then this
+ * fails unless `replace_ctx` from ctx5 is replaced with ctx3.
+ */
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ if (ctx->replace_state !=
+ IEEE80211_CHANCTX_REPLACE_NONE)
+ continue;
+
+ if (!list_empty(&ctx->reserved_links))
+ continue;
+
+ if (ctx->conf.radio_idx >= 0) {
+ radio = &wiphy->radio[ctx->conf.radio_idx];
+ if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper))
+ continue;
+ }
+
+ curr_ctx = ctx;
+ break;
+ }
+ }
+
+ /*
+ * If that's true then all available contexts already have reservations
+ * and cannot be used.
+ */
+ if (!curr_ctx || (curr_ctx->replace_state ==
+ IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
+ !list_empty(&curr_ctx->reserved_links))
+ return ERR_PTR(-EBUSY);
+
+ new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode, -1);
+ if (!new_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ new_ctx->replace_ctx = curr_ctx;
+ new_ctx->replace_state = IEEE80211_CHANCTX_REPLACES_OTHER;
+
+ curr_ctx->replace_ctx = new_ctx;
+ curr_ctx->replace_state = IEEE80211_CHANCTX_WILL_BE_REPLACED;
+
+ list_add_rcu(&new_ctx->list, &local->chanctx_list);
+
+ return new_ctx;
+}
+
+static bool
+ieee80211_find_available_radio(struct ieee80211_local *local,
+ const struct ieee80211_chan_req *chanreq,
+ int *radio_idx)
+{
+ struct wiphy *wiphy = local->hw.wiphy;
+ const struct wiphy_radio *radio;
+ int i;
+
+ *radio_idx = -1;
+ if (!wiphy->n_radio)
+ return true;
+
+ for (i = 0; i < wiphy->n_radio; i++) {
+ radio = &wiphy->radio[i];
+ if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper))
+ continue;
+
+ if (!ieee80211_can_create_new_chanctx(local, i))
+ continue;
+
+ *radio_idx = i;
+ return true;
+ }
+
+ return false;
+}
+
int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode,
@@ -1060,7 +1198,8 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx;
+ struct ieee80211_chanctx *new_ctx, *curr_ctx;
+ int radio_idx;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1070,76 +1209,15 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode);
if (!new_ctx) {
- if (ieee80211_can_create_new_chanctx(local)) {
+ if (ieee80211_can_create_new_chanctx(local, -1) &&
+ ieee80211_find_available_radio(local, chanreq, &radio_idx))
new_ctx = ieee80211_new_chanctx(local, chanreq, mode,
- false);
- if (IS_ERR(new_ctx))
- return PTR_ERR(new_ctx);
- } else {
- if (!curr_ctx ||
- (curr_ctx->replace_state ==
- IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
- !list_empty(&curr_ctx->reserved_links)) {
- /*
- * Another link already requested this context
- * for a reservation. Find another one hoping
- * all links assigned to it will also switch
- * soon enough.
- *
- * TODO: This needs a little more work as some
- * cases (more than 2 chanctx capable devices)
- * may fail which could otherwise succeed
- * provided some channel context juggling was
- * performed.
- *
- * Consider ctx1..3, link1..6, each ctx has 2
- * links. link1 and link2 from ctx1 request new
- * different chandefs starting 2 in-place
- * reserations with ctx4 and ctx5 replacing
- * ctx1 and ctx2 respectively. Next link5 and
- * link6 from ctx3 reserve ctx4. If link3 and
- * link4 remain on ctx2 as they are then this
- * fails unless `replace_ctx` from ctx5 is
- * replaced with ctx3.
- */
- list_for_each_entry(ctx, &local->chanctx_list,
- list) {
- if (ctx->replace_state !=
- IEEE80211_CHANCTX_REPLACE_NONE)
- continue;
-
- if (!list_empty(&ctx->reserved_links))
- continue;
-
- curr_ctx = ctx;
- break;
- }
- }
-
- /*
- * If that's true then all available contexts already
- * have reservations and cannot be used.
- */
- if (!curr_ctx ||
- (curr_ctx->replace_state ==
- IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
- !list_empty(&curr_ctx->reserved_links))
- return -EBUSY;
-
- new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
- if (!new_ctx)
- return -ENOMEM;
-
- new_ctx->replace_ctx = curr_ctx;
- new_ctx->replace_state =
- IEEE80211_CHANCTX_REPLACES_OTHER;
-
- curr_ctx->replace_ctx = new_ctx;
- curr_ctx->replace_state =
- IEEE80211_CHANCTX_WILL_BE_REPLACED;
-
- list_add_rcu(&new_ctx->list, &local->chanctx_list);
- }
+ false, radio_idx);
+ else
+ new_ctx = ieee80211_replace_chanctx(local, chanreq,
+ mode, curr_ctx);
+ if (IS_ERR(new_ctx))
+ return PTR_ERR(new_ctx);
}
list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links);
@@ -1162,11 +1240,11 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
wiphy_work_queue(sdata->local->hw.wiphy,
- &link->csa_finalize_work);
+ &link->csa.finalize_work);
break;
case NL80211_IFTYPE_STATION:
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
- &link->u.mgd.chswitch_work, 0);
+ &link->u.mgd.csa.switch_work, 0);
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_AP_VLAN:
@@ -1279,7 +1357,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
ieee80211_free_chanctx(local, old_ctx, false);
- ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false);
ieee80211_recalc_smps_chanctx(local, new_ctx);
ieee80211_recalc_radar_chanctx(local, new_ctx);
@@ -1545,6 +1623,31 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
goto err;
}
+ /* update station rate control and min width before switch */
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ struct ieee80211_link_data *link;
+
+ if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+ continue;
+
+ if (WARN_ON(!ctx->replace_ctx)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_for_each_entry(link, &ctx->reserved_links,
+ reserved_chanctx_list) {
+ if (!ieee80211_link_has_in_place_reservation(link))
+ continue;
+
+ ieee80211_chan_bw_change(local,
+ ieee80211_link_get_chanctx(link),
+ true, true);
+ }
+
+ ieee80211_recalc_chanctx_min_def(local, ctx, NULL, true);
+ }
+
/*
* All necessary vifs are ready. Perform the switch now depending on
* reservations and driver capabilities.
@@ -1612,7 +1715,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
ieee80211_recalc_chanctx_chantype(local, ctx);
ieee80211_recalc_smps_chanctx(local, ctx);
ieee80211_recalc_radar_chanctx(local, ctx);
- ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false);
list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
reserved_chanctx_list) {
@@ -1625,6 +1728,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
link->reserved_chanctx = NULL;
ieee80211_link_chanctx_reservation_complete(link);
+ ieee80211_chan_bw_change(local, ctx, false, false);
}
/*
@@ -1745,6 +1849,7 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link,
struct ieee80211_chanctx *ctx;
u8 radar_detect_width = 0;
bool reserved = false;
+ int radio_idx;
int ret;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1765,7 +1870,7 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link,
link->radar_required = ret;
ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode,
- radar_detect_width);
+ radar_detect_width, -1);
if (ret < 0)
goto out;
@@ -1775,9 +1880,11 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link,
/* Note: context is now reserved */
if (ctx)
reserved = true;
+ else if (!ieee80211_find_available_radio(local, chanreq, &radio_idx))
+ ctx = ERR_PTR(-EBUSY);
else
ctx = ieee80211_new_chanctx(local, chanreq, mode,
- assign_on_failure);
+ assign_on_failure, radio_idx);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 98310188f330..02b5476a4376 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -483,7 +483,6 @@ static const char *hw_flag_names[] = {
FLAG(REPORTS_LOW_ACK),
FLAG(SUPPORTS_TX_FRAG),
FLAG(SUPPORTS_TDLS_BUFFER_STA),
- FLAG(DEAUTH_NEED_MGD_TX_PREP),
FLAG(DOESNT_SUPPORT_QOS_NDP),
FLAG(BUFF_MMPDU_TXQ),
FLAG(SUPPORTS_VHT_EXT_NSS_BW),
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index dce37ba8ebe3..fe868b521622 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -33,7 +33,7 @@ int drv_start(struct ieee80211_local *local)
return ret;
}
-void drv_stop(struct ieee80211_local *local)
+void drv_stop(struct ieee80211_local *local, bool suspend)
{
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -41,8 +41,8 @@ void drv_stop(struct ieee80211_local *local)
if (WARN_ON(!local->started))
return;
- trace_drv_stop(local);
- local->ops->stop(&local->hw);
+ trace_drv_stop(local, suspend);
+ local->ops->stop(&local->hw, suspend);
trace_drv_return_void(local);
/* sync away all work on the tasklet before clearing started */
@@ -311,6 +311,18 @@ int drv_assign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ /*
+ * We should perhaps push emulate chanctx down and only
+ * make it call ->config() when the chanctx is actually
+ * assigned here (and unassigned below), but that's yet
+ * another change to all drivers to add assign/unassign
+ * emulation callbacks. Maybe later.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return 0;
+
if (!check_sdata_in_driver(sdata))
return -EIO;
@@ -338,6 +350,11 @@ void drv_unassign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 5d078c0a2323..d382d9729e85 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -88,7 +88,7 @@ static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata,
}
int drv_start(struct ieee80211_local *local);
-void drv_stop(struct ieee80211_local *local);
+void drv_stop(struct ieee80211_local *local, bool suspend);
#ifdef CONFIG_PM
static inline int drv_suspend(struct ieee80211_local *local,
@@ -1150,6 +1150,9 @@ drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata,
if (!check_sdata_in_driver(sdata))
return -EIO;
+ if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id))
+ return 0;
+
trace_drv_pre_channel_switch(local, sdata, ch_switch);
if (local->ops->pre_channel_switch)
ret = local->ops->pre_channel_switch(&local->hw, &sdata->vif,
@@ -1171,6 +1174,9 @@ drv_post_channel_switch(struct ieee80211_link_data *link)
if (!check_sdata_in_driver(sdata))
return -EIO;
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
+ return 0;
+
trace_drv_post_channel_switch(local, sdata);
if (local->ops->post_channel_switch)
ret = local->ops->post_channel_switch(&local->hw, &sdata->vif,
@@ -1191,6 +1197,9 @@ drv_abort_channel_switch(struct ieee80211_link_data *link)
if (!check_sdata_in_driver(sdata))
return;
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
+ return;
+
trace_drv_abort_channel_switch(local, sdata);
if (local->ops->abort_channel_switch)
@@ -1210,6 +1219,9 @@ drv_channel_switch_rx_beacon(struct ieee80211_sub_if_data *sdata,
if (!check_sdata_in_driver(sdata))
return;
+ if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id))
+ return;
+
trace_drv_channel_switch_rx_beacon(local, sdata, ch_switch);
if (local->ops->channel_switch_rx_beacon)
local->ops->channel_switch_rx_beacon(&local->hw, &sdata->vif,
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index 9f5ffdc9db28..ecbb042dd043 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
if (!he_spr_ie_elem)
return;
+
+ he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
data = he_spr_ie_elem->optional;
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
- data++;
+ he_obss_pd->non_srg_max_offset = *data++;
+
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
- he_obss_pd->max_offset = *data++;
he_obss_pd->min_offset = *data++;
+ he_obss_pd->max_offset = *data++;
+ memcpy(he_obss_pd->bss_color_bitmap, data, 8);
+ data += 8;
+ memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
he_obss_pd->enable = true;
}
}
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d7e8cf8e48b7..79caeb485fd5 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -475,7 +475,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 7ace5cdc6c26..3f74bbceeca5 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -9,7 +9,7 @@
* Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018-2023 Intel Corporation
+ * Copyright(c) 2018-2024 Intel Corporation
*/
#include <linux/delay.h>
@@ -533,12 +533,12 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
IEEE80211_PRIVACY(ifibss->privacy));
/* XXX: should not really modify cfg80211 data */
if (cbss) {
- cbss->channel = sdata->deflink.csa_chanreq.oper.chan;
+ cbss->channel = sdata->deflink.csa.chanreq.oper.chan;
cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
}
}
- ifibss->chandef = sdata->deflink.csa_chanreq.oper;
+ ifibss->chandef = sdata->deflink.csa.chanreq.oper;
/* generate the beacon */
return ieee80211_ibss_csa_beacon(sdata, NULL, changed);
@@ -785,7 +785,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
err = ieee80211_parse_ch_switch_ie(sdata, elems,
ifibss->chandef.chan->band,
vht_cap_info, &conn,
- ifibss->bssid, &csa_ie);
+ ifibss->bssid, false,
+ &csa_ie);
/* can't switch to destination channel, fail */
if (err < 0)
goto disconnect;
@@ -1745,7 +1746,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
- radar_detect_width);
+ radar_detect_width, -1);
if (ret < 0)
return ret;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index eb62b7d4b4f7..a3485e4c6132 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -26,6 +26,7 @@
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/rbtree.h>
+#include <kunit/visibility.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
@@ -974,10 +975,15 @@ struct ieee80211_link_data_managed {
bool disable_wmm_tracking;
bool operating_11g_mode;
- bool csa_waiting_bcn;
- bool csa_ignored_same_chan;
- bool csa_blocked_tx;
- struct wiphy_delayed_work chswitch_work;
+ struct {
+ struct wiphy_delayed_work switch_work;
+ struct cfg80211_chan_def ap_chandef;
+ struct ieee80211_parsed_tpe tpe;
+ unsigned long time;
+ bool waiting_bcn;
+ bool ignored_same_chan;
+ bool blocked_tx;
+ } csa;
struct wiphy_work request_smps_work;
/* used to reconfigure hardware SM PS */
@@ -1036,11 +1042,13 @@ struct ieee80211_link_data {
struct ieee80211_key __rcu *default_mgmt_key;
struct ieee80211_key __rcu *default_beacon_key;
- struct wiphy_work csa_finalize_work;
bool operating_11g_mode;
- struct ieee80211_chan_req csa_chanreq;
+ struct {
+ struct wiphy_work finalize_work;
+ struct ieee80211_chan_req chanreq;
+ } csa;
struct wiphy_work color_change_finalize_work;
struct delayed_work color_collision_detect_work;
@@ -1059,7 +1067,6 @@ struct ieee80211_link_data {
int ap_power_level; /* in dBm */
bool radar_required;
- struct wiphy_delayed_work dfs_cac_timer_work;
union {
struct ieee80211_link_data_managed mgd;
@@ -1158,6 +1165,8 @@ struct ieee80211_sub_if_data {
struct ieee80211_link_data deflink;
struct ieee80211_link_data __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct wiphy_delayed_work dfs_cac_timer_work;
+
/* for ieee80211_set_active_links_async() */
struct wiphy_work activate_links_work;
u16 desired_active_links;
@@ -1708,7 +1717,6 @@ struct ieee802_11_elems {
const struct ieee80211_he_spr *he_spr;
const struct ieee80211_mu_edca_param_set *mu_edca_param_set;
const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
- const struct ieee80211_tx_pwr_env *tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
const u8 *uora_element;
const u8 *mesh_id;
const u8 *peering;
@@ -1746,6 +1754,10 @@ struct ieee802_11_elems {
const struct ieee80211_bandwidth_indication *bandwidth_indication;
const struct ieee80211_ttlm_elem *ttlm[IEEE80211_TTLM_MAX_CNT];
+ /* note that the order in the psd values is per element, not per chandef */
+ struct ieee80211_parsed_tpe tpe;
+ struct ieee80211_parsed_tpe csa_tpe;
+
/* length of them, respectively */
u8 ext_capab_len;
u8 ssid_len;
@@ -1764,8 +1776,6 @@ struct ieee802_11_elems {
u8 perr_len;
u8 country_elem_len;
u8 bssid_index_len;
- u8 tx_pwr_env_len[IEEE80211_TPE_MAX_IE_COUNT];
- u8 tx_pwr_env_num;
u8 eht_cap_len;
/* mult-link element can be de-fragmented and thus u8 is not sufficient */
@@ -1813,6 +1823,9 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata);
+
/* This function returns the number of multicast stations connected to this
* interface. It returns -1 if that number is not tracked, that is for netdevs
* not in AP or AP_VLAN mode or when using 4addr.
@@ -1845,6 +1858,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+void ieee80211_handle_queued_frames(struct ieee80211_local *local);
+
u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
u64 *cookie, gfp_t gfp);
@@ -1968,6 +1983,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
void ieee80211_offchannel_return(struct ieee80211_local *local);
void ieee80211_roc_setup(struct ieee80211_local *local);
void ieee80211_start_next_roc(struct ieee80211_local *local);
+void ieee80211_reconfig_roc(struct ieee80211_local *local);
void ieee80211_roc_purge(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
@@ -2142,9 +2158,21 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_vht_cap *vht_cap_ie2,
struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
+_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef);
+static inline enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
+{
+ return _ieee80211_sta_cap_rx_bw(link_sta, NULL);
+}
enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
+_ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef);
+static inline enum ieee80211_sta_rx_bandwidth
+ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
+{
+ return _ieee80211_sta_cur_vht_bw(link_sta, NULL);
+}
void ieee80211_sta_init_nss(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
@@ -2202,6 +2230,8 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
* @conn: contains information about own capabilities and restrictions
* to decide which channel switch announcements can be accepted
* @bssid: the currently connected bssid (for reporting)
+ * @unprot_action: whether the frame was an unprotected frame or not,
+ * used for reporting
* @csa_ie: parsed 802.11 csa elements on count, mode, chandef and mesh ttl.
* All of them will be filled in only on success.
* Return: 0 on success, <0 on error and >0 if there is nothing to parse.
@@ -2211,12 +2241,12 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
enum nl80211_band current_band,
u32 vht_cap_info,
struct ieee80211_conn_settings *conn,
- u8 *bssid,
+ u8 *bssid, bool unprot_action,
struct ieee80211_csa_ie *csa_ie);
/* Suspend/resume and hw reconfiguration */
int ieee80211_reconfig(struct ieee80211_local *local);
-void ieee80211_stop_device(struct ieee80211_local *local);
+void ieee80211_stop_device(struct ieee80211_local *local, bool suspend);
int __ieee80211_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
@@ -2243,6 +2273,7 @@ int ieee80211_frame_duration(enum nl80211_band band, size_t len,
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_queue_params *qparam,
int ac);
+void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe);
void ieee80211_set_wmm_default(struct ieee80211_link_data *link,
bool bss_notify, bool enable_qos);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
@@ -2593,7 +2624,8 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *chanctx);
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for);
+ struct ieee80211_link_data *rsvd_for,
+ bool check_reserved);
bool ieee80211_is_radar_required(struct ieee80211_local *local);
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
@@ -2608,8 +2640,9 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
enum ieee80211_chanctx_mode chanmode,
- u8 radar_detect);
-int ieee80211_max_num_channels(struct ieee80211_local *local);
+ u8 radar_detect, int radio_idx);
+int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx);
+u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev);
void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx);
@@ -2681,6 +2714,11 @@ void ieee80211_remove_wbrf(struct ieee80211_local *local, struct cfg80211_chan_d
#define VISIBLE_IF_MAC80211_KUNIT
ieee80211_rx_result
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx);
+int ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap,
+ u8 n_partial_subchans);
+void ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd,
+ const struct cfg80211_chan_def *ap,
+ const struct cfg80211_chan_def *used);
#else
#define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym)
#define VISIBLE_IF_MAC80211_KUNIT static
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index dc42902e2693..b4ad66af3af3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -397,7 +397,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
}
}
- return ieee80211_check_combinations(sdata, NULL, 0, 0);
+ return ieee80211_check_combinations(sdata, NULL, 0, 0, -1);
}
static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@ -543,18 +543,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
sdata->vif.bss_conf.csa_active = false;
if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ sdata->deflink.u.mgd.csa.waiting_bcn = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
- wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+ wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa.finalize_work);
wiphy_work_cancel(local->hw.wiphy,
&sdata->deflink.color_change_finalize_work);
wiphy_delayed_work_cancel(local->hw.wiphy,
- &sdata->deflink.dfs_cac_timer_work);
+ &sdata->dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
chandef = sdata->vif.bss_conf.chanreq.oper;
@@ -686,14 +682,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
ieee80211_del_virtual_monitor(local);
ieee80211_recalc_idle(local);
+ ieee80211_recalc_offload(local);
if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
break;
fallthrough;
default:
- if (going_down)
- drv_remove_interface(local, sdata);
+ if (!going_down)
+ break;
+ drv_remove_interface(local, sdata);
+
+ /* Clear private driver data to prevent reuse */
+ memset(sdata->vif.drv_priv, 0, local->hw.vif_data_size);
}
ieee80211_recalc_ps(local);
@@ -702,7 +703,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
if (local->open_count == 0) {
- ieee80211_stop_device(local);
+ ieee80211_stop_device(local, false);
/* no reconfiguring after stop! */
return;
@@ -815,12 +816,6 @@ static void ieee80211_uninit(struct net_device *dev)
ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
}
-static void
-ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
- dev_fetch_sw_netstats(stats, dev->tstats);
-}
-
static int ieee80211_netdev_setup_tc(struct net_device *dev,
enum tc_setup_type type, void *type_data)
{
@@ -837,7 +832,6 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
- .ndo_get_stats64 = ieee80211_get_stats64,
.ndo_setup_tc = ieee80211_netdev_setup_tc,
};
@@ -877,7 +871,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_monitor_select_queue,
- .ndo_get_stats64 = ieee80211_get_stats64,
};
static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx,
@@ -945,7 +938,6 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit_8023,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
- .ndo_get_stats64 = ieee80211_get_stats64,
.ndo_fill_forward_path = ieee80211_netdev_fill_forward_path,
.ndo_setup_tc = ieee80211_netdev_setup_tc,
};
@@ -1121,9 +1113,6 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int ret;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return 0;
-
ASSERT_RTNL();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1145,11 +1134,13 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
ieee80211_set_default_queues(sdata);
- ret = drv_add_interface(local, sdata);
- if (WARN_ON(ret)) {
- /* ok .. stupid driver, it asked for this! */
- kfree(sdata);
- return ret;
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ ret = drv_add_interface(local, sdata);
+ if (WARN_ON(ret)) {
+ /* ok .. stupid driver, it asked for this! */
+ kfree(sdata);
+ return ret;
+ }
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -1187,9 +1178,6 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return;
-
ASSERT_RTNL();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1209,7 +1197,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
ieee80211_link_release_channel(&sdata->deflink);
- drv_remove_interface(local, sdata);
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ drv_remove_interface(local, sdata);
kfree(sdata);
}
@@ -1448,7 +1437,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
drv_remove_interface(local, sdata);
err_stop:
if (!local->open_count)
- drv_stop(local);
+ drv_stop(local, false);
err_del_bss:
sdata->bss = NULL;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1458,11 +1447,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
return res;
}
-static void ieee80211_if_free(struct net_device *dev)
-{
- free_percpu(dev->tstats);
-}
-
static void ieee80211_if_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -1470,7 +1454,6 @@ static void ieee80211_if_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &ieee80211_dataif_ops;
dev->needs_free_netdev = true;
- dev->priv_destructor = ieee80211_if_free;
}
static void ieee80211_iface_process_skb(struct ieee80211_local *local,
@@ -1746,6 +1729,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
wiphy_work_init(&sdata->work, ieee80211_iface_work);
wiphy_work_init(&sdata->activate_links_work,
ieee80211_activate_links_work);
+ wiphy_delayed_work_init(&sdata->dfs_cac_timer_work,
+ ieee80211_dfs_cac_timer_work);
switch (type) {
case NL80211_IFTYPE_P2P_GO:
@@ -2101,11 +2086,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
- ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!ndev->tstats) {
- free_netdev(ndev);
- return -ENOMEM;
- }
+ ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
ndev->needed_headroom = local->tx_headroom +
4*6 /* four MAC addresses */
@@ -2118,7 +2099,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0) {
- ieee80211_if_free(ndev);
free_netdev(ndev);
return ret;
}
@@ -2363,3 +2343,26 @@ void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata)
else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
atomic_dec(&sdata->u.vlan.num_mcast_sta);
}
+
+void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA))
+ return;
+
+ ieee80211_stop_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_queues = true;
+}
+
+void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (sdata->csa_blocked_queues) {
+ ieee80211_wake_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_queues = false;
+ }
+}
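
A hedged sketch of the intended pairing (hypothetical callers, using only the two helpers declared in ieee80211_i.h earlier in this patch): block once when a switch starts, unblock once when it completes, is aborted, or the interface stops. Drivers advertising HANDLES_QUIET_CSA turn the block into a no-op, and the unblock side only wakes queues it actually stopped.

/* hypothetical caller sketch, for illustration only */
static void example_csa_start(struct ieee80211_sub_if_data *sdata)
{
	/* quiet data TX on this vif unless the driver quiets the air itself */
	ieee80211_vif_block_queues_csa(sdata);
}

static void example_csa_done_or_aborted(struct ieee80211_sub_if_data *sdata)
{
	/* safe to call unconditionally; only wakes what was blocked here */
	ieee80211_vif_unblock_queues_csa(sdata);
}
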
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index af0321408a97..1a211b8d4057 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -37,7 +37,7 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
link_conf->link_id = link_id;
link_conf->vif = &sdata->vif;
- wiphy_work_init(&link->csa_finalize_work,
+ wiphy_work_init(&link->csa.finalize_work,
ieee80211_csa_finalize_work);
wiphy_work_init(&link->color_change_finalize_work,
ieee80211_color_change_finalize_work);
@@ -45,8 +45,6 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
ieee80211_color_collision_detection_work);
INIT_LIST_HEAD(&link->assigned_chanctx_list);
INIT_LIST_HEAD(&link->reserved_chanctx_list);
- wiphy_delayed_work_init(&link->dfs_cac_timer_work,
- ieee80211_dfs_cac_timer_work);
if (!deflink) {
switch (sdata->vif.type) {
@@ -74,7 +72,9 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
cancel_delayed_work_sync(&link->color_collision_detect_work);
wiphy_work_cancel(link->sdata->local->hw.wiphy,
- &link->csa_finalize_work);
+ &link->color_change_finalize_work);
+ wiphy_work_cancel(link->sdata->local->hw.wiphy,
+ &link->csa.finalize_work);
ieee80211_link_release_channel(link);
}
@@ -359,6 +359,18 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
ieee80211_teardown_tdls_peers(link);
__ieee80211_link_release_channel(link, true);
+
+ /*
+ * If CSA is (still) active while the link is deactivated,
+ * just schedule the channel switch work for the time we
+ * had previously calculated, and we'll take the process
+ * from there.
+ */
+ if (link->conf->csa_active)
+ wiphy_delayed_work_queue(local->hw.wiphy,
+ &link->u.mgd.csa.switch_work,
+ link->u.mgd.csa.time -
+ jiffies);
}
list_for_each_entry(sta, &local->sta_list, list) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4eaea0a9975b..a3104b6ea6f0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -148,7 +148,7 @@ static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local,
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
/* force it also for scanning, since drivers might config differently */
- if (offchannel_flag || local->scanning ||
+ if (offchannel_flag || local->scanning || local->in_reconfig ||
!cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) {
local->hw.conf.chandef = chandef;
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
@@ -337,6 +337,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
might_sleep();
+ WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif));
+
if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
return;
@@ -369,7 +371,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) {
u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS;
- /* FIXME: should be for each link */
trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf,
changed);
if (local->ops->link_info_changed)
@@ -423,9 +424,9 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
BSS_CHANGED_ERP_SLOT;
}
-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+/* context: requires softirqs disabled */
+void ieee80211_handle_queued_frames(struct ieee80211_local *local)
{
- struct ieee80211_local *local = from_tasklet(local, t, tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -450,6 +451,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
}
}
+static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+{
+ struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+
+ ieee80211_handle_queued_frames(local);
+}
+
static void ieee80211_restart_work(struct work_struct *work)
{
struct ieee80211_local *local =
@@ -1083,6 +1091,27 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
return 0;
}
+static bool
+ieee80211_ifcomb_check(const struct ieee80211_iface_combination *c, int n_comb)
+{
+ int i, j;
+
+ for (i = 0; i < n_comb; i++, c++) {
+ /* DFS is not supported with multi-channel combinations yet */
+ if (c->radar_detect_widths &&
+ c->num_different_channels > 1)
+ return false;
+
+ /* mac80211 doesn't support more than one IBSS interface */
+ for (j = 0; j < c->n_limits; j++)
+ if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
+ c->limits[j].max > 1)
+ return false;
+ }
+
+ return true;
+}
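
For illustration, a hypothetical combination (made-up values) that this check rejects on both grounds, multi-channel DFS and more than one IBSS interface:

static const struct ieee80211_iface_limit bad_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
	{ .max = 2, .types = BIT(NL80211_IFTYPE_ADHOC) },	/* >1 IBSS: rejected */
};

static const struct ieee80211_iface_combination bad_comb = {
	.limits = bad_limits,
	.n_limits = ARRAY_SIZE(bad_limits),
	.max_interfaces = 3,
	.num_different_channels = 2,
	.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20), /* DFS + multi-channel: rejected */
};

/* ieee80211_ifcomb_check(&bad_comb, 1) would return false */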
+
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -1153,9 +1182,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS)))
return -EINVAL;
-
- if (WARN_ON(ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP)))
- return -EINVAL;
}
#ifdef CONFIG_PM
@@ -1172,17 +1198,20 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (comb->num_different_channels > 1)
return -EINVAL;
}
- } else {
- /* DFS is not supported with multi-channel combinations yet */
- for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *comb;
+ }
- comb = &local->hw.wiphy->iface_combinations[i];
+ if (hw->wiphy->n_radio) {
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ const struct wiphy_radio *radio = &hw->wiphy->radio[i];
- if (comb->radar_detect_widths &&
- comb->num_different_channels > 1)
+ if (!ieee80211_ifcomb_check(radio->iface_combinations,
+ radio->n_iface_combinations))
return -EINVAL;
}
+ } else {
+ if (!ieee80211_ifcomb_check(hw->wiphy->iface_combinations,
+ hw->wiphy->n_iface_combinations))
+ return -EINVAL;
}
/* Only HW csum features are currently compatible with mac80211 */
@@ -1312,18 +1341,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
- /* mac80211 doesn't support more than one IBSS interface right now */
- for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *c;
- int j;
-
- c = &hw->wiphy->iface_combinations[i];
-
- for (j = 0; j < c->n_limits; j++)
- if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
- c->limits[j].max > 1)
- return -EINVAL;
- }
local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
sizeof(void *) * channels, GFP_KERNEL);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index cbc9b5e40cb3..f94e4be0be12 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1312,7 +1312,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
vht_cap_info, &conn,
- sdata->vif.addr,
+ sdata->vif.addr, false,
&csa_ie);
if (err < 0)
return false;
@@ -1776,6 +1776,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
ifmsh->last_preq = jiffies;
ifmsh->next_perr = jiffies;
ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+ ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a6b62169f084..c0a5c75cddcb 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -1017,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct mesh_preq_queue *preq, *tmp;
struct sk_buff *skb;
while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(mpath->sdata, skb);
+
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
+ list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
+ if (ether_addr_equal(mpath->dst, preq->dst)) {
+ list_del(&preq->list);
+ kfree(preq);
+ --ifmsh->preq_queue_len;
+ }
+ }
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}
/**
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a5f2d3cfe60d..4779a18ab75d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -606,11 +606,218 @@ static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata,
return true;
}
+static int ieee80211_chandef_num_subchans(const struct cfg80211_chan_def *c)
+{
+ if (c->width == NL80211_CHAN_WIDTH_80P80)
+ return 4 + 4;
+
+ return nl80211_chan_width_to_mhz(c->width) / 20;
+}
+
+static int ieee80211_chandef_num_widths(const struct cfg80211_chan_def *c)
+{
+ switch (c->width) {
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ case NL80211_CHAN_WIDTH_320:
+ return 5;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+VISIBLE_IF_MAC80211_KUNIT int
+ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap,
+ u8 n_partial_subchans)
+{
+ int n = ieee80211_chandef_num_subchans(ap);
+ struct cfg80211_chan_def tmp = *ap;
+ int offset = 0;
+
+ /*
+ * Given a chandef (in this context, it's the AP's) and a number
+ * of subchannels that we want to look at ('n_partial_subchans'),
+ * calculate the offset in number of subchannels between the full
+ * and the subset with the desired width.
+ */
+
+ /* same number of subchannels means no offset, obviously */
+ if (n == n_partial_subchans)
+ return 0;
+
+ /* don't WARN - misconfigured APs could cause this if their N > width */
+ if (n < n_partial_subchans)
+ return 0;
+
+ while (ieee80211_chandef_num_subchans(&tmp) > n_partial_subchans) {
+ u32 prev = tmp.center_freq1;
+
+ ieee80211_chandef_downgrade(&tmp, NULL);
+
+ /*
+ * if center_freq moved up, half the original channels
+ * are gone now but were below, so increase offset
+ */
+ if (prev < tmp.center_freq1)
+ offset += ieee80211_chandef_num_subchans(&tmp);
+ }
+
+ /*
+ * 80+80 with secondary 80 below primary - four subchannels for it
+ * (we cannot downgrade *to* 80+80, so no need to consider 'tmp')
+ */
+ if (ap->width == NL80211_CHAN_WIDTH_80P80 &&
+ ap->center_freq2 < ap->center_freq1)
+ offset += 4;
+
+ return offset;
+}
+EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_calc_chandef_subchan_offset);
+
+VISIBLE_IF_MAC80211_KUNIT void
+ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd,
+ const struct cfg80211_chan_def *ap,
+ const struct cfg80211_chan_def *used)
+{
+ u8 needed = ieee80211_chandef_num_subchans(used);
+ u8 have = ieee80211_chandef_num_subchans(ap);
+ u8 tmp[IEEE80211_TPE_PSD_ENTRIES_320MHZ];
+ u8 offset;
+
+ if (!psd->valid)
+ return;
+
+ /* if N is zero, all defaults were used, no point in rearranging */
+ if (!psd->n)
+ goto out;
+
+ BUILD_BUG_ON(sizeof(tmp) != sizeof(psd->power));
+
+ /*
+ * This assumes that 'N' is consistent with the HE channel, as
+ * it should be (otherwise the AP is broken).
+ *
+ * In psd->power we have values in the order 0..N, 0..K, where
+ * N+K should cover the entire channel per 'ap', but even if it
+ * doesn't then we've pre-filled 'unlimited' as defaults.
+ *
+ * But this is all the wrong order, we want to have them in the
+ * order of the 'used' channel.
+ *
+ * So for example, we could have a 320 MHz EHT AP, which has the
+ * HE channel as 80 MHz (e.g. due to puncturing, which doesn't
+ * seem to be considered for the TPE), as follows:
+ *
+ * EHT 320:    |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * HE 80:                              |  |  |  |  |
+ * used 160:                           |  |  |  |  |  |  |  |  |
+ *
+ * N entries:                          |--|--|--|--|
+ * K entries:  |--|--|--|--|--|--|--|--|           |--|--|--|--|
+ * power idx:    4  5  6  7  8  9 10 11  0  1  2  3 12 13 14 15
+ * full chan:    0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
+ * used chan:                            0  1  2  3  4  5  6  7
+ *
+ * The idx in the power array ('power idx') is like this since it
+ * comes directly from the element's N and K entries in their
+ * element order, and those are this way for HE compatibility.
+ *
+ * Rearrange them as desired here, first by putting them into the
+ * 'full chan' order, and then selecting the necessary subset for
+ * the 'used chan'.
+ */
+
+ /* first reorder according to AP channel */
+ offset = ieee80211_calc_chandef_subchan_offset(ap, psd->n);
+ for (int i = 0; i < have; i++) {
+ if (i < offset)
+ tmp[i] = psd->power[i + psd->n];
+ else if (i < offset + psd->n)
+ tmp[i] = psd->power[i - offset];
+ else
+ tmp[i] = psd->power[i];
+ }
+
+ /*
+ * and then select the subset for the used channel
+ * (set everything to defaults first in case a driver is confused)
+ */
+ memset(psd->power, IEEE80211_TPE_PSD_NO_LIMIT, sizeof(psd->power));
+ offset = ieee80211_calc_chandef_subchan_offset(ap, needed);
+ for (int i = 0; i < needed; i++)
+ psd->power[i] = tmp[offset + i];
+
+out:
+ /* limit, but don't lie if there are defaults in the data */
+ if (needed < psd->count)
+ psd->count = needed;
+}
+EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_rearrange_tpe_psd);
+
+static void ieee80211_rearrange_tpe(struct ieee80211_parsed_tpe *tpe,
+ const struct cfg80211_chan_def *ap,
+ const struct cfg80211_chan_def *used)
+{
+ /* ignore this completely for narrow/invalid channels */
+ if (!ieee80211_chandef_num_subchans(ap) ||
+ !ieee80211_chandef_num_subchans(used)) {
+ ieee80211_clear_tpe(tpe);
+ return;
+ }
+
+ for (int i = 0; i < 2; i++) {
+ int needed_pwr_count;
+
+ ieee80211_rearrange_tpe_psd(&tpe->psd_local[i], ap, used);
+ ieee80211_rearrange_tpe_psd(&tpe->psd_reg_client[i], ap, used);
+
+ /* limit this to the widths we actually need */
+ needed_pwr_count = ieee80211_chandef_num_widths(used);
+ if (needed_pwr_count < tpe->max_local[i].count)
+ tpe->max_local[i].count = needed_pwr_count;
+ if (needed_pwr_count < tpe->max_reg_client[i].count)
+ tpe->max_reg_client[i].count = needed_pwr_count;
+ }
+}
+
+/*
+ * The AP part of the channel request is used to distinguish settings
+ * to the device used for wider bandwidth OFDMA. This is used in the
+ * channel context code to assign two channel contexts even if they're
+ * both for the same channel, if the AP bandwidths are incompatible.
+ * If not EHT (or driver override) then ap.chan == NULL indicates that
+ * there's no wider BW OFDMA used.
+ */
+static void ieee80211_set_chanreq_ap(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_chan_req *chanreq,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *ap_chandef)
+{
+ chanreq->ap.chan = NULL;
+
+ if (conn->mode < IEEE80211_CONN_MODE_EHT)
+ return;
+ if (sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW)
+ return;
+
+ chanreq->ap = *ap_chandef;
+}
+
static struct ieee802_11_elems *
ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
struct ieee80211_conn_settings *conn,
struct cfg80211_bss *cbss, int link_id,
- struct ieee80211_chan_req *chanreq)
+ struct ieee80211_chan_req *chanreq,
+ struct cfg80211_chan_def *ap_chandef)
{
const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies);
struct ieee80211_bss *bss = (void *)cbss->priv;
@@ -623,7 +830,6 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
};
struct ieee802_11_elems *elems;
struct ieee80211_supported_band *sband;
- struct cfg80211_chan_def ap_chandef;
enum ieee80211_conn_mode ap_mode;
int ret;
@@ -634,7 +840,7 @@ again:
return ERR_PTR(-ENOMEM);
ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
- elems, false, conn, &ap_chandef);
+ elems, false, conn, ap_chandef);
/* this should be impossible since parsing depends on our mode */
if (WARN_ON(ap_mode > conn->mode)) {
@@ -701,14 +907,9 @@ again:
break;
}
- chanreq->oper = ap_chandef;
+ chanreq->oper = *ap_chandef;
- /* wider-bandwidth OFDMA is only done in EHT */
- if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
- !(sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW))
- chanreq->ap = ap_chandef;
- else
- chanreq->ap.chan = NULL;
+ ieee80211_set_chanreq_ap(sdata, chanreq, conn, ap_chandef);
while (!ieee80211_chandef_usable(sdata, &chanreq->oper,
IEEE80211_CHAN_DISABLED)) {
@@ -738,7 +939,7 @@ again:
IEEE80211_CONN_BW_LIMIT_160);
}
- if (chanreq->oper.width != ap_chandef.width || ap_mode != conn->mode)
+ if (chanreq->oper.width != ap_chandef->width || ap_mode != conn->mode)
sdata_info(sdata,
"regulatory prevented using AP config, downgraded\n");
@@ -790,6 +991,7 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link,
struct ieee80211_channel *channel = link->conf->chanreq.oper.chan;
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_chan_req chanreq = {};
+ struct cfg80211_chan_def ap_chandef;
enum ieee80211_conn_mode ap_mode;
u32 vht_cap_info = 0;
u16 ht_opmode;
@@ -805,7 +1007,7 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link,
ap_mode = ieee80211_determine_ap_chan(sdata, channel, vht_cap_info,
elems, true, &link->u.mgd.conn,
- &chanreq.ap);
+ &ap_chandef);
if (ap_mode != link->u.mgd.conn.mode) {
link_info(link,
@@ -815,10 +1017,9 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link,
return -EINVAL;
}
- chanreq.oper = chanreq.ap;
- if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT ||
- sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW)
- chanreq.ap.chan = NULL;
+ chanreq.oper = ap_chandef;
+ ieee80211_set_chanreq_ap(sdata, &chanreq, &link->u.mgd.conn,
+ &ap_chandef);
/*
* if HT operation mode changed store the new one -
@@ -843,6 +1044,16 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link,
ieee80211_min_bw_limit_from_chandef(&chanreq.oper))
ieee80211_chandef_downgrade(&chanreq.oper, NULL);
+ if (ap_chandef.chan->band == NL80211_BAND_6GHZ &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_rearrange_tpe(&elems->tpe, &ap_chandef,
+ &chanreq.oper);
+ if (memcmp(&link->conf->tpe, &elems->tpe, sizeof(elems->tpe))) {
+ link->conf->tpe = elems->tpe;
+ *changed |= BSS_CHANGED_TPE;
+ }
+ }
+
if (ieee80211_chanreq_identical(&chanreq, &link->conf->chanreq))
return 0;
@@ -1862,12 +2073,12 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
}
/* spectrum management related things */
-static void ieee80211_chswitch_work(struct wiphy *wiphy,
- struct wiphy_work *work)
+static void ieee80211_csa_switch_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
{
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
- u.mgd.chswitch_work.work);
+ u.mgd.csa.switch_work.work);
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1885,6 +2096,18 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
return;
/*
+ * If the link isn't active (now), we cannot wait for beacons, won't
+ * have a reserved chanctx, etc. Just switch over the chandef and
+ * update cfg80211 directly.
+ */
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) {
+ link->conf->chanreq = link->csa.chanreq;
+ cfg80211_ch_switch_notify(sdata->dev, &link->csa.chanreq.oper,
+ link->link_id);
+ return;
+ }
+
+ /*
* using reservation isn't immediate as it may be deferred until later
* with multi-vif. once reservation is complete it will re-schedule the
* work with no reserved_chanctx so verify chandef to check if it
@@ -1902,9 +2125,9 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
ret = ieee80211_link_use_reserved_context(link);
if (ret) {
- sdata_info(sdata,
- "failed to use reserved channel context, disconnecting (err=%d)\n",
- ret);
+ link_info(link,
+ "failed to use reserved channel context, disconnecting (err=%d)\n",
+ ret);
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
}
@@ -1912,15 +2135,29 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
}
if (!ieee80211_chanreq_identical(&link->conf->chanreq,
- &link->csa_chanreq)) {
- sdata_info(sdata,
- "failed to finalize channel switch, disconnecting\n");
+ &link->csa.chanreq)) {
+ link_info(link,
+ "failed to finalize channel switch, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
return;
}
- link->u.mgd.csa_waiting_bcn = true;
+ link->u.mgd.csa.waiting_bcn = true;
+
+ /* apply new TPE restrictions immediately on the new channel */
+ if (link->u.mgd.csa.ap_chandef.chan->band == NL80211_BAND_6GHZ &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_rearrange_tpe(&link->u.mgd.csa.tpe,
+ &link->u.mgd.csa.ap_chandef,
+ &link->conf->chanreq.oper);
+ if (memcmp(&link->conf->tpe, &link->u.mgd.csa.tpe,
+ sizeof(link->u.mgd.csa.tpe))) {
+ link->conf->tpe = link->u.mgd.csa.tpe;
+ ieee80211_link_info_change_notify(sdata, link,
+ BSS_CHANGED_TPE);
+ }
+ }
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
@@ -1929,7 +2166,6 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
- struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int ret;
@@ -1937,26 +2173,22 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
WARN_ON(!link->conf->csa_active);
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ ieee80211_vif_unblock_queues_csa(sdata);
link->conf->csa_active = false;
- link->u.mgd.csa_blocked_tx = false;
- link->u.mgd.csa_waiting_bcn = false;
+ link->u.mgd.csa.blocked_tx = false;
+ link->u.mgd.csa.waiting_bcn = false;
ret = drv_post_channel_switch(link);
if (ret) {
- sdata_info(sdata,
- "driver post channel switch failed, disconnecting\n");
+ link_info(link,
+ "driver post channel switch failed, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
return;
}
- cfg80211_ch_switch_notify(sdata->dev, &link->reserved.oper,
+ cfg80211_ch_switch_notify(sdata->dev, &link->conf->chanreq.oper,
link->link_id);
}
@@ -1971,7 +2203,8 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
if (!success) {
sdata_info(sdata,
- "driver channel switch failed, disconnecting\n");
+ "driver channel switch failed (link %d), disconnecting\n",
+ link_id);
wiphy_work_queue(sdata->local->hw.wiphy,
&sdata->u.mgd.csa_connection_drop_work);
} else {
@@ -1984,7 +2217,7 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
}
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
- &link->u.mgd.chswitch_work, 0);
+ &link->u.mgd.csa.switch_work, 0);
}
rcu_read_unlock();
@@ -2004,126 +2237,308 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
ieee80211_link_unreserve_chanctx(link);
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ ieee80211_vif_unblock_queues_csa(sdata);
link->conf->csa_active = false;
- link->u.mgd.csa_blocked_tx = false;
+ link->u.mgd.csa.blocked_tx = false;
drv_abort_channel_switch(link);
}
+struct sta_csa_rnr_iter_data {
+ struct ieee80211_link_data *link;
+ struct ieee80211_channel *chan;
+ u8 mld_id;
+};
+
+static enum cfg80211_rnr_iter_ret
+ieee80211_sta_csa_rnr_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ struct sta_csa_rnr_iter_data *data = _data;
+ struct ieee80211_link_data *link = data->link;
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ const struct ieee80211_tbtt_info_ge_11 *ti;
+ enum nl80211_band band;
+ unsigned int center_freq;
+ int link_id;
+
+ if (type != IEEE80211_TBTT_INFO_TYPE_TBTT)
+ return RNR_ITER_CONTINUE;
+
+ if (tbtt_info_len < sizeof(*ti))
+ return RNR_ITER_CONTINUE;
+
+ ti = (const void *)tbtt_info;
+
+ if (ti->mld_params.mld_id != data->mld_id)
+ return RNR_ITER_CONTINUE;
+
+ link_id = le16_get_bits(ti->mld_params.params,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+ if (link_id != data->link->link_id)
+ return RNR_ITER_CONTINUE;
+
+ /* we found the entry for our link! */
+
+ /* this AP is confused, it had this right before ... just disconnect */
+ if (!ieee80211_operating_class_to_band(info->op_class, &band)) {
+ link_info(link,
+ "AP now has invalid operating class in RNR, disconnect\n");
+ wiphy_work_queue(sdata->local->hw.wiphy,
+ &ifmgd->csa_connection_drop_work);
+ return RNR_ITER_BREAK;
+ }
+
+ center_freq = ieee80211_channel_to_frequency(info->channel, band);
+ data->chan = ieee80211_get_channel(sdata->local->hw.wiphy, center_freq);
+
+ return RNR_ITER_BREAK;
+}
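/*
 * For illustration, a hypothetical standalone helper (not mac80211 code)
 * showing the 6 GHz channel-to-frequency rule that the lookup above
 * relies on via ieee80211_channel_to_frequency():
 */
static unsigned int example_6ghz_chan_to_freq(unsigned int chan)
{
	/* channel 2 is special (5935 MHz); otherwise 5950 + 5 * chan MHz */
	return chan == 2 ? 5935 : 5950 + 5 * chan;
}
/* e.g. operating class 131 (6 GHz, 20 MHz), channel 1 -> 5955 MHz */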
+
+static void
+ieee80211_sta_other_link_csa_disappeared(struct ieee80211_link_data *link,
+ struct ieee802_11_elems *elems)
+{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct sta_csa_rnr_iter_data data = {
+ .link = link,
+ };
+
+ /*
+ * If we get here, we see a beacon from another link without
+ * CSA still being reported for it, so now we have to check
+ * if the CSA was aborted or completed. This may not even be
+ * possible to tell reliably if the CSA was only done to change
+ * the puncturing, but in that case, if the link is inactive,
+ * we don't really care, and if it's an active link (or when
+ * it's activated later) we'll get a beacon and adjust.
+ */
+
+ if (WARN_ON(!elems->ml_basic))
+ return;
+
+ data.mld_id = ieee80211_mle_get_mld_id((const void *)elems->ml_basic);
+
+ /*
+ * So in order to do this, iterate the RNR element(s) and see
+ * what channel is reported now.
+ */
+ cfg80211_iter_rnr(elems->ie_start, elems->total_len,
+ ieee80211_sta_csa_rnr_iter, &data);
+
+ if (!data.chan) {
+ link_info(link,
+ "couldn't find (valid) channel in RNR for CSA, disconnect\n");
+ wiphy_work_queue(sdata->local->hw.wiphy,
+ &ifmgd->csa_connection_drop_work);
+ return;
+ }
+
+ /*
+ * If it doesn't match the CSA, then assume it aborted. This
+ * may erroneously detect that it was _not_ aborted when it
+ * was in fact aborted but only changed the bandwidth or the
+ * puncturing configuration; we don't have enough data to
+ * detect that case.
+ */
+ if (data.chan != link->csa.chanreq.oper.chan)
+ ieee80211_sta_abort_chanswitch(link);
+}
+
+enum ieee80211_csa_source {
+ IEEE80211_CSA_SOURCE_BEACON,
+ IEEE80211_CSA_SOURCE_OTHER_LINK,
+ IEEE80211_CSA_SOURCE_PROT_ACTION,
+ IEEE80211_CSA_SOURCE_UNPROT_ACTION,
+};
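/*
 * The four sources describe how the CSA information reached us: a beacon
 * from the AP itself, a per-STA profile for another link inside a
 * multi-link element, a protected action frame (spectrum management or
 * protected dual of public action), or an unprotected public action
 * frame, which is trusted the least.
 */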
+
static void
ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
u64 timestamp, u32 device_timestamp,
- struct ieee802_11_elems *elems,
- bool beacon)
+ struct ieee802_11_elems *full_elems,
+ struct ieee802_11_elems *csa_elems,
+ enum ieee80211_csa_source source)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct cfg80211_bss *cbss = link->conf->bss;
+ struct ieee80211_chanctx *chanctx = NULL;
struct ieee80211_chanctx_conf *conf;
- struct ieee80211_chanctx *chanctx;
- enum nl80211_band current_band;
- struct ieee80211_csa_ie csa_ie;
+ struct ieee80211_csa_ie csa_ie = {};
struct ieee80211_channel_switch ch_switch = {
.link_id = link->link_id,
+ .timestamp = timestamp,
+ .device_timestamp = device_timestamp,
};
- struct ieee80211_bss *bss;
- unsigned long timeout;
+ unsigned long now;
int res;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!cbss)
- return;
+ if (csa_elems) {
+ struct cfg80211_bss *cbss = link->conf->bss;
+ enum nl80211_band current_band;
+ struct ieee80211_bss *bss;
- current_band = cbss->channel->band;
- bss = (void *)cbss->priv;
- res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
- bss->vht_cap_info,
- &link->u.mgd.conn,
- link->u.mgd.bssid, &csa_ie);
+ if (WARN_ON(!cbss))
+ return;
+
+ current_band = cbss->channel->band;
+ bss = (void *)cbss->priv;
+
+ res = ieee80211_parse_ch_switch_ie(sdata, csa_elems,
+ current_band,
+ bss->vht_cap_info,
+ &link->u.mgd.conn,
+ link->u.mgd.bssid,
+ source == IEEE80211_CSA_SOURCE_UNPROT_ACTION,
+ &csa_ie);
+ if (res == 0) {
+ ch_switch.block_tx = csa_ie.mode;
+ ch_switch.chandef = csa_ie.chanreq.oper;
+ ch_switch.count = csa_ie.count;
+ ch_switch.delay = csa_ie.max_switch_time;
+ }
- if (!res) {
- ch_switch.timestamp = timestamp;
- ch_switch.device_timestamp = device_timestamp;
- ch_switch.block_tx = csa_ie.mode;
- ch_switch.chandef = csa_ie.chanreq.oper;
- ch_switch.count = csa_ie.count;
- ch_switch.delay = csa_ie.max_switch_time;
+ link->u.mgd.csa.tpe = csa_elems->csa_tpe;
+ } else {
+ /*
+ * If there was no per-STA profile for this link, we
+ * get called with csa_elems == NULL. This of course means
+ * there are no CSA elements, so set res=1 indicating
+ * no more CSA.
+ */
+ res = 1;
}
- if (res < 0)
+ if (res < 0) {
+ /* ignore this case, not a protected frame */
+ if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION)
+ return;
goto drop_connection;
+ }
if (link->conf->csa_active) {
- /* already processing - disregard action frames */
- if (!beacon)
+ switch (source) {
+ case IEEE80211_CSA_SOURCE_PROT_ACTION:
+ case IEEE80211_CSA_SOURCE_UNPROT_ACTION:
+ /* already processing - disregard action frames */
return;
+ case IEEE80211_CSA_SOURCE_BEACON:
+ if (link->u.mgd.csa.waiting_bcn) {
+ ieee80211_chswitch_post_beacon(link);
+ /*
+ * If the CSA is still present after the switch
+ * we need to consider it as a new CSA (possibly
+ * to self). This happens by not returning here
+ * so we'll get to the check below.
+ */
+ } else if (res) {
+ ieee80211_sta_abort_chanswitch(link);
+ return;
+ } else {
+ drv_channel_switch_rx_beacon(sdata, &ch_switch);
+ return;
+ }
+ break;
+ case IEEE80211_CSA_SOURCE_OTHER_LINK:
+ /* active link: we want to see the beacon to continue */
+ if (ieee80211_vif_link_active(&sdata->vif,
+ link->link_id))
+ return;
- if (link->u.mgd.csa_waiting_bcn) {
- ieee80211_chswitch_post_beacon(link);
- /*
- * If the CSA IE is still present in the beacon after
- * the switch, we need to consider it as a new CSA
- * (possibly to self) - this happens by not returning
- * here so we'll get to the check below.
- */
- } else if (res) {
- ieee80211_sta_abort_chanswitch(link);
- return;
- } else {
- drv_channel_switch_rx_beacon(sdata, &ch_switch);
+ /* switch work ran, so just complete the process */
+ if (link->u.mgd.csa.waiting_bcn) {
+ ieee80211_chswitch_post_beacon(link);
+ /*
+ * If the CSA is still present after the switch
+ * we need to consider it as a new CSA (possibly
+ * to self). This happens by not returning here
+ * so we'll get to the check below.
+ */
+ break;
+ }
+
+ /* link still has CSA but we already know, do nothing */
+ if (!res)
+ return;
+
+ /* check in the RNR if the CSA aborted */
+ ieee80211_sta_other_link_csa_disappeared(link,
+ full_elems);
return;
}
}
- /* nothing to do at all - no active CSA nor a new one */
- if (res)
+ /* no active CSA nor a new one */
+ if (res) {
+ /*
+ * However, we may have stopped queues when receiving a public
+ * action frame that couldn't be protected, if it had the quiet
+ * bit set. This is a trade-off: we want to be quiet as soon as
+ * possible, but also don't trust the public action frame much,
+ * as it can't be protected.
+ */
+ if (unlikely(link->u.mgd.csa.blocked_tx)) {
+ link->u.mgd.csa.blocked_tx = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
+ }
return;
+ }
+
+ /*
+ * We don't really trust public action frames, but block queues (go to
+ * quiet mode) for them anyway; we should get a beacon soon to either
+ * know what the CSA really is, or figure out the public action frame
+ * was actually an attack.
+ */
+ if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION) {
+ if (csa_ie.mode) {
+ link->u.mgd.csa.blocked_tx = true;
+ ieee80211_vif_block_queues_csa(sdata);
+ }
+ return;
+ }
if (link->conf->chanreq.oper.chan->band !=
csa_ie.chanreq.oper.chan->band) {
- sdata_info(sdata,
- "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
- link->u.mgd.bssid,
- csa_ie.chanreq.oper.chan->center_freq,
- csa_ie.chanreq.oper.width,
- csa_ie.chanreq.oper.center_freq1,
- csa_ie.chanreq.oper.center_freq2);
+ link_info(link,
+ "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+ link->u.mgd.bssid,
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chanreq.oper,
IEEE80211_CHAN_DISABLED)) {
- sdata_info(sdata,
- "AP %pM switches to unsupported channel "
- "(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
- "disconnecting\n",
- link->u.mgd.bssid,
- csa_ie.chanreq.oper.chan->center_freq,
- csa_ie.chanreq.oper.chan->freq_offset,
- csa_ie.chanreq.oper.width,
- csa_ie.chanreq.oper.center_freq1,
- csa_ie.chanreq.oper.freq1_offset,
- csa_ie.chanreq.oper.center_freq2);
+ link_info(link,
+ "AP %pM switches to unsupported channel (%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), disconnecting\n",
+ link->u.mgd.bssid,
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.chan->freq_offset,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.freq1_offset,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
if (cfg80211_chandef_identical(&csa_ie.chanreq.oper,
&link->conf->chanreq.oper) &&
- (!csa_ie.mode || !beacon)) {
- if (link->u.mgd.csa_ignored_same_chan)
+ (!csa_ie.mode || source != IEEE80211_CSA_SOURCE_BEACON)) {
+ if (link->u.mgd.csa.ignored_same_chan)
return;
- sdata_info(sdata,
- "AP %pM tries to chanswitch to same channel, ignore\n",
- link->u.mgd.bssid);
- link->u.mgd.csa_ignored_same_chan = true;
+ link_info(link,
+ "AP %pM tries to chanswitch to same channel, ignore\n",
+ link->u.mgd.bssid);
+ link->u.mgd.csa.ignored_same_chan = true;
return;
}
@@ -2138,64 +2553,78 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
- if (!conf) {
- sdata_info(sdata,
- "no channel context assigned to vif?, disconnecting\n");
+ if (ieee80211_vif_link_active(&sdata->vif, link->link_id) && !conf) {
+ link_info(link,
+ "no channel context assigned to vif?, disconnecting\n");
goto drop_connection;
}
- chanctx = container_of(conf, struct ieee80211_chanctx, conf);
+ if (conf)
+ chanctx = container_of(conf, struct ieee80211_chanctx, conf);
if (!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
- sdata_info(sdata,
- "driver doesn't support chan-switch with channel contexts\n");
+ link_info(link,
+ "driver doesn't support chan-switch with channel contexts\n");
goto drop_connection;
}
if (drv_pre_channel_switch(sdata, &ch_switch)) {
- sdata_info(sdata,
- "preparing for channel switch failed, disconnecting\n");
+ link_info(link,
+ "preparing for channel switch failed, disconnecting\n");
goto drop_connection;
}
- res = ieee80211_link_reserve_chanctx(link, &csa_ie.chanreq,
- chanctx->mode, false);
- if (res) {
- sdata_info(sdata,
- "failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
- res);
- goto drop_connection;
+ link->u.mgd.csa.ap_chandef = csa_ie.chanreq.ap;
+
+ link->csa.chanreq.oper = csa_ie.chanreq.oper;
+ ieee80211_set_chanreq_ap(sdata, &link->csa.chanreq, &link->u.mgd.conn,
+ &csa_ie.chanreq.ap);
+
+ if (chanctx) {
+ res = ieee80211_link_reserve_chanctx(link, &link->csa.chanreq,
+ chanctx->mode, false);
+ if (res) {
+ link_info(link,
+ "failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
+ res);
+ goto drop_connection;
+ }
}
link->conf->csa_active = true;
- link->csa_chanreq = csa_ie.chanreq;
- link->u.mgd.csa_ignored_same_chan = false;
+ link->u.mgd.csa.ignored_same_chan = false;
link->u.mgd.beacon_crc_valid = false;
- link->u.mgd.csa_blocked_tx = csa_ie.mode;
+ link->u.mgd.csa.blocked_tx = csa_ie.mode;
- if (csa_ie.mode &&
- !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
- ieee80211_stop_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = true;
- }
+ if (csa_ie.mode)
+ ieee80211_vif_block_queues_csa(sdata);
cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper,
link->link_id, csa_ie.count,
csa_ie.mode);
- if (local->ops->channel_switch) {
- /* use driver's channel switch callback */
+ /* we may have to handle timeout for deactivated link in software */
+ now = jiffies;
+ link->u.mgd.csa.time = now +
+ TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
+ link->conf->beacon_int);
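	/*
	 * For reference: one TU is 1024 us, so with e.g. beacon_int == 100 TU
	 * and csa_ie.count == 10 the timeout computed above is
	 * (10 - 1) * 100 TU = 900 TU, roughly 921.6 ms after 'now'.
	 */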
+
+ if (ieee80211_vif_link_active(&sdata->vif, link->link_id) &&
+ local->ops->channel_switch) {
+ /*
+ * Use the driver's channel switch callback; the driver will
+ * later call ieee80211_chswitch_done(). It may deactivate
+ * the link as well; we handle that elsewhere and queue
+ * the csa.switch_work for the calculated time then.
+ */
drv_channel_switch(local, sdata, &ch_switch);
return;
}
/* channel switch handled in software */
- timeout = TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
- cbss->beacon_interval);
wiphy_delayed_work_queue(local->hw.wiphy,
- &link->u.mgd.chswitch_work,
- timeout);
+ &link->u.mgd.csa.switch_work,
+ link->u.mgd.csa.time - now);
return;
drop_connection:
/*
@@ -2206,7 +2635,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
* reset when the disconnection worker runs.
*/
link->conf->csa_active = true;
- link->u.mgd.csa_blocked_tx = csa_ie.mode;
+ link->u.mgd.csa.blocked_tx = csa_ie.mode;
sdata->csa_blocked_queues =
csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA);
@@ -2602,16 +3031,15 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t)
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
{
- struct ieee80211_link_data *link =
- container_of(work, struct ieee80211_link_data,
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
dfs_cac_timer_work.work);
- struct cfg80211_chan_def chandef = link->conf->chanreq.oper;
- struct ieee80211_sub_if_data *sdata = link->sdata;
+ struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chanreq.oper;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
if (sdata->wdev.cac_started) {
- ieee80211_link_release_channel(link);
+ ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_FINISHED,
GFP_KERNEL);
@@ -3093,6 +3521,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
u64 changed = 0;
struct ieee80211_prep_tx_info info = {
.subtype = stype,
+ .was_assoc = true,
+ .link_id = ffs(sdata->vif.active_links) - 1,
};
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3141,29 +3571,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* deauthenticate/disassociate now */
if (tx || frame_buf) {
- /*
- * In multi channel scenarios guarantee that the virtual
- * interface is granted immediate airtime to transmit the
- * deauthentication frame by calling mgd_prepare_tx, if the
- * driver requested so.
- */
- if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP)) {
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link);
- link_id++) {
- struct ieee80211_link_data *link;
-
- link = sdata_dereference(sdata->link[link_id],
- sdata);
- if (!link)
- continue;
- if (link->u.mgd.have_beacon)
- break;
- }
- if (link_id == IEEE80211_MLD_MAX_NUM_LINKS) {
- info.link_id = ffs(sdata->vif.active_links) - 1;
- drv_mgd_prepare_tx(sdata->local, sdata, &info);
- }
- }
+ drv_mgd_prepare_tx(sdata->local, sdata, &info);
ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr,
sdata->vif.cfg.ap_addr, stype,
@@ -3260,14 +3668,10 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
}
sdata->vif.bss_conf.csa_active = false;
- sdata->deflink.u.mgd.csa_blocked_tx = false;
- sdata->deflink.u.mgd.csa_waiting_bcn = false;
- sdata->deflink.u.mgd.csa_ignored_same_chan = false;
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ sdata->deflink.u.mgd.csa.blocked_tx = false;
+ sdata->deflink.u.mgd.csa.waiting_bcn = false;
+ sdata->deflink.u.mgd.csa.ignored_same_chan = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
/* existing TX TSPEC sessions no longer exist */
memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
@@ -3275,9 +3679,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.power_type = IEEE80211_REG_UNSET_AP;
sdata->vif.bss_conf.pwr_reduction = 0;
- sdata->vif.bss_conf.tx_pwr_env_num = 0;
- memset(sdata->vif.bss_conf.tx_pwr_env, 0,
- sizeof(sdata->vif.bss_conf.tx_pwr_env));
+ ieee80211_clear_tpe(&sdata->vif.bss_conf.tpe);
sdata->vif.cfg.eml_cap = 0;
sdata->vif.cfg.eml_med_sync_delay = 0;
@@ -3287,8 +3689,17 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sizeof(sdata->u.mgd.ttlm_info));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+ memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->neg_ttlm_timeout_work);
+
+ sdata->u.mgd.removed_links = 0;
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.ml_reconf_work);
+
+ wiphy_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->teardown_ttlm_work);
+
ieee80211_vif_set_links(sdata, 0, 0);
ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
@@ -3592,7 +4003,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
if (WARN_ON_ONCE(!link))
continue;
- if (link->u.mgd.csa_blocked_tx)
+ if (link->u.mgd.csa.blocked_tx)
continue;
tx = true;
@@ -3629,13 +4040,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
tx, frame_buf);
/* the other links will be destroyed */
sdata->vif.bss_conf.csa_active = false;
- sdata->deflink.u.mgd.csa_waiting_bcn = false;
- sdata->deflink.u.mgd.csa_blocked_tx = false;
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ sdata->deflink.u.mgd.csa.waiting_bcn = false;
+ sdata->deflink.u.mgd.csa.blocked_tx = false;
+ ieee80211_vif_unblock_queues_csa(sdata);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -4445,40 +4852,12 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
if (elems->he_operation &&
link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
elems->he_cap) {
- const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
-
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
link_sta);
- he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation);
-
- if (is_6ghz && he_6ghz_oper) {
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
- bss_conf->power_type = IEEE80211_REG_LPI_AP;
- break;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
- bss_conf->power_type = IEEE80211_REG_SP_AP;
- break;
- case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
- bss_conf->power_type = IEEE80211_REG_VLP_AP;
- break;
- default:
- bss_conf->power_type = IEEE80211_REG_UNSET_AP;
- break;
- }
- } else if (is_6ghz) {
- link_info(link,
- "HE 6 GHz operation missing (on %d MHz), expect issues\n",
- bss_conf->chanreq.oper.chan->center_freq);
- }
-
bss_conf->he_support = link_sta->pub->he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
(elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) &&
@@ -5020,6 +5399,23 @@ ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata,
conn->bw_limit, tmp.bw_limit);
}
+static enum ieee80211_ap_reg_power
+ieee80211_ap_power_type(u8 control)
+{
+ switch (u8_get_bits(control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
+ return IEEE80211_REG_LPI_AP;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
+ return IEEE80211_REG_SP_AP;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return IEEE80211_REG_VLP_AP;
+ default:
+ return IEEE80211_REG_UNSET_AP;
+ }
+}
+
static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
int link_id,
@@ -5029,15 +5425,15 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
struct ieee80211_chan_req chanreq = {};
+ struct cfg80211_chan_def ap_chandef;
struct ieee802_11_elems *elems;
int ret;
- u32 i;
lockdep_assert_wiphy(local->hw.wiphy);
rcu_read_lock();
elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id,
- &chanreq);
+ &chanreq, &ap_chandef);
if (IS_ERR(elems)) {
rcu_read_unlock();
@@ -5052,26 +5448,23 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
if (link && is_6ghz && conn->mode >= IEEE80211_CONN_MODE_HE) {
- struct ieee80211_bss_conf *bss_conf;
- u8 j = 0;
-
- bss_conf = link->conf;
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
if (elems->pwr_constr_elem)
- bss_conf->pwr_reduction = *elems->pwr_constr_elem;
-
- BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
- ARRAY_SIZE(elems->tx_pwr_env));
+ link->conf->pwr_reduction = *elems->pwr_constr_elem;
- for (i = 0; i < elems->tx_pwr_env_num; i++) {
- if (elems->tx_pwr_env_len[i] > sizeof(bss_conf->tx_pwr_env[j]))
- continue;
+ he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation);
+ if (he_6ghz_oper)
+ link->conf->power_type =
+ ieee80211_ap_power_type(he_6ghz_oper->control);
+ else
+ link_info(link,
+ "HE 6 GHz operation missing (on %d MHz), expect issues\n",
+ cbss->channel->center_freq);
- bss_conf->tx_pwr_env_num++;
- memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
- elems->tx_pwr_env_len[i]);
- j++;
- }
+ link->conf->tpe = elems->tpe;
+ ieee80211_rearrange_tpe(&link->conf->tpe, &ap_chandef,
+ &chanreq.oper);
}
rcu_read_unlock();
/* the element data was RCU protected so no longer valid anyway */
@@ -6150,13 +6543,140 @@ static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
}
}
+static void
+ieee80211_mgd_check_cross_link_csa(struct ieee80211_sub_if_data *sdata,
+ int reporting_link_id,
+ struct ieee802_11_elems *elems)
+{
+ const struct element *sta_profiles[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ ssize_t sta_profiles_len[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ const struct element *sub;
+ const u8 *subelems;
+ size_t subelems_len;
+ u8 common_size;
+ int link_id;
+
+ if (!ieee80211_mle_size_ok((u8 *)elems->ml_basic, elems->ml_basic_len))
+ return;
+
+ common_size = ieee80211_mle_common_size((u8 *)elems->ml_basic);
+ subelems = (u8 *)elems->ml_basic + common_size;
+ subelems_len = elems->ml_basic_len - common_size;
+
+ for_each_element_id(sub, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE,
+ subelems, subelems_len) {
+ struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+ struct ieee80211_link_data *link;
+ ssize_t len;
+
+ if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+ sub->datalen))
+ continue;
+
+ link_id = le16_get_bits(prof->control,
+ IEEE80211_MLE_STA_CONTROL_LINK_ID);
+ /* need a valid link ID, but also not our own, both AP bugs */
+ if (link_id == reporting_link_id ||
+ link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ continue;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
+ len = cfg80211_defragment_element(sub, subelems, subelems_len,
+ NULL, 0,
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+ if (WARN_ON(len < 0))
+ continue;
+
+ sta_profiles[link_id] = sub;
+ sta_profiles_len[link_id] = len;
+ }
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_mle_per_sta_profile *prof;
+ struct ieee802_11_elems *prof_elems;
+ struct ieee80211_link_data *link;
+ ssize_t len;
+
+ if (link_id == reporting_link_id)
+ continue;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
+ if (!sta_profiles[link_id]) {
+ prof_elems = NULL;
+ goto handle;
+ }
+
+ /* we can defragment in-place, won't use the buffer again */
+ len = cfg80211_defragment_element(sta_profiles[link_id],
+ subelems, subelems_len,
+ (void *)sta_profiles[link_id],
+ sta_profiles_len[link_id],
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+ if (WARN_ON(len != sta_profiles_len[link_id]))
+ continue;
+
+ prof = (void *)sta_profiles[link_id];
+ prof_elems = ieee802_11_parse_elems(prof->variable +
+ (prof->sta_info_len - 1),
+ len -
+ (prof->sta_info_len - 1),
+ false, NULL);
+
+ /* memory allocation failed - let's hope that's transient */
+ if (!prof_elems)
+ continue;
+
+handle:
+ /*
+ * FIXME: the timings here are obviously incorrect,
+ * but only older Intel drivers seem to care, and
+ * those don't have MLO. If you really need this,
+ * the problem is having to calculate it with the
+ * TSF offset etc. The device_timestamp is still
+ * correct, of course.
+ */
+ ieee80211_sta_process_chanswitch(link, 0, 0, elems, prof_elems,
+ IEEE80211_CSA_SOURCE_OTHER_LINK);
+ kfree(prof_elems);
+ }
+}
+
+static bool ieee80211_mgd_ssid_mismatch(struct ieee80211_sub_if_data *sdata,
+ const struct ieee802_11_elems *elems)
+{
+ struct ieee80211_vif_cfg *cfg = &sdata->vif.cfg;
+ static u8 zero_ssid[IEEE80211_MAX_SSID_LEN];
+
+ if (!elems->ssid)
+ return false;
+
+ /* hidden SSID: zero length */
+ if (elems->ssid_len == 0)
+ return false;
+
+ if (elems->ssid_len != cfg->ssid_len)
+ return true;
+
+ /* hidden SSID: zeroed out */
+ if (!memcmp(elems->ssid, zero_ssid, elems->ssid_len))
+ return false;
+
+ return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len);
+}
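/*
 * Summary of the checks above: a missing SSID element and a hidden SSID
 * (zero length or all-zero bytes) are never treated as a mismatch; a
 * length difference always is; otherwise the bytes are compared against
 * the configured SSID.
 */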
+
static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee80211_hdr *hdr, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
struct ieee80211_mgmt *mgmt = (void *) hdr;
size_t baselen;
@@ -6200,7 +6720,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
parse_params.len = len - baselen;
rcu_read_lock();
- chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
+ chanctx_conf = rcu_dereference(bss_conf->chanctx_conf);
if (!chanctx_conf) {
rcu_read_unlock();
return;
@@ -6230,11 +6750,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
ifmgd->assoc_data->need_beacon = false;
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
!ieee80211_is_s1g_beacon(hdr->frame_control)) {
- link->conf->sync_tsf =
+ bss_conf->sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
- link->conf->sync_device_ts =
+ bss_conf->sync_device_ts =
rx_status->device_timestamp;
- link->conf->sync_dtim_count = elems->dtim_count;
+ bss_conf->sync_dtim_count = elems->dtim_count;
}
if (elems->mbssid_config_ie)
@@ -6258,7 +6778,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(bssid, link->conf->bss))
+ !ieee80211_rx_our_beacon(bssid, bss_conf->bss))
return;
bssid = link->u.mgd.bssid;
@@ -6285,12 +6805,21 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
*/
if (!ieee80211_is_s1g_beacon(hdr->frame_control))
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
- parse_params.bss = link->conf->bss;
+ parse_params.bss = bss_conf->bss;
parse_params.filter = care_about_ies;
parse_params.crc = ncrc;
elems = ieee802_11_parse_elems_full(&parse_params);
if (!elems)
return;
+
+ if (rx_status->flag & RX_FLAG_DECRYPTED &&
+ ieee80211_mgd_ssid_mismatch(sdata, elems)) {
+ sdata_info(sdata, "SSID mismatch for AP %pM, disconnect\n",
+ sdata->vif.cfg.ap_addr);
+ __ieee80211_disconnect(sdata);
+ return;
+ }
+
ncrc = elems->crc;
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
@@ -6357,11 +6886,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
*/
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
!ieee80211_is_s1g_beacon(hdr->frame_control)) {
- link->conf->sync_tsf =
+ bss_conf->sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
- link->conf->sync_device_ts =
+ bss_conf->sync_device_ts =
rx_status->device_timestamp;
- link->conf->sync_dtim_count = elems->dtim_count;
+ bss_conf->sync_dtim_count = elems->dtim_count;
}
if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) ||
@@ -6374,7 +6903,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
ieee80211_sta_process_chanswitch(link, rx_status->mactime,
rx_status->device_timestamp,
- elems, true);
+ elems, elems,
+ IEEE80211_CSA_SOURCE_BEACON);
+
+ /* note that after this elems->ml_basic can no longer be used fully */
+ ieee80211_mgd_check_cross_link_csa(sdata, rx_status->link_id, elems);
if (!link->u.mgd.disable_wmm_tracking &&
ieee80211_sta_wmm_params(local, link, elems->wmm_param,
@@ -6420,10 +6953,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
goto free;
}
- if (WARN_ON(!link->conf->chanreq.oper.chan))
+ if (WARN_ON(!bss_conf->chanreq.oper.chan))
goto free;
- sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
+ sband = local->hw.wiphy->bands[bss_conf->chanreq.oper.chan->band];
changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
@@ -6834,7 +7367,7 @@ static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy,
u16 new_dormant_links;
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
- u.mgd.neg_ttlm_timeout_work.work);
+ u.mgd.teardown_ttlm_work);
if (!sdata->vif.neg_ttlm.valid)
return;
@@ -6908,6 +7441,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_link_data *link = &sdata->deflink;
struct ieee80211_rx_status *rx_status;
+ struct ieee802_11_elems *elems;
struct ieee80211_mgmt *mgmt;
u16 fc;
int ies_len;
@@ -6951,9 +7485,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
!ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr))
break;
- if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) {
- struct ieee802_11_elems *elems;
-
+ switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_SPECTRUM_MGMT:
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
@@ -6966,15 +7499,20 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.chan_switch.variable,
ies_len, true, NULL);
- if (elems && !elems->parse_error)
+ if (elems && !elems->parse_error) {
+ enum ieee80211_csa_source src =
+ IEEE80211_CSA_SOURCE_PROT_ACTION;
+
ieee80211_sta_process_chanswitch(link,
rx_status->mactime,
rx_status->device_timestamp,
- elems, false);
+ elems, elems,
+ src);
+ }
kfree(elems);
- } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) {
- struct ieee802_11_elems *elems;
-
+ break;
+ case WLAN_CATEGORY_PUBLIC:
+ case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION:
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
u.action.u.ext_chan_switch.variable);
@@ -6991,6 +7529,14 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ies_len, true, NULL);
if (elems && !elems->parse_error) {
+ enum ieee80211_csa_source src;
+
+ if (mgmt->u.action.category ==
+ WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
+ src = IEEE80211_CSA_SOURCE_PROT_ACTION;
+ else
+ src = IEEE80211_CSA_SOURCE_UNPROT_ACTION;
+
/* for the handling code pretend it was an IE */
elems->ext_chansw_ie =
&mgmt->u.action.u.ext_chan_switch.data;
@@ -6998,10 +7544,12 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_process_chanswitch(link,
rx_status->mactime,
rx_status->device_timestamp,
- elems, false);
+ elems, elems,
+ src);
}
kfree(elems);
+ break;
}
break;
}
@@ -7321,7 +7869,7 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
return;
if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa_waiting_bcn)
+ !sdata->deflink.u.mgd.csa.waiting_bcn)
return;
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
@@ -7345,7 +7893,7 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
return;
if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa_waiting_bcn)
+ !sdata->deflink.u.mgd.csa.waiting_bcn)
return;
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
@@ -7556,8 +8104,10 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
else
link->u.mgd.req_smps = IEEE80211_SMPS_OFF;
- wiphy_delayed_work_init(&link->u.mgd.chswitch_work,
- ieee80211_chswitch_work);
+ wiphy_delayed_work_init(&link->u.mgd.csa.switch_work,
+ ieee80211_csa_switch_work);
+
+ ieee80211_clear_tpe(&link->conf->tpe);
if (sdata->u.mgd.assoc_data)
ether_addr_copy(link->conf->addr,
@@ -8686,7 +9236,7 @@ void ieee80211_mgd_stop_link(struct ieee80211_link_data *link)
wiphy_work_cancel(link->sdata->local->hw.wiphy,
&link->u.mgd.recalc_smps);
wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
- &link->u.mgd.chswitch_work);
+ &link->u.mgd.csa.switch_work);
}
void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
@@ -8704,15 +9254,8 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
&ifmgd->beacon_connection_loss_work);
wiphy_work_cancel(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
- wiphy_work_cancel(sdata->local->hw.wiphy,
- &ifmgd->teardown_ttlm_work);
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->tdls_peer_del_work);
- wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
- &ifmgd->ml_reconf_work);
- wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
- wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
- &ifmgd->neg_ttlm_timeout_work);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 65e1e9e971fd..28d03196ef75 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -8,7 +8,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2019, 2022-2023 Intel Corporation
+ * Copyright (C) 2019, 2022-2024 Intel Corporation
*/
#include <linux/export.h>
#include <net/mac80211.h>
@@ -413,6 +413,39 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
}
}
+void ieee80211_reconfig_roc(struct ieee80211_local *local)
+{
+ struct ieee80211_roc_work *roc, *tmp;
+
+ /*
+ * In the software implementation we can simply continue after the
+ * interruption due to reconfig; roc_work is still queued if
+ * needed.
+ */
+ if (!local->ops->remain_on_channel)
+ return;
+
+ /* flush work so nothing from the driver is still pending */
+ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
+ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
+
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ if (!roc->started)
+ break;
+
+ if (!roc->hw_begun) {
+ /* it didn't start in HW yet, so we can restart it */
+ roc->started = false;
+ continue;
+ }
+
+ /* otherwise destroy it and tell userspace */
+ ieee80211_roc_notify_destroy(roc);
+ }
+
+ ieee80211_start_next_roc(local);
+}
+
static void __ieee80211_roc_work(struct ieee80211_local *local)
{
struct ieee80211_roc_work *roc;
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
index 55e5497f8978..279c5143b335 100644
--- a/net/mac80211/parse.c
+++ b/net/mac80211/parse.c
@@ -111,7 +111,7 @@ ieee80211_parse_extension_element(u32 *crc,
if (params->mode < IEEE80211_CONN_MODE_HE)
break;
if (len >= sizeof(*elems->he_spr) &&
- len >= ieee80211_he_spr_size(data))
+ len >= ieee80211_he_spr_size(data) - 1)
elems->he_spr = data;
break;
case WLAN_EID_EXT_HE_6GHZ_CAPA:
@@ -187,6 +187,84 @@ ieee80211_parse_extension_element(u32 *crc,
*crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
}
+static void ieee80211_parse_tpe(struct ieee80211_parsed_tpe *tpe,
+ const u8 *data, u8 len)
+{
+ const struct ieee80211_tx_pwr_env *env = (const void *)data;
+ u8 count, interpret, category;
+ u8 *out, N, *cnt_out = NULL, *N_out = NULL;
+
+ if (!ieee80211_valid_tpe_element(data, len))
+ return;
+
+ count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ out = tpe->max_local[category].power;
+ cnt_out = &tpe->max_local[category].count;
+ tpe->max_local[category].valid = true;
+ break;
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ out = tpe->max_reg_client[category].power;
+ cnt_out = &tpe->max_reg_client[category].count;
+ tpe->max_reg_client[category].valid = true;
+ break;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ out = tpe->psd_local[category].power;
+ cnt_out = &tpe->psd_local[category].count;
+ N_out = &tpe->psd_local[category].n;
+ tpe->psd_local[category].valid = true;
+ break;
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ out = tpe->psd_reg_client[category].power;
+ cnt_out = &tpe->psd_reg_client[category].count;
+ N_out = &tpe->psd_reg_client[category].n;
+ tpe->psd_reg_client[category].valid = true;
+ break;
+ }
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ /* count was validated <= 3, plus 320 MHz */
+ BUILD_BUG_ON(IEEE80211_TPE_EIRP_ENTRIES_320MHZ < 5);
+ memcpy(out, env->variable, count + 1);
+ *cnt_out = count + 1;
+ /* separately take 320 MHz if present */
+ if (count == 3 && len > sizeof(*env) + count + 1) {
+ out[4] = env->variable[4];
+ *cnt_out = 5;
+ }
+ break;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ if (!count) {
+ memset(out, env->variable[0],
+ IEEE80211_TPE_PSD_ENTRIES_320MHZ);
+ *cnt_out = IEEE80211_TPE_PSD_ENTRIES_320MHZ;
+ break;
+ }
+
+ N = 1 << (count - 1);
+ memcpy(out, env->variable, N);
+ *cnt_out = N;
+ *N_out = N;
+
+ if (len > sizeof(*env) + N) {
+ int K = u8_get_bits(env->variable[N],
+ IEEE80211_TX_PWR_ENV_EXT_COUNT);
+
+ K = min(K, IEEE80211_TPE_PSD_ENTRIES_320MHZ - N);
+ memcpy(out + N, env->variable + N + 1, K);
+ (*cnt_out) += K;
+ }
+ break;
+ }
+}
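/*
 * For illustration, a hypothetical standalone helper (not mac80211 code)
 * mirroring the PSD entry count encoding handled above: count == 0 means
 * a single value for the whole bandwidth, otherwise 2^(count - 1) explicit
 * 20 MHz values follow, and an extension count may append up to 16 - N
 * more for 320 MHz.
 */
static unsigned int example_tpe_psd_values(unsigned int count)
{
	/* number of explicit 20 MHz PSD values following the info byte */
	return count ? 1u << (count - 1) : 1;
}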
+
static u32
_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
struct ieee80211_elems_parse *elems_parse,
@@ -529,6 +607,13 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
elem_parse_failed =
IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
}
+
+ subelem = cfg80211_find_ext_elem(WLAN_EID_TX_POWER_ENVELOPE,
+ pos, elen);
+ if (subelem)
+ ieee80211_parse_tpe(&elems->csa_tpe,
+ subelem->data + 1,
+ subelem->datalen - 1);
break;
case WLAN_EID_COUNTRY:
elems->country_elem = pos;
@@ -593,16 +678,9 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
elems->rsnx_len = elen;
break;
case WLAN_EID_TX_POWER_ENVELOPE:
- if (elen < 1 ||
- elen > sizeof(struct ieee80211_tx_pwr_env))
+ if (params->mode < IEEE80211_CONN_MODE_HE)
break;
-
- if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
- break;
-
- elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
- elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
- elems->tx_pwr_env_num++;
+ ieee80211_parse_tpe(&elems->tpe, pos, elen);
break;
case WLAN_EID_EXTENSION:
ieee80211_parse_extension_element(calc_crc ?
@@ -889,6 +967,10 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
elems->ie_start = params->start;
elems->total_len = params->len;
+ /* set all TPE entries to unlimited (but invalid) */
+ ieee80211_clear_tpe(&elems->tpe);
+ ieee80211_clear_tpe(&elems->csa_tpe);
+
nontransmitted_profile = elems_parse->scratch_pos;
nontransmitted_profile_len =
ieee802_11_find_bssid_profile(params->start, params->len,
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index c1fa26e09479..d823d58303e8 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Portions
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
*/
#include <net/mac80211.h>
#include <net/rtnetlink.h>
@@ -171,7 +171,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
WARN_ON(!list_empty(&local->chanctx_list));
/* stop hardware - this must stop RX */
- ieee80211_stop_device(local);
+ ieee80211_stop_device(local, true);
suspend:
local->suspended = true;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 4914692750e5..59ad24a71141 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3358,6 +3358,7 @@ static void
ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
{
struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+ struct ieee80211_bss_conf *bss_conf;
const struct element *ie;
size_t baselen;
@@ -3368,7 +3369,9 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
return;
- if (rx->link->conf->csa_active)
+ bss_conf = rx->link->conf;
+ if (bss_conf->csa_active || bss_conf->color_change_active ||
+ !bss_conf->he_bss_color.enabled)
return;
baselen = mgmt->u.beacon.variable - rx->skb->data;
@@ -3380,7 +3383,6 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
rx->skb->len - baselen);
if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) &&
ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) {
- struct ieee80211_bss_conf *bss_conf = rx->link->conf;
const struct ieee80211_he_operation *he_oper;
u8 color;
@@ -3617,6 +3619,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
case WLAN_CATEGORY_PUBLIC:
+ case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION:
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
goto invalid;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 3da1c5c45035..b5f2df61c7f6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -358,7 +358,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
struct cfg80211_scan_request *req;
struct cfg80211_chan_def chandef;
u8 bands_used = 0;
- int i, ielen, n_chans;
+ int i, ielen;
+ u32 *n_chans;
u32 flags = 0;
req = rcu_dereference_protected(local->scan_req,
@@ -368,34 +369,34 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
return false;
if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
+ local->hw_scan_req->req.n_channels = req->n_channels;
+
for (i = 0; i < req->n_channels; i++) {
local->hw_scan_req->req.channels[i] = req->channels[i];
bands_used |= BIT(req->channels[i]->band);
}
-
- n_chans = req->n_channels;
} else {
do {
if (local->hw_scan_band == NUM_NL80211_BANDS)
return false;
- n_chans = 0;
+ n_chans = &local->hw_scan_req->req.n_channels;
+ *n_chans = 0;
for (i = 0; i < req->n_channels; i++) {
if (req->channels[i]->band !=
local->hw_scan_band)
continue;
- local->hw_scan_req->req.channels[n_chans] =
+ local->hw_scan_req->req.channels[(*n_chans)++] =
req->channels[i];
- n_chans++;
+
bands_used |= BIT(req->channels[i]->band);
}
local->hw_scan_band++;
- } while (!n_chans);
+ } while (!*n_chans);
}
- local->hw_scan_req->req.n_channels = n_chans;
ieee80211_prepare_scan_chandef(&chandef);
if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
@@ -744,15 +745,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->hw_scan_ies_bufsize *= n_bands;
}
- local->hw_scan_req = kmalloc(
- sizeof(*local->hw_scan_req) +
- req->n_channels * sizeof(req->channels[0]) +
- local->hw_scan_ies_bufsize, GFP_KERNEL);
+ local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
+ req.channels,
+ req->n_channels) +
+ local->hw_scan_ies_bufsize,
+ GFP_KERNEL);
if (!local->hw_scan_req)
return -ENOMEM;
local->hw_scan_req->req.ssids = req->ssids;
local->hw_scan_req->req.n_ssids = req->n_ssids;
+ /* None of the channels are actually set
+ * up but let UBSAN know the boundaries.
+ */
+ local->hw_scan_req->req.n_channels = req->n_channels;
+
ies = (u8 *)local->hw_scan_req +
sizeof(*local->hw_scan_req) +
req->n_channels * sizeof(req->channels[0]);
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index b2de4c6fb808..073ff9e0f397 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2008, Intel Corporation
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018, 2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2018, 2020, 2022-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -223,7 +223,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
enum nl80211_band current_band,
u32 vht_cap_info,
struct ieee80211_conn_settings *conn,
- u8 *bssid,
+ u8 *bssid, bool unprot_action,
struct ieee80211_csa_ie *csa_ie)
{
enum nl80211_band new_band = current_band;
@@ -258,8 +258,10 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_operating_class_to_band(new_op_class, &new_band)) {
new_op_class = 0;
- sdata_info(sdata, "cannot understand ECSA IE operating class, %d, ignoring\n",
- ext_chansw_elem->new_operating_class);
+ if (!unprot_action)
+ sdata_info(sdata,
+ "cannot understand ECSA IE operating class, %d, ignoring\n",
+ ext_chansw_elem->new_operating_class);
} else {
new_chan_no = ext_chansw_elem->new_ch_num;
csa_ie->count = ext_chansw_elem->count;
@@ -293,9 +295,10 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
- sdata_info(sdata,
- "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
- bssid, new_freq);
+ if (!unprot_action)
+ sdata_info(sdata,
+ "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
+ bssid, new_freq);
return -EINVAL;
}
@@ -340,6 +343,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
break;
}
+ /* capture the AP configuration */
+ csa_ie->chanreq.ap = csa_ie->chanreq.oper;
+
/* parse one of the Elements to build a new chandef */
memset(&new_chandef, 0, sizeof(new_chandef));
new_chandef.chan = new_chan;
@@ -368,6 +374,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
/* if data is there validate the bandwidth & use it */
if (new_chandef.chan) {
+ /* capture the AP chandef before (potential) downgrading */
+ csa_ie->chanreq.ap = new_chandef;
+
if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320 &&
new_chandef.width == NL80211_CHAN_WIDTH_320)
ieee80211_chandef_downgrade(&new_chandef, NULL);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index da5fdd6f5c85..aa22f09e6d14 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1724,7 +1724,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
skb_queue_head_init(&pending);
/* sync with ieee80211_tx_h_unicast_ps_buf */
- spin_lock(&sta->ps_lock);
+ spin_lock_bh(&sta->ps_lock);
/* Send all buffered frames to the station */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
@@ -1753,7 +1753,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
*/
clear_sta_flag(sta, WLAN_STA_PSPOLL);
clear_sta_flag(sta, WLAN_STA_UAPSD);
- spin_unlock(&sta->ps_lock);
+ spin_unlock_bh(&sta->ps_lock);
atomic_dec(&ps->num_sta_ps);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index bd5e2f7146f6..9195d5a2de0a 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -727,6 +727,12 @@ struct sta_info {
struct ieee80211_sta sta;
};
+static inline int ieee80211_tdls_sta_link_id(struct sta_info *sta)
+{
+ /* TDLS STA can only have a single link */
+ return sta->sta.valid_links ? __ffs(sta->sta.valid_links) : 0;
+}
+
static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_MESH
diff --git a/net/mac80211/tests/Makefile b/net/mac80211/tests/Makefile
index 4fdaf3feaca3..511dfa226699 100644
--- a/net/mac80211/tests/Makefile
+++ b/net/mac80211/tests/Makefile
@@ -1,3 +1,3 @@
-mac80211-tests-y += module.o elems.o mfp.o
+mac80211-tests-y += module.o elems.o mfp.o tpe.o
obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o
diff --git a/net/mac80211/tests/tpe.c b/net/mac80211/tests/tpe.c
new file mode 100644
index 000000000000..dd63303a2985
--- /dev/null
+++ b/net/mac80211/tests/tpe.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for TPE element handling
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "../ieee80211_i.h"
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static struct ieee80211_channel chan6g_1 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5955,
+};
+
+static struct ieee80211_channel chan6g_33 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6115,
+};
+
+static struct ieee80211_channel chan6g_61 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6255,
+};
+
+static const struct subchan_test_case {
+ const char *desc;
+ struct cfg80211_chan_def c;
+ u8 n;
+ int expect;
+} subchan_offset_cases[] = {
+ {
+ .desc = "identical 20 MHz",
+ .c.width = NL80211_CHAN_WIDTH_20,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5955,
+ .n = 1,
+ .expect = 0,
+ },
+ {
+ .desc = "identical 40 MHz",
+ .c.width = NL80211_CHAN_WIDTH_40,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5965,
+ .n = 2,
+ .expect = 0,
+ },
+ {
+ .desc = "identical 80+80 MHz",
+ /* not really a valid chandef, but that doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5985,
+ .c.center_freq2 = 6225,
+ .n = 16,
+ .expect = 0,
+ },
+ {
+ .desc = "identical 320 MHz",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 6105,
+ .n = 16,
+ .expect = 0,
+ },
+ {
+ .desc = "lower 160 MHz of 320 MHz",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 6105,
+ .n = 8,
+ .expect = 0,
+ },
+ {
+ .desc = "upper 160 MHz of 320 MHz",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6105,
+ .n = 8,
+ .expect = 8,
+ },
+ {
+ .desc = "upper 160 MHz of 320 MHz, go to 40",
+ .c.width = NL80211_CHAN_WIDTH_320,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6105,
+ .n = 2,
+ .expect = 8 + 4 + 2,
+ },
+ {
+ .desc = "secondary 80 above primary in 80+80 MHz",
+ /* not really valid? doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_1,
+ .c.center_freq1 = 5985,
+ .c.center_freq2 = 6225,
+ .n = 4,
+ .expect = 0,
+ },
+ {
+ .desc = "secondary 80 below primary in 80+80 MHz",
+ /* not really valid? doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6225,
+ .c.center_freq2 = 5985,
+ .n = 4,
+ .expect = 4,
+ },
+ {
+ .desc = "secondary 80 below primary in 80+80 MHz, go to 20",
+ /* not really valid? doesn't matter for the test */
+ .c.width = NL80211_CHAN_WIDTH_80P80,
+ .c.chan = &chan6g_61,
+ .c.center_freq1 = 6225,
+ .c.center_freq2 = 5985,
+ .n = 1,
+ .expect = 7,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(subchan_offset, subchan_offset_cases, desc);
+
+static void subchan_offset(struct kunit *test)
+{
+ const struct subchan_test_case *params = test->param_value;
+ int offset;
+
+ KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->c), true);
+
+ offset = ieee80211_calc_chandef_subchan_offset(&params->c, params->n);
+
+ KUNIT_EXPECT_EQ(test, params->expect, offset);
+}
+
+static const struct psd_reorder_test_case {
+ const char *desc;
+ struct cfg80211_chan_def ap, used;
+ struct ieee80211_parsed_tpe_psd psd, out;
+} psd_reorder_cases[] = {
+ {
+ .desc = "no changes, 320 MHz",
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_1,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_320,
+ .used.chan = &chan6g_1,
+ .used.center_freq1 = 6105,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 8,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 16,
+ .out.n = 8,
+ .out.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ },
+ {
+ .desc = "no changes, 320 MHz, 160 MHz used, n=0",
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_1,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_1,
+ .used.center_freq1 = 6025,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 0,
+ .psd.power = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 0,
+ .out.power = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
+ },
+ {
+ .desc = "320 MHz, HE is 80, used 160, all lower",
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_1,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_1,
+ .used.center_freq1 = 6025,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 4,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 4,
+ .out.power = { 0, 1, 2, 3, 4, 5, 6, 7, 127, 127, 127, 127, 127, 127, 127, 127},
+ },
+ {
+ .desc = "320 MHz, HE is 80, used 160, all upper",
+ /*
+ * EHT: | | | | | | | | | | | | | | | | |
+ * HE: | | | | |
+ * used: | | | | | | | | |
+ */
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_61,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_61,
+ .used.center_freq1 = 6185,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 4,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 4,
+ .out.power = { 12, 13, 14, 15, 0, 1, 2, 3, 127, 127, 127, 127, 127, 127, 127, 127},
+ },
+ {
+ .desc = "320 MHz, HE is 80, used 160, split",
+ /*
+ * EHT: | | | | | | | | | | | | | | | | |
+ * HE: | | | | |
+ * used: | | | | | | | | |
+ */
+
+ .ap.width = NL80211_CHAN_WIDTH_320,
+ .ap.chan = &chan6g_33,
+ .ap.center_freq1 = 6105,
+
+ .used.width = NL80211_CHAN_WIDTH_160,
+ .used.chan = &chan6g_33,
+ .used.center_freq1 = 6185,
+
+ .psd.valid = true,
+ .psd.count = 16,
+ .psd.n = 4,
+ .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+
+ .out.valid = true,
+ .out.count = 8,
+ .out.n = 4,
+ .out.power = { 0, 1, 2, 3, 12, 13, 14, 15, 127, 127, 127, 127, 127, 127, 127, 127},
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(psd_reorder, psd_reorder_cases, desc);
+
+static void psd_reorder(struct kunit *test)
+{
+ const struct psd_reorder_test_case *params = test->param_value;
+ struct ieee80211_parsed_tpe_psd tmp = params->psd;
+
+ KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->ap), true);
+ KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->used), true);
+
+ ieee80211_rearrange_tpe_psd(&tmp, &params->ap, &params->used);
+ KUNIT_EXPECT_MEMEQ(test, &tmp, &params->out, sizeof(tmp));
+}
+
+static struct kunit_case tpe_test_cases[] = {
+ KUNIT_CASE_PARAM(subchan_offset, subchan_offset_gen_params),
+ KUNIT_CASE_PARAM(psd_reorder, psd_reorder_gen_params),
+ {}
+};
+
+static struct kunit_suite tpe = {
+ .name = "mac80211-tpe",
+ .test_cases = tpe_test_cases,
+};
+
+kunit_test_suite(tpe);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index b26aacfbc622..dc498cd8cd91 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -328,9 +328,18 @@ TRACE_EVENT(drv_set_wakeup,
TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled)
);
-DEFINE_EVENT(local_only_evt, drv_stop,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+TRACE_EVENT(drv_stop,
+ TP_PROTO(struct ieee80211_local *local, bool suspend),
+ TP_ARGS(local, suspend),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, suspend)
+ ),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->suspend = suspend;
+ ),
+ TP_printk(LOCAL_PR_FMT " suspend:%d", LOCAL_PR_ARG, __entry->suspend)
);
DEFINE_EVENT(local_sdata_addr_evt, drv_add_interface,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f861d99e5f05..72a9ba8bc5fd 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2774,8 +2774,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
if (tdls_peer) {
/* For TDLS only one link can be valid with peer STA */
- int tdls_link_id = sta->sta.valid_links ?
- __ffs(sta->sta.valid_links) : 0;
+ int tdls_link_id = ieee80211_tdls_sta_link_id(sta);
struct ieee80211_link_data *link;
/* DA SA BSSID */
@@ -3101,8 +3100,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
case NL80211_IFTYPE_STATION:
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
/* For TDLS only one link can be valid with peer STA */
- int tdls_link_id = sta->sta.valid_links ?
- __ffs(sta->sta.valid_links) : 0;
+ int tdls_link_id = ieee80211_tdls_sta_link_id(sta);
struct ieee80211_link_data *link;
/* DA SA BSSID */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0b893e958959..ced19ce7c51a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1565,8 +1565,12 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
return supp_rates;
}
-void ieee80211_stop_device(struct ieee80211_local *local)
+void ieee80211_stop_device(struct ieee80211_local *local, bool suspend)
{
+ local_bh_disable();
+ ieee80211_handle_queued_frames(local);
+ local_bh_enable();
+
ieee80211_led_radio(local, false);
ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
@@ -1574,7 +1578,7 @@ void ieee80211_stop_device(struct ieee80211_local *local)
flush_workqueue(local->workqueue);
wiphy_work_flush(local->hw.wiphy, NULL);
- drv_stop(local);
+ drv_stop(local, suspend);
}
static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
@@ -1841,7 +1845,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add interfaces */
sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
/* in HW restart it exists already */
WARN_ON(local->resuming);
res = drv_add_interface(local, sdata);
@@ -2175,8 +2179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
local->in_reconfig = false;
barrier();
- /* Restart deferred ROCs */
- ieee80211_start_next_roc(local);
+ ieee80211_reconfig_roc(local);
/* Requeue all works */
list_for_each_entry(sdata, &local->interfaces, list)
@@ -2333,7 +2336,7 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata,
chanctx = container_of(chanctx_conf, struct ieee80211_chanctx,
conf);
- ieee80211_recalc_chanctx_min_def(local, chanctx, NULL);
+ ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false);
}
}
@@ -3456,12 +3459,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
- /* it might be waiting for the local->mtx, but then
- * by the time it gets it, sdata->wdev.cac_started
- * will no longer be true
- */
wiphy_delayed_work_cancel(local->hw.wiphy,
- &sdata->deflink.dfs_cac_timer_work);
+ &sdata->dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
chandef = sdata->vif.bss_conf.chanreq.oper;
@@ -3933,19 +3932,103 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
return radar_detect;
}
+static u32
+__ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chanctx_conf *conf;
+ unsigned int link_id;
+ u32 mask = 0;
+
+ for_each_vif_active_link(&sdata->vif, link_conf, link_id) {
+ conf = sdata_dereference(link_conf->chanctx_conf, sdata);
+ if (!conf || conf->radio_idx < 0)
+ continue;
+
+ mask |= BIT(conf->radio_idx);
+ }
+
+ return mask;
+}
+
+u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ return __ieee80211_get_radio_mask(sdata);
+}
+
+static bool
+ieee80211_sdata_uses_radio(struct ieee80211_sub_if_data *sdata, int radio_idx)
+{
+ if (radio_idx < 0)
+ return true;
+
+ return __ieee80211_get_radio_mask(sdata) & BIT(radio_idx);
+}
+
+static int
+ieee80211_fill_ifcomb_params(struct ieee80211_local *local,
+ struct iface_combination_params *params,
+ const struct cfg80211_chan_def *chandef,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_sub_if_data *sdata_iter;
+ struct ieee80211_chanctx *ctx;
+ int total = !!sdata;
+
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
+ continue;
+
+ if (params->radio_idx >= 0 &&
+ ctx->conf.radio_idx != params->radio_idx)
+ continue;
+
+ params->radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
+
+ if (chandef && ctx->mode != IEEE80211_CHANCTX_EXCLUSIVE &&
+ cfg80211_chandef_compatible(chandef, &ctx->conf.def))
+ continue;
+
+ params->num_different_channels++;
+ }
+
+ list_for_each_entry(sdata_iter, &local->interfaces, list) {
+ struct wireless_dev *wdev_iter;
+
+ wdev_iter = &sdata_iter->wdev;
+
+ if (sdata_iter == sdata ||
+ !ieee80211_sdata_running(sdata_iter) ||
+ cfg80211_iftype_allowed(local->hw.wiphy,
+ wdev_iter->iftype, 0, 1))
+ continue;
+
+ if (!ieee80211_sdata_uses_radio(sdata_iter, params->radio_idx))
+ continue;
+
+ params->iftype_num[wdev_iter->iftype]++;
+ total++;
+ }
+
+ return total;
+}
+
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
enum ieee80211_chanctx_mode chanmode,
- u8 radar_detect)
+ u8 radar_detect, int radio_idx)
{
+ bool shared = chanmode == IEEE80211_CHANCTX_SHARED;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_sub_if_data *sdata_iter;
enum nl80211_iftype iftype = sdata->wdev.iftype;
- struct ieee80211_chanctx *ctx;
- int total = 1;
struct iface_combination_params params = {
.radar_detect = radar_detect,
+ .radio_idx = radio_idx,
};
+ int total;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3982,37 +4065,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
if (iftype != NL80211_IFTYPE_UNSPECIFIED)
params.iftype_num[iftype] = 1;
- list_for_each_entry(ctx, &local->chanctx_list, list) {
- if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
- continue;
- params.radar_detect |=
- ieee80211_chanctx_radar_detect(local, ctx);
- if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
- params.num_different_channels++;
- continue;
- }
- if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
- cfg80211_chandef_compatible(chandef,
- &ctx->conf.def))
- continue;
- params.num_different_channels++;
- }
-
- list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
- struct wireless_dev *wdev_iter;
-
- wdev_iter = &sdata_iter->wdev;
-
- if (sdata_iter == sdata ||
- !ieee80211_sdata_running(sdata_iter) ||
- cfg80211_iftype_allowed(local->hw.wiphy,
- wdev_iter->iftype, 0, 1))
- continue;
-
- params.iftype_num[wdev_iter->iftype]++;
- total++;
- }
-
+ total = ieee80211_fill_ifcomb_params(local, &params,
+ shared ? chandef : NULL,
+ sdata);
if (total == 1 && !params.radar_detect)
return 0;
@@ -4029,28 +4084,17 @@ ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
c->num_different_channels);
}
-int ieee80211_max_num_channels(struct ieee80211_local *local)
+int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx)
{
- struct ieee80211_sub_if_data *sdata;
- struct ieee80211_chanctx *ctx;
u32 max_num_different_channels = 1;
int err;
- struct iface_combination_params params = {0};
+ struct iface_combination_params params = {
+ .radio_idx = radio_idx,
+ };
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(ctx, &local->chanctx_list, list) {
- if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
- continue;
-
- params.num_different_channels++;
-
- params.radar_detect |=
- ieee80211_chanctx_radar_detect(local, ctx);
- }
-
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- params.iftype_num[sdata->wdev.iftype]++;
+ ieee80211_fill_ifcomb_params(local, &params, NULL, NULL);
err = cfg80211_iter_combinations(local->hw.wiphy, &params,
ieee80211_iter_max_chans,
@@ -4338,3 +4382,28 @@ ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef)
return IEEE80211_CONN_BW_LIMIT_20;
}
}
+
+void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe)
+{
+ for (int i = 0; i < 2; i++) {
+ tpe->max_local[i].valid = false;
+ memset(tpe->max_local[i].power,
+ IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT,
+ sizeof(tpe->max_local[i].power));
+
+ tpe->max_reg_client[i].valid = false;
+ memset(tpe->max_reg_client[i].power,
+ IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT,
+ sizeof(tpe->max_reg_client[i].power));
+
+ tpe->psd_local[i].valid = false;
+ memset(tpe->psd_local[i].power,
+ IEEE80211_TPE_PSD_NO_LIMIT,
+ sizeof(tpe->psd_local[i].power));
+
+ tpe->psd_reg_client[i].valid = false;
+ memset(tpe->psd_reg_client[i].power,
+ IEEE80211_TPE_PSD_NO_LIMIT,
+ sizeof(tpe->psd_reg_client[i].power));
+ }
+}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 642891cafbaf..bf6ef45af757 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -351,7 +351,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
/* FIXME: move this to some better location - parses HE/EHT now */
enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
+_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef)
{
unsigned int link_id = link_sta->link_id;
struct ieee80211_sub_if_data *sdata = link_sta->sta->sdata;
@@ -361,44 +362,43 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
u32 cap_width;
if (he_cap->has_he) {
- struct ieee80211_bss_conf *link_conf;
- enum ieee80211_sta_rx_bandwidth ret;
+ enum nl80211_band band;
u8 info;
- rcu_read_lock();
- link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
+ if (chandef) {
+ band = chandef->chan->band;
+ } else {
+ struct ieee80211_bss_conf *link_conf;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
+ band = link_conf->chanreq.oper.chan->band;
+ rcu_read_unlock();
+ }
- if (eht_cap->has_eht &&
- link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
+ if (eht_cap->has_eht && band == NL80211_BAND_6GHZ) {
info = eht_cap->eht_cap_elem.phy_cap_info[0];
- if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) {
- ret = IEEE80211_STA_RX_BW_320;
- goto out;
- }
+ if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ return IEEE80211_STA_RX_BW_320;
}
info = he_cap->he_cap_elem.phy_cap_info[0];
- if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
- ret = IEEE80211_STA_RX_BW_40;
- else
- ret = IEEE80211_STA_RX_BW_20;
- goto out;
+ return IEEE80211_STA_RX_BW_40;
+ return IEEE80211_STA_RX_BW_20;
}
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G ||
info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- ret = IEEE80211_STA_RX_BW_160;
- else if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
- ret = IEEE80211_STA_RX_BW_80;
- else
- ret = IEEE80211_STA_RX_BW_20;
-out:
- rcu_read_unlock();
+ return IEEE80211_STA_RX_BW_160;
- return ret;
+ if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ return IEEE80211_STA_RX_BW_80;
+
+ return IEEE80211_STA_RX_BW_20;
}
if (!vht_cap->vht_supported)
@@ -503,22 +503,29 @@ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
/* FIXME: rename/move - this deals with everything not just VHT */
enum ieee80211_sta_rx_bandwidth
-ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
+_ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta,
+ struct cfg80211_chan_def *chandef)
{
struct sta_info *sta = link_sta->sta;
- struct ieee80211_bss_conf *link_conf;
enum nl80211_chan_width bss_width;
enum ieee80211_sta_rx_bandwidth bw;
- rcu_read_lock();
- link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]);
- if (WARN_ON(!link_conf))
- bss_width = NL80211_CHAN_WIDTH_20_NOHT;
- else
+ if (chandef) {
+ bss_width = chandef->width;
+ } else {
+ struct ieee80211_bss_conf *link_conf;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]);
+ if (WARN_ON_ONCE(!link_conf)) {
+ rcu_read_unlock();
+ return IEEE80211_STA_RX_BW_20;
+ }
bss_width = link_conf->chanreq.oper.width;
- rcu_read_unlock();
+ rcu_read_unlock();
+ }
- bw = ieee80211_sta_cap_rx_bw(link_sta);
+ bw = _ieee80211_sta_cap_rx_bw(link_sta, chandef);
bw = min(bw, link_sta->cur_max_bandwidth);
/* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 9ab7396668d2..21b7c3b280b4 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -161,8 +161,10 @@ void ieee802154_configure_durations(struct wpan_phy *phy,
}
phy->symbol_duration = duration;
- phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
- phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+ phy->lifs_period =
+ (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
+ phy->sifs_period =
+ (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
}
EXPORT_SYMBOL(ieee802154_configure_durations);
@@ -184,10 +186,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
* Should be done when all drivers sets this value.
*/
- wpan_phy->lifs_period =
- (IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
- wpan_phy->sifs_period =
- (IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+ wpan_phy->lifs_period = (IEEE802154_LIFS_PERIOD *
+ wpan_phy->symbol_duration) / NSEC_PER_USEC;
+ wpan_phy->sifs_period = (IEEE802154_SIFS_PERIOD *
+ wpan_phy->symbol_duration) / NSEC_PER_USEC;
}
int ieee802154_register_hw(struct ieee802154_hw *hw)
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 2a6f1ed763c9..6fbed5bb5c3e 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -34,8 +34,8 @@ void ieee802154_xmit_sync_worker(struct work_struct *work)
if (res)
goto err_tx;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ DEV_STATS_INC(dev, tx_packets);
+ DEV_STATS_ADD(dev, tx_bytes, skb->len);
ieee802154_xmit_complete(&local->hw, skb, false);
@@ -90,8 +90,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
if (ret)
goto err_wake_netif_queue;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += len;
+ DEV_STATS_INC(dev, tx_packets);
+ DEV_STATS_ADD(dev, tx_bytes, len);
} else {
local->tx_skb = skb;
queue_work(local->workqueue, &local->sync_tx_work);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 7f53e022e27e..ea9e5817b9e9 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -677,6 +677,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
unsigned int subflows_max;
+ bool sf_created = false;
int i, nr;
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -704,15 +705,18 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
if (nr == 0)
return;
- msk->pm.add_addr_accepted++;
- if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
- msk->pm.subflows >= subflows_max)
- WRITE_ONCE(msk->pm.accept_addr, false);
-
spin_unlock_bh(&msk->pm.lock);
for (i = 0; i < nr; i++)
- __mptcp_subflow_connect(sk, &addrs[i], &remote);
+ if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
+ sf_created = true;
spin_lock_bh(&msk->pm.lock);
+
+ if (sf_created) {
+ msk->pm.add_addr_accepted++;
+ if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+ msk->pm.subflows >= subflows_max)
+ WRITE_ONCE(msk->pm.accept_addr, false);
+ }
}
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
@@ -814,10 +818,13 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
spin_lock_bh(&msk->pm.lock);
removed = true;
- __MPTCP_INC_STATS(sock_net(sk), rm_type);
+ if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
}
if (rm_type == MPTCP_MIB_RMSUBFLOW)
__set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
+ else if (rm_type == MPTCP_MIB_RMADDR)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
if (!removed)
continue;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7d44196ec5b6..a26c2c840fd9 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2040,13 +2040,13 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
do_div(grow, msk->rcvq_space.space);
rcvwin += (grow << 1);
- rcvbuf = min_t(u64, __tcp_space_from_win(scaling_ratio, rcvwin),
+ rcvbuf = min_t(u64, mptcp_space_from_win(sk, rcvwin),
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
if (rcvbuf > sk->sk_rcvbuf) {
u32 window_clamp;
- window_clamp = __tcp_win_from_space(scaling_ratio, rcvbuf);
+ window_clamp = mptcp_win_from_space(sk, rcvbuf);
WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
/* Make subflows follow along. If we do not do this, we
@@ -2202,7 +2202,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
continue;
- /* only the master socket status is relevant here. The exit
+ /* only the MPTCP socket status is relevant here. The exit
* conditions mirror closely tcp_recvmsg()
*/
if (copied >= target)
@@ -2916,9 +2916,14 @@ void mptcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
break;
-
+ case TCP_CLOSE_WAIT:
+ /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
+ * MPTCP "accepted" sockets will be created later on. So no
+ * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
+ */
+ break;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
}
@@ -3521,7 +3526,7 @@ void mptcp_subflow_process_delegated(struct sock *ssk, long status)
static int mptcp_hash(struct sock *sk)
{
/* should never be called,
- * we hash the TCP subflows not the master socket
+ * we hash the TCP subflows not the MPTCP socket
*/
WARN_ON_ONCE(1);
return 0;
@@ -3735,6 +3740,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
WRITE_ONCE(msk->write_seq, subflow->idsn);
WRITE_ONCE(msk->snd_nxt, subflow->idsn);
+ WRITE_ONCE(msk->snd_una, subflow->idsn);
if (likely(!__mptcp_check_fallback(msk)))
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 7aa47e2dd52b..b11a4e50d52b 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -386,6 +386,11 @@ static inline int mptcp_win_from_space(const struct sock *sk, int space)
return __tcp_win_from_space(mptcp_sk(sk)->scaling_ratio, space);
}
+static inline int mptcp_space_from_win(const struct sock *sk, int win)
+{
+ return __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, win);
+}
+
static inline int __mptcp_space(const struct sock *sk)
{
return mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk));
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index f9a4fb17b5b7..2026a9a36f80 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -1579,7 +1579,7 @@ int mptcp_set_rcvlowat(struct sock *sk, int val)
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
return 0;
- space = __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, val);
+ space = mptcp_space_from_win(sk, val);
if (space <= sk->sk_rcvbuf)
return 0;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 612c38570a64..39e2cbdf3801 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1719,7 +1719,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
release_sock(sf->sk);
- /* the newly created socket really belongs to the owning MPTCP master
+ /* the newly created socket really belongs to the owning MPTCP
* socket, even if for additional subflows the allocation is performed
* by a kernel workqueue. Adjust inode references, so that the
* procfs/diag interfaces really show this one belonging to the correct
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 374412ed780b..ef0f8f73826f 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
spinlock_t lock; /* Protect the NCSI device */
unsigned int package_probe_id;/* Current ID during probe */
unsigned int package_num; /* Number of packages */
+ unsigned int channel_probe_id;/* Current channel ID during probe */
struct list_head packages; /* List of packages */
struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
bool multi_package; /* Enable multiple packages */
bool mlx_multi_host; /* Enable multi host Mellanox */
u32 package_whitelist; /* Packages to configure */
+ unsigned char channel_count; /* Num of channels to probe */
};
struct ncsi_cmd_arg {
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 745c788f1d1d..5ecf611c8820 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_suspend_gls:
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
+ nca.channel = ndp->channel_probe_id;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+ ndp->channel_probe_id++;
- nd->state = ncsi_dev_state_suspend_dcnt;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
+ nd->state = ncsi_dev_state_suspend_dcnt;
}
break;
@@ -1345,7 +1347,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
- struct ncsi_channel *nc;
struct ncsi_cmd_arg nca;
unsigned char index;
int ret;
@@ -1423,23 +1424,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_cis;
break;
- case ncsi_dev_state_probe_cis:
- ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
-
- /* Clear initial state */
- nca.type = NCSI_PKT_CMD_CIS;
- nca.package = ndp->active_package->id;
- for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
- nca.channel = index;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
-
- nd->state = ncsi_dev_state_probe_gvi;
- if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
- nd->state = ncsi_dev_state_probe_keep_phy;
- break;
case ncsi_dev_state_probe_keep_phy:
ndp->pending_req_num = 1;
@@ -1452,14 +1436,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_gvi;
break;
+ case ncsi_dev_state_probe_cis:
case ncsi_dev_state_probe_gvi:
case ncsi_dev_state_probe_gc:
case ncsi_dev_state_probe_gls:
np = ndp->active_package;
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
- /* Retrieve version, capability or link status */
- if (nd->state == ncsi_dev_state_probe_gvi)
+ /* Clear initial state, retrieve version, capability or link status */
+ if (nd->state == ncsi_dev_state_probe_cis)
+ nca.type = NCSI_PKT_CMD_CIS;
+ else if (nd->state == ncsi_dev_state_probe_gvi)
nca.type = NCSI_PKT_CMD_GVI;
else if (nd->state == ncsi_dev_state_probe_gc)
nca.type = NCSI_PKT_CMD_GC;
@@ -1467,19 +1454,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
+ nca.channel = ndp->channel_probe_id;
- if (nd->state == ncsi_dev_state_probe_gvi)
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ if (nd->state == ncsi_dev_state_probe_cis) {
+ nd->state = ncsi_dev_state_probe_gvi;
+ if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
+ nd->state = ncsi_dev_state_probe_keep_phy;
+ } else if (nd->state == ncsi_dev_state_probe_gvi) {
nd->state = ncsi_dev_state_probe_gc;
- else if (nd->state == ncsi_dev_state_probe_gc)
+ } else if (nd->state == ncsi_dev_state_probe_gc) {
nd->state = ncsi_dev_state_probe_gls;
- else
+ } else {
+ nd->state = ncsi_dev_state_probe_cis;
+ ndp->channel_probe_id++;
+ }
+
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe_dp;
+ }
break;
case ncsi_dev_state_probe_dp:
ndp->pending_req_num = 1;
@@ -1780,6 +1777,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
ndp->requests[i].ndp = ndp;
timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
}
+ ndp->channel_count = NCSI_RESERVED_CHANNEL;
spin_lock_irqsave(&ncsi_dev_lock, flags);
list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
@@ -1813,6 +1811,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
if (!(ndp->flags & NCSI_DEV_PROBED)) {
ndp->package_probe_id = 0;
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe;
schedule_work(&ndp->work);
return 0;
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index bee290d0f48b..e28be33bdf2c 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
struct ncsi_rsp_gc_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
+ struct ncsi_package *np;
size_t size;
/* Find the channel */
rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
- NULL, &nc);
+ &np, &nc);
if (!nc)
return -ENODEV;
@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
*/
nc->vlan_filter.bitmap = U64_MAX;
nc->vlan_filter.n_vids = rsp->vlan_cnt;
+ np->ndp->channel_count = rsp->channel_cnt;
return 0;
}
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 614815a3ed73..f0aa4d7ef499 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -142,8 +142,13 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
# flow table infrastructure
obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \
- nf_flow_table_offload.o
+ nf_flow_table_offload.o nf_flow_table_xdp.o
nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o
+ifeq ($(CONFIG_NF_FLOW_TABLE),m)
+nf_flow_table-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_flow_table_bpf.o
+else ifeq ($(CONFIG_NF_FLOW_TABLE),y)
+nf_flow_table-$(CONFIG_DEBUG_INFO_BTF) += nf_flow_table_bpf.o
+endif
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 3126911f5042..b00fc285b334 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -815,12 +815,21 @@ int __init netfilter_init(void)
if (ret < 0)
goto err;
+#ifdef CONFIG_LWTUNNEL
+ ret = netfilter_lwtunnel_init();
+ if (ret < 0)
+ goto err_lwtunnel_pernet;
+#endif
ret = netfilter_log_init();
if (ret < 0)
- goto err_pernet;
+ goto err_log_pernet;
return 0;
-err_pernet:
+err_log_pernet:
+#ifdef CONFIG_LWTUNNEL
+ netfilter_lwtunnel_fini();
+err_lwtunnel_pernet:
+#endif
unregister_pernet_subsys(&netfilter_net_ops);
err:
return ret;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 3184cc6be4c9..61431690cbd5 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -53,12 +53,13 @@ MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex or ip_set_ref_lock is held: */
-#define ip_set_dereference(p) \
- rcu_dereference_protected(p, \
+#define ip_set_dereference(inst) \
+ rcu_dereference_protected((inst)->ip_set_list, \
lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
- lockdep_is_held(&ip_set_ref_lock))
+ lockdep_is_held(&ip_set_ref_lock) || \
+ (inst)->is_deleted)
#define ip_set(inst, id) \
- ip_set_dereference((inst)->ip_set_list)[id]
+ ip_set_dereference(inst)[id]
#define ip_set_ref_netlink(inst,id) \
rcu_dereference_raw((inst)->ip_set_list)[id]
#define ip_set_dereference_nfnl(p) \
@@ -1133,7 +1134,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
- tmp = ip_set_dereference(inst->ip_set_list);
+ tmp = ip_set_dereference(inst);
memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
rcu_assign_pointer(inst->ip_set_list, list);
/* Make sure all current packets have passed through */
@@ -1172,23 +1173,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
.len = IPSET_MAXNAMELEN - 1 },
};
+/* In order to return quickly when destroying a single set, it is split
+ * into two stages:
+ * - Cancel garbage collector
+ * - Destroy the set itself via call_rcu()
+ */
+
static void
-ip_set_destroy_set(struct ip_set *set)
+ip_set_destroy_set_rcu(struct rcu_head *head)
{
- pr_debug("set: %s\n", set->name);
+ struct ip_set *set = container_of(head, struct ip_set, rcu);
- /* Must call it without holding any lock */
set->variant->destroy(set);
module_put(set->type->me);
kfree(set);
}
static void
-ip_set_destroy_set_rcu(struct rcu_head *head)
+_destroy_all_sets(struct ip_set_net *inst)
{
- struct ip_set *set = container_of(head, struct ip_set, rcu);
+ struct ip_set *set;
+ ip_set_id_t i;
+ bool need_wait = false;
- ip_set_destroy_set(set);
+ /* First cancel gc's: set:list sets are flushed as well */
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ set->variant->cancel_gc(set);
+ if (set->type->features & IPSET_TYPE_NAME)
+ need_wait = true;
+ }
+ }
+ /* Must wait for flush to be really finished */
+ if (need_wait)
+ rcu_barrier();
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ ip_set(inst, i) = NULL;
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+ }
+ }
}
static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
@@ -1202,11 +1230,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
-
/* Commands are serialized and references are
* protected by the ip_set_ref_lock.
* External systems (i.e. xt_set) must call
- * ip_set_put|get_nfnl_* functions, that way we
+ * ip_set_nfnl_get_* functions, that way we
* can safely check references here.
*
* list:set timer can only decrement the reference
@@ -1214,8 +1241,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
* without holding the lock.
*/
if (!attr[IPSET_ATTR_SETNAME]) {
- /* Must wait for flush to be really finished in list:set */
- rcu_barrier();
read_lock_bh(&ip_set_ref_lock);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
@@ -1226,15 +1251,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
}
inst->is_destroyed = true;
read_unlock_bh(&ip_set_ref_lock);
- for (i = 0; i < inst->ip_set_max; i++) {
- s = ip_set(inst, i);
- if (s) {
- ip_set(inst, i) = NULL;
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
- ip_set_destroy_set(s);
- }
- }
+ _destroy_all_sets(inst);
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
@@ -1255,12 +1272,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
features = s->type->features;
ip_set(inst, i) = NULL;
read_unlock_bh(&ip_set_ref_lock);
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
if (features & IPSET_TYPE_NAME) {
/* Must wait for flush to be really finished */
rcu_barrier();
}
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
call_rcu(&s->rcu, ip_set_destroy_set_rcu);
}
return 0;
@@ -2365,30 +2382,25 @@ ip_set_net_init(struct net *net)
}
static void __net_exit
-ip_set_net_exit(struct net *net)
+ip_set_net_pre_exit(struct net *net)
{
struct ip_set_net *inst = ip_set_pernet(net);
- struct ip_set *set = NULL;
- ip_set_id_t i;
-
inst->is_deleted = true; /* flag for ip_set_nfnl_put */
+}
- nfnl_lock(NFNL_SUBSYS_IPSET);
- for (i = 0; i < inst->ip_set_max; i++) {
- set = ip_set(inst, i);
- if (set) {
- ip_set(inst, i) = NULL;
- set->variant->cancel_gc(set);
- ip_set_destroy_set(set);
- }
- }
- nfnl_unlock(NFNL_SUBSYS_IPSET);
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+ _destroy_all_sets(inst);
kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
.init = ip_set_net_init,
+ .pre_exit = ip_set_net_pre_exit,
.exit = ip_set_net_exit,
.id = &ip_set_net_id,
.size = sizeof(struct ip_set_net),
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 6c3f28bc59b3..bfae7066936b 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e, *next, *prev = NULL;
- int ret;
+ int ret = 0;
- list_for_each_entry(e, &map->members, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (d->before == 0) {
ret = 1;
+ goto out;
} else if (d->before > 0) {
next = list_next_entry(e, list);
ret = !list_is_last(&e->list, &map->members) &&
@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
} else {
ret = prev && prev->id == d->refid;
}
- return ret;
+ goto out;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
static void
@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* Find where to add the new entry */
n = prev = next = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
- struct set_elem *e, *next, *prev = NULL;
+ struct set_elem *e, *n, *next, *prev = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_safe(e, n, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -424,14 +428,8 @@ static void
list_set_destroy(struct ip_set *set)
{
struct list_set *map = set->data;
- struct set_elem *e, *n;
- list_for_each_entry_safe(e, n, &map->members, list) {
- list_del(&e->list);
- ip_set_put_byindex(map->net, e->id);
- ip_set_ext_destroy(set, e);
- kfree(e);
- }
+ WARN_ON_ONCE(!list_empty(&map->members));
kfree(map);
set->data = NULL;
@@ -549,6 +547,9 @@ list_set_cancel_gc(struct ip_set *set)
if (SET_WITH_TIMEOUT(set))
timer_shutdown_sync(&map->gc);
+
+ /* Flush list to drop references to other ipsets */
+ list_set_flush(set);
}
static const struct ip_set_type_variant set_variant = {
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b6d0dcf3a5c3..78a1cc72dc38 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1924,7 +1924,8 @@ proc_do_sync_ports(struct ctl_table *table, int write,
return rc;
}
-static int ipvs_proc_est_cpumask_set(struct ctl_table *table, void *buffer)
+static int ipvs_proc_est_cpumask_set(const struct ctl_table *table,
+ void *buffer)
{
struct netns_ipvs *ipvs = table->extra2;
cpumask_var_t *valp = table->data;
@@ -1962,8 +1963,8 @@ out:
return ret;
}
-static int ipvs_proc_est_cpumask_get(struct ctl_table *table, void *buffer,
- size_t size)
+static int ipvs_proc_est_cpumask_get(const struct ctl_table *table,
+ void *buffer, size_t size)
{
struct netns_ipvs *ipvs = table->extra2;
cpumask_var_t *valp = table->data;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1e689c714127..83e452916403 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -126,7 +126,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (sctph->source != cp->vport || payload_csum ||
skb->ip_summed == CHECKSUM_PARTIAL) {
sctph->source = cp->vport;
- if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+ if (!skb_is_gso(skb))
sctp_nat_csum(skb, sctph, sctphoff);
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -175,7 +175,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
(skb->ip_summed == CHECKSUM_PARTIAL &&
!(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
sctph->dest = cp->dport;
- if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+ if (!skb_is_gso(skb))
sctp_nat_csum(skb, sctph, sctphoff);
} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 8715617b02fe..34ba14e59e95 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -321,7 +321,6 @@ insert_tree(struct net *net,
struct nf_conncount_rb *rbconn;
struct nf_conncount_tuple *conn;
unsigned int count = 0, gc_count = 0;
- u8 keylen = data->keylen;
bool do_gc = true;
spin_lock_bh(&nf_conncount_locks[hash]);
@@ -333,7 +332,7 @@ restart:
rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
parent = *rbnode;
- diff = key_diff(key, rbconn->key, keylen);
+ diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
rbnode = &((*rbnode)->rb_left);
} else if (diff > 0) {
@@ -378,7 +377,7 @@ restart:
conn->tuple = *tuple;
conn->zone = *zone;
- memcpy(rbconn->key, key, sizeof(u32) * keylen);
+ memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
nf_conncount_list_init(&rbconn->list);
list_add(&conn->node, &rbconn->list.head);
@@ -403,7 +402,6 @@ count_tree(struct net *net,
struct rb_node *parent;
struct nf_conncount_rb *rbconn;
unsigned int hash;
- u8 keylen = data->keylen;
hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
root = &data->root[hash];
@@ -414,7 +412,7 @@ count_tree(struct net *net,
rbconn = rb_entry(parent, struct nf_conncount_rb, node);
- diff = key_diff(key, rbconn->key, keylen);
+ diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
parent = rcu_dereference_raw(parent->rb_left);
} else if (diff > 0) {
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index d2492d050fe6..4a136fc3a9c0 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -32,7 +32,9 @@
* -EINVAL - Passed NULL for bpf_tuple pointer
* -EINVAL - opts->reserved is not 0
* -EINVAL - netns_id is less than -1
- * -EINVAL - opts__sz isn't NF_BPF_CT_OPTS_SZ (12)
+ * -EINVAL - opts__sz isn't NF_BPF_CT_OPTS_SZ (16) or 12
+ * -EINVAL - opts->ct_zone_id set when
+ opts__sz isn't NF_BPF_CT_OPTS_SZ (16)
* -EPROTO - l4proto isn't one of IPPROTO_TCP or IPPROTO_UDP
* -ENONET - No network namespace found for netns_id
* -ENOENT - Conntrack lookup could not find entry for tuple
@@ -42,6 +44,8 @@
* Values:
* IPPROTO_TCP, IPPROTO_UDP
* @dir: - connection tracking tuple direction.
+ * @ct_zone_id - connection tracking zone id.
+ * @ct_zone_dir - connection tracking zone direction.
* @reserved - Reserved member, will be reused for more options in future
* Values:
* 0
@@ -51,11 +55,13 @@ struct bpf_ct_opts {
s32 error;
u8 l4proto;
u8 dir;
- u8 reserved[2];
+ u16 ct_zone_id;
+ u8 ct_zone_dir;
+ u8 reserved[3];
};
enum {
- NF_BPF_CT_OPTS_SZ = 12,
+ NF_BPF_CT_OPTS_SZ = 16,
};
static int bpf_nf_ct_tuple_parse(struct bpf_sock_tuple *bpf_tuple,
@@ -104,12 +110,21 @@ __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,
u32 timeout)
{
struct nf_conntrack_tuple otuple, rtuple;
+ struct nf_conntrack_zone ct_zone;
struct nf_conn *ct;
int err;
- if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
- opts_len != NF_BPF_CT_OPTS_SZ)
+ if (!opts || !bpf_tuple)
return ERR_PTR(-EINVAL);
+ if (!(opts_len == NF_BPF_CT_OPTS_SZ || opts_len == 12))
+ return ERR_PTR(-EINVAL);
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->reserved[0] || opts->reserved[1] || opts->reserved[2])
+ return ERR_PTR(-EINVAL);
+ } else {
+ if (opts->ct_zone_id)
+ return ERR_PTR(-EINVAL);
+ }
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS))
return ERR_PTR(-EINVAL);
@@ -130,7 +145,16 @@ __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,
return ERR_PTR(-ENONET);
}
- ct = nf_conntrack_alloc(net, &nf_ct_zone_dflt, &otuple, &rtuple,
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->ct_zone_dir == 0)
+ opts->ct_zone_dir = NF_CT_DEFAULT_ZONE_DIR;
+ nf_ct_zone_init(&ct_zone,
+ opts->ct_zone_id, opts->ct_zone_dir, 0);
+ } else {
+ ct_zone = nf_ct_zone_dflt;
+ }
+
+ ct = nf_conntrack_alloc(net, &ct_zone, &otuple, &rtuple,
GFP_ATOMIC);
if (IS_ERR(ct))
goto out;
@@ -152,12 +176,21 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
{
struct nf_conntrack_tuple_hash *hash;
struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_zone ct_zone;
struct nf_conn *ct;
int err;
- if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] ||
- opts_len != NF_BPF_CT_OPTS_SZ)
+ if (!opts || !bpf_tuple)
return ERR_PTR(-EINVAL);
+ if (!(opts_len == NF_BPF_CT_OPTS_SZ || opts_len == 12))
+ return ERR_PTR(-EINVAL);
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->reserved[0] || opts->reserved[1] || opts->reserved[2])
+ return ERR_PTR(-EINVAL);
+ } else {
+ if (opts->ct_zone_id)
+ return ERR_PTR(-EINVAL);
+ }
if (unlikely(opts->l4proto != IPPROTO_TCP && opts->l4proto != IPPROTO_UDP))
return ERR_PTR(-EPROTO);
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS))
@@ -174,7 +207,16 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
return ERR_PTR(-ENONET);
}
- hash = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
+ if (opts_len == NF_BPF_CT_OPTS_SZ) {
+ if (opts->ct_zone_dir == 0)
+ opts->ct_zone_dir = NF_CT_DEFAULT_ZONE_DIR;
+ nf_ct_zone_init(&ct_zone,
+ opts->ct_zone_id, opts->ct_zone_dir, 0);
+ } else {
+ ct_zone = nf_ct_zone_dflt;
+ }
+
+ hash = nf_conntrack_find_get(net, &ct_zone, &tuple);
if (opts->netns_id >= 0)
put_net(net);
if (!hash)
@@ -245,7 +287,7 @@ __bpf_kfunc_start_defs();
* @opts - Additional options for allocation (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn___init *
bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
@@ -279,7 +321,7 @@ bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts - Additional options for lookup (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn *
bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
@@ -312,7 +354,7 @@ bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts - Additional options for allocation (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn___init *
bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
@@ -347,7 +389,7 @@ bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts - Additional options for lookup (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_ct_opts structure
- * Must be NF_BPF_CT_OPTS_SZ (12)
+ * Must be NF_BPF_CT_OPTS_SZ (16) or 12
*/
__bpf_kfunc struct nf_conn *
bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
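For orientation only, a minimal sketch (not part of the patch) of how an XDP program could pass the enlarged 16-byte bpf_ct_opts with the new ct_zone_id and ct_zone_dir fields. The ___local struct, the extern kfunc declarations and the section/program names follow the pattern used by the BPF selftests; addresses, ports and the zone id are arbitrary illustration values.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* local mirror of the kernel's bpf_ct_opts (16 bytes, new layout) */
struct bpf_ct_opts___local {
	s32 netns_id;
	s32 error;
	u8 l4proto;
	u8 dir;
	u16 ct_zone_id;		/* new: conntrack zone id */
	u8 ct_zone_dir;		/* new: zone direction */
	u8 reserved[3];
};

extern struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
					 struct bpf_sock_tuple *bpf_tuple,
					 u32 tuple__sz,
					 struct bpf_ct_opts___local *opts,
					 u32 opts__sz) __ksym;
extern void bpf_ct_release(struct nf_conn *ct) __ksym;

SEC("xdp")
int ct_zone_lookup(struct xdp_md *ctx)
{
	/* illustration values; a real program parses these from the packet */
	struct bpf_sock_tuple tup = {
		.ipv4.saddr = bpf_htonl(0xc0a80001),	/* 192.168.0.1 */
		.ipv4.daddr = bpf_htonl(0xc0a80002),	/* 192.168.0.2 */
		.ipv4.sport = bpf_htons(12345),
		.ipv4.dport = bpf_htons(80),
	};
	struct bpf_ct_opts___local opts = {
		.netns_id = -1,			/* current netns */
		.l4proto = IPPROTO_TCP,
		.ct_zone_id = 10,		/* look up in conntrack zone 10 */
	};
	struct nf_conn *ct;

	/* opts__sz == 16 selects the new layout; passing 12 keeps the old one */
	ct = bpf_xdp_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
	if (ct)
		bpf_ct_release(ct);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";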
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 74112e9c5dab..6c40bdf8b05a 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -22,9 +22,6 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
-#ifdef CONFIG_LWTUNNEL
-#include <net/netfilter/nf_hooks_lwtunnel.h>
-#endif
#include <linux/rculist_nulls.h>
static bool enable_hooks __read_mostly;
@@ -612,9 +609,6 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
#endif
-#ifdef CONFIG_LWTUNNEL
- NF_SYSCTL_CT_LWTUNNEL,
-#endif
NF_SYSCTL_CT_LAST_SYSCTL,
};
@@ -946,15 +940,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
-#ifdef CONFIG_LWTUNNEL
- [NF_SYSCTL_CT_LWTUNNEL] = {
- .procname = "nf_hooks_lwtunnel",
- .data = NULL,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
- },
-#endif
};
static struct ctl_table nf_ct_netfilter_table[] = {
diff --git a/net/netfilter/nf_flow_table_bpf.c b/net/netfilter/nf_flow_table_bpf.c
new file mode 100644
index 000000000000..4a5f5195f2d2
--- /dev/null
+++ b/net/netfilter/nf_flow_table_bpf.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Unstable Flow Table Helpers for XDP hook
+ *
+ * These are called from the XDP programs.
+ * Note that it is allowed to break compatibility for these functions since
+ * the interface they are exposed through to BPF programs is explicitly
+ * unstable.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <net/xdp.h>
+
+/* bpf_flowtable_opts - options for bpf flowtable helpers
+ * @error: out parameter, set for any encountered error
+ */
+struct bpf_flowtable_opts {
+ s32 error;
+};
+
+enum {
+ NF_BPF_FLOWTABLE_OPTS_SZ = 4,
+};
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+ "Global functions as their definitions will be in nf_flow_table BTF");
+
+__bpf_kfunc_start_defs();
+
+static struct flow_offload_tuple_rhash *
+bpf_xdp_flow_tuple_lookup(struct net_device *dev,
+ struct flow_offload_tuple *tuple, __be16 proto)
+{
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct nf_flowtable *nf_flow_table;
+ struct flow_offload *nf_flow;
+
+ nf_flow_table = nf_flowtable_by_dev(dev);
+ if (!nf_flow_table)
+ return ERR_PTR(-ENOENT);
+
+ tuplehash = flow_offload_lookup(nf_flow_table, tuple);
+ if (!tuplehash)
+ return ERR_PTR(-ENOENT);
+
+ nf_flow = container_of(tuplehash, struct flow_offload,
+ tuplehash[tuplehash->tuple.dir]);
+ flow_offload_refresh(nf_flow_table, nf_flow, false);
+
+ return tuplehash;
+}
+
+__bpf_kfunc struct flow_offload_tuple_rhash *
+bpf_xdp_flow_lookup(struct xdp_md *ctx, struct bpf_fib_lookup *fib_tuple,
+ struct bpf_flowtable_opts *opts, u32 opts_len)
+{
+ struct xdp_buff *xdp = (struct xdp_buff *)ctx;
+ struct flow_offload_tuple tuple = {
+ .iifidx = fib_tuple->ifindex,
+ .l3proto = fib_tuple->family,
+ .l4proto = fib_tuple->l4_protocol,
+ .src_port = fib_tuple->sport,
+ .dst_port = fib_tuple->dport,
+ };
+ struct flow_offload_tuple_rhash *tuplehash;
+ __be16 proto;
+
+ if (opts_len != NF_BPF_FLOWTABLE_OPTS_SZ) {
+ opts->error = -EINVAL;
+ return NULL;
+ }
+
+ switch (fib_tuple->family) {
+ case AF_INET:
+ tuple.src_v4.s_addr = fib_tuple->ipv4_src;
+ tuple.dst_v4.s_addr = fib_tuple->ipv4_dst;
+ proto = htons(ETH_P_IP);
+ break;
+ case AF_INET6:
+ tuple.src_v6 = *(struct in6_addr *)&fib_tuple->ipv6_src;
+ tuple.dst_v6 = *(struct in6_addr *)&fib_tuple->ipv6_dst;
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ opts->error = -EAFNOSUPPORT;
+ return NULL;
+ }
+
+ tuplehash = bpf_xdp_flow_tuple_lookup(xdp->rxq->dev, &tuple, proto);
+ if (IS_ERR(tuplehash)) {
+ opts->error = PTR_ERR(tuplehash);
+ return NULL;
+ }
+
+ return tuplehash;
+}
+
+__diag_pop()
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(nf_ft_kfunc_set)
+BTF_ID_FLAGS(func, bpf_xdp_flow_lookup, KF_TRUSTED_ARGS | KF_RET_NULL)
+BTF_KFUNCS_END(nf_ft_kfunc_set)
+
+static const struct btf_kfunc_id_set nf_flow_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &nf_ft_kfunc_set,
+};
+
+int nf_flow_register_bpf(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
+ &nf_flow_kfunc_set);
+}
+EXPORT_SYMBOL_GPL(nf_flow_register_bpf);
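For orientation only, a companion sketch (not part of the patch) of an XDP program calling the new bpf_xdp_flow_lookup kfunc against the flowtable bound to the receiving device. The opts layout mirrors bpf_flowtable_opts above; the ___local struct, the extern declaration and the program name are illustrative, and filling the bpf_fib_lookup tuple from the packet headers is elided.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define AF_INET 2	/* macro from userspace headers, not in vmlinux.h */

struct bpf_flowtable_opts___local {
	s32 error;
};

extern struct flow_offload_tuple_rhash *
bpf_xdp_flow_lookup(struct xdp_md *ctx, struct bpf_fib_lookup *fib_tuple,
		    struct bpf_flowtable_opts___local *opts,
		    u32 opts_len) __ksym;

SEC("xdp")
int xdp_flow_check(struct xdp_md *ctx)
{
	struct bpf_flowtable_opts___local opts = {};
	struct flow_offload_tuple_rhash *tuplehash;
	struct bpf_fib_lookup tuple = {
		.family = AF_INET,
		.l4_protocol = IPPROTO_TCP,
		.ifindex = ctx->ingress_ifindex,
		/* src/dst addresses and ports would be parsed from the packet */
	};

	tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts));
	if (!tuplehash)		/* no offloaded flow; opts.error holds the reason */
		return XDP_PASS;

	/* a matching flow exists and has just been refreshed by the kfunc */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";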
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 6eef15648b7b..88787b45e30d 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -98,7 +98,7 @@ static int __init nf_flow_inet_module_init(void)
nft_register_flowtable_type(&flowtable_ipv6);
nft_register_flowtable_type(&flowtable_inet);
- return 0;
+ return nf_flow_register_bpf();
}
static void __exit nf_flow_inet_module_exit(void)
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index a010b25076ca..ff1a4e36c2b5 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -1192,7 +1192,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
int err;
if (!nf_flowtable_hw_offload(flowtable))
- return 0;
+ return nf_flow_offload_xdp_setup(flowtable, dev, cmd);
if (dev->netdev_ops->ndo_setup_tc)
err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
diff --git a/net/netfilter/nf_flow_table_xdp.c b/net/netfilter/nf_flow_table_xdp.c
new file mode 100644
index 000000000000..e1252d042699
--- /dev/null
+++ b/net/netfilter/nf_flow_table_xdp.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_flow_table.h>
+
+struct flow_offload_xdp_ft {
+ struct list_head head;
+ struct nf_flowtable *ft;
+ struct rcu_head rcuhead;
+};
+
+struct flow_offload_xdp {
+ struct hlist_node hnode;
+ unsigned long net_device_addr;
+ struct list_head head;
+};
+
+#define NF_XDP_HT_BITS 4
+static DEFINE_HASHTABLE(nf_xdp_hashtable, NF_XDP_HT_BITS);
+static DEFINE_MUTEX(nf_xdp_hashtable_lock);
+
+/* caller must hold rcu read lock */
+struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev)
+{
+ unsigned long key = (unsigned long)dev;
+ struct flow_offload_xdp *iter;
+
+ hash_for_each_possible_rcu(nf_xdp_hashtable, iter, hnode, key) {
+ if (key == iter->net_device_addr) {
+ struct flow_offload_xdp_ft *ft_elem;
+
+ /* A given net_device is expected to be inserted into a single
+ * nf_flowtable only, so we always return the first element here.
+ */
+ ft_elem = list_first_or_null_rcu(&iter->head,
+ struct flow_offload_xdp_ft,
+ head);
+ return ft_elem ? ft_elem->ft : NULL;
+ }
+ }
+
+ return NULL;
+}
+
+static int nf_flowtable_by_dev_insert(struct nf_flowtable *ft,
+ const struct net_device *dev)
+{
+ struct flow_offload_xdp *iter, *elem = NULL;
+ unsigned long key = (unsigned long)dev;
+ struct flow_offload_xdp_ft *ft_elem;
+
+ ft_elem = kzalloc(sizeof(*ft_elem), GFP_KERNEL_ACCOUNT);
+ if (!ft_elem)
+ return -ENOMEM;
+
+ ft_elem->ft = ft;
+
+ mutex_lock(&nf_xdp_hashtable_lock);
+
+ hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
+ if (key == iter->net_device_addr) {
+ elem = iter;
+ break;
+ }
+ }
+
+ if (!elem) {
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL_ACCOUNT);
+ if (!elem)
+ goto err_unlock;
+
+ elem->net_device_addr = key;
+ INIT_LIST_HEAD(&elem->head);
+ hash_add_rcu(nf_xdp_hashtable, &elem->hnode, key);
+ }
+ list_add_tail_rcu(&ft_elem->head, &elem->head);
+
+ mutex_unlock(&nf_xdp_hashtable_lock);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&nf_xdp_hashtable_lock);
+ kfree(ft_elem);
+
+ return -ENOMEM;
+}
+
+static void nf_flowtable_by_dev_remove(struct nf_flowtable *ft,
+ const struct net_device *dev)
+{
+ struct flow_offload_xdp *iter, *elem = NULL;
+ unsigned long key = (unsigned long)dev;
+
+ mutex_lock(&nf_xdp_hashtable_lock);
+
+ hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
+ if (key == iter->net_device_addr) {
+ elem = iter;
+ break;
+ }
+ }
+
+ if (elem) {
+ struct flow_offload_xdp_ft *ft_elem, *ft_next;
+
+ list_for_each_entry_safe(ft_elem, ft_next, &elem->head, head) {
+ if (ft_elem->ft == ft) {
+ list_del_rcu(&ft_elem->head);
+ kfree_rcu(ft_elem, rcuhead);
+ }
+ }
+
+ if (list_empty(&elem->head))
+ hash_del_rcu(&elem->hnode);
+ else
+ elem = NULL;
+ }
+
+ mutex_unlock(&nf_xdp_hashtable_lock);
+
+ if (elem) {
+ synchronize_rcu();
+ kfree(elem);
+ }
+}
+
+int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd)
+{
+ switch (cmd) {
+ case FLOW_BLOCK_BIND:
+ return nf_flowtable_by_dev_insert(flowtable, dev);
+ case FLOW_BLOCK_UNBIND:
+ nf_flowtable_by_dev_remove(flowtable, dev);
+ return 0;
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
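A sketch of how the new dev-to-flowtable map is meant to be consumed on the lookup side (the real consumer is the bpf_xdp_flow_tuple_lookup() helper used by the kfunc earlier in this series): the walk is only valid under rcu_read_lock(), as the comment on nf_flowtable_by_dev() requires, and flow_offload_lookup() is the existing flowtable lookup. Function and variable names below are illustrative.

static struct flow_offload_tuple_rhash *
xdp_flow_lookup_sketch(const struct net_device *dev,
		       struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash = NULL;
	struct nf_flowtable *nf_flowtable;

	rcu_read_lock();
	nf_flowtable = nf_flowtable_by_dev(dev);
	if (nf_flowtable)
		tuplehash = flow_offload_lookup(nf_flowtable, tuple);
	/* The entry is only guaranteed to stay valid inside this RCU
	 * section; XDP programs already run under RCU, which is why the
	 * kfunc can hand the pointer back to its caller. */
	rcu_read_unlock();

	return tuplehash;
}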
diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
index 00e89ffd78f6..d8ebebc9775d 100644
--- a/net/netfilter/nf_hooks_lwtunnel.c
+++ b/net/netfilter/nf_hooks_lwtunnel.c
@@ -3,6 +3,9 @@
#include <linux/sysctl.h>
#include <net/lwtunnel.h>
#include <net/netfilter/nf_hooks_lwtunnel.h>
+#include <linux/netfilter.h>
+
+#include "nf_internals.h"
static inline int nf_hooks_lwtunnel_get(void)
{
@@ -50,4 +53,71 @@ int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
return ret;
}
EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler);
+
+static struct ctl_table nf_lwtunnel_sysctl_table[] = {
+ {
+ .procname = "nf_hooks_lwtunnel",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
+ },
+};
+
+static int __net_init nf_lwtunnel_net_init(struct net *net)
+{
+ struct ctl_table_header *hdr;
+ struct ctl_table *table;
+
+ table = nf_lwtunnel_sysctl_table;
+ if (!net_eq(net, &init_net)) {
+ table = kmemdup(nf_lwtunnel_sysctl_table,
+ sizeof(nf_lwtunnel_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto err_alloc;
+ }
+
+ hdr = register_net_sysctl_sz(net, "net/netfilter", table,
+ ARRAY_SIZE(nf_lwtunnel_sysctl_table));
+ if (!hdr)
+ goto err_reg;
+
+ net->nf.nf_lwtnl_dir_header = hdr;
+
+ return 0;
+err_reg:
+ if (!net_eq(net, &init_net))
+ kfree(table);
+err_alloc:
+ return -ENOMEM;
+}
+
+static void __net_exit nf_lwtunnel_net_exit(struct net *net)
+{
+ const struct ctl_table *table;
+
+ table = net->nf.nf_lwtnl_dir_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->nf.nf_lwtnl_dir_header);
+ if (!net_eq(net, &init_net))
+ kfree(table);
+}
+
+static struct pernet_operations nf_lwtunnel_net_ops = {
+ .init = nf_lwtunnel_net_init,
+ .exit = nf_lwtunnel_net_exit,
+};
+
+int __init netfilter_lwtunnel_init(void)
+{
+ return register_pernet_subsys(&nf_lwtunnel_net_ops);
+}
+
+void netfilter_lwtunnel_fini(void)
+{
+ unregister_pernet_subsys(&nf_lwtunnel_net_ops);
+}
+#else
+int __init netfilter_lwtunnel_init(void) { return 0; }
+void netfilter_lwtunnel_fini(void) {}
#endif /* CONFIG_SYSCTL */
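The new constructor/destructor pair is expected to be wired from the netfilter core init path; that call site is not part of the hunks shown here, so the following is only a hedged sketch of the intended usage (the function name is made up for illustration).

static int __init netfilter_init_sketch(void)
{
	int ret;

	ret = netfilter_lwtunnel_init();
	if (ret < 0)
		return ret;

	ret = netfilter_log_init();
	if (ret < 0)
		netfilter_lwtunnel_fini();

	return ret;
}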
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 832ae64179f0..25403023060b 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -29,6 +29,12 @@ void nf_queue_nf_hook_drop(struct net *net);
/* nf_log.c */
int __init netfilter_log_init(void);
+#ifdef CONFIG_LWTUNNEL
+/* nf_hooks_lwtunnel.c */
+int __init netfilter_lwtunnel_init(void);
+void netfilter_lwtunnel_fini(void);
+#endif
+
/* core.c */
void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
const struct nf_hook_ops *reg);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index be3b4c90d2ed..481ee78e77bc 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -153,14 +153,18 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
{
struct nft_trans *trans;
- trans = kzalloc(sizeof(struct nft_trans) + size, gfp);
+ trans = kzalloc(size, gfp);
if (trans == NULL)
return NULL;
INIT_LIST_HEAD(&trans->list);
- INIT_LIST_HEAD(&trans->binding_list);
trans->msg_type = msg_type;
- trans->ctx = *ctx;
+
+ trans->net = ctx->net;
+ trans->table = ctx->table;
+ trans->seq = ctx->seq;
+ trans->flags = ctx->flags;
+ trans->report = ctx->report;
return trans;
}
@@ -171,10 +175,26 @@ static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
return nft_trans_alloc_gfp(ctx, msg_type, size, GFP_KERNEL);
}
+static struct nft_trans_binding *nft_trans_get_binding(struct nft_trans *trans)
+{
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWCHAIN:
+ case NFT_MSG_NEWSET:
+ return container_of(trans, struct nft_trans_binding, nft_trans);
+ }
+
+ return NULL;
+}
+
static void nft_trans_list_del(struct nft_trans *trans)
{
+ struct nft_trans_binding *trans_binding;
+
list_del(&trans->list);
- list_del(&trans->binding_list);
+
+ trans_binding = nft_trans_get_binding(trans);
+ if (trans_binding)
+ list_del(&trans_binding->binding_list);
}
static void nft_trans_destroy(struct nft_trans *trans)
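For orientation, a hedged sketch of the container layout that nft_trans_get_binding() above relies on; the authoritative definitions live in include/net/netfilter/nf_tables.h and carry more fields than shown here.

/* Only chain and set transactions embed a struct nft_trans_binding; its
 * first member is the common struct nft_trans, so container_of() is well
 * defined for NFT_MSG_NEWCHAIN/NFT_MSG_NEWSET, and the binding_list that
 * used to live in struct nft_trans moves into this container. */
struct nft_trans_binding {
	struct nft_trans	nft_trans;
	struct list_head	binding_list;
};

struct nft_trans_chain {
	struct nft_trans_binding nft_trans_binding;
	struct nft_chain	*chain;
	/* ...policy, name, stats, hooks as used elsewhere in this diff... */
};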
@@ -236,7 +256,7 @@ static void __nft_chain_trans_bind(const struct nft_ctx *ctx,
nft_trans_chain_bound(trans) = bind;
break;
case NFT_MSG_NEWRULE:
- if (trans->ctx.chain == chain)
+ if (nft_trans_rule_chain(trans) == chain)
nft_trans_rule_bound(trans) = bind;
break;
}
@@ -372,21 +392,26 @@ static void nf_tables_unregister_hook(struct net *net,
static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
+ struct nft_trans_binding *binding;
+
+ list_add_tail(&trans->list, &nft_net->commit_list);
+
+ binding = nft_trans_get_binding(trans);
+ if (!binding)
+ return;
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
if (!nft_trans_set_update(trans) &&
nft_set_is_anonymous(nft_trans_set(trans)))
- list_add_tail(&trans->binding_list, &nft_net->binding_list);
+ list_add_tail(&binding->binding_list, &nft_net->binding_list);
break;
case NFT_MSG_NEWCHAIN:
if (!nft_trans_chain_update(trans) &&
nft_chain_binding(nft_trans_chain(trans)))
- list_add_tail(&trans->binding_list, &nft_net->binding_list);
+ list_add_tail(&binding->binding_list, &nft_net->binding_list);
break;
}
-
- list_add_tail(&trans->list, &nft_net->commit_list);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -416,11 +441,28 @@ static int nft_deltable(struct nft_ctx *ctx)
return err;
}
-static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+static struct nft_trans *
+nft_trans_alloc_chain(const struct nft_ctx *ctx, int msg_type)
{
+ struct nft_trans_chain *trans_chain;
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
+ if (!trans)
+ return NULL;
+
+ trans_chain = nft_trans_container_chain(trans);
+ INIT_LIST_HEAD(&trans_chain->nft_trans_binding.binding_list);
+ trans_chain->chain = ctx->chain;
+
+ return trans;
+}
+
+static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+{
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc_chain(ctx, msg_type);
if (trans == NULL)
return ERR_PTR(-ENOMEM);
@@ -432,7 +474,6 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID]));
}
}
- nft_trans_chain(trans) = ctx->chain;
nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
@@ -505,6 +546,7 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
ntohl(nla_get_be32(ctx->nla[NFTA_RULE_ID]));
}
nft_trans_rule(trans) = rule;
+ nft_trans_rule_chain(trans) = ctx->chain;
nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
@@ -560,12 +602,16 @@ static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
struct nft_set *set,
const struct nft_set_desc *desc)
{
+ struct nft_trans_set *trans_set;
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
if (trans == NULL)
return -ENOMEM;
+ trans_set = nft_trans_container_set(trans);
+ INIT_LIST_HEAD(&trans_set->nft_trans_binding.binding_list);
+
if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) {
nft_trans_set_id(trans) =
ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
@@ -1217,11 +1263,11 @@ static bool nft_table_pending_update(const struct nft_ctx *ctx)
return true;
list_for_each_entry(trans, &nft_net->commit_list, list) {
- if (trans->ctx.table == ctx->table &&
+ if (trans->table == ctx->table &&
((trans->msg_type == NFT_MSG_NEWCHAIN &&
nft_trans_chain_update(trans)) ||
(trans->msg_type == NFT_MSG_DELCHAIN &&
- nft_is_base_chain(trans->ctx.chain))))
+ nft_is_base_chain(nft_trans_chain(trans)))))
return true;
}
@@ -1615,15 +1661,15 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
return nft_flush_table(&ctx);
}
-static void nf_tables_table_destroy(struct nft_ctx *ctx)
+static void nf_tables_table_destroy(struct nft_table *table)
{
- if (WARN_ON(ctx->table->use > 0))
+ if (WARN_ON(table->use > 0))
return;
- rhltable_destroy(&ctx->table->chains_ht);
- kfree(ctx->table->name);
- kfree(ctx->table->udata);
- kfree(ctx->table);
+ rhltable_destroy(&table->chains_ht);
+ kfree(table->name);
+ kfree(table->udata);
+ kfree(table);
}
void nft_register_chain_type(const struct nft_chain_type *ctype)
@@ -2049,18 +2095,19 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
return newstats;
}
-static void nft_chain_stats_replace(struct nft_trans *trans)
+static void nft_chain_stats_replace(struct nft_trans_chain *trans)
{
- struct nft_base_chain *chain = nft_base_chain(trans->ctx.chain);
+ const struct nft_trans *t = &trans->nft_trans_binding.nft_trans;
+ struct nft_base_chain *chain = nft_base_chain(trans->chain);
- if (!nft_trans_chain_stats(trans))
+ if (!trans->stats)
return;
- nft_trans_chain_stats(trans) =
- rcu_replace_pointer(chain->stats, nft_trans_chain_stats(trans),
- lockdep_commit_lock_is_held(trans->ctx.net));
+ trans->stats =
+ rcu_replace_pointer(chain->stats, trans->stats,
+ lockdep_commit_lock_is_held(t->net));
- if (!nft_trans_chain_stats(trans))
+ if (!trans->stats)
static_branch_inc(&nft_counters_enabled);
}
@@ -2078,9 +2125,9 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain)
kvfree(chain->blob_next);
}
-void nf_tables_chain_destroy(struct nft_ctx *ctx)
+void nf_tables_chain_destroy(struct nft_chain *chain)
{
- struct nft_chain *chain = ctx->chain;
+ const struct nft_table *table = chain->table;
struct nft_hook *hook, *next;
if (WARN_ON(chain->use > 0))
@@ -2092,7 +2139,7 @@ void nf_tables_chain_destroy(struct nft_ctx *ctx)
if (nft_is_base_chain(chain)) {
struct nft_base_chain *basechain = nft_base_chain(chain);
- if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) {
+ if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) {
list_for_each_entry_safe(hook, next,
&basechain->hook_list, list) {
list_del_rcu(&hook->list);
@@ -2581,7 +2628,7 @@ err_chain_add:
err_trans:
nft_use_dec_restore(&table->use);
err_destroy_chain:
- nf_tables_chain_destroy(ctx);
+ nf_tables_chain_destroy(chain);
return err;
}
@@ -2698,8 +2745,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
}
err = -ENOMEM;
- trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
- sizeof(struct nft_trans_chain));
+ trans = nft_trans_alloc_chain(ctx, NFT_MSG_NEWCHAIN);
if (trans == NULL)
goto err_trans;
@@ -2725,7 +2771,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
err = -EEXIST;
list_for_each_entry(tmp, &nft_net->commit_list, list) {
if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
- tmp->ctx.table == table &&
+ tmp->table == table &&
nft_trans_chain_update(tmp) &&
nft_trans_chain_name(tmp) &&
strcmp(name, nft_trans_chain_name(tmp)) == 0) {
@@ -2774,13 +2820,11 @@ static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
struct nft_trans *trans;
list_for_each_entry(trans, &nft_net->commit_list, list) {
- struct nft_chain *chain = trans->ctx.chain;
-
if (trans->msg_type == NFT_MSG_NEWCHAIN &&
- chain->table == table &&
+ nft_trans_chain(trans)->table == table &&
id == nft_trans_chain_id(trans) &&
- nft_active_genmask(chain, genmask))
- return chain;
+ nft_active_genmask(nft_trans_chain(trans), genmask))
+ return nft_trans_chain(trans);
}
return ERR_PTR(-ENOENT);
}
@@ -2915,8 +2959,7 @@ static int nft_delchain_hook(struct nft_ctx *ctx,
list_move(&hook->list, &chain_del_list);
}
- trans = nft_trans_alloc(ctx, NFT_MSG_DELCHAIN,
- sizeof(struct nft_trans_chain));
+ trans = nft_trans_alloc_chain(ctx, NFT_MSG_DELCHAIN);
if (!trans) {
err = -ENOMEM;
goto err_chain_del_hook;
@@ -3823,6 +3866,15 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
nf_tables_rule_destroy(ctx, rule);
}
+/**
+ * nft_chain_validate - loop detection and hook validation
+ *
+ * @ctx: context containing call depth and base chain
+ * @chain: chain to validate
+ *
+ * Walk through the rules of the given chain and chase all jumps/gotos
+ * and set lookups until either the jump limit is hit or all reachable
+ * chains have been validated.
+ */
int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
{
struct nft_expr *expr, *last;
@@ -3844,6 +3896,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
if (!expr->ops->validate)
continue;
+ /* This may call nft_chain_validate() recursively;
+ * callers that do so must increment ctx->level.
+ */
err = expr->ops->validate(ctx, expr, &data);
if (err < 0)
return err;
@@ -4188,7 +4243,7 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->msg_type == NFT_MSG_NEWRULE &&
- trans->ctx.chain == chain &&
+ nft_trans_rule_chain(trans) == chain &&
id == nft_trans_rule_id(trans))
return nft_trans_rule(trans);
}
@@ -5740,8 +5795,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
- set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
- set->dlen) < 0)
+ nft_set_datatype(set), set->dlen) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
@@ -9418,51 +9472,53 @@ static int nf_tables_validate(struct net *net)
*
* We defer the drop policy until the transaction has been finalized.
*/
-static void nft_chain_commit_drop_policy(struct nft_trans *trans)
+static void nft_chain_commit_drop_policy(struct nft_trans_chain *trans)
{
struct nft_base_chain *basechain;
- if (nft_trans_chain_policy(trans) != NF_DROP)
+ if (trans->policy != NF_DROP)
return;
- if (!nft_is_base_chain(trans->ctx.chain))
+ if (!nft_is_base_chain(trans->chain))
return;
- basechain = nft_base_chain(trans->ctx.chain);
+ basechain = nft_base_chain(trans->chain);
basechain->policy = NF_DROP;
}
-static void nft_chain_commit_update(struct nft_trans *trans)
+static void nft_chain_commit_update(struct nft_trans_chain *trans)
{
+ struct nft_table *table = trans->nft_trans_binding.nft_trans.table;
struct nft_base_chain *basechain;
- if (nft_trans_chain_name(trans)) {
- rhltable_remove(&trans->ctx.table->chains_ht,
- &trans->ctx.chain->rhlhead,
+ if (trans->name) {
+ rhltable_remove(&table->chains_ht,
+ &trans->chain->rhlhead,
nft_chain_ht_params);
- swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
- rhltable_insert_key(&trans->ctx.table->chains_ht,
- trans->ctx.chain->name,
- &trans->ctx.chain->rhlhead,
+ swap(trans->chain->name, trans->name);
+ rhltable_insert_key(&table->chains_ht,
+ trans->chain->name,
+ &trans->chain->rhlhead,
nft_chain_ht_params);
}
- if (!nft_is_base_chain(trans->ctx.chain))
+ if (!nft_is_base_chain(trans->chain))
return;
nft_chain_stats_replace(trans);
- basechain = nft_base_chain(trans->ctx.chain);
+ basechain = nft_base_chain(trans->chain);
- switch (nft_trans_chain_policy(trans)) {
+ switch (trans->policy) {
case NF_DROP:
case NF_ACCEPT:
- basechain->policy = nft_trans_chain_policy(trans);
+ basechain->policy = trans->policy;
break;
}
}
-static void nft_obj_commit_update(struct nft_trans *trans)
+static void nft_obj_commit_update(const struct nft_ctx *ctx,
+ struct nft_trans *trans)
{
struct nft_object *newobj;
struct nft_object *obj;
@@ -9474,15 +9530,21 @@ static void nft_obj_commit_update(struct nft_trans *trans)
return;
obj->ops->update(obj, newobj);
- nft_obj_destroy(&trans->ctx, newobj);
+ nft_obj_destroy(ctx, newobj);
}
static void nft_commit_release(struct nft_trans *trans)
{
+ struct nft_ctx ctx = {
+ .net = trans->net,
+ };
+
+ nft_ctx_update(&ctx, trans);
+
switch (trans->msg_type) {
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
- nf_tables_table_destroy(&trans->ctx);
+ nf_tables_table_destroy(trans->table);
break;
case NFT_MSG_NEWCHAIN:
free_percpu(nft_trans_chain_stats(trans));
@@ -9493,25 +9555,25 @@ static void nft_commit_release(struct nft_trans *trans)
if (nft_trans_chain_update(trans))
nft_hooks_destroy(&nft_trans_chain_hooks(trans));
else
- nf_tables_chain_destroy(&trans->ctx);
+ nf_tables_chain_destroy(nft_trans_chain(trans));
break;
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
- nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+ nf_tables_rule_destroy(&ctx, nft_trans_rule(trans));
break;
case NFT_MSG_DELSET:
case NFT_MSG_DESTROYSET:
- nft_set_destroy(&trans->ctx, nft_trans_set(trans));
+ nft_set_destroy(&ctx, nft_trans_set(trans));
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
- nf_tables_set_elem_destroy(&trans->ctx,
+ nf_tables_set_elem_destroy(&ctx,
nft_trans_elem_set(trans),
nft_trans_elem_priv(trans));
break;
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
- nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
+ nft_obj_destroy(&ctx, nft_trans_obj(trans));
break;
case NFT_MSG_DELFLOWTABLE:
case NFT_MSG_DESTROYFLOWTABLE:
@@ -9523,7 +9585,7 @@ static void nft_commit_release(struct nft_trans *trans)
}
if (trans->put_net)
- put_net(trans->ctx.net);
+ put_net(trans->net);
kfree(trans);
}
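nft_ctx_update() is used throughout the commit and abort paths below, but its definition is not part of the hunks shown here. A hedged reconstruction, based on the per-transaction fields that replace the old trans->ctx, would look roughly like this (details may differ from the real helper):

static void nft_ctx_update_sketch(struct nft_ctx *ctx,
				  const struct nft_trans *trans)
{
	switch (trans->msg_type) {
	case NFT_MSG_NEWRULE:
	case NFT_MSG_DELRULE:
	case NFT_MSG_DESTROYRULE:
		ctx->chain = nft_trans_rule_chain(trans);
		break;
	case NFT_MSG_NEWCHAIN:
	case NFT_MSG_DELCHAIN:
	case NFT_MSG_DESTROYCHAIN:
		ctx->chain = nft_trans_chain(trans);
		break;
	default:
		ctx->chain = NULL;
		break;
	}

	ctx->net    = trans->net;
	ctx->table  = trans->table;
	ctx->family = trans->table->family;
	ctx->report = trans->report;
	ctx->flags  = trans->flags;
	ctx->seq    = trans->seq;
}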
@@ -9642,10 +9704,10 @@ static void nf_tables_commit_chain_prepare_cancel(struct net *net)
struct nft_trans *trans, *next;
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
- struct nft_chain *chain = trans->ctx.chain;
-
if (trans->msg_type == NFT_MSG_NEWRULE ||
trans->msg_type == NFT_MSG_DELRULE) {
+ struct nft_chain *chain = nft_trans_rule_chain(trans);
+
kvfree(chain->blob_next);
chain->blob_next = NULL;
}
@@ -10003,7 +10065,7 @@ static void nf_tables_commit_release(struct net *net)
trans = list_last_entry(&nft_net->commit_list,
struct nft_trans, list);
- get_net(trans->ctx.net);
+ get_net(trans->net);
WARN_ON_ONCE(trans->put_net);
trans->put_net = true;
@@ -10147,12 +10209,15 @@ static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq)
static int nf_tables_commit(struct net *net, struct sk_buff *skb)
{
struct nftables_pernet *nft_net = nft_pernet(net);
+ const struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ struct nft_trans_binding *trans_binding;
struct nft_trans *trans, *next;
unsigned int base_seq, gc_seq;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
struct nft_chain *chain;
struct nft_table *table;
+ struct nft_ctx ctx;
LIST_HEAD(adl);
int err;
@@ -10161,7 +10226,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
return 0;
}
- list_for_each_entry(trans, &nft_net->binding_list, binding_list) {
+ nft_ctx_init(&ctx, net, skb, nlh, NFPROTO_UNSPEC, NULL, NULL, NULL);
+
+ list_for_each_entry(trans_binding, &nft_net->binding_list, binding_list) {
+ trans = &trans_binding->nft_trans;
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
if (!nft_trans_set_update(trans) &&
@@ -10179,6 +10247,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
return -EINVAL;
}
break;
+ default:
+ WARN_ONCE(1, "Unhandled bind type %d", trans->msg_type);
+ break;
}
}
@@ -10194,9 +10265,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
/* 1. Allocate space for next generation rules_gen_X[] */
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
+ struct nft_table *table = trans->table;
int ret;
- ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
+ ret = nf_tables_commit_audit_alloc(&adl, table);
if (ret) {
nf_tables_commit_chain_prepare_cancel(net);
nf_tables_commit_audit_free(&adl);
@@ -10204,7 +10276,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
if (trans->msg_type == NFT_MSG_NEWRULE ||
trans->msg_type == NFT_MSG_DELRULE) {
- chain = trans->ctx.chain;
+ chain = nft_trans_rule_chain(trans);
ret = nf_tables_commit_chain_prepare(net, chain);
if (ret < 0) {
@@ -10237,70 +10309,71 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
net->nft.gencursor = nft_gencursor_next(net);
list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
- nf_tables_commit_audit_collect(&adl, trans->ctx.table,
- trans->msg_type);
+ struct nft_table *table = trans->table;
+
+ nft_ctx_update(&ctx, trans);
+
+ nf_tables_commit_audit_collect(&adl, table, trans->msg_type);
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ if (!(table->flags & __NFT_TABLE_F_UPDATE)) {
nft_trans_destroy(trans);
break;
}
- if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
- nf_tables_table_disable(net, trans->ctx.table);
+ if (table->flags & NFT_TABLE_F_DORMANT)
+ nf_tables_table_disable(net, table);
- trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
+ table->flags &= ~__NFT_TABLE_F_UPDATE;
} else {
- nft_clear(net, trans->ctx.table);
+ nft_clear(net, table);
}
- nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
+ nf_tables_table_notify(&ctx, NFT_MSG_NEWTABLE);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
- list_del_rcu(&trans->ctx.table->list);
- nf_tables_table_notify(&trans->ctx, trans->msg_type);
+ list_del_rcu(&table->list);
+ nf_tables_table_notify(&ctx, trans->msg_type);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
- nft_chain_commit_update(trans);
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN,
+ nft_chain_commit_update(nft_trans_container_chain(trans));
+ nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN,
&nft_trans_chain_hooks(trans));
list_splice(&nft_trans_chain_hooks(trans),
&nft_trans_basechain(trans)->hook_list);
/* trans destroyed after rcu grace period */
} else {
- nft_chain_commit_drop_policy(trans);
- nft_clear(net, trans->ctx.chain);
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN, NULL);
+ nft_chain_commit_drop_policy(nft_trans_container_chain(trans));
+ nft_clear(net, nft_trans_chain(trans));
+ nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN, NULL);
nft_trans_destroy(trans);
}
break;
case NFT_MSG_DELCHAIN:
case NFT_MSG_DESTROYCHAIN:
if (nft_trans_chain_update(trans)) {
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
+ nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN,
&nft_trans_chain_hooks(trans));
- if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {
+ if (!(table->flags & NFT_TABLE_F_DORMANT)) {
nft_netdev_unregister_hooks(net,
&nft_trans_chain_hooks(trans),
true);
}
} else {
- nft_chain_del(trans->ctx.chain);
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
+ nft_chain_del(nft_trans_chain(trans));
+ nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN,
NULL);
- nf_tables_unregister_hook(trans->ctx.net,
- trans->ctx.table,
- trans->ctx.chain);
+ nf_tables_unregister_hook(ctx.net, ctx.table,
+ nft_trans_chain(trans));
}
break;
case NFT_MSG_NEWRULE:
- nft_clear(trans->ctx.net, nft_trans_rule(trans));
- nf_tables_rule_notify(&trans->ctx,
- nft_trans_rule(trans),
+ nft_clear(net, nft_trans_rule(trans));
+ nf_tables_rule_notify(&ctx, nft_trans_rule(trans),
NFT_MSG_NEWRULE);
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
nft_trans_destroy(trans);
@@ -10308,14 +10381,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
list_del_rcu(&nft_trans_rule(trans)->list);
- nf_tables_rule_notify(&trans->ctx,
- nft_trans_rule(trans),
+ nf_tables_rule_notify(&ctx, nft_trans_rule(trans),
trans->msg_type);
- nft_rule_expr_deactivate(&trans->ctx,
- nft_trans_rule(trans),
+ nft_rule_expr_deactivate(&ctx, nft_trans_rule(trans),
NFT_TRANS_COMMIT);
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_NEWSET:
@@ -10334,9 +10405,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
*/
if (nft_set_is_anonymous(nft_trans_set(trans)) &&
!list_empty(&nft_trans_set(trans)->bindings))
- nft_use_dec(&trans->ctx.table->use);
+ nft_use_dec(&table->use);
}
- nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ nf_tables_set_notify(&ctx, nft_trans_set(trans),
NFT_MSG_NEWSET, GFP_KERNEL);
nft_trans_destroy(trans);
break;
@@ -10344,14 +10415,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DESTROYSET:
nft_trans_set(trans)->dead = 1;
list_del_rcu(&nft_trans_set(trans)->list);
- nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ nf_tables_set_notify(&ctx, nft_trans_set(trans),
trans->msg_type, GFP_KERNEL);
break;
case NFT_MSG_NEWSETELEM:
- te = (struct nft_trans_elem *)trans->data;
+ te = nft_trans_container_elem(trans);
nft_setelem_activate(net, te->set, te->elem_priv);
- nf_tables_setelem_notify(&trans->ctx, te->set,
+ nf_tables_setelem_notify(&ctx, te->set,
te->elem_priv,
NFT_MSG_NEWSETELEM);
if (te->set->ops->commit &&
@@ -10363,9 +10434,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
- te = (struct nft_trans_elem *)trans->data;
+ te = nft_trans_container_elem(trans);
- nf_tables_setelem_notify(&trans->ctx, te->set,
+ nf_tables_setelem_notify(&ctx, te->set,
te->elem_priv,
trans->msg_type);
nft_setelem_remove(net, te->set, te->elem_priv);
@@ -10381,13 +10452,13 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
- nft_obj_commit_update(trans);
- nf_tables_obj_notify(&trans->ctx,
+ nft_obj_commit_update(&ctx, trans);
+ nf_tables_obj_notify(&ctx,
nft_trans_obj(trans),
NFT_MSG_NEWOBJ);
} else {
nft_clear(net, nft_trans_obj(trans));
- nf_tables_obj_notify(&trans->ctx,
+ nf_tables_obj_notify(&ctx,
nft_trans_obj(trans),
NFT_MSG_NEWOBJ);
nft_trans_destroy(trans);
@@ -10396,14 +10467,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
nft_obj_del(nft_trans_obj(trans));
- nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
+ nf_tables_obj_notify(&ctx, nft_trans_obj(trans),
trans->msg_type);
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nft_trans_flowtable(trans)->data.flags =
nft_trans_flowtable_flags(trans);
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
NFT_MSG_NEWFLOWTABLE);
@@ -10411,7 +10482,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
&nft_trans_flowtable(trans)->hook_list);
} else {
nft_clear(net, nft_trans_flowtable(trans));
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
NULL,
NFT_MSG_NEWFLOWTABLE);
@@ -10421,7 +10492,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DELFLOWTABLE:
case NFT_MSG_DESTROYFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
trans->msg_type);
@@ -10429,7 +10500,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
&nft_trans_flowtable_hooks(trans));
} else {
list_del_rcu(&nft_trans_flowtable(trans)->list);
- nf_tables_flowtable_notify(&trans->ctx,
+ nf_tables_flowtable_notify(&ctx,
nft_trans_flowtable(trans),
NULL,
trans->msg_type);
@@ -10471,28 +10542,32 @@ static void nf_tables_module_autoload(struct net *net)
static void nf_tables_abort_release(struct nft_trans *trans)
{
+ struct nft_ctx ctx = { };
+
+ nft_ctx_update(&ctx, trans);
+
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
- nf_tables_table_destroy(&trans->ctx);
+ nf_tables_table_destroy(trans->table);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans))
nft_hooks_destroy(&nft_trans_chain_hooks(trans));
else
- nf_tables_chain_destroy(&trans->ctx);
+ nf_tables_chain_destroy(nft_trans_chain(trans));
break;
case NFT_MSG_NEWRULE:
- nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+ nf_tables_rule_destroy(&ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
- nft_set_destroy(&trans->ctx, nft_trans_set(trans));
+ nft_set_destroy(&ctx, nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
nft_trans_elem_priv(trans), true);
break;
case NFT_MSG_NEWOBJ:
- nft_obj_destroy(&trans->ctx, nft_trans_obj(trans));
+ nft_obj_destroy(&ctx, nft_trans_obj(trans));
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans))
@@ -10524,6 +10599,9 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
struct nft_trans *trans, *next;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
+ struct nft_ctx ctx = {
+ .net = net,
+ };
int err = 0;
if (action == NFNL_ABORT_VALIDATE &&
@@ -10532,37 +10610,41 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
list) {
+ struct nft_table *table = trans->table;
+
+ nft_ctx_update(&ctx, trans);
+
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ if (!(table->flags & __NFT_TABLE_F_UPDATE)) {
nft_trans_destroy(trans);
break;
}
- if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
- nf_tables_table_disable(net, trans->ctx.table);
- trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
- } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
- trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
+ if (table->flags & __NFT_TABLE_F_WAS_DORMANT) {
+ nf_tables_table_disable(net, table);
+ table->flags |= NFT_TABLE_F_DORMANT;
+ } else if (table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+ table->flags &= ~NFT_TABLE_F_DORMANT;
}
- if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
- trans->ctx.table->flags &= ~NFT_TABLE_F_OWNER;
- trans->ctx.table->nlpid = 0;
+ if (table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
+ table->flags &= ~NFT_TABLE_F_OWNER;
+ table->nlpid = 0;
}
- trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
+ table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
- list_del_rcu(&trans->ctx.table->list);
+ list_del_rcu(&table->list);
}
break;
case NFT_MSG_DELTABLE:
case NFT_MSG_DESTROYTABLE:
- nft_clear(trans->ctx.net, trans->ctx.table);
+ nft_clear(trans->net, table);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
- if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {
+ if (!(table->flags & NFT_TABLE_F_DORMANT)) {
nft_netdev_unregister_hooks(net,
&nft_trans_chain_hooks(trans),
true);
@@ -10575,11 +10657,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
- nft_use_dec_restore(&trans->ctx.table->use);
- nft_chain_del(trans->ctx.chain);
- nf_tables_unregister_hook(trans->ctx.net,
- trans->ctx.table,
- trans->ctx.chain);
+ nft_use_dec_restore(&table->use);
+ nft_chain_del(nft_trans_chain(trans));
+ nf_tables_unregister_hook(trans->net, table,
+ nft_trans_chain(trans));
}
break;
case NFT_MSG_DELCHAIN:
@@ -10588,8 +10669,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
list_splice(&nft_trans_chain_hooks(trans),
&nft_trans_basechain(trans)->hook_list);
} else {
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, trans->ctx.chain);
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_chain(trans));
}
nft_trans_destroy(trans);
break;
@@ -10598,20 +10679,20 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
- nft_use_dec_restore(&trans->ctx.chain->use);
+ nft_use_dec_restore(&nft_trans_rule_chain(trans)->use);
list_del_rcu(&nft_trans_rule(trans)->list);
- nft_rule_expr_deactivate(&trans->ctx,
+ nft_rule_expr_deactivate(&ctx,
nft_trans_rule(trans),
NFT_TRANS_ABORT);
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
case NFT_MSG_DESTROYRULE:
- nft_use_inc_restore(&trans->ctx.chain->use);
- nft_clear(trans->ctx.net, nft_trans_rule(trans));
- nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ nft_use_inc_restore(&nft_trans_rule_chain(trans)->use);
+ nft_clear(trans->net, nft_trans_rule(trans));
+ nft_rule_expr_activate(&ctx, nft_trans_rule(trans));
+ if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
nft_trans_destroy(trans);
@@ -10621,7 +10702,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
- nft_use_dec_restore(&trans->ctx.table->use);
+ nft_use_dec_restore(&table->use);
if (nft_trans_set_bound(trans)) {
nft_trans_destroy(trans);
break;
@@ -10631,10 +10712,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_DELSET:
case NFT_MSG_DESTROYSET:
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, nft_trans_set(trans));
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_set(trans));
if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
- nft_map_activate(&trans->ctx, nft_trans_set(trans));
+ nft_map_activate(&ctx, nft_trans_set(trans));
nft_trans_destroy(trans);
break;
@@ -10643,7 +10724,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
- te = (struct nft_trans_elem *)trans->data;
+ te = nft_trans_container_elem(trans);
nft_setelem_remove(net, te->set, te->elem_priv);
if (!nft_setelem_is_catchall(te->set, te->elem_priv))
atomic_dec(&te->set->nelems);
@@ -10656,7 +10737,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_DELSETELEM:
case NFT_MSG_DESTROYSETELEM:
- te = (struct nft_trans_elem *)trans->data;
+ te = nft_trans_container_elem(trans);
if (!nft_setelem_active_next(net, te->set, te->elem_priv)) {
nft_setelem_data_activate(net, te->set, te->elem_priv);
@@ -10674,17 +10755,17 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
- nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
+ nft_obj_destroy(&ctx, nft_trans_obj_newobj(trans));
nft_trans_destroy(trans);
} else {
- nft_use_dec_restore(&trans->ctx.table->use);
+ nft_use_dec_restore(&table->use);
nft_obj_del(nft_trans_obj(trans));
}
break;
case NFT_MSG_DELOBJ:
case NFT_MSG_DESTROYOBJ:
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, nft_trans_obj(trans));
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_obj(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWFLOWTABLE:
@@ -10692,7 +10773,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable_hooks(trans));
} else {
- nft_use_dec_restore(&trans->ctx.table->use);
+ nft_use_dec_restore(&table->use);
list_del_rcu(&nft_trans_flowtable(trans)->list);
nft_unregister_flowtable_net_hooks(net,
&nft_trans_flowtable(trans)->hook_list);
@@ -10704,8 +10785,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
list_splice(&nft_trans_flowtable_hooks(trans),
&nft_trans_flowtable(trans)->hook_list);
} else {
- nft_use_inc_restore(&trans->ctx.table->use);
- nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+ nft_use_inc_restore(&table->use);
+ nft_clear(trans->net, nft_trans_flowtable(trans));
}
nft_trans_destroy(trans);
break;
@@ -10810,150 +10891,6 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
}
EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
-/*
- * Loop detection - walk through the ruleset beginning at the destination chain
- * of a new jump until either the source chain is reached (loop) or all
- * reachable chains have been traversed.
- *
- * The loop check is performed whenever a new jump verdict is added to an
- * expression or verdict map or a verdict map is bound to a new chain.
- */
-
-static int nf_tables_check_loops(const struct nft_ctx *ctx,
- const struct nft_chain *chain);
-
-static int nft_check_loops(const struct nft_ctx *ctx,
- const struct nft_set_ext *ext)
-{
- const struct nft_data *data;
- int ret;
-
- data = nft_set_ext_data(ext);
- switch (data->verdict.code) {
- case NFT_JUMP:
- case NFT_GOTO:
- ret = nf_tables_check_loops(ctx, data->verdict.chain);
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
- struct nft_set *set,
- const struct nft_set_iter *iter,
- struct nft_elem_priv *elem_priv)
-{
- const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
-
- if (!nft_set_elem_active(ext, iter->genmask))
- return 0;
-
- if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
- *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
- return 0;
-
- return nft_check_loops(ctx, ext);
-}
-
-static int nft_set_catchall_loops(const struct nft_ctx *ctx,
- struct nft_set *set)
-{
- u8 genmask = nft_genmask_next(ctx->net);
- struct nft_set_elem_catchall *catchall;
- struct nft_set_ext *ext;
- int ret = 0;
-
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
- ext = nft_set_elem_ext(set, catchall->elem);
- if (!nft_set_elem_active(ext, genmask))
- continue;
-
- ret = nft_check_loops(ctx, ext);
- if (ret < 0)
- return ret;
- }
-
- return ret;
-}
-
-static int nf_tables_check_loops(const struct nft_ctx *ctx,
- const struct nft_chain *chain)
-{
- const struct nft_rule *rule;
- const struct nft_expr *expr, *last;
- struct nft_set *set;
- struct nft_set_binding *binding;
- struct nft_set_iter iter;
-
- if (ctx->chain == chain)
- return -ELOOP;
-
- if (fatal_signal_pending(current))
- return -EINTR;
-
- list_for_each_entry(rule, &chain->rules, list) {
- nft_rule_for_each_expr(expr, last, rule) {
- struct nft_immediate_expr *priv;
- const struct nft_data *data;
- int err;
-
- if (strcmp(expr->ops->type->name, "immediate"))
- continue;
-
- priv = nft_expr_priv(expr);
- if (priv->dreg != NFT_REG_VERDICT)
- continue;
-
- data = &priv->data;
- switch (data->verdict.code) {
- case NFT_JUMP:
- case NFT_GOTO:
- err = nf_tables_check_loops(ctx,
- data->verdict.chain);
- if (err < 0)
- return err;
- break;
- default:
- break;
- }
- }
- }
-
- list_for_each_entry(set, &ctx->table->sets, list) {
- if (!nft_is_active_next(ctx->net, set))
- continue;
- if (!(set->flags & NFT_SET_MAP) ||
- set->dtype != NFT_DATA_VERDICT)
- continue;
-
- list_for_each_entry(binding, &set->bindings, list) {
- if (!(binding->flags & NFT_SET_MAP) ||
- binding->chain != chain)
- continue;
-
- iter.genmask = nft_genmask_next(ctx->net);
- iter.type = NFT_ITER_UPDATE;
- iter.skip = 0;
- iter.count = 0;
- iter.err = 0;
- iter.fn = nf_tables_loop_check_setelem;
-
- set->ops->walk(ctx, set, &iter);
- if (!iter.err)
- iter.err = nft_set_catchall_loops(ctx, set);
-
- if (iter.err < 0)
- return iter.err;
- }
- }
-
- return 0;
-}
-
/**
* nft_parse_u32_check - fetch u32 attribute and check for maximum value
*
@@ -11066,13 +11003,16 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
if (data != NULL &&
(data->verdict.code == NFT_GOTO ||
data->verdict.code == NFT_JUMP)) {
- err = nf_tables_check_loops(ctx, data->verdict.chain);
+ err = nft_chain_validate(ctx, data->verdict.chain);
if (err < 0)
return err;
}
return 0;
default:
+ if (type != NFT_DATA_VALUE)
+ return -EINVAL;
+
if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
if (len == 0)
@@ -11081,8 +11021,6 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
sizeof_field(struct nft_regs, data))
return -ERANGE;
- if (data != NULL && type != NFT_DATA_VALUE)
- return -EINVAL;
return 0;
}
}
@@ -11365,7 +11303,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
}
nft_chain_del(ctx->chain);
nft_use_dec(&ctx->table->use);
- nf_tables_chain_destroy(ctx);
+ nf_tables_chain_destroy(ctx->chain);
return 0;
}
@@ -11440,12 +11378,11 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
nft_obj_destroy(&ctx, obj);
}
list_for_each_entry_safe(chain, nc, &table->chains, list) {
- ctx.chain = chain;
nft_chain_del(chain);
nft_use_dec(&table->use);
- nf_tables_chain_destroy(&ctx);
+ nf_tables_chain_destroy(chain);
}
- nf_tables_table_destroy(&ctx);
+ nf_tables_table_destroy(table);
}
static void __nft_release_tables(struct net *net)
@@ -11483,8 +11420,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
gc_seq = nft_gc_seq_begin(nft_net);
- if (!list_empty(&nf_tables_destroy_list))
- nf_tables_trans_destroy_flush_work();
+ nf_tables_trans_destroy_flush_work();
again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
@@ -11588,6 +11524,14 @@ static int __init nf_tables_module_init(void)
{
int err;
+ BUILD_BUG_ON(offsetof(struct nft_trans_table, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_chain, nft_trans_binding.nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_rule, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_set, nft_trans_binding.nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_elem, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_obj, nft_trans) != 0);
+ BUILD_BUG_ON(offsetof(struct nft_trans_flowtable, nft_trans) != 0);
+
err = register_pernet_subsys(&nf_tables_net_ops);
if (err < 0)
return err;
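The BUILD_BUG_ON() checks above pin every transaction container so that its embedded struct nft_trans sits at offset zero: nft_trans_alloc_gfp() now allocates the full container yet still returns a struct nft_trans pointer to its start, and accessors recover the container with container_of(). A hedged sketch of such an accessor (the real helpers are in include/net/netfilter/nf_tables.h):

static inline struct nft_trans_chain *
nft_trans_container_chain_sketch(struct nft_trans *trans)
{
	/* Valid because struct nft_trans is the first member of the
	 * nft_trans_binding embedded at the start of nft_trans_chain,
	 * as asserted by the BUILD_BUG_ON() above. */
	return container_of(trans, struct nft_trans_chain,
			    nft_trans_binding.nft_trans);
}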
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 12ab78fa5d84..64675f1c7f29 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -513,38 +513,38 @@ static void nft_flow_rule_offload_abort(struct net *net,
int err = 0;
list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
- if (trans->ctx.family != NFPROTO_NETDEV)
+ if (trans->table->family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
- err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_DELCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_NEWRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
case NFT_MSG_DELRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
@@ -564,46 +564,46 @@ int nft_flow_rule_offload_commit(struct net *net)
u8 policy;
list_for_each_entry(trans, &nft_net->commit_list, list) {
- if (trans->ctx.family != NFPROTO_NETDEV)
+ if (trans->table->family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
policy = nft_trans_chain_policy(trans);
- err = nft_flow_offload_chain(trans->ctx.chain, &policy,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_DELCHAIN:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
policy = nft_trans_chain_policy(trans);
- err = nft_flow_offload_chain(trans->ctx.chain, &policy,
+ err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_NEWRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- if (trans->ctx.flags & NLM_F_REPLACE ||
- !(trans->ctx.flags & NLM_F_APPEND)) {
+ if (trans->flags & NLM_F_REPLACE ||
+ !(trans->flags & NLM_F_APPEND)) {
err = -EOPNOTSUPP;
break;
}
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
break;
case NFT_MSG_DELRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_rule(trans->ctx.chain,
+ err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index a83637e3f455..580c55268f65 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -317,7 +317,7 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
net_get_random_once(&trace_key, sizeof(trace_key));
info->skbid = (u32)siphash_3u32(hash32_ptr(skb),
- skb_get_hash(skb),
+ skb_get_hash_net(nft_net(pkt), skb),
skb->skb_iif,
&trace_key);
}
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index f466af4f8531..eab4f476b47f 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -366,8 +366,7 @@ static int cttimeout_default_set(struct sk_buff *skb,
__u8 l4num;
int ret;
- if (!cda[CTA_TIMEOUT_L3PROTO] ||
- !cda[CTA_TIMEOUT_L4PROTO] ||
+ if (!cda[CTA_TIMEOUT_L4PROTO] ||
!cda[CTA_TIMEOUT_DATA])
return -EINVAL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00f4bd21c59b..55e28e1da66e 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
rcu);
+ rcu_read_lock();
nfqnl_flush(inst, NULL, 0);
+ rcu_read_unlock();
kfree(inst);
module_put(THIS_MODULE);
}
@@ -323,7 +325,7 @@ static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
hooks = nf_hook_entries_head(net, pf, entry->state.hook);
i = entry->hook_index;
- if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
+ if (!hooks || i >= hooks->num_hook_entries) {
kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
nf_queue_entry_free(entry);
return;
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index 37cfe6dd712d..b58f62195ff3 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -35,11 +35,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
- hooks = (1 << NF_INET_PRE_ROUTING);
- if (priv->flags & NFTA_FIB_F_IIF) {
- hooks |= (1 << NF_INET_LOCAL_IN) |
- (1 << NF_INET_FORWARD);
- }
+ hooks = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD);
break;
case NFT_FIB_RESULT_ADDRTYPE:
if (priv->flags & NFTA_FIB_F_IIF)
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 92d47e469204..868d68302d22 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -51,7 +51,8 @@ static void nft_symhash_eval(const struct nft_expr *expr,
struct sk_buff *skb = pkt->skb;
u32 h;
- h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus);
+ h = reciprocal_scale(__skb_get_hash_symmetric_net(nft_net(pkt), skb),
+ priv->modulus);
regs->data[priv->dreg] = h + priv->offset;
}
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 6475c7abc1fe..ac2422c215e5 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -221,7 +221,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
list_del(&rule->list);
nf_tables_rule_destroy(&chain_ctx, rule);
}
- nf_tables_chain_destroy(&chain_ctx);
+ nf_tables_chain_destroy(chain);
break;
default:
break;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index b314ca728a29..f3080fa1b226 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -132,7 +132,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
return -EINVAL;
err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
- &priv->dreg, NULL, set->dtype,
+ &priv->dreg, NULL,
+ nft_set_datatype(set),
set->dlen);
if (err < 0)
return err;
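nft_lookup_init() now defers to nft_set_datatype() instead of passing set->dtype directly; together with the earlier nft_validate_register_store() change, this keeps verdict-typed map data out of ordinary data registers. A sketch of the helper, matching the open-coded expression it replaced in nf_tables_fill_setelem() above:

static inline u32 nft_set_datatype(const struct nft_set *set)
{
	return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT
					      : NFT_DATA_VALUE;
}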
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index ba0d3683a45d..9139ce38ea7b 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -839,6 +839,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx,
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
+ if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG])
+ return -EINVAL;
+
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_PROTOCOL:
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 0a689c8e0295..50429cbd42da 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
int mac_off = skb_mac_header(skb) - skb->data;
u8 *vlanh, *dst_u8 = (u8 *) d;
struct vlan_ethhdr veth;
- u8 vlan_hlen = 0;
-
- if ((skb->protocol == htons(ETH_P_8021AD) ||
- skb->protocol == htons(ETH_P_8021Q)) &&
- offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
- vlan_hlen += VLAN_HLEN;
vlanh = (u8 *) &veth;
- if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+ if (offset < VLAN_ETH_HLEN) {
u8 ethlen = len;
- if (vlan_hlen &&
- skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
- return false;
- else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
return false;
- if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
- ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
+ if (offset + len > VLAN_ETH_HLEN)
+ ethlen -= offset + len - VLAN_ETH_HLEN;
- memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
+ memcpy(dst_u8, vlanh + offset, ethlen);
len -= ethlen;
if (len == 0)
return true;
dst_u8 += ethlen;
- offset = ETH_HLEN + vlan_hlen;
+ offset = ETH_HLEN;
} else {
- offset -= VLAN_HLEN + vlan_hlen;
+ offset -= VLAN_HLEN;
}
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
return pkt->inneroff;
}
-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
+static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
{
- unsigned int len = priv->offset + priv->len;
+ unsigned int boundary = offset + len;
/* data past ether src/dst requested, copy needed */
- if (len > offsetof(struct ethhdr, h_proto))
+ if (boundary > offsetof(struct ethhdr, h_proto))
return true;
return false;
@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
goto err;
if (skb_vlan_tag_present(skb) &&
- nft_payload_need_vlan_copy(priv)) {
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
if (!nft_payload_copy_vlan(dest, skb,
priv->offset, priv->len))
goto err;
@@ -659,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx,
struct nft_payload *priv = nft_expr_priv(expr);
u32 base;
+ if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
+ !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
+ return -EINVAL;
+
base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
switch (base) {
case NFT_PAYLOAD_TUN_HEADER:
@@ -810,21 +805,79 @@ struct nft_payload_set {
u8 csum_flags;
};
+/* This is not struct vlan_hdr: the field order (proto, then TCI) matches
+ * the tail of struct vlan_ethhdr, not struct vlan_hdr. */
+struct nft_payload_vlan_hdr {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+};
+
+static bool
+nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
+ int *vlan_hlen)
+{
+ struct nft_payload_vlan_hdr *vlanh;
+ __be16 vlan_proto;
+ u16 vlan_tci;
+
+ if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
+ *vlan_hlen = VLAN_HLEN;
+ return true;
+ }
+
+ switch (offset) {
+ case offsetof(struct vlan_ethhdr, h_vlan_proto):
+ if (len == 2) {
+ vlan_proto = nft_reg_load_be16(src);
+ skb->vlan_proto = vlan_proto;
+ } else if (len == 4) {
+ vlanh = (struct nft_payload_vlan_hdr *)src;
+ __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
+ ntohs(vlanh->h_vlan_TCI));
+ } else {
+ return false;
+ }
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+ if (len != 2)
+ return false;
+
+ vlan_tci = ntohs(nft_reg_load_be16(src));
+ skb->vlan_tci = vlan_tci;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void nft_payload_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload_set *priv = nft_expr_priv(expr);
- struct sk_buff *skb = pkt->skb;
const u32 *src = &regs->data[priv->sreg];
- int offset, csum_offset;
+ int offset, csum_offset, vlan_hlen = 0;
+ struct sk_buff *skb = pkt->skb;
__wsum fsum, tsum;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
- offset = skb_mac_header(skb) - skb->data;
+
+ if (skb_vlan_tag_present(skb) &&
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ if (!nft_payload_set_vlan(src, skb,
+ priv->offset, priv->len,
+ &vlan_hlen))
+ goto err;
+
+ if (!vlan_hlen)
+ return;
+ }
+
+ offset = skb_mac_header(skb) - skb->data - vlan_hlen;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
offset = skb_network_offset(skb);
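For reference, the offsets that nft_payload_set_vlan() dispatches on, all within the mac header of a tagged frame; this is only a summary of the logic above, not new code.

/*
 *	offsetof(struct vlan_ethhdr, h_vlan_proto)             == 12
 *	offsetof(struct vlan_ethhdr, h_vlan_TCI)                == 14
 *	offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) == 16
 *
 * Writes at offset 12 or 14 are redirected into skb->vlan_proto /
 * skb->vlan_tci, because a hw-accelerated tag is not present in the skb
 * data; writes at or beyond offset 16 fall through to the normal payload
 * path after subtracting VLAN_HLEN from the mac-header offset.
 */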
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index ef93e0d3bee0..588a5e6ad899 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -59,9 +59,9 @@ MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* fil
/* retained for backwards compatibility */
static unsigned int ip_pkt_list_tot __read_mostly;
module_param(ip_pkt_list_tot, uint, 0400);
-MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
+MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 65535)");
-#define XT_RECENT_MAX_NSTAMPS 256
+#define XT_RECENT_MAX_NSTAMPS 65536
struct recent_entry {
struct list_head list;
@@ -69,7 +69,7 @@ struct recent_entry {
union nf_inet_addr addr;
u_int16_t family;
u_int8_t ttl;
- u_int8_t index;
+ u_int16_t index;
u_int16_t nstamps;
unsigned long stamps[];
};
@@ -80,7 +80,7 @@ struct recent_table {
union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
- u8 nstamps_max_mask;
+ u_int16_t nstamps_max_mask;
struct list_head lru_list;
struct list_head iphash[];
};
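
The xt_recent change above widens the per-entry ring index from u8 to u16 so a table can hold up to 65536 timestamps per address; with an 8-bit index, any mask above 255 would silently wrap early. A minimal standalone sketch of the wrap-around behaviour follows (illustrative only, not part of the patch; the struct and helper names here are made up and do not replicate xt_recent's exact update logic):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_NSTAMPS 65536u	/* mirrors the new XT_RECENT_MAX_NSTAMPS */

struct demo_entry {
	uint16_t index;			/* was effectively u8 before the change above */
	uint16_t nstamps_max_mask;	/* nstamps - 1, with nstamps a power of two */
	unsigned long stamps[DEMO_MAX_NSTAMPS];
};

static void record_stamp(struct demo_entry *e, unsigned long now)
{
	e->stamps[e->index] = now;
	e->index = (e->index + 1) & e->nstamps_max_mask;	/* wrap in range */
}

int main(void)
{
	static struct demo_entry e = { .nstamps_max_mask = 1024 - 1 };
	unsigned long t;

	for (t = 0; t < 3000; t++)
		record_stamp(&e, t);
	printf("index after 3000 packets: %u\n", e.index);	/* 3000 % 1024 = 952 */
	return 0;
}

With a u8 index the slots above 255 would never be reached, which is why the index type has to track the maximum stamp count.
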
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fa9c090cf629..0b7a89db3ab7 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,8 +636,7 @@ static struct proto netlink_proto = {
};
static int __netlink_create(struct net *net, struct socket *sock,
- struct mutex *dump_cb_mutex, int protocol,
- int kern)
+ int protocol, int kern)
{
struct sock *sk;
struct netlink_sock *nlk;
@@ -655,7 +654,6 @@ static int __netlink_create(struct net *net, struct socket *sock,
lockdep_set_class_and_name(&nlk->nl_cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
- nlk->dump_cb_mutex = dump_cb_mutex;
init_waitqueue_head(&nlk->wait);
sk->sk_destruct = netlink_sock_destruct;
@@ -667,7 +665,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct module *module = NULL;
- struct mutex *cb_mutex;
struct netlink_sock *nlk;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
@@ -696,7 +693,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
module = nl_table[protocol].module;
else
err = -EPROTONOSUPPORT;
- cb_mutex = nl_table[protocol].cb_mutex;
bind = nl_table[protocol].bind;
unbind = nl_table[protocol].unbind;
release = nl_table[protocol].release;
@@ -705,7 +701,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (err < 0)
goto out;
- err = __netlink_create(net, sock, cb_mutex, protocol, kern);
+ err = __netlink_create(net, sock, protocol, kern);
if (err < 0)
goto out_module;
@@ -2016,7 +2012,6 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
struct sock *sk;
struct netlink_sock *nlk;
struct listeners *listeners = NULL;
- struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
unsigned int groups;
BUG_ON(!nl_table);
@@ -2027,7 +2022,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
return NULL;
- if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
+ if (__netlink_create(net, sock, unit, 1) < 0)
goto out_sock_release_nosk;
sk = sock->sk;
@@ -2055,7 +2050,6 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
if (!nl_table[unit].registered) {
nl_table[unit].groups = groups;
rcu_assign_pointer(nl_table[unit].listeners, listeners);
- nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
if (cfg) {
nl_table[unit].bind = cfg->bind;
@@ -2326,17 +2320,9 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
netlink_skb_set_owner_r(skb, sk);
if (nlk->dump_done_errno > 0) {
- struct mutex *extra_mutex = nlk->dump_cb_mutex;
-
cb->extack = &extack;
- if (cb->flags & RTNL_FLAG_DUMP_UNLOCKED)
- extra_mutex = NULL;
- if (extra_mutex)
- mutex_lock(extra_mutex);
nlk->dump_done_errno = cb->dump(skb, cb);
- if (extra_mutex)
- mutex_unlock(extra_mutex);
/* EMSGSIZE plus something already in the skb means
* that there's more to dump but current skb has filled up.
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 4e7c968cde2d..5e3ca068f04e 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -121,7 +121,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
- sock_hold(sk);
+ if (sk->sk_state == TCP_LISTEN)
+ sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
goto out;
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 29a7081858cd..2535f3f9f462 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -10,6 +10,7 @@ config OPENVSWITCH
(NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \
(!NF_NAT || NF_NAT) && \
(!NETFILTER_CONNCOUNT || NETFILTER_CONNCOUNT)))
+ depends on PSAMPLE || !PSAMPLE
select LIBCRC32C
select MPLS
select NET_MPLS_GSO
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 964225580824..101f9a23792c 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -24,6 +24,11 @@
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
+
+#if IS_ENABLED(CONFIG_PSAMPLE)
+#include <net/psample.h>
+#endif
+
#include <net/sctp/checksum.h>
#include "datapath.h"
@@ -1043,12 +1048,15 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
struct nlattr *sample_arg;
int rem = nla_len(attr);
const struct sample_arg *arg;
+ u32 init_probability;
bool clone_flow_key;
+ int err;
/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
sample_arg = nla_data(attr);
arg = nla_data(sample_arg);
actions = nla_next(sample_arg, &rem);
+ init_probability = OVS_CB(skb)->probability;
if ((arg->probability != U32_MAX) &&
(!arg->probability || get_random_u32() > arg->probability)) {
@@ -1057,9 +1065,16 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
return 0;
}
+ OVS_CB(skb)->probability = arg->probability;
+
clone_flow_key = !arg->exec;
- return clone_execute(dp, skb, key, 0, actions, rem, last,
- clone_flow_key);
+ err = clone_execute(dp, skb, key, 0, actions, rem, last,
+ clone_flow_key);
+
+ if (!last)
+ OVS_CB(skb)->probability = init_probability;
+
+ return err;
}
/* When 'last' is true, clone() should always consume the 'skb'.
@@ -1299,6 +1314,44 @@ static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
return 0;
}
+#if IS_ENABLED(CONFIG_PSAMPLE)
+static void execute_psample(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{
+ struct psample_group psample_group = {};
+ struct psample_metadata md = {};
+ const struct nlattr *a;
+ u32 rate;
+ int rem;
+
+ nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
+ switch (nla_type(a)) {
+ case OVS_PSAMPLE_ATTR_GROUP:
+ psample_group.group_num = nla_get_u32(a);
+ break;
+
+ case OVS_PSAMPLE_ATTR_COOKIE:
+ md.user_cookie = nla_data(a);
+ md.user_cookie_len = nla_len(a);
+ break;
+ }
+ }
+
+ psample_group.net = ovs_dp_get_net(dp);
+ md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
+ md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
+ md.rate_as_probability = 1;
+
+ rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;
+
+ psample_sample_packet(&psample_group, skb, rate, &md);
+}
+#else
+static void execute_psample(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{}
+#endif
+
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
@@ -1502,6 +1555,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
ovs_kfree_skb_reason(skb, reason);
return 0;
}
+
+ case OVS_ACTION_ATTR_PSAMPLE:
+ execute_psample(dp, skb, a);
+ OVS_CB(skb)->cutlen = 0;
+ if (nla_is_last(a, rem)) {
+ consume_skb(skb);
+ return 0;
+ }
+ break;
}
if (unlikely(err)) {
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 2928c142a2dd..8eb1d644b741 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -168,8 +168,13 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
static void ovs_ct_get_labels(const struct nf_conn *ct,
struct ovs_key_ct_labels *labels)
{
- struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
+ struct nf_conn_labels *cl = NULL;
+ if (ct) {
+ if (ct->master && !nf_ct_is_confirmed(ct))
+ ct = ct->master;
+ cl = nf_ct_labels_find(ct);
+ }
if (cl)
memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
else
@@ -674,6 +679,8 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
action |= BIT(NF_NAT_MANIP_DST);
err = nf_ct_nat(skb, ct, ctinfo, &action, &info->range, info->commit);
+ if (err != NF_ACCEPT)
+ return err;
if (action & BIT(NF_NAT_MANIP_SRC))
ovs_nat_update_key(key, skb, NF_NAT_MANIP_SRC);
@@ -692,6 +699,22 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
}
#endif
+static int verdict_to_errno(unsigned int verdict)
+{
+ switch (verdict & NF_VERDICT_MASK) {
+ case NF_ACCEPT:
+ return 0;
+ case NF_DROP:
+ return -EINVAL;
+ case NF_STOLEN:
+ return -EINPROGRESS;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
* not done already. Update key with new CT state after passing the packet
* through conntrack.
@@ -730,7 +753,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
err = nf_conntrack_in(skb, &state);
if (err != NF_ACCEPT)
- return -ENOENT;
+ return verdict_to_errno(err);
/* Clear CT state NAT flags to mark that we have not yet done
* NAT after the nf_conntrack_in() call. We can actually clear
@@ -757,9 +780,12 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
* the key->ct_state.
*/
if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
- (nf_ct_is_confirmed(ct) || info->commit) &&
- ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
- return -EINVAL;
+ (nf_ct_is_confirmed(ct) || info->commit)) {
+ int err = ovs_ct_nat(net, key, info, skb, ct, ctinfo);
+
+ err = verdict_to_errno(err);
+ if (err)
+ return err;
}
/* Userspace may decide to perform a ct lookup without a helper
@@ -790,9 +816,12 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
* - When committing an unconfirmed connection.
*/
if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
- info->commit) &&
- nf_ct_helper(skb, ct, ctinfo, info->family) != NF_ACCEPT) {
- return -EINVAL;
+ info->commit)) {
+ int err = nf_ct_helper(skb, ct, ctinfo, info->family);
+
+ err = verdict_to_errno(err);
+ if (err)
+ return err;
}
if (nf_ct_protonum(ct) == IPPROTO_TCP &&
@@ -996,10 +1025,9 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
/* This will take care of sending queued events even if the connection
* is already confirmed.
*/
- if (nf_conntrack_confirm(skb) != NF_ACCEPT)
- return -EINVAL;
+ err = nf_conntrack_confirm(skb);
- return 0;
+ return verdict_to_errno(err);
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
@@ -1034,6 +1062,10 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
else
err = ovs_ct_lookup(net, key, info, skb);
+ /* conntrack core returned NF_STOLEN */
+ if (err == -EINPROGRESS)
+ return err;
+
skb_push_rcsum(skb, nh_ofs);
if (err)
ovs_kfree_skb_reason(skb, OVS_DROP_CONNTRACK);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 0cd29971a907..9ca6231ea647 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -115,12 +115,15 @@ struct datapath {
* fragmented.
* @acts_origlen: The netlink size of the flow actions applied to this skb.
* @cutlen: The number of bytes from the packet end to be removed.
+ * @probability: The sampling probability that was applied to this skb; 0 means
+ * no sampling has occurred; U32_MAX means 100% probability.
*/
struct ovs_skb_cb {
struct vport *input_vport;
u16 mru;
u16 acts_origlen;
u32 cutlen;
+ u32 probability;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
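
A small illustration of the probability convention documented above (not part of the patch): the u32 value behaves roughly as a fixed-point fraction of U32_MAX, so 0 means no sampling has happened and U32_MAX means the packet was sampled with 100% probability.

#include <stdint.h>
#include <stdio.h>

/* Convert the u32 fixed-point probability into a plain fraction. */
static double probability_to_fraction(uint32_t prob)
{
	return (double)prob / (double)UINT32_MAX;
}

int main(void)
{
	printf("%.2f\n", probability_to_fraction(0));			/* 0.00, unsampled */
	printf("%.2f\n", probability_to_fraction(UINT32_MAX / 2));	/* ~0.50 */
	printf("%.2f\n", probability_to_fraction(UINT32_MAX));		/* 1.00, always */
	return 0;
}
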
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index f224d9bcea5e..c92bdc4dfe19 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -64,6 +64,7 @@ static bool actions_may_change_flow(const struct nlattr *actions)
case OVS_ACTION_ATTR_TRUNC:
case OVS_ACTION_ATTR_USERSPACE:
case OVS_ACTION_ATTR_DROP:
+ case OVS_ACTION_ATTR_PSAMPLE:
break;
case OVS_ACTION_ATTR_CT:
@@ -2409,7 +2410,7 @@ static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
/* Whenever new actions are added, the need to update this
* function should be considered.
*/
- BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 24);
+ BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 25);
if (!actions)
return;
@@ -3157,6 +3158,28 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return 0;
}
+static int validate_psample(const struct nlattr *attr)
+{
+ static const struct nla_policy policy[OVS_PSAMPLE_ATTR_MAX + 1] = {
+ [OVS_PSAMPLE_ATTR_GROUP] = { .type = NLA_U32 },
+ [OVS_PSAMPLE_ATTR_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = OVS_PSAMPLE_COOKIE_MAX_SIZE,
+ },
+ };
+ struct nlattr *a[OVS_PSAMPLE_ATTR_MAX + 1];
+ int err;
+
+ if (!IS_ENABLED(CONFIG_PSAMPLE))
+ return -EOPNOTSUPP;
+
+ err = nla_parse_nested(a, OVS_PSAMPLE_ATTR_MAX, attr, policy, NULL);
+ if (err)
+ return err;
+
+ return a[OVS_PSAMPLE_ATTR_GROUP] ? 0 : -EINVAL;
+}
+
static int copy_action(const struct nlattr *from,
struct sw_flow_actions **sfa, bool log)
{
@@ -3212,6 +3235,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
[OVS_ACTION_ATTR_ADD_MPLS] = sizeof(struct ovs_action_add_mpls),
[OVS_ACTION_ATTR_DEC_TTL] = (u32)-1,
[OVS_ACTION_ATTR_DROP] = sizeof(u32),
+ [OVS_ACTION_ATTR_PSAMPLE] = (u32)-1,
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
@@ -3490,6 +3514,12 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
return -EINVAL;
break;
+ case OVS_ACTION_ATTR_PSAMPLE:
+ err = validate_psample(a);
+ if (err)
+ return err;
+ break;
+
default:
OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 74c88a6baa43..4b33133cbdff 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -85,7 +85,6 @@ static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_stats64 = dev_get_tstats64,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -140,11 +139,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
err = -ENOMEM;
goto error_free_vport;
}
- vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!vport->dev->tstats) {
- err = -ENOMEM;
- goto error_free_netdev;
- }
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
dev->ifindex = parms->desired_ifindex;
@@ -169,8 +164,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
error_unlock:
rtnl_unlock();
- free_percpu(dev->tstats);
-error_free_netdev:
free_netdev(dev);
error_free_vport:
ovs_vport_free(vport);
@@ -186,7 +179,6 @@ static void internal_dev_destroy(struct vport *vport)
/* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(vport->dev);
- free_percpu(vport->dev->tstats);
rtnl_unlock();
}
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 972ae01a70f7..8732f6e51ae5 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -500,6 +500,7 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
OVS_CB(skb)->input_vport = vport;
OVS_CB(skb)->mru = 0;
OVS_CB(skb)->cutlen = 0;
+ OVS_CB(skb)->probability = 0;
if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
u32 mark;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ea3ebc160e25..4a364cdd445e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -538,6 +538,61 @@ static void *packet_current_frame(struct packet_sock *po,
return packet_lookup_frame(po, rb, rb->head, status);
}
+static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
+{
+ u8 *skb_orig_data = skb->data;
+ int skb_orig_len = skb->len;
+ struct vlan_hdr vhdr, *vh;
+ unsigned int header_len;
+
+ if (!dev)
+ return 0;
+
+ /* In the SOCK_DGRAM scenario, skb data starts at the network
+ * protocol, which is after the VLAN headers. The outer VLAN
+ * header is at the hard_header_len offset in non-variable
+ * length link layer headers. If it's a VLAN device, the
+ * min_header_len should be used to exclude the VLAN header
+ * size.
+ */
+ if (dev->min_header_len == dev->hard_header_len)
+ header_len = dev->hard_header_len;
+ else if (is_vlan_dev(dev))
+ header_len = dev->min_header_len;
+ else
+ return 0;
+
+ skb_push(skb, skb->data - skb_mac_header(skb));
+ vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
+ if (skb_orig_data != skb->data) {
+ skb->data = skb_orig_data;
+ skb->len = skb_orig_len;
+ }
+ if (unlikely(!vh))
+ return 0;
+
+ return ntohs(vh->h_vlan_TCI);
+}
+
+static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
+{
+ __be16 proto = skb->protocol;
+
+ if (unlikely(eth_type_vlan(proto))) {
+ u8 *skb_orig_data = skb->data;
+ int skb_orig_len = skb->len;
+
+ skb_push(skb, skb->data - skb_mac_header(skb));
+ proto = __vlan_get_protocol(skb, proto, NULL);
+ if (skb_orig_data != skb->data) {
+ skb->data = skb_orig_data;
+ skb->len = skb_orig_len;
+ }
+ }
+
+ return proto;
+}
+
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
del_timer_sync(&pkc->retire_blk_timer);
@@ -1007,10 +1062,16 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
+ struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);
+
if (skb_vlan_tag_present(pkc->skb)) {
ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
+ ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
+ ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
+ ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
ppd->hv1.tp_vlan_tci = 0;
ppd->hv1.tp_vlan_tpid = 0;
@@ -2056,8 +2117,7 @@ retry:
skb->dev = dev;
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
- skb->tstamp = sockc.transmit_time;
-
+ skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
skb_setup_tx_timestamp(skb, sockc.tsflags);
if (unlikely(extra_len == 4))
@@ -2122,7 +2182,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
enum skb_drop_reason drop_reason = SKB_CONSUMED;
- struct sock *sk;
+ struct sock *sk = NULL;
struct sockaddr_ll *sll;
struct packet_sock *po;
u8 *skb_head = skb->data;
@@ -2227,7 +2287,7 @@ drop_n_restore:
skb->len = skb_len;
}
drop:
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
}
@@ -2235,7 +2295,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
enum skb_drop_reason drop_reason = SKB_CONSUMED;
- struct sock *sk;
+ struct sock *sk = NULL;
struct packet_sock *po;
struct sockaddr_ll *sll;
union tpacket_uhdr h;
@@ -2428,6 +2488,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
+ h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
+ h.h2->tp_vlan_tpid = ntohs(skb->protocol);
+ status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
h.h2->tp_vlan_tci = 0;
h.h2->tp_vlan_tpid = 0;
@@ -2457,7 +2521,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
- sll->sll_protocol = skb->protocol;
+ sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
+ vlan_get_protocol_dgram(skb) : skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
@@ -2495,7 +2560,7 @@ drop_n_restore:
skb->len = skb_len;
}
drop:
- kfree_skb_reason(skb, drop_reason);
+ sk_skb_reason_drop(sk, skb, drop_reason);
return 0;
drop_n_account:
@@ -2504,7 +2569,7 @@ drop_n_account:
drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
sk->sk_data_ready(sk);
- kfree_skb_reason(copy_skb, drop_reason);
+ sk_skb_reason_drop(sk, copy_skb, drop_reason);
goto drop_n_restore;
}
@@ -2584,7 +2649,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->dev = dev;
skb->priority = READ_ONCE(po->sk.sk_priority);
skb->mark = READ_ONCE(po->sk.sk_mark);
- skb->tstamp = sockc->transmit_time;
+ skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid);
skb_setup_tx_timestamp(skb, sockc->tsflags);
skb_zcopy_set_nouarg(skb, ph.raw);
@@ -3062,7 +3127,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->dev = dev;
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc.mark;
- skb->tstamp = sockc.transmit_time;
+ skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -3482,7 +3547,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
/* Original length was stored in sockaddr_ll fields */
origlen = PACKET_SKB_CB(skb)->sa.origlen;
sll->sll_family = AF_PACKET;
- sll->sll_protocol = skb->protocol;
+ sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
+ vlan_get_protocol_dgram(skb) : skb->protocol;
}
sock_recv_cmsgs(msg, sk, skb);
@@ -3539,6 +3605,21 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
aux.tp_vlan_tci = skb_vlan_tag_get(skb);
aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
+ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
+ if (dev) {
+ aux.tp_vlan_tci = vlan_get_tci(skb, dev);
+ aux.tp_vlan_tpid = ntohs(skb->protocol);
+ aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else {
+ aux.tp_vlan_tci = 0;
+ aux.tp_vlan_tpid = 0;
+ }
+ rcu_read_unlock();
} else {
aux.tp_vlan_tci = 0;
aux.tp_vlan_tpid = 0;
diff --git a/net/psample/psample.c b/net/psample/psample.c
index a5d9b8446f77..a0ddae8a65f9 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -360,8 +360,9 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
}
#endif
-void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
- u32 sample_rate, const struct psample_metadata *md)
+void psample_sample_packet(struct psample_group *group,
+ const struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md)
{
ktime_t tstamp = ktime_get_real();
int out_ifindex = md->out_ifindex;
@@ -376,6 +377,10 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
void *data;
int ret;
+ if (!genl_has_listeners(&psample_nl_family, group->net,
+ PSAMPLE_NL_MCGRP_SAMPLE))
+ return;
+
meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
(out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
(md->out_tc_valid ? nla_total_size(sizeof(u16)) : 0) +
@@ -386,7 +391,9 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
nla_total_size(sizeof(u32)) + /* group_num */
nla_total_size(sizeof(u32)) + /* seq */
nla_total_size_64bit(sizeof(u64)) + /* timestamp */
- nla_total_size(sizeof(u16)); /* protocol */
+ nla_total_size(sizeof(u16)) + /* protocol */
+ (md->user_cookie_len ?
+ nla_total_size(md->user_cookie_len) : 0); /* user cookie */
#ifdef CONFIG_INET
tun_info = skb_tunnel_info(skb);
@@ -486,6 +493,14 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
}
#endif
+ if (md->user_cookie && md->user_cookie_len &&
+ nla_put(nl_skb, PSAMPLE_ATTR_USER_COOKIE, md->user_cookie_len,
+ md->user_cookie))
+ goto error;
+
+ if (md->rate_as_probability)
+ nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY);
+
genlmsg_end(nl_skb, data);
genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 654a3cc0d347..3de9350cbf30 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -132,8 +132,8 @@ static int service_announce_new(struct sockaddr_qrtr *dest,
return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
}
-static int service_announce_del(struct sockaddr_qrtr *dest,
- struct qrtr_server *srv)
+static void service_announce_del(struct sockaddr_qrtr *dest,
+ struct qrtr_server *srv)
{
struct qrtr_ctrl_pkt pkt;
struct msghdr msg = { };
@@ -157,10 +157,10 @@ static int service_announce_del(struct sockaddr_qrtr *dest,
msg.msg_namelen = sizeof(*dest);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
pr_err("failed to announce del service\n");
- return ret;
+ return;
}
static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
@@ -188,7 +188,7 @@ static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
msg.msg_namelen = sizeof(*to);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
pr_err("failed to send lookup notification\n");
}
@@ -207,6 +207,9 @@ static int announce_servers(struct sockaddr_qrtr *sq)
xa_for_each(&node->servers, index, srv) {
ret = service_announce_new(sq, srv);
if (ret < 0) {
+ if (ret == -ENODEV)
+ continue;
+
pr_err("failed to announce new service\n");
return ret;
}
@@ -369,7 +372,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
msg.msg_namelen = sizeof(sq);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0) {
+ if (ret < 0 && ret != -ENODEV) {
pr_err("failed to send bye cmd\n");
return ret;
}
@@ -443,7 +446,7 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
msg.msg_namelen = sizeof(sq);
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0) {
+ if (ret < 0 && ret != -ENODEV) {
pr_err("failed to send del client cmd\n");
return ret;
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index d8111ac83bb6..3dc6956f66f8 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -719,9 +719,7 @@ static int __init rds_tcp_init(void)
{
int ret;
- rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
- sizeof(struct rds_tcp_connection),
- 0, 0, NULL);
+ rds_tcp_conn_slab = KMEM_CACHE(rds_tcp_connection, 0);
if (!rds_tcp_conn_slab) {
ret = -ENOMEM;
goto out;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index c00f04a1a534..7997a19d1da3 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -337,9 +337,7 @@ out:
int rds_tcp_recv_init(void)
{
- rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
- sizeof(struct rds_tcp_incoming),
- 0, 0, NULL);
+ rds_tcp_incoming_slab = KMEM_CACHE(rds_tcp_incoming, 0);
if (!rds_tcp_incoming_slab)
return -ENOMEM;
return 0;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index c3feb4f49d09..7a5367628c05 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -546,10 +546,10 @@ bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
BUG_ON(!rfkill);
- if (WARN(reason &
- ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
- "hw_state reason not supported: 0x%lx", reason))
- return blocked;
+ if (WARN(reason & ~(RFKILL_HARD_BLOCK_SIGNAL |
+ RFKILL_HARD_BLOCK_NOT_OWNER),
+ "hw_state reason not supported: 0x%lx", reason))
+ return rfkill_blocked(rfkill);
spin_lock_irqsave(&rfkill->lock, flags);
prev = !!(rfkill->hard_block_reasons & reason);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9ee622fb1160..2714c4ed928e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -62,7 +62,7 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
{
struct tc_cookie *old;
- old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
+ old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie)));
if (old)
call_rcu(&old->rcu, tcf_free_cookie_rcu);
}
@@ -830,7 +830,6 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
u32 max;
if (*index) {
-again:
rcu_read_lock();
p = idr_find(&idrinfo->action_idr, *index);
@@ -839,7 +838,7 @@ again:
* index but did not assign the pointer yet.
*/
rcu_read_unlock();
- goto again;
+ return -EAGAIN;
}
if (!p) {
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 0e3cf11ae5fc..396b576390d0 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -54,8 +54,8 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(filter, skb);
}
- if (unlikely(!skb->tstamp && skb->mono_delivery_time))
- skb->mono_delivery_time = 0;
+ if (unlikely(!skb->tstamp && skb->tstamp_type))
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
skb_orphan(skb);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index baac083fd8f1..113b907da0f7 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -41,21 +41,26 @@ static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);
+struct zones_ht_key {
+ struct net *net;
+ u16 zone;
+};
+
struct tcf_ct_flow_table {
struct rhash_head node; /* In zones tables */
struct rcu_work rwork;
struct nf_flowtable nf_ft;
refcount_t ref;
- u16 zone;
+ struct zones_ht_key key;
bool dying;
};
static const struct rhashtable_params zones_params = {
.head_offset = offsetof(struct tcf_ct_flow_table, node),
- .key_offset = offsetof(struct tcf_ct_flow_table, zone),
- .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
+ .key_offset = offsetof(struct tcf_ct_flow_table, key),
+ .key_len = sizeof_field(struct tcf_ct_flow_table, key),
.automatic_shrinking = true,
};
@@ -316,11 +321,12 @@ static struct nf_flowtable_type flowtable_ct = {
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
+ struct zones_ht_key key = { .net = net, .zone = params->zone };
struct tcf_ct_flow_table *ct_ft;
int err = -ENOMEM;
mutex_lock(&zones_mutex);
- ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
+ ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
goto out_unlock;
@@ -329,7 +335,7 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
goto err_alloc;
refcount_set(&ct_ft->ref, 1);
- ct_ft->zone = params->zone;
+ ct_ft->key = key;
err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
if (err)
goto err_insert;
@@ -938,6 +944,8 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
action |= BIT(NF_NAT_MANIP_DST);
err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);
+ if (err != NF_ACCEPT)
+ return err & NF_VERDICT_MASK;
if (action & BIT(NF_NAT_MANIP_SRC))
tc_skb_cb(skb)->post_ct_snat = 1;
@@ -1029,7 +1037,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
state.pf = family;
err = nf_conntrack_in(skb, &state);
if (err != NF_ACCEPT)
- goto out_push;
+ goto nf_error;
}
do_nat:
@@ -1041,7 +1049,7 @@ do_nat:
err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
if (err != NF_ACCEPT)
- goto drop;
+ goto nf_error;
if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
@@ -1055,8 +1063,9 @@ do_nat:
}
if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
- if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT)
- goto drop;
+ err = nf_ct_helper(skb, ct, ctinfo, family);
+ if (err != NF_ACCEPT)
+ goto nf_error;
}
if (commit) {
@@ -1069,8 +1078,17 @@ do_nat:
/* This will take care of sending queued events
* even if the connection is already confirmed.
*/
- if (nf_conntrack_confirm(skb) != NF_ACCEPT)
- goto drop;
+ err = nf_conntrack_confirm(skb);
+ if (err != NF_ACCEPT)
+ goto nf_error;
+
+ /* The ct may be dropped if a clash has been resolved,
+ * so it's necessary to retrieve it from skb again to
+ * prevent UAF.
+ */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ skip_add = true;
}
if (!skip_add)
@@ -1094,6 +1112,21 @@ out_frag:
drop:
tcf_action_inc_drop_qstats(&c->common);
return TC_ACT_SHOT;
+
+nf_error:
+ /* some verdicts store extra data in upper bits, such
+ * as errno or queue number.
+ */
+ switch (err & NF_VERDICT_MASK) {
+ case NF_DROP:
+ goto drop;
+ case NF_STOLEN:
+ tcf_action_inc_drop_qstats(&c->common);
+ return TC_ACT_CONSUMED;
+ default:
+ DEBUG_NET_WARN_ON_ONCE(1);
+ goto drop;
+ }
}
static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index a69b53d54039..2ceb4d141b71 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -167,7 +167,9 @@ TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb,
{
struct tcf_sample *s = to_sample(a);
struct psample_group *psample_group;
+ u8 cookie_data[TC_COOKIE_MAX_SIZE];
struct psample_metadata md = {};
+ struct tc_cookie *user_cookie;
int retval;
tcf_lastuse_update(&s->tcf_tm);
@@ -189,6 +191,16 @@ TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb,
if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
skb_push(skb, skb->mac_len);
+ rcu_read_lock();
+ user_cookie = rcu_dereference(a->user_cookie);
+ if (user_cookie) {
+ memcpy(cookie_data, user_cookie->data,
+ user_cookie->len);
+ md.user_cookie = cookie_data;
+ md.user_cookie_len = user_cookie->len;
+ }
+ rcu_read_unlock();
+
md.trunc_size = s->truncate ? s->trunc_size : skb->len;
psample_sample_packet(psample_group, skb, s->rate, &md);
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index cd0accaf844a..dc0229693461 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -246,7 +246,7 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
memset(&opt, 0, sizeof(opt));
opt.index = d->tcf_index;
- opt.refcnt = refcount_read(&d->tcf_refcnt) - ref,
+ opt.refcnt = refcount_read(&d->tcf_refcnt) - ref;
opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
spin_lock_bh(&d->tcf_lock);
opt.action = d->tcf_action;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 5e83e890f6a4..1941ebec23ff 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -104,8 +104,8 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(prog->filter, skb);
}
- if (unlikely(!skb->tstamp && skb->mono_delivery_time))
- skb->mono_delivery_time = 0;
+ if (unlikely(!skb->tstamp && skb->tstamp_type))
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (prog->exts_integrated) {
res->class = 0;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index fd9a6f20b60b..e280c27cb9f9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -41,6 +41,16 @@
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
+#define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \
+ (TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \
+ TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)
+
+#define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \
+ (TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT)
+
struct fl_flow_key {
struct flow_dissector_key_meta meta;
struct flow_dissector_key_control control;
@@ -669,8 +679,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
- [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
- [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
+ [TCA_FLOWER_KEY_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
[TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
@@ -732,6 +744,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 },
[TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1),
[TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED },
+ [TCA_FLOWER_KEY_ENC_FLAGS] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
+ [TCA_FLOWER_KEY_ENC_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32,
+ TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
};
static const struct nla_policy
@@ -1155,19 +1171,29 @@ static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
}
}
-static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
- u32 *flags_mask, struct netlink_ext_ack *extack)
+static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb,
+ bool encap, u32 *flags_key, u32 *flags_mask,
+ struct netlink_ext_ack *extack)
{
+ int fl_key, fl_mask;
u32 key, mask;
+ if (encap) {
+ fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
+ } else {
+ fl_key = TCA_FLOWER_KEY_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
+ }
+
/* mask is mandatory for flags */
- if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
+ if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) {
NL_SET_ERR_MSG(extack, "Missing flags mask");
return -EINVAL;
}
- key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
- mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
+ key = be32_to_cpu(nla_get_be32(tb[fl_key]));
+ mask = be32_to_cpu(nla_get_be32(tb[fl_mask]));
*flags_key = 0;
*flags_mask = 0;
@@ -1178,6 +1204,21 @@ static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
FLOW_DIS_FIRST_FRAG);
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+ FLOW_DIS_F_TUNNEL_CSUM);
+
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+ FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
+
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
+
+ fl_set_key_flag(key, mask, flags_key, flags_mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+ FLOW_DIS_F_TUNNEL_CRIT_OPT);
+
return 0;
}
@@ -1825,9 +1866,9 @@ static int fl_set_key_cfm(struct nlattr **tb,
return 0;
}
-static int fl_set_key(struct net *net, struct nlattr **tb,
- struct fl_flow_key *key, struct fl_flow_key *mask,
- struct netlink_ext_ack *extack)
+static int fl_set_key(struct net *net, struct nlattr *tca_opts,
+ struct nlattr **tb, struct fl_flow_key *key,
+ struct fl_flow_key *mask, struct netlink_ext_ack *extack)
{
__be16 ethertype;
int ret = 0;
@@ -2059,9 +2100,18 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (ret)
return ret;
- if (tb[TCA_FLOWER_KEY_FLAGS])
- ret = fl_set_key_flags(tb, &key->control.flags,
+ if (tb[TCA_FLOWER_KEY_FLAGS]) {
+ ret = fl_set_key_flags(tca_opts, tb, false,
+ &key->control.flags,
&mask->control.flags, extack);
+ if (ret)
+ return ret;
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_FLAGS])
+ ret = fl_set_key_flags(tca_opts, tb, true,
+ &key->enc_control.flags,
+ &mask->enc_control.flags, extack);
return ret;
}
@@ -2152,7 +2202,8 @@ static void fl_init_dissector(struct flow_dissector *dissector,
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
- FL_KEY_IS_MASKED(mask, enc_ipv6))
+ FL_KEY_IS_MASKED(mask, enc_ipv6) ||
+ FL_KEY_IS_MASKED(mask, enc_control))
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
enc_control);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -2310,6 +2361,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
{
struct cls_fl_head *head = fl_head_dereference(tp);
bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
+ struct nlattr *tca_opts = tca[TCA_OPTIONS];
struct cls_fl_filter *fold = *arg;
bool bound_to_filter = false;
struct cls_fl_filter *fnew;
@@ -2318,7 +2370,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
bool in_ht;
int err;
- if (!tca[TCA_OPTIONS]) {
+ if (!tca_opts) {
err = -EINVAL;
goto errout_fold;
}
@@ -2336,7 +2388,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
- tca[TCA_OPTIONS], fl_policy, NULL);
+ tca_opts, fl_policy, NULL);
if (err < 0)
goto errout_tb;
@@ -2412,7 +2464,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
bound_to_filter = true;
}
- err = fl_set_key(net, tb, &fnew->key, &mask->key, extack);
+ err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack);
if (err)
goto unbind_filter;
@@ -2752,18 +2804,19 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
struct nlattr **tca,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tca_opts = tca[TCA_OPTIONS];
struct fl_flow_tmplt *tmplt;
struct nlattr **tb;
int err;
- if (!tca[TCA_OPTIONS])
+ if (!tca_opts)
return ERR_PTR(-EINVAL);
tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
if (!tb)
return ERR_PTR(-ENOBUFS);
err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
- tca[TCA_OPTIONS], fl_policy, NULL);
+ tca_opts, fl_policy, NULL);
if (err)
goto errout_tb;
@@ -2773,7 +2826,8 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
goto errout_tb;
}
tmplt->chain = chain;
- err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
+ err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key,
+ &tmplt->mask, extack);
if (err)
goto errout_tmplt;
@@ -3049,12 +3103,22 @@ static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
}
}
-static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
+static int fl_dump_key_flags(struct sk_buff *skb, bool encap,
+ u32 flags_key, u32 flags_mask)
{
- u32 key, mask;
+ int fl_key, fl_mask;
__be32 _key, _mask;
+ u32 key, mask;
int err;
+ if (encap) {
+ fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
+ } else {
+ fl_key = TCA_FLOWER_KEY_FLAGS;
+ fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
+ }
+
if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
return 0;
@@ -3067,14 +3131,29 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
FLOW_DIS_FIRST_FRAG);
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+ FLOW_DIS_F_TUNNEL_CSUM);
+
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+ FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
+
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
+
+ fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+ TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+ FLOW_DIS_F_TUNNEL_CRIT_OPT);
+
_key = cpu_to_be32(key);
_mask = cpu_to_be32(mask);
- err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
+ err = nla_put(skb, fl_key, 4, &_key);
if (err)
return err;
- return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
+ return nla_put(skb, fl_mask, 4, &_mask);
}
static int fl_dump_key_geneve_opt(struct sk_buff *skb,
@@ -3581,7 +3660,8 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
goto nla_put_failure;
- if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
+ if (fl_dump_key_flags(skb, false, key->control.flags,
+ mask->control.flags))
goto nla_put_failure;
if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
@@ -3592,6 +3672,10 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
goto nla_put_failure;
+ if (fl_dump_key_flags(skb, true, key->enc_control.flags,
+ mask->enc_control.flags))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2a637a17061b..2af24547a82c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -633,6 +633,7 @@ EXPORT_SYMBOL_GPL(netif_carrier_event);
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
+ dev_core_stats_tx_dropped_inc(skb->dev);
__qdisc_drop(skb, to_free);
return NET_XMIT_CN;
}
@@ -676,6 +677,7 @@ struct Qdisc noop_qdisc = {
.qlen = 0,
.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
},
+ .owner = -1,
};
EXPORT_SYMBOL(noop_qdisc);
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index c2ef9dcf91d2..cc6051d4f2ef 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -91,7 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
entry = tcx_entry_fetch_or_create(dev, true, &created);
if (!entry)
return -ENOMEM;
- tcx_miniq_set_active(entry, true);
+ tcx_miniq_inc(entry);
mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
if (created)
tcx_entry_update(dev, entry, true);
@@ -121,7 +121,7 @@ static void ingress_destroy(struct Qdisc *sch)
tcf_block_put_ext(q->block, sch, &q->block_info);
if (entry) {
- tcx_miniq_set_active(entry, false);
+ tcx_miniq_dec(entry);
if (!tcx_entry_is_active(entry)) {
tcx_entry_update(dev, NULL, true);
tcx_entry_free(entry);
@@ -257,7 +257,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
entry = tcx_entry_fetch_or_create(dev, true, &created);
if (!entry)
return -ENOMEM;
- tcx_miniq_set_active(entry, true);
+ tcx_miniq_inc(entry);
mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
if (created)
tcx_entry_update(dev, entry, true);
@@ -276,7 +276,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
entry = tcx_entry_fetch_or_create(dev, false, &created);
if (!entry)
return -ENOMEM;
- tcx_miniq_set_active(entry, true);
+ tcx_miniq_inc(entry);
mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
if (created)
tcx_entry_update(dev, entry, false);
@@ -302,7 +302,7 @@ static void clsact_destroy(struct Qdisc *sch)
tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
if (ingress_entry) {
- tcx_miniq_set_active(ingress_entry, false);
+ tcx_miniq_dec(ingress_entry);
if (!tcx_entry_is_active(ingress_entry)) {
tcx_entry_update(dev, NULL, true);
tcx_entry_free(ingress_entry);
@@ -310,7 +310,7 @@ static void clsact_destroy(struct Qdisc *sch)
}
if (egress_entry) {
- tcx_miniq_set_active(egress_entry, false);
+ tcx_miniq_dec(egress_entry);
if (!tcx_entry_is_active(egress_entry)) {
tcx_entry_update(dev, NULL, false);
tcx_entry_free(egress_entry);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 79e93a19d5fa..06e03f5cd7ce 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
- removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
+ removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
GFP_KERNEL);
if (!removed)
return -ENOMEM;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 1ab17e8a7260..cc2df9f8c14a 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1151,11 +1151,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
list_for_each_entry(entry, &new->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
- if (!cycle) {
- NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
- return -EINVAL;
- }
-
if (cycle < 0 || cycle > INT_MAX) {
NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
return -EINVAL;
@@ -1164,6 +1159,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
new->cycle_time = cycle;
}
+ if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
+ NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
+ return -EINVAL;
+ }
+
taprio_calculate_gate_durations(q, new);
return 0;
@@ -1176,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
{
bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
- if (!qopt && !dev->num_tc) {
- NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
- return -EINVAL;
- }
-
- /* If num_tc is already set, it means that the user already
- * configured the mqprio part
- */
- if (dev->num_tc)
+ if (!qopt) {
+ if (!dev->num_tc) {
+ NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+ return -EINVAL;
+ }
return 0;
+ }
/* taprio imposes that traffic classes map 1:n to tx queues */
if (qopt->num_tc > dev->num_tx_queues) {
@@ -1613,7 +1610,7 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
const struct ethtool_ops *ops = dev->ethtool_ops;
- struct ethtool_ts_info info = {
+ struct kernel_ethtool_ts_info info = {
.cmd = ETHTOOL_GET_TS_INFO,
.phc_index = -1,
};
@@ -1848,6 +1845,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
}
q->flags = taprio_flags;
+ /* Needed for length_to_duration() during netlink attribute parsing */
+ taprio_set_picos_per_byte(dev, q);
+
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
return err;
@@ -1907,7 +1907,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
goto free_sched;
- taprio_set_picos_per_byte(dev, q);
taprio_update_queue_max_sdu(q, new_admin, stab);
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
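
The new lower bound on cycle_time above rejects schedules whose cycle cannot fit even one minimum-size frame (ETH_ZLEN, 60 bytes) per entry. A rough back-of-the-envelope sketch, illustrative only and assuming length_to_duration() is approximately len * picos_per_byte / 1000 nanoseconds:

#include <stdint.h>
#include <stdio.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, as in the kernel */

/* Smallest admissible cycle_time in ns for a given link speed and entry count. */
static uint64_t min_cycle_time_ns(uint64_t link_bps, unsigned int num_entries)
{
	uint64_t picos_per_byte = 8ULL * 1000000000000ULL / link_bps;

	return (uint64_t)num_entries * ETH_ZLEN * picos_per_byte / 1000;
}

int main(void)
{
	/* 8 schedule entries on a 1 Gb/s link: 8 * 60 B * 8 ns/B = 3840 ns */
	printf("%llu ns\n",
	       (unsigned long long)min_cycle_time_ns(1000000000ULL, 8));
	return 0;
}

This is also why taprio_set_picos_per_byte() is now called before the netlink attributes are parsed: the bound depends on the link speed.
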
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c009383369b2..32f76f1298da 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4834,10 +4834,14 @@ int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
return sctp_connect(sock->sk, uaddr, addr_len, flags);
}
-/* FIXME: Write comments. */
+/* Only called when shutdown a listening SCTP socket. */
static int sctp_disconnect(struct sock *sk, int flags)
{
- return -EOPNOTSUPP; /* STUB */
+ if (!sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+
+ sk->sk_shutdown |= RCV_SHUTDOWN;
+ return 0;
}
/* 4.1.4 accept() - TCP Style Syntax
@@ -4866,7 +4870,8 @@ static struct sock *sctp_accept(struct sock *sk, struct proto_accept_arg *arg)
goto out;
}
- if (!sctp_sstate(sk, LISTENING)) {
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN)) {
error = -EINVAL;
goto out;
}
@@ -9393,7 +9398,8 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
}
err = -EINVAL;
- if (!sctp_sstate(sk, LISTENING))
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
break;
err = 0;
diff --git a/net/smc/Makefile b/net/smc/Makefile
index 2c510d543058..60f1c87d5212 100644
--- a/net/smc/Makefile
+++ b/net/smc/Makefile
@@ -4,6 +4,6 @@ obj-$(CONFIG_SMC) += smc.o
obj-$(CONFIG_SMC_DIAG) += smc_diag.o
smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o
-smc-y += smc_tracepoint.o
+smc-y += smc_tracepoint.o smc_inet.o
smc-$(CONFIG_SYSCTL) += smc_sysctl.o
smc-$(CONFIG_SMC_LO) += smc_loopback.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index e50a286fd0fb..73a875573e7a 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -54,6 +54,7 @@
#include "smc_tracepoint.h"
#include "smc_sysctl.h"
#include "smc_loopback.h"
+#include "smc_inet.h"
static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
* creation on server
@@ -170,15 +171,15 @@ static bool smc_hs_congested(const struct sock *sk)
return false;
}
-static struct smc_hashinfo smc_v4_hashinfo = {
+struct smc_hashinfo smc_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};
-static struct smc_hashinfo smc_v6_hashinfo = {
+struct smc_hashinfo smc_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
-static int smc_hash_sk(struct sock *sk)
+int smc_hash_sk(struct sock *sk)
{
struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
struct hlist_head *head;
@@ -193,7 +194,7 @@ static int smc_hash_sk(struct sock *sk)
return 0;
}
-static void smc_unhash_sk(struct sock *sk)
+void smc_unhash_sk(struct sock *sk)
{
struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
@@ -207,7 +208,7 @@ static void smc_unhash_sk(struct sock *sk)
* work which we didn't do because of user hold the sock_lock in the
* BH context
*/
-static void smc_release_cb(struct sock *sk)
+void smc_release_cb(struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
@@ -307,7 +308,7 @@ static int __smc_release(struct smc_sock *smc)
return rc;
}
-static int smc_release(struct socket *sock)
+int smc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -361,25 +362,15 @@ static void smc_destruct(struct sock *sk)
return;
}
-static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
- int protocol)
+void smc_sk_init(struct net *net, struct sock *sk, int protocol)
{
- struct smc_sock *smc;
- struct proto *prot;
- struct sock *sk;
-
- prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
- sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
- if (!sk)
- return NULL;
+ struct smc_sock *smc = smc_sk(sk);
- sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
sk->sk_state = SMC_INIT;
sk->sk_destruct = smc_destruct;
sk->sk_protocol = protocol;
WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
- smc = smc_sk(sk);
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
INIT_WORK(&smc->connect_work, smc_connect_work);
INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
@@ -389,12 +380,30 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
sk->sk_prot->hash(sk);
mutex_init(&smc->clcsock_release_lock);
smc_init_saved_callbacks(smc);
+ smc->limit_smc_hs = net->smc.limit_smc_hs;
+ smc->use_fallback = false; /* assume rdma capability first */
+ smc->fallback_rsn = 0;
+}
+
+static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
+ int protocol)
+{
+ struct proto *prot;
+ struct sock *sk;
+
+ prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
+ sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
+ if (!sk)
+ return NULL;
+
+ sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
+ smc_sk_init(net, sk, protocol);
return sk;
}
-static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
- int addr_len)
+int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct sock *sk = sock->sk;
@@ -459,29 +468,11 @@ out:
static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
unsigned long mask)
{
- struct net *nnet = sock_net(nsk);
-
nsk->sk_userlocks = osk->sk_userlocks;
- if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
nsk->sk_sndbuf = osk->sk_sndbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_sndbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
- else
- WRITE_ONCE(nsk->sk_sndbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_wmem));
- }
- if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
nsk->sk_rcvbuf = osk->sk_rcvbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_rcvbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
- else
- WRITE_ONCE(nsk->sk_rcvbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_rmem));
- }
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
@@ -1641,8 +1632,8 @@ out:
release_sock(&smc->sk);
}
-static int smc_connect(struct socket *sock, struct sockaddr *addr,
- int alen, int flags)
+int smc_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2623,7 +2614,7 @@ out:
read_unlock_bh(&listen_clcsock->sk_callback_lock);
}
-static int smc_listen(struct socket *sock, int backlog)
+int smc_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2688,8 +2679,8 @@ out:
return rc;
}
-static int smc_accept(struct socket *sock, struct socket *new_sock,
- struct proto_accept_arg *arg)
+int smc_accept(struct socket *sock, struct socket *new_sock,
+ struct proto_accept_arg *arg)
{
struct sock *sk = sock->sk, *nsk;
DECLARE_WAITQUEUE(wait, current);
@@ -2758,8 +2749,8 @@ out:
return rc;
}
-static int smc_getname(struct socket *sock, struct sockaddr *addr,
- int peer)
+int smc_getname(struct socket *sock, struct sockaddr *addr,
+ int peer)
{
struct smc_sock *smc;
@@ -2772,7 +2763,7 @@ static int smc_getname(struct socket *sock, struct sockaddr *addr,
return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}
-static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2810,8 +2801,8 @@ out:
return rc;
}
-static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2860,8 +2851,8 @@ static __poll_t smc_accept_poll(struct sock *parent)
return mask;
}
-static __poll_t smc_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+__poll_t smc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -2913,7 +2904,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
return mask;
}
-static int smc_shutdown(struct socket *sock, int how)
+int smc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
bool do_shutdown = true;
@@ -3053,8 +3044,8 @@ static int __smc_setsockopt(struct socket *sock, int level, int optname,
return rc;
}
-static int smc_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int optlen)
+int smc_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -3140,8 +3131,8 @@ out:
return rc;
}
-static int smc_getsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
+int smc_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct smc_sock *smc;
int rc;
@@ -3166,8 +3157,8 @@ static int smc_getsockopt(struct socket *sock, int level, int optname,
return rc;
}
-static int smc_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
+int smc_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
union smc_host_cursor cons, urg;
struct smc_connection *conn;
@@ -3253,9 +3244,9 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
* Note that subsequent recv() calls have to wait till all splice() processing
* completed.
*/
-static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
+ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
@@ -3321,6 +3312,31 @@ static const struct proto_ops smc_sock_ops = {
.splice_read = smc_splice_read,
};
+int smc_create_clcsk(struct net *net, struct sock *sk, int family)
+{
+ struct smc_sock *smc = smc_sk(sk);
+ int rc;
+
+ rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
+ &smc->clcsock);
+ if (rc) {
+ sk_common_release(sk);
+ return rc;
+ }
+
+ /* smc_clcsock_release() does not wait for smc->clcsock->sk's
+ * destruction; its sk_state might not be TCP_CLOSE after
+ * smc->sk is close()d, and TCP timers can fire later and
+ * still need the net reference.
+ */
+ sk = smc->clcsock->sk;
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ sk->sk_net_refcnt = 1;
+ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
+ return 0;
+}
+
static int __smc_create(struct net *net, struct socket *sock, int protocol,
int kern, struct socket *clcsock)
{
@@ -3346,35 +3362,12 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
/* create internal TCP socket for CLC handshake and fallback */
smc = smc_sk(sk);
- smc->use_fallback = false; /* assume rdma capability first */
- smc->fallback_rsn = 0;
-
- /* default behavior from limit_smc_hs in every net namespace */
- smc->limit_smc_hs = net->smc.limit_smc_hs;
rc = 0;
- if (!clcsock) {
- rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
- &smc->clcsock);
- if (rc) {
- sk_common_release(sk);
- goto out;
- }
-
- /* smc_clcsock_release() does not wait smc->clcsock->sk's
- * destruction; its sk_state might not be TCP_CLOSE after
- * smc->sk is close()d, and TCP timers can be fired later,
- * which need net ref.
- */
- sk = smc->clcsock->sk;
- __netns_tracker_free(net, &sk->ns_tracker, false);
- sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
- } else {
+ if (clcsock)
smc->clcsock = clcsock;
- }
-
+ else
+ rc = smc_create_clcsk(net, sk, family);
out:
return rc;
}
@@ -3583,10 +3576,15 @@ static int __init smc_init(void)
pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
goto out_lo;
}
-
+ rc = smc_inet_init();
+ if (rc) {
+ pr_err("%s: smc_inet_init fails with %d\n", __func__, rc);
+ goto out_ulp;
+ }
static_branch_enable(&tcp_have_smc);
return 0;
-
+out_ulp:
+ tcp_unregister_ulp(&smc_ulp_ops);
out_lo:
smc_loopback_exit();
out_ib:
@@ -3623,6 +3621,7 @@ out_pernet_subsys:
static void __exit smc_exit(void)
{
static_branch_disable(&tcp_have_smc);
+ smc_inet_exit();
tcp_unregister_ulp(&smc_ulp_ops);
sock_unregister(PF_SMC);
smc_core_exit();
@@ -3650,4 +3649,9 @@ MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
MODULE_ALIAS_TCP_ULP("smc");
+/* 256 for IPPROTO_SMC and 1 for SOCK_STREAM */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 256, 1);
+#if IS_ENABLED(CONFIG_IPV6)
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 256, 1);
+#endif /* CONFIG_IPV6 */
MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 18c8b7870198..34b781e463c4 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -34,6 +34,44 @@
extern struct proto smc_proto;
extern struct proto smc_proto6;
+extern struct smc_hashinfo smc_v4_hashinfo;
+extern struct smc_hashinfo smc_v6_hashinfo;
+
+int smc_hash_sk(struct sock *sk);
+void smc_unhash_sk(struct sock *sk);
+void smc_release_cb(struct sock *sk);
+
+int smc_release(struct socket *sock);
+int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len);
+int smc_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags);
+int smc_accept(struct socket *sock, struct socket *new_sock,
+ struct proto_accept_arg *arg);
+int smc_getname(struct socket *sock, struct sockaddr *addr,
+ int peer);
+__poll_t smc_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+int smc_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+int smc_listen(struct socket *sock, int backlog);
+int smc_shutdown(struct socket *sock, int how);
+int smc_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+int smc_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen);
+int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+
+/* smc sock initialization */
+void smc_sk_init(struct net *net, struct sock *sk, int protocol);
+/* clcsock initialization */
+int smc_create_clcsk(struct net *net, struct sock *sk, int family);
+
#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index fafdb97adfad..3b95828d9976 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -2006,7 +2006,7 @@ out:
}
#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
-#define SMCR_RMBE_SIZES 5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */
+#define SMCR_RMBE_SIZES 15 /* 0 -> 16KB, 1 -> 32KB, .. 15 -> 512MB */
/* convert the RMB size into the compressed notation (minimum 16K, see
* SMCD/R_DMBE_SIZES.
@@ -2015,7 +2015,6 @@ out:
*/
static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
{
- const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
u8 compressed;
if (size <= SMC_BUF_MIN_SIZE)
@@ -2025,9 +2024,11 @@ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
compressed = min_t(u8, ilog2(size) + 1,
is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
if (!is_smcd && is_rmb)
/* RMBs are backed by & limited to max size of scatterlists */
- compressed = min_t(u8, compressed, ilog2(max_scat >> 14));
+ compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14));
+#endif
return compressed;
}
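
For reference, the compressed notation used above maps a value c to a buffer of 16KB << c bytes, so the new SMCR_RMBE_SIZES of 15 corresponds to 16KB << 15 = 512MB (and the old limit of 5 to 512KB). A throwaway user-space check of that arithmetic, not part of the patch:

#include <stdio.h>

int main(void)
{
	/* prints 16 KB for c = 0 up to 524288 KB (512 MB) for c = 15 */
	for (int c = 0; c <= 15; c++)
		printf("compressed %2d -> %u KB\n", c, 16u << c);
	return 0;
}
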
diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c
new file mode 100644
index 000000000000..bece346dd8e9
--- /dev/null
+++ b/net/smc/smc_inet.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Definitions for the IPPROTO_SMC (socket related)
+ *
+ * Copyright IBM Corp. 2016, 2018
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: D. Wythe <alibuda@linux.alibaba.com>
+ */
+
+#include <net/protocol.h>
+#include <net/sock.h>
+
+#include "smc_inet.h"
+#include "smc.h"
+
+static int smc_inet_init_sock(struct sock *sk);
+
+static struct proto smc_inet_prot = {
+ .name = "INET_SMC",
+ .owner = THIS_MODULE,
+ .init = smc_inet_init_sock,
+ .hash = smc_hash_sk,
+ .unhash = smc_unhash_sk,
+ .release_cb = smc_release_cb,
+ .obj_size = sizeof(struct smc_sock),
+ .h.smc_hash = &smc_v4_hashinfo,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
+};
+
+static const struct proto_ops smc_inet_stream_ops = {
+ .family = PF_INET,
+ .owner = THIS_MODULE,
+ .release = smc_release,
+ .bind = smc_bind,
+ .connect = smc_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = smc_accept,
+ .getname = smc_getname,
+ .poll = smc_poll,
+ .ioctl = smc_ioctl,
+ .listen = smc_listen,
+ .shutdown = smc_shutdown,
+ .setsockopt = smc_setsockopt,
+ .getsockopt = smc_getsockopt,
+ .sendmsg = smc_sendmsg,
+ .recvmsg = smc_recvmsg,
+ .mmap = sock_no_mmap,
+ .splice_read = smc_splice_read,
+};
+
+static struct inet_protosw smc_inet_protosw = {
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_SMC,
+ .prot = &smc_inet_prot,
+ .ops = &smc_inet_stream_ops,
+ .flags = INET_PROTOSW_ICSK,
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct proto smc_inet6_prot = {
+ .name = "INET6_SMC",
+ .owner = THIS_MODULE,
+ .init = smc_inet_init_sock,
+ .hash = smc_hash_sk,
+ .unhash = smc_unhash_sk,
+ .release_cb = smc_release_cb,
+ .obj_size = sizeof(struct smc_sock),
+ .h.smc_hash = &smc_v6_hashinfo,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
+};
+
+static const struct proto_ops smc_inet6_stream_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+ .release = smc_release,
+ .bind = smc_bind,
+ .connect = smc_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = smc_accept,
+ .getname = smc_getname,
+ .poll = smc_poll,
+ .ioctl = smc_ioctl,
+ .listen = smc_listen,
+ .shutdown = smc_shutdown,
+ .setsockopt = smc_setsockopt,
+ .getsockopt = smc_getsockopt,
+ .sendmsg = smc_sendmsg,
+ .recvmsg = smc_recvmsg,
+ .mmap = sock_no_mmap,
+ .splice_read = smc_splice_read,
+};
+
+static struct inet_protosw smc_inet6_protosw = {
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_SMC,
+ .prot = &smc_inet6_prot,
+ .ops = &smc_inet6_stream_ops,
+ .flags = INET_PROTOSW_ICSK,
+};
+#endif /* CONFIG_IPV6 */
+
+static int smc_inet_init_sock(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+
+ /* init common smc sock */
+ smc_sk_init(net, sk, IPPROTO_SMC);
+ /* create clcsock */
+ return smc_create_clcsk(net, sk, sk->sk_family);
+}
+
+int __init smc_inet_init(void)
+{
+ int rc;
+
+ rc = proto_register(&smc_inet_prot, 1);
+ if (rc) {
+ pr_err("%s: proto_register smc_inet_prot fails with %d\n",
+ __func__, rc);
+ return rc;
+ }
+ /* no return value */
+ inet_register_protosw(&smc_inet_protosw);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ rc = proto_register(&smc_inet6_prot, 1);
+ if (rc) {
+ pr_err("%s: proto_register smc_inet6_prot fails with %d\n",
+ __func__, rc);
+ goto out_inet6_prot;
+ }
+ rc = inet6_register_protosw(&smc_inet6_protosw);
+ if (rc) {
+ pr_err("%s: inet6_register_protosw smc_inet6_protosw fails with %d\n",
+ __func__, rc);
+ goto out_inet6_protosw;
+ }
+ return rc;
+out_inet6_protosw:
+ proto_unregister(&smc_inet6_prot);
+out_inet6_prot:
+ inet_unregister_protosw(&smc_inet_protosw);
+ proto_unregister(&smc_inet_prot);
+#endif /* CONFIG_IPV6 */
+ return rc;
+}
+
+void smc_inet_exit(void)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ inet6_unregister_protosw(&smc_inet6_protosw);
+ proto_unregister(&smc_inet6_prot);
+#endif /* CONFIG_IPV6 */
+ inet_unregister_protosw(&smc_inet_protosw);
+ proto_unregister(&smc_inet_prot);
+}
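
With the protosw registrations above in place, user space reaches SMC through the ordinary socket call rather than AF_SMC. A minimal sketch, assuming IPPROTO_SMC is 256 as the MODULE_ALIAS comment in af_smc.c states and that the system headers do not define it yet:

#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>

#ifndef IPPROTO_SMC
#define IPPROTO_SMC 256		/* assumed value, per the MODULE_ALIAS comment */
#endif

int main(void)
{
	/* the kernel sets up the internal clcsock and falls back to TCP
	 * when SMC cannot be negotiated
	 */
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SMC);

	if (fd < 0) {
		perror("socket(AF_INET, SOCK_STREAM, IPPROTO_SMC)");
		return 1;
	}
	return 0;
}
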
diff --git a/net/smc/smc_inet.h b/net/smc/smc_inet.h
new file mode 100644
index 000000000000..a489c8a2b8ef
--- /dev/null
+++ b/net/smc/smc_inet.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Definitions for the IPPROTO_SMC (socket related)
+ *
+ * Copyright IBM Corp. 2016
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: D. Wythe <alibuda@linux.alibaba.com>
+ */
+#ifndef __INET_SMC
+#define __INET_SMC
+
+/* Initialize protocol registration on IPPROTO_SMC.
+ * Returns 0 on success.
+ */
+int smc_inet_init(void);
+
+void smc_inet_exit(void);
+
+#endif /* __INET_SMC */
diff --git a/net/socket.c b/net/socket.c
index e416920e9399..fcbdd5bc47ac 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1822,6 +1822,20 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
return __sys_socketpair(family, type, protocol, usockvec);
}
+int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address,
+ int addrlen)
+{
+ int err;
+
+ err = security_socket_bind(sock, (struct sockaddr *)address,
+ addrlen);
+ if (!err)
+ err = READ_ONCE(sock->ops)->bind(sock,
+ (struct sockaddr *)address,
+ addrlen);
+ return err;
+}
+
/*
* Bind a name to a socket. Nothing much to do here since it's
* the protocol's responsibility to handle the local address.
@@ -1839,15 +1853,8 @@ int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
err = move_addr_to_kernel(umyaddr, addrlen, &address);
- if (!err) {
- err = security_socket_bind(sock,
- (struct sockaddr *)&address,
- addrlen);
- if (!err)
- err = READ_ONCE(sock->ops)->bind(sock,
- (struct sockaddr *)
- &address, addrlen);
- }
+ if (!err)
+ err = __sys_bind_socket(sock, &address, addrlen);
fput_light(sock->file, fput_needed);
}
return err;
@@ -1863,23 +1870,28 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
* necessary for a listen, and if that works, we mark the socket as
* ready for listening.
*/
+int __sys_listen_socket(struct socket *sock, int backlog)
+{
+ int somaxconn, err;
+
+ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
+ if ((unsigned int)backlog > somaxconn)
+ backlog = somaxconn;
+
+ err = security_socket_listen(sock, backlog);
+ if (!err)
+ err = READ_ONCE(sock->ops)->listen(sock, backlog);
+ return err;
+}
int __sys_listen(int fd, int backlog)
{
struct socket *sock;
int err, fput_needed;
- int somaxconn;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
- somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
- if ((unsigned int)backlog > somaxconn)
- backlog = somaxconn;
-
- err = security_socket_listen(sock, backlog);
- if (!err)
- err = READ_ONCE(sock->ops)->listen(sock, backlog);
-
+ err = __sys_listen_socket(sock, backlog);
fput_light(sock->file, fput_needed);
}
return err;
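
The two helpers split out above operate on a struct socket directly, so an in-kernel caller that already owns a socket can bind and listen without going through file descriptors. A sketch under that assumption; demo_bind_and_listen() and its arguments are invented for illustration:

#include <linux/net.h>
#include <linux/in.h>
#include <linux/socket.h>	/* assumed to expose the new helper prototypes */

static int demo_bind_and_listen(struct socket *sock, __be32 addr, __be16 port)
{
	struct sockaddr_storage ss = {};
	struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
	int err;

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = addr;
	sin->sin_port = port;

	err = __sys_bind_socket(sock, &ss, sizeof(*sin));
	if (err)
		return err;

	return __sys_listen_socket(sock, 128);
}
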
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index c7af0220f82f..369310909fc9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1875,8 +1875,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* slack space should prevent this ever happening: */
- if (unlikely(snd_buf->len > snd_buf->buflen))
+ if (unlikely(snd_buf->len > snd_buf->buflen)) {
+ status = -EIO;
goto wrap_failed;
+ }
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index 06d8ee0db000..4eb19c3a54c7 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -168,7 +168,7 @@ static int krb5_DK(const struct gss_krb5_enctype *gk5e,
goto err_return;
blocksize = crypto_sync_skcipher_blocksize(cipher);
if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
- goto err_return;
+ goto err_free_cipher;
ret = -ENOMEM;
inblockdata = kmalloc(blocksize, gfp_mask);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 96ab50eda9c2..73a90ad873fb 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1069,7 +1069,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
goto out_denied_free;
pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
- in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+ in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
if (!in_token->pages)
goto out_denied_free;
in_token->page_base = 0;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2b4b1276d4e8..e03f14024e47 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -72,57 +72,100 @@ static struct svc_pool_map svc_pool_map = {
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
static int
-param_set_pool_mode(const char *val, const struct kernel_param *kp)
+__param_set_pool_mode(const char *val, struct svc_pool_map *m)
{
- int *ip = (int *)kp->arg;
- struct svc_pool_map *m = &svc_pool_map;
- int err;
+ int err, mode;
mutex_lock(&svc_pool_map_mutex);
- err = -EBUSY;
- if (m->count)
- goto out;
-
err = 0;
if (!strncmp(val, "auto", 4))
- *ip = SVC_POOL_AUTO;
+ mode = SVC_POOL_AUTO;
else if (!strncmp(val, "global", 6))
- *ip = SVC_POOL_GLOBAL;
+ mode = SVC_POOL_GLOBAL;
else if (!strncmp(val, "percpu", 6))
- *ip = SVC_POOL_PERCPU;
+ mode = SVC_POOL_PERCPU;
else if (!strncmp(val, "pernode", 7))
- *ip = SVC_POOL_PERNODE;
+ mode = SVC_POOL_PERNODE;
else
err = -EINVAL;
+ if (err)
+ goto out;
+
+ if (m->count == 0)
+ m->mode = mode;
+ else if (mode != m->mode)
+ err = -EBUSY;
out:
mutex_unlock(&svc_pool_map_mutex);
return err;
}
static int
-param_get_pool_mode(char *buf, const struct kernel_param *kp)
+param_set_pool_mode(const char *val, const struct kernel_param *kp)
+{
+ struct svc_pool_map *m = kp->arg;
+
+ return __param_set_pool_mode(val, m);
+}
+
+int sunrpc_set_pool_mode(const char *val)
{
- int *ip = (int *)kp->arg;
+ return __param_set_pool_mode(val, &svc_pool_map);
+}
+EXPORT_SYMBOL(sunrpc_set_pool_mode);
- switch (*ip)
+/**
+ * sunrpc_get_pool_mode - get the current pool_mode for the host
+ * @buf: where to write the current pool_mode
+ * @size: size of @buf
+ *
+ * Grab the current pool_mode from the svc_pool_map and write
+ * the resulting string to @buf. Returns the number of characters
+ * written to @buf (a la snprintf()).
+ */
+int
+sunrpc_get_pool_mode(char *buf, size_t size)
+{
+ struct svc_pool_map *m = &svc_pool_map;
+
+ switch (m->mode)
{
case SVC_POOL_AUTO:
- return sysfs_emit(buf, "auto\n");
+ return snprintf(buf, size, "auto");
case SVC_POOL_GLOBAL:
- return sysfs_emit(buf, "global\n");
+ return snprintf(buf, size, "global");
case SVC_POOL_PERCPU:
- return sysfs_emit(buf, "percpu\n");
+ return snprintf(buf, size, "percpu");
case SVC_POOL_PERNODE:
- return sysfs_emit(buf, "pernode\n");
+ return snprintf(buf, size, "pernode");
default:
- return sysfs_emit(buf, "%d\n", *ip);
+ return snprintf(buf, size, "%d", m->mode);
}
}
+EXPORT_SYMBOL(sunrpc_get_pool_mode);
+
+static int
+param_get_pool_mode(char *buf, const struct kernel_param *kp)
+{
+ char str[16];
+ int len;
+
+ len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str));
+
+ /* Ensure we have room for newline and NUL */
+ len = min_t(int, len, ARRAY_SIZE(str) - 2);
+
+ /* tack on the newline */
+ str[len] = '\n';
+ str[len + 1] = '\0';
+
+ return sysfs_emit(buf, "%s", str);
+}
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
- &svc_pool_map.mode, 0644);
+ &svc_pool_map, 0644);
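
Because sunrpc_set_pool_mode() and sunrpc_get_pool_mode() are now exported, another module can drive the pool mode directly instead of going through the module parameter. A sketch only; the caller and messages are invented, and the declarations are assumed to be visible via linux/sunrpc/svc.h:

#include <linux/printk.h>
#include <linux/sunrpc/svc.h>

static void demo_switch_pool_mode(void)
{
	char cur[16];
	int err;

	err = sunrpc_set_pool_mode("percpu");
	if (err == -EBUSY)
		pr_info("pool mode is pinned by a running pooled service\n");
	else if (err)
		pr_info("unknown pool mode string\n");

	sunrpc_get_pool_mode(cur, sizeof(cur));
	pr_info("current sunrpc pool mode: %s\n", cur);
}
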
/*
* Detect best pool mapping mode heuristically,
@@ -250,10 +293,8 @@ svc_pool_map_get(void)
int npools = -1;
mutex_lock(&svc_pool_map_mutex);
-
if (m->count++) {
mutex_unlock(&svc_pool_map_mutex);
- WARN_ON_ONCE(m->npools <= 1);
return m->npools;
}
@@ -275,32 +316,21 @@ svc_pool_map_get(void)
m->mode = SVC_POOL_GLOBAL;
}
m->npools = npools;
-
- if (npools == 1)
- /* service is unpooled, so doesn't hold a reference */
- m->count--;
-
mutex_unlock(&svc_pool_map_mutex);
return npools;
}
/*
- * Drop a reference to the global map of cpus to pools, if
- * pools were in use, i.e. if npools > 1.
+ * Drop a reference to the global map of cpus to pools.
* When the last reference is dropped, the map data is
- * freed; this allows the sysadmin to change the pool
- * mode using the pool_mode module option without
- * rebooting or re-loading sunrpc.ko.
+ * freed; this allows the sysadmin to change the pool mode.
*/
static void
-svc_pool_map_put(int npools)
+svc_pool_map_put(void)
{
struct svc_pool_map *m = &svc_pool_map;
- if (npools <= 1)
- return;
mutex_lock(&svc_pool_map_mutex);
-
if (!--m->count) {
kfree(m->to_pool);
m->to_pool = NULL;
@@ -308,7 +338,6 @@ svc_pool_map_put(int npools)
m->pool_to = NULL;
m->npools = 0;
}
-
mutex_unlock(&svc_pool_map_mutex);
}
@@ -553,9 +582,10 @@ struct svc_serv *svc_create_pooled(struct svc_program *prog,
serv = __svc_create(prog, stats, bufsize, npools, threadfn);
if (!serv)
goto out_err;
+ serv->sv_is_pooled = true;
return serv;
out_err:
- svc_pool_map_put(npools);
+ svc_pool_map_put();
return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
@@ -585,7 +615,8 @@ svc_destroy(struct svc_serv **servp)
cache_clean_deferred(serv);
- svc_pool_map_put(serv->sv_nrpools);
+ if (serv->sv_is_pooled)
+ svc_pool_map_put();
for (i = 0; i < serv->sv_nrpools; i++) {
struct svc_pool *pool = &serv->sv_pools[i];
@@ -1557,9 +1588,11 @@ out_drop:
*/
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
+ struct rpc_timeout timeout = {
+ .to_increment = 0,
+ };
struct rpc_task *task;
int proc_error;
- struct rpc_timeout timeout;
/* Build the svc_rqst used by the common processing routine */
rqstp->rq_xid = req->rq_xid;
@@ -1612,6 +1645,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
timeout.to_initval = req->rq_xprt->timeout->to_initval;
timeout.to_retries = req->rq_xprt->timeout->to_retries;
}
+ timeout.to_maxval = timeout.to_initval;
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req, &timeout);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index dd86d7f1e97e..d3735ab3e6d1 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -157,6 +157,7 @@ int svc_print_xprts(char *buf, int maxlen)
*/
void svc_xprt_deferred_close(struct svc_xprt *xprt)
{
+ trace_svc_xprt_close(xprt);
if (!test_and_set_bit(XPT_CLOSE, &xprt->xpt_flags))
svc_xprt_enqueue(xprt);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 2b1c16b9547d..f15750cacacf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -65,6 +65,8 @@
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
struct net *net, int node);
+static int svc_rdma_listen_handler(struct rdma_cm_id *cma_id,
+ struct rdma_cm_event *event);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct net *net,
struct sockaddr *sa, int salen,
@@ -122,6 +124,41 @@ static void qp_event_handler(struct ib_event *event, void *context)
}
}
+static struct rdma_cm_id *
+svc_rdma_create_listen_id(struct net *net, struct sockaddr *sap,
+ void *context)
+{
+ struct rdma_cm_id *listen_id;
+ int ret;
+
+ listen_id = rdma_create_id(net, svc_rdma_listen_handler, context,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(listen_id))
+ return listen_id;
+
+ /* Allow both IPv4 and IPv6 sockets to bind a single port
+ * at the same time.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+ ret = rdma_set_afonly(listen_id, 1);
+ if (ret)
+ goto out_destroy;
+#endif
+ ret = rdma_bind_addr(listen_id, sap);
+ if (ret)
+ goto out_destroy;
+
+ ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
+ if (ret)
+ goto out_destroy;
+
+ return listen_id;
+
+out_destroy:
+ rdma_destroy_id(listen_id);
+ return ERR_PTR(ret);
+}
+
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
struct net *net, int node)
{
@@ -247,17 +284,31 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
*
* Return values:
* %0: Do not destroy @cma_id
- * %1: Destroy @cma_id (never returned here)
+ * %1: Destroy @cma_id
*
* NB: There is never a DEVICE_REMOVAL event for INADDR_ANY listeners.
*/
static int svc_rdma_listen_handler(struct rdma_cm_id *cma_id,
struct rdma_cm_event *event)
{
+ struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;
+ struct svcxprt_rdma *cma_xprt = cma_id->context;
+ struct svc_xprt *cma_rdma = &cma_xprt->sc_xprt;
+ struct rdma_cm_id *listen_id;
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
handle_connect_req(cma_id, &event->param.conn);
break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ listen_id = svc_rdma_create_listen_id(cma_rdma->xpt_net,
+ sap, cma_xprt);
+ if (IS_ERR(listen_id)) {
+ pr_err("Listener dead, address change failed for device %s\n",
+ cma_id->device->name);
+ } else
+ cma_xprt->sc_cm_id = listen_id;
+ return 1;
default:
break;
}
@@ -307,7 +358,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
{
struct rdma_cm_id *listen_id;
struct svcxprt_rdma *cma_xprt;
- int ret;
if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6)
return ERR_PTR(-EAFNOSUPPORT);
@@ -317,30 +367,13 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");
- listen_id = rdma_create_id(net, svc_rdma_listen_handler, cma_xprt,
- RDMA_PS_TCP, IB_QPT_RC);
+ listen_id = svc_rdma_create_listen_id(net, sa, cma_xprt);
if (IS_ERR(listen_id)) {
- ret = PTR_ERR(listen_id);
- goto err0;
+ kfree(cma_xprt);
+ return (struct svc_xprt *)listen_id;
}
-
- /* Allow both IPv4 and IPv6 sockets to bind a single port
- * at the same time.
- */
-#if IS_ENABLED(CONFIG_IPV6)
- ret = rdma_set_afonly(listen_id, 1);
- if (ret)
- goto err1;
-#endif
- ret = rdma_bind_addr(listen_id, sa);
- if (ret)
- goto err1;
cma_xprt->sc_cm_id = listen_id;
- ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
- if (ret)
- goto err1;
-
/*
* We need to use the address from the cm_id in case the
* caller specified 0 for the port number.
@@ -349,12 +382,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);
return &cma_xprt->sc_xprt;
-
- err1:
- rdma_destroy_id(listen_id);
- err0:
- kfree(cma_xprt);
- return ERR_PTR(ret);
}
/*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index dfc353eea8ed..0e1691316f42 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2441,6 +2441,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
transport->srcport = 0;
status = -EAGAIN;
break;
+ case -EPERM:
+ /* Happens, for instance, if a BPF program is preventing
+ * the connect. Remap the error so upper layers can better
+ * deal with it.
+ */
+ status = -ECONNREFUSED;
+ fallthrough;
case -EINVAL:
/* Happens, for instance, if the user specified a link
* local IPv6 address without a scope-id.
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index f5017012a049..19e8048241ba 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -127,7 +127,7 @@ static void ensure_safe_net_sysctl(struct net *net, const char *path,
pr_debug("Registering net sysctl (net %p): %s\n", net, path);
ent = table;
- for (size_t i = 0; i < table_size && ent->procname; ent++, i++) {
+ for (size_t i = 0; i < table_size; ent++, i++) {
unsigned long addr;
const char *where;
@@ -165,17 +165,10 @@ struct ctl_table_header *register_net_sysctl_sz(struct net *net,
struct ctl_table *table,
size_t table_size)
{
- int count;
- struct ctl_table *entry;
-
if (!net_eq(net, &init_net))
ensure_safe_net_sysctl(net, path, table, table_size);
- entry = table;
- for (count = 0 ; count < table_size && entry->procname; entry++, count++)
- ;
-
- return __register_sysctl_table(&net->sysctls, path, table, count);
+ return __register_sysctl_table(&net->sysctls, path, table, table_size);
}
EXPORT_SYMBOL_GPL(register_net_sysctl_sz);
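
With the sentinel counting removed, register_net_sysctl_sz() now trusts table_size as the exact number of entries, so per-net tables no longer rely on an empty terminator being counted for them. A minimal sketch of the calling convention; the table and path are invented for illustration:

#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int demo_value;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_value",
		.data		= &demo_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static struct ctl_table_header *demo_register(struct net *net)
{
	/* the size argument carries the entry count */
	return register_net_sysctl_sz(net, "net/demo", demo_table,
				      ARRAY_SIZE(demo_table));
}
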
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 7eccd97e0609..7f3fe3401c45 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -72,7 +72,6 @@ struct tipc_node;
struct tipc_bearer;
struct tipc_bc_base;
struct tipc_link;
-struct tipc_name_table;
struct tipc_topsrv;
struct tipc_monitor;
#ifdef CONFIG_TIPC_CRYPTO
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0716eb5c8a31..5c2088a469ce 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -241,13 +241,6 @@ enum {
LINK_SYNCHING = 0xc << 24
};
-/* Link FSM state checking routines
- */
-static int link_is_up(struct tipc_link *l)
-{
- return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
-}
-
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
@@ -274,7 +267,7 @@ static void tipc_link_update_cwin(struct tipc_link *l, int released,
*/
bool tipc_link_is_up(struct tipc_link *l)
{
- return link_is_up(l);
+ return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
bool tipc_link_peer_is_down(struct tipc_link *l)
@@ -1790,7 +1783,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
rcv_nxt = l->rcv_nxt;
win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
- if (unlikely(!link_is_up(l))) {
+ if (unlikely(!tipc_link_is_up(l))) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
kfree_skb(skb);
@@ -1848,7 +1841,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
struct tipc_link *bcl = l->bc_rcvlink;
struct tipc_msg *hdr;
struct sk_buff *skb;
- bool node_up = link_is_up(bcl);
+ bool node_up = tipc_link_is_up(bcl);
u16 glen = 0, bc_rcvgap = 0;
int dlen = 0;
void *data;
@@ -2163,7 +2156,7 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
if (session != curr_session)
return false;
/* Extra sanity check */
- if (!link_is_up(l) && msg_ack(hdr))
+ if (!tipc_link_is_up(l) && msg_ack(hdr))
return false;
if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
return true;
@@ -2261,7 +2254,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
}
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
- if (mtyp == RESET_MSG || !link_is_up(l))
+ if (mtyp == RESET_MSG || !tipc_link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
/* ACTIVATE_MSG takes up link if it was already locally reset */
@@ -2300,7 +2293,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (msg_probe(hdr))
l->stats.recv_probes++;
- if (!link_is_up(l)) {
+ if (!tipc_link_is_up(l)) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
break;
@@ -2387,7 +2380,7 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
int mtyp = msg_type(hdr);
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
- if (link_is_up(l))
+ if (tipc_link_is_up(l))
return;
if (msg_user(hdr) == BCAST_PROTOCOL) {
@@ -2415,7 +2408,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
int rc = 0;
- if (!link_is_up(l))
+ if (!tipc_link_is_up(l))
return rc;
if (!msg_peer_node_is_up(hdr))
@@ -2475,7 +2468,7 @@ int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
bool unused = false;
int rc = 0;
- if (!link_is_up(r) || !r->bc_peer_is_up)
+ if (!tipc_link_is_up(r) || !r->bc_peer_is_up)
return 0;
if (gap) {
@@ -2873,7 +2866,7 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
l->tolerance = tol;
if (l->bc_rcvlink)
l->bc_rcvlink->tolerance = tol;
- if (link_is_up(l))
+ if (tipc_link_is_up(l))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index c1e890a82434..500320e5ca47 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2105,6 +2105,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
} else {
n = tipc_node_find_by_id(net, ehdr->id);
}
+ skb_dst_force(skb);
tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
if (!skb)
return;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index ab6e694f7bc2..dc063c2c7950 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -231,14 +231,10 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
u32 seq)
{
struct net_device *netdev;
- struct sk_buff *skb;
int err = 0;
u8 *rcd_sn;
- skb = tcp_write_queue_tail(sk);
- if (skb)
- TCP_SKB_CB(skb)->eor = 1;
-
+ tcp_write_collapse_fence(sk);
rcd_sn = tls_ctx->tx.rec_seq;
trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
@@ -1067,7 +1063,6 @@ int tls_set_device_offload(struct sock *sk)
struct tls_prot_info *prot;
struct net_device *netdev;
struct tls_context *ctx;
- struct sk_buff *skb;
char *iv, *rec_seq;
int rc;
@@ -1138,9 +1133,7 @@ int tls_set_device_offload(struct sock *sk)
* SKBs where only part of the payload needs to be encrypted.
* So mark the last skb in the write queue as end of record.
*/
- skb = tcp_write_queue_tail(sk);
- if (skb)
- TCP_SKB_CB(skb)->eor = 1;
+ tcp_write_collapse_fence(sk);
/* Avoid offloading if the device is down
* We don't want to offload new flows after
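
tcp_write_collapse_fence() is not part of this excerpt; judging from the open-coded sequence it replaces in both hunks above, it presumably boils down to something like the sketch below (not the authoritative definition):

#include <net/tcp.h>

static inline void tcp_write_collapse_fence(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	/* mark the last queued skb as end-of-record so later writes
	 * cannot be collapsed into already-offloaded data
	 */
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;
}
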
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 90b7f253d363..6b4b9f2749a6 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -616,6 +616,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
struct tls_crypto_info *alt_crypto_info;
struct tls_context *ctx = tls_get_ctx(sk);
const struct tls_cipher_desc *cipher_desc;
+ union tls_crypto_context *crypto_ctx;
int rc = 0;
int conf;
@@ -623,13 +624,15 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
return -EINVAL;
if (tx) {
- crypto_info = &ctx->crypto_send.info;
+ crypto_ctx = &ctx->crypto_send;
alt_crypto_info = &ctx->crypto_recv.info;
} else {
- crypto_info = &ctx->crypto_recv.info;
+ crypto_ctx = &ctx->crypto_recv;
alt_crypto_info = &ctx->crypto_send.info;
}
+ crypto_info = &crypto_ctx->info;
+
/* Currently we don't support set crypto info more than one time */
if (TLS_CRYPTO_INFO_READY(crypto_info))
return -EBUSY;
@@ -710,7 +713,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
return 0;
err_crypto_info:
- memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
+ memzero_explicit(crypto_ctx, sizeof(*crypto_ctx));
return rc;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e4af6616e1df..b0a4c6d08e0a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -126,6 +126,81 @@ static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
* hash table is protected with spinlock.
* each socket state is protected by separate spinlock.
*/
+#ifdef CONFIG_PROVE_LOCKING
+#define cmp_ptr(l, r) (((l) > (r)) - ((l) < (r)))
+
+static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
+ const struct lockdep_map *b)
+{
+ return cmp_ptr(a, b);
+}
+
+static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
+ const struct lockdep_map *_b)
+{
+ const struct unix_sock *a, *b;
+
+ a = container_of(_a, struct unix_sock, lock.dep_map);
+ b = container_of(_b, struct unix_sock, lock.dep_map);
+
+ if (a->sk.sk_state == TCP_LISTEN) {
+ /* unix_stream_connect(): Before the 2nd unix_state_lock(),
+ *
+ * 1. a is TCP_LISTEN.
+ * 2. b is not a.
+ * 3. concurrent connect(b -> a) must fail.
+ *
+ * Except for 2. & 3., b's state can be any possible
+ * value due to concurrent connect() or listen().
+ *
+ * 2. is detected in debug_spin_lock_before(), and 3. cannot
+ * be expressed as lock_cmp_fn.
+ */
+ switch (b->sk.sk_state) {
+ case TCP_CLOSE:
+ case TCP_ESTABLISHED:
+ case TCP_LISTEN:
+ return -1;
+ default:
+ /* Invalid case. */
+ return 0;
+ }
+ }
+
+ /* Should never happen. Just to be symmetric. */
+ if (b->sk.sk_state == TCP_LISTEN) {
+ switch (a->sk.sk_state) {
+ case TCP_CLOSE:
+ case TCP_ESTABLISHED:
+ return 1;
+ default:
+ return 0;
+ }
+ }
+
+ /* unix_state_double_lock(): ascending address order. */
+ return cmp_ptr(a, b);
+}
+
+static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
+ const struct lockdep_map *_b)
+{
+ const struct sock *a, *b;
+
+ a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
+ b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);
+
+ /* unix_collect_skb(): listener -> embryo order. */
+ if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
+ return -1;
+
+ /* Should never happen. Just to be symmetric. */
+ if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
+ return 1;
+
+ return 0;
+}
+#endif
static unsigned int unix_unbound_hash(struct sock *sk)
{
@@ -168,7 +243,7 @@ static void unix_table_double_lock(struct net *net,
swap(hash1, hash2);
spin_lock(&net->unx.table.locks[hash1]);
- spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
+ spin_lock(&net->unx.table.locks[hash2]);
}
static void unix_table_double_unlock(struct net *net,
@@ -221,15 +296,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
-static inline int unix_recvq_full(const struct sock *sk)
-{
- return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
-}
-
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
- return skb_queue_len_lockless(&sk->sk_receive_queue) >
- READ_ONCE(sk->sk_max_ack_backlog);
+ return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
@@ -530,10 +599,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
return 0;
}
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
{
- return sk->sk_state != TCP_LISTEN &&
- (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ return state != TCP_LISTEN &&
+ (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}
static void unix_write_space(struct sock *sk)
@@ -541,7 +610,7 @@ static void unix_write_space(struct sock *sk)
struct socket_wq *wq;
rcu_read_lock();
- if (unix_writable(sk)) {
+ if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
@@ -570,7 +639,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
sk_error_report(other);
}
}
- other->sk_state = TCP_CLOSE;
}
static void unix_sock_destructor(struct sock *sk)
@@ -617,7 +685,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
skpair = unix_peer(sk);
unix_peer(sk) = NULL;
@@ -638,7 +706,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_lock(skpair);
/* No more writes */
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
- if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
@@ -654,8 +722,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state == TCP_LISTEN)
unix_release_sock(skb->sk, 1);
+
/* passed fds are erased in the kfree_skb hook */
- UNIXCB(skb).consumed = skb->len;
kfree_skb(skb);
}
@@ -683,14 +751,19 @@ static void unix_release_sock(struct sock *sk, int embrion)
static void init_peercred(struct sock *sk)
{
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ sk->sk_peer_cred = get_current_cred();
+}
+
+static void update_peercred(struct sock *sk)
+{
const struct cred *old_cred;
struct pid *old_pid;
spin_lock(&sk->sk_peer_lock);
old_pid = sk->sk_peer_pid;
old_cred = sk->sk_peer_cred;
- sk->sk_peer_pid = get_pid(task_tgid(current));
- sk->sk_peer_cred = get_current_cred();
+ init_peercred(sk);
spin_unlock(&sk->sk_peer_lock);
put_pid(old_pid);
@@ -699,26 +772,12 @@ static void init_peercred(struct sock *sk)
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
- const struct cred *old_cred;
- struct pid *old_pid;
+ lockdep_assert_held(&unix_sk(peersk)->lock);
- if (sk < peersk) {
- spin_lock(&sk->sk_peer_lock);
- spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
- } else {
- spin_lock(&peersk->sk_peer_lock);
- spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
- }
- old_pid = sk->sk_peer_pid;
- old_cred = sk->sk_peer_cred;
- sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ spin_lock(&sk->sk_peer_lock);
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
-
spin_unlock(&sk->sk_peer_lock);
- spin_unlock(&peersk->sk_peer_lock);
-
- put_pid(old_pid);
- put_cred(old_cred);
}
static int unix_listen(struct socket *sock, int backlog)
@@ -731,7 +790,7 @@ static int unix_listen(struct socket *sock, int backlog)
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
- if (!u->addr)
+ if (!READ_ONCE(u->addr))
goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
@@ -739,9 +798,10 @@ static int unix_listen(struct socket *sock, int backlog)
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
- sk->sk_state = TCP_LISTEN;
+ WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
/* set credentials so connect can copy them */
- init_peercred(sk);
+ update_peercred(sk);
err = 0;
out_unlock:
@@ -976,14 +1036,17 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
- sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+ sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
sk->sk_destruct = unix_sock_destructor;
+ lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);
+
u = unix_sk(sk);
u->listener = NULL;
u->vertex = NULL;
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
+ lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
init_waitqueue_head(&u->peer_wait);
@@ -1131,8 +1194,8 @@ static struct sock *unix_find_other(struct net *net,
static int unix_autobind(struct sock *sk)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
u32 lastnum, ordernum;
@@ -1155,6 +1218,7 @@ static int unix_autobind(struct sock *sk)
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
+ old_hash = sk->sk_hash;
ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
@@ -1195,8 +1259,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
{
umode_t mode = S_IFSOCK |
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct mnt_idmap *idmap;
struct unix_address *addr;
@@ -1234,6 +1298,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
if (u->addr)
goto out_unlock;
+ old_hash = sk->sk_hash;
new_hash = unix_bsd_hash(d_backing_inode(dentry));
unix_table_double_lock(net, old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
@@ -1261,8 +1326,8 @@ out:
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
int addr_len)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
int err;
@@ -1280,6 +1345,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
goto out_mutex;
}
+ old_hash = sk->sk_hash;
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
unix_table_double_lock(net, old_hash, new_hash);
@@ -1329,11 +1395,12 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
unix_state_lock(sk1);
return;
}
+
if (sk1 > sk2)
swap(sk1, sk2);
unix_state_lock(sk1);
- unix_state_lock_nested(sk2, U_LOCK_SECOND);
+ unix_state_lock(sk2);
}
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
@@ -1369,7 +1436,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
- !unix_sk(sk)->addr) {
+ !READ_ONCE(unix_sk(sk)->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1399,7 +1466,8 @@ restart:
if (err)
goto out_unlock;
- sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
@@ -1416,13 +1484,20 @@ restart:
unix_peer(sk) = other;
if (!other)
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
unix_state_double_unlock(sk, other);
- if (other != old_peer)
+ if (other != old_peer) {
unix_dgram_disconnected(sk, old_peer);
+
+ unix_state_lock(old_peer);
+ if (!unix_peer(old_peer))
+ WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+ unix_state_unlock(old_peer);
+ }
+
sock_put(old_peer);
} else {
unix_peer(sk) = other;
@@ -1468,9 +1543,9 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
struct net *net = sock_net(sk);
struct sk_buff *skb = NULL;
+ unsigned char state;
long timeo;
int err;
- int st;
err = unix_validate_addr(sunaddr, addr_len);
if (err)
@@ -1481,7 +1556,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1518,7 +1594,6 @@ restart:
goto out;
}
- /* Latch state of peer */
unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */
@@ -1534,7 +1609,7 @@ restart:
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
- if (unix_recvq_full(other)) {
+ if (unix_recvq_full_lockless(other)) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
@@ -1548,39 +1623,21 @@ restart:
goto restart;
}
- /* Latch our state.
-
- It is tricky place. We need to grab our state lock and cannot
- drop lock on peer. It is dangerous because deadlock is
- possible. Connect to self case and simultaneous
- attempt to connect are eliminated by checking socket
- state. other is TCP_LISTEN, if sk is TCP_LISTEN we
- check this before attempt to grab lock.
-
- Well, and we have to recheck the state after socket locked.
+ /* Self-connect and simultaneous connect are eliminated
+ * by rejecting a TCP_LISTEN socket to avoid deadlock.
*/
- st = sk->sk_state;
-
- switch (st) {
- case TCP_CLOSE:
- /* This is ok... continue with connect */
- break;
- case TCP_ESTABLISHED:
- /* Socket is already connected */
- err = -EISCONN;
- goto out_unlock;
- default:
- err = -EINVAL;
+ state = READ_ONCE(sk->sk_state);
+ if (unlikely(state != TCP_CLOSE)) {
+ err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
goto out_unlock;
}
- unix_state_lock_nested(sk, U_LOCK_SECOND);
+ unix_state_lock(sk);
- if (sk->sk_state != st) {
+ if (unlikely(sk->sk_state != TCP_CLOSE)) {
+ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
unix_state_unlock(sk);
- unix_state_unlock(other);
- sock_put(other);
- goto restart;
+ goto out_unlock;
}
err = security_unix_stream_connect(sk, other, newsk);
@@ -1629,7 +1686,7 @@ restart:
copy_peercred(sk, other);
sock->state = SS_CONNECTED;
- sk->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
sock_hold(newsk);
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1701,7 +1758,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock,
goto out;
arg->err = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1950,14 +2007,15 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
}
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
}
err = -EMSGSIZE;
- if (len > sk->sk_sndbuf - 32)
+ if (len > READ_ONCE(sk->sk_sndbuf) - 32)
goto out;
if (len > SKB_MAX_ALLOC) {
@@ -2039,7 +2097,7 @@ restart_locked:
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
@@ -2216,7 +2274,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
}
if (msg->msg_namelen) {
- err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
err = -ENOTCONN;
@@ -2237,7 +2295,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
&err, 0);
} else {
/* Keep two messages in the pipe so it schedules better */
- size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+ size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
/* allow fallback to order-0 allocations */
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2330,7 +2388,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
if (err)
return err;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
if (msg->msg_namelen)
@@ -2344,7 +2402,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2609,10 +2667,24 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
{
struct unix_sock *u = unix_sk(sk);
- if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
- skb_unlink(skb, &sk->sk_receive_queue);
- consume_skb(skb);
- skb = NULL;
+ if (!unix_skb_len(skb)) {
+ struct sk_buff *unlinked_skb = NULL;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+
+ if (copied && (!u->oob_skb || skb == u->oob_skb)) {
+ skb = NULL;
+ } else if (flags & MSG_PEEK) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ } else {
+ unlinked_skb = skb;
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ __skb_unlink(unlinked_skb, &sk->sk_receive_queue);
+ }
+
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ consume_skb(unlinked_skb);
} else {
struct sk_buff *unlinked_skb = NULL;
@@ -2621,18 +2693,18 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
if (skb == u->oob_skb) {
if (copied) {
skb = NULL;
- } else if (sock_flag(sk, SOCK_URGINLINE)) {
- if (!(flags & MSG_PEEK)) {
+ } else if (!(flags & MSG_PEEK)) {
+ if (sock_flag(sk, SOCK_URGINLINE)) {
WRITE_ONCE(u->oob_skb, NULL);
consume_skb(skb);
+ } else {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ WRITE_ONCE(u->oob_skb, NULL);
+ unlinked_skb = skb;
+ skb = skb_peek(&sk->sk_receive_queue);
}
- } else if (flags & MSG_PEEK) {
- skb = NULL;
- } else {
- __skb_unlink(skb, &sk->sk_receive_queue);
- WRITE_ONCE(u->oob_skb, NULL);
- unlinked_skb = skb;
- skb = skb_peek(&sk->sk_receive_queue);
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
}
}
@@ -2649,7 +2721,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
- if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
return -ENOTCONN;
return unix_read_skb(sk, recv_actor);
@@ -2673,7 +2745,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
size_t size = state->size;
unsigned int last_len;
- if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
err = -EINVAL;
goto out;
}
@@ -2699,9 +2771,8 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
skip = max(sk_peek_offset(sk, flags), 0);
do {
- int chunk;
- bool drop_skb;
struct sk_buff *skb, *last;
+ int chunk;
redo:
unix_state_lock(sk);
@@ -2797,11 +2868,7 @@ unlock:
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
- skb_get(skb);
chunk = state->recv_actor(skb, skip, chunk, state);
- drop_skb = !unix_skb_len(skb);
- /* skb is only safe to use if !drop_skb */
- consume_skb(skb);
if (chunk < 0) {
if (copied == 0)
copied = -EFAULT;
@@ -2810,18 +2877,6 @@ unlock:
copied += chunk;
size -= chunk;
- if (drop_skb) {
- /* the skb was touched by a concurrent reader;
- * we should not expect anything from this skb
- * anymore and assume it invalid - we can be
- * sure it was dropped from the socket queue
- *
- * let's report a short read
- */
- err = 0;
- break;
- }
-
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
UNIXCB(skb).consumed += chunk;
@@ -3004,7 +3059,7 @@ long unix_inq_len(struct sock *sk)
struct sk_buff *skb;
long amount = 0;
- if (sk->sk_state == TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
return -EINVAL;
spin_lock(&sk->sk_receive_queue.lock);
@@ -3089,12 +3144,23 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
case SIOCATMARK:
{
+ struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb;
int answ = 0;
+ mutex_lock(&u->iolock);
+
skb = skb_peek(&sk->sk_receive_queue);
- if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
- answ = 1;
+ if (skb) {
+ struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
+
+ if (skb == oob_skb ||
+ (!oob_skb && !unix_skb_len(skb)))
+ answ = 1;
+ }
+
+ mutex_unlock(&u->iolock);
+
err = put_user(answ, (int __user *)arg);
}
break;
@@ -3116,12 +3182,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err))
@@ -3143,14 +3211,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
/* Connection-based need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- sk->sk_state == TCP_CLOSE)
+ state == TCP_CLOSE)
mask |= EPOLLHUP;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
- if (unix_writable(sk))
+ if (unix_writable(sk, state))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
@@ -3161,12 +3229,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
{
struct sock *sk = sock->sk, *other;
unsigned int writable;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err) ||
@@ -3186,19 +3256,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
- if (sk->sk_type == SOCK_SEQPACKET) {
- if (sk->sk_state == TCP_CLOSE)
- mask |= EPOLLHUP;
- /* connection hasn't started yet? */
- if (sk->sk_state == TCP_SYN_SENT)
- return mask;
- }
+ if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+ mask |= EPOLLHUP;
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
return mask;
- writable = unix_writable(sk);
+ writable = unix_writable(sk, state);
if (writable) {
unix_state_lock(sk);
@@ -3592,6 +3657,7 @@ static int __net_init unix_net_init(struct net *net)
for (i = 0; i < UNIX_HASH_SIZE; i++) {
spin_lock_init(&net->unx.table.locks[i]);
+ lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
}
diff --git a/net/unix/diag.c b/net/unix/diag.c
index ae39538c5042..9138af8b465e 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -47,9 +47,7 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
peer = unix_peer_get(sk);
if (peer) {
- unix_state_lock(peer);
ino = sock_i_ino(peer);
- unix_state_unlock(peer);
sock_put(peer);
return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
@@ -65,7 +63,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
u32 *buf;
int i;
- if (sk->sk_state == TCP_LISTEN) {
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -75,20 +73,9 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
buf = nla_data(attr);
i = 0;
- skb_queue_walk(&sk->sk_receive_queue, skb) {
- struct sock *req, *peer;
-
- req = skb->sk;
- /*
- * The state lock is outer for the same sk's
- * queue lock. With the other's queue locked it's
- * OK to lock the state.
- */
- unix_state_lock_nested(req, U_LOCK_DIAG);
- peer = unix_sk(req)->peer;
- buf[i++] = (peer ? sock_i_ino(peer) : 0);
- unix_state_unlock(req);
- }
+ skb_queue_walk(&sk->sk_receive_queue, skb)
+ buf[i++] = sock_i_ino(unix_peer(skb->sk));
+
spin_unlock(&sk->sk_receive_queue.lock);
}
@@ -103,8 +90,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_diag_rqlen rql;
- if (sk->sk_state == TCP_LISTEN) {
- rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +123,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
- rep->udiag_state = sk->sk_state;
+ rep->udiag_state = READ_ONCE(sk->sk_state);
rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +152,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
goto out_nlmsg_trim;
- if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+ if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -180,22 +167,6 @@ out_nlmsg_trim:
return -EMSGSIZE;
}
-static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u32 flags)
-{
- int sk_ino;
-
- unix_state_lock(sk);
- sk_ino = sock_i_ino(sk);
- unix_state_unlock(sk);
-
- if (!sk_ino)
- return 0;
-
- return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
-}
-
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -213,14 +184,22 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
num = 0;
spin_lock(&net->unx.table.locks[slot]);
sk_for_each(sk, &net->unx.table.buckets[slot]) {
+ int sk_ino;
+
if (num < s_num)
goto next;
- if (!(req->udiag_states & (1 << sk->sk_state)))
+
+ if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
goto next;
- if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+
+ sk_ino = sock_i_ino(sk);
+ if (!sk_ino)
+ goto next;
+
+ if (sk_diag_fill(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI) < 0) {
+ NLM_F_MULTI, sk_ino) < 0) {
spin_unlock(&net->unx.table.locks[slot]);
goto done;
}
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index dfe94a90ece4..06d94ad999e9 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -337,11 +337,6 @@ static bool unix_vertex_dead(struct unix_vertex *vertex)
return true;
}
-enum unix_recv_queue_lock_class {
- U_RECVQ_LOCK_NORMAL,
- U_RECVQ_LOCK_EMBRYO,
-};
-
static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist)
{
skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist);
@@ -375,8 +370,7 @@ static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist
skb_queue_walk(queue, skb) {
struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
- /* listener -> embryo order, the inversion never happens. */
- spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO);
+ spin_lock(&embryo_queue->lock);
unix_collect_queue(unix_sk(skb->sk), hitlist);
spin_unlock(&embryo_queue->lock);
}
@@ -476,6 +470,7 @@ prev_vertex:
}
if (vertex->index == vertex->scc_index) {
+ struct unix_vertex *v;
struct list_head scc;
bool scc_dead = true;
@@ -486,15 +481,15 @@ prev_vertex:
*/
__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
- list_for_each_entry_reverse(vertex, &scc, scc_entry) {
+ list_for_each_entry_reverse(v, &scc, scc_entry) {
/* Don't restart DFS from this vertex in unix_walk_scc(). */
- list_move_tail(&vertex->entry, &unix_visited_vertices);
+ list_move_tail(&v->entry, &unix_visited_vertices);
/* Mark vertex as off-stack. */
- vertex->index = unix_vertex_grouped_index;
+ v->index = unix_vertex_grouped_index;
if (scc_dead)
- scc_dead = unix_vertex_dead(vertex);
+ scc_dead = unix_vertex_dead(v);
}
if (scc_dead)
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 3414b2c3abcc..e579d7e1425f 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -263,6 +263,37 @@ static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
return nl80211_chan_width_to_mhz(c->width);
}
+static bool cfg80211_valid_center_freq(u32 center,
+ enum nl80211_chan_width width)
+{
+ int bw;
+ int step;
+
+ /* We only do strict verification on 6 GHz */
+ if (center < 5955 || center > 7115)
+ return true;
+
+ bw = nl80211_chan_width_to_mhz(width);
+ if (bw < 0)
+ return false;
+
+ /* Validate that the channel's bw is entirely within the 6 GHz band */
+ if (center - bw / 2 < 5945 || center + bw / 2 > 7125)
+ return false;
+
+ /* With 320 MHz the permitted channels overlap */
+ if (bw == 320)
+ step = 160;
+ else
+ step = bw;
+
+ /*
+ * Valid channels are packed from lowest frequency towards higher ones.
+ * So test that the lower frequency aligns with one of these steps.
+ */
+ return (center - bw / 2 - 5945) % step == 0;
+}
+
bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
{
u32 control_freq, oper_freq;
@@ -374,6 +405,13 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
return false;
}
+ if (!cfg80211_valid_center_freq(chandef->center_freq1, chandef->width))
+ return false;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80 &&
+ !cfg80211_valid_center_freq(chandef->center_freq2, chandef->width))
+ return false;
+
/* channel 14 is only for IEEE 802.11b */
if (chandef->center_freq1 == 2484 &&
chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
@@ -1145,7 +1183,8 @@ EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time);
static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
u32 center_freq, u32 bandwidth,
- u32 prohibited_flags, bool monitor)
+ u32 prohibited_flags,
+ u32 permitting_flags)
{
struct ieee80211_channel *c;
u32 freq, start_freq, end_freq;
@@ -1157,7 +1196,7 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
c = ieee80211_get_channel_khz(wiphy, freq);
if (!c)
return false;
- if (monitor && c->flags & IEEE80211_CHAN_CAN_MONITOR)
+ if (c->flags & permitting_flags)
continue;
if (c->flags & prohibited_flags)
return false;
@@ -1221,7 +1260,8 @@ static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
bool _cfg80211_chandef_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
- u32 prohibited_flags, bool monitor)
+ u32 prohibited_flags,
+ u32 permitting_flags)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -1383,22 +1423,23 @@ bool _cfg80211_chandef_usable(struct wiphy *wiphy,
if (!cfg80211_secondary_chans_ok(wiphy,
ieee80211_chandef_to_khz(chandef),
- width, prohibited_flags, monitor))
+ width, prohibited_flags,
+ permitting_flags))
return false;
if (!chandef->center_freq2)
return true;
return cfg80211_secondary_chans_ok(wiphy,
MHZ_TO_KHZ(chandef->center_freq2),
- width, prohibited_flags, monitor);
+ width, prohibited_flags,
+ permitting_flags);
}
bool cfg80211_chandef_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
u32 prohibited_flags)
{
- return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags,
- false);
+ return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags, 0);
}
EXPORT_SYMBOL(cfg80211_chandef_usable);
@@ -1520,49 +1561,50 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
enum nl80211_iftype iftype,
- bool check_no_ir)
+ u32 prohibited_flags,
+ u32 permitting_flags)
{
- bool res;
- u32 prohibited_flags = IEEE80211_CHAN_DISABLED;
+ bool res, check_radar;
int dfs_required;
- trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+ trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype,
+ prohibited_flags,
+ permitting_flags);
- if (check_no_ir)
- prohibited_flags |= IEEE80211_CHAN_NO_IR;
+ if (!_cfg80211_chandef_usable(wiphy, chandef,
+ IEEE80211_CHAN_DISABLED, 0))
+ return false;
dfs_required = cfg80211_chandef_dfs_required(wiphy, chandef, iftype);
- if (dfs_required != 0)
- prohibited_flags |= IEEE80211_CHAN_RADAR;
+ check_radar = dfs_required != 0;
if (dfs_required > 0 &&
cfg80211_chandef_dfs_available(wiphy, chandef)) {
/* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */
- prohibited_flags = IEEE80211_CHAN_DISABLED;
+ prohibited_flags &= ~IEEE80211_CHAN_NO_IR;
+ check_radar = false;
}
- res = cfg80211_chandef_usable(wiphy, chandef, prohibited_flags);
+ if (check_radar &&
+ !_cfg80211_chandef_usable(wiphy, chandef,
+ IEEE80211_CHAN_RADAR, 0))
+ return false;
+
+ res = _cfg80211_chandef_usable(wiphy, chandef,
+ prohibited_flags,
+ permitting_flags);
trace_cfg80211_return_bool(res);
return res;
}
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype)
-{
- return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
-}
-EXPORT_SYMBOL(cfg80211_reg_can_beacon);
-
-bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype)
+bool cfg80211_reg_check_beaconing(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ struct cfg80211_beaconing_check_config *cfg)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- bool check_no_ir;
-
- lockdep_assert_held(&rdev->wiphy.mtx);
+ u32 permitting_flags = 0;
+ bool check_no_ir = true;
/*
* Under certain conditions suggested by some regulatory bodies a
@@ -1570,12 +1612,20 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* only if such relaxations are not enabled and the conditions are not
* met.
*/
- check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
- chandef->chan);
+ if (cfg->relax) {
+ lockdep_assert_held(&rdev->wiphy.mtx);
+ check_no_ir = !cfg80211_ir_permissive_chan(wiphy, cfg->iftype,
+ chandef->chan);
+ }
+
+ if (cfg->reg_power == IEEE80211_REG_VLP_AP)
+ permitting_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
- return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+ return _cfg80211_reg_can_beacon(wiphy, chandef, cfg->iftype,
+ check_no_ir ? IEEE80211_CHAN_NO_IR : 0,
+ permitting_flags);
}
-EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
+EXPORT_SYMBOL(cfg80211_reg_check_beaconing);
int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef)
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3fb1b637352a..4d5d351bd0b5 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -421,6 +421,8 @@ static void cfg80211_wiphy_work(struct work_struct *work)
rdev = container_of(work, struct cfg80211_registered_device, wiphy_work);
+ trace_wiphy_work_worker_start(&rdev->wiphy);
+
wiphy_lock(&rdev->wiphy);
if (rdev->suspended)
goto out;
@@ -431,9 +433,10 @@ static void cfg80211_wiphy_work(struct work_struct *work)
if (wk) {
list_del_init(&wk->entry);
if (!list_empty(&rdev->wiphy_work_list))
- schedule_work(work);
+ queue_work(system_unbound_wq, work);
spin_unlock_irq(&rdev->wiphy_work_lock);
+ trace_wiphy_work_run(&rdev->wiphy, wk);
wk->func(&rdev->wiphy, wk);
} else {
spin_unlock_irq(&rdev->wiphy_work_lock);
@@ -1066,6 +1069,7 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
list_del_init(&wk->entry);
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+ trace_wiphy_work_run(&rdev->wiphy, wk);
wk->func(&rdev->wiphy, wk);
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
@@ -1141,7 +1145,8 @@ void wiphy_unregister(struct wiphy *wiphy)
flush_work(&rdev->background_cac_abort_wk);
cfg80211_rdev_free_wowlan(rdev);
- cfg80211_rdev_free_coalesce(rdev);
+ cfg80211_free_coalesce(rdev->coalesce);
+ rdev->coalesce = NULL;
}
EXPORT_SYMBOL(wiphy_unregister);
@@ -1610,6 +1615,8 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
unsigned long flags;
+ trace_wiphy_work_queue(wiphy, work);
+
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
if (list_empty(&work->entry))
list_add_tail(&work->entry, &rdev->wiphy_work_list);
@@ -1626,6 +1633,8 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
lockdep_assert_held(&wiphy->mtx);
+ trace_wiphy_work_cancel(wiphy, work);
+
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
if (!list_empty(&work->entry))
list_del_init(&work->entry);
@@ -1639,6 +1648,8 @@ void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
unsigned long flags;
bool run;
+ trace_wiphy_work_flush(wiphy, work);
+
spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
run = !work || !list_empty(&work->entry);
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
@@ -1660,6 +1671,8 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork,
unsigned long delay)
{
+ trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay);
+
if (!delay) {
del_timer(&dwork->timer);
wiphy_work_queue(wiphy, &dwork->work);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 118f2f619828..41c8c0e3ba2e 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -494,7 +494,8 @@ bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev,
bool primary_only);
bool _cfg80211_chandef_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
- u32 prohibited_flags, bool monitor);
+ u32 prohibited_flags,
+ u32 permitting_flags);
static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
{
@@ -532,6 +533,10 @@ struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *tmp,
bool signal_valid, unsigned long ts);
+
+enum ieee80211_ap_reg_power
+cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len);
+
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 9f02ee5f08be..34e5acff3935 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -3,7 +3,7 @@
* Some IBSS support code for cfg80211.
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -94,6 +94,9 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
lockdep_assert_held(&rdev->wiphy.mtx);
+ if (wdev->cac_started)
+ return -EBUSY;
+
if (wdev->u.ibss.ssid_len)
return -EALREADY;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 83306979fbe2..aaca65b66af4 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Portions
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <linux/export.h>
@@ -127,6 +127,9 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!rdev->ops->join_mesh)
return -EOPNOTSUPP;
+ if (wdev->cac_started)
+ return -EBUSY;
+
if (!setup->chandef.chan) {
/* if no channel explicitly given, use preset channel */
setup->chandef = wdev->u.mesh.preset_chandef;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3c0bca4238d3..7397a372c78e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -315,8 +315,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 },
[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
NLA_POLICY_MAX(NLA_U8, 15),
- [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
- NLA_POLICY_MAX(NLA_U8, 31),
+ [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = { .type = NLA_U8 },
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
@@ -468,6 +467,10 @@ static const struct netlink_range_validation nl80211_punct_bitmap_range = {
.max = 0xffff,
};
+static const struct netlink_range_validation q_range = {
+ .max = INT_MAX,
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -754,7 +757,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
- [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
+ [NL80211_ATTR_TXQ_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &q_range),
[NL80211_ATTR_HE_CAPABILITY] =
NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa,
NL80211_HE_MAX_CAPABILITY_LEN),
@@ -1204,6 +1207,12 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT))
goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_CAN_MONITOR) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_CAN_MONITOR))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP))
+ goto nla_put_failure;
}
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -1622,71 +1631,87 @@ nla_put_failure:
return -ENOBUFS;
}
-static int nl80211_put_iface_combinations(struct wiphy *wiphy,
- struct sk_buff *msg,
- bool large)
+static int nl80211_put_ifcomb_data(struct sk_buff *msg, bool large, int idx,
+ const struct ieee80211_iface_combination *c,
+ u16 nested)
{
- struct nlattr *nl_combis;
- int i, j;
+ struct nlattr *nl_combi, *nl_limits;
+ int i;
- nl_combis = nla_nest_start_noflag(msg,
- NL80211_ATTR_INTERFACE_COMBINATIONS);
- if (!nl_combis)
+ nl_combi = nla_nest_start_noflag(msg, idx | nested);
+ if (!nl_combi)
goto nla_put_failure;
- for (i = 0; i < wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *c;
- struct nlattr *nl_combi, *nl_limits;
+ nl_limits = nla_nest_start_noflag(msg, NL80211_IFACE_COMB_LIMITS |
+ nested);
+ if (!nl_limits)
+ goto nla_put_failure;
- c = &wiphy->iface_combinations[i];
+ for (i = 0; i < c->n_limits; i++) {
+ struct nlattr *nl_limit;
- nl_combi = nla_nest_start_noflag(msg, i + 1);
- if (!nl_combi)
+ nl_limit = nla_nest_start_noflag(msg, i + 1);
+ if (!nl_limit)
goto nla_put_failure;
-
- nl_limits = nla_nest_start_noflag(msg,
- NL80211_IFACE_COMB_LIMITS);
- if (!nl_limits)
+ if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, c->limits[i].max))
goto nla_put_failure;
+ if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
+ c->limits[i].types))
+ goto nla_put_failure;
+ nla_nest_end(msg, nl_limit);
+ }
- for (j = 0; j < c->n_limits; j++) {
- struct nlattr *nl_limit;
+ nla_nest_end(msg, nl_limits);
- nl_limit = nla_nest_start_noflag(msg, j + 1);
- if (!nl_limit)
- goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
- c->limits[j].max))
- goto nla_put_failure;
- if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
- c->limits[j].types))
- goto nla_put_failure;
- nla_nest_end(msg, nl_limit);
- }
+ if (c->beacon_int_infra_match &&
+ nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
+ c->num_different_channels) ||
+ nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
+ c->max_interfaces))
+ goto nla_put_failure;
+ if (large &&
+ (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
+ c->radar_detect_widths) ||
+ nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
+ c->radar_detect_regions)))
+ goto nla_put_failure;
+ if (c->beacon_int_min_gcd &&
+ nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
+ c->beacon_int_min_gcd))
+ goto nla_put_failure;
- nla_nest_end(msg, nl_limits);
+ nla_nest_end(msg, nl_combi);
- if (c->beacon_int_infra_match &&
- nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
- goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
- c->num_different_channels) ||
- nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
- c->max_interfaces))
- goto nla_put_failure;
- if (large &&
- (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
- c->radar_detect_widths) ||
- nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
- c->radar_detect_regions)))
- goto nla_put_failure;
- if (c->beacon_int_min_gcd &&
- nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
- c->beacon_int_min_gcd))
- goto nla_put_failure;
+ return 0;
+nla_put_failure:
+ return -ENOBUFS;
+}
+
+static int nl80211_put_iface_combinations(struct wiphy *wiphy,
+ struct sk_buff *msg,
+ int attr, int radio,
+ bool large, u16 nested)
+{
+ const struct ieee80211_iface_combination *c;
+ struct nlattr *nl_combis;
+ int i, n;
- nla_nest_end(msg, nl_combi);
+ nl_combis = nla_nest_start_noflag(msg, attr | nested);
+ if (!nl_combis)
+ goto nla_put_failure;
+
+ if (radio >= 0) {
+ c = wiphy->radio[0].iface_combinations;
+ n = wiphy->radio[0].n_iface_combinations;
+ } else {
+ c = wiphy->iface_combinations;
+ n = wiphy->n_iface_combinations;
}
+ for (i = 0; i < n; i++)
+ if (nl80211_put_ifcomb_data(msg, large, i + 1, &c[i], nested))
+ goto nla_put_failure;
nla_nest_end(msg, nl_combis);
@@ -2392,6 +2417,80 @@ fail:
return -ENOBUFS;
}
+static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx)
+{
+ const struct wiphy_radio *r = &wiphy->radio[idx];
+ struct nlattr *radio, *freq;
+ int i;
+
+ radio = nla_nest_start(msg, idx);
+ if (!radio)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx))
+ goto nla_put_failure;
+
+ for (i = 0; i < r->n_freq_range; i++) {
+ const struct wiphy_radio_freq_range *range = &r->freq_range[i];
+
+ freq = nla_nest_start(msg, NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE);
+ if (!freq)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_START,
+ range->start_freq) ||
+ nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_END,
+ range->end_freq))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, freq);
+ }
+
+ for (i = 0; i < r->n_iface_combinations; i++)
+ if (nl80211_put_ifcomb_data(msg, true,
+ NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION,
+ &r->iface_combinations[i],
+ NLA_F_NESTED))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, radio);
+
+ return 0;
+
+nla_put_failure:
+ return -ENOBUFS;
+}
+
+static int nl80211_put_radios(struct wiphy *wiphy, struct sk_buff *msg)
+{
+ struct nlattr *radios;
+ int i;
+
+ if (!wiphy->n_radio)
+ return 0;
+
+ radios = nla_nest_start(msg, NL80211_ATTR_WIPHY_RADIOS);
+ if (!radios)
+ return -ENOBUFS;
+
+ for (i = 0; i < wiphy->n_radio; i++)
+ if (nl80211_put_radio(wiphy, msg, i))
+ goto fail;
+
+ nla_nest_end(msg, radios);
+
+ if (nl80211_put_iface_combinations(wiphy, msg,
+ NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS,
+ -1, true, NLA_F_NESTED))
+ return -ENOBUFS;
+
+ return 0;
+
+fail:
+ nla_nest_cancel(msg, radios);
+ return -ENOBUFS;
+}
+
struct nl80211_dump_wiphy_state {
s64 filter_wiphy;
long start;
@@ -2687,7 +2786,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
if (nl80211_put_iface_combinations(&rdev->wiphy, msg,
- state->split))
+ NL80211_ATTR_INTERFACE_COMBINATIONS,
+ rdev->wiphy.n_radio ? 0 : -1,
+ state->split, 0))
goto nla_put_failure;
state->split_start++;
@@ -3001,6 +3102,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.hw_timestamp_max_peers))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 17:
+ if (nl80211_put_radios(&rdev->wiphy, msg))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -3344,7 +3451,7 @@ static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
if (!_cfg80211_chandef_usable(&rdev->wiphy, chandef,
IEEE80211_CHAN_DISABLED,
- monitor)) {
+ monitor ? IEEE80211_CHAN_CAN_MONITOR : 0)) {
NL_SET_ERR_MSG(extack, "(extension) channel is disabled");
return -EINVAL;
}
@@ -3415,6 +3522,33 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
if (chandef.chan != cur_chan)
return -EBUSY;
+ /* only allow this for regular channel widths */
+ switch (wdev->links[link_id].ap.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_320:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_320:
+ break;
+ default:
+ return -EINVAL;
+ }
+
result = rdev_set_ap_chanwidth(rdev, dev, link_id,
&chandef);
if (result)
@@ -4451,10 +4585,7 @@ static void get_key_callback(void *c, struct key_params *params)
struct nlattr *key;
struct get_key_cookie *cookie = c;
- if ((params->key &&
- nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
- params->key_len, params->key)) ||
- (params->seq &&
+ if ((params->seq &&
nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
params->seq_len, params->seq)) ||
(params->cipher &&
@@ -4466,10 +4597,7 @@ static void get_key_callback(void *c, struct key_params *params)
if (!key)
goto nla_put_failure;
- if ((params->key &&
- nla_put(cookie->msg, NL80211_KEY_DATA,
- params->key_len, params->key)) ||
- (params->seq &&
+ if ((params->seq &&
nla_put(cookie->msg, NL80211_KEY_SEQ,
params->seq_len, params->seq)) ||
(params->cipher &&
@@ -5924,6 +6052,7 @@ static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_beaconing_check_config beacon_check = {};
unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -5937,6 +6066,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->start_ap)
return -EOPNOTSUPP;
+ if (wdev->cac_started)
+ return -EBUSY;
+
if (wdev->links[link_id].ap.beacon_interval)
return -EALREADY;
@@ -6070,8 +6202,13 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef,
- wdev->iftype)) {
+ beacon_check.iftype = wdev->iftype;
+ beacon_check.relax = true;
+ beacon_check.reg_power =
+ cfg80211_get_6ghz_power_type(params->beacon.tail,
+ params->beacon.tail_len);
+ if (!cfg80211_reg_check_beaconing(&rdev->wiphy, &params->chandef,
+ &beacon_check)) {
err = -EINVAL;
goto out;
}
@@ -6228,6 +6365,7 @@ out:
static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_beaconing_check_config beacon_check = {};
unsigned int link_id = nl80211_link_id(info->attrs);
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -6254,6 +6392,19 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out;
+ /* recheck beaconing is permitted with possibly changed power type */
+ beacon_check.iftype = wdev->iftype;
+ beacon_check.relax = true;
+ beacon_check.reg_power =
+ cfg80211_get_6ghz_power_type(params->beacon.tail,
+ params->beacon.tail_len);
+ if (!cfg80211_reg_check_beaconing(&rdev->wiphy,
+ &wdev->links[link_id].ap.chandef,
+ &beacon_check)) {
+ err = -EINVAL;
+ goto out;
+ }
+
attr = info->attrs[NL80211_ATTR_FILS_DISCOVERY];
if (attr) {
err = nl80211_parse_fils_discovery(rdev, attr,
@@ -9929,6 +10080,17 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
flush_delayed_work(&rdev->dfs_update_channels_wk);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ default:
+ /* caution - see cfg80211_beaconing_iface_active() below */
+ return -EINVAL;
+ }
+
wiphy_lock(wiphy);
dfs_region = reg_get_dfs_region(wiphy);
@@ -9959,12 +10121,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
goto unlock;
}
- if (netif_carrier_ok(dev)) {
- err = -EBUSY;
- goto unlock;
- }
-
- if (wdev->cac_started) {
+ if (cfg80211_beaconing_iface_active(wdev) || wdev->cac_started) {
err = -EBUSY;
goto unlock;
}
@@ -13861,9 +14018,8 @@ nla_put_failure:
return -ENOBUFS;
}
-void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
+void cfg80211_free_coalesce(struct cfg80211_coalesce *coalesce)
{
- struct cfg80211_coalesce *coalesce = rdev->coalesce;
int i, j;
struct cfg80211_coalesce_rules *rule;
@@ -13872,13 +14028,13 @@ void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
for (i = 0; i < coalesce->n_rules; i++) {
rule = &coalesce->rules[i];
+ if (!rule)
+ continue;
for (j = 0; j < rule->n_patterns; j++)
kfree(rule->patterns[j].mask);
kfree(rule->patterns);
}
- kfree(coalesce->rules);
kfree(coalesce);
- rdev->coalesce = NULL;
}
static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
@@ -13976,17 +14132,16 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
- struct cfg80211_coalesce new_coalesce = {};
- struct cfg80211_coalesce *n_coalesce;
- int err, rem_rule, n_rules = 0, i, j;
+ struct cfg80211_coalesce *new_coalesce;
+ int err, rem_rule, n_rules = 0, i;
struct nlattr *rule;
- struct cfg80211_coalesce_rules *tmp_rule;
if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce)
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) {
- cfg80211_rdev_free_coalesce(rdev);
+ cfg80211_free_coalesce(rdev->coalesce);
+ rdev->coalesce = NULL;
rdev_set_coalesce(rdev, NULL);
return 0;
}
@@ -13997,47 +14152,34 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
if (n_rules > coalesce->n_rules)
return -EINVAL;
- new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]),
- GFP_KERNEL);
- if (!new_coalesce.rules)
+ new_coalesce = kzalloc(struct_size(new_coalesce, rules, n_rules),
+ GFP_KERNEL);
+ if (!new_coalesce)
return -ENOMEM;
- new_coalesce.n_rules = n_rules;
+ new_coalesce->n_rules = n_rules;
i = 0;
nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
rem_rule) {
err = nl80211_parse_coalesce_rule(rdev, rule,
- &new_coalesce.rules[i]);
+ &new_coalesce->rules[i]);
if (err)
goto error;
i++;
}
- err = rdev_set_coalesce(rdev, &new_coalesce);
+ err = rdev_set_coalesce(rdev, new_coalesce);
if (err)
goto error;
- n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL);
- if (!n_coalesce) {
- err = -ENOMEM;
- goto error;
- }
- cfg80211_rdev_free_coalesce(rdev);
- rdev->coalesce = n_coalesce;
+ cfg80211_free_coalesce(rdev->coalesce);
+ rdev->coalesce = new_coalesce;
return 0;
error:
- for (i = 0; i < new_coalesce.n_rules; i++) {
- tmp_rule = &new_coalesce.rules[i];
- if (!tmp_rule)
- continue;
- for (j = 0; j < tmp_rule->n_patterns; j++)
- kfree(tmp_rule->patterns[j].mask);
- kfree(tmp_rule->patterns);
- }
- kfree(new_coalesce.rules);
+ cfg80211_free_coalesce(new_coalesce);
return err;
}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 6376f3a87f8a..ffaab9a92e5b 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Portions of this file
- * Copyright (C) 2018, 2020-2022 Intel Corporation
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
*/
#ifndef __NET_WIRELESS_NL80211_H
#define __NET_WIRELESS_NL80211_H
@@ -119,7 +119,7 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id);
-void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
+void cfg80211_free_coalesce(struct cfg80211_coalesce *coalesce);
/* peer measurement */
int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index e106dcea3977..0396fa19bdf1 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2018 - 2021, 2023 Intel Corporation
+ * Copyright (C) 2018 - 2021, 2023 - 2024 Intel Corporation
*/
#include <net/cfg80211.h>
#include "core.h"
@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_period = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
out->ftm.burst_period =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+ nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
if (out->ftm.asap && !capa->ftm.asap) {
@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.num_bursts_exp = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
out->ftm.num_bursts_exp =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
if (capa->ftm.max_bursts_exponent >= 0 &&
out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_duration = 15;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
out->ftm.burst_duration =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
out->ftm.ftms_per_burst = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.ftmr_retries = 3;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
out->ftm.ftmr_retries =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
if (out->ftm.request_lci && !capa->ftm.request_lci) {
@@ -148,6 +148,14 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
+ if (out->ftm.ftms_per_burst > 31 && !out->ftm.non_trigger_based &&
+ !out->ftm.trigger_based) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
+ "FTM: FTMs per burst must be set lower than 31");
+ return -ERANGE;
+ }
+
if ((out->ftm.trigger_based || out->ftm.non_trigger_based) &&
out->ftm.preamble != NL80211_PREAMBLE_HE) {
NL_SET_ERR_MSG_ATTR(info->extack,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 43897a5269b6..ec3f4aa1c807 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018, 2021-2023 Intel Corporation
+ * Copyright (C) 2018, 2021-2024 Intel Corporation
*/
#ifndef __CFG80211_RDEV_OPS
#define __CFG80211_RDEV_OPS
@@ -458,6 +458,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
struct cfg80211_scan_request *request)
{
int ret;
+
+ if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
+ return -EINVAL;
+
trace_rdev_scan(&rdev->wiphy, request);
ret = rdev->ops->scan(&rdev->wiphy, request);
trace_rdev_return_int(&rdev->wiphy, ret);
@@ -574,13 +578,11 @@ static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev,
static inline int
rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
{
- int ret;
-
- if (!rdev->ops->set_wiphy_params)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
- ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
+ if (rdev->ops->set_wiphy_params)
+ ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -1421,13 +1423,11 @@ rdev_set_radar_background(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef)
{
struct wiphy *wiphy = &rdev->wiphy;
- int ret;
-
- if (!rdev->ops->set_radar_background)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_radar_background(wiphy, chandef);
- ret = rdev->ops->set_radar_background(wiphy, chandef);
+ if (rdev->ops->set_radar_background)
+ ret = rdev->ops->set_radar_background(wiphy, chandef);
trace_rdev_return_int(wiphy, ret);
return ret;
@@ -1464,13 +1464,11 @@ rdev_add_link_station(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct link_station_parameters *params)
{
- int ret;
-
- if (!rdev->ops->add_link_station)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_add_link_station(&rdev->wiphy, dev, params);
- ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params);
+ if (rdev->ops->add_link_station)
+ ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -1480,13 +1478,11 @@ rdev_mod_link_station(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct link_station_parameters *params)
{
- int ret;
-
- if (!rdev->ops->mod_link_station)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_mod_link_station(&rdev->wiphy, dev, params);
- ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params);
+ if (rdev->ops->mod_link_station)
+ ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -1496,13 +1492,11 @@ rdev_del_link_station(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct link_station_del_parameters *params)
{
- int ret;
-
- if (!rdev->ops->del_link_station)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_del_link_station(&rdev->wiphy, dev, params);
- ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params);
+ if (rdev->ops->del_link_station)
+ ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -1513,13 +1507,11 @@ rdev_set_hw_timestamp(struct cfg80211_registered_device *rdev,
struct cfg80211_set_hw_timestamp *hwts)
{
struct wiphy *wiphy = &rdev->wiphy;
- int ret;
-
- if (!rdev->ops->set_hw_timestamp)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_hw_timestamp(wiphy, dev, hwts);
- ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts);
+ if (rdev->ops->set_hw_timestamp)
+ ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts);
trace_rdev_return_int(wiphy, ret);
return ret;
@@ -1531,15 +1523,25 @@ rdev_set_ttlm(struct cfg80211_registered_device *rdev,
struct cfg80211_ttlm_params *params)
{
struct wiphy *wiphy = &rdev->wiphy;
- int ret;
-
- if (!rdev->ops->set_ttlm)
- return -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
trace_rdev_set_ttlm(wiphy, dev, params);
- ret = rdev->ops->set_ttlm(wiphy, dev, params);
+ if (rdev->ops->set_ttlm)
+ ret = rdev->ops->set_ttlm(wiphy, dev, params);
trace_rdev_return_int(wiphy, ret);
return ret;
}
+
+static inline u32
+rdev_get_radio_mask(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wiphy *wiphy = &rdev->wiphy;
+
+ if (!rdev->ops->get_radio_mask)
+ return 0;
+
+ return rdev->ops->get_radio_mask(wiphy, dev);
+}
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 3cef0021a3db..4a27f3823e25 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1600,6 +1600,8 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
if (rd_flags & NL80211_RRF_PSD)
channel_flags |= IEEE80211_CHAN_PSD;
+ if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP)
+ channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
return channel_flags;
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 127853877a0a..d99319d82205 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -812,6 +812,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
LIST_HEAD(coloc_ap_list);
bool need_scan_psc = true;
const struct ieee80211_sband_iftype_data *iftd;
+ size_t size, offs_ssids, offs_6ghz_params, offs_ies;
rdev_req->scan_6ghz = true;
@@ -877,10 +878,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
spin_unlock_bh(&rdev->bss_lock);
}
- request = kzalloc(struct_size(request, channels, n_channels) +
- sizeof(*request->scan_6ghz_params) * count +
- sizeof(*request->ssids) * rdev_req->n_ssids,
- GFP_KERNEL);
+ size = struct_size(request, channels, n_channels);
+ offs_ssids = size;
+ size += sizeof(*request->ssids) * rdev_req->n_ssids;
+ offs_6ghz_params = size;
+ size += sizeof(*request->scan_6ghz_params) * count;
+ offs_ies = size;
+ size += rdev_req->ie_len;
+
+ request = kzalloc(size, GFP_KERNEL);
if (!request) {
cfg80211_free_coloc_ap_list(&coloc_ap_list);
return -ENOMEM;
@@ -888,8 +894,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
*request = *rdev_req;
request->n_channels = 0;
- request->scan_6ghz_params =
- (void *)&request->channels[n_channels];
+ request->n_6ghz_params = 0;
+ if (rdev_req->n_ssids) {
+ /*
+ * Add the ssids from the parent scan request to the new
+ * scan request, so the driver would be able to use them
+ * in its probe requests to discover hidden APs on PSC
+ * channels.
+ */
+ request->ssids = (void *)request + offs_ssids;
+ memcpy(request->ssids, rdev_req->ssids,
+ sizeof(*request->ssids) * request->n_ssids);
+ }
+ request->scan_6ghz_params = (void *)request + offs_6ghz_params;
+
+ if (rdev_req->ie_len) {
+ void *ie = (void *)request + offs_ies;
+
+ memcpy(ie, rdev_req->ie, rdev_req->ie_len);
+ request->ie = ie;
+ }
/*
* PSC channels should not be scanned in case of direct scan with 1 SSID
@@ -978,17 +1002,8 @@ skip:
if (request->n_channels) {
struct cfg80211_scan_request *old = rdev->int_scan_req;
- rdev->int_scan_req = request;
- /*
- * Add the ssids from the parent scan request to the new scan
- * request, so the driver would be able to use them in its
- * probe requests to discover hidden APs on PSC channels.
- */
- request->ssids = (void *)&request->channels[request->n_channels];
- request->n_ssids = rdev_req->n_ssids;
- memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
- request->n_ssids);
+ rdev->int_scan_req = request;
/*
* If this scan follows a previous scan, save the scan start
@@ -1589,7 +1604,7 @@ struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
}
EXPORT_SYMBOL(__cfg80211_get_bss);
-static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+static bool rb_insert_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *bss)
{
struct rb_node **p = &rdev->bss_tree.rb_node;
@@ -1605,7 +1620,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
if (WARN_ON(!cmp)) {
/* will sort of leak this BSS */
- return;
+ return false;
}
if (cmp < 0)
@@ -1616,6 +1631,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
rb_link_node(&bss->rbn, parent, p);
rb_insert_color(&bss->rbn, &rdev->bss_tree);
+ return true;
}
static struct cfg80211_internal_bss *
@@ -1642,6 +1658,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev,
return NULL;
}
+static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev,
+ struct cfg80211_internal_bss *bss)
+{
+ lockdep_assert_held(&rdev->bss_lock);
+
+ if (!rb_insert_bss(rdev, bss))
+ return;
+ list_add_tail(&bss->list, &rdev->bss_list);
+ rdev->bss_entries++;
+}
+
+static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev,
+ struct cfg80211_internal_bss *bss)
+{
+ lockdep_assert_held(&rdev->bss_lock);
+
+ rb_erase(&bss->rbn, &rdev->bss_tree);
+ if (!rb_insert_bss(rdev, bss)) {
+ list_del(&bss->list);
+ if (!list_empty(&bss->hidden_list))
+ list_del_init(&bss->hidden_list);
+ if (!list_empty(&bss->pub.nontrans_list))
+ list_del_init(&bss->pub.nontrans_list);
+ rdev->bss_entries--;
+ }
+ rdev->bss_generation++;
+}
+
static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *new)
{
@@ -1954,9 +1998,7 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss));
}
- list_add_tail(&new->list, &rdev->bss_list);
- rdev->bss_entries++;
- rb_insert_bss(rdev, new);
+ cfg80211_insert_bss(rdev, new);
found = new;
}
@@ -2121,37 +2163,53 @@ struct cfg80211_inform_single_bss_data {
u64 cannot_use_reasons;
};
-static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
- const u32 flags)
+enum ieee80211_ap_reg_power
+cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len)
{
- const struct element *tmp;
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
struct ieee80211_he_operation *he_oper;
+ const struct element *tmp;
- tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
- if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
- const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
-
- he_oper = (void *)&tmp->data[1];
- he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper)
- return false;
+ tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION,
+ elems, elems_len);
+ if (!tmp || tmp->datalen < sizeof(*he_oper) + 1 ||
+ tmp->datalen < ieee80211_he_oper_size(tmp->data + 1))
+ return IEEE80211_REG_UNSET_AP;
+
+ he_oper = (void *)&tmp->data[1];
+ he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+
+ if (!he_6ghz_oper)
+ return IEEE80211_REG_UNSET_AP;
+
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
+ return IEEE80211_REG_LPI_AP;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
+ return IEEE80211_REG_SP_AP;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return IEEE80211_REG_VLP_AP;
+ default:
+ return IEEE80211_REG_UNSET_AP;
+ }
+}
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
- return true;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
- return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT);
- case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
- return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT);
- default:
- return false;
- }
+static bool cfg80211_6ghz_power_type_valid(const u8 *elems, size_t elems_len,
+ const u32 flags)
+{
+ switch (cfg80211_get_6ghz_power_type(elems, elems_len)) {
+ case IEEE80211_REG_LPI_AP:
+ return true;
+ case IEEE80211_REG_SP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT);
+ case IEEE80211_REG_VLP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT);
+ default:
+ return false;
}
- return false;
}
/* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -3333,19 +3391,14 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new)))
rdev->bss_generation++;
}
-
- rb_erase(&cbss->rbn, &rdev->bss_tree);
- rb_insert_bss(rdev, cbss);
- rdev->bss_generation++;
+ cfg80211_rehash_bss(rdev, cbss);
list_for_each_entry_safe(nontrans_bss, tmp,
&cbss->pub.nontrans_list,
nontrans_list) {
bss = bss_from_pub(nontrans_bss);
bss->pub.channel = chan;
- rb_erase(&bss->rbn, &rdev->bss_tree);
- rb_insert_bss(rdev, bss);
- rdev->bss_generation++;
+ cfg80211_rehash_bss(rdev, bss);
}
done:
@@ -3400,10 +3453,14 @@ int cfg80211_wext_siwscan(struct net_device *dev,
wiphy = &rdev->wiphy;
/* Determine number of channels, needed to allocate creq */
- if (wreq && wreq->num_channels)
+ if (wreq && wreq->num_channels) {
+ /* Passed from userspace so should be checked */
+ if (unlikely(wreq->num_channels > IW_MAX_FREQUENCIES))
+ return -EINVAL;
n_channels = wreq->num_channels;
- else
+ } else {
n_channels = ieee80211_get_num_supported_channels(wiphy);
+ }
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
@@ -3477,8 +3534,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
creq->ssids[0].ssid_len = wreq->essid_len;
}
- if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE)
+ if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) {
+ creq->ssids = NULL;
creq->n_ssids = 0;
+ }
}
for (i = 0; i < NUM_NL80211_BANDS; i++)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a8ad55f11133..e419aa8c4a5a 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -5,7 +5,7 @@
* (for nl80211's connect() and wext)
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved.
+ * Copyright (C) 2009, 2020, 2022-2024 Intel Corporation. All rights reserved.
* Copyright 2017 Intel Deutschland GmbH
*/
@@ -130,7 +130,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
rdev->scan_req = request;
- err = rdev_scan(rdev, request);
+ err = cfg80211_scan(rdev);
if (!err) {
wdev->conn->state = CFG80211_CONN_SCANNING;
nl80211_send_scan_start(rdev, wdev);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 565511a3f461..62f26618f674 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -5,7 +5,7 @@
*
* Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
*/
#include <linux/device.h>
@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
if (rdev->wiphy.registered && rdev->ops->resume)
ret = rdev_resume(rdev);
rdev->suspended = false;
- schedule_work(&rdev->wiphy_work);
+ queue_work(system_unbound_wq, &rdev->wiphy_work);
wiphy_unlock(&rdev->wiphy);
if (ret)
diff --git a/net/wireless/tests/chan.c b/net/wireless/tests/chan.c
index d02258ac2dab..74bbee25085f 100644
--- a/net/wireless/tests/chan.c
+++ b/net/wireless/tests/chan.c
@@ -113,16 +113,16 @@ static const struct chandef_compat_case {
},
},
{
- .desc = "different primary 160 MHz",
+ .desc = "different primary 320 MHz",
.c1 = {
.width = NL80211_CHAN_WIDTH_320,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 + 150,
+ .center_freq1 = 6475 + 110,
},
.c2 = {
.width = NL80211_CHAN_WIDTH_320,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 - 10,
+ .center_freq1 = 6475 - 50,
},
},
{
@@ -131,12 +131,12 @@ static const struct chandef_compat_case {
.c1 = {
.width = NL80211_CHAN_WIDTH_160,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 + 70,
+ .center_freq1 = 6475 + 30,
},
.c2 = {
.width = NL80211_CHAN_WIDTH_320,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 - 10,
+ .center_freq1 = 6475 - 50,
},
.compat = true,
},
@@ -145,12 +145,12 @@ static const struct chandef_compat_case {
.c1 = {
.width = NL80211_CHAN_WIDTH_160,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 + 70,
+ .center_freq1 = 6475 + 30,
},
.c2 = {
.width = NL80211_CHAN_WIDTH_320,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 - 10,
+ .center_freq1 = 6475 - 50,
.punctured = 0xf,
},
.compat = true,
@@ -160,13 +160,13 @@ static const struct chandef_compat_case {
.c1 = {
.width = NL80211_CHAN_WIDTH_160,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 + 70,
+ .center_freq1 = 6475 + 30,
.punctured = 0xc0,
},
.c2 = {
.width = NL80211_CHAN_WIDTH_320,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 - 10,
+ .center_freq1 = 6475 - 50,
.punctured = 0xc000,
},
.compat = true,
@@ -176,13 +176,13 @@ static const struct chandef_compat_case {
.c1 = {
.width = NL80211_CHAN_WIDTH_160,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 + 70,
+ .center_freq1 = 6475 + 30,
.punctured = 0x80,
},
.c2 = {
.width = NL80211_CHAN_WIDTH_320,
.chan = &chan_6ghz_105,
- .center_freq1 = 6475 - 10,
+ .center_freq1 = 6475 - 50,
.punctured = 0xc000,
},
},
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 87986170d1b1..5c26f065bd68 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -243,6 +243,80 @@
} while (0)
/*************************************************************
+ * wiphy work traces *
+ *************************************************************/
+
+DECLARE_EVENT_CLASS(wiphy_work_event,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(void *, instance)
+ __field(void *, func)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->instance = work;
+ __entry->func = work ? work->func : NULL;
+ ),
+ TP_printk(WIPHY_PR_FMT " instance=%p func=%pS",
+ WIPHY_PR_ARG, __entry->instance, __entry->func)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_queue,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_run,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_cancel,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+DEFINE_EVENT(wiphy_work_event, wiphy_work_flush,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
+ TP_ARGS(wiphy, work)
+);
+
+TRACE_EVENT(wiphy_delayed_work_queue,
+ TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work,
+ unsigned long delay),
+ TP_ARGS(wiphy, work, delay),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(void *, instance)
+ __field(void *, func)
+ __field(unsigned long, delay)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->instance = work;
+ __entry->func = work->func;
+ __entry->delay = delay;
+ ),
+ TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%ld",
+ WIPHY_PR_ARG, __entry->instance, __entry->func,
+ __entry->delay)
+);
+
+TRACE_EVENT(wiphy_work_worker_start,
+ TP_PROTO(struct wiphy *wiphy),
+ TP_ARGS(wiphy),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ ),
+ TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+);
+
+/*************************************************************
* rdev->ops traces *
*************************************************************/
@@ -2889,6 +2963,75 @@ DEFINE_EVENT(wiphy_wdev_link_evt, rdev_del_intf_link,
TP_ARGS(wiphy, wdev, link_id)
);
+TRACE_EVENT(rdev_del_link_station,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct link_station_del_parameters *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __array(u8, mld_mac, 6)
+ __field(u32, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ memset(__entry->mld_mac, 0, 6);
+ if (params->mld_mac)
+ memcpy(__entry->mld_mac, params->mld_mac, 6);
+ __entry->link_id = params->link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
+ ", link id: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac,
+ __entry->link_id)
+);
+
+TRACE_EVENT(rdev_set_hw_timestamp,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_set_hw_timestamp *hwts),
+
+ TP_ARGS(wiphy, netdev, hwts),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(macaddr)
+ __field(bool, enable)
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(macaddr, hwts->macaddr);
+ __entry->enable = hwts->enable;
+ ),
+
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr,
+ __entry->enable)
+);
+
+TRACE_EVENT(rdev_set_ttlm,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_ttlm_params *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __array(u8, dlink, sizeof(u16) * 8)
+ __array(u8, ulink, sizeof(u16) * 8)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ memcpy(__entry->dlink, params->dlink, sizeof(params->dlink));
+ memcpy(__entry->ulink, params->ulink, sizeof(params->ulink));
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -3246,23 +3389,26 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
TRACE_EVENT(cfg80211_reg_can_beacon,
TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype, bool check_no_ir),
- TP_ARGS(wiphy, chandef, iftype, check_no_ir),
+ enum nl80211_iftype iftype, u32 prohibited_flags,
+ u32 permitting_flags),
+ TP_ARGS(wiphy, chandef, iftype, prohibited_flags, permitting_flags),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_DEF_ENTRY
__field(enum nl80211_iftype, iftype)
- __field(bool, check_no_ir)
+ __field(u32, prohibited_flags)
+ __field(u32, permitting_flags)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->iftype = iftype;
- __entry->check_no_ir = check_no_ir;
+ __entry->prohibited_flags = prohibited_flags;
+ __entry->permitting_flags = permitting_flags;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
+ TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d prohibited_flags=0x%x permitting_flags=0x%x",
WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
- BOOL_TO_STR(__entry->check_no_ir))
+ __entry->prohibited_flags, __entry->permitting_flags)
);
TRACE_EVENT(cfg80211_chandef_dfs_required,
@@ -3923,55 +4069,6 @@ DEFINE_EVENT(link_station_add_mod, rdev_mod_link_station,
TP_ARGS(wiphy, netdev, params)
);
-TRACE_EVENT(rdev_del_link_station,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct link_station_del_parameters *params),
- TP_ARGS(wiphy, netdev, params),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __array(u8, mld_mac, 6)
- __field(u32, link_id)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- memset(__entry->mld_mac, 0, 6);
- if (params->mld_mac)
- memcpy(__entry->mld_mac, params->mld_mac, 6);
- __entry->link_id = params->link_id;
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", link id: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac,
- __entry->link_id)
-);
-
-TRACE_EVENT(rdev_set_hw_timestamp,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_set_hw_timestamp *hwts),
-
- TP_ARGS(wiphy, netdev, hwts),
-
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- MAC_ENTRY(macaddr)
- __field(bool, enable)
- ),
-
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- MAC_ASSIGN(macaddr, hwts->macaddr);
- __entry->enable = hwts->enable;
- ),
-
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr,
- __entry->enable)
-);
-
TRACE_EVENT(cfg80211_links_removed,
TP_PROTO(struct net_device *netdev, u16 link_mask),
TP_ARGS(netdev, link_mask),
@@ -3987,26 +4084,6 @@ TRACE_EVENT(cfg80211_links_removed,
__entry->link_mask)
);
-TRACE_EVENT(rdev_set_ttlm,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_ttlm_params *params),
- TP_ARGS(wiphy, netdev, params),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __array(u8, dlink, sizeof(u16) * 8)
- __array(u8, ulink, sizeof(u16) * 8)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- memcpy(__entry->dlink, params->dlink, sizeof(params->dlink));
- memcpy(__entry->ulink, params->ulink, sizeof(params->ulink));
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
- WIPHY_PR_ARG, NETDEV_PR_ARG)
-);
-
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2bde8a354631..9a7c3adc8a3b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1504,7 +1504,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
5120, /* 0.833333... */
};
u32 rates_160M[3] = { 960777777, 907400000, 816666666 };
- u32 rates_969[3] = { 480388888, 453700000, 408333333 };
+ u32 rates_996[3] = { 480388888, 453700000, 408333333 };
u32 rates_484[3] = { 229411111, 216666666, 195000000 };
u32 rates_242[3] = { 114711111, 108333333, 97500000 };
u32 rates_106[3] = { 40000000, 37777777, 34000000 };
@@ -1524,12 +1524,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8))
return 0;
- if (rate->bw == RATE_INFO_BW_160)
+ if (rate->bw == RATE_INFO_BW_160 ||
+ (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_2x996))
result = rates_160M[rate->he_gi];
else if (rate->bw == RATE_INFO_BW_80 ||
(rate->bw == RATE_INFO_BW_HE_RU &&
rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996))
- result = rates_969[rate->he_gi];
+ result = rates_996[rate->he_gi];
else if (rate->bw == RATE_INFO_BW_40 ||
(rate->bw == RATE_INFO_BW_HE_RU &&
rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484))
@@ -2305,13 +2307,16 @@ static int cfg80211_wdev_bi(struct wireless_dev *wdev)
static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
u32 *beacon_int_gcd,
- bool *beacon_int_different)
+ bool *beacon_int_different,
+ int radio_idx)
{
+ struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
*beacon_int_gcd = 0;
*beacon_int_different = false;
+ rdev = wiphy_to_rdev(wiphy);
list_for_each_entry(wdev, &wiphy->wdev_list, list) {
int wdev_bi;
@@ -2319,6 +2324,11 @@ static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
if (wdev->valid_links)
continue;
+ /* skip wdevs not active on the given wiphy radio */
+ if (radio_idx >= 0 &&
+ !(rdev_get_radio_mask(rdev, wdev->netdev) & BIT(radio_idx)))
+ continue;
+
wdev_bi = cfg80211_wdev_bi(wdev);
if (!wdev_bi)
@@ -2366,14 +2376,19 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
void *data),
void *data)
{
+ const struct wiphy_radio *radio = NULL;
+ const struct ieee80211_iface_combination *c, *cs;
const struct ieee80211_regdomain *regdom;
enum nl80211_dfs_regions region = 0;
- int i, j, iftype;
+ int i, j, n, iftype;
int num_interfaces = 0;
u32 used_iftypes = 0;
u32 beacon_int_gcd;
bool beacon_int_different;
+ if (params->radio_idx >= 0)
+ radio = &wiphy->radio[params->radio_idx];
+
/*
* This is a bit strange, since the iteration used to rely only on
* the data given by the driver, but here it now relies on context,
@@ -2385,7 +2400,8 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
* interfaces (while being brought up) and channel/radar data.
*/
cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
- &beacon_int_gcd, &beacon_int_different);
+ &beacon_int_gcd, &beacon_int_different,
+ params->radio_idx);
if (params->radar_detect) {
rcu_read_lock();
@@ -2402,13 +2418,18 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
used_iftypes |= BIT(iftype);
}
- for (i = 0; i < wiphy->n_iface_combinations; i++) {
- const struct ieee80211_iface_combination *c;
+ if (radio) {
+ cs = radio->iface_combinations;
+ n = radio->n_iface_combinations;
+ } else {
+ cs = wiphy->iface_combinations;
+ n = wiphy->n_iface_combinations;
+ }
+ for (i = 0; i < n; i++) {
struct ieee80211_iface_limit *limits;
u32 all_iftypes = 0;
- c = &wiphy->iface_combinations[i];
-
+ c = &cs[i];
if (num_interfaces > c->max_interfaces)
continue;
if (params->num_different_channels > c->num_different_channels)
@@ -2549,6 +2570,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
{
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
+ int ret;
wdev = dev->ieee80211_ptr;
if (!wdev)
@@ -2560,7 +2582,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
memset(sinfo, 0, sizeof(*sinfo));
- return rdev_get_station(rdev, dev, mac_addr, sinfo);
+ wiphy_lock(&rdev->wiphy);
+ ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
+ wiphy_unlock(&rdev->wiphy);
+
+ return ret;
}
EXPORT_SYMBOL(cfg80211_get_station);
@@ -2860,3 +2886,38 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type)
return NULL;
}
EXPORT_SYMBOL(cfg80211_get_iftype_ext_capa);
+
+static bool
+ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio,
+ u32 freq, u32 width)
+{
+ const struct wiphy_radio_freq_range *r;
+ int i;
+
+ for (i = 0; i < radio->n_freq_range; i++) {
+ r = &radio->freq_range[i];
+ if (freq - width / 2 >= r->start_freq &&
+ freq + width / 2 <= r->end_freq)
+ return true;
+ }
+
+ return false;
+}
+
+bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
+ const struct cfg80211_chan_def *chandef)
+{
+ u32 freq, width;
+
+ freq = ieee80211_chandef_to_khz(chandef);
+ width = nl80211_chan_width_to_mhz(chandef->width);
+ if (!ieee80211_radio_freq_range_valid(radio, freq, width))
+ return false;
+
+ freq = MHZ_TO_KHZ(chandef->center_freq2);
+ if (freq && !ieee80211_radio_freq_range_valid(radio, freq, width))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(cfg80211_radio_chandef_valid);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 727aa20be4bd..7e16336044b2 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -35,8 +35,6 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
-static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
-
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -313,13 +311,10 @@ static bool xsk_is_bound(struct xdp_sock *xs)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
- struct net_device *dev = xdp->rxq->dev;
- u32 qid = xdp->rxq->queue_index;
-
if (!xsk_is_bound(xs))
return -ENXIO;
- if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
+ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
return -EINVAL;
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
@@ -375,22 +370,23 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
int err;
err = xsk_rcv(xs, xdp);
if (err)
return err;
- if (!xs->flush_node.prev)
+ if (!xs->flush_node.prev) {
+ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+
list_add(&xs->flush_node, flush_list);
+ }
return 0;
}
-void __xsk_map_flush(void)
+void __xsk_map_flush(struct list_head *flush_list)
{
- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
struct xdp_sock *xs, *tmp;
list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -399,16 +395,6 @@ void __xsk_map_flush(void)
}
}
-#ifdef CONFIG_DEBUG_NET
-bool xsk_map_check_flush(void)
-{
- if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
- return false;
- __xsk_map_flush();
- return true;
-}
-#endif
-
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
xskq_prod_submit_n(pool->cq, nb_entries);
@@ -1775,7 +1761,7 @@ static struct pernet_operations xsk_net_ops = {
static int __init xsk_init(void)
{
- int err, cpu;
+ int err;
err = proto_register(&xsk_proto, 0 /* no slab */);
if (err)
@@ -1793,8 +1779,6 @@ static int __init xsk_init(void)
if (err)
goto out_pernet;
- for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
return 0;
out_pernet:
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index 547cec77ba03..512e0b2f8514 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -13,7 +13,8 @@ endif
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
xfrm_input.o xfrm_output.o \
- xfrm_sysctl.o xfrm_replay.o xfrm_device.o
+ xfrm_sysctl.o xfrm_replay.o xfrm_device.o \
+ xfrm_nat_keepalive.o
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
index 703d4172c7d7..91357ccaf4af 100644
--- a/net/xfrm/xfrm_compat.c
+++ b/net/xfrm/xfrm_compat.c
@@ -131,6 +131,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
[XFRMA_IF_ID] = { .type = NLA_U32 },
[XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
[XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
+ [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
};
static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
@@ -280,9 +281,10 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
case XFRMA_IF_ID:
case XFRMA_MTIMER_THRESH:
case XFRMA_SA_DIR:
+ case XFRMA_NAT_KEEPALIVE_INTERVAL:
return xfrm_nla_cpy(dst, src, nla_len(src));
default:
- BUILD_BUG_ON(XFRMA_MAX != XFRMA_SA_DIR);
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_NAT_KEEPALIVE_INTERVAL);
pr_warn_once("unsupported nla_type %d\n", src->nla_type);
return -EOPNOTSUPP;
}
@@ -437,7 +439,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
int err;
if (type > XFRMA_MAX) {
- BUILD_BUG_ON(XFRMA_MAX != XFRMA_SA_DIR);
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_NAT_KEEPALIVE_INTERVAL);
NL_SET_ERR_MSG(extack, "Bad attribute");
return -EOPNOTSUPP;
}
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 2455a76a1cff..9a44d363ba62 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -261,9 +261,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
- /* We don't yet support UDP encapsulation and TFC padding. */
- if ((!is_packet_offload && x->encap) || x->tfcpad) {
- NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
+ /* We don't yet support TFC padding. */
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG(extack, "TFC padding can't be offloaded");
return -EINVAL;
}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index d2ea18dcb0cb..749e7eea99e4 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -471,14 +471,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
struct xfrm_offload *xo = xfrm_offload(skb);
struct sec_path *sp;
- if (encap_type < 0 || (xo && xo->flags & XFRM_GRO)) {
+ if (encap_type < 0 || (xo && (xo->flags & XFRM_GRO || encap_type == 0 ||
+ encap_type == UDP_ENCAP_ESPINUDP))) {
x = xfrm_input_state(skb);
- if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR);
- goto drop;
- }
-
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
if (x->km.state == XFRM_STATE_ACQ)
XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
@@ -585,8 +581,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
}
if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR);
+ xfrm_audit_state_notfound(skb, family, spi, seq);
xfrm_state_put(x);
+ x = NULL;
goto drop;
}
diff --git a/net/xfrm/xfrm_nat_keepalive.c b/net/xfrm/xfrm_nat_keepalive.c
new file mode 100644
index 000000000000..82f0a301683f
--- /dev/null
+++ b/net/xfrm/xfrm_nat_keepalive.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * xfrm_nat_keepalive.c
+ *
+ * (c) 2024 Eyal Birger <eyal.birger@gmail.com>
+ */
+
+#include <net/inet_common.h>
+#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
+
+static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv4);
+#if IS_ENABLED(CONFIG_IPV6)
+static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv6);
+#endif
+
+struct nat_keepalive {
+ struct net *net;
+ u16 family;
+ xfrm_address_t saddr;
+ xfrm_address_t daddr;
+ __be16 encap_sport;
+ __be16 encap_dport;
+ __u32 smark;
+};
+
+static void nat_keepalive_init(struct nat_keepalive *ka, struct xfrm_state *x)
+{
+ ka->net = xs_net(x);
+ ka->family = x->props.family;
+ ka->saddr = x->props.saddr;
+ ka->daddr = x->id.daddr;
+ ka->encap_sport = x->encap->encap_sport;
+ ka->encap_dport = x->encap->encap_dport;
+ ka->smark = xfrm_smark_get(0, x);
+}
+
+static int nat_keepalive_send_ipv4(struct sk_buff *skb,
+ struct nat_keepalive *ka)
+{
+ struct net *net = ka->net;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct sock *sk;
+ __u8 tos = 0;
+ int err;
+
+ flowi4_init_output(&fl4, 0 /* oif */, skb->mark, tos,
+ RT_SCOPE_UNIVERSE, IPPROTO_UDP, 0,
+ ka->daddr.a4, ka->saddr.a4, ka->encap_dport,
+ ka->encap_sport, sock_net_uid(net, NULL));
+
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ skb_dst_set(skb, &rt->dst);
+
+ sk = *this_cpu_ptr(&nat_keepalive_sk_ipv4);
+ sock_net_set(sk, net);
+ err = ip_build_and_send_pkt(skb, sk, fl4.saddr, fl4.daddr, NULL, tos);
+ sock_net_set(sk, &init_net);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int nat_keepalive_send_ipv6(struct sk_buff *skb,
+ struct nat_keepalive *ka,
+ struct udphdr *uh)
+{
+ struct net *net = ka->net;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ struct sock *sk;
+ __wsum csum;
+ int err;
+
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = csum_ipv6_magic(&ka->saddr.in6, &ka->daddr.in6,
+ skb->len, IPPROTO_UDP, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_mark = skb->mark;
+ fl6.saddr = ka->saddr.in6;
+ fl6.daddr = ka->daddr.in6;
+ fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_sport = ka->encap_sport;
+ fl6.fl6_dport = ka->encap_dport;
+
+ sk = *this_cpu_ptr(&nat_keepalive_sk_ipv6);
+ sock_net_set(sk, net);
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ skb_dst_set(skb, dst);
+ err = ipv6_stub->ip6_xmit(sk, skb, &fl6, skb->mark, NULL, 0, 0);
+ sock_net_set(sk, &init_net);
+ return err;
+}
+#endif
+
+static void nat_keepalive_send(struct nat_keepalive *ka)
+{
+ const int nat_ka_hdrs_len = max(sizeof(struct iphdr),
+ sizeof(struct ipv6hdr)) +
+ sizeof(struct udphdr);
+ const u8 nat_ka_payload = 0xFF;
+ int err = -EAFNOSUPPORT;
+ struct sk_buff *skb;
+ struct udphdr *uh;
+
+ skb = alloc_skb(nat_ka_hdrs_len + sizeof(nat_ka_payload), GFP_ATOMIC);
+ if (unlikely(!skb))
+ return;
+
+ skb_reserve(skb, nat_ka_hdrs_len);
+
+ skb_put_u8(skb, nat_ka_payload);
+
+ uh = skb_push(skb, sizeof(*uh));
+ uh->source = ka->encap_sport;
+ uh->dest = ka->encap_dport;
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ skb->mark = ka->smark;
+
+ switch (ka->family) {
+ case AF_INET:
+ err = nat_keepalive_send_ipv4(skb, ka);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ err = nat_keepalive_send_ipv6(skb, ka, uh);
+ break;
+#endif
+ }
+ if (err)
+ kfree_skb(skb);
+}
+
+struct nat_keepalive_work_ctx {
+ time64_t next_run;
+ time64_t now;
+};
+
+static int nat_keepalive_work_single(struct xfrm_state *x, int count, void *ptr)
+{
+ struct nat_keepalive_work_ctx *ctx = ptr;
+ bool send_keepalive = false;
+ struct nat_keepalive ka;
+ time64_t next_run;
+ u32 interval;
+ int delta;
+
+ interval = x->nat_keepalive_interval;
+ if (!interval)
+ return 0;
+
+ spin_lock(&x->lock);
+
+ delta = (int)(ctx->now - x->lastused);
+ if (delta < interval) {
+ x->nat_keepalive_expiration = ctx->now + interval - delta;
+ next_run = x->nat_keepalive_expiration;
+ } else if (x->nat_keepalive_expiration > ctx->now) {
+ next_run = x->nat_keepalive_expiration;
+ } else {
+ next_run = ctx->now + interval;
+ nat_keepalive_init(&ka, x);
+ send_keepalive = true;
+ }
+
+ spin_unlock(&x->lock);
+
+ if (send_keepalive)
+ nat_keepalive_send(&ka);
+
+ if (!ctx->next_run || next_run < ctx->next_run)
+ ctx->next_run = next_run;
+ return 0;
+}
+
+static void nat_keepalive_work(struct work_struct *work)
+{
+ struct nat_keepalive_work_ctx ctx;
+ struct xfrm_state_walk walk;
+ struct net *net;
+
+ ctx.next_run = 0;
+ ctx.now = ktime_get_real_seconds();
+
+ net = container_of(work, struct net, xfrm.nat_keepalive_work.work);
+ xfrm_state_walk_init(&walk, IPPROTO_ESP, NULL);
+ xfrm_state_walk(net, &walk, nat_keepalive_work_single, &ctx);
+ xfrm_state_walk_done(&walk, net);
+ if (ctx.next_run)
+ schedule_delayed_work(&net->xfrm.nat_keepalive_work,
+ (ctx.next_run - ctx.now) * HZ);
+}
+
+static int nat_keepalive_sk_init(struct sock * __percpu *socks,
+ unsigned short family)
+{
+ struct sock *sk;
+ int err, i;
+
+ for_each_possible_cpu(i) {
+ err = inet_ctl_sock_create(&sk, family, SOCK_RAW, IPPROTO_UDP,
+ &init_net);
+ if (err < 0)
+ goto err;
+
+ *per_cpu_ptr(socks, i) = sk;
+ }
+
+ return 0;
+err:
+ for_each_possible_cpu(i)
+ inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+ return err;
+}
+
+static void nat_keepalive_sk_fini(struct sock * __percpu *socks)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+}
+
+void xfrm_nat_keepalive_state_updated(struct xfrm_state *x)
+{
+ struct net *net;
+
+ if (!x->nat_keepalive_interval)
+ return;
+
+ net = xs_net(x);
+ schedule_delayed_work(&net->xfrm.nat_keepalive_work, 0);
+}
+
+int __net_init xfrm_nat_keepalive_net_init(struct net *net)
+{
+ INIT_DELAYED_WORK(&net->xfrm.nat_keepalive_work, nat_keepalive_work);
+ return 0;
+}
+
+int xfrm_nat_keepalive_net_fini(struct net *net)
+{
+ cancel_delayed_work_sync(&net->xfrm.nat_keepalive_work);
+ return 0;
+}
+
+int xfrm_nat_keepalive_init(unsigned short family)
+{
+ int err = -EAFNOSUPPORT;
+
+ switch (family) {
+ case AF_INET:
+ err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv4, PF_INET);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv6, PF_INET6);
+ break;
+#endif
+ }
+
+ if (err)
+ pr_err("xfrm nat keepalive init: failed to init err:%d\n", err);
+ return err;
+}
+EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_init);
+
+void xfrm_nat_keepalive_fini(unsigned short family)
+{
+ switch (family) {
+ case AF_INET:
+ nat_keepalive_sk_fini(&nat_keepalive_sk_ipv4);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ nat_keepalive_sk_fini(&nat_keepalive_sk_ipv6);
+ break;
+#endif
+ }
+}
+EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_fini);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 475b904fe68b..c56c61b0c12e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -452,6 +452,8 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
+ xfrm_dev_policy_delete(policy);
+
write_lock_bh(&policy->lock);
policy->walk.dead = 1;
write_unlock_bh(&policy->lock);
@@ -1850,7 +1852,6 @@ again:
__xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- xfrm_dev_policy_delete(pol);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
@@ -1891,7 +1892,6 @@ again:
__xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- xfrm_dev_policy_delete(pol);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
@@ -2342,7 +2342,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
pol = __xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
- xfrm_dev_policy_delete(pol);
xfrm_policy_kill(pol);
return 0;
}
@@ -3718,12 +3717,15 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
if (!pol) {
+ const bool is_crypto_offload = sp &&
+ (xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO);
+
if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
}
- if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
+ if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) {
xfrm_secpath_reject(xerr_idx, skb, &fl);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
@@ -3910,15 +3912,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
/* Impossible. Such dst must be popped before reaches point of failure. */
}
-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
+static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
{
- if (dst) {
- if (dst->obsolete) {
- dst_release(dst);
- dst = NULL;
- }
- }
- return dst;
+ if (dst->obsolete)
+ sk_dst_reset(sk);
}
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
@@ -4289,8 +4286,14 @@ static int __net_init xfrm_net_init(struct net *net)
if (rv < 0)
goto out_sysctl;
+ rv = xfrm_nat_keepalive_net_init(net);
+ if (rv < 0)
+ goto out_nat_keepalive;
+
return 0;
+out_nat_keepalive:
+ xfrm_sysctl_fini(net);
out_sysctl:
xfrm_policy_fini(net);
out_policy:
@@ -4303,6 +4306,7 @@ out_statistics:
static void __net_exit xfrm_net_exit(struct net *net)
{
+ xfrm_nat_keepalive_net_fini(net);
xfrm_sysctl_fini(net);
xfrm_policy_fini(net);
xfrm_state_fini(net);
@@ -4364,6 +4368,7 @@ void __init xfrm_init(void)
#endif
register_xfrm_state_bpf();
+ xfrm_nat_keepalive_init(AF_INET);
}
#ifdef CONFIG_AUDITSYSCALL
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 649bb739df0d..37478d36a8df 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -49,6 +49,7 @@ static struct kmem_cache *xfrm_state_cache __ro_after_init;
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
+static HLIST_HEAD(xfrm_state_dev_gc_list);
static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
@@ -214,6 +215,7 @@ static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
static DEFINE_SPINLOCK(xfrm_state_gc_lock);
+static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
@@ -683,6 +685,41 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
}
EXPORT_SYMBOL(xfrm_state_alloc);
+#ifdef CONFIG_XFRM_OFFLOAD
+void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = READ_ONCE(xso->dev);
+
+ if (dev) {
+ dev->xfrmdev_ops->xdo_dev_state_delete(x);
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+ hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);
+
+void xfrm_dev_state_free(struct xfrm_state *x)
+{
+ struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = READ_ONCE(xso->dev);
+
+ if (dev && dev->xfrmdev_ops) {
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+ if (!hlist_unhashed(&x->dev_gclist))
+ hlist_del(&x->dev_gclist);
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+
+ if (dev->xfrmdev_ops->xdo_dev_state_free)
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
+ WRITE_ONCE(xso->dev, NULL);
+ xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ netdev_put(dev, &xso->dev_tracker);
+ }
+}
+#endif
+
void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
WARN_ON(x->km.state != XFRM_STATE_DEAD);
@@ -715,6 +752,7 @@ int __xfrm_state_delete(struct xfrm_state *x)
if (x->id.spi)
hlist_del_rcu(&x->byspi);
net->xfrm.state_num--;
+ xfrm_nat_keepalive_state_updated(x);
spin_unlock(&net->xfrm.xfrm_state_lock);
if (x->encap_sk)
@@ -848,6 +886,9 @@ EXPORT_SYMBOL(xfrm_state_flush);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
+ struct xfrm_state *x;
+ struct hlist_node *tmp;
+ struct xfrm_dev_offload *xso;
int i, err = 0, cnt = 0;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
@@ -857,8 +898,6 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
- struct xfrm_state *x;
- struct xfrm_dev_offload *xso;
restart:
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
@@ -868,6 +907,8 @@ restart:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_delete(x);
+ xfrm_dev_state_free(x);
+
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
xfrm_state_put(x);
@@ -884,6 +925,24 @@ restart:
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+restart_gc:
+ hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
+ xso = &x->xso;
+
+ if (xso->dev == dev) {
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+ xfrm_dev_state_free(x);
+ spin_lock_bh(&xfrm_state_dev_gc_lock);
+ goto restart_gc;
+ }
+
+ }
+ spin_unlock_bh(&xfrm_state_dev_gc_lock);
+
+ xfrm_flush_gc();
+
return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);
@@ -1273,8 +1332,7 @@ found:
xso->dev = xdo->dev;
xso->real_dev = xdo->real_dev;
xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
- netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
- GFP_ATOMIC);
+ netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC);
error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
if (error) {
xso->dir = 0;
@@ -1453,6 +1511,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
+ xfrm_nat_keepalive_state_updated(x);
}
/* net->xfrm.xfrm_state_lock is held */
@@ -2871,6 +2930,21 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
goto error;
}
+ if (x->nat_keepalive_interval) {
+ if (x->dir != XFRM_SA_DIR_OUT) {
+ NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs");
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
+ NL_SET_ERR_MSG(extack,
+ "NAT keepalive is only supported for UDP encapsulation");
+ err = -EINVAL;
+ goto error;
+ }
+ }
+
error:
return err;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index e83c687bd64e..55f039ec3d59 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -833,6 +833,10 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if (attrs[XFRMA_SA_DIR])
x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);
+ if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL])
+ x->nat_keepalive_interval =
+ nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]);
+
err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack);
if (err)
goto error;
@@ -1288,6 +1292,13 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
}
if (x->dir)
ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
+
+ if (x->nat_keepalive_interval) {
+ ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL,
+ x->nat_keepalive_interval);
+ if (ret)
+ goto out;
+ }
out:
return ret;
}
@@ -2455,7 +2466,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
NETLINK_CB(skb).portid);
}
} else {
- xfrm_dev_policy_delete(xp);
xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err != 0)
@@ -3165,6 +3175,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_IF_ID] = { .type = NLA_U32 },
[XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
[XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
+ [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);
@@ -3474,6 +3485,9 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
if (x->dir)
l += nla_total_size(sizeof(x->dir));
+ if (x->nat_keepalive_interval)
+ l += nla_total_size(sizeof(x->nat_keepalive_interval));
+
return l;
}
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index ddb5644d4fd9..6deee85a29c8 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -7,6 +7,9 @@
*/
#include <kunit/test.h>
+#include <linux/blk_types.h>
+#include <linux/blk-mq.h>
+#include <linux/blkdev.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
#include <linux/jiffies.h>
@@ -20,8 +23,10 @@
/* `bindgen` gets confused at certain things. */
const size_t RUST_CONST_HELPER_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
+const size_t RUST_CONST_HELPER_PAGE_SIZE = PAGE_SIZE;
const gfp_t RUST_CONST_HELPER_GFP_ATOMIC = GFP_ATOMIC;
const gfp_t RUST_CONST_HELPER_GFP_KERNEL = GFP_KERNEL;
const gfp_t RUST_CONST_HELPER_GFP_KERNEL_ACCOUNT = GFP_KERNEL_ACCOUNT;
const gfp_t RUST_CONST_HELPER_GFP_NOWAIT = GFP_NOWAIT;
const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
+const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
diff --git a/rust/helpers.c b/rust/helpers.c
index 2c37a0f5d7a8..3df5217fb2ff 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -186,3 +186,19 @@ static_assert(
__alignof__(size_t) == __alignof__(uintptr_t),
"Rust code expects C `size_t` to match Rust `usize`"
);
+
+// This will soon be moved to a separate file, so no need to merge with above.
+#include <linux/blk-mq.h>
+#include <linux/blkdev.h>
+
+void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_to_pdu);
+
+struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
+{
+ return blk_mq_rq_from_pdu(pdu);
+}
+EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_from_pdu);
diff --git a/rust/kernel/alloc/vec_ext.rs b/rust/kernel/alloc/vec_ext.rs
index e9a81052728a..1297a4be32e8 100644
--- a/rust/kernel/alloc/vec_ext.rs
+++ b/rust/kernel/alloc/vec_ext.rs
@@ -4,7 +4,6 @@
use super::{AllocError, Flags};
use alloc::vec::Vec;
-use core::ptr;
/// Extensions to [`Vec`].
pub trait VecExt<T>: Sized {
@@ -141,7 +140,11 @@ impl<T> VecExt<T> for Vec<T> {
// `krealloc_aligned`. A `Vec<T>`'s `ptr` value is not guaranteed to be NULL and might be
// dangling after being created with `Vec::new`. Instead, we can rely on `Vec<T>`'s capacity
// to be zero if no memory has been allocated yet.
- let ptr = if cap == 0 { ptr::null_mut() } else { old_ptr };
+ let ptr = if cap == 0 {
+ core::ptr::null_mut()
+ } else {
+ old_ptr
+ };
// SAFETY: `ptr` is valid because it's either NULL or comes from a previous call to
// `krealloc_aligned`. We also verified that the type is not a ZST.
diff --git a/rust/kernel/block.rs b/rust/kernel/block.rs
new file mode 100644
index 000000000000..150f710efe5b
--- /dev/null
+++ b/rust/kernel/block.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Types for working with the block layer.
+
+pub mod mq;
diff --git a/rust/kernel/block/mq.rs b/rust/kernel/block/mq.rs
new file mode 100644
index 000000000000..fb0f393c1cea
--- /dev/null
+++ b/rust/kernel/block/mq.rs
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides types for implementing block drivers that interface the
+//! blk-mq subsystem.
+//!
+//! To implement a block device driver, a Rust module must do the following:
+//!
+//! - Implement [`Operations`] for a type `T`.
+//! - Create a [`TagSet<T>`].
+//! - Create a [`GenDisk<T>`], via the [`GenDiskBuilder`].
+//! - Add the disk to the system by calling [`GenDiskBuilder::build`], passing in
+//! the `TagSet` reference.
+//!
+//! The types available in this module that have direct C counterparts are:
+//!
+//! - The [`TagSet`] type that abstracts the C type `struct tag_set`.
+//! - The [`GenDisk`] type that abstracts the C type `struct gendisk`.
+//! - The [`Request`] type that abstracts the C type `struct request`.
+//!
+//! The kernel will interface with the block device driver by calling the method
+//! implementations of the `Operations` trait.
+//!
+//! IO requests are passed to the driver as [`kernel::types::ARef<Request>`]
+//! instances. The `Request` type is a wrapper around the C `struct request`.
+//! The driver must mark end of processing by calling one of the
+//! `Request::end` methods. Failure to do so can lead to deadlock or timeout
+//! errors. Please note that the C function `blk_mq_start_request` is implicitly
+//! called when the request is queued with the driver.
+//!
+//! The `TagSet` is responsible for creating and maintaining a mapping between
+//! `Request`s and integer ids as well as carrying a pointer to the vtable
+//! generated by `Operations`. This mapping is useful for associating
+//! completions from hardware with the correct `Request` instance. The `TagSet`
+//! determines the maximum queue depth by setting the number of `Request`
+//! instances available to the driver, and it determines the number of queues to
+//! instantiate for the driver. If possible, a driver should allocate one queue
+//! per core, to keep queue data local to a core.
+//!
+//! One `TagSet` instance can be shared between multiple `GenDisk` instances.
+//! This can be useful when implementing drivers where one piece of hardware
+//! with one set of IO resources is represented to the user as multiple disks.
+//!
+//! One significant difference between block device drivers implemented with
+//! these Rust abstractions and drivers implemented in C is that the Rust
+//! drivers have to own a reference count on the `Request` type when the IO is
+//! in flight. This is to ensure that the C `struct request` instances backing
+//! the Rust `Request` instances are live while the Rust driver holds a
+//! reference to the `Request`. In addition, the conversion of an integer tag to
+//! a `Request` via the `TagSet` would not be sound without this bookkeeping.
+//!
+//! [`GenDisk`]: gen_disk::GenDisk
+//! [`GenDisk<T>`]: gen_disk::GenDisk
+//! [`GenDiskBuilder`]: gen_disk::GenDiskBuilder
+//! [`GenDiskBuilder::build`]: gen_disk::GenDiskBuilder::build
+//!
+//! # Example
+//!
+//! ```rust
+//! use kernel::{
+//! alloc::flags,
+//! block::mq::*,
+//! new_mutex,
+//! prelude::*,
+//! sync::{Arc, Mutex},
+//! types::{ARef, ForeignOwnable},
+//! };
+//!
+//! struct MyBlkDevice;
+//!
+//! #[vtable]
+//! impl Operations for MyBlkDevice {
+//!
+//! fn queue_rq(rq: ARef<Request<Self>>, _is_last: bool) -> Result {
+//! Request::end_ok(rq);
+//! Ok(())
+//! }
+//!
+//! fn commit_rqs() {}
+//! }
+//!
+//! let tagset: Arc<TagSet<MyBlkDevice>> =
+//! Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
+//! let mut disk = gen_disk::GenDiskBuilder::new()
+//! .capacity_sectors(4096)
+//! .build(format_args!("myblk"), tagset)?;
+//!
+//! # Ok::<(), kernel::error::Error>(())
+//! ```
+
+pub mod gen_disk;
+mod operations;
+mod raw_writer;
+mod request;
+mod tag_set;
+
+pub use operations::Operations;
+pub use request::Request;
+pub use tag_set::TagSet;
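The module documentation above notes that one `TagSet` instance can be shared between multiple `GenDisk` instances. As a rough, hypothetical sketch of that shape (reusing the `MyBlkDevice` type, queue parameters, and error type from the doc example above; the disk names are made up), sharing could look like:

    // One tag set backs two disks; each Arc clone keeps the set alive
    // for as long as the corresponding disk exists.
    let tagset: Arc<TagSet<MyBlkDevice>> =
        Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
    let disk_a = gen_disk::GenDiskBuilder::new()
        .capacity_sectors(4096)
        .build(format_args!("myblk_a"), tagset.clone())?;
    let disk_b = gen_disk::GenDiskBuilder::new()
        .capacity_sectors(4096)
        .build(format_args!("myblk_b"), tagset)?;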
diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs
new file mode 100644
index 000000000000..f548a6199847
--- /dev/null
+++ b/rust/kernel/block/mq/gen_disk.rs
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic disk abstraction.
+//!
+//! C header: [`include/linux/blkdev.h`](srctree/include/linux/blkdev.h)
+//! C header: [`include/linux/blk_mq.h`](srctree/include/linux/blk_mq.h)
+
+use crate::block::mq::{raw_writer::RawWriter, Operations, TagSet};
+use crate::error;
+use crate::{bindings, error::from_err_ptr, error::Result, sync::Arc};
+use core::fmt::{self, Write};
+
+/// A builder for [`GenDisk`].
+///
+/// Use this struct to configure and add new [`GenDisk`] to the VFS.
+pub struct GenDiskBuilder {
+ rotational: bool,
+ logical_block_size: u32,
+ physical_block_size: u32,
+ capacity_sectors: u64,
+}
+
+impl Default for GenDiskBuilder {
+ fn default() -> Self {
+ Self {
+ rotational: false,
+ logical_block_size: bindings::PAGE_SIZE as u32,
+ physical_block_size: bindings::PAGE_SIZE as u32,
+ capacity_sectors: 0,
+ }
+ }
+}
+
+impl GenDiskBuilder {
+ /// Create a new instance.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Set the rotational media attribute for the device to be built.
+ pub fn rotational(mut self, rotational: bool) -> Self {
+ self.rotational = rotational;
+ self
+ }
+
+ /// Validate block size by verifying that it is between 512 and `PAGE_SIZE`,
+ /// and that it is a power of two.
+ fn validate_block_size(size: u32) -> Result<()> {
+ if !(512..=bindings::PAGE_SIZE as u32).contains(&size) || !size.is_power_of_two() {
+ Err(error::code::EINVAL)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Set the logical block size of the device to be built.
+ ///
+ /// This method will check that block size is a power of two and between 512
+ /// and 4096. If not, an error is returned and the block size is not set.
+ ///
+ /// This is the smallest unit the storage device can address. It is
+ /// typically 4096 bytes.
+ pub fn logical_block_size(mut self, block_size: u32) -> Result<Self> {
+ Self::validate_block_size(block_size)?;
+ self.logical_block_size = block_size;
+ Ok(self)
+ }
+
+ /// Set the physical block size of the device to be built.
+ ///
+ /// This method will check that block size is a power of two and between 512
+ /// and `PAGE_SIZE`. If not, an error is returned and the block size is not set.
+ ///
+ /// This is the smallest unit a physical storage device can write
+ /// atomically. It is usually the same as the logical block size but may be
+ /// bigger. One example is SATA drives with 4096 byte physical block size
+ /// that expose a 512 byte logical block size to the operating system.
+ pub fn physical_block_size(mut self, block_size: u32) -> Result<Self> {
+ Self::validate_block_size(block_size)?;
+ self.physical_block_size = block_size;
+ Ok(self)
+ }
+
+ /// Set the capacity of the device to be built, in sectors (512 bytes).
+ pub fn capacity_sectors(mut self, capacity: u64) -> Self {
+ self.capacity_sectors = capacity;
+ self
+ }
+
+ /// Build a new `GenDisk` and add it to the VFS.
+ pub fn build<T: Operations>(
+ self,
+ name: fmt::Arguments<'_>,
+ tagset: Arc<TagSet<T>>,
+ ) -> Result<GenDisk<T>> {
+ let lock_class_key = crate::sync::LockClassKey::new();
+
+ // SAFETY: `bindings::queue_limits` contain only fields that are valid when zeroed.
+ let mut lim: bindings::queue_limits = unsafe { core::mem::zeroed() };
+
+ lim.logical_block_size = self.logical_block_size;
+ lim.physical_block_size = self.physical_block_size;
+ if self.rotational {
+ lim.features = bindings::BLK_FEAT_ROTATIONAL;
+ }
+
+ // SAFETY: `tagset.raw_tag_set()` points to a valid and initialized tag set
+ let gendisk = from_err_ptr(unsafe {
+ bindings::__blk_mq_alloc_disk(
+ tagset.raw_tag_set(),
+ &mut lim,
+ core::ptr::null_mut(),
+ lock_class_key.as_ptr(),
+ )
+ })?;
+
+ const TABLE: bindings::block_device_operations = bindings::block_device_operations {
+ submit_bio: None,
+ open: None,
+ release: None,
+ ioctl: None,
+ compat_ioctl: None,
+ check_events: None,
+ unlock_native_capacity: None,
+ getgeo: None,
+ set_read_only: None,
+ swap_slot_free_notify: None,
+ report_zones: None,
+ devnode: None,
+ alternative_gpt_sector: None,
+ get_unique_id: None,
+ // TODO: Set to THIS_MODULE. Waiting for const_refs_to_static feature to
+ // be merged (unstable in rustc 1.78 which is staged for linux 6.10)
+ // https://github.com/rust-lang/rust/issues/119618
+ owner: core::ptr::null_mut(),
+ pr_ops: core::ptr::null_mut(),
+ free_disk: None,
+ poll_bio: None,
+ };
+
+ // SAFETY: `gendisk` is a valid pointer as we initialized it above
+ unsafe { (*gendisk).fops = &TABLE };
+
+ let mut raw_writer = RawWriter::from_array(
+ // SAFETY: `gendisk` points to a valid and initialized instance. We
+ // have exclusive access, since the disk is not added to the VFS
+ // yet.
+ unsafe { &mut (*gendisk).disk_name },
+ )?;
+ raw_writer.write_fmt(name)?;
+ raw_writer.write_char('\0')?;
+
+ // SAFETY: `gendisk` points to a valid and initialized instance of
+ // `struct gendisk`. `set_capacity` takes a lock to synchronize this
+ // operation, so we will not race.
+ unsafe { bindings::set_capacity(gendisk, self.capacity_sectors) };
+
+ crate::error::to_result(
+ // SAFETY: `gendisk` points to a valid and initialized instance of
+ // `struct gendisk`.
+ unsafe {
+ bindings::device_add_disk(core::ptr::null_mut(), gendisk, core::ptr::null_mut())
+ },
+ )?;
+
+ // INVARIANT: `gendisk` was initialized above.
+ // INVARIANT: `gendisk` was added to the VFS via `device_add_disk` above.
+ Ok(GenDisk {
+ _tagset: tagset,
+ gendisk,
+ })
+ }
+}
+
+/// A generic block device.
+///
+/// # Invariants
+///
+/// - `gendisk` must always point to an initialized and valid `struct gendisk`.
+/// - `gendisk` was added to the VFS through a call to
+/// `bindings::device_add_disk`.
+pub struct GenDisk<T: Operations> {
+ _tagset: Arc<TagSet<T>>,
+ gendisk: *mut bindings::gendisk,
+}
+
+// SAFETY: `GenDisk` is an owned pointer to a `struct gendisk` and an `Arc` to a
+// `TagSet`. It is safe to send this to other threads as long as `T` is `Send`.
+unsafe impl<T: Operations + Send> Send for GenDisk<T> {}
+
+impl<T: Operations> Drop for GenDisk<T> {
+ fn drop(&mut self) {
+ // SAFETY: By type invariant, `self.gendisk` points to a valid and
+ // initialized instance of `struct gendisk`, and it was previously added
+ // to the VFS.
+ unsafe { bindings::del_gendisk(self.gendisk) };
+ }
+}
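Because `logical_block_size` and `physical_block_size` validate their argument and return `Result`, the builder chain is naturally fallible. A minimal, hypothetical sketch (assuming a `tagset: Arc<TagSet<MyBlkDevice>>` as in the `mq` module example, and a configuration where `PAGE_SIZE` is at least 4096 so the values below pass `validate_block_size`):

    // Illustrative values only: 512-byte logical sectors exposed over a
    // 4096-byte physical block size, non-rotational, 1 MiB of capacity.
    let disk = GenDiskBuilder::new()
        .rotational(false)
        .logical_block_size(512)?
        .physical_block_size(4096)?
        .capacity_sectors(2048)
        .build(format_args!("myblk"), tagset)?;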
diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs
new file mode 100644
index 000000000000..9ba7fdfeb4b2
--- /dev/null
+++ b/rust/kernel/block/mq/operations.rs
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides an interface for blk-mq drivers to implement.
+//!
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+
+use crate::{
+ bindings,
+ block::mq::request::RequestDataWrapper,
+ block::mq::Request,
+ error::{from_result, Result},
+ types::ARef,
+};
+use core::{marker::PhantomData, sync::atomic::AtomicU64, sync::atomic::Ordering};
+
+/// Implement this trait to interface a block device driver with blk-mq.
+///
+/// To implement a block device driver, implement this trait as described in the
+/// [module level documentation]. The kernel will use the implementation of the
+/// functions defined in this trait to interface with a block device driver. Note:
+/// There is no need for an exit_request() implementation, because the `drop`
+/// implementation of the [`Request`] type will be invoked automatically by
+/// the C/Rust glue logic.
+///
+/// [module level documentation]: kernel::block::mq
+#[macros::vtable]
+pub trait Operations: Sized {
+ /// Called by the kernel to queue a request with the driver. If `is_last` is
+ /// `false`, the driver is allowed to defer committing the request.
+ fn queue_rq(rq: ARef<Request<Self>>, is_last: bool) -> Result;
+
+ /// Called by the kernel to indicate that queued requests should be submitted.
+ fn commit_rqs();
+
+ /// Called by the kernel to poll the device for completed requests. Only
+ /// used for poll queues.
+ fn poll() -> bool {
+ crate::build_error(crate::error::VTABLE_DEFAULT_ERROR)
+ }
+}
+
+/// A vtable for blk-mq to interact with a block device driver.
+///
+/// A `bindings::blk_mq_ops` vtable is constructed from pointers to the `extern
+/// "C"` functions of this struct, exposed through the `OperationsVTable::VTABLE`.
+///
+/// For general documentation of these methods, see the kernel source
+/// documentation related to `struct blk_mq_ops` in
+/// [`include/linux/blk-mq.h`].
+///
+/// [`include/linux/blk-mq.h`]: srctree/include/linux/blk-mq.h
+pub(crate) struct OperationsVTable<T: Operations>(PhantomData<T>);
+
+impl<T: Operations> OperationsVTable<T> {
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// - The caller of this function must ensure that the pointee of `bd` is
+ /// valid for reads for the duration of this function.
+ /// - This function must be called for an initialized and live `hctx`. That
+ /// is, `Self::init_hctx_callback` was called and
+ /// `Self::exit_hctx_callback()` was not yet called.
+/// - `(*bd).rq` must point to an initialized and live `bindings::request`.
+ /// That is, `Self::init_request_callback` was called but
+ /// `Self::exit_request_callback` was not yet called for the request.
+ /// - `(*bd).rq` must be owned by the driver. That is, the block layer must
+ /// promise to not access the request until the driver calls
+ /// `bindings::blk_mq_end_request` for the request.
+ unsafe extern "C" fn queue_rq_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ bd: *const bindings::blk_mq_queue_data,
+ ) -> bindings::blk_status_t {
+ // SAFETY: `bd.rq` is valid as required by the safety requirement for
+ // this function.
+ let request = unsafe { &*(*bd).rq.cast::<Request<T>>() };
+
+ // One refcount for the ARef, one for being in flight
+ request.wrapper_ref().refcount().store(2, Ordering::Relaxed);
+
+ // SAFETY:
+ // - We own a refcount that we took above. We pass that to `ARef`.
+ // - By the safety requirements of this function, `request` is a valid
+ // `struct request` and the private data is properly initialized.
+ // - `rq` will be alive until `blk_mq_end_request` is called and is
+ // reference counted by `ARef` until then.
+ let rq = unsafe { Request::aref_from_raw((*bd).rq) };
+
+ // SAFETY: We have exclusive access and we just set the refcount above.
+ unsafe { Request::start_unchecked(&rq) };
+
+ let ret = T::queue_rq(
+ rq,
+ // SAFETY: `bd` is valid as required by the safety requirement for
+ // this function.
+ unsafe { (*bd).last },
+ );
+
+ if let Err(e) = ret {
+ e.to_blk_status()
+ } else {
+ bindings::BLK_STS_OK as _
+ }
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn commit_rqs_callback(_hctx: *mut bindings::blk_mq_hw_ctx) {
+ T::commit_rqs()
+ }
+
+ /// This function is called by the C kernel. It is not currently
+ /// implemented, and there is no way to exercise this code path.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn complete_callback(_rq: *mut bindings::request) {}
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn poll_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ _iob: *mut bindings::io_comp_batch,
+ ) -> core::ffi::c_int {
+ T::poll().into()
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure. This
+ /// function may only be called once before `exit_hctx_callback` is called
+ /// for the same context.
+ unsafe extern "C" fn init_hctx_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ _tagset_data: *mut core::ffi::c_void,
+ _hctx_idx: core::ffi::c_uint,
+ ) -> core::ffi::c_int {
+ from_result(|| Ok(0))
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called by blk-mq C infrastructure.
+ unsafe extern "C" fn exit_hctx_callback(
+ _hctx: *mut bindings::blk_mq_hw_ctx,
+ _hctx_idx: core::ffi::c_uint,
+ ) {
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// - This function may only be called by blk-mq C infrastructure.
+ /// - `_set` must point to an initialized `TagSet<T>`.
+ /// - `rq` must point to an initialized `bindings::request`.
+ /// - The allocation pointed to by `rq` must be at the size of `Request`
+ /// plus the size of `RequestDataWrapper`.
+ unsafe extern "C" fn init_request_callback(
+ _set: *mut bindings::blk_mq_tag_set,
+ rq: *mut bindings::request,
+ _hctx_idx: core::ffi::c_uint,
+ _numa_node: core::ffi::c_uint,
+ ) -> core::ffi::c_int {
+ from_result(|| {
+ // SAFETY: By the safety requirements of this function, `rq` points
+ // to a valid allocation.
+ let pdu = unsafe { Request::wrapper_ptr(rq.cast::<Request<T>>()) };
+
+ // SAFETY: The refcount field is allocated but not initialized, so
+ // it is valid for writes.
+ unsafe { RequestDataWrapper::refcount_ptr(pdu.as_ptr()).write(AtomicU64::new(0)) };
+
+ Ok(0)
+ })
+ }
+
+ /// This function is called by the C kernel. A pointer to this function is
+ /// installed in the `blk_mq_ops` vtable for the driver.
+ ///
+ /// # Safety
+ ///
+ /// - This function may only be called by blk-mq C infrastructure.
+ /// - `_set` must point to an initialized `TagSet<T>`.
+ /// - `rq` must point to an initialized and valid `Request`.
+ unsafe extern "C" fn exit_request_callback(
+ _set: *mut bindings::blk_mq_tag_set,
+ rq: *mut bindings::request,
+ _hctx_idx: core::ffi::c_uint,
+ ) {
+ // SAFETY: The tagset invariants guarantee that all requests are allocated with extra memory
+ // for the request data.
+ let pdu = unsafe { bindings::blk_mq_rq_to_pdu(rq) }.cast::<RequestDataWrapper>();
+
+ // SAFETY: `pdu` is valid for read and write and is properly initialised.
+ unsafe { core::ptr::drop_in_place(pdu) };
+ }
+
+ const VTABLE: bindings::blk_mq_ops = bindings::blk_mq_ops {
+ queue_rq: Some(Self::queue_rq_callback),
+ queue_rqs: None,
+ commit_rqs: Some(Self::commit_rqs_callback),
+ get_budget: None,
+ put_budget: None,
+ set_rq_budget_token: None,
+ get_rq_budget_token: None,
+ timeout: None,
+ poll: if T::HAS_POLL {
+ Some(Self::poll_callback)
+ } else {
+ None
+ },
+ complete: Some(Self::complete_callback),
+ init_hctx: Some(Self::init_hctx_callback),
+ exit_hctx: Some(Self::exit_hctx_callback),
+ init_request: Some(Self::init_request_callback),
+ exit_request: Some(Self::exit_request_callback),
+ cleanup_rq: None,
+ busy: None,
+ map_queues: None,
+ #[cfg(CONFIG_BLK_DEBUG_FS)]
+ show_rq: None,
+ };
+
+ pub(crate) const fn build() -> &'static bindings::blk_mq_ops {
+ &Self::VTABLE
+ }
+}
diff --git a/rust/kernel/block/mq/raw_writer.rs b/rust/kernel/block/mq/raw_writer.rs
new file mode 100644
index 000000000000..9222465d670b
--- /dev/null
+++ b/rust/kernel/block/mq/raw_writer.rs
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::fmt::{self, Write};
+
+use crate::error::Result;
+use crate::prelude::EINVAL;
+
+/// A mutable reference to a byte buffer where a string can be written into.
+///
+/// # Invariants
+///
+/// `buffer` is always null terminated.
+pub(crate) struct RawWriter<'a> {
+ buffer: &'a mut [u8],
+ pos: usize,
+}
+
+impl<'a> RawWriter<'a> {
+ /// Create a new `RawWriter` instance.
+ fn new(buffer: &'a mut [u8]) -> Result<RawWriter<'a>> {
+ *(buffer.last_mut().ok_or(EINVAL)?) = 0;
+
+ // INVARIANT: We null terminated the buffer above.
+ Ok(Self { buffer, pos: 0 })
+ }
+
+ pub(crate) fn from_array<const N: usize>(
+ a: &'a mut [core::ffi::c_char; N],
+ ) -> Result<RawWriter<'a>> {
+ Self::new(
+ // SAFETY: the buffer of `a` is valid for read and write as `u8` for
+ // at least `N` bytes.
+ unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr().cast::<u8>(), N) },
+ )
+ }
+}
+
+impl Write for RawWriter<'_> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ let bytes = s.as_bytes();
+ let len = bytes.len();
+
+ // We do not want to overwrite our null terminator
+ if self.pos + len > self.buffer.len() - 1 {
+ return Err(fmt::Error);
+ }
+
+ // INVARIANT: We are not overwriting the last byte
+ self.buffer[self.pos..self.pos + len].copy_from_slice(bytes);
+
+ self.pos += len;
+
+ Ok(())
+ }
+}
diff --git a/rust/kernel/block/mq/request.rs b/rust/kernel/block/mq/request.rs
new file mode 100644
index 000000000000..a0e22827f3f4
--- /dev/null
+++ b/rust/kernel/block/mq/request.rs
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides a wrapper for the C `struct request` type.
+//!
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+
+use crate::{
+ bindings,
+ block::mq::Operations,
+ error::Result,
+ types::{ARef, AlwaysRefCounted, Opaque},
+};
+use core::{
+ marker::PhantomData,
+ ptr::{addr_of_mut, NonNull},
+ sync::atomic::{AtomicU64, Ordering},
+};
+
+/// A wrapper around a blk-mq `struct request`. This represents an IO request.
+///
+/// # Implementation details
+///
+/// There are four states for a request that the Rust bindings care about:
+///
+/// A) Request is owned by block layer (refcount 0)
+/// B) Request is owned by driver but with zero `ARef`s in existence
+/// (refcount 1)
+/// C) Request is owned by driver with exactly one `ARef` in existence
+/// (refcount 2)
+/// D) Request is owned by driver with more than one `ARef` in existence
+/// (refcount > 2)
+///
+///
+/// We need to track A and B to ensure we fail tag to request conversions for
+/// requests that are not owned by the driver.
+///
+/// We need to track C and D to ensure that it is safe to end the request and hand
+/// back ownership to the block layer.
+///
+/// The states are tracked through the private `refcount` field of
+/// `RequestDataWrapper`. This structure lives in the private data area of the C
+/// `struct request`.
+///
+/// # Invariants
+///
+/// * `self.0` is a valid `struct request` created by the C portion of the kernel.
+/// * The private data area associated with this request must be an initialized
+/// and valid `RequestDataWrapper<T>`.
+/// * `self` is reference counted by atomic modification of
+/// `self.wrapper_ref().refcount()`.
+///
+#[repr(transparent)]
+pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);
+
+impl<T: Operations> Request<T> {
+ /// Create an `ARef<Request>` from a `struct request` pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The caller must own a refcount on `ptr` that is transferred to the
+ /// returned `ARef`.
+ /// * The type invariants for `Request` must hold for the pointee of `ptr`.
+ pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {
+ // INVARIANT: By the safety requirements of this function, invariants are upheld.
+ // SAFETY: By the safety requirement of this function, we own a
+ // reference count that we can pass to `ARef`.
+ unsafe { ARef::from_raw(NonNull::new_unchecked(ptr as *const Self as *mut Self)) }
+ }
+
+ /// Notify the block layer that a request is going to be processed now.
+ ///
+ /// The block layer uses this hook to do proper initializations such as
+ /// starting the timeout timer. It is a requirement that block device
+ /// drivers call this function when starting to process a request.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have exclusive ownership of `self`, that is
+ /// `self.wrapper_ref().refcount() == 2`.
+ pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
+ // SAFETY: By type invariant, `self.0` is a valid `struct request` and
+ // we have exclusive access.
+ unsafe { bindings::blk_mq_start_request(this.0.get()) };
+ }
+
+ /// Try to take exclusive ownership of `this` by dropping the refcount to 0.
+ /// This fails if `this` is not the only `ARef` pointing to the underlying
+ /// `Request`.
+ ///
+ /// If the operation is successful, `Ok` is returned with a pointer to the
+ /// C `struct request`. If the operation fails, `this` is returned in the
+ /// `Err` variant.
+ fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
+ // We can race with `TagSet::tag_to_rq`
+ if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
+ 2,
+ 0,
+ Ordering::Relaxed,
+ Ordering::Relaxed,
+ ) {
+ return Err(this);
+ }
+
+ let request_ptr = this.0.get();
+ core::mem::forget(this);
+
+ Ok(request_ptr)
+ }
+
+ /// Notify the block layer that the request has been completed without errors.
+ ///
+ /// This function will return `Err` if `this` is not the only `ARef`
+ /// referencing the request.
+ pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
+ let request_ptr = Self::try_set_end(this)?;
+
+ // SAFETY: By type invariant, `this.0` was a valid `struct request`. The
+ // success of the call to `try_set_end` guarantees that there are no
+ // `ARef`s pointing to this request. Therefore it is safe to hand it
+ // back to the block layer.
+ unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };
+
+ Ok(())
+ }
+
+ /// Return a pointer to the `RequestDataWrapper` stored in the private area
+ /// of the request structure.
+ ///
+ /// # Safety
+ ///
+ /// - `this` must point to a valid allocation that is at least as large as the
+ ///   size of `Self` plus the size of `RequestDataWrapper`.
+ pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {
+ let request_ptr = this.cast::<bindings::request>();
+ // SAFETY: By safety requirements for this function, `this` is a
+ // valid allocation.
+ let wrapper_ptr =
+ unsafe { bindings::blk_mq_rq_to_pdu(request_ptr).cast::<RequestDataWrapper>() };
+ // SAFETY: By C API contract, wrapper_ptr points to a valid allocation
+ // and is not null.
+ unsafe { NonNull::new_unchecked(wrapper_ptr) }
+ }
+
+ /// Return a reference to the `RequestDataWrapper` stored in the private
+ /// area of the request structure.
+ pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
+ // SAFETY: By type invariant, `self.0` is a valid allocation. Further,
+ // the private data associated with this request is initialized and
+ // valid. The existence of `&self` guarantees that the private data is
+ // valid as a shared reference.
+ unsafe { Self::wrapper_ptr(self as *const Self as *mut Self).as_ref() }
+ }
+}
+
+/// A wrapper around data stored in the private area of the C `struct request`.
+pub(crate) struct RequestDataWrapper {
+ /// The Rust request refcount has the following states:
+ ///
+ /// - 0: The request is owned by the C block layer.
+ /// - 1: The request is owned by the Rust abstractions but there are no `ARef` references to it.
+ /// - 2+: There are `ARef` references to the request.
+ refcount: AtomicU64,
+}
+
+impl RequestDataWrapper {
+ /// Return a reference to the refcount of the request that is embedding
+ /// `self`.
+ pub(crate) fn refcount(&self) -> &AtomicU64 {
+ &self.refcount
+ }
+
+ /// Return a pointer to the refcount of the request that is embedding the
+ /// pointee of `this`.
+ ///
+ /// # Safety
+ ///
+ /// - `this` must point to a live allocation of at least the size of `Self`.
+ pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
+ // SAFETY: Because of the safety requirements of this function, the
+ // field projection is safe.
+ unsafe { addr_of_mut!((*this).refcount) }
+ }
+}
+
+// SAFETY: Exclusive access is thread-safe for `Request`. `Request` has no `&mut
+// self` methods and `&self` methods that mutate `self` are internally
+// synchronized.
+unsafe impl<T: Operations> Send for Request<T> {}
+
+// SAFETY: Shared access is thread-safe for `Request`. `&self` methods that
+// mutate `self` are internally synchronized.
+unsafe impl<T: Operations> Sync for Request<T> {}
+
+/// Store the result of `op(target.load())` in `target`, returning the new
+/// value of `target`.
+fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
+ let old = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| Some(op(x)));
+
+ // SAFETY: Because the operation passed to `fetch_update` above always
+ // returns `Some`, `old` will always be `Ok`.
+ let old = unsafe { old.unwrap_unchecked() };
+
+ op(old)
+}
+
+/// Store the result of `op(target.load())` in `target` if `target.load() !=
+/// pred`, returning `true` if the target was updated.
+fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
+ target
+ .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
+ if x == pred {
+ None
+ } else {
+ Some(op(x))
+ }
+ })
+ .is_ok()
+}
+
+// SAFETY: All instances of `Request<T>` are reference counted. This
+// implementation of `AlwaysRefCounted` ensures that incrementing the refcount
+// keeps the object alive in memory at least until a matching refcount
+// decrement is executed.
+unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
+ fn inc_ref(&self) {
+ let refcount = &self.wrapper_ref().refcount();
+
+ #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
+ let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);
+
+ #[cfg(CONFIG_DEBUG_MISC)]
+ if !updated {
+ panic!("Request refcount zero on clone")
+ }
+ }
+
+ unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
+ // SAFETY: The type invariants of `ARef` guarantee that `obj` is valid
+ // for read.
+ let wrapper_ptr = unsafe { Self::wrapper_ptr(obj.as_ptr()).as_ptr() };
+ // SAFETY: The type invariant of `Request` guarantees that the private
+ // data area is initialized and valid.
+ let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };
+
+ #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
+ let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);
+
+ #[cfg(CONFIG_DEBUG_MISC)]
+ if new_refcount == 0 {
+ panic!("Request reached refcount zero in Rust abstractions");
+ }
+ }
+}
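
The four request states A-D documented above reduce to two guarded atomic updates: `inc_ref()` must never resurrect a request the driver no longer owns (refcount 0), and ending a request may only succeed while exactly one `ARef` exists (refcount 2). A standalone model of those two rules with `std` atomics, where `inc_ref` and `try_set_end` are plain functions standing in for the kernel methods:

    use std::sync::atomic::{AtomicU64, Ordering};

    /// Models `AlwaysRefCounted::inc_ref()`: increment unless the request is
    /// back with the block layer (refcount 0), as `atomic_relaxed_op_unless` does.
    fn inc_ref(refcount: &AtomicU64) -> bool {
        refcount
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
                if x == 0 {
                    None // state A: not owned by the driver, refuse to clone
                } else {
                    Some(x + 1)
                }
            })
            .is_ok()
    }

    /// Models `try_set_end()`: only the unique `ARef` holder (refcount 2) may
    /// drop the count to 0 and hand the request back to the block layer.
    fn try_set_end(refcount: &AtomicU64) -> bool {
        refcount
            .compare_exchange(2, 0, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    }

    fn main() {
        let refcount = AtomicU64::new(1); // state B: driver owns it, no `ARef`s yet
        assert!(inc_ref(&refcount));      // B -> C (one `ARef`)
        assert!(inc_ref(&refcount));      // C -> D (two `ARef`s)
        assert!(!try_set_end(&refcount)); // refused: another `ARef` still exists
        refcount.fetch_sub(1, Ordering::Relaxed); // D -> C (an `ARef` was dropped)
        assert!(try_set_end(&refcount));  // C -> A: request returns to the block layer
        assert!(!inc_ref(&refcount));     // state A: cloning here would be a bug
    }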
diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs
new file mode 100644
index 000000000000..f9a1ca655a35
--- /dev/null
+++ b/rust/kernel/block/mq/tag_set.rs
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This module provides the `TagSet` struct to wrap the C `struct blk_mq_tag_set`.
+//!
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+
+use core::pin::Pin;
+
+use crate::{
+ bindings,
+ block::mq::{operations::OperationsVTable, request::RequestDataWrapper, Operations},
+ error,
+ prelude::PinInit,
+ try_pin_init,
+ types::Opaque,
+};
+use core::{convert::TryInto, marker::PhantomData};
+use macros::{pin_data, pinned_drop};
+
+/// A wrapper for the C `struct blk_mq_tag_set`.
+///
+/// `struct blk_mq_tag_set` contains a `struct list_head` and so must be pinned.
+///
+/// # Invariants
+///
+/// - `inner` is initialized and valid.
+#[pin_data(PinnedDrop)]
+#[repr(transparent)]
+pub struct TagSet<T: Operations> {
+ #[pin]
+ inner: Opaque<bindings::blk_mq_tag_set>,
+ _p: PhantomData<T>,
+}
+
+impl<T: Operations> TagSet<T> {
+ /// Try to create a new tag set.
+ pub fn new(
+ nr_hw_queues: u32,
+ num_tags: u32,
+ num_maps: u32,
+ ) -> impl PinInit<Self, error::Error> {
+ // SAFETY: `blk_mq_tag_set` only contains integers and pointers, all of
+ // which are allowed to be 0.
+ let tag_set: bindings::blk_mq_tag_set = unsafe { core::mem::zeroed() };
+ let tag_set = core::mem::size_of::<RequestDataWrapper>()
+ .try_into()
+ .map(|cmd_size| {
+ bindings::blk_mq_tag_set {
+ ops: OperationsVTable::<T>::build(),
+ nr_hw_queues,
+ timeout: 0, // 0 means the C default, which is 30 seconds (30 * HZ)
+ numa_node: bindings::NUMA_NO_NODE,
+ queue_depth: num_tags,
+ cmd_size,
+ flags: bindings::BLK_MQ_F_SHOULD_MERGE,
+ driver_data: core::ptr::null_mut::<core::ffi::c_void>(),
+ nr_maps: num_maps,
+ ..tag_set
+ }
+ });
+
+ try_pin_init!(TagSet {
+ inner <- PinInit::<_, error::Error>::pin_chain(Opaque::new(tag_set?), |tag_set| {
+ // SAFETY: we do not move out of `tag_set`.
+ let tag_set = unsafe { Pin::get_unchecked_mut(tag_set) };
+ // SAFETY: `tag_set` is a reference to an initialized `blk_mq_tag_set`.
+ error::to_result(unsafe { bindings::blk_mq_alloc_tag_set(tag_set.get()) })
+ }),
+ _p: PhantomData,
+ })
+ }
+
+ /// Return the pointer to the wrapped `struct blk_mq_tag_set`.
+ pub(crate) fn raw_tag_set(&self) -> *mut bindings::blk_mq_tag_set {
+ self.inner.get()
+ }
+}
+
+#[pinned_drop]
+impl<T: Operations> PinnedDrop for TagSet<T> {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: By type invariant `inner` is valid and has been properly
+ // initialized during construction.
+ unsafe { bindings::blk_mq_free_tag_set(self.inner.get()) };
+ }
+}
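
`TagSet::new()` zero-initialises the C struct, overrides only the fields it cares about with struct-update syntax, and then chains the `blk_mq_alloc_tag_set()` call through `pin_chain`. A standalone sketch of the zero-then-override step, with `FakeTagSet` as an illustrative stand-in for `bindings::blk_mq_tag_set`:

    /// Illustrative stand-in for the C tag set; the real binding has many more
    /// fields, which is exactly why zeroing first is convenient.
    #[repr(C)]
    #[derive(Debug)]
    struct FakeTagSet {
        nr_hw_queues: u32,
        queue_depth: u32,
        cmd_size: u32,
        numa_node: i32,
        timeout: u32,
        flags: u64,
    }

    fn main() {
        // SAFETY (in the kernel version): the struct only holds integers and
        // pointers, so the all-zero bit pattern is a valid value.
        let zeroed: FakeTagSet = unsafe { core::mem::zeroed() };

        // Override only the interesting fields; everything else stays zero,
        // mirroring the `..tag_set` struct-update expression in `TagSet::new`.
        let tag_set = FakeTagSet {
            nr_hw_queues: 1,
            queue_depth: 256,
            cmd_size: core::mem::size_of::<u64>() as u32,
            numa_node: -1, // NUMA_NO_NODE in the kernel
            ..zeroed
        };

        assert_eq!(tag_set.timeout, 0); // 0 selects the C default timeout
        println!("{tag_set:?}");
    }

The pinning half of the design follows from the doc comment above: `blk_mq_tag_set` embeds a `struct list_head`, so the initialised value must never move, which is why construction goes through `try_pin_init!` rather than returning the struct by value.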
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 55280ae9fe40..145f5c397009 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -126,6 +126,12 @@ impl Error {
self.0
}
+ #[cfg(CONFIG_BLOCK)]
+ pub(crate) fn to_blk_status(self) -> bindings::blk_status_t {
+ // SAFETY: `self.0` is a valid error due to its invariant.
+ unsafe { bindings::errno_to_blk_status(self.0) }
+ }
+
/// Returns the error encoded as a pointer.
#[allow(dead_code)]
pub(crate) fn to_ptr<T>(self) -> *mut T {
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index fbd91a48ff8b..2cf7c6b6f66b 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -27,6 +27,8 @@ compile_error!("Missing kernel configuration for conditional compilation");
extern crate self as kernel;
pub mod alloc;
+#[cfg(CONFIG_BLOCK)]
+pub mod block;
mod build_assert;
pub mod error;
pub mod init;
diff --git a/samples/bpf/cpustat_kern.c b/samples/bpf/cpustat_kern.c
index 944f13fe164a..7ec7143e2757 100644
--- a/samples/bpf/cpustat_kern.c
+++ b/samples/bpf/cpustat_kern.c
@@ -211,7 +211,7 @@ int bpf_prog1(struct cpu_args *ctx)
SEC("tracepoint/power/cpu_frequency")
int bpf_prog2(struct cpu_args *ctx)
{
- u64 *pts, *cstate, *pstate, prev_state, cur_ts, delta;
+ u64 *pts, *cstate, *pstate, cur_ts, delta;
u32 key, cpu, pstate_idx;
u64 *val;
@@ -232,7 +232,6 @@ int bpf_prog2(struct cpu_args *ctx)
if (!cstate)
return 0;
- prev_state = *pstate;
*pstate = ctx->state;
if (!*pts) {
diff --git a/samples/hid/Makefile b/samples/hid/Makefile
index c128ccd49974..8ea59e9631a3 100644
--- a/samples/hid/Makefile
+++ b/samples/hid/Makefile
@@ -16,7 +16,6 @@ LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a
-EXTRA_HEADERS := hid_bpf_attach.h
EXTRA_BPF_HEADERS := hid_bpf_helpers.h
hid_mouse-objs := hid_mouse.o
@@ -207,8 +206,8 @@ $(obj)/%.bpf.o: $(src)/%.bpf.c $(EXTRA_BPF_HEADERS_SRC) $(obj)/vmlinux.h
LINKED_SKELS := hid_mouse.skel.h hid_surface_dial.skel.h
clean-files += $(LINKED_SKELS)
-hid_mouse.skel.h-deps := hid_mouse.bpf.o hid_bpf_attach.bpf.o
-hid_surface_dial.skel.h-deps := hid_surface_dial.bpf.o hid_bpf_attach.bpf.o
+hid_mouse.skel.h-deps := hid_mouse.bpf.o
+hid_surface_dial.skel.h-deps := hid_surface_dial.bpf.o
LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.bpf.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
diff --git a/samples/hid/hid_bpf_attach.bpf.c b/samples/hid/hid_bpf_attach.bpf.c
deleted file mode 100644
index d4dce4ea7c6e..000000000000
--- a/samples/hid/hid_bpf_attach.bpf.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2022 Benjamin Tissoires
- */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-#include "hid_bpf_attach.h"
-#include "hid_bpf_helpers.h"
-
-SEC("syscall")
-int attach_prog(struct attach_prog_args *ctx)
-{
- ctx->retval = hid_bpf_attach_prog(ctx->hid,
- ctx->prog_fd,
- 0);
- return 0;
-}
diff --git a/samples/hid/hid_bpf_attach.h b/samples/hid/hid_bpf_attach.h
deleted file mode 100644
index 35bb28b49264..000000000000
--- a/samples/hid/hid_bpf_attach.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2022 Benjamin Tissoires
- */
-
-#ifndef __HID_BPF_ATTACH_H
-#define __HID_BPF_ATTACH_H
-
-struct attach_prog_args {
- int prog_fd;
- unsigned int hid;
- int retval;
-};
-
-#endif /* __HID_BPF_ATTACH_H */
diff --git a/samples/hid/hid_mouse.bpf.c b/samples/hid/hid_mouse.bpf.c
index 7c8b453ccb16..f7f722dcf56d 100644
--- a/samples/hid/hid_mouse.bpf.c
+++ b/samples/hid/hid_mouse.bpf.c
@@ -5,8 +5,7 @@
#include <bpf/bpf_tracing.h>
#include "hid_bpf_helpers.h"
-SEC("fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_y_event, struct hid_bpf_ctx *hctx)
+static int hid_y_event(struct hid_bpf_ctx *hctx)
{
s16 y;
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);
@@ -51,8 +50,7 @@ int BPF_PROG(hid_y_event, struct hid_bpf_ctx *hctx)
return 0;
}
-SEC("fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_x_event, struct hid_bpf_ctx *hctx)
+static int hid_x_event(struct hid_bpf_ctx *hctx)
{
s16 x;
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);
@@ -69,7 +67,19 @@ int BPF_PROG(hid_x_event, struct hid_bpf_ctx *hctx)
return 0;
}
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC("struct_ops/hid_device_event")
+int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx, enum hid_report_type type)
+{
+ int ret = hid_y_event(hctx);
+
+ if (ret)
+ return ret;
+
+ return hid_x_event(hctx);
+}
+
+
+SEC("struct_ops/hid_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
@@ -109,4 +119,10 @@ int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)
return 0;
}
+SEC(".struct_ops.link")
+struct hid_bpf_ops mouse_invert = {
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup,
+ .hid_device_event = (void *)hid_event,
+};
+
char _license[] SEC("license") = "GPL";
diff --git a/samples/hid/hid_mouse.c b/samples/hid/hid_mouse.c
index 018f1185f203..4b80d4e4c154 100644
--- a/samples/hid/hid_mouse.c
+++ b/samples/hid/hid_mouse.c
@@ -29,7 +29,6 @@
#include <bpf/libbpf.h>
#include "hid_mouse.skel.h"
-#include "hid_bpf_attach.h"
static bool running = true;
@@ -76,18 +75,11 @@ static int get_hid_id(const char *path)
int main(int argc, char **argv)
{
struct hid_mouse *skel;
- struct bpf_program *prog;
+ struct bpf_link *link;
int err;
const char *optstr = "";
const char *sysfs_path;
- int opt, hid_id, attach_fd;
- struct attach_prog_args args = {
- .retval = -1,
- };
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
- .ctx_in = &args,
- .ctx_size_in = sizeof(args),
- );
+ int opt, hid_id;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
@@ -108,7 +100,7 @@ int main(int argc, char **argv)
return 1;
}
- skel = hid_mouse__open_and_load();
+ skel = hid_mouse__open();
if (!skel) {
fprintf(stderr, "%s %s:%d", __func__, __FILE__, __LINE__);
return -1;
@@ -120,27 +112,18 @@ int main(int argc, char **argv)
fprintf(stderr, "can not open HID device: %m\n");
return 1;
}
- args.hid = hid_id;
+ skel->struct_ops.mouse_invert->hid_id = hid_id;
- attach_fd = bpf_program__fd(skel->progs.attach_prog);
- if (attach_fd < 0) {
- fprintf(stderr, "can't locate attach prog: %m\n");
+ err = hid_mouse__load(skel);
+ if (err < 0) {
+ fprintf(stderr, "can not load HID-BPF program: %m\n");
return 1;
}
- bpf_object__for_each_program(prog, *skel->skeleton->obj) {
- /* ignore syscalls */
- if (bpf_program__get_type(prog) != BPF_PROG_TYPE_TRACING)
- continue;
-
- args.retval = -1;
- args.prog_fd = bpf_program__fd(prog);
- err = bpf_prog_test_run_opts(attach_fd, &tattr);
- if (err) {
- fprintf(stderr, "can't attach prog to hid device %d: %m (err: %d)\n",
- hid_id, err);
- return 1;
- }
+ link = bpf_map__attach_struct_ops(skel->maps.mouse_invert);
+ if (!link) {
+ fprintf(stderr, "can not attach HID-BPF program: %m\n");
+ return 1;
}
signal(SIGINT, int_exit);
diff --git a/samples/hid/hid_surface_dial.bpf.c b/samples/hid/hid_surface_dial.bpf.c
index 1f80478c0918..527d584812ab 100644
--- a/samples/hid/hid_surface_dial.bpf.c
+++ b/samples/hid/hid_surface_dial.bpf.c
@@ -10,7 +10,7 @@
#define HID_UP_BUTTON 0x0009
#define HID_GD_WHEEL 0x0038
-SEC("fmod_ret/hid_bpf_device_event")
+SEC("struct_ops/hid_device_event")
int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);
@@ -101,7 +101,7 @@ int set_haptic(struct haptic_syscall_args *args)
}
/* Convert REL_DIAL into REL_WHEEL */
-SEC("fmod_ret/hid_bpf_rdesc_fixup")
+SEC("struct_ops/hid_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
@@ -130,5 +130,11 @@ int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)
return 0;
}
+SEC(".struct_ops.link")
+struct hid_bpf_ops surface_dial = {
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup,
+ .hid_device_event = (void *)hid_event,
+};
+
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = 1;
diff --git a/samples/hid/hid_surface_dial.c b/samples/hid/hid_surface_dial.c
index 4bc97373a708..9dd363845a85 100644
--- a/samples/hid/hid_surface_dial.c
+++ b/samples/hid/hid_surface_dial.c
@@ -31,7 +31,6 @@
#include <bpf/libbpf.h>
#include "hid_surface_dial.skel.h"
-#include "hid_bpf_attach.h"
static bool running = true;
@@ -86,34 +85,6 @@ static int get_hid_id(const char *path)
return (int)strtol(str_id, NULL, 16);
}
-static int attach_prog(struct hid_surface_dial *skel, struct bpf_program *prog, int hid_id)
-{
- struct attach_prog_args args = {
- .hid = hid_id,
- .retval = -1,
- };
- int attach_fd, err;
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
- .ctx_in = &args,
- .ctx_size_in = sizeof(args),
- );
-
- attach_fd = bpf_program__fd(skel->progs.attach_prog);
- if (attach_fd < 0) {
- fprintf(stderr, "can't locate attach prog: %m\n");
- return 1;
- }
-
- args.prog_fd = bpf_program__fd(prog);
- err = bpf_prog_test_run_opts(attach_fd, &tattr);
- if (err) {
- fprintf(stderr, "can't attach prog to hid device %d: %m (err: %d)\n",
- hid_id, err);
- return 1;
- }
- return 0;
-}
-
static int set_haptic(struct hid_surface_dial *skel, int hid_id)
{
struct haptic_syscall_args args = {
@@ -144,10 +115,10 @@ static int set_haptic(struct hid_surface_dial *skel, int hid_id)
int main(int argc, char **argv)
{
struct hid_surface_dial *skel;
- struct bpf_program *prog;
const char *optstr = "r:";
+ struct bpf_link *link;
const char *sysfs_path;
- int opt, hid_id, resolution = 72;
+ int err, opt, hid_id, resolution = 72;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
@@ -189,7 +160,7 @@ int main(int argc, char **argv)
return 1;
}
- skel = hid_surface_dial__open_and_load();
+ skel = hid_surface_dial__open();
if (!skel) {
fprintf(stderr, "%s %s:%d", __func__, __FILE__, __LINE__);
return -1;
@@ -201,15 +172,21 @@ int main(int argc, char **argv)
return 1;
}
+ skel->struct_ops.surface_dial->hid_id = hid_id;
+
+ err = hid_surface_dial__load(skel);
+ if (err < 0) {
+ fprintf(stderr, "can not load HID-BPF program: %m\n");
+ return 1;
+ }
+
skel->data->resolution = resolution;
skel->data->physical = (int)(resolution / 72);
- bpf_object__for_each_program(prog, *skel->skeleton->obj) {
- /* ignore syscalls */
- if (bpf_program__get_type(prog) != BPF_PROG_TYPE_TRACING)
- continue;
-
- attach_prog(skel, prog, hid_id);
+ link = bpf_map__attach_struct_ops(skel->maps.surface_dial);
+ if (!link) {
+ fprintf(stderr, "can not attach HID-BPF program: %m\n");
+ return 1;
}
signal(SIGINT, int_exit);
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 3ee8ecfb8c04..3500a3d62f0d 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -33,7 +33,8 @@ ld-option = $(success,$(LD) -v $(1))
# $(as-instr,<instr>)
# Return y if the assembler supports <instr>, n otherwise
-as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o /dev/null -)
+as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) $(2) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o /dev/null -)
+as-instr64 = $(as-instr,$(1),$(m64-flag))
# check if $(CC) and $(LD) exist
$(error-if,$(failure,command -v $(CC)),C compiler '$(CC)' not found)
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
deleted file mode 100644
index 1486abf34c7c..000000000000
--- a/scripts/Makefile.asm-generic
+++ /dev/null
@@ -1,58 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# include/asm-generic contains a lot of files that are used
-# verbatim by several architectures.
-#
-# This Makefile reads the file arch/$(SRCARCH)/include/(uapi/)/asm/Kbuild
-# and for each file listed in this file with generic-y creates
-# a small wrapper file in arch/$(SRCARCH)/include/generated/(uapi/)/asm.
-
-PHONY := all
-all:
-
-src := $(srctree)/$(subst /generated,,$(obj))
-
-include $(srctree)/scripts/Kbuild.include
--include $(kbuild-file)
-
-# $(generic)/Kbuild lists mandatory-y. Exclude um since it is a special case.
-ifneq ($(SRCARCH),um)
-include $(srctree)/$(generic)/Kbuild
-endif
-
-redundant := $(filter $(mandatory-y) $(generated-y), $(generic-y))
-redundant += $(foreach f, $(generic-y), $(if $(wildcard $(src)/$(f)),$(f)))
-redundant := $(sort $(redundant))
-$(if $(redundant),\
- $(warning redundant generic-y found in $(src)/Kbuild: $(redundant)))
-
-# If arch does not implement mandatory headers, fallback to asm-generic ones.
-mandatory-y := $(filter-out $(generated-y), $(mandatory-y))
-generic-y += $(foreach f, $(mandatory-y), $(if $(wildcard $(src)/$(f)),,$(f)))
-
-generic-y := $(addprefix $(obj)/, $(generic-y))
-generated-y := $(addprefix $(obj)/, $(generated-y))
-
-# Remove stale wrappers when the corresponding files are removed from generic-y
-old-headers := $(wildcard $(obj)/*.h)
-unwanted := $(filter-out $(generic-y) $(generated-y),$(old-headers))
-
-quiet_cmd_wrap = WRAP $@
- cmd_wrap = echo "\#include <asm-generic/$*.h>" > $@
-
-quiet_cmd_remove = REMOVE $(unwanted)
- cmd_remove = rm -f $(unwanted)
-
-all: $(generic-y)
- $(if $(unwanted),$(call cmd,remove))
- @:
-
-$(obj)/%.h:
- $(call cmd,wrap)
-
-# Create output directory. Skip it if at least one old header exists
-# since we know the output directory already exists.
-ifeq ($(old-headers),)
-$(shell mkdir -p $(obj))
-endif
-
-.PHONY: $(PHONY)
diff --git a/scripts/Makefile.asm-headers b/scripts/Makefile.asm-headers
new file mode 100644
index 000000000000..6b8e8318e810
--- /dev/null
+++ b/scripts/Makefile.asm-headers
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0
+# include/asm-generic contains a lot of files that are used
+# verbatim by several architectures.
+#
+# This Makefile generates arch/$(SRCARCH)/include/generated/(uapi/)/asm
+# headers from multiple sources:
+# - a small wrapper to include the corresponding asm-generic/*.h
+# is generated for each file listed as generic-y
+# - uapi/asm/unistd_*.h files listed as syscall-y are generated from
+# syscall.tbl with the __NR_* macros
+# - Corresponding asm/syscall_table_*.h are generated from the same input
+
+PHONY := all
+all:
+
+src := $(srctree)/$(subst /generated,,$(obj))
+
+syscall_abis_32 += common,32
+syscall_abis_64 += common,64
+syscalltbl := $(srctree)/scripts/syscall.tbl
+syshdr-args := --emit-nr
+
+# let architectures override $(syscall_abis_%) and $(syscalltbl)
+-include $(srctree)/arch/$(SRCARCH)/kernel/Makefile.syscalls
+include $(srctree)/scripts/Kbuild.include
+-include $(kbuild-file)
+
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+
+# $(generic)/Kbuild lists mandatory-y. Exclude um since it is a special case.
+ifneq ($(SRCARCH),um)
+include $(srctree)/$(generic)/Kbuild
+endif
+
+redundant := $(filter $(mandatory-y) $(generated-y), $(generic-y))
+redundant += $(foreach f, $(generic-y), $(if $(wildcard $(src)/$(f)),$(f)))
+redundant := $(sort $(redundant))
+$(if $(redundant),\
+ $(warning redundant generic-y found in $(src)/Kbuild: $(redundant)))
+
+# If the arch does not implement mandatory headers, fall back to asm-generic ones.
+mandatory-y := $(filter-out $(generated-y), $(mandatory-y))
+generic-y += $(foreach f, $(mandatory-y), $(if $(wildcard $(src)/$(f)),,$(f)))
+
+generic-y := $(addprefix $(obj)/, $(generic-y))
+syscall-y := $(addprefix $(obj)/, $(syscall-y))
+generated-y := $(addprefix $(obj)/, $(generated-y))
+
+# Remove stale wrappers when the corresponding files are removed from generic-y
+old-headers := $(wildcard $(obj)/*.h)
+unwanted := $(filter-out $(generic-y) $(generated-y) $(syscall-y),$(old-headers))
+
+quiet_cmd_wrap = WRAP $@
+ cmd_wrap = echo "\#include <asm-generic/$*.h>" > $@
+
+quiet_cmd_remove = REMOVE $(unwanted)
+ cmd_remove = rm -f $(unwanted)
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) \
+ $(if $(syshdr-args-$*),$(syshdr-args-$*),$(syshdr-args)) \
+ $(if $(syscall_compat),--prefix "compat$*_") \
+ --abis $(subst $(space),$(comma),$(strip $(syscall_abis_$*))) \
+ $< $@
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) \
+ $(if $(systbl-args-$*),$(systbl-args-$*),$(systbl-args)) \
+ --abis $(subst $(space),$(comma),$(strip $(syscall_abis_$*))) \
+ $< $@
+
+all: $(generic-y) $(syscall-y)
+ $(if $(unwanted),$(call cmd,remove))
+ @:
+
+$(obj)/%.h: $(srctree)/$(generic)/%.h
+ $(call cmd,wrap)
+
+$(obj)/unistd_%.h: $(syscalltbl) $(syshdr) FORCE
+ $(call if_changed,syshdr)
+
+$(obj)/unistd_compat_%.h: syscall_compat:=1
+$(obj)/unistd_compat_%.h: $(syscalltbl) $(syshdr) FORCE
+ $(call if_changed,syshdr)
+
+$(obj)/syscall_table_%.h: $(syscalltbl) $(systbl) FORCE
+ $(call if_changed,systbl)
+
+# Create output directory. Skip it if at least one old header exists
+# since we know the output directory already exists.
+ifeq ($(old-headers),)
+$(shell mkdir -p $(obj))
+endif
+
+FORCE:
+
+.PHONY: $(PHONY)
diff --git a/scripts/Makefile.btf b/scripts/Makefile.btf
index 2d6e5ed9081e..b75f09f3f424 100644
--- a/scripts/Makefile.btf
+++ b/scripts/Makefile.btf
@@ -14,17 +14,20 @@ pahole-flags-$(call test-ge, $(pahole-ver), 121) += --btf_gen_floats
pahole-flags-$(call test-ge, $(pahole-ver), 122) += -j
-ifeq ($(pahole-ver), 125)
-pahole-flags-y += --skip_encoding_btf_inconsistent_proto --btf_gen_optimized
-endif
+pahole-flags-$(call test-ge, $(pahole-ver), 125) += --skip_encoding_btf_inconsistent_proto --btf_gen_optimized
else
# Switch to using --btf_features for v1.26 and later.
-pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func
+pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs
+
+ifneq ($(KBUILD_EXTMOD),)
+module-pahole-flags-$(call test-ge, $(pahole-ver), 126) += --btf_features=distilled_base
+endif
endif
pahole-flags-$(CONFIG_PAHOLE_HAS_LANG_EXCLUDE) += --lang_exclude=rust
export PAHOLE_FLAGS := $(pahole-flags-y)
+export MODULE_PAHOLE_FLAGS := $(module-pahole-flags-y)
diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
index 67956f6496a5..9d920419a62c 100644
--- a/scripts/Makefile.dtbinst
+++ b/scripts/Makefile.dtbinst
@@ -17,7 +17,7 @@ include $(srctree)/scripts/Kbuild.include
dst := $(INSTALL_DTBS_PATH)
quiet_cmd_dtb_install = INSTALL $@
- cmd_dtb_install = install -D $< $@
+ cmd_dtb_install = install -D -m 0644 $< $@
$(dst)/%: $(obj)/%
$(call cmd,dtb_install)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index d35f55e0d141..e85be7721a48 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -146,7 +146,7 @@ $(call multi_depend, $(host-cxxmulti), , -objs -cxxobjs)
# Create .o file from a single .cc (C++) file
quiet_cmd_host-cxxobjs = HOSTCXX $@
cmd_host-cxxobjs = $(HOSTCXX) $(hostcxx_flags) -c -o $@ $<
-$(host-cxxobjs): $(obj)/%.o: $(src)/%.cc FORCE
+$(host-cxxobjs): $(obj)/%.o: $(obj)/%.cc FORCE
$(call if_changed_dep,host-cxxobjs)
# Create executable from a single Rust crate (which may consist of
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index 3bec9043e4f3..1fa98b5e952b 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -41,7 +41,7 @@ quiet_cmd_btf_ko = BTF [M] $@
if [ ! -f vmlinux ]; then \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
else \
- LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
+ LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) $(MODULE_PAHOLE_FLAGS) --btf_base vmlinux $@; \
$(RESOLVE_BTFIDS) -b vmlinux $@; \
fi;
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 38653f3e8108..bf016af8bf8a 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -103,7 +103,7 @@ debian-orig: private version = $(shell dpkg-parsechangelog -S Version | sed 's/-
debian-orig: private orig-name = $(source)_$(version).orig.tar$(debian-orig-suffix)
debian-orig: mkdebian-opts = --need-source
debian-orig: linux.tar$(debian-orig-suffix) debian
- $(Q)if [ "$(df --output=target .. 2>/dev/null)" = "$(df --output=target $< 2>/dev/null)" ]; then \
+ $(Q)if [ "$$(df --output=target .. 2>/dev/null)" = "$$(df --output=target $< 2>/dev/null)" ]; then \
ln -f $< ../$(orig-name); \
else \
cp $< ../$(orig-name); \
diff --git a/scripts/atomic/kerneldoc/sub_and_test b/scripts/atomic/kerneldoc/sub_and_test
index d3760f7749d4..96615e50836b 100644
--- a/scripts/atomic/kerneldoc/sub_and_test
+++ b/scripts/atomic/kerneldoc/sub_and_test
@@ -1,7 +1,7 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic subtract and test if zero with ${desc_order} ordering
- * @i: ${int} value to add
+ * @i: ${int} value to subtract
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v - @i) with ${desc_order} ordering.
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch
index 52e5bfb61fd0..cc62980cfa6e 100644
--- a/scripts/const_structs.checkpatch
+++ b/scripts/const_structs.checkpatch
@@ -64,7 +64,17 @@ platform_suspend_ops
proc_ops
proto_ops
pwm_ops
+reg_default
+reg_field
+reg_sequence
regmap_access_table
+regmap_bus
+regmap_config
+regmap_irq
+regmap_irq_chip
+regmap_irq_sub_irq_map
+regmap_range
+regmap_range_cfg
regulator_ops
reset_control_ops
rpc_pipe_ops
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index a18657072541..b47f4daa4515 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -3,7 +3,7 @@
# *** Also keep .gitignore in sync when changing ***
hostprogs-always-$(CONFIG_DTC) += dtc fdtoverlay
-hostprogs-always-$(CHECK_DT_BINDING) += dtc
+hostprogs-always-$(CHECK_DTBS) += dtc
dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
srcpos.o checks.o util.o
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 587415a52b6f..fe0cc45f03be 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -85,15 +85,17 @@ command -v ${ADDR2LINE} >/dev/null 2>&1 || die "${ADDR2LINE} isn't installed"
# init/main.c! This only works for vmlinux. Otherwise it falls back to
# printing the absolute path.
find_dir_prefix() {
- local objfile=$1
-
- local start_kernel_addr=$(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' |
+ local start_kernel_addr=$(echo "${ELF_SYMS}" | sed 's/\[.*\]//' |
${AWK} '$8 == "start_kernel" {printf "0x%s", $2}')
[[ -z $start_kernel_addr ]] && return
- local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
- [[ -z $file_line ]] && return
+ run_addr2line ${start_kernel_addr} ""
+ [[ -z $ADDR2LINE_OUT ]] && return
+ local file_line=${ADDR2LINE_OUT#* at }
+ if [[ -z $file_line ]] || [[ $file_line = $ADDR2LINE_OUT ]]; then
+ return
+ fi
local prefix=${file_line%init/main.c:*}
if [[ -z $prefix ]] || [[ $prefix = $file_line ]]; then
return
@@ -103,6 +105,71 @@ find_dir_prefix() {
return 0
}
+run_readelf() {
+ local objfile=$1
+ local out=$(${READELF} --file-header --section-headers --symbols --wide $objfile)
+
+ # This assumes that readelf first prints the file header, then the section headers, then the symbols.
+ # Note: It seems that GNU readelf does not prefix section headers with the "There are X section headers"
+ # line when multiple options are given, so let's also match with the "Section Headers:" line.
+ ELF_FILEHEADER=$(echo "${out}" | sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/q;p')
+ ELF_SECHEADERS=$(echo "${out}" | sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/,$p' | sed -n '/Symbol table .* contains [0-9]* entries:/q;p')
+ ELF_SYMS=$(echo "${out}" | sed -n '/Symbol table .* contains [0-9]* entries:/,$p')
+}
+
+check_vmlinux() {
+ # vmlinux uses absolute addresses in the section table rather than
+ # section offsets.
+ IS_VMLINUX=0
+ local file_type=$(echo "${ELF_FILEHEADER}" |
+ ${AWK} '$1 == "Type:" { print $2; exit }')
+ if [[ $file_type = "EXEC" ]] || [[ $file_type == "DYN" ]]; then
+ IS_VMLINUX=1
+ fi
+}
+
+init_addr2line() {
+ local objfile=$1
+
+ check_vmlinux
+
+ ADDR2LINE_ARGS="--functions --pretty-print --inlines --addresses --exe=$objfile"
+ if [[ $IS_VMLINUX = 1 ]]; then
+ # If the executable file is vmlinux, we don't pass section names to
+ # addr2line, so we can launch it now as a single long-running process.
+ coproc ADDR2LINE_PROC (${ADDR2LINE} ${ADDR2LINE_ARGS})
+ fi
+}
+
+run_addr2line() {
+ local addr=$1
+ local sec_name=$2
+
+ if [[ $IS_VMLINUX = 1 ]]; then
+ # We send to the addr2line process: (1) the address, then (2) a sentinel
+ # value, i.e., something that can't be interpreted as a valid address
+ # (i.e., ","). This causes addr2line to write out: (1) the answer for
+ # our address, then (2) either "?? ??:0" or "0x0...0: ..." (if
+ # using binutils' addr2line), or "," (if using LLVM's addr2line).
+ echo ${addr} >& "${ADDR2LINE_PROC[1]}"
+ echo "," >& "${ADDR2LINE_PROC[1]}"
+ local first_line
+ read -r first_line <& "${ADDR2LINE_PROC[0]}"
+ ADDR2LINE_OUT=$(echo "${first_line}" | sed 's/^0x[0-9a-fA-F]*: //')
+ while read -r line <& "${ADDR2LINE_PROC[0]}"; do
+ if [[ "$line" == "?? ??:0" ]] || [[ "$line" == "," ]] || [[ $(echo "$line" | ${GREP} "^0x00*: ") ]]; then
+ break
+ fi
+ ADDR2LINE_OUT+=$'\n'$(echo "$line" | sed 's/^0x[0-9a-fA-F]*: //')
+ done
+ else
+ # Run addr2line as a single invocation.
+ local sec_arg
+ [[ -z $sec_name ]] && sec_arg="" || sec_arg="--section=${sec_name}"
+ ADDR2LINE_OUT=$(${ADDR2LINE} ${ADDR2LINE_ARGS} ${sec_arg} ${addr} | sed 's/^0x[0-9a-fA-F]*: //')
+ fi
+}
+
__faddr2line() {
local objfile=$1
local func_addr=$2
@@ -113,8 +180,6 @@ __faddr2line() {
local func_offset=${func_addr#*+}
func_offset=${func_offset%/*}
local user_size=
- local file_type
- local is_vmlinux=0
[[ $func_addr =~ "/" ]] && user_size=${func_addr#*/}
if [[ -z $sym_name ]] || [[ -z $func_offset ]] || [[ $sym_name = $func_addr ]]; then
@@ -123,14 +188,6 @@ __faddr2line() {
return
fi
- # vmlinux uses absolute addresses in the section table rather than
- # section offsets.
- local file_type=$(${READELF} --file-header $objfile |
- ${AWK} '$1 == "Type:" { print $2; exit }')
- if [[ $file_type = "EXEC" ]] || [[ $file_type == "DYN" ]]; then
- is_vmlinux=1
- fi
-
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates, in which case we print all
# matches.
@@ -143,8 +200,7 @@ __faddr2line() {
local sec_name
# Get the section size:
- sec_size=$(${READELF} --section-headers --wide $objfile |
- sed 's/\[ /\[/' |
+ sec_size=$(echo "${ELF_SECHEADERS}" | sed 's/\[ /\[/' |
${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print "0x" $6; exit }')
if [[ -z $sec_size ]]; then
@@ -154,8 +210,7 @@ __faddr2line() {
fi
# Get the section name:
- sec_name=$(${READELF} --section-headers --wide $objfile |
- sed 's/\[ /\[/' |
+ sec_name=$(echo "${ELF_SECHEADERS}" | sed 's/\[ /\[/' |
${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print $2; exit }')
if [[ -z $sec_name ]]; then
@@ -197,7 +252,7 @@ __faddr2line() {
found=2
break
fi
- done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2)
+ done < <(echo "${ELF_SYMS}" | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2 | ${GREP} -A1 --no-group-separator " ${sym_name}$")
if [[ $found = 0 ]]; then
warn "can't find symbol: sym_name: $sym_name sym_sec: $sym_sec sym_addr: $sym_addr sym_elf_size: $sym_elf_size"
@@ -249,9 +304,8 @@ __faddr2line() {
# Pass section address to addr2line and strip absolute paths
# from the output:
- local args="--functions --pretty-print --inlines --exe=$objfile"
- [[ $is_vmlinux = 0 ]] && args="$args --section=$sec_name"
- local output=$(${ADDR2LINE} $args $addr | sed "s; $dir_prefix\(\./\)*; ;")
+ run_addr2line $addr $sec_name
+ local output=$(echo "${ADDR2LINE_OUT}" | sed "s; $dir_prefix\(\./\)*; ;")
[[ -z $output ]] && continue
# Default output (non --list):
@@ -278,7 +332,7 @@ __faddr2line() {
DONE=1
- done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v fn=$sym_name '$8 == fn')
+ done < <(echo "${ELF_SYMS}" | sed 's/\[.*\]//' | ${AWK} -v fn=$sym_name '$8 == fn')
}
[[ $# -lt 2 ]] && usage
@@ -291,10 +345,14 @@ LIST=0
[[ ! -f $objfile ]] && die "can't find objfile $objfile"
shift
-${READELF} --section-headers --wide $objfile | ${GREP} -q '\.debug_info' || die "CONFIG_DEBUG_INFO not enabled"
+run_readelf $objfile
+
+echo "${ELF_SECHEADERS}" | ${GREP} -q '\.debug_info' || die "CONFIG_DEBUG_INFO not enabled"
+
+init_addr2line $objfile
DIR_PREFIX=supercalifragilisticexpialidocious
-find_dir_prefix $objfile
+find_dir_prefix
FIRST=1
while [[ $# -gt 0 ]]; do
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 1ae39b9f4a95..3222c1070444 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -62,11 +62,7 @@
#include "pass_manager.h"
#include "predict.h"
#include "ipa-utils.h"
-
-#if BUILDING_GCC_VERSION >= 8000
#include "stringpool.h"
-#endif
-
#include "attribs.h"
#include "varasm.h"
#include "stor-layout.h"
@@ -78,7 +74,6 @@
#include "context.h"
#include "tree-ssa-alias.h"
#include "tree-ssa.h"
-#include "stringpool.h"
#if BUILDING_GCC_VERSION >= 7000
#include "tree-vrp.h"
#endif
diff --git a/scripts/gdb/linux/Makefile b/scripts/gdb/linux/Makefile
index d77ad9079d0f..fcd32fcf3ae0 100644
--- a/scripts/gdb/linux/Makefile
+++ b/scripts/gdb/linux/Makefile
@@ -5,7 +5,7 @@ ifdef building_out_of_srctree
symlinks := $(patsubst $(src)/%,%,$(wildcard $(src)/*.py))
quiet_cmd_symlink = SYMLINK $@
- cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(abspath $(srctree))/$(src)/%,$@) $@
+ cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(abspath $(src))/%,$@) $@
always-y += $(symlinks)
$(addprefix $(obj)/, $(symlinks)): FORCE
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 387503daf0f7..85b53069ba7a 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -533,19 +533,6 @@ int conf_read(const char *name)
*/
if (sym->visible == no && !conf_unsaved)
sym->flags &= ~SYMBOL_DEF_USER;
- switch (sym->type) {
- case S_STRING:
- case S_INT:
- case S_HEX:
- /* Reset a string value if it's out of range */
- if (sym_string_within_range(sym, sym->def[S_DEF_USER].val))
- break;
- sym->flags &= ~SYMBOL_VALID;
- conf_unsaved++;
- break;
- default:
- break;
- }
}
}
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index a290de36307b..fcc190b67b6f 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -397,35 +397,6 @@ static struct expr *expr_eliminate_yn(struct expr *e)
}
/*
- * bool FOO!=n => FOO
- */
-struct expr *expr_trans_bool(struct expr *e)
-{
- if (!e)
- return NULL;
- switch (e->type) {
- case E_AND:
- case E_OR:
- case E_NOT:
- e->left.expr = expr_trans_bool(e->left.expr);
- e->right.expr = expr_trans_bool(e->right.expr);
- break;
- case E_UNEQUAL:
- // FOO!=n -> FOO
- if (e->left.sym->type == S_TRISTATE) {
- if (e->right.sym == &symbol_no) {
- e->type = E_SYMBOL;
- e->right.sym = NULL;
- }
- }
- break;
- default:
- ;
- }
- return e;
-}
-
-/*
* e1 || e2 -> ?
*/
static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
@@ -476,7 +447,7 @@ static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_yes);
}
}
- if (sym1->type == S_BOOLEAN && sym1 == sym2) {
+ if (sym1->type == S_BOOLEAN) {
if ((e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && e2->type == E_SYMBOL) ||
(e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && e1->type == E_SYMBOL))
return expr_alloc_symbol(&symbol_yes);
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index d965e427753e..7c0c242318bc 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -284,7 +284,6 @@ void expr_free(struct expr *e);
void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
int expr_eq(struct expr *e1, struct expr *e2);
tristate expr_calc_value(struct expr *e);
-struct expr *expr_trans_bool(struct expr *e);
struct expr *expr_eliminate_dups(struct expr *e);
struct expr *expr_transform(struct expr *e);
int expr_contains_symbol(struct expr *dep, struct symbol *sym);
@@ -302,11 +301,6 @@ static inline int expr_is_yes(struct expr *e)
return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
}
-static inline int expr_is_no(struct expr *e)
-{
- return e && (e->type == E_SYMBOL && e->left.sym == &symbol_no);
-}
-
#ifdef __cplusplus
}
#endif
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index cc400ffe6615..e04dbafd3add 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -1422,7 +1422,6 @@ int main(int ac, char *av[])
conf_parse(name);
fixup_rootmenu(&rootmenu);
- conf_read(NULL);
/* Load the interface and connect signals */
init_main_window(glade_file);
@@ -1430,6 +1429,8 @@ int main(int ac, char *av[])
init_left_tree();
init_right_tree();
+ conf_read(NULL);
+
switch (view_mode) {
case SINGLE_VIEW:
display_tree_part();
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 53151c5a6028..eef9b63cdf11 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -398,8 +398,6 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
dep = expr_transform(dep);
dep = expr_alloc_and(expr_copy(basedep), dep);
dep = expr_eliminate_dups(dep);
- if (menu->sym && menu->sym->type != S_TRISTATE)
- dep = expr_trans_bool(dep);
prop->visible.expr = dep;
/*
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index aa0e25ee5119..0e439d3d48d1 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -14,6 +14,7 @@
struct symbol symbol_yes = {
.name = "y",
+ .type = S_TRISTATE,
.curr = { "y", yes },
.menus = LIST_HEAD_INIT(symbol_yes.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -21,6 +22,7 @@ struct symbol symbol_yes = {
struct symbol symbol_mod = {
.name = "m",
+ .type = S_TRISTATE,
.curr = { "m", mod },
.menus = LIST_HEAD_INIT(symbol_mod.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -28,6 +30,7 @@ struct symbol symbol_mod = {
struct symbol symbol_no = {
.name = "n",
+ .type = S_TRISTATE,
.curr = { "n", no },
.menus = LIST_HEAD_INIT(symbol_no.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -820,8 +823,7 @@ const char *sym_get_string_value(struct symbol *sym)
case no:
return "n";
case mod:
- sym_calc_value(modules_sym);
- return (modules_sym->curr.tri == no) ? "n" : "m";
+ return "m";
case yes:
return "y";
}
diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
index a78b804b680c..b9513d224476 100755
--- a/scripts/ld-version.sh
+++ b/scripts/ld-version.sh
@@ -57,9 +57,11 @@ else
fi
fi
-# Some distributions append a package release number, as in 2.34-4.fc32
-# Trim the hyphen and any characters that follow.
-version=${version%-*}
+# There may be something after the version, such as a distribution's package
+# release number (like Fedora's "2.34-4.fc32") or punctuation (like LLD briefly
+# added before the "compatible with GNU linkers" string), so remove everything
+# after just numbers and periods.
+version=${version%%[!0-9.]*}
cversion=$(get_canonical_version $version)
min_cversion=$(get_canonical_version $min_version)
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 7862a8101747..518c70b8db50 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -179,10 +179,10 @@ kallsyms_step()
kallsyms_S=${kallsyms_vmlinux}.S
vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o}
- mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms ${kallsymso_prev}
+ mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms
kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
- info AS ${kallsyms_S}
+ info AS ${kallsymso}
${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
-c -o ${kallsymso} ${kallsyms_S}
@@ -193,7 +193,7 @@ kallsyms_step()
mksysmap()
{
info NM ${2}
- ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2} ${3}
+ ${NM} -n "${1}" | sed -f "${srctree}/scripts/mksysmap" > "${2}"
}
sorttable()
@@ -201,7 +201,6 @@ sorttable()
${objtree}/scripts/sorttable ${1}
}
-# Delete output files in case of error
cleanup()
{
rm -f .btf.*
@@ -282,7 +281,7 @@ if is_enabled CONFIG_DEBUG_INFO_BTF && is_enabled CONFIG_BPF; then
${RESOLVE_BTFIDS} vmlinux
fi
-mksysmap vmlinux System.map ${kallsymso}
+mksysmap vmlinux System.map
if is_enabled CONFIG_BUILDTIME_TABLE_SORT; then
info SORTTAB vmlinux
diff --git a/scripts/make_fit.py b/scripts/make_fit.py
index 3de90c5a094b..263147df80a4 100755
--- a/scripts/make_fit.py
+++ b/scripts/make_fit.py
@@ -190,7 +190,7 @@ def output_dtb(fsw, seq, fname, arch, compress):
Args:
fsw (libfdt.FdtSw): Object to use for writing
seq (int): Sequence number (1 for first)
- fmame (str): Filename containing the DTB
+ fname (str): Filename containing the DTB
arch: FIT architecture, e.g. 'arm64'
compress (str): Compressed algorithm, e.g. 'gzip'
@@ -211,7 +211,6 @@ def output_dtb(fsw, seq, fname, arch, compress):
fsw.property_string('type', 'flat_dt')
fsw.property_string('arch', arch)
fsw.property_string('compression', compress)
- fsw.property('compatible', bytes(compat))
with open(fname, 'rb') as inf:
compressed = compress_data(inf, compress)
diff --git a/scripts/mksysmap b/scripts/mksysmap
index 57ff5656d566..c12723a04655 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -1,22 +1,16 @@
-#!/bin/sh -x
-# Based on the vmlinux file create the System.map file
+#!/bin/sed -f
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# sed script to filter out symbols that are not needed for System.map,
+# or not suitable for kallsyms. The input should be 'nm -n <file>'.
+#
# System.map is used by module-init tools and some debugging
# tools to retrieve the actual addresses of symbols in the kernel.
#
-# Usage
-# mksysmap vmlinux System.map [exclude]
-
-
-#####
-# Generate System.map (actual filename passed as second argument)
-# The following refers to the symbol type as per nm(1).
-
# readprofile starts reading symbols when _stext is found, and
# continue until it finds a symbol which is not either of 'T', 't',
# 'W' or 'w'.
#
-
-${NM} -n ${1} | sed >${2} -e "
# ---------------------------------------------------------------------------
# Ignored symbol types
#
@@ -92,13 +86,3 @@ ${NM} -n ${1} | sed >${2} -e "
# ppc stub
/\.long_branch\./d
/\.plt_branch\./d
-
-# ---------------------------------------------------------------------------
-# Ignored kallsyms symbols
-#
-# If the 3rd parameter exists, symbols from it will be omitted from the output.
-# This makes kallsyms have the identical symbol lists in the step 1 and 2.
-# Without this, the step2 would get new symbols generated by scripts/kallsyms.c
-# when CONFIG_KALLSYMS_ALL is enabled. That might require one more pass.
-$(if [ $# -ge 3 ]; then ${NM} ${3} | sed -n '/ U /!s:.* \([^ ]*\)$:/ \1$/d:p'; fi)
-"
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 937294ff164f..f48d72d22dc2 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1647,10 +1647,11 @@ static void read_symbols(const char *modname)
namespace = get_next_modinfo(&info, "import_ns",
namespace);
}
+
+ if (extra_warn && !get_modinfo(&info, "description"))
+ warn("missing MODULE_DESCRIPTION() in %s\n", modname);
}
- if (extra_warn && !get_modinfo(&info, "description"))
- warn("missing MODULE_DESCRIPTION() in %s\n", modname);
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = remove_dot(info.strtab + sym->st_name);
diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
index e095eb1e290e..c52d517b9364 100644
--- a/scripts/package/kernel.spec
+++ b/scripts/package/kernel.spec
@@ -57,7 +57,8 @@ patch -p1 < %{SOURCE2}
%install
mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE}
cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz
-%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} modules_install
+# DEPMOD=true makes depmod no-op. We do not package depmod-generated files.
+%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} DEPMOD=true modules_install
%{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE}
cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config
@@ -70,10 +71,7 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
%endif
{
- for x in System.map config kernel modules.builtin \
- modules.builtin.modinfo modules.order vmlinuz; do
- echo "/lib/modules/%{KERNELRELEASE}/${x}"
- done
+ echo "/lib/modules/%{KERNELRELEASE}"
for x in alias alias.bin builtin.alias.bin builtin.bin dep dep.bin \
devname softdep symbols symbols.bin; do
@@ -85,7 +83,6 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
done
if [ -d "%{buildroot}/lib/modules/%{KERNELRELEASE}/dtb" ];then
- echo "/lib/modules/%{KERNELRELEASE}/dtb"
find "%{buildroot}/lib/modules/%{KERNELRELEASE}/dtb" -printf "%%%ghost /boot/dtb-%{KERNELRELEASE}/%%P\n"
fi
diff --git a/scripts/syscall.tbl b/scripts/syscall.tbl
new file mode 100644
index 000000000000..797e20ea99a2
--- /dev/null
+++ b/scripts/syscall.tbl
@@ -0,0 +1,404 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# This file contains the system call numbers for all of the
+# more recently added architectures.
+#
+# As a basic principle, no duplication of functionality
+# should be added, e.g. we don't use lseek when llseek
+# is present. New architectures should use this file
+# and implement the less feature-full calls in user space.
+#
+0 common io_setup sys_io_setup compat_sys_io_setup
+1 common io_destroy sys_io_destroy
+2 common io_submit sys_io_submit compat_sys_io_submit
+3 common io_cancel sys_io_cancel
+4 time32 io_getevents sys_io_getevents_time32
+4 64 io_getevents sys_io_getevents
+5 common setxattr sys_setxattr
+6 common lsetxattr sys_lsetxattr
+7 common fsetxattr sys_fsetxattr
+8 common getxattr sys_getxattr
+9 common lgetxattr sys_lgetxattr
+10 common fgetxattr sys_fgetxattr
+11 common listxattr sys_listxattr
+12 common llistxattr sys_llistxattr
+13 common flistxattr sys_flistxattr
+14 common removexattr sys_removexattr
+15 common lremovexattr sys_lremovexattr
+16 common fremovexattr sys_fremovexattr
+17 common getcwd sys_getcwd
+18 common lookup_dcookie sys_ni_syscall
+19 common eventfd2 sys_eventfd2
+20 common epoll_create1 sys_epoll_create1
+21 common epoll_ctl sys_epoll_ctl
+22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+23 common dup sys_dup
+24 common dup3 sys_dup3
+25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+25 64 fcntl sys_fcntl
+26 common inotify_init1 sys_inotify_init1
+27 common inotify_add_watch sys_inotify_add_watch
+28 common inotify_rm_watch sys_inotify_rm_watch
+29 common ioctl sys_ioctl compat_sys_ioctl
+30 common ioprio_set sys_ioprio_set
+31 common ioprio_get sys_ioprio_get
+32 common flock sys_flock
+33 common mknodat sys_mknodat
+34 common mkdirat sys_mkdirat
+35 common unlinkat sys_unlinkat
+36 common symlinkat sys_symlinkat
+37 common linkat sys_linkat
+# renameat is superseded with flags by renameat2
+38 renameat renameat sys_renameat
+39 common umount2 sys_umount
+40 common mount sys_mount
+41 common pivot_root sys_pivot_root
+43 32 statfs64 sys_statfs64 compat_sys_statfs64
+43 64 statfs sys_statfs
+44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+44 64 fstatfs sys_fstatfs
+45 32 truncate64 sys_truncate64 compat_sys_truncate64
+45 64 truncate sys_truncate
+46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+46 64 ftruncate sys_ftruncate
+47 common fallocate sys_fallocate compat_sys_fallocate
+48 common faccessat sys_faccessat
+49 common chdir sys_chdir
+50 common fchdir sys_fchdir
+51 common chroot sys_chroot
+52 common fchmod sys_fchmod
+53 common fchmodat sys_fchmodat
+54 common fchownat sys_fchownat
+55 common fchown sys_fchown
+56 common openat sys_openat
+57 common close sys_close
+58 common vhangup sys_vhangup
+59 common pipe2 sys_pipe2
+60 common quotactl sys_quotactl
+61 common getdents64 sys_getdents64
+62 32 llseek sys_llseek
+62 64 lseek sys_lseek
+63 common read sys_read
+64 common write sys_write
+65 common readv sys_readv sys_readv
+66 common writev sys_writev sys_writev
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 common preadv sys_preadv compat_sys_preadv
+70 common pwritev sys_pwritev compat_sys_pwritev
+71 32 sendfile64 sys_sendfile64
+71 64 sendfile sys_sendfile64
+72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+72 64 pselect6 sys_pselect6
+73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+73 64 ppoll sys_ppoll
+74 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+75 common vmsplice sys_vmsplice
+76 common splice sys_splice
+77 common tee sys_tee
+78 common readlinkat sys_readlinkat
+79 stat64 fstatat64 sys_fstatat64
+79 newstat fstatat sys_newfstatat
+80 stat64 fstat64 sys_fstat64
+80 newstat fstat sys_newfstat
+81 common sync sys_sync
+82 common fsync sys_fsync
+83 common fdatasync sys_fdatasync
+84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+85 common timerfd_create sys_timerfd_create
+86 time32 timerfd_settime sys_timerfd_settime32
+86 64 timerfd_settime sys_timerfd_settime
+87 time32 timerfd_gettime sys_timerfd_gettime32
+87 64 timerfd_gettime sys_timerfd_gettime
+88 time32 utimensat sys_utimensat_time32
+88 64 utimensat sys_utimensat
+89 common acct sys_acct
+90 common capget sys_capget
+91 common capset sys_capset
+92 common personality sys_personality
+93 common exit sys_exit
+94 common exit_group sys_exit_group
+95 common waitid sys_waitid compat_sys_waitid
+96 common set_tid_address sys_set_tid_address
+97 common unshare sys_unshare
+98 time32 futex sys_futex_time32
+98 64 futex sys_futex
+99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+101 time32 nanosleep sys_nanosleep_time32
+101 64 nanosleep sys_nanosleep
+102 common getitimer sys_getitimer compat_sys_getitimer
+103 common setitimer sys_setitimer compat_sys_setitimer
+104 common kexec_load sys_kexec_load compat_sys_kexec_load
+105 common init_module sys_init_module
+106 common delete_module sys_delete_module
+107 common timer_create sys_timer_create compat_sys_timer_create
+108 time32 timer_gettime sys_timer_gettime32
+108 64 timer_gettime sys_timer_gettime
+109 common timer_getoverrun sys_timer_getoverrun
+110 time32 timer_settime sys_timer_settime32
+110 64 timer_settime sys_timer_settime
+111 common timer_delete sys_timer_delete
+112 time32 clock_settime sys_clock_settime32
+112 64 clock_settime sys_clock_settime
+113 time32 clock_gettime sys_clock_gettime32
+113 64 clock_gettime sys_clock_gettime
+114 time32 clock_getres sys_clock_getres_time32
+114 64 clock_getres sys_clock_getres
+115 time32 clock_nanosleep sys_clock_nanosleep_time32
+115 64 clock_nanosleep sys_clock_nanosleep
+116 common syslog sys_syslog
+117 common ptrace sys_ptrace compat_sys_ptrace
+118 common sched_setparam sys_sched_setparam
+119 common sched_setscheduler sys_sched_setscheduler
+120 common sched_getscheduler sys_sched_getscheduler
+121 common sched_getparam sys_sched_getparam
+122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+124 common sched_yield sys_sched_yield
+125 common sched_get_priority_max sys_sched_get_priority_max
+126 common sched_get_priority_min sys_sched_get_priority_min
+127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+127 64 sched_rr_get_interval sys_sched_rr_get_interval
+128 common restart_syscall sys_restart_syscall
+129 common kill sys_kill
+130 common tkill sys_tkill
+131 common tgkill sys_tgkill
+132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+137 64 rt_sigtimedwait sys_rt_sigtimedwait
+138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+140 common setpriority sys_setpriority
+141 common getpriority sys_getpriority
+142 common reboot sys_reboot
+143 common setregid sys_setregid
+144 common setgid sys_setgid
+145 common setreuid sys_setreuid
+146 common setuid sys_setuid
+147 common setresuid sys_setresuid
+148 common getresuid sys_getresuid
+149 common setresgid sys_setresgid
+150 common getresgid sys_getresgid
+151 common setfsuid sys_setfsuid
+152 common setfsgid sys_setfsgid
+153 common times sys_times compat_sys_times
+154 common setpgid sys_setpgid
+155 common getpgid sys_getpgid
+156 common getsid sys_getsid
+157 common setsid sys_setsid
+158 common getgroups sys_getgroups
+159 common setgroups sys_setgroups
+160 common uname sys_newuname
+161 common sethostname sys_sethostname
+162 common setdomainname sys_setdomainname
+# getrlimit and setrlimit are superseded with prlimit64
+163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit
+164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit
+165 common getrusage sys_getrusage compat_sys_getrusage
+166 common umask sys_umask
+167 common prctl sys_prctl
+168 common getcpu sys_getcpu
+169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+169 64 gettimeofday sys_gettimeofday
+170 time32 settimeofday sys_settimeofday compat_sys_settimeofday
+170 64 settimeofday sys_settimeofday
+171 time32 adjtimex sys_adjtimex_time32
+171 64 adjtimex sys_adjtimex
+172 common getpid sys_getpid
+173 common getppid sys_getppid
+174 common getuid sys_getuid
+175 common geteuid sys_geteuid
+176 common getgid sys_getgid
+177 common getegid sys_getegid
+178 common gettid sys_gettid
+179 common sysinfo sys_sysinfo compat_sys_sysinfo
+180 common mq_open sys_mq_open compat_sys_mq_open
+181 common mq_unlink sys_mq_unlink
+182 time32 mq_timedsend sys_mq_timedsend_time32
+182 64 mq_timedsend sys_mq_timedsend
+183 time32 mq_timedreceive sys_mq_timedreceive_time32
+183 64 mq_timedreceive sys_mq_timedreceive
+184 common mq_notify sys_mq_notify compat_sys_mq_notify
+185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+186 common msgget sys_msgget
+187 common msgctl sys_msgctl compat_sys_msgctl
+188 common msgrcv sys_msgrcv compat_sys_msgrcv
+189 common msgsnd sys_msgsnd compat_sys_msgsnd
+190 common semget sys_semget
+191 common semctl sys_semctl compat_sys_semctl
+192 time32 semtimedop sys_semtimedop_time32
+192 64 semtimedop sys_semtimedop
+193 common semop sys_semop
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+196 common shmat sys_shmat compat_sys_shmat
+197 common shmdt sys_shmdt
+198 common socket sys_socket
+199 common socketpair sys_socketpair
+200 common bind sys_bind
+201 common listen sys_listen
+202 common accept sys_accept
+203 common connect sys_connect
+204 common getsockname sys_getsockname
+205 common getpeername sys_getpeername
+206 common sendto sys_sendto
+207 common recvfrom sys_recvfrom compat_sys_recvfrom
+208 common setsockopt sys_setsockopt sys_setsockopt
+209 common getsockopt sys_getsockopt sys_getsockopt
+210 common shutdown sys_shutdown
+211 common sendmsg sys_sendmsg compat_sys_sendmsg
+212 common recvmsg sys_recvmsg compat_sys_recvmsg
+213 common readahead sys_readahead compat_sys_readahead
+214 common brk sys_brk
+215 common munmap sys_munmap
+216 common mremap sys_mremap
+217 common add_key sys_add_key
+218 common request_key sys_request_key
+219 common keyctl sys_keyctl compat_sys_keyctl
+220 common clone sys_clone
+221 common execve sys_execve compat_sys_execve
+222 32 mmap2 sys_mmap2
+222 64 mmap sys_mmap
+223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+223 64 fadvise64 sys_fadvise64_64
+224 common swapon sys_swapon
+225 common swapoff sys_swapoff
+226 common mprotect sys_mprotect
+227 common msync sys_msync
+228 common mlock sys_mlock
+229 common munlock sys_munlock
+230 common mlockall sys_mlockall
+231 common munlockall sys_munlockall
+232 common mincore sys_mincore
+233 common madvise sys_madvise
+234 common remap_file_pages sys_remap_file_pages
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common migrate_pages sys_migrate_pages
+239 common move_pages sys_move_pages
+240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+241 common perf_event_open sys_perf_event_open
+242 common accept4 sys_accept4
+243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+243 64 recvmmsg sys_recvmmsg
+# Architectures may provide up to 16 syscalls of their own between 244 and 259
+244 arc cacheflush sys_cacheflush
+245 arc arc_settls sys_arc_settls
+246 arc arc_gettls sys_arc_gettls
+247 arc sysfs sys_sysfs
+248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg
+
+244 csky set_thread_area sys_set_thread_area
+245 csky cacheflush sys_cacheflush
+
+244 nios2 cacheflush sys_cacheflush
+
+244 or1k or1k_atomic sys_or1k_atomic
+
+258 riscv riscv_hwprobe sys_riscv_hwprobe
+259 riscv riscv_flush_icache sys_riscv_flush_icache
+
+260 time32 wait4 sys_wait4 compat_sys_wait4
+260 64 wait4 sys_wait4
+261 common prlimit64 sys_prlimit64
+262 common fanotify_init sys_fanotify_init
+263 common fanotify_mark sys_fanotify_mark
+264 common name_to_handle_at sys_name_to_handle_at
+265 common open_by_handle_at sys_open_by_handle_at
+266 time32 clock_adjtime sys_clock_adjtime32
+266 64 clock_adjtime sys_clock_adjtime
+267 common syncfs sys_syncfs
+268 common setns sys_setns
+269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+270 common process_vm_readv sys_process_vm_readv
+271 common process_vm_writev sys_process_vm_writev
+272 common kcmp sys_kcmp
+273 common finit_module sys_finit_module
+274 common sched_setattr sys_sched_setattr
+275 common sched_getattr sys_sched_getattr
+276 common renameat2 sys_renameat2
+277 common seccomp sys_seccomp
+278 common getrandom sys_getrandom
+279 common memfd_create sys_memfd_create
+280 common bpf sys_bpf
+281 common execveat sys_execveat compat_sys_execveat
+282 common userfaultfd sys_userfaultfd
+283 common membarrier sys_membarrier
+284 common mlock2 sys_mlock2
+285 common copy_file_range sys_copy_file_range
+286 common preadv2 sys_preadv2 compat_sys_preadv2
+287 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+288 common pkey_mprotect sys_pkey_mprotect
+289 common pkey_alloc sys_pkey_alloc
+290 common pkey_free sys_pkey_free
+291 common statx sys_statx
+292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+292 64 io_pgetevents sys_io_pgetevents
+293 common rseq sys_rseq
+294 common kexec_file_load sys_kexec_file_load
+# 295 through 402 are unassigned, to sync up with the generic numbers; don't use
+403 32 clock_gettime64 sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 memfd_secret memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
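
The block above is the tail of the shared syscall table, ending with the entries added in this cycle (futex_wake/futex_wait/futex_requeue, statmount/listmount, the lsm_* calls and mseal). As a quick illustration of how the numbering is consumed from userspace, the sketch below invokes mseal() directly by the number shown in the table; the (addr, len, flags) prototype is an assumption here, and on a kernel or architecture without the entry the call simply fails with ENOSYS.

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#ifndef __NR_mseal
#define __NR_mseal 462	/* number taken from the table above */
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* assumed prototype: mseal(addr, len, flags) */
	if (syscall(__NR_mseal, p, len, 0UL) != 0)
		printf("mseal: %s\n", strerror(errno));
	else
		printf("mapping sealed\n");
	return 0;
}
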
diff --git a/scripts/syscalltbl.sh b/scripts/syscalltbl.sh
index 6abe143889ef..6a903b87a7c2 100755
--- a/scripts/syscalltbl.sh
+++ b/scripts/syscalltbl.sh
@@ -54,7 +54,7 @@ nxt=0
grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | {
- while read nr abi name native compat ; do
+ while read nr abi name native compat noreturn; do
if [ $nxt -gt $nr ]; then
echo "error: $infile: syscall table is not sorted or duplicates the same syscall number" >&2
@@ -66,7 +66,21 @@ grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | {
nxt=$((nxt + 1))
done
- if [ -n "$compat" ]; then
+ if [ "$compat" = "-" ]; then
+ unset compat
+ fi
+
+ if [ -n "$noreturn" ]; then
+ if [ "$noreturn" != "noreturn" ]; then
+ echo "error: $infile: invalid string \"$noreturn\" in 'noreturn' column"
+ exit 1
+ fi
+ if [ -n "$compat" ]; then
+ echo "__SYSCALL_COMPAT_NORETURN($nr, $native, $compat)"
+ else
+ echo "__SYSCALL_NORETURN($nr, $native)"
+ fi
+ elif [ -n "$compat" ]; then
echo "__SYSCALL_WITH_COMPAT($nr, $native, $compat)"
elif [ -n "$native" ]; then
echo "__SYSCALL($nr, $native)"
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index effbf5982be1..2cff851ebfd7 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -255,21 +255,6 @@ config INIT_ON_FREE_DEFAULT_ON
touching "cold" memory areas. Most cases see 3-5% impact. Some
synthetic workloads have measured as high as 8%.
-config INIT_MLOCKED_ON_FREE_DEFAULT_ON
- bool "Enable mlocked memory zeroing on free"
- depends on !KMSAN
- help
- This config has the effect of setting "init_mlocked_on_free=1"
- on the kernel command line. If it is enabled, all mlocked process
- memory is zeroed when freed. This restriction to mlocked memory
- improves performance over "init_on_free" but can still be used to
- protect confidential data like key material from content exposures
- to other processes, as well as live forensics and cold boot attacks.
- Any non-mlocked memory is not cleared before it is reassigned. This
- configuration can be overwritten by setting "init_mlocked_on_free=0"
- on the command line. The "init_on_free" boot option takes
- precedence over "init_mlocked_on_free".
-
config CC_HAS_ZERO_CALL_USED_REGS
def_bool $(cc-option,-fzero-call-used-regs=used-gpr)
# https://github.com/ClangBuiltLinux/linux/issues/1766
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 45beb1c5f747..6b5181c668b5 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -217,7 +217,7 @@ void aa_audit_rule_free(void *vrule)
}
}
-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp)
{
struct aa_audit_rule *rule;
@@ -230,14 +230,14 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
return -EINVAL;
}
- rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(struct aa_audit_rule), gfp);
if (!rule)
return -ENOMEM;
/* Currently rules are treated as coming from the root ns */
rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
- GFP_KERNEL, true, false);
+ gfp, true, false);
if (IS_ERR(rule->label)) {
int err = PTR_ERR(rule->label);
aa_audit_rule_free(rule);
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index acbb03b9bd25..0c8cc86b417b 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -200,7 +200,7 @@ static inline int complain_error(int error)
}
void aa_audit_rule_free(void *vrule);
-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp);
int aa_audit_rule_known(struct audit_krule *rule);
int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 3e568126cd48..c51e24d24d1e 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -546,7 +546,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
#else
static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return -EINVAL;
}
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index abdd22007ed8..e4a79a9b2d58 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -427,8 +427,6 @@ static void __init remove_securityfs_measurement_lists(struct dentry **lists)
kfree(lists);
}
-
- securityfs_measurement_list_count = 0;
}
static int __init create_securityfs_measurement_lists(void)
@@ -625,6 +623,7 @@ out:
securityfs_remove(binary_runtime_measurements);
remove_securityfs_measurement_lists(ascii_securityfs_measurement_lists);
remove_securityfs_measurement_lists(binary_securityfs_measurement_lists);
+ securityfs_measurement_list_count = 0;
securityfs_remove(ima_symlink);
securityfs_remove(ima_dir);
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index c0556907c2e6..09da8e639239 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -401,7 +401,8 @@ static void ima_free_rule(struct ima_rule_entry *entry)
kfree(entry);
}
-static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
+ gfp_t gfp)
{
struct ima_rule_entry *nentry;
int i;
@@ -410,7 +411,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
* Immutable elements are copied over as pointers and data; only
* lsm rules can change
*/
- nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
+ nentry = kmemdup(entry, sizeof(*nentry), gfp);
if (!nentry)
return NULL;
@@ -425,7 +426,8 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
nentry->lsm[i].args_p,
- &nentry->lsm[i].rule);
+ &nentry->lsm[i].rule,
+ gfp);
if (!nentry->lsm[i].rule)
pr_warn("rule for LSM \'%s\' is undefined\n",
nentry->lsm[i].args_p);
@@ -438,7 +440,7 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
int i;
struct ima_rule_entry *nentry;
- nentry = ima_lsm_copy_rule(entry);
+ nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
if (!nentry)
return -ENOMEM;
@@ -664,7 +666,7 @@ retry:
}
if (rc == -ESTALE && !rule_reinitialized) {
- lsm_rule = ima_lsm_copy_rule(rule);
+ lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
if (lsm_rule) {
rule_reinitialized = true;
goto retry;
@@ -1140,7 +1142,8 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
entry->lsm[lsm_rule].type = audit_type;
result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
entry->lsm[lsm_rule].args_p,
- &entry->lsm[lsm_rule].rule);
+ &entry->lsm[lsm_rule].rule,
+ GFP_KERNEL);
if (!entry->lsm[lsm_rule].rule) {
pr_warn("rule for LSM \'%s\' is undefined\n",
entry->lsm[lsm_rule].args_p);
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 8af2136069d2..831cb84fd75a 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -1040,4 +1040,5 @@ static void __exit cleanup_encrypted(void)
late_initcall(init_encrypted);
module_exit(cleanup_encrypted);
+MODULE_DESCRIPTION("Encrypted key type");
MODULE_LICENSE("GPL");
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 4bc3e9398ee3..ab927a142f51 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1694,7 +1694,7 @@ long keyctl_session_to_parent(void)
goto unlock;
/* cancel an already pending keyring replacement */
- oldwork = task_work_cancel(parent, key_change_session_keyring);
+ oldwork = task_work_cancel_func(parent, key_change_session_keyring);
/* the replacement session keyring is applied just prior to userspace
* restarting */
diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
index 5113aeae5628..e2d9644efde1 100644
--- a/security/keys/trusted-keys/trusted_core.c
+++ b/security/keys/trusted-keys/trusted_core.c
@@ -395,4 +395,5 @@ static void __exit cleanup_trusted(void)
late_initcall(init_trusted);
module_exit(cleanup_trusted);
+MODULE_DESCRIPTION("Trusted Key type");
MODULE_LICENSE("GPL");
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index 22d8b7c28074..7877a64cc6b8 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -1110,6 +1110,7 @@ static int current_check_refer_path(struct dentry *const old_dentry,
bool allow_parent1, allow_parent2;
access_mask_t access_request_parent1, access_request_parent2;
struct path mnt_dir;
+ struct dentry *old_parent;
layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
@@ -1157,9 +1158,17 @@ static int current_check_refer_path(struct dentry *const old_dentry,
mnt_dir.mnt = new_dir->mnt;
mnt_dir.dentry = new_dir->mnt->mnt_root;
+ /*
+ * old_dentry may be the root of the common mount point and
+ * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
+ * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because
+ * we keep a reference to old_dentry.
+ */
+ old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
+ old_dentry->d_parent;
+
/* new_dir->dentry is equal to new_dentry->d_parent */
- allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
- old_dentry->d_parent,
+ allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
&layer_masks_parent1);
allow_parent2 = collect_domain_accesses(
dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
diff --git a/security/security.c b/security/security.c
index e5da848c50b9..8cee5b6c6e6d 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2278,7 +2278,20 @@ int security_inode_getattr(const struct path *path)
* @size: size of xattr value
* @flags: flags
*
- * Check permission before setting the extended attributes.
+ * This hook performs the desired permission checks before setting the extended
+ * attributes (xattrs) on @dentry. It is important to note that we have some
+ * additional logic before the main LSM implementation calls to detect if we
+ * need to perform an additional capability check at the LSM layer.
+ *
+ * Normally we enforce a capability check prior to executing the various LSM
+ * hook implementations, but if an LSM wants to avoid this capability check,
+ * it can register an 'inode_xattr_skipcap' hook and return a value of 1 for
+ * xattrs for which it wants to skip the capability check, leaving the LSM
+ * fully responsible for enforcing the access control for the specific xattr.
+ * If all of the enabled LSMs refrain from registering an
+ * 'inode_xattr_skipcap' hook, or return 0 (the default return value), the
+ * capability check is still performed.
*
* Return: Returns 0 if permission is granted.
*/
@@ -2286,20 +2299,20 @@ int security_inode_setxattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- int ret;
+ int rc;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
- /*
- * SELinux and Smack integrate the cap call,
- * so assume that all LSMs supplying this call do so.
- */
- ret = call_int_hook(inode_setxattr, idmap, dentry, name, value, size,
- flags);
- if (ret == 1)
- ret = cap_inode_setxattr(dentry, name, value, size, flags);
- return ret;
+ /* enforce the capability checks at the lsm layer, if needed */
+ if (!call_int_hook(inode_xattr_skipcap, name)) {
+ rc = cap_inode_setxattr(dentry, name, value, size, flags);
+ if (rc)
+ return rc;
+ }
+
+ return call_int_hook(inode_setxattr, idmap, dentry, name, value, size,
+ flags);
}
/**
@@ -2452,26 +2465,39 @@ int security_inode_listxattr(struct dentry *dentry)
* @dentry: file
* @name: xattr name
*
- * Check permission before removing the extended attribute identified by @name
- * for @dentry.
+ * This hook performs the desired permission checks before removing the
+ * extended attribute identified by @name on @dentry. It is important to note
+ * that we have some additional logic before the main LSM implementation calls
+ * to detect if we need to perform an additional capability check at the LSM
+ * layer.
+ *
+ * Normally we enforce a capability check prior to executing the various LSM
+ * hook implementations, but if an LSM wants to avoid this capability check,
+ * it can register an 'inode_xattr_skipcap' hook and return a value of 1 for
+ * xattrs for which it wants to skip the capability check, leaving the LSM
+ * fully responsible for enforcing the access control for the specific xattr.
+ * If all of the enabled LSMs refrain from registering an
+ * 'inode_xattr_skipcap' hook, or return 0 (the default return value), the
+ * capability check is still performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
{
- int ret;
+ int rc;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
- /*
- * SELinux and Smack integrate the cap call,
- * so assume that all LSMs supplying this call do so.
- */
- ret = call_int_hook(inode_removexattr, idmap, dentry, name);
- if (ret == 1)
- ret = cap_inode_removexattr(idmap, dentry, name);
- return ret;
+
+ /* enforce the capability checks at the lsm layer, if needed */
+ if (!call_int_hook(inode_xattr_skipcap, name)) {
+ rc = cap_inode_removexattr(idmap, dentry, name);
+ if (rc)
+ return rc;
+ }
+
+ return call_int_hook(inode_removexattr, idmap, dentry, name);
}
/**
@@ -5332,15 +5358,17 @@ void security_key_post_create_or_update(struct key *keyring, struct key *key,
* @op: rule operator
* @rulestr: rule context
* @lsmrule: receive buffer for audit rule struct
+ * @gfp: GFP flag used for kmalloc
*
* Allocate and initialize an LSM audit rule structure.
*
* Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of
* an invalid rule.
*/
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp)
{
- return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule);
+ return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp);
}
/**
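
The 'inode_xattr_skipcap' mechanism documented in the setxattr/removexattr hooks above reduces to a small predicate per LSM: return 1 for xattr names the LSM fully owns, 0 otherwise. The SELinux and Smack hunks below register the real implementations; the fragment here is only a hypothetical sketch for an imaginary LSM (the xattr name and the function are made up).

/* Hypothetical LSM, for illustration only; not part of this patch. */
static int example_inode_xattr_skipcap(const char *name)
{
	/* 1: this LSM enforces access to "security.example" itself, so the
	 * generic cap_inode_setxattr()/cap_inode_removexattr() check is skipped.
	 */
	return !strcmp(name, "security.example");
}

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(inode_xattr_skipcap, example_inode_xattr_skipcap),
};
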
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 7eed331e90f0..55c78c318ccd 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3177,6 +3177,23 @@ static bool has_cap_mac_admin(bool audit)
return true;
}
+/**
+ * selinux_inode_xattr_skipcap - Skip the xattr capability checks?
+ * @name: name of the xattr
+ *
+ * Returns 1 to indicate that SELinux "owns" the access control rights to xattrs
+ * named @name; the LSM layer should avoid enforcing any traditional
+ * capability based access controls on this xattr. Returns 0 to indicate that
+ * SELinux does not "own" the access control rights to xattrs named @name and is
+ * deferring to the LSM layer for further access controls, including capability
+ * based controls.
+ */
+static int selinux_inode_xattr_skipcap(const char *name)
+{
+ /* require capability check if not a selinux xattr */
+ return !strcmp(name, XATTR_NAME_SELINUX);
+}
+
static int selinux_inode_setxattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
@@ -3188,15 +3205,9 @@ static int selinux_inode_setxattr(struct mnt_idmap *idmap,
u32 newsid, sid = current_sid();
int rc = 0;
- if (strcmp(name, XATTR_NAME_SELINUX)) {
- rc = cap_inode_setxattr(dentry, name, value, size, flags);
- if (rc)
- return rc;
-
- /* Not an attribute we recognize, so just check the
- ordinary setattr permission. */
+ /* if not a selinux xattr, only check the ordinary setattr perm */
+ if (strcmp(name, XATTR_NAME_SELINUX))
return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
- }
if (!selinux_initialized())
return (inode_owner_or_capable(idmap, inode) ? 0 : -EPERM);
@@ -3345,15 +3356,9 @@ static int selinux_inode_listxattr(struct dentry *dentry)
static int selinux_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
{
- if (strcmp(name, XATTR_NAME_SELINUX)) {
- int rc = cap_inode_removexattr(idmap, dentry, name);
- if (rc)
- return rc;
-
- /* Not an attribute we recognize, so just check the
- ordinary setattr permission. */
+ /* if not a selinux xattr, only check the ordinary setattr perm */
+ if (strcmp(name, XATTR_NAME_SELINUX))
return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
- }
if (!selinux_initialized())
return 0;
@@ -7175,6 +7180,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(inode_permission, selinux_inode_permission),
LSM_HOOK_INIT(inode_setattr, selinux_inode_setattr),
LSM_HOOK_INIT(inode_getattr, selinux_inode_getattr),
+ LSM_HOOK_INIT(inode_xattr_skipcap, selinux_inode_xattr_skipcap),
LSM_HOOK_INIT(inode_setxattr, selinux_inode_setxattr),
LSM_HOOK_INIT(inode_post_setxattr, selinux_inode_post_setxattr),
LSM_HOOK_INIT(inode_getxattr, selinux_inode_getxattr),
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
index 52aca71210b4..29c7d4c86f6d 100644
--- a/security/selinux/include/audit.h
+++ b/security/selinux/include/audit.h
@@ -21,12 +21,14 @@
* @op: the operator the rule uses
* @rulestr: the text "target" of the rule
* @rule: pointer to the new rule structure returned via this
+ * @gfp: GFP flag used for kmalloc
*
* Returns 0 if successful, -errno if not. On success, the rule structure
* will be allocated internally. The caller must free this structure with
* selinux_audit_rule_free() after use.
*/
-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule,
+ gfp_t gfp);
/**
* selinux_audit_rule_free - free an selinux audit rule structure.
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 24d7d8b3cda3..ba2ac3da1153 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -29,7 +29,7 @@
sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE BITS_PER_LONG
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
-#define EBITMAP_BIT 1ULL
+#define EBITMAP_BIT 1UL
#define EBITMAP_SHIFT_UNIT_SIZE(x) \
(((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f20e1968b7f7..e33e55384b75 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -3507,7 +3507,8 @@ void selinux_audit_rule_free(void *vrule)
}
}
-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
+ gfp_t gfp)
{
struct selinux_state *state = &selinux_state;
struct selinux_policy *policy;
@@ -3548,7 +3549,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
return -EINVAL;
}
- tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
+ tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp);
if (!tmprule)
return -ENOMEM;
context_init(&tmprule->au_ctxt);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 70ba2841e181..4164699cd4f6 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1283,6 +1283,33 @@ static int smack_inode_getattr(const struct path *path)
}
/**
+ * smack_inode_xattr_skipcap - Skip the xattr capability checks?
+ * @name: name of the xattr
+ *
+ * Returns 1 to indicate that Smack "owns" the access control rights to xattrs
+ * named @name; the LSM layer should avoid enforcing any traditional
+ * capability based access controls on this xattr. Returns 0 to indicate that
+ * Smack does not "own" the access control rights to xattrs named @name and is
+ * deferring to the LSM layer for further access controls, including capability
+ * based controls.
+ */
+static int smack_inode_xattr_skipcap(const char *name)
+{
+ if (strncmp(name, XATTR_SMACK_SUFFIX, strlen(XATTR_SMACK_SUFFIX)))
+ return 0;
+
+ if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
+ return 1;
+
+ return 0;
+}
+
+/**
* smack_inode_setxattr - Smack check for setting xattrs
* @idmap: idmap of the mount
* @dentry: the object
@@ -1325,8 +1352,7 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap,
size != TRANS_TRUE_SIZE ||
strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
rc = -EINVAL;
- } else
- rc = cap_inode_setxattr(dentry, name, value, size, flags);
+ }
if (check_priv && !smack_privileged(CAP_MAC_ADMIN))
rc = -EPERM;
@@ -1435,8 +1461,7 @@ static int smack_inode_removexattr(struct mnt_idmap *idmap,
strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
if (!smack_privileged(CAP_MAC_ADMIN))
rc = -EPERM;
- } else
- rc = cap_inode_removexattr(idmap, dentry, name);
+ }
if (rc != 0)
return rc;
@@ -3846,12 +3871,18 @@ static int smack_unix_stream_connect(struct sock *sock,
}
}
- /*
- * Cross reference the peer labels for SO_PEERSEC.
- */
if (rc == 0) {
+ /*
+ * Cross reference the peer labels for SO_PEERSEC.
+ */
nsp->smk_packet = ssp->smk_out;
ssp->smk_packet = osp->smk_out;
+
+ /*
+ * new/child/established socket must inherit listening socket labels
+ */
+ nsp->smk_out = osp->smk_out;
+ nsp->smk_in = osp->smk_in;
}
return rc;
@@ -4431,7 +4462,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
rcu_read_unlock();
if (hskp == NULL)
- rc = netlbl_req_setattr(req, &skp->smk_netlabel);
+ rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel);
else
netlbl_req_delattr(req);
@@ -4693,11 +4724,13 @@ static int smack_post_notification(const struct cred *w_cred,
* @op: required testing operator (=, !=, >, <, ...)
* @rulestr: smack label to be audited
* @vrule: pointer to save our own audit rule representation
+ * @gfp: type of the memory for the allocation
*
* Prepare to audit cases where (@field @op @rulestr) is true.
+ * The label to be audited is created if necessary.
*/
-static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
+ gfp_t gfp)
{
struct smack_known *skp;
char **rule = (char **)vrule;
@@ -5051,6 +5084,7 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
LSM_HOOK_INIT(inode_permission, smack_inode_permission),
LSM_HOOK_INIT(inode_setattr, smack_inode_setattr),
LSM_HOOK_INIT(inode_getattr, smack_inode_getattr),
+ LSM_HOOK_INIT(inode_xattr_skipcap, smack_inode_xattr_skipcap),
LSM_HOOK_INIT(inode_setxattr, smack_inode_setxattr),
LSM_HOOK_INIT(inode_post_setxattr, smack_inode_post_setxattr),
LSM_HOOK_INIT(inode_getxattr, smack_inode_getxattr),
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index fad75be5f381..1e0dd1a6d0b0 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -10,7 +10,7 @@ config SECURITY_TOMOYO
help
This selects TOMOYO Linux, pathname-based access control.
Required userspace tools and further information may be
- found at <https://tomoyo.osdn.jp/>.
+ found at <https://tomoyo.sourceforge.net/>.
If you are unsure how to answer this question, answer N.
config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index ea3140d510ec..5c7b059a332a 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2787,7 +2787,7 @@ void tomoyo_check_profile(void)
else
continue;
pr_err("Userland tools for TOMOYO 2.6 must be installed and policy must be initialized.\n");
- pr_err("Please see https://tomoyo.osdn.jp/2.6/ for more information.\n");
+ pr_err("Please see https://tomoyo.sourceforge.net/2.6/ for more information.\n");
panic("STOP!");
}
tomoyo_read_unlock(idx);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index b6684a074a59..39944a859ff6 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -111,6 +111,7 @@ static void report_access(const char *access, struct task_struct *target,
/**
* yama_relation_cleanup - remove invalid entries from the relation list
+ * @work: unused
*
*/
static void yama_relation_cleanup(struct work_struct *work)
diff --git a/sound/core/init.c b/sound/core/init.c
index 4e52bbe32786..b9b708cf980d 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -537,6 +537,11 @@ void snd_card_disconnect(struct snd_card *card)
synchronize_irq(card->sync_irq);
snd_info_card_disconnect(card);
+#ifdef CONFIG_SND_DEBUG
+ debugfs_remove(card->debugfs_root);
+ card->debugfs_root = NULL;
+#endif
+
if (card->registered) {
device_del(&card->card_dev);
card->registered = false;
@@ -586,10 +591,6 @@ static int snd_card_do_free(struct snd_card *card)
dev_warn(card->dev, "unable to free card info\n");
/* Not fatal error */
}
-#ifdef CONFIG_SND_DEBUG
- debugfs_remove(card->debugfs_root);
- card->debugfs_root = NULL;
-#endif
if (card->release_completion)
complete(card->release_completion);
if (!card->managed)
diff --git a/sound/core/jack.c b/sound/core/jack.c
index e08b2c4fbd1a..e4bcecdf89b7 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -37,11 +37,15 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
};
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+static void snd_jack_remove_debugfs(struct snd_jack *jack);
+
static int snd_jack_dev_disconnect(struct snd_device *device)
{
-#ifdef CONFIG_SND_JACK_INPUT_DEV
struct snd_jack *jack = device->device_data;
+ snd_jack_remove_debugfs(jack);
+
+#ifdef CONFIG_SND_JACK_INPUT_DEV
guard(mutex)(&jack->input_dev_lock);
if (!jack->input_dev)
return 0;
@@ -381,10 +385,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
return 0;
}
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
{
- debugfs_remove(jack_kctl->jack_debugfs_root);
- jack_kctl->jack_debugfs_root = NULL;
+ struct snd_jack_kctl *jack_kctl;
+
+ list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
+ debugfs_remove(jack_kctl->jack_debugfs_root);
+ jack_kctl->jack_debugfs_root = NULL;
+ }
}
#else /* CONFIG_SND_JACK_INJECTION_DEBUG */
static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
@@ -393,7 +401,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
return 0;
}
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
{
}
#endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
@@ -404,7 +412,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
jack_kctl = kctl->private_data;
if (jack_kctl) {
- snd_jack_debugfs_clear_inject_node(jack_kctl);
list_del(&jack_kctl->list);
kfree(jack_kctl);
}
@@ -497,8 +504,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
.dev_free = snd_jack_dev_free,
#ifdef CONFIG_SND_JACK_INPUT_DEV
.dev_register = snd_jack_dev_register,
- .dev_disconnect = snd_jack_dev_disconnect,
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+ .dev_disconnect = snd_jack_dev_disconnect,
};
if (initial_kctl) {
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 12aa1cef11a1..cc5db93b9132 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -349,6 +349,16 @@ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_sync_stop);
+
/**
* snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
* @substream: PCM substream
@@ -358,6 +368,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
{
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_PAUSED)
+ dmaengine_terminate_async(prtd->dma_chan);
dmaengine_synchronize(prtd->dma_chan);
kfree(prtd);
@@ -378,6 +394,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
{
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_PAUSED)
+ dmaengine_terminate_async(prtd->dma_chan);
dmaengine_synchronize(prtd->dma_chan);
dma_release_channel(prtd->dma_chan);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 521ba56392a0..c152ccf32214 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1775,6 +1775,8 @@ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ if (runtime->state != SNDRV_PCM_STATE_SUSPENDED)
+ return -EBADFD;
if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
return -ENOSYS;
runtime->trigger_master = substream;
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
index ee6ac649df83..e90b27a135e6 100644
--- a/sound/core/seq/seq_ump_convert.c
+++ b/sound/core/seq/seq_ump_convert.c
@@ -157,7 +157,7 @@ static void ump_system_to_one_param_ev(const union snd_ump_midi1_msg *val,
static void ump_system_to_songpos_ev(const union snd_ump_midi1_msg *val,
struct snd_seq_event *ev)
{
- ev->data.control.value = (val->system.parm1 << 7) | val->system.parm2;
+ ev->data.control.value = (val->system.parm2 << 7) | val->system.parm1;
}
/* Encoders for 0xf0 - 0xff */
@@ -368,6 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
struct snd_seq_ump_event ev_cvt;
const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
+ struct snd_seq_ump_midi2_bank *cc;
ev_cvt = *event;
memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
@@ -387,11 +388,29 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
midi2->paf.data = upscale_7_to_32bit(midi1->paf.data);
break;
case UMP_MSG_STATUS_CC:
+ cc = &dest_port->midi2_bank[midi1->note.channel];
+ switch (midi1->cc.index) {
+ case UMP_CC_BANK_SELECT:
+ cc->bank_set = 1;
+ cc->cc_bank_msb = midi1->cc.data;
+ return 0; // skip
+ case UMP_CC_BANK_SELECT_LSB:
+ cc->bank_set = 1;
+ cc->cc_bank_lsb = midi1->cc.data;
+ return 0; // skip
+ }
midi2->cc.index = midi1->cc.index;
midi2->cc.data = upscale_7_to_32bit(midi1->cc.data);
break;
case UMP_MSG_STATUS_PROGRAM:
midi2->pg.program = midi1->pg.program;
+ cc = &dest_port->midi2_bank[midi1->note.channel];
+ if (cc->bank_set) {
+ midi2->pg.bank_valid = 1;
+ midi2->pg.bank_msb = cc->cc_bank_msb;
+ midi2->pg.bank_lsb = cc->cc_bank_lsb;
+ cc->bank_set = 0;
+ }
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
midi2->caf.data = upscale_7_to_32bit(midi1->caf.data);
@@ -419,6 +438,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
struct snd_seq_ump_event ev_cvt;
union snd_ump_midi1_msg *midi1 = (union snd_ump_midi1_msg *)ev_cvt.ump;
const union snd_ump_midi2_msg *midi2 = (const union snd_ump_midi2_msg *)event->ump;
+ int err;
u16 v;
ev_cvt = *event;
@@ -443,6 +463,24 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
midi1->cc.data = downscale_32_to_7bit(midi2->cc.data);
break;
case UMP_MSG_STATUS_PROGRAM:
+ if (midi2->pg.bank_valid) {
+ midi1->cc.status = UMP_MSG_STATUS_CC;
+ midi1->cc.index = UMP_CC_BANK_SELECT;
+ midi1->cc.data = midi2->pg.bank_msb;
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
+ midi1->cc.index = UMP_CC_BANK_SELECT_LSB;
+ midi1->cc.data = midi2->pg.bank_lsb;
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
+ midi1->note.status = midi2->note.status;
+ }
midi1->pg.program = midi2->pg.program;
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
@@ -691,6 +729,7 @@ static int system_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
return 1;
}
@@ -701,6 +740,7 @@ static int system_1p_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
data->system.parm1 = event->data.control.value & 0x7f;
return 1;
@@ -712,9 +752,10 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
- data->system.parm1 = (event->data.control.value >> 7) & 0x7f;
- data->system.parm2 = event->data.control.value & 0x7f;
+ data->system.parm1 = event->data.control.value & 0x7f;
+ data->system.parm2 = (event->data.control.value >> 7) & 0x7f;
return 1;
}
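
Both of the song-position changes in this file follow the MIDI byte order for two-byte system messages: the first data byte (parm1) is the LSB and the second (parm2) is the MSB, so the 14-bit value is (parm2 << 7) | parm1. A standalone check of that arithmetic, under exactly that assumption:

#include <assert.h>

/* parm1 = first data byte (LSB), parm2 = second data byte (MSB) */
static int songpos_decode(unsigned char parm1, unsigned char parm2)
{
	return (parm2 << 7) | parm1;
}

int main(void)
{
	assert(songpos_decode(0x7f, 0x01) == 255);
	assert(songpos_decode(0x00, 0x02) == 256);
	return 0;
}
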
@@ -750,7 +791,8 @@ static int paf_ev_to_ump_midi2(const struct snd_seq_event *event,
/* set up the MIDI2 RPN/NRPN packet data from the parsed info */
static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
- union snd_ump_midi2_msg *data)
+ union snd_ump_midi2_msg *data,
+ unsigned char channel)
{
if (cc->rpn_set) {
data->rpn.status = UMP_MSG_STATUS_RPN;
@@ -767,6 +809,7 @@ static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
}
data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
cc->cc_data_lsb);
+ data->rpn.channel = channel;
cc->cc_data_msb = cc->cc_data_lsb = 0;
}
@@ -814,7 +857,7 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
cc->cc_data_lsb = val;
if (!(cc->rpn_set || cc->nrpn_set))
return 0; // skip
- fill_rpn(cc, data);
+ fill_rpn(cc, data, channel);
return 1;
}
@@ -854,7 +897,6 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
data->pg.bank_msb = cc->cc_bank_msb;
data->pg.bank_lsb = cc->cc_bank_lsb;
cc->bank_set = 0;
- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
}
return 1;
}
@@ -917,7 +959,7 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
cc->cc_data_lsb = lsb;
if (!(cc->rpn_set || cc->nrpn_set))
return 0; // skip
- fill_rpn(cc, data);
+ fill_rpn(cc, data, channel);
return 1;
}
@@ -978,7 +1020,7 @@ static int system_2p_ev_to_ump_midi2(const struct snd_seq_event *event,
union snd_ump_midi2_msg *data,
unsigned char status)
{
- return system_1p_ev_to_ump_midi1(event, dest_port,
+ return system_2p_ev_to_ump_midi1(event, dest_port,
(union snd_ump_midi1_msg *)data,
status);
}
@@ -1035,6 +1077,8 @@ static const struct seq_ev_to_ump seq_ev_ump_encoders[] = {
system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
{ SNDRV_SEQ_EVENT_SENSING, UMP_SYSTEM_STATUS_ACTIVE_SENSING,
system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
+ { SNDRV_SEQ_EVENT_RESET, UMP_SYSTEM_STATUS_RESET,
+ system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
};
static const struct seq_ev_to_ump *find_ump_encoder(int type)
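
Several hunks in this file deal with the same gap between the two protocols: MIDI 1.0 has no bank field in its program-change message, so a bank change travels as two separate controllers (CC#0 = bank select MSB, CC#32 = bank select LSB) that the converter must buffer and attach to the next program change when going to MIDI 2.0, and expand back into a three-message sequence when going the other way. For reference, the MIDI 1.0 byte stream being reproduced looks like the following (the bank and program values are illustrative only):

#include <stdio.h>

/* MIDI 1.0 wire bytes for "bank 5/2, program 7" on channel 0. */
static const unsigned char bank_then_program[] = {
	0xb0, 0x00, 0x05,	/* control change, CC#0  (bank select MSB) = 5 */
	0xb0, 0x20, 0x02,	/* control change, CC#32 (bank select LSB) = 2 */
	0xc0, 0x07,		/* program change, program 7 */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(bank_then_program); i++)
		printf("%02x ", bank_then_program[i]);
	printf("\n");
	return 0;
}
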
diff --git a/sound/core/ump.c b/sound/core/ump.c
index fd6a68a54278..3f61220c23b4 100644
--- a/sound/core/ump.c
+++ b/sound/core/ump.c
@@ -685,10 +685,17 @@ static void seq_notify_protocol(struct snd_ump_endpoint *ump)
*/
int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol)
{
+ unsigned int type;
+
protocol &= ump->info.protocol_caps;
if (protocol == ump->info.protocol)
return 0;
+ type = protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK;
+ if (type != SNDRV_UMP_EP_INFO_PROTO_MIDI1 &&
+ type != SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+ return 0;
+
ump->info.protocol = protocol;
ump_dbg(ump, "New protocol = %x (caps = %x)\n",
protocol, ump->info.protocol_caps);
@@ -960,6 +967,14 @@ int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump)
if (err < 0)
ump_dbg(ump, "Unable to get UMP EP stream config\n");
+ /* If no protocol is set for some reason, assume a valid one */
+ if (!(ump->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK)) {
+ if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+ ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI2;
+ else if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI1)
+ ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI1;
+ }
+
/* Query and create blocks from Function Blocks */
for (blk = 0; blk < ump->info.num_blocks; blk++) {
err = create_block_from_fb_info(ump, blk);
diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
index de04799fdb69..f67c44c83fde 100644
--- a/sound/core/ump_convert.c
+++ b/sound/core/ump_convert.c
@@ -404,7 +404,6 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
midi2->pg.bank_msb = cc->cc_bank_msb;
midi2->pg.bank_lsb = cc->cc_bank_lsb;
cc->bank_set = 0;
- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
}
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index cfdb1b73c88c..478d2b50c571 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -18,7 +18,7 @@
static int dsp_driver;
module_param(dsp_driver, int, 0444);
-MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
+MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF, 4=AVS)");
#define FLAG_SST BIT(0)
#define FLAG_SOF BIT(1)
@@ -668,7 +668,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
return SND_INTEL_DSP_DRIVER_LEGACY;
}
- dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
+ dev_dbg(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
/* find the configuration for the specific device */
cfg = snd_intel_dsp_find_config(pci, config_table, ARRAY_SIZE(config_table));
@@ -678,12 +678,12 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
if (cfg->flags & FLAG_SOF) {
if (cfg->flags & FLAG_SOF_ONLY_IF_SOUNDWIRE &&
snd_intel_dsp_check_soundwire(pci) > 0) {
- dev_info(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
+ dev_info_once(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
return SND_INTEL_DSP_DRIVER_SOF;
}
if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC &&
snd_intel_dsp_check_dmic(pci)) {
- dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
+ dev_info_once(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
return SND_INTEL_DSP_DRIVER_SOF;
}
if (!(cfg->flags & FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE))
@@ -694,7 +694,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
if (cfg->flags & FLAG_SST) {
if (cfg->flags & FLAG_SST_ONLY_IF_DMIC) {
if (snd_intel_dsp_check_dmic(pci)) {
- dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
+ dev_info_once(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
return SND_INTEL_DSP_DRIVER_SST;
}
} else {
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index 164335d3c200..4b1baf4dd50e 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -204,6 +204,7 @@ module_param(numWriteBufs, int, 0);
static unsigned int writeBufSize = DEFAULT_BUFF_SIZE ; /* in bytes */
module_param(writeBufSize, int, 0);
+MODULE_DESCRIPTION("Atari/Amiga/Q40 core DMA sound driver");
MODULE_LICENSE("GPL");
static int sq_unit = -1;
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 0da625533afc..a3cf0725fc43 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -162,6 +162,7 @@ config SND_HDA_SCODEC_CS35L56_I2C
depends on ACPI || COMPILE_TEST
depends on SND_SOC
select FW_CS_DSP
+ imply SERIAL_MULTI_INSTANTIATE
select SND_HDA_GENERIC
select SND_SOC_CS35L56_SHARED
select SND_HDA_SCODEC_CS35L56
@@ -178,6 +179,7 @@ config SND_HDA_SCODEC_CS35L56_SPI
depends on ACPI || COMPILE_TEST
depends on SND_SOC
select FW_CS_DSP
+ imply SERIAL_MULTI_INSTANTIATE
select SND_HDA_GENERIC
select SND_SOC_CS35L56_SHARED
select SND_HDA_SCODEC_CS35L56
diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
index 6c49e5c6cd20..031703f010be 100644
--- a/sound/pci/hda/cs35l41_hda.c
+++ b/sound/pci/hda/cs35l41_hda.c
@@ -1495,7 +1495,7 @@ static void cs35l41_hda_unbind(struct device *dev, struct device *master, void *
if (comps[cs35l41->index].dev == dev) {
memset(&comps[cs35l41->index], 0, sizeof(*comps));
sleep_flags = lock_system_sleep();
- device_link_remove(&comps->codec->core.dev, cs35l41->dev);
+ device_link_remove(&cs35l41->codec->core.dev, cs35l41->dev);
unlock_system_sleep(sleep_flags);
}
}
@@ -2019,6 +2019,8 @@ void cs35l41_hda_remove(struct device *dev)
{
struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
+
pm_runtime_get_sync(cs35l41->dev);
pm_runtime_dont_use_autosuspend(cs35l41->dev);
pm_runtime_disable(cs35l41->dev);
@@ -2026,8 +2028,6 @@ void cs35l41_hda_remove(struct device *dev)
if (cs35l41->halo_initialized)
cs35l41_remove_dsp(cs35l41);
- component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
-
acpi_dev_put(cs35l41->dacpi);
pm_runtime_put_noidle(cs35l41->dev);
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index 6a7a6d486916..80c816922f78 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -128,6 +128,10 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "17AA38B5", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "17AA38B6", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "17AA38B7", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ { "17AA38C7", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 },
+ { "17AA38C8", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 },
+ { "17AA38F9", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
+ { "17AA38FA", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
{}
};
@@ -529,6 +533,10 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CSC3551", "17AA38B5", generic_dsd_config },
{ "CSC3551", "17AA38B6", generic_dsd_config },
{ "CSC3551", "17AA38B7", generic_dsd_config },
+ { "CSC3551", "17AA38C7", generic_dsd_config },
+ { "CSC3551", "17AA38C8", generic_dsd_config },
+ { "CSC3551", "17AA38F9", generic_dsd_config },
+ { "CSC3551", "17AA38FA", generic_dsd_config },
{}
};
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index 11b0570ff56d..e134ede6c5aa 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -735,6 +735,8 @@ static void cs35l56_hda_unbind(struct device *dev, struct device *master, void *
if (comps[cs35l56->index].dev == dev)
memset(&comps[cs35l56->index], 0, sizeof(*comps));
+ cs35l56->codec = NULL;
+
dev_dbg(cs35l56->base.dev, "Unbound\n");
}
@@ -840,6 +842,9 @@ static int cs35l56_hda_system_resume(struct device *dev)
cs35l56->suspended = false;
+ if (!cs35l56->codec)
+ return 0;
+
ret = cs35l56_is_fw_reload_needed(&cs35l56->base);
dev_dbg(cs35l56->base.dev, "fw_reload_needed: %d\n", ret);
if (ret > 0) {
@@ -1072,12 +1077,12 @@ void cs35l56_hda_remove(struct device *dev)
{
struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
+ component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
+
pm_runtime_dont_use_autosuspend(cs35l56->base.dev);
pm_runtime_get_sync(cs35l56->base.dev);
pm_runtime_disable(cs35l56->base.dev);
- component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
-
cs_dsp_remove(&cs35l56->cs_dsp);
kfree(cs35l56->system_name);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 766734dc5be2..5d86e5a9c814 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -463,7 +463,8 @@ static int azx_get_sync_time(ktime_t *device,
*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
- *system = convert_art_to_tsc(tsc_counter);
+ system->cycles = tsc_counter;
+ system->cs_id = CSID_X86_ART;
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e3c0b9d5552d..766f0b1d3e9d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -583,10 +583,14 @@ static void alc_shutup_pins(struct hda_codec *codec)
switch (codec->core.vendor_id) {
case 0x10ec0236:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
case 0x10ec0283:
+ case 0x10ec0285:
case 0x10ec0286:
+ case 0x10ec0287:
case 0x10ec0288:
+ case 0x10ec0295:
case 0x10ec0298:
alc_headset_mic_no_shutup(codec);
break;
@@ -7520,6 +7524,8 @@ enum {
ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1,
ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318,
ALC256_FIXUP_CHROME_BOOK,
+ ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7,
+ ALC287_FIXUP_LENOVO_SSID_17AA3820,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -7559,6 +7565,21 @@ static void alc287_fixup_lenovo_14irp8_duetitl(struct hda_codec *codec,
__snd_hda_apply_fixup(codec, id, action, 0);
}
+/* Similar to above, the Lenovo Yoga Pro 7 14ARP8 PCI SSID matches the codec
+ * SSID of the Legion Y9000X 2022 IAH7. */
+static void alc287_fixup_lenovo_14arp8_legion_iah7(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ int id;
+
+ if (codec->core.subsystem_id == 0x17aa386e)
+ id = ALC287_FIXUP_CS35L41_I2C_2; /* Legion Y9000X 2022 IAH7 */
+ else
+ id = ALC285_FIXUP_SPEAKER2_TO_DAC1; /* Yoga Pro 7 14ARP8 */
+ __snd_hda_apply_fixup(codec, id, action, 0);
+}
+
/* Another hilarious PCI SSID conflict with Lenovo Legion Pro 7 16ARX8H (with
* TAS2781 codec) and Legion 7i 16IAX7 (with CS35L41 codec);
* we apply a corresponding fixup depending on the codec SSID instead
@@ -7576,6 +7597,20 @@ static void alc287_fixup_lenovo_legion_7(struct hda_codec *codec,
__snd_hda_apply_fixup(codec, id, action, 0);
}
+/* Yet another conflicting PCI SSID (17aa:3820) shared by two Lenovo models */
+static void alc287_fixup_lenovo_ssid_17aa3820(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ int id;
+
+ if (codec->core.subsystem_id == 0x17aa3820)
+ id = ALC269_FIXUP_ASPIRE_HEADSET_MIC; /* IdeaPad 330-17IKB 81DM */
+ else /* 0x17aa3802 */
+ id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Yoga Duet 7 13ITL6 */
+ __snd_hda_apply_fixup(codec, id, action, 0);
+}
+
static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_GPIO2] = {
.type = HDA_FIXUP_FUNC,
@@ -9658,6 +9693,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
},
+ [ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_lenovo_14arp8_legion_iah7,
+ },
[ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin,
@@ -9808,6 +9847,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC225_FIXUP_HEADSET_JACK
},
+ [ALC287_FIXUP_LENOVO_SSID_17AA3820] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_lenovo_ssid_17aa3820,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -10010,6 +10053,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x84a6, "HP 250 G7 Notebook PC", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
@@ -10045,6 +10089,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
@@ -10194,6 +10239,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7f, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c80, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c81, "HP EliteBook 665 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
@@ -10310,7 +10362,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -10332,6 +10384,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
@@ -10429,6 +10482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -10502,7 +10556,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
- SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330 / Yoga Duet 7", ALC287_FIXUP_LENOVO_SSID_17AA3820),
SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
@@ -10516,7 +10570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
- SND_PCI_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7 / Yoga Pro 7 14ARP8", ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7),
SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7),
SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10527,6 +10581,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
@@ -10540,10 +10595,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38be, "Yoga S980-14.5 proX YC Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38bf, "Yoga S980-14.5 proX LX Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38c3, "Y980 DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x38c7, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4),
+ SND_PCI_QUIRK(0x17aa, 0x38c8, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4),
SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38d7, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -10581,6 +10640,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
@@ -10598,6 +10658,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
@@ -10605,7 +10666,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
#if 0
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 75f7674c66ee..fdee6592c502 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -777,11 +777,11 @@ static void tas2781_hda_remove(struct device *dev)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ component_del(tas_hda->dev, &tas2781_hda_comp_ops);
+
pm_runtime_get_sync(tas_hda->dev);
pm_runtime_disable(tas_hda->dev);
- component_del(tas_hda->dev, &tas2781_hda_comp_ops);
-
pm_runtime_put_noidle(tas_hda->dev);
tasdevice_remove(tas_hda->priv);
diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
index 60cbc881be6e..ef12f97ddc69 100644
--- a/sound/soc/amd/acp/acp-i2s.c
+++ b/sound/soc/amd/acp/acp-i2s.c
@@ -588,20 +588,12 @@ static int acp_i2s_probe(struct snd_soc_dai *dai)
{
struct device *dev = dai->component->dev;
struct acp_dev_data *adata = dev_get_drvdata(dev);
- struct acp_resource *rsrc = adata->rsrc;
- unsigned int val;
if (!adata->acp_base) {
dev_err(dev, "I2S base is NULL\n");
return -EINVAL;
}
- val = readl(adata->acp_base + rsrc->i2s_pin_cfg_offset);
- if (val != rsrc->i2s_mode) {
- dev_err(dev, "I2S Mode not supported val %x\n", val);
- return -EINVAL;
- }
-
return 0;
}
diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
index ad320b29e87d..777b5a78d8a9 100644
--- a/sound/soc/amd/acp/acp-pci.c
+++ b/sound/soc/amd/acp/acp-pci.c
@@ -100,6 +100,7 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
ret = -EINVAL;
goto release_regions;
}
+ chip->flag = flag;
dmic_dev = platform_device_register_data(dev, "dmic-codec", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(dmic_dev)) {
dev_err(dev, "failed to create DMIC device\n");
@@ -139,7 +140,6 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
}
}
- chip->flag = flag;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.name = chip->name;
@@ -199,10 +199,12 @@ static int __maybe_unused snd_acp_resume(struct device *dev)
ret = acp_init(chip);
if (ret)
dev_err(dev, "ACP init failed\n");
- child = chip->chip_pdev->dev;
- adata = dev_get_drvdata(&child);
- if (adata)
- acp_enable_interrupts(adata);
+ if (chip->chip_pdev) {
+ child = chip->chip_pdev->dev;
+ adata = dev_get_drvdata(&child);
+ if (adata)
+ acp_enable_interrupts(adata);
+ }
return ret;
}
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 1760b5d42460..4e3a8ce690a4 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -283,6 +283,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M5602RA"),
+ }
+ },
{
.driver_data = &acp6x_card,
.matches = {
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 6aed1ee443b4..ba314b279919 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -473,19 +473,22 @@ static int atmel_classd_asoc_card_init(struct device *dev,
if (!dai_link)
return -ENOMEM;
- comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ comp = devm_kzalloc(dev, 2 * sizeof(*comp), GFP_KERNEL);
if (!comp)
return -ENOMEM;
- dai_link->cpus = comp;
+ dai_link->cpus = &comp[0];
dai_link->codecs = &snd_soc_dummy_dlc;
+ dai_link->platforms = &comp[1];
dai_link->num_cpus = 1;
dai_link->num_codecs = 1;
+ dai_link->num_platforms = 1;
dai_link->name = "CLASSD";
dai_link->stream_name = "CLASSD PCM";
dai_link->cpus->dai_name = dev_name(dev);
+ dai_link->platforms->name = dev_name(dev);
card->dai_link = dai_link;
card->num_links = 1;
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4afc43d3f71f..91b502f8cc3f 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -75,6 +75,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_CS35L56_I2C
imply SND_SOC_CS35L56_SPI
imply SND_SOC_CS35L56_SDW
+ imply SND_SOC_CS40L50
imply SND_SOC_CS42L42
imply SND_SOC_CS42L42_SDW
imply SND_SOC_CS42L43
@@ -847,6 +848,16 @@ config SND_SOC_CS35L56_SDW
help
Enable support for Cirrus Logic CS35L56 boosted amplifier with SoundWire control
+config SND_SOC_CS40L50
+ tristate "Cirrus Logic CS40L50 CODEC"
+ depends on MFD_CS40L50_CORE
+ help
+ This option enables support for I2S streaming to Cirrus Logic CS40L50.
+
+ CS40L50 is a haptic driver with waveform memory, an integrated
+ DSP, and closed-loop algorithms. If built as a module, it will be
+ called snd-soc-cs40l50.
+
config SND_SOC_CS42L42_CORE
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index b4df22186e25..3afd7c16c959 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -78,6 +78,7 @@ snd-soc-cs35l56-shared-y := cs35l56-shared.o
snd-soc-cs35l56-i2c-y := cs35l56-i2c.o
snd-soc-cs35l56-spi-y := cs35l56-spi.o
snd-soc-cs35l56-sdw-y := cs35l56-sdw.o
+snd-soc-cs40l50-y := cs40l50-codec.o
snd-soc-cs42l42-y := cs42l42.o
snd-soc-cs42l42-i2c-y := cs42l42-i2c.o
snd-soc-cs42l42-sdw-y := cs42l42-sdw.o
@@ -475,6 +476,7 @@ obj-$(CONFIG_SND_SOC_CS35L56_SHARED) += snd-soc-cs35l56-shared.o
obj-$(CONFIG_SND_SOC_CS35L56_I2C) += snd-soc-cs35l56-i2c.o
obj-$(CONFIG_SND_SOC_CS35L56_SPI) += snd-soc-cs35l56-spi.o
obj-$(CONFIG_SND_SOC_CS35L56_SDW) += snd-soc-cs35l56-sdw.o
+obj-$(CONFIG_SND_SOC_CS40L50) += snd-soc-cs40l50.o
obj-$(CONFIG_SND_SOC_CS42L42_CORE) += snd-soc-cs42l42.o
obj-$(CONFIG_SND_SOC_CS42L42) += snd-soc-cs42l42-i2c.o
obj-$(CONFIG_SND_SOC_CS42L42_SDW) += snd-soc-cs42l42-sdw.o
diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
index 8af89a263594..30497152e02a 100644
--- a/sound/soc/codecs/cs35l56-shared.c
+++ b/sound/soc/codecs/cs35l56-shared.c
@@ -215,6 +215,10 @@ static const struct reg_sequence cs35l56_asp1_defaults[] = {
REG_SEQ0(CS35L56_ASP1_FRAME_CONTROL5, 0x00020100),
REG_SEQ0(CS35L56_ASP1_DATA_CONTROL1, 0x00000018),
REG_SEQ0(CS35L56_ASP1_DATA_CONTROL5, 0x00000018),
+ REG_SEQ0(CS35L56_ASP1TX1_INPUT, 0x00000000),
+ REG_SEQ0(CS35L56_ASP1TX2_INPUT, 0x00000000),
+ REG_SEQ0(CS35L56_ASP1TX3_INPUT, 0x00000000),
+ REG_SEQ0(CS35L56_ASP1TX4_INPUT, 0x00000000),
};
/*
diff --git a/sound/soc/codecs/cs40l50-codec.c b/sound/soc/codecs/cs40l50-codec.c
new file mode 100644
index 000000000000..aa629ef53db4
--- /dev/null
+++ b/sound/soc/codecs/cs40l50-codec.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// CS40L50 Advanced Haptic Driver with waveform memory,
+// integrated DSP, and closed-loop algorithms
+//
+// Copyright 2024 Cirrus Logic, Inc.
+//
+// Author: James Ogletree <james.ogletree@cirrus.com>
+
+#include <linux/bitfield.h>
+#include <linux/mfd/cs40l50.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define CS40L50_REFCLK_INPUT 0x2C04
+#define CS40L50_ASP_CONTROL2 0x4808
+#define CS40L50_ASP_DATA_CONTROL5 0x4840
+
+/* PLL Config */
+#define CS40L50_PLL_REFCLK_BCLK 0x0
+#define CS40L50_PLL_REFCLK_MCLK 0x5
+#define CS40L50_PLL_REFCLK_MCLK_CFG 0x00
+#define CS40L50_PLL_REFCLK_LOOP_MASK BIT(11)
+#define CS40L50_PLL_REFCLK_OPEN_LOOP 1
+#define CS40L50_PLL_REFCLK_CLOSED_LOOP 0
+#define CS40L50_PLL_REFCLK_LOOP_SHIFT 11
+#define CS40L50_PLL_REFCLK_FREQ_MASK GENMASK(10, 5)
+#define CS40L50_PLL_REFCLK_FREQ_SHIFT 5
+#define CS40L50_PLL_REFCLK_SEL_MASK GENMASK(2, 0)
+#define CS40L50_BCLK_RATIO_DEFAULT 32
+
+/* ASP Config */
+#define CS40L50_ASP_RX_WIDTH_SHIFT 24
+#define CS40L50_ASP_RX_WIDTH_MASK GENMASK(31, 24)
+#define CS40L50_ASP_RX_WL_MASK GENMASK(5, 0)
+#define CS40L50_ASP_FSYNC_INV_MASK BIT(2)
+#define CS40L50_ASP_BCLK_INV_MASK BIT(6)
+#define CS40L50_ASP_FMT_MASK GENMASK(10, 8)
+#define CS40L50_ASP_FMT_I2S 0x2
+
+struct cs40l50_pll_config {
+ unsigned int freq;
+ unsigned int cfg;
+};
+
+struct cs40l50_codec {
+ struct device *dev;
+ struct regmap *regmap;
+ unsigned int daifmt;
+ unsigned int bclk_ratio;
+ unsigned int rate;
+};
+
+static const struct cs40l50_pll_config cs40l50_pll_cfg[] = {
+ { 32768, 0x00 },
+ { 1536000, 0x1B },
+ { 3072000, 0x21 },
+ { 6144000, 0x28 },
+ { 9600000, 0x30 },
+ { 12288000, 0x33 },
+};
+
+static int cs40l50_get_clk_config(const unsigned int freq, unsigned int *cfg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs40l50_pll_cfg); i++) {
+ if (cs40l50_pll_cfg[i].freq == freq) {
+ *cfg = cs40l50_pll_cfg[i].cfg;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int cs40l50_swap_ext_clk(struct cs40l50_codec *codec, const unsigned int clk_src)
+{
+ unsigned int cfg;
+ int ret;
+
+ switch (clk_src) {
+ case CS40L50_PLL_REFCLK_BCLK:
+ ret = cs40l50_get_clk_config(codec->bclk_ratio * codec->rate, &cfg);
+ if (ret)
+ return ret;
+ break;
+ case CS40L50_PLL_REFCLK_MCLK:
+ cfg = CS40L50_PLL_REFCLK_MCLK_CFG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(codec->regmap, CS40L50_REFCLK_INPUT,
+ CS40L50_PLL_REFCLK_LOOP_MASK,
+ CS40L50_PLL_REFCLK_OPEN_LOOP <<
+ CS40L50_PLL_REFCLK_LOOP_SHIFT);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(codec->regmap, CS40L50_REFCLK_INPUT,
+ CS40L50_PLL_REFCLK_FREQ_MASK |
+ CS40L50_PLL_REFCLK_SEL_MASK,
+ (cfg << CS40L50_PLL_REFCLK_FREQ_SHIFT) | clk_src);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(codec->regmap, CS40L50_REFCLK_INPUT,
+ CS40L50_PLL_REFCLK_LOOP_MASK,
+ CS40L50_PLL_REFCLK_CLOSED_LOOP <<
+ CS40L50_PLL_REFCLK_LOOP_SHIFT);
+}
+
+static int cs40l50_clk_en(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct cs40l50_codec *codec = snd_soc_component_get_drvdata(comp);
+ int ret;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ ret = cs40l50_dsp_write(codec->dev, codec->regmap, CS40L50_STOP_PLAYBACK);
+ if (ret)
+ return ret;
+
+ ret = cs40l50_dsp_write(codec->dev, codec->regmap, CS40L50_START_I2S);
+ if (ret)
+ return ret;
+
+ ret = cs40l50_swap_ext_clk(codec, CS40L50_PLL_REFCLK_BCLK);
+ if (ret)
+ return ret;
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ ret = cs40l50_swap_ext_clk(codec, CS40L50_PLL_REFCLK_MCLK);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cs40l50_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY_S("ASP PLL", 0, SND_SOC_NOPM, 0, 0, cs40l50_clk_en,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_AIF_IN("ASPRX1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("ASPRX2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("OUT"),
+};
+
+static const struct snd_soc_dapm_route cs40l50_dapm_routes[] = {
+ { "ASP Playback", NULL, "ASP PLL" },
+ { "ASPRX1", NULL, "ASP Playback" },
+ { "ASPRX2", NULL, "ASP Playback" },
+
+ { "OUT", NULL, "ASPRX1" },
+ { "OUT", NULL, "ASPRX2" },
+};
+
+static int cs40l50_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct cs40l50_codec *codec = snd_soc_component_get_drvdata(codec_dai->component);
+
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBC_CFC)
+ return -EINVAL;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ codec->daifmt = 0;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ codec->daifmt = CS40L50_ASP_FSYNC_INV_MASK;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ codec->daifmt = CS40L50_ASP_BCLK_INV_MASK;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ codec->daifmt = CS40L50_ASP_FSYNC_INV_MASK | CS40L50_ASP_BCLK_INV_MASK;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid clock invert\n");
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ codec->daifmt |= FIELD_PREP(CS40L50_ASP_FMT_MASK, CS40L50_ASP_FMT_I2S);
+ break;
+ default:
+ dev_err(codec->dev, "Unsupported DAI format\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs40l50_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct cs40l50_codec *codec = snd_soc_component_get_drvdata(dai->component);
+ unsigned int asp_rx_wl = params_width(params);
+ int ret;
+
+ codec->rate = params_rate(params);
+
+ ret = regmap_update_bits(codec->regmap, CS40L50_ASP_DATA_CONTROL5,
+ CS40L50_ASP_RX_WL_MASK, asp_rx_wl);
+ if (ret)
+ return ret;
+
+ codec->daifmt |= (asp_rx_wl << CS40L50_ASP_RX_WIDTH_SHIFT);
+
+ return regmap_update_bits(codec->regmap, CS40L50_ASP_CONTROL2,
+ CS40L50_ASP_FSYNC_INV_MASK |
+ CS40L50_ASP_BCLK_INV_MASK |
+ CS40L50_ASP_FMT_MASK |
+ CS40L50_ASP_RX_WIDTH_MASK, codec->daifmt);
+}
+
+static int cs40l50_set_dai_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct cs40l50_codec *codec = snd_soc_component_get_drvdata(dai->component);
+
+ codec->bclk_ratio = ratio;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops cs40l50_dai_ops = {
+ .set_fmt = cs40l50_set_dai_fmt,
+ .set_bclk_ratio = cs40l50_set_dai_bclk_ratio,
+ .hw_params = cs40l50_hw_params,
+};
+
+static struct snd_soc_dai_driver cs40l50_dai[] = {
+ {
+ .name = "cs40l50-pcm",
+ .id = 0,
+ .playback = {
+ .stream_name = "ASP Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &cs40l50_dai_ops,
+ },
+};
+
+static int cs40l50_codec_probe(struct snd_soc_component *component)
+{
+ struct cs40l50_codec *codec = snd_soc_component_get_drvdata(component);
+
+ codec->bclk_ratio = CS40L50_BCLK_RATIO_DEFAULT;
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver soc_codec_dev_cs40l50 = {
+ .probe = cs40l50_codec_probe,
+ .dapm_widgets = cs40l50_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs40l50_dapm_widgets),
+ .dapm_routes = cs40l50_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs40l50_dapm_routes),
+};
+
+static int cs40l50_codec_driver_probe(struct platform_device *pdev)
+{
+ struct cs40l50 *cs40l50 = dev_get_drvdata(pdev->dev.parent);
+ struct cs40l50_codec *codec;
+
+ codec = devm_kzalloc(&pdev->dev, sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ codec->regmap = cs40l50->regmap;
+ codec->dev = &pdev->dev;
+
+ return devm_snd_soc_register_component(&pdev->dev, &soc_codec_dev_cs40l50,
+ cs40l50_dai, ARRAY_SIZE(cs40l50_dai));
+}
+
+static const struct platform_device_id cs40l50_id[] = {
+ { "cs40l50-codec", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cs40l50_id);
+
+static struct platform_driver cs40l50_codec_driver = {
+ .probe = cs40l50_codec_driver_probe,
+ .id_table = cs40l50_id,
+ .driver = {
+ .name = "cs40l50-codec",
+ },
+};
+module_platform_driver(cs40l50_codec_driver);
+
+MODULE_DESCRIPTION("ASoC CS40L50 driver");
+MODULE_AUTHOR("James Ogletree <james.ogletree@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index 901b9dbcf585..d9ab003e166b 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -121,7 +121,7 @@ int cs42l43_set_jack(struct snd_soc_component *component,
priv->buttons[3] = 735;
}
- ret = cs42l43_find_index(priv, "cirrus,detect-us", 1000, &priv->detect_us,
+ ret = cs42l43_find_index(priv, "cirrus,detect-us", 50000, &priv->detect_us,
cs42l43_accdet_us, ARRAY_SIZE(cs42l43_accdet_us));
if (ret < 0)
goto error;
@@ -433,7 +433,7 @@ irqreturn_t cs42l43_button_press(int irq, void *data)
// Wait for 2 full cycles of comb filter to ensure good reading
queue_delayed_work(system_wq, &priv->button_press_work,
- msecs_to_jiffies(10));
+ msecs_to_jiffies(20));
return IRQ_HANDLED;
}
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 94685449f0f4..92674314227c 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -310,8 +310,9 @@ static int cs42l43_startup(struct snd_pcm_substream *substream, struct snd_soc_d
struct snd_soc_component *component = dai->component;
struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component);
struct cs42l43 *cs42l43 = priv->core;
- int provider = !!regmap_test_bits(cs42l43->regmap, CS42L43_ASP_CLK_CONFIG2,
- CS42L43_ASP_MASTER_MODE_MASK);
+ int provider = !dai->id || !!regmap_test_bits(cs42l43->regmap,
+ CS42L43_ASP_CLK_CONFIG2,
+ CS42L43_ASP_MASTER_MODE_MASK);
if (provider)
priv->constraint.mask = CS42L43_PROVIDER_RATE_MASK;
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index 03b539ba540f..6a4e42e5e35b 100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -857,12 +857,16 @@ static void es8326_jack_detect_handler(struct work_struct *work)
* set auto-check mode, then restart jack_detect_work after 400ms.
* Don't report jack status.
*/
- regmap_write(es8326->regmap, ES8326_INT_SOURCE,
- (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
+ regmap_write(es8326->regmap, ES8326_INT_SOURCE, 0x00);
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
+ regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x10, 0x00);
es8326_enable_micbias(es8326->component);
usleep_range(50000, 70000);
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
+ regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x10, 0x10);
+ usleep_range(50000, 70000);
+ regmap_write(es8326->regmap, ES8326_INT_SOURCE,
+ (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x1f);
regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x08);
queue_delayed_work(system_wq, &es8326->jack_detect_work,
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index cdb7ff7020e9..51187b1e0ed2 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -81,7 +81,7 @@ static const struct reg_sequence init_list[] = {
static const struct reg_sequence rt5650_init_list[] = {
{0xf6, 0x0100},
{RT5645_PWR_ANLG1, 0x02},
- {RT5645_IL_CMD3, 0x0018},
+ {RT5645_IL_CMD3, 0x6728},
};
static const struct reg_default rt5645_reg[] = {
@@ -3130,20 +3130,32 @@ static void rt5645_enable_push_button_irq(struct snd_soc_component *component,
bool enable)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ int ret;
if (enable) {
snd_soc_dapm_force_enable_pin(dapm, "ADC L power");
snd_soc_dapm_force_enable_pin(dapm, "ADC R power");
snd_soc_dapm_sync(dapm);
+ snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD2,
+ RT5645_EN_4BTN_IL_MASK | RT5645_RST_4BTN_IL_MASK,
+ RT5645_EN_4BTN_IL_EN | RT5645_RST_4BTN_IL_RST);
+ usleep_range(10000, 15000);
+ snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD2,
+ RT5645_EN_4BTN_IL_MASK | RT5645_RST_4BTN_IL_MASK,
+ RT5645_EN_4BTN_IL_EN | RT5645_RST_4BTN_IL_NORM);
+ msleep(50);
+ ret = snd_soc_component_read(component, RT5645_INT_IRQ_ST);
+ pr_debug("%s read %x = %x\n", __func__, RT5645_INT_IRQ_ST,
+ snd_soc_component_read(component, RT5645_INT_IRQ_ST));
+ snd_soc_component_write(component, RT5645_INT_IRQ_ST, ret);
+ ret = snd_soc_component_read(component, RT5650_4BTN_IL_CMD1);
+ pr_debug("%s read %x = %x\n", __func__, RT5650_4BTN_IL_CMD1,
+ snd_soc_component_read(component, RT5650_4BTN_IL_CMD1));
+ snd_soc_component_write(component, RT5650_4BTN_IL_CMD1, ret);
snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD1, 0x3, 0x3);
snd_soc_component_update_bits(component,
RT5645_INT_IRQ_ST, 0x8, 0x8);
- snd_soc_component_update_bits(component,
- RT5650_4BTN_IL_CMD2, 0x8000, 0x8000);
- snd_soc_component_read(component, RT5650_4BTN_IL_CMD1);
- pr_debug("%s read %x = %x\n", __func__, RT5650_4BTN_IL_CMD1,
- snd_soc_component_read(component, RT5650_4BTN_IL_CMD1));
} else {
snd_soc_component_update_bits(component, RT5650_4BTN_IL_CMD2, 0x8000, 0x0);
snd_soc_component_update_bits(component, RT5645_INT_IRQ_ST, 0x8, 0x0);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 90816b2c5489..bef74b29fd54 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2011,6 +2011,12 @@
#define RT5645_ZCD_HP_DIS (0x0 << 15)
#define RT5645_ZCD_HP_EN (0x1 << 15)
+/* Buttons Inline Command Function 2 (0xe0) */
+#define RT5645_EN_4BTN_IL_MASK (0x1 << 15)
+#define RT5645_EN_4BTN_IL_EN (0x1 << 15)
+#define RT5645_RST_4BTN_IL_MASK (0x1 << 14)
+#define RT5645_RST_4BTN_IL_RST (0x0 << 14)
+#define RT5645_RST_4BTN_IL_NORM (0x1 << 14)
/* Codec Private Register definition */
/* DAC ADC Digital Volume (0x00) */
diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
index 8ca8bcd177ab..dfda6bb5c6f8 100644
--- a/sound/soc/codecs/rt711-sdw.c
+++ b/sound/soc/codecs/rt711-sdw.c
@@ -38,7 +38,9 @@ static bool rt711_readable_register(struct device *dev, unsigned int reg)
case 0x8300 ... 0x83ff:
case 0x9c00 ... 0x9cff:
case 0xb900 ... 0xb9ff:
+ case 0x752008:
case 0x752009:
+ case 0x75200b:
case 0x752011:
case 0x75201a:
case 0x752045:
diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c
index b33da2215ade..87354bb1564e 100644
--- a/sound/soc/codecs/rt722-sdca-sdw.c
+++ b/sound/soc/codecs/rt722-sdca-sdw.c
@@ -68,6 +68,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re
case 0x200007f:
case 0x2000082 ... 0x200008e:
case 0x2000090 ... 0x2000094:
+ case 0x3110000:
case 0x5300000 ... 0x5300002:
case 0x5400002:
case 0x5600000 ... 0x5600007:
@@ -125,6 +126,7 @@ static bool rt722_sdca_mbq_volatile_register(struct device *dev, unsigned int re
case 0x2000067:
case 0x2000084:
case 0x2000086:
+ case 0x3110000:
return true;
default:
return false;
@@ -350,7 +352,7 @@ static int rt722_sdca_interrupt_callback(struct sdw_slave *slave,
if (status->sdca_cascade && !rt722->disable_irq)
mod_delayed_work(system_power_efficient_wq,
- &rt722->jack_detect_work, msecs_to_jiffies(30));
+ &rt722->jack_detect_work, msecs_to_jiffies(280));
mutex_unlock(&rt722->disable_irq_lock);
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index c9d9a7b28efb..68d2d6444533 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -2085,5 +2085,6 @@ static const struct cs_dsp_client_ops wm_adsp2_client_ops = {
.watchdog_expired = wm_adsp_fatal_error,
};
+MODULE_DESCRIPTION("Cirrus Logic ASoC DSP Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(FW_CS_DSP);
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 5ddc0c2fe53f..eb67689dcd6e 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -559,6 +559,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->pdev = pdev;
+
cpu_np = of_parse_phandle(np, "audio-cpu", 0);
/* Give a chance to old DT binding */
if (!cpu_np)
@@ -787,7 +789,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
}
/* Initialize sound card */
- priv->pdev = pdev;
priv->card.dev = &pdev->dev;
priv->card.owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(&priv->card, "model");
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 14e94270911c..4fa208d6a032 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -50,4 +50,5 @@ int imx_pcm_dma_init(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
+MODULE_DESCRIPTION("Freescale i.MX PCM DMA interface");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
index 02bae207f6ec..b6c5d94a1554 100644
--- a/sound/soc/intel/avs/topology.c
+++ b/sound/soc/intel/avs/topology.c
@@ -1545,8 +1545,8 @@ static int avs_route_load(struct snd_soc_component *comp, int index,
{
struct snd_soc_acpi_mach *mach = dev_get_platdata(comp->card->dev);
size_t len = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
- char buf[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
int ssp_port, tdm_slot;
+ char *buf;
/* See parse_link_formatted_string() for dynamic naming when(s). */
if (!avs_mach_singular_ssp(mach))
@@ -1557,13 +1557,24 @@ static int avs_route_load(struct snd_soc_component *comp, int index,
return 0;
tdm_slot = avs_mach_ssp_tdm(mach, ssp_port);
+ buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
avs_ssp_sprint(buf, len, route->source, ssp_port, tdm_slot);
- strscpy((char *)route->source, buf, len);
+ route->source = buf;
+
+ buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
avs_ssp_sprint(buf, len, route->sink, ssp_port, tdm_slot);
- strscpy((char *)route->sink, buf, len);
+ route->sink = buf;
+
if (route->control) {
+ buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
avs_ssp_sprint(buf, len, route->control, ssp_port, tdm_slot);
- strscpy((char *)route->control, buf, len);
+ route->control = buf;
}
return 0;
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 3ed81ab649c5..4e0586034de4 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -652,7 +652,7 @@ if SND_SOC_SOF_INTEL_SOUNDWIRE
config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
tristate "SoundWire generic machine driver"
- depends on I2C && ACPI
+ depends on I2C && SPI_MASTER && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
depends on SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES || COMPILE_TEST
depends on SOUNDWIRE
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index b41a1147f1c3..a64d1989e28a 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -613,6 +613,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 CESIUM"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 140 CESIUM"),
},
.driver_data = (void *)(BYT_RT5640_IN1_MAP |
diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
index 48252fa9e39e..8e0ae3635a35 100644
--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
@@ -293,7 +293,7 @@ static const struct snd_soc_acpi_adr_device rt1318_1_single_adr[] = {
.adr = 0x000130025D131801,
.num_endpoints = 1,
.endpoints = &single_endpoint,
- .name_prefix = "rt1318"
+ .name_prefix = "rt1318-1"
}
};
diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
index acaf81fd6c9b..f848e14b091a 100644
--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
@@ -31,7 +31,7 @@ struct mt8183_da7219_max98357_priv {
static struct snd_soc_jack_pin mt8183_da7219_max98357_jack_pins[] = {
{
- .pin = "Headphone",
+ .pin = "Headphones",
.mask = SND_JACK_HEADPHONE,
},
{
@@ -626,7 +626,7 @@ static struct snd_soc_codec_conf mt6358_codec_conf[] = {
};
static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headphones"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Speakers"),
SOC_DAPM_PIN_SWITCH("Line Out"),
@@ -634,7 +634,7 @@ static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
static const
struct snd_soc_dapm_widget mt8183_da7219_max98357_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_HP("Headphones", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_SPK("Speakers", NULL),
SND_SOC_DAPM_SPK("Line Out", NULL),
@@ -680,7 +680,7 @@ static struct snd_soc_codec_conf mt8183_da7219_rt1015_codec_conf[] = {
};
static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headphones"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Left Spk"),
SOC_DAPM_PIN_SWITCH("Right Spk"),
@@ -689,7 +689,7 @@ static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
static const
struct snd_soc_dapm_widget mt8183_da7219_rt1015_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_HP("Headphones", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_SPK("Left Spk", NULL),
SND_SOC_DAPM_SPK("Right Spk", NULL),
diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
index ca8751190520..2832ef78eaed 100644
--- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
@@ -827,6 +827,7 @@ SND_SOC_DAILINK_DEFS(ETDM2_IN_BE,
SND_SOC_DAILINK_DEFS(ETDM1_OUT_BE,
DAILINK_COMP_ARRAY(COMP_CPU("ETDM1_OUT")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
SND_SOC_DAILINK_DEFS(ETDM2_OUT_BE,
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index df2e4be992d2..9bb08cadeb18 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -43,4 +43,5 @@ int mxs_pcm_platform_register(struct device *dev)
}
EXPORT_SYMBOL_GPL(mxs_pcm_platform_register);
+MODULE_DESCRIPTION("MXS ASoC PCM driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
index 68a38f63a2db..66b911b49e3f 100644
--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
@@ -141,14 +141,17 @@ static void q6apm_lpass_dai_shutdown(struct snd_pcm_substream *substream, struct
struct q6apm_lpass_dai_data *dai_data = dev_get_drvdata(dai->dev);
int rc;
- if (!dai_data->is_port_started[dai->id])
- return;
- rc = q6apm_graph_stop(dai_data->graph[dai->id]);
- if (rc < 0)
- dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
+ if (dai_data->is_port_started[dai->id]) {
+ rc = q6apm_graph_stop(dai_data->graph[dai->id]);
+ dai_data->is_port_started[dai->id] = false;
+ if (rc < 0)
+ dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
+ }
- q6apm_graph_close(dai_data->graph[dai->id]);
- dai_data->is_port_started[dai->id] = false;
+ if (dai_data->graph[dai->id]) {
+ q6apm_graph_close(dai_data->graph[dai->id]);
+ dai_data->graph[dai->id] = NULL;
+ }
}
static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
@@ -163,8 +166,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
q6apm_graph_stop(dai_data->graph[dai->id]);
dai_data->is_port_started[dai->id] = false;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
q6apm_graph_close(dai_data->graph[dai->id]);
+ dai_data->graph[dai->id] = NULL;
+ }
}
/**
@@ -183,26 +188,29 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
cfg->direction = substream->stream;
rc = q6apm_graph_media_format_pcm(dai_data->graph[dai->id], cfg);
-
if (rc) {
dev_err(dai->dev, "Failed to set media format %d\n", rc);
- return rc;
+ goto err;
}
rc = q6apm_graph_prepare(dai_data->graph[dai->id]);
if (rc) {
dev_err(dai->dev, "Failed to prepare Graph %d\n", rc);
- return rc;
+ goto err;
}
rc = q6apm_graph_start(dai_data->graph[dai->id]);
if (rc < 0) {
dev_err(dai->dev, "fail to start APM port %x\n", dai->id);
- return rc;
+ goto err;
}
dai_data->is_port_started[dai->id] = true;
return 0;
+err:
+ q6apm_graph_close(dai_data->graph[dai->id]);
+ dai_data->graph[dai->id] = NULL;
+ return rc;
}
static int q6apm_lpass_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
index eaa8bb016e50..f2eda2ff46c0 100644
--- a/sound/soc/qcom/sdw.c
+++ b/sound/soc/qcom/sdw.c
@@ -160,4 +160,5 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
return 0;
}
EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+MODULE_DESCRIPTION("Qualcomm ASoC SoundWire helper functions");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index 9fa020ef7eab..ee517d7b5b7b 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -655,8 +655,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
int err;
if (i2s_tdm->is_master_mode) {
- struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
+ struct clk *mclk;
+
+ if (i2s_tdm->clk_trcm == TRCM_TX) {
+ mclk = i2s_tdm->mclk_tx;
+ } else if (i2s_tdm->clk_trcm == TRCM_RX) {
+ mclk = i2s_tdm->mclk_rx;
+ } else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ mclk = i2s_tdm->mclk_tx;
+ } else {
+ mclk = i2s_tdm->mclk_rx;
+ }
err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
if (err)
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index ea3bc9318412..a63e942fdc0b 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -318,6 +318,12 @@ static int dmaengine_copy(struct snd_soc_component *component,
return 0;
}
+static int dmaengine_pcm_sync_stop(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_sync_stop(substream);
+}
+
static const struct snd_soc_component_driver dmaengine_pcm_component = {
.name = SND_DMAENGINE_PCM_DRV_NAME,
.probe_order = SND_SOC_COMP_ORDER_LATE,
@@ -327,6 +333,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component = {
.trigger = dmaengine_pcm_trigger,
.pointer = dmaengine_pcm_pointer,
.pcm_construct = dmaengine_pcm_new,
+ .sync_stop = dmaengine_pcm_sync_stop,
};
static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
@@ -339,6 +346,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
.pointer = dmaengine_pcm_pointer,
.copy = dmaengine_copy,
.pcm_construct = dmaengine_pcm_new,
+ .sync_stop = dmaengine_pcm_sync_stop,
};
static const char * const dmaengine_pcm_dma_channel_names[] = {
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 90ca37e008b3..6951ff7bc61e 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1021,6 +1021,7 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
struct snd_soc_tplg_hdr *hdr)
{
struct snd_soc_dapm_context *dapm = &tplg->comp->dapm;
+ const size_t maxlen = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
struct snd_soc_tplg_dapm_graph_elem *elem;
struct snd_soc_dapm_route *route;
int count, i;
@@ -1044,31 +1045,27 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
tplg->pos += sizeof(struct snd_soc_tplg_dapm_graph_elem);
/* validate routes */
- if (strnlen(elem->source, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+ if ((strnlen(elem->source, maxlen) == maxlen) ||
+ (strnlen(elem->sink, maxlen) == maxlen) ||
+ (strnlen(elem->control, maxlen) == maxlen)) {
ret = -EINVAL;
break;
}
- if (strnlen(elem->sink, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
- ret = -EINVAL;
- break;
- }
- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
- ret = -EINVAL;
+
+ route->source = devm_kstrdup(tplg->dev, elem->source, GFP_KERNEL);
+ route->sink = devm_kstrdup(tplg->dev, elem->sink, GFP_KERNEL);
+ if (!route->source || !route->sink) {
+ ret = -ENOMEM;
break;
}
- route->source = elem->source;
- route->sink = elem->sink;
-
- /* set to NULL atm for tplg users */
- route->connected = NULL;
- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0)
- route->control = NULL;
- else
- route->control = elem->control;
+ if (strnlen(elem->control, maxlen) != 0) {
+ route->control = devm_kstrdup(tplg->dev, elem->control, GFP_KERNEL);
+ if (!route->control) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
/* add route dobj to dobj_list */
route->dobj.type = SND_SOC_DOBJ_GRAPH;
diff --git a/sound/soc/sof/amd/acp-common.c b/sound/soc/sof/amd/acp-common.c
index b26fa471b431..81bb93e98358 100644
--- a/sound/soc/sof/amd/acp-common.c
+++ b/sound/soc/sof/amd/acp-common.c
@@ -258,8 +258,8 @@ const struct snd_sof_dsp_ops sof_acp_common_ops = {
};
EXPORT_SYMBOL_NS(sof_acp_common_ops, SND_SOC_SOF_AMD_COMMON);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("ACP SOF COMMON Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
-MODULE_DESCRIPTION("ACP SOF COMMON Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index c12c7f820529..74fd5f2b148b 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -801,7 +801,7 @@ void amd_sof_acp_remove(struct snd_sof_dev *sdev)
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
MODULE_IMPORT_NS(SND_AMD_SOUNDWIRE_ACPI);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/acp63.c b/sound/soc/sof/amd/acp63.c
index 9fb645079c3a..9e6eb4bfc805 100644
--- a/sound/soc/sof/amd/acp63.c
+++ b/sound/soc/sof/amd/acp63.c
@@ -140,7 +140,3 @@ int sof_acp63_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("ACP63 SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/pci-acp63.c b/sound/soc/sof/amd/pci-acp63.c
index eeaa12cceb23..fc8984447365 100644
--- a/sound/soc/sof/amd/pci-acp63.c
+++ b/sound/soc/sof/amd/pci-acp63.c
@@ -109,5 +109,6 @@ static struct pci_driver snd_sof_pci_amd_acp63_driver = {
module_pci_driver(snd_sof_pci_amd_acp63_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("ACP63 SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
index 2f288545c426..4bc30951f8b0 100644
--- a/sound/soc/sof/amd/pci-rmb.c
+++ b/sound/soc/sof/amd/pci-rmb.c
@@ -99,5 +99,6 @@ static struct pci_driver snd_sof_pci_amd_rmb_driver = {
module_pci_driver(snd_sof_pci_amd_rmb_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("REMBRANDT SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
index a0195e9b400c..e08875bdfa8b 100644
--- a/sound/soc/sof/amd/pci-rn.c
+++ b/sound/soc/sof/amd/pci-rn.c
@@ -103,5 +103,6 @@ static struct pci_driver snd_sof_pci_amd_rn_driver = {
module_pci_driver(snd_sof_pci_amd_rn_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("RENOIR SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-vangogh.c b/sound/soc/sof/amd/pci-vangogh.c
index 5cd3ac84752f..16eb2994fbab 100644
--- a/sound/soc/sof/amd/pci-vangogh.c
+++ b/sound/soc/sof/amd/pci-vangogh.c
@@ -101,5 +101,6 @@ static struct pci_driver snd_sof_pci_amd_vgh_driver = {
module_pci_driver(snd_sof_pci_amd_vgh_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("VANGOGH SOF Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/rembrandt.c b/sound/soc/sof/amd/rembrandt.c
index f1d1ba57ab3a..076f2f05a95c 100644
--- a/sound/soc/sof/amd/rembrandt.c
+++ b/sound/soc/sof/amd/rembrandt.c
@@ -140,7 +140,3 @@ int sof_rembrandt_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("REMBRANDT SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/renoir.c b/sound/soc/sof/amd/renoir.c
index 47b863f6258c..aa2d24dac6f5 100644
--- a/sound/soc/sof/amd/renoir.c
+++ b/sound/soc/sof/amd/renoir.c
@@ -115,7 +115,3 @@ int sof_renoir_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("RENOIR SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/vangogh.c b/sound/soc/sof/amd/vangogh.c
index bc6ffdb5471a..61372958c09d 100644
--- a/sound/soc/sof/amd/vangogh.c
+++ b/sound/soc/sof/amd/vangogh.c
@@ -161,7 +161,3 @@ int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
return 0;
}
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("VANGOGH SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 0a4917136ff9..83fe0401baf8 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -769,7 +769,7 @@ void sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
EXPORT_SYMBOL(sof_machine_unregister);
MODULE_AUTHOR("Liam Girdwood");
-MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
MODULE_ALIAS("platform:sof-audio");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/imx/imx-common.c b/sound/soc/sof/imx/imx-common.c
index 2981aea123d9..fce6d9cf6a6b 100644
--- a/sound/soc/sof/imx/imx-common.c
+++ b/sound/soc/sof/imx/imx-common.c
@@ -75,3 +75,4 @@ void imx8_dump(struct snd_sof_dev *sdev, u32 flags)
EXPORT_SYMBOL(imx8_dump);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for IMX platforms");
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 3021dc87ab5a..9f24e3c283dd 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -667,5 +667,6 @@ static struct platform_driver snd_sof_of_imx8_driver = {
};
module_platform_driver(snd_sof_of_imx8_driver);
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8 platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
index 4ed415f04345..1c7019c3cbd3 100644
--- a/sound/soc/sof/imx/imx8m.c
+++ b/sound/soc/sof/imx/imx8m.c
@@ -514,5 +514,6 @@ static struct platform_driver snd_sof_of_imx8m_driver = {
};
module_platform_driver(snd_sof_of_imx8m_driver);
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8M platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/imx/imx8ulp.c b/sound/soc/sof/imx/imx8ulp.c
index 8adfdd00413a..2585b1beef23 100644
--- a/sound/soc/sof/imx/imx8ulp.c
+++ b/sound/soc/sof/imx/imx8ulp.c
@@ -516,5 +516,6 @@ static struct platform_driver snd_sof_of_imx8ulp_driver = {
};
module_platform_driver(snd_sof_of_imx8ulp_driver);
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8ULP platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
index 86af4e9a716e..3505ac3a1b14 100644
--- a/sound/soc/sof/intel/atom.c
+++ b/sound/soc/sof/intel/atom.c
@@ -418,3 +418,4 @@ void atom_set_mach_params(struct snd_soc_acpi_mach *mach,
EXPORT_SYMBOL_NS(atom_set_mach_params, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Atom platforms");
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index 3262286a9a9d..7f18080e4e19 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -694,6 +694,7 @@ static struct platform_driver snd_sof_acpi_intel_bdw_driver = {
module_platform_driver(snd_sof_acpi_intel_bdw_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Broadwell platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index d78d11d4cfbf..7a57e162fb1c 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -475,6 +475,7 @@ static struct platform_driver snd_sof_acpi_intel_byt_driver = {
module_platform_driver(snd_sof_acpi_intel_byt_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Baytrail/Cherrytrail");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index da3db3ed379e..dc46888faa0d 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -457,3 +457,4 @@ EXPORT_SYMBOL_NS_GPL(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
#endif
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio codecs");
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
index 262b482dc0a8..b9a02750ce61 100644
--- a/sound/soc/sof/intel/hda-ctrl.c
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -328,6 +328,7 @@ void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev)
}
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for HDaudio platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_MLINK);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index ce675c22a5ab..1c823f9eea57 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -379,7 +379,7 @@ static int non_hda_dai_hw_params_data(struct snd_pcm_substream *substream,
sdev = widget_to_sdev(w);
if (sdev->dspless_mode_selected)
- goto skip_tlv;
+ return 0;
/* get stream_id */
hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
@@ -423,7 +423,6 @@ static int non_hda_dai_hw_params_data(struct snd_pcm_substream *substream,
dma_config->dma_stream_channel_map.device_count = 1;
dma_config->dma_priv_config_size = 0;
-skip_tlv:
return 0;
}
@@ -525,6 +524,9 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+ if (sdev->dspless_mode_selected)
+ return 0;
+
ipc4_copier = widget_to_copier(w);
dma_config_tlv = &ipc4_copier->dma_config_tlv[cpu_dai_id];
dma_config = &dma_config_tlv->dma_config;
@@ -615,12 +617,6 @@ static int hda_dai_suspend(struct hdac_bus *bus)
sdai = swidget->private;
ops = sdai->platform_private;
- ret = hda_link_dma_cleanup(hext_stream->link_substream,
- hext_stream,
- cpu_dai);
- if (ret < 0)
- return ret;
-
/* for consistency with TRIGGER_SUSPEND */
if (ops->post_trigger) {
ret = ops->post_trigger(sdev, cpu_dai,
@@ -629,6 +625,12 @@ static int hda_dai_suspend(struct hdac_bus *bus)
if (ret < 0)
return ret;
}
+
+ ret = hda_link_dma_cleanup(hext_stream->link_substream,
+ hext_stream,
+ cpu_dai);
+ if (ret < 0)
+ return ret;
}
}
diff --git a/sound/soc/sof/intel/hda-mlink.c b/sound/soc/sof/intel/hda-mlink.c
index 04bbc5c9904c..9a3559c78b62 100644
--- a/sound/soc/sof/intel/hda-mlink.c
+++ b/sound/soc/sof/intel/hda-mlink.c
@@ -972,3 +972,4 @@ EXPORT_SYMBOL_NS(hdac_bus_eml_enable_offload, SND_SOC_SOF_HDA_MLINK);
#endif
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio multi-link");
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index 9fb8521b896b..f6e24edd7adb 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -258,6 +258,12 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
snd_pcm_hw_constraint_integer(substream->runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
+ /* Limit the maximum number of periods to not exceed the BDL entries count */
+ if (runtime->hw.periods_max > HDA_DSP_MAX_BDL_ENTRIES)
+ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
+ runtime->hw.periods_min,
+ HDA_DSP_MAX_BDL_ENTRIES);
+
/* Only S16 and S32 supported by HDA hardware when used without DSP */
if (sdev->dspless_mode_selected)
snd_pcm_hw_constraint_mask64(substream->runtime, SNDRV_PCM_HW_PARAM_FORMAT,
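The periods cap added above follows the usual ALSA constraint pattern. A minimal sketch of the same idea in a generic PCM open callback, assuming a made-up MY_MAX_BDL_ENTRIES limit and a hypothetical my_pcm_open() (neither is part of this patch):

#include <sound/pcm.h>

#define MY_MAX_BDL_ENTRIES 32	/* assumed hardware BDL limit, illustrative only */

/* illustrative open callback, not taken from this driver */
static int my_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* whole periods only */
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* never advertise more periods than the buffer descriptor list can hold */
	if (runtime->hw.periods_max > MY_MAX_BDL_ENTRIES)
		snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
					     runtime->hw.periods_min,
					     MY_MAX_BDL_ENTRIES);
	return 0;
}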
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index e6a38de0a0aa..dead1c19558b 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -1522,6 +1522,7 @@ void hda_unregister_clients(struct snd_sof_dev *sdev)
}
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
index df6d897da290..f006dcf5458a 100644
--- a/sound/soc/sof/intel/pci-apl.c
+++ b/sound/soc/sof/intel/pci-apl.c
@@ -105,6 +105,7 @@ static struct pci_driver snd_sof_pci_intel_apl_driver = {
module_pci_driver(snd_sof_pci_intel_apl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for ApolloLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
index a39fa3657d55..a8406342f08b 100644
--- a/sound/soc/sof/intel/pci-cnl.c
+++ b/sound/soc/sof/intel/pci-cnl.c
@@ -143,6 +143,7 @@ static struct pci_driver snd_sof_pci_intel_cnl_driver = {
module_pci_driver(snd_sof_pci_intel_cnl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for CannonLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
index 9f1fe47475fb..25effca50d9f 100644
--- a/sound/soc/sof/intel/pci-icl.c
+++ b/sound/soc/sof/intel/pci-icl.c
@@ -108,6 +108,7 @@ static struct pci_driver snd_sof_pci_intel_icl_driver = {
module_pci_driver(snd_sof_pci_intel_icl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IceLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_CNL);
diff --git a/sound/soc/sof/intel/pci-lnl.c b/sound/soc/sof/intel/pci-lnl.c
index 68e5c90151b2..602c574064eb 100644
--- a/sound/soc/sof/intel/pci-lnl.c
+++ b/sound/soc/sof/intel/pci-lnl.c
@@ -70,6 +70,7 @@ static struct pci_driver snd_sof_pci_intel_lnl_driver = {
module_pci_driver(snd_sof_pci_intel_lnl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for LunarLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_MTL);
diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
index c685cb8d6171..8cb0333c033e 100644
--- a/sound/soc/sof/intel/pci-mtl.c
+++ b/sound/soc/sof/intel/pci-mtl.c
@@ -133,6 +133,7 @@ static struct pci_driver snd_sof_pci_intel_mtl_driver = {
module_pci_driver(snd_sof_pci_intel_mtl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MeteorLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c
index 862da8009543..8ca0231d7e4f 100644
--- a/sound/soc/sof/intel/pci-skl.c
+++ b/sound/soc/sof/intel/pci-skl.c
@@ -89,6 +89,7 @@ static struct pci_driver snd_sof_pci_intel_skl_driver = {
module_pci_driver(snd_sof_pci_intel_skl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for SkyLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index f73bb47cd79e..ebe1a7d16689 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -317,6 +317,7 @@ static struct pci_driver snd_sof_pci_intel_tgl_driver = {
module_pci_driver(snd_sof_pci_intel_tgl_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for TigerLake platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_CNL);
diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c
index 5c3069588bb7..1375c393827e 100644
--- a/sound/soc/sof/intel/pci-tng.c
+++ b/sound/soc/sof/intel/pci-tng.c
@@ -244,6 +244,7 @@ static struct pci_driver snd_sof_pci_intel_tng_driver = {
module_pci_driver(snd_sof_pci_intel_tng_driver);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Tangier platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index 307bee63756b..4df2be3d39eb 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -650,7 +650,7 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
struct sof_ipc4_audio_format *ipc4_fmt;
struct sof_ipc4_copier *ipc4_copier;
- bool single_fmt = false;
+ bool single_bitdepth = false;
u32 valid_bits = 0;
int dir, ret;
@@ -682,18 +682,18 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
- if (sof_ipc4_copier_is_single_format(sdev,
+ if (sof_ipc4_copier_is_single_bitdepth(sdev,
available_fmt->output_pin_fmts,
available_fmt->num_output_formats)) {
ipc4_fmt = &available_fmt->output_pin_fmts->audio_fmt;
- single_fmt = true;
+ single_bitdepth = true;
}
} else {
- if (sof_ipc4_copier_is_single_format(sdev,
+ if (sof_ipc4_copier_is_single_bitdepth(sdev,
available_fmt->input_pin_fmts,
available_fmt->num_input_formats)) {
ipc4_fmt = &available_fmt->input_pin_fmts->audio_fmt;
- single_fmt = true;
+ single_bitdepth = true;
}
}
}
@@ -703,7 +703,7 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
if (ret)
return ret;
- if (single_fmt) {
+ if (single_bitdepth) {
snd_mask_none(fmt);
valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(ipc4_fmt->fmt_cfg);
dev_dbg(component->dev, "Set %s to %d bit format\n", dai->name, valid_bits);
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index beff10989324..00987039c972 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -195,9 +195,10 @@ static void sof_ipc4_dbg_audio_format(struct device *dev, struct sof_ipc4_pin_fo
for (i = 0; i < num_formats; i++) {
struct sof_ipc4_audio_format *fmt = &pin_fmt[i].audio_fmt;
dev_dbg(dev,
- "Pin index #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x) buffer size %d\n",
- pin_fmt[i].pin_index, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
- fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg,
+ "Pin index #%d: %uHz, %ubit, %luch (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x) buffer size %d\n",
+ pin_fmt[i].pin_index, fmt->sampling_frequency, fmt->bit_depth,
+ SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg),
+ fmt->ch_map, fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg,
pin_fmt[i].buffer_size);
}
}
@@ -217,6 +218,14 @@ sof_ipc4_get_input_pin_audio_fmt(struct snd_sof_widget *swidget, int pin_index)
}
process = swidget->private;
+
+ /*
+ * For process modules without base config extension, base module config
+ * format is used for all input pins
+ */
+ if (process->init_config != SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT)
+ return &process->base_config.audio_fmt;
+
base_cfg_ext = process->base_config_ext;
/*
@@ -1422,7 +1431,7 @@ static int snd_sof_get_hw_config_params(struct snd_sof_dev *sdev, struct snd_sof
static int
snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
- bool single_format,
+ bool single_bitdepth,
struct snd_pcm_hw_params *params, u32 dai_index,
u32 linktype, u8 dir, u32 **dst, u32 *len)
{
@@ -1445,7 +1454,7 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
* Look for 32-bit blob first instead of 16-bit if copier
* supports multiple formats
*/
- if (bit_depth == 16 && !single_format) {
+ if (bit_depth == 16 && !single_bitdepth) {
dev_dbg(sdev->dev, "Looking for 32-bit blob first for DMIC\n");
format_change = true;
bit_depth = 32;
@@ -1483,6 +1492,8 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
dir, dev_type);
if (!cfg) {
+ bool get_new_blob = false;
+
if (format_change) {
/*
* The 32-bit blob was not found in NHLT table, try to
@@ -1490,7 +1501,20 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
*/
bit_depth = params_width(params);
format_change = false;
+ get_new_blob = true;
+ } else if (linktype == SOF_DAI_INTEL_DMIC && !single_bitdepth) {
+ /*
+ * The requested 32-bit blob (no format change for the
+ * blob request) was not found in NHLT table, try to
+ * look for 16-bit blob if the copier supports multiple
+ * formats
+ */
+ bit_depth = 16;
+ format_change = true;
+ get_new_blob = true;
+ }
+ if (get_new_blob) {
cfg = intel_nhlt_get_endpoint_blob(sdev->dev, ipc4_data->nhlt,
dai_index, nhlt_type,
bit_depth, bit_depth,
@@ -1513,8 +1537,8 @@ out:
if (format_change) {
/*
- * Update the params to reflect that we have loaded 32-bit blob
- * instead of the 16-bit.
+ * Update the params to reflect that different blob was loaded
+ * instead of the requested bit depth (16 -> 32 or 32 -> 16).
* This information is going to be used by the caller to find
* matching copier format on the dai side.
*/
@@ -1522,7 +1546,11 @@ out:
m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
snd_mask_none(m);
- snd_mask_set_format(m, SNDRV_PCM_FORMAT_S32_LE);
+ if (bit_depth == 16)
+ snd_mask_set_format(m, SNDRV_PCM_FORMAT_S16_LE);
+ else
+ snd_mask_set_format(m, SNDRV_PCM_FORMAT_S32_LE);
+
}
return 0;
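The DMIC blob selection above reduces to a fixed lookup order. A standalone restatement of that order, using hypothetical names and no SOF/NHLT types (illustrative only, not driver code):

#include <stdbool.h>

/* Which bit depths to try, in order, when fetching a DMIC NHLT blob. */
static void dmic_blob_lookup_order(unsigned int requested_bits,
				   bool single_bitdepth,
				   unsigned int order[2])
{
	if (single_bitdepth) {
		/* copier supports one bit depth: ask for exactly that */
		order[0] = requested_bits;
		order[1] = 0;		/* no fallback */
	} else {
		/* multi-format copier: prefer 32-bit, fall back to 16-bit */
		order[0] = 32;
		order[1] = 16;
	}
}

If only the fallback depth is found, the code above then rewrites the FORMAT mask in the hw_params (S16_LE or S32_LE) so the later DAI-side copier format lookup matches the blob that was actually loaded.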
@@ -1530,7 +1558,7 @@ out:
#else
static int
snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
- bool single_format,
+ bool single_bitdepth,
struct snd_pcm_hw_params *params, u32 dai_index,
u32 linktype, u8 dir, u32 **dst, u32 *len)
{
@@ -1538,9 +1566,9 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
}
#endif
-bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
- struct sof_ipc4_pin_format *pin_fmts,
- u32 pin_fmts_size)
+bool sof_ipc4_copier_is_single_bitdepth(struct snd_sof_dev *sdev,
+ struct sof_ipc4_pin_format *pin_fmts,
+ u32 pin_fmts_size)
{
struct sof_ipc4_audio_format *fmt;
u32 valid_bits;
@@ -1564,14 +1592,65 @@ bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
}
static int
+sof_ipc4_adjust_params_to_dai_format(struct snd_sof_dev *sdev,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc4_pin_format *pin_fmts,
+ u32 pin_fmts_size)
+{
+ u32 params_mask = BIT(SNDRV_PCM_HW_PARAM_RATE) |
+ BIT(SNDRV_PCM_HW_PARAM_CHANNELS) |
+ BIT(SNDRV_PCM_HW_PARAM_FORMAT);
+ struct sof_ipc4_audio_format *fmt;
+ u32 rate, channels, valid_bits;
+ int i;
+
+ fmt = &pin_fmts[0].audio_fmt;
+ rate = fmt->sampling_frequency;
+ channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+
+ /* check if parameters in topology defined formats are the same */
+ for (i = 1; i < pin_fmts_size; i++) {
+ u32 val;
+
+ fmt = &pin_fmts[i].audio_fmt;
+
+ if (params_mask & BIT(SNDRV_PCM_HW_PARAM_RATE)) {
+ val = fmt->sampling_frequency;
+ if (val != rate)
+ params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_RATE);
+ }
+ if (params_mask & BIT(SNDRV_PCM_HW_PARAM_CHANNELS)) {
+ val = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ if (val != channels)
+ params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_CHANNELS);
+ }
+ if (params_mask & BIT(SNDRV_PCM_HW_PARAM_FORMAT)) {
+ val = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+ if (val != valid_bits)
+ params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_FORMAT);
+ }
+ }
+
+ if (params_mask)
+ return sof_ipc4_update_hw_params(sdev, params,
+ &pin_fmts[0].audio_fmt,
+ params_mask);
+
+ return 0;
+}
+
+static int
sof_ipc4_prepare_dai_copier(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
struct snd_pcm_hw_params *params, int dir)
{
struct sof_ipc4_available_audio_format *available_fmt;
struct snd_pcm_hw_params dai_params = *params;
struct sof_ipc4_copier_data *copier_data;
+ struct sof_ipc4_pin_format *pin_fmts;
struct sof_ipc4_copier *ipc4_copier;
- bool single_format;
+ bool single_bitdepth;
+ u32 num_pin_fmts;
int ret;
ipc4_copier = dai->private;
@@ -1579,40 +1658,26 @@ sof_ipc4_prepare_dai_copier(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
available_fmt = &ipc4_copier->available_fmt;
/*
- * If the copier on the DAI side supports only single bit depth then
- * this depth (format) should be used to look for the NHLT blob (if
- * needed) and in case of capture this should be used for the input
- * format lookup
+ * Fixup the params based on the format parameters of the DAI. If any
+ * of the RATE, CHANNELS, bit depth is static among the formats then
+ * narrow the params to only allow that specific parameter value.
*/
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
- single_format = sof_ipc4_copier_is_single_format(sdev,
- available_fmt->output_pin_fmts,
- available_fmt->num_output_formats);
-
- /* Update the dai_params with the only supported format */
- if (single_format) {
- ret = sof_ipc4_update_hw_params(sdev, &dai_params,
- &available_fmt->output_pin_fmts[0].audio_fmt,
- BIT(SNDRV_PCM_HW_PARAM_FORMAT));
- if (ret)
- return ret;
- }
+ pin_fmts = available_fmt->output_pin_fmts;
+ num_pin_fmts = available_fmt->num_output_formats;
} else {
- single_format = sof_ipc4_copier_is_single_format(sdev,
- available_fmt->input_pin_fmts,
- available_fmt->num_input_formats);
-
- /* Update the dai_params with the only supported format */
- if (single_format) {
- ret = sof_ipc4_update_hw_params(sdev, &dai_params,
- &available_fmt->input_pin_fmts[0].audio_fmt,
- BIT(SNDRV_PCM_HW_PARAM_FORMAT));
- if (ret)
- return ret;
- }
+ pin_fmts = available_fmt->input_pin_fmts;
+ num_pin_fmts = available_fmt->num_input_formats;
}
- ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, single_format,
+ ret = sof_ipc4_adjust_params_to_dai_format(sdev, &dai_params, pin_fmts,
+ num_pin_fmts);
+ if (ret)
+ return ret;
+
+ single_bitdepth = sof_ipc4_copier_is_single_bitdepth(sdev, pin_fmts,
+ num_pin_fmts);
+ ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, single_bitdepth,
&dai_params,
ipc4_copier->dai_index,
ipc4_copier->dai_type, dir,
@@ -1647,7 +1712,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
u32 out_ref_rate, out_ref_channels;
u32 deep_buffer_dma_ms = 0;
int output_fmt_index;
- bool single_output_format;
+ bool single_output_bitdepth;
int i;
dev_dbg(sdev->dev, "copier %s, type %d", swidget->widget->name, swidget->id);
@@ -1784,9 +1849,9 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
return ret;
/* set the reference params for output format selection */
- single_output_format = sof_ipc4_copier_is_single_format(sdev,
- available_fmt->output_pin_fmts,
- available_fmt->num_output_formats);
+ single_output_bitdepth = sof_ipc4_copier_is_single_bitdepth(sdev,
+ available_fmt->output_pin_fmts,
+ available_fmt->num_output_formats);
switch (swidget->id) {
case snd_soc_dapm_aif_in:
case snd_soc_dapm_dai_out:
@@ -1798,7 +1863,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
out_ref_rate = in_fmt->sampling_frequency;
out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
- if (!single_output_format)
+ if (!single_output_bitdepth)
out_ref_valid_bits =
SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
break;
@@ -1807,7 +1872,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
case snd_soc_dapm_dai_in:
out_ref_rate = params_rate(fe_params);
out_ref_channels = params_channels(fe_params);
- if (!single_output_format) {
+ if (!single_output_bitdepth) {
out_ref_valid_bits = sof_ipc4_get_valid_bits(sdev, fe_params);
if (out_ref_valid_bits < 0)
return out_ref_valid_bits;
@@ -1825,7 +1890,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
* if the output format is the same across all available output formats, choose
* that as the reference.
*/
- if (single_output_format) {
+ if (single_output_bitdepth) {
struct sof_ipc4_audio_format *out_fmt;
out_fmt = &available_fmt->output_pin_fmts[0].audio_fmt;
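sof_ipc4_adjust_params_to_dai_format() only forces a parameter when every topology-defined format agrees on it. A simplified, self-contained sketch of that check (struct and macro names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for one topology-defined DAI format. */
struct dai_fmt {
	uint32_t rate;
	uint32_t channels;
	uint32_t valid_bits;
};

#define FIXED_RATE	(1u << 0)
#define FIXED_CHANNELS	(1u << 1)
#define FIXED_BITS	(1u << 2)

/* Return a mask of the parameters that are identical across all formats. */
static uint32_t uniform_param_mask(const struct dai_fmt *fmts, unsigned int n)
{
	uint32_t mask = FIXED_RATE | FIXED_CHANNELS | FIXED_BITS;
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (fmts[i].rate != fmts[0].rate)
			mask &= ~FIXED_RATE;
		if (fmts[i].channels != fmts[0].channels)
			mask &= ~FIXED_CHANNELS;
		if (fmts[i].valid_bits != fmts[0].valid_bits)
			mask &= ~FIXED_BITS;
	}
	return mask;	/* only these may be forced onto the hw_params */
}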
diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
index 4488762f6a71..f4dc499c0ffe 100644
--- a/sound/soc/sof/ipc4-topology.h
+++ b/sound/soc/sof/ipc4-topology.h
@@ -476,7 +476,7 @@ struct sof_ipc4_process {
u32 init_config;
};
-bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
- struct sof_ipc4_pin_format *pin_fmts,
- u32 pin_fmts_size);
+bool sof_ipc4_copier_is_single_bitdepth(struct snd_sof_dev *sdev,
+ struct sof_ipc4_pin_format *pin_fmts,
+ u32 pin_fmts_size);
#endif
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186.c b/sound/soc/sof/mediatek/mt8186/mt8186.c
index c63e0d2f4b96..bea1b9d9ca28 100644
--- a/sound/soc/sof/mediatek/mt8186/mt8186.c
+++ b/sound/soc/sof/mediatek/mt8186/mt8186.c
@@ -666,6 +666,7 @@ static struct platform_driver snd_sof_of_mt8186_driver = {
};
module_platform_driver(snd_sof_of_mt8186_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MT8186/MT8188 platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
index fc1c016104ae..31dc98d1b1d8 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
@@ -619,6 +619,7 @@ static struct platform_driver snd_sof_of_mt8195_driver = {
};
module_platform_driver(snd_sof_of_mt8195_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MTL 8195 platforms");
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/mediatek/mtk-adsp-common.c b/sound/soc/sof/mediatek/mtk-adsp-common.c
index de8dbe27cd0d..20bcf5590eb8 100644
--- a/sound/soc/sof/mediatek/mtk-adsp-common.c
+++ b/sound/soc/sof/mediatek/mtk-adsp-common.c
@@ -82,3 +82,4 @@ void mtk_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
EXPORT_SYMBOL(mtk_adsp_dump);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for MTK ADSP platforms");
diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
index fdcbe33d3dcf..b12b3d865ae3 100644
--- a/sound/soc/sof/nocodec.c
+++ b/sound/soc/sof/nocodec.c
@@ -110,7 +110,7 @@ static struct platform_driver sof_nocodec_audio = {
};
module_platform_driver(sof_nocodec_audio)
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("ASoC sof nocodec");
MODULE_AUTHOR("Liam Girdwood");
-MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:sof-nocodec");
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
index 2d96d00f1c44..b196b2b74c26 100644
--- a/sound/soc/sof/sof-acpi-dev.c
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -100,3 +100,4 @@ void sof_acpi_remove(struct platform_device *pdev)
EXPORT_SYMBOL_NS(sof_acpi_remove, SND_SOC_SOF_ACPI_DEV);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for ACPI platforms");
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
index b3ac040811e7..ef9318947d74 100644
--- a/sound/soc/sof/sof-audio.c
+++ b/sound/soc/sof/sof-audio.c
@@ -485,7 +485,7 @@ sink_prepare:
if (ret < 0) {
/* unprepare the source widget */
if (widget_ops[widget->id].ipc_unprepare &&
- swidget && swidget->prepared) {
+ swidget && swidget->prepared && swidget->use_count == 0) {
widget_ops[widget->id].ipc_unprepare(swidget);
swidget->prepared = false;
}
diff --git a/sound/soc/sof/sof-client-ipc-flood-test.c b/sound/soc/sof/sof-client-ipc-flood-test.c
index 435614926092..e7d2001140e8 100644
--- a/sound/soc/sof/sof-client-ipc-flood-test.c
+++ b/sound/soc/sof/sof-client-ipc-flood-test.c
@@ -394,6 +394,6 @@ static struct auxiliary_driver sof_ipc_flood_client_drv = {
module_auxiliary_driver(sof_ipc_flood_client_drv);
-MODULE_DESCRIPTION("SOF IPC Flood Test Client Driver");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Flood Test Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-ipc-kernel-injector.c b/sound/soc/sof/sof-client-ipc-kernel-injector.c
index 6973b6690df4..d3f541069b24 100644
--- a/sound/soc/sof/sof-client-ipc-kernel-injector.c
+++ b/sound/soc/sof/sof-client-ipc-kernel-injector.c
@@ -157,6 +157,6 @@ static struct auxiliary_driver sof_msg_inject_client_drv = {
module_auxiliary_driver(sof_msg_inject_client_drv);
-MODULE_DESCRIPTION("SOF IPC Kernel Injector Client Driver");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Kernel Injector Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-ipc-msg-injector.c b/sound/soc/sof/sof-client-ipc-msg-injector.c
index af22e6421029..d0f8beb9d000 100644
--- a/sound/soc/sof/sof-client-ipc-msg-injector.c
+++ b/sound/soc/sof/sof-client-ipc-msg-injector.c
@@ -335,6 +335,6 @@ static struct auxiliary_driver sof_msg_inject_client_drv = {
module_auxiliary_driver(sof_msg_inject_client_drv);
-MODULE_DESCRIPTION("SOF IPC Message Injector Client Driver");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Message Injector Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-probes.c b/sound/soc/sof/sof-client-probes.c
index b8f297307565..ccc7d38ddc38 100644
--- a/sound/soc/sof/sof-client-probes.c
+++ b/sound/soc/sof/sof-client-probes.c
@@ -540,6 +540,6 @@ static struct auxiliary_driver sof_probes_client_drv = {
module_auxiliary_driver(sof_probes_client_drv);
-MODULE_DESCRIPTION("SOF Probes Client Driver");
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SOF Probes Client Driver");
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-of-dev.c b/sound/soc/sof/sof-of-dev.c
index b9a499e92b9a..71f7153cf79c 100644
--- a/sound/soc/sof/sof-of-dev.c
+++ b/sound/soc/sof/sof-of-dev.c
@@ -93,3 +93,4 @@ void sof_of_shutdown(struct platform_device *pdev)
EXPORT_SYMBOL(sof_of_shutdown);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for OF/DT platforms");
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 4365405783e6..38f2187da5de 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -304,3 +304,4 @@ void sof_pci_shutdown(struct pci_dev *pci)
EXPORT_SYMBOL_NS(sof_pci_shutdown, SND_SOC_SOF_PCI_DEV);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for PCI platforms");
diff --git a/sound/soc/sof/sof-utils.c b/sound/soc/sof/sof-utils.c
index cad041bf56cc..44608682e9f8 100644
--- a/sound/soc/sof/sof-utils.c
+++ b/sound/soc/sof/sof-utils.c
@@ -73,3 +73,4 @@ int snd_sof_create_page_table(struct device *dev,
EXPORT_SYMBOL(snd_sof_create_page_table);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF utils");
diff --git a/sound/soc/sof/stream-ipc.c b/sound/soc/sof/stream-ipc.c
index eb71303aa24c..794c7bbccbaf 100644
--- a/sound/soc/sof/stream-ipc.c
+++ b/sound/soc/sof/stream-ipc.c
@@ -125,5 +125,3 @@ int sof_stream_pcm_close(struct snd_sof_dev *sdev,
return 0;
}
EXPORT_SYMBOL(sof_stream_pcm_close);
-
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
index ccbc3fcdadd5..3cf8c84beff9 100644
--- a/sound/soc/sof/xtensa/core.c
+++ b/sound/soc/sof/xtensa/core.c
@@ -151,5 +151,5 @@ const struct dsp_arch_ops sof_xtensa_arch_ops = {
};
EXPORT_SYMBOL_NS(sof_xtensa_arch_ops, SND_SOC_SOF_XTENSA);
-MODULE_DESCRIPTION("SOF Xtensa DSP support");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF Xtensa DSP support");
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index 1e760c315521..2b1ed91a736c 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -1472,10 +1472,11 @@ static int davinci_mcasp_hw_rule_min_periodsize(
{
struct snd_interval *period_size = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ u8 numevt = *((u8 *)rule->private);
struct snd_interval frames;
snd_interval_any(&frames);
- frames.min = 64;
+ frames.min = numevt;
frames.integer = 1;
return snd_interval_refine(period_size, &frames);
@@ -1490,6 +1491,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
u32 max_channels = 0;
int i, dir, ret;
int tdm_slots = mcasp->tdm_slots;
+ u8 *numevt;
/* Do not allow more then one stream per direction */
if (mcasp->substreams[substream->stream])
@@ -1589,9 +1591,12 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
return ret;
}
+ numevt = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ &mcasp->txnumevt :
+ &mcasp->rxnumevt;
snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- davinci_mcasp_hw_rule_min_periodsize, NULL,
+ davinci_mcasp_hw_rule_min_periodsize, numevt,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
return 0;
diff --git a/sound/soc/ti/omap-hdmi.c b/sound/soc/ti/omap-hdmi.c
index 639bc83f4263..cf43ac19c4a6 100644
--- a/sound/soc/ti/omap-hdmi.c
+++ b/sound/soc/ti/omap-hdmi.c
@@ -354,11 +354,7 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
if (!card)
return -ENOMEM;
- card->name = devm_kasprintf(dev, GFP_KERNEL,
- "HDMI %s", dev_name(ad->dssdev));
- if (!card->name)
- return -ENOMEM;
-
+ card->name = "HDMI";
card->owner = THIS_MODULE;
card->dai_link =
devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 52f076afeb96..7b32b99023a2 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -86,6 +86,9 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+#define ARM_CPU_PART_CORTEX_X4 0xD82
+#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -159,6 +162,9 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
+#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index ce2ee8f1e361..9306726337fe 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -19,7 +19,6 @@
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
-#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_MEMFD_SECRET
#include <asm-generic/unistd.h>
diff --git a/tools/arch/loongarch/include/uapi/asm/unistd.h b/tools/arch/loongarch/include/uapi/asm/unistd.h
index 0c743344e92d..8eeaac0087c3 100644
--- a/tools/arch/loongarch/include/uapi/asm/unistd.h
+++ b/tools/arch/loongarch/include/uapi/asm/unistd.h
@@ -4,6 +4,5 @@
*/
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#include <asm-generic/unistd.h>
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index e72c2b872957..e022e6eb766c 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -170,6 +170,10 @@
* CPU is not affected by Branch
* History Injection.
*/
+#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
+ * IA32_XAPIC_DISABLE_STATUS MSR
+ * supported
+ */
#define ARCH_CAP_PBRSB_NO BIT(24) /*
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
@@ -192,11 +196,6 @@
* File.
*/
-#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
- * IA32_XAPIC_DISABLE_STATUS MSR
- * supported
- */
-
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
* Writeback and invalidate the
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index ef11aa4cab42..9fae1b73b529 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -457,8 +457,13 @@ struct kvm_sync_regs {
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
-/* attributes for system fd (group 0) */
-#define KVM_X86_XCOMP_GUEST_SUPP 0
+/* vendor-independent attributes for system fd (group 0) */
+#define KVM_X86_GRP_SYSTEM 0
+# define KVM_X86_XCOMP_GUEST_SUPP 0
+
+/* vendor-specific groups and attributes for system fd */
+#define KVM_X86_GRP_SEV 1
+# define KVM_X86_SEV_VMSA_FEATURES 0
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -689,6 +694,9 @@ enum sev_cmd_id {
/* Guest Migration Extension */
KVM_SEV_SEND_CANCEL,
+ /* Second time is the charm; improved versions of the above ioctls. */
+ KVM_SEV_INIT2,
+
KVM_SEV_NR_MAX,
};
@@ -700,6 +708,14 @@ struct kvm_sev_cmd {
__u32 sev_fd;
};
+struct kvm_sev_init {
+ __u64 vmsa_features;
+ __u32 flags;
+ __u16 ghcb_version;
+ __u16 pad1;
+ __u32 pad2[8];
+};
+
struct kvm_sev_launch_start {
__u32 handle;
__u32 policy;
@@ -856,5 +872,7 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
+#define KVM_X86_SEV_VM 2
+#define KVM_X86_SEV_ES_VM 3
#endif /* _ASM_X86_KVM_H */
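For orientation, a hedged userspace sketch of how the new definitions are meant to fit together: create a VM of the new KVM_X86_SEV_VM type, then issue KVM_SEV_INIT2 through KVM_MEMORY_ENCRYPT_OP. Error handling is omitted and the flow is an assumption based on the header alone, not taken from this patch.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* hypothetical helper, for illustration only */
static int sev_vm_create_and_init(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int sev_fd = open("/dev/sev", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_X86_SEV_VM);
	struct kvm_sev_init init = { .vmsa_features = 0 };
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_INIT2,
		.data = (uintptr_t)&init,
		.sev_fd = (uint32_t)sev_fd,
	};

	/* SEV commands are issued via the VM's MEMORY_ENCRYPT_OP ioctl */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}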
diff --git a/tools/arch/x86/kcpuid/Makefile b/tools/arch/x86/kcpuid/Makefile
index 87b554fab14b..d0b4b0ed10ff 100644
--- a/tools/arch/x86/kcpuid/Makefile
+++ b/tools/arch/x86/kcpuid/Makefile
@@ -19,6 +19,6 @@ clean :
@rm -f kcpuid
install : kcpuid
- install -d $(DESTDIR)$(BINDIR)
+ install -d $(DESTDIR)$(BINDIR) $(DESTDIR)$(HWDATADIR)
install -m 755 -p kcpuid $(DESTDIR)$(BINDIR)/kcpuid
- install -m 444 -p cpuid.csv $(HWDATADIR)/cpuid.csv
+ install -m 444 -p cpuid.csv $(DESTDIR)$(HWDATADIR)/cpuid.csv
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index eaba24320fb2..3f6bca03ad2e 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -28,7 +28,7 @@ BTF COMMANDS
| **bpftool** **btf help**
|
| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
-| *FORMAT* := { **raw** | **c** }
+| *FORMAT* := { **raw** | **c** [**unsorted**] }
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
@@ -63,7 +63,9 @@ bpftool btf dump *BTF_SRC*
pahole.
**format** option can be used to override default (raw) output format. Raw
- (**raw**) or C-syntax (**c**) output formats are supported.
+ (**raw**) or C-syntax (**c**) output formats are supported. With C-style
+ formatting, the output is sorted by default. Use the **unsorted** option
+ to avoid sorting the output.
bpftool btf help
Print short help message.
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index dfa4f1bebbb3..ba927379eb20 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -204,10 +204,11 @@ ifeq ($(feature-clang-bpf-co-re),1)
BUILD_BPF_SKELS := 1
-$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP)
ifeq ($(VMLINUX_H),)
+$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP)
$(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@
else
+$(OUTPUT)vmlinux.h: $(VMLINUX_H)
$(Q)cp "$(VMLINUX_H)" $@
endif
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 04afe2ac2228..be99d49b8714 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -930,6 +930,9 @@ _bpftool()
format)
COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) )
;;
+ c)
+ COMPREPLY=( $( compgen -W "unsorted" -- "$cur" ) )
+ ;;
*)
# emit extra options
case ${words[3]} in
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 91fcb75babe3..6789c7a4d5ca 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -20,6 +20,8 @@
#include "json_writer.h"
#include "main.h"
+#define KFUNC_DECL_TAG "bpf_kfunc"
+
static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_UNKN] = "UNKNOWN",
[BTF_KIND_INT] = "INT",
@@ -43,6 +45,13 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_ENUM64] = "ENUM64",
};
+struct sort_datum {
+ int index;
+ int type_rank;
+ const char *sort_name;
+ const char *own_name;
+};
+
static const char *btf_int_enc_str(__u8 encoding)
{
switch (encoding) {
@@ -454,15 +463,171 @@ static int dump_btf_raw(const struct btf *btf,
return 0;
}
+static int dump_btf_kfuncs(struct btf_dump *d, const struct btf *btf)
+{
+ LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts);
+ int cnt = btf__type_cnt(btf);
+ int i;
+
+ printf("\n/* BPF kfuncs */\n");
+ printf("#ifndef BPF_NO_KFUNC_PROTOTYPES\n");
+
+ for (i = 1; i < cnt; i++) {
+ const struct btf_type *t = btf__type_by_id(btf, i);
+ const char *name;
+ int err;
+
+ if (!btf_is_decl_tag(t))
+ continue;
+
+ if (btf_decl_tag(t)->component_idx != -1)
+ continue;
+
+ name = btf__name_by_offset(btf, t->name_off);
+ if (strncmp(name, KFUNC_DECL_TAG, sizeof(KFUNC_DECL_TAG)))
+ continue;
+
+ t = btf__type_by_id(btf, t->type);
+ if (!btf_is_func(t))
+ continue;
+
+ printf("extern ");
+
+ opts.field_name = btf__name_by_offset(btf, t->name_off);
+ err = btf_dump__emit_type_decl(d, t->type, &opts);
+ if (err)
+ return err;
+
+ printf(" __weak __ksym;\n");
+ }
+
+ printf("#endif\n\n");
+
+ return 0;
+}
+
static void __printf(2, 0) btf_dump_printf(void *ctx,
const char *fmt, va_list args)
{
vfprintf(stdout, fmt, args);
}
+static int btf_type_rank(const struct btf *btf, __u32 index, bool has_name)
+{
+ const struct btf_type *t = btf__type_by_id(btf, index);
+ const int kind = btf_kind(t);
+ const int max_rank = 10;
+
+ if (t->name_off)
+ has_name = true;
+
+ switch (kind) {
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ return has_name ? 1 : 0;
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ return 2;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return has_name ? 3 : max_rank;
+ case BTF_KIND_FUNC_PROTO:
+ return has_name ? 4 : max_rank;
+ case BTF_KIND_ARRAY:
+ if (has_name)
+ return btf_type_rank(btf, btf_array(t)->type, has_name);
+ return max_rank;
+ case BTF_KIND_TYPE_TAG:
+ case BTF_KIND_CONST:
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_DECL_TAG:
+ if (has_name)
+ return btf_type_rank(btf, t->type, has_name);
+ return max_rank;
+ default:
+ return max_rank;
+ }
+}
+
+static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool from_ref)
+{
+ const struct btf_type *t = btf__type_by_id(btf, index);
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64: {
+ int name_off = t->name_off;
+
+ /* Use name of the first element for anonymous enums if allowed */
+ if (!from_ref && !t->name_off && btf_vlen(t))
+ name_off = btf_enum(t)->name_off;
+
+ return btf__name_by_offset(btf, name_off);
+ }
+ case BTF_KIND_ARRAY:
+ return btf_type_sort_name(btf, btf_array(t)->type, true);
+ case BTF_KIND_TYPE_TAG:
+ case BTF_KIND_CONST:
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_DECL_TAG:
+ return btf_type_sort_name(btf, t->type, true);
+ default:
+ return btf__name_by_offset(btf, t->name_off);
+ }
+ return NULL;
+}
+
+static int btf_type_compare(const void *left, const void *right)
+{
+ const struct sort_datum *d1 = (const struct sort_datum *)left;
+ const struct sort_datum *d2 = (const struct sort_datum *)right;
+ int r;
+
+ if (d1->type_rank != d2->type_rank)
+ return d1->type_rank < d2->type_rank ? -1 : 1;
+
+ r = strcmp(d1->sort_name, d2->sort_name);
+ if (r)
+ return r;
+
+ return strcmp(d1->own_name, d2->own_name);
+}
+
+static struct sort_datum *sort_btf_c(const struct btf *btf)
+{
+ struct sort_datum *datums;
+ int n;
+
+ n = btf__type_cnt(btf);
+ datums = malloc(sizeof(struct sort_datum) * n);
+ if (!datums)
+ return NULL;
+
+ for (int i = 0; i < n; ++i) {
+ struct sort_datum *d = datums + i;
+ const struct btf_type *t = btf__type_by_id(btf, i);
+
+ d->index = i;
+ d->type_rank = btf_type_rank(btf, i, false);
+ d->sort_name = btf_type_sort_name(btf, i, false);
+ d->own_name = btf__name_by_offset(btf, t->name_off);
+ }
+
+ qsort(datums, n, sizeof(struct sort_datum), btf_type_compare);
+
+ return datums;
+}
+
static int dump_btf_c(const struct btf *btf,
- __u32 *root_type_ids, int root_type_cnt)
+ __u32 *root_type_ids, int root_type_cnt, bool sort_dump)
{
+ struct sort_datum *datums = NULL;
struct btf_dump *d;
int err = 0, i;
@@ -476,6 +641,12 @@ static int dump_btf_c(const struct btf *btf,
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
printf("#endif\n\n");
+ printf("#ifndef __ksym\n");
+ printf("#define __ksym __attribute__((section(\".ksyms\")))\n");
+ printf("#endif\n\n");
+ printf("#ifndef __weak\n");
+ printf("#define __weak __attribute__((weak))\n");
+ printf("#endif\n\n");
if (root_type_cnt) {
for (i = 0; i < root_type_cnt; i++) {
@@ -486,11 +657,19 @@ static int dump_btf_c(const struct btf *btf,
} else {
int cnt = btf__type_cnt(btf);
+ if (sort_dump)
+ datums = sort_btf_c(btf);
for (i = 1; i < cnt; i++) {
- err = btf_dump__dump_type(d, i);
+ int idx = datums ? datums[i].index : i;
+
+ err = btf_dump__dump_type(d, idx);
if (err)
goto done;
}
+
+ err = dump_btf_kfuncs(d, btf);
+ if (err)
+ goto done;
}
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
@@ -500,6 +679,7 @@ static int dump_btf_c(const struct btf *btf,
printf("#endif /* __VMLINUX_H__ */\n");
done:
+ free(datums);
btf_dump__free(d);
return err;
}
@@ -549,10 +729,10 @@ static bool btf_is_kernel_module(__u32 btf_id)
static int do_dump(int argc, char **argv)
{
+ bool dump_c = false, sort_dump_c = true;
struct btf *btf = NULL, *base = NULL;
__u32 root_type_ids[2];
int root_type_cnt = 0;
- bool dump_c = false;
__u32 btf_id = -1;
const char *src;
int fd = -1;
@@ -663,6 +843,9 @@ static int do_dump(int argc, char **argv)
goto done;
}
NEXT_ARG();
+ } else if (is_prefix(*argv, "unsorted")) {
+ sort_dump_c = false;
+ NEXT_ARG();
} else {
p_err("unrecognized option: '%s'", *argv);
err = -EINVAL;
@@ -691,7 +874,7 @@ static int do_dump(int argc, char **argv)
err = -ENOTSUP;
goto done;
}
- err = dump_btf_c(btf, root_type_ids, root_type_cnt);
+ err = dump_btf_c(btf, root_type_ids, root_type_cnt, sort_dump_c);
} else {
err = dump_btf_raw(btf, root_type_ids, root_type_cnt);
}
@@ -1063,7 +1246,7 @@ static int do_help(int argc, char **argv)
" %1$s %2$s help\n"
"\n"
" BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
- " FORMAT := { raw | c }\n"
+ " FORMAT := { raw | c [unsorted] }\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS " |\n"
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index af6898c0f388..9af426d43299 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -19,6 +19,38 @@
#include "main.h"
+static const int cgroup_attach_types[] = {
+ BPF_CGROUP_INET_INGRESS,
+ BPF_CGROUP_INET_EGRESS,
+ BPF_CGROUP_INET_SOCK_CREATE,
+ BPF_CGROUP_INET_SOCK_RELEASE,
+ BPF_CGROUP_INET4_BIND,
+ BPF_CGROUP_INET6_BIND,
+ BPF_CGROUP_INET4_POST_BIND,
+ BPF_CGROUP_INET6_POST_BIND,
+ BPF_CGROUP_INET4_CONNECT,
+ BPF_CGROUP_INET6_CONNECT,
+ BPF_CGROUP_UNIX_CONNECT,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ BPF_CGROUP_UNIX_GETPEERNAME,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ BPF_CGROUP_UNIX_GETSOCKNAME,
+ BPF_CGROUP_UDP4_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UNIX_SENDMSG,
+ BPF_CGROUP_UDP4_RECVMSG,
+ BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_UNIX_RECVMSG,
+ BPF_CGROUP_SOCK_OPS,
+ BPF_CGROUP_DEVICE,
+ BPF_CGROUP_SYSCTL,
+ BPF_CGROUP_GETSOCKOPT,
+ BPF_CGROUP_SETSOCKOPT,
+ BPF_LSM_CGROUP
+};
+
#define HELP_SPEC_ATTACH_FLAGS \
"ATTACH_FLAGS := { multi | override }"
@@ -183,13 +215,13 @@ static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
static int cgroup_has_attached_progs(int cgroup_fd)
{
- enum bpf_attach_type type;
+ unsigned int i = 0;
bool no_prog = true;
- for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
- int count = count_attached_bpf_progs(cgroup_fd, type);
+ for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
+ int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]);
- if (count < 0 && errno != EINVAL)
+ if (count < 0)
return -1;
if (count > 0) {
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 958e92acca8e..9b75639434b8 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -410,7 +410,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
{
const char *prog_name = prog_info->name;
const struct btf_type *func_type;
- const struct bpf_func_info finfo = {};
+ struct bpf_func_info finfo = {};
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
struct btf *prog_btf = NULL;
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index b3979ddc0189..5a4d3240689e 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -848,28 +848,45 @@ out:
}
static void
-codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
+codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool populate_links)
{
struct bpf_map *map;
char ident[256];
- size_t i;
+ size_t i, map_sz;
if (!map_cnt)
return;
+ /* for backward compatibility with old libbpf versions that don't
+ * handle new BPF skeleton with new struct bpf_map_skeleton definition
+ * that includes link field, avoid specifying new increased size,
+ * unless we absolutely have to (i.e., if there are struct_ops maps
+ * present)
+ */
+ map_sz = offsetof(struct bpf_map_skeleton, link);
+ if (populate_links) {
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+ map_sz = sizeof(struct bpf_map_skeleton);
+ break;
+ }
+ }
+ }
+
codegen("\
\n\
- \n\
+ \n\
/* maps */ \n\
s->map_cnt = %zu; \n\
- s->map_skel_sz = sizeof(*s->maps); \n\
- s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+ s->map_skel_sz = %zu; \n\
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
+ sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
if (!s->maps) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
",
- map_cnt
+ map_cnt, map_sz, map_sz, map_sz
);
i = 0;
bpf_object__for_each_map(map, obj) {
@@ -878,15 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
codegen("\
\n\
- \n\
- s->maps[%zu].name = \"%s\"; \n\
- s->maps[%zu].map = &obj->maps.%s; \n\
+ \n\
+ map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
+ map->name = \"%s\"; \n\
+ map->map = &obj->maps.%s; \n\
",
- i, bpf_map__name(map), i, ident);
+ i, bpf_map__name(map), ident);
/* memory-mapped internal maps */
if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
- printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
- i, ident);
+ printf("\tmap->mmaped = (void **)&obj->%s;\n", ident);
+ }
+
+ if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+ codegen("\
+ \n\
+ map->link = &obj->links.%s; \n\
+ ", ident);
}
i++;
}
@@ -1141,7 +1165,7 @@ static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
- size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
+ size_t map_cnt = 0, prog_cnt = 0, attach_map_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
@@ -1225,6 +1249,10 @@ static int do_skeleton(int argc, char **argv)
bpf_map__name(map));
continue;
}
+
+ if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS)
+ attach_map_cnt++;
+
map_cnt++;
}
bpf_object__for_each_program(prog, obj) {
@@ -1260,6 +1288,8 @@ static int do_skeleton(int argc, char **argv)
#include <stdlib.h> \n\
#include <bpf/libbpf.h> \n\
\n\
+ #define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 \n\
+ \n\
struct %1$s { \n\
struct bpf_object_skeleton *skeleton; \n\
struct bpf_object *obj; \n\
@@ -1297,6 +1327,9 @@ static int do_skeleton(int argc, char **argv)
bpf_program__name(prog));
}
printf("\t} progs;\n");
+ }
+
+ if (prog_cnt + attach_map_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
if (use_loader)
@@ -1306,6 +1339,19 @@ static int do_skeleton(int argc, char **argv)
printf("\t\tstruct bpf_link *%s;\n",
bpf_program__name(prog));
}
+
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+
+ if (use_loader)
+ printf("t\tint %s_fd;\n", ident);
+ else
+ printf("\t\tstruct bpf_link *%s;\n", ident);
+ }
+
printf("\t} links;\n");
}
@@ -1433,6 +1479,7 @@ static int do_skeleton(int argc, char **argv)
%1$s__create_skeleton(struct %1$s *obj) \n\
{ \n\
struct bpf_object_skeleton *s; \n\
+ struct bpf_map_skeleton *map __attribute__((unused));\n\
int err; \n\
\n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
@@ -1448,7 +1495,7 @@ static int do_skeleton(int argc, char **argv)
obj_name
);
- codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
+ codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/, true /*links*/);
codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
codegen("\
@@ -1723,6 +1770,7 @@ static int do_subskeleton(int argc, char **argv)
{ \n\
struct %1$s *obj; \n\
struct bpf_object_subskeleton *s; \n\
+ struct bpf_map_skeleton *map __attribute__((unused));\n\
int err; \n\
\n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
@@ -1786,7 +1834,7 @@ static int do_subskeleton(int argc, char **argv)
}
}
- codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
+ codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/, false /*links*/);
codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
codegen("\
@@ -2379,15 +2427,6 @@ out:
return err;
}
-static int btfgen_remap_id(__u32 *type_id, void *ctx)
-{
- unsigned int *ids = ctx;
-
- *type_id = ids[*type_id];
-
- return 0;
-}
-
/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
@@ -2467,10 +2506,15 @@ static struct btf *btfgen_get_btf(struct btfgen_info *info)
/* second pass: fix up type ids */
for (i = 1; i < btf__type_cnt(btf_new); i++) {
struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
+ struct btf_field_iter it;
+ __u32 *type_id;
- err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
+ err = btf_field_iter_init(&it, btf_type, BTF_FIELD_ITER_IDS);
if (err)
goto err_out;
+
+ while ((type_id = btf_field_iter_next(&it)))
+ *type_id = ids[*type_id];
}
free(ids);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 1a501cf09e78..40ea743d139f 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1813,6 +1813,10 @@ offload_dev:
}
if (pinmaps) {
+ err = create_and_mount_bpffs_dir(pinmaps);
+ if (err)
+ goto err_unpin;
+
err = bpf_object__pin_maps(obj, pinmaps);
if (err) {
p_err("failed to pin all maps");
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
index 7bdbcac3cf62..948dde25034e 100644
--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
@@ -29,6 +29,7 @@ enum bpf_link_type___local {
};
extern const void bpf_link_fops __ksym;
+extern const void bpf_link_fops_poll __ksym __weak;
extern const void bpf_map_fops __ksym;
extern const void bpf_prog_fops __ksym;
extern const void btf_fops __ksym;
@@ -84,7 +85,11 @@ int iter(struct bpf_iter__task_file *ctx)
fops = &btf_fops;
break;
case BPF_OBJ_LINK:
- fops = &bpf_link_fops;
+ if (&bpf_link_fops_poll &&
+ file->f_op == &bpf_link_fops_poll)
+ fops = &bpf_link_fops_poll;
+ else
+ fops = &bpf_link_fops;
break;
default:
return 0;
diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
index 2f80edc682f1..f48c783cb9f7 100644
--- a/tools/bpf/bpftool/skeleton/profiler.bpf.c
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -40,17 +40,17 @@ struct {
const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
-#define MAX_NUM_MATRICS 4
+#define MAX_NUM_METRICS 4
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
- struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
+ struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS];
u32 key = bpf_get_smp_processor_id();
u32 i;
/* look up before reading, to reduce error */
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
u32 flag = i;
ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
@@ -58,7 +58,7 @@ int BPF_PROG(fentry_XXX)
return 0;
}
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
struct bpf_perf_event_value___local reading;
int err;
@@ -99,14 +99,14 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
- struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
+ struct bpf_perf_event_value___local readings[MAX_NUM_METRICS];
u32 cpu = bpf_get_smp_processor_id();
u32 i, zero = 0;
int err;
u64 *count;
/* read all events before updating the maps, to reduce error */
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
(void *)(readings + i),
sizeof(*readings));
@@ -116,7 +116,7 @@ int BPF_PROG(fexit_XXX)
count = bpf_map_lookup_elem(&counts, &zero);
if (count) {
*count += 1;
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
fexit_update_maps(i, &readings[i]);
}
return 0;
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index d9520cb826b3..936ef95c3d32 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -409,6 +409,14 @@ static int elf_collect(struct object *obj)
obj->efile.idlist = data;
obj->efile.idlist_shndx = idx;
obj->efile.idlist_addr = sh.sh_addr;
+ } else if (!strcmp(name, BTF_BASE_ELF_SEC)) {
+ /* If a .BTF.base section is found, do not resolve
+ * BTF ids relative to vmlinux; resolve relative
+ * to the .BTF.base section instead. btf__parse_split()
+ * will take care of this once the base BTF it is
+ * passed is NULL.
+ */
+ obj->base_btf_path = NULL;
}
if (compressed_section_fix(elf, scn, &sh))
@@ -728,7 +736,7 @@ static int sets_patch(struct object *obj)
static int symbols_patch(struct object *obj)
{
- int err;
+ off_t err;
if (__symbols_patch(obj, &obj->structs) ||
__symbols_patch(obj, &obj->unions) ||
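A minimal sketch of what relying on the NULL base amounts to on the libbpf side, assuming only the btf__parse_split() behaviour described in the comment above (the helper name is made up, not code from this patch):

#include <bpf/btf.h>

/* With a NULL base, libbpf resolves split BTF against the object's own
 * .BTF.base section when one is present, instead of requiring vmlinux BTF. */
static struct btf *load_object_btf(const char *path)
{
	return btf__parse_split(path, /* base_btf */ NULL);
}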
diff --git a/tools/gpio/gpio-sloppy-logic-analyzer.sh b/tools/gpio/gpio-sloppy-logic-analyzer.sh
new file mode 100755
index 000000000000..ed21a110df5e
--- /dev/null
+++ b/tools/gpio/gpio-sloppy-logic-analyzer.sh
@@ -0,0 +1,246 @@
+#!/bin/sh -eu
+# SPDX-License-Identifier: GPL-2.0
+#
+# Helper script for the Linux Kernel GPIO sloppy logic analyzer
+#
+# Copyright (C) Wolfram Sang <wsa@sang-engineering.com>
+# Copyright (C) Renesas Electronics Corporation
+
+samplefreq=1000000
+numsamples=250000
+cpusetdefaultdir='/sys/fs/cgroup'
+cpusetprefix='cpuset.'
+debugdir='/sys/kernel/debug'
+ladirname='gpio-sloppy-logic-analyzer'
+outputdir="$PWD"
+neededcmds='taskset zip'
+max_chans=8
+duration=
+initcpu=
+listinstances=0
+lainstance=
+lasysfsdir=
+triggerdat=
+trigger_bindat=
+progname="${0##*/}"
+print_help()
+{
+ cat << EOF
+$progname - helper script for the Linux Kernel Sloppy GPIO Logic Analyzer
+Available options:
+ -c|--cpu <n>: which CPU to isolate for sampling. Only needed once. Default <1>.
+ Remember that a more powerful CPU gives you higher sampling speeds.
+ Also CPU0 is not recommended as it usually does extra bookkeeping.
+ -d|--duration-us <SI-n>: number of microseconds to sample. Overrides -n, no default value.
+ -h|--help: print this help
+ -i|--instance <str>: name of the logic analyzer in case you have multiple instances. Default
+ to first instance found
+ -k|--kernel-debug-dir <str>: path to the kernel debugfs mountpoint. Default: <$debugdir>
+ -l|--list-instances: list all available instances
+ -n|--num_samples <SI-n>: number of samples to acquire. Default <$numsamples>
+ -o|--output-dir <str>: directory to put the result files. Default: current dir
+ -s|--sample_freq <SI-n>: desired sampling frequency. Might be capped if too large.
+ Default: <1000000>
+ -t|--trigger <str>: pattern to use as trigger. <str> consists of two-char pairs. First
+ char is channel number starting at "1". Second char is trigger level:
+ "L" - low; "H" - high; "R" - rising; "F" - falling
+ These pairs can be combined with "+", so "1H+2F" triggers when probe 1
+ is high while probe 2 has a falling edge. You can have multiple triggers
+ combined with ",". So, "1H+2F,1H+2R" is like the example before but it
+ waits for a rising edge on probe 2 while probe 1 is still high after the
+ first trigger has been met.
+ Trigger data will only be used for the next capture and then be erased.
+
+<SI-n> is an integer value where SI units "T", "G", "M", "K" are recognized, e.g. '1M500K' is 1500000.
+
+Examples:
+Samples $numsamples values at 1MHz with an already prepared CPU or automatically prepares CPU1 if needed,
+use the first logic analyzer instance found:
+ '$progname'
+Samples 50us at 2MHz waiting for a falling edge on channel 2. CPU and instance as above:
+ '$progname -d 50 -s 2M -t "2F"'
+
+Note that the process exits after checking all parameters, but a sub-process keeps running in
+the background. The result is only available once that sub-process finishes.
+
+The result is a .sr file to be consumed with PulseView from the free Sigrok project. It is
+a zip file which also contains the binary sample data, so other tools may consume it as well.
+The filename is the logic analyzer instance name plus a since-epoch timestamp.
+EOF
+}
+
+fail()
+{
+ echo "$1"
+ exit 1
+}
+
+parse_si()
+{
+ conv_si="$(printf $1 | sed 's/[tT]+\?/*1000G+/g; s/[gG]+\?/*1000M+/g; s/[mM]+\?/*1000K+/g; s/[kK]+\?/*1000+/g; s/+$//')"
+ si_val="$((conv_si))"
+}
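+# Worked example for parse_si (illustrative): parse_si '1M500K' rewrites its
+# argument to the arithmetic string '1*1000*1000+500*1000' and evaluates it,
+# so si_val becomes 1500000; plain integers such as '250000' pass through as-is.
+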
+set_newmask()
+{
+ for f in $(find "$1" -iname "$2"); do echo "$newmask" > "$f" 2>/dev/null || true; done
+}
+
+init_cpu()
+{
+ isol_cpu="$1"
+
+ [ -d "$lacpusetdir" ] || mkdir "$lacpusetdir"
+
+ cur_cpu=$(cat "${lacpusetfile}cpus")
+ [ "$cur_cpu" = "$isol_cpu" ] && return
+ [ -z "$cur_cpu" ] || fail "CPU$isol_cpu requested but CPU$cur_cpu already isolated"
+
+ echo "$isol_cpu" > "${lacpusetfile}cpus" || fail "Could not isolate CPU$isol_cpu. Does it exist?"
+ echo 1 > "${lacpusetfile}cpu_exclusive"
+ echo 0 > "${lacpusetfile}mems"
+
+ oldmask=$(cat /proc/irq/default_smp_affinity)
+ newmask=$(printf "%x" $((0x$oldmask & ~(1 << isol_cpu))))
+
+ set_newmask '/proc/irq' '*smp_affinity'
+ set_newmask '/sys/devices/virtual/workqueue/' 'cpumask'
+
+ # Move tasks away from isolated CPU
+ for p in $(ps -o pid | tail -n +2); do
+ mask=$(taskset -p "$p") || continue
+ # Ignore tasks with a custom mask, i.e. not equal $oldmask
+ [ "${mask##*: }" = "$oldmask" ] || continue
+ taskset -p "$newmask" "$p" || continue
+ done 2>/dev/null >/dev/null
+
+ # Big hammer! Using 'rcu_momentary_dyntick_idle()' for a more fine-grained solution
+ # still printed warnings, and the same happened when re-enabling the stall detector after sampling.
+ echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress
+
+ cpufreqgov="/sys/devices/system/cpu/cpu$isol_cpu/cpufreq/scaling_governor"
+ [ -w "$cpufreqgov" ] && echo 'performance' > "$cpufreqgov" || true
+}
+
+parse_triggerdat()
+{
+ oldifs="$IFS"
+ IFS=','; for trig in $1; do
+ mask=0; val1=0; val2=0
+ IFS='+'; for elem in $trig; do
+ chan=${elem%[lhfrLHFR]}
+ mode=${elem#$chan}
+ # Check if we could parse something and the channel number fits
+ [ "$chan" != "$elem" ] && [ "$chan" -le $max_chans ] || fail "Trigger syntax error: $elem"
+ bit=$((1 << (chan - 1)))
+ mask=$((mask | bit))
+ case $mode in
+ [hH]) val1=$((val1 | bit)); val2=$((val2 | bit));;
+ [fF]) val1=$((val1 | bit));;
+ [rR]) val2=$((val2 | bit));;
+ esac
+ done
+ trigger_bindat="$trigger_bindat$(printf '\\%o\\%o' $mask $val1)"
+ [ $val1 -ne $val2 ] && trigger_bindat="$trigger_bindat$(printf '\\%o\\%o' $mask $val2)"
+ done
+ IFS="$oldifs"
+}
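+# Illustrative trace of parse_triggerdat: for '1H+2F' the loop computes mask=3,
+# val1=3, val2=1, so trigger_bindat becomes the escape string '\3\3\3\1' (two
+# <mask,value> pairs); printf later expands it into the binary trigger data
+# written to the analyzer's 'trigger' file.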
+
+do_capture()
+{
+ taskset "$1" echo 1 > "$lasysfsdir"/capture || fail "Capture error! Check kernel log"
+
+ srtmp=$(mktemp -d)
+ echo 1 > "$srtmp"/version
+ cp "$lasysfsdir"/sample_data "$srtmp"/logic-1-1
+ cat > "$srtmp"/metadata << EOF
+[global]
+sigrok version=0.2.0
+
+[device 1]
+capturefile=logic-1
+total probes=$(wc -l < "$lasysfsdir"/meta_data)
+samplerate=${samplefreq}Hz
+unitsize=1
+EOF
+ cat "$lasysfsdir"/meta_data >> "$srtmp"/metadata
+
+ zipname="$outputdir/${lasysfsdir##*/}-$(date +%s).sr"
+ zip -jq "$zipname" "$srtmp"/*
+ rm -rf "$srtmp"
+ delay_ack=$(cat "$lasysfsdir"/delay_ns_acquisition)
+ [ "$delay_ack" -eq 0 ] && delay_ack=1
+ echo "Logic analyzer done. Saved '$zipname'"
+ echo "Max sample frequency this time: $((1000000000 / delay_ack))Hz."
+}
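+# For reference: the archive assembled above is a plain zip holding 'version',
+# 'metadata' and the raw sample bytes as 'logic-1-1', i.e. the layout PulseView
+# expects when opening a .sr capture.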
+
+rep=$(getopt -a -l cpu:,duration-us:,help,instance:,list-instances,kernel-debug-dir:,num_samples:,output-dir:,sample_freq:,trigger: -o c:d:hi:k:ln:o:s:t: -- "$@") || exit 1
+eval set -- "$rep"
+while true; do
+ case "$1" in
+ -c|--cpu) initcpu="$2"; shift;;
+ -d|--duration-us) parse_si $2; duration=$si_val; shift;;
+ -h|--help) print_help; exit 0;;
+ -i|--instance) lainstance="$2"; shift;;
+ -k|--kernel-debug-dir) debugdir="$2"; shift;;
+ -l|--list-instances) listinstances=1;;
+ -n|--num_samples) parse_si $2; numsamples=$si_val; shift;;
+ -o|--output-dir) outputdir="$2"; shift;;
+ -s|--sample_freq) parse_si $2; samplefreq=$si_val; shift;;
+ -t|--trigger) triggerdat="$2"; shift;;
+ --) break;;
+ *) fail "error parsing command line: $*";;
+ esac
+ shift
+done
+
+for f in $neededcmds; do
+ command -v "$f" >/dev/null || fail "Command '$f' not found"
+done
+
+# Print the cpuset mountpoint, if any; the exit code is > 0 if the 'noprefix' mount option was found
+cpusetdir=$(awk '$3 == "cgroup" && $4 ~ /cpuset/ { print $2; exit (match($4, /noprefix/) > 0) }' /proc/self/mounts) || cpusetprefix=''
+if [ -z "$cpusetdir" ]; then
+ cpusetdir="$cpusetdefaultdir"
+ [ -d $cpusetdir ] || mkdir $cpusetdir
+ mount -t cgroup -o cpuset none $cpusetdir || fail "Couldn't mount cpusets. Not in kernel or already in use?"
+fi
+
+lacpusetdir="$cpusetdir/$ladirname"
+lacpusetfile="$lacpusetdir/$cpusetprefix"
+sysfsdir="$debugdir/$ladirname"
+
+[ "$samplefreq" -ne 0 ] || fail "Invalid sample frequency"
+
+[ -d "$sysfsdir" ] || fail "Could not find logic analyzer root dir '$sysfsdir'. Module loaded?"
+[ -x "$sysfsdir" ] || fail "Could not access logic analyzer root dir '$sysfsdir'. Need root?"
+
+[ $listinstances -gt 0 ] && find "$sysfsdir" -mindepth 1 -type d | sed 's|.*/||' && exit 0
+
+if [ -n "$lainstance" ]; then
+ lasysfsdir="$sysfsdir/$lainstance"
+else
+ lasysfsdir=$(find "$sysfsdir" -mindepth 1 -type d -print -quit)
+fi
+[ -d "$lasysfsdir" ] || fail "Logic analyzer directory '$lasysfsdir' not found!"
+[ -d "$outputdir" ] || fail "Output directory '$outputdir' not found!"
+
+[ -n "$initcpu" ] && init_cpu "$initcpu"
+[ -d "$lacpusetdir" ] || { echo "Auto-Isolating CPU1"; init_cpu 1; }
+
+ndelay=$((1000000000 / samplefreq))
+echo "$ndelay" > "$lasysfsdir"/delay_ns
+
+[ -n "$duration" ] && numsamples=$((samplefreq * duration / 1000000))
+echo $numsamples > "$lasysfsdir"/buf_size
+
+if [ -n "$triggerdat" ]; then
+ parse_triggerdat "$triggerdat"
+ printf "$trigger_bindat" > "$lasysfsdir"/trigger 2>/dev/null || fail "Trigger data '$triggerdat' rejected"
+fi
+
+workcpu=$(cat "${lacpusetfile}effective_cpus")
+[ -n "$workcpu" ] || fail "No isolated CPU found"
+cpumask=$(printf '%x' $((1 << workcpu)))
+instance=${lasysfsdir##*/}
+echo "Setting up '$instance': $numsamples samples at ${samplefreq}Hz with ${triggerdat:-no} trigger using CPU$workcpu"
+do_capture "$cpumask" &
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index bb52871da341..2e60e2c212cd 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -17,6 +17,7 @@ endif
MAKEFLAGS += -r
override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+override CFLAGS += -Wno-address-of-packed-member
ALL_TARGETS := hv_kvp_daemon hv_vss_daemon
ifneq ($(ARCH), aarch64)
diff --git a/tools/include/nolibc/stdint.h b/tools/include/nolibc/stdint.h
index 6665e272e213..cd79ddd6170e 100644
--- a/tools/include/nolibc/stdint.h
+++ b/tools/include/nolibc/stdint.h
@@ -96,6 +96,10 @@ typedef uint64_t uintmax_t;
#define UINT_FAST32_MAX SIZE_MAX
#define UINT_FAST64_MAX UINT64_MAX
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
#ifndef INT_MIN
#define INT_MIN (-__INT_MAX__ - 1)
#endif
@@ -110,4 +114,19 @@ typedef uint64_t uintmax_t;
#define LONG_MAX __LONG_MAX__
#endif
+#ifndef ULONG_MAX
+#define ULONG_MAX ((unsigned long)(__LONG_MAX__) * 2 + 1)
+#endif
+
+#ifndef LLONG_MIN
+#define LLONG_MIN (-__LONG_LONG_MAX__ - 1)
+#endif
+#ifndef LLONG_MAX
+#define LLONG_MAX __LONG_LONG_MAX__
+#endif
+
+#ifndef ULLONG_MAX
+#define ULLONG_MAX ((unsigned long long)(__LONG_LONG_MAX__) * 2 + 1)
+#endif
+
#endif /* _NOLIBC_STDINT_H */
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index 16cd4d807251..c968dbbc4ef8 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -376,6 +376,16 @@ int setvbuf(FILE *stream __attribute__((unused)),
return 0;
}
+static __attribute__((unused))
+const char *strerror(int errno)
+{
+ static char buf[18] = "errno=";
+
+ i64toa_r(errno, &buf[6]);
+
+ return buf;
+}
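+
+/* Illustrative behavior of this minimal strerror(): the code is not translated
+ * to text, so strerror(2) yields the string "errno=2" and strerror(-1) yields
+ * "errno=-1"; the buffer is static and reused across calls.
+ */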
+
/* make sure to include all global symbols */
#include "nolibc.h"
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 5be9d3c7435a..75aa273c23a6 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -438,6 +438,115 @@ char *u64toa(uint64_t in)
return itoa_buffer;
}
+static __attribute__((unused))
+uintmax_t __strtox(const char *nptr, char **endptr, int base, intmax_t lower_limit, uintmax_t upper_limit)
+{
+ const char signed_ = lower_limit != 0;
+ unsigned char neg = 0, overflow = 0;
+ uintmax_t val = 0, limit, old_val;
+ char c;
+
+ if (base < 0 || base > 36) {
+ SET_ERRNO(EINVAL);
+ goto out;
+ }
+
+ while (isspace(*nptr))
+ nptr++;
+
+ if (*nptr == '+') {
+ nptr++;
+ } else if (*nptr == '-') {
+ neg = 1;
+ nptr++;
+ }
+
+ if (signed_ && neg)
+ limit = -(uintmax_t)lower_limit;
+ else
+ limit = upper_limit;
+
+ if ((base == 0 || base == 16) &&
+ (strncmp(nptr, "0x", 2) == 0 || strncmp(nptr, "0X", 2) == 0)) {
+ base = 16;
+ nptr += 2;
+ } else if (base == 0 && strncmp(nptr, "0", 1) == 0) {
+ base = 8;
+ nptr += 1;
+ } else if (base == 0) {
+ base = 10;
+ }
+
+ while (*nptr) {
+ c = *nptr;
+
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'a' && c <= 'z')
+ c = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'Z')
+ c = c - 'A' + 10;
+ else
+ goto out;
+
+ if (c >= base)
+ goto out;
+
+ nptr++;
+ old_val = val;
+ val *= base;
+ val += c;
+
+ if (val > limit || val < old_val)
+ overflow = 1;
+ }
+
+out:
+ if (overflow) {
+ SET_ERRNO(ERANGE);
+ val = limit;
+ }
+ if (endptr)
+ *endptr = (char *)nptr;
+ return neg ? -val : val;
+}
+
+static __attribute__((unused))
+long strtol(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, LONG_MIN, LONG_MAX);
+}
+
+static __attribute__((unused))
+unsigned long strtoul(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, 0, ULONG_MAX);
+}
+
+static __attribute__((unused))
+long long strtoll(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, LLONG_MIN, LLONG_MAX);
+}
+
+static __attribute__((unused))
+unsigned long long strtoull(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, 0, ULLONG_MAX);
+}
+
+static __attribute__((unused))
+intmax_t strtoimax(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, INTMAX_MIN, INTMAX_MAX);
+}
+
+static __attribute__((unused))
+uintmax_t strtoumax(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, 0, UINTMAX_MAX);
+}
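+
+/* Usage sketch (illustrative), matching the semantics implemented above:
+ *
+ *	char *end;
+ *	long v = strtol(" 0x1fZ", &end, 0);
+ *	// v == 31: base is auto-detected as 16 and parsing stops at 'Z'
+ *
+ *	errno = 0;
+ *	unsigned long u = strtoul("99999999999999999999999", NULL, 10);
+ *	// overflow: u == ULONG_MAX and errno == ERANGE
+ */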
+
/* make sure to include all global symbols */
#include "nolibc.h"
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 75f00965ab15..a00d53d02723 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -776,12 +776,8 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
-
-#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
-#endif
-
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
@@ -842,8 +838,11 @@ __SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
#define __NR_lsm_list_modules 461
__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+#define __NR_mseal 462
+__SYSCALL(__NR_mseal, sys_mseal)
+
#undef __NR_syscalls
-#define __NR_syscalls 462
+#define __NR_syscalls 463
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 2ee338860b7e..d4d86e566e07 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -806,6 +806,12 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_PXP_STATUS 58
+/*
+ * Query if kernel allows marking a context to send a Freq hint to SLPC. This
+ * will enable use of the strategies allowed by the SLPC algorithm.
+ */
+#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59
+
/* Must be kept compact -- no holes and well documented */
/**
@@ -2148,6 +2154,15 @@ struct drm_i915_gem_context_param {
* -EIO: The firmware did not succeed in creating the protected context.
*/
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
+
+/*
+ * I915_CONTEXT_PARAM_LOW_LATENCY:
+ *
+ * Mark this context as a low latency workload which requires aggressive GT
+ * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel
+ * supports this per context flag.
+ */
+#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
/* Must be kept compact -- no holes and well documented */
/** @value: Context parameter value to be set or queried */
@@ -2623,19 +2638,29 @@ struct drm_i915_reg_read {
*
*/
+/*
+ * struct drm_i915_reset_stats - Return global reset and other context stats
+ *
+ * The driver keeps a few stats for each context and also a global reset count.
+ * This struct can be used to query those stats.
+ */
struct drm_i915_reset_stats {
+ /** @ctx_id: ID of the requested context */
__u32 ctx_id;
+
+ /** @flags: MBZ */
__u32 flags;
- /* All resets since boot/module reload, for all contexts */
+ /** @reset_count: All resets since boot/module reload, for all contexts */
__u32 reset_count;
- /* Number of batches lost when active in GPU, for this context */
+ /** @batch_active: Number of batches lost when active in GPU, for this context */
__u32 batch_active;
- /* Number of batches lost pending for execution, for this context */
+ /** @batch_pending: Number of batches lost pending for execution, for this context */
__u32 batch_pending;
+ /** @pad: MBZ */
__u32 pad;
};
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 90706a47f6ff..35bcf52dbc65 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1425,6 +1425,8 @@ enum {
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
/* If set, XDP frames will be transmitted after processing */
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
+/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
+#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2)
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
@@ -6207,12 +6209,17 @@ union { \
__u64 :64; \
} __attribute__((aligned(8)))
+/* The enum used in skb->tstamp_type. It specifies the clock type
+ * of the time stored in the skb->tstamp.
+ */
enum {
- BPF_SKB_TSTAMP_UNSPEC,
- BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
- /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
- * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
- * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
+ BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */
+ BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */
+ BPF_SKB_CLOCK_REALTIME = 0,
+ BPF_SKB_CLOCK_MONOTONIC = 1,
+ BPF_SKB_CLOCK_TAI = 2,
+ /* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
+ * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
*/
};
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index ea32b101b999..d03842abae57 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1221,9 +1221,9 @@ struct kvm_vfio_spapr_tce {
/* Available with KVM_CAP_SPAPR_RESIZE_HPT */
#define KVM_PPC_RESIZE_HPT_PREPARE _IOR(KVMIO, 0xad, struct kvm_ppc_resize_hpt)
#define KVM_PPC_RESIZE_HPT_COMMIT _IOR(KVMIO, 0xae, struct kvm_ppc_resize_hpt)
-/* Available with KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_MMU_HASH_V3 */
+/* Available with KVM_CAP_PPC_MMU_RADIX or KVM_CAP_PPC_MMU_HASH_V3 */
#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
-/* Available with KVM_CAP_PPC_RADIX_MMU */
+/* Available with KVM_CAP_PPC_MMU_RADIX */
#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index a8188202413e..43742ac5b00d 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -148,6 +148,7 @@ enum {
NETDEV_A_QSTATS_RX_ALLOC_FAIL,
NETDEV_A_QSTATS_RX_HW_DROPS,
NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+ NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
NETDEV_A_QSTATS_RX_CSUM_NONE,
NETDEV_A_QSTATS_RX_CSUM_BAD,
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index 2f2ee82d5517..67626d535316 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -126,8 +126,9 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+ __u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
- __u64 __spare3[12]; /* Spare space for future expansion */
+ __u64 __spare3[11]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -155,6 +156,7 @@ struct statx {
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
+#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index b6619199a706..e2cd558ca0b4 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
- usdt.o zip.o elf.o features.o
+ usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d0840ef599a..32c00db3b91b 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -116,6 +116,9 @@ struct btf {
/* whether strings are already deduplicated */
bool strs_deduped;
+ /* whether base_btf should be freed in btf_free for this instance */
+ bool owns_base;
+
/* BTF object FD, if loaded into kernel */
int fd;
@@ -598,7 +601,7 @@ static int btf_sanity_check(const struct btf *btf)
__u32 i, n = btf__type_cnt(btf);
int err;
- for (i = 1; i < n; i++) {
+ for (i = btf->start_id; i < n; i++) {
t = btf_type_by_id(btf, i);
err = btf_validate_type(btf, t, i);
if (err)
@@ -969,6 +972,8 @@ void btf__free(struct btf *btf)
free(btf->raw_data);
free(btf->raw_data_swapped);
free(btf->type_offs);
+ if (btf->owns_base)
+ btf__free(btf->base_btf);
free(btf);
}
@@ -1084,53 +1089,38 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
return libbpf_ptr(btf_new(data, size, base_btf));
}
-static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
- struct btf_ext **btf_ext)
+struct btf_elf_secs {
+ Elf_Data *btf_data;
+ Elf_Data *btf_ext_data;
+ Elf_Data *btf_base_data;
+};
+
+static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
{
- Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
- int err = 0, fd = -1, idx = 0;
- struct btf *btf = NULL;
Elf_Scn *scn = NULL;
- Elf *elf = NULL;
+ Elf_Data *data;
GElf_Ehdr ehdr;
size_t shstrndx;
+ int idx = 0;
- if (elf_version(EV_CURRENT) == EV_NONE) {
- pr_warn("failed to init libelf for %s\n", path);
- return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
- }
-
- fd = open(path, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- err = -errno;
- pr_warn("failed to open %s: %s\n", path, strerror(errno));
- return ERR_PTR(err);
- }
-
- err = -LIBBPF_ERRNO__FORMAT;
-
- elf = elf_begin(fd, ELF_C_READ, NULL);
- if (!elf) {
- pr_warn("failed to open %s as ELF file\n", path);
- goto done;
- }
if (!gelf_getehdr(elf, &ehdr)) {
pr_warn("failed to get EHDR from %s\n", path);
- goto done;
+ goto err;
}
if (elf_getshdrstrndx(elf, &shstrndx)) {
pr_warn("failed to get section names section index for %s\n",
path);
- goto done;
+ goto err;
}
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
- goto done;
+ goto err;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ Elf_Data **field;
GElf_Shdr sh;
char *name;
@@ -1138,42 +1128,102 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warn("failed to get section(%d) header from %s\n",
idx, path);
- goto done;
+ goto err;
}
name = elf_strptr(elf, shstrndx, sh.sh_name);
if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path);
- goto done;
+ goto err;
}
- if (strcmp(name, BTF_ELF_SEC) == 0) {
- btf_data = elf_getdata(scn, 0);
- if (!btf_data) {
- pr_warn("failed to get section(%d, %s) data from %s\n",
- idx, name, path);
- goto done;
- }
- continue;
- } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
- btf_ext_data = elf_getdata(scn, 0);
- if (!btf_ext_data) {
- pr_warn("failed to get section(%d, %s) data from %s\n",
- idx, name, path);
- goto done;
- }
+
+ if (strcmp(name, BTF_ELF_SEC) == 0)
+ field = &secs->btf_data;
+ else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
+ field = &secs->btf_ext_data;
+ else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
+ field = &secs->btf_base_data;
+ else
continue;
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ pr_warn("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
+ goto err;
}
+ *field = data;
}
- if (!btf_data) {
+ return 0;
+
+err:
+ return -LIBBPF_ERRNO__FORMAT;
+}
+
+static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
+ struct btf_ext **btf_ext)
+{
+ struct btf_elf_secs secs = {};
+ struct btf *dist_base_btf = NULL;
+ struct btf *btf = NULL;
+ int err = 0, fd = -1;
+ Elf *elf = NULL;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warn("failed to init libelf for %s\n", path);
+ return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
+ }
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ err = -errno;
+ pr_warn("failed to open %s: %s\n", path, strerror(errno));
+ return ERR_PTR(err);
+ }
+
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+ if (!elf) {
+ pr_warn("failed to open %s as ELF file\n", path);
+ goto done;
+ }
+
+ err = btf_find_elf_sections(elf, path, &secs);
+ if (err)
+ goto done;
+
+ if (!secs.btf_data) {
pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
err = -ENODATA;
goto done;
}
- btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
- err = libbpf_get_error(btf);
- if (err)
+
+ if (secs.btf_base_data) {
+ dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
+ NULL);
+ if (IS_ERR(dist_base_btf)) {
+ err = PTR_ERR(dist_base_btf);
+ dist_base_btf = NULL;
+ goto done;
+ }
+ }
+
+ btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
+ dist_base_btf ?: base_btf);
+ if (IS_ERR(btf)) {
+ err = PTR_ERR(btf);
goto done;
+ }
+ if (dist_base_btf && base_btf) {
+ err = btf__relocate(btf, base_btf);
+ if (err)
+ goto done;
+ btf__free(dist_base_btf);
+ dist_base_btf = NULL;
+ }
+
+ if (dist_base_btf)
+ btf->owns_base = true;
switch (gelf_getclass(elf)) {
case ELFCLASS32:
@@ -1187,11 +1237,12 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
break;
}
- if (btf_ext && btf_ext_data) {
- *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
- err = libbpf_get_error(*btf_ext);
- if (err)
+ if (btf_ext && secs.btf_ext_data) {
+ *btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
+ if (IS_ERR(*btf_ext)) {
+ err = PTR_ERR(*btf_ext);
goto done;
+ }
} else if (btf_ext) {
*btf_ext = NULL;
}
@@ -1205,6 +1256,7 @@ done:
if (btf_ext)
btf_ext__free(*btf_ext);
+ btf__free(dist_base_btf);
btf__free(btf);
return ERR_PTR(err);
@@ -1739,9 +1791,8 @@ struct btf_pipe {
struct hashmap *str_off_map; /* map string offsets from src to dst */
};
-static int btf_rewrite_str(__u32 *str_off, void *ctx)
+static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
{
- struct btf_pipe *p = ctx;
long mapped_off;
int off, err;
@@ -1771,10 +1822,11 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
return 0;
}
-int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
+static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
{
- struct btf_pipe p = { .src = src_btf, .dst = btf };
+ struct btf_field_iter it;
struct btf_type *t;
+ __u32 *str_off;
int sz, err;
sz = btf_type_size(src_type);
@@ -1782,35 +1834,33 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
return libbpf_err(sz);
/* deconstruct BTF, if necessary, and invalidate raw_data */
- if (btf_ensure_modifiable(btf))
+ if (btf_ensure_modifiable(p->dst))
return libbpf_err(-ENOMEM);
- t = btf_add_type_mem(btf, sz);
+ t = btf_add_type_mem(p->dst, sz);
if (!t)
return libbpf_err(-ENOMEM);
memcpy(t, src_type, sz);
- err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return libbpf_err(err);
- return btf_commit_type(btf, sz);
+ while ((str_off = btf_field_iter_next(&it))) {
+ err = btf_rewrite_str(p, str_off);
+ if (err)
+ return libbpf_err(err);
+ }
+
+ return btf_commit_type(p->dst, sz);
}
-static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
+int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
- struct btf *btf = ctx;
-
- if (!*type_id) /* nothing to do for VOID references */
- return 0;
+ struct btf_pipe p = { .src = src_btf, .dst = btf };
- /* we haven't updated btf's type count yet, so
- * btf->start_id + btf->nr_types - 1 is the type ID offset we should
- * add to all newly added BTF types
- */
- *type_id += btf->start_id + btf->nr_types - 1;
- return 0;
+ return btf_add_type(&p, src_type);
}
static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
@@ -1858,6 +1908,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
memcpy(t, src_btf->types_data, data_sz);
for (i = 0; i < cnt; i++) {
+ struct btf_field_iter it;
+ __u32 *type_id, *str_off;
+
sz = btf_type_size(t);
if (sz < 0) {
/* unlikely, has to be corrupted src_btf */
@@ -1869,15 +1922,31 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
*off = t - btf->types_data;
/* add, dedup, and remap strings referenced by this BTF type */
- err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
goto err_out;
+ while ((str_off = btf_field_iter_next(&it))) {
+ err = btf_rewrite_str(&p, str_off);
+ if (err)
+ goto err_out;
+ }
/* remap all type IDs referenced from this BTF type */
- err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
goto err_out;
+ while ((type_id = btf_field_iter_next(&it))) {
+ if (!*type_id) /* nothing to do for VOID references */
+ continue;
+
+ /* we haven't updated btf's type count yet, so
+ * btf->start_id + btf->nr_types - 1 is the type ID offset we should
+ * add to all newly added BTF types
+ */
+ *type_id += btf->start_id + btf->nr_types - 1;
+ }
+
/* go to next type data and type offset index entry */
t += sz;
off++;
@@ -3453,11 +3522,19 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *
int i, r;
for (i = 0; i < d->btf->nr_types; i++) {
+ struct btf_field_iter it;
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+ __u32 *str_off;
- r = btf_type_visit_str_offs(t, fn, ctx);
+ r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (r)
return r;
+
+ while ((str_off = btf_field_iter_next(&it))) {
+ r = fn(str_off, ctx);
+ if (r)
+ return r;
+ }
}
if (!d->btf_ext)
@@ -4919,10 +4996,23 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
for (i = 0; i < d->btf->nr_types; i++) {
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+ struct btf_field_iter it;
+ __u32 *type_id;
- r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
+ r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (r)
return r;
+
+ while ((type_id = btf_field_iter_next(&it))) {
+ __u32 resolved_id, new_id;
+
+ resolved_id = resolve_type_id(d, *type_id);
+ new_id = d->hypot_map[resolved_id];
+ if (new_id > BTF_MAX_NR_TYPES)
+ return -EINVAL;
+
+ *type_id = new_id;
+ }
}
if (!d->btf_ext)
@@ -5003,136 +5093,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt
return btf__parse_split(path, vmlinux_btf);
}
-int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
-{
- int i, n, err;
-
- switch (btf_kind(t)) {
- case BTF_KIND_INT:
- case BTF_KIND_FLOAT:
- case BTF_KIND_ENUM:
- case BTF_KIND_ENUM64:
- return 0;
-
- case BTF_KIND_FWD:
- case BTF_KIND_CONST:
- case BTF_KIND_VOLATILE:
- case BTF_KIND_RESTRICT:
- case BTF_KIND_PTR:
- case BTF_KIND_TYPEDEF:
- case BTF_KIND_FUNC:
- case BTF_KIND_VAR:
- case BTF_KIND_DECL_TAG:
- case BTF_KIND_TYPE_TAG:
- return visit(&t->type, ctx);
-
- case BTF_KIND_ARRAY: {
- struct btf_array *a = btf_array(t);
-
- err = visit(&a->type, ctx);
- err = err ?: visit(&a->index_type, ctx);
- return err;
- }
-
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION: {
- struct btf_member *m = btf_members(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->type, ctx);
- if (err)
- return err;
- }
- return 0;
- }
-
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = btf_params(t);
-
- err = visit(&t->type, ctx);
- if (err)
- return err;
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->type, ctx);
- if (err)
- return err;
- }
- return 0;
- }
-
- case BTF_KIND_DATASEC: {
- struct btf_var_secinfo *m = btf_var_secinfos(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->type, ctx);
- if (err)
- return err;
- }
- return 0;
- }
-
- default:
- return -EINVAL;
- }
-}
-
-int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
-{
- int i, n, err;
-
- err = visit(&t->name_off, ctx);
- if (err)
- return err;
-
- switch (btf_kind(t)) {
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION: {
- struct btf_member *m = btf_members(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- case BTF_KIND_ENUM: {
- struct btf_enum *m = btf_enum(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- case BTF_KIND_ENUM64: {
- struct btf_enum64 *m = btf_enum64(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = btf_params(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
const struct btf_ext_info *seg;
@@ -5212,3 +5172,325 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
return 0;
}
+
+struct btf_distill {
+ struct btf_pipe pipe;
+ int *id_map;
+ unsigned int split_start_id;
+ unsigned int split_start_str;
+ int diff_id;
+};
+
+static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
+{
+ struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+ while ((id = btf_field_iter_next(&it))) {
+ struct btf_type *base_t;
+
+ if (!*id)
+ continue;
+ /* split BTF id, not needed */
+ if (*id >= dist->split_start_id)
+ continue;
+ /* already added ? */
+ if (dist->id_map[*id] > 0)
+ continue;
+
+ /* only a subset of base BTF types should be referenced from
+ * split BTF; ensure nothing unexpected is referenced.
+ */
+ base_t = btf_type_by_id(dist->pipe.src, *id);
+ switch (btf_kind(base_t)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_FWD:
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ case BTF_KIND_PTR:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_TYPE_TAG:
+ dist->id_map[*id] = *id;
+ break;
+ default:
+ pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
+ *id, btf_kind(base_t));
+ return -EINVAL;
+ }
+ /* If a base type is used, ensure types it refers to are
+ * marked as used also; so for example if we find a PTR to INT
+ * we need both the PTR and INT.
+ *
+ * The only exception is named struct/unions, since distilled
+ * base BTF composite types have no members.
+ */
+ if (btf_is_composite(base_t) && base_t->name_off)
+ continue;
+ err = btf_add_distilled_type_ids(dist, *id);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int btf_add_distilled_types(struct btf_distill *dist)
+{
+ bool adding_to_base = dist->pipe.dst->start_id == 1;
+ int id = btf__type_cnt(dist->pipe.dst);
+ struct btf_type *t;
+ int i, err = 0;
+
+
+ /* Add types for each of the required references to either distilled
+ * base or split BTF, depending on type characteristics.
+ */
+ for (i = 1; i < dist->split_start_id; i++) {
+ const char *name;
+ int kind;
+
+ if (!dist->id_map[i])
+ continue;
+ t = btf_type_by_id(dist->pipe.src, i);
+ kind = btf_kind(t);
+ name = btf__name_by_offset(dist->pipe.src, t->name_off);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_FWD:
+ /* Named int, float, fwd are added to base. */
+ if (!adding_to_base)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /* Named struct/union are added to base as 0-vlen
+ * struct/union of same size. Anonymous struct/unions
+ * are added to split BTF as-is.
+ */
+ if (adding_to_base) {
+ if (!t->name_off)
+ continue;
+ err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
+ } else {
+ if (t->name_off)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ }
+ break;
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* Named enum[64]s are added to base as a sized
+ * enum; relocation will match with appropriately-named
+ * and sized enum or enum64.
+ *
+ * Anonymous enums are added to split BTF as-is.
+ */
+ if (adding_to_base) {
+ if (!t->name_off)
+ continue;
+ err = btf__add_enum(dist->pipe.dst, name, t->size);
+ } else {
+ if (t->name_off)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ }
+ break;
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_PTR:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_TYPE_TAG:
+ /* All other types are added to split BTF. */
+ if (adding_to_base)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ break;
+ default:
+ pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
+ name, i, kind);
+ return -EINVAL;
+
+ }
+ if (err < 0)
+ break;
+ dist->id_map[i] = id++;
+ }
+ return err;
+}
+
+/* Split BTF ids without a mapping will be shifted downwards since distilled
+ * base BTF is smaller than the original base BTF. For those that have a
+ * mapping (either to base or updated split BTF), update the id based on
+ * that mapping.
+ */
+static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+ while ((id = btf_field_iter_next(&it))) {
+ if (dist->id_map[*id])
+ *id = dist->id_map[*id];
+ else if (*id >= dist->split_start_id)
+ *id -= dist->diff_id;
+ }
+ return 0;
+}
+
+/* Create updated split BTF with distilled base BTF; distilled base BTF
+ * consists of BTF information required to clarify the types that split
+ * BTF refers to, omitting unneeded details. Specifically it will contain
+ * base types and memberless definitions of named structs, unions and enumerated
+ * types. Associated reference types like pointers, arrays and anonymous
+ * structs, unions and enumerated types will be added to split BTF.
+ * Size is recorded for named struct/unions to help guide matching to the
+ * target base BTF during later relocation.
+ *
+ * The only case where structs, unions or enumerated types are fully represented
+ * is when they are anonymous; in such cases, the anonymous type is added to
+ * split BTF in full.
+ *
+ * We return newly-created split BTF where the split BTF refers to a newly-created
+ * distilled base BTF. Both must be freed separately by the caller.
+ */
+int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
+ struct btf **new_split_btf)
+{
+ struct btf *new_base = NULL, *new_split = NULL;
+ const struct btf *old_base;
+ unsigned int n = btf__type_cnt(src_btf);
+ struct btf_distill dist = {};
+ struct btf_type *t;
+ int i, err = 0;
+
+ /* src BTF must be split BTF. */
+ old_base = btf__base_btf(src_btf);
+ if (!new_base_btf || !new_split_btf || !old_base)
+ return libbpf_err(-EINVAL);
+
+ new_base = btf__new_empty();
+ if (!new_base)
+ return libbpf_err(-ENOMEM);
+ dist.id_map = calloc(n, sizeof(*dist.id_map));
+ if (!dist.id_map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ dist.pipe.src = src_btf;
+ dist.pipe.dst = new_base;
+ dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
+ if (IS_ERR(dist.pipe.str_off_map)) {
+ err = -ENOMEM;
+ goto done;
+ }
+ dist.split_start_id = btf__type_cnt(old_base);
+ dist.split_start_str = old_base->hdr->str_len;
+
+ /* Pass over src split BTF; generate the list of base BTF type ids it
+ * references; these will constitute our distilled BTF set to be
+ * distributed over base and split BTF as appropriate.
+ */
+ for (i = src_btf->start_id; i < n; i++) {
+ err = btf_add_distilled_type_ids(&dist, i);
+ if (err < 0)
+ goto done;
+ }
+ /* Next add types for each of the required references to base BTF and split BTF
+ * in turn.
+ */
+ err = btf_add_distilled_types(&dist);
+ if (err < 0)
+ goto done;
+
+ /* Create new split BTF with distilled base BTF as its base; the final
+ * state is split BTF with distilled base BTF that represents enough
+ * about its base references to allow it to be relocated with the base
+ * BTF available.
+ */
+ new_split = btf__new_empty_split(new_base);
+ if (!new_split) {
+ err = -errno;
+ goto done;
+ }
+ dist.pipe.dst = new_split;
+ /* First add all split types */
+ for (i = src_btf->start_id; i < n; i++) {
+ t = btf_type_by_id(src_btf, i);
+ err = btf_add_type(&dist.pipe, t);
+ if (err < 0)
+ goto done;
+ }
+ /* Now add distilled types to split BTF that are not added to base. */
+ err = btf_add_distilled_types(&dist);
+ if (err < 0)
+ goto done;
+
+	/* All split BTF ids will be shifted downwards since there are fewer base
+ * BTF ids in distilled base BTF.
+ */
+ dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
+
+ n = btf__type_cnt(new_split);
+ /* Now update base/split BTF ids. */
+ for (i = 1; i < n; i++) {
+ err = btf_update_distilled_type_ids(&dist, i);
+ if (err < 0)
+ break;
+ }
+done:
+ free(dist.id_map);
+ hashmap__free(dist.pipe.str_off_map);
+ if (err) {
+ btf__free(new_split);
+ btf__free(new_base);
+ return libbpf_err(err);
+ }
+ *new_base_btf = new_base;
+ *new_split_btf = new_split;
+
+ return 0;
+}
+
+const struct btf_header *btf_header(const struct btf *btf)
+{
+ return btf->hdr;
+}
+
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
+{
+ btf->base_btf = (struct btf *)base_btf;
+ btf->start_id = btf__type_cnt(base_btf);
+ btf->start_str_off = base_btf->hdr->str_len;
+}
+
+int btf__relocate(struct btf *btf, const struct btf *base_btf)
+{
+ int err = btf_relocate(btf, base_btf, NULL);
+
+ if (!err)
+ btf->owns_base = false;
+ return libbpf_err(err);
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 8e6880d91c84..b68d216837a9 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -18,6 +18,7 @@ extern "C" {
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
+#define BTF_BASE_ELF_SEC ".BTF.base"
#define MAPS_ELF_SEC ".maps"
struct btf;
@@ -107,6 +108,27 @@ LIBBPF_API struct btf *btf__new_empty(void);
*/
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
+/**
+ * @brief **btf__distill_base()** creates new versions of the split BTF
+ * *src_btf* and its base BTF. The new base BTF will only contain the types
+ * needed to improve robustness of the split BTF to small changes in base BTF.
+ * When that split BTF is loaded against a (possibly changed) base, this
+ * distilled base BTF will help update references to that (possibly changed)
+ * base BTF.
+ *
+ * If successful, 0 is returned and **new_base_btf** and **new_split_btf**
+ * will point at new base/split BTF. Both the new split and its associated
+ * new base BTF must be freed by the caller.
+ *
+ * A negative value is returned on error and the thread-local `errno` variable
+ * is set to the error code as well.
+ */
+LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
+ struct btf **new_split_btf);
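+
+/* Illustrative flow for btf__distill_base() (a sketch; "module.btf" and
+ * vmlinux_btf are placeholders):
+ *
+ *	struct btf *split, *new_base, *new_split;
+ *
+ *	split = btf__parse_split("module.btf", vmlinux_btf);
+ *	if (split && !btf__distill_base(split, &new_base, &new_split)) {
+ *		// emit new_split together with new_base (e.g. as a .BTF.base
+ *		// section), then free split, new_base and new_split separately
+ *	}
+ */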
+
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
@@ -231,6 +253,20 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
+/**
+ * @brief **btf__relocate()** will check the split BTF *btf* for references
+ * to base BTF kinds, and verify those references are compatible with
+ * *base_btf*; if they are, *btf* is adjusted such that it is re-parented to
+ * *base_btf* and type ids and strings are adjusted to accommodate this.
+ *
+ * If successful, 0 is returned and **btf** now has **base_btf** as its
+ * base.
+ *
+ * A negative value is returned on error and the thread-local `errno` variable
+ * is set to the error code as well.
+ */
+LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
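+
+/* Illustrative use of btf__relocate(), assuming split_btf was parsed with a
+ * distilled base (for example from an object carrying a .BTF.base section):
+ *
+ *	struct btf *vmlinux = btf__load_vmlinux_btf();
+ *	int err = vmlinux ? btf__relocate(split_btf, vmlinux) : -errno;
+ *
+ *	// on success (err == 0), split_btf has vmlinux as its base BTF
+ */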
+
struct btf_dump;
struct btf_dump_opts {
diff --git a/tools/lib/bpf/btf_iter.c b/tools/lib/bpf/btf_iter.c
new file mode 100644
index 000000000000..9a6c822c2294
--- /dev/null
+++ b/tools/lib/bpf/btf_iter.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#ifdef __KERNEL__
+#include <linux/bpf.h>
+#include <linux/btf.h>
+
+#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t)
+
+#else
+#include "btf.h"
+#include "libbpf_internal.h"
+#endif
+
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind)
+{
+ it->p = NULL;
+ it->m_idx = -1;
+ it->off_idx = 0;
+ it->vlen = 0;
+
+ switch (iter_kind) {
+ case BTF_FIELD_ITER_IDS:
+ switch (btf_kind(t)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ it->desc = (struct btf_field_desc) {};
+ break;
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
+ it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
+ break;
+ case BTF_KIND_ARRAY:
+ it->desc = (struct btf_field_desc) {
+ 2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
+ sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
+ };
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ it->desc = (struct btf_field_desc) {
+ 0, {},
+ sizeof(struct btf_member),
+ 1, {offsetof(struct btf_member, type)}
+ };
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, type)},
+ sizeof(struct btf_param),
+ 1, {offsetof(struct btf_param, type)}
+ };
+ break;
+ case BTF_KIND_DATASEC:
+ it->desc = (struct btf_field_desc) {
+ 0, {},
+ sizeof(struct btf_var_secinfo),
+ 1, {offsetof(struct btf_var_secinfo, type)}
+ };
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case BTF_FIELD_ITER_STRS:
+ switch (btf_kind(t)) {
+ case BTF_KIND_UNKN:
+ it->desc = (struct btf_field_desc) {};
+ break;
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_FWD:
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
+ case BTF_KIND_DATASEC:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)}
+ };
+ break;
+ case BTF_KIND_ENUM:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_enum),
+ 1, {offsetof(struct btf_enum, name_off)}
+ };
+ break;
+ case BTF_KIND_ENUM64:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_enum64),
+ 1, {offsetof(struct btf_enum64, name_off)}
+ };
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_member),
+ 1, {offsetof(struct btf_member, name_off)}
+ };
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_param),
+ 1, {offsetof(struct btf_param, name_off)}
+ };
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (it->desc.m_sz)
+ it->vlen = btf_vlen(t);
+
+ it->p = t;
+ return 0;
+}
+
+__u32 *btf_field_iter_next(struct btf_field_iter *it)
+{
+ if (!it->p)
+ return NULL;
+
+ if (it->m_idx < 0) {
+ if (it->off_idx < it->desc.t_off_cnt)
+ return it->p + it->desc.t_offs[it->off_idx++];
+ /* move to per-member iteration */
+ it->m_idx = 0;
+ it->p += sizeof(struct btf_type);
+ it->off_idx = 0;
+ }
+
+ /* if type doesn't have members, stop */
+ if (it->desc.m_sz == 0) {
+ it->p = NULL;
+ return NULL;
+ }
+
+ if (it->off_idx >= it->desc.m_off_cnt) {
+ /* exhausted this member's fields, go to the next member */
+ it->m_idx++;
+ it->p += it->desc.m_sz;
+ it->off_idx = 0;
+ }
+
+ if (it->m_idx < it->vlen)
+ return it->p + it->desc.m_offs[it->off_idx++];
+
+ it->p = NULL;
+ return NULL;
+}
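+
+/* Caller pattern (an illustrative sketch, mirroring how btf.c drives the
+ * iterator over every type-ID field of a type):
+ *
+ *	struct btf_field_iter it;
+ *	__u32 *id;
+ *
+ *	if (!btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS)) {
+ *		while ((id = btf_field_iter_next(&it))) {
+ *			// *id may be inspected or rewritten in place
+ *		}
+ *	}
+ */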
diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
new file mode 100644
index 000000000000..17f8b32f94a0
--- /dev/null
+++ b/tools/lib/bpf/btf_relocate.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#ifdef __KERNEL__
+#include <linux/bpf.h>
+#include <linux/bsearch.h>
+#include <linux/btf.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+#include <linux/bpf_verifier.h>
+
+#define btf_type_by_id (struct btf_type *)btf_type_by_id
+#define btf__type_cnt btf_nr_types
+#define btf__base_btf btf_base_btf
+#define btf__name_by_offset btf_name_by_offset
+#define btf__str_by_offset btf_str_by_offset
+#define btf_kflag btf_type_kflag
+
+#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
+#define free(ptr) kvfree(ptr)
+#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL)
+
+#else
+
+#include "btf.h"
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+#endif /* __KERNEL__ */
+
+struct btf;
+
+struct btf_relocate {
+ struct btf *btf;
+ const struct btf *base_btf;
+ const struct btf *dist_base_btf;
+ unsigned int nr_base_types;
+ unsigned int nr_split_types;
+ unsigned int nr_dist_base_types;
+ int dist_str_len;
+ int base_str_len;
+ __u32 *id_map;
+ __u32 *str_map;
+};
+
+/* Set temporarily in relocation id_map if distilled base struct/union is
+ * embedded in a split BTF struct/union; in such a case, size information must
+ * match between distilled base BTF and base BTF representation of type.
+ */
+#define BTF_IS_EMBEDDED ((__u32)-1)
+
+/* <name, size, id> triple used in sorting/searching distilled base BTF. */
+struct btf_name_info {
+ const char *name;
+ /* set when search requires a size match */
+ bool needs_size: 1;
+ unsigned int size: 31;
+ __u32 id;
+};
+
+static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(r->btf, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+
+ while ((id = btf_field_iter_next(&it)))
+ *id = r->id_map[*id];
+ return 0;
+}
+
+/* Simple string comparison used for sorting within BTF, since all distilled
+ * types are named. If strings match and size is non-zero for both elements,
+ * fall back to using size for ordering.
+ */
+static int cmp_btf_name_size(const void *n1, const void *n2)
+{
+ const struct btf_name_info *ni1 = n1;
+ const struct btf_name_info *ni2 = n2;
+ int name_diff = strcmp(ni1->name, ni2->name);
+
+ if (!name_diff && ni1->needs_size && ni2->needs_size)
+ return ni2->size - ni1->size;
+ return name_diff;
+}
+
+/* Binary search with a small twist; find leftmost element that matches
+ * so that we can then iterate through all exact matches. So for example
+ * searching { "a", "bb", "bb", "c" } we would always match on the
+ * leftmost "bb".
+ */
+static struct btf_name_info *search_btf_name_size(struct btf_name_info *key,
+ struct btf_name_info *vals,
+ int nelems)
+{
+ struct btf_name_info *ret = NULL;
+ int high = nelems - 1;
+ int low = 0;
+
+ while (low <= high) {
+ int mid = (low + high)/2;
+ struct btf_name_info *val = &vals[mid];
+ int diff = cmp_btf_name_size(key, val);
+
+ if (diff == 0)
+ ret = val;
+ /* even if found, keep searching for leftmost match */
+ if (diff <= 0)
+ high = mid - 1;
+ else
+ low = mid + 1;
+ }
+ return ret;
+}
+
+/* If a member of a split BTF struct/union refers to a base BTF
+ * struct/union, mark that struct/union id temporarily in the id_map
+ * with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef
+ * reference types, but if a pointer is encountered, the type is no longer
+ * considered embedded.
+ */
+static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(r->btf, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ if (!btf_is_composite(t))
+ return 0;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+
+ while ((id = btf_field_iter_next(&it))) {
+ __u32 next_id = *id;
+
+ while (next_id) {
+ t = btf_type_by_id(r->btf, next_id);
+ switch (btf_kind(t)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_TYPE_TAG:
+ next_id = t->type;
+ break;
+ case BTF_KIND_ARRAY: {
+ struct btf_array *a = btf_array(t);
+
+ next_id = a->type;
+ break;
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ if (next_id < r->nr_dist_base_types)
+ r->id_map[next_id] = BTF_IS_EMBEDDED;
+ next_id = 0;
+ break;
+ default:
+ next_id = 0;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate
+ * through base BTF looking up distilled type (using binary search) equivalents.
+ */
+static int btf_relocate_map_distilled_base(struct btf_relocate *r)
+{
+ struct btf_name_info *info, *info_end;
+ struct btf_type *base_t, *dist_t;
+ __u8 *base_name_cnt = NULL;
+ int err = 0;
+ __u32 id;
+
+ /* generate a sort index array of name/type ids sorted by name for
+ * distilled base BTF to speed name-based lookups.
+ */
+ info = calloc(r->nr_dist_base_types, sizeof(*info));
+ if (!info) {
+ err = -ENOMEM;
+ goto done;
+ }
+ info_end = info + r->nr_dist_base_types;
+ for (id = 0; id < r->nr_dist_base_types; id++) {
+ dist_t = btf_type_by_id(r->dist_base_btf, id);
+ info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
+ info[id].id = id;
+ info[id].size = dist_t->size;
+ info[id].needs_size = true;
+ }
+ qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size);
+
+ /* Mark distilled base struct/union members of split BTF structs/unions
+ * in id_map with BTF_IS_EMBEDDED; this signals that these types
+ * need to match both name and size, otherwise embedding the base
+ * struct/union in the split type is invalid.
+ */
+ for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
+ err = btf_mark_embedded_composite_type_ids(r, id);
+ if (err)
+ goto done;
+ }
+
+ /* Collect name counts for composite types in base BTF. If multiple
+ * instances of a struct/union of the same name exist, we need to use
+ * size to determine which to map to since name alone is ambiguous.
+ */
+ base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt));
+ if (!base_name_cnt) {
+ err = -ENOMEM;
+ goto done;
+ }
+ for (id = 1; id < r->nr_base_types; id++) {
+ base_t = btf_type_by_id(r->base_btf, id);
+ if (!btf_is_composite(base_t) || !base_t->name_off)
+ continue;
+ if (base_name_cnt[base_t->name_off] < 255)
+ base_name_cnt[base_t->name_off]++;
+ }
+
+ /* Now search base BTF for matching distilled base BTF types. */
+ for (id = 1; id < r->nr_base_types; id++) {
+ struct btf_name_info *dist_info, base_info = {};
+ int dist_kind, base_kind;
+
+ base_t = btf_type_by_id(r->base_btf, id);
+ /* distilled base consists of named types only. */
+ if (!base_t->name_off)
+ continue;
+ base_kind = btf_kind(base_t);
+ base_info.id = id;
+ base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
+ switch (base_kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* These types should match both name and size */
+ base_info.needs_size = true;
+ base_info.size = base_t->size;
+ break;
+ case BTF_KIND_FWD:
+ /* No size considerations for fwds. */
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /* Size only needs to be used for struct/union if there
+ * are multiple types in base BTF with the same name.
+ * If there are multiple _distilled_ types with the same
+ * name (a very unlikely scenario), that doesn't matter
+ * unless corresponding _base_ types to match them are
+ * missing.
+ */
+ base_info.needs_size = base_name_cnt[base_t->name_off] > 1;
+ base_info.size = base_t->size;
+ break;
+ default:
+ continue;
+ }
+ /* iterate over all matching distilled base types */
+ for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types);
+ dist_info != NULL && dist_info < info_end &&
+ cmp_btf_name_size(&base_info, dist_info) == 0;
+ dist_info++) {
+ if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) {
+ pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n",
+ id, dist_info->id);
+ err = -EINVAL;
+ goto done;
+ }
+ dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id);
+ dist_kind = btf_kind(dist_t);
+
+ /* Validate that the found distilled type is compatible.
+ * Do not error out on mismatch as another match may
+ * occur for an identically-named type.
+ */
+ switch (dist_kind) {
+ case BTF_KIND_FWD:
+ switch (base_kind) {
+ case BTF_KIND_FWD:
+ if (btf_kflag(dist_t) != btf_kflag(base_t))
+ continue;
+ break;
+ case BTF_KIND_STRUCT:
+ if (btf_kflag(base_t))
+ continue;
+ break;
+ case BTF_KIND_UNION:
+ if (!btf_kflag(base_t))
+ continue;
+ break;
+ default:
+ continue;
+ }
+ break;
+ case BTF_KIND_INT:
+ if (dist_kind != base_kind ||
+ btf_int_encoding(base_t) != btf_int_encoding(dist_t))
+ continue;
+ break;
+ case BTF_KIND_FLOAT:
+ if (dist_kind != base_kind)
+ continue;
+ break;
+ case BTF_KIND_ENUM:
+ /* ENUM and ENUM64 are encoded as sized ENUM in
+ * distilled base BTF.
+ */
+ if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64)
+ continue;
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /* size verification is required for embedded
+ * struct/unions.
+ */
+ if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED &&
+ base_t->size != dist_t->size)
+ continue;
+ break;
+ default:
+ continue;
+ }
+ if (r->id_map[dist_info->id] &&
+ r->id_map[dist_info->id] != BTF_IS_EMBEDDED) {
+ /* we already have a match; this tells us that
+ * multiple base types of the same name
+ * have the same size, since for cases where
+ * multiple types have the same name we match
+ * on name and size. In this case, we have
+ * no way of determining which to relocate
+ * to in base BTF, so error out.
+ */
+ pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n",
+ base_info.name, dist_info->id,
+ base_t->size, id, r->id_map[dist_info->id]);
+ err = -EINVAL;
+ goto done;
+ }
+ /* map id and name */
+ r->id_map[dist_info->id] = id;
+ r->str_map[dist_t->name_off] = base_t->name_off;
+ }
+ }
+ /* ensure all distilled BTF ids now have a mapping... */
+ for (id = 1; id < r->nr_dist_base_types; id++) {
+ const char *name;
+
+ if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED)
+ continue;
+ dist_t = btf_type_by_id(r->dist_base_btf, id);
+ name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
+ pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n",
+ name, id);
+ err = -EINVAL;
+ break;
+ }
+done:
+ free(base_name_cnt);
+ free(info);
+ return err;
+}
+
+/* distilled base should only have named int/float/enum/fwd/struct/union types. */
+static int btf_relocate_validate_distilled_base(struct btf_relocate *r)
+{
+ unsigned int i;
+
+ for (i = 1; i < r->nr_dist_base_types; i++) {
+ struct btf_type *t = btf_type_by_id(r->dist_base_btf, i);
+ int kind = btf_kind(t);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_FWD:
+ if (t->name_off)
+ break;
+ pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n",
+ i, kind);
+ return -EINVAL;
+ default:
+ pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n",
+ i, kind);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(r->btf, i);
+ struct btf_field_iter it;
+ __u32 *str_off;
+ int off, err;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
+ if (err)
+ return err;
+
+ while ((str_off = btf_field_iter_next(&it))) {
+ if (!*str_off)
+ continue;
+ if (*str_off >= r->dist_str_len) {
+ *str_off += r->base_str_len - r->dist_str_len;
+ } else {
+ off = r->str_map[*str_off];
+ if (!off) {
+ pr_warn("string '%s' [offset %u] is not mapped to base BTF",
+ btf__str_by_offset(r->btf, off), *str_off);
+ return -ENOENT;
+ }
+ *str_off = off;
+ }
+ }
+ return 0;
+}
+
+/* If successful, output of relocation is updated BTF with base BTF pointing
+ * at base_btf, and type ids, strings adjusted accordingly.
+ */
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map)
+{
+ unsigned int nr_types = btf__type_cnt(btf);
+ const struct btf_header *dist_base_hdr;
+ const struct btf_header *base_hdr;
+ struct btf_relocate r = {};
+ int err = 0;
+ __u32 id, i;
+
+ r.dist_base_btf = btf__base_btf(btf);
+ if (!base_btf || r.dist_base_btf == base_btf)
+ return -EINVAL;
+
+ r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf);
+ r.nr_base_types = btf__type_cnt(base_btf);
+ r.nr_split_types = nr_types - r.nr_dist_base_types;
+ r.btf = btf;
+ r.base_btf = base_btf;
+
+ r.id_map = calloc(nr_types, sizeof(*r.id_map));
+ r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map));
+ dist_base_hdr = btf_header(r.dist_base_btf);
+ base_hdr = btf_header(r.base_btf);
+ r.dist_str_len = dist_base_hdr->str_len;
+ r.base_str_len = base_hdr->str_len;
+ if (!r.id_map || !r.str_map) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = btf_relocate_validate_distilled_base(&r);
+ if (err)
+ goto err_out;
+
+ /* Split BTF ids need to be adjusted as base and distilled base
+ * have different numbers of types, changing the start id of split
+ * BTF.
+ */
+ for (id = r.nr_dist_base_types; id < nr_types; id++)
+ r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types;
+
+ /* Build a map from distilled base ids to actual base BTF ids; it is used
+ * to update split BTF id references. Also build a str_map mapping from
+ * distilled base BTF names to base BTF names.
+ */
+ err = btf_relocate_map_distilled_base(&r);
+ if (err)
+ goto err_out;
+
+ /* Next, rewrite type ids in split BTF, replacing split ids with updated
+ * ids based on number of types in base BTF, and base ids with
+ * relocated ids from base_btf.
+ */
+ for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) {
+ err = btf_relocate_rewrite_type_id(&r, id);
+ if (err)
+ goto err_out;
+ }
+ /* String offsets now need to be updated using the str_map. */
+ for (i = 0; i < r.nr_split_types; i++) {
+ err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types);
+ if (err)
+ goto err_out;
+ }
+ /* Finally reset base BTF to be base_btf */
+ btf_set_base_btf(btf, base_btf);
+
+ if (id_map) {
+ *id_map = r.id_map;
+ r.id_map = NULL;
+ }
+err_out:
+ free(r.id_map);
+ free(r.str_map);
+ return err;
+}
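
The relocation entry point above backs the new public btf__distill_base() and btf__relocate() APIs exported in libbpf.map further down. Below is a minimal usage sketch, not part of the patch, assuming the LIBBPF_1.5.0 prototypes int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, struct btf **new_split_btf) and int btf__relocate(struct btf *btf, const struct btf *base_btf), which are not shown in these hunks:

#include <bpf/btf.h>
#include <stdio.h>

/* Illustrative only: distill split BTF down to a minimal base, then later
 * relocate the split BTF against the full base BTF again.
 */
static int distill_and_relocate(struct btf *split_btf, const struct btf *vmlinux_btf)
{
	struct btf *new_base = NULL, *new_split = NULL;
	int err;

	err = btf__distill_base(split_btf, &new_base, &new_split);
	if (err) {
		fprintf(stderr, "distill failed: %d\n", err);
		return err;
	}

	/* Rewrites type IDs and string offsets as implemented above and
	 * repoints new_split's base BTF at vmlinux_btf.
	 */
	err = btf__relocate(new_split, vmlinux_btf);
	if (err)
		fprintf(stderr, "relocate failed: %d\n", err);

	btf__free(new_split);
	btf__free(new_base);
	return err;
}
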
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
index a336786a22a3..50befe125ddc 100644
--- a/tools/lib/bpf/features.c
+++ b/tools/lib/bpf/features.c
@@ -392,11 +392,41 @@ static int probe_uprobe_multi_link(int token_fd)
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
err = -errno; /* close() can clobber errno */
+ if (link_fd >= 0 || err != -EBADF) {
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+ return 0;
+ }
+
+ /* Initial multi-uprobe support in kernel didn't handle PID filtering
+ * correctly (it was doing thread filtering, not process filtering).
+ * So now we'll detect if PID filtering logic was fixed, and, if not,
+ * we'll pretend multi-uprobes are not supported.
+ * Multi-uprobes are used in USDT attachment logic, and we need to be
+ * conservative here, because multi-uprobe selection happens early at
+ * load time, while the use of PID filtering is known late at
+ * attachment time, at which point it's too late to undo multi-uprobe
+ * selection.
+ *
+ * Creating uprobe with pid == -1 for (invalid) '/' binary will fail
+ * early with -EINVAL on kernels with fixed PID filtering logic;
+ * otherwise -ESRCH would be returned if passed correct binary path
+ * (but we'll just get -EBADF, of course).
+ */
+ link_opts.uprobe_multi.pid = -1; /* invalid PID */
+ link_opts.uprobe_multi.path = "/"; /* invalid path */
+ link_opts.uprobe_multi.offsets = &offset;
+ link_opts.uprobe_multi.cnt = 1;
+
+ link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ err = -errno; /* close() can clobber errno */
+
if (link_fd >= 0)
close(link_fd);
close(prog_fd);
- return link_fd < 0 && err == -EBADF;
+ return link_fd < 0 && err == -EINVAL;
}
static int probe_kern_bpf_cookie(int token_fd)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5401f2df463d..a3be6f8fac09 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -229,7 +229,30 @@ static const char * const prog_type_name[] = {
static int __base_pr(enum libbpf_print_level level, const char *format,
va_list args)
{
- if (level == LIBBPF_DEBUG)
+ const char *env_var = "LIBBPF_LOG_LEVEL";
+ static enum libbpf_print_level min_level = LIBBPF_INFO;
+ static bool initialized;
+
+ if (!initialized) {
+ char *verbosity;
+
+ initialized = true;
+ verbosity = getenv(env_var);
+ if (verbosity) {
+ if (strcasecmp(verbosity, "warn") == 0)
+ min_level = LIBBPF_WARN;
+ else if (strcasecmp(verbosity, "debug") == 0)
+ min_level = LIBBPF_DEBUG;
+ else if (strcasecmp(verbosity, "info") == 0)
+ min_level = LIBBPF_INFO;
+ else
+ fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n",
+ env_var, verbosity);
+ }
+ }
+
+ /* if too verbose, skip logging */
+ if (level > min_level)
return 0;
return vfprintf(stderr, format, args);
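
For context, a hedged usage sketch of the LIBBPF_LOG_LEVEL control added above (the program itself is hypothetical, not part of the patch). The parsed level is cached on first use, so the variable must be set before libbpf emits its first message:

#include <stdlib.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* No custom print callback installed: the default stderr logger is
	 * used, and its verbosity is raised to debug via the environment.
	 * The variable could equally be exported from the shell instead.
	 */
	setenv("LIBBPF_LOG_LEVEL", "debug", 1);

	/* ... bpf_object__open()/load() etc. now log debug output ... */
	return 0;
}
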
@@ -549,6 +572,7 @@ struct bpf_map {
bool pinned;
bool reused;
bool autocreate;
+ bool autoattach;
__u64 map_extra;
};
@@ -1377,6 +1401,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
map->def.value_size = type->size;
map->def.max_entries = 1;
map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
+ map->autoattach = true;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
@@ -4796,6 +4821,20 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
return 0;
}
+int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
+{
+ if (!bpf_map__is_struct_ops(map))
+ return libbpf_err(-EINVAL);
+
+ map->autoattach = autoattach;
+ return 0;
+}
+
+bool bpf_map__autoattach(const struct bpf_map *map)
+{
+ return map->autoattach;
+}
+
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
struct bpf_map_info info;
@@ -10336,7 +10375,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
- if (prev == NULL)
+ if (prev == NULL && obj != NULL)
return obj->maps;
return __bpf_map__iter(prev, obj, 1);
@@ -10345,7 +10384,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
- if (next == NULL) {
+ if (next == NULL && obj != NULL) {
if (!obj->nr_maps)
return NULL;
return obj->maps + obj->nr_maps - 1;
@@ -12877,8 +12916,10 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
__u32 zero = 0;
int err, fd;
- if (!bpf_map__is_struct_ops(map))
+ if (!bpf_map__is_struct_ops(map)) {
+ pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
return libbpf_err_ptr(-EINVAL);
+ }
if (map->fd < 0) {
pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
@@ -13671,14 +13712,15 @@ int libbpf_num_possible_cpus(void)
static int populate_skeleton_maps(const struct bpf_object *obj,
struct bpf_map_skeleton *maps,
- size_t map_cnt)
+ size_t map_cnt, size_t map_skel_sz)
{
int i;
for (i = 0; i < map_cnt; i++) {
- struct bpf_map **map = maps[i].map;
- const char *name = maps[i].name;
- void **mmaped = maps[i].mmaped;
+ struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
+ struct bpf_map **map = map_skel->map;
+ const char *name = map_skel->name;
+ void **mmaped = map_skel->mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
@@ -13695,13 +13737,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj,
static int populate_skeleton_progs(const struct bpf_object *obj,
struct bpf_prog_skeleton *progs,
- size_t prog_cnt)
+ size_t prog_cnt, size_t prog_skel_sz)
{
int i;
for (i = 0; i < prog_cnt; i++) {
- struct bpf_program **prog = progs[i].prog;
- const char *name = progs[i].name;
+ struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
+ struct bpf_program **prog = prog_skel->prog;
+ const char *name = prog_skel->name;
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
@@ -13742,13 +13785,13 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
*s->obj = obj;
- err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
+ err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
- err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
+ err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
return libbpf_err(err);
@@ -13778,20 +13821,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
return libbpf_err(-errno);
}
- err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
+ err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
- err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
+ err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
- var_skel = &s->vars[var_idx];
+ var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
map = *var_skel->map;
map_type_id = bpf_map__btf_value_type_id(map);
map_type = btf__type_by_id(btf, map_type_id);
@@ -13838,10 +13881,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
}
for (i = 0; i < s->map_cnt; i++) {
- struct bpf_map *map = *s->maps[i].map;
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_map *map = *map_skel->map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
- void **mmaped = s->maps[i].mmaped;
+ void **mmaped = map_skel->mmaped;
if (!mmaped)
continue;
@@ -13889,8 +13933,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
int i, err;
for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_program *prog = *s->progs[i].prog;
- struct bpf_link **link = s->progs[i].link;
+ struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+ struct bpf_program *prog = *prog_skel->prog;
+ struct bpf_link **link = prog_skel->link;
if (!prog->autoload || !prog->autoattach)
continue;
@@ -13922,6 +13967,38 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
*/
}
+
+ for (i = 0; i < s->map_cnt; i++) {
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_map *map = *map_skel->map;
+ struct bpf_link **link;
+
+ if (!map->autocreate || !map->autoattach)
+ continue;
+
+ /* only struct_ops maps can be attached */
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ /* skeleton was created with an earlier version of bpftool; notify the user */
+ if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
+ pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
+ bpf_map__name(map));
+ continue;
+ }
+
+ link = map_skel->link;
+ if (*link)
+ continue;
+
+ *link = bpf_map__attach_struct_ops(map);
+ if (!*link) {
+ err = -errno;
+ pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
+ return libbpf_err(err);
+ }
+ }
+
return 0;
}
@@ -13930,11 +14007,25 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
int i;
for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_link **link = s->progs[i].link;
+ struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+ struct bpf_link **link = prog_skel->link;
bpf_link__destroy(*link);
*link = NULL;
}
+
+ if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
+ return;
+
+ for (i = 0; i < s->map_cnt; i++) {
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_link **link = map_skel->link;
+
+ if (link) {
+ bpf_link__destroy(*link);
+ *link = NULL;
+ }
+ }
}
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
@@ -13942,8 +14033,7 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
if (!s)
return;
- if (s->progs)
- bpf_object__detach_skeleton(s);
+ bpf_object__detach_skeleton(s);
if (s->obj)
bpf_object__close(*s->obj);
free(s->maps);
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c3f77d9260fe..64a6a3d323e3 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -98,7 +98,10 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
/**
* @brief **libbpf_set_print()** sets user-provided log callback function to
- * be used for libbpf warnings and informational messages.
+ * be used for libbpf warnings and informational messages. If the user callback
+ * is not set, messages are logged to stderr by default. The verbosity of these
+ * messages can be controlled by setting the environment variable
+ * LIBBPF_LOG_LEVEL to either warn, info, or debug.
* @param fn The log print function. If NULL, libbpf won't print anything.
* @return Pointer to old print function.
*
@@ -976,6 +979,23 @@ LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
/**
+ * @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach
+ * map during BPF skeleton attach phase.
+ * @param map the BPF map instance
+ * @param autoattach whether to attach map during BPF skeleton attach phase
+ * @return 0 on success; negative error code, otherwise
+ */
+LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);
+
+/**
+ * @brief **bpf_map__autoattach()** returns whether BPF map is configured to
+ * auto-attach during BPF skeleton attach phase.
+ * @param map the BPF map instance
+ * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
+ */
+LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);
+
+/**
* @brief **bpf_map__fd()** gets the file descriptor of the passed
* BPF map
* @param map the BPF map instance
@@ -1669,6 +1689,7 @@ struct bpf_map_skeleton {
const char *name;
struct bpf_map **map;
void **mmaped;
+ struct bpf_link **link;
};
struct bpf_prog_skeleton {
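
Combined with the skeleton changes in libbpf.c above, struct_ops maps are now auto-attached by bpf_object__attach_skeleton(), and the new link member lets the skeleton track and later destroy those links. A hedged sketch of opting a map out follows; the skeleton type, its generated helpers, and the map name are hypothetical:

#include <bpf/libbpf.h>
#include "example.skel.h"	/* hypothetical bpftool-generated skeleton */

static int run(void)
{
	struct example *skel;
	int err;

	skel = example__open_and_load();
	if (!skel)
		return -1;

	/* Skip auto-attach for this (hypothetical) struct_ops map; it can
	 * still be attached explicitly via bpf_map__attach_struct_ops().
	 */
	bpf_map__set_autoattach(skel->maps.my_ops, false);

	err = example__attach(skel);	/* progs attach; my_ops is skipped */

	example__destroy(skel);
	return err;
}
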
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index c1ce8aa3520b..8f0d9ea3b1b4 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -419,6 +419,10 @@ LIBBPF_1.4.0 {
LIBBPF_1.5.0 {
global:
+ btf__distill_base;
+ btf__relocate;
+ bpf_map__autoattach;
+ bpf_map__set_autoattach;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index a0dcfb82e455..408df59e0771 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -234,6 +234,9 @@ struct btf_type;
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
const char *btf_kind_str(const struct btf_type *t);
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+const struct btf_header *btf_header(const struct btf *btf);
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
{
@@ -508,11 +511,33 @@ struct bpf_line_info_min {
__u32 line_col;
};
+enum btf_field_iter_kind {
+ BTF_FIELD_ITER_IDS,
+ BTF_FIELD_ITER_STRS,
+};
+
+struct btf_field_desc {
+ /* once-per-type offsets */
+ int t_off_cnt, t_offs[2];
+ /* member struct size, or zero, if no members */
+ int m_sz;
+ /* repeated per-member offsets */
+ int m_off_cnt, m_offs[1];
+};
+
+struct btf_field_iter {
+ struct btf_field_desc desc;
+ void *p;
+ int m_idx;
+ int off_idx;
+ int vlen;
+};
+
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
+__u32 *btf_field_iter_next(struct btf_field_iter *it);
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
-int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
-int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
@@ -597,13 +622,9 @@ static inline int ensure_good_fd(int fd)
return fd;
}
-static inline int sys_dup2(int oldfd, int newfd)
+static inline int sys_dup3(int oldfd, int newfd, int flags)
{
-#ifdef __NR_dup2
- return syscall(__NR_dup2, oldfd, newfd);
-#else
- return syscall(__NR_dup3, oldfd, newfd, 0);
-#endif
+ return syscall(__NR_dup3, oldfd, newfd, flags);
}
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
@@ -614,7 +635,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
{
int err;
- err = sys_dup2(tmp_fd, fixed_fd);
+ err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
err = err < 0 ? -errno : 0;
close(tmp_fd); /* clean up temporary FD */
return err;
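
The btf_field_iter helpers declared above replace the per-type visitor callbacks removed from this header; the linker.c hunks below show the in-tree conversion. A minimal standalone sketch of the same pattern, counting a type's ID references (assumes the internal libbpf_internal.h header is available to the caller, as it is for linker.c):

#include "libbpf_internal.h"	/* internal header, as used by linker.c */

static int count_type_id_refs(struct btf_type *t, int *cnt)
{
	struct btf_field_iter it;
	__u32 *type_id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	/* Visits the type's own referenced IDs plus per-member IDs, as
	 * described by struct btf_field_desc.
	 */
	while ((type_id = btf_field_iter_next(&it)))
		(*cnt)++;

	return 0;
}
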
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 0d4be829551b..9cd3d4109788 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -957,19 +957,33 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
static int linker_sanity_check_btf(struct src_obj *obj)
{
struct btf_type *t;
- int i, n, err = 0;
+ int i, n, err;
if (!obj->btf)
return 0;
n = btf__type_cnt(obj->btf);
for (i = 1; i < n; i++) {
+ struct btf_field_iter it;
+ __u32 *type_id, *str_off;
+
t = btf_type_by_id(obj->btf, i);
- err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
- err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+ while ((type_id = btf_field_iter_next(&it))) {
+ if (*type_id >= n)
+ return -EINVAL;
+ }
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return err;
+ while ((str_off = btf_field_iter_next(&it))) {
+ if (!btf__str_by_offset(obj->btf, *str_off))
+ return -EINVAL;
+ }
}
return 0;
@@ -2213,10 +2227,17 @@ static int linker_fixup_btf(struct src_obj *obj)
vi = btf_var_secinfos(t);
for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
- const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
- int var_linkage = btf_var(vt)->linkage;
+ const char *var_name;
+ int var_linkage;
Elf64_Sym *sym;
+ /* could be a variable or function */
+ if (!btf_is_var(vt))
+ continue;
+
+ var_name = btf__str_by_offset(obj->btf, vt->name_off);
+ var_linkage = btf_var(vt)->linkage;
+
/* no need to patch up static or extern vars */
if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
continue;
@@ -2234,26 +2255,10 @@ static int linker_fixup_btf(struct src_obj *obj)
return 0;
}
-static int remap_type_id(__u32 *type_id, void *ctx)
-{
- int *id_map = ctx;
- int new_id = id_map[*type_id];
-
- /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
- if (new_id == 0 && *type_id != 0) {
- pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
- return -EINVAL;
- }
-
- *type_id = id_map[*type_id];
-
- return 0;
-}
-
static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
{
const struct btf_type *t;
- int i, j, n, start_id, id;
+ int i, j, n, start_id, id, err;
const char *name;
if (!obj->btf)
@@ -2324,9 +2329,25 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
n = btf__type_cnt(linker->btf);
for (i = start_id; i < n; i++) {
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
+ struct btf_field_iter it;
+ __u32 *type_id;
- if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
- return -EINVAL;
+ err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+
+ while ((type_id = btf_field_iter_next(&it))) {
+ int new_id = obj->btf_type_map[*type_id];
+
+ /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
+ if (new_id == 0 && *type_id != 0) {
+ pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
+ *type_id);
+ return -EINVAL;
+ }
+
+ *type_id = obj->btf_type_map[*type_id];
+ }
}
/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
diff --git a/tools/memory-model/Documentation/README b/tools/memory-model/Documentation/README
index db90a26dbdf4..304162743a5b 100644
--- a/tools/memory-model/Documentation/README
+++ b/tools/memory-model/Documentation/README
@@ -47,6 +47,10 @@ DESCRIPTION OF FILES
README
This file.
+access-marking.txt
+ Guidelines for marking intentionally concurrent accesses to
+ shared memory.
+
cheatsheet.txt
Quick-reference guide to the Linux-kernel memory model.
diff --git a/tools/memory-model/Documentation/access-marking.txt b/tools/memory-model/Documentation/access-marking.txt
index 65778222183e..3fbe77fd564a 100644
--- a/tools/memory-model/Documentation/access-marking.txt
+++ b/tools/memory-model/Documentation/access-marking.txt
@@ -6,7 +6,8 @@ normal accesses to shared memory, that is "normal" as in accesses that do
not use read-modify-write atomic operations. It also describes how to
document these accesses, both with comments and with special assertions
processed by the Kernel Concurrency Sanitizer (KCSAN). This discussion
-builds on an earlier LWN article [1].
+builds on an earlier LWN article [1] and Linux Foundation mentorship
+session [2].
ACCESS-MARKING OPTIONS
@@ -24,6 +25,11 @@ The Linux kernel provides the following access-marking options:
4. WRITE_ONCE(), for example, "WRITE_ONCE(a, b);"
The various forms of atomic_set() also fit in here.
+5. __data_racy, for example "int __data_racy a;"
+
+6. KCSAN's negative-marking assertions, ASSERT_EXCLUSIVE_ACCESS()
+ and ASSERT_EXCLUSIVE_WRITER(), are described in the
+ "ACCESS-DOCUMENTATION OPTIONS" section below.
These may be used in combination, as shown in this admittedly improbable
example:
@@ -31,7 +37,7 @@ example:
WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
Neither plain C-language accesses nor data_race() (#1 and #2 above) place
-any sort of constraint on the compiler's choice of optimizations [2].
+any sort of constraint on the compiler's choice of optimizations [3].
In contrast, READ_ONCE() and WRITE_ONCE() (#3 and #4 above) restrict the
compiler's use of code-motion and common-subexpression optimizations.
Therefore, if a given access is involved in an intentional data race,
@@ -205,6 +211,23 @@ because doing otherwise prevents KCSAN from detecting violations of your
code's synchronization rules.
+Use of __data_racy
+------------------
+
+Adding the __data_racy type qualifier to the declaration of a variable
+causes KCSAN to treat all accesses to that variable as if they were
+enclosed by data_race(). However, __data_racy does not affect the
+compiler, though one could imagine hardened kernel builds treating the
+__data_racy type qualifier as if it were the volatile keyword.
+
+Note well that __data_racy is subject to the same pointer-declaration
+rules as are other type qualifiers such as const and volatile.
+For example:
+
+ int __data_racy *p; // Pointer to data-racy data.
+ int *__data_racy p; // Data-racy pointer to non-data-racy data.
+
+
ACCESS-DOCUMENTATION OPTIONS
============================
@@ -342,7 +365,7 @@ as follows:
Because foo is read locklessly, all accesses are marked. The purpose
of the ASSERT_EXCLUSIVE_WRITER() is to allow KCSAN to check for a buggy
-concurrent lockless write.
+concurrent write, whether marked or not.
Lock-Protected Writes With Heuristic Lockless Reads
@@ -594,5 +617,8 @@ REFERENCES
[1] "Concurrency bugs should fear the big bad data-race detector (part 2)"
https://lwn.net/Articles/816854/
-[2] "Who's afraid of a big bad optimizing compiler?"
+[2] "The Kernel Concurrency Sanitizer"
+ https://www.linuxfoundation.org/webinars/the-kernel-concurrency-sanitizer
+
+[3] "Who's afraid of a big bad optimizing compiler?"
https://lwn.net/Articles/793253/
diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
index 53b5a492739d..03c12efed66a 100644
--- a/tools/memory-model/lock.cat
+++ b/tools/memory-model/lock.cat
@@ -54,6 +54,12 @@ flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR
*)
empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest
+(*
+ * In the same way, spin_is_locked() inside a critical section must always
+ * return True (no RU events can be in a critical section for the same lock).
+ *)
+empty ([LKW] ; po-loc ; [RU]) \ (po-loc ; [UL] ; po-loc) as nested-is-locked
+
(* The final value of a spinlock should not be tested *)
flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
@@ -79,42 +85,50 @@ empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
(* rfi for LF events: link each LKW to the LF events in its critical section *)
let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
-(* rfe for LF events *)
+(* Utility macro to convert a single pair to a single-edge relation *)
+let pair-to-relation p = p ++ 0
+
+(*
+ * If a given LF event e is outside a critical section, it cannot read
+ * internally but it may read from an LKW event in another thread.
+ * Compute the relation containing these possible edges.
+ *)
+let possible-rfe-noncrit-lf e = (LKW * {e}) & loc & ext
+
+(* Compute set of sets of possible rfe edges for LF events *)
let all-possible-rfe-lf =
(*
- * Given an LF event r, compute the possible rfe edges for that event
- * (all those starting from LKW events in other threads),
- * and then convert that relation to a set of single-edge relations.
+ * Convert the possible-rfe-noncrit-lf relation for e
+ * to a set of single edges
*)
- let possible-rfe-lf r =
- let pair-to-relation p = p ++ 0
- in map pair-to-relation ((LKW * {r}) & loc & ext)
- (* Do this for each LF event r that isn't in rfi-lf *)
- in map possible-rfe-lf (LF \ range(rfi-lf))
+ let set-of-singleton-rfe-lf e =
+ map pair-to-relation (possible-rfe-noncrit-lf e)
+ (* Do this for each LF event e that isn't in rfi-lf *)
+ in map set-of-singleton-rfe-lf (LF \ range(rfi-lf))
(* Generate all rf relations for LF events *)
with rfe-lf from cross(all-possible-rfe-lf)
let rf-lf = rfe-lf | rfi-lf
(*
- * RU, i.e., spin_is_locked() returning False, is slightly different.
- * We rely on the memory model to rule out cases where spin_is_locked()
- * within one of the lock's critical sections returns False.
+ * A given RU event e may read internally from the last po-previous UL,
+ * or it may read from a UL event in another thread or the initial write.
+ * Compute the relation containing these possible edges.
*)
-
-(* rfi for RU events: an RU may read from the last po-previous UL *)
-let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
-
-(* rfe for RU events: an RU may read from an external UL or the initial write *)
-let all-possible-rfe-ru =
- let possible-rfe-ru r =
- let pair-to-relation p = p ++ 0
- in map pair-to-relation (((UL | IW) * {r}) & loc & ext)
- in map possible-rfe-ru RU
+let possible-rf-ru e = (((UL * {e}) & po-loc) \
+ ([UL] ; po-loc ; [UL] ; po-loc)) |
+ (((UL | IW) * {e}) & loc & ext)
+
+(* Compute set of sets of possible rf edges for RU events *)
+let all-possible-rf-ru =
+ (* Convert the possible-rf-ru relation for e to a set of single edges *)
+ let set-of-singleton-rf-ru e =
+ map pair-to-relation (possible-rf-ru e)
+ (* Do this for each RU event e *)
+ in map set-of-singleton-rf-ru RU
(* Generate all rf relations for RU events *)
-with rfe-ru from cross(all-possible-rfe-ru)
-let rf-ru = rfe-ru | rfi-ru
+with rf-ru from cross(all-possible-rf-ru)
(* Final rf relation *)
let rf = rf | rf-lf | rf-ru
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index 8e9e09d84e26..d1cdf2a8f826 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -2,9 +2,12 @@
SUBDIRS = lib generated samples
-all: $(SUBDIRS)
+all: $(SUBDIRS) libynl.a
samples: | lib generated
+libynl.a: | lib generated
+ @echo -e "\tAR $@"
+ @ar rcs $@ lib/ynl.o generated/*-user.o
$(SUBDIRS):
@if [ -f "$@/Makefile" ] ; then \
@@ -17,5 +20,6 @@ clean distclean:
$(MAKE) -C $$dir $@; \
fi \
done
+ rm -f libynl.a
.PHONY: all clean distclean $(SUBDIRS)
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index f4e8eb79c1b8..0712b5e82eb7 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -16,7 +16,8 @@ get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2)
CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h)
-CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
+CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \
+ $(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
CFLAGS_mptcp_pm:=$(call get_hdr_inc,_LINUX_MPTCP_PM_H,mptcp_pm.h)
CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h)
@@ -25,3 +26,4 @@ CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_NETLINK_H,nfsd_netlink.h)
CFLAGS_ovs_datapath:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
CFLAGS_ovs_flow:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
CFLAGS_ovs_vport:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
+CFLAGS_tcp_metrics:=$(call get_hdr_inc,_LINUX_TCP_METRICS_H,tcp_metrics.h)
diff --git a/tools/net/ynl/lib/Makefile b/tools/net/ynl/lib/Makefile
index dfff3ecd1cba..2887cc5de530 100644
--- a/tools/net/ynl/lib/Makefile
+++ b/tools/net/ynl/lib/Makefile
@@ -14,7 +14,9 @@ include $(wildcard *.d)
all: ynl.a
ynl.a: $(OBJS)
- ar rcs $@ $(OBJS)
+ @echo -e "\tAR $@"
+ @ar rcs $@ $(OBJS)
+
clean:
rm -f *.o *.d *~
rm -rf __pycache__
diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h
index 6cf890080dc0..3c09a7bbfba5 100644
--- a/tools/net/ynl/lib/ynl-priv.h
+++ b/tools/net/ynl/lib/ynl-priv.h
@@ -45,17 +45,17 @@ struct ynl_policy_attr {
enum ynl_policy_type type;
unsigned int len;
const char *name;
- struct ynl_policy_nest *nest;
+ const struct ynl_policy_nest *nest;
};
struct ynl_policy_nest {
unsigned int max_attr;
- struct ynl_policy_attr *table;
+ const struct ynl_policy_attr *table;
};
struct ynl_parse_arg {
struct ynl_sock *ys;
- struct ynl_policy_nest *rsp_policy;
+ const struct ynl_policy_nest *rsp_policy;
void *data;
};
@@ -79,7 +79,7 @@ static inline void *ynl_dump_obj_next(void *obj)
struct ynl_dump_list_type *list;
uptr -= offsetof(struct ynl_dump_list_type, data);
- list = (void *)uptr;
+ list = (struct ynl_dump_list_type *)uptr;
uptr = (unsigned long)list->next;
uptr += offsetof(struct ynl_dump_list_type, data);
@@ -119,7 +119,7 @@ struct ynl_dump_state {
};
struct ynl_ntf_info {
- struct ynl_policy_nest *policy;
+ const struct ynl_policy_nest *policy;
ynl_parse_cb_t cb;
size_t alloc_sz;
void (*free)(struct ynl_ntf_base_type *ntf);
@@ -139,7 +139,7 @@ int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg);
static inline struct nlmsghdr *ynl_nlmsg_put_header(void *buf)
{
- struct nlmsghdr *nlh = buf;
+ struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
memset(nlh, 0, sizeof(*nlh));
nlh->nlmsg_len = NLMSG_HDRLEN;
@@ -196,7 +196,7 @@ static inline void *ynl_attr_data(const struct nlattr *attr)
static inline void *ynl_attr_data_end(const struct nlattr *attr)
{
- return ynl_attr_data(attr) + ynl_attr_data_len(attr);
+ return (char *)ynl_attr_data(attr) + ynl_attr_data_len(attr);
}
#define ynl_attr_for_each(attr, nlh, fixed_hdr_sz) \
@@ -228,7 +228,7 @@ ynl_attr_next(const void *end, const struct nlattr *prev)
{
struct nlattr *attr;
- attr = (void *)((char *)prev + NLA_ALIGN(prev->nla_len));
+ attr = (struct nlattr *)((char *)prev + NLA_ALIGN(prev->nla_len));
return ynl_attr_if_good(end, attr);
}
@@ -237,8 +237,8 @@ ynl_attr_first(const void *start, size_t len, size_t skip)
{
struct nlattr *attr;
- attr = (void *)((char *)start + NLMSG_ALIGN(skip));
- return ynl_attr_if_good(start + len, attr);
+ attr = (struct nlattr *)((char *)start + NLMSG_ALIGN(skip));
+ return ynl_attr_if_good((char *)start + len, attr);
}
static inline bool
@@ -262,9 +262,9 @@ ynl_attr_nest_start(struct nlmsghdr *nlh, unsigned int attr_type)
struct nlattr *attr;
if (__ynl_attr_put_overflow(nlh, 0))
- return ynl_nlmsg_end_addr(nlh) - NLA_HDRLEN;
+ return (struct nlattr *)ynl_nlmsg_end_addr(nlh) - 1;
- attr = ynl_nlmsg_end_addr(nlh);
+ attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh);
attr->nla_type = attr_type | NLA_F_NESTED;
nlh->nlmsg_len += NLA_HDRLEN;
@@ -286,7 +286,7 @@ ynl_attr_put(struct nlmsghdr *nlh, unsigned int attr_type,
if (__ynl_attr_put_overflow(nlh, size))
return;
- attr = ynl_nlmsg_end_addr(nlh);
+ attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh);
attr->nla_type = attr_type;
attr->nla_len = NLA_HDRLEN + size;
@@ -305,10 +305,10 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
if (__ynl_attr_put_overflow(nlh, len))
return;
- attr = ynl_nlmsg_end_addr(nlh);
+ attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh);
attr->nla_type = attr_type;
- strcpy(ynl_attr_data(attr), str);
+ strcpy((char *)ynl_attr_data(attr), str);
attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len);
nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index 4b9c091fc86b..fcb18a5a6d70 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -46,7 +46,7 @@
/* -- Netlink boiler plate */
static int
-ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type,
+ynl_err_walk_report_one(const struct ynl_policy_nest *policy, unsigned int type,
char *str, int str_sz, int *n)
{
if (!policy) {
@@ -75,8 +75,8 @@ ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type,
static int
ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
- struct ynl_policy_nest *policy, char *str, int str_sz,
- struct ynl_policy_nest **nest_pol)
+ const struct ynl_policy_nest *policy, char *str, int str_sz,
+ const struct ynl_policy_nest **nest_pol)
{
unsigned int astart_off, aend_off;
const struct nlattr *attr;
@@ -206,7 +206,7 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
bad_attr[n] = '\0';
}
if (tb[NLMSGERR_ATTR_MISS_TYPE]) {
- struct ynl_policy_nest *nest_pol = NULL;
+ const struct ynl_policy_nest *nest_pol = NULL;
unsigned int n, off, type;
void *start, *end;
int n2;
@@ -296,7 +296,7 @@ static int ynl_cb_done(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
{
- struct ynl_policy_attr *policy;
+ const struct ynl_policy_attr *policy;
unsigned int type, len;
unsigned char *data;
diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h
index eef7c6324ed4..6cd570b283ea 100644
--- a/tools/net/ynl/lib/ynl.h
+++ b/tools/net/ynl/lib/ynl.h
@@ -76,7 +76,7 @@ struct ynl_sock {
struct ynl_ntf_base_type **ntf_last_next;
struct nlmsghdr *nlh;
- struct ynl_policy_nest *req_policy;
+ const struct ynl_policy_nest *req_policy;
unsigned char *tx_buf;
unsigned char *rx_buf;
unsigned char raw_buf[];
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 35e666928119..d42c1d605969 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -743,6 +743,8 @@ class YnlFamily(SpecFamily):
decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
if 'enum' in attr_spec:
decoded = self._decode_enum(decoded, attr_spec)
+ elif attr_spec.display_hint:
+ decoded = self._formatted_string(decoded, attr_spec.display_hint)
elif attr_spec["type"] == 'indexed-array':
decoded = self._decode_array_attr(attr, attr_spec)
elif attr_spec["type"] == 'bitfield32':
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index a42d62b23ee0..51529fabd517 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -59,9 +59,9 @@ class Type(SpecAttr):
if 'nested-attributes' in attr:
self.nested_attrs = attr['nested-attributes']
if self.nested_attrs == family.name:
- self.nested_render_name = c_lower(f"{family.name}")
+ self.nested_render_name = c_lower(f"{family.ident_name}")
else:
- self.nested_render_name = c_lower(f"{family.name}_{self.nested_attrs}")
+ self.nested_render_name = c_lower(f"{family.ident_name}_{self.nested_attrs}")
if self.nested_attrs in self.family.consts:
self.nested_struct_type = 'struct ' + self.nested_render_name + '_'
@@ -693,9 +693,9 @@ class Struct:
self.nested = type_list is None
if family.name == c_lower(space_name):
- self.render_name = c_lower(family.name)
+ self.render_name = c_lower(family.ident_name)
else:
- self.render_name = c_lower(family.name + '-' + space_name)
+ self.render_name = c_lower(family.ident_name + '-' + space_name)
self.struct_name = 'struct ' + self.render_name
if self.nested and space_name in family.consts:
self.struct_name += '_'
@@ -761,7 +761,7 @@ class EnumEntry(SpecEnumEntry):
class EnumSet(SpecEnumSet):
def __init__(self, family, yaml):
- self.render_name = c_lower(family.name + '-' + yaml['name'])
+ self.render_name = c_lower(family.ident_name + '-' + yaml['name'])
if 'enum-name' in yaml:
if yaml['enum-name']:
@@ -777,7 +777,7 @@ class EnumSet(SpecEnumSet):
else:
self.user_type = 'int'
- self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-")
+ self.value_pfx = yaml.get('name-prefix', f"{family.ident_name}-{yaml['name']}-")
super().__init__(family, yaml)
@@ -802,9 +802,9 @@ class AttrSet(SpecAttrSet):
if 'name-prefix' in yaml:
pfx = yaml['name-prefix']
elif self.name == family.name:
- pfx = family.name + '-a-'
+ pfx = family.ident_name + '-a-'
else:
- pfx = f"{family.name}-a-{self.name}-"
+ pfx = f"{family.ident_name}-a-{self.name}-"
self.name_prefix = c_upper(pfx)
self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max"))
self.cnt_name = c_upper(self.yaml.get('attr-cnt-name', f"__{self.name_prefix}max"))
@@ -861,7 +861,7 @@ class Operation(SpecOperation):
def __init__(self, family, yaml, req_value, rsp_value):
super().__init__(family, yaml, req_value, rsp_value)
- self.render_name = c_lower(family.name + '_' + self.name)
+ self.render_name = c_lower(family.ident_name + '_' + self.name)
self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
('dump' in yaml and 'request' in yaml['dump'])
@@ -911,11 +911,11 @@ class Family(SpecFamily):
if 'uapi-header' in self.yaml:
self.uapi_header = self.yaml['uapi-header']
else:
- self.uapi_header = f"linux/{self.name}.h"
+ self.uapi_header = f"linux/{self.ident_name}.h"
if self.uapi_header.startswith("linux/") and self.uapi_header.endswith('.h'):
self.uapi_header_name = self.uapi_header[6:-2]
else:
- self.uapi_header_name = self.name
+ self.uapi_header_name = self.ident_name
def resolve(self):
self.resolve_up(super())
@@ -923,7 +923,7 @@ class Family(SpecFamily):
if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}:
raise Exception("Codegen only supported for genetlink")
- self.c_name = c_lower(self.name)
+ self.c_name = c_lower(self.ident_name)
if 'name-prefix' in self.yaml['operations']:
self.op_prefix = c_upper(self.yaml['operations']['name-prefix'])
else:
@@ -1507,12 +1507,12 @@ def print_dump_prototype(ri):
def put_typol_fwd(cw, struct):
- cw.p(f'extern struct ynl_policy_nest {struct.render_name}_nest;')
+ cw.p(f'extern const struct ynl_policy_nest {struct.render_name}_nest;')
def put_typol(cw, struct):
type_max = struct.attr_set.max_name
- cw.block_start(line=f'struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
+ cw.block_start(line=f'const struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
for _, arg in struct.member_list():
arg.attr_typol(cw)
@@ -1520,7 +1520,7 @@ def put_typol(cw, struct):
cw.block_end(line=';')
cw.nl()
- cw.block_start(line=f'struct ynl_policy_nest {struct.render_name}_nest =')
+ cw.block_start(line=f'const struct ynl_policy_nest {struct.render_name}_nest =')
cw.p(f'.max_attr = {type_max},')
cw.p(f'.table = {struct.render_name}_policy,')
cw.block_end(line=';')
@@ -2173,7 +2173,7 @@ def print_kernel_op_table_fwd(family, cw, terminate):
exported = not kernel_can_gen_family_struct(family)
if not terminate or exported:
- cw.p(f"/* Ops table for {family.name} */")
+ cw.p(f"/* Ops table for {family.ident_name} */")
pol_to_struct = {'global': 'genl_small_ops',
'per-op': 'genl_ops',
@@ -2225,12 +2225,12 @@ def print_kernel_op_table_fwd(family, cw, terminate):
continue
if 'do' in op:
- name = c_lower(f"{family.name}-nl-{op_name}-doit")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-doit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
if 'dump' in op:
- name = c_lower(f"{family.name}-nl-{op_name}-dumpit")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-dumpit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
cw.nl()
@@ -2256,13 +2256,13 @@ def print_kernel_op_table(family, cw):
for x in op['dont-validate']])), )
for op_mode in ['do', 'dump']:
if op_mode in op:
- name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
members.append((op_mode + 'it', name))
if family.kernel_policy == 'per-op':
struct = Struct(family, op['attribute-set'],
type_list=op['do']['request']['attributes'])
- name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
members.append(('policy', name))
members.append(('maxattr', struct.attr_max_val.enum_name))
if 'flags' in op:
@@ -2294,7 +2294,7 @@ def print_kernel_op_table(family, cw):
members.append(('validate',
' | '.join([c_upper('genl-dont-validate-' + x)
for x in dont_validate])), )
- name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
if 'pre' in op[op_mode]:
members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
members.append((op_mode + 'it', name))
@@ -2305,9 +2305,9 @@ def print_kernel_op_table(family, cw):
type_list=op[op_mode]['request']['attributes'])
if op.dual_policy:
- name = c_lower(f"{family.name}-{op_name}-{op_mode}-nl-policy")
+ name = c_lower(f"{family.ident_name}-{op_name}-{op_mode}-nl-policy")
else:
- name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
members.append(('policy', name))
members.append(('maxattr', struct.attr_max_val.enum_name))
flags = (op['flags'] if 'flags' in op else []) + ['cmd-cap-' + op_mode]
@@ -2326,7 +2326,7 @@ def print_kernel_mcgrp_hdr(family, cw):
cw.block_start('enum')
for grp in family.mcgrps['list']:
- grp_id = c_upper(f"{family.name}-nlgrp-{grp['name']},")
+ grp_id = c_upper(f"{family.ident_name}-nlgrp-{grp['name']},")
cw.p(grp_id)
cw.block_end(';')
cw.nl()
@@ -2339,7 +2339,7 @@ def print_kernel_mcgrp_src(family, cw):
cw.block_start('static const struct genl_multicast_group ' + family.c_name + '_nl_mcgrps[] =')
for grp in family.mcgrps['list']:
name = grp['name']
- grp_id = c_upper(f"{family.name}-nlgrp-{name}")
+ grp_id = c_upper(f"{family.ident_name}-nlgrp-{name}")
cw.p('[' + grp_id + '] = { "' + name + '", },')
cw.block_end(';')
cw.nl()
@@ -2361,7 +2361,7 @@ def print_kernel_family_struct_src(family, cw):
if not kernel_can_gen_family_struct(family):
return
- cw.block_start(f"struct genl_family {family.name}_nl_family __ro_after_init =")
+ cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =")
cw.p('.name\t\t= ' + family.fam_key + ',')
cw.p('.version\t= ' + family.ver_key + ',')
cw.p('.netnsok\t= true,')
@@ -2429,7 +2429,7 @@ def render_uapi(family, cw):
cw.p(' */')
uapi_enum_start(family, cw, const, 'name')
- name_pfx = const.get('name-prefix', f"{family.name}-{const['name']}-")
+ name_pfx = const.get('name-prefix', f"{family.ident_name}-{const['name']}-")
for entry in enum.entries.values():
suffix = ','
if entry.value_change:
@@ -2451,7 +2451,7 @@ def render_uapi(family, cw):
cw.nl()
elif const['type'] == 'const':
defines.append([c_upper(family.get('c-define-name',
- f"{family.name}-{const['name']}")),
+ f"{family.ident_name}-{const['name']}")),
const['value']])
if defines:
@@ -2529,7 +2529,7 @@ def render_uapi(family, cw):
defines = []
for grp in family.mcgrps['list']:
name = grp['name']
- defines.append([c_upper(grp.get('c-define-name', f"{family.name}-mcgrp-{name}")),
+ defines.append([c_upper(grp.get('c-define-name', f"{family.ident_name}-mcgrp-{name}")),
f'{name}'])
cw.nl()
if defines:
diff --git a/tools/net/ynl/ynl-gen-rst.py b/tools/net/ynl/ynl-gen-rst.py
index 657e881d2ea4..6c56d0d726b4 100755
--- a/tools/net/ynl/ynl-gen-rst.py
+++ b/tools/net/ynl/ynl-gen-rst.py
@@ -49,7 +49,7 @@ def inline(text: str) -> str:
def sanitize(text: str) -> str:
"""Remove newlines and multiple spaces"""
# This is useful for some fields that are spread across multiple lines
- return str(text).replace("\n", "").strip()
+ return str(text).replace("\n", " ").strip()
def rst_fields(key: str, value: str, level: int = 0) -> str:
@@ -156,7 +156,10 @@ def parse_do(do_dict: Dict[str, Any], level: int = 0) -> str:
lines = []
for key in do_dict.keys():
lines.append(rst_paragraph(bold(key), level + 1))
- lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
+ if key in ['request', 'reply']:
+ lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
+ else:
+ lines.append(headroom(level + 2) + do_dict[key] + "\n")
return "\n".join(lines)
@@ -172,13 +175,13 @@ def parse_do_attributes(attrs: Dict[str, Any], level: int = 0) -> str:
def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str:
"""Parse operations block"""
- preprocessed = ["name", "doc", "title", "do", "dump"]
+ preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
linkable = ["fixed-header", "attribute-set"]
lines = []
for operation in operations:
lines.append(rst_section(namespace, 'operation', operation["name"]))
- lines.append(rst_paragraph(sanitize(operation["doc"])) + "\n")
+ lines.append(rst_paragraph(operation["doc"]) + "\n")
for key in operation.keys():
if key in preprocessed:
@@ -188,6 +191,8 @@ def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str:
if key in linkable:
value = rst_ref(namespace, key, value)
lines.append(rst_fields(key, value, 0))
+ if 'flags' in operation:
+ lines.append(rst_fields('flags', rst_list_inline(operation['flags'])))
if "do" in operation:
lines.append(rst_paragraph(":do:", 0))
diff --git a/tools/objtool/Documentation/objtool.txt b/tools/objtool/Documentation/objtool.txt
index fe39c2a8ef0d..7c3ee959b63c 100644
--- a/tools/objtool/Documentation/objtool.txt
+++ b/tools/objtool/Documentation/objtool.txt
@@ -284,6 +284,25 @@ the objtool maintainers.
Otherwise the stack frame may not get created before the call.
+ objtool can help with pinpointing the exact function where it happens:
+
+ $ OBJTOOL_ARGS="--verbose" make arch/x86/kvm/
+
+ arch/x86/kvm/kvm.o: warning: objtool: .altinstr_replacement+0xc5: call without frame pointer save/setup
+ arch/x86/kvm/kvm.o: warning: objtool: em_loop.part.0+0x29: (alt)
+ arch/x86/kvm/kvm.o: warning: objtool: em_loop.part.0+0x0: <=== (sym)
+ LD [M] arch/x86/kvm/kvm-intel.o
+ 0000 0000000000028220 <em_loop.part.0>:
+ 0000 28220: 0f b6 47 61 movzbl 0x61(%rdi),%eax
+ 0004 28224: 3c e2 cmp $0xe2,%al
+ 0006 28226: 74 2c je 28254 <em_loop.part.0+0x34>
+ 0008 28228: 48 8b 57 10 mov 0x10(%rdi),%rdx
+ 000c 2822c: 83 f0 05 xor $0x5,%eax
+ 000f 2822f: 48 c1 e0 04 shl $0x4,%rax
+ 0013 28233: 25 f0 00 00 00 and $0xf0,%eax
+ 0018 28238: 81 e2 d5 08 00 00 and $0x8d5,%edx
+ 001e 2823e: 80 ce 02 or $0x2,%dh
+ ...
2. file.o: warning: objtool: .text+0x53: unreachable instruction
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 3a1d80a7878d..ed6bff0e01dc 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -125,8 +125,14 @@ bool arch_pc_relative_reloc(struct reloc *reloc)
#define is_RIP() ((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB() ((modrm_rm & 7) == CFI_SP && mod_is_mem())
+/*
+ * Check the ModRM register. If there is a SIB byte then check with
+ * the SIB base register. But if the SIB base is 5 (i.e. CFI_BP) and
+ * ModRM mod is 0 then there is no base register.
+ */
#define rm_is(reg) (have_SIB() ? \
- sib_base == (reg) && sib_index == CFI_SP : \
+ sib_base == (reg) && sib_index == CFI_SP && \
+ (sib_base != CFI_BP || modrm_mod != 0) : \
modrm_rm == (reg))
#define rm_is_mem(reg) (mod_is_mem() && !is_RIP() && rm_is(reg))
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
index 4134d27c696b..4ea0f9815fda 100644
--- a/tools/objtool/arch/x86/special.c
+++ b/tools/objtool/arch/x86/special.c
@@ -9,6 +9,29 @@
void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
{
+ static struct special_alt *group, *prev;
+
+ /*
+ * Recompute orig_len for nested ALTERNATIVE()s.
+ */
+ if (group && group->orig_sec == alt->orig_sec &&
+ group->orig_off == alt->orig_off) {
+
+ struct special_alt *iter = group;
+ for (;;) {
+ unsigned int len = max(iter->orig_len, alt->orig_len);
+ iter->orig_len = alt->orig_len = len;
+
+ if (iter == prev)
+ break;
+
+ iter = list_next_entry(iter, list);
+ }
+
+ } else group = alt;
+
+ prev = alt;
+
switch (feature) {
case X86_FEATURE_SMAP:
/*
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 5e21cfb7661d..387d56a7f5fb 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -144,7 +144,7 @@ static bool opts_valid(void)
opts.static_call ||
opts.uaccess) {
if (opts.dump_orc) {
- ERROR("--dump can't be combined with other options");
+ ERROR("--dump can't be combined with other actions");
return false;
}
@@ -159,7 +159,7 @@ static bool opts_valid(void)
if (opts.dump_orc)
return true;
- ERROR("At least one command required");
+ ERROR("At least one action required");
return false;
}
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index 7ebf29c91184..1e8141ef1b15 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -7,12 +7,16 @@
* Yes, this is unfortunate. A better solution is in the works.
*/
NORETURN(__fortify_panic)
+NORETURN(__ia32_sys_exit)
+NORETURN(__ia32_sys_exit_group)
NORETURN(__kunit_abort)
NORETURN(__module_put_and_kthread_exit)
NORETURN(__reiserfs_panic)
NORETURN(__stack_chk_fail)
NORETURN(__tdx_hypercall_failed)
NORETURN(__ubsan_handle_builtin_unreachable)
+NORETURN(__x64_sys_exit)
+NORETURN(__x64_sys_exit_group)
NORETURN(arch_cpu_idle_dead)
NORETURN(bch2_trans_in_restart_error)
NORETURN(bch2_trans_restart_error)
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 91b1950f5bd8..097a69db82a0 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -84,6 +84,14 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
entry->new_len);
}
+ orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
+ if (!orig_reloc) {
+ WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
+ return -1;
+ }
+
+ reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
+
if (entry->feature) {
unsigned short feature;
@@ -94,14 +102,6 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
arch_handle_alternative(feature, alt);
}
- orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
- if (!orig_reloc) {
- WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
- return -1;
- }
-
- reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
-
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
if (!new_reloc) {
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 5c35c0d89306..e6d56b555369 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -214,6 +214,7 @@ NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
+ VMLINUX_H=$(src-perf)/util/bpf_skel/vmlinux/vmlinux.h
config := 0
endif
endif
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 532b855df589..1464c6be6eb3 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -376,3 +376,4 @@
459 n64 lsm_get_self_attr sys_lsm_get_self_attr
460 n64 lsm_set_self_attr sys_lsm_set_self_attr
461 n64 lsm_list_modules sys_lsm_list_modules
+462 n64 mseal sys_mseal
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 17173b82ca21..3656f1ca7a21 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -548,3 +548,4 @@
459 common lsm_get_self_attr sys_lsm_get_self_attr
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 095bb86339a7..bd0fee24ad10 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -464,3 +464,4 @@
459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal sys_mseal
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 7e8d46f4147f..a396f6e6ab5b 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -374,7 +374,7 @@
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
-453 64 map_shadow_stack sys_map_shadow_stack
+453 common map_shadow_stack sys_map_shadow_stack
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
@@ -383,6 +383,7 @@
459 common lsm_get_self_attr sys_lsm_get_self_attr
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
#
# Due to a historical design error, certain syscalls are numbered differently
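For reference, the new table entries can be exercised from userspace through syscall(2). The sketch below is illustrative only: it assumes the three-argument mseal(addr, len, flags) prototype from the memory-sealing series (flags currently 0) and that the number 462 added above matches the running kernel. The exact set of operations a sealed mapping rejects is defined by that series; mprotect() is used here merely as a probe.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_mseal
#define __NR_mseal 462			/* number added to the tables above */
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Seal the mapping; later attempts to change it should be refused. */
	if (syscall(__NR_mseal, p, len, 0) != 0)
		perror("mseal");
	else if (mprotect(p, len, PROT_READ) != 0)
		perror("mprotect on sealed region (expected to fail)");

	return 0;
}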
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 66a3de8ac661..0a8ba1323d64 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1956,8 +1956,7 @@ static void record__read_lost_samples(struct record *rec)
if (count.lost) {
if (!lost) {
- lost = zalloc(sizeof(*lost) +
- session->machines.host.id_hdr_size);
+ lost = zalloc(PERF_SAMPLE_MAX_SIZE);
if (!lost) {
pr_debug("Memory allocation failed\n");
return;
@@ -1973,8 +1972,7 @@ static void record__read_lost_samples(struct record *rec)
lost_count = perf_bpf_filter__lost_count(evsel);
if (lost_count) {
if (!lost) {
- lost = zalloc(sizeof(*lost) +
- session->machines.host.id_hdr_size);
+ lost = zalloc(PERF_SAMPLE_MAX_SIZE);
if (!lost) {
pr_debug("Memory allocation failed\n");
return;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 51eca671c797..08a3a6effac1 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -765,7 +765,7 @@ static const char *fcntl_cmds[] = {
static DEFINE_STRARRAY(fcntl_cmds, "F_");
static const char *fcntl_linux_specific_cmds[] = {
- "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
+ "SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};
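The strarray above leans on C designated initializers: "DUPFD_QUERY" slots in right after "NOTIFY" (index 3, matching F_LINUX_SPECIFIC_BASE + 3), while the explicit [5] = "CANCELLK" re-anchors the later entries, so nothing after it shifts. A small self-contained illustration of that behaviour (the names here are made up for the demo):

#include <stdio.h>

/*
 * Entries without an index follow the previous one; an explicit [n] =
 * designator re-anchors the position, so everything after it stays put.
 */
static const char *demo[] = {
	"A", "B", "C", "D",	/* indices 0..3 */
	[5] = "F", "G",		/* indices 5..6; index 4 stays NULL */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(demo) / sizeof(demo[0]); i++)
		printf("%u: %s\n", i, demo[i] ? demo[i] : "(null)");
	return 0;
}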
diff --git a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
index d18bfb238f66..13aea8fc3d45 100644
--- a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
+++ b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
@@ -97,10 +97,16 @@
#define LOCAL_TIMER_VECTOR 0xec
+/*
+ * Posted interrupt notification vector for all device MSIs delivered to
+ * the host kernel.
+ */
+#define POSTED_MSI_NOTIFICATION_VECTOR 0xeb
+
#define NR_VECTORS 256
#ifdef CONFIG_X86_LOCAL_APIC
-#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR
+#define FIRST_SYSTEM_VECTOR POSTED_MSI_NOTIFICATION_VECTOR
#else
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 139c330ccf2c..89d16b90370b 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -16,6 +16,7 @@ struct cred;
struct socket;
struct sock;
struct sk_buff;
+struct proto_accept_arg;
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -433,7 +434,7 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
-extern struct file *do_accept(struct file *file, unsigned file_flags,
+extern struct file *do_accept(struct file *file, struct proto_accept_arg *arg,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
index 282e90aeb163..c0bcc185fa48 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
@@ -9,6 +9,14 @@
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
/*
+ * Request notifications on a directory.
+ * See below for events that may be notified.
+ */
+#define F_NOTIFY (F_LINUX_SPECIFIC_BASE + 2)
+
+#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3)
+
+/*
* Cancel a blocking posix lock; internal use only until we expose an
* asynchronous lock api to userspace:
*/
@@ -18,12 +26,6 @@
#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
/*
- * Request nofications on a directory.
- * See below for events that may be notified.
- */
-#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
-
-/*
* Set and get of pipe page size array
*/
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
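F_DUPFD_QUERY, now exported at F_LINUX_SPECIFIC_BASE + 3, is understood to report whether the fd passed as the argument refers to the same open file description as the fd the fcntl(2) is issued on (1 if so, 0 if not). A minimal sketch under that assumption; the fallback define is only for older userspace headers:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef F_DUPFD_QUERY
#define F_DUPFD_QUERY 1027	/* F_LINUX_SPECIFIC_BASE (1024) + 3, as above */
#endif

int main(void)
{
	int fd = open("/dev/null", O_RDONLY);
	int dup_fd = dup(fd);				/* same open file description */
	int other = open("/dev/null", O_RDONLY);	/* a separate one */

	printf("dup'd fd: %d\n", fcntl(fd, F_DUPFD_QUERY, dup_fd));	/* 1 expected */
	printf("fresh fd: %d\n", fcntl(fd, F_DUPFD_QUERY, other));	/* 0 expected */

	close(fd);
	close(dup_fd);
	close(other);
	return 0;
}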
diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
index 370ed14b1ae0..35791791a879 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/prctl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
@@ -306,4 +306,26 @@ struct prctl_mm_map {
# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc
# define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f
+#define PR_RISCV_SET_ICACHE_FLUSH_CTX 71
+# define PR_RISCV_CTX_SW_FENCEI_ON 0
+# define PR_RISCV_CTX_SW_FENCEI_OFF 1
+# define PR_RISCV_SCOPE_PER_PROCESS 0
+# define PR_RISCV_SCOPE_PER_THREAD 1
+
+/* PowerPC Dynamic Execution Control Register (DEXCR) controls */
+#define PR_PPC_GET_DEXCR 72
+#define PR_PPC_SET_DEXCR 73
+/* DEXCR aspect to act on */
+# define PR_PPC_DEXCR_SBHE 0 /* Speculative branch hint enable */
+# define PR_PPC_DEXCR_IBRTPD 1 /* Indirect branch recurrent target prediction disable */
+# define PR_PPC_DEXCR_SRAPD 2 /* Subroutine return address prediction disable */
+# define PR_PPC_DEXCR_NPHIE 3 /* Non-privileged hash instruction enable */
+/* Action to apply / return */
+# define PR_PPC_DEXCR_CTRL_EDITABLE 0x1 /* Aspect can be modified with PR_PPC_SET_DEXCR */
+# define PR_PPC_DEXCR_CTRL_SET 0x2 /* Set the aspect for this process */
+# define PR_PPC_DEXCR_CTRL_CLEAR 0x4 /* Clear the aspect for this process */
+# define PR_PPC_DEXCR_CTRL_SET_ONEXEC 0x8 /* Set the aspect on exec */
+# define PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */
+# define PR_PPC_DEXCR_CTRL_MASK 0x1f
+
#endif /* _LINUX_PRCTL_H */
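The PR_PPC_*_DEXCR constants are consumed through prctl(2); the sketch below assumes the calling convention of the corresponding powerpc series, i.e. prctl(PR_PPC_GET_DEXCR, aspect, 0, 0, 0) returns the PR_PPC_DEXCR_CTRL_* bits for that aspect and prctl(PR_PPC_SET_DEXCR, aspect, ctrl, 0, 0) applies them. It only does anything useful on a DEXCR-capable PowerPC kernel; the fallback defines mirror the header above.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_PPC_GET_DEXCR
#define PR_PPC_GET_DEXCR		72
#define PR_PPC_SET_DEXCR		73
#define PR_PPC_DEXCR_NPHIE		3	/* non-privileged hash insn enable */
#define PR_PPC_DEXCR_CTRL_EDITABLE	0x1
#define PR_PPC_DEXCR_CTRL_SET		0x2
#define PR_PPC_DEXCR_CTRL_SET_ONEXEC	0x8
#endif

int main(void)
{
	int ctrl = prctl(PR_PPC_GET_DEXCR, PR_PPC_DEXCR_NPHIE, 0L, 0L, 0L);

	if (ctrl < 0) {
		perror("PR_PPC_GET_DEXCR");	/* non-PowerPC or older kernel */
		return 1;
	}
	printf("NPHIE: editable=%d set=%d\n",
	       !!(ctrl & PR_PPC_DEXCR_CTRL_EDITABLE),
	       !!(ctrl & PR_PPC_DEXCR_CTRL_SET));

	/* Ask for the aspect to be set now and kept across exec. */
	if ((ctrl & PR_PPC_DEXCR_CTRL_EDITABLE) &&
	    prctl(PR_PPC_SET_DEXCR, PR_PPC_DEXCR_NPHIE,
		  PR_PPC_DEXCR_CTRL_SET | PR_PPC_DEXCR_CTRL_SET_ONEXEC, 0L, 0L))
		perror("PR_PPC_SET_DEXCR");
	return 0;
}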
diff --git a/tools/perf/trace/beauty/include/uapi/linux/stat.h b/tools/perf/trace/beauty/include/uapi/linux/stat.h
index 2f2ee82d5517..67626d535316 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/stat.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/stat.h
@@ -126,8 +126,9 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+ __u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
- __u64 __spare3[12]; /* Spare space for future expansion */
+ __u64 __spare3[11]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -155,6 +156,7 @@ struct statx {
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
+#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
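STATX_SUBVOL asks the filesystem to fill in the new stx_subvol field; a minimal sketch, assuming glibc's statx(2) wrapper and userspace headers new enough to carry the field (otherwise the raw syscall plus a local struct definition would be needed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;
	const char *path = argc > 1 ? argv[1] : ".";

	if (statx(AT_FDCWD, path, 0, STATX_SUBVOL, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_SUBVOL)	/* the filesystem filled it in */
		printf("%s: subvolume id %llu\n", path,
		       (unsigned long long)stx.stx_subvol);
	else
		printf("%s: no subvolume information\n", path);
	return 0;
}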
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 233f2b6edf52..49b79cf0c5cc 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -86,14 +86,6 @@ static struct comm_str *comm_str__new(const char *str)
return result;
}
-static int comm_str__cmp(const void *_lhs, const void *_rhs)
-{
- const struct comm_str *lhs = *(const struct comm_str * const *)_lhs;
- const struct comm_str *rhs = *(const struct comm_str * const *)_rhs;
-
- return strcmp(comm_str__str(lhs), comm_str__str(rhs));
-}
-
static int comm_str__search(const void *_key, const void *_member)
{
const char *key = _key;
@@ -169,9 +161,24 @@ static struct comm_str *comm_strs__findnew(const char *str)
}
result = comm_str__new(str);
if (result) {
- comm_strs->strs[comm_strs->num_strs++] = result;
- qsort(comm_strs->strs, comm_strs->num_strs, sizeof(struct comm_str *),
- comm_str__cmp);
+ int low = 0, high = comm_strs->num_strs - 1;
+ int insert = comm_strs->num_strs; /* Default to inserting at the end. */
+
+ while (low <= high) {
+ int mid = low + (high - low) / 2;
+ int cmp = strcmp(comm_str__str(comm_strs->strs[mid]), str);
+
+ if (cmp < 0) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ insert = mid;
+ }
+ }
+ memmove(&comm_strs->strs[insert + 1], &comm_strs->strs[insert],
+ (comm_strs->num_strs - insert) * sizeof(struct comm_str *));
+ comm_strs->num_strs++;
+ comm_strs->strs[insert] = result;
}
}
up_write(&comm_strs->lock);
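The replacement above drops the qsort() after every append in favour of a lower-bound binary search plus a memmove(), so the array stays sorted with O(log n) comparisons per insertion. A standalone sketch of the same pattern on a plain string array (illustrative only, not the perf data structures; capacity checks omitted):

#include <stdio.h>
#include <string.h>

#define CAP 16

static const char *strs[CAP];
static int num_strs;

/* Insert 'str' keeping 'strs' sorted; lower-bound binary search for the slot. */
static void sorted_insert(const char *str)
{
	int low = 0, high = num_strs - 1, insert = num_strs;

	while (low <= high) {
		int mid = low + (high - low) / 2;

		if (strcmp(strs[mid], str) < 0) {
			low = mid + 1;
		} else {
			high = mid - 1;
			insert = mid;
		}
	}
	memmove(&strs[insert + 1], &strs[insert],
		(num_strs - insert) * sizeof(strs[0]));
	strs[insert] = str;
	num_strs++;
}

int main(void)
{
	const char *input[] = { "perf", "sched", "comm", "dso" };

	for (unsigned int i = 0; i < sizeof(input) / sizeof(input[0]); i++)
		sorted_insert(input[i]);
	for (int i = 0; i < num_strs; i++)
		puts(strs[i]);
	return 0;
}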
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index ab3d0c01dd63..a69a9c661200 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -203,11 +203,27 @@ int __dsos__add(struct dsos *dsos, struct dso *dso)
dsos->dsos = temp;
dsos->allocated = to_allocate;
}
- dsos->dsos[dsos->cnt++] = dso__get(dso);
- if (dsos->cnt >= 2 && dsos->sorted) {
- dsos->sorted = dsos__cmp_long_name_id_short_name(&dsos->dsos[dsos->cnt - 2],
- &dsos->dsos[dsos->cnt - 1])
- <= 0;
+ if (!dsos->sorted) {
+ dsos->dsos[dsos->cnt++] = dso__get(dso);
+ } else {
+ int low = 0, high = dsos->cnt - 1;
+ int insert = dsos->cnt; /* Default to inserting at the end. */
+
+ while (low <= high) {
+ int mid = low + (high - low) / 2;
+ int cmp = dsos__cmp_long_name_id_short_name(&dsos->dsos[mid], &dso);
+
+ if (cmp < 0) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ insert = mid;
+ }
+ }
+ memmove(&dsos->dsos[insert + 1], &dsos->dsos[insert],
+ (dsos->cnt - insert) * sizeof(struct dso *));
+ dsos->cnt++;
+ dsos->dsos[insert] = dso__get(dso);
}
dso__set_dsos(dso, dsos);
return 0;
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index b53753dee02f..6c02f401069e 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -67,6 +67,7 @@ LANGUAGES = de fr it cs pt ka
bindir ?= /usr/bin
sbindir ?= /usr/sbin
mandir ?= /usr/man
+libdir ?= /usr/lib
includedir ?= /usr/include
localedir ?= /usr/share/locale
docdir ?= /usr/share/doc/packages/cpupower
@@ -94,15 +95,6 @@ RANLIB = $(CROSS)ranlib
HOSTCC = gcc
MKDIR = mkdir
-# 64bit library detection
-include ../../scripts/Makefile.arch
-
-ifeq ($(IS_64_BIT), 1)
-libdir ?= /usr/lib64
-else
-libdir ?= /usr/lib
-endif
-
# Now we set up the build system
#
@@ -332,4 +324,39 @@ uninstall:
rm -f $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
done;
-.PHONY: all utils libcpupower update-po create-gmo install-lib install-tools install-man install-gmo install uninstall clean
+help:
+ @echo 'Building targets:'
+ @echo ' all - Default target. Could be omitted. Put build artifacts'
+ @echo ' to "O" cmdline option dir (default: current dir)'
+ @echo ' install - Install previously built project files from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or'
+ @echo ' Makefile config block option (default: "")'
+ @echo ' install-lib - Install previously built library binary from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' and library headers from "lib/" for userspace to the install'
+ @echo ' dir defined by "DESTDIR" cmdline (default: "")'
+ @echo ' install-tools - Install previously built "cpupower" util from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir) and'
+ @echo ' "cpupower-completion.sh" script from the src dir to the'
+ @echo ' install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-man - Install man pages from the "man" src subdir to the'
+ @echo ' install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-gmo - Install previously built language files from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-bench - Install previously built "cpufreq-bench" util files from the'
+ @echo ' output dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ''
+ @echo 'Cleaning targets:'
+ @echo ' clean - Clean build artifacts from the dir defined by "O" cmdline'
+ @echo ' option (default: current dir)'
+ @echo ' uninstall - Remove previously installed files from the dir defined by "DESTDIR"'
+ @echo ' cmdline or Makefile config block option (default: "")'
+
+.PHONY: all utils libcpupower update-po create-gmo install-lib install-tools install-man install-gmo install uninstall clean help
diff --git a/tools/power/cpupower/README b/tools/power/cpupower/README
index 1c68f47663b2..2678ed81d311 100644
--- a/tools/power/cpupower/README
+++ b/tools/power/cpupower/README
@@ -22,16 +22,156 @@ interfaces [depending on configuration, see below].
compilation and installation
----------------------------
-make
-su
-make install
-
-should suffice on most systems. It builds libcpupower to put in
-/usr/lib; cpupower, cpufreq-bench_plot.sh to put in /usr/bin; and
-cpufreq-bench to put in /usr/sbin. If you want to set up the paths
-differently and/or want to configure the package to your specific
-needs, you need to open "Makefile" with an editor of your choice and
-edit the block marked CONFIGURATION.
+There are two output directories: one for the build output and another
+for installing the build results, that is, the utility, library,
+man pages, etc.
+
+default directory
+-----------------
+
+When the default directories are used, the build and install process
+requires no additional parameters:
+
+build
+-----
+
+$ make
+
+The output directory for the 'make' command is the current directory and
+its subdirs in the kernel tree:
+tools/power/cpupower
+
+install
+-------
+
+$ sudo make install
+
+The 'make install' command puts the targets into the default system dirs:
+
+-----------------------------------------------------------------------
+| Installing file | System dir |
+-----------------------------------------------------------------------
+| libcpupower | /usr/lib |
+-----------------------------------------------------------------------
+| cpupower | /usr/bin |
+-----------------------------------------------------------------------
+| cpufreq-bench_plot.sh | /usr/bin |
+-----------------------------------------------------------------------
+| man pages | /usr/man |
+-----------------------------------------------------------------------
+
+In other words, it makes the build results available system-wide, so any
+user can simply start using the tool without any additional steps.
+
+custom directory
+----------------
+
+There are two make command-line variables, 'O' and 'DESTDIR', that set up
+the appropriate dirs:
+'O' - build directory
+'DESTDIR' - installation directory. This variable can also be set in
+the 'CONFIGURATION' block of the "Makefile".
+
+build
+-----
+
+$ make O=<your_custom_build_catalog>
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build
+
+install
+-------
+
+$ make O=<your_custom_build_catalog> DESTDIR=<your_custom_install_catalog>
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build DESTDIR=/home/hedin/prj/cpupower \
+> install
+
+Notice that both variables 'O' and 'DESTDIR' have been provided. The reason
+is that the build results are saved in the custom output dir defined by 'O'
+variable. So, this dir is the source for the installation step. If only
+'DESTDIR' were provided then the 'install' target would assume that the
+build directory is the current one, build everything there and install
+from the current dir.
+
+The files will be installed to the following dirs:
+
+-----------------------------------------------------------------------
+| Installing file | System dir |
+-----------------------------------------------------------------------
+| libcpupower | ${DESTDIR}/usr/lib |
+-----------------------------------------------------------------------
+| cpupower | ${DESTDIR}/usr/bin |
+-----------------------------------------------------------------------
+| cpufreq-bench_plot.sh | ${DESTDIR}/usr/bin |
+-----------------------------------------------------------------------
+| man pages | ${DESTDIR}/usr/man |
+-----------------------------------------------------------------------
+
+If you look at the table of the default 'make' output dirs you will
+notice that the only difference from the non-default case is the
+${DESTDIR} prefix. So, the structure of the output dirs remains the same
+regardless of the root output directory.
+
+
+clean and uninstall
+-------------------
+
+The 'clean' target removes build results from the build catalog.
+The 'uninstall' target removes previously installed files from the
+installation directory.
+
+default directory
+-----------------
+
+This case is a straightforward one:
+$ make clean
+$ make uninstall
+
+custom directory
+----------------
+
+Use 'O' command line variable to remove previously built files from the
+build dir:
+$ make O=<your_custom_build_catalog> clean
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build clean
+
+Use 'DESTDIR' command line variable to uninstall previously installed files
+from the given dir:
+$ make DESTDIR=<your_custom_install_catalog> uninstall
+
+Example:
+$ make DESTDIR=/home/hedin/prj/cpupower uninstall
+
+
+running the tool
+----------------
+
+default directory
+-----------------
+
+$ sudo cpupower
+
+custom directory
+----------------
+
+Running the utility from the custom build catalog is a little more
+complicated, as the 'just run' approach doesn't work.
+Assuming that the current dir is '<your_custom_install_catalog>/usr',
+issuing the following command:
+
+$ sudo ./bin/cpupower
+will produce the following error output:
+./bin/cpupower: error while loading shared libraries: libcpupower.so.1:
+cannot open shared object file: No such file or directory
+
+The issue is that the binary cannot find the 'libcpupower' library, so we
+have to point it at the lib dir:
+$ sudo LD_LIBRARY_PATH=lib64/ ./bin/cpupower
THANKS
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index a4b902f9e1c4..34e5894476eb 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -1,4 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+ifeq ($(MAKELEVEL),0)
+$(error This Makefile is not intended to be run standalone, but only as a part \
+of the main one in the parent dir)
+endif
+
OUTPUT := ./
ifeq ("$(origin O)", "command line")
ifneq ($(O),)
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1
index 8ee737eefa5c..89af019f8dc4 100644
--- a/tools/power/cpupower/man/cpupower-monitor.1
+++ b/tools/power/cpupower/man/cpupower-monitor.1
@@ -81,11 +81,6 @@ Measure idle and frequency characteristics of an arbitrary command/workload.
The executable \fBcommand\fP is forked and upon its exit, statistics gathered since it was
forked are displayed.
.RE
-.PP
-\-v
-.RS 4
-Increase verbosity if the binary was compiled with the DEBUG option set.
-.RE
.SH MONITOR DESCRIPTIONS
.SS "Idle_Stats"
@@ -172,9 +167,11 @@ displayed.
"BIOS and Kernel Developer’s Guide (BKDG) for AMD Family 14h Processors"
https://support.amd.com/us/Processor_TechDocs/43170.pdf
-"Intel® Turbo Boost Technology
-in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
-http://download.intel.com/design/processor/applnots/320354.pdf
+"What Is Intel® Turbo Boost Technology?"
+https://www.intel.com/content/www/us/en/gaming/resources/turbo-boost.html
+
+"Power Management - Technology Overview"
+https://cdrdv2.intel.com/v1/dl/getContent/637748
"Intel® 64 and IA-32 Architectures Software Developer's Manual
Volume 3B: System Programming Guide"
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
index c519cc89c97f..0a56e22240fc 100644
--- a/tools/power/cpupower/utils/helpers/amd.c
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -41,6 +41,16 @@ union core_pstate {
unsigned res1:31;
unsigned en:1;
} pstatedef;
+ /* since fam 1Ah: */
+ struct {
+ unsigned fid:12;
+ unsigned res1:2;
+ unsigned vid:8;
+ unsigned iddval:8;
+ unsigned idddiv:2;
+ unsigned res2:31;
+ unsigned en:1;
+ } pstatedef2;
unsigned long long val;
};
@@ -48,6 +58,10 @@ static int get_did(union core_pstate pstate)
{
int t;
+ /* Fam 1Ah onward do not use did */
+ if (cpupower_cpu_info.family >= 0x1A)
+ return 0;
+
if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF)
t = pstate.pstatedef.did;
else if (cpupower_cpu_info.family == 0x12)
@@ -61,12 +75,18 @@ static int get_did(union core_pstate pstate)
static int get_cof(union core_pstate pstate)
{
int t;
- int fid, did, cof;
+ int fid, did, cof = 0;
did = get_did(pstate);
if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) {
- fid = pstate.pstatedef.fid;
- cof = 200 * fid / did;
+ if (cpupower_cpu_info.family >= 0x1A) {
+ fid = pstate.pstatedef2.fid;
+ if (fid > 0x0f)
+ cof = (fid * 5);
+ } else {
+ fid = pstate.pstatedef.fid;
+ cof = 200 * fid / did;
+ }
} else {
t = 0x10;
fid = pstate.pstate.fid;
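On Family 1Ah the divisor field is gone and the branch added above derives the core frequency directly from the 12-bit FID scaled by 5 MHz, with FID values of 0x0f and below treated as invalid. A worked sketch of that arithmetic (the FID values are illustrative, not read from real hardware):

#include <stdio.h>

/* Mirror of the new branch above: cof = fid * 5 (MHz) when fid > 0x0f. */
static unsigned int fam1ah_cof_mhz(unsigned int fid)
{
	return fid > 0x0f ? fid * 5 : 0;
}

int main(void)
{
	unsigned int fids[] = { 0x00f, 0x190, 0x258 };	/* illustrative values */

	/* 0x00f -> 0 MHz (invalid), 0x190 -> 2000 MHz, 0x258 -> 3000 MHz */
	for (unsigned int i = 0; i < sizeof(fids) / sizeof(fids[0]); i++)
		printf("fid=0x%03x -> %u MHz\n", fids[i], fam1ah_cof_mhz(fids[i]));
	return 0;
}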
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index 075e766ff1f3..f746099b5dac 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -35,7 +35,7 @@ static unsigned int avail_monitors;
static char *progname;
enum operation_mode_e { list = 1, show, show_all };
-static int mode;
+static enum operation_mode_e mode;
static int interval = 1;
static char *show_monitors_param;
static struct cpupower_topology cpu_top;
diff --git a/tools/power/pm-graph/bootgraph.py b/tools/power/pm-graph/bootgraph.py
index f96f50e0c336..8a3ef94fe88f 100755
--- a/tools/power/pm-graph/bootgraph.py
+++ b/tools/power/pm-graph/bootgraph.py
@@ -77,12 +77,12 @@ class SystemValues(aslib.SystemValues):
fp.close()
self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
def kernelVersion(self, msg):
- m = re.match('^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
+ m = re.match(r'^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
if m:
return m.group('v')
return 'unknown'
def checkFtraceKernelVersion(self):
- m = re.match('^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
+ m = re.match(r'^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
if m:
val = tuple(map(int, m.groups()))
if val >= (4, 10, 0):
@@ -324,7 +324,7 @@ def parseKernelLog():
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
@@ -332,24 +332,24 @@ def parseKernelLog():
break
msg = m.group('msg')
data.dmesgtext.append(line)
- if(ktime == 0.0 and re.match('^Linux version .*', msg)):
+ if(ktime == 0.0 and re.match(r'^Linux version .*', msg)):
if(not sysvals.stamp['kernel']):
sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
continue
- m = re.match('.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
+ m = re.match(r'.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
if(m):
bt = datetime.strptime(m.group('d')+' '+m.group('t'), '%Y-%m-%d %H:%M:%S')
bt = bt - timedelta(seconds=int(ktime))
data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
continue
- m = re.match('^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
+ m = re.match(r'^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
if(m):
func = m.group('f')
pid = int(m.group('p'))
devtemp[func] = (ktime, pid)
continue
- m = re.match('^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
+ m = re.match(r'^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
if(m):
data.valid = True
data.end = ktime
@@ -359,7 +359,7 @@ def parseKernelLog():
data.newAction(phase, f, pid, start, ktime, int(r), int(t))
del devtemp[f]
continue
- if(re.match('^Freeing unused kernel .*', msg)):
+ if(re.match(r'^Freeing unused kernel .*', msg)):
data.tUserMode = ktime
data.dmesg['kernel']['end'] = ktime
data.dmesg['user']['start'] = ktime
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index 40ad221e8881..ef87e63c05c7 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -86,7 +86,7 @@ def ascii(text):
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
- version = '5.11'
+ version = '5.12'
ansi = False
rs = 0
display = ''
@@ -420,11 +420,11 @@ class SystemValues:
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
- m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
+ m = re.match(r'(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
- m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
+ m = re.match(r'(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
@@ -464,15 +464,15 @@ class SystemValues:
if os.path.exists('/proc/cpuinfo'):
with open('/proc/cpuinfo', 'r') as fp:
for line in fp:
- if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
+ if re.match(r'^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
if os.path.exists('/proc/meminfo'):
with open('/proc/meminfo', 'r') as fp:
for line in fp:
- m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ m = re.match(r'^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
- m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ m = re.match(r'^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memfree = int(m.group('sz'))
if os.path.exists('/etc/os-release'):
@@ -539,7 +539,7 @@ class SystemValues:
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
break
@@ -553,7 +553,7 @@ class SystemValues:
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
@@ -636,11 +636,11 @@ class SystemValues:
# now process the args
for arg in sorted(args):
arglist[arg] = ''
- m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
+ m = re.match(r'.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
- m = re.match('.* '+arg+'=(?P<arg>.*)', data);
+ m = re.match(r'.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
@@ -989,7 +989,7 @@ class SystemValues:
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
- m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
+ m = re.match(r'.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
if(not m):
continue
dev = m.group('d')
@@ -999,7 +999,7 @@ class SystemValues:
# now get the syspath for each target device
for dirname, dirnames, filenames in os.walk('/sys/devices'):
- if(re.match('.*/power', dirname) and 'async' in filenames):
+ if(re.match(r'.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
@@ -1143,12 +1143,12 @@ class SystemValues:
elif value and os.path.exists(file):
fp = open(file, 'r+')
if fmt == 'radio':
- m = re.match('.*\[(?P<v>.*)\].*', fp.read())
+ m = re.match(r'.*\[(?P<v>.*)\].*', fp.read())
if m:
self.cfgdef[file] = m.group('v')
elif fmt == 'acpi':
line = fp.read().strip().split('\n')[-1]
- m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line)
+ m = re.match(r'.* (?P<v>[0-9A-Fx]*) .*', line)
if m:
self.cfgdef[file] = m.group('v')
else:
@@ -1173,7 +1173,7 @@ class SystemValues:
fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr
out = ascii(fp.read()).strip()
fp.close()
- if re.match('turbostat version .*', out):
+ if re.match(r'turbostat version .*', out):
self.vprint(out)
return True
return False
@@ -1181,33 +1181,33 @@ class SystemValues:
cmd = self.getExec('turbostat')
rawout = keyline = valline = ''
fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
- fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr
- for line in fp:
+ fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE)
+ for line in fp.stderr:
line = ascii(line)
rawout += line
if keyline and valline:
continue
- if re.match('(?i)Avg_MHz.*', line):
+ if re.match(r'(?i)Avg_MHz.*', line):
keyline = line.strip().split()
elif keyline:
valline = line.strip().split()
- fp.close()
+ fp.wait()
if not keyline or not valline or len(keyline) != len(valline):
errmsg = 'unrecognized turbostat output:\n'+rawout.strip()
self.vprint(errmsg)
if not self.verbose:
pprint(errmsg)
- return ''
+ return (fp.returncode, '')
if self.verbose:
pprint(rawout.strip())
out = []
for key in keyline:
idx = keyline.index(key)
val = valline[idx]
- if key == 'SYS%LPI' and not s0ixready and re.match('^[0\.]*$', val):
+ if key == 'SYS%LPI' and not s0ixready and re.match(r'^[0\.]*$', val):
continue
out.append('%s=%s' % (key, val))
- return '|'.join(out)
+ return (fp.returncode, '|'.join(out))
def netfixon(self, net='both'):
cmd = self.getExec('netfix')
if not cmd:
@@ -1232,7 +1232,7 @@ class SystemValues:
except:
return ''
for line in reversed(w.split('\n')):
- m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
+ m = re.match(r' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
if not m or (dev and dev != m.group('dev')):
continue
return m.group('dev')
@@ -1261,14 +1261,14 @@ class SystemValues:
return
arr = msg.split()
for j in range(len(arr)):
- if re.match('^[0-9,\-\.]*$', arr[j]):
- arr[j] = '[0-9,\-\.]*'
+ if re.match(r'^[0-9,\-\.]*$', arr[j]):
+ arr[j] = r'[0-9,\-\.]*'
else:
arr[j] = arr[j]\
- .replace('\\', '\\\\').replace(']', '\]').replace('[', '\[')\
- .replace('.', '\.').replace('+', '\+').replace('*', '\*')\
- .replace('(', '\(').replace(')', '\)').replace('}', '\}')\
- .replace('{', '\{')
+ .replace('\\', r'\\\\').replace(']', r'\]').replace('[', r'\[')\
+ .replace('.', r'\.').replace('+', r'\+').replace('*', r'\*')\
+ .replace('(', r'\(').replace(')', r'\)').replace('}', r'\}')\
+ .replace('{', r'\{')
mstr = ' *'.join(arr)
entry = {
'line': msg,
@@ -1340,7 +1340,7 @@ class SystemValues:
fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
ret = 'unknown'
for line in fp:
- m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line))
+ m = re.match(r'[\s]*Monitor is (?P<m>.*)', ascii(line))
if(m and len(m.group('m')) >= 2):
out = m.group('m').lower()
ret = out[3:] if out[0:2] == 'in' else out
@@ -1566,7 +1566,7 @@ class Data:
i += 1
if tp.stampInfo(line, sysvals):
continue
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if not m:
continue
t = float(m.group('ktime'))
@@ -1574,7 +1574,7 @@ class Data:
continue
dir = 'suspend' if t < self.tSuspended else 'resume'
msg = m.group('msg')
- if re.match('capability: warning: .*', msg):
+ if re.match(r'capability: warning: .*', msg):
continue
for err in self.errlist:
if re.match(self.errlist[err], msg):
@@ -1679,8 +1679,8 @@ class Data:
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
- mc = re.match('\(.*\) *(?P<args>.*)', cdata)
- mr = re.match('\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
+ mc = re.match(r'\(.*\) *(?P<args>.*)', cdata)
+ mr = re.match(r'\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
if mc and mr:
c = mr.group('caller').split('+')[0]
a = mc.group('args').strip()
@@ -1997,7 +1997,7 @@ class Data:
list = self.dmesg[phase]['list']
mydev = ''
for devname in sorted(list):
- if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
+ if name == devname or re.match(r'^%s\[(?P<num>[0-9]*)\]$' % name, devname):
mydev = devname
if mydev:
return list[mydev]
@@ -2099,7 +2099,7 @@ class Data:
for dev in sorted(list):
pdev = list[dev]['par']
pid = list[dev]['pid']
- if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
+ if(pid < 0 or re.match(r'[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
@@ -2190,26 +2190,26 @@ class Data:
if 'resume_complete' in dm:
dm['resume_complete']['end'] = time
def initcall_debug_call(self, line, quick=False):
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- 'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- 'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
- '(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
+ r'(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
if m:
return True if quick else m.group('t', 'f', 'n', 'p')
return False if quick else ('', '', '', '')
def initcall_debug_return(self, line, quick=False):
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
- '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
+ r'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
- '(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
+ r'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
if m:
return True if quick else m.group('t', 'f', 'dt')
return False if quick else ('', '', '')
@@ -2294,28 +2294,28 @@ class FTraceLine:
if not m and not d:
return
# is this a trace event
- if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
+ if(d == 'traceevent' or re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
- em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
+ em = re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
- emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
+ emm = re.match(r'^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
- km = re.match('^(?P<n>.*)_cal$', self.type)
+ km = re.match(r'^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
- km = re.match('^(?P<n>.*)_ret$', self.type)
+ km = re.match(r'^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
@@ -2327,7 +2327,7 @@ class FTraceLine:
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
- match = re.match('^(?P<d> *)(?P<o>.*)$', m)
+ match = re.match(r'^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
@@ -2337,7 +2337,7 @@ class FTraceLine:
self.freturn = True
if(len(m) > 1):
# includes comment with function name
- match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
+ match = re.match(r'^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
@@ -2345,13 +2345,13 @@ class FTraceLine:
self.fcall = True
# function call with children
if(m[-1] == '{'):
- match = re.match('^(?P<n>.*) *\(.*', m)
+ match = re.match(r'^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
- match = re.match('^(?P<n>.*) *\(.*', m)
+ match = re.match(r'^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
@@ -2385,7 +2385,7 @@ class FTraceLine:
return False
else:
if(self.type == 'suspend_resume' and
- re.match('suspend_enter\[.*\] begin', self.name)):
+ re.match(r'suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
@@ -2398,7 +2398,7 @@ class FTraceLine:
return False
else:
if(self.type == 'suspend_resume' and
- re.match('thaw_processes\[.*\] end', self.name)):
+ re.match(r'thaw_processes\[.*\] end', self.name)):
return True
return False
@@ -2976,30 +2976,30 @@ class Timeline:
# Description:
# A list of values describing the properties of these test runs
class TestProps:
- stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
- '(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
- ' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
- wififmt = '^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
- tstatfmt = '^# turbostat (?P<t>\S*)'
- testerrfmt = '^# enter_sleep_error (?P<e>.*)'
- sysinfofmt = '^# sysinfo .*'
- cmdlinefmt = '^# command \| (?P<cmd>.*)'
- kparamsfmt = '^# kparams \| (?P<kp>.*)'
- devpropfmt = '# Device Properties: .*'
- pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
- tracertypefmt = '# tracer: (?P<t>.*)'
- firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
- procexecfmt = 'ps - (?P<ps>.*)$'
- procmultifmt = '@(?P<n>[0-9]*)\|(?P<ps>.*)$'
+ stampfmt = r'# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
+ r'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
+ r' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
+ wififmt = r'^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
+ tstatfmt = r'^# turbostat (?P<t>\S*)'
+ testerrfmt = r'^# enter_sleep_error (?P<e>.*)'
+ sysinfofmt = r'^# sysinfo .*'
+ cmdlinefmt = r'^# command \| (?P<cmd>.*)'
+ kparamsfmt = r'^# kparams \| (?P<kp>.*)'
+ devpropfmt = r'# Device Properties: .*'
+ pinfofmt = r'# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
+ tracertypefmt = r'# tracer: (?P<t>.*)'
+ firmwarefmt = r'# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
+ procexecfmt = r'ps - (?P<ps>.*)$'
+ procmultifmt = r'@(?P<n>[0-9]*)\|(?P<ps>.*)$'
ftrace_line_fmt_fg = \
- '^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
- ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
- '[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
+ r'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
+ r' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
+ r'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
- ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
- '(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
- '(?P<msg>.*)'
- machinesuspend = 'machine_suspend\[.*'
+ r' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
+ r'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
+ r'(?P<msg>.*)'
+ machinesuspend = r'machine_suspend\[.*'
multiproclist = dict()
multiproctime = 0.0
multiproccnt = 0
@@ -3081,14 +3081,14 @@ class TestProps:
sv.hostname = data.stamp['host']
sv.suspendmode = data.stamp['mode']
if sv.suspendmode == 'freeze':
- self.machinesuspend = 'timekeeping_freeze\[.*'
+ self.machinesuspend = r'timekeeping_freeze\[.*'
else:
- self.machinesuspend = 'machine_suspend\[.*'
+ self.machinesuspend = r'machine_suspend\[.*'
if sv.suspendmode == 'command' and sv.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem', 'disk']
fp = sv.openlog(sv.ftracefile, 'r')
for line in fp:
- m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
+ m = re.match(r'.* machine_suspend\[(?P<mode>.*)\]', line)
if m and m.group('mode') in ['1', '2', '3', '4']:
sv.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sv.suspendmode
@@ -3401,9 +3401,9 @@ def loadTraceLog():
for i in range(len(blk)):
if 'SUSPEND START' in blk[i][3]:
first.append(i)
- elif re.match('.* timekeeping_freeze.*begin', blk[i][3]):
+ elif re.match(r'.* timekeeping_freeze.*begin', blk[i][3]):
last.append(i)
- elif re.match('.* timekeeping_freeze.*end', blk[i][3]):
+ elif re.match(r'.* timekeeping_freeze.*end', blk[i][3]):
first.append(i)
elif 'RESUME COMPLETE' in blk[i][3]:
last.append(i)
@@ -3514,28 +3514,28 @@ def parseTraceLog(live=False):
if(t.fevent):
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
- if(re.match('(?P<name>.*) begin$', t.name)):
+ if(re.match(r'(?P<name>.*) begin$', t.name)):
isbegin = True
- elif(re.match('(?P<name>.*) end$', t.name)):
+ elif(re.match(r'(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
if '[' in t.name:
- m = re.match('(?P<name>.*)\[.*', t.name)
+ m = re.match(r'(?P<name>.*)\[.*', t.name)
else:
- m = re.match('(?P<name>.*) .*', t.name)
+ m = re.match(r'(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
- if(re.match('suspend_enter\[.*', t.name)):
+ if(re.match(r'suspend_enter\[.*', t.name)):
if(isbegin and data.tKernSus == 0):
data.tKernSus = t.time
continue
# suspend_prepare start
- elif(re.match('dpm_prepare\[.*', t.name)):
+ elif(re.match(r'dpm_prepare\[.*', t.name)):
if isbegin and data.first_suspend_prepare:
data.first_suspend_prepare = False
if data.tKernSus == 0:
@@ -3544,15 +3544,15 @@ def parseTraceLog(live=False):
phase = data.setPhase('suspend_prepare', t.time, isbegin)
continue
# suspend start
- elif(re.match('dpm_suspend\[.*', t.name)):
+ elif(re.match(r'dpm_suspend\[.*', t.name)):
phase = data.setPhase('suspend', t.time, isbegin)
continue
# suspend_late start
- elif(re.match('dpm_suspend_late\[.*', t.name)):
+ elif(re.match(r'dpm_suspend_late\[.*', t.name)):
phase = data.setPhase('suspend_late', t.time, isbegin)
continue
# suspend_noirq start
- elif(re.match('dpm_suspend_noirq\[.*', t.name)):
+ elif(re.match(r'dpm_suspend_noirq\[.*', t.name)):
phase = data.setPhase('suspend_noirq', t.time, isbegin)
continue
# suspend_machine/resume_machine
@@ -3589,19 +3589,19 @@ def parseTraceLog(live=False):
data.tResumed = t.time
continue
# resume_noirq start
- elif(re.match('dpm_resume_noirq\[.*', t.name)):
+ elif(re.match(r'dpm_resume_noirq\[.*', t.name)):
phase = data.setPhase('resume_noirq', t.time, isbegin)
continue
# resume_early start
- elif(re.match('dpm_resume_early\[.*', t.name)):
+ elif(re.match(r'dpm_resume_early\[.*', t.name)):
phase = data.setPhase('resume_early', t.time, isbegin)
continue
# resume start
- elif(re.match('dpm_resume\[.*', t.name)):
+ elif(re.match(r'dpm_resume\[.*', t.name)):
phase = data.setPhase('resume', t.time, isbegin)
continue
# resume complete start
- elif(re.match('dpm_complete\[.*', t.name)):
+ elif(re.match(r'dpm_complete\[.*', t.name)):
phase = data.setPhase('resume_complete', t.time, isbegin)
continue
# skip trace events inside devices calls
@@ -3635,7 +3635,7 @@ def parseTraceLog(live=False):
elif(t.type == 'device_pm_callback_start'):
if phase not in data.dmesg:
continue
- m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
+ m = re.match(r'(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
@@ -3650,7 +3650,7 @@ def parseTraceLog(live=False):
elif(t.type == 'device_pm_callback_end'):
if phase not in data.dmesg:
continue
- m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
+ m = re.match(r'(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
@@ -3904,24 +3904,24 @@ def loadKernelLog():
line = line[idx:]
if tp.stampInfo(line, sysvals):
continue
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
- if re.match('PM: Syncing filesystems.*', msg) or \
- re.match('PM: suspend entry.*', msg):
+ if re.match(r'PM: Syncing filesystems.*', msg) or \
+ re.match(r'PM: suspend entry.*', msg):
if(data):
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
if(not data):
continue
- m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
+ m = re.match(r'.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
- m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
+ m = re.match(r'PM: Preparing system for (?P<m>.*) sleep', msg)
if not m:
- m = re.match('PM: Preparing system for sleep \((?P<m>.*)\)', msg)
+ m = re.match(r'PM: Preparing system for sleep \((?P<m>.*)\)', msg)
if m:
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
@@ -3984,7 +3984,7 @@ def parseKernelLog(data):
'resume_machine': ['[PM: ]*Timekeeping suspended for.*',
'ACPI: Low-level resume complete.*',
'ACPI: resume from mwait',
- 'Suspended for [0-9\.]* seconds'],
+ r'Suspended for [0-9\.]* seconds'],
'resume_noirq': ['PM: resume from suspend-to-idle',
'ACPI: Waking up from system sleep state.*'],
'resume_early': ['PM: noirq resume of devices complete after.*',
@@ -3993,7 +3993,7 @@ def parseKernelLog(data):
'PM: early restore of devices complete after.*'],
'resume_complete': ['PM: resume of devices complete after.*',
'PM: restore of devices complete after.*'],
- 'post_resume': ['.*Restarting tasks \.\.\..*'],
+ 'post_resume': [r'.*Restarting tasks \.\.\..*'],
}
# action table (expected events that occur and show up in dmesg)
@@ -4021,7 +4021,7 @@ def parseKernelLog(data):
actions = dict()
for line in data.dmesgtext:
# parse each dmesg line into the time and message
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
@@ -4145,26 +4145,26 @@ def parseKernelLog(data):
if(a in actions and actions[a][-1]['begin'] == actions[a][-1]['end']):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
- if(re.match('Disabling non-boot CPUs .*', msg)):
+ if(re.match(r'Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
- elif(re.match('Enabling non-boot CPUs .*', msg)):
+ elif(re.match(r'Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
- elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
- or re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
+ elif(re.match(r'smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
+ or re.match(r'psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
# end of a cpu suspend, start of the next
- m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
+ m = re.match(r'smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
if(not m):
- m = re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)
+ m = re.match(r'psci: CPU(?P<cpu>[0-9]*) killed.*', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
- elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
+ elif(re.match(r'CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
- m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
+ m = re.match(r'CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
@@ -4343,7 +4343,8 @@ def createHTMLSummarySimple(testruns, htmlfile, title):
list[mode]['data'].append([data['host'], data['kernel'],
data['time'], tVal[0], tVal[1], data['url'], res,
data['issues'], data['sus_worst'], data['sus_worsttime'],
- data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi])
+ data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi,
+ (data['fullmode'] if 'fullmode' in data else mode)])
idx = len(list[mode]['data']) - 1
if res.startswith('fail in'):
res = 'fail'
@@ -4449,7 +4450,7 @@ def createHTMLSummarySimple(testruns, htmlfile, title):
elif idx == iMed[i]:
tHigh[i] = ' id="%smed" class=medval title="Median"' % tag
html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row
- html += td.format(mode) # mode
+ html += td.format(d[15]) # mode
html += td.format(d[0]) # host
html += td.format(d[1]) # kernel
html += td.format(d[2]) # time
@@ -5061,6 +5062,7 @@ def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
+ hf.write('<script type="text/javascript">\n');
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
@@ -5068,384 +5070,383 @@ def addScriptCode(hf, testruns):
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
- script_code = \
- '<script type="text/javascript">\n'+detail+\
- ' var resolution = -1;\n'\
- ' var dragval = [0, 0];\n'\
- ' function redrawTimescale(t0, tMax, tS) {\n'\
- ' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
- ' var tTotal = tMax - t0;\n'\
- ' var list = document.getElementsByClassName("tblock");\n'\
- ' for (var i = 0; i < list.length; i++) {\n'\
- ' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
- ' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
- ' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
- ' var mMax = m0 + mTotal;\n'\
- ' var html = "";\n'\
- ' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
- ' if(divTotal > 1000) continue;\n'\
- ' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
- ' var pos = 0.0, val = 0.0;\n'\
- ' for (var j = 0; j < divTotal; j++) {\n'\
- ' var htmlline = "";\n'\
- ' var mode = list[i].id[5];\n'\
- ' if(mode == "s") {\n'\
- ' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
- ' val = (j-divTotal+1)*tS;\n'\
- ' if(j == divTotal - 1)\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S&rarr;</cS></div>\';\n'\
- ' else\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
- ' } else {\n'\
- ' pos = 100 - (((j)*tS*100)/mTotal);\n'\
- ' val = (j)*tS;\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
- ' if(j == 0)\n'\
- ' if(mode == "r")\n'\
- ' htmlline = rline+"<cS>&larr;R</cS></div>";\n'\
- ' else\n'\
- ' htmlline = rline+"<cS>0ms</div>";\n'\
- ' }\n'\
- ' html += htmlline;\n'\
- ' }\n'\
- ' timescale.innerHTML = html;\n'\
- ' }\n'\
- ' }\n'\
- ' function zoomTimeline() {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var zoombox = document.getElementById("dmesgzoombox");\n'\
- ' var left = zoombox.scrollLeft;\n'\
- ' var val = parseFloat(dmesg.style.width);\n'\
- ' var newval = 100;\n'\
- ' var sh = window.outerWidth / 2;\n'\
- ' if(this.id == "zoomin") {\n'\
- ' newval = val * 1.2;\n'\
- ' if(newval > 910034) newval = 910034;\n'\
- ' dmesg.style.width = newval+"%";\n'\
- ' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
- ' } else if (this.id == "zoomout") {\n'\
- ' newval = val / 1.2;\n'\
- ' if(newval < 100) newval = 100;\n'\
- ' dmesg.style.width = newval+"%";\n'\
- ' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
- ' } else {\n'\
- ' zoombox.scrollLeft = 0;\n'\
- ' dmesg.style.width = "100%";\n'\
- ' }\n'\
- ' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
- ' var t0 = bounds[0];\n'\
- ' var tMax = bounds[1];\n'\
- ' var tTotal = tMax - t0;\n'\
- ' var wTotal = tTotal * 100.0 / newval;\n'\
- ' var idx = 7*window.innerWidth/1100;\n'\
- ' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
- ' if(i >= tS.length) i = tS.length - 1;\n'\
- ' if(tS[i] == resolution) return;\n'\
- ' resolution = tS[i];\n'\
- ' redrawTimescale(t0, tMax, tS[i]);\n'\
- ' }\n'\
- ' function deviceName(title) {\n'\
- ' var name = title.slice(0, title.indexOf(" ("));\n'\
- ' return name;\n'\
- ' }\n'\
- ' function deviceHover() {\n'\
- ' var name = deviceName(this.title);\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' var cpu = -1;\n'\
- ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(7));\n'\
- ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(8));\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dname = deviceName(dev[i].title);\n'\
- ' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
- ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
- ' (name == dname))\n'\
- ' {\n'\
- ' dev[i].className = "hover "+cname;\n'\
- ' } else {\n'\
- ' dev[i].className = cname;\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function deviceUnhover() {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
- ' }\n'\
- ' }\n'\
- ' function deviceTitle(title, total, cpu) {\n'\
- ' var prefix = "Total";\n'\
- ' if(total.length > 3) {\n'\
- ' prefix = "Average";\n'\
- ' total[1] = (total[1]+total[3])/2;\n'\
- ' total[2] = (total[2]+total[4])/2;\n'\
- ' }\n'\
- ' var devtitle = document.getElementById("devicedetailtitle");\n'\
- ' var name = deviceName(title);\n'\
- ' if(cpu >= 0) name = "CPU"+cpu;\n'\
- ' var driver = "";\n'\
- ' var tS = "<t2>(</t2>";\n'\
- ' var tR = "<t2>)</t2>";\n'\
- ' if(total[1] > 0)\n'\
- ' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
- ' if(total[2] > 0)\n'\
- ' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
- ' var s = title.indexOf("{");\n'\
- ' var e = title.indexOf("}");\n'\
- ' if((s >= 0) && (e >= 0))\n'\
- ' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
- ' if(total[1] > 0 && total[2] > 0)\n'\
- ' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
- ' else\n'\
- ' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
- ' return name;\n'\
- ' }\n'\
- ' function deviceDetail() {\n'\
- ' var devinfo = document.getElementById("devicedetail");\n'\
- ' devinfo.style.display = "block";\n'\
- ' var name = deviceName(this.title);\n'\
- ' var cpu = -1;\n'\
- ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(7));\n'\
- ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(8));\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' var idlist = [];\n'\
- ' var pdata = [[]];\n'\
- ' if(document.getElementById("devicedetail1"))\n'\
- ' pdata = [[], []];\n'\
- ' var pd = pdata[0];\n'\
- ' var total = [0.0, 0.0, 0.0];\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dname = deviceName(dev[i].title);\n'\
- ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
- ' (name == dname))\n'\
- ' {\n'\
- ' idlist[idlist.length] = dev[i].id;\n'\
- ' var tidx = 1;\n'\
- ' if(dev[i].id[0] == "a") {\n'\
- ' pd = pdata[0];\n'\
- ' } else {\n'\
- ' if(pdata.length == 1) pdata[1] = [];\n'\
- ' if(total.length == 3) total[3]=total[4]=0.0;\n'\
- ' pd = pdata[1];\n'\
- ' tidx = 3;\n'\
- ' }\n'\
- ' var info = dev[i].title.split(" ");\n'\
- ' var pname = info[info.length-1];\n'\
- ' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
- ' total[0] += pd[pname];\n'\
- ' if(pname.indexOf("suspend") >= 0)\n'\
- ' total[tidx] += pd[pname];\n'\
- ' else\n'\
- ' total[tidx+1] += pd[pname];\n'\
- ' }\n'\
- ' }\n'\
- ' var devname = deviceTitle(this.title, total, cpu);\n'\
- ' var left = 0.0;\n'\
- ' for (var t = 0; t < pdata.length; t++) {\n'\
- ' pd = pdata[t];\n'\
- ' devinfo = document.getElementById("devicedetail"+t);\n'\
- ' var phases = devinfo.getElementsByClassName("phaselet");\n'\
- ' for (var i = 0; i < phases.length; i++) {\n'\
- ' if(phases[i].id in pd) {\n'\
- ' var w = 100.0*pd[phases[i].id]/total[0];\n'\
- ' var fs = 32;\n'\
- ' if(w < 8) fs = 4*w | 0;\n'\
- ' var fs2 = fs*3/4;\n'\
- ' phases[i].style.width = w+"%";\n'\
- ' phases[i].style.left = left+"%";\n'\
- ' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
- ' left += w;\n'\
- ' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
- ' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
- ' phases[i].innerHTML = time+pname;\n'\
- ' } else {\n'\
- ' phases[i].style.width = "0%";\n'\
- ' phases[i].style.left = left+"%";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' if(typeof devstats !== \'undefined\')\n'\
- ' callDetail(this.id, this.title);\n'\
- ' var cglist = document.getElementById("callgraphs");\n'\
- ' if(!cglist) return;\n'\
- ' var cg = cglist.getElementsByClassName("atop");\n'\
- ' if(cg.length < 10) return;\n'\
- ' for (var i = 0; i < cg.length; i++) {\n'\
- ' cgid = cg[i].id.split("x")[0]\n'\
- ' if(idlist.indexOf(cgid) >= 0) {\n'\
- ' cg[i].style.display = "block";\n'\
- ' } else {\n'\
- ' cg[i].style.display = "none";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function callDetail(devid, devtitle) {\n'\
- ' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
- ' return;\n'\
- ' var list = devstats[devid];\n'\
- ' var tmp = devtitle.split(" ");\n'\
- ' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
- ' var dd = document.getElementById(phase);\n'\
- ' var total = parseFloat(tmp[1].slice(1));\n'\
- ' var mlist = [];\n'\
- ' var maxlen = 0;\n'\
- ' var info = []\n'\
- ' for(var i in list) {\n'\
- ' if(list[i][0] == "@") {\n'\
- ' info = list[i].split("|");\n'\
- ' continue;\n'\
- ' }\n'\
- ' var tmp = list[i].split("|");\n'\
- ' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
- ' var p = (t*100.0/total).toFixed(2);\n'\
- ' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
- ' if(f.length > maxlen)\n'\
- ' maxlen = f.length;\n'\
- ' }\n'\
- ' var pad = 5;\n'\
- ' if(mlist.length == 0) pad = 30;\n'\
- ' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
- ' if(info.length > 2)\n'\
- ' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
- ' if(info.length > 3)\n'\
- ' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
- ' if(info.length > 4)\n'\
- ' html += ", return=<b>"+info[4]+"</b>";\n'\
- ' html += "</t3></div>";\n'\
- ' if(mlist.length > 0) {\n'\
- ' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
- ' html += "</tr><tr><th>Calls</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][1]+"</td>";\n'\
- ' html += "</tr><tr><th>Time(ms)</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][2]+"</td>";\n'\
- ' html += "</tr><tr><th>Percent</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][3]+"</td>";\n'\
- ' html += "</tr></table>";\n'\
- ' }\n'\
- ' dd.innerHTML = html;\n'\
- ' var height = (maxlen*5)+100;\n'\
- ' dd.style.height = height+"px";\n'\
- ' document.getElementById("devicedetail").style.height = height+"px";\n'\
- ' }\n'\
- ' function callSelect() {\n'\
- ' var cglist = document.getElementById("callgraphs");\n'\
- ' if(!cglist) return;\n'\
- ' var cg = cglist.getElementsByClassName("atop");\n'\
- ' for (var i = 0; i < cg.length; i++) {\n'\
- ' if(this.id == cg[i].id) {\n'\
- ' cg[i].style.display = "block";\n'\
- ' } else {\n'\
- ' cg[i].style.display = "none";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function devListWindow(e) {\n'\
- ' var win = window.open();\n'\
- ' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
- ' "<style type=\\"text/css\\">"+\n'\
- ' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
- ' "</style>"\n'\
- ' var dt = devtable[0];\n'\
- ' if(e.target.id != "devlist1")\n'\
- ' dt = devtable[1];\n'\
- ' win.document.write(html+dt);\n'\
- ' }\n'\
- ' function errWindow() {\n'\
- ' var range = this.id.split("_");\n'\
- ' var idx1 = parseInt(range[0]);\n'\
- ' var idx2 = parseInt(range[1]);\n'\
- ' var win = window.open();\n'\
- ' var log = document.getElementById("dmesglog");\n'\
- ' var title = "<title>dmesg log</title>";\n'\
- ' var text = log.innerHTML.split("\\n");\n'\
- ' var html = "";\n'\
- ' for(var i = 0; i < text.length; i++) {\n'\
- ' if(i == idx1) {\n'\
- ' html += "<e id=target>"+text[i]+"</e>\\n";\n'\
- ' } else if(i > idx1 && i <= idx2) {\n'\
- ' html += "<e>"+text[i]+"</e>\\n";\n'\
- ' } else {\n'\
- ' html += text[i]+"\\n";\n'\
- ' }\n'\
- ' }\n'\
- ' win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n'\
- ' win.location.hash = "#target";\n'\
- ' win.document.close();\n'\
- ' }\n'\
- ' function logWindow(e) {\n'\
- ' var name = e.target.id.slice(4);\n'\
- ' var win = window.open();\n'\
- ' var log = document.getElementById(name+"log");\n'\
- ' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
- ' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
- ' win.document.close();\n'\
- ' }\n'\
- ' function onMouseDown(e) {\n'\
- ' dragval[0] = e.clientX;\n'\
- ' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
- ' document.onmousemove = onMouseMove;\n'\
- ' }\n'\
- ' function onMouseMove(e) {\n'\
- ' var zoombox = document.getElementById("dmesgzoombox");\n'\
- ' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
- ' }\n'\
- ' function onMouseUp(e) {\n'\
- ' document.onmousemove = null;\n'\
- ' }\n'\
- ' function onKeyPress(e) {\n'\
- ' var c = e.charCode;\n'\
- ' if(c != 42 && c != 43 && c != 45) return;\n'\
- ' var click = document.createEvent("Events");\n'\
- ' click.initEvent("click", true, false);\n'\
- ' if(c == 43) \n'\
- ' document.getElementById("zoomin").dispatchEvent(click);\n'\
- ' else if(c == 45)\n'\
- ' document.getElementById("zoomout").dispatchEvent(click);\n'\
- ' else if(c == 42)\n'\
- ' document.getElementById("zoomdef").dispatchEvent(click);\n'\
- ' }\n'\
- ' window.addEventListener("resize", function () {zoomTimeline();});\n'\
- ' window.addEventListener("load", function () {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' dmesg.style.width = "100%"\n'\
- ' dmesg.onmousedown = onMouseDown;\n'\
- ' document.onmouseup = onMouseUp;\n'\
- ' document.onkeypress = onKeyPress;\n'\
- ' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
- ' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
- ' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
- ' var list = document.getElementsByClassName("err");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = errWindow;\n'\
- ' var list = document.getElementsByClassName("logbtn");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = logWindow;\n'\
- ' list = document.getElementsByClassName("devlist");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = devListWindow;\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dev[i].onclick = deviceDetail;\n'\
- ' dev[i].onmouseover = deviceHover;\n'\
- ' dev[i].onmouseout = deviceUnhover;\n'\
- ' }\n'\
- ' var dev = dmesg.getElementsByClassName("srccall");\n'\
- ' for (var i = 0; i < dev.length; i++)\n'\
- ' dev[i].onclick = callSelect;\n'\
- ' zoomTimeline();\n'\
- ' });\n'\
- '</script>\n'
+ hf.write(detail);
+ script_code = r""" var resolution = -1;
+ var dragval = [0, 0];
+ function redrawTimescale(t0, tMax, tS) {
+ var rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">';
+ var tTotal = tMax - t0;
+ var list = document.getElementsByClassName("tblock");
+ for (var i = 0; i < list.length; i++) {
+ var timescale = list[i].getElementsByClassName("timescale")[0];
+ var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);
+ var mTotal = tTotal*parseFloat(list[i].style.width)/100;
+ var mMax = m0 + mTotal;
+ var html = "";
+ var divTotal = Math.floor(mTotal/tS) + 1;
+ if(divTotal > 1000) continue;
+ var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;
+ var pos = 0.0, val = 0.0;
+ for (var j = 0; j < divTotal; j++) {
+ var htmlline = "";
+ var mode = list[i].id[5];
+ if(mode == "s") {
+ pos = 100 - (((j)*tS*100)/mTotal) - divEdge;
+ val = (j-divTotal+1)*tS;
+ if(j == divTotal - 1)
+ htmlline = '<div class="t" style="right:'+pos+'%"><cS>S&rarr;</cS></div>';
+ else
+ htmlline = '<div class="t" style="right:'+pos+'%">'+val+'ms</div>';
+ } else {
+ pos = 100 - (((j)*tS*100)/mTotal);
+ val = (j)*tS;
+ htmlline = '<div class="t" style="right:'+pos+'%">'+val+'ms</div>';
+ if(j == 0)
+ if(mode == "r")
+ htmlline = rline+"<cS>&larr;R</cS></div>";
+ else
+ htmlline = rline+"<cS>0ms</cS></div>";
+ }
+ html += htmlline;
+ }
+ timescale.innerHTML = html;
+ }
+ }
+ function zoomTimeline() {
+ var dmesg = document.getElementById("dmesg");
+ var zoombox = document.getElementById("dmesgzoombox");
+ var left = zoombox.scrollLeft;
+ var val = parseFloat(dmesg.style.width);
+ var newval = 100;
+ var sh = window.outerWidth / 2;
+ if(this.id == "zoomin") {
+ newval = val * 1.2;
+ if(newval > 910034) newval = 910034;
+ dmesg.style.width = newval+"%";
+ zoombox.scrollLeft = ((left + sh) * newval / val) - sh;
+ } else if (this.id == "zoomout") {
+ newval = val / 1.2;
+ if(newval < 100) newval = 100;
+ dmesg.style.width = newval+"%";
+ zoombox.scrollLeft = ((left + sh) * newval / val) - sh;
+ } else {
+ zoombox.scrollLeft = 0;
+ dmesg.style.width = "100%";
+ }
+ var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];
+ var t0 = bounds[0];
+ var tMax = bounds[1];
+ var tTotal = tMax - t0;
+ var wTotal = tTotal * 100.0 / newval;
+ var idx = 7*window.innerWidth/1100;
+ for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);
+ if(i >= tS.length) i = tS.length - 1;
+ if(tS[i] == resolution) return;
+ resolution = tS[i];
+ redrawTimescale(t0, tMax, tS[i]);
+ }
+ function deviceName(title) {
+ var name = title.slice(0, title.indexOf(" ("));
+ return name;
+ }
+ function deviceHover() {
+ var name = deviceName(this.title);
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ var cpu = -1;
+ if(name.match("CPU_ON\[[0-9]*\]"))
+ cpu = parseInt(name.slice(7));
+ else if(name.match("CPU_OFF\[[0-9]*\]"))
+ cpu = parseInt(name.slice(8));
+ for (var i = 0; i < dev.length; i++) {
+ dname = deviceName(dev[i].title);
+ var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));
+ if((cpu >= 0 && dname.match("CPU_O[NF]*\\[*"+cpu+"\\]")) ||
+ (name == dname))
+ {
+ dev[i].className = "hover "+cname;
+ } else {
+ dev[i].className = cname;
+ }
+ }
+ }
+ function deviceUnhover() {
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ for (var i = 0; i < dev.length; i++) {
+ dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));
+ }
+ }
+ function deviceTitle(title, total, cpu) {
+ var prefix = "Total";
+ if(total.length > 3) {
+ prefix = "Average";
+ total[1] = (total[1]+total[3])/2;
+ total[2] = (total[2]+total[4])/2;
+ }
+ var devtitle = document.getElementById("devicedetailtitle");
+ var name = deviceName(title);
+ if(cpu >= 0) name = "CPU"+cpu;
+ var driver = "";
+ var tS = "<t2>(</t2>";
+ var tR = "<t2>)</t2>";
+ if(total[1] > 0)
+ tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";
+ if(total[2] > 0)
+ tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";
+ var s = title.indexOf("{");
+ var e = title.indexOf("}");
+ if((s >= 0) && (e >= 0))
+ driver = title.slice(s+1, e) + " <t1>@</t1> ";
+ if(total[1] > 0 && total[2] > 0)
+ devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;
+ else
+ devtitle.innerHTML = "<t0>"+title+"</t0>";
+ return name;
+ }
+ function deviceDetail() {
+ var devinfo = document.getElementById("devicedetail");
+ devinfo.style.display = "block";
+ var name = deviceName(this.title);
+ var cpu = -1;
+ if(name.match("CPU_ON\[[0-9]*\]"))
+ cpu = parseInt(name.slice(7));
+ else if(name.match("CPU_OFF\[[0-9]*\]"))
+ cpu = parseInt(name.slice(8));
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ var idlist = [];
+ var pdata = [[]];
+ if(document.getElementById("devicedetail1"))
+ pdata = [[], []];
+ var pd = pdata[0];
+ var total = [0.0, 0.0, 0.0];
+ for (var i = 0; i < dev.length; i++) {
+ dname = deviceName(dev[i].title);
+ if((cpu >= 0 && dname.match("CPU_O[NF]*\\[*"+cpu+"\\]")) ||
+ (name == dname))
+ {
+ idlist[idlist.length] = dev[i].id;
+ var tidx = 1;
+ if(dev[i].id[0] == "a") {
+ pd = pdata[0];
+ } else {
+ if(pdata.length == 1) pdata[1] = [];
+ if(total.length == 3) total[3]=total[4]=0.0;
+ pd = pdata[1];
+ tidx = 3;
+ }
+ var info = dev[i].title.split(" ");
+ var pname = info[info.length-1];
+ pd[pname] = parseFloat(info[info.length-3].slice(1));
+ total[0] += pd[pname];
+ if(pname.indexOf("suspend") >= 0)
+ total[tidx] += pd[pname];
+ else
+ total[tidx+1] += pd[pname];
+ }
+ }
+ var devname = deviceTitle(this.title, total, cpu);
+ var left = 0.0;
+ for (var t = 0; t < pdata.length; t++) {
+ pd = pdata[t];
+ devinfo = document.getElementById("devicedetail"+t);
+ var phases = devinfo.getElementsByClassName("phaselet");
+ for (var i = 0; i < phases.length; i++) {
+ if(phases[i].id in pd) {
+ var w = 100.0*pd[phases[i].id]/total[0];
+ var fs = 32;
+ if(w < 8) fs = 4*w | 0;
+ var fs2 = fs*3/4;
+ phases[i].style.width = w+"%";
+ phases[i].style.left = left+"%";
+ phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";
+ left += w;
+ var time = "<t4 style=\"font-size:"+fs+"px\">"+pd[phases[i].id]+" ms<br></t4>";
+ var pname = "<t3 style=\"font-size:"+fs2+"px\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";
+ phases[i].innerHTML = time+pname;
+ } else {
+ phases[i].style.width = "0%";
+ phases[i].style.left = left+"%";
+ }
+ }
+ }
+ if(typeof devstats !== 'undefined')
+ callDetail(this.id, this.title);
+ var cglist = document.getElementById("callgraphs");
+ if(!cglist) return;
+ var cg = cglist.getElementsByClassName("atop");
+ if(cg.length < 10) return;
+ for (var i = 0; i < cg.length; i++) {
+ cgid = cg[i].id.split("x")[0]
+ if(idlist.indexOf(cgid) >= 0) {
+ cg[i].style.display = "block";
+ } else {
+ cg[i].style.display = "none";
+ }
+ }
+ }
+ function callDetail(devid, devtitle) {
+ if(!(devid in devstats) || devstats[devid].length < 1)
+ return;
+ var list = devstats[devid];
+ var tmp = devtitle.split(" ");
+ var name = tmp[0], phase = tmp[tmp.length-1];
+ var dd = document.getElementById(phase);
+ var total = parseFloat(tmp[1].slice(1));
+ var mlist = [];
+ var maxlen = 0;
+ var info = []
+ for(var i in list) {
+ if(list[i][0] == "@") {
+ info = list[i].split("|");
+ continue;
+ }
+ var tmp = list[i].split("|");
+ var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);
+ var p = (t*100.0/total).toFixed(2);
+ mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];
+ if(f.length > maxlen)
+ maxlen = f.length;
+ }
+ var pad = 5;
+ if(mlist.length == 0) pad = 30;
+ var html = '<div style="padding-top:'+pad+'px"><t3> <b>'+name+':</b>';
+ if(info.length > 2)
+ html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";
+ if(info.length > 3)
+ html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";
+ if(info.length > 4)
+ html += ", return=<b>"+info[4]+"</b>";
+ html += "</t3></div>";
+ if(mlist.length > 0) {
+ html += '<table class=fstat style="padding-top:'+(maxlen*5)+'px;"><tr><th>Function</th>';
+ for(var i in mlist)
+ html += "<td class=vt>"+mlist[i][0]+"</td>";
+ html += "</tr><tr><th>Calls</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][1]+"</td>";
+ html += "</tr><tr><th>Time(ms)</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][2]+"</td>";
+ html += "</tr><tr><th>Percent</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][3]+"</td>";
+ html += "</tr></table>";
+ }
+ dd.innerHTML = html;
+ var height = (maxlen*5)+100;
+ dd.style.height = height+"px";
+ document.getElementById("devicedetail").style.height = height+"px";
+ }
+ function callSelect() {
+ var cglist = document.getElementById("callgraphs");
+ if(!cglist) return;
+ var cg = cglist.getElementsByClassName("atop");
+ for (var i = 0; i < cg.length; i++) {
+ if(this.id == cg[i].id) {
+ cg[i].style.display = "block";
+ } else {
+ cg[i].style.display = "none";
+ }
+ }
+ }
+ function devListWindow(e) {
+ var win = window.open();
+ var html = "<title>"+e.target.innerHTML+"</title>"+
+ "<style type=\"text/css\">"+
+ " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+
+ "</style>"
+ var dt = devtable[0];
+ if(e.target.id != "devlist1")
+ dt = devtable[1];
+ win.document.write(html+dt);
+ }
+ function errWindow() {
+ var range = this.id.split("_");
+ var idx1 = parseInt(range[0]);
+ var idx2 = parseInt(range[1]);
+ var win = window.open();
+ var log = document.getElementById("dmesglog");
+ var title = "<title>dmesg log</title>";
+ var text = log.innerHTML.split("\n");
+ var html = "";
+ for(var i = 0; i < text.length; i++) {
+ if(i == idx1) {
+ html += "<e id=target>"+text[i]+"</e>\n";
+ } else if(i > idx1 && i <= idx2) {
+ html += "<e>"+text[i]+"</e>\n";
+ } else {
+ html += text[i]+"\n";
+ }
+ }
+ win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");
+ win.location.hash = "#target";
+ win.document.close();
+ }
+ function logWindow(e) {
+ var name = e.target.id.slice(4);
+ var win = window.open();
+ var log = document.getElementById(name+"log");
+ var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";
+ win.document.write(title+"<pre>"+log.innerHTML+"</pre>");
+ win.document.close();
+ }
+ function onMouseDown(e) {
+ dragval[0] = e.clientX;
+ dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;
+ document.onmousemove = onMouseMove;
+ }
+ function onMouseMove(e) {
+ var zoombox = document.getElementById("dmesgzoombox");
+ zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;
+ }
+ function onMouseUp(e) {
+ document.onmousemove = null;
+ }
+ function onKeyPress(e) {
+ var c = e.charCode;
+ if(c != 42 && c != 43 && c != 45) return;
+ var click = document.createEvent("Events");
+ click.initEvent("click", true, false);
+ if(c == 43)
+ document.getElementById("zoomin").dispatchEvent(click);
+ else if(c == 45)
+ document.getElementById("zoomout").dispatchEvent(click);
+ else if(c == 42)
+ document.getElementById("zoomdef").dispatchEvent(click);
+ }
+ window.addEventListener("resize", function () {zoomTimeline();});
+ window.addEventListener("load", function () {
+ var dmesg = document.getElementById("dmesg");
+ dmesg.style.width = "100%"
+ dmesg.onmousedown = onMouseDown;
+ document.onmouseup = onMouseUp;
+ document.onkeypress = onKeyPress;
+ document.getElementById("zoomin").onclick = zoomTimeline;
+ document.getElementById("zoomout").onclick = zoomTimeline;
+ document.getElementById("zoomdef").onclick = zoomTimeline;
+ var list = document.getElementsByClassName("err");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = errWindow;
+ var list = document.getElementsByClassName("logbtn");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = logWindow;
+ list = document.getElementsByClassName("devlist");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = devListWindow;
+ var dev = dmesg.getElementsByClassName("thread");
+ for (var i = 0; i < dev.length; i++) {
+ dev[i].onclick = deviceDetail;
+ dev[i].onmouseover = deviceHover;
+ dev[i].onmouseout = deviceUnhover;
+ }
+ var dev = dmesg.getElementsByClassName("srccall");
+ for (var i = 0; i < dev.length; i++)
+ dev[i].onclick = callSelect;
+ zoomTimeline();
+ });
+</script> """
hf.write(script_code);
# Function: executeSuspend
@@ -5524,7 +5525,9 @@ def executeSuspend(quiet=False):
if ((mode == 'freeze') or (sv.memmode == 's2idle')) \
and sv.haveTurbostat():
# execution will pause here
- turbo = sv.turbostat(s0ixready)
+ retval, turbo = sv.turbostat(s0ixready)
+ if retval != 0:
+ tdata['error'] = 'turbostat returned %d' % retval
if turbo:
tdata['turbo'] = turbo
else:
@@ -5532,6 +5535,7 @@ def executeSuspend(quiet=False):
pf.write(mode)
# execution will pause here
try:
+ pf.flush()
pf.close()
except Exception as e:
tdata['error'] = str(e)
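The flush added above matters because the write to the suspend interface blocks until the machine resumes; flushing inside the try block makes a failed write show up as the recorded error instead of being lost when the file object is finalized. A minimal standalone sketch of the same pattern, writing directly to /sys/power/state (root required, 'mem' used purely as an example mode):

# Hedged sketch: write a suspend mode to the kernel power interface and make
# sure any error from the blocking write is raised inside the try block.
error = None
try:
    with open('/sys/power/state', 'w') as pf:
        pf.write('mem')   # the system suspends here and returns after resume
        pf.flush()        # force the write out so a failure raises now
except Exception as e:
    error = str(e)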
@@ -5633,7 +5637,7 @@ def deviceInfo(output=''):
tgtval = 'runtime_status'
lines = dict()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
- if(not re.match('.*/power', dirname) or
+ if(not re.match(r'.*/power', dirname) or
'control' not in filenames or
tgtval not in filenames):
continue
@@ -5702,6 +5706,40 @@ def getModes():
fp.close()
return modes
+def dmidecode_backup(out, fatal=False):
+ cpath, spath, info = '/proc/cpuinfo', '/sys/class/dmi/id', {
+ 'bios-vendor': 'bios_vendor',
+ 'bios-version': 'bios_version',
+ 'bios-release-date': 'bios_date',
+ 'system-manufacturer': 'sys_vendor',
+ 'system-product-name': 'product_name',
+ 'system-version': 'product_version',
+ 'system-serial-number': 'product_serial',
+ 'baseboard-manufacturer': 'board_vendor',
+ 'baseboard-product-name': 'board_name',
+ 'baseboard-version': 'board_version',
+ 'baseboard-serial-number': 'board_serial',
+ 'chassis-manufacturer': 'chassis_vendor',
+ 'chassis-version': 'chassis_version',
+ 'chassis-serial-number': 'chassis_serial',
+ }
+ for key in info:
+ if key not in out:
+ val = sysvals.getVal(os.path.join(spath, info[key])).strip()
+ if val and val.lower() != 'to be filled by o.e.m.':
+ out[key] = val
+ if 'processor-version' not in out and os.path.exists(cpath):
+ with open(cpath, 'r') as fp:
+ for line in fp:
+ m = re.match(r'^model\s*name\s*\:\s*(?P<c>.*)', line)
+ if m:
+ out['processor-version'] = m.group('c').strip()
+ break
+ if fatal and len(out) < 1:
+ doError('dmidecode failed to get info from %s or %s' % \
+ (sysvals.mempath, spath))
+ return out
+
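dmidecode_backup() pulls the same identifiers from sysfs that dmidecode would decode out of the DMI tables, which is why it can stand in when /dev/mem is unreadable. A rough standalone illustration of that lookup, using a few of the keys from the mapping above and the same placeholder filter:

import os

# Illustration only: read a handful of DMI identifiers straight from sysfs.
spath = '/sys/class/dmi/id'
keys = {
    'bios-vendor': 'bios_vendor',
    'system-manufacturer': 'sys_vendor',
    'system-product-name': 'product_name',
}
for key, fname in keys.items():
    path = os.path.join(spath, fname)
    if not os.path.exists(path):
        continue
    val = open(path).read().strip()
    if val and val.lower() != 'to be filled by o.e.m.':
        print('%s: %s' % (key, val))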
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
@@ -5712,6 +5750,8 @@ def getModes():
# A dict object with all available key/values
def dmidecode(mempath, fatal=False):
out = dict()
+ if(not (os.path.exists(mempath) and os.access(mempath, os.R_OK))):
+ return dmidecode_backup(out, fatal)
# the list of values to retrieve, with hardcoded (type, idx)
info = {
@@ -5727,24 +5767,14 @@ def dmidecode(mempath, fatal=False):
'baseboard-version': (2, 6),
'baseboard-serial-number': (2, 7),
'chassis-manufacturer': (3, 4),
- 'chassis-type': (3, 5),
'chassis-version': (3, 6),
'chassis-serial-number': (3, 7),
'processor-manufacturer': (4, 7),
'processor-version': (4, 16),
}
- if(not os.path.exists(mempath)):
- if(fatal):
- doError('file does not exist: %s' % mempath)
- return out
- if(not os.access(mempath, os.R_OK)):
- if(fatal):
- doError('file is not readable: %s' % mempath)
- return out
# by default use legacy scan, but try to use EFI first
- memaddr = 0xf0000
- memsize = 0x10000
+ memaddr, memsize = 0xf0000, 0x10000
for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
if not os.path.exists(ep) or not os.access(ep, os.R_OK):
continue
@@ -5765,11 +5795,7 @@ def dmidecode(mempath, fatal=False):
fp.seek(memaddr)
buf = fp.read(memsize)
except:
- if(fatal):
- doError('DMI table is unreachable, sorry')
- else:
- pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
- return out
+ return dmidecode_backup(out, fatal)
fp.close()
# search for either an SM table or DMI table
@@ -5785,10 +5811,7 @@ def dmidecode(mempath, fatal=False):
break
i += 16
if base == 0 and length == 0 and num == 0:
- if(fatal):
- doError('Neither SMBIOS nor DMI were found')
- else:
- return out
+ return dmidecode_backup(out, fatal)
# read in the SM or DMI table
try:
@@ -5796,11 +5819,7 @@ def dmidecode(mempath, fatal=False):
fp.seek(base)
buf = fp.read(length)
except:
- if(fatal):
- doError('DMI table is unreachable, sorry')
- else:
- pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
- return out
+ return dmidecode_backup(out, fatal)
fp.close()
# scan the table for the values we want
@@ -6272,7 +6291,10 @@ def find_in_html(html, start, end, firstonly=True):
return out
def data_from_html(file, outpath, issues, fulldetail=False):
- html = open(file, 'r').read()
+ try:
+ html = open(file, 'r').read()
+ except:
+ html = ascii(open(file, 'rb').read())
sysvals.htmlfile = os.path.relpath(file, outpath)
# extract general info
suspend = find_in_html(html, 'Kernel Suspend', 'ms')
@@ -6290,7 +6312,7 @@ def data_from_html(file, outpath, issues, fulldetail=False):
tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
if error:
- m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error)
+ m = re.match(r'[a-z0-9]* failed in (?P<p>\S*).*', error)
if m:
result = 'fail in %s' % m.group('p')
else:
@@ -6307,8 +6329,9 @@ def data_from_html(file, outpath, issues, fulldetail=False):
d.end = 999999999
d.dmesgtext = log.split('\n')
tp = d.extractErrorInfo()
- for msg in tp.msglist:
- sysvals.errorSummary(issues, msg)
+ if len(issues) < 100:
+ for msg in tp.msglist:
+ sysvals.errorSummary(issues, msg)
if stmp[2] == 'freeze':
extra = d.turbostatInfo()
elist = dict()
@@ -6325,6 +6348,11 @@ def data_from_html(file, outpath, issues, fulldetail=False):
line = find_in_html(log, '# netfix ', '\n')
if line:
extra['netfix'] = line
+ line = find_in_html(log, '# command ', '\n')
+ if line:
+ m = re.match(r'.* -m (?P<m>\S*).*', line)
+ if m:
+ extra['fullmode'] = m.group('m')
low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
for lowstr in ['waking', '+']:
if not low:
@@ -6334,7 +6362,7 @@ def data_from_html(file, outpath, issues, fulldetail=False):
if lowstr == '+':
issue = 'S2LOOPx%d' % len(low.split('+'))
else:
- m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low)
+ m = re.match(r'.*waking *(?P<n>[0-9]*) *times.*', low)
issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN'
match = [i for i in issues if i['match'] == issue]
if len(match) > 0:
@@ -6352,10 +6380,10 @@ def data_from_html(file, outpath, issues, fulldetail=False):
# extract device info
devices = dict()
for line in html.split('\n'):
- m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
+ m = re.match(r' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
if not m or 'thread kth' in line or 'thread sec' in line:
continue
- m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
+ m = re.match(r'(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
if not m:
continue
name, time, phase = m.group('n'), m.group('t'), m.group('p')
@@ -6416,9 +6444,9 @@ def genHtml(subdir, force=False):
for filename in filenames:
file = os.path.join(dirname, filename)
if sysvals.usable(file):
- if(re.match('.*_dmesg.txt', filename)):
+ if(re.match(r'.*_dmesg.txt', filename)):
sysvals.dmesgfile = file
- elif(re.match('.*_ftrace.txt', filename)):
+ elif(re.match(r'.*_ftrace.txt', filename)):
sysvals.ftracefile = file
sysvals.setOutputFile()
if (sysvals.dmesgfile or sysvals.ftracefile) and sysvals.htmlfile and \
@@ -6441,7 +6469,7 @@ def runSummary(subdir, local=True, genhtml=False):
desc = {'host':[],'mode':[],'kernel':[]}
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
- if(not re.match('.*.html', filename)):
+ if(not re.match(r'.*.html', filename)):
continue
data = data_from_html(os.path.join(dirname, filename), outpath, issues)
if(not data):
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 5899c27c2e2e..5127be34869e 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -16,7 +16,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.19";
+static const char *version_str = "v1.20";
static const int supported_api_ver = 3;
static struct isst_if_platform_info isst_platform_info;
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
index 05efffbca3b7..e05561d00458 100644
--- a/tools/power/x86/intel-speed-select/isst-core.c
+++ b/tools/power/x86/intel-speed-select/isst-core.c
@@ -283,6 +283,8 @@ int isst_set_trl(struct isst_id *id, unsigned long long trl)
return 0;
}
+#define MSR_TRL_FREQ_MULTIPLIER 100
+
int isst_set_trl_from_current_tdp(struct isst_id *id, unsigned long long trl)
{
unsigned long long msr_trl;
@@ -310,6 +312,10 @@ int isst_set_trl_from_current_tdp(struct isst_id *id, unsigned long long trl)
for (i = 0; i < 8; ++i) {
unsigned long long _trl = trl[i];
+ /* MSR is always in 100 MHz units */
+ if (isst_get_disp_freq_multiplier() == 1)
+ _trl /= MSR_TRL_FREQ_MULTIPLIER;
+
msr_trl |= (_trl << (i * 8));
}
}
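The new define is a plain unit conversion: when the display multiplier is 1 the incoming limits are in MHz, while the MSR stores each of its eight 8-bit entries in 100 MHz units, so every value is divided by 100 before being shifted into place. A hedged illustration of that packing (limit values hypothetical):

# Hypothetical per-entry turbo ratio limits in MHz (display multiplier == 1).
MSR_TRL_FREQ_MULTIPLIER = 100
trl_mhz = [3500, 3500, 3400, 3400, 3300, 3300, 3200, 3200]
msr_trl = 0
for i, limit in enumerate(trl_mhz):
    msr_trl |= (limit // MSR_TRL_FREQ_MULTIPLIER) << (i * 8)
print(hex(msr_trl))  # 0x2020212122222323, one ratio byte per entry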
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 2d6dce2c8f77..b1e6817f1e54 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -14,6 +14,7 @@ turbostat : turbostat.c
override CFLAGS += -O2 -Wall -Wextra -I../../../include
override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
+override CFLAGS += -DBUILD_BUG_HEADER='"../../../../include/linux/build_bug.h"'
override CFLAGS += -D_FILE_OFFSET_BITS=64
override CFLAGS += -D_FORTIFY_SOURCE=2
@@ -44,10 +45,13 @@ snapshot: turbostat
@echo "#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
@echo "#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
+ @echo '#define BUILD_BUG_ON(cond) do { enum { compile_time_check ## __COUNTER__ = 1/(!(cond)) }; } while (0)' > $(SNAPSHOT)/build_bug.h
+
@echo PWD=. > $(SNAPSHOT)/Makefile
@echo "CFLAGS += -DMSRHEADER='\"msr-index.h\"'" >> $(SNAPSHOT)/Makefile
@echo "CFLAGS += -DINTEL_FAMILY_HEADER='\"intel-family.h\"'" >> $(SNAPSHOT)/Makefile
- @sed -e's/.*MSRHEADER.*//' -e's/.*INTEL_FAMILY_HEADER.*//' Makefile >> $(SNAPSHOT)/Makefile
+ @echo "CFLAGS += -DBUILD_BUG_HEADER='\"build_bug.h\"'" >> $(SNAPSHOT)/Makefile
+ @sed -e's/.*MSRHEADER.*//' -e's/.*INTEL_FAMILY_HEADER.*//' -e's/.*BUILD_BUG_HEADER.*//' Makefile >> $(SNAPSHOT)/Makefile
@rm -f $(SNAPSHOT).tar.gz
tar cvzf $(SNAPSHOT).tar.gz $(SNAPSHOT)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 8cdf41906e98..9f5d053d4bc6 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -10,6 +10,7 @@
#define _GNU_SOURCE
#include MSRHEADER
#include INTEL_FAMILY_HEADER
+#include BUILD_BUG_HEADER
#include <stdarg.h>
#include <stdio.h>
#include <err.h>
@@ -38,7 +39,6 @@
#include <stdbool.h>
#include <assert.h>
#include <linux/kernel.h>
-#include <linux/build_bug.h>
#define UNUSED(x) (void)(x)
@@ -5695,9 +5695,6 @@ static void probe_intel_uncore_frequency_cluster(void)
if (access("/sys/devices/system/cpu/intel_uncore_frequency/uncore00/current_freq_khz", R_OK))
return;
- if (quiet)
- return;
-
for (uncore_max_id = 0;; ++uncore_max_id) {
sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/uncore%02d", uncore_max_id);
@@ -5727,6 +5724,14 @@ static void probe_intel_uncore_frequency_cluster(void)
sprintf(path, "%s/fabric_cluster_id", path_base);
cluster_id = read_sysfs_int(path);
+ sprintf(path, "%s/current_freq_khz", path_base);
+ sprintf(name_buf, "UMHz%d.%d", domain_id, cluster_id);
+
+ add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
+
+ if (quiet)
+ continue;
+
sprintf(path, "%s/min_freq_khz", path_base);
k = read_sysfs_int(path);
sprintf(path, "%s/max_freq_khz", path_base);
@@ -5743,11 +5748,6 @@ static void probe_intel_uncore_frequency_cluster(void)
sprintf(path, "%s/current_freq_khz", path_base);
k = read_sysfs_int(path);
fprintf(outf, " %d MHz\n", k / 1000);
-
- sprintf(path, "%s/current_freq_khz", path_base);
- sprintf(name_buf, "UMHz%d.%d", domain_id, cluster_id);
-
- add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
}
}
@@ -8424,7 +8424,7 @@ void cmdline(int argc, char **argv)
* Parse some options early, because they may make other options invalid,
* like adding the MSR counter with --add and at the same time using --no-msr.
*/
- while ((opt = getopt_long_only(argc, argv, "MP", long_options, &option_index)) != -1) {
+ while ((opt = getopt_long_only(argc, argv, "MPn:", long_options, &option_index)) != -1) {
switch (opt) {
case 'M':
no_msr = 1;
diff --git a/tools/rcu/rcu-updaters.sh b/tools/rcu/rcu-updaters.sh
new file mode 100755
index 000000000000..4ef1397927bb
--- /dev/null
+++ b/tools/rcu/rcu-updaters.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Run bpftrace to obtain a histogram of the types of primitives used to
+# initiate RCU grace periods. The count associated with rcu_gp_init()
+# is the number of normal (non-expedited) grace periods.
+#
+# Usage: rcu-updaters.sh [ duration-in-seconds ]
+#
+# Note that not all kernel builds have all of these functions. In those
+# that do not, this script will issue a diagnostic for each that is not
+# found, but continue normally for the rest of the functions.
+
+duration=${1}
+if test -n "${duration}"
+then
+ exitclause='interval:s:'"${duration}"' { exit(); }'
+else
+ echo 'Hit control-C to end sample and print results.'
+fi
+bpftrace -e 'kprobe:kvfree_call_rcu,
+ kprobe:call_rcu,
+ kprobe:call_rcu_tasks,
+ kprobe:call_rcu_tasks_rude,
+ kprobe:call_rcu_tasks_trace,
+ kprobe:call_srcu,
+ kprobe:rcu_barrier,
+ kprobe:rcu_barrier_tasks,
+ kprobe:rcu_barrier_tasks_rude,
+ kprobe:rcu_barrier_tasks_trace,
+ kprobe:srcu_barrier,
+ kprobe:synchronize_rcu,
+ kprobe:synchronize_rcu_expedited,
+ kprobe:synchronize_rcu_tasks,
+ kprobe:synchronize_rcu_tasks_rude,
+ kprobe:synchronize_rcu_tasks_trace,
+ kprobe:synchronize_srcu,
+ kprobe:synchronize_srcu_expedited,
+ kprobe:get_state_synchronize_rcu,
+ kprobe:get_state_synchronize_rcu_full,
+ kprobe:start_poll_synchronize_rcu,
+ kprobe:start_poll_synchronize_rcu_expedited,
+ kprobe:start_poll_synchronize_rcu_full,
+ kprobe:start_poll_synchronize_rcu_expedited_full,
+ kprobe:poll_state_synchronize_rcu,
+ kprobe:poll_state_synchronize_rcu_full,
+ kprobe:cond_synchronize_rcu,
+ kprobe:cond_synchronize_rcu_full,
+ kprobe:start_poll_synchronize_srcu,
+ kprobe:poll_state_synchronize_srcu,
+ kprobe:rcu_gp_init
+ { @counts[func] = count(); } '"${exitclause}"
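The header comment covers both ways of running the script; if it needs to be driven from a test harness rather than interactively, a minimal hedged sketch (path assumed relative to the kernel tree, bpftrace and root privileges required) is:

import subprocess

# Sample RCU grace-period updaters for a fixed 30-second window; with no
# duration argument the script instead runs until interrupted with control-C.
subprocess.run(['sh', 'tools/rcu/rcu-updaters.sh', '30'], check=True)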
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 3482248aa344..90d5afd52dd0 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -630,11 +630,15 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
+ struct device *dev = &port->dev;
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
cxlhdm->port = port;
+ cxlhdm->interleave_mask = ~0U;
+ cxlhdm->iw_cap_mask = ~0UL;
+ dev_set_drvdata(dev, cxlhdm);
return cxlhdm;
}
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 6584443144de..eaf091a3d331 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -3,6 +3,7 @@
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
+#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 9039f3709aff..06eed383fdc0 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -21,6 +21,7 @@ TARGETS += drivers/net
TARGETS += drivers/net/bonding
TARGETS += drivers/net/team
TARGETS += drivers/net/virtio_net
+TARGETS += drivers/platform/x86/intel/ifs
TARGETS += dt
TARGETS += efivarfs
TARGETS += exec
diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
index 5af9ba8a4645..c1ce39874e2b 100644
--- a/tools/testing/selftests/alsa/Makefile
+++ b/tools/testing/selftests/alsa/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
-CFLAGS += $(shell pkg-config --cflags alsa)
+CFLAGS += $(shell pkg-config --cflags alsa) $(KHDR_INCLUDES)
LDLIBS += $(shell pkg-config --libs alsa)
ifeq ($(LDLIBS),)
LDLIBS += -lasound
diff --git a/tools/testing/selftests/arm64/abi/ptrace.c b/tools/testing/selftests/arm64/abi/ptrace.c
index abe4d58d731d..4c941270d8de 100644
--- a/tools/testing/selftests/arm64/abi/ptrace.c
+++ b/tools/testing/selftests/arm64/abi/ptrace.c
@@ -47,7 +47,7 @@ static void test_tpidr(pid_t child)
/* ...write a new value.. */
write_iov.iov_len = sizeof(uint64_t);
- write_val[0] = read_val[0]++;
+ write_val[0] = read_val[0] + 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0, "write_tpidr_one\n");
diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore
index 00e52c966281..8362e7ec35ad 100644
--- a/tools/testing/selftests/arm64/fp/.gitignore
+++ b/tools/testing/selftests/arm64/fp/.gitignore
@@ -2,6 +2,7 @@ fp-pidbench
fp-ptrace
fp-stress
fpsimd-test
+kernel-test
rdvl-sme
rdvl-sve
sve-probe-vls
diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile
index 55d4f00d9e8e..d171021e4cdd 100644
--- a/tools/testing/selftests/arm64/fp/Makefile
+++ b/tools/testing/selftests/arm64/fp/Makefile
@@ -12,6 +12,7 @@ TEST_GEN_PROGS := \
vec-syscfg \
za-fork za-ptrace
TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \
+ kernel-test \
rdvl-sme rdvl-sve \
sve-test \
ssve-test \
diff --git a/tools/testing/selftests/arm64/fp/fp-stress.c b/tools/testing/selftests/arm64/fp/fp-stress.c
index dd31647b00a2..faac24bdefeb 100644
--- a/tools/testing/selftests/arm64/fp/fp-stress.c
+++ b/tools/testing/selftests/arm64/fp/fp-stress.c
@@ -319,6 +319,19 @@ static void start_fpsimd(struct child_data *child, int cpu, int copy)
ksft_print_msg("Started %s\n", child->name);
}
+static void start_kernel(struct child_data *child, int cpu, int copy)
+{
+ int ret;
+
+ ret = asprintf(&child->name, "KERNEL-%d-%d", cpu, copy);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ child_start(child, "./kernel-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
static void start_sve(struct child_data *child, int vl, int cpu)
{
int ret;
@@ -438,7 +451,7 @@ int main(int argc, char **argv)
int ret;
int timeout = 10;
int cpus, i, j, c;
- int sve_vl_count, sme_vl_count, fpsimd_per_cpu;
+ int sve_vl_count, sme_vl_count;
bool all_children_started = false;
int seen_children;
int sve_vls[MAX_VLS], sme_vls[MAX_VLS];
@@ -482,12 +495,7 @@ int main(int argc, char **argv)
have_sme2 = false;
}
- /* Force context switching if we only have FPSIMD */
- if (!sve_vl_count && !sme_vl_count)
- fpsimd_per_cpu = 2;
- else
- fpsimd_per_cpu = 1;
- tests += cpus * fpsimd_per_cpu;
+ tests += cpus * 2;
ksft_print_header();
ksft_set_plan(tests);
@@ -542,8 +550,8 @@ int main(int argc, char **argv)
tests);
for (i = 0; i < cpus; i++) {
- for (j = 0; j < fpsimd_per_cpu; j++)
- start_fpsimd(&children[num_children++], i, j);
+ start_fpsimd(&children[num_children++], i, 0);
+ start_kernel(&children[num_children++], i, 0);
for (j = 0; j < sve_vl_count; j++)
start_sve(&children[num_children++], sve_vls[j], i);
diff --git a/tools/testing/selftests/arm64/fp/kernel-test.c b/tools/testing/selftests/arm64/fp/kernel-test.c
new file mode 100644
index 000000000000..e8da3b4cbd23
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/kernel-test.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 ARM Limited.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+
+#include <linux/kernel.h>
+#include <linux/if_alg.h>
+
+#define DATA_SIZE (16 * 4096)
+
+static int base, sock;
+
+static int digest_len;
+static char *ref;
+static char *digest;
+static char *alg_name;
+
+static struct iovec data_iov;
+static int zerocopy[2];
+static int sigs;
+static int iter;
+
+static void handle_exit_signal(int sig, siginfo_t *info, void *context)
+{
+ printf("Terminated by signal %d, iterations=%d, signals=%d\n",
+ sig, iter, sigs);
+ exit(0);
+}
+
+static void handle_kick_signal(int sig, siginfo_t *info, void *context)
+{
+ sigs++;
+}
+
+static char *drivers[] = {
+ "crct10dif-arm64-ce",
+ /* "crct10dif-arm64-neon", - Same priority as generic */
+ "sha1-ce",
+ "sha224-arm64",
+ "sha224-arm64-neon",
+ "sha224-ce",
+ "sha256-arm64",
+ "sha256-arm64-neon",
+ "sha256-ce",
+ "sha384-ce",
+ "sha512-ce",
+ "sha3-224-ce",
+ "sha3-256-ce",
+ "sha3-384-ce",
+ "sha3-512-ce",
+ "sm3-ce",
+ "sm3-neon",
+};
+
+static bool create_socket(void)
+{
+ FILE *proc;
+ struct sockaddr_alg addr;
+ char buf[1024];
+ char *c, *driver_name;
+ bool is_shash, match;
+ int ret, i;
+
+ ret = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ if (ret < 0) {
+ if (errno == EAFNOSUPPORT) {
+ printf("AF_ALG not supported\n");
+ return false;
+ }
+
+ printf("Failed to create AF_ALG socket: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+ base = ret;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.salg_family = AF_ALG;
+ strncpy((char *)addr.salg_type, "hash", sizeof(addr.salg_type));
+
+ proc = fopen("/proc/crypto", "r");
+ if (!proc) {
+ printf("Unable to open /proc/crypto\n");
+ return false;
+ }
+
+ driver_name = NULL;
+ is_shash = false;
+ match = false;
+
+ /* Look through /proc/crypto for a driver with kernel mode FP usage */
+ while (!match) {
+ c = fgets(buf, sizeof(buf), proc);
+ if (!c) {
+ if (feof(proc)) {
+ printf("Nothing found in /proc/crypto\n");
+ return false;
+ }
+ continue;
+ }
+
+ /* Algorithm descriptions are separated by a blank line */
+ if (*c == '\n') {
+ if (is_shash && driver_name) {
+ for (i = 0; i < ARRAY_SIZE(drivers); i++) {
+ if (strcmp(drivers[i],
+ driver_name) == 0) {
+ match = true;
+ }
+ }
+ }
+
+ if (!match) {
+ digest_len = 0;
+
+ free(driver_name);
+ driver_name = NULL;
+
+ free(alg_name);
+ alg_name = NULL;
+
+ is_shash = false;
+ }
+ continue;
+ }
+
+ /* Remove trailing newline */
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ /* Find the field/value separator and start of the value */
+ c = strchr(buf, ':');
+ if (!c)
+ continue;
+ c += 2;
+
+ if (strncmp(buf, "digestsize", strlen("digestsize")) == 0)
+ sscanf(c, "%d", &digest_len);
+
+ if (strncmp(buf, "name", strlen("name")) == 0)
+ alg_name = strdup(c);
+
+ if (strncmp(buf, "driver", strlen("driver")) == 0)
+ driver_name = strdup(c);
+
+ if (strncmp(buf, "type", strlen("type")) == 0)
+ if (strncmp(c, "shash", strlen("shash")) == 0)
+ is_shash = true;
+ }
+
+ strncpy((char *)addr.salg_name, alg_name,
+ sizeof(addr.salg_name) - 1);
+
+ ret = bind(base, (struct sockaddr *)&addr, sizeof(addr));
+ if (ret < 0) {
+ printf("Failed to bind %s: %s (%d)\n",
+ addr.salg_name, strerror(errno), errno);
+ return false;
+ }
+
+ ret = accept(base, NULL, 0);
+ if (ret < 0) {
+ printf("Failed to accept %s: %s (%d)\n",
+ addr.salg_name, strerror(errno), errno);
+ return false;
+ }
+
+ sock = ret;
+
+ ret = pipe(zerocopy);
+ if (ret != 0) {
+ printf("Failed to create zerocopy pipe: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ ref = malloc(digest_len);
+ if (!ref) {
+ printf("Failed to allocated %d byte reference\n", digest_len);
+ return false;
+ }
+
+ digest = malloc(digest_len);
+ if (!digest) {
+ printf("Failed to allocated %d byte digest\n", digest_len);
+ return false;
+ }
+
+ return true;
+}
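create_socket() keys its /proc/crypto scan off the blank-line-separated records and the name, driver, type and digestsize fields; the same walk can be sketched in a few lines of Python (the driver allow-list here is deliberately abbreviated, not the full table above):

# Sketch: pick a shash algorithm backed by one of the accelerated drivers
# by parsing the blank-line-separated records in /proc/crypto.
wanted = {'sha256-ce', 'sha512-ce', 'sm3-ce'}   # abbreviated allow-list
fields = {}
with open('/proc/crypto') as proc:
    for line in proc:
        if line.strip():
            key, _, val = line.partition(':')
            fields[key.strip()] = val.strip()
            continue
        # a blank line closes one algorithm record
        if fields.get('type') == 'shash' and fields.get('driver') in wanted:
            print(fields['name'], fields['driver'], fields.get('digestsize'))
            break
        fields = {}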
+
+static bool compute_digest(void *buf)
+{
+ struct iovec iov;
+ int ret, wrote;
+
+ iov = data_iov;
+ while (iov.iov_len) {
+ ret = vmsplice(zerocopy[1], &iov, 1, SPLICE_F_GIFT);
+ if (ret < 0) {
+ printf("Failed to send buffer: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ wrote = ret;
+ ret = splice(zerocopy[0], NULL, sock, NULL, wrote, 0);
+ if (ret < 0) {
+ printf("Failed to splice buffer: %s (%d)\n",
+ strerror(errno), errno);
+ } else if (ret != wrote) {
+ printf("Short splice: %d < %d\n", ret, wrote);
+ }
+
+ iov.iov_len -= wrote;
+ iov.iov_base += wrote;
+ }
+
+reread:
+ ret = recv(sock, buf, digest_len, 0);
+ if (ret == 0) {
+ printf("No digest returned\n");
+ return false;
+ }
+ if (ret != digest_len) {
+ if (errno == EAGAIN)
+ goto reread;
+ printf("Failed to get digest: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ return true;
+}
+
+int main(void)
+{
+ char *data;
+ struct sigaction sa;
+ int ret;
+
+ /* Ensure we have unbuffered output */
+ setvbuf(stdout, NULL, _IOLBF, 0);
+
+ /* The parent will communicate with us via signals */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handle_exit_signal;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ ret = sigaction(SIGTERM, &sa, NULL);
+ if (ret < 0)
+ printf("Failed to install SIGTERM handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ sa.sa_sigaction = handle_kick_signal;
+ ret = sigaction(SIGUSR2, &sa, NULL);
+ if (ret < 0)
+ printf("Failed to install SIGUSR2 handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ data = malloc(DATA_SIZE);
+ if (!data) {
+ printf("Failed to allocate data buffer\n");
+ return EXIT_FAILURE;
+ }
+ memset(data, 0, DATA_SIZE);
+
+ data_iov.iov_base = data;
+ data_iov.iov_len = DATA_SIZE;
+
+ /*
+ * If we can't create a socket, assume it's a lack of system
+ * support and fall back to a basic FPSIMD test for the
+ * benefit of fp-stress.
+ */
+ if (!create_socket()) {
+ execl("./fpsimd-test", "./fpsimd-test", NULL);
+ printf("Failed to fall back to fspimd-test: %d (%s)\n",
+ errno, strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * Compute a reference digest that we hope is repeatable; we do
+ * this at runtime partly to make it easier to play with
+ * parameters.
+ */
+ if (!compute_digest(ref)) {
+ printf("Failed to compute reference digest\n");
+ return EXIT_FAILURE;
+ }
+
+ printf("AF_ALG using %s\n", alg_name);
+
+ while (true) {
+ if (!compute_digest(digest)) {
+ printf("Failed to compute digest, iter=%d\n", iter);
+ return EXIT_FAILURE;
+ }
+
+ if (memcmp(ref, digest, digest_len) != 0) {
+ printf("Digest mismatch, iter=%d\n", iter);
+ return EXIT_FAILURE;
+ }
+
+ iter++;
+ }
+
+ return EXIT_FAILURE;
+}
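Stripped of the vmsplice/splice zero-copy path, the test is the standard AF_ALG hash flow: bind a 'hash' socket to the algorithm, accept an operation socket, send the data and read the digest back. Python exposes the same interface, so a hedged userspace sketch (algorithm name purely illustrative, Linux only) looks like:

import socket

# Sketch of the AF_ALG hash flow exercised by kernel-test.c.
data = bytes(4096)
with socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) as alg:
    alg.bind(('hash', 'sha256'))      # illustrative algorithm name
    op, _ = alg.accept()
    with op:
        op.sendall(data)              # one message, no MSG_MORE, so it finalizes
        digest = op.recv(64)
print(digest.hex())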
diff --git a/tools/testing/selftests/arm64/tags/Makefile b/tools/testing/selftests/arm64/tags/Makefile
index 6d29cfde43a2..0a77f35295fb 100644
--- a/tools/testing/selftests/arm64/tags/Makefile
+++ b/tools/testing/selftests/arm64/tags/Makefile
@@ -2,6 +2,5 @@
CFLAGS += $(KHDR_INCLUDES)
TEST_GEN_PROGS := tags_test
-TEST_PROGS := run_tags_test.sh
include ../../lib.mk
diff --git a/tools/testing/selftests/arm64/tags/run_tags_test.sh b/tools/testing/selftests/arm64/tags/run_tags_test.sh
deleted file mode 100755
index 745f11379930..000000000000
--- a/tools/testing/selftests/arm64/tags/run_tags_test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-echo "--------------------"
-echo "running tags test"
-echo "--------------------"
-./tags_test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
-else
- echo "[PASS]"
-fi
diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
index 955f87c1170d..8ae26e496c89 100644
--- a/tools/testing/selftests/arm64/tags/tags_test.c
+++ b/tools/testing/selftests/arm64/tags/tags_test.c
@@ -17,19 +17,21 @@ int main(void)
static int tbi_enabled = 0;
unsigned long tag = 0;
struct utsname *ptr;
- int err;
+
+ ksft_print_header();
+ ksft_set_plan(1);
if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
tbi_enabled = 1;
ptr = (struct utsname *)malloc(sizeof(*ptr));
if (!ptr)
- ksft_exit_fail_msg("Failed to allocate utsname buffer\n");
+ ksft_exit_fail_perror("Failed to allocate utsname buffer");
if (tbi_enabled)
tag = 0x42;
ptr = (struct utsname *)SET_TAG(ptr, tag);
- err = uname(ptr);
+ ksft_test_result(!uname(ptr), "Syscall successful with tagged address\n");
free(ptr);
- return err;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 0445ac38bc07..3c7c3e79aa93 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -6,6 +6,7 @@ kprobe_multi_test # needs CONFIG_FPROBE
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
+tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index c34adf39eeb2..3ebd77206f98 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -1,9 +1,5 @@
# TEMPORARY
# Alphabetical order
-exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
verifier_iterating_callbacks
-verifier_arena # JIT does not support arena
-arena_htab # JIT does not support arena
-arena_atomics
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e0b3887b3d2d..dd49c1d23a60 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -457,7 +457,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
LSKELS := fentry_test.c fexit_test.c fexit_sleep.c atomics.c \
trace_printk.c trace_vprintk.c map_ptr_kern.c \
core_kern.c core_kern_overflow.c test_ringbuf.c \
- test_ringbuf_n.c test_ringbuf_map_key.c
+ test_ringbuf_n.c test_ringbuf_map_key.c test_ringbuf_write.c
# Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
index 567491f3e1b5..68a51dcc0669 100644
--- a/tools/testing/selftests/bpf/bpf_arena_common.h
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -34,10 +34,12 @@
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
#define __arena __attribute__((address_space(1)))
+#define __arena_global __attribute__((address_space(1)))
#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
#else
#define __arena
+#define __arena_global SEC(".addr_space.1")
#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
#endif
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 3d9e4b8c6b81..828556cdc2f0 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -163,7 +163,7 @@ struct bpf_iter_task_vma;
extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
struct task_struct *task,
- unsigned long addr) __ksym;
+ __u64 addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
@@ -351,6 +351,7 @@ l_true: \
l_continue:; \
})
#else
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop \
({ __label__ l_break, l_continue; \
bool ret = true; \
@@ -376,6 +377,33 @@ l_true: \
l_break: break; \
l_continue:; \
})
+#else
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define cond_break \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: break; \
+ l_continue:; \
+ })
+#endif
#endif
#ifndef bpf_nop_mov
@@ -524,7 +552,7 @@ extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+ int (callback_fn)(void *map, int *key, void *value),
unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
bpf_wq_set_callback_impl(timer, cb, flags, NULL)
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index be91a6919315..3b6675ab4086 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -77,5 +77,5 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
struct bpf_key *trusted_keyring) __ksym;
extern bool bpf_session_is_return(void) __ksym __weak;
-extern long *bpf_session_cookie(void) __ksym __weak;
+extern __u64 *bpf_session_cookie(void) __ksym __weak;
#endif
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
index b1dd889d5d7d..948eb3962732 100644
--- a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
+++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
@@ -22,12 +22,12 @@ static int dummy_init_member(const struct btf_type *t,
return 0;
}
-static int dummy_reg(void *kdata)
+static int dummy_reg(void *kdata, struct bpf_link *link)
{
return 0;
}
-static void dummy_unreg(void *kdata)
+static void dummy_unreg(void *kdata, struct bpf_link *link)
{
}
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 2a18bd320e92..f8962a1dd397 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -53,6 +53,13 @@ struct bpf_testmod_struct_arg_4 {
int b;
};
+struct bpf_testmod_struct_arg_5 {
+ char a;
+ short b;
+ int c;
+ long d;
+};
+
__bpf_hook_start();
noinline int
@@ -111,6 +118,15 @@ bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
}
noinline int
+bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
+ short g, struct bpf_testmod_struct_arg_5 h, long i)
+{
+ bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
+ f + g + h.a + h.b + h.c + h.d + i;
+ return bpf_testmod_test_struct_arg_result;
+}
+
+noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
bpf_testmod_test_struct_arg_result = a->a;
return bpf_testmod_test_struct_arg_result;
@@ -154,6 +170,42 @@ __bpf_kfunc void bpf_kfunc_common_test(void)
{
}
+__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
+ struct bpf_dynptr *ptr__nullable)
+{
+}
+
+__bpf_kfunc struct bpf_testmod_ctx *
+bpf_testmod_ctx_create(int *err)
+{
+ struct bpf_testmod_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+ refcount_set(&ctx->usage, 1);
+
+ return ctx;
+}
+
+static void testmod_free_cb(struct rcu_head *head)
+{
+ struct bpf_testmod_ctx *ctx;
+
+ ctx = container_of(head, struct bpf_testmod_ctx, rcu);
+ kfree(ctx);
+}
+
+__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
+{
+ if (!ctx)
+ return;
+ if (refcount_dec_and_test(&ctx->usage))
+ call_rcu(&ctx->rcu, testmod_free_cb);
+}
+
struct bpf_testmod_btf_type_tag_1 {
int a;
};
@@ -269,6 +321,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
struct bpf_testmod_struct_arg_3 *struct_arg3;
struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
+ struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
int i = 1;
while (bpf_testmod_return_ptr(i))
@@ -283,6 +336,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
(void *)20, struct_arg4);
(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
(void *)20, struct_arg4, 23);
+ (void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
+ 21, 22, struct_arg5, 27);
(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
@@ -363,8 +418,15 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
+BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
+BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
+BTF_ID_LIST(bpf_testmod_dtor_ids)
+BTF_ID(struct, bpf_testmod_ctx)
+BTF_ID(func, bpf_testmod_ctx_release)
+
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_common_kfunc_ids,
@@ -820,7 +882,7 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
.is_valid_access = bpf_testmod_ops_is_valid_access,
};
-static int bpf_dummy_reg(void *kdata)
+static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
struct bpf_testmod_ops *ops = kdata;
@@ -835,7 +897,7 @@ static int bpf_dummy_reg(void *kdata)
return 0;
}
-static void bpf_dummy_unreg(void *kdata)
+static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
@@ -871,7 +933,7 @@ struct bpf_struct_ops bpf_bpf_testmod_ops = {
.owner = THIS_MODULE,
};
-static int bpf_dummy_reg2(void *kdata)
+static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
struct bpf_testmod_ops2 *ops = kdata;
@@ -898,6 +960,12 @@ extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
{
+ const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
+ {
+ .btf_id = bpf_testmod_dtor_ids[0],
+ .kfunc_btf_id = bpf_testmod_dtor_ids[1]
+ },
+ };
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
@@ -906,6 +974,9 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
+ ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
+ ARRAY_SIZE(bpf_testmod_dtors),
+ THIS_MODULE);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
index b0d586a6751f..e587a79f2239 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -80,6 +80,11 @@ struct sendmsg_args {
int msglen;
};
+struct bpf_testmod_ctx {
+ struct callback_head rcu;
+ refcount_t usage;
+};
+
struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
@@ -134,4 +139,9 @@ int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) __ksym;
int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym;
int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym;
+void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nullable) __ksym;
+
+struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym;
+void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym;
+
#endif /* _BPF_TESTMOD_KFUNC_H */
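
The declarations above pair an acquire kfunc (flagged KF_ACQUIRE | KF_RET_NULL in bpf_testmod.c) with a release kfunc (KF_RELEASE) and a registered dtor. A minimal sketch of how a selftest BPF program might call them is shown below; the includes, section name and program body are illustrative and not part of this patch.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"

SEC("tc")
int use_testmod_ctx(struct __sk_buff *skb)
{
	struct bpf_testmod_ctx *ctx;
	int err = 0;

	/* KF_RET_NULL: the verifier requires a NULL check before use */
	ctx = bpf_testmod_ctx_create(&err);
	if (!ctx)
		return 0;

	/* ... use ctx ... */

	/* KF_RELEASE: drop the reference; the module frees the object
	 * via call_rcu() once the refcount hits zero
	 */
	bpf_testmod_ctx_release(ctx);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";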
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index eeabd798bc3a..4ca84c8d9116 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -58,9 +58,12 @@ CONFIG_MPLS=y
CONFIG_MPLS_IPTUNNEL=y
CONFIG_MPLS_ROUTING=y
CONFIG_MPTCP=y
+CONFIG_NET_ACT_SKBMOD=y
+CONFIG_NET_CLS=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_CLS_FLOWER=y
+CONFIG_NET_CLS_MATCHALL=y
CONFIG_NET_FOU=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_NET_IPGRE=y
@@ -80,8 +83,22 @@ CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETKIT=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NF_FLOW_TABLE=y
+CONFIG_NF_FLOW_TABLE_INET=y
+CONFIG_NETFILTER_NETLINK=y
+CONFIG_NFT_FLOW_OFFLOAD=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
CONFIG_NF_NAT=y
CONFIG_RC_CORE=y
CONFIG_SECURITY=y
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 35250e6cde7f..e0cba4178e41 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -94,7 +94,8 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl
if (settimeo(fd, opts->timeout_ms))
goto error_close;
- if (opts->post_socket_cb && opts->post_socket_cb(fd, NULL)) {
+ if (opts->post_socket_cb &&
+ opts->post_socket_cb(fd, opts->cb_opts)) {
log_err("Failed to call post_socket_cb");
goto error_close;
}
@@ -105,7 +106,7 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl
}
if (type == SOCK_STREAM) {
- if (listen(fd, 1) < 0) {
+ if (listen(fd, opts->backlog ? MAX(opts->backlog, 0) : 1) < 0) {
log_err("Failed to listed on socket");
goto error_close;
}
@@ -118,22 +119,32 @@ error_close:
return -1;
}
-int start_server(int family, int type, const char *addr_str, __u16 port,
- int timeout_ms)
+int start_server_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts)
{
- struct network_helper_opts opts = {
- .timeout_ms = timeout_ms,
- };
struct sockaddr_storage addr;
socklen_t addrlen;
+ if (!opts)
+ opts = &default_opts;
+
if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
return -1;
- return __start_server(type, (struct sockaddr *)&addr, addrlen, &opts);
+ return __start_server(type, (struct sockaddr *)&addr, addrlen, opts);
}
-static int reuseport_cb(int fd, const struct post_socket_opts *opts)
+int start_server(int family, int type, const char *addr_str, __u16 port,
+ int timeout_ms)
+{
+ struct network_helper_opts opts = {
+ .timeout_ms = timeout_ms,
+ };
+
+ return start_server_str(family, type, addr_str, port, &opts);
+}
+
+static int reuseport_cb(int fd, void *opts)
{
int on = 1;
@@ -238,6 +249,34 @@ error_close:
return -1;
}
+int client_socket(int family, int type,
+ const struct network_helper_opts *opts)
+{
+ int fd;
+
+ if (!opts)
+ opts = &default_opts;
+
+ fd = socket(family, type, opts->proto);
+ if (fd < 0) {
+ log_err("Failed to create client socket");
+ return -1;
+ }
+
+ if (settimeo(fd, opts->timeout_ms))
+ goto error_close;
+
+ if (opts->post_socket_cb &&
+ opts->post_socket_cb(fd, opts->cb_opts))
+ goto error_close;
+
+ return fd;
+
+error_close:
+ save_errno_close(fd);
+ return -1;
+}
+
static int connect_fd_to_addr(int fd,
const struct sockaddr_storage *addr,
socklen_t addrlen, const bool must_fail)
@@ -273,15 +312,12 @@ int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add
if (!opts)
opts = &default_opts;
- fd = socket(addr->ss_family, type, opts->proto);
+ fd = client_socket(addr->ss_family, type, opts);
if (fd < 0) {
log_err("Failed to create client socket");
return -1;
}
- if (settimeo(fd, opts->timeout_ms))
- goto error_close;
-
if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail))
goto error_close;
@@ -292,66 +328,21 @@ error_close:
return -1;
}
-int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
+int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts)
{
struct sockaddr_storage addr;
- struct sockaddr_in *addr_in;
- socklen_t addrlen, optlen;
- int fd, type, protocol;
+ socklen_t addrlen;
if (!opts)
opts = &default_opts;
- optlen = sizeof(type);
-
- if (opts->type) {
- type = opts->type;
- } else {
- if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
- log_err("getsockopt(SOL_TYPE)");
- return -1;
- }
- }
-
- if (opts->proto) {
- protocol = opts->proto;
- } else {
- if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
- log_err("getsockopt(SOL_PROTOCOL)");
- return -1;
- }
- }
-
addrlen = sizeof(addr);
if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
log_err("Failed to get server addr");
return -1;
}
- addr_in = (struct sockaddr_in *)&addr;
- fd = socket(addr_in->sin_family, type, protocol);
- if (fd < 0) {
- log_err("Failed to create client socket");
- return -1;
- }
-
- if (settimeo(fd, opts->timeout_ms))
- goto error_close;
-
- if (opts->cc && opts->cc[0] &&
- setsockopt(fd, SOL_TCP, TCP_CONGESTION, opts->cc,
- strlen(opts->cc) + 1))
- goto error_close;
-
- if (!opts->noconnect)
- if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
- goto error_close;
-
- return fd;
-
-error_close:
- save_errno_close(fd);
- return -1;
+ return connect_to_addr(type, &addr, addrlen, opts);
}
int connect_to_fd(int server_fd, int timeout_ms)
@@ -359,8 +350,23 @@ int connect_to_fd(int server_fd, int timeout_ms)
struct network_helper_opts opts = {
.timeout_ms = timeout_ms,
};
+ int type, protocol;
+ socklen_t optlen;
+
+ optlen = sizeof(type);
+ if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
+ log_err("getsockopt(SOL_TYPE)");
+ return -1;
+ }
+
+ optlen = sizeof(protocol);
+ if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
+ log_err("getsockopt(SOL_PROTOCOL)");
+ return -1;
+ }
+ opts.proto = protocol;
- return connect_to_fd_opts(server_fd, &opts);
+ return connect_to_fd_opts(server_fd, type, &opts);
}
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index 883c7ea9d8d5..aac5b94d6379 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -21,16 +21,22 @@ typedef __u16 __sum16;
#define VIP_NUM 5
#define MAGIC_BYTES 123
-struct post_socket_opts {};
-
struct network_helper_opts {
- const char *cc;
int timeout_ms;
bool must_fail;
- bool noconnect;
- int type;
int proto;
- int (*post_socket_cb)(int fd, const struct post_socket_opts *opts);
+ /* +ve: Passed to listen() as-is.
+ * 0: Default when the test does not set
+ * a particular value during the struct init.
+ * It is changed to 1 before passing to listen().
+ * Most tests only have one on-going connection.
+ * -ve: It is changed to 0 before passing to listen().
+ * It is useful to force syncookie without
+ * changing the "tcp_syncookies" sysctl from 1 to 2.
+ */
+ int backlog;
+ int (*post_socket_cb)(int fd, void *opts);
+ void *cb_opts;
};
/* ipv4 test vector */
@@ -50,6 +56,8 @@ struct ipv6_packet {
extern struct ipv6_packet pkt_v6;
int settimeo(int fd, int timeout_ms);
+int start_server_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts);
int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
int *start_reuseport_server(int family, int type, const char *addr_str,
@@ -58,10 +66,12 @@ int *start_reuseport_server(int family, int type, const char *addr_str,
int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
const struct network_helper_opts *opts);
void free_fds(int *fds, unsigned int nr_close_fds);
+int client_socket(int family, int type,
+ const struct network_helper_opts *opts);
int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
const struct network_helper_opts *opts);
int connect_to_fd(int server_fd, int timeout_ms);
-int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts);
+int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts);
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int timeout_ms);
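
Taken together, the helper changes above replace the old cc/type/noconnect knobs with an explicit socket type argument plus a generic post_socket_cb/cb_opts hook, and add start_server_str()/client_socket(). A hypothetical caller, assuming the usual selftest includes, could look like the sketch below; the SO_MARK callback and all names are only an illustration.

#include <unistd.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "network_helpers.h"

static int set_mark_cb(int fd, void *opts)
{
	int mark = *(int *)opts;

	/* invoked right after socket() on both server and client side */
	return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}

static void example(void)
{
	int mark = 42, srv_fd, cli_fd;
	struct network_helper_opts opts = {
		.timeout_ms = 1000,
		.backlog = -1,		/* passed to listen() as 0 to force syncookies */
		.post_socket_cb = set_mark_cb,
		.cb_opts = &mark,
	};

	srv_fd = start_server_str(AF_INET6, SOCK_STREAM, "::1", 0, &opts);
	if (srv_fd < 0)
		return;

	/* the socket type is now passed explicitly instead of being
	 * queried from the server fd with SO_TYPE
	 */
	cli_fd = connect_to_fd_opts(srv_fd, SOCK_STREAM, &opts);
	if (cli_fd >= 0)
		close(cli_fd);
	close(srv_fd);
}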
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
index 0807a48a58ee..26e7c06c6cb4 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
@@ -146,6 +146,22 @@ static void test_xchg(struct arena_atomics *skel)
ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
}
+static void test_uaf(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.uaf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
+}
+
void test_arena_atomics(void)
{
struct arena_atomics *skel;
@@ -180,6 +196,8 @@ void test_arena_atomics(void)
test_cmpxchg(skel);
if (test__start_subtest("xchg"))
test_xchg(skel);
+ if (test__start_subtest("uaf"))
+ test_uaf(skel);
cleanup:
arena_atomics__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 4407ea428e77..070c52c312e5 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -451,7 +451,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
- attr.sample_freq = 1000;
+ attr.sample_freq = 10000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index b30ff6b3b81a..a4a1f93878d4 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -104,6 +104,7 @@ static void test_bpf_nf_ct(int mode)
ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple");
ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0");
+ ASSERT_EQ(skel->bss->test_einval_reserved_new, -EINVAL, "Test EINVAL for reserved in new struct not set to 0");
ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1");
ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ");
ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP");
@@ -122,6 +123,12 @@ static void test_bpf_nf_ct(int mode)
ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark");
ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");
ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");
+ ASSERT_EQ(skel->data->test_ct_zone_id_alloc_entry, 0, "Test for alloc new entry in specified ct zone");
+ ASSERT_EQ(skel->data->test_ct_zone_id_insert_entry, 0, "Test for insert new entry in specified ct zone");
+ ASSERT_EQ(skel->data->test_ct_zone_id_succ_lookup, 0, "Test for successful lookup in specified ct_zone");
+ ASSERT_EQ(skel->bss->test_ct_zone_dir_enoent_lookup, -ENOENT, "Test ENOENT for lookup with wrong ct zone dir");
+ ASSERT_EQ(skel->bss->test_ct_zone_id_enoent_lookup, -ENOENT, "Test ENOENT for lookup in wrong ct zone");
+
end:
if (client_fd != -1)
close(client_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 0aca02532794..63422f4f3896 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -23,6 +23,11 @@
static const unsigned int total_bytes = 10 * 1024 * 1024;
static int expected_stg = 0xeB9F;
+struct cb_opts {
+ const char *cc;
+ int map_fd;
+};
+
static int settcpca(int fd, const char *tcp_ca)
{
int err;
@@ -34,55 +39,66 @@ static int settcpca(int fd, const char *tcp_ca)
return 0;
}
-static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
+static bool start_test(char *addr_str,
+ const struct network_helper_opts *srv_opts,
+ const struct network_helper_opts *cli_opts,
+ int *srv_fd, int *cli_fd)
{
- int lfd = -1, fd = -1;
- int err;
+ *srv_fd = start_server_str(AF_INET6, SOCK_STREAM, addr_str, 0, srv_opts);
+ if (!ASSERT_NEQ(*srv_fd, -1, "start_server_str"))
+ goto err;
- lfd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
- if (!ASSERT_NEQ(lfd, -1, "socket"))
- return;
-
- fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (!ASSERT_NEQ(fd, -1, "socket")) {
- close(lfd);
- return;
- }
+ /* connect to server */
+ *cli_fd = connect_to_fd_opts(*srv_fd, SOCK_STREAM, cli_opts);
+ if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts"))
+ goto err;
- if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca))
- goto done;
+ return true;
- if (sk_stg_map) {
- err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
- &expected_stg, BPF_NOEXIST);
- if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
- goto done;
+err:
+ if (*srv_fd != -1) {
+ close(*srv_fd);
+ *srv_fd = -1;
}
+ if (*cli_fd != -1) {
+ close(*cli_fd);
+ *cli_fd = -1;
+ }
+ return false;
+}
- /* connect to server */
- err = connect_fd_to_fd(fd, lfd, 0);
- if (!ASSERT_NEQ(err, -1, "connect"))
- goto done;
-
- if (sk_stg_map) {
- int tmp_stg;
+static void do_test(const struct network_helper_opts *opts)
+{
+ int lfd = -1, fd = -1;
- err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
- &tmp_stg);
- if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
- !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
- goto done;
- }
+ if (!start_test(NULL, opts, opts, &lfd, &fd))
+ goto done;
ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
done:
- close(lfd);
- close(fd);
+ if (lfd != -1)
+ close(lfd);
+ if (fd != -1)
+ close(fd);
+}
+
+static int cc_cb(int fd, void *opts)
+{
+ struct cb_opts *cb_opts = (struct cb_opts *)opts;
+
+ return settcpca(fd, cb_opts->cc);
}
static void test_cubic(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_cubic",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct bpf_cubic *cubic_skel;
struct bpf_link *link;
@@ -96,7 +112,7 @@ static void test_cubic(void)
return;
}
- do_test("bpf_cubic", NULL);
+ do_test(&opts);
ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");
@@ -104,8 +120,37 @@ static void test_cubic(void)
bpf_cubic__destroy(cubic_skel);
}
+static int stg_post_socket_cb(int fd, void *opts)
+{
+ struct cb_opts *cb_opts = (struct cb_opts *)opts;
+ int err;
+
+ err = settcpca(fd, cb_opts->cc);
+ if (err)
+ return err;
+
+ err = bpf_map_update_elem(cb_opts->map_fd, &fd,
+ &expected_stg, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
+ return err;
+
+ return 0;
+}
+
static void test_dctcp(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct network_helper_opts cli_opts = {
+ .post_socket_cb = stg_post_socket_cb,
+ .cb_opts = &cb_opts,
+ };
+ int lfd = -1, fd = -1, tmp_stg, err;
struct bpf_dctcp *dctcp_skel;
struct bpf_link *link;
@@ -119,11 +164,58 @@ static void test_dctcp(void)
return;
}
- do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
+ cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map);
+ if (!start_test(NULL, &opts, &cli_opts, &lfd, &fd))
+ goto done;
+
+ err = bpf_map_lookup_elem(cb_opts.map_fd, &fd, &tmp_stg);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
+ !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
+ goto done;
+
+ ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result");
+done:
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);
+ if (lfd != -1)
+ close(lfd);
+ if (fd != -1)
+ close(fd);
+}
+
+static void test_dctcp_autoattach_map(void)
+{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct bpf_dctcp *dctcp_skel;
+ struct bpf_link *link;
+
+ dctcp_skel = bpf_dctcp__open_and_load();
+ if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
+ return;
+
+ bpf_map__set_autoattach(dctcp_skel->maps.dctcp, true);
+ bpf_map__set_autoattach(dctcp_skel->maps.dctcp_nouse, false);
+
+ if (!ASSERT_OK(bpf_dctcp__attach(dctcp_skel), "bpf_dctcp__attach"))
+ goto destroy;
+
+ /* struct_ops is auto-attached */
+ link = dctcp_skel->links.dctcp;
+ if (!ASSERT_OK_PTR(link, "link"))
+ goto destroy;
+
+ do_test(&opts);
+
+destroy:
+ bpf_dctcp__destroy(dctcp_skel);
}
static char *err_str;
@@ -171,11 +263,22 @@ static void test_invalid_license(void)
static void test_dctcp_fallback(void)
{
int err, lfd = -1, cli_fd = -1, srv_fd = -1;
- struct network_helper_opts opts = {
- .cc = "cubic",
- };
struct bpf_dctcp *dctcp_skel;
struct bpf_link *link = NULL;
+ struct cb_opts dctcp = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts srv_opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &dctcp,
+ };
+ struct cb_opts cubic = {
+ .cc = "cubic",
+ };
+ struct network_helper_opts cli_opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cubic,
+ };
char srv_cc[16];
socklen_t cc_len = sizeof(srv_cc);
@@ -190,13 +293,7 @@ static void test_dctcp_fallback(void)
if (!ASSERT_OK_PTR(link, "dctcp link"))
goto done;
- lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
- if (!ASSERT_GE(lfd, 0, "lfd") ||
- !ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp"))
- goto done;
-
- cli_fd = connect_to_fd_opts(lfd, &opts);
- if (!ASSERT_GE(cli_fd, 0, "cli_fd"))
+ if (!start_test("::1", &srv_opts, &cli_opts, &lfd, &cli_fd))
goto done;
srv_fd = accept(lfd, NULL, 0);
@@ -297,6 +394,13 @@ static void test_unsupp_cong_op(void)
static void test_update_ca(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct tcp_ca_update *skel;
struct bpf_link *link;
int saved_ca1_cnt;
@@ -307,25 +411,34 @@ static void test_update_ca(void)
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
- ASSERT_OK_PTR(link, "attach_struct_ops");
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto out;
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
saved_ca1_cnt = skel->bss->ca1_cnt;
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_update_2);
ASSERT_OK(err, "update_map");
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
bpf_link__destroy(link);
+out:
tcp_ca_update__destroy(skel);
}
static void test_update_wrong(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct tcp_ca_update *skel;
struct bpf_link *link;
int saved_ca1_cnt;
@@ -336,24 +449,33 @@ static void test_update_wrong(void)
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
- ASSERT_OK_PTR(link, "attach_struct_ops");
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto out;
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
saved_ca1_cnt = skel->bss->ca1_cnt;
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_wrong);
ASSERT_ERR(err, "update_map");
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
bpf_link__destroy(link);
+out:
tcp_ca_update__destroy(skel);
}
static void test_mixed_links(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct tcp_ca_update *skel;
struct bpf_link *link, *link_nl;
int err;
@@ -363,12 +485,13 @@ static void test_mixed_links(void)
return;
link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
- ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl");
+ if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"))
+ goto out;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops");
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_no_link);
@@ -376,6 +499,7 @@ static void test_mixed_links(void)
bpf_link__destroy(link);
bpf_link__destroy(link_nl);
+out:
tcp_ca_update__destroy(skel);
}
@@ -418,7 +542,8 @@ static void test_link_replace(void)
bpf_link__destroy(link);
link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
- ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd"))
+ goto out;
/* BPF_F_REPLACE with a wrong old map Fd. It should fail!
*
@@ -441,6 +566,7 @@ static void test_link_replace(void)
bpf_link__destroy(link);
+out:
tcp_ca_update__destroy(skel);
}
@@ -455,6 +581,13 @@ static void test_tcp_ca_kfunc(void)
static void test_cc_cubic(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_cc_cubic",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct bpf_cc_cubic *cc_cubic_skel;
struct bpf_link *link;
@@ -468,7 +601,7 @@ static void test_cc_cubic(void)
return;
}
- do_test("bpf_cc_cubic", NULL);
+ do_test(&opts);
bpf_link__destroy(link);
bpf_cc_cubic__destroy(cc_cubic_skel);
@@ -506,4 +639,6 @@ void test_bpf_tcp_ca(void)
test_tcp_ca_kfunc();
if (test__start_subtest("cc_cubic"))
test_cc_cubic();
+ if (test__start_subtest("dctcp_autoattach_map"))
+ test_dctcp_autoattach_map();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index 4c6ada5b270b..73f669014b69 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -45,12 +45,6 @@ err_out:
return err;
}
-struct scale_test_def {
- const char *file;
- enum bpf_prog_type attach_type;
- bool fails;
-};
-
static void scale_test(const char *file,
enum bpf_prog_type attach_type,
bool should_fail)
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
new file mode 100644
index 000000000000..bfbe795823a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "btf_helpers.h"
+
+/* Fabricate base, split BTF with references to base types needed; then create
+ * split BTF with distilled base BTF and ensure expectations are met:
+ * - only referenced base types from split BTF are present
+ * - struct/union/enum are represented as empty unless anonymous, when they
+ * are represented in full in split BTF
+ */
+static void test_distilled_base(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_ptr(btf1, 1); /* [2] ptr to int */
+ btf__add_struct(btf1, "s1", 8); /* [3] struct s1 { */
+ btf__add_field(btf1, "f1", 2, 0, 0); /* int *f1; */
+ /* } */
+ btf__add_struct(btf1, "", 12); /* [4] struct { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf1, "f2", 3, 32, 0); /* struct s1 f2; */
+ /* } */
+ btf__add_int(btf1, "unsigned int", 4, 0); /* [5] unsigned int */
+ btf__add_union(btf1, "u1", 12); /* [6] union u1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf1, "f2", 2, 0, 0); /* int *f2; */
+ /* } */
+ btf__add_union(btf1, "", 4); /* [7] union { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_enum(btf1, "e1", 4); /* [8] enum e1 { */
+ btf__add_enum_value(btf1, "v1", 1); /* v1 = 1; */
+ /* } */
+ btf__add_enum(btf1, "", 4); /* [9] enum { */
+ btf__add_enum_value(btf1, "av1", 2); /* av1 = 2; */
+ /* } */
+ btf__add_enum64(btf1, "e641", 8, true); /* [10] enum64 { */
+ btf__add_enum64_value(btf1, "v1", 1024); /* v1 = 1024; */
+ /* } */
+ btf__add_enum64(btf1, "", 8, true); /* [11] enum64 { */
+ btf__add_enum64_value(btf1, "v1", 1025); /* v1 = 1025; */
+ /* } */
+ btf__add_struct(btf1, "unneeded", 4); /* [12] struct unneeded { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_struct(btf1, "embedded", 4); /* [13] struct embedded { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_func_proto(btf1, 1); /* [14] int (*)(int *p1); */
+ btf__add_func_param(btf1, "p1", 1);
+
+ btf__add_array(btf1, 1, 1, 3); /* [15] int [3]; */
+
+ btf__add_struct(btf1, "from_proto", 4); /* [16] struct from_proto { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_union(btf1, "u1", 4); /* [17] union u1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0");
+
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+
+ btf__add_ptr(btf2, 3); /* [18] ptr to struct s1 */
+ /* add ptr to struct anon */
+ btf__add_ptr(btf2, 4); /* [19] ptr to struct (anon) */
+ btf__add_const(btf2, 6); /* [20] const union u1 */
+ btf__add_restrict(btf2, 7); /* [21] restrict union (anon) */
+ btf__add_volatile(btf2, 8); /* [22] volatile enum e1 */
+ btf__add_typedef(btf2, "et", 9); /* [23] typedef enum (anon) */
+ btf__add_const(btf2, 10); /* [24] const enum64 e641 */
+ btf__add_ptr(btf2, 11); /* [25] restrict enum64 (anon) */
+ btf__add_struct(btf2, "with_embedded", 4); /* [26] struct with_embedded { */
+ btf__add_field(btf2, "f1", 13, 0, 0); /* struct embedded f1; */
+ /* } */
+ btf__add_func(btf2, "fn", BTF_FUNC_STATIC, 14); /* [27] int fn(int p1); */
+ btf__add_typedef(btf2, "arraytype", 15); /* [28] typedef int[3] foo; */
+ btf__add_func_proto(btf2, 1); /* [29] int (*)(struct from_proto p1); */
+ btf__add_func_param(btf2, "p1", 16);
+
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[18] PTR '(anon)' type_id=3",
+ "[19] PTR '(anon)' type_id=4",
+ "[20] CONST '(anon)' type_id=6",
+ "[21] RESTRICT '(anon)' type_id=7",
+ "[22] VOLATILE '(anon)' type_id=8",
+ "[23] TYPEDEF 'et' type_id=9",
+ "[24] CONST '(anon)' type_id=10",
+ "[25] PTR '(anon)' type_id=11",
+ "[26] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=13 bits_offset=0",
+ "[27] FUNC 'fn' type_id=14 linkage=static",
+ "[28] TYPEDEF 'arraytype' type_id=15",
+ "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=16");
+
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(8, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's1' size=8 vlen=0",
+ "[3] UNION 'u1' size=12 vlen=0",
+ "[4] ENUM 'e1' encoding=UNSIGNED size=4 vlen=0",
+ "[5] ENUM 'e641' encoding=UNSIGNED size=8 vlen=0",
+ "[6] STRUCT 'embedded' size=4 vlen=0",
+ "[7] STRUCT 'from_proto' size=4 vlen=0",
+ /* split BTF; these types should match split BTF above from 17-28, with
+ * updated type id references
+ */
+ "[8] PTR '(anon)' type_id=2",
+ "[9] PTR '(anon)' type_id=20",
+ "[10] CONST '(anon)' type_id=3",
+ "[11] RESTRICT '(anon)' type_id=21",
+ "[12] VOLATILE '(anon)' type_id=4",
+ "[13] TYPEDEF 'et' type_id=22",
+ "[14] CONST '(anon)' type_id=5",
+ "[15] PTR '(anon)' type_id=23",
+ "[16] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=6 bits_offset=0",
+ "[17] FUNC 'fn' type_id=24 linkage=static",
+ "[18] TYPEDEF 'arraytype' type_id=25",
+ "[19] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=7",
+ /* split BTF types added from original base BTF below */
+ "[20] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=32",
+ "[21] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[22] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[23] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[24] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[25] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3");
+
+ if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[18] PTR '(anon)' type_id=3",
+ "[19] PTR '(anon)' type_id=30",
+ "[20] CONST '(anon)' type_id=6",
+ "[21] RESTRICT '(anon)' type_id=31",
+ "[22] VOLATILE '(anon)' type_id=8",
+ "[23] TYPEDEF 'et' type_id=32",
+ "[24] CONST '(anon)' type_id=10",
+ "[25] PTR '(anon)' type_id=33",
+ "[26] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=13 bits_offset=0",
+ "[27] FUNC 'fn' type_id=34 linkage=static",
+ "[28] TYPEDEF 'arraytype' type_id=35",
+ "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=16",
+ /* below here are (duplicate) anon base types added by distill
+ * process to split BTF.
+ */
+ "[30] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[31] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[32] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[33] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[34] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[35] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3");
+
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* ensure we can cope with multiple types with the same name in
+ * distilled base BTF. In this case because sizes are different,
+ * we can still disambiguate them.
+ */
+static void test_distilled_base_multi(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* If a needed type is not present in the base BTF we wish to relocate
+ * with, btf__relocate() should error out.
+ */
+static void test_distilled_base_missing_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ return;
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ btf5,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* With 2 types of same size in distilled base BTF, relocation should
+ * fail as we have no means to choose between them.
+ */
+static void test_distilled_base_multi_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf1), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* With 2 types of same size in base BTF, relocation should
+ * fail as we have no means to choose between them.
+ */
+static void test_distilled_base_multi_err2(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ return;
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf5,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* create split reference BTF from vmlinux + split BTF with a few type references;
+ * ensure the resultant split reference BTF is as expected, containing only types
+ * needed to disambiguate references from split BTF.
+ */
+static void test_distilled_base_vmlinux(void)
+{
+ struct btf *split_btf = NULL, *vmlinux_btf = btf__load_vmlinux_btf();
+ struct btf *split_dist = NULL, *base_dist = NULL;
+ __s32 int_id, myint_id;
+
+ if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux"))
+ return;
+ int_id = btf__find_by_name_kind(vmlinux_btf, "int", BTF_KIND_INT);
+ if (!ASSERT_GT(int_id, 0, "find_int"))
+ goto cleanup;
+ split_btf = btf__new_empty_split(vmlinux_btf);
+ if (!ASSERT_OK_PTR(split_btf, "new_split"))
+ goto cleanup;
+ myint_id = btf__add_typedef(split_btf, "myint", int_id);
+ btf__add_ptr(split_btf, myint_id);
+
+ if (!ASSERT_EQ(btf__distill_base(split_btf, &base_dist, &split_dist), 0,
+ "distill_vmlinux_base"))
+ goto cleanup;
+
+ if (!ASSERT_OK_PTR(split_dist, "split_distilled") ||
+ !ASSERT_OK_PTR(base_dist, "base_dist"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ split_dist,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] TYPEDEF 'myint' type_id=1",
+ "[3] PTR '(anon)' type_id=2");
+
+cleanup:
+ btf__free(split_dist);
+ btf__free(base_dist);
+ btf__free(split_btf);
+ btf__free(vmlinux_btf);
+}
+
+void test_btf_distill(void)
+{
+ if (test__start_subtest("distilled_base"))
+ test_distilled_base();
+ if (test__start_subtest("distilled_base_multi"))
+ test_distilled_base_multi();
+ if (test__start_subtest("distilled_base_missing_err"))
+ test_distilled_base_missing_err();
+ if (test__start_subtest("distilled_base_multi_err"))
+ test_distilled_base_multi_err();
+ if (test__start_subtest("distilled_base_multi_err2"))
+ test_distilled_base_multi_err2();
+ if (test__start_subtest("distilled_base_vmlinux"))
+ test_distilled_base_vmlinux();
+}
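
For orientation, the libbpf flow these subtests exercise boils down to the sequence below: build split BTF on top of a base, distill the base down to the types the split BTF references, and later relocate the distilled split BTF against a full base again. Error handling is trimmed and the function and variable names are illustrative.

#include <bpf/btf.h>

static int distill_and_relocate(struct btf *split, struct btf *new_base)
{
	struct btf *dist_base = NULL, *dist_split = NULL;
	int err;

	/* keep only the base types that 'split' actually references */
	err = btf__distill_base(split, &dist_base, &dist_split);
	if (err)
		return err;

	/* ...ship dist_base + dist_split, then later... */

	/* rewrite dist_split's base references against a full base BTF;
	 * the tests above expect -EINVAL when a needed type is missing
	 * or ambiguous in that base
	 */
	err = btf__relocate(dist_split, new_base);

	btf__free(dist_split);
	btf__free(dist_base);
	return err;
}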
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c
new file mode 100644
index 000000000000..32159d3eb281
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "btf_helpers.h"
+#include "bpf/libbpf_internal.h"
+
+struct field_data {
+ __u32 ids[5];
+ const char *strs[5];
+} fields[] = {
+ { .ids = {}, .strs = {} },
+ { .ids = {}, .strs = { "int" } },
+ { .ids = {}, .strs = { "int64" } },
+ { .ids = { 1 }, .strs = { "" } },
+ { .ids = { 2, 1 }, .strs = { "" } },
+ { .ids = { 3, 1 }, .strs = { "s1", "f1", "f2" } },
+ { .ids = { 1, 5 }, .strs = { "u1", "f1", "f2" } },
+ { .ids = {}, .strs = { "e1", "v1", "v2" } },
+ { .ids = {}, .strs = { "fw1" } },
+ { .ids = { 1 }, .strs = { "t" } },
+ { .ids = { 2 }, .strs = { "" } },
+ { .ids = { 1 }, .strs = { "" } },
+ { .ids = { 3 }, .strs = { "" } },
+ { .ids = { 1, 1, 3 }, .strs = { "", "p1", "p2" } },
+ { .ids = { 13 }, .strs = { "func" } },
+ { .ids = { 1 }, .strs = { "var1" } },
+ { .ids = { 3 }, .strs = { "var2" } },
+ { .ids = {}, .strs = { "float" } },
+ { .ids = { 11 }, .strs = { "decltag" } },
+ { .ids = { 6 }, .strs = { "typetag" } },
+ { .ids = {}, .strs = { "e64", "eval1", "eval2", "eval3" } },
+ { .ids = { 15, 16 }, .strs = { "datasec1" } }
+
+};
+
+/* Fabricate BTF with various types and check BTF field iteration finds types,
+ * strings expected.
+ */
+void test_btf_field_iter(void)
+{
+ struct btf *btf = NULL;
+ int id;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ return;
+
+ btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf, "int64", 8, BTF_INT_SIGNED); /* [2] int64 */
+ btf__add_ptr(btf, 1); /* [3] int * */
+ btf__add_array(btf, 1, 2, 3); /* [4] int64[3] */
+ btf__add_struct(btf, "s1", 12); /* [5] struct s1 { */
+ btf__add_field(btf, "f1", 3, 0, 0); /* int *f1; */
+ btf__add_field(btf, "f2", 1, 0, 0); /* int f2; */
+ /* } */
+ btf__add_union(btf, "u1", 12); /* [6] union u1 { */
+ btf__add_field(btf, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf, "f2", 5, 0, 0); /* struct s1 f2; */
+ /* } */
+ btf__add_enum(btf, "e1", 4); /* [7] enum e1 { */
+ btf__add_enum_value(btf, "v1", 1); /* v1 = 1; */
+ btf__add_enum_value(btf, "v2", 2); /* v2 = 2; */
+ /* } */
+
+ btf__add_fwd(btf, "fw1", BTF_FWD_STRUCT); /* [8] struct fw1; */
+ btf__add_typedef(btf, "t", 1); /* [9] typedef int t; */
+ btf__add_volatile(btf, 2); /* [10] volatile int64; */
+ btf__add_const(btf, 1); /* [11] const int; */
+ btf__add_restrict(btf, 3); /* [12] restrict int *; */
+ btf__add_func_proto(btf, 1); /* [13] int (*)(int p1, int *p2); */
+ btf__add_func_param(btf, "p1", 1);
+ btf__add_func_param(btf, "p2", 3);
+
+ btf__add_func(btf, "func", BTF_FUNC_GLOBAL, 13);/* [14] int func(int p1, int *p2); */
+ btf__add_var(btf, "var1", BTF_VAR_STATIC, 1); /* [15] static int var1; */
+ btf__add_var(btf, "var2", BTF_VAR_STATIC, 3); /* [16] static int *var2; */
+ btf__add_float(btf, "float", 4); /* [17] float; */
+ btf__add_decl_tag(btf, "decltag", 11, -1); /* [18] decltag const int; */
+ btf__add_type_tag(btf, "typetag", 6); /* [19] typetag union u1; */
+ btf__add_enum64(btf, "e64", 8, true); /* [20] enum { */
+ btf__add_enum64_value(btf, "eval1", 1000); /* eval1 = 1000, */
+ btf__add_enum64_value(btf, "eval2", 2000); /* eval2 = 2000, */
+ btf__add_enum64_value(btf, "eval3", 3000); /* eval3 = 3000 */
+ /* } */
+ btf__add_datasec(btf, "datasec1", 12); /* [21] datasec datasec1 */
+ btf__add_datasec_var_info(btf, 15, 0, 4);
+ btf__add_datasec_var_info(btf, 16, 4, 8);
+
+ VALIDATE_RAW_BTF(
+ btf,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int64' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=3",
+ "[5] STRUCT 's1' size=12 vlen=2\n"
+ "\t'f1' type_id=3 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=0",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=5 bits_offset=0",
+ "[7] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
+ "\t'v1' val=1\n"
+ "\t'v2' val=2",
+ "[8] FWD 'fw1' fwd_kind=struct",
+ "[9] TYPEDEF 't' type_id=1",
+ "[10] VOLATILE '(anon)' type_id=2",
+ "[11] CONST '(anon)' type_id=1",
+ "[12] RESTRICT '(anon)' type_id=3",
+ "[13] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
+ "\t'p1' type_id=1\n"
+ "\t'p2' type_id=3",
+ "[14] FUNC 'func' type_id=13 linkage=global",
+ "[15] VAR 'var1' type_id=1, linkage=static",
+ "[16] VAR 'var2' type_id=3, linkage=static",
+ "[17] FLOAT 'float' size=4",
+ "[18] DECL_TAG 'decltag' type_id=11 component_idx=-1",
+ "[19] TYPE_TAG 'typetag' type_id=6",
+ "[20] ENUM64 'e64' encoding=SIGNED size=8 vlen=3\n"
+ "\t'eval1' val=1000\n"
+ "\t'eval2' val=2000\n"
+ "\t'eval3' val=3000",
+ "[21] DATASEC 'datasec1' size=12 vlen=2\n"
+ "\ttype_id=15 offset=0 size=4\n"
+ "\ttype_id=16 offset=4 size=8");
+
+ for (id = 1; id < btf__type_cnt(btf); id++) {
+ struct btf_type *t = btf_type_by_id(btf, id);
+ struct btf_field_iter it_strs, it_ids;
+ int str_idx = 0, id_idx = 0;
+ __u32 *next_str, *next_id;
+
+ if (!ASSERT_OK_PTR(t, "btf_type_by_id"))
+ break;
+ if (!ASSERT_OK(btf_field_iter_init(&it_strs, t, BTF_FIELD_ITER_STRS),
+ "iter_init_strs"))
+ break;
+ if (!ASSERT_OK(btf_field_iter_init(&it_ids, t, BTF_FIELD_ITER_IDS),
+ "iter_init_ids"))
+ break;
+ while ((next_str = btf_field_iter_next(&it_strs))) {
+ const char *str = btf__str_by_offset(btf, *next_str);
+
+ if (!ASSERT_OK(strcmp(fields[id].strs[str_idx], str), "field_str_match"))
+ break;
+ str_idx++;
+ }
+ /* ensure no more strings are expected */
+ ASSERT_EQ(fields[id].strs[str_idx], NULL, "field_str_cnt");
+
+ while ((next_id = btf_field_iter_next(&it_ids))) {
+ if (!ASSERT_EQ(*next_id, fields[id].ids[id_idx], "field_id_match"))
+ break;
+ id_idx++;
+ }
+ /* ensure no more ids are expected */
+ ASSERT_EQ(fields[id].ids[id_idx], 0, "field_id_cnt");
+ }
+ btf__free(btf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
index addf720428f7..9709c8db7275 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
@@ -32,7 +32,7 @@ static int run_test(int cgroup_fd, int server_fd, bool classid)
goto out;
}
- fd = connect_to_fd_opts(server_fd, &opts);
+ fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);
if (fd < 0)
err = -1;
else
@@ -52,7 +52,7 @@ void test_cgroup_v1v2(void)
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
if (!ASSERT_GE(server_fd, 0, "server_fd"))
return;
- client_fd = connect_to_fd_opts(server_fd, &opts);
+ client_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);
if (!ASSERT_GE(client_fd, 0, "client_fd")) {
close(server_fd);
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index ecf89df78109..2570bd4b0cb2 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -18,6 +18,11 @@ static const char * const cpumask_success_testcases[] = {
"test_insert_leave",
"test_insert_remove_release",
"test_global_mask_rcu",
+ "test_global_mask_array_one_rcu",
+ "test_global_mask_array_rcu",
+ "test_global_mask_array_l2_rcu",
+ "test_global_mask_nested_rcu",
+ "test_global_mask_nested_deep_rcu",
"test_cpumask_weight",
};
diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
index 3b7c57fe55a5..08b6391f2f56 100644
--- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
+++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
@@ -69,15 +69,17 @@ static struct test_case test_cases[] = {
{
N(SCHED_CLS, struct __sk_buff, tstamp),
.read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
- "w11 &= 3;"
- "if w11 != 0x3 goto pc+2;"
+ "if w11 & 0x4 goto pc+1;"
+ "goto pc+4;"
+ "if w11 & 0x3 goto pc+1;"
+ "goto pc+2;"
"$dst = 0;"
"goto pc+1;"
"$dst = *(u64 *)($ctx + sk_buff::tstamp);",
.write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
- "if w11 & 0x2 goto pc+1;"
+ "if w11 & 0x4 goto pc+1;"
"goto pc+2;"
- "w11 &= -2;"
+ "w11 &= -4;"
"*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;"
"*(u64 *)($ctx + sk_buff::tstamp) = $src;",
},
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 596536def43d..49b1ffc9af1f 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -50,9 +50,9 @@ void serial_test_fexit_stress(void)
out:
for (i = 0; i < bpf_max_tramp_links; i++) {
- if (link_fd[i])
+ if (link_fd[i] > 0)
close(link_fd[i]);
- if (fexit_fd[i])
+ if (fexit_fd[i] > 0)
close(fexit_fd[i]);
}
free(fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c
index 5165b38f0e59..f7619e0ade10 100644
--- a/tools/testing/selftests/bpf/prog_tests/find_vma.c
+++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c
@@ -29,8 +29,8 @@ static int open_pe(void)
/* create perf event */
attr.size = sizeof(attr);
- attr.type = PERF_TYPE_HARDWARE;
- attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
index 284764e7179f..4ddb8a5fece8 100644
--- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
+++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
@@ -158,15 +158,13 @@ static int send_frags6(int client)
void test_bpf_ip_check_defrag_ok(bool ipv6)
{
+ int family = ipv6 ? AF_INET6 : AF_INET;
struct network_helper_opts rx_opts = {
.timeout_ms = 1000,
- .noconnect = true,
};
struct network_helper_opts tx_ops = {
.timeout_ms = 1000,
- .type = SOCK_RAW,
.proto = IPPROTO_RAW,
- .noconnect = true,
};
struct sockaddr_storage caddr;
struct ip_check_defrag *skel;
@@ -192,7 +190,7 @@ void test_bpf_ip_check_defrag_ok(bool ipv6)
nstoken = open_netns(NS1);
if (!ASSERT_OK_PTR(nstoken, "setns ns1"))
goto out;
- srv_fd = start_server(ipv6 ? AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0);
+ srv_fd = start_server(family, SOCK_DGRAM, NULL, SERVER_PORT, 0);
close_netns(nstoken);
if (!ASSERT_GE(srv_fd, 0, "start_server"))
goto out;
@@ -201,18 +199,18 @@ void test_bpf_ip_check_defrag_ok(bool ipv6)
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
goto out;
- client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops);
+ client_tx_fd = client_socket(family, SOCK_RAW, &tx_ops);
close_netns(nstoken);
- if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts"))
+ if (!ASSERT_GE(client_tx_fd, 0, "client_socket"))
goto out;
/* Open rx socket in ns0 */
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
goto out;
- client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts);
+ client_rx_fd = client_socket(family, SOCK_DGRAM, &rx_opts);
close_netns(nstoken);
- if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts"))
+ if (!ASSERT_GE(client_rx_fd, 0, "client_socket"))
goto out;
/* Bind rx socket to a premeditated port */
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 2eb71559713c..5b743212292f 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -78,6 +78,7 @@ static struct kfunc_test_params kfunc_tests[] = {
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
TC_TEST(kfunc_call_test_static_unused_arg, 0),
+ TC_TEST(kfunc_call_ctx, 0),
};
struct syscall_test_args {
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c
new file mode 100644
index 000000000000..c8f4dcaac7c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2024 Meta Platforms, Inc */
+
+#include <test_progs.h>
+#include "test_kfunc_param_nullable.skel.h"
+
+void test_kfunc_param_nullable(void)
+{
+ RUN_TESTS(test_kfunc_param_nullable);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 2fb89de63bd2..77d07e0a4a55 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -183,6 +183,18 @@ static void test_linked_list_success(int mode, bool leave_in_map)
if (!leave_in_map)
clear_fields(skel->maps.bss_A);
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts);
+ ASSERT_OK(ret, "global_list_push_pop_nested");
+ ASSERT_OK(opts.retval, "global_list_push_pop_nested retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts);
+ ASSERT_OK(ret, "global_list_array_push_pop");
+ ASSERT_OK(opts.retval, "global_list_array_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
if (mode == PUSH_POP)
goto end;
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 274d2e033e39..d2ca32fa3b21 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -89,13 +89,8 @@ static int start_mptcp_server(int family, const char *addr_str, __u16 port,
.timeout_ms = timeout_ms,
.proto = IPPROTO_MPTCP,
};
- struct sockaddr_storage addr;
- socklen_t addrlen;
- if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
- return -1;
-
- return start_server_addr(SOCK_STREAM, &addr, addrlen, &opts);
+ return start_server_str(family, SOCK_STREAM, addr_str, port, &opts);
}
static int verify_tsk(int map_fd, int client_fd)
diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
index e9300c96607d..9818f06c97c5 100644
--- a/tools/testing/selftests/bpf/prog_tests/rbtree.c
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c
@@ -31,6 +31,28 @@ static void test_rbtree_add_nodes(void)
rbtree__destroy(skel);
}
+static void test_rbtree_add_nodes_nested(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts);
+ ASSERT_OK(ret, "rbtree_add_nodes_nested run");
+ ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval");
+ ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran");
+
+ rbtree__destroy(skel);
+}
+
static void test_rbtree_add_and_remove(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -53,6 +75,27 @@ static void test_rbtree_add_and_remove(void)
rbtree__destroy(skel);
}
+static void test_rbtree_add_and_remove_array(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts);
+ ASSERT_OK(ret, "rbtree_add_and_remove_array");
+ ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval");
+
+ rbtree__destroy(skel);
+}
+
static void test_rbtree_first_and_remove(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -104,8 +147,12 @@ void test_rbtree_success(void)
{
if (test__start_subtest("rbtree_add_nodes"))
test_rbtree_add_nodes();
+ if (test__start_subtest("rbtree_add_nodes_nested"))
+ test_rbtree_add_nodes_nested();
if (test__start_subtest("rbtree_add_and_remove"))
test_rbtree_add_and_remove();
+ if (test__start_subtest("rbtree_add_and_remove_array"))
+ test_rbtree_add_and_remove_array();
if (test__start_subtest("rbtree_first_and_remove"))
test_rbtree_first_and_remove();
if (test__start_subtest("rbtree_api_release_aliasing"))
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index 4c6f42dae409..da430df45aa4 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -12,9 +12,11 @@
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
+
#include "test_ringbuf.lskel.h"
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
+#include "test_ringbuf_write.lskel.h"
#define EDONE 7777
@@ -84,6 +86,58 @@ static void *poll_thread(void *input)
return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}
+static void ringbuf_write_subtest(void)
+{
+ struct test_ringbuf_write_lskel *skel;
+ int page_size = getpagesize();
+ size_t *mmap_ptr;
+ int err, rb_fd;
+
+ skel = test_ringbuf_write_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->maps.ringbuf.max_entries = 0x4000;
+
+ err = test_ringbuf_write_lskel__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ rb_fd = skel->maps.ringbuf.map_fd;
+
+ mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+ if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
+ goto cleanup;
+ *mmap_ptr = 0x3000;
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+
+ skel->bss->pid = getpid();
+
+ ringbuf = ring_buffer__new(rb_fd, process_sample, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ringbuf_new"))
+ goto cleanup;
+
+ err = test_ringbuf_write_lskel__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup_ringbuf;
+
+ skel->bss->discarded = 0;
+ skel->bss->passed = 0;
+
+ /* trigger exactly two samples */
+ syscall(__NR_getpgid);
+ syscall(__NR_getpgid);
+
+ ASSERT_EQ(skel->bss->discarded, 2, "discarded");
+ ASSERT_EQ(skel->bss->passed, 0, "passed");
+
+ test_ringbuf_write_lskel__detach(skel);
+cleanup_ringbuf:
+ ring_buffer__free(ringbuf);
+cleanup:
+ test_ringbuf_write_lskel__destroy(skel);
+}
+
static void ringbuf_subtest(void)
{
const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
@@ -451,4 +505,6 @@ void test_ringbuf(void)
ringbuf_n_subtest();
if (test__start_subtest("ringbuf_map_key"))
ringbuf_map_key_subtest();
+ if (test__start_subtest("ringbuf_write"))
+ ringbuf_write_subtest();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 920aee41bd58..6cc69900b310 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -156,7 +156,8 @@ static void test_send_signal_tracepoint(bool signal_thread)
static void test_send_signal_perf(bool signal_thread)
{
struct perf_event_attr attr = {
- .sample_period = 1,
+ .freq = 1,
+ .sample_freq = 1000,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index 597d0467a926..ae87c00867ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -77,6 +77,12 @@ struct test {
bool reuseport_has_conns; /* Add a connected socket to reuseport group */
};
+struct cb_opts {
+ int family;
+ int sotype;
+ bool reuseport;
+};
+
static __u32 duration; /* for CHECK macro */
static bool is_ipv6(const char *ip)
@@ -142,19 +148,14 @@ static int make_socket(int sotype, const char *ip, int port,
return fd;
}
-static int make_server(int sotype, const char *ip, int port,
- struct bpf_program *reuseport_prog)
+static int setsockopts(int fd, void *opts)
{
- struct sockaddr_storage addr = {0};
+ struct cb_opts *co = (struct cb_opts *)opts;
const int one = 1;
- int err, fd = -1;
-
- fd = make_socket(sotype, ip, port, &addr);
- if (fd < 0)
- return -1;
+ int err = 0;
/* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */
- if (sotype == SOCK_DGRAM) {
+ if (co->sotype == SOCK_DGRAM) {
err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) {
@@ -163,7 +164,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) {
+ if (co->sotype == SOCK_DGRAM && co->family == AF_INET6) {
err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) {
@@ -172,7 +173,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (sotype == SOCK_STREAM) {
+ if (co->sotype == SOCK_STREAM) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) {
@@ -181,7 +182,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (reuseport_prog) {
+ if (co->reuseport) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) {
@@ -190,19 +191,28 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- err = bind(fd, (void *)&addr, inetaddr_len(&addr));
- if (CHECK(err, "bind", "failed\n")) {
- log_err("failed to bind listen socket");
- goto fail;
- }
+fail:
+ return err;
+}
- if (sotype == SOCK_STREAM) {
- err = listen(fd, SOMAXCONN);
- if (CHECK(err, "make_server", "listen")) {
- log_err("failed to listen on port %d", port);
- goto fail;
- }
- }
+static int make_server(int sotype, const char *ip, int port,
+ struct bpf_program *reuseport_prog)
+{
+ struct cb_opts cb_opts = {
+ .family = is_ipv6(ip) ? AF_INET6 : AF_INET,
+ .sotype = sotype,
+ .reuseport = reuseport_prog,
+ };
+ struct network_helper_opts opts = {
+ .backlog = SOMAXCONN,
+ .post_socket_cb = setsockopts,
+ .cb_opts = &cb_opts,
+ };
+ int err, fd;
+
+ fd = start_server_str(cb_opts.family, sotype, ip, port, &opts);
+ if (!ASSERT_OK_FD(fd, "start_server_str"))
+ return -1;
/* Late attach reuseport prog so we can have one init path */
if (reuseport_prog) {
@@ -406,18 +416,12 @@ static int udp_recv_send(int server_fd)
}
/* Reply from original destination address. */
- fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0);
- if (CHECK(fd < 0, "socket", "failed\n")) {
+ fd = start_server_addr(SOCK_DGRAM, dst_addr, sizeof(*dst_addr), NULL);
+ if (!ASSERT_OK_FD(fd, "start_server_addr")) {
log_err("failed to create tx socket");
return -1;
}
- ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr));
- if (CHECK(ret, "bind", "failed\n")) {
- log_err("failed to bind tx socket");
- goto out;
- }
-
msg.msg_control = NULL;
msg.msg_controllen = 0;
n = sendmsg(fd, &msg, 0);
@@ -629,9 +633,6 @@ static void run_lookup_prog(const struct test *t)
* BPF socket lookup.
*/
if (t->reuseport_has_conns) {
- struct sockaddr_storage addr = {};
- socklen_t len = sizeof(addr);
-
/* Add an extra socket to reuseport group */
reuse_conn_fd = make_server(t->sotype, t->listen_at.ip,
t->listen_at.port,
@@ -639,12 +640,9 @@ static void run_lookup_prog(const struct test *t)
if (reuse_conn_fd < 0)
goto close;
- /* Connect the extra socket to itself */
- err = getsockname(reuse_conn_fd, (void *)&addr, &len);
- if (CHECK(err, "getsockname", "errno %d\n", errno))
- goto close;
- err = connect(reuse_conn_fd, (void *)&addr, len);
- if (CHECK(err, "connect", "errno %d\n", errno))
+ /* Connect the extra socket to itself */
+ err = connect_fd_to_fd(reuse_conn_fd, reuse_conn_fd, 0);
+ if (!ASSERT_OK(err, "connect_fd_to_fd"))
goto close;
}
@@ -994,7 +992,7 @@ static void drop_on_reuseport(const struct test *t)
err = update_lookup_map(t->sock_map, SERVER_A, server1);
if (err)
- goto detach;
+ goto close_srv1;
/* second server on destination address we should never reach */
server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
index 1d3a20f01b60..7cd8be2780ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
@@ -70,7 +70,7 @@ static void *server_thread(void *arg)
return (void *)(long)err;
}
-static int custom_cb(int fd, const struct post_socket_opts *opts)
+static int custom_cb(int fd, void *opts)
{
char buf;
int err;
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c
index bc9841144685..1af9ec1149aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_links.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c
@@ -9,6 +9,8 @@
#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
#include "test_tc_link.skel.h"
+
+#include "netlink_helpers.h"
#include "tc_helpers.h"
void serial_test_tc_links_basic(void)
@@ -1787,6 +1789,65 @@ void serial_test_tc_links_ingress(void)
test_tc_links_ingress(BPF_TCX_INGRESS, false, false);
}
+struct qdisc_req {
+ struct nlmsghdr n;
+ struct tcmsg t;
+ char buf[1024];
+};
+
+static int qdisc_replace(int ifindex, const char *kind, bool block)
+{
+ struct rtnl_handle rth = { .fd = -1 };
+ struct qdisc_req req;
+ int err;
+
+ err = rtnl_open(&rth, 0);
+ if (!ASSERT_OK(err, "open_rtnetlink"))
+ return err;
+
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST;
+ req.n.nlmsg_type = RTM_NEWQDISC;
+ req.t.tcm_family = AF_UNSPEC;
+ req.t.tcm_ifindex = ifindex;
+ req.t.tcm_parent = 0xfffffff1;
+
+ addattr_l(&req.n, sizeof(req), TCA_KIND, kind, strlen(kind) + 1);
+ if (block)
+ addattr32(&req.n, sizeof(req), TCA_INGRESS_BLOCK, 1);
+
+ err = rtnl_talk(&rth, &req.n, NULL);
+ ASSERT_OK(err, "talk_rtnetlink");
+ rtnl_close(&rth);
+ return err;
+}
+
+void serial_test_tc_links_dev_chain0(void)
+{
+ int err, ifindex;
+
+ ASSERT_OK(system("ip link add dev foo type veth peer name bar"), "add veth");
+ ifindex = if_nametoindex("foo");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+ err = qdisc_replace(ifindex, "ingress", true);
+ if (!ASSERT_OK(err, "attaching ingress"))
+ goto cleanup;
+ ASSERT_OK(system("tc filter add block 1 matchall action skbmod swap mac"), "add block");
+ err = qdisc_replace(ifindex, "clsact", false);
+ if (!ASSERT_OK(err, "attaching clsact"))
+ goto cleanup;
+ /* Heuristic: kern_sync_rcu() alone does not work; a wait of ~5s
+ * reliably reproduced the issue on kernels without the fix.
+ */
+ sleep(5);
+ ASSERT_OK(system("tc filter add dev foo ingress matchall action skbmod swap mac"), "add filter");
+cleanup:
+ ASSERT_OK(system("ip link del dev foo"), "del veth");
+ ASSERT_EQ(if_nametoindex("foo"), 0, "foo removed");
+ ASSERT_EQ(if_nametoindex("bar"), 0, "bar removed");
+}
+
static void test_tc_links_dev_mixed(int target)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
index 15ee7b2fc410..b9135720024c 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
@@ -73,6 +73,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
"up primary");
ASSERT_OK(system("ip addr add dev " netkit_name " 10.0.0.1/24"),
"addr primary");
+
+ if (mode == NETKIT_L3) {
+ ASSERT_EQ(system("ip link set dev " netkit_name
+ " addr ee:ff:bb:cc:aa:dd 2> /dev/null"), 512,
+ "set hwaddress");
+ } else {
+ ASSERT_OK(system("ip link set dev " netkit_name
+ " addr ee:ff:bb:cc:aa:dd"),
+ "set hwaddress");
+ }
if (same_netns) {
ASSERT_OK(system("ip link set dev " netkit_peer " up"),
"up peer");
@@ -89,6 +99,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
return err;
}
+static void move_netkit(void)
+{
+ ASSERT_OK(system("ip link set " netkit_peer " netns foo"),
+ "move peer");
+ ASSERT_OK(system("ip netns exec foo ip link set dev "
+ netkit_peer " up"), "up peer");
+ ASSERT_OK(system("ip netns exec foo ip addr add dev "
+ netkit_peer " 10.0.0.2/24"), "addr peer");
+}
+
static void destroy_netkit(void)
{
ASSERT_OK(system("ip link del dev " netkit_name), "del primary");
@@ -685,3 +705,77 @@ void serial_test_tc_netkit_neigh_links(void)
serial_test_tc_netkit_neigh_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
serial_test_tc_netkit_neigh_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
}
+
+static void serial_test_tc_netkit_pkt_type_mode(int mode)
+{
+ LIBBPF_OPTS(bpf_netkit_opts, optl_nk);
+ LIBBPF_OPTS(bpf_tcx_opts, optl_tcx);
+ int err, ifindex, ifindex2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, true);
+ if (err)
+ return;
+
+ ifindex2 = if_nametoindex(netkit_peer);
+ ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2");
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc7,
+ BPF_TCX_INGRESS), 0, "tc7_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl_nk);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc7, ifindex2, &optl_tcx);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc7 = link;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 1);
+
+ move_netkit();
+
+ tc_skel_reset_all_seen(skel);
+ skel->bss->set_type = true;
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc7, true, "seen_tc7");
+
+ ASSERT_EQ(skel->bss->seen_host, true, "seen_host");
+ ASSERT_EQ(skel->bss->seen_mcast, true, "seen_mcast");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_pkt_type(void)
+{
+ serial_test_tc_netkit_pkt_type_mode(NETKIT_L2);
+ serial_test_tc_netkit_pkt_type_mode(NETKIT_L3);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index b1073d36d77a..327d51f59142 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -890,9 +890,6 @@ static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
- /* non mono delivery time is not forwarded */
- ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
- dtime_cnt_str(t, INGRESS_FWDNS_P101));
for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++)
ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
index ae93411fd582..09ca13bdf6ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
@@ -11,6 +11,7 @@ static int sanity_run(struct bpf_program *prog)
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
+ .flags = BPF_F_TEST_SKB_CHECKSUM_COMPLETE,
);
prog_fd = bpf_program__fd(prog);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index 29e183a80f49..bbcf12696a6b 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -3,9 +3,12 @@
#include <test_progs.h>
#include <time.h>
+#include <sys/epoll.h>
+
#include "struct_ops_module.skel.h"
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"
+#include "struct_ops_detach.skel.h"
static void check_map_info(struct bpf_map_info *info)
{
@@ -242,6 +245,58 @@ cleanup:
struct_ops_forgotten_cb__destroy(skel);
}
+/* Detach a link from a user space program */
+static void test_detach_link(void)
+{
+ struct epoll_event ev, events[2];
+ struct struct_ops_detach *skel;
+ struct bpf_link *link = NULL;
+ int fd, epollfd = -1, nfds;
+ int err;
+
+ skel = struct_ops_detach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ fd = bpf_link__fd(link);
+ if (!ASSERT_GE(fd, 0, "link_fd"))
+ goto cleanup;
+
+ epollfd = epoll_create1(0);
+ if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
+ goto cleanup;
+
+ ev.events = EPOLLHUP;
+ ev.data.fd = fd;
+ err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
+ if (!ASSERT_OK(err, "epoll_ctl"))
+ goto cleanup;
+
+ err = bpf_link__detach(link);
+ if (!ASSERT_OK(err, "detach_link"))
+ goto cleanup;
+
+ /* Wait for EPOLLHUP */
+ nfds = epoll_wait(epollfd, events, 2, 500);
+ if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
+ goto cleanup;
+ if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
+ goto cleanup;
+
+cleanup:
+ if (epollfd >= 0)
+ close(epollfd);
+ bpf_link__destroy(link);
+ struct_ops_detach__destroy(skel);
+}
+
void serial_test_struct_ops_module(void)
{
if (test__start_subtest("struct_ops_load"))
@@ -254,5 +309,7 @@ void serial_test_struct_ops_module(void)
test_struct_ops_nulled_out_cb();
if (test__start_subtest("struct_ops_forgotten_cb"))
test_struct_ops_forgotten_cb();
+ if (test__start_subtest("test_detach_link"))
+ test_detach_link();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
new file mode 100644
index 000000000000..871d16cb95cf
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+#include <pthread.h>
+#include <network_helpers.h>
+
+#include "timer_lockup.skel.h"
+
+static long cpu;
+static int *timer1_err;
+static int *timer2_err;
+static bool skip;
+
+volatile int k = 0;
+
+static void *timer_lockup_thread(void *arg)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1000,
+ );
+ int i, prog_fd = *(int *)arg;
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
+ ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset),
+ &cpuset),
+ "cpu affinity");
+
+ for (i = 0; !READ_ONCE(*timer1_err) && !READ_ONCE(*timer2_err); i++) {
+ bpf_prog_test_run_opts(prog_fd, &opts);
+ /* Skip the test if we can't reproduce the race in a reasonable
+ * amount of time.
+ */
+ if (i > 50) {
+ WRITE_ONCE(skip, true);
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+void test_timer_lockup(void)
+{
+ int timer1_prog, timer2_prog;
+ struct timer_lockup *skel;
+ pthread_t thrds[2];
+ void *ret;
+
+ skel = timer_lockup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load"))
+ return;
+
+ timer1_prog = bpf_program__fd(skel->progs.timer1_prog);
+ timer2_prog = bpf_program__fd(skel->progs.timer2_prog);
+
+ timer1_err = &skel->bss->timer1_err;
+ timer2_err = &skel->bss->timer2_err;
+
+ if (!ASSERT_OK(pthread_create(&thrds[0], NULL, timer_lockup_thread,
+ &timer1_prog),
+ "pthread_create thread1"))
+ goto out;
+ if (!ASSERT_OK(pthread_create(&thrds[1], NULL, timer_lockup_thread,
+ &timer2_prog),
+ "pthread_create thread2")) {
+ pthread_exit(&thrds[0]);
+ goto out;
+ }
+
+ pthread_join(thrds[1], &ret);
+ pthread_join(thrds[0], &ret);
+
+ if (skip) {
+ test__skip();
+ goto out;
+ }
+
+ if (*timer1_err != -EDEADLK && *timer1_err != 0)
+ ASSERT_FAIL("timer1_err bad value");
+ if (*timer2_err != -EDEADLK && *timer2_err != 0)
+ ASSERT_FAIL("timer2_err bad value");
+out:
+ timer_lockup__destroy(skel);
+ return;
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
index fe0fb0c9849a..19e68d4b3532 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
@@ -3,8 +3,9 @@
#include <test_progs.h>
#include "tracing_struct.skel.h"
+#include "tracing_struct_many_args.skel.h"
-static void test_fentry(void)
+static void test_struct_args(void)
{
struct tracing_struct *skel;
int err;
@@ -55,6 +56,25 @@ static void test_fentry(void)
ASSERT_EQ(skel->bss->t6, 1, "t6 ret");
+destroy_skel:
+ tracing_struct__destroy(skel);
+}
+
+static void test_struct_many_args(void)
+{
+ struct tracing_struct_many_args *skel;
+ int err;
+
+ skel = tracing_struct_many_args__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tracing_struct_many_args__open_and_load"))
+ return;
+
+ err = tracing_struct_many_args__attach(skel);
+ if (!ASSERT_OK(err, "tracing_struct_many_args__attach"))
+ goto destroy_skel;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
ASSERT_EQ(skel->bss->t7_a, 16, "t7:a");
ASSERT_EQ(skel->bss->t7_b, 17, "t7:b");
ASSERT_EQ(skel->bss->t7_c, 18, "t7:c");
@@ -74,12 +94,28 @@ static void test_fentry(void)
ASSERT_EQ(skel->bss->t8_g, 23, "t8:g");
ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret");
- tracing_struct__detach(skel);
+ ASSERT_EQ(skel->bss->t9_a, 16, "t9:a");
+ ASSERT_EQ(skel->bss->t9_b, 17, "t9:b");
+ ASSERT_EQ(skel->bss->t9_c, 18, "t9:c");
+ ASSERT_EQ(skel->bss->t9_d, 19, "t9:d");
+ ASSERT_EQ(skel->bss->t9_e, 20, "t9:e");
+ ASSERT_EQ(skel->bss->t9_f, 21, "t9:f");
+ ASSERT_EQ(skel->bss->t9_g, 22, "t9:g");
+ ASSERT_EQ(skel->bss->t9_h_a, 23, "t9:h.a");
+ ASSERT_EQ(skel->bss->t9_h_b, 24, "t9:h.b");
+ ASSERT_EQ(skel->bss->t9_h_c, 25, "t9:h.c");
+ ASSERT_EQ(skel->bss->t9_h_d, 26, "t9:h.d");
+ ASSERT_EQ(skel->bss->t9_i, 27, "t9:i");
+ ASSERT_EQ(skel->bss->t9_ret, 258, "t9 ret");
+
destroy_skel:
- tracing_struct__destroy(skel);
+ tracing_struct_many_args__destroy(skel);
}
void test_tracing_struct(void)
{
- test_fentry();
+ if (test__start_subtest("struct_args"))
+ test_struct_args();
+ if (test__start_subtest("struct_many_args"))
+ test_struct_many_args();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 8269cdee33ae..bf6ca8e3eb13 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
+#include <pthread.h>
#include <test_progs.h>
#include "uprobe_multi.skel.h"
#include "uprobe_multi_bench.skel.h"
#include "uprobe_multi_usdt.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
+#include "../sdt.h"
static char test_data[] = "test_data";
@@ -25,9 +27,17 @@ noinline void uprobe_multi_func_3(void)
asm volatile ("");
}
+noinline void usdt_trigger(void)
+{
+ STAP_PROBE(test, pid_filter_usdt);
+}
+
struct child {
int go[2];
+ int c2p[2]; /* child -> parent channel */
int pid;
+ int tid;
+ pthread_t thread;
};
static void release_child(struct child *child)
@@ -38,6 +48,10 @@ static void release_child(struct child *child)
return;
close(child->go[1]);
close(child->go[0]);
+ if (child->thread)
+ pthread_join(child->thread, NULL);
+ close(child->c2p[0]);
+ close(child->c2p[1]);
if (child->pid > 0)
waitpid(child->pid, &child_status, 0);
}
@@ -63,7 +77,7 @@ static struct child *spawn_child(void)
if (pipe(child.go))
return NULL;
- child.pid = fork();
+ child.pid = child.tid = fork();
if (child.pid < 0) {
release_child(&child);
errno = EINVAL;
@@ -82,6 +96,7 @@ static struct child *spawn_child(void)
uprobe_multi_func_1();
uprobe_multi_func_2();
uprobe_multi_func_3();
+ usdt_trigger();
exit(errno);
}
@@ -89,6 +104,67 @@ static struct child *spawn_child(void)
return &child;
}
+static void *child_thread(void *ctx)
+{
+ struct child *child = ctx;
+ int c = 0, err;
+
+ child->tid = syscall(SYS_gettid);
+
+ /* let parent know we are ready */
+ err = write(child->c2p[1], &c, 1);
+ if (err != 1)
+ pthread_exit(&err);
+
+ /* wait for parent's kick */
+ err = read(child->go[0], &c, 1);
+ if (err != 1)
+ pthread_exit(&err);
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
+
+ err = 0;
+ pthread_exit(&err);
+}
+
+static struct child *spawn_thread(void)
+{
+ static struct child child;
+ int c, err;
+
+ /* pipe to notify child to execute the trigger functions */
+ if (pipe(child.go))
+ return NULL;
+ /* pipe to notify parent that child thread is ready */
+ if (pipe(child.c2p)) {
+ close(child.go[0]);
+ close(child.go[1]);
+ return NULL;
+ }
+
+ child.pid = getpid();
+
+ err = pthread_create(&child.thread, NULL, child_thread, &child);
+ if (err) {
+ err = -errno;
+ close(child.go[0]);
+ close(child.go[1]);
+ close(child.c2p[0]);
+ close(child.c2p[1]);
+ errno = -err;
+ return NULL;
+ }
+
+ err = read(child.c2p[0], &c, 1);
+ if (!ASSERT_EQ(err, 1, "child_thread_ready"))
+ return NULL;
+
+ return &child;
+}
+
static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
{
skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
@@ -103,15 +179,23 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
* passed at the probe attach.
*/
skel->bss->pid = child ? 0 : getpid();
+ skel->bss->expect_pid = child ? child->pid : 0;
+
+ /* Trigger all probes if we are testing a child *process*, just to make
+ * sure that PID filtering doesn't let through activations from the
+ * wrong PID; when we test a child *thread*, skip this to avoid
+ * double-counting the triggering events.
+ */
+ if (!child || !child->thread) {
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
+ }
if (child)
kick_child(child);
- /* trigger all probes */
- uprobe_multi_func_1();
- uprobe_multi_func_2();
- uprobe_multi_func_3();
-
/*
* There are 2 entry and 2 exit probes called for each uprobe_multi_func_[123]
* function and each sleepable probe (6) increments uprobe_multi_sleep_result.
@@ -126,8 +210,12 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
- if (child)
+ ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");
+
+ if (child) {
ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
+ ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
+ }
}
static void test_skel_api(void)
@@ -190,8 +278,24 @@ __test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_mul
if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
goto cleanup;
+ /* Attach (uprobe-backed) USDTs */
+ skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
+ "test", "pid_filter_usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
+ goto cleanup;
+
+ skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
+ "test", "pid_filter_usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
+ goto cleanup;
+
uprobe_multi_test_run(skel, child);
+ ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
+ if (child) {
+ ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
+ ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
+ }
cleanup:
uprobe_multi__destroy(skel);
}
@@ -210,6 +314,13 @@ test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi
return;
__test_attach_api(binary, pattern, opts, child);
+
+ /* pid filter (thread) */
+ child = spawn_thread();
+ if (!ASSERT_OK_PTR(child, "spawn_thread"))
+ return;
+
+ __test_attach_api(binary, pattern, opts, child);
}
static void test_attach_api_pattern(void)
@@ -397,7 +508,7 @@ static void test_attach_api_fails(void)
link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_ERR(link_fd, "link_fd"))
goto cleanup;
- ASSERT_EQ(link_fd, -ESRCH, "pid_is_wrong");
+ ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
cleanup:
if (link_fd >= 0)
@@ -495,6 +606,13 @@ static void test_link_api(void)
return;
__test_link_api(child);
+
+ /* pid filter (thread) */
+ child = spawn_thread();
+ if (!ASSERT_OK_PTR(child, "spawn_thread"))
+ return;
+
+ __test_link_api(child);
}
static void test_bench_attach_uprobe(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index c60db8beeb73..9dc3687bc406 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -53,6 +53,7 @@
#include "verifier_movsx.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
+#include "verifier_or_jmp32_k.skel.h"
#include "verifier_precision.skel.h"
#include "verifier_prevent_map_lookup.skel.h"
#include "verifier_raw_stack.skel.h"
@@ -67,6 +68,7 @@
#include "verifier_search_pruning.skel.h"
#include "verifier_sock.skel.h"
#include "verifier_sock_addr.skel.h"
+#include "verifier_sockmap_mutate.skel.h"
#include "verifier_spill_fill.skel.h"
#include "verifier_spin_lock.skel.h"
#include "verifier_stack_ptr.skel.h"
@@ -85,6 +87,7 @@
#include "verifier_xadd.skel.h"
#include "verifier_xdp.skel.h"
#include "verifier_xdp_direct_packet_access.skel.h"
+#include "verifier_bits_iter.skel.h"
#define MAX_ENTRIES 11
@@ -169,6 +172,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); }
void test_verifier_precision(void) { RUN(verifier_precision); }
void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
@@ -183,6 +187,7 @@ void test_verifier_sdiv(void) { RUN(verifier_sdiv); }
void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); }
void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_sock_addr(void) { RUN(verifier_sock_addr); }
+void test_verifier_sockmap_mutate(void) { RUN(verifier_sockmap_mutate); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
@@ -200,6 +205,7 @@ void test_verifier_var_off(void) { RUN(verifier_var_off); }
void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
+void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index f09505f8b038..53d6ad8c2257 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -222,7 +222,7 @@ static void test_xdp_adjust_frags_tail_grow(void)
prog = bpf_object__next_program(obj, NULL);
if (bpf_object__load(obj))
- return;
+ goto out;
prog_fd = bpf_program__fd(prog);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
new file mode 100644
index 000000000000..e1bf141d3401
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <bpf/btf.h>
+#include <linux/if_link.h>
+#include <linux/udp.h>
+#include <net/if.h>
+#include <unistd.h>
+
+#include "xdp_flowtable.skel.h"
+
+#define TX_NETNS_NAME "ns0"
+#define RX_NETNS_NAME "ns1"
+
+#define TX_NAME "v0"
+#define FORWARD_NAME "v1"
+#define RX_NAME "d0"
+
+#define TX_MAC "00:00:00:00:00:01"
+#define FORWARD_MAC "00:00:00:00:00:02"
+#define RX_MAC "00:00:00:00:00:03"
+#define DST_MAC "00:00:00:00:00:04"
+
+#define TX_ADDR "10.0.0.1"
+#define FORWARD_ADDR "10.0.0.2"
+#define RX_ADDR "20.0.0.1"
+#define DST_ADDR "20.0.0.2"
+
+#define PREFIX_LEN "8"
+#define N_PACKETS 10
+#define UDP_PORT 12345
+#define UDP_PORT_STR "12345"
+
+static int send_udp_traffic(void)
+{
+ struct sockaddr_storage addr;
+ int i, sock;
+
+ if (make_sockaddr(AF_INET, DST_ADDR, UDP_PORT, &addr, NULL))
+ return -EINVAL;
+
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0)
+ return sock;
+
+ for (i = 0; i < N_PACKETS; i++) {
+ unsigned char buf[] = { 0xaa, 0xbb, 0xcc };
+ int n;
+
+ n = sendto(sock, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM,
+ (struct sockaddr *)&addr, sizeof(addr));
+ if (n != sizeof(buf)) {
+ close(sock);
+ return -EINVAL;
+ }
+
+ usleep(50000); /* 50ms */
+ }
+ close(sock);
+
+ return 0;
+}
+
+void test_xdp_flowtable(void)
+{
+ struct xdp_flowtable *skel = NULL;
+ struct nstoken *tok = NULL;
+ int iifindex, stats_fd;
+ __u32 value, key = 0;
+ struct bpf_link *link;
+
+ if (SYS_NOFAIL("nft -v")) {
+ fprintf(stdout, "Missing required nft tool\n");
+ test__skip();
+ return;
+ }
+
+ SYS(out, "ip netns add " TX_NETNS_NAME);
+ SYS(out, "ip netns add " RX_NETNS_NAME);
+
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ SYS(out, "sysctl -qw net.ipv4.conf.all.forwarding=1");
+
+ SYS(out, "ip link add " TX_NAME " type veth peer " FORWARD_NAME);
+ SYS(out, "ip link set " TX_NAME " netns " TX_NETNS_NAME);
+ SYS(out, "ip link set dev " FORWARD_NAME " address " FORWARD_MAC);
+ SYS(out,
+ "ip addr add " FORWARD_ADDR "/" PREFIX_LEN " dev " FORWARD_NAME);
+ SYS(out, "ip link set dev " FORWARD_NAME " up");
+
+ SYS(out, "ip link add " RX_NAME " type dummy");
+ SYS(out, "ip link set dev " RX_NAME " address " RX_MAC);
+ SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME);
+ SYS(out, "ip link set dev " RX_NAME " up");
+
+ /* configure the flowtable */
+ SYS(out, "nft add table ip filter");
+ SYS(out,
+ "nft add flowtable ip filter f { hook ingress priority 0\\; "
+ "devices = { " FORWARD_NAME ", " RX_NAME " }\\; }");
+ SYS(out,
+ "nft add chain ip filter forward "
+ "{ type filter hook forward priority 0\\; }");
+ SYS(out,
+ "nft add rule ip filter forward ip protocol udp th dport "
+ UDP_PORT_STR " flow add @f");
+
+ /* Avoid ARP calls */
+ SYS(out,
+ "ip -4 neigh add " DST_ADDR " lladdr " DST_MAC " dev " RX_NAME);
+
+ close_netns(tok);
+ tok = open_netns(TX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME);
+ SYS(out, "ip link set dev " TX_NAME " address " TX_MAC);
+ SYS(out, "ip link set dev " TX_NAME " up");
+ SYS(out, "ip route add default via " FORWARD_ADDR);
+
+ close_netns(tok);
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ iifindex = if_nametoindex(FORWARD_NAME);
+ if (!ASSERT_NEQ(iifindex, 0, "iifindex"))
+ goto out;
+
+ skel = xdp_flowtable__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skel->progs.xdp_flowtable_do_lookup,
+ iifindex);
+ if (!ASSERT_OK_PTR(link, "prog_attach"))
+ goto out;
+
+ close_netns(tok);
+ tok = open_netns(TX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ if (!ASSERT_OK(send_udp_traffic(), "send udp"))
+ goto out;
+
+ close_netns(tok);
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ stats_fd = bpf_map__fd(skel->maps.stats);
+ if (!ASSERT_OK(bpf_map_lookup_elem(stats_fd, &key, &value),
+ "bpf_map_update_elem stats"))
+ goto out;
+
+ ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed");
+out:
+ xdp_flowtable__destroy(skel);
+ if (tok)
+ close_netns(tok);
+ SYS_NOFAIL("ip netns del " TX_NETNS_NAME);
+ SYS_NOFAIL("ip netns del " RX_NETNS_NAME);
+}
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
index 55f10563208d..bb0acd79d28a 100644
--- a/tools/testing/selftests/bpf/progs/arena_atomics.c
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -25,20 +25,13 @@ bool skip_tests = true;
__u32 pid = 0;
-#undef __arena
-#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
-#define __arena __attribute__((address_space(1)))
-#else
-#define __arena SEC(".addr_space.1")
-#endif
-
-__u64 __arena add64_value = 1;
-__u64 __arena add64_result = 0;
-__u32 __arena add32_value = 1;
-__u32 __arena add32_result = 0;
-__u64 __arena add_stack_value_copy = 0;
-__u64 __arena add_stack_result = 0;
-__u64 __arena add_noreturn_value = 1;
+__u64 __arena_global add64_value = 1;
+__u64 __arena_global add64_result = 0;
+__u32 __arena_global add32_value = 1;
+__u32 __arena_global add32_result = 0;
+__u64 __arena_global add_stack_value_copy = 0;
+__u64 __arena_global add_stack_result = 0;
+__u64 __arena_global add_noreturn_value = 1;
SEC("raw_tp/sys_enter")
int add(const void *ctx)
@@ -58,13 +51,13 @@ int add(const void *ctx)
return 0;
}
-__s64 __arena sub64_value = 1;
-__s64 __arena sub64_result = 0;
-__s32 __arena sub32_value = 1;
-__s32 __arena sub32_result = 0;
-__s64 __arena sub_stack_value_copy = 0;
-__s64 __arena sub_stack_result = 0;
-__s64 __arena sub_noreturn_value = 1;
+__s64 __arena_global sub64_value = 1;
+__s64 __arena_global sub64_result = 0;
+__s32 __arena_global sub32_value = 1;
+__s32 __arena_global sub32_result = 0;
+__s64 __arena_global sub_stack_value_copy = 0;
+__s64 __arena_global sub_stack_result = 0;
+__s64 __arena_global sub_noreturn_value = 1;
SEC("raw_tp/sys_enter")
int sub(const void *ctx)
@@ -84,8 +77,8 @@ int sub(const void *ctx)
return 0;
}
-__u64 __arena and64_value = (0x110ull << 32);
-__u32 __arena and32_value = 0x110;
+__u64 __arena_global and64_value = (0x110ull << 32);
+__u32 __arena_global and32_value = 0x110;
SEC("raw_tp/sys_enter")
int and(const void *ctx)
@@ -101,8 +94,8 @@ int and(const void *ctx)
return 0;
}
-__u32 __arena or32_value = 0x110;
-__u64 __arena or64_value = (0x110ull << 32);
+__u32 __arena_global or32_value = 0x110;
+__u64 __arena_global or64_value = (0x110ull << 32);
SEC("raw_tp/sys_enter")
int or(const void *ctx)
@@ -117,8 +110,8 @@ int or(const void *ctx)
return 0;
}
-__u64 __arena xor64_value = (0x110ull << 32);
-__u32 __arena xor32_value = 0x110;
+__u64 __arena_global xor64_value = (0x110ull << 32);
+__u32 __arena_global xor32_value = 0x110;
SEC("raw_tp/sys_enter")
int xor(const void *ctx)
@@ -133,12 +126,12 @@ int xor(const void *ctx)
return 0;
}
-__u32 __arena cmpxchg32_value = 1;
-__u32 __arena cmpxchg32_result_fail = 0;
-__u32 __arena cmpxchg32_result_succeed = 0;
-__u64 __arena cmpxchg64_value = 1;
-__u64 __arena cmpxchg64_result_fail = 0;
-__u64 __arena cmpxchg64_result_succeed = 0;
+__u32 __arena_global cmpxchg32_value = 1;
+__u32 __arena_global cmpxchg32_result_fail = 0;
+__u32 __arena_global cmpxchg32_result_succeed = 0;
+__u64 __arena_global cmpxchg64_value = 1;
+__u64 __arena_global cmpxchg64_result_fail = 0;
+__u64 __arena_global cmpxchg64_result_succeed = 0;
SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
@@ -156,10 +149,10 @@ int cmpxchg(const void *ctx)
return 0;
}
-__u64 __arena xchg64_value = 1;
-__u64 __arena xchg64_result = 0;
-__u32 __arena xchg32_value = 1;
-__u32 __arena xchg32_result = 0;
+__u64 __arena_global xchg64_value = 1;
+__u64 __arena_global xchg64_result = 0;
+__u32 __arena_global xchg32_value = 1;
+__u32 __arena_global xchg32_result = 0;
SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
@@ -176,3 +169,79 @@ int xchg(const void *ctx)
return 0;
}
+
+__u64 __arena_global uaf_sink;
+volatile __u64 __arena_global uaf_recovery_fails;
+
+SEC("syscall")
+int uaf(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
+ !defined(__TARGET_ARCH_x86)
+ __u32 __arena *page32;
+ __u64 __arena *page64;
+ void __arena *page;
+
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ bpf_arena_free_pages(&arena, page, 1);
+ uaf_recovery_fails = 24;
+
+ page32 = (__u32 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page32, 1);
+ uaf_recovery_fails -= 1;
+
+ page64 = (__u64 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page64, 1);
+ uaf_recovery_fails -= 1;
+#endif
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
index 1e6ac187a6a0..81eaa94afeb0 100644
--- a/tools/testing/selftests/bpf/progs/arena_htab.c
+++ b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -18,25 +19,35 @@ void __arena *htab_for_user;
bool skip = false;
int zero = 0;
+char __arena arr1[100000];
+char arr2[1000];
SEC("syscall")
int arena_htab_llvm(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
struct htab __arena *htab;
+ char __arena *arr = arr1;
__u64 i;
htab = bpf_alloc(sizeof(*htab));
cast_kern(htab);
htab_init(htab);
+ cast_kern(arr);
+
/* first run. No old elems in the table */
- for (i = zero; i < 1000; i++)
+ for (i = zero; i < 100000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ arr[i] = i;
+ }
- /* should replace all elems with new ones */
- for (i = zero; i < 1000; i++)
+ /* should replace some elems with new ones */
+ for (i = zero; i < 1000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ /* Access mem to make the verifier use bounded loop logic */
+ arr2[i] = i;
+ }
cast_user(htab);
htab_for_user = htab;
#else
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
index 93bd0600eba0..3a2ddcacbea6 100644
--- a/tools/testing/selftests/bpf/progs/arena_list.c
+++ b/tools/testing/selftests/bpf/progs/arena_list.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 3c9ffe340312..02f552e7fd4d 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -65,7 +65,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
}
SEC("struct_ops")
-void BPF_PROG(dctcp_init, struct sock *sk)
+void BPF_PROG(bpf_dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -77,7 +77,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
(void *)fallback, sizeof(fallback)) == -EBUSY)
ebusy_cnt++;
- /* Switch back to myself and the recurred dctcp_init()
+ /* Switch back to myself and the recurred bpf_dctcp_init()
* will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION),
* except the last "cdg" one.
*/
@@ -112,7 +112,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
}
SEC("struct_ops")
-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
+__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk)
{
struct bpf_dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -122,7 +122,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
}
SEC("struct_ops")
-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
+void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -161,12 +161,12 @@ static void dctcp_react_to_loss(struct sock *sk)
}
SEC("struct_ops")
-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
+void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
dctcp_react_to_loss(sk);
- /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+ /* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only
* one loss-adjustment per RTT.
*/
}
@@ -208,7 +208,7 @@ static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
}
SEC("struct_ops")
-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
+void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -227,7 +227,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
}
SEC("struct_ops")
-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
+__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk)
{
const struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -237,28 +237,28 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
SEC("struct_ops")
-void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
tcp_reno_cong_avoid(sk, ack, acked);
}
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
- .init = (void *)dctcp_init,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp_nouse",
};
SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
- .init = (void *)dctcp_init,
- .in_ack_event = (void *)dctcp_update_alpha,
- .cwnd_event = (void *)dctcp_cwnd_event,
- .ssthresh = (void *)dctcp_ssthresh,
- .cong_avoid = (void *)dctcp_cong_avoid,
- .undo_cwnd = (void *)dctcp_cwnd_undo,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .in_ack_event = (void *)bpf_dctcp_update_alpha,
+ .cwnd_event = (void *)bpf_dctcp_cwnd_event,
+ .ssthresh = (void *)bpf_dctcp_ssthresh,
+ .cong_avoid = (void *)bpf_dctcp_cong_avoid,
+ .undo_cwnd = (void *)bpf_dctcp_cwnd_undo,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp",
};
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
index c5969ca6f26b..564835ba7d51 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
@@ -6,12 +6,6 @@
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
index 85fa710fad90..9f0e0705b2bf 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
@@ -6,12 +6,6 @@
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 3);
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index fb2f5513e29e..81097a3f15eb 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -7,9 +7,9 @@
*
* The test_loader sequentially loads each program in a skeleton.
* Programs could be loaded in privileged and unprivileged modes.
- * - __success, __failure, __msg imply privileged mode;
- * - __success_unpriv, __failure_unpriv, __msg_unpriv imply
- * unprivileged mode.
+ * - __success, __failure, __msg, __regex imply privileged mode;
+ * - __success_unpriv, __failure_unpriv, __msg_unpriv, __regex_unpriv
+ * imply unprivileged mode.
* If combination of privileged and unprivileged attributes is present
* both modes are used. If none are present privileged mode is implied.
*
@@ -24,6 +24,9 @@
* Multiple __msg attributes could be specified.
* __msg_unpriv Same as __msg but for unprivileged mode.
*
+ * __regex Same as __msg, but using a regular expression.
+ * __regex_unpriv Same as __msg_unpriv but using a regular expression.
+ *
* __success Expect program load success in privileged mode.
* __success_unpriv Expect program load success in unprivileged mode.
*
@@ -59,10 +62,12 @@
* __auxiliary_unpriv Same, but load program in unprivileged mode.
*/
#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
+#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex)))
#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg)))
+#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex)))
#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
@@ -135,4 +140,8 @@
/* make it look to the compiler like the value is read and written */
#define __sink(expr) asm volatile("" : "+g"(expr))
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
#endif
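As a minimal illustrative sketch of the new tag (hypothetical program name; the dynptr_fail.c and rbtree_fail.c hunks further below use the same pattern), __regex is applied to a test program exactly like __msg, except that the expected verifier output is matched as a regular expression rather than a fixed substring:

	SEC("?raw_tp")
	__failure __regex("R[0-9]+ invalid mem access 'scalar'")
	int regex_usage_example(void *ctx)
	{
		/* hypothetical body that would trigger an invalid memory access on
		 * an arbitrary register, so the register number in the verifier
		 * message is covered by the regular expression above
		 */
		return 0;
	}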
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
index 7a1e64c6c065..fd8106831c32 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_success.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -12,6 +12,31 @@ char _license[] SEC("license") = "GPL";
int pid, nr_cpus;
+struct kptr_nested {
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_pair {
+ struct bpf_cpumask __kptr * mask_1;
+ struct bpf_cpumask __kptr * mask_2;
+};
+
+struct kptr_nested_mid {
+ int dummy;
+ struct kptr_nested m;
+};
+
+struct kptr_nested_deep {
+ struct kptr_nested_mid ptrs[2];
+ struct kptr_nested_pair ptr_pairs[3];
+};
+
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
+private(MASK) static struct kptr_nested global_mask_nested[2];
+private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
+
static bool is_test_task(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
@@ -461,6 +486,152 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Kptr arrays with one element are special cased, being treated
+ * just like a single pointer.
+ */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask_array_one[0];
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
+ struct bpf_cpumask **mask1)
+{
+ struct bpf_cpumask *local;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Check that two kptrs in the array work independently */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ bpf_rcu_read_lock();
+
+ local = bpf_kptr_xchg(mask0, local);
+ if (local) {
+ err = 1;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, NULL] */
+ if (!*mask0 || *mask1) {
+ err = 2;
+ goto err_exit;
+ }
+
+ local = create_cpumask();
+ if (!local) {
+ err = 9;
+ goto err_exit;
+ }
+
+ local = bpf_kptr_xchg(mask1, local);
+ if (local) {
+ err = 10;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, <mask 1>] */
+ if (!*mask0 || !*mask1 || *mask0 == *mask1) {
+ err = 11;
+ goto err_exit;
+ }
+
+err_exit:
+ if (local)
+ bpf_cpumask_release(local);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
+}
+
+/* Ensure that field->offset has been correctly advanced from one nested
+ * struct or array sub-tree to another. In the case of kptr_nested_deep,
+ * it comprises two sub-trees: ptrs and ptr_pairs. By calling
+ * bpf_kptr_xchg() on every single kptr in both nested sub-trees, the
+ * verifier should reject the program if the field->offset of any kptr is
+ * incorrect.
+ *
+ * For instance, if we have 10 kptrs in a nested struct and a program that
+ * accesses each kptr individually with bpf_kptr_xchg(), the compiler
+ * should emit instructions for 10 different offsets if everything works
+ * correctly. If the field->offset values of any pair of them are
+ * incorrectly the same, the number of unique offsets in the btf_record
+ * for this nested struct will be less than 10, and the verifier will fail
+ * to find some of the offsets emitted by the compiler.
+ *
+ * Even if the field->offset values of kptrs are not duplicated, the
+ * verifier should fail to find a btf_field for the instruction accessing a
+ * kptr if the corresponding field->offset is pointing to a random
+ * incorrect offset.
+ */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
+{
+ int r, i;
+
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
+ &global_mask_nested_deep.ptrs[1].m.mask);
+ if (r)
+ return r;
+
+ for (i = 0; i < 3; i++) {
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
+ &global_mask_nested_deep.ptr_pairs[i].mask_2);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local;
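For reference, the private(NAME) annotations on the global kptr arrays above come from the selftests' section-placement macro (its definition appears as context in the rbtree.c hunk later in this patch); a simplified sketch of the convention, with an illustrative variable name:

	/* place the variable in a hidden, 8-byte-aligned .data.<name> section;
	 * globals tagged with the same name end up in the same special section
	 */
	#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

	/* e.g. the single-pointer form, analogous to the arrays added above */
	private(MASK) static struct bpf_cpumask __kptr *global_mask_illustrative;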
diff --git a/tools/testing/selftests/bpf/progs/crypto_bench.c b/tools/testing/selftests/bpf/progs/crypto_bench.c
index e61fe0882293..4ac956b26240 100644
--- a/tools/testing/selftests/bpf/progs/crypto_bench.c
+++ b/tools/testing/selftests/bpf/progs/crypto_bench.c
@@ -57,7 +57,7 @@ int crypto_encrypt(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
v = crypto_ctx_value_lookup();
if (!v) {
@@ -73,9 +73,8 @@ int crypto_encrypt(struct __sk_buff *skb)
bpf_dynptr_from_skb(skb, 0, &psrc);
bpf_dynptr_from_mem(dst, len, 0, &pdst);
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
__sync_add_and_fetch(&hits, 1);
return 0;
@@ -84,7 +83,7 @@ int crypto_encrypt(struct __sk_buff *skb)
SEC("tc")
int crypto_decrypt(struct __sk_buff *skb)
{
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
@@ -98,9 +97,8 @@ int crypto_decrypt(struct __sk_buff *skb)
bpf_dynptr_from_skb(skb, 0, &psrc);
bpf_dynptr_from_mem(dst, len, 0, &pdst);
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
__sync_add_and_fetch(&hits, 1);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
index 1be0a3fa5efd..645be6cddf36 100644
--- a/tools/testing/selftests/bpf/progs/crypto_sanity.c
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -89,7 +89,7 @@ int decrypt_sanity(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
int err;
err = skb_dynptr_validate(skb, &psrc);
@@ -114,12 +114,8 @@ int decrypt_sanity(struct __sk_buff *skb)
* production code, a percpu map should be used to store the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
- /* iv dynptr has to be initialized with 0 size, but proper memory region
- * has to be provided anyway
- */
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
return TC_ACT_SHOT;
}
@@ -129,7 +125,7 @@ int encrypt_sanity(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
int err;
status = 0;
@@ -156,12 +152,8 @@ int encrypt_sanity(struct __sk_buff *skb)
* production code, a percpu map should be used to store the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
- /* iv dynptr has to be initialized with 0 size, but proper memory region
- * has to be provided anyway
- */
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
return TC_ACT_SHOT;
}
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 66a60bfb5867..e35bc1eac52a 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -964,7 +964,7 @@ int dynptr_invalidate_slice_reinit(void *ctx)
* mem_or_null pointers.
*/
SEC("?raw_tp")
-__failure __msg("R1 type=scalar expected=percpu_ptr_")
+__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx)
{
struct bpf_dynptr ptr;
@@ -982,7 +982,7 @@ int dynptr_invalidate_slice_or_null(void *ctx)
/* Destruction of a dynptr should also invalidate any slices obtained from it */
SEC("?raw_tp")
-__failure __msg("R7 invalid mem access 'scalar'")
+__failure __regex("R[0-9]+ invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx)
{
struct bpf_dynptr ptr1;
@@ -1069,7 +1069,7 @@ int dynptr_read_into_slot(void *ctx)
/* bpf_dynptr_slice()s are read-only and cannot be written to */
SEC("?tc")
-__failure __msg("R0 cannot write into rdonly_mem")
+__failure __regex("R[0-9]+ cannot write into rdonly_mem")
int skb_invalid_slice_write(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
@@ -1686,3 +1686,27 @@ int test_dynptr_skb_small_buff(struct __sk_buff *skb)
return !!data;
}
+
+__noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
+{
+ long ret = 0;
+ /* Keep this global function non-empty so that the compiler does not
+ * optimize away the call to it.
+ */
+ __sink(ret);
+ return ret;
+}
+
+SEC("?raw_tp")
+__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr")
+int test_dynptr_reg_type(void *ctx)
+{
+ struct task_struct *current = NULL;
+ /* R1 should be holding a PTR_TO_BTF_ID, so this shouldn't be a
+ * reg->type that can be passed to a function accepting an
+ * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch
+ * this.
+ */
+ global_call_bpf_dynptr((const struct bpf_dynptr *)current);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
index 8956eb78a226..2011cacdeb18 100644
--- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -5,13 +5,12 @@
char _license[] SEC("license") = "GPL";
-extern const void bpf_fentry_test1 __ksym;
+extern int bpf_fentry_test1(int a) __ksym;
+extern int bpf_modify_return_test(int a, int *b) __ksym;
+
extern const void bpf_fentry_test2 __ksym;
extern const void bpf_fentry_test3 __ksym;
extern const void bpf_fentry_test4 __ksym;
-extern const void bpf_modify_return_test __ksym;
-extern const void bpf_fentry_test6 __ksym;
-extern const void bpf_fentry_test7 __ksym;
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
index 1c2b6c1616b0..645b2c9f7867 100644
--- a/tools/testing/selftests/bpf/progs/ip_check_defrag.c
+++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
@@ -12,7 +12,7 @@
#define IP_OFFSET 0x1FFF
#define NEXTHDR_FRAGMENT 44
-extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
void *buffer, uint32_t buffer__sz) __ksym;
@@ -42,7 +42,7 @@ static bool is_frag_v6(struct ipv6hdr *ip6h)
return ip6h->nexthdr == NEXTHDR_FRAGMENT;
}
-static int handle_v4(struct sk_buff *skb)
+static int handle_v4(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
u8 iph_buf[20] = {};
@@ -64,7 +64,7 @@ static int handle_v4(struct sk_buff *skb)
return NF_ACCEPT;
}
-static int handle_v6(struct sk_buff *skb)
+static int handle_v6(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ipv6hdr *ip6h;
@@ -89,9 +89,9 @@ static int handle_v6(struct sk_buff *skb)
SEC("netfilter")
int defrag(struct bpf_nf_ctx *ctx)
{
- struct sk_buff *skb = ctx->skb;
+ struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
- switch (bpf_ntohs(skb->protocol)) {
+ switch (bpf_ntohs(ctx->skb->protocol)) {
case ETH_P_IP:
return handle_v4(skb);
case ETH_P_IPV6:
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index fe65e0952a1e..16bdc3e25591 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -7,8 +7,6 @@
#include "bpf_misc.h"
#include "bpf_compiler.h"
-#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
-
static volatile int zero = 0;
int my_pid;
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index cf68d1e48a0f..f502f755f567 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -177,4 +177,41 @@ int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
return actual != expected ? -1 : 0;
}
+struct ctx_val {
+ struct bpf_testmod_ctx __kptr *ctx;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct ctx_val);
+} ctx_map SEC(".maps");
+
+SEC("tc")
+int kfunc_call_ctx(struct __sk_buff *skb)
+{
+ struct bpf_testmod_ctx *ctx;
+ int err = 0;
+
+ ctx = bpf_testmod_ctx_create(&err);
+ if (!ctx && !err)
+ err = -1;
+ if (ctx) {
+ int key = 0;
+ struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key);
+
+ /* Transfer ctx to map to be freed via implicit dtor call
+ * on cleanup.
+ */
+ if (ctx_val)
+ ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx);
+ if (ctx) {
+ bpf_testmod_ctx_release(ctx);
+ err = -1;
+ }
+ }
+ return err;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
index bbba9eb46551..bd8b7fb7061e 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
@@ -4,8 +4,7 @@
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include "bpf_kfuncs.h"
-
-#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
index d49070803e22..0835b5edf685 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
@@ -25,7 +25,7 @@ int BPF_PROG(trigger)
static int check_cookie(__u64 val, __u64 *result)
{
- long *cookie;
+ __u64 *cookie;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
index 26205ca80679..421f40835acd 100644
--- a/tools/testing/selftests/bpf/progs/linked_list.c
+++ b/tools/testing/selftests/bpf/progs/linked_list.c
@@ -4,13 +4,26 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
#include "linked_list.h"
+struct head_nested_inner {
+ struct bpf_spin_lock lock;
+ struct bpf_list_head head __contains(foo, node2);
+};
+
+struct head_nested {
+ int dummy;
+ struct head_nested_inner inner;
+};
+
+private(C) struct bpf_spin_lock glock_c;
+private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2);
+private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2);
+
+private(D) struct head_nested ghead_nested;
+
static __always_inline
int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
@@ -310,6 +323,32 @@ int global_list_push_pop(void *ctx)
}
SEC("tc")
+int global_list_push_pop_nested(void *ctx)
+{
+ return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head);
+}
+
+SEC("tc")
+int global_list_array_push_pop(void *ctx)
+{
+ int r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[0]);
+ if (r)
+ return r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[1]);
+ if (r)
+ return r;
+
+ /* An array with only one element is a special case: the verifier
+ * treats it just like a plain bpf_list_head variable, not an
+ * array.
+ */
+ return test_list_push_pop(&glock_c, &ghead_array_one[0]);
+}
+
+SEC("tc")
int map_list_push_pop_multiple(void *ctx)
{
struct map_value *v;
diff --git a/tools/testing/selftests/bpf/progs/map_percpu_stats.c b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
index 10b2325c1720..63245785eb69 100644
--- a/tools/testing/selftests/bpf/progs/map_percpu_stats.c
+++ b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
@@ -7,7 +7,7 @@
__u32 target_id;
-__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
+__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h
index 83d33931136e..1784b496be2e 100644
--- a/tools/testing/selftests/bpf/progs/nested_trust_common.h
+++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h
@@ -7,6 +7,6 @@
#include <stdbool.h>
bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
-bool bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+__u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
#endif /* _NESTED_TRUST_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
index ea39497f11ed..3568ec450100 100644
--- a/tools/testing/selftests/bpf/progs/nested_trust_failure.c
+++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
@@ -31,14 +31,6 @@ int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_
return 0;
}
-SEC("tp_btf/task_newtask")
-__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc")
-int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags)
-{
- bpf_cpumask_first_zero(&task->cpus_mask);
- return 0;
-}
-
/* Although R2 is of type sk_buff while sock_common is expected, we will hit the untrusted ptr first. */
SEC("tp_btf/tcp_probe")
__failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c
index 833840bffd3b..2b66953ca82e 100644
--- a/tools/testing/selftests/bpf/progs/nested_trust_success.c
+++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c
@@ -32,3 +32,11 @@ int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb)
bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0);
return 0;
}
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_first_zero(&task->cpus_mask);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
index c0062645fc68..9e067dcbf607 100644
--- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c
+++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
#include <errno.h>
@@ -23,10 +24,6 @@ bool skip = false;
#define BADPTR 0
#endif
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 6957d9f2805e..8bd1ebd7d6af 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -9,6 +9,7 @@
#include "err.h"
#include "bpf_experimental.h"
#include "bpf_compiler.h"
+#include "bpf_misc.h"
#ifndef NULL
#define NULL 0
@@ -133,10 +134,6 @@ struct {
__uint(max_entries, 16);
} disallowed_exec_inodes SEC(".maps");
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (int)(sizeof(arr) / sizeof(arr[0]))
-#endif
-
static INLINE bool IS_ERR(const void* ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
index b09f4fffe57c..a3620c15c136 100644
--- a/tools/testing/selftests/bpf/progs/rbtree.c
+++ b/tools/testing/selftests/bpf/progs/rbtree.c
@@ -13,6 +13,15 @@ struct node_data {
struct bpf_rb_node node;
};
+struct root_nested_inner {
+ struct bpf_spin_lock glock;
+ struct bpf_rb_root root __contains(node_data, node);
+};
+
+struct root_nested {
+ struct root_nested_inner inner;
+};
+
long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};
@@ -20,6 +29,9 @@ long first_data[2] = {-1, -1};
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
+private(B) struct root_nested groot_nested;
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
@@ -72,6 +84,12 @@ long rbtree_add_nodes(void *ctx)
}
SEC("tc")
+long rbtree_add_nodes_nested(void *ctx)
+{
+ return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);
+}
+
+SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
struct bpf_rb_node *res = NULL;
@@ -110,6 +128,65 @@ err_out:
}
SEC("tc")
+long rbtree_add_and_remove_array(void *ctx)
+{
+ struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL;
+ struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}};
+ struct node_data *n;
+ long k1 = -1, k2 = -1, k3 = -1;
+ int i, j;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j]));
+ if (!nodes[i][j])
+ goto err_out;
+ nodes[i][j]->key = i * 2 + j;
+ }
+ }
+
+ bpf_spin_lock(&glock);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
+ res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
+ res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
+ res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
+ bpf_spin_unlock(&glock);
+
+ if (res1) {
+ n = container_of(res1, struct node_data, node);
+ k1 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res2) {
+ n = container_of(res2, struct node_data, node);
+ k2 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res3) {
+ n = container_of(res3, struct node_data, node);
+ k3 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (k1 != 0 || k2 != 2 || k3 != 4)
+ return 2;
+
+ return 0;
+
+err_out:
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ if (nodes[i][j])
+ bpf_obj_drop(nodes[i][j]);
+ }
+ }
+ return 1;
+}
+
+SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
struct bpf_rb_node *res = NULL;
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
index 3fecf1c6dfe5..b722a1e1ddef 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=3 alloc_insn=10")
+__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+")
long rbtree_api_remove_no_drop(void *ctx)
{
struct bpf_rb_node *res;
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index 1553b9c16aa7..f8d4b7cfcd68 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=4 alloc_insn=21")
+__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+")
long rbtree_refcounted_node_ref_escapes(void *ctx)
{
struct node_acquire *n, *m;
@@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=3 alloc_insn=9")
+__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+")
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
{
struct node_acquire *n, *m;
diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c
index 7a438600ae98..60518aed1ffc 100644
--- a/tools/testing/selftests/bpf/progs/setget_sockopt.c
+++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c
@@ -6,10 +6,7 @@
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
extern unsigned long CONFIG_HZ __kconfig;
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
index db4abd2682fc..3bb4451524a1 100644
--- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -33,6 +33,8 @@ int main_prog(struct __sk_buff *skb)
struct iphdr *ip = NULL;
struct tcphdr *tcp;
__u8 proto = 0;
+ int urg_ptr;
+ u32 offset;
if (!(ip = get_iphdr(skb)))
goto out;
@@ -48,7 +50,14 @@ int main_prog(struct __sk_buff *skb)
if (!tcp)
goto out;
- return tcp->urg_ptr;
+ urg_ptr = tcp->urg_ptr;
+
+ /* Checksum validation part */
+ proto++;
+ offset = sizeof(struct ethhdr) + offsetof(struct iphdr, protocol);
+ bpf_skb_store_bytes(skb, offset, &proto, sizeof(proto), BPF_F_RECOMPUTE_CSUM);
+
+ return urg_ptr;
out:
return -1;
}
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
new file mode 100644
index 000000000000..56b787a89876
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_do_detach;
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
index 3494ca30fa7f..4a4e0b8d9b72 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -7,10 +7,6 @@
#include "bpf_experimental.h"
#include "bpf_misc.h"
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
struct generic_map_value {
void *data;
};
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
index 77ad8adf68da..f7b330ddd007 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
@@ -9,10 +10,14 @@
#define EINVAL 22
#define ENOENT 2
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
extern unsigned long CONFIG_HZ __kconfig;
int test_einval_bpf_tuple = 0;
int test_einval_reserved = 0;
+int test_einval_reserved_new = 0;
int test_einval_netns_id = 0;
int test_einval_len_opts = 0;
int test_eproto_l4proto = 0;
@@ -22,6 +27,11 @@ int test_eafnosupport = 0;
int test_alloc_entry = -EINVAL;
int test_insert_entry = -EAFNOSUPPORT;
int test_succ_lookup = -ENOENT;
+int test_ct_zone_id_alloc_entry = -EINVAL;
+int test_ct_zone_id_insert_entry = -EAFNOSUPPORT;
+int test_ct_zone_id_succ_lookup = -ENOENT;
+int test_ct_zone_dir_enoent_lookup = 0;
+int test_ct_zone_id_enoent_lookup = 0;
u32 test_delta_timeout = 0;
u32 test_status = 0;
u32 test_insert_lookup_mark = 0;
@@ -45,6 +55,17 @@ struct bpf_ct_opts___local {
s32 netns_id;
s32 error;
u8 l4proto;
+ u8 dir;
+ u8 reserved[2];
+};
+
+struct bpf_ct_opts___new {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u16 ct_zone_id;
+ u8 ct_zone_dir;
u8 reserved[3];
} __attribute__((preserve_access_index));
@@ -220,10 +241,97 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
}
}
+static __always_inline void
+nf_ct_opts_new_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ void *ctx)
+{
+ struct bpf_ct_opts___new opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
+ struct bpf_sock_tuple bpf_tuple;
+ struct nf_conn *ct;
+
+ __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
+
+ opts_def.reserved[0] = 1;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.reserved[0] = 0;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_reserved_new = opts_def.error;
+
+ bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+ bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+ bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+ bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+ /* use non-default ct zone */
+ opts_def.ct_zone_id = 10;
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ __u16 sport = bpf_get_prandom_u32();
+ __u16 dport = bpf_get_prandom_u32();
+ union nf_inet_addr saddr = {};
+ union nf_inet_addr daddr = {};
+ struct nf_conn *ct_ins;
+
+ bpf_ct_set_timeout(ct, 10000);
+
+ /* snat */
+ saddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ /* dnat */
+ daddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+
+ ct_ins = bpf_ct_insert_entry(ct);
+ if (ct_ins) {
+ struct nf_conn *ct_lk;
+
+ /* entry should exist in same ct zone we inserted it */
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk) {
+ bpf_ct_release(ct_lk);
+ test_ct_zone_id_succ_lookup = 0;
+ }
+
+ /* entry should not exist with wrong direction */
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_REPL;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_dir_enoent_lookup = opts_def.error;
+
+ /* entry should not exist in default ct zone */
+ opts_def.ct_zone_id = 0;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_id_enoent_lookup = opts_def.error;
+
+ bpf_ct_release(ct_ins);
+ test_ct_zone_id_insert_entry = 0;
+ }
+ test_ct_zone_id_alloc_entry = 0;
+ }
+}
+
SEC("xdp")
int nf_xdp_ct_test(struct xdp_md *ctx)
{
nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
return 0;
}
@@ -231,6 +339,7 @@ SEC("tc")
int nf_skb_ct_test(struct __sk_buff *ctx)
{
nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
index 0e4759ab38ff..a586f087ffeb 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
index 2dde8e3fe4c9..e68667aec6a6 100644
--- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
@@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size)
}
SEC("?lsm.s/bpf")
-__failure __msg("arg#0 expected pointer to stack or dynptr_ptr")
+__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr")
int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size)
{
unsigned long val = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
new file mode 100644
index 000000000000..7ac7e1de34d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_dynptr_nullable_test1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, NULL);
+
+ return 0;
+}
+
+SEC("tc")
+int kfunc_dynptr_nullable_test2(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, &data);
+
+ return 0;
+}
+
+SEC("tc")
+__failure __msg("expected pointer to stack or const struct bpf_dynptr")
+int kfunc_dynptr_nullable_test3(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(NULL, &data);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
new file mode 100644
index 000000000000..350513c0e4c9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* outputs */
+long passed = 0;
+long discarded = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_write(void *ctx)
+{
+ int *foo, cur_pid = bpf_get_current_pid_tgid() >> 32;
+ void *sample1, *sample2;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ if (!sample1)
+ return 0;
+ /* first one can pass */
+ sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ if (!sample2) {
+ bpf_ringbuf_discard(sample1, 0);
+ __sync_fetch_and_add(&discarded, 1);
+ return 0;
+ }
+ /* second one must not */
+ __sync_fetch_and_add(&passed, 1);
+ foo = sample2 + 4084;
+ *foo = 256;
+ bpf_ringbuf_discard(sample1, 0);
+ bpf_ringbuf_discard(sample2, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
index 02e718f06e0f..40531e56776e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
}
SEC("fexit/inet_csk_accept")
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
struct sock *accepted_sk)
{
set_task_info(accepted_sk);
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
index 99d2ea9fb658..f48f85f1bd70 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
@@ -92,7 +92,7 @@ struct {
__uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");
-SEC("sk_skb1")
+SEC("sk_skb/stream_parser")
int bpf_prog1(struct __sk_buff *skb)
{
int *f, two = 2;
@@ -104,7 +104,7 @@ int bpf_prog1(struct __sk_buff *skb)
return skb->len;
}
-SEC("sk_skb2")
+SEC("sk_skb/stream_verdict")
int bpf_prog2(struct __sk_buff *skb)
{
__u32 lport = skb->local_port;
@@ -151,7 +151,7 @@ static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
memcpy(c + offset, "PASS", 4);
}
-SEC("sk_skb3")
+SEC("sk_skb/stream_verdict")
int bpf_prog3(struct __sk_buff *skb)
{
int err, *f, ret = SK_PASS;
@@ -177,9 +177,6 @@ int bpf_prog3(struct __sk_buff *skb)
return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
}
- f = bpf_map_lookup_elem(&sock_skb_opts, &one);
- if (f && *f)
- ret = SK_DROP;
err = bpf_skb_adjust_room(skb, 4, 0, 0);
if (err)
return SK_DROP;
@@ -233,7 +230,7 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
return 0;
}
-SEC("sk_msg1")
+SEC("sk_msg")
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
@@ -263,7 +260,7 @@ int bpf_prog4(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg2")
+SEC("sk_msg")
int bpf_prog6(struct sk_msg_md *msg)
{
int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
@@ -308,7 +305,7 @@ int bpf_prog6(struct sk_msg_md *msg)
#endif
}
-SEC("sk_msg3")
+SEC("sk_msg")
int bpf_prog8(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -329,7 +326,8 @@ int bpf_prog8(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg4")
+
+SEC("sk_msg")
int bpf_prog9(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -347,7 +345,7 @@ int bpf_prog9(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg5")
+SEC("sk_msg")
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 7f74077d6622..548660e299a5 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -10,10 +10,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_compiler.h"
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index 68a75436e8af..81249d119a8b 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -10,10 +10,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_compiler.h"
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index efc3c61f7852..bbdd08764789 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -10,6 +10,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_compiler.h"
+#include "bpf_misc.h"
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
@@ -17,10 +18,6 @@
/* Max supported length of sysctl value string (pow2). */
#define MAX_VALUE_STR_LEN 0x40
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
index 74ec09f040b7..ca8e8734d901 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
@@ -222,17 +222,21 @@ int egress_host(struct __sk_buff *skb)
return TC_ACT_OK;
if (skb_proto(skb_type) == IPPROTO_TCP) {
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
- } else {
- if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC &&
+ } else if (skb_proto(skb_type) == IPPROTO_UDP) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_TAI &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
+ } else {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_REALTIME &&
+ skb->tstamp)
+ inc_errs(EGRESS_ENDHOST);
}
skb->tstamp = EGRESS_ENDHOST_MAGIC;
@@ -252,7 +256,7 @@ int ingress_host(struct __sk_buff *skb)
if (!skb_type)
return TC_ACT_OK;
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
skb->tstamp == EGRESS_FWDNS_MAGIC)
inc_dtimes(INGRESS_ENDHOST);
else
@@ -315,7 +319,6 @@ int egress_fwdns_prio100(struct __sk_buff *skb)
SEC("tc")
int ingress_fwdns_prio101(struct __sk_buff *skb)
{
- __u64 expected_dtime = EGRESS_ENDHOST_MAGIC;
int skb_type;
skb_type = skb_get_type(skb);
@@ -323,29 +326,24 @@ int ingress_fwdns_prio101(struct __sk_buff *skb)
/* Should have been handled in prio100 */
return TC_ACT_SHOT;
- if (skb_proto(skb_type) == IPPROTO_UDP)
- expected_dtime = 0;
-
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
- skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
- skb->tstamp != expected_dtime)
+ (skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp_type != BPF_SKB_CLOCK_TAI) ||
+ skb->tstamp != EGRESS_ENDHOST_MAGIC)
inc_errs(INGRESS_FWDNS_P101);
else
inc_dtimes(INGRESS_FWDNS_P101);
} else {
- if (!fwdns_clear_dtime() && expected_dtime)
+ if (!fwdns_clear_dtime())
inc_errs(INGRESS_FWDNS_P101);
}
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
skb->tstamp = INGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_DELIVERY_MONO))
- inc_errs(SET_DTIME);
- if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_UNSPEC))
+ BPF_SKB_CLOCK_MONOTONIC))
inc_errs(SET_DTIME);
}
@@ -370,7 +368,7 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
- skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
+ skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC ||
skb->tstamp != INGRESS_FWDNS_MAGIC)
inc_errs(EGRESS_FWDNS_P101);
else
@@ -380,14 +378,11 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
inc_errs(EGRESS_FWDNS_P101);
}
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
skb->tstamp = EGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_DELIVERY_MONO))
- inc_errs(SET_DTIME);
- if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_UNSPEC))
+ BPF_SKB_CLOCK_MONOTONIC))
inc_errs(SET_DTIME);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c
index 992400acb957..ab3eae3d6af8 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_link.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_link.c
@@ -4,7 +4,8 @@
#include <linux/bpf.h>
#include <linux/if_ether.h>
-
+#include <linux/stddef.h>
+#include <linux/if_packet.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
@@ -16,7 +17,13 @@ bool seen_tc3;
bool seen_tc4;
bool seen_tc5;
bool seen_tc6;
+bool seen_tc7;
+
+bool set_type;
+
bool seen_eth;
+bool seen_host;
+bool seen_mcast;
SEC("tc/ingress")
int tc1(struct __sk_buff *skb)
@@ -28,8 +35,16 @@ int tc1(struct __sk_buff *skb)
if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
goto out;
seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+ seen_host = skb->pkt_type == PACKET_HOST;
+ if (seen_host && set_type) {
+ eth.h_dest[0] = 4;
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
+ goto fail;
+ bpf_skb_change_type(skb, PACKET_MULTICAST);
+ }
out:
seen_tc1 = true;
+fail:
return TCX_NEXT;
}
@@ -67,3 +82,21 @@ int tc6(struct __sk_buff *skb)
seen_tc6 = true;
return TCX_PASS;
}
+
+SEC("tc/ingress")
+int tc7(struct __sk_buff *skb)
+{
+ struct ethhdr eth = {};
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+ goto out;
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+ goto out;
+ if (eth.h_dest[0] == 4 && set_type) {
+ seen_mcast = skb->pkt_type == PACKET_MULTICAST;
+ bpf_skb_change_type(skb, PACKET_HOST);
+ }
+out:
+ seen_tc7 = true;
+ return TCX_PASS;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
index c8e4553648bf..44ee0d037f95 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
@@ -9,6 +9,7 @@
#include "bpf_kfuncs.h"
#include "test_siphash.h"
#include "test_tcp_custom_syncookie.h"
+#include "bpf_misc.h"
#define MAX_PACKET_OFF 0xffff
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
index 29a6a53cf229..f8b1b7e68d2e 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
@@ -7,8 +7,6 @@
#define __packed __attribute__((__packed__))
#define __force
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
#define swap(a, b) \
do { \
typeof(a) __tmp = (a); \
diff --git a/tools/testing/selftests/bpf/progs/timer_lockup.c b/tools/testing/selftests/bpf/progs/timer_lockup.c
new file mode 100644
index 000000000000..3e520133281e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_lockup.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer1_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer2_map SEC(".maps");
+
+int timer1_err;
+int timer2_err;
+
+static int timer_cb1(void *map, int *k, struct elem *v)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer2_map, &key);
+ if (timer)
+ timer2_err = bpf_timer_cancel(timer);
+
+ return 0;
+}
+
+static int timer_cb2(void *map, int *k, struct elem *v)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer1_map, &key);
+ if (timer)
+ timer1_err = bpf_timer_cancel(timer);
+
+ return 0;
+}
+
+SEC("tc")
+int timer1_prog(void *ctx)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer1_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer1_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb1);
+ bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN);
+ }
+
+ return 0;
+}
+
+SEC("tc")
+int timer2_prog(void *ctx)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer2_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer2_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb2);
+ bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c
index 515daef3c84b..c435a3a8328a 100644
--- a/tools/testing/selftests/bpf/progs/tracing_struct.c
+++ b/tools/testing/selftests/bpf/progs/tracing_struct.c
@@ -18,11 +18,6 @@ struct bpf_testmod_struct_arg_3 {
int b[];
};
-struct bpf_testmod_struct_arg_4 {
- u64 a;
- int b;
-};
-
long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs;
__u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3;
long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret;
@@ -30,9 +25,6 @@ long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret;
long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret;
long t5_ret;
int t6;
-long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret;
-long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret;
-
SEC("fentry/bpf_testmod_test_struct_arg_1")
int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c)
@@ -138,50 +130,4 @@ int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a)
return 0;
}
-SEC("fentry/bpf_testmod_test_struct_arg_7")
-int BPF_PROG2(test_struct_arg_12, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f)
-{
- t7_a = a;
- t7_b = (long)b;
- t7_c = c;
- t7_d = d;
- t7_e = (long)e;
- t7_f_a = f.a;
- t7_f_b = f.b;
- return 0;
-}
-
-SEC("fexit/bpf_testmod_test_struct_arg_7")
-int BPF_PROG2(test_struct_arg_13, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f, int, ret)
-{
- t7_ret = ret;
- return 0;
-}
-
-SEC("fentry/bpf_testmod_test_struct_arg_8")
-int BPF_PROG2(test_struct_arg_14, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f, int, g)
-{
- t8_a = a;
- t8_b = (long)b;
- t8_c = c;
- t8_d = d;
- t8_e = (long)e;
- t8_f_a = f.a;
- t8_f_b = f.b;
- t8_g = g;
- return 0;
-}
-
-SEC("fexit/bpf_testmod_test_struct_arg_8")
-int BPF_PROG2(test_struct_arg_15, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f, int, g,
- int, ret)
-{
- t8_ret = ret;
- return 0;
-}
-
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c
new file mode 100644
index 000000000000..4742012ace06
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_testmod_struct_arg_4 {
+ u64 a;
+ int b;
+};
+
+struct bpf_testmod_struct_arg_5 {
+ char a;
+ short b;
+ int c;
+ long d;
+};
+
+long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret;
+long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret;
+long t9_a, t9_b, t9_c, t9_d, t9_e, t9_f, t9_g, t9_h_a, t9_h_b, t9_h_c, t9_h_d, t9_i, t9_ret;
+
+SEC("fentry/bpf_testmod_test_struct_arg_7")
+int BPF_PROG2(test_struct_many_args_1, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f)
+{
+ t7_a = a;
+ t7_b = (long)b;
+ t7_c = c;
+ t7_d = d;
+ t7_e = (long)e;
+ t7_f_a = f.a;
+ t7_f_b = f.b;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_7")
+int BPF_PROG2(test_struct_many_args_2, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, ret)
+{
+ t7_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_8")
+int BPF_PROG2(test_struct_many_args_3, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, g)
+{
+ t8_a = a;
+ t8_b = (long)b;
+ t8_c = c;
+ t8_d = d;
+ t8_e = (long)e;
+ t8_f_a = f.a;
+ t8_f_b = f.b;
+ t8_g = g;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_8")
+int BPF_PROG2(test_struct_many_args_4, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, g,
+ int, ret)
+{
+ t8_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_9")
+int BPF_PROG2(test_struct_many_args_5, __u64, a, void *, b, short, c, int, d, void *, e,
+ char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i)
+{
+ t9_a = a;
+ t9_b = (long)b;
+ t9_c = c;
+ t9_d = d;
+ t9_e = (long)e;
+ t9_f = f;
+ t9_g = g;
+ t9_h_a = h.a;
+ t9_h_b = h.b;
+ t9_h_c = h.c;
+ t9_h_d = h.d;
+ t9_i = i;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_9")
+int BPF_PROG2(test_struct_many_args_6, __u64, a, void *, b, short, c, int, d, void *, e,
+ char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i, int, ret)
+{
+ t9_ret = ret;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c
index 419d9aa28fce..44190efcdba2 100644
--- a/tools/testing/selftests/bpf/progs/uprobe_multi.c
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include <stdbool.h>
+#include <bpf/usdt.bpf.h>
char _license[] SEC("license") = "GPL";
@@ -22,6 +22,13 @@ __u64 uprobe_multi_sleep_result = 0;
int pid = 0;
int child_pid = 0;
+int child_tid = 0;
+int child_pid_usdt = 0;
+int child_tid_usdt = 0;
+
+int expect_pid = 0;
+bool bad_pid_seen = false;
+bool bad_pid_seen_usdt = false;
bool test_cookie = false;
void *user_ptr = 0;
@@ -36,11 +43,19 @@ static __always_inline bool verify_sleepable_user_copy(void)
static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep)
{
- child_pid = bpf_get_current_pid_tgid() >> 32;
+ __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+ __u32 cur_pid;
- if (pid && child_pid != pid)
+ cur_pid = cur_pid_tgid >> 32;
+ if (pid && cur_pid != pid)
return;
+ if (expect_pid && cur_pid != expect_pid)
+ bad_pid_seen = true;
+
+ child_pid = cur_pid_tgid >> 32;
+ child_tid = (__u32)cur_pid_tgid;
+
__u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
__u64 addr = bpf_get_func_ip(ctx);
@@ -97,5 +112,32 @@ int uretprobe_sleep(struct pt_regs *ctx)
SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
int uprobe_extra(struct pt_regs *ctx)
{
+ /* we need this one just to mix PID-filtered and global uprobes */
+ return 0;
+}
+
+SEC("usdt")
+int usdt_pid(struct pt_regs *ctx)
+{
+ __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+ __u32 cur_pid;
+
+ cur_pid = cur_pid_tgid >> 32;
+ if (pid && cur_pid != pid)
+ return 0;
+
+ if (expect_pid && cur_pid != expect_pid)
+ bad_pid_seen_usdt = true;
+
+ child_pid_usdt = cur_pid_tgid >> 32;
+ child_tid_usdt = (__u32)cur_pid_tgid;
+
+ return 0;
+}
+
+SEC("usdt")
+int usdt_extra(struct pt_regs *ctx)
+{
+ /* we need this one just to mix PID-filtered and global USDT probes */
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
index 11ab25c42c36..54de0389f878 100644
--- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
@@ -221,3 +221,25 @@ int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx)
bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0);
return 0;
}
+
+__noinline long global_call_bpf_dynptr_data(struct bpf_dynptr *dynptr)
+{
+ bpf_dynptr_data(dynptr, 0xA, 0xA);
+ return 0;
+}
+
+static long callback_adjust_bpf_dynptr_reg_off(struct bpf_dynptr *dynptr,
+ void *ctx)
+{
+ global_call_bpf_dynptr_data(dynptr += 1024);
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("dereference of modified dynptr_ptr ptr R1 off=16384 disallowed")
+int user_ringbuf_callback_const_ptr_to_dynptr_reg_off(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf,
+ callback_adjust_bpf_dynptr_reg_off, NULL, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
index 93144ae6df74..67509c5d3982 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
index ef66ea460264..6065f862d964 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
new file mode 100644
index 000000000000..716113c2bce2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
+ u32 nr_bits) __ksym __weak;
+int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
+void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;
+
+SEC("iter.s/cgroup")
+__description("bits iter without destroy")
+__failure __msg("Unreleased reference")
+int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits it;
+ u64 data = 1;
+
+ bpf_iter_bits_new(&it, &data, 1);
+ bpf_iter_bits_next(&it);
+ return 0;
+}
+
+SEC("iter/cgroup")
+__description("uninitialized iter in ->next()")
+__failure __msg("expected an initialized iter_bits as arg #1")
+int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits *it = NULL;
+
+ bpf_iter_bits_next(it);
+ return 0;
+}
+
+SEC("iter/cgroup")
+__description("uninitialized iter in ->destroy()")
+__failure __msg("expected an initialized iter_bits as arg #1")
+int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits it = {};
+
+ bpf_iter_bits_destroy(&it);
+ return 0;
+}
+
+SEC("syscall")
+__description("null pointer")
+__success __retval(0)
+int null_pointer(void)
+{
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, NULL, 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bits copy")
+__success __retval(10)
+int bits_copy(void)
+{
+ u64 data = 0xf7310UL; /* 4 + 3 + 2 + 1 + 0 */
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data, 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bits memalloc")
+__success __retval(64)
+int bits_memalloc(void)
+{
+ u64 data[2];
+ int nr = 0;
+ int *bit;
+
+ __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */
+ bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64))
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bit index")
+__success __retval(8)
+int bit_index(void)
+{
+ u64 data = 0x100;
+ int bit_idx = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data, 1) {
+ if (*bit == 0)
+ continue;
+ bit_idx = *bit;
+ }
+ return bit_idx;
+}
+
+SEC("syscall")
+__description("bits nomem")
+__success __retval(0)
+int bits_nomem(void)
+{
+ u64 data[4];
+ int nr = 0;
+ int *bit;
+
+ __builtin_memset(&data, 0xff, sizeof(data));
+ bpf_for_each(bits, bit, &data[0], 513) /* must be greater than 512 */
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("fewer words")
+__success __retval(1)
+int fewer_words(void)
+{
+ u64 data[2] = {0x1, 0xff};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("zero words")
+__success __retval(0)
+int zero_words(void)
+{
+ u64 data[2] = {0x1, 0xff};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 0)
+ nr++;
+ return nr;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
index bd676d7e615f..e54bb5385bc1 100644
--- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -274,6 +274,58 @@ static __naked void iter_limit_bug_cb(void)
);
}
+int tmp_var;
+SEC("socket")
+__failure __msg("infinite loop detected at insn 2")
+__naked void jgt_imm64_and_may_goto(void)
+{
+ asm volatile (" \
+ r0 = %[tmp_var] ll; \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -3; /* off -3 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" :: __imm_addr(tmp_var)
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __msg("infinite loop detected at insn 1")
+__naked void may_goto_self(void)
+{
+ asm volatile (" \
+ r0 = *(u32 *)(r10 - 4); \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -1; /* off -1 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void may_goto_neg_off(void)
+{
+ asm volatile (" \
+ r0 = *(u32 *)(r10 - 4); \
+ goto l0_%=; \
+ goto l1_%=; \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short -2; /* off -2 */ \
+ .long 0; /* imm */ \
+ if r0 > 10 goto l0_%=; \
+l1_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
SEC("tc")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
@@ -307,6 +359,100 @@ int iter_limit_bug(struct __sk_buff *skb)
return 0;
}
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto2(void)
+{
+ asm volatile (" \
+l0_%=: r0 = 0; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+
+SEC("socket")
+__success __retval(0)
+__naked void jlt_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: call %[bpf_jiffies64]; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ if r0 < 10 goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" :: __imm(bpf_jiffies64)
+ : __clobber_all);
+}
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+SEC("socket")
+__success __retval(0)
+__naked void gotol_and_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: r0 = 0; \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ gotol l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_common);
+}
+#endif
+
+SEC("socket")
+__success __retval(0)
+__naked void ja_and_may_goto_subprog(void)
+{
+ asm volatile (" \
+ call subprog_with_may_goto; \
+ exit; \
+" ::: __clobber_all);
+}
+
+static __naked __noinline __used
+void subprog_with_may_goto(void)
+{
+ asm volatile (" \
+l0_%=: .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 1; /* off 1 */ \
+ .long 0; /* imm */ \
+ goto l0_%=; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
#define ARR_SZ 1000000
int zero;
char arr[ARR_SZ];
@@ -405,4 +551,240 @@ int cond_break5(const void *ctx)
return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
}
+#define ARR2_SZ 1000
+SEC(".data.arr2")
+char arr2[ARR2_SZ];
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_signed(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ long i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ && i >= 0)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+volatile const int limit = ARR2_SZ;
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_volatile_limit(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < limit)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+#define ARR_LONG_SZ 1000
+
+SEC(".data.arr_long")
+long arr_long[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test1(const void *ctx)
+{
+ long i;
+
+ for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
+ arr_long[i] = i;
+ return 0;
+}
+
+SEC("socket")
+__success
+int test2(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_long[i] = i;
+ }
+ return 0;
+}
+
+SEC(".data.arr_foo")
+struct {
+ int a;
+ int b;
+} arr_foo[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test3(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+SEC("socket")
+__success
+int test4(const void *ctx)
+{
+ long i;
+
+ for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+char buf[10] SEC(".data.buf");
+
+SEC("socket")
+__description("check add const")
+__success
+__naked void check_add_const(void)
+{
+ /* typical LLVM generated loop with may_goto */
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 > 9 goto l1_%=; \
+l0_%=: r1 = %[buf]; \
+ r2 = r0; \
+ r1 += r2; \
+ r3 = *(u8 *)(r1 +0); \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 4; /* off of l1_%=: */ \
+ .long 0; /* imm */ \
+ r0 = r2; \
+ r0 += 1; \
+ if r2 < 9 goto l0_%=; \
+ exit; \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__msg("*(u8 *)(r7 +0) = r0")
+__msg("invalid access to map value, value_size=10 off=10 size=1")
+__naked void check_add_const_3regs(void)
+{
+ asm volatile (
+ "r6 = %[buf];"
+ "r7 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "r1 = r0;" /* link r0.id == r1.id == r2.id */
+ "r2 = r0;"
+ "r1 += 1;" /* r1 == r0+1 */
+ "r2 += 2;" /* r2 == r0+2 */
+ "if r0 > 8 goto 1f;" /* r0 range [0, 8] */
+ "r6 += r1;" /* r1 range [1, 9] */
+ "r7 += r2;" /* r2 range [2, 10] */
+ "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
+ "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */
+ "1: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__msg("*(u8 *)(r8 -1) = r0")
+__msg("invalid access to map value, value_size=10 off=10 size=1")
+__naked void check_add_const_3regs_2if(void)
+{
+ asm volatile (
+ "r6 = %[buf];"
+ "r7 = %[buf];"
+ "r8 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "if r0 < 2 goto 1f;"
+ "r1 = r0;" /* link r0.id == r1.id == r2.id */
+ "r2 = r0;"
+ "r1 += 1;" /* r1 == r0+1 */
+ "r2 += 2;" /* r2 == r0+2 */
+ "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */
+ "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */
+ "r6 += r0;" /* r0 range [0, 9] */
+ "r7 += r1;" /* r1 range [1, 10] */
+ "r8 += r2;" /* r2 range [2, 11] */
+ "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
+ "*(u8 *)(r7 -1) = r0;" /* safe */
+ "*(u8 *)(r8 -1) = r0;" /* unsafe */
+ "1: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_add_const_regsafe_off(void)
+{
+ asm volatile (
+ "r8 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r7 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r1 = r0;" /* same ids for r1 and r0 */
+ "if r6 > r7 goto 1f;" /* this jump can't be predicted */
+ "r1 += 1;" /* r1.off == +1 */
+ "goto 2f;"
+ "1: r1 += 100;" /* r1.off == +100 */
+ "goto +0;" /* verify r1.off in regsafe() after this insn */
+ "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/
+ "r8 += r1;"
+ "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */
+ "3: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index cbb9d6714f53..028ec855587b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -224,6 +224,69 @@ l0_%=: \
: __clobber_all);
}
+SEC("socket")
+__description("MOV32SX, S8, var_off u32_max")
+__failure __msg("infinite loop detected")
+__failure_unpriv __msg_unpriv("back-edge from insn 2 to 0")
+__naked void mov64sx_s32_varoff_1(void)
+{
+ asm volatile (" \
+l0_%=: \
+ r3 = *(u8 *)(r10 -387); \
+ w7 = (s8)w3; \
+ if w7 >= 0x2533823b goto l0_%=; \
+ w0 = 0; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, var_off not u32_max, positive after s8 extension")
+__success __retval(0)
+__failure_unpriv __msg_unpriv("frame pointer is read only")
+__naked void mov64sx_s32_varoff_2(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ r3 &= 0xf; \
+ w7 = (s8)w3; \
+ if w7 s>= 16 goto l0_%=; \
+ w0 = 0; \
+ exit; \
+l0_%=: \
+ r10 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, var_off not u32_max, negative after s8 extension")
+__success __retval(0)
+__failure_unpriv __msg_unpriv("frame pointer is read only")
+__naked void mov64sx_s32_varoff_3(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r3 = r0; \
+ r3 &= 0xf; \
+ r3 |= 0x80; \
+ w7 = (s8)w3; \
+ if w7 s>= -5 goto l0_%=; \
+ w0 = 0; \
+ exit; \
+l0_%=: \
+ r10 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
#else
SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
index 65bba330e7e5..ab9f9f2620ed 100644
--- a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
@@ -79,7 +79,7 @@ int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx)
return NF_ACCEPT;
}
-extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
void *buffer, uint32_t buffer__sz) __ksym;
@@ -90,8 +90,8 @@ __success __failure_unpriv
__retval(0)
int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
{
+ struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
const struct nf_hook_state *state = ctx->state;
- struct sk_buff *skb = ctx->skb;
const struct iphdr *iph;
const struct tcphdr *th;
u8 buffer_iph[20] = {};
@@ -99,7 +99,7 @@ int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
struct bpf_dynptr ptr;
uint8_t ihl;
- if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
+ if (ctx->skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
return NF_ACCEPT;
iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph));
diff --git a/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c b/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c
new file mode 100644
index 000000000000..f37713a265ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("or_jmp32_k: bit ops + branch on unknown value")
+__failure
+__msg("R0 invalid mem access 'scalar'")
+__naked void or_jmp32_k(void)
+{
+ asm volatile (" \
+ r0 = 0xffffffff; \
+ r0 /= 1; \
+ r1 = 0; \
+ w1 = -1; \
+ w1 >>= 1; \
+ w0 &= w1; \
+ w0 |= 2; \
+ if w0 != 0x7ffffffd goto l1; \
+ r0 = 1; \
+ exit; \
+l3: \
+ r0 = 5; \
+ *(u64*)(r0 - 8) = r0; \
+ exit; \
+l2: \
+ w0 -= 0xe; \
+ if w0 == 1 goto l3; \
+ r0 = 4; \
+ exit; \
+l1: \
+ w0 -= 0x7ffffff0; \
+ if w0 s>= 0xe goto l2; \
+ r0 = 3; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c b/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c
new file mode 100644
index 000000000000..fe4b123187b8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+#define __always_unused __attribute__((unused))
+
+char _license[] SEC("license") = "GPL";
+
+struct sock {
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__sockmap {
+ union {
+ struct sock *sk;
+ };
+} __attribute__((preserve_access_index));
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sockhash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sockmap SEC(".maps");
+
+enum { CG_OK = 1 };
+
+int zero = 0;
+
+static __always_inline void test_sockmap_delete(void)
+{
+ bpf_map_delete_elem(&sockmap, &zero);
+ bpf_map_delete_elem(&sockhash, &zero);
+}
+
+static __always_inline void test_sockmap_update(void *sk)
+{
+ if (sk) {
+ bpf_map_update_elem(&sockmap, &zero, sk, BPF_ANY);
+ bpf_map_update_elem(&sockhash, &zero, sk, BPF_ANY);
+ }
+}
+
+static __always_inline void test_sockmap_lookup_and_update(void)
+{
+ struct bpf_sock *sk = bpf_map_lookup_elem(&sockmap, &zero);
+
+ if (sk) {
+ test_sockmap_update(sk);
+ bpf_sk_release(sk);
+ }
+}
+
+static __always_inline void test_sockmap_mutate(void *sk)
+{
+ test_sockmap_delete();
+ test_sockmap_update(sk);
+}
+
+static __always_inline void test_sockmap_lookup_and_mutate(void)
+{
+ test_sockmap_delete();
+ test_sockmap_lookup_and_update();
+}
+
+SEC("action")
+__success
+int test_sched_act(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("classifier")
+__success
+int test_sched_cls(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("flow_dissector")
+__success
+int test_flow_dissector_delete(struct __sk_buff *skb __always_unused)
+{
+ test_sockmap_delete();
+ return 0;
+}
+
+SEC("flow_dissector")
+__failure __msg("program of this type cannot use helper bpf_sk_release")
+int test_flow_dissector_update(struct __sk_buff *skb __always_unused)
+{
+ test_sockmap_lookup_and_update(); /* no access to skb->sk */
+ return 0;
+}
+
+SEC("iter/sockmap")
+__success
+int test_trace_iter(struct bpf_iter__sockmap *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_delete(const void *ctx __always_unused)
+{
+ test_sockmap_delete();
+ return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_update(const void *ctx __always_unused)
+{
+ test_sockmap_lookup_and_update();
+ return 0;
+}
+
+SEC("sk_lookup")
+__success
+int test_sk_lookup(struct bpf_sk_lookup *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("sk_reuseport")
+__success
+int test_sk_reuseport(struct sk_reuseport_md *ctx)
+{
+ test_sockmap_mutate(ctx->sk);
+ return 0;
+}
+
+SEC("socket")
+__success
+int test_socket_filter(struct __sk_buff *skb)
+{
+ test_sockmap_mutate(skb->sk);
+ return 0;
+}
+
+SEC("sockops")
+__success
+int test_sockops_delete(struct bpf_sock_ops *ctx __always_unused)
+{
+ test_sockmap_delete();
+ return CG_OK;
+}
+
+SEC("sockops")
+__failure __msg("cannot update sockmap in this context")
+int test_sockops_update(struct bpf_sock_ops *ctx)
+{
+ test_sockmap_update(ctx->sk);
+ return CG_OK;
+}
+
+SEC("sockops")
+__success
+int test_sockops_update_dedicated(struct bpf_sock_ops *ctx)
+{
+ bpf_sock_map_update(ctx, &sockmap, &zero, BPF_ANY);
+ bpf_sock_hash_update(ctx, &sockhash, &zero, BPF_ANY);
+ return CG_OK;
+}
+
+SEC("xdp")
+__success
+int test_xdp(struct xdp_md *ctx __always_unused)
+{
+ test_sockmap_lookup_and_mutate();
+ return XDP_PASS;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index 4a58e0398e72..6a6fad625f7e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -8,8 +8,6 @@
#include "bpf_misc.h"
#include <../../../tools/include/linux/filter.h>
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
-
int vals[] SEC(".data.vals") = {1, 2, 3, 4};
__naked __noinline __used
diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c
index 49e712acbf60..f8d3ae0c29ae 100644
--- a/tools/testing/selftests/bpf/progs/wq.c
+++ b/tools/testing/selftests/bpf/progs/wq.c
@@ -32,6 +32,7 @@ struct {
} hmap_malloc SEC(".maps");
struct elem {
+ int ok_offset;
struct bpf_wq w;
};
@@ -53,7 +54,7 @@ __u32 ok;
__u32 ok_sleepable;
static int test_elem_callback(void *map, int *key,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
+ int (callback_fn)(void *map, int *key, void *value))
{
struct elem init = {}, *val;
struct bpf_wq *wq;
@@ -70,6 +71,8 @@ static int test_elem_callback(void *map, int *key,
if (!val)
return -2;
+ val->ok_offset = *key;
+
wq = &val->w;
if (bpf_wq_init(wq, map, 0) != 0)
return -3;
@@ -84,7 +87,7 @@ static int test_elem_callback(void *map, int *key,
}
static int test_hmap_elem_callback(void *map, int *key,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
+ int (callback_fn)(void *map, int *key, void *value))
{
struct hmap_elem init = {}, *val;
struct bpf_wq *wq;
@@ -114,7 +117,7 @@ static int test_hmap_elem_callback(void *map, int *key,
}
/* callback for non sleepable workqueue */
-static int wq_callback(void *map, int *key, struct bpf_wq *work)
+static int wq_callback(void *map, int *key, void *value)
{
bpf_kfunc_common_test();
ok |= (1 << *key);
@@ -122,10 +125,16 @@ static int wq_callback(void *map, int *key, struct bpf_wq *work)
}
/* callback for sleepable workqueue */
-static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+static int wq_cb_sleepable(void *map, int *key, void *value)
{
+ struct elem *data = (struct elem *)value;
+ int offset = data->ok_offset;
+
+ if (*key != offset)
+ return 0;
+
bpf_kfunc_call_test_sleepable();
- ok_sleepable |= (1 << *key);
+ ok_sleepable |= (1 << offset);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c
index 4cbdb425f223..25b51a72fe0f 100644
--- a/tools/testing/selftests/bpf/progs/wq_failures.c
+++ b/tools/testing/selftests/bpf/progs/wq_failures.c
@@ -28,14 +28,14 @@ struct {
} lru SEC(".maps");
/* callback for non sleepable workqueue */
-static int wq_callback(void *map, int *key, struct bpf_wq *work)
+static int wq_callback(void *map, int *key, void *value)
{
bpf_kfunc_common_test();
return 0;
}
/* callback for sleepable workqueue */
-static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+static int wq_cb_sleepable(void *map, int *key, void *value)
{
bpf_kfunc_call_test_sleepable();
return 0;
diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
new file mode 100644
index 000000000000..7fdc7b23ee74
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86dd
+#define IP_MF 0x2000 /* "More Fragments" */
+#define IP_OFFSET 0x1fff /* "Fragment Offset" */
+#define AF_INET 2
+#define AF_INET6 10
+
+struct bpf_flowtable_opts___local {
+ s32 error;
+};
+
+struct flow_offload_tuple_rhash *
+bpf_xdp_flow_lookup(struct xdp_md *, struct bpf_fib_lookup *,
+ struct bpf_flowtable_opts___local *, u32) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} stats SEC(".maps");
+
+static bool xdp_flowtable_offload_check_iphdr(struct iphdr *iph)
+{
+ /* ip fragmented traffic */
+ if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET))
+ return false;
+
+ /* ip options */
+ if (iph->ihl * 4 != sizeof(*iph))
+ return false;
+
+ if (iph->ttl <= 1)
+ return false;
+
+ return true;
+}
+
+static bool xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end,
+ u8 proto)
+{
+ if (proto == IPPROTO_TCP) {
+ struct tcphdr *tcph = ports;
+
+ if (tcph + 1 > data_end)
+ return false;
+
+ if (tcph->fin || tcph->rst)
+ return false;
+ }
+
+ return true;
+}
+
+struct flow_ports___local {
+ __be16 source, dest;
+} __attribute__((preserve_access_index));
+
+SEC("xdp.frags")
+int xdp_flowtable_do_lookup(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ struct bpf_flowtable_opts___local opts = {};
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct bpf_fib_lookup tuple = {
+ .ifindex = ctx->ingress_ifindex,
+ };
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ struct flow_ports___local *ports;
+ __u32 *val, key = 0;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ switch (eth->h_proto) {
+ case bpf_htons(ETH_P_IP): {
+ struct iphdr *iph = data + sizeof(*eth);
+
+ ports = (struct flow_ports___local *)(iph + 1);
+ if (ports + 1 > data_end)
+ return XDP_PASS;
+
+ /* sanity check on ip header */
+ if (!xdp_flowtable_offload_check_iphdr(iph))
+ return XDP_PASS;
+
+ if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+ iph->protocol))
+ return XDP_PASS;
+
+ tuple.family = AF_INET;
+ tuple.tos = iph->tos;
+ tuple.l4_protocol = iph->protocol;
+ tuple.tot_len = bpf_ntohs(iph->tot_len);
+ tuple.ipv4_src = iph->saddr;
+ tuple.ipv4_dst = iph->daddr;
+ tuple.sport = ports->source;
+ tuple.dport = ports->dest;
+ break;
+ }
+ case bpf_htons(ETH_P_IPV6): {
+ struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src;
+ struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst;
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ ports = (struct flow_ports___local *)(ip6h + 1);
+ if (ports + 1 > data_end)
+ return XDP_PASS;
+
+ if (ip6h->hop_limit <= 1)
+ return XDP_PASS;
+
+ if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+ ip6h->nexthdr))
+ return XDP_PASS;
+
+ tuple.family = AF_INET6;
+ tuple.l4_protocol = ip6h->nexthdr;
+ tuple.tot_len = bpf_ntohs(ip6h->payload_len);
+ *src = ip6h->saddr;
+ *dst = ip6h->daddr;
+ tuple.sport = ports->source;
+ tuple.dport = ports->dest;
+ break;
+ }
+ default:
+ return XDP_PASS;
+ }
+
+ tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts));
+ if (!tuplehash)
+ return XDP_PASS;
+
+ val = bpf_map_lookup_elem(&stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 7ea9785738b5..f8f5dc9f72b8 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/progs/xfrm_info.c b/tools/testing/selftests/bpf/progs/xfrm_info.c
index f6a501fbba2b..a1d9f106c3f0 100644
--- a/tools/testing/selftests/bpf/progs/xfrm_info.c
+++ b/tools/testing/selftests/bpf/progs/xfrm_info.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index 524c38e9cde4..f14e10b0de96 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
+#include <regex.h>
#include <test_progs.h>
#include <bpf/btf.h>
@@ -17,9 +18,11 @@
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
+#define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
+#define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
@@ -46,10 +49,16 @@ enum mode {
UNPRIV = 2
};
+struct expect_msg {
+ const char *substr; /* substring match */
+ const char *regex_str; /* regex-based match */
+ regex_t regex;
+};
+
struct test_subspec {
char *name;
bool expect_failure;
- const char **expect_msgs;
+ struct expect_msg *expect_msgs;
size_t expect_msg_cnt;
int retval;
bool execute;
@@ -89,6 +98,16 @@ void test_loader_fini(struct test_loader *tester)
static void free_test_spec(struct test_spec *spec)
{
+ int i;
+
+ /* Deallocate expect_msgs arrays. */
+ for (i = 0; i < spec->priv.expect_msg_cnt; i++)
+ if (spec->priv.expect_msgs[i].regex_str)
+ regfree(&spec->priv.expect_msgs[i].regex);
+ for (i = 0; i < spec->unpriv.expect_msg_cnt; i++)
+ if (spec->unpriv.expect_msgs[i].regex_str)
+ regfree(&spec->unpriv.expect_msgs[i].regex);
+
free(spec->priv.name);
free(spec->unpriv.name);
free(spec->priv.expect_msgs);
@@ -100,18 +119,38 @@ static void free_test_spec(struct test_spec *spec)
spec->unpriv.expect_msgs = NULL;
}
-static int push_msg(const char *msg, struct test_subspec *subspec)
+static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec)
{
void *tmp;
+ int regcomp_res;
+ char error_msg[100];
+ struct expect_msg *msg;
- tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *));
+ tmp = realloc(subspec->expect_msgs,
+ (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg));
if (!tmp) {
ASSERT_FAIL("failed to realloc memory for messages\n");
return -ENOMEM;
}
subspec->expect_msgs = tmp;
- subspec->expect_msgs[subspec->expect_msg_cnt++] = msg;
+ msg = &subspec->expect_msgs[subspec->expect_msg_cnt];
+
+ if (substr) {
+ msg->substr = substr;
+ msg->regex_str = NULL;
+ } else {
+ msg->regex_str = regex_str;
+ msg->substr = NULL;
+ regcomp_res = regcomp(&msg->regex, regex_str, REG_EXTENDED|REG_NEWLINE);
+ if (regcomp_res != 0) {
+ regerror(regcomp_res, &msg->regex, error_msg, sizeof(error_msg));
+ PRINT_FAIL("Regexp compilation error in '%s': '%s'\n",
+ regex_str, error_msg);
+ return -EINVAL;
+ }
+ }
+ subspec->expect_msg_cnt += 1;
return 0;
}
@@ -233,13 +272,25 @@ static int parse_test_spec(struct test_loader *tester,
spec->mode_mask |= UNPRIV;
} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
- err = push_msg(msg, &spec->priv);
+ err = push_msg(msg, NULL, &spec->priv);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
- err = push_msg(msg, &spec->unpriv);
+ err = push_msg(msg, NULL, &spec->unpriv);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= UNPRIV;
+ } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) {
+ msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1;
+ err = push_msg(NULL, msg, &spec->priv);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= PRIV;
+ } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) {
+ msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1;
+ err = push_msg(NULL, msg, &spec->unpriv);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
@@ -337,16 +388,13 @@ static int parse_test_spec(struct test_loader *tester,
}
if (!spec->unpriv.expect_msgs) {
- size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);
+ for (i = 0; i < spec->priv.expect_msg_cnt; i++) {
+ struct expect_msg *msg = &spec->priv.expect_msgs[i];
- spec->unpriv.expect_msgs = malloc(sz);
- if (!spec->unpriv.expect_msgs) {
- PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
- err = -ENOMEM;
- goto cleanup;
+ err = push_msg(msg->substr, msg->regex_str, &spec->unpriv);
+ if (err)
+ goto cleanup;
}
- memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
- spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
}
}
@@ -402,27 +450,40 @@ static void validate_case(struct test_loader *tester,
struct bpf_program *prog,
int load_err)
{
- int i, j;
+ int i, j, err;
+ char *match;
+ regmatch_t reg_match[1];
for (i = 0; i < subspec->expect_msg_cnt; i++) {
- char *match;
- const char *expect_msg;
-
- expect_msg = subspec->expect_msgs[i];
+ struct expect_msg *msg = &subspec->expect_msgs[i];
+
+ if (msg->substr) {
+ match = strstr(tester->log_buf + tester->next_match_pos, msg->substr);
+ if (match)
+ tester->next_match_pos = match - tester->log_buf + strlen(msg->substr);
+ } else {
+ err = regexec(&msg->regex,
+ tester->log_buf + tester->next_match_pos, 1, reg_match, 0);
+ if (err == 0) {
+ match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so;
+ tester->next_match_pos += reg_match[0].rm_eo;
+ } else {
+ match = NULL;
+ }
+ }
- match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
if (!ASSERT_OK_PTR(match, "expect_msg")) {
- /* if we are in verbose mode, we've already emitted log */
if (env.verbosity == VERBOSE_NONE)
emit_verifier_log(tester->log_buf, true /*force*/);
- for (j = 0; j < i; j++)
- fprintf(stderr,
- "MATCHED MSG: '%s'\n", subspec->expect_msgs[j]);
- fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
+ for (j = 0; j <= i; j++) {
+ msg = &subspec->expect_msgs[j];
+ fprintf(stderr, "%s %s: '%s'\n",
+ j < i ? "MATCHED " : "EXPECTED",
+ msg->substr ? "SUBSTR" : " REGEX",
+ msg->substr ?: msg->regex_str);
+ }
return;
}
-
- tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
}
}
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 0ba5a20b19ba..51341d50213b 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -377,6 +377,15 @@ int test__join_cgroup(const char *path);
___ok; \
})
+#define ASSERT_OK_FD(fd, name) ({ \
+ static int duration = 0; \
+ int ___fd = (fd); \
+ bool ___ok = ___fd >= 0; \
+ CHECK(!___ok, (name), "unexpected fd: %d (errno %d)\n", \
+ ___fd, errno); \
+ ___ok; \
+})
+
#define SYS(goto_label, fmt, ...) \
({ \
char cmd[1024]; \
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 92752f5eeded..3e02d7267de8 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -63,7 +63,8 @@ int passed;
int failed;
int map_fd[9];
struct bpf_map *maps[9];
-int prog_fd[11];
+struct bpf_program *progs[9];
+struct bpf_link *links[9];
int txmsg_pass;
int txmsg_redir;
@@ -680,7 +681,8 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
}
}
- s->bytes_recvd += recv;
+ if (recv > 0)
+ s->bytes_recvd += recv;
if (opt->check_recved_len && s->bytes_recvd > total_bytes) {
errno = EMSGSIZE;
@@ -952,7 +954,8 @@ enum {
static int run_options(struct sockmap_options *options, int cg_fd, int test)
{
- int i, key, next_key, err, tx_prog_fd = -1, zero = 0;
+ int i, key, next_key, err, zero = 0;
+ struct bpf_program *tx_prog;
/* If base test skip BPF setup */
if (test == BASE || test == BASE_SENDPAGE)
@@ -960,48 +963,44 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
/* Attach programs to sockmap */
if (!txmsg_omit_skb_parser) {
- err = bpf_prog_attach(prog_fd[0], map_fd[0],
- BPF_SK_SKB_STREAM_PARSER, 0);
- if (err) {
+ links[0] = bpf_program__attach_sockmap(progs[0], map_fd[0]);
+ if (!links[0]) {
fprintf(stderr,
- "ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n",
- prog_fd[0], map_fd[0], err, strerror(errno));
- return err;
+ "ERROR: bpf_program__attach_sockmap (sockmap %i->%i): (%s)\n",
+ bpf_program__fd(progs[0]), map_fd[0], strerror(errno));
+ return -1;
}
}
- err = bpf_prog_attach(prog_fd[1], map_fd[0],
- BPF_SK_SKB_STREAM_VERDICT, 0);
- if (err) {
- fprintf(stderr, "ERROR: bpf_prog_attach (sockmap): %d (%s)\n",
- err, strerror(errno));
- return err;
+ links[1] = bpf_program__attach_sockmap(progs[1], map_fd[0]);
+ if (!links[1]) {
+ fprintf(stderr, "ERROR: bpf_program__attach_sockmap (sockmap): (%s)\n",
+ strerror(errno));
+ return -1;
}
/* Attach programs to TLS sockmap */
if (txmsg_ktls_skb) {
if (!txmsg_omit_skb_parser) {
- err = bpf_prog_attach(prog_fd[0], map_fd[8],
- BPF_SK_SKB_STREAM_PARSER, 0);
- if (err) {
+ links[2] = bpf_program__attach_sockmap(progs[0], map_fd[8]);
+ if (!links[2]) {
fprintf(stderr,
- "ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n",
- prog_fd[0], map_fd[8], err, strerror(errno));
- return err;
+ "ERROR: bpf_program__attach_sockmap (TLS sockmap %i->%i): (%s)\n",
+ bpf_program__fd(progs[0]), map_fd[8], strerror(errno));
+ return -1;
}
}
- err = bpf_prog_attach(prog_fd[2], map_fd[8],
- BPF_SK_SKB_STREAM_VERDICT, 0);
- if (err) {
- fprintf(stderr, "ERROR: bpf_prog_attach (TLS sockmap): %d (%s)\n",
- err, strerror(errno));
- return err;
+ links[3] = bpf_program__attach_sockmap(progs[2], map_fd[8]);
+ if (!links[3]) {
+ fprintf(stderr, "ERROR: bpf_program__attach_sockmap (TLS sockmap): (%s)\n",
+ strerror(errno));
+ return -1;
}
}
/* Attach to cgroups */
- err = bpf_prog_attach(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS, 0);
+ err = bpf_prog_attach(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS, 0);
if (err) {
fprintf(stderr, "ERROR: bpf_prog_attach (groups): %d (%s)\n",
err, strerror(errno));
@@ -1017,30 +1016,31 @@ run:
/* Attach txmsg program to sockmap */
if (txmsg_pass)
- tx_prog_fd = prog_fd[4];
+ tx_prog = progs[4];
else if (txmsg_redir)
- tx_prog_fd = prog_fd[5];
+ tx_prog = progs[5];
else if (txmsg_apply)
- tx_prog_fd = prog_fd[6];
+ tx_prog = progs[6];
else if (txmsg_cork)
- tx_prog_fd = prog_fd[7];
+ tx_prog = progs[7];
else if (txmsg_drop)
- tx_prog_fd = prog_fd[8];
+ tx_prog = progs[8];
else
- tx_prog_fd = 0;
+ tx_prog = NULL;
- if (tx_prog_fd) {
- int redir_fd, i = 0;
+ if (tx_prog) {
+ int redir_fd;
- err = bpf_prog_attach(tx_prog_fd,
- map_fd[1], BPF_SK_MSG_VERDICT, 0);
- if (err) {
+ links[4] = bpf_program__attach_sockmap(tx_prog, map_fd[1]);
+ if (!links[4]) {
fprintf(stderr,
- "ERROR: bpf_prog_attach (txmsg): %d (%s)\n",
- err, strerror(errno));
+ "ERROR: bpf_program__attach_sockmap (txmsg): (%s)\n",
+ strerror(errno));
+ err = -1;
goto out;
}
+ i = 0;
err = bpf_map_update_elem(map_fd[1], &i, &c1, BPF_ANY);
if (err) {
fprintf(stderr,
@@ -1279,16 +1279,14 @@ run:
fprintf(stderr, "unknown test\n");
out:
/* Detach and zero all the maps */
- bpf_prog_detach2(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS);
- bpf_prog_detach2(prog_fd[0], map_fd[0], BPF_SK_SKB_STREAM_PARSER);
- bpf_prog_detach2(prog_fd[1], map_fd[0], BPF_SK_SKB_STREAM_VERDICT);
- bpf_prog_detach2(prog_fd[0], map_fd[8], BPF_SK_SKB_STREAM_PARSER);
- bpf_prog_detach2(prog_fd[2], map_fd[8], BPF_SK_SKB_STREAM_VERDICT);
+ bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS);
- if (tx_prog_fd >= 0)
- bpf_prog_detach2(tx_prog_fd, map_fd[1], BPF_SK_MSG_VERDICT);
+ for (i = 0; i < ARRAY_SIZE(links); i++) {
+ if (links[i])
+ bpf_link__detach(links[i]);
+ }
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < ARRAY_SIZE(map_fd); i++) {
key = next_key = 0;
bpf_map_update_elem(map_fd[i], &key, &zero, BPF_ANY);
while (bpf_map_get_next_key(map_fd[i], &key, &next_key) == 0) {
@@ -1783,34 +1781,6 @@ char *map_names[] = {
"tls_sock_map",
};
-int prog_attach_type[] = {
- BPF_SK_SKB_STREAM_PARSER,
- BPF_SK_SKB_STREAM_VERDICT,
- BPF_SK_SKB_STREAM_VERDICT,
- BPF_CGROUP_SOCK_OPS,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
-};
-
-int prog_type[] = {
- BPF_PROG_TYPE_SK_SKB,
- BPF_PROG_TYPE_SK_SKB,
- BPF_PROG_TYPE_SK_SKB,
- BPF_PROG_TYPE_SOCK_OPS,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
-};
-
static int populate_progs(char *bpf_file)
{
struct bpf_program *prog;
@@ -1829,17 +1799,10 @@ static int populate_progs(char *bpf_file)
return -1;
}
- bpf_object__for_each_program(prog, obj) {
- bpf_program__set_type(prog, prog_type[i]);
- bpf_program__set_expected_attach_type(prog,
- prog_attach_type[i]);
- i++;
- }
-
i = bpf_object__load(obj);
i = 0;
bpf_object__for_each_program(prog, obj) {
- prog_fd[i] = bpf_program__fd(prog);
+ progs[i] = prog;
i++;
}
@@ -1853,6 +1816,9 @@ static int populate_progs(char *bpf_file)
}
}
+ for (i = 0; i < ARRAY_SIZE(links); i++)
+ links[i] = NULL;
+
return 0;
}
@@ -1970,7 +1936,6 @@ static void test_selftests_ktls(int cg_fd, struct sockmap_options *opt)
static int test_selftest(int cg_fd, struct sockmap_options *opt)
{
-
test_selftests_sockmap(cg_fd, opt);
test_selftests_sockhash(cg_fd, opt);
test_selftests_ktls(cg_fd, opt);
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
index 7b5fc98838cd..3844f9b8232a 100644
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
@@ -139,14 +139,14 @@ out:
return ret;
}
-static int v6only_true(int fd, const struct post_socket_opts *opts)
+static int v6only_true(int fd, void *opts)
{
int mode = true;
return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &mode, sizeof(mode));
}
-static int v6only_false(int fd, const struct post_socket_opts *opts)
+static int v6only_false(int fd, void *opts)
{
int mode = false;
@@ -156,10 +156,6 @@ static int v6only_false(int fd, const struct post_socket_opts *opts)
int main(int argc, char **argv)
{
struct network_helper_opts opts = { 0 };
- struct sockaddr_in addr4;
- struct sockaddr_in6 addr6;
- struct sockaddr_in addr4dual;
- struct sockaddr_in6 addr6dual;
int server = -1;
int server_v6 = -1;
int server_dual = -1;
@@ -181,36 +177,17 @@ int main(int argc, char **argv)
goto err;
}
- memset(&addr4, 0, sizeof(addr4));
- addr4.sin_family = AF_INET;
- addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr4.sin_port = 0;
- memcpy(&addr4dual, &addr4, sizeof(addr4dual));
-
- memset(&addr6, 0, sizeof(addr6));
- addr6.sin6_family = AF_INET6;
- addr6.sin6_addr = in6addr_loopback;
- addr6.sin6_port = 0;
-
- memset(&addr6dual, 0, sizeof(addr6dual));
- addr6dual.sin6_family = AF_INET6;
- addr6dual.sin6_addr = in6addr_any;
- addr6dual.sin6_port = 0;
-
- server = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr4,
- sizeof(addr4), NULL);
+ server = start_server_str(AF_INET, SOCK_STREAM, "127.0.0.1", 0, NULL);
if (server == -1)
goto err;
opts.post_socket_cb = v6only_true;
- server_v6 = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6,
- sizeof(addr6), &opts);
+ server_v6 = start_server_str(AF_INET6, SOCK_STREAM, "::1", 0, &opts);
if (server_v6 == -1)
goto err;
opts.post_socket_cb = v6only_false;
- server_dual = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6dual,
- sizeof(addr6dual), &opts);
+ server_dual = start_server_str(AF_INET6, SOCK_STREAM, "::0", 0, &opts);
if (server_dual == -1)
goto err;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index df04bda1c927..610392dfc4fb 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1237,11 +1237,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
}
-struct libcap {
- struct __user_cap_header_struct hdr;
- struct __user_cap_data_struct data[2];
-};
-
static int set_admin(bool admin)
{
int err;
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 70e29f316fe7..465d196c7165 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -211,7 +211,7 @@ long ksym_get_addr(const char *name)
*/
int kallsyms_find(const char *sym, unsigned long long *addr)
{
- char type, name[500];
+ char type, name[500], *match;
unsigned long long value;
int err = 0;
FILE *f;
@@ -221,6 +221,17 @@ int kallsyms_find(const char *sym, unsigned long long *addr)
return -EINVAL;
while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
+ /* If CONFIG_LTO_CLANG_THIN is enabled, static variable/function
+ * symbols could be promoted to global due to cross-file inlining.
+ * In such cases, the clang compiler adds a .llvm.<hash> suffix
+ * to those symbols to avoid potential naming conflicts.
+ * Ignore the .llvm.<hash> suffix during symbol comparison.
+ */
+ if (type == 'd') {
+ match = strstr(name, ".llvm.");
+ if (match)
+ *match = '\0';
+ }
if (strcmp(name, sym) == 0) {
*addr = value;
goto out;
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index ab25a81fd3a1..d0cdd156cd55 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -76,7 +76,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
+ .errstr = "arg#0 expected pointer to ctx, but got PTR",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_pass_ctx", 2 },
},
@@ -276,6 +276,19 @@
.result = ACCEPT,
},
{
+ "calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_EXT,
+ .result = REJECT,
+ .errstr = "Tracing programs must provide btf_id",
+ .fixup_kfunc_btf_id = {
+ { "bpf_dynptr_from_skb", 0 },
+ },
+},
+{
"calls: basic sanity",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 0a9293a57211..90643ccc221d 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -39,12 +39,12 @@
.result = VERBOSE_ACCEPT,
.errstr =
"mark_precise: frame0: last_idx 26 first_idx 20\
- mark_precise: frame0: regs=r2 stack= before 25\
- mark_precise: frame0: regs=r2 stack= before 24\
- mark_precise: frame0: regs=r2 stack= before 23\
- mark_precise: frame0: regs=r2 stack= before 22\
- mark_precise: frame0: regs=r2 stack= before 20\
- mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: regs=r2,r9 stack= before 25\
+ mark_precise: frame0: regs=r2,r9 stack= before 24\
+ mark_precise: frame0: regs=r2,r9 stack= before 23\
+ mark_precise: frame0: regs=r2,r9 stack= before 22\
+ mark_precise: frame0: regs=r2,r9 stack= before 20\
+ mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: last_idx 19 first_idx 10\
mark_precise: frame0: regs=r2,r9 stack= before 19\
mark_precise: frame0: regs=r9 stack= before 18\
@@ -100,11 +100,11 @@
.errstr =
"26: (85) call bpf_probe_read_kernel#113\
mark_precise: frame0: last_idx 26 first_idx 22\
- mark_precise: frame0: regs=r2 stack= before 25\
- mark_precise: frame0: regs=r2 stack= before 24\
- mark_precise: frame0: regs=r2 stack= before 23\
- mark_precise: frame0: regs=r2 stack= before 22\
- mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: regs=r2,r9 stack= before 25\
+ mark_precise: frame0: regs=r2,r9 stack= before 24\
+ mark_precise: frame0: regs=r2,r9 stack= before 23\
+ mark_precise: frame0: regs=r2,r9 stack= before 22\
+ mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: last_idx 20 first_idx 20\
mark_precise: frame0: regs=r2,r9 stack= before 20\
mark_precise: frame0: parent state regs=r2,r9 stack=:\
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 2eac0895b0a1..8144fd145237 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -196,6 +196,12 @@ static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem
};
int ret;
+ if (umem->fill_size)
+ cfg.fill_size = umem->fill_size;
+
+ if (umem->comp_size)
+ cfg.comp_size = umem->comp_size;
+
if (umem->unaligned_mode)
cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
@@ -265,6 +271,10 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i
cfg.bind_flags |= XDP_SHARED_UMEM;
if (ifobject->mtu > MAX_ETH_PKT_SIZE)
cfg.bind_flags |= XDP_USE_SG;
+ if (umem->comp_size)
+ cfg.tx_size = umem->comp_size;
+ if (umem->fill_size)
+ cfg.rx_size = umem->fill_size;
txr = ifobject->tx_on ? &xsk->tx : NULL;
rxr = ifobject->rx_on ? &xsk->rx : NULL;
@@ -1616,7 +1626,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
buffers_to_fill = umem->num_frames;
else
- buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ buffers_to_fill = umem->fill_size;
ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
if (ret != buffers_to_fill)
@@ -1899,11 +1909,15 @@ static int testapp_validate_traffic(struct test_spec *test)
}
if (test->set_ring) {
- if (ifobj_tx->hw_ring_size_supp)
- return set_ring_size(ifobj_tx);
-
- ksft_test_result_skip("Changing HW ring size not supported.\n");
- return TEST_SKIP;
+ if (ifobj_tx->hw_ring_size_supp) {
+ if (set_ring_size(ifobj_tx)) {
+ ksft_test_result_skip("Failed to change HW ring size.\n");
+ return TEST_FAILURE;
+ }
+ } else {
+ ksft_test_result_skip("Changing HW ring size not supported.\n");
+ return TEST_SKIP;
+ }
}
xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
@@ -2441,7 +2455,7 @@ static int testapp_hw_sw_min_ring_size(struct test_spec *test)
static int testapp_hw_sw_max_ring_size(struct test_spec *test)
{
- u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2;
+ u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
int ret;
test->set_ring = true;
@@ -2449,7 +2463,8 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test)
test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
test->ifobj_rx->umem->num_frames = max_descs;
- test->ifobj_rx->xsk->rxqsize = max_descs;
+ test->ifobj_rx->umem->fill_size = max_descs;
+ test->ifobj_rx->umem->comp_size = max_descs;
test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
@@ -2457,9 +2472,12 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test)
if (ret)
return ret;
- /* Set batch_size to 4095 */
- test->ifobj_tx->xsk->batch_size = max_descs - 1;
- test->ifobj_rx->xsk->batch_size = max_descs - 1;
+ /* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when
+ * updating the Rx HW tail register.
+ */
+ test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ pkt_stream_replace(test, max_descs, MIN_PKT_SIZE);
return testapp_validate_traffic(test);
}
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 906de5fab7a3..885c948c5d83 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -80,6 +80,8 @@ struct xsk_umem_info {
void *buffer;
u32 frame_size;
u32 base_addr;
+ u32 fill_size;
+ u32 comp_size;
bool unaligned_mode;
};
diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
index b8703c499d28..dfec31fb9b30 100644
--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
@@ -130,7 +130,6 @@ int run_test(int cpu)
void suspend(void)
{
int power_state_fd;
- struct sigevent event = {};
int timerfd;
int err;
struct itimerspec spec = {};
diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
index b171fd53b004..632ab44737ec 100644
--- a/tools/testing/selftests/cachestat/test_cachestat.c
+++ b/tools/testing/selftests/cachestat/test_cachestat.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <stdio.h>
#include <stdbool.h>
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index 2732e0b29271..952e4448bf07 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
-test_memcontrol
test_core
-test_freezer
-test_kmem
-test_kill
test_cpu
test_cpuset
-test_zswap
+test_freezer
test_hugetlb_memcg
+test_kill
+test_kmem
+test_memcontrol
+test_pids
+test_zswap
wait_inotify
diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 16461dc0ffdf..1b897152bab6 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -6,26 +6,29 @@ all: ${HELPER_PROGS}
TEST_FILES := with_stress.sh
TEST_PROGS := test_stress.sh test_cpuset_prs.sh test_cpuset_v1_hp.sh
TEST_GEN_FILES := wait_inotify
-TEST_GEN_PROGS = test_memcontrol
-TEST_GEN_PROGS += test_kmem
-TEST_GEN_PROGS += test_core
-TEST_GEN_PROGS += test_freezer
-TEST_GEN_PROGS += test_kill
+# Keep the lists lexicographically sorted
+TEST_GEN_PROGS = test_core
TEST_GEN_PROGS += test_cpu
TEST_GEN_PROGS += test_cpuset
-TEST_GEN_PROGS += test_zswap
+TEST_GEN_PROGS += test_freezer
TEST_GEN_PROGS += test_hugetlb_memcg
+TEST_GEN_PROGS += test_kill
+TEST_GEN_PROGS += test_kmem
+TEST_GEN_PROGS += test_memcontrol
+TEST_GEN_PROGS += test_pids
+TEST_GEN_PROGS += test_zswap
LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h
include ../lib.mk
-$(OUTPUT)/test_memcontrol: cgroup_util.c
-$(OUTPUT)/test_kmem: cgroup_util.c
$(OUTPUT)/test_core: cgroup_util.c
-$(OUTPUT)/test_freezer: cgroup_util.c
-$(OUTPUT)/test_kill: cgroup_util.c
$(OUTPUT)/test_cpu: cgroup_util.c
$(OUTPUT)/test_cpuset: cgroup_util.c
-$(OUTPUT)/test_zswap: cgroup_util.c
+$(OUTPUT)/test_freezer: cgroup_util.c
$(OUTPUT)/test_hugetlb_memcg: cgroup_util.c
+$(OUTPUT)/test_kill: cgroup_util.c
+$(OUTPUT)/test_kmem: cgroup_util.c
+$(OUTPUT)/test_memcontrol: cgroup_util.c
+$(OUTPUT)/test_pids: cgroup_util.c
+$(OUTPUT)/test_zswap: cgroup_util.c
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index b5eb1be2248c..7c08cc153367 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -28,6 +28,14 @@ CPULIST=$(cat $CGROUP2/cpuset.cpus.effective)
NR_CPUS=$(lscpu | grep "^CPU(s):" | sed -e "s/.*:[[:space:]]*//")
[[ $NR_CPUS -lt 8 ]] && skip_test "Test needs at least 8 cpus available!"
+# Check to see if /dev/console exists and is writable
+if [[ -c /dev/console && -w /dev/console ]]
+then
+ CONSOLE=/dev/console
+else
+ CONSOLE=/dev/null
+fi
+
# Set verbose flag and delay factor
PROG=$1
VERBOSE=0
@@ -103,8 +111,8 @@ console_msg()
{
MSG=$1
echo "$MSG"
- echo "" > /dev/console
- echo "$MSG" > /dev/console
+ echo "" > $CONSOLE
+ echo "$MSG" > $CONSOLE
pause 0.01
}
@@ -161,6 +169,14 @@ test_add_proc()
# T = put a task into cgroup
# O<c>=<v> = Write <v> to CPU online file of <c>
#
+# ECPUs - effective CPUs of cpusets
+# Pstate - partition root state
+# ISOLCPUS - isolated CPUs (<icpus>[,<icpus2>])
+#
+# Note that if there are 2 fields in ISOLCPUS, the first one is for
+# sched-debug matching, which includes offline CPUs and single-CPU partitions,
+# while the second one is for matching cpuset.cpus.isolated.
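+# For example, ISOLCPUS "2-4,2-3" expects the sched-debug check to see CPUs 2-4
+# as isolated while cpuset.cpus.isolated contains 2-3.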
+#
SETUP_A123_PARTITIONS="C1-3:P1:S+ C2-3:P1:S+ C3:P1"
TEST_MATRIX=(
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
@@ -220,23 +236,29 @@ TEST_MATRIX=(
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3:P2 . . 0 A1:0-1,A2:2-3,A3:2-3 A1:P0,A2:P2 2-3"
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X3:P2 . . 0 A1:0-2,A2:3,A3:3 A1:P0,A2:P2 3"
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2 . 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:C3 . 0 A1:0-2,A2:1-2,A3:3 A1:P0,A3:P2 3"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:C3 . 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
" C0-3:S+ C1-3:S+ C2-3 C2-3 . . . P2 0 A1:0-3,A2:1-3,A3:2-3,B1:2-3 A1:P0,A3:P0,B1:P-2"
" C0-3:S+ C1-3:S+ C2-3 C4-5 . . . P2 0 B1:4-5 B1:P2 4-5"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2 0 A3:2-3,B1:4 A3:P2,B1:P2 2-4"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2:C1-3 P2 0 A3:2-3,B1:4 A3:P2,B1:P2 2-4"
" C0-3:S+ C1-3:S+ C2-3 C4 X1-3 X1-3:P2 P2 . 0 A2:1,A3:2-3 A2:P2,A3:P2 1-3"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2:C4-5 0 A3:2-3,B1:4-5 A3:P2,B1:P2 2-5"
+ " C4:X0-3:S+ X1-3:S+ X2-3 . . P2 . . 0 A1:4,A2:1-3,A3:1-3 A2:P2 1-3"
+ " C4:X0-3:S+ X1-3:S+ X2-3 . . . P2 . 0 A1:4,A2:4,A3:2-3 A3:P2 2-3"
# Nested remote/local partition tests
" C0-3:S+ C1-3:S+ C2-3 C4-5 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4-5 \
A1:P0,A2:P1,A3:P2,B1:P1 2-3"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4 \
A1:P0,A2:P1,A3:P2,B1:P1 2-4,2-3"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 . P1 0 A1:0-1,A2:2-3,A3:2-3,B1:4 \
+ A1:P0,A2:P1,A3:P0,B1:P1"
" C0-3:S+ C1-3:S+ C3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:2,A3:3,B1:4 \
A1:P0,A2:P1,A3:P2,B1:P1 2-4,3"
" C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X4:P1 . 0 A1:0-1,A2:2-3,A3:4 \
A1:P0,A2:P2,A3:P1 2-4,2-3"
+ " C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X3-4:P1 . 0 A1:0-1,A2:2,A3:3-4 \
+ A1:P0,A2:P2,A3:P1 2"
" C0-4:X2-4:S+ C1-4:X2-4:S+:P2 C2-4:X4:P1 \
. . X5 . . 0 A1:0-4,A2:1-4,A3:2-4 \
A1:P0,A2:P-2,A3:P-1"
@@ -262,8 +284,8 @@ TEST_MATRIX=(
. . X2-3 P2 . . 0 A1:0-2,A2:3,XA2:3 A2:P2 3"
# Invalid to valid local partition direct transition tests
- " C1-3:S+:P2 C2-3:X1:P2 . . . . . . 0 A1:1-3,XA1:1-3,A2:2-3:XA2: A1:P2,A2:P-2 1-3"
- " C1-3:S+:P2 C2-3:X1:P2 . . . X3:P2 . . 0 A1:1-2,XA1:1-3,A2:3:XA2:3 A1:P2,A2:P2 1-3"
+ " C1-3:S+:P2 X4:P2 . . . . . . 0 A1:1-3,XA1:1-3,A2:1-3:XA2: A1:P2,A2:P-2 1-3"
+ " C1-3:S+:P2 X4:P2 . . . X3:P2 . . 0 A1:1-2,XA1:1-3,A2:3:XA2:3 A1:P2,A2:P2 1-3"
" C0-3:P2 . . C4-6 C0-4 . . . 0 A1:0-4,B1:4-6 A1:P-2,B1:P0"
" C0-3:P2 . . C4-6 C0-4:C0-3 . . . 0 A1:0-3,B1:4-6 A1:P2,B1:P0 0-3"
" C0-3:P2 . . C3-5:C4-5 . . . . 0 A1:0-3,B1:4-5 A1:P2,B1:P0 0-3"
@@ -274,32 +296,26 @@ TEST_MATRIX=(
" C0-3:X1-3:S+:P2 C1-3:X2-3:S+:P2 C2-3:X3:P2 \
. . X4 . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
" C0-3:X1-3:S+:P2 C1-3:X2-3:S+:P2 C2-3:X3:P2 \
- . . C4 . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
+ . . C4:X . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
# Local partition CPU change tests
" C0-5:S+:P2 C4-5:S+:P1 . . . C3-5 . . 0 A1:0-2,A2:3-5 A1:P2,A2:P1 0-2"
" C0-5:S+:P2 C4-5:S+:P1 . . C1-5 . . . 0 A1:1-3,A2:4-5 A1:P2,A2:P1 1-3"
# cpus_allowed/exclusive_cpus update tests
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
- . C4 . P2 . 0 A1:4,A2:4,XA2:,XA3:,A3:4 \
+ . X:C4 . P2 . 0 A1:4,A2:4,XA2:,XA3:,A3:4 \
A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
. X1 . P2 . 0 A1:0-3,A2:1-3,XA1:1,XA2:,XA3:,A3:2-3 \
A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
- . . C3 P2 . 0 A1:0-2,A2:0-2,XA2:3,XA3:3,A3:3 \
- A1:P0,A3:P2 3"
- " C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
. . X3 P2 . 0 A1:0-2,A2:1-2,XA2:3,XA3:3,A3:3 \
A1:P0,A3:P2 3"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
. . X3 . . 0 A1:0-3,A2:1-3,XA2:3,XA3:3,A3:2-3 \
A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
- . . C3 . . 0 A1:0-3,A2:3,XA2:3,XA3:3,A3:3 \
- A1:P0,A3:P-2"
- " C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
- . C4 . . . 0 A1:4,A2:4,A3:4,XA1:,XA2:,XA3 \
+ . X4 . . . 0 A1:0-3,A2:1-3,A3:2-3,XA1:4,XA2:,XA3 \
A1:P0,A3:P-2"
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
@@ -346,6 +362,9 @@ TEST_MATRIX=(
" C0-1:P1 . . P1:C2-3 C0-2 . . . 0 A1:0-2,B1:2-3 A1:P-1,B1:P-1"
" C0-1 . . P1:C2-3 C0-2 . . . 0 A1:0-2,B1:2-3 A1:P0,B1:P-1"
+ # cpuset.cpus can overlap with sibling cpuset.cpus.exclusive but must not be subsumed by it
+ " C0-3 . . C4-5 X5 . . . 0 A1:0-3,B1:4-5"
+
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
# ------ ------ ------ ------ ------ ------ ------ ------ ---- ----- ------ --------
# Failure cases:
@@ -355,6 +374,9 @@ TEST_MATRIX=(
# Changes to cpuset.cpus.exclusive that violate exclusivity rule is rejected
" C0-3 . . C4-5 X0-3 . . X3-5 1 A1:0-3,B1:4-5"
+
+ # cpuset.cpus cannot be a subset of sibling cpuset.cpus.exclusive
+ " C0-3 . . C4-5 X3-5 . . . 1 A1:0-3,B1:4-5"
)
#
@@ -556,14 +578,15 @@ check_cgroup_states()
do
set -- $(echo $CHK | sed -e "s/:/ /g")
CGRP=$1
+ CGRP_DIR=$CGRP
STATE=$2
FILE=
EVAL=$(expr substr $STATE 2 2)
- [[ $CGRP = A2 ]] && CGRP=A1/A2
- [[ $CGRP = A3 ]] && CGRP=A1/A2/A3
+ [[ $CGRP = A2 ]] && CGRP_DIR=A1/A2
+ [[ $CGRP = A3 ]] && CGRP_DIR=A1/A2/A3
case $STATE in
- P*) FILE=$CGRP/cpuset.cpus.partition
+ P*) FILE=$CGRP_DIR/cpuset.cpus.partition
;;
*) echo "Unknown state: $STATE!"
exit 1
@@ -587,6 +610,16 @@ check_cgroup_states()
;;
esac
[[ $EVAL != $VAL ]] && return 1
+
+ #
+ # For a root partition, dump sched-domain info to the console if
+ # verbose mode is set, for manual comparison with the sched debug info.
+ #
+ [[ $VAL -eq 1 && $VERBOSE -gt 0 ]] && {
+ DOMS=$(cat $CGRP_DIR/cpuset.cpus.effective)
+ [[ -n "$DOMS" ]] &&
+ echo " [$CGRP] sched-domain: $DOMS" > $CONSOLE
+ }
done
return 0
}
@@ -694,9 +727,9 @@ null_isolcpus_check()
[[ $VERBOSE -gt 0 ]] || return 0
# Retry a few times before printing error
RETRY=0
- while [[ $RETRY -lt 5 ]]
+ while [[ $RETRY -lt 8 ]]
do
- pause 0.01
+ pause 0.02
check_isolcpus "."
[[ $? -eq 0 ]] && return 0
((RETRY++))
@@ -726,7 +759,7 @@ run_state_test()
while [[ $I -lt $CNT ]]
do
- echo "Running test $I ..." > /dev/console
+ echo "Running test $I ..." > $CONSOLE
[[ $VERBOSE -gt 1 ]] && {
echo ""
eval echo \${$TEST[$I]}
@@ -783,7 +816,7 @@ run_state_test()
while [[ $NEWLIST != $CPULIST && $RETRY -lt 8 ]]
do
# Wait a bit longer & recheck a few times
- pause 0.01
+ pause 0.02
((RETRY++))
NEWLIST=$(cat cpuset.cpus.effective)
done
diff --git a/tools/testing/selftests/cgroup/test_pids.c b/tools/testing/selftests/cgroup/test_pids.c
new file mode 100644
index 000000000000..9ecb83c6cc5c
--- /dev/null
+++ b/tools/testing/selftests/cgroup/test_pids.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <linux/limits.h>
+#include <signal.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+#include "cgroup_util.h"
+
+static int run_success(const char *cgroup, void *arg)
+{
+ return 0;
+}
+
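+/* Block in the child until a signal arrives, so it keeps occupying a pid slot. */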
+static int run_pause(const char *cgroup, void *arg)
+{
+ return pause();
+}
+
+/*
+ * This test checks that pids.max prevents forking new children above the
+ * specified limit in the cgroup.
+ */
+static int test_pids_max(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cg_pids;
+ int pid;
+
+ cg_pids = cg_name(root, "pids_test");
+ if (!cg_pids)
+ goto cleanup;
+
+ if (cg_create(cg_pids))
+ goto cleanup;
+
+ if (cg_read_strcmp(cg_pids, "pids.max", "max\n"))
+ goto cleanup;
+
+ if (cg_write(cg_pids, "pids.max", "2"))
+ goto cleanup;
+
+ if (cg_enter_current(cg_pids))
+ goto cleanup;
+
+ pid = cg_run_nowait(cg_pids, run_pause, NULL);
+ if (pid < 0)
+ goto cleanup;
+
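+	/* pids.max is 2 (this test + the paused child), so a third task must fail with EAGAIN. */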
+ if (cg_run_nowait(cg_pids, run_success, NULL) != -1 || errno != EAGAIN)
+ goto cleanup;
+
+ if (kill(pid, SIGINT))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_enter_current(root);
+ cg_destroy(cg_pids);
+ free(cg_pids);
+
+ return ret;
+}
+
+/*
+ * This test checks that pids.events is counted in the cgroup whose pids.max limit was hit
+ */
+static int test_pids_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cg_parent = NULL, *cg_child = NULL;
+ int pid;
+
+ cg_parent = cg_name(root, "pids_parent");
+ cg_child = cg_name(cg_parent, "pids_child");
+ if (!cg_parent || !cg_child)
+ goto cleanup;
+
+ if (cg_create(cg_parent))
+ goto cleanup;
+ if (cg_write(cg_parent, "cgroup.subtree_control", "+pids"))
+ goto cleanup;
+ if (cg_create(cg_child))
+ goto cleanup;
+
+ if (cg_write(cg_parent, "pids.max", "2"))
+ goto cleanup;
+
+ if (cg_read_strcmp(cg_child, "pids.max", "max\n"))
+ goto cleanup;
+
+ if (cg_enter_current(cg_child))
+ goto cleanup;
+
+ pid = cg_run_nowait(cg_child, run_pause, NULL);
+ if (pid < 0)
+ goto cleanup;
+
+ if (cg_run_nowait(cg_child, run_success, NULL) != -1 || errno != EAGAIN)
+ goto cleanup;
+
+ if (kill(pid, SIGINT))
+ goto cleanup;
+
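+	/* The parent's pids.max was the limit that was hit, so the "max" event is charged to the parent, not the child. */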
+ if (cg_read_key_long(cg_child, "pids.events", "max ") != 0)
+ goto cleanup;
+ if (cg_read_key_long(cg_parent, "pids.events", "max ") != 1)
+ goto cleanup;
+
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_enter_current(root);
+ if (cg_child)
+ cg_destroy(cg_child);
+ if (cg_parent)
+ cg_destroy(cg_parent);
+ free(cg_child);
+ free(cg_parent);
+
+ return ret;
+}
+
+
+
+#define T(x) { x, #x }
+struct pids_test {
+ int (*fn)(const char *root);
+ const char *name;
+} tests[] = {
+ T(test_pids_max),
+ T(test_pids_events),
+};
+#undef T
+
+int main(int argc, char **argv)
+{
+ char root[PATH_MAX];
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
+ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ /*
+ * Check that pids controller is available:
+ * pids is listed in cgroup.controllers
+ */
+ if (cg_read_strstr(root, "cgroup.controllers", "pids"))
+ ksft_exit_skip("pids controller isn't available\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "pids"))
+ if (cg_write(root, "cgroup.subtree_control", "+pids"))
+ ksft_exit_skip("Failed to set pids controller\n");
+
+ for (int i = 0; i < ARRAY_SIZE(tests); i++) {
+ switch (tests[i].fn(root)) {
+ case KSFT_PASS:
+ ksft_test_result_pass("%s\n", tests[i].name);
+ break;
+ case KSFT_SKIP:
+ ksft_test_result_skip("%s\n", tests[i].name);
+ break;
+ default:
+ ksft_test_result_fail("%s\n", tests[i].name);
+ break;
+ }
+ }
+
+ ksft_finished();
+}
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
index 5c997f17fcbd..b12f1f9babf8 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/testing/selftests/dma/dma_map_benchmark.c
@@ -33,7 +33,6 @@ int main(int argc, char **argv)
int granule = 1;
int cmd = DMA_MAP_BENCHMARK;
- char *p;
while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) {
switch (opt) {
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index 4933d045ab66..c9f2f48fc30f 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -11,6 +11,7 @@ TEST_PROGS = \
hw_stats_l3_gre.sh \
loopback.sh \
pp_alloc_fail.py \
+ rss_ctx.py \
#
TEST_FILES := \
diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
new file mode 100755
index 000000000000..931dbc36ca43
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
@@ -0,0 +1,522 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import datetime
+import random
+from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ge, ksft_lt
+from lib.py import NetDrvEpEnv
+from lib.py import EthtoolFamily, NetdevFamily
+from lib.py import KsftSkipEx, KsftFailEx
+from lib.py import rand_port
+from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure
+
+
+def _rss_key_str(key):
+ return ":".join(["{:02x}".format(x) for x in key])
+
+
+def _rss_key_rand(length):
+ return [random.randint(0, 255) for _ in range(length)]
+
+
+def get_rss(cfg, context=0):
+ return ethtool(f"-x {cfg.ifname} context {context}", json=True)[0]
+
+
+def get_drop_err_sum(cfg):
+ stats = ip("-s -s link show dev " + cfg.ifname, json=True)[0]
+ cnt = 0
+ for key in ['errors', 'dropped', 'over_errors', 'fifo_errors',
+ 'length_errors', 'crc_errors', 'missed_errors',
+ 'frame_errors']:
+ cnt += stats["stats64"]["rx"][key]
+ return cnt, stats["stats64"]["tx"]["carrier_changes"]
+
+
+def ethtool_create(cfg, act, opts):
+ output = ethtool(f"{act} {cfg.ifname} {opts}").stdout
+ # Output will be something like: "New RSS context is 1" or
+ # "Added rule with ID 7", we want the integer from the end
+ return int(output.split()[-1])
+
+
+def require_ntuple(cfg):
+ features = ethtool(f"-k {cfg.ifname}", json=True)[0]
+ if not features["ntuple-filters"]["active"]:
+ # ntuple is more of a capability than a config knob, don't bother
+ # trying to enable it (until some driver actually needs it).
+ raise KsftSkipEx("Ntuple filters not enabled on the device: " + str(features["ntuple-filters"]))
+
+
+# Get Rx packet counts for all queues, as a simple list of integers
+# if @prev is specified the prev counts will be subtracted
+def _get_rx_cnts(cfg, prev=None):
+ cfg.wait_hw_stats_settle()
+ data = cfg.netdevnl.qstats_get({"ifindex": cfg.ifindex, "scope": ["queue"]}, dump=True)
+ data = [x for x in data if x['queue-type'] == "rx"]
+ max_q = max([x["queue-id"] for x in data])
+ queue_stats = [0] * (max_q + 1)
+ for q in data:
+ queue_stats[q["queue-id"]] = q["rx-packets"]
+ if prev and q["queue-id"] < len(prev):
+ queue_stats[q["queue-id"]] -= prev[q["queue-id"]]
+ return queue_stats
+
+
+def _send_traffic_check(cfg, port, name, params):
+ # params is a dict with 3 possible keys:
+ # - "target": required, which queues we expect to get iperf traffic
+ # - "empty": optional, which queues should see no traffic at all
+ # - "noise": optional, which queues we expect to see low traffic;
+ # used for queues of the main context, since some background
+ # OS activity may use those queues while we're testing
+ # the value for each is a list, or some other iterable containing queue ids.
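+ # e.g. { 'target': (2, 3), 'noise': (0, 1) } expects iperf traffic on queues 2-3
+ # and at most background noise on queues 0-1.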
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+
+ directed = sum(cnts[i] for i in params['target'])
+
+ ksft_ge(directed, 20000, f"traffic on {name}: " + str(cnts))
+ if params.get('noise'):
+ ksft_lt(sum(cnts[i] for i in params['noise']), directed / 2,
+ "traffic on other queues:" + str(cnts))
+ if params.get('empty'):
+ ksft_eq(sum(cnts[i] for i in params['empty']), 0,
+ "traffic on inactive queues: " + str(cnts))
+
+
+def test_rss_key_indir(cfg):
+ """Test basics like updating the main RSS key and indirection table."""
+
+ if len(_get_rx_cnts(cfg)) < 2:
+ KsftSkipEx("Device has only one queue (or doesn't support queue stats)")
+
+ data = get_rss(cfg)
+ want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table']
+ for k in want_keys:
+ if k not in data:
+ raise KsftFailEx("ethtool results missing key: " + k)
+ if not data[k]:
+ raise KsftFailEx(f"ethtool results empty for '{k}': {data[k]}")
+
+ key_len = len(data['rss-hash-key'])
+
+ # Set the key
+ key = _rss_key_rand(key_len)
+ ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
+
+ data = get_rss(cfg)
+ ksft_eq(key, data['rss-hash-key'])
+
+ # Set the indirection table
+ ethtool(f"-X {cfg.ifname} equal 2")
+ reset_indir = defer(ethtool, f"-X {cfg.ifname} default")
+ data = get_rss(cfg)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(1, max(data['rss-indirection-table']))
+
+ # Check we only get traffic on the first 2 queues
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+ # 2 queues, 20k packets, must be at least 5k per queue
+ ksft_ge(cnts[0], 5000, "traffic on main context (1/2): " + str(cnts))
+ ksft_ge(cnts[1], 5000, "traffic on main context (2/2): " + str(cnts))
+ # The other queues should be unused
+ ksft_eq(sum(cnts[2:]), 0, "traffic on unused queues: " + str(cnts))
+
+ # Restore, and check traffic gets spread again
+ reset_indir.exec()
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+ # First two queues get less traffic than all the rest
+ ksft_lt(sum(cnts[:2]), sum(cnts[2:]), "traffic distributed: " + str(cnts))
+
+
+def test_rss_queue_reconfigure(cfg, main_ctx=True):
+ """Make sure queue changes can't override requested RSS config.
+
+ By default the main RSS table should change to include all queues.
+ When the user sets a specific RSS config the driver should preserve it,
+ even when the queue count changes. The driver should refuse to deactivate
+ queues used in the user-set RSS config.
+ """
+
+ if not main_ctx:
+ require_ntuple(cfg)
+
+ # Start with 4 queues, an arbitrary known number.
+ try:
+ qcnt = len(_get_rx_cnts(cfg))
+ ethtool(f"-L {cfg.ifname} combined 4")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test or qstat not supported")
+
+ if main_ctx:
+ ctx_id = 0
+ ctx_ref = ""
+ else:
+ ctx_id = ethtool_create(cfg, "-X", "context new")
+ ctx_ref = f"context {ctx_id}"
+ defer(ethtool, f"-X {cfg.ifname} {ctx_ref} delete")
+
+ # Indirection table should be distributing to all queues.
+ data = get_rss(cfg, context=ctx_id)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(3, max(data['rss-indirection-table']))
+
+ # Increase queues, indirection table should be distributing to all queues.
+ # It's unclear whether tables of additional contexts should be reset, too.
+ if main_ctx:
+ ethtool(f"-L {cfg.ifname} combined 5")
+ data = get_rss(cfg)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(4, max(data['rss-indirection-table']))
+ ethtool(f"-L {cfg.ifname} combined 4")
+
+ # Configure the table explicitly
+ port = rand_port()
+ ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 0 1")
+ if main_ctx:
+ other_key = 'empty'
+ defer(ethtool, f"-X {cfg.ifname} default")
+ else:
+ other_key = 'noise'
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+ ntuple = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
+
+ _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3),
+ other_key: (1, 2) })
+
+ # We should be able to increase queues, but table should be left untouched
+ ethtool(f"-L {cfg.ifname} combined 5")
+ data = get_rss(cfg, context=ctx_id)
+ ksft_eq({0, 3}, set(data['rss-indirection-table']))
+
+ _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3),
+ other_key: (1, 2, 4) })
+
+ # Setting queue count to 3 should fail, queue 3 is used
+ try:
+ ethtool(f"-L {cfg.ifname} combined 3")
+ except CmdExitFailure:
+ pass
+ else:
+ raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})")
+
+
+def test_rss_resize(cfg):
+ """Test resizing of the RSS table.
+
+ Some devices dynamically increase and decrease the size of the RSS
+ indirection table based on the number of enabled queues.
+ When that happens the driver must maintain the balance of entries
+ (preferably duplicating the smaller table).
+ """
+
+ channels = cfg.ethnl.channels_get({'header': {'dev-index': cfg.ifindex}})
+ ch_max = channels['combined-max']
+ qcnt = channels['combined-count']
+
+ if ch_max < 2:
+ raise KsftSkipEx(f"Not enough queues for the test: {ch_max}")
+
+ ethtool(f"-L {cfg.ifname} combined 2")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+
+ ethtool(f"-X {cfg.ifname} weight 1 7")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ ethtool(f"-L {cfg.ifname} combined {ch_max}")
+ data = get_rss(cfg)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(1, max(data['rss-indirection-table']))
+
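+ # The 1:7 weight configured before the resize must be preserved, so entries
+ # pointing at queue 1 should outnumber queue 0 entries seven to one.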
+ ksft_eq(7,
+ data['rss-indirection-table'].count(1) /
+ data['rss-indirection-table'].count(0),
+ f"Table imbalance after resize: {data['rss-indirection-table']}")
+
+
+def test_hitless_key_update(cfg):
+ """Test that flows may be rehashed without impacting traffic.
+
+ Some workloads may want to rehash the flows in response to an imbalance.
+ The most effective way to do that is to change the RSS key. Check that
+ changing the key does not cause link flaps or traffic disruption.
+
+ Disrupting traffic during a key update is not a bug, but it makes the
+ key update unusable for rehashing under load.
+ """
+ data = get_rss(cfg)
+ key_len = len(data['rss-hash-key'])
+
+ key = _rss_key_rand(key_len)
+
+ tgen = GenerateTraffic(cfg)
+ try:
+ errors0, carrier0 = get_drop_err_sum(cfg)
+ t0 = datetime.datetime.now()
+ ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
+ t1 = datetime.datetime.now()
+ errors1, carrier1 = get_drop_err_sum(cfg)
+ finally:
+ tgen.wait_pkts_and_stop(5000)
+
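+ # The key write should complete well under 200ms and cause no new Rx errors or carrier flaps.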
+ ksft_lt((t1 - t0).total_seconds(), 0.2)
+ ksft_eq(errors1 - errors0, 0)
+ ksft_eq(carrier1 - carrier0, 0)
+
+
+def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
+ """
+ Test separating traffic into RSS contexts.
+ Two queues are allocated for each context:
+ ctx0 ctx1 ctx2 ctx3
+ [0 1] [2 3] [4 5] [6 7] ...
+ """
+
+ require_ntuple(cfg)
+
+ requested_ctx_cnt = ctx_cnt
+
+ # Try to allocate more queues when necessary
+ qcnt = len(_get_rx_cnts(cfg))
+ if qcnt < 2 + 2 * ctx_cnt:
+ try:
+ ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}")
+ ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ ports = []
+
+ # Use queues 0 and 1 for normal traffic
+ ethtool(f"-X {cfg.ifname} equal 2")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ for i in range(ctx_cnt):
+ want_cfg = f"start {2 + i * 2} equal 2"
+ create_cfg = want_cfg if create_with_cfg else ""
+
+ try:
+ ctx_id = ethtool_create(cfg, "-X", f"context new {create_cfg}")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+ except CmdExitFailure:
+ # try to carry on and skip at the end
+ if i == 0:
+ raise
+ ksft_pr(f"Failed to create context {i + 1}, trying to test what we got")
+ ctx_cnt = i
+ break
+
+ if not create_with_cfg:
+ ethtool(f"-X {cfg.ifname} context {ctx_id} {want_cfg}")
+
+ # Sanity check the context we just created
+ data = get_rss(cfg, ctx_id)
+ ksft_eq(min(data['rss-indirection-table']), 2 + i * 2, "Unexpected context cfg: " + str(data))
+ ksft_eq(max(data['rss-indirection-table']), 2 + i * 2 + 1, "Unexpected context cfg: " + str(data))
+
+ ports.append(rand_port())
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+ ntuple = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
+
+ for i in range(ctx_cnt):
+ _send_traffic_check(cfg, ports[i], f"context {i}",
+ { 'target': (2+i*2, 3+i*2),
+ 'noise': (0, 1),
+ 'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt)) })
+
+ if requested_ctx_cnt != ctx_cnt:
+ raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}")
+
+
+def test_rss_context4(cfg):
+ test_rss_context(cfg, 4)
+
+
+def test_rss_context32(cfg):
+ test_rss_context(cfg, 32)
+
+
+def test_rss_context4_create_with_cfg(cfg):
+ test_rss_context(cfg, 4, create_with_cfg=True)
+
+
+def test_rss_context_queue_reconfigure(cfg):
+ test_rss_queue_reconfigure(cfg, main_ctx=False)
+
+
+def test_rss_context_out_of_order(cfg, ctx_cnt=4):
+ """
+ Test separating traffic into RSS contexts.
+ Contexts are removed in semi-random order, and steering re-tested
+ to make sure removal doesn't break steering to surviving contexts.
+ Test requires 3 contexts to work.
+ """
+
+ require_ntuple(cfg)
+
+ requested_ctx_cnt = ctx_cnt
+
+ # Try to allocate more queues when necessary
+ qcnt = len(_get_rx_cnts(cfg))
+ if qcnt < 2 + 2 * ctx_cnt:
+ try:
+ ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}")
+ ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ ntuple = []
+ ctx = []
+ ports = []
+
+ def remove_ctx(idx):
+ ntuple[idx].exec()
+ ntuple[idx] = None
+ ctx[idx].exec()
+ ctx[idx] = None
+
+ def check_traffic():
+ for i in range(ctx_cnt):
+ if ctx[i]:
+ expected = {
+ 'target': (2+i*2, 3+i*2),
+ 'noise': (0, 1),
+ 'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt))
+ }
+ else:
+ expected = {
+ 'target': (0, 1),
+ 'empty': range(2, 2+2*ctx_cnt)
+ }
+
+ _send_traffic_check(cfg, ports[i], f"context {i}", expected)
+
+ # Use queues 0 and 1 for normal traffic
+ ethtool(f"-X {cfg.ifname} equal 2")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ for i in range(ctx_cnt):
+ ctx_id = ethtool_create(cfg, "-X", f"context new start {2 + i * 2} equal 2")
+ ctx.append(defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete"))
+
+ ports.append(rand_port())
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ ntuple.append(defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}"))
+
+ check_traffic()
+
+ # Remove middle context
+ remove_ctx(ctx_cnt // 2)
+ check_traffic()
+
+ # Remove first context
+ remove_ctx(0)
+ check_traffic()
+
+ # Remove last context
+ remove_ctx(-1)
+ check_traffic()
+
+ if requested_ctx_cnt != ctx_cnt:
+ raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}")
+
+
+def test_rss_context_overlap(cfg, other_ctx=0):
+ """
+ Test contexts overlapping with each other.
+ Use 4 queues for the main context, but only queues 2 and 3 for context 1.
+ """
+
+ require_ntuple(cfg)
+
+ queue_cnt = len(_get_rx_cnts(cfg))
+ if queue_cnt < 4:
+ try:
+ ksft_pr(f"Increasing queue count {queue_cnt} -> 4")
+ ethtool(f"-L {cfg.ifname} combined 4")
+ defer(ethtool, f"-L {cfg.ifname} combined {queue_cnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ if other_ctx == 0:
+ ethtool(f"-X {cfg.ifname} equal 4")
+ defer(ethtool, f"-X {cfg.ifname} default")
+ else:
+ other_ctx = ethtool_create(cfg, "-X", "context new")
+ ethtool(f"-X {cfg.ifname} context {other_ctx} equal 4")
+ defer(ethtool, f"-X {cfg.ifname} context {other_ctx} delete")
+
+ ctx_id = ethtool_create(cfg, "-X", "context new")
+ ethtool(f"-X {cfg.ifname} context {ctx_id} start 2 equal 2")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ port = rand_port()
+ if other_ctx:
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {other_ctx}"
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ ntuple = defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
+
+ # Test the main context
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+
+ ksft_ge(sum(cnts[ :4]), 20000, "traffic on main context: " + str(cnts))
+ ksft_ge(sum(cnts[ :2]), 7000, "traffic on main context (1/2): " + str(cnts))
+ ksft_ge(sum(cnts[2:4]), 7000, "traffic on main context (2/2): " + str(cnts))
+ if other_ctx == 0:
+ ksft_eq(sum(cnts[4: ]), 0, "traffic on other queues: " + str(cnts))
+
+ # Now create a rule for context 1 and make sure traffic goes to a subset
+ if other_ctx:
+ ntuple.exec()
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+
+ directed = sum(cnts[2:4])
+ ksft_lt(sum(cnts[ :2]), directed / 2, "traffic on main context: " + str(cnts))
+ ksft_ge(directed, 20000, "traffic on extra context: " + str(cnts))
+ if other_ctx == 0:
+ ksft_eq(sum(cnts[4: ]), 0, "traffic on other queues: " + str(cnts))
+
+
+def test_rss_context_overlap2(cfg):
+ test_rss_context_overlap(cfg, True)
+
+
+def main() -> None:
+ with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
+ cfg.ethnl = EthtoolFamily()
+ cfg.netdevnl = NetdevFamily()
+
+ ksft_run([test_rss_key_indir, test_rss_queue_reconfigure,
+ test_rss_resize, test_hitless_key_update,
+ test_rss_context, test_rss_context4, test_rss_context32,
+ test_rss_context_queue_reconfigure,
+ test_rss_context_overlap, test_rss_context_overlap2,
+ test_rss_context_out_of_order, test_rss_context4_create_with_cfg],
+ args=(cfg, ))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index edcedd7bffab..a5e800b8f103 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
import os
+import time
from pathlib import Path
from lib.py import KsftSkipEx, KsftXfailEx
-from lib.py import cmd, ip
+from lib.py import cmd, ethtool, ip
from lib.py import NetNS, NetdevSimDev
from .remote import Remote
@@ -82,6 +83,8 @@ class NetDrvEpEnv:
self.env = _load_env_file(src_path)
+ self._stats_settle_time = None
+
# Things we try to destroy
self.remote = None
# These are for local testing state
@@ -222,3 +225,17 @@ class NetDrvEpEnv:
if remote:
if not self._require_cmd(comm, "remote"):
raise KsftSkipEx("Test requires (remote) command: " + comm)
+
+ def wait_hw_stats_settle(self):
+ """
+ Wait for HW stats to become consistent; some devices DMA HW stats
+ periodically, so events won't be reflected until the next sync.
+ Good drivers will tell us via ethtool what their sync period is.
+ """
+ if self._stats_settle_time is None:
+ data = ethtool("-c " + self.ifname, json=True)[0]
+
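+ # 25ms of slack on top of the device's stats-block-usecs coalescing period (converted to seconds).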
+ self._stats_settle_time = 0.025 + \
+ data.get('stats-block-usecs', 0) / 1000 / 1000
+
+ time.sleep(self._stats_settle_time)
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index abdb677bdb1c..d9c10613ae67 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -5,28 +5,45 @@ import time
from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
class GenerateTraffic:
- def __init__(self, env):
+ def __init__(self, env, port=None):
env.require_cmd("iperf3", remote=True)
self.env = env
- port = rand_port()
- self._iperf_server = cmd(f"iperf3 -s -p {port}", background=True)
+ if port is None:
+ port = rand_port()
+ self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
wait_port_listen(port)
time.sleep(0.1)
self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400",
background=True, host=env.remote)
# Wait for traffic to ramp up
- pkt = ip("-s link show dev " + env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
+ if not self._wait_pkts(pps=1000):
+ self.stop(verbose=True)
+ raise Exception("iperf3 traffic did not ramp up")
+
+ def _wait_pkts(self, pkt_cnt=None, pps=None):
+ """
+ Wait until we've seen pkt_cnt or until traffic ramps up to pps.
+ Only one of pkt_cnt or pps can be specified.
+ """
+ pkt_start = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
for _ in range(50):
time.sleep(0.1)
- now = ip("-s link show dev " + env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
- if now - pkt > 1000:
- return
- pkt = now
- self.stop(verbose=True)
- raise Exception("iperf3 traffic did not ramp up")
+ pkt_now = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
+ if pps:
+ if pkt_now - pkt_start > pps / 10:
+ return True
+ pkt_start = pkt_now
+ elif pkt_cnt:
+ if pkt_now - pkt_start > pkt_cnt:
+ return True
+ return False
+
+ def wait_pkts_and_stop(self, pkt_cnt):
+ failed = not self._wait_pkts(pkt_cnt=pkt_cnt)
+ self.stop(verbose=failed)
def stop(self, verbose=None):
self._iperf_client.process(terminate=True)
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
index 76f1ab4898d9..e1ad623146d7 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
@@ -15,6 +15,13 @@ source $lib_dir/mirror_lib.sh
source $lib_dir/mirror_gre_lib.sh
source $lib_dir/mirror_gre_topo_lib.sh
+ALL_TESTS="
+ test_keyful
+ test_soft
+ test_tos_fixed
+ test_ttl_inherit
+"
+
setup_keyful()
{
tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \
@@ -118,15 +125,15 @@ test_span_gre_ttl_inherit()
RET=0
ip link set dev $tundev type $type ttl inherit
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev type $type ttl 100
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: no offload on TTL of inherit ($tcflags)"
+ log_test "$what: no offload on TTL of inherit"
}
test_span_gre_tos_fixed()
@@ -138,61 +145,49 @@ test_span_gre_tos_fixed()
RET=0
ip link set dev $tundev type $type tos 0x10
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev type $type tos inherit
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: no offload on a fixed TOS ($tcflags)"
+ log_test "$what: no offload on a fixed TOS"
}
test_span_failable()
{
- local should_fail=$1; shift
local tundev=$1; shift
local what=$1; shift
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- if ((should_fail)); then
- fail_test_span_gre_dir $tundev ingress
- else
- quick_test_span_gre_dir $tundev ingress
- fi
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: should_fail=$should_fail ($tcflags)"
+ log_test "fail $what"
}
-test_failable()
+test_keyful()
{
- local should_fail=$1; shift
-
- test_span_failable $should_fail gt6-key "mirror to keyful gretap"
- test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay"
+ test_span_failable gt6-key "mirror to keyful gretap"
}
-test_sw()
+test_soft()
{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- test_failable 0
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
+ test_span_failable gt6-soft "mirror to gretap w/ soft underlay"
}
-test_hw()
+test_tos_fixed()
{
- test_failable 1
-
test_span_gre_tos_fixed gt4 gretap "mirror to gretap"
test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap"
+}
+
+test_ttl_inherit()
+{
test_span_gre_ttl_inherit gt4 gretap "mirror to gretap"
test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap"
}
@@ -202,16 +197,6 @@ trap cleanup EXIT
setup_prepare
setup_wait
-if ! tc_offload_check; then
- check_err 1 "Could not test offloaded functionality"
- log_test "mlxsw-specific tests for mirror to gretap"
- exit
-fi
-
-tcflags="skip_hw"
-test_sw
-
-tcflags="skip_sw"
-test_hw
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
index e5589e2fca85..d43093310e23 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
@@ -79,7 +79,7 @@ mirror_gre_tunnels_create()
cat >> $MIRROR_GRE_BATCH_FILE <<-EOF
filter add dev $swp1 ingress pref 1000 \
protocol ipv6 \
- flower $tcflags dst_ip $match_dip \
+ flower skip_sw dst_ip $match_dip \
action mirred egress mirror dev $tun
EOF
done
@@ -107,7 +107,7 @@ mirror_gre_tunnels_destroy()
done
}
-__mirror_gre_test()
+mirror_gre_test()
{
local count=$1; shift
local should_fail=$1; shift
@@ -131,20 +131,6 @@ __mirror_gre_test()
done
}
-mirror_gre_test()
-{
- local count=$1; shift
- local should_fail=$1; shift
-
- if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
- check_err 1 "Could not test offloaded functionality"
- return
- fi
-
- tcflags="skip_sw"
- __mirror_gre_test $count $should_fail
-}
-
mirror_gre_setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index 31252bc8775e..4994bea5daf8 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -11,7 +11,7 @@ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test delta_simple_test \
delta_two_masks_one_key_test delta_simple_rehash_test \
bloom_simple_test bloom_complex_test bloom_delta_test \
- max_erp_entries_test max_group_size_test"
+ max_erp_entries_test max_group_size_test collision_test"
NUM_NETIFS=2
source $lib_dir/lib.sh
source $lib_dir/tc_common.sh
@@ -457,7 +457,7 @@ delta_two_masks_one_key_test()
{
# If 2 keys are the same and only differ in mask in a way that
# they belong under the same ERP (second is delta of the first),
- # there should be no C-TCAM spill.
+ # there should be a C-TCAM spill.
RET=0
@@ -474,8 +474,8 @@ delta_two_masks_one_key_test()
tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
action drop"
- tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
- check_err $? "incorrect C-TCAM spill while inserting the second rule"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 1
+ check_err $? "C-TCAM spill did not happen while inserting the second rule"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
@@ -1087,6 +1087,53 @@ max_group_size_test()
log_test "max ACL group size test ($tcflags). max size $max_size"
}
+collision_test()
+{
+ # Filters cannot share an eRP if in the common unmasked part (i.e.,
+ # without the delta bits) they have the same values. If the driver does
+ # not prevent such a configuration (by spilling into the C-TCAM), then
+ # multiple entries will be present in the device with the same key,
+ # leading to collisions and a reduced scale.
+ #
+ # Create such a scenario and make sure all the filters are successfully
+ # added.
+
+ RET=0
+
+ local ret
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ # Add a single dst_ip/24 filter and multiple dst_ip/32 filters that all
+ # have the same values in the common unmasked part (dst_ip/24).
+
+ tc filter add dev $h2 ingress pref 1 proto ipv4 handle 101 \
+ flower $tcflags dst_ip 198.51.100.0/24 \
+ action drop
+
+ for i in {0..255}; do
+ tc filter add dev $h2 ingress pref 2 proto ipv4 \
+ handle $((102 + i)) \
+ flower $tcflags dst_ip 198.51.100.${i}/32 \
+ action drop
+ ret=$?
+ [[ $ret -ne 0 ]] && break
+ done
+
+ check_err $ret "failed to add all the filters"
+
+ for i in {255..0}; do
+ tc filter del dev $h2 ingress pref 2 proto ipv4 \
+ handle $((102 + i)) flower
+ done
+
+ tc filter del dev $h2 ingress pref 1 proto ipv4 handle 101 flower
+
+ log_test "collision test ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/drivers/net/virtio_net/config b/tools/testing/selftests/drivers/net/virtio_net/config
index f35de0542b60..bcf7555eaffe 100644
--- a/tools/testing/selftests/drivers/net/virtio_net/config
+++ b/tools/testing/selftests/drivers/net/virtio_net/config
@@ -1,2 +1,8 @@
-CONFIG_VIRTIO_NET=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_CGROUP_BPF=y
+CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=m
CONFIG_VIRTIO_DEBUG=y
+CONFIG_VIRTIO_NET=y
diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile
new file mode 100644
index 000000000000..03d0449d307c
--- /dev/null
+++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for IFS (In Field Scan) selftests
+
+TEST_PROGS := test_ifs.sh
+
+include ../../../../../lib.mk
diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh
new file mode 100755
index 000000000000..8b68964b29f4
--- /dev/null
+++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh
@@ -0,0 +1,494 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test the functionality of the Intel IFS (In Field Scan) driver.
+#
+
+# Matched with kselftest framework: tools/testing/selftests/kselftest.h
+readonly KSFT_PASS=0
+readonly KSFT_FAIL=1
+readonly KSFT_XFAIL=2
+readonly KSFT_SKIP=4
+
+readonly CPU_SYSFS="/sys/devices/system/cpu"
+readonly CPU_OFFLINE_SYSFS="${CPU_SYSFS}/offline"
+readonly IMG_PATH="/lib/firmware/intel/ifs_0"
+readonly IFS_SCAN_MODE="0"
+readonly IFS_ARRAY_BIST_SCAN_MODE="1"
+readonly IFS_PATH="/sys/devices/virtual/misc/intel_ifs"
+readonly IFS_SCAN_SYSFS_PATH="${IFS_PATH}_${IFS_SCAN_MODE}"
+readonly IFS_ARRAY_BIST_SYSFS_PATH="${IFS_PATH}_${IFS_ARRAY_BIST_SCAN_MODE}"
+readonly RUN_TEST="run_test"
+readonly STATUS="status"
+readonly DETAILS="details"
+readonly STATUS_PASS="pass"
+readonly PASS="PASS"
+readonly FAIL="FAIL"
+readonly INFO="INFO"
+readonly XFAIL="XFAIL"
+readonly SKIP="SKIP"
+readonly IFS_NAME="intel_ifs"
+readonly ALL="all"
+readonly SIBLINGS="siblings"
+
+# Matches arch/x86/include/asm/intel-family.h and the
+# drivers/platform/x86/intel/ifs/core.c requirements as follows:
+readonly SAPPHIRERAPIDS_X="8f"
+readonly EMERALDRAPIDS_X="cf"
+
+readonly INTEL_FAM6="06"
+
+LOOP_TIMES=3
+FML=""
+MODEL=""
+STEPPING=""
+CPU_FMS=""
+TRUE="true"
+FALSE="false"
+RESULT=$KSFT_PASS
+IMAGE_NAME=""
+INTERVAL_TIME=1
+OFFLINE_CPUS=""
+# For IFS cleanup tags
+ORIGIN_IFS_LOADED=""
+IFS_IMAGE_NEED_RESTORE=$FALSE
+IFS_LOG="/tmp/ifs_logs.$$"
+RANDOM_CPU=""
+DEFAULT_IMG_ID=""
+
+append_log()
+{
+ echo -e "$1" | tee -a "$IFS_LOG"
+}
+
+online_offline_cpu_list()
+{
+ local on_off=$1
+ local target_cpus=$2
+ local cpu=""
+ local cpu_start=""
+ local cpu_end=""
+ local i=""
+
+ if [[ -n "$target_cpus" ]]; then
+ for cpu in $(echo "$target_cpus" | tr ',' ' '); do
+ if [[ "$cpu" == *"-"* ]]; then
+ cpu_start=""
+ cpu_end=""
+ i=""
+ cpu_start=$(echo "$cpu" | cut -d "-" -f 1)
+ cpu_end=$(echo "$cpu" | cut -d "-" -f 2)
+ for((i=cpu_start;i<=cpu_end;i++)); do
+ append_log "[$INFO] echo $on_off > \
+${CPU_SYSFS}/cpu${i}/online"
+ echo "$on_off" > "$CPU_SYSFS"/cpu"$i"/online
+ done
+ else
+ set_target_cpu "$on_off" "$cpu"
+ fi
+ done
+ fi
+}
+
+ifs_scan_result_summary()
+{
+ local failed_info pass_num skip_num fail_num
+
+ if [[ -e "$IFS_LOG" ]]; then
+ failed_info=$(grep ^"\[${FAIL}\]" "$IFS_LOG")
+ fail_num=$(grep -c ^"\[${FAIL}\]" "$IFS_LOG")
+ skip_num=$(grep -c ^"\[${SKIP}\]" "$IFS_LOG")
+ pass_num=$(grep -c ^"\[${PASS}\]" "$IFS_LOG")
+
+ if [[ "$fail_num" -ne 0 ]]; then
+ RESULT=$KSFT_FAIL
+ echo "[$INFO] IFS test failure summary:"
+ echo "$failed_info"
+ elif [[ "$skip_num" -ne 0 ]]; then
+ RESULT=$KSFT_SKIP
+ fi
+ echo "[$INFO] IFS test pass:$pass_num, skip:$skip_num, fail:$fail_num"
+ else
+ echo "[$INFO] No file $IFS_LOG for IFS scan summary"
+ fi
+}
+
+ifs_cleanup()
+{
+ echo "[$INFO] Restore environment after IFS test"
+
+ # Restore the original IFS image if the backup step was performed
+ [[ "$IFS_IMAGE_NEED_RESTORE" == "$TRUE" ]] && {
+ mv -f "$IMG_PATH"/"$IMAGE_NAME"_origin "$IMG_PATH"/"$IMAGE_NAME"
+ }
+
+ # Restore the CPUs to the state before testing
+ [[ -z "$OFFLINE_CPUS" ]] || online_offline_cpu_list "0" "$OFFLINE_CPUS"
+
+ lsmod | grep -q "$IFS_NAME" && [[ "$ORIGIN_IFS_LOADED" == "$FALSE" ]] && {
+ echo "[$INFO] modprobe -r $IFS_NAME"
+ modprobe -r "$IFS_NAME"
+ }
+
+ ifs_scan_result_summary
+ [[ -e "$IFS_LOG" ]] && rm -rf "$IFS_LOG"
+
+ echo "[RESULT] IFS test exit with $RESULT"
+ exit "$RESULT"
+}
+
+do_cmd()
+{
+ local cmd=$*
+ local ret=""
+
+ append_log "[$INFO] $cmd"
+ eval "$cmd"
+ ret=$?
+ if [[ $ret -ne 0 ]]; then
+ append_log "[$FAIL] $cmd failed. Return code is $ret"
+ RESULT=$KSFT_XFAIL
+ ifs_cleanup
+ fi
+}
+
+test_exit()
+{
+ local info=$1
+ RESULT=$2
+
+ declare -A EXIT_MAP
+ EXIT_MAP[$KSFT_PASS]=$PASS
+ EXIT_MAP[$KSFT_FAIL]=$FAIL
+ EXIT_MAP[$KSFT_XFAIL]=$XFAIL
+ EXIT_MAP[$KSFT_SKIP]=$SKIP
+
+ append_log "[${EXIT_MAP[$RESULT]}] $info"
+ ifs_cleanup
+}
+
+online_all_cpus()
+{
+ local off_cpus=""
+
+ OFFLINE_CPUS=$(cat "$CPU_OFFLINE_SYSFS")
+ online_offline_cpu_list "1" "$OFFLINE_CPUS"
+
+ off_cpus=$(cat "$CPU_OFFLINE_SYSFS")
+ if [[ -z "$off_cpus" ]]; then
+ append_log "[$INFO] All CPUs are online."
+ else
+ append_log "[$XFAIL] There is offline cpu:$off_cpus after online all cpu!"
+ RESULT=$KSFT_XFAIL
+ ifs_cleanup
+ fi
+}
+
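+# Compose the family-model-stepping string (e.g. 06-8f-<stepping>) used to name IFS image files as <fms>-<batch>.scan.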
+get_cpu_fms()
+{
+ FML=$(grep -m 1 "family" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}')
+ MODEL=$(grep -m 1 "model" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}')
+ STEPPING=$(grep -m 1 "stepping" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}')
+ CPU_FMS="${FML}-${MODEL}-${STEPPING}"
+}
+
+check_cpu_ifs_support_interval_time()
+{
+ get_cpu_fms
+
+ if [[ "$FML" != "$INTEL_FAM6" ]]; then
+ test_exit "CPU family:$FML does not support IFS" "$KSFT_SKIP"
+ fi
+
+ # The microcode requires a minimum time interval between IFS scans on the same CPU:
+ case $MODEL in
+ "$SAPPHIRERAPIDS_X")
+ INTERVAL_TIME=180;
+ ;;
+ "$EMERALDRAPIDS_X")
+ INTERVAL_TIME=30;
+ ;;
+ *)
+ # Set default interval time for other platforms
+ INTERVAL_TIME=1;
+ append_log "[$INFO] CPU FML:$FML model:0x$MODEL, default: 1s interval time"
+ ;;
+ esac
+}
+
+check_ifs_loaded()
+{
+ local ifs_info=""
+
+ ifs_info=$(lsmod | grep "$IFS_NAME")
+ if [[ -z "$ifs_info" ]]; then
+ append_log "[$INFO] modprobe $IFS_NAME"
+ modprobe "$IFS_NAME" || {
+ test_exit "Check if CONFIG_INTEL_IFS is set to m or \
+platform doesn't support ifs" "$KSFT_SKIP"
+ }
+ ifs_info=$(lsmod | grep "$IFS_NAME")
+ [[ -n "$ifs_info" ]] || test_exit "No ifs module listed by lsmod" "$KSFT_FAIL"
+ fi
+}
+
+test_ifs_scan_entry()
+{
+ local ifs_info=""
+
+ ifs_info=$(lsmod | grep "$IFS_NAME")
+
+ if [[ -z "$ifs_info" ]]; then
+ ORIGIN_IFS_LOADED="$FALSE"
+ check_ifs_loaded
+ else
+ ORIGIN_IFS_LOADED="$TRUE"
+ append_log "[$INFO] Module $IFS_NAME is already loaded"
+ fi
+
+ if [[ -d "$IFS_SCAN_SYSFS_PATH" ]]; then
+ append_log "[$PASS] IFS sysfs $IFS_SCAN_SYSFS_PATH entry is created\n"
+ else
+ test_exit "No sysfs entry in $IFS_SCAN_SYSFS_PATH" "$KSFT_FAIL"
+ fi
+}
+
+load_image()
+{
+ local image_id=$1
+ local image_info=""
+ local ret=""
+
+ check_ifs_loaded
+ if [[ -e "${IMG_PATH}/${IMAGE_NAME}" ]]; then
+ append_log "[$INFO] echo 0x$image_id > ${IFS_SCAN_SYSFS_PATH}/current_batch"
+ echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null
+ ret=$?
+ [[ "$ret" -eq 0 ]] || {
+ append_log "[$FAIL] Load ifs image $image_id failed with ret:$ret\n"
+ return "$ret"
+ }
+ image_info=$(cat ${IFS_SCAN_SYSFS_PATH}/current_batch)
+ if [[ "$image_info" == 0x"$image_id" ]]; then
+ append_log "[$PASS] load IFS current_batch:$image_info"
+ else
+ append_log "[$FAIL] current_batch:$image_info is not expected:$image_id"
+ return "$KSFT_FAIL"
+ fi
+ else
+ append_log "[$FAIL] No IFS image file ${IMG_PATH}/${IMAGE_NAME}"\
+ return "$KSFT_FAIL"
+ fi
+ return 0
+}
+
+test_load_origin_ifs_image()
+{
+ local image_id=$1
+
+ IMAGE_NAME="${CPU_FMS}-${image_id}.scan"
+
+ load_image "$image_id" || return $?
+ return 0
+}
+
+test_load_bad_ifs_image()
+{
+ local image_id=$1
+
+ IMAGE_NAME="${CPU_FMS}-${image_id}.scan"
+
+ do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME} ${IMG_PATH}/${IMAGE_NAME}_origin"
+
+ # Set IFS_IMAGE_NEED_RESTORE to true before corrupting the original ifs image file
+ IFS_IMAGE_NEED_RESTORE=$TRUE
+ do_cmd "dd if=/dev/urandom of=${IMG_PATH}/${IMAGE_NAME} bs=1K count=6 2>/dev/null"
+
+ # Negative test: loading the corrupted image is expected to fail
+ append_log "[$INFO] echo 0x$image_id > ${IFS_SCAN_SYSFS_PATH}/current_batch"
+ echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null
+ ret=$?
+ if [[ "$ret" -ne 0 ]]; then
+ append_log "[$PASS] Load invalid ifs image failed with ret:$ret not 0 as expected"
+ else
+ append_log "[$FAIL] Load invalid ifs image ret:$ret unexpectedly"
+ fi
+
+ do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME}_origin ${IMG_PATH}/${IMAGE_NAME}"
+ IFS_IMAGE_NEED_RESTORE=$FALSE
+}
+
+test_bad_and_origin_ifs_image()
+{
+ local image_id=$1
+
+ append_log "[$INFO] Test loading bad and then loading original IFS image:"
+ test_load_origin_ifs_image "$image_id" || return $?
+ test_load_bad_ifs_image "$image_id"
+ # Load the original image again and make sure it still works
+ test_load_origin_ifs_image "$image_id" || return $?
+ append_log "[$INFO] Loading invalid IFS image and then loading initial image passed.\n"
+}
+
+ifs_test_cpu()
+{
+ local ifs_mode=$1
+ local cpu_num=$2
+ local image_id status details ret result result_info
+
+ echo "$cpu_num" > "$IFS_PATH"_"$ifs_mode"/"$RUN_TEST"
+ ret=$?
+
+ status=$(cat "${IFS_PATH}_${ifs_mode}/${STATUS}")
+ details=$(cat "${IFS_PATH}_${ifs_mode}/${DETAILS}")
+
+ if [[ "$ret" -eq 0 && "$status" == "$STATUS_PASS" ]]; then
+ result="$PASS"
+ else
+ result="$FAIL"
+ fi
+
+ cpu_num=$(cat "${CPU_SYSFS}/cpu${cpu_num}/topology/thread_siblings_list")
+
+ # There is no image file for IFS ARRAY BIST scan
+ if [[ -e "${IFS_PATH}_${ifs_mode}/current_batch" ]]; then
+ image_id=$(cat "${IFS_PATH}_${ifs_mode}/current_batch")
+ result_info=$(printf "[%s] ifs_%1d cpu(s):%s, current_batch:0x%02x, \
+ret:%2d, status:%s, details:0x%016x" \
+ "$result" "$ifs_mode" "$cpu_num" "$image_id" "$ret" \
+ "$status" "$details")
+ else
+ result_info=$(printf "[%s] ifs_%1d cpu(s):%s, ret:%2d, status:%s, details:0x%016x" \
+ "$result" "$ifs_mode" "$cpu_num" "$ret" "$status" "$details")
+ fi
+
+ append_log "$result_info"
+}
+
+ifs_test_cpus()
+{
+ local cpus_type=$1
+ local ifs_mode=$2
+ local image_id=$3
+ local cpu_max_num=""
+ local cpu_num=""
+
+ case "$cpus_type" in
+ "$ALL")
+ cpu_max_num=$(($(nproc) - 1))
+ cpus=$(seq 0 $cpu_max_num)
+ ;;
+ "$SIBLINGS")
+ cpus=$(cat ${CPU_SYSFS}/cpu*/topology/thread_siblings_list \
+ | sed -e 's/,.*//' \
+ | sed -e 's/-.*//' \
+ | sort -n \
+ | uniq)
+ ;;
+ *)
+ test_exit "Invalid cpus_type:$cpus_type" "$KSFT_XFAIL"
+ ;;
+ esac
+
+ for cpu_num in $cpus; do
+ ifs_test_cpu "$ifs_mode" "$cpu_num"
+ done
+
+ if [[ -z "$image_id" ]]; then
+ append_log "[$INFO] ifs_$ifs_mode test $cpus_type cpus completed\n"
+ else
+ append_log "[$INFO] ifs_$ifs_mode $cpus_type cpus with $CPU_FMS-$image_id.scan \
+completed\n"
+ fi
+}
+
+test_ifs_same_cpu_loop()
+{
+ local ifs_mode=$1
+ local cpu_num=$2
+ local loop_times=$3
+
+ append_log "[$INFO] Test ifs mode $ifs_mode on CPU:$cpu_num for $loop_times rounds:"
+ [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]] && {
+ load_image "$DEFAULT_IMG_ID" || return $?
+ }
+ for (( i=1; i<=loop_times; i++ )); do
+ append_log "[$INFO] Loop iteration: $i in total of $loop_times"
+ # Only IFS scan needs the interval time
+ if [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]]; then
+ do_cmd "sleep $INTERVAL_TIME"
+ elif [[ "$ifs_mode" == "$IFS_ARRAY_BIST_SCAN_MODE" ]]; then
+ true
+ else
+ test_exit "Invalid ifs_mode:$ifs_mode" "$KSFT_XFAIL"
+ fi
+
+ ifs_test_cpu "$ifs_mode" "$cpu_num"
+ done
+ append_log "[$INFO] $loop_times rounds of ifs_$ifs_mode test on CPU:$cpu_num completed.\n"
+}
+
+test_ifs_scan_available_imgs()
+{
+ local image_ids=""
+ local image_id=""
+
+ append_log "[$INFO] Test ifs scan with available images:"
+ image_ids=$(find "$IMG_PATH" -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \
+ 2>/dev/null \
+ | sort \
+ | awk -F "-" '{print $NF}' \
+ | cut -d "." -f 1)
+
+ for image_id in $image_ids; do
+ load_image "$image_id" || return $?
+
+ ifs_test_cpus "$SIBLINGS" "$IFS_SCAN_MODE" "$image_id"
+ # IFS scan requires a time interval between scans on the same CPU
+ do_cmd "sleep $INTERVAL_TIME"
+ done
+}
+
+prepare_ifs_test_env()
+{
+ local max_cpu=""
+
+ check_cpu_ifs_support_interval_time
+
+ online_all_cpus
+ max_cpu=$(($(nproc) - 1))
+ RANDOM_CPU=$(shuf -i 0-$max_cpu -n 1)
+
+ DEFAULT_IMG_ID=$(find $IMG_PATH -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \
+ 2>/dev/null \
+ | sort \
+ | head -n 1 \
+ | awk -F "-" '{print $NF}' \
+ | cut -d "." -f 1)
+}
+
+test_ifs()
+{
+ prepare_ifs_test_env
+
+ test_ifs_scan_entry
+
+ if [[ -z "$DEFAULT_IMG_ID" ]]; then
+ append_log "[$SKIP] No proper ${IMG_PATH}/${CPU_FMS}-*.scan, skip ifs_0 scan"
+ else
+ test_bad_and_origin_ifs_image "$DEFAULT_IMG_ID"
+ test_ifs_scan_available_imgs
+ test_ifs_same_cpu_loop "$IFS_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES"
+ fi
+
+ if [[ -d "$IFS_ARRAY_BIST_SYSFS_PATH" ]]; then
+ ifs_test_cpus "$SIBLINGS" "$IFS_ARRAY_BIST_SCAN_MODE"
+ test_ifs_same_cpu_loop "$IFS_ARRAY_BIST_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES"
+ else
+ append_log "[$SKIP] No $IFS_ARRAY_BIST_SYSFS_PATH, skip IFS ARRAY BIST scan"
+ fi
+}
+
+trap ifs_cleanup SIGTERM SIGINT
+test_ifs
+ifs_cleanup
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index fb4472ddffd8..ab67d58cfab7 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -3,8 +3,13 @@ CFLAGS = -Wall
CFLAGS += -Wno-nonnull
CFLAGS += -D_GNU_SOURCE
+ALIGNS := 0x1000 0x200000 0x1000000
+ALIGN_PIES := $(patsubst %,load_address.%,$(ALIGNS))
+ALIGN_STATIC_PIES := $(patsubst %,load_address.static.%,$(ALIGNS))
+ALIGNMENT_TESTS := $(ALIGN_PIES) $(ALIGN_STATIC_PIES)
+
TEST_PROGS := binfmt_script.py
-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
+TEST_GEN_PROGS := execveat non-regular $(ALIGNMENT_TESTS)
TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
# Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile
@@ -28,9 +33,9 @@ $(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat
$(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
cp $< $@
chmod -x $@
-$(OUTPUT)/load_address_4096: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
-$(OUTPUT)/load_address_2097152: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
-$(OUTPUT)/load_address_16777216: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
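+# The max-page-size for each alignment test is taken from the suffix of its target name
+# (e.g. load_address.0x200000 -> 0x200000).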
+$(OUTPUT)/load_address.0x%: load_address.c
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
+ -fPIE -pie $< -o $@
+$(OUTPUT)/load_address.static.0x%: load_address.c
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
+ -fPIE -static-pie $< -o $@
diff --git a/tools/testing/selftests/exec/load_address.c b/tools/testing/selftests/exec/load_address.c
index 17e3207d34ae..8257fddba8c8 100644
--- a/tools/testing/selftests/exec/load_address.c
+++ b/tools/testing/selftests/exec/load_address.c
@@ -5,11 +5,13 @@
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#include "../kselftest.h"
struct Statistics {
unsigned long long load_address;
unsigned long long alignment;
+ bool interp;
};
int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
@@ -26,11 +28,20 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
stats->alignment = 0;
for (i = 0; i < info->dlpi_phnum; i++) {
+ unsigned long long align;
+
+ if (info->dlpi_phdr[i].p_type == PT_INTERP) {
+ stats->interp = true;
+ continue;
+ }
+
if (info->dlpi_phdr[i].p_type != PT_LOAD)
continue;
- if (info->dlpi_phdr[i].p_align > stats->alignment)
- stats->alignment = info->dlpi_phdr[i].p_align;
+ align = info->dlpi_phdr[i].p_align;
+
+ if (align > stats->alignment)
+ stats->alignment = align;
}
return 1; // Terminate dl_iterate_phdr.
@@ -38,27 +49,57 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
int main(int argc, char **argv)
{
- struct Statistics extracted;
- unsigned long long misalign;
+ struct Statistics extracted = { };
+ unsigned long long misalign, pow2;
+ bool interp_needed;
+ char buf[1024];
+ FILE *maps;
int ret;
ksft_print_header();
- ksft_set_plan(1);
+ ksft_set_plan(4);
+
+ /* Dump maps file for debugging reference. */
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps)
+ ksft_exit_fail_msg("FAILED: /proc/self/maps: %s\n", strerror(errno));
+ while (fgets(buf, sizeof(buf), maps)) {
+ ksft_print_msg("%s", buf);
+ }
+ fclose(maps);
+ /* Walk the program headers. */
ret = dl_iterate_phdr(ExtractStatistics, &extracted);
if (ret != 1)
ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n");
- if (extracted.alignment == 0)
- ksft_exit_fail_msg("FAILED: No alignment found\n");
- else if (extracted.alignment & (extracted.alignment - 1))
- ksft_exit_fail_msg("FAILED: Alignment is not a power of 2\n");
+ /* Report our findings. */
+ ksft_print_msg("load_address=%#llx alignment=%#llx\n",
+ extracted.load_address, extracted.alignment);
+
+ /* If we're named with ".static." we expect no INTERP. */
+ interp_needed = strstr(argv[0], ".static.") == NULL;
+
+ /* Were we built as expected? */
+ ksft_test_result(interp_needed == extracted.interp,
+ "%s INTERP program header %s\n",
+ interp_needed ? "Wanted" : "Unwanted",
+ extracted.interp ? "seen" : "missing");
+
+ /* Did we find an alignment? */
+ ksft_test_result(extracted.alignment != 0,
+ "Alignment%s found\n", extracted.alignment ? "" : " NOT");
+
+ /* Is the alignment sane? */
+ pow2 = extracted.alignment & (extracted.alignment - 1);
+ ksft_test_result(pow2 == 0,
+ "Alignment is%s a power of 2: %#llx\n",
+ pow2 == 0 ? "" : " NOT", extracted.alignment);
+ /* Is the load address aligned? */
misalign = extracted.load_address & (extracted.alignment - 1);
- if (misalign)
- ksft_exit_fail_msg("FAILED: alignment = %llu, load_address = %llu\n",
- extracted.alignment, extracted.load_address);
+ ksft_test_result(misalign == 0, "Load Address is %saligned (%#llx)\n",
+ misalign ? "MIS" : "", misalign);
- ksft_test_result_pass("Completed\n");
ksft_finished();
}
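The alignment checks in load_address.c rely on two standard bit tricks: a power of two has exactly one bit set, so x & (x - 1) is zero only for powers of two, and addr & (align - 1) keeps precisely the low-order bits that must be clear for addr to be align-aligned. A minimal standalone sketch, using a hypothetical load address rather than anything from the patch, spells this out:

#include <assert.h>

int main(void)
{
	unsigned long long align = 0x200000;          /* 2 MiB, one of ALIGNS */
	unsigned long long addr  = 0x555555400000ULL; /* hypothetical load address */

	/* Power-of-two check: clearing the lowest set bit must yield zero. */
	assert((align & (align - 1)) == 0);

	/* Alignment check: the low log2(align) bits of addr must be zero. */
	assert((addr & (align - 1)) == 0);

	return 0;
}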
diff --git a/tools/testing/selftests/fchmodat2/Makefile b/tools/testing/selftests/fchmodat2/Makefile
index 71ec34bf1501..4373cea79b79 100644
--- a/tools/testing/selftests/fchmodat2/Makefile
+++ b/tools/testing/selftests/fchmodat2/Makefile
@@ -1,6 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan $(KHDR_INCLUDES)
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES)
+
+# gcc requires -static-libasan in order to ensure that Address Sanitizer's
+# library is the first one loaded. However, clang already statically links the
+# Address Sanitizer if -fsanitize is specified. Therefore, simply omit
+# -static-libasan for clang builds.
+ifeq ($(LLVM),)
+ CFLAGS += -static-libasan
+endif
+
TEST_GEN_PROGS := fchmodat2_test
include ../lib.mk
diff --git a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
index 759f86e7d263..2862aae58b79 100644
--- a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
+++ b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <inttypes.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/filesystems/statmount/Makefile b/tools/testing/selftests/filesystems/statmount/Makefile
index 07a0d5b545ca..3af3136e35a4 100644
--- a/tools/testing/selftests/filesystems/statmount/Makefile
+++ b/tools/testing/selftests/filesystems/statmount/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES)
-TEST_GEN_PROGS := statmount_test
+TEST_GEN_PROGS := statmount_test statmount_test_ns
include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/statmount/statmount.h b/tools/testing/selftests/filesystems/statmount/statmount.h
new file mode 100644
index 000000000000..f4294bab9d73
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/statmount.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __STATMOUNT_H
+#define __STATMOUNT_H
+
+#include <stdint.h>
+#include <linux/mount.h>
+#include <asm/unistd.h>
+
+static inline int statmount(uint64_t mnt_id, uint64_t mnt_ns_id, uint64_t mask,
+ struct statmount *buf, size_t bufsize,
+ unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = mask,
+ };
+
+ if (mnt_ns_id) {
+ req.size = MNT_ID_REQ_SIZE_VER1;
+ req.mnt_ns_id = mnt_ns_id;
+ }
+
+ return syscall(__NR_statmount, &req, buf, bufsize, flags);
+}
+
+static ssize_t listmount(uint64_t mnt_id, uint64_t mnt_ns_id,
+ uint64_t last_mnt_id, uint64_t list[], size_t num,
+ unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = last_mnt_id,
+ };
+
+ if (mnt_ns_id) {
+ req.size = MNT_ID_REQ_SIZE_VER1;
+ req.mnt_ns_id = mnt_ns_id;
+ }
+
+ return syscall(__NR_listmount, &req, list, num, flags);
+}
+
+#endif /* __STATMOUNT_H */
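The new shared header keeps the raw syscall plumbing out of the individual tests: a zero mnt_ns_id leaves the request at MNT_ID_REQ_SIZE_VER0 and queries the caller's own mount namespace, while a non-zero id (for example one obtained via the NS_GET_MNTNS_ID ioctl) bumps it to MNT_ID_REQ_SIZE_VER1 and targets that namespace. A minimal usage sketch follows; it is not part of the patch and assumes recent linux/mount.h and linux/stat.h UAPI headers:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/stat.h>
#include <linux/stat.h>
#include "statmount.h"

int main(void)
{
	struct statx sx;
	struct statmount sm;
	uint64_t list[32];
	ssize_t n;

	/* statmount() wants the unique 64-bit mount ID, here for "/". */
	if (statx(AT_FDCWD, "/", 0, STATX_MNT_ID_UNIQUE, &sx) == -1)
		return 1;

	/* Fixed-size part only; no string fields are requested in the mask. */
	if (statmount(sx.stx_mnt_id, 0, STATMOUNT_MNT_BASIC, &sm, sizeof(sm), 0) == 0)
		printf("old mount id: %u\n", sm.mnt_id_old);

	/* List the mounts visible in the current mount namespace. */
	n = listmount(LSMT_ROOT, 0, 0, list, 32, 0);
	for (ssize_t i = 0; i < n; i++)
		printf("mount id: %llu\n", (unsigned long long)list[i]);

	return 0;
}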
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test.c b/tools/testing/selftests/filesystems/statmount/statmount_test.c
index e6d7c4f1c85b..c773334bbcc9 100644
--- a/tools/testing/selftests/filesystems/statmount/statmount_test.c
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test.c
@@ -4,17 +4,15 @@
#include <assert.h>
#include <stddef.h>
-#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/statfs.h>
-#include <linux/mount.h>
#include <linux/stat.h>
-#include <asm/unistd.h>
+#include "statmount.h"
#include "../../kselftest.h"
static const char *const known_fs[] = {
@@ -36,18 +34,6 @@ static const char *const known_fs[] = {
"ufs", "v7", "vboxsf", "vfat", "virtiofs", "vxfs", "xenfs", "xfs",
"zonefs", NULL };
-static int statmount(uint64_t mnt_id, uint64_t mask, struct statmount *buf,
- size_t bufsize, unsigned int flags)
-{
- struct mnt_id_req req = {
- .size = MNT_ID_REQ_SIZE_VER0,
- .mnt_id = mnt_id,
- .param = mask,
- };
-
- return syscall(__NR_statmount, &req, buf, bufsize, flags);
-}
-
static struct statmount *statmount_alloc(uint64_t mnt_id, uint64_t mask, unsigned int flags)
{
size_t bufsize = 1 << 15;
@@ -56,7 +42,7 @@ static struct statmount *statmount_alloc(uint64_t mnt_id, uint64_t mask, unsigne
int ret;
for (;;) {
- ret = statmount(mnt_id, mask, tmp, bufsize, flags);
+ ret = statmount(mnt_id, 0, mask, tmp, bufsize, flags);
if (ret != -1)
break;
if (tofree)
@@ -121,12 +107,20 @@ static char root_mntpoint[] = "/tmp/statmount_test_root.XXXXXX";
static int orig_root;
static uint64_t root_id, parent_id;
static uint32_t old_root_id, old_parent_id;
-
+static FILE *f_mountinfo;
static void cleanup_namespace(void)
{
- fchdir(orig_root);
- chroot(".");
+ int ret;
+
+ ret = fchdir(orig_root);
+ if (ret == -1)
+ ksft_perror("fchdir to original root");
+
+ ret = chroot(".");
+ if (ret == -1)
+ ksft_perror("chroot to original root");
+
umount2(root_mntpoint, MNT_DETACH);
rmdir(root_mntpoint);
}
@@ -138,7 +132,7 @@ static void setup_namespace(void)
uid_t uid = getuid();
gid_t gid = getgid();
- ret = unshare(CLONE_NEWNS|CLONE_NEWUSER);
+ ret = unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWPID);
if (ret == -1)
ksft_exit_fail_msg("unsharing mountns and userns: %s\n",
strerror(errno));
@@ -149,6 +143,11 @@ static void setup_namespace(void)
sprintf(buf, "0 %d 1", gid);
write_file("/proc/self/gid_map", buf);
+ f_mountinfo = fopen("/proc/self/mountinfo", "re");
+ if (!f_mountinfo)
+ ksft_exit_fail_msg("failed to open mountinfo: %s\n",
+ strerror(errno));
+
ret = mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL);
if (ret == -1)
ksft_exit_fail_msg("making mount tree private: %s\n",
@@ -208,25 +207,13 @@ static int setup_mount_tree(int log2_num)
return 0;
}
-static ssize_t listmount(uint64_t mnt_id, uint64_t last_mnt_id,
- uint64_t list[], size_t num, unsigned int flags)
-{
- struct mnt_id_req req = {
- .size = MNT_ID_REQ_SIZE_VER0,
- .mnt_id = mnt_id,
- .param = last_mnt_id,
- };
-
- return syscall(__NR_listmount, &req, list, num, flags);
-}
-
static void test_listmount_empty_root(void)
{
ssize_t res;
const unsigned int size = 32;
uint64_t list[size];
- res = listmount(LSMT_ROOT, 0, list, size, 0);
+ res = listmount(LSMT_ROOT, 0, 0, list, size, 0);
if (res == -1) {
ksft_test_result_fail("listmount: %s\n", strerror(errno));
return;
@@ -251,7 +238,7 @@ static void test_statmount_zero_mask(void)
struct statmount sm;
int ret;
- ret = statmount(root_id, 0, &sm, sizeof(sm), 0);
+ ret = statmount(root_id, 0, 0, &sm, sizeof(sm), 0);
if (ret == -1) {
ksft_test_result_fail("statmount zero mask: %s\n",
strerror(errno));
@@ -277,7 +264,7 @@ static void test_statmount_mnt_basic(void)
int ret;
uint64_t mask = STATMOUNT_MNT_BASIC;
- ret = statmount(root_id, mask, &sm, sizeof(sm), 0);
+ ret = statmount(root_id, 0, mask, &sm, sizeof(sm), 0);
if (ret == -1) {
ksft_test_result_fail("statmount mnt basic: %s\n",
strerror(errno));
@@ -337,7 +324,7 @@ static void test_statmount_sb_basic(void)
struct statx sx;
struct statfs sf;
- ret = statmount(root_id, mask, &sm, sizeof(sm), 0);
+ ret = statmount(root_id, 0, mask, &sm, sizeof(sm), 0);
if (ret == -1) {
ksft_test_result_fail("statmount sb basic: %s\n",
strerror(errno));
@@ -462,6 +449,88 @@ static void test_statmount_fs_type(void)
free(sm);
}
+static void test_statmount_mnt_opts(void)
+{
+ struct statmount *sm;
+ const char *statmount_opts;
+ char *line = NULL;
+ size_t len = 0;
+
+ sm = statmount_alloc(root_id, STATMOUNT_MNT_BASIC | STATMOUNT_MNT_OPTS,
+ 0);
+ if (!sm) {
+ ksft_test_result_fail("statmount mnt opts: %s\n",
+ strerror(errno));
+ return;
+ }
+
+ while (getline(&line, &len, f_mountinfo) != -1) {
+ int i;
+ char *p, *p2;
+ unsigned int old_mnt_id;
+
+ old_mnt_id = atoi(line);
+ if (old_mnt_id != sm->mnt_id_old)
+ continue;
+
+ for (p = line, i = 0; p && i < 5; i++)
+ p = strchr(p + 1, ' ');
+ if (!p)
+ continue;
+
+ p2 = strchr(p + 1, ' ');
+ if (!p2)
+ continue;
+ *p2 = '\0';
+ p = strchr(p2 + 1, '-');
+ if (!p)
+ continue;
+ for (p++, i = 0; p && i < 2; i++)
+ p = strchr(p + 1, ' ');
+ if (!p)
+ continue;
+ p++;
+
+ /* skip generic superblock options */
+ if (strncmp(p, "ro", 2) == 0)
+ p += 2;
+ else if (strncmp(p, "rw", 2) == 0)
+ p += 2;
+ if (*p == ',')
+ p++;
+ if (strncmp(p, "sync", 4) == 0)
+ p += 4;
+ if (*p == ',')
+ p++;
+ if (strncmp(p, "dirsync", 7) == 0)
+ p += 7;
+ if (*p == ',')
+ p++;
+ if (strncmp(p, "lazytime", 8) == 0)
+ p += 8;
+ if (*p == ',')
+ p++;
+ p2 = strrchr(p, '\n');
+ if (p2)
+ *p2 = '\0';
+
+ statmount_opts = sm->str + sm->mnt_opts;
+ if (strcmp(statmount_opts, p) != 0)
+ ksft_test_result_fail(
+ "unexpected mount options: '%s' != '%s'\n",
+ statmount_opts, p);
+ else
+ ksft_test_result_pass("statmount mount options\n");
+ free(sm);
+ free(line);
+ return;
+ }
+
+	ksft_test_result_fail("didn't find mount entry\n");
+ free(sm);
+ free(line);
+}
+
static void test_statmount_string(uint64_t mask, size_t off, const char *name)
{
struct statmount *sm;
@@ -498,14 +567,14 @@ static void test_statmount_string(uint64_t mask, size_t off, const char *name)
exactsize = sm->size;
shortsize = sizeof(*sm) + i;
- ret = statmount(root_id, mask, sm, exactsize, 0);
+ ret = statmount(root_id, 0, mask, sm, exactsize, 0);
if (ret == -1) {
ksft_test_result_fail("statmount exact size: %s\n",
strerror(errno));
goto out;
}
errno = 0;
- ret = statmount(root_id, mask, sm, shortsize, 0);
+ ret = statmount(root_id, 0, mask, sm, shortsize, 0);
if (ret != -1 || errno != EOVERFLOW) {
ksft_test_result_fail("should have failed with EOVERFLOW: %s\n",
strerror(errno));
@@ -533,7 +602,7 @@ static void test_listmount_tree(void)
if (res == -1)
return;
- num = res = listmount(LSMT_ROOT, 0, list, size, 0);
+ num = res = listmount(LSMT_ROOT, 0, 0, list, size, 0);
if (res == -1) {
ksft_test_result_fail("listmount: %s\n", strerror(errno));
return;
@@ -545,7 +614,7 @@ static void test_listmount_tree(void)
}
for (i = 0; i < size - step;) {
- res = listmount(LSMT_ROOT, i ? list2[i - 1] : 0, list2 + i, step, 0);
+ res = listmount(LSMT_ROOT, 0, i ? list2[i - 1] : 0, list2 + i, step, 0);
if (res == -1)
ksft_test_result_fail("short listmount: %s\n",
strerror(errno));
@@ -577,18 +646,18 @@ int main(void)
int ret;
uint64_t all_mask = STATMOUNT_SB_BASIC | STATMOUNT_MNT_BASIC |
STATMOUNT_PROPAGATE_FROM | STATMOUNT_MNT_ROOT |
- STATMOUNT_MNT_POINT | STATMOUNT_FS_TYPE;
+ STATMOUNT_MNT_POINT | STATMOUNT_FS_TYPE | STATMOUNT_MNT_NS_ID;
ksft_print_header();
- ret = statmount(0, 0, NULL, 0, 0);
+ ret = statmount(0, 0, 0, NULL, 0, 0);
assert(ret == -1);
if (errno == ENOSYS)
ksft_exit_skip("statmount() syscall not supported\n");
setup_namespace();
- ksft_set_plan(14);
+ ksft_set_plan(15);
test_listmount_empty_root();
test_statmount_zero_mask();
test_statmount_mnt_basic();
@@ -596,6 +665,7 @@ int main(void)
test_statmount_mnt_root();
test_statmount_mnt_point();
test_statmount_fs_type();
+ test_statmount_mnt_opts();
test_statmount_string(STATMOUNT_MNT_ROOT, str_off(mnt_root), "mount root");
test_statmount_string(STATMOUNT_MNT_POINT, str_off(mnt_point), "mount point");
test_statmount_string(STATMOUNT_FS_TYPE, str_off(fs_type), "fs type");
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c
new file mode 100644
index 000000000000..e044f5fc57fd
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <linux/nsfs.h>
+#include <linux/stat.h>
+
+#include "statmount.h"
+#include "../../kselftest.h"
+
+#define NSID_PASS 0
+#define NSID_FAIL 1
+#define NSID_SKIP 2
+#define NSID_ERROR 3
+
+static void handle_result(int ret, const char *testname)
+{
+ if (ret == NSID_PASS)
+ ksft_test_result_pass("%s\n", testname);
+ else if (ret == NSID_FAIL)
+ ksft_test_result_fail("%s\n", testname);
+ else if (ret == NSID_ERROR)
+ ksft_exit_fail_msg("%s\n", testname);
+ else
+ ksft_test_result_skip("%s\n", testname);
+}
+
+static inline int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ ksft_print_msg("waitpid returned -1, errno=%d\n", errno);
+ return -1;
+ }
+
+ if (!WIFEXITED(status)) {
+ ksft_print_msg(
+ "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n",
+ WIFSIGNALED(status), WTERMSIG(status));
+ return -1;
+ }
+
+ ret = WEXITSTATUS(status);
+ return ret;
+}
+
+static int get_mnt_ns_id(const char *mnt_ns, uint64_t *mnt_ns_id)
+{
+ int fd = open(mnt_ns, O_RDONLY);
+
+ if (fd < 0) {
+ ksft_print_msg("failed to open for ns %s: %s\n",
+ mnt_ns, strerror(errno));
+ sleep(60);
+ return NSID_ERROR;
+ }
+
+ if (ioctl(fd, NS_GET_MNTNS_ID, mnt_ns_id) < 0) {
+ ksft_print_msg("failed to get the nsid for ns %s: %s\n",
+ mnt_ns, strerror(errno));
+ return NSID_ERROR;
+ }
+ close(fd);
+ return NSID_PASS;
+}
+
+static int get_mnt_id(const char *path, uint64_t *mnt_id)
+{
+ struct statx sx;
+ int ret;
+
+ ret = statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &sx);
+ if (ret == -1) {
+ ksft_print_msg("retrieving unique mount ID for %s: %s\n", path,
+ strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (!(sx.stx_mask & STATX_MNT_ID_UNIQUE)) {
+ ksft_print_msg("no unique mount ID available for %s\n", path);
+ return NSID_ERROR;
+ }
+
+ *mnt_id = sx.stx_mnt_id;
+ return NSID_PASS;
+}
+
+static int write_file(const char *path, const char *val)
+{
+ int fd = open(path, O_WRONLY);
+ size_t len = strlen(val);
+ int ret;
+
+ if (fd == -1) {
+ ksft_print_msg("opening %s for write: %s\n", path, strerror(errno));
+ return NSID_ERROR;
+ }
+
+ ret = write(fd, val, len);
+ if (ret == -1) {
+ ksft_print_msg("writing to %s: %s\n", path, strerror(errno));
+ return NSID_ERROR;
+ }
+ if (ret != len) {
+ ksft_print_msg("short write to %s\n", path);
+ return NSID_ERROR;
+ }
+
+ ret = close(fd);
+ if (ret == -1) {
+ ksft_print_msg("closing %s\n", path);
+ return NSID_ERROR;
+ }
+
+ return NSID_PASS;
+}
+
+static int setup_namespace(void)
+{
+ int ret;
+ char buf[32];
+ uid_t uid = getuid();
+ gid_t gid = getgid();
+
+ ret = unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWPID);
+ if (ret == -1)
+ ksft_exit_fail_msg("unsharing mountns and userns: %s\n",
+ strerror(errno));
+
+ sprintf(buf, "0 %d 1", uid);
+ ret = write_file("/proc/self/uid_map", buf);
+ if (ret != NSID_PASS)
+ return ret;
+ ret = write_file("/proc/self/setgroups", "deny");
+ if (ret != NSID_PASS)
+ return ret;
+ sprintf(buf, "0 %d 1", gid);
+ ret = write_file("/proc/self/gid_map", buf);
+ if (ret != NSID_PASS)
+ return ret;
+
+ ret = mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL);
+ if (ret == -1) {
+ ksft_print_msg("making mount tree private: %s\n",
+ strerror(errno));
+ return NSID_ERROR;
+ }
+
+ return NSID_PASS;
+}
+
+static int _test_statmount_mnt_ns_id(void)
+{
+ struct statmount sm;
+ uint64_t mnt_ns_id;
+ uint64_t root_id;
+ int ret;
+
+ ret = get_mnt_ns_id("/proc/self/ns/mnt", &mnt_ns_id);
+ if (ret != NSID_PASS)
+ return ret;
+
+ ret = get_mnt_id("/", &root_id);
+ if (ret != NSID_PASS)
+ return ret;
+
+ ret = statmount(root_id, 0, STATMOUNT_MNT_NS_ID, &sm, sizeof(sm), 0);
+ if (ret == -1) {
+ ksft_print_msg("statmount mnt ns id: %s\n", strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (sm.size != sizeof(sm)) {
+ ksft_print_msg("unexpected size: %u != %u\n", sm.size,
+ (uint32_t)sizeof(sm));
+ return NSID_FAIL;
+ }
+ if (sm.mask != STATMOUNT_MNT_NS_ID) {
+ ksft_print_msg("statmount mnt ns id unavailable\n");
+ return NSID_SKIP;
+ }
+
+ if (sm.mnt_ns_id != mnt_ns_id) {
+ ksft_print_msg("unexpected mnt ns ID: 0x%llx != 0x%llx\n",
+ (unsigned long long)sm.mnt_ns_id,
+ (unsigned long long)mnt_ns_id);
+ return NSID_FAIL;
+ }
+
+ return NSID_PASS;
+}
+
+static void test_statmount_mnt_ns_id(void)
+{
+ pid_t pid;
+ int ret;
+
+ pid = fork();
+ if (pid < 0)
+ ksft_exit_fail_msg("failed to fork: %s\n", strerror(errno));
+
+ /* We're the original pid, wait for the result. */
+ if (pid != 0) {
+ ret = wait_for_pid(pid);
+ handle_result(ret, "test statmount ns id");
+ return;
+ }
+
+ ret = setup_namespace();
+ if (ret != NSID_PASS)
+ exit(ret);
+ ret = _test_statmount_mnt_ns_id();
+ exit(ret);
+}
+
+static int validate_external_listmount(pid_t pid, uint64_t child_nr_mounts)
+{
+ uint64_t list[256];
+ uint64_t mnt_ns_id;
+ uint64_t nr_mounts;
+ char buf[256];
+ int ret;
+
+ /* Get the mount ns id for our child. */
+ snprintf(buf, sizeof(buf), "/proc/%lu/ns/mnt", (unsigned long)pid);
+ ret = get_mnt_ns_id(buf, &mnt_ns_id);
+
+ nr_mounts = listmount(LSMT_ROOT, mnt_ns_id, 0, list, 256, 0);
+ if (nr_mounts == (uint64_t)-1) {
+ ksft_print_msg("listmount: %s\n", strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (nr_mounts != child_nr_mounts) {
+ ksft_print_msg("listmount results is %zi != %zi\n", nr_mounts,
+ child_nr_mounts);
+ return NSID_FAIL;
+ }
+
+ /* Validate that all of our entries match our mnt_ns_id. */
+ for (int i = 0; i < nr_mounts; i++) {
+ struct statmount sm;
+
+ ret = statmount(list[i], mnt_ns_id, STATMOUNT_MNT_NS_ID, &sm,
+ sizeof(sm), 0);
+ if (ret < 0) {
+ ksft_print_msg("statmount mnt ns id: %s\n", strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (sm.mask != STATMOUNT_MNT_NS_ID) {
+ ksft_print_msg("statmount mnt ns id unavailable\n");
+ return NSID_SKIP;
+ }
+
+ if (sm.mnt_ns_id != mnt_ns_id) {
+ ksft_print_msg("listmount gave us the wrong ns id: 0x%llx != 0x%llx\n",
+ (unsigned long long)sm.mnt_ns_id,
+ (unsigned long long)mnt_ns_id);
+ return NSID_FAIL;
+ }
+ }
+
+ return NSID_PASS;
+}
+
+static void test_listmount_ns(void)
+{
+ uint64_t nr_mounts;
+ char pval;
+ int child_ready_pipe[2];
+ int parent_ready_pipe[2];
+ pid_t pid;
+ int ret, child_ret;
+
+ if (pipe(child_ready_pipe) < 0)
+ ksft_exit_fail_msg("failed to create the child pipe: %s\n",
+ strerror(errno));
+ if (pipe(parent_ready_pipe) < 0)
+ ksft_exit_fail_msg("failed to create the parent pipe: %s\n",
+ strerror(errno));
+
+ pid = fork();
+ if (pid < 0)
+ ksft_exit_fail_msg("failed to fork: %s\n", strerror(errno));
+
+ if (pid == 0) {
+ char cval;
+ uint64_t list[256];
+
+ close(child_ready_pipe[0]);
+ close(parent_ready_pipe[1]);
+
+ ret = setup_namespace();
+ if (ret != NSID_PASS)
+ exit(ret);
+
+ nr_mounts = listmount(LSMT_ROOT, 0, 0, list, 256, 0);
+ if (nr_mounts == (uint64_t)-1) {
+ ksft_print_msg("listmount: %s\n", strerror(errno));
+ exit(NSID_FAIL);
+ }
+
+ /*
+ * Tell our parent how many mounts we have, and then wait for it
+ * to tell us we're done.
+ */
+ write(child_ready_pipe[1], &nr_mounts, sizeof(nr_mounts));
+ read(parent_ready_pipe[0], &cval, sizeof(cval));
+ exit(NSID_PASS);
+ }
+
+ close(child_ready_pipe[1]);
+ close(parent_ready_pipe[0]);
+
+ /* Wait until the child has created everything. */
+ if (read(child_ready_pipe[0], &nr_mounts, sizeof(nr_mounts)) !=
+ sizeof(nr_mounts))
+ ret = NSID_ERROR;
+
+ ret = validate_external_listmount(pid, nr_mounts);
+
+ if (write(parent_ready_pipe[1], &pval, sizeof(pval)) != sizeof(pval))
+ ret = NSID_ERROR;
+
+ child_ret = wait_for_pid(pid);
+ if (child_ret != NSID_PASS)
+ ret = child_ret;
+ handle_result(ret, "test listmount ns id");
+}
+
+int main(void)
+{
+ int ret;
+
+ ksft_print_header();
+ ret = statmount(0, 0, 0, NULL, 0, 0);
+ assert(ret == -1);
+ if (errno == ENOSYS)
+ ksft_exit_skip("statmount() syscall not supported\n");
+
+ ksft_set_plan(2);
+ test_statmount_mnt_ns_id();
+ test_listmount_ns();
+
+ if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/ftrace/config b/tools/testing/selftests/ftrace/config
index e59d985eeff0..048a312abf40 100644
--- a/tools/testing/selftests/ftrace/config
+++ b/tools/testing/selftests/ftrace/config
@@ -1,16 +1,28 @@
-CONFIG_KPROBES=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_EPROBE_EVENTS=y
+CONFIG_FPROBE=y
+CONFIG_FPROBE_EVENTS=y
CONFIG_FTRACE=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FUNCTION_PROFILER=y
-CONFIG_TRACER_SNAPSHOT=y
-CONFIG_STACK_TRACER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_SCHED_TRACER=y
-CONFIG_PREEMPT_TRACER=y
CONFIG_IRQSOFF_TRACER=y
-CONFIG_PREEMPTIRQ_DELAY_TEST=m
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_KPROBE_EVENTS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_PREEMPTIRQ_DELAY_TEST=m
+CONFIG_PREEMPT_TRACER=y
+CONFIG_PROBE_EVENTS_BTF_ARGS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_TRACE_PRINTK=m
-CONFIG_KALLSYMS_ALL=y
+CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_UPROBES=y
+CONFIG_UPROBE_EVENTS=y
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
index d3a79da215c8..5f72abe6fa79 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Generic dynamic event - check if duplicate events are caught
-# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README events/syscalls/sys_enter_openat
echo 0 > events/enable
diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
index 3f74c09c56b6..118247b8dd84 100644
--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
@@ -10,7 +10,6 @@ fail() { #msg
}
sample_events() {
- echo > trace
echo 1 > events/kmem/kmem_cache_free/enable
echo 1 > tracing_on
ls > /dev/null
@@ -22,6 +21,7 @@ echo 0 > tracing_on
echo 0 > events/enable
echo "Get the most frequently calling function"
+echo > trace
sample_events
target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
@@ -32,7 +32,16 @@ echo > trace
echo "Test event filter function name"
echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter
+
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+ exit_fail
+fi
+done
hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
@@ -49,7 +58,16 @@ address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1`
echo "Test event filter function address"
echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter
+echo > trace
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+ exit_fail
+fi
+done
hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
index 1f6981ef7afa..ba19b81cef39 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
@@ -30,7 +30,8 @@ find_dot_func() {
fi
grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
- if grep -s $f available_filter_functions; then
+ cnt=`grep -s $f available_filter_functions | wc -l`;
+ if [ $cnt -eq 1 ]; then
echo $f
break
fi
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
index 11e157d7533b..78ab2cd111f6 100644
--- a/tools/testing/selftests/futex/Makefile
+++ b/tools/testing/selftests/futex/Makefile
@@ -3,8 +3,6 @@ SUBDIRS := functional
TEST_PROGS := run.sh
-.PHONY: all clean
-
include ../lib.mk
all:
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index a392d0917b4e..994fa3468f17 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
-CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
+CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES)
LDLIBS := -lpthread -lrt
LOCAL_HDRS := \
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
index 7f3ca5c78df1..215c6cb539b4 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
@@ -360,7 +360,7 @@ out:
int main(int argc, char *argv[])
{
- const char *test_name;
+ char *test_name;
int c, ret;
while ((c = getopt(argc, argv, "bchlot:v:")) != -1) {
diff --git a/tools/testing/selftests/hid/hid_bpf.c b/tools/testing/selftests/hid/hid_bpf.c
index f825623e3edc..dc0408a831d0 100644
--- a/tools/testing/selftests/hid/hid_bpf.c
+++ b/tools/testing/selftests/hid/hid_bpf.c
@@ -460,7 +460,7 @@ FIXTURE(hid_bpf) {
int hid_id;
pthread_t tid;
struct hid *skel;
- int hid_links[3]; /* max number of programs loaded in a single test */
+ struct bpf_link *hid_links[3]; /* max number of programs loaded in a single test */
};
static void detach_bpf(FIXTURE_DATA(hid_bpf) * self)
{
@@ -470,9 +470,14 @@ static void detach_bpf(FIXTURE_DATA(hid_bpf) * self)
close(self->hidraw_fd);
self->hidraw_fd = 0;
+ if (!self->skel)
+ return;
+
+ hid__detach(self->skel);
+
for (i = 0; i < ARRAY_SIZE(self->hid_links); i++) {
if (self->hid_links[i])
- close(self->hid_links[i]);
+ bpf_link__destroy(self->hid_links[i]);
}
hid__destroy(self->skel);
@@ -527,14 +532,7 @@ static void load_programs(const struct test_program programs[],
FIXTURE_DATA(hid_bpf) * self,
const FIXTURE_VARIANT(hid_bpf) * variant)
{
- int attach_fd, err = -EINVAL;
- struct attach_prog_args args = {
- .retval = -1,
- };
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
- .ctx_in = &args,
- .ctx_size_in = sizeof(args),
- );
+ int err = -EINVAL;
ASSERT_LE(progs_count, ARRAY_SIZE(self->hid_links))
TH_LOG("too many programs are to be loaded");
@@ -545,37 +543,45 @@ static void load_programs(const struct test_program programs[],
for (int i = 0; i < progs_count; i++) {
struct bpf_program *prog;
+ struct bpf_map *map;
+ int *ops_hid_id;
prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
programs[i].name);
ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
bpf_program__set_autoload(prog, true);
+
+ map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
+ programs[i].name + 4);
+ ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
+ programs[i].name + 4);
+
+ /* hid_id is the first field of struct hid_bpf_ops */
+ ops_hid_id = bpf_map__initial_value(map, NULL);
+ ASSERT_OK_PTR(ops_hid_id) TH_LOG("unable to retrieve struct_ops data");
+
+ *ops_hid_id = self->hid_id;
}
err = hid__load(self->skel);
ASSERT_OK(err) TH_LOG("hid_skel_load failed: %d", err);
- attach_fd = bpf_program__fd(self->skel->progs.attach_prog);
- ASSERT_GE(attach_fd, 0) TH_LOG("locate attach_prog: %d", attach_fd);
-
for (int i = 0; i < progs_count; i++) {
- struct bpf_program *prog;
+ struct bpf_map *map;
- prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
- programs[i].name);
- ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
-
- args.prog_fd = bpf_program__fd(prog);
- args.hid = self->hid_id;
- args.insert_head = programs[i].insert_head;
- err = bpf_prog_test_run_opts(attach_fd, &tattr);
- ASSERT_GE(args.retval, 0)
- TH_LOG("attach_hid(%s): %d", programs[i].name, args.retval);
+ map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
+ programs[i].name + 4);
+ ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
+ programs[i].name + 4);
- self->hid_links[i] = args.retval;
+ self->hid_links[i] = bpf_map__attach_struct_ops(map);
+ ASSERT_OK_PTR(self->hid_links[i]) TH_LOG("failed to attach struct ops '%s'",
+ programs[i].name + 4);
}
+ hid__attach(self->skel);
+
self->hidraw_fd = open_hidraw(self->dev_id);
ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
}
@@ -640,6 +646,47 @@ TEST_F(hid_bpf, raw_event)
}
/*
+ * Attach hid_first_event to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can change the data
+ */
+TEST_F(hid_bpf, subprog_raw_event)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_subprog_first_event" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[2], 47);
+
+ /* inject another event */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 1;
+ buf[1] = 47;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[2], 52);
+}
+
+/*
* Ensures that we can attach/detach programs
*/
TEST_F(hid_bpf, test_attach_detach)
@@ -648,13 +695,17 @@ TEST_F(hid_bpf, test_attach_detach)
{ .name = "hid_first_event" },
{ .name = "hid_second_event" },
};
+ struct bpf_link *link;
__u8 buf[10] = {0};
- int err, link;
+ int err, link_fd;
LOAD_PROGRAMS(progs);
link = self->hid_links[0];
- ASSERT_GT(link, 0) TH_LOG("HID-BPF link not created");
+ ASSERT_OK_PTR(link) TH_LOG("HID-BPF link not created");
+
+ link_fd = bpf_link__fd(link);
+ ASSERT_GE(link_fd, 0) TH_LOG("HID-BPF link FD not valid");
/* inject one event */
buf[0] = 1;
@@ -673,7 +724,7 @@ TEST_F(hid_bpf, test_attach_detach)
/* pin the first program and immediately unpin it */
#define PIN_PATH "/sys/fs/bpf/hid_first_event"
- err = bpf_obj_pin(link, PIN_PATH);
+ err = bpf_obj_pin(link_fd, PIN_PATH);
ASSERT_OK(err) TH_LOG("error while calling bpf_obj_pin");
remove(PIN_PATH);
#undef PIN_PATH
@@ -876,6 +927,325 @@ TEST_F(hid_bpf, test_hid_user_raw_request_call)
}
/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is called and prevents the
+ * call to uhid.
+ */
+TEST_F(hid_bpf, test_hid_filter_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_filter_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* first check that we did not attach to device_event */
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 42);
+ ASSERT_EQ(buf[2], 0) TH_LOG("leftovers_from_previous_test");
+
+ /* now check that our program is preventing hid_hw_raw_request() */
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_LT(err, 0) TH_LOG("unexpected success while reading HIDIOCGFEATURE: %d", err);
+ ASSERT_EQ(errno, 20) TH_LOG("unexpected error code while reading HIDIOCGFEATURE: %d",
+ errno);
+
+ /* remove our bpf program and check that we can now emit commands */
+
+ /* detach the program */
+ detach_bpf(self);
+
+ self->hidraw_fd = open_hidraw(self->dev_id);
+ ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
+
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_GE(err, 0) TH_LOG("error while reading HIDIOCGFEATURE: %d", err);
+}
+
+/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is called and can issue the call
+ * to uhid and transform the answer.
+ */
+TEST_F(hid_bpf, test_hid_change_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_hidraw_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_EQ(err, 3) TH_LOG("unexpected returned size while reading HIDIOCGFEATURE: %d", err);
+
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 3);
+ ASSERT_EQ(buf[2], 4);
+}
+
+/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_EQ(err, 3) TH_LOG("unexpected returned size while reading HIDIOCGFEATURE: %d", err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is called and prevents the
+ * call to uhid.
+ */
+TEST_F(hid_bpf, test_hid_filter_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_filter_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* first check that we did not attach to device_event */
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 42);
+ ASSERT_EQ(buf[2], 0) TH_LOG("leftovers_from_previous_test");
+
+ /* now check that our program is preventing hid_hw_output_report() */
+
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 3);
+ ASSERT_LT(err, 0) TH_LOG("unexpected success while sending hid_hw_output_report: %d", err);
+ ASSERT_EQ(errno, 25) TH_LOG("unexpected error code while sending hid_hw_output_report: %d",
+ errno);
+
+ /* remove our bpf program and check that we can now emit commands */
+
+ /* detach the program */
+ detach_bpf(self);
+
+ self->hidraw_fd = open_hidraw(self->dev_id);
+ ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
+
+ err = write(self->hidraw_fd, buf, 3);
+ ASSERT_GE(err, 0) TH_LOG("error while sending hid_hw_output_report: %d", err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is called and can issue the call
+ * to uhid and transform the answer.
+ */
+TEST_F(hid_bpf, test_hid_change_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_hidraw_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_output_report from hidraw */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 10);
+ ASSERT_EQ(err, 2) TH_LOG("unexpected returned size while sending hid_hw_output_report: %d",
+ err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_output_report from hidraw */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 8);
+ ASSERT_EQ(err, 2) TH_LOG("unexpected returned size while sending hid_hw_output_report: %d",
+ err);
+}
+
+/*
+ * Attach hid_multiply_event_wq to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can add extra data
+ */
+TEST_F(hid_bpf, test_multiply_events_wq)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_multiply_events_wq" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 47);
+
+ usleep(100000);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 3);
+}
+
+/*
+ * Attach hid_multiply_event to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can add extra data
+ */
+TEST_F(hid_bpf, test_multiply_events)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_multiply_events" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 47);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 52);
+}
+
+/*
+ * Call hid_bpf_input_report against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_input_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_input_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_output_report from hidraw */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 3);
+
+ /* read the data from hidraw: hid_bpf_try_input_report should work exactly one time */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 4);
+
+ /* read the data from hidraw: there should be none */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, -1) TH_LOG("read_hidraw");
+}
+
+/*
* Attach hid_insert{0,1,2} to the given uhid device,
* retrieve and open the matching hidraw node,
* inject one event in the uhid device,
diff --git a/tools/testing/selftests/hid/progs/hid.c b/tools/testing/selftests/hid/progs/hid.c
index f67d35def142..ee9bbbcf751b 100644
--- a/tools/testing/selftests/hid/progs/hid.c
+++ b/tools/testing/selftests/hid/progs/hid.c
@@ -14,8 +14,8 @@ struct attach_prog_args {
__u64 callback_check = 52;
__u64 callback2_check = 52;
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx)
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
@@ -29,8 +29,38 @@ int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx)
return hid_ctx->size;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops first_event = {
+ .hid_device_event = (void *)hid_first_event,
+ .hid_id = 2,
+};
+
+int __hid_subprog_first_event(struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
+
+ if (!rw_data)
+ return 0; /* EPERM check */
+
+ rw_data[2] = rw_data[1] + 5;
+
+ return hid_ctx->size;
+}
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_subprog_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ return __hid_subprog_first_event(hid_ctx, type);
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops subprog_first_event = {
+ .hid_device_event = (void *)hid_subprog_first_event,
+ .hid_id = 2,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -42,8 +72,13 @@ int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx)
return hid_ctx->size;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops second_event = {
+ .hid_device_event = (void *)hid_second_event,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
@@ -55,15 +90,10 @@ int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx)
return 9;
}
-SEC("syscall")
-int attach_prog(struct attach_prog_args *ctx)
-{
- ctx->retval = hid_bpf_attach_prog(ctx->hid,
- ctx->prog_fd,
- ctx->insert_head ? HID_BPF_FLAG_INSERT_HEAD :
- HID_BPF_FLAG_NONE);
- return 0;
-}
+SEC(".struct_ops.link")
+struct hid_bpf_ops change_report_id = {
+ .hid_device_event = (void *)hid_change_report_id,
+};
struct hid_hw_request_syscall_args {
/* data needs to come at offset 0 so we can use it in calls */
@@ -181,7 +211,12 @@ static const __u8 rdesc[] = {
0xc0, /* END_COLLECTION */
};
-SEC("?fmod_ret/hid_bpf_rdesc_fixup")
+/*
+ * the following program is marked as sleepable (struct_ops.s).
+ * This is not strictly mandatory but is a nice test for
+ * sleepable struct_ops
+ */
+SEC("?struct_ops.s/hid_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4096 /* size */);
@@ -200,8 +235,13 @@ int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
return sizeof(rdesc) + 73;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops rdesc_fixup = {
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -217,8 +257,14 @@ int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx)
return 0;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert1 = {
+ .hid_device_event = (void *)hid_test_insert1,
+ .flags = BPF_F_BEFORE,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -234,8 +280,13 @@ int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx)
return 0;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert2 = {
+ .hid_device_event = (void *)hid_test_insert2,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -250,3 +301,300 @@ int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx)
return 0;
}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert3 = {
+ .hid_device_event = (void *)hid_test_insert3,
+};
+
+SEC("?struct_ops/hid_hw_request")
+int BPF_PROG(hid_test_filter_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ return -20;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_filter_raw_request = {
+ .hid_hw_request = (void *)hid_test_filter_raw_request,
+};
+
+static struct file *current_file;
+
+SEC("fentry/hidraw_open")
+int BPF_PROG(hidraw_open, struct inode *inode, struct file *file)
+{
+ current_file = file;
+ return 0;
+}
+
+SEC("?struct_ops.s/hid_hw_request")
+int BPF_PROG(hid_test_hidraw_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* check if the incoming request comes from our hidraw operation */
+ if (source == (__u64)current_file) {
+ data[0] = reportnum;
+
+ ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
+ if (ret != 2)
+ return -1;
+ data[0] = reportnum + 1;
+ data[1] = reportnum + 2;
+ data[2] = reportnum + 3;
+ return 3;
+ }
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_hidraw_raw_request = {
+ .hid_hw_request = (void *)hid_test_hidraw_raw_request,
+};
+
+SEC("?struct_ops.s/hid_hw_request")
+int BPF_PROG(hid_test_infinite_loop_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* always forward the request as-is to the device, hid-bpf should prevent
+ * infinite loops.
+ */
+ data[0] = reportnum;
+
+ ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
+ if (ret == 2)
+ return 3;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_raw_request = {
+ .hid_hw_request = (void *)hid_test_infinite_loop_raw_request,
+};
+
+SEC("?struct_ops/hid_hw_output_report")
+int BPF_PROG(hid_test_filter_output_report, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ return -25;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_filter_output_report = {
+ .hid_hw_output_report = (void *)hid_test_filter_output_report,
+};
+
+SEC("?struct_ops.s/hid_hw_output_report")
+int BPF_PROG(hid_test_hidraw_output_report, struct hid_bpf_ctx *hctx, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* check if the incoming request comes from our hidraw operation */
+ if (source == (__u64)current_file)
+ return hid_bpf_hw_output_report(hctx, data, 2);
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_hidraw_output_report = {
+ .hid_hw_output_report = (void *)hid_test_hidraw_output_report,
+};
+
+SEC("?struct_ops.s/hid_hw_output_report")
+int BPF_PROG(hid_test_infinite_loop_output_report, struct hid_bpf_ctx *hctx, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* always forward the request as-is to the device, hid-bpf should prevent
+ * infinite loops.
+ */
+
+ ret = hid_bpf_hw_output_report(hctx, data, 2);
+ if (ret == 2)
+ return 2;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_output_report = {
+ .hid_hw_output_report = (void *)hid_test_infinite_loop_output_report,
+};
+
+struct elem {
+ struct bpf_wq work;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+{
+ __u8 buf[9] = {2, 3, 4, 5, 6, 7, 8, 9, 10};
+ struct hid_bpf_ctx *hid_ctx;
+
+ hid_ctx = hid_bpf_allocate_context(*key);
+ if (!hid_ctx)
+ return 0; /* EPERM check */
+
+ hid_bpf_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
+
+ hid_bpf_release_context(hid_ctx);
+
+ return 0;
+}
+
+static int test_inject_input_report_callback(int *key)
+{
+ struct elem init = {}, *val;
+ struct bpf_wq *wq;
+
+ if (bpf_map_update_elem(&hmap, key, &init, 0))
+ return -1;
+
+ val = bpf_map_lookup_elem(&hmap, key);
+ if (!val)
+ return -2;
+
+ wq = &val->work;
+ if (bpf_wq_init(wq, &hmap, 0) != 0)
+ return -3;
+
+ if (bpf_wq_set_callback(wq, wq_cb_sleepable, 0))
+ return -4;
+
+ if (bpf_wq_start(wq, 0))
+ return -5;
+
+ return 0;
+}
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_multiply_events_wq, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
+ int hid = hid_ctx->hid->id;
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 1)
+ return 0;
+
+ ret = test_inject_input_report_callback(&hid);
+ if (ret)
+ return ret;
+
+ data[1] += 5;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_multiply_events_wq = {
+ .hid_device_event = (void *)hid_test_multiply_events_wq,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_multiply_events, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
+ __u8 buf[9];
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 1)
+ return 0;
+
+ /*
+ * we have to use an intermediate buffer as hid_bpf_input_report
+ * will memset data to \0
+ */
+ __builtin_memcpy(buf, data, sizeof(buf));
+
+ buf[0] = 2;
+ buf[1] += 5;
+ ret = hid_bpf_try_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * In the real world we would reset the original buffer here, since data might be
+	 * garbage by now; in practice it already holds the content of 'buf'.
+ */
+ data[1] += 5;
+
+ return 9;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_multiply_events = {
+ .hid_device_event = (void *)hid_test_multiply_events,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_infinite_loop_input_report, struct hid_bpf_ctx *hctx,
+ enum hid_report_type report_type, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 6 /* size */);
+ __u8 buf[6];
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /*
+ * we have to use an intermediate buffer as hid_bpf_input_report
+ * will memset data to \0
+ */
+ __builtin_memcpy(buf, data, sizeof(buf));
+
+ /* always forward the request as-is to the device, hid-bpf should prevent
+ * infinite loops.
+	 * the return value is ignored so the event is passed on to userspace.
+ */
+
+ hid_bpf_try_input_report(hctx, report_type, buf, sizeof(buf));
+
+ /* each time we process the event, we increment by one data[1]:
+ * after each successful call to hid_bpf_try_input_report, buf
+ * has been memcopied into data by the kernel.
+ */
+ data[1] += 1;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_input_report = {
+ .hid_device_event = (void *)hid_test_infinite_loop_input_report,
+};
diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
index 9cd56821d0f1..cfe37f491906 100644
--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
+++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
@@ -7,6 +7,7 @@
/* "undefine" structs and enums in vmlinux.h, because we "override" them below */
#define hid_bpf_ctx hid_bpf_ctx___not_used
+#define hid_bpf_ops hid_bpf_ops___not_used
#define hid_report_type hid_report_type___not_used
#define hid_class_request hid_class_request___not_used
#define hid_bpf_attach_flags hid_bpf_attach_flags___not_used
@@ -20,13 +21,11 @@
#define HID_REQ_SET_REPORT HID_REQ_SET_REPORT___not_used
#define HID_REQ_SET_IDLE HID_REQ_SET_IDLE___not_used
#define HID_REQ_SET_PROTOCOL HID_REQ_SET_PROTOCOL___not_used
-#define HID_BPF_FLAG_NONE HID_BPF_FLAG_NONE___not_used
-#define HID_BPF_FLAG_INSERT_HEAD HID_BPF_FLAG_INSERT_HEAD___not_used
-#define HID_BPF_FLAG_MAX HID_BPF_FLAG_MAX___not_used
#include "vmlinux.h"
#undef hid_bpf_ctx
+#undef hid_bpf_ops
#undef hid_report_type
#undef hid_class_request
#undef hid_bpf_attach_flags
@@ -40,9 +39,6 @@
#undef HID_REQ_SET_REPORT
#undef HID_REQ_SET_IDLE
#undef HID_REQ_SET_PROTOCOL
-#undef HID_BPF_FLAG_NONE
-#undef HID_BPF_FLAG_INSERT_HEAD
-#undef HID_BPF_FLAG_MAX
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -57,10 +53,8 @@ enum hid_report_type {
};
struct hid_bpf_ctx {
- __u32 index;
- const struct hid_device *hid;
+ struct hid_device *hid;
__u32 allocated_size;
- enum hid_report_type report_type;
union {
__s32 retval;
__s32 size;
@@ -76,17 +70,28 @@ enum hid_class_request {
HID_REQ_SET_PROTOCOL = 0x0B,
};
-enum hid_bpf_attach_flags {
- HID_BPF_FLAG_NONE = 0,
- HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
- HID_BPF_FLAG_MAX,
+struct hid_bpf_ops {
+ int hid_id;
+ u32 flags;
+ struct list_head list;
+ int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+ u64 source);
+ int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+ int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype,
+ u64 source);
+ int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source);
+ struct hid_device *hdev;
};
+#ifndef BPF_F_BEFORE
+#define BPF_F_BEFORE (1U << 3)
+#endif
+
/* following are kfuncs exported by HID for HID-BPF */
extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
unsigned int offset,
const size_t __sz) __ksym;
-extern int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, u32 flags) __ksym;
extern struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id) __ksym;
extern void hid_bpf_release_context(struct hid_bpf_ctx *ctx) __ksym;
extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
@@ -100,5 +105,18 @@ extern int hid_bpf_input_report(struct hid_bpf_ctx *ctx,
enum hid_report_type type,
__u8 *data,
size_t buf__sz) __ksym;
+extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx,
+ enum hid_report_type type,
+ __u8 *data,
+ size_t buf__sz) __ksym;
+
+/* bpf_wq implementation */
+extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
+extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
+extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+ unsigned int flags__k, void *aux__ign) __ksym;
+#define bpf_wq_set_callback(timer, cb, flags) \
+ bpf_wq_set_callback_impl(timer, cb, flags, NULL)
#endif /* __HID_BPF_HELPERS_H */
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 76c2a6945d3e..b8967b6e29d5 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -168,15 +168,7 @@ static inline __printf(1, 2) void ksft_print_msg(const char *msg, ...)
static inline void ksft_perror(const char *msg)
{
-#ifndef NOLIBC
ksft_print_msg("%s: %s (%d)\n", msg, strerror(errno), errno);
-#else
- /*
- * nolibc doesn't provide strerror() and it seems
- * inappropriate to add one, just print the errno.
- */
- ksft_print_msg("%s: %d)\n", msg, errno);
-#endif
}
static inline __printf(1, 2) void ksft_test_result_pass(const char *msg, ...)
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index b634969cbb6f..40723a6a083f 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -66,8 +66,6 @@
#include <sys/wait.h>
#include <unistd.h>
#include <setjmp.h>
-#include <syscall.h>
-#include <linux/sched.h>
#include "kselftest.h"
@@ -82,17 +80,6 @@
# define TH_LOG_ENABLED 1
#endif
-/* Wait for the child process to end but without sharing memory mapping. */
-static inline pid_t clone3_vfork(void)
-{
- struct clone_args args = {
- .flags = CLONE_VFORK,
- .exit_signal = SIGCHLD,
- };
-
- return syscall(__NR_clone3, &args, sizeof(args));
-}
-
/**
* TH_LOG()
*
@@ -437,7 +424,7 @@ static inline pid_t clone3_vfork(void)
} \
if (setjmp(_metadata->env) == 0) { \
/* _metadata and potentially self are shared with all forks. */ \
- child = clone3_vfork(); \
+ child = fork(); \
if (child == 0) { \
fixture_name##_setup(_metadata, self, variant->data); \
/* Let setup failure terminate early. */ \
@@ -1016,7 +1003,14 @@ void __wait_for_test(struct __test_metadata *t)
.sa_flags = SA_SIGINFO,
};
struct sigaction saved_action;
- int status;
+ /*
+ * Sets status so that WIFEXITED(status) returns true and
+ * WEXITSTATUS(status) returns KSFT_FAIL. This safe default value
+ * should never be evaluated because of the waitpid(2) check and
+ * SIGALRM handling.
+ */
+ int status = KSFT_FAIL << 8;
+ int child;
if (sigaction(SIGALRM, &action, &saved_action)) {
t->exit_code = KSFT_FAIL;
@@ -1028,7 +1022,15 @@ void __wait_for_test(struct __test_metadata *t)
__active_test = t;
t->timed_out = false;
alarm(t->timeout);
- waitpid(t->pid, &status, 0);
+ child = waitpid(t->pid, &status, 0);
+ if (child == -1 && errno != EINTR) {
+ t->exit_code = KSFT_FAIL;
+ fprintf(TH_LOG_STREAM,
+ "# %s: Failed to wait for PID %d (errno: %d)\n",
+ t->name, t->pid, errno);
+ return;
+ }
+
alarm(0);
if (sigaction(SIGALRM, &saved_action, NULL)) {
t->exit_code = KSFT_FAIL;
@@ -1083,6 +1085,7 @@ void __wait_for_test(struct __test_metadata *t)
WTERMSIG(status));
}
} else {
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: Test ended in some other way [%u]\n",
t->name,
@@ -1218,6 +1221,7 @@ void __run_test(struct __fixture_metadata *f,
struct __test_xfail *xfail;
char test_name[1024];
const char *diagnostic;
+ int child;
/* reset test struct */
t->exit_code = KSFT_PASS;
@@ -1236,15 +1240,16 @@ void __run_test(struct __fixture_metadata *f,
fflush(stdout);
fflush(stderr);
- t->pid = clone3_vfork();
- if (t->pid < 0) {
+ child = fork();
+ if (child < 0) {
ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
t->exit_code = KSFT_FAIL;
- } else if (t->pid == 0) {
+ } else if (child == 0) {
setpgrp();
t->fn(t, variant);
_exit(t->exit_code);
} else {
+ t->pid = child;
__wait_for_test(t);
}
ksft_print_msg(" %4s %s\n",
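As an aside on the preset status value introduced above: it relies on the standard wait(2) status encoding, so if the value somehow were consulted without waitpid() having filled it in, the harness would still report a failure rather than a spurious pass. Roughly, with the usual glibc/Linux macros:

    int status = KSFT_FAIL << 8;

    /* WIFEXITED(status):   ((status) & 0x7f) == 0   -> true      */
    /* WEXITSTATUS(status): ((status) >> 8) & 0xff   -> KSFT_FAIL */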
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ce8ff8e8ce3a..ac280dcba996 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -183,6 +183,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += s390x/debug_test
+TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += guest_print_test
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 8eb57de0b587..c0c7c1fe93f9 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -277,6 +277,7 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
+#define X86_PROPERTY_GUEST_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c
index 14ee17151a59..b5035c63d516 100644
--- a/tools/testing/selftests/kvm/lib/riscv/ucall.c
+++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c
@@ -9,6 +9,7 @@
#include "kvm_util.h"
#include "processor.h"
+#include "sbi.h"
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index c664e446136b..594b061aef52 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1247,9 +1247,20 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
unsigned long ht_gfn, max_gfn, max_pfn;
- uint8_t maxphyaddr;
+ uint8_t maxphyaddr, guest_maxphyaddr;
- max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+ /*
+ * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR
+ * enumerates the max _mappable_ GPA, which can be less than the raw
+ * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
+ * doesn't support 5-level TDP.
+ */
+ guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
+ guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
+ TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
+ "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");
+
+ max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
/* Avoid reserved HyperTransport region on AMD processors. */
if (!host_cpu_is_amd)
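A worked example of the capping above, with numbers assumed purely for illustration: if the raw MAXPHYADDR is 52 but the CPU only supports 4-level TDP, KVM may report a guest MAXPHYADDR of 48, so with 4 KiB pages (page_shift == 12):

    max_gfn = (1ULL << (48 - 12)) - 1;    /* 0x0000000fffffffff */

    /* versus (1ULL << (52 - 12)) - 1 == 0x000000ffffffffff if the raw
     * MAXPHYADDR were used, i.e. it would include GFNs that KVM could
     * never actually map for the guest.
     */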
diff --git a/tools/testing/selftests/kvm/riscv/ebreak_test.c b/tools/testing/selftests/kvm/riscv/ebreak_test.c
index 823c132069b4..0e0712854953 100644
--- a/tools/testing/selftests/kvm/riscv/ebreak_test.c
+++ b/tools/testing/selftests/kvm/riscv/ebreak_test.c
@@ -6,6 +6,7 @@
*
*/
#include "kvm_util.h"
+#include "ucall_common.h"
#define LABEL_ADDRESS(v) ((uint64_t)&(v))
diff --git a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
index 69bb94e6b227..f299cbfd23ca 100644
--- a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
+++ b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
@@ -15,6 +15,7 @@
#include "processor.h"
#include "sbi.h"
#include "arch_timer.h"
+#include "ucall_common.h"
/* Maximum counters(firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64
diff --git a/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c b/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c
new file mode 100644
index 000000000000..bba0d9a6dcc8
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test shared zeropage handling (with/without storage keys)
+ *
+ * Copyright (C) 2024, Red Hat, Inc.
+ */
+#include <sys/mman.h>
+
+#include <linux/fs.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+#include "ucall_common.h"
+
+static void set_storage_key(void *addr, uint8_t skey)
+{
+ asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+}
+
+static void guest_code(void)
+{
+ /* Issue some storage key instruction. */
+ set_storage_key((void *)0, 0x98);
+ GUEST_DONE();
+}
+
+/*
+ * Returns 1 if the shared zeropage is mapped, 0 if something else is mapped.
+ * Returns < 0 on error or if nothing is mapped.
+ */
+static int maps_shared_zeropage(int pagemap_fd, void *addr)
+{
+ struct page_region region;
+ struct pm_scan_arg arg = {
+ .start = (uintptr_t)addr,
+ .end = (uintptr_t)addr + 4096,
+ .vec = (uintptr_t)&region,
+ .vec_len = 1,
+ .size = sizeof(struct pm_scan_arg),
+ .category_mask = PAGE_IS_PFNZERO,
+ .category_anyof_mask = PAGE_IS_PRESENT,
+ .return_mask = PAGE_IS_PFNZERO,
+ };
+ return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
+}
+
+int main(int argc, char *argv[])
+{
+ char *mem, *page0, *page1, *page2, tmp;
+ const size_t pagesize = getpagesize();
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ int pagemap_fd;
+
+ ksft_print_header();
+ ksft_set_plan(3);
+
+ /*
+ * We'll use memory that is not mapped into the VM for simplicity.
+ * Shared zeropages are enabled/disabled per-process.
+ */
+ mem = mmap(0, 3 * pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
+ TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
+
+ /* Disable THP. Ignore errors on older kernels. */
+ madvise(mem, 3 * pagesize, MADV_NOHUGEPAGE);
+
+ page0 = mem;
+ page1 = page0 + pagesize;
+ page2 = page1 + pagesize;
+
+ /* Can we even detect shared zeropages? */
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ TEST_REQUIRE(pagemap_fd >= 0);
+
+ tmp = *page0;
+ asm volatile("" : "+r" (tmp));
+ TEST_REQUIRE(maps_shared_zeropage(pagemap_fd, page0) == 1);
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ /* Verify that we get the shared zeropage after VM creation. */
+ tmp = *page1;
+ asm volatile("" : "+r" (tmp));
+ ksft_test_result(maps_shared_zeropage(pagemap_fd, page1) == 1,
+ "Shared zeropages should be enabled\n");
+
+ /*
+ * Let our VM execute a storage key instruction that should
+ * unshare all shared zeropages.
+ */
+ vcpu_run(vcpu);
+ get_ucall(vcpu, &uc);
+ TEST_ASSERT_EQ(uc.cmd, UCALL_DONE);
+
+ /* Verify that we don't have a shared zeropage anymore. */
+ ksft_test_result(!maps_shared_zeropage(pagemap_fd, page1),
+ "Shared zeropage should be gone\n");
+
+ /* Verify that we don't get any new shared zeropages. */
+ tmp = *page2;
+ asm volatile("" : "+r" (tmp));
+ ksft_test_result(!maps_shared_zeropage(pagemap_fd, page2),
+ "Shared zeropages should be disabled\n");
+
+ kvm_vm_free(vm);
+
+ ksft_finished();
+}
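One detail of the test body worth spelling out: the empty asm statement after each read is a compiler barrier, not a CPU instruction. Making tmp an operand of the asm forces the load to be performed, which is what actually populates the anonymous mapping (with the shared zeropage while zeropages are in use); without it the compiler could drop the otherwise unused read. The idiom, in isolation:

    tmp = *page1;                   /* the read faults in a mapping for page1 */
    asm volatile("" : "+r" (tmp));  /* keep tmp live so the load cannot be optimized away */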
diff --git a/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c b/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
index 7a4a61be119b..3fb967f40c6a 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_init2_tests.c
@@ -105,11 +105,11 @@ void test_features(uint32_t vm_type, uint64_t supported_features)
int i;
for (i = 0; i < 64; i++) {
- if (!(supported_features & (1u << i)))
+ if (!(supported_features & BIT_ULL(i)))
test_init2_invalid(vm_type,
&(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) },
"unknown feature");
- else if (KNOWN_FEATURES & (1u << i))
+ else if (KNOWN_FEATURES & BIT_ULL(i))
test_init2(vm_type,
&(struct kvm_sev_init){ .vmsa_features = BIT_ULL(i) });
}
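The switch to BIT_ULL() above is a correctness fix, not a style change: for bit positions 32-63, the old 1u << i expression shifts a 32-bit constant past its width, which is undefined behaviour and in practice tests the wrong bit. The helper is effectively the 64-bit form of the shift (paraphrased, not quoted from the kernel headers):

    #define BIT_ULL(nr)  (1ULL << (nr))

    /* e.g. i == 40: (1u << 40) is undefined, while BIT_ULL(40) == 0x10000000000 */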
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 6b5a9ff88c3d..7d063c652be1 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -35,6 +35,7 @@
* See https://sourceware.org/glibc/wiki/Synchronizing_Headers.
*/
#include <linux/fs.h>
+#include <linux/mount.h>
#include "common.h"
@@ -47,6 +48,13 @@ int renameat2(int olddirfd, const char *oldpath, int newdirfd,
}
#endif
+#ifndef open_tree
+int open_tree(int dfd, const char *filename, unsigned int flags)
+{
+ return syscall(__NR_open_tree, dfd, filename, flags);
+}
+#endif
+
#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)
#endif
@@ -2400,6 +2408,43 @@ TEST_F_FORK(layout1, refer_denied_by_default4)
layer_dir_s1d1_refer);
}
+/*
+ * Tests walking through a denied root mount.
+ */
+TEST_F_FORK(layout1, refer_mount_root_deny)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_DIR,
+ };
+ int root_fd, ruleset_fd;
+
+ /* Creates a mount object from a non-mount point. */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ root_fd =
+ open_tree(AT_FDCWD, dir_s1d1,
+ AT_EMPTY_PATH | OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_LE(0, root_fd);
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+ EXPECT_EQ(0, close(ruleset_fd));
+
+ /* Link denied by Landlock: EACCES. */
+ EXPECT_EQ(-1, linkat(root_fd, ".", root_fd, "does_not_exist", 0));
+ EXPECT_EQ(EACCES, errno);
+
+ /* renameat2() always returns EBUSY. */
+ EXPECT_EQ(-1, renameat2(root_fd, ".", root_fd, "does_not_exist", 0));
+ EXPECT_EQ(EBUSY, errno);
+
+ EXPECT_EQ(0, close(root_fd));
+}
+
TEST_F_FORK(layout1, reparent_link)
{
const struct rule layer1[] = {
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 429535816dbd..7b299ed5ff45 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -38,6 +38,14 @@ else
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
endif # CROSS_COMPILE
+# gcc defaults to silence (off) for the following warnings, but clang defaults
+# to the opposite. The warnings are not useful for the kernel itself, which is
+# why they have remained disabled in gcc for the main kernel build. The
+# selftests only trigger them under clang because they include kernel data
+# structures, so disable the warnings for clang builds.
+CFLAGS += -Wno-address-of-packed-member
+CFLAGS += -Wno-gnu-variable-sized-type-not-at-end
+
CC := $(CLANG) $(CLANG_FLAGS) -fintegrated-as
else
CC := $(CROSS_COMPILE)gcc
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
index 368973f05250..cff124c1eddd 100644
--- a/tools/testing/selftests/lkdtm/tests.txt
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -31,6 +31,7 @@ SLAB_FREE_CROSS
SLAB_FREE_PAGE
#SOFTLOCKUP Hangs the system
#HARDLOCKUP Hangs the system
+#SMP_CALL_LOCKUP Hangs the system
#SPINLOCKUP Hangs the system
#HUNG_TASK Hangs the system
EXEC_DATA
diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
index 37de82da9be7..b61803e36d1c 100644
--- a/tools/testing/selftests/mm/ksm_functional_tests.c
+++ b/tools/testing/selftests/mm/ksm_functional_tests.c
@@ -656,12 +656,33 @@ unmap:
munmap(map, size);
}
+static void init_global_file_handles(void)
+{
+ mem_fd = open("/proc/self/mem", O_RDWR);
+ if (mem_fd < 0)
+ ksft_exit_fail_msg("opening /proc/self/mem failed\n");
+ ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
+ if (ksm_fd < 0)
+ ksft_exit_skip("open(\"/sys/kernel/mm/ksm/run\") failed\n");
+ ksm_full_scans_fd = open("/sys/kernel/mm/ksm/full_scans", O_RDONLY);
+ if (ksm_full_scans_fd < 0)
+ ksft_exit_skip("open(\"/sys/kernel/mm/ksm/full_scans\") failed\n");
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ if (pagemap_fd < 0)
+ ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n");
+ proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY);
+ proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages",
+ O_RDONLY);
+ ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);
+}
+
int main(int argc, char **argv)
{
unsigned int tests = 8;
int err;
if (argc > 1 && !strcmp(argv[1], FORK_EXEC_CHILD_PRG_NAME)) {
+ init_global_file_handles();
exit(test_child_ksm());
}
@@ -674,22 +695,7 @@ int main(int argc, char **argv)
pagesize = getpagesize();
- mem_fd = open("/proc/self/mem", O_RDWR);
- if (mem_fd < 0)
- ksft_exit_fail_msg("opening /proc/self/mem failed\n");
- ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
- if (ksm_fd < 0)
- ksft_exit_skip("open(\"/sys/kernel/mm/ksm/run\") failed\n");
- ksm_full_scans_fd = open("/sys/kernel/mm/ksm/full_scans", O_RDONLY);
- if (ksm_full_scans_fd < 0)
- ksft_exit_skip("open(\"/sys/kernel/mm/ksm/full_scans\") failed\n");
- pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
- if (pagemap_fd < 0)
- ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n");
- proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY);
- proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages",
- O_RDONLY);
- ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);
+ init_global_file_handles();
test_unmerge();
test_unmerge_zero_pages();
diff --git a/tools/testing/selftests/mm/map_fixed_noreplace.c b/tools/testing/selftests/mm/map_fixed_noreplace.c
index b74813fdc951..d53de2486080 100644
--- a/tools/testing/selftests/mm/map_fixed_noreplace.c
+++ b/tools/testing/selftests/mm/map_fixed_noreplace.c
@@ -67,7 +67,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error: munmap failed!?\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 5*PAGE_SIZE at base\n");
addr = base_addr + page_size;
size = 3 * page_size;
@@ -76,7 +77,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error: first mmap() failed unexpectedly\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 3*PAGE_SIZE at base+PAGE_SIZE\n");
/*
* Exact same mapping again:
@@ -93,7 +95,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:1: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 5*PAGE_SIZE at base\n");
/*
* Second mapping contained within first:
@@ -111,7 +114,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:2: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 2*PAGE_SIZE at base+PAGE_SIZE\n");
/*
* Overlap end of existing mapping:
@@ -128,7 +132,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:3: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 2*PAGE_SIZE at base+(3*PAGE_SIZE)\n");
/*
* Overlap start of existing mapping:
@@ -145,7 +150,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:4: mmap() succeeded when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() 2*PAGE_SIZE bytes at base\n");
/*
* Adjacent to start of existing mapping:
@@ -162,7 +168,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:5: mmap() failed when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() PAGE_SIZE at base\n");
/*
* Adjacent to end of existing mapping:
@@ -179,7 +186,8 @@ int main(void)
dump_maps();
ksft_exit_fail_msg("Error:6: mmap() failed when it shouldn't have\n");
}
- ksft_test_result_pass("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_print_msg("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
+ ksft_test_result_pass("mmap() PAGE_SIZE at base+(4*PAGE_SIZE)\n");
addr = base_addr;
size = 5 * page_size;
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 49a56eb5d036..666ab7d9390b 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -43,7 +43,6 @@ tap
tcp_fastopen_backup_key
tcp_inq
tcp_mmap
-test_unix_oob
timestamping
tls
toeplitz
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index bd01e4a0be2c..bc3925200637 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -43,6 +43,8 @@ TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
TEST_PROGS += srv6_end_next_csid_l3vpn_test.sh
TEST_PROGS += srv6_end_x_next_csid_l3vpn_test.sh
TEST_PROGS += srv6_end_flavors_test.sh
+TEST_PROGS += srv6_end_dx4_netfilter_test.sh
+TEST_PROGS += srv6_end_dx6_netfilter_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
TEST_PROGS += ndisc_unsolicited_na_test.sh
@@ -53,6 +55,7 @@ TEST_PROGS += bind_bhash.sh
TEST_PROGS += ip_local_port_range.sh
TEST_PROGS += rps_default_mask.sh
TEST_PROGS += big_tcp.sh
+TEST_PROGS += netns-sysctl.sh
TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index 3b83c797650d..50584479540b 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,4 +1,4 @@
CFLAGS += $(KHDR_INCLUDES)
-TEST_GEN_PROGS := diag_uid test_unix_oob unix_connect scm_pidfd scm_rights
+TEST_GEN_PROGS := diag_uid msg_oob scm_pidfd scm_rights unix_connect
include ../../lib.mk
diff --git a/tools/testing/selftests/net/af_unix/config b/tools/testing/selftests/net/af_unix/config
new file mode 100644
index 000000000000..37368567768c
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/config
@@ -0,0 +1,3 @@
+CONFIG_UNIX=y
+CONFIG_AF_UNIX_OOB=y
+CONFIG_UNIX_DIAG=m
diff --git a/tools/testing/selftests/net/af_unix/msg_oob.c b/tools/testing/selftests/net/af_unix/msg_oob.c
new file mode 100644
index 000000000000..16d0c172eaeb
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/msg_oob.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <netinet/in.h>
+#include <sys/epoll.h>
+#include <sys/ioctl.h>
+#include <sys/signalfd.h>
+#include <sys/socket.h>
+
+#include "../../kselftest_harness.h"
+
+#define BUF_SZ 32
+
+FIXTURE(msg_oob)
+{
+ int fd[4]; /* 0: AF_UNIX sender
+ * 1: AF_UNIX receiver
+ * 2: TCP sender
+ * 3: TCP receiver
+ */
+ int signal_fd;
+ int epoll_fd[2]; /* 0: AF_UNIX receiver
+ * 1: TCP receiver
+ */
+ bool tcp_compliant;
+};
+
+FIXTURE_VARIANT(msg_oob)
+{
+ bool peek;
+};
+
+FIXTURE_VARIANT_ADD(msg_oob, no_peek)
+{
+ .peek = false,
+};
+
+FIXTURE_VARIANT_ADD(msg_oob, peek)
+{
+ .peek = true
+};
+
+static void create_unix_socketpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ int ret;
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, self->fd);
+ ASSERT_EQ(ret, 0);
+}
+
+static void create_tcp_socketpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ struct sockaddr_in addr;
+ socklen_t addrlen;
+ int listen_fd;
+ int ret;
+
+ listen_fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(listen_fd, 0);
+
+ ret = listen(listen_fd, -1);
+ ASSERT_EQ(ret, 0);
+
+ addrlen = sizeof(addr);
+ ret = getsockname(listen_fd, (struct sockaddr *)&addr, &addrlen);
+ ASSERT_EQ(ret, 0);
+
+ self->fd[2] = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(self->fd[2], 0);
+
+ ret = connect(self->fd[2], (struct sockaddr *)&addr, addrlen);
+ ASSERT_EQ(ret, 0);
+
+ self->fd[3] = accept(listen_fd, (struct sockaddr *)&addr, &addrlen);
+ ASSERT_GE(self->fd[3], 0);
+
+ ret = fcntl(self->fd[3], F_SETFL, O_NONBLOCK);
+ ASSERT_EQ(ret, 0);
+}
+
+static void setup_sigurg(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ struct signalfd_siginfo siginfo;
+ int pid = getpid();
+ sigset_t mask;
+ int i, ret;
+
+ for (i = 0; i < 2; i++) {
+ ret = ioctl(self->fd[i * 2 + 1], FIOSETOWN, &pid);
+ ASSERT_EQ(ret, 0);
+ }
+
+ ret = sigemptyset(&mask);
+ ASSERT_EQ(ret, 0);
+
+ ret = sigaddset(&mask, SIGURG);
+ ASSERT_EQ(ret, 0);
+
+ ret = sigprocmask(SIG_BLOCK, &mask, NULL);
+ ASSERT_EQ(ret, 0);
+
+ self->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK);
+ ASSERT_GE(self->signal_fd, 0);
+
+ ret = read(self->signal_fd, &siginfo, sizeof(siginfo));
+ ASSERT_EQ(ret, -1);
+}
+
+static void setup_epollpri(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ struct epoll_event event = {
+ .events = EPOLLPRI,
+ };
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ int ret;
+
+ self->epoll_fd[i] = epoll_create1(0);
+ ASSERT_GE(self->epoll_fd[i], 0);
+
+ ret = epoll_ctl(self->epoll_fd[i], EPOLL_CTL_ADD, self->fd[i * 2 + 1], &event);
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+static void close_sockets(FIXTURE_DATA(msg_oob) *self)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ close(self->fd[i]);
+}
+
+FIXTURE_SETUP(msg_oob)
+{
+ create_unix_socketpair(_metadata, self);
+ create_tcp_socketpair(_metadata, self);
+
+ setup_sigurg(_metadata, self);
+ setup_epollpri(_metadata, self);
+
+ self->tcp_compliant = true;
+}
+
+FIXTURE_TEARDOWN(msg_oob)
+{
+ close_sockets(self);
+}
+
+static void __epollpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ bool oob_remaining)
+{
+ struct epoll_event event[2] = {};
+ int i, ret[2];
+
+ for (i = 0; i < 2; i++)
+ ret[i] = epoll_wait(self->epoll_fd[i], &event[i], 1, 0);
+
+ ASSERT_EQ(ret[0], oob_remaining);
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(ret[0], ret[1]);
+
+ if (oob_remaining) {
+ ASSERT_EQ(event[0].events, EPOLLPRI);
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(event[0].events, event[1].events);
+ }
+}
+
+static void __sendpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ const void *buf, size_t len, int flags)
+{
+ int i, ret[2];
+
+ for (i = 0; i < 2; i++) {
+ struct signalfd_siginfo siginfo = {};
+ int bytes;
+
+ ret[i] = send(self->fd[i * 2], buf, len, flags);
+
+ bytes = read(self->signal_fd, &siginfo, sizeof(siginfo));
+
+ if (flags & MSG_OOB) {
+ ASSERT_EQ(bytes, sizeof(siginfo));
+ ASSERT_EQ(siginfo.ssi_signo, SIGURG);
+
+ bytes = read(self->signal_fd, &siginfo, sizeof(siginfo));
+ }
+
+ ASSERT_EQ(bytes, -1);
+ }
+
+ ASSERT_EQ(ret[0], len);
+ ASSERT_EQ(ret[0], ret[1]);
+}
+
+static void __recvpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ const void *expected_buf, int expected_len,
+ int buf_len, int flags)
+{
+ int i, ret[2], recv_errno[2], expected_errno = 0;
+ char recv_buf[2][BUF_SZ] = {};
+ bool printed = false;
+
+ ASSERT_GE(BUF_SZ, buf_len);
+
+ errno = 0;
+
+ for (i = 0; i < 2; i++) {
+ ret[i] = recv(self->fd[i * 2 + 1], recv_buf[i], buf_len, flags);
+ recv_errno[i] = errno;
+ }
+
+ if (expected_len < 0) {
+ expected_errno = -expected_len;
+ expected_len = -1;
+ }
+
+ if (ret[0] != expected_len || recv_errno[0] != expected_errno) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("Expected:%s", expected_errno ? strerror(expected_errno) : expected_buf);
+
+ ASSERT_EQ(ret[0], expected_len);
+ ASSERT_EQ(recv_errno[0], expected_errno);
+ }
+
+ if (ret[0] != ret[1] || recv_errno[0] != recv_errno[1]) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("TCP :%s", ret[1] < 0 ? strerror(recv_errno[1]) : recv_buf[1]);
+
+ printed = true;
+
+ if (self->tcp_compliant) {
+ ASSERT_EQ(ret[0], ret[1]);
+ ASSERT_EQ(recv_errno[0], recv_errno[1]);
+ }
+ }
+
+ if (expected_len >= 0) {
+ int cmp;
+
+ cmp = strncmp(expected_buf, recv_buf[0], expected_len);
+ if (cmp) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("Expected:%s", expected_errno ? strerror(expected_errno) : expected_buf);
+
+ ASSERT_EQ(cmp, 0);
+ }
+
+ cmp = strncmp(recv_buf[0], recv_buf[1], expected_len);
+ if (cmp) {
+ if (!printed) {
+ TH_LOG("AF_UNIX :%s", ret[0] < 0 ? strerror(recv_errno[0]) : recv_buf[0]);
+ TH_LOG("TCP :%s", ret[1] < 0 ? strerror(recv_errno[1]) : recv_buf[1]);
+ }
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(cmp, 0);
+ }
+ }
+}
+
+static void __setinlinepair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self)
+{
+ int i, oob_inline = 1;
+
+ for (i = 0; i < 2; i++) {
+ int ret;
+
+ ret = setsockopt(self->fd[i * 2 + 1], SOL_SOCKET, SO_OOBINLINE,
+ &oob_inline, sizeof(oob_inline));
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+static void __siocatmarkpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ bool oob_head)
+{
+ int answ[2] = {};
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ int ret;
+
+ ret = ioctl(self->fd[i * 2 + 1], SIOCATMARK, &answ[i]);
+ ASSERT_EQ(ret, 0);
+ }
+
+ ASSERT_EQ(answ[0], oob_head);
+
+ if (self->tcp_compliant)
+ ASSERT_EQ(answ[0], answ[1]);
+}
+
+#define sendpair(buf, len, flags) \
+ __sendpair(_metadata, self, buf, len, flags)
+
+#define recvpair(expected_buf, expected_len, buf_len, flags) \
+ do { \
+ if (variant->peek) \
+ __recvpair(_metadata, self, \
+ expected_buf, expected_len, \
+ buf_len, (flags) | MSG_PEEK); \
+ __recvpair(_metadata, self, \
+ expected_buf, expected_len, buf_len, flags); \
+ } while (0)
+
+#define epollpair(oob_remaining) \
+ __epollpair(_metadata, self, oob_remaining)
+
+#define siocatmarkpair(oob_head) \
+ __siocatmarkpair(_metadata, self, oob_head)
+
+#define setinlinepair() \
+ __setinlinepair(_metadata, self)
+
+#define tcp_incompliant \
+ for (self->tcp_compliant = false; \
+ self->tcp_compliant == false; \
+ self->tcp_compliant = true)
+
+TEST_F(msg_oob, non_oob)
+{
+ sendpair("x", 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("x", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+}
+
+TEST_F(msg_oob, oob_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("", -EAGAIN, 1, 0); /* Drop OOB. */
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob_ahead)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 4, 0);
+ epollpair(false);
+ siocatmarkpair(true);
+}
+
+TEST_F(msg_oob, oob_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 5, 0); /* Break at OOB even with enough buffer. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ recvpair("", -EAGAIN, 1, 0);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob_ahead_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 9, 0); /* Break at OOB even after it's recv()ed. */
+ epollpair(false);
+ siocatmarkpair(true);
+
+ recvpair("world", 5, 5, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, oob_break_drop)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 10, 0); /* Break at OOB even with enough buffer. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("world", 5, 10, 0); /* Drop OOB and recv() the next skb. */
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, ex_oob_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("wor", 3, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("ld", 2, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hellowo", 7, 10, 0); /* Break at OOB but not at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("r", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ recvpair("ld", 2, 2, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, ex_oob_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB); /* TCP drops "x" at this moment. */
+ epollpair(true);
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0); /* TCP drops "y" by passing through it. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, MSG_OOB); /* TCP returns -EINVAL. */
+ epollpair(false);
+ siocatmarkpair(true);
+ }
+}
+
+TEST_F(msg_oob, ex_oob_drop_2)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB); /* TCP drops "x" at this moment. */
+ epollpair(true);
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+ }
+
+ recvpair("y", 1, 1, MSG_OOB);
+ epollpair(false);
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0); /* TCP returns -EAGAIN. */
+ epollpair(false);
+ siocatmarkpair(true);
+ }
+}
+
+TEST_F(msg_oob, ex_oob_ahead_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("wor", 3, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("r", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ sendpair("ld", 2, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ tcp_incompliant {
+ recvpair("hellowol", 8, 10, 0); /* TCP recv()s "helloworl", why "r" ?? */
+ }
+
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("d", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+}
+
+TEST_F(msg_oob, ex_oob_siocatmark)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 4, 0); /* Intentionally stop at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_oob)
+{
+ setinlinepair();
+
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("x", 1, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_oob_break)
+{
+ setinlinepair();
+
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("", -EINVAL, 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 5, 0); /* Break at OOB but not at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("o", 1, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_oob_ahead_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("world", 5, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ setinlinepair();
+
+ recvpair("hell", 4, 9, 0); /* Break at OOB even with enough buffer. */
+ epollpair(false);
+ siocatmarkpair(true);
+
+ tcp_incompliant {
+ recvpair("world", 5, 6, 0); /* TCP recv()s "oworld", ... "o" ??? */
+ }
+
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_ex_oob_break)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("wor", 3, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ sendpair("ld", 2, 0);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ setinlinepair();
+
+ recvpair("hellowo", 7, 10, 0); /* Break at OOB but not at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("rld", 3, 3, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_ex_oob_no_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ setinlinepair();
+
+ sendpair("y", 1, MSG_OOB); /* TCP does NOT drop "x" at this moment. */
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+}
+
+TEST_F(msg_oob, inline_ex_oob_drop)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB); /* TCP drops "x" at this moment. */
+ epollpair(true);
+
+ setinlinepair();
+
+ tcp_incompliant {
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0); /* TCP recv()s "y". */
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, 0); /* TCP returns -EAGAIN. */
+ epollpair(false);
+ siocatmarkpair(false);
+ }
+}
+
+TEST_F(msg_oob, inline_ex_oob_siocatmark)
+{
+ sendpair("hello", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("o", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ setinlinepair();
+
+ sendpair("world", 5, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(false);
+
+ recvpair("hell", 4, 4, 0); /* Intentionally stop at ex-OOB. */
+ epollpair(true);
+ siocatmarkpair(false);
+}
+
+TEST_HARNESS_MAIN
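For readers less familiar with urgent data, the primitive that every test above compares against TCP boils down to the following sequence on a connected SOCK_STREAM pair (a bare illustration, not part of the test; error handling omitted, fd[0]/fd[1] as in the fixture):

    int at_mark = 0;
    char c;

    send(fd[0], "x", 1, MSG_OOB);        /* 'x' becomes the OOB byte; the receiver sees EPOLLPRI
                                          * (and SIGURG once FIOSETOWN is set, as in setup_sigurg()) */
    ioctl(fd[1], SIOCATMARK, &at_mark);  /* 1 while the next read would start at the OOB mark */
    recv(fd[1], &c, 1, MSG_OOB);         /* fetch the OOB byte out of band */

The sendpair()/recvpair()/epollpair()/siocatmarkpair() helpers simply run each step on the AF_UNIX pair and the TCP pair in lockstep, and the tcp_incompliant block marks the spots where the AF_UNIX behaviour is known to differ from TCP.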
diff --git a/tools/testing/selftests/net/af_unix/scm_rights.c b/tools/testing/selftests/net/af_unix/scm_rights.c
index 2bfed46e0b19..d66336256580 100644
--- a/tools/testing/selftests/net/af_unix/scm_rights.c
+++ b/tools/testing/selftests/net/af_unix/scm_rights.c
@@ -14,12 +14,12 @@
FIXTURE(scm_rights)
{
- int fd[16];
+ int fd[32];
};
FIXTURE_VARIANT(scm_rights)
{
- char name[16];
+ char name[32];
int type;
int flags;
bool test_listener;
@@ -172,6 +172,8 @@ static void __create_sockets(struct __test_metadata *_metadata,
const FIXTURE_VARIANT(scm_rights) *variant,
int n)
{
+ ASSERT_LE(n * 2, sizeof(self->fd) / sizeof(self->fd[0]));
+
if (variant->test_listener)
create_listeners(_metadata, self, n);
else
@@ -283,4 +285,23 @@ TEST_F(scm_rights, cross_edge)
close_sockets(8);
}
+TEST_F(scm_rights, backtrack_from_scc)
+{
+ create_sockets(10);
+
+ send_fd(0, 1);
+ send_fd(0, 4);
+ send_fd(1, 2);
+ send_fd(2, 3);
+ send_fd(3, 1);
+
+ send_fd(5, 6);
+ send_fd(5, 9);
+ send_fd(6, 7);
+ send_fd(7, 8);
+ send_fd(8, 6);
+
+ close_sockets(10);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/af_unix/test_unix_oob.c b/tools/testing/selftests/net/af_unix/test_unix_oob.c
deleted file mode 100644
index a7c51889acd5..000000000000
--- a/tools/testing/selftests/net/af_unix/test_unix_oob.c
+++ /dev/null
@@ -1,436 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/socket.h>
-#include <arpa/inet.h>
-#include <unistd.h>
-#include <string.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <errno.h>
-#include <netinet/tcp.h>
-#include <sys/un.h>
-#include <sys/signal.h>
-#include <sys/poll.h>
-
-static int pipefd[2];
-static int signal_recvd;
-static pid_t producer_id;
-static char sock_name[32];
-
-static void sig_hand(int sn, siginfo_t *si, void *p)
-{
- signal_recvd = sn;
-}
-
-static int set_sig_handler(int signal)
-{
- struct sigaction sa;
-
- sa.sa_sigaction = sig_hand;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO | SA_RESTART;
-
- return sigaction(signal, &sa, NULL);
-}
-
-static void set_filemode(int fd, int set)
-{
- int flags = fcntl(fd, F_GETFL, 0);
-
- if (set)
- flags &= ~O_NONBLOCK;
- else
- flags |= O_NONBLOCK;
- fcntl(fd, F_SETFL, flags);
-}
-
-static void signal_producer(int fd)
-{
- char cmd;
-
- cmd = 'S';
- write(fd, &cmd, sizeof(cmd));
-}
-
-static void wait_for_signal(int fd)
-{
- char buf[5];
-
- read(fd, buf, 5);
-}
-
-static void die(int status)
-{
- fflush(NULL);
- unlink(sock_name);
- kill(producer_id, SIGTERM);
- exit(status);
-}
-
-int is_sioctatmark(int fd)
-{
- int ans = -1;
-
- if (ioctl(fd, SIOCATMARK, &ans, sizeof(ans)) < 0) {
-#ifdef DEBUG
- perror("SIOCATMARK Failed");
-#endif
- }
- return ans;
-}
-
-void read_oob(int fd, char *c)
-{
-
- *c = ' ';
- if (recv(fd, c, sizeof(*c), MSG_OOB) < 0) {
-#ifdef DEBUG
- perror("Reading MSG_OOB Failed");
-#endif
- }
-}
-
-int read_data(int pfd, char *buf, int size)
-{
- int len = 0;
-
- memset(buf, size, '0');
- len = read(pfd, buf, size);
-#ifdef DEBUG
- if (len < 0)
- perror("read failed");
-#endif
- return len;
-}
-
-static void wait_for_data(int pfd, int event)
-{
- struct pollfd pfds[1];
-
- pfds[0].fd = pfd;
- pfds[0].events = event;
- poll(pfds, 1, -1);
-}
-
-void producer(struct sockaddr_un *consumer_addr)
-{
- int cfd;
- char buf[64];
- int i;
-
- memset(buf, 'x', sizeof(buf));
- cfd = socket(AF_UNIX, SOCK_STREAM, 0);
-
- wait_for_signal(pipefd[0]);
- if (connect(cfd, (struct sockaddr *)consumer_addr,
- sizeof(*consumer_addr)) != 0) {
- perror("Connect failed");
- kill(0, SIGTERM);
- exit(1);
- }
-
- for (i = 0; i < 2; i++) {
- /* Test 1: Test for SIGURG and OOB */
- wait_for_signal(pipefd[0]);
- memset(buf, 'x', sizeof(buf));
- buf[63] = '@';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- wait_for_signal(pipefd[0]);
-
- /* Test 2: Test for OOB being overwitten */
- memset(buf, 'x', sizeof(buf));
- buf[63] = '%';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- memset(buf, 'x', sizeof(buf));
- buf[63] = '#';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- wait_for_signal(pipefd[0]);
-
- /* Test 3: Test for SIOCATMARK */
- memset(buf, 'x', sizeof(buf));
- buf[63] = '@';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- memset(buf, 'x', sizeof(buf));
- buf[63] = '%';
- send(cfd, buf, sizeof(buf), MSG_OOB);
-
- memset(buf, 'x', sizeof(buf));
- send(cfd, buf, sizeof(buf), 0);
-
- wait_for_signal(pipefd[0]);
-
- /* Test 4: Test for 1byte OOB msg */
- memset(buf, 'x', sizeof(buf));
- buf[0] = '@';
- send(cfd, buf, 1, MSG_OOB);
- }
-}
-
-int
-main(int argc, char **argv)
-{
- int lfd, pfd;
- struct sockaddr_un consumer_addr, paddr;
- socklen_t len = sizeof(consumer_addr);
- char buf[1024];
- int on = 0;
- char oob;
- int atmark;
-
- lfd = socket(AF_UNIX, SOCK_STREAM, 0);
- memset(&consumer_addr, 0, sizeof(consumer_addr));
- consumer_addr.sun_family = AF_UNIX;
- sprintf(sock_name, "unix_oob_%d", getpid());
- unlink(sock_name);
- strcpy(consumer_addr.sun_path, sock_name);
-
- if ((bind(lfd, (struct sockaddr *)&consumer_addr,
- sizeof(consumer_addr))) != 0) {
- perror("socket bind failed");
- exit(1);
- }
-
- pipe(pipefd);
-
- listen(lfd, 1);
-
- producer_id = fork();
- if (producer_id == 0) {
- producer(&consumer_addr);
- exit(0);
- }
-
- set_sig_handler(SIGURG);
- signal_producer(pipefd[1]);
-
- pfd = accept(lfd, (struct sockaddr *) &paddr, &len);
- fcntl(pfd, F_SETOWN, getpid());
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 1:
- * veriyf that SIGURG is
- * delivered, 63 bytes are
- * read, oob is '@', and POLLPRI works.
- */
- wait_for_data(pfd, POLLPRI);
- read_oob(pfd, &oob);
- len = read_data(pfd, buf, 1024);
- if (!signal_recvd || len != 63 || oob != '@') {
- fprintf(stderr, "Test 1 failed sigurg %d len %d %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 2:
- * Verify that the first OOB is over written by
- * the 2nd one and the first OOB is returned as
- * part of the read, and sigurg is received.
- */
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = 0;
- while (len < 70)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- read_oob(pfd, &oob);
- if (!signal_recvd || len != 127 || oob != '#') {
- fprintf(stderr, "Test 2 failed, sigurg %d len %d OOB %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 3:
- * verify that 2nd oob over writes
- * the first one and read breaks at
- * oob boundary returning 127 bytes
- * and sigurg is received and atmark
- * is set.
- * oob is '%' and second read returns
- * 64 bytes.
- */
- len = 0;
- wait_for_data(pfd, POLLIN | POLLPRI);
- while (len < 150)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- read_oob(pfd, &oob);
-
- if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
- fprintf(stderr,
- "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
- signal_recvd, len, oob, atmark);
- die(1);
- }
-
- signal_recvd = 0;
-
- len = read_data(pfd, buf, 1024);
- if (len != 64) {
- fprintf(stderr, "Test 3.1 failed, sigurg %d len %d OOB %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 4:
- * verify that a single byte
- * oob message is delivered.
- * set non blocking mode and
- * check proper error is
- * returned and sigurg is
- * received and correct
- * oob is read.
- */
-
- set_filemode(pfd, 0);
-
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = read_data(pfd, buf, 1024);
- if ((len == -1) && (errno == 11))
- len = 0;
-
- read_oob(pfd, &oob);
-
- if (!signal_recvd || len != 0 || oob != '@') {
- fprintf(stderr, "Test 4 failed, sigurg %d len %d OOB %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- set_filemode(pfd, 1);
-
- /* Inline Testing */
-
- on = 1;
- if (setsockopt(pfd, SOL_SOCKET, SO_OOBINLINE, &on, sizeof(on))) {
- perror("SO_OOBINLINE");
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 1 -- Inline:
- * Check that SIGURG is
- * delivered and 63 bytes are
- * read and oob is '@'
- */
-
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = read_data(pfd, buf, 1024);
-
- if (!signal_recvd || len != 63) {
- fprintf(stderr, "Test 1 Inline failed, sigurg %d len %d\n",
- signal_recvd, len);
- die(1);
- }
-
- len = read_data(pfd, buf, 1024);
-
- if (len != 1) {
- fprintf(stderr,
- "Test 1.1 Inline failed, sigurg %d len %d oob %c\n",
- signal_recvd, len, oob);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 2 -- Inline:
- * Verify that the first OOB is over written by
- * the 2nd one and read breaks correctly on
- * 2nd OOB boundary with the first OOB returned as
- * part of the read, and sigurg is delivered and
- * siocatmark returns true.
- * next read returns one byte, the oob byte
- * and siocatmark returns false.
- */
- len = 0;
- wait_for_data(pfd, POLLIN | POLLPRI);
- while (len < 70)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (len != 127 || atmark != 1 || !signal_recvd) {
- fprintf(stderr, "Test 2 Inline failed, len %d atmark %d\n",
- len, atmark);
- die(1);
- }
-
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (len != 1 || buf[0] != '#' || atmark == 1) {
- fprintf(stderr, "Test 2.1 Inline failed, len %d data %c atmark %d\n",
- len, buf[0], atmark);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 3 -- Inline:
- * verify that 2nd oob over writes
- * the first one and read breaks at
- * oob boundary returning 127 bytes
- * and sigurg is received and siocatmark
- * is true after the read.
- * subsequent read returns 65 bytes
- * because of oob which should be '%'.
- */
- len = 0;
- wait_for_data(pfd, POLLIN | POLLPRI);
- while (len < 126)
- len = recv(pfd, buf, 1024, MSG_PEEK);
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (!signal_recvd || len != 127 || !atmark) {
- fprintf(stderr,
- "Test 3 Inline failed, sigurg %d len %d data %c\n",
- signal_recvd, len, buf[0]);
- die(1);
- }
-
- len = read_data(pfd, buf, 1024);
- atmark = is_sioctatmark(pfd);
- if (len != 65 || buf[0] != '%' || atmark != 0) {
- fprintf(stderr,
- "Test 3.1 Inline failed, len %d oob %c atmark %d\n",
- len, buf[0], atmark);
- die(1);
- }
-
- signal_recvd = 0;
- signal_producer(pipefd[1]);
-
- /* Test 4 -- Inline:
- * verify that a single
- * byte oob message is delivered
- * and read returns one byte, the oob
- * byte and sigurg is received
- */
- wait_for_data(pfd, POLLIN | POLLPRI);
- len = read_data(pfd, buf, 1024);
- if (!signal_recvd || len != 1 || buf[0] != '@') {
- fprintf(stderr,
- "Test 4 Inline failed, signal %d len %d data %c\n",
- signal_recvd, len, buf[0]);
- die(1);
- }
- die(0);
-}
diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
index 7e7ed6c558da..d458b45c775b 100755
--- a/tools/testing/selftests/net/amt.sh
+++ b/tools/testing/selftests/net/amt.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Author: Taehee Yoo <ap420073@gmail.com>
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 04de7a6ba6f3..5b9baf708950 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -26,7 +26,6 @@ CONFIG_INET_ESP=y
CONFIG_INET_ESP_OFFLOAD=y
CONFIG_NET_FOU=y
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_IP_GRE=m
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
@@ -75,7 +74,12 @@ CONFIG_NET_SCH_ETF=m
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_PRIO=m
CONFIG_NFT_COMPAT=m
+CONFIG_NF_CONNTRACK_OVS=y
CONFIG_NF_FLOW_TABLE=m
+CONFIG_OPENVSWITCH=m
+CONFIG_OPENVSWITCH_GENEVE=m
+CONFIG_OPENVSWITCH_GRE=m
+CONFIG_OPENVSWITCH_VXLAN=m
CONFIG_PSAMPLE=m
CONFIG_TCP_MD5SIG=y
CONFIG_TEST_BLACKHOLE_DEV=m
@@ -101,3 +105,5 @@ CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_CRYPTO_ARIA=y
CONFIG_XFRM_INTERFACE=m
CONFIG_XFRM_USER=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index fa7b59ff4029..224346426ef2 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -39,6 +39,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
ipip_hier_gre.sh \
lib_sh_test.sh \
local_termination.sh \
+ min_max_mtu.sh \
mirror_gre_bound.sh \
mirror_gre_bridge_1d.sh \
mirror_gre_bridge_1d_vlan.sh \
@@ -70,6 +71,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
router_broadcast.sh \
router_mpath_nh_res.sh \
router_mpath_nh.sh \
+ router_mpath_seed.sh \
router_multicast.sh \
router_multipath.sh \
router_nh.sh \
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index f1de525cfa55..62a05bca1e82 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -122,6 +122,8 @@ devlink_reload()
still_pending=$(devlink resource show "$DEVLINK_DEV" | \
grep -c "size_new")
check_err $still_pending "Failed reload - There are still unset sizes"
+
+ udevadm settle
}
declare -A DEVLINK_ORIG
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index eabbdf00d8ca..ff96bb7535ff 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -1134,12 +1134,19 @@ bridge_ageing_time_get()
}
declare -A SYSCTL_ORIG
+sysctl_save()
+{
+ local key=$1; shift
+
+ SYSCTL_ORIG[$key]=$(sysctl -n $key)
+}
+
sysctl_set()
{
local key=$1; shift
local value=$1; shift
- SYSCTL_ORIG[$key]=$(sysctl -n $key)
+ sysctl_save "$key"
sysctl -qw $key="$value"
}
@@ -1218,22 +1225,6 @@ trap_uninstall()
tc filter del dev $dev $direction pref 1 flower
}
-slow_path_trap_install()
-{
- # For slow-path testing, we need to install a trap to get to
- # slow path the packets that would otherwise be switched in HW.
- if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
- trap_install "$@"
- fi
-}
-
-slow_path_trap_uninstall()
-{
- if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
- trap_uninstall "$@"
- fi
-}
-
__icmp_capture_add_del()
{
local add_del=$1; shift
@@ -1250,22 +1241,34 @@ __icmp_capture_add_del()
icmp_capture_install()
{
- __icmp_capture_add_del add 100 "" "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del add 100 "" "$tundev" "$filter"
}
icmp_capture_uninstall()
{
- __icmp_capture_add_del del 100 "" "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del del 100 "" "$tundev" "$filter"
}
icmp6_capture_install()
{
- __icmp_capture_add_del add 100 v6 "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del add 100 v6 "$tundev" "$filter"
}
icmp6_capture_uninstall()
{
- __icmp_capture_add_del del 100 v6 "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del del 100 v6 "$tundev" "$filter"
}
__vlan_capture_add_del()
@@ -1283,12 +1286,18 @@ __vlan_capture_add_del()
vlan_capture_install()
{
- __vlan_capture_add_del add 100 "$@"
+ local dev=$1; shift
+ local filter=$1; shift
+
+ __vlan_capture_add_del add 100 "$dev" "$filter"
}
vlan_capture_uninstall()
{
- __vlan_capture_add_del del 100 "$@"
+ local dev=$1; shift
+ local filter=$1; shift
+
+ __vlan_capture_add_del del 100 "$dev" "$filter"
}
__dscp_capture_add_del()
@@ -1648,34 +1657,61 @@ __start_traffic()
local sip=$1; shift
local dip=$1; shift
local dmac=$1; shift
+ local -a mz_args=("$@")
$MZ $h_in -p $pktsize -A $sip -B $dip -c 0 \
- -a own -b $dmac -t "$proto" -q "$@" &
+ -a own -b $dmac -t "$proto" -q "${mz_args[@]}" &
sleep 1
}
start_traffic_pktsize()
{
local pktsize=$1; shift
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
- __start_traffic $pktsize udp "$@"
+ __start_traffic $pktsize udp "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
start_tcp_traffic_pktsize()
{
local pktsize=$1; shift
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
- __start_traffic $pktsize tcp "$@"
+ __start_traffic $pktsize tcp "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
start_traffic()
{
- start_traffic_pktsize 8000 "$@"
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
+
+ start_traffic_pktsize 8000 "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
start_tcp_traffic()
{
- start_tcp_traffic_pktsize 8000 "$@"
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
+
+ start_tcp_traffic_pktsize 8000 "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
stop_traffic()
diff --git a/tools/testing/selftests/net/forwarding/min_max_mtu.sh b/tools/testing/selftests/net/forwarding/min_max_mtu.sh
new file mode 100755
index 000000000000..97bb8b221bed
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/min_max_mtu.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +--------------------+
+# | H1 |
+# | |
+# | $h1.10 + |
+# | 192.0.2.2/24 | |
+# | 2001:db8:1::2/64 | |
+# | | |
+# | $h1 + |
+# | | |
+# +------------------|-+
+# |
+# +------------------|-+
+# | SW | |
+# | $swp1 + |
+# | | |
+# | $swp1.10 + |
+# | 192.0.2.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# +--------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ max_mtu_config_test
+ max_mtu_traffic_test
+ min_mtu_config_test
+ min_mtu_traffic_test
+"
+
+NUM_NETIFS=2
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ vlan_create $h1 10 v$h1 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 10 192.0.2.2/24 2001:db8:1::2/64
+ simple_if_fini $h1
+}
+
+switch_create()
+{
+ ip li set dev $swp1 up
+ vlan_create $swp1 10 "" 192.0.2.1/24 2001:db8:1::1/64
+}
+
+switch_destroy()
+{
+ ip li set dev $swp1 down
+ vlan_destroy $swp1 10
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ vrf_prepare
+
+ h1_create
+
+ switch_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ switch_destroy
+
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1.10 192.0.2.1
+}
+
+ping_ipv6()
+{
+ ping6_test $h1.10 2001:db8:1::1
+}
+
+min_max_mtu_get_if()
+{
+ local dev=$1; shift
+ local min_max=$1; shift
+
+ ip -d -j link show $dev | jq ".[].$min_max"
+}
+
+ensure_compatible_min_max_mtu()
+{
+ local min_max=$1; shift
+
+ local mtu=$(min_max_mtu_get_if ${NETIFS[p1]} $min_max)
+ local i
+
+ for ((i = 2; i <= NUM_NETIFS; ++i)); do
+ local current_mtu=$(min_max_mtu_get_if ${NETIFS[p$i]} $min_max)
+
+ if [ $current_mtu -ne $mtu ]; then
+ return 1
+ fi
+ done
+}
+
+mtu_set_if()
+{
+ local dev=$1; shift
+ local mtu=$1; shift
+ local should_fail=${1:-0}; shift
+
+ mtu_set $dev $mtu 2>/dev/null
+ check_err_fail $should_fail $? "Set MTU $mtu for $dev"
+}
+
+mtu_set_all_if()
+{
+ local mtu=$1; shift
+ local i
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ mtu_set_if ${NETIFS[p$i]} $mtu
+ mtu_set_if ${NETIFS[p$i]}.10 $mtu
+ done
+}
+
+mtu_restore_all_if()
+{
+ local i
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ mtu_restore ${NETIFS[p$i]}.10
+ mtu_restore ${NETIFS[p$i]}
+ done
+}
+
+mtu_test_ping4()
+{
+ local mtu=$1; shift
+ local should_fail=$1; shift
+
+ # Ping adds 8 bytes for ICMP header and 20 bytes for IP header
+ local ping_headers_len=$((20 + 8))
+ local pkt_size=$((mtu - ping_headers_len))
+
+ ping_do $h1.10 192.0.2.1 "-s $pkt_size -M do"
+ check_err_fail $should_fail $? "Ping, packet size: $pkt_size"
+}
+
+mtu_test_ping6()
+{
+ local mtu=$1; shift
+ local should_fail=$1; shift
+
+	# Ping adds 8 bytes for the ICMPv6 header and 40 bytes for the IPv6 header
+ local ping6_headers_len=$((40 + 8))
+ local pkt_size=$((mtu - ping6_headers_len))
+
+ ping6_do $h1.10 2001:db8:1::1 "-s $pkt_size -M do"
+ check_err_fail $should_fail $? "Ping6, packet size: $pkt_size"
+}
+
+max_mtu_config_test()
+{
+ local i
+
+ RET=0
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ local dev=${NETIFS[p$i]}
+ local max_mtu=$(min_max_mtu_get_if $dev "max_mtu")
+ local should_fail
+
+ should_fail=0
+ mtu_set_if $dev $max_mtu $should_fail
+ mtu_restore $dev
+
+ should_fail=1
+ mtu_set_if $dev $((max_mtu + 1)) $should_fail
+ mtu_restore $dev
+ done
+
+ log_test "Test maximum MTU configuration"
+}
+
+max_mtu_traffic_test()
+{
+ local should_fail
+ local max_mtu
+
+ RET=0
+
+ if ! ensure_compatible_min_max_mtu "max_mtu"; then
+ log_test_xfail "Topology has incompatible maximum MTU values"
+ return
+ fi
+
+ max_mtu=$(min_max_mtu_get_if ${NETIFS[p1]} "max_mtu")
+
+ should_fail=0
+ mtu_set_all_if $max_mtu
+ mtu_test_ping4 $max_mtu $should_fail
+ mtu_test_ping6 $max_mtu $should_fail
+ mtu_restore_all_if
+
+ should_fail=1
+ mtu_set_all_if $((max_mtu - 1))
+ mtu_test_ping4 $max_mtu $should_fail
+ mtu_test_ping6 $max_mtu $should_fail
+ mtu_restore_all_if
+
+ log_test "Test traffic, packet size is maximum MTU"
+}
+
+min_mtu_config_test()
+{
+ local i
+
+ RET=0
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ local dev=${NETIFS[p$i]}
+ local min_mtu=$(min_max_mtu_get_if $dev "min_mtu")
+ local should_fail
+
+ should_fail=0
+ mtu_set_if $dev $min_mtu $should_fail
+ mtu_restore $dev
+
+ should_fail=1
+ mtu_set_if $dev $((min_mtu - 1)) $should_fail
+ mtu_restore $dev
+ done
+
+ log_test "Test minimum MTU configuration"
+}
+
+min_mtu_traffic_test()
+{
+ local should_fail=0
+ local min_mtu
+
+ RET=0
+
+ if ! ensure_compatible_min_max_mtu "min_mtu"; then
+ log_test_xfail "Topology has incompatible minimum MTU values"
+ return
+ fi
+
+ min_mtu=$(min_max_mtu_get_if ${NETIFS[p1]} "min_mtu")
+ mtu_set_all_if $min_mtu
+ mtu_test_ping4 $min_mtu $should_fail
+	# Do not test the minimum MTU with IPv6, since IPv6 requires an MTU of at least 1280 bytes.
+
+ mtu_restore_all_if
+
+ log_test "Test traffic, packet size is minimum MTU"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
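Like the other forwarding selftests, min_max_mtu.sh takes its two ports from lib.sh; a hypothetical forwarding.config for such a setup (interface names are placeholders):

	# forwarding.config -- two directly connected ports (placeholder names)
	NETIFS[p1]=eth0
	NETIFS[p2]=eth1

	$ ./min_max_mtu.sh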
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
index 0266443601bc..921c733ee04f 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
@@ -74,7 +74,7 @@ test_span_gre_mac()
RET=0
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
+ mirror_install $swp1 $direction $tundev "matchall"
icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac"
mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10
@@ -82,29 +82,29 @@ test_span_gre_mac()
icmp_capture_uninstall h3-${tundev}
mirror_uninstall $swp1 $direction
- log_test "$direction $what: envelope MAC ($tcflags)"
+ log_test "$direction $what: envelope MAC"
}
test_two_spans()
{
RET=0
- mirror_install $swp1 ingress gt4 "matchall $tcflags"
- mirror_install $swp1 egress gt6 "matchall $tcflags"
- quick_test_span_gre_dir gt4 ingress
- quick_test_span_gre_dir gt6 egress
+ mirror_install $swp1 ingress gt4 "matchall"
+ mirror_install $swp1 egress gt6 "matchall"
+ quick_test_span_gre_dir gt4 8 0
+ quick_test_span_gre_dir gt6 0 8
mirror_uninstall $swp1 ingress
- fail_test_span_gre_dir gt4 ingress
- quick_test_span_gre_dir gt6 egress
+ fail_test_span_gre_dir gt4 8 0
+ quick_test_span_gre_dir gt6 0 8
- mirror_install $swp1 ingress gt4 "matchall $tcflags"
+ mirror_install $swp1 ingress gt4 "matchall"
mirror_uninstall $swp1 egress
- quick_test_span_gre_dir gt4 ingress
- fail_test_span_gre_dir gt6 egress
+ quick_test_span_gre_dir gt4 8 0
+ fail_test_span_gre_dir gt6 0 8
mirror_uninstall $swp1 ingress
- log_test "two simultaneously configured mirrors ($tcflags)"
+ log_test "two simultaneously configured mirrors"
}
test_gretap()
@@ -131,30 +131,11 @@ test_ip6gretap_mac()
test_span_gre_mac gt6 egress "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
index 6c257ec03756..e3cd48e18eeb 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
@@ -196,32 +196,11 @@ test_ip6gretap()
full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap w/ UL"
}
-test_all()
-{
- RET=0
-
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
index 04fd14b0a9b7..6c7bd33332c2 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
@@ -108,30 +108,11 @@ test_ip6gretap()
full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
index f35313c76fac..909ec956a5e5 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
@@ -104,30 +104,11 @@ test_ip6gretap_stp()
full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
index 0cf4c47a46f9..40ac9dd3aff1 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
@@ -104,30 +104,11 @@ test_ip6gretap()
full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
}
-tests()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-tests
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- tests
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index c53148b1dc63..fe4d7c906a70 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -227,10 +227,10 @@ test_lag_slave()
RET=0
tc filter add dev $swp1 ingress pref 999 \
- proto 802.1q flower vlan_ethtype arp $tcflags \
+ proto 802.1q flower vlan_ethtype arp \
action pass
mirror_install $swp1 ingress gt4 \
- "proto 802.1q flower vlan_id 333 $tcflags"
+ "proto 802.1q flower vlan_id 333"
# Test connectivity through $up_dev when $down_dev is set down.
ip link set dev $down_dev down
@@ -239,7 +239,7 @@ test_lag_slave()
setup_wait_dev $host_dev
$ARPING -I br1 192.0.2.130 -qfc 1
sleep 2
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 10
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 ">= 10"
# Test lack of connectivity when both slaves are down.
ip link set dev $up_dev down
@@ -252,7 +252,7 @@ test_lag_slave()
mirror_uninstall $swp1 ingress
tc filter del dev $swp1 ingress pref 999
- log_test "$what ($tcflags)"
+ log_test "$what"
}
test_mirror_gretap_first()
@@ -265,30 +265,11 @@ test_mirror_gretap_second()
test_lag_slave $h4 $swp4 $swp3 "mirror to gretap: LAG second slave"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index 5ea9d63915f7..65ae9d960c18 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -73,7 +73,7 @@ test_span_gre_ttl()
RET=0
mirror_install $swp1 ingress $tundev \
- "prot ip flower $tcflags ip_prot icmp"
+ "prot ip flower ip_prot icmp"
tc filter add dev $h3 ingress pref 77 prot $prot \
flower skip_hw ip_ttl 50 action pass
@@ -81,13 +81,13 @@ test_span_gre_ttl()
ip link set dev $tundev type $type ttl 50
sleep 2
- mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
+ mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 ">= 10"
ip link set dev $tundev type $type ttl 100
tc filter del dev $h3 ingress pref 77
mirror_uninstall $swp1 ingress
- log_test "$what: TTL change ($tcflags)"
+ log_test "$what: TTL change"
}
test_span_gre_tun_up()
@@ -98,15 +98,15 @@ test_span_gre_tun_up()
RET=0
ip link set dev $tundev down
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev up
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: tunnel down/up ($tcflags)"
+ log_test "$what: tunnel down/up"
}
test_span_gre_egress_up()
@@ -118,8 +118,8 @@ test_span_gre_egress_up()
RET=0
ip link set dev $swp3 down
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
# After setting the device up, wait for neighbor to get resolved so that
# we can expect mirroring to work.
@@ -127,10 +127,10 @@ test_span_gre_egress_up()
setup_wait_dev $swp3
ping -c 1 -I $swp3 $remote_ip &>/dev/null
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: egress down/up ($tcflags)"
+ log_test "$what: egress down/up"
}
test_span_gre_remote_ip()
@@ -144,14 +144,14 @@ test_span_gre_remote_ip()
RET=0
ip link set dev $tundev type $type remote $wrong_ip
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev type $type remote $correct_ip
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: remote address change ($tcflags)"
+ log_test "$what: remote address change"
}
test_span_gre_tun_del()
@@ -165,10 +165,10 @@ test_span_gre_tun_del()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
ip link del dev $tundev
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
tunnel_create $tundev $type $local_ip $remote_ip \
ttl 100 tos inherit $flags
@@ -176,11 +176,11 @@ test_span_gre_tun_del()
# Recreating the tunnel doesn't reestablish mirroring, so reinstall it
# and verify it works for the follow-up tests.
mirror_uninstall $swp1 ingress
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: tunnel deleted ($tcflags)"
+ log_test "$what: tunnel deleted"
}
test_span_gre_route_del()
@@ -192,18 +192,18 @@ test_span_gre_route_del()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
ip route del $route dev $edev
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
ip route add $route dev $edev
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: underlay route removal ($tcflags)"
+ log_test "$what: underlay route removal"
}
test_ttl()
@@ -244,30 +244,11 @@ test_route_del()
test_span_gre_route_del gt6 $swp3 2001:db8:2::/64 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
index 09389f3b9369..3a84f3ab5856 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
@@ -64,12 +64,19 @@ cleanup()
test_span_gre_dir_acl()
{
- test_span_gre_dir_ips "$@" 192.0.2.3 192.0.2.4
+ local tundev=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+
+ test_span_gre_dir_ips "$tundev" "$forward_type" \
+ "$backward_type" 192.0.2.3 192.0.2.4
}
fail_test_span_gre_dir_acl()
{
- fail_test_span_gre_dir_ips "$@" 192.0.2.3 192.0.2.4
+ local tundev=$1; shift
+
+ fail_test_span_gre_dir_ips "$tundev" 192.0.2.3 192.0.2.4
}
full_test_span_gre_dir_acl()
@@ -84,16 +91,15 @@ full_test_span_gre_dir_acl()
RET=0
mirror_install $swp1 $direction $tundev \
- "protocol ip flower $tcflags dst_ip $match_dip"
- fail_test_span_gre_dir $tundev $direction
- test_span_gre_dir_acl "$tundev" "$direction" \
- "$forward_type" "$backward_type"
+ "protocol ip flower dst_ip $match_dip"
+ fail_test_span_gre_dir $tundev
+ test_span_gre_dir_acl "$tundev" "$forward_type" "$backward_type"
mirror_uninstall $swp1 $direction
# Test lack of mirroring after ACL mirror is uninstalled.
- fail_test_span_gre_dir_acl "$tundev" "$direction"
+ fail_test_span_gre_dir_acl "$tundev"
- log_test "$direction $what ($tcflags)"
+ log_test "$direction $what"
}
test_gretap()
@@ -108,30 +114,11 @@ test_ip6gretap()
full_test_span_gre_dir_acl gt6 egress 0 8 192.0.2.3 "ACL mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
index 9edf4cb104a8..1261e6f46e34 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
@@ -37,8 +37,14 @@
# | \ / |
# | \____________________________________________/ |
# | | |
-# | + lag2 (team) |
-# | 192.0.2.130/28 |
+# | + lag2 (team) ------> + gt4-dst (gretap) |
+# | 192.0.2.130/28 loc=192.0.2.130 |
+# | rem=192.0.2.129 |
+# | ttl=100 |
+# | tos=inherit |
+# | |
+# | |
+# | |
# | |
# +---------------------------------------------------------------------------+
@@ -50,9 +56,6 @@ ALL_TESTS="
NUM_NETIFS=6
source lib.sh
source mirror_lib.sh
-source mirror_gre_lib.sh
-
-require_command $ARPING
vlan_host_create()
{
@@ -122,16 +125,21 @@ h3_create()
{
vrf_create vrf-h3
ip link set dev vrf-h3 up
- tc qdisc add dev $h3 clsact
- tc qdisc add dev $h4 clsact
h3_create_team
+
+ tunnel_create gt4-dst gretap 192.0.2.130 192.0.2.129 \
+ ttl 100 tos inherit
+ ip link set dev gt4-dst master vrf-h3
+ tc qdisc add dev gt4-dst clsact
}
h3_destroy()
{
+ tc qdisc del dev gt4-dst clsact
+ ip link set dev gt4-dst nomaster
+ tunnel_destroy gt4-dst
+
h3_destroy_team
- tc qdisc del dev $h4 clsact
- tc qdisc del dev $h3 clsact
ip link set dev vrf-h3 down
vrf_destroy vrf-h3
}
@@ -188,18 +196,12 @@ setup_prepare()
h2_create
h3_create
switch_create
-
- trap_install $h3 ingress
- trap_install $h4 ingress
}
cleanup()
{
pre_cleanup
- trap_uninstall $h4 ingress
- trap_uninstall $h3 ingress
-
switch_destroy
h3_destroy
h2_destroy
@@ -218,7 +220,8 @@ test_lag_slave()
RET=0
mirror_install $swp1 ingress gt4 \
- "proto 802.1q flower vlan_id 333 $tcflags"
+ "proto 802.1q flower vlan_id 333"
+ vlan_capture_install gt4-dst "vlan_ethtype ipv4 ip_proto icmp type 8"
# Move $down_dev away from the team. That will prompt change in
# txability of the connected device, without changing its upness. The
@@ -226,13 +229,14 @@ test_lag_slave()
# other slave.
ip link set dev $down_dev nomaster
sleep 2
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $up_dev 1 10
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 gt4-dst 100 10
# Test lack of connectivity when neither slave is txable.
ip link set dev $up_dev nomaster
sleep 2
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 gt4-dst 100 0
+
+ vlan_capture_uninstall gt4-dst
mirror_uninstall $swp1 ingress
# Recreate H3's team device, because mlxsw, which this test is
@@ -243,7 +247,7 @@ test_lag_slave()
# Wait for ${h,swp}{3,4}.
setup_wait
- log_test "$what ($tcflags)"
+ log_test "$what"
}
test_mirror_gretap_first()
@@ -256,30 +260,11 @@ test_mirror_gretap_second()
test_lag_slave $h4 $h3 "mirror to gretap: LAG second slave"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
index 0c36546e131e..20078cc55f24 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
@@ -5,22 +5,34 @@ source "$net_forwarding_dir/mirror_lib.sh"
quick_test_span_gre_dir_ips()
{
local tundev=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
- do_test_span_dir_ips 10 h3-$tundev "$@"
+ do_test_span_dir_ips 10 h3-$tundev "$ip1" "$ip2" \
+ "$forward_type" "$backward_type"
}
fail_test_span_gre_dir_ips()
{
local tundev=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
- do_test_span_dir_ips 0 h3-$tundev "$@"
+ do_test_span_dir_ips 0 h3-$tundev "$ip1" "$ip2"
}
test_span_gre_dir_ips()
{
local tundev=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
- test_span_dir_ips h3-$tundev "$@"
+ test_span_dir_ips h3-$tundev "$forward_type" \
+ "$backward_type" "$ip1" "$ip2"
}
full_test_span_gre_dir_ips()
@@ -35,12 +47,12 @@ full_test_span_gre_dir_ips()
RET=0
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
- test_span_dir_ips "h3-$tundev" "$direction" "$forward_type" \
+ mirror_install $swp1 $direction $tundev "matchall"
+ test_span_dir_ips "h3-$tundev" "$forward_type" \
"$backward_type" "$ip1" "$ip2"
mirror_uninstall $swp1 $direction
- log_test "$direction $what ($tcflags)"
+ log_test "$direction $what"
}
full_test_span_gre_dir_vlan_ips()
@@ -56,45 +68,63 @@ full_test_span_gre_dir_vlan_ips()
RET=0
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
+ mirror_install $swp1 $direction $tundev "matchall"
- test_span_dir_ips "h3-$tundev" "$direction" "$forward_type" \
+ test_span_dir_ips "h3-$tundev" "$forward_type" \
"$backward_type" "$ip1" "$ip2"
tc filter add dev $h3 ingress pref 77 prot 802.1q \
flower $vlan_match \
action pass
- mirror_test v$h1 $ip1 $ip2 $h3 77 10
+ mirror_test v$h1 $ip1 $ip2 $h3 77 '>= 10'
tc filter del dev $h3 ingress pref 77
mirror_uninstall $swp1 $direction
- log_test "$direction $what ($tcflags)"
+ log_test "$direction $what"
}
quick_test_span_gre_dir()
{
- quick_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
+
+ quick_test_span_gre_dir_ips "$tundev" 192.0.2.1 192.0.2.2 \
+ "$forward_type" "$backward_type"
}
fail_test_span_gre_dir()
{
- fail_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
-}
+ local tundev=$1; shift
-test_span_gre_dir()
-{
- test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
+ fail_test_span_gre_dir_ips "$tundev" 192.0.2.1 192.0.2.2
}
full_test_span_gre_dir()
{
- full_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local direction=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+ local what=$1; shift
+
+ full_test_span_gre_dir_ips "$tundev" "$direction" "$forward_type" \
+ "$backward_type" "$what" 192.0.2.1 192.0.2.2
}
full_test_span_gre_dir_vlan()
{
- full_test_span_gre_dir_vlan_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local direction=$1; shift
+ local vlan_match=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+ local what=$1; shift
+
+ full_test_span_gre_dir_vlan_ips "$tundev" "$direction" "$vlan_match" \
+ "$forward_type" "$backward_type" \
+ "$what" 192.0.2.1 192.0.2.2
}
full_test_span_gre_stp_ips()
@@ -104,27 +134,39 @@ full_test_span_gre_stp_ips()
local what=$1; shift
local ip1=$1; shift
local ip2=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
local h3mac=$(mac_get $h3)
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir_ips $tundev ingress $ip1 $ip2
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir_ips $tundev $ip1 $ip2 \
+ "$forward_type" "$backward_type"
bridge link set dev $nbpdev state disabled
sleep 1
- fail_test_span_gre_dir_ips $tundev ingress $ip1 $ip2
+ fail_test_span_gre_dir_ips $tundev $ip1 $ip2
bridge link set dev $nbpdev state forwarding
sleep 1
- quick_test_span_gre_dir_ips $tundev ingress $ip1 $ip2
+ quick_test_span_gre_dir_ips $tundev $ip1 $ip2 \
+ "$forward_type" "$backward_type"
mirror_uninstall $swp1 ingress
- log_test "$what: STP state ($tcflags)"
+ log_test "$what: STP state"
}
full_test_span_gre_stp()
{
- full_test_span_gre_stp_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local nbpdev=$1; shift
+ local what=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
+
+ full_test_span_gre_stp_ips "$tundev" "$nbpdev" "$what" \
+ 192.0.2.1 192.0.2.2 \
+ "$forward_type" "$backward_type"
}
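The numeric forward/backward types threaded through these helpers are ICMP message types matched by icmp_capture_install (8 = echo request, 0 = echo reply); illustrative calls with the new defaults:

	quick_test_span_gre_dir gt4		# defaults: forward type 8, backward type 0
	quick_test_span_gre_dir gt4 0 8		# egress mirror: replies forward, requests back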
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
index fc0508e40fca..2cbfbecf25c8 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
@@ -60,41 +60,32 @@ test_span_gre_neigh()
local addr=$1; shift
local tundev=$1; shift
local direction=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
local what=$1; shift
RET=0
ip neigh replace dev $swp3 $addr lladdr 00:11:22:33:44:55
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 $direction $tundev "matchall"
+ fail_test_span_gre_dir $tundev "$forward_type" "$backward_type"
ip neigh del dev $swp3 $addr
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev "$forward_type" "$backward_type"
mirror_uninstall $swp1 $direction
- log_test "$direction $what: neighbor change ($tcflags)"
+ log_test "$direction $what: neighbor change"
}
test_gretap()
{
- test_span_gre_neigh 192.0.2.130 gt4 ingress "mirror to gretap"
- test_span_gre_neigh 192.0.2.130 gt4 egress "mirror to gretap"
+ test_span_gre_neigh 192.0.2.130 gt4 ingress 8 0 "mirror to gretap"
+ test_span_gre_neigh 192.0.2.130 gt4 egress 0 8 "mirror to gretap"
}
test_ip6gretap()
{
- test_span_gre_neigh 2001:db8:2::2 gt6 ingress "mirror to ip6gretap"
- test_span_gre_neigh 2001:db8:2::2 gt6 egress "mirror to ip6gretap"
-}
-
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
+ test_span_gre_neigh 2001:db8:2::2 gt6 ingress 8 0 "mirror to ip6gretap"
+ test_span_gre_neigh 2001:db8:2::2 gt6 egress 0 8 "mirror to ip6gretap"
}
trap cleanup EXIT
@@ -102,14 +93,6 @@ trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
index 6f9ef1820e93..34bc646938e3 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
@@ -75,42 +75,31 @@ cleanup()
test_gretap()
{
RET=0
- mirror_install $swp1 ingress gt4 "matchall $tcflags"
+ mirror_install $swp1 ingress gt4 "matchall"
# For IPv4, test that there's no mirroring without the route directing
# the traffic to tunnel remote address. Then add it and test that
# mirroring starts. For IPv6 we can't test this due to the limitation
# that routes for locally-specified IPv6 addresses can't be added.
- fail_test_span_gre_dir gt4 ingress
+ fail_test_span_gre_dir gt4
ip route add 192.0.2.130/32 via 192.0.2.162
- quick_test_span_gre_dir gt4 ingress
+ quick_test_span_gre_dir gt4
ip route del 192.0.2.130/32 via 192.0.2.162
mirror_uninstall $swp1 ingress
- log_test "mirror to gre with next-hop remote ($tcflags)"
+ log_test "mirror to gre with next-hop remote"
}
test_ip6gretap()
{
RET=0
- mirror_install $swp1 ingress gt6 "matchall $tcflags"
- quick_test_span_gre_dir gt6 ingress
+ mirror_install $swp1 ingress gt6 "matchall"
+ quick_test_span_gre_dir gt6
mirror_uninstall $swp1 ingress
- log_test "mirror to ip6gre with next-hop remote ($tcflags)"
-}
-
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
+ log_test "mirror to ip6gre with next-hop remote"
}
trap cleanup EXIT
@@ -118,14 +107,6 @@ trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh
index 88cecdb9a861..63689928cb51 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh
@@ -63,30 +63,11 @@ test_gretap()
full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index c8a9b5bd841f..1b902cc579f6 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -153,21 +153,21 @@ test_span_gre_forbidden_cpu()
RET=0
# Run the pass-test first, to prime neighbor table.
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
# Now forbid the VLAN at the bridge and see it fail.
bridge vlan del dev br1 vid 555 self
sleep 1
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
bridge vlan add dev br1 vid 555 self
sleep 1
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: vlan forbidden at a bridge ($tcflags)"
+ log_test "$what: vlan forbidden at a bridge"
}
test_gretap_forbidden_cpu()
@@ -187,22 +187,22 @@ test_span_gre_forbidden_egress()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
bridge vlan del dev $swp3 vid 555
sleep 1
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
bridge vlan add dev $swp3 vid 555
# Re-prime FDB
$ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: vlan forbidden at a bridge egress ($tcflags)"
+ log_test "$what: vlan forbidden at a bridge egress"
}
test_gretap_forbidden_egress()
@@ -223,30 +223,30 @@ test_span_gre_untagged_egress()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ mirror_install $swp1 ingress $tundev "matchall"
- quick_test_span_gre_dir $tundev ingress
- quick_test_span_vlan_dir $h3 555 ingress "$ul_proto"
+ quick_test_span_gre_dir $tundev
+ quick_test_span_vlan_dir $h3 555 "$ul_proto"
h3_addr_add_del del $h3.555
bridge vlan add dev $swp3 vid 555 pvid untagged
h3_addr_add_del add $h3
sleep 5
- quick_test_span_gre_dir $tundev ingress
- fail_test_span_vlan_dir $h3 555 ingress "$ul_proto"
+ quick_test_span_gre_dir $tundev
+ fail_test_span_vlan_dir $h3 555 "$ul_proto"
h3_addr_add_del del $h3
bridge vlan add dev $swp3 vid 555
h3_addr_add_del add $h3.555
sleep 5
- quick_test_span_gre_dir $tundev ingress
- quick_test_span_vlan_dir $h3 555 ingress "$ul_proto"
+ quick_test_span_gre_dir $tundev
+ quick_test_span_vlan_dir $h3 555 "$ul_proto"
mirror_uninstall $swp1 ingress
- log_test "$what: vlan untagged at a bridge egress ($tcflags)"
+ log_test "$what: vlan untagged at a bridge egress"
}
test_gretap_untagged_egress()
@@ -267,19 +267,19 @@ test_span_gre_fdb_roaming()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
while ((RET == 0)); do
bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
bridge fdb add dev $swp2 $h3mac vlan 555 master static
sleep 1
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
if ! bridge fdb sh dev $swp2 vlan 555 master \
| grep -q $h3mac; then
printf "TEST: %-60s [RETRY]\n" \
- "$what: MAC roaming ($tcflags)"
+ "$what: MAC roaming"
# ARP or ND probably reprimed the FDB while the test
# was running. We would get a spurious failure.
RET=0
@@ -292,11 +292,11 @@ test_span_gre_fdb_roaming()
# Re-prime FDB
$ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: MAC roaming ($tcflags)"
+ log_test "$what: MAC roaming"
}
test_gretap_fdb_roaming()
@@ -319,30 +319,11 @@ test_ip6gretap_stp()
full_test_span_gre_stp gt6 $swp3 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index 3e8ebeff3019..6bf9d5ae933c 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -44,14 +44,17 @@ mirror_test()
local type="icmp echoreq"
fi
+ if [[ -z ${expect//[[:digit:]]/} ]]; then
+ expect="== $expect"
+ fi
+
local t0=$(tc_rule_stats_get $dev $pref)
$MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
-c 10 -d 100msec -t $type
sleep 0.5
local t1=$(tc_rule_stats_get $dev $pref)
local delta=$((t1 - t0))
- # Tolerate a couple stray extra packets.
- ((expect <= delta && delta <= expect + 2))
+ ((delta $expect))
check_err $? "Expected to capture $expect packets, got $delta."
}
@@ -59,36 +62,42 @@ do_test_span_dir_ips()
{
local expect=$1; shift
local dev=$1; shift
- local direction=$1; shift
local ip1=$1; shift
local ip2=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
- icmp_capture_install $dev
+ icmp_capture_install $dev "type $forward_type"
mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
+ icmp_capture_uninstall $dev
+
+ icmp_capture_install $dev "type $backward_type"
mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
icmp_capture_uninstall $dev
}
quick_test_span_dir_ips()
{
- do_test_span_dir_ips 10 "$@"
-}
+ local dev=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
-fail_test_span_dir_ips()
-{
- do_test_span_dir_ips 0 "$@"
+ do_test_span_dir_ips 10 "$dev" "$ip1" "$ip2" \
+ "$forward_type" "$backward_type"
}
test_span_dir_ips()
{
local dev=$1; shift
- local direction=$1; shift
local forward_type=$1; shift
local backward_type=$1; shift
local ip1=$1; shift
local ip2=$1; shift
- quick_test_span_dir_ips "$dev" "$direction" "$ip1" "$ip2"
+ quick_test_span_dir_ips "$dev" "$ip1" "$ip2" \
+ "$forward_type" "$backward_type"
icmp_capture_install $dev "type $forward_type"
mirror_test v$h1 $ip1 $ip2 $dev 100 10
@@ -99,14 +108,14 @@ test_span_dir_ips()
icmp_capture_uninstall $dev
}
-fail_test_span_dir()
-{
- fail_test_span_dir_ips "$@" 192.0.2.1 192.0.2.2
-}
-
test_span_dir()
{
- test_span_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local dev=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+
+ test_span_dir_ips "$dev" "$forward_type" "$backward_type" \
+ 192.0.2.1 192.0.2.2
}
do_test_span_vlan_dir_ips()
@@ -114,7 +123,6 @@ do_test_span_vlan_dir_ips()
local expect=$1; shift
local dev=$1; shift
local vid=$1; shift
- local direction=$1; shift
local ul_proto=$1; shift
local ip1=$1; shift
local ip2=$1; shift
@@ -123,27 +131,50 @@ do_test_span_vlan_dir_ips()
# The traffic is meant for local box anyway, so will be trapped to
# kernel.
vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype $ul_proto"
- mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
- mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
+ mirror_test v$h1 $ip1 $ip2 $dev 100 "$expect"
+ mirror_test v$h2 $ip2 $ip1 $dev 100 "$expect"
vlan_capture_uninstall $dev
}
quick_test_span_vlan_dir_ips()
{
- do_test_span_vlan_dir_ips 10 "$@"
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+
+ do_test_span_vlan_dir_ips '>= 10' "$dev" "$vid" "$ul_proto" \
+ "$ip1" "$ip2"
}
fail_test_span_vlan_dir_ips()
{
- do_test_span_vlan_dir_ips 0 "$@"
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+
+ do_test_span_vlan_dir_ips 0 "$dev" "$vid" "$ul_proto" "$ip1" "$ip2"
}
quick_test_span_vlan_dir()
{
- quick_test_span_vlan_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+
+ quick_test_span_vlan_dir_ips "$dev" "$vid" "$ul_proto" \
+ 192.0.2.1 192.0.2.2
}
fail_test_span_vlan_dir()
{
- fail_test_span_vlan_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+
+ fail_test_span_vlan_dir_ips "$dev" "$vid" "$ul_proto" \
+ 192.0.2.1 192.0.2.2
}
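mirror_test() now accepts either a bare packet count (implicitly "== N") or a full arithmetic relation such as ">= 10"; a standalone sketch of the comparison logic with made-up values (not part of the patch):

	expect=">= 10"; delta=12
	# A purely numeric expectation becomes an exact match.
	[[ -z ${expect//[[:digit:]]/} ]] && expect="== $expect"
	((delta $expect)) && echo ok || echo mismatch	# prints "ok"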
diff --git a/tools/testing/selftests/net/forwarding/mirror_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_vlan.sh
index 0b44e148235e..2f150a414d38 100755
--- a/tools/testing/selftests/net/forwarding/mirror_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_vlan.sh
@@ -40,12 +40,16 @@ setup_prepare()
vlan_create $h2 111 v$h2 192.0.2.18/28
bridge vlan add dev $swp2 vid 111
+
+ trap_install $h3 ingress
}
cleanup()
{
pre_cleanup
+ trap_uninstall $h3 ingress
+
vlan_destroy $h2 111
vlan_destroy $h1 111
vlan_destroy $h3 555
@@ -63,11 +67,11 @@ test_vlan_dir()
RET=0
- mirror_install $swp1 $direction $swp3.555 "matchall $tcflags"
- test_span_dir "$h3.555" "$direction" "$forward_type" "$backward_type"
+ mirror_install $swp1 $direction $swp3.555 "matchall"
+ test_span_dir "$h3.555" "$forward_type" "$backward_type"
mirror_uninstall $swp1 $direction
- log_test "$direction mirror to vlan ($tcflags)"
+ log_test "$direction mirror to vlan"
}
test_vlan()
@@ -84,14 +88,12 @@ test_tagged_vlan_dir()
RET=0
- mirror_install $swp1 $direction $swp3.555 "matchall $tcflags"
- do_test_span_vlan_dir_ips 10 "$h3.555" 111 "$direction" ip \
- 192.0.2.17 192.0.2.18
- do_test_span_vlan_dir_ips 0 "$h3.555" 555 "$direction" ip \
- 192.0.2.17 192.0.2.18
+ mirror_install $swp1 $direction $swp3.555 "matchall"
+ do_test_span_vlan_dir_ips '>= 10' "$h3.555" 111 ip 192.0.2.17 192.0.2.18
+ do_test_span_vlan_dir_ips 0 "$h3.555" 555 ip 192.0.2.17 192.0.2.18
mirror_uninstall $swp1 $direction
- log_test "$direction mirror tagged to vlan ($tcflags)"
+ log_test "$direction mirror tagged to vlan"
}
test_tagged_vlan()
@@ -100,32 +102,11 @@ test_tagged_vlan()
test_tagged_vlan_dir egress 0 8
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
- trap_install $h3 ingress
-
- tests_run
-
- trap_uninstall $h3 ingress
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_seed.sh b/tools/testing/selftests/net/forwarding/router_mpath_seed.sh
new file mode 100755
index 000000000000..314cb906c1eb
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_mpath_seed.sh
@@ -0,0 +1,333 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-------------------------+ +-------------------------+
+# | H1 | | H2 |
+# | $h1 + | | + $h2 |
+# | 192.0.2.1/28 | | | | 192.0.2.34/28 |
+# | 2001:db8:1::1/64 | | | | 2001:db8:3::2/64 |
+# +-------------------|-----+ +-|-----------------------+
+# | |
+# +-------------------|-----+ +-|-----------------------+
+# | R1 | | | | R2 |
+# | $rp11 + | | + $rp21 |
+# | 192.0.2.2/28 | | 192.0.2.33/28 |
+# | 2001:db8:1::2/64 | | 2001:db8:3::1/64 |
+# | | | |
+# | $rp12 + | | + $rp22 |
+# | 192.0.2.17/28 | | | | 192.0.2.18..27/28 |
+# | 2001:db8:2::17/64 | | | | 2001:db8:2::18..27/64 |
+# +-------------------|-----+ +-|-----------------------+
+# | |
+# `----------'
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ test_mpath_seed_stability_ipv4
+ test_mpath_seed_stability_ipv6
+ test_mpath_seed_get
+ test_mpath_seed_ipv4
+ test_mpath_seed_ipv6
+"
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip -4 route add 192.0.2.32/28 vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del 192.0.2.32/28 vrf v$h1 nexthop via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.34/28 2001:db8:3::2/64
+ ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.33
+ ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:3::1
+}
+
+h2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:3::1
+ ip -4 route del 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.33
+ simple_if_fini $h2 192.0.2.34/28 2001:db8:3::2/64
+}
+
+router1_create()
+{
+ simple_if_init $rp11 192.0.2.2/28 2001:db8:1::2/64
+ __simple_if_init $rp12 v$rp11 192.0.2.17/28 2001:db8:2::17/64
+}
+
+router1_destroy()
+{
+ __simple_if_fini $rp12 192.0.2.17/28 2001:db8:2::17/64
+ simple_if_fini $rp11 192.0.2.2/28 2001:db8:1::2/64
+}
+
+router2_create()
+{
+ simple_if_init $rp21 192.0.2.33/28 2001:db8:3::1/64
+ __simple_if_init $rp22 v$rp21 192.0.2.18/28 2001:db8:2::18/64
+ ip -4 route add 192.0.2.0/28 vrf v$rp21 nexthop via 192.0.2.17
+ ip -6 route add 2001:db8:1::/64 vrf v$rp21 nexthop via 2001:db8:2::17
+}
+
+router2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$rp21 nexthop via 2001:db8:2::17
+ ip -4 route del 192.0.2.0/28 vrf v$rp21 nexthop via 192.0.2.17
+ __simple_if_fini $rp22 192.0.2.18/28 2001:db8:2::18/64
+ simple_if_fini $rp21 192.0.2.33/28 2001:db8:3::1/64
+}
+
+nexthops_create()
+{
+ local i
+ for i in $(seq 10); do
+ ip nexthop add id $((1000 + i)) via 192.0.2.18 dev $rp12
+ ip nexthop add id $((2000 + i)) via 2001:db8:2::18 dev $rp12
+ done
+
+ ip nexthop add id 1000 group $(seq -s / 1001 1010) hw_stats on
+ ip nexthop add id 2000 group $(seq -s / 2001 2010) hw_stats on
+ ip -4 route add 192.0.2.32/28 vrf v$rp11 nhid 1000
+ ip -6 route add 2001:db8:3::/64 vrf v$rp11 nhid 2000
+}
+
+nexthops_destroy()
+{
+ local i
+
+ ip -6 route del 2001:db8:3::/64 vrf v$rp11 nhid 2000
+ ip -4 route del 192.0.2.32/28 vrf v$rp11 nhid 1000
+ ip nexthop del id 2000
+ ip nexthop del id 1000
+
+ for i in $(seq 10 -1 1); do
+ ip nexthop del id $((2000 + i))
+ ip nexthop del id $((1000 + i))
+ done
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp11=${NETIFS[p2]}
+
+ rp12=${NETIFS[p3]}
+ rp22=${NETIFS[p4]}
+
+ rp21=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ sysctl_save net.ipv4.fib_multipath_hash_seed
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ router1_create
+ router2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ nexthops_destroy
+ router2_destroy
+ router1_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+
+ sysctl_restore net.ipv4.fib_multipath_hash_seed
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.34
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:3::2
+}
+
+test_mpath_seed_get()
+{
+ RET=0
+
+ local i
+ for ((i = 0; i < 100; i++)); do
+ local seed_w=$((999331 * i))
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=$seed_w
+ local seed_r=$(sysctl -n net.ipv4.fib_multipath_hash_seed)
+ ((seed_r == seed_w))
+ check_err $? "mpath seed written as $seed_w, but read as $seed_r"
+ done
+
+ log_test "mpath seed set/get"
+}
+
+nh_stats_snapshot()
+{
+ local group_id=$1; shift
+
+ ip -j -s -s nexthop show id $group_id |
+ jq -c '[.[].group_stats | sort_by(.id) | .[].packets]'
+}
+
+get_active_nh()
+{
+ local s0=$1; shift
+ local s1=$1; shift
+
+ jq -n --argjson s0 "$s0" --argjson s1 "$s1" -f /dev/stdin <<-"EOF"
+ [range($s0 | length)] |
+ map($s1[.] - $s0[.]) |
+ map(if . > 8 then 1 else 0 end) |
+ index(1)
+ EOF
+}
+
+probe_nh()
+{
+ local group_id=$1; shift
+ local -a mz=("$@")
+
+ local s0=$(nh_stats_snapshot $group_id)
+ "${mz[@]}"
+ local s1=$(nh_stats_snapshot $group_id)
+
+ get_active_nh "$s0" "$s1"
+}
+
+probe_seed()
+{
+ local group_id=$1; shift
+ local seed=$1; shift
+ local -a mz=("$@")
+
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=$seed
+ probe_nh "$group_id" "${mz[@]}"
+}
+
+test_mpath_seed()
+{
+ local group_id=$1; shift
+ local what=$1; shift
+ local -a mz=("$@")
+ local ii
+
+ RET=0
+
+ local -a tally=(0 0 0 0 0 0 0 0 0 0)
+ for ((ii = 0; ii < 100; ii++)); do
+ local act=$(probe_seed $group_id $((999331 * ii)) "${mz[@]}")
+ ((tally[act]++))
+ done
+
+ local tally_str="${tally[@]}"
+ for ((ii = 0; ii < ${#tally[@]}; ii++)); do
+ ((tally[ii] > 0))
+ check_err $? "NH #$ii not hit, tally='$tally_str'"
+ done
+
+ log_test "mpath seed $what"
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=0
+}
+
+test_mpath_seed_ipv4()
+{
+ test_mpath_seed 1000 IPv4 \
+ $MZ $h1 -A 192.0.2.1 -B 192.0.2.34 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+test_mpath_seed_ipv6()
+{
+ test_mpath_seed 2000 IPv6 \
+ $MZ -6 $h1 -A 2001:db8:1::1 -B 2001:db8:3::2 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+check_mpath_seed_stability()
+{
+ local seed=$1; shift
+ local act_0=$1; shift
+ local act_1=$1; shift
+
+ ((act_0 == act_1))
+ check_err $? "seed $seed: active NH moved from $act_0 to $act_1 after seed change"
+}
+
+test_mpath_seed_stability()
+{
+ local group_id=$1; shift
+ local what=$1; shift
+ local -a mz=("$@")
+
+ RET=0
+
+ local seed_0=0
+ local seed_1=3221338814
+ local seed_2=3735928559
+
+ # Initial active NH before touching the seed at all.
+ local act_ini=$(probe_nh $group_id "${mz[@]}")
+
+ local act_0_0=$(probe_seed $group_id $seed_0 "${mz[@]}")
+ local act_1_0=$(probe_seed $group_id $seed_1 "${mz[@]}")
+ local act_2_0=$(probe_seed $group_id $seed_2 "${mz[@]}")
+
+ local act_0_1=$(probe_seed $group_id $seed_0 "${mz[@]}")
+ local act_1_1=$(probe_seed $group_id $seed_1 "${mz[@]}")
+ local act_2_1=$(probe_seed $group_id $seed_2 "${mz[@]}")
+
+ check_mpath_seed_stability initial $act_ini $act_0_0
+ check_mpath_seed_stability $seed_0 $act_0_0 $act_0_1
+ check_mpath_seed_stability $seed_1 $act_1_0 $act_1_1
+ check_mpath_seed_stability $seed_2 $act_2_0 $act_2_1
+
+ log_test "mpath seed stability $what"
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=0
+}
+
+test_mpath_seed_stability_ipv4()
+{
+ test_mpath_seed_stability 1000 IPv4 \
+ $MZ $h1 -A 192.0.2.1 -B 192.0.2.34 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+test_mpath_seed_stability_ipv6()
+{
+ test_mpath_seed_stability 2000 IPv6 \
+ $MZ -6 $h1 -A 2001:db8:1::1 -B 2001:db8:3::2 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+nexthops_create
+
+tests_run
+
+exit $EXIT_STATUS
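The jq program in get_active_nh() diffs two per-nexthop packet-counter snapshots and returns the index of the hop that absorbed the 10-packet probe; an illustrative standalone run with made-up counters:

	s0='[100, 100, 100]'
	s1='[100, 112, 100]'
	jq -n --argjson s0 "$s0" --argjson s1 "$s1" \
		'[range($s0 | length)] | map($s1[.] - $s0[.]) |
		 map(if . > 8 then 1 else 0 end) | index(1)'
	# prints 1: only the second next hop's counter grew by more than 8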
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index 6f0a2e452ba1..3f9d50f1ef9e 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -680,9 +680,9 @@ test_learning()
local mac=de:ad:be:ef:13:37
local dst=192.0.2.100
- # Enable learning on the VxLAN device and set ageing time to 10 seconds
- ip link set dev br1 type bridge ageing_time 1000
- ip link set dev vx1 type vxlan ageing 10
+ # Enable learning on the VxLAN device and set ageing time to 30 seconds
+ ip link set dev br1 type bridge ageing_time 3000
+ ip link set dev vx1 type vxlan ageing 30
ip link set dev vx1 type vxlan learning
reapply_config
@@ -740,7 +740,7 @@ test_learning()
vxlan_flood_test $mac $dst 0 10 0
- sleep 20
+ sleep 60
bridge fdb show brport vx1 | grep $mac | grep -q self
check_fail $?
diff --git a/tools/testing/selftests/net/hsr/config b/tools/testing/selftests/net/hsr/config
index 22061204fb69..241542441c51 100644
--- a/tools/testing/selftests/net/hsr/config
+++ b/tools/testing/selftests/net/hsr/config
@@ -2,3 +2,4 @@ CONFIG_IPV6=y
CONFIG_NET_SCH_NETEM=m
CONFIG_HSR=y
CONFIG_VETH=y
+CONFIG_BRIDGE=y
diff --git a/tools/testing/selftests/net/hsr/hsr_ping.sh b/tools/testing/selftests/net/hsr/hsr_ping.sh
index 790294c8af83..f5d207fc770a 100755
--- a/tools/testing/selftests/net/hsr/hsr_ping.sh
+++ b/tools/testing/selftests/net/hsr/hsr_ping.sh
@@ -152,6 +152,15 @@ setup_hsr_interfaces()
ip -net "$ns3" addr add 100.64.0.3/24 dev hsr3
ip -net "$ns3" addr add dead:beef:1::3/64 dev hsr3 nodad
+ ip -net "$ns1" link set address 00:11:22:00:01:01 dev ns1eth1
+ ip -net "$ns1" link set address 00:11:22:00:01:02 dev ns1eth2
+
+ ip -net "$ns2" link set address 00:11:22:00:02:01 dev ns2eth1
+ ip -net "$ns2" link set address 00:11:22:00:02:02 dev ns2eth2
+
+ ip -net "$ns3" link set address 00:11:22:00:03:01 dev ns3eth1
+ ip -net "$ns3" link set address 00:11:22:00:03:02 dev ns3eth2
+
# All Links up
ip -net "$ns1" link set ns1eth1 up
ip -net "$ns1" link set ns1eth2 up
@@ -174,6 +183,8 @@ trap cleanup_all_ns EXIT
setup_hsr_interfaces 0
do_complete_ping_test
+setup_ns ns1 ns2 ns3
+
setup_hsr_interfaces 1
do_complete_ping_test
diff --git a/tools/testing/selftests/net/hsr/hsr_redbox.sh b/tools/testing/selftests/net/hsr/hsr_redbox.sh
index 1f36785347c0..998103502d5d 100755
--- a/tools/testing/selftests/net/hsr/hsr_redbox.sh
+++ b/tools/testing/selftests/net/hsr/hsr_redbox.sh
@@ -96,6 +96,21 @@ setup_hsr_interfaces()
ip -n "${ns4}" link set ns4eth1 up
ip -n "${ns5}" link set ns5eth1 up
+ ip -net "$ns1" link set address 00:11:22:00:01:01 dev ns1eth1
+ ip -net "$ns1" link set address 00:11:22:00:01:02 dev ns1eth2
+
+ ip -net "$ns2" link set address 00:11:22:00:02:01 dev ns2eth1
+ ip -net "$ns2" link set address 00:11:22:00:02:02 dev ns2eth2
+ ip -net "$ns2" link set address 00:11:22:00:02:03 dev ns2eth3
+
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth1
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth2
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth3
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3br1
+
+ ip -net "$ns4" link set address 00:11:22:00:04:01 dev ns4eth1
+ ip -net "$ns5" link set address 00:11:22:00:05:01 dev ns5eth1
+
ip -net "${ns1}" link add name hsr1 type hsr slave1 ns1eth1 slave2 ns1eth2 supervision 45 version ${HSRv} proto 0
ip -net "${ns2}" link add name hsr2 type hsr slave1 ns2eth1 slave2 ns2eth2 interlink ns2eth3 supervision 45 version ${HSRv} proto 0
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index edc030e81a46..d0219032f773 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -15,7 +15,7 @@ ksft_xfail=2
ksft_skip=4
# namespace list created by setup_ns
-NS_LIST=""
+NS_LIST=()
##############################################################################
# Helpers
@@ -27,6 +27,7 @@ __ksft_status_merge()
local -A weights
local weight=0
+ local i
for i in "$@"; do
weights[$i]=$((weight++))
done
@@ -67,9 +68,7 @@ loopy_wait()
while true
do
local out
- out=$("$@")
- local ret=$?
- if ((!ret)); then
+ if out=$("$@"); then
echo -n "$out"
return 0
fi
@@ -126,74 +125,84 @@ slowwait_for_counter()
slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
}
+remove_ns_list()
+{
+ local item=$1
+ local ns
+ local ns_list=("${NS_LIST[@]}")
+ NS_LIST=()
+
+ for ns in "${ns_list[@]}"; do
+ if [ "${ns}" != "${item}" ]; then
+ NS_LIST+=("${ns}")
+ fi
+ done
+}
+
cleanup_ns()
{
local ns=""
- local errexit=0
local ret=0
- # disable errexit temporary
- if [[ $- =~ "e" ]]; then
- errexit=1
- set +e
- fi
-
for ns in "$@"; do
- ip netns delete "${ns}" &> /dev/null
+ [ -z "${ns}" ] && continue
+ ip netns delete "${ns}" &> /dev/null || true
if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
echo "Warn: Failed to remove namespace $ns"
ret=1
+ else
+ remove_ns_list "${ns}"
fi
done
- [ $errexit -eq 1 ] && set -e
return $ret
}
cleanup_all_ns()
{
- cleanup_ns $NS_LIST
+ cleanup_ns "${NS_LIST[@]}"
}
# setup netns with given names as prefix. e.g
# setup_ns local remote
setup_ns()
{
- local ns=""
local ns_name=""
- local ns_list=""
- local ns_exist=
+ local ns_list=()
for ns_name in "$@"; do
+		# Reject names that clash with this function's local variable; treat it as an internal error.
+ if [ "${ns_name}" = "ns_name" ]; then
+ echo "Failed to setup namespace '${ns_name}': invalid name"
+ cleanup_ns "${ns_list[@]}"
+ exit $ksft_fail
+ fi
+
# Some test may setup/remove same netns multi times
- if unset ${ns_name} 2> /dev/null; then
- ns="${ns_name,,}-$(mktemp -u XXXXXX)"
- eval readonly ${ns_name}="$ns"
- ns_exist=false
+ if [ -z "${!ns_name}" ]; then
+ eval "${ns_name}=${ns_name,,}-$(mktemp -u XXXXXX)"
else
- eval ns='$'${ns_name}
- cleanup_ns "$ns"
- ns_exist=true
+ cleanup_ns "${!ns_name}"
fi
- if ! ip netns add "$ns"; then
+ if ! ip netns add "${!ns_name}"; then
echo "Failed to create namespace $ns_name"
- cleanup_ns "$ns_list"
+ cleanup_ns "${ns_list[@]}"
return $ksft_skip
fi
- ip -n "$ns" link set lo up
- ! $ns_exist && ns_list="$ns_list $ns"
+ ip -n "${!ns_name}" link set lo up
+ ns_list+=("${!ns_name}")
done
- NS_LIST="$NS_LIST $ns_list"
+ NS_LIST+=("${ns_list[@]}")
}
tc_rule_stats_get()
{
local dev=$1; shift
local pref=$1; shift
- local dir=$1; shift
+ local dir=${1:-ingress}; shift
local selector=${1:-.packets}; shift
- tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
+ tc -j -s filter show dev $dev $dir pref $pref \
| jq ".[1].options.actions[].stats$selector"
}
diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py
index 4769b4eb1ea1..f26c20df9db4 100644
--- a/tools/testing/selftests/net/lib/py/ksft.py
+++ b/tools/testing/selftests/net/lib/py/ksft.py
@@ -6,6 +6,7 @@ import sys
import time
import traceback
from .consts import KSFT_MAIN_NAME
+from .utils import global_defer_queue
KSFT_RESULT = None
KSFT_RESULT_ALL = True
@@ -57,6 +58,11 @@ def ksft_ge(a, b, comment=""):
_fail("Check failed", a, "<", b, comment)
+def ksft_lt(a, b, comment=""):
+ if a >= b:
+ _fail("Check failed", a, ">=", b, comment)
+
+
class ksft_raises:
def __init__(self, expected_type):
self.exception = None
@@ -103,6 +109,24 @@ def ktap_result(ok, cnt=1, case="", comment=""):
print(res)
+def ksft_flush_defer():
+ global KSFT_RESULT
+
+ i = 0
+ qlen_start = len(global_defer_queue)
+ while global_defer_queue:
+ i += 1
+ entry = global_defer_queue.pop()
+ try:
+ entry.exec_only()
+ except:
+ ksft_pr(f"Exception while handling defer / cleanup (callback {i} of {qlen_start})!")
+ tb = traceback.format_exc()
+ for line in tb.strip().split('\n'):
+ ksft_pr("Defer Exception|", line)
+ KSFT_RESULT = False
+
+
def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
cases = cases or []
@@ -122,32 +146,41 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
global KSFT_RESULT
cnt = 0
+ stop = False
for case in cases:
KSFT_RESULT = True
cnt += 1
+ comment = ""
+ cnt_key = ""
+
try:
case(*args)
except KsftSkipEx as e:
- ktap_result(True, cnt, case, comment="SKIP " + str(e))
- totals['skip'] += 1
- continue
+ comment = "SKIP " + str(e)
+ cnt_key = 'skip'
except KsftXfailEx as e:
- ktap_result(True, cnt, case, comment="XFAIL " + str(e))
- totals['xfail'] += 1
- continue
- except Exception as e:
+ comment = "XFAIL " + str(e)
+ cnt_key = 'xfail'
+ except BaseException as e:
+ stop |= isinstance(e, KeyboardInterrupt)
tb = traceback.format_exc()
for line in tb.strip().split('\n'):
ksft_pr("Exception|", line)
- ktap_result(False, cnt, case)
- totals['fail'] += 1
- continue
-
- ktap_result(KSFT_RESULT, cnt, case)
- if KSFT_RESULT:
- totals['pass'] += 1
- else:
- totals['fail'] += 1
+ if stop:
+ ksft_pr("Stopping tests due to KeyboardInterrupt.")
+ KSFT_RESULT = False
+ cnt_key = 'fail'
+
+ ksft_flush_defer()
+
+ if not cnt_key:
+ cnt_key = 'pass' if KSFT_RESULT else 'fail'
+
+ ktap_result(KSFT_RESULT, cnt, case, comment=comment)
+ totals[cnt_key] += 1
+
+ if stop:
+ break
print(
f"# Totals: pass:{totals['pass']} fail:{totals['fail']} xfail:{totals['xfail']} xpass:0 skip:{totals['skip']} error:0"
diff --git a/tools/testing/selftests/net/lib/py/utils.py b/tools/testing/selftests/net/lib/py/utils.py
index 0540ea24921d..72590c3f90f1 100644
--- a/tools/testing/selftests/net/lib/py/utils.py
+++ b/tools/testing/selftests/net/lib/py/utils.py
@@ -1,12 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
+import errno
import json as _json
import random
import re
+import socket
import subprocess
import time
+class CmdExitFailure(Exception):
+ pass
+
+
class cmd:
def __init__(self, comm, shell=True, fail=True, ns=None, background=False, host=None, timeout=5):
if ns:
@@ -41,8 +47,8 @@ class cmd:
if self.proc.returncode != 0 and fail:
if len(stderr) > 0 and stderr[-1] == "\n":
stderr = stderr[:-1]
- raise Exception("Command failed: %s\nSTDOUT: %s\nSTDERR: %s" %
- (self.proc.args, stdout, stderr))
+ raise CmdExitFailure("Command failed: %s\nSTDOUT: %s\nSTDERR: %s" %
+ (self.proc.args, stdout, stderr))
class bkg(cmd):
@@ -60,6 +66,40 @@ class bkg(cmd):
return self.process(terminate=self.terminate, fail=self.check_fail)
+global_defer_queue = []
+
+
+class defer:
+ def __init__(self, func, *args, **kwargs):
+ global global_defer_queue
+
+ if not callable(func):
+ raise Exception("defer created with un-callable object, did you call the function instead of passing its name?")
+
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+
+ self._queue = global_defer_queue
+ self._queue.append(self)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, ex_type, ex_value, ex_tb):
+ return self.exec()
+
+ def exec_only(self):
+ self.func(*self.args, **self.kwargs)
+
+ def cancel(self):
+ self._queue.remove(self)
+
+ def exec(self):
+ self.cancel()
+ self.exec_only()
+
+
def tool(name, args, json=None, ns=None, host=None):
cmd_str = name + ' '
if json:
@@ -77,11 +117,24 @@ def ip(args, json=None, ns=None, host=None):
return tool('ip', args, json=json, host=host)
+def ethtool(args, json=None, ns=None, host=None):
+ return tool('ethtool', args, json=json, ns=ns, host=host)
+
+
def rand_port():
"""
- Get unprivileged port, for now just random, one day we may decide to check if used.
+ Get a random unprivileged port and try to make sure it is not already in use.
"""
- return random.randint(10000, 65535)
+ for _ in range(1000):
+ port = random.randint(10000, 65535)
+ try:
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ s.bind(("", port))
+ return port
+ except OSError as e:
+ if e.errno != errno.EADDRINUSE:
+ raise
+ raise Exception("Can't find any free unprivileged port")
def wait_port_listen(port, proto="tcp", ns=None, host=None, sleep=0.005, deadline=5):
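To show how tests are expected to use the defer helper added above, here is a simplified, self-contained re-implementation with the three common patterns: register a cleanup for the rest of the test, scope one to a with-block, and flush whatever is left (the file-removal cleanups are made-up examples; in real tests the class comes from net/lib/py and the flush is done by ksft_run()):

# Simplified re-implementation of the defer helper above, plus made-up usage
# examples.
import os
import tempfile

global_defer_queue = []

class defer:
    def __init__(self, func, *args, **kwargs):
        if not callable(func):
            raise TypeError("pass the function itself, not its return value")
        self.func, self.args, self.kwargs = func, args, kwargs
        global_defer_queue.append(self)

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_value, ex_tb):
        self.exec()

    def exec_only(self):
        self.func(*self.args, **self.kwargs)

    def cancel(self):
        global_defer_queue.remove(self)

    def exec(self):
        self.cancel()
        self.exec_only()

# 1) register a cleanup that the harness will run once the test case ends
fd, path = tempfile.mkstemp()
os.close(fd)
defer(os.remove, path)

# 2) scope a cleanup to a block: it runs (and is dequeued) when 'with' exits
fd, other = tempfile.mkstemp()
os.close(fd)
with defer(os.remove, other):
    assert os.path.exists(other)
assert not os.path.exists(other)

# 3) flush anything still queued, LIFO (normally ksft_flush_defer() does this)
while global_defer_queue:
    global_defer_queue.pop().exec_only()
assert not os.path.exists(path)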
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index fefa9173bdaa..108aeeb84ef1 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -261,6 +261,8 @@ reset()
TEST_NAME="${1}"
+ MPTCP_LIB_SUBTEST_FLAKY=0 # reset if modified
+
if skip_test; then
MPTCP_LIB_TEST_COUNTER=$((MPTCP_LIB_TEST_COUNTER+1))
last_test_ignored=1
@@ -448,7 +450,9 @@ reset_with_tcp_filter()
# $1: err msg
fail_test()
{
- ret=${KSFT_FAIL}
+ if ! mptcp_lib_subtest_is_flaky; then
+ ret=${KSFT_FAIL}
+ fi
if [ ${#} -gt 0 ]; then
print_fail "${@}"
@@ -2245,9 +2249,10 @@ remove_tests()
if reset "remove invalid addresses"; then
pm_nl_set_limits $ns1 3 3
pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+ # multicast IP: no packet for this address will be received on ns1
+ pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
- pm_nl_set_limits $ns2 3 3
+ pm_nl_set_limits $ns2 2 2
addr_nr_ns1=-3 speed=10 \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
@@ -3069,6 +3074,7 @@ fullmesh_tests()
fastclose_tests()
{
if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=client \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
@@ -3077,6 +3083,7 @@ fastclose_tests()
fi
if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=server \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0 0 0 0 1
@@ -3095,6 +3102,7 @@ fail_tests()
{
# single subflow
if reset_with_fail "Infinite map" 1; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=128 \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
@@ -3103,6 +3111,7 @@ fail_tests()
# multiple subflows
if reset_with_fail "MP_FAIL MP_RST" 2; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5ms
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index ad2ebda5cb64..438280e68434 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -1,6 +1,9 @@
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
+. "$(dirname "${0}")/../lib.sh"
+. "$(dirname "${0}")/../net_helper.sh"
+
readonly KSFT_PASS=0
readonly KSFT_FAIL=1
readonly KSFT_SKIP=4
@@ -21,6 +24,7 @@ declare -rx MPTCP_LIB_AF_INET6=10
MPTCP_LIB_SUBTESTS=()
MPTCP_LIB_SUBTESTS_DUPLICATED=0
+MPTCP_LIB_SUBTEST_FLAKY=0
MPTCP_LIB_TEST_COUNTER=0
MPTCP_LIB_TEST_FORMAT="%02u %-50s"
MPTCP_LIB_IP_MPTCP=0
@@ -41,6 +45,16 @@ else
readonly MPTCP_LIB_COLOR_RESET=
fi
+# SELFTESTS_MPTCP_LIB_OVERRIDE_FLAKY env var can be set not to ignore errors
+# from subtests marked as flaky
+mptcp_lib_override_flaky() {
+ [ "${SELFTESTS_MPTCP_LIB_OVERRIDE_FLAKY:-}" = 1 ]
+}
+
+mptcp_lib_subtest_is_flaky() {
+ [ "${MPTCP_LIB_SUBTEST_FLAKY}" = 1 ] && ! mptcp_lib_override_flaky
+}
+
# $1: color, $2: text
mptcp_lib_print_color() {
echo -e "${MPTCP_LIB_START_PRINT:-}${*}${MPTCP_LIB_COLOR_RESET}"
@@ -72,7 +86,16 @@ mptcp_lib_pr_skip() {
}
mptcp_lib_pr_fail() {
- mptcp_lib_print_err "[FAIL]${1:+ ${*}}"
+ local title cmt
+
+ if mptcp_lib_subtest_is_flaky; then
+ title="IGNO"
+ cmt=" (flaky)"
+ else
+ title="FAIL"
+ fi
+
+ mptcp_lib_print_err "[${title}]${cmt}${1:+ ${*}}"
}
mptcp_lib_pr_info() {
@@ -208,7 +231,13 @@ mptcp_lib_result_pass() {
# $1: test name
mptcp_lib_result_fail() {
- __mptcp_lib_result_add "not ok" "${1}"
+ if mptcp_lib_subtest_is_flaky; then
+ # It might sound better to use 'not ok # TODO' or 'ok # SKIP',
+ # but some CIs don't understand 'TODO' and treat SKIP as errors.
+ __mptcp_lib_result_add "ok" "${1} # IGNORE Flaky"
+ else
+ __mptcp_lib_result_add "not ok" "${1}"
+ fi
}
# $1: test name
@@ -335,20 +364,7 @@ mptcp_lib_check_transfer() {
# $1: ns, $2: port
mptcp_lib_wait_local_port_listen() {
- local listener_ns="${1}"
- local port="${2}"
-
- local port_hex
- port_hex="$(printf "%04X" "${port}")"
-
- local _
- for _ in $(seq 10); do
- ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
- awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) \
- {rc=0; exit}} END {exit rc}" &&
- break
- sleep 0.1
- done
+ wait_local_port_listen "${@}" "tcp"
}
mptcp_lib_check_output() {
@@ -412,17 +428,13 @@ mptcp_lib_check_tools() {
}
mptcp_lib_ns_init() {
- local sec rndh
-
- sec=$(date +%s)
- rndh=$(printf %x "${sec}")-$(mktemp -u XXXXXX)
+ if ! setup_ns "${@}"; then
+ mptcp_lib_pr_fail "Failed to setup namespaces ${*}"
+ exit ${KSFT_FAIL}
+ fi
local netns
for netns in "${@}"; do
- eval "${netns}=${netns}-${rndh}"
-
- ip netns add "${!netns}" || exit ${KSFT_SKIP}
- ip -net "${!netns}" link set lo up
ip netns exec "${!netns}" sysctl -q net.mptcp.enabled=1
ip netns exec "${!netns}" sysctl -q net.ipv4.conf.all.rp_filter=0
ip netns exec "${!netns}" sysctl -q net.ipv4.conf.default.rp_filter=0
@@ -430,9 +442,10 @@ mptcp_lib_ns_init() {
}
mptcp_lib_ns_exit() {
+ cleanup_ns "${@}"
+
local netns
for netns in "${@}"; do
- ip netns del "${netns}"
rm -f /tmp/"${netns}".{nstat,out}
done
}
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 4b14b4412166..f74e1c3c126d 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -244,7 +244,7 @@ run_test()
do_transfer $small $large $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
- if [ $lret -ne 0 ]; then
+ if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
ret=$lret
[ $bail -eq 0 ] || exit $ret
fi
@@ -254,7 +254,7 @@ run_test()
do_transfer $large $small $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
- if [ $lret -ne 0 ]; then
+ if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
ret=$lret
[ $bail -eq 0 ] || exit $ret
fi
@@ -290,7 +290,7 @@ run_test 10 10 0 0 "balanced bwidth"
run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
# we still need some additional infrastructure to pass the following test-cases
-run_test 10 3 0 0 "unbalanced bwidth"
+MPTCP_LIB_SUBTEST_FLAKY=1 run_test 10 3 0 0 "unbalanced bwidth"
run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 9e2981f2d7f5..9cb05978269d 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -160,10 +160,12 @@ make_connection()
local is_v6=$1
local app_port=$app4_port
local connect_addr="10.0.1.1"
+ local client_addr="10.0.1.2"
local listen_addr="0.0.0.0"
if [ "$is_v6" = "v6" ]
then
connect_addr="dead:beef:1::1"
+ client_addr="dead:beef:1::2"
listen_addr="::"
app_port=$app6_port
else
@@ -206,6 +208,7 @@ make_connection()
[ "$server_serverside" = 1 ]
then
test_pass
+ print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
else
test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
mptcp_lib_result_print_all_tap
@@ -297,7 +300,7 @@ test_announce()
ip netns exec "$ns2"\
./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
ns2eth1
- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, reuse port"
+ print_test "ADD_ADDR id:client 10.0.2.2 (ns2) => ns1, reuse port"
sleep 0.5
verify_announce_event $server_evts $ANNOUNCED $server4_token "10.0.2.2" $client_addr_id \
"$client4_port"
@@ -306,7 +309,7 @@ test_announce()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl ann\
dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1
- print_test "ADD_ADDR6 id:${client_addr_id} dead:beef:2::2 (ns2) => ns1, reuse port"
+ print_test "ADD_ADDR6 id:client dead:beef:2::2 (ns2) => ns1, reuse port"
sleep 0.5
verify_announce_event "$server_evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
"$client_addr_id" "$client6_port" "v6"
@@ -316,7 +319,7 @@ test_announce()
client_addr_id=$((client_addr_id+1))
ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
$client_addr_id dev ns2eth1 port $new4_port
- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, new port"
+ print_test "ADD_ADDR id:client+1 10.0.2.2 (ns2) => ns1, new port"
sleep 0.5
verify_announce_event "$server_evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
"$client_addr_id" "$new4_port"
@@ -327,7 +330,7 @@ test_announce()
# ADD_ADDR from the server to client machine reusing the subflow port
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR id:server 10.0.2.1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$app4_port"
@@ -336,7 +339,7 @@ test_announce()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR6 id:${server_addr_id} dead:beef:2::1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR6 id:server dead:beef:2::1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
"$server_addr_id" "$app6_port" "v6"
@@ -346,7 +349,7 @@ test_announce()
server_addr_id=$((server_addr_id+1))
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2 port $new4_port
- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, new port"
+ print_test "ADD_ADDR id:server+1 10.0.2.1 (ns1) => ns2, new port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$new4_port"
@@ -380,7 +383,7 @@ test_remove()
local invalid_token=$(( client4_token - 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\
$client_addr_id > /dev/null 2>&1
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1, invalid token"
+ print_test "RM_ADDR id:client ns2 => ns1, invalid token"
local type
type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
@@ -394,7 +397,7 @@ test_remove()
local invalid_id=$(( client_addr_id + 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$invalid_id > /dev/null 2>&1
- print_test "RM_ADDR id:${invalid_id} ns2 => ns1, invalid id"
+ print_test "RM_ADDR id:client+1 ns2 => ns1, invalid id"
type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
then
@@ -407,7 +410,7 @@ test_remove()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR id:client ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
@@ -416,7 +419,7 @@ test_remove()
client_addr_id=$(( client_addr_id - 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR id:client-1 ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
@@ -424,7 +427,7 @@ test_remove()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
$client_addr_id
- print_test "RM_ADDR6 id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR6 id:client-1 ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server6_token" "$client_addr_id"
@@ -434,7 +437,7 @@ test_remove()
# RM_ADDR from the server to client machine
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
$server_addr_id
- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR id:server ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
@@ -443,7 +446,7 @@ test_remove()
server_addr_id=$(( server_addr_id - 1 ))
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
$server_addr_id
- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR id:server-1 ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
@@ -451,7 +454,7 @@ test_remove()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
$server_addr_id
- print_test "RM_ADDR6 id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR6 id:server-1 ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client6_token" "$server_addr_id"
}
@@ -479,8 +482,14 @@ verify_subflow_events()
local locid
local remid
local info
+ local e_dport_txt
- info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})"
+ # only display the fixed ports
+ if [ "${e_dport}" -ge "${app4_port}" ] && [ "${e_dport}" -le "${app6_port}" ]; then
+ e_dport_txt=":${e_dport}"
+ fi
+
+ info="${e_saddr} (${e_from}) => ${e_daddr}${e_dport_txt} (${e_to})"
if [ "$e_type" = "$SUB_ESTABLISHED" ]
then
@@ -766,7 +775,7 @@ test_subflows_v4_v6_mix()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR4 id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR4 id:server 10.0.2.1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
"$server_addr_id" "$app6_port"
@@ -861,7 +870,7 @@ test_listener()
local listener_pid=$!
sleep 0.5
- print_test "CREATE_LISTENER 10.0.2.2:$client4_port"
+ print_test "CREATE_LISTENER 10.0.2.2 (client port)"
verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
# ADD_ADDR from client to server machine reusing the subflow port
@@ -878,13 +887,14 @@ test_listener()
mptcp_lib_kill_wait $listener_pid
sleep 0.5
- print_test "CLOSE_LISTENER 10.0.2.2:$client4_port"
+ print_test "CLOSE_LISTENER 10.0.2.2 (client port)"
verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
}
print_title "Make connections"
make_connection
make_connection "v6"
+print_title "Will be using address IDs ${client_addr_id} (client) and ${server_addr_id} (server)"
test_announce
test_remove
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index bdc03a2097e8..7ea5fb28c93d 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -85,6 +85,7 @@ static bool cfg_rx;
static int cfg_runtime_ms = 4200;
static int cfg_verbose;
static int cfg_waittime_ms = 500;
+static int cfg_notification_limit = 32;
static bool cfg_zerocopy;
static socklen_t cfg_alen;
@@ -95,6 +96,7 @@ static char payload[IP_MAXPACKET];
static long packets, bytes, completions, expected_completions;
static int zerocopied = -1;
static uint32_t next_completion;
+static uint32_t sends_since_notify;
static unsigned long gettimeofday_ms(void)
{
@@ -208,6 +210,7 @@ static bool do_sendmsg(int fd, struct msghdr *msg, bool do_zerocopy, int domain)
error(1, errno, "send");
if (cfg_verbose && ret != len)
fprintf(stderr, "send: ret=%u != %u\n", ret, len);
+ sends_since_notify++;
if (len) {
packets++;
@@ -435,7 +438,7 @@ static bool do_recv_completion(int fd, int domain)
/* Detect notification gaps. These should not happen often, if at all.
* Gaps can occur due to drops, reordering and retransmissions.
*/
- if (lo != next_completion)
+ if (cfg_verbose && lo != next_completion)
fprintf(stderr, "gap: %u..%u does not append to %u\n",
lo, hi, next_completion);
next_completion = hi + 1;
@@ -460,6 +463,7 @@ static bool do_recv_completion(int fd, int domain)
static void do_recv_completions(int fd, int domain)
{
while (do_recv_completion(fd, domain)) {}
+ sends_since_notify = 0;
}
/* Wait for all remaining completions on the errqueue */
@@ -549,6 +553,9 @@ static void do_tx(int domain, int type, int protocol)
else
do_sendmsg(fd, &msg, cfg_zerocopy, domain);
+ if (cfg_zerocopy && sends_since_notify >= cfg_notification_limit)
+ do_recv_completions(fd, domain);
+
while (!do_poll(fd, POLLOUT)) {
if (cfg_zerocopy)
do_recv_completions(fd, domain);
@@ -708,7 +715,7 @@ static void parse_opts(int argc, char **argv)
cfg_payload_len = max_payload_len;
- while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) {
+ while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vz")) != -1) {
switch (c) {
case '4':
if (cfg_family != PF_UNSPEC)
@@ -736,6 +743,9 @@ static void parse_opts(int argc, char **argv)
if (cfg_ifindex == 0)
error(1, errno, "invalid iface: %s", optarg);
break;
+ case 'l':
+ cfg_notification_limit = strtoul(optarg, NULL, 0);
+ break;
case 'm':
cfg_cork_mixed = true;
break;
diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh
index 8538f08c64c2..c61d23a8c88d 100755
--- a/tools/testing/selftests/net/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/net/netfilter/nft_queue.sh
@@ -375,6 +375,42 @@ EOF
wait 2>/dev/null
}
+test_queue_removal()
+{
+ read tainted_then < /proc/sys/kernel/tainted
+
+ ip netns exec "$ns1" nft -f - <<EOF
+flush ruleset
+table ip filter {
+ chain output {
+ type filter hook output priority 0; policy accept;
+ ip protocol icmp queue num 0
+ }
+}
+EOF
+ ip netns exec "$ns1" ./nf_queue -q 0 -d 30000 -t "$timeout" &
+ local nfqpid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" nf_queue_wait "$ns1" 0
+
+ ip netns exec "$ns1" ping -w 2 -f -c 10 127.0.0.1 -q >/dev/null
+ kill $nfqpid
+
+ ip netns exec "$ns1" nft flush ruleset
+
+ if [ "$tainted_then" -ne 0 ];then
+ return
+ fi
+
+ read tainted_now < /proc/sys/kernel/tainted
+ if [ "$tainted_now" -eq 0 ];then
+ echo "PASS: queue program exiting while packets queued"
+ else
+ echo "TAINT: queue program exiting while packets queued"
+ ret=1
+ fi
+}
+
ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
@@ -413,5 +449,6 @@ test_tcp_localhost
test_tcp_localhost_connectclose
test_tcp_localhost_requeue
test_icmp_vrf
+test_queue_removal
exit $ret
diff --git a/tools/testing/selftests/net/netns-sysctl.sh b/tools/testing/selftests/net/netns-sysctl.sh
new file mode 100755
index 000000000000..45c34a3b9aae
--- /dev/null
+++ b/tools/testing/selftests/net/netns-sysctl.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test checks that the network buffer sysctls are present
+# in a network namespace, and that they are read-only.
+
+source lib.sh
+
+cleanup() {
+ cleanup_ns $test_ns
+}
+
+trap cleanup EXIT
+
+fail() {
+ echo "ERROR: $*" >&2
+ exit 1
+}
+
+setup_ns test_ns
+
+for sc in {r,w}mem_{default,max}; do
+ # check that this is writable in a netns
+ [ -w "/proc/sys/net/core/$sc" ] ||
+ fail "$sc isn't writable in the init netns!"
+
+ # change the value in the host netns
+ sysctl -qw "net.core.$sc=300000" ||
+ fail "Can't write $sc in init netns!"
+
+ # check that the value is read from the init netns
+ [ "$(ip netns exec $test_ns sysctl -n "net.core.$sc")" -eq 300000 ] ||
+ fail "Value for $sc mismatch!"
+
+ # check that this isn't writable in a netns
+ ip netns exec $test_ns [ -w "/proc/sys/net/core/$sc" ] &&
+ fail "$sc is writable in a netns!"
+done
+
+echo 'Test passed OK'
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 5cae53543849..cc0bfae2bafa 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# OVS kernel module self tests
@@ -11,6 +11,11 @@ ksft_skip=4
PAUSE_ON_FAIL=no
VERBOSE=0
TRACING=0
+WAIT_TIMEOUT=5
+
+if test "X$KSFT_MACHINE_SLOW" == "Xyes"; then
+ WAIT_TIMEOUT=10
+fi
tests="
arp_ping eth-arp: Basic arp ping between two NS
@@ -20,10 +25,37 @@ tests="
nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT
netlink_checks ovsnl: validate netlink attrs and settings
upcall_interfaces ovs: test the upcall interfaces
- drop_reason drop: test drop reasons are emitted"
+ drop_reason drop: test drop reasons are emitted
+ psample psample: Sampling packets with psample"
info() {
- [ $VERBOSE = 0 ] || echo $*
+ [ "${ovs_dir}" != "" ] &&
+ echo "`date +"[%m-%d %H:%M:%S]"` $*" >> ${ovs_dir}/debug.log
+ [ $VERBOSE = 0 ] || echo $*
+}
+
+ovs_wait() {
+ info "waiting $WAIT_TIMEOUT s for: $@"
+
+ if "$@" ; then
+ info "wait succeeded immediately"
+ return 0
+ fi
+
+ # A quick re-check helps speed up small races in fast systems.
+ # However, fractional sleeps might not necessarily work.
+ local start=0
+ sleep 0.1 || { sleep 1; start=1; }
+
+ for (( i=start; i<WAIT_TIMEOUT; i++ )); do
+ if "$@" ; then
+ info "wait succeeded after $i seconds"
+ return 0
+ fi
+ sleep 1
+ done
+ info "wait failed after $i seconds"
+ return 1
}
ovs_base=`pwd`
@@ -65,7 +97,8 @@ ovs_setenv() {
ovs_sbx() {
if test "X$2" != X; then
- (ovs_setenv $1; shift; "$@" >> ${ovs_dir}/debug.log)
+ (ovs_setenv $1; shift;
+ info "run cmd: $@"; "$@" >> ${ovs_dir}/debug.log)
else
ovs_setenv $1
fi
@@ -102,12 +135,21 @@ ovs_netns_spawn_daemon() {
shift
netns=$1
shift
- info "spawning cmd: $*"
- ip netns exec $netns $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr &
+ if [ "$netns" == "_default" ]; then
+ $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr &
+ else
+ ip netns exec $netns $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr &
+ fi
pid=$!
ovs_sbx "$sbx" on_exit "kill -TERM $pid 2>/dev/null"
}
+ovs_spawn_daemon() {
+ sbx=$1
+ shift
+ ovs_netns_spawn_daemon $sbx "_default" $*
+}
+
ovs_add_netns_and_veths () {
info "Adding netns attached: sbx:$1 dp:$2 {$3, $4, $5}"
ovs_sbx "$1" ip netns add "$3" || return 1
@@ -139,7 +181,7 @@ ovs_add_flow () {
info "Adding flow to DP: sbx:$1 br:$2 flow:$3 act:$4"
ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-flow "$2" "$3" "$4"
if [ $? -ne 0 ]; then
- echo "Flow [ $3 : $4 ] failed" >> ${ovs_dir}/debug.log
+ info "Flow [ $3 : $4 ] failed"
return 1
fi
return 0
@@ -170,6 +212,19 @@ ovs_drop_reason_count()
return `echo "$perf_output" | grep "$pattern" | wc -l`
}
+ovs_test_flow_fails () {
+ ERR_MSG="Flow actions may not be safe on all matching packets"
+
+ PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
+ ovs_add_flow $@ &> /dev/null && return 1
+ POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
+
+ if [ "$PRE_TEST" == "$POST_TEST" ]; then
+ return 1
+ fi
+ return 0
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
@@ -184,6 +239,91 @@ usage() {
exit 1
}
+
+# psample test
+# - use psample to observe packets
+test_psample() {
+ sbx_add "test_psample" || return $?
+
+ # Add a datapath with per-vport dispatching.
+ ovs_add_dp "test_psample" psample -V 2:1 || return 1
+
+ info "create namespaces"
+ ovs_add_netns_and_veths "test_psample" "psample" \
+ client c0 c1 172.31.110.10/24 -u || return 1
+ ovs_add_netns_and_veths "test_psample" "psample" \
+ server s0 s1 172.31.110.20/24 -u || return 1
+
+ # Check if psample actions can be configured.
+ ovs_add_flow "test_psample" psample \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' 'psample(group=1)' &> /dev/null
+ if [ $? == 1 ]; then
+ info "no support for psample - skipping"
+ ovs_exit_sig
+ return $ksft_skip
+ fi
+
+ ovs_del_flows "test_psample" psample
+
+ # Test action verification.
+ OLDIFS=$IFS
+ IFS='*'
+ min_key='in_port(1),eth(),eth_type(0x0800),ipv4()'
+ for testcase in \
+ "cookie to large"*"psample(group=1,cookie=1615141312111009080706050403020100)" \
+ "no group with cookie"*"psample(cookie=abcd)" \
+ "no group"*"psample()";
+ do
+ set -- $testcase;
+ ovs_test_flow_fails "test_psample" psample $min_key $2
+ if [ $? == 1 ]; then
+ info "failed - $1"
+ return 1
+ fi
+ done
+ IFS=$OLDIFS
+
+ ovs_del_flows "test_psample" psample
+ # Allow ARP
+ ovs_add_flow "test_psample" psample \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' '2' || return 1
+ ovs_add_flow "test_psample" psample \
+ 'in_port(2),eth(),eth_type(0x0806),arp()' '1' || return 1
+
+ # Sample first 14 bytes of all traffic.
+ ovs_add_flow "test_psample" psample \
+ "in_port(1),eth(),eth_type(0x0800),ipv4()" \
+ "trunc(14),psample(group=1,cookie=c0ffee),2"
+
+ # Sample all traffic. In this case, use a sample() action with both
+ # psample and an upcall emulating simultaneous local sampling and
+ # sFlow / IPFIX.
+ nlpid=$(grep -E "listening on upcall packet handler" \
+ $ovs_dir/s0.out | cut -d ":" -f 2 | tr -d ' ')
+
+ ovs_add_flow "test_psample" psample \
+ "in_port(2),eth(),eth_type(0x0800),ipv4()" \
+ "sample(sample=100%,actions(psample(group=2,cookie=eeff0c),userspace(pid=${nlpid},userdata=eeff0c))),1"
+
+ # Record psample data.
+ ovs_spawn_daemon "test_psample" python3 $ovs_base/ovs-dpctl.py psample-events
+ ovs_wait grep -q "listening for psample events" ${ovs_dir}/stdout
+
+ # Send a single ping.
+ ovs_sbx "test_psample" ip netns exec client ping -I c1 172.31.110.20 -c 1 || return 1
+
+ # We should have received one userspace action upcall and 2 psample packets.
+ ovs_wait grep -q "userspace action command" $ovs_dir/s0.out || return 1
+
+ # client -> server samples should only contain the first 14 bytes of the packet.
+ ovs_wait grep -qE "rate:4294967295,group:1,cookie:c0ffee data:[0-9a-f]{28}$" \
+ $ovs_dir/stdout || return 1
+
+ ovs_wait grep -q "rate:4294967295,group:2,cookie:eeff0c" $ovs_dir/stdout || return 1
+
+ return 0
+}
+
# drop_reason test
# - drop packets and verify the right drop reason is reported
test_drop_reason() {
@@ -599,7 +739,8 @@ test_upcall_interfaces() {
ovs_add_netns_and_veths "test_upcall_interfaces" ui0 upc left0 l0 \
172.31.110.1/24 -u || return 1
- sleep 1
+ ovs_wait grep -q "listening on upcall packet handler" ${ovs_dir}/left0.out
+
info "sending arping"
ip netns exec upc arping -I l0 172.31.110.20 -c 1 \
>$ovs_dir/arping.stdout 2>$ovs_dir/arping.stderr
@@ -613,16 +754,20 @@ run_test() {
tname="$1"
tdesc="$2"
- if ! lsmod | grep openvswitch >/dev/null 2>&1; then
- stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}"
- return $ksft_skip
- fi
-
if python3 ovs-dpctl.py -h 2>&1 | \
grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then
stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}"
return $ksft_skip
fi
+
+ python3 ovs-dpctl.py show >/dev/null 2>&1 || \
+ echo "[DPCTL] show exception."
+
+ if ! lsmod | grep openvswitch >/dev/null 2>&1; then
+ stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}"
+ return $ksft_skip
+ fi
+
printf "TEST: %-60s [START]\n" "${tname}"
unset IFS
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 1dd057afd3fb..8a0396bfaf99 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -8,8 +8,10 @@ import argparse
import errno
import ipaddress
import logging
+import math
import multiprocessing
import re
+import socket
import struct
import sys
import time
@@ -26,13 +28,16 @@ try:
from pyroute2.netlink import genlmsg
from pyroute2.netlink import nla
from pyroute2.netlink import nlmsg_atoms
+ from pyroute2.netlink.event import EventSocket
from pyroute2.netlink.exceptions import NetlinkError
from pyroute2.netlink.generic import GenericNetlinkSocket
+ from pyroute2.netlink.nlsocket import Marshal
import pyroute2
+ import pyroute2.iproute
except ModuleNotFoundError:
print("Need to install the python pyroute2 package >= 0.6.")
- sys.exit(0)
+ sys.exit(1)
OVS_DATAPATH_FAMILY = "ovs_datapath"
@@ -58,6 +63,7 @@ OVS_FLOW_CMD_DEL = 2
OVS_FLOW_CMD_GET = 3
OVS_FLOW_CMD_SET = 4
+UINT32_MAX = 0xFFFFFFFF
def macstr(mac):
outstr = ":".join(["%02X" % i for i in mac])
@@ -198,6 +204,18 @@ def convert_ipv4(data):
return int(ipaddress.IPv4Address(ip)), int(ipaddress.IPv4Address(mask))
+def convert_ipv6(data):
+ ip, _, mask = data.partition('/')
+
+ if not ip:
+ ip = mask = 0
+ elif not mask:
+ mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
+ elif mask.isdigit():
+ mask = ipaddress.IPv6Network("::/" + mask).hostmask
+
+ return ipaddress.IPv6Address(ip).packed, ipaddress.IPv6Address(mask).packed
+
def convert_int(size):
def convert_int_sized(data):
value, _, mask = data.partition('/')
@@ -267,6 +285,75 @@ def parse_extract_field(
return str_skipped, data
+def parse_attrs(actstr, attr_desc):
+ """Parses the given action string and returns a list of netlink
+ attributes based on a list of attribute descriptions.
+
+ Each element in the attribute description list is a tuple such as:
+ (name, attr_name, parse_func)
+ where:
+ name: is the string representing the attribute
+ attr_name: is the name of the attribute as defined in the uAPI.
+ parse_func: is a callable accepting a string and returning either
+ a single object (the parsed attribute value) or a tuple of
+ two values (the parsed attribute value and the remaining string)
+
+ Returns a list of attributes and the remaining string.
+ """
+ def parse_attr(actstr, key, func):
+ actstr = actstr[len(key) :]
+
+ if not func:
+ return None, actstr
+
+ delim = actstr[0]
+ actstr = actstr[1:]
+
+ if delim == "=":
+ pos = strcspn(actstr, ",)")
+ ret = func(actstr[:pos])
+ else:
+ ret = func(actstr)
+
+ if isinstance(ret, tuple):
+ (datum, actstr) = ret
+ else:
+ datum = ret
+ actstr = actstr[strcspn(actstr, ",)"):]
+
+ if delim == "(":
+ if not actstr or actstr[0] != ")":
+ raise ValueError("Action contains unbalanced parentheses")
+
+ actstr = actstr[1:]
+
+ actstr = actstr[strspn(actstr, ", ") :]
+
+ return datum, actstr
+
+ attrs = []
+ attr_desc = list(attr_desc)
+ while actstr and actstr[0] != ")" and attr_desc:
+ found = False
+ for i, (key, attr, func) in enumerate(attr_desc):
+ if actstr.startswith(key):
+ datum, actstr = parse_attr(actstr, key, func)
+ attrs.append([attr, datum])
+ found = True
+ del attr_desc[i]
+
+ if not found:
+ raise ValueError("Unknown attribute: '%s'" % actstr)
+
+ actstr = actstr[strspn(actstr, ", ") :]
+
+ if actstr[0] != ")":
+ raise ValueError("Action string contains extra garbage or has "
+ "unbalanced parenthesis: '%s'" % actstr)
+
+ return attrs, actstr[1:]
+
+
class ovs_dp_msg(genlmsg):
# include the OVS version
# We need a custom header rather than just being able to rely on
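To make the (name, attr_name, parse_func) convention documented in parse_attrs() above concrete, here is a toy, self-contained version of the same parsing loop (strcspn/strspn re-implemented locally; the real helper in ovs-dpctl.py also handles the "key(value)" form) applied to a psample-style action string:

# Toy version of the parse_attrs() loop above, only handling the "key=value"
# form; self-contained for illustration, not the real ovs-dpctl.py helper.
def strcspn(s, chars):
    for i, c in enumerate(s):
        if c in chars:
            return i
    return len(s)

def strspn(s, chars):
    for i, c in enumerate(s):
        if c not in chars:
            return i
    return len(s)

def toy_parse_attrs(actstr, attr_desc):
    attrs = []
    attr_desc = list(attr_desc)
    while actstr and actstr[0] != ")" and attr_desc:
        for i, (key, attr, func) in enumerate(attr_desc):
            if actstr.startswith(key + "="):
                rest = actstr[len(key) + 1:]
                pos = strcspn(rest, ",)")
                attrs.append([attr, func(rest[:pos])])   # parsed netlink attribute
                actstr = rest[pos:]
                actstr = actstr[strspn(actstr, ", "):]   # skip separators
                del attr_desc[i]                         # each key appears once
                break
        else:
            raise ValueError("Unknown attribute: %r" % actstr)
    return attrs, actstr[1:]                             # drop the closing ')'

desc = (
    ("group", "OVS_PSAMPLE_ATTR_GROUP", int),
    ("cookie", "OVS_PSAMPLE_ATTR_COOKIE", lambda x: list(bytearray.fromhex(x))),
)
attrs, rest = toy_parse_attrs("group=1,cookie=c0ffee)", desc)
print(attrs)        # [['OVS_PSAMPLE_ATTR_GROUP', 1], ['OVS_PSAMPLE_ATTR_COOKIE', [192, 255, 238]]]
print(repr(rest))   # ''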
@@ -282,15 +369,15 @@ class ovsactions(nla):
("OVS_ACTION_ATTR_UNSPEC", "none"),
("OVS_ACTION_ATTR_OUTPUT", "uint32"),
("OVS_ACTION_ATTR_USERSPACE", "userspace"),
- ("OVS_ACTION_ATTR_SET", "none"),
+ ("OVS_ACTION_ATTR_SET", "ovskey"),
("OVS_ACTION_ATTR_PUSH_VLAN", "none"),
("OVS_ACTION_ATTR_POP_VLAN", "flag"),
- ("OVS_ACTION_ATTR_SAMPLE", "none"),
+ ("OVS_ACTION_ATTR_SAMPLE", "sample"),
("OVS_ACTION_ATTR_RECIRC", "uint32"),
("OVS_ACTION_ATTR_HASH", "none"),
("OVS_ACTION_ATTR_PUSH_MPLS", "none"),
("OVS_ACTION_ATTR_POP_MPLS", "flag"),
- ("OVS_ACTION_ATTR_SET_MASKED", "none"),
+ ("OVS_ACTION_ATTR_SET_MASKED", "ovskey"),
("OVS_ACTION_ATTR_CT", "ctact"),
("OVS_ACTION_ATTR_TRUNC", "uint32"),
("OVS_ACTION_ATTR_PUSH_ETH", "none"),
@@ -304,8 +391,85 @@ class ovsactions(nla):
("OVS_ACTION_ATTR_ADD_MPLS", "none"),
("OVS_ACTION_ATTR_DEC_TTL", "none"),
("OVS_ACTION_ATTR_DROP", "uint32"),
+ ("OVS_ACTION_ATTR_PSAMPLE", "psample"),
)
+ class psample(nla):
+ nla_flags = NLA_F_NESTED
+
+ nla_map = (
+ ("OVS_PSAMPLE_ATTR_UNSPEC", "none"),
+ ("OVS_PSAMPLE_ATTR_GROUP", "uint32"),
+ ("OVS_PSAMPLE_ATTR_COOKIE", "array(uint8)"),
+ )
+
+ def dpstr(self, more=False):
+ args = "group=%d" % self.get_attr("OVS_PSAMPLE_ATTR_GROUP")
+
+ cookie = self.get_attr("OVS_PSAMPLE_ATTR_COOKIE")
+ if cookie:
+ args += ",cookie(%s)" % \
+ "".join(format(x, "02x") for x in cookie)
+
+ return "psample(%s)" % args
+
+ def parse(self, actstr):
+ desc = (
+ ("group", "OVS_PSAMPLE_ATTR_GROUP", int),
+ ("cookie", "OVS_PSAMPLE_ATTR_COOKIE",
+ lambda x: list(bytearray.fromhex(x)))
+ )
+
+ attrs, actstr = parse_attrs(actstr, desc)
+
+ for attr in attrs:
+ self["attrs"].append(attr)
+
+ return actstr
+
+ class sample(nla):
+ nla_flags = NLA_F_NESTED
+
+ nla_map = (
+ ("OVS_SAMPLE_ATTR_UNSPEC", "none"),
+ ("OVS_SAMPLE_ATTR_PROBABILITY", "uint32"),
+ ("OVS_SAMPLE_ATTR_ACTIONS", "ovsactions"),
+ )
+
+ def dpstr(self, more=False):
+ args = []
+
+ args.append("sample={:.2f}%".format(
+ 100 * self.get_attr("OVS_SAMPLE_ATTR_PROBABILITY") /
+ UINT32_MAX))
+
+ actions = self.get_attr("OVS_SAMPLE_ATTR_ACTIONS")
+ if actions:
+ args.append("actions(%s)" % actions.dpstr(more))
+
+ return "sample(%s)" % ",".join(args)
+
+ def parse(self, actstr):
+ def parse_nested_actions(actstr):
+ subacts = ovsactions()
+ parsed_len = subacts.parse(actstr)
+ return subacts, actstr[parsed_len :]
+
+ def percent_to_rate(percent):
+ percent = float(percent.strip('%'))
+ return int(math.floor(UINT32_MAX * (percent / 100.0) + .5))
+
+ desc = (
+ ("sample", "OVS_SAMPLE_ATTR_PROBABILITY", percent_to_rate),
+ ("actions", "OVS_SAMPLE_ATTR_ACTIONS", parse_nested_actions),
+ )
+ attrs, actstr = parse_attrs(actstr, desc)
+
+ for attr in attrs:
+ self["attrs"].append(attr)
+
+ return actstr
+
class ctact(nla):
nla_flags = NLA_F_NESTED
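The probability field of the sample() action parsed above is stored as a 32-bit rate, so the parser converts a user-facing percentage with rounding and dpstr() converts it back. A quick self-contained check of that round trip (mirrors percent_to_rate and the formatting in sample.dpstr(); expected values shown in comments):

# Round-trip check for the percentage <-> uint32 probability mapping used by
# the sample() action parser above.
import math

UINT32_MAX = 0xFFFFFFFF

def percent_to_rate(percent):
    percent = float(percent.strip('%'))
    return int(math.floor(UINT32_MAX * (percent / 100.0) + .5))

def rate_to_percent(rate):
    return "{:.2f}%".format(100 * rate / UINT32_MAX)

print(percent_to_rate("100%"))       # 4294967295, i.e. UINT32_MAX: sample everything
print(percent_to_rate("50%"))        # 2147483648, roughly UINT32_MAX / 2
print(rate_to_percent(4294967295))   # 100.00%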
@@ -427,50 +591,77 @@ class ovsactions(nla):
print_str += "userdata="
for f in self.get_attr("OVS_USERSPACE_ATTR_USERDATA"):
print_str += "%x." % f
- if self.get_attr("OVS_USERSPACE_ATTR_TUN_PORT") is not None:
+ if self.get_attr("OVS_USERSPACE_ATTR_EGRESS_TUN_PORT") is not None:
print_str += "egress_tun_port=%d" % self.get_attr(
- "OVS_USERSPACE_ATTR_TUN_PORT"
+ "OVS_USERSPACE_ATTR_EGRESS_TUN_PORT"
)
print_str += ")"
return print_str
+ def parse(self, actstr):
+ attrs_desc = (
+ ("pid", "OVS_USERSPACE_ATTR_PID", int),
+ ("userdata", "OVS_USERSPACE_ATTR_USERDATA",
+ lambda x: list(bytearray.fromhex(x))),
+ ("egress_tun_port", "OVS_USERSPACE_ATTR_EGRESS_TUN_PORT", int)
+ )
+
+ attrs, actstr = parse_attrs(actstr, attrs_desc)
+ for attr in attrs:
+ self["attrs"].append(attr)
+
+ return actstr
+
def dpstr(self, more=False):
print_str = ""
- for field in self.nla_map:
+ for field in self["attrs"]:
if field[1] == "none" or self.get_attr(field[0]) is None:
continue
if print_str != "":
print_str += ","
- if field[1] == "uint32":
- if field[0] == "OVS_ACTION_ATTR_OUTPUT":
- print_str += "%d" % int(self.get_attr(field[0]))
- elif field[0] == "OVS_ACTION_ATTR_RECIRC":
- print_str += "recirc(0x%x)" % int(self.get_attr(field[0]))
- elif field[0] == "OVS_ACTION_ATTR_TRUNC":
- print_str += "trunc(%d)" % int(self.get_attr(field[0]))
- elif field[0] == "OVS_ACTION_ATTR_DROP":
- print_str += "drop(%d)" % int(self.get_attr(field[0]))
- elif field[1] == "flag":
- if field[0] == "OVS_ACTION_ATTR_CT_CLEAR":
- print_str += "ct_clear"
- elif field[0] == "OVS_ACTION_ATTR_POP_VLAN":
- print_str += "pop_vlan"
- elif field[0] == "OVS_ACTION_ATTR_POP_ETH":
- print_str += "pop_eth"
- elif field[0] == "OVS_ACTION_ATTR_POP_NSH":
- print_str += "pop_nsh"
- elif field[0] == "OVS_ACTION_ATTR_POP_MPLS":
- print_str += "pop_mpls"
+ if field[0] == "OVS_ACTION_ATTR_OUTPUT":
+ print_str += "%d" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_RECIRC":
+ print_str += "recirc(0x%x)" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_TRUNC":
+ print_str += "trunc(%d)" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_DROP":
+ print_str += "drop(%d)" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_CT_CLEAR":
+ print_str += "ct_clear"
+ elif field[0] == "OVS_ACTION_ATTR_POP_VLAN":
+ print_str += "pop_vlan"
+ elif field[0] == "OVS_ACTION_ATTR_POP_ETH":
+ print_str += "pop_eth"
+ elif field[0] == "OVS_ACTION_ATTR_POP_NSH":
+ print_str += "pop_nsh"
+ elif field[0] == "OVS_ACTION_ATTR_POP_MPLS":
+ print_str += "pop_mpls"
else:
datum = self.get_attr(field[0])
if field[0] == "OVS_ACTION_ATTR_CLONE":
print_str += "clone("
print_str += datum.dpstr(more)
print_str += ")"
+ elif field[0] == "OVS_ACTION_ATTR_SET" or \
+ field[0] == "OVS_ACTION_ATTR_SET_MASKED":
+ print_str += "set"
+ field = datum
+ mask = None
+ if field[0] == "OVS_ACTION_ATTR_SET_MASKED":
+ print_str += "_masked"
+ field = datum[0]
+ mask = datum[1]
+ print_str += "("
+ print_str += field.dpstr(mask, more)
+ print_str += ")"
else:
- print_str += datum.dpstr(more)
+ try:
+ print_str += datum.dpstr(more)
+ except:
+ print_str += "{ATTR: %s not decoded}" % field[0]
return print_str
@@ -531,7 +722,7 @@ class ovsactions(nla):
for flat_act in parse_flat_map:
if parse_starts_block(actstr, flat_act[0], False):
actstr = actstr[len(flat_act[0]):]
- self["attrs"].append([flat_act[1]])
+ self["attrs"].append([flat_act[1], True])
actstr = actstr[strspn(actstr, ", ") :]
parsed = True
@@ -544,6 +735,25 @@ class ovsactions(nla):
self["attrs"].append(("OVS_ACTION_ATTR_CLONE", subacts))
actstr = actstr[parsedLen:]
parsed = True
+ elif parse_starts_block(actstr, "set(", False):
+ parencount += 1
+ k = ovskey()
+ actstr = actstr[len("set("):]
+ actstr = k.parse(actstr, None)
+ self["attrs"].append(("OVS_ACTION_ATTR_SET", k))
+ if not actstr.startswith(")"):
+ actstr = ")" + actstr
+ parsed = True
+ elif parse_starts_block(actstr, "set_masked(", False):
+ parencount += 1
+ k = ovskey()
+ m = ovskey()
+ actstr = actstr[len("set_masked("):]
+ actstr = k.parse(actstr, m)
+ self["attrs"].append(("OVS_ACTION_ATTR_SET_MASKED", [k, m]))
+ if not actstr.startswith(")"):
+ actstr = ")" + actstr
+ parsed = True
elif parse_starts_block(actstr, "ct(", False):
parencount += 1
actstr = actstr[len("ct(") :]
@@ -637,6 +847,37 @@ class ovsactions(nla):
self["attrs"].append(["OVS_ACTION_ATTR_CT", ctact])
parsed = True
+ elif parse_starts_block(actstr, "sample(", False):
+ sampleact = self.sample()
+ actstr = sampleact.parse(actstr[len("sample(") : ])
+ self["attrs"].append(["OVS_ACTION_ATTR_SAMPLE", sampleact])
+ parsed = True
+
+ elif parse_starts_block(actstr, "psample(", False):
+ psampleact = self.psample()
+ actstr = psampleact.parse(actstr[len("psample(") : ])
+ self["attrs"].append(["OVS_ACTION_ATTR_PSAMPLE", psampleact])
+ parsed = True
+
+ elif parse_starts_block(actstr, "userspace(", False):
+ uact = self.userspace()
+ actstr = uact.parse(actstr[len("userspace(") : ])
+ self["attrs"].append(["OVS_ACTION_ATTR_USERSPACE", uact])
+ parsed = True
+
+ elif parse_starts_block(actstr, "trunc(", False):
+ parencount += 1
+ actstr, val = parse_extract_field(
+ actstr,
+ "trunc(",
+ r"([0-9]+)",
+ int,
+ False,
+ None,
+ )
+ self["attrs"].append(["OVS_ACTION_ATTR_TRUNC", val])
+ parsed = True
+
actstr = actstr[strspn(actstr, ", ") :]
while parencount > 0:
parencount -= 1
@@ -675,7 +916,7 @@ class ovskey(nla):
("OVS_KEY_ATTR_ARP", "ovs_key_arp"),
("OVS_KEY_ATTR_ND", "ovs_key_nd"),
("OVS_KEY_ATTR_SKB_MARK", "uint32"),
- ("OVS_KEY_ATTR_TUNNEL", "none"),
+ ("OVS_KEY_ATTR_TUNNEL", "ovs_key_tunnel"),
("OVS_KEY_ATTR_SCTP", "ovs_key_sctp"),
("OVS_KEY_ATTR_TCP_FLAGS", "be16"),
("OVS_KEY_ATTR_DP_HASH", "uint32"),
@@ -907,21 +1148,21 @@ class ovskey(nla):
"src",
"src",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
- lambda x: ipaddress.IPv6Address(x),
+ lambda x: ipaddress.IPv6Address(x).packed if x else 0,
+ convert_ipv6,
),
(
"dst",
"dst",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
- lambda x: ipaddress.IPv6Address(x),
+ lambda x: ipaddress.IPv6Address(x).packed if x else 0,
+ convert_ipv6,
),
- ("label", "label", "%d", int),
- ("proto", "proto", "%d", int),
- ("tclass", "tclass", "%d", int),
- ("hlimit", "hlimit", "%d", int),
- ("frag", "frag", "%d", int),
+ ("label", "label", "%d", lambda x: int(x) if x else 0),
+ ("proto", "proto", "%d", lambda x: int(x) if x else 0),
+ ("tclass", "tclass", "%d", lambda x: int(x) if x else 0),
+ ("hlimit", "hlimit", "%d", lambda x: int(x) if x else 0),
+ ("frag", "frag", "%d", lambda x: int(x) if x else 0),
)
def __init__(
@@ -1119,7 +1360,7 @@ class ovskey(nla):
"target",
"target",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
+ convert_ipv6,
),
("sll", "sll", macstr, lambda x: int.from_bytes(x, "big")),
("tll", "tll", macstr, lambda x: int.from_bytes(x, "big")),
@@ -1204,13 +1445,13 @@ class ovskey(nla):
"src",
"src",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big", convertmac),
+ convert_ipv6,
),
(
"dst",
"dst",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
+ convert_ipv6,
),
("tp_src", "tp_src", "%d", int),
("tp_dst", "tp_dst", "%d", int),
@@ -1235,6 +1476,163 @@ class ovskey(nla):
init=init,
)
+ class ovs_key_tunnel(nla):
+ nla_flags = NLA_F_NESTED
+
+ nla_map = (
+ ("OVS_TUNNEL_KEY_ATTR_ID", "be64"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV4_SRC", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV4_DST", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_TOS", "uint8"),
+ ("OVS_TUNNEL_KEY_ATTR_TTL", "uint8"),
+ ("OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT", "flag"),
+ ("OVS_TUNNEL_KEY_ATTR_CSUM", "flag"),
+ ("OVS_TUNNEL_KEY_ATTR_OAM", "flag"),
+ ("OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS", "array(uint32)"),
+ ("OVS_TUNNEL_KEY_ATTR_TP_SRC", "be16"),
+ ("OVS_TUNNEL_KEY_ATTR_TP_DST", "be16"),
+ ("OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS", "none"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV6_SRC", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV6_DST", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_PAD", "none"),
+ ("OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS", "none"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE", "flag"),
+ )
+
+ def parse(self, flowstr, mask=None):
+ if not flowstr.startswith("tunnel("):
+ return None, None
+
+ k = ovskey.ovs_key_tunnel()
+ if mask is not None:
+ mask = ovskey.ovs_key_tunnel()
+
+ flowstr = flowstr[len("tunnel("):]
+
+ v6_address = None
+
+ fields = [
+ ("tun_id=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_ID",
+ 0xffffffffffffffff, None, None),
+
+ ("src=", r"([0-9a-fA-F\.]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV4_SRC", "255.255.255.255", "0.0.0.0",
+ False),
+ ("dst=", r"([0-9a-fA-F\.]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV4_DST", "255.255.255.255", "0.0.0.0",
+ False),
+
+ ("ipv6_src=", r"([0-9a-fA-F:]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV6_SRC",
+ "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "::", True),
+ ("ipv6_dst=", r"([0-9a-fA-F:]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV6_DST",
+ "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "::", True),
+
+ ("tos=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TOS", 255, 0,
+ None),
+ ("ttl=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TTL", 255, 0,
+ None),
+
+ ("tp_src=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TP_SRC",
+ 65535, 0, None),
+ ("tp_dst=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TP_DST",
+ 65535, 0, None),
+ ]
+
+ forced_include = ["OVS_TUNNEL_KEY_ATTR_TTL"]
+
+ for prefix, regex, typ, attr_name, mask_val, default_val, v46_flag in fields:
+ flowstr, value = parse_extract_field(flowstr, prefix, regex, typ, False)
+ if not attr_name:
+ raise Exception("Bad list value in tunnel fields")
+
+ if value is None and attr_name in forced_include:
+ value = default_val
+ mask_val = default_val
+
+ if value is not None:
+ if v46_flag is not None:
+ if v6_address is None:
+ v6_address = v46_flag
+ if v46_flag != v6_address:
+ raise ValueError("Cannot mix v6 and v4 addresses")
+ k["attrs"].append([attr_name, value])
+ if mask is not None:
+ mask["attrs"].append([attr_name, mask_val])
+ else:
+ if v46_flag is not None:
+ if v6_address is None or v46_flag != v6_address:
+ continue
+ if mask is not None:
+ mask["attrs"].append([attr_name, default_val])
+
+ if k["attrs"][0][0] != "OVS_TUNNEL_KEY_ATTR_ID":
+ raise ValueError("Needs a tunid set")
+
+ if flowstr.startswith("flags("):
+ flowstr = flowstr[len("flags("):]
+ flagspos = flowstr.find(")")
+ flags = flowstr[:flagspos]
+ flowstr = flowstr[flagspos + 1:]
+
+ flag_attrs = {
+ "df": "OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT",
+ "csum": "OVS_TUNNEL_KEY_ATTR_CSUM",
+ "oam": "OVS_TUNNEL_KEY_ATTR_OAM"
+ }
+
+ for flag in flags.split("|"):
+ if flag in flag_attrs:
+ k["attrs"].append([flag_attrs[flag], True])
+ if mask is not None:
+ mask["attrs"].append([flag_attrs[flag], True])
+
+ flowstr = flowstr[strspn(flowstr, ", ") :]
+ return flowstr, k, mask
+
+ def dpstr(self, mask=None, more=False):
+ print_str = "tunnel("
+
+ flagsattrs = []
+ for k in self["attrs"]:
+ noprint = False
+ if k[0] == "OVS_TUNNEL_KEY_ATTR_ID":
+ print_str += "tun_id=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV4_SRC":
+ print_str += "src=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV4_DST":
+ print_str += "dst=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV6_SRC":
+ print_str += "ipv6_src=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV6_DST":
+ print_str += "ipv6_dst=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TOS":
+ print_str += "tos=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TTL":
+ print_str += "ttl=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TP_SRC":
+ print_str += "tp_src=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TP_DST":
+ print_str += "tp_dst=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT":
+ noprint = True
+ flagsattrs.append("df")
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_CSUM":
+ noprint = True
+ flagsattrs.append("csum")
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_OAM":
+ noprint = True
+ flagsattrs.append("oam")
+
+ if not noprint:
+ print_str += ","
+
+ if len(flagsattrs):
+ print_str += "flags(" + "|".join(flagsattrs) + ")"
+ print_str += ")"
+ return print_str
+
class ovs_key_mpls(nla):
fields = (("lse", ">I"),)
@@ -1243,6 +1641,7 @@ class ovskey(nla):
("OVS_KEY_ATTR_PRIORITY", "skb_priority", intparse),
("OVS_KEY_ATTR_SKB_MARK", "skb_mark", intparse),
("OVS_KEY_ATTR_RECIRC_ID", "recirc_id", intparse),
+ ("OVS_KEY_ATTR_TUNNEL", "tunnel", ovskey.ovs_key_tunnel),
("OVS_KEY_ATTR_DP_HASH", "dp_hash", intparse),
("OVS_KEY_ATTR_CT_STATE", "ct_state", parse_ct_state),
("OVS_KEY_ATTR_CT_ZONE", "ct_zone", intparse),
@@ -1309,7 +1708,7 @@ class ovskey(nla):
mask["attrs"].append([field[0], m])
self["attrs"].append([field[0], k])
- flowstr = flowstr[strspn(flowstr, "),") :]
+ flowstr = flowstr[strspn(flowstr, "), ") :]
return flowstr
@@ -1346,6 +1745,13 @@ class ovskey(nla):
True,
),
(
+ "OVS_KEY_ATTR_TUNNEL",
+ "tunnel",
+ None,
+ False,
+ False,
+ ),
+ (
"OVS_KEY_ATTR_CT_STATE",
"ct_state",
"0x%04x",
@@ -1617,7 +2023,7 @@ class OvsVport(GenericNetlinkSocket):
("OVS_VPORT_ATTR_PORT_NO", "uint32"),
("OVS_VPORT_ATTR_TYPE", "uint32"),
("OVS_VPORT_ATTR_NAME", "asciiz"),
- ("OVS_VPORT_ATTR_OPTIONS", "none"),
+ ("OVS_VPORT_ATTR_OPTIONS", "vportopts"),
("OVS_VPORT_ATTR_UPCALL_PID", "array(uint32)"),
("OVS_VPORT_ATTR_STATS", "vportstats"),
("OVS_VPORT_ATTR_PAD", "none"),
@@ -1625,6 +2031,13 @@ class OvsVport(GenericNetlinkSocket):
("OVS_VPORT_ATTR_NETNSID", "uint32"),
)
+ class vportopts(nla):
+ nla_map = (
+ ("OVS_TUNNEL_ATTR_UNSPEC", "none"),
+ ("OVS_TUNNEL_ATTR_DST_PORT", "uint16"),
+ ("OVS_TUNNEL_ATTR_EXTENSION", "none"),
+ )
+
class vportstats(nla):
fields = (
("rx_packets", "=Q"),
@@ -1693,7 +2106,7 @@ class OvsVport(GenericNetlinkSocket):
raise ne
return reply
- def attach(self, dpindex, vport_ifname, ptype):
+ def attach(self, dpindex, vport_ifname, ptype, dport, lwt):
msg = OvsVport.ovs_vport_msg()
msg["cmd"] = OVS_VPORT_CMD_NEW
@@ -1702,12 +2115,43 @@ class OvsVport(GenericNetlinkSocket):
msg["dpifindex"] = dpindex
port_type = OvsVport.str_to_type(ptype)
- msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type])
msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_ifname])
msg["attrs"].append(
["OVS_VPORT_ATTR_UPCALL_PID", [self.upcall_packet.epid]]
)
+ TUNNEL_DEFAULTS = [("geneve", 6081),
+ ("vxlan", 4789)]
+
+ for tnl in TUNNEL_DEFAULTS:
+ if ptype == tnl[0]:
+ if not dport:
+ dport = tnl[1]
+
+ if not lwt:
+ vportopt = OvsVport.ovs_vport_msg.vportopts()
+ vportopt["attrs"].append(
+ ["OVS_TUNNEL_ATTR_DST_PORT", socket.htons(dport)]
+ )
+ msg["attrs"].append(
+ ["OVS_VPORT_ATTR_OPTIONS", vportopt]
+ )
+ else:
+ port_type = OvsVport.OVS_VPORT_TYPE_NETDEV
+ ipr = pyroute2.iproute.IPRoute()
+
+ if tnl[0] == "geneve":
+ ipr.link("add", ifname=vport_ifname, kind=tnl[0],
+ geneve_port=dport,
+ geneve_collect_metadata=True,
+ geneve_udp_zero_csum6_rx=1)
+ elif tnl[0] == "vxlan":
+ ipr.link("add", ifname=vport_ifname, kind=tnl[0],
+ vxlan_learning=0, vxlan_collect_metadata=1,
+ vxlan_udp_zero_csum6_rx=1, vxlan_port=dport)
+ break
+ msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type])
+
try:
reply = self.nlm_request(
msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
@@ -2018,10 +2462,71 @@ class OvsFlow(GenericNetlinkSocket):
print("MISS upcall[%d/%s]: %s" % (seq, pktpres, keystr), flush=True)
def execute(self, packetmsg):
- print("userspace execute command")
+ print("userspace execute command", flush=True)
def action(self, packetmsg):
- print("userspace action command")
+ print("userspace action command", flush=True)
+
+
+class psample_sample(genlmsg):
+ nla_map = (
+ ("PSAMPLE_ATTR_IIFINDEX", "none"),
+ ("PSAMPLE_ATTR_OIFINDEX", "none"),
+ ("PSAMPLE_ATTR_ORIGSIZE", "none"),
+ ("PSAMPLE_ATTR_SAMPLE_GROUP", "uint32"),
+ ("PSAMPLE_ATTR_GROUP_SEQ", "none"),
+ ("PSAMPLE_ATTR_SAMPLE_RATE", "uint32"),
+ ("PSAMPLE_ATTR_DATA", "array(uint8)"),
+ ("PSAMPLE_ATTR_GROUP_REFCOUNT", "none"),
+ ("PSAMPLE_ATTR_TUNNEL", "none"),
+ ("PSAMPLE_ATTR_PAD", "none"),
+ ("PSAMPLE_ATTR_OUT_TC", "none"),
+ ("PSAMPLE_ATTR_OUT_TC_OCC", "none"),
+ ("PSAMPLE_ATTR_LATENCY", "none"),
+ ("PSAMPLE_ATTR_TIMESTAMP", "none"),
+ ("PSAMPLE_ATTR_PROTO", "none"),
+ ("PSAMPLE_ATTR_USER_COOKIE", "array(uint8)"),
+ )
+
+ def dpstr(self):
+ fields = []
+ data = ""
+ for (attr, value) in self["attrs"]:
+ if attr == "PSAMPLE_ATTR_SAMPLE_GROUP":
+ fields.append("group:%d" % value)
+ if attr == "PSAMPLE_ATTR_SAMPLE_RATE":
+ fields.append("rate:%d" % value)
+ if attr == "PSAMPLE_ATTR_USER_COOKIE":
+ value = "".join(format(x, "02x") for x in value)
+ fields.append("cookie:%s" % value)
+ if attr == "PSAMPLE_ATTR_DATA" and len(value) > 0:
+ data = "data:%s" % "".join(format(x, "02x") for x in value)
+
+ return ("%s %s" % (",".join(fields), data)).strip()
+
+
+class psample_msg(Marshal):
+ PSAMPLE_CMD_SAMPLE = 0
+ PSAMPLE_CMD_GET_GROUP = 1
+ PSAMPLE_CMD_NEW_GROUP = 2
+ PSAMPLE_CMD_DEL_GROUP = 3
+ PSAMPLE_CMD_SET_FILTER = 4
+ msg_map = {PSAMPLE_CMD_SAMPLE: psample_sample}
+
+
+class PsampleEvent(EventSocket):
+ genl_family = "psample"
+ mcast_groups = ["packets"]
+ marshal_class = psample_msg
+
+ def read_samples(self):
+ print("listening for psample events", flush=True)
+ while True:
+ try:
+ for msg in self.get():
+ print(msg.dpstr(), flush=True)
+ except NetlinkError as ne:
+ raise ne
def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()):
@@ -2053,12 +2558,19 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()):
for iface in ndb.interfaces:
rep = vpl.info(iface.ifname, ifindex)
if rep is not None:
+ opts = ""
+ vpo = rep.get_attr("OVS_VPORT_ATTR_OPTIONS")
+ if vpo:
+ dpo = vpo.get_attr("OVS_TUNNEL_ATTR_DST_PORT")
+ if dpo:
+ opts += " tnl-dport:%s" % socket.ntohs(dpo)
print(
- " port %d: %s (%s)"
+ " port %d: %s (%s%s)"
% (
rep.get_attr("OVS_VPORT_ATTR_PORT_NO"),
rep.get_attr("OVS_VPORT_ATTR_NAME"),
OvsVport.type_to_str(rep.get_attr("OVS_VPORT_ATTR_TYPE")),
+ opts,
)
)
@@ -2081,7 +2593,7 @@ def main(argv):
help="Increment 'verbose' output counter.",
default=0,
)
- subparsers = parser.add_subparsers()
+ subparsers = parser.add_subparsers(dest="subcommand")
showdpcmd = subparsers.add_parser("show")
showdpcmd.add_argument(
@@ -2120,12 +2632,30 @@ def main(argv):
"--ptype",
type=str,
default="netdev",
- choices=["netdev", "internal"],
+ choices=["netdev", "internal", "geneve", "vxlan"],
help="Interface type (default netdev)",
)
+ addifcmd.add_argument(
+ "-p",
+ "--dport",
+ type=int,
+ default=0,
+ help="Destination port (0 for default)"
+ )
+ addifcmd.add_argument(
+ "-l",
+ "--lwt",
+ type=bool,
+ default=True,
+ help="Use LWT infrastructure instead of vport (default true)."
+ )
delifcmd = subparsers.add_parser("del-if")
delifcmd.add_argument("dpname", help="Datapath Name")
delifcmd.add_argument("delif", help="Interface name for adding")
+ delifcmd.add_argument("-d",
+ "--dellink",
+ type=bool, default=False,
+ help="Delete the link as well.")
dumpflcmd = subparsers.add_parser("dump-flows")
dumpflcmd.add_argument("dumpdp", help="Datapath Name")
@@ -2138,6 +2668,8 @@ def main(argv):
delfscmd = subparsers.add_parser("del-flows")
delfscmd.add_argument("flsbr", help="Datapath name")
+ subparsers.add_parser("psample-events")
+
args = parser.parse_args()
if args.verbose > 0:
@@ -2152,6 +2684,9 @@ def main(argv):
sys.setrecursionlimit(100000)
+ if args.subcommand == "psample-events":
+ PsampleEvent().read_samples()
+
if hasattr(args, "showdp"):
found = False
for iface in ndb.interfaces:
@@ -2186,7 +2721,8 @@ def main(argv):
print("DP '%s' not found." % args.dpname)
return 1
dpindex = rep["dpifindex"]
- rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype)
+ rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype,
+ args.dport, args.lwt)
msg = "vport '%s'" % args.addif
if rep and rep["header"]["error"] is None:
msg += " added."
@@ -2207,6 +2743,9 @@ def main(argv):
msg += " removed."
else:
msg += " failed to remove."
+ if args.dellink:
+ ipr = pyroute2.iproute.IPRoute()
+ ipr.link("del", index=ipr.link_lookup(ifname=args.delif)[0])
elif hasattr(args, "dumpdp"):
rep = ovsdp.info(args.dumpdp, 0)
if rep is None:
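Putting the pieces together: the psample-events subcommand added above prints one line per sampled packet, and test_psample greps those lines for the expected rate, group, cookie, and a 28-hex-character data field (14 bytes, matching the trunc(14) action, i.e. just the Ethernet header). A small sketch of decoding such a line; the sample line itself is made up but follows the format the test expects:

# Illustrative decoder for the one-line-per-sample output of the
# psample-events subcommand above; the input line is made up but matches
# the pattern test_psample greps for.
import re

line = "rate:4294967295,group:1,cookie:c0ffee data:aabbccddeeff1122334455660800"

m = re.match(r"rate:(\d+),group:(\d+),cookie:([0-9a-f]*)(?: data:([0-9a-f]*))?$", line)
rate, group, cookie, data = m.groups()

print(int(rate) == 0xFFFFFFFF)       # True: UINT32_MAX rate means every packet sampled
print(int(group))                    # 1: the psample group set in the flow action
print(bytes.fromhex(cookie))         # b'\xc0\xff\xee': the user cookie from psample(...)
print(len(bytes.fromhex(data)))      # 14: trunc(14) kept only the Ethernet header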
diff --git a/tools/testing/selftests/net/openvswitch/settings b/tools/testing/selftests/net/openvswitch/settings
new file mode 100644
index 000000000000..e2206265f67c
--- /dev/null
+++ b/tools/testing/selftests/net/openvswitch/settings
@@ -0,0 +1 @@
+timeout=900
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index cfc84958025a..5175c0c83a23 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -842,25 +842,97 @@ setup_bridge() {
run_cmd ${ns_a} ip link set veth_A-C master br0
}
+setup_ovs_via_internal_utility() {
+ type="${1}"
+ a_addr="${2}"
+ b_addr="${3}"
+ dport="${4}"
+
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-if ovs_br0 ${type}_a -t ${type} || return 1
+
+ ports=$(python3 ./openvswitch/ovs-dpctl.py show)
+ br0_port=$(echo "$ports" | grep -E "\sovs_br0" | sed -e 's@port @@' | cut -d: -f1 | xargs)
+ type_a_port=$(echo "$ports" | grep ${type}_a | sed -e 's@port @@' | cut -d: -f1 | xargs)
+ veth_a_port=$(echo "$ports" | grep veth_A | sed -e 's@port @@' | cut -d: -f1 | xargs)
+
+ v4_a_tun="${prefix4}.${a_r1}.1"
+ v4_b_tun="${prefix4}.${b_r1}.1"
+
+ v6_a_tun="${prefix6}:${a_r1}::1"
+ v6_b_tun="${prefix6}:${b_r1}::1"
+
+ if [ "${v4_a_tun}" = "${a_addr}" ]; then
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0806),arp()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0806),arp(sip=${veth4_c_addr},tip=${tunnel4_b_addr})" \
+ "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ else
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0806),arp()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0806),arp(sip=${veth4_c_addr},tip=${tunnel4_b_addr})" \
+ "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ fi
+}
+
+setup_ovs_via_vswitchd() {
+ type="${1}"
+ b_addr="${2}"
+
+ run_cmd ovs-vsctl add-port ovs_br0 ${type}_a -- \
+ set interface ${type}_a type=${type} \
+ options:remote_ip=${b_addr} options:key=1 options:csum=true || return 1
+}
+
setup_ovs_vxlan_or_geneve() {
type="${1}"
a_addr="${2}"
b_addr="${3}"
+ dport="6081"
if [ "${type}" = "vxlan" ]; then
+ dport="4789"
opts="${opts} ttl 64 dstport 4789"
opts_b="local ${b_addr}"
fi
- run_cmd ovs-vsctl add-port ovs_br0 ${type}_a -- \
- set interface ${type}_a type=${type} \
- options:remote_ip=${b_addr} options:key=1 options:csum=true || return 1
+ setup_ovs_via_internal_utility "${type}" "${a_addr}" "${b_addr}" \
+ "${dport}" || \
+ setup_ovs_via_vswitchd "${type}" "${b_addr}" || return 1
run_cmd ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts} || return 1
run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
+ run_cmd ip link set ${type}_a up
run_cmd ${ns_b} ip link set ${type}_b up
}
@@ -880,8 +952,24 @@ setup_ovs_vxlan6() {
setup_ovs_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1
}
+setup_ovs_br_internal() {
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-dp ovs_br0 || \
+ return 1
+}
+
+setup_ovs_br_vswitchd() {
+ run_cmd ovs-vsctl add-br ovs_br0 || return 1
+}
+
+setup_ovs_add_if() {
+ ifname="${1}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-if ovs_br0 \
+ "${ifname}" || \
+ run_cmd ovs-vsctl add-port ovs_br0 "${ifname}"
+}
+
setup_ovs_bridge() {
- run_cmd ovs-vsctl add-br ovs_br0 || return $ksft_skip
+ setup_ovs_br_internal || setup_ovs_br_vswitchd || return $ksft_skip
run_cmd ip link set ovs_br0 up
run_cmd ${ns_c} ip link add veth_C-A type veth peer name veth_A-C
@@ -891,7 +979,7 @@ setup_ovs_bridge() {
run_cmd ${ns_c} ip link set veth_C-A up
run_cmd ${ns_c} ip addr add ${veth4_c_addr}/${veth4_mask} dev veth_C-A
run_cmd ${ns_c} ip addr add ${veth6_c_addr}/${veth6_mask} dev veth_C-A
- run_cmd ovs-vsctl add-port ovs_br0 veth_A-C
+ setup_ovs_add_if veth_A-C
# Move veth_A-R1 to init
run_cmd ${ns_a} ip link set veth_A-R1 netns 1
@@ -922,6 +1010,18 @@ trace() {
sleep 1
}
+cleanup_del_ovs_internal() {
+ # squelch the output of the del-if commands since it can be wordy
+ python3 ./openvswitch/ovs-dpctl.py del-if ovs_br0 -d true vxlan_a >/dev/null 2>&1
+ python3 ./openvswitch/ovs-dpctl.py del-if ovs_br0 -d true geneve_a >/dev/null 2>&1
+ python3 ./openvswitch/ovs-dpctl.py del-dp ovs_br0 >/dev/null 2>&1
+}
+
+cleanup_del_ovs_vswitchd() {
+ ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null
+ ovs-vsctl --if-exists del-br ovs_br0 2>/dev/null
+}
+
cleanup() {
for pid in ${tcpdump_pids}; do
kill ${pid}
@@ -940,10 +1040,10 @@ cleanup() {
cleanup_all_ns
- ip link del veth_A-C 2>/dev/null
- ip link del veth_A-R1 2>/dev/null
- ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null
- ovs-vsctl --if-exists del-br ovs_br0 2>/dev/null
+ ip link del veth_A-C 2>/dev/null
+ ip link del veth_A-R1 2>/dev/null
+ cleanup_del_ovs_internal
+ cleanup_del_ovs_vswitchd
rm -f "$tmpoutfile"
}
@@ -1397,6 +1497,12 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() {
outer_family=${3}
ll_mtu=4000
+ if [ "${type}" = "vxlan" ]; then
+ tun_a="vxlan_sys_4789"
+ elif [ "${type}" = "geneve" ]; then
+ tun_a="genev_sys_6081"
+ fi
+
if [ ${outer_family} -eq 4 ]; then
setup namespaces routing ovs_bridge ovs_${type}4 || return $ksft_skip
# IPv4 header UDP header VXLAN/GENEVE header Ethernet header
@@ -1407,17 +1513,11 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() {
exp_mtu=$((${ll_mtu} - 40 - 8 - 8 - 14))
fi
- if [ "${type}" = "vxlan" ]; then
- tun_a="vxlan_sys_4789"
- elif [ "${type}" = "geneve" ]; then
- tun_a="genev_sys_6081"
- fi
-
- trace "" "${tun_a}" "${ns_b}" ${type}_b \
- "" veth_A-R1 "${ns_r1}" veth_R1-A \
- "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \
- "" ovs_br0 "" veth-A-C \
- "${ns_c}" veth_C-A
+ trace "" ${type}_a "${ns_b}" ${type}_b \
+ "" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \
+ "" ovs_br0 "" veth-A_C \
+ "${ns_c}" veth_C-A "" "${tun_a}"
if [ ${family} -eq 4 ]; then
ping=ping
@@ -1436,8 +1536,9 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() {
mtu "${ns_b}" veth_B-R1 ${ll_mtu}
mtu "${ns_r1}" veth_R1-B ${ll_mtu}
- mtu "" ${tun_a} $((${ll_mtu} + 1000))
- mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
+ mtu "" ${tun_a} $((${ll_mtu} + 1000)) 2>/dev/null || \
+ mtu "" ${type}_a $((${ll_mtu} + 1000)) 2>/dev/null
+ mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
run_cmd ${ns_c} ${ping} -q -M want -i 0.1 -c 20 -s $((${ll_mtu} + 500)) ${dst} || return 1
diff --git a/tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh b/tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh
new file mode 100755
index 000000000000..e23210aa547f
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh
@@ -0,0 +1,335 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Jianguo Wu <wujianguo@chinatelecom.cn>
+#
+# Mostly copied from tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh.
+#
+# This script is designed for testing the support of netfilter hooks for
+# SRv6 End.DX4 behavior.
+#
+# Hereafter a network diagram is shown, where one tenant (named 100) offers
+# IPv4 L3 VPN services allowing hosts to communicate with each other across
+# an IPv6 network.
+#
+# Routers rt-1 and rt-2 implement IPv4 L3 VPN services leveraging the SRv6
+# architecture. The key components for such VPNs are: a) SRv6 Encap behavior,
+# b) SRv6 End.DX4 behavior.
+#
+# To explain how an IPv4 L3 VPN based on SRv6 works, let us briefly consider an
+# example where, within the same domain of tenant 100, the host hs-1 pings
+# the host hs-2.
+#
+# First of all, L2 reachability of the host hs-2 is taken into account by
+# the router rt-1, which acts as an ARP proxy.
+#
+# When the host hs-1 sends an IPv4 packet destined to hs-2, the router rt-1
+# receives the packet on the internal veth-t100 interface. rt-1 holds the
+# SRv6 Encap route that encapsulates the IPv4 packet in an outer IPv6 packet
+# carrying a Segment Routing Header (SRH). This packet is sent through the
+# (IPv6) core network up to the router rt-2, which receives it on its veth0
+# interface.
+#
+# The rt-2 router uses the 'localsid' routing table to process incoming
+# IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these
+# packets, the SRv6 End.DX4 behavior removes the outer IPv6+SRH headers and
+# routes the packet to the specified nexthop. Afterwards, the packet is sent to
+# the host hs-2 through the veth-t100 interface.
+#
+# The ping response follows the same processing but this time the roles of rt-1
+# and rt-2 are swapped.
+#
+# When net.netfilter.nf_hooks_lwtunnel is set to 1 in rt-1 or rt-2, and an
+# rpfilter iptables rule is added, SRv6 packets go through the netfilter
+# PREROUTING hooks.
+#
+#
+# +-------------------+ +-------------------+
+# | | | |
+# | hs-1 netns | | hs-2 netns |
+# | | | |
+# | +-------------+ | | +-------------+ |
+# | | veth0 | | | | veth0 | |
+# | | 10.0.0.1/24 | | | | 10.0.0.2/24 | |
+# | +-------------+ | | +-------------+ |
+# | . | | . |
+# +-------------------+ +-------------------+
+# . .
+# . .
+# . .
+# +-----------------------------------+ +-----------------------------------+
+# | . | | . |
+# | +---------------+ | | +---------------- |
+# | | veth-t100 | | | | veth-t100 | |
+# | | 10.0.0.11/24 | +----------+ | | +----------+ | 10.0.0.22/24 | |
+# | +-------+-------+ | route | | | | route | +-------+-------- |
+# | | table | | | | table | |
+# | +----------+ | | +----------+ |
+# | +--------------+ | | +--------------+ |
+# | | veth0 | | | | veth0 | |
+# | | 2001:11::1/64 |.|...|.| 2001:11::2/64 | |
+# | +--------------+ | | +--------------+ |
+# | | | |
+# | rt-1 netns | | rt-2 netns |
+# | | | |
+# +-----------------------------------+ +-----------------------------------+
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+# | Network configuration |
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# rt-1: localsid table
+# +----------------------------------------------------------------+
+# |SID |Action |
+# +----------------------------------------------------------------+
+# |fc00:21:100::6004|apply SRv6 End.DX4 nh4 10.0.0.1 dev veth-t100 |
+# +----------------------------------------------------------------+
+#
+# rt-1: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |10.0.0.2 |apply seg6 encap segs fc00:12:100::6004|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth_t100 |
+# +---------------------------------------------------+
+#
+#
+# rt-2: localsid table
+# +---------------------------------------------------------------+
+# |SID |Action |
+# +---------------------------------------------------------------+
+# |fc00:12:100::6004|apply SRv6 End.DX4 nh4 10.0.0.2 dev veth-t100|
+# +---------------------------------------------------------------+
+#
+# rt-2: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |10.0.0.1 |apply seg6 encap segs fc00:21:100::6004|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth_t100 |
+# +---------------------------------------------------+
+#
+
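+# For reference, the rt-1 -> rt-2 direction of the tables above corresponds to
+# ip-route commands of the following shape (a sketch only; setup_vpn_config()
+# below builds both directions generically):
+#
+#   ip -netns rt-1 -4 route add 10.0.0.2/32 \
+#      encap seg6 mode encap segs fc00:12:100::6004 dev veth0
+#   ip -netns rt-2 -6 route add fc00:12:100::6004/128 \
+#      encap seg6local action End.DX4 nh4 10.0.0.2 dev veth-t100
+#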
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+readonly IPv6_RT_NETWORK=2001:11
+readonly IPv4_HS_NETWORK=10.0.0
+readonly SID_LOCATOR=fc00
+
+PING_TIMEOUT_SEC=4
+
+ret=0
+
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+cleanup()
+{
+ ip link del veth-rt-1 2>/dev/null || true
+ ip link del veth-rt-2 2>/dev/null || true
+
+ # destroy routers rt-* and hosts hs-*
+ for ns in $(ip netns show | grep -E 'rt-*|hs-*'); do
+ ip netns del ${ns} || true
+ done
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns add ${nsname}
+
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip link set veth-rt-${rt} netns ${nsname}
+ ip -netns ${nsname} link set veth-rt-${rt} name veth0
+
+ ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
+ ip -netns ${nsname} link set veth0 up
+ ip -netns ${nsname} link set lo up
+
+ ip netns exec ${nsname} sysctl -wq net.ipv4.ip_forward=1
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1
+}
+
+setup_rt_netfilter()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns exec ${nsname} sysctl -wq net.netfilter.nf_hooks_lwtunnel=1
+ ip netns exec ${nsname} iptables -t raw -A PREROUTING -m rpfilter --invert -j DROP
+}
+
+setup_hs()
+{
+ local hs=$1
+ local rt=$2
+ local tid=$3
+ local hsname=hs-${hs}
+ local rtname=rt-${rt}
+ local rtveth=veth-t${tid}
+
+ # set the networking for the host
+ ip netns add ${hsname}
+
+ ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0
+ ip -netns ${hsname} link set veth0 up
+ ip -netns ${hsname} link set lo up
+
+ ip -netns ${rtname} addr add ${IPv4_HS_NETWORK}.${rt}${hs}/24 dev ${rtveth}
+ ip -netns ${rtname} link set ${rtveth} up
+
+ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+}
+
+setup_vpn_config()
+{
+ local hssrc=$1
+ local rtsrc=$2
+ local hsdst=$3
+ local rtdst=$4
+ local tid=$5
+
+ local hssrc_name=hs-t${tid}-${hssrc}
+ local hsdst_name=hs-t${tid}-${hsdst}
+ local rtsrc_name=rt-${rtsrc}
+ local rtdst_name=rt-${rtdst}
+ local vpn_sid=${SID_LOCATOR}:${hssrc}${hsdst}:${tid}::6004
+
+ # set the encap route for encapsulating packets which arrive from the
+ # host hssrc and destined to the access router rtsrc.
+ ip -netns ${rtsrc_name} -4 route add ${IPv4_HS_NETWORK}.${hsdst}/32 \
+ encap seg6 mode encap segs ${vpn_sid} dev veth0
+ ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 \
+ via 2001:11::${rtdst} dev veth0
+
+ # set the decap route for decapsulating packets which arrive from
+ # the rtdst router and destined to the hsdst host.
+ ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 \
+ encap seg6local action End.DX4 nh4 ${IPv4_HS_NETWORK}.${hsdst} dev veth-t${tid}
+}
+
+setup()
+{
+ ip link add veth-rt-1 type veth peer name veth-rt-2
+ # setup the networking for router rt-1 and router rt-2
+ setup_rt_networking 1
+ setup_rt_networking 2
+
+ # setup two hosts for the tenant 100.
+ # - host hs-1 is directly connected to the router rt-1;
+ # - host hs-2 is directly connected to the router rt-2.
+ setup_hs 1 1 100
+ setup_hs 2 2 100
+
+ # setup the IPv4 L3 VPN which connects the host hs-1 and host hs-2.
+ setup_vpn_config 1 1 2 2 100 #args: src_host src_router dst_host dst_router tenant
+ setup_vpn_config 2 2 1 1 100
+}
+
+check_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ ip netns exec hs-${hssrc} ping -c 1 -W ${PING_TIMEOUT_SEC} \
+ ${IPv4_HS_NETWORK}.${hsdst} >/dev/null 2>&1
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ check_hs_connectivity ${hssrc} ${hsdst} ${tid}
+ log_test $? 0 "Hosts connectivity: hs-${hssrc} -> hs-${hsdst} (tenant ${tid})"
+}
+
+host_tests()
+{
+ log_section "SRv6 VPN connectivity test among hosts in the same tenant"
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+router_netfilter_tests()
+{
+ log_section "SRv6 VPN connectivity test with netfilter enabled in routers"
+ setup_rt_netfilter 1
+ setup_rt_netfilter 2
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup &>/dev/null
+
+setup
+
+host_tests
+router_netfilter_tests
+
+print_log_test_results
+
+cleanup &>/dev/null
+
+exit ${ret}
diff --git a/tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh b/tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh
new file mode 100755
index 000000000000..9e69a2ed5bc3
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh
@@ -0,0 +1,340 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Jianguo Wu <wujianguo@chinatelecom.cn>
+#
+# Mostly copied from tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh.
+#
+# This script is designed for testing the support of netfilter hooks for
+# SRv6 End.DX6 behavior.
+#
+# Hereafter a network diagram is shown, where one tenant (named 100) offers
+# IPv6 L3 VPN services allowing hosts to communicate with each other across
+# an IPv6 network.
+#
+# Routers rt-1 and rt-2 implement IPv6 L3 VPN services leveraging the SRv6
+# architecture. The key components for such VPNs are: a) SRv6 Encap behavior,
+# b) SRv6 End.DX6 behavior.
+#
+# To explain how an IPv6 L3 VPN based on SRv6 works, let us briefly consider an
+# example where, within the same domain of tenant 100, the host hs-1 pings
+# the host hs-2.
+#
+# First of all, L2 reachability of the host hs-2 is taken into account by
+# the router rt-1, which acts as an NDP proxy.
+#
+# When the host hs-1 sends an IPv6 packet destined to hs-2, the router rt-1
+# receives the packet on the internal veth-t100 interface. rt-1 holds the
+# SRv6 Encap route that encapsulates the IPv6 packet in an outer IPv6 packet
+# carrying a Segment Routing Header (SRH). This packet is sent through the
+# (IPv6) core network up to the router rt-2, which receives it on its veth0
+# interface.
+#
+# The rt-2 router uses the 'localsid' routing table to process incoming
+# IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these
+# packets, the SRv6 End.DX6 behavior removes the outer IPv6+SRH headers and
+# routes the packet to the specified nexthop. Afterwards, the packet is sent to
+# the host hs-2 through the veth-t100 interface.
+#
+# The ping response follows the same processing but this time the roles of rt-1
+# and rt-2 are swapped.
+#
+# When net.netfilter.nf_hooks_lwtunnel is set to 1 in rt-1 or rt-2, and an
+# rpfilter ip6tables rule is added, SRv6 packets go through the netfilter
+# PREROUTING hooks.
+#
+#
+# +-------------------+ +-------------------+
+# | | | |
+# | hs-1 netns | | hs-2 netns |
+# | | | |
+# | +-------------+ | | +-------------+ |
+# | | veth0 | | | | veth0 | |
+# | | cafe::1/64 | | | | cafe::2/64 | |
+# | +-------------+ | | +-------------+ |
+# | . | | . |
+# +-------------------+ +-------------------+
+# . .
+# . .
+# . .
+# +-----------------------------------+ +-----------------------------------+
+# | . | | . |
+# | +---------------+ | | +---------------- |
+# | | veth-t100 | | | | veth-t100 | |
+# | | cafe::11/64 | +----------+ | | +----------+ | cafe::22/64 | |
+# | +-------+-------+ | route | | | | route | +-------+-------- |
+# | | table | | | | table | |
+# | +----------+ | | +----------+ |
+# | +--------------+ | | +--------------+ |
+# | | veth0 | | | | veth0 | |
+# | | 2001:11::1/64 |.|...|.| 2001:11::2/64 | |
+# | +--------------+ | | +--------------+ |
+# | | | |
+# | rt-1 netns | | rt-2 netns |
+# | | | |
+# +-----------------------------------+ +-----------------------------------+
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+# | Network configuration |
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# rt-1: localsid table
+# +----------------------------------------------------------------+
+# |SID |Action |
+# +----------------------------------------------------------------+
+# |fc00:21:100::6004|apply SRv6 End.DX6 nh6 cafe::1 dev veth-t100 |
+# +----------------------------------------------------------------+
+#
+# rt-1: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::2 |apply seg6 encap segs fc00:12:100::6004|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth_t100 |
+# +---------------------------------------------------+
+#
+#
+# rt-2: localsid table
+# +---------------------------------------------------------------+
+# |SID |Action |
+# +---------------------------------------------------------------+
+# |fc00:12:100::6004|apply SRv6 End.DX6 nh6 cafe::2 dev veth-t100 |
+# +---------------------------------------------------------------+
+#
+# rt-2: route table
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::1 |apply seg6 encap segs fc00:21:100::6004|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth_t100 |
+# +---------------------------------------------------+
+#
+
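+# For reference, the rt-1 -> rt-2 direction of the tables above corresponds to
+# ip-route commands of the following shape (a sketch only; setup_vpn_config()
+# below builds both directions generically):
+#
+#   ip -netns rt-1 -6 route add cafe::2/128 \
+#      encap seg6 mode encap segs fc00:12:100::6004 dev veth0
+#   ip -netns rt-2 -6 route add fc00:12:100::6004/128 \
+#      encap seg6local action End.DX6 nh6 cafe::2 dev veth-t100
+#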
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+readonly IPv6_RT_NETWORK=2001:11
+readonly IPv6_HS_NETWORK=cafe
+readonly SID_LOCATOR=fc00
+
+PING_TIMEOUT_SEC=4
+
+ret=0
+
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+cleanup()
+{
+ ip link del veth-rt-1 2>/dev/null || true
+ ip link del veth-rt-2 2>/dev/null || true
+
+ # destroy routers rt-* and hosts hs-*
+ for ns in $(ip netns show | grep -E 'rt-*|hs-*'); do
+ ip netns del ${ns} || true
+ done
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns add ${nsname}
+
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip link set veth-rt-${rt} netns ${nsname}
+ ip -netns ${nsname} link set veth-rt-${rt} name veth0
+
+ ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
+ ip -netns ${nsname} link set veth0 up
+ ip -netns ${nsname} link set lo up
+
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1
+}
+
+setup_rt_netfilter()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns exec ${nsname} sysctl -wq net.netfilter.nf_hooks_lwtunnel=1
+ ip netns exec ${nsname} ip6tables -t raw -A PREROUTING -m rpfilter --invert -j DROP
+}
+
+setup_hs()
+{
+ local hs=$1
+ local rt=$2
+ local tid=$3
+ local hsname=hs-${hs}
+ local rtname=rt-${rt}
+ local rtveth=veth-t${tid}
+
+ # set the networking for the host
+ ip netns add ${hsname}
+
+ ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad
+ ip -netns ${hsname} link set veth0 up
+ ip -netns ${hsname} link set lo up
+
+ ip -netns ${rtname} addr add ${IPv6_HS_NETWORK}::${rt}${hs}/64 dev ${rtveth}
+ ip -netns ${rtname} link set ${rtveth} up
+
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1
+}
+
+setup_vpn_config()
+{
+ local hssrc=$1
+ local rtsrc=$2
+ local hsdst=$3
+ local rtdst=$4
+ local tid=$5
+
+ local hssrc_name=hs-t${tid}-${hssrc}
+ local hsdst_name=hs-t${tid}-${hsdst}
+ local rtsrc_name=rt-${rtsrc}
+ local rtdst_name=rt-${rtdst}
+ local rtveth=veth-t${tid}
+ local vpn_sid=${SID_LOCATOR}:${hssrc}${hsdst}:${tid}::6004
+
+ ip -netns ${rtsrc_name} -6 neigh add proxy ${IPv6_HS_NETWORK}::${hsdst} dev ${rtveth}
+
+ # set the encap route for encapsulating packets which arrive from the
+ # host hssrc and destined to the access router rtsrc.
+ ip -netns ${rtsrc_name} -6 route add ${IPv6_HS_NETWORK}::${hsdst}/128 \
+ encap seg6 mode encap segs ${vpn_sid} dev veth0
+ ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 \
+ via 2001:11::${rtdst} dev veth0
+
+ # set the decap route for decapsulating packets which arrive from
+ # the rtdst router and destined to the hsdst host.
+ ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 \
+ encap seg6local action End.DX6 nh6 ${IPv6_HS_NETWORK}::${hsdst} dev veth-t${tid}
+}
+
+setup()
+{
+ ip link add veth-rt-1 type veth peer name veth-rt-2
+ # setup the networking for router rt-1 and router rt-2
+ setup_rt_networking 1
+ setup_rt_networking 2
+
+ # setup two hosts for the tenant 100.
+ # - host hs-1 is directly connected to the router rt-1;
+ # - host hs-2 is directly connected to the router rt-2.
+ setup_hs 1 1 100
+ setup_hs 2 2 100
+
+ # setup the IPv6 L3 VPN which connects the host hs-1 and host hs-2.
+ setup_vpn_config 1 1 2 2 100 #args: src_host src_router dst_host dst_router tenant
+ setup_vpn_config 2 2 1 1 100
+}
+
+check_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ ip netns exec hs-${hssrc} ping -6 -c 1 -W ${PING_TIMEOUT_SEC} \
+ ${IPv6_HS_NETWORK}::${hsdst} >/dev/null 2>&1
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ check_hs_connectivity ${hssrc} ${hsdst} ${tid}
+ log_test $? 0 "Hosts connectivity: hs-${hssrc} -> hs-${hsdst} (tenant ${tid})"
+}
+
+host_tests()
+{
+ log_section "SRv6 VPN connectivity test among hosts in the same tenant"
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+router_netfilter_tests()
+{
+ log_section "SRv6 VPN connectivity test with netfilter enabled in routers"
+ setup_rt_netfilter 1
+ setup_rt_netfilter 2
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+}
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup &>/dev/null
+
+setup
+
+host_tests
+router_netfilter_tests
+
+print_log_test_results
+
+cleanup &>/dev/null
+
+exit ${ret}
diff --git a/tools/testing/selftests/net/tcp_ao/self-connect.c b/tools/testing/selftests/net/tcp_ao/self-connect.c
index e154d9e198a9..a5698b0a3718 100644
--- a/tools/testing/selftests/net/tcp_ao/self-connect.c
+++ b/tools/testing/selftests/net/tcp_ao/self-connect.c
@@ -30,8 +30,6 @@ static void setup_lo_intf(const char *lo_intf)
static void tcp_self_connect(const char *tst, unsigned int port,
bool different_keyids, bool check_restore)
{
- uint64_t before_challenge_ack, after_challenge_ack;
- uint64_t before_syn_challenge, after_syn_challenge;
struct tcp_ao_counters before_ao, after_ao;
uint64_t before_aogood, after_aogood;
struct netstat *ns_before, *ns_after;
@@ -62,8 +60,6 @@ static void tcp_self_connect(const char *tst, unsigned int port,
ns_before = netstat_read();
before_aogood = netstat_get(ns_before, "TCPAOGood", NULL);
- before_challenge_ack = netstat_get(ns_before, "TCPChallengeACK", NULL);
- before_syn_challenge = netstat_get(ns_before, "TCPSYNChallenge", NULL);
if (test_get_tcp_ao_counters(sk, &before_ao))
test_error("test_get_tcp_ao_counters()");
@@ -82,8 +78,6 @@ static void tcp_self_connect(const char *tst, unsigned int port,
ns_after = netstat_read();
after_aogood = netstat_get(ns_after, "TCPAOGood", NULL);
- after_challenge_ack = netstat_get(ns_after, "TCPChallengeACK", NULL);
- after_syn_challenge = netstat_get(ns_after, "TCPSYNChallenge", NULL);
if (test_get_tcp_ao_counters(sk, &after_ao))
test_error("test_get_tcp_ao_counters()");
if (!check_restore) {
@@ -98,18 +92,6 @@ static void tcp_self_connect(const char *tst, unsigned int port,
close(sk);
return;
}
- if (after_challenge_ack <= before_challenge_ack ||
- after_syn_challenge <= before_syn_challenge) {
- /*
- * It's also meant to test simultaneous open, so check
- * these counters as well.
- */
- test_fail("%s: Didn't challenge SYN or ACK: %zu <= %zu OR %zu <= %zu",
- tst, after_challenge_ack, before_challenge_ack,
- after_syn_challenge, before_syn_challenge);
- close(sk);
- return;
- }
if (test_tcp_ao_counters_cmp(tst, &before_ao, &after_ao, TEST_CNT_GOOD)) {
close(sk);
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index 85b3baa3f7f3..3e74cfa1a2bf 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -53,6 +53,7 @@ static bool cfg_do_ipv6;
static bool cfg_do_connected;
static bool cfg_do_connectionless;
static bool cfg_do_msgmore;
+static bool cfg_do_recv = true;
static bool cfg_do_setsockopt;
static int cfg_specific_test_id = -1;
@@ -414,6 +415,9 @@ static void run_one(struct testcase *test, int fdt, int fdr,
if (!sent)
return;
+ if (!cfg_do_recv)
+ return;
+
if (test->gso_len)
mss = test->gso_len;
else
@@ -464,8 +468,10 @@ static void run_test(struct sockaddr *addr, socklen_t alen)
if (fdr == -1)
error(1, errno, "socket r");
- if (bind(fdr, addr, alen))
- error(1, errno, "bind");
+ if (cfg_do_recv) {
+ if (bind(fdr, addr, alen))
+ error(1, errno, "bind");
+ }
/* Have tests fail quickly instead of hang */
if (setsockopt(fdr, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
@@ -524,7 +530,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "46cCmst:")) != -1) {
+ while ((c = getopt(argc, argv, "46cCmRst:")) != -1) {
switch (c) {
case '4':
cfg_do_ipv4 = true;
@@ -541,6 +547,9 @@ static void parse_opts(int argc, char **argv)
case 'm':
cfg_do_msgmore = true;
break;
+ case 'R':
+ cfg_do_recv = false;
+ break;
case 's':
cfg_do_setsockopt = true;
break;
diff --git a/tools/testing/selftests/net/udpgso.sh b/tools/testing/selftests/net/udpgso.sh
index 6c63178086b0..85d1fa3c1ff7 100755
--- a/tools/testing/selftests/net/udpgso.sh
+++ b/tools/testing/selftests/net/udpgso.sh
@@ -27,6 +27,31 @@ test_route_mtu() {
ip route add local fd00::1/128 table local dev lo mtu 1500
}
+setup_dummy_sink() {
+ ip link add name sink mtu 1500 type dummy
+ ip addr add dev sink 10.0.0.0/24
+ ip addr add dev sink fd00::2/64 nodad
+ ip link set dev sink up
+}
+
+test_hw_gso_hw_csum() {
+ setup_dummy_sink
+ ethtool -K sink tx-checksum-ip-generic on >/dev/null
+ ethtool -K sink tx-udp-segmentation on >/dev/null
+}
+
+test_sw_gso_hw_csum() {
+ setup_dummy_sink
+ ethtool -K sink tx-checksum-ip-generic on >/dev/null
+ ethtool -K sink tx-udp-segmentation off >/dev/null
+}
+
+test_sw_gso_sw_csum() {
+ setup_dummy_sink
+ ethtool -K sink tx-checksum-ip-generic off >/dev/null
+ ethtool -K sink tx-udp-segmentation off >/dev/null
+}
+
if [ "$#" -gt 0 ]; then
"$1"
shift 2 # pop "test_*" arg and "--" delimiter
@@ -56,3 +81,21 @@ echo "ipv4 msg_more"
echo "ipv6 msg_more"
./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C -m
+
+echo "ipv4 hw-gso hw-csum"
+./in_netns.sh "$0" test_hw_gso_hw_csum -- ./udpgso -4 -C -R
+
+echo "ipv6 hw-gso hw-csum"
+./in_netns.sh "$0" test_hw_gso_hw_csum -- ./udpgso -6 -C -R
+
+echo "ipv4 sw-gso hw-csum"
+./in_netns.sh "$0" test_sw_gso_hw_csum -- ./udpgso -4 -C -R
+
+echo "ipv6 sw-gso hw-csum"
+./in_netns.sh "$0" test_sw_gso_hw_csum -- ./udpgso -6 -C -R
+
+echo "ipv4 sw-gso sw-csum"
+./in_netns.sh "$0" test_sw_gso_sw_csum -- ./udpgso -4 -C -R
+
+echo "ipv6 sw-gso sw-csum"
+./in_netns.sh "$0" test_sw_gso_sw_csum -- ./udpgso -6 -C -R
diff --git a/tools/testing/selftests/net/vrf_route_leaking.sh b/tools/testing/selftests/net/vrf_route_leaking.sh
index 2da32f4c479b..152171fb1fc8 100755
--- a/tools/testing/selftests/net/vrf_route_leaking.sh
+++ b/tools/testing/selftests/net/vrf_route_leaking.sh
@@ -59,6 +59,7 @@
# while it is forwarded between different vrfs.
source lib.sh
+PATH=$PWD:$PWD/tools/testing/selftests/net:$PATH
VERBOSE=0
PAUSE_ON_FAIL=no
DEFAULT_TTYPE=sym
@@ -533,6 +534,86 @@ ipv6_ping_frag_asym()
ipv6_ping_frag asym
}
+ipv4_ping_local()
+{
+ log_section "IPv4 (sym route): VRF ICMP local error route lookup ping"
+
+ setup_sym
+
+ check_connectivity || return
+
+ run_cmd ip netns exec $r1 ip vrf exec blue ping -c1 -w1 ${H2_N2_IP}
+ log_test $? 0 "VRF ICMP local IPv4"
+}
+
+ipv4_tcp_local()
+{
+ log_section "IPv4 (sym route): VRF tcp local connection"
+
+ setup_sym
+
+ check_connectivity || return
+
+ run_cmd nettest -s -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -N "$r1" -d blue -r ${H2_N2_IP}
+ log_test $? 0 "VRF tcp local connection IPv4"
+}
+
+ipv4_udp_local()
+{
+ log_section "IPv4 (sym route): VRF udp local connection"
+
+ setup_sym
+
+ check_connectivity || return
+
+ run_cmd nettest -s -D -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -D -N "$r1" -d blue -r ${H2_N2_IP}
+ log_test $? 0 "VRF udp local connection IPv4"
+}
+
+ipv6_ping_local()
+{
+ log_section "IPv6 (sym route): VRF ICMP local error route lookup ping"
+
+ setup_sym
+
+ check_connectivity6 || return
+
+ run_cmd ip netns exec $r1 ip vrf exec blue ${ping6} -c1 -w1 ${H2_N2_IP6}
+ log_test $? 0 "VRF ICMP local IPv6"
+}
+
+ipv6_tcp_local()
+{
+ log_section "IPv6 (sym route): VRF tcp local connection"
+
+ setup_sym
+
+ check_connectivity6 || return
+
+ run_cmd nettest -s -6 -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -6 -N "$r1" -d blue -r ${H2_N2_IP6}
+ log_test $? 0 "VRF tcp local connection IPv6"
+}
+
+ipv6_udp_local()
+{
+ log_section "IPv6 (sym route): VRF udp local connection"
+
+ setup_sym
+
+ check_connectivity6 || return
+
+ run_cmd nettest -s -6 -D -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -6 -D -N "$r1" -d blue -r ${H2_N2_IP6}
+ log_test $? 0 "VRF udp local connection IPv6"
+}
+
################################################################################
# usage
@@ -555,8 +636,10 @@ EOF
# Some systems don't have a ping6 binary anymore
command -v ping6 > /dev/null 2>&1 && ping6=$(command -v ping6) || ping6=$(command -v ping)
-TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_ttl_asym ipv4_traceroute_asym"
-TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_ttl_asym ipv6_traceroute_asym"
+TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_local ipv4_tcp_local
+ipv4_udp_local ipv4_ping_ttl_asym ipv4_traceroute_asym"
+TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_local ipv6_tcp_local ipv6_udp_local
+ipv6_ping_ttl_asym ipv6_traceroute_asym"
ret=0
nsuccess=0
@@ -594,12 +677,18 @@ do
ipv4_traceroute|traceroute) ipv4_traceroute;;&
ipv4_traceroute_asym|traceroute) ipv4_traceroute_asym;;&
ipv4_ping_frag|ping) ipv4_ping_frag;;&
+ ipv4_ping_local|ping) ipv4_ping_local;;&
+ ipv4_tcp_local) ipv4_tcp_local;;&
+ ipv4_udp_local) ipv4_udp_local;;&
ipv6_ping_ttl|ping) ipv6_ping_ttl;;&
ipv6_ping_ttl_asym|ping) ipv6_ping_ttl_asym;;&
ipv6_traceroute|traceroute) ipv6_traceroute;;&
ipv6_traceroute_asym|traceroute) ipv6_traceroute_asym;;&
ipv6_ping_frag|ping) ipv6_ping_frag;;&
+ ipv6_ping_local|ping) ipv6_ping_local;;&
+ ipv6_tcp_local) ipv6_tcp_local;;&
+ ipv6_udp_local) ipv6_udp_local;;&
# setup namespaces and config, but do not run any tests
setup_sym|setup) setup_sym; exit 0;;
diff --git a/tools/testing/selftests/net/ynl.mk b/tools/testing/selftests/net/ynl.mk
new file mode 100644
index 000000000000..59cb26cf3f73
--- /dev/null
+++ b/tools/testing/selftests/net/ynl.mk
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# YNL selftest build snippet
+
+# Inputs:
+#
+# YNL_GENS: families we need in the selftests
+# YNL_PROGS: TEST_PROGS which need YNL (TODO: none exist yet)
+# YNL_GEN_FILES: TEST_GEN_FILES which need YNL
+
+YNL_OUTPUTS := $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_FILES))
+
+$(YNL_OUTPUTS): $(OUTPUT)/libynl.a
+$(YNL_OUTPUTS): CFLAGS += \
+ -I$(top_srcdir)/usr/include/ $(KHDR_INCLUDES) \
+ -I$(top_srcdir)/tools/net/ynl/lib/ \
+ -I$(top_srcdir)/tools/net/ynl/generated/
+
+$(OUTPUT)/libynl.a:
+ $(Q)$(MAKE) -C $(top_srcdir)/tools/net/ynl GENS="$(YNL_GENS)" libynl.a
+ $(Q)cp $(top_srcdir)/tools/net/ynl/libynl.a $(OUTPUT)/libynl.a
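For anyone building the YNL-based selftests by hand, the libynl.a rule above is equivalent to running the following from the top of the kernel tree (a sketch; "netdev" stands in for whatever families YNL_GENS lists):

    make -C tools/net/ynl GENS="netdev" libynl.a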
diff --git a/tools/testing/selftests/nolibc/Makefile b/tools/testing/selftests/nolibc/Makefile
index 40dd95228051..3fbabab46958 100644
--- a/tools/testing/selftests/nolibc/Makefile
+++ b/tools/testing/selftests/nolibc/Makefile
@@ -152,7 +152,7 @@ CFLAGS_mips32be = -EB -mabi=32
CFLAGS_STACKPROTECTOR ?= $(call cc-option,-mstack-protector-guard=global $(call cc-option,-fstack-protector-all))
CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 -W -Wall -Wextra \
$(call cc-option,-fno-stack-protector) \
- $(CFLAGS_$(XARCH)) $(CFLAGS_STACKPROTECTOR)
+ $(CFLAGS_$(XARCH)) $(CFLAGS_STACKPROTECTOR) $(CFLAGS_EXTRA)
LDFLAGS :=
REPORT ?= awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{if (!f) printf("\n"); f++; print;} /\[SKIPPED\][\r]*$$/{s++} \
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 94bb6e11c16f..093d0512f4c5 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -64,6 +64,14 @@ static const char *argv0;
/* will be used by constructor tests */
static int constructor_test_value;
+static const int is_nolibc =
+#ifdef NOLIBC
+ 1
+#else
+ 0
+#endif
+;
+
/* definition of a series of tests */
struct test {
const char *name; /* test name */
@@ -607,7 +615,7 @@ int expect_strne(const char *expr, int llen, const char *cmp)
static __attribute__((unused))
int expect_str_buf_eq(size_t expr, const char *buf, size_t val, int llen, const char *cmp)
{
- llen += printf(" = %lu <%s> ", expr, buf);
+ llen += printf(" = %lu <%s> ", (unsigned long)expr, buf);
if (strcmp(buf, cmp) != 0) {
result(llen, FAIL);
return 1;
@@ -621,6 +629,51 @@ int expect_str_buf_eq(size_t expr, const char *buf, size_t val, int llen, const
return 0;
}
+#define EXPECT_STRTOX(cond, func, input, base, expected, chars, expected_errno) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strtox(llen, func, input, base, expected, chars, expected_errno); } while (0)
+
+static __attribute__((unused))
+int expect_strtox(int llen, void *func, const char *input, int base, intmax_t expected, int expected_chars, int expected_errno)
+{
+ char *endptr;
+ int actual_errno, actual_chars;
+ intmax_t r;
+
+ errno = 0;
+ if (func == strtol) {
+ r = strtol(input, &endptr, base);
+ } else if (func == strtoul) {
+ r = strtoul(input, &endptr, base);
+ } else {
+ result(llen, FAIL);
+ return 1;
+ }
+ actual_errno = errno;
+ actual_chars = endptr - input;
+
+ llen += printf(" %lld = %lld", (long long)expected, (long long)r);
+ if (r != expected) {
+ result(llen, FAIL);
+ return 1;
+ }
+ if (expected_chars == -1) {
+ if (*endptr != '\0') {
+ result(llen, FAIL);
+ return 1;
+ }
+ } else if (expected_chars != actual_chars) {
+ result(llen, FAIL);
+ return 1;
+ }
+ if (actual_errno != expected_errno) {
+ result(llen, FAIL);
+ return 1;
+ }
+
+ result(llen, OK);
+ return 0;
+}
+
/* declare tests based on line numbers. There must be exactly one test per line. */
#define CASE_TEST(name) \
case __LINE__: llen += printf("%d %s", test, #name);
@@ -942,6 +995,7 @@ int run_syscall(int min, int max)
int ret = 0;
void *p1, *p2;
int has_gettid = 1;
+ int has_brk;
/* <proc> indicates whether or not /proc is mounted */
proc = stat("/proc", &stat_buf) == 0;
@@ -954,6 +1008,9 @@ int run_syscall(int min, int max)
has_gettid = __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30);
#endif
+ /* on musl, setting the break via brk()/sbrk() always fails */
+ has_brk = brk(0) == 0;
+
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
@@ -969,9 +1026,9 @@ int run_syscall(int min, int max)
CASE_TEST(kill_0); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
CASE_TEST(kill_CONT); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
CASE_TEST(kill_BADPID); EXPECT_SYSER(1, kill(INT_MAX, 0), -1, ESRCH); break;
- CASE_TEST(sbrk_0); EXPECT_PTRNE(1, sbrk(0), (void *)-1); break;
- CASE_TEST(sbrk); if ((p1 = p2 = sbrk(4096)) != (void *)-1) p2 = sbrk(-4096); EXPECT_SYSZR(1, (p2 == (void *)-1) || p2 == p1); break;
- CASE_TEST(brk); EXPECT_SYSZR(1, brk(sbrk(0))); break;
+ CASE_TEST(sbrk_0); EXPECT_PTRNE(has_brk, sbrk(0), (void *)-1); break;
+ CASE_TEST(sbrk); if ((p1 = p2 = sbrk(4096)) != (void *)-1) p2 = sbrk(-4096); EXPECT_SYSZR(has_brk, (p2 == (void *)-1) || p2 == p1); break;
+ CASE_TEST(brk); EXPECT_SYSZR(has_brk, brk(sbrk(0))); break;
CASE_TEST(chdir_root); EXPECT_SYSZR(1, chdir("/")); chdir(getenv("PWD")); break;
CASE_TEST(chdir_dot); EXPECT_SYSZR(1, chdir(".")); break;
CASE_TEST(chdir_blah); EXPECT_SYSER(1, chdir("/blah"), -1, ENOENT); break;
@@ -1076,19 +1133,17 @@ int run_stdlib(int min, int max)
CASE_TEST(strchr_foobar_z); EXPECT_STRZR(1, strchr("foobar", 'z')); break;
CASE_TEST(strrchr_foobar_o); EXPECT_STREQ(1, strrchr("foobar", 'o'), "obar"); break;
CASE_TEST(strrchr_foobar_z); EXPECT_STRZR(1, strrchr("foobar", 'z')); break;
-#ifdef NOLIBC
- CASE_TEST(strlcat_0); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 0), buf, 3, "test"); break;
- CASE_TEST(strlcat_1); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 1), buf, 4, "test"); break;
- CASE_TEST(strlcat_5); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 5), buf, 7, "test"); break;
- CASE_TEST(strlcat_6); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 6), buf, 7, "testb"); break;
- CASE_TEST(strlcat_7); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 7), buf, 7, "testba"); break;
- CASE_TEST(strlcat_8); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 8), buf, 7, "testbar"); break;
- CASE_TEST(strlcpy_0); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 0), buf, 3, "test"); break;
- CASE_TEST(strlcpy_1); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 1), buf, 3, ""); break;
- CASE_TEST(strlcpy_2); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 2), buf, 3, "b"); break;
- CASE_TEST(strlcpy_3); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 3), buf, 3, "ba"); break;
- CASE_TEST(strlcpy_4); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 4), buf, 3, "bar"); break;
-#endif
+ CASE_TEST(strlcat_0); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 0), buf, 3, "test"); break;
+ CASE_TEST(strlcat_1); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 1), buf, 4, "test"); break;
+ CASE_TEST(strlcat_5); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 5), buf, 7, "test"); break;
+ CASE_TEST(strlcat_6); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 6), buf, 7, "testb"); break;
+ CASE_TEST(strlcat_7); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 7), buf, 7, "testba"); break;
+ CASE_TEST(strlcat_8); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 8), buf, 7, "testbar"); break;
+ CASE_TEST(strlcpy_0); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 0), buf, 3, "test"); break;
+ CASE_TEST(strlcpy_1); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 1), buf, 3, ""); break;
+ CASE_TEST(strlcpy_2); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 2), buf, 3, "b"); break;
+ CASE_TEST(strlcpy_3); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 3), buf, 3, "ba"); break;
+ CASE_TEST(strlcpy_4); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 4), buf, 3, "bar"); break;
CASE_TEST(memcmp_20_20); EXPECT_EQ(1, memcmp("aaa\x20", "aaa\x20", 4), 0); break;
CASE_TEST(memcmp_20_60); EXPECT_LT(1, memcmp("aaa\x20", "aaa\x60", 4), 0); break;
CASE_TEST(memcmp_60_20); EXPECT_GT(1, memcmp("aaa\x60", "aaa\x20", 4), 0); break;
@@ -1139,6 +1194,26 @@ int run_stdlib(int min, int max)
CASE_TEST(limit_ptrdiff_min); EXPECT_EQ(1, PTRDIFF_MIN, sizeof(long) == 8 ? (ptrdiff_t) 0x8000000000000000LL : (ptrdiff_t) 0x80000000); break;
CASE_TEST(limit_ptrdiff_max); EXPECT_EQ(1, PTRDIFF_MAX, sizeof(long) == 8 ? (ptrdiff_t) 0x7fffffffffffffffLL : (ptrdiff_t) 0x7fffffff); break;
CASE_TEST(limit_size_max); EXPECT_EQ(1, SIZE_MAX, sizeof(long) == 8 ? (size_t) 0xffffffffffffffffULL : (size_t) 0xffffffffU); break;
+ CASE_TEST(strtol_simple); EXPECT_STRTOX(1, strtol, "35", 10, 35, -1, 0); break;
+ CASE_TEST(strtol_positive); EXPECT_STRTOX(1, strtol, "+35", 10, 35, -1, 0); break;
+ CASE_TEST(strtol_negative); EXPECT_STRTOX(1, strtol, "-35", 10, -35, -1, 0); break;
+ CASE_TEST(strtol_hex_auto); EXPECT_STRTOX(1, strtol, "0xFF", 0, 255, -1, 0); break;
+ CASE_TEST(strtol_base36); EXPECT_STRTOX(1, strtol, "12yZ", 36, 50507, -1, 0); break;
+ CASE_TEST(strtol_cutoff); EXPECT_STRTOX(1, strtol, "1234567890", 8, 342391, 7, 0); break;
+ CASE_TEST(strtol_octal_auto); EXPECT_STRTOX(1, strtol, "011", 0, 9, -1, 0); break;
+ CASE_TEST(strtol_hex_00); EXPECT_STRTOX(1, strtol, "0x00", 16, 0, -1, 0); break;
+ CASE_TEST(strtol_hex_FF); EXPECT_STRTOX(1, strtol, "FF", 16, 255, -1, 0); break;
+ CASE_TEST(strtol_hex_ff); EXPECT_STRTOX(1, strtol, "ff", 16, 255, -1, 0); break;
+ CASE_TEST(strtol_hex_prefix); EXPECT_STRTOX(1, strtol, "0xFF", 16, 255, -1, 0); break;
+ CASE_TEST(strtol_trailer); EXPECT_STRTOX(1, strtol, "35foo", 10, 35, 2, 0); break;
+ CASE_TEST(strtol_overflow); EXPECT_STRTOX(1, strtol, "0x8000000000000000", 16, LONG_MAX, -1, ERANGE); break;
+ CASE_TEST(strtol_underflow); EXPECT_STRTOX(1, strtol, "-0x8000000000000001", 16, LONG_MIN, -1, ERANGE); break;
+ CASE_TEST(strtoul_negative); EXPECT_STRTOX(1, strtoul, "-0x1", 16, ULONG_MAX, 4, 0); break;
+ CASE_TEST(strtoul_overflow); EXPECT_STRTOX(1, strtoul, "0x10000000000000000", 16, ULONG_MAX, -1, ERANGE); break;
+ CASE_TEST(strerror_success); EXPECT_STREQ(is_nolibc, strerror(0), "errno=0"); break;
+ CASE_TEST(strerror_EINVAL); EXPECT_STREQ(is_nolibc, strerror(EINVAL), "errno=22"); break;
+ CASE_TEST(strerror_int_max); EXPECT_STREQ(is_nolibc, strerror(INT_MAX), "errno=2147483647"); break;
+ CASE_TEST(strerror_int_min); EXPECT_STREQ(is_nolibc, strerror(INT_MIN), "errno=-2147483648"); break;
case __LINE__:
return ret; /* must be last */
diff --git a/tools/testing/selftests/nolibc/run-tests.sh b/tools/testing/selftests/nolibc/run-tests.sh
index c0a5a7cea9fa..0446e6326a40 100755
--- a/tools/testing/selftests/nolibc/run-tests.sh
+++ b/tools/testing/selftests/nolibc/run-tests.sh
@@ -15,9 +15,10 @@ download_location="${cache_dir}/crosstools/"
build_location="$(realpath "${cache_dir}"/nolibc-tests/)"
perform_download=0
test_mode=system
+CFLAGS_EXTRA="-Werror"
archs="i386 x86_64 arm64 arm mips32le mips32be ppc ppc64 ppc64le riscv s390 loongarch"
-TEMP=$(getopt -o 'j:d:c:b:a:m:ph' -n "$0" -- "$@")
+TEMP=$(getopt -o 'j:d:c:b:a:m:peh' -n "$0" -- "$@")
eval set -- "$TEMP"
unset TEMP
@@ -40,6 +41,7 @@ Options:
-a [ARCH] Host architecture of toolchains to use (default: ${hostarch})
-b [DIR] Build location (default: ${build_location})
-m [MODE] Test mode user/system (default: ${test_mode})
+ -e Disable -Werror
EOF
}
@@ -66,6 +68,9 @@ while true; do
'-m')
test_mode="$2"
shift 2; continue ;;
+ '-e')
+ CFLAGS_EXTRA=""
+ shift; continue ;;
'-h')
print_usage
exit 0
@@ -153,7 +158,7 @@ test_arch() {
exit 1
esac
printf '%-15s' "$arch:"
- swallow_output "${MAKE[@]}" "$test_target" V=1
+ swallow_output "${MAKE[@]}" CFLAGS_EXTRA="$CFLAGS_EXTRA" "$test_target" V=1
cp run.out run.out."${arch}"
"${MAKE[@]}" report | grep passed
}
diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile
index 254d676a2689..185dc76ebb5f 100644
--- a/tools/testing/selftests/openat2/Makefile
+++ b/tools/testing/selftests/openat2/Makefile
@@ -1,8 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined
TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
+# gcc requires -static-libasan in order to ensure that Address Sanitizer's
+# library is the first one loaded. However, clang already statically links the
+# Address Sanitizer if -fsanitize is specified. Therefore, simply omit
+# -static-libasan for clang builds.
+ifeq ($(LLVM),)
+ CFLAGS += -static-libasan
+endif
+
+LOCAL_HDRS += helpers.h
+
include ../lib.mk
-$(TEST_GEN_PROGS): helpers.c helpers.h
+$(TEST_GEN_PROGS): helpers.c
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index 9024754530b2..5790ab446527 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -5,6 +5,7 @@
*/
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <fcntl.h>
#include <sched.h>
#include <sys/stat.h>
diff --git a/tools/testing/selftests/powerpc/flags.mk b/tools/testing/selftests/powerpc/flags.mk
index b909bee3cb2a..abb9e58d95c4 100644
--- a/tools/testing/selftests/powerpc/flags.mk
+++ b/tools/testing/selftests/powerpc/flags.mk
@@ -5,8 +5,5 @@ GIT_VERSION := $(shell git describe --always --long --dirty || echo "unknown")
export GIT_VERSION
endif
-ifeq ($(CFLAGS),)
-CFLAGS := -std=gnu99 -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(selfdir)/powerpc/include $(CFLAGS)
+CFLAGS := -std=gnu99 -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(selfdir)/powerpc/include $(USERCFLAGS)
export CFLAGS
-endif
-
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
index 1b339d6bbff1..1ff1104e6575 100644
--- a/tools/testing/selftests/resctrl/cache.c
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -101,12 +101,12 @@ static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
*
* Return: 0 on success, < 0 on error.
*/
-static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value)
+static int print_results_cache(const char *filename, pid_t bm_pid, __u64 llc_value)
{
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
- printf("Pid: %d \t LLC_value: %llu\n", bm_pid, llc_value);
+ printf("Pid: %d \t LLC_value: %llu\n", (int)bm_pid, llc_value);
} else {
fp = fopen(filename, "a");
if (!fp) {
@@ -114,7 +114,7 @@ static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value
return -1;
}
- fprintf(fp, "Pid: %d \t llc_value: %llu\n", bm_pid, llc_value);
+ fprintf(fp, "Pid: %d \t llc_value: %llu\n", (int)bm_pid, llc_value);
fclose(fp);
}
@@ -133,7 +133,7 @@ static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value
* Return: =0 on success. <0 on failure.
*/
int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
- const char *filename, int bm_pid)
+ const char *filename, pid_t bm_pid)
{
int ret;
@@ -161,7 +161,7 @@ int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
*
* Return: =0 on success. <0 on failure.
*/
-int measure_llc_resctrl(const char *filename, int bm_pid)
+int measure_llc_resctrl(const char *filename, pid_t bm_pid)
{
unsigned long llc_occu_resc = 0;
int ret;
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index c7686fb6641a..742782438ca3 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -158,7 +158,6 @@ static int cat_test(const struct resctrl_test *test,
struct resctrl_val_param *param,
size_t span, unsigned long current_mask)
{
- char *resctrl_val = param->resctrl_val;
struct perf_event_read pe_read;
struct perf_event_attr pea;
cpu_set_t old_affinity;
@@ -178,8 +177,7 @@ static int cat_test(const struct resctrl_test *test,
return ret;
/* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/
- ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
- resctrl_val);
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp);
if (ret)
goto reset_affinity;
@@ -272,7 +270,6 @@ static int cat_run_test(const struct resctrl_test *test, const struct user_param
start_mask = create_bit_mask(start, n);
struct resctrl_val_param param = {
- .resctrl_val = CAT_STR,
.ctrlgrp = "c1",
.filename = RESULT_FILE_NAME,
.num_of_runs = 0,
@@ -291,11 +288,30 @@ static int cat_run_test(const struct resctrl_test *test, const struct user_param
return ret;
}
+static bool arch_supports_noncont_cat(const struct resctrl_test *test)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ /* AMD always supports non-contiguous CBM. */
+ if (get_vendor() == ARCH_AMD)
+ return true;
+
+ /* Intel support for non-contiguous CBM needs to be discovered. */
+ if (!strcmp(test->resource, "L3"))
+ __cpuid_count(0x10, 1, eax, ebx, ecx, edx);
+ else if (!strcmp(test->resource, "L2"))
+ __cpuid_count(0x10, 2, eax, ebx, ecx, edx);
+ else
+ return false;
+
+ return ((ecx >> 3) & 1);
+}
+
static int noncont_cat_run_test(const struct resctrl_test *test,
const struct user_params *uparams)
{
unsigned long full_cache_mask, cont_mask, noncont_mask;
- unsigned int eax, ebx, ecx, edx, sparse_masks;
+ unsigned int sparse_masks;
int bit_center, ret;
char schemata[64];
@@ -304,15 +320,8 @@ static int noncont_cat_run_test(const struct resctrl_test *test,
if (ret)
return ret;
- if (!strcmp(test->resource, "L3"))
- __cpuid_count(0x10, 1, eax, ebx, ecx, edx);
- else if (!strcmp(test->resource, "L2"))
- __cpuid_count(0x10, 2, eax, ebx, ecx, edx);
- else
- return -EINVAL;
-
- if (sparse_masks != ((ecx >> 3) & 1)) {
- ksft_print_msg("CPUID output doesn't match 'sparse_masks' file content!\n");
+ if (arch_supports_noncont_cat(test) != sparse_masks) {
+ ksft_print_msg("Hardware and kernel differ on non-contiguous CBM support!\n");
return 1;
}
diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
index 0105afec6188..0c045080d808 100644
--- a/tools/testing/selftests/resctrl/cmt_test.c
+++ b/tools/testing/selftests/resctrl/cmt_test.c
@@ -16,6 +16,17 @@
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
+#define CON_MON_LCC_OCCUP_PATH \
+ "%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+static int cmt_init(const struct resctrl_val_param *param, int domain_id)
+{
+ sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ param->ctrlgrp, domain_id);
+
+ return 0;
+}
+
static int cmt_setup(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *p)
@@ -29,6 +40,13 @@ static int cmt_setup(const struct resctrl_test *test,
return 0;
}
+static int cmt_measure(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid)
+{
+ sleep(1);
+ return measure_llc_resctrl(param->filename, bm_pid);
+}
+
static int show_results_info(unsigned long sum_llc_val, int no_of_bits,
unsigned long cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
@@ -126,13 +144,13 @@ static int cmt_run_test(const struct resctrl_test *test, const struct user_param
}
struct resctrl_val_param param = {
- .resctrl_val = CMT_STR,
.ctrlgrp = "c1",
- .mongrp = "m1",
.filename = RESULT_FILE_NAME,
.mask = ~(long_mask << n) & long_mask,
.num_of_runs = 0,
+ .init = cmt_init,
.setup = cmt_setup,
+ .measure = cmt_measure,
};
span = cache_portion_size(cache_total_size, param.mask, long_mask);
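
For reference, with the ctrlgrp "c1" used above and, say, domain 0, cmt_init() ends up pointing llc_occup_path at a file like /sys/fs/resctrl/c1/mon_data/mon_L3_00/llc_occupancy. A tiny standalone check of the format string, assuming the usual /sys/fs/resctrl mount point (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	char path[256];

	/* Same format as CON_MON_LCC_OCCUP_PATH, expanded by hand. */
	snprintf(path, sizeof(path),
		 "%s/%s/mon_data/mon_L3_%02d/llc_occupancy",
		 "/sys/fs/resctrl", "c1", 0);
	puts(path);	/* /sys/fs/resctrl/c1/mon_data/mon_L3_00/llc_occupancy */
	return 0;
}
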
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
index a6ad39aae162..ab8496a4925b 100644
--- a/tools/testing/selftests/resctrl/mba_test.c
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -17,6 +17,19 @@
#define ALLOCATION_MIN 10
#define ALLOCATION_STEP 10
+static int mba_init(const struct resctrl_val_param *param, int domain_id)
+{
+ int ret;
+
+ ret = initialize_mem_bw_imc();
+ if (ret)
+ return ret;
+
+ initialize_mem_bw_resctrl(param, domain_id);
+
+ return 0;
+}
+
/*
* Change schemata percentage from 100 to 10%. Write schemata to specified
* con_mon grp, mon_grp in resctrl FS.
@@ -51,6 +64,12 @@ static int mba_setup(const struct resctrl_test *test,
return 0;
}
+static int mba_measure(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid)
+{
+ return measure_mem_bw(uparams, param, bm_pid, "reads");
+}
+
static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
{
int allocation, runs;
@@ -145,12 +164,11 @@ static void mba_test_cleanup(void)
static int mba_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
struct resctrl_val_param param = {
- .resctrl_val = MBA_STR,
.ctrlgrp = "c1",
- .mongrp = "m1",
.filename = RESULT_FILE_NAME,
- .bw_report = "reads",
- .setup = mba_setup
+ .init = mba_init,
+ .setup = mba_setup,
+ .measure = mba_measure,
};
int ret;
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
index 6fec51e1ff46..6b5a3b52d861 100644
--- a/tools/testing/selftests/resctrl/mbm_test.c
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -86,6 +86,19 @@ static int check_results(size_t span)
return ret;
}
+static int mbm_init(const struct resctrl_val_param *param, int domain_id)
+{
+ int ret;
+
+ ret = initialize_mem_bw_imc();
+ if (ret)
+ return ret;
+
+ initialize_mem_bw_resctrl(param, domain_id);
+
+ return 0;
+}
+
static int mbm_setup(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *p)
@@ -105,6 +118,12 @@ static int mbm_setup(const struct resctrl_test *test,
return ret;
}
+static int mbm_measure(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid)
+{
+ return measure_mem_bw(uparams, param, bm_pid, "reads");
+}
+
static void mbm_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
@@ -113,12 +132,11 @@ static void mbm_test_cleanup(void)
static int mbm_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
struct resctrl_val_param param = {
- .resctrl_val = MBM_STR,
.ctrlgrp = "c1",
- .mongrp = "m1",
.filename = RESULT_FILE_NAME,
- .bw_report = "reads",
- .setup = mbm_setup
+ .init = mbm_init,
+ .setup = mbm_setup,
+ .measure = mbm_measure,
};
int ret;
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 00d51fa7531c..2dda56084588 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -43,13 +43,6 @@
#define DEFAULT_SPAN (250 * MB)
-#define PARENT_EXIT() \
- do { \
- kill(ppid, SIGKILL); \
- umount_resctrlfs(); \
- exit(EXIT_FAILURE); \
- } while (0)
-
/*
* user_params: User supplied parameters
* @cpu: CPU number to which the benchmark will be bound to
@@ -88,24 +81,27 @@ struct resctrl_test {
/*
* resctrl_val_param: resctrl test parameters
- * @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
* @filename: Name of file to which the o/p should be written
- * @bw_report: Bandwidth report type (reads vs writes)
- * @setup: Call back function to setup test environment
+ * @init: Callback function to initialize test environment
+ * @setup: Callback function to setup per test run environment
+ * @measure: Callback that performs the measurement (a single test)
*/
struct resctrl_val_param {
- char *resctrl_val;
- char ctrlgrp[64];
- char mongrp[64];
+ const char *ctrlgrp;
+ const char *mongrp;
char filename[64];
- char *bw_report;
unsigned long mask;
int num_of_runs;
+ int (*init)(const struct resctrl_val_param *param,
+ int domain_id);
int (*setup)(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *param);
+ int (*measure)(const struct user_params *uparams,
+ struct resctrl_val_param *param,
+ pid_t bm_pid);
};
struct perf_event_read {
@@ -115,11 +111,6 @@ struct perf_event_read {
} values[2];
};
-#define MBM_STR "mbm"
-#define MBA_STR "mba"
-#define CMT_STR "cmt"
-#define CAT_STR "cat"
-
/*
* Memory location that consumes values compiler must not optimize away.
* Volatile ensures writes to this location cannot be optimized away by
@@ -127,8 +118,6 @@ struct perf_event_read {
*/
extern volatile int *value_sink;
-extern pid_t bm_pid, ppid;
-
extern char llc_occup_path[1024];
int get_vendor(void);
@@ -137,7 +126,7 @@ int filter_dmesg(void);
int get_domain_id(const char *resource, int cpu_no, int *domain_id);
int mount_resctrlfs(void);
int umount_resctrlfs(void);
-int validate_bw_report_request(char *bw_report);
+const char *get_bw_report_type(const char *bw_report);
bool resctrl_resource_exists(const char *resource);
bool resctrl_mon_feature_exists(const char *resource, const char *feature);
bool resource_info_file_exists(const char *resource, const char *file);
@@ -145,15 +134,21 @@ bool test_resource_feature_check(const struct resctrl_test *test);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
-int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource);
-int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
- char *resctrl_val);
+int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
+ const char *resource);
+int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp);
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
int group_fd, unsigned long flags);
unsigned char *alloc_buffer(size_t buf_size, int memflush);
void mem_flush(unsigned char *buf, size_t buf_size);
void fill_cache_read(unsigned char *buf, size_t buf_size, bool once);
int run_fill_buf(size_t buf_size, int memflush, int op, bool once);
+int initialize_mem_bw_imc(void);
+int measure_mem_bw(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid,
+ const char *bw_report);
+void initialize_mem_bw_resctrl(const struct resctrl_val_param *param,
+ int domain_id);
int resctrl_val(const struct resctrl_test *test,
const struct user_params *uparams,
const char * const *benchmark_cmd,
@@ -174,8 +169,8 @@ void perf_event_initialize_read_format(struct perf_event_read *pe_read);
int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no);
int perf_event_reset_enable(int pe_fd);
int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
- const char *filename, int bm_pid);
-int measure_llc_resctrl(const char *filename, int bm_pid);
+ const char *filename, pid_t bm_pid);
+int measure_llc_resctrl(const char *filename, pid_t bm_pid);
void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines);
/*
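
With the resctrl_val string keys and the bw_report field gone, a test is now described entirely by its callbacks. A hypothetical test wired into the reworked interface might look like the sketch below; names are illustrative and not from the patch, and it assumes the selftest's own resctrl.h along with the END_OF_TESTS sentinel the existing tests use to stop the run loop.

#include <unistd.h>
#include "resctrl.h"

static int example_init(const struct resctrl_val_param *param, int domain_id)
{
	/* Resolve monitoring file paths for this domain, open perf fds, etc. */
	return 0;
}

static int example_setup(const struct resctrl_test *test,
			 const struct user_params *uparams,
			 struct resctrl_val_param *p)
{
	if (p->num_of_runs >= 3)
		return END_OF_TESTS;	/* sentinel the existing tests use */
	p->num_of_runs++;
	return 0;
}

static int example_measure(const struct user_params *uparams,
			   struct resctrl_val_param *param, pid_t bm_pid)
{
	sleep(1);
	return measure_llc_resctrl(param->filename, bm_pid);
}

static int example_run_test(const struct resctrl_test *test,
			    const struct user_params *uparams,
			    const char * const *benchmark_cmd)
{
	struct resctrl_val_param param = {
		.ctrlgrp	= "c1",
		.mongrp		= "m1",
		.filename	= "example_results.txt",
		.init		= example_init,
		.setup		= example_setup,
		.measure	= example_measure,
	};

	return resctrl_val(test, uparams, benchmark_cmd, &param);
}
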
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index 445f306d4c2f..8c275f6b4dd7 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -19,30 +19,10 @@
#define MAX_TOKENS 5
#define READ 0
#define WRITE 1
-#define CON_MON_MBM_LOCAL_BYTES_PATH \
- "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
#define CON_MBM_LOCAL_BYTES_PATH \
"%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
-#define MON_MBM_LOCAL_BYTES_PATH \
- "%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
-
-#define MBM_LOCAL_BYTES_PATH \
- "%s/mon_data/mon_L3_%02d/mbm_local_bytes"
-
-#define CON_MON_LCC_OCCUP_PATH \
- "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
-
-#define CON_LCC_OCCUP_PATH \
- "%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
-
-#define MON_LCC_OCCUP_PATH \
- "%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
-
-#define LCC_OCCUP_PATH \
- "%s/mon_data/mon_L3_%02d/llc_occupancy"
-
struct membw_read_format {
__u64 value; /* The value of the event */
__u64 time_enabled; /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
@@ -276,7 +256,7 @@ static int num_of_imcs(void)
return count;
}
-static int initialize_mem_bw_imc(void)
+int initialize_mem_bw_imc(void)
{
int imc, j;
@@ -293,44 +273,93 @@ static int initialize_mem_bw_imc(void)
return 0;
}
+static void perf_close_imc_mem_bw(void)
+{
+ int mc;
+
+ for (mc = 0; mc < imcs; mc++) {
+ if (imc_counters_config[mc][READ].fd != -1)
+ close(imc_counters_config[mc][READ].fd);
+ if (imc_counters_config[mc][WRITE].fd != -1)
+ close(imc_counters_config[mc][WRITE].fd);
+ }
+}
+
/*
- * get_mem_bw_imc: Memory band width as reported by iMC counters
- * @cpu_no: CPU number that the benchmark PID is binded to
- * @bw_report: Bandwidth report type (reads, writes)
- *
- * Memory B/W utilized by a process on a socket can be calculated using
- * iMC counters. Perf events are used to read these counters.
+ * perf_open_imc_mem_bw - Open perf fds for IMCs
+ * @cpu_no: CPU number that the benchmark PID is bound to
*
* Return: = 0 on success. < 0 on failure.
*/
-static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+static int perf_open_imc_mem_bw(int cpu_no)
{
- float reads, writes, of_mul_read, of_mul_write;
- int imc, j, ret;
+ int imc, ret;
- /* Start all iMC counters to log values (both read and write) */
- reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
for (imc = 0; imc < imcs; imc++) {
- for (j = 0; j < 2; j++) {
- ret = open_perf_event(imc, cpu_no, j);
- if (ret)
- return -1;
- }
- for (j = 0; j < 2; j++)
- membw_ioctl_perf_event_ioc_reset_enable(imc, j);
+ imc_counters_config[imc][READ].fd = -1;
+ imc_counters_config[imc][WRITE].fd = -1;
+ }
+
+ for (imc = 0; imc < imcs; imc++) {
+ ret = open_perf_event(imc, cpu_no, READ);
+ if (ret)
+ goto close_fds;
+ ret = open_perf_event(imc, cpu_no, WRITE);
+ if (ret)
+ goto close_fds;
+ }
+
+ return 0;
+
+close_fds:
+ perf_close_imc_mem_bw();
+ return -1;
+}
+
+/*
+ * do_imc_mem_bw_test - Perform memory bandwidth test
+ *
+ * Runs the memory bandwidth test over a one-second period. Also handles
+ * starting and stopping of the IMC perf counters around the test.
+ */
+static void do_imc_mem_bw_test(void)
+{
+ int imc;
+
+ for (imc = 0; imc < imcs; imc++) {
+ membw_ioctl_perf_event_ioc_reset_enable(imc, READ);
+ membw_ioctl_perf_event_ioc_reset_enable(imc, WRITE);
}
sleep(1);
/* Stop counters after a second to get results (both read and write) */
for (imc = 0; imc < imcs; imc++) {
- for (j = 0; j < 2; j++)
- membw_ioctl_perf_event_ioc_disable(imc, j);
+ membw_ioctl_perf_event_ioc_disable(imc, READ);
+ membw_ioctl_perf_event_ioc_disable(imc, WRITE);
}
+}
+
+/*
+ * get_mem_bw_imc - Memory bandwidth as reported by iMC counters
+ * @bw_report: Bandwidth report type (reads, writes)
+ *
+ * Memory bandwidth utilized by a process on a socket can be calculated
+ * using iMC counters. Perf events are used to read these counters.
+ *
+ * Return: = 0 on success. < 0 on failure.
+ */
+static int get_mem_bw_imc(const char *bw_report, float *bw_imc)
+{
+ float reads, writes, of_mul_read, of_mul_write;
+ int imc;
+
+	/* Totals and overflow multipliers for both read and write */
+ reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
/*
* Get results which are stored in struct type imc_counter_config
- * Take over flow into consideration before calculating total b/w
+ * Take overflow into consideration before calculating total bandwidth.
*/
for (imc = 0; imc < imcs; imc++) {
struct imc_counter_config *r =
@@ -340,15 +369,13 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
if (read(r->fd, &r->return_value,
sizeof(struct membw_read_format)) == -1) {
- ksft_perror("Couldn't get read b/w through iMC");
-
+ ksft_perror("Couldn't get read bandwidth through iMC");
return -1;
}
if (read(w->fd, &w->return_value,
sizeof(struct membw_read_format)) == -1) {
- ksft_perror("Couldn't get write bw through iMC");
-
+ ksft_perror("Couldn't get write bandwidth through iMC");
return -1;
}
@@ -369,11 +396,6 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
writes += w->return_value.value * of_mul_write * SCALE;
}
- for (imc = 0; imc < imcs; imc++) {
- close(imc_counters_config[imc][READ].fd);
- close(imc_counters_config[imc][WRITE].fd);
- }
-
if (strcmp(bw_report, "reads") == 0) {
*bw_imc = reads;
return 0;
@@ -388,84 +410,45 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
return 0;
}
-void set_mbm_path(const char *ctrlgrp, const char *mongrp, int domain_id)
+/*
+ * initialize_mem_bw_resctrl: Appropriately populate "mbm_total_path"
+ * @param: Parameters passed to resctrl_val()
+ * @domain_id: Domain ID (cache ID; for MB, L3 cache ID)
+ */
+void initialize_mem_bw_resctrl(const struct resctrl_val_param *param,
+ int domain_id)
{
- if (ctrlgrp && mongrp)
- sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH,
- RESCTRL_PATH, ctrlgrp, mongrp, domain_id);
- else if (!ctrlgrp && mongrp)
- sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
- mongrp, domain_id);
- else if (ctrlgrp && !mongrp)
- sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
- ctrlgrp, domain_id);
- else if (!ctrlgrp && !mongrp)
- sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
- domain_id);
+ sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ param->ctrlgrp, domain_id);
}
/*
- * initialize_mem_bw_resctrl: Appropriately populate "mbm_total_path"
- * @ctrlgrp: Name of the control monitor group (con_mon grp)
- * @mongrp: Name of the monitor group (mon grp)
- * @cpu_no: CPU number that the benchmark PID is binded to
- * @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
+ * Open file to read MBM local bytes from resctrl FS
*/
-static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
- int cpu_no, char *resctrl_val)
+static FILE *open_mem_bw_resctrl(const char *mbm_bw_file)
{
- int domain_id;
-
- if (get_domain_id("MB", cpu_no, &domain_id) < 0) {
- ksft_print_msg("Could not get domain ID\n");
- return;
- }
+ FILE *fp;
- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
- set_mbm_path(ctrlgrp, mongrp, domain_id);
+ fp = fopen(mbm_bw_file, "r");
+ if (!fp)
+ ksft_perror("Failed to open total memory bandwidth file");
- if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- if (ctrlgrp)
- sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
- RESCTRL_PATH, ctrlgrp, domain_id);
- else
- sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH,
- RESCTRL_PATH, domain_id);
- }
+ return fp;
}
/*
* Get MBM Local bytes as reported by resctrl FS
- * For MBM,
- * 1. If con_mon grp and mon grp are given, then read from con_mon grp's mon grp
- * 2. If only con_mon grp is given, then read from con_mon grp
- * 3. If both are not given, then read from root con_mon grp
- * For MBA,
- * 1. If con_mon grp is given, then read from it
- * 2. If con_mon grp is not given, then read from root con_mon grp
*/
-static int get_mem_bw_resctrl(unsigned long *mbm_total)
+static int get_mem_bw_resctrl(FILE *fp, unsigned long *mbm_total)
{
- FILE *fp;
-
- fp = fopen(mbm_total_path, "r");
- if (!fp) {
- ksft_perror("Failed to open total bw file");
-
+ if (fscanf(fp, "%lu\n", mbm_total) <= 0) {
+ ksft_perror("Could not get MBM local bytes");
return -1;
}
- if (fscanf(fp, "%lu", mbm_total) <= 0) {
- ksft_perror("Could not get mbm local bytes");
- fclose(fp);
-
- return -1;
- }
- fclose(fp);
-
return 0;
}
-pid_t bm_pid, ppid;
+static pid_t bm_pid, ppid;
void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
{
@@ -523,6 +506,13 @@ void signal_handler_unregister(void)
}
}
+static void parent_exit(pid_t ppid)
+{
+ kill(ppid, SIGKILL);
+ umount_resctrlfs();
+ exit(EXIT_FAILURE);
+}
+
/*
* print_results_bw: the memory bandwidth results are stored in a file
* @filename: file that stores the results
@@ -532,14 +522,14 @@ void signal_handler_unregister(void)
*
* Return: 0 on success, < 0 on error.
*/
-static int print_results_bw(char *filename, int bm_pid, float bw_imc,
+static int print_results_bw(char *filename, pid_t bm_pid, float bw_imc,
unsigned long bw_resc)
{
unsigned long diff = fabs(bw_imc - bw_resc);
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
- printf("Pid: %d \t Mem_BW_iMC: %f \t ", bm_pid, bw_imc);
+ printf("Pid: %d \t Mem_BW_iMC: %f \t ", (int)bm_pid, bw_imc);
printf("Mem_BW_resc: %lu \t Difference: %lu\n", bw_resc, diff);
} else {
fp = fopen(filename, "a");
@@ -549,7 +539,7 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc,
return -1;
}
if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
- bm_pid, bw_imc, bw_resc, diff) <= 0) {
+ (int)bm_pid, bw_imc, bw_resc, diff) <= 0) {
ksft_print_msg("Could not log results\n");
fclose(fp);
@@ -561,73 +551,67 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc,
return 0;
}
-static void set_cmt_path(const char *ctrlgrp, const char *mongrp, char sock_num)
-{
- if (strlen(ctrlgrp) && strlen(mongrp))
- sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
- ctrlgrp, mongrp, sock_num);
- else if (!strlen(ctrlgrp) && strlen(mongrp))
- sprintf(llc_occup_path, MON_LCC_OCCUP_PATH, RESCTRL_PATH,
- mongrp, sock_num);
- else if (strlen(ctrlgrp) && !strlen(mongrp))
- sprintf(llc_occup_path, CON_LCC_OCCUP_PATH, RESCTRL_PATH,
- ctrlgrp, sock_num);
- else if (!strlen(ctrlgrp) && !strlen(mongrp))
- sprintf(llc_occup_path, LCC_OCCUP_PATH, RESCTRL_PATH, sock_num);
-}
-
/*
- * initialize_llc_occu_resctrl: Appropriately populate "llc_occup_path"
- * @ctrlgrp: Name of the control monitor group (con_mon grp)
- * @mongrp: Name of the monitor group (mon grp)
- * @cpu_no: CPU number that the benchmark PID is binded to
- * @resctrl_val: Resctrl feature (Eg: cat, cmt.. etc)
+ * measure_mem_bw - Measures memory bandwidth numbers while benchmark runs
+ * @uparams: User supplied parameters
+ * @param: Parameters passed to resctrl_val()
+ * @bm_pid: PID that runs the benchmark
+ * @bw_report: Bandwidth report type (reads, writes)
+ *
+ * Measure memory bandwidth from resctrl and from another source, which is
+ * the perf iMC value (or could be something else if the perf iMC event is
+ * not available). Compare the two values to validate the resctrl value.
+ * The measurement takes one second.
*/
-static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
- int cpu_no, char *resctrl_val)
+int measure_mem_bw(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid,
+ const char *bw_report)
{
- int domain_id;
+ unsigned long bw_resc, bw_resc_start, bw_resc_end;
+ FILE *mem_bw_fp;
+ float bw_imc;
+ int ret;
- if (get_domain_id("L3", cpu_no, &domain_id) < 0) {
- ksft_print_msg("Could not get domain ID\n");
- return;
- }
+ bw_report = get_bw_report_type(bw_report);
+ if (!bw_report)
+ return -1;
- if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
- set_cmt_path(ctrlgrp, mongrp, domain_id);
-}
+ mem_bw_fp = open_mem_bw_resctrl(mbm_total_path);
+ if (!mem_bw_fp)
+ return -1;
-static int measure_vals(const struct user_params *uparams,
- struct resctrl_val_param *param,
- unsigned long *bw_resc_start)
-{
- unsigned long bw_resc, bw_resc_end;
- float bw_imc;
- int ret;
+ ret = perf_open_imc_mem_bw(uparams->cpu);
+ if (ret < 0)
+ goto close_fp;
- /*
- * Measure memory bandwidth from resctrl and from
- * another source which is perf imc value or could
- * be something else if perf imc event is not available.
- * Compare the two values to validate resctrl value.
- * It takes 1sec to measure the data.
- */
- ret = get_mem_bw_imc(uparams->cpu, param->bw_report, &bw_imc);
+ ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_start);
if (ret < 0)
- return ret;
+ goto close_imc;
+
+ rewind(mem_bw_fp);
+
+ do_imc_mem_bw_test();
- ret = get_mem_bw_resctrl(&bw_resc_end);
+ ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_end);
if (ret < 0)
- return ret;
+ goto close_imc;
- bw_resc = (bw_resc_end - *bw_resc_start) / MB;
- ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
- if (ret)
- return ret;
+ ret = get_mem_bw_imc(bw_report, &bw_imc);
+ if (ret < 0)
+ goto close_imc;
- *bw_resc_start = bw_resc_end;
+ perf_close_imc_mem_bw();
+ fclose(mem_bw_fp);
- return 0;
+ bw_resc = (bw_resc_end - bw_resc_start) / MB;
+
+ return print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
+
+close_imc:
+ perf_close_imc_mem_bw();
+close_fp:
+ fclose(mem_bw_fp);
+ return ret;
}
/*
@@ -654,7 +638,7 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
fp = freopen("/dev/null", "w", stdout);
if (!fp) {
ksft_perror("Unable to direct benchmark status to /dev/null");
- PARENT_EXIT();
+ parent_exit(ppid);
}
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
@@ -668,7 +652,7 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
once = false;
} else {
ksft_print_msg("Invalid once parameter\n");
- PARENT_EXIT();
+ parent_exit(ppid);
}
if (run_fill_buf(span, memflush, operation, once))
@@ -682,7 +666,7 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
fclose(stdout);
ksft_print_msg("Unable to run specified benchmark\n");
- PARENT_EXIT();
+ parent_exit(ppid);
}
/*
@@ -700,21 +684,19 @@ int resctrl_val(const struct resctrl_test *test,
const char * const *benchmark_cmd,
struct resctrl_val_param *param)
{
- char *resctrl_val = param->resctrl_val;
- unsigned long bw_resc_start = 0;
struct sigaction sigact;
int ret = 0, pipefd[2];
char pipe_message = 0;
union sigval value;
+ int domain_id;
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
- if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
- !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
- ret = validate_bw_report_request(param->bw_report);
- if (ret)
- return ret;
+ ret = get_domain_id(test->resource, uparams->cpu, &domain_id);
+ if (ret < 0) {
+ ksft_print_msg("Could not get domain ID\n");
+ return ret;
}
/*
@@ -755,7 +737,7 @@ int resctrl_val(const struct resctrl_test *test,
/* Register for "SIGUSR1" signal from parent */
if (sigaction(SIGUSR1, &sigact, NULL)) {
ksft_perror("Can't register child for signal");
- PARENT_EXIT();
+ parent_exit(ppid);
}
/* Tell parent that child is ready */
@@ -773,10 +755,10 @@ int resctrl_val(const struct resctrl_test *test,
sigsuspend(&sigact.sa_mask);
ksft_perror("Child is done");
- PARENT_EXIT();
+ parent_exit(ppid);
}
- ksft_print_msg("Benchmark PID: %d\n", bm_pid);
+ ksft_print_msg("Benchmark PID: %d\n", (int)bm_pid);
/*
* The cast removes constness but nothing mutates benchmark_cmd within
@@ -792,22 +774,15 @@ int resctrl_val(const struct resctrl_test *test,
goto out;
/* Write benchmark to specified control&monitoring grp in resctrl FS */
- ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
- resctrl_val);
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp);
if (ret)
goto out;
- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
- !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- ret = initialize_mem_bw_imc();
+ if (param->init) {
+ ret = param->init(param, domain_id);
if (ret)
goto out;
-
- initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
- uparams->cpu, resctrl_val);
- } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
- initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
- uparams->cpu, resctrl_val);
+ }
/* Parent waits for child to be ready. */
close(pipefd[1]);
@@ -841,17 +816,9 @@ int resctrl_val(const struct resctrl_test *test,
if (ret < 0)
break;
- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
- !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- ret = measure_vals(uparams, param, &bw_resc_start);
- if (ret)
- break;
- } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
- sleep(1);
- ret = measure_llc_resctrl(param->filename, bm_pid);
- if (ret)
- break;
- }
+ ret = param->measure(uparams, param, bm_pid);
+ if (ret)
+ break;
}
out:
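
Condensed, every test now runs through the same resctrl_val() skeleton. A simplified sketch of that control flow (not the literal function body; fork()/signal plumbing, affinity handling and error paths are omitted, and it assumes the selftest's resctrl.h):

static int resctrl_val_sketch(const struct resctrl_test *test,
			      const struct user_params *uparams,
			      struct resctrl_val_param *param,
			      pid_t bm_pid)
{
	int domain_id, ret;

	ret = get_domain_id(test->resource, uparams->cpu, &domain_id);
	if (ret < 0)
		return ret;

	/* The benchmark child is forked and pinned before this point. */
	ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp);
	if (ret)
		return ret;

	/* One-time, feature-specific initialization (iMC perf, file paths). */
	if (param->init) {
		ret = param->init(param, domain_id);
		if (ret)
			return ret;
	}

	/* Per iteration: adjust the environment, then let the test measure. */
	for (;;) {
		ret = param->setup(test, uparams, param);
		if (ret)
			break;
		ret = param->measure(uparams, param, bm_pid);
		if (ret)
			break;
	}
	return ret;
}
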
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 1cade75176eb..250c320349a7 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -456,6 +456,9 @@ int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity)
* @grp: Full path and name of the group
* @parent_grp: Full path and name of the parent group
*
+ * Creates the group @grp_name if it does not exist yet. If @grp_name is NULL,
+ * it is interpreted as the root group, which always results in success.
+ *
* Return: 0 on success, < 0 on error.
*/
static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
@@ -464,12 +467,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
struct dirent *ep;
DIR *dp;
- /*
- * At this point, we are guaranteed to have resctrl FS mounted and if
- * length of grp_name == 0, it means, user wants to use root con_mon
- * grp, so do nothing
- */
- if (strlen(grp_name) == 0)
+ if (!grp_name)
return 0;
/* Check if requested grp exists or not */
@@ -508,7 +506,7 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
return -1;
}
- if (fprintf(fp, "%d\n", pid) < 0) {
+ if (fprintf(fp, "%d\n", (int)pid) < 0) {
ksft_print_msg("Failed to write pid to tasks file\n");
fclose(fp);
@@ -524,7 +522,6 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
* @bm_pid: PID that should be written
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
- * @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
*
* If a con_mon grp is requested, create it and write pid to it, otherwise
* write pid to root con_mon grp.
@@ -534,14 +531,13 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
*
* Return: 0 on success, < 0 on error.
*/
-int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
- char *resctrl_val)
+int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp)
{
char controlgroup[128], monitorgroup[512], monitorgroup_p[256];
char tasks[1024];
int ret = 0;
- if (strlen(ctrlgrp))
+ if (ctrlgrp)
sprintf(controlgroup, "%s/%s", RESCTRL_PATH, ctrlgrp);
else
sprintf(controlgroup, "%s", RESCTRL_PATH);
@@ -555,22 +551,19 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
if (ret)
goto out;
- /* Create mon grp and write pid into it for "mbm" and "cmt" test */
- if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)) ||
- !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
- if (strlen(mongrp)) {
- sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
- sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
- ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
- if (ret)
- goto out;
-
- sprintf(tasks, "%s/mon_groups/%s/tasks",
- controlgroup, mongrp);
- ret = write_pid_to_tasks(tasks, bm_pid);
- if (ret)
- goto out;
- }
+	/* Create the monitor group and write the pid into it if it is used */
+ if (mongrp) {
+ sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
+ sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
+ ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
+ if (ret)
+ goto out;
+
+ sprintf(tasks, "%s/mon_groups/%s/tasks",
+ controlgroup, mongrp);
+ ret = write_pid_to_tasks(tasks, bm_pid);
+ if (ret)
+ goto out;
}
out:
@@ -593,7 +586,8 @@ out:
*
* Return: 0 on success, < 0 on error.
*/
-int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource)
+int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
+ const char *resource)
{
char controlgroup[1024], reason[128], schema[1024] = {};
int domain_id, fd, schema_len, ret = 0;
@@ -611,7 +605,7 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resour
goto out;
}
- if (strlen(ctrlgrp) != 0)
+ if (ctrlgrp)
sprintf(controlgroup, "%s/%s/schemata", RESCTRL_PATH, ctrlgrp);
else
sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
@@ -837,22 +831,21 @@ int filter_dmesg(void)
return 0;
}
-int validate_bw_report_request(char *bw_report)
+const char *get_bw_report_type(const char *bw_report)
{
if (strcmp(bw_report, "reads") == 0)
- return 0;
+ return bw_report;
if (strcmp(bw_report, "writes") == 0)
- return 0;
+ return bw_report;
if (strcmp(bw_report, "nt-writes") == 0) {
- strcpy(bw_report, "writes");
- return 0;
+ return "writes";
}
if (strcmp(bw_report, "total") == 0)
- return 0;
+ return bw_report;
- fprintf(stderr, "Requested iMC B/W report type unavailable\n");
+ fprintf(stderr, "Requested iMC bandwidth report type unavailable\n");
- return -1;
+ return NULL;
}
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
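
Because get_bw_report_type() no longer rewrites its argument, callers can pass string literals and simply use the returned, normalized name. A minimal illustration (not from the patch; assumes the selftest's resctrl.h):

#include <string.h>
#include "resctrl.h"

/* "nt-writes" is folded into "writes"; unknown types map to NULL. */
static int report_type_example(void)
{
	const char *report = get_bw_report_type("nt-writes");

	if (!report)	/* unsupported type; error already printed to stderr */
		return -1;

	return strcmp(report, "writes") == 0 ? 0 : -1;
}
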
diff --git a/tools/testing/selftests/riscv/sigreturn/sigreturn.c b/tools/testing/selftests/riscv/sigreturn/sigreturn.c
index 62397d5934f1..ed351a1cb917 100644
--- a/tools/testing/selftests/riscv/sigreturn/sigreturn.c
+++ b/tools/testing/selftests/riscv/sigreturn/sigreturn.c
@@ -51,7 +51,7 @@ static int vector_sigreturn(int data, void (*handler)(int, siginfo_t *, void *))
asm(".option push \n\
.option arch, +v \n\
- vsetivli x0, 1, e32, ta, ma \n\
+ vsetivli x0, 1, e32, m1, ta, ma \n\
vmv.s.x v0, %1 \n\
# Generate SIGSEGV \n\
lw a0, 0(x0) \n\
diff --git a/tools/testing/selftests/sched/cs_prctl_test.c b/tools/testing/selftests/sched/cs_prctl_test.c
index 62fba7356af2..52d97fae4dbd 100644
--- a/tools/testing/selftests/sched/cs_prctl_test.c
+++ b/tools/testing/selftests/sched/cs_prctl_test.c
@@ -42,11 +42,11 @@ static pid_t gettid(void)
#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE 62
-# define PR_SCHED_CORE_GET 0
-# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
-# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
-# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
-# define PR_SCHED_CORE_MAX 4
+#define PR_SCHED_CORE_GET 0
+#define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+#define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+#define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+#define PR_SCHED_CORE_MAX 4
#endif
#define MAX_PROCESSES 128
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index b83099160fbc..94886c82ae60 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -194,14 +194,14 @@ int main(int argc, char *argv[])
ksft_set_plan(7);
ksft_print_msg("Running on:\n");
- ksft_print_msg("");
+ ksft_print_msg("%s", "");
system("uname -a");
ksft_print_msg("Current BPF sysctl settings:\n");
/* Avoid using "sysctl" which may not be installed. */
- ksft_print_msg("");
+ ksft_print_msg("%s", "");
system("grep -H . /proc/sys/net/core/bpf_jit_enable");
- ksft_print_msg("");
+ ksft_print_msg("%s", "");
system("grep -H . /proc/sys/net/core/bpf_jit_harden");
affinity();
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 783ebce8c4de..e3f97f90d8db 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -3954,6 +3954,60 @@ TEST(user_notification_filter_empty)
EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
}
+TEST(user_ioctl_notification_filter_empty)
+{
+ pid_t pid;
+ long ret;
+ int status, p[2];
+ struct __clone_args args = {
+ .flags = CLONE_FILES,
+ .exit_signal = SIGCHLD,
+ };
+ struct seccomp_notif req = {};
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ if (__NR_clone3 < 0)
+ SKIP(return, "Test not built with clone3 support");
+
+ ASSERT_EQ(0, pipe(p));
+
+ pid = sys_clone3(&args, sizeof(args));
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int listener;
+
+ listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ if (listener < 0)
+ _exit(EXIT_FAILURE);
+
+ if (dup2(listener, 200) != 200)
+ _exit(EXIT_FAILURE);
+ close(p[1]);
+ close(listener);
+ sleep(1);
+
+ _exit(EXIT_SUCCESS);
+ }
+ if (read(p[0], &status, 1) != 0)
+ _exit(EXIT_SUCCESS);
+ close(p[0]);
+ /*
+	 * The seccomp filter has become unused, so we should be notified once
+	 * the kernel gets around to cleaning up the task struct.
+ */
+ EXPECT_EQ(ioctl(200, SECCOMP_IOCTL_NOTIF_RECV, &req), -1);
+ EXPECT_EQ(errno, ENOENT);
+
+ EXPECT_EQ(waitpid(pid, &status, 0), pid);
+ EXPECT_EQ(true, WIFEXITED(status));
+ EXPECT_EQ(0, WEXITSTATUS(status));
+}
+
static void *do_thread(void *data)
{
return NULL;
@@ -4755,6 +4809,83 @@ TEST(user_notification_wait_killable_fatal)
EXPECT_EQ(SIGTERM, WTERMSIG(status));
}
+struct tsync_vs_thread_leader_args {
+ pthread_t leader;
+};
+
+static void *tsync_vs_dead_thread_leader_sibling(void *_args)
+{
+ struct sock_filter allow_filter[] = {
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog allow_prog = {
+ .len = (unsigned short)ARRAY_SIZE(allow_filter),
+ .filter = allow_filter,
+ };
+ struct tsync_vs_thread_leader_args *args = _args;
+ void *retval;
+ long ret;
+
+ ret = pthread_join(args->leader, &retval);
+ if (ret)
+ exit(1);
+ if (retval != _args)
+ exit(2);
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &allow_prog);
+ if (ret)
+ exit(3);
+
+ exit(0);
+}
+
+/*
+ * Ensure that a dead thread leader doesn't prevent installing new filters with
+ * SECCOMP_FILTER_FLAG_TSYNC from other threads.
+ */
+TEST(tsync_vs_dead_thread_leader)
+{
+ int status;
+ pid_t pid;
+ long ret;
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ struct sock_filter allow_filter[] = {
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog allow_prog = {
+ .len = (unsigned short)ARRAY_SIZE(allow_filter),
+ .filter = allow_filter,
+ };
+ struct tsync_vs_thread_leader_args *args;
+ pthread_t sibling;
+
+ args = malloc(sizeof(*args));
+ ASSERT_NE(NULL, args);
+ args->leader = pthread_self();
+
+ ret = pthread_create(&sibling, NULL,
+ tsync_vs_dead_thread_leader_sibling, args);
+ ASSERT_EQ(0, ret);
+
+ /* Install a new filter just to the leader thread. */
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog);
+ ASSERT_EQ(0, ret);
+ pthread_exit(args);
+ exit(1);
+ }
+
+ EXPECT_EQ(pid, waitpid(pid, &status, 0));
+ EXPECT_EQ(0, status);
+}
+
/*
* TODO:
* - expand NNP testing
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
index 12da0a939e3e..557fb074acf0 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
@@ -133,6 +133,50 @@
]
},
{
+ "id": "6f62",
+ "name": "Add taprio Qdisc with too short interval",
+ "category": [
+ "qdisc",
+ "taprio"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 300 sched-entry S 02 1700 clockid CLOCK_TAI",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $ETH",
+ "matchPattern": "qdisc taprio 1: root refcnt",
+ "matchCount": "0",
+ "teardown": [
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ },
+ {
+ "id": "831f",
+ "name": "Add taprio Qdisc with too short cycle-time",
+ "category": [
+ "qdisc",
+ "taprio"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $ETH",
+ "matchPattern": "qdisc taprio 1: root refcnt",
+ "matchCount": "0",
+ "teardown": [
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ },
+ {
"id": "3e1e",
"name": "Add taprio Qdisc with an invalid cycle-time",
"category": [
diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c
index e40dc5be2f66..d12ff955de0d 100644
--- a/tools/testing/selftests/timens/exec.c
+++ b/tools/testing/selftests/timens/exec.c
@@ -30,7 +30,7 @@ int main(int argc, char *argv[])
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec) > 5)
+ if (labs(tst.tv_sec - now.tv_sec) > 5)
return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
}
return 0;
@@ -50,7 +50,7 @@ int main(int argc, char *argv[])
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec) > 5)
+ if (labs(tst.tv_sec - now.tv_sec) > 5)
return pr_fail("%ld %ld\n",
now.tv_sec, tst.tv_sec);
}
@@ -70,7 +70,7 @@ int main(int argc, char *argv[])
/* Check that a child process is in the new timens. */
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
+ if (labs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
return pr_fail("%ld %ld\n",
now.tv_sec + OFFSET, tst.tv_sec);
}
diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c
index 5e7f0051bd7b..5b939f59dfa4 100644
--- a/tools/testing/selftests/timens/timer.c
+++ b/tools/testing/selftests/timens/timer.c
@@ -56,7 +56,7 @@ int run_test(int clockid, struct timespec now)
return pr_perror("timerfd_gettime");
elapsed = new_value.it_value.tv_sec;
- if (abs(elapsed - 3600) > 60) {
+ if (llabs(elapsed - 3600) > 60) {
ksft_test_result_fail("clockid: %d elapsed: %lld\n",
clockid, elapsed);
return 1;
diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c
index 9edd43d6b2c1..a4196bbd6e33 100644
--- a/tools/testing/selftests/timens/timerfd.c
+++ b/tools/testing/selftests/timens/timerfd.c
@@ -61,7 +61,7 @@ int run_test(int clockid, struct timespec now)
return pr_perror("timerfd_gettime(%d)", clockid);
elapsed = new_value.it_value.tv_sec;
- if (abs(elapsed - 3600) > 60) {
+ if (llabs(elapsed - 3600) > 60) {
ksft_test_result_fail("clockid: %d elapsed: %lld\n",
clockid, elapsed);
return 1;
diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
index beb7614941fb..5b8907bf451d 100644
--- a/tools/testing/selftests/timens/vfork_exec.c
+++ b/tools/testing/selftests/timens/vfork_exec.c
@@ -32,7 +32,7 @@ static void *tcheck(void *_args)
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now->tv_sec) > 5) {
+ if (labs(tst.tv_sec - now->tv_sec) > 5) {
pr_fail("%s: in-thread: unexpected value: %ld (%ld)\n",
args->tst_name, tst.tv_sec, now->tv_sec);
return (void *)1UL;
@@ -64,7 +64,7 @@ static int check(char *tst_name, struct timespec *now)
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now->tv_sec) > 5)
+ if (labs(tst.tv_sec - now->tv_sec) > 5)
return pr_fail("%s: unexpected value: %ld (%ld)\n",
tst_name, tst.tv_sec, now->tv_sec);
}
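
The abs() to labs()/llabs() conversions in the timens tests above are not cosmetic: tv_sec and the timerfd remainder are wider than int, and abs() converts its argument to int first, silently losing range. A standalone illustration of the truncation risk, assuming a typical LP64 glibc target (illustrative only, not part of the patch):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* A clock delta that does not fit in int, e.g. a badly wrong tv_sec. */
	long diff = (long)INT_MAX + 2;

	/* abs() takes int: the value is narrowed first (wraps on typical LP64). */
	printf("abs((int)diff) = %d\n", abs((int)diff));

	/* labs() keeps the full long range, so the comparison stays meaningful. */
	printf("labs(diff)     = %ld\n", labs(diff));

	return 0;
}
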
diff --git a/tools/testing/selftests/timers/rtcpie.c b/tools/testing/selftests/timers/rtcpie.c
index 4ef2184f1558..7c07edd0d450 100644
--- a/tools/testing/selftests/timers/rtcpie.c
+++ b/tools/testing/selftests/timers/rtcpie.c
@@ -29,7 +29,7 @@ static const char default_rtc[] = "/dev/rtc0";
int main(int argc, char **argv)
{
- int i, fd, retval, irqcount = 0;
+ int i, fd, retval;
unsigned long tmp, data, old_pie_rate;
const char *rtc = default_rtc;
struct timeval start, end, diff;
@@ -120,7 +120,6 @@ int main(int argc, char **argv)
fprintf(stderr, " %d",i);
fflush(stderr);
- irqcount++;
}
/* Disable periodic interrupts */
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
index d53a4d8008f9..98d8ba2afa00 100644
--- a/tools/testing/selftests/vDSO/Makefile
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -1,35 +1,30 @@
# SPDX-License-Identifier: GPL-2.0
-include ../lib.mk
-
uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
-TEST_GEN_PROGS := $(OUTPUT)/vdso_test_gettimeofday $(OUTPUT)/vdso_test_getcpu
-TEST_GEN_PROGS += $(OUTPUT)/vdso_test_abi
-TEST_GEN_PROGS += $(OUTPUT)/vdso_test_clock_getres
+TEST_GEN_PROGS := vdso_test_gettimeofday
+TEST_GEN_PROGS += vdso_test_getcpu
+TEST_GEN_PROGS += vdso_test_abi
+TEST_GEN_PROGS += vdso_test_clock_getres
ifeq ($(ARCH),$(filter $(ARCH),x86 x86_64))
-TEST_GEN_PROGS += $(OUTPUT)/vdso_standalone_test_x86
+TEST_GEN_PROGS += vdso_standalone_test_x86
endif
-TEST_GEN_PROGS += $(OUTPUT)/vdso_test_correctness
+TEST_GEN_PROGS += vdso_test_correctness
CFLAGS := -std=gnu99
-CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
-LDFLAGS_vdso_test_correctness := -ldl
+
ifeq ($(CONFIG_X86_32),y)
LDLIBS += -lgcc_s
endif
-all: $(TEST_GEN_PROGS)
+include ../lib.mk
$(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c
$(OUTPUT)/vdso_test_getcpu: parse_vdso.c vdso_test_getcpu.c
$(OUTPUT)/vdso_test_abi: parse_vdso.c vdso_test_abi.c
$(OUTPUT)/vdso_test_clock_getres: vdso_test_clock_getres.c
+
$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
- $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
- vdso_standalone_test_x86.c parse_vdso.c \
- -o $@
+$(OUTPUT)/vdso_standalone_test_x86: CFLAGS +=-nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
+
$(OUTPUT)/vdso_test_correctness: vdso_test_correctness.c
- $(CC) $(CFLAGS) \
- vdso_test_correctness.c \
- -o $@ \
- $(LDFLAGS_vdso_test_correctness)
+$(OUTPUT)/vdso_test_correctness: LDFLAGS += -ldl
diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
index 413f75620a35..4ae417372e9e 100644
--- a/tools/testing/selftests/vDSO/parse_vdso.c
+++ b/tools/testing/selftests/vDSO/parse_vdso.c
@@ -55,14 +55,20 @@ static struct vdso_info
ELF(Verdef) *verdef;
} vdso_info;
-/* Straight from the ELF specification. */
-static unsigned long elf_hash(const unsigned char *name)
+/*
+ * Straight from the ELF specification...and then tweaked slightly, in order to
+ * avoid a few clang warnings.
+ */
+static unsigned long elf_hash(const char *name)
{
unsigned long h = 0, g;
- while (*name)
+ const unsigned char *uch_name = (const unsigned char *)name;
+
+ while (*uch_name)
{
- h = (h << 4) + *name++;
- if (g = h & 0xf0000000)
+ h = (h << 4) + *uch_name++;
+ g = h & 0xf0000000;
+ if (g)
h ^= g >> 24;
h &= ~g;
}
diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 8a44ff973ee1..27f6fdf11969 100644
--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
@@ -18,7 +18,7 @@
#include "parse_vdso.h"
-/* We need a libc functions... */
+/* We need some libc functions... */
int strcmp(const char *a, const char *b)
{
/* This implementation is buggy: it never returns -1. */
@@ -34,6 +34,20 @@ int strcmp(const char *a, const char *b)
return 0;
}
+/*
+ * The clang build needs this, although gcc does not.
+ * Stolen from lib/string.c.
+ */
+void *memcpy(void *dest, const void *src, size_t count)
+{
+ char *tmp = dest;
+ const char *s = src;
+
+ while (count--)
+ *tmp++ = *s++;
+ return dest;
+}
+
/* ...and two syscalls. This is x86-specific. */
static inline long x86_syscall3(long nr, long a0, long a1, long a2)
{
@@ -70,7 +84,7 @@ void to_base10(char *lastdig, time_t n)
}
}
-__attribute__((externally_visible)) void c_main(void **stack)
+void c_main(void **stack)
{
/* Parse the stack */
long argc = (long)*stack;
diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
index e95bd56b332f..35856b11c143 100644
--- a/tools/testing/selftests/wireguard/qemu/Makefile
+++ b/tools/testing/selftests/wireguard/qemu/Makefile
@@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
else
-QEMU_MACHINE := -cpu max -machine microvm -no-acpi
+QEMU_MACHINE := -cpu max -machine microvm,acpi=off
endif
else ifeq ($(ARCH),i686)
CHOST := i686-linux-musl
@@ -120,9 +120,9 @@ KERNEL_ARCH := x86
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
else
-QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
+QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off
endif
else ifeq ($(ARCH),mips64)
CHOST := mips64-linux-musl
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 0b872c0a42d2..5c8757a25998 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -40,6 +40,13 @@ CFLAGS := -O2 -g -std=gnu99 -pthread -Wall $(KHDR_INCLUDES)
# call32_from_64 in thunks.S uses absolute addresses.
ifeq ($(CAN_BUILD_WITH_NOPIE),1)
CFLAGS += -no-pie
+
+ifneq ($(LLVM),)
+# clang only wants to see -no-pie during linking. Here, we don't have a separate
+# linking stage, so a compiler warning is unavoidable without (wastefully)
+# restructuring the Makefile. Avoid this by simply disabling that warning.
+CFLAGS += -Wno-unused-command-line-argument
+endif
endif
define gen-target-rule-32
@@ -73,10 +80,10 @@ all_64: $(BINARIES_64)
EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h
- $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
+ $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $< $(EXTRA_FILES) -lrt -ldl -lm
$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h
- $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
+ $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $< $(EXTRA_FILES) -lrt -ldl
# x86_64 users should be encouraged to install 32-bit libraries
ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),01)
@@ -100,10 +107,22 @@ warn_32bit_failure:
exit 0;
endif
-# Some tests have additional dependencies.
-$(OUTPUT)/sysret_ss_attrs_64: thunks.S
-$(OUTPUT)/ptrace_syscall_32: raw_syscall_helper_32.S
-$(OUTPUT)/test_syscall_vdso_32: thunks_32.S
+# Add an additional file to the source file list for a given target, and also
+# add a Makefile dependency on that same file. However, do these separately, so
+# that the compiler invocation ("$(CC) file1.c file2.S") is not combined with
+# the dependencies ("header3.h"), because clang, unlike gcc, will not accept
+# header files as an input to the compiler invocation.
+define extra-files
+$(OUTPUT)/$(1): EXTRA_FILES := $(2)
+$(OUTPUT)/$(1): $(2)
+endef
+
+$(eval $(call extra-files,sysret_ss_attrs_64,thunks.S))
+$(eval $(call extra-files,ptrace_syscall_32,raw_syscall_helper_32.S))
+$(eval $(call extra-files,test_syscall_vdso_32,thunks_32.S))
+$(eval $(call extra-files,fsgsbase_restore_64,clang_helpers_64.S))
+$(eval $(call extra-files,fsgsbase_restore_32,clang_helpers_32.S))
+$(eval $(call extra-files,sysret_rip_64,clang_helpers_64.S))
# check_initial_reg_state is special: it needs a custom entry, and it
# needs to be static so that its interpreter doesn't destroy its initial
diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c
index 95aad6d8849b..1fdf35a4d7f6 100644
--- a/tools/testing/selftests/x86/amx.c
+++ b/tools/testing/selftests/x86/amx.c
@@ -39,16 +39,6 @@ struct xsave_buffer {
};
};
-static inline uint64_t xgetbv(uint32_t index)
-{
- uint32_t eax, edx;
-
- asm volatile("xgetbv;"
- : "=a" (eax), "=d" (edx)
- : "c" (index));
- return eax + ((uint64_t)edx << 32);
-}
-
static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
{
uint32_t rfbm_lo = rfbm;
@@ -164,12 +154,6 @@ static inline void clear_xstate_header(struct xsave_buffer *buffer)
memset(&buffer->header, 0, sizeof(buffer->header));
}
-static inline uint64_t get_xstatebv(struct xsave_buffer *buffer)
-{
- /* XSTATE_BV is at the beginning of the header: */
- return *(uint64_t *)&buffer->header;
-}
-
static inline void set_xstatebv(struct xsave_buffer *buffer, uint64_t bv)
{
/* XSTATE_BV is at the beginning of the header: */
diff --git a/tools/testing/selftests/x86/clang_helpers_32.S b/tools/testing/selftests/x86/clang_helpers_32.S
new file mode 100644
index 000000000000..dc16271bac70
--- /dev/null
+++ b/tools/testing/selftests/x86/clang_helpers_32.S
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * 32-bit assembly helpers for asm operations that lack support in both gcc and
+ * clang. For example, clang asm does not support segment prefixes.
+ */
+.global dereference_seg_base
+dereference_seg_base:
+ mov %fs:(0), %eax
+ ret
+
+.section .note.GNU-stack,"",%progbits
diff --git a/tools/testing/selftests/x86/clang_helpers_64.S b/tools/testing/selftests/x86/clang_helpers_64.S
new file mode 100644
index 000000000000..185a69dbf39c
--- /dev/null
+++ b/tools/testing/selftests/x86/clang_helpers_64.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * 64-bit assembly helpers for asm operations that lack support in both gcc and
+ * clang. For example, clang asm does not support segment prefixes.
+ */
+.global dereference_seg_base
+
+dereference_seg_base:
+ mov %gs:(0), %rax
+ ret
+
+.global test_page
+.global test_syscall_insn
+
+.pushsection ".text", "ax"
+.balign 4096
+test_page: .globl test_page
+ .fill 4094,1,0xcc
+
+test_syscall_insn:
+ syscall
+
+.ifne . - test_page - 4096
+ .error "test page is not one page long"
+.endif
+.popsection
+
+.section .note.GNU-stack,"",%progbits
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 8c780cce941d..50cf32de6313 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -109,11 +109,6 @@ static inline void wrgsbase(unsigned long gsbase)
asm volatile("wrgsbase %0" :: "r" (gsbase) : "memory");
}
-static inline void wrfsbase(unsigned long fsbase)
-{
- asm volatile("wrfsbase %0" :: "r" (fsbase) : "memory");
-}
-
enum which_base { FS, GS };
static unsigned long read_base(enum which_base which)
@@ -212,7 +207,6 @@ static void mov_0_gs(unsigned long initial_base, bool schedule)
}
static volatile unsigned long remote_base;
-static volatile bool remote_hard_zero;
static volatile unsigned int ftx;
/*
diff --git a/tools/testing/selftests/x86/fsgsbase_restore.c b/tools/testing/selftests/x86/fsgsbase_restore.c
index 6fffadc51579..224058c1e4b2 100644
--- a/tools/testing/selftests/x86/fsgsbase_restore.c
+++ b/tools/testing/selftests/x86/fsgsbase_restore.c
@@ -39,12 +39,11 @@
# define SEG "%fs"
#endif
-static unsigned int dereference_seg_base(void)
-{
- int ret;
- asm volatile ("mov %" SEG ":(0), %0" : "=rm" (ret));
- return ret;
-}
+/*
+ * Defined in clang_helpers_[32|64].S, because unlike gcc, clang inline asm does
+ * not support segmentation prefixes.
+ */
+unsigned int dereference_seg_base(void);
static void init_seg(void)
{
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index 5d7961a5f7f6..0b75b29f794b 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -487,7 +487,7 @@ static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
greg_t asm_ss = ctx->uc_mcontext.gregs[REG_CX];
if (asm_ss != sig_ss && sig == SIGTRAP) {
/* Sanity check failure. */
- printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
+ printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %x, ax = %llx\n",
ss, *ssptr(ctx), (unsigned long long)asm_ss);
nerrs++;
}
diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c
index 461fa41a4d02..48ab065a76f9 100644
--- a/tools/testing/selftests/x86/syscall_arg_fault.c
+++ b/tools/testing/selftests/x86/syscall_arg_fault.c
@@ -29,7 +29,6 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
err(1, "sigaction");
}
-static volatile sig_atomic_t sig_traps;
static sigjmp_buf jmpbuf;
static volatile sig_atomic_t n_errs;
diff --git a/tools/testing/selftests/x86/sysret_rip.c b/tools/testing/selftests/x86/sysret_rip.c
index 84d74be1d902..b30de9aaa6d4 100644
--- a/tools/testing/selftests/x86/sysret_rip.c
+++ b/tools/testing/selftests/x86/sysret_rip.c
@@ -22,21 +22,13 @@
#include <sys/mman.h>
#include <assert.h>
-
-asm (
- ".pushsection \".text\", \"ax\"\n\t"
- ".balign 4096\n\t"
- "test_page: .globl test_page\n\t"
- ".fill 4094,1,0xcc\n\t"
- "test_syscall_insn:\n\t"
- "syscall\n\t"
- ".ifne . - test_page - 4096\n\t"
- ".error \"test page is not one page long\"\n\t"
- ".endif\n\t"
- ".popsection"
- );
-
+/*
+ * These items are in clang_helpers_64.S, in order to avoid clang inline asm
+ * limitations:
+ */
+void test_syscall_insn(void);
extern const char test_page[];
+
static void const *current_test_page_addr = test_page;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
diff --git a/tools/testing/selftests/x86/test_FISTTP.c b/tools/testing/selftests/x86/test_FISTTP.c
index 09789c0ce3e9..b9ae9d8cebcb 100644
--- a/tools/testing/selftests/x86/test_FISTTP.c
+++ b/tools/testing/selftests/x86/test_FISTTP.c
@@ -25,7 +25,7 @@ int test(void)
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fld1""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fld1""\n"
" fisttpl res32""\n"
" fld1""\n"
@@ -45,7 +45,7 @@ int test(void)
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fldpi""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fldpi""\n"
" fisttpl res32""\n"
" fldpi""\n"
@@ -66,7 +66,7 @@ int test(void)
asm volatile ("\n"
" fldpi""\n"
" fchs""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fldpi""\n"
" fchs""\n"
" fisttpl res32""\n"
@@ -88,7 +88,7 @@ int test(void)
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fldln2""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fldln2""\n"
" fisttpl res32""\n"
" fldln2""\n"
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index d4c8e8d79d38..6de11b4df458 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -97,11 +97,6 @@ static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
return syscall(SYS_gettimeofday, tv, tz);
}
-static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
-{
- return syscall(SYS_clock_gettime, id, ts);
-}
-
static inline long sys_time(time_t *t)
{
return syscall(SYS_time, t);
@@ -252,7 +247,7 @@ static void test_getcpu(int cpu)
if (ret_sys == 0) {
if (cpu_sys != cpu)
- ksft_print_msg("syscall reported CPU %hu but should be %d\n",
+ ksft_print_msg("syscall reported CPU %u but should be %d\n",
cpu_sys, cpu);
have_node = true;
@@ -270,10 +265,10 @@ static void test_getcpu(int cpu)
if (cpu_vdso != cpu || node_vdso != node) {
if (cpu_vdso != cpu)
- ksft_print_msg("vDSO reported CPU %hu but should be %d\n",
+ ksft_print_msg("vDSO reported CPU %u but should be %d\n",
cpu_vdso, cpu);
if (node_vdso != node)
- ksft_print_msg("vDSO reported node %hu but should be %hu\n",
+ ksft_print_msg("vDSO reported node %u but should be %u\n",
node_vdso, node);
ksft_test_result_fail("Wrong values\n");
} else {
@@ -295,10 +290,10 @@ static void test_getcpu(int cpu)
if (cpu_vsys != cpu || node_vsys != node) {
if (cpu_vsys != cpu)
- ksft_print_msg("vsyscall reported CPU %hu but should be %d\n",
+ ksft_print_msg("vsyscall reported CPU %u but should be %d\n",
cpu_vsys, cpu);
if (node_vsys != node)
- ksft_print_msg("vsyscall reported node %hu but should be %hu\n",
+ ksft_print_msg("vsyscall reported node %u but should be %u\n",
node_vsys, node);
ksft_test_result_fail("Wrong values\n");
} else {
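
The format-string changes make the conversion match what are, by all appearances, plain unsigned int CPU/node values: "%u" takes an unsigned int, while "%hu" documents an unsigned short the variables do not have and can trigger -Wformat warnings. A trivial standalone illustration (variable names hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int cpu = 3, node = 0;	/* e.g. values reported by getcpu() */

	printf("CPU %u on node %u\n", cpu, node);	/* "%u" matches unsigned int */
	return 0;
}
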
diff --git a/tools/testing/selftests/x86/vdso_restorer.c b/tools/testing/selftests/x86/vdso_restorer.c
index fe99f2434155..ac8d8e1e9805 100644
--- a/tools/testing/selftests/x86/vdso_restorer.c
+++ b/tools/testing/selftests/x86/vdso_restorer.c
@@ -92,4 +92,6 @@ int main()
printf("[FAIL]\t!SA_SIGINFO handler was not called\n");
nerrs++;
}
+
+ return nerrs;
}
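
Returning nerrs from main() turns the accumulated failure count into the process exit status (truncated to 8 bits); before this change main() fell off the end and, per C99, returned 0 even when checks had failed. A minimal sketch of the convention:

#include <stdio.h>

int main(void)
{
	int nerrs = 0;

	if (2 + 2 != 4) {		/* placeholder for a real check */
		printf("[FAIL]\tarithmetic is broken\n");
		nerrs++;
	}

	return nerrs;	/* non-zero exit status signals failure */
}
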
diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile
index a7f56a09ca9f..6e0b4e95e230 100644
--- a/tools/testing/vsock/Makefile
+++ b/tools/testing/vsock/Makefile
@@ -13,3 +13,16 @@ CFLAGS += -g -O2 -Werror -Wall -I. -I../../include -I../../../usr/include -Wno-p
clean:
${RM} *.o *.d vsock_test vsock_diag_test vsock_perf vsock_uring_test
-include *.d
+
+VSOCK_INSTALL_PATH ?=
+
+install: all
+ifdef VSOCK_INSTALL_PATH
+ mkdir -p $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_test $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_perf $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_diag_test $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_uring_test $(VSOCK_INSTALL_PATH)
+else
+ $(error Error: set VSOCK_INSTALL_PATH to use install)
+endif
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 86d267db87bb..7bc74969a819 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -55,6 +55,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
struct kvm_memory_slot *memslot;
int as_id, id;
+ if (!mask)
+ return;
+
as_id = slot >> 16;
id = (u16)slot;
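
The added check returns early when the harvested dirty mask is empty, skipping the memslot lookup and the per-bit reset work for chunks that carry no dirty pages. A userspace analog of the same guard-then-iterate pattern (hypothetical names, not the kernel function):

#include <stdint.h>
#include <stdio.h>

/* Reset every gfn whose bit is set in 'mask'; bit i maps to gfn offset + i. */
static void reset_dirty_gfns(uint64_t offset, uint64_t mask)
{
	if (!mask)
		return;		/* nothing dirty here: skip all lookups */

	for (unsigned int i = 0; i < 64; i++)
		if (mask & (1ULL << i))
			printf("reset gfn 0x%llx\n",
			       (unsigned long long)(offset + i));
}

int main(void)
{
	reset_dirty_gfns(0x1000, 0);	/* no-op */
	reset_dirty_gfns(0x1000, 0x5);	/* gfns 0x1000 and 0x1002 */
	return 0;
}
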
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 0f4e0cf4f158..747fe251e445 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -510,8 +510,10 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
}
if (folio_test_hwpoison(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
r = -EHWPOISON;
- goto out_unlock;
+ goto out_fput;
}
page = folio_file_page(folio, index);
@@ -522,7 +524,6 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
r = 0;
-out_unlock:
folio_unlock(folio);
out_fput:
fput(file);
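
Before this fix the hwpoison case jumped to the exit label that only unlocked the folio, leaking the reference taken when it was looked up; the corrected path drops the lock and the reference explicitly and then skips straight to releasing the file. A userspace analog of the rule the fix restores, with hypothetical types and names (the kernel side uses folio_unlock()/folio_put() as shown in the hunk):

#include <pthread.h>
#include <stdlib.h>

struct object {
	pthread_mutex_t lock;
	int refcount;
	int poisoned;
};

static void put_object(struct object *obj)
{
	if (--obj->refcount == 0)
		free(obj);
}

/* 'obj' arrives with a reference from the lookup and is then locked;
 * an early error must undo both, not just the lock. */
static int use_object(struct object *obj)
{
	pthread_mutex_lock(&obj->lock);

	if (obj->poisoned) {
		pthread_mutex_unlock(&obj->lock);
		put_object(obj);	/* the reference leaked before the fix */
		return -1;
	}

	/* ... use the object ... */
	pthread_mutex_unlock(&obj->lock);
	put_object(obj);
	return 0;
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	pthread_mutex_init(&obj->lock, NULL);
	obj->refcount = 1;	/* the "lookup" reference consumed below */
	obj->poisoned = 1;

	return use_object(obj) ? 1 : 0;
}
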
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 14841acb8b95..1192942aef91 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -651,7 +651,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
range->on_lock(kvm);
if (IS_KVM_NULL_FN(range->handler))
- break;
+ goto mmu_unlock;
}
r.ret |= range->handler(kvm, &gfn_range);
}
@@ -660,6 +660,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
if (range->flush_on_ret && r.ret)
kvm_flush_remote_tlbs(kvm);
+mmu_unlock:
if (r.found_memslot)
KVM_MMU_UNLOCK(kvm);
@@ -4025,12 +4026,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
- int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+ int last_boosted_vcpu;
unsigned long i;
int yielded = 0;
int try = 3;
int pass;
+ last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
kvm_vcpu_set_in_spin_loop(me, true);
/*
* We boost the priority of a VCPU that is runnable but not
@@ -4068,7 +4070,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
- kvm->last_boosted_vcpu = i;
+ WRITE_ONCE(kvm->last_boosted_vcpu, i);
break;
} else if (yielded < 0) {
try--;
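
last_boosted_vcpu is read and updated without holding a lock, so the change snapshots it once with READ_ONCE() and publishes the new value with WRITE_ONCE(), which keeps the compiler from re-reading or tearing the access and documents the lockless use. A userspace C11 analog of that annotation, with relaxed atomics standing in for READ_ONCE()/WRITE_ONCE() (names hypothetical):

#include <stdatomic.h>

static _Atomic int last_boosted;	/* shared, read/written locklessly */

int snapshot_last_boosted(void)
{
	/* one untorn read, no re-fetching; no ordering is needed */
	return atomic_load_explicit(&last_boosted, memory_order_relaxed);
}

void record_last_boosted(int i)
{
	atomic_store_explicit(&last_boosted, i, memory_order_relaxed);
}
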
@@ -4427,7 +4429,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_regs *kvm_regs;
r = -ENOMEM;
- kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
if (!kvm_regs)
goto out;
r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
@@ -4454,8 +4456,7 @@ out_free1:
break;
}
case KVM_GET_SREGS: {
- kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
- GFP_KERNEL_ACCOUNT);
+ kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
r = -ENOMEM;
if (!kvm_sregs)
goto out;
@@ -4547,7 +4548,7 @@ out_free1:
break;
}
case KVM_GET_FPU: {
- fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
+ fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
r = -ENOMEM;
if (!fpu)
goto out;
@@ -6210,7 +6211,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
active = kvm_active_vms;
mutex_unlock(&kvm_lock);
- env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
+ env = kzalloc(sizeof(*env), GFP_KERNEL);
if (!env)
return;
@@ -6226,7 +6227,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
if (!IS_ERR(kvm->debugfs_dentry)) {
- char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
+ char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
if (p) {
tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);